This repository has been archived by the owner on Mar 2, 2025. It is now read-only.
Commit 3610e85 (0 parents)
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
Signed-off-by: benny-nottonson <[email protected]>
Showing 8 changed files with 854 additions and 0 deletions.
(3 binary files not shown; 2 large diffs are not rendered by default.)
@@ -0,0 +1,73 @@
from time.time import now

import basalt.nn as nn
from basalt import Tensor, TensorShape
from basalt import Graph, Symbol, OP
from basalt.utils.datasets import BostonHousing
from basalt.utils.dataloader import DataLoader


fn linear_regression(batch_size: Int, n_inputs: Int, n_outputs: Int) -> Graph:
    var g = Graph()

    var x = g.input(TensorShape(batch_size, n_inputs))
    var y_true = g.input(TensorShape(batch_size, n_outputs))

    var y_pred = nn.Linear(g, x, n_outputs)
    g.out(y_pred)

    var loss = nn.MSELoss(g, y_pred, y_true)
    g.loss(loss)

    return g^


fn main():
    alias batch_size = 32
    alias num_epochs = 200
    alias learning_rate = 0.02

    alias graph = linear_regression(batch_size, 13, 1)

    var model = nn.Model[graph]()
    var optim = nn.optim.Adam[graph](
        Reference(model.parameters), lr=learning_rate
    )

    print("Loading data...")
    var train_data: BostonHousing
    try:
        train_data = BostonHousing(file_path="./data/housing.csv")
    except:
        print("Could not load data")
        return

    var training_loader = DataLoader(
        data=train_data.data, labels=train_data.labels, batch_size=batch_size
    )

    print("Training started.")
    var start = now()
    for epoch in range(num_epochs):
        var num_batches: Int = 0
        var epoch_loss: Float32 = 0.0
        for batch in training_loader:
            var loss = model.forward(batch.data, batch.labels)

            optim.zero_grad()
            model.backward()
            optim.step()

            epoch_loss += loss[0]
            num_batches += 1

        print(
            "Epoch: [",
            epoch + 1,
            "/",
            num_epochs,
            "] \t Avg loss per epoch:",
            epoch_loss / num_batches,
        )

    print("Training finished: ", (now() - start) / 1e9, "seconds")
@@ -0,0 +1,106 @@
from time.time import now

import basalt.nn as nn
from basalt import Tensor, TensorShape
from basalt import Graph, Symbol, OP, dtype
from basalt.utils.datasets import MNIST
from basalt.utils.dataloader import DataLoader
from basalt.autograd.attributes import AttributeVector, Attribute


fn create_CNN(batch_size: Int) -> Graph:
    var g = Graph()
    var x = g.input(TensorShape(batch_size, 1, 28, 28))

    var x1 = nn.Conv2d(g, x, out_channels=16, kernel_size=5, padding=2)
    var x2 = nn.ReLU(g, x1)
    var x3 = nn.MaxPool2d(g, x2, kernel_size=2)
    var x4 = nn.Conv2d(g, x3, out_channels=32, kernel_size=5, padding=2)
    var x5 = nn.ReLU(g, x4)
    var x6 = nn.MaxPool2d(g, x5, kernel_size=2)
    var x7 = g.op(
        OP.RESHAPE,
        x6,
        attributes=AttributeVector(
            Attribute(
                "shape",
                TensorShape(
                    x6.shape[0], x6.shape[1] * x6.shape[2] * x6.shape[3]
                ),
            )
        ),
    )
    var out = nn.Linear(g, x7, n_outputs=10)
    g.out(out)

    var y_true = g.input(TensorShape(batch_size, 10))
    var loss = nn.CrossEntropyLoss(g, out, y_true)
    g.loss(loss)

    return g^


fn main():
    alias num_epochs = 20
    alias batch_size = 4
    alias learning_rate = 1e-3

    alias graph = create_CNN(batch_size)

    var model = nn.Model[graph]()
    var optim = nn.optim.Adam[graph](
        Reference(model.parameters), lr=learning_rate
    )

    print("Loading data ...")
    var train_data: MNIST
    try:
        train_data = MNIST(file_path="./data/mnist_test_small.csv")
    except e:
        print("Could not load data")
        print(e)
        return

    var training_loader = DataLoader(
        data=train_data.data, labels=train_data.labels, batch_size=batch_size
    )

    print("Training started.")
    var start = now()

    for epoch in range(num_epochs):
        var num_batches: Int = 0
        var epoch_loss: Float32 = 0.0
        var epoch_start = now()
        for batch in training_loader:
            # One-hot encode the integer labels: in the row-major (batch, 10)
            # tensor, entry (bb, label) sits at flat index bb * 10 + label.
            var labels_one_hot = Tensor[dtype](batch.labels.dim(0), 10)
            for bb in range(batch.labels.dim(0)):
                labels_one_hot[int((bb * 10 + batch.labels[bb]))] = 1.0

            var loss = model.forward(batch.data, labels_one_hot)

            optim.zero_grad()
            model.backward()
            optim.step()

            epoch_loss += loss[0]
            num_batches += 1

        print(
            "Epoch [",
            epoch + 1,
            "/",
            num_epochs,
            "],\t Step [",
            num_batches,
            "/",
            train_data.data.dim(0) // batch_size,
            "],\t Loss:",
            epoch_loss / num_batches,
        )

        print("Epoch time: ", (now() - epoch_start) / 1e9, "seconds")

    print("Training finished: ", (now() - start) / 1e9, "seconds")

    model.print_perf_metrics("ms", True)
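For reference, the shapes flowing through create_CNN work out as follows, using the usual shape rule out = (in + 2 * padding - kernel_size) / stride + 1 and assuming stride-1 convolutions and stride-2 pooling (the strides are an assumption; the example does not set them explicitly):

    # input:                   (batch_size,  1, 28, 28)
    # Conv2d(16, k=5, p=2) ->  (batch_size, 16, 28, 28)   # 28 + 2*2 - 5 + 1 = 28
    # MaxPool2d(k=2)       ->  (batch_size, 16, 14, 14)
    # Conv2d(32, k=5, p=2) ->  (batch_size, 32, 14, 14)
    # MaxPool2d(k=2)       ->  (batch_size, 32,  7,  7)
    # RESHAPE              ->  (batch_size, 32 * 7 * 7) = (batch_size, 1568)
    # Linear(10)           ->  (batch_size, 10)

This is why the RESHAPE attribute computes the flattened width as x6.shape[1] * x6.shape[2] * x6.shape[3] rather than hard-coding it.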
@@ -0,0 +1,67 @@
from random import rand
from time.time import now
import math

import basalt.nn as nn
from basalt import Tensor, TensorShape
from basalt import dtype
from basalt import Graph, Symbol, OP
from basalt.utils.tensorutils import fill


fn create_simple_nn(batch_size: Int, n_inputs: Int, n_outputs: Int) -> Graph:
    var g = Graph()

    var x = g.input(TensorShape(batch_size, n_inputs))
    var y_true = g.input(TensorShape(batch_size, n_outputs))

    var x1 = nn.Linear(g, x, n_outputs=32)
    var x2 = nn.ReLU(g, x1)
    var x3 = nn.Linear(g, x2, n_outputs=32)
    var x4 = nn.ReLU(g, x3)
    var y_pred = nn.Linear(g, x4, n_outputs=n_outputs)
    g.out(y_pred)

    var loss = nn.MSELoss(g, y_pred, y_true)
    g.loss(loss)

    g.compile()

    return g^


fn main():
    alias batch_size = 32
    alias n_inputs = 1
    alias n_outputs = 1
    alias learning_rate = 0.01

    alias epochs = 20000

    alias graph = create_simple_nn(batch_size, n_inputs, n_outputs)

    var model = nn.Model[graph]()
    var optimizer = nn.optim.Adam[graph](
        Reference(model.parameters), lr=learning_rate
    )

    var x_data = Tensor[dtype](batch_size, n_inputs)
    var y_data = Tensor[dtype](batch_size, n_outputs)

    print("Training started")
    var start = now()
    for i in range(epochs):
        rand[dtype](x_data.data(), x_data.num_elements())

        for j in range(batch_size):
            x_data[j] = x_data[j] * 2 - 1
            y_data[j] = math.sin(x_data[j])

        var out = model.forward(x_data, y_data)

        if (i + 1) % 1000 == 0:
            print("[", i + 1, "/", epochs, "] \tLoss: ", out[0])

        optimizer.zero_grad()
        model.backward()
        optimizer.step()

    print("Training finished: ", (now() - start) / 1e9, "seconds")