
Commit

init
Signed-off-by: benny-nottonson <[email protected]>
Benny-Nottonson committed May 24, 2024
0 parents commit 3610e85
Showing 8 changed files with 854 additions and 0 deletions.
Binary file added basalt.mojopkg
Binary file not shown.
507 changes: 507 additions & 0 deletions data/housing.csv

Large diffs are not rendered by default.

101 changes: 101 additions & 0 deletions data/mnist_test_small.csv

Large diffs are not rendered by default.

Binary file added data/mnist_torch.onnx
Binary file not shown.
Binary file added data/yolov8n.onnx
Binary file not shown.
73 changes: 73 additions & 0 deletions housing.mojo
@@ -0,0 +1,73 @@
from time.time import now

import basalt.nn as nn
from basalt import Tensor, TensorShape
from basalt import Graph, Symbol, OP
from basalt.utils.datasets import BostonHousing
from basalt.utils.dataloader import DataLoader


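# Builds a static Basalt graph for plain linear regression:
# a single Linear layer trained with mean-squared-error (MSE) loss.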
fn linear_regression(batch_size: Int, n_inputs: Int, n_outputs: Int) -> Graph:
var g = Graph()

var x = g.input(TensorShape(batch_size, n_inputs))
var y_true = g.input(TensorShape(batch_size, n_outputs))

var y_pred = nn.Linear(g, x, n_outputs)
g.out(y_pred)

var loss = nn.MSELoss(g, y_pred, y_true)
g.loss(loss)

return g^


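# Trains the regression graph on the Boston Housing data
# (13 input features, 1 target). The graph is an `alias`, so it is
# materialized at compile time.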
fn main():
alias batch_size = 32
alias num_epochs = 200
alias learning_rate = 0.02

alias graph = linear_regression(batch_size, 13, 1)

var model = nn.Model[graph]()
var optim = nn.optim.Adam[graph](
Reference(model.parameters), lr=learning_rate
)

print("Loading data...")
var train_data: BostonHousing
try:
train_data = BostonHousing(file_path="./data/housing.csv")
except:
print("Could not load data")
return

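    # DataLoader yields batches of `batch_size` rows, each exposing
    # `.data` and `.labels` (see the training loop below).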
var training_loader = DataLoader(
data=train_data.data, labels=train_data.labels, batch_size=batch_size
)

print("Training started.")
var start = now()
for epoch in range(num_epochs):
var num_batches: Int = 0
var epoch_loss: Float32 = 0.0
for batch in training_loader:
var loss = model.forward(batch.data, batch.labels)

optim.zero_grad()
model.backward()
optim.step()

epoch_loss += loss[0]
num_batches += 1

print(
"Epoch: [",
epoch + 1,
"/",
num_epochs,
"] \t Avg loss per epoch:",
epoch_loss / num_batches,
)

print("Training finished: ", (now() - start) / 1e9, "seconds")
106 changes: 106 additions & 0 deletions mnist.mojo
@@ -0,0 +1,106 @@
from time.time import now

import basalt.nn as nn
from basalt import Tensor, TensorShape
from basalt import Graph, Symbol, OP, dtype
from basalt.utils.datasets import MNIST
from basalt.utils.dataloader import DataLoader
from basalt.autograd.attributes import AttributeVector, Attribute


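# Two Conv2d -> ReLU -> MaxPool2d stages, a flatten via OP.RESHAPE,
# and a final Linear layer mapping to the 10 MNIST classes.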
fn create_CNN(batch_size: Int) -> Graph:
var g = Graph()
var x = g.input(TensorShape(batch_size, 1, 28, 28))

var x1 = nn.Conv2d(g, x, out_channels=16, kernel_size=5, padding=2)
var x2 = nn.ReLU(g, x1)
var x3 = nn.MaxPool2d(g, x2, kernel_size=2)
var x4 = nn.Conv2d(g, x3, out_channels=32, kernel_size=5, padding=2)
var x5 = nn.ReLU(g, x4)
var x6 = nn.MaxPool2d(g, x5, kernel_size=2)
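    # Flatten the (N, C, H, W) activations to (N, C*H*W) so they can
    # feed the final Linear layer.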
var x7 = g.op(
OP.RESHAPE,
x6,
attributes=AttributeVector(
Attribute(
"shape",
TensorShape(
x6.shape[0], x6.shape[1] * x6.shape[2] * x6.shape[3]
),
)
),
)
var out = nn.Linear(g, x7, n_outputs=10)
g.out(out)

var y_true = g.input(TensorShape(batch_size, 10))
var loss = nn.CrossEntropyLoss(g, out, y_true)
g.loss(loss)

return g^


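# Trains the CNN on a small MNIST CSV subset. Labels are one-hot
# encoded per batch, matching the (batch_size, 10) `y_true` input
# declared in create_CNN.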
fn main():
alias num_epochs = 20
alias batch_size = 4
alias learning_rate = 1e-3

alias graph = create_CNN(batch_size)

var model = nn.Model[graph]()
var optim = nn.optim.Adam[graph](
Reference(model.parameters), lr=learning_rate
)

print("Loading data ...")
var train_data: MNIST
try:
train_data = MNIST(file_path="./data/mnist_test_small.csv")
except e:
print("Could not load data")
print(e)
return

var training_loader = DataLoader(
data=train_data.data, labels=train_data.labels, batch_size=batch_size
)

print("Training started/")
var start = now()

for epoch in range(num_epochs):
var num_batches: Int = 0
var epoch_loss: Float32 = 0.0
var epoch_start = now()
for batch in training_loader:
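            # One-hot encode the integer labels into a (batch, 10) tensor.
            # The tensor is indexed as a flat buffer, hence bb * 10 + label.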
var labels_one_hot = Tensor[dtype](batch.labels.dim(0), 10)
for bb in range(batch.labels.dim(0)):
                labels_one_hot[int(bb * 10 + batch.labels[bb])] = 1.0

var loss = model.forward(batch.data, labels_one_hot)

optim.zero_grad()
model.backward()
optim.step()

epoch_loss += loss[0]
num_batches += 1

print(
"Epoch [",
epoch + 1,
"/",
num_epochs,
"],\t Step [",
num_batches,
"/",
train_data.data.dim(0) // batch_size,
"],\t Loss:",
epoch_loss / num_batches,
)

print("Epoch time: ", (now() - epoch_start) / 1e9, "seconds")

print("Training finished: ", (now() - start) / 1e9, "seconds")

model.print_perf_metrics("ms", True)
67 changes: 67 additions & 0 deletions sin_estimate.mojo
@@ -0,0 +1,67 @@
from random import rand
from time.time import now
import math

import basalt.nn as nn
from basalt import Tensor, TensorShape
from basalt import dtype
from basalt import Graph, Symbol, OP
from basalt.utils.tensorutils import fill


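# A small MLP (Linear -> ReLU -> Linear -> ReLU -> Linear) trained with
# MSE loss to approximate sin(x) on [-1, 1].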
fn create_simple_nn(batch_size: Int, n_inputs: Int, n_outputs: Int) -> Graph:
var g = Graph()

var x = g.input(TensorShape(batch_size, n_inputs))
var y_true = g.input(TensorShape(batch_size, n_outputs))

var x1 = nn.Linear(g, x, n_outputs=32)
var x2 = nn.ReLU(g, x1)
var x3 = nn.Linear(g, x2, n_outputs=32)
var x4 = nn.ReLU(g, x3)
var y_pred = nn.Linear(g, x4, n_outputs=n_outputs)
g.out(y_pred)

var loss = nn.MSELoss(g, y_pred, y_true)
g.loss(loss)

g.compile()

    return g^


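# Draws a fresh random batch every step, rescales it to [-1, 1], and
# fits the network to sin(x) for 20000 steps.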
fn main():
alias batch_size = 32
alias n_inputs = 1
alias n_outputs = 1
alias learning_rate = 0.01

alias epochs = 20000

alias graph = create_simple_nn(batch_size, n_inputs, n_outputs)

var model = nn.Model[graph]()
var optimizer = nn.optim.Adam[graph](Reference(model.parameters), lr=learning_rate)

var x_data = Tensor[dtype](batch_size, n_inputs)
var y_data = Tensor[dtype](batch_size, n_outputs)

print("Training started")
var start = now()
for i in range(epochs):
rand[dtype](x_data.data(), x_data.num_elements())

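        # rand fills x_data with uniform values in [0, 1); rescale them to
        # [-1, 1] and compute the matching sin targets element-wise.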
for j in range(batch_size):
x_data[j] = x_data[j] * 2 - 1
y_data[j] = math.sin(x_data[j])

var out = model.forward(x_data, y_data)

if (i + 1) % 1000 == 0:
print("[", i + 1, "/", epochs, "] \tLoss: ", out[0])

optimizer.zero_grad()
model.backward()
optimizer.step()

print("Training finished: ", (now() - start) / 1e9, "seconds")
