Add C++ code
giangtranml committed Jul 26, 2022
1 parent c7fb343 commit 23aa1e1
Showing 13 changed files with 2,508 additions and 2,569 deletions.
1,201 changes: 1,201 additions & 0 deletions attention_mechanism/.ipynb_checkpoints/Attention_Mechanism-checkpoint.ipynb

Large diffs are not rendered by default.

This file was deleted.

This file was deleted.

2,343 changes: 1,169 additions & 1,174 deletions attention_mechanism/Attention_Mechanism.ipynb

Large diffs are not rendered by default.

11 changes: 11 additions & 0 deletions c++/main.cpp
@@ -0,0 +1,11 @@
//
// Created by Tran Giang on 7/25/22.
//
#include "vector.h"
#include <iostream>

int main() {
    Vector<int> v(5);   // a vector of five pseudo-random ints
    std::cout << v;     // prints one element per line
    return 0;
}
7 changes: 7 additions & 0 deletions c++/matrix.cpp
@@ -0,0 +1,7 @@
//
// Created by Tran Giang on 7/25/22.
//
#include "matrix.h"

template<typename T>
Matrix<T>::Matrix(int rows, int columns)
    : rows(rows), columns(columns), elements(new T[rows * columns]) {}

template<typename T>
Matrix<T>::~Matrix() {
    delete[] elements;   // array delete to match new T[rows * columns]
}
18 changes: 18 additions & 0 deletions c++/matrix.h
@@ -0,0 +1,18 @@
//
// Created by Tran Giang on 7/25/22.
//

#ifndef ML_FROM_SCRATCH_MATRIX_H
#define ML_FROM_SCRATCH_MATRIX_H

template<typename T>
class Matrix {
private:
    int rows;
    int columns;
    T* elements;   // single flat buffer of rows * columns entries
public:
    Matrix(int rows, int columns);
    ~Matrix();
};

#endif // ML_FROM_SCRATCH_MATRIX_H
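A note on the storage choice: Matrix<T> keeps all entries in one flat buffer rather than nested arrays. Under the common row-major convention, entry (r, c) of a rows × columns matrix lives at offset r * columns + c. A minimal sketch of that indexing rule (the helper below is hypothetical, not part of this commit):

#include <cassert>

// Hypothetical helper: row-major offset of entry (r, c) in a flat
// buffer with `columns` entries per row.
inline int row_major_index(int r, int c, int columns) {
    return r * columns + c;
}

int main() {
    // Entry (1, 2) of a 3x4 matrix: skip one full row (4), then 2 more.
    assert(row_major_index(1, 2, 4) == 6);
    return 0;
}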
65 changes: 65 additions & 0 deletions c++/vector.cpp
@@ -0,0 +1,65 @@
#include "vector.h"
#include <stdlib.h>

template<typename T>
Vector<T>::Vector(int size): size(size) {
elements = new T[size];
for (int i = 0; i < size; ++i) {
elements[i] = rand();
}
}

template<typename T>
Vector<T>::~Vector() {
delete elements;
elements = nullptr;
}

template<typename T>
T Vector<T>::dot(Vector<T> &other) {
T result;
for (int i = 0; i < other.size; i++) {
result += elements[i] + other.elements[i];
}
return result;
}

template<typename T>
const T &Vector<T>::operator[](int i) const {
return elements[i];
}

template<typename T>
template<typename Scalar>
Vector<T> &Vector<T>::operator+(const Scalar &i) {
for (T& element: elements) {
element = element + i;
}
return *this;
}

template<typename T>
template<typename Scalar>
Vector<T> &Vector<T>::operator-(const Scalar &i) {
return *this;
}

template<typename T>
template<typename Scalar>
Vector<T> &Vector<T>::operator*(const Scalar &i) {
return *this;
}

template<typename T>
template<typename Scalar>
Vector<T> &Vector<T>::operator/(const Scalar &i) {
return *this;
}

template<typename T>
std::ostream &Vector<T>::operator<<(std::ostream &outs) {
for (auto elem: elements) {
outs << elem << std::endl;
}
return outs;
}
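Since all Vector<T> definitions live in vector.cpp rather than the header, only explicitly instantiated element types (here Vector<int>) will link from other translation units. A hypothetical usage sketch under that assumption, built together with the sources (e.g. g++ -std=c++17 demo.cpp vector.cpp -o demo):

// demo.cpp — hypothetical usage sketch, not part of this commit.
#include "vector.h"
#include <iostream>

int main() {
    Vector<int> a(3);   // three pseudo-random entries
    Vector<int> b(3);
    std::cout << "a[0]  = " << a[0] << std::endl;
    std::cout << "a . b = " << a.dot(b) << std::endl;  // inner product
    std::cout << a;     // streams one element per line
    return 0;
}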
34 changes: 34 additions & 0 deletions c++/vector.h
@@ -0,0 +1,34 @@
#ifndef ML_FROM_SCRATCH_VECTOR_H
#define ML_FROM_SCRATCH_VECTOR_H

#include <iostream>

template<typename T>
class Vector
{
private:
    int size;
    T* elements;
public:
    explicit Vector(int size);

    ~Vector();

    T dot(const Vector<T>& other) const;

    const T& operator[](int i) const;

    template<typename Scalar>
    Vector<T>& operator+(const Scalar& s);

    template<typename Scalar>
    Vector<T>& operator-(const Scalar& s);

    template<typename Scalar>
    Vector<T>& operator*(const Scalar& s);

    template<typename Scalar>
    Vector<T>& operator/(const Scalar& s);

    // Non-member operator<< so the usual `std::cout << v` syntax works;
    // a member version would have required `v << std::cout` instead.
    friend std::ostream& operator<<(std::ostream& outs, const Vector<T>& v) {
        for (int i = 0; i < v.size; ++i) {
            outs << v.elements[i] << std::endl;
        }
        return outs;
    }
};

#endif // ML_FROM_SCRATCH_VECTOR_H



6 changes: 2 additions & 4 deletions neural_network/neural_network.py
@@ -28,7 +28,7 @@ def __init__(self, optimizer:object, layers:list, loss_func:object=CrossEntropy(
        self.loss_func = loss_func
        self.layers = layers

-    def _forward(self, train_X, prediction=False):
+    def _forward(self, train_X, prediction=False) -> np.ndarray:
        """
        NN forward propagation level.
@@ -130,18 +130,16 @@ def main():
    batch_size = 64
    learning_rate = 0.01

-    optimizer = Adam(learning_rate)
+    optimizer = SGD(learning_rate)
    loss_func = CrossEntropy()
    archs = [
        InputLayer(),

        FCLayer(num_neurons=100, weight_init="he_normal"),
        ActivationLayer(activation="relu"),
        DropoutLayer(keep_prob=0.8),

        FCLayer(num_neurons=125, weight_init="he_normal"),
        ActivationLayer(activation="relu"),
        DropoutLayer(keep_prob=0.8),

        FCLayer(num_neurons=50, weight_init="he_normal"),
        BatchNormLayer(),
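The training setup now uses plain SGD in place of Adam, keeping learning_rate = 0.01. For reference, the vanilla SGD rule updates each weight as w ← w − learning_rate · ∂L/∂w; a minimal element-wise sketch (hypothetical, written in C++ in the spirit of this commit, not code from this repo):

#include <cstddef>

// Hypothetical sketch of one vanilla SGD step over a flat parameter array:
// w[i] <- w[i] - learning_rate * grad[i]
void sgd_step(float* w, const float* grad, std::size_t n, float learning_rate) {
    for (std::size_t i = 0; i < n; ++i) {
        w[i] -= learning_rate * grad[i];
    }
}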
Binary file added neural_network/nn_weights.pkl
Binary file not shown.
135 changes: 0 additions & 135 deletions softmax_regression/visualize.py

This file was deleted.

2 changes: 1 addition & 1 deletion transformer/Transformer_Pytorch.ipynb
@@ -636,7 +636,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.3"
"version": "3.8.5"
}
},
"nbformat": 4,
