diff --git a/convolutional_neural_network/convolutional_neural_network.py b/convolutional_neural_network/convolutional_neural_network.py
index f99b07a..35d1b1f 100644
--- a/convolutional_neural_network/convolutional_neural_network.py
+++ b/convolutional_neural_network/convolutional_neural_network.py
@@ -157,4 +157,4 @@ def test():
 if args.test:
     test()
 else:
-    main()
\ No newline at end of file
+    main()
diff --git a/linear_regression/linear_regression.py b/linear_regression/linear_regression.py
index 86a5860..28fe6ac 100644
--- a/linear_regression/linear_regression.py
+++ b/linear_regression/linear_regression.py
@@ -7,14 +7,15 @@
 sys.path.append("..")
 import numpy as np
 from sklearn.model_selection import train_test_split
-from optimizations_algorithms.optimizers import SGD
 
 
 class LinearRegression:
-    def __init__(self, optimizer, epochs=1000, lambda_=0.1):
+    def __init__(self, alpha, epochs=1000, lambda_=0.1):
+        self.alpha = alpha
         self.epochs = epochs
         self.lambda_ = lambda_
-        self.optimizer = optimizer
+        self.w = None
+        self.b = None
 
     def _hypothesis(self, X):
         return np.dot(X, self.w) + self.b
@@ -42,8 +43,8 @@ def _train(self, X_train, y_train):
             break
 
     def _update_params(self, w_grad, b_grad):
-        self.w -= self.optimizer.minimize(w_grad)
-        self.b -= self.optimizer.minimize(b_grad)
+        self.w -= self.alpha*w_grad
+        self.b -= self.alpha*b_grad
 
     def train(self, X_train, y_train):
         self.w = np.random.normal(size=(X_train.shape[1], 1))
@@ -79,8 +80,7 @@ def main():
     alpha = 0.01
     epochs = 500
     lambda_ = 0
-    optimizer = SGD(alpha=alpha)
-    linear_regression = LinearRegression(optimizer, epochs, lambda_)
+    linear_regression = LinearRegression(alpha, epochs, lambda_)
     linear_regression.train(X_train, y_train)
 
     (X_test, x_mean, x_std), (y_test, y_mean, y_std) = standardize_regression(X_test, y_test)
diff --git a/logistic_regression/logistic_regression.py b/logistic_regression/logistic_regression.py
index a9a2d7b..17569b6 100644
--- a/logistic_regression/logistic_regression.py
+++ b/logistic_regression/logistic_regression.py
@@ -10,23 +10,22 @@
 from sklearn.model_selection import train_test_split
 import sys
 sys.path.append("..")
-from optimizations_algorithms.optimizers import SGD
 
 
 class LogisticRegression:
 
-    def __init__(self, epochs, optimizer, batch_size):
+    def __init__(self, epochs, learning_rate, batch_size):
         """
         Constructor for logistic regression.
 
         Parameter
         ---------
         epochs: number of epoch to train logistic regression.
-        optimizer: optimizer algorithm to update weights.
+        learning_rate: learning rate used to optimize parameters.
         batch_size: number of batch size using each iteration.
         """
         self.epochs = epochs
-        self.optimizer = optimizer
+        self.learning_rate = learning_rate
         self.batch_size = batch_size
 
     def _sigmoid(self, X):
@@ -52,6 +51,8 @@ def _cross_entropy_loss(self, y_true, y_pred):
         Compute cross entropy loss.
""" m = y_true.shape[0] + epsilon = 1e-20 + y_pred[y_pred == 0] = epsilon return -np.sum(y_true*np.log(y_pred) + (1-y_true)*np.log(1 - y_pred))/m def _gradient(self, X, y_true, y_pred): @@ -74,7 +75,7 @@ def _train(self, X_train, y_train): loss = self._cross_entropy_loss(y_train[it:it+self.batch_size], y_hat) batch_loss += loss grad = self._gradient(X_train[it:it+self.batch_size], y_train[it:it+self.batch_size], y_hat) - self.w -= self.optimizer.minimize(grad) + self.w -= self.learning_rate*grad it += self.batch_size num_batches += 1 print("Loss at epoch %s: %f" % (e + 1 , batch_loss / num_batches)) @@ -131,8 +132,7 @@ def main(): epochs = 20 learning_rate = 0.1 batch_size = 64 - optimizer = SGD(alpha=learning_rate) - logistic = LogisticRegression(epochs, optimizer, batch_size) + logistic = LogisticRegression(epochs, learning_rate, batch_size) logistic.train(X_train, y_train) pred = logistic.predict(X_test) y_test = y_test.reshape((-1, 1)) diff --git a/neural_network/neural_network.py b/neural_network/neural_network.py index c1d5920..e3f6149 100644 --- a/neural_network/neural_network.py +++ b/neural_network/neural_network.py @@ -27,7 +27,7 @@ def __init__(self, optimizer:object, layers:list, loss_func:object=CrossEntropy( self.optimizer = optimizer self.loss_func = loss_func self.layers = layers - + def _forward(self, train_X, prediction=False): """ NN forward propagation level. @@ -170,4 +170,4 @@ def main(): print(confusion_matrix(labels_test, pred)) if __name__ == "__main__": - main() \ No newline at end of file + main() diff --git a/neural_network/test b/neural_network/test new file mode 100644 index 0000000..81bf396 --- /dev/null +++ b/neural_network/test @@ -0,0 +1 @@ +ab