Refactor: remove legacy component arrays in inference_engine_t #67

Merged · 22 commits · Jul 19, 2023
Commits (22)
c586324
refac(to_json): replace all legacy arrays
rouson Jul 11, 2023
bcb774a
refac(from_json): rm all refs to legacy arrays
rouson Jul 11, 2023
d5cc319
feat(difference_t): add type
rouson Jul 11, 2023
f9d902a
refac: replace assert_consisten{t,cy}
rouson Jul 11, 2023
760f5e7
refac: rm all remaining refs to legacy arrays!
rouson Jul 11, 2023
800e8b7
WIP: demo JSON I/O errors
rouson Jul 12, 2023
aec3bed
fix(to_json): comma-separate output-layer nodes
rouson Jul 12, 2023
2297d4f
chore: make legacy constructor private
rouson Jul 12, 2023
805d5fe
feat(trainable_engine): inference_engine convertor
rouson Jul 15, 2023
21c4471
chore(fmp): update sourcery dependency version
rouson Jul 15, 2023
889902e
feat(trainable_engine): add metadata & convertor
rouson Jul 15, 2023
b3901c3
feat(example): add train-and-write program
rouson Jul 15, 2023
46f3b79
fix(trainable_engine): ubound for activated output
rouson Jul 15, 2023
9839fad
feat(example): print expected/actual inputs/outputs
rouson Jul 15, 2023
f01142d
fix(to_json): input layer write & better test
rouson Jul 16, 2023
30272ab
feat(example): train longer to match PyTorch
rouson Jul 17, 2023
e15e912
WIP(layer_s): allocate inference_engine components
rouson Jul 18, 2023
b312b87
fix(from_json): define hidden-layer weights/biases
rouson Jul 19, 2023
09ffece
chore(inference_engine): rm legacy constructor
rouson Jul 19, 2023
757a1c0
fix(inference_engine_test):bound operand subscript
rouson Jul 19, 2023
4b6315b
chore(example): remove debugging code
rouson Jul 19, 2023
abd125a
chore(layer_{m,s}): rm unused procedures
rouson Jul 19, 2023
92 changes: 92 additions & 0 deletions example/train-and-write.f90
@@ -0,0 +1,92 @@
! Copyright (c), The Regents of the University of California
! Terms of use are as specified in LICENSE.txt
program train_and_write
!! This program demonstrates how to train a neural network and write it to a JSON file.
use inference_engine_m, only : &
inference_engine_t, trainable_engine_t, rkind, sigmoid_t, mini_batch_t, inputs_t, expected_outputs_t, input_output_pair_t
use sourcery_m, only : string_t, file_t, command_line_t
implicit none

type(string_t) file_name
type(command_line_t) command_line
real(rkind), parameter :: false=0._rkind, true=1._rkind

file_name = string_t(command_line%flag_value("--output-file"))

if (len(file_name%string())==0) then
error stop new_line('a') // new_line('a') // &
'Usage: ./build/run-fpm.sh run --example train-and-write -- --output-file "<file-name>"'
end if

block
type(trainable_engine_t) trainable_engine
type(inference_engine_t) inference_engine
type(file_t) json_file
type(mini_batch_t), allocatable :: mini_batches(:)
type(inputs_t), allocatable :: training_inputs(:,:), tmp(:), inputs(:)
type(expected_outputs_t), allocatable :: training_outputs(:,:), tmp2(:), expected_outputs(:)
real(rkind) t_start, t_end
real(rkind), allocatable :: harvest(:,:,:)
integer, parameter :: num_inputs=2, mini_batch_size = 1, num_iterations=8000000
integer batch, iter, i

allocate(harvest(num_inputs, mini_batch_size, num_iterations))
call random_number(harvest)

! The following temporary copies are required by gfortran bug 100650 and possibly 49324
! See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=100650 and https://gcc.gnu.org/bugzilla/show_bug.cgi?id=49324
tmp = [([(inputs_t(merge(true, false, harvest(:,batch,iter) < 0.5E0)), batch=1, mini_batch_size)], iter=1, num_iterations)]
training_inputs = reshape(tmp, [mini_batch_size, num_iterations])

tmp2 = [([(xor(training_inputs(batch, iter)), batch = 1, mini_batch_size)], iter = 1, num_iterations )]
training_outputs = reshape(tmp2, [mini_batch_size, num_iterations])

mini_batches = [(mini_batch_t(input_output_pair_t(training_inputs(:,iter), training_outputs(:,iter))), iter=1, num_iterations)]
trainable_engine = one_random_hidden_layer()

call cpu_time(t_start)
call trainable_engine%train(mini_batches)
call cpu_time(t_end)

print *,"Training time: ",t_end - t_start

inputs = [inputs_t([true,true]), inputs_t([true,false]), inputs_t([false,true]), inputs_t([false,false])]
print *, "sample inputs: ",("[",inputs(i)%values(),"]", i=1, size(inputs))
expected_outputs = xor(inputs)
print *, "expected outputs: ",(expected_outputs(i)%outputs(), i=1, size(expected_outputs))
associate(outputs => trainable_engine%infer(inputs))
print *, "actual outputs: ",(outputs(i)%outputs(), i=1, size(outputs))
end associate

inference_engine = trainable_engine%to_inference_engine()
json_file = inference_engine%to_json()
call json_file%write_lines(file_name)
end block

contains

elemental function xor(inputs) result(expected_outputs)
type(inputs_t), intent(in) :: inputs
type(expected_outputs_t) expected_outputs
associate(sum_inputs => sum(inputs%values()))
expected_outputs = expected_outputs_t([merge(true, false, sum_inputs > 0.99 .and. sum_inputs < 1.01)])
end associate
end function

function one_random_hidden_layer() result(trainable_engine)
type(trainable_engine_t) trainable_engine
integer, parameter :: inputs = 2, outputs = 1, hidden = 2 ! number of neurons in input, output, and hidden layers
integer, parameter :: n(*) = [inputs, hidden, outputs] ! neurons per layer
integer, parameter :: n_max = maxval(n), layers=size(n) ! max layer width, number of layers
real(rkind) w(n_max, n_max, layers-1), b(n_max, layers-1)

call random_number(b)
call random_number(w)

trainable_engine = trainable_engine_t( &
nodes = n, weights = w, biases = b, differentiable_activation_strategy = sigmoid_t(), &
metadata = [string_t("1 hide|2 wide"), string_t("D. Rouson"), string_t("2023-06-30"), string_t("sigmoid"), string_t("false")]&
)
end function

end program
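
The program above trains a two-input XOR network and writes it to JSON. As a minimal sketch, not part of this pull request, the program below shows how such a file could be read back and queried. It assumes that sourcery's file_t constructor reads the named file, that inference_engine_m exports the JSON constructor declared later in this diff, and that inference_engine_t%infer accepts an inputs_t array whose results support the outputs() accessor, as trainable_engine_t%infer does above; the file name is hypothetical.

program read_back_and_infer
  !! Hypothetical companion to train-and-write: load a previously written network and run inference.
  use inference_engine_m, only : inference_engine_t, inputs_t, rkind
  use sourcery_m, only : string_t, file_t
  implicit none
  type(inference_engine_t) engine
  type(inputs_t), allocatable :: inputs(:)
  integer i
  real(rkind), parameter :: false = 0._rkind, true = 1._rkind

  ! Construct an engine from the JSON file written by train-and-write (hypothetical file name).
  engine = inference_engine_t(file_t(string_t("xor-network.json")))

  ! Query the engine with the four XOR input combinations and print the results.
  inputs = [inputs_t([true,true]), inputs_t([true,false]), inputs_t([false,true]), inputs_t([false,false])]
  associate(outputs => engine%infer(inputs))
    print *, "outputs read back from JSON: ", (outputs(i)%outputs(), i = 1, size(outputs))
  end associate
end program

Together with train-and-write, such a program would exercise the full to_json/from_json round trip that several commits in this pull request address.
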
23 changes: 15 additions & 8 deletions example/write-read-infer.f90
@@ -29,6 +29,19 @@ program write_read_infer

contains

function single_hidden_layer_xor_network() result(inference_engine)
type(inference_engine_t) inference_engine
integer, parameter :: nodes_per_layer(*) = [2, 3, 1]
integer, parameter :: max_n = maxval(nodes_per_layer), layers = size(nodes_per_layer)

inference_engine = inference_engine_t( &
metadata = [string_t("XOR"), string_t("Damian Rouson"), string_t("2023-07-02"), string_t("step"), string_t("false")], &
weights = reshape([real(rkind):: [1,1,0, 0,1,1, 0,0,0], [1,0,0, -2,0,0, 1,0,0]], [max_n, max_n, layers-1]), &
biases = reshape([[0.,-1.99,0.], [0., 0., 0.]], [max_n, layers-1]), &
nodes = nodes_per_layer &
)
end function

subroutine write_read_query_infer(output_file_name)
type(string_t), intent(in) :: output_file_name
type(string_t) activation_name
@@ -42,14 +55,8 @@ subroutine write_read_query_infer(output_file_name)
real(rkind), parameter :: false = 0._rkind, true = 1._rkind

print *, "Constructing an inference_engine_t neural-network object from scratch."
xor_network = inference_engine_t( &
metadata = [string_t("XOR"), string_t("Damian Rouson"), string_t("2023-02-18"), string_t("step"), string_t("false")], &
input_weights = real(reshape([1,0,1,1,0,1], [num_inputs, num_neurons]), rkind), &
hidden_weights = real(identity, rkind), &
output_weights = real(reshape([1,-2,1], [num_outputs, num_neurons]), rkind), &
biases = reshape([real(rkind):: 0.,-1.99,0., 0.,0.,0.], [num_neurons, num_hidden_layers]), &
output_biases = [real(rkind):: 0.] &
)
xor_network = single_hidden_layer_xor_network()

print *, "Converting an inference_engine_t object to a file_t object."
json_output_file = xor_network%to_json()

2 changes: 1 addition & 1 deletion fpm.toml
@@ -6,5 +6,5 @@ maintainer = "[email protected]"

[dependencies]
assert = {git = "https://github.com/sourceryinstitute/assert", tag = "1.4.0"}
sourcery = {git = "https://github.com/sourceryinstitute/sourcery", tag = "3.6.0"}
sourcery = {git = "https://github.com/sourceryinstitute/sourcery", tag = "3.8.2"}
netcdf-interfaces = {git = "https://github.com/rouson/netcdf-interfaces.git", branch = "implicit-interfaces"}
33 changes: 12 additions & 21 deletions src/inference_engine/inference_engine_m_.f90
@@ -13,6 +13,7 @@ module inference_engine_m_

private
public :: inference_engine_t
public :: difference_t

character(len=*), parameter :: key(*) = [character(len=len("usingSkipConnections")) :: &
"modelName", "modelAuthor", "compilationDate", "activationFunction", "usingSkipConnections"]
@@ -21,30 +22,30 @@ module inference_engine_m_
!! Encapsulate the minimal information needed to perform inference
private
type(string_t) metadata_(size(key))
real(rkind), allocatable :: weights_(:,:,:), biases__(:,:)
real(rkind), allocatable :: weights_(:,:,:), biases_(:,:)
integer, allocatable :: nodes_(:)
class(activation_strategy_t), allocatable :: activation_strategy_ ! Strategy Pattern facilitates elemental activation

! TODO: rm these legacy components
real(rkind), allocatable :: input_weights_(:,:) ! weights applied to go from the inputs to first hidden layer
real(rkind), allocatable :: hidden_weights_(:,:,:) ! weights applied to go from one hidden layer to the next
real(rkind), allocatable :: output_weights_(:,:) ! weights applied to go from the final hidden layer to the outputs
real(rkind), allocatable :: biases_(:,:) ! neuronal offsets for each hidden layer
real(rkind), allocatable :: output_biases_(:) ! neuronal offsets applied to outputs
contains
procedure :: infer
procedure :: to_json
procedure :: num_inputs
procedure :: num_outputs
procedure :: nodes_per_layer
procedure :: norm
procedure :: assert_conformable_with
procedure :: skip
procedure, private :: subtract
generic :: operator(-) => subtract
procedure :: activation_function_name
end type

type difference_t
private
real(rkind), allocatable :: weights_difference_(:,:,:), biases_difference_(:,:)
integer, allocatable :: nodes_difference_(:)
contains
procedure :: norm
end type

interface inference_engine_t

pure module function construct_from_padded_arrays(metadata, weights, biases, nodes) result(inference_engine)
@@ -55,16 +56,6 @@ pure module function construct_from_padded_arrays(metadata, weights, biases, nod
type(inference_engine_t) inference_engine
end function

pure module function construct_from_legacy_arrays( &
metadata, input_weights, hidden_weights, output_weights, biases, output_biases &
) result(inference_engine)
implicit none
type(string_t), intent(in) :: metadata(:)
real(rkind), intent(in), dimension(:,:) :: input_weights, output_weights, biases
real(rkind), intent(in) :: hidden_weights(:,:,:), output_biases(:)
type(inference_engine_t) inference_engine
end function

impure elemental module function construct_from_json(file_) result(inference_engine)
implicit none
type(file_t), intent(in) :: file_
@@ -83,15 +74,15 @@ impure elemental module function to_json(self) result(json_file)

elemental module function norm(self) result(norm_of_self)
implicit none
class(inference_engine_t), intent(in) :: self
class(difference_t), intent(in) :: self
real(rkind) norm_of_self
end function

elemental module function subtract(self, rhs) result(difference)
implicit none
class(inference_engine_t), intent(in) :: self
type(inference_engine_t), intent(in) :: rhs
type(inference_engine_t) difference
type(difference_t) difference
end function

elemental module subroutine assert_conformable_with(self, inference_engine)
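
In the new interface, subtracting two engines now yields a difference_t (previously an inference_engine_t), and norm is bound to difference_t rather than to the engine itself. As a sketch, not part of the diff, the helper below illustrates how a test might use this pairing to check that two engines agree, for example before and after a to_json/from_json round trip. It assumes inference_engine_m re-exports difference_t and rkind, relies on the assert dependency already listed in fpm.toml, and uses an illustrative tolerance value.

subroutine assert_engines_match(original, round_tripped)
  !! Hypothetical test helper: require two engines to agree to within a tolerance.
  use inference_engine_m, only : inference_engine_t, difference_t, rkind ! assumes these are re-exported
  use assert_m, only : assert
  implicit none
  type(inference_engine_t), intent(in) :: original, round_tripped
  type(difference_t) delta
  real(rkind), parameter :: tolerance = 1.E-06_rkind ! illustrative value

  delta = original - round_tripped ! generic operator(-), bound to subtract, returns a difference_t
  call assert(delta%norm() < tolerance, "engines match after JSON round trip")
end subroutine
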