Skip to content

Commit

Permalink
SORRY ABOUT THAT! :)
Browse files Browse the repository at this point in the history
  • Loading branch information
Piotr Sowa committed Jun 13, 2021
1 parent 4fda55d commit 079b79e
Showing 1 changed file with 12 additions and 12 deletions.
24 changes: 12 additions & 12 deletions src/network.c
Original file line number Diff line number Diff line change
Expand Up @@ -1007,24 +1007,24 @@ void scale_weights(layer l, float s)
/*
 * Copy a layer's learned parameters from GPU memory back to the host.
 *
 * Device-side biases/weights (and batch-norm scales, when present) are
 * copied into the host-side *_updates buffers.
 * NOTE(review): the destination being the *_updates arrays (rather than
 * l.biases/l.weights) looks intentional — presumably they serve as scratch
 * space for the multi-GPU sync/merge path — but confirm against the callers
 * (sync_layer/merge_weights).
 *
 * Parameters:
 *   l - the layer whose parameters are pulled; only CONVOLUTIONAL,
 *       DECONVOLUTIONAL and CONNECTED layers are handled, all other
 *       layer types are silently ignored.
 */
void pull_weights(layer l)
{
    if(l.type == CONVOLUTIONAL || l.type == DECONVOLUTIONAL){
        /* n = number of filters; nweights = total filter elements. */
        cuda_pull_array(l.biases_gpu, l.bias_updates, l.n);
        cuda_pull_array(l.weights_gpu, l.weight_updates, l.nweights);
        /* Scales only exist for batch-normalized layers. */
        if(l.scales) cuda_pull_array(l.scales_gpu, l.scale_updates, l.n);
    } else if(l.type == CONNECTED){
        /* Fully connected: one bias per output, dense outputs x inputs weight matrix. */
        cuda_pull_array(l.biases_gpu, l.bias_updates, l.outputs);
        cuda_pull_array(l.weights_gpu, l.weight_updates, l.outputs*l.inputs);
    }
}

/*
 * Copy a layer's parameters from the host to GPU memory.
 *
 * Mirror of pull_weights(): uploads the host-side *_updates buffers into
 * the device-side biases/weights (and batch-norm scales, when present).
 * NOTE(review): sourcing from the *_updates arrays looks intentional —
 * presumably they hold the merged parameters produced by the multi-GPU
 * sync path — but confirm against the callers (sync_layer/merge_weights).
 *
 * Parameters:
 *   l - the layer whose parameters are pushed; only CONVOLUTIONAL,
 *       DECONVOLUTIONAL and CONNECTED layers are handled, all other
 *       layer types are silently ignored.
 */
void push_weights(layer l)
{
    if(l.type == CONVOLUTIONAL || l.type == DECONVOLUTIONAL){
        /* n = number of filters; nweights = total filter elements. */
        cuda_push_array(l.biases_gpu, l.bias_updates, l.n);
        cuda_push_array(l.weights_gpu, l.weight_updates, l.nweights);
        /* Scales only exist for batch-normalized layers. */
        if(l.scales) cuda_push_array(l.scales_gpu, l.scale_updates, l.n);
    } else if(l.type == CONNECTED){
        /* Fully connected: one bias per output, dense outputs x inputs weight matrix. */
        cuda_push_array(l.biases_gpu, l.bias_updates, l.outputs);
        cuda_push_array(l.weights_gpu, l.weight_updates, l.outputs*l.inputs);
    }
}

Expand Down Expand Up @@ -1119,14 +1119,14 @@ void sync_layer(network **nets, int n, int j)
layer base = net->layers[j];
scale_weights(base, 0);
for (i = 0; i < n; ++i) {
opencl_set_device(nets[i]->gpu_index);
cuda_set_device(nets[i]->gpu_index);
layer l = nets[i]->layers[j];
pull_weights(l);
merge_weights(l);
}
scale_weights(base, 1./n);
for (i = 0; i < n; ++i) {
opencl_set_device(nets[i]->gpu_index);
cuda_set_device(nets[i]->gpu_index);
layer l = nets[i]->layers[j];
push_weights(l);
}
Expand Down

0 comments on commit 079b79e

Please sign in to comment.