
Commit

add eltwise and sigmoid

longcw committed Aug 29, 2017
1 parent 5f415a5 commit 8c7e86e
Showing 3 changed files with 29 additions and 6 deletions.

.gitignore (3 changes: 2 additions & 1 deletion)
@@ -5,4 +5,5 @@
 *.caffemodel
 *.prototxt
 *.dot
-hw/
+hw_seg
+pva

README.md (2 changes: 2 additions & 0 deletions)
@@ -6,6 +6,8 @@ Add support for
 + Dilated Convolution Layer
 + Concat Layer
 + Upsampling (converted to Deconvolution with bilinear initialization)
++ Eltwise Product
++ Sigmoid Layer
 
 ```python
 import torch
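
The two added README entries mean that element-wise products and sigmoid activations in a traced PyTorch graph can now be expressed in Caffe. As an illustration, here is a hypothetical module (not from the repository) that exercises both newly supported ops; tracing its output through the converter should emit an Eltwise (PROD) layer and a Sigmoid layer:

```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable

class GatedBlock(nn.Module):
    """Hypothetical module using the two newly supported ops."""
    def __init__(self):
        super(GatedBlock, self).__init__()
        self.conv_a = nn.Conv2d(3, 8, 3, padding=1)
        self.conv_b = nn.Conv2d(3, 8, 3, padding=1)

    def forward(self, x):
        gate = F.sigmoid(self.conv_b(x))   # SigmoidBackward -> Sigmoid
        return self.conv_a(x) * gate       # MulBackward -> Eltwise (PROD)

output_var = GatedBlock()(Variable(torch.rand(1, 3, 32, 32)))
```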

pytorch2caffe.py (30 changes: 25 additions & 5 deletions)
@@ -23,7 +23,10 @@
               'ConcatBackward': 'Concat',
               'UpsamplingNearest2d': 'Deconvolution',
               'UpsamplingBilinear2d': 'Deconvolution',
-              'SigmoidBackward': 'Sigmoid'}
+              'SigmoidBackward': 'Sigmoid',
+              'LeakyReLUBackward': 'ReLU',
+              'NegateBackward': 'Power',
+              'MulBackward': 'Eltwise'}
 
 layer_id = 0
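
For context, the converter discovers layers by walking the autograd graph backward from output_var.grad_fn and looking each node's class name up in the dict above. A minimal sketch of that traversal idea (a simplification, not the repository's actual convert_layer/add_layer code; grad_fn and next_functions are the 2017-era PyTorch attributes):

```python
def walk_grad_fn(fn, seen=None):
    """Visit each autograd node once and print its class name,
    e.g. 'MulBackward' or 'SigmoidBackward'."""
    if seen is None:
        seen = set()
    if fn is None or fn in seen:
        return
    seen.add(fn)
    print(type(fn).__name__)              # the key looked up in layer_dict
    for next_fn, _ in getattr(fn, 'next_functions', ()):
        walk_grad_fn(next_fn, seen)

# walk_grad_fn(output_var.grad_fn)
```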
@@ -85,9 +88,10 @@ def convert_layer(func):
         elif parent_type == 'UpsamplingNearest2d':
             print('UpsamplingNearest2d')
 
-    convert_layer(output_var.grad_fn)
-    print('save caffemodel to %s' % caffemodel)
-    net.save(caffemodel)
+    if caffemodel is not None:
+        convert_layer(output_var.grad_fn)
+        print('save caffemodel to %s' % caffemodel)
+        net.save(caffemodel)
 
 
 def save_conv2caffe(weights, biases, conv_param):
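
The new guard makes weight export optional: when caffemodel is None, the graph walk that copies weights into the Caffe net and the net.save() call are skipped, so only the network definition is produced. A hedged usage sketch (the argument order of pytorch2caffe() is assumed from the variable names in this diff; check pytorch2caffe.py for the actual signature):

```python
# Emit only the prototxt; no Caffe net is built or saved.
pytorch2caffe(input_var, output_var, 'model.prototxt', None)

# Emit both the definition and the weights.
pytorch2caffe(input_var, output_var, 'model.prototxt', 'model.caffemodel')
```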
@@ -162,7 +166,23 @@ def add_layer(func):
     layer['bottom'] = ['data']
     layer['top'] = parent_top
 
-    if parent_type == 'UpsamplingNearest2d':
+    if parent_type == 'MulBackward':
+        eltwise_param = {
+            'operation': 'PROD',
+        }
+        layer['eltwise_param'] = eltwise_param
+    elif parent_type == 'NegateBackward':
+        power_param = {
+            'power': 1,
+            'scale': -1.,
+            'shift': 0
+        }
+        layer['power_param'] = power_param
+    elif parent_type == 'LeakyReLUBackward':
+        negative_slope = func.additional_args[0]
+        layer['relu_param'] = {'negative_slope': negative_slope}
+
+    elif parent_type == 'UpsamplingNearest2d':
         conv_param = OrderedDict()
         factor = func.scale_factor
         conv_param['num_output'] = func.saved_tensors[0].size(1)
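
Why these parameters work: Caffe's Power layer computes y = (shift + scale * x) ^ power, so power=1, scale=-1, shift=0 gives y = -x, i.e. negation; Eltwise with operation PROD multiplies its bottom blobs element-wise; and Caffe's ReLU with a negative_slope k computes max(0, x) + k * min(0, x), which is exactly LeakyReLU. A quick NumPy sanity check (illustrative, not part of the repository):

```python
import numpy as np

x = np.random.randn(5)

# Power layer with the params emitted for NegateBackward:
# y = (shift + scale * x) ** power = (0 + -1 * x) ** 1 = -x
power, scale, shift = 1, -1.0, 0
assert np.allclose((shift + scale * x) ** power, -x)

# ReLU with negative_slope k (LeakyReLUBackward); in the converter
# k comes from func.additional_args[0].
k = 0.1
leaky = np.maximum(0, x) + k * np.minimum(0, x)
assert np.allclose(leaky, np.where(x > 0, x, k * x))
```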
