require 'torch'
require 'nn'
require 'image'  -- needed by M.warp_image (CPU fallback uses image.warp)
local cjson = require 'cjson'
local M = {}
-- Parse a string of comma-separated numbers.
-- For example, convert "1.0,3.14" to {1.0, 3.14}.
function M.parse_num_list(s)
local nums = {}
for _, ss in ipairs(s:split(',')) do
table.insert(nums, tonumber(ss))
end
return nums
end
-- Parse a layer string and an associated weights string.
-- The layers string is a comma-separated list of layer names, and the weights
-- string is a comma-separated list of numbers. If the weights string contains
-- only a single number, it is duplicated to match the number of layers.
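-- Example (illustrative layer names): parse_layers('relu1_1,relu2_1', '5.0')
-- returns {'relu1_1', 'relu2_1'} and {5.0, 5.0}.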
function M.parse_layers(layers_string, weights_string)
local layers = layers_string:split(',')
local weights = M.parse_num_list(weights_string)
if #weights == 1 and #layers > 1 then
-- Duplicate the same weight for all layers
local w = weights[1]
weights = {}
for i = 1, #layers do
table.insert(weights, w)
end
elseif #weights ~= #layers then
local msg = 'size mismatch between layers "%s" and weights "%s"'
error(string.format(msg, layers_string, weights_string))
end
return layers, weights
end
function M.setup_gpu(gpu, backend, use_cudnn)
local dtype = 'torch.FloatTensor'
if gpu >= 0 then
if backend == 'cuda' then
require 'cutorch'
require 'cunn'
cutorch.setDevice(gpu + 1)
dtype = 'torch.CudaTensor'
if use_cudnn then
require 'cudnn'
cudnn.benchmark = true
end
elseif backend == 'opencl' then
require 'cltorch'
require 'clnn'
cltorch.setDevice(gpu + 1)
dtype = torch.Tensor():cl():type()
use_cudnn = false
end
else
use_cudnn = false
end
return dtype, use_cudnn
end
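-- Example (assuming a CUDA-capable device 0 with cutorch/cunn installed):
--   local dtype, use_cudnn = M.setup_gpu(0, 'cuda', true)
--   model:type(dtype)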
function M.file_exists(name)
  local f = io.open(name, 'r')
  if f == nil then return false end
  io.close(f)
  return true
end
function M.wait_for_file(filePath)
if not M.file_exists(filePath) then
print('Waiting for file \"' .. filePath .. '\"')
while not M.file_exists(filePath) do os.execute("sleep 1") end
os.execute("sleep 1")
end
end
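-- clear_gradients replaces a module's gradWeight/gradBias buffers with empty
-- tensors (useful for shrinking a network before serializing it to disk);
-- restore_gradients re-allocates them as zero tensors matching the parameters.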
function M.clear_gradients(m)
if torch.isTypeOf(m, nn.Container) then
m:applyToModules(M.clear_gradients)
end
if m.weight and m.gradWeight then
m.gradWeight = m.gradWeight.new()
end
if m.bias and m.gradBias then
m.gradBias = m.gradBias.new()
end
end
function M.restore_gradients(m)
if torch.isTypeOf(m, nn.Container) then
m:applyToModules(M.restore_gradients)
end
if m.weight and m.gradWeight then
m.gradWeight = m.gradWeight.new(#m.weight):zero()
end
if m.bias and m.gradBias then
m.gradBias = m.gradBias.new(#m.bias):zero()
end
end
function M.read_json(path)
  local file = io.open(path, 'r')
  local text = file:read('*all')
  file:close()
  local info = cjson.decode(text)
  return info
end
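-- Allow sparse Lua arrays to be encoded as JSON objects instead of raising an
-- error (see cjson.encode_sparse_array in the lua-cjson documentation).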
function M.write_json(path, j)
cjson.encode_sparse_array(true, 2, 10)
local text = cjson.encode(j)
local file = io.open(path, 'w')
file:write(text)
file:close()
end
local IMAGE_EXTS = {'jpg', 'jpeg', 'png', 'ppm', 'pgm'}
function M.is_image_file(filename)
  -- Hidden files are not images
  if string.sub(filename, 1, 1) == '.' then
    return false
  end
  -- Check against a list of known image extensions
  local ext = paths.extname(filename)
  if ext == nil then
    return false
  end
  ext = string.lower(ext)
  for _, image_ext in ipairs(IMAGE_EXTS) do
    if ext == image_ext then
      return true
    end
  end
  return false
end
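-- Warp an image according to a sampling map: uses the nn.BilinearSamplerBDHW
-- module on the GPU and falls back to image.warp on the CPU.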
function M.warp_image(img, map, dtype)
if dtype == 'torch.CudaTensor' then
local warpNet = nn.BilinearSamplerBDHW():cuda()
return warpNet:forward( { img, map } )
else
--CPU warping
return image.warp(img:float(), map:float(), 'bilinear', true, 'pad', 0):type(dtype)
end
end
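-- Apply an r x r median filter to a 3-channel image. No padding is used, so
-- the output is smaller than the input by r - 1 in each spatial dimension.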
function M.median_filter(img, r)
local u = img:unfold(2, r, 1):contiguous()
u = u:unfold(3, r, 1):contiguous()
local HH, WW = u:size(2), u:size(3)
local dtype = u:type()
-- Median is not defined for CudaTensors, cast to float and back
local med = u:view(3, HH, WW, r * r):float():median():type(dtype)
return med[{{}, {}, {}, 1}]
end
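-- Min pooling implemented as 1 - maxpool(1 - x), since nn provides only
-- SpatialMaxPooling. The kernel is r x r with stride 1 and floor(r/2) padding.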
function M.min_filter(batch, r, dtype)
local net = nn.Sequential()
:add(nn.MulConstant(-1))
:add(nn.AddConstant(1))
:add(nn.SpatialMaxPooling(r, r, 1, 1, math.floor(r/2), math.floor(r/2)))
:add(nn.MulConstant(-1))
:add(nn.AddConstant(1)):type(dtype)
return net:forward(batch)
end
function M.make_flow_magnitude_mask(flow, max_magn)
  -- Mask is 1 where the flow is zero and falls off linearly to 0 where the
  -- flow magnitude reaches max_magn, i.e. 1 - min(|flow| / max_magn, 1).
  local u, v
  if flow:size():size() == 4 then
    u, v = flow[{ {}, {1}, {}, {} }], flow[{ {}, {2}, {}, {} }]
  else
    u, v = flow[{ {1}, {}, {} }], flow[{ {2}, {}, {} }]
  end
  local magnitude = torch.sqrt(torch.add(torch.pow(u, 2), torch.pow(v, 2)))
  return torch.add(torch.cmax(torch.div(magnitude, -max_magn), -1), 1)
end
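-- The four helpers below build c x h x w masks whose values ramp linearly
-- across the height or width dimension, increasing or decreasing from roughly
-- 0 to 1.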
function M.make_gradient_mask_h_inc(c, h, w)
local i = 0
local mask_h_inc = torch.Tensor(1, h, 1):apply(function(x)
i = i + 1
return i
end):div(h+1):expand(c, h, w)
return mask_h_inc
end
function M.make_gradient_mask_h_dec(c, h, w)
local i = h + 1
local mask_h_dec = torch.Tensor(1, h, 1):apply(function(x)
i = i - 1
return i
end):div(h+1):expand(c, h, w)
return mask_h_dec
end
function M.make_gradient_mask_w_inc(c, h, w)
local i = 0
local mask_w_inc = torch.Tensor(1, 1, w):apply(function(x)
i = i + 1
return i
end):div(w+1):expand(c, h, w):float()
return mask_w_inc
end
function M.make_gradient_mask_w_dec(c, h, w)
local i = w + 1
local mask_w_dec = torch.Tensor(1, 1, w):apply(function(x)
i = i - 1
return i
end):div(w+1):expand(c, h, w)
return mask_w_dec
end
return M