common.py
# Copyright 2019-present NAVER Corp.
# CC BY-NC-SA 3.0
# Available only for non-commercial use

import os

import numpy as np
import torch


def mkdir_for(file_path):
    ''' Creates the parent directory of file_path if it does not exist yet. '''
    os.makedirs(os.path.dirname(file_path) or '.', exist_ok=True)


def model_size(model):
    ''' Computes the number of parameters of the model. '''
    size = 0
    for weights in model.state_dict().values():
        size += np.prod(weights.shape)
    return int(size)


def torch_set_gpu(gpus):
    ''' Selects the GPU(s) to run on via CUDA_VISIBLE_DEVICES.

    gpus: a GPU index or a list of GPU indices; any negative index means CPU.
    Returns True if running on GPU, False otherwise.
    '''
    if isinstance(gpus, int):
        gpus = [gpus]

    cuda = all(gpu >= 0 for gpu in gpus)

    if cuda:
        os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(str(gpu) for gpu in gpus)
        assert torch.cuda.is_available(), "%s has GPUs %s unavailable" % (
            os.environ.get('HOSTNAME', 'this machine'),
            os.environ['CUDA_VISIBLE_DEVICES'])
        torch.backends.cudnn.benchmark = True  # speed-up cudnn
        torch.backends.cudnn.fastest = True  # even more speed-up?
        print('Launching on GPUs ' + os.environ['CUDA_VISIBLE_DEVICES'])
    else:
        print('Launching on CPU')

    return cuda
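

if __name__ == '__main__':
    # Minimal usage sketch (illustrative only): the toy model and the
    # checkpoint path below are hypothetical, not part of the repository.
    import torch.nn as nn

    use_cuda = torch_set_gpu(-1)  # -1 selects the CPU; pass 0, [0, 1], ... for GPUs
    net = nn.Linear(128, 64)      # stand-in for a real model
    print('model has %d parameters' % model_size(net))

    ckpt_path = 'checkpoints/example/model.pt'  # hypothetical path
    mkdir_for(ckpt_path)          # ensures 'checkpoints/example/' exists
    torch.save(net.state_dict(), ckpt_path)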