default.conf
# saving
trained_model_path = trained_models
save_trained = True
n_saved = 2
load_name = capsnet__3.pth
load_model = False
save_best = True
score_file_name = best_acc
model_name = capsnet
exp_name = test_experiment
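#
# Illustrative sketch (commented out; the config parser ignores these lines): how the saving
# options above might be consumed. Only trained_model_path and load_name come from this file;
# the stand-in model and the saved filename are assumptions, not this repository's code.
#   import os, torch
#   model = torch.nn.Linear(10, 10)                           # stand-in for the capsule network
#   os.makedirs("trained_models", exist_ok=True)              # trained_model_path
#   torch.save(model.state_dict(), "trained_models/capsnet_epoch1.pth")      # save_trained = True
#   if False:                                                 # load_model = False
#       model.load_state_dict(torch.load("trained_models/capsnet__3.pth"))   # load_name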
# batch related
epochs = 100
batch_size = 128
drop_last = True
shuffle = True
seed = 42
debug = False
valid_size = 0.1
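#
# Illustrative sketch (commented out): how the batch options above might translate into PyTorch
# data loading. The random tensors are a stand-in dataset, not the MNIST pipeline used here.
#   import torch
#   from torch.utils.data import DataLoader, TensorDataset, random_split
#   torch.manual_seed(42)                                     # seed
#   data = TensorDataset(torch.randn(1000, 1, 28, 28), torch.randint(0, 10, (1000,)))
#   n_valid = int(0.1 * len(data))                            # valid_size
#   train_set, valid_set = random_split(data, [len(data) - n_valid, n_valid])
#   train_loader = DataLoader(train_set, batch_size=128, shuffle=True, drop_last=True)
#   valid_loader = DataLoader(valid_set, batch_size=128, shuffle=False)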
# logging
print_time = True
log_file = False
log_file_name = None
use_visdom = True
start_visdom = True
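#
# Illustrative sketch (commented out): minimal visdom usage matching use_visdom above. It assumes
# a visdom server is already running (e.g. via `python -m visdom.server`); the plotted values are
# placeholders, not this repository's logging code.
#   import numpy as np
#   import visdom
#   vis = visdom.Visdom()
#   vis.text("test_experiment")                               # exp_name
#   vis.line(X=np.array([0, 1]), Y=np.array([1.0, 0.5]), opts=dict(title="loss"))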
# training
early_stop = False
cudnn_benchmark = True
learning_rate = 0.001 # the original source code uses an exponential_decay, which is not an option in torch's Adam (sketched below): https://discuss.pytorch.org/t/adaptive-learning-rate/320/9
dataset = mnist
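#
# Illustrative sketch (commented out): one way to approximate the exponential_decay mentioned for
# learning_rate above, pairing Adam with ExponentialLR. The stand-in model and the gamma value are
# assumptions, not taken from this repository.
#   import torch
#   torch.backends.cudnn.benchmark = True                     # cudnn_benchmark
#   model = torch.nn.Linear(10, 10)                           # stand-in model
#   optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
#   scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.96)  # gamma is a guess
#   for epoch in range(100):                                  # epochs
#       ...                                                   # one training epoch would run here
#       scheduler.step()                                      # decay the learning rate each epoch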
# margin loss
use_recon = True # use reconstruction in the loss; the author's code suggests no reconstruction for cifar10
alpha = 0.0005 # scale factor for the reconstruction loss
m_plus = 0.9 # margin for present classes
m_min = 0.1 # margin for absent classes
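#
# Illustrative sketch (commented out): the margin loss from Sabour et al. (2017) that these values
# parameterise. The 0.5 down-weighting of the absent-class term is the paper's value and is not an
# option in this file; the tensors are stand-ins for the capsule output lengths and labels.
#   import torch
#   lengths = torch.rand(128, 10)                             # ||v_k|| per class, batch of 128
#   targets = torch.nn.functional.one_hot(torch.randint(0, 10, (128,)), 10).float()
#   present = targets * torch.clamp(0.9 - lengths, min=0.0) ** 2               # m_plus term
#   absent = 0.5 * (1 - targets) * torch.clamp(lengths - 0.1, min=0.0) ** 2    # m_min term
#   margin_loss = (present + absent).sum(dim=1).mean()
#   # with use_recon = True the total loss would add alpha * reconstruction_error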
# network
stdev_W = 0.1 # presumably the std used to initialise the weight matrices W
architecture = 32,8;10,16 # likely count,dim pairs per capsule layer, separated by ';': 32 primary capsule channels of dim 8, then 10 class capsules of dim 16
routing_iters = 3 # number of dynamic routing iterations
bias_routing = False # the author's code has bias=True
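#
# Illustrative sketch (commented out): one way the architecture string above could be parsed,
# assuming each ';'-separated entry is a count,dim pair. The parsing code is a guess, not this
# repository's implementation.
#   layers = [tuple(int(x) for x in part.split(",")) for part in "32,8;10,16".split(";")]
#   # -> [(32, 8), (10, 16)]: 32 primary capsule channels of dim 8, then 10 class capsules of dim 16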