config.py
"""Config for ATDA."""
# params for dataset and data loader
data_root = "data"
source_dataset = "MNIST"
target_dataset = "MNIST-M"
dataset_mean_value = 0.5
dataset_std_value = 0.5
dataset_mean = (dataset_mean_value, dataset_mean_value, dataset_mean_value)
dataset_std = (dataset_std_value, dataset_std_value, dataset_std_value)
batch_size = 128
image_size = 28
num_classes = 10
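# Example (illustrative sketch, assuming a PyTorch/torchvision data pipeline):
# the values above would typically be consumed along these lines, where
# `dataset` stands in for the MNIST or MNIST-M dataset object:
#
#   transform = transforms.Compose([
#       transforms.Resize(image_size),
#       transforms.ToTensor(),
#       transforms.Normalize(dataset_mean, dataset_std),
#   ])
#   data_loader = torch.utils.data.DataLoader(
#       dataset, batch_size=batch_size, shuffle=True)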
# params for models (F, F_1, F_2, F_t)
model_trained = {
    "pretrain": True,
    "domain_adapt": True,
}
model_restore = {
    # "F": "snapshots/pretrain-F-final.pt",
    # "F_1": "snapshots/pretrain-F_1-final.pt",
    # "F_2": "snapshots/pretrain-F_2-final.pt",
    # "F_t": "snapshots/pretrain-F_t-final.pt",
    "F": "snapshots/adapt-F-80.pt",
    "F_1": "snapshots/adapt-F_1-80.pt",
    "F_2": "snapshots/adapt-F_2-80.pt",
    "F_t": "snapshots/adapt-F_t-80.pt",
}
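# Example (sketch, assuming checkpoints saved with torch.save): a restore path
# set above would typically be loaded into the corresponding module, e.g. for
# the shared encoder `F`:
#
#   if model_restore["F"] and os.path.exists(model_restore["F"]):
#       F.load_state_dict(torch.load(model_restore["F"]))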
# params for classifiers (F_1, F_2, F_t)
dropout_keep = {
    "F_1": 0.5,
    "F_2": 0.5,
    "F_t": 0.2,
}
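# Note (assumption): the values above read as *keep* probabilities. If mapped
# onto torch.nn.Dropout, which expects the *drop* probability, a plausible
# conversion is:
#
#   nn.Dropout(p=1 - dropout_keep["F_t"])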
# params for training network
num_gpu = 1
num_epochs_pre = 5
num_epochs_adapt = 5
num_epochs_k = 100
num_target_init = 5000
num_target_max = 40000
log_step = 100
save_step = 10
manual_seed = None
model_root = "snapshots"
eval_only = False
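# Example (sketch): manual_seed = None is commonly treated as "draw a random
# seed"; a typical PyTorch pattern is:
#
#   seed = manual_seed if manual_seed is not None else random.randint(1, 10000)
#   random.seed(seed)
#   torch.manual_seed(seed)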
# params for optimizing models
learning_rate = 1e-4
beta1 = 0.9
beta2 = 0.999
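# Example (sketch, assuming torch.optim): learning_rate, beta1 and beta2 are
# the standard Adam hyperparameters, e.g. for a hypothetical `model`:
#
#   optimizer = optim.Adam(
#       model.parameters(), lr=learning_rate, betas=(beta1, beta2))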