#!/bin/bash
set -e
:<<!
*****************Instruction*****************
Here you can easily create a model by selecting
an arbitrary backbone model and global method,
then fine-tune it on your own dataset starting
from a pre-trained model.
Modify the following settings as you wish!
*********************************************
!
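# Typical usage, assuming the settings below have been edited for your setup:
#   ./finetune.sh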
#***************Backbone model****************
#Our code provides some mainstream architectures:
#alexnet
#vgg family: vgg11, vgg11_bn, vgg13, vgg13_bn,
# vgg16, vgg16_bn, vgg19_bn, vgg19
#resnet family: resnet18, resnet34, resnet50,
# resnet101, resnet152
#mpncovresnet: mpncovresnet50, mpncovresnet101
#inceptionv3
#You can also add your own network in src/network
arch=mpncovresnet50
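# For example, to fine-tune a plain ResNet-50 backbone instead, use:
#arch=resnet50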
#*********************************************
#***************global method****************
#Our code provides several global methods at the end
#of the network:
#GAvP (global average pooling),
#MPNCOV (matrix power normalized cov pooling),
#BCNN (bilinear pooling)
#CBP (compact bilinear pooling)
#...
#You can also add your own method in src/representation
image_representation=MPNCOV
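# For example, to use plain global average pooling instead, use:
#image_representation=GAvP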
# A short description of the method (it is appended to the result folder name)
description=reproduce
#*********************************************
#*******************Dataset*******************
#Choose the dataset folder
benchmark=DatasetName  # replace with your dataset folder name (no spaces)
datadir=/path/to/the/data
dataset=$datadir/$benchmark
num_classes=#classes  # replace with the number of classes in your dataset
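# Hypothetical example: a CUB-200-2011 folder under /data would be
#benchmark=CUB-200-2011
#datadir=/data
#num_classes=200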
#*********************************************
#****************Hyper-parameters*************
# Freeze the layers before a certain layer.
freeze_layer=0
# Batch size
batchsize=10
# Total number of training epochs
epoch=100
# The initial learning rate,
# decreased by the step method
lr=1.2e-3
lr_method=step
lr_params=100
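# For the step method, lr_params presumably lists the epoch milestone(s) at
# which the rate is decreased (see main.py for the exact decay factor); with
# lr_params=100 and epoch=100 the rate is never dropped during training.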
# log method
# description: lr = logspace(params1, params2, #epoch)
#lr_method=log
#lr_params=-1.1\ -5.0
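# Worked example, assuming logspace follows numpy semantics: with
# lr_params=-1.1\ -5.0 and epoch=100, the rate decays logarithmically
# from 10^-1.1 (~7.9e-2) down to 10^-5.0 (1e-5) over the 100 epochs.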
weight_decay=1e-3
classifier_factor=5
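# Assumption: classifier_factor scales the learning rate of the newly
# initialized classifier relative to the pretrained layers (5x here),
# a common fine-tuning heuristic; check main.py for the actual semantics.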
#*********************************************
echo "Start finetuning!"
modeldir=Results/Finetune-$benchmark-$arch-$image_representation-$description-lr$lr-bs$batchsize
if [ ! -d "Results" ]; then
mkdir Results
fi
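# If a checkpoint (*.pth.tar) already exists in $modeldir, resume from the
# most recent one; otherwise start fresh from the pretrained backbone and
# keep a copy of this script with the results for reproducibility.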
checkpointfile=$(ls -rt $modeldir/*.pth.tar 2>/dev/null | tail -1)
if [ -z "$checkpointfile" ]; then
if [ ! -d "$modeldir" ]; then
mkdir $modeldir
fi
cp finetune.sh $modeldir
python main.py $dataset\
--benchmark $benchmark\
--pretrained\
-a $arch\
-p 100\
--epochs $epoch\
--lr $lr\
--lr-method $lr_method\
--lr-params $lr_params\
-j 8\
-b $batchsize\
--num-classes $num_classes\
--representation $image_representation\
--freezed-layer $freeze_layer\
--classifier-factor $classifier_factor\
--modeldir $modeldir
else
python main.py $dataset\
--benchmark $benchmark\
--pretrained\
-a $arch\
-p 100\
--epochs $epoch\
--lr $lr\
--lr-method $lr_method\
--lr-params $lr_params\
-j 8\
-b $batchsize\
--num-classes $num_classes\
--representation $image_representation\
--freezed-layer $freeze_layer\
--modeldir $modeldir\
--classifier-factor $classifier_factor\
--resume $checkpointfile
fi
echo "Done!"