# mbptrack_kitti_ped_cfg.yaml
# dataset configs
dataset_cfg:
  dataset_type: KITTIMem
  data_root_dir: data/KITTI_Tracking/training/
  category_name: Pedestrian
  coordinate_mode: camera
  num_smp_frames_per_tracklet: 8
  max_frame_dis: 1
  use_seq_aug: False
  use_smp_aug: True
  # crop local area for tracking
  frame_npts: 1024
  frame_offset: 2.0
  frame_offset2: 0.0
  frame_scale: 1.0
  # crop the tracked target across frames to generate completion gt
  target_scale: 1.25
  target_offset: 0.0
  # params to simulate motion
  offset_max: [3., 10., 10.]
  up_axis: [0,-1,0]
  degree: True
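  # Note (assumption): with coordinate_mode 'camera', points follow the KITTI camera
  # frame where +y points downward, hence the up axis [0,-1,0]; 'degree: True'
  # presumably means the rotational part of offset_max is specified in degrees.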
  train_cfg:
    cache: True
    preload_offset: 10
    use_z: True
  eval_cfg:
    cache: False
    use_z: True
    preload_offset: -1
    dynamic_threshold: 0.15
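  # Note (assumption): training caches cropped frames in memory (cache: True) while
  # evaluation streams them (cache: False); preload_offset: -1 presumably disables the
  # pre-cropping applied at train time.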
# model configs
model_cfg:
  model_type: MBPTrack
  train_memory_size: 2
  # ref_memory_size: 2
  eval_memory_size: 3
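  # Note (assumption): the memory bank keeps features from the 2 most recent frames
  # during training and from 3 frames during inference.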
  backbone_cfg:
    type: DGCNN
    layers_cfg:
      - {
        mlps: [0, 64, 64, 128],
        use_xyz: True,
        sample_method: Range,
        nsample: 32,
      }
      - {
        mlps: [128, 128, 128, 128],
        use_xyz: True,
        sample_method: Range,
        nsample: 32,
      }
      - {
        mlps: [128, 256, 256, 256],
        use_xyz: True,
        sample_method: Range,
        nsample: 32,
      }
    out_channels: 128
    downsample_ratios: [2,4,8]
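    # Note (assumption): with frame_npts: 1024 and downsample_ratios [2,4,8], the three
    # DGCNN stages presumably operate on roughly 512, 256 and 128 points; out_channels: 128
    # matches the transformer feat_dim below.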
  transformer_cfg:
    feat_dim: 128
    layers_cfg:
      - {
        type: attn,
        feat_dim: 128,
        num_heads: 1,
        attn_dropout: 0.1,
        dropout: 0.1,
        norm: 'layer_norm',
        ffn_cfg: {
          hidden_dim: 128,
          activation: 'relu',
          dropout: 0.1,
          use_bias: True,
          norm: 'layer_norm'
        },
        pos_emb_cfg: {
          type: mlp
        },
      }
      - {
        type: attn,
        feat_dim: 128,
        num_heads: 1,
        attn_dropout: 0.1,
        dropout: 0.1,
        norm: 'layer_norm',
        ffn_cfg: {
          hidden_dim: 128,
          activation: 'relu',
          dropout: 0.1,
          use_bias: True,
          norm: 'layer_norm'
        },
        pos_emb_cfg: {
          type: mlp
        },
      }
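    # Note: two identical single-head attention layers over 128-d features, each with a
    # 128-d feed-forward block, layer normalization, dropout 0.1 and an MLP positional
    # embedding.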
  rpn_cfg:
    feat_dim: 128
    n_smp_x: 3
    n_smp_y: 3
    n_smp_z: 5
    n_proposals: 64
    n_proposals_train: 48
    sample_method: shrink
    edge_aggr:
      pre_mlps: [129, 128, 128]
      mlps: [128, 128, 128]
      use_xyz: True
      nsample: 8
    # sigma_n2: 0.15
    # fixed_sigma_n2: True
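    # Note (assumption): n_smp_x/y/z define a 3 x 3 x 5 grid (45 points) presumably sampled
    # inside each candidate box; 48 proposals are scored during training and 64 at inference.
    # The 129-d input of edge_aggr presumably concatenates the 128-d point features with one
    # extra scalar channel.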
# task configs
task_type: MBPTask
missing_threshold: 0.0
# optimizer & scheduler configs
optimizer_cfg:
  optimizer_type: Adam
  lr: 0.001
  weight_decay: 0
  betas: [0.5, 0.999]
  eps: 1.0e-6

scheduler_cfg:
  scheduler_type: StepLR
  step_size: 40
  gamma: 0.2
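# Note: with StepLR (step_size: 40, gamma: 0.2) the learning rate decays from 1e-3 to
# 2e-4 at epoch 40, 4e-5 at epoch 80 and 8e-6 at epoch 120 over the 160-epoch schedule.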
# loss configs
loss_cfg:
  mask_weight: 0.2
  rfn_obj_weight: 1.0
  crs_obj_weight: 1.0
  bbox_weight: 1.0
  center_weight: 10.0
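# Note (assumption): the terms are presumably combined as a weighted sum,
# 0.2 * L_mask + 1.0 * L_rfn_obj + 1.0 * L_crs_obj + 1.0 * L_bbox + 10.0 * L_center,
# so the center regression term dominates the objective.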
# train & eval configs
train_cfg:
  max_epochs: 160
  batch_size: 16
  num_workers: 4
  save_per_epoch: 40
  save_top_k: 5
  val_per_epoch: 1
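# Note (assumption): validation runs every epoch; besides the top-5 checkpoints by the
# validation metric, a snapshot is presumably written every 40 epochs (40/80/120/160).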
eval_cfg:
  batch_size: 1
  num_workers: 4
  iou_space: 3
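# Note (assumption): iou_space: 3 presumably selects full 3D IoU (rather than BEV IoU)
# for the Success metric; tracking is evaluated sequence by sequence with batch_size: 1.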