config.yaml
nb_points : 2048 # number of points sampled from each mesh in the 3D dataset if `return_points_sampled` == True
save_all : true # save the checkpoint and results at every epoch (by default only the best test-accuracy epoch is saved)
exp_set : "00" # experiment set identifier
exp_id : "0001" # experiment identifier within the set
renderings_dir : "renderings" # the destination for the renderings
results_dir : "results" # the destination for the results
logs_dir : "logs" # the destination for the tensorboard logs
cameras_dir : "cameras" # the destination for the 3D plots of the cameras
return_points_sampled : true # return 3D point clouds, sampled from the mesh, from the data loader
return_points_saved : false # return 3D point clouds, saved under `filePOINTS.pkl`, from the data loader
rotated_test : false # test with rotation noise on the ModelNet40 meshes to make the setup more realistic
rotated_train : false # train with rotation noise on the ModelNet40 meshes to make the setup more realistic
simplified_mesh : true # learn on simplified meshes (file names ending in `_SMPLER.obj`) instead of the full meshes
cleaned_mesh : true # use meshes cleaned by reversing the light direction for faulty meshes
dset_norm : "2" # the Lp normalization type of the 3D dataset; choices: ["inf", "2", "1", "fro", "no"]
image_size : 224 # the size of the images rendered by the differentiable renderer
light_direction : "random" # light direction when rendering a mesh: random during training, relative to the camera, or the default (0, 1.0, 0)
cull_backfaces : false # cull back faces (remove them from the image)
points_radius : 0.006 # the size of the rendered points if `pc_rendering` is True
points_per_pixel : 1 # max number of points per rendered pixel if `pc_rendering` is True
faces_per_pixel : 1 # max number of faces per rendered pixel if `pc_rendering` is False
background_color : "white" # the background color of the rendered images; choices: ["white", "random", "black", "red", "green", "blue", "custom"]
canonical_elevation : 30.0 # if `views_config` == canonical, the elevation angle of the view points
canonical_distance : 2.2 # the distance of the view points from the center of the object
input_view_noise : 0.0 # variance of the Gaussian noise (before normalization with the parameter range) added to the azim/elev/dist inputs of the MVTN; valid only if `learned_offset` or `learned_direct` is selected
mvtn_learning_rate : 0.0001 # initial learning rate for the view selector
mvtn_weight_decay : 0.01 # weight decay for the MVTN (default: 0.01)
clip_grads : true # clip the gradients of the MVTN to the L2 norm `mvtn_clip_grads_value`
mvtn_clip_grads_value : 30.0 # the clipping value for the L2 norm of the MVTN gradients
shape_extractor : "PointNet" # pretrained point cloud network used to extract coarse features
features_type : "logits" # the type of features taken from the feature extractor (early, middle, late)
transform_distance : false # also transform the distance to the object
screatch_feature_extractor : false # train the feature extractor from scratch; applies ONLY if `is_learning_points` == True
learning_rate : 0.001 # initial learning rate
weight_decay : 0.01 # weight decay (default: 0.01)
momentum : 0.9 # momentum (default: 0.9)
lr_decay_freq : 30 # learning rate decay frequency (default: 30)
lr_decay : 0.1 # learning rate decay factor (default: 0.1)
print_freq : 50 # print frequency
pretrained : true # use a pre-trained CNN for the multi-view network
depth : 18 # ResNet depth for the CNN (default: 18); choices: [18, 34, 50, 101, 152]
view_reg : 0.0 # regularize the rendered views with dropout of this probability (default: 0.0)
augment_training : false # augment the CNN training with scaling, rotation, translation, etc.
crop_ratio : 0.3 # the crop ratio of the images when `augment_training` == True
ignore_normalize : false # skip any image normalization (mean, std) before passing images to the network
resume_mvtn : false # use a pretrained MVTN and freeze it during this training (not the ViewGCN)
first_stage_bs : 400 # batch size for the first stage (default: 400)
first_stage_epochs : 30 # number of epochs for the first stage (default: 30)
resume_first : true # continue first-stage training from the `setup["weights_file"]` checkpoint
resume_second : false # continue second-stage training from the `setup["weights_file"]` checkpoint
log_metrics : true # log loss, accuracy, and other metrics to `logs_dir` for tensorboard
plot_freq : 3 # the frequency of plotting the renderings and camera positions
LFDA_dimension : 64 # dimension of the LFDA projection (0 = no projection)
LFDA_layer : 0 # the layer used for the LFDA projection
max_degs : 180 # the maximum allowed Z rotation on the meshes, in degrees
repeat_exp : 3 # the number of repeated experiments for each setup (due to randomness)
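
For reference, below is a minimal sketch of how the optimization-related entries above might be consumed in training code, assuming the file is parsed with PyYAML and the networks are PyTorch modules. The `load_config` helper and the `torch.nn.Linear` stand-in model are illustrative assumptions, not the repository's actual code; only the config keys come from the file above.

    # sketch_load_config.py -- illustrative only; helper and model names are assumptions
    import yaml
    import torch

    def load_config(path="config.yaml"):
        # parse the YAML into a plain dict of settings
        with open(path) as f:
            return yaml.safe_load(f)

    setup = load_config()

    # hypothetical module standing in for the multi-view network
    model = torch.nn.Linear(512, 40)

    # optimizer built from the learning_rate / momentum / weight_decay entries
    optimizer = torch.optim.SGD(
        model.parameters(),
        lr=setup["learning_rate"],
        momentum=setup["momentum"],
        weight_decay=setup["weight_decay"],
    )

    # step decay: multiply the lr by `lr_decay` every `lr_decay_freq` epochs
    scheduler = torch.optim.lr_scheduler.StepLR(
        optimizer, step_size=setup["lr_decay_freq"], gamma=setup["lr_decay"]
    )

    # inside the training loop, the MVTN clipping settings would map to:
    if setup["clip_grads"]:
        torch.nn.utils.clip_grad_norm_(
            model.parameters(), max_norm=setup["mvtn_clip_grads_value"]
        )

Note that `clip_grad_norm_` must run between `loss.backward()` and `optimizer.step()` so that the gradients being clipped are the ones the optimizer will apply.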