fix bugs [occurred in different environments]
zhulf0804 committed Aug 23, 2022
1 parent a0e8af7 commit 9c98d51
Showing 4 changed files with 13 additions and 12 deletions.
14 changes: 7 additions & 7 deletions README.md
@@ -26,6 +26,13 @@ A Simple PointPillars PyTorch Implenmentation for 3D Lidar(KITTI) Detection. [[Z
![](./figures/pc_pred_000134.png)
![](./figures/img_3dbbox_000134.png)

## [Compile]

```
cd ops
python setup.py develop
```

## [Datasets]

1. Download
@@ -74,13 +81,6 @@ A Simple PointPillars PyTorch Implenmentation for 3D Lidar(KITTI) Detection. [[Z
```
## [Compile]
```
cd ops
python setup.py develop
```
## [Training]
```
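The relocated [Compile] step builds the custom ops in editable mode before any dataset preprocessing. Below is a minimal, hedged smoke test for that build; the importable package name `ops` is an assumption taken from the `cd ops` step, not something this commit confirms.

```
import importlib

# Hedged smoke test: check that `python setup.py develop` left an importable
# extension behind. The module name "ops" is an assumption based on the
# directory name in the README; adjust it to the actual package name.
try:
    ops = importlib.import_module("ops")
    print("compiled ops found at:", ops.__file__)
except ImportError as err:
    print("ops extension not importable yet:", err)
```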
2 changes: 1 addition & 1 deletion model/anchors.py
@@ -174,7 +174,7 @@ def anchor_target(batched_anchors, batched_gt_bboxes, batched_gt_labels, assigne
neg_flag = assigned_gt_inds == 0
# 1. anchor labels
assigned_gt_labels = torch.zeros_like(cur_anchors[:, 0], dtype=torch.long) + nclasses # -1 is not optimal, for some bboxes are with labels -1
assigned_gt_labels[pos_flag] = gt_labels[assigned_gt_inds[pos_flag] - 1]
assigned_gt_labels[pos_flag] = gt_labels[assigned_gt_inds[pos_flag] - 1].long()
assigned_gt_labels_weights = torch.zeros_like(cur_anchors[:, 0])
assigned_gt_labels_weights[pos_flag] = 1
assigned_gt_labels_weights[neg_flag] = 1
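The added `.long()` cast guards the masked assignment against a dtype mismatch. A minimal sketch of the assumed failure mode, where `gt_labels` arrives as int32 in some environments (consistent with the commit message):

```
import torch

# assigned_gt_labels is created as int64 (torch.long) in the code above.
assigned_gt_labels = torch.zeros(4, dtype=torch.long)
pos_flag = torch.tensor([True, False, True, False])

# If gt_labels is int32 (e.g. produced via numpy on some setups), the masked
# assignment can raise a dtype-mismatch RuntimeError on some PyTorch builds.
gt_labels_int32 = torch.tensor([1, 2], dtype=torch.int32)
# assigned_gt_labels[pos_flag] = gt_labels_int32         # may fail: Int vs Long
assigned_gt_labels[pos_flag] = gt_labels_int32.long()    # cast first, as in the fix
print(assigned_gt_labels)
```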
4 changes: 2 additions & 2 deletions model/pointpillars.py
@@ -183,7 +183,7 @@ def forward(self, x):


class Head(nn.Module):
def __init__(self, in_channel, n_anchors=6, n_classes=3):
def __init__(self, in_channel, n_anchors, n_classes):
super().__init__()

self.conv_cls = nn.Conv2d(in_channel, n_anchors*n_classes, 1)
@@ -240,7 +240,7 @@ def __init__(self,
self.neck = Neck(in_channels=[64, 128, 256],
upsample_strides=[1, 2, 4],
out_channels=[128, 128, 128])
self.head = Head(in_channel=384)
self.head = Head(in_channel=384, n_anchors=2*nclasses, n_classes=nclasses)

# anchors
ranges = [[0, -39.68, -0.6, 69.12, 39.68, -0.6],
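Passing `n_anchors` and `n_classes` explicitly keeps the head consistent when `nclasses` is not 3. A stand-alone sketch of how those two values size the classification branch; only `conv_cls` is reproduced, and the `2 * nclasses` figure simply mirrors the diff:

```
import torch
import torch.nn as nn

# Minimal stand-in for Head showing how n_anchors and n_classes size the
# 1x1 classification conv; the real head's other branches are omitted.
class TinyHead(nn.Module):
    def __init__(self, in_channel, n_anchors, n_classes):
        super().__init__()
        self.conv_cls = nn.Conv2d(in_channel, n_anchors * n_classes, 1)

    def forward(self, x):
        return self.conv_cls(x)

nclasses = 3
head = TinyHead(in_channel=384, n_anchors=2 * nclasses, n_classes=nclasses)
x = torch.randn(1, 384, 248, 216)   # BEV feature map; spatial size is illustrative
print(head(x).shape)                # (1, 18, 248, 216): n_anchors * n_classes channels
```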
5 changes: 3 additions & 2 deletions pre_process_kitti.py
@@ -33,6 +33,7 @@ def judge_difficulty(annotation_dict):


def create_data_info_pkl(data_root, data_type, prefix, label=True, db=False):
sep = os.path.sep
print(f"Processing {data_type} data..")
ids_file = os.path.join(CUR, 'dataset', 'ImageSets', f'{data_type}.txt')
with open(ids_file, 'r') as f:
@@ -50,13 +51,13 @@ def create_data_info_pkl(data_root, data_type, prefix, label=True, db=False):
img_path = os.path.join(data_root, split, 'image_2', f'{id}.png')
lidar_path = os.path.join(data_root, split, 'velodyne', f'{id}.bin')
calib_path = os.path.join(data_root, split, 'calib', f'{id}.txt')
cur_info_dict['velodyne_path'] = '/'.join(lidar_path.split('/')[-3:])
cur_info_dict['velodyne_path'] = sep.join(lidar_path.split(sep)[-3:])

img = cv2.imread(img_path)
image_shape = img.shape[:2]
cur_info_dict['image'] = {
'image_shape': image_shape,
'image_path': '/'.join(img_path.split('/')[-3:]),
'image_path': sep.join(img_path.split(sep)[-3:]),
'image_idx': int(id),
}

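Splitting on `os.path.sep` instead of a hard-coded `'/'` keeps the relative-path trimming correct on Windows, where `os.path.join` produces backslashes. A small sketch under that assumption (the path components below are illustrative, not taken from the dataset layout):

```
import os

sep = os.path.sep
# os.path.join uses the platform separator, so '/'-based splitting breaks on Windows.
lidar_path = os.path.join('kitti', 'training', 'velodyne', '000001.bin')
velodyne_rel = sep.join(lidar_path.split(sep)[-3:])
print(velodyne_rel)   # training/velodyne/000001.bin (with backslashes on Windows)
```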
