BDD Evaluation (#7)
* bdd100k evaluation

* minor fix
haofengac authored Mar 11, 2019
1 parent 18b6e28 commit 8900a81
Showing 3 changed files with 17 additions and 10 deletions.
2 changes: 1 addition & 1 deletion maskrcnn_benchmark/config/defaults.py
@@ -268,4 +268,4 @@

 _C.PATHS_CATALOG = os.path.join(os.path.dirname(__file__), "paths_catalog.py")

-_C.TENSORBOARD_LOGDIR = 'logs'
+_C.TENSORBOARD_LOGDIR = 'logs'
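
The `_C` object above is the repository's global yacs `CfgNode`, so the new `TENSORBOARD_LOGDIR` option can be overridden like any other config key. A minimal sketch, assuming the standard maskrcnn_benchmark config entry point (the value `logs/bdd100k` is just an illustrative path):

```python
# Sketch: overriding the new TENSORBOARD_LOGDIR option via yacs.
# Assumes the standard maskrcnn_benchmark config module; the path is illustrative.
from maskrcnn_benchmark.config import cfg

# Equivalent to passing `TENSORBOARD_LOGDIR logs/bdd100k` through the
# `opts` positional argument that tools/evaluate.py forwards to the config.
cfg.merge_from_list(["TENSORBOARD_LOGDIR", "logs/bdd100k"])
print(cfg.TENSORBOARD_LOGDIR)  # logs/bdd100k
```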
11 changes: 7 additions & 4 deletions maskrcnn_benchmark/data/datasets/bdd100k.py
@@ -56,6 +56,12 @@ def __init__(
                if l['category'] in CLASS_TYPE_CONVERSION.keys()]

         self.labels = [l for l in self.labels if len(l['labels']) > 0]
+        for i in range(len(self.labels)):
+            for j in range(len(self.labels[i]['labels'])):
+                label_type = CLASS_TYPE_CONVERSION[self.labels[i]['labels'][j]['category']]
+                self.labels[i]['labels'][j]['category'] = TYPE_ID_CONVERSION[label_type]
+
+        self.image_paths = [os.path.join(self.image_dir, l['name']) for l in self.labels]
         self.length = len(self.labels)

     def __len__(self):
@@ -76,16 +82,13 @@ def __getitem__(self, idx):
         for label in annotations['labels']:
             # TODO: further filter annotations if needed

-            label_type = CLASS_TYPE_CONVERSION[label['category']]
-            classes += [TYPE_ID_CONVERSION[label_type]]
-
             boxes += [
                 label['box2d']['x1'],
                 label['box2d']['y1'],
                 label['box2d']['x2'],
                 label['box2d']['y2']
             ]
-        fns = annotations['name']
+        fns = os.path.join(self.image_dir, annotations['name'])

         boxes = torch.as_tensor(boxes).reshape(-1, 4)
         target = BoxList(boxes, (W, H), mode="xyxy")
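
The net effect of this change is that category strings are converted to numeric class ids once, eagerly, in `__init__`, so `__getitem__` no longer needs the per-label lookup that was deleted above. A minimal sketch of the eager conversion, with placeholder mappings (the real `CLASS_TYPE_CONVERSION` and `TYPE_ID_CONVERSION` tables live in the dataset module and are not shown in this diff):

```python
# Illustrative stand-ins for the real mapping tables; these entries are assumptions.
CLASS_TYPE_CONVERSION = {'car': 'vehicle', 'bus': 'vehicle', 'pedestrian': 'person'}
TYPE_ID_CONVERSION = {'vehicle': 1, 'person': 2}

def convert_categories(labels):
    """Rewrite each annotation's 'category' from a string to a class id, in place."""
    for frame in labels:                 # one entry per image
        for ann in frame['labels']:      # one entry per box
            label_type = CLASS_TYPE_CONVERSION[ann['category']]
            ann['category'] = TYPE_ID_CONVERSION[label_type]
    return labels

labels = [{'name': 'img0.jpg', 'labels': [{'category': 'car'}]}]
assert convert_categories(labels)[0]['labels'][0]['category'] == 1
```

Doing the conversion once up front also keeps the ground-truth labels and the model predictions in the same id space, which is what `tools/evaluate.py` below relies on when it groups both by 'category'.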
14 changes: 9 additions & 5 deletions tools/evaluate.py
@@ -21,7 +21,6 @@ def parse_args():
     """Use argparse to get command line arguments."""
     parser = argparse.ArgumentParser()
     parser.add_argument('--result', type=str, help='path to results to be evaluated')
-    parser.add_argument('--fig_dir', type=str, help='path to save output figures', default='')
     parser.add_argument(
         "opts",
         help="Modify config options using the command-line",
@@ -146,25 +145,29 @@ def cat_pc(gt, predictions, thresholds):
     return recalls, precisions, ap


-def evaluate_detection(gt, pred, class_id_dict, fig_dir, model_name):
+def evaluate_detection(gt, pred, class_id_dict, fig_dir, model_name, image_dir):

     thresholds = [0.5, 0.75]
     aps = np.zeros((len(thresholds), len(class_id_dict.keys())))
     cat_list = [class_id_dict[k] for k in class_id_dict]
+    print(cat_list)
     counters = np.zeros(len(cat_list))
     for idx in range(len(gt)):
         cat_gt = group_by_key(gt[idx]['labels'], 'category')
         cat_pred = group_by_key(pred[idx]['labels'], 'category')
-        image = Image.open(gt[idx]['name'])

         for i, cat in enumerate(cat_list):
+
             if cat in cat_pred and cat in cat_gt:
+
                 r, p, ap = cat_pc(cat_gt[cat], cat_pred[cat], thresholds)
+
                 aps[:, i] += ap
                 counters[i] += 1

-            if len(fig_dir) > 0 and idx % 100 == 0 and False:
+            if len(fig_dir) > 0 and idx % 1000 == 0 and False:
                 fig, ax = plt.figure(), plt.gca()
+                image = Image.open(os.path.join(image_dir, gt[idx]['name']))
                 ax.imshow(image)

                 for l in cat_pred[cat]:
@@ -225,7 +228,8 @@ def main():
         with open(os.path.join(args.result, r)) as f:
             result = json.load(f)

-        mean, breakdown = evaluate_detection(gt, result, class_id_dict, args.fig_dir, r[:-5])
+        fig_dir = os.path.join(args.result, 'figures')
+        mean, breakdown = evaluate_detection(gt, result, class_id_dict, fig_dir, r[:-5], data_loader.dataset.image_dir)

         print('{:.2f}'.format(mean),
               ', '.join(['{:.2f}'.format(n) for n in breakdown]))
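
Both loops above lean on `group_by_key` to bucket ground-truth and predicted boxes by category before computing per-class precision/recall; its body is not part of this diff, so the following is a plausible stand-in with the same call signature (note the visualization branch itself remains disabled by the trailing `and False`):

```python
# Plausible stand-in for the group_by_key helper used in evaluate_detection;
# the actual implementation lives outside this diff.
from collections import defaultdict

def group_by_key(annotations, key):
    """Bucket a list of annotation dicts by the value of `key` (e.g. 'category')."""
    groups = defaultdict(list)
    for ann in annotations:
        groups[ann[key]].append(ann)
    return groups

# Usage mirroring the loop above:
# cat_gt = group_by_key(gt[idx]['labels'], 'category')
# cat_pred = group_by_key(pred[idx]['labels'], 'category')
```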
