# nuScenes dev-kit.
# Code written by Holger Caesar & Oscar Beijbom, 2018.
import argparse
import json
import os
import random
import time
from typing import Tuple, Dict, Any
import numpy as np
import tqdm
from nuscenes import NuScenes
from nuscenes.eval.common.config import config_factory
from nuscenes.eval.common.data_classes import EvalBoxes
from nuscenes.eval.tracking.data_classes import TrackingBox
from nuscenes.eval.common.loaders import load_prediction, add_center_dist, filter_eval_boxes
from nuscenes.eval.detection.algo import accumulate, calc_ap, calc_tp
from nuscenes.eval.detection.constants import TP_METRICS
from nuscenes.eval.detection.data_classes import DetectionConfig, DetectionMetrics, DetectionBox, \
    DetectionMetricDataList
from nuscenes.eval.detection.render import summary_plot, class_pr_curve, class_tp_curve, dist_pr_curve, visualize_sample
from nuscenes.eval.detection.utils import category_to_detection_name


def load_gt(nusc: NuScenes, sample_tokens, box_cls, verbose: bool = False) -> EvalBoxes:
    """
    Loads ground truth boxes from DB.
    :param nusc: A NuScenes instance.
    :param sample_tokens: The sample tokens for which we load GT boxes.
    :param box_cls: Type of box to load, e.g. DetectionBox or TrackingBox.
    :param verbose: Whether to print messages to stdout.
    :return: The GT boxes.
    """
    # Init.
    if box_cls == DetectionBox:
        attribute_map = {a['token']: a['name'] for a in nusc.attribute}
    # if verbose:
    #     print('Loading annotations for {} split from nuScenes version: {}'.format(eval_split, nusc.version))
    all_annotations = EvalBoxes()

    # Load annotations and filter predictions and annotations.
    tracking_id_set = set()
    for sample_token in tqdm.tqdm(sample_tokens, leave=verbose):
        sample = nusc.get('sample', sample_token)
        sample_annotation_tokens = sample['anns']

        sample_boxes = []
        for sample_annotation_token in sample_annotation_tokens:
            sample_annotation = nusc.get('sample_annotation', sample_annotation_token)
            if box_cls == DetectionBox:
                # Get label name in detection task and filter unused labels.
                detection_name = category_to_detection_name(sample_annotation['category_name'])
                if detection_name is None:
                    continue

                # Get attribute_name.
                attr_tokens = sample_annotation['attribute_tokens']
                attr_count = len(attr_tokens)
                if attr_count == 0:
                    attribute_name = ''
                elif attr_count == 1:
                    attribute_name = attribute_map[attr_tokens[0]]
                else:
                    raise Exception('Error: GT annotations must not have more than one attribute!')

                sample_boxes.append(
                    box_cls(
                        sample_token=sample_token,
                        translation=sample_annotation['translation'],
                        size=sample_annotation['size'],
                        rotation=sample_annotation['rotation'],
                        velocity=nusc.box_velocity(sample_annotation['token'])[:2],
                        num_pts=sample_annotation['num_lidar_pts'] + sample_annotation['num_radar_pts'],
                        detection_name=detection_name,
                        detection_score=-1.0,  # GT samples do not have a score.
                        attribute_name=attribute_name
                    )
                )
            elif box_cls == TrackingBox:
                # Use nuScenes token as tracking id.
                tracking_id = sample_annotation['instance_token']
                tracking_id_set.add(tracking_id)

                # Get label name in tracking task and filter unused labels.
                # Import locally to avoid errors when motmetrics package is not installed.
                from nuscenes.eval.tracking.utils import category_to_tracking_name
                tracking_name = category_to_tracking_name(sample_annotation['category_name'])
                if tracking_name is None:
                    continue

                sample_boxes.append(
                    box_cls(
                        sample_token=sample_token,
                        translation=sample_annotation['translation'],
                        size=sample_annotation['size'],
                        rotation=sample_annotation['rotation'],
                        velocity=nusc.box_velocity(sample_annotation['token'])[:2],
                        num_pts=sample_annotation['num_lidar_pts'] + sample_annotation['num_radar_pts'],
                        tracking_id=tracking_id,
                        tracking_name=tracking_name,
                        tracking_score=-1.0  # GT samples do not have a score.
                    )
                )
            else:
                raise NotImplementedError('Error: Invalid box_cls %s!' % box_cls)

        all_annotations.add_boxes(sample_token, sample_boxes)

    if verbose:
        print("Loaded ground truth annotations for {} samples.".format(len(all_annotations.sample_tokens)))

    return all_annotations
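

# Illustrative usage sketch (not part of the original file): one way to build the
# `sample_tokens` argument that load_gt() expects. Evaluating on every sample in the
# loaded dataset is an assumption here; a real caller would typically restrict the
# tokens to a split or to a custom scene selection.
def _example_load_gt_usage(nusc: NuScenes) -> EvalBoxes:
    # Collect every sample token known to this NuScenes instance.
    sample_tokens = [sample['token'] for sample in nusc.sample]
    return load_gt(nusc, sample_tokens, DetectionBox, verbose=True)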


class DetectionEval:
    """
    This is the official nuScenes detection evaluation code.
    Results are written to the provided output_dir.

    nuScenes uses the following detection metrics:
    - Mean Average Precision (mAP): Uses center-distance as matching criterion; averaged over distance thresholds.
    - True Positive (TP) metrics: Average of translation, velocity, scale, orientation and attribute errors.
    - nuScenes Detection Score (NDS): The weighted sum of the above.

    Here is an overview of the functions in this class:
    - init: Loads GT annotations and predictions stored in JSON format and filters the boxes.
    - run: Performs evaluation and dumps the metric data to disk.
    - render: Renders various plots and dumps to disk.

    We assume that:
    - Every sample_token is given in the results, although there may be no predictions for that sample.

    Please see https://www.nuscenes.org/object-detection for more details.
    """
    def __init__(self,
                 nusc: NuScenes,
                 config: DetectionConfig,
                 result_path: str,
                 # eval_set: str,
                 sample_tokens,
                 exclude_objs: dict = None,
                 output_dir: str = None,
                 verbose: bool = True):
        """
        Initialize a DetectionEval object.
        :param nusc: A NuScenes object.
        :param config: A DetectionConfig object.
        :param result_path: Path of the nuScenes JSON result file.
        :param sample_tokens: The sample tokens to evaluate on.
        :param exclude_objs: Optional dict mapping a sample_token to indices of GT boxes to exclude.
        :param output_dir: Folder to save plots and results to.
        :param verbose: Whether to print to stdout.
        """
        self.nusc = nusc
        self.result_path = result_path
        # self.eval_set = eval_set
        self.output_dir = output_dir
        self.verbose = verbose
        self.cfg = config

        # Check result file exists.
        assert os.path.exists(result_path), 'Error: The result file does not exist!'

        # Make dirs.
        self.plot_dir = os.path.join(self.output_dir, 'plots')
        if not os.path.isdir(self.output_dir):
            os.makedirs(self.output_dir)
        if not os.path.isdir(self.plot_dir):
            os.makedirs(self.plot_dir)

        # Load data.
        if verbose:
            print('Initializing nuScenes detection evaluation')
        self.pred_boxes, self.meta = load_prediction(self.result_path, self.cfg.max_boxes_per_sample, DetectionBox,
                                                     verbose=verbose)
        self.gt_boxes = load_gt(self.nusc, sample_tokens, DetectionBox, verbose=verbose)
        if exclude_objs is not None:
            for token, obj_list in exclude_objs.items():
                # Delete in descending index order so earlier deletions do not shift later indices.
                for obj_idx in sorted(obj_list, reverse=True):
                    del self.gt_boxes.boxes[token][obj_idx]

        assert set(self.pred_boxes.sample_tokens) == set(self.gt_boxes.sample_tokens), \
            "Samples in split doesn't match samples in predictions."

        # Add center distances.
        self.pred_boxes = add_center_dist(nusc, self.pred_boxes)
        self.gt_boxes = add_center_dist(nusc, self.gt_boxes)

        # Filter boxes (distance, points per box, etc.).
        if verbose:
            print('Filtering predictions')
        self.pred_boxes = filter_eval_boxes(nusc, self.pred_boxes, self.cfg.class_range, verbose=verbose)
        if verbose:
            print('Filtering ground truth annotations')
        self.gt_boxes = filter_eval_boxes(nusc, self.gt_boxes, self.cfg.class_range, verbose=verbose)

        self.sample_tokens = self.gt_boxes.sample_tokens

    def evaluate(self) -> Tuple[DetectionMetrics, DetectionMetricDataList]:
        """
        Performs the actual evaluation.
        :return: A tuple of high-level and the raw metric data.
        """
        start_time = time.time()

        # -----------------------------------
        # Step 1: Accumulate metric data for all classes and distance thresholds.
        # -----------------------------------
        if self.verbose:
            print('Accumulating metric data...')
        metric_data_list = DetectionMetricDataList()
        for class_name in self.cfg.class_names:
            for dist_th in self.cfg.dist_ths:
                md = accumulate(self.gt_boxes, self.pred_boxes, class_name, self.cfg.dist_fcn_callable, dist_th)
                metric_data_list.set(class_name, dist_th, md)

        # -----------------------------------
        # Step 2: Calculate metrics from the data.
        # -----------------------------------
        if self.verbose:
            print('Calculating metrics...')
        metrics = DetectionMetrics(self.cfg)
        for class_name in self.cfg.class_names:
            # Compute APs.
            for dist_th in self.cfg.dist_ths:
                metric_data = metric_data_list[(class_name, dist_th)]
                ap = calc_ap(metric_data, self.cfg.min_recall, self.cfg.min_precision)
                metrics.add_label_ap(class_name, dist_th, ap)

            # Compute TP metrics.
            for metric_name in TP_METRICS:
                metric_data = metric_data_list[(class_name, self.cfg.dist_th_tp)]
                if class_name in ['traffic_cone'] and metric_name in ['attr_err', 'vel_err', 'orient_err']:
                    tp = np.nan
                elif class_name in ['barrier'] and metric_name in ['attr_err', 'vel_err']:
                    tp = np.nan
                else:
                    tp = calc_tp(metric_data, self.cfg.min_recall, metric_name)
                metrics.add_label_tp(class_name, metric_name, tp)

        # Compute evaluation time.
        metrics.add_runtime(time.time() - start_time)

        return metrics, metric_data_list

    def render(self, metrics: DetectionMetrics, md_list: DetectionMetricDataList) -> None:
        """
        Renders various PR and TP curves.
        :param metrics: DetectionMetrics instance.
        :param md_list: DetectionMetricDataList instance.
        """
        if self.verbose:
            print('Rendering PR and TP curves')

        def savepath(name):
            return os.path.join(self.plot_dir, name + '.pdf')

        summary_plot(md_list, metrics, min_precision=self.cfg.min_precision, min_recall=self.cfg.min_recall,
                     dist_th_tp=self.cfg.dist_th_tp, savepath=savepath('summary'))

        for detection_name in self.cfg.class_names:
            class_pr_curve(md_list, metrics, detection_name, self.cfg.min_precision, self.cfg.min_recall,
                           savepath=savepath(detection_name + '_pr'))

            class_tp_curve(md_list, metrics, detection_name, self.cfg.min_recall, self.cfg.dist_th_tp,
                           savepath=savepath(detection_name + '_tp'))

        for dist_th in self.cfg.dist_ths:
            dist_pr_curve(md_list, metrics, dist_th, self.cfg.min_precision, self.cfg.min_recall,
                          savepath=savepath('dist_pr_' + str(dist_th)))

    def main(self,
             plot_examples: int = 0,
             render_curves: bool = True) -> Dict[str, Any]:
        """
        Main function that loads the evaluation code, visualizes samples, runs the evaluation and renders stat plots.
        :param plot_examples: How many example visualizations to write to disk.
        :param render_curves: Whether to render PR and TP curves to disk.
        :return: A dict that stores the high-level metrics and meta data.
        """
        if plot_examples > 0:
            # Select a random but fixed subset to plot.
            random.seed(42)
            sample_tokens = list(self.sample_tokens)
            random.shuffle(sample_tokens)
            sample_tokens = sample_tokens[:plot_examples]

            # Visualize samples.
            example_dir = os.path.join(self.output_dir, 'examples')
            if not os.path.isdir(example_dir):
                os.mkdir(example_dir)
            for sample_token in sample_tokens:
                visualize_sample(self.nusc,
                                 sample_token,
                                 self.gt_boxes,
                                 # Don't render test GT.
                                 self.pred_boxes,
                                 eval_range=max(self.cfg.class_range.values()),
                                 savepath=os.path.join(example_dir, '{}.png'.format(sample_token)))

        # Run evaluation.
        metrics, metric_data_list = self.evaluate()

        # Render PR and TP curves.
        if render_curves:
            self.render(metrics, metric_data_list)

        # Dump the metric data, meta and metrics to disk.
        if self.verbose:
            print('Saving metrics to: %s' % self.output_dir)
        metrics_summary = metrics.serialize()
        metrics_summary['meta'] = self.meta.copy()
        with open(os.path.join(self.output_dir, 'metrics_summary.json'), 'w') as f:
            json.dump(metrics_summary, f, indent=2)
        with open(os.path.join(self.output_dir, 'metrics_details.json'), 'w') as f:
            json.dump(metric_data_list.serialize(), f, indent=2)

        # Print high-level metrics.
        print('mAP: %.4f' % (metrics_summary['mean_ap']))
        err_name_mapping = {
            'trans_err': 'mATE',
            'scale_err': 'mASE',
            'orient_err': 'mAOE',
            'vel_err': 'mAVE',
            'attr_err': 'mAAE'
        }
        for tp_name, tp_val in metrics_summary['tp_errors'].items():
            print('%s: %.4f' % (err_name_mapping[tp_name], tp_val))
        print('NDS: %.4f' % (metrics_summary['nd_score']))
        print('Eval time: %.1fs' % metrics_summary['eval_time'])

        # Print per-class metrics.
        print()
        print('Per-class results:')
        print('Object Class\tAP\tATE\tASE\tAOE\tAVE\tAAE')
        class_aps = metrics_summary['mean_dist_aps']
        class_tps = metrics_summary['label_tp_errors']
        for class_name in class_aps.keys():
            print('%s\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f'
                  % (class_name, class_aps[class_name],
                     class_tps[class_name]['trans_err'],
                     class_tps[class_name]['scale_err'],
                     class_tps[class_name]['orient_err'],
                     class_tps[class_name]['vel_err'],
                     class_tps[class_name]['attr_err']))

        return metrics_summary


class NuScenesEval(DetectionEval):
    """
    Dummy class for backward-compatibility. Same as DetectionEval.
    """


# if __name__ == "__main__":
#
#     # Settings.
#     parser = argparse.ArgumentParser(description='Evaluate nuScenes detection results.',
#                                      formatter_class=argparse.ArgumentDefaultsHelpFormatter)
#     parser.add_argument('result_path', type=str, help='The submission as a JSON file.')
#     parser.add_argument('--output_dir', type=str, default='~/nuscenes-metrics',
#                         help='Folder to store result metrics, graphs and example visualizations.')
#     parser.add_argument('--eval_set', type=str, default='val',
#                         help='Which dataset split to evaluate on, train, val or test.')
#     parser.add_argument('--dataroot', type=str, default='/data/sets/nuscenes',
#                         help='Default nuScenes data directory.')
#     parser.add_argument('--version', type=str, default='v1.0-trainval',
#                         help='Which version of the nuScenes dataset to evaluate on, e.g. v1.0-trainval.')
#     parser.add_argument('--config_path', type=str, default='',
#                         help='Path to the configuration file.'
#                              'If no path given, the CVPR 2019 configuration will be used.')
#     parser.add_argument('--plot_examples', type=int, default=10,
#                         help='How many example visualizations to write to disk.')
#     parser.add_argument('--render_curves', type=int, default=1,
#                         help='Whether to render PR and TP curves to disk.')
#     parser.add_argument('--verbose', type=int, default=1,
#                         help='Whether to print to stdout.')
#     args = parser.parse_args()
#
#     result_path_ = os.path.expanduser(args.result_path)
#     output_dir_ = os.path.expanduser(args.output_dir)
#     eval_set_ = args.eval_set
#     dataroot_ = args.dataroot
#     version_ = args.version
#     config_path = args.config_path
#     plot_examples_ = args.plot_examples
#     render_curves_ = bool(args.render_curves)
#     verbose_ = bool(args.verbose)
#
#     if config_path == '':
#         cfg_ = config_factory('detection_cvpr_2019')
#     else:
#         with open(config_path, 'r') as _f:
#             cfg_ = DetectionConfig.deserialize(json.load(_f))
#
#     nusc_ = NuScenes(version=version_, verbose=verbose_, dataroot=dataroot_)
#     nusc_eval = DetectionEval(nusc_, config=cfg_, result_path=result_path_, eval_set=eval_set_,
#                               output_dir=output_dir_, verbose=verbose_)
#     nusc_eval.main(plot_examples=plot_examples_, render_curves=render_curves_)
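

# Illustrative end-to-end sketch (an assumption, not the author's entry point): how this
# modified evaluator might be driven now that the upstream __main__ block above is commented
# out. The result-file layout ('results' keyed by sample_token) follows the standard nuScenes
# submission format; the paths and the choice of the CVPR 2019 config are placeholders.
def _example_run_modified_eval(result_path: str,
                               output_dir: str,
                               dataroot: str = '/data/sets/nuscenes',
                               version: str = 'v1.0-trainval',
                               exclude_objs: dict = None) -> Dict[str, Any]:
    nusc = NuScenes(version=version, dataroot=dataroot, verbose=True)
    cfg = config_factory('detection_cvpr_2019')
    # Evaluate on exactly the samples that appear in the submission file.
    with open(result_path, 'r') as f:
        sample_tokens = list(json.load(f)['results'].keys())
    nusc_eval = DetectionEval(nusc, config=cfg, result_path=result_path,
                              sample_tokens=sample_tokens, exclude_objs=exclude_objs,
                              output_dir=output_dir, verbose=True)
    return nusc_eval.main(plot_examples=0, render_curves=True)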