From 2273859572983ceddd272c7632a0f9093c397db9 Mon Sep 17 00:00:00 2001 From: Marat Kopytjuk Date: Sun, 21 Jul 2024 20:50:05 +0200 Subject: [PATCH 01/27] initial skeleton --- stonesoup/metricgenerator/clearmotmetrics.py | 66 ++++++++++++++++++++ 1 file changed, 66 insertions(+) create mode 100644 stonesoup/metricgenerator/clearmotmetrics.py diff --git a/stonesoup/metricgenerator/clearmotmetrics.py b/stonesoup/metricgenerator/clearmotmetrics.py new file mode 100644 index 000000000..a9552c9ca --- /dev/null +++ b/stonesoup/metricgenerator/clearmotmetrics.py @@ -0,0 +1,66 @@ +from ..base import Property +from ..types.association import AssociationSet +from ..types.metric import TimeRangeMetric +from ..types.time import TimeRange +from .base import MetricGenerator +from .manager import MultiManager + + +class CLEARMOTMetrics(MetricGenerator): + """TODO""" + tracks_key: str = Property(doc='Key to access set of tracks added to MetricManager', + default='tracks') + truths_key: str = Property(doc="Key to access set of ground truths added to MetricManager. 
" + "Or key to access a second set of tracks for track-to-track " + "metric generation", + default='groundtruth_paths') + + def compute_metric(self, manager: MultiManager, **kwargs): + + timestamps = manager.list_timestamps(generator=self) + tracks = self._get_data(manager, self.tracks_key) + ground_truths = self._get_data(manager, self.truths_key) + + # TODO: CODE HERE + + time_range = TimeRange(min(timestamps), max(timestamps)) + + motp = TimeRangeMetric(title="MOTP", + value=0.0, + time_range=time_range, + generator=self) + mota = TimeRangeMetric(title="MOTA", + value=0.0, + time_range=time_range, + generator=self) + return [motp, mota] + + def compute_mota(self, manager: MultiManager) -> float: + # TODO: WIP + associations: AssociationSet = self.manager.association_set + associations.associations_including_objects + return 0.0 + + @staticmethod + def num_associated_truths_at_time(manager: MultiManager, ground_truths, timestamp): + """:math:`JT(t)`. Calculate the number of associated true objects held by `manager` at + `timestamp`. 
+ + Parameters + ---------- + manager: MetricManager + Containing the data to be used + ground_truths: set or list of :class:`~.GroundTruthPath` or :class:`~.Track` objects + Containing the groundtruth or track data to be used + timestamp: datetime.datetime + Timestamp at which to compute the value + + Returns + ------- + float + Number of associated true objects held by `manager` at `timestamp` + """ + associations = manager.association_set.associations_at_timestamp(timestamp) + association_objects = {thing for assoc in associations for thing in assoc.objects} + + return sum(1 for truth in ground_truths if truth in association_objects) \ No newline at end of file From 7987c61d13ca4c0d4fd30a23612d9752e440db50 Mon Sep 17 00:00:00 2001 From: Marat Kopytjuk Date: Tue, 23 Jul 2024 21:55:59 +0200 Subject: [PATCH 02/27] continue work --- stonesoup/metricgenerator/clearmotmetrics.py | 121 ++++++++++++++++--- 1 file changed, 101 insertions(+), 20 deletions(-) diff --git a/stonesoup/metricgenerator/clearmotmetrics.py b/stonesoup/metricgenerator/clearmotmetrics.py index a9552c9ca..54397437d 100644 --- a/stonesoup/metricgenerator/clearmotmetrics.py +++ b/stonesoup/metricgenerator/clearmotmetrics.py @@ -1,5 +1,10 @@ +from typing import Set, Tuple + +from stonesoup.measures.state import Measure +from stonesoup.types.track import Track + from ..base import Property -from ..types.association import AssociationSet +from ..types.association import AssociationSet, TimeRangeAssociation from ..types.metric import TimeRangeMetric from ..types.time import TimeRange from .base import MetricGenerator @@ -14,6 +19,9 @@ class CLEARMOTMetrics(MetricGenerator): "Or key to access a second set of tracks for track-to-track " "metric generation", default='groundtruth_paths') + + distance_measure: Measure = Property( + doc="Distance measure used in calculating position accuracy scores.") def compute_metric(self, manager: MultiManager, **kwargs): @@ -22,11 +30,12 @@ def compute_metric(self, 
manager: MultiManager, **kwargs): ground_truths = self._get_data(manager, self.truths_key) # TODO: CODE HERE + motp_score = self.compute_motp(manager) time_range = TimeRange(min(timestamps), max(timestamps)) motp = TimeRangeMetric(title="MOTP", - value=0.0, + value=motp_score, time_range=time_range, generator=self) mota = TimeRangeMetric(title="MOTA", @@ -35,32 +44,104 @@ def compute_metric(self, manager: MultiManager, **kwargs): generator=self) return [motp, mota] - def compute_mota(self, manager: MultiManager) -> float: - # TODO: WIP + def compute_motp(self, manager: MultiManager) -> float: associations: AssociationSet = self.manager.association_set associations.associations_including_objects - return 0.0 + associations: AssociationSet = manager.association_set + + timestamps = manager.list_timestamps(generator=self) + + associations: Set[TimeRangeAssociation] = manager.association_set.associations + + error_sum = 0.0 + num_associated_truth_timestamps = 0 + for association in associations: + + truth, track = self.truth_track_from_association(association) + + time_range = association.time_range + timestamps_for_association = timestamps[(timestamps >= time_range.start_timestamp) & + (timestamps <= time_range.end_timestamp)] + + num_associated_truth_timestamps += len(timestamps_for_association) + for t in timestamps_for_association: + truth_state_at_t = truth[t] + track_state_at_t = track[t] + error = self.distance_measure(truth_state_at_t, track_state_at_t) + error_sum += error + if num_associated_truth_timestamps > 0: + return error_sum / num_associated_truth_timestamps + else: + return float("inf") + + def compute_total_number_of_gt_states(self, manager: MultiManager) -> int: + truth_state_set: Set[Track] = manager.states_sets[self.truths_key] + total_number_of_gt_states = sum(len(truth_track) for truth_track in truth_state_set) + return total_number_of_gt_states + + + def compute_mota(self, manager: MultiManager): + + timestamps = 
manager.list_timestamps(generator=self) + + truth_state_set = manager.states_sets[self.truths_key] + tracks_state_set = manager.states_sets[self.tracks_key] + + num_misses, num_false_positives, num_id_switches = 0, 0, 0 + + for i, timestamp in enumerate(timestamps): + + truths_at_timestamp = [] + tracks_at_timestamp = [] + + associations = manager.association_set.associations_at_timestamp(timestamp) + + matched_truths_at_timestamp = set() + matched_tracks_at_timestamp = set() + for association in associations: + truth, track = self.truth_track_from_association(association) + matched_truths_at_timestamp.add(truth.id) + matched_tracks_at_timestamp.add(track.id) + + unmatched_truth_ids = list(filter(lambda x: x.id not in matched_truths_at_timestamp, + truths_at_timestamp)) + num_misses += len(unmatched_truth_ids) + + unmatched_track_ids = list(filter(lambda x: x.id not in matched_tracks_at_timestamp, + tracks_at_timestamp)) + num_false_positives += len(unmatched_track_ids) + + # TODO: num_id_switches + # if i > 0: + # associations_prev = manager.association_set.associations_at_timestamp(timestamps[i-1]) + + # truths_at_prev_timestamp = set() + # for association in associations_prev: + # truth, track = self.truth_track_from_association(association) + # truths_at_prev_timestamp.add(truth.id) + + # truths_ids_at_both_timestamps = truths_at_prev_timestamp.intersection(matched_truths_at_timestamp) + + number_of_gt_states = self.compute_total_number_of_gt_states(manager) + return 1 - (num_misses + num_false_positives + num_id_switches)/number_of_gt_states + @staticmethod - def num_associated_truths_at_time(manager: MultiManager, ground_truths, timestamp): - """:math:`JT(t)`. Calculate the number of associated true objects held by `manager` at - `timestamp`. + def truth_track_from_association(association) -> Tuple[Track, Track]: + """Find truth and track from an association. 
Parameters ---------- - manager: MetricManager - Containing the data to be used - ground_truths: set or list of :class:`~.GroundTruthPath` or :class:`~.Track` objects - Containing the groundtruth or track data to be used - timestamp: datetime.datetime - Timestamp at which to compute the value + association: Association + Association that contains truth and track as its objects Returns ------- - float - Number of associated true objects held by `manager` at `timestamp` + GroundTruthPath, Track + True object and track that are the objects of the `association` """ - associations = manager.association_set.associations_at_timestamp(timestamp) - association_objects = {thing for assoc in associations for thing in assoc.objects} - - return sum(1 for truth in ground_truths if truth in association_objects) \ No newline at end of file + truth, track = association.objects + # Sets aren't ordered, so need to ensure correct path is truth/track + if isinstance(truth, Track): + truth, track = track, truth + return truth, track From bf5334c4f53d71c892ccb8ee1dcf5a87866a5c45 Mon Sep 17 00:00:00 2001 From: Marat Kopytjuk Date: Thu, 25 Jul 2024 21:27:34 +0200 Subject: [PATCH 03/27] rename class --- stonesoup/metricgenerator/clearmotmetrics.py | 58 ++++++++++------ stonesoup/metricgenerator/tests/conftest.py | 14 ++-- .../tests/test_clearmotmetrics.py | 69 +++++++++++++++++++ 3 files changed, 115 insertions(+), 26 deletions(-) create mode 100644 stonesoup/metricgenerator/tests/test_clearmotmetrics.py diff --git a/stonesoup/metricgenerator/clearmotmetrics.py b/stonesoup/metricgenerator/clearmotmetrics.py index 54397437d..fa794af0d 100644 --- a/stonesoup/metricgenerator/clearmotmetrics.py +++ b/stonesoup/metricgenerator/clearmotmetrics.py @@ -1,17 +1,19 @@ -from typing import Set, Tuple +from typing import List, Set, Tuple + +import numpy as np from stonesoup.measures.state import Measure from stonesoup.types.track import Track from ..base import Property from ..types.association import 
AssociationSet, TimeRangeAssociation -from ..types.metric import TimeRangeMetric -from ..types.time import TimeRange +from ..types.metric import Metric, TimeRangeMetric +from ..types.time import CompoundTimeRange, TimeRange from .base import MetricGenerator from .manager import MultiManager -class CLEARMOTMetrics(MetricGenerator): +class ClearMotMetrics(MetricGenerator): """TODO""" tracks_key: str = Property(doc='Key to access set of tracks added to MetricManager', default='tracks') @@ -19,11 +21,11 @@ class CLEARMOTMetrics(MetricGenerator): "Or key to access a second set of tracks for track-to-track " "metric generation", default='groundtruth_paths') - + distance_measure: Measure = Property( doc="Distance measure used in calculating position accuracy scores.") - def compute_metric(self, manager: MultiManager, **kwargs): + def compute_metric(self, manager: MultiManager, **kwargs) -> List[Metric]: timestamps = manager.list_timestamps(generator=self) tracks = self._get_data(manager, self.tracks_key) @@ -32,6 +34,8 @@ def compute_metric(self, manager: MultiManager, **kwargs): # TODO: CODE HERE motp_score = self.compute_motp(manager) + mota_score = self.compute_mota(manager) + time_range = TimeRange(min(timestamps), max(timestamps)) motp = TimeRangeMetric(title="MOTP", @@ -39,19 +43,19 @@ def compute_metric(self, manager: MultiManager, **kwargs): time_range=time_range, generator=self) mota = TimeRangeMetric(title="MOTA", - value=0.0, + value=mota_score, time_range=time_range, generator=self) return [motp, mota] - + def compute_motp(self, manager: MultiManager) -> float: - associations: AssociationSet = self.manager.association_set - associations.associations_including_objects associations: AssociationSet = manager.association_set timestamps = manager.list_timestamps(generator=self) + timestamps_as_numpy_array = np.array(timestamps) + associations: Set[TimeRangeAssociation] = manager.association_set.associations error_sum = 0.0 @@ -61,9 +65,19 @@ def compute_motp(self, 
manager: MultiManager) -> float: truth, track = self.truth_track_from_association(association) time_range = association.time_range - timestamps_for_association = timestamps[(timestamps >= time_range.start_timestamp) & - (timestamps <= time_range.end_timestamp)] - + + if isinstance(time_range, CompoundTimeRange): + time_ranges = time_range.time_ranges + else: + time_ranges = [time_range,] + + mask = np.zeros(len(timestamps_as_numpy_array), dtype=bool) + for time_range in time_ranges: + mask = mask | ((timestamps_as_numpy_array >= time_range.start_timestamp) + & (timestamps_as_numpy_array <= time_range.end_timestamp)) + + timestamps_for_association = timestamps_as_numpy_array[mask] + num_associated_truth_timestamps += len(timestamps_for_association) for t in timestamps_for_association: truth_state_at_t = truth[t] @@ -74,13 +88,12 @@ def compute_motp(self, manager: MultiManager) -> float: return error_sum / num_associated_truth_timestamps else: return float("inf") - + def compute_total_number_of_gt_states(self, manager: MultiManager) -> int: truth_state_set: Set[Track] = manager.states_sets[self.truths_key] total_number_of_gt_states = sum(len(truth_track) for truth_track in truth_state_set) return total_number_of_gt_states - def compute_mota(self, manager: MultiManager): timestamps = manager.list_timestamps(generator=self) @@ -92,6 +105,7 @@ def compute_mota(self, manager: MultiManager): for i, timestamp in enumerate(timestamps): + # TODO: add lookup here! 
truths_at_timestamp = [] tracks_at_timestamp = [] @@ -104,14 +118,14 @@ def compute_mota(self, manager: MultiManager): matched_truths_at_timestamp.add(truth.id) matched_tracks_at_timestamp.add(track.id) - unmatched_truth_ids = list(filter(lambda x: x.id not in matched_truths_at_timestamp, - truths_at_timestamp)) + unmatched_truth_ids = list(filter(lambda x: x.id not in matched_truths_at_timestamp, + truths_at_timestamp)) num_misses += len(unmatched_truth_ids) - unmatched_track_ids = list(filter(lambda x: x.id not in matched_tracks_at_timestamp, - tracks_at_timestamp)) + unmatched_track_ids = list(filter(lambda x: x.id not in matched_tracks_at_timestamp, + tracks_at_timestamp)) num_false_positives += len(unmatched_track_ids) - + # TODO: num_id_switches # if i > 0: # associations_prev = manager.association_set.associations_at_timestamp(timestamps[i-1]) @@ -120,12 +134,12 @@ def compute_mota(self, manager: MultiManager): # for association in associations_prev: # truth, track = self.truth_track_from_association(association) # truths_at_prev_timestamp.add(truth.id) - + # truths_ids_at_both_timestamps = truths_at_prev_timestamp.intersection(matched_truths_at_timestamp) number_of_gt_states = self.compute_total_number_of_gt_states(manager) return 1 - (num_misses + num_false_positives + num_id_switches)/number_of_gt_states - + @staticmethod def truth_track_from_association(association) -> Tuple[Track, Track]: """Find truth and track from an association. 
diff --git a/stonesoup/metricgenerator/tests/conftest.py b/stonesoup/metricgenerator/tests/conftest.py index 7d041fa73..4052e551b 100644 --- a/stonesoup/metricgenerator/tests/conftest.py +++ b/stonesoup/metricgenerator/tests/conftest.py @@ -4,7 +4,13 @@ import pytest from ...metricgenerator.manager import MultiManager -from ...types.association import TimeRangeAssociation, AssociationSet +from ...models.measurement.linear import LinearGaussian +from ...models.transition.linear import ( + CombinedLinearGaussianTransitionModel, + ConstantVelocity, +) +from ...types.array import CovarianceMatrix, StateVector +from ...types.association import AssociationSet, TimeRangeAssociation from ...types.detection import Detection from ...types.groundtruth import GroundTruthPath, GroundTruthState from ...types.hypothesis import SingleDistanceHypothesis @@ -12,9 +18,6 @@ from ...types.time import TimeRange from ...types.track import Track from ...types.update import GaussianStateUpdate -from ...types.array import CovarianceMatrix, StateVector -from ...models.transition.linear import CombinedLinearGaussianTransitionModel, ConstantVelocity -from ...models.measurement.linear import LinearGaussian @pytest.fixture() @@ -26,6 +29,7 @@ def trial_timestamps(): @pytest.fixture() def trial_truths(trial_timestamps): return [ + # object moving from (x=0, y=0) to (x=3, y=3) with (vx=1, vy=1) GroundTruthPath([ GroundTruthState(np.array([[0, 1, 0, 1]]), timestamp=trial_timestamps[0], metadata={"colour": "red"}), @@ -36,6 +40,7 @@ def trial_truths(trial_timestamps): GroundTruthState(np.array([[3, 1, 3, 1]]), timestamp=trial_timestamps[3], metadata={"colour": "red"}) ]), + # object moving from (x=-2, y=-2) to (x=2, y=2) with (vx=1, vy=1) GroundTruthPath([ GroundTruthState(np.array([[-2, 1, -2, 1]]), timestamp=trial_timestamps[0], metadata={"colour": "green"}), @@ -46,6 +51,7 @@ def trial_truths(trial_timestamps): GroundTruthState(np.array([[2, 1, 2, 1]]), timestamp=trial_timestamps[3], 
metadata={"colour": "green"}) ]), + # object moving from (x=--1, y=1) to (x=3, y=3) with (vx=1, vy=0) GroundTruthPath([ GroundTruthState(np.array([[-1, 1, 1, 0]]), timestamp=trial_timestamps[0], metadata={"colour": "blue"}), diff --git a/stonesoup/metricgenerator/tests/test_clearmotmetrics.py b/stonesoup/metricgenerator/tests/test_clearmotmetrics.py new file mode 100644 index 000000000..bbca9a3e0 --- /dev/null +++ b/stonesoup/metricgenerator/tests/test_clearmotmetrics.py @@ -0,0 +1,69 @@ + +import numpy as np +import pytest + +from ...measures import Euclidean +from ...metricgenerator.manager import MultiManager +from ...types.association import AssociationSet, TimeRangeAssociation +from ...types.metric import TimeRangeMetric +from ...types.time import TimeRange +from ..clearmotmetrics import ClearMotMetrics + + +def test_clearmot_simple(trial_truths, trial_tracks, trial_timestamps): + """We test the most obvious scenario, where we have a single truth track and + a single estimated track. They are both associated for the full time extent of the experiment. 
+ """ + trial_manager = MultiManager() + trial_manager.add_data({'groundtruth_paths': trial_truths[:1], + 'tracks': trial_tracks[:1]}) + + # we test the mo + trial_associations = AssociationSet({ + # association for full time range (4 timesteps) + TimeRangeAssociation(objects={trial_truths[0], trial_tracks[0]}, + time_range=TimeRange(trial_timestamps[0], trial_timestamps[-1])), + }) + trial_manager.association_set = trial_associations + + position_measure = Euclidean((0, 2)) + clearmot_generator = ClearMotMetrics(distance_measure=position_measure) + + trial_manager.generators = [clearmot_generator] + + dx = dy = 0.1 + expected_avg_pos_accuracy = np.sqrt(dx ** 2 + dy ** 2) + + metrics = clearmot_generator.compute_metric(trial_manager) + + motp = metrics[0].value + assert motp == pytest.approx(expected_avg_pos_accuracy) + + mota = metrics[1].value + assert mota == pytest.approx(1.0) + + +def test_clearmot(trial_manager, trial_truths, trial_tracks, trial_associations): + position_measure = Euclidean((0, 2)) + clearmot_generator = ClearMotMetrics(distance_measure=position_measure) + + trial_manager.generators = [clearmot_generator] + + # Test compute_metric + metrics = clearmot_generator.compute_metric(trial_manager) + expected_titles = ["MOTP", "MOTA"] + + # make sure that the titles are correct + returned_metric_titles = [metric.title for metric in metrics] + assert len(expected_titles) == len(returned_metric_titles) + assert set(expected_titles) == set(returned_metric_titles) + + timestamps = trial_manager.list_timestamps(clearmot_generator) + + for metric in metrics: + assert isinstance(metric, TimeRangeMetric) + assert metric.time_range.start == timestamps[0] + assert metric.time_range.end == timestamps[-1] + assert metric.generator == clearmot_generator + + assert isinstance(metric.value, (float, int)) From d789afae2cfa15564348ecb792c0b8a0d936550a Mon Sep 17 00:00:00 2001 From: Marat Kopytjuk Date: Thu, 25 Jul 2024 21:28:58 +0200 Subject: [PATCH 04/27] adapt 
imports --- stonesoup/metricgenerator/clearmotmetrics.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/stonesoup/metricgenerator/clearmotmetrics.py b/stonesoup/metricgenerator/clearmotmetrics.py index fa794af0d..8c7ac3aa6 100644 --- a/stonesoup/metricgenerator/clearmotmetrics.py +++ b/stonesoup/metricgenerator/clearmotmetrics.py @@ -2,19 +2,23 @@ import numpy as np -from stonesoup.measures.state import Measure -from stonesoup.types.track import Track - from ..base import Property +from ..measures.state import Measure from ..types.association import AssociationSet, TimeRangeAssociation from ..types.metric import Metric, TimeRangeMetric from ..types.time import CompoundTimeRange, TimeRange +from ..types.track import Track from .base import MetricGenerator from .manager import MultiManager class ClearMotMetrics(MetricGenerator): - """TODO""" + """TODO + + Reference + [1] Evaluating Multiple Object Tracking Performance: The CLEAR MOT Metrics, + Bernardin et al, 2008 + """ tracks_key: str = Property(doc='Key to access set of tracks added to MetricManager', default='tracks') truths_key: str = Property(doc="Key to access set of ground truths added to MetricManager. 
" From 9a78bd09692f6d285badf56d7fac5bfc287309a0 Mon Sep 17 00:00:00 2001 From: Marat Kopytjuk Date: Thu, 25 Jul 2024 21:31:13 +0200 Subject: [PATCH 05/27] more docs in tests --- stonesoup/metricgenerator/tests/test_clearmotmetrics.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/stonesoup/metricgenerator/tests/test_clearmotmetrics.py b/stonesoup/metricgenerator/tests/test_clearmotmetrics.py index bbca9a3e0..2f748e03f 100644 --- a/stonesoup/metricgenerator/tests/test_clearmotmetrics.py +++ b/stonesoup/metricgenerator/tests/test_clearmotmetrics.py @@ -40,7 +40,11 @@ def test_clearmot_simple(trial_truths, trial_tracks, trial_timestamps): assert motp == pytest.approx(expected_avg_pos_accuracy) mota = metrics[1].value - assert mota == pytest.approx(1.0) + + # because the track is associated with the complete extent of the truth, + # i.e. there are no false positves or misses + expected_mota = 1.0 + assert mota == pytest.approx(expected_mota) def test_clearmot(trial_manager, trial_truths, trial_tracks, trial_associations): From 87bb6500cf0b68e5cff3f62a9d95b7091827ad46 Mon Sep 17 00:00:00 2001 From: Marat Kopytjuk Date: Mon, 29 Jul 2024 22:06:56 +0200 Subject: [PATCH 06/27] continue work --- stonesoup/metricgenerator/clearmotmetrics.py | 117 ++++++++++++++---- .../tests/test_clearmotmetrics.py | 90 +++++++++++++- 2 files changed, 179 insertions(+), 28 deletions(-) diff --git a/stonesoup/metricgenerator/clearmotmetrics.py b/stonesoup/metricgenerator/clearmotmetrics.py index 8c7ac3aa6..d1bc7550e 100644 --- a/stonesoup/metricgenerator/clearmotmetrics.py +++ b/stonesoup/metricgenerator/clearmotmetrics.py @@ -1,16 +1,21 @@ -from typing import List, Set, Tuple +import datetime +from collections import defaultdict +from typing import Dict, List, Set, Tuple, Union import numpy as np from ..base import Property from ..measures.state import Measure from ..types.association import AssociationSet, TimeRangeAssociation +from ..types.groundtruth import 
GroundTruthPath from ..types.metric import Metric, TimeRangeMetric from ..types.time import CompoundTimeRange, TimeRange from ..types.track import Track from .base import MetricGenerator from .manager import MultiManager +MatchSetAtTimestamp = Set[Tuple[str, str]] + class ClearMotMetrics(MetricGenerator): """TODO @@ -56,7 +61,7 @@ def compute_motp(self, manager: MultiManager) -> float: associations: AssociationSet = manager.association_set - timestamps = manager.list_timestamps(generator=self) + timestamps = sorted(manager.list_timestamps(generator=self)) timestamps_as_numpy_array = np.array(timestamps) @@ -98,6 +103,21 @@ def compute_total_number_of_gt_states(self, manager: MultiManager) -> int: total_number_of_gt_states = sum(len(truth_track) for truth_track in truth_state_set) return total_number_of_gt_states + def create_matches_at_time_lookup(self, manager: MultiManager) -> Dict[datetime.datetime, MatchSetAtTimestamp]: + timestamps = manager.list_timestamps(generator=self) + + matches_by_timestamp = defaultdict(set) + + for i, timestamp in enumerate(timestamps): + + associations = manager.association_set.associations_at_timestamp(timestamp) + + for association in associations: + truth, track = self.truth_track_from_association(association) + match_truth_track = (truth.id, track.id) + matches_by_timestamp[timestamp].add(match_truth_track) + return matches_by_timestamp + def compute_mota(self, manager: MultiManager): timestamps = manager.list_timestamps(generator=self) @@ -105,44 +125,78 @@ def compute_mota(self, manager: MultiManager): truth_state_set = manager.states_sets[self.truths_key] tracks_state_set = manager.states_sets[self.tracks_key] - num_misses, num_false_positives, num_id_switches = 0, 0, 0 + truth_ids_at_time = create_ids_at_time_lookup(truth_state_set) + track_ids_at_time = create_ids_at_time_lookup(tracks_state_set) + + matches_at_time_lookup = self.create_matches_at_time_lookup(manager) + + num_misses, num_false_positives, num_miss_matches = 
0, 0, 0 for i, timestamp in enumerate(timestamps): - # TODO: add lookup here! - truths_at_timestamp = [] - tracks_at_timestamp = [] + print(f"i={i}") - associations = manager.association_set.associations_at_timestamp(timestamp) + # TODO: add lookup here! + truths_ids_at_timestamp = truth_ids_at_time[timestamp] + tracks_ids_at_timestamp = track_ids_at_time[timestamp] - matched_truths_at_timestamp = set() - matched_tracks_at_timestamp = set() - for association in associations: - truth, track = self.truth_track_from_association(association) - matched_truths_at_timestamp.add(truth.id) - matched_tracks_at_timestamp.add(track.id) + matches_current = matches_at_time_lookup[timestamp] + matched_truth_ids_curr = {match[0] for match in matches_current} + matched_tracks_at_timestamp = {match[1] for match in matches_current} - unmatched_truth_ids = list(filter(lambda x: x.id not in matched_truths_at_timestamp, - truths_at_timestamp)) + unmatched_truth_ids = list(filter(lambda x: x not in matched_truth_ids_curr, + truths_ids_at_timestamp)) num_misses += len(unmatched_truth_ids) - unmatched_track_ids = list(filter(lambda x: x.id not in matched_tracks_at_timestamp, - tracks_at_timestamp)) + unmatched_track_ids = list(filter(lambda x: x not in matched_tracks_at_timestamp, + tracks_ids_at_timestamp)) num_false_positives += len(unmatched_track_ids) - # TODO: num_id_switches - # if i > 0: - # associations_prev = manager.association_set.associations_at_timestamp(timestamps[i-1]) + if i > 0: + + matches_prev = matches_at_time_lookup[timestamps[i-1]] - # truths_at_prev_timestamp = set() - # for association in associations_prev: - # truth, track = self.truth_track_from_association(association) - # truths_at_prev_timestamp.add(truth.id) + num_miss_matches_current = self._compute_miss_matches_from_match_sets( + matches_prev, matches_current) - # truths_ids_at_both_timestamps = truths_at_prev_timestamp.intersection(matched_truths_at_timestamp) + num_miss_matches += num_miss_matches_current 
number_of_gt_states = self.compute_total_number_of_gt_states(manager) - return 1 - (num_misses + num_false_positives + num_id_switches)/number_of_gt_states + + print(num_misses) + print(num_false_positives) + print(num_miss_matches) + print(number_of_gt_states) + return 1 - (num_misses + num_false_positives + num_miss_matches)/number_of_gt_states + + def _compute_miss_matches_from_match_sets(self, matches_prev: MatchSetAtTimestamp, + matches_current: MatchSetAtTimestamp): + num_miss_matches_current = 0 + + matched_truth_ids_prev = {match[0] for match in matches_prev} + matched_truth_ids_curr = {match[0] for match in matches_current} + truths_ids_at_both_timestamps = matched_truth_ids_prev & matched_truth_ids_curr + + for truth_id in truths_ids_at_both_timestamps: + prev_matches_with_truth_id = list( + filter(lambda match: match[0] == truth_id, matches_prev)) + cur_matches_with_truth_id = list( + filter(lambda match: match[0] == truth_id, matches_current)) + + # if len(prev_matches_with_truth_id) > 1: + # warnings.warn("More than one track per truth is not supported!") + # continue + + # if len(cur_matches_with_truth_id) > 1: + # warnings.warn("More than one track per truth is not supported!") + # continue + + matched_track_id_prev = prev_matches_with_truth_id[0][1] + matched_track_id_curr = cur_matches_with_truth_id[0][1] + + if matched_track_id_prev != matched_track_id_curr: + num_miss_matches_current += 1 + return num_miss_matches_current @staticmethod def truth_track_from_association(association) -> Tuple[Track, Track]: @@ -163,3 +217,14 @@ def truth_track_from_association(association) -> Tuple[Track, Track]: if isinstance(truth, Track): truth, track = track, truth return truth, track + + +def create_ids_at_time_lookup(tracks_set: Set[Union[Track, GroundTruthPath]]) \ + -> Dict[datetime.datetime, Set[str]]: + + track_ids_by_time = defaultdict(set) + for track in tracks_set: + for state in track.last_timestamp_generator(): + 
track_ids_by_time[state.timestamp].add(track.id) + + return track_ids_by_time diff --git a/stonesoup/metricgenerator/tests/test_clearmotmetrics.py b/stonesoup/metricgenerator/tests/test_clearmotmetrics.py index 2f748e03f..f75c3df7f 100644 --- a/stonesoup/metricgenerator/tests/test_clearmotmetrics.py +++ b/stonesoup/metricgenerator/tests/test_clearmotmetrics.py @@ -1,4 +1,6 @@ +from datetime import timedelta + import numpy as np import pytest @@ -7,6 +9,7 @@ from ...types.association import AssociationSet, TimeRangeAssociation from ...types.metric import TimeRangeMetric from ...types.time import TimeRange +from ...types.track import Track from ..clearmotmetrics import ClearMotMetrics @@ -17,7 +20,7 @@ def test_clearmot_simple(trial_truths, trial_tracks, trial_timestamps): trial_manager = MultiManager() trial_manager.add_data({'groundtruth_paths': trial_truths[:1], 'tracks': trial_tracks[:1]}) - + # we test the mo trial_associations = AssociationSet({ # association for full time range (4 timesteps) @@ -47,7 +50,90 @@ def test_clearmot_simple(trial_truths, trial_tracks, trial_timestamps): assert mota == pytest.approx(expected_mota) -def test_clearmot(trial_manager, trial_truths, trial_tracks, trial_associations): +def test_clearmot_with_false_positives(trial_truths, trial_tracks, trial_timestamps): + """We test the most obvious scenario, where we have a single truth track and + a single estimated track. They are both associated for the full time extent of the experiment. 
+ """ + trial_manager = MultiManager() + trial_manager.add_data({'groundtruth_paths': trial_truths[:1], + 'tracks': trial_tracks[:2]}) + + # we test the mo + trial_associations = AssociationSet({ + # association for full time range (4 timesteps) + TimeRangeAssociation(objects={trial_truths[0], trial_tracks[0]}, + time_range=TimeRange(trial_timestamps[0], trial_timestamps[-1])), + }) + trial_manager.association_set = trial_associations + + position_measure = Euclidean((0, 2)) + clearmot_generator = ClearMotMetrics(distance_measure=position_measure) + + trial_manager.generators = [clearmot_generator] + + dx = dy = 0.1 + expected_avg_pos_accuracy = np.sqrt(dx ** 2 + dy ** 2) + + metrics = clearmot_generator.compute_metric(trial_manager) + + motp = metrics[0].value + assert motp == pytest.approx(expected_avg_pos_accuracy) + + mota = metrics[1].value + + num_gt_samples = len(trial_truths[0]) + num_false_positives = len(trial_tracks[1]) + expected_mota = 1.0 - (num_false_positives)/num_gt_samples + + assert mota == pytest.approx(expected_mota) + + +def test_clearmot_with_miss_matches_and_false_positives(trial_truths, trial_tracks, trial_timestamps): + """We test the most obvious scenario, where we have a single truth track and + a single estimated track. They are both associated for the full time extent of the experiment. 
+ """ + trial_manager = MultiManager() + + cut_timestamp = trial_timestamps[2] + track_part_a = Track(states=trial_tracks[0][:cut_timestamp], id=trial_tracks[0].id + "-a") + track_part_b = Track(states=trial_tracks[0][cut_timestamp:], id=trial_tracks[0].id + "-b") + + trial_manager.add_data({'groundtruth_paths': trial_truths[:2], + 'tracks': {track_part_a, track_part_b, trial_tracks[1]}}) + + trial_associations = AssociationSet({ + TimeRangeAssociation(objects={trial_truths[0], track_part_a}, + time_range=TimeRange(trial_timestamps[0], cut_timestamp - timedelta(seconds=1))), + TimeRangeAssociation(objects={trial_truths[0], track_part_b}, + time_range=TimeRange(cut_timestamp, trial_timestamps[-1])), + }) + trial_manager.association_set = trial_associations + + position_measure = Euclidean((0, 2)) + clearmot_generator = ClearMotMetrics(distance_measure=position_measure) + + trial_manager.generators = [clearmot_generator] + + dx = dy = 0.1 + expected_avg_pos_accuracy = np.sqrt(dx ** 2 + dy ** 2) + + metrics = clearmot_generator.compute_metric(trial_manager) + + motp = metrics[0].value + assert motp == pytest.approx(expected_avg_pos_accuracy) + + mota = metrics[1].value + + num_gt_samples = len(trial_truths[0]) + len(trial_truths[1]) + num_false_positives = len(trial_tracks[1]) + num_miss_matches = 1 # ID switch at the cut timestamp + num_misses = len(trial_truths[1]) # GT-1 was not associated at all + + expected_mota = 1.0 - (num_false_positives + num_miss_matches + num_misses)/num_gt_samples + assert mota == pytest.approx(expected_mota) + + +def test_clearmot_interface(trial_manager, trial_truths, trial_tracks, trial_associations): position_measure = Euclidean((0, 2)) clearmot_generator = ClearMotMetrics(distance_measure=position_measure) From 242270b1e547908382f12af2aebbe084327092f3 Mon Sep 17 00:00:00 2001 From: Marat Kopytjuk Date: Mon, 29 Jul 2024 22:09:54 +0200 Subject: [PATCH 07/27] remove print statements --- stonesoup/metricgenerator/clearmotmetrics.py | 10 
+++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/stonesoup/metricgenerator/clearmotmetrics.py b/stonesoup/metricgenerator/clearmotmetrics.py index d1bc7550e..76021fc7e 100644 --- a/stonesoup/metricgenerator/clearmotmetrics.py +++ b/stonesoup/metricgenerator/clearmotmetrics.py @@ -156,21 +156,17 @@ def compute_mota(self, manager: MultiManager): matches_prev = matches_at_time_lookup[timestamps[i-1]] - num_miss_matches_current = self._compute_miss_matches_from_match_sets( + num_miss_matches_current = self._compute_number_of_miss_matches_from_match_sets( matches_prev, matches_current) num_miss_matches += num_miss_matches_current number_of_gt_states = self.compute_total_number_of_gt_states(manager) - print(num_misses) - print(num_false_positives) - print(num_miss_matches) - print(number_of_gt_states) return 1 - (num_misses + num_false_positives + num_miss_matches)/number_of_gt_states - def _compute_miss_matches_from_match_sets(self, matches_prev: MatchSetAtTimestamp, - matches_current: MatchSetAtTimestamp): + def _compute_number_of_miss_matches_from_match_sets(self, matches_prev: MatchSetAtTimestamp, + matches_current: MatchSetAtTimestamp) -> int: num_miss_matches_current = 0 matched_truth_ids_prev = {match[0] for match in matches_prev} From 5b0d600d253219f784c3b9b1781cef94d1ffddf2 Mon Sep 17 00:00:00 2001 From: Marat Kopytjuk Date: Mon, 29 Jul 2024 22:10:28 +0200 Subject: [PATCH 08/27] remove todos --- stonesoup/metricgenerator/clearmotmetrics.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/stonesoup/metricgenerator/clearmotmetrics.py b/stonesoup/metricgenerator/clearmotmetrics.py index 76021fc7e..9d04f0d27 100644 --- a/stonesoup/metricgenerator/clearmotmetrics.py +++ b/stonesoup/metricgenerator/clearmotmetrics.py @@ -37,10 +37,7 @@ class ClearMotMetrics(MetricGenerator): def compute_metric(self, manager: MultiManager, **kwargs) -> List[Metric]: timestamps = manager.list_timestamps(generator=self) - tracks = self._get_data(manager, 
self.tracks_key) - ground_truths = self._get_data(manager, self.truths_key) - # TODO: CODE HERE motp_score = self.compute_motp(manager) mota_score = self.compute_mota(manager) From 1e880b0348917f442b31b814932ef0e77c2abf75 Mon Sep 17 00:00:00 2001 From: Marat Kopytjuk Date: Mon, 29 Jul 2024 22:18:42 +0200 Subject: [PATCH 09/27] refactor time-period --- stonesoup/metricgenerator/tests/conftest.py | 9 +++++++-- .../metricgenerator/tests/test_clearmotmetrics.py | 11 ++++------- .../metricgenerator/tests/test_tracktotruthmetrics.py | 4 ++-- 3 files changed, 13 insertions(+), 11 deletions(-) diff --git a/stonesoup/metricgenerator/tests/conftest.py b/stonesoup/metricgenerator/tests/conftest.py index 4052e551b..3e6d35d2f 100644 --- a/stonesoup/metricgenerator/tests/conftest.py +++ b/stonesoup/metricgenerator/tests/conftest.py @@ -20,10 +20,15 @@ from ...types.update import GaussianStateUpdate +@pytest.fixture +def time_period() -> timedelta: + return timedelta(seconds=1) + + @pytest.fixture() -def trial_timestamps(): +def trial_timestamps(time_period: timedelta): now = datetime.now() - return [now + timedelta(seconds=i) for i in range(4)] + return [now + i*time_period for i in range(4)] @pytest.fixture() diff --git a/stonesoup/metricgenerator/tests/test_clearmotmetrics.py b/stonesoup/metricgenerator/tests/test_clearmotmetrics.py index f75c3df7f..96dc9c68e 100644 --- a/stonesoup/metricgenerator/tests/test_clearmotmetrics.py +++ b/stonesoup/metricgenerator/tests/test_clearmotmetrics.py @@ -1,5 +1,4 @@ -from datetime import timedelta import numpy as np import pytest @@ -51,8 +50,7 @@ def test_clearmot_simple(trial_truths, trial_tracks, trial_timestamps): def test_clearmot_with_false_positives(trial_truths, trial_tracks, trial_timestamps): - """We test the most obvious scenario, where we have a single truth track and - a single estimated track. They are both associated for the full time extent of the experiment. 
+ """TODO """ trial_manager = MultiManager() trial_manager.add_data({'groundtruth_paths': trial_truths[:1], @@ -88,9 +86,8 @@ def test_clearmot_with_false_positives(trial_truths, trial_tracks, trial_timesta assert mota == pytest.approx(expected_mota) -def test_clearmot_with_miss_matches_and_false_positives(trial_truths, trial_tracks, trial_timestamps): - """We test the most obvious scenario, where we have a single truth track and - a single estimated track. They are both associated for the full time extent of the experiment. +def test_clearmot_with_false_positives_and_miss_matches(trial_truths, trial_tracks, trial_timestamps, time_period): + """TODO """ trial_manager = MultiManager() @@ -103,7 +100,7 @@ def test_clearmot_with_miss_matches_and_false_positives(trial_truths, trial_trac trial_associations = AssociationSet({ TimeRangeAssociation(objects={trial_truths[0], track_part_a}, - time_range=TimeRange(trial_timestamps[0], cut_timestamp - timedelta(seconds=1))), + time_range=TimeRange(trial_timestamps[0], cut_timestamp - time_period)), TimeRangeAssociation(objects={trial_truths[0], track_part_b}, time_range=TimeRange(cut_timestamp, trial_timestamps[-1])), }) diff --git a/stonesoup/metricgenerator/tests/test_tracktotruthmetrics.py b/stonesoup/metricgenerator/tests/test_tracktotruthmetrics.py index 262a2d5b3..83114c9c4 100644 --- a/stonesoup/metricgenerator/tests/test_tracktotruthmetrics.py +++ b/stonesoup/metricgenerator/tests/test_tracktotruthmetrics.py @@ -1,11 +1,11 @@ import numpy as np import pytest -from ..tracktotruthmetrics import SIAPMetrics, IDSIAPMetrics from ...measures import Euclidean, Mahalanobis from ...types.groundtruth import GroundTruthPath from ...types.metric import SingleTimeMetric, TimeRangeMetric from ...types.track import Track +from ..tracktotruthmetrics import IDSIAPMetrics, SIAPMetrics @pytest.mark.parametrize('measure_class', [Euclidean, Mahalanobis]) @@ -44,7 +44,7 @@ def test_siap(trial_manager, trial_truths, trial_tracks, 
trial_associations, mea
     pos_accuracy = siap_generator.accuracy_at_time(trial_manager, timestamps[0], position_measure)
     assert pos_accuracy == exp_pos_accuracy
     vel_accuracy = siap_generator.accuracy_at_time(trial_manager, timestamps[0], velocity_measure)
-    assert vel_accuracy == exp_vel_accuracy
+    assert vel_accuracy == pytest.approx(exp_vel_accuracy, abs=1e-9)
 
     # Test truth_track_from_association
     for association in trial_associations:

From 912f867cc906d1596a4ec67500cd9507c74b526b Mon Sep 17 00:00:00 2001
From: Marat Kopytjuk
Date: Wed, 31 Jul 2024 18:55:21 +0200
Subject: [PATCH 10/27] add docstrings

---
 stonesoup/metricgenerator/clearmotmetrics.py | 20 ++++++++++++++++++--
 1 file changed, 18 insertions(+), 2 deletions(-)

diff --git a/stonesoup/metricgenerator/clearmotmetrics.py b/stonesoup/metricgenerator/clearmotmetrics.py
index 9d04f0d27..83e0eb418 100644
--- a/stonesoup/metricgenerator/clearmotmetrics.py
+++ b/stonesoup/metricgenerator/clearmotmetrics.py
@@ -18,8 +18,12 @@
 
 
 class ClearMotMetrics(MetricGenerator):
-    """TODO
-
+    """CLEAR MOT Metrics
+
+    Computes multi-object tracking (MOT) metrics designed for the classification of events,
+    activities, and relationships (CLEAR) evaluation workshops. The implementation provided here
+    is derived from [1] and focuses on providing the MOTP and MOTA scores.
+
     Reference
         [1] Evaluating Multiple Object Tracking Performance: The CLEAR MOT Metrics,
             Bernardin et al, 2008
@@ -35,6 +39,18 @@ class ClearMotMetrics(MetricGenerator):
         doc="Distance measure used in calculating position accuracy scores.")
 
     def compute_metric(self, manager: MultiManager, **kwargs) -> List[Metric]:
+        """Compute MOTP and MOTA metrics for a given time-period covered by truths and the tracks.
+ + Parameters + ---------- + manager : MetricManager + containing the data to be used to create the metric(s) + + Returns + ------- + : list of :class:`~.Metric` objects + Generated metrics + """ timestamps = manager.list_timestamps(generator=self) From eb177701be356f24f6fa91a727314c440cbe5b86 Mon Sep 17 00:00:00 2001 From: Marat Kopytjuk Date: Wed, 31 Jul 2024 18:56:03 +0200 Subject: [PATCH 11/27] reimplement motp and restructure code --- stonesoup/metricgenerator/clearmotmetrics.py | 113 ++++++++++--------- 1 file changed, 57 insertions(+), 56 deletions(-) diff --git a/stonesoup/metricgenerator/clearmotmetrics.py b/stonesoup/metricgenerator/clearmotmetrics.py index 83e0eb418..c2eec6d2d 100644 --- a/stonesoup/metricgenerator/clearmotmetrics.py +++ b/stonesoup/metricgenerator/clearmotmetrics.py @@ -2,19 +2,18 @@ from collections import defaultdict from typing import Dict, List, Set, Tuple, Union -import numpy as np - from ..base import Property from ..measures.state import Measure -from ..types.association import AssociationSet, TimeRangeAssociation from ..types.groundtruth import GroundTruthPath from ..types.metric import Metric, TimeRangeMetric -from ..types.time import CompoundTimeRange, TimeRange +from ..types.state import State +from ..types.time import TimeRange from ..types.track import Track from .base import MetricGenerator from .manager import MultiManager MatchSetAtTimestamp = Set[Tuple[str, str]] +StatesFromTimeIdLookup = Dict[datetime.datetime, Dict[str, State]] class ClearMotMetrics(MetricGenerator): @@ -54,9 +53,9 @@ def compute_metric(self, manager: MultiManager, **kwargs) -> List[Metric]: timestamps = manager.list_timestamps(generator=self) - motp_score = self.compute_motp(manager) + motp_score = self._compute_motp_v2(manager) - mota_score = self.compute_mota(manager) + mota_score = self._compute_mota(manager) time_range = TimeRange(min(timestamps), max(timestamps)) @@ -70,70 +69,51 @@ def compute_metric(self, manager: MultiManager, **kwargs) -> 
List[Metric]: generator=self) return [motp, mota] - def compute_motp(self, manager: MultiManager) -> float: + def _compute_motp(self, manager: MultiManager) -> float: + + unique_timestamps = sorted(manager.list_timestamps(generator=self)) - associations: AssociationSet = manager.association_set + matches_at_time_lookup = self._create_matches_at_time_lookup(manager) - timestamps = sorted(manager.list_timestamps(generator=self)) + truths_set = manager.states_sets[self.truths_key] + tracks_set = manager.states_sets[self.tracks_key] - timestamps_as_numpy_array = np.array(timestamps) + truth_states_by_time_id: StatesFromTimeIdLookup = defaultdict(dict) + for truth in truths_set: + for state in truth.last_timestamp_generator(): + truth_states_by_time_id[state.timestamp][truth.id] = state - associations: Set[TimeRangeAssociation] = manager.association_set.associations + track_states_by_time_id: StatesFromTimeIdLookup = defaultdict(dict) + for track in tracks_set: + for state in track.last_timestamp_generator(): + track_states_by_time_id[state.timestamp][track.id] = state error_sum = 0.0 num_associated_truth_timestamps = 0 - for association in associations: - - truth, track = self.truth_track_from_association(association) - time_range = association.time_range + for timestamp in unique_timestamps: + matches = matches_at_time_lookup[timestamp] - if isinstance(time_range, CompoundTimeRange): - time_ranges = time_range.time_ranges - else: - time_ranges = [time_range,] + num_associated_truth_timestamps += len(matches) - mask = np.zeros(len(timestamps_as_numpy_array), dtype=bool) - for time_range in time_ranges: - mask = mask | ((timestamps_as_numpy_array >= time_range.start_timestamp) - & (timestamps_as_numpy_array <= time_range.end_timestamp)) + for match in matches: + truth_id = match[0] + track_id = match[1] - timestamps_for_association = timestamps_as_numpy_array[mask] + truth_state_at_t = truth_states_by_time_id[timestamp][truth_id] + track_state_at_t = 
track_states_by_time_id[timestamp][track_id] - num_associated_truth_timestamps += len(timestamps_for_association) - for t in timestamps_for_association: - truth_state_at_t = truth[t] - track_state_at_t = track[t] error = self.distance_measure(truth_state_at_t, track_state_at_t) error_sum += error + if num_associated_truth_timestamps > 0: return error_sum / num_associated_truth_timestamps else: return float("inf") - def compute_total_number_of_gt_states(self, manager: MultiManager) -> int: - truth_state_set: Set[Track] = manager.states_sets[self.truths_key] - total_number_of_gt_states = sum(len(truth_track) for truth_track in truth_state_set) - return total_number_of_gt_states - - def create_matches_at_time_lookup(self, manager: MultiManager) -> Dict[datetime.datetime, MatchSetAtTimestamp]: - timestamps = manager.list_timestamps(generator=self) - - matches_by_timestamp = defaultdict(set) - - for i, timestamp in enumerate(timestamps): - - associations = manager.association_set.associations_at_timestamp(timestamp) - - for association in associations: - truth, track = self.truth_track_from_association(association) - match_truth_track = (truth.id, track.id) - matches_by_timestamp[timestamp].add(match_truth_track) - return matches_by_timestamp - - def compute_mota(self, manager: MultiManager): + def _compute_mota(self, manager: MultiManager): - timestamps = manager.list_timestamps(generator=self) + unique_timestamps = manager.list_timestamps(generator=self) truth_state_set = manager.states_sets[self.truths_key] tracks_state_set = manager.states_sets[self.tracks_key] @@ -141,15 +121,14 @@ def compute_mota(self, manager: MultiManager): truth_ids_at_time = create_ids_at_time_lookup(truth_state_set) track_ids_at_time = create_ids_at_time_lookup(tracks_state_set) - matches_at_time_lookup = self.create_matches_at_time_lookup(manager) + matches_at_time_lookup = self._create_matches_at_time_lookup(manager) num_misses, num_false_positives, num_miss_matches = 0, 0, 0 - for i, 
timestamp in enumerate(timestamps): + for i, timestamp in enumerate(unique_timestamps): print(f"i={i}") - # TODO: add lookup here! truths_ids_at_timestamp = truth_ids_at_time[timestamp] tracks_ids_at_timestamp = track_ids_at_time[timestamp] @@ -167,19 +146,41 @@ def compute_mota(self, manager: MultiManager): if i > 0: - matches_prev = matches_at_time_lookup[timestamps[i-1]] + matches_prev = matches_at_time_lookup[unique_timestamps[i-1]] num_miss_matches_current = self._compute_number_of_miss_matches_from_match_sets( matches_prev, matches_current) num_miss_matches += num_miss_matches_current - number_of_gt_states = self.compute_total_number_of_gt_states(manager) + number_of_gt_states = self._compute_total_number_of_gt_states(manager) return 1 - (num_misses + num_false_positives + num_miss_matches)/number_of_gt_states + def _compute_total_number_of_gt_states(self, manager: MultiManager) -> int: + truth_state_set: Set[Track] = manager.states_sets[self.truths_key] + total_number_of_gt_states = sum(len(truth_track) for truth_track in truth_state_set) + return total_number_of_gt_states + + def _create_matches_at_time_lookup(self, manager: MultiManager) \ + -> Dict[datetime.datetime, MatchSetAtTimestamp]: + timestamps = manager.list_timestamps(generator=self) + + matches_by_timestamp = defaultdict(set) + + for i, timestamp in enumerate(timestamps): + + associations = manager.association_set.associations_at_timestamp(timestamp) + + for association in associations: + truth, track = self.truth_track_from_association(association) + match_truth_track = (truth.id, track.id) + matches_by_timestamp[timestamp].add(match_truth_track) + return matches_by_timestamp + def _compute_number_of_miss_matches_from_match_sets(self, matches_prev: MatchSetAtTimestamp, - matches_current: MatchSetAtTimestamp) -> int: + matches_current: MatchSetAtTimestamp)\ + -> int: num_miss_matches_current = 0 matched_truth_ids_prev = {match[0] for match in matches_prev} From 
758dfe9fb6a117dd8505fa3957ff8742ed5fa812 Mon Sep 17 00:00:00 2001 From: Marat Kopytjuk Date: Wed, 31 Jul 2024 19:00:40 +0200 Subject: [PATCH 12/27] refactor code --- stonesoup/metricgenerator/clearmotmetrics.py | 24 +++++++++++--------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/stonesoup/metricgenerator/clearmotmetrics.py b/stonesoup/metricgenerator/clearmotmetrics.py index c2eec6d2d..9a6fd53e9 100644 --- a/stonesoup/metricgenerator/clearmotmetrics.py +++ b/stonesoup/metricgenerator/clearmotmetrics.py @@ -53,7 +53,7 @@ def compute_metric(self, manager: MultiManager, **kwargs) -> List[Metric]: timestamps = manager.list_timestamps(generator=self) - motp_score = self._compute_motp_v2(manager) + motp_score = self._compute_motp(manager) mota_score = self._compute_mota(manager) @@ -78,15 +78,11 @@ def _compute_motp(self, manager: MultiManager) -> float: truths_set = manager.states_sets[self.truths_key] tracks_set = manager.states_sets[self.tracks_key] - truth_states_by_time_id: StatesFromTimeIdLookup = defaultdict(dict) - for truth in truths_set: - for state in truth.last_timestamp_generator(): - truth_states_by_time_id[state.timestamp][truth.id] = state + truth_states_by_time_id: StatesFromTimeIdLookup = _create_state_from_time_and_id_lookup( + truths_set) - track_states_by_time_id: StatesFromTimeIdLookup = defaultdict(dict) - for track in tracks_set: - for state in track.last_timestamp_generator(): - track_states_by_time_id[state.timestamp][track.id] = state + track_states_by_time_id: StatesFromTimeIdLookup = _create_state_from_time_and_id_lookup( + tracks_set) error_sum = 0.0 num_associated_truth_timestamps = 0 @@ -127,8 +123,6 @@ def _compute_mota(self, manager: MultiManager): for i, timestamp in enumerate(unique_timestamps): - print(f"i={i}") - truths_ids_at_timestamp = truth_ids_at_time[timestamp] tracks_ids_at_timestamp = track_ids_at_time[timestamp] @@ -238,3 +232,11 @@ def create_ids_at_time_lookup(tracks_set: Set[Union[Track, 
GroundTruthPath]]) \ track_ids_by_time[state.timestamp].add(track.id) return track_ids_by_time + + +def _create_state_from_time_and_id_lookup(tracks_set: Set[Track]) -> StatesFromTimeIdLookup: + track_states_by_time_id: StatesFromTimeIdLookup = defaultdict(dict) + for track in tracks_set: + for state in track.last_timestamp_generator(): + track_states_by_time_id[state.timestamp][track.id] = state + return track_states_by_time_id From 71ab0ab6ca207b480a4bcf5d6814bb52be5f8b97 Mon Sep 17 00:00:00 2001 From: Marat Kopytjuk Date: Wed, 31 Jul 2024 20:21:19 +0200 Subject: [PATCH 13/27] combine two functions into one --- stonesoup/metricgenerator/clearmotmetrics.py | 58 ++++++-------------- 1 file changed, 18 insertions(+), 40 deletions(-) diff --git a/stonesoup/metricgenerator/clearmotmetrics.py b/stonesoup/metricgenerator/clearmotmetrics.py index 9a6fd53e9..2365a7e7c 100644 --- a/stonesoup/metricgenerator/clearmotmetrics.py +++ b/stonesoup/metricgenerator/clearmotmetrics.py @@ -53,9 +53,7 @@ def compute_metric(self, manager: MultiManager, **kwargs) -> List[Metric]: timestamps = manager.list_timestamps(generator=self) - motp_score = self._compute_motp(manager) - - mota_score = self._compute_mota(manager) + motp_score, mota_score = self._compute_metrics(manager) time_range = TimeRange(min(timestamps), max(timestamps)) @@ -69,8 +67,7 @@ def compute_metric(self, manager: MultiManager, **kwargs) -> List[Metric]: generator=self) return [motp, mota] - def _compute_motp(self, manager: MultiManager) -> float: - + def _compute_metrics(self, manager: MultiManager) -> Tuple[float, float]: unique_timestamps = sorted(manager.list_timestamps(generator=self)) matches_at_time_lookup = self._create_matches_at_time_lookup(manager) @@ -80,19 +77,25 @@ def _compute_motp(self, manager: MultiManager) -> float: truth_states_by_time_id: StatesFromTimeIdLookup = _create_state_from_time_and_id_lookup( truths_set) - track_states_by_time_id: StatesFromTimeIdLookup = 
_create_state_from_time_and_id_lookup( tracks_set) + truth_ids_at_time = create_ids_at_time_lookup(truths_set) + track_ids_at_time = create_ids_at_time_lookup(tracks_set) + error_sum = 0.0 num_associated_truth_timestamps = 0 - for timestamp in unique_timestamps: - matches = matches_at_time_lookup[timestamp] + num_misses, num_false_positives, num_miss_matches = 0, 0, 0 - num_associated_truth_timestamps += len(matches) + for i, timestamp in enumerate(unique_timestamps): + matches_current = matches_at_time_lookup[timestamp] + matched_truth_ids_curr = {match[0] for match in matches_current} + matched_tracks_at_timestamp = {match[1] for match in matches_current} + + num_associated_truth_timestamps += len(matches_current) - for match in matches: + for match in matches_current: truth_id = match[0] track_id = match[1] @@ -102,34 +105,9 @@ def _compute_motp(self, manager: MultiManager) -> float: error = self.distance_measure(truth_state_at_t, track_state_at_t) error_sum += error - if num_associated_truth_timestamps > 0: - return error_sum / num_associated_truth_timestamps - else: - return float("inf") - - def _compute_mota(self, manager: MultiManager): - - unique_timestamps = manager.list_timestamps(generator=self) - - truth_state_set = manager.states_sets[self.truths_key] - tracks_state_set = manager.states_sets[self.tracks_key] - - truth_ids_at_time = create_ids_at_time_lookup(truth_state_set) - track_ids_at_time = create_ids_at_time_lookup(tracks_state_set) - - matches_at_time_lookup = self._create_matches_at_time_lookup(manager) - - num_misses, num_false_positives, num_miss_matches = 0, 0, 0 - - for i, timestamp in enumerate(unique_timestamps): - truths_ids_at_timestamp = truth_ids_at_time[timestamp] tracks_ids_at_timestamp = track_ids_at_time[timestamp] - matches_current = matches_at_time_lookup[timestamp] - matched_truth_ids_curr = {match[0] for match in matches_current} - matched_tracks_at_timestamp = {match[1] for match in matches_current} - unmatched_truth_ids = 
list(filter(lambda x: x not in matched_truth_ids_curr, truths_ids_at_timestamp)) num_misses += len(unmatched_truth_ids) @@ -139,17 +117,17 @@ def _compute_mota(self, manager: MultiManager): num_false_positives += len(unmatched_track_ids) if i > 0: - - matches_prev = matches_at_time_lookup[unique_timestamps[i-1]] - + matches_prev = matches_at_time_lookup[unique_timestamps[i - 1]] num_miss_matches_current = self._compute_number_of_miss_matches_from_match_sets( matches_prev, matches_current) - num_miss_matches += num_miss_matches_current number_of_gt_states = self._compute_total_number_of_gt_states(manager) - return 1 - (num_misses + num_false_positives + num_miss_matches)/number_of_gt_states + motp = (error_sum / num_associated_truth_timestamps) if num_associated_truth_timestamps > 0 else float("inf") + mota = 1 - (num_misses + num_false_positives + num_miss_matches) / number_of_gt_states + + return motp, mota def _compute_total_number_of_gt_states(self, manager: MultiManager) -> int: truth_state_set: Set[Track] = manager.states_sets[self.truths_key] From 0d41958499809fda911b81bd36dca59ab75e2a30 Mon Sep 17 00:00:00 2001 From: Marat Kopytjuk Date: Wed, 31 Jul 2024 20:38:35 +0200 Subject: [PATCH 14/27] finalize implementation --- stonesoup/metricgenerator/clearmotmetrics.py | 75 +++++++++++--------- 1 file changed, 43 insertions(+), 32 deletions(-) diff --git a/stonesoup/metricgenerator/clearmotmetrics.py b/stonesoup/metricgenerator/clearmotmetrics.py index 2365a7e7c..f73041598 100644 --- a/stonesoup/metricgenerator/clearmotmetrics.py +++ b/stonesoup/metricgenerator/clearmotmetrics.py @@ -53,7 +53,7 @@ def compute_metric(self, manager: MultiManager, **kwargs) -> List[Metric]: timestamps = manager.list_timestamps(generator=self) - motp_score, mota_score = self._compute_metrics(manager) + motp_score, mota_score = self._compute_mota_and_motp(manager) time_range = TimeRange(min(timestamps), max(timestamps)) @@ -67,68 +67,79 @@ def compute_metric(self, manager: 
MultiManager, **kwargs) -> List[Metric]: generator=self) return [motp, mota] - def _compute_metrics(self, manager: MultiManager) -> Tuple[float, float]: - unique_timestamps = sorted(manager.list_timestamps(generator=self)) + def _compute_mota_and_motp(self, manager: MultiManager) -> Tuple[float, float]: matches_at_time_lookup = self._create_matches_at_time_lookup(manager) truths_set = manager.states_sets[self.truths_key] tracks_set = manager.states_sets[self.tracks_key] - truth_states_by_time_id: StatesFromTimeIdLookup = _create_state_from_time_and_id_lookup( + truth_states_by_time_and_id: StatesFromTimeIdLookup = _create_state_from_time_and_id_lookup( truths_set) - track_states_by_time_id: StatesFromTimeIdLookup = _create_state_from_time_and_id_lookup( + track_states_by_time_and_id: StatesFromTimeIdLookup = _create_state_from_time_and_id_lookup( tracks_set) - truth_ids_at_time = create_ids_at_time_lookup(truths_set) - track_ids_at_time = create_ids_at_time_lookup(tracks_set) - + # used for the MOTP (avg-distance over matches) error_sum = 0.0 num_associated_truth_timestamps = 0 + # used for the MOTA (1 - number-FPs, ID-changes etc.) 
num_misses, num_false_positives, num_miss_matches = 0, 0, 0 + unique_timestamps = sorted(manager.list_timestamps(generator=self)) + for i, timestamp in enumerate(unique_timestamps): + matches_current = matches_at_time_lookup[timestamp] + matched_truth_ids_curr = {match[0] for match in matches_current} matched_tracks_at_timestamp = {match[1] for match in matches_current} + # adapt the variables for MOTP calculation + error_sum_in_timestep = self._compute_sum_of_distances_at_timestep( + truth_states_by_time_and_id, track_states_by_time_and_id, timestamp, matches_current) + error_sum += error_sum_in_timestep num_associated_truth_timestamps += len(matches_current) - for match in matches_current: - truth_id = match[0] - track_id = match[1] - - truth_state_at_t = truth_states_by_time_id[timestamp][truth_id] - track_state_at_t = track_states_by_time_id[timestamp][track_id] - - error = self.distance_measure(truth_state_at_t, track_state_at_t) - error_sum += error - - truths_ids_at_timestamp = truth_ids_at_time[timestamp] - tracks_ids_at_timestamp = track_ids_at_time[timestamp] + truths_ids_at_timestamp = truth_states_by_time_and_id[timestamp].keys() + tracks_ids_at_timestamp = track_states_by_time_and_id[timestamp].keys() unmatched_truth_ids = list(filter(lambda x: x not in matched_truth_ids_curr, truths_ids_at_timestamp)) - num_misses += len(unmatched_truth_ids) - unmatched_track_ids = list(filter(lambda x: x not in matched_tracks_at_timestamp, tracks_ids_at_timestamp)) + + # update counter variables used for MOTA + num_misses += len(unmatched_truth_ids) num_false_positives += len(unmatched_track_ids) if i > 0: + # for number of mis-matches (i.e. 
track ID changes for a single truth track) matches_prev = matches_at_time_lookup[unique_timestamps[i - 1]] num_miss_matches_current = self._compute_number_of_miss_matches_from_match_sets( matches_prev, matches_current) num_miss_matches += num_miss_matches_current - number_of_gt_states = self._compute_total_number_of_gt_states(manager) - motp = (error_sum / num_associated_truth_timestamps) if num_associated_truth_timestamps > 0 else float("inf") + + number_of_gt_states = self._compute_total_number_of_gt_states(manager) mota = 1 - (num_misses + num_false_positives + num_miss_matches) / number_of_gt_states return motp, mota + def _compute_sum_of_distances_at_timestep(self, truth_states_by_time_id, track_states_by_time_id, timestamp, matches_current): + error_sum_in_timestep = 0.0 + for match in matches_current: + truth_id = match[0] + track_id = match[1] + + truth_state_at_t = truth_states_by_time_id[timestamp][truth_id] + track_state_at_t = track_states_by_time_id[timestamp][track_id] + + error = self.distance_measure(truth_state_at_t, track_state_at_t) + error_sum_in_timestep += error + return error_sum_in_timestep + def _compute_total_number_of_gt_states(self, manager: MultiManager) -> int: truth_state_set: Set[Track] = manager.states_sets[self.truths_key] total_number_of_gt_states = sum(len(truth_track) for truth_track in truth_state_set) @@ -201,6 +212,14 @@ def truth_track_from_association(association) -> Tuple[Track, Track]: return truth, track +def _create_state_from_time_and_id_lookup(tracks_set: Set[Union[Track, GroundTruthPath]]) -> StatesFromTimeIdLookup: + track_states_by_time_id: StatesFromTimeIdLookup = defaultdict(dict) + for track in tracks_set: + for state in track.last_timestamp_generator(): + track_states_by_time_id[state.timestamp][track.id] = state + return track_states_by_time_id + + def create_ids_at_time_lookup(tracks_set: Set[Union[Track, GroundTruthPath]]) \ -> Dict[datetime.datetime, Set[str]]: @@ -210,11 +229,3 @@ def 
create_ids_at_time_lookup(tracks_set: Set[Union[Track, GroundTruthPath]]) \ track_ids_by_time[state.timestamp].add(track.id) return track_ids_by_time - - -def _create_state_from_time_and_id_lookup(tracks_set: Set[Track]) -> StatesFromTimeIdLookup: - track_states_by_time_id: StatesFromTimeIdLookup = defaultdict(dict) - for track in tracks_set: - for state in track.last_timestamp_generator(): - track_states_by_time_id[state.timestamp][track.id] = state - return track_states_by_time_id From af0143e2b688e4f6c6b2394d4dd4f4be01cc907c Mon Sep 17 00:00:00 2001 From: Marat Kopytjuk Date: Wed, 31 Jul 2024 20:44:43 +0200 Subject: [PATCH 15/27] fix linting --- stonesoup/metricgenerator/clearmotmetrics.py | 23 ++++++++++++------- .../tests/test_clearmotmetrics.py | 8 ++++--- 2 files changed, 20 insertions(+), 11 deletions(-) diff --git a/stonesoup/metricgenerator/clearmotmetrics.py b/stonesoup/metricgenerator/clearmotmetrics.py index f73041598..8c50e9afe 100644 --- a/stonesoup/metricgenerator/clearmotmetrics.py +++ b/stonesoup/metricgenerator/clearmotmetrics.py @@ -74,10 +74,10 @@ def _compute_mota_and_motp(self, manager: MultiManager) -> Tuple[float, float]: truths_set = manager.states_sets[self.truths_key] tracks_set = manager.states_sets[self.tracks_key] - truth_states_by_time_and_id: StatesFromTimeIdLookup = _create_state_from_time_and_id_lookup( - truths_set) - track_states_by_time_and_id: StatesFromTimeIdLookup = _create_state_from_time_and_id_lookup( - tracks_set) + truth_states_by_time_and_id: StatesFromTimeIdLookup = \ + _create_state_from_time_and_id_lookup(truths_set) + track_states_by_time_and_id: StatesFromTimeIdLookup = \ + _create_state_from_time_and_id_lookup(tracks_set) # used for the MOTP (avg-distance over matches) error_sum = 0.0 @@ -97,7 +97,8 @@ def _compute_mota_and_motp(self, manager: MultiManager) -> Tuple[float, float]: # adapt the variables for MOTP calculation error_sum_in_timestep = self._compute_sum_of_distances_at_timestep( - 
truth_states_by_time_and_id, track_states_by_time_and_id, timestamp, matches_current) + truth_states_by_time_and_id, track_states_by_time_and_id, timestamp, + matches_current) error_sum += error_sum_in_timestep num_associated_truth_timestamps += len(matches_current) @@ -120,14 +121,19 @@ def _compute_mota_and_motp(self, manager: MultiManager) -> Tuple[float, float]: matches_prev, matches_current) num_miss_matches += num_miss_matches_current - motp = (error_sum / num_associated_truth_timestamps) if num_associated_truth_timestamps > 0 else float("inf") + motp = (error_sum / num_associated_truth_timestamps) \ + if num_associated_truth_timestamps > 0 else float("inf") number_of_gt_states = self._compute_total_number_of_gt_states(manager) mota = 1 - (num_misses + num_false_positives + num_miss_matches) / number_of_gt_states return motp, mota - def _compute_sum_of_distances_at_timestep(self, truth_states_by_time_id, track_states_by_time_id, timestamp, matches_current): + def _compute_sum_of_distances_at_timestep(self, + truth_states_by_time_id: StatesFromTimeIdLookup, + track_states_by_time_id: StatesFromTimeIdLookup, + timestamp: datetime.datetime, + matches_current: MatchSetAtTimestamp): error_sum_in_timestep = 0.0 for match in matches_current: truth_id = match[0] @@ -212,7 +218,8 @@ def truth_track_from_association(association) -> Tuple[Track, Track]: return truth, track -def _create_state_from_time_and_id_lookup(tracks_set: Set[Union[Track, GroundTruthPath]]) -> StatesFromTimeIdLookup: +def _create_state_from_time_and_id_lookup(tracks_set: Set[Union[Track, GroundTruthPath]]) \ + -> StatesFromTimeIdLookup: track_states_by_time_id: StatesFromTimeIdLookup = defaultdict(dict) for track in tracks_set: for state in track.last_timestamp_generator(): diff --git a/stonesoup/metricgenerator/tests/test_clearmotmetrics.py b/stonesoup/metricgenerator/tests/test_clearmotmetrics.py index 96dc9c68e..9d6e25153 100644 --- a/stonesoup/metricgenerator/tests/test_clearmotmetrics.py +++ 
b/stonesoup/metricgenerator/tests/test_clearmotmetrics.py @@ -13,7 +13,7 @@ def test_clearmot_simple(trial_truths, trial_tracks, trial_timestamps): - """We test the most obvious scenario, where we have a single truth track and + """We test the most obvious scenario, where we have a single truth track and a single estimated track. They are both associated for the full time extent of the experiment. """ trial_manager = MultiManager() @@ -86,7 +86,8 @@ def test_clearmot_with_false_positives(trial_truths, trial_tracks, trial_timesta assert mota == pytest.approx(expected_mota) -def test_clearmot_with_false_positives_and_miss_matches(trial_truths, trial_tracks, trial_timestamps, time_period): +def test_clearmot_with_false_positives_and_miss_matches(trial_truths, trial_tracks, + trial_timestamps, time_period): """TODO """ trial_manager = MultiManager() @@ -100,7 +101,8 @@ def test_clearmot_with_false_positives_and_miss_matches(trial_truths, trial_trac trial_associations = AssociationSet({ TimeRangeAssociation(objects={trial_truths[0], track_part_a}, - time_range=TimeRange(trial_timestamps[0], cut_timestamp - time_period)), + time_range=TimeRange(trial_timestamps[0], cut_timestamp - time_period) + ), TimeRangeAssociation(objects={trial_truths[0], track_part_b}, time_range=TimeRange(cut_timestamp, trial_timestamps[-1])), }) From 74baf56e85a3ed19d6d60dda0b04448602266025 Mon Sep 17 00:00:00 2001 From: Marat Kopytjuk Date: Wed, 31 Jul 2024 20:50:58 +0200 Subject: [PATCH 16/27] document tests --- .../tests/test_clearmotmetrics.py | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/stonesoup/metricgenerator/tests/test_clearmotmetrics.py b/stonesoup/metricgenerator/tests/test_clearmotmetrics.py index 9d6e25153..4cf14960e 100644 --- a/stonesoup/metricgenerator/tests/test_clearmotmetrics.py +++ b/stonesoup/metricgenerator/tests/test_clearmotmetrics.py @@ -32,12 +32,11 @@ def test_clearmot_simple(trial_truths, trial_tracks, trial_timestamps): 
clearmot_generator = ClearMotMetrics(distance_measure=position_measure) trial_manager.generators = [clearmot_generator] + metrics = clearmot_generator.compute_metric(trial_manager) dx = dy = 0.1 expected_avg_pos_accuracy = np.sqrt(dx ** 2 + dy ** 2) - metrics = clearmot_generator.compute_metric(trial_manager) - motp = metrics[0].value assert motp == pytest.approx(expected_avg_pos_accuracy) @@ -50,7 +49,8 @@ def test_clearmot_simple(trial_truths, trial_tracks, trial_timestamps): def test_clearmot_with_false_positives(trial_truths, trial_tracks, trial_timestamps): - """TODO + """Test with a single truth track and two hypothesis tracks, where the second track is + not assigned, i.e. causing false positives over its lifetime """ trial_manager = MultiManager() trial_manager.add_data({'groundtruth_paths': trial_truths[:1], @@ -69,11 +69,11 @@ def test_clearmot_with_false_positives(trial_truths, trial_tracks, trial_timesta trial_manager.generators = [clearmot_generator] + metrics = clearmot_generator.compute_metric(trial_manager) + dx = dy = 0.1 expected_avg_pos_accuracy = np.sqrt(dx ** 2 + dy ** 2) - metrics = clearmot_generator.compute_metric(trial_manager) - motp = metrics[0].value assert motp == pytest.approx(expected_avg_pos_accuracy) @@ -88,7 +88,10 @@ def test_clearmot_with_false_positives(trial_truths, trial_tracks, trial_timesta def test_clearmot_with_false_positives_and_miss_matches(trial_truths, trial_tracks, trial_timestamps, time_period): - """TODO + """Test with a single truth track and 3 hypothesis tracks, where: + - the first and second track are assigned to the truth track, but have different IDs over + different periods of time, caussing an ID-mismatch + - the third track is track is not assigned, i.e. 
causing false positives over its lifetime """ trial_manager = MultiManager() @@ -113,11 +116,11 @@ def test_clearmot_with_false_positives_and_miss_matches(trial_truths, trial_trac trial_manager.generators = [clearmot_generator] + metrics = clearmot_generator.compute_metric(trial_manager) + dx = dy = 0.1 expected_avg_pos_accuracy = np.sqrt(dx ** 2 + dy ** 2) - metrics = clearmot_generator.compute_metric(trial_manager) - motp = metrics[0].value assert motp == pytest.approx(expected_avg_pos_accuracy) From ea3edb362415d62366ffc2068e8a2f2c394ee08e Mon Sep 17 00:00:00 2001 From: Marat Kopytjuk Date: Wed, 31 Jul 2024 20:58:22 +0200 Subject: [PATCH 17/27] remove whitespace at the top of tests --- stonesoup/metricgenerator/tests/test_clearmotmetrics.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/stonesoup/metricgenerator/tests/test_clearmotmetrics.py b/stonesoup/metricgenerator/tests/test_clearmotmetrics.py index 4cf14960e..a2dd88d3c 100644 --- a/stonesoup/metricgenerator/tests/test_clearmotmetrics.py +++ b/stonesoup/metricgenerator/tests/test_clearmotmetrics.py @@ -1,5 +1,3 @@ - - import numpy as np import pytest From 9fde5fc163801837f5ed5c3d84e0fe1ced0410d2 Mon Sep 17 00:00:00 2001 From: Marat Kopytjuk Date: Wed, 31 Jul 2024 20:59:30 +0200 Subject: [PATCH 18/27] clean up comments --- stonesoup/metricgenerator/tests/test_clearmotmetrics.py | 1 - 1 file changed, 1 deletion(-) diff --git a/stonesoup/metricgenerator/tests/test_clearmotmetrics.py b/stonesoup/metricgenerator/tests/test_clearmotmetrics.py index a2dd88d3c..72206f178 100644 --- a/stonesoup/metricgenerator/tests/test_clearmotmetrics.py +++ b/stonesoup/metricgenerator/tests/test_clearmotmetrics.py @@ -18,7 +18,6 @@ def test_clearmot_simple(trial_truths, trial_tracks, trial_timestamps): trial_manager.add_data({'groundtruth_paths': trial_truths[:1], 'tracks': trial_tracks[:1]}) - # we test the mo trial_associations = AssociationSet({ # association for full time range (4 timesteps) 
TimeRangeAssociation(objects={trial_truths[0], trial_tracks[0]}, From d5aff35a53b2601ab29f41c4789cb5851fcfbe7d Mon Sep 17 00:00:00 2001 From: Marat Kopytjuk Date: Sat, 3 Aug 2024 10:28:27 +0200 Subject: [PATCH 19/27] improve and test interface --- stonesoup/metricgenerator/clearmotmetrics.py | 51 +++++++++++++++---- stonesoup/metricgenerator/tests/conftest.py | 2 +- .../tests/test_clearmotmetrics.py | 48 +++++++++-------- 3 files changed, 69 insertions(+), 32 deletions(-) diff --git a/stonesoup/metricgenerator/clearmotmetrics.py b/stonesoup/metricgenerator/clearmotmetrics.py index 8c50e9afe..3c918aa4a 100644 --- a/stonesoup/metricgenerator/clearmotmetrics.py +++ b/stonesoup/metricgenerator/clearmotmetrics.py @@ -12,7 +12,7 @@ from .base import MetricGenerator from .manager import MultiManager -MatchSetAtTimestamp = Set[Tuple[str, str]] +MatchSetAtTimestamp = Set[Tuple[str, str]] # tuples of (truth, track) StatesFromTimeIdLookup = Dict[datetime.datetime, Dict[str, State]] @@ -71,6 +71,8 @@ def _compute_mota_and_motp(self, manager: MultiManager) -> Tuple[float, float]: matches_at_time_lookup = self._create_matches_at_time_lookup(manager) + check_matches_for_metric_calculation(matches_at_time_lookup) + truths_set = manager.states_sets[self.truths_key] tracks_set = manager.states_sets[self.tracks_key] @@ -133,7 +135,7 @@ def _compute_sum_of_distances_at_timestep(self, truth_states_by_time_id: StatesFromTimeIdLookup, track_states_by_time_id: StatesFromTimeIdLookup, timestamp: datetime.datetime, - matches_current: MatchSetAtTimestamp): + matches_current: MatchSetAtTimestamp) -> float: error_sum_in_timestep = 0.0 for match in matches_current: truth_id = match[0] @@ -167,7 +169,8 @@ def _create_matches_at_time_lookup(self, manager: MultiManager) \ matches_by_timestamp[timestamp].add(match_truth_track) return matches_by_timestamp - def _compute_number_of_miss_matches_from_match_sets(self, matches_prev: MatchSetAtTimestamp, + def 
_compute_number_of_miss_matches_from_match_sets(self, + matches_prev: MatchSetAtTimestamp, matches_current: MatchSetAtTimestamp)\ -> int: num_miss_matches_current = 0 @@ -182,14 +185,6 @@ def _compute_number_of_miss_matches_from_match_sets(self, matches_prev: MatchSet cur_matches_with_truth_id = list( filter(lambda match: match[0] == truth_id, matches_current)) - # if len(prev_matches_with_truth_id) > 1: - # warnings.warn("More than one track per truth is not supported!") - # continue - - # if len(cur_matches_with_truth_id) > 1: - # warnings.warn("More than one track per truth is not supported!") - # continue - matched_track_id_prev = prev_matches_with_truth_id[0][1] matched_track_id_curr = cur_matches_with_truth_id[0][1] @@ -236,3 +231,37 @@ def create_ids_at_time_lookup(tracks_set: Set[Union[Track, GroundTruthPath]]) \ track_ids_by_time[state.timestamp].add(track.id) return track_ids_by_time + + +class AssociationSetNotValid(Exception): + pass + + +def check_matches_for_metric_calculation(matches_by_timestamp: + Dict[datetime.datetime, MatchSetAtTimestamp]): + """Checks the matches prior to computing CLEAR MOT metrics. If this function returns + without raising an exception, it is checked that a single track is associated with one truth + (one-2-one relationship) at a given timestep and vice versa. + + Parameters + ---------- + matches_by_timestamp: Dict[datetime.datetime, MatchSetAtTimestamp] + Dictionary which returns a set of (truth, track) matches for a given timestamp. + + Raises + ------ + AssociationSetNotValid + """ + + for t, matches in matches_by_timestamp.items(): + truth_ids = [m[0] for m in matches] + if len(truth_ids) > len(set(truth_ids)): + raise AssociationSetNotValid(f"Multiple tracks are assigned with " + f"the same truth track at time {t}!" 
+ " Resolve this ambiguity in order to continue.") + + track_ids = [m[1] for m in matches] + if len(track_ids) > len(set(track_ids)): + raise AssociationSetNotValid(f"A single track is assigned with " + f"multiple truth tracks at time {t}!" + " Resolve this ambiguity in order to continue.") diff --git a/stonesoup/metricgenerator/tests/conftest.py b/stonesoup/metricgenerator/tests/conftest.py index 3e6d35d2f..9cabc3be0 100644 --- a/stonesoup/metricgenerator/tests/conftest.py +++ b/stonesoup/metricgenerator/tests/conftest.py @@ -27,7 +27,7 @@ def time_period() -> timedelta: @pytest.fixture() def trial_timestamps(time_period: timedelta): - now = datetime.now() + now = datetime(2024, 1, 1, 0, 0, 0) return [now + i*time_period for i in range(4)] diff --git a/stonesoup/metricgenerator/tests/test_clearmotmetrics.py b/stonesoup/metricgenerator/tests/test_clearmotmetrics.py index 72206f178..94121e4b2 100644 --- a/stonesoup/metricgenerator/tests/test_clearmotmetrics.py +++ b/stonesoup/metricgenerator/tests/test_clearmotmetrics.py @@ -7,7 +7,7 @@ from ...types.metric import TimeRangeMetric from ...types.time import TimeRange from ...types.track import Track -from ..clearmotmetrics import ClearMotMetrics +from ..clearmotmetrics import AssociationSetNotValid, ClearMotMetrics def test_clearmot_simple(trial_truths, trial_tracks, trial_timestamps): @@ -31,6 +31,8 @@ def test_clearmot_simple(trial_truths, trial_tracks, trial_timestamps): trial_manager.generators = [clearmot_generator] metrics = clearmot_generator.compute_metric(trial_manager) + _check_metric_interface(trial_manager, clearmot_generator, metrics) + dx = dy = 0.1 expected_avg_pos_accuracy = np.sqrt(dx ** 2 + dy ** 2) @@ -45,6 +47,25 @@ def test_clearmot_simple(trial_truths, trial_tracks, trial_timestamps): assert mota == pytest.approx(expected_mota) +def _check_metric_interface(trial_manager, clearmot_generator, metrics): + expected_titles = ["MOTP", "MOTA"] + + # make sure that the titles are correct + 
returned_metric_titles = [metric.title for metric in metrics] + assert len(expected_titles) == len(returned_metric_titles) + assert set(expected_titles) == set(returned_metric_titles) + + timestamps = trial_manager.list_timestamps(clearmot_generator) + + for metric in metrics: + assert isinstance(metric, TimeRangeMetric) + assert metric.time_range.start == timestamps[0] + assert metric.time_range.end == timestamps[-1] + assert metric.generator == clearmot_generator + + assert isinstance(metric.value, (float, float)) + + def test_clearmot_with_false_positives(trial_truths, trial_tracks, trial_timestamps): """Test with a single truth track and two hypothesis tracks, where the second track is not assigned, i.e. causing false positives over its lifetime @@ -132,27 +153,14 @@ def test_clearmot_with_false_positives_and_miss_matches(trial_truths, trial_trac assert mota == pytest.approx(expected_mota) -def test_clearmot_interface(trial_manager, trial_truths, trial_tracks, trial_associations): +def test_clearmot_match_check(trial_manager): + """Since there are multiple tracks assigned with the truth at the second timestep, + and CLEAR MOT does not support that, we raise an exception. This test checks that. 
+ """ position_measure = Euclidean((0, 2)) clearmot_generator = ClearMotMetrics(distance_measure=position_measure) trial_manager.generators = [clearmot_generator] - # Test compute_metric - metrics = clearmot_generator.compute_metric(trial_manager) - expected_titles = ["MOTP", "MOTA"] - - # make sure that the titles are correct - returned_metric_titles = [metric.title for metric in metrics] - assert len(expected_titles) == len(returned_metric_titles) - assert set(expected_titles) == set(returned_metric_titles) - - timestamps = trial_manager.list_timestamps(clearmot_generator) - - for metric in metrics: - assert isinstance(metric, TimeRangeMetric) - assert metric.time_range.start == timestamps[0] - assert metric.time_range.end == timestamps[-1] - assert metric.generator == clearmot_generator - - assert isinstance(metric.value, (float, int)) + with pytest.raises(AssociationSetNotValid): + _ = clearmot_generator.compute_metric(trial_manager) From d09339f2073ae35c1efac0ed4a7b26ca6c6f3b35 Mon Sep 17 00:00:00 2001 From: Marat Kopytjuk Date: Sat, 3 Aug 2024 10:30:00 +0200 Subject: [PATCH 20/27] remove unused function --- stonesoup/metricgenerator/clearmotmetrics.py | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/stonesoup/metricgenerator/clearmotmetrics.py b/stonesoup/metricgenerator/clearmotmetrics.py index 3c918aa4a..d578c2b6d 100644 --- a/stonesoup/metricgenerator/clearmotmetrics.py +++ b/stonesoup/metricgenerator/clearmotmetrics.py @@ -222,17 +222,6 @@ def _create_state_from_time_and_id_lookup(tracks_set: Set[Union[Track, GroundTru return track_states_by_time_id -def create_ids_at_time_lookup(tracks_set: Set[Union[Track, GroundTruthPath]]) \ - -> Dict[datetime.datetime, Set[str]]: - - track_ids_by_time = defaultdict(set) - for track in tracks_set: - for state in track.last_timestamp_generator(): - track_ids_by_time[state.timestamp].add(track.id) - - return track_ids_by_time - - class AssociationSetNotValid(Exception): pass From 
4f922afcf2df74a57c0b2e064d1ae888d04eea36 Mon Sep 17 00:00:00 2001 From: Marat Kopytjuk Date: Sat, 3 Aug 2024 10:37:46 +0200 Subject: [PATCH 21/27] add more documentation --- stonesoup/metricgenerator/clearmotmetrics.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/stonesoup/metricgenerator/clearmotmetrics.py b/stonesoup/metricgenerator/clearmotmetrics.py index d578c2b6d..a7ffb29ff 100644 --- a/stonesoup/metricgenerator/clearmotmetrics.py +++ b/stonesoup/metricgenerator/clearmotmetrics.py @@ -21,7 +21,12 @@ class ClearMotMetrics(MetricGenerator): Computes multi-object tracking (MOT) metrics designed for the classification of events, activities, and relationships (CLEAR) evaluation workshops. The implementation provided here - is derived from [1] and focuses on providing the MOTP and MOTP scores. + is derived from [1] and focuses on providing the MOTP (precision) and MOTA (accuracy) scores: + + - MOTP: average distance between all associated truth and track states. The target score is 0. + - MOTA: 1 - ratio of the number of misses, false positives, and mismatches (ID-switches) + relative to the total number of truth states. The target score is 1. This score can become + negative with a higher number of errors. Reference [1] Evaluating Multiple Object Tracking Performance: The CLEAR MOT Metrics, @@ -35,7 +40,7 @@ class ClearMotMetrics(MetricGenerator): default='groundtruth_paths') distance_measure: Measure = Property( - doc="Distance measure used in calculating position accuracy scores.") + doc="Distance measure used in calculating the MOTP score.") def compute_metric(self, manager: MultiManager, **kwargs) -> List[Metric]: """Compute MOTP and MOTA metrics for a given time-period covered by truths and the tracks. 
From 5da0cf1273130e7b74707ac12daed19117bda553 Mon Sep 17 00:00:00 2001 From: Marat Kopytjuk Date: Sat, 3 Aug 2024 10:38:02 +0200 Subject: [PATCH 22/27] remove whitespaces --- stonesoup/metricgenerator/clearmotmetrics.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stonesoup/metricgenerator/clearmotmetrics.py b/stonesoup/metricgenerator/clearmotmetrics.py index a7ffb29ff..ba89c2fb4 100644 --- a/stonesoup/metricgenerator/clearmotmetrics.py +++ b/stonesoup/metricgenerator/clearmotmetrics.py @@ -25,7 +25,7 @@ class ClearMotMetrics(MetricGenerator): - MOTP: average distance between all associated truth and track states. The target score is 0. - MOTA: 1 - ratio of the number of misses, false positives, and mismatches (ID-switches) - relative to the total number of truth states. The target score is 1. This score can become + relative to the total number of truth states. The target score is 1. This score can become negative with a higher number of errors. Reference From fe950749399655824a62f04d6036ad7a83b72005 Mon Sep 17 00:00:00 2001 From: Marat Kopytjuk Date: Sat, 3 Aug 2024 10:40:34 +0200 Subject: [PATCH 23/27] better comments --- stonesoup/metricgenerator/clearmotmetrics.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/stonesoup/metricgenerator/clearmotmetrics.py b/stonesoup/metricgenerator/clearmotmetrics.py index ba89c2fb4..77ff0cf84 100644 --- a/stonesoup/metricgenerator/clearmotmetrics.py +++ b/stonesoup/metricgenerator/clearmotmetrics.py @@ -86,11 +86,11 @@ def _compute_mota_and_motp(self, manager: MultiManager) -> Tuple[float, float]: track_states_by_time_and_id: StatesFromTimeIdLookup = \ _create_state_from_time_and_id_lookup(tracks_set) - # used for the MOTP (avg-distance over matches) + # used for the MOTP (avg-distance over truth-track associations) error_sum = 0.0 num_associated_truth_timestamps = 0 - # used for the MOTA (1 - number-FPs, ID-changes etc.) + # used for the MOTA (1 - number-FPs, ID-changes etc. 
/ number-GT-states) num_misses, num_false_positives, num_miss_matches = 0, 0, 0 unique_timestamps = sorted(manager.list_timestamps(generator=self)) @@ -102,7 +102,7 @@ def _compute_mota_and_motp(self, manager: MultiManager) -> Tuple[float, float]: matched_truth_ids_curr = {match[0] for match in matches_current} matched_tracks_at_timestamp = {match[1] for match in matches_current} - # adapt the variables for MOTP calculation + # update the variables for MOTP calculation error_sum_in_timestep = self._compute_sum_of_distances_at_timestep( truth_states_by_time_and_id, track_states_by_time_and_id, timestamp, matches_current) From 0aa36db525070640c0bd23c3ce00060e6f8f35f4 Mon Sep 17 00:00:00 2001 From: Marat Kopytjuk Date: Fri, 6 Sep 2024 21:18:58 +0200 Subject: [PATCH 24/27] refactor code wrt PR comments --- stonesoup/metricgenerator/clearmotmetrics.py | 17 ++++++----------- 1 file changed, 6 insertions(+), 11 deletions(-) diff --git a/stonesoup/metricgenerator/clearmotmetrics.py b/stonesoup/metricgenerator/clearmotmetrics.py index 77ff0cf84..9f778bf6e 100644 --- a/stonesoup/metricgenerator/clearmotmetrics.py +++ b/stonesoup/metricgenerator/clearmotmetrics.py @@ -112,10 +112,8 @@ def _compute_mota_and_motp(self, manager: MultiManager) -> Tuple[float, float]: truths_ids_at_timestamp = truth_states_by_time_and_id[timestamp].keys() tracks_ids_at_timestamp = track_states_by_time_and_id[timestamp].keys() - unmatched_truth_ids = list(filter(lambda x: x not in matched_truth_ids_curr, - truths_ids_at_timestamp)) - unmatched_track_ids = list(filter(lambda x: x not in matched_tracks_at_timestamp, - tracks_ids_at_timestamp)) + unmatched_truth_ids = truths_ids_at_timestamp - matched_truth_ids_curr + unmatched_track_ids = tracks_ids_at_timestamp - matched_tracks_at_timestamp # update counter variables used for MOTA num_misses += len(unmatched_truth_ids) @@ -185,13 +183,10 @@ def _compute_number_of_miss_matches_from_match_sets(self, truths_ids_at_both_timestamps = 
matched_truth_ids_prev & matched_truth_ids_curr for truth_id in truths_ids_at_both_timestamps: - prev_matches_with_truth_id = list( - filter(lambda match: match[0] == truth_id, matches_prev)) - cur_matches_with_truth_id = list( - filter(lambda match: match[0] == truth_id, matches_current)) - - matched_track_id_prev = prev_matches_with_truth_id[0][1] - matched_track_id_curr = cur_matches_with_truth_id[0][1] + matched_track_id_prev = next( + match[1] for match in matches_prev if match[0] == truth_id) + matched_track_id_curr = next( + match[1] for match in matches_current if match[0] == truth_id) if matched_track_id_prev != matched_track_id_curr: num_miss_matches_current += 1 From 4612725f1db414435c418ca2859ce32822ef0abf Mon Sep 17 00:00:00 2001 From: Marat Kopytjuk Date: Fri, 6 Sep 2024 21:49:44 +0200 Subject: [PATCH 25/27] adapt metricgenerator docs wrt clearmot --- docs/source/stonesoup.metricgenerator.clearmotmetrics.rst | 5 +++++ docs/source/stonesoup.metricgenerator.rst | 1 + 2 files changed, 6 insertions(+) create mode 100644 docs/source/stonesoup.metricgenerator.clearmotmetrics.rst diff --git a/docs/source/stonesoup.metricgenerator.clearmotmetrics.rst b/docs/source/stonesoup.metricgenerator.clearmotmetrics.rst new file mode 100644 index 000000000..01698ff3d --- /dev/null +++ b/docs/source/stonesoup.metricgenerator.clearmotmetrics.rst @@ -0,0 +1,5 @@ +Track-to-truth metrics +====================== + +.. automodule:: stonesoup.metricgenerator.clearmotmetrics + :show-inheritance: \ No newline at end of file diff --git a/docs/source/stonesoup.metricgenerator.rst b/docs/source/stonesoup.metricgenerator.rst index 59856208c..ba6729b1e 100644 --- a/docs/source/stonesoup.metricgenerator.rst +++ b/docs/source/stonesoup.metricgenerator.rst @@ -9,6 +9,7 @@ Metric Generators stonesoup.metricgenerator.pcrbmetric stonesoup.metricgenerator.uncertaintymetric stonesoup.metricgenerator.plotter + stonesoup.metricgenerator.clearmotmetrics .. 
automodule:: stonesoup.metricgenerator :no-members: From ae0bae595e7697171ab33ea867647dfe15ee4c20 Mon Sep 17 00:00:00 2001 From: Marat Kopytjuk Date: Fri, 6 Sep 2024 22:21:58 +0200 Subject: [PATCH 26/27] make docs more pretty --- ...onesoup.metricgenerator.clearmotmetrics.rst | 4 ++-- stonesoup/metricgenerator/clearmotmetrics.py | 18 +++++++++--------- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/docs/source/stonesoup.metricgenerator.clearmotmetrics.rst b/docs/source/stonesoup.metricgenerator.clearmotmetrics.rst index 01698ff3d..b0198f992 100644 --- a/docs/source/stonesoup.metricgenerator.clearmotmetrics.rst +++ b/docs/source/stonesoup.metricgenerator.clearmotmetrics.rst @@ -1,5 +1,5 @@ -Track-to-truth metrics -====================== +CLEAR MOT Metrics +================= .. automodule:: stonesoup.metricgenerator.clearmotmetrics :show-inheritance: \ No newline at end of file diff --git a/stonesoup/metricgenerator/clearmotmetrics.py b/stonesoup/metricgenerator/clearmotmetrics.py index 9f778bf6e..3edbbe600 100644 --- a/stonesoup/metricgenerator/clearmotmetrics.py +++ b/stonesoup/metricgenerator/clearmotmetrics.py @@ -17,20 +17,20 @@ class ClearMotMetrics(MetricGenerator): - """CLEAR MOT Metrics + """CLEAR MOT metrics Computes multi-object tracking (MOT) metrics designed for the classification of events, - activities, and relationships (CLEAR) evaluation workshops. The implementation provided here - is derived from [1] and focuses on providing the MOTP (precision) and MOTA (accuracy) scores: + activities, and relationships (CLEAR) evaluation workshops. The implementation here + is derived from [1] and provides following metrics: - - MOTP: average distance between all associated truth and track states. The target score is 0. - - MOTA: 1 - ratio of the number of misses, false positives, and mismatches (ID-switches) - relative to the total number of truth states. The target score is 1. This score can become - negative with a higher number of errors. 
+ * MOTP (precision): average distance between all associated truth and track states. The target score is 0. + * MOTA (accuracy): 1 - ratio of the number of misses, false positives, and mismatches (ID-switches) + relative to the total number of truth states. The target score is 1. This score can become + negative with a higher number of errors. - Reference + Reference: [1] Evaluating Multiple Object Tracking Performance: The CLEAR MOT Metrics, - Bernardin et al, 2008 + Bernardin et al, 2008 """ tracks_key: str = Property(doc='Key to access set of tracks added to MetricManager', default='tracks') From 14b04e0efa775da08e4ed8f9a2a4097cb743fb4b Mon Sep 17 00:00:00 2001 From: Marat Kopytjuk Date: Fri, 6 Sep 2024 22:25:32 +0200 Subject: [PATCH 27/27] fix flake8 linting errors --- stonesoup/metricgenerator/clearmotmetrics.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/stonesoup/metricgenerator/clearmotmetrics.py b/stonesoup/metricgenerator/clearmotmetrics.py index 3edbbe600..2689e4bc1 100644 --- a/stonesoup/metricgenerator/clearmotmetrics.py +++ b/stonesoup/metricgenerator/clearmotmetrics.py @@ -23,10 +23,11 @@ class ClearMotMetrics(MetricGenerator): activities, and relationships (CLEAR) evaluation workshops. The implementation here is derived from [1] and provides following metrics: - * MOTP (precision): average distance between all associated truth and track states. The target score is 0. - * MOTA (accuracy): 1 - ratio of the number of misses, false positives, and mismatches (ID-switches) - relative to the total number of truth states. The target score is 1. This score can become - negative with a higher number of errors. + * MOTP (precision): average distance between all associated truth and track states. + The target score is 0. + * MOTA (accuracy): 1 - ratio of the number of misses, false positives, and mismatches + (ID-switches)relative to the total number of truth states. The target score is 1. 
+ This score can become negative with a higher number of errors. Reference: [1] Evaluating Multiple Object Tracking Performance: The CLEAR MOT Metrics,