From d3c34f3734b1a8efeab3e9d15172574741cde471 Mon Sep 17 00:00:00 2001 From: Isaac-JenkinsRA Date: Thu, 15 Sep 2022 17:11:00 +0100 Subject: [PATCH 01/11] Add Multi-model and Raoblackwellised versions of particle filter This is a squash and clean up of commits from pull request #271, adapted to work with changes on the `main` branch. --- LICENSE | 1 + stonesoup/predictor/particle.py | 168 ++++++++++++++ stonesoup/predictor/tests/test_multimodel.py | 140 ++++++++++++ stonesoup/resampler/particle.py | 81 +++++++ stonesoup/types/particle.py | 36 ++- stonesoup/types/state.py | 9 +- stonesoup/updater/particle.py | 209 +++++++++++++++++- .../updater/tests/test_raoblackwellised.py | 51 +++++ 8 files changed, 687 insertions(+), 8 deletions(-) create mode 100644 stonesoup/predictor/tests/test_multimodel.py create mode 100644 stonesoup/updater/tests/test_raoblackwellised.py diff --git a/LICENSE b/LICENSE index 6c3cb1e7b..787356517 100644 --- a/LICENSE +++ b/LICENSE @@ -4,6 +4,7 @@ MIT License © Crown Copyright 2018-2022 Defence Research and Development Canada / Recherche et développement pour la défense Canada © Copyright 2018-2022 University of Liverpool UK © Copyright 2020-2022 John Hiles +© Copyright 2020-2022 Riskaware Ltd Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/stonesoup/predictor/particle.py b/stonesoup/predictor/particle.py index fc2f5fdb8..cd165938b 100644 --- a/stonesoup/predictor/particle.py +++ b/stonesoup/predictor/particle.py @@ -1,9 +1,16 @@ +import copy +from typing import Sequence + +import numpy as np + from .base import Predictor from ._utils import predict_lru_cache from .kalman import KalmanPredictor, ExtendedKalmanPredictor from ..base import Property +from ..models.transition import TransitionModel from ..types.prediction import Prediction from ..types.state import GaussianState +from ..types.particle import MultiModelParticle, RaoBlackwellisedParticle class ParticlePredictor(Predictor): @@ -91,3 +98,164 @@ def predict(self, prior, *args, **kwargs): timestamp=particle_prediction.timestamp, fixed_covar=kalman_prediction.covar, particle_list=None, transition_model=self.transition_model) + + +class MultiModelPredictor(Predictor): + """MultiModelPredictor class + + An implementation of a Particle Filter predictor. + """ + transition_model = None + transition_models: Sequence[TransitionModel] = Property( + doc="Transition models used to for particle transition, selected by model index on " + "particle." + ) + transition_matrix: np.ndarray = Property( + doc="n-model by n-model transition matrix." + ) + position_mapping: Sequence[Sequence[int]] = Property( + doc="Sequence of position mappings associated with each transition model.") + + @property + def probabilities(self): + return np.cumsum(self.transition_matrix, axis=1) + + @predict_lru_cache() + def predict(self, prior, control_input=None, timestamp=None, **kwargs): + """Particle Filter prediction step + + Parameters + ---------- + prior : :class:`~.ParticleState` + A prior state object + control_input : :class:`~.State`, optional + The control input. 
It will only have an effect if + :attr:`control_model` is not `None` (the default is `None`) + timestamp: :class:`datetime.datetime`, optional + A timestamp signifying when the prediction is performed + (the default is `None`) + Returns + ------- + : :class:`~.ParticleStatePrediction` + The predicted state + """ + + # Compute time_interval + try: + time_interval = timestamp - prior.timestamp + except TypeError: + # TypeError: (timestamp or prior.timestamp) is None + time_interval = None + new_particles = [] + for particle in prior.particles: + for model_index in range(len(self.transition_matrix)): + if particle.dynamic_model == model_index: + + # Change the value of the dynamic value randomly according to the defined + # transition matrix + new_dynamic_model = np.searchsorted( + self.probabilities[model_index], np.random.random()) + + transition_model = self.transition_models[particle.dynamic_model] + + # Based on given position mapping create a new state vector that contains only + # the required states + required_state_space = copy.copy(particle) + required_state_space.state_vector = particle.state_vector[ + self.position_mapping[particle.dynamic_model]] + new_state_vector = transition_model.function( + required_state_space, + noise=True, + time_interval=time_interval, + **kwargs) + + # Calculate the indices removed from the state vector to become compatible with + # the dynamic model + for j in range(len(particle.state_vector)): + if j not in self.position_mapping[particle.dynamic_model]: + new_state_vector = np.insert(new_state_vector, j, 0, axis=0) + + new_particles.append(MultiModelParticle(new_state_vector, + weight=particle.weight, + parent=particle, + dynamic_model=new_dynamic_model)) + + return Prediction.from_state(prior, + state_vector=None, + weight=None, + particle_list=new_particles, + timestamp=timestamp) + + +class RaoBlackwellisedMultiModelPredictor(MultiModelPredictor): + """ParticlePredictor class + + An implementation of a Particle Filter predictor. + """ + + @predict_lru_cache() + def predict(self, prior, control_input=None, timestamp=None, **kwargs): + """Particle Filter prediction step + + Parameters + ---------- + prior : :class:`~.ParticleState` + A prior state object + control_input : :class:`~.State`, optional + The control input. 
It will only have an effect if + :attr:`control_model` is not `None` (the default is `None`) + timestamp: :class:`datetime.datetime`, optional + A timestamp signifying when the prediction is performed + (the default is `None`) + Returns + ------- + : :class:`~.ParticleStatePrediction` + The predicted state + """ + + # Compute time_interval + try: + time_interval = timestamp - prior.timestamp + except TypeError: + # TypeError: (timestamp or prior.timestamp) is None + time_interval = None + + new_particles = [] + for particle in prior.particles: + + # Change the value of the dynamic value randomly according to the defined transition + # matrix + new_dynamic_model = np.random.choice( + list(range(len(particle.model_probabilities))), + p=particle.model_probabilities) + + # Based on given position mapping create a new state vector that contains only the + # required states + required_state_space = particle.state_vector[ + np.array(self.position_mapping[new_dynamic_model]) + ] + + new_state_vector = self.transition_models[new_dynamic_model].function( + required_state_space, + noise=True, + time_interval=time_interval, + **kwargs) + + # Calculate the indices removed from the state vector to become compatible with the + # dynamic model + for j in range(len(particle.state_vector)): + if j not in self.position_mapping[new_dynamic_model]: + new_state_vector = np.insert(new_state_vector, j, 0, axis=0) + + new_particles.append( + RaoBlackwellisedParticle(new_state_vector, + weight=particle.weight, + parent=particle, + model_probabilities=particle.model_probabilities) + ) + + return Prediction.from_state(prior, + state_vector=None, + weight=None, + particle_list=new_particles, + timestamp=timestamp) diff --git a/stonesoup/predictor/tests/test_multimodel.py b/stonesoup/predictor/tests/test_multimodel.py new file mode 100644 index 000000000..6c6914688 --- /dev/null +++ b/stonesoup/predictor/tests/test_multimodel.py @@ -0,0 +1,140 @@ +import datetime + +import numpy as np + +from ...models.transition.linear import ( + ConstantVelocity, CombinedLinearGaussianTransitionModel, ConstantAcceleration) +from ...predictor.particle import MultiModelPredictor +from ...types.particle import MultiModelParticle +from ...types.state import ParticleState + + +def test_multi_model(): + + # Define time related variables. + timestamp = datetime.datetime.now() + time_diff = 2 # 2sec. + new_timestamp = timestamp + datetime.timedelta(seconds=time_diff) + + # Define prior state. 
+ prior_particles = [ + MultiModelParticle(np.array([[10], [10], [10], [10], [10], [10], [10], [10], [10]]), + 1 / 9, dynamic_model=0), + MultiModelParticle(np.array([[10], [20], [10], [10], [10], [10], [10], [10], [10]]), + 1 / 9, dynamic_model=0), + MultiModelParticle(np.array([[10], [30], [10], [10], [10], [10], [10], [10], [10]]), + 1 / 9, dynamic_model=0), + MultiModelParticle(np.array([[20], [10], [10], [10], [10], [10], [10], [10], [10]]), + 1 / 9, dynamic_model=0), + MultiModelParticle(np.array([[20], [20], [10], [10], [10], [10], [10], [10], [10]]), + 1 / 9, dynamic_model=0), + MultiModelParticle(np.array([[20], [30], [10], [10], [10], [10], [10], [10], [10]]), + 1 / 9, dynamic_model=0), + MultiModelParticle(np.array([[30], [10], [10], [10], [10], [10], [10], [10], [10]]), + 1 / 9, dynamic_model=0), + MultiModelParticle(np.array([[30], [20], [10], [10], [10], [10], [10], [10], [10]]), + 1 / 9, dynamic_model=0), + MultiModelParticle(np.array([[30], [30], [10], [10], [10], [10], [10], [10], [10]]), + 1 / 9, dynamic_model=0), + MultiModelParticle(np.array([[30], [30], [10], [10], [10], [10], [10], [10], [10]]), + 1 / 9, dynamic_model=0), + MultiModelParticle(np.array([[30], [30], [10], [10], [10], [10], [10], [10], [10]]), + 1 / 9, dynamic_model=0), + MultiModelParticle(np.array([[30], [30], [10], [10], [10], [10], [10], [10], [10]]), + 1 / 9, dynamic_model=0), + MultiModelParticle(np.array([[30], [30], [10], [10], [10], [10], [10], [10], [10]]), + 1 / 9, dynamic_model=0), + MultiModelParticle(np.array([[30], [30], [10], [10], [10], [10], [10], [10], [10]]), + 1 / 9, dynamic_model=0), + MultiModelParticle(np.array([[30], [30], [10], [10], [10], [10], [10], [10], [10]]), + 1 / 9, dynamic_model=0), + MultiModelParticle(np.array([[30], [30], [10], [10], [10], [10], [10], [10], [10]]), + 1 / 9, dynamic_model=0), + MultiModelParticle(np.array([[30], [30], [10], [10], [10], [10], [10], [10], [10]]), + 1 / 9, dynamic_model=0), + MultiModelParticle(np.array([[30], [30], [10], [10], [10], [10], [10], [10], [10]]), + 1 / 9, dynamic_model=0), + MultiModelParticle(np.array([[30], [30], [10], [10], [10], [10], [10], [10], [10]]), + 1 / 9, dynamic_model=0), + MultiModelParticle(np.array([[30], [30], [10], [10], [10], [10], [10], [10], [10]]), + 1 / 9, dynamic_model=0), + MultiModelParticle(np.array([[30], [30], [10], [10], [10], [10], [10], [10], [10]]), + 1 / 9, dynamic_model=0), + MultiModelParticle(np.array([[30], [30], [10], [10], [10], [10], [10], [10], [10]]), + 1 / 9, dynamic_model=0), + MultiModelParticle(np.array([[30], [30], [10], [10], [10], [10], [10], [10], [10]]), + 1 / 9, dynamic_model=0), + MultiModelParticle(np.array([[30], [30], [10], [10], [10], [10], [10], [10], [10]]), + 1 / 9, dynamic_model=0), + MultiModelParticle(np.array([[30], [30], [10], [10], [10], [10], [10], [10], [10]]), + 1 / 9, dynamic_model=0), + MultiModelParticle(np.array([[30], [30], [10], [10], [10], [10], [10], [10], [10]]), + 1 / 9, dynamic_model=0), + ] + + # Declare the model list. 
+ model_list = [ + CombinedLinearGaussianTransitionModel((ConstantVelocity(0.1), + ConstantVelocity(0.1), + ConstantVelocity(0.1))), + CombinedLinearGaussianTransitionModel((ConstantAcceleration(0.01), + ConstantAcceleration(0.01), + ConstantAcceleration(0.01))), + CombinedLinearGaussianTransitionModel((ConstantVelocity(0.01), + ConstantVelocity(0.01), + ConstantVelocity(0.01))), + CombinedLinearGaussianTransitionModel((ConstantAcceleration(0.01), + ConstantAcceleration(0.01), + ConstantAcceleration(0.01))), + CombinedLinearGaussianTransitionModel((ConstantVelocity(0.01), + ConstantVelocity(0.01), + ConstantAcceleration(0.01))), + CombinedLinearGaussianTransitionModel((ConstantAcceleration(0.01), + ConstantVelocity(0.01), + ConstantVelocity(0.01))), + CombinedLinearGaussianTransitionModel((ConstantVelocity(0.01), + ConstantAcceleration(0.01), + ConstantVelocity(0.01))), + CombinedLinearGaussianTransitionModel((ConstantAcceleration(0.01), + ConstantAcceleration(0.01), + ConstantVelocity(0.01))), + ] + # Give the respective position mapping. + position_mapping = [ + [0, 1, 3, 4, 6, 7], + [0, 1, 2, 3, 4, 5, 6, 7, 8], + [0, 1, 3, 4, 6, 7], + [0, 1, 2, 3, 4, 5, 6, 7, 8], + [0, 1, 3, 4, 6, 7, 8], + [0, 1, 2, 3, 4, 6, 7], + [0, 1, 3, 4, 5, 6, 7], + [0, 1, 2, 3, 4, 5, 6, 7], + ] + + # Provide the required transition matrix. + transition = [ + [0.65, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05], + [0.05, 0.65, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05], + [0.05, 0.05, 0.65, 0.05, 0.05, 0.05, 0.05, 0.05], + [0.05, 0.05, 0.05, 0.65, 0.05, 0.05, 0.05, 0.05], + [0.05, 0.05, 0.05, 0.05, 0.65, 0.05, 0.05, 0.05], + [0.05, 0.05, 0.05, 0.05, 0.05, 0.65, 0.05, 0.05], + [0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.65, 0.05], + [0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.65], + ] + + prior = ParticleState(None, particle_list=prior_particles, timestamp=timestamp) + + predictor = MultiModelPredictor(position_mapping=position_mapping, + transition_matrix=transition, + transition_models=model_list) + + prediction = predictor.predict(prior, timestamp=new_timestamp) + + dynamic_model_list = [p.dynamic_model for p in prediction.particles] + dynamic_model_proportions = [dynamic_model_list.count(i) for i in range(len(transition))] + dynamic_model_proportions = np.array(dynamic_model_proportions) + + assert prediction.timestamp == new_timestamp + assert np.all([prediction.particles[i].weight == 1 / 9 for i in range(9)]) + assert len(dynamic_model_proportions) == len(transition) diff --git a/stonesoup/resampler/particle.py b/stonesoup/resampler/particle.py index 4edee416b..f75081c17 100644 --- a/stonesoup/resampler/particle.py +++ b/stonesoup/resampler/particle.py @@ -1,9 +1,12 @@ +from operator import attrgetter + import numpy as np from .base import Resampler from ..base import Property from ..types.numeric import Probability from ..types.state import ParticleState +from ..types.particle import RaoBlackwellisedParticle, MultiModelParticle class SystematicResampler(Resampler): @@ -92,3 +95,81 @@ def resample(self, particles): return self.resampler.resample(self.resampler, particles) else: return particles + + +class MultiModelSystematicResampler(Resampler): + + def resample(self, particles): + """Resample the particles + + Parameters + ---------- + particles : list of :class:`~.MultiModelParticle` + The particles to be resampled according to their weight + + Returns + ------- + particles : list of :class:`~.MultiModelParticle` + The resampled particles + """ + + n_particles = len(particles) + weight = Probability(1 / n_particles) + 
particles_sorted = sorted(particles, key=attrgetter('weight'), reverse=False) + cdf = np.cumsum([p.weight for p in particles_sorted]) + + # Pick random starting point + u_i = np.random.uniform(0, 1 / n_particles) + new_particles = [] + + # Cycle through the cumulative distribution and copy the particle + # that pushed the score over the current value + for j in range(n_particles): + u_j = u_i + (1 / n_particles) * j + particle = particles_sorted[np.argmax(u_j < cdf)] + new_particles.append( + MultiModelParticle(particle.state_vector, + weight=weight, + parent=particle, + dynamic_model=particle.dynamic_model)) + + return new_particles + + +class RaoBlackwellisedSystematicResampler(Resampler): + + def resample(self, particles): + """Resample the particles + Parameters + ---------- + particles : list of :class:`~.RaoBlackwellisedParticle` + The particles to be resampled according to their weight + Returns + ------- + particles : list of :class:`~.RaoBlackwellisedParticle` + The resampled particles + """ + + n_particles = len(particles) + weight = Probability(1/n_particles) + particles_sorted = sorted(particles, key=attrgetter('weight'), reverse=False) + cdf = np.cumsum([p.weight for p in particles_sorted]) + + # Pick random starting point + u_i = np.random.uniform(0, 1 / n_particles) + new_particles = [] + + # Cycle through the cumulative distribution and copy the particle + # that pushed the score over the current value + for j in range(n_particles): + + u_j = u_i + (1 / n_particles) * j + particle = particles_sorted[np.argmax(u_j < cdf)] + + new_particles.append( + RaoBlackwellisedParticle(particle.state_vector, + weight=weight, + parent=particle, + model_probabilities=particle.model_probabilities)) + + return new_particles diff --git a/stonesoup/types/particle.py b/stonesoup/types/particle.py index c8e886f6e..b1387e5b5 100644 --- a/stonesoup/types/particle.py +++ b/stonesoup/types/particle.py @@ -1,3 +1,5 @@ +from typing import Sequence + from ..base import Property from .array import StateVector from .base import Type @@ -13,13 +15,35 @@ class Particle(Type): weight: float = Property(doc='Weight of particle') parent: 'Particle' = Property(default=None, doc='Parent particle') - def __init__(self, state_vector, weight, parent=None, *args, **kwargs): - if parent: - parent.parent = None - if state_vector is not None and not isinstance(state_vector, StateVector): - state_vector = StateVector(state_vector) - super().__init__(state_vector, weight, parent, *args, **kwargs) + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + if self.parent: + self.parent.parent = None + if self.state_vector is not None and not isinstance(self.state_vector, StateVector): + self.state_vector = StateVector(self.state_vector) @property def ndim(self): return self.state_vector.shape[0] + + +class MultiModelParticle(Particle): + """ + Particle type + + A MultiModelParticle type which contains a state, weight and the dynamic_model + """ + dynamic_model: int = Property(doc='Assigned dynamic model') + parent: 'MultiModelParticle' = Property(default=None, doc='Parent particle') + + +class RaoBlackwellisedParticle(Particle): + """ + Particle type + + A RaoBlackwellisedParticle type which contains a state, weight, dynamic_model and + associated model probabilities + """ + model_probabilities: Sequence[float] = Property( + doc="The dynamic probabilities of changing models") + parent: 'RaoBlackwellisedParticle' = Property(default=None, doc='Parent particle') diff --git a/stonesoup/types/state.py 
b/stonesoup/types/state.py index b9e9740fe..7e71e773f 100644 --- a/stonesoup/types/state.py +++ b/stonesoup/types/state.py @@ -532,14 +532,21 @@ def __getitem__(self, item): else: p = None + if self.weight is not None: + weight = self.weight[item] + else: + weight = None + particle = Particle(state_vector=self.state_vector[:, item], - weight=self.weight[item], + weight=weight, parent=p) return particle @clearable_cached_property('state_vector', 'weight') def particles(self): """Sequence of individual :class:`~.Particle` objects.""" + if self.particle_list is not None: + return self.particle_list return tuple(particle for particle in self) def __len__(self): diff --git a/stonesoup/updater/particle.py b/stonesoup/updater/particle.py index f72c05758..7b9f290bc 100644 --- a/stonesoup/updater/particle.py +++ b/stonesoup/updater/particle.py @@ -2,14 +2,17 @@ from functools import lru_cache import numpy as np -from scipy.linalg import inv +from scipy.linalg import inv, block_diag +from scipy.stats import multivariate_normal from .base import Updater from .kalman import KalmanUpdater, ExtendedKalmanUpdater from ..base import Property from ..functions import cholesky_eps, sde_euler_maruyama_integration +from ..predictor.particle import MultiModelPredictor, RaoBlackwellisedMultiModelPredictor from ..resampler import Resampler from ..types.numeric import Probability +from ..types.particle import RaoBlackwellisedParticle, MultiModelParticle from ..types.prediction import ( Prediction, ParticleMeasurementPrediction, GaussianStatePrediction, MeasurementPrediction) from ..types.update import ParticleStateUpdate, Update @@ -221,3 +224,207 @@ def predict_measurement(self, state_prediction, *args, **kwargs): fixed_covar=kalman_prediction.covar, particle_list=None, timestamp=particle_prediction.timestamp) + + +class MultiModelParticleUpdater(Updater): + """Particle Updater for the Multi Model system""" + + resampler: Resampler = Property(doc='Resampler to prevent particle degeneracy') + predictor: MultiModelPredictor = Property() + + def update(self, hypothesis, **kwargs): + """Particle Filter update step + + Parameters + ---------- + hypothesis : :class:`~.Hypothesis` + Hypothesis with predicted state and associated detection used for + updating. 
+ + Returns + ------- + : :class:`~.ParticleState` + The state posterior + """ + if hypothesis.measurement.measurement_model is None: + measurement_model = self.measurement_model + else: + measurement_model = hypothesis.measurement.measurement_model + + new_particles = [copy.copy(particle) for particle in hypothesis.prediction.particles] + + for particle in new_particles: + particle.weight = particle.weight \ + * measurement_model.pdf(hypothesis.measurement, particle, **kwargs) \ + * self.predictor.transition_matrix[particle.parent.dynamic_model][particle.dynamic_model] # noqa: E501 + + # Normalise the weights + sum_w = Probability.sum(i.weight for i in hypothesis.prediction.particles) + for particle in new_particles: + particle.weight /= sum_w + + new_particles = self.resampler.resample(new_particles) + + return ParticleStateUpdate(None, + particle_list=new_particles, + hypothesis=hypothesis, + timestamp=hypothesis.measurement.timestamp) + + @lru_cache() + def predict_measurement(self, state_prediction, measurement_model=None, + **kwargs): + + if measurement_model is None: + measurement_model = self.measurement_model + + new_particles = [] + for particle in state_prediction.particles: + new_state_vector = measurement_model.function( + particle.state_vector, noise=False, **kwargs) + new_particles.append( + MultiModelParticle(new_state_vector, + weight=particle.weight, + parent=particle.parent, + dynamic_model=particle.dynamicmodel)) + + return ParticleMeasurementPrediction( + None, particle_list=new_particles, timestamp=state_prediction.timestamp) + + +class RaoBlackwellisedParticleUpdater(Updater): + """Particle Updater for the Raoblackwellised scheme""" + + resampler: Resampler = Property(doc='Resampler to prevent particle degeneracy') + predictor: RaoBlackwellisedMultiModelPredictor = Property( + doc="Predictor which hold holds transition matrix, models and mappings") + + def update(self, hypothesis, prior_timestamp, **kwargs): # TODO: Handle prior timestamp + """Particle Filter update step + + Parameters + ---------- + hypothesis : :class:`~.Hypothesis` + Hypothesis with predicted state and associated detection used for + updating. 
+ + Returns + ------- + : :class:`~.ParticleState` + The state posterior + """ + + if hypothesis.measurement.measurement_model is None: + measurement_model = self.measurement_model + else: + measurement_model = hypothesis.measurement.measurement_model + time_interval = hypothesis.prediction.timestamp - prior_timestamp + + for particle in hypothesis.prediction.particles: + + new_model_probabilities, prob_position_given_previous_position = \ + self.calculate_model_probabilities( + particle, measurement_model, self.predictor.position_mapping, + self.transition_matrix, self.predictor.transition_models, time_interval) + + particle.model_probabilities = new_model_probabilities + + prob_y_given_x = measurement_model.pdf( + hypothesis.measurement, particle, + **kwargs) + + particle.weight *= prob_y_given_x + + # Normalise the weights + sum_w = Probability.sum( + i.weight for i in hypothesis.prediction.particles) + for particle in hypothesis.prediction.particles: + particle.weight /= sum_w + + new_particles = self.resampler.resample( + hypothesis.prediction.particles) + + return ParticleStateUpdate(None, + particle_list=new_particles, + hypothesis=hypothesis, + timestamp=hypothesis.measurement.timestamp) + + @lru_cache() + def predict_measurement(self, state_prediction, measurement_model=None, + **kwargs): + + if measurement_model is None: + measurement_model = self.measurement_model + + new_particles = [] + for particle in state_prediction.particles: + new_state_vector = measurement_model.function( + particle.state_vector, noise=False, **kwargs) + new_particles.append( + RaoBlackwellisedParticle(new_state_vector, + weight=particle.weight, + parent=particle.parent, + model_probabilities=particle.model_probabilities)) + + return ParticleMeasurementPrediction( + None, particle_list=new_particles, timestamp=state_prediction.timestamp) + + @staticmethod + def calculate_model_probabilities(particle, measurement_model, position_mapping, + transition_matrix, transition_models, time_interval): + """Calculates the new model probabilities based + on the ones calculated in the previous time step""" + + previous_probabilities = particle.model_probabilities + + denominator_components = [] + # Loop over the current models m_k + for l in range(len(previous_probabilities)): # noqa: E741 + selected_model_sum = 0 + # Loop over the previous models m_k-1 + for i, model in enumerate(transition_models): + # Looks up p(m_k|m_k-1) + # Note: if p(m_k|m_k-1) = 0 then p(m_k|x_1:k) = 0 + transition_probability = transition_matrix[i][l] + if transition_probability == 0: + break + # Getting required states to apply the model to that state vector + parent_required_state_space = copy.copy(particle.parent) + parent_required_state_space.state_vector = \ + particle.parent.state_vector[position_mapping[i], :] + + # The noiseless application of m_k onto x_k-1 + mean = model.function( + parent_required_state_space, time_interval=time_interval, noise=False) + + # Input the indices that were removed previously + for j in range(len(particle.state_vector)): + if j not in position_mapping[i]: + mean = np.insert(mean, j, 0, axis=0) + + cov_matrices = [measurement_model.covar() + for _ in range(len(model.model_list))] + + prob_position_given_model_and_old_position = multivariate_normal.pdf( + particle.state_vector.ravel(), + mean=mean.ravel(), + cov=block_diag(*cov_matrices) + ) + # p(m_k-1|x_1:k-1) + prob_previous_iteration_given_model = previous_probabilities[l] + + product_of_probs = Probability(prob_position_given_model_and_old_position * 
+ transition_probability * + prob_previous_iteration_given_model) + selected_model_sum = selected_model_sum + product_of_probs + denominator_components.append(selected_model_sum) + + # Calculate the denominator + denominator = sum(denominator_components) + + # Calculate the new probabilities + new_probabilities = [] + for i in range(len(previous_probabilities)): + new_probabilities.append( + Probability(denominator_components[i] / denominator)) + + return [new_probabilities, denominator] diff --git a/stonesoup/updater/tests/test_raoblackwellised.py b/stonesoup/updater/tests/test_raoblackwellised.py new file mode 100644 index 000000000..6b330137a --- /dev/null +++ b/stonesoup/updater/tests/test_raoblackwellised.py @@ -0,0 +1,51 @@ +"""Test for updater.particle module""" +import numpy as np +import datetime + +from ...models.measurement.linear import LinearGaussian + +from ...updater.particle import RaoBlackwellisedParticleUpdater +from ...types.particle import RaoBlackwellisedParticle +from ...models.transition.linear import ConstantVelocity, ConstantAcceleration +from ...models.transition.linear import CombinedLinearGaussianTransitionModel + + +def test_rao_blackwellised_updater(): + # Initialise two particles + particle1 = RaoBlackwellisedParticle( + state_vector=[1, 1, 1, 1, 1, 1, 1, 1, 1], + weight=0.5, + model_probabilities=[0.5, 0.5]) + particle2 = RaoBlackwellisedParticle( + state_vector=[1, 1, 1, 1, 1, 1, 1, 1, 1], + weight=0.5, + model_probabilities=[0.5, 0.5]) + particle1.parent = particle2 + particle2.parent = particle1 + + particles = [particle1, particle2] + + dynamic_model_list = [CombinedLinearGaussianTransitionModel((ConstantVelocity(0.01), + ConstantVelocity(0.01), + ConstantVelocity(0.01))), + CombinedLinearGaussianTransitionModel((ConstantAcceleration(0.1), + ConstantAcceleration(0.1), + ConstantAcceleration(0.1)))] + + transition = [[0.50, 0.50], + [0.50, 0.50]] + + position_mapping = [[0, 1, 3, 4, 6, 7], + [0, 1, 2, 3, 4, 5, 6, 7, 8]] + + measurement_model = LinearGaussian( + ndim_state=9, + mapping=(0, 3, 6), + noise_covar=np.diag([0.75, 0.75, 0.75])) + + for particle in particles: + rv = RaoBlackwellisedParticleUpdater.calculate_model_probabilities( + particle, measurement_model, position_mapping, transition, dynamic_model_list, + datetime.timedelta(seconds=1) + ) + assert type(rv) == list From f1c61f5bf00ab09ee0a0c34625dde3f8a0666310 Mon Sep 17 00:00:00 2001 From: Steven Hiscocks Date: Wed, 21 Sep 2022 14:28:41 +0100 Subject: [PATCH 02/11] Add ability to slice ParticleState This returns a new ParticleState, only containing relevant particles --- stonesoup/types/state.py | 22 +++++++++++++++------- 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/stonesoup/types/state.py b/stonesoup/types/state.py index 7e71e773f..01c4ec8ef 100644 --- a/stonesoup/types/state.py +++ b/stonesoup/types/state.py @@ -527,20 +527,28 @@ def __init__(self, *args, **kwargs): self.weight = np.array(self.weight) def __getitem__(self, item): - if self.parent: - p = self.parent[item] + if self.parent is not None: + parent = self.parent[item] else: - p = None + parent = None if self.weight is not None: weight = self.weight[item] else: weight = None - particle = Particle(state_vector=self.state_vector[:, item], - weight=weight, - parent=p) - return particle + if isinstance(item, int): + result = Particle(state_vector=self.state_vector[:, item], + weight=weight, + parent=parent) + else: + # Allow for Prediction/Update sub-types + result = type(self).from_state(self, + 
state_vector=self.state_vector[:, item], + weight=weight, + parent=parent, + particle_list=None) + return result @clearable_cached_property('state_vector', 'weight') def particles(self): From 02a761fccd7631921c62dd6f83696b86558d2170 Mon Sep 17 00:00:00 2001 From: Steven Hiscocks Date: Wed, 21 Sep 2022 14:32:35 +0100 Subject: [PATCH 03/11] Update SystematicResampler to use new ParticleState slicing --- stonesoup/resampler/particle.py | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/stonesoup/resampler/particle.py b/stonesoup/resampler/particle.py index f75081c17..1bacb28db 100644 --- a/stonesoup/resampler/particle.py +++ b/stonesoup/resampler/particle.py @@ -44,13 +44,9 @@ def resample(self, particles): # that pushed the score over the current value u_j = u_i + (1 / n_particles) * np.arange(n_particles) index = weight_order[np.searchsorted(cdf, np.log(u_j))] - new_particles = ParticleState(state_vector=particles.state_vector[:, index], - weight=[weight]*n_particles, - parent=ParticleState( - state_vector=particles.state_vector[:, index], - weight=particles.weight[index], - timestamp=particles.timestamp), - timestamp=particles.timestamp) + + new_particles = particles[index] + new_particles.weight = np.full((n_particles, ), weight) return new_particles From 127b1767055e9c469b33edd72405da39a7fa54a8 Mon Sep 17 00:00:00 2001 From: Steven Hiscocks Date: Thu, 22 Sep 2022 16:45:51 +0100 Subject: [PATCH 04/11] Vectorise Multi-Model and Rao Blackwellised particle filters This also modifies use of measurement model covariance to transition model covariance in Rao Blackwellised update step. New states for multi-model and Rao Blackwellised have also been created to enable capture of the overall state. Test has been improved for Rao Blackwellised to ensure behaviour is as expected, increasing probability for transition models that align with measurements. Custom resamplers for these filters have also been removed as standard one can now be used. --- stonesoup/predictor/particle.py | 176 +++++++-------- stonesoup/predictor/tests/test_multimodel.py | 24 +-- stonesoup/resampler/particle.py | 81 ------- stonesoup/types/prediction.py | 17 +- stonesoup/types/state.py | 98 ++++++++- stonesoup/types/update.py | 20 +- stonesoup/updater/particle.py | 203 ++++++------------ .../updater/tests/test_raoblackwellised.py | 95 +++++--- 8 files changed, 341 insertions(+), 373 deletions(-) diff --git a/stonesoup/predictor/particle.py b/stonesoup/predictor/particle.py index cd165938b..dcfd8aef9 100644 --- a/stonesoup/predictor/particle.py +++ b/stonesoup/predictor/particle.py @@ -10,7 +10,6 @@ from ..models.transition import TransitionModel from ..types.prediction import Prediction from ..types.state import GaussianState -from ..types.particle import MultiModelParticle, RaoBlackwellisedParticle class ParticlePredictor(Predictor): @@ -113,7 +112,7 @@ class MultiModelPredictor(Predictor): transition_matrix: np.ndarray = Property( doc="n-model by n-model transition matrix." 
) - position_mapping: Sequence[Sequence[int]] = Property( + position_mappings: Sequence[Sequence[int]] = Property( doc="Sequence of position mappings associated with each transition model.") @property @@ -121,16 +120,13 @@ def probabilities(self): return np.cumsum(self.transition_matrix, axis=1) @predict_lru_cache() - def predict(self, prior, control_input=None, timestamp=None, **kwargs): + def predict(self, prior, timestamp=None, **kwargs): """Particle Filter prediction step Parameters ---------- - prior : :class:`~.ParticleState` + prior : :class:`~.MultiModelParticleState` A prior state object - control_input : :class:`~.State`, optional - The control input. It will only have an effect if - :attr:`control_model` is not `None` (the default is `None`) timestamp: :class:`datetime.datetime`, optional A timestamp signifying when the prediction is performed (the default is `None`) @@ -140,51 +136,50 @@ def predict(self, prior, control_input=None, timestamp=None, **kwargs): The predicted state """ - # Compute time_interval - try: - time_interval = timestamp - prior.timestamp - except TypeError: - # TypeError: (timestamp or prior.timestamp) is None - time_interval = None - new_particles = [] - for particle in prior.particles: - for model_index in range(len(self.transition_matrix)): - if particle.dynamic_model == model_index: - - # Change the value of the dynamic value randomly according to the defined - # transition matrix - new_dynamic_model = np.searchsorted( - self.probabilities[model_index], np.random.random()) - - transition_model = self.transition_models[particle.dynamic_model] - - # Based on given position mapping create a new state vector that contains only - # the required states - required_state_space = copy.copy(particle) - required_state_space.state_vector = particle.state_vector[ - self.position_mapping[particle.dynamic_model]] - new_state_vector = transition_model.function( - required_state_space, - noise=True, - time_interval=time_interval, - **kwargs) - - # Calculate the indices removed from the state vector to become compatible with - # the dynamic model - for j in range(len(particle.state_vector)): - if j not in self.position_mapping[particle.dynamic_model]: - new_state_vector = np.insert(new_state_vector, j, 0, axis=0) - - new_particles.append(MultiModelParticle(new_state_vector, - weight=particle.weight, - parent=particle, - dynamic_model=new_dynamic_model)) - - return Prediction.from_state(prior, - state_vector=None, - weight=None, - particle_list=new_particles, - timestamp=timestamp) + new_particle_state = Prediction.from_state( + prior, + state_vector=copy.copy(prior.state_vector), + parent=prior, + particle_list=None, + dynamic_model=copy.copy(prior.dynamic_model), + timestamp=timestamp) + + for model_index, transition_model in enumerate(self.transition_models): + # Change the value of the dynamic value randomly according to the defined + # transition matrix + current_model_indices = prior.dynamic_model == model_index + current_model_count = np.count_nonzero(current_model_indices) + if current_model_count == 0: + continue + new_dynamic_models = np.searchsorted( + self.probabilities[model_index], + np.random.random(size=current_model_count)) + + new_state_vector = self.apply_model( + prior[current_model_indices], transition_model, timestamp, + self.position_mappings[model_index], noise=True, **kwargs) + + new_particle_state.state_vector[:, current_model_indices] = new_state_vector + new_particle_state.dynamic_model[current_model_indices] = new_dynamic_models + + return 
new_particle_state + + @staticmethod + def apply_model(prior, transition_model, timestamp, position_mapping, **kwargs): + # Based on given position mapping create a new state vector that contains only the + # required states + orig_ndim = prior.ndim + prior.state_vector = prior.state_vector[position_mapping, :] + new_state_vector = transition_model.function( + prior, time_interval=timestamp - prior.timestamp, **kwargs) + + # Calculate the indices removed from the state vector to become compatible with the + # dynamic model + for j in range(orig_ndim): + if j not in position_mapping: + new_state_vector = np.insert(new_state_vector, j, 0, axis=0) + + return new_state_vector class RaoBlackwellisedMultiModelPredictor(MultiModelPredictor): @@ -194,16 +189,13 @@ class RaoBlackwellisedMultiModelPredictor(MultiModelPredictor): """ @predict_lru_cache() - def predict(self, prior, control_input=None, timestamp=None, **kwargs): + def predict(self, prior, timestamp=None, **kwargs): """Particle Filter prediction step Parameters ---------- - prior : :class:`~.ParticleState` + prior : :class:`~.RaoBlackwellisedParticleState` A prior state object - control_input : :class:`~.State`, optional - The control input. It will only have an effect if - :attr:`control_model` is not `None` (the default is `None`) timestamp: :class:`datetime.datetime`, optional A timestamp signifying when the prediction is performed (the default is `None`) @@ -213,49 +205,29 @@ def predict(self, prior, control_input=None, timestamp=None, **kwargs): The predicted state """ - # Compute time_interval - try: - time_interval = timestamp - prior.timestamp - except TypeError: - # TypeError: (timestamp or prior.timestamp) is None - time_interval = None + new_particle_state = Prediction.from_state( + prior, + state_vector=copy.copy(prior.state_vector), + parent=prior, + particle_list=None, + timestamp=timestamp) + + # Change the value of the dynamic value randomly according to the + # matrix + new_dynamic_models = np.array([ + np.random.choice(len(self.transition_models), p=model_probabiliy) + for model_probabiliy in prior.model_probabilities.T]) + + for model_index, transition_model in enumerate(self.transition_models): + + new_model_indices = new_dynamic_models == model_index + if np.count_nonzero(new_model_indices) == 0: + continue + + new_state_vector = self.apply_model( + prior[new_model_indices], transition_model, timestamp, + self.position_mappings[model_index], noise=True, **kwargs) + + new_particle_state.state_vector[:, new_model_indices] = new_state_vector - new_particles = [] - for particle in prior.particles: - - # Change the value of the dynamic value randomly according to the defined transition - # matrix - new_dynamic_model = np.random.choice( - list(range(len(particle.model_probabilities))), - p=particle.model_probabilities) - - # Based on given position mapping create a new state vector that contains only the - # required states - required_state_space = particle.state_vector[ - np.array(self.position_mapping[new_dynamic_model]) - ] - - new_state_vector = self.transition_models[new_dynamic_model].function( - required_state_space, - noise=True, - time_interval=time_interval, - **kwargs) - - # Calculate the indices removed from the state vector to become compatible with the - # dynamic model - for j in range(len(particle.state_vector)): - if j not in self.position_mapping[new_dynamic_model]: - new_state_vector = np.insert(new_state_vector, j, 0, axis=0) - - new_particles.append( - RaoBlackwellisedParticle(new_state_vector, - 
weight=particle.weight, - parent=particle, - model_probabilities=particle.model_probabilities) - ) - - return Prediction.from_state(prior, - state_vector=None, - weight=None, - particle_list=new_particles, - timestamp=timestamp) + return new_particle_state diff --git a/stonesoup/predictor/tests/test_multimodel.py b/stonesoup/predictor/tests/test_multimodel.py index 6c6914688..e2b4d9250 100644 --- a/stonesoup/predictor/tests/test_multimodel.py +++ b/stonesoup/predictor/tests/test_multimodel.py @@ -6,7 +6,7 @@ ConstantVelocity, CombinedLinearGaussianTransitionModel, ConstantAcceleration) from ...predictor.particle import MultiModelPredictor from ...types.particle import MultiModelParticle -from ...types.state import ParticleState +from ...types.state import MultiModelParticleState def test_multi_model(): @@ -100,15 +100,15 @@ def test_multi_model(): ConstantVelocity(0.01))), ] # Give the respective position mapping. - position_mapping = [ - [0, 1, 3, 4, 6, 7], - [0, 1, 2, 3, 4, 5, 6, 7, 8], - [0, 1, 3, 4, 6, 7], - [0, 1, 2, 3, 4, 5, 6, 7, 8], - [0, 1, 3, 4, 6, 7, 8], - [0, 1, 2, 3, 4, 6, 7], - [0, 1, 3, 4, 5, 6, 7], - [0, 1, 2, 3, 4, 5, 6, 7], + position_mappings = [ + [0, 1, 3, 4, 6, 7], + [0, 1, 2, 3, 4, 5, 6, 7, 8], + [0, 1, 3, 4, 6, 7], + [0, 1, 2, 3, 4, 5, 6, 7, 8], + [0, 1, 3, 4, 6, 7, 8], + [0, 1, 2, 3, 4, 6, 7], + [0, 1, 3, 4, 5, 6, 7], + [0, 1, 2, 3, 4, 5, 6, 7], ] # Provide the required transition matrix. @@ -123,9 +123,9 @@ def test_multi_model(): [0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.65], ] - prior = ParticleState(None, particle_list=prior_particles, timestamp=timestamp) + prior = MultiModelParticleState(None, particle_list=prior_particles, timestamp=timestamp) - predictor = MultiModelPredictor(position_mapping=position_mapping, + predictor = MultiModelPredictor(position_mappings=position_mappings, transition_matrix=transition, transition_models=model_list) diff --git a/stonesoup/resampler/particle.py b/stonesoup/resampler/particle.py index 1bacb28db..01c4264b8 100644 --- a/stonesoup/resampler/particle.py +++ b/stonesoup/resampler/particle.py @@ -1,12 +1,9 @@ -from operator import attrgetter - import numpy as np from .base import Resampler from ..base import Property from ..types.numeric import Probability from ..types.state import ParticleState -from ..types.particle import RaoBlackwellisedParticle, MultiModelParticle class SystematicResampler(Resampler): @@ -91,81 +88,3 @@ def resample(self, particles): return self.resampler.resample(self.resampler, particles) else: return particles - - -class MultiModelSystematicResampler(Resampler): - - def resample(self, particles): - """Resample the particles - - Parameters - ---------- - particles : list of :class:`~.MultiModelParticle` - The particles to be resampled according to their weight - - Returns - ------- - particles : list of :class:`~.MultiModelParticle` - The resampled particles - """ - - n_particles = len(particles) - weight = Probability(1 / n_particles) - particles_sorted = sorted(particles, key=attrgetter('weight'), reverse=False) - cdf = np.cumsum([p.weight for p in particles_sorted]) - - # Pick random starting point - u_i = np.random.uniform(0, 1 / n_particles) - new_particles = [] - - # Cycle through the cumulative distribution and copy the particle - # that pushed the score over the current value - for j in range(n_particles): - u_j = u_i + (1 / n_particles) * j - particle = particles_sorted[np.argmax(u_j < cdf)] - new_particles.append( - MultiModelParticle(particle.state_vector, - weight=weight, - 
parent=particle, - dynamic_model=particle.dynamic_model)) - - return new_particles - - -class RaoBlackwellisedSystematicResampler(Resampler): - - def resample(self, particles): - """Resample the particles - Parameters - ---------- - particles : list of :class:`~.RaoBlackwellisedParticle` - The particles to be resampled according to their weight - Returns - ------- - particles : list of :class:`~.RaoBlackwellisedParticle` - The resampled particles - """ - - n_particles = len(particles) - weight = Probability(1/n_particles) - particles_sorted = sorted(particles, key=attrgetter('weight'), reverse=False) - cdf = np.cumsum([p.weight for p in particles_sorted]) - - # Pick random starting point - u_i = np.random.uniform(0, 1 / n_particles) - new_particles = [] - - # Cycle through the cumulative distribution and copy the particle - # that pushed the score over the current value - for j in range(n_particles): - - u_j = u_i + (1 / n_particles) * j - particle = particles_sorted[np.argmax(u_j < cdf)] - - new_particles.append( - RaoBlackwellisedParticle(particle.state_vector, - weight=weight, - parent=particle, - model_probabilities=particle.model_probabilities)) - - return new_particles diff --git a/stonesoup/types/prediction.py b/stonesoup/types/prediction.py index bc1aee941..5df06836b 100644 --- a/stonesoup/types/prediction.py +++ b/stonesoup/types/prediction.py @@ -2,7 +2,8 @@ from .array import CovarianceMatrix from .base import Type -from .state import (State, GaussianState, ParticleState, EnsembleState, +from .state import (State, GaussianState, EnsembleState, + ParticleState, MultiModelParticleState, RaoBlackwellisedParticleState, SqrtGaussianState, InformationState, TaggedWeightedGaussianState, WeightedGaussianState, CategoricalState) from ..base import Property @@ -118,6 +119,20 @@ class ParticleMeasurementPrediction(MeasurementPrediction, ParticleState): """ +class MulitModelParticleStatePrediction(Prediction, MultiModelParticleState): + """MultiModelParticleStatePrediction type + + This is a simple multi-model Particle state prediction object. + """ + + +class RaoBlackwellisedParticleStatePrediction(Prediction, RaoBlackwellisedParticleState): + """RaoBlackwellisedParticleStatePrediction type + + This is a simple Rao Blackwellised Particle state prediction object. 
+ """ + + class EnsembleStatePrediction(Prediction, EnsembleState): """EnsembleStatePrediction type diff --git a/stonesoup/types/state.py b/stonesoup/types/state.py index 01c4ec8ef..75e325e7a 100644 --- a/stonesoup/types/state.py +++ b/stonesoup/types/state.py @@ -10,7 +10,7 @@ from ..base import Property, clearable_cached_property from .array import StateVector, CovarianceMatrix, PrecisionMatrix, StateVectors from .base import Type -from .particle import Particle +from .particle import Particle, MultiModelParticle, RaoBlackwellisedParticle from .numeric import Probability @@ -583,6 +583,102 @@ def covar(self): State.register(ParticleState) # noqa: E305 +class MultiModelParticleState(ParticleState): + """Multi-Model Particle State type + + This is a particle state object which describes the state as a + distribution of particles, where each particle has an associated + dynamics model + """ + + dynamic_model: np.ndarray = Property( + default=None, + doc="Array of indices that identify which model is associated with each particle.") + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + if self.particle_list and isinstance(self.particle_list, list): + self.dynamic_model = \ + np.array([particle.dynamic_model for particle in self.particle_list]) + + def __getitem__(self, item): + if self.parent is not None: + parent = self.parent[item] + else: + parent = None + + if self.weight is not None: + weight = self.weight[item] + else: + weight = None + + if self.dynamic_model is not None: + dynamic_model = self.dynamic_model[item] + else: + dynamic_model = None + + if isinstance(item, int): + result = MultiModelParticle(state_vector=self.state_vector[:, item], + weight=weight, + parent=parent, + dynamic_model=dynamic_model) + else: + # Allow for Prediction/Update sub-types + result = type(self).from_state(self, + state_vector=self.state_vector[:, item], + weight=weight, + parent=parent, + particle_list=None, + dynamic_model=dynamic_model) + return result + + +class RaoBlackwellisedParticleState(ParticleState): + + model_probabilities: np.ndarray = Property( + default=None, + doc="2d NumPy array containing probability of particle belong to particular model. " + "Shape (n-models, m-particles)." 
+ ) + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + if self.particle_list and isinstance(self.particle_list, list): + self.model_probabilities = \ + np.column_stack([particle.model_probabilities for particle in self.particle_list]) + + def __getitem__(self, item): + if self.parent is not None: + parent = self.parent[item] + else: + parent = None + + if self.weight is not None: + weight = self.weight[item] + else: + weight = None + + if self.model_probabilities is not None: + model_probabilities = self.model_probabilities[:, item] + else: + model_probabilities = None + + if isinstance(item, int): + result = RaoBlackwellisedParticle(state_vector=self.state_vector[:, item], + weight=weight, + parent=parent, + model_probabilities=model_probabilities) + else: + # Allow for Prediction/Update sub-types + result = type(self).from_state(self, + state_vector=self.state_vector[:, item], + weight=weight, + parent=parent, + particle_list=None, + model_probabilities=model_probabilities) + return result + + class EnsembleState(Type): r"""Ensemble State type diff --git a/stonesoup/types/update.py b/stonesoup/types/update.py index db5d081c9..71401c251 100644 --- a/stonesoup/types/update.py +++ b/stonesoup/types/update.py @@ -4,8 +4,10 @@ from .hypothesis import Hypothesis, CompositeHypothesis from .mixture import GaussianMixture from .state import CreatableFromState, CompositeState -from .state import State, GaussianState, ParticleState, EnsembleState, \ - SqrtGaussianState, InformationState, CategoricalState +from .state import ( + State, GaussianState, EnsembleState, + ParticleState, MultiModelParticleState, RaoBlackwellisedParticleState, + SqrtGaussianState, InformationState, CategoricalState) from ..base import Property @@ -59,6 +61,20 @@ class ParticleStateUpdate(Update, ParticleState): """ +class MultiModelParticleStateUpdate(Update, MultiModelParticleState): + """MultiModelStateUpdate type + + This is a simple Multi-Model Particle state update object. + """ + + +class RaoBlackwellisedParticleStateUpdate(Update, RaoBlackwellisedParticleState): + """RaoBlackwellisedStateUpdate type + + This is a simple Rao Blackwellised Particle state update object. 
+ """ + + class EnsembleStateUpdate(Update, EnsembleState): """EnsembleStateUpdate type diff --git a/stonesoup/updater/particle.py b/stonesoup/updater/particle.py index 7b9f290bc..aad443cb3 100644 --- a/stonesoup/updater/particle.py +++ b/stonesoup/updater/particle.py @@ -2,8 +2,7 @@ from functools import lru_cache import numpy as np -from scipy.linalg import inv, block_diag -from scipy.stats import multivariate_normal +from scipy.linalg import inv from .base import Updater from .kalman import KalmanUpdater, ExtendedKalmanUpdater @@ -12,7 +11,6 @@ from ..predictor.particle import MultiModelPredictor, RaoBlackwellisedMultiModelPredictor from ..resampler import Resampler from ..types.numeric import Probability -from ..types.particle import RaoBlackwellisedParticle, MultiModelParticle from ..types.prediction import ( Prediction, ParticleMeasurementPrediction, GaussianStatePrediction, MeasurementPrediction) from ..types.update import ParticleStateUpdate, Update @@ -226,11 +224,11 @@ def predict_measurement(self, state_prediction, *args, **kwargs): timestamp=particle_prediction.timestamp) -class MultiModelParticleUpdater(Updater): +class MultiModelParticleUpdater(ParticleUpdater): """Particle Updater for the Multi Model system""" - resampler: Resampler = Property(doc='Resampler to prevent particle degeneracy') - predictor: MultiModelPredictor = Property() + predictor: MultiModelPredictor = Property( + doc="Predictor which hold holds transition matrix") def update(self, hypothesis, **kwargs): """Particle Filter update step @@ -251,54 +249,34 @@ def update(self, hypothesis, **kwargs): else: measurement_model = hypothesis.measurement.measurement_model - new_particles = [copy.copy(particle) for particle in hypothesis.prediction.particles] - - for particle in new_particles: - particle.weight = particle.weight \ - * measurement_model.pdf(hypothesis.measurement, particle, **kwargs) \ - * self.predictor.transition_matrix[particle.parent.dynamic_model][particle.dynamic_model] # noqa: E501 - - # Normalise the weights - sum_w = Probability.sum(i.weight for i in hypothesis.prediction.particles) - for particle in new_particles: - particle.weight /= sum_w - - new_particles = self.resampler.resample(new_particles) + update = Update.from_state( + hypothesis.prediction, + hypothesis=hypothesis, + timestamp=hypothesis.measurement.timestamp, + particle_list=None + ) - return ParticleStateUpdate(None, - particle_list=new_particles, - hypothesis=hypothesis, - timestamp=hypothesis.measurement.timestamp) + transition_matrix = np.asarray(self.predictor.transition_matrix) - @lru_cache() - def predict_measurement(self, state_prediction, measurement_model=None, - **kwargs): + update.weight = update.weight \ + * measurement_model.pdf(hypothesis.measurement, update, **kwargs) \ + * transition_matrix[update.parent.dynamic_model, update.dynamic_model] - if measurement_model is None: - measurement_model = self.measurement_model - - new_particles = [] - for particle in state_prediction.particles: - new_state_vector = measurement_model.function( - particle.state_vector, noise=False, **kwargs) - new_particles.append( - MultiModelParticle(new_state_vector, - weight=particle.weight, - parent=particle.parent, - dynamic_model=particle.dynamicmodel)) + # Normalise the weights + update.weight /= Probability.sum(update.weight) - return ParticleMeasurementPrediction( - None, particle_list=new_particles, timestamp=state_prediction.timestamp) + if self.resampler: + update = self.resampler.resample(update) + return update -class 
RaoBlackwellisedParticleUpdater(Updater): +class RaoBlackwellisedParticleUpdater(MultiModelParticleUpdater): """Particle Updater for the Raoblackwellised scheme""" - resampler: Resampler = Property(doc='Resampler to prevent particle degeneracy') predictor: RaoBlackwellisedMultiModelPredictor = Property( doc="Predictor which hold holds transition matrix, models and mappings") - def update(self, hypothesis, prior_timestamp, **kwargs): # TODO: Handle prior timestamp + def update(self, hypothesis, **kwargs): """Particle Filter update step Parameters @@ -317,114 +295,61 @@ def update(self, hypothesis, prior_timestamp, **kwargs): # TODO: Handle prior t measurement_model = self.measurement_model else: measurement_model = hypothesis.measurement.measurement_model - time_interval = hypothesis.prediction.timestamp - prior_timestamp - - for particle in hypothesis.prediction.particles: - new_model_probabilities, prob_position_given_previous_position = \ - self.calculate_model_probabilities( - particle, measurement_model, self.predictor.position_mapping, - self.transition_matrix, self.predictor.transition_models, time_interval) - - particle.model_probabilities = new_model_probabilities + update = Update.from_state( + hypothesis.prediction, + weight=copy.copy(hypothesis.prediction.weight), + hypothesis=hypothesis, + timestamp=hypothesis.measurement.timestamp, + particle_list=None + ) - prob_y_given_x = measurement_model.pdf( - hypothesis.measurement, particle, - **kwargs) + update.model_probabilities = self.calculate_model_probabilities( + hypothesis.prediction, self.predictor) - particle.weight *= prob_y_given_x + update.weight = update.weight * measurement_model.pdf(hypothesis.measurement, + update, + **kwargs) # Normalise the weights - sum_w = Probability.sum( - i.weight for i in hypothesis.prediction.particles) - for particle in hypothesis.prediction.particles: - particle.weight /= sum_w - - new_particles = self.resampler.resample( - hypothesis.prediction.particles) - - return ParticleStateUpdate(None, - particle_list=new_particles, - hypothesis=hypothesis, - timestamp=hypothesis.measurement.timestamp) - - @lru_cache() - def predict_measurement(self, state_prediction, measurement_model=None, - **kwargs): + update.weight /= Probability.sum(update.weight) - if measurement_model is None: - measurement_model = self.measurement_model - - new_particles = [] - for particle in state_prediction.particles: - new_state_vector = measurement_model.function( - particle.state_vector, noise=False, **kwargs) - new_particles.append( - RaoBlackwellisedParticle(new_state_vector, - weight=particle.weight, - parent=particle.parent, - model_probabilities=particle.model_probabilities)) - - return ParticleMeasurementPrediction( - None, particle_list=new_particles, timestamp=state_prediction.timestamp) + if self.resampler: + update = self.resampler.resample(update) + return update @staticmethod - def calculate_model_probabilities(particle, measurement_model, position_mapping, - transition_matrix, transition_models, time_interval): + def calculate_model_probabilities(prediction, predictor): """Calculates the new model probabilities based on the ones calculated in the previous time step""" - previous_probabilities = particle.model_probabilities - denominator_components = [] - # Loop over the current models m_k - for l in range(len(previous_probabilities)): # noqa: E741 - selected_model_sum = 0 - # Loop over the previous models m_k-1 - for i, model in enumerate(transition_models): - # Looks up p(m_k|m_k-1) - # Note: if 
p(m_k|m_k-1) = 0 then p(m_k|x_1:k) = 0 - transition_probability = transition_matrix[i][l] - if transition_probability == 0: - break - # Getting required states to apply the model to that state vector - parent_required_state_space = copy.copy(particle.parent) - parent_required_state_space.state_vector = \ - particle.parent.state_vector[position_mapping[i], :] - - # The noiseless application of m_k onto x_k-1 - mean = model.function( - parent_required_state_space, time_interval=time_interval, noise=False) - - # Input the indices that were removed previously - for j in range(len(particle.state_vector)): - if j not in position_mapping[i]: - mean = np.insert(mean, j, 0, axis=0) - - cov_matrices = [measurement_model.covar() - for _ in range(len(model.model_list))] - - prob_position_given_model_and_old_position = multivariate_normal.pdf( - particle.state_vector.ravel(), - mean=mean.ravel(), - cov=block_diag(*cov_matrices) - ) - # p(m_k-1|x_1:k-1) - prob_previous_iteration_given_model = previous_probabilities[l] - - product_of_probs = Probability(prob_position_given_model_and_old_position * - transition_probability * - prob_previous_iteration_given_model) - selected_model_sum = selected_model_sum + product_of_probs - denominator_components.append(selected_model_sum) + # Loop over the previous models m_k-1 + for model_index, transition_model in enumerate(predictor.transition_models): + required_space_prior = copy.copy(prediction.parent) + required_space_prior.state_vector = \ + required_space_prior.state_vector[predictor.position_mappings[model_index], :] + required_space_pred = copy.copy(prediction) + required_space_pred.state_vector = \ + required_space_pred.state_vector[predictor.position_mappings[model_index], :] + + prob_position_given_model_and_old_position = transition_model.pdf( + required_space_pred, required_space_prior, + time_interval=prediction.timestamp - prediction.parent.timestamp + ) - # Calculate the denominator - denominator = sum(denominator_components) + # Looks up p(m_k|m_k-1) + prob_of_transition = np.asanyarray(predictor.transition_matrix)[model_index, :] + + product_of_probs = \ + prob_position_given_model_and_old_position \ + * prob_of_transition[:, np.newaxis] \ + * prediction.model_probabilities # p(m_k-1|x_1:k-1) - # Calculate the new probabilities - new_probabilities = [] - for i in range(len(previous_probabilities)): - new_probabilities.append( - Probability(denominator_components[i] / denominator)) + denominator_components.append(np.sum(product_of_probs, axis=0)) + + # Calculate the denominator + new_probabilities = np.stack(denominator_components) + new_probabilities /= np.sum(new_probabilities, axis=0) - return [new_probabilities, denominator] + return new_probabilities diff --git a/stonesoup/updater/tests/test_raoblackwellised.py b/stonesoup/updater/tests/test_raoblackwellised.py index 6b330137a..f73f0789f 100644 --- a/stonesoup/updater/tests/test_raoblackwellised.py +++ b/stonesoup/updater/tests/test_raoblackwellised.py @@ -1,51 +1,76 @@ """Test for updater.particle module""" -import numpy as np import datetime -from ...models.measurement.linear import LinearGaussian +import numpy as np +from stonesoup.types.detection import Detection +from ...models.measurement.linear import LinearGaussian +from ...models.transition.linear import ConstantVelocity, ConstantAcceleration, KnownTurnRate +from ...models.transition.linear import CombinedLinearGaussianTransitionModel +from ...predictor.particle import RaoBlackwellisedMultiModelPredictor from ...updater.particle import 
RaoBlackwellisedParticleUpdater +from ...types.hypothesis import SingleHypothesis from ...types.particle import RaoBlackwellisedParticle -from ...models.transition.linear import ConstantVelocity, ConstantAcceleration -from ...models.transition.linear import CombinedLinearGaussianTransitionModel +from ...types.prediction import RaoBlackwellisedParticleStatePrediction +from ...types.state import RaoBlackwellisedParticleState +from ...types.update import RaoBlackwellisedParticleStateUpdate -def test_rao_blackwellised_updater(): +def test_rao_blackwellised(): # Initialise two particles particle1 = RaoBlackwellisedParticle( - state_vector=[1, 1, 1, 1, 1, 1, 1, 1, 1], - weight=0.5, - model_probabilities=[0.5, 0.5]) + state_vector=[1, 1, -0.5, 1, 1, -0.5], + weight=1/3000, + model_probabilities=[0.01, 0.98, 0.01]) particle2 = RaoBlackwellisedParticle( - state_vector=[1, 1, 1, 1, 1, 1, 1, 1, 1], - weight=0.5, - model_probabilities=[0.5, 0.5]) - particle1.parent = particle2 - particle2.parent = particle1 + state_vector=[1, 1, 0.5, 1, 1, 0.5], + weight=1/3000, + model_probabilities=[0.98, 0.01, 0.01]) + particle3 = RaoBlackwellisedParticle( + state_vector=[1, 1, 0.5, 1, 1, 0.5], + weight=1/3000, + model_probabilities=[0.2, 0.2, 0.6]) + + particles = [particle1, particle2, particle3] * 1000 + timestamp = datetime.datetime.now() - particles = [particle1, particle2] + particle_state = RaoBlackwellisedParticleState( + None, particle_list=particles, timestamp=timestamp) dynamic_model_list = [CombinedLinearGaussianTransitionModel((ConstantVelocity(0.01), - ConstantVelocity(0.01), ConstantVelocity(0.01))), CombinedLinearGaussianTransitionModel((ConstantAcceleration(0.1), - ConstantAcceleration(0.1), - ConstantAcceleration(0.1)))] - - transition = [[0.50, 0.50], - [0.50, 0.50]] - - position_mapping = [[0, 1, 3, 4, 6, 7], - [0, 1, 2, 3, 4, 5, 6, 7, 8]] - - measurement_model = LinearGaussian( - ndim_state=9, - mapping=(0, 3, 6), - noise_covar=np.diag([0.75, 0.75, 0.75])) - - for particle in particles: - rv = RaoBlackwellisedParticleUpdater.calculate_model_probabilities( - particle, measurement_model, position_mapping, transition, dynamic_model_list, - datetime.timedelta(seconds=1) - ) - assert type(rv) == list + ConstantAcceleration(0.1))), + KnownTurnRate([0.1, 0.1], np.radians(20))] + + transition_matrix = [[0.40, 0.40, 0.2], + [0.40, 0.40, 0.2], + [0.40, 0.40, 0.2]] + + position_mappings = [[0, 1, 3, 4], + [0, 1, 2, 3, 4, 5], + [1, 2, 3, 4]] + + predictor = RaoBlackwellisedMultiModelPredictor( + dynamic_model_list, transition_matrix, position_mappings + ) + + timestamp += datetime.timedelta(seconds=5) + prediction = predictor.predict(particle_state, timestamp) + + assert isinstance(prediction, RaoBlackwellisedParticleStatePrediction) + + measurement_model = LinearGaussian(6, [0, 3], np.diag([1, 1])) + updater = RaoBlackwellisedParticleUpdater(measurement_model, predictor) + + # Detection close to where known turn rate model would place particles + detection = Detection([[0.5, 7.]], timestamp) + + update = updater.update(hypothesis=SingleHypothesis(prediction, detection)) + + assert isinstance(update, RaoBlackwellisedParticleStateUpdate) + + average_model_proabilities = np.average( + update.model_probabilities, weights=update.weight, axis=1) + assert len(average_model_proabilities) == update.model_probabilities.shape[0] + assert isinstance(dynamic_model_list[np.argmax(average_model_proabilities)], KnownTurnRate) From d48b7aa16e64312051fc2c55c67a10df2b85ef73 Mon Sep 17 00:00:00 2001 From: Steven Hiscocks 
Date: Fri, 23 Sep 2022 15:52:34 +0100 Subject: [PATCH 05/11] Add multi model particle updater test, combining with Rao Blackwellised As both testing form of multi model test, the files have been combined, so they can share fixtures. --- stonesoup/types/prediction.py | 2 +- stonesoup/updater/particle.py | 6 +- .../tests/test_multi_model_particle.py | 135 ++++++++++++++++++ .../updater/tests/test_raoblackwellised.py | 76 ---------- 4 files changed, 139 insertions(+), 80 deletions(-) create mode 100644 stonesoup/updater/tests/test_multi_model_particle.py delete mode 100644 stonesoup/updater/tests/test_raoblackwellised.py diff --git a/stonesoup/types/prediction.py b/stonesoup/types/prediction.py index 5df06836b..50a199ee7 100644 --- a/stonesoup/types/prediction.py +++ b/stonesoup/types/prediction.py @@ -119,7 +119,7 @@ class ParticleMeasurementPrediction(MeasurementPrediction, ParticleState): """ -class MulitModelParticleStatePrediction(Prediction, MultiModelParticleState): +class MultiModelParticleStatePrediction(Prediction, MultiModelParticleState): """MultiModelParticleStatePrediction type This is a simple multi-model Particle state prediction object. diff --git a/stonesoup/updater/particle.py b/stonesoup/updater/particle.py index aad443cb3..0951ce051 100644 --- a/stonesoup/updater/particle.py +++ b/stonesoup/updater/particle.py @@ -241,7 +241,7 @@ def update(self, hypothesis, **kwargs): Returns ------- - : :class:`~.ParticleState` + : :class:`~.MultiModelParticleStateUpdate` The state posterior """ if hypothesis.measurement.measurement_model is None: @@ -256,7 +256,7 @@ def update(self, hypothesis, **kwargs): particle_list=None ) - transition_matrix = np.asarray(self.predictor.transition_matrix) + transition_matrix = np.asanyarray(self.predictor.transition_matrix) update.weight = update.weight \ * measurement_model.pdf(hypothesis.measurement, update, **kwargs) \ @@ -287,7 +287,7 @@ def update(self, hypothesis, **kwargs): Returns ------- - : :class:`~.ParticleState` + : :class:`~.RaoBlackwellisedParticleStateUpdate` The state posterior """ diff --git a/stonesoup/updater/tests/test_multi_model_particle.py b/stonesoup/updater/tests/test_multi_model_particle.py new file mode 100644 index 000000000..cc46708b3 --- /dev/null +++ b/stonesoup/updater/tests/test_multi_model_particle.py @@ -0,0 +1,135 @@ +"""Test for updater.particle module""" +import datetime + +import numpy as np +import pytest + +from stonesoup.types.detection import Detection +from ...models.measurement.linear import LinearGaussian +from ...models.transition.linear import ConstantVelocity, ConstantAcceleration, KnownTurnRate +from ...models.transition.linear import CombinedLinearGaussianTransitionModel +from ...predictor.particle import RaoBlackwellisedMultiModelPredictor, MultiModelPredictor +from ...updater.particle import RaoBlackwellisedParticleUpdater, MultiModelParticleUpdater +from ...types.hypothesis import SingleHypothesis +from ...types.particle import RaoBlackwellisedParticle, MultiModelParticle +from ...types.prediction import ( + RaoBlackwellisedParticleStatePrediction, MultiModelParticleStatePrediction) +from ...types.state import RaoBlackwellisedParticleState, MultiModelParticleState +from ...types.update import RaoBlackwellisedParticleStateUpdate, MultiModelParticleStateUpdate + + +@pytest.fixture() +def dynamic_model_list(): + return [CombinedLinearGaussianTransitionModel((ConstantVelocity(0.01), + ConstantVelocity(0.01))), + CombinedLinearGaussianTransitionModel((ConstantAcceleration(0.1), + 
ConstantAcceleration(0.1))), + KnownTurnRate([0.1, 0.1], np.radians(20))] + + +@pytest.fixture() +def position_mappings(): + return [[0, 1, 3, 4], + [0, 1, 2, 3, 4, 5], + [1, 2, 3, 4]] + + +@pytest.fixture() +def transition_matrix(): + return [[0.40, 0.40, 0.2], + [0.40, 0.40, 0.2], + [0.40, 0.40, 0.2]] + + +def test_multi_model(dynamic_model_list, position_mappings, transition_matrix): + # Initialise particles + particle1 = MultiModelParticle( + state_vector=[1, 1, -0.5, 1, 1, -0.5], + weight=1/3000, + dynamic_model=0) + particle2 = MultiModelParticle( + state_vector=[1, 1, 0.5, 1, 1, 0.5], + weight=1/3000, + dynamic_model=1) + particle3 = MultiModelParticle( + state_vector=[1, 1, 0.5, 1, 1, 0.5], + weight=1/3000, + dynamic_model=2) + + particles = [particle1, particle2, particle3] * 1000 + timestamp = datetime.datetime.now() + + particle_state = MultiModelParticleState( + None, particle_list=particles, timestamp=timestamp) + + predictor = MultiModelPredictor( + dynamic_model_list, transition_matrix, position_mappings + ) + + timestamp += datetime.timedelta(seconds=5) + prediction = predictor.predict(particle_state, timestamp) + + assert isinstance(prediction, MultiModelParticleStatePrediction) + + measurement_model = LinearGaussian(6, [0, 3], np.diag([1, 1])) + updater = MultiModelParticleUpdater(measurement_model, predictor) + + # Detection close to where known turn rate model would place particles + detection = Detection([[0.5, 7.]], timestamp) + + update = updater.update(hypothesis=SingleHypothesis(prediction, detection)) + + assert isinstance(update, MultiModelParticleStateUpdate) + # NOTE: This uses the parent particles' model, to see which model was applied + # rather, than the new model that has been changed to. + model_weights = [ + np.sum(update.weight[update.parent.dynamic_model == model_index]) + for model_index in range(len(dynamic_model_list))] + assert float(np.sum(model_weights)) == pytest.approx(1) + assert isinstance(dynamic_model_list[np.argmax(model_weights)], KnownTurnRate) + + +def test_rao_blackwellised(dynamic_model_list, position_mappings, transition_matrix): + # Initialise particles + particle1 = RaoBlackwellisedParticle( + state_vector=[1, 1, -0.5, 1, 1, -0.5], + weight=1/3000, + model_probabilities=[0.01, 0.98, 0.01]) + particle2 = RaoBlackwellisedParticle( + state_vector=[1, 1, 0.5, 1, 1, 0.5], + weight=1/3000, + model_probabilities=[0.98, 0.01, 0.01]) + particle3 = RaoBlackwellisedParticle( + state_vector=[1, 1, 0.5, 1, 1, 0.5], + weight=1/3000, + model_probabilities=[0.2, 0.2, 0.6]) + + particles = [particle1, particle2, particle3] * 1000 + timestamp = datetime.datetime.now() + + particle_state = RaoBlackwellisedParticleState( + None, particle_list=particles, timestamp=timestamp) + + predictor = RaoBlackwellisedMultiModelPredictor( + dynamic_model_list, transition_matrix, position_mappings + ) + + timestamp += datetime.timedelta(seconds=5) + prediction = predictor.predict(particle_state, timestamp) + + assert isinstance(prediction, RaoBlackwellisedParticleStatePrediction) + + measurement_model = LinearGaussian(6, [0, 3], np.diag([1, 1])) + updater = RaoBlackwellisedParticleUpdater(measurement_model, predictor) + + # Detection close to where known turn rate model would place particles + detection = Detection([[0.5, 7.]], timestamp) + + update = updater.update(hypothesis=SingleHypothesis(prediction, detection)) + + assert isinstance(update, RaoBlackwellisedParticleStateUpdate) + + average_model_proabilities = np.average( + update.model_probabilities, 
weights=update.weight, axis=1) + assert len(average_model_proabilities) == update.model_probabilities.shape[0] + assert isinstance(dynamic_model_list[np.argmax(average_model_proabilities)], KnownTurnRate) diff --git a/stonesoup/updater/tests/test_raoblackwellised.py b/stonesoup/updater/tests/test_raoblackwellised.py deleted file mode 100644 index f73f0789f..000000000 --- a/stonesoup/updater/tests/test_raoblackwellised.py +++ /dev/null @@ -1,76 +0,0 @@ -"""Test for updater.particle module""" -import datetime - -import numpy as np - -from stonesoup.types.detection import Detection -from ...models.measurement.linear import LinearGaussian -from ...models.transition.linear import ConstantVelocity, ConstantAcceleration, KnownTurnRate -from ...models.transition.linear import CombinedLinearGaussianTransitionModel -from ...predictor.particle import RaoBlackwellisedMultiModelPredictor -from ...updater.particle import RaoBlackwellisedParticleUpdater -from ...types.hypothesis import SingleHypothesis -from ...types.particle import RaoBlackwellisedParticle -from ...types.prediction import RaoBlackwellisedParticleStatePrediction -from ...types.state import RaoBlackwellisedParticleState -from ...types.update import RaoBlackwellisedParticleStateUpdate - - -def test_rao_blackwellised(): - # Initialise two particles - particle1 = RaoBlackwellisedParticle( - state_vector=[1, 1, -0.5, 1, 1, -0.5], - weight=1/3000, - model_probabilities=[0.01, 0.98, 0.01]) - particle2 = RaoBlackwellisedParticle( - state_vector=[1, 1, 0.5, 1, 1, 0.5], - weight=1/3000, - model_probabilities=[0.98, 0.01, 0.01]) - particle3 = RaoBlackwellisedParticle( - state_vector=[1, 1, 0.5, 1, 1, 0.5], - weight=1/3000, - model_probabilities=[0.2, 0.2, 0.6]) - - particles = [particle1, particle2, particle3] * 1000 - timestamp = datetime.datetime.now() - - particle_state = RaoBlackwellisedParticleState( - None, particle_list=particles, timestamp=timestamp) - - dynamic_model_list = [CombinedLinearGaussianTransitionModel((ConstantVelocity(0.01), - ConstantVelocity(0.01))), - CombinedLinearGaussianTransitionModel((ConstantAcceleration(0.1), - ConstantAcceleration(0.1))), - KnownTurnRate([0.1, 0.1], np.radians(20))] - - transition_matrix = [[0.40, 0.40, 0.2], - [0.40, 0.40, 0.2], - [0.40, 0.40, 0.2]] - - position_mappings = [[0, 1, 3, 4], - [0, 1, 2, 3, 4, 5], - [1, 2, 3, 4]] - - predictor = RaoBlackwellisedMultiModelPredictor( - dynamic_model_list, transition_matrix, position_mappings - ) - - timestamp += datetime.timedelta(seconds=5) - prediction = predictor.predict(particle_state, timestamp) - - assert isinstance(prediction, RaoBlackwellisedParticleStatePrediction) - - measurement_model = LinearGaussian(6, [0, 3], np.diag([1, 1])) - updater = RaoBlackwellisedParticleUpdater(measurement_model, predictor) - - # Detection close to where known turn rate model would place particles - detection = Detection([[0.5, 7.]], timestamp) - - update = updater.update(hypothesis=SingleHypothesis(prediction, detection)) - - assert isinstance(update, RaoBlackwellisedParticleStateUpdate) - - average_model_proabilities = np.average( - update.model_probabilities, weights=update.weight, axis=1) - assert len(average_model_proabilities) == update.model_probabilities.shape[0] - assert isinstance(dynamic_model_list[np.argmax(average_model_proabilities)], KnownTurnRate) From a343fd71ff98e15a75d2542caa33e9a8b84c2ebe Mon Sep 17 00:00:00 2001 From: Steven Hiscocks Date: Mon, 26 Sep 2022 09:48:01 +0100 Subject: [PATCH 06/11] Add resamplers to multi-model particle updater tests 
--- .../tests/test_multi_model_particle.py | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/stonesoup/updater/tests/test_multi_model_particle.py b/stonesoup/updater/tests/test_multi_model_particle.py index cc46708b3..49fc2bf3c 100644 --- a/stonesoup/updater/tests/test_multi_model_particle.py +++ b/stonesoup/updater/tests/test_multi_model_particle.py @@ -4,12 +4,13 @@ import numpy as np import pytest -from stonesoup.types.detection import Detection +from ...resampler.particle import SystematicResampler from ...models.measurement.linear import LinearGaussian from ...models.transition.linear import ConstantVelocity, ConstantAcceleration, KnownTurnRate from ...models.transition.linear import CombinedLinearGaussianTransitionModel from ...predictor.particle import RaoBlackwellisedMultiModelPredictor, MultiModelPredictor from ...updater.particle import RaoBlackwellisedParticleUpdater, MultiModelParticleUpdater +from ...types.detection import Detection from ...types.hypothesis import SingleHypothesis from ...types.particle import RaoBlackwellisedParticle, MultiModelParticle from ...types.prediction import ( @@ -41,7 +42,15 @@ def transition_matrix(): [0.40, 0.40, 0.2]] -def test_multi_model(dynamic_model_list, position_mappings, transition_matrix): +@pytest.fixture(params=[None, SystematicResampler]) +def resampler(request): + if request.param is None: + return None + else: + return request.param() + + +def test_multi_model(dynamic_model_list, position_mappings, transition_matrix, resampler): # Initialise particles particle1 = MultiModelParticle( state_vector=[1, 1, -0.5, 1, 1, -0.5], @@ -72,7 +81,7 @@ def test_multi_model(dynamic_model_list, position_mappings, transition_matrix): assert isinstance(prediction, MultiModelParticleStatePrediction) measurement_model = LinearGaussian(6, [0, 3], np.diag([1, 1])) - updater = MultiModelParticleUpdater(measurement_model, predictor) + updater = MultiModelParticleUpdater(measurement_model, predictor, resampler=resampler) # Detection close to where known turn rate model would place particles detection = Detection([[0.5, 7.]], timestamp) @@ -89,7 +98,7 @@ def test_multi_model(dynamic_model_list, position_mappings, transition_matrix): assert isinstance(dynamic_model_list[np.argmax(model_weights)], KnownTurnRate) -def test_rao_blackwellised(dynamic_model_list, position_mappings, transition_matrix): +def test_rao_blackwellised(dynamic_model_list, position_mappings, transition_matrix, resampler): # Initialise particles particle1 = RaoBlackwellisedParticle( state_vector=[1, 1, -0.5, 1, 1, -0.5], @@ -120,7 +129,7 @@ def test_rao_blackwellised(dynamic_model_list, position_mappings, transition_mat assert isinstance(prediction, RaoBlackwellisedParticleStatePrediction) measurement_model = LinearGaussian(6, [0, 3], np.diag([1, 1])) - updater = RaoBlackwellisedParticleUpdater(measurement_model, predictor) + updater = RaoBlackwellisedParticleUpdater(measurement_model, predictor, resampler=resampler) # Detection close to where known turn rate model would place particles detection = Detection([[0.5, 7.]], timestamp) From fb7af46c380a28fb18a21c7dc58e9f38b15c9382 Mon Sep 17 00:00:00 2001 From: Steven Hiscocks Date: Wed, 28 Sep 2022 11:30:24 +0100 Subject: [PATCH 07/11] Update multi-model particle predictor test to use State not Particles --- stonesoup/predictor/tests/test_multimodel.py | 68 +++----------------- 1 file changed, 10 insertions(+), 58 deletions(-) diff --git a/stonesoup/predictor/tests/test_multimodel.py 
b/stonesoup/predictor/tests/test_multimodel.py index e2b4d9250..7c6578e24 100644 --- a/stonesoup/predictor/tests/test_multimodel.py +++ b/stonesoup/predictor/tests/test_multimodel.py @@ -5,7 +5,7 @@ from ...models.transition.linear import ( ConstantVelocity, CombinedLinearGaussianTransitionModel, ConstantAcceleration) from ...predictor.particle import MultiModelPredictor -from ...types.particle import MultiModelParticle +from ...types.array import StateVectors from ...types.state import MultiModelParticleState @@ -17,60 +17,14 @@ def test_multi_model(): new_timestamp = timestamp + datetime.timedelta(seconds=time_diff) # Define prior state. - prior_particles = [ - MultiModelParticle(np.array([[10], [10], [10], [10], [10], [10], [10], [10], [10]]), - 1 / 9, dynamic_model=0), - MultiModelParticle(np.array([[10], [20], [10], [10], [10], [10], [10], [10], [10]]), - 1 / 9, dynamic_model=0), - MultiModelParticle(np.array([[10], [30], [10], [10], [10], [10], [10], [10], [10]]), - 1 / 9, dynamic_model=0), - MultiModelParticle(np.array([[20], [10], [10], [10], [10], [10], [10], [10], [10]]), - 1 / 9, dynamic_model=0), - MultiModelParticle(np.array([[20], [20], [10], [10], [10], [10], [10], [10], [10]]), - 1 / 9, dynamic_model=0), - MultiModelParticle(np.array([[20], [30], [10], [10], [10], [10], [10], [10], [10]]), - 1 / 9, dynamic_model=0), - MultiModelParticle(np.array([[30], [10], [10], [10], [10], [10], [10], [10], [10]]), - 1 / 9, dynamic_model=0), - MultiModelParticle(np.array([[30], [20], [10], [10], [10], [10], [10], [10], [10]]), - 1 / 9, dynamic_model=0), - MultiModelParticle(np.array([[30], [30], [10], [10], [10], [10], [10], [10], [10]]), - 1 / 9, dynamic_model=0), - MultiModelParticle(np.array([[30], [30], [10], [10], [10], [10], [10], [10], [10]]), - 1 / 9, dynamic_model=0), - MultiModelParticle(np.array([[30], [30], [10], [10], [10], [10], [10], [10], [10]]), - 1 / 9, dynamic_model=0), - MultiModelParticle(np.array([[30], [30], [10], [10], [10], [10], [10], [10], [10]]), - 1 / 9, dynamic_model=0), - MultiModelParticle(np.array([[30], [30], [10], [10], [10], [10], [10], [10], [10]]), - 1 / 9, dynamic_model=0), - MultiModelParticle(np.array([[30], [30], [10], [10], [10], [10], [10], [10], [10]]), - 1 / 9, dynamic_model=0), - MultiModelParticle(np.array([[30], [30], [10], [10], [10], [10], [10], [10], [10]]), - 1 / 9, dynamic_model=0), - MultiModelParticle(np.array([[30], [30], [10], [10], [10], [10], [10], [10], [10]]), - 1 / 9, dynamic_model=0), - MultiModelParticle(np.array([[30], [30], [10], [10], [10], [10], [10], [10], [10]]), - 1 / 9, dynamic_model=0), - MultiModelParticle(np.array([[30], [30], [10], [10], [10], [10], [10], [10], [10]]), - 1 / 9, dynamic_model=0), - MultiModelParticle(np.array([[30], [30], [10], [10], [10], [10], [10], [10], [10]]), - 1 / 9, dynamic_model=0), - MultiModelParticle(np.array([[30], [30], [10], [10], [10], [10], [10], [10], [10]]), - 1 / 9, dynamic_model=0), - MultiModelParticle(np.array([[30], [30], [10], [10], [10], [10], [10], [10], [10]]), - 1 / 9, dynamic_model=0), - MultiModelParticle(np.array([[30], [30], [10], [10], [10], [10], [10], [10], [10]]), - 1 / 9, dynamic_model=0), - MultiModelParticle(np.array([[30], [30], [10], [10], [10], [10], [10], [10], [10]]), - 1 / 9, dynamic_model=0), - MultiModelParticle(np.array([[30], [30], [10], [10], [10], [10], [10], [10], [10]]), - 1 / 9, dynamic_model=0), - MultiModelParticle(np.array([[30], [30], [10], [10], [10], [10], [10], [10], [10]]), - 1 / 9, dynamic_model=0), - 
MultiModelParticle(np.array([[30], [30], [10], [10], [10], [10], [10], [10], [10]]), - 1 / 9, dynamic_model=0), - ] + prior_vectors = np.full((9, 10), 10.).view(StateVectors) + # Change particles starting x position, and velocity + prior_vectors[0, :] = [10, 10, 10, 20, 20, 20, 30, 30, 30, 10] + prior_vectors[1, :] = [10, 20, 30, 10, 20, 30, 10, 20, 30, 30] + weight = np.full((prior_vectors.shape[1],), 1/prior_vectors.shape[1]) + model = np.full((prior_vectors.shape[1], ), 0) + prior = MultiModelParticleState( + prior_vectors, weight=weight, dynamic_model=model, timestamp=timestamp) # Declare the model list. model_list = [ @@ -123,8 +77,6 @@ def test_multi_model(): [0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.65], ] - prior = MultiModelParticleState(None, particle_list=prior_particles, timestamp=timestamp) - predictor = MultiModelPredictor(position_mappings=position_mappings, transition_matrix=transition, transition_models=model_list) @@ -136,5 +88,5 @@ def test_multi_model(): dynamic_model_proportions = np.array(dynamic_model_proportions) assert prediction.timestamp == new_timestamp - assert np.all([prediction.particles[i].weight == 1 / 9 for i in range(9)]) + assert np.all([prediction.particles[i].weight == 1/10 for i in range(9)]) assert len(dynamic_model_proportions) == len(transition) From 2ab3a2e3e7aaf8d55cb1b69c109ac9a32a329c60 Mon Sep 17 00:00:00 2001 From: Steven Hiscocks Date: Mon, 10 Oct 2022 12:26:46 +0100 Subject: [PATCH 08/11] Add test for ParticleState, and subclass, get item method --- stonesoup/types/tests/test_state.py | 37 ++++++++++++++++++++++++++++- 1 file changed, 36 insertions(+), 1 deletion(-) diff --git a/stonesoup/types/tests/test_state.py b/stonesoup/types/tests/test_state.py index 27968a054..90f344152 100644 --- a/stonesoup/types/tests/test_state.py +++ b/stonesoup/types/tests/test_state.py @@ -13,7 +13,7 @@ from ..state import CreatableFromState from ..state import State, GaussianState, ParticleState, EnsembleState, \ StateMutableSequence, WeightedGaussianState, SqrtGaussianState, CategoricalState, \ - CompositeState, InformationState + CompositeState, InformationState, MultiModelParticleState, RaoBlackwellisedParticleState from ...base import Property @@ -241,6 +241,41 @@ def test_particlestate(): ParticleState(None, particle_list=particle_list3, timestamp=timestamp) +@pytest.mark.parametrize( + 'particle_class', [ParticleState, MultiModelParticleState, RaoBlackwellisedParticleState]) +def test_particle_get_item(particle_class): + with pytest.raises(TypeError): + particle_class() + + # Create 10 1d particles: [[0,0,0,0,0,100,100,100,100,100]] + # with equal weight + num_particles = 10 + weight = Probability(1/num_particles) + particles = StateVectors(np.concatenate( + (np.tile([[0]], num_particles//2), np.tile([[100]], num_particles//2)), axis=1)) + weights = np.tile(weight, num_particles) + timestamp = datetime.datetime.now() + + # Test state without timestamp with weight and parent + parent_state = particle_class(particles, weight=weights) + state = particle_class(particles, parent=parent_state, weight=weights) + assert np.allclose(state[0].state_vector, StateVector([[0]])) + assert np.allclose(state[-1].state_vector, StateVector([[100]])) + + assert pytest.approx(1/num_particles) == state[0].weight + assert pytest.approx(1/num_particles) == state[-1].weight + + assert np.allclose(state[0].parent.state_vector, state[0].state_vector) + assert np.allclose(state[-1].parent.state_vector, state[-1].state_vector) + + # Single particle, timestamp only + state = 
particle_class([[0], [1]], timestamp=timestamp) + assert state.ndim == 2 + assert np.array_equal(state.mean, state[0].state_vector) + assert np.allclose(state[0].state_vector, StateVector([[0], [1]])) + assert np.allclose(state[-1].state_vector, StateVector([[0], [1]])) + + def test_particlestate_weighted(): num_particles = 10 From 823af1ec03cdc9c3725cb7a765451e5aee132a43 Mon Sep 17 00:00:00 2001 From: Steven Hiscocks Date: Thu, 20 Oct 2022 13:17:21 +0100 Subject: [PATCH 09/11] Vectorise model selection in Rao Blackwellised Particle Predictor --- stonesoup/predictor/particle.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/stonesoup/predictor/particle.py b/stonesoup/predictor/particle.py index dcfd8aef9..7e83f677c 100644 --- a/stonesoup/predictor/particle.py +++ b/stonesoup/predictor/particle.py @@ -214,9 +214,9 @@ def predict(self, prior, timestamp=None, **kwargs): # Change the value of the dynamic value randomly according to the # matrix - new_dynamic_models = np.array([ - np.random.choice(len(self.transition_models), p=model_probabiliy) - for model_probabiliy in prior.model_probabilities.T]) + new_dynamic_models = np.argmax( + prior.model_probabilities.astype(float).cumsum(0) > np.random.rand(len(prior)), + axis=0) for model_index, transition_model in enumerate(self.transition_models): From 67ae1070834d57fde9b894d8f02f7b11b4fd6ff8 Mon Sep 17 00:00:00 2001 From: Steven Hiscocks Date: Thu, 20 Oct 2022 14:47:41 +0100 Subject: [PATCH 10/11] Renamed position/model mapping in multi model particles filters Also improve documentation to explain usage better --- stonesoup/predictor/particle.py | 27 ++++++++++++-------- stonesoup/predictor/tests/test_multimodel.py | 22 ++++++++-------- stonesoup/updater/particle.py | 4 +-- 3 files changed, 29 insertions(+), 24 deletions(-) diff --git a/stonesoup/predictor/particle.py b/stonesoup/predictor/particle.py index 7e83f677c..97f7ed205 100644 --- a/stonesoup/predictor/particle.py +++ b/stonesoup/predictor/particle.py @@ -102,18 +102,22 @@ def predict(self, prior, *args, **kwargs): class MultiModelPredictor(Predictor): """MultiModelPredictor class - An implementation of a Particle Filter predictor. + An implementation of a Particle Filter predictor utilising multiple models. """ transition_model = None transition_models: Sequence[TransitionModel] = Property( doc="Transition models used to for particle transition, selected by model index on " - "particle." + "particle. Models dimensions can be subset of the overall state space, by " + "using :attr:`model_mappings`." ) transition_matrix: np.ndarray = Property( doc="n-model by n-model transition matrix." ) - position_mappings: Sequence[Sequence[int]] = Property( - doc="Sequence of position mappings associated with each transition model.") + model_mappings: Sequence[Sequence[int]] = Property( + doc="Sequence of mappings associated with each transition model. This enables mapping " + "between model and state space, enabling use of models that may have different " + "dimensions (e.g. velocity or acceleration). 
Parts of the state that aren't mapped " + "are set to zero.") @property def probabilities(self): @@ -157,7 +161,7 @@ def predict(self, prior, timestamp=None, **kwargs): new_state_vector = self.apply_model( prior[current_model_indices], transition_model, timestamp, - self.position_mappings[model_index], noise=True, **kwargs) + self.model_mappings[model_index], noise=True, **kwargs) new_particle_state.state_vector[:, current_model_indices] = new_state_vector new_particle_state.dynamic_model[current_model_indices] = new_dynamic_models @@ -165,27 +169,28 @@ def predict(self, prior, timestamp=None, **kwargs): return new_particle_state @staticmethod - def apply_model(prior, transition_model, timestamp, position_mapping, **kwargs): + def apply_model(prior, transition_model, timestamp, model_mapping, **kwargs): # Based on given position mapping create a new state vector that contains only the # required states orig_ndim = prior.ndim - prior.state_vector = prior.state_vector[position_mapping, :] + prior.state_vector = prior.state_vector[model_mapping, :] new_state_vector = transition_model.function( prior, time_interval=timestamp - prior.timestamp, **kwargs) # Calculate the indices removed from the state vector to become compatible with the # dynamic model for j in range(orig_ndim): - if j not in position_mapping: + if j not in model_mapping: new_state_vector = np.insert(new_state_vector, j, 0, axis=0) return new_state_vector class RaoBlackwellisedMultiModelPredictor(MultiModelPredictor): - """ParticlePredictor class + """Rao-Blackwellised Multi Model Predictor class - An implementation of a Particle Filter predictor. + An implementation of a Particle Filter predictor utilising multiple models, with per + particle model probabilities. """ @predict_lru_cache() @@ -226,7 +231,7 @@ def predict(self, prior, timestamp=None, **kwargs): new_state_vector = self.apply_model( prior[new_model_indices], transition_model, timestamp, - self.position_mappings[model_index], noise=True, **kwargs) + self.model_mappings[model_index], noise=True, **kwargs) new_particle_state.state_vector[:, new_model_indices] = new_state_vector diff --git a/stonesoup/predictor/tests/test_multimodel.py b/stonesoup/predictor/tests/test_multimodel.py index 7c6578e24..2471f596c 100644 --- a/stonesoup/predictor/tests/test_multimodel.py +++ b/stonesoup/predictor/tests/test_multimodel.py @@ -54,16 +54,16 @@ def test_multi_model(): ConstantVelocity(0.01))), ] # Give the respective position mapping. - position_mappings = [ - [0, 1, 3, 4, 6, 7], - [0, 1, 2, 3, 4, 5, 6, 7, 8], - [0, 1, 3, 4, 6, 7], - [0, 1, 2, 3, 4, 5, 6, 7, 8], - [0, 1, 3, 4, 6, 7, 8], - [0, 1, 2, 3, 4, 6, 7], - [0, 1, 3, 4, 5, 6, 7], - [0, 1, 2, 3, 4, 5, 6, 7], - ] + model_mappings = [ + [0, 1, 3, 4, 6, 7], + [0, 1, 2, 3, 4, 5, 6, 7, 8], + [0, 1, 3, 4, 6, 7], + [0, 1, 2, 3, 4, 5, 6, 7, 8], + [0, 1, 3, 4, 6, 7, 8], + [0, 1, 2, 3, 4, 6, 7], + [0, 1, 3, 4, 5, 6, 7], + [0, 1, 2, 3, 4, 5, 6, 7] + ] # Provide the required transition matrix. 
transition = [ @@ -77,7 +77,7 @@ def test_multi_model(): [0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.65], ] - predictor = MultiModelPredictor(position_mappings=position_mappings, + predictor = MultiModelPredictor(model_mappings=model_mappings, transition_matrix=transition, transition_models=model_list) diff --git a/stonesoup/updater/particle.py b/stonesoup/updater/particle.py index 0951ce051..eee695081 100644 --- a/stonesoup/updater/particle.py +++ b/stonesoup/updater/particle.py @@ -328,10 +328,10 @@ def calculate_model_probabilities(prediction, predictor): for model_index, transition_model in enumerate(predictor.transition_models): required_space_prior = copy.copy(prediction.parent) required_space_prior.state_vector = \ - required_space_prior.state_vector[predictor.position_mappings[model_index], :] + required_space_prior.state_vector[predictor.model_mappings[model_index], :] required_space_pred = copy.copy(prediction) required_space_pred.state_vector = \ - required_space_pred.state_vector[predictor.position_mappings[model_index], :] + required_space_pred.state_vector[predictor.model_mappings[model_index], :] prob_position_given_model_and_old_position = transition_model.pdf( required_space_pred, required_space_prior, From 53a67b9b3f0ef2ac8e15f2e64203df2ff73498e2 Mon Sep 17 00:00:00 2001 From: Steven Hiscocks Date: Fri, 21 Oct 2022 16:20:56 +0100 Subject: [PATCH 11/11] Calculate RBPF Updated model probabilities with logs directly This avoids use of Probability type which isn't designed for use in large array calculations. --- stonesoup/updater/particle.py | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/stonesoup/updater/particle.py b/stonesoup/updater/particle.py index eee695081..39e95a921 100644 --- a/stonesoup/updater/particle.py +++ b/stonesoup/updater/particle.py @@ -3,6 +3,7 @@ import numpy as np from scipy.linalg import inv +from scipy.special import logsumexp from .base import Updater from .kalman import KalmanUpdater, ExtendedKalmanUpdater @@ -339,17 +340,18 @@ def calculate_model_probabilities(prediction, predictor): ) # Looks up p(m_k|m_k-1) - prob_of_transition = np.asanyarray(predictor.transition_matrix)[model_index, :] + log_prob_of_transition = np.log( + np.asfarray(predictor.transition_matrix)[model_index, :]) - product_of_probs = \ - prob_position_given_model_and_old_position \ - * prob_of_transition[:, np.newaxis] \ - * prediction.model_probabilities # p(m_k-1|x_1:k-1) + log_product_of_probs = \ + np.asfarray(np.log(prob_position_given_model_and_old_position)) \ + + log_prob_of_transition[:, np.newaxis] \ + + np.asfarray(np.log(prediction.model_probabilities)) # p(m_k-1|x_1:k-1) - denominator_components.append(np.sum(product_of_probs, axis=0)) + denominator_components.append(logsumexp(log_product_of_probs, axis=0)) # Calculate the denominator - new_probabilities = np.stack(denominator_components) - new_probabilities /= np.sum(new_probabilities, axis=0) + new_log_probabilities = np.stack(denominator_components) + new_log_probabilities -= logsumexp(new_log_probabilities, axis=0) - return new_probabilities + return np.exp(new_log_probabilities)
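
Two of the patches above rely on numerical tricks that are easy to miss in the diffs. The standalone sketches below illustrate them with NumPy/SciPy only; the array shapes and values are toy assumptions chosen for demonstration and are not part of the patch series itself.

PATCH 09/11 replaces the per-particle `np.random.choice` loop with a vectorised draw: each column of `model_probabilities` is treated as a categorical CDF, one uniform is drawn per particle, and `argmax` picks the first row whose cumulative probability exceeds it (inverse-CDF sampling). A minimal sketch, assuming a 3-model by 3-particle toy array:

import numpy as np

rng = np.random.default_rng(7)

# Assumed toy input: per-particle model probabilities, shape (n_models, n_particles),
# with each column summing to one.
model_probabilities = np.array([
    [0.7, 0.1, 0.2],
    [0.2, 0.8, 0.3],
    [0.1, 0.1, 0.5],
])
n_particles = model_probabilities.shape[1]

# Columns of the cumulative sum are per-particle CDFs; comparing against one uniform
# draw per particle and taking the first True row samples a model index per particle.
u = rng.random(n_particles)
sampled_models = np.argmax(model_probabilities.cumsum(axis=0) > u, axis=0)
print(sampled_models)  # one sampled model index (0..n_models-1) per particle

PATCH 11/11 moves the model-probability update into log space because the product of transition densities, transition probabilities and prior model probabilities can underflow double precision for realistic particle counts, and (as the commit message notes) the `Probability` type is not designed for large array calculations. A minimal sketch of the normalisation step only, assuming arbitrary log values small enough that direct exponentiation underflows:

import numpy as np
from scipy.special import logsumexp

rng = np.random.default_rng(1)
n_models, n_particles = 3, 1000  # assumed sizes, for illustration only

# Stand-in for log p(x_k|m_k, x_{k-1}) + log p(m_k|m_{k-1}) + log p(m_{k-1}|x_{1:k-1}),
# already combined over the previous model index.
log_unnormalised = rng.normal(loc=-1500.0, scale=10.0, size=(n_models, n_particles))

# Exponentiating first underflows to exactly zero, so normalising divides 0 by 0.
with np.errstate(invalid='ignore'):
    naive = np.exp(log_unnormalised)
    naive /= naive.sum(axis=0)
print(np.isnan(naive).all())  # True: the naive route loses all information

# Normalising in log space with logsumexp stays finite and sums to one per particle.
log_normalised = log_unnormalised - logsumexp(log_unnormalised, axis=0)
print(np.allclose(np.exp(log_normalised).sum(axis=0), 1.0))  # True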