|
14 | 14 | from ..predictor.particle import MultiModelPredictor, RaoBlackwellisedMultiModelPredictor |
15 | 15 | from ..resampler import Resampler |
16 | 16 | from ..regulariser import Regulariser |
| 17 | +from ..types.numeric import Probability |
17 | 18 | from ..types.prediction import ( |
18 | 19 | Prediction, ParticleMeasurementPrediction, GaussianStatePrediction, MeasurementPrediction) |
19 | 20 | from ..types.update import ParticleStateUpdate, Update |
@@ -519,3 +520,100 @@ def _log_space_product(A, B): |
519 | 520 | Astack = np.stack([A] * B.shape[1]).transpose(1, 0, 2) |
520 | 521 | Bstack = np.stack([B] * A.shape[0]).transpose(0, 2, 1) |
521 | 522 | return np.squeeze(logsumexp(Astack + Bstack, axis=2)) |
| 523 | + |
| 524 | + |
class SMCPHDUpdater(ParticleUpdater):
    """ SMC-PHD updater class

    Sequential Monte-Carlo (SMC) PHD updater implementation, based on [1]_ .

    Notes
    -----
    - It is assumed that the proposal distribution is the same as the dynamics
    - Target "spawning" is not implemented

    .. [1] Ba-Ngu Vo, S. Singh and A. Doucet, "Sequential Monte Carlo Implementation of the
       PHD Filter for Multi-target Tracking," Sixth International Conference of Information
       Fusion, 2003. Proceedings of the, 2003, pp. 792-799, doi: 10.1109/ICIF.2003.177320.
    .. [2] P. Horridge and S. Maskell, “Using a probabilistic hypothesis density filter to
       confirm tracks in a multi-target environment,” in 2011 Jahrestagung der Gesellschaft
       für Informatik, October 2011.
    """
    prob_detect: Probability = Property(
        default=Probability(0.85),
        doc="Target Detection Probability")
    clutter_intensity: float = Property(
        doc="Average number of clutter measurements per time step, per unit volume")
    num_samples: int = Property(
        default=1024,
        doc="The number of samples. Default is 1024")

    def update(self, multihypothesis, **kwargs):
        """ SMC-PHD update step

        Parameters
        ----------
        multihypothesis : :class:`~.MultipleHypothesis`
            A container of :class:`~SingleHypothesis` objects. All hypotheses are assumed to have
            the same prediction (and hence same timestamp).

        Returns
        -------
        : :class:`~.ParticleStateUpdate`
            The state posterior
        """
        # Shallow copy so the per-particle log weights can be overwritten
        # without mutating the hypotheses' shared prediction object
        prediction = copy.copy(multihypothesis[0].prediction)
        # Only hypotheses that are "true" carry an actual detection;
        # missed-detection (null) hypotheses are excluded
        detections = [hypothesis.measurement for hypothesis in multihypothesis if hypothesis]

        # Calculate w^{n,i} Eq. (20) of [2]
        log_weights_per_hyp = self.get_log_weights_per_hypothesis(prediction, detections)

        # Update weights Eq. (8) of [1]
        # w_k^i = \sum_{z \in Z_k}{w^{n,i}}, where i is the index of z in Z_k
        log_post_weights = logsumexp(log_weights_per_hyp, axis=1)
        prediction.log_weight = log_post_weights

        # Resample
        log_num_targets = logsumexp(log_post_weights)  # N_{k|k}
        # Normalize weights (resamplers expect weights that sum to 1)
        prediction.log_weight = log_post_weights - log_num_targets
        if self.resampler is not None:
            prediction = self.resampler.resample(prediction, self.num_samples)  # Resample
        # De-normalize so the weights once again sum to the expected number of targets
        prediction.log_weight = prediction.log_weight + log_num_targets

        return Update.from_state(
            state=multihypothesis[0].prediction,
            state_vector=prediction.state_vector,
            log_weight=prediction.log_weight,
            hypothesis=multihypothesis,
            timestamp=multihypothesis[0].measurement.timestamp,
        )

    def get_log_weights_per_hypothesis(self, prediction, detections):
        """ Compute the per-hypothesis log particle weights w^{n,i} (Eq. (20) of [2]_)

        Parameters
        ----------
        prediction : :class:`~.ParticleStatePrediction`
            The particle state prediction
        detections : list of :class:`~.Detection`
            The detections for the current time step

        Returns
        -------
        : :class:`numpy.ndarray` of shape `(num_samples, num_detections + 1)`
            The log weight of each particle under each hypothesis, where column 0
            corresponds to the missed-detection hypothesis and column `i + 1` to
            detection `i`.
        """
        num_samples = prediction.state_vector.shape[1]

        # Compute g(z|x) matrix as in [1]
        g = self._get_measurement_loglikelihoods(prediction, detections)

        # Calculate w^{n,i} Eq. (20) of [2]
        Ck = self.prob_detect.log() + g + prediction.log_weight[:, np.newaxis]
        C = logsumexp(Ck, axis=0)
        k = np.log(self.clutter_intensity)
        C_plus = np.logaddexp(C, k)
        # Column 0 holds the missed-detection weights; all detection columns
        # default to -inf (i.e. zero weight) until filled in below
        log_weights_per_hyp = np.full((num_samples, len(detections) + 1), -np.inf)
        log_weights_per_hyp[:, 0] = np.log(1 - self.prob_detect) + prediction.log_weight
        if len(detections):
            log_weights_per_hyp[:, 1:] = Ck - C_plus

        return log_weights_per_hyp

    def _get_measurement_loglikelihoods(self, prediction, detections):
        """ Compute the g(z|x) log-likelihood matrix as in [1]_

        Parameters
        ----------
        prediction : :class:`~.ParticleStatePrediction`
            The particle state prediction
        detections : list of :class:`~.Detection`
            The detections for the current time step

        Returns
        -------
        : :class:`numpy.ndarray` of shape `(num_samples, num_detections)`
            The log-likelihood of each detection conditioned on each particle
        """
        num_samples = prediction.state_vector.shape[1]
        # Compute g(z|x) matrix as in [1]
        g = np.zeros((num_samples, len(detections)))
        for i, detection in enumerate(detections):
            # Fall back to the updater's measurement model when the detection
            # does not carry its own
            measurement_model = self._check_measurement_model(detection.measurement_model)
            g[:, i] = measurement_model.logpdf(detection, prediction, noise=True)
        return g