Add metric MultiManager and MetricPlotter (#811)
- MultiManager can take multiple metric generators of the same type, and multiple data sets of the same type, to generate metrics.
- The SimpleManager metric manager is now a subclass of MultiManager, so this is a breaking change.
- All metrics that previously took a set of tracks and a set of truths can now also take two sets of tracks (see the sketch after this list).
- Additional MultiManager methods make it easier to extract metrics and insights.
- A new MetricPlotter class makes plotting metrics easier.
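
As a minimal illustration of the two-track comparison above (a hedged sketch, not code from this commit: the key names 'tracks_a' and 'tracks_b', the generator name, and the hand-built tracks are assumptions), a generator's truths_key can be pointed at a second set of tracks. OSPA is used here because it performs its own assignment, so no associator is needed:

import datetime

import numpy as np

from stonesoup.metricgenerator.manager import MultiManager
from stonesoup.metricgenerator.ospametric import OSPAMetric
from stonesoup.types.state import GaussianState
from stonesoup.types.track import Track

# Two hand-built tracks to compare against each other (illustrative data).
start = datetime.datetime(2023, 10, 3)
track_a = Track([GaussianState([[0], [1], [0], [1]], np.eye(4) * 0.5,
                               timestamp=start + datetime.timedelta(seconds=k))
                 for k in range(5)])
track_b = Track([GaussianState([[0.3], [1], [0.2], [1]], np.eye(4) * 0.5,
                               timestamp=start + datetime.timedelta(seconds=k))
                 for k in range(5)])

# truths_key points at a second set of tracks instead of ground truth;
# 'tracks_a' and 'tracks_b' are arbitrary keys chosen for this sketch.
ospa_track_to_track = OSPAMetric(c=40, p=1,
                                 generator_name='OSPA track-to-track',
                                 tracks_key='tracks_a',
                                 truths_key='tracks_b')

metric_manager = MultiManager([ospa_track_to_track])
metric_manager.add_data({'tracks_a': {track_a}, 'tracks_b': {track_b}})
metrics = metric_manager.generate_metrics()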
rcgorman-dstl authored and gawebb-dstl committed Oct 3, 2023
1 parent faccbc7 commit 8f1a45b
Showing 27 changed files with 1,625 additions and 838 deletions.
63 changes: 25 additions & 38 deletions docs/examples/MTT_3D_Platform.py
@@ -320,7 +320,11 @@
 
 # OSPA metric
 from stonesoup.metricgenerator.ospametric import OSPAMetric
-ospa_generator = OSPAMetric(c=40, p=1)
+ospa_generator = OSPAMetric(c=40, p=1,
+                            generator_name='OSPA metrics',
+                            tracks_key='tracks',
+                            truths_key='truths'
+                            )
 
 # SIAP metrics
 from stonesoup.metricgenerator.tracktotruthmetrics import SIAPMetrics
@@ -329,77 +333,60 @@
 SIAPvel_measure = Euclidean(mapping=np.array([1, 3]))
 siap_generator = SIAPMetrics(
     position_measure=SIAPpos_measure,
-    velocity_measure=SIAPvel_measure
+    velocity_measure=SIAPvel_measure,
+    generator_name='SIAP metrics',
+    tracks_key='tracks',
+    truths_key='truths'
 )
 
 # Uncertainty metric
 from stonesoup.metricgenerator.uncertaintymetric import \
     SumofCovarianceNormsMetric
-uncertainty_generator = SumofCovarianceNormsMetric()
+
+uncertainty_generator = SumofCovarianceNormsMetric(generator_name='Uncertainty metric',
+                                                   tracks_key='tracks')
 
 # %%
 # The metric manager requires us to define an associator. Here we want to
 # compare the track estimates with the ground truth.
 
 from stonesoup.dataassociator.tracktotrack import TrackToTruth
 associator = TrackToTruth(association_threshold=30)
 
-from stonesoup.metricgenerator.manager import SimpleManager
-metric_manager = SimpleManager(
+from stonesoup.metricgenerator.manager import MultiManager
+metric_manager = MultiManager(
     [ospa_generator, siap_generator, uncertainty_generator],
     associator=associator
 )
 
 
 # %%
 # Since we saved the groundtruth and tracks before, we can easily add them
 # to the metric manager now, and then tell it to generate the metrics.
-metric_manager.add_data(groundtruth_plot, tracks_plot)
+metric_manager.add_data({'truths': groundtruth_plot, 'tracks': tracks_plot})
 metrics = metric_manager.generate_metrics()
 
 
 # %%
 # The first metric we will look at is the OSPA metric.
-ospa_metric = metrics["OSPA distances"]
+from stonesoup.plotter import MetricPlotter
 
-fig, ax = plt.subplots()
-ax.plot([i.timestamp for i in ospa_metric.value],
-        [i.value for i in ospa_metric.value])
-ax.set_ylabel("OSPA distance")
-_ = ax.set_xlabel("Time")
+fig1 = MetricPlotter()
+fig1.plot_metrics(metrics, generator_names=['OSPA metrics'])
 
 
 # %%
 # Next are the SIAP metrics. Specifically, we will look at the position and
 # velocity accuracy.
-position_accuracy = metrics['SIAP Position Accuracy at times']
-velocity_accuracy = metrics['SIAP Velocity Accuracy at times']
-times = metric_manager.list_timestamps()
-
-# Make a figure with 2 subplots.
-fig, axes = plt.subplots(2)
-
-# The first subplot will show the position accuracy
-axes[0].set(title='Positional Accuracy Over Time', xlabel='Time',
-            ylabel='Accuracy')
-axes[0].plot(times, [metric.value for metric in position_accuracy.value])
-
-# The second subplot will show the velocity accuracy
-axes[1].set(title='Velocity Accuracy Over Time', xlabel='Time',
-            ylabel='Accuracy')
-axes[1].plot(times, [metric.value for metric in velocity_accuracy.value])
-plt.tight_layout()
+fig2 = MetricPlotter()
+fig2.plot_metrics(metrics, metric_names=['SIAP Position Accuracy at times',
+                                         'SIAP Velocity Accuracy at times'])
+fig2.set_fig_title('SIAP metrics')
 
 
 # %%
 # Finally, we will examine a general uncertainty metric. This is calculated as
 # the sum of the norms of the covariance matrices of each estimated state.
 # Since the sum is not normalized for the number of estimated states, it is
 # most important to look at the trends of this graph rather than the values.
-uncertainty_metric = metrics["Sum of Covariance Norms Metric"]
-
-fig, ax = plt.subplots()
-ax.plot([i.timestamp for i in uncertainty_metric.value],
-        [i.value for i in uncertainty_metric.value])
-_ = ax.set(title="Track Uncertainty Over Time", xlabel="Time",
-           ylabel="Sum of covariance matrix norms")
+fig3 = MetricPlotter()
+fig3.plot_metrics(metrics, generator_names=['Uncertainty metric'])
+fig3.set_ax_title(['Track uncertainty over time'])
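
As a side note on the uncertainty metric in the final cell above, here is a hypothetical numpy sketch of the quantity it sums at one timestamp. The Frobenius norm (numpy's default matrix norm) is an assumption; the example only states "sum of the norms of the covariance matrices":

import numpy as np

# Covariances of two estimated states at one timestamp (illustrative values).
covars = [np.diag([1.0, 0.5, 1.0, 0.5]), np.diag([2.0, 1.0, 2.0, 1.0])]

# Unnormalised sum of matrix norms: it grows with the number of estimated
# states as well as with their uncertainty, hence "look at the trends".
total = sum(np.linalg.norm(covar) for covar in covars)
print(total)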