
Commit

Merge branch '259-bug-incorrect-gradient-object-from-basemodels1' into 251-bug-scheduled-test-failing-on-pybamm-235
BradyPlanden committed Mar 25, 2024
2 parents 8a1d77b + 47767c7 commit c445aff
Showing 6 changed files with 39 additions and 73 deletions.
1 change: 1 addition & 0 deletions CHANGELOG.md
@@ -22,6 +22,7 @@

## Bug Fixes

+ - [#259](https://github.com/pybop-team/PyBOP/pull/259) - Fixes gradient calculation from `model.simulateS1` to remove cross-pollution and refactors `cost._evaluateS1` for fitting costs.
- [#233](https://github.com/pybop-team/PyBOP/pull/233) - Enforces model rebuild on initialisation of a Problem to allow a change of experiment, fixes if statement triggering current function update, updates `predictions` to `simulation` to keep distinction between `predict` and `simulate` and adds `test_changes`.
- [#123](https://github.com/pybop-team/PyBOP/issues/123) - Reinstates check for availability of parameter sets via PyBaMM upon retrieval by `pybop.ParameterSet.pybamm()`.
- [#196](https://github.com/pybop-team/PyBOP/issues/196) - Fixes failing observer cost tests.
5 changes: 3 additions & 2 deletions examples/standalone/problem.py
@@ -78,6 +78,7 @@ def evaluateS1(self, x):

y = {signal: x[0] * self._time_data + x[1] for signal in self.signal}

-        dy = [self._time_data, np.zeros(self._time_data.shape)]
+        dy = np.zeros((self.n_time_data, self.n_outputs, self.n_parameters))
+        dy[:, 0, 0] = self._time_data

-        return (y, np.asarray(dy))
+        return (y, dy)
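As a point of reference, the new sensitivity layout can be exercised in isolation. The following is a minimal standalone sketch (hypothetical sizes, not the PyBOP API); unlike the snippet above, it also fills the second parameter's derivative of the linear model y = a * t + b:

import numpy as np

# Axis 0 = time, axis 1 = output signal, axis 2 = parameter.
t = np.linspace(0, 10, 5)  # n_time_data = 5
n_outputs, n_parameters = 1, 2

dy = np.zeros((t.size, n_outputs, n_parameters))
dy[:, 0, 0] = t    # dy/da for y = a * t + b
dy[:, 0, 1] = 1.0  # dy/db

assert dy.shape == (5, 1, 2)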
30 changes: 6 additions & 24 deletions pybop/costs/_likelihoods.py
@@ -101,16 +101,8 @@ def _evaluateS1(self, x, grad=None):

r = np.array([self._target[signal] - y[signal] for signal in self.signal])
likelihood = self._evaluate(x)

-        if self.n_outputs == 1:
-            r = r.reshape(self.problem.n_time_data)
-            dy = dy.reshape(self.n_parameters, self.problem.n_time_data)
-            dl = self.sigma2 * np.sum((r * dy), axis=1)
-            return likelihood, dl
-        else:
-            r = r.reshape(self.n_outputs, self.problem.n_time_data)
-            dl = self.sigma2 * np.sum((r[:, :, np.newaxis] * dy), axis=1)
-            return likelihood, np.sum(dl, axis=1)
+        dl = np.sum((self.sigma2 * np.sum((r * dy.T), axis=2)), axis=1)
+        return likelihood, dl


class GaussianLogLikelihood(BaseLikelihood):
@@ -186,17 +178,7 @@ def _evaluateS1(self, x, grad=None):

r = np.array([self._target[signal] - y[signal] for signal in self.signal])
likelihood = self._evaluate(x)

-        if self.n_outputs == 1:
-            r = r.reshape(self.problem.n_time_data)
-            dy = dy.reshape(self.n_parameters, self.problem.n_time_data)
-            dl = sigma ** (-2.0) * np.sum((r * dy), axis=1)
-            dsigma = -self.n_time_data / sigma + sigma**-(3.0) * np.sum(r**2, axis=0)
-            dl = np.concatenate((dl, dsigma))
-            return likelihood, dl
-        else:
-            r = r.reshape(self.n_outputs, self.problem.n_time_data)
-            dl = sigma ** (-2.0) * np.sum((r[:, :, np.newaxis] * dy), axis=1)
-            dsigma = -self.n_time_data / sigma + sigma**-(3.0) * np.sum(r**2, axis=0)
-            dl = np.concatenate((dl, dsigma))
-            return likelihood, np.sum(dl, axis=1)
+        dl = sigma ** (-2.0) * np.sum((r * dy.T), axis=2)
+        dsigma = -self.n_time_data / sigma + sigma**-(3.0) * np.sum(r**2, axis=1)
+        dl = np.concatenate((dl.flatten(), dsigma))
+        return likelihood, dl
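A quick shape check of the vectorised gradients above, with hypothetical sizes: r has shape (n_outputs, n_time_data) and dy has shape (n_time_data, n_outputs, n_parameters), so dy.T has shape (n_parameters, n_outputs, n_time_data) and broadcasts against r. Treating sigma as per-output is an assumption of this sketch:

import numpy as np

n_time_data, n_outputs, n_parameters = 100, 2, 3
rng = np.random.default_rng(0)
r = rng.standard_normal((n_outputs, n_time_data))
dy = rng.standard_normal((n_time_data, n_outputs, n_parameters))

# Known-sigma case: summing axis=2 (time) and then axis=1 (outputs)
# leaves one gradient entry per parameter.
sigma2 = 0.25
dl = np.sum(sigma2 * np.sum(r * dy.T, axis=2), axis=1)
assert dl.shape == (n_parameters,)

# Unknown-sigma case: a per-output sigma appends n_outputs entries for dsigma.
sigma = np.ones(n_outputs)
dl = sigma ** (-2.0) * np.sum(r * dy.T, axis=2)  # (n_parameters, n_outputs)
dsigma = -n_time_data / sigma + sigma ** (-3.0) * np.sum(r**2, axis=1)
grad = np.concatenate((dl.flatten(), dsigma))
assert grad.shape == (n_parameters * n_outputs + n_outputs,)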
32 changes: 7 additions & 25 deletions pybop/costs/fitting_costs.py
@@ -87,23 +87,14 @@ def _evaluateS1(self, x):
return e, de

r = np.array([y[signal] - self._target[signal] for signal in self.signal])
+        e = np.sqrt(np.mean(r**2, axis=1))
+        de = np.mean((r * dy.T), axis=2) / (
+            np.sqrt(np.mean((r * dy.T) ** 2, axis=2)) + np.finfo(float).eps
+        )

-        if self.n_outputs == 1:
-            r = r.reshape(self.problem.n_time_data)
-            dy = dy.reshape(self.n_parameters, self.problem.n_time_data)
-            e = np.sqrt(np.mean(r**2))
-            de = np.mean((r * dy), axis=1) / (
-                np.sqrt(np.mean((r * dy) ** 2, axis=1) + np.finfo(float).eps)
-            )
-            return e.item(), de.flatten()
-        else:
-            r = r.reshape(self.n_outputs, self.problem.n_time_data)
-            e = np.sqrt(np.mean(r**2, axis=1))
-            de = np.mean((r[:, :, np.newaxis] * dy), axis=1) / (
-                np.sqrt(np.mean((r[:, :, np.newaxis] * dy) ** 2, axis=1))
-                + np.finfo(float).eps
-            )
-            return np.sum(e), np.sum(de, axis=1)

def set_fail_gradient(self, de):
@@ -208,19 +199,10 @@ def _evaluateS1(self, x):
return e, de

r = np.array([y[signal] - self._target[signal] for signal in self.signal])
+        e = np.sum(np.sum(r**2, axis=0), axis=0)
+        de = 2 * np.sum(np.sum((r * dy.T), axis=2), axis=1)

-        if self.n_outputs == 1:
-            r = r.reshape(self.problem.n_time_data)
-            dy = dy.reshape(self.n_parameters, self.problem.n_time_data)
-            e = np.sum(r**2, axis=0)
-            de = 2 * np.sum((r * dy), axis=1)
-            return e.item(), de
-        else:
-            r = r.reshape(self.n_outputs, self.problem.n_time_data)
-            e = np.sum(r**2, axis=1)
-            de = 2 * np.sum((r[:, :, np.newaxis] * dy), axis=1)
-            return np.sum(e), np.sum(de, axis=1)
+        return e, de

def set_fail_gradient(self, de):
"""
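The refactored sum-of-squared-error gradient, de = 2 * sum(r * dy) over time and outputs, can be sanity-checked against central finite differences. A standalone sketch with a toy single-output linear model (hypothetical names, not the PyBOP API):

import numpy as np

t = np.linspace(0.0, 1.0, 50)
target = 2.0 * t + 0.5
theta = np.array([1.8, 0.7])

def model(p):
    # Single-output linear model y = p0 * t + p1 with analytic sensitivities.
    y = p[0] * t + p[1]
    dy = np.stack([t, np.ones_like(t)], axis=-1)  # (n_time, n_parameters)
    return y, dy

y, dy = model(theta)
r = y - target
e = np.sum(r**2)
de = 2 * np.sum(r[:, None] * dy, axis=0)  # analytic gradient

# Central finite differences should agree closely for this quadratic cost.
eps = 1e-6
fd = np.empty(2)
for i in range(2):
    step = eps * np.eye(2)[i]
    fd[i] = (np.sum((model(theta + step)[0] - target) ** 2)
             - np.sum((model(theta - step)[0] - target) ** 2)) / (2 * eps)

assert np.allclose(de, fd, rtol=1e-5)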
24 changes: 16 additions & 8 deletions pybop/models/base_model.py
@@ -406,16 +406,24 @@ def simulateS1(self, inputs, t_eval):
)
y = {signal: sol[signal].data for signal in self.signal}

-            dy = np.asarray(
-                [
-                    sol[signal].sensitivities[key].toarray()
-                    for signal in self.signal
-                    for key in self.fit_keys
-                ]
-            ).reshape(
-                self.n_parameters, sol[self.signal[0]].data.shape[0], self.n_outputs
-            )
+            # Extract the sensitivities and stack them along a new axis for each signal
+            dy = np.empty(
+                (
+                    sol[self.signal[0]].data.shape[0],
+                    self.n_outputs,
+                    self.n_parameters,
+                )
+            )
+
+            for i, signal in enumerate(self.signal):
+                dy[:, i, :] = np.stack(
+                    [
+                        sol[signal].sensitivities[key].toarray()[:, 0]
+                        for key in self.fit_keys
+                    ],
+                    axis=-1,
+                )

return y, dy

else:
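The stacking step can be mimicked outside PyBaMM to see the resulting layout. In this sketch the solution and sensitivity objects are replaced by plain arrays (hypothetical data); signals and fit_keys stand in for self.signal and self.fit_keys, and each fake sensitivity mirrors the (n_time, 1) column that toarray() returns:

import numpy as np

n_time = 4
signals = ["Voltage [V]", "Current [A]"]
fit_keys = ["R0", "C1", "R1"]

# One (n_time, 1) column per (signal, parameter) pair.
rng = np.random.default_rng(1)
sens = {(s, k): rng.random((n_time, 1)) for s in signals for k in fit_keys}

dy = np.empty((n_time, len(signals), len(fit_keys)))
for i, signal in enumerate(signals):
    # Stack the per-parameter columns along the last axis.
    dy[:, i, :] = np.stack(
        [sens[(signal, key)][:, 0] for key in fit_keys], axis=-1
    )

assert dy.shape == (n_time, 2, 3)  # (n_time_data, n_outputs, n_parameters)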
20 changes: 6 additions & 14 deletions tests/integration/test_parameterisations.py
@@ -120,11 +120,8 @@ def test_spm_optimisers(self, optimiser, spm_costs):
        elif optimiser in [pybop.GradientDescent]:
            if isinstance(spm_costs, pybop.GaussianLogLikelihoodKnownSigma):
                parameterisation.optimiser.set_learning_rate(1.8e-5)
-               parameterisation.set_max_unchanged_iterations(iterations=75)
-               parameterisation.set_max_iterations(200)
            else:
                parameterisation.optimiser.set_learning_rate(0.02)
-               parameterisation.set_max_iterations(150)
x, final_cost = parameterisation.run()

elif optimiser in [pybop.SciPyMinimize]:
@@ -136,10 +133,7 @@

# Assertions
assert initial_cost > final_cost
-        if optimiser in [pybop.Adam, pybop.GradientDescent]:
-            np.testing.assert_allclose(x, self.ground_truth, atol=3.0e-2)
-        else:
-            np.testing.assert_allclose(x, self.ground_truth, atol=2.0e-2)
+        np.testing.assert_allclose(x, self.ground_truth, atol=2.0e-2)

@pytest.fixture
def spm_two_signal_cost(self, parameters, model, cost_class):
@@ -174,7 +168,7 @@ def spm_two_signal_cost(self, parameters, model, cost_class):
"multi_optimiser",
[
pybop.SciPyDifferentialEvolution,
-            pybop.Adam,
+            pybop.IRPropMin,
pybop.CMAES,
],
)
@@ -198,12 +192,10 @@ def test_multiple_signals(self, multi_optimiser, spm_two_signal_cost):
optimiser=multi_optimiser,
sigma0=0.02,
)
-        parameterisation.set_max_iterations(200)
+        parameterisation.set_max_unchanged_iterations(iterations=35, threshold=5e-4)
+        parameterisation.set_max_iterations(125)

-        if multi_optimiser in [pybop.Adam]:
-            parameterisation.set_max_unchanged_iterations(iterations=65, threshold=1e-5)
-        else:
-            parameterisation.set_max_unchanged_iterations(iterations=35, threshold=5e-4)
        initial_cost = parameterisation.cost(spm_two_signal_cost.x0)

if multi_optimiser in [pybop.SciPyDifferentialEvolution]:
parameterisation.optimiser.set_population_size(5)
@@ -213,7 +205,7 @@

# Assertions
assert initial_cost > final_cost
-        np.testing.assert_allclose(x, self.ground_truth, atol=3e-2)
+        np.testing.assert_allclose(x, self.ground_truth, atol=2.5e-2)

@pytest.mark.parametrize("init_soc", [0.4, 0.6])
@pytest.mark.integration
