Skip to content

Commit

Permalink
remove non-integration test sections, tweak hyper values, add optimi…
Browse files Browse the repository at this point in the history
…ser dependent assertions
  • Loading branch information
BradyPlanden committed Mar 23, 2024
1 parent 8e7b777 commit 8a1d77b
Show file tree
Hide file tree
Showing 2 changed files with 13 additions and 30 deletions.
2 changes: 1 addition & 1 deletion pybop/optimisers/scipy_optimisers.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@ class SciPyMinimize(BaseOptimiser):
"""

def __init__(
self, method=None, bounds=None, maxiter=None, wrap_cost=False, tol=1e-5
self, method=None, bounds=None, maxiter=None, wrap_cost=False, tol=1e-6
):
super().__init__()
self.method = method
Expand Down
41 changes: 12 additions & 29 deletions tests/integration/test_parameterisations.py
Original file line number Diff line number Diff line change
Expand Up @@ -109,41 +109,24 @@ def test_spm_optimisers(self, optimiser, spm_costs):
optimiser=optimiser,
sigma0=0.05,
)
parameterisation.set_max_unchanged_iterations(iterations=45, threshold=5e-4)
parameterisation.set_max_unchanged_iterations(iterations=55, threshold=1e-5)
parameterisation.set_max_iterations(125)

initial_cost = parameterisation.cost(spm_costs.x0)

if optimiser in [pybop.CMAES]:
parameterisation.set_f_guessed_tracking(True)
parameterisation.cost.problem.model.allow_infeasible_solutions = False
assert parameterisation._use_f_guessed is True
parameterisation.set_max_iterations(1)
x, final_cost = parameterisation.run()

parameterisation.set_f_guessed_tracking(False)
parameterisation.set_max_iterations(125)

x, final_cost = parameterisation.run()
assert parameterisation._max_iterations == 125

elif optimiser in [pybop.GradientDescent]:
if isinstance(spm_costs, pybop.GaussianLogLikelihoodKnownSigma):
parameterisation.optimiser.set_learning_rate(1.8e-5)
parameterisation.set_max_unchanged_iterations(iterations=65)
parameterisation.set_max_unchanged_iterations(iterations=75)
parameterisation.set_max_iterations(200)
else:
parameterisation.optimiser.set_learning_rate(0.02)
parameterisation.set_max_iterations(150)
x, final_cost = parameterisation.run()

elif optimiser in [pybop.SciPyDifferentialEvolution]:
with pytest.raises(ValueError):
parameterisation.optimiser.set_population_size(-5)

parameterisation.optimiser.set_population_size(5)
x, final_cost = parameterisation.run()

elif optimiser in [pybop.SciPyMinimize]:
parameterisation.cost.problem.model.allow_infeasible_solutions = False
x, final_cost = parameterisation.run()
Expand All @@ -153,7 +136,10 @@ def test_spm_optimisers(self, optimiser, spm_costs):

# Assertions
assert initial_cost > final_cost
np.testing.assert_allclose(x, self.ground_truth, atol=2.5e-2)
if optimiser in [pybop.Adam, pybop.GradientDescent]:
np.testing.assert_allclose(x, self.ground_truth, atol=3.0e-2)
else:
np.testing.assert_allclose(x, self.ground_truth, atol=2.0e-2)

@pytest.fixture
def spm_two_signal_cost(self, parameters, model, cost_class):
Expand Down Expand Up @@ -210,12 +196,12 @@ def test_multiple_signals(self, multi_optimiser, spm_two_signal_cost):
parameterisation = pybop.Optimisation(
cost=spm_two_signal_cost,
optimiser=multi_optimiser,
sigma0=0.03,
sigma0=0.02,
)
parameterisation.set_max_iterations(200)

if multi_optimiser in [pybop.Adam]:
parameterisation.set_max_unchanged_iterations(iterations=85, threshold=1e-5)
parameterisation.set_max_unchanged_iterations(iterations=65, threshold=1e-5)
else:
parameterisation.set_max_unchanged_iterations(iterations=35, threshold=5e-4)

Expand All @@ -227,15 +213,15 @@ def test_multiple_signals(self, multi_optimiser, spm_two_signal_cost):

# Assertions
assert initial_cost > final_cost
np.testing.assert_allclose(x, self.ground_truth, atol=2.5e-2)
np.testing.assert_allclose(x, self.ground_truth, atol=3e-2)

@pytest.mark.parametrize("init_soc", [0.4, 0.6])
@pytest.mark.integration
def test_model_misparameterisation(self, parameters, model, init_soc):
# Define two different models with different parameter sets
# The optimisation should fail as the models are not the same
second_parameter_set = pybop.ParameterSet.pybamm("Ecker2015")
second_model = pybop.lithium_ion.SPM(parameter_set=second_parameter_set)
second_model = pybop.lithium_ion.SPMe(parameter_set=second_parameter_set)

# Form dataset
solution = self.getdata(second_model, self.ground_truth, init_soc)
Expand All @@ -248,10 +234,7 @@ def test_model_misparameterisation(self, parameters, model, init_soc):
)

# Define the cost to optimise
signal = ["Voltage [V]"]
problem = pybop.FittingProblem(
model, parameters, dataset, signal=signal, init_soc=init_soc
)
problem = pybop.FittingProblem(model, parameters, dataset, init_soc=init_soc)
cost = pybop.RootMeanSquaredError(problem)

# Select optimiser
Expand All @@ -266,7 +249,7 @@ def test_model_misparameterisation(self, parameters, model, init_soc):
# Assertions
with np.testing.assert_raises(AssertionError):
np.testing.assert_allclose(final_cost, 0, atol=1e-2)
np.testing.assert_allclose(x, self.ground_truth, atol=2e-2)
np.testing.assert_allclose(x, self.ground_truth, atol=2.5e-2)

def getdata(self, model, x, init_soc):
model.parameter_set.update(
Expand Down

0 comments on commit 8a1d77b

Please sign in to comment.