Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Update to allow no bounds #213

Merged
merged 15 commits into from
Feb 29, 2024
Merged
1 change: 1 addition & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@
- [#196](https://github.com/pybop-team/PyBOP/issues/196) - Fixes failing observer cost tests.
- [#63](https://github.com/pybop-team/PyBOP/issues/63) - Removes NLOpt Optimiser from future releases. This is to support deployment to the Apple M-Series platform.
- [#164](https://github.com/pybop-team/PyBOP/issues/164) - Fixes convergence issues with gradient-based optimisers, changes default `model.check_params()` to allow infeasible solutions during optimisation iterations. Adds a feasibility check on the optimal parameters.
- [#211](https://github.com/pybop-team/PyBOP/issues/211) - Allows a subset of parameter bounds or bounds=None to be passed, returning warnings where needed.

# [v23.12](https://github.com/pybop-team/PyBOP/tree/v23.12) - 2023-12-19

Expand Down
7 changes: 3 additions & 4 deletions examples/scripts/spm_IRPropMin.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,12 +10,10 @@
pybop.Parameter(
"Negative electrode active material volume fraction",
prior=pybop.Gaussian(0.6, 0.05),
bounds=[0.5, 0.8],
),
pybop.Parameter(
"Positive electrode active material volume fraction",
prior=pybop.Gaussian(0.48, 0.05),
bounds=[0.4, 0.7],
),
]

Expand Down Expand Up @@ -52,7 +50,8 @@
pybop.plot_parameters(optim)

# Plot the cost landscape
pybop.plot_cost2d(cost, steps=15)
bounds = np.array([[0.5, 0.8], [0.4, 0.7]])
pybop.plot_cost2d(cost, bounds=bounds, steps=15)

# Plot the cost landscape with optimisation path
pybop.plot_cost2d(cost, optim=optim, steps=15)
pybop.plot_cost2d(cost, optim=optim, bounds=bounds, steps=15)
7 changes: 3 additions & 4 deletions examples/scripts/spm_adam.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,12 +10,10 @@
pybop.Parameter(
"Negative electrode active material volume fraction",
prior=pybop.Gaussian(0.68, 0.05),
bounds=[0.5, 0.8],
),
pybop.Parameter(
"Positive electrode active material volume fraction",
prior=pybop.Gaussian(0.58, 0.05),
bounds=[0.4, 0.7],
),
]

Expand Down Expand Up @@ -54,7 +52,8 @@
pybop.plot_parameters(optim)

# Plot the cost landscape
pybop.plot_cost2d(cost, steps=15)
bounds = np.array([[0.5, 0.8], [0.4, 0.7]])
pybop.plot_cost2d(cost, bounds=bounds, steps=15)

# Plot the cost landscape with optimisation path
pybop.plot_cost2d(cost, optim=optim, steps=15)
pybop.plot_cost2d(cost, optim=optim, bounds=bounds, steps=15)
7 changes: 3 additions & 4 deletions examples/scripts/spm_descent.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,12 +10,10 @@
pybop.Parameter(
"Negative electrode active material volume fraction",
prior=pybop.Gaussian(0.68, 0.05),
bounds=[0.5, 0.8],
),
pybop.Parameter(
"Positive electrode active material volume fraction",
prior=pybop.Gaussian(0.58, 0.05),
bounds=[0.4, 0.7],
),
]

Expand Down Expand Up @@ -56,7 +54,8 @@
pybop.plot_parameters(optim)

# Plot the cost landscape
pybop.plot_cost2d(cost, steps=15)
bounds = np.array([[0.5, 0.8], [0.4, 0.7]])
pybop.plot_cost2d(cost, bounds=bounds, steps=15)

# Plot the cost landscape with optimisation path
pybop.plot_cost2d(cost, optim=optim, steps=15)
pybop.plot_cost2d(cost, optim=optim, bounds=bounds, steps=15)
5 changes: 2 additions & 3 deletions pybop/_optimisation.py
Original file line number Diff line number Diff line change
Expand Up @@ -51,8 +51,8 @@ def __init__(
self.verbose = verbose
self.x0 = cost.x0
self.bounds = cost.bounds
self.sigma0 = sigma0 or cost.sigma0
self.n_parameters = cost.n_parameters
self.sigma0 = sigma0
self.physical_viability = physical_viability
self.allow_infeasible_solutions = allow_infeasible_solutions
self.log = []
Expand Down Expand Up @@ -94,7 +94,7 @@ def __init__(
if issubclass(
self.optimiser, (pybop.SciPyMinimize, pybop.SciPyDifferentialEvolution)
):
self.optimiser = self.optimiser()
self.optimiser = self.optimiser(bounds=self.bounds)

else:
raise ValueError("Unknown optimiser type")
Expand Down Expand Up @@ -178,7 +178,6 @@ def _run_pybop(self):
x, final_cost = self.optimiser.optimise(
cost_function=self.cost,
x0=self.x0,
bounds=self.bounds,
maxiter=self._max_iterations,
)
self.log = self.optimiser.log
Expand Down
29 changes: 24 additions & 5 deletions pybop/_problem.py
Original file line number Diff line number Diff line change
Expand Up @@ -45,11 +45,30 @@
self._time_data = None
self._target = None

# Set bounds
self.bounds = dict(
lower=[param.bounds[0] for param in self.parameters],
upper=[param.bounds[1] for param in self.parameters],
)
# Set bounds (for all or no parameters)
all_unbounded = True # assumption
self.bounds = {"lower": [], "upper": []}
for param in self.parameters:
if param.bounds is not None:
self.bounds["lower"].append(param.bounds[0])
self.bounds["upper"].append(param.bounds[1])
NicolaCourtier marked this conversation as resolved.
Show resolved Hide resolved
all_unbounded = False
else:
self.bounds["lower"].append(-np.inf)
self.bounds["upper"].append(np.inf)
if all_unbounded:
self.bounds = None

# Set initial standard deviation (for all or no parameters)
all_have_sigma = True # assumption
self.sigma0 = []
for param in self.parameters:
if hasattr(param.prior, "sigma"):
self.sigma0.append(param.prior.sigma)
NicolaCourtier marked this conversation as resolved.
Show resolved Hide resolved
else:
all_have_sigma = False

Check warning on line 69 in pybop/_problem.py

View check run for this annotation

Codecov / codecov/patch

pybop/_problem.py#L69

Added line #L69 was not covered by tests
if not all_have_sigma:
self.sigma0 = None

Check warning on line 71 in pybop/_problem.py

View check run for this annotation

Codecov / codecov/patch

pybop/_problem.py#L71

Added line #L71 was not covered by tests

# Sample from prior for x0
if x0 is None:
Expand Down
8 changes: 8 additions & 0 deletions pybop/costs/base_cost.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,10 @@ class BaseCost:
The initial guess for the model parameters.
bounds : tuple
The bounds for the model parameters.
sigma0 : scalar or array
Initial standard deviation around ``x0``. Either a scalar value (one
standard deviation for all coordinates) or an array with one entry
per dimension. Not all methods will use this information.
n_parameters : int
The number of parameters in the model.
n_outputs : int
Expand All @@ -26,10 +30,14 @@ class BaseCost:

def __init__(self, problem):
self.problem = problem
self.x0 = None
self.bounds = None
self.sigma0 = None
if problem is not None:
self._target = problem._target
self.x0 = problem.x0
self.bounds = problem.bounds
self.sigma0 = problem.sigma0
self.n_parameters = problem.n_parameters
self.n_outputs = problem.n_outputs

Expand Down
19 changes: 10 additions & 9 deletions pybop/optimisers/base_optimiser.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,13 +8,19 @@ class BaseOptimiser:
methods with specific algorithms.
"""

def __init__(self):
def __init__(self, bounds=None):
"""
Initializes the BaseOptimiser.

Parameters
----------
bounds : sequence or Bounds, optional
Bounds on the parameters. Default is None.
"""
self.bounds = bounds
pass

def optimise(self, cost_function, x0=None, bounds=None, maxiter=None):
def optimise(self, cost_function, x0=None, maxiter=None):
"""
Initiates the optimisation process.

Expand All @@ -26,8 +32,6 @@ def optimise(self, cost_function, x0=None, bounds=None, maxiter=None):
The cost function to be minimised by the optimiser.
x0 : ndarray, optional
Initial guess for the parameters. Default is None.
bounds : sequence or Bounds, optional
Bounds on the parameters. Default is None.
maxiter : int, optional
Maximum number of iterations to perform. Default is None.

Expand All @@ -37,15 +41,14 @@ def optimise(self, cost_function, x0=None, bounds=None, maxiter=None):
"""
self.cost_function = cost_function
self.x0 = x0
self.bounds = bounds
self.maxiter = maxiter

# Run optimisation
result = self._runoptimise(self.cost_function, x0=self.x0, bounds=self.bounds)
result = self._runoptimise(self.cost_function, x0=self.x0)

return result

def _runoptimise(self, cost_function, x0=None, bounds=None):
def _runoptimise(self, cost_function, x0=None):
"""
Contains the logic for the optimisation algorithm.

Expand All @@ -57,8 +60,6 @@ def _runoptimise(self, cost_function, x0=None, bounds=None):
The cost function to be minimised by the optimiser.
x0 : ndarray, optional
Initial guess for the parameters. Default is None.
bounds : sequence or Bounds, optional
Bounds on the parameters. Default is None.

Returns
-------
Expand Down
15 changes: 11 additions & 4 deletions pybop/optimisers/pints_optimisers.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
import pints
import numpy as np


class GradientDescent(pints.GradientDescent):
Expand Down Expand Up @@ -116,12 +117,18 @@
"""

def __init__(self, x0, sigma0=0.1, bounds=None):
if bounds is not None:
if bounds is None:
self.boundaries = None
elif not all(
np.isfinite(value) for sublist in bounds.values() for value in sublist
):
raise ValueError(

Check warning on line 125 in pybop/optimisers/pints_optimisers.py

View check run for this annotation

Codecov / codecov/patch

pybop/optimisers/pints_optimisers.py#L125

Added line #L125 was not covered by tests
"Either all bounds or no bounds must be set for Pints PSO."
)
else:
self.boundaries = pints.RectangularBoundaries(
bounds["lower"], bounds["upper"]
)
else:
self.boundaries = None
super().__init__(x0, sigma0, self.boundaries)


Expand All @@ -138,7 +145,7 @@
x0 : array_like
Initial position from which optimization will start.
sigma0 : float, optional
Initial step size (default is 0.1).
Initial standard deviation of the sampling distribution, defaults to 0.1.
bounds : dict, optional
Lower and upper bounds for each optimization parameter.

Expand Down
37 changes: 18 additions & 19 deletions pybop/optimisers/scipy_optimisers.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@
if self.method is None:
self.method = "COBYLA" # "L-BFGS-B"

def _runoptimise(self, cost_function, x0, bounds):
def _runoptimise(self, cost_function, x0):
"""
Executes the optimization process using SciPy's minimize function.

Expand All @@ -39,8 +39,6 @@
The objective function to minimize.
x0 : array_like
Initial guess for the parameters.
bounds : sequence or `Bounds`
Bounds for the variables.

Returns
-------
Expand Down Expand Up @@ -68,9 +66,10 @@
return cost

# Reformat bounds
if bounds is not None:
if self.bounds is not None:
bounds = (
(lower, upper) for lower, upper in zip(bounds["lower"], bounds["upper"])
(lower, upper)
for lower, upper in zip(self.bounds["lower"], self.bounds["upper"])
)

# Set max iterations
Expand Down Expand Up @@ -137,12 +136,23 @@

def __init__(self, bounds=None, strategy="best1bin", maxiter=1000, popsize=15):
super().__init__()
self.bounds = bounds
self.strategy = strategy
self._max_iterations = maxiter
self._population_size = popsize

def _runoptimise(self, cost_function, x0=None, bounds=None):
if bounds is None:
raise ValueError("Bounds must be specified for differential_evolution.")
elif not all(
np.isfinite(value) for sublist in bounds.values() for value in sublist
):
raise ValueError("Bounds must be specified for differential_evolution.")

Check warning on line 148 in pybop/optimisers/scipy_optimisers.py

View check run for this annotation

Codecov / codecov/patch

pybop/optimisers/scipy_optimisers.py#L148

Added line #L148 was not covered by tests
elif isinstance(bounds, dict):
bounds = [
(lower, upper) for lower, upper in zip(bounds["lower"], bounds["upper"])
]
self.bounds = bounds

def _runoptimise(self, cost_function, x0=None):
"""
Executes the optimization process using SciPy's differential_evolution function.

Expand All @@ -152,18 +162,13 @@
The objective function to minimize.
x0 : array_like, optional
Ignored parameter, provided for API consistency.
bounds : sequence or ``Bounds``
Bounds for the variables, required for differential evolution.

Returns
-------
tuple
A tuple (x, final_cost) containing the optimized parameters and the value of ``cost_function`` at the optimum.
"""

if bounds is None:
raise ValueError("Bounds must be specified for differential_evolution.")

if x0 is not None:
print(
"Ignoring x0. Initial conditions are not used for differential_evolution."
Expand All @@ -175,15 +180,9 @@
def callback(x, convergence):
self.log.append([x])

# Reformat bounds if necessary
if isinstance(bounds, dict):
bounds = [
(lower, upper) for lower, upper in zip(bounds["lower"], bounds["upper"])
]

output = differential_evolution(
cost_function,
bounds,
self.bounds,
strategy=self.strategy,
maxiter=self._max_iterations,
popsize=self._population_size,
Expand Down
Loading
Loading