Fix transformations #474

Merged
21 commits merged on Sep 10, 2024
Commits
5de70d1
Update cost descriptions
NicolaCourtier Aug 22, 2024
46e5748
Move error catches to optimiser cost_call
NicolaCourtier Aug 22, 2024
3c4efee
Add apply_transform and log_update
NicolaCourtier Aug 23, 2024
f2f05c1
Update parameter.py
NicolaCourtier Aug 23, 2024
ad978a7
Merge branch 'develop' into 473-fix-transformations
NicolaCourtier Aug 23, 2024
d4b5092
Convert transformation to _transformation
NicolaCourtier Aug 23, 2024
b3a272d
Fix typo
NicolaCourtier Aug 23, 2024
417530f
Update scipy_optimisers.py
NicolaCourtier Aug 23, 2024
73bc851
Update optim.sigma0 default
NicolaCourtier Aug 23, 2024
3be6e9f
Update cost_call
NicolaCourtier Aug 23, 2024
84f8abb
Use Log gradient from PINTS
NicolaCourtier Aug 23, 2024
22e29d6
Use ScaledTransformation in test
NicolaCourtier Aug 23, 2024
d0c2f49
Update parameter rvs and add tests
NicolaCourtier Aug 24, 2024
ac2075d
Merge branch 'develop' into 473-fix-transformations
NicolaCourtier Aug 28, 2024
d2cee47
style: pre-commit fixes
pre-commit-ci[bot] Aug 28, 2024
83c3621
Merge branch 'refs/heads/develop' into 473-fix-transformations
BradyPlanden Sep 9, 2024
56da0f1
removes try-except for BaseCost, moves cost_call logic into BaseCost.…
BradyPlanden Sep 9, 2024
383419c
apply suggestion from review
BradyPlanden Sep 9, 2024
0273bee
Merge pull request #488 from pybop-team/473b-fix-transformations
martinjrobins Sep 10, 2024
a4384e5
Merge branch 'refs/heads/develop' into 473-fix-transformations
BradyPlanden Sep 10, 2024
ec58489
Adds changelog entry
BradyPlanden Sep 10, 2024
1 change: 1 addition & 0 deletions CHANGELOG.md
@@ -23,6 +23,7 @@

## Bug Fixes

- [#473](https://github.com/pybop-team/PyBOP/pull/473) - Bugfixes for transformation class, adds optional `apply_transform` arg to `BaseCost.__call__()`, adds `log_update()` method to `BaseOptimiser`
- [#464](https://github.com/pybop-team/PyBOP/issues/464) - Fix order of design `parameter_set` updates and refactor `update_capacity`.
- [#468](https://github.com/pybop-team/PyBOP/issue/468) - Renames `quick_plot.py` to `standard_plots.py`.
- [#454](https://github.com/pybop-team/PyBOP/issue/454) - Fixes benchmarking suite.
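For context before the code diffs, the sketch below illustrates the model-space/search-space mapping that the repaired transformation classes provide. It is not PyBOP code: the linear form, the `to_search` name and the numeric values are assumptions chosen for illustration; only `to_model()` and the transformation Jacobian appear in the diffs that follow.

```python
import numpy as np

# Minimal sketch (not PyBOP's implementation): a linear "scaled" transformation
# between model space p and search space q, q = coefficient * (p + intercept).
coefficient, intercept = np.array([1e3, 10.0]), np.array([0.0, -0.5])

def to_search(p):
    # model space -> search space (name assumed for illustration)
    return coefficient * (np.asarray(p) + intercept)

def to_model(q):
    # search space -> model space; the direction the optimiser code uses
    return np.asarray(q) / coefficient - intercept

p = np.array([5e-4, 0.6])           # physically meaningful parameters
q = to_search(p)                    # what the optimiser actually varies
assert np.allclose(to_model(q), p)  # round trip recovers the model parameters
```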
51 changes: 30 additions & 21 deletions pybop/costs/base_cost.py
@@ -21,13 +21,16 @@ class BaseCost:
problem : object
A problem instance containing the data and functions necessary for
evaluating the cost function.
_target : array-like
target : array-like
An array containing the target data to fit.
n_outputs : int
The number of outputs in the model.
_has_separable_problem : bool
has_separable_problem : bool
If True, the problem is separable from the cost function and will be
evaluated in advance of the call to self.compute() (default: False).
_de : float
The gradient of the cost function to use if an error occurs during
evaluation. Defaults to 1.0.
"""

def __init__(self, problem: Optional[BaseProblem] = None):
@@ -61,7 +64,12 @@ def has_separable_problem(self):
def target(self):
return self._target

def __call__(self, inputs: Union[Inputs, list], calculate_grad: bool = False):
def __call__(
self,
inputs: Union[Inputs, list],
calculate_grad: bool = False,
apply_transform: bool = False,
):
"""
This method calls the forward model via problem.evaluate(inputs),
and computes the cost for the given output by calling self.compute().
@@ -70,6 +78,8 @@ def __call__(self, inputs: Union[Inputs, list], calculate_grad: bool = False):
----------
inputs : Inputs or array-like
The parameters for which to compute the cost and gradient.
calculate_grad : bool, optional
A bool condition designating whether to calculate the gradient.

Returns
-------
@@ -81,26 +91,25 @@ def __call__(self, inputs: Union[Inputs, list], calculate_grad: bool = False):
ValueError
If an error occurs during the calculation of the cost.
"""
if self.transformation:
p = self.transformation.to_model(inputs)
inputs = self.parameters.verify(p if self.transformation else inputs)
# Apply transformation if needed
self.has_transform = self.transformation is not None and apply_transform
if self.has_transform:
inputs = self.transformation.to_model(inputs)
inputs = self.parameters.verify(inputs)
self.parameters.update(values=list(inputs.values()))
y, dy = None, None

try:
if self._has_separable_problem:
if calculate_grad is True:
y, dy = self.problem.evaluateS1(self.problem.parameters.as_dict())
else:
y = self.problem.evaluate(self.problem.parameters.as_dict())

return self.compute(y, dy=dy, calculate_grad=calculate_grad)

except NotImplementedError as e:
raise e

except Exception as e:
raise ValueError(f"Error in cost calculation: {e}") from e
y, dy = None, None
if self._has_separable_problem:
if calculate_grad:
y, dy = self.problem.evaluateS1(self.problem.parameters.as_dict())
cost, grad = self.compute(y, dy=dy, calculate_grad=calculate_grad)
if self.has_transform and np.isfinite(cost):
jac = self.transformation.jacobian(inputs)
grad = np.matmul(grad, jac)
return cost, grad

y = self.problem.evaluate(self.problem.parameters.as_dict())
return self.compute(y, dy=dy, calculate_grad=calculate_grad)

def compute(self, y: dict, dy: ndarray, calculate_grad: bool = False):
"""
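The central change in `__call__` above: when a transformation is active the inputs are first mapped back to model space with `to_model()`, and the gradient returned by `compute()` (taken with respect to the model parameters) is mapped into search space via the transformation Jacobian, `grad = np.matmul(grad, jac)`. A standalone sketch of that chain rule, using a log transformation and a toy quadratic cost purely as examples (neither is taken from PyBOP):

```python
import numpy as np

# Chain rule behind `grad = np.matmul(grad, jac)`:
# with q = log(p) as the search space, p = exp(q) and dp/dq = diag(p),
# so dC/dq = dC/dp @ diag(p).
def cost_and_grad_model(p):
    # toy cost in model space: C(p) = sum(p**2), gradient 2p
    return np.sum(p**2), 2.0 * p

q = np.array([-1.0, 0.5])           # point in search (log) space
p = np.exp(q)                       # back to model space
cost, grad_model = cost_and_grad_model(p)

jac = np.diag(p)                    # Jacobian dp/dq of the log transformation
grad_search = grad_model @ jac      # gradient the optimiser should see

# finite-difference check in search space
eps = 1e-6
fd = np.array([
    (cost_and_grad_model(np.exp(q + eps * e))[0] - cost) / eps
    for e in np.eye(2)
])
assert np.allclose(grad_search, fd, atol=1e-4)
```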
18 changes: 6 additions & 12 deletions pybop/costs/fitting_costs.py
@@ -43,7 +43,6 @@ def compute(
-------
float
The root mean square error.

"""
# Verify we have dy if calculate_grad is True
self.verify_args(dy, calculate_grad)
@@ -77,12 +76,6 @@ class SumSquaredError(BaseCost):

Inherits all parameters and attributes from ``BaseCost``.

Additional Attributes
---------------------
_de : float
The gradient of the cost function to use if an error occurs during
evaluation. Defaults to 1.0.

"""

def __init__(self, problem):
@@ -152,8 +145,8 @@ class Minkowski(BaseCost):
optimisation problems, allowing for flexible distance-based optimisation
across various problem domains.

Attributes
----------
Additional Attributes
---------------------
p : float, optional
The order of the Minkowski distance.
"""
@@ -240,9 +233,10 @@ class SumofPower(BaseCost):

[1]: https://mathworld.wolfram.com/PowerSum.html

Attributes:
p : float, optional
The power order for Sum of Power.
Additional Attributes
---------------------
p : float, optional
The power order for Sum of Power.
"""

def __init__(self, problem, p: float = 2.0):
2 changes: 1 addition & 1 deletion pybop/models/base_model.py
@@ -649,7 +649,7 @@ def simulateS1(
key in self.geometric_parameters for key in inputs.keys()
):
raise ValueError(
"Cannot use sensitivies for parameters which require a model rebuild"
"Cannot use sensitivities for parameters which require a model rebuild"
)

# Build if required
47 changes: 38 additions & 9 deletions pybop/optimisers/base_optimiser.py
@@ -61,19 +61,19 @@ def __init__(
self.parameters = Parameters()
self.x0 = None
self.bounds = None
self.sigma0 = 0.1
self.sigma0 = 0.02
self.verbose = False
self.log = dict(x=[], x_best=[], cost=[])
self.minimising = True
self.transformation = None
self._transformation = None
self.physical_viability = False
self.allow_infeasible_solutions = False
self.default_max_iterations = 1000
self.result = None

if isinstance(cost, BaseCost):
self.cost = cost
self.transformation = self.cost.transformation
self._transformation = self.cost.transformation
self.parameters.join(cost.parameters)
self.set_allow_infeasible_solutions()
if isinstance(cost, WeightedCost):
@@ -129,22 +129,21 @@ def set_base_options(self):
"""
# Set initial values, if x0 is None, initial values are unmodified.
self.parameters.update(initial_values=self.unset_options.pop("x0", None))
self.x0 = self.parameters.reset_initial_value()
self.x0 = self.parameters.reset_initial_value(apply_transform=True)

# Set default bounds (for all or no parameters)
self.bounds = self.unset_options.pop("bounds", self.parameters.get_bounds())
self.bounds = self.unset_options.pop(
"bounds", self.parameters.get_bounds(apply_transform=True)
)

# Set default initial standard deviation (for all or no parameters)
self.sigma0 = self.unset_options.pop(
"sigma0", self.parameters.get_sigma0() or self.sigma0
"sigma0", self.parameters.get_sigma0(apply_transform=True) or self.sigma0
)

# Set other options
self.verbose = self.unset_options.pop("verbose", self.verbose)
self.minimising = self.unset_options.pop("minimising", self.minimising)
self.transformation = self.unset_options.pop(
"transformation", self.transformation
)
if "allow_infeasible_solutions" in self.unset_options.keys():
self.set_allow_infeasible_solutions(
self.unset_options.pop("allow_infeasible_solutions")
@@ -199,6 +198,36 @@ def _run(self):
"""
raise NotImplementedError

def log_update(self, x=None, x_best=None, cost=None):
"""
Update the log with new values.

Parameters
----------
x : list or array-like, optional
Parameter values (default: None).
x_best : list or array-like, optional
Parameter values corresponding to the best cost yet (default: None).
cost : float, optional
Cost value (default: None).
"""
if x is not None:
if self._transformation:
x = list(x)
for i, xi in enumerate(x):
x[i] = self._transformation.to_model(xi)
self.log["x"].append(x)

if x_best is not None:
if self._transformation:
x_best = list(x_best)
for i, xi in enumerate(x_best):
x_best[i] = self._transformation.to_model(xi)
self.log["x_best"].append(x_best)

if cost is not None:
self.log["cost"].append(cost)

def check_optimal_parameters(self, x):
"""
Check if the optimised parameters are physically viable.
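The new `log_update()` method centralises logging and, when a transformation is set, converts every logged point from search space back to model space with `to_model()` before storing it. A minimal standalone sketch of that behaviour (the dict layout mirrors `self.log`; the linear scaling used here is an assumption for illustration, not PyBOP code):

```python
import numpy as np

# Sketch of the log_update idea: points arrive in search space and are
# converted to model space before being stored.
scale = np.array([1e3, 10.0])

def to_model(q):
    return np.asarray(q) / scale   # inverse of an assumed linear scaling

log = dict(x=[], x_best=[], cost=[])

def log_update(x=None, x_best=None, cost=None):
    if x is not None:
        log["x"].append([to_model(xi) for xi in x])
    if x_best is not None:
        log["x_best"].append(to_model(x_best))
    if cost is not None:
        log["cost"].append(cost)

# one optimiser iteration with a population of two search-space points
log_update(x=[np.array([0.5, 6.0]), np.array([0.6, 5.0])],
           x_best=np.array([0.5, 6.0]),
           cost=0.12)
print(log["x_best"][0])   # back in model space: approximately [5e-04, 0.6]
```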
37 changes: 19 additions & 18 deletions pybop/optimisers/base_pints_optimiser.py
@@ -190,15 +190,14 @@ def _run(self):
unchanged_iterations = 0

# Choose method to evaluate
if self._needs_sensitivities:

def f(x):
L, dl = self.cost(x, calculate_grad=True)
return (L, dl) if self.minimising else (-L, -dl)
else:

def f(x):
return self.cost(x) if self.minimising else -self.cost(x)
def fun(x):
if self._needs_sensitivities:
L, dl = self.cost(x, calculate_grad=True, apply_transform=True)
else:
L = self.cost(x, apply_transform=True)
dl = None
sign = -1 if not self.minimising else 1
return (sign * L, sign * dl) if dl is not None else sign * L

# Create evaluator object
if self._parallel:
@@ -209,9 +208,9 @@ def f(x):
# particles!
if isinstance(self.pints_optimiser, PintsPopulationBasedOptimiser):
n_workers = min(n_workers, self.pints_optimiser.population_size())
evaluator = PintsParallelEvaluator(f, n_workers=n_workers)
evaluator = PintsParallelEvaluator(fun, n_workers=n_workers)
else:
evaluator = PintsSequentialEvaluator(f)
evaluator = PintsSequentialEvaluator(fun)

# Keep track of current best and best-guess scores.
fb = fg = np.inf
@@ -253,9 +252,11 @@ def f(x):
# Update counts
evaluations += len(fs)
iteration += 1
self.log["x"].append(xs)
self.log["x_best"].append(self.pints_optimiser.x_best())
self.log["cost"].append(fb if self.minimising else -fb)
self.log_update(
x=xs,
x_best=self.pints_optimiser.x_best(),
cost=fb if self.minimising else -fb,
)

# Check stopping criteria:
# Maximum number of iterations
@@ -322,8 +323,8 @@ def f(x):

# Show current parameters
x_user = self.pints_optimiser.x_guessed()
if self.transformation is not None:
x_user = self.transformation.to_model(x_user)
if self._transformation:
x_user = self._transformation.to_model(x_user)
for p in x_user:
print(PintsStrFloat(p))
print("-" * 40)
@@ -345,8 +346,8 @@ def f(x):
f = self.pints_optimiser.f_best()

# Inverse transform search parameters
if self.transformation is not None:
x = self.transformation.to_model(x)
if self._transformation:
x = self._transformation.to_model(x)

return Result(
x=x, final_cost=f if self.minimising else -f, n_iterations=self._iterations
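Above, the two separate `f(x)` definitions are merged into a single `fun(x)` that evaluates the cost in the transformed search space, returns the gradient only when sensitivities are needed, and flips the sign when maximising. A standalone sketch of the same wrapper pattern (the toy cost and the flags are placeholders, not PyBOP objects):

```python
import numpy as np

# Placeholder stand-ins for self.cost, self._needs_sensitivities, self.minimising
def cost(x, calculate_grad=False):
    value = float(np.sum((np.asarray(x) - 1.0) ** 2))
    return (value, 2.0 * (np.asarray(x) - 1.0)) if calculate_grad else value

needs_sensitivities = True
minimising = False   # e.g. maximising a likelihood

def fun(x):
    # single evaluation wrapper: returns (value, gradient) or just value,
    # with the sign flipped so the optimiser always minimises
    if needs_sensitivities:
        L, dl = cost(x, calculate_grad=True)
    else:
        L, dl = cost(x), None
    sign = 1 if minimising else -1
    return (sign * L, sign * dl) if dl is not None else sign * L

value, grad = fun(np.array([0.0, 2.0]))
print(value, grad)   # -2.0 and [ 2. -2.]
```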
4 changes: 1 addition & 3 deletions pybop/optimisers/pints_optimisers.py
@@ -28,16 +28,14 @@ class GradientDescent(BasePintsOptimiser):
x0 : array_like
Initial position from which optimisation will start.
sigma0 : float
The learning rate / Initial step size (default: 0.02).
The learning rate / Initial step size.

See Also
--------
pints.GradientDescent : The PINTS implementation this class is based on.
"""

def __init__(self, cost, **optimiser_kwargs):
if "sigma0" not in optimiser_kwargs.keys():
optimiser_kwargs["sigma0"] = 0.02 # set default
super().__init__(cost, PintsGradientDescent, **optimiser_kwargs)


25 changes: 14 additions & 11 deletions pybop/optimisers/scipy_optimisers.py
@@ -85,7 +85,9 @@ def _run(self):
nit = -1

return Result(
x=result.x,
x=self._transformation.to_model(result.x)
if self._transformation
else result.x,
final_cost=self.cost(result.x),
n_iterations=nit,
scipy_result=result,
@@ -160,7 +162,7 @@ def cost_wrapper(self, x):
"""
Scale the cost function, preserving the sign convention, and eliminate nan values
"""
self.log["x"].append([x])
self.log_update(x=[x])

if not self._options["jac"]:
cost = self.cost(x) / self._cost0
@@ -184,7 +186,6 @@ def _run_optimiser(self):
self.inf_count = 0

# Add callback storing history of parameter values

def base_callback(intermediate_result: Union[OptimizeResult, np.ndarray]):
"""
Log intermediate optimisation solutions. Depending on the
@@ -199,9 +200,9 @@ def base_callback(intermediate_result: Union[OptimizeResult, np.ndarray]):
x_best = intermediate_result
cost = self.cost(x_best)

self.log["x_best"].append(x_best)
self.log["cost"].append(
(-1 if not self.minimising else 1) * cost * self._cost0
self.log_update(
x_best=x_best,
cost=(-1 if not self.minimising else 1) * cost * self._cost0,
)

callback = (
@@ -215,7 +216,7 @@ def base_callback(intermediate_result: Union[OptimizeResult, np.ndarray]):
if np.isinf(self._cost0):
for _i in range(1, self.num_resamples):
try:
self.x0 = self.parameters.rvs()
self.x0 = self.parameters.rvs(apply_transform=True)
except AttributeError:
warnings.warn(
"Parameter does not have a prior distribution. Stopping resampling.",
@@ -342,13 +343,15 @@ def _run_optimiser(self):

# Add callback storing history of parameter values
def callback(intermediate_result: OptimizeResult):
self.log["x_best"].append(intermediate_result.x)
self.log["cost"].append(
intermediate_result.fun if self.minimising else -intermediate_result.fun
self.log_update(
x_best=intermediate_result.x,
cost=intermediate_result.fun
if self.minimising
else -intermediate_result.fun,
)

def cost_wrapper(x):
self.log["x"].append([x])
self.log_update(x=[x])
return self.cost(x) if self.minimising else -self.cost(x)

return differential_evolution(
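As in the PINTS path, SciPy's `result.x` comes back in search space and is mapped to model space with `to_model()` before being stored in `Result`. A compact, self-contained sketch of running `scipy.optimize.minimize` in a log search space and undoing the transformation afterwards (the objective, the target values and the log mapping are illustrative assumptions, not PyBOP code):

```python
import numpy as np
from scipy.optimize import minimize

# Illustrative only: optimise in log (search) space, report in model space.
def cost_model(p):
    return float(np.sum((np.log(p) - np.log([1e-3, 2.0])) ** 2))

to_model = np.exp                      # search -> model
to_search = np.log                     # model -> search

def cost_search(q):
    return cost_model(to_model(q))     # evaluate the model-space cost

x0_search = to_search([1e-2, 1.0])     # initial guess, transformed like optim.x0
result = minimize(cost_search, x0_search, method="Nelder-Mead")

x_model = to_model(result.x)           # undo the transformation, as in Result(x=...)
print(x_model)                         # approximately [1e-03, 2.0]
```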