From 423bb04768f6079fc4b5fc538867d47a9bc870ad Mon Sep 17 00:00:00 2001
From: Brady Planden
Date: Mon, 19 Feb 2024 15:55:22 +0000
Subject: [PATCH 01/15] Initial benchmarking setup, suite

---
 .gitignore                               |  3 +
 asv.conf.json                            | 32 ++++++++
 benchmarks/README.md                     | 98 ++++++++++++++++++++++++
 benchmarks/__init__.py                   |  0
 benchmarks/parameterisation_benchmark.py | 70 +++++++++++++++++
 5 files changed, 203 insertions(+)
 create mode 100644 asv.conf.json
 create mode 100644 benchmarks/README.md
 create mode 100644 benchmarks/__init__.py
 create mode 100644 benchmarks/parameterisation_benchmark.py

diff --git a/.gitignore b/.gitignore
index bc3caa2c..6d37ddd8 100644
--- a/.gitignore
+++ b/.gitignore
@@ -310,3 +310,6 @@ $RECYCLE.BIN/

 # Output JSON files
 **/fit_ecm_parameters.json
+
+# Airspeed Velocity
+*.asv/
diff --git a/asv.conf.json b/asv.conf.json
new file mode 100644
index 00000000..78ba3209
--- /dev/null
+++ b/asv.conf.json
@@ -0,0 +1,32 @@
+{
+  "version": 1,
+  "project": "PyBOP",
+  "project_url": "https://github.com/pybop-team/pybop",
+  "repo": ".",
+  // "build_command": [
+  //     "python -m pip install build",
+  //     "python -m build",
+  //     "python -mpip wheel -w {build_cache_dir} {build_dir}"
+  // ],
+  "build_command": [
+      "python setup.py build",
+      "python -mpip wheel -w {build_cache_dir} {build_dir}"
+  ],
+  "build_cache_dir": ".asv/build_cache",
+  "build_dir": ".asv/build",
+  "branches": ["179-add-airspeed-velocity-for-automated-benchmarking"],
+  "environment_type": "virtualenv",
+
+  "matrix": {
+    "req":{
+      "pybamm": [],
+      "numpy": [],
+      "scipy": [],
+      "pandas": [],
+      "pints": []
+    }
+  },
+  "env_dir": ".asv/env",
+  "results_dir": ".asv/results",
+  "html_dir": ".asv/html"
+}
diff --git a/benchmarks/README.md b/benchmarks/README.md
new file mode 100644
index 00000000..bb9b3c97
--- /dev/null
+++ b/benchmarks/README.md
@@ -0,0 +1,98 @@
+# Benchmarking Directory for PyBOP
+
+Welcome to the benchmarking directory for PyBOP. We use `asv` (airspeed velocity) for benchmarking, which is a tool for running Python benchmarks over time in a consistent environment. This document will guide you through the setup, execution, and viewing of benchmarks.
+
+## Quick Links
+
+- [Airspeed Velocity (asv) Documentation](https://asv.readthedocs.io/)
+
+## Prerequisites
+
+Before you can run benchmarks, you need to ensure that `asv` is installed and that you have a working Python environment. It is also recommended to run benchmarks in a clean, dedicated virtual environment to avoid any side-effects from your local environment.
+
+### Installing `asv`
+
+You can install `asv` using `pip`. It's recommended to do this within a virtual environment:
+
+```bash
+pip install asv
+```
+
+## Setting Up Benchmarks
+
+The `benchmarks` directory already contains a set of benchmarks for the package. To add or modify benchmarks, edit the `.py` files within this directory.
+
+Each benchmark file should contain one or more classes with methods that `asv` will automatically recognize as benchmarks. Here's an example structure for a benchmark file:
+
+```python
+class ExampleBenchmarks:
+    def setup(self):
+        # Code to run before each benchmark method is executed
+        pass
+
+    def time_example_benchmark(self):
+        # The actual benchmark code
+        pass
+
+    def teardown(self):
+        # Code to run after each benchmark method is executed
+        pass
+```
+
+## Running Benchmarks
+
+With `asv` installed and your benchmarks set up, you can now run benchmarks using the following standard `asv` commands:
+
+### Running All Benchmarks
+
+To run all benchmarks in your python env:
+
+```bash
+asv run --python=same
+```
+
+This will test the current state of your codebase by default. You can specify a range of commits to run benchmarks against by appending a commit range to the command, like so:
+
+```bash
+asv run <commit_start>..<commit_end>
+```
+
+### Running Specific Benchmarks
+
+To run a specific benchmark, use:
+
+```bash
+asv run --bench <benchmark_name>
+```
+
+### Running Benchmarks for a Specific Environment
+
+To run benchmarks against a specific Python version:
+
+```bash
+asv run --python=same  # To use the same Python version as the current environment
+asv run --python=3.8   # To specify the Python version
+```
+
+## Viewing Benchmark Results
+
+After running benchmarks, `asv` will generate results which can be viewed as a web page:
+
+```bash
+asv publish
+asv preview
+```
+
+Now you can open your web browser to the URL provided by `asv` to view the results.
+
+## Continuous Benchmarking
+
+You can also set up `asv` for continuous benchmarking where it will track the performance over time. This typically involves integration with a continuous integration (CI) system.
+
+For more detailed instructions on setting up continuous benchmarking, consult the [asv documentation](https://asv.readthedocs.io/en/stable/using.html#continuous-benchmarking).
+
+## Reporting Issues
+
+If you encounter any issues or have suggestions for improving the benchmarks, please open an issue or a pull request in the project repository.
+
+Thank you for contributing to the performance of the package!
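The suite added in this series goes one step further and parameterises its benchmark classes over models, parameter sets and optimisers. The sketch below shows that pattern in isolation. It is a minimal illustration that assumes only the documented `params`/`param_names` convention from `asv`; the class, method and data names are hypothetical and no PyBOP API is used.

```python
class ExampleParameterisedBenchmarks:
    # asv generates one benchmark per combination of these values and
    # passes the values as arguments to setup() and to each benchmark.
    param_names = ["n"]
    params = [[100, 1_000, 10_000]]

    def setup(self, n):
        # Runs before each benchmark method and is excluded from the timing.
        self.data = list(range(n))

    def time_sum(self, n):
        # Methods prefixed with time_ are timed by asv.
        sum(self.data)

    def track_max(self, n):
        # Methods prefixed with track_ record their return value
        # instead of a timing.
        return max(self.data)
```

Keeping expensive construction in `setup` keeps the timed body small, which is the same split the PyBOP suite uses: models, datasets and problems are built in `setup`, and only the optimiser run or model evaluation is timed.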
diff --git a/benchmarks/__init__.py b/benchmarks/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/benchmarks/parameterisation_benchmark.py b/benchmarks/parameterisation_benchmark.py
new file mode 100644
index 00000000..ae3ccda0
--- /dev/null
+++ b/benchmarks/parameterisation_benchmark.py
@@ -0,0 +1,70 @@
+import pybop
+import numpy as np
+
+
+class ParameterisationBenchmark:
+    param_names = ["model", "parameter_set", "dataset", "optimiser"]
+    params = [
+        [pybop.lithium_ion.SPM, pybop.lithium_ion.SPMe],
+        ["Chen2020"],
+        [
+            pybop.SciPyMinimize,
+            pybop.SciPyDifferentialEvolution,
+            pybop.Adam,
+            pybop.CMAES,
+            pybop.GradientDescent,
+            pybop.IRPropMin,
+            pybop.PSO,
+            pybop.SNES,
+            pybop.XNES,
+        ],
+    ]
+
+    def setup(self, model, parameter_set, optimiser):
+        """
+        Setup the parameterisation problem
+        """
+        # Create model
+        model = model(parameter_set=pybop.ParameterSet.pybamm(parameter_set))
+
+        # Fitting parameters
+        parameters = [
+            pybop.Parameter(
+                "Negative electrode active material volume fraction",
+                prior=pybop.Gaussian(0.68, 0.05),
+                bounds=[0.5, 0.8],
+            ),
+            pybop.Parameter(
+                "Positive electrode active material volume fraction",
+                prior=pybop.Gaussian(0.58, 0.05),
+                bounds=[0.4, 0.7],
+            ),
+        ]
+
+        # Generate data
+        sigma = 0.002
+        t_eval = np.arange(0, 900, 2)
+        values = model.predict(t_eval=t_eval)
+        corrupt_values = values["Voltage [V]"].data + np.random.normal(
+            0, sigma, len(t_eval)
+        )
+
+        # Form dataset
+        dataset = pybop.Dataset(
+            {
+                "Time [s]": t_eval,
+                "Current function [A]": values["Current [A]"].data,
+                "Voltage [V]": corrupt_values,
+            }
+        )
+        problem = pybop.FittingProblem(
+            model=model, dataset=dataset, parameters=parameters
+        )
+        self.cost = pybop.SumSquaredError(problem=problem)
+
+    def time_parameterisation(self, model, parameter_set, optimiser):
+        """
+        Run parameterisation across the pybop optimisers
+        """
+        # Run parameterisation
+        pybop.Optimisation(self.cost, optimiser=optimiser).run()

From 96021900155230d52445dca6e664d1e54e471115 Mon Sep 17 00:00:00 2001
From: Brady Planden
Date: Mon, 19 Feb 2024 17:16:38 +0000
Subject: [PATCH 02/15] Updt. asv config, changelog, parameterisation benchmarks

---
 CHANGELOG.md                             |  1 +
 asv.conf.json                            | 12 ++++--------
 benchmarks/parameterisation_benchmark.py | 14 ++++++++------
 3 files changed, 13 insertions(+), 14 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 3e743a4b..3d64d264 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,6 +2,7 @@

 ## Features

+- [#179](https://github.com/pybop-team/PyBOP/pull/203) - Adds `asv` configuration for benchmarking and initial benchmark suite.
 - [#203](https://github.com/pybop-team/PyBOP/pull/203) - Adds support for modern Python packaging via a `pyproject.toml` file and configures the `pytest` test runner and `ruff` linter to use their configurations stored as declarative metadata.
 - [#123](https://github.com/pybop-team/PyBOP/issues/123) - Configures scheduled tests to run against the last three PyPI releases of PyBaMM via dynamic GitHub Actions matrix generation.
 - [#187](https://github.com/pybop-team/PyBOP/issues/187) - Adds M1 Github runner to `test_on_push` workflow, updt. self-hosted supported python versions in scheduled tests.
diff --git a/asv.conf.json b/asv.conf.json
index 78ba3209..4b539943 100644
--- a/asv.conf.json
+++ b/asv.conf.json
@@ -3,20 +3,16 @@
   "project": "PyBOP",
   "project_url": "https://github.com/pybop-team/pybop",
   "repo": ".",
-  // "build_command": [
-  //     "python -m pip install build",
-  //     "python -m build",
-  //     "python -mpip wheel -w {build_cache_dir} {build_dir}"
-  // ],
   "build_command": [
-      "python setup.py build",
+      "python -m pip install build",
+      "python -m build",
       "python -mpip wheel -w {build_cache_dir} {build_dir}"
   ],
+  "default_benchmark_timeout": 180,
   "build_cache_dir": ".asv/build_cache",
   "build_dir": ".asv/build",
-  "branches": ["179-add-airspeed-velocity-for-automated-benchmarking"],
+  "branches": ["develop"],
   "environment_type": "virtualenv",
-
   "matrix": {
     "req":{
       "pybamm": [],
       "numpy": [],
       "scipy": [],
       "pandas": [],
       "pints": []
     }
diff --git a/benchmarks/parameterisation_benchmark.py b/benchmarks/parameterisation_benchmark.py
index ae3ccda0..b1cb4c61 100644
--- a/benchmarks/parameterisation_benchmark.py
+++ b/benchmarks/parameterisation_benchmark.py
@@ -31,18 +31,20 @@ def setup(self, model, parameter_set, optimiser):
         parameters = [
             pybop.Parameter(
                 "Negative electrode active material volume fraction",
-                prior=pybop.Gaussian(0.68, 0.05),
-                bounds=[0.5, 0.8],
+                prior=pybop.Gaussian(0.6, 0.02),
+                bounds=[0.375, 0.7],
+                initial_value=0.63,
             ),
             pybop.Parameter(
                 "Positive electrode active material volume fraction",
-                prior=pybop.Gaussian(0.58, 0.05),
-                bounds=[0.4, 0.7],
+                prior=pybop.Gaussian(0.5, 0.02),
+                bounds=[0.375, 0.625],
+                initial_value=0.51,
             ),
         ]

         # Generate data
-        sigma = 0.002
+        sigma = 0.001
         t_eval = np.arange(0, 900, 2)
         values = model.predict(t_eval=t_eval)
         corrupt_values = values["Voltage [V]"].data + np.random.normal(
@@ -58,7 +60,7 @@ def setup(self, model, parameter_set, optimiser):
             }
         )
         problem = pybop.FittingProblem(
-            model=model, dataset=dataset, parameters=parameters
+            model=model, dataset=dataset, parameters=parameters, init_soc=0.5
         )
         self.cost = pybop.SumSquaredError(problem=problem)

From 7f7d6df9aafd80fc2c6dfbf0c1ae1aaeaa957fc3 Mon Sep 17 00:00:00 2001
From: Brady Planden
Date: Wed, 21 Feb 2024 15:01:00 +0000
Subject: [PATCH 03/15] Add periodic benchmark workflow and nox session

---
 .github/workflows/periodic_benchmarks.yaml | 96 ++++++++++++++++++++++
 noxfile.py                                 | 11 +++
 2 files changed, 107 insertions(+)
 create mode 100644 .github/workflows/periodic_benchmarks.yaml

diff --git a/.github/workflows/periodic_benchmarks.yaml b/.github/workflows/periodic_benchmarks.yaml
new file mode 100644
index 00000000..83252b0d
--- /dev/null
+++ b/.github/workflows/periodic_benchmarks.yaml
@@ -0,0 +1,96 @@
+# Initial Source: pybop-team/PyBop
+
+# This workflow periodically runs the benchmarks suite in benchmarks/
+# using asv and publishes the results, effectively updating
+# the display website hosted in the pybop-bench repo
+
+# Steps:
+# - Benchmark all commits since the last one that was benchmarked
+# - Push results to pybop-bench repo
+# - Publish website
+name: Benchmarks
+on:
+  # Everyday at 12 pm UTC
+  schedule:
+    - cron: "0 12 * * *"
+  # Make it possible to trigger the
+  # workflow manually
+  workflow_dispatch:
+
+jobs:
+  benchmarks:
+    runs-on: [self-hosted, macOS, ARM64]
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Install python & create virtualenv
+        shell: bash
+        run: |
+          eval "$(pyenv init -)"
+          pyenv install 3.11 -s
+          pyenv virtualenv 3.11 pybop-311-bench
+
+      - name: Install dependencies & run benchmarks
+        shell: bash
+        run: |
+          eval "$(pyenv init -)"
+          pyenv activate pybop-311-bench
+          python -m pip 
install --upgrade pip nox + python -m nox -s benchmarks + + - name: Upload results as artifact + uses: actions/upload-artifact@v4 + with: + name: asv_periodic_results + path: results + + - name: Uninstall pyenv-virtualenv & python + if: always() + shell: bash + run: | + eval "$(pyenv init -)" + pyenv activate pybop-311-bench + pyenv uninstall -f $( python --version ) + + publish-results: + name: Push and publish results + needs: benchmarks + runs-on: ubuntu-latest + steps: + - name: Set up Python 3.11 + uses: actions/setup-python@v5 + with: + python-version: 3.11 + + - name: Install asv + run: pip install asv + + - name: Checkout pybop-bench repo + uses: actions/checkout@v4 + with: + repository: pybop-team/pybop-bench + token: ${{ github.token }} + + - name: Download results artifact + uses: actions/download-artifact@v4 + with: + name: asv_periodic_results + path: new_results + + - name: Copy new results and push to pybop-bench repo + env: + PUSH_BENCH_EMAIL: ${{ secrets.PUSH_BENCH_EMAIL }} + PUSH_BENCH_NAME: ${{ secrets.PUSH_BENCH_NAME }} + run: | + cp -vr new_results/* results + git config user.email "$PUSH_BENCH_EMAIL" + git config user.name "$PUSH_BENCH_NAME" + git add results + git commit -am "Add new benchmark results" + git push + + - name: Publish results + run: | + asv publish + git fetch origin gh-pages:gh-pages + asv gh-pages diff --git a/noxfile.py b/noxfile.py index e88df260..5d259298 100644 --- a/noxfile.py +++ b/noxfile.py @@ -42,6 +42,17 @@ def notebooks(session): session.run("pytest", "--nbmake", "--examples", "examples/", external=True) +@nox.session +def bench(session): + """Run the benchmarks.""" + session.install("-e", ".[all,dev]", silent=False) + session.run("pip", "install", "asv") + session.run("asv", "machine", "--machine", "SelfHostedRunner") + session.run( + "asv", "run", "--machine", "SelfHostedRunner", "--show-stderr", "--python=same" + ) + + @nox.session def docs(session): """ From 8698a5c899d8e956f6fccc5b34ba3949abb24743 Mon Sep 17 00:00:00 2001 From: Brady Planden Date: Wed, 21 Feb 2024 15:11:29 +0000 Subject: [PATCH 04/15] updt results path for upload --- .github/workflows/periodic_benchmarks.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/periodic_benchmarks.yaml b/.github/workflows/periodic_benchmarks.yaml index 83252b0d..b27a2e51 100644 --- a/.github/workflows/periodic_benchmarks.yaml +++ b/.github/workflows/periodic_benchmarks.yaml @@ -42,7 +42,7 @@ jobs: uses: actions/upload-artifact@v4 with: name: asv_periodic_results - path: results + path: .asv/results - name: Uninstall pyenv-virtualenv & python if: always() From fc94e48a5426a291488b98951e1cd3d569f889fd Mon Sep 17 00:00:00 2001 From: Brady Planden <55357039+BradyPlanden@users.noreply.github.com> Date: Sat, 24 Feb 2024 10:35:33 +0000 Subject: [PATCH 05/15] Increment to Python 3.12, fix typos, add `push` trigger to workflow Co-authored-by: Agriya Khetarpal <74401230+agriyakhetarpal@users.noreply.github.com> --- .github/workflows/periodic_benchmarks.yaml | 9 +++++---- asv.conf.json | 2 +- noxfile.py | 2 +- 3 files changed, 7 insertions(+), 6 deletions(-) diff --git a/.github/workflows/periodic_benchmarks.yaml b/.github/workflows/periodic_benchmarks.yaml index b27a2e51..f974dbf4 100644 --- a/.github/workflows/periodic_benchmarks.yaml +++ b/.github/workflows/periodic_benchmarks.yaml @@ -10,6 +10,7 @@ # - Publish website name: Benchmarks on: + push: # Everyday at 12 pm UTC schedule: - cron: "0 12 * * *" @@ -27,14 +28,14 @@ jobs: shell: bash run: 
| eval "$(pyenv init -)" - pyenv install 3.11 -s - pyenv virtualenv 3.11 pybop-311-bench + pyenv install 3.12 -s + pyenv virtualenv 3.12 pybop-312-bench - name: Install dependencies & run benchmarks shell: bash run: | eval "$(pyenv init -)" - pyenv activate pybop-311-bench + pyenv activate pybop-312-bench python -m pip install --upgrade pip nox python -m nox -s benchmarks @@ -49,7 +50,7 @@ jobs: shell: bash run: | eval "$(pyenv init -)" - pyenv activate pybop-311-bench + pyenv activate pybop-312-bench pyenv uninstall -f $( python --version ) publish-results: diff --git a/asv.conf.json b/asv.conf.json index 4b539943..10da446c 100644 --- a/asv.conf.json +++ b/asv.conf.json @@ -6,7 +6,7 @@ "build_command": [ "python -m pip install build", "python -m build", - "python -mpip wheel -w {build_cache_dir} {build_dir}" + "python -m pip wheel -w {build_cache_dir} {build_dir}" ], "default_benchmark_timeout": 180, "build_cache_dir": ".asv/build_cache", diff --git a/noxfile.py b/noxfile.py index 5d259298..2c43f5b4 100644 --- a/noxfile.py +++ b/noxfile.py @@ -46,7 +46,7 @@ def notebooks(session): def bench(session): """Run the benchmarks.""" session.install("-e", ".[all,dev]", silent=False) - session.run("pip", "install", "asv") + session.install("asv") session.run("asv", "machine", "--machine", "SelfHostedRunner") session.run( "asv", "run", "--machine", "SelfHostedRunner", "--show-stderr", "--python=same" From 1c630614f246ae504ff00dea61ef6ac64f2ac09d Mon Sep 17 00:00:00 2001 From: Brady Planden Date: Sat, 24 Feb 2024 11:14:15 +0000 Subject: [PATCH 06/15] Updt. nox session name --- noxfile.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/noxfile.py b/noxfile.py index 2c43f5b4..7804d790 100644 --- a/noxfile.py +++ b/noxfile.py @@ -43,7 +43,7 @@ def notebooks(session): @nox.session -def bench(session): +def benchmarks(session): """Run the benchmarks.""" session.install("-e", ".[all,dev]", silent=False) session.install("asv") From 554004233e293fa1fce3918a7a4a0743adedb435 Mon Sep 17 00:00:00 2001 From: Brady Planden Date: Sat, 24 Feb 2024 17:55:22 +0000 Subject: [PATCH 07/15] Updt. 
nox benchmarks session, tests for CI on benchmark workflow, remove unused pandas dependency

---
 .github/workflows/periodic_benchmarks.yaml |  8 +++++---
 asv.conf.json                              | 13 +++----------
 benchmarks/parameterisation_benchmark.py   | 16 ++++++++--------
 noxfile.py                                 |  7 ++-----
 pyproject.toml                             |  1 -
 5 files changed, 18 insertions(+), 27 deletions(-)

diff --git a/.github/workflows/periodic_benchmarks.yaml b/.github/workflows/periodic_benchmarks.yaml
index f974dbf4..bab40e3e 100644
--- a/.github/workflows/periodic_benchmarks.yaml
+++ b/.github/workflows/periodic_benchmarks.yaml
@@ -36,14 +36,16 @@ jobs:
         run: |
           eval "$(pyenv init -)"
           pyenv activate pybop-312-bench
-          python -m pip install --upgrade pip nox
-          python -m nox -s benchmarks
+          python -m pip install --upgrade pip asv[virtualenv]
+          python -m pip install -e .[all,dev]
+          asv machine --machine "SelfHostedRunner"
+          asv run --machine "SelfHostedRunner" NEW --show-stderr -v

       - name: Upload results as artifact
         uses: actions/upload-artifact@v4
         with:
           name: asv_periodic_results
-          path: .asv/results
+          path: results
diff --git a/asv.conf.json b/asv.conf.json
index 10da446c..c5c10871 100644
--- a/asv.conf.json
+++ b/asv.conf.json
@@ -5,24 +5,17 @@
   "repo": ".",
   "build_command": [
     "python -m pip install build",
-    "python -m build",
-    "python -m pip wheel -w {build_cache_dir} {build_dir}"
+    "python -m build"
   ],
   "default_benchmark_timeout": 180,
-  "build_cache_dir": ".asv/build_cache",
-  "build_dir": ".asv/build",
-  "branches": ["develop"],
+  "branches": ["179-add-airspeed-velocity-for-automated-benchmarking"],
   "environment_type": "virtualenv",
   "matrix": {
     "req":{
       "pybamm": [],
       "numpy": [],
       "scipy": [],
-      "pandas": [],
       "pints": []
     }
-  },
-  "env_dir": ".asv/env",
-  "results_dir": ".asv/results",
-  "html_dir": ".asv/html"
+  }
 }
diff --git a/benchmarks/parameterisation_benchmark.py b/benchmarks/parameterisation_benchmark.py
index b1cb4c61..1bf3286b 100644
--- a/benchmarks/parameterisation_benchmark.py
+++ b/benchmarks/parameterisation_benchmark.py
@@ -8,15 +8,15 @@ class ParameterisationBenchmark:
         [pybop.lithium_ion.SPM, pybop.lithium_ion.SPMe],
         ["Chen2020"],
         [
-            pybop.SciPyMinimize,
-            pybop.SciPyDifferentialEvolution,
-            pybop.Adam,
+            # pybop.SciPyMinimize,
+            # pybop.SciPyDifferentialEvolution,
+            # pybop.Adam,
             pybop.CMAES,
-            pybop.GradientDescent,
-            pybop.IRPropMin,
-            pybop.PSO,
-            pybop.SNES,
-            pybop.XNES,
+            # pybop.GradientDescent,
+            # pybop.IRPropMin,
+            # pybop.PSO,
+            # pybop.SNES,
+            # pybop.XNES,
         ],
     ]
diff --git a/noxfile.py b/noxfile.py
index 7804d790..8b16805f 100644
--- a/noxfile.py
+++ b/noxfile.py
@@ -46,11 +46,8 @@ def notebooks(session):
 def benchmarks(session):
     """Run the benchmarks."""
     session.install("-e", ".[all,dev]", silent=False)
-    session.install("asv")
-    session.run("asv", "machine", "--machine", "SelfHostedRunner")
-    session.run(
-        "asv", "run", "--machine", "SelfHostedRunner", "--show-stderr", "--python=same"
-    )
+    session.install("asv[virtualenv]")
+    session.run("asv", "run", "--show-stderr", "--python=same")
diff --git a/pyproject.toml b/pyproject.toml
index 43928f48..10b102b0 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -21,7 +21,6 @@ dependencies = [
     "pybamm>=23.5",
     "numpy>=1.16",
     "scipy>=1.3",
-    "pandas>=1.0",
     "pints>=0.5",
 ]

From aceedc5795abde8630bd2ca0a2871023142b0e6b Mon Sep 17 00:00:00 2001
From: Brady Planden
Date: Sat, 24 Feb 2024 18:10:35 +0000
Subject: [PATCH 08/15] asv installation, calls

---
 .github/workflows/periodic_benchmarks.yaml | 6 +++---
 1 file 
changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/periodic_benchmarks.yaml b/.github/workflows/periodic_benchmarks.yaml index bab40e3e..50686b22 100644 --- a/.github/workflows/periodic_benchmarks.yaml +++ b/.github/workflows/periodic_benchmarks.yaml @@ -36,10 +36,10 @@ jobs: run: | eval "$(pyenv init -)" pyenv activate pybop-312-bench - python -m pip install --upgrade pip asv[virtualenv] python -m pip install -e .[all,dev] - asv machine --machine "SelfHostedRunner" - asv run --machine "SelfHostedRunner" NEW --show-stderr -v + python -m pip install asv[virtualenv] + python -m asv machine --machine "SelfHostedRunner" + python -m asv run --machine "SelfHostedRunner" NEW --show-stderr -v - name: Upload results as artifact uses: actions/upload-artifact@v4 From 4e3722f3a6b20137a22d424b56fa99f3e896c07e Mon Sep 17 00:00:00 2001 From: Brady Planden Date: Fri, 15 Mar 2024 14:45:48 +0000 Subject: [PATCH 09/15] Additional benchmarks, updt build wheels --- asv.conf.json | 6 +- benchmarks/benchmark_model.py | 77 +++++++++++++++++++ benchmarks/benchmark_optim_construction.py | 86 ++++++++++++++++++++++ benchmarks/parameterisation_benchmark.py | 78 +++++++++++++------- 4 files changed, 220 insertions(+), 27 deletions(-) create mode 100644 benchmarks/benchmark_model.py create mode 100644 benchmarks/benchmark_optim_construction.py diff --git a/asv.conf.json b/asv.conf.json index c5c10871..7d032dcf 100644 --- a/asv.conf.json +++ b/asv.conf.json @@ -5,7 +5,7 @@ "repo": ".", "build_command": [ "python -m pip install build", - "python -m build" + "python -m build --wheel -o {build_cache_dir} {build_dir}" ], "default_benchmark_timeout": 180, "branches": ["179-add-airspeed-velocity-for-automated-benchmarking"], @@ -17,5 +17,7 @@ "scipy": [], "pints": [] } - } + }, + "build_cache_dir": ".asv/cache", + "build_dir": ".asv/build" } diff --git a/benchmarks/benchmark_model.py b/benchmarks/benchmark_model.py new file mode 100644 index 00000000..b65c8a21 --- /dev/null +++ b/benchmarks/benchmark_model.py @@ -0,0 +1,77 @@ +import pybop +import numpy as np + + +class BenchmarkModel: + param_names = ["model", "parameter_set"] + params = [ + [pybop.lithium_ion.SPM, pybop.lithium_ion.SPMe], + ["Chen2020"], + ] + + def setup(self, model, parameter_set): + """ + Setup the model and problem for predict and simulate benchmarks. + + Args: + model (pybop.Model): The model class to be benchmarked. + parameter_set (str): The name of the parameter set to be used. + """ + # Create model instance + self.model = model(parameter_set=pybop.ParameterSet.pybamm(parameter_set)) + + # Define fitting parameters + parameters = [ + pybop.Parameter( + "Current function [A]", + prior=pybop.Gaussian(0.4, 0.02), + bounds=[0.2, 0.7], + initial_value=0.4, + ) + ] + + # Generate synthetic data + sigma = 0.001 + self.t_eval = np.arange(0, 900, 2) + values = self.model.predict(t_eval=self.t_eval) + corrupt_values = values["Voltage [V]"].data + np.random.normal( + 0, sigma, len(self.t_eval) + ) + + self.inputs = { + "Current function [A]": 0.4, + } + + # Create dataset + dataset = pybop.Dataset( + { + "Time [s]": self.t_eval, + "Current function [A]": values["Current [A]"].data, + "Voltage [V]": corrupt_values, + } + ) + + # Create fitting problem + self.problem = pybop.FittingProblem( + model=self.model, dataset=dataset, parameters=parameters, init_soc=0.5 + ) + + def time_model_predict(self, model, parameter_set): + """ + Benchmark the predict method of the model. + + Args: + model (pybop.Model): The model class being benchmarked. 
+ parameter_set (str): The name of the parameter set being used. + """ + self.model.predict(inputs=self.inputs, t_eval=self.t_eval) + + def time_model_simulate(self, model, parameter_set): + """ + Benchmark the simulate method of the model. + + Args: + model (pybop.Model): The model class being benchmarked. + parameter_set (str): The name of the parameter set being used. + """ + self.problem._model.simulate(inputs=self.inputs, t_eval=self.t_eval) diff --git a/benchmarks/benchmark_optim_construction.py b/benchmarks/benchmark_optim_construction.py new file mode 100644 index 00000000..fcaf1deb --- /dev/null +++ b/benchmarks/benchmark_optim_construction.py @@ -0,0 +1,86 @@ +import pybop +import numpy as np + + +class BenchmarkOptimisationConstruction: + param_names = ["model", "parameter_set", "optimiser"] + params = [ + [pybop.lithium_ion.SPM, pybop.lithium_ion.SPMe], + ["Chen2020"], + [pybop.CMAES], + ] + + def setup(self, model, parameter_set, optimiser): + """ + Set up the model, problem, and cost for optimization benchmarking. + + Args: + model (pybop.Model): The model class to be benchmarked. + parameter_set (str): The name of the parameter set to be used. + optimiser (pybop.Optimiser): The optimizer class to be used. + """ + # Create model instance + model_instance = model(parameter_set=pybop.ParameterSet.pybamm(parameter_set)) + + # Define fitting parameters + parameters = [ + pybop.Parameter( + "Negative electrode active material volume fraction", + prior=pybop.Gaussian(0.6, 0.02), + bounds=[0.375, 0.7], + initial_value=0.63, + ), + pybop.Parameter( + "Positive electrode active material volume fraction", + prior=pybop.Gaussian(0.5, 0.02), + bounds=[0.375, 0.625], + initial_value=0.51, + ), + ] + + # Generate synthetic data + sigma = 0.001 + t_eval = np.arange(0, 900, 2) + values = model_instance.predict(t_eval=t_eval) + corrupt_values = values["Voltage [V]"].data + np.random.normal( + 0, sigma, len(t_eval) + ) + + # Create dataset + dataset = pybop.Dataset( + { + "Time [s]": t_eval, + "Current function [A]": values["Current [A]"].data, + "Voltage [V]": corrupt_values, + } + ) + + # Create fitting problem + problem = pybop.FittingProblem( + model=model_instance, dataset=dataset, parameters=parameters, init_soc=0.5 + ) + + # Create cost function + self.cost = pybop.SumSquaredError(problem=problem) + + def time_optimisation_construction(self, model, parameter_set, optimiser): + """ + Benchmark the construction of the optimization class. + + Args: + model (pybop.Model): The model class being benchmarked. + parameter_set (str): The name of the parameter set being used. + optimiser (pybop.Optimiser): The optimizer class being used. + """ + self.optim = pybop.Optimisation(self.cost, optimiser=optimiser) + + def time_cost(self, model, parameter_set, optimiser): + """ + Benchmark the cost function evaluation. + + Args: + model (pybop.Model): The model class being benchmarked. + parameter_set (str): The name of the parameter set being used. + optimiser (pybop.Optimiser): The optimizer class being used. 
+ """ + self.cost([0.63, 0.51]) diff --git a/benchmarks/parameterisation_benchmark.py b/benchmarks/parameterisation_benchmark.py index 1bf3286b..99964a3a 100644 --- a/benchmarks/parameterisation_benchmark.py +++ b/benchmarks/parameterisation_benchmark.py @@ -3,55 +3,60 @@ class ParameterisationBenchmark: - param_names = ["model", "parameter_set", "dataset", "optimiser"] + param_names = ["model", "parameter_set", "optimiser"] params = [ [pybop.lithium_ion.SPM, pybop.lithium_ion.SPMe], ["Chen2020"], [ - # pybop.SciPyMinimize, - # pybop.SciPyDifferentialEvolution, - # pybop.Adam, + pybop.SciPyMinimize, + pybop.SciPyDifferentialEvolution, + pybop.Adam, pybop.CMAES, - # pybop.GradientDescent, - # pybop.IRPropMin, - # pybop.PSO, - # pybop.SNES, - # pybop.XNES, + pybop.GradientDescent, + pybop.IRPropMin, + pybop.PSO, + pybop.SNES, + pybop.XNES, ], ] def setup(self, model, parameter_set, optimiser): """ - Setup the parameterisation problem + Set up the parameterization problem for benchmarking. + + Args: + model (pybop.Model): The model class to be benchmarked. + parameter_set (str): The name of the parameter set to be used. + optimiser (pybop.Optimiser): The optimizer class to be used. """ - # Create model - model = model(parameter_set=pybop.ParameterSet.pybamm(parameter_set)) + # Create model instance + model_instance = model(parameter_set=pybop.ParameterSet.pybamm(parameter_set)) - # Fitting parameters + # Define fitting parameters parameters = [ pybop.Parameter( "Negative electrode active material volume fraction", - prior=pybop.Gaussian(0.6, 0.02), + prior=pybop.Gaussian(0.6, 0.03), bounds=[0.375, 0.7], initial_value=0.63, ), pybop.Parameter( "Positive electrode active material volume fraction", - prior=pybop.Gaussian(0.5, 0.02), + prior=pybop.Gaussian(0.5, 0.03), bounds=[0.375, 0.625], initial_value=0.51, ), ] - # Generate data - sigma = 0.001 + # Generate synthetic data + sigma = 0.003 t_eval = np.arange(0, 900, 2) - values = model.predict(t_eval=t_eval) + values = model_instance.predict(t_eval=t_eval) corrupt_values = values["Voltage [V]"].data + np.random.normal( 0, sigma, len(t_eval) ) - # Form dataset + # Create dataset dataset = pybop.Dataset( { "Time [s]": t_eval, @@ -59,14 +64,37 @@ def setup(self, model, parameter_set, optimiser): "Voltage [V]": corrupt_values, } ) + + # Create fitting problem problem = pybop.FittingProblem( - model=model, dataset=dataset, parameters=parameters, init_soc=0.5 + model=model_instance, dataset=dataset, parameters=parameters, init_soc=0.5 ) - self.cost = pybop.SumSquaredError(problem=problem) - def time_parameterisation(self, model, parameter_set, optimiser): + # Create cost function + cost = pybop.SumSquaredError(problem=problem) + + # Create optimization instance + self.optim = pybop.Optimisation(cost, optimiser=optimiser) + + def time_parameterisation(self, _model, _parameter_set, _optimiser): """ - Run parameterisation across the pybop optimisers + Benchmark the parameterization process. + + Args: + _model (pybop.Model): The model class being benchmarked (unused). + _parameter_set (str): The name of the parameter set being used (unused). + _optimiser (pybop.Optimiser): The optimizer class being used (unused). + """ + self.optim.run() + + def time_optimiser_ask(self, _model, _parameter_set, optimiser): + """ + Benchmark the optimizer's ask method. + + Args: + _model (pybop.Model): The model class being benchmarked (unused). + _parameter_set (str): The name of the parameter set being used (unused). 
+ optimiser (pybop.Optimiser): The optimizer class being used. """ - # Run parameterisation - pybop.Optimisation(self.cost, optimiser=optimiser).run() + if optimiser not in [pybop.SciPyMinimize, pybop.SciPyDifferentialEvolution]: + self.optim.optimiser.ask() From 8f0daf1c1e7afc99a54e82a07c24c9bf3b3b2832 Mon Sep 17 00:00:00 2001 From: Brady Planden Date: Fri, 15 Mar 2024 16:19:22 +0000 Subject: [PATCH 10/15] updt permissions for deployment --- .github/workflows/periodic_benchmarks.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/periodic_benchmarks.yaml b/.github/workflows/periodic_benchmarks.yaml index 50686b22..4750d6ee 100644 --- a/.github/workflows/periodic_benchmarks.yaml +++ b/.github/workflows/periodic_benchmarks.yaml @@ -72,7 +72,7 @@ jobs: uses: actions/checkout@v4 with: repository: pybop-team/pybop-bench - token: ${{ github.token }} + token: ${{ secrets.PUSH_BENCH_TOKEN }} - name: Download results artifact uses: actions/download-artifact@v4 From ebc7b628b10c4835f8e842158b44f4622f30d6d2 Mon Sep 17 00:00:00 2001 From: Brady Planden Date: Fri, 15 Mar 2024 18:13:47 +0000 Subject: [PATCH 11/15] add --global arg to git config --- .github/workflows/periodic_benchmarks.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/periodic_benchmarks.yaml b/.github/workflows/periodic_benchmarks.yaml index 4750d6ee..9fc5cd3b 100644 --- a/.github/workflows/periodic_benchmarks.yaml +++ b/.github/workflows/periodic_benchmarks.yaml @@ -86,8 +86,8 @@ jobs: PUSH_BENCH_NAME: ${{ secrets.PUSH_BENCH_NAME }} run: | cp -vr new_results/* results - git config user.email "$PUSH_BENCH_EMAIL" - git config user.name "$PUSH_BENCH_NAME" + git config --global user.email "$PUSH_BENCH_EMAIL" + git config --global user.name "$PUSH_BENCH_NAME" git add results git commit -am "Add new benchmark results" git push From aa9a3951b1e24a9f2a1100a1b63e990db8e093dc Mon Sep 17 00:00:00 2001 From: Brady Planden <55357039+BradyPlanden@users.noreply.github.com> Date: Sat, 16 Mar 2024 10:11:23 +0000 Subject: [PATCH 12/15] Limit workflow to pybop repo Co-authored-by: Agriya Khetarpal <74401230+agriyakhetarpal@users.noreply.github.com> --- .github/workflows/periodic_benchmarks.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/periodic_benchmarks.yaml b/.github/workflows/periodic_benchmarks.yaml index 9fc5cd3b..a58c0048 100644 --- a/.github/workflows/periodic_benchmarks.yaml +++ b/.github/workflows/periodic_benchmarks.yaml @@ -21,6 +21,7 @@ on: jobs: benchmarks: runs-on: [self-hosted, macOS, ARM64] + if: github.repository == 'pybop-team/PyBOP' steps: - uses: actions/checkout@v4 @@ -59,6 +60,7 @@ jobs: name: Push and publish results needs: benchmarks runs-on: ubuntu-latest + if: github.repository == 'pybop-team/PyBOP' steps: - name: Set up Python 3.11 uses: actions/setup-python@v5 From 8e6f609e2d507df1f3cb5feeda66288f58466e16 Mon Sep 17 00:00:00 2001 From: Brady Planden Date: Sat, 16 Mar 2024 13:18:31 +0000 Subject: [PATCH 13/15] Add random seed, updt branch target, increment python for publish job in workflow --- .github/workflows/periodic_benchmarks.yaml | 5 ++--- asv.conf.json | 2 +- benchmarks/README.md | 8 +++++++- benchmarks/benchmark_model.py | 4 ++++ benchmarks/benchmark_optim_construction.py | 6 +++++- benchmarks/benchmark_utils.py | 5 +++++ benchmarks/parameterisation_benchmark.py | 6 +++++- 7 files changed, 29 insertions(+), 7 deletions(-) create mode 100644 benchmarks/benchmark_utils.py diff --git 
a/.github/workflows/periodic_benchmarks.yaml b/.github/workflows/periodic_benchmarks.yaml
index a58c0048..6ea0b489 100644
--- a/.github/workflows/periodic_benchmarks.yaml
+++ b/.github/workflows/periodic_benchmarks.yaml
@@ -10,7 +10,6 @@
 # - Publish website
 name: Benchmarks
 on:
-  push:
   # Everyday at 12 pm UTC
   schedule:
     - cron: "0 12 * * *"
@@ -62,10 +61,10 @@ jobs:
     runs-on: ubuntu-latest
     if: github.repository == 'pybop-team/PyBOP'
     steps:
-      - name: Set up Python 3.11
+      - name: Set up Python 3.12
        uses: actions/setup-python@v5
        with:
-          python-version: 3.11
+          python-version: 3.12

      - name: Install asv
        run: pip install asv
diff --git a/asv.conf.json b/asv.conf.json
index 7d032dcf..fdd830ce 100644
--- a/asv.conf.json
+++ b/asv.conf.json
@@ -8,7 +8,7 @@
     "python -m build --wheel -o {build_cache_dir} {build_dir}"
   ],
   "default_benchmark_timeout": 180,
-  "branches": ["179-add-airspeed-velocity-for-automated-benchmarking"],
+  "branches": ["develop"],
   "environment_type": "virtualenv",
   "matrix": {
     "req":{
diff --git a/benchmarks/README.md b/benchmarks/README.md
index bb9b3c97..55a014a5 100644
--- a/benchmarks/README.md
+++ b/benchmarks/README.md
@@ -48,7 +48,7 @@ With `asv` installed and your benchmarks set up, you can now run benchmarks usin
 To run all benchmarks in your python env:

 ```bash
-asv run --python=same
+asv run
 ```

 This will test the current state of your codebase by default. You can specify a range of commits to run benchmarks against by appending a commit range to the command, like so:
@@ -57,6 +57,12 @@ This will test the current state of your codebase by default. You can specify a
 asv run <commit_start>..<commit_end>
 ```

+For quick benchmarking, pass the `--quick` argument to `asv run`. This runs each benchmark once and reports a single measurement.
+
+```bash
+asv run --quick
+```
diff --git a/benchmarks/benchmark_model.py b/benchmarks/benchmark_model.py
index b65c8a21..ed53914c 100644
--- a/benchmarks/benchmark_model.py
+++ b/benchmarks/benchmark_model.py
@@ -1,5 +1,6 @@
 import pybop
 import numpy as np
+from .benchmark_utils import set_random_seed


 class BenchmarkModel:
@@ -17,6 +18,9 @@ def setup(self, model, parameter_set):
         model (pybop.Model): The model class to be benchmarked.
         parameter_set (str): The name of the parameter set to be used.
         """
+        # Set random seed
+        set_random_seed()
+
         # Create model instance
         self.model = model(parameter_set=pybop.ParameterSet.pybamm(parameter_set))
diff --git a/benchmarks/benchmark_optim_construction.py b/benchmarks/benchmark_optim_construction.py
index fcaf1deb..f79da4fc 100644
--- a/benchmarks/benchmark_optim_construction.py
+++ b/benchmarks/benchmark_optim_construction.py
@@ -1,5 +1,6 @@
 import pybop
 import numpy as np
+from .benchmark_utils import set_random_seed


 class BenchmarkOptimisationConstruction:
@@ -19,6 +20,9 @@ def setup(self, model, parameter_set, optimiser):
         parameter_set (str): The name of the parameter set to be used.
         optimiser (pybop.Optimiser): The optimizer class to be used.
         """
+        # Set random seed
+        set_random_seed()
+
         # Create model instance
         model_instance = model(parameter_set=pybop.ParameterSet.pybamm(parameter_set))

@@ -74,7 +78,7 @@ def time_optimisation_construction(self, model, parameter_set, optimiser):
         """
         self.optim = pybop.Optimisation(self.cost, optimiser=optimiser)

-    def time_cost(self, model, parameter_set, optimiser):
+    def time_cost_evaluate(self, model, parameter_set, optimiser):
         """
         Benchmark the cost function evaluation.
diff --git a/benchmarks/benchmark_utils.py b/benchmarks/benchmark_utils.py
new file mode 100644
index 00000000..3126e8bb
--- /dev/null
+++ b/benchmarks/benchmark_utils.py
@@ -0,0 +1,5 @@
+import numpy as np
+
+
+def set_random_seed(seed_value=8):
+    np.random.seed(seed_value)
diff --git a/benchmarks/parameterisation_benchmark.py b/benchmarks/parameterisation_benchmark.py
index 99964a3a..1c92700b 100644
--- a/benchmarks/parameterisation_benchmark.py
+++ b/benchmarks/parameterisation_benchmark.py
@@ -1,8 +1,9 @@
 import pybop
 import numpy as np
+from .benchmark_utils import set_random_seed


-class ParameterisationBenchmark:
+class BenchmarkParameterisation:
     param_names = ["model", "parameter_set", "optimiser"]
     params = [
         [pybop.lithium_ion.SPM, pybop.lithium_ion.SPMe],
         ["Chen2020"],
@@ -29,6 +30,9 @@ def setup(self, model, parameter_set, optimiser):
         parameter_set (str): The name of the parameter set to be used.
         optimiser (pybop.Optimiser): The optimizer class to be used.
         """
+        # Set random seed
+        set_random_seed()
+
         # Create model instance
         model_instance = model(parameter_set=pybop.ParameterSet.pybamm(parameter_set))

From 7e530b06f581e01a4594edd952e3bde0baa909fa Mon Sep 17 00:00:00 2001
From: Brady Planden
Date: Sun, 17 Mar 2024 11:38:29 +0000
Subject: [PATCH 14/15] Updt benchmark initial soc, add tracking of optimisation results, increment default_max_unchanged_iterations for more stable default

---
 .gitignore                                    |  1 +
 benchmarks/benchmark_optim_construction.py    |  2 +-
 ...hmark.py => benchmark_parameterisation.py} | 65 ++++++++++++++-----
 pybop/_optimisation.py                        |  4 +-
 4 files changed, 51 insertions(+), 21 deletions(-)
 rename benchmarks/{parameterisation_benchmark.py => benchmark_parameterisation.py} (50%)

diff --git a/.gitignore b/.gitignore
index 6d37ddd8..3c3bb708 100644
--- a/.gitignore
+++ b/.gitignore
@@ -313,3 +313,4 @@ $RECYCLE.BIN/

 # Airspeed Velocity
 *.asv/
+results/
diff --git a/benchmarks/benchmark_optim_construction.py b/benchmarks/benchmark_optim_construction.py
index f79da4fc..d92f2ec3 100644
--- a/benchmarks/benchmark_optim_construction.py
+++ b/benchmarks/benchmark_optim_construction.py
@@ -61,7 +61,7 @@ def setup(self, model, parameter_set, optimiser):

         # Create fitting problem
         problem = pybop.FittingProblem(
-            model=model_instance, dataset=dataset, parameters=parameters, init_soc=0.5
+            model=model_instance, dataset=dataset, parameters=parameters
         )

         # Create cost function
diff --git a/benchmarks/parameterisation_benchmark.py b/benchmarks/benchmark_parameterisation.py
similarity index 50%
rename from benchmarks/parameterisation_benchmark.py
rename to benchmarks/benchmark_parameterisation.py
index 1c92700b..4315f89b 100644
--- a/benchmarks/parameterisation_benchmark.py
+++ b/benchmarks/benchmark_parameterisation.py
@@ -34,21 +34,26 @@ def setup(self, model, parameter_set, optimiser):
         set_random_seed()

         # Create model instance
-        model_instance = model(parameter_set=pybop.ParameterSet.pybamm(parameter_set))
+        params = pybop.ParameterSet.pybamm(parameter_set)
+        params.update(
+            {
+                "Negative electrode active material volume fraction": 0.63,
+                "Positive electrode active material volume fraction": 0.51,
+            }
+        )
+        model_instance = model(parameter_set=params)

         # Define fitting parameters
         parameters = [
             pybop.Parameter(
                 "Negative electrode active material volume fraction",
-                prior=pybop.Gaussian(0.6, 0.03),
+                prior=pybop.Gaussian(0.55, 0.03),
                 bounds=[0.375, 0.7],
-                initial_value=0.63,
             ),
             pybop.Parameter(
                 "Positive electrode active material volume fraction",
-                prior=pybop.Gaussian(0.5, 0.03),
-                bounds=[0.375, 0.625],
-                initial_value=0.51,
+                prior=pybop.Gaussian(0.55, 0.03),
+                bounds=[0.375, 0.7],
             ),
         ]
@@ -70,34 +75,58 @@ def setup(self, model, parameter_set, optimiser):
         )

         # Create fitting problem
-        problem = pybop.FittingProblem(
-            model=model_instance, dataset=dataset, parameters=parameters, init_soc=0.5
-        )
+        problem = pybop.FittingProblem(model_instance, parameters, dataset)

         # Create cost function
         cost = pybop.SumSquaredError(problem=problem)

         # Create optimization instance
         self.optim = pybop.Optimisation(cost, optimiser=optimiser)
+        if optimiser in [pybop.GradientDescent]:
+            self.optim.optimiser.set_learning_rate(
+                0.008
+            )  # Compromise between stability & performance

-    def time_parameterisation(self, _model, _parameter_set, _optimiser):
+    def time_parameterisation(self, model, parameter_set, optimiser):
         """
-        Benchmark the parameterization process.
+        Benchmark the parameterization process. Optimiser options are left at high values
+        to ensure the threshold is met and the optimisation process is completed.

         Args:
-            _model (pybop.Model): The model class being benchmarked (unused).
-            _parameter_set (str): The name of the parameter set being used (unused).
-            _optimiser (pybop.Optimiser): The optimizer class being used (unused).
+            model (pybop.Model): The model class being benchmarked (unused).
+            parameter_set (str): The name of the parameter set being used (unused).
+            optimiser (pybop.Optimiser): The optimizer class being used (unused).
         """
-        self.optim.run()
+        # Set optimizer options for consistent benchmarking
+        self.optim.set_max_unchanged_iterations(iterations=25, threshold=1e-5)
+        self.optim.set_max_iterations(250)
+        self.optim.set_min_iterations(2)
+        x, _ = self.optim.run()
+        return x
+
+    def track_results(self, model, parameter_set, optimiser):
+        """
+        Track the results of the optimization.
+        Note: These results will be different from those of time_parameterisation,
+        as they are run separately. These results should be used to verify that
+        the optimisation algorithm typically converges.
+
+        Args:
+            model (pybop.Model): The model class being benchmarked (unused).
+            parameter_set (str): The name of the parameter set being used (unused).
+            optimiser (pybop.Optimiser): The optimizer class being used (unused).
+        """
+        x = self.time_parameterisation(model, parameter_set, optimiser)
+
+        return tuple(x)

-    def time_optimiser_ask(self, _model, _parameter_set, optimiser):
+    def time_optimiser_ask(self, model, parameter_set, optimiser):
         """
         Benchmark the optimizer's ask method.

         Args:
-            _model (pybop.Model): The model class being benchmarked (unused).
-            _parameter_set (str): The name of the parameter set being used (unused).
+            model (pybop.Model): The model class being benchmarked (unused).
+            parameter_set (str): The name of the parameter set being used (unused).
             optimiser (pybop.Optimiser): The optimizer class being used.
         """
         if optimiser not in [pybop.SciPyMinimize, pybop.SciPyDifferentialEvolution]:
             self.optim.optimiser.ask()
diff --git a/pybop/_optimisation.py b/pybop/_optimisation.py
index 3eb839ef..f122535e 100644
--- a/pybop/_optimisation.py
+++ b/pybop/_optimisation.py
@@ -468,7 +468,7 @@ def set_min_iterations(self, iterations=2):
             raise ValueError("Minimum number of iterations cannot be negative.")
         self._min_iterations = iterations

-    def set_max_unchanged_iterations(self, iterations=5, threshold=1e-5):
+    def set_max_unchanged_iterations(self, iterations=15, threshold=1e-5):
         """
         Set the maximum number of iterations without significant change as a
         stopping criterion.
Credit: PINTS @@ -476,7 +476,7 @@ def set_max_unchanged_iterations(self, iterations=5, threshold=1e-5): Parameters ---------- iterations : int, optional - The maximum number of unchanged iterations to run (default is 25). + The maximum number of unchanged iterations to run (default is 15). Set to `None` to remove this stopping criterion. threshold : float, optional The minimum significant change in the objective function value that resets the unchanged iteration counter (default is 1e-5). From 201316103e3d995d673aac5d05b668c6ad8dbf43 Mon Sep 17 00:00:00 2001 From: Brady Planden Date: Mon, 18 Mar 2024 09:22:45 +0000 Subject: [PATCH 15/15] Adds benchmark badge --- README.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/README.md b/README.md index e4e347e1..e4342443 100644 --- a/README.md +++ b/README.md @@ -31,6 +31,9 @@ open In colab + + Static Badge + releases