From a4d32112f1f1cb552c3e5b310541acdbc0c34ed6 Mon Sep 17 00:00:00 2001 From: Brady Planden Date: Fri, 16 Feb 2024 20:24:43 +0000 Subject: [PATCH 01/15] Split integration and unit tests, update pytest --examples, additional scheduled workflow trigger, add test_parameterisation cost/optimiser test matrix --- .github/workflows/scheduled_tests.yaml | 7 +-- conftest.py | 48 +++++++++++++---- noxfile.py | 16 +++++- pybop/_optimisation.py | 2 +- pybop/costs/fitting_costs.py | 40 ++++++++++++++ tests/{unit => examples}/test_examples.py | 1 + .../test_parameterisations.py | 54 +++++-------------- .../test_plotly_manager.py} | 8 +-- tests/unit/test_optimisation.py | 4 +- 9 files changed, 118 insertions(+), 62 deletions(-) rename tests/{unit => examples}/test_examples.py (97%) rename tests/{unit => integration}/test_parameterisations.py (82%) rename tests/{unit/test_plotting.py => integration/test_plotly_manager.py} (97%) diff --git a/.github/workflows/scheduled_tests.yaml b/.github/workflows/scheduled_tests.yaml index 490efea9..f4b6a1a9 100644 --- a/.github/workflows/scheduled_tests.yaml +++ b/.github/workflows/scheduled_tests.yaml @@ -6,9 +6,10 @@ on: branches: - main - # runs every day at 09:00 UTC + # runs every day at 09:00 and 15:00 UTC schedule: - cron: '0 9 * * *' + - cron: '0 15 * * *' jobs: build: @@ -30,7 +31,7 @@ jobs: python -m pip install --upgrade pip nox - name: Unit tests with nox run: | - python -m nox -s unit + python -m nox -s coverage python -m nox -s notebooks #M-series Mac Mini @@ -58,7 +59,7 @@ jobs: eval "$(pyenv init -)" pyenv activate pybop-${{ matrix.python-version }} python -m pip install --upgrade pip wheel setuptools nox - python -m nox -s unit + python -m nox -s coverage python -m nox -s notebooks - name: Uninstall pyenv-virtualenv & python diff --git a/conftest.py b/conftest.py index c632f3e5..a8a801ab 100644 --- a/conftest.py +++ b/conftest.py @@ -10,6 +10,12 @@ def pytest_addoption(parser): parser.addoption( "--unit", action="store_true", default=False, help="run unit tests" ) + parser.addoption( + "--integration", + action="store_true", + default=False, + help="run integration tests", + ) parser.addoption( "--examples", action="store_true", default=False, help="run examples tests" ) @@ -25,28 +31,48 @@ def pytest_terminal_summary(terminalreporter, exitstatus, config): def pytest_configure(config): config.addinivalue_line("markers", "unit: mark test as a unit test") + config.addinivalue_line("markers", "integration: mark test as an integration test") config.addinivalue_line("markers", "examples: mark test as an example") def pytest_collection_modifyitems(config, items): - unit_option = config.getoption("--unit") - examples_option = config.getoption("--examples") + unit = config.getoption("--unit") + integration = config.getoption("--integration") + examples = config.getoption("--examples") - if not unit_option and not examples_option: - skip_all = pytest.mark.skip(reason="need --unit or --examples option to run") + if not unit and not examples and not integration: + skip_all = pytest.mark.skip( + reason="need --unit or --examples or --integration option to run" + ) for item in items: item.add_marker(skip_all) - elif unit_option and not examples_option: - skip_examples = pytest.mark.skip( - reason="need --examples option to run examples tests" + elif unit and not examples and not integration: + skip_examples_integration = pytest.mark.skip( + reason="need --examples option to run examples tests, or --integration option to run integration tests" ) for item in items: if 
"examples" in item.keywords: - item.add_marker(skip_examples) + item.add_marker(skip_examples_integration) + if "integration" in item.keywords: + item.add_marker(skip_examples_integration) - if examples_option and not unit_option: - skip_unit = pytest.mark.skip(reason="need --unit option to run unit tests") + elif examples and not unit and not integration: + skip_unit_integration = pytest.mark.skip( + reason="need --unit option to run unit tests or --integration option to run integration tests" + ) for item in items: if "unit" in item.keywords: - item.add_marker(skip_unit) + item.add_marker(skip_unit_integration) + if "integration" in item.keywords: + item.add_marker(skip_unit_integration) + + elif integration and not unit and not examples: + skip_unit_examples = pytest.mark.skip( + reason="need --unit option to run unit tests or --examples option to run examples tests" + ) + for item in items: + if "unit" in item.keywords: + item.add_marker(skip_unit_examples) + if "examples" in item.keywords: + item.add_marker(skip_unit_examples) diff --git a/noxfile.py b/noxfile.py index ddb7bd19..e0d22acf 100644 --- a/noxfile.py +++ b/noxfile.py @@ -18,12 +18,26 @@ def coverage(session): session.run( "pytest", "--unit", - "--examples", + "--integration", "--cov", "--cov-report=xml", ) +@nox.session +def integration(session): + session.run_always("pip", "install", "-e", ".[all]") + session.install("pytest", "pytest-mock") + session.run("pytest", "--integration") + + +@nox.session +def examples(session): + session.run_always("pip", "install", "-e", ".[all]") + session.install("pytest", "pytest-mock") + session.run("pytest", "--examples") + + @nox.session def notebooks(session): """Run the examples tests for Jupyter notebooks.""" diff --git a/pybop/_optimisation.py b/pybop/_optimisation.py index 423ec9ba..cc367ccc 100644 --- a/pybop/_optimisation.py +++ b/pybop/_optimisation.py @@ -449,7 +449,7 @@ def set_max_iterations(self, iterations=1000): raise ValueError("Maximum number of iterations cannot be negative.") self._max_iterations = iterations - def set_max_unchanged_iterations(self, iterations=25, threshold=1e-5): + def set_max_unchanged_iterations(self, iterations=5, threshold=1e-5): """ Set the maximum number of iterations without significant change as a stopping criterion. Credit: PINTS diff --git a/pybop/costs/fitting_costs.py b/pybop/costs/fitting_costs.py index a02aeca6..4f05c37f 100644 --- a/pybop/costs/fitting_costs.py +++ b/pybop/costs/fitting_costs.py @@ -45,6 +45,46 @@ def _evaluate(self, x, grad=None): else: return np.sqrt(np.mean((prediction - self._target) ** 2)) + def _evaluateS1(self, x): + """ + Compute the cost and its gradient with respect to the parameters. + + Parameters + ---------- + x : array-like + The parameters for which to compute the cost and gradient. + + Returns + ------- + tuple + A tuple containing the cost and the gradient. The cost is a float, + and the gradient is an array-like of the same length as `x`. + + Raises + ------ + ValueError + If an error occurs during the calculation of the cost or gradient. 
+ """ + y, dy = self.problem.evaluateS1(x) + if len(y) < len(self._target): + e = np.float64(np.inf) + de = self._de * np.ones(self.n_parameters) + else: + dy = dy.reshape( + ( + self.problem.n_time_data, + self.n_outputs, + self.n_parameters, + ) + ) + r = y - self._target + e = np.sqrt(np.mean((r) ** 2)) + de = np.mean((r.T * dy.T), axis=2) / np.sqrt( + np.mean((r.T * dy.T) ** 2, axis=2) + ) + + return e, de.flatten() + class SumSquaredError(BaseCost): """ diff --git a/tests/unit/test_examples.py b/tests/examples/test_examples.py similarity index 97% rename from tests/unit/test_examples.py rename to tests/examples/test_examples.py index ebc3570a..414701de 100644 --- a/tests/unit/test_examples.py +++ b/tests/examples/test_examples.py @@ -9,6 +9,7 @@ class TestExamples: A class to test the example scripts. """ + @staticmethod def list_of_examples(): list = [] path_to_example_scripts = os.path.join( diff --git a/tests/unit/test_parameterisations.py b/tests/integration/test_parameterisations.py similarity index 82% rename from tests/unit/test_parameterisations.py rename to tests/integration/test_parameterisations.py index 9d53e39b..2e07f488 100644 --- a/tests/unit/test_parameterisations.py +++ b/tests/integration/test_parameterisations.py @@ -32,43 +32,17 @@ def parameters(self): def x0(self): return np.array([0.63, 0.51]) - @pytest.mark.parametrize("init_soc", [0.3, 0.7]) - @pytest.mark.unit - def test_spm(self, parameters, model, x0, init_soc): - # Form dataset - solution = self.getdata(model, x0, init_soc) - dataset = pybop.Dataset( - { - "Time [s]": solution["Time [s]"].data, - "Current function [A]": solution["Current [A]"].data, - "Terminal voltage [V]": solution["Terminal voltage [V]"].data, - } - ) - - # Define the cost to optimise - signal = ["Terminal voltage [V]"] - problem = pybop.FittingProblem( - model, parameters, dataset, signal=signal, init_soc=init_soc - ) - cost = pybop.RootMeanSquaredError(problem) - - # Select optimiser - optimiser = pybop.CMAES - - # Build the optimisation problem - parameterisation = pybop.Optimisation(cost=cost, optimiser=optimiser) - - # Run the optimisation problem - x, final_cost = parameterisation.run() + @pytest.fixture(params=[0.3, 0.7]) + def init_soc(self, request): + return request.param - # Assertions - np.testing.assert_allclose(final_cost, 0, atol=1e-2) - np.testing.assert_allclose(x, x0, atol=1e-1) + @pytest.fixture(params=[pybop.RootMeanSquaredError, pybop.SumSquaredError]) + def cost_class(self, request): + return request.param @pytest.fixture - def spm_cost(self, parameters, model, x0): + def spm_costs(self, parameters, model, x0, cost_class, init_soc): # Form dataset - init_soc = 0.5 solution = self.getdata(model, x0, init_soc) dataset = pybop.Dataset( { @@ -83,7 +57,7 @@ def spm_cost(self, parameters, model, x0): problem = pybop.FittingProblem( model, parameters, dataset, signal=signal, init_soc=init_soc ) - return pybop.SumSquaredError(problem) + return cost_class(problem) @pytest.mark.parametrize( "optimiser", @@ -99,10 +73,10 @@ def spm_cost(self, parameters, model, x0): pybop.XNES, ], ) - @pytest.mark.unit - def test_spm_optimisers(self, optimiser, spm_cost, x0): + @pytest.mark.integration + def test_spm_optimisers(self, optimiser, spm_costs, x0): # Test each optimiser - parameterisation = pybop.Optimisation(cost=spm_cost, optimiser=optimiser) + parameterisation = pybop.Optimisation(cost=spm_costs, optimiser=optimiser) parameterisation.set_max_unchanged_iterations(iterations=25, threshold=5e-4) if optimiser in [pybop.CMAES]: @@ 
-141,7 +115,7 @@ def test_spm_optimisers(self, optimiser, spm_cost, x0): # Assertions np.testing.assert_allclose(final_cost, 0, atol=1e-2) - np.testing.assert_allclose(x, x0, atol=1e-1) + np.testing.assert_allclose(x, x0, atol=1e-2) @pytest.fixture def spm_two_signal_cost(self, parameters, model, x0): @@ -171,7 +145,7 @@ def spm_two_signal_cost(self, parameters, model, x0): pybop.CMAES, ], ) - @pytest.mark.unit + @pytest.mark.integration def test_multiple_signals(self, optimiser, spm_two_signal_cost, x0): # Test each optimiser parameterisation = pybop.Optimisation( @@ -190,7 +164,7 @@ def test_multiple_signals(self, optimiser, spm_two_signal_cost, x0): np.testing.assert_allclose(x, x0, atol=1e-1) @pytest.mark.parametrize("init_soc", [0.3, 0.7]) - @pytest.mark.unit + @pytest.mark.integration def test_model_misparameterisation(self, parameters, model, x0, init_soc): # Define two different models with different parameter sets # The optimisation should fail as the models are not the same diff --git a/tests/unit/test_plotting.py b/tests/integration/test_plotly_manager.py similarity index 97% rename from tests/unit/test_plotting.py rename to tests/integration/test_plotly_manager.py index 92a327c6..01b8166a 100644 --- a/tests/unit/test_plotting.py +++ b/tests/integration/test_plotly_manager.py @@ -52,7 +52,7 @@ def uninstall_plotly_if_installed(): plotly.io.renderers.default = None -@pytest.mark.unit +@pytest.mark.integration def test_initialization_with_plotly_installed(plotly_installed): """Test initialization when Plotly is installed.""" assert is_package_installed("plotly") @@ -67,7 +67,7 @@ def test_initialization_with_plotly_installed(plotly_installed): assert plotly_manager.make_subplots == make_subplots -@pytest.mark.unit +@pytest.mark.integration def test_prompt_for_plotly_installation(mocker, uninstall_plotly_if_installed): """Test prompt for Plotly installation when not installed.""" assert not is_package_installed("plotly") @@ -83,7 +83,7 @@ def test_prompt_for_plotly_installation(mocker, uninstall_plotly_if_installed): assert plotly_manager.make_subplots == make_subplots -@pytest.mark.unit +@pytest.mark.integration def test_cancel_installation(mocker, uninstall_plotly_if_installed): """Test exit if Plotly installation is canceled.""" assert not is_package_installed("plotly") @@ -96,7 +96,7 @@ def test_cancel_installation(mocker, uninstall_plotly_if_installed): assert not is_package_installed("plotly") -@pytest.mark.unit +@pytest.mark.integration def test_post_install_setup(plotly_installed): """Test post-install setup.""" plotly_manager = PlotlyManager() diff --git a/tests/unit/test_optimisation.py b/tests/unit/test_optimisation.py index 8e7a50b2..6dc6c5df 100644 --- a/tests/unit/test_optimisation.py +++ b/tests/unit/test_optimisation.py @@ -104,9 +104,9 @@ def test_prior_sampling(self, cost): def test_halting(self, cost): # Test max evalutions optim = pybop.Optimisation(cost=cost, optimiser=pybop.GradientDescent) - optim.set_max_evaluations(10) + optim.set_max_evaluations(1) x, __ = optim.run() - assert optim._iterations == 10 + assert optim._iterations == 1 # Test max unchanged iterations optim = pybop.Optimisation(cost=cost, optimiser=pybop.GradientDescent) From 1e9d5abf45de8457b4062b4dff431ac3b3554cb2 Mon Sep 17 00:00:00 2001 From: Brady Planden Date: Fri, 16 Feb 2024 20:41:39 +0000 Subject: [PATCH 02/15] updt coverage action for style --- .github/workflows/test_on_push.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/test_on_push.yaml 
b/.github/workflows/test_on_push.yaml index a55219bb..e3371357 100644 --- a/.github/workflows/test_on_push.yaml +++ b/.github/workflows/test_on_push.yaml @@ -62,6 +62,7 @@ jobs: # Runs only on Ubuntu with Python 3.11 check_coverage: + needs: style runs-on: ubuntu-latest strategy: fail-fast: false From e9fe14b5964fd792750f3da3146fa0bf2ae9fa37 Mon Sep 17 00:00:00 2001 From: Brady Planden Date: Sat, 17 Feb 2024 16:02:22 +0000 Subject: [PATCH 03/15] revert @staticmethod definition for list_of_examples --- tests/examples/test_examples.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/examples/test_examples.py b/tests/examples/test_examples.py index 414701de..ebc3570a 100644 --- a/tests/examples/test_examples.py +++ b/tests/examples/test_examples.py @@ -9,7 +9,6 @@ class TestExamples: A class to test the example scripts. """ - @staticmethod def list_of_examples(): list = [] path_to_example_scripts = os.path.join( From 3f651f4f0a0e24ffc830c382b57b74ce4c907858 Mon Sep 17 00:00:00 2001 From: Brady Planden Date: Wed, 21 Feb 2024 16:56:26 +0000 Subject: [PATCH 04/15] Create --plots pytest marker, add xdist to non-plots nox test sessions, automate the skip marker procedure --- conftest.py | 73 ++++++++++--------- noxfile.py | 15 ++-- .../test_plotly_manager.py | 8 +- 3 files changed, 52 insertions(+), 44 deletions(-) rename tests/{integration => plotting}/test_plotly_manager.py (97%) diff --git a/conftest.py b/conftest.py index a8a801ab..3641cbd1 100644 --- a/conftest.py +++ b/conftest.py @@ -19,6 +19,12 @@ def pytest_addoption(parser): parser.addoption( "--examples", action="store_true", default=False, help="run examples tests" ) + parser.addoption( + "--plots", action="store_true", default=False, help="run plotting tests" + ) + parser.addoption( + "--notebooks", action="store_true", default=False, help="run notebook tests" + ) def pytest_terminal_summary(terminalreporter, exitstatus, config): @@ -33,46 +39,45 @@ def pytest_configure(config): config.addinivalue_line("markers", "unit: mark test as a unit test") config.addinivalue_line("markers", "integration: mark test as an integration test") config.addinivalue_line("markers", "examples: mark test as an example") + config.addinivalue_line("markers", "plots: mark test as a plot test") + config.addinivalue_line("markers", "notebook: mark test as a notebook test") def pytest_collection_modifyitems(config, items): - unit = config.getoption("--unit") - integration = config.getoption("--integration") - examples = config.getoption("--examples") + options = { + "unit": "unit", + "examples": "examples", + "integration": "integration", + "plots": "plots", + "notebooks": "notebooks", + } + selected_markers = [ + marker for option, marker in options.items() if config.getoption(option) + ] - if not unit and not examples and not integration: + if ( + "notebooks" in selected_markers + ): # Notebooks are meant to be run as an individual session + return + + # If no options were passed, skip all tests + if not selected_markers: skip_all = pytest.mark.skip( - reason="need --unit or --examples or --integration option to run" + reason="Need at least one of --unit, --examples, --integration, or --plots option to run" ) for item in items: item.add_marker(skip_all) + return - elif unit and not examples and not integration: - skip_examples_integration = pytest.mark.skip( - reason="need --examples option to run examples tests, or --integration option to run integration tests" - ) - for item in items: - if "examples" in item.keywords: - 
item.add_marker(skip_examples_integration) - if "integration" in item.keywords: - item.add_marker(skip_examples_integration) - - elif examples and not unit and not integration: - skip_unit_integration = pytest.mark.skip( - reason="need --unit option to run unit tests or --integration option to run integration tests" - ) - for item in items: - if "unit" in item.keywords: - item.add_marker(skip_unit_integration) - if "integration" in item.keywords: - item.add_marker(skip_unit_integration) - - elif integration and not unit and not examples: - skip_unit_examples = pytest.mark.skip( - reason="need --unit option to run unit tests or --examples option to run examples tests" - ) - for item in items: - if "unit" in item.keywords: - item.add_marker(skip_unit_examples) - if "examples" in item.keywords: - item.add_marker(skip_unit_examples) + # Skip tests that don't match any of the selected markers + for item in items: + item_markers = { + mark.name for mark in item.iter_markers() + } # Gather markers of the test item + if not item_markers.intersection( + selected_markers + ): # Skip if there's no intersection with selected markers + skip_this = pytest.mark.skip( + reason=f"Test does not match the selected options: {', '.join(selected_markers)}" + ) + item.add_marker(skip_this) diff --git a/noxfile.py b/noxfile.py index 1f3661b0..66f6e7eb 100644 --- a/noxfile.py +++ b/noxfile.py @@ -16,7 +16,7 @@ def unit(session): session.install("-e", ".[all,dev]", silent=False) if PYBOP_SCHEDULED: session.run("pip", "install", f"pybamm=={PYBAMM_VERSION}", silent=False) - session.run("pytest", "--unit") + session.run("pytest", "--unit", "-n", "auto") @nox.session @@ -28,6 +28,7 @@ def coverage(session): "pytest", "--unit", "--integration", + "--plots", "--cov", "--cov-report=xml", ) @@ -35,16 +36,16 @@ def coverage(session): @nox.session def integration(session): - session.run_always("pip", "install", "-e", ".[all]") + session.run_always("pip", "install", "-e", ".[all,dev]") session.install("pytest", "pytest-mock") - session.run("pytest", "--integration") + session.run("pytest", "--integration", "-n", "auto") @nox.session def examples(session): - session.run_always("pip", "install", "-e", ".[all]") + session.run_always("pip", "install", "-e", ".[all,dev]") session.install("pytest", "pytest-mock") - session.run("pytest", "--examples") + session.run("pytest", "--examples", "-n", "auto") @nox.session @@ -53,7 +54,9 @@ def notebooks(session): session.install("-e", ".[all,dev]", silent=False) if PYBOP_SCHEDULED: session.run("pip", "install", f"pybamm=={PYBAMM_VERSION}", silent=False) - session.run("pytest", "--nbmake", "--examples", "examples/", external=True) + session.run( + "pytest", "--notebooks", "--nbmake", "examples/", "-n", "auto", external=True + ) @nox.session diff --git a/tests/integration/test_plotly_manager.py b/tests/plotting/test_plotly_manager.py similarity index 97% rename from tests/integration/test_plotly_manager.py rename to tests/plotting/test_plotly_manager.py index 01b8166a..159d8f2e 100644 --- a/tests/integration/test_plotly_manager.py +++ b/tests/plotting/test_plotly_manager.py @@ -52,7 +52,7 @@ def uninstall_plotly_if_installed(): plotly.io.renderers.default = None -@pytest.mark.integration +@pytest.mark.plots def test_initialization_with_plotly_installed(plotly_installed): """Test initialization when Plotly is installed.""" assert is_package_installed("plotly") @@ -67,7 +67,7 @@ def test_initialization_with_plotly_installed(plotly_installed): assert plotly_manager.make_subplots == 
make_subplots -@pytest.mark.integration +@pytest.mark.plots def test_prompt_for_plotly_installation(mocker, uninstall_plotly_if_installed): """Test prompt for Plotly installation when not installed.""" assert not is_package_installed("plotly") @@ -83,7 +83,7 @@ def test_prompt_for_plotly_installation(mocker, uninstall_plotly_if_installed): assert plotly_manager.make_subplots == make_subplots -@pytest.mark.integration +@pytest.mark.plots def test_cancel_installation(mocker, uninstall_plotly_if_installed): """Test exit if Plotly installation is canceled.""" assert not is_package_installed("plotly") @@ -96,7 +96,7 @@ def test_cancel_installation(mocker, uninstall_plotly_if_installed): assert not is_package_installed("plotly") -@pytest.mark.integration +@pytest.mark.plots def test_post_install_setup(plotly_installed): """Test post-install setup.""" plotly_manager = PlotlyManager() From 79f71998546d8eed24e9bb66a6739aaec572178e Mon Sep 17 00:00:00 2001 From: Brady Planden Date: Thu, 22 Feb 2024 13:16:10 +0000 Subject: [PATCH 05/15] Update test_on_push workflow to add examples and integration, updates coverage to use parallel workers where possible --- .github/workflows/test_on_push.yaml | 14 +++++++++++++- noxfile.py | 9 +++++++-- 2 files changed, 20 insertions(+), 3 deletions(-) diff --git a/.github/workflows/test_on_push.yaml b/.github/workflows/test_on_push.yaml index 88f690ef..399e66fd 100644 --- a/.github/workflows/test_on_push.yaml +++ b/.github/workflows/test_on_push.yaml @@ -57,9 +57,21 @@ jobs: - name: Install dependencies run: | python -m pip install --upgrade pip nox - - name: Unit and notebook tests with nox + + - name: Unit tests run: | nox -s unit + + - name: Integration tests + run: | + nox -s integration + + - name: Run examples + run: | + nox -s examples + + - name: Run notebooks + run: | nox -s notebooks # Runs only on Ubuntu with Python 3.11 diff --git a/noxfile.py b/noxfile.py index 66f6e7eb..5e84a310 100644 --- a/noxfile.py +++ b/noxfile.py @@ -24,14 +24,19 @@ def coverage(session): session.install("-e", ".[all,dev]", silent=False) if PYBOP_SCHEDULED: session.run("pip", "install", f"pybamm=={PYBAMM_VERSION}", silent=False) + session.run( + "pytest", "--unit", "--cov", "--cov-append", "--cov-report=xml", "-n", "auto" + ) session.run( "pytest", - "--unit", "--integration", - "--plots", "--cov", + "--cov-append", "--cov-report=xml", + "-n", + "auto", ) + session.run("pytest", "--plots", "--cov", "--cov-append", "--cov-report=xml") @nox.session From 37e4062697782490794e1b40adcad0a5cca76e05 Mon Sep 17 00:00:00 2001 From: Brady Planden Date: Thu, 22 Feb 2024 13:51:35 +0000 Subject: [PATCH 06/15] relax integration asserts --- tests/integration/test_parameterisations.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/integration/test_parameterisations.py b/tests/integration/test_parameterisations.py index 2e07f488..e9d7cd9a 100644 --- a/tests/integration/test_parameterisations.py +++ b/tests/integration/test_parameterisations.py @@ -115,7 +115,7 @@ def test_spm_optimisers(self, optimiser, spm_costs, x0): # Assertions np.testing.assert_allclose(final_cost, 0, atol=1e-2) - np.testing.assert_allclose(x, x0, atol=1e-2) + np.testing.assert_allclose(x, x0, atol=5e-2) @pytest.fixture def spm_two_signal_cost(self, parameters, model, x0): @@ -160,8 +160,8 @@ def test_multiple_signals(self, optimiser, spm_two_signal_cost, x0): x, final_cost = parameterisation.run() # Assertions - np.testing.assert_allclose(final_cost, 0, atol=2.5e-2) - 
np.testing.assert_allclose(x, x0, atol=1e-1) + np.testing.assert_allclose(final_cost, 0, atol=1e-2) + np.testing.assert_allclose(x, x0, atol=5e-2) @pytest.mark.parametrize("init_soc", [0.3, 0.7]) @pytest.mark.integration @@ -200,7 +200,7 @@ def test_model_misparameterisation(self, parameters, model, x0, init_soc): # Assertions with np.testing.assert_raises(AssertionError): np.testing.assert_allclose(final_cost, 0, atol=1e-2) - np.testing.assert_allclose(x, x0, atol=1e-1) + np.testing.assert_allclose(x, x0, atol=5e-2) def getdata(self, model, x0, init_soc): model.parameter_set.update( From 88522c1c61d66a4de11fc92d3ccd2749db542365 Mon Sep 17 00:00:00 2001 From: Brady Planden Date: Thu, 22 Feb 2024 14:26:27 +0000 Subject: [PATCH 07/15] Split test_on_push jobs, reduce integration and example tests to 3.11 --- .github/workflows/test_on_push.yaml | 59 +++++++++++++++++++++++++++-- 1 file changed, 55 insertions(+), 4 deletions(-) diff --git a/.github/workflows/test_on_push.yaml b/.github/workflows/test_on_push.yaml index 399e66fd..1170e0fa 100644 --- a/.github/workflows/test_on_push.yaml +++ b/.github/workflows/test_on_push.yaml @@ -1,4 +1,4 @@ -name: test_on_push +name: PyBOP on: workflow_dispatch: @@ -27,8 +27,36 @@ jobs: python -m pip install pre-commit pre-commit run ruff + integration_tests: + needs: style + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + os: [ubuntu-latest, windows-latest, macos-latest] + python-version: ["3.11"] + # Include MacOS M-series Runners + include: + - os: macos-14 + python-version: "3.11" + + name: Integration tests (${{ matrix.os }} / Python ${{ matrix.python-version }}) + + steps: + - uses: actions/checkout@v4 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v4 + with: + python-version: ${{ matrix.python-version }} + - name: Install dependencies + run: | + python -m pip install --upgrade pip nox + + - name: Integration tests + run: | + nox -s integration - build: + unit_tests: needs: style runs-on: ${{ matrix.os }} strategy: @@ -48,6 +76,8 @@ jobs: - os: macos-14 python-version: "3.12" + name: Unit tests (${{ matrix.os }} / Python ${{ matrix.python-version }}) + steps: - uses: actions/checkout@v4 - name: Set up Python ${{ matrix.python-version }} @@ -62,9 +92,30 @@ jobs: run: | nox -s unit - - name: Integration tests + example_tests: + needs: style + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + os: [ubuntu-latest, windows-latest, macos-latest] + python-version: ["3.11"] + # Include MacOS M-series Runners + include: + - os: macos-14 + python-version: "3.11" + + name: Test examples (${{ matrix.os }} / Python ${{ matrix.python-version }}) + + steps: + - uses: actions/checkout@v4 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v4 + with: + python-version: ${{ matrix.python-version }} + - name: Install dependencies run: | - nox -s integration + python -m pip install --upgrade pip nox - name: Run examples run: | From 8175bcbe111364f0d2bf9b4c4401275cbe732f0d Mon Sep 17 00:00:00 2001 From: Brady Planden <55357039+BradyPlanden@users.noreply.github.com> Date: Fri, 23 Feb 2024 08:58:17 +0000 Subject: [PATCH 08/15] Update .github/workflows/test_on_push.yaml Co-authored-by: Agriya Khetarpal <74401230+agriyakhetarpal@users.noreply.github.com> --- .github/workflows/test_on_push.yaml | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/.github/workflows/test_on_push.yaml b/.github/workflows/test_on_push.yaml index 
1170e0fa..2efa9660 100644 --- a/.github/workflows/test_on_push.yaml +++ b/.github/workflows/test_on_push.yaml @@ -98,12 +98,8 @@ jobs: strategy: fail-fast: false matrix: - os: [ubuntu-latest, windows-latest, macos-latest] - python-version: ["3.11"] - # Include MacOS M-series Runners - include: - - os: macos-14 - python-version: "3.11" + os: [ubuntu-latest, windows-latest, macos-latest, macos-14] + python-version: ["3.12"] name: Test examples (${{ matrix.os }} / Python ${{ matrix.python-version }}) From 522bb983b69bac5c6d4b7db3702b4eb84ea556b0 Mon Sep 17 00:00:00 2001 From: Brady Planden <55357039+BradyPlanden@users.noreply.github.com> Date: Fri, 23 Feb 2024 09:03:37 +0000 Subject: [PATCH 09/15] Update noxfile.py Co-authored-by: Agriya Khetarpal <74401230+agriyakhetarpal@users.noreply.github.com> --- noxfile.py | 1 - 1 file changed, 1 deletion(-) diff --git a/noxfile.py b/noxfile.py index 5e84a310..c17d4f95 100644 --- a/noxfile.py +++ b/noxfile.py @@ -49,7 +49,6 @@ def integration(session): @nox.session def examples(session): session.run_always("pip", "install", "-e", ".[all,dev]") - session.install("pytest", "pytest-mock") session.run("pytest", "--examples", "-n", "auto") From 890f4901d400ea25da1dca1e3550ceb8721ceba1 Mon Sep 17 00:00:00 2001 From: Brady Planden <55357039+BradyPlanden@users.noreply.github.com> Date: Fri, 23 Feb 2024 09:11:13 +0000 Subject: [PATCH 10/15] Update noxfile.py Co-authored-by: Agriya Khetarpal <74401230+agriyakhetarpal@users.noreply.github.com> --- noxfile.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/noxfile.py b/noxfile.py index c17d4f95..0ad3272d 100644 --- a/noxfile.py +++ b/noxfile.py @@ -59,7 +59,7 @@ def notebooks(session): if PYBOP_SCHEDULED: session.run("pip", "install", f"pybamm=={PYBAMM_VERSION}", silent=False) session.run( - "pytest", "--notebooks", "--nbmake", "examples/", "-n", "auto", external=True + "pytest", "--notebooks", "--nbmake", "examples/", "-n", "auto", ) From dd686a78171fe2a53bf998ebf09d88f00a7b613f Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Fri, 23 Feb 2024 09:11:22 +0000 Subject: [PATCH 11/15] style: pre-commit fixes --- noxfile.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/noxfile.py b/noxfile.py index 0ad3272d..41087ba5 100644 --- a/noxfile.py +++ b/noxfile.py @@ -59,7 +59,12 @@ def notebooks(session): if PYBOP_SCHEDULED: session.run("pip", "install", f"pybamm=={PYBAMM_VERSION}", silent=False) session.run( - "pytest", "--notebooks", "--nbmake", "examples/", "-n", "auto", + "pytest", + "--notebooks", + "--nbmake", + "examples/", + "-n", + "auto", ) From 240eeb0929815b431366b978b997f7724bb49d0f Mon Sep 17 00:00:00 2001 From: Brady Planden Date: Fri, 23 Feb 2024 09:17:10 +0000 Subject: [PATCH 12/15] convert to session.install() --- noxfile.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/noxfile.py b/noxfile.py index 41087ba5..75b66617 100644 --- a/noxfile.py +++ b/noxfile.py @@ -41,14 +41,14 @@ def coverage(session): @nox.session def integration(session): - session.run_always("pip", "install", "-e", ".[all,dev]") + session.install("-e", ".[all,dev]", silent=False) session.install("pytest", "pytest-mock") session.run("pytest", "--integration", "-n", "auto") @nox.session def examples(session): - session.run_always("pip", "install", "-e", ".[all,dev]") + session.install("-e", ".[all,dev]", silent=False) session.run("pytest", "--examples", "-n", "auto") 
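A note on the test selection these sessions rely on: the conftest.py hook introduced in PATCH 04 skips any collected test whose markers do not intersect the options passed on the command line, and the unit, integration, examples and coverage sessions above forward --unit, --integration, --examples and --plots to pytest accordingly. A minimal, self-contained sketch of that selection rule (hypothetical helper name, not PyBOP code, and omitting the --notebooks early-return):

def should_skip(selected_options, test_markers):
    # Skip a test unless it carries at least one of the selected markers;
    # with no options passed at all, everything is skipped.
    if not selected_options:
        return True
    return not set(test_markers) & set(selected_options)

assert should_skip([], {"unit"}) is True                        # no option passed: skip all
assert should_skip(["unit"], {"unit"}) is False                 # matching marker: run
assert should_skip(["unit", "plots"], {"integration"}) is True  # no intersection: skip
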
From 817a77895358dd7d9b5a5b561c73ab60a3e1b4f2 Mon Sep 17 00:00:00 2001 From: Brady Planden Date: Fri, 23 Feb 2024 09:52:01 +0000 Subject: [PATCH 13/15] updt pytest default opts, test_on_push python vers --- .github/workflows/test_on_push.yaml | 22 +++++++++------------- noxfile.py | 18 +++++++----------- pyproject.toml | 2 +- 3 files changed, 17 insertions(+), 25 deletions(-) diff --git a/.github/workflows/test_on_push.yaml b/.github/workflows/test_on_push.yaml index 2efa9660..af98e8ad 100644 --- a/.github/workflows/test_on_push.yaml +++ b/.github/workflows/test_on_push.yaml @@ -33,12 +33,8 @@ jobs: strategy: fail-fast: false matrix: - os: [ubuntu-latest, windows-latest, macos-latest] - python-version: ["3.11"] - # Include MacOS M-series Runners - include: - - os: macos-14 - python-version: "3.11" + os: [ubuntu-latest, windows-latest, macos-latest, macos-14] + python-version: ["3.12"] name: Integration tests (${{ matrix.os }} / Python ${{ matrix.python-version }}) @@ -64,9 +60,9 @@ jobs: matrix: os: [ubuntu-latest, windows-latest, macos-latest] python-version: ["3.8", "3.9", "3.10", "3.11", "3.12"] - exclude: # We run the coverage tests on Ubuntu with Python 3.11 + exclude: # We run the coverage tests on Ubuntu with Python 3.12 - os: ubuntu-latest - python-version: "3.11" + python-version: "3.12" # Include MacOS M-series Runners include: - os: macos-14 @@ -121,29 +117,29 @@ jobs: run: | nox -s notebooks - # Runs only on Ubuntu with Python 3.11 + # Runs only on Ubuntu with Python 3.12 check_coverage: needs: style runs-on: ubuntu-latest strategy: fail-fast: false - name: Coverage tests (ubuntu-latest / Python 3.11) + name: Coverage tests (ubuntu-latest / Python 3.12) steps: - name: Check out PyBOP repository uses: actions/checkout@v4 - - name: Set up Python 3.11 + - name: Set up Python 3.12 id: setup-python uses: actions/setup-python@v4 with: - python-version: 3.11 + python-version: 3.12 cache: 'pip' cache-dependency-path: setup.py - name: Install dependencies run: | python -m pip install --upgrade pip nox - - name: Run coverage tests for Ubuntu with Python 3.11 and generate report + - name: Run coverage tests for Ubuntu with Python 3.12 and generate report run: nox -s coverage - name: Upload coverage report diff --git a/noxfile.py b/noxfile.py index 75b66617..c732bc3f 100644 --- a/noxfile.py +++ b/noxfile.py @@ -16,7 +16,7 @@ def unit(session): session.install("-e", ".[all,dev]", silent=False) if PYBOP_SCHEDULED: session.run("pip", "install", f"pybamm=={PYBAMM_VERSION}", silent=False) - session.run("pytest", "--unit", "-n", "auto") + session.run("pytest", "--unit") @nox.session @@ -24,32 +24,30 @@ def coverage(session): session.install("-e", ".[all,dev]", silent=False) if PYBOP_SCHEDULED: session.run("pip", "install", f"pybamm=={PYBAMM_VERSION}", silent=False) - session.run( - "pytest", "--unit", "--cov", "--cov-append", "--cov-report=xml", "-n", "auto" - ) + session.run("pytest", "--unit", "--cov", "--cov-append", "--cov-report=xml") session.run( "pytest", "--integration", "--cov", "--cov-append", "--cov-report=xml", - "-n", - "auto", ) - session.run("pytest", "--plots", "--cov", "--cov-append", "--cov-report=xml") + session.run( + "pytest", "--plots", "--cov", "--cov-append", "--cov-report=xml", "-n", "1" + ) @nox.session def integration(session): session.install("-e", ".[all,dev]", silent=False) session.install("pytest", "pytest-mock") - session.run("pytest", "--integration", "-n", "auto") + session.run("pytest", "--integration") @nox.session def examples(session): session.install("-e", 
".[all,dev]", silent=False) - session.run("pytest", "--examples", "-n", "auto") + session.run("pytest", "--examples") @nox.session @@ -63,8 +61,6 @@ def notebooks(session): "--notebooks", "--nbmake", "examples/", - "-n", - "auto", ) diff --git a/pyproject.toml b/pyproject.toml index 43928f48..539745ad 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -56,7 +56,7 @@ include = ["pybop", "pybop.*"] Homepage = "https://github.com/pybop-team/PyBOP" [tool.pytest.ini_options] -addopts = "--showlocals -v" +addopts = "--showlocals -v -n auto" [tool.ruff] extend-include = ["*.ipynb"] From 54e97f5d2de937ac7441c845957d2b672b6673f6 Mon Sep 17 00:00:00 2001 From: NicolaCourtier <45851982+NicolaCourtier@users.noreply.github.com> Date: Fri, 23 Feb 2024 11:38:01 +0000 Subject: [PATCH 14/15] 139b Add unit tests for plotting and Thevenin model (#212) * Create test_plots.py * Parametrize test_models * Add check on n_states * Add test for json parameter set * Add test for json export * Add test for invalid max values * Add optimisation tests * Add observer tests * Add tests on observer evaluate * Add tests on invalid parameter inputs * Add invalid sample size tests --- tests/unit/test_models.py | 80 +++++++++++++++------------- tests/unit/test_observers.py | 33 ++++++++++++ tests/unit/test_optimisation.py | 14 +++++ tests/unit/test_parameter_sets.py | 66 +++++++++++++++++++++++ tests/unit/test_parameters.py | 14 +++++ tests/unit/test_plots.py | 87 +++++++++++++++++++++++++++++++ tests/unit/test_priors.py | 9 ++++ 7 files changed, 267 insertions(+), 36 deletions(-) create mode 100644 tests/unit/test_plots.py diff --git a/tests/unit/test_models.py b/tests/unit/test_models.py index dfce88e4..ce9be93e 100644 --- a/tests/unit/test_models.py +++ b/tests/unit/test_models.py @@ -11,11 +11,19 @@ class TestModels: A class to test the models. 
""" - @pytest.mark.unit - def test_simulate_without_build_model(self): - # Define model - model = pybop.lithium_ion.SPM() + @pytest.fixture( + params=[ + pybop.lithium_ion.SPM(), + pybop.lithium_ion.SPMe(), + pybop.empirical.Thevenin(), + ] + ) + def model(self, request): + model = request.param + return model.copy() + @pytest.mark.unit + def test_simulate_without_build_model(self, model): with pytest.raises( ValueError, match="Model must be built before calling simulate" ): @@ -27,49 +35,47 @@ def test_simulate_without_build_model(self): model.simulateS1(None, None) @pytest.mark.unit - def test_predict_without_pybamm(self): - # Define model - model = pybop.lithium_ion.SPM() + def test_predict_without_pybamm(self, model): model._unprocessed_model = None with pytest.raises(ValueError): model.predict(None, None) @pytest.mark.unit - def test_predict_with_inputs(self): - # Define SPM - model = pybop.lithium_ion.SPM() + def test_predict_with_inputs(self, model): + # Define inputs t_eval = np.linspace(0, 10, 100) - inputs = { - "Negative electrode active material volume fraction": 0.52, - "Positive electrode active material volume fraction": 0.63, - } - - res = model.predict(t_eval=t_eval, inputs=inputs) - assert len(res["Terminal voltage [V]"].data) == 100 + if isinstance(model, (pybop.lithium_ion.SPM, pybop.lithium_ion.SPMe)): + inputs = { + "Negative electrode active material volume fraction": 0.52, + "Positive electrode active material volume fraction": 0.63, + } + elif isinstance(model, (pybop.empirical.Thevenin)): + inputs = { + "R0 [Ohm]": 0.0002, + "R1 [Ohm]": 0.0001, + } + else: + raise ValueError("Inputs not defined for this type of model.") - # Define SPMe - model = pybop.lithium_ion.SPMe() res = model.predict(t_eval=t_eval, inputs=inputs) - assert len(res["Terminal voltage [V]"].data) == 100 + assert len(res["Voltage [V]"].data) == 100 @pytest.mark.unit - def test_predict_without_allow_infeasible_solutions(self): - # Define SPM - model = pybop.lithium_ion.SPM() - model.allow_infeasible_solutions = False - t_eval = np.linspace(0, 10, 100) - inputs = { - "Negative electrode active material volume fraction": 0.9, - "Positive electrode active material volume fraction": 0.9, - } - - res = model.predict(t_eval=t_eval, inputs=inputs) - assert np.isinf(res).any() + def test_predict_without_allow_infeasible_solutions(self, model): + if isinstance(model, (pybop.lithium_ion.SPM, pybop.lithium_ion.SPMe)): + model.allow_infeasible_solutions = False + t_eval = np.linspace(0, 10, 100) + inputs = { + "Negative electrode active material volume fraction": 0.9, + "Positive electrode active material volume fraction": 0.9, + } + + res = model.predict(t_eval=t_eval, inputs=inputs) + assert np.isinf(res).any() @pytest.mark.unit - def test_build(self): - model = pybop.lithium_ion.SPM() + def test_build(self, model): model.build() assert model.built_model is not None @@ -78,8 +84,7 @@ def test_build(self): assert model.built_model is not None @pytest.mark.unit - def test_rebuild(self): - model = pybop.lithium_ion.SPM() + def test_rebuild(self, model): model.build() initial_built_model = model._built_model assert model._built_model is not None @@ -201,6 +206,9 @@ def test_simulate(self): solved = model.simulate(inputs, t_eval) np.testing.assert_array_almost_equal(solved, expected, decimal=5) + with pytest.raises(ValueError): + ExponentialDecay(n_states=-1) + @pytest.mark.unit def test_basemodel(self): base = pybop.BaseModel() diff --git a/tests/unit/test_observers.py b/tests/unit/test_observers.py index 
020f8f91..ab77428c 100644 --- a/tests/unit/test_observers.py +++ b/tests/unit/test_observers.py @@ -56,3 +56,36 @@ def test_observer(self, model, parameters, x0): np.array([[2 * y]]), decimal=4, ) + + # Test with invalid inputs + with pytest.raises(ValueError): + observer.observe(-1) + with pytest.raises(ValueError): + observer.log_likelihood( + t_eval, np.array([1]), inputs=observer._state.inputs + ) + + # Test covariance + covariance = observer.get_current_covariance() + assert np.shape(covariance) == (n, n) + + # Test evaluate with different inputs + observer._time_data = t_eval + observer.evaluate(x0) + observer.evaluate(parameters) + + # Test evaluate with dataset + observer._dataset = pybop.Dataset( + { + "Time [s]": t_eval, + "Output": expected, + } + ) + observer._target = expected + observer.evaluate(x0) + + @pytest.mark.unit + def test_unbuilt_model(self, parameters): + model = ExponentialDecay() + with pytest.raises(ValueError): + pybop.Observer(parameters, model) diff --git a/tests/unit/test_optimisation.py b/tests/unit/test_optimisation.py index 6dc6c5df..6569d1ad 100644 --- a/tests/unit/test_optimisation.py +++ b/tests/unit/test_optimisation.py @@ -113,3 +113,17 @@ def test_halting(self, cost): optim.set_max_unchanged_iterations(1) x, __ = optim.run() assert optim._iterations == 2 + + # Test invalid maximum values + with pytest.raises(ValueError): + optim.set_max_evaluations(-1) + with pytest.raises(ValueError): + optim.set_max_unchanged_iterations(-1) + with pytest.raises(ValueError): + optim.set_max_unchanged_iterations(1, threshold=-1) + + @pytest.mark.unit + def test_unphysical_result(self, cost): + # Trigger parameters not physically viable warning + optim = pybop.Optimisation(cost=cost) + optim.check_optimal_parameters(np.array([2])) diff --git a/tests/unit/test_parameter_sets.py b/tests/unit/test_parameter_sets.py index 39d29d41..a7ac703c 100644 --- a/tests/unit/test_parameter_sets.py +++ b/tests/unit/test_parameter_sets.py @@ -18,3 +18,69 @@ def test_parameter_set(self): np.testing.assert_allclose( parameter_test["Negative electrode active material volume fraction"], 0.75 ) + + @pytest.mark.unit + def test_ecm_parameter_sets(self): + # Test importing a json file + json_params = pybop.ParameterSet( + json_path="examples/scripts/parameters/initial_ecm_parameters.json" + ) + json_params.import_parameters() + + params = pybop.ParameterSet( + params_dict={ + "chemistry": "ecm", + "Initial SoC": 0.5, + "Initial temperature [K]": 25 + 273.15, + "Cell capacity [A.h]": 5, + "Nominal cell capacity [A.h]": 5, + "Ambient temperature [K]": 25 + 273.15, + "Current function [A]": 5, + "Upper voltage cut-off [V]": 4.2, + "Lower voltage cut-off [V]": 3.0, + "Cell thermal mass [J/K]": 1000, + "Cell-jig heat transfer coefficient [W/K]": 10, + "Jig thermal mass [J/K]": 500, + "Jig-air heat transfer coefficient [W/K]": 10, + "Open-circuit voltage [V]": pybop.empirical.Thevenin().default_parameter_values[ + "Open-circuit voltage [V]" + ], + "R0 [Ohm]": 0.001, + "Element-1 initial overpotential [V]": 0, + "Element-2 initial overpotential [V]": 0, + "R1 [Ohm]": 0.0002, + "R2 [Ohm]": 0.0003, + "C1 [F]": 10000, + "C2 [F]": 5000, + "Entropic change [V/K]": 0.0004, + } + ) + params.import_parameters() + + assert json_params.params == params.params + + # Test exporting a json file + parameters = [ + pybop.Parameter( + "R0 [Ohm]", + prior=pybop.Gaussian(0.0002, 0.0001), + bounds=[1e-4, 1e-2], + initial_value=0.001, + ), + pybop.Parameter( + "R1 [Ohm]", + prior=pybop.Gaussian(0.0001, 0.0001), + 
bounds=[1e-5, 1e-2], + initial_value=0.0002, + ), + ] + params.export_parameters( + "examples/scripts/parameters/fit_ecm_parameters.json", fit_params=parameters + ) + + # Test error when there no parameters to export + empty_params = pybop.ParameterSet() + with pytest.raises(ValueError): + empty_params.export_parameters( + "examples/scripts/parameters/fit_ecm_parameters.json" + ) diff --git a/tests/unit/test_parameters.py b/tests/unit/test_parameters.py index 52f0c69f..28d5e5cd 100644 --- a/tests/unit/test_parameters.py +++ b/tests/unit/test_parameters.py @@ -51,3 +51,17 @@ def test_parameter_margin(self, parameter): assert parameter.margin == 1e-4 parameter.set_margin(margin=1e-3) assert parameter.margin == 1e-3 + + @pytest.mark.unit + def test_invalid_inputs(self, parameter): + # Test error with invalid value + with pytest.raises(ValueError): + parameter.set_margin(margin=-1) + + # Test error with no parameter value + with pytest.raises(ValueError): + parameter.update() + + # Test error with opposite bounds + with pytest.raises(ValueError): + pybop.Parameter("Name", bounds=[0.7, 0.3]) diff --git a/tests/unit/test_plots.py b/tests/unit/test_plots.py new file mode 100644 index 00000000..11e935f0 --- /dev/null +++ b/tests/unit/test_plots.py @@ -0,0 +1,87 @@ +import pybop +import numpy as np +import pytest + + +class TestPlots: + """ + A class to test the plotting classes. + """ + + @pytest.fixture + def model(self): + # Define an example model + return pybop.lithium_ion.SPM() + + @pytest.mark.unit + def test_model_plots(self): + # Test plotting of Model objects + pass + + @pytest.fixture + def problem(self, model): + # Define an example problem + parameters = [ + pybop.Parameter( + "Negative particle radius [m]", + prior=pybop.Gaussian(6e-06, 0.1e-6), + bounds=[1e-6, 9e-6], + ), + pybop.Parameter( + "Positive particle radius [m]", + prior=pybop.Gaussian(4.5e-06, 0.1e-6), + bounds=[1e-6, 9e-6], + ), + ] + + # Generate data + t_eval = np.arange(0, 50, 2) + values = model.predict(t_eval=t_eval) + + # Form dataset + dataset = pybop.Dataset( + { + "Time [s]": t_eval, + "Current function [A]": values["Current [A]"].data, + "Voltage [V]": values["Voltage [V]"].data, + } + ) + + # Generate problem + return pybop.FittingProblem(model, parameters, dataset) + + @pytest.mark.unit + def test_problem_plots(self): + # Test plotting of Problem objects + pass + + @pytest.fixture + def cost(self, problem): + # Define an example cost + return pybop.SumSquaredError(problem) + + @pytest.mark.unit + def test_cost_plots(self, cost): + # Test plotting of Cost objects + pybop.quick_plot(cost.x0, cost, title="Optimised Comparison") + + # Plot the cost landscape + pybop.plot_cost2d(cost, steps=5) + + @pytest.fixture + def optim(self, cost): + # Define and run an example optimisation + optim = pybop.Optimisation(cost, optimiser=pybop.CMAES) + optim.run() + return optim + + @pytest.mark.unit + def test_optim_plots(self, optim): + # Plot convergence + pybop.plot_convergence(optim) + + # Plot the parameter traces + pybop.plot_parameters(optim) + + # Plot the cost landscape with optimisation path + pybop.plot_cost2d(optim.cost, optim=optim, steps=5) diff --git a/tests/unit/test_priors.py b/tests/unit/test_priors.py index 342c35c4..d9b73388 100644 --- a/tests/unit/test_priors.py +++ b/tests/unit/test_priors.py @@ -57,3 +57,12 @@ def test_repr(self, Gaussian, Uniform, Exponential): assert repr(Gaussian) == "Gaussian, mean: 0.5, sigma: 1" assert repr(Uniform) == "Uniform, lower: 0, upper: 1" assert repr(Exponential) == 
"Exponential, scale: 1" + + @pytest.mark.unit + def test_invalid_size(self, Gaussian, Uniform, Exponential): + with pytest.raises(ValueError): + Gaussian.rvs(-1) + with pytest.raises(ValueError): + Uniform.rvs(-1) + with pytest.raises(ValueError): + Exponential.rvs(-1) From 29c6ff220a60a804de7ab395edb2437a9bb4e277 Mon Sep 17 00:00:00 2001 From: Brady Planden Date: Fri, 23 Feb 2024 12:36:34 +0000 Subject: [PATCH 15/15] Updt. CHANGELOG --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 46210212..283dd7e6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,7 @@ ## Features +- [#204](https://github.com/pybop-team/PyBOP/pull/204) - Splits integration, unit, examples, plots tests, update workflows. Adds pytest `--examples`, `--integration`, `--plots` args. Adds tests for coverage after removal of examples. Adds examples and integrations nox sessions. Adds `pybop.RMSE._evaluateS1()` method - [#206](https://github.com/pybop-team/PyBOP/pull/206) - Adds Python 3.12 support with corresponding github actions changes. - [#18](https://github.com/pybop-team/PyBOP/pull/18) - Adds geometric parameter fitting capability, via `model.rebuild()` with `model.rebuild_parameters`. - [#203](https://github.com/pybop-team/PyBOP/pull/203) - Adds support for modern Python packaging via a `pyproject.toml` file and configures the `pytest` test runner and `ruff` linter to use their configurations stored as declarative metadata.