From d6ddfbe4b51c859d228bfcdb41ecb177025e2389 Mon Sep 17 00:00:00 2001 From: NicolaCourtier <45851982+NicolaCourtier@users.noreply.github.com> Date: Wed, 22 May 2024 21:32:12 +0100 Subject: [PATCH] Restructure optimisers to enable passing of kwargs (#255) * Enable passing of kwargs to SciPy * Update checks on bounds * Add update_options and error for Pints * Rename opt to optim * Remove stray comments * Add BaseSciPyOptimiser and BasePintsOptimiser * Align optimiser option setting * Update notebooks with kwargs * Update scripts with kwargs * Update notebooks * Align optimisers with Optimisation as base class * Update stopping criteria in spm_NelderMead.py Co-authored-by: Brady Planden <55357039+BradyPlanden@users.noreply.github.com> * Update stopping criteria in spm_adam.py Co-authored-by: Brady Planden <55357039+BradyPlanden@users.noreply.github.com> * Update sigma0 in spm_descent.py Co-authored-by: Brady Planden <55357039+BradyPlanden@users.noreply.github.com> * Update GradientDescent * Change update to set and check pints_method * Update test_optimisation_options * Update notebooks * Update set learning rate * Pop threshold * Fix bug in model.simulate * Update notebooks * Update test_models.py * Store SciPy result * Update x0 input and add tests * Update bounds to avoid x0 outside * Re-initialise pints_method on certain options * Update x0_new test * Update test_optimisation.py * Create initialise_method for PINTS optimisers * Align optimisation result * Update checks on bounds * Apply suggestions Co-authored-by: Brady Planden <55357039+BradyPlanden@users.noreply.github.com> * Add standalone optimiser * Simplify optimiser set-up and align _minimising * Update option setting in notebooks * Take abs of cost0 * Implement suggestions from Brady * Update tests and base option setting * Update test_invalid_cost * Increase coverage * Sort out notebook changes * Reset scale parameter * Move settings into arguments * Update comments * Update optimiser call * Move check on jac * Add assertions * Add maxiter to test * Add assertion * Update to lambda functions Co-authored-by: Brady Planden <55357039+BradyPlanden@users.noreply.github.com> * Update comment Co-authored-by: Brady Planden <55357039+BradyPlanden@users.noreply.github.com> * Update to list comprehension Co-authored-by: Brady Planden <55357039+BradyPlanden@users.noreply.github.com> * Formatting * Revert "Update to lambda functions" This reverts commit aa73bff23c5805706e5aa6b0646730159d1c8ceb. 
* Move minimising out of costs * Update description * Updates to #236 to avoid breaking change to `pybop.Optimisation` (#309) * Splits Optimisation -> BaseOptimiser/Optimisation, enables two optimisation APIs, updts where required, moves _optimisation to optimisers/ * increase coverage * Pass optimiser_kwargs though run() * updt examples * Converts DefaultOptimiser -> Optimisation * split Optimisation and BaseOptimsier classes, loosen standalone cost unit test * add incorrect attr test * fix: updt changelog entry, optimsation_interface notebook, review suggestions * fix: updt notebook state * Updt assertions, optimisation object name --------- Co-authored-by: NicolaCourtier <45851982+NicolaCourtier@users.noreply.github.com> * Rename method to pints_optimiser * Rename base_optimiser to pints_base_optimiser * Rename _optimisation to base_optimiser --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Brady Planden <55357039+BradyPlanden@users.noreply.github.com> --- CHANGELOG.md | 1 + .../1-single-pulse-circuit-model.ipynb | 3 +- .../equivalent_circuit_identification.ipynb | 3 +- .../multi_model_identification.ipynb | 6 +- .../multi_optimiser_identification.ipynb | 29 +- .../notebooks/optimiser_calibration.ipynb | 10 +- examples/notebooks/optimiser_interface.ipynb | 285 ++++++++++++ .../notebooks/pouch_cell_identification.ipynb | 3 +- examples/notebooks/spm_electrode_design.ipynb | 5 +- examples/scripts/BPX_spm.py | 3 +- examples/scripts/ecm_CMAES.py | 3 +- examples/scripts/exp_UKF.py | 2 +- examples/scripts/gitt.py | 2 +- examples/scripts/spm_CMAES.py | 3 +- examples/scripts/spm_IRPropMin.py | 3 +- examples/scripts/spm_MAP.py | 10 +- examples/scripts/spm_MLE.py | 10 +- examples/scripts/spm_NelderMead.py | 7 +- examples/scripts/spm_SNES.py | 3 +- examples/scripts/spm_UKF.py | 2 +- examples/scripts/spm_XNES.py | 3 +- examples/scripts/spm_adam.py | 7 +- examples/scripts/spm_descent.py | 8 +- examples/scripts/spm_pso.py | 3 +- examples/scripts/spm_scipymin.py | 2 +- examples/scripts/spme_max_energy.py | 17 +- examples/standalone/cost.py | 2 +- examples/standalone/optimiser.py | 90 ++++ pybop/__init__.py | 13 +- pybop/costs/base_cost.py | 12 +- pybop/costs/design_costs.py | 28 +- pybop/costs/fitting_costs.py | 3 +- pybop/optimisers/base_optimiser.py | 230 ++++++++-- .../base_pints_optimiser.py} | 418 ++++++++---------- pybop/optimisers/optimisation.py | 65 +++ pybop/optimisers/pints_optimisers.py | 223 ++++------ pybop/optimisers/scipy_optimisers.py | 372 ++++++++++------ pybop/plotting/plot2d.py | 33 +- pybop/plotting/plot_convergence.py | 12 +- .../test_model_experiment_changes.py | 2 +- .../integration/test_optimisation_options.py | 25 +- .../integration/test_spm_parameterisations.py | 46 +- .../test_thevenin_parameterisation.py | 25 +- tests/unit/test_cost.py | 10 +- tests/unit/test_optimisation.py | 264 +++++++++-- tests/unit/test_plots.py | 4 +- tests/unit/test_standalone.py | 29 +- 47 files changed, 1572 insertions(+), 767 deletions(-) create mode 100644 examples/notebooks/optimiser_interface.ipynb create mode 100644 examples/standalone/optimiser.py rename pybop/{_optimisation.py => optimisers/base_pints_optimiser.py} (57%) create mode 100644 pybop/optimisers/optimisation.py diff --git a/CHANGELOG.md b/CHANGELOG.md index d0d5f7cb3..25b20e7e2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,7 @@ ## Features - [#6](https://github.com/pybop-team/PyBOP/issues/6) - Adds Monte Carlo functionality, with methods based on Pints' 
algorithms. A base class is added `BaseSampler`, in addition to `PintsBaseSampler`. +- [#236](https://github.com/pybop-team/PyBOP/issues/236) - Restructures the optimiser classes, adds a new optimisation API through direct construction and keyword arguments, and fixes the setting of `max_iterations`, and `_minimising`. Introduces `pybop.BaseOptimiser`, `pybop.BasePintsOptimiser`, and `pybop.BaseSciPyOptimiser` classes. - [#321](https://github.com/pybop-team/PyBOP/pull/321) - Updates Prior classes with BaseClass, adds a `problem.sample_initial_conditions` method to improve stability of SciPy.Minimize optimiser. - [#249](https://github.com/pybop-team/PyBOP/pull/249) - Add WeppnerHuggins model and GITT example. - [#304](https://github.com/pybop-team/PyBOP/pull/304) - Decreases the testing suite completion time. diff --git a/examples/notebooks/LG_M50_ECM/1-single-pulse-circuit-model.ipynb b/examples/notebooks/LG_M50_ECM/1-single-pulse-circuit-model.ipynb index a0dd872e8..3181d952a 100644 --- a/examples/notebooks/LG_M50_ECM/1-single-pulse-circuit-model.ipynb +++ b/examples/notebooks/LG_M50_ECM/1-single-pulse-circuit-model.ipynb @@ -1639,8 +1639,7 @@ } ], "source": [ - "optim = pybop.Optimisation(cost, optimiser=pybop.PSO)\n", - "optim.set_max_unchanged_iterations(iterations=55, threshold=1e-6)\n", + "optim = pybop.PSO(cost, max_unchanged_iterations=55, threshold=1e-6)\n", "x, final_cost = optim.run()\n", "print(\"Initial parameters:\", cost.x0)\n", "print(\"Estimated parameters:\", x)" diff --git a/examples/notebooks/equivalent_circuit_identification.ipynb b/examples/notebooks/equivalent_circuit_identification.ipynb index 0c4c2572e..95511e843 100644 --- a/examples/notebooks/equivalent_circuit_identification.ipynb +++ b/examples/notebooks/equivalent_circuit_identification.ipynb @@ -417,8 +417,7 @@ } ], "source": [ - "optim = pybop.Optimisation(cost, optimiser=pybop.CMAES)\n", - "optim.set_max_iterations(300)\n", + "optim = pybop.CMAES(cost, max_iterations=300)\n", "x, final_cost = optim.run()\n", "print(\"Initial parameters:\", cost.x0)\n", "print(\"Estimated parameters:\", x)" diff --git a/examples/notebooks/multi_model_identification.ipynb b/examples/notebooks/multi_model_identification.ipynb index fa23de6cd..f2ab1822f 100644 --- a/examples/notebooks/multi_model_identification.ipynb +++ b/examples/notebooks/multi_model_identification.ipynb @@ -3813,9 +3813,9 @@ " print(f\"Running {model.name}\")\n", " problem = pybop.FittingProblem(model, parameters, dataset, init_soc=init_soc)\n", " cost = pybop.SumSquaredError(problem)\n", - " optim = pybop.Optimisation(cost, optimiser=pybop.XNES, verbose=True)\n", - " optim.set_max_iterations(60)\n", - " optim.set_max_unchanged_iterations(15)\n", + " optim = pybop.XNES(\n", + " cost, verbose=True, max_iterations=60, max_unchanged_iterations=15\n", + " )\n", " x, final_cost = optim.run()\n", " optims.append(optim)\n", " xs.append(x)" diff --git a/examples/notebooks/multi_optimiser_identification.ipynb b/examples/notebooks/multi_optimiser_identification.ipynb index 8112f2e9a..5c38e963e 100644 --- a/examples/notebooks/multi_optimiser_identification.ipynb +++ b/examples/notebooks/multi_optimiser_identification.ipynb @@ -358,9 +358,7 @@ "cost = pybop.SumSquaredError(problem)\n", "for optimiser in gradient_optimisers:\n", " print(f\"Running {optimiser.__name__}\")\n", - " optim = pybop.Optimisation(cost, optimiser=optimiser)\n", - " optim.set_max_unchanged_iterations(20)\n", - " optim.set_max_iterations(60)\n", + " optim = optimiser(cost, 
max_unchanged_iterations=20, max_iterations=60)\n", " x, _ = optim.run()\n", " optims.append(optim)\n", " xs.append(x)" @@ -387,9 +385,7 @@ "source": [ "for optimiser in non_gradient_optimisers:\n", " print(f\"Running {optimiser.__name__}\")\n", - " optim = pybop.Optimisation(cost, optimiser=optimiser)\n", - " optim.set_max_unchanged_iterations(20)\n", - " optim.set_max_iterations(60)\n", + " optim = optimiser(cost, max_unchanged_iterations=20, max_iterations=60)\n", " x, _ = optim.run()\n", " optims.append(optim)\n", " xs.append(x)" @@ -413,9 +409,7 @@ "source": [ "for optimiser in scipy_optimisers:\n", " print(f\"Running {optimiser.__name__}\")\n", - " optim = pybop.Optimisation(cost, optimiser=optimiser)\n", - " optim.set_max_unchanged_iterations(20)\n", - " optim.set_max_iterations(60)\n", + " optim = optimiser(cost, max_iterations=60)\n", " x, _ = optim.run()\n", " optims.append(optim)\n", " xs.append(x)" @@ -462,14 +456,7 @@ ], "source": [ "for optim in optims:\n", - " if isinstance(\n", - " optim.optimiser, (pybop.SciPyMinimize, pybop.SciPyDifferentialEvolution)\n", - " ):\n", - " print(f\"| Optimiser: {optim.optimiser.name()} | Results: {optim.result.x} |\")\n", - " else:\n", - " print(\n", - " f\"| Optimiser: {optim.optimiser.name()} | Results: {optim.optimiser.x_best()} |\"\n", - " )" + " print(f\"| Optimiser: {optim.name()} | Results: {optim.result.x} |\")" ] }, { @@ -612,9 +599,7 @@ ], "source": [ "for optim, x in zip(optims, xs):\n", - " pybop.quick_plot(\n", - " optim.cost.problem, parameter_values=x, title=optim.optimiser.name()\n", - " )" + " pybop.quick_plot(optim.cost.problem, parameter_values=x, title=optim.name())" ] }, { @@ -822,7 +807,7 @@ ], "source": [ "for optim in optims:\n", - " pybop.plot_convergence(optim, title=optim.optimiser.name())\n", + " pybop.plot_convergence(optim, title=optim.name())\n", " pybop.plot_parameters(optim)" ] }, @@ -942,7 +927,7 @@ "# Plot the cost landscape with optimisation path and updated bounds\n", "bounds = np.array([[0.5, 0.8], [0.55, 0.8]])\n", "for optim in optims:\n", - " pybop.plot2d(optim, bounds=bounds, steps=10, title=optim.optimiser.name())" + " pybop.plot2d(optim, bounds=bounds, steps=10, title=optim.name())" ] }, { diff --git a/examples/notebooks/optimiser_calibration.ipynb b/examples/notebooks/optimiser_calibration.ipynb index beed72875..f94ecec65 100644 --- a/examples/notebooks/optimiser_calibration.ipynb +++ b/examples/notebooks/optimiser_calibration.ipynb @@ -281,8 +281,7 @@ "source": [ "problem = pybop.FittingProblem(model, parameters, dataset)\n", "cost = pybop.SumSquaredError(problem)\n", - "optim = pybop.Optimisation(cost, optimiser=pybop.GradientDescent, sigma0=0.2)\n", - "optim.set_max_iterations(100)" + "optim = pybop.GradientDescent(cost, sigma0=0.2, max_iterations=100)" ] }, { @@ -456,8 +455,7 @@ " print(sigma)\n", " problem = pybop.FittingProblem(model, parameters, dataset)\n", " cost = pybop.SumSquaredError(problem)\n", - " optim = pybop.Optimisation(cost, optimiser=pybop.GradientDescent, sigma0=sigma)\n", - " optim.set_max_iterations(100)\n", + " optim = pybop.GradientDescent(cost, sigma0=sigma, max_iterations=100)\n", " x, final_cost = optim.run()\n", " optims.append(optim)\n", " xs.append(x)" @@ -490,7 +488,7 @@ "source": [ "for optim, sigma in zip(optims, sigmas):\n", " print(\n", - " f\"| Sigma: {sigma} | Num Iterations: {optim._iterations} | Best Cost: {optim.optimiser.f_best()} | Results: {optim.optimiser.x_best()} |\"\n", + " f\"| Sigma: {sigma} | Num Iterations: {optim._iterations} | Best Cost: 
{optim.pints_optimiser.f_best()} | Results: {optim.pints_optimiser.x_best()} |\"\n", " )" ] }, @@ -723,7 +721,7 @@ } ], "source": [ - "optim = pybop.Optimisation(cost, optimiser=pybop.GradientDescent, sigma0=0.0115)\n", + "optim = pybop.GradientDescent(cost, sigma0=0.0115)\n", "x, final_cost = optim.run()\n", "pybop.quick_plot(problem, parameter_values=x, title=\"Optimised Comparison\");" ] diff --git a/examples/notebooks/optimiser_interface.ipynb b/examples/notebooks/optimiser_interface.ipynb new file mode 100644 index 000000000..1200e3996 --- /dev/null +++ b/examples/notebooks/optimiser_interface.ipynb @@ -0,0 +1,285 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "00940c64-4748-4b08-9a35-ea98ce311e71", + "metadata": {}, + "source": [ + "# Interacting with PyBOP optimisers\n", + "\n", + "This notebook introduces two interfaces to interact with PyBOP's optimiser classes.\n", + "\n", + "### Set the Environment" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "dd0e1a20-1ba3-4ff5-8f6a-f9c6f25c2a4a", + "metadata": { + "execution": { + "iopub.execute_input": "2024-04-14T18:57:35.622147Z", + "iopub.status.busy": "2024-04-14T18:57:35.621660Z", + "iopub.status.idle": "2024-04-14T18:57:40.849137Z", + "shell.execute_reply": "2024-04-14T18:57:40.848620Z" + } + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Requirement already satisfied: pip in /Users/engs2510/.pyenv/versions/3.12.2/envs/pybop-3.12/lib/python3.12/site-packages (24.0)\n", + "Requirement already satisfied: ipywidgets in /Users/engs2510/.pyenv/versions/3.12.2/envs/pybop-3.12/lib/python3.12/site-packages (8.1.2)\n", + "Requirement already satisfied: comm>=0.1.3 in /Users/engs2510/.pyenv/versions/3.12.2/envs/pybop-3.12/lib/python3.12/site-packages (from ipywidgets) (0.2.2)\n", + "Requirement already satisfied: ipython>=6.1.0 in /Users/engs2510/.pyenv/versions/3.12.2/envs/pybop-3.12/lib/python3.12/site-packages (from ipywidgets) (8.23.0)\n", + "Requirement already satisfied: traitlets>=4.3.1 in /Users/engs2510/.pyenv/versions/3.12.2/envs/pybop-3.12/lib/python3.12/site-packages (from ipywidgets) (5.14.2)\n", + "Requirement already satisfied: widgetsnbextension~=4.0.10 in /Users/engs2510/.pyenv/versions/3.12.2/envs/pybop-3.12/lib/python3.12/site-packages (from ipywidgets) (4.0.10)\n", + "Requirement already satisfied: jupyterlab-widgets~=3.0.10 in /Users/engs2510/.pyenv/versions/3.12.2/envs/pybop-3.12/lib/python3.12/site-packages (from ipywidgets) (3.0.10)\n", + "Requirement already satisfied: decorator in /Users/engs2510/.pyenv/versions/3.12.2/envs/pybop-3.12/lib/python3.12/site-packages (from ipython>=6.1.0->ipywidgets) (5.1.1)\n", + "Requirement already satisfied: jedi>=0.16 in /Users/engs2510/.pyenv/versions/3.12.2/envs/pybop-3.12/lib/python3.12/site-packages (from ipython>=6.1.0->ipywidgets) (0.19.1)\n", + "Requirement already satisfied: matplotlib-inline in /Users/engs2510/.pyenv/versions/3.12.2/envs/pybop-3.12/lib/python3.12/site-packages (from ipython>=6.1.0->ipywidgets) (0.1.6)\n", + "Requirement already satisfied: prompt-toolkit<3.1.0,>=3.0.41 in /Users/engs2510/.pyenv/versions/3.12.2/envs/pybop-3.12/lib/python3.12/site-packages (from ipython>=6.1.0->ipywidgets) (3.0.43)\n", + "Requirement already satisfied: pygments>=2.4.0 in /Users/engs2510/.pyenv/versions/3.12.2/envs/pybop-3.12/lib/python3.12/site-packages (from ipython>=6.1.0->ipywidgets) (2.17.2)\n", + "Requirement already satisfied: stack-data in 
/Users/engs2510/.pyenv/versions/3.12.2/envs/pybop-3.12/lib/python3.12/site-packages (from ipython>=6.1.0->ipywidgets) (0.6.3)\n", + "Requirement already satisfied: pexpect>4.3 in /Users/engs2510/.pyenv/versions/3.12.2/envs/pybop-3.12/lib/python3.12/site-packages (from ipython>=6.1.0->ipywidgets) (4.9.0)\n", + "Requirement already satisfied: parso<0.9.0,>=0.8.3 in /Users/engs2510/.pyenv/versions/3.12.2/envs/pybop-3.12/lib/python3.12/site-packages (from jedi>=0.16->ipython>=6.1.0->ipywidgets) (0.8.4)\n", + "Requirement already satisfied: ptyprocess>=0.5 in /Users/engs2510/.pyenv/versions/3.12.2/envs/pybop-3.12/lib/python3.12/site-packages (from pexpect>4.3->ipython>=6.1.0->ipywidgets) (0.7.0)\n", + "Requirement already satisfied: wcwidth in /Users/engs2510/.pyenv/versions/3.12.2/envs/pybop-3.12/lib/python3.12/site-packages (from prompt-toolkit<3.1.0,>=3.0.41->ipython>=6.1.0->ipywidgets) (0.2.13)\n", + "Requirement already satisfied: executing>=1.2.0 in /Users/engs2510/.pyenv/versions/3.12.2/envs/pybop-3.12/lib/python3.12/site-packages (from stack-data->ipython>=6.1.0->ipywidgets) (2.0.1)\n", + "Requirement already satisfied: asttokens>=2.1.0 in /Users/engs2510/.pyenv/versions/3.12.2/envs/pybop-3.12/lib/python3.12/site-packages (from stack-data->ipython>=6.1.0->ipywidgets) (2.4.1)\n", + "Requirement already satisfied: pure-eval in /Users/engs2510/.pyenv/versions/3.12.2/envs/pybop-3.12/lib/python3.12/site-packages (from stack-data->ipython>=6.1.0->ipywidgets) (0.2.2)\n", + "Requirement already satisfied: six>=1.12.0 in /Users/engs2510/.pyenv/versions/3.12.2/envs/pybop-3.12/lib/python3.12/site-packages (from asttokens>=2.1.0->stack-data->ipython>=6.1.0->ipywidgets) (1.16.0)\n", + "Note: you may need to restart the kernel to use updated packages.\n", + "Note: you may need to restart the kernel to use updated packages.\n" + ] + } + ], + "source": [ + "%pip install --upgrade pip ipywidgets\n", + "%pip install pybop -q\n", + "\n", + "# Import the necessary libraries\n", + "import numpy as np\n", + "\n", + "import pybop" + ] + }, + { + "cell_type": "markdown", + "id": "017695fd-ee78-4113-af18-2fea04cf6126", + "metadata": {}, + "source": [ + "## Setup the model, problem, and cost\n", + "\n", + "The code block below sets up the model, problem, and cost objects. For more information on this process, take a look at other notebooks in the examples directory." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "c346b106-99a9-46bc-8b5d-d330ed911660", + "metadata": { + "execution": { + "iopub.execute_input": "2024-04-14T18:57:46.438835Z", + "iopub.status.busy": "2024-04-14T18:57:46.438684Z", + "iopub.status.idle": "2024-04-14T18:57:46.478613Z", + "shell.execute_reply": "2024-04-14T18:57:46.478339Z" + } + }, + "outputs": [], + "source": [ + "# Load the parameters\n", + "parameter_set = pybop.ParameterSet(\n", + " json_path=\"../scripts/parameters/initial_ecm_parameters.json\"\n", + ")\n", + "parameter_set.import_parameters()\n", + "# Define the model\n", + "model = pybop.empirical.Thevenin(\n", + " parameter_set=parameter_set, options={\"number of rc elements\": 1}\n", + ")\n", + "\n", + "# Define the parameters\n", + "parameters = [\n", + " pybop.Parameter(\n", + " \"R0 [Ohm]\",\n", + " prior=pybop.Gaussian(0.0002, 0.0001),\n", + " bounds=[1e-4, 1e-2],\n", + " )\n", + "]\n", + "\n", + "# Generate synthetic data\n", + "t_eval = np.arange(0, 900, 2)\n", + "values = model.predict(t_eval=t_eval)\n", + "\n", + "# Form dataset\n", + "dataset = pybop.Dataset(\n", + " {\n", + " \"Time [s]\": t_eval,\n", + " \"Current function [A]\": values[\"Current [A]\"].data,\n", + " \"Voltage [V]\": values[\"Voltage [V]\"].data,\n", + " }\n", + ")\n", + "\n", + "# Construct problem and cost\n", + "problem = pybop.FittingProblem(model, parameters, dataset)\n", + "cost = pybop.SumSquaredError(problem)" + ] + }, + { + "cell_type": "markdown", + "id": "3ef5b0da-f755-43c6-8904-79d7ee0f218c", + "metadata": {}, + "source": [ + "## Interacting with the Optimisers\n", + "\n", + "Now that we have set up the required objects, we can introduce the two interfaces for interacting with PyBOP optimisers. These are:\n", + " \n", + "1. The direct optimiser (e.g. `pybop.XNES`)\n", + "2. The optimisation class (i.e. `pybop.Optimisation`)\n", + " \n", + "These two interfaces are equivalent: the first constructs the chosen optimiser directly, while the second is a more general class that defaults to `pybop.XNES` if you don't provide an optimiser. \n", + "\n", + "First, the direct interface is presented. With this interface the user can select from the [list of optimisers](https://github.com/pybop-team/PyBOP?tab=readme-ov-file#supported-methods) supported in PyBOP and construct them directly. Options can be passed as kwargs, or through get() / set() methods in the case of PINTS-based optimisers." + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "6244882e-11ad-4bfe-a512-f1c687a06a08", + "metadata": { + "execution": { + "iopub.execute_input": "2024-04-14T18:57:46.512725Z", + "iopub.status.busy": "2024-04-14T18:57:46.512597Z", + "iopub.status.idle": "2024-04-14T18:57:49.259154Z", + "shell.execute_reply": "2024-04-14T18:57:49.257712Z" + } + }, + "outputs": [], + "source": [ + "optim_one = pybop.XNES(\n", + " cost, max_iterations=50\n", + ") # Direct optimiser class with options as kwargs\n", + "optim_one.set_max_iterations(\n", + " 50\n", + ") # Alternative set() / get() methods for PINTS optimisers\n", + "x1, final_cost = optim_one.run()" + ] + }, + { + "cell_type": "markdown", + "id": "c62e23f7", + "metadata": {}, + "source": [ + "Next, the `Optimisation` interface is less direct than the previous one, but provides a single class to work with across PyBOP workflows. The options are passed in the same way as above, through kwargs or get() / set() methods."
+ ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "479fc846", + "metadata": {}, + "outputs": [], + "source": [ + "optim_two = pybop.Optimisation(\n", + " cost, optimiser=pybop.XNES, max_iterations=50\n", + ") # Optimisation class with options as kwargs\n", + "optim_two.set_max_iterations(\n", + " 50\n", + ") # Alternative set() / get() methods for PINTS optimisers\n", + "x2, final_cost = optim_two.run()" + ] + }, + { + "cell_type": "markdown", + "id": "5c6ea9fd", + "metadata": {}, + "source": [ + "We can show the equivalence of these two methods by comparing the optimiser objects:" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "de56587e", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "True" + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "isinstance(optim_one, type(optim_two.optimiser))" + ] + }, + { + "cell_type": "markdown", + "id": "9f6634c0", + "metadata": {}, + "source": [ + "For completeness, we can show the optimiser solutions:" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "66b74f3e", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Estimated parameters x1: [0.00099965]\n", + "Estimated parameters x2: [0.00099985]\n" + ] + } + ], + "source": [ + "print(\"Estimated parameters x1:\", x1)\n", + "print(\"Estimated parameters x2:\", x2)" + ] + }, + { + "cell_type": "markdown", + "id": "94653584", + "metadata": {}, + "source": [ + "## Closing Comments\n", + "\n", + "As both of these APIs provide access to the same optimisers, please use either as you prefer. A couple of things to note:\n", + "\n", + "- If you are using a SciPy-based optimiser (`pybop.SciPyMinimize`, `pybop.SciPyDifferentialEvolution`), the `set()` / `get()` methods for the optimiser options are not currently supported. These optimisers require options to be passed as kwargs.\n", + "- The optimiser passed to `pybop.Optimisation` must not be a constructed object." + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.2" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/examples/notebooks/pouch_cell_identification.ipynb b/examples/notebooks/pouch_cell_identification.ipynb index 1d073d314..0cf64bf5d 100644 --- a/examples/notebooks/pouch_cell_identification.ipynb +++ b/examples/notebooks/pouch_cell_identification.ipynb @@ -392,8 +392,7 @@ " model, parameters, dataset, additional_variables=additional_variables\n", ")\n", "cost = pybop.SumSquaredError(problem)\n", - "optim = pybop.Optimisation(cost, optimiser=pybop.CMAES)\n", - "optim.set_max_iterations(30)" + "optim = pybop.CMAES(cost, max_iterations=30)" ] }, { diff --git a/examples/notebooks/spm_electrode_design.ipynb b/examples/notebooks/spm_electrode_design.ipynb index 127251b17..fd5373528 100644 --- a/examples/notebooks/spm_electrode_design.ipynb +++ b/examples/notebooks/spm_electrode_design.ipynb @@ -215,7 +215,7 @@ "id": "eQiGurUV04qB" }, "source": [ - "Let's construct PyBOP's optimisation class. This class provides the methods needed to fit the forward model. For this example, we use particle swarm optimisation (PSO). 
Due to the computational requirements of the design optimisation methods, we limit the number of iterations to 5 for this example." + "Let's construct PyBOP's optimisation class. This class provides the methods needed to fit the forward model. For this example, we use particle swarm optimisation (PSO). Due to the computational requirements of the design optimisation methods, we limit the number of iterations to 15 for this example." ] }, { @@ -232,8 +232,7 @@ }, "outputs": [], "source": [ - "optim = pybop.Optimisation(cost, optimiser=pybop.PSO, verbose=True)\n", - "optim.set_max_iterations(15)" + "optim = pybop.PSO(cost, verbose=True, max_iterations=15)" ] }, { diff --git a/examples/scripts/BPX_spm.py b/examples/scripts/BPX_spm.py index d77fdae0e..0d1089359 100644 --- a/examples/scripts/BPX_spm.py +++ b/examples/scripts/BPX_spm.py @@ -43,8 +43,7 @@ # Generate problem, cost function, and optimisation class problem = pybop.FittingProblem(model, parameters, dataset) cost = pybop.SumSquaredError(problem) -optim = pybop.Optimisation(cost, optimiser=pybop.CMAES) -optim.set_max_iterations(100) +optim = pybop.CMAES(cost, max_iterations=100) # Run the optimisation x, final_cost = optim.run() diff --git a/examples/scripts/ecm_CMAES.py b/examples/scripts/ecm_CMAES.py index 7316b37eb..5d241a6b0 100644 --- a/examples/scripts/ecm_CMAES.py +++ b/examples/scripts/ecm_CMAES.py @@ -75,8 +75,7 @@ # Generate problem, cost function, and optimisation class problem = pybop.FittingProblem(model, parameters, dataset) cost = pybop.SumSquaredError(problem) -optim = pybop.Optimisation(cost, optimiser=pybop.CMAES) -optim.set_max_iterations(100) +optim = pybop.CMAES(cost, max_iterations=100) x, final_cost = optim.run() print("Estimated parameters:", x) diff --git a/examples/scripts/exp_UKF.py b/examples/scripts/exp_UKF.py index aa42bbf24..5a61436b6 100644 --- a/examples/scripts/exp_UKF.py +++ b/examples/scripts/exp_UKF.py @@ -97,7 +97,7 @@ # Generate problem, cost function, and optimisation class cost = pybop.ObserverCost(observer) -optim = pybop.Optimisation(cost, optimiser=pybop.CMAES, verbose=True) +optim = pybop.CMAES(cost, verbose=True) # Run optimisation x, final_cost = optim.run() diff --git a/examples/scripts/gitt.py b/examples/scripts/gitt.py index d31ac2d4c..b81f7433a 100644 --- a/examples/scripts/gitt.py +++ b/examples/scripts/gitt.py @@ -54,7 +54,7 @@ cost = pybop.RootMeanSquaredError(problem) # Build the optimisation problem -optim = pybop.Optimisation(cost=cost, optimiser=pybop.PSO, verbose=True) +optim = pybop.PSO(cost=cost, verbose=True) # Run the optimisation problem x, final_cost = optim.run() diff --git a/examples/scripts/spm_CMAES.py b/examples/scripts/spm_CMAES.py index 59c3a1f37..b60bc0194 100644 --- a/examples/scripts/spm_CMAES.py +++ b/examples/scripts/spm_CMAES.py @@ -42,8 +42,7 @@ # Generate problem, cost function, and optimisation class problem = pybop.FittingProblem(model, parameters, dataset, signal=signal) cost = pybop.SumSquaredError(problem) -optim = pybop.Optimisation(cost, optimiser=pybop.CMAES) -optim.set_max_iterations(100) +optim = pybop.CMAES(cost, max_iterations=100) # Run the optimisation x, final_cost = optim.run() diff --git a/examples/scripts/spm_IRPropMin.py b/examples/scripts/spm_IRPropMin.py index 23d15eea3..2aedff907 100644 --- a/examples/scripts/spm_IRPropMin.py +++ b/examples/scripts/spm_IRPropMin.py @@ -36,8 +36,7 @@ # Generate problem, cost function, and optimisation class problem = pybop.FittingProblem(model, parameters, dataset) cost = pybop.SumSquaredError(problem) 
-optim = pybop.Optimisation(cost, optimiser=pybop.IRPropMin) -optim.set_max_iterations(100) +optim = pybop.IRPropMin(cost, max_iterations=100) x, final_cost = optim.run() print("Estimated parameters:", x) diff --git a/examples/scripts/spm_MAP.py b/examples/scripts/spm_MAP.py index 9ed4032e2..e09ce2315 100644 --- a/examples/scripts/spm_MAP.py +++ b/examples/scripts/spm_MAP.py @@ -45,10 +45,12 @@ # Generate problem, cost function, and optimisation class problem = pybop.FittingProblem(model, parameters, dataset) cost = pybop.MAP(problem, pybop.GaussianLogLikelihoodKnownSigma) -optim = pybop.Optimisation(cost, optimiser=pybop.CMAES) -optim.set_max_unchanged_iterations(20) -optim.set_min_iterations(20) -optim.set_max_iterations(100) +optim = pybop.CMAES( + cost, + max_unchanged_iterations=20, + min_iterations=20, + max_iterations=100, +) # Run the optimisation x, final_cost = optim.run() diff --git a/examples/scripts/spm_MLE.py b/examples/scripts/spm_MLE.py index bace88e32..9a3636de1 100644 --- a/examples/scripts/spm_MLE.py +++ b/examples/scripts/spm_MLE.py @@ -45,10 +45,12 @@ # Generate problem, cost function, and optimisation class problem = pybop.FittingProblem(model, parameters, dataset) likelihood = pybop.GaussianLogLikelihoodKnownSigma(problem, sigma=[0.03, 0.03]) -optim = pybop.Optimisation(likelihood, optimiser=pybop.CMAES) -optim.set_max_unchanged_iterations(20) -optim.set_min_iterations(20) -optim.set_max_iterations(100) +optim = pybop.CMAES( + likelihood, + max_unchanged_iterations=20, + min_iterations=20, + max_iterations=100, +) # Run the optimisation x, final_cost = optim.run() diff --git a/examples/scripts/spm_NelderMead.py b/examples/scripts/spm_NelderMead.py index 8bcc28d09..90a95bfff 100644 --- a/examples/scripts/spm_NelderMead.py +++ b/examples/scripts/spm_NelderMead.py @@ -54,15 +54,14 @@ def noise(sigma): model, parameters, dataset, signal=signal, init_soc=init_soc ) cost = pybop.RootMeanSquaredError(problem) -optim = pybop.Optimisation( +optim = pybop.NelderMead( cost, - optimiser=pybop.NelderMead, verbose=True, allow_infeasible_solutions=True, sigma0=0.05, + max_iterations=100, + max_unchanged_iterations=20, ) -optim.set_max_iterations(100) -optim.set_max_unchanged_iterations(45) # Run optimisation x, final_cost = optim.run() diff --git a/examples/scripts/spm_SNES.py b/examples/scripts/spm_SNES.py index 570c6c30a..a304dd047 100644 --- a/examples/scripts/spm_SNES.py +++ b/examples/scripts/spm_SNES.py @@ -36,8 +36,7 @@ # Generate problem, cost function, and optimisation class problem = pybop.FittingProblem(model, parameters, dataset) cost = pybop.SumSquaredError(problem) -optim = pybop.Optimisation(cost, optimiser=pybop.SNES) -optim.set_max_iterations(100) +optim = pybop.SNES(cost, max_iterations=100) x, final_cost = optim.run() print("Estimated parameters:", x) diff --git a/examples/scripts/spm_UKF.py b/examples/scripts/spm_UKF.py index 5299c5816..0814a22c2 100644 --- a/examples/scripts/spm_UKF.py +++ b/examples/scripts/spm_UKF.py @@ -57,7 +57,7 @@ # Generate problem, cost function, and optimisation class cost = pybop.ObserverCost(observer) -optim = pybop.Optimisation(cost, optimiser=pybop.PSO, verbose=True) +optim = pybop.PSO(cost, verbose=True) # Parameter identification using the current observer implementation is very slow # so let's restrict the number of iterations and reduce the number of plots diff --git a/examples/scripts/spm_XNES.py b/examples/scripts/spm_XNES.py index e6007e4d1..bcca73de1 100644 --- a/examples/scripts/spm_XNES.py +++ 
b/examples/scripts/spm_XNES.py @@ -37,8 +37,7 @@ # Generate problem, cost function, and optimisation class problem = pybop.FittingProblem(model, parameters, dataset) cost = pybop.SumSquaredError(problem) -optim = pybop.Optimisation(cost, optimiser=pybop.XNES) -optim.set_max_iterations(100) +optim = pybop.XNES(cost, max_iterations=100) x, final_cost = optim.run() print("Estimated parameters:", x) diff --git a/examples/scripts/spm_adam.py b/examples/scripts/spm_adam.py index e5df4d0c4..7523384ab 100644 --- a/examples/scripts/spm_adam.py +++ b/examples/scripts/spm_adam.py @@ -54,15 +54,14 @@ def noise(sigma): model, parameters, dataset, signal=signal, init_soc=init_soc ) cost = pybop.RootMeanSquaredError(problem) -optim = pybop.Optimisation( +optim = pybop.Adam( cost, - optimiser=pybop.Adam, verbose=True, allow_infeasible_solutions=True, sigma0=0.05, + max_iterations=100, + max_unchanged_iterations=20, ) -optim.set_max_iterations(100) -optim.set_max_unchanged_iterations(45) # Run optimisation x, final_cost = optim.run() diff --git a/examples/scripts/spm_descent.py b/examples/scripts/spm_descent.py index 89f4f8e27..9bacfdc8e 100644 --- a/examples/scripts/spm_descent.py +++ b/examples/scripts/spm_descent.py @@ -36,10 +36,12 @@ # Generate problem, cost function, and optimisation class problem = pybop.FittingProblem(model, parameters, dataset) cost = pybop.SumSquaredError(problem) -optim = pybop.Optimisation( - cost, optimiser=pybop.GradientDescent, sigma0=0.022, verbose=True +optim = pybop.GradientDescent( + cost, + sigma0=0.011, + verbose=True, + max_iterations=125, ) -optim.set_max_iterations(125) # Run optimisation x, final_cost = optim.run() diff --git a/examples/scripts/spm_pso.py b/examples/scripts/spm_pso.py index f35bc402f..4b99bd129 100644 --- a/examples/scripts/spm_pso.py +++ b/examples/scripts/spm_pso.py @@ -37,8 +37,7 @@ # Generate problem, cost function, and optimisation class problem = pybop.FittingProblem(model, parameters, dataset) cost = pybop.SumSquaredError(problem) -optim = pybop.Optimisation(cost, optimiser=pybop.PSO) -optim.set_max_iterations(100) +optim = pybop.Optimisation(cost, optimiser=pybop.PSO, max_iterations=100) x, final_cost = optim.run() print("Estimated parameters:", x) diff --git a/examples/scripts/spm_scipymin.py b/examples/scripts/spm_scipymin.py index 74bd72523..12a152349 100644 --- a/examples/scripts/spm_scipymin.py +++ b/examples/scripts/spm_scipymin.py @@ -38,7 +38,7 @@ cost = pybop.RootMeanSquaredError(problem) # Build the optimisation problem -optim = pybop.Optimisation(cost=cost, optimiser=pybop.SciPyMinimize) +optim = pybop.SciPyMinimize(cost) # Run the optimisation problem x, final_cost = optim.run() diff --git a/examples/scripts/spme_max_energy.py b/examples/scripts/spme_max_energy.py index e4441c326..8590f68c6 100644 --- a/examples/scripts/spme_max_energy.py +++ b/examples/scripts/spme_max_energy.py @@ -12,8 +12,8 @@ # NOTE: This script can be easily adjusted to consider the volumetric # (instead of gravimetric) energy density by changing the line which # defines the cost and changing the output to: -# print(f"Initial volumetric energy density: {-cost(cost.x0):.2f} Wh.m-3") -# print(f"Optimised volumetric energy density: {-final_cost:.2f} Wh.m-3") +# print(f"Initial volumetric energy density: {cost(cost.x0):.2f} Wh.m-3") +# print(f"Optimised volumetric energy density: {final_cost:.2f} Wh.m-3") # Define parameter set and model parameter_set = pybop.ParameterSet.pybamm("Chen2020") @@ -23,12 +23,12 @@ parameters = [ pybop.Parameter( "Positive 
electrode thickness [m]", - prior=pybop.Gaussian(7.56e-05, 0.05e-05), + prior=pybop.Gaussian(7.56e-05, 0.1e-05), bounds=[65e-06, 10e-05], ), pybop.Parameter( "Positive particle radius [m]", - prior=pybop.Gaussian(5.22e-06, 0.05e-06), + prior=pybop.Gaussian(5.22e-06, 0.1e-06), bounds=[2e-06, 9e-06], ), ] @@ -47,16 +47,15 @@ # Generate cost function and optimisation class: cost = pybop.GravimetricEnergyDensity(problem) -optim = pybop.Optimisation( - cost, optimiser=pybop.PSO, verbose=True, allow_infeasible_solutions=False +optim = pybop.PSO( + cost, verbose=True, allow_infeasible_solutions=False, max_iterations=15 ) -optim.set_max_iterations(15) # Run optimisation x, final_cost = optim.run() print("Estimated parameters:", x) -print(f"Initial gravimetric energy density: {-cost(cost.x0):.2f} Wh.kg-1") -print(f"Optimised gravimetric energy density: {-final_cost:.2f} Wh.kg-1") +print(f"Initial gravimetric energy density: {cost(cost.x0):.2f} Wh.kg-1") +print(f"Optimised gravimetric energy density: {final_cost:.2f} Wh.kg-1") # Plot the timeseries output if cost.update_capacity: diff --git a/examples/standalone/cost.py b/examples/standalone/cost.py index 149acd567..c3763ff4c 100644 --- a/examples/standalone/cost.py +++ b/examples/standalone/cost.py @@ -48,7 +48,7 @@ def __init__(self, problem=None): upper=[10], ) - def __call__(self, x, grad=None): + def _evaluate(self, x, grad=None): """ Calculate the cost for a given parameter value. diff --git a/examples/standalone/optimiser.py b/examples/standalone/optimiser.py new file mode 100644 index 000000000..eb16fe555 --- /dev/null +++ b/examples/standalone/optimiser.py @@ -0,0 +1,90 @@ +import numpy as np +from scipy.optimize import minimize + +from pybop import BaseOptimiser + + +class StandaloneOptimiser(BaseOptimiser): + """ + Defines an example standalone optimiser without a Cost. + """ + + def __init__(self, cost=None, **optimiser_kwargs): + # Define cost function + def cost(x): + x1, x2 = x + return (x1 - 2) ** 2 + (x2 - 4) ** 4 + + # Set initial values and other options + optimiser_options = dict( + x0=np.array([0, 0]), + bounds=None, + method="Nelder-Mead", + jac=False, + maxiter=100, + ) + optimiser_options.update(optimiser_kwargs) + super().__init__(cost, **optimiser_options) + + def _set_up_optimiser(self): + """ + Parse optimiser options. + """ + # Reformat bounds + if isinstance(self.bounds, dict): + self._scipy_bounds = [ + (lower, upper) + for lower, upper in zip(self.bounds["lower"], self.bounds["upper"]) + ] + else: + self._scipy_bounds = self.bounds + + # Parse additional options and remove them from the options dictionary + self._options = self.unset_options + self.unset_options = dict() + self._options["options"] = self._options.pop("options", dict()) + if "maxiter" in self._options.keys(): + # Nest this option within an options dictionary for SciPy minimize + self._options["options"]["maxiter"] = self._options.pop("maxiter") + + def _run(self): + """ + Executes the optimisation process using SciPy's minimize function. + + Returns + ------- + x : numpy.ndarray + The best parameter set found by the optimization. + final_cost : float + The final cost associated with the best parameters. 
+ """ + self.log = [[self.x0]] + + # Add callback storing history of parameter values + def callback(x): + self.log.append([x]) + + # Run optimiser + self.result = minimize( + self.cost, + self.x0, + bounds=self._scipy_bounds, + callback=callback, + **self._options, + ) + + self.result.final_cost = self.cost(self.result.x) + self._iterations = self.result.nit + + return self.result.x, self.result.final_cost + + def name(self): + """ + Provides the name of the optimization strategy. + + Returns + ------- + str + The name 'SciPyMinimize'. + """ + return "StandaloneOptimiser" diff --git a/pybop/__init__.py b/pybop/__init__.py index 6e6de6d15..621e5aade 100644 --- a/pybop/__init__.py +++ b/pybop/__init__.py @@ -93,16 +93,16 @@ # from ._experiment import Experiment -# -# Main optimisation class -# -from ._optimisation import Optimisation - # # Optimiser class # from .optimisers.base_optimiser import BaseOptimiser -from .optimisers.scipy_optimisers import SciPyMinimize, SciPyDifferentialEvolution +from .optimisers.base_pints_optimiser import BasePintsOptimiser +from .optimisers.scipy_optimisers import ( + BaseSciPyOptimiser, + SciPyMinimize, + SciPyDifferentialEvolution +) from .optimisers.pints_optimisers import ( GradientDescent, Adam, @@ -113,6 +113,7 @@ SNES, XNES, ) +from .optimisers.optimisation import Optimisation # # Monte Carlo classes diff --git a/pybop/costs/base_cost.py b/pybop/costs/base_cost.py index f3ac95170..025cb2e42 100644 --- a/pybop/costs/base_cost.py +++ b/pybop/costs/base_cost.py @@ -38,7 +38,6 @@ def __init__(self, problem=None, sigma=None): self.x0 = None self.bounds = None self.sigma0 = sigma - self._minimising = True if isinstance(self.problem, BaseProblem): self._target = problem._target self.parameters = problem.parameters @@ -82,10 +81,7 @@ def evaluate(self, x, grad=None): If an error occurs during the calculation of the cost. """ try: - if self._minimising: - return self._evaluate(x, grad) - else: # minimise the negative cost - return -self._evaluate(x, grad) + return self._evaluate(x, grad) except NotImplementedError as e: raise e @@ -140,11 +136,7 @@ def evaluateS1(self, x): If an error occurs during the calculation of the cost or gradient. """ try: - if self._minimising: - return self._evaluateS1(x) - else: # minimise the negative cost - L, dl = self._evaluateS1(x) - return -L, -dl + return self._evaluateS1(x) except NotImplementedError as e: raise e diff --git a/pybop/costs/design_costs.py b/pybop/costs/design_costs.py index b02940bf0..f6364cdc6 100644 --- a/pybop/costs/design_costs.py +++ b/pybop/costs/design_costs.py @@ -90,8 +90,8 @@ class GravimetricEnergyDensity(DesignCost): """ Represents the gravimetric energy density of a battery cell, calculated based on a normalised discharge from upper to lower voltage limits. The goal is to - maximise the energy density, which is achieved by minimizing the negative energy - density reported by this class. + maximise the energy density, which is achieved by setting minimising = False + in the optimiser settings. Inherits all parameters and attributes from ``DesignCost``. """ @@ -113,7 +113,7 @@ def _evaluate(self, x, grad=None): Returns ------- float - The negative gravimetric energy density or infinity in case of infeasible parameters. + The gravimetric energy density or -infinity in case of infeasible parameters. 
""" if not all(is_numeric(i) for i in x): raise ValueError("Input must be a numeric array.") @@ -128,29 +128,29 @@ def _evaluate(self, x, grad=None): solution = self.problem.evaluate(x) voltage, current = solution["Voltage [V]"], solution["Current [A]"] - negative_energy_density = -np.trapz(voltage * current, dx=self.dt) / ( + energy_density = np.trapz(voltage * current, dx=self.dt) / ( 3600 * self.problem.model.cell_mass(self.parameter_set) ) - return negative_energy_density + return energy_density # Catch infeasible solutions and return infinity except UserWarning as e: print(f"Ignoring this sample due to: {e}") - return np.inf + return -np.inf # Catch any other exception and return infinity except Exception as e: print(f"An error occurred during the evaluation: {e}") - return np.inf + return -np.inf class VolumetricEnergyDensity(DesignCost): """ Represents the volumetric energy density of a battery cell, calculated based on a normalised discharge from upper to lower voltage limits. The goal is to - maximise the energy density, which is achieved by minimizing the negative energy - density reported by this class. + maximise the energy density, which is achieved by setting minimising = False + in the optimiser settings. Inherits all parameters and attributes from ``DesignCost``. """ @@ -172,7 +172,7 @@ def _evaluate(self, x, grad=None): Returns ------- float - The negative volumetric energy density or infinity in case of infeasible parameters. + The volumetric energy density or -infinity in case of infeasible parameters. """ if not all(is_numeric(i) for i in x): raise ValueError("Input must be a numeric array.") @@ -186,18 +186,18 @@ def _evaluate(self, x, grad=None): solution = self.problem.evaluate(x) voltage, current = solution["Voltage [V]"], solution["Current [A]"] - negative_energy_density = -np.trapz(voltage * current, dx=self.dt) / ( + energy_density = np.trapz(voltage * current, dx=self.dt) / ( 3600 * self.problem.model.cell_volume(self.parameter_set) ) - return negative_energy_density + return energy_density # Catch infeasible solutions and return infinity except UserWarning as e: print(f"Ignoring this sample due to: {e}") - return np.inf + return -np.inf # Catch any other exception and return infinity except Exception as e: print(f"An error occurred during the evaluation: {e}") - return np.inf + return -np.inf diff --git a/pybop/costs/fitting_costs.py b/pybop/costs/fitting_costs.py index 0665454b0..b7b266591 100644 --- a/pybop/costs/fitting_costs.py +++ b/pybop/costs/fitting_costs.py @@ -287,7 +287,8 @@ class MAP(BaseLikelihood): Maximum a posteriori cost function. Computes the maximum a posteriori cost function, which is the sum of the - negative log likelihood and the log prior. + log likelihood and the log prior. The goal of maximising is achieved by + setting minimising = False in the optimiser settings. Inherits all parameters and attributes from ``BaseLikelihood``. diff --git a/pybop/optimisers/base_optimiser.py b/pybop/optimisers/base_optimiser.py index d89c3df04..713df4d49 100644 --- a/pybop/optimisers/base_optimiser.py +++ b/pybop/optimisers/base_optimiser.py @@ -1,80 +1,222 @@ +import warnings + +import numpy as np + +from pybop import BaseCost, BaseLikelihood, DesignCost + + class BaseOptimiser: """ A base class for defining optimisation methods. - This class serves as a template for creating optimisers. It provides a basic structure for - an optimisation algorithm, including the initial setup and a method stub for performing - the optimisation process. 
Child classes should override the optimise and _runoptimise - methods with specific algorithms. + This class serves as a base class for creating optimisers. It provides a basic structure for + an optimisation algorithm, including the initial setup and a method stub for performing the + optimisation process. Child classes should override _set_up_optimiser and the _run method with + a specific algorithm. + + Parameters + ---------- + cost : pybop.BaseCost or pints.ErrorMeasure + An objective function to be optimised, which can be either a pybop.Cost or PINTS error measure + **optimiser_kwargs : optional + Valid option keys and their values. + + Attributes + ---------- + x0 : numpy.ndarray + Initial parameter values for the optimisation. + bounds : dict + Dictionary containing the parameter bounds with keys 'lower' and 'upper'. + sigma0 : float or sequence + Initial step size or standard deviation for the optimiser. + verbose : bool, optional + If True, the optimisation progress is printed (default: False). + minimising : bool, optional + If True, the target is to minimise the cost, else target is to maximise by minimising + the negative cost (default: True). + physical_viability : bool, optional + If True, the feasibility of the optimised parameters is checked (default: True). + allow_infeasible_solutions : bool, optional + If True, infeasible parameter values will be allowed in the optimisation (default: True). + log : list + A log of the parameter values tried during the optimisation. """ - def __init__(self, bounds=None): - """ - Initializes the BaseOptimiser. + def __init__( + self, + cost, + **optimiser_kwargs, + ): + # First set attributes to default values + self.x0 = None + self.bounds = None + self.sigma0 = 0.1 + self.verbose = False + self.log = [] + self.minimising = True + self.physical_viability = False + self.allow_infeasible_solutions = False + self.default_max_iterations = 1000 + self.result = None - Parameters - ---------- - bounds : sequence or Bounds, optional - Bounds on the parameters. Default is None. + if isinstance(cost, BaseCost): + self.cost = cost + self.x0 = cost.x0 + self.bounds = cost.bounds + self.sigma0 = cost.sigma0 + self.set_allow_infeasible_solutions() + if isinstance(cost, (BaseLikelihood, DesignCost)): + self.minimising = False + else: + try: + cost_test = cost(optimiser_kwargs.get("x0", [])) + warnings.warn( + "The cost is not an instance of pybop.BaseCost, but let's continue " + + "assuming that it is a callable function to be minimised.", + UserWarning, + ) + self.cost = cost + self.minimising = True + + except Exception: + raise Exception("The cost is not a recognised cost object or function.") + + if not np.isscalar(cost_test) or not np.isreal(cost_test): + raise TypeError( + f"Cost returned {type(cost_test)}, not a scalar numeric value." + ) + + self.unset_options = optimiser_kwargs + self.set_base_options() + self._set_up_optimiser() + + # Throw an error if any options remain + if self.unset_options: + raise ValueError(f"Unrecognised keyword arguments: {self.unset_options}") + + def set_base_options(self): + """ + Update the base optimiser options and remove them from the options dictionary. 
""" - self.bounds = bounds - pass + self.x0 = self.unset_options.pop("x0", self.x0) + self.bounds = self.unset_options.pop("bounds", self.bounds) + self.sigma0 = self.unset_options.pop("sigma0", self.sigma0) + self.verbose = self.unset_options.pop("verbose", self.verbose) + self.minimising = self.unset_options.pop("minimising", self.minimising) + if "allow_infeasible_solutions" in self.unset_options.keys(): + self.set_allow_infeasible_solutions( + self.unset_options.pop("allow_infeasible_solutions") + ) - def optimise(self, cost_function, x0=None, maxiter=None): + def _set_up_optimiser(self): """ - Initiates the optimisation process. + Parse optimiser options and prepare the optimiser. - This method should be overridden by child classes with the specific optimisation algorithm. + This method should be implemented by child classes. - Parameters - ---------- - cost_function : callable - The cost function to be minimised by the optimiser. - x0 : ndarray, optional - Initial guess for the parameters. Default is None. - maxiter : int, optional - Maximum number of iterations to perform. Default is None. + Raises + ------ + NotImplementedError + If the method has not been implemented by the subclass. + """ + raise NotImplementedError + + def run(self): + """ + Run the optimisation and return the optimised parameters and final cost. Returns ------- - The result of the optimisation process. The specific type of this result will depend on the child implementation. + x : numpy.ndarray + The best parameter set found by the optimisation. + final_cost : float + The final cost associated with the best parameters. """ - self.cost_function = cost_function - self.x0 = x0 - self._max_iterations = maxiter + x, final_cost = self._run() + + # Store the optimised parameters + if hasattr(self.cost, "parameters"): + self.store_optimised_parameters(x) - # Run optimisation - result = self._runoptimise(self.cost_function, x0=self.x0) + # Check if parameters are viable + if self.physical_viability: + self.check_optimal_parameters(x) - return result + return x, final_cost - def _runoptimise(self, cost_function, x0=None): + def _run(self): """ Contains the logic for the optimisation algorithm. This method should be implemented by child classes to perform the actual optimisation. + Raises + ------ + NotImplementedError + If the method has not been implemented by the subclass. + """ + raise NotImplementedError + + def store_optimised_parameters(self, x): + """ + Update the problem parameters with optimised values. + + The optimised parameter values are stored within the associated PyBOP parameter class. + Parameters ---------- - cost_function : callable - The cost function to be minimised by the optimiser. - x0 : ndarray, optional - Initial guess for the parameters. Default is None. + x : array-like + Optimised parameter values. + """ + for i, param in enumerate(self.cost.parameters): + param.update(value=x[i]) - Returns - ------- - This method is expected to return the result of the optimisation, the format of which - will be determined by the child class implementation. + def check_optimal_parameters(self, x): """ - pass + Check if the optimised parameters are physically viable. + """ + + if self.cost.problem._model.check_params( + inputs=x, allow_infeasible_solutions=False + ): + return + else: + warnings.warn( + "Optimised parameters are not physically viable! 
\nConsider retrying the optimisation" + " with a non-gradient-based optimiser and the option allow_infeasible_solutions=False", + UserWarning, + stacklevel=2, + ) def name(self): """ - Returns the name of the optimiser. + Returns the name of the optimiser, to be overwritten by child classes. Returns ------- str - The name of the optimiser, which is "BaseOptimiser" for this base class. + The name of the optimiser, which is "Optimisation" for this base class. """ - return "BaseOptimiser" + return "Optimisation" + + def set_allow_infeasible_solutions(self, allow=True): + """ + Set whether to allow infeasible solutions or not. + + Parameters + ---------- + allow : bool, optional + Whether to allow infeasible solutions. + """ + # Set whether to allow infeasible locations + self.physical_viability = allow + self.allow_infeasible_solutions = allow + + if hasattr(self.cost, "problem") and hasattr(self.cost.problem, "_model"): + self.cost.problem._model.allow_infeasible_solutions = ( + self.allow_infeasible_solutions + ) + else: + # Turn off this feature as there is no model + self.physical_viability = False + self.allow_infeasible_solutions = False diff --git a/pybop/_optimisation.py b/pybop/optimisers/base_pints_optimiser.py similarity index 57% rename from pybop/_optimisation.py rename to pybop/optimisers/base_pints_optimiser.py index 67a40a151..543d32b64 100644 --- a/pybop/_optimisation.py +++ b/pybop/optimisers/base_pints_optimiser.py @@ -1,196 +1,162 @@ -import warnings - import numpy as np import pints -import pybop +from pybop import BaseOptimiser -class Optimisation: +class BasePintsOptimiser(BaseOptimiser): """ - A class for conducting optimization using PyBOP or PINTS optimisers. + A base class for defining optimisation methods from the PINTS library. Parameters ---------- - cost : pybop.BaseCost or pints.ErrorMeasure - An objective function to be optimized, which can be either a pybop.Cost or PINTS error measure - optimiser : pybop.Optimiser or subclass of pybop.BaseOptimiser, optional - An optimiser from either the PINTS or PyBOP framework to perform the optimization (default: None). - sigma0 : float or sequence, optional - Initial step size or standard deviation for the optimiser (default: None). - verbose : bool, optional - If True, the optimization progress is printed (default: False). - physical_viability : bool, optional - If True, the feasibility of the optimised parameters is checked (default: True). - allow_infeasible_solutions : bool, optional - If True, infeasible parameter values will be allowed in the optimisation (default: True). - - Attributes - ---------- - x0 : numpy.ndarray - Initial parameter values for the optimization. - bounds : dict - Dictionary containing the parameter bounds with keys 'lower' and 'upper'. - _n_parameters : int - Number of parameters in the optimization problem. - sigma0 : float or sequence - Initial step size or standard deviation for the optimiser. - log : list - Log of the optimization process. + **optimiser_kwargs : optional + Valid PINTS option keys and their values, for example: + x0 : array_like + Initial position from which optimization will start. + sigma0 : float + Initial step size or standard deviation depending on the optimiser. + bounds : dict + A dictionary with 'lower' and 'upper' keys containing arrays for lower and + upper bounds on the parameters. 
""" - def __init__( - self, - cost, - x0=None, - optimiser=None, - sigma0=None, - verbose=False, - physical_viability=True, - allow_infeasible_solutions=True, - ): - self.cost = cost - self.x0 = x0 or cost.x0 - self.optimiser = optimiser - self.verbose = verbose - self.bounds = cost.bounds - self.sigma0 = sigma0 or cost.sigma0 - self._n_parameters = cost._n_parameters - self.physical_viability = physical_viability - self.allow_infeasible_solutions = allow_infeasible_solutions - self.log = [] - - # Convert x0 to pints vector - self._x0 = pints.vector(self.x0) - - # Set whether to allow infeasible locations - if self.cost.problem is not None and hasattr(self.cost.problem, "_model"): - self.cost.problem._model.allow_infeasible_solutions = ( - self.allow_infeasible_solutions - ) - else: - # Turn off this feature as there is no model - self.physical_viability = False - self.allow_infeasible_solutions = False - - # PyBOP doesn't currently support the pints transformation class - self._transformation = None - - # Check if minimising or maximising - if isinstance(cost, pybop.BaseLikelihood): - self.cost._minimising = False - self._minimising = self.cost._minimising - self._function = self.cost - - # Construct Optimiser - self.pints = True - - if self.optimiser is None: - self.optimiser = pybop.XNES - elif issubclass(self.optimiser, pints.Optimiser): - pass - else: - self.pints = False - - if issubclass( - self.optimiser, (pybop.SciPyMinimize, pybop.SciPyDifferentialEvolution) - ): - self.optimiser = self.optimiser(bounds=self.bounds) - - else: - raise ValueError("Unknown optimiser type") - - if self.pints: - self.optimiser = self.optimiser(self.x0, self.sigma0, self.bounds) - - # Check if sensitivities are required - self._needs_sensitivities = self.optimiser.needs_sensitivities() - - # Track optimiser's f_best or f_guessed + def __init__(self, cost, pints_optimiser, **optimiser_kwargs): + # First set attributes to default values + self._boundaries = None + self._needs_sensitivities = None self._use_f_guessed = None - self.set_f_guessed_tracking() - - # Parallelisation self._parallel = False self._n_workers = 1 - self.set_parallel() - - # User callback self._callback = None - - # Define stopping criteria - # Maximum iterations self._max_iterations = None - self.set_max_iterations() - - # Minimum iterations - self._min_iterations = None - self.set_min_iterations() - - # Maximum unchanged iterations - self._unchanged_threshold = 1 # smallest significant f change - self._unchanged_max_iterations = None - self.set_max_unchanged_iterations() - - # Maximum evaluations + self._min_iterations = 2 + self._unchanged_threshold = 1e-5 # smallest significant f change + self._unchanged_max_iterations = 15 self._max_evaluations = None - - # Threshold value self._threshold = None - - # Post-run statistics self._evaluations = None self._iterations = None - def run(self): - """ - Run the optimization and return the optimized parameters and final cost. + # PyBOP doesn't currently support the PINTS transformation class + self._transformation = None - Returns - ------- - x : numpy.ndarray - The best parameter set found by the optimization. - final_cost : float - The final cost associated with the best parameters. + self.pints_optimiser = pints_optimiser + super().__init__(cost, **optimiser_kwargs) + + def _set_up_optimiser(self): + """ + Parse optimiser options and create an instance of the PINTS optimiser. 
""" + # Check and remove any duplicate keywords in self.unset_options + self._sanitise_inputs() - if self.pints: - x, final_cost = self._run_pints() - elif not self.pints: - x, final_cost = self._run_pybop() + # Create an instance of the PINTS optimiser class + if issubclass(self.pints_optimiser, pints.Optimiser): + self.pints_optimiser = self.pints_optimiser( + self.x0, sigma0=self.sigma0, boundaries=self._boundaries + ) + else: + raise ValueError( + "The pints_optimiser is not a recognised PINTS optimiser class." + ) - # Store the optimised parameters - if self.cost.problem is not None: - self.store_optimised_parameters(x) + # Check if sensitivities are required + self._needs_sensitivities = self.pints_optimiser.needs_sensitivities() - # Check if parameters are viable - if self.physical_viability: - self.check_optimal_parameters(x) + # Apply default maxiter + self.set_max_iterations() - return x, final_cost + # Apply additional options and remove them from options + key_list = list(self.unset_options.keys()) + for key in key_list: + if key == "use_f_guessed": + self.set_f_guessed_tracking(self.unset_options.pop(key)) + elif key == "parallel": + self.set_parallel(self.unset_options.pop(key)) + elif key == "max_iterations": + self.set_max_iterations(self.unset_options.pop(key)) + elif key == "min_iterations": + self.set_min_iterations(self.unset_options.pop(key)) + elif key == "max_unchanged_iterations": + if "threshold" in self.unset_options.keys(): + self.set_max_unchanged_iterations( + self.unset_options.pop(key), + self.unset_options.pop("threshold"), + ) + else: + self.set_max_unchanged_iterations(self.unset_options.pop(key)) + elif key == "threshold": + pass # only used with unchanged_max_iterations + elif key == "max_evaluations": + self.set_max_evaluations(self.unset_options.pop(key)) - def _run_pybop(self): + def _sanitise_inputs(self): """ - Internal method to run the optimization using a PyBOP optimiser. + Check and remove any duplicate optimiser options. + """ + # Unpack values from any nested options dictionary + if "options" in self.unset_options.keys(): + key_list = list(self.unset_options["options"].keys()) + for key in key_list: + if key not in self.unset_options.keys(): + self.unset_options[key] = self.unset_options["options"].pop(key) + else: + raise Exception( + f"A duplicate {key} option was found in the options dictionary." + ) + self.unset_options.pop("options") + + # Check for duplicate keywords + expected_keys = [ + "max_iterations", + "popsize", + "threshold", + ] + alternative_keys = ["maxiter", "population_size", "tol"] + for exp_key, alt_key in zip(expected_keys, alternative_keys): + if alt_key in self.unset_options.keys(): + if exp_key in self.unset_options.keys(): + raise Exception( + "The alternative {alt_key} option was passed in addition to the expected {exp_key} option." + ) + else: # rename + self.unset_options[exp_key] = self.unset_options.pop(alt_key) + + # Convert bounds to PINTS boundaries + if self.bounds is not None: + if issubclass( + self.pints_optimiser, + (pints.GradientDescent, pints.Adam, pints.NelderMead), + ): + print(f"NOTE: Boundaries ignored by {self.pints_optimiser}") + self.bounds = None + elif issubclass(self.pints_optimiser, pints.PSO): + if not all( + np.isfinite(value) + for sublist in self.bounds.values() + for value in sublist + ): + raise ValueError( + "Either all bounds or no bounds must be set for Pints PSO." 
+ ) + else: + self._boundaries = pints.RectangularBoundaries( + self.bounds["lower"], self.bounds["upper"] + ) + + def name(self): + """ + Provides the name of the optimisation strategy. Returns ------- - x : numpy.ndarray - The best parameter set found by the optimization. - final_cost : float - The final cost associated with the best parameters. + str + The name given by PINTS. """ - self.result = self.optimiser.optimise( - cost_function=self.cost, - x0=self.x0, - maxiter=self._max_iterations, - ) - self.log = self.optimiser.log - self._iterations = self.result.nit + return self.pints_optimiser.name() - return self.result.x, self.cost(self.result.x) - - def _run_pints(self): + def _run(self): """ Internal method to run the optimization using a PINTS optimiser. @@ -205,7 +171,6 @@ def _run_pints(self): -------- This method is heavily based on the run method in the PINTS.OptimisationController class. """ - # Check stopping criteria has_stopping_criterion = False has_stopping_criterion |= self._max_iterations is not None @@ -223,9 +188,15 @@ def _run_pints(self): unchanged_iterations = 0 # Choose method to evaluate - f = self._function if self._needs_sensitivities: - f = f.evaluateS1 + + def f(x): + L, dl = self.cost.evaluateS1(x) + return (L, dl) if self.minimising else (-L, -dl) + else: + + def f(x, grad=None): + return self.cost(x, grad) if self.minimising else -self.cost(x, grad) # Create evaluator object if self._parallel: @@ -234,8 +205,8 @@ def _run_pints(self): # For population based optimisers, don't use more workers than # particles! - if isinstance(self.optimiser, pints.PopulationBasedOptimiser): - n_workers = min(n_workers, self.optimiser.population_size()) + if isinstance(self.pints_optimiser, pints.PopulationBasedOptimiser): + n_workers = min(n_workers, self.pints_optimiser.population_size()) evaluator = pints.ParallelEvaluator(f, n_workers=n_workers) else: evaluator = pints.SequentialEvaluator(f) @@ -244,7 +215,7 @@ def _run_pints(self): fb = fg = np.inf # Internally we always minimise! Keep a 2nd value to show the user. 
- fg_user = (fb, fg) if self._minimising else (-fb, -fg) + fg_user = (fb, fg) if self.minimising else (-fb, -fg) # Keep track of the last significant change f_sig = np.inf @@ -254,18 +225,18 @@ def _run_pints(self): try: while running: # Ask optimiser for new points - xs = self.optimiser.ask() + xs = self.pints_optimiser.ask() # Evaluate points fs = evaluator.evaluate(xs) # Tell optimiser about function values - self.optimiser.tell(fs) + self.pints_optimiser.tell(fs) # Update the scores - fb = self.optimiser.f_best() - fg = self.optimiser.f_guessed() - fg_user = (fb, fg) if self._minimising else (-fb, -fg) + fb = self.pints_optimiser.f_best() + fg = self.pints_optimiser.f_guessed() + fg_user = (fb, fg) if self.minimising else (-fb, -fg) # Check for significant changes f_new = fg if self._use_f_guessed else fb @@ -328,13 +299,13 @@ def _run_pints(self): ) # Error in optimiser - error = self.optimiser.stop() + error = self.pints_optimiser.stop() if error: running = False halt_message = str(error) elif self._callback is not None: - self._callback(iteration - 1, self.optimiser) + self._callback(iteration - 1, self) except (Exception, SystemExit, KeyboardInterrupt): # Show last result and exit @@ -344,7 +315,7 @@ def _run_pints(self): print("Current position:") # Show current parameters - x_user = self.optimiser.x_guessed() + x_user = self.pints_optimiser.x_guessed() if self._transformation is not None: x_user = self._transformation.to_model(x_user) for p in x_user: @@ -361,23 +332,22 @@ def _run_pints(self): # Get best parameters if self._use_f_guessed: - x = self.optimiser.x_guessed() - f = self.optimiser.f_guessed() + x = self.pints_optimiser.x_guessed() + f = self.pints_optimiser.f_guessed() else: - x = self.optimiser.x_best() - f = self.optimiser.f_best() + x = self.pints_optimiser.x_best() + f = self.pints_optimiser.f_best() # Inverse transform search parameters if self._transformation is not None: x = self._transformation.to_model(x) - # Store the optimised parameters - self.store_optimised_parameters(x) + # Store result + final_cost = f if self.minimising else -f + self.result = Result(x=x, final_cost=final_cost, nit=self._iterations) - # Return best position and the score used internally, - # i.e the negative log-likelihood in the case of - # self._minimising = False - return x, f + # Return best position and its cost + return x, final_cost def f_guessed_tracking(self): """ @@ -403,22 +373,6 @@ def set_f_guessed_tracking(self, use_f_guessed=False): """ self._use_f_guessed = bool(use_f_guessed) - def set_max_evaluations(self, evaluations=None): - """ - Set a maximum number of evaluations stopping criterion. - Credit: PINTS - - Parameters - ---------- - evaluations : int, optional - The maximum number of evaluations after which to stop the optimization (default: None). - """ - if evaluations is not None: - evaluations = int(evaluations) - if evaluations < 0: - raise ValueError("Maximum number of evaluations cannot be negative.") - self._max_evaluations = evaluations - def set_parallel(self, parallel=False): """ Enable or disable parallel evaluation. @@ -440,7 +394,7 @@ def set_parallel(self, parallel=False): self._parallel = False self._n_workers = 1 - def set_max_iterations(self, iterations=1000): + def set_max_iterations(self, iterations="default"): """ Set the maximum number of iterations as a stopping criterion. 
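Stripped of logging and stopping criteria, the loop in `_run` above reduces to PINTS' ask/evaluate/tell pattern. A self-contained toy sketch, where the quadratic `f` stands in for the wrapped PyBOP cost:

    import numpy as np
    import pints

    def f(x):
        return float(np.sum((x - 2.0) ** 2))  # stand-in for the wrapped cost

    optimiser = pints.XNES(np.array([1.0, 1.0]), sigma0=0.1)
    for _ in range(100):
        xs = optimiser.ask()          # propose candidate points
        fs = [f(x) for x in xs]       # evaluate the (possibly negated) cost
        optimiser.tell(fs)            # update the optimiser's internal state
    print(optimiser.x_best(), optimiser.f_best())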
Credit: PINTS @@ -448,9 +402,11 @@ def set_max_iterations(self, iterations=1000): Parameters ---------- iterations : int, optional - The maximum number of iterations to run (default is 1000). + The maximum number of iterations to run. Set to `None` to remove this stopping criterion. """ + if iterations == "default": + iterations = self.default_max_iterations if iterations is not None: iterations = int(iterations) if iterations < 0: @@ -464,7 +420,7 @@ def set_min_iterations(self, iterations=2): Parameters ---------- iterations : int, optional - The minimum number of iterations to run (default is 100). + The minimum number of iterations to run (default: 2). Set to `None` to remove this stopping criterion. """ if iterations is not None: @@ -481,10 +437,11 @@ def set_max_unchanged_iterations(self, iterations=15, threshold=1e-5): Parameters ---------- iterations : int, optional - The maximum number of unchanged iterations to run (default is 15). + The maximum number of unchanged iterations to run (default: 15). Set to `None` to remove this stopping criterion. threshold : float, optional - The minimum significant change in the objective function value that resets the unchanged iteration counter (default is 1e-5). + The minimum significant change in the objective function value that resets the + unchanged iteration counter (default: 1e-5). """ if iterations is not None: iterations = int(iterations) @@ -498,33 +455,40 @@ def set_max_unchanged_iterations(self, iterations=15, threshold=1e-5): self._unchanged_max_iterations = iterations self._unchanged_threshold = threshold - def store_optimised_parameters(self, x): + def set_max_evaluations(self, evaluations=None): """ - Update the problem parameters with optimized values. - - The optimized parameter values are stored within the associated PyBOP parameter class. + Set a maximum number of evaluations stopping criterion. + Credit: PINTS Parameters ---------- - x : array-like - Optimized parameter values. + evaluations : int, optional + The maximum number of evaluations after which to stop the optimisation + (default: None). """ - for i, param in enumerate(self.cost.parameters): - param.update(value=x[i]) + if evaluations is not None: + evaluations = int(evaluations) + if evaluations < 0: + raise ValueError("Maximum number of evaluations cannot be negative.") + self._max_evaluations = evaluations - def check_optimal_parameters(self, x): - """ - Check if the optimised parameters are physically viable. - """ - if self.cost.problem._model.check_params( - inputs=x, allow_infeasible_solutions=False - ): - return - else: - warnings.warn( - "Optimised parameters are not physically viable! \nConsider retrying the optimisation" - + " with a non-gradient-based optimiser and the option allow_infeasible_solutions=False", - UserWarning, - stacklevel=2, - ) +class Result: + """ + Stores the result of the optimisation. + + Attributes + ---------- + x : ndarray + The solution of the optimisation. + final_cost : float + The cost associated with the solution x. + nit : int + Number of iterations performed by the optimiser. 
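After a run, this object is stored on the optimiser and can be inspected directly (sketch, assuming `optim` was constructed and run as above):

    x, final_cost = optim.run()
    print(optim.result.x)           # best parameter values found
    print(optim.result.final_cost)  # cost at those parameters
    print(optim.result.nit)         # number of iterations performed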
+ + """ + + def __init__(self, x=None, final_cost=None, nit=None): + self.x = x + self.final_cost = final_cost + self.nit = nit diff --git a/pybop/optimisers/optimisation.py b/pybop/optimisers/optimisation.py new file mode 100644 index 000000000..aaa0ab3bf --- /dev/null +++ b/pybop/optimisers/optimisation.py @@ -0,0 +1,65 @@ +from pybop import XNES, BasePintsOptimiser, BaseSciPyOptimiser + + +class Optimisation: + """ + A high-level class for optimisation using PyBOP or PINTS optimisers. + + This class provides an alternative API to the `PyBOP.Optimiser()` API, + specifically allowing for single user-friendly interface for the + optimisation process.The class can be used with either PyBOP or PINTS + optimisers. + + Parameters + ---------- + cost : pybop.BaseCost or pints.ErrorMeasure + An objective function to be optimized, which can be either a pybop.Cost + optimiser : pybop.Optimiser or subclass of pybop.BaseOptimiser, optional + An optimiser from either the PINTS or PyBOP framework to perform the optimization (default: None). + sigma0 : float or sequence, optional + Initial step size or standard deviation for the optimiser (default: None). + verbose : bool, optional + If True, the optimization progress is printed (default: False). + physical_viability : bool, optional + If True, the feasibility of the optimised parameters is checked (default: True). + allow_infeasible_solutions : bool, optional + If True, infeasible parameter values will be allowed in the optimisation (default: True). + + Attributes + ---------- + All attributes from the pybop.optimiser() class + + """ + + def __init__(self, cost, optimiser=None, **optimiser_kwargs): + self.__dict__["optimiser"] = ( + None # Pre-define optimiser to avoid recursion during initialisation + ) + if optimiser is None: + self.optimiser = XNES(cost, **optimiser_kwargs) + elif issubclass(optimiser, BasePintsOptimiser): + self.optimiser = optimiser(cost, **optimiser_kwargs) + elif issubclass(optimiser, BaseSciPyOptimiser): + self.optimiser = optimiser(cost, **optimiser_kwargs) + else: + raise ValueError("Unknown optimiser type") + + def run(self): + return self.optimiser.run() + + def __getattr__(self, attr): + if "optimiser" in self.__dict__ and hasattr(self.optimiser, attr): + return getattr(self.optimiser, attr) + raise AttributeError( + f"'{self.__class__.__name__}' object has no attribute '{attr}'" + ) + + def __setattr__(self, name: str, value) -> None: + if ( + name in self.__dict__ + or "optimiser" not in self.__dict__ + or not hasattr(self.optimiser, name) + ): + object.__setattr__(self, name, value) + else: + setattr(self.optimiser, name, value) diff --git a/pybop/optimisers/pints_optimisers.py b/pybop/optimisers/pints_optimisers.py index 1ee289f1b..e3d8ee31b 100644 --- a/pybop/optimisers/pints_optimisers.py +++ b/pybop/optimisers/pints_optimisers.py @@ -1,8 +1,9 @@ -import numpy as np import pints +from pybop import BasePintsOptimiser -class GradientDescent(pints.GradientDescent): + +class GradientDescent(BasePintsOptimiser): """ Implements a simple gradient descent optimization algorithm. @@ -12,27 +13,25 @@ class GradientDescent(pints.GradientDescent): Parameters ---------- - x0 : array_like - Initial position from which optimization will start. - sigma0 : float, optional - Initial step size (default is 0.1). - bounds : dict, optional - Ignored by this optimiser, provided for API consistency. 
+ **optimiser_kwargs : optional + Valid PINTS option keys and their values, for example: + x0 : array_like + Initial position from which optimisation will start. + sigma0 : float + The learning rate / Initial step size (default: 0.02). See Also -------- pints.GradientDescent : The PINTS implementation this class is based on. """ - def __init__(self, x0, sigma0=0.1, bounds=None): - if bounds is not None: - print("NOTE: Boundaries ignored by Gradient Descent") - - self.boundaries = None # Bounds ignored in pints.GradDesc - super().__init__(x0, sigma0, self.boundaries) + def __init__(self, cost, **optimiser_kwargs): + if "sigma0" not in optimiser_kwargs.keys(): + optimiser_kwargs["sigma0"] = 0.02 # set default + super().__init__(cost, pints.GradientDescent, **optimiser_kwargs) -class Adam(pints.Adam): +class Adam(BasePintsOptimiser): """ Implements the Adam optimization algorithm. @@ -42,27 +41,23 @@ class Adam(pints.Adam): Parameters ---------- - x0 : array_like - Initial position from which optimization will start. - sigma0 : float, optional - Initial step size (default is 0.1). - bounds : dict, optional - Ignored by this optimiser, provided for API consistency. + **optimiser_kwargs : optional + Valid PINTS option keys and their values, for example: + x0 : array_like + Initial position from which optimisation will start. + sigma0 : float + Initial step size. See Also -------- pints.Adam : The PINTS implementation this class is based on. """ - def __init__(self, x0, sigma0=0.1, bounds=None): - if bounds is not None: - print("NOTE: Boundaries ignored by Adam") - - self.boundaries = None # Bounds ignored in pints.Adam - super().__init__(x0, sigma0, self.boundaries) + def __init__(self, cost, **optimiser_kwargs): + super().__init__(cost, pints.Adam, **optimiser_kwargs) -class IRPropMin(pints.IRPropMin): +class IRPropMin(BasePintsOptimiser): """ Implements the iRpropMin optimization algorithm. @@ -72,30 +67,26 @@ class IRPropMin(pints.IRPropMin): Parameters ---------- - x0 : array_like - Initial position from which optimization will start. - sigma0 : float, optional - Initial step size (default is 0.1). - bounds : dict, optional - A dictionary with 'lower' and 'upper' keys containing arrays for lower and upper - bounds on the parameters. + **optimiser_kwargs : optional + Valid PINTS option keys and their values, for example: + x0 : array_like + Initial position from which optimisation will start. + sigma0 : float + Initial step size. + bounds : dict + A dictionary with 'lower' and 'upper' keys containing arrays for lower and + upper bounds on the parameters. See Also -------- pints.IRPropMin : The PINTS implementation this class is based on. """ - def __init__(self, x0, sigma0=0.1, bounds=None): - if bounds is not None: - self.boundaries = pints.RectangularBoundaries( - bounds["lower"], bounds["upper"] - ) - else: - self.boundaries = None - super().__init__(x0, sigma0, self.boundaries) + def __init__(self, cost, **optimiser_kwargs): + super().__init__(cost, pints.IRPropMin, **optimiser_kwargs) -class PSO(pints.PSO): +class PSO(BasePintsOptimiser): """ Implements a particle swarm optimization (PSO) algorithm. @@ -105,36 +96,26 @@ class PSO(pints.PSO): Parameters ---------- - x0 : array_like - Initial positions of particles, which the optimization will use. - sigma0 : float, optional - Spread of the initial particle positions (default is 0.1). - bounds : dict, optional - A dictionary with 'lower' and 'upper' keys containing arrays for lower and upper - bounds on the parameters. 
+ **optimiser_kwargs : optional + Valid PINTS option keys and their values, for example: + x0 : array_like + Initial positions of particles, which the optimisation will use. + sigma0 : float + Spread of the initial particle positions. + bounds : dict + A dictionary with 'lower' and 'upper' keys containing arrays for lower and + upper bounds on the parameters. See Also -------- pints.PSO : The PINTS implementation this class is based on. """ - def __init__(self, x0, sigma0=0.1, bounds=None): - if bounds is None: - self.boundaries = None - elif not all( - np.isfinite(value) for sublist in bounds.values() for value in sublist - ): - raise ValueError( - "Either all bounds or no bounds must be set for Pints PSO." - ) - else: - self.boundaries = pints.RectangularBoundaries( - bounds["lower"], bounds["upper"] - ) - super().__init__(x0, sigma0, self.boundaries) + def __init__(self, cost, **optimiser_kwargs): + super().__init__(cost, pints.PSO, **optimiser_kwargs) -class SNES(pints.SNES): +class SNES(BasePintsOptimiser): """ Implements the stochastic natural evolution strategy (SNES) optimization algorithm. @@ -144,30 +125,26 @@ class SNES(pints.SNES): Parameters ---------- - x0 : array_like - Initial position from which optimization will start. - sigma0 : float, optional - Initial standard deviation of the sampling distribution, defaults to 0.1. - bounds : dict, optional - A dictionary with 'lower' and 'upper' keys containing arrays for lower and upper - bounds on the parameters. + **optimiser_kwargs : optional + Valid PINTS option keys and their values, for example: + x0 : array_like + Initial position from which optimisation will start. + sigma0 : float + Initial standard deviation of the sampling distribution. + bounds : dict + A dictionary with 'lower' and 'upper' keys containing arrays for lower and + upper bounds on the parameters. See Also -------- pints.SNES : The PINTS implementation this class is based on. """ - def __init__(self, x0, sigma0=0.1, bounds=None): - if bounds is not None: - self.boundaries = pints.RectangularBoundaries( - bounds["lower"], bounds["upper"] - ) - else: - self.boundaries = None - super().__init__(x0, sigma0, self.boundaries) + def __init__(self, cost, **optimiser_kwargs): + super().__init__(cost, pints.SNES, **optimiser_kwargs) -class XNES(pints.XNES): +class XNES(BasePintsOptimiser): """ Implements the Exponential Natural Evolution Strategy (XNES) optimiser from PINTS. @@ -177,30 +154,26 @@ class XNES(pints.XNES): Parameters ---------- - x0 : array_like - The initial parameter vector to optimize. - sigma0 : float, optional - Initial standard deviation of the sampling distribution, defaults to 0.1. - bounds : dict, optional - A dictionary with 'lower' and 'upper' keys containing arrays for lower and upper - bounds on the parameters. If ``None``, no bounds are enforced. + **optimiser_kwargs : optional + Valid PINTS option keys and their values, for example: + x0 : array_like + The initial parameter vector to optimise. + sigma0 : float + Initial standard deviation of the sampling distribution. + bounds : dict + A dictionary with 'lower' and 'upper' keys containing arrays for lower and + upperbounds on the parameters. If ``None``, no bounds are enforced. See Also -------- pints.XNES : PINTS implementation of XNES algorithm. 
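Each wrapper in this module is a thin shim over `BasePintsOptimiser`, so exposing a further PINTS method follows the same pattern. A sketch using `pints.BareCMAES` (any `pints.Optimiser` subclass would work in its place):

    class BareCMAES(BasePintsOptimiser):
        """Sketch: expose another PINTS method through the same shim."""

        def __init__(self, cost, **optimiser_kwargs):
            super().__init__(cost, pints.BareCMAES, **optimiser_kwargs)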
""" - def __init__(self, x0, sigma0=0.1, bounds=None): - if bounds is not None: - self.boundaries = pints.RectangularBoundaries( - bounds["lower"], bounds["upper"] - ) - else: - self.boundaries = None - super().__init__(x0, sigma0, self.boundaries) + def __init__(self, cost, **optimiser_kwargs): + super().__init__(cost, pints.XNES, **optimiser_kwargs) -class NelderMead(pints.NelderMead): +class NelderMead(BasePintsOptimiser): """ Implements the Nelder-Mead downhill simplex method from PINTS. @@ -210,28 +183,24 @@ class NelderMead(pints.NelderMead): Parameters ---------- - x0 : array_like - The initial parameter vector to optimize. - sigma0 : float, optional - Initial standard deviation of the sampling distribution, defaults to 0.1. - Does not appear to be used. - bounds : dict, optional - Ignored by this optimiser, provided for API consistency. + **optimiser_kwargs : optional + Valid PINTS option keys and their values, for example: + x0 : array_like + The initial parameter vector to optimise. + sigma0 : float + Initial standard deviation of the sampling distribution. + Does not appear to be used. See Also -------- pints.NelderMead : PINTS implementation of Nelder-Mead algorithm. """ - def __init__(self, x0, sigma0=0.1, bounds=None): - if bounds is not None: - print("NOTE: Boundaries ignored by NelderMead") + def __init__(self, cost, **optimiser_kwargs): + super().__init__(cost, pints.NelderMead, **optimiser_kwargs) - self.boundaries = None # Bounds ignored in pints.NelderMead - super().__init__(x0, sigma0, self.boundaries) - -class CMAES(pints.CMAES): +class CMAES(BasePintsOptimiser): """ Adapter for the Covariance Matrix Adaptation Evolution Strategy (CMA-ES) optimiser in PINTS. @@ -241,30 +210,26 @@ class CMAES(pints.CMAES): Parameters ---------- - x0 : array_like - The initial parameter vector to optimize. - sigma0 : float, optional - Initial standard deviation of the sampling distribution, defaults to 0.1. - bounds : dict, optional - A dictionary with 'lower' and 'upper' keys containing arrays for lower and upper - bounds on the parameters. If ``None``, no bounds are enforced. + **optimiser_kwargs : optional + Valid PINTS option keys and their values, for example: + x0 : array_like + The initial parameter vector to optimise. + sigma0 : float + Initial standard deviation of the sampling distribution. + bounds : dict + A dictionary with 'lower' and 'upper' keys containing arrays for lower and + upper bounds on the parameters. If ``None``, no bounds are enforced. See Also -------- pints.CMAES : PINTS implementation of CMA-ES algorithm. """ - def __init__(self, x0, sigma0=0.1, bounds=None): - if len(x0) == 1: + def __init__(self, cost, **optimiser_kwargs): + x0 = optimiser_kwargs.pop("x0", cost.x0) + if x0 is not None and len(x0) == 1: raise ValueError( "CMAES requires optimisation of >= 2 parameters at once. " + "Please choose another optimiser." 
) - if bounds is not None: - self.boundaries = pints.RectangularBoundaries( - bounds["lower"], bounds["upper"] - ) - else: - self.boundaries = None - - super().__init__(x0, sigma0, self.boundaries) + super().__init__(cost, pints.CMAES, **optimiser_kwargs) diff --git a/pybop/optimisers/scipy_optimisers.py b/pybop/optimisers/scipy_optimisers.py index dd51237b2..6c2498093 100644 --- a/pybop/optimisers/scipy_optimisers.py +++ b/pybop/optimisers/scipy_optimisers.py @@ -1,115 +1,206 @@ import numpy as np from scipy.optimize import differential_evolution, minimize -from .base_optimiser import BaseOptimiser +from pybop import BaseOptimiser -class SciPyMinimize(BaseOptimiser): +class BaseSciPyOptimiser(BaseOptimiser): """ - Adapts SciPy's minimize function for use as an optimization strategy. - - This class provides an interface to various scalar minimization algorithms implemented in SciPy, allowing fine-tuning of the optimization process through method selection and option configuration. + A base class for defining optimisation methods from the SciPy library. Parameters ---------- - method : str, optional - The type of solver to use. If not specified, defaults to 'Nelder-Mead'. - Options: 'Nelder-Mead', 'Powell', 'CG', 'BFGS', 'Newton-CG', 'L-BFGS-B', 'TNC', 'COBYLA', 'SLSQP', 'trust-constr', 'dogleg', 'trust-ncg', 'trust-exact', 'trust-krylov'. - bounds : sequence or ``Bounds``, optional + x0 : array_like + Initial position from which optimisation will start. + bounds : dict, sequence or scipy.optimize.Bounds, optional Bounds for variables as supported by the selected method. - maxiter : int, optional - Maximum number of iterations to perform. + **optimiser_kwargs : optional + Valid SciPy option keys and their values. """ - def __init__(self, method=None, bounds=None, maxiter=None, tol=1e-5): - super().__init__() - self.method = method - self.bounds = bounds + def __init__(self, cost, **optimiser_kwargs): + super().__init__(cost, **optimiser_kwargs) self.num_resamples = 40 - self.tol = tol - self.options = {} - self._max_iterations = maxiter - if self.method is None: - self.method = "Nelder-Mead" + def _sanitise_inputs(self): + """ + Check and remove any duplicate optimiser options. + """ + # Unpack values from any nested options dictionary + if "options" in self.unset_options.keys(): + key_list = list(self.unset_options["options"].keys()) + for key in key_list: + if key not in self.unset_options.keys(): + self.unset_options[key] = self.unset_options["options"].pop(key) + else: + raise Exception( + f"A duplicate {key} option was found in the options dictionary." + ) + self.unset_options.pop("options") + + # Check for duplicate keywords + expected_keys = ["maxiter", "popsize", "tol"] + alternative_keys = [ + "max_iterations", + "population_size", + "threshold", + ] + for exp_key, alt_key in zip(expected_keys, alternative_keys): + if alt_key in self.unset_options.keys(): + if exp_key in self.unset_options.keys(): + raise Exception( + "The alternative {alt_key} option was passed in addition to the expected {exp_key} option." + ) + else: # rename + self.unset_options[exp_key] = self.unset_options.pop(alt_key) + + # Convert bounds to SciPy format + if isinstance(self.bounds, dict): + self._scipy_bounds = [ + (lower, upper) + for lower, upper in zip(self.bounds["lower"], self.bounds["upper"]) + ] + else: + self._scipy_bounds = self.bounds + + def _run(self): + """ + Internal method to run the optimization using a PyBOP optimiser. 
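The dictionary-to-pairs conversion above produces the per-parameter bounds format SciPy expects, for example (illustrative values only):

    bounds = {"lower": [0.5, 0.4], "upper": [0.8, 0.7]}
    scipy_bounds = [
        (lower, upper)
        for lower, upper in zip(bounds["lower"], bounds["upper"])
    ]
    assert scipy_bounds == [(0.5, 0.8), (0.4, 0.7)]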
- def _runoptimise(self, cost_function, x0): + Returns + ------- + x : numpy.ndarray + The best parameter set found by the optimization. + final_cost : float + The final cost associated with the best parameters. """ - Executes the optimization process using SciPy's minimize function. + self.result = self._run_optimiser() + + self.result.final_cost = self.cost(self.result.x) + self._iterations = self.result.nit - Parameters - ---------- - cost_function : callable - The objective function to minimize. + return self.result.x, self.result.final_cost + + +class SciPyMinimize(BaseSciPyOptimiser): + """ + Adapts SciPy's minimize function for use as an optimization strategy. + + This class provides an interface to various scalar minimization algorithms implemented in SciPy, + allowing fine-tuning of the optimization process through method selection and option configuration. + + Parameters + ---------- + **optimiser_kwargs : optional + Valid SciPy Minimize option keys and their values, For example: x0 : array_like - Initial guess for the parameters. + Initial position from which optimisation will start. + bounds : dict, sequence or scipy.optimize.Bounds + Bounds for variables as supported by the selected method. + method : str + The optimisation method, options include: + 'Nelder-Mead', 'Powell', 'CG', 'BFGS', 'Newton-CG', 'L-BFGS-B', 'TNC', 'COBYLA', + 'SLSQP', 'trust-constr', 'dogleg', 'trust-ncg', 'trust-exact', 'trust-krylov'. + + See Also + -------- + scipy.optimize.minimize : The SciPy method this class is based on. + """ + + def __init__(self, cost, **optimiser_kwargs): + optimiser_options = dict(method="Nelder-Mead", jac=False) + optimiser_options.update(**optimiser_kwargs) + super().__init__(cost, **optimiser_options) + + def _set_up_optimiser(self): + """ + Parse optimiser options. + """ + # Check and remove any duplicate keywords in self.unset_options + self._sanitise_inputs() + + # Apply default maxiter + self._options = dict() + self._options["options"] = dict() + self._options["options"]["maxiter"] = self.default_max_iterations + + # Apply additional options and remove them from the options dictionary + key_list = list(self.unset_options.keys()) + for key in key_list: + if key in [ + "method", + "hess", + "hessp", + "constraints", + "tol", + ]: + self._options.update({key: self.unset_options.pop(key)}) + elif key == "jac": + if self.unset_options["jac"] not in [True, False, None]: + raise ValueError( + f"Expected the jac option to be either True, False or None. Received: {self.unset_options[key]}" + ) + self._options.update({key: self.unset_options.pop(key)}) + elif key == "maxiter": + # Nest this option within an options dictionary for SciPy minimize + self._options["options"]["maxiter"] = self.unset_options.pop(key) + + def _run_optimiser(self): + """ + Executes the optimisation process using SciPy's minimize function. Returns ------- tuple - A tuple (x, final_cost) containing the optimized parameters and the value of `cost_function` at the optimum. + A tuple (x, final_cost) containing the optimized parameters and the value of `cost_function` + at the optimum. 
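As exercised in the unit tests below, a gradient-based SciPy method can be selected with `jac=True`, in which case `cost.evaluateS1` supplies the cost and its gradient together (sketch; `cost` is an assumed pre-built cost):

    # Gradient-based method; evaluateS1 returns (cost, gradient)
    optim = pybop.SciPyMinimize(cost, method="L-BFGS-B", jac=True, maxiter=10)
    x, final_cost = optim.run()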
""" - - self.log = [[x0]] - self.options = {"maxiter": self._max_iterations} + self.log = [[self.x0]] # Add callback storing history of parameter values def callback(x): self.log.append([x]) - # Check x0 and resample if required - self.cost0 = cost_function(x0) - if np.isinf(self.cost0): + # Compute the absolute initial cost and resample if required + self._cost0 = np.abs(self.cost(self.x0)) + if np.isinf(self._cost0): for i in range(1, self.num_resamples): - x0 = cost_function.problem.sample_initial_conditions(seed=i) - self.cost0 = cost_function(x0) - if not np.isinf(self.cost0): + x0 = self.cost.problem.sample_initial_conditions(seed=i) + self._cost0 = np.abs(self.cost(x0)) + if not np.isinf(self._cost0): break - if np.isinf(self.cost0): + if np.isinf(self._cost0): raise ValueError( "The initial parameter values return an infinite cost." ) - # Scale the cost function and eliminate nan values + # Scale the cost function, preserving the sign convention, and eliminate nan values self.inf_count = 0 - def cost_wrapper(x): - cost = cost_function(x) / self.cost0 - if np.isinf(cost): - self.inf_count += 1 - cost = 1 + 0.9**self.inf_count # for fake finite gradient - return cost - - # Reformat bounds - if self.bounds is not None: - bounds = ( - (lower, upper) - for lower, upper in zip(self.bounds["lower"], self.bounds["upper"]) - ) + if not self._options["jac"]: + + def cost_wrapper(x): + cost = self.cost(x) / self._cost0 + if np.isinf(cost): + self.inf_count += 1 + cost = 1 + 0.9**self.inf_count # for fake finite gradient + return cost if self.minimising else -cost + elif self._options["jac"] is True: + + def cost_wrapper(x): + L, dl = self.cost.evaluateS1(x) + return L, dl if self.minimising else -L, -dl result = minimize( cost_wrapper, - x0, - method=self.method, - bounds=bounds, - tol=self.tol, - options=self.options, + self.x0, + bounds=self._scipy_bounds, callback=callback, + **self._options, ) return result - def needs_sensitivities(self): - """ - Determines if the optimization algorithm requires gradient information. - - Returns - ------- - bool - False, indicating that gradient information is not required. - """ - return False - def name(self): """ Provides the name of the optimization strategy. @@ -122,109 +213,112 @@ def name(self): return "SciPyMinimize" -class SciPyDifferentialEvolution(BaseOptimiser): +class SciPyDifferentialEvolution(BaseSciPyOptimiser): """ Adapts SciPy's differential_evolution function for global optimization. - This class provides a global optimization strategy based on differential evolution, useful for problems involving continuous parameters and potentially multiple local minima. + This class provides a global optimization strategy based on differential evolution, useful for + problems involving continuous parameters and potentially multiple local minima. Parameters ---------- - bounds : sequence or ``Bounds`` + bounds : dict, sequence or scipy.optimize.Bounds Bounds for variables. Must be provided as it is essential for differential evolution. - strategy : str, optional - The differential evolution strategy to use. Defaults to 'best1bin'. - maxiter : int, optional - Maximum number of iterations to perform. Defaults to 1000. - popsize : int, optional - The number of individuals in the population. Defaults to 15. + **optimiser_kwargs : optional + Valid SciPy option keys and their values, for example: + strategy : str + The differential evolution strategy to use. + maxiter : int + Maximum number of iterations to perform. 
+ popsize : int + The number of individuals in the population. + + See Also + -------- + scipy.optimize.differential_evolution : The SciPy method this class is based on. """ - def __init__( - self, bounds=None, strategy="best1bin", maxiter=1000, popsize=15, tol=1e-5 - ): - super().__init__() - self.tol = tol - self.strategy = strategy - self._max_iterations = maxiter - self._population_size = popsize + def __init__(self, cost, **optimiser_kwargs): + optimiser_options = dict(strategy="best1bin", popsize=15) + optimiser_options.update(**optimiser_kwargs) + super().__init__(cost, **optimiser_options) - if bounds is None: - raise ValueError("Bounds must be specified for differential_evolution.") - elif not all( - np.isfinite(value) for sublist in bounds.values() for value in sublist - ): - raise ValueError("Bounds must be specified for differential_evolution.") - elif isinstance(bounds, dict): - bounds = [ - (lower, upper) for lower, upper in zip(bounds["lower"], bounds["upper"]) - ] - self.bounds = bounds + def _set_up_optimiser(self): + """ + Parse optimiser options. + """ + # Check and remove any duplicate keywords in self.unset_options + self._sanitise_inputs() - def _runoptimise(self, cost_function, x0=None): + # Check bounds + if self._scipy_bounds is None: + raise ValueError("Bounds must be specified for differential_evolution.") + else: + if not all( + np.isfinite(value) for pair in self._scipy_bounds for value in pair + ): + raise ValueError("Bounds must be specified for differential_evolution.") + + # Apply default maxiter + self._options = dict() + self._options["maxiter"] = self.default_max_iterations + + # Apply additional options and remove them from the options dictionary + key_list = list(self.unset_options.keys()) + for key in key_list: + if key in [ + "strategy", + "maxiter", + "popsize", + "tol", + "mutation", + "recombination", + "seed", + "disp", + "polish", + "init", + "atol", + "updating", + "workers", + "constraints", + "tol", + "integrality", + "vectorized", + ]: + self._options.update({key: self.unset_options.pop(key)}) + + def _run_optimiser(self): """ Executes the optimization process using SciPy's differential_evolution function. - Parameters - ---------- - cost_function : callable - The objective function to minimize. - x0 : array_like, optional - Ignored parameter, provided for API consistency. - Returns ------- tuple - A tuple (x, final_cost) containing the optimized parameters and the value of ``cost_function`` at the optimum. + A tuple (x, final_cost) containing the optimized parameters and the value of + the cost function at the optimum. """ - - self.log = [] - - if x0 is not None: + if self.x0 is not None: print( "Ignoring x0. Initial conditions are not used for differential_evolution." ) + self.x0 = None # Add callback storing history of parameter values def callback(x, convergence): self.log.append([x]) + def cost_wrapper(x): + return self.cost(x) if self.minimising else -self.cost(x) + result = differential_evolution( - cost_function, - self.bounds, - strategy=self.strategy, - maxiter=self._max_iterations, - popsize=self._population_size, - tol=self.tol, + cost_wrapper, + self._scipy_bounds, callback=callback, + **self._options, ) return result - def set_population_size(self, population_size=None): - """ - Sets a population size to use in this optimisation. 
- Credit: PINTS - - """ - # Check population size or set using heuristic - if population_size is not None: - population_size = int(population_size) - if population_size < 1: - raise ValueError("Population size must be at least 1.") - self._population_size = population_size - - def needs_sensitivities(self): - """ - Determines if the optimization algorithm requires gradient information. - - Returns - ------- - bool - False, indicating that gradient information is not required for differential evolution. - """ - return False - def name(self): """ Provides the name of the optimization strategy. diff --git a/pybop/plotting/plot2d.py b/pybop/plotting/plot2d.py index 34e75ddd8..957279613 100644 --- a/pybop/plotting/plot2d.py +++ b/pybop/plotting/plot2d.py @@ -45,7 +45,7 @@ def plot2d( """ # Assign input as a cost or optimisation object - if isinstance(cost_or_optim, pybop.Optimisation): + if isinstance(cost_or_optim, (pybop.BaseOptimiser, pybop.Optimisation)): optim = cost_or_optim plot_optim = True cost = optim.cost @@ -128,22 +128,23 @@ def plot2d( ) # Plot the initial guess - fig.add_trace( - go.Scatter( - x=[optim.x0[0]], - y=[optim.x0[1]], - mode="markers", - marker_symbol="circle", - marker=dict( - color="mediumspringgreen", - line_color="mediumspringgreen", - line_width=1, - size=14, - showscale=False, - ), - showlegend=False, + if optim.x0 is not None: + fig.add_trace( + go.Scatter( + x=[optim.x0[0]], + y=[optim.x0[1]], + mode="markers", + marker_symbol="circle", + marker=dict( + color="mediumspringgreen", + line_color="mediumspringgreen", + line_width=1, + size=14, + showscale=False, + ), + showlegend=False, + ) ) - ) # Update the layout and display the figure fig.update_layout(**layout_kwargs) diff --git a/pybop/plotting/plot_convergence.py b/pybop/plotting/plot_convergence.py index 662dedcfa..06b3274d3 100644 --- a/pybop/plotting/plot_convergence.py +++ b/pybop/plotting/plot_convergence.py @@ -30,23 +30,25 @@ def plot_convergence(optim, show=True, **layout_kwargs): cost = optim.cost log = optim.log - # Compute the minimum cost for each iteration - min_cost_per_iteration = [ + # Find the best cost from each iteration + best_cost_per_iteration = [ min((cost(solution) for solution in log_entry), default=np.inf) + if optim.minimising + else max((cost(solution) for solution in log_entry), default=-np.inf) for log_entry in log ] # Generate a list of iteration numbers - iteration_numbers = list(range(1, len(min_cost_per_iteration) + 1)) + iteration_numbers = list(range(1, len(best_cost_per_iteration) + 1)) # Create a plotting dictionary plot_dict = pybop.StandardPlot( x=iteration_numbers, - y=min_cost_per_iteration, + y=best_cost_per_iteration, layout_options=dict( xaxis_title="Iteration", yaxis_title="Cost", title="Convergence" ), - trace_names=optim.optimiser.name(), + trace_names=optim.name(), ) # Generate and display the figure diff --git a/tests/integration/test_model_experiment_changes.py b/tests/integration/test_model_experiment_changes.py index b8fb16880..b3c822776 100644 --- a/tests/integration/test_model_experiment_changes.py +++ b/tests/integration/test_model_experiment_changes.py @@ -99,6 +99,6 @@ def final_cost(self, solution, model, parameters, init_soc): model, parameters, dataset, signal=signal, x0=x0, init_soc=init_soc ) cost = pybop.RootMeanSquaredError(problem) - optim = pybop.Optimisation(cost, optimiser=pybop.PSO) + optim = pybop.PSO(cost) x, final_cost = optim.run() return final_cost diff --git a/tests/integration/test_optimisation_options.py 
b/tests/integration/test_optimisation_options.py index 2f3f991ae..1505a37dd 100644 --- a/tests/integration/test_optimisation_options.py +++ b/tests/integration/test_optimisation_options.py @@ -80,23 +80,30 @@ def spm_costs(self, model, parameters, cost_class): ) @pytest.mark.integration def test_optimisation_f_guessed(self, f_guessed, spm_costs): + x0 = spm_costs.x0 # Test each optimiser - parameterisation = pybop.Optimisation( - cost=spm_costs, optimiser=pybop.XNES, sigma0=0.05 + optim = pybop.XNES( + cost=spm_costs, + sigma0=0.05, + max_iterations=125, + max_unchanged_iterations=35, + threshold=1e-5, + use_f_guessed=f_guessed, ) - parameterisation.set_max_unchanged_iterations(iterations=35, threshold=1e-5) - parameterisation.set_max_iterations(125) - parameterisation.set_f_guessed_tracking(f_guessed) # Set parallelisation if not on Windows if sys.platform != "win32": - parameterisation.set_parallel(True) + optim.set_parallel(True) - initial_cost = parameterisation.cost(spm_costs.x0) - x, final_cost = parameterisation.run() + initial_cost = optim.cost(x0) + x, final_cost = optim.run() # Assertions - assert initial_cost > final_cost + if not np.allclose(x0, self.ground_truth, atol=1e-5): + if optim.minimising: + assert initial_cost > final_cost + else: + assert initial_cost < final_cost np.testing.assert_allclose(x, self.ground_truth, atol=2.5e-2) def getdata(self, model, x, init_soc): diff --git a/tests/integration/test_spm_parameterisations.py b/tests/integration/test_spm_parameterisations.py index 02ca9ef85..eada8d9b9 100644 --- a/tests/integration/test_spm_parameterisations.py +++ b/tests/integration/test_spm_parameterisations.py @@ -110,18 +110,24 @@ def test_spm_optimisers(self, optimiser, spm_costs): spm_costs.bounds = bounds # Test each optimiser - parameterisation = pybop.Optimisation( - cost=spm_costs, optimiser=optimiser, sigma0=0.05 - ) + if optimiser in [pybop.PSO]: + optim = pybop.Optimisation( + cost=spm_costs, optimiser=optimiser, sigma0=0.05, max_iterations=125 + ) + else: + optim = optimiser(cost=spm_costs, sigma0=0.05, max_iterations=125) + if issubclass(optimiser, pybop.BasePintsOptimiser): + optim.set_max_unchanged_iterations(iterations=35, threshold=1e-5) - parameterisation.set_max_unchanged_iterations(iterations=35, threshold=1e-5) - parameterisation.set_max_iterations(125) - initial_cost = parameterisation.cost(x0) - x, final_cost = parameterisation.run() + initial_cost = optim.cost(x0) + x, final_cost = optim.run() # Assertions if not np.allclose(x0, self.ground_truth, atol=1e-5): - assert initial_cost > final_cost + if optim.minimising: + assert initial_cost > final_cost + else: + assert initial_cost < final_cost if pybamm_version <= "23.9": np.testing.assert_allclose(x, self.ground_truth, atol=2.5e-2) else: @@ -182,18 +188,21 @@ def test_multiple_signals(self, multi_optimiser, spm_two_signal_cost): spm_two_signal_cost.bounds = bounds # Test each optimiser - parameterisation = pybop.Optimisation( - cost=spm_two_signal_cost, optimiser=multi_optimiser, sigma0=0.03 + optim = multi_optimiser( + cost=spm_two_signal_cost, sigma0=0.03, max_iterations=125 ) - parameterisation.set_max_unchanged_iterations(iterations=35, threshold=5e-4) - parameterisation.set_max_iterations(125) + if issubclass(multi_optimiser, pybop.BasePintsOptimiser): + optim.set_max_unchanged_iterations(iterations=35, threshold=5e-4) - initial_cost = parameterisation.cost(spm_two_signal_cost.x0) - x, final_cost = parameterisation.run() + initial_cost = optim.cost(spm_two_signal_cost.x0) + x, final_cost 
= optim.run() # Assertions if not np.allclose(x0, self.ground_truth, atol=1e-5): - assert initial_cost > final_cost + if optim.minimising: + assert initial_cost > final_cost + else: + assert initial_cost < final_cost np.testing.assert_allclose(x, self.ground_truth, atol=2.5e-2) @pytest.mark.parametrize("init_soc", [0.4, 0.6]) @@ -222,14 +231,17 @@ def test_model_misparameterisation(self, parameters, model, init_soc): optimiser = pybop.CMAES # Build the optimisation problem - parameterisation = pybop.Optimisation(cost=cost, optimiser=optimiser) + parameterisation = optimiser(cost=cost) # Run the optimisation problem x, final_cost = parameterisation.run() - # Assertions + # Assertion for final_cost with np.testing.assert_raises(AssertionError): np.testing.assert_allclose(final_cost, 0, atol=1e-2) + + # Assertion for x + with np.testing.assert_raises(AssertionError): np.testing.assert_allclose(x, self.ground_truth, atol=2e-2) def getdata(self, model, x, init_soc): diff --git a/tests/integration/test_thevenin_parameterisation.py b/tests/integration/test_thevenin_parameterisation.py index 2f609a0e5..5ac6e84ef 100644 --- a/tests/integration/test_thevenin_parameterisation.py +++ b/tests/integration/test_thevenin_parameterisation.py @@ -70,22 +70,29 @@ def cost(self, model, parameters, cost_class): def test_optimisers_on_simple_model(self, optimiser, cost): x0 = cost.x0 if optimiser in [pybop.GradientDescent]: - parameterisation = pybop.Optimisation( - cost=cost, optimiser=optimiser, sigma0=2.5e-4 + optim = optimiser( + cost=cost, + sigma0=2.5e-4, + max_iterations=250, ) else: - parameterisation = pybop.Optimisation( - cost=cost, optimiser=optimiser, sigma0=0.03 + optim = optimiser( + cost=cost, + sigma0=0.03, + max_iterations=250, ) + if isinstance(optimiser, pybop.BasePintsOptimiser): + optim.set_max_unchanged_iterations(iterations=55, threshold=1e-5) - parameterisation.set_max_unchanged_iterations(iterations=55, threshold=1e-5) - parameterisation.set_max_iterations(250) - initial_cost = parameterisation.cost(x0) - x, final_cost = parameterisation.run() + initial_cost = optim.cost(x0) + x, final_cost = optim.run() # Assertions if not np.allclose(x0, self.ground_truth, atol=1e-5): - assert initial_cost > final_cost + if optim.minimising: + assert initial_cost > final_cost + else: + assert initial_cost < final_cost np.testing.assert_allclose(x, self.ground_truth, atol=1e-2) def getdata(self, model, x): diff --git a/tests/unit/test_cost.py b/tests/unit/test_cost.py index 6ffeb58ea..f68df92bb 100644 --- a/tests/unit/test_cost.py +++ b/tests/unit/test_cost.py @@ -211,14 +211,14 @@ def test_energy_density_costs( # Test type of returned value assert np.isscalar(cost([0.5])) - assert cost([0.4]) <= 0 # Should be a viable design - assert cost([0.8]) == np.inf # Should exceed active material + porosity < 1 - assert cost([1.4]) == np.inf # Definitely not viable - assert cost([-0.1]) == np.inf # Should not be a viable design + assert cost([0.4]) >= 0 # Should be a viable design + assert cost([0.8]) == -np.inf # Should exceed active material + porosity < 1 + assert cost([1.4]) == -np.inf # Definitely not viable + assert cost([-0.1]) == -np.inf # Should not be a viable design # Test infeasible locations cost.problem._model.allow_infeasible_solutions = False - assert cost([1.1]) == np.inf + assert cost([1.1]) == -np.inf # Test exception for non-numeric inputs with pytest.raises(ValueError): diff --git a/tests/unit/test_optimisation.py b/tests/unit/test_optimisation.py index 3bf1c6d4b..949fe7ef1 100644 --- 
a/tests/unit/test_optimisation.py +++ b/tests/unit/test_optimisation.py @@ -67,7 +67,7 @@ def two_param_cost(self, model, two_parameters, dataset): return pybop.SumSquaredError(problem) @pytest.mark.parametrize( - "optimiser_class, expected_name", + "optimiser, expected_name", [ (pybop.SciPyMinimize, "SciPyMinimize"), (pybop.SciPyDifferentialEvolution, "SciPyDifferentialEvolution"), @@ -82,33 +82,148 @@ def two_param_cost(self, model, two_parameters, dataset): ], ) @pytest.mark.unit - def test_optimiser_classes(self, two_param_cost, optimiser_class, expected_name): + def test_optimiser_classes(self, two_param_cost, optimiser, expected_name): # Test class construction cost = two_param_cost - opt = pybop.Optimisation(cost=cost, optimiser=optimiser_class) - - assert opt.optimiser is not None - assert opt.optimiser.name() == expected_name - - # Test without bounds - cost.bounds = None - if optimiser_class in [pybop.SciPyMinimize]: - opt = pybop.Optimisation(cost=cost, optimiser=optimiser_class) - assert opt.optimiser.bounds is None - elif optimiser_class in [pybop.SciPyDifferentialEvolution]: - with pytest.raises(ValueError): - pybop.Optimisation(cost=cost, optimiser=optimiser_class) + optim = optimiser(cost=cost) + + assert optim.cost is not None + assert optim.name() == expected_name + + # Test pybop.Optimisation construction + optim = pybop.Optimisation(cost=cost, optimiser=optimiser) + + assert optim.cost is not None + assert optim.name() == expected_name + + if optimiser not in [pybop.SciPyDifferentialEvolution]: + # Test construction without bounds + optim = optimiser(cost=cost, bounds=None) + assert optim.bounds is None + if issubclass(optimiser, pybop.BasePintsOptimiser): + assert optim._boundaries is None + + @pytest.mark.parametrize( + "optimiser", + [ + pybop.SciPyMinimize, + pybop.SciPyDifferentialEvolution, + pybop.GradientDescent, + pybop.Adam, + pybop.SNES, + pybop.XNES, + pybop.PSO, + pybop.IRPropMin, + pybop.NelderMead, + ], + ) + @pytest.mark.unit + def test_optimiser_kwargs(self, cost, optimiser): + optim = optimiser(cost=cost, maxiter=1) + + # Check maximum iterations + optim.run() + assert optim._iterations == 1 + + if optimiser in [pybop.GradientDescent, pybop.Adam, pybop.NelderMead]: + # Ignored bounds + optim = optimiser(cost=cost, bounds=cost.bounds) + assert optim.bounds is None + elif optimiser in [pybop.PSO]: + assert optim.bounds == cost.bounds + # Cannot accept infinite bounds + bounds = {"upper": [np.inf], "lower": [0.57]} + with pytest.raises( + ValueError, + match="Either all bounds or no bounds must be set", + ): + optim = optimiser(cost=cost, bounds=bounds) else: - opt = pybop.Optimisation(cost=cost, optimiser=optimiser_class) - assert opt.optimiser.boundaries is None + # Check and update bounds + assert optim.bounds == cost.bounds + bounds = {"upper": [0.63], "lower": [0.57]} + optim = optimiser(cost=cost, bounds=bounds) + assert optim.bounds == bounds - # Test setting population size - if optimiser_class in [pybop.SciPyDifferentialEvolution]: - with pytest.raises(ValueError): - opt.optimiser.set_population_size(-5) + if issubclass(optimiser, pybop.BasePintsOptimiser): + optim = optimiser( + cost=cost, + use_f_guessed=True, + parallel=False, + min_iterations=3, + max_unchanged_iterations=5, + threshold=1e-2, + max_evaluations=20, + ) + with pytest.raises( + ValueError, + match="Unrecognised keyword arguments", + ): + optim = optimiser(cost=cost, tol=1e-3) + else: + # Check bounds in list format and update tol + bounds = [ + (lower, upper) for lower, upper in 
zip(bounds["lower"], bounds["upper"]) + ] + optim = optimiser(cost=cost, bounds=bounds, tol=1e-2) + assert optim.bounds == bounds + + if optimiser in [ + pybop.SciPyMinimize, + pybop.SciPyDifferentialEvolution, + pybop.XNES, + ]: + # Pass nested options + optim = optimiser(cost=cost, options=dict(maxiter=10)) + with pytest.raises( + Exception, + match="A duplicate maxiter option was found in the options dictionary.", + ): + optimiser(cost=cost, maxiter=5, options=dict(maxiter=10)) + + # Pass similar keywords + with pytest.raises( + Exception, + match="option was passed in addition to the expected", + ): + optimiser(cost=cost, maxiter=5, max_iterations=10) + + if optimiser in [pybop.SciPyDifferentialEvolution]: + # Update population size + optimiser(cost=cost, popsize=5) + + # Test invalid bounds + with pytest.raises( + ValueError, match="Bounds must be specified for differential_evolution." + ): + optimiser(cost=cost, bounds=None) + with pytest.raises( + ValueError, match="Bounds must be specified for differential_evolution." + ): + optimiser(cost=cost, bounds=[(0, np.inf)]) + with pytest.raises( + ValueError, match="Bounds must be specified for differential_evolution." + ): + optimiser(cost=cost, bounds={"upper": [np.inf], "lower": [0.57]}) + + else: + # Check and update initial values + assert optim.x0 == cost.x0 + x0_new = np.array([0.6]) + optim = optimiser(cost=cost, x0=x0_new) + assert optim.x0 == x0_new + assert optim.x0 != cost.x0 - # Correct value - opt.optimiser.set_population_size(5) + if optimiser in [pybop.SciPyMinimize]: + # Check a method that uses gradient information + optimiser(cost=cost, method="L-BFGS-B", jac=True, maxiter=10) + optim.run() + assert optim._iterations > 0 + with pytest.raises( + ValueError, + match="Expected the jac option to be either True, False or None.", + ): + optim = optimiser(cost=cost, jac="Invalid string") @pytest.mark.unit def test_single_parameter(self, cost): @@ -117,18 +232,52 @@ def test_single_parameter(self, cost): ValueError, match=r"requires optimisation of >= 2 parameters at once.", ): - pybop.Optimisation(cost=cost, optimiser=pybop.CMAES) + pybop.CMAES(cost=cost) + + @pytest.mark.unit + def test_invalid_cost(self): + # Test without valid cost + with pytest.raises( + Exception, + match="The cost is not a recognised cost object or function.", + ): + pybop.Optimisation(cost="Invalid string") + + def invalid_cost(x): + return [1, 2] + + with pytest.raises( + Exception, + match="not a scalar numeric value.", + ): + pybop.Optimisation(cost=invalid_cost) @pytest.mark.unit def test_default_optimiser(self, cost): - opt = pybop.Optimisation(cost=cost) - assert opt.optimiser.name() == "Exponential Natural Evolution Strategy (xNES)" + optim = pybop.Optimisation(cost=cost) + assert optim.name() == "Exponential Natural Evolution Strategy (xNES)" + + # Test incorrect setting attribute + with pytest.raises( + AttributeError, + match="'Optimisation' object has no attribute 'not_a_valid_attribute'", + ): + optim.not_a_valid_attribute @pytest.mark.unit def test_incorrect_optimiser_class(self, cost): class RandomClass: pass + with pytest.raises( + ValueError, + match="The pints_optimiser is not a recognised PINTS optimiser class.", + ): + pybop.BasePintsOptimiser(cost=cost, pints_optimiser=RandomClass) + + with pytest.raises(NotImplementedError): + pybop.BaseOptimiser(cost=cost) + with pytest.raises(ValueError): pybop.Optimisation(cost=cost, optimiser=RandomClass) @@ -136,9 +285,9 @@ class RandomClass: def test_prior_sampling(self, cost): # Tests prior 
sampling for i in range(50): - opt = pybop.Optimisation(cost=cost) + optim = pybop.Optimisation(cost=cost) - assert opt.x0 <= 0.62 and opt.x0 >= 0.58 + assert optim.x0 <= 0.62 and optim.x0 >= 0.58 @pytest.mark.unit @pytest.mark.parametrize( @@ -163,10 +312,11 @@ def test_scipy_prior_resampling( cost = pybop.SumSquaredError(problem) # Create the optimisation class with infeasible solutions disabled - opt = pybop.Optimisation( - cost=cost, optimiser=pybop.SciPyMinimize, allow_infeasible_solutions=False + opt = pybop.SciPyMinimize( + cost=cost, + allow_infeasible_solutions=False, + max_iterations=1, ) - opt.set_max_iterations(1) # If small sigma, expect a ValueError due inability to resample a non np.inf cost if expect_exception: @@ -181,23 +331,24 @@ def test_scipy_prior_resampling( @pytest.mark.unit def test_halting(self, cost): # Test max evalutions - optim = pybop.Optimisation(cost=cost, optimiser=pybop.GradientDescent) - optim.set_max_evaluations(1) + optim = pybop.GradientDescent(cost=cost, max_evaluations=1, verbose=True) x, __ = optim.run() assert optim._iterations == 1 # Test max unchanged iterations - optim = pybop.Optimisation(cost=cost, optimiser=pybop.GradientDescent) - optim.set_max_unchanged_iterations(1) - optim.set_min_iterations(1) + optim = pybop.GradientDescent( + cost=cost, max_unchanged_iterations=1, min_iterations=1 + ) x, __ = optim.run() assert optim._iterations == 2 # Test guessed values optim.set_f_guessed_tracking(True) - assert optim._use_f_guessed is True + assert optim.f_guessed_tracking() is True # Test invalid values + with pytest.raises(ValueError): + optim.set_max_iterations(-1) with pytest.raises(ValueError): optim.set_max_evaluations(-1) with pytest.raises(ValueError): @@ -207,14 +358,45 @@ def test_halting(self, cost): with pytest.raises(ValueError): optim.set_max_unchanged_iterations(1, threshold=-1) + optim = pybop.Optimisation(cost=cost) + + # Trigger threshold + optim._threshold = np.inf + optim.run() + optim.set_max_unchanged_iterations() + + # Test callback and halting output + def callback_error(iteration, s): + raise Exception("Callback error message") + + optim._callback = callback_error + with pytest.raises(Exception, match="Callback error message"): + optim.run() + optim._callback = None + + # Trigger optimiser error + def optimiser_error(): + return "Optimiser error message" + + optim.pints_optimiser.stop = optimiser_error + optim.run() + assert optim._iterations == 1 + + # Test no stopping condition + with pytest.raises( + ValueError, match="At least one stopping criterion must be set." 
+ ): + optim._max_iterations = None + optim._unchanged_max_iterations = None + optim._max_evaluations = None + optim._threshold = None + optim.run() + @pytest.mark.unit def test_infeasible_solutions(self, cost): # Test infeasible solutions for optimiser in [pybop.SciPyMinimize, pybop.GradientDescent]: - optim = pybop.Optimisation( - cost=cost, optimiser=optimiser, allow_infeasible_solutions=False - ) - optim.set_max_iterations(1) + optim = optimiser(cost=cost, allow_infeasible_solutions=False, maxiter=1) optim.run() assert optim._iterations == 1 diff --git a/tests/unit/test_plots.py b/tests/unit/test_plots.py index 6d0fadb34..f5998c0af 100644 --- a/tests/unit/test_plots.py +++ b/tests/unit/test_plots.py @@ -108,7 +108,7 @@ def test_cost_plots(self, cost): @pytest.fixture def optim(self, cost): # Define and run an example optimisation - optim = pybop.Optimisation(cost, optimiser=pybop.CMAES) + optim = pybop.Optimisation(cost) optim.run() return optim @@ -116,6 +116,8 @@ def optim(self, cost): def test_optim_plots(self, optim): # Plot convergence pybop.plot_convergence(optim) + optim._minimising = False + pybop.plot_convergence(optim) # Plot the parameter traces pybop.plot_parameters(optim) diff --git a/tests/unit/test_standalone.py b/tests/unit/test_standalone.py index e9054f5b4..d524555a5 100644 --- a/tests/unit/test_standalone.py +++ b/tests/unit/test_standalone.py @@ -3,6 +3,7 @@ import pybop from examples.standalone.cost import StandaloneCost +from examples.standalone.optimiser import StandaloneOptimiser from examples.standalone.problem import StandaloneProblem @@ -12,15 +13,31 @@ class TestStandalone: """ @pytest.mark.unit - def test_standalone(self): + def test_standalone_optimiser(self): + optim = StandaloneOptimiser() + assert optim.name() == "StandaloneOptimiser" + + x, final_cost = optim.run() + assert optim.cost(optim.x0) > final_cost + np.testing.assert_allclose(x, [2, 4], atol=1e-2) + + # Test with bounds + optim = StandaloneOptimiser(bounds=dict(upper=[5, 6], lower=[1, 2])) + + x, final_cost = optim.run() + assert optim.cost(optim.x0) > final_cost + np.testing.assert_allclose(x, [2, 4], atol=1e-2) + + @pytest.mark.unit + def test_optimisation_on_standalone_cost(self): # Build an Optimisation problem with a StandaloneCost cost = StandaloneCost() - opt = pybop.Optimisation(cost=cost, optimiser=pybop.SciPyDifferentialEvolution) - x, final_cost = opt.run() + optim = pybop.SciPyDifferentialEvolution(cost=cost) + x, final_cost = optim.run() - assert len(opt.x0) == opt._n_parameters - np.testing.assert_allclose(x, 0, atol=1e-2) - np.testing.assert_allclose(final_cost, 42, atol=1e-2) + initial_cost = optim.cost(cost.x0) + assert initial_cost > final_cost + np.testing.assert_allclose(final_cost, 42, atol=1e-1) @pytest.mark.unit def test_standalone_problem(self):