From 4c4a31ef777f26a72231faf41b9428ac9ff16692 Mon Sep 17 00:00:00 2001
From: NicolaCourtier <45851982+NicolaCourtier@users.noreply.github.com>
Date: Thu, 4 Jul 2024 15:19:03 +0100
Subject: [PATCH] Pass Inputs dictionary (#359)

* Add optimiser.parameters, remove problem.x0

* Update integration tests

* Pass inputs instead of x

* Specify inputs as Inputs

* Update notebooks

* Add initial and true options to as_dict

* Update parameter_values to inputs

* Update notebooks

* Add parameters tests

* Add quick_plot test

* Add test_no_optimisation_parameters

* Add test_error_in_cost_calculation

* Add parameters.verify

* Fix change to base_model

* Add more base_model tests

* fix: restore ValueError on incorrect parameter __getitem__

* Apply suggestions from code review

Co-authored-by: Brady Planden <55357039+BradyPlanden@users.noreply.github.com>

* style: pre-commit fixes

* Import Union and Inputs

* Move Inputs definition to parameter.py

* Retrieve x0 from SciPyDE

* Add test for evaluate(List)

* Refactor as suggested

Co-authored-by: Brady Planden <55357039+BradyPlanden@users.noreply.github.com>

* style: pre-commit fixes

* Remove duplicate line

* Change quick_plot inputs to problem_inputs

* Remove duplicate lines

---------

Co-authored-by: Brady Planden <55357039+BradyPlanden@users.noreply.github.com>
Co-authored-by: Brady Planden
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
---
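Usage sketch (free-form patch commentary; `git am` ignores everything between
the "---" above and the diffstat below). This assumes a `problem` and `cost`
already constructed as in examples/scripts/spm_CMAES.py; the names and values
are illustrative only, not part of the patch:

    # Optimisers still hand positional numeric values to the cost;
    # Parameters.verify() converts a numeric list into an Inputs dict
    # (Inputs = Dict[str, float]) keyed by parameter name.
    x = [0.68, 0.58]
    inputs = problem.parameters.verify(x)

    # Problems now expect Inputs; costs accept either form, because
    # BaseCost.evaluate() calls self.parameters.verify(x) internally.
    prediction = problem.evaluate(inputs)
    value = cost(x)

    # as_dict() gains "initial" and "true" options, and quick_plot's
    # keyword argument is renamed from parameter_values to problem_inputs.
    true_inputs = problem.parameters.as_dict("true")
    pybop.quick_plot(problem, problem_inputs=x)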
 .../1-single-pulse-circuit-model.ipynb | 6 +-
 .../equivalent_circuit_identification.ipynb | 4 +-
 .../multi_model_identification.ipynb | 2 +-
 .../multi_optimiser_identification.ipynb | 2 +-
 .../notebooks/optimiser_calibration.ipynb | 4 +-
 .../notebooks/pouch_cell_identification.ipynb | 4 +-
 examples/notebooks/spm_AdamW.ipynb | 2 +-
 examples/notebooks/spm_electrode_design.ipynb | 4 +-
 examples/scripts/BPX_spm.py | 2 +-
 examples/scripts/ecm_CMAES.py | 2 +-
 examples/scripts/exp_UKF.py | 9 +-
 examples/scripts/gitt.py | 2 +-
 examples/scripts/spm_AdamW.py | 2 +-
 examples/scripts/spm_CMAES.py | 2 +-
 examples/scripts/spm_IRPropMin.py | 2 +-
 examples/scripts/spm_MAP.py | 2 +-
 examples/scripts/spm_MLE.py | 2 +-
 examples/scripts/spm_NelderMead.py | 2 +-
 examples/scripts/spm_SNES.py | 2 +-
 examples/scripts/spm_UKF.py | 2 +-
 examples/scripts/spm_XNES.py | 2 +-
 examples/scripts/spm_descent.py | 2 +-
 examples/scripts/spm_pso.py | 2 +-
 examples/scripts/spm_scipymin.py | 2 +-
 examples/scripts/spme_max_energy.py | 6 +-
 examples/standalone/cost.py | 9 +-
 examples/standalone/problem.py | 21 +++--
 pybop/costs/_likelihoods.py | 34 +++----
 pybop/costs/base_cost.py | 23 ++---
 pybop/costs/design_costs.py | 43 ++++-----
 pybop/costs/fitting_costs.py | 50 +++++-----
 pybop/models/base_model.py | 64 +++++--------
 pybop/models/empirical/base_ecm.py | 6 +-
 pybop/models/empirical/ecm.py | 5 +-
 pybop/models/lithium_ion/base_echem.py | 17 ++--
 pybop/observers/observer.py | 24 ++---
 pybop/observers/unscented_kalman.py | 4 +-
 pybop/optimisers/base_optimiser.py | 43 ++++++---
 pybop/optimisers/pints_optimisers.py | 2 +-
 pybop/optimisers/scipy_optimisers.py | 4 +-
 pybop/parameters/parameter.py | 91 +++++++++++++++++--
 pybop/plotting/plot_problem.py | 13 ++-
 pybop/problems/base_problem.py | 16 ++--
 pybop/problems/design_problem.py | 15 +--
 pybop/problems/fitting_problem.py | 39 +++++---
 .../test_model_experiment_changes.py | 4 +-
 .../integration/test_optimisation_options.py | 2 +-
 .../integration/test_spm_parameterisations.py | 8 +-
 .../test_thevenin_parameterisation.py | 4 +-
 tests/unit/test_cost.py | 25 ++++-
 tests/unit/test_likelihoods.py | 3 +-
 tests/unit/test_models.py | 17 +++-
 tests/unit/test_observer_unscented_kalman.py | 18 ++--
 tests/unit/test_observers.py | 24 ++--
 tests/unit/test_optimisation.py | 21 +++--
 tests/unit/test_parameters.py | 39 ++++++++
 tests/unit/test_plots.py | 3 +
 tests/unit/test_standalone.py | 3 +-
 58 files changed, 465 insertions(+), 302 deletions(-)

diff --git a/examples/notebooks/LG_M50_ECM/1-single-pulse-circuit-model.ipynb b/examples/notebooks/LG_M50_ECM/1-single-pulse-circuit-model.ipynb
index 365eb6e1d..9fd084dd6 100644
--- a/examples/notebooks/LG_M50_ECM/1-single-pulse-circuit-model.ipynb
+++ b/examples/notebooks/LG_M50_ECM/1-single-pulse-circuit-model.ipynb
@@ -1641,7 +1641,7 @@
    "source": [
     "optim = pybop.PSO(cost, max_unchanged_iterations=55, threshold=1e-6)\n",
     "x, final_cost = optim.run()\n",
-    "print(\"Initial parameters:\", cost.x0)\n",
+    "print(\"Initial parameters:\", optim.x0)\n",
     "print(\"Estimated parameters:\", x)"
    ]
   },
@@ -1679,7 +1679,7 @@
     }
    ],
    "source": [
-    "pybop.quick_plot(problem, parameter_values=x, title=\"Optimised Comparison\");"
+    "pybop.quick_plot(problem, problem_inputs=x, title=\"Optimised Comparison\");"
    ]
   },
   {
@@ -1850,7 +1850,7 @@
     }
    ],
    "source": [
-    "pybop.quick_plot(problem, parameter_values=x, title=\"Parameter Extrapolation\");"
+    "pybop.quick_plot(problem, problem_inputs=x, title=\"Parameter Extrapolation\");"
    ]
   },
   {
diff --git a/examples/notebooks/equivalent_circuit_identification.ipynb b/examples/notebooks/equivalent_circuit_identification.ipynb
index 8a13a199e..6184c191a 100644
--- a/examples/notebooks/equivalent_circuit_identification.ipynb
+++ b/examples/notebooks/equivalent_circuit_identification.ipynb
@@ -419,7 +419,7 @@
    "source": [
     "optim = pybop.CMAES(cost, max_iterations=300)\n",
     "x, final_cost = optim.run()\n",
-    "print(\"Initial parameters:\", cost.x0)\n",
+    "print(\"Initial parameters:\", optim.x0)\n",
     "print(\"Estimated parameters:\", x)"
    ]
   },
@@ -457,7 +457,7 @@
     }
    ],
    "source": [
-    "pybop.quick_plot(problem, parameter_values=x, title=\"Optimised Comparison\");"
+    "pybop.quick_plot(problem, problem_inputs=x, title=\"Optimised Comparison\");"
    ]
   },
   {
diff --git a/examples/notebooks/multi_model_identification.ipynb b/examples/notebooks/multi_model_identification.ipynb
index e7c6b158c..a66a78f2b 100644
--- a/examples/notebooks/multi_model_identification.ipynb
+++ b/examples/notebooks/multi_model_identification.ipynb
@@ -3905,7 +3905,7 @@
    "source": [
     "for optim, x in zip(optims, xs):\n",
     "    pybop.quick_plot(\n",
-    "        optim.cost.problem, parameter_values=x, title=optim.cost.problem.model.name\n",
+    "        optim.cost.problem, problem_inputs=x, title=optim.cost.problem.model.name\n",
     "    )"
    ]
   },
diff --git a/examples/notebooks/multi_optimiser_identification.ipynb b/examples/notebooks/multi_optimiser_identification.ipynb
index 8b2a83500..1422985da 100644
--- a/examples/notebooks/multi_optimiser_identification.ipynb
+++ b/examples/notebooks/multi_optimiser_identification.ipynb
@@ -599,7 +599,7 @@
    ],
    "source": [
     "for optim, x in zip(optims, xs):\n",
-    "    pybop.quick_plot(optim.cost.problem, parameter_values=x, title=optim.name())"
+    "    pybop.quick_plot(optim.cost.problem, problem_inputs=x, title=optim.name())"
    ]
   },
   {
diff --git a/examples/notebooks/optimiser_calibration.ipynb b/examples/notebooks/optimiser_calibration.ipynb
index accfbf259..ec4c1551a 100644
--- a/examples/notebooks/optimiser_calibration.ipynb
+++ b/examples/notebooks/optimiser_calibration.ipynb
@@ -404,7 +404,7 @@
     }
    ],
    "source": [
-    "pybop.quick_plot(problem, parameter_values=x, title=\"Optimised Comparison\");"
+    "pybop.quick_plot(problem, problem_inputs=x, title=\"Optimised Comparison\");"
    ]
   },
   {
@@ -723,7 +723,7 @@
    "source": [
     "optim = pybop.GradientDescent(cost, sigma0=0.0115)\n",
     "x, final_cost = optim.run()\n",
-    "pybop.quick_plot(problem, parameter_values=x, title=\"Optimised Comparison\");"
+    "pybop.quick_plot(problem, problem_inputs=x, title=\"Optimised Comparison\");"
    ]
   },
   {
diff --git a/examples/notebooks/pouch_cell_identification.ipynb b/examples/notebooks/pouch_cell_identification.ipynb
index c24300ead..d952e22c7 100644
--- a/examples/notebooks/pouch_cell_identification.ipynb
+++ b/examples/notebooks/pouch_cell_identification.ipynb
@@ -517,7 +517,7 @@
     }
    ],
    "source": [
-    "pybop.quick_plot(problem, parameter_values=x, title=\"Optimised Comparison\");"
+    "pybop.quick_plot(problem, problem_inputs=x, title=\"Optimised Comparison\");"
    ]
   },
   {
@@ -1539,7 +1539,7 @@
     }
    ],
    "source": [
-    "sol = problem.evaluate(x)\n",
+    "sol = problem.evaluate(parameters.as_dict(x))\n",
     "\n",
     "go.Figure(\n",
     "    [\n",
diff --git a/examples/notebooks/spm_AdamW.ipynb b/examples/notebooks/spm_AdamW.ipynb
index 6b2330907..ec9a961a5 100644
--- a/examples/notebooks/spm_AdamW.ipynb
+++ b/examples/notebooks/spm_AdamW.ipynb
@@ -437,7 +437,7 @@
     }
    ],
    "source": [
-    "pybop.quick_plot(problem, parameter_values=x, title=\"Optimised Comparison\");"
+    "pybop.quick_plot(problem, problem_inputs=x, title=\"Optimised Comparison\");"
    ]
   },
   {
diff --git a/examples/notebooks/spm_electrode_design.ipynb b/examples/notebooks/spm_electrode_design.ipynb
index 950cee323..e1fd58204 100644
--- a/examples/notebooks/spm_electrode_design.ipynb
+++ b/examples/notebooks/spm_electrode_design.ipynb
@@ -277,7 +277,7 @@
    "source": [
     "x, final_cost = optim.run()\n",
     "print(\"Estimated parameters:\", x)\n",
-    "print(f\"Initial gravimetric energy density: {-cost(cost.x0):.2f} Wh.kg-1\")\n",
+    "print(f\"Initial gravimetric energy density: {-cost(optim.x0):.2f} Wh.kg-1\")\n",
     "print(f\"Optimised gravimetric energy density: {-final_cost:.2f} Wh.kg-1\")"
    ]
   },
@@ -329,7 +329,7 @@
    "source": [
     "if cost.update_capacity:\n",
     "    problem._model.approximate_capacity(x)\n",
-    "pybop.quick_plot(problem, parameter_values=x, title=\"Optimised Comparison\");"
+    "pybop.quick_plot(problem, problem_inputs=x, title=\"Optimised Comparison\");"
    ]
   },
   {
diff --git a/examples/scripts/BPX_spm.py b/examples/scripts/BPX_spm.py
index 6fdb76490..eea658846 100644
--- a/examples/scripts/BPX_spm.py
+++ b/examples/scripts/BPX_spm.py
@@ -51,7 +51,7 @@
 print("Estimated parameters:", x)
 
 # Plot the timeseries output
-pybop.quick_plot(problem, parameter_values=x, title="Optimised Comparison")
+pybop.quick_plot(problem, problem_inputs=x, title="Optimised Comparison")
 
 # Plot convergence
 pybop.plot_convergence(optim)
diff --git a/examples/scripts/ecm_CMAES.py b/examples/scripts/ecm_CMAES.py
index 96a36ec44..953d7e6aa 100644
--- a/examples/scripts/ecm_CMAES.py
+++ b/examples/scripts/ecm_CMAES.py
@@ -89,7 +89,7 @@
 pybop.plot_dataset(dataset)
 
 # Plot the timeseries output
-pybop.quick_plot(problem, parameter_values=x, title="Optimised Comparison")
+pybop.quick_plot(problem, problem_inputs=x, title="Optimised Comparison")
 
 # Plot convergence
 pybop.plot_convergence(optim)
diff --git a/examples/scripts/exp_UKF.py b/examples/scripts/exp_UKF.py
index ff7a7045d..622a68e50 100644
--- a/examples/scripts/exp_UKF.py
+++ b/examples/scripts/exp_UKF.py
@@ -27,7 +27,8 @@
 sigma = 1e-2
 t_eval = np.linspace(0, 20, 10)
 model.parameters = parameters
-values = model.predict(t_eval=t_eval, inputs=parameters.true_value())
+true_inputs = parameters.as_dict("true")
+values = model.predict(t_eval=t_eval, inputs=true_inputs)
 values = values["2y"].data
 corrupt_values = values + np.random.normal(0, sigma, len(t_eval))
 
@@ -40,7 +41,7 @@
 model.build(parameters=parameters)
 simulator = pybop.Observer(parameters, model, signal=["2y"])
 simulator._time_data = t_eval
-measurements = simulator.evaluate(parameters.true_value())
+measurements = simulator.evaluate(true_inputs)
 
 # Verification step: Compare by plotting
 go = pybop.PlotlyManager().go
@@ -83,7 +84,7 @@
 )
 
 # Verification step: Find the maximum likelihood estimate given the true parameters
-estimation = observer.evaluate(parameters.true_value())
+estimation = observer.evaluate(true_inputs)
 
 # Verification step: Add the estimate to the plot
 line4 = go.Scatter(
@@ -101,7 +102,7 @@
 print("Estimated parameters:", x)
 
 # Plot the timeseries output (requires model that returns Voltage)
-pybop.quick_plot(observer, parameter_values=x, title="Optimised Comparison")
+pybop.quick_plot(observer, problem_inputs=x, title="Optimised Comparison")
 
 # Plot convergence
 pybop.plot_convergence(optim)
diff --git a/examples/scripts/gitt.py b/examples/scripts/gitt.py
index 52517fdb3..6d3b4a94b 100644
--- a/examples/scripts/gitt.py
+++ b/examples/scripts/gitt.py
@@ -59,7 +59,7 @@
 print("Estimated parameters:", x)
 
 # Plot the timeseries output
-pybop.quick_plot(problem, parameter_values=x, title="Optimised Comparison")
+pybop.quick_plot(problem, problem_inputs=x, title="Optimised Comparison")
 
 # Plot convergence
 pybop.plot_convergence(optim)
diff --git a/examples/scripts/spm_AdamW.py b/examples/scripts/spm_AdamW.py
index 44bbf8b11..796849bee 100644
--- a/examples/scripts/spm_AdamW.py
+++ b/examples/scripts/spm_AdamW.py
@@ -68,7 +68,7 @@ def noise(sigma):
 print("Estimated parameters:", x)
 
 # Plot the timeseries output
-pybop.quick_plot(problem, parameter_values=x, title="Optimised Comparison")
+pybop.quick_plot(problem, problem_inputs=x, title="Optimised Comparison")
 
 # Plot convergence
 pybop.plot_convergence(optim)
diff --git a/examples/scripts/spm_CMAES.py b/examples/scripts/spm_CMAES.py
index 1fc051cc2..ed38144a9 100644
--- a/examples/scripts/spm_CMAES.py
+++ b/examples/scripts/spm_CMAES.py
@@ -53,7 +53,7 @@
 pybop.plot_dataset(dataset)
 
 # Plot the timeseries output
-pybop.quick_plot(problem, parameter_values=x, title="Optimised Comparison")
+pybop.quick_plot(problem, problem_inputs=x, title="Optimised Comparison")
 
 # Plot convergence
 pybop.plot_convergence(optim)
diff --git a/examples/scripts/spm_IRPropMin.py b/examples/scripts/spm_IRPropMin.py
index 727536ff8..1969f6f9d 100644
--- a/examples/scripts/spm_IRPropMin.py
+++ b/examples/scripts/spm_IRPropMin.py
@@ -42,7 +42,7 @@
 print("Estimated parameters:", x)
 
 # Plot the timeseries output
-pybop.quick_plot(problem, parameter_values=x, title="Optimised Comparison")
+pybop.quick_plot(problem, problem_inputs=x, title="Optimised Comparison")
 
 # Plot convergence
 pybop.plot_convergence(optim)
diff --git a/examples/scripts/spm_MAP.py b/examples/scripts/spm_MAP.py
index d8460915c..dc135fdc8 100644
--- a/examples/scripts/spm_MAP.py
+++ b/examples/scripts/spm_MAP.py
@@ -57,7 +57,7 @@
 print("Estimated parameters:", x)
 
 # Plot the timeseries output
-pybop.quick_plot(problem, parameter_values=x[0:2], title="Optimised Comparison")
+pybop.quick_plot(problem, problem_inputs=x[0:2], title="Optimised Comparison")
 
 # Plot convergence
 pybop.plot_convergence(optim)
diff --git a/examples/scripts/spm_MLE.py b/examples/scripts/spm_MLE.py
index 6fc0238ca..d5d6e641d 100644
--- a/examples/scripts/spm_MLE.py
+++ b/examples/scripts/spm_MLE.py
@@ -57,7 +57,7 @@
 print("Estimated parameters:", x)
 
 # Plot the timeseries output
-pybop.quick_plot(problem, parameter_values=x[0:2], title="Optimised Comparison")
+pybop.quick_plot(problem, problem_inputs=x[0:2], title="Optimised Comparison")
 
 # Plot convergence
 pybop.plot_convergence(optim)
diff --git a/examples/scripts/spm_NelderMead.py b/examples/scripts/spm_NelderMead.py
index 569dbadf2..e07801e04 100644
--- a/examples/scripts/spm_NelderMead.py
+++ b/examples/scripts/spm_NelderMead.py
@@ -68,7 +68,7 @@ def noise(sigma):
 print("Estimated parameters:", x)
 
 # Plot the timeseries output
-pybop.quick_plot(problem, parameter_values=x, title="Optimised Comparison")
+pybop.quick_plot(problem, problem_inputs=x, title="Optimised Comparison")
 
 # Plot convergence
 pybop.plot_convergence(optim)
diff --git a/examples/scripts/spm_SNES.py b/examples/scripts/spm_SNES.py
index d2afcc85a..93046d63a 100644
--- a/examples/scripts/spm_SNES.py
+++ b/examples/scripts/spm_SNES.py
@@ -42,7 +42,7 @@
 print("Estimated parameters:", x)
 
 # Plot the timeseries output
-pybop.quick_plot(problem, parameter_values=x, title="Optimised Comparison")
+pybop.quick_plot(problem, problem_inputs=x, title="Optimised Comparison")
 
 # Plot convergence
 pybop.plot_convergence(optim)
diff --git a/examples/scripts/spm_UKF.py b/examples/scripts/spm_UKF.py
index e9972bd0f..e528c715e 100644
--- a/examples/scripts/spm_UKF.py
+++ b/examples/scripts/spm_UKF.py
@@ -68,7 +68,7 @@
 print("Estimated parameters:", x)
 
 # Plot the timeseries output (requires model that returns Voltage)
-pybop.quick_plot(observer, parameter_values=x, title="Optimised Comparison")
+pybop.quick_plot(observer, problem_inputs=x, title="Optimised Comparison")
 
 # # Plot convergence
 # pybop.plot_convergence(optim)
diff --git a/examples/scripts/spm_XNES.py b/examples/scripts/spm_XNES.py
index 59b6eca87..40900640f 100644
--- a/examples/scripts/spm_XNES.py
+++ b/examples/scripts/spm_XNES.py
@@ -43,7 +43,7 @@
 print("Estimated parameters:", x)
 
 # Plot the timeseries output
-pybop.quick_plot(problem, parameter_values=x, title="Optimised Comparison")
+pybop.quick_plot(problem, problem_inputs=x, title="Optimised Comparison")
 
 # Plot convergence
 pybop.plot_convergence(optim)
diff --git a/examples/scripts/spm_descent.py b/examples/scripts/spm_descent.py
index 7c7629b04..94573f0c0 100644
--- a/examples/scripts/spm_descent.py
+++ b/examples/scripts/spm_descent.py
@@ -48,7 +48,7 @@
 print("Estimated parameters:", x)
 
 # Plot the timeseries output
-pybop.quick_plot(problem, parameter_values=x, title="Optimised Comparison")
+pybop.quick_plot(problem, problem_inputs=x, title="Optimised Comparison")
 
 # Plot convergence
 pybop.plot_convergence(optim)
diff --git a/examples/scripts/spm_pso.py b/examples/scripts/spm_pso.py
index acb3e1c6e..efc97ad2a 100644
--- a/examples/scripts/spm_pso.py
+++ b/examples/scripts/spm_pso.py
@@ -43,7 +43,7 @@
 print("Estimated parameters:", x)
 
 # Plot the timeseries output
-pybop.quick_plot(problem, parameter_values=x, title="Optimised Comparison")
+pybop.quick_plot(problem, problem_inputs=x, title="Optimised Comparison")
 
 # Plot convergence
 pybop.plot_convergence(optim)
diff --git a/examples/scripts/spm_scipymin.py b/examples/scripts/spm_scipymin.py
index 8c7b80c5a..b6cec3f08 100644
--- a/examples/scripts/spm_scipymin.py
+++ b/examples/scripts/spm_scipymin.py
@@ -45,7 +45,7 @@
 print("Estimated parameters:", x)
 
 # Plot the timeseries output
-pybop.quick_plot(problem, parameter_values=x, title="Optimised Comparison")
+pybop.quick_plot(problem, problem_inputs=x, title="Optimised Comparison")
 
 # Plot convergence
 pybop.plot_convergence(optim)
diff --git a/examples/scripts/spme_max_energy.py b/examples/scripts/spme_max_energy.py
index 81b2b231d..f5b7c827c 100644
--- a/examples/scripts/spme_max_energy.py
+++ b/examples/scripts/spme_max_energy.py
@@ -12,7 +12,7 @@
 # NOTE: This script can be easily adjusted to consider the volumetric
 # (instead of gravimetric) energy density by changing the line which
 # defines the cost and changing the output to:
-# print(f"Initial volumetric energy density: {cost(cost.x0):.2f} Wh.m-3")
+# print(f"Initial volumetric energy density: {cost(optim.x0):.2f} Wh.m-3")
 # print(f"Optimised volumetric energy density: {final_cost:.2f} Wh.m-3")
 
 # Define parameter set and model
@@ -54,13 +54,13 @@
 # Run optimisation
 x, final_cost = optim.run()
 print("Estimated parameters:", x)
-print(f"Initial gravimetric energy density: {cost(cost.x0):.2f} Wh.kg-1")
+print(f"Initial gravimetric energy density: {cost(optim.x0):.2f} Wh.kg-1")
 print(f"Optimised gravimetric energy density: {final_cost:.2f} Wh.kg-1")
 
 # Plot the timeseries output
 if cost.update_capacity:
     problem._model.approximate_capacity(x)
-pybop.quick_plot(problem, parameter_values=x, title="Optimised Comparison")
+pybop.quick_plot(problem, problem_inputs=x, title="Optimised Comparison")
 
 # Plot the cost landscape with optimisation path
 if len(x) == 2:
diff --git a/examples/standalone/cost.py b/examples/standalone/cost.py
index 806bc0eab..99917f3fd 100644
--- a/examples/standalone/cost.py
+++ b/examples/standalone/cost.py
@@ -43,7 +43,7 @@ def __init__(self, problem=None):
         )
         self.x0 = self.parameters.initial_value()
 
-    def _evaluate(self, x, grad=None):
+    def _evaluate(self, inputs, grad=None):
         """
         Calculate the cost for a given parameter value.
 
@@ -52,9 +52,8 @@ def __init__(self, problem=None):
 
         Parameters
         ----------
-        x : array-like
-            A one-element array containing the parameter value for which to
-            evaluate the cost.
+        inputs : Dict
+            The parameters for which to evaluate the cost.
         grad : array-like, optional
             Unused parameter, present for compatibility with gradient-based
             optimizers.
@@ -65,4 +64,4 @@ def __init__(self, problem=None):
             The calculated cost value for the given parameter.
         """
 
-        return x[0] ** 2 + 42
+        return inputs["x"] ** 2 + 42
diff --git a/examples/standalone/problem.py b/examples/standalone/problem.py
index d6d1f4b01..d76f9dca5 100644
--- a/examples/standalone/problem.py
+++ b/examples/standalone/problem.py
@@ -42,31 +42,34 @@ def __init__(
         )
         self._target = {signal: self._dataset[signal] for signal in self.signal}
 
-    def evaluate(self, x):
+    def evaluate(self, inputs):
         """
         Evaluate the model with the given parameters and return the signal.
 
         Parameters
         ----------
-        x : np.ndarray
-            Parameter values to evaluate the model at.
+        inputs : Dict
+            Parameters for evaluation of the model.
 
         Returns
         -------
         y : np.ndarray
-            The model output y(t) simulated with inputs x.
+            The model output y(t) simulated with given inputs.
         """
 
-        return {signal: x[0] * self._time_data + x[1] for signal in self.signal}
+        return {
+            signal: inputs["Gradient"] * self._time_data + inputs["Intercept"]
+            for signal in self.signal
+        }
 
-    def evaluateS1(self, x):
+    def evaluateS1(self, inputs):
         """
         Evaluate the model with the given parameters and return the signal and
         its derivatives.
 
         Parameters
         ----------
-        x : np.ndarray
-            Parameter values to evaluate the model at.
+        inputs : Dict
+            Parameters for evaluation of the model.
 
         Returns
         -------
@@ -75,7 +78,7 @@ def __init__(
             with given inputs x.
         """
 
-        y = {signal: x[0] * self._time_data + x[1] for signal in self.signal}
+        y = self.evaluate(inputs)
 
         dy = np.zeros((self.n_time_data, self.n_outputs, self.n_parameters))
         dy[:, 0, 0] = self._time_data
diff --git a/pybop/costs/_likelihoods.py b/pybop/costs/_likelihoods.py
index cce09f9bb..9406572bc 100644
--- a/pybop/costs/_likelihoods.py
+++ b/pybop/costs/_likelihoods.py
@@ -1,6 +1,7 @@
 import numpy as np
 
 from pybop.costs.base_cost import BaseCost
+from pybop.parameters.parameter import Inputs
 
 
 class BaseLikelihood(BaseCost):
@@ -63,12 +64,12 @@ def get_sigma(self):
         """
         return self.sigma
 
-    def _evaluate(self, x, grad=None):
+    def _evaluate(self, inputs: Inputs, grad=None):
         """
         Calls the problem.evaluate method and calculates
         the log-likelihood
         """
-        y = self.problem.evaluate(x)
+        y = self.problem.evaluate(inputs)
 
         for key in self.signal:
             if len(y.get(key, [])) != len(self._target.get(key, [])):
@@ -89,12 +90,12 @@ def get_sigma(self):
         else:
             return np.sum(e)
 
-    def _evaluateS1(self, x, grad=None):
+    def _evaluateS1(self, inputs: Inputs, grad=None):
         """
         Calls the problem.evaluateS1 method and calculates
         the log-likelihood
         """
-        y, dy = self.problem.evaluateS1(x)
+        y, dy = self.problem.evaluateS1(inputs)
 
         for key in self.signal:
             if len(y.get(key, [])) != len(self._target.get(key, [])):
@@ -103,7 +104,7 @@ def get_sigma(self):
             return -likelihood, -dl
 
         r = np.asarray([self._target[signal] - y[signal] for signal in self.signal])
-        likelihood = self._evaluate(x)
+        likelihood = self._evaluate(inputs)
         dl = np.sum((self.sigma2 * np.sum((r * dy.T), axis=2)), axis=1)
         return likelihood, dl
 
@@ -125,24 +126,25 @@ def __init__(self, problem):
         self._logpi = -0.5 * self.n_time_data * np.log(2 * np.pi)
         self._dl = np.ones(self.n_parameters + self.n_outputs)
 
-    def _evaluate(self, x, grad=None):
+    def _evaluate(self, inputs: Inputs, grad=None):
         """
         Evaluates the Gaussian log-likelihood for the given parameters.
 
-        Args:
-            x (array_like): The parameters for which to evaluate the log-likelihood.
-                The last `self.n_outputs` elements are assumed to be the
-                standard deviations of the Gaussian distributions.
+        Parameters
+        ----------
+        inputs : Inputs
+            The parameters for which to evaluate the log-likelihood, including the `n_outputs`
+            standard deviations of the Gaussian distributions.
 
         Returns:
            float: The log-likelihood value, or -inf if the standard deviations are received as non-positive.
""" - sigma = np.asarray(x[-self.n_outputs :]) + sigma = np.asarray([0.002]) # TEMPORARY WORKAROUND (replace in #338) if np.any(sigma <= 0): return -np.inf - y = self.problem.evaluate(x[: -self.n_outputs]) + y = self.problem.evaluate(inputs) for key in self.signal: if len(y.get(key, [])) != len(self._target.get(key, [])): @@ -164,17 +166,17 @@ def _evaluate(self, x, grad=None): else: return np.sum(e) - def _evaluateS1(self, x, grad=None): + def _evaluateS1(self, inputs: Inputs, grad=None): """ Calls the problem.evaluateS1 method and calculates the log-likelihood """ - sigma = np.asarray(x[-self.n_outputs :]) + sigma = np.asarray([0.002]) # TEMPORARY WORKAROUND (replace in #338) if np.any(sigma <= 0): return -np.float64(np.inf), -self._dl * np.ones(self.n_parameters) - y, dy = self.problem.evaluateS1(x[: -self.n_outputs]) + y, dy = self.problem.evaluateS1(inputs) for key in self.signal: if len(y.get(key, [])) != len(self._target.get(key, [])): likelihood = np.float64(np.inf) @@ -182,7 +184,7 @@ def _evaluateS1(self, x, grad=None): return -likelihood, -dl r = np.asarray([self._target[signal] - y[signal] for signal in self.signal]) - likelihood = self._evaluate(x) + likelihood = self._evaluate(inputs) dl = sigma ** (-2.0) * np.sum((r * dy.T), axis=2) dsigma = -self.n_time_data / sigma + sigma**-(3.0) * np.sum(r**2, axis=1) dl = np.concatenate((dl.flatten(), dsigma)) diff --git a/pybop/costs/base_cost.py b/pybop/costs/base_cost.py index 04d0a3934..659e3f7f0 100644 --- a/pybop/costs/base_cost.py +++ b/pybop/costs/base_cost.py @@ -1,4 +1,5 @@ from pybop import BaseProblem +from pybop.parameters.parameter import Inputs, Parameters class BaseCost: @@ -17,20 +18,16 @@ class BaseCost: evaluating the cost function. _target : array-like An array containing the target data to fit. - x0 : array-like - The initial guess for the model parameters. n_outputs : int The number of outputs in the model. """ def __init__(self, problem=None): - self.parameters = None + self.parameters = Parameters() self.problem = problem - self.x0 = None if isinstance(self.problem, BaseProblem): self._target = self.problem._target self.parameters = self.problem.parameters - self.x0 = self.problem.x0 self.n_outputs = self.problem.n_outputs self.signal = self.problem.signal @@ -66,8 +63,10 @@ def evaluate(self, x, grad=None): ValueError If an error occurs during the calculation of the cost. """ + inputs = self.parameters.verify(x) + try: - return self._evaluate(x, grad) + return self._evaluate(inputs, grad) except NotImplementedError as e: raise e @@ -75,7 +74,7 @@ def evaluate(self, x, grad=None): except Exception as e: raise ValueError(f"Error in cost calculation: {e}") - def _evaluate(self, x, grad=None): + def _evaluate(self, inputs: Inputs, grad=None): """ Calculate the cost function value for a given set of parameters. @@ -83,7 +82,7 @@ def _evaluate(self, x, grad=None): Parameters ---------- - x : array-like + inputs : Inputs The parameters for which to evaluate the cost. grad : array-like, optional An array to store the gradient of the cost function with respect @@ -121,8 +120,10 @@ def evaluateS1(self, x): ValueError If an error occurs during the calculation of the cost or gradient. 
""" + inputs = self.parameters.verify(x) + try: - return self._evaluateS1(x) + return self._evaluateS1(inputs) except NotImplementedError as e: raise e @@ -130,13 +131,13 @@ def evaluateS1(self, x): except Exception as e: raise ValueError(f"Error in cost calculation: {e}") - def _evaluateS1(self, x): + def _evaluateS1(self, inputs: Inputs): """ Compute the cost and its gradient with respect to the parameters. Parameters ---------- - x : array-like + inputs : Inputs The parameters for which to compute the cost and gradient. Returns diff --git a/pybop/costs/design_costs.py b/pybop/costs/design_costs.py index 60064c65c..85f3dee40 100644 --- a/pybop/costs/design_costs.py +++ b/pybop/costs/design_costs.py @@ -2,8 +2,8 @@ import numpy as np -from pybop import is_numeric from pybop.costs.base_cost import BaseCost +from pybop.parameters.parameter import Inputs class DesignCost(BaseCost): @@ -44,20 +44,20 @@ def __init__(self, problem, update_capacity=False): warnings.warn(nominal_capacity_warning, UserWarning) self.update_capacity = update_capacity self.parameter_set = problem.model.parameter_set - self.update_simulation_data(self.x0) + self.update_simulation_data(self.parameters.as_dict("initial")) - def update_simulation_data(self, x0): + def update_simulation_data(self, inputs: Inputs): """ Updates the simulation data based on the initial parameter values. Parameters ---------- - x0 : array + inputs : Inputs The initial parameter values for the simulation. """ if self.update_capacity: - self.problem.model.approximate_capacity(x0) - solution = self.problem.evaluate(x0) + self.problem.model.approximate_capacity(inputs) + solution = self.problem.evaluate(inputs) if "Time [s]" not in solution: raise ValueError("The solution does not contain time data.") @@ -65,7 +65,7 @@ def update_simulation_data(self, x0): self.problem._target = {key: solution[key] for key in self.problem.signal} self.dt = solution["Time [s]"][1] - solution["Time [s]"][0] - def _evaluate(self, x, grad=None): + def _evaluate(self, inputs: Inputs, grad=None): """ Computes the value of the cost function. @@ -73,8 +73,8 @@ def _evaluate(self, x, grad=None): Parameters ---------- - x : array - The parameter set for which to compute the cost. + inputs : Inputs + The parameters for which to compute the cost. grad : array, optional Gradient information, not used in this method. @@ -99,14 +99,14 @@ class GravimetricEnergyDensity(DesignCost): def __init__(self, problem, update_capacity=False): super(GravimetricEnergyDensity, self).__init__(problem, update_capacity) - def _evaluate(self, x, grad=None): + def _evaluate(self, inputs: Inputs, grad=None): """ Computes the cost function for the energy density. Parameters ---------- - x : array - The parameter set for which to compute the cost. + inputs : Inputs + The parameters for which to compute the cost. grad : array, optional Gradient information, not used in this method. @@ -115,17 +115,14 @@ def _evaluate(self, x, grad=None): float The gravimetric energy density or -infinity in case of infeasible parameters. 
""" - if not all(is_numeric(i) for i in x): - raise ValueError("Input must be a numeric array.") - try: with warnings.catch_warnings(): # Convert UserWarning to an exception warnings.filterwarnings("error", category=UserWarning) if self.update_capacity: - self.problem.model.approximate_capacity(x) - solution = self.problem.evaluate(x) + self.problem.model.approximate_capacity(inputs) + solution = self.problem.evaluate(inputs) voltage, current = solution["Voltage [V]"], solution["Current [A]"] energy_density = np.trapz(voltage * current, dx=self.dt) / ( @@ -158,14 +155,14 @@ class VolumetricEnergyDensity(DesignCost): def __init__(self, problem, update_capacity=False): super(VolumetricEnergyDensity, self).__init__(problem, update_capacity) - def _evaluate(self, x, grad=None): + def _evaluate(self, inputs: Inputs, grad=None): """ Computes the cost function for the energy density. Parameters ---------- - x : array - The parameter set for which to compute the cost. + inputs : Inputs + The parameters for which to compute the cost. grad : array, optional Gradient information, not used in this method. @@ -174,16 +171,14 @@ def _evaluate(self, x, grad=None): float The volumetric energy density or -infinity in case of infeasible parameters. """ - if not all(is_numeric(i) for i in x): - raise ValueError("Input must be a numeric array.") try: with warnings.catch_warnings(): # Convert UserWarning to an exception warnings.filterwarnings("error", category=UserWarning) if self.update_capacity: - self.problem.model.approximate_capacity(x) - solution = self.problem.evaluate(x) + self.problem.model.approximate_capacity(inputs) + solution = self.problem.evaluate(inputs) voltage, current = solution["Voltage [V]"], solution["Current [A]"] energy_density = np.trapz(voltage * current, dx=self.dt) / ( diff --git a/pybop/costs/fitting_costs.py b/pybop/costs/fitting_costs.py index eff56059c..3cb57ec94 100644 --- a/pybop/costs/fitting_costs.py +++ b/pybop/costs/fitting_costs.py @@ -3,6 +3,7 @@ from pybop.costs._likelihoods import BaseLikelihood from pybop.costs.base_cost import BaseCost from pybop.observers.observer import Observer +from pybop.parameters.parameter import Inputs class RootMeanSquaredError(BaseCost): @@ -23,13 +24,13 @@ def __init__(self, problem): # Default fail gradient self._de = 1.0 - def _evaluate(self, x, grad=None): + def _evaluate(self, inputs: Inputs, grad=None): """ Calculate the root mean square error for a given set of parameters. Parameters ---------- - x : array-like + inputs : Inputs The parameters for which to evaluate the cost. grad : array-like, optional An array to store the gradient of the cost function with respect @@ -41,7 +42,7 @@ def _evaluate(self, x, grad=None): The root mean square error. """ - prediction = self.problem.evaluate(x) + prediction = self.problem.evaluate(inputs) for key in self.signal: if len(prediction.get(key, [])) != len(self._target.get(key, [])): @@ -59,13 +60,13 @@ def _evaluate(self, x, grad=None): else: return np.sum(e) - def _evaluateS1(self, x): + def _evaluateS1(self, inputs: Inputs): """ Compute the cost and its gradient with respect to the parameters. Parameters ---------- - x : array-like + inputs : Inputs The parameters for which to compute the cost and gradient. Returns @@ -79,7 +80,7 @@ def _evaluateS1(self, x): ValueError If an error occurs during the calculation of the cost or gradient. 
""" - y, dy = self.problem.evaluateS1(x) + y, dy = self.problem.evaluateS1(inputs) for key in self.signal: if len(y.get(key, [])) != len(self._target.get(key, [])): @@ -136,13 +137,13 @@ def __init__(self, problem): # Default fail gradient self._de = 1.0 - def _evaluate(self, x, grad=None): + def _evaluate(self, inputs: Inputs, grad=None): """ Calculate the sum of squared errors for a given set of parameters. Parameters ---------- - x : array-like + inputs : Inputs The parameters for which to evaluate the cost. grad : array-like, optional An array to store the gradient of the cost function with respect @@ -153,7 +154,7 @@ def _evaluate(self, x, grad=None): float The sum of squared errors. """ - prediction = self.problem.evaluate(x) + prediction = self.problem.evaluate(inputs) for key in self.signal: if len(prediction.get(key, [])) != len(self._target.get(key, [])): @@ -170,13 +171,13 @@ def _evaluate(self, x, grad=None): else: return np.sum(e) - def _evaluateS1(self, x): + def _evaluateS1(self, inputs: Inputs): """ Compute the cost and its gradient with respect to the parameters. Parameters ---------- - x : array-like + inputs : Inputs The parameters for which to compute the cost and gradient. Returns @@ -190,7 +191,7 @@ def _evaluateS1(self, x): ValueError If an error occurs during the calculation of the cost or gradient. """ - y, dy = self.problem.evaluateS1(x) + y, dy = self.problem.evaluateS1(inputs) for key in self.signal: if len(y.get(key, [])) != len(self._target.get(key, [])): e = np.float64(np.inf) @@ -234,13 +235,13 @@ def __init__(self, observer: Observer): super().__init__(problem=observer) self._observer = observer - def _evaluate(self, x, grad=None): + def _evaluate(self, inputs: Inputs, grad=None): """ Calculate the observer cost for a given set of parameters. Parameters ---------- - x : array-like + inputs : Inputs The parameters for which to evaluate the cost. grad : array-like, optional An array to store the gradient of the cost function with respect @@ -251,19 +252,18 @@ def _evaluate(self, x, grad=None): float The observer cost (negative of the log likelihood). """ - inputs = self._observer.parameters.as_dict(x) log_likelihood = self._observer.log_likelihood( self._target, self._observer.time_data(), inputs ) return -log_likelihood - def evaluateS1(self, x): + def evaluateS1(self, inputs: Inputs): """ Compute the cost and its gradient with respect to the parameters. Parameters ---------- - x : array-like + inputs : Inputs The parameters for which to compute the cost and gradient. Returns @@ -312,13 +312,13 @@ def __init__(self, problem, likelihood, sigma=None): ): raise ValueError(f"{self.likelihood} must be a subclass of BaseLikelihood") - def _evaluate(self, x, grad=None): + def _evaluate(self, inputs: Inputs, grad=None): """ Calculate the maximum a posteriori cost for a given set of parameters. Parameters ---------- - x : array-like + inputs : Inputs The parameters for which to evaluate the cost. grad : array-like, optional An array to store the gradient of the cost function with respect @@ -329,22 +329,22 @@ def _evaluate(self, x, grad=None): float The maximum a posteriori cost. 
""" - log_likelihood = self.likelihood.evaluate(x) + log_likelihood = self.likelihood._evaluate(inputs) log_prior = sum( - param.prior.logpdf(x_i) for x_i, param in zip(x, self.problem.parameters) + self.parameters[key].prior.logpdf(value) for key, value in inputs.items() ) posterior = log_likelihood + log_prior return posterior - def _evaluateS1(self, x): + def _evaluateS1(self, inputs: Inputs): """ Compute the maximum a posteriori with respect to the parameters. The method passes the likelihood gradient to the optimiser without modification. Parameters ---------- - x : array-like + inputs : Inputs The parameters for which to compute the cost and gradient. Returns @@ -358,9 +358,9 @@ def _evaluateS1(self, x): ValueError If an error occurs during the calculation of the cost or gradient. """ - log_likelihood, dl = self.likelihood.evaluateS1(x) + log_likelihood, dl = self.likelihood._evaluateS1(inputs) log_prior = sum( - param.prior.logpdf(x_i) for x_i, param in zip(x, self.problem.parameters) + self.parameters[key].prior.logpdf(inputs[key]) for key in inputs.keys() ) posterior = log_likelihood + log_prior diff --git a/pybop/models/base_model.py b/pybop/models/base_model.py index a05062672..a016bbc6f 100644 --- a/pybop/models/base_model.py +++ b/pybop/models/base_model.py @@ -7,8 +7,7 @@ import pybamm from pybop import Dataset, Experiment, Parameters, ParameterSet - -Inputs = Dict[str, float] +from pybop.parameters.parameter import Inputs @dataclass @@ -65,7 +64,7 @@ def __init__(self, name="Base Model", parameter_set=None): else: # a pybop parameter set self._parameter_set = pybamm.ParameterValues(parameter_set.params) - self.parameters = None + self.parameters = Parameters() self.dataset = None self.signal = None self.additional_variables = [] @@ -104,8 +103,8 @@ def build( The initial state of charge to be used in simulations. """ self.dataset = dataset - self.parameters = parameters - if self.parameters is not None: + if parameters is not None: + self.parameters = parameters self.classify_and_update_parameters(self.parameters) if init_soc is not None: @@ -284,8 +283,7 @@ def reinit( if self._built_model is None: raise ValueError("Model must be built before calling reinit") - if not isinstance(inputs, dict): - inputs = self.parameters.as_dict(inputs) + inputs = self.parameters.verify(inputs) self._solver.set_up(self._built_model, inputs=inputs) @@ -332,9 +330,8 @@ def simulate( Parameters ---------- - inputs : dict or array-like - The input parameters for the simulation. If array-like, it will be - converted to a dictionary using the model's fit keys. + inputs : Inputs + The input parameters for the simulation. t_eval : array-like An array of time points at which to evaluate the solution. @@ -348,6 +345,8 @@ def simulate( ValueError If the model has not been built before simulation. """ + inputs = self.parameters.verify(inputs) + if self._built_model is None: raise ValueError("Model must be built before calling simulate") else: @@ -355,9 +354,6 @@ def simulate( sol = self.solver.solve(self.built_model, t_eval=t_eval) else: - if not isinstance(inputs, dict): - inputs = self.parameters.as_dict(inputs) - if self.check_params( inputs=inputs, allow_infeasible_solutions=self.allow_infeasible_solutions, @@ -385,9 +381,8 @@ def simulateS1(self, inputs: Inputs, t_eval: np.array): Parameters ---------- - inputs : dict or array-like - The input parameters for the simulation. If array-like, it will be - converted to a dictionary using the model's fit keys. 
+        inputs : Inputs
+            The input parameters for the simulation.
         t_eval : array-like
             An array of time points at which to evaluate the solution.
 
@@ -348,6 +345,8 @@ def simulate(
         ValueError
             If the model has not been built before simulation.
         """
+        inputs = self.parameters.verify(inputs)
+
         if self._built_model is None:
             raise ValueError("Model must be built before calling simulate")
         else:
@@ -355,9 +354,6 @@ def simulate(
                 sol = self.solver.solve(self.built_model, t_eval=t_eval)
 
             else:
-                if not isinstance(inputs, dict):
-                    inputs = self.parameters.as_dict(inputs)
-
                 if self.check_params(
                     inputs=inputs,
                     allow_infeasible_solutions=self.allow_infeasible_solutions,
@@ -385,9 +381,8 @@ def simulateS1(self, inputs: Inputs, t_eval: np.array):
 
         Parameters
         ----------
-        inputs : dict or array-like
-            The input parameters for the simulation. If array-like, it will be
-            converted to a dictionary using the model's fit keys.
+        inputs : Inputs
+            The input parameters for the simulation.
         t_eval : array-like
             An array of time points at which to evaluate the solution and its
             sensitivities.
@@ -402,6 +397,7 @@ def simulateS1(self, inputs: Inputs, t_eval: np.array):
         ValueError
             If the model has not been built before simulation.
         """
+        inputs = self.parameters.verify(inputs)
 
         if self._built_model is None:
             raise ValueError("Model must be built before calling simulate")
@@ -411,9 +407,6 @@ def simulateS1(self, inputs: Inputs, t_eval: np.array):
                     "Cannot use sensitivies for parameters which require a model rebuild"
                 )
 
-            if not isinstance(inputs, dict):
-                inputs = self.parameters.as_dict(inputs)
-
             if self.check_params(
                 inputs=inputs,
                 allow_infeasible_solutions=self.allow_infeasible_solutions,
@@ -470,10 +463,9 @@ def predict(
 
         Parameters
         ----------
-        inputs : dict or array-like, optional
-            Input parameters for the simulation. If the input is array-like, it is converted
-            to a dictionary using the model's fitting keys. Defaults to None, indicating
-            that the default parameters should be used.
+        inputs : Inputs, optional
+            Input parameters for the simulation. Defaults to None, indicating that the
+            default parameters should be used.
         t_eval : array-like, optional
             An array of time points at which to evaluate the solution. Defaults to None,
             which means the time points need to be specified within experiment or elsewhere.
@@ -499,13 +491,13 @@ def predict(
             if PyBaMM models are not supported by the current simulation method.
 
         """
+        inputs = self.parameters.verify(inputs)
+
         if not self.pybamm_model._built:
             self.pybamm_model.build_model()
 
         parameter_set = parameter_set or self._unprocessed_parameter_set
         if inputs is not None:
-            if not isinstance(inputs, dict):
-                inputs = self.parameters.as_dict(inputs)
             parameter_set.update(inputs)
 
         if self.check_params(
@@ -544,7 +536,7 @@ def check_params(
 
         Parameters
         ----------
-        inputs : dict
+        inputs : Inputs
             The input parameters for the simulation.
         allow_infeasible_solutions : bool, optional
             If True, infeasible parameter values will be allowed in the optimisation (default: True).
@@ -555,17 +547,7 @@ def check_params(
             A boolean which signifies whether the parameters are compatible.
 
         """
-        if inputs is not None:
-            if not isinstance(inputs, dict):
-                if isinstance(inputs, list):
-                    for entry in inputs:
-                        if not isinstance(entry, (int, float)):
-                            raise ValueError(
-                                "Expecting inputs in the form of a dictionary, numeric list"
-                                + f" or None, but received a list with type: {type(inputs)}"
-                            )
-                else:
-                    inputs = self.parameters.as_dict(inputs)
+        inputs = self.parameters.verify(inputs)
 
         return self._check_params(
             inputs=inputs, allow_infeasible_solutions=allow_infeasible_solutions
@@ -580,7 +562,7 @@ def _check_params(
 
         Parameters
         ----------
-        inputs : dict
+        inputs : Inputs
             The input parameters for the simulation.
         allow_infeasible_solutions : bool, optional
             If True, infeasible parameter values will be allowed in the optimisation (default: True).
@@ -641,7 +623,7 @@ def cell_volume(self, parameter_set: ParameterSet = None):
         """
         raise NotImplementedError
 
-    def approximate_capacity(self, x):
+    def approximate_capacity(self, inputs: Inputs):
         """
         Calculate a new estimate for the nominal capacity based on the theoretical
         energy density and an average voltage.
@@ -650,8 +632,8 @@ def approximate_capacity(self, x):
 
         Parameters
         ----------
-        x : array-like
-            An array of values representing the model inputs.
+        inputs : Inputs
+            The parameters that are the inputs of the model.
 
         Raises
         ------
diff --git a/pybop/models/empirical/base_ecm.py b/pybop/models/empirical/base_ecm.py
index c8252c041..38d94d147 100644
--- a/pybop/models/empirical/base_ecm.py
+++ b/pybop/models/empirical/base_ecm.py
@@ -1,4 +1,4 @@
-from pybop.models.base_model import BaseModel
+from pybop.models.base_model import BaseModel, Inputs
 
 
 class ECircuitModel(BaseModel):
@@ -85,13 +85,13 @@ def __init__(
         self._disc = None
         self.geometric_parameters = {}
 
-    def _check_params(self, inputs=None, allow_infeasible_solutions=True):
+    def _check_params(self, inputs: Inputs = None, allow_infeasible_solutions=True):
         """
         Check the compatibility of the model parameters.
 
         Parameters
         ----------
-        inputs : dict
+        inputs : Inputs
             The input parameters for the simulation.
         allow_infeasible_solutions : bool, optional
             If True, infeasible parameter values will be allowed in the optimisation (default: True).
diff --git a/pybop/models/empirical/ecm.py b/pybop/models/empirical/ecm.py
index 031da3fde..d2d97d6de 100644
--- a/pybop/models/empirical/ecm.py
+++ b/pybop/models/empirical/ecm.py
@@ -1,6 +1,7 @@
 from pybamm import equivalent_circuit as pybamm_equivalent_circuit
 
 from pybop.models.empirical.base_ecm import ECircuitModel
+from pybop.parameters.parameter import Inputs
 
 
 class Thevenin(ECircuitModel):
@@ -44,13 +45,13 @@ def __init__(
             pybamm_model=pybamm_equivalent_circuit.Thevenin, name=name, **model_kwargs
         )
 
-    def _check_params(self, inputs=None, allow_infeasible_solutions=True):
+    def _check_params(self, inputs: Inputs = None, allow_infeasible_solutions=True):
         """
         Check the compatibility of the model parameters.
 
         Parameters
         ----------
-        inputs : dict
+        inputs : Dict
             The input parameters for the simulation.
         allow_infeasible_solutions : bool, optional
             If True, infeasible parameter values will be allowed in the optimisation (default: True).
diff --git a/pybop/models/lithium_ion/base_echem.py b/pybop/models/lithium_ion/base_echem.py
index 54650fa16..523d5fb0c 100644
--- a/pybop/models/lithium_ion/base_echem.py
+++ b/pybop/models/lithium_ion/base_echem.py
@@ -1,9 +1,12 @@
 import warnings
+from typing import Dict
 
 from pybamm import lithium_ion as pybamm_lithium_ion
 
 from pybop.models.base_model import BaseModel
 
+Inputs = Dict[str, float]
+
 
 class EChemBaseModel(BaseModel):
@@ -85,14 +88,14 @@ def __init__(
         self.geometric_parameters = self.set_geometric_parameters()
 
     def _check_params(
-        self, inputs=None, parameter_set=None, allow_infeasible_solutions=True
+        self, inputs: Inputs = None, parameter_set=None, allow_infeasible_solutions=True
     ):
         """
         Check compatibility of the model parameters.
 
         Parameters
         ----------
-        inputs : dict
+        inputs : Inputs
             The input parameters for the simulation.
         allow_infeasible_solutions : bool, optional
             If True, infeasible parameter values will be allowed in the optimisation (default: True).
@@ -264,7 +267,7 @@ def area_density(thickness, mass_density):
         )
         return cross_sectional_area * total_area_density
 
-    def approximate_capacity(self, x):
+    def approximate_capacity(self, inputs: Inputs):
         """
         Calculate and update an estimate for the nominal cell capacity based on the theoretical
         energy density and an average voltage.
@@ -274,17 +277,15 @@ def approximate_capacity(self, x):
 
         Parameters
         ----------
-        x : array-like
-            An array of values representing the model inputs.
+        inputs : Inputs
+            The parameters that are the inputs of the model.
 
         Returns
         -------
         None
             The nominal cell capacity is updated directly in the model's parameter set.
""" - inputs = { - key: x[i] for i, key in enumerate([param.name for param in self.parameters]) - } + inputs = self.parameters.verify(inputs) self._parameter_set.update(inputs) # Calculate theoretical energy density diff --git a/pybop/observers/observer.py b/pybop/observers/observer.py index 1b81c5ac7..1c35c25df 100644 --- a/pybop/observers/observer.py +++ b/pybop/observers/observer.py @@ -50,16 +50,15 @@ def __init__( if model.signal is None: model.signal = self.signal - inputs = dict() - for param in self.parameters: - inputs[param.name] = param.value - + inputs = self.parameters.as_dict("initial") self._state = model.reinit(inputs) self._model = model self._signal = self.signal self._n_outputs = len(self._signal) def reset(self, inputs: Inputs) -> None: + inputs = self.parameters.verify(inputs) + self._state = self._model.reinit(inputs) def observe(self, time: float, value: Optional[np.ndarray] = None) -> float: @@ -96,6 +95,8 @@ def log_likelihood(self, values: dict, times: np.ndarray, inputs: Inputs) -> flo inputs : Inputs The inputs to the model. """ + inputs = self.parameters.verify(inputs) + if self._n_outputs == 1: signal = self._signal[0] if len(values[signal]) != len(times): @@ -142,27 +143,20 @@ def get_current_time(self) -> float: """ return self._state.t - def evaluate(self, x): + def evaluate(self, inputs: Inputs): """ Evaluate the model with the given parameters and return the signal. Parameters ---------- - x : np.ndarray - Parameter values to evaluate the model at. + inputs : Inputs + Parameters for evaluation of the model. Returns ------- y : np.ndarray - The model output y(t) simulated with inputs x. + The model output y(t) simulated with given inputs. """ - inputs = dict() - if isinstance(x, Parameters): - for param in x: - inputs[param.name] = param.value - else: # x is an array of parameter values - for i, param in enumerate(self.parameters): - inputs[param.name] = x[i] self.reset(inputs) output = {} diff --git a/pybop/observers/unscented_kalman.py b/pybop/observers/unscented_kalman.py index 0b6425db9..afbc2a010 100644 --- a/pybop/observers/unscented_kalman.py +++ b/pybop/observers/unscented_kalman.py @@ -15,8 +15,8 @@ class UnscentedKalmanFilterObserver(Observer): Parameters ---------- - parameters: List[Parameters] - The inputs to the model. + parameters: Parameters + The parameters for the model. model : BaseModel The model to observe. 
     sigma0 : np.ndarray | float
diff --git a/pybop/optimisers/base_optimiser.py b/pybop/optimisers/base_optimiser.py
index b283b9d8e..ba4330639 100644
--- a/pybop/optimisers/base_optimiser.py
+++ b/pybop/optimisers/base_optimiser.py
@@ -2,7 +2,7 @@
 
 import numpy as np
 
-from pybop import BaseCost, BaseLikelihood, DesignCost
+from pybop import BaseCost, BaseLikelihood, DesignCost, Parameter, Parameters
 
 
 class BaseOptimiser:
@@ -50,6 +50,7 @@ def __init__(
         **optimiser_kwargs,
     ):
         # First set attributes to default values
+        self.parameters = Parameters()
         self.x0 = None
         self.bounds = None
         self.sigma0 = 0.1
@@ -63,26 +64,25 @@ def __init__(
 
         if isinstance(cost, BaseCost):
             self.cost = cost
-            self.x0 = cost.x0
+            self.parameters.join(cost.parameters)
             self.set_allow_infeasible_solutions()
-
             if isinstance(cost, (BaseLikelihood, DesignCost)):
                 self.minimising = False
 
-            # Set default bounds (for all or no parameters)
-            self.bounds = cost.parameters.get_bounds()
-
-            # Set default initial standard deviation (for all or no parameters)
-            self.sigma0 = cost.parameters.get_sigma0() or self.sigma0
-
         else:
             try:
-                cost_test = cost(optimiser_kwargs.get("x0", []))
+                self.x0 = optimiser_kwargs.get("x0", [])
+                cost_test = cost(self.x0)
                 warnings.warn(
                     "The cost is not an instance of pybop.BaseCost, but let's continue "
                     + "assuming that it is a callable function to be minimised.",
                     UserWarning,
                 )
                 self.cost = cost
+                for i, value in enumerate(self.x0):
+                    self.parameters.add(
+                        Parameter(name=f"Parameter {i}", initial_value=value)
+                    )
                 self.minimising = True
 
             except Exception:
@@ -93,6 +93,9 @@ def __init__(
                     f"Cost returned {type(cost_test)}, not a scalar numeric value."
                 )
 
+        if len(self.parameters) == 0:
+            raise ValueError("There are no parameters to optimise.")
+
         self.unset_options = optimiser_kwargs
         self.set_base_options()
         self._set_up_optimiser()
@@ -109,9 +112,19 @@ def set_base_options(self):
         """
         Update the base optimiser options and remove them from the options dictionary.
         """
-        self.x0 = self.unset_options.pop("x0", self.x0)
-        self.bounds = self.unset_options.pop("bounds", self.bounds)
-        self.sigma0 = self.unset_options.pop("sigma0", self.sigma0)
+        # Set initial values, if x0 is None, initial values are unmodified.
+        self.parameters.update(initial_values=self.unset_options.pop("x0", None))
+        self.x0 = self.parameters.initial_value()
+
+        # Set default bounds (for all or no parameters)
+        self.bounds = self.unset_options.pop("bounds", self.parameters.get_bounds())
+
+        # Set default initial standard deviation (for all or no parameters)
+        self.sigma0 = self.unset_options.pop(
+            "sigma0", self.parameters.get_sigma0() or self.sigma0
+        )
+
+        # Set other options
         self.verbose = self.unset_options.pop("verbose", self.verbose)
         self.minimising = self.unset_options.pop("minimising", self.minimising)
         if "allow_infeasible_solutions" in self.unset_options.keys():
@@ -186,8 +199,12 @@ def store_optimised_parameters(self, x):
     def check_optimal_parameters(self, x):
         """
         Check if the optimised parameters are physically viable.
-        """
 
+        Parameters
+        ----------
+        x : array-like
+            Optimised parameter values.
+ """ if self.cost.problem._model.check_params( inputs=x, allow_infeasible_solutions=False ): diff --git a/pybop/optimisers/pints_optimisers.py b/pybop/optimisers/pints_optimisers.py index 4872973a8..2f99e5efe 100644 --- a/pybop/optimisers/pints_optimisers.py +++ b/pybop/optimisers/pints_optimisers.py @@ -268,7 +268,7 @@ class CMAES(BasePintsOptimiser): """ def __init__(self, cost, **optimiser_kwargs): - x0 = optimiser_kwargs.pop("x0", cost.x0) + x0 = optimiser_kwargs.pop("x0", cost.parameters.initial_value()) if x0 is not None and len(x0) == 1: raise ValueError( "CMAES requires optimisation of >= 2 parameters at once. " diff --git a/pybop/optimisers/scipy_optimisers.py b/pybop/optimisers/scipy_optimisers.py index 843423042..544abfc88 100644 --- a/pybop/optimisers/scipy_optimisers.py +++ b/pybop/optimisers/scipy_optimisers.py @@ -162,8 +162,8 @@ def callback(intermediate_result: OptimizeResult): self._cost0 = np.abs(self.cost(self.x0)) if np.isinf(self._cost0): for i in range(1, self.num_resamples): - x0 = self.cost.parameters.rvs(1) - self._cost0 = np.abs(self.cost(x0)) + self.x0 = self.parameters.rvs(1)[0] + self._cost0 = np.abs(self.cost(self.x0)) if not np.isinf(self._cost0): break if np.isinf(self._cost0): diff --git a/pybop/parameters/parameter.py b/pybop/parameters/parameter.py index 76754847d..e1a828af3 100644 --- a/pybop/parameters/parameter.py +++ b/pybop/parameters/parameter.py @@ -1,8 +1,12 @@ from collections import OrderedDict -from typing import Dict, List +from typing import Dict, List, Union import numpy as np +from pybop._utils import is_numeric + +Inputs = Dict[str, float] + class Parameter: """ @@ -73,7 +77,7 @@ def rvs(self, n_samples, random_state=None): return samples - def update(self, value=None, initial_value=None): + def update(self, initial_value=None, value=None): """ Update the parameter's current value. @@ -82,12 +86,12 @@ def update(self, value=None, initial_value=None): value : float The new value to be assigned to the parameter. """ - if value is not None: - self.value = value - elif initial_value is not None: + if initial_value is not None: self.initial_value = initial_value self.value = initial_value - else: + if value is not None: + self.value = value + if initial_value is None and value is None: raise ValueError("No value provided to update parameter") def __repr__(self): @@ -180,7 +184,15 @@ def __getitem__(self, key: str) -> Parameter: ------- pybop.Parameter The Parameter object. + + Raises + ------ + ValueError + The key must be the name of one of the parameters. """ + if key not in self.param.keys(): + raise ValueError(f"The key {key} is not the name of a parameter.") + return self.param[key] def __len__(self) -> int: @@ -240,6 +252,20 @@ def remove(self, parameter_name): # Remove the parameter self.param.pop(parameter_name) + def join(self, parameters=None): + """ + Join two Parameters objects into the first by copying across each Parameter. + + Parameters + ---------- + parameters : pybop.Parameters + """ + for param in parameters: + if param not in self.param.values(): + self.add(param) + else: + print(f"Discarding duplicate {param.name}.") + def get_bounds(self) -> Dict: """ Get bounds, for either all or no parameters. @@ -260,12 +286,20 @@ def get_bounds(self) -> Dict: return bounds - def update(self, values): + def update(self, initial_values=None, values=None, bounds=None): """ Set value of each parameter. 
""" for i, param in enumerate(self.param.values()): - param.update(value=values[i]) + if initial_values is not None: + param.update(initial_value=initial_values[i]) + if values is not None: + param.update(value=values[i]) + if bounds is not None: + if isinstance(bounds, Dict): + param.set_bounds(bounds=[bounds["lower"][i], bounds["upper"][i]]) + else: + param.set_bounds(bounds=bounds[i]) def rvs(self, n_samples: int) -> List: """ @@ -325,8 +359,8 @@ def initial_value(self) -> List: for param in self.param.values(): if param.initial_value is None: - initial_value = param.rvs(1) - param.update(initial_value=initial_value[0]) + initial_value = param.rvs(1)[0] + param.update(initial_value=initial_value) initial_values.append(param.initial_value) return initial_values @@ -373,6 +407,43 @@ def get_bounds_for_plotly(self): return bounds def as_dict(self, values=None) -> Dict: + """ + Parameters + ---------- + values : list or str, optional + A list of parameter values or one of the strings "initial" or "true" which can be used + to obtain a dictionary of parameters. + + Returns + ------- + Inputs + A parameters dictionary. + """ if values is None: values = self.current_value() + elif isinstance(values, str): + if values == "initial": + values = self.initial_value() + elif values == "true": + values = self.true_value() return {key: values[i] for i, key in enumerate(self.param.keys())} + + def verify(self, inputs: Union[Inputs, None] = None): + """ + Verify that the inputs are an Inputs dictionary or numeric values + which can be used to construct an Inputs dictionary + + Parameters + ---------- + inputs : Inputs or numeric + """ + if inputs is None or isinstance(inputs, Dict): + return inputs + elif (isinstance(inputs, list) and all(is_numeric(x) for x in inputs)) or all( + is_numeric(x) for x in list(inputs) + ): + return self.as_dict(inputs) + else: + raise TypeError( + f"Inputs must be a dictionary or numeric. Received {type(inputs)}" + ) diff --git a/pybop/plotting/plot_problem.py b/pybop/plotting/plot_problem.py index 968da94d6..fb8759c98 100644 --- a/pybop/plotting/plot_problem.py +++ b/pybop/plotting/plot_problem.py @@ -3,9 +3,10 @@ import numpy as np from pybop import DesignProblem, FittingProblem, StandardPlot +from pybop.parameters.parameter import Inputs -def quick_plot(problem, parameter_values=None, show=True, **layout_kwargs): +def quick_plot(problem, problem_inputs: Inputs = None, show=True, **layout_kwargs): """ Quickly plot the target dataset against optimised model output. @@ -16,7 +17,7 @@ def quick_plot(problem, parameter_values=None, show=True, **layout_kwargs): ---------- problem : object Problem object with dataset and signal attributes. - parameter_values : array-like + problem_inputs : Inputs Optimised (or example) parameter values. show : bool, optional If True, the figure is shown upon creation (default: True). @@ -30,12 +31,14 @@ def quick_plot(problem, parameter_values=None, show=True, **layout_kwargs): plotly.graph_objs.Figure The Plotly figure object for the scatter plot. 
""" - if parameter_values is None: - parameter_values = problem.x0 + if problem_inputs is None: + problem_inputs = problem.parameters.as_dict() + else: + problem_inputs = problem.parameters.verify(problem_inputs) # Extract the time data and evaluate the model's output and target values xaxis_data = problem.time_data() - model_output = problem.evaluate(parameter_values) + model_output = problem.evaluate(problem_inputs) target_output = problem.get_target() # Create a plot for each output diff --git a/pybop/problems/base_problem.py b/pybop/problems/base_problem.py index 48f53dab1..4d9d85194 100644 --- a/pybop/problems/base_problem.py +++ b/pybop/problems/base_problem.py @@ -1,4 +1,5 @@ from pybop import BaseModel, Dataset, Parameter, Parameters +from pybop.parameters.parameter import Inputs class BaseProblem: @@ -65,21 +66,18 @@ def __init__( else: self.additional_variables = [] - # Set initial values - self.x0 = self.parameters.initial_value() - @property def n_parameters(self): return len(self.parameters) - def evaluate(self, x): + def evaluate(self, inputs: Inputs): """ Evaluate the model with the given parameters and return the signal. Parameters ---------- - x : np.ndarray - Parameter values to evaluate the model at. + inputs : Inputs + Parameters for evaluation of the model. Raises ------ @@ -88,15 +86,15 @@ def evaluate(self, x): """ raise NotImplementedError - def evaluateS1(self, x): + def evaluateS1(self, inputs: Inputs): """ Evaluate the model with the given parameters and return the signal and its derivatives. Parameters ---------- - x : np.ndarray - Parameter values to evaluate the model at. + inputs : Inputs + Parameters for evaluation of the model. Raises ------ diff --git a/pybop/problems/design_problem.py b/pybop/problems/design_problem.py index 3217ca95d..b99a9357b 100644 --- a/pybop/problems/design_problem.py +++ b/pybop/problems/design_problem.py @@ -1,6 +1,7 @@ import numpy as np from pybop import BaseProblem +from pybop.parameters.parameter import Inputs class DesignProblem(BaseProblem): @@ -65,27 +66,29 @@ def __init__( ) # Add an example dataset for plotting comparison - sol = self.evaluate(self.x0) + sol = self.evaluate(self.parameters.as_dict("initial")) self._time_data = sol["Time [s]"] self._target = {key: sol[key] for key in self.signal} self._dataset = None - def evaluate(self, x): + def evaluate(self, inputs: Inputs): """ Evaluate the model with the given parameters and return the signal. Parameters ---------- - x : np.ndarray - Parameter values to evaluate the model at. + inputs : Inputs + Parameters for evaluation of the model. Returns ------- y : np.ndarray - The model output y(t) simulated with inputs x. + The model output y(t) simulated with inputs. 
""" + inputs = self.parameters.verify(inputs) + sol = self._model.predict( - inputs=x, + inputs=inputs, experiment=self.experiment, init_soc=self.init_soc, ) diff --git a/pybop/problems/fitting_problem.py b/pybop/problems/fitting_problem.py index 15d1ed7e2..b27955479 100644 --- a/pybop/problems/fitting_problem.py +++ b/pybop/problems/fitting_problem.py @@ -1,6 +1,7 @@ import numpy as np from pybop import BaseProblem +from pybop.parameters.parameter import Inputs class FittingProblem(BaseProblem): @@ -43,7 +44,7 @@ def __init__( parameters, model, check_model, signal, additional_variables, init_soc ) self._dataset = dataset.data - self.x = self.x0 + self.parameters.initial_value() # Check that the dataset contains time and current dataset.check(self.signal + ["Current function [A]"]) @@ -74,51 +75,61 @@ def __init__( init_soc=self.init_soc, ) - def evaluate(self, x): + def evaluate(self, inputs: Inputs): """ Evaluate the model with the given parameters and return the signal. Parameters ---------- - x : np.ndarray - Parameter values to evaluate the model at. + inputs : Inputs + Parameters for evaluation of the model. Returns ------- y : np.ndarray - The model output y(t) simulated with inputs x. + The model output y(t) simulated with given inputs. """ - if np.any(x != self.x) and self._model.rebuild_parameters: - self.parameters.update(values=x) + inputs = self.parameters.verify(inputs) + + requires_rebuild = False + for key, value in inputs.items(): + if key in self._model.rebuild_parameters: + current_value = self.parameters[key].value + if value != current_value: + self.parameters[key].update(value=value) + requires_rebuild = True + + if requires_rebuild: self._model.rebuild(parameters=self.parameters) - self.x = x - y = self._model.simulate(inputs=x, t_eval=self._time_data) + y = self._model.simulate(inputs=inputs, t_eval=self._time_data) return y - def evaluateS1(self, x): + def evaluateS1(self, inputs: Inputs): """ Evaluate the model with the given parameters and return the signal and its derivatives. Parameters ---------- - x : np.ndarray - Parameter values to evaluate the model at. + inputs : Inputs + Parameters for evaluation of the model. Returns ------- tuple A tuple containing the simulation result y(t) and the sensitivities dy/dx(t) evaluated - with given inputs x. + with given inputs. """ + inputs = self.parameters.verify(inputs) + if self._model.rebuild_parameters: raise RuntimeError( "Gradient not available when using geometric parameters." 
) y, dy = self._model.simulateS1( - inputs=x, + inputs=inputs, t_eval=self._time_data, ) diff --git a/tests/integration/test_model_experiment_changes.py b/tests/integration/test_model_experiment_changes.py index 6902f873e..64d27132a 100644 --- a/tests/integration/test_model_experiment_changes.py +++ b/tests/integration/test_model_experiment_changes.py @@ -48,7 +48,9 @@ def test_changing_experiment(self, parameters): experiment = pybop.Experiment(["Charge at 1C until 4.1 V (2 seconds period)"]) solution_2 = model.predict( - init_soc=init_soc, experiment=experiment, inputs=parameters.true_value() + init_soc=init_soc, + experiment=experiment, + inputs=parameters.as_dict("true"), ) cost_2 = self.final_cost(solution_2, model, parameters, init_soc) diff --git a/tests/integration/test_optimisation_options.py b/tests/integration/test_optimisation_options.py index 01702ba24..a196ac676 100644 --- a/tests/integration/test_optimisation_options.py +++ b/tests/integration/test_optimisation_options.py @@ -80,7 +80,7 @@ def spm_costs(self, model, parameters, cost_class): ) @pytest.mark.integration def test_optimisation_f_guessed(self, f_guessed, spm_costs): - x0 = spm_costs.x0 + x0 = spm_costs.parameters.initial_value() # Test each optimiser optim = pybop.XNES( cost=spm_costs, diff --git a/tests/integration/test_spm_parameterisations.py b/tests/integration/test_spm_parameterisations.py index 7eeb0b7c0..20fdee0eb 100644 --- a/tests/integration/test_spm_parameterisations.py +++ b/tests/integration/test_spm_parameterisations.py @@ -91,7 +91,7 @@ def spm_costs(self, model, parameters, cost_class, init_soc): ) @pytest.mark.integration def test_spm_optimisers(self, optimiser, spm_costs): - x0 = spm_costs.x0 + x0 = spm_costs.parameters.initial_value() # Some optimisers require a complete set of bounds if optimiser in [ pybop.SciPyDifferentialEvolution, @@ -165,7 +165,7 @@ def spm_two_signal_cost(self, parameters, model, cost_class): ) @pytest.mark.integration def test_multiple_signals(self, multi_optimiser, spm_two_signal_cost): - x0 = spm_two_signal_cost.x0 + x0 = spm_two_signal_cost.parameters.initial_value() # Some optimisers require a complete set of bounds if multi_optimiser in [pybop.SciPyDifferentialEvolution]: spm_two_signal_cost.problem.parameters[ @@ -184,7 +184,7 @@ def test_multiple_signals(self, multi_optimiser, spm_two_signal_cost): if issubclass(multi_optimiser, pybop.BasePintsOptimiser): optim.set_max_unchanged_iterations(iterations=35, absolute_tolerance=1e-5) - initial_cost = optim.cost(spm_two_signal_cost.x0) + initial_cost = optim.cost(optim.parameters.initial_value()) x, final_cost = optim.run() # Assertions @@ -222,7 +222,7 @@ def test_model_misparameterisation(self, parameters, model, init_soc): # Build the optimisation problem optim = optimiser(cost=cost) - initial_cost = optim.cost(cost.x0) + initial_cost = optim.cost(optim.x0) # Run the optimisation problem x, final_cost = optim.run() diff --git a/tests/integration/test_thevenin_parameterisation.py b/tests/integration/test_thevenin_parameterisation.py index 1ef1bc3eb..45df6ba42 100644 --- a/tests/integration/test_thevenin_parameterisation.py +++ b/tests/integration/test_thevenin_parameterisation.py @@ -65,7 +65,7 @@ def cost(self, model, parameters, cost_class): ) @pytest.mark.integration def test_optimisers_on_simple_model(self, optimiser, cost): - x0 = cost.x0 + x0 = cost.parameters.initial_value() if optimiser in [pybop.GradientDescent]: optim = optimiser( cost=cost, @@ -81,7 +81,7 @@ def test_optimisers_on_simple_model(self, 
optimiser, cost): if isinstance(optimiser, pybop.BasePintsOptimiser): optim.set_max_unchanged_iterations(iterations=35, absolute_tolerance=1e-5) - initial_cost = optim.cost(x0) + initial_cost = optim.cost(optim.parameters.initial_value()) x, final_cost = optim.run() # Assertions diff --git a/tests/unit/test_cost.py b/tests/unit/test_cost.py index 3c0d81514..e09d3cc42 100644 --- a/tests/unit/test_cost.py +++ b/tests/unit/test_cost.py @@ -113,6 +113,21 @@ def test_base(self, problem): with pytest.raises(NotImplementedError): base_cost.evaluateS1([0.5]) + @pytest.mark.unit + def test_error_in_cost_calculation(self, problem): + class RaiseErrorCost(pybop.BaseCost): + def _evaluate(self, inputs, grad=None): + raise ValueError("Error test.") + + def _evaluateS1(self, inputs): + raise ValueError("Error test.") + + cost = RaiseErrorCost(problem) + with pytest.raises(ValueError, match="Error in cost calculation: Error test."): + cost([0.5]) + with pytest.raises(ValueError, match="Error in cost calculation: Error test."): + cost.evaluateS1([0.5]) + @pytest.mark.unit def test_MAP(self, problem): # Incorrect likelihood @@ -158,7 +173,9 @@ def test_costs(self, cost): assert type(de) == np.ndarray # Test exception for non-numeric inputs - with pytest.raises(ValueError): + with pytest.raises( + TypeError, match="Inputs must be a dictionary or numeric." + ): cost.evaluateS1(["StringInputShouldNotWork"]) with pytest.warns(UserWarning) as record: @@ -175,7 +192,7 @@ def test_costs(self, cost): assert cost.evaluateS1([0.01]) == (np.inf, cost._de) # Test exception for non-numeric inputs - with pytest.raises(ValueError): + with pytest.raises(TypeError, match="Inputs must be a dictionary or numeric."): cost(["StringInputShouldNotWork"]) # Test treatment of simulations that terminated early @@ -224,7 +241,9 @@ def test_design_costs( assert cost([1.1]) == -np.inf # Test exception for non-numeric inputs - with pytest.raises(ValueError): + with pytest.raises( + TypeError, match="Inputs must be a dictionary or numeric." 
+ ): cost(["StringInputShouldNotWork"]) # Compute after updating nominal capacity diff --git a/tests/unit/test_likelihoods.py b/tests/unit/test_likelihoods.py index 41ee36673..b99aa5d0b 100644 --- a/tests/unit/test_likelihoods.py +++ b/tests/unit/test_likelihoods.py @@ -76,7 +76,6 @@ def test_base_likelihood_init(self, problem_name, n_outputs, request): assert likelihood.problem == problem assert likelihood.n_outputs == n_outputs assert likelihood.n_time_data == problem.n_time_data - assert likelihood.x0 == problem.x0 assert likelihood.n_parameters == 1 assert np.array_equal(likelihood._target, problem._target) @@ -132,7 +131,7 @@ def test_gaussian_log_likelihood(self, one_signal_problem): grad_result, grad_likelihood = likelihood.evaluateS1(np.array([0.5, 0.5])) assert isinstance(result, float) np.testing.assert_allclose(result, grad_result, atol=1e-5) - assert np.all(grad_likelihood <= 0) + assert grad_likelihood[0] <= 0 # TEMPORARY WORKAROUND (Remove in #338) @pytest.mark.unit def test_gaussian_log_likelihood_returns_negative_inf(self, one_signal_problem): diff --git a/tests/unit/test_models.py b/tests/unit/test_models.py index 9c11b4c6b..6809aec83 100644 --- a/tests/unit/test_models.py +++ b/tests/unit/test_models.py @@ -137,10 +137,19 @@ def test_build(self, model): @pytest.mark.unit def test_rebuild(self, model): + # Test rebuild before build + with pytest.raises( + ValueError, match="Model must be built before calling rebuild" + ): + model.rebuild() + model.build() initial_built_model = model._built_model assert model._built_model is not None + model.set_params() + assert model.model_with_set_params is not None + # Test that the model can be built again model.rebuild() rebuilt_model = model._built_model @@ -252,6 +261,12 @@ def test_reinit(self): k = 0.1 y0 = 1 model = ExponentialDecay(pybamm.ParameterValues({"k": k, "y0": y0})) + + with pytest.raises( + ValueError, match="Model must be built before calling get_state" + ): + model.get_state({"k": k, "y0": y0}, 0, np.array([0])) + model.build() state = model.reinit(inputs={}) np.testing.assert_array_almost_equal(state.as_ndarray(), np.array([[y0]])) @@ -317,7 +332,7 @@ def test_check_params(self): assert base.check_params() assert base.check_params(inputs={"a": 1}) assert base.check_params(inputs=[1]) - with pytest.raises(ValueError, match="Expecting inputs in the form of"): + with pytest.raises(TypeError, match="Inputs must be a dictionary or numeric."): base.check_params(inputs=["unexpected_string"]) @pytest.mark.unit diff --git a/tests/unit/test_observer_unscented_kalman.py b/tests/unit/test_observer_unscented_kalman.py index 2a947e716..ce60abbc0 100644 --- a/tests/unit/test_observer_unscented_kalman.py +++ b/tests/unit/test_observer_unscented_kalman.py @@ -14,15 +14,6 @@ class TestUKF: measure_noise = 1e-4 - @pytest.fixture(params=[1, 2, 3]) - def model(self, request): - model = ExponentialDecay( - parameter_set=pybamm.ParameterValues({"k": "[input]", "y0": "[input]"}), - n_states=request.param, - ) - model.build() - return model - @pytest.fixture def parameters(self): return pybop.Parameters( @@ -40,6 +31,15 @@ def parameters(self): ), ) + @pytest.fixture(params=[1, 2, 3]) + def model(self, parameters, request): + model = ExponentialDecay( + parameter_set=pybamm.ParameterValues({"k": "[input]", "y0": "[input]"}), + n_states=request.param, + ) + model.build(parameters=parameters) + return model + @pytest.fixture def dataset(self, model: pybop.BaseModel, parameters): observer = pybop.Observer(parameters, model, signal=["2y"]) diff 
--git a/tests/unit/test_observers.py b/tests/unit/test_observers.py index 46987bae9..2d2e3bc6e 100644 --- a/tests/unit/test_observers.py +++ b/tests/unit/test_observers.py @@ -11,15 +11,6 @@ class TestObserver: A class to test the observer class. """ - @pytest.fixture(params=[1, 2]) - def model(self, request): - model = ExponentialDecay( - parameter_set=pybamm.ParameterValues({"k": "[input]", "y0": "[input]"}), - n_states=request.param, - ) - model.build() - return model - @pytest.fixture def parameters(self): return pybop.Parameters( @@ -37,6 +28,15 @@ def parameters(self): ), ) + @pytest.fixture(params=[1, 2]) + def model(self, parameters, request): + model = ExponentialDecay( + parameter_set=pybamm.ParameterValues({"k": "[input]", "y0": "[input]"}), + n_states=request.param, + ) + model.build(parameters=parameters) + return model + @pytest.mark.unit def test_observer(self, model, parameters): n = model.n_states @@ -72,8 +72,8 @@ def test_observer(self, model, parameters): # Test evaluate with different inputs observer._time_data = t_eval - observer.evaluate(parameters.initial_value()) - observer.evaluate(parameters) + observer.evaluate(parameters.as_dict()) + observer.evaluate(parameters.current_value()) # Test evaluate with dataset observer._dataset = pybop.Dataset( @@ -83,7 +83,7 @@ def test_observer(self, model, parameters): } ) observer._target = {"2y": expected} - observer.evaluate(parameters.initial_value()) + observer.evaluate(parameters.as_dict()) @pytest.mark.unit def test_unbuilt_model(self, parameters): diff --git a/tests/unit/test_optimisation.py b/tests/unit/test_optimisation.py index 5f827430c..740e42d3d 100644 --- a/tests/unit/test_optimisation.py +++ b/tests/unit/test_optimisation.py @@ -104,6 +104,15 @@ def test_optimiser_classes(self, two_param_cost, optimiser, expected_name): if issubclass(optimiser, pybop.BasePintsOptimiser): assert optim._boundaries is None + @pytest.mark.unit + def test_no_optimisation_parameters(self, model, dataset): + problem = pybop.FittingProblem( + model=model, parameters=pybop.Parameters(), dataset=dataset + ) + cost = pybop.RootMeanSquaredError(problem) + with pytest.raises(ValueError, match="There are no parameters to optimise."): + pybop.Optimisation(cost=cost) + @pytest.mark.parametrize( "optimiser", [ @@ -247,11 +256,12 @@ def test_optimiser_kwargs(self, cost, optimiser): else: # Check and update initial values - assert optim.x0 == cost.x0 + x0 = cost.parameters.initial_value() + assert optim.x0 == x0 x0_new = np.array([0.6]) optim = optimiser(cost=cost, x0=x0_new) assert optim.x0 == x0_new - assert optim.x0 != cost.x0 + assert optim.x0 != x0 @pytest.mark.unit def test_scipy_minimize_with_jac(self, cost): @@ -322,13 +332,6 @@ class RandomClass: with pytest.raises(ValueError): pybop.Optimisation(cost=cost, optimiser=RandomClass) - @pytest.mark.unit - def test_prior_sampling(self, cost): - # Tests prior sampling - for i in range(50): - optim = pybop.Optimisation(cost=cost) - assert optim.x0[0] < 0.62 and optim.x0[0] > 0.58 - @pytest.mark.unit @pytest.mark.parametrize( "mean, sigma, expect_exception", diff --git a/tests/unit/test_parameters.py b/tests/unit/test_parameters.py index 736684fef..02b3ea5ce 100644 --- a/tests/unit/test_parameters.py +++ b/tests/unit/test_parameters.py @@ -105,6 +105,18 @@ def test_parameters_construction(self, parameter): assert parameter.name in params.param.keys() assert parameter in params.param.values() + params.join( + pybop.Parameters( + parameter, + pybop.Parameter( + "Positive electrode active material 
volume fraction", + prior=pybop.Gaussian(0.6, 0.02), + bounds=[0.375, 0.7], + initial_value=0.6, + ), + ) + ) + with pytest.raises( ValueError, match="There is already a parameter with the name " @@ -128,6 +140,11 @@ def test_parameters_construction(self, parameter): initial_value=0.6, ) ) + with pytest.raises( + Exception, + match="Parameter requires a name.", + ): + params.add(dict(value=1)) with pytest.raises( ValueError, match="There is already a parameter with the name " @@ -156,6 +173,28 @@ def test_parameters_construction(self, parameter): ): params.remove(parameter_name=parameter) + @pytest.mark.unit + def test_parameters_naming(self, parameter): + params = pybop.Parameters(parameter) + param = params["Negative electrode active material volume fraction"] + assert param == parameter + + with pytest.raises( + ValueError, + match="is not the name of a parameter.", + ): + params["Positive electrode active material volume fraction"] + + @pytest.mark.unit + def test_parameters_update(self, parameter): + params = pybop.Parameters(parameter) + params.update(values=[0.5]) + assert parameter.value == 0.5 + params.update(bounds=[[0.38, 0.68]]) + assert parameter.bounds == [0.38, 0.68] + params.update(bounds=dict(lower=[0.37], upper=[0.7])) + assert parameter.bounds == [0.37, 0.7] + @pytest.mark.unit def test_get_sigma(self, parameter): params = pybop.Parameters(parameter) diff --git a/tests/unit/test_plots.py b/tests/unit/test_plots.py index b810e3f0e..57f0e4eef 100644 --- a/tests/unit/test_plots.py +++ b/tests/unit/test_plots.py @@ -88,6 +88,9 @@ def test_problem_plots(self, fitting_problem, design_problem): pybop.quick_plot(fitting_problem, title="Optimised Comparison") pybop.quick_plot(design_problem) + # Test conversion of values into inputs + pybop.quick_plot(fitting_problem, problem_inputs=[0.6, 0.6]) + @pytest.fixture def cost(self, fitting_problem): # Define an example cost diff --git a/tests/unit/test_standalone.py b/tests/unit/test_standalone.py index 026692011..2d5727b60 100644 --- a/tests/unit/test_standalone.py +++ b/tests/unit/test_standalone.py @@ -35,7 +35,8 @@ def test_optimisation_on_standalone_cost(self): optim = pybop.SciPyDifferentialEvolution(cost=cost) x, final_cost = optim.run() - initial_cost = optim.cost(cost.x0) + optim.x0 = optim.log["x"][0][0] + initial_cost = optim.cost(optim.x0) assert initial_cost > final_cost np.testing.assert_allclose(final_cost, 42, atol=1e-1)