From 10801fac96d38b264c76e6d2eb4522cb7b17432d Mon Sep 17 00:00:00 2001
From: Brady Planden
Date: Mon, 26 Feb 2024 13:39:05 +0000
Subject: [PATCH 1/9] Add Gaussian likelihoods, update optimisation cls for
 likelihood definition, add probability-based cost function, add MLE example

---
 examples/scripts/spm_IRPropMin.py |   1 +
 examples/scripts/spm_MLE.py       |  63 ++++++++++++
 pybop/__init__.py                 |  15 ++-
 pybop/_likelihoods.py             | 162 ++++++++++++++++++++++++++++++
 pybop/_optimisation.py            |  32 +++++-
 pybop/costs/base_cost.py          |  10 +-
 pybop/costs/fitting_costs.py      |  55 ++++++++++
 tests/unit/test_optimisation.py   |   5 +-
 8 files changed, 332 insertions(+), 11 deletions(-)
 create mode 100644 examples/scripts/spm_MLE.py
 create mode 100644 pybop/_likelihoods.py

diff --git a/examples/scripts/spm_IRPropMin.py b/examples/scripts/spm_IRPropMin.py
index a72bc92c8..52ae63dda 100644
--- a/examples/scripts/spm_IRPropMin.py
+++ b/examples/scripts/spm_IRPropMin.py
@@ -19,6 +19,7 @@
     ),
 ]

+# Generate data
 sigma = 0.001
 t_eval = np.arange(0, 900, 2)
 values = model.predict(t_eval=t_eval)
diff --git a/examples/scripts/spm_MLE.py b/examples/scripts/spm_MLE.py
new file mode 100644
index 000000000..da27198eb
--- /dev/null
+++ b/examples/scripts/spm_MLE.py
@@ -0,0 +1,63 @@
+import pybop
+import numpy as np
+
+# Define model
+parameter_set = pybop.ParameterSet.pybamm("Chen2020")
+model = pybop.lithium_ion.SPM(parameter_set=parameter_set)
+
+# Fitting parameters
+parameters = [
+    pybop.Parameter(
+        "Negative electrode active material volume fraction",
+        prior=pybop.Gaussian(0.6, 0.05),
+        bounds=[0.5, 0.8],
+    ),
+    pybop.Parameter(
+        "Positive electrode active material volume fraction",
+        prior=pybop.Gaussian(0.48, 0.05),
+        bounds=[0.4, 0.7],
+    ),
+]
+
+# Generate data
+sigma = 0.01
+t_eval = np.arange(0, 900, 2)
+values = model.predict(t_eval=t_eval)
+corrupt_values = values["Voltage [V]"].data + np.random.normal(0, sigma, len(t_eval))
+
+# Form dataset
+dataset = pybop.Dataset(
+    {
+        "Time [s]": t_eval,
+        "Current function [A]": values["Current [A]"].data,
+        "Voltage [V]": corrupt_values,
+    }
+)
+
+# Generate problem, cost function, and optimisation class
+problem = pybop.FittingProblem(model, parameters, dataset)
+likelihood = pybop.GaussianLogLikelihoodKnownSigma(problem, sigma=sigma)
+optim = pybop.Optimisation(likelihood, optimiser=pybop.CMAES)
+optim.set_max_unchanged_iterations(20)
+optim.set_min_iterations(20)
+optim.set_max_iterations(100)
+
+# Run the optimisation
+x, final_cost = optim.run()
+print("Estimated parameters:", x)
+
+# Plot the timeseries output
+pybop.quick_plot(x[0:2], likelihood, title="Optimised Comparison")
+
+# Plot convergence
+pybop.plot_convergence(optim)
+
+# Plot the parameter traces
+pybop.plot_parameters(optim)
+
+# Plot the cost landscape
+pybop.plot_cost2d(likelihood, steps=15)
+
+# Plot the cost landscape with optimisation path and updated bounds
+bounds = np.array([[0.55, 0.77], [0.48, 0.68]])
+pybop.plot_cost2d(likelihood, optim=optim, bounds=bounds, steps=15)
diff --git a/pybop/__init__.py b/pybop/__init__.py
index b623add9e..f4ef686ec 100644
--- a/pybop/__init__.py
+++ b/pybop/__init__.py
@@ -23,6 +23,16 @@
 # Absolute path to the pybop repo
 script_path = path.dirname(__file__)

+#
+# Problem class
+#
+from ._problem import BaseProblem, FittingProblem, DesignProblem
+
+#
+# Likelihood classes
+#
+from ._likelihoods import BaseLikelihood, GaussianLogLikelihood, GaussianLogLikelihoodKnownSigma
+
 #
 # Cost function class
 #
@@ -31,6 +41,7 @@
     RootMeanSquaredError,
     SumSquaredError,
     ObserverCost,
    
ProbabilityCost, ) from .costs.design_costs import ( DesignCost, @@ -84,10 +95,6 @@ from .parameters.parameter_set import ParameterSet from .parameters.priors import Gaussian, Uniform, Exponential -# -# Problem class -# -from ._problem import FittingProblem, DesignProblem # # Observer classes diff --git a/pybop/_likelihoods.py b/pybop/_likelihoods.py new file mode 100644 index 000000000..e2af337c9 --- /dev/null +++ b/pybop/_likelihoods.py @@ -0,0 +1,162 @@ +import numpy as np + + +class BaseLikelihood: + """ + Base class for likelihoods + """ + + def __init__(self, problem, sigma=None): + self.problem = problem + self._n_output = problem.n_outputs + self._n_times = problem.n_time_data + self.sigma = np.zeros(self._n_output) + self.x0 = problem.x0 + self.bounds = problem.bounds + self._n_parameters = problem.n_parameters + self._target = problem._target + + def __call__(self, x): + """ + Calls the problem.evaluate method and calculates + the log-likelihood + """ + raise NotImplementedError + + def update_sigma(self, sigma): + """ + Setter for sigma parameter + """ + self.sigma = sigma + if np.any(sigma) <= 0: + raise ValueError("Sigma must not be negative") + + def get_n_parameters(self): + """ + Returns the number of parameters + """ + return self._n_parameters + + +class GaussianLogLikelihoodKnownSigma(BaseLikelihood): + """ + This class represents a Gaussian Log Likelihood with a known signma, + which assumes that the data follows a Gaussian distribution and computes + the log-likelihood of observed data under this assumption. + + Attributes: + _logpi (float): Precomputed offset value for the log-likelihood function. + """ + + def __init__(self, problem, sigma=None): + super(GaussianLogLikelihoodKnownSigma, self).__init__(problem, sigma=sigma) + if sigma is not None: + self.update_sigma(sigma) + self._offset = -0.5 * self._n_times * np.log(2 * np.pi / self.sigma) + self._multip = -1 / (2.0 * self.sigma**2) + self._sigma2 = self.sigma**-2 + self._dl = np.ones(self._n_parameters) + + def __call__(self, x): + """ + Calls the problem.evaluate method and calculates + the log-likelihood + """ + e = self._target - self.problem.evaluate(x) + return np.sum(self._offset + self._multip * np.sum(e**2, axis=0)) + + def _evaluateS1(self, x): + """ + Calls the problem.evaluateS1 method and calculates + the log-likelihood + """ + + y, dy = self.problem.evaluateS1(x) + if len(y) < len(self._target): + likelihood = -np.float64(np.inf) + dl = self._dl * np.ones(self._n_parameters) + else: + dy = dy.reshape( + ( + self._n_times, + self._n_output, + self._n_parameters, + ) + ) + e = self._target - y + likelihood = np.sum(self._offset + self._multip * np.sum(e**2, axis=0)) + dl = np.sum((self._sigma2 * np.sum((e.T * dy.T), axis=2)), axis=1) + + return likelihood, dl + + +class GaussianLogLikelihood(BaseLikelihood): + """ + This class represents a Gaussian Log Likelihood, which assumes that the + data follows a Gaussian distribution and computes the log-likelihood of + observed data under this assumption. + + Attributes: + _logpi (float): Precomputed offset value for the log-likelihood function. + """ + + def __init__(self, problem): + super(GaussianLogLikelihood, self).__init__(problem) + self._logpi = -0.5 * self._n_times * np.log(2 * np.pi) + self._dl = np.ones(self._n_parameters) + + def __call__(self, x): + """ + Evaluates the Gaussian log-likelihood for the given parameters. + + Args: + x (array_like): The parameters for which to evaluate the log-likelihood. 
                The last `self._n_output` elements are assumed to be the
                standard deviations of the Gaussian distributions.

        Returns:
            float: The log-likelihood value, or -inf if the standard deviations are received as non-positive.
        """
        sigma = np.asarray(x[-self._n_output :])

        if any(sigma <= 0):
            return -np.inf

        e = self._target - self.problem.evaluate(x[: -self._n_output])
        return np.sum(
            self._logpi
            - self._n_times * np.log(sigma)
            - np.sum(e**2, axis=0) / (2.0 * sigma**2)
        )

    def _evaluateS1(self, x):
        """
        Calls the problem.evaluateS1 method and calculates
        the log-likelihood
        """
        sigma = np.asarray(x[-self._n_output :])

        if any(sigma <= 0):
            return -np.inf

        y, dy = self.problem.evaluateS1(x[: -self._n_output])
        if len(y) < len(self._target):
            likelihood = -np.float64(np.inf)
            dl = self._dl * np.ones(self._n_parameters)
        else:
            dy = dy.reshape(
                (
                    self._n_times,
                    self._n_output,
                    self._n_parameters,
                )
            )
            e = self._target - y
            likelihood = self.__call__(x)
            dl = np.sum((sigma**-(2.0) * np.sum((e.T * dy.T), axis=2)), axis=1)

            # Add sigma gradient to dl
            dsigma = -self._n_times / sigma + sigma**-(3.0) * np.sum(e**2, axis=0)
            dl = np.concatenate((dl, np.array(list(dsigma))))

        return likelihood, dl
diff --git a/pybop/_optimisation.py b/pybop/_optimisation.py
index cc367ccc5..33a17b075 100644
--- a/pybop/_optimisation.py
+++ b/pybop/_optimisation.py
@@ -40,6 +40,7 @@ class Optimisation:
     def __init__(
         self,
         cost,
+        x0=None,
         optimiser=None,
         sigma0=None,
         verbose=False,
@@ -47,9 +48,9 @@ def __init__(
         allow_infeasible_solutions=True,
     ):
         self.cost = cost
+        self.x0 = x0
         self.optimiser = optimiser
         self.verbose = verbose
-        self.x0 = cost.x0
         self.bounds = cost.bounds
         self.n_parameters = cost.n_parameters
         self.sigma0 = sigma0
@@ -57,7 +58,9 @@ def __init__(
         self.allow_infeasible_solutions = allow_infeasible_solutions
         self.log = []

-        # Convert x0 to pints vector
+        # Catch x0, and convert to pints vector
+        if x0 is None:
+            self.x0 = cost.x0
         self._x0 = pints.vector(self.x0)

         # Set whether to allow infeasible locations
@@ -74,11 +77,11 @@ def __init__(
         self._transformation = None

         # Check if minimising or maximising
-        self._minimising = not isinstance(cost, pints.LogPDF)
+        self._minimising = not isinstance(cost, pybop.BaseLikelihood)
         if self._minimising:
             self._function = self.cost
         else:
-            self._function = pints.ProbabilityBasedError(cost)
+            self._function = pybop.ProbabilityCost(cost)
         del cost

         # Construct Optimiser
@@ -122,6 +125,10 @@ def __init__(
         self._max_iterations = None
         self.set_max_iterations()

+        # Minimum iterations
+        self._min_iterations = None
+        self.set_min_iterations()
+
         # Maximum unchanged iterations
         self._unchanged_threshold = 1  # smallest significant f change
         self._unchanged_max_iterations = None
@@ -290,6 +297,7 @@ def _run_pints(self):
            halt = (
                self._unchanged_max_iterations is not None
                and unchanged_iterations >= self._unchanged_max_iterations
+                and iteration >= self._min_iterations
            )
            if running and halt:
                running = False
@@ -449,6 +457,22 @@ def set_max_iterations(self, iterations=1000):
            raise ValueError("Maximum number of iterations cannot be negative.")
        self._max_iterations = iterations

+    def set_min_iterations(self, iterations=2):
+        """
+        Set the minimum number of iterations as a stopping criterion.
+
+        Parameters
+        ----------
+        iterations : int, optional
+            The minimum number of iterations to run (default is 2).
+            Set to `None` to remove this stopping criterion.
+ """ + if iterations is not None: + iterations = int(iterations) + if iterations < 0: + raise ValueError("Minimum number of iterations cannot be negative.") + self._min_iterations = iterations + def set_max_unchanged_iterations(self, iterations=5, threshold=1e-5): """ Set the maximum number of iterations without significant change as a stopping criterion. diff --git a/pybop/costs/base_cost.py b/pybop/costs/base_cost.py index 171e09b37..72bfc79cb 100644 --- a/pybop/costs/base_cost.py +++ b/pybop/costs/base_cost.py @@ -1,3 +1,7 @@ +from pybop import BaseProblem +from pybop import BaseLikelihood + + class BaseCost: """ Base class for defining cost functions. @@ -24,14 +28,16 @@ class BaseCost: The number of outputs in the model. """ - def __init__(self, problem): + def __init__(self, problem=None): self.problem = problem - if problem is not None: + if isinstance(self.problem, BaseProblem): self._target = problem._target self.x0 = problem.x0 self.bounds = problem.bounds self.n_parameters = problem.n_parameters self.n_outputs = problem.n_outputs + elif isinstance(self.problem, BaseLikelihood): + self.log_likelihood = problem def __call__(self, x, grad=None): """ diff --git a/pybop/costs/fitting_costs.py b/pybop/costs/fitting_costs.py index 8db3111c6..5bd4aa4da 100644 --- a/pybop/costs/fitting_costs.py +++ b/pybop/costs/fitting_costs.py @@ -190,6 +190,61 @@ def set_fail_gradient(self, de): self._de = de +class ProbabilityCost(BaseCost): + """ + Probability based cost function. + + Changes the sign of the log likelihood to make it a cost function. + + Inherits all parameters and attributes from ``BaseCost``. + """ + + def __init__(self, log_likelihood): + super(ProbabilityCost, self).__init__(log_likelihood) + + def _evaluate(self, x, grad=None): + """ + Calculate the probability based cost for a given set of parameters. + + Parameters + ---------- + x : array-like + The parameters for which to evaluate the cost. + grad : array-like, optional + An array to store the gradient of the cost function with respect + to the parameters. + + Returns + ------- + float + The probability based cost. + """ + return -self.log_likelihood(x) + + def _evaluateS1(self, x): + """ + Compute the cost and its gradient with respect to the parameters. + + Parameters + ---------- + x : array-like + The parameters for which to compute the cost and gradient. + + Returns + ------- + tuple + A tuple containing the cost and the gradient. The cost is a float, + and the gradient is an array-like of the same length as `x`. + + Raises + ------ + ValueError + If an error occurs during the calculation of the cost or gradient. + """ + likelihood, dl = self.log_likelihood._evaluateS1(x) + return -likelihood, -dl + + class ObserverCost(BaseCost): """ Observer cost function. 
diff --git a/tests/unit/test_optimisation.py b/tests/unit/test_optimisation.py
index 6569d1ad5..6141a7f96 100644
--- a/tests/unit/test_optimisation.py
+++ b/tests/unit/test_optimisation.py
@@ -111,12 +111,15 @@ def test_halting(self, cost):
         # Test max unchanged iterations
         optim = pybop.Optimisation(cost=cost, optimiser=pybop.GradientDescent)
         optim.set_max_unchanged_iterations(1)
+        optim.set_min_iterations(3)
         x, __ = optim.run()
         assert optim._iterations == 2

-        # Test invalid maximum values
+        # Test invalid values
         with pytest.raises(ValueError):
             optim.set_max_evaluations(-1)
+        with pytest.raises(ValueError):
+            optim.set_min_iterations(-1)
         with pytest.raises(ValueError):
             optim.set_max_unchanged_iterations(-1)
         with pytest.raises(ValueError):

From b2fd01a0ffa2e2b30aca490f5b79ff441e94fc9e Mon Sep 17 00:00:00 2001
From: Brady Planden
Date: Mon, 26 Feb 2024 15:47:06 +0000
Subject: [PATCH 2/9] fix mistyped min_iterations

---
 tests/unit/test_optimisation.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/unit/test_optimisation.py b/tests/unit/test_optimisation.py
index 6141a7f96..b49d9461c 100644
--- a/tests/unit/test_optimisation.py
+++ b/tests/unit/test_optimisation.py
@@ -111,7 +111,7 @@ def test_halting(self, cost):
         # Test max unchanged iterations
         optim = pybop.Optimisation(cost=cost, optimiser=pybop.GradientDescent)
         optim.set_max_unchanged_iterations(1)
-        optim.set_min_iterations(3)
+        optim.set_min_iterations(1)
         x, __ = optim.run()
         assert optim._iterations == 2

From b7b34bbac6ecf629f56b799fd6729ed4b5d0c54e Mon Sep 17 00:00:00 2001
From: Brady Planden
Date: Mon, 26 Feb 2024 16:03:36 +0000
Subject: [PATCH 3/9] add property decorator for private n_parameters

---
 pybop/_likelihoods.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/pybop/_likelihoods.py b/pybop/_likelihoods.py
index e2af337c9..959db3e25 100644
--- a/pybop/_likelihoods.py
+++ b/pybop/_likelihoods.py
@@ -37,6 +37,10 @@ def get_n_parameters(self):
         """
         return self._n_parameters

+    @property
+    def n_parameters(self):
+        return self._n_parameters
+

From e5396d60cb922481c61014aad18b132e6df31b54 Mon Sep 17 00:00:00 2001
From: Brady Planden
Date: Tue, 5 Mar 2024 09:17:05 +0000
Subject: [PATCH 4/9] _sigma -> sigma0, updt tests

---
 pybop/_likelihoods.py          | 16 ++++++++--------
 tests/unit/test_likelihoods.py |  2 +-
 2 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/pybop/_likelihoods.py b/pybop/_likelihoods.py
index 2d193564b..b711f94fd 100644
--- a/pybop/_likelihoods.py
+++ b/pybop/_likelihoods.py
@@ -10,7 +10,7 @@ def __init__(self, problem, sigma=None):
         self.problem = problem
         self._n_output = problem.n_outputs
         self._n_times = problem.n_time_data
-        self._sigma = sigma or np.zeros(self._n_output)
+        self.sigma0 = sigma or np.zeros(self._n_output)
         self.x0 = problem.x0
         self.bounds = problem.bounds
         self._n_parameters = problem.n_parameters
@@ -30,13 +30,13 @@ def set_sigma(self, sigma):
         if np.any(sigma <= 0):
             raise ValueError("Sigma must not be negative")
         else:
-            self._sigma = sigma
+            self.sigma0 = sigma

     def get_sigma(self):
         """
         Getter for sigma parameter
         """
-        return self._sigma
+        return self.sigma0

     def get_n_parameters(self):
         """
@@ -51,7 +51,7 @@ def n_parameters(self):

 class GaussianLogLikelihoodKnownSigma(BaseLikelihood):
     """
-    This class represents a Gaussian Log Likelihood with a known signma,
+    This class represents a Gaussian Log Likelihood with a known sigma,
     which assumes that the data follows a Gaussian distribution and computes the
log-likelihood of observed data under this assumption. @@ -63,9 +63,9 @@ def __init__(self, problem, sigma=None): super(GaussianLogLikelihoodKnownSigma, self).__init__(problem, sigma=sigma) if sigma is not None: self.set_sigma(sigma) - self._offset = -0.5 * self._n_times * np.log(2 * np.pi / self._sigma) - self._multip = -1 / (2.0 * self._sigma**2) - self._sigma2 = self._sigma**-2 + self._offset = -0.5 * self._n_times * np.log(2 * np.pi / self.sigma0) + self._multip = -1 / (2.0 * self.sigma0**2) + self.sigma2 = self.sigma0**-2 self._dl = np.ones(self._n_parameters) def __call__(self, x): @@ -96,7 +96,7 @@ def _evaluateS1(self, x): ) e = self._target - y likelihood = np.sum(self._offset + self._multip * np.sum(e**2, axis=0)) - dl = np.sum((self._sigma2 * np.sum((e.T * dy.T), axis=2)), axis=1) + dl = np.sum((self.sigma2 * np.sum((e.T * dy.T), axis=2)), axis=1) return likelihood, dl diff --git a/tests/unit/test_likelihoods.py b/tests/unit/test_likelihoods.py index 514b51fdc..50df84899 100644 --- a/tests/unit/test_likelihoods.py +++ b/tests/unit/test_likelihoods.py @@ -69,7 +69,7 @@ def test_base_likelihood_init(self, problem): assert likelihood.problem == problem assert likelihood._n_output == 1 assert likelihood._n_times == problem.n_time_data - assert np.array_equal(likelihood._sigma, np.array([0.2])) + assert np.array_equal(likelihood.get_sigma(), np.array([0.2])) assert likelihood.x0 == problem.x0 assert likelihood.bounds == problem.bounds assert likelihood._n_parameters == 1 From 56e62f8f27b03f7944859d2ba1fa32dcdd6fc60b Mon Sep 17 00:00:00 2001 From: Brady Planden Date: Tue, 5 Mar 2024 12:35:29 +0000 Subject: [PATCH 5/9] Add ProbabilityBased cost tests, Updt. changelog --- CHANGELOG.md | 5 +++-- tests/unit/test_cost.py | 16 ++++++++++++---- 2 files changed, 15 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4e50fbb17..637b5459a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,8 +2,9 @@ ## Features -- [#215](https://github.com/pybop-team/PyBOP/pull/215) - Adds `release_workflow.md` and updates `release_action.yaml` -- [#204](https://github.com/pybop-team/PyBOP/pull/204) - Splits integration, unit, examples, plots tests, update workflows. Adds pytest `--examples`, `--integration`, `--plots` args. Adds tests for coverage after removal of examples. Adds examples and integrations nox sessions. Adds `pybop.RMSE._evaluateS1()` method +- [#218](https://github.com/pybop-team/PyBOP/pull/218) - Adds likelihood base class, `GaussianLogLikelihoodKnownSigma`, `GaussianLogLikelihood`, and `ProbabilityBased` cost function. As well as addition of a maximum likelihood estimation (MLE) example. +- [#215](https://github.com/pybop-team/PyBOP/pull/215) - Adds `release_workflow.md` and updates `release_action.yaml`. +- [#204](https://github.com/pybop-team/PyBOP/pull/204) - Splits integration, unit, examples, plots tests, update workflows. Adds pytest `--examples`, `--integration`, `--plots` args. Adds tests for coverage after removal of examples. Adds examples and integrations nox sessions. Adds `pybop.RMSE._evaluateS1()` method. - [#206](https://github.com/pybop-team/PyBOP/pull/206) - Adds Python 3.12 support with corresponding github actions changes. - [#18](https://github.com/pybop-team/PyBOP/pull/18) - Adds geometric parameter fitting capability, via `model.rebuild()` with `model.rebuild_parameters`. 
- [#203](https://github.com/pybop-team/PyBOP/pull/203) - Adds support for modern Python packaging via a `pyproject.toml` file and configures the `pytest` test runner and `ruff` linter to use their configurations stored as declarative metadata. diff --git a/tests/unit/test_cost.py b/tests/unit/test_cost.py index aa127ffd4..a2b21fe3c 100644 --- a/tests/unit/test_cost.py +++ b/tests/unit/test_cost.py @@ -66,13 +66,21 @@ def problem(self, model, parameters, dataset, signal, x0, request): return problem @pytest.fixture( - params=[pybop.RootMeanSquaredError, pybop.SumSquaredError, pybop.ObserverCost] + params=[ + pybop.RootMeanSquaredError, + pybop.SumSquaredError, + pybop.ObserverCost, + pybop.ProbabilityCost, + ] ) def cost(self, problem, request): cls = request.param - if cls == pybop.RootMeanSquaredError or cls == pybop.SumSquaredError: + if cls in [pybop.SumSquaredError, pybop.RootMeanSquaredError]: return cls(problem) - elif cls == pybop.ObserverCost: + elif cls in [pybop.ProbabilityCost]: + likelihood = pybop.GaussianLogLikelihoodKnownSigma(problem, sigma=0.01) + return cls(likelihood) + elif cls in [pybop.ObserverCost]: inputs = {p.name: problem.x0[i] for i, p in enumerate(problem.parameters)} state = problem._model.reinit(inputs) n = len(state) @@ -133,7 +141,7 @@ def test_costs(self, cost): with pytest.warns(UserWarning) as record: cost([1.1]) - if isinstance(cost, pybop.SumSquaredError): + if cost in [pybop.SumSquaredError, pybop.ProbabilityCost]: e, de = cost.evaluateS1([0.5]) assert type(e) == np.float64 From 8d5873127f7efdec915ac2220f04c0e12231e35e Mon Sep 17 00:00:00 2001 From: Brady Planden Date: Tue, 5 Mar 2024 13:06:14 +0000 Subject: [PATCH 6/9] updt cost tests, add set_fail_gradient to RootMeanSquared cost function --- pybop/costs/fitting_costs.py | 18 ++++++++++++++++++ tests/unit/test_cost.py | 8 ++++---- 2 files changed, 22 insertions(+), 4 deletions(-) diff --git a/pybop/costs/fitting_costs.py b/pybop/costs/fitting_costs.py index 5bd4aa4da..80d72847e 100644 --- a/pybop/costs/fitting_costs.py +++ b/pybop/costs/fitting_costs.py @@ -19,6 +19,9 @@ class RootMeanSquaredError(BaseCost): def __init__(self, problem): super(RootMeanSquaredError, self).__init__(problem) + # Default fail gradient + self._de = 1.0 + def _evaluate(self, x, grad=None): """ Calculate the root mean square error for a given set of parameters. @@ -84,6 +87,21 @@ def _evaluateS1(self, x): return e, de.flatten() + def set_fail_gradient(self, de): + """ + Set the fail gradient to a specified value. + + The fail gradient is used if an error occurs during the calculation + of the gradient. This method allows updating the default gradient value. + + Parameters + ---------- + de : float + The new fail gradient value to be used. 
+ """ + de = float(de) + self._de = de + class SumSquaredError(BaseCost): """ diff --git a/tests/unit/test_cost.py b/tests/unit/test_cost.py index a2b21fe3c..02504bf94 100644 --- a/tests/unit/test_cost.py +++ b/tests/unit/test_cost.py @@ -141,15 +141,15 @@ def test_costs(self, cost): with pytest.warns(UserWarning) as record: cost([1.1]) - if cost in [pybop.SumSquaredError, pybop.ProbabilityCost]: + # Test option setting + cost.set_fail_gradient(1) + + if isinstance(cost, (pybop.SumSquaredError, pybop.ProbabilityCost)): e, de = cost.evaluateS1([0.5]) assert type(e) == np.float64 assert type(de) == np.ndarray - # Test option setting - cost.set_fail_gradient(1) - # Test exception for non-numeric inputs with pytest.raises(ValueError): cost.evaluateS1(["StringInputShouldNotWork"]) From 0b530480492529fc9ea0c93a99f32cbf5eb43cee Mon Sep 17 00:00:00 2001 From: NicolaCourtier <45851982+NicolaCourtier@users.noreply.github.com> Date: Fri, 8 Mar 2024 15:53:27 +0000 Subject: [PATCH 7/9] Make likelihoods a type of cost (#230) * Move _likelihoods.py to /costs * Make BaseLikelihood a type of cost * Update test_cost * Update n_parameters to _n_parameters * Move n_parameters property * Change _minimising to Boolean * Update _likelihoods.py --- examples/standalone/cost.py | 4 +- pybop/__init__.py | 11 +++--- pybop/_optimisation.py | 21 ++++------- pybop/{ => costs}/_likelihoods.py | 30 +++++---------- pybop/costs/base_cost.py | 28 ++++++++++---- pybop/costs/fitting_costs.py | 63 ++----------------------------- tests/unit/test_cost.py | 6 +-- tests/unit/test_likelihoods.py | 6 --- tests/unit/test_standalone.py | 2 +- 9 files changed, 51 insertions(+), 120 deletions(-) rename pybop/{ => costs}/_likelihoods.py (89%) diff --git a/examples/standalone/cost.py b/examples/standalone/cost.py index 9836a7e7d..d632d3f1d 100644 --- a/examples/standalone/cost.py +++ b/examples/standalone/cost.py @@ -18,7 +18,7 @@ class StandaloneCost(pybop.BaseCost): BaseCost interface. x0 : array-like The initial guess for the optimization problem, set to [4.2]. - n_parameters : int + _n_parameters : int The number of parameters in the model, which is 1 in this case. bounds : dict A dictionary containing the lower and upper bounds for the parameter, @@ -40,7 +40,7 @@ def __init__(self, problem=None): super().__init__(problem) self.x0 = np.array([4.2]) - self.n_parameters = len(self.x0) + self._n_parameters = len(self.x0) self.bounds = dict( lower=[-1], diff --git a/pybop/__init__.py b/pybop/__init__.py index f4ef686ec..236c15012 100644 --- a/pybop/__init__.py +++ b/pybop/__init__.py @@ -28,11 +28,6 @@ # from ._problem import BaseProblem, FittingProblem, DesignProblem -# -# Likelihood classes -# -from ._likelihoods import BaseLikelihood, GaussianLogLikelihood, GaussianLogLikelihoodKnownSigma - # # Cost function class # @@ -41,13 +36,17 @@ RootMeanSquaredError, SumSquaredError, ObserverCost, - ProbabilityCost, ) from .costs.design_costs import ( DesignCost, GravimetricEnergyDensity, VolumetricEnergyDensity, ) +from .costs._likelihoods import ( + BaseLikelihood, + GaussianLogLikelihood, + GaussianLogLikelihoodKnownSigma, +) # # Dataset class diff --git a/pybop/_optimisation.py b/pybop/_optimisation.py index 5b2568ae5..35f3f11ba 100644 --- a/pybop/_optimisation.py +++ b/pybop/_optimisation.py @@ -29,7 +29,7 @@ class Optimisation: Initial parameter values for the optimization. bounds : dict Dictionary containing the parameter bounds with keys 'lower' and 'upper'. 
- n_parameters : int + _n_parameters : int Number of parameters in the optimization problem. sigma0 : float or sequence Initial step size or standard deviation for the optimiser. @@ -40,7 +40,6 @@ class Optimisation: def __init__( self, cost, - x0=None, optimiser=None, sigma0=None, verbose=False, @@ -48,19 +47,17 @@ def __init__( allow_infeasible_solutions=True, ): self.cost = cost - self.x0 = x0 + self.x0 = cost.x0 self.optimiser = optimiser self.verbose = verbose self.bounds = cost.bounds self.sigma0 = sigma0 or cost.sigma0 - self.n_parameters = cost.n_parameters + self._n_parameters = cost._n_parameters self.physical_viability = physical_viability self.allow_infeasible_solutions = allow_infeasible_solutions self.log = [] - # Catch x0, and convert to pints vector - if x0 is None: - self.x0 = cost.x0 + # Convert x0 to pints vector self._x0 = pints.vector(self.x0) # Set whether to allow infeasible locations @@ -77,12 +74,10 @@ def __init__( self._transformation = None # Check if minimising or maximising - self._minimising = not isinstance(cost, pybop.BaseLikelihood) - if self._minimising: - self._function = self.cost - else: - self._function = pybop.ProbabilityCost(cost) - del cost + if isinstance(cost, pybop.BaseLikelihood): + self.cost._minimising = False + self._minimising = self.cost._minimising + self._function = self.cost # Construct Optimiser self.pints = True diff --git a/pybop/_likelihoods.py b/pybop/costs/_likelihoods.py similarity index 89% rename from pybop/_likelihoods.py rename to pybop/costs/_likelihoods.py index b711f94fd..cc4c63251 100644 --- a/pybop/_likelihoods.py +++ b/pybop/costs/_likelihoods.py @@ -1,27 +1,19 @@ import numpy as np +from pybop.costs.base_cost import BaseCost -class BaseLikelihood: +class BaseLikelihood(BaseCost): """ Base class for likelihoods """ def __init__(self, problem, sigma=None): - self.problem = problem + super(BaseLikelihood, self).__init__(problem) self._n_output = problem.n_outputs self._n_times = problem.n_time_data self.sigma0 = sigma or np.zeros(self._n_output) - self.x0 = problem.x0 - self.bounds = problem.bounds self._n_parameters = problem.n_parameters - self._target = problem._target - - def __call__(self, x): - """ - Calls the problem.evaluate method and calculates - the log-likelihood - """ - raise NotImplementedError + self.log_likelihood = problem def set_sigma(self, sigma): """ @@ -44,10 +36,6 @@ def get_n_parameters(self): """ return self._n_parameters - @property - def n_parameters(self): - return self._n_parameters - class GaussianLogLikelihoodKnownSigma(BaseLikelihood): """ @@ -68,7 +56,7 @@ def __init__(self, problem, sigma=None): self.sigma2 = self.sigma0**-2 self._dl = np.ones(self._n_parameters) - def __call__(self, x): + def _evaluate(self, x, grad=None): """ Calls the problem.evaluate method and calculates the log-likelihood @@ -76,7 +64,7 @@ def __call__(self, x): e = self._target - self.problem.evaluate(x) return np.sum(self._offset + self._multip * np.sum(e**2, axis=0)) - def _evaluateS1(self, x): + def _evaluateS1(self, x, grad=None): """ Calls the problem.evaluateS1 method and calculates the log-likelihood @@ -116,7 +104,7 @@ def __init__(self, problem): self._logpi = -0.5 * self._n_times * np.log(2 * np.pi) self._dl = np.ones(self._n_parameters + self._n_output) - def __call__(self, x): + def _evaluate(self, x, grad=None): """ Evaluates the Gaussian log-likelihood for the given parameters. 
@@ -140,7 +128,7 @@ def __call__(self, x): - np.sum(e**2, axis=0) / (2.0 * sigma**2) ) - def _evaluateS1(self, x): + def _evaluateS1(self, x, grad=None): """ Calls the problem.evaluateS1 method and calculates the log-likelihood @@ -163,7 +151,7 @@ def _evaluateS1(self, x): ) ) e = self._target - y - likelihood = self.__call__(x) + likelihood = self._evaluate(x) dl = np.sum((sigma**-(2.0) * np.sum((e.T * dy.T), axis=2)), axis=1) # Add sigma gradient to dl diff --git a/pybop/costs/base_cost.py b/pybop/costs/base_cost.py index ab77e4377..6b22f0907 100644 --- a/pybop/costs/base_cost.py +++ b/pybop/costs/base_cost.py @@ -1,5 +1,4 @@ from pybop import BaseProblem -from pybop import BaseLikelihood class BaseCost: @@ -26,7 +25,7 @@ class BaseCost: Initial standard deviation around ``x0``. Either a scalar value (one standard deviation for all coordinates) or an array with one entry per dimension. Not all methods will use this information. - n_parameters : int + _n_parameters : int The number of parameters in the model. n_outputs : int The number of outputs in the model. @@ -37,19 +36,28 @@ def __init__(self, problem=None): self.x0 = None self.bounds = None self.sigma0 = None + self._minimising = True if isinstance(self.problem, BaseProblem): self._target = problem._target self.x0 = problem.x0 self.bounds = problem.bounds self.sigma0 = problem.sigma0 - self.n_parameters = problem.n_parameters + self._n_parameters = problem.n_parameters self.n_outputs = problem.n_outputs - elif isinstance(self.problem, BaseLikelihood): - self.log_likelihood = problem + + @property + def n_parameters(self): + return self._n_parameters def __call__(self, x, grad=None): """ Call the evaluate function for a given set of parameters. + """ + return self.evaluate(x, grad) + + def evaluate(self, x, grad=None): + """ + Call the evaluate function for a given set of parameters. Parameters ---------- @@ -70,7 +78,10 @@ def __call__(self, x, grad=None): If an error occurs during the calculation of the cost. """ try: - return self._evaluate(x, grad) + if self._minimising: + return self._evaluate(x, grad) + else: # minimise the negative cost + return -self._evaluate(x, grad) except NotImplementedError as e: raise e @@ -125,7 +136,10 @@ def evaluateS1(self, x): If an error occurs during the calculation of the cost or gradient. 
""" try: - return self._evaluateS1(x) + if self._minimising: + return self._evaluateS1(x) + else: # minimise the negative cost + return -self._evaluateS1(x) except NotImplementedError as e: raise e diff --git a/pybop/costs/fitting_costs.py b/pybop/costs/fitting_costs.py index 80d72847e..a0ebc664f 100644 --- a/pybop/costs/fitting_costs.py +++ b/pybop/costs/fitting_costs.py @@ -70,13 +70,13 @@ def _evaluateS1(self, x): y, dy = self.problem.evaluateS1(x) if len(y) < len(self._target): e = np.float64(np.inf) - de = self._de * np.ones(self.n_parameters) + de = self._de * np.ones(self._n_parameters) else: dy = dy.reshape( ( self.problem.n_time_data, self.n_outputs, - self.n_parameters, + self._n_parameters, ) ) r = y - self._target @@ -177,13 +177,13 @@ def _evaluateS1(self, x): y, dy = self.problem.evaluateS1(x) if len(y) < len(self._target): e = np.float64(np.inf) - de = self._de * np.ones(self.n_parameters) + de = self._de * np.ones(self._n_parameters) else: dy = dy.reshape( ( self.problem.n_time_data, self.n_outputs, - self.n_parameters, + self._n_parameters, ) ) r = y - self._target @@ -208,61 +208,6 @@ def set_fail_gradient(self, de): self._de = de -class ProbabilityCost(BaseCost): - """ - Probability based cost function. - - Changes the sign of the log likelihood to make it a cost function. - - Inherits all parameters and attributes from ``BaseCost``. - """ - - def __init__(self, log_likelihood): - super(ProbabilityCost, self).__init__(log_likelihood) - - def _evaluate(self, x, grad=None): - """ - Calculate the probability based cost for a given set of parameters. - - Parameters - ---------- - x : array-like - The parameters for which to evaluate the cost. - grad : array-like, optional - An array to store the gradient of the cost function with respect - to the parameters. - - Returns - ------- - float - The probability based cost. - """ - return -self.log_likelihood(x) - - def _evaluateS1(self, x): - """ - Compute the cost and its gradient with respect to the parameters. - - Parameters - ---------- - x : array-like - The parameters for which to compute the cost and gradient. - - Returns - ------- - tuple - A tuple containing the cost and the gradient. The cost is a float, - and the gradient is an array-like of the same length as `x`. - - Raises - ------ - ValueError - If an error occurs during the calculation of the cost or gradient. - """ - likelihood, dl = self.log_likelihood._evaluateS1(x) - return -likelihood, -dl - - class ObserverCost(BaseCost): """ Observer cost function. 
diff --git a/tests/unit/test_cost.py b/tests/unit/test_cost.py index 02504bf94..49e3d4ee7 100644 --- a/tests/unit/test_cost.py +++ b/tests/unit/test_cost.py @@ -70,16 +70,12 @@ def problem(self, model, parameters, dataset, signal, x0, request): pybop.RootMeanSquaredError, pybop.SumSquaredError, pybop.ObserverCost, - pybop.ProbabilityCost, ] ) def cost(self, problem, request): cls = request.param if cls in [pybop.SumSquaredError, pybop.RootMeanSquaredError]: return cls(problem) - elif cls in [pybop.ProbabilityCost]: - likelihood = pybop.GaussianLogLikelihoodKnownSigma(problem, sigma=0.01) - return cls(likelihood) elif cls in [pybop.ObserverCost]: inputs = {p.name: problem.x0[i] for i, p in enumerate(problem.parameters)} state = problem._model.reinit(inputs) @@ -144,7 +140,7 @@ def test_costs(self, cost): # Test option setting cost.set_fail_gradient(1) - if isinstance(cost, (pybop.SumSquaredError, pybop.ProbabilityCost)): + if isinstance(cost, pybop.SumSquaredError): e, de = cost.evaluateS1([0.5]) assert type(e) == np.float64 diff --git a/tests/unit/test_likelihoods.py b/tests/unit/test_likelihoods.py index 50df84899..5ba34efec 100644 --- a/tests/unit/test_likelihoods.py +++ b/tests/unit/test_likelihoods.py @@ -75,12 +75,6 @@ def test_base_likelihood_init(self, problem): assert likelihood._n_parameters == 1 assert np.array_equal(likelihood._target, problem._target) - @pytest.mark.unit - def test_base_likelihood_call_raises_not_implemented_error(self, problem): - likelihood = pybop.BaseLikelihood(problem) - with pytest.raises(NotImplementedError): - likelihood(np.array([0.5, 0.5])) - @pytest.mark.unit def test_base_likelihood_set_get_sigma(self, problem): likelihood = pybop.BaseLikelihood(problem) diff --git a/tests/unit/test_standalone.py b/tests/unit/test_standalone.py index 4ba611a96..430dc925c 100644 --- a/tests/unit/test_standalone.py +++ b/tests/unit/test_standalone.py @@ -17,7 +17,7 @@ def test_standalone(self): opt = pybop.Optimisation(cost=cost, optimiser=pybop.SciPyDifferentialEvolution) x, final_cost = opt.run() - assert len(opt.x0) == opt.n_parameters + assert len(opt.x0) == opt._n_parameters np.testing.assert_allclose(x, 0, atol=1e-2) np.testing.assert_allclose(final_cost, 42, atol=1e-2) From fd49cf631fc0ebda6226c28fe9200219414b4cd3 Mon Sep 17 00:00:00 2001 From: Brady Planden Date: Fri, 8 Mar 2024 16:21:35 +0000 Subject: [PATCH 8/9] Updt tests, add optional x0 arg to optimisation cls, remove redundant __init__ variables from baselikelihood, updt basecost to align with baselikelihood args --- pybop/_optimisation.py | 3 ++- pybop/costs/_likelihoods.py | 22 +++++++++------------- pybop/costs/base_cost.py | 7 ++++--- tests/unit/test_likelihoods.py | 8 +++++++- 4 files changed, 22 insertions(+), 18 deletions(-) diff --git a/pybop/_optimisation.py b/pybop/_optimisation.py index 35f3f11ba..3eb839ef8 100644 --- a/pybop/_optimisation.py +++ b/pybop/_optimisation.py @@ -40,6 +40,7 @@ class Optimisation: def __init__( self, cost, + x0=None, optimiser=None, sigma0=None, verbose=False, @@ -47,7 +48,7 @@ def __init__( allow_infeasible_solutions=True, ): self.cost = cost - self.x0 = cost.x0 + self.x0 = x0 or cost.x0 self.optimiser = optimiser self.verbose = verbose self.bounds = cost.bounds diff --git a/pybop/costs/_likelihoods.py b/pybop/costs/_likelihoods.py index cc4c63251..b6c2611ed 100644 --- a/pybop/costs/_likelihoods.py +++ b/pybop/costs/_likelihoods.py @@ -8,12 +8,8 @@ class BaseLikelihood(BaseCost): """ def __init__(self, problem, sigma=None): - super(BaseLikelihood, 
self).__init__(problem) - self._n_output = problem.n_outputs + super(BaseLikelihood, self).__init__(problem, sigma=sigma) self._n_times = problem.n_time_data - self.sigma0 = sigma or np.zeros(self._n_output) - self._n_parameters = problem.n_parameters - self.log_likelihood = problem def set_sigma(self, sigma): """ @@ -78,7 +74,7 @@ def _evaluateS1(self, x, grad=None): dy = dy.reshape( ( self._n_times, - self._n_output, + self.n_outputs, self._n_parameters, ) ) @@ -102,7 +98,7 @@ class GaussianLogLikelihood(BaseLikelihood): def __init__(self, problem): super(GaussianLogLikelihood, self).__init__(problem) self._logpi = -0.5 * self._n_times * np.log(2 * np.pi) - self._dl = np.ones(self._n_parameters + self._n_output) + self._dl = np.ones(self._n_parameters + self.n_outputs) def _evaluate(self, x, grad=None): """ @@ -110,18 +106,18 @@ def _evaluate(self, x, grad=None): Args: x (array_like): The parameters for which to evaluate the log-likelihood. - The last `self._n_output` elements are assumed to be the + The last `self.n_outputs` elements are assumed to be the standard deviations of the Gaussian distributions. Returns: float: The log-likelihood value, or -inf if the standard deviations are received as non-positive. """ - sigma = np.asarray(x[-self._n_output :]) + sigma = np.asarray(x[-self.n_outputs :]) if np.any(sigma <= 0): return -np.inf - e = self._target - self.problem.evaluate(x[: -self._n_output]) + e = self._target - self.problem.evaluate(x[: -self.n_outputs]) return np.sum( self._logpi - self._n_times * np.log(sigma) @@ -133,12 +129,12 @@ def _evaluateS1(self, x, grad=None): Calls the problem.evaluateS1 method and calculates the log-likelihood """ - sigma = np.asarray(x[-self._n_output :]) + sigma = np.asarray(x[-self.n_outputs :]) if np.any(sigma <= 0): return -np.inf, self._dl - y, dy = self.problem.evaluateS1(x[: -self._n_output]) + y, dy = self.problem.evaluateS1(x[: -self.n_outputs]) if len(y) < len(self._target): likelihood = -np.float64(np.inf) dl = self._dl @@ -146,7 +142,7 @@ def _evaluateS1(self, x, grad=None): dy = dy.reshape( ( self._n_times, - self._n_output, + self.n_outputs, self._n_parameters, ) ) diff --git a/pybop/costs/base_cost.py b/pybop/costs/base_cost.py index 6b22f0907..2057b5e5f 100644 --- a/pybop/costs/base_cost.py +++ b/pybop/costs/base_cost.py @@ -1,4 +1,5 @@ from pybop import BaseProblem +import numpy as np class BaseCost: @@ -31,17 +32,17 @@ class BaseCost: The number of outputs in the model. 
""" - def __init__(self, problem=None): + def __init__(self, problem=None, sigma=None): self.problem = problem self.x0 = None self.bounds = None - self.sigma0 = None + self.sigma0 = sigma self._minimising = True if isinstance(self.problem, BaseProblem): self._target = problem._target self.x0 = problem.x0 self.bounds = problem.bounds - self.sigma0 = problem.sigma0 + self.sigma0 = sigma or problem.sigma0 or np.zeros(self.n_outputs) self._n_parameters = problem.n_parameters self.n_outputs = problem.n_outputs diff --git a/tests/unit/test_likelihoods.py b/tests/unit/test_likelihoods.py index 5ba34efec..c73ac033b 100644 --- a/tests/unit/test_likelihoods.py +++ b/tests/unit/test_likelihoods.py @@ -67,7 +67,7 @@ def problem(self, model, parameters, dataset, signal, x0): def test_base_likelihood_init(self, problem): likelihood = pybop.BaseLikelihood(problem, sigma=np.array([0.2])) assert likelihood.problem == problem - assert likelihood._n_output == 1 + assert likelihood.n_outputs == 1 assert likelihood._n_times == problem.n_time_data assert np.array_equal(likelihood.get_sigma(), np.array([0.2])) assert likelihood.x0 == problem.x0 @@ -75,6 +75,12 @@ def test_base_likelihood_init(self, problem): assert likelihood._n_parameters == 1 assert np.array_equal(likelihood._target, problem._target) + @pytest.mark.unit + def test_base_likelihood_call_raises_not_implemented_error(self, problem): + likelihood = pybop.BaseLikelihood(problem) + with pytest.raises(NotImplementedError): + likelihood(np.array([0.5, 0.5])) + @pytest.mark.unit def test_base_likelihood_set_get_sigma(self, problem): likelihood = pybop.BaseLikelihood(problem) From 13309fbdf9bcece0276e0fca4bdf65f8c6392664 Mon Sep 17 00:00:00 2001 From: Brady Planden Date: Fri, 8 Mar 2024 18:30:51 +0000 Subject: [PATCH 9/9] Fix basecost.evaluateS1 tuple negate, up coverage via integration additions, small bugfixes to basecost --- examples/scripts/spm_MLE.py | 11 +++++++++-- pybop/costs/_likelihoods.py | 13 ++++++++++--- pybop/costs/base_cost.py | 7 ++++--- tests/integration/test_parameterisations.py | 16 +++++++++++++--- tests/unit/test_likelihoods.py | 4 ++-- 5 files changed, 38 insertions(+), 13 deletions(-) diff --git a/examples/scripts/spm_MLE.py b/examples/scripts/spm_MLE.py index da27198eb..7aa4254a7 100644 --- a/examples/scripts/spm_MLE.py +++ b/examples/scripts/spm_MLE.py @@ -19,8 +19,15 @@ ), ] +# Set initial parameter values +parameter_set.update( + { + "Negative electrode active material volume fraction": 0.63, + "Positive electrode active material volume fraction": 0.51, + } +) # Generate data -sigma = 0.01 +sigma = 0.005 t_eval = np.arange(0, 900, 2) values = model.predict(t_eval=t_eval) corrupt_values = values["Voltage [V]"].data + np.random.normal(0, sigma, len(t_eval)) @@ -36,7 +43,7 @@ # Generate problem, cost function, and optimisation class problem = pybop.FittingProblem(model, parameters, dataset) -likelihood = pybop.GaussianLogLikelihoodKnownSigma(problem, sigma=sigma) +likelihood = pybop.GaussianLogLikelihoodKnownSigma(problem, sigma=[0.03, 0.03]) optim = pybop.Optimisation(likelihood, optimiser=pybop.CMAES) optim.set_max_unchanged_iterations(20) optim.set_min_iterations(20) diff --git a/pybop/costs/_likelihoods.py b/pybop/costs/_likelihoods.py index b6c2611ed..031344933 100644 --- a/pybop/costs/_likelihoods.py +++ b/pybop/costs/_likelihoods.py @@ -8,13 +8,20 @@ class BaseLikelihood(BaseCost): """ def __init__(self, problem, sigma=None): - super(BaseLikelihood, self).__init__(problem, sigma=sigma) + super(BaseLikelihood, 
self).__init__(problem, sigma) self._n_times = problem.n_time_data def set_sigma(self, sigma): """ Setter for sigma parameter """ + + if sigma is not type(np.array([])): + try: + sigma = np.array(sigma) + except Exception: + raise ValueError("Sigma must be a numpy array") + if np.any(sigma <= 0): raise ValueError("Sigma must not be negative") else: @@ -43,8 +50,8 @@ class GaussianLogLikelihoodKnownSigma(BaseLikelihood): _logpi (float): Precomputed offset value for the log-likelihood function. """ - def __init__(self, problem, sigma=None): - super(GaussianLogLikelihoodKnownSigma, self).__init__(problem, sigma=sigma) + def __init__(self, problem, sigma): + super(GaussianLogLikelihoodKnownSigma, self).__init__(problem, sigma) if sigma is not None: self.set_sigma(sigma) self._offset = -0.5 * self._n_times * np.log(2 * np.pi / self.sigma0) diff --git a/pybop/costs/base_cost.py b/pybop/costs/base_cost.py index 2057b5e5f..a13381aec 100644 --- a/pybop/costs/base_cost.py +++ b/pybop/costs/base_cost.py @@ -42,9 +42,9 @@ def __init__(self, problem=None, sigma=None): self._target = problem._target self.x0 = problem.x0 self.bounds = problem.bounds - self.sigma0 = sigma or problem.sigma0 or np.zeros(self.n_outputs) - self._n_parameters = problem.n_parameters self.n_outputs = problem.n_outputs + self._n_parameters = problem.n_parameters + self.sigma0 = sigma or problem.sigma0 or np.zeros(self._n_parameters) @property def n_parameters(self): @@ -140,7 +140,8 @@ def evaluateS1(self, x): if self._minimising: return self._evaluateS1(x) else: # minimise the negative cost - return -self._evaluateS1(x) + L, dl = self._evaluateS1(x) + return -L, -dl except NotImplementedError as e: raise e diff --git a/tests/integration/test_parameterisations.py b/tests/integration/test_parameterisations.py index 78c82d3ed..c4a1f8df9 100644 --- a/tests/integration/test_parameterisations.py +++ b/tests/integration/test_parameterisations.py @@ -36,7 +36,13 @@ def x0(self): def init_soc(self, request): return request.param - @pytest.fixture(params=[pybop.RootMeanSquaredError, pybop.SumSquaredError]) + @pytest.fixture( + params=[ + pybop.GaussianLogLikelihoodKnownSigma, + pybop.RootMeanSquaredError, + pybop.SumSquaredError, + ] + ) def cost_class(self, request): return request.param @@ -57,7 +63,10 @@ def spm_cost(self, parameters, model, x0, cost_class, init_soc): problem = pybop.FittingProblem( model, parameters, dataset, signal=signal, init_soc=init_soc ) - return cost_class(problem) + if cost_class in [pybop.GaussianLogLikelihoodKnownSigma]: + return cost_class(problem, sigma=[0.01, 0.01]) + else: + return cost_class(problem) @pytest.mark.parametrize( "optimiser", @@ -124,7 +133,8 @@ def test_spm_optimisers(self, optimiser, spm_cost, x0): x, final_cost = parameterisation.run() # Assertions - np.testing.assert_allclose(final_cost, 0, atol=1e-2) + if not isinstance(spm_cost, pybop.GaussianLogLikelihoodKnownSigma): + np.testing.assert_allclose(final_cost, 0, atol=1e-2) np.testing.assert_allclose(x, x0, atol=5e-2) @pytest.fixture diff --git a/tests/unit/test_likelihoods.py b/tests/unit/test_likelihoods.py index c73ac033b..02db1e87f 100644 --- a/tests/unit/test_likelihoods.py +++ b/tests/unit/test_likelihoods.py @@ -111,7 +111,7 @@ def test_gaussian_log_likelihood_known_sigma(self, problem): problem, sigma=np.array([1.0]) ) result = likelihood(np.array([0.5])) - grad_result, grad_likelihood = likelihood._evaluateS1(np.array([0.5])) + grad_result, grad_likelihood = likelihood.evaluateS1(np.array([0.5])) assert isinstance(result, 
float) np.testing.assert_allclose(result, grad_result, atol=1e-5) assert np.all(grad_likelihood <= 0) @@ -120,7 +120,7 @@ def test_gaussian_log_likelihood_known_sigma(self, problem): def test_gaussian_log_likelihood(self, problem): likelihood = pybop.GaussianLogLikelihood(problem) result = likelihood(np.array([0.5, 0.5])) - grad_result, grad_likelihood = likelihood._evaluateS1(np.array([0.5, 0.5])) + grad_result, grad_likelihood = likelihood.evaluateS1(np.array([0.5, 0.5])) assert isinstance(result, float) np.testing.assert_allclose(result, grad_result, atol=1e-5) assert np.all(grad_likelihood <= 0)
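
---

Note on the likelihood implementation in this series: for a fixed, known sigma, the quantity that `GaussianLogLikelihoodKnownSigma` is intended to maximise is the textbook Gaussian log-likelihood

    log L(theta) = -(n/2) * log(2 * pi * sigma^2) - (1 / (2 * sigma^2)) * sum_i (y_i - f_i(theta))^2

The snippet below is a minimal, standalone sketch of that formula under the same offset/residual decomposition used above, cross-checked against scipy.stats. It is illustrative only: it does not call the PyBOP classes added in these patches, and all names in it (`gaussian_log_likelihood`, `residuals`, etc.) are hypothetical.

    import numpy as np
    from scipy.stats import norm

    def gaussian_log_likelihood(residuals, sigma):
        # log L = -(n/2) * log(2*pi*sigma^2) - sum(r^2) / (2*sigma^2),
        # i.e. a constant offset plus a scaled sum of squared errors
        n = len(residuals)
        offset = -0.5 * n * np.log(2 * np.pi * sigma**2)
        return offset - np.sum(residuals**2) / (2.0 * sigma**2)

    rng = np.random.default_rng(8)
    sigma = 0.01
    residuals = rng.normal(0.0, sigma, 450)  # e = target - model prediction

    # The closed form agrees with summing scipy's Gaussian log-pdf over the data
    expected = norm.logpdf(residuals, loc=0.0, scale=sigma).sum()
    assert np.isclose(gaussian_log_likelihood(residuals, sigma), expected)

    # A minimising optimiser works with the negated log-likelihood, which is
    # the sign handling the cost classes above apply for likelihood objects
    cost = -gaussian_log_likelihood(residuals, sigma)

Maximising log L over theta with sigma fixed is equivalent to minimising the sum of squared errors, which is why the MLE example converges to the same parameters as the `SumSquaredError` fits elsewhere in the examples.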