From f3a32bcbd5fda8e2b6fb79c69989f7f41a31de5c Mon Sep 17 00:00:00 2001
From: NicolaCourtier <45851982+NicolaCourtier@users.noreply.github.com>
Date: Mon, 22 Jul 2024 10:54:43 +0100
Subject: [PATCH] Add a WeightedCost (#329)

* Add a WeightedCost

* Fix setting

* Add tests

* Update base_cost.py

* Update CHANGELOG.md

* Update imports

* Update x0 to [0.5]

* Update spm_weighted_cost.py

* Update TypeError with test

* Update spm_weighted_cost.py

* Update evaluate and _evaluate

* Pass current_sensitivities to MAP

* Add test_weighted_design_cost

* Add evaluate back into GaussianLogLikelihood

* Update to super()

* Update prediction to self._current_prediction

* Update y to self._current_prediction

* Update cost_list to args

* Add descriptions

* refactor: move WeightedCost into separate file

---------

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Brady Planden
Co-authored-by: Brady Planden <55357039+BradyPlanden@users.noreply.github.com>
---
 CHANGELOG.md                          |   1 +
 examples/scripts/spm_weighted_cost.py |  61 ++++++++++++
 pybop/__init__.py                     |   1 +
 pybop/costs/_likelihoods.py           |  64 ++++++++----
 pybop/costs/_weighted_cost.py         | 138 ++++++++++++++++++++++++++
 pybop/costs/base_cost.py              |  20 +++-
 pybop/costs/design_costs.py           |   2 +
 pybop/costs/fitting_costs.py          |  92 +++++++++++------
 tests/unit/test_cost.py               | 101 ++++++++++++++++---
 9 files changed, 413 insertions(+), 67 deletions(-)
 create mode 100644 examples/scripts/spm_weighted_cost.py
 create mode 100644 pybop/costs/_weighted_cost.py

diff --git a/CHANGELOG.md b/CHANGELOG.md
index be08914bf..5a92d6d92 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,6 +2,7 @@

 ## Features

+- [#327](https://github.com/pybop-team/PyBOP/issues/327) - Adds the `WeightedCost` subclass, defines when to evaluate a problem and adds the `spm_weighted_cost` example script.
 - [#393](https://github.com/pybop-team/PyBOP/pull/383) - Adds Minkowski and SumofPower cost classes, with an example and corresponding tests.
 - [#403](https://github.com/pybop-team/PyBOP/pull/403/) - Adds lychee link checking action.
diff --git a/examples/scripts/spm_weighted_cost.py b/examples/scripts/spm_weighted_cost.py
new file mode 100644
index 000000000..74c43a33c
--- /dev/null
+++ b/examples/scripts/spm_weighted_cost.py
@@ -0,0 +1,61 @@
+import numpy as np
+
+import pybop
+
+# Parameter set and model definition
+parameter_set = pybop.ParameterSet.pybamm("Chen2020")
+model = pybop.lithium_ion.SPM(parameter_set=parameter_set)
+
+# Fitting parameters
+parameters = pybop.Parameters(
+    pybop.Parameter(
+        "Negative electrode active material volume fraction",
+        prior=pybop.Gaussian(0.68, 0.05),
+        bounds=[0.5, 0.8],
+        true_value=parameter_set["Negative electrode active material volume fraction"],
+    ),
+    pybop.Parameter(
+        "Positive electrode active material volume fraction",
+        prior=pybop.Gaussian(0.58, 0.05),
+        bounds=[0.4, 0.7],
+        true_value=parameter_set["Positive electrode active material volume fraction"],
+    ),
+)
+
+# Generate data
+sigma = 0.001
+t_eval = np.arange(0, 900, 3)
+values = model.predict(t_eval=t_eval)
+corrupt_values = values["Voltage [V]"].data + np.random.normal(0, sigma, len(t_eval))
+
+# Form dataset
+dataset = pybop.Dataset(
+    {
+        "Time [s]": t_eval,
+        "Current function [A]": values["Current [A]"].data,
+        "Voltage [V]": corrupt_values,
+    }
+)
+
+# Generate problem, cost function, and optimisation class
+problem = pybop.FittingProblem(model, parameters, dataset)
+cost1 = pybop.SumSquaredError(problem)
+cost2 = pybop.RootMeanSquaredError(problem)
+weighted_cost = pybop.WeightedCost(cost1, cost2, weights=[1, 100])
+
+for cost in [weighted_cost, cost1, cost2]:
+    optim = pybop.IRPropMin(cost, max_iterations=60)
+
+    # Run the optimisation
+    x, final_cost = optim.run()
+    print("True parameters:", parameters.true_value())
+    print("Estimated parameters:", x)
+
+    # Plot the timeseries output
+    pybop.quick_plot(problem, problem_inputs=x, title="Optimised Comparison")
+
+    # Plot convergence
+    pybop.plot_convergence(optim)
+
+    # Plot the cost landscape with optimisation path
+    pybop.plot2d(optim, steps=15)
diff --git a/pybop/__init__.py b/pybop/__init__.py
index c9aabb8d6..922a64803 100644
--- a/pybop/__init__.py
+++ b/pybop/__init__.py
@@ -100,6 +100,7 @@
     GaussianLogLikelihoodKnownSigma,
     MAP,
 )
+from .costs._weighted_cost import WeightedCost

 #
 # Optimiser class
diff --git a/pybop/costs/_likelihoods.py b/pybop/costs/_likelihoods.py
index e65f02fcb..1f96e2fb5 100644
--- a/pybop/costs/_likelihoods.py
+++ b/pybop/costs/_likelihoods.py
@@ -43,16 +43,17 @@ def _evaluate(self, inputs: Inputs, grad: Union[None, np.ndarray] = None) -> flo
         """
         Evaluates the Gaussian log-likelihood for the given parameters with known sigma.
         """
-        y = self.problem.evaluate(inputs)
-
-        if not self.verify_prediction(y):
+        if not self.verify_prediction(self._current_prediction):
            return -np.inf

         e = np.asarray(
             [
                 np.sum(
                     self._offset
-                    + self._multip * np.sum((self._target[signal] - y[signal]) ** 2.0)
+                    + self._multip
+                    * np.sum(
+                        (self._target[signal] - self._current_prediction[signal]) ** 2.0
+                    )
                 )
                 for signal in self.signal
             ]
         )
@@ -62,17 +63,22 @@ def _evaluateS1(self, inputs: Inputs) -> tuple[float, np.ndarray]:
         """
-        Calls the problem.evaluateS1 method and calculates the log-likelihood and gradient.
+        Calculates the log-likelihood and gradient.
""" - y, dy = self.problem.evaluateS1(inputs) - - if not self.verify_prediction(y): + if not self.verify_prediction(self._current_prediction): return -np.inf, -self._de * np.ones(self.n_parameters) likelihood = self._evaluate(inputs) - r = np.asarray([self._target[signal] - y[signal] for signal in self.signal]) - dl = np.sum((np.sum((r * dy.T), axis=2) / self.sigma2), axis=1) + r = np.asarray( + [ + self._target[signal] - self._current_prediction[signal] + for signal in self.signal + ] + ) + dl = np.sum( + (np.sum((r * self._current_sensitivities.T), axis=2) / self.sigma2), axis=1 + ) return likelihood, dl @@ -117,6 +123,7 @@ def __init__( super().__init__(problem) self._dsigma_scale = dsigma_scale self._logpi = -0.5 * self.n_time_data * np.log(2 * np.pi) + self._fixed_problem = False # keep problem evaluation within _evaluate self.sigma = Parameters() self._add_sigma_parameters(sigma0) @@ -189,8 +196,10 @@ def _evaluate(self, inputs: Inputs, grad: Union[None, np.ndarray] = None) -> flo if np.any(sigma <= 0): return -np.inf - y = self.problem.evaluate(self.problem.parameters.as_dict()) - if not self.verify_prediction(y): + self._current_prediction = self.problem.evaluate( + self.problem.parameters.as_dict() + ) + if not self.verify_prediction(self._current_prediction): return -np.inf e = np.asarray( @@ -198,7 +207,9 @@ def _evaluate(self, inputs: Inputs, grad: Union[None, np.ndarray] = None) -> flo np.sum( self._logpi - self.n_time_data * np.log(sigma) - - np.sum((self._target[signal] - y[signal]) ** 2.0) + - np.sum( + (self._target[signal] - self._current_prediction[signal]) ** 2.0 + ) / (2.0 * sigma**2.0) ) for signal in self.signal @@ -209,7 +220,7 @@ def _evaluate(self, inputs: Inputs, grad: Union[None, np.ndarray] = None) -> flo def _evaluateS1(self, inputs: Inputs) -> tuple[float, np.ndarray]: """ - Calls the problem.evaluateS1 method and calculates the log-likelihood. + Calculates the log-likelihood and sensitivities. 

         Parameters
         ----------
@@ -227,14 +238,23 @@ def _evaluateS1(self, inputs: Inputs) -> tuple[float, np.ndarray]:
         if np.any(sigma <= 0):
             return -np.inf, -self._de * np.ones(self.n_parameters)

-        y, dy = self.problem.evaluateS1(self.problem.parameters.as_dict())
-        if not self.verify_prediction(y):
+        self._current_prediction, self._current_sensitivities = self.problem.evaluateS1(
+            self.problem.parameters.as_dict()
+        )
+        if not self.verify_prediction(self._current_prediction):
             return -np.inf, -self._de * np.ones(self.n_parameters)

         likelihood = self._evaluate(inputs)

-        r = np.asarray([self._target[signal] - y[signal] for signal in self.signal])
-        dl = np.sum((np.sum((r * dy.T), axis=2) / (sigma**2.0)), axis=1)
+        r = np.asarray(
+            [
+                self._target[signal] - self._current_prediction[signal]
+                for signal in self.signal
+            ]
+        )
+        dl = np.sum(
+            (np.sum((r * self._current_sensitivities.T), axis=2) / (sigma**2.0)), axis=1
+        )
         dsigma = (
             -self.n_time_data / sigma + np.sum(r**2.0, axis=1) / (sigma**3.0)
         ) / self._dsigma_scale
@@ -300,7 +320,10 @@ def _evaluate(self, inputs: Inputs, grad=None) -> float:
         if not np.isfinite(log_prior).any():
             return -np.inf

+        if self._fixed_problem:
+            self.likelihood._current_prediction = self._current_prediction
         log_likelihood = self.likelihood._evaluate(inputs)
+
         posterior = log_likelihood + log_prior
         return posterior
@@ -331,6 +354,11 @@ def _evaluateS1(self, inputs: Inputs) -> tuple[float, np.ndarray]:
         if not np.isfinite(log_prior).any():
             return -np.inf, -self._de * np.ones(self.n_parameters)

+        if self._fixed_problem:
+            (
+                self.likelihood._current_prediction,
+                self.likelihood._current_sensitivities,
+            ) = self._current_prediction, self._current_sensitivities
         log_likelihood, dl = self.likelihood._evaluateS1(inputs)

         # Compute a finite difference approximation of the gradient of the log prior
diff --git a/pybop/costs/_weighted_cost.py b/pybop/costs/_weighted_cost.py
new file mode 100644
index 000000000..effa5a510
--- /dev/null
+++ b/pybop/costs/_weighted_cost.py
@@ -0,0 +1,138 @@
+from typing import Optional
+
+import numpy as np
+
+from pybop import BaseCost
+from pybop.parameters.parameter import Inputs
+
+
+class WeightedCost(BaseCost):
+    """
+    A subclass for constructing a linear combination of cost functions as
+    a single weighted cost function.
+
+    Inherits all parameters and attributes from ``BaseCost``.
+
+    Additional Attributes
+    ---------------------
+    costs : list[pybop.BaseCost]
+        A list of PyBOP cost objects.
+    weights : list[float]
+        A list of values with which to weight the cost values.
+    _different_problems : bool
+        If True, the problem for each cost is evaluated independently during
+        each evaluation of the cost (default: False).
+    """
+
+    def __init__(self, *args, weights: Optional[list[float]] = None):
+        self.costs = []
+        for cost in args:
+            if not isinstance(cost, BaseCost):
+                raise TypeError(f"Received {type(cost)} instead of cost object.")
+            self.costs.append(cost)
+        self.weights = weights
+        self._different_problems = False
+
+        if self.weights is None:
+            self.weights = np.ones(len(self.costs))
+        elif isinstance(self.weights, list):
+            self.weights = np.array(self.weights)
+        if not isinstance(self.weights, np.ndarray):
+            raise TypeError(
+                "Expected a list or array of weights the same length as costs."
+            )
+        if not len(self.weights) == len(self.costs):
+            raise ValueError(
+                "Expected a list or array of weights the same length as costs."
+            )
+
+        # Check if all costs depend on the same problem
+        for cost in self.costs:
+            if hasattr(cost, "problem") and cost.problem is not self.costs[0].problem:
+                self._different_problems = True
+
+        if not self._different_problems:
+            super().__init__(self.costs[0].problem)
+            self._fixed_problem = self.costs[0]._fixed_problem
+        else:
+            super().__init__()
+            self._fixed_problem = False
+            for cost in self.costs:
+                self.parameters.join(cost.parameters)
+
+    def _evaluate(self, inputs: Inputs, grad=None):
+        """
+        Calculate the weighted cost for a given set of parameters.
+
+        Parameters
+        ----------
+        inputs : Inputs
+            The parameters for which to compute the cost.
+        grad : array-like, optional
+            An array to store the gradient of the cost function with respect
+            to the parameters.
+
+        Returns
+        -------
+        float
+            The weighted cost value.
+        """
+        e = np.empty_like(self.costs)
+
+        if not self._fixed_problem and self._different_problems:
+            self.parameters.update(values=list(inputs.values()))
+        elif not self._fixed_problem:
+            self._current_prediction = self.problem.evaluate(inputs)
+
+        for i, cost in enumerate(self.costs):
+            if not self._fixed_problem and self._different_problems:
+                inputs = cost.parameters.as_dict()
+                cost._current_prediction = cost.problem.evaluate(inputs)
+            else:
+                cost._current_prediction = self._current_prediction
+            e[i] = cost._evaluate(inputs, grad)
+
+        return np.dot(e, self.weights)
+
+    def _evaluateS1(self, inputs: Inputs):
+        """
+        Compute the weighted cost and its gradient with respect to the parameters.
+
+        Parameters
+        ----------
+        inputs : Inputs
+            The parameters for which to compute the cost and gradient.
+
+        Returns
+        -------
+        tuple
+            A tuple containing the cost and the gradient. The cost is a float,
+            and the gradient is an array-like of the same length as `inputs`.
+        """
+        e = np.empty_like(self.costs)
+        de = np.empty((len(self.parameters), len(self.costs)))
+
+        if not self._fixed_problem and self._different_problems:
+            self.parameters.update(values=list(inputs.values()))
+        elif not self._fixed_problem:
+            self._current_prediction, self._current_sensitivities = (
+                self.problem.evaluateS1(inputs)
+            )
+
+        for i, cost in enumerate(self.costs):
+            if not self._fixed_problem and self._different_problems:
+                inputs = cost.parameters.as_dict()
+                cost._current_prediction, cost._current_sensitivities = (
+                    cost.problem.evaluateS1(inputs)
+                )
+            else:
+                cost._current_prediction, cost._current_sensitivities = (
+                    self._current_prediction,
+                    self._current_sensitivities,
+                )
+            e[i], de[:, i] = cost._evaluateS1(inputs)
+
+        e = np.dot(e, self.weights)
+        de = np.dot(de, self.weights)
+
+        return e, de
diff --git a/pybop/costs/base_cost.py b/pybop/costs/base_cost.py
index d40bbb996..eedbbc2c0 100644
--- a/pybop/costs/base_cost.py
+++ b/pybop/costs/base_cost.py
@@ -1,4 +1,4 @@
-from typing import Union
+from typing import Optional, Union

 from pybop import BaseProblem
 from pybop.parameters.parameter import Inputs, Parameters
@@ -22,17 +22,25 @@ class BaseCost:
         An array containing the target data to fit.
     n_outputs : int
         The number of outputs in the model.
+
+    Additional Attributes
+    ---------------------
+    _fixed_problem : bool
+        If True, the problem is evaluated in ``evaluate``/``evaluateS1`` and the
+        prediction is passed to ``_evaluate``/``_evaluateS1`` (default: False).
""" - def __init__(self, problem=None): + def __init__(self, problem: Optional[BaseProblem] = None): self.parameters = Parameters() self.problem = problem + self._fixed_problem = False self.set_fail_gradient() if isinstance(self.problem, BaseProblem): self._target = self.problem._target self.parameters.join(self.problem.parameters) self.n_outputs = self.problem.n_outputs self.signal = self.problem.signal + self._fixed_problem = True @property def n_parameters(self): @@ -69,6 +77,9 @@ def evaluate(self, inputs: Union[Inputs, list], grad=None): inputs = self.parameters.verify(inputs) try: + if self._fixed_problem: + self._current_prediction = self.problem.evaluate(inputs) + return self._evaluate(inputs, grad) except NotImplementedError as e: @@ -126,6 +137,11 @@ def evaluateS1(self, inputs: Union[Inputs, list]): inputs = self.parameters.verify(inputs) try: + if self._fixed_problem: + self._current_prediction, self._current_sensitivities = ( + self.problem.evaluateS1(inputs) + ) + return self._evaluateS1(inputs) except NotImplementedError as e: diff --git a/pybop/costs/design_costs.py b/pybop/costs/design_costs.py index ac8ecacac..738dfe61e 100644 --- a/pybop/costs/design_costs.py +++ b/pybop/costs/design_costs.py @@ -98,6 +98,7 @@ class GravimetricEnergyDensity(DesignCost): def __init__(self, problem, update_capacity=False): super().__init__(problem, update_capacity) + self._fixed_problem = False # keep problem evaluation within _evaluate def _evaluate(self, inputs: Inputs, grad=None): """ @@ -154,6 +155,7 @@ class VolumetricEnergyDensity(DesignCost): def __init__(self, problem, update_capacity=False): super().__init__(problem, update_capacity) + self._fixed_problem = False # keep problem evaluation within _evaluate def _evaluate(self, inputs: Inputs, grad=None): """ diff --git a/pybop/costs/fitting_costs.py b/pybop/costs/fitting_costs.py index ac17f3eac..18cc752d6 100644 --- a/pybop/costs/fitting_costs.py +++ b/pybop/costs/fitting_costs.py @@ -38,14 +38,16 @@ def _evaluate(self, inputs: Inputs, grad=None): The root mean square error. """ - prediction = self.problem.evaluate(inputs) - - if not self.verify_prediction(prediction): + if not self.verify_prediction(self._current_prediction): return np.inf e = np.asarray( [ - np.sqrt(np.mean((prediction[signal] - self._target[signal]) ** 2)) + np.sqrt( + np.mean( + (self._current_prediction[signal] - self._target[signal]) ** 2 + ) + ) for signal in self.signal ] ) @@ -72,13 +74,19 @@ def _evaluateS1(self, inputs: Inputs): ValueError If an error occurs during the calculation of the cost or gradient. """ - y, dy = self.problem.evaluateS1(inputs) - if not self.verify_prediction(y): + if not self.verify_prediction(self._current_prediction): return np.inf, self._de * np.ones(self.n_parameters) - r = np.asarray([y[signal] - self._target[signal] for signal in self.signal]) + r = np.asarray( + [ + self._current_prediction[signal] - self._target[signal] + for signal in self.signal + ] + ) e = np.sqrt(np.mean(r**2, axis=1)) - de = np.mean((r * dy.T), axis=2) / (e + np.finfo(float).eps) + de = np.mean((r * self._current_sensitivities.T), axis=2) / ( + e + np.finfo(float).eps + ) if self.n_outputs == 1: return e.item(), de.flatten() @@ -124,14 +132,12 @@ def _evaluate(self, inputs: Inputs, grad=None): float The Sum of Squared Error. 
""" - prediction = self.problem.evaluate(inputs) - - if not self.verify_prediction(prediction): + if not self.verify_prediction(self._current_prediction): return np.inf e = np.asarray( [ - np.sum((prediction[signal] - self._target[signal]) ** 2) + np.sum((self._current_prediction[signal] - self._target[signal]) ** 2) for signal in self.signal ] ) @@ -158,13 +164,17 @@ def _evaluateS1(self, inputs: Inputs): ValueError If an error occurs during the calculation of the cost or gradient. """ - y, dy = self.problem.evaluateS1(inputs) - if not self.verify_prediction(y): + if not self.verify_prediction(self._current_prediction): return np.inf, self._de * np.ones(self.n_parameters) - r = np.asarray([y[signal] - self._target[signal] for signal in self.signal]) + r = np.asarray( + [ + self._current_prediction[signal] - self._target[signal] + for signal in self.signal + ] + ) e = np.sum(np.sum(r**2, axis=0), axis=0) - de = 2 * np.sum(np.sum((r * dy.T), axis=2), axis=1) + de = 2 * np.sum(np.sum((r * self._current_sensitivities.T), axis=2), axis=1) return e, de @@ -224,13 +234,15 @@ def _evaluate(self, inputs: Inputs, grad=None): float The Minkowski cost. """ - prediction = self.problem.evaluate(inputs) - if not self.verify_prediction(prediction): + if not self.verify_prediction(self._current_prediction): return np.inf e = np.asarray( [ - np.sum(np.abs(prediction[signal] - self._target[signal]) ** self.p) + np.sum( + np.abs(self._current_prediction[signal] - self._target[signal]) + ** self.p + ) ** (1 / self.p) for signal in self.signal ] @@ -258,20 +270,27 @@ def _evaluateS1(self, inputs): ValueError If an error occurs during the calculation of the cost or gradient. """ - y, dy = self.problem.evaluateS1(inputs) - if not self.verify_prediction(y): + if not self.verify_prediction(self._current_prediction): return np.inf, self._de * np.ones(self.n_parameters) - r = np.asarray([y[signal] - self._target[signal] for signal in self.signal]) + r = np.asarray( + [ + self._current_prediction[signal] - self._target[signal] + for signal in self.signal + ] + ) e = np.asarray( [ - np.sum(np.abs(y[signal] - self._target[signal]) ** self.p) + np.sum( + np.abs(self._current_prediction[signal] - self._target[signal]) + ** self.p + ) ** (1 / self.p) for signal in self.signal ] ) de = np.sum( - np.sum(r ** (self.p - 1) * dy.T, axis=2) + np.sum(r ** (self.p - 1) * self._current_sensitivities.T, axis=2) / (e ** (self.p - 1) + np.finfo(float).eps), axis=1, ) @@ -331,13 +350,15 @@ def _evaluate(self, inputs: Inputs, grad=None): float The Sum of Power cost. """ - prediction = self.problem.evaluate(inputs) - if not self.verify_prediction(prediction): + if not self.verify_prediction(self._current_prediction): return np.inf e = np.asarray( [ - np.sum(np.abs(prediction[signal] - self._target[signal]) ** self.p) + np.sum( + np.abs(self._current_prediction[signal] - self._target[signal]) + ** self.p + ) for signal in self.signal ] ) @@ -364,13 +385,19 @@ def _evaluateS1(self, inputs): ValueError If an error occurs during the calculation of the cost or gradient. 
""" - y, dy = self.problem.evaluateS1(inputs) - if not self.verify_prediction(y): + if not self.verify_prediction(self._current_prediction): return np.inf, self._de * np.ones(self.n_parameters) - r = np.asarray([y[signal] - self._target[signal] for signal in self.signal]) + r = np.asarray( + [ + self._current_prediction[signal] - self._target[signal] + for signal in self.signal + ] + ) e = np.sum(np.sum(np.abs(r) ** self.p)) - de = self.p * np.sum(np.sum(r ** (self.p - 1) * dy.T, axis=2), axis=1) + de = self.p * np.sum( + np.sum(r ** (self.p - 1) * self._current_sensitivities.T, axis=2), axis=1 + ) return e, de @@ -389,6 +416,7 @@ class ObserverCost(BaseCost): def __init__(self, observer: Observer): super().__init__(problem=observer) self._observer = observer + self._fixed_problem = False # keep problem evaluation within _evaluate def _evaluate(self, inputs: Inputs, grad=None): """ @@ -412,7 +440,7 @@ def _evaluate(self, inputs: Inputs, grad=None): ) return -log_likelihood - def evaluateS1(self, inputs: Inputs): + def _evaluateS1(self, inputs: Inputs): """ Compute the cost and its gradient with respect to the parameters. diff --git a/tests/unit/test_cost.py b/tests/unit/test_cost.py index 6a7d1a900..802accf19 100644 --- a/tests/unit/test_cost.py +++ b/tests/unit/test_cost.py @@ -1,3 +1,5 @@ +from copy import copy + import numpy as np import pytest @@ -250,6 +252,12 @@ def test_SumofPower(self, problem): with pytest.raises(ValueError, match="p = np.inf is not yet supported."): pybop.SumofPower(problem, p=np.inf) + @pytest.fixture + def design_problem(self, model, parameters, experiment, signal): + return pybop.DesignProblem( + model, parameters, experiment, signal=signal, init_soc=0.5 + ) + @pytest.mark.parametrize( "cost_class", [ @@ -259,21 +267,9 @@ def test_SumofPower(self, problem): ], ) @pytest.mark.unit - def test_design_costs( - self, - cost_class, - model, - parameters, - experiment, - signal, - ): - # Construct Problem - problem = pybop.DesignProblem( - model, parameters, experiment, signal=signal, init_soc=0.5 - ) - + def test_design_costs(self, cost_class, design_problem): # Construct Cost - cost = cost_class(problem) + cost = cost_class(design_problem) if cost_class in [pybop.DesignCost]: with pytest.raises(NotImplementedError): @@ -299,5 +295,80 @@ def test_design_costs( cost(["StringInputShouldNotWork"]) # Compute after updating nominal capacity - cost = cost_class(problem, update_capacity=True) + cost = cost_class(design_problem, update_capacity=True) cost([0.4]) + + @pytest.mark.unit + def test_weighted_fitting_cost(self, problem): + cost1 = pybop.SumSquaredError(problem) + cost2 = pybop.RootMeanSquaredError(problem) + + # Test with and without weights + weighted_cost = pybop.WeightedCost(cost1, cost2) + np.testing.assert_array_equal(weighted_cost.weights, np.ones(2)) + weighted_cost = pybop.WeightedCost(cost1, cost2, weights=[1, 1]) + np.testing.assert_array_equal(weighted_cost.weights, np.ones(2)) + weighted_cost = pybop.WeightedCost(cost1, cost2, weights=np.array([1, 1])) + np.testing.assert_array_equal(weighted_cost.weights, np.ones(2)) + with pytest.raises( + TypeError, + match=r"Received instead of cost object.", + ): + weighted_cost = pybop.WeightedCost("Invalid string") + with pytest.raises( + TypeError, + match="Expected a list or array of weights the same length as costs.", + ): + weighted_cost = pybop.WeightedCost(cost1, cost2, weights="Invalid string") + with pytest.raises( + ValueError, + match="Expected a list or array of weights the same length as costs.", + 
+        ):
+            weighted_cost = pybop.WeightedCost(cost1, cost2, weights=[1])
+
+        # Test with and without different problems
+        weight = 100
+        weighted_cost_2 = pybop.WeightedCost(cost1, cost2, weights=[1, weight])
+        assert weighted_cost_2._different_problems is False
+        assert weighted_cost_2._fixed_problem is True
+        assert weighted_cost_2.problem is problem
+        assert weighted_cost_2([0.5]) >= 0
+        np.testing.assert_allclose(
+            weighted_cost_2.evaluate([0.6]),
+            cost1([0.6]) + weight * cost2([0.6]),
+            atol=1e-5,
+        )
+
+        cost3 = pybop.RootMeanSquaredError(copy(problem))
+        weighted_cost_3 = pybop.WeightedCost(cost1, cost3, weights=[1, weight])
+        assert weighted_cost_3._different_problems is True
+        assert weighted_cost_3._fixed_problem is False
+        assert weighted_cost_3.problem is None
+        assert weighted_cost_3([0.5]) >= 0
+        np.testing.assert_allclose(
+            weighted_cost_3.evaluate([0.6]),
+            cost1([0.6]) + weight * cost3([0.6]),
+            atol=1e-5,
+        )
+
+        errors_2, sensitivities_2 = weighted_cost_2.evaluateS1([0.5])
+        errors_3, sensitivities_3 = weighted_cost_3.evaluateS1([0.5])
+        np.testing.assert_allclose(errors_2, errors_3, atol=1e-5)
+        np.testing.assert_allclose(sensitivities_2, sensitivities_3, atol=1e-5)
+
+    @pytest.mark.unit
+    def test_weighted_design_cost(self, design_problem):
+        cost1 = pybop.GravimetricEnergyDensity(design_problem)
+        cost2 = pybop.RootMeanSquaredError(design_problem)
+
+        # Test with and without weights
+        weighted_cost = pybop.WeightedCost(cost1, cost2)
+        assert weighted_cost._different_problems is False
+        assert weighted_cost._fixed_problem is False
+        assert weighted_cost.problem is design_problem
+        assert weighted_cost([0.5]) >= 0
+        np.testing.assert_allclose(
+            weighted_cost.evaluate([0.6]),
+            cost1([0.6]) + cost2([0.6]),
+            atol=1e-5,
+        )
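
A closing note for reviewers (illustration only, not part of the diff): the combination rule WeightedCost implements is a plain linear combination, e = sum_i w_i * e_i with gradient de = sum_i w_i * de_i, computed as the np.dot products at the end of _evaluate and _evaluateS1 above. A minimal standalone sketch of that mechanism, using toy single-parameter cost functions (the names sse, abs_err and weighted below are hypothetical stand-ins, not the PyBOP API):

import numpy as np

TARGET = 2.0

def sse(x):
    # Toy sum-of-squared-errors cost: returns (value, gradient).
    return (x - TARGET) ** 2, 2.0 * (x - TARGET)

def abs_err(x):
    # Toy absolute-error cost: returns (value, gradient).
    return abs(x - TARGET), np.sign(x - TARGET)

def weighted(x, costs, weights):
    # Mirrors WeightedCost._evaluateS1: stack the member cost values and
    # gradients, then combine each with a dot product against the weights.
    e = np.array([c(x)[0] for c in costs])
    de = np.array([c(x)[1] for c in costs])
    return np.dot(e, weights), np.dot(de, weights)

value, grad = weighted(1.5, [sse, abs_err], np.array([1.0, 100.0]))
print(value, grad)  # 1*0.25 + 100*0.5 = 50.25 and 1*(-1.0) + 100*(-1.0) = -101.0

This is also why weights=[1, 100] in spm_weighted_cost.py simply scales the second cost's contribution by a factor of 100: the optimum of the weighted sum shifts toward the minimiser of the more heavily weighted term.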