Merge pull request #218 from pybop-team/210-add-likelihood-classes
Adds Base Likelihoods, Maximum Likelihood Example
Showing 14 changed files with 491 additions and 38 deletions.
@@ -0,0 +1,70 @@ (new file: maximum likelihood estimation example script)
import pybop
import numpy as np

# Define model
parameter_set = pybop.ParameterSet.pybamm("Chen2020")
model = pybop.lithium_ion.SPM(parameter_set=parameter_set)

# Fitting parameters
parameters = [
    pybop.Parameter(
        "Negative electrode active material volume fraction",
        prior=pybop.Gaussian(0.6, 0.05),
        bounds=[0.5, 0.8],
    ),
    pybop.Parameter(
        "Positive electrode active material volume fraction",
        prior=pybop.Gaussian(0.48, 0.05),
        bounds=[0.4, 0.7],
    ),
]

# Set initial parameter values
parameter_set.update(
    {
        "Negative electrode active material volume fraction": 0.63,
        "Positive electrode active material volume fraction": 0.51,
    }
)

# Generate synthetic data by corrupting the model output with Gaussian noise
sigma = 0.005
t_eval = np.arange(0, 900, 2)
values = model.predict(t_eval=t_eval)
corrupt_values = values["Voltage [V]"].data + np.random.normal(0, sigma, len(t_eval))

# Form dataset
dataset = pybop.Dataset(
    {
        "Time [s]": t_eval,
        "Current function [A]": values["Current [A]"].data,
        "Voltage [V]": corrupt_values,
    }
)

# Generate problem, likelihood, and optimisation class
# Note: the sigma assumed by the likelihood need not match the true
# noise level (0.005) used to corrupt the data above
problem = pybop.FittingProblem(model, parameters, dataset)
likelihood = pybop.GaussianLogLikelihoodKnownSigma(problem, sigma=[0.03, 0.03])
optim = pybop.Optimisation(likelihood, optimiser=pybop.CMAES)
optim.set_max_unchanged_iterations(20)
optim.set_min_iterations(20)
optim.set_max_iterations(100)

# Run the optimisation
x, final_cost = optim.run()
print("Estimated parameters:", x)

# Plot the timeseries output
pybop.quick_plot(x[0:2], likelihood, title="Optimised Comparison")

# Plot convergence
pybop.plot_convergence(optim)

# Plot the parameter traces
pybop.plot_parameters(optim)

# Plot the cost landscape
pybop.plot_cost2d(likelihood, steps=15)

# Plot the cost landscape with optimisation path and updated bounds
bounds = np.array([[0.55, 0.77], [0.48, 0.68]])
pybop.plot_cost2d(likelihood, optim=optim, bounds=bounds, steps=15)
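For reference, the quantity this example maximises is the standard Gaussian log-likelihood with a fixed, user-supplied noise level, assuming noise that is independent across time points:

\log \mathcal{L}(\theta) = -\frac{n}{2}\log\bigl(2\pi\sigma^2\bigr) - \frac{1}{2\sigma^2}\sum_{i=1}^{n}\bigl(y_i - \hat{y}_i(\theta)\bigr)^2

where y_i is the measured voltage, \hat{y}_i(\theta) the model prediction at parameters \theta, and n the number of time points. This is the form implemented per output by GaussianLogLikelihoodKnownSigma in the likelihoods module below.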
@@ -0,0 +1,164 @@ (new file: base likelihood classes)
import numpy as np

from pybop.costs.base_cost import BaseCost


class BaseLikelihood(BaseCost):
    """
    Base class for likelihoods.
    """

    def __init__(self, problem, sigma=None):
        super().__init__(problem, sigma)
        self._n_times = problem.n_time_data

    def set_sigma(self, sigma):
        """
        Setter for the sigma parameter.
        """
        if not isinstance(sigma, np.ndarray):
            try:
                sigma = np.array(sigma)
            except Exception:
                raise ValueError("Sigma must be convertible to a numpy array")

        if np.any(sigma <= 0):
            raise ValueError("Sigma must be positive")

        self.sigma0 = sigma

    def get_sigma(self):
        """
        Getter for the sigma parameter.
        """
        return self.sigma0

    def get_n_parameters(self):
        """
        Returns the number of parameters.
        """
        return self._n_parameters


class GaussianLogLikelihoodKnownSigma(BaseLikelihood):
    """
    This class represents a Gaussian log-likelihood with a known sigma,
    which assumes that the data follows a Gaussian distribution and computes
    the log-likelihood of observed data under this assumption.

    Attributes:
        _offset (float): Precomputed constant offset of the log-likelihood.
        _multip (float): Precomputed multiplier for the sum of squared residuals.
    """

    def __init__(self, problem, sigma):
        super().__init__(problem, sigma)
        if sigma is not None:
            self.set_sigma(sigma)
        # Precompute the constant offset and multiplier of the log-likelihood
        self._offset = -0.5 * self._n_times * np.log(2 * np.pi * self.sigma0**2)
        self._multip = -1 / (2.0 * self.sigma0**2)
        self.sigma2 = self.sigma0**-2  # inverse variance, used in the gradient
        self._dl = np.ones(self._n_parameters)

    def _evaluate(self, x, grad=None):
        """
        Calls the problem.evaluate method and calculates
        the log-likelihood.
        """
        e = self._target - self.problem.evaluate(x)
        return np.sum(self._offset + self._multip * np.sum(e**2, axis=0))

    def _evaluateS1(self, x, grad=None):
        """
        Calls the problem.evaluateS1 method and calculates
        the log-likelihood and its gradient.
        """
        y, dy = self.problem.evaluateS1(x)
        if len(y) < len(self._target):
            # Simulation failed or terminated early; return -inf
            likelihood = -np.inf
            dl = self._dl
        else:
            dy = dy.reshape(
                (
                    self._n_times,
                    self.n_outputs,
                    self._n_parameters,
                )
            )
            e = self._target - y
            likelihood = np.sum(self._offset + self._multip * np.sum(e**2, axis=0))
            dl = np.sum(self.sigma2 * np.sum(e.T * dy.T, axis=2), axis=1)

        return likelihood, dl


class GaussianLogLikelihood(BaseLikelihood):
    """
    This class represents a Gaussian log-likelihood, which assumes that the
    data follows a Gaussian distribution and computes the log-likelihood of
    observed data under this assumption. The standard deviation(s) are
    estimated alongside the model parameters.

    Attributes:
        _logpi (float): Precomputed offset value for the log-likelihood function.
    """

    def __init__(self, problem):
        super().__init__(problem)
        self._logpi = -0.5 * self._n_times * np.log(2 * np.pi)
        self._dl = np.ones(self._n_parameters + self.n_outputs)

    def _evaluate(self, x, grad=None):
        """
        Evaluates the Gaussian log-likelihood for the given parameters.

        Args:
            x (array_like): The parameters for which to evaluate the
                log-likelihood. The last `self.n_outputs` elements are assumed
                to be the standard deviations of the Gaussian distributions.

        Returns:
            float: The log-likelihood value, or -inf if the standard
                deviations are non-positive.
        """
        sigma = np.asarray(x[-self.n_outputs :])

        if np.any(sigma <= 0):
            return -np.inf

        e = self._target - self.problem.evaluate(x[: -self.n_outputs])
        return np.sum(
            self._logpi
            - self._n_times * np.log(sigma)
            - np.sum(e**2, axis=0) / (2.0 * sigma**2)
        )

    def _evaluateS1(self, x, grad=None):
        """
        Calls the problem.evaluateS1 method and calculates
        the log-likelihood and its gradient.
        """
        sigma = np.asarray(x[-self.n_outputs :])

        if np.any(sigma <= 0):
            return -np.inf, self._dl

        y, dy = self.problem.evaluateS1(x[: -self.n_outputs])
        if len(y) < len(self._target):
            # Simulation failed or terminated early; return -inf
            likelihood = -np.inf
            dl = self._dl
        else:
            dy = dy.reshape(
                (
                    self._n_times,
                    self.n_outputs,
                    self._n_parameters,
                )
            )
            e = self._target - y
            likelihood = self._evaluate(x)
            dl = np.sum(sigma**-2.0 * np.sum(e.T * dy.T, axis=2), axis=1)

            # Add the gradient with respect to sigma
            dsigma = -self._n_times / sigma + sigma**-3.0 * np.sum(e**2, axis=0)
            dl = np.concatenate((dl, dsigma))

        return likelihood, dl
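As a sanity check on the gradient expressions above, here is a minimal standalone sketch (plain numpy, independent of pybop; all names are illustrative) of the unknown-sigma log-likelihood and its analytic sigma gradient, compared against a central finite difference:

import numpy as np

# Unknown-sigma Gaussian log-likelihood for a single output:
# log L = -(n/2) log(2 pi) - n log(sigma) - sum(e**2) / (2 sigma**2)
def log_likelihood(e, sigma):
    n = len(e)
    return (
        -0.5 * n * np.log(2 * np.pi)
        - n * np.log(sigma)
        - np.sum(e**2) / (2 * sigma**2)
    )

# Analytic gradient w.r.t. sigma, matching the dsigma term in _evaluateS1:
# d(log L)/d(sigma) = -n / sigma + sigma**-3 * sum(e**2)
def dl_dsigma(e, sigma):
    n = len(e)
    return -n / sigma + sigma**-3.0 * np.sum(e**2)

rng = np.random.default_rng(0)
e = rng.normal(0, 0.05, 100)  # synthetic residuals
sigma = 0.05

# Central finite difference; the two printed values should agree closely
h = 1e-6
fd = (log_likelihood(e, sigma + h) - log_likelihood(e, sigma - h)) / (2 * h)
print(dl_dsigma(e, sigma), fd)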