diff --git a/CHANGELOG.md b/CHANGELOG.md
index e49c6db90..082b2064a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,6 +2,7 @@

 ## Features

+- [#6](https://github.com/pybop-team/PyBOP/issues/6) - Adds Monte Carlo functionality, with methods based on PINTS' algorithms. Adds the base classes `BaseSampler` and `BasePintsSampler`.
 - [#353](https://github.com/pybop-team/PyBOP/issues/353) - Allow user-defined check_params functions to enforce nonlinear constraints, and enable SciPy constrained optimisation methods
 - [#222](https://github.com/pybop-team/PyBOP/issues/222) - Adds an example for performing electrode balancing.
 - [#441](https://github.com/pybop-team/PyBOP/issues/441) - Adds an example for estimating constants within a `pybamm.FunctionalParameter`.
@@ -38,7 +39,6 @@

 ## Bug Fixes

-
 ## Breaking Changes

 # [v24.6](https://github.com/pybop-team/PyBOP/tree/v24.6) - 2024-07-08
diff --git a/examples/scripts/mcmc_example.py b/examples/scripts/mcmc_example.py
new file mode 100644
index 000000000..436886d00
--- /dev/null
+++ b/examples/scripts/mcmc_example.py
@@ -0,0 +1,94 @@
+import numpy as np
+import plotly.graph_objects as go
+import pybamm
+
+import pybop
+
+# Parameter set and model definition
+solver = pybamm.IDAKLUSolver()
+parameter_set = pybop.ParameterSet.pybamm("Chen2020")
+parameter_set.update(
+    {
+        "Negative electrode active material volume fraction": 0.63,
+        "Positive electrode active material volume fraction": 0.71,
+    }
+)
+synth_model = pybop.lithium_ion.DFN(parameter_set=parameter_set, solver=solver)
+
+# Fitting parameters
+parameters = pybop.Parameters(
+    pybop.Parameter(
+        "Negative electrode active material volume fraction",
+        prior=pybop.Gaussian(0.68, 0.02),
+        transformation=pybop.LogTransformation(),
+    ),
+    pybop.Parameter(
+        "Positive electrode active material volume fraction",
+        prior=pybop.Gaussian(0.65, 0.02),
+        transformation=pybop.LogTransformation(),
+    ),
+)
+
+# Generate data
+init_soc = 0.5
+sigma = 0.002
+experiment = pybop.Experiment(
+    [
+        ("Discharge at 0.5C for 6 minutes (5 second period)",),
+    ]
+)
+values = synth_model.predict(
+    initial_state={"Initial SoC": init_soc}, experiment=experiment
+)
+
+
+def noise(sigma):
+    return np.random.normal(0, sigma, len(values["Voltage [V]"].data))
+
+
+# Form dataset
+dataset = pybop.Dataset(
+    {
+        "Time [s]": values["Time [s]"].data,
+        "Current function [A]": values["Current [A]"].data,
+        "Voltage [V]": values["Voltage [V]"].data + noise(sigma),
+        "Bulk open-circuit voltage [V]": values["Bulk open-circuit voltage [V]"].data
+        + noise(sigma),
+    }
+)
+
+model = pybop.lithium_ion.SPM(parameter_set=parameter_set, solver=pybamm.IDAKLUSolver())
+model.build(initial_state={"Initial SoC": init_soc})
+signal = ["Voltage [V]", "Bulk open-circuit voltage [V]"]
+
+# Generate problem, likelihood, and sampler
+problem = pybop.FittingProblem(model, parameters, dataset, signal=signal)
+likelihood = pybop.GaussianLogLikelihoodKnownSigma(problem, sigma0=0.002)
+posterior = pybop.LogPosterior(likelihood)
+
+optim = pybop.DifferentialEvolutionMCMC(
+    posterior,
+    chains=3,
+    max_iterations=300,
+    warm_up=100,
+    verbose=True,
+    # parallel=True,  # uncomment to enable parallelisation (macOS/WSL/Linux only)
+)
+result = optim.run()
+
+# Create a histogram per chain and parameter
+fig = go.Figure()
+for data in result:
+    fig.add_trace(go.Histogram(x=data[:, 0], name="Neg", opacity=0.75))
+    fig.add_trace(go.Histogram(x=data[:, 1], name="Pos", opacity=0.75))
+
+# Update layout for better visualisation
+fig.update_layout(
title="Posterior distribution of volume fractions", + xaxis_title="Value", + yaxis_title="Count", + barmode="overlay", +) + +# Show the plot +fig.show() diff --git a/pybop/__init__.py b/pybop/__init__.py index 3969f1f59..99f30e1a6 100644 --- a/pybop/__init__.py +++ b/pybop/__init__.py @@ -71,7 +71,7 @@ # from .parameters.parameter import Parameter, Parameters from .parameters.parameter_set import ParameterSet -from .parameters.priors import BasePrior, Gaussian, Uniform, Exponential +from .parameters.priors import BasePrior, Gaussian, Uniform, Exponential, JointLogPrior # # Model classes @@ -83,7 +83,7 @@ from .models.base_model import Inputs # -# Problem class +# Problem classes # from .problems.base_problem import BaseProblem from .problems.fitting_problem import FittingProblem @@ -91,7 +91,7 @@ from .problems.design_problem import DesignProblem # -# Cost function class +# Cost classes # from .costs.base_cost import BaseCost from .costs.fitting_costs import ( @@ -110,12 +110,13 @@ BaseLikelihood, GaussianLogLikelihood, GaussianLogLikelihoodKnownSigma, + LogPosterior, MAP, ) from .costs._weighted_cost import WeightedCost # -# Optimiser class +# Optimiser classes # from .optimisers._cuckoo import CuckooSearchImpl @@ -141,6 +142,24 @@ ) from .optimisers.optimisation import Optimisation +# +# Monte Carlo classes +# +from .samplers.base_sampler import BaseSampler +from .samplers.base_pints_sampler import BasePintsSampler +from .samplers.pints_samplers import ( + NUTS, DREAM, AdaptiveCovarianceMCMC, + DifferentialEvolutionMCMC, DramACMC, + EmceeHammerMCMC, + HaarioACMC, HaarioBardenetACMC, + HamiltonianMCMC, MALAMCMC, + MetropolisRandomWalkMCMC, MonomialGammaHamiltonianMCMC, + PopulationMCMC, RaoBlackwellACMC, + RelativisticMCMC, SliceDoublingMCMC, + SliceRankShrinkingMCMC, SliceStepoutMCMC, +) +from .samplers.mcmc_sampler import MCMCSampler + # # Observer classes # @@ -148,7 +167,7 @@ from .observers.observer import Observer # -# Plotting class +# Plotting classes # from .plotting.plotly_manager import PlotlyManager from .plotting.standard_plots import StandardPlot, StandardSubplot, plot_trajectories diff --git a/pybop/costs/_likelihoods.py b/pybop/costs/_likelihoods.py index d651d89eb..6da737e5f 100644 --- a/pybop/costs/_likelihoods.py +++ b/pybop/costs/_likelihoods.py @@ -4,7 +4,7 @@ from pybop.costs.base_cost import BaseCost from pybop.parameters.parameter import Parameter, Parameters -from pybop.parameters.priors import Uniform +from pybop.parameters.priors import JointLogPrior, Uniform from pybop.problems.base_problem import BaseProblem @@ -290,3 +290,93 @@ def compute( log_likelihood = self.likelihood.compute(y) posterior = log_likelihood + log_prior return posterior + + +class LogPosterior(BaseCost): + """ + The Log Posterior for a given problem. + + Computes the log posterior which is the sum of the log + likelihood and the log prior. + + Inherits all parameters and attributes from ``BaseCost``. + """ + + def __init__(self, log_likelihood, log_prior=None): + super().__init__(problem=log_likelihood.problem) + + # Store the likelihood and prior + self._log_likelihood = log_likelihood + if log_prior is None: + self._prior = JointLogPrior(*log_likelihood.problem.parameters.priors()) + else: + self._prior = log_prior + + def compute( + self, + y: dict, + dy: np.ndarray = None, + calculate_grad: bool = False, + ) -> Union[float, tuple[float, np.ndarray]]: + """ + Calculate the posterior cost for a given set of parameters. 
+
+        Parameters
+        ----------
+        y : dict
+            The simulated model output (signal values) to evaluate.
+        dy : np.ndarray, optional
+            The sensitivities of the model output with respect to the
+            parameters. Required when `calculate_grad` is True.
+        calculate_grad : bool, optional
+            If True, return the gradient of the posterior along with its value.
+
+        Returns
+        -------
+        float
+            The posterior cost, or a tuple of the posterior cost and its
+            gradient when `calculate_grad` is True.
+        """
+        # Verify we have dy if calculate_grad is True
+        self.verify_args(dy, calculate_grad)
+
+        if calculate_grad:
+            log_prior, dp = self._prior.logpdfS1(self.parameters.current_value())
+        else:
+            log_prior = self._prior.logpdf(self.parameters.current_value())
+
+        if not np.isfinite(log_prior).any():
+            return (-np.inf, -self.grad_fail) if calculate_grad else -np.inf
+
+        if calculate_grad:
+            log_likelihood, dl = self._log_likelihood.compute(
+                y, dy, calculate_grad=True
+            )
+
+            posterior = log_likelihood + log_prior
+            total_gradient = dl + dp
+
+            return posterior, total_gradient
+
+        log_likelihood = self._log_likelihood.compute(y)
+        posterior = log_likelihood + log_prior
+        return posterior
+
+    def prior(self):
+        """
+        Return the prior object.
+
+        Returns
+        -------
+        object
+            The prior object.
+        """
+        return self._prior
+
+    def likelihood(self):
+        """
+        Return the likelihood object.
+
+        Returns
+        -------
+        object
+            The likelihood object.
+        """
+        return self._log_likelihood
diff --git a/pybop/optimisers/base_optimiser.py b/pybop/optimisers/base_optimiser.py
index a3de5b662..7d927f1eb 100644
--- a/pybop/optimisers/base_optimiser.py
+++ b/pybop/optimisers/base_optimiser.py
@@ -115,7 +115,7 @@ def __init__(
         self.set_base_options()
         self._set_up_optimiser()

-        # Throw an warning if any options remain
+        # Throw a warning if any options remain
         if self.unset_options:
             warnings.warn(
                 f"Unrecognised keyword arguments: {self.unset_options} will not be used.",
diff --git a/pybop/parameters/parameter.py b/pybop/parameters/parameter.py
index 5631dfbc0..370abc276 100644
--- a/pybop/parameters/parameter.py
+++ b/pybop/parameters/parameter.py
@@ -56,6 +56,9 @@ def __init__(
         self.value = initial_value
         self.transformation = transformation
         self.applied_prior_bounds = False
+        self.bounds = None
+        self.lower_bounds = None
+        self.upper_bounds = None
         self.set_bounds(bounds)
         self.margin = 1e-4
@@ -162,17 +165,9 @@ def set_bounds(self, bounds=None, boundary_multiplier=6):
         if bounds is not None:
             if bounds[0] >= bounds[1]:
                 raise ValueError("Lower bound must be less than upper bound")
-            elif self.transformation is not None:
-                self.lower_bound = np.ndarray.item(
-                    self.transformation.to_search(bounds[0])
-                )
-                self.upper_bound = np.ndarray.item(
-                    self.transformation.to_search(bounds[1])
-                )
             else:
                 self.lower_bound = bounds[0]
                 self.upper_bound = bounds[1]
-
         elif self.prior is not None:
             self.applied_prior_bounds = True
             self.lower_bound = self.prior.mean - boundary_multiplier * self.prior.sigma
@@ -181,6 +176,13 @@
         else:
             self.bounds = None
             return
+        if self.transformation is not None:
+            self.lower_bound = np.ndarray.item(
+                self.transformation.to_search(self.lower_bound)
+            )
+            self.upper_bound = np.ndarray.item(
+                self.transformation.to_search(self.upper_bound)
+            )

         self.bounds = [self.lower_bound, self.upper_bound]
@@ -409,6 +411,12 @@ def get_sigma0(self) -> list:
             sigma0 = None
         return sigma0

+    def priors(self) -> list:
+        """
+        Return the prior distribution of each parameter.
+        """
+        return [param.prior for param in self.param.values()]
+
     def initial_value(self) -> np.ndarray:
         """
         Return the initial value of each parameter.
@@ -541,7 +549,9 @@ def verify(self, inputs: Optional[Inputs] = None):
         """
         if inputs is None or isinstance(inputs, dict):
             return inputs
-        elif (isinstance(inputs, list) and all(is_numeric(x) for x in inputs)) or all(
+        if isinstance(inputs, np.ndarray) and inputs.ndim == 0:
+            inputs = inputs[np.newaxis]
+        if (isinstance(inputs, list) and all(is_numeric(x) for x in inputs)) or all(
             is_numeric(x) for x in list(inputs)
         ):
             return self.as_dict(inputs)
diff --git a/pybop/parameters/priors.py b/pybop/parameters/priors.py
index 10472e17f..9e68cf008 100644
--- a/pybop/parameters/priors.py
+++ b/pybop/parameters/priors.py
@@ -1,3 +1,5 @@
+from typing import Union
+
 import numpy as np
 import scipy.stats as stats

@@ -56,6 +58,38 @@ def logpdf(self, x):
         """
         return self.prior.logpdf(x, loc=self.loc, scale=self.scale)

+    def icdf(self, q):
+        """
+        Calculates the inverse cumulative distribution function (CDF) of the distribution at q.
+
+        Parameters
+        ----------
+        q : float
+            The point(s) at which to evaluate the inverse CDF.
+
+        Returns
+        -------
+        float
+            The inverse cumulative distribution function value at q.
+        """
+        return self.prior.ppf(q, scale=self.scale, loc=self.loc)
+
+    def cdf(self, x):
+        """
+        Calculates the cumulative distribution function (CDF) of the distribution at x.
+
+        Parameters
+        ----------
+        x : float
+            The point(s) at which to evaluate the CDF.
+
+        Returns
+        -------
+        float
+            The cumulative distribution function value at x.
+        """
+        return self.prior.cdf(x, scale=self.scale, loc=self.loc)
+
     def rvs(self, size=1, random_state=None):
         """
         Generates random variates from the distribution.
@@ -90,6 +124,66 @@ def rvs(self, size=1, random_state=None):
             loc=self.loc, scale=self.scale, size=size, random_state=random_state
         )

+    def __call__(self, x):
+        """
+        Evaluates the distribution at x.
+
+        Parameters
+        ----------
+        x : float
+            The point(s) at which to evaluate the distribution.
+
+        Returns
+        -------
+        float
+            The value(s) of the distribution at x.
+        """
+        inputs = self.verify(x)
+        return self.logpdf(inputs)
+
+    def logpdfS1(self, x):
+        """
+        Evaluates the first derivative of the distribution at x.
+
+        Parameters
+        ----------
+        x : float
+            The point(s) at which to evaluate the first derivative.
+
+        Returns
+        -------
+        float
+            The value(s) of the first derivative at x.
+        """
+        inputs = self.verify(x)
+        return self._logpdfS1(inputs)
+
+    def _logpdfS1(self, x):
+        """
+        Evaluates the first derivative of the distribution at x.
+
+        Parameters
+        ----------
+        x : float
+            The point(s) at which to evaluate the first derivative.
+
+        Returns
+        -------
+        float
+            The value(s) of the first derivative at x.
+        """
+        raise NotImplementedError
+
+    def verify(self, x):
+        """
+        Verifies that the input is a numpy array and converts it if necessary.
+        """
+        if isinstance(x, dict):
+            x = np.asarray(list(x.values()))
+        elif not isinstance(x, np.ndarray):
+            x = np.asarray(x)
+        return x
+
     def __repr__(self):
         """
         Returns a string representation of the object.
@@ -137,10 +231,31 @@ class Gaussian(BasePrior):
     """

     def __init__(self, mean, sigma, random_state=None):
+        super().__init__()
         self.name = "Gaussian"
         self.loc = mean
         self.scale = sigma
         self.prior = stats.norm
+        self._offset = -0.5 * np.log(2 * np.pi * self.scale**2)
+        self.sigma2 = self.scale**2
+        self._multip = -1 / (2.0 * self.sigma2)
+        self._n_parameters = 1
+
+    def _logpdfS1(self, x):
+        """
+        Evaluates the first derivative of the Gaussian (log) distribution at x.
+
+        Parameters
+        ----------
+        x : float
+            The point(s) at which to evaluate the first derivative.
+
+        Returns
+        -------
+        float
+            The value(s) of the first derivative at x.
+        """
+        # d/dx log N(x; loc, scale) = (loc - x) / scale**2
+        return self(x), (self.loc - x) / self.sigma2


 class Uniform(BasePrior):
     """
@@ -159,12 +274,32 @@ class Uniform(BasePrior):
     """

     def __init__(self, lower, upper, random_state=None):
+        super().__init__()
         self.name = "Uniform"
         self.lower = lower
         self.upper = upper
         self.loc = lower
         self.scale = upper - lower
         self.prior = stats.uniform
+        self._n_parameters = 1
+
+    def _logpdfS1(self, x):
+        """
+        Evaluates the first derivative of the log uniform distribution at x.
+
+        Parameters
+        ----------
+        x : float
+            The point(s) at which to evaluate the first derivative.
+
+        Returns
+        -------
+        float
+            The value(s) of the first derivative at x.
+        """
+        log_pdf = self.__call__(x)
+        dlog_pdf = np.zeros_like(x)
+        return log_pdf, dlog_pdf

     @property
     def mean(self):
@@ -195,7 +330,107 @@ class Exponential(BasePrior):
     """

     def __init__(self, scale, loc=0, random_state=None):
+        super().__init__()
         self.name = "Exponential"
         self.loc = loc
         self.scale = scale
         self.prior = stats.expon
+        self._n_parameters = 1
+
+    def _logpdfS1(self, x):
+        """
+        Evaluates the first derivative of the log exponential distribution at x.
+
+        Parameters
+        ----------
+        x : float
+            The point(s) at which to evaluate the first derivative.
+
+        Returns
+        -------
+        float
+            The value(s) of the first derivative at x.
+        """
+        log_pdf = self.__call__(x)
+        dlog_pdf = -1 / self.scale * np.ones_like(x)
+        return log_pdf, dlog_pdf
+
+
+class JointLogPrior(BasePrior):
+    """
+    Represents a joint prior distribution composed of multiple prior distributions.
+
+    Parameters
+    ----------
+    priors : BasePrior
+        One or more prior distributions to combine into a joint distribution.
+    """
+
+    def __init__(self, *priors: BasePrior):
+        super().__init__()
+
+        if not all(isinstance(prior, BasePrior) for prior in priors):
+            raise ValueError("All priors must be instances of BasePrior")
+
+        self._n_parameters = len(priors)
+        self._priors: list[BasePrior] = list(priors)
+
+    def logpdf(self, x: Union[float, np.ndarray]) -> float:
+        """
+        Evaluates the joint log-prior distribution at a given point.
+
+        Parameters
+        ----------
+        x : Union[float, np.ndarray]
+            The point(s) at which to evaluate the distribution. The length of `x`
+            should match the total number of parameters in the joint distribution.
+
+        Returns
+        -------
+        float
+            The joint log-probability density of the distribution at `x`.
+        """
+        if len(x) != self._n_parameters:
+            raise ValueError(
+                f"Input x must have length {self._n_parameters}, got {len(x)}"
+            )
+
+        return sum(prior(xi) for prior, xi in zip(self._priors, x))
+
+    def _logpdfS1(self, x: Union[float, np.ndarray]) -> tuple[float, np.ndarray]:
+        """
+        Evaluates the first derivative of the joint log-prior distribution at a given point.
+
+        Parameters
+        ----------
+        x : Union[float, np.ndarray]
+            The point(s) at which to evaluate the first derivative. The length of `x`
+            should match the total number of parameters in the joint distribution.
+
+        Returns
+        -------
+        tuple[float, np.ndarray]
+            A tuple containing the log-probability density and its first derivative at `x`.
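+
+        Examples
+        --------
+        A minimal sketch via the public ``logpdfS1`` method (any two
+        single-parameter priors will do):
+
+        >>> joint = JointLogPrior(Gaussian(0.5, 0.1), Uniform(0, 1))
+        >>> logp, dlogp = joint.logpdfS1([0.45, 0.5])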
+ """ + if len(x) != self._n_parameters: + raise ValueError( + f"Input x must have length {self._n_parameters}, got {len(x)}" + ) + + output = 0 + doutput = np.zeros(self._n_parameters) + index = 0 + + for prior in self._priors: + num_params = 1 + x_subset = x[index : index + num_params] + p, dp = prior.logpdfS1(x_subset) + output += p + doutput[index : index + num_params] = dp + index += num_params + + return output, doutput + + def __repr__(self) -> str: + priors_repr = ", ".join([repr(prior) for prior in self._priors]) + return f"{self.__class__.__name__}(priors: [{priors_repr}])" diff --git a/pybop/samplers/__init__.py b/pybop/samplers/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/pybop/samplers/base_pints_sampler.py b/pybop/samplers/base_pints_sampler.py new file mode 100644 index 000000000..76446e198 --- /dev/null +++ b/pybop/samplers/base_pints_sampler.py @@ -0,0 +1,293 @@ +import logging +from functools import partial +from typing import Optional + +import numpy as np +from pints import ( + MultiSequentialEvaluator, + ParallelEvaluator, + SequentialEvaluator, + SingleChainMCMC, +) + +from pybop import BaseCost, BaseSampler, LogPosterior + + +class BasePintsSampler(BaseSampler): + """ + Base class for PINTS samplers. + + This class extends the BaseSampler class to provide a common interface for + PINTS samplers. The class provides a sample() method that can be used to + sample from the posterior distribution using a PINTS sampler. + """ + + def __init__( + self, + log_pdf: LogPosterior, + sampler, + chains: int = 1, + warm_up=None, + x0=None, + cov0=0.1, + **kwargs, + ): + """ + Initialise the base PINTS sampler. + + Args: + log_pdf (pybop.LogPosterior or List[pybop.LogPosterior]): The distribution(s) to be sampled. + chains (int): Number of chains to be used. + sampler: The sampler class to be used. + x0 (list): Initial states for the chains. + cov0: Initial standard deviation for the chains. + kwargs: Additional keyword arguments. 
+ """ + super().__init__(log_pdf, x0, cov0) + + # Set kwargs + self._max_iterations = kwargs.get("max_iterations", 500) + self._log_to_screen = kwargs.get("log_to_screen", True) + self._log_filename = kwargs.get("log_filename", None) + self._initial_phase_iterations = kwargs.get("initial_phase_iterations", 250) + self._chains_in_memory = kwargs.get("chains_in_memory", True) + self._chain_files = kwargs.get("chain_files", None) + self._evaluation_files = kwargs.get("evaluation_files", None) + self._parallel = kwargs.get("parallel", False) + self._verbose = kwargs.get("verbose", False) + self._iteration = 0 + self._warm_up = warm_up + self.n_parameters = ( + self._log_pdf[0].n_parameters + if isinstance(self._log_pdf, list) + else self._log_pdf.n_parameters + ) + + # Check log_pdf + if isinstance(self._log_pdf, BaseCost): + self._multi_log_pdf = False + else: + if len(self._log_pdf) != chains: + raise ValueError("Number of log pdf's must match number of chains") + + first_pdf_parameters = self._log_pdf[0].n_parameters + for pdf in self._log_pdf: + if not isinstance(pdf, BaseCost): + raise ValueError("All log pdf's must be instances of BaseCost") + if pdf.n_parameters != first_pdf_parameters: + raise ValueError( + "All log pdf's must have the same number of parameters" + ) + + self._multi_log_pdf = True + + # Number of chains + self._n_chains = chains + if self._n_chains < 1: + raise ValueError("Number of chains must be greater than 0") + + # Check initial conditions + if self._x0.size != self.n_parameters: + raise ValueError("x0 must have the same number of parameters as log_pdf") + if len(self._x0) != self._n_chains or len(self._x0) == 1: + self._x0 = np.tile(self._x0, (self._n_chains, 1)) + + # Single chain vs multiple chain samplers + self._single_chain = issubclass(sampler, SingleChainMCMC) + + # Construct the samplers object + if self._single_chain: + self._n_samplers = self._n_chains + self._samplers = [sampler(x0, sigma0=self._cov0) for x0 in self._x0] + else: + self._n_samplers = 1 + self._samplers = [sampler(self._n_chains, self._x0, self._cov0)] + + # Check for sensitivities from sampler and set evaluation + self._needs_sensitivities = self._samplers[0].needs_sensitivities() + + # Check initial phase + self._initial_phase = self._samplers[0].needs_initial_phase() + if self._initial_phase: + self.set_initial_phase_iterations() + + # Parallelisation (Might be able to move into parent class) + self._n_workers = 1 + self.set_parallel(self._parallel) + + def run(self) -> Optional[np.ndarray]: + """ + Executes the Monte Carlo sampling process and generates samples + from the posterior distribution. + + This method orchestrates the entire sampling process, managing + iterations, evaluations, logging, and stopping criteria. It + initialises the necessary structures, handles both single and + multi-chain scenarios, and manages parallel or sequential + evaluation based on the configuration. + + Returns: + np.ndarray: A numpy array containing the samples from the + posterior distribution if chains are stored in memory, + otherwise returns None. + + Raises: + ValueError: If no stopping criterion is set (i.e., + _max_iterations is None). + + Details: + - Checks and ensures at least one stopping criterion is set. + - Initialises iterations, evaluations, and other required + structures. + - Sets up the evaluator (parallel or sequential) based on the + configuration. + - Handles the initial phase, if applicable, and manages + intermediate steps in the sampling process. 
+            - Logs progress and relevant information based on the logging
+              configuration.
+            - Iterates through the sampling process, evaluating the log
+              PDF, updating chains, and managing the stopping criteria.
+            - Finalises and returns the collected samples, or None if
+              chains are not stored in memory.
+        """
+
+        self._initialise_logging()
+        self._check_stopping_criteria()
+
+        # Initialise iterations and evaluations
+        self._iteration = 0
+
+        evaluator = self._create_evaluator()
+        self._check_initial_phase()
+        self._initialise_storage()
+
+        running = True
+        while running:
+            if (
+                self._initial_phase
+                and self._iteration == self._initial_phase_iterations
+            ):
+                self._end_initial_phase()
+
+            xs = self._ask_for_samples()
+            self.fxs = evaluator.evaluate(xs)
+            self._evaluations += len(self.fxs)
+
+            if self._single_chain:
+                self._process_single_chain()
+                self._intermediate_step = min(self._n_samples) <= self._iteration
+            else:
+                self._process_multi_chain()
+
+            if self._intermediate_step:
+                continue
+
+            self._iteration += 1
+            if self._log_to_screen and self._verbose:
+                logging.info(f"Iteration: {self._iteration}")  # TODO: Add more info
+
+            if self._max_iterations and self._iteration >= self._max_iterations:
+                running = False
+
+        self._finalise_logging()
+
+        if self._warm_up:
+            self._samples = self._samples[:, self._warm_up :, :]
+
+        return self._samples if self._chains_in_memory else None
+
+    def _process_single_chain(self):
+        self.fxs_iterator = iter(self.fxs)
+        for i in list(self._active):
+            reply = self._samplers[i].tell(next(self.fxs_iterator))
+            if reply:
+                y, fy, accepted = reply
+                y_store = self._inverse_transform(
+                    y, self._log_pdf[i] if self._multi_log_pdf else self._log_pdf
+                )
+                if self._chains_in_memory:
+                    self._samples[i][self._n_samples[i]] = y_store
+                else:
+                    self._samples[i] = y_store
+
+                if accepted:
+                    self._sampled_logpdf[i] = (
+                        fy[0] if self._needs_sensitivities else fy
+                    )  # Not storing sensitivities
+                    if self._prior:
+                        self._sampled_prior[i] = self._prior(y)
+
+                e = self._sampled_logpdf[i]
+                if self._prior:
+                    e = [
+                        e,
+                        self._sampled_logpdf[i] - self._sampled_prior[i],
+                        self._sampled_prior[i],
+                    ]
+
+                self._evaluations[i][self._n_samples[i]] = e
+                self._n_samples[i] += 1
+                if self._n_samples[i] == self._max_iterations:
+                    self._active.remove(i)
+
+    def _process_multi_chain(self):
+        reply = self._samplers[0].tell(self.fxs)
+        self._intermediate_step = reply is None
+        if reply:
+            ys, fys, accepted = reply
+            ys_store = np.asarray(
+                [self._inverse_transform(y, self._log_pdf) for y in ys]
+            )
+            if self._chains_in_memory:
+                self._samples[:, self._iteration] = ys_store
+            else:
+                self._samples = ys_store
+
+            es = []
+            for i, _y in enumerate(ys):
+                if accepted[i]:
+                    self._sampled_logpdf[i] = (
+                        fys[0][i] if self._needs_sensitivities else fys[i]
+                    )
+                    if self._prior:
+                        self._sampled_prior[i] = self._prior(ys[i])
+                e = self._sampled_logpdf[i]
+                if self._prior:
+                    e = [
+                        e,
+                        self._sampled_logpdf[i] - self._sampled_prior[i],
+                        self._sampled_prior[i],
+                    ]
+                es.append(e)
+
+            for i, e in enumerate(es):
+                self._evaluations[i, self._iteration] = e
+
+    def _check_stopping_criteria(self):
+        has_stopping_criterion = False
+        has_stopping_criterion |= self._max_iterations is not None
+        if not has_stopping_criterion:
+            raise ValueError("At least one stopping criterion must be set.")
+
+    def _create_evaluator(self):
+        f = self._log_pdf
+        # Check for sensitivities from sampler and set evaluator
+        if self._needs_sensitivities:
+            if not self._multi_log_pdf:
+                f = partial(f, calculate_grad=True)
+            else:
+                f = [partial(pdf, calculate_grad=True) for pdf in f]
+
+        if self._parallel:
+            if not self._multi_log_pdf:
+                self._n_workers = min(self._n_workers, self._n_chains)
+            return ParallelEvaluator(f, n_workers=self._n_workers)
+        else:
+            return (
+                SequentialEvaluator(f)
+                if not self._multi_log_pdf
+                else MultiSequentialEvaluator(f)
+            )
+
+    def _inverse_transform(self, y, log_pdf):
+        return log_pdf.transformation.to_model(y) if log_pdf.transformation else y
diff --git a/pybop/samplers/base_sampler.py b/pybop/samplers/base_sampler.py
new file mode 100644
index 000000000..1766759bf
--- /dev/null
+++ b/pybop/samplers/base_sampler.py
@@ -0,0 +1,165 @@
+import logging
+from typing import Union
+
+import numpy as np
+from pints import ParallelEvaluator
+
+from pybop import LogPosterior, Parameters
+
+
+class BaseSampler:
+    """
+    Base class for Monte Carlo samplers.
+    """
+
+    def __init__(self, log_pdf: LogPosterior, x0, cov0: Union[np.ndarray, float]):
+        """
+        Initialise the base sampler.
+
+        Parameters
+        ----------
+        log_pdf (pybop.LogPosterior or List[pybop.LogPosterior]): The posterior or PDF to be sampled.
+        x0: List-like initial condition for Monte Carlo sampling.
+        cov0: Initial covariance (or scalar standard deviation) for the proposal distribution.
+        """
+        self._log_pdf = log_pdf
+        self._cov0 = cov0
+
+        # Set up parameters based on log_pdf
+        self.parameters = (
+            log_pdf.parameters if isinstance(log_pdf, LogPosterior) else Parameters()
+        )
+
+        # Initialise x0
+        self._x0 = (
+            self.parameters.initial_value()
+            if x0 is None
+            else np.asarray([x0], dtype=float)
+        )
+
+    def run(self) -> np.ndarray:
+        """
+        Sample from the posterior distribution.
+
+        Returns:
+            np.ndarray: Samples from the posterior distribution.
+        """
+        raise NotImplementedError
+
+    def set_initial_phase_iterations(self, iterations=250):
+        """
+        Set the number of iterations for the initial phase of the sampler.
+
+        Args:
+            iterations (int): Number of iterations for the initial phase.
+        """
+        self._initial_phase_iterations = iterations
+
+    def set_max_iterations(self, iterations=500):
+        """
+        Set the maximum number of iterations for the sampler.
+
+        Args:
+            iterations (int): Maximum number of iterations.
+        """
+        iterations = int(iterations)
+        if iterations < 1:
+            raise ValueError("Number of iterations must be greater than 0")
+
+        self._max_iterations = iterations
+
+    def set_parallel(self, parallel=False):
+        """
+        Enable or disable parallel evaluation.
+        Credit: PINTS
+
+        Parameters
+        ----------
+        parallel : bool or int, optional
+            If True, use as many worker processes as there are CPU cores. If an integer, use that many workers.
+            If False or 0, disable parallelism (default: False).
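+
+        Examples
+        --------
+        >>> sampler.set_parallel(True)   # one worker per CPU core
+        >>> sampler.set_parallel(4)      # exactly four worker processes
+        >>> sampler.set_parallel(False)  # sequential evaluation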
+ """ + if parallel is True: + self._parallel = True + self._n_workers = ParallelEvaluator.cpu_count() + elif parallel >= 1: + self._parallel = True + self._n_workers = int(parallel) + else: + self._parallel = False + self._n_workers = 1 + + def _ask_for_samples(self): + if self._single_chain: + return [self._samplers[i].ask() for i in self._active] + else: + return self._samplers[0].ask() + + def _check_initial_phase(self): + # Set initial phase if needed + if self._initial_phase: + for sampler in self._samplers: + sampler.set_initial_phase(True) + + def _end_initial_phase(self): + for sampler in self._samplers: + sampler.set_initial_phase(False) + if self._log_to_screen: + logging.info("Initial phase completed.") + + def _initialise_storage(self): + self._prior = None + if isinstance(self._log_pdf, LogPosterior): + self._prior = self._log_pdf.prior() + + # Storage of the received samples + self._sampled_logpdf = np.zeros(self._n_chains) + self._sampled_prior = np.zeros(self._n_chains) + + # Pre-allocate arrays for chain storage + self._samples = np.zeros( + (self._n_chains, self._max_iterations, self.n_parameters) + ) + + # Pre-allocate arrays for evaluation storage + if self._prior: + # Store posterior, likelihood, prior + self._evaluations = np.zeros((self._n_chains, self._max_iterations, 3)) + else: + # Store pdf + self._evaluations = np.zeros((self._n_chains, self._max_iterations)) + + # From PINTS: + # Some samplers need intermediate steps, where `None` is returned instead + # of a sample. But samplers can run asynchronously, so that one can return + # `None` while another returns a sample. To deal with this, we maintain a + # list of 'active' samplers that have not reached `max_iterations`, + # and store the number of samples so far in each chain. + if self._single_chain: + self._active = list(range(self._n_chains)) + self._n_samples = [0] * self._n_chains + + def _initialise_logging(self): + logging.basicConfig(format="%(message)s", level=logging.INFO) + + if self._log_to_screen: + logging.info("Using " + str(self._samplers[0].name())) + logging.info("Generating " + str(self._n_chains) + " chains.") + if self._parallel: + logging.info( + f"Running in parallel with {self._n_workers} worker processes." + ) + else: + logging.info("Running in sequential mode.") + if self._chain_files: + logging.info("Writing chains to " + self._chain_files[0] + " etc.") + if self._evaluation_files: + logging.info( + "Writing evaluations to " + self._evaluation_files[0] + " etc." + ) + + def _finalise_logging(self): + if self._log_to_screen: + logging.info( + f"Halting: Maximum number of iterations ({self._iteration}) reached." + ) diff --git a/pybop/samplers/mcmc_sampler.py b/pybop/samplers/mcmc_sampler.py new file mode 100644 index 000000000..933c36957 --- /dev/null +++ b/pybop/samplers/mcmc_sampler.py @@ -0,0 +1,105 @@ +from pybop import AdaptiveCovarianceMCMC + + +class MCMCSampler: + """ + A high-level class for MCMC sampling. + + This class provides an alternative API to the `PyBOP.Sampler()` API, + specifically allowing for single user-friendly interface for the + optimisation process. + """ + + def __init__( + self, + log_pdf, + chains, + sampler=AdaptiveCovarianceMCMC, + x0=None, + cov0=None, + **kwargs, + ): + """ + Initialize the MCMCSampler. + + Parameters + ---------- + log_pdf : pybop.BaseCost + The log-probability density function to be sampled. + chains : int + The number of MCMC chains to be run. + sampler : pybop.MCMCSampler, optional + The MCMC sampler class to be used. 
+            Defaults to `pybop.AdaptiveCovarianceMCMC`.
+        x0 : np.ndarray, optional
+            Initial positions for the MCMC chains. Defaults to None.
+        cov0 : np.ndarray, optional
+            Initial step sizes for the MCMC chains. Defaults to None.
+        **kwargs : dict
+            Additional keyword arguments to pass to the sampler.
+
+        Raises
+        ------
+        ValueError
+            If the sampler could not be constructed due to an exception.
+        """
+
+        self.sampler = sampler(log_pdf, chains, x0=x0, cov0=cov0, **kwargs)
+
+    def run(self):
+        """
+        Run the MCMC sampling process.
+
+        Returns
+        -------
+        list
+            The result of the sampling process.
+        """
+        return self.sampler.run()
+
+    def __getattr__(self, attr):
+        """
+        Delegate attribute access to the underlying sampler if the attribute
+        is not found in the MCMCSampler instance.
+
+        Parameters
+        ----------
+        attr : str
+            The attribute name to be accessed.
+
+        Returns
+        -------
+        Any
+            The attribute value from the underlying sampler.
+
+        Raises
+        ------
+        AttributeError
+            If the attribute is not found in both the MCMCSampler instance
+            and the underlying sampler.
+        """
+        if "sampler" in self.__dict__ and hasattr(self.sampler, attr):
+            return getattr(self.sampler, attr)
+        raise AttributeError(
+            f"'{self.__class__.__name__}' object has no attribute '{attr}'"
+        )
+
+    def __setattr__(self, name: str, value) -> None:
+        """
+        Delegate attribute setting to the underlying sampler if the attribute
+        exists in the sampler and not in the MCMCSampler instance.
+
+        Parameters
+        ----------
+        name : str
+            The attribute name to be set.
+        value : Any
+            The value to be set to the attribute.
+        """
+        if (
+            name in self.__dict__
+            or "sampler" not in self.__dict__
+            or not hasattr(self.sampler, name)
+        ):
+            object.__setattr__(self, name, value)
+        else:
+            setattr(self.sampler, name, value)
diff --git a/pybop/samplers/pints_samplers.py b/pybop/samplers/pints_samplers.py
new file mode 100644
index 000000000..481a5eaeb
--- /dev/null
+++ b/pybop/samplers/pints_samplers.py
@@ -0,0 +1,607 @@
+from pints import MALAMCMC as PintsMALAMCMC
+from pints import AdaptiveCovarianceMCMC as PintsAdaptiveCovarianceMCMC
+from pints import DifferentialEvolutionMCMC as PintsDifferentialEvolutionMCMC
+from pints import DramACMC as PintsDramACMC
+from pints import DreamMCMC as PintsDREAM
+from pints import EmceeHammerMCMC as PintsEmceeHammerMCMC
+from pints import HaarioACMC as PintsHaarioACMC
+from pints import HaarioBardenetACMC as PintsHaarioBardenetACMC
+from pints import HamiltonianMCMC as PintsHamiltonianMCMC
+from pints import MetropolisRandomWalkMCMC as PintsMetropolisRandomWalkMCMC
+from pints import MonomialGammaHamiltonianMCMC as PintsMonomialGammaHamiltonianMCMC
+from pints import NoUTurnMCMC
+from pints import PopulationMCMC as PintsPopulationMCMC
+from pints import RaoBlackwellACMC as PintsRaoBlackwellACMC
+from pints import RelativisticMCMC as PintsRelativisticMCMC
+from pints import SliceDoublingMCMC as PintsSliceDoublingMCMC
+from pints import SliceRankShrinkingMCMC as PintsSliceRankShrinkingMCMC
+from pints import SliceStepoutMCMC as PintsSliceStepoutMCMC
+
+from pybop import BasePintsSampler
+
+
+class NUTS(BasePintsSampler):
+    """
+    Implements the No-U-Turn Sampler (NUTS) algorithm.
+
+    This class extends the NUTS sampler from the PINTS library.
+    NUTS is a Markov chain Monte Carlo (MCMC) method for sampling
+    from a probability distribution. It extends the Hamiltonian
+    Monte Carlo (HMC) method by choosing the integration time
+    dynamically, exploring the parameter space more efficiently.
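+
+    As a gradient-based method, NUTS requires the supplied log-PDF to
+    support sensitivity evaluation (``calculate_grad=True``), which
+    ``pybop.LogPosterior`` provides.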
+
+    Parameters
+    ----------
+    log_pdf : (pybop.LogPosterior or List[pybop.LogPosterior])
+        A function that calculates the log-probability density.
+    chains : int
+        The number of chains to run.
+    x0 : ndarray, optional
+        Initial positions for the chains.
+    cov0 : ndarray, optional
+        Initial covariance matrix.
+    **kwargs
+        Additional arguments to pass to the NUTS sampler.
+    """
+
+    def __init__(self, log_pdf, chains, x0=None, cov0=None, **kwargs):
+        super().__init__(
+            log_pdf, NoUTurnMCMC, chains=chains, x0=x0, cov0=cov0, **kwargs
+        )
+
+
+class DREAM(BasePintsSampler):
+    """
+    Implements the DiffeRential Evolution Adaptive Metropolis (DREAM) algorithm.
+
+    This class extends the DREAM sampler from the PINTS library.
+    DREAM is a Markov chain Monte Carlo (MCMC) method for sampling
+    from a probability distribution. It combines the Differential
+    Evolution (DE) algorithm with the Adaptive Metropolis (AM) algorithm
+    to explore the parameter space more efficiently.
+
+    Parameters
+    ----------
+    log_pdf : (pybop.LogPosterior or List[pybop.LogPosterior])
+        A function that calculates the log-probability density.
+    chains : int
+        The number of chains to run.
+    x0 : ndarray, optional
+        Initial positions for the chains.
+    cov0 : ndarray, optional
+        Initial covariance matrix.
+    **kwargs
+        Additional arguments to pass to the DREAM sampler.
+    """
+
+    def __init__(self, log_pdf, chains, x0=None, cov0=None, **kwargs):
+        super().__init__(log_pdf, PintsDREAM, chains=chains, x0=x0, cov0=cov0, **kwargs)
+
+
+class AdaptiveCovarianceMCMC(BasePintsSampler):
+    """
+    Implements the Adaptive Covariance Markov Chain Monte Carlo (MCMC) algorithm.
+
+    This class extends the Adaptive Covariance MCMC sampler from the PINTS library.
+    This MCMC method adapts the proposal distribution covariance matrix
+    during the sampling process to improve efficiency and convergence.
+
+    Parameters
+    ----------
+    log_pdf : (pybop.LogPosterior or List[pybop.LogPosterior])
+        A function that calculates the log-probability density.
+    chains : int
+        The number of chains to run.
+    x0 : ndarray, optional
+        Initial positions for the chains.
+    cov0 : ndarray, optional
+        Initial covariance matrix.
+    **kwargs
+        Additional arguments to pass to the Adaptive Covariance MCMC sampler.
+    """
+
+    def __init__(self, log_pdf, chains, x0=None, cov0=None, **kwargs):
+        super().__init__(
+            log_pdf,
+            PintsAdaptiveCovarianceMCMC,
+            chains=chains,
+            x0=x0,
+            cov0=cov0,
+            **kwargs,
+        )
+
+
+class DifferentialEvolutionMCMC(BasePintsSampler):
+    """
+    Implements the Differential Evolution Markov Chain Monte Carlo (MCMC) algorithm.
+
+    This class extends the Differential Evolution MCMC sampler from the PINTS library.
+    This MCMC method uses the Differential Evolution algorithm to explore the
+    parameter space more efficiently by evolving a population of chains.
+
+    Parameters
+    ----------
+    log_pdf : (pybop.LogPosterior or List[pybop.LogPosterior])
+        A function that calculates the log-probability density.
+    chains : int
+        The number of chains to run.
+    x0 : ndarray, optional
+        Initial positions for the chains.
+    cov0 : ndarray, optional
+        Initial covariance matrix.
+    **kwargs
+        Additional arguments to pass to the Differential Evolution MCMC sampler.
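+
+    Examples
+    --------
+    An illustrative construction, mirroring ``examples/scripts/mcmc_example.py``
+    (``posterior`` is a previously built ``pybop.LogPosterior``):
+
+    >>> sampler = pybop.DifferentialEvolutionMCMC(
+    ...     posterior, chains=3, max_iterations=300, warm_up=100
+    ... )
+    >>> chains = sampler.run()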
+ """ + + def __init__(self, log_pdf, chains, x0=None, cov0=None, **kwargs): + super().__init__( + log_pdf, + PintsDifferentialEvolutionMCMC, + chains=chains, + x0=x0, + cov0=cov0, + **kwargs, + ) + + +class DramACMC(BasePintsSampler): + """ + Implements the Delayed Rejection Adaptive Metropolis (DRAM) Adaptive Covariance Markov Chain + Monte Carlo (MCMC) algorithm. + + This class extends the DRAM Adaptive Covariance MCMC sampler from the PINTS library. + This MCMC method combines Delayed Rejection with Adaptive Metropolis to enhance + the efficiency and robustness of the sampling process. + + Parameters + ---------- + log_pdf : (pybop.LogPosterior or List[pybop.LogPosterior]) + A function that calculates the log-probability density. + chains : int + The number of chains to run. + x0 : ndarray, optional + Initial positions for the chains. + cov0 : ndarray, optional + Initial covariance matrix. + **kwargs + Additional arguments to pass to the DRAM Adaptive Covariance MCMC sampler. + """ + + def __init__(self, log_pdf, chains, x0=None, cov0=None, **kwargs): + super().__init__( + log_pdf, + PintsDramACMC, + chains=chains, + x0=x0, + cov0=cov0, + **kwargs, + ) + + +class EmceeHammerMCMC(BasePintsSampler): + """ + Implements the Emcee Hammer Markov Chain Monte Carlo (MCMC) algorithm. + + This class extends the Emcee Hammer MCMC sampler from the PINTS library. + The Emcee Hammer is an affine-invariant ensemble sampler for MCMC, which is + particularly effective for high-dimensional parameter spaces. + + Parameters + ---------- + log_pdf : (pybop.LogPosterior or List[pybop.LogPosterior]) + A function that calculates the log-probability density. + chains : int + The number of chains to run. + x0 : ndarray, optional + Initial positions for the chains. + cov0 : ndarray, optional + Initial covariance matrix. + **kwargs + Additional arguments to pass to the Emcee Hammer MCMC sampler. + """ + + def __init__(self, log_pdf, chains, x0=None, cov0=None, **kwargs): + super().__init__( + log_pdf, + PintsEmceeHammerMCMC, + chains=chains, + x0=x0, + cov0=cov0, + **kwargs, + ) + + +class HaarioACMC(BasePintsSampler): + """ + Implements the Haario Adaptive Covariance Markov Chain Monte Carlo (MCMC) algorithm. + + This class extends the Haario Adaptive Covariance MCMC sampler from the PINTS library. + This MCMC method adapts the proposal distribution's covariance matrix based on the + history of the chain, improving sampling efficiency and convergence. + + Parameters + ---------- + log_pdf : (pybop.LogPosterior or List[pybop.LogPosterior]) + A function that calculates the log-probability density. + chains : int + The number of chains to run. + x0 : ndarray, optional + Initial positions for the chains. + cov0 : ndarray, optional + Initial covariance matrix. + **kwargs + Additional arguments to pass to the Haario Adaptive Covariance MCMC sampler. + """ + + def __init__(self, log_pdf, chains, x0=None, cov0=None, **kwargs): + super().__init__( + log_pdf, + PintsHaarioACMC, + chains=chains, + x0=x0, + cov0=cov0, + **kwargs, + ) + + +class HaarioBardenetACMC(BasePintsSampler): + """ + Implements the Haario-Bardenet Adaptive Covariance Markov Chain Monte Carlo (MCMC) algorithm. + + This class extends the Haario-Bardenet Adaptive Covariance MCMC sampler from the PINTS library. + This MCMC method combines the adaptive covariance approach with an additional + mechanism to improve performance in high-dimensional parameter spaces. 
+
+    Parameters
+    ----------
+    log_pdf : (pybop.LogPosterior or List[pybop.LogPosterior])
+        A function that calculates the log-probability density.
+    chains : int
+        The number of chains to run.
+    x0 : ndarray, optional
+        Initial positions for the chains.
+    cov0 : ndarray, optional
+        Initial covariance matrix.
+    **kwargs
+        Additional arguments to pass to the Haario-Bardenet Adaptive Covariance MCMC sampler.
+    """
+
+    def __init__(self, log_pdf, chains, x0=None, cov0=None, **kwargs):
+        super().__init__(
+            log_pdf,
+            PintsHaarioBardenetACMC,
+            chains=chains,
+            x0=x0,
+            cov0=cov0,
+            **kwargs,
+        )
+
+
+class HamiltonianMCMC(BasePintsSampler):
+    """
+    Implements the Hamiltonian Markov Chain Monte Carlo (MCMC) algorithm.
+
+    This class extends the Hamiltonian MCMC sampler from the PINTS library.
+    This MCMC method uses Hamiltonian dynamics to propose new states,
+    allowing for efficient exploration of high-dimensional parameter spaces.
+
+    Parameters
+    ----------
+    log_pdf : (pybop.LogPosterior or List[pybop.LogPosterior])
+        A function that calculates the log-probability density.
+    chains : int
+        The number of chains to run.
+    x0 : ndarray, optional
+        Initial positions for the chains.
+    cov0 : ndarray, optional
+        Initial covariance matrix.
+    **kwargs
+        Additional arguments to pass to the Hamiltonian MCMC sampler.
+    """
+
+    def __init__(self, log_pdf, chains, x0=None, cov0=None, **kwargs):
+        super().__init__(
+            log_pdf,
+            PintsHamiltonianMCMC,
+            chains=chains,
+            x0=x0,
+            cov0=cov0,
+            **kwargs,
+        )
+
+
+class MALAMCMC(BasePintsSampler):
+    """
+    Implements the Metropolis Adjusted Langevin Algorithm (MALA) Markov Chain Monte Carlo (MCMC) algorithm.
+
+    This class extends the MALA MCMC sampler from the PINTS library.
+    This MCMC method combines the Metropolis-Hastings algorithm with
+    Langevin dynamics to improve sampling efficiency and convergence.
+
+    Parameters
+    ----------
+    log_pdf : (pybop.LogPosterior or List[pybop.LogPosterior])
+        A function that calculates the log-probability density.
+    chains : int
+        The number of chains to run.
+    x0 : ndarray, optional
+        Initial positions for the chains.
+    cov0 : ndarray, optional
+        Initial covariance matrix.
+    **kwargs
+        Additional arguments to pass to the MALA MCMC sampler.
+    """
+
+    def __init__(self, log_pdf, chains, x0=None, cov0=None, **kwargs):
+        super().__init__(
+            log_pdf,
+            PintsMALAMCMC,
+            chains=chains,
+            x0=x0,
+            cov0=cov0,
+            **kwargs,
+        )
+
+
+class MetropolisRandomWalkMCMC(BasePintsSampler):
+    """
+    Implements the Metropolis Random Walk Markov Chain Monte Carlo (MCMC) algorithm.
+
+    This class extends the Metropolis Random Walk MCMC sampler from the PINTS library.
+    This classic MCMC method uses a simple random walk proposal distribution
+    and the Metropolis-Hastings acceptance criterion.
+
+    Parameters
+    ----------
+    log_pdf : (pybop.LogPosterior or List[pybop.LogPosterior])
+        A function that calculates the log-probability density.
+    chains : int
+        The number of chains to run.
+    x0 : ndarray, optional
+        Initial positions for the chains.
+    cov0 : ndarray, optional
+        Initial covariance matrix.
+    **kwargs
+        Additional arguments to pass to the Metropolis Random Walk MCMC sampler.
+    """
+
+    def __init__(self, log_pdf, chains, x0=None, cov0=None, **kwargs):
+        super().__init__(
+            log_pdf,
+            PintsMetropolisRandomWalkMCMC,
+            chains=chains,
+            x0=x0,
+            cov0=cov0,
+            **kwargs,
+        )
+
+
+class MonomialGammaHamiltonianMCMC(BasePintsSampler):
+    """
+    Implements the Monomial Gamma Hamiltonian Markov Chain Monte Carlo (MCMC) algorithm.
+
+    This class extends the Monomial Gamma Hamiltonian MCMC sampler from the PINTS library.
+    This MCMC method uses Hamiltonian dynamics with a monomial gamma distribution
+    for efficient exploration of the parameter space.
+
+    Parameters
+    ----------
+    log_pdf : (pybop.LogPosterior or List[pybop.LogPosterior])
+        A function that calculates the log-probability density.
+    chains : int
+        The number of chains to run.
+    x0 : ndarray, optional
+        Initial positions for the chains.
+    cov0 : ndarray, optional
+        Initial covariance matrix.
+    **kwargs
+        Additional arguments to pass to the Monomial Gamma Hamiltonian MCMC sampler.
+    """
+
+    def __init__(self, log_pdf, chains, x0=None, cov0=None, **kwargs):
+        super().__init__(
+            log_pdf,
+            PintsMonomialGammaHamiltonianMCMC,
+            chains=chains,
+            x0=x0,
+            cov0=cov0,
+            **kwargs,
+        )
+
+
+class PopulationMCMC(BasePintsSampler):
+    """
+    Implements the Population Markov Chain Monte Carlo (MCMC) algorithm.
+
+    This class extends the Population MCMC sampler from the PINTS library.
+    This MCMC method uses a population of chains at different temperatures
+    to explore the parameter space more efficiently and avoid local minima.
+
+    Parameters
+    ----------
+    log_pdf : (pybop.LogPosterior or List[pybop.LogPosterior])
+        A function that calculates the log-probability density.
+    chains : int
+        The number of chains to run.
+    x0 : ndarray, optional
+        Initial positions for the chains.
+    cov0 : ndarray, optional
+        Initial covariance matrix.
+    **kwargs
+        Additional arguments to pass to the Population MCMC sampler.
+    """
+
+    def __init__(self, log_pdf, chains, x0=None, cov0=None, **kwargs):
+        super().__init__(
+            log_pdf,
+            PintsPopulationMCMC,
+            chains=chains,
+            x0=x0,
+            cov0=cov0,
+            **kwargs,
+        )
+
+
+class RaoBlackwellACMC(BasePintsSampler):
+    """
+    Implements the Rao-Blackwell Adaptive Covariance Markov Chain Monte Carlo (MCMC) algorithm.
+
+    This class extends the Rao-Blackwell Adaptive Covariance MCMC sampler from the PINTS library.
+    This MCMC method improves sampling efficiency by combining Rao-Blackwellisation
+    with adaptive covariance strategies.
+
+    Parameters
+    ----------
+    log_pdf : (pybop.LogPosterior or List[pybop.LogPosterior])
+        A function that calculates the log-probability density.
+    chains : int
+        The number of chains to run.
+    x0 : ndarray, optional
+        Initial positions for the chains.
+    cov0 : ndarray, optional
+        Initial covariance matrix.
+    **kwargs
+        Additional arguments to pass to the Rao-Blackwell Adaptive Covariance MCMC sampler.
+    """
+
+    def __init__(self, log_pdf, chains, x0=None, cov0=None, **kwargs):
+        super().__init__(
+            log_pdf,
+            PintsRaoBlackwellACMC,
+            chains=chains,
+            x0=x0,
+            cov0=cov0,
+            **kwargs,
+        )
+
+
+class RelativisticMCMC(BasePintsSampler):
+    """
+    Implements the Relativistic Markov Chain Monte Carlo (MCMC) algorithm.
+
+    This class extends the Relativistic MCMC sampler from the PINTS library.
+    This MCMC method uses concepts from relativistic mechanics to propose new states,
+    allowing for efficient exploration of the parameter space.
+
+    Parameters
+    ----------
+    log_pdf : (pybop.LogPosterior or List[pybop.LogPosterior])
+        A function that calculates the log-probability density.
+    chains : int
+        The number of chains to run.
+    x0 : ndarray, optional
+        Initial positions for the chains.
+    cov0 : ndarray, optional
+        Initial covariance matrix.
+    **kwargs
+        Additional arguments to pass to the Relativistic MCMC sampler.
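+
+    Examples
+    --------
+    Illustrative usage; as a Hamiltonian-type method this sampler requires
+    gradients, so the supplied ``posterior`` must support ``calculate_grad=True``:
+
+    >>> sampler = pybop.RelativisticMCMC(posterior, chains=1, max_iterations=500)
+    >>> chains = sampler.run()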
+ """ + + def __init__(self, log_pdf, chains, x0=None, cov0=None, **kwargs): + super().__init__( + log_pdf, + PintsRelativisticMCMC, + chains=chains, + x0=x0, + cov0=cov0, + **kwargs, + ) + + +class SliceDoublingMCMC(BasePintsSampler): + """ + Implements the Slice Doubling Markov Chain Monte Carlo (MCMC) algorithm. + + This class extends the Slice Doubling MCMC sampler from the PINTS library. + This MCMC method uses slice sampling with a doubling procedure to propose new states, + allowing for efficient exploration of the parameter space. + + Parameters + ---------- + log_pdf : (pybop.LogPosterior or List[pybop.LogPosterior]) + A function that calculates the log-probability density. + chains : int + The number of chains to run. + x0 : ndarray, optional + Initial positions for the chains. + cov0 : ndarray, optional + Initial covariance matrix. + **kwargs + Additional arguments to pass to the Slice Doubling MCMC sampler. + """ + + def __init__(self, log_pdf, chains, x0=None, cov0=None, **kwargs): + super().__init__( + log_pdf, + PintsSliceDoublingMCMC, + chains=chains, + x0=x0, + cov0=cov0, + **kwargs, + ) + + +class SliceRankShrinkingMCMC(BasePintsSampler): + """ + Implements the Slice Rank Shrinking Markov Chain Monte Carlo (MCMC) algorithm. + + This class extends the Slice Rank Shrinking MCMC sampler from the PINTS library. + This MCMC method uses slice sampling with a rank shrinking procedure to propose new states, + allowing for efficient exploration of the parameter space. + + Parameters + ---------- + log_pdf : (pybop.LogPosterior or List[pybop.LogPosterior]) + A function that calculates the log-probability density. + chains : int + The number of chains to run. + x0 : ndarray, optional + Initial positions for the chains. + cov0 : ndarray, optional + Initial covariance matrix. + **kwargs + Additional arguments to pass to the Slice Rank Shrinking MCMC sampler. + """ + + def __init__(self, log_pdf, chains, x0=None, cov0=None, **kwargs): + super().__init__( + log_pdf, + PintsSliceRankShrinkingMCMC, + chains=chains, + x0=x0, + cov0=cov0, + **kwargs, + ) + + +class SliceStepoutMCMC(BasePintsSampler): + """ + Implements the Slice Stepout Markov Chain Monte Carlo (MCMC) algorithm. + + This class extends the Slice Stepout MCMC sampler from the PINTS library. + This MCMC method uses slice sampling with a stepout procedure to propose new states, + allowing for efficient exploration of the parameter space. + + Parameters + ---------- + log_pdf : (pybop.LogPosterior or List[pybop.LogPosterior]) + A function that calculates the log-probability density. + chains : int + The number of chains to run. + x0 : ndarray, optional + Initial positions for the chains. + cov0 : ndarray, optional + Initial covariance matrix. + **kwargs + Additional arguments to pass to the Slice Stepout MCMC sampler. 
+ """ + + def __init__(self, log_pdf, chains, x0=None, cov0=None, **kwargs): + super().__init__( + log_pdf, + PintsSliceStepoutMCMC, + chains=chains, + x0=x0, + cov0=cov0, + **kwargs, + ) diff --git a/pyproject.toml b/pyproject.toml index a1b892fd0..e94e9742d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -26,7 +26,7 @@ classifiers = [ ] requires-python = ">=3.9, <3.13" dependencies = [ - "pybamm>=24.5", + "pybamm[cite]>=24.5", "numpy>=1.16, <2.0", "scipy>=1.3", "pints>=0.5", diff --git a/tests/integration/test_monte_carlo.py b/tests/integration/test_monte_carlo.py new file mode 100644 index 000000000..6d2a51219 --- /dev/null +++ b/tests/integration/test_monte_carlo.py @@ -0,0 +1,136 @@ +import numpy as np +import pybamm +import pytest + +import pybop +from pybop import ( + DREAM, + DifferentialEvolutionMCMC, + HaarioACMC, + HaarioBardenetACMC, + MetropolisRandomWalkMCMC, + PopulationMCMC, +) + + +class Test_Sampling_SPM: + """ + A class to test the MCMC samplers on a physics-based model. + """ + + @pytest.fixture(autouse=True) + def setup(self): + self.ground_truth = np.clip( + np.asarray([0.55, 0.55]) + np.random.normal(loc=0.0, scale=0.05, size=2), + a_min=0.4, + a_max=0.75, + ) + + @pytest.fixture + def model(self): + parameter_set = pybop.ParameterSet.pybamm("Chen2020") + x = self.ground_truth + parameter_set.update( + { + "Negative electrode active material volume fraction": x[0], + "Positive electrode active material volume fraction": x[1], + } + ) + solver = pybamm.IDAKLUSolver() + return pybop.lithium_ion.SPM(parameter_set=parameter_set, solver=solver) + + @pytest.fixture + def parameters(self): + return pybop.Parameters( + pybop.Parameter( + "Negative electrode active material volume fraction", + prior=pybop.Uniform(0.4, 0.7), + bounds=[0.375, 0.725], + ), + pybop.Parameter( + "Positive electrode active material volume fraction", + prior=pybop.Uniform(0.4, 0.7), + # no bounds + ), + ) + + @pytest.fixture(params=[0.5]) + def init_soc(self, request): + return request.param + + @pytest.fixture( + params=[ + pybop.GaussianLogLikelihoodKnownSigma, + ] + ) + def cost_class(self, request): + return request.param + + def noise(self, sigma, values): + return np.random.normal(0, sigma, values) + + @pytest.fixture + def spm_likelihood(self, model, parameters, cost_class, init_soc): + # Form dataset + solution = self.get_data(model, init_soc) + dataset = pybop.Dataset( + { + "Time [s]": solution["Time [s]"].data, + "Current function [A]": solution["Current [A]"].data, + "Voltage [V]": solution["Voltage [V]"].data + + self.noise(0.002, len(solution["Time [s]"].data)), + } + ) + + # Define the cost to optimise + problem = pybop.FittingProblem(model, parameters, dataset) + return cost_class(problem, sigma0=0.002) + + @pytest.mark.parametrize( + "quick_sampler", + [ + DREAM, + DifferentialEvolutionMCMC, + HaarioACMC, + HaarioBardenetACMC, + MetropolisRandomWalkMCMC, + PopulationMCMC, + ], + ) + @pytest.mark.integration + def test_sampling_spm(self, quick_sampler, spm_likelihood): + posterior = pybop.LogPosterior(spm_likelihood) + + # set common args + common_args = { + "log_pdf": posterior, + "chains": 3, + "warm_up": 250, + "max_iterations": 550, + } + + if issubclass(quick_sampler, DifferentialEvolutionMCMC): + common_args["warm_up"] = 750 + common_args["max_iterations"] = 900 + # construct and run + sampler = quick_sampler(**common_args) + results = sampler.run() + + # Assert both final sample and posterior mean + x = np.mean(results, axis=1) + for i in range(len(x)): + 
+            np.testing.assert_allclose(x[i], self.ground_truth, atol=2.5e-2)
+            np.testing.assert_allclose(results[i][-1], self.ground_truth, atol=2.0e-2)
+
+    def get_data(self, model, init_soc):
+        initial_state = {"Initial SoC": init_soc}
+        experiment = pybop.Experiment(
+            [
+                (
+                    "Discharge at 0.5C for 4 minutes (12 second period)",
+                    "Charge at 0.5C for 4 minutes (12 second period)",
+                ),
+            ]
+        )
+        sim = model.predict(initial_state=initial_state, experiment=experiment)
+        return sim
diff --git a/tests/integration/test_monte_carlo_thevenin.py b/tests/integration/test_monte_carlo_thevenin.py
new file mode 100644
index 000000000..6a9046f62
--- /dev/null
+++ b/tests/integration/test_monte_carlo_thevenin.py
@@ -0,0 +1,146 @@
+import numpy as np
+import pytest
+
+import pybop
+from pybop import (
+    MALAMCMC,
+    NUTS,
+    DramACMC,
+    HamiltonianMCMC,
+    MonomialGammaHamiltonianMCMC,
+    RaoBlackwellACMC,
+    RelativisticMCMC,
+    SliceDoublingMCMC,
+    SliceRankShrinkingMCMC,
+    SliceStepoutMCMC,
+)
+
+
+class TestSamplingThevenin:
+    """
+    A class to test a subset of samplers on the simple Thevenin Model.
+    """
+
+    @pytest.fixture(autouse=True)
+    def setup(self):
+        self.sigma0 = 1e-3
+        self.ground_truth = np.clip(
+            np.asarray([0.05, 0.05]) + np.random.normal(loc=0.0, scale=0.01, size=2),
+            a_min=0.0,
+            a_max=0.1,
+        )
+        self.fast_samplers = [
+            MALAMCMC,
+            RaoBlackwellACMC,
+            SliceDoublingMCMC,
+            SliceStepoutMCMC,
+            DramACMC,
+        ]
+
+    @pytest.fixture
+    def model(self):
+        parameter_set = pybop.ParameterSet(
+            json_path="examples/scripts/parameters/initial_ecm_parameters.json"
+        )
+        parameter_set.import_parameters()
+        parameter_set.params.update(
+            {
+                "C1 [F]": 1000,
+                "R0 [Ohm]": self.ground_truth[0],
+                "R1 [Ohm]": self.ground_truth[1],
+            }
+        )
+
+        return pybop.empirical.Thevenin(parameter_set=parameter_set)
+
+    @pytest.fixture
+    def parameters(self):
+        return pybop.Parameters(
+            pybop.Parameter(
+                "R0 [Ohm]", prior=pybop.Uniform(1e-2, 8e-2), bounds=[1e-2, 8e-2]
+            ),
+            pybop.Parameter(
+                "R1 [Ohm]", prior=pybop.Uniform(1e-2, 8e-2), bounds=[1e-2, 8e-2]
+            ),
+        )
+
+    @pytest.fixture(params=[0.5])
+    def init_soc(self, request):
+        return request.param
+
+    def noise(self, sigma, values):
+        return np.random.normal(0, sigma, values)
+
+    @pytest.fixture
+    def likelihood(self, model, parameters, init_soc):
+        # Form dataset
+        solution = self.get_data(model, init_soc)
+        dataset = pybop.Dataset(
+            {
+                "Time [s]": solution["Time [s]"].data,
+                "Current function [A]": solution["Current [A]"].data,
+                "Voltage [V]": solution["Voltage [V]"].data
+                + self.noise(self.sigma0, len(solution["Time [s]"].data)),
+            }
+        )
+
+        # Define the cost to optimise
+        problem = pybop.FittingProblem(model, parameters, dataset)
+        return pybop.GaussianLogLikelihoodKnownSigma(problem, sigma0=0.0075)
+
+    # Parameterize the samplers
+    @pytest.mark.parametrize(
+        "sampler",
+        [
+            NUTS,
+            HamiltonianMCMC,
+            MonomialGammaHamiltonianMCMC,
+            RelativisticMCMC,
+            SliceRankShrinkingMCMC,
+            MALAMCMC,
+            RaoBlackwellACMC,
+            SliceDoublingMCMC,
+            SliceStepoutMCMC,
+            DramACMC,
+        ],
+    )
+    @pytest.mark.integration
+    def test_sampling_thevenin(self, sampler, likelihood):
+        posterior = pybop.LogPosterior(likelihood)
+
+        # set common args
+        common_args = {
+            "log_pdf": posterior,
+            "chains": 1,
+            "warm_up": 250,
+            "max_iterations": 500,
+            "cov0": [3e-4, 3e-4],
+        }
+        if sampler in self.fast_samplers:
+            common_args["warm_up"] = 600
+            common_args["max_iterations"] = 1200
+
+        # construct and run
+        sampler = sampler(**common_args)
+        if isinstance(sampler, SliceRankShrinkingMCMC):
+            sampler._samplers[0].set_hyper_parameters([1e-3])
+        results = sampler.run()
+
+        # Assert both final sample and posterior mean
+        x = np.mean(results, axis=1)
+        for i in range(len(x)):
+            np.testing.assert_allclose(x[i], self.ground_truth, atol=1.5e-2)
+            np.testing.assert_allclose(results[i][-1], self.ground_truth, atol=1e-2)
+
+    def get_data(self, model, init_soc):
+        initial_state = {"Initial SoC": init_soc}
+        experiment = pybop.Experiment(
+            [
+                (
+                    "Discharge at 0.5C for 2 minutes (4 second period)",
+                    "Rest for 1 minute (4 second period)",
+                ),
+            ]
+        )
+        sim = model.predict(initial_state=initial_state, experiment=experiment)
+        return sim
diff --git a/tests/integration/test_optimisation_options.py b/tests/integration/test_optimisation_options.py
index b2feb7629..258bdc17b 100644
--- a/tests/integration/test_optimisation_options.py
+++ b/tests/integration/test_optimisation_options.py
@@ -100,7 +100,7 @@ def test_optimisation_f_guessed(self, f_guessed, spm_costs):
 
         # Set parallelisation if not on Windows
         if sys.platform != "win32":
-            optim.set_parallel(True)
+            optim.set_parallel(1)
 
         initial_cost = optim.cost(x0)
         x, final_cost = optim.run()
diff --git a/tests/unit/test_likelihoods.py b/tests/unit/test_likelihoods.py
index cd1d676b5..a22c63e35 100644
--- a/tests/unit/test_likelihoods.py
+++ b/tests/unit/test_likelihoods.py
@@ -35,7 +35,7 @@ def parameters(self):
     def experiment(self):
         return pybop.Experiment(
             [
-                ("Discharge at 1C for 10 minutes (20 second period)"),
+                ("Discharge at 1C for 1 minute (5 second period)"),
             ]
         )
 
diff --git a/tests/unit/test_posterior.py b/tests/unit/test_posterior.py
new file mode 100644
index 000000000..7073b94c3
--- /dev/null
+++ b/tests/unit/test_posterior.py
@@ -0,0 +1,121 @@
+import numpy as np
+import pytest
+
+import pybop
+
+
+class TestLogPosterior:
+    """
+    Class for log posterior unit tests
+    """
+
+    @pytest.fixture
+    def model(self):
+        return pybop.lithium_ion.SPM()
+
+    @pytest.fixture
+    def ground_truth(self):
+        return 0.52
+
+    @pytest.fixture
+    def parameters(self, ground_truth):
+        return pybop.Parameter(
+            "Negative electrode active material volume fraction",
+            prior=pybop.Gaussian(0.5, 0.01),
+            bounds=[0.375, 0.625],
+            initial_value=ground_truth,
+        )
+
+    @pytest.fixture
+    def experiment(self):
+        return pybop.Experiment(
+            [
+                ("Discharge at 1C for 1 minute (5 second period)"),
+            ]
+        )
+
+    @pytest.fixture
+    def dataset(self, model, experiment, ground_truth):
+        model._parameter_set = model.pybamm_model.default_parameter_values
+        model._parameter_set.update(
+            {
+                "Negative electrode active material volume fraction": ground_truth,
+            }
+        )
+        solution = model.predict(experiment=experiment)
+        return pybop.Dataset(
+            {
+                "Time [s]": solution["Time [s]"].data,
+                "Current function [A]": solution["Current [A]"].data,
+                "Voltage [V]": solution["Terminal voltage [V]"].data,
+            }
+        )
+
+    @pytest.fixture
+    def one_signal_problem(self, model, parameters, dataset):
+        return pybop.FittingProblem(model, parameters, dataset)
+
+    @pytest.fixture
+    def likelihood(self, one_signal_problem):
+        return pybop.GaussianLogLikelihoodKnownSigma(one_signal_problem, sigma0=0.01)
+
+    @pytest.fixture
+    def prior(self):
+        return pybop.Gaussian(0.5, 0.01)
+
+    @pytest.mark.unit
+    def test_log_posterior_construction(self, likelihood, prior):
+        # Test log posterior construction
+        posterior = pybop.LogPosterior(likelihood, prior)
+
+        assert posterior._log_likelihood == likelihood
+        assert posterior._prior == prior
+
+        # Test log posterior construction without parameters
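+        # Replacing the parameters' priors() method with None below means the
+        # default-prior path in LogPosterior has nothing to call, hence TypeError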
+        likelihood.problem.parameters.priors = None
+
+        with pytest.raises(TypeError, match="'NoneType' object is not callable"):
+            pybop.LogPosterior(likelihood, log_prior=None)
+
+    @pytest.mark.unit
+    def test_log_posterior_construction_no_prior(self, likelihood):
+        # Test log posterior construction without prior
+        posterior = pybop.LogPosterior(likelihood, None)
+
+        assert posterior._prior is not None
+        assert isinstance(posterior._prior, pybop.JointLogPrior)
+
+        for i, p in enumerate(posterior._prior._priors):
+            assert p == posterior._log_likelihood.problem.parameters.priors()[i]
+
+    @pytest.fixture
+    def posterior(self, likelihood, prior):
+        return pybop.LogPosterior(likelihood, prior)
+
+    @pytest.mark.unit
+    def test_log_posterior(self, posterior):
+        # Test log posterior
+        x = np.array([0.50])
+        assert np.allclose(posterior(x), 51.5236, atol=2e-2)
+
+        # Test log posterior evaluateS1
+        p, dp = posterior(x, calculate_grad=True)
+        assert np.allclose(p, 51.5236, atol=2e-2)
+        assert np.allclose(dp, 2.0, atol=2e-2)
+
+        # Get log likelihood and log prior
+        likelihood = posterior.likelihood()
+        prior = posterior.prior()
+
+        assert likelihood == posterior._log_likelihood
+        assert prior == posterior._prior
+
+    @pytest.fixture
+    def posterior_uniform_prior(self, likelihood):
+        return pybop.LogPosterior(likelihood, pybop.Uniform(0.45, 0.55))
+
+    @pytest.mark.unit
+    def test_log_posterior_inf(self, posterior_uniform_prior):
+        # Test prior np.inf
+        assert not np.isfinite(posterior_uniform_prior([1]))
+        assert not np.isfinite(posterior_uniform_prior([1], calculate_grad=True)[0])
diff --git a/tests/unit/test_priors.py b/tests/unit/test_priors.py
index b773049f8..3e9184374 100644
--- a/tests/unit/test_priors.py
+++ b/tests/unit/test_priors.py
@@ -21,13 +21,23 @@ def Uniform(self):
     def Exponential(self):
         return pybop.Exponential(scale=1)
 
+    @pytest.fixture
+    def JointPrior1(self, Gaussian, Uniform):
+        return pybop.JointLogPrior(Gaussian, Uniform)
+
+    @pytest.fixture
+    def JointPrior2(self, Gaussian, Exponential):
+        return pybop.JointLogPrior(Gaussian, Exponential)
+
     @pytest.mark.unit
     def test_base_prior(self):
         base = pybop.BasePrior()
         assert isinstance(base, pybop.BasePrior)
+        with pytest.raises(NotImplementedError):
+            base._logpdfS1(0.0)
 
     @pytest.mark.unit
-    def test_priors(self, Gaussian, Uniform, Exponential):
+    def test_priors(self, Gaussian, Uniform, Exponential, JointPrior1, JointPrior2):
         # Test pdf
         np.testing.assert_allclose(Gaussian.pdf(0.5), 0.3989422804014327, atol=1e-4)
         np.testing.assert_allclose(Uniform.pdf(0.5), 1, atol=1e-4)
@@ -38,6 +48,69 @@ def test_priors(self, Gaussian, Uniform, Exponential):
         np.testing.assert_allclose(Uniform.logpdf(0.5), 0, atol=1e-4)
         np.testing.assert_allclose(Exponential.logpdf(1), -1, atol=1e-4)
 
+        # Test icdf
+        np.testing.assert_allclose(Gaussian.icdf(0.5), 0.5, atol=1e-4)
+        np.testing.assert_allclose(Uniform.icdf(0.5), 0.5, atol=1e-4)
+        np.testing.assert_allclose(Exponential.icdf(0.5), 0.6931471805599453, atol=1e-4)
+
+        # Test cdf
+        np.testing.assert_allclose(Gaussian.cdf(0.5), 0.5, atol=1e-4)
+        np.testing.assert_allclose(Uniform.cdf(0.5), 0.5, atol=1e-4)
+        np.testing.assert_allclose(Exponential.cdf(1), 0.6321205588285577, atol=1e-4)
+
+        # Test __call__
+        assert Gaussian(0.5) == Gaussian.logpdf(0.5)
+        assert Uniform(0.5) == Uniform.logpdf(0.5)
+        assert Exponential(1) == Exponential.logpdf(1)
+        assert JointPrior1([0.5, 0.5]) == Gaussian.logpdf(0.5) + Uniform.logpdf(0.5)
+        assert JointPrior2([0.5, 1]) == Gaussian.logpdf(0.5) + Exponential.logpdf(1)
+
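+        # logpdfS1 pairs the log-density with its gradient: each case below
+        # checks the value against logpdf and the gradient at that point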
+        # Test Gaussian.logpdfS1
+        p, dp = Gaussian.logpdfS1(0.5)
+        assert p == Gaussian.logpdf(0.5)
+        assert dp == 0.0
+
+        # Test Uniform.logpdfS1
+        p, dp = Uniform.logpdfS1(0.5)
+        assert p == Uniform.logpdf(0.5)
+        assert dp == 0.0
+
+        # Test Exponential.logpdfS1
+        p, dp = Exponential.logpdfS1(1)
+        assert p == Exponential.logpdf(1)
+        assert dp == Exponential.logpdf(1)
+
+        # Test JointPrior1.logpdfS1
+        p, dp = JointPrior1.logpdfS1([0.5, 0.5])
+        assert p == Gaussian.logpdf(0.5) + Uniform.logpdf(0.5)
+        np.testing.assert_allclose(dp, np.array([0.0, 0.0]), atol=1e-4)
+
+        # Test JointPrior2.logpdfS1
+        p, dp = JointPrior2.logpdfS1([0.5, 1])
+        assert p == Gaussian.logpdf(0.5) + Exponential.logpdf(1)
+        np.testing.assert_allclose(
+            dp, np.array([0.0, Exponential.logpdf(1)]), atol=1e-4
+        )
+
+        # Test JointPrior1 non-symmetric
+        with pytest.raises(AssertionError):
+            np.testing.assert_allclose(
+                JointPrior1([0.4, 0.5]), JointPrior1([0.5, 0.4]), atol=1e-4
+            )
+
+        # Test JointPrior2 non-symmetric
+        with pytest.raises(AssertionError):
+            np.testing.assert_allclose(
+                JointPrior2([0.4, 1]), JointPrior2([1, 0.4]), atol=1e-4
+            )
+
+        # Test JointPrior with incorrect dimensions
+        with pytest.raises(ValueError, match="Input x must have length 2, got 1"):
+            JointPrior1([0.4])
+
+        with pytest.raises(ValueError, match="Input x must have length 2, got 1"):
+            JointPrior1.logpdfS1([0.4])
+
         # Test properties
         assert Uniform.mean == (Uniform.upper - Uniform.lower) / 2
         np.testing.assert_allclose(
@@ -74,10 +147,14 @@ def test_exponential_rvs(self, Exponential):
         assert abs(mean - 1) < 0.2
 
     @pytest.mark.unit
-    def test_repr(self, Gaussian, Uniform, Exponential):
+    def test_repr(self, Gaussian, Uniform, Exponential, JointPrior1):
         assert repr(Gaussian) == "Gaussian, loc: 0.5, scale: 1"
         assert repr(Uniform) == "Uniform, loc: 0, scale: 1"
         assert repr(Exponential) == "Exponential, loc: 0, scale: 1"
+        assert (
+            repr(JointPrior1)
+            == "JointLogPrior(priors: [Gaussian, loc: 0.5, scale: 1, Uniform, loc: 0, scale: 1])"
+        )
 
     @pytest.mark.unit
     def test_invalid_size(self, Gaussian, Uniform, Exponential):
@@ -87,3 +164,14 @@ def test_invalid_size(self, Gaussian, Uniform, Exponential):
             Uniform.rvs(-1)
         with pytest.raises(ValueError):
             Exponential.rvs(-1)
+
+    @pytest.mark.unit
+    def test_incorrect_composed_priors(self, Gaussian, Uniform):
+        with pytest.raises(
+            ValueError, match="All priors must be instances of BasePrior"
+        ):
+            pybop.JointLogPrior(Gaussian, Uniform, "string")
+        with pytest.raises(
+            ValueError, match="All priors must be instances of BasePrior"
+        ):
+            pybop.JointLogPrior(Gaussian, Uniform, 0.5)
diff --git a/tests/unit/test_sampling.py b/tests/unit/test_sampling.py
new file mode 100644
index 000000000..e854d24ab
--- /dev/null
+++ b/tests/unit/test_sampling.py
@@ -0,0 +1,388 @@
+import copy
+import logging
+from unittest.mock import call, patch
+
+import numpy as np
+import pytest
+from pints import ParallelEvaluator
+
+import pybop
+from pybop import (
+    DREAM,
+    MALAMCMC,
+    NUTS,
+    AdaptiveCovarianceMCMC,
+    DifferentialEvolutionMCMC,
+    DramACMC,
+    EmceeHammerMCMC,
+    HaarioACMC,
+    HaarioBardenetACMC,
+    HamiltonianMCMC,
+    MetropolisRandomWalkMCMC,
+    MonomialGammaHamiltonianMCMC,
+    PopulationMCMC,
+    RaoBlackwellACMC,
+    RelativisticMCMC,
+    SliceDoublingMCMC,
+    SliceRankShrinkingMCMC,
+    SliceStepoutMCMC,
+)
+
+
+class TestPintsSamplers:
+    """
+    Class for testing the Pints-based MCMC samplers
+    """
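+
+    # The fixtures below build a deliberately tiny synthetic problem (ten time
+    # points, zero current, constant voltage) so every sampler can be
+    # exercised in a single iteration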
+ "Voltage [V]": np.ones(10), + } + ) + + @pytest.fixture + def parameters(self): + return pybop.Parameters( + pybop.Parameter( + "Negative electrode active material volume fraction", + prior=pybop.Gaussian(0.6, 0.2), + bounds=[0.58, 0.62], + ), + pybop.Parameter( + "Positive electrode active material volume fraction", + prior=pybop.Gaussian(0.55, 0.05), + bounds=[0.53, 0.57], + ), + ) + + @pytest.fixture + def model(self): + return pybop.lithium_ion.SPM() + + @pytest.fixture + def log_posterior(self, model, parameters, dataset): + problem = pybop.FittingProblem( + model, + parameters, + dataset, + ) + likelihood = pybop.GaussianLogLikelihoodKnownSigma(problem, sigma0=0.01) + prior1 = pybop.Gaussian(0.7, 0.02) + prior2 = pybop.Gaussian(0.6, 0.02) + composed_prior = pybop.JointLogPrior(prior1, prior2) + log_posterior = pybop.LogPosterior(likelihood, composed_prior) + + return log_posterior + + @pytest.fixture + def x0(self): + return [0.68, 0.58] + + @pytest.fixture + def chains(self): + return 3 + + @pytest.fixture + def multi_samplers(self): + return (pybop.DREAM, pybop.EmceeHammerMCMC, pybop.DifferentialEvolutionMCMC) + + @pytest.fixture( + params=[ + NUTS, + DREAM, + AdaptiveCovarianceMCMC, + DifferentialEvolutionMCMC, + DramACMC, + EmceeHammerMCMC, + HaarioACMC, + HaarioBardenetACMC, + HamiltonianMCMC, + MALAMCMC, + MetropolisRandomWalkMCMC, + MonomialGammaHamiltonianMCMC, + PopulationMCMC, + RaoBlackwellACMC, + RelativisticMCMC, + SliceDoublingMCMC, + SliceRankShrinkingMCMC, + SliceStepoutMCMC, + ] + ) + def MCMC(self, request): + return request.param + + @pytest.mark.unit + def test_initialisation_and_run( + self, log_posterior, x0, chains, MCMC, multi_samplers + ): + sampler = pybop.MCMCSampler( + log_pdf=log_posterior, + chains=chains, + sampler=MCMC, + x0=x0, + max_iterations=1, + verbose=True, + ) + assert sampler._n_chains == chains + assert sampler._log_pdf == log_posterior + if isinstance(sampler.sampler, multi_samplers): + np.testing.assert_allclose(sampler._samplers[0]._x0[0], x0) + else: + np.testing.assert_allclose(sampler._samplers[0]._x0, x0) + + # Test incorrect __getattr__ + with pytest.raises( + AttributeError, match="'MCMCSampler' object has no attribute 'test'" + ): + sampler.__getattr__("test") + + # Test __setattr__ + sampler.some_attribute = 1 + assert sampler.some_attribute == 1 + sampler.verbose = True + assert sampler.verbose is True + + # Run the sampler + samples = sampler.run() + assert samples is not None + assert samples.shape == (chains, 1, 2) + + @pytest.mark.unit + def test_single_parameter_sampling(self, model, dataset, MCMC, chains): + parameters = pybop.Parameters( + pybop.Parameter( + "Negative electrode active material volume fraction", + prior=pybop.Gaussian(0.6, 0.2), + bounds=[0.58, 0.62], + ) + ) + problem = pybop.FittingProblem( + model, + parameters, + dataset, + ) + likelihood = pybop.GaussianLogLikelihoodKnownSigma(problem, sigma0=0.01) + log_posterior = pybop.LogPosterior(likelihood) + + # Skip RelativisticMCMC as it requires > 1 parameter + if issubclass(MCMC, RelativisticMCMC): + return + + # Construct and run + sampler = pybop.MCMCSampler( + log_pdf=log_posterior, + chains=chains, + sampler=MCMC, + max_iterations=1, + verbose=True, + ) + sampler.run() + + @pytest.mark.unit + def test_multi_log_pdf(self, log_posterior, x0, chains): + multi_log_posterior = [log_posterior, log_posterior, log_posterior] + sampler = pybop.MCMCSampler( + log_pdf=multi_log_posterior, + chains=chains, + sampler=HamiltonianMCMC, + x0=x0, + max_iterations=1, + ) 
+        assert sampler._n_chains == chains
+        assert sampler._log_pdf == multi_log_posterior
+
+        # Run the sampler
+        samples = sampler.run()
+        assert samples is not None
+        assert samples.shape == (chains, 1, 2)
+
+        # Test incorrect multi log pdf
+        incorrect_multi_log_posterior = [log_posterior, log_posterior, chains]
+        with pytest.raises(
+            ValueError, match="All log pdf's must be instances of BaseCost"
+        ):
+            sampler = pybop.MCMCSampler(
+                log_pdf=incorrect_multi_log_posterior,
+                chains=chains,
+                sampler=HaarioBardenetACMC,
+                x0=x0,
+                max_iterations=1,
+            )
+
+        # Test incorrect number of parameters
+        new_multi_log_posterior = copy.copy(log_posterior)
+        new_multi_log_posterior.parameters = [
+            new_multi_log_posterior.parameters[
+                "Positive electrode active material volume fraction"
+            ]
+        ]
+        with pytest.raises(
+            ValueError, match="All log pdf's must have the same number of parameters"
+        ):
+            sampler = pybop.MCMCSampler(
+                log_pdf=[log_posterior, log_posterior, new_multi_log_posterior],
+                chains=chains,
+                sampler=HaarioBardenetACMC,
+                x0=x0,
+                max_iterations=1,
+            )
+
+    @pytest.mark.unit
+    def test_invalid_initialisation(self, log_posterior, x0):
+        with pytest.raises(ValueError, match="Number of chains must be greater than 0"):
+            AdaptiveCovarianceMCMC(
+                log_pdf=log_posterior,
+                chains=0,
+                x0=x0,
+            )
+
+        with pytest.raises(
+            ValueError, match="Number of log pdf's must match number of chains"
+        ):
+            AdaptiveCovarianceMCMC(
+                log_pdf=[log_posterior, log_posterior, log_posterior],
+                chains=2,
+                x0=x0,
+            )
+
+        with pytest.raises(
+            ValueError, match="x0 must have the same number of parameters as log_pdf"
+        ):
+            AdaptiveCovarianceMCMC(
+                log_pdf=[log_posterior, log_posterior, log_posterior],
+                chains=3,
+                x0=[0.4, 0.4, 0.4, 0.4],
+            )
+
+    # SingleChain & MultiChain Sampler
+    @pytest.mark.parametrize(
+        "sampler",
+        [
+            AdaptiveCovarianceMCMC,
+            DifferentialEvolutionMCMC,
+        ],
+    )
+    @pytest.mark.unit
+    def test_no_chains_in_memory(self, log_posterior, x0, chains, sampler):
+        sampler = sampler(
+            log_pdf=log_posterior,
+            chains=chains,
+            x0=x0,
+            max_iterations=1,
+            chains_in_memory=False,
+        )
+        assert sampler._chains_in_memory is False
+
+        # Run the sampler
+        samples = sampler.run()
+        assert sampler._samples is not None
+        assert samples is None
+
+    @patch("logging.basicConfig")
+    @patch("logging.info")
+    @pytest.mark.unit
+    def test_initialise_logging(
+        self, mock_info, mock_basicConfig, log_posterior, x0, chains
+    ):
+        sampler = AdaptiveCovarianceMCMC(
+            log_pdf=log_posterior,
+            chains=chains,
+            x0=x0,
+            parallel=True,
+            evaluation_files=["eval1.txt", "eval2.txt"],
+            chain_files=["chain1.txt", "chain2.txt"],
+        )
+
+        # Set parallel workers
+        sampler.set_parallel(parallel=2)
+        sampler._initialise_logging()
+
+        # Check if basicConfig was called with correct arguments
+        mock_basicConfig.assert_called_once_with(
+            format="%(message)s", level=logging.INFO
+        )
+
+        # Check if correct messages were called
+        expected_calls = [
+            call("Using Haario-Bardenet adaptive covariance MCMC"),
+            call("Generating 3 chains."),
+            call("Running in parallel with 2 worker processes."),
+            call("Writing chains to chain1.txt etc."),
+            call("Writing evaluations to eval1.txt etc."),
+        ]
+        mock_info.assert_has_calls(expected_calls, any_order=False)
+
+        # Test when _log_to_screen is False
+        sampler._log_to_screen = False
+        sampler._initialise_logging()
+        assert mock_info.call_count == len(expected_calls)  # No additional calls
+
+    @pytest.mark.unit
+    def test_check_stopping_criteria(self, log_posterior, x0, chains):
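+        # Construct with defaults, then exercise the stopping-criteria setters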
+        sampler = AdaptiveCovarianceMCMC(
+            log_pdf=log_posterior,
+            chains=chains,
+            x0=x0,
+        )
+        # Set stopping criteria
+        sampler.set_max_iterations(10)
+        assert sampler._max_iterations == 10
+
+        # Remove stopping criteria
+        sampler._max_iterations = None
+        with pytest.raises(
+            ValueError, match="At least one stopping criterion must be set."
+        ):
+            sampler._check_stopping_criteria()
+
+        # Incorrect stopping criteria
+        with pytest.raises(
+            ValueError, match="Number of iterations must be greater than 0"
+        ):
+            sampler.set_max_iterations(-1)
+
+    @pytest.mark.unit
+    def test_set_parallel(self, log_posterior, x0, chains):
+        sampler = AdaptiveCovarianceMCMC(
+            log_pdf=log_posterior,
+            chains=chains,
+            x0=x0,
+        )
+
+        # Disable parallelism
+        sampler.set_parallel(False)
+        assert sampler._parallel is False
+        assert sampler._n_workers == 1
+
+        # Enable parallelism
+        sampler.set_parallel(True)
+        assert sampler._parallel is True
+
+        # Enable parallelism with number of workers
+        sampler.set_parallel(2)
+        assert sampler._parallel is True
+        assert sampler._n_workers == 2
+
+        # Test evaluator construction
+        sampler.set_parallel(2)
+        evaluator = sampler._create_evaluator()
+        assert isinstance(evaluator, ParallelEvaluator)
+
+    @pytest.mark.unit
+    def test_base_sampler(self, log_posterior, x0):
+        sampler = pybop.BaseSampler(log_posterior, x0, cov0=0.1)
+        with pytest.raises(NotImplementedError):
+            sampler.run()
+
+    @pytest.mark.unit
+    def test_MCMC_sampler(self, log_posterior, x0, chains):
+        with pytest.raises(TypeError):
+            pybop.MCMCSampler(
+                log_pdf=log_posterior,
+                chains=chains,
+                sampler=log_posterior,  # Incorrect sampler
+            )
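+
+
+# Minimal usage sketch (illustrative, mirroring the tests above; `likelihood`
+# stands in for any constructed pybop likelihood):
+#
+#   posterior = pybop.LogPosterior(likelihood)  # prior defaults to the parameter priors
+#   sampler = pybop.HaarioBardenetACMC(
+#       log_pdf=posterior, chains=3, warm_up=250, max_iterations=500
+#   )
+#   samples = sampler.run()  # ndarray of shape (n_chains, n_iterations, n_parameters)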