diff --git a/sbi/analysis/tensorboard_output.py b/sbi/analysis/tensorboard_output.py
index fc32ecf1b..447158de2 100644
--- a/sbi/analysis/tensorboard_output.py
+++ b/sbi/analysis/tensorboard_output.py
@@ -61,7 +61,7 @@ def plot_summary(
logger = logging.getLogger(__name__)
if tags is None:
- tags = ["validation_log_probs"]
+ tags = ["validation_loss"]
size_guidance = deepcopy(DEFAULT_SIZE_GUIDANCE)
size_guidance.update(scalars=tensorboard_scalar_limit)
diff --git a/sbi/inference/__init__.py b/sbi/inference/__init__.py
index 4cf8210d1..f1275e392 100644
--- a/sbi/inference/__init__.py
+++ b/sbi/inference/__init__.py
@@ -6,6 +6,7 @@
simulate_for_sbi,
)
from sbi.inference.fmpe import FMPE
+from sbi.inference.npse.npse import NPSE
from sbi.inference.snle import MNLE, SNLE_A
from sbi.inference.snpe import SNPE_A, SNPE_B, SNPE_C # noqa: F401
from sbi.inference.snre import BNRE, SNRE, SNRE_A, SNRE_B, SNRE_C # noqa: F401
diff --git a/sbi/inference/base.py b/sbi/inference/base.py
index 2072420b8..b4ca20f99 100644
--- a/sbi/inference/base.py
+++ b/sbi/inference/base.py
@@ -176,7 +176,7 @@ def __init__(
self._data_round_index = []
self._round = 0
- self._val_log_prob = float("-Inf")
+ self._val_loss = float("Inf")
# XXX We could instantiate here the Posterior for all children. Two problems:
# 1. We must dispatch to right PotentialProvider for mcmc based on name
@@ -190,9 +190,9 @@ def __init__(
# Logging during training (by SummaryWriter).
self._summary = dict(
epochs_trained=[],
- best_validation_log_prob=[],
- validation_log_probs=[],
- training_log_probs=[],
+ best_validation_loss=[],
+ validation_loss=[],
+ training_loss=[],
epoch_durations_sec=[],
)
@@ -393,8 +393,8 @@ def _converged(self, epoch: int, stop_after_epochs: int) -> bool:
neural_net = self._neural_net
# (Re)-start the epoch count with the first epoch or any improvement.
- if epoch == 0 or self._val_log_prob > self._best_val_log_prob:
- self._best_val_log_prob = self._val_log_prob
+ if epoch == 0 or self._val_loss < self._best_val_loss:
+ self._best_val_loss = self._val_loss
self._epochs_since_last_improvement = 0
self._best_model_state_dict = deepcopy(neural_net.state_dict())
else:
@@ -419,14 +419,14 @@ def _default_summary_writer(self) -> SummaryWriter:
@staticmethod
def _describe_round(round_: int, summary: Dict[str, list]) -> str:
epochs = summary["epochs_trained"][-1]
- best_validation_log_prob = summary["best_validation_log_prob"][-1]
+ best_validation_loss = summary["best_validation_loss"][-1]
description = f"""
-------------------------
||||| ROUND {round_ + 1} STATS |||||:
-------------------------
Epochs trained: {epochs}
- Best validation performance: {best_validation_log_prob:.4f}
+ Best validation performance: {best_validation_loss:.4f}
-------------------------
"""
@@ -472,12 +472,12 @@ def _summarize(
Scalar tags:
- epochs_trained:
number of epochs trained
- - best_validation_log_prob:
- best validation log prob (for each round).
- - validation_log_probs:
- validation log probs for every epoch (for each round).
- - training_log_probs
- training log probs for every epoch (for each round).
+ - best_validation_loss:
+ best validation loss (for each round).
+ - validation_loss:
+ validation loss for every epoch (for each round).
+ - training_loss
+ training loss for every epoch (for each round).
- epoch_durations_sec
epoch duration for every epoch (for each round)
@@ -491,28 +491,28 @@ def _summarize(
)
self._summary_writer.add_scalar(
- tag="best_validation_log_prob",
- scalar_value=self._summary["best_validation_log_prob"][-1],
+ tag="best_validation_loss",
+ scalar_value=self._summary["best_validation_loss"][-1],
global_step=round_ + 1,
)
- # Add validation log prob for every epoch.
+ # Add validation loss for every epoch.
# Offset with all previous epochs.
offset = (
torch.tensor(self._summary["epochs_trained"][:-1], dtype=torch.int)
.sum()
.item()
)
- for i, vlp in enumerate(self._summary["validation_log_probs"][offset:]):
+ for i, vlp in enumerate(self._summary["validation_loss"][offset:]):
self._summary_writer.add_scalar(
- tag="validation_log_probs",
+ tag="validation_loss",
scalar_value=vlp,
global_step=offset + i,
)
- for i, tlp in enumerate(self._summary["training_log_probs"][offset:]):
+ for i, tlp in enumerate(self._summary["training_loss"][offset:]):
self._summary_writer.add_scalar(
- tag="training_log_probs",
+ tag="training_loss",
scalar_value=tlp,
global_step=offset + i,
)
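The two changes above rename the logged quantities from log-probs to losses, both in the per-round summary dict and in the default tensorboard tag read by `plot_summary`. Below is a minimal sketch of how the renamed entries can be consumed after training; the toy prior/simulator and the exact `plot_summary` call are illustrative assumptions, not part of this diff:

```python
import torch
from sbi.analysis import plot_summary
from sbi.inference import SNPE_C
from sbi.utils import BoxUniform

# Toy setup (assumption): 2-D parameters with a Gaussian "simulator".
prior = BoxUniform(low=-2 * torch.ones(2), high=2 * torch.ones(2))
theta = prior.sample((1000,))
x = theta + 0.1 * torch.randn_like(theta)

trainer = SNPE_C(prior=prior)
trainer.append_simulations(theta, x).train(max_num_epochs=20)

# Per-epoch histories now live under the renamed keys (losses, lower is better).
print(trainer._summary["best_validation_loss"][-1])
print(len(trainer._summary["training_loss"]), len(trainer._summary["validation_loss"]))

# Tensorboard scalars use the same renamed tags; "validation_loss" is now the
# default tag plotted by plot_summary (exact signature assumed here).
plot_summary(trainer, tags=["training_loss", "validation_loss"])
```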
diff --git a/sbi/inference/fmpe/fmpe_base.py b/sbi/inference/fmpe/fmpe_base.py
index d17471d53..043a064ae 100644
--- a/sbi/inference/fmpe/fmpe_base.py
+++ b/sbi/inference/fmpe/fmpe_base.py
@@ -241,8 +241,7 @@ def train(
self.epoch += 1
train_loss_average = train_loss_sum / len(train_loader) # type: ignore
- # TODO: rename to loss once renaming is done in base class.
- self._summary["training_log_probs"].append(-train_loss_average)
+ self._summary["training_loss"].append(train_loss_average)
# Calculate validation performance.
self._neural_net.eval()
@@ -262,11 +261,8 @@ def train(
self._val_loss = val_loss_sum / (
len(val_loader) * val_loader.batch_size # type: ignore
)
- # TODO: remove this once renaming to loss in base class is done.
- self._val_log_prob = -self._val_loss
- # Log validation log prob for every epoch.
- # TODO: rename to loss and fix sign once renaming in base is done.
- self._summary["validation_log_probs"].append(-self._val_loss)
+ # Log validation loss for every epoch.
+ self._summary["validation_loss"].append(self._val_loss)
self._summary["epoch_durations_sec"].append(time.time() - epoch_start_time)
self._maybe_show_progress(self._show_progress_bars, self.epoch)
@@ -275,8 +271,7 @@ def train(
# Update summary.
self._summary["epochs_trained"].append(self.epoch)
- # TODO: rename to loss once renaming is done in base class.
- self._summary["best_validation_log_prob"].append(self._best_val_log_prob)
+ self._summary["best_validation_loss"].append(self._best_val_loss)
# Update tensorboard and summary dict.
self._summarize(round_=self._round)
diff --git a/sbi/inference/npse/__init__.py b/sbi/inference/npse/__init__.py
new file mode 100644
index 000000000..f861c3450
--- /dev/null
+++ b/sbi/inference/npse/__init__.py
@@ -0,0 +1 @@
+from sbi.inference.npse.npse import NPSE
diff --git a/sbi/inference/npse/npse.py b/sbi/inference/npse/npse.py
new file mode 100644
index 000000000..fbbe4fcd9
--- /dev/null
+++ b/sbi/inference/npse/npse.py
@@ -0,0 +1,550 @@
+# This file is part of sbi, a toolkit for simulation-based inference. sbi is licensed
+# under the Apache License Version 2.0, see
+import time
+from copy import deepcopy
+from typing import Any, Callable, Optional, Union
+
+import torch
+from torch import Tensor, ones
+from torch.distributions import Distribution
+from torch.nn.utils.clip_grad import clip_grad_norm_
+from torch.optim.adam import Adam
+from torch.utils.tensorboard.writer import SummaryWriter
+
+from sbi import utils as utils
+from sbi.inference import NeuralInference
+from sbi.inference.posteriors import (
+ DirectPosterior,
+)
+from sbi.inference.posteriors.score_posterior import ScorePosterior
+from sbi.neural_nets.estimators.score_estimator import ConditionalScoreEstimator
+from sbi.neural_nets.factory import posterior_score_nn
+from sbi.utils import (
+ check_estimator_arg,
+ handle_invalid_x,
+ npe_msg_on_invalid_x,
+ test_posterior_net_for_multi_d_x,
+ validate_theta_and_x,
+ warn_if_zscoring_changes_data,
+ x_shape_from_simulation,
+)
+from sbi.utils.sbiutils import ImproperEmpirical, mask_sims_from_prior
+
+
+class NPSE(NeuralInference):
+ def __init__(
+ self,
+ prior: Optional[Distribution] = None,
+ score_estimator: Union[str, Callable] = "mlp",
+ sde_type: str = "ve",
+ device: str = "cpu",
+ logging_level: Union[int, str] = "WARNING",
+ summary_writer: Optional[SummaryWriter] = None,
+ show_progress_bars: bool = True,
+ **kwargs,
+ ):
+ """Base class for Neural Posterior Score Estimation methods.
+
+ Instead of performing conditional *density* estimation, NPSE methods perform
+ conditional *score* estimation, i.e., they estimate the gradient of the log
+ density using a denoising score-matching loss.
+
+ NOTE: NPSE does not support multi-round inference with flexible proposals yet.
+ You can try to run multi-round with truncated proposals, but note that this is
+ not tested yet.
+
+ Args:
+ prior: Prior distribution.
+ score_estimator: Neural network architecture for the score estimator. Can be
+ a string (e.g. 'mlp' or 'ada_mlp') or a callable that returns a neural
+ network.
+ sde_type: Type of SDE to use. Must be one of ['vp', 've', 'subvp'].
+ device: Device to run the training on.
+ logging_level: Logging level for the training. Can be an integer or a
+ string.
+ summary_writer: Tensorboard summary writer.
+ show_progress_bars: Whether to show progress bars during training.
+ kwargs: Additional keyword arguments.
+
+ References:
+ - Geffner, Tomas, George Papamakarios, and Andriy Mnih. "Score modeling for
+ simulation-based inference." ICML 2023.
+ - Sharrock, Louis, et al. "Sequential neural score estimation: Likelihood-
+ free inference with conditional score based diffusion models." ICML 2024.
+ """
+
+ super().__init__(
+ prior=prior,
+ device=device,
+ logging_level=logging_level,
+ summary_writer=summary_writer,
+ show_progress_bars=show_progress_bars,
+ )
+
+ # As detailed in the docstring, `score_estimator` is either a string or
+ # a callable. The function creating the neural network is attached to
+ # `_build_neural_net`. It will be called in the first round and receive
+ # thetas and xs as inputs, so that they can be used for shape inference and
+ # potentially for z-scoring.
+ check_estimator_arg(score_estimator)
+ if isinstance(score_estimator, str):
+ self._build_neural_net = posterior_score_nn(
+ sde_type=sde_type, score_net_type=score_estimator, **kwargs
+ )
+ else:
+ self._build_neural_net = score_estimator
+
+ self._proposal_roundwise = []
+
+ def append_simulations(
+ self,
+ theta: Tensor,
+ x: Tensor,
+ proposal: Optional[DirectPosterior] = None,
+ exclude_invalid_x: Optional[bool] = None,
+ data_device: Optional[str] = None,
+ ) -> "NPSE":
+ r"""Store parameters and simulation outputs to use them for later training.
+
+ Data are stored as entries in lists for each type of variable (parameter/data).
+
+ Stores $\theta$, $x$, prior_masks (indicating if simulations are coming from the
+ prior or not) and an index indicating which round the batch of simulations came
+ from.
+
+ Args:
+ theta: Parameter sets.
+ x: Simulation outputs.
+ proposal: The distribution that the parameters $\theta$ were sampled from.
+ Pass `None` if the parameters were sampled from the prior. If not
+ `None`, it will trigger a different loss-function.
+ exclude_invalid_x: Whether invalid simulations are discarded during
+ training. For single-round SNPE, it is fine to discard invalid
+ simulations, but for multi-round SNPE (atomic), discarding invalid
+ simulations gives systematically wrong results. If `None`, it will
+ be `True` in the first round and `False` in later rounds.
+ data_device: Where to store the data, default is on the same device where
+ the training is happening. When training a large dataset on a GPU with
+ limited VRAM, set this to 'cpu' to keep the data in system memory instead.
+
+ Returns:
+ NeuralInference object (returned so that this function is chainable).
+ """
+ assert (
+ proposal is None
+ ), "Multi-round NPSE is not yet implemented. Please use single-round NPSE."
+ current_round = 0
+
+ if exclude_invalid_x is None:
+ exclude_invalid_x = current_round == 0
+
+ if data_device is None:
+ data_device = self._device
+
+ theta, x = validate_theta_and_x(
+ theta, x, data_device=data_device, training_device=self._device
+ )
+
+ is_valid_x, num_nans, num_infs = handle_invalid_x(
+ x, exclude_invalid_x=exclude_invalid_x
+ )
+
+ x = x[is_valid_x]
+ theta = theta[is_valid_x]
+
+ # Check for problematic z-scoring
+ warn_if_zscoring_changes_data(x)
+
+ npe_msg_on_invalid_x(num_nans, num_infs, exclude_invalid_x, "Single-round NPSE")
+
+ self._data_round_index.append(current_round)
+ prior_masks = mask_sims_from_prior(int(current_round > 0), theta.size(0))
+
+ self._theta_roundwise.append(theta)
+ self._x_roundwise.append(x)
+ self._prior_masks.append(prior_masks)
+
+ self._proposal_roundwise.append(proposal)
+
+ if self._prior is None or isinstance(self._prior, ImproperEmpirical):
+ theta_prior = self.get_simulations()[0].to(self._device)
+ self._prior = ImproperEmpirical(
+ theta_prior, ones(theta_prior.shape[0], device=self._device)
+ )
+
+ return self
+
+ def train(
+ self,
+ training_batch_size: int = 200,
+ learning_rate: float = 5e-4,
+ validation_fraction: float = 0.1,
+ stop_after_epochs: int = 200,
+ max_num_epochs: int = 2**31 - 1,
+ clip_max_norm: Optional[float] = 5.0,
+ calibration_kernel: Optional[Callable] = None,
+ ema_loss_decay: float = 0.1,
+ resume_training: bool = False,
+ force_first_round_loss: bool = False,
+ discard_prior_samples: bool = False,
+ retrain_from_scratch: bool = False,
+ show_train_summary: bool = False,
+ dataloader_kwargs: Optional[dict] = None,
+ ) -> ConditionalScoreEstimator:
+ r"""Returns a score estimator that approximates the score
+ $\nabla_\theta \log p(\theta|x)$.
+
+ Args:
+ training_batch_size: Training batch size.
+ learning_rate: Learning rate for Adam optimizer.
+ validation_fraction: The fraction of data to use for validation.
+ stop_after_epochs: The number of epochs to wait for improvement on the
+ validation set before terminating training.
+ max_num_epochs: Maximum number of epochs to run. If reached, we stop
+ training even when the validation loss is still decreasing. Otherwise,
+ we train until validation loss increases (see also `stop_after_epochs`).
+ clip_max_norm: Value at which to clip the total gradient norm in order to
+ prevent exploding gradients. Use None for no clipping.
+ calibration_kernel: A function to calibrate the loss with respect
+ to the simulations `x` (optional). See Lueckmann, Gonçalves et al.,
+ NeurIPS 2017. If `None`, no calibration is used.
+ ema_loss_decay: Decay rate for the exponential moving average that
+ smooths the logged training and validation losses.
+ resume_training: Can be used in case training time is limited, e.g. on a
+ cluster. If `True`, the split between train and validation set, the
+ optimizer, the number of epochs, and the best validation loss will
+ be restored from the last time `.train()` was called.
+ force_first_round_loss: If `True`, train with maximum likelihood,
+ i.e., potentially ignoring the correction for using a proposal
+ distribution different from the prior.
+ discard_prior_samples: Whether to discard samples simulated in round 1, i.e.
+ from the prior. Training may be sped up by ignoring such less targeted
+ samples.
+ retrain_from_scratch: Whether to retrain the score estimator for the
+ posterior from scratch each round.
+ show_train_summary: Whether to print the number of epochs and validation
+ loss after the training.
+ dataloader_kwargs: Additional or updated kwargs to be passed to the training
+ and validation dataloaders (like, e.g., a collate_fn)
+
+ Returns:
+ Score estimator that approximates the posterior score.
+ """
+ # Load data from most recent round.
+ self._round = max(self._data_round_index)
+
+ if self._round == 0 and self._neural_net is not None:
+ assert force_first_round_loss or resume_training, (
+ "You have already trained this neural network. After you had trained "
+ "the network, you again appended simulations with `append_simulations"
+ "(theta, x)`, but you did not provide a proposal. If the new "
+ "simulations are sampled from the prior, you can set "
+ "`.train(..., force_first_round_loss=True`). However, if the new "
+ "simulations were not sampled from the prior, you should pass the "
+ "proposal, i.e. `append_simulations(theta, x, proposal)`. If "
+ "your samples are not sampled from the prior and you do not pass a "
+ "proposal and you set `force_first_round_loss=True`, the result of "
+ "NPSE will not be the true posterior. Instead, it will be the proposal "
+ "posterior, which (usually) is more narrow than the true posterior."
+ )
+
+ # Calibration kernels proposed in Lueckmann, Gonçalves et al., 2017.
+ if calibration_kernel is None:
+
+ def default_calibration_kernel(x):
+ return ones([len(x)], device=self._device)
+
+ calibration_kernel = default_calibration_kernel
+
+ # Starting index for the training set (1 = discard round-0 samples).
+ start_idx = int(discard_prior_samples and self._round > 0)
+
+ # Set the proposal to the last proposal that was passed by the user. For
+ # atomic SNPE, it does not matter what the proposal is. For non-atomic
+ # SNPE, we only use the latest data that was passed, i.e. the one from the
+ # last proposal.
+ proposal = self._proposal_roundwise[-1]
+
+ train_loader, val_loader = self.get_dataloaders(
+ start_idx,
+ training_batch_size,
+ validation_fraction,
+ resume_training,
+ dataloader_kwargs=dataloader_kwargs,
+ )
+ # First round or if retraining from scratch:
+ # Call the `self._build_neural_net` with the rounds' thetas and xs as
+ # arguments, which will build the neural network.
+ if self._neural_net is None or retrain_from_scratch:
+ # Get theta,x to initialize NN
+ theta, x, _ = self.get_simulations(starting_round=start_idx)
+ # Use only training data for building the neural net (z-scoring transforms)
+
+ self._neural_net = self._build_neural_net(
+ theta[self.train_indices].to("cpu"),
+ x[self.train_indices].to("cpu"),
+ )
+ self._x_shape = x_shape_from_simulation(x.to("cpu"))
+
+ test_posterior_net_for_multi_d_x(
+ self._neural_net,
+ theta.to("cpu"),
+ x.to("cpu"),
+ )
+
+ del theta, x
+
+ # Move entire net to device for training.
+ self._neural_net.to(self._device)
+
+ if not resume_training:
+ self.optimizer = Adam(list(self._neural_net.parameters()), lr=learning_rate)
+
+ self.epoch, self._val_loss = 0, float("Inf")
+
+ while self.epoch <= max_num_epochs and not self._converged(
+ self.epoch, stop_after_epochs
+ ):
+ # Train for a single epoch.
+ self._neural_net.train()
+ train_loss_sum = 0
+ epoch_start_time = time.time()
+ for batch in train_loader:
+ self.optimizer.zero_grad()
+ # Get batches on current device.
+ theta_batch, x_batch, masks_batch = (
+ batch[0].to(self._device),
+ batch[1].to(self._device),
+ batch[2].to(self._device),
+ )
+
+ train_losses = self._loss(
+ theta_batch,
+ x_batch,
+ masks_batch,
+ proposal,
+ calibration_kernel,
+ force_first_round_loss=force_first_round_loss,
+ )
+
+ train_loss = torch.mean(train_losses)
+
+ train_loss_sum += train_losses.sum().item()
+
+ train_loss.backward()
+ if clip_max_norm is not None:
+ clip_grad_norm_(
+ self._neural_net.parameters(), max_norm=clip_max_norm
+ )
+ self.optimizer.step()
+
+ self.epoch += 1
+
+ train_loss_average = train_loss_sum / (
+ len(train_loader) * train_loader.batch_size # type: ignore
+ )
+
+ # NOTE: Because the per-epoch training loss is inherently noisy, we log an
+ # exponential moving average of it instead.
+ if len(self._summary["training_loss"]) == 0:
+ self._summary["training_loss"].append(train_loss_average)
+ else:
+ previous_loss = self._summary["training_loss"][-1]
+ self._summary["training_loss"].append(
+ (1.0 - ema_loss_decay) * previous_loss
+ + ema_loss_decay * train_loss_average
+ )
+
+ # Calculate validation performance.
+ self._neural_net.eval()
+ val_loss_sum = 0
+
+ with torch.no_grad():
+ for batch in val_loader:
+ theta_batch, x_batch, masks_batch = (
+ batch[0].to(self._device),
+ batch[1].to(self._device),
+ batch[2].to(self._device),
+ )
+ # Compute validation losses.
+ val_losses = self._loss(
+ theta_batch,
+ x_batch,
+ masks_batch,
+ proposal,
+ calibration_kernel,
+ force_first_round_loss=force_first_round_loss,
+ )
+ val_loss_sum += val_losses.sum().item()
+
+ # Take mean over all validation samples.
+ val_loss = val_loss_sum / (
+ len(val_loader) * val_loader.batch_size # type: ignore
+ )
+
+ # NOTE: Because the per-epoch validation loss is inherently noisy, we log an
+ # exponential moving average of it instead.
+ if len(self._summary["validation_loss"]) == 0:
+ val_loss_ema = val_loss
+ else:
+ previous_loss = self._summary["validation_loss"][-1]
+ val_loss_ema = (
+ 1 - ema_loss_decay
+ ) * previous_loss + ema_loss_decay * val_loss
+
+ self._val_loss = val_loss_ema
+ self._summary["validation_loss"].append(self._val_loss)
+ self._summary["epoch_durations_sec"].append(time.time() - epoch_start_time)
+
+ self._maybe_show_progress(self._show_progress_bars, self.epoch)
+
+ self._report_convergence_at_end(self.epoch, stop_after_epochs, max_num_epochs)
+
+ # Update summary.
+ self._summary["epochs_trained"].append(self.epoch)
+ self._summary["best_validation_loss"].append(self._val_loss)
+
+ # Update tensorboard and summary dict.
+ self._summarize(round_=self._round)
+
+ # Update description for progress bar.
+ if show_train_summary:
+ print(self._describe_round(self._round, self._summary))
+
+ # Avoid keeping the gradients in the resulting network, which can
+ # cause memory leakage when benchmarking.
+ self._neural_net.zero_grad(set_to_none=True)
+
+ return deepcopy(self._neural_net)
+
+ def build_posterior(
+ self,
+ score_estimator: Optional[ConditionalScoreEstimator] = None,
+ prior: Optional[Distribution] = None,
+ sample_with: str = "sde",
+ ) -> ScorePosterior:
+ r"""Build posterior from the score estimator.
+
+ For NPSE, the posterior distribution that is returned here implements the
+ following functionality over the raw neural density estimator:
+ - correct the calculation of the log probability such that it compensates for
+ the leakage.
+ - reject samples that lie outside of the prior bounds.
+
+ Args:
+ score_estimator: The score estimator that the posterior is based on.
+ If `None`, use the latest neural score estimator that was trained.
+ prior: Prior distribution.
+ sample_with: Method to use for sampling from the posterior. Can be one of
+ 'sde' (default) or 'ode'. The 'sde' method uses the score to
+ do a Langevin diffusion step, while the 'ode' method uses the score to
+ define a probabilistic ODE and solves it with a numerical ODE solver.
+
+ Returns:
+ Posterior $p(\theta|x)$ with `.sample()` and `.log_prob()` methods.
+ """
+ if prior is None:
+ assert self._prior is not None, (
+ "You did not pass a prior. You have to pass the prior either at "
+ "initialization `inference = NPSE(prior)` or to "
+ "`.build_posterior(prior=prior)`."
+ )
+ prior = self._prior
+ else:
+ utils.check_prior(prior)
+
+ if score_estimator is None:
+ score_estimator = self._neural_net
+ # If internal net is used device is defined.
+ device = self._device
+ # Otherwise, infer it from the device of the net parameters.
+ else:
+ # TODO: Add protocol for checking if the score estimator has forward and
+ # loss methods with the correct signature.
+ device = str(next(score_estimator.parameters()).device)
+
+ posterior = ScorePosterior(
+ score_estimator, # type: ignore
+ prior,
+ device=device,
+ sample_with=sample_with,
+ )
+
+ self._posterior = posterior
+ # Store models at end of each round.
+ self._model_bank.append(deepcopy(self._posterior))
+
+ return deepcopy(self._posterior)
+
+ def _loss_proposal_posterior(
+ self,
+ theta: Tensor,
+ x: Tensor,
+ masks: Tensor,
+ proposal: Optional[Any],
+ ) -> Tensor:
+ raise NotImplementedError("Multi-round NPSE is not yet implemented.")
+
+ def _loss(
+ self,
+ theta: Tensor,
+ x: Tensor,
+ masks: Tensor,
+ proposal: Optional[Any],
+ calibration_kernel: Callable,
+ force_first_round_loss: bool = False,
+ ) -> Tensor:
+ """Return loss from score estimator. Currently only single-round NPSE
+ is implemented, i.e., no proposal correction is applied for later rounds.
+
+ The loss is the negative log prob. Irrespective of the round or SNPE method
+ (A, B, or C), it can be weighted with a calibration kernel.
+
+ Returns:
+ Calibration kernel-weighted negative log prob.
+ force_first_round_loss: If `True`, train with maximum likelihood,
+ i.e., potentially ignoring the correction for using a proposal
+ distribution different from the prior.
+ """
+ if self._round == 0 or force_first_round_loss:
+ # First round loss.
+ loss = self._neural_net.loss(theta, x)
+ else:
+ raise NotImplementedError(
+ "Multi-round NPSE with arbitrary proposals is not implemented"
+ )
+
+ return calibration_kernel(x) * loss
+
+ def _converged(self, epoch: int, stop_after_epochs: int) -> bool:
+ """Check if training has converged.
+
+ Unlike the `._converged` method in base.py, this method does not reset to the
+ best model. We noticed that this improves performance. Deleting this method
+ will make C2ST tests fail. This is because the loss is very stochastic, so
+ resetting might reset to an underfitted model. Ideally, we would write a
+ custom `._converged()` method which checks whether the loss is still going
+ down **for all t**.
+
+ Args:
+ epoch: Current epoch.
+ stop_after_epochs: Number of epochs to wait for improvement on the
+ validation set before terminating training.
+
+ Returns:
+ Whether training has converged.
+ """
+ converged = False
+
+ # No checkpointing, just check if the validation loss has improved.
+
+ # (Re)-start the epoch count with the first epoch or any improvement.
+ if epoch == 0 or self._val_loss < self._best_val_loss:
+ self._best_val_loss = self._val_loss
+ self._epochs_since_last_improvement = 0
+ else:
+ self._epochs_since_last_improvement += 1
+
+ # If no validation improvement over many epochs, stop training.
+ if self._epochs_since_last_improvement > stop_after_epochs - 1:
+ converged = True
+
+ return converged
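For orientation, here is a minimal end-to-end sketch of the single-round NPSE API added in this file; the `BoxUniform` prior and the Gaussian toy simulator are assumptions for illustration only, not part of the diff:

```python
import torch
from sbi.inference import NPSE
from sbi.utils import BoxUniform

# Toy setup (assumption): 2-D parameters with a Gaussian "simulator".
prior = BoxUniform(low=-2 * torch.ones(2), high=2 * torch.ones(2))
theta = prior.sample((2000,))
x = theta + 0.1 * torch.randn_like(theta)

inference = NPSE(prior=prior, score_estimator="mlp", sde_type="ve")
inference.append_simulations(theta, x)           # single round, no proposal
score_estimator = inference.train(max_num_epochs=50)

posterior = inference.build_posterior(sample_with="sde")
posterior.set_default_x(x[0])
samples = posterior.sample((1000,))              # reverse-diffusion sampling
```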
diff --git a/sbi/inference/posteriors/base_posterior.py b/sbi/inference/posteriors/base_posterior.py
index a4b9d49fa..3d810cc5f 100644
--- a/sbi/inference/posteriors/base_posterior.py
+++ b/sbi/inference/posteriors/base_posterior.py
@@ -2,7 +2,7 @@
# under the Apache License Version 2.0, see
import inspect
-from abc import ABC, abstractmethod
+from abc import abstractmethod
from typing import Any, Callable, Dict, Optional, Union
from warnings import warn
@@ -20,7 +20,7 @@
from sbi.utils.user_input_checks import process_x
-class NeuralPosterior(ABC):
+class NeuralPosterior:
r"""Posterior $p(\theta|x)$ with `log_prob()` and `sample()` methods.
All inference methods in sbi train a neural network which is then used to obtain
the posterior distribution. The `NeuralPosterior` class wraps the trained network
@@ -52,6 +52,7 @@ def __init__(
stacklevel=2,
)
+ # Wrap as `CallablePotentialWrapper` if `potential_fn` is a Callable.
if not isinstance(potential_fn, BasePotential):
kwargs_of_callable = list(inspect.signature(potential_fn).parameters.keys())
for key in ["theta", "x_o"]:
@@ -191,7 +192,6 @@ def _calculate_map(
show_progress_bars: bool = False,
) -> Tensor:
"""Calculates the maximum-a-posteriori estimate (MAP).
-
See `map()` method of child classes for docstring.
"""
@@ -215,7 +215,6 @@ def _calculate_map(
show_progress_bars=show_progress_bars,
)[0]
- @abstractmethod
def map(
self,
x: Optional[Tensor] = None,
@@ -228,11 +227,44 @@ def map(
show_progress_bars: bool = False,
force_update: bool = False,
) -> Tensor:
- """Returns stored maximum-a-posterior estimate (MAP), otherwise calculates it.
+ r"""Returns the maximum-a-posteriori estimate (MAP).
- See child classes for docstring.
- """
+ The MAP is obtained by running gradient
+ ascent from a given number of starting positions (samples from the posterior
+ with the highest log-probability). After the optimization is done, we select the
+ parameter set that has the highest log-probability after the optimization.
+
+ Warning: The default values used by this function are not well-tested. They
+ might require hand-tuning for the problem at hand.
+
+ For developers: if the prior is a `BoxUniform`, we carry out the optimization
+ in unbounded space and transform the result back into bounded space.
+ Args:
+ x: Deprecated - use `.set_default_x()` prior to `.map()`.
+ num_iter: Number of optimization steps that the algorithm takes
+ to find the MAP.
+ num_to_optimize: From the drawn `num_init_samples`, use the
+ `num_to_optimize` with highest log-probability as the initial points
+ for the optimization.
+ learning_rate: Learning rate of the optimizer.
+ init_method: How to select the starting parameters for the optimization. If
+ it is a string, it can be either [`posterior`, `prior`], which samples
+ the respective distribution `num_init_samples` times. If it is a
+ tensor, the tensor will be used as init locations.
+ num_init_samples: Draw this number of samples from the posterior and
+ evaluate the log-probability of all of them.
+ save_best_every: The best log-probability is computed, saved in the
+ `map`-attribute, and printed every `save_best_every`-th iteration.
+ Computing the best log-probability creates a significant overhead
+ for score-based estimators (thus, the default is `1000`.)
+ show_progress_bars: Whether to show a progressbar during sampling from
+ the posterior.
+ force_update: Whether to re-calculate the MAP when x is unchanged and
+ a cached value exists.
+ Returns:
+ The MAP estimate.
+ """
if x is not None:
raise ValueError(
"Passing `x` directly to `.map()` has been deprecated."
@@ -266,10 +298,8 @@ def __repr__(self):
def __str__(self):
desc = (
- f"Posterior conditional density p(θ|x) of type {self.__class__.__name__}. "
- f"{self._purpose}"
+ f"Posterior p(θ|x) of type {self.__class__.__name__}. " f"{self._purpose}"
)
-
return desc
def __getstate__(self) -> Dict:
diff --git a/sbi/inference/posteriors/direct_posterior.py b/sbi/inference/posteriors/direct_posterior.py
index 46d56b77d..76b9fdf48 100644
--- a/sbi/inference/posteriors/direct_posterior.py
+++ b/sbi/inference/posteriors/direct_posterior.py
@@ -11,8 +11,8 @@
from sbi.inference.potentials.posterior_based_potential import (
posterior_estimator_based_potential,
)
-from sbi.neural_nets.density_estimators.base import ConditionalDensityEstimator
-from sbi.neural_nets.density_estimators.shape_handling import (
+from sbi.neural_nets.estimators.base import ConditionalDensityEstimator
+from sbi.neural_nets.estimators.shape_handling import (
reshape_to_batch_event,
reshape_to_sample_batch_event,
)
diff --git a/sbi/inference/posteriors/mcmc_posterior.py b/sbi/inference/posteriors/mcmc_posterior.py
index e150819bb..65f59b95c 100644
--- a/sbi/inference/posteriors/mcmc_posterior.py
+++ b/sbi/inference/posteriors/mcmc_posterior.py
@@ -21,7 +21,7 @@
from sbi.inference.posteriors.base_posterior import NeuralPosterior
from sbi.inference.potentials.base_potential import BasePotential
-from sbi.neural_nets.density_estimators.shape_handling import reshape_to_batch_event
+from sbi.neural_nets.estimators.shape_handling import reshape_to_batch_event
from sbi.samplers.mcmc import (
IterateParameters,
PyMCSampler,
diff --git a/sbi/inference/posteriors/score_posterior.py b/sbi/inference/posteriors/score_posterior.py
new file mode 100644
index 000000000..d689f989f
--- /dev/null
+++ b/sbi/inference/posteriors/score_posterior.py
@@ -0,0 +1,367 @@
+# This file is part of sbi, a toolkit for simulation-based inference. sbi is licensed
+# under the Apache License Version 2.0, see
+
+from typing import Dict, Optional, Union
+
+import torch
+from torch import Tensor
+from torch.distributions import Distribution
+
+from sbi.inference.posteriors.base_posterior import NeuralPosterior
+from sbi.inference.potentials.score_based_potential import (
+ PosteriorScoreBasedPotential,
+ score_estimator_based_potential,
+)
+from sbi.neural_nets.estimators.score_estimator import ConditionalScoreEstimator
+from sbi.neural_nets.estimators.shape_handling import (
+ reshape_to_batch_event,
+)
+from sbi.samplers.score.correctors import Corrector
+from sbi.samplers.score.predictors import Predictor
+from sbi.samplers.score.score import Diffuser
+from sbi.sbi_types import Shape
+from sbi.utils import check_prior
+from sbi.utils.torchutils import ensure_theta_batched
+
+
+class ScorePosterior(NeuralPosterior):
+ r"""Posterior $p(\theta|x_o)$ with `log_prob()` and `sample()` methods. It samples
+ from the diffusion model given the score_estimator and rejects samples that lie
+ outside of the prior bounds.
+
+ The posterior is defined by a score estimator and a prior. The score estimator
+ provides the gradient of the log-posterior with respect to the parameters. The prior
+ is used to reject samples that lie outside of the prior bounds.
+
+ Sampling is done by running a diffusion process with a predictor and optionally a
+ corrector.
+
+ Log probabilities are obtained by calling the potential function, which in turn
+ evaluates a zuko probability-flow ODE to compute the log-probability.
+ """
+
+ def __init__(
+ self,
+ score_estimator: ConditionalScoreEstimator,
+ prior: Distribution,
+ max_sampling_batch_size: int = 10_000,
+ device: Optional[str] = None,
+ enable_transform: bool = False,
+ sample_with: str = "sde",
+ ):
+ """
+ Args:
+ prior: Prior distribution with `.log_prob()` and `.sample()`.
+ score_estimator: The trained neural score estimator.
+ max_sampling_batch_size: Maximum batch size of samples drawn in each
+ sampling iteration.
+ device: Training device, e.g., "cpu", "cuda" or "cuda:0". If None,
+ `potential_fn.device` is used.
+ enable_transform: Whether to transform parameters to unconstrained space
+ during MAP optimization. When False, an identity transform will be
+ returned for `theta_transform`. True is not supported yet.
+ sample_with: Whether to sample from the posterior using the ODE-based
+ sampler or the SDE-based sampler.
+ """
+
+ check_prior(prior)
+ potential_fn, theta_transform = score_estimator_based_potential(
+ score_estimator,
+ prior,
+ x_o=None,
+ enable_transform=enable_transform,
+ )
+ super().__init__(
+ potential_fn=potential_fn,
+ theta_transform=theta_transform,
+ device=device,
+ )
+ # Set the potential function type.
+ self.potential_fn: PosteriorScoreBasedPotential = potential_fn
+
+ self.prior = prior
+ self.score_estimator = score_estimator
+
+ self.sample_with = sample_with
+ assert self.sample_with in [
+ "ode",
+ "sde",
+ ], f"sample_with must be 'ode' or 'sde', but is {self.sample_with}."
+ self.max_sampling_batch_size = max_sampling_batch_size
+
+ self._purpose = """It samples from the diffusion model given the \
+ score_estimator."""
+
+ def sample(
+ self,
+ sample_shape: Shape = torch.Size(),
+ x: Optional[Tensor] = None,
+ predictor: Union[str, Predictor] = "euler_maruyama",
+ corrector: Optional[Union[str, Corrector]] = None,
+ predictor_params: Optional[Dict] = None,
+ corrector_params: Optional[Dict] = None,
+ steps: int = 500,
+ ts: Optional[Tensor] = None,
+ max_sampling_batch_size: int = 10_000,
+ sample_with: Optional[str] = None,
+ show_progress_bars: bool = True,
+ ) -> Tensor:
+ r"""Return samples from posterior distribution $p(\theta|x)$.
+
+ Args:
+ sample_shape: Shape of the samples to be drawn.
+ x: Deprecated - use `.set_default_x()` prior to `.sample()`.
+ predictor: The predictor for the diffusion-based sampler. Can be a string or
+ a custom predictor following the API in `sbi.samplers.score.predictors`.
+ Currently, only `euler_maruyama` is implemented.
+ corrector: The corrector for the diffusion-based sampler. Currently, no
+ corrector is implemented (pass `None`).
+ predictor_params: Additional parameters passed to predictor.
+ corrector_params: Additional parameters passed to corrector.
+ steps: Number of steps to take for the Euler-Maruyama method.
+ ts: Time points at which to evaluate the diffusion process. If None, a
+ linear grid between t_max and t_min is used.
+ max_sampling_batch_size: Maximum batch size for sampling.
+ sample_with: Deprecated - use `.build_posterior(sample_with=...)` prior to
+ `.sample()`.
+ show_progress_bars: Whether to show a progress bar during sampling.
+ """
+
+ if sample_with is not None:
+ raise ValueError(
+ f"You set `sample_with={sample_with}`. As of sbi v0.18.0, setting "
+ f"`sample_with` is no longer supported. You have to rerun "
+ f"`.build_posterior(sample_with={sample_with}).`"
+ )
+
+ x = self._x_else_default_x(x)
+ x = reshape_to_batch_event(x, self.score_estimator.condition_shape)
+ self.potential_fn.set_x(x)
+
+ if self.sample_with == "ode":
+ samples = self.sample_via_zuko(sample_shape=sample_shape, x=x)
+ elif self.sample_with == "sde":
+ samples = self._sample_via_diffusion(
+ sample_shape=sample_shape,
+ predictor=predictor,
+ corrector=corrector,
+ predictor_params=predictor_params,
+ corrector_params=corrector_params,
+ steps=steps,
+ ts=ts,
+ max_sampling_batch_size=max_sampling_batch_size,
+ show_progress_bars=show_progress_bars,
+ )
+
+ return samples
+
+ def _sample_via_diffusion(
+ self,
+ sample_shape: Shape = torch.Size(),
+ predictor: Union[str, Predictor] = "euler_maruyama",
+ corrector: Optional[Union[str, Corrector]] = None,
+ predictor_params: Optional[Dict] = None,
+ corrector_params: Optional[Dict] = None,
+ steps: int = 500,
+ ts: Optional[Tensor] = None,
+ max_sampling_batch_size: int = 10_000,
+ show_progress_bars: bool = True,
+ ) -> Tensor:
+ r"""Return samples from posterior distribution $p(\theta|x)$.
+
+ Args:
+ sample_shape: Shape of the samples to be drawn.
+ predictor: The predictor for the diffusion-based sampler. Can be a string or
+ a custom predictor following the API in `sbi.samplers.score.predictors`.
+ Currently, only `euler_maruyama` is implemented.
+ corrector: The corrector for the diffusion-based sampler. Currently, no
+ corrector is implemented (pass `None`).
+ predictor_params: Additional parameters passed to predictor.
+ corrector_params: Additional parameters passed to corrector.
+ steps: Number of steps to take for the Euler-Maruyama method.
+ ts: Time points at which to evaluate the diffusion process. If None, a
+ linear grid between t_max and t_min is used.
+ max_sampling_batch_size: Maximum batch size for sampling.
+ show_progress_bars: Whether to show a progress bar during sampling.
+ """
+
+ num_samples = torch.Size(sample_shape).numel()
+
+ max_sampling_batch_size = (
+ self.max_sampling_batch_size
+ if max_sampling_batch_size is None
+ else max_sampling_batch_size
+ )
+
+ if ts is None:
+ t_max = self.score_estimator.t_max
+ t_min = self.score_estimator.t_min
+ ts = torch.linspace(t_max, t_min, steps)
+
+ diffuser = Diffuser(
+ self.potential_fn,
+ predictor=predictor,
+ corrector=corrector,
+ predictor_params=predictor_params,
+ corrector_params=corrector_params,
+ )
+ max_sampling_batch_size = min(max_sampling_batch_size, num_samples)
+ samples = []
+ num_iter = num_samples // max_sampling_batch_size
+ num_iter = (
+ num_iter + 1 if (num_samples % max_sampling_batch_size) != 0 else num_iter
+ )
+ for _ in range(num_iter):
+ samples.append(
+ diffuser.run(
+ num_samples=max_sampling_batch_size,
+ ts=ts,
+ show_progress_bars=show_progress_bars,
+ )
+ )
+ samples = torch.cat(samples, dim=0)[:num_samples]
+
+ return samples.reshape(sample_shape + self.score_estimator.input_shape)
+
+ def sample_via_zuko(
+ self,
+ x: Tensor,
+ sample_shape: Shape = torch.Size(),
+ ) -> Tensor:
+ r"""Return samples from posterior distribution with probability flow ODE.
+
+ This builds the probability flow ODE and then samples from the corresponding
+ flow, as implemented via the zuko library.
+
+ Args:
+ x: Condition.
+ sample_shape: The shape of the samples to be returned.
+
+ Returns:
+ Samples.
+ """
+ num_samples = torch.Size(sample_shape).numel()
+
+ flow = self.potential_fn.get_continuous_normalizing_flow(condition=x)
+ samples = flow.sample(torch.Size((num_samples,)))
+
+ return samples.reshape(sample_shape + self.score_estimator.input_shape)
+
+ def log_prob(
+ self,
+ theta: Tensor,
+ x: Optional[Tensor] = None,
+ track_gradients: bool = False,
+ atol: float = 1e-5,
+ rtol: float = 1e-6,
+ exact: bool = True,
+ ) -> Tensor:
+ r"""Returns the log-probability of the posterior $p(\theta|x)$.
+
+ This requires building and evaluating the probability flow ODE.
+
+ Args:
+ theta: Parameters $\theta$.
+ x: Observed data $x_o$. If None, the default $x_o$ is used.
+ track_gradients: Whether the returned tensor supports tracking gradients.
+ This can be helpful for e.g. sensitivity analysis, but increases memory
+ consumption.
+ atol: Absolute tolerance for the ODE solver.
+ rtol: Relative tolerance for the ODE solver.
+ exact: Whether to use the exact Jacobian of the transformation or a
+ stochastic approximation, which is faster but less accurate.
+
+ Returns:
+ `(len(θ),)`-shaped log posterior probability $\log p(\theta|x)$ for θ in the
+ support of the prior, -∞ (corresponding to 0 probability) outside.
+ """
+ self.potential_fn.set_x(self._x_else_default_x(x))
+
+ theta = ensure_theta_batched(torch.as_tensor(theta))
+ return self.potential_fn(
+ theta.to(self._device),
+ track_gradients=track_gradients,
+ atol=atol,
+ rtol=rtol,
+ exact=exact,
+ )
+
+ def sample_batched(
+ self,
+ sample_shape: torch.Size,
+ x: Tensor,
+ max_sampling_batch_size: int = 10000,
+ show_progress_bars: bool = True,
+ ) -> Tensor:
+ raise NotImplementedError(
+ "Batched sampling is not implemented for ScorePosterior."
+ )
+
+ def map(
+ self,
+ x: Optional[Tensor] = None,
+ num_iter: int = 1000,
+ num_to_optimize: int = 1000,
+ learning_rate: float = 1e-5,
+ init_method: Union[str, Tensor] = "posterior",
+ num_init_samples: int = 1000,
+ save_best_every: int = 1000,
+ show_progress_bars: bool = False,
+ force_update: bool = False,
+ ) -> Tensor:
+ r"""Returns the maximum-a-posteriori estimate (MAP).
+
+ The method can be interrupted (Ctrl-C) when the user sees that the
+ log-probability converges. The best estimate will be saved in `self._map` and
+ can be accessed with `self.map()`. The MAP is obtained by running gradient
+ ascent from a given number of starting positions (samples from the posterior
+ with the highest log-probability). After the optimization is done, we select the
+ parameter set that has the highest log-probability after the optimization.
+
+ Warning: The default values used by this function are not well-tested. They
+ might require hand-tuning for the problem at hand.
+
+ For developers: if the prior is a `BoxUniform`, we carry out the optimization
+ in unbounded space and transform the result back into bounded space.
+
+ Args:
+ x: Deprecated - use `.set_default_x()` prior to `.map()`.
+ num_iter: Number of optimization steps that the algorithm takes
+ to find the MAP.
+ num_to_optimize: From the drawn `num_init_samples`, use the
+ `num_to_optimize` with highest log-probability as the initial points
+ for the optimization.
+ learning_rate: Learning rate of the optimizer.
+ init_method: How to select the starting parameters for the optimization. If
+ it is a string, it can be either [`posterior`, `prior`], which samples
+ the respective distribution `num_init_samples` times. If it is a
+ tensor, the tensor will be used as init locations.
+ num_init_samples: Draw this number of samples from the posterior and
+ evaluate the log-probability of all of them.
+ save_best_every: The best log-probability is computed, saved in the
+ `map`-attribute, and printed every `save_best_every`-th iteration.
+ Computing the best log-probability creates a significant overhead
+ (thus, the default is `1000`).
+ show_progress_bars: Whether to show a progressbar during sampling from
+ the posterior.
+ force_update: Whether to re-calculate the MAP when x is unchanged and
+ a cached value exists.
+
+ Returns:
+ The MAP estimate.
+ """
+ raise NotImplementedError(
+ "MAP estimation is currently not working accurately for ScorePosterior."
+ )
+ return super().map(
+ x=x,
+ num_iter=num_iter,
+ num_to_optimize=num_to_optimize,
+ learning_rate=learning_rate,
+ init_method=init_method,
+ num_init_samples=num_init_samples,
+ save_best_every=save_best_every,
+ show_progress_bars=show_progress_bars,
+ force_update=force_update,
+ )
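Continuing the toy setup from the NPSE sketch above, here is a hedged sketch of constructing a `ScorePosterior` directly from a trained score estimator: `sample_with="sde"` runs the predictor-based reverse diffusion, `sample_with="ode"` samples the zuko probability-flow ODE, and `log_prob` always evaluates that ODE:

```python
from sbi.inference.posteriors.score_posterior import ScorePosterior

# Reuses `score_estimator`, `prior`, and `x` from the NPSE sketch above.
posterior = ScorePosterior(score_estimator, prior, sample_with="sde")
posterior.set_default_x(x[0])
samples = posterior.sample((500,), steps=500, predictor="euler_maruyama")

# log_prob always evaluates the probability-flow ODE; tolerances go to the solver.
log_probs = posterior.log_prob(samples, atol=1e-5, rtol=1e-6, exact=True)
```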
diff --git a/sbi/inference/potentials/base_potential.py b/sbi/inference/potentials/base_potential.py
index 769031321..f7f9dfe41 100644
--- a/sbi/inference/potentials/base_potential.py
+++ b/sbi/inference/potentials/base_potential.py
@@ -35,6 +35,11 @@ def __init__(
def __call__(self, theta: Tensor, track_gradients: bool = True) -> Tensor:
raise NotImplementedError
+ def gradient(
+ self, theta: Tensor, time: Optional[Tensor] = None, track_gradients: bool = True
+ ) -> Tensor:
+ raise NotImplementedError
+
@property
def x_is_iid(self) -> bool:
"""If x has batch dimension greater than 1, whether to intepret the batch as iid
diff --git a/sbi/inference/potentials/likelihood_based_potential.py b/sbi/inference/potentials/likelihood_based_potential.py
index c824a5dc5..11101975d 100644
--- a/sbi/inference/potentials/likelihood_based_potential.py
+++ b/sbi/inference/potentials/likelihood_based_potential.py
@@ -8,8 +8,8 @@
from torch.distributions import Distribution
from sbi.inference.potentials.base_potential import BasePotential
-from sbi.neural_nets.density_estimators import ConditionalDensityEstimator
-from sbi.neural_nets.density_estimators.shape_handling import (
+from sbi.neural_nets.estimators import ConditionalDensityEstimator
+from sbi.neural_nets.estimators.shape_handling import (
reshape_to_batch_event,
reshape_to_sample_batch_event,
)
diff --git a/sbi/inference/potentials/posterior_based_potential.py b/sbi/inference/potentials/posterior_based_potential.py
index 9272b2d68..000d1f89f 100644
--- a/sbi/inference/potentials/posterior_based_potential.py
+++ b/sbi/inference/potentials/posterior_based_potential.py
@@ -9,8 +9,8 @@
from torch.distributions import Distribution
from sbi.inference.potentials.base_potential import BasePotential
-from sbi.neural_nets.density_estimators import ConditionalDensityEstimator
-from sbi.neural_nets.density_estimators.shape_handling import (
+from sbi.neural_nets.estimators import ConditionalDensityEstimator
+from sbi.neural_nets.estimators.shape_handling import (
reshape_to_batch_event,
reshape_to_sample_batch_event,
)
diff --git a/sbi/inference/potentials/score_based_potential.py b/sbi/inference/potentials/score_based_potential.py
new file mode 100644
index 000000000..5dcf7b5a7
--- /dev/null
+++ b/sbi/inference/potentials/score_based_potential.py
@@ -0,0 +1,231 @@
+# This file is part of sbi, a toolkit for simulation-based inference. sbi is licensed
+# under the Apache License Version 2.0, see
+
+from typing import Optional, Tuple
+
+import torch
+from torch import Tensor
+from torch.distributions import Distribution
+from zuko.distributions import NormalizingFlow
+from zuko.transforms import FreeFormJacobianTransform
+
+from sbi.inference.potentials.base_potential import BasePotential
+from sbi.neural_nets.estimators.score_estimator import ConditionalScoreEstimator
+from sbi.neural_nets.estimators.shape_handling import (
+ reshape_to_batch_event,
+ reshape_to_sample_batch_event,
+)
+from sbi.sbi_types import TorchTransform
+from sbi.utils import mcmc_transform
+from sbi.utils.sbiutils import within_support
+from sbi.utils.torchutils import ensure_theta_batched
+
+
+def score_estimator_based_potential(
+ score_estimator: ConditionalScoreEstimator,
+ prior: Optional[Distribution],
+ x_o: Optional[Tensor],
+ enable_transform: bool = False,
+) -> Tuple["PosteriorScoreBasedPotential", TorchTransform]:
+ r"""Returns the potential function gradient for score estimators.
+
+ Args:
+ score_estimator: The neural network modelling the score.
+ prior: The prior distribution.
+ x_o: The observed data at which to evaluate the score.
+ enable_transform: Whether to enable transforms. Not supported yet.
+ """
+ device = str(next(score_estimator.parameters()).device)
+
+ potential_fn = PosteriorScoreBasedPotential(
+ score_estimator, prior, x_o, device=device
+ )
+
+ assert (
+ enable_transform is False
+ ), "Transforms are not yet supported for score estimators."
+
+ if prior is not None:
+ theta_transform = mcmc_transform(
+ prior, device=device, enable_transform=enable_transform
+ )
+ else:
+ theta_transform = torch.distributions.transforms.identity_transform
+
+ return potential_fn, theta_transform
+
+
+class PosteriorScoreBasedPotential(BasePotential):
+ def __init__(
+ self,
+ score_estimator: ConditionalScoreEstimator,
+ prior: Optional[Distribution],
+ x_o: Optional[Tensor],
+ iid_method: str = "iid_bridge",
+ device: str = "cpu",
+ ):
+ r"""Returns the score function for score-based methods.
+
+ Args:
+ score_estimator: The neural network modelling the score.
+ prior: The prior distribution.
+ x_o: The observed data at which to evaluate the posterior.
+ iid_method: Which method to use for computing the score. Currently, only
+ `iid_bridge` as proposed in Geffner et al. is implemented.
+ device: The device on which to evaluate the potential.
+ """
+
+ super().__init__(prior, x_o, device=device)
+ self.score_estimator = score_estimator
+ self.score_estimator.eval()
+ self.iid_method = iid_method
+
+ def __call__(
+ self,
+ theta: Tensor,
+ track_gradients: bool = True,
+ atol: float = 1e-5,
+ rtol: float = 1e-6,
+ exact: bool = True,
+ ) -> Tensor:
+ """Return the potential (posterior log prob) via probability flow ODE.
+
+ Args:
+ theta: The parameters at which to evaluate the potential.
+ track_gradients: Whether to track gradients.
+ atol: Absolute tolerance for the ODE solver.
+ rtol: Relative tolerance for the ODE solver.
+ exact: Whether to use the exact ODE solver.
+
+ Returns:
+ The potential function, i.e., the log probability of the posterior.
+ """
+ theta = ensure_theta_batched(torch.as_tensor(theta))
+ theta_density_estimator = reshape_to_sample_batch_event(
+ theta, theta.shape[1:], leading_is_sample=True
+ )
+ x_density_estimator = reshape_to_batch_event(
+ self.x_o, event_shape=self.score_estimator.condition_shape
+ )
+ assert (
+ x_density_estimator.shape[0] == 1
+ ), "PosteriorScoreBasedPotential supports only x batchsize of 1`."
+
+ self.score_estimator.eval()
+
+ flow = self.get_continuous_normalizing_flow(
+ condition=x_density_estimator, atol=atol, rtol=rtol, exact=exact
+ )
+
+ with torch.set_grad_enabled(track_gradients):
+ log_probs = flow.log_prob(theta_density_estimator).squeeze(-1)
+ # Force probability to be zero outside prior support.
+ in_prior_support = within_support(self.prior, theta)
+
+ masked_log_prob = torch.where(
+ in_prior_support,
+ log_probs,
+ torch.tensor(float("-inf"), dtype=torch.float32, device=self.device),
+ )
+ return masked_log_prob
+
+ def gradient(
+ self, theta: Tensor, time: Optional[Tensor] = None, track_gradients: bool = True
+ ) -> Tensor:
+ r"""Returns the potential function gradient for score-based methods.
+
+ Args:
+ theta: The parameters at which to evaluate the potential.
+ time: The diffusion time. If None, then `t_min` of the
+ self.score_estimator is used (i.e. we evaluate the gradient of the
+ actual data distribution).
+ track_gradients: Whether to track gradients.
+
+ Returns:
+ The gradient of the potential function.
+ """
+ if time is None:
+ time = torch.tensor([self.score_estimator.t_min])
+
+ if self._x_o is None:
+ raise ValueError(
+ "No observed data x_o is available. Please reinitialize \
+ the potential or manually set self._x_o."
+ )
+
+ with torch.set_grad_enabled(track_gradients):
+ if not self.x_is_iid or self._x_o.shape[0] == 1:
+ score = self.score_estimator.forward(
+ input=theta, condition=self.x_o, time=time
+ )
+ else:
+ raise NotImplementedError(
+ "Score accumulation for IID data is not yet implemented."
+ )
+
+ return score
+
+ def get_continuous_normalizing_flow(
+ self,
+ condition: Tensor,
+ atol: float = 1e-5,
+ rtol: float = 1e-6,
+ exact: bool = True,
+ ) -> NormalizingFlow:
+ r"""Returns the normalizing flow for the score-based estimator."""
+
+ # Compute the base density
+ mean_t = self.score_estimator.mean_t
+ std_t = self.score_estimator.std_t
+ base_density = torch.distributions.Normal(mean_t, std_t)
+ # TODO: is this correct? Should we wrap base_density once per event dimension?
+ for _ in range(len(self.score_estimator.input_shape)):
+ base_density = torch.distributions.Independent(base_density, 1)
+
+ # Build the freeform jacobian transformation by probability flow ODEs
+ transform = build_freeform_jacobian_transform(
+ self.score_estimator, condition, atol=atol, rtol=rtol, exact=exact
+ )
+ # Use zuko to build the normalizing flow.
+ return NormalizingFlow(transform, base=base_density)
+
+
+def build_freeform_jacobian_transform(
+ score_estimator: ConditionalScoreEstimator,
+ x_o: Tensor,
+ atol: float = 1e-5,
+ rtol: float = 1e-6,
+ exact: bool = True,
+) -> FreeFormJacobianTransform:
+ """Builds the free-form Jacobian for the probability flow ODE, used for log-prob.
+
+ Args:
+ score_estimator: The neural network estimating the score.
+ x_o: Observation.
+ atol: Absolute tolerance for the ODE solver.
+ rtol: Relative tolerance for the ODE solver.
+ exact: Whether to use the exact ODE solver.
+
+ Returns:
+ Transformation of probability flow ODE.
+ """
+ # Create a freeform jacobian transformation
+ phi = (x_o, *score_estimator.parameters())
+
+ def f(t, x):
+ score = score_estimator(input=x, condition=x_o, time=t)
+ f = score_estimator.drift_fn(x, t)
+ g = score_estimator.diffusion_fn(x, t)
+ v = f - 0.5 * g**2 * score
+ return v
+
+ transform = FreeFormJacobianTransform(
+ f=f,
+ t0=score_estimator.t_min,
+ t1=score_estimator.t_max,
+ phi=phi,
+ atol=atol,
+ rtol=rtol,
+ exact=exact,
+ )
+ return transform
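The drift `v = f - 0.5 * g**2 * score` in `f(t, x)` above is the probability-flow ODE drift (Song et al.). Below is a self-contained numerical sketch, using a hypothetical 1-D variance-exploding setup with a known analytic score (unrelated to the estimators in this PR), illustrating that integrating this ODE backwards in time recovers the data distribution:

```python
import torch

# Variance-exploding toy: data ~ N(0, 1), sigma(t)^2 = t^2, so x_t ~ N(0, 1 + t^2).
def score_fn(x, t):
    return -x / (1.0 + t**2)              # analytic score of N(0, 1 + t^2)

def drift_fn(x, t):
    return torch.zeros_like(x)            # VE SDE has zero drift f(x, t) = 0

def diffusion_fn(x, t):
    return torch.as_tensor(2.0 * t).sqrt()  # g(t)^2 = d/dt sigma(t)^2 = 2t

def ode_drift(x, t):
    # Probability-flow ODE drift, mirroring `v = f - 0.5 * g**2 * score` above.
    return drift_fn(x, t) - 0.5 * diffusion_fn(x, t) ** 2 * score_fn(x, t)

# Start from the noisy marginal at t_max = 1 and integrate down to t_min.
x = torch.randn(10_000) * (2.0 ** 0.5)    # std of N(0, 1 + 1)
ts = torch.linspace(1.0, 1e-3, 200)
for t0, t1 in zip(ts[:-1], ts[1:]):
    x = x + ode_drift(x, t0) * (t1 - t0)  # Euler step with negative dt

print(float(x.std()))                     # approaches 1.0, the std of the data
```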
diff --git a/sbi/inference/snle/snle_base.py b/sbi/inference/snle/snle_base.py
index d05353134..ae036526d 100644
--- a/sbi/inference/snle/snle_base.py
+++ b/sbi/inference/snle/snle_base.py
@@ -17,7 +17,7 @@
from sbi.inference.posteriors.importance_posterior import ImportanceSamplingPosterior
from sbi.inference.potentials import likelihood_estimator_based_potential
from sbi.neural_nets import ConditionalDensityEstimator, likelihood_nn
-from sbi.neural_nets.density_estimators.shape_handling import (
+from sbi.neural_nets.estimators.shape_handling import (
reshape_to_batch_event,
)
from sbi.utils import check_estimator_arg, check_prior, x_shape_from_simulation
@@ -187,14 +187,14 @@ def train(
list(self._neural_net.parameters()),
lr=learning_rate,
)
- self.epoch, self._val_log_prob = 0, float("-Inf")
+ self.epoch, self._val_loss = 0, float("Inf")
while self.epoch <= max_num_epochs and not self._converged(
self.epoch, stop_after_epochs
):
# Train for a single epoch.
self._neural_net.train()
- train_log_probs_sum = 0
+ train_loss_sum = 0
for batch in train_loader:
self.optimizer.zero_grad()
theta_batch, x_batch = (
@@ -204,7 +204,7 @@ def train(
# Evaluate on x with theta as context.
train_losses = self._loss(theta=theta_batch, x=x_batch)
train_loss = torch.mean(train_losses)
- train_log_probs_sum -= train_losses.sum().item()
+ train_loss_sum += train_losses.sum().item()
train_loss.backward()
if clip_max_norm is not None:
@@ -216,14 +216,14 @@ def train(
self.epoch += 1
- train_log_prob_average = train_log_probs_sum / (
+ train_loss_average = train_loss_sum / (
len(train_loader) * train_loader.batch_size # type: ignore
)
- self._summary["training_log_probs"].append(train_log_prob_average)
+ self._summary["training_loss"].append(train_loss_average)
# Calculate validation performance.
self._neural_net.eval()
- val_log_prob_sum = 0
+ val_loss_sum = 0
with torch.no_grad():
for batch in val_loader:
theta_batch, x_batch = (
@@ -232,14 +232,14 @@ def train(
)
# Evaluate on x with theta as context.
val_losses = self._loss(theta=theta_batch, x=x_batch)
- val_log_prob_sum -= val_losses.sum().item()
+ val_loss_sum += val_losses.sum().item()
# Take mean over all validation samples.
- self._val_log_prob = val_log_prob_sum / (
+ self._val_loss = val_loss_sum / (
len(val_loader) * val_loader.batch_size # type: ignore
)
- # Log validation log prob for every epoch.
- self._summary["validation_log_probs"].append(self._val_log_prob)
+ # Log validation loss for every epoch.
+ self._summary["validation_loss"].append(self._val_loss)
self._maybe_show_progress(self._show_progress_bars, self.epoch)
@@ -247,7 +247,7 @@ def train(
# Update summary.
self._summary["epochs_trained"].append(self.epoch)
- self._summary["best_validation_log_prob"].append(self._best_val_log_prob)
+ self._summary["best_validation_loss"].append(self._best_val_loss)
# Update TensorBoard and summary dict.
self._summarize(round_=self._round)
diff --git a/sbi/inference/snpe/snpe_a.py b/sbi/inference/snpe/snpe_a.py
index 64e72fd58..15add3393 100644
--- a/sbi/inference/snpe/snpe_a.py
+++ b/sbi/inference/snpe/snpe_a.py
@@ -14,7 +14,7 @@
from sbi.inference.posteriors.direct_posterior import DirectPosterior
from sbi.inference.snpe.snpe_base import PosteriorEstimator
-from sbi.neural_nets.density_estimators.base import ConditionalDensityEstimator
+from sbi.neural_nets.estimators.base import ConditionalDensityEstimator
from sbi.sbi_types import TensorboardSummaryWriter, TorchModule
from sbi.utils import torchutils
from sbi.utils.sbiutils import (
diff --git a/sbi/inference/snpe/snpe_base.py b/sbi/inference/snpe/snpe_base.py
index 55030229d..c53ee55d1 100644
--- a/sbi/inference/snpe/snpe_base.py
+++ b/sbi/inference/snpe/snpe_base.py
@@ -25,7 +25,7 @@
from sbi.inference.posteriors.importance_posterior import ImportanceSamplingPosterior
from sbi.inference.potentials import posterior_estimator_based_potential
from sbi.neural_nets import ConditionalDensityEstimator, posterior_nn
-from sbi.neural_nets.density_estimators.shape_handling import (
+from sbi.neural_nets.estimators.shape_handling import (
reshape_to_batch_event,
reshape_to_sample_batch_event,
)
@@ -336,14 +336,14 @@ def default_calibration_kernel(x):
if not resume_training:
self.optimizer = Adam(list(self._neural_net.parameters()), lr=learning_rate)
- self.epoch, self._val_log_prob = 0, float("-Inf")
+ self.epoch, self._val_loss = 0, float("Inf")
while self.epoch <= max_num_epochs and not self._converged(
self.epoch, stop_after_epochs
):
# Train for a single epoch.
self._neural_net.train()
- train_log_probs_sum = 0
+ train_loss_sum = 0
epoch_start_time = time.time()
for batch in train_loader:
self.optimizer.zero_grad()
@@ -363,7 +363,7 @@ def default_calibration_kernel(x):
force_first_round_loss=force_first_round_loss,
)
train_loss = torch.mean(train_losses)
- train_log_probs_sum -= train_losses.sum().item()
+ train_loss_sum += train_losses.sum().item()
train_loss.backward()
if clip_max_norm is not None:
@@ -374,14 +374,14 @@ def default_calibration_kernel(x):
self.epoch += 1
- train_log_prob_average = train_log_probs_sum / (
+ train_loss_average = train_loss_sum / (
len(train_loader) * train_loader.batch_size # type: ignore
)
- self._summary["training_log_probs"].append(train_log_prob_average)
+ self._summary["training_loss"].append(train_loss_average)
# Calculate validation performance.
self._neural_net.eval()
- val_log_prob_sum = 0
+ val_loss_sum = 0
with torch.no_grad():
for batch in val_loader:
@@ -399,14 +399,14 @@ def default_calibration_kernel(x):
calibration_kernel,
force_first_round_loss=force_first_round_loss,
)
- val_log_prob_sum -= val_losses.sum().item()
+ val_loss_sum += val_losses.sum().item()
# Take mean over all validation samples.
- self._val_log_prob = val_log_prob_sum / (
+ self._val_loss = val_loss_sum / (
len(val_loader) * val_loader.batch_size # type: ignore
)
- # Log validation log prob for every epoch.
- self._summary["validation_log_probs"].append(self._val_log_prob)
+ # Log validation loss for every epoch.
+ self._summary["validation_loss"].append(self._val_loss)
self._summary["epoch_durations_sec"].append(time.time() - epoch_start_time)
self._maybe_show_progress(self._show_progress_bars, self.epoch)
@@ -415,7 +415,7 @@ def default_calibration_kernel(x):
# Update summary.
self._summary["epochs_trained"].append(self.epoch)
- self._summary["best_validation_log_prob"].append(self._best_val_log_prob)
+ self._summary["best_validation_loss"].append(self._best_val_loss)
# Update tensorboard and summary dict.
self._summarize(round_=self._round)
diff --git a/sbi/inference/snpe/snpe_c.py b/sbi/inference/snpe/snpe_c.py
index e72b25e33..812908a48 100644
--- a/sbi/inference/snpe/snpe_c.py
+++ b/sbi/inference/snpe/snpe_c.py
@@ -11,7 +11,7 @@
from sbi.inference.posteriors.direct_posterior import DirectPosterior
from sbi.inference.snpe.snpe_base import PosteriorEstimator
-from sbi.neural_nets.density_estimators.shape_handling import (
+from sbi.neural_nets.estimators.shape_handling import (
reshape_to_batch_event,
reshape_to_sample_batch_event,
)
diff --git a/sbi/inference/snre/snre_base.py b/sbi/inference/snre/snre_base.py
index 522e8ab56..dff310467 100644
--- a/sbi/inference/snre/snre_base.py
+++ b/sbi/inference/snre/snre_base.py
@@ -212,14 +212,14 @@ def train(
list(self._neural_net.parameters()),
lr=learning_rate,
)
- self.epoch, self._val_log_prob = 0, float("-Inf")
+ self.epoch, self._val_loss = 0, float("Inf")
while self.epoch <= max_num_epochs and not self._converged(
self.epoch, stop_after_epochs
):
# Train for a single epoch.
self._neural_net.train()
- train_log_probs_sum = 0
+ train_loss_sum = 0
for batch in train_loader:
self.optimizer.zero_grad()
theta_batch, x_batch = (
@@ -231,7 +231,7 @@ def train(
theta_batch, x_batch, num_atoms, **loss_kwargs
)
train_loss = torch.mean(train_losses)
- train_log_probs_sum -= train_losses.sum().item()
+ train_loss_sum += train_losses.sum().item()
train_loss.backward()
if clip_max_norm is not None:
@@ -243,14 +243,14 @@ def train(
self.epoch += 1
- train_log_prob_average = train_log_probs_sum / (
+ train_loss_average = train_loss_sum / (
len(train_loader) * train_loader.batch_size # type: ignore
)
- self._summary["training_log_probs"].append(train_log_prob_average)
+ self._summary["training_loss"].append(train_loss_average)
# Calculate validation performance.
self._neural_net.eval()
- val_log_prob_sum = 0
+ val_loss_sum = 0
with torch.no_grad():
for batch in val_loader:
theta_batch, x_batch = (
@@ -260,13 +260,13 @@ def train(
val_losses = self._loss(
theta_batch, x_batch, num_atoms, **loss_kwargs
)
- val_log_prob_sum -= val_losses.sum().item()
+ val_loss_sum += val_losses.sum().item()
# Take mean over all validation samples.
- self._val_log_prob = val_log_prob_sum / (
+ self._val_loss = val_loss_sum / (
len(val_loader) * val_loader.batch_size # type: ignore
)
-            # Log validation log prob for every epoch.
+            # Log validation loss for every epoch.
- self._summary["validation_log_probs"].append(self._val_log_prob)
+ self._summary["validation_loss"].append(self._val_loss)
self._maybe_show_progress(self._show_progress_bars, self.epoch)
@@ -274,7 +274,7 @@ def train(
# Update summary.
self._summary["epochs_trained"].append(self.epoch)
- self._summary["best_validation_log_prob"].append(self._best_val_log_prob)
+ self._summary["best_validation_loss"].append(self._best_val_loss)
# Update TensorBoard and summary dict.
self._summarize(round_=self._round)
diff --git a/sbi/neural_nets/__init__.py b/sbi/neural_nets/__init__.py
index e6d7a7839..1d521bf5e 100644
--- a/sbi/neural_nets/__init__.py
+++ b/sbi/neural_nets/__init__.py
@@ -3,12 +3,12 @@
build_mlp_classifier,
build_resnet_classifier,
)
-from sbi.neural_nets.density_estimators import ConditionalDensityEstimator, NFlowsFlow
from sbi.neural_nets.embedding_nets import (
CNNEmbedding,
FCEmbedding,
PermutationInvariantEmbedding,
)
+from sbi.neural_nets.estimators import ConditionalDensityEstimator, NFlowsFlow
from sbi.neural_nets.factory import (
classifier_nn,
flowmatching_nn,
diff --git a/sbi/neural_nets/categorial.py b/sbi/neural_nets/categorial.py
index 1e84b5731..0bf32c687 100644
--- a/sbi/neural_nets/categorial.py
+++ b/sbi/neural_nets/categorial.py
@@ -5,7 +5,7 @@
from torch import Tensor, nn, unique
-from sbi.neural_nets.density_estimators import CategoricalMassEstimator, CategoricalNet
+from sbi.neural_nets.estimators import CategoricalMassEstimator, CategoricalNet
from sbi.utils.nn_utils import get_numel
from sbi.utils.sbiutils import (
standardizing_net,
diff --git a/sbi/neural_nets/density_estimators/__init__.py b/sbi/neural_nets/density_estimators/__init__.py
deleted file mode 100644
index 4f96bbb53..000000000
--- a/sbi/neural_nets/density_estimators/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-from sbi.neural_nets.density_estimators.base import ConditionalDensityEstimator
-from sbi.neural_nets.density_estimators.categorical_net import (
- CategoricalMassEstimator,
- CategoricalNet,
-)
-from sbi.neural_nets.density_estimators.flowmatching_estimator import (
- FlowMatchingEstimator,
-)
-from sbi.neural_nets.density_estimators.mixed_density_estimator import (
- MixedDensityEstimator,
-)
-from sbi.neural_nets.density_estimators.nflows_flow import NFlowsFlow
-from sbi.neural_nets.density_estimators.zuko_flow import ZukoFlow
diff --git a/sbi/neural_nets/embedding_nets.py b/sbi/neural_nets/embedding_nets.py
index 6365d211f..63f639b7b 100644
--- a/sbi/neural_nets/embedding_nets.py
+++ b/sbi/neural_nets/embedding_nets.py
@@ -4,6 +4,7 @@
from typing import List, Optional, Tuple, Union
import torch
+from numpy import pi
from torch import Tensor, nn
@@ -309,3 +310,18 @@ def forward(self, x: Tensor) -> Tensor:
# add number of trials as additional input
return self.fc_subnet(torch.cat([combined_embedding, trial_counts], dim=1))
+
+
+class GaussianFourierTimeEmbedding(nn.Module):
+ """Gaussian random features for encoding time steps."""
+
+ def __init__(self, embed_dim=256, scale=30.0):
+ super().__init__()
+ # Randomly sample weights during initialization. These weights are fixed
+ # during optimization and are not trainable.
+ self.W = nn.Parameter(torch.randn(embed_dim // 2) * scale, requires_grad=False)
+
+ def forward(self, times: Tensor):
+ times_proj = times[:, None] * self.W[None, :] * 2 * pi
+ embedding = torch.cat([torch.sin(times_proj), torch.cos(times_proj)], dim=-1)
+ return torch.squeeze(embedding, dim=1)
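`GaussianFourierTimeEmbedding` maps a scalar diffusion time to fixed (non-trainable) random Fourier features. An illustrative usage sketch, assuming a batch of times in [0, 1]:

import torch
from sbi.neural_nets.embedding_nets import GaussianFourierTimeEmbedding

emb = GaussianFourierTimeEmbedding(embed_dim=256, scale=30.0)
times = torch.rand(8)        # (8,) diffusion times in [0, 1]
features = emb(times)        # (8, 256): concatenated sin/cos features
assert features.shape == (8, 256)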
diff --git a/sbi/neural_nets/estimators/__init__.py b/sbi/neural_nets/estimators/__init__.py
new file mode 100644
index 000000000..1d67308f4
--- /dev/null
+++ b/sbi/neural_nets/estimators/__init__.py
@@ -0,0 +1,10 @@
+from sbi.neural_nets.estimators.base import ConditionalDensityEstimator
+from sbi.neural_nets.estimators.categorical_net import (
+ CategoricalMassEstimator,
+ CategoricalNet,
+)
+from sbi.neural_nets.estimators.mixed_density_estimator import (
+ MixedDensityEstimator,
+)
+from sbi.neural_nets.estimators.nflows_flow import NFlowsFlow
+from sbi.neural_nets.estimators.zuko_flow import ZukoFlow
diff --git a/sbi/neural_nets/density_estimators/base.py b/sbi/neural_nets/estimators/base.py
similarity index 85%
rename from sbi/neural_nets/density_estimators/base.py
rename to sbi/neural_nets/estimators/base.py
index 45d50ff7f..cc1438447 100644
--- a/sbi/neural_nets/density_estimators/base.py
+++ b/sbi/neural_nets/estimators/base.py
@@ -221,3 +221,44 @@ def sample_and_log_prob(
samples = self.sample(sample_shape, condition, **kwargs)
log_probs = self.log_prob(samples, condition, **kwargs)
return samples, log_probs
+
+
+class ConditionalVectorFieldEstimator(ConditionalEstimator):
+ r"""Base class for vector field (e.g., score and ODE flow) estimators.
+
+    The vector field estimator class is a wrapper around neural networks that
+    allows one to evaluate the `vector_field` and to compute the `loss` for
+    $\theta,x$ pairs. Here $\theta$ is the `input` and $x$ is the `condition`.
+
+ Note:
+        We assume that the input to the vector field estimator is a tensor of shape
+ (batch_size, input_size), where input_size is the dimensionality of the input.
+ The condition is a tensor of shape (batch_size, *condition_shape), where
+ condition_shape is the shape of the condition tensor.
+
+ """
+
+ def __init__(
+ self, net: nn.Module, input_shape: torch.Size, condition_shape: torch.Size
+ ) -> None:
+ r"""Base class for vector field estimators.
+
+ Args:
+            net: Neural network.
+            input_shape: Shape of the input.
+            condition_shape: Shape of the condition. If not provided, it will assume a
+                1D input.
+ """
+ super().__init__(input_shape, condition_shape)
+ self.net = net
+
+ @abstractmethod
+ def forward(self, input: Tensor, condition: Tensor, **kwargs) -> Tensor:
+        """Forward pass of the vector field estimator.
+
+ Args:
+ input: variable whose distribution is estimated.
+ condition: Conditioning variable.
+
+ Raises:
+ NotImplementedError: This method should be implemented by sub-classes.
+ """
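A concrete subclass of `ConditionalVectorFieldEstimator` only has to provide `forward(input, condition, **kwargs)` returning a tensor shaped like `input`. A hypothetical sketch (any remaining abstract methods of `ConditionalEstimator`, e.g. `loss`, would also need to be implemented before instantiation):

import torch
from torch import Tensor
from sbi.neural_nets.estimators.base import ConditionalVectorFieldEstimator

class LinearVectorFieldEstimator(ConditionalVectorFieldEstimator):
    # Hypothetical example: regress the vector field from [input, condition],
    # e.g. with net = nn.Linear(input_dim + condition_dim, input_dim).
    def forward(self, input: Tensor, condition: Tensor, **kwargs) -> Tensor:
        return self.net(torch.cat([input, condition], dim=-1))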
diff --git a/sbi/neural_nets/density_estimators/categorical_net.py b/sbi/neural_nets/estimators/categorical_net.py
similarity index 96%
rename from sbi/neural_nets/density_estimators/categorical_net.py
rename to sbi/neural_nets/estimators/categorical_net.py
index 63e496698..e1f3ea8ca 100644
--- a/sbi/neural_nets/density_estimators/categorical_net.py
+++ b/sbi/neural_nets/estimators/categorical_net.py
@@ -1,3 +1,6 @@
+# This file is part of sbi, a toolkit for simulation-based inference. sbi is licensed
+# under the Apache License Version 2.0, see
+
from typing import Optional
import torch
@@ -5,7 +8,7 @@
from torch.distributions import Categorical
from torch.nn import Sigmoid, Softmax
-from sbi.neural_nets.density_estimators.base import ConditionalDensityEstimator
+from sbi.neural_nets.estimators.base import ConditionalDensityEstimator
class CategoricalNet(nn.Module):
diff --git a/sbi/neural_nets/density_estimators/flowmatching_estimator.py b/sbi/neural_nets/estimators/flowmatching_estimator.py
similarity index 98%
rename from sbi/neural_nets/density_estimators/flowmatching_estimator.py
rename to sbi/neural_nets/estimators/flowmatching_estimator.py
index 3500b732c..8b6494054 100644
--- a/sbi/neural_nets/density_estimators/flowmatching_estimator.py
+++ b/sbi/neural_nets/estimators/flowmatching_estimator.py
@@ -11,7 +11,7 @@
from zuko.transforms import FreeFormJacobianTransform
from zuko.utils import broadcast
-from sbi.neural_nets.density_estimators.base import ConditionalDensityEstimator
+from sbi.neural_nets.estimators.base import ConditionalDensityEstimator
# abstract class to ensure forward signature for flow matching networks
diff --git a/sbi/neural_nets/density_estimators/mixed_density_estimator.py b/sbi/neural_nets/estimators/mixed_density_estimator.py
similarity index 98%
rename from sbi/neural_nets/density_estimators/mixed_density_estimator.py
rename to sbi/neural_nets/estimators/mixed_density_estimator.py
index 5369e3547..f251adc23 100644
--- a/sbi/neural_nets/density_estimators/mixed_density_estimator.py
+++ b/sbi/neural_nets/estimators/mixed_density_estimator.py
@@ -6,8 +6,8 @@
import torch
from torch import Tensor, nn
-from sbi.neural_nets.density_estimators.base import ConditionalDensityEstimator
-from sbi.neural_nets.density_estimators.categorical_net import CategoricalMassEstimator
+from sbi.neural_nets.estimators.base import ConditionalDensityEstimator
+from sbi.neural_nets.estimators.categorical_net import CategoricalMassEstimator
class MixedDensityEstimator(ConditionalDensityEstimator):
diff --git a/sbi/neural_nets/density_estimators/nflows_flow.py b/sbi/neural_nets/estimators/nflows_flow.py
similarity index 98%
rename from sbi/neural_nets/density_estimators/nflows_flow.py
rename to sbi/neural_nets/estimators/nflows_flow.py
index 198a66776..8edd9763b 100644
--- a/sbi/neural_nets/density_estimators/nflows_flow.py
+++ b/sbi/neural_nets/estimators/nflows_flow.py
@@ -7,7 +7,7 @@
from pyknos.nflows.flows import Flow
from torch import Tensor, nn
-from sbi.neural_nets.density_estimators.base import ConditionalDensityEstimator
+from sbi.neural_nets.estimators.base import ConditionalDensityEstimator
from sbi.sbi_types import Shape
diff --git a/sbi/neural_nets/estimators/score_estimator.py b/sbi/neural_nets/estimators/score_estimator.py
new file mode 100644
index 000000000..4b01a0267
--- /dev/null
+++ b/sbi/neural_nets/estimators/score_estimator.py
@@ -0,0 +1,654 @@
+# This file is part of sbi, a toolkit for simulation-based inference. sbi is licensed
+# under the Apache License Version 2.0, see
+
+import math
+from typing import Callable, Optional, Union
+
+import torch
+from torch import Tensor, nn
+
+from sbi.neural_nets.estimators.base import ConditionalVectorFieldEstimator
+
+
+class ConditionalScoreEstimator(ConditionalVectorFieldEstimator):
+ r"""Score matching for score-based generative models (e.g., denoising diffusion).
+    The estimator neural network (this class) learns the score function, i.e., the
+    gradient of the log conditional probability density with respect to the input,
+    which can be used to generate samples from the target distribution by solving the
+    reverse SDE starting from the base (Gaussian) distribution.
+
+ We assume the following SDE:
+ dx = A(t)xdt + B(t)dW,
+ where A(t) and B(t) are the drift and diffusion functions, respectively, and dW is
+    a Wiener process. This leads to a marginal distribution of the form:
+ p(xt|x0) = N(xt; mean_t(t)*x0, std_t(t)),
+ where mean_t(t) and std_t(t) are the conditional mean and standard deviation at a
+ given time t, respectively.
+
+ Relevant literature:
+ - Score-based generative modeling through SDE: https://arxiv.org/abs/2011.13456
+ - Denoising diffusion probabilistic models: https://arxiv.org/abs/2006.11239
+ - Noise conditional score networks: https://arxiv.org/abs/1907.05600
+
+    NOTE: This follows the "noise matching" approach; we could also train a
+    "denoising" network aiming to predict the original input given the noised input.
+    The score could then still be approximated via Tweedie's formula, and training
+    might be easier.
+ """
+
+ def __init__(
+ self,
+ net: nn.Module,
+ input_shape: torch.Size,
+ condition_shape: torch.Size,
+ weight_fn: Union[str, Callable] = "max_likelihood",
+ mean_0: Union[Tensor, float] = 0.0,
+ std_0: Union[Tensor, float] = 1.0,
+ t_min: float = 1e-3,
+ t_max: float = 1.0,
+ ) -> None:
+ r"""Score estimator class that estimates the conditional score function, i.e.,
+ gradient of the density p(xt|x0).
+
+ Args:
+ net: Score estimator neural network with call signature: input, condition,
+                and time (in [0,1]).
+ condition_shape: Shape of the conditioning variable.
+ weight_fn: Function to compute the weights over time. Can be one of the
+ following:
+ - "identity": constant weights (1.),
+ - "max_likelihood": weights proportional to the diffusion function, or
+                - a custom Callable.
+
+ """
+ super().__init__(net, input_shape, condition_shape)
+
+ # Set lambdas (variance weights) function.
+ self._set_weight_fn(weight_fn)
+
+ # Min time for diffusion (0 can be numerically unstable).
+ self.t_min = t_min
+ self.t_max = t_max
+
+ # Starting mean and std of the target distribution (otherwise assumes 0,1).
+ # This will be used to precondition the score network to improve training.
+ if not isinstance(mean_0, Tensor):
+ mean_0 = torch.tensor([mean_0])
+ if not isinstance(std_0, Tensor):
+ std_0 = torch.tensor([std_0])
+
+ self.register_buffer("mean_0", mean_0.clone().detach())
+ self.register_buffer("std_0", std_0.clone().detach())
+
+ # We estimate the mean and std of the source distribution at time t_max.
+ mean_t = self.approx_marginal_mean(torch.tensor([t_max]))
+ std_t = self.approx_marginal_std(torch.tensor([t_max]))
+ self.register_buffer("mean_t", mean_t)
+ self.register_buffer("std_t", std_t)
+
+ def forward(self, input: Tensor, condition: Tensor, time: Tensor) -> Tensor:
+ r"""Forward pass of the score estimator network to compute the conditional score
+ at a given time.
+
+ Args:
+ input: Original data, x0. (input_batch_shape, *input_shape)
+ condition: Conditioning variable. (condition_batch_shape, *condition_shape)
+            time: SDE time variable in [0,1].
+
+ Returns:
+ Score (gradient of the density) at a given time, matches input shape.
+ """
+ batch_shape = torch.broadcast_shapes(
+ input.shape[: -len(self.input_shape)],
+ condition.shape[: -len(self.condition_shape)],
+ )
+
+ input = torch.broadcast_to(input, batch_shape + self.input_shape)
+ condition = torch.broadcast_to(condition, batch_shape + self.condition_shape)
+ time = torch.broadcast_to(time, batch_shape)
+
+ # Time dependent mean and std of the target distribution to z-score the input
+ # and to approximate the score at the end of the diffusion.
+ mean = self.approx_marginal_mean(time)
+ std = self.approx_marginal_std(time)
+
+        # As input to the neural net we want to have something that changes
+        # proportionally to how the scores change.
+ time_enc = self.std_fn(time)
+
+        # Time-dependent z-scoring keeps the input at similar scales.
+ input_enc = (input - mean) / std
+
+ # Approximate score becoming exact for t -> t_max, "skip connection"
+ score_gaussian = (input - mean) / std**2
+
+ # Score prediction by the network
+ score_pred = self.net(input_enc, condition, time_enc)
+
+ # Output pre-conditioned score
+        # The learnable part will be largely scaled at the beginning of the diffusion
+        # and the Gaussian part (where it should end up) will dominate at the end of
+        # the diffusion.
+ scale = self.mean_t_fn(time) / self.std_fn(time)
+ output_score = -scale * score_pred - score_gaussian
+
+ return output_score
+
+ def loss(
+ self,
+ input: Tensor,
+ condition: Tensor,
+ times: Optional[Tensor] = None,
+ control_variate=True,
+ control_variate_threshold=torch.inf,
+ ) -> Tensor:
+        r"""Defines the denoising score matching loss (e.g., from Song et al., ICLR
+        2021). A random diffusion time is sampled from [t_min, t_max], and the network
+        is trained to predict the score of the true conditional distribution given the
+        noised input, which is equivalent to predicting the (scaled) Gaussian noise
+        added to the input.
+
+ Args:
+ input: Input variable i.e. theta.
+ condition: Conditioning variable.
+ times: SDE time variable in [t_min, t_max]. Uniformly sampled if None.
+ control_variate: Whether to use a control variate to reduce the variance of
+ the stochastic loss estimator.
+ control_variate_threshold: Threshold for the control variate. If the std
+ exceeds this threshold, the control variate is not used.
+
+ Returns:
+ MSE between target score and network output, scaled by the weight function.
+
+ """
+ # Sample diffusion times.
+ if times is None:
+ times = (
+ torch.rand(input.shape[0], device=input.device)
+ * (self.t_max - self.t_min)
+ + self.t_min
+ )
+
+ # Sample noise.
+ eps = torch.randn_like(input)
+
+ # Compute mean and standard deviation.
+ mean = self.mean_fn(input, times)
+ std = self.std_fn(times)
+
+ # Get noised input, i.e., p(xt|x0).
+ input_noised = mean + std * eps
+
+ # Compute true cond. score: -(noised_input - mean) / (std**2).
+ score_target = -eps / std
+
+ # Predict score from noised input and diffusion time.
+ score_pred = self.forward(input_noised, condition, times)
+
+ # Compute weights over time.
+ weights = self.weight_fn(times)
+
+ # Compute MSE loss between network output and true score.
+ loss = torch.sum((score_pred - score_target) ** 2.0, dim=-1)
+
+        # For times -> 0 this loss has high variance. A standard method to reduce the
+        # variance is to use a control variate, i.e., a term that has zero expectation
+        # but is strongly correlated with our objective.
+        # Such a term can be derived from a zeroth-order Taylor expansion of the score
+        # network around the mean (see https://arxiv.org/pdf/2101.03288 for details).
+        # NOTE: As it is a Taylor expansion, it will only work well for small std.
+
+ if control_variate:
+ D = input.shape[-1]
+ score_mean_pred = self.forward(mean, condition, times)
+ s = torch.squeeze(std, -1)
+
+ # Loss terms that depend on eps
+ term1 = 2 / s * torch.sum(eps * score_mean_pred, dim=-1)
+ term2 = torch.sum(eps**2, dim=-1) / s**2
+ # This term is the analytical expectation of the above term
+ term3 = D / s**2
+
+ control_variate = term3 - term1 - term2
+
+ control_variate = torch.where(
+ s < control_variate_threshold, control_variate, 0.0
+ )
+
+ loss = loss + control_variate
+
+ return weights * loss
+
+ def approx_marginal_mean(self, times: Tensor) -> Tensor:
+ r"""Approximate the marginal mean of the target distribution at a given time.
+
+ Args:
+ times: SDE time variable in [0,1].
+
+ Returns:
+ Approximate marginal mean at a given time.
+ """
+ return self.mean_t_fn(times) * self.mean_0
+
+ def approx_marginal_std(self, times: Tensor) -> Tensor:
+ r"""Approximate the marginal standard deviation of the target distribution at a
+ given time.
+
+ Args:
+ times: SDE time variable in [0,1].
+
+ Returns:
+ Approximate marginal standard deviation at a given time.
+ """
+ vars = self.mean_t_fn(times) ** 2 * self.std_0**2 + self.std_fn(times) ** 2
+ return torch.sqrt(vars)
+
+ def mean_t_fn(self, times: Tensor) -> Tensor:
+        r"""Conditional mean function, E[xt|x0], specifying the "mean factor" at a
+        given time, which is always multiplied by x0 to get the mean of the noise
+        distribution, i.e., p(xt|x0) = N(xt; mean_t(t)*x0, std_t(t)).
+
+ Args:
+ times: SDE time variable in [0,1].
+
+ Raises:
+            NotImplementedError: This method should be implemented by each
+                individual SDE class.
+ """
+ raise NotImplementedError
+
+ def mean_fn(self, x0: Tensor, times: Tensor) -> Tensor:
+ r"""Mean function of the SDE, which just multiplies the specific "mean factor"
+ by the original input x0, to get the mean of the noise distribution, i.e.,
+ p(xt|x0) = N(xt; mean_t(t)*x0, std_t(t)).
+
+ Args:
+ x0: Initial input data.
+ times: SDE time variable in [0,1].
+
+ Returns:
+ Mean of the noise distribution at a given time.
+ """
+ return self.mean_t_fn(times) * x0
+
+ def std_fn(self, times: Tensor) -> Tensor:
+        r"""Standard deviation function of the noise distribution at a given time,
+        i.e., p(xt|x0) = N(xt; mean_t(t)*x0, std_t(t)).
+
+ Args:
+ times: SDE time variable in [0,1].
+
+ Raises:
+            NotImplementedError: This method should be implemented by each
+                individual SDE class.
+ """
+ raise NotImplementedError
+
+ def drift_fn(self, input: Tensor, times: Tensor) -> Tensor:
+ r"""Drift function, f(x,t), of the SDE described by dx = f(x,t)dt + g(x,t)dW.
+
+ Args:
+ input: Original data, x0.
+ times: SDE time variable in [0,1].
+
+ Raises:
+            NotImplementedError: This method should be implemented by each
+                individual SDE class.
+ """
+ raise NotImplementedError
+
+ def diffusion_fn(self, input: Tensor, times: Tensor) -> Tensor:
+ r"""Diffusion function, g(x,t), of the SDE described by
+ dx = f(x,t)dt + g(x,t)dW.
+
+ Args:
+ input: Original data, x0.
+ times: SDE time variable in [0,1].
+
+ Raises:
+            NotImplementedError: This method should be implemented by each
+                individual SDE class.
+ """
+ raise NotImplementedError
+
+ def _set_weight_fn(self, weight_fn: Union[str, Callable]):
+ """Set the weight function.
+
+ Args:
+ weight_fn: Function to compute the weights over time. Can be one of the
+ following:
+ - "identity": constant weights (1.),
+ - "max_likelihood": weights proportional to the diffusion function, or
+                - a custom Callable.
+ """
+ if weight_fn == "identity":
+ self.weight_fn = lambda times: 1
+ elif weight_fn == "max_likelihood":
+ self.weight_fn = (
+ lambda times: self.diffusion_fn(
+ torch.ones((1,), device=times.device), times
+ )
+ ** 2
+ )
+ elif weight_fn == "variance":
+ self.weight_fn = lambda times: self.std_fn(times) ** 2
+ elif callable(weight_fn):
+ self.weight_fn = weight_fn
+ else:
+ raise ValueError(f"Weight function {weight_fn} not recognized.")
+
+
+class VPScoreEstimator(ConditionalScoreEstimator):
+ """Class for score estimators with variance preserving SDEs (i.e., DDPM)."""
+
+ def __init__(
+ self,
+ net: nn.Module,
+ input_shape: torch.Size,
+ condition_shape: torch.Size,
+ weight_fn: Union[str, Callable] = "max_likelihood",
+ beta_min: float = 0.01,
+ beta_max: float = 10.0,
+ mean_0: Union[Tensor, float] = 0.0,
+ std_0: Union[Tensor, float] = 1.0,
+ t_min: float = 1e-5,
+ t_max: float = 1.0,
+ ) -> None:
+ self.beta_min = beta_min
+ self.beta_max = beta_max
+ super().__init__(
+ net,
+ input_shape,
+ condition_shape,
+ mean_0=mean_0,
+ std_0=std_0,
+ weight_fn=weight_fn,
+ t_min=t_min,
+ t_max=t_max,
+ )
+
+ def mean_t_fn(self, times: Tensor) -> Tensor:
+ """Conditional mean function for variance preserving SDEs.
+ Args:
+ times: SDE time variable in [0,1].
+
+ Returns:
+ Conditional mean at a given time.
+ """
+ phi = torch.exp(
+ -0.25 * times**2.0 * (self.beta_max - self.beta_min)
+ - 0.5 * times * self.beta_min
+ )
+ for _ in range(len(self.input_shape)):
+ phi = phi.unsqueeze(-1)
+ return phi
+
+ def std_fn(self, times: Tensor) -> Tensor:
+ """Standard deviation function for variance preserving SDEs.
+ Args:
+ times: SDE time variable in [0,1].
+
+ Returns:
+ Standard deviation at a given time.
+ """
+ std = 1.0 - torch.exp(
+ -0.5 * times**2.0 * (self.beta_max - self.beta_min) - times * self.beta_min
+ )
+ for _ in range(len(self.input_shape)):
+ std = std.unsqueeze(-1)
+ return torch.sqrt(std)
+
+ def _beta_schedule(self, times: Tensor) -> Tensor:
+ """Linear beta schedule for mean scaling in variance preserving SDEs.
+
+ Args:
+ times: SDE time variable in [0,1].
+
+ Returns:
+ Beta schedule at a given time.
+ """
+ return self.beta_min + (self.beta_max - self.beta_min) * times
+
+ def drift_fn(self, input: Tensor, times: Tensor) -> Tensor:
+ """Drift function for variance preserving SDEs.
+
+ Args:
+ input: Original data, x0.
+ times: SDE time variable in [0,1].
+
+ Returns:
+ Drift function at a given time.
+ """
+ phi = -0.5 * self._beta_schedule(times)
+ while len(phi.shape) < len(input.shape):
+ phi = phi.unsqueeze(-1)
+ return phi * input
+
+ def diffusion_fn(self, input: Tensor, times: Tensor) -> Tensor:
+ """Diffusion function for variance preserving SDEs.
+
+ Args:
+ input: Original data, x0.
+ times: SDE time variable in [0,1].
+
+ Returns:
+            Diffusion function at a given time.
+ """
+ g = torch.sqrt(self._beta_schedule(times))
+ while len(g.shape) < len(input.shape):
+ g = g.unsqueeze(-1)
+ return g
+
+
+class SubVPScoreEstimator(ConditionalScoreEstimator):
+ """Class for score estimators with sub-variance preserving SDEs."""
+
+ def __init__(
+ self,
+ net: nn.Module,
+ input_shape: torch.Size,
+ condition_shape: torch.Size,
+ weight_fn: Union[str, Callable] = "max_likelihood",
+ beta_min: float = 0.01,
+ beta_max: float = 10.0,
+ mean_0: float = 0.0,
+ std_0: float = 1.0,
+ t_min: float = 1e-2,
+ t_max: float = 1.0,
+ ) -> None:
+ self.beta_min = beta_min
+ self.beta_max = beta_max
+ super().__init__(
+ net,
+ input_shape,
+ condition_shape,
+ weight_fn=weight_fn,
+ mean_0=mean_0,
+ std_0=std_0,
+ t_min=t_min,
+ t_max=t_max,
+ )
+
+ def mean_t_fn(self, times: Tensor) -> Tensor:
+ """Conditional mean function for sub-variance preserving SDEs.
+ Args:
+ times: SDE time variable in [0,1].
+
+ Returns:
+ Conditional mean at a given time.
+ """
+ phi = torch.exp(
+ -0.25 * times**2.0 * (self.beta_max - self.beta_min)
+ - 0.5 * times * self.beta_min
+ )
+ for _ in range(len(self.input_shape)):
+ phi = phi.unsqueeze(-1)
+ return phi
+
+ def std_fn(self, times: Tensor) -> Tensor:
+        """Standard deviation function for sub-variance preserving SDEs.
+ Args:
+ times: SDE time variable in [0,1].
+
+ Returns:
+ Standard deviation at a given time.
+ """
+ std = 1.0 - torch.exp(
+ -0.5 * times**2.0 * (self.beta_max - self.beta_min) - times * self.beta_min
+ )
+ for _ in range(len(self.input_shape)):
+ std = std.unsqueeze(-1)
+ return std
+
+ def _beta_schedule(self, times: Tensor) -> Tensor:
+ """Linear beta schedule for mean scaling in sub-variance preserving SDEs.
+ (Same as for variance preserving SDEs.)
+
+ Args:
+ times: SDE time variable in [0,1].
+
+ Returns:
+ Beta schedule at a given time.
+ """
+ return self.beta_min + (self.beta_max - self.beta_min) * times
+
+ def drift_fn(self, input: Tensor, times: Tensor) -> Tensor:
+ """Drift function for sub-variance preserving SDEs.
+
+ Args:
+ input: Original data, x0.
+ times: SDE time variable in [0,1].
+
+ Returns:
+ Drift function at a given time.
+ """
+ phi = -0.5 * self._beta_schedule(times)
+
+ while len(phi.shape) < len(input.shape):
+ phi = phi.unsqueeze(-1)
+
+ return phi * input
+
+ def diffusion_fn(self, input: Tensor, times: Tensor) -> Tensor:
+ """Diffusion function for sub-variance preserving SDEs.
+
+ Args:
+ input: Original data, x0.
+ times: SDE time variable in [0,1].
+
+ Returns:
+ Diffusion function at a given time.
+ """
+ g = torch.sqrt(
+ torch.abs(
+ self._beta_schedule(times)
+ * (
+ 1
+ - torch.exp(
+ -2 * self.beta_min * times
+ - (self.beta_max - self.beta_min) * times**2
+ )
+ )
+ )
+ )
+
+ while len(g.shape) < len(input.shape):
+ g = g.unsqueeze(-1)
+
+ return g
+
+
+class VEScoreEstimator(ConditionalScoreEstimator):
+ """Class for score estimators with variance exploding SDEs (i.e., NCSN / SMLD)."""
+
+ def __init__(
+ self,
+ net: nn.Module,
+ input_shape: torch.Size,
+ condition_shape: torch.Size,
+ weight_fn: Union[str, Callable] = "max_likelihood",
+ sigma_min: float = 1e-5,
+ sigma_max: float = 5.0,
+ mean_0: float = 0.0,
+ std_0: float = 1.0,
+ ) -> None:
+ self.sigma_min = sigma_min
+ self.sigma_max = sigma_max
+ super().__init__(
+ net,
+ input_shape,
+ condition_shape,
+ weight_fn=weight_fn,
+ mean_0=mean_0,
+ std_0=std_0,
+ )
+
+ def mean_t_fn(self, times: Tensor) -> Tensor:
+ """Conditional mean function for variance exploding SDEs, which is always 1.
+
+ Args:
+ times: SDE time variable in [0,1].
+
+ Returns:
+ Conditional mean at a given time.
+ """
+ phi = torch.ones_like(times, device=times.device)
+ for _ in range(len(self.input_shape)):
+ phi = phi.unsqueeze(-1)
+ return phi
+
+ def std_fn(self, times: Tensor) -> Tensor:
+ """Standard deviation function for variance exploding SDEs.
+
+ Args:
+ times: SDE time variable in [0,1].
+
+ Returns:
+ Standard deviation at a given time.
+ """
+ std = self.sigma_min * (self.sigma_max / self.sigma_min) ** times
+ for _ in range(len(self.input_shape)):
+ std = std.unsqueeze(-1)
+ return std
+
+ def _sigma_schedule(self, times: Tensor) -> Tensor:
+ """Geometric sigma schedule for variance exploding SDEs.
+
+ Args:
+ times: SDE time variable in [0,1].
+
+ Returns:
+ Sigma schedule at a given time.
+ """
+ return self.sigma_min * (self.sigma_max / self.sigma_min) ** times
+
+ def drift_fn(self, input: Tensor, times: Tensor) -> Tensor:
+ """Drift function for variance exploding SDEs.
+
+ Args:
+ input: Original data, x0.
+ times: SDE time variable in [0,1].
+
+ Returns:
+ Drift function at a given time.
+ """
+ return torch.tensor([0.0])
+
+ def diffusion_fn(self, input: Tensor, times: Tensor) -> Tensor:
+ """Diffusion function for variance exploding SDEs.
+
+ Args:
+ input: Original data, x0.
+ times: SDE time variable in [0,1].
+
+ Returns:
+ Diffusion function at a given time.
+ """
+ g = self._sigma_schedule(times) * math.sqrt(
+ (2 * math.log(self.sigma_max / self.sigma_min))
+ )
+
+ while len(g.shape) < len(input.shape):
+ g = g.unsqueeze(-1)
+
+ return g
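As a concrete illustration of the perturbation kernel p(xt|x0) = N(mean_t(t)*x0, std_t(t)) and of the score matching target used in `loss`, a small sketch for the VP SDE; `toy_net` is a placeholder and the shapes are arbitrary:

import torch
from torch import nn
from sbi.neural_nets.estimators.score_estimator import VPScoreEstimator

toy_net = nn.Identity()  # placeholder; a real net maps (input, condition, time) -> score
est = VPScoreEstimator(toy_net, input_shape=torch.Size([2]), condition_shape=torch.Size([3]))

x0 = torch.randn(16, 2)                                    # clean parameters theta
t = torch.rand(16) * (est.t_max - est.t_min) + est.t_min   # diffusion times
eps = torch.randn_like(x0)

mean = est.mean_fn(x0, t)   # mean_t(t) * x0
std = est.std_fn(t)         # std_t(t), broadcast over the input dimension
xt = mean + std * eps       # draw from p(xt | x0)
target = -eps / std         # true conditional score of N(xt; mean, std)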
diff --git a/sbi/neural_nets/density_estimators/shape_handling.py b/sbi/neural_nets/estimators/shape_handling.py
similarity index 100%
rename from sbi/neural_nets/density_estimators/shape_handling.py
rename to sbi/neural_nets/estimators/shape_handling.py
diff --git a/sbi/neural_nets/density_estimators/zuko_flow.py b/sbi/neural_nets/estimators/zuko_flow.py
similarity index 98%
rename from sbi/neural_nets/density_estimators/zuko_flow.py
rename to sbi/neural_nets/estimators/zuko_flow.py
index b8c9c8726..edc535d69 100644
--- a/sbi/neural_nets/density_estimators/zuko_flow.py
+++ b/sbi/neural_nets/estimators/zuko_flow.py
@@ -7,7 +7,7 @@
from torch import Tensor, nn
from zuko.flows.core import Flow
-from sbi.neural_nets.density_estimators.base import ConditionalDensityEstimator
+from sbi.neural_nets.estimators.base import ConditionalDensityEstimator
from sbi.sbi_types import Shape
diff --git a/sbi/neural_nets/factory.py b/sbi/neural_nets/factory.py
index 9a7af4c6e..1db2a096c 100644
--- a/sbi/neural_nets/factory.py
+++ b/sbi/neural_nets/factory.py
@@ -2,7 +2,7 @@
# under the Affero General Public License v3, see .
-from typing import Any, Callable, Optional
+from typing import Any, Callable, Optional, Union
from torch import nn
@@ -32,6 +32,7 @@
)
from sbi.neural_nets.mdn import build_mdn
from sbi.neural_nets.mnle import build_mnle
+from sbi.neural_nets.score_nets import build_score_estimator
from sbi.utils.nn_utils import check_net_device
model_builders = {
@@ -222,8 +223,8 @@ def flowmatching_nn(
be used for Flow Matching. The returned function is to be passed to the
Args:
- model: The type of density estimator that will be created. One of [`mdn`,
- `made`, `maf`, `maf_rqs`, `nsf`].
+        model: The type of regression network to learn the vector field. One of ['mlp',
+ 'resnet'].
z_score_theta: Whether to z-score parameters $\theta$ before passing them into
the network, can take one of the following:
- `none`, or None: do not z-score.
@@ -238,9 +239,8 @@ def flowmatching_nn(
density estimator is a normalizing flow (i.e. currently either a `maf` or a
`nsf`). Ignored if density estimator is a `mdn` or `made`.
num_blocks: Number of blocks if a ResNet is used.
- embedding_net: Optional embedding network for x.
- num_components: Number of mixture components for a mixture of Gaussians.
- Ignored if density estimator is not an mdn.
+ num_frequencies: Number of frequencies for the time embedding.
+ embedding_net: Optional embedding network for the condition.
kwargs: additional custom arguments passed to downstream build functions.
"""
implemented_models = ["mlp", "resnet"]
@@ -370,3 +370,85 @@ def build_fn(batch_theta, batch_x):
kwargs.pop("num_components")
return build_fn_snpe_a if model == "mdn_snpe_a" else build_fn
+
+
+def posterior_score_nn(
+ sde_type: str,
+ score_net_type: Union[str, nn.Module] = "mlp",
+ z_score_theta: Optional[str] = "independent",
+ z_score_x: Optional[str] = "independent",
+ t_embedding_dim: int = 16,
+ hidden_features: int = 50,
+ embedding_net: nn.Module = nn.Identity(),
+ **kwargs: Any,
+) -> Callable:
+    """Returns a function that builds a ScoreEstimator object for score-based
+    posteriors.
+
+ Args:
+ sde_type: SDE type used, which defines the mean and std functions. One of:
+ - 'vp': Variance preserving.
+ - 'subvp': Sub-variance preserving.
+ - 've': Variance exploding.
+ Defaults to 'vp'.
+        score_net_type: Type of regression network. One of:
+ - 'mlp': Fully connected feed-forward network.
+ - 'resnet': Residual network (NOT IMPLEMENTED).
+ - nn.Module: Custom network
+ Defaults to 'mlp'.
+ z_score_theta: Whether to z-score thetas passing into the network, can be one
+ of:
+ - `none`, or None: do not z-score.
+ - `independent`: z-score each dimension independently.
+ - `structured`: treat dimensions as related, therefore compute mean and std
+ over the entire batch, instead of per-dimension. Should be used when each
+ sample is, for example, a time series or an image.
+ z_score_x: Whether to z-score xs passing into the network, same options as
+ z_score_theta.
+ t_embedding_dim: Embedding dimension of diffusion time. Defaults to 16.
+ hidden_features: Number of hidden units per layer. Defaults to 50.
+ embedding_net: Embedding network for x (conditioning variable). Defaults to
+ nn.Identity().
+
+ Returns:
+ Constructor function for NPSE.
+ """
+
+ kwargs = dict(
+ zip(
+ (
+ "z_score_x",
+ "z_score_y",
+ "sde_type",
+ "score_net",
+ "t_embedding_dim",
+ "hidden_features",
+ "embedding_net_y",
+ ),
+ (
+ z_score_theta,
+ z_score_x,
+ sde_type,
+ score_net_type,
+ t_embedding_dim,
+ hidden_features,
+ embedding_net,
+ ),
+ ),
+ **kwargs,
+ )
+
+ def build_fn(batch_theta, batch_x):
+ """Build function wrapper for the build_score_estimator function that
+ is required for the score posterior class.
+
+ Args:
+ batch_theta: a batch of theta.
+ batch_x: a batch of x.
+
+ Returns:
+            A ScoreEstimator object.
+ """
+ return build_score_estimator(batch_x=batch_theta, batch_y=batch_x, **kwargs)
+
+ return build_fn
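An illustrative call of the `posterior_score_nn` factory added above; the returned builder takes example batches and produces the score estimator (batches here are random placeholders):

import torch
from sbi.neural_nets.factory import posterior_score_nn

build_fn = posterior_score_nn(sde_type="vp", score_net_type="mlp", hidden_features=64)
theta = torch.randn(128, 2)            # example parameter batch
x = torch.randn(128, 3)                # example observation batch
score_estimator = build_fn(theta, x)   # ConditionalScoreEstimator with a VP SDE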
diff --git a/sbi/neural_nets/flow.py b/sbi/neural_nets/flow.py
index c8f505e98..d820cfac7 100644
--- a/sbi/neural_nets/flow.py
+++ b/sbi/neural_nets/flow.py
@@ -1,7 +1,6 @@
# This file is part of sbi, a toolkit for simulation-based inference. sbi is licensed
# under the Apache License Version 2.0, see
-
from functools import partial
from typing import List, Optional, Sequence, Union
@@ -15,7 +14,7 @@
)
from torch import Tensor, nn, relu, tanh, tensor, uint8
-from sbi.neural_nets.density_estimators import NFlowsFlow, ZukoFlow
+from sbi.neural_nets.estimators import NFlowsFlow, ZukoFlow
from sbi.utils.nn_utils import get_numel
from sbi.utils.sbiutils import (
standardizing_net,
diff --git a/sbi/neural_nets/flow_matcher.py b/sbi/neural_nets/flow_matcher.py
index 82e8abcae..16e891d9f 100644
--- a/sbi/neural_nets/flow_matcher.py
+++ b/sbi/neural_nets/flow_matcher.py
@@ -14,7 +14,7 @@
from torch.nn import functional as F
from zuko.nn import MLP as ZukoMLP
-from sbi.neural_nets.density_estimators.flowmatching_estimator import (
+from sbi.neural_nets.estimators.flowmatching_estimator import (
FlowMatchingEstimator,
VectorFieldNet,
)
diff --git a/sbi/neural_nets/mdn.py b/sbi/neural_nets/mdn.py
index 14e3a3955..a80254312 100644
--- a/sbi/neural_nets/mdn.py
+++ b/sbi/neural_nets/mdn.py
@@ -7,7 +7,7 @@
from pyknos.nflows import flows, transforms
from torch import Tensor, nn
-from sbi.neural_nets.density_estimators import NFlowsFlow
+from sbi.neural_nets.estimators import NFlowsFlow
from sbi.utils.nn_utils import get_numel
from sbi.utils.sbiutils import (
standardizing_net,
diff --git a/sbi/neural_nets/mnle.py b/sbi/neural_nets/mnle.py
index cf661c89e..73bb5ea03 100644
--- a/sbi/neural_nets/mnle.py
+++ b/sbi/neural_nets/mnle.py
@@ -8,8 +8,8 @@
from torch import Tensor, nn
from sbi.neural_nets.categorial import build_categoricalmassestimator
-from sbi.neural_nets.density_estimators import MixedDensityEstimator
-from sbi.neural_nets.density_estimators.mixed_density_estimator import _separate_input
+from sbi.neural_nets.estimators import MixedDensityEstimator
+from sbi.neural_nets.estimators.mixed_density_estimator import _separate_input
from sbi.neural_nets.flow import (
build_made,
build_maf,
diff --git a/sbi/neural_nets/score_nets.py b/sbi/neural_nets/score_nets.py
new file mode 100644
index 000000000..6fa704722
--- /dev/null
+++ b/sbi/neural_nets/score_nets.py
@@ -0,0 +1,376 @@
+from typing import Optional, Union
+
+import torch
+import torch.nn as nn
+from torch import Tensor
+
+from sbi.neural_nets.embedding_nets import GaussianFourierTimeEmbedding
+from sbi.neural_nets.estimators.score_estimator import (
+ ConditionalScoreEstimator,
+ SubVPScoreEstimator,
+ VEScoreEstimator,
+ VPScoreEstimator,
+)
+from sbi.utils.sbiutils import standardizing_net, z_score_parser, z_standardization
+from sbi.utils.user_input_checks import check_data_device
+
+
+class EmbedInputs(nn.Module):
+ """Constructs input handler that optionally standardizes and/or
+ embeds the input and conditioning variables, as well as the diffusion time
+ embedding.
+ """
+
+ def __init__(self, embedding_net_x, embedding_net_y, embedding_net_t):
+ """Initializes the input handler.
+
+ Args:
+ embedding_net_x: Embedding network for x.
+ embedding_net_y: Embedding network for y.
+ embedding_net_t: Embedding network for time.
+ """
+ super().__init__()
+ self.embedding_net_x = embedding_net_x
+ self.embedding_net_y = embedding_net_y
+ self.embedding_net_t = embedding_net_t
+
+ def forward(self, x: Tensor, y: Tensor, t: Tensor) -> tuple:
+ """Forward pass of the input layer.
+
+        Args:
+            x: Input (e.g., theta).
+            y: Conditioning variable (e.g., x).
+            t: Diffusion time.
+
+ Returns:
+ Potentially standardized and/or embedded output.
+ """
+
+ return (
+ self.embedding_net_x(x),
+ self.embedding_net_y(y),
+ self.embedding_net_t(t),
+ )
+
+
+def build_input_handler(
+ batch_y: Tensor,
+ t_embedding_dim: int,
+ z_score_y: Optional[str] = "independent",
+ embedding_net_x: nn.Module = nn.Identity(),
+ embedding_net_y: nn.Module = nn.Identity(),
+) -> nn.Module:
+ """Builds input layer for vector field regression, including time embedding, and
+ optionally z-scores.
+
+ Args:
+        batch_y: Batch of ys, used to infer dimensionality and (optional) z-scoring.
+        t_embedding_dim: Dimensionality of the time embedding.
+        z_score_y: Whether to z-score ys passing into the network, can be one of:
+            - `none`, or None: do not z-score.
+            - `independent`: z-score each dimension independently.
+            - `structured`: treat dimensions as related, therefore compute mean and std
+                over the entire batch, instead of per-dimension. Should be used when
+                each sample is, for example, a time series or an image.
+ embedding_net_x: Optional embedding network for x.
+ embedding_net_y: Optional embedding network for y.
+
+ Returns:
+ Input handler that provides x, y, and time embedding, and optionally z-scores.
+ """
+
+ z_score_y_bool, structured_y = z_score_parser(z_score_y)
+ if z_score_y_bool:
+ embedding_net_y = nn.Sequential(
+ standardizing_net(batch_y, structured_y), embedding_net_y
+ )
+ embedding_net_t = GaussianFourierTimeEmbedding(t_embedding_dim)
+ input_handler = EmbedInputs(
+ embedding_net_x,
+ embedding_net_y,
+ embedding_net_t,
+ )
+ return input_handler
+
+
+def build_score_estimator(
+ batch_x: Tensor,
+ batch_y: Tensor,
+ sde_type: Optional[str] = "vp",
+ score_net: Optional[Union[str, nn.Module]] = "mlp",
+ z_score_x: Optional[str] = "independent",
+ z_score_y: Optional[str] = "independent",
+ t_embedding_dim: int = 16,
+ num_layers: int = 3,
+ hidden_features: int = 50,
+ embedding_net_x: nn.Module = nn.Identity(),
+ embedding_net_y: nn.Module = nn.Identity(),
+ **kwargs,
+) -> ConditionalScoreEstimator:
+ """Builds score estimator for score-based generative models.
+
+ Args:
+ batch_x: Batch of xs, used to infer dimensionality and (optional) z-scoring.
+ batch_y: Batch of ys, used to infer dimensionality and (optional) z-scoring.
+ sde_type: SDE type used, which defines the mean and std functions. One of:
+ - 'vp': Variance preserving.
+ - 'subvp': Sub-variance preserving.
+ - 've': Variance exploding.
+ Defaults to 'vp'.
+        score_net: Type of regression network. One of:
+            - 'mlp': Fully connected feed-forward network.
+            - 'ada_mlp': MLP with adaptive layer normalization for conditioning.
+            - 'resnet': Residual network (NOT IMPLEMENTED).
+            - nn.Module: Custom network.
+            Defaults to 'mlp'.
+ z_score_x: Whether to z-score xs passing into the network, can be one of:
+ - `none`, or None: do not z-score.
+ - `independent`: z-score each dimension independently.
+ - `structured`: treat dimensions as related, therefore compute mean and std
+ over the entire batch, instead of per-dimension. Should be used when each
+ sample is, for example, a time series or an image.
+ z_score_y: Whether to z-score ys passing into the network, same options as
+ z_score_x.
+ t_embedding_dim: Embedding dimension of diffusion time. Defaults to 16.
+ num_layers: Number of MLP hidden layers. Defaults to 3.
+ hidden_features: Number of hidden units per layer. Defaults to 50.
+ embedding_net_x: Embedding network for x. Defaults to nn.Identity().
+ embedding_net_y: Embedding network for y. Defaults to nn.Identity().
+ kwargs: Additional arguments that are passed by the build function for score
+ network hyperparameters.
+
+
+ Returns:
+ ScoreEstimator object with a specific SDE implementation.
+ """
+
+ check_data_device(batch_x, batch_y)
+
+ mean_0, std_0 = z_standardization(batch_x, z_score_x == "structured")
+
+ # Default to variance-preserving SDE
+ if sde_type is None:
+ sde_type = "vp"
+
+ input_handler = build_input_handler(
+ batch_y,
+ t_embedding_dim,
+ z_score_y,
+ embedding_net_x,
+ embedding_net_y,
+ )
+
+ # Infer the output dimensionalities of the embedding_net by making a forward pass.
+ x_numel = embedding_net_x(batch_x).shape[1:].numel()
+ y_numel = embedding_net_y(batch_y).shape[1:].numel()
+
+ if score_net == "mlp":
+ score_net = MLP(
+ x_numel + y_numel + t_embedding_dim,
+ x_numel,
+ input_handler,
+ hidden_dim=hidden_features,
+ num_layers=num_layers,
+ )
+ elif score_net == "ada_mlp":
+ score_net = AdaMLP(
+ x_numel,
+ t_embedding_dim + y_numel,
+ input_handler,
+ hidden_dim=hidden_features,
+ num_layers=num_layers,
+ )
+ elif score_net == "resnet":
+ raise NotImplementedError
+ elif isinstance(score_net, nn.Module):
+ pass
+ else:
+ raise ValueError(f"Invalid score network: {score_net}")
+
+ if sde_type == "vp":
+ estimator = VPScoreEstimator
+ elif sde_type == "ve":
+ estimator = VEScoreEstimator
+ elif sde_type == "subvp":
+ estimator = SubVPScoreEstimator
+ else:
+ raise ValueError(f"SDE type: {sde_type} not supported.")
+
+ input_shape = batch_x.shape[1:]
+ condition_shape = batch_y.shape[1:]
+ return estimator(
+ score_net, input_shape, condition_shape, mean_0=mean_0, std_0=std_0, **kwargs
+ )
+
+
+class MLP(nn.Module):
+ """Simple fully connected neural network."""
+
+ def __init__(
+ self,
+ input_dim: int,
+ output_dim: int,
+ input_handler: nn.Module,
+ hidden_dim: int = 100,
+ num_layers: int = 5,
+ activation: nn.Module = nn.GELU(),
+ layer_norm: bool = True,
+ skip_connection: bool = True,
+ ):
+ """Initializes the MLP.
+
+ Args:
+ input_dim: The dimensionality of the input tensor.
+ output_dim: The dimensionality of the output tensor.
+ input_handler: The input handler module.
+ hidden_dim: The dimensionality of the hidden layers.
+ num_layers: The number of hidden layers.
+ activation: The activation function.
+ layer_norm: Whether to use layer normalization.
+ skip_connection: Whether to use skip connections.
+ """
+ super().__init__()
+
+ self.input_handler = input_handler
+ self.num_layers = num_layers
+ self.activation = activation
+ self.skip_connection = skip_connection
+
+ # Initialize layers
+ self.layers = nn.ModuleList()
+
+ # Input layer
+ self.layers.append(nn.Linear(input_dim, hidden_dim))
+
+ # Hidden layers
+ for _ in range(num_layers - 1):
+ if layer_norm:
+ block = nn.Sequential(
+ nn.Linear(hidden_dim, hidden_dim),
+ nn.LayerNorm(hidden_dim),
+ activation,
+ )
+ else:
+ block = nn.Sequential(nn.Linear(hidden_dim, hidden_dim), activation)
+ self.layers.append(block)
+
+ # Output layer
+ self.layers.append(nn.Linear(hidden_dim, output_dim))
+
+ def forward(self, x: Tensor, y: Tensor, t: Tensor) -> Tensor:
+ x, y, t = self.input_handler(x, y, t)
+ xyt = torch.cat([x, y, t], dim=-1)
+
+ h = self.activation(self.layers[0](xyt))
+
+ # Forward pass through hidden layers
+ for i in range(1, self.num_layers - 1):
+ h_new = self.layers[i](h)
+ h = (h + h_new) if self.skip_connection else h_new
+
+ # Output layer
+ output = self.layers[-1](h)
+
+ return output
+
+
+class AdaMLPBlock(nn.Module):
+ r"""Creates a residual MLP block module with adaptive layer norm for conditioning.
+
+ Arguments:
+ hidden_dim: The dimensionality of the MLP block.
+ cond_dim: The number of embedding features.
+ """
+
+ def __init__(
+ self,
+ hidden_dim: int,
+ cond_dim: int,
+ mlp_ratio: int = 1,
+ ):
+ super().__init__()
+
+ self.ada_ln = nn.Sequential(
+ nn.Linear(cond_dim, hidden_dim),
+ nn.SiLU(),
+ nn.Linear(hidden_dim, 3 * hidden_dim),
+ )
+
+ # Initialize the last layer to zero
+ self.ada_ln[-1].weight.data.zero_()
+ self.ada_ln[-1].bias.data.zero_()
+
+ # MLP block
+ # NOTE: This can be made more flexible to support layer types.
+ self.block = nn.Sequential(
+ nn.LayerNorm(hidden_dim, elementwise_affine=False),
+ nn.Linear(hidden_dim, hidden_dim * mlp_ratio),
+ nn.GELU(),
+ nn.Linear(hidden_dim * mlp_ratio, hidden_dim),
+ )
+
+ def forward(self, x: Tensor, yt: Tensor) -> Tensor:
+ """
+ Arguments:
+ x: The input tensor, with shape (B, D_x).
+            yt: The conditioning embedding (condition and time), with shape (B, D_yt).
+
+ Returns:
+ The output tensor, with shape (B, D_x).
+ """
+
+ a, b, c = self.ada_ln(yt).chunk(3, dim=-1)
+
+ y = (a + 1) * x + b
+ y = self.block(y)
+ y = x + c * y
+ y = y / torch.sqrt(1 + c * c)
+
+ return y
+
+
+class AdaMLP(nn.Module):
+ """
+ MLP denoising network using adaptive layer normalization for conditioning.
+ Relevant literature: https://arxiv.org/abs/2212.09748
+
+ See "Scalable Diffusion Models with Transformers", by William Peebles, Saining Xie.
+
+ Arguments:
+ x_dim: The dimensionality of the input tensor.
+ emb_dim: The number of embedding features.
+ input_handler: The input handler module.
+ hidden_dim: The dimensionality of the MLP block.
+ num_layers: The number of MLP blocks.
+ **kwargs: Key word arguments handed to the AdaMLPBlock.
+ """
+
+ def __init__(
+ self,
+ x_dim: int,
+ emb_dim: int,
+ input_handler: nn.Module,
+ hidden_dim: int = 100,
+ num_layers: int = 3,
+ **kwargs,
+ ):
+ super().__init__()
+ self.input_handler = input_handler
+ self.num_layers = num_layers
+
+ self.ada_blocks = nn.ModuleList()
+ for _i in range(num_layers):
+ self.ada_blocks.append(AdaMLPBlock(hidden_dim, emb_dim, **kwargs))
+
+ self.input_layer = nn.Linear(x_dim, hidden_dim)
+ self.output_layer = nn.Linear(hidden_dim, x_dim)
+
+ def forward(self, x: Tensor, y: Tensor, t: Tensor) -> Tensor:
+ x, y, t = self.input_handler(x, y, t)
+ yt = torch.cat([y, t], dim=-1)
+
+ h = self.input_layer(x)
+ for i in range(self.num_layers):
+ h = self.ada_blocks[i](h, yt)
+ return self.output_layer(h)
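For reference, a minimal sketch of building a score estimator directly via `build_score_estimator` and evaluating the denoising score matching loss on random placeholder batches:

import torch
from sbi.neural_nets.score_nets import build_score_estimator

theta = torch.randn(256, 2)
x = torch.randn(256, 3)
estimator = build_score_estimator(theta, x, sde_type="vp", score_net="mlp")

losses = estimator.loss(theta, x)   # per-sample DSM losses, shape (256,)
loss = losses.mean()                # the quantity a training loop would minimize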
diff --git a/sbi/samplers/score/correctors.py b/sbi/samplers/score/correctors.py
new file mode 100644
index 000000000..e64b370d7
--- /dev/null
+++ b/sbi/samplers/score/correctors.py
@@ -0,0 +1,65 @@
+from abc import ABC, abstractmethod
+from typing import Callable, Optional, Type
+
+from torch import Tensor
+
+from sbi.samplers.score.predictors import Predictor
+
+CORRECTORS = {}
+
+
+def get_corrector(name: str, predictor: Predictor, **kwargs) -> "Corrector":
+ """Helper function to get corrector by name.
+
+ Args:
+ name: Name of the corrector.
+ predictor: Predictor to initialize the corrector.
+
+ Returns:
+ Corrector: The corrector.
+ """
+ return CORRECTORS[name](predictor, **kwargs)
+
+
+def register_corrector(name: str) -> Callable:
+ """Register a corrector.
+
+ Args:
+ name (str): Name of the corrector.
+
+ Returns:
+ Callable: Decorator for registering the corrector.
+ """
+
+ def decorator(corrector: Type[Corrector]) -> Callable:
+ assert issubclass(
+ corrector, Corrector
+ ), "Corrector must be a subclass of Corrector."
+ CORRECTORS[name] = corrector
+ return corrector
+
+ return decorator
+
+
+class Corrector(ABC):
+ def __init__(
+ self,
+ predictor: Predictor,
+ ):
+ """Base class for correctors.
+
+ Args:
+ predictor (Predictor): The associated predictor.
+ """
+ self.predictor = predictor
+ self.potential_fn = predictor.potential_fn
+ self.device = predictor.device
+
+ def __call__(
+ self, theta: Tensor, t0: Tensor, t1: Optional[Tensor] = None
+ ) -> Tensor:
+ return self.correct(theta, t0, t1)
+
+ @abstractmethod
+ def correct(self, theta: Tensor, t0: Tensor, t1: Optional[Tensor] = None) -> Tensor:
+ pass
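The registry is meant to be extended via the decorator; a hypothetical no-op corrector showing the pattern (a real corrector would, e.g., run a few Langevin steps at the current time):

from typing import Optional
from torch import Tensor
from sbi.samplers.score.correctors import Corrector, get_corrector, register_corrector

@register_corrector("identity")
class IdentityCorrector(Corrector):
    """Hypothetical corrector that returns theta unchanged."""

    def correct(self, theta: Tensor, t0: Tensor, t1: Optional[Tensor] = None) -> Tensor:
        return theta

# get_corrector("identity", predictor) would then return an instance.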
diff --git a/sbi/samplers/score/predictors.py b/sbi/samplers/score/predictors.py
new file mode 100644
index 000000000..3f0a2eba6
--- /dev/null
+++ b/sbi/samplers/score/predictors.py
@@ -0,0 +1,122 @@
+# This file is part of sbi, a toolkit for simulation-based inference. sbi is licensed
+# under the Apache License Version 2.0, see
+
+from abc import ABC, abstractmethod
+from typing import Callable, Type
+
+import torch
+from torch import Tensor
+
+from sbi.inference.potentials.score_based_potential import (
+ PosteriorScoreBasedPotential,
+)
+
+PREDICTORS = {}
+
+
+def get_predictor(
+ name: str, score_based_potential: PosteriorScoreBasedPotential, **kwargs
+) -> "Predictor":
+ """Helper function to get predictor by name.
+
+ Args:
+ name: Name of the predictor.
+ score_based_potential: Score-based potential to initialize the predictor.
+ """
+ return PREDICTORS[name](score_based_potential, **kwargs)
+
+
+def register_predictor(name: str) -> Callable:
+ """Register a predictor.
+
+ Args:
+ name (str): Name of the predictor.
+
+ Returns:
+ Callable: Decorator for registering the predictor.
+ """
+
+ def decorator(predictor: Type[Predictor]) -> Callable:
+ assert issubclass(
+ predictor, Predictor
+ ), "Predictor must be a subclass of Predictor."
+ PREDICTORS[name] = predictor
+ return predictor
+
+ return decorator
+
+
+class Predictor(ABC):
+ """Predictor base class.
+
+ See child classes for more detail.
+ """
+
+ def __init__(
+ self,
+ potential_fn: PosteriorScoreBasedPotential,
+ ):
+ """Initialize predictor.
+
+ Args:
+ potential_fn: potential with gradient from which to sample.
+ """
+ self.potential_fn = potential_fn
+ self.device = potential_fn.device
+
+ # Extract relevant functions from the score function
+ self.drift = self.potential_fn.score_estimator.drift_fn
+ self.diffusion = self.potential_fn.score_estimator.diffusion_fn
+
+ def __call__(self, theta: Tensor, t1: Tensor, t0: Tensor) -> Tensor:
+ """Run prediction.
+
+ Args:
+ theta: Parameters.
+            t1: Current diffusion time.
+            t0: Next (smaller) diffusion time.
+ """
+ return self.predict(theta, t1, t0)
+
+ @abstractmethod
+ def predict(self, theta: Tensor, t1: Tensor, t0: Tensor) -> Tensor:
+ """Run prediction.
+
+ Args:
+ theta: Parameters.
+            t1: Current diffusion time.
+            t0: Next (smaller) diffusion time.
+ """
+ pass
+
+
+@register_predictor("euler_maruyama")
+class EulerMaruyama(Predictor):
+ def __init__(
+ self,
+ potential_fn: PosteriorScoreBasedPotential,
+ eta: float = 1.0,
+ ):
+ """Simple Euler-Maruyama discretization of the associated family of reverse
+ SDEs.
+
+ Args:
+ potential_fn: Score-based potential to predict.
+            eta: Mediates how much noise is added during sampling, i.e., for values
+                approaching 0 this becomes the deterministic probability flow ODE,
+                and for large values a more stochastic reverse SDE. Defaults to 1.0.
+ """
+ super().__init__(potential_fn)
+ assert eta > 0, "eta must be positive."
+ self.eta = eta
+
+ def predict(self, theta: Tensor, t1: Tensor, t0: Tensor):
+ dt = t1 - t0
+ dt_sqrt = torch.sqrt(dt)
+ f = self.drift(theta, t1)
+ g = self.diffusion(theta, t1)
+ score = self.potential_fn.gradient(theta, t1)
+ f_backward = f - (1 + self.eta**2) / 2 * g**2 * score
+ g_backward = self.eta * g
+ return theta - f_backward * dt + g_backward * torch.randn_like(theta) * dt_sqrt
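The Euler-Maruyama predictor takes one reverse-SDE step from a larger time t1 to a smaller time t0. A sketch of stepping it over a decreasing time grid, assuming a trained `potential_fn` (a `PosteriorScoreBasedPotential`) is available; in practice the `Diffuser` below orchestrates this:

import torch

predictor = EulerMaruyama(potential_fn, eta=1.0)   # potential_fn: assumed, trained
ts = torch.linspace(1.0, 1e-3, 100)                # from t_max down to t_min
theta = torch.randn(64, 2)                         # draws from the base distribution
for t1, t0 in zip(ts[:-1], ts[1:]):
    theta = predictor(theta, t1, t0)               # one reverse step t1 -> t0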
diff --git a/sbi/samplers/score/score.py b/sbi/samplers/score/score.py
new file mode 100644
index 000000000..57aa6d5b1
--- /dev/null
+++ b/sbi/samplers/score/score.py
@@ -0,0 +1,160 @@
+# This file is part of sbi, a toolkit for simulation-based inference. sbi is licensed
+# under the Apache License Version 2.0, see
+
+from typing import Optional, Union
+
+import torch
+from torch import Tensor
+from tqdm.auto import tqdm
+
+from sbi.inference.potentials.score_based_potential import (
+ PosteriorScoreBasedPotential,
+)
+from sbi.samplers.score.correctors import Corrector, get_corrector
+from sbi.samplers.score.predictors import Predictor, get_predictor
+
+
+class Diffuser:
+ predictor: Predictor
+ corrector: Optional[Corrector]
+
+ def __init__(
+ self,
+ score_based_potential: PosteriorScoreBasedPotential,
+ predictor: Union[str, Predictor],
+ corrector: Optional[Union[str, Corrector]] = None,
+ predictor_params: Optional[dict] = None,
+ corrector_params: Optional[dict] = None,
+ ):
+ """Diffusion-based sampler for score-based sampling i.e it requires the
+ gradient of a family of distributions (for different times) characterized by the
+ gradient of a potential function (i.e. the score function). The sampler uses a
+ predictor to propagate samples forward in time. Optionally, a corrector can be
+ used to refine the samples at the current time.
+
+ Args:
+ score_based_potential: A time-dependent score-based potential.
+ predictor: A predictor to propagate samples through time.
+ corrector (optional): A corrector to refine the samples. Defaults to None.
+ predictor_params (optional): Parameters passed to the predictor, if given as
+ string. Defaults to None.
+ corrector_params (optional): Parameters passed to the corrector, if given as
+ string. Defaults to None.
+ """
+ # Set predictor and corrector
+ self.set_predictor(predictor, score_based_potential, **(predictor_params or {}))
+ self.set_corrector(corrector, **(corrector_params or {}))
+ self.device = self.predictor.device
+
+ # Extract time limits from the score function
+ self.t_min = score_based_potential.score_estimator.t_min
+ self.t_max = score_based_potential.score_estimator.t_max
+
+ # Extract initial moments
+ self.init_mean = score_based_potential.score_estimator.mean_t
+ self.init_std = score_based_potential.score_estimator.std_t
+
+ # Extract relevant shapes from the score function
+ self.input_shape = score_based_potential.score_estimator.input_shape
+ self.condition_shape = score_based_potential.score_estimator.condition_shape
+ condition_dim = len(self.condition_shape)
+ # TODO: this is the iid setting and we don't want to generate num_obs samples,
+ # but only one sample given the condition.
+ self.batch_shape = score_based_potential.x_o.shape[:-condition_dim]
+
+ def set_predictor(
+ self,
+ predictor: Union[str, Predictor],
+ score_based_potential: PosteriorScoreBasedPotential,
+ **kwargs,
+ ):
+ """Set the predictor for the diffusion-based sampler."""
+ if isinstance(predictor, str):
+ self.predictor = get_predictor(predictor, score_based_potential, **kwargs)
+ else:
+ self.predictor = predictor
+
+ def set_corrector(self, corrector: Optional[Union[str, Corrector]], **kwargs):
+ """Set the corrector for the diffusion-based sampler."""
+ if corrector is None:
+ self.corrector = None
+ elif isinstance(corrector, Corrector):
+ self.corrector = corrector
+ else:
+ self.corrector = get_corrector(corrector, self.predictor, **kwargs)
+
+ def initialize(self, num_samples: int) -> Tensor:
+ """Initialize the sampler by drawing samples from the initial distribution.
+
+ If we have to sample from a batch of distributions, we draw samples from each
+ distribution in the batch i.e. of shape (num_batch, num_samples, input_shape).
+
+ Args:
+ num_samples (int): Number of samples to draw.
+
+ Returns:
+ Tensor: Samples from the diffusion's initial distribution at t_max, of shape (num_samples, *input_shape).
+ """
+ # TODO: for iid setting, self.batch_shape.numel() will be the iid-batch. But we
+ # don't want to generate num_obs samples, but only one sample given the iid
+ # batch.
+ # TODO: the solution will probably be to distinguish between the iid setting and
+ # batched sampling setting with a flag.
+ # TODO: this fixes the iid setting shape problems, but iid inference via
+ # iid_bridge is not accurate.
+ # num_batch = self.batch_shape.numel()
+ # init_shape = (num_batch, num_samples) + self.input_shape
+ init_shape = (
+ num_samples,
+ ) + self.input_shape # just use num_samples, not num_batch
+ # NOTE: for the IID setting we might need to scale the noise with iid batch
+ # size, as in equation (7) in the paper.
+ eps = torch.randn(init_shape, device=self.device)
+ mean, std, eps = torch.broadcast_tensors(self.init_mean, self.init_std, eps)
+ return mean + std * eps
+
+ @torch.no_grad()
+ def run(
+ self,
+ num_samples: int,
+ ts: Tensor,
+ show_progress_bars: bool = True,
+ save_intermediate: bool = False,
+ ) -> Tensor:
+ """Samples from the distribution at the final time point by propagating samples
+ forward in time using the predictor and optionally refining them using the a
+ corrector.
+
+ Args:
+ num_samples: Number of samples to draw.
+ ts: Time grid along which the samples are propagated, i.e., on which the
+ SDE is "solved".
+ show_progress_bars (optional): Whether to show a progress bar. Defaults to True.
+ save_intermediate (optional): Return samples at all time points instead of
+ only the samples at the final time. Defaults to False.
+
+ Returns:
+ Tensor: Samples from the distribution(s).
+ """
+ samples = self.initialize(num_samples)
+ pbar = tqdm(
+ range(1, ts.numel()),
+ disable=not show_progress_bars,
+ desc=f"Drawing {num_samples} posterior samples",
+ )
+
+ if save_intermediate:
+ intermediate_samples = [samples]
+
+ for i in pbar:
+ t1 = ts[i - 1]
+ t0 = ts[i]
+ samples = self.predictor(samples, t1, t0)
+ if self.corrector is not None:
+ samples = self.corrector(samples, t0, t1)
+ if save_intermediate:
+ intermediate_samples.append(samples)
+
+ if save_intermediate:
+ return torch.cat(intermediate_samples, dim=0)
+ else:
+ return samples
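A minimal usage sketch of the `Diffuser` (mirroring the sampler test added later in this diff); the toy data and the untrained estimator are placeholders, in practice the trained estimator comes out of `NPSE.train()`:

```python
import torch

from sbi.inference.potentials.score_based_potential import (
    score_estimator_based_potential,
)
from sbi.neural_nets.score_nets import build_score_estimator
from sbi.samplers.score.score import Diffuser

# Toy parameters and observations; no training is performed in this sketch.
theta = torch.randn(1000, 2)
x = torch.randn(1000, 1)
score_estimator = build_score_estimator(theta, x, sde_type="vp")

potential_fn, _ = score_estimator_based_potential(
    score_estimator, prior=None, x_o=torch.ones((1,))
)

diffuser = Diffuser(potential_fn, predictor="euler_maruyama", corrector=None)
# Integrate the reverse SDE on a fixed grid from t_max down to t_min.
ts = torch.linspace(
    potential_fn.score_estimator.t_max, potential_fn.score_estimator.t_min, 500
)
samples = diffuser.run(num_samples=1_000, ts=ts)  # shape (1000, 2)
```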
diff --git a/sbi/simulators/linear_gaussian.py b/sbi/simulators/linear_gaussian.py
index 985fa0eae..251b807b0 100644
--- a/sbi/simulators/linear_gaussian.py
+++ b/sbi/simulators/linear_gaussian.py
@@ -50,7 +50,7 @@ def linear_gaussian(
Returns: Simulated data.
"""
-
+ theta = torch.as_tensor(theta) # Ensure theta is a tensor (it may arrive as a NumPy array)
if num_discarded_dims:
theta = theta[:, :-num_discarded_dims]
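A small illustration of why the added `torch.as_tensor` coercion is useful, presumably so the simulator can also be called with NumPy parameters (toy shapes, purely illustrative):

```python
import numpy as np
import torch

from sbi.simulators.linear_gaussian import linear_gaussian

theta_np = np.zeros((5, 3), dtype=np.float32)  # e.g. parameters produced by NumPy code
x = linear_gaussian(theta_np, -1.0 * torch.ones(3), 0.3 * torch.eye(3))
assert isinstance(x, torch.Tensor) and x.shape == (5, 3)
```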
diff --git a/sbi/utils/__init__.py b/sbi/utils/__init__.py
index 621758232..93a301580 100644
--- a/sbi/utils/__init__.py
+++ b/sbi/utils/__init__.py
@@ -69,3 +69,4 @@
validate_theta_and_x,
)
from sbi.utils.user_input_checks_utils import MultipleIndependent
+from sbi.utils.get_nn_models import posterior_nn, likelihood_nn, classifier_nn
diff --git a/sbi/utils/metrics.py b/sbi/utils/metrics.py
index 1e9c148c2..7de4a67cd 100644
--- a/sbi/utils/metrics.py
+++ b/sbi/utils/metrics.py
@@ -99,6 +99,8 @@ def c2st(
X_std = torch.std(X, dim=0)
# Set std to 1 if it is close to zero.
X_std[X_std < 1e-14] = 1
+ assert not torch.any(torch.isnan(X_mean)), "X_mean contains NaNs"
+ assert not torch.any(torch.isnan(X_std)), "X_std contains NaNs"
X = (X - X_mean) / X_std
Y = (Y - X_mean) / X_std
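For context, these asserts guard the standardization step of `c2st`, in which both sample sets are scaled with the statistics of `X`. A self-contained sketch of just that step (toy data, not the full two-sample test):

```python
import torch

X = torch.randn(512, 3)
Y = torch.randn(512, 3) + 1.0

X_mean = torch.mean(X, dim=0)
X_std = torch.std(X, dim=0)
X_std[X_std < 1e-14] = 1  # set std to 1 if it is close to zero
assert not torch.any(torch.isnan(X_mean)), "X_mean contains NaNs"
assert not torch.any(torch.isnan(X_std)), "X_std contains NaNs"

X = (X - X_mean) / X_std
Y = (Y - X_mean) / X_std  # note: Y is scaled with X's statistics, not its own
```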
diff --git a/sbi/utils/user_input_checks.py b/sbi/utils/user_input_checks.py
index 44d87f5c6..1f093a260 100644
--- a/sbi/utils/user_input_checks.py
+++ b/sbi/utils/user_input_checks.py
@@ -748,7 +748,10 @@ def test_posterior_net_for_multi_d_x(net, theta: Tensor, x: Tensor) -> None:
"""
try:
# torch.nn.functional needs at least two inputs here.
- net.log_prob(theta[:, :2], condition=x[:2])
+ if hasattr(net, "log_prob"):
+ # This is only checked for density estimators, not for classifiers and
+ # other estimator types.
+ net.log_prob(theta[:, :2], condition=x[:2])
except RuntimeError as rte:
ndims = x.ndim
diff --git a/tests/density_estimator_test.py b/tests/density_estimator_test.py
index 1431bdaac..4e4f76cd7 100644
--- a/tests/density_estimator_test.py
+++ b/tests/density_estimator_test.py
@@ -12,10 +12,10 @@
from sbi.neural_nets import build_mnle
from sbi.neural_nets.categorial import build_categoricalmassestimator
-from sbi.neural_nets.density_estimators.shape_handling import (
+from sbi.neural_nets.embedding_nets import CNNEmbedding
+from sbi.neural_nets.estimators.shape_handling import (
reshape_to_sample_batch_event,
)
-from sbi.neural_nets.embedding_nets import CNNEmbedding
from sbi.neural_nets.flow import (
build_maf,
build_maf_rqs,
diff --git a/tests/lc2st_test.py b/tests/lc2st_test.py
index 7dbed8aca..32c0c668b 100644
--- a/tests/lc2st_test.py
+++ b/tests/lc2st_test.py
@@ -197,7 +197,7 @@ def test_lc2st_true_positiv_rate(method):
# good estimator: big training and num_epochs = accept
# (convergence of the estimator)
- num_train = 10_000
+ num_train = 5_000
num_epochs = 200
num_cal = 1_000
diff --git a/tests/linearGaussian_npse_test.py b/tests/linearGaussian_npse_test.py
new file mode 100644
index 000000000..ca3156f11
--- /dev/null
+++ b/tests/linearGaussian_npse_test.py
@@ -0,0 +1,237 @@
+from typing import List
+
+import pytest
+import torch
+from torch import eye, ones, zeros
+from torch.distributions import MultivariateNormal
+
+from sbi import analysis as analysis
+from sbi import utils as utils
+from sbi.inference import NPSE
+from sbi.simulators import linear_gaussian
+from sbi.simulators.linear_gaussian import (
+ samples_true_posterior_linear_gaussian_mvn_prior_different_dims,
+ samples_true_posterior_linear_gaussian_uniform_prior,
+ true_posterior_linear_gaussian_mvn_prior,
+)
+
+from .test_utils import check_c2st, get_dkl_gaussian_prior
+
+
+# We always test num_dim and sample_with with defaults and mark the rest as slow.
+@pytest.mark.parametrize(
+ "sde_type, num_dim, prior_str, sample_with",
+ [
+ ("vp", 1, "gaussian", ["sde", "ode"]),
+ ("vp", 3, "uniform", ["sde", "ode"]),
+ ("vp", 3, "gaussian", ["sde", "ode"]),
+ ("ve", 3, "uniform", ["sde", "ode"]),
+ ("subvp", 3, "uniform", ["sde", "ode"]),
+ ],
+)
+def test_c2st_npse_on_linearGaussian(
+ sde_type, num_dim: int, prior_str: str, sample_with: List[str]
+):
+ """Test whether NPSE infers well a simple example with available ground truth."""
+
+ x_o = zeros(1, num_dim)
+ num_samples = 1000
+ num_simulations = 10_000
+
+ # likelihood_mean will be likelihood_shift+theta
+ likelihood_shift = -1.0 * ones(num_dim)
+ likelihood_cov = 0.3 * eye(num_dim)
+
+ if prior_str == "gaussian":
+ prior_mean = zeros(num_dim)
+ prior_cov = eye(num_dim)
+ prior = MultivariateNormal(loc=prior_mean, covariance_matrix=prior_cov)
+ gt_posterior = true_posterior_linear_gaussian_mvn_prior(
+ x_o, likelihood_shift, likelihood_cov, prior_mean, prior_cov
+ )
+ target_samples = gt_posterior.sample((num_samples,))
+ else:
+ prior = utils.BoxUniform(-2.0 * ones(num_dim), 2.0 * ones(num_dim))
+ target_samples = samples_true_posterior_linear_gaussian_uniform_prior(
+ x_o,
+ likelihood_shift,
+ likelihood_cov,
+ prior=prior,
+ num_samples=num_samples,
+ )
+
+ inference = NPSE(prior, sde_type=sde_type, show_progress_bars=True)
+
+ theta = prior.sample((num_simulations,))
+ x = linear_gaussian(theta, likelihood_shift, likelihood_cov)
+
+ score_estimator = inference.append_simulations(theta, x).train(
+ training_batch_size=100
+ )
+ # Reuse the trained estimator (amortized training) when testing each sampling method.
+ for method in sample_with:
+ posterior = inference.build_posterior(score_estimator, sample_with=method)
+ posterior.set_default_x(x_o)
+ samples = posterior.sample((num_samples,))
+
+ # Compute the c2st and assert it is near chance level of 0.5.
+ check_c2st(
+ samples,
+ target_samples,
+ alg=f"npse-{sde_type or 'vp'}-{prior_str}-{num_dim}D-{method}",
+ )
+
+ # Checks for log_prob()
+ if prior_str == "gaussian":
+ # For the Gaussian prior, we compute the KL divergence between ground truth
+ # and posterior.
+ dkl = get_dkl_gaussian_prior(
+ posterior,
+ x_o[0],
+ likelihood_shift,
+ likelihood_cov,
+ prior_mean,
+ prior_cov,
+ )
+
+ max_dkl = 0.15
+
+ assert (
+ dkl < max_dkl
+ ), f"D-KL={dkl} is more than 2 stds above the average performance."
+
+
+def test_c2st_npse_on_linearGaussian_different_dims():
+ """Test SNPE on linear Gaussian with different theta and x dimensionality."""
+
+ theta_dim = 3
+ x_dim = 2
+ discard_dims = theta_dim - x_dim
+
+ x_o = zeros(1, x_dim)
+ num_samples = 1000
+ num_simulations = 2000
+
+ # likelihood_mean will be likelihood_shift+theta
+ likelihood_shift = -1.0 * ones(x_dim)
+ likelihood_cov = 0.3 * eye(x_dim)
+
+ prior_mean = zeros(theta_dim)
+ prior_cov = eye(theta_dim)
+ prior = MultivariateNormal(loc=prior_mean, covariance_matrix=prior_cov)
+ target_samples = samples_true_posterior_linear_gaussian_mvn_prior_different_dims(
+ x_o,
+ likelihood_shift,
+ likelihood_cov,
+ prior_mean,
+ prior_cov,
+ num_discarded_dims=discard_dims,
+ num_samples=num_samples,
+ )
+
+ def simulator(theta):
+ return linear_gaussian(
+ theta,
+ likelihood_shift,
+ likelihood_cov,
+ num_discarded_dims=discard_dims,
+ )
+
+ # Test whether prior can be `None`.
+ inference = NPSE(prior=None)
+
+ theta = prior.sample((num_simulations,))
+ x = simulator(theta)
+
+ # Test whether we can stop and resume.
+ inference.append_simulations(theta, x).train(
+ max_num_epochs=10, training_batch_size=100
+ )
+ inference.train(
+ resume_training=True, force_first_round_loss=True, training_batch_size=100
+ )
+ posterior = inference.build_posterior().set_default_x(x_o)
+ samples = posterior.sample((num_samples,))
+
+ # Compute the c2st and assert it is near chance level of 0.5.
+ check_c2st(samples, target_samples, alg="npse_different_dims_and_resume_training")
+
+
+@pytest.mark.xfail(
+ reason="iid_bridge not working.",
+ raises=NotImplementedError,
+ strict=True,
+ match="Score accumulation*",
+)
+@pytest.mark.parametrize("num_trials", [2, 10])
+def test_npse_iid_inference(num_trials):
+ """Test whether NPSE infers well a simple example with available ground truth."""
+
+ num_dim = 2
+ x_o = zeros(num_trials, num_dim)
+ num_samples = 1000
+ num_simulations = 3000
+
+ # likelihood_mean will be likelihood_shift+theta
+ likelihood_shift = -1.0 * ones(num_dim)
+ likelihood_cov = 0.3 * eye(num_dim)
+
+ prior_mean = zeros(num_dim)
+ prior_cov = eye(num_dim)
+ prior = MultivariateNormal(loc=prior_mean, covariance_matrix=prior_cov)
+ gt_posterior = true_posterior_linear_gaussian_mvn_prior(
+ x_o, likelihood_shift, likelihood_cov, prior_mean, prior_cov
+ )
+ target_samples = gt_posterior.sample((num_samples,))
+
+ inference = NPSE(prior, show_progress_bars=True)
+
+ theta = prior.sample((num_simulations,))
+ x = linear_gaussian(theta, likelihood_shift, likelihood_cov)
+
+ score_estimator = inference.append_simulations(theta, x).train(
+ training_batch_size=100,
+ )
+ posterior = inference.build_posterior(score_estimator)
+ posterior.set_default_x(x_o)
+ samples = posterior.sample((num_samples,))
+
+ # Compute the c2st and assert it is near chance level of 0.5.
+ check_c2st(
+ samples, target_samples, alg=f"npse-vp-gaussian-1D-{num_trials}iid-trials"
+ )
+
+
+@pytest.mark.slow
+@pytest.mark.xfail(
+ raises=NotImplementedError,
+ reason="MAP optimization via score not working accurately.",
+)
+def test_npse_map():
+ num_dim = 2
+ x_o = zeros(num_dim)
+ num_simulations = 3000
+
+ # likelihood_mean will be likelihood_shift+theta
+ likelihood_shift = -1.0 * ones(num_dim)
+ likelihood_cov = 0.3 * eye(num_dim)
+
+ prior_mean = zeros(num_dim)
+ prior_cov = eye(num_dim)
+ prior = MultivariateNormal(loc=prior_mean, covariance_matrix=prior_cov)
+ gt_posterior = true_posterior_linear_gaussian_mvn_prior(
+ x_o, likelihood_shift, likelihood_cov, prior_mean, prior_cov
+ )
+ inference = NPSE(prior, show_progress_bars=True)
+
+ theta = prior.sample((num_simulations,))
+ x = linear_gaussian(theta, likelihood_shift, likelihood_cov)
+
+ inference.append_simulations(theta, x).train(
+ training_batch_size=100, max_num_epochs=10
+ )
+ posterior = inference.build_posterior().set_default_x(x_o)
+
+ map_ = posterior.map(show_progress_bars=True)
+
+ assert torch.allclose(map_, gt_posterior.mean, atol=0.2), "MAP is not close to GT."
diff --git a/tests/linearGaussian_snpe_test.py b/tests/linearGaussian_snpe_test.py
index e4dcfd9ab..2eeeb4d7e 100644
--- a/tests/linearGaussian_snpe_test.py
+++ b/tests/linearGaussian_snpe_test.py
@@ -30,6 +30,7 @@
true_posterior_linear_gaussian_mvn_prior,
)
from sbi.utils import RestrictedPrior, get_density_thresholder
+from sbi.utils.user_input_checks import process_prior, process_simulator
from .sbiutils_test import conditional_of_mvn
from .test_utils import (
@@ -156,7 +157,7 @@ def test_density_estimators_on_linearGaussian(density_estimator):
x_o = zeros(1, x_dim)
num_samples = 1000
- num_simulations = 2000
+ num_simulations = 2500
# likelihood_mean will be likelihood_shift+theta
likelihood_shift = -1.0 * ones(x_dim)
@@ -477,6 +478,8 @@ def simulator(theta):
return linear_gaussian(theta, likelihood_shift, likelihood_cov)
inference = SNPE_C(prior, show_progress_bars=False)
+ prior, _, prior_returns_numpy = process_prior(prior)
+ simulator = process_simulator(simulator, prior, prior_returns_numpy)
proposal = prior
for _ in range(2):
diff --git a/tests/posterior_nn_test.py b/tests/posterior_nn_test.py
index 10ecf5490..8f42a2dfb 100644
--- a/tests/posterior_nn_test.py
+++ b/tests/posterior_nn_test.py
@@ -32,7 +32,13 @@
(
0,
1,
- pytest.param(2, marks=pytest.mark.xfail(raises=AssertionError)),
+ pytest.param(
+ 2,
+ marks=pytest.mark.xfail(
+ raises=AssertionError,
+ reason=".log_prob() supports only batch size 1 for x_o.",
+ ),
+ ),
),
)
def test_log_prob_with_different_x(snpe_method: type, x_o_batch_dim: bool):
diff --git a/tests/sbc_test.py b/tests/sbc_test.py
index 9d48160b3..ea611500a 100644
--- a/tests/sbc_test.py
+++ b/tests/sbc_test.py
@@ -12,12 +12,9 @@
from sbi.analysis import sbc_rank_plot
from sbi.diagnostics import check_sbc, get_nltp, run_sbc
-from sbi.inference import SNLE, SNPE, simulate_for_sbi
-from sbi.simulators.linear_gaussian import (
- linear_gaussian,
-)
+from sbi.inference import NPSE, SNLE, SNPE
+from sbi.simulators.linear_gaussian import linear_gaussian
from sbi.utils import BoxUniform, MultipleIndependent
-from sbi.utils.user_input_checks import process_prior, process_simulator
from tests.test_utils import PosteriorPotential, TractablePosterior
@@ -29,11 +26,10 @@
(SNPE, None),
pytest.param(SNLE, "mcmc", marks=pytest.mark.mcmc),
pytest.param(SNLE, "vi", marks=pytest.mark.mcmc),
+ (NPSE, None),
),
)
-def test_running_sbc(
- method, prior, reduce_fn_str, sampler, mcmc_params_accurate: dict, model="mdn"
-):
+def test_running_sbc(method, prior, reduce_fn_str, sampler, mcmc_params_accurate: dict):
"""Tests running inference and then SBC and obtaining nltp."""
num_dim = 2
@@ -53,18 +49,12 @@ def test_running_sbc(
likelihood_shift = -1.0 * ones(num_dim)
likelihood_cov = 0.3 * eye(num_dim)
- def simulator(theta):
- return linear_gaussian(theta, likelihood_shift, likelihood_cov)
-
- inferer = method(prior, show_progress_bars=False, density_estimator=model)
+ theta = prior.sample((num_simulations,))
+ x = linear_gaussian(theta, likelihood_shift, likelihood_cov)
- prior, _, prior_returns_numpy = process_prior(prior)
- simulator = process_simulator(simulator, prior, prior_returns_numpy)
- theta, x = simulate_for_sbi(simulator, prior, num_simulations)
+ inferer = method(prior, show_progress_bars=False)
- _ = inferer.append_simulations(theta, x).train(
- training_batch_size=100, max_num_epochs=max_num_epochs
- )
+ inferer.append_simulations(theta, x).train(max_num_epochs=max_num_epochs)
if method == SNLE:
posterior_kwargs = {
"sample_with": "mcmc" if sampler == "mcmc" else "vi",
@@ -77,7 +67,7 @@ def simulator(theta):
posterior = inferer.build_posterior(**posterior_kwargs)
thetas = prior.sample((num_sbc_runs,))
- xs = simulator(thetas)
+ xs = linear_gaussian(thetas, likelihood_shift, likelihood_cov)
reduce_fn = "marginals" if reduce_fn_str == "marginals" else posterior.log_prob
run_sbc(
@@ -109,7 +99,7 @@ def test_consistent_sbc_results(density_estimator, cov_method):
def simulator(theta):
return linear_gaussian(theta, likelihood_shift, likelihood_cov)
- num_simulations = 2000
+ num_simulations = 4000
num_posterior_samples = 1000
num_sbc_runs = 100
diff --git a/tests/score_estimator_test.py b/tests/score_estimator_test.py
new file mode 100644
index 000000000..ef03b6275
--- /dev/null
+++ b/tests/score_estimator_test.py
@@ -0,0 +1,146 @@
+# This file is part of sbi, a toolkit for simulation-based inference. sbi is licensed
+# under the Apache License Version 2.0, see
+
+from __future__ import annotations
+
+from typing import Tuple
+
+import pytest
+import torch
+
+from sbi.neural_nets.embedding_nets import CNNEmbedding
+from sbi.neural_nets.score_nets import build_score_estimator
+
+
+@pytest.mark.parametrize("sde_type", ["vp", "ve", "subvp"])
+@pytest.mark.parametrize("input_sample_dim", (1, 2))
+@pytest.mark.parametrize("input_event_shape", ((1,), (4,)))
+@pytest.mark.parametrize("condition_event_shape", ((1,), (7,)))
+@pytest.mark.parametrize("batch_dim", (1, 10))
+@pytest.mark.parametrize("score_net", ["mlp", "ada_mlp"])
+def test_score_estimator_loss_shapes(
+ sde_type,
+ input_sample_dim,
+ input_event_shape,
+ condition_event_shape,
+ batch_dim,
+ score_net,
+):
+ """Test whether `loss` of DensityEstimators follow the shape convention."""
+ score_estimator, inputs, conditions = _build_score_estimator_and_tensors(
+ sde_type,
+ input_event_shape,
+ condition_event_shape,
+ batch_dim,
+ input_sample_dim,
+ score_net=score_net,
+ )
+
+ losses = score_estimator.loss(inputs[0], condition=conditions)
+ assert losses.shape == (batch_dim,)
+
+
+@pytest.mark.gpu
+@pytest.mark.parametrize("sde_type", ["vp", "ve", "subvp"])
+@pytest.mark.parametrize("device", ["cpu", "cuda"])
+def test_score_estimator_on_device(sde_type, device):
+ """Test whether DensityEstimators can be moved to the device."""
+ score_estimator = build_score_estimator(
+ torch.randn(100, 1), torch.randn(100, 1), sde_type=sde_type
+ )
+ score_estimator.to(device)
+
+ # Test forward
+ inputs = torch.randn(100, 1, device=device)
+ condition = torch.randn(100, 1, device=device)
+ time = torch.randn(1, device=device)
+ out = score_estimator(inputs, condition, time)
+
+ assert str(out.device).split(":")[0] == device, "Output device mismatch."
+
+ # Test loss
+ loss = score_estimator.loss(inputs, condition)
+ assert str(loss.device).split(":")[0] == device, "Loss device mismatch."
+
+
+@pytest.mark.parametrize("sde_type", ["vp", "ve", "subvp"])
+@pytest.mark.parametrize("input_sample_dim", (1, 2))
+@pytest.mark.parametrize("input_event_shape", ((1,), (4,)))
+@pytest.mark.parametrize("condition_event_shape", ((1,), (7,)))
+@pytest.mark.parametrize("batch_dim", (1, 10))
+@pytest.mark.parametrize("score_net", ["mlp", "ada_mlp"])
+def test_score_estimator_forward_shapes(
+ sde_type,
+ input_sample_dim,
+ input_event_shape,
+ condition_event_shape,
+ batch_dim,
+ score_net,
+):
+ """Test whether `forward` of DensityEstimators follow the shape convention."""
+ score_estimator, inputs, conditions = _build_score_estimator_and_tensors(
+ sde_type,
+ input_event_shape,
+ condition_event_shape,
+ batch_dim,
+ input_sample_dim,
+ score_net=score_net,
+ )
+ # Batched times
+ times = torch.rand((batch_dim,))
+ outputs = score_estimator(inputs[0], condition=conditions, time=times)
+ assert outputs.shape == (batch_dim, *input_event_shape), "Output shape mismatch."
+
+ # Single time
+ time = torch.rand(())
+ outputs = score_estimator(inputs[0], condition=conditions, time=time)
+ assert outputs.shape == (batch_dim, *input_event_shape), "Output shape mismatch."
+
+
+def _build_score_estimator_and_tensors(
+ sde_type: str,
+ input_event_shape: Tuple[int],
+ condition_event_shape: Tuple[int],
+ batch_dim: int,
+ input_sample_dim: int = 1,
+ **kwargs,
+):
+ """Helper function for all tests that deal with shapes of density estimators."""
+
+ # Use discrete thetas such that categorical density estimators can also use them.
+ building_thetas = torch.randint(
+ 0, 4, (1000, *input_event_shape), dtype=torch.float32
+ )
+ building_xs = torch.randn((1000, *condition_event_shape))
+
+ if len(condition_event_shape) > 1:
+ embedding_net_y = CNNEmbedding(condition_event_shape, kernel_size=1)
+ else:
+ embedding_net_y = torch.nn.Identity()
+
+ if len(input_event_shape) > 1:
+ embedding_net_x = CNNEmbedding(input_event_shape, kernel_size=1)
+ else:
+ embedding_net_x = torch.nn.Identity()
+
+ score_estimator = build_score_estimator(
+ torch.randn_like(building_thetas),
+ torch.randn_like(building_xs),
+ sde_type=sde_type,
+ embedding_net_x=embedding_net_x,
+ embedding_net_y=embedding_net_y,
+ **kwargs,
+ )
+
+ inputs = building_thetas[:batch_dim]
+ condition = building_xs[:batch_dim]
+
+ inputs = inputs.unsqueeze(0)
+ inputs = inputs.expand(
+ [
+ input_sample_dim,
+ ]
+ + [-1] * (1 + len(input_event_shape))
+ )
+ condition = condition
+ return score_estimator, inputs, condition
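Condensed, the shape convention these tests assert (toy shapes; `build_score_estimator` is the builder added in this PR):

```python
import torch

from sbi.neural_nets.score_nets import build_score_estimator

theta = torch.randn(1000, 4)  # input event shape (4,)
x = torch.randn(1000, 7)      # condition event shape (7,)
estimator = build_score_estimator(theta, x, sde_type="vp")

inputs, condition = theta[:10], x[:10]  # batch_dim = 10
losses = estimator.loss(inputs, condition=condition)
assert losses.shape == (10,)  # one loss value per batch element

times = torch.rand((10,))
outputs = estimator(inputs, condition=condition, time=times)
assert outputs.shape == (10, 4)  # the predicted score has theta's event shape
```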
diff --git a/tests/score_samplers_test.py b/tests/score_samplers_test.py
new file mode 100644
index 000000000..84af99224
--- /dev/null
+++ b/tests/score_samplers_test.py
@@ -0,0 +1,82 @@
+# This file is part of sbi, a toolkit for simulation-based inference. sbi is licensed
+# under the Apache License Version 2.0, see
+
+from __future__ import annotations
+
+from typing import Tuple
+
+import pytest
+import torch
+from torch import Tensor
+
+from sbi.inference.potentials.score_based_potential import (
+ score_estimator_based_potential,
+)
+from sbi.neural_nets.score_nets import build_score_estimator
+from sbi.samplers.score.score import Diffuser
+
+
+@pytest.mark.parametrize("sde_type", ["vp", "ve", "subvp"])
+@pytest.mark.parametrize("predictor", ("euler_maruyama",))
+@pytest.mark.parametrize("corrector", (None,))
+@pytest.mark.parametrize("input_event_shape", ((1,), (4,)))
+@pytest.mark.parametrize("mu", (-1.0, 0.0, 1.0))
+@pytest.mark.parametrize("std", (1.0, 0.1))
+def test_gaussian_score_sampling(
+ sde_type, predictor, corrector, input_event_shape, mu, std
+):
+ mean0 = mu * torch.ones(input_event_shape)
+ std0 = std * torch.ones(input_event_shape)
+
+ score_fn = _build_gaussian_score_estimator(sde_type, input_event_shape, mean0, std0)
+
+ sampler = Diffuser(score_fn, predictor, corrector)
+
+ t_min = score_fn.score_estimator.t_min
+ t_max = score_fn.score_estimator.t_max
+ ts = torch.linspace(t_max, t_min, 500)
+ samples = sampler.run(1_000, ts)
+
+ mean_est = samples.mean(0)
+ std_est = samples.std(0)
+
+ assert torch.allclose(mean_est, mean0, atol=1e-1)
+ assert torch.allclose(std_est, std0, atol=1e-1)
+
+
+def _build_gaussian_score_estimator(
+ sde_type: str,
+ input_event_shape: Tuple[int],
+ mean0: Tensor,
+ std0: Tensor,
+):
+ """Helper function for all tests that deal with shapes of density estimators."""
+
+ # Build Gaussian-distributed thetas with the target mean and std.
+ building_thetas = (
+ torch.randn((1000, *input_event_shape), dtype=torch.float32) * std0 + mean0
+ )
+ building_xs = torch.ones((1000, 1))
+
+ # Note: due to the preconditioning, the estimator predicts the correct Gaussian
+ # score by default if the neural net outputs 0!
+ class DummyNet(torch.nn.Module):
+ def __init__(self):
+ super().__init__()
+ self.dummy_param_for_device_detection = torch.nn.Linear(1, 1)
+
+ def forward(self, input, condition, time):
+ return torch.zeros_like(input)
+
+ score_estimator = build_score_estimator(
+ building_thetas,
+ building_xs,
+ sde_type=sde_type,
+ score_net=DummyNet(),
+ )
+
+ score_fn, _ = score_estimator_based_potential(
+ score_estimator, prior=None, x_o=torch.ones((1,))
+ )
+
+ return score_fn
diff --git a/tests/test_utils.py b/tests/test_utils.py
index a1cea1e07..8f750f741 100644
--- a/tests/test_utils.py
+++ b/tests/test_utils.py
@@ -146,7 +146,6 @@ def check_c2st(x: Tensor, y: Tensor, alg: str, tol: float = 0.1) -> None:
chance."""
score = c2st(x, y).item()
-
print(f"c2st for {alg} is {score:.2f}.")
assert (
diff --git a/tutorials/16_implemented_methods.ipynb b/tutorials/16_implemented_methods.ipynb
index f26bf7ad3..8f7dda6f5 100644
--- a/tutorials/16_implemented_methods.ipynb
+++ b/tutorials/16_implemented_methods.ipynb
@@ -72,7 +72,7 @@
"name": "stdout",
"output_type": "stream",
"text": [
- " Neural network successfully converged after 60 epochs."
+ " Neural network successfully converged after 83 epochs."
]
},
{
@@ -93,7 +93,7 @@
"name": "stdout",
"output_type": "stream",
"text": [
- " Neural network successfully converged after 85 epochs."
+ " Neural network successfully converged after 27 epochs."
]
}
],
@@ -128,7 +128,7 @@
"name": "stdout",
"output_type": "stream",
"text": [
- " Neural network successfully converged after 116 epochs."
+ " Neural network successfully converged after 304 epochs."
]
},
{
@@ -150,7 +150,7 @@
"output_type": "stream",
"text": [
"Using SNPE-C with atomic loss\n",
- " Neural network successfully converged after 56 epochs."
+ " Neural network successfully converged after 40 epochs."
]
}
],
@@ -193,7 +193,7 @@
"name": "stdout",
"output_type": "stream",
"text": [
- " Neural network successfully converged after 142 epochs."
+ " Neural network successfully converged after 129 epochs."
]
},
{
@@ -241,7 +241,7 @@
"name": "stdout",
"output_type": "stream",
"text": [
- " Neural network successfully converged after 258 epochs."
+ " Neural network successfully converged after 180 epochs."
]
},
{
@@ -262,10 +262,10 @@
"name": "stdout",
"output_type": "stream",
"text": [
- "The `RestrictedPrior` rejected 40.8%\n",
+ "The `RestrictedPrior` rejected 44.8%\n",
" of prior samples. You will get a speed-up of\n",
- " 69.0%.\n",
- " Neural network successfully converged after 43 epochs."
+ " 81.2%.\n",
+ " Neural network successfully converged after 34 epochs."
]
},
{
@@ -299,6 +299,78 @@
" proposal = RestrictedPrior(prior, accept_reject_fn, sample_with=\"rejection\")"
]
},
+ {
+ "cell_type": "markdown",
+ "id": "d4379824-e775-46ad-946b-07cfc3ff4c43",
+ "metadata": {},
+ "source": [
+ "**Flow Matching for Scalable Simulation-Based Inference** by Dax, Wildberger, Buchholz, Green, Macke,\n",
+ "Schölkopf (NeurIPS 2023) [[Paper]](https://arxiv.org/abs/2305.17161)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "id": "2922328f-2d31-48c8-8ba4-0e0a40e5b308",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ " Neural network successfully converged after 93 epochs."
+ ]
+ }
+ ],
+ "source": [
+ "from sbi.inference import FMPE\n",
+ "\n",
+ "inference = FMPE(prior)\n",
+ "# FMPE does support multiple rounds of inference\n",
+ "theta = prior.sample((num_sims,))\n",
+ "x = simulator(theta)\n",
+ "inference.append_simulations(theta, x).train()\n",
+ "posterior = inference.build_posterior().set_default_x(x_o)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "4ad583ea-e140-4cf5-89eb-eb77292c77c3",
+ "metadata": {},
+ "source": [
+ "**Neural posterior score estimation** \n",
+ "based on:\n",
+ "- Geffner, T., Papamakarios, G., & Mnih, A. Compositional score modeling for simulation-based inference. ICML 2023.\n",
+ "- Sharrock, L., Simons, J., Liu, S., & Beaumont, M.. Sequential neural score estimation: Likelihood-free inference with conditional score based diffusion models. arXiv preprint arXiv:2210.04872. ICML 2024.\n",
+ " \n",
+ "Note that currently only the single-round variant is implemented."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "id": "d1e49c3f-a16d-4e79-ad0b-2fb4cc9ce527",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ " Neural network successfully converged after 659 epochs."
+ ]
+ }
+ ],
+ "source": [
+ "from sbi.inference import NPSE\n",
+ "\n",
+ "theta = prior.sample((num_sims,))\n",
+ "x = simulator(theta)\n",
+ "\n",
+ "inference = NPSE(prior, sde_type=\"ve\")\n",
+ "_ = inference.append_simulations(theta, x).train()\n",
+ "posterior = inference.build_posterior().set_default_x(x_o)"
+ ]
+ },
{
"cell_type": "markdown",
"id": "d13f84e2-d35a-4f54-8cbf-0e4be1a38fb3",
@@ -317,7 +389,7 @@
},
{
"cell_type": "code",
- "execution_count": 6,
+ "execution_count": 8,
"id": "d4430dbe-ac60-4978-9695-d0a5b317ee57",
"metadata": {},
"outputs": [
@@ -325,7 +397,7 @@
"name": "stdout",
"output_type": "stream",
"text": [
- " Neural network successfully converged after 146 epochs."
+ " Neural network successfully converged after 68 epochs."
]
},
{
@@ -346,7 +418,7 @@
"name": "stdout",
"output_type": "stream",
"text": [
- " Neural network successfully converged after 55 epochs."
+ " Neural network successfully converged after 24 epochs."
]
}
],
@@ -375,7 +447,7 @@
},
{
"cell_type": "code",
- "execution_count": 7,
+ "execution_count": 9,
"id": "d284d6c5-e6f6-4b1d-9c15-d6fa1736a10e",
"metadata": {},
"outputs": [
@@ -383,7 +455,7 @@
"name": "stdout",
"output_type": "stream",
"text": [
- " Neural network successfully converged after 55 epochs."
+ " Neural network successfully converged after 167 epochs."
]
},
{
@@ -405,9 +477,9 @@
"output_type": "stream",
"text": [
"\n",
- "Converged with loss: 0.04\n",
- "Quality Score: 0.192 \t Good: Smaller than 0.5 Bad: Larger than 1.0 \t NOTE: Less sensitive to mode collapse.\n",
- " Neural network successfully converged after 86 epochs."
+ "Converged with loss: 0.03\n",
+ "Quality Score: 0.108 \t Good: Smaller than 0.5 Bad: Larger than 1.0 \t NOTE: Less sensitive to mode collapse.\n",
+ " Neural network successfully converged after 25 epochs."
]
},
{
@@ -429,8 +501,8 @@
"output_type": "stream",
"text": [
"\n",
- "Converged with loss: 0.0\n",
- "Quality Score: -0.034 \t Good: Smaller than 0.5 Bad: Larger than 1.0 \t NOTE: Less sensitive to mode collapse.\n"
+ "Converged with loss: 0.01\n",
+ "Quality Score: 0.077 \t Good: Smaller than 0.5 Bad: Larger than 1.0 \t NOTE: Less sensitive to mode collapse.\n"
]
}
],
@@ -490,7 +562,7 @@
},
{
"cell_type": "code",
- "execution_count": 9,
+ "execution_count": 11,
"id": "b58c3609-7bd7-40ce-a154-f72a190da2ef",
"metadata": {},
"outputs": [
@@ -498,7 +570,7 @@
"name": "stdout",
"output_type": "stream",
"text": [
- " Neural network successfully converged after 52 epochs."
+ " Neural network successfully converged after 76 epochs."
]
}
],
@@ -522,7 +594,7 @@
},
{
"cell_type": "code",
- "execution_count": 10,
+ "execution_count": 12,
"id": "e36ab4e7-713f-4ff2-b467-8b481a149861",
"metadata": {},
"outputs": [
@@ -530,7 +602,7 @@
"name": "stdout",
"output_type": "stream",
"text": [
- " Neural network successfully converged after 70 epochs."
+ " Neural network successfully converged after 68 epochs."
]
},
{
@@ -551,7 +623,7 @@
"name": "stdout",
"output_type": "stream",
"text": [
- " Neural network successfully converged after 39 epochs."
+ " Neural network successfully converged after 46 epochs."
]
}
],
@@ -580,7 +652,7 @@
},
{
"cell_type": "code",
- "execution_count": 11,
+ "execution_count": 13,
"id": "85e6cf8c",
"metadata": {},
"outputs": [
@@ -588,7 +660,7 @@
"name": "stdout",
"output_type": "stream",
"text": [
- " Neural network successfully converged after 181 epochs."
+ " Neural network successfully converged after 228 epochs."
]
}
],
@@ -612,7 +684,7 @@
},
{
"cell_type": "code",
- "execution_count": 12,
+ "execution_count": 14,
"id": "1ec55e76-dd86-46d1-a7cc-643324488820",
"metadata": {},
"outputs": [
@@ -620,7 +692,7 @@
"name": "stdout",
"output_type": "stream",
"text": [
- " Neural network successfully converged after 92 epochs."
+ " Neural network successfully converged after 83 epochs."
]
}
],
@@ -641,20 +713,9 @@
"posterior = inference.build_posterior().set_default_x(x_o)"
]
},
- {
- "cell_type": "markdown",
- "id": "75296db0",
- "metadata": {},
- "source": [
- "## Flow Matching Posterior Estimation\n",
- "\n",
- "**Flow Matching for Scalable Simulation-Based Inference** by Dax, Wildberger, Buchholz, Green, Macke,\n",
- "Schölkopf (NeurIPS 2023) [[Paper]](https://arxiv.org/abs/2305.17161)"
- ]
- },
{
"cell_type": "code",
- "execution_count": 13,
+ "execution_count": 15,
"id": "a5fc6047",
"metadata": {},
"outputs": [
@@ -662,7 +723,7 @@
"name": "stdout",
"output_type": "stream",
"text": [
- " Neural network successfully converged after 66 epochs."
+ " Neural network successfully converged after 38 epochs."
]
}
],
@@ -695,7 +756,7 @@
},
{
"cell_type": "code",
- "execution_count": 22,
+ "execution_count": 16,
"id": "7066ef9b-0e3d-44d3-a80e-5e06de7845ce",
"metadata": {},
"outputs": [
@@ -703,13 +764,13 @@
"name": "stdout",
"output_type": "stream",
"text": [
- " Neural network successfully converged after 164 epochs."
+ " Neural network successfully converged after 148 epochs."
]
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
- "model_id": "3ad7dfa6a28347d6945466458090c49b",
+ "model_id": "",
"version_major": 2,
"version_minor": 0
},
@@ -723,7 +784,7 @@
{
"data": {
"application/vnd.jupyter.widget-view+json": {
- "model_id": "fc4353a74b0f498d886df56e7db4ebe2",
+ "model_id": "",
"version_major": 2,
"version_minor": 0
},
@@ -736,7 +797,7 @@
},
{
"data": {
- "image/png": "iVBORw0KGgoAAAANSUhEUgAAAoAAAAHACAYAAAAyfdnSAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjkuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8hTgPZAAAACXBIWXMAAA9hAAAPYQGoP6dpAAAvt0lEQVR4nO3dd3RUdf7/8ddMeoAk1BQSIEoMEUOIoICA4IoCooKyyrqsgBtEhHwRKZZFaYIUwVVcRFYXsK2CBSwUQRE5RGWFNYj0EsoR2FjoECbJfH5/+GNkJEACmUySz/NxTs5h7v3Mve+5N/Phlc9tDmOMEQAAAKzh9HcBAAAAKFsEQAAAAMsQAAEAACxDAAQAALAMARAAAMAyBEAAAADLEAABAAAsQwAEAACwTGBxGrndbu3bt0/VqlWTw+HwdU0ALGSM0dGjRxUXFyens/L9bUo/CsDXStKPFisA7tu3TwkJCaVSHACcz969exUfH+/vMkod/SiAslKcfrRYAbBatWqeBUZERFx6ZQDwO0eOHFFCQoKnv6ls6EcB+FpJ+tFiBcDThysiIiLouAD4VGU9PEo/CqCsFKcfrXwn2gAAAOC8CIAAAACWIQACAABYpljnABaHMUYFBQUqLCwsrUUC+P8CAgIUGBhYac+PAwCUrVIJgC6XS/v379eJEydKY3EAihAeHq7Y2FgFBwf7uxQAQAV3yQHQ7XYrJydHAQEBiouLU3BwMKMUQCkyxsjlcunHH39UTk6OkpKSKuWNkgEAZeeSA6DL5ZLb7VZCQoLCw8NLoyYAvxMWFqagoCDt3r1bLpdLoaGh/i4JAFCBldowAiMSgG/xHQMAlBb+RwEAALAMARAAAMAyBMAi9OnTR926dfN3GQAAAD5RavcBLMrKrl2L3fb6Dz7wYSXl2+jRo7VgwQJlZ2eft92GDRs0cuRIrV27Vrt379bf//53DR48uExqBAAAlUelGQF0uVz+LsHnTpw4ocsuu0wTJ05UTEyMv8sBAAAVVIUNgO3bt1dmZqYGDx6sWrVqqWPHjpKkZ599VqmpqapSpYoSEhI0YMAAHTt2zPO+OXPmKCoqSp988olSUlJUtWpVderUSfv37z/nur755hvVrl1bkyZNKnK+y+VSZmamYmNjFRoaqvr162vChAme+YcOHVLfvn1Vu3ZtRURE6A9/+IPWrVvnqWfMmDFat26dHA6HHA6H5syZU+R6rrnmGj3zzDP605/+pJCQkJJuMgAAAEk+PgTsa6+++qoefPBBZWVleaY5nU5NmzZNiYmJ2rlzpwYMGKBHHnlEL774oqfNiRMnNGXKFL3++utyOp36y1/+omHDhunNN988ax3Lly/XnXfeqcmTJ6tfv35F1jFt2jR9+OGHmjdvnurVq6e9e/dq7969nvl33XWXwsLCtHjxYkVGRmrmzJm68cYbtXXrVvXo0UPff/+9lixZok8//VSSFBkZWVqbCIBFTt+XFUD55nQ6/f5UpwodAJOSkjR58mSvaWeeE9egQQONGzdO/fv39wqA+fn5eumll3T55ZdLkjIzMzV27Nizlj9//nz16tVLr7zyinr06HHOOvbs2aOkpCS1adNGDodD9evX98xbtWqV/vOf/yg3N9czajdlyhQtWLBA7777rvr166eqVasqMDCQw7q4ZCU571ay+9zbysblcmnz5s06deqUv0sBcAEhISFq1KiRX0NghQ6AzZo1O2vap59+qgkTJmjz5s06cuSICgoKlJeXpxMnTnieVBIeHu4Jf5IUGxur3Nxcr+WsXr1aH3/8sd59990LXhHcp08f3XTTTUpOTlanTp1066236uabb5YkrVu3TseOHVPNmjW93nPy5Ent2LHjYj42AJzF7Xbr1KlTCgwMVGBghe7agUqtoKBAp06d8vtofYXuJapUqeL1eteuXbr11lv14IMPavz48apRo4ZWrVqljIwMuVwuTwAMCgryep/D4ZAxxmva5Zdfrpo1a2rWrFnq0qXLWe8509VXX62cnBwtXrxYn376qe6++2516NBB7777ro4dO6bY2FitWLHirPdFRUVd3AcHgHMIDAz0+6ElAOdXUFDg7xIqdgD8vbVr18rtdmvq1Kmex2bNmzfvopZVq1Ytvf/++2rfvr3uvvtuzZs377whMCIiQj169FCPHj30xz/+UZ06ddIvv/yiq6++WgcOHFBgYKAaNGhQ5HuDg4NVWFh4UXUCAACUVIW9CrgoDRs2VH5+vl544QXt3LlTr7/+ul566aWLXl6dOnW0fPlybd68Wffcc885E/uzzz6rt956S5s3b9bWrVv1zjvvKCYmRlFRUerQoYNatWqlbt26aenSpdq1a5e+/PJLjRgxQmvWrJH067mKOTk5ys7O1k8//XTOc3hcLpeys7OVnZ0tl8ulH374QdnZ2dq+fftFf0YAAGAfn44AlvUJ5mlpaXr22Wc1adIkPf7447r++us1YcIE9erV66KXGRMTo+XLl6t9+/bq2bOn/v3vfysgIMCrTbVq1TR58mRt27ZNAQEBuuaaa7Ro0SLPKOSiRYs0YsQI3Xffffrxxx8VExOj66+/XtHR0ZKk7t276/3339cNN9ygQ4cOafbs2erTp89Ztezbt0/p6eme11OmTNGUKVPUrl27Ig8xAwAAFMVhfn/yWxGOHDmiyMhIHT58WBEREV7z8vLylJOTo8TERIWGhvqsUMB2xfmuVeSrgM/Xz1QGvv58eXl5Wr9+vUJDQzkHECjHXC6X8vLylJqaWuq5qST9TKU6BAwAAIALIwACAABYhgAIAABgGQIgAACAZQiAAAAAliEAAgAAWIYACAAAYBkCIAAAgGV8GgDz8/OVl5dXrJ/8/HxfllLhrVixQg6HQ4cOHSrzdY8ePVpNmzYt8/UWV1H1jR49WtHR0XI4HFqwYEGZ1rNr1y45HA5lZ2eX6XoBACgunz0KLj8/X1u2bNHJkyeL1T4sLEzJyckKCgryVUllbsWKFbrhhht08OBBRUVF+bucSmvYsGH6v//7P8/rTZs2acyYMZo/f75atmyp6tWrl2k9CQkJ2r9/v2rVqiWJ3wMAQPnjswBYWFiokydPKjAwUIGB519NQUGBTp48qcLCwkoVAPGb/Px8n+3bqlWrqmrVqp7XO3bskCR17dpVDofjopd7sTUHBAQoJibmotcLAICv+fwcwMDAQAUHB5/350IBsShut1sTJkxQYmKiwsLClJaWpnfffVeSZIxRhw4d1LFjR51+1PEvv/yi+Ph4jRw5UtJvh1QXLlyoJk2aKDQ0VC1bttT333/vtZ5Vq1apbdu2CgsLU0JCggYNGqTjx4975p86dUqPPvqoEhISFBISooYNG+pf//qXdu3apRtuuEGSVL16dTkcDvXp0+eCtZ+2aNEiXXHFFQoLC9MNN9ygXbt2nXd7FHXY8dChQ3I4HFqxYoXXZ/7ss8/UvHlzhYeH67rrrtOWLVu8ljVx4kRFR0erWrVqysjIUF5e3lnre+WVV5SSkqLQ0FA1atRIL7744lm1zJ07V+3atVNoaKj
efPNNn9V85iHg0aNH67bbbpMkOZ1OTwB0u90aO3as4uPjFRISoqZNm2rJkiUXrLlPnz7q1q2bnn76aUVHRysqKkpjx45VQUGBhg8frho1aig+Pl6zZ88u8nOd6/fgtddeU82aNXXq1CmvbdKtWzfde++9Z20rAABKk89GAH1twoQJeuONN/TSSy8pKSlJK1eu1F/+8hfVrl1b7dq106uvvqrU1FRNmzZNDz30kPr376+6det6AuBpw4cP1/PPP6+YmBj97W9/02233aatW7cqKChIO3bsUKdOnTRu3DjNmjVLP/74ozIzM5WZmen5D79Xr1766quvNG3aNKWlpSknJ0c//fSTEhIS9N5776l79+7asmWLIiIiFBYWVqza9+7dqzvvvFMDBw5Uv379tGbNGg0dOrTUtt2IESM0depU1a5dW/3799df//pXZWVlSZLmzZun0aNHa/r06WrTpo1ef/11TZs2TZdddpnn/W+++aZGjhypf/zjH0pPT9e3336r+++/X1WqVFHv3r097R577DFNnTpV6enpl/zA6/PVfKZhw4apQYMGuu+++7R//37P9Oeff15Tp07VzJkzlZ6erlmzZun222/Xhg0blJSUdM6aV6xYoeXLlys+Pl4rV65UVlaWMjIy9OWXX+r666/X6tWrNXfuXD3wwAO66aabFB8f71XPuX4PgoODNWjQIH344Ye66667JEm5ublauHChli5deknbqiRWdu1aovbXf/CBjyoBAJSlChkAT506paefflqffvqpWrVqJUm67LLLtGrVKs2cOVPt2rVT3bp1NXPmTPXq1UsHDhzQokWL9O2335412jhq1CjddNNNkqRXX31V8fHxmj9/vu6++25NmDBBPXv21ODBgyVJSUlJmjZtmtq1a6cZM2Zoz549mjdvnpYtW6YOHTp46jitRo0akqQ6dep4zv0qTu0zZszQ5ZdfrqlTp0qSkpOTtX79ek2aNKlUtt/48ePVrl07Sb8Gni5duigvL0+hoaF67rnnlJGRoYyMDEnSuHHj9Omnn3qNAo4aNUpTp07VnXfeKUlKTEzUxo0bNXPmTK8AOHjwYE8bX9Z8pqpVq3q29ZmHYadMmaJHH31Uf/rTnyRJkyZN0ueff67nnntO06dPP2/NNWrU0LRp0+R0OpWcnKzJkyfrxIkT+tvf/iZJevzxxzVx4kStWrXKs/zTAgICivw9kKQ///nPmj17ticAvvHGG6pXr57at29/kVsJAIDiqZABcPv27Tpx4oQnuJ3mcrmUnp7ueX3XXXdp/vz5mjhxombMmOE10nPa6RAm/foffXJysjZt2iRJWrdunb777juvw5fGGLndbuXk5Gj9+vUKCAjwBJPSqn3Tpk1q0aLFOeu8VE2aNPH8OzY2VtKvo0/16tXTpk2b1L9//7PW/fnnn0uSjh8/rh07digjI0P333+/p01BQYEiIyO93te8efMyqflCjhw5on379ql169Ze01u3bq1169Z5TSuq5saNG8vp/O1siejoaF111VWe1wEBAapZs6Zyc3OL92H+v/vvv1/XXHONfvjhB9WtW1dz5sxRnz59Lum8RQAAiqNCBsBjx45JkhYuXKi6det6zQsJCfH8+8SJE1q7dq0CAgK0bdu2i1rPAw88oEGDBp01r169etq+fftFLVO6cO0ldTqgnD7nUdI5b61z5oUNZ54jVxyn63/55ZfPCqkBAQFer6tUqVIuai6Jomr+/YUgDoejyGklrSc9PV1paWl67bXXdPPNN2vDhg1auHBhyYsGyqEN48eXqH3jESN8VAmAolTIAHjllVcqJCREe/bsOe/o29ChQ+V0OrV48WLdcsst6tKli/7whz94tfn66689o0gHDx7U1q1blZKSIkm6+uqrtXHjRjVs2LDI5aempsrtduuLL77wHAI+U3BwsKRfr4guSe0pKSn68MMPz6rzfGrXri1J2r9/v2ck8WLuQ5eSkqLVq1erV69eRa47OjpacXFx2rlzp3r27Fni5fui5guJiIhQXFycsrKyvLZ5VlaWrr322lJf3+8V9XtwWt++ffXcc8/phx9+UIcOHZSQkODzegAAqJABsFq1aho2bJgefvhhud1utWnTRocPH1ZWVpYiIiLUu3dvLVy4ULNmzdJXX32lq6++WsOHD1fv3r313Xffed0XbuzYsapZs6aio6M1YsQI1apVS926dZMkPfroo2rZsqUyMzPVt29fValSRRs3btSyZcv0j3/8Qw0aNFDv3r3117/+1XMRyO7du5Wbm6u7775b9evXl8Ph0Mcff6xbbrlFYWFhxaq9f//+mjp1qoYPH66+fftq7dq1mjNnznm3SVhYmFq2bKmJEycqMTFRubm5euKJJ0q8bR966CH16dNHzZs3V+vWrfXmm29qw4YNXuc2jhkzRoMGDVJkZKQ6deqkU6dOac2aNTp48KCGDBlS7HWVVs3FMXz4cI0aNUqXX365mjZtqtmzZys7O7vIq5NLW1G/B6dvW/PnP/9Zw4YN08svv6zXXnvN57UAACCVwW1gCgoK5HK5zvtTUFBQ4uU+9dRTevLJJzVhwgSlpKSoU6dOWrhwoRITE/Xjjz8qIyNDo0eP1tVXXy3p19ASHR191vltEydO1EMPPaRmzZrpwIED+uijjzwjNk2aNNEXX3yhrVu3qm3btkpPT9fIkSMVFxfnef+MGTP0xz/+UQMGDFCjRo10//33e24TU7duXY0ZM0aPPfaYoqOjlZmZecHapV8PL7/33ntasGCB0tLS9NJLL+npp5++4DaZNWuWCgoK1KxZMw0ePFjjxo0r8Xbt0aOHnnzyST3yyCNq1qyZdu/erQcffNCrTd++ffXKK69o9uzZSk1NVbt27TRnzhxP/SVRGjUXx6BBgzRkyBANHTpUqampWrJkiT788MMizwstbef6PZCkyMhIde/eXVWrVvX84QEAgK85zJknYJ3DkSNHFBkZqcOHDysiIsJrXl5ennJycpSYmOh1RWZ5fxIIT2dAeXHjjTeqcePGmjZt2nnbneu7dqaS3talpHx5G5jz9TOVga8/X15entavX6/Q0FDPH7H+xDmAQNFcLpfy8vKUmpp6ybdI+72S9DM+OwQcFBSk5OTkIs97KkpAQABPAYFVDh48qBUrVmjFihVeN9IGAMDXfHoOYFBQEKEOOIf09HQdPHhQkyZNUnJysr/LAQBYpEJeBFIa2rdvr2Ic/QZ85kKP9wMAwFd8fhEIAAAAyhcCIAAAgGVKLQByOBXwLb5jAIDScskB8PRFHidOnLjkYgCc2+nvGBdWAQAu1SVfBBIQEKCoqCjl5uZKksLDw3mYPVCKjDE6ceKEcnNzFRUVddYzlwEAKKlSuQo4JiZGkjwhEEDpi4qK8nzXAAC4FKUSAB0Oh2JjY1WnTh3l5+eXxiIBnCEoKIiRPwBAqSnV+wAGBATwnxQAAEA5x21gAAAALEMABAAAsAwBEAAAwDIEQAAAAMsQAAEAACxDAAQAALAMARAAAMAypXofQABAxbBh/PgStW88YoSPKgEqvpJ8n5KGD/dhJc
XHCCAAAIBlCIAAAACWKReHgFd27Vqi9td/8IGPKgEAAKj8GAEEAACwTLkYAQQAACgvSnqRVEVULgJgodMpU4L2eXl5PqsFqMgKnL4d1He5XAoODvbpOgAAvuf3AOhyufRjZKQKS/Af1/r1631YEVBx/a96dZ8uf/PmzWrUqBEhEAAqOL8HQLfbrUKnU05j5DTFGwcMDQ31cVVAxRTodvts2W6HQ6dOnZLbh+sAAJQNvwfA05zGKKCY/7Ew+gAUrbjfoYvi48PLAICyQ48OAABgGQIgAACAZQiAAAAAliEAAgAAWIYACAAAYBkCIAAAgGXKzW1gAACXZvuMGQp0u317OyAAlQIjgAAAAJYhAAIAAFiGAAgAAGAZAiAAAIBlCIAAAACWIQACAABYhgAIAABgGQIgAACAZQiAAAAAliEAAgAAWIZHwQEAcIYN48eXqH3jESN8VAngO4wAAgAAWIYACAAAYBkCIAAAgGUIgAAAAJYhAAIAAFiGq4ABALAQVzvbjRFAAAAAyxAAAQAALEMABAAAsAwBEAAAwDJcBAIA8LuSXJDAxQgVAxeZlG+MAAIAAFiGAAgAAGAZAiAAAIBlCIAAAACWIQACAABYhquAAQAXVNIrOuEftuwnrjC+dIwAAgAAWIYRQFRI/PUHAMDFYwQQAADAMgRAAAAAyxAAAQAALMM5gAAAoFKz5erokmAEEAAAwDIEQAAAAMsQAAEAACxDAAQAALAMF4EAZYybWAPA2bhQo2wxAggAAGAZAiAAAIBlCIAAAACWIQACAABYpkJeBMJJ9LAJJ0YDAEpbhQyAAAB7MQgAXDoOAQMAAFiGAAgAAGAZAiAAAIBlCIAAAACWIQACAABYhgAIAABgGQIgAACAZbgPIHCJuFEzAKCiYQQQAADAMgRAAAAAyxAAAQAALEMABAAAsAwXgViMB6oDAGAnRgABAAAsQwAEAACwDIeAAQCVGvfqBM7GCCAAAIBlCIAAAACWIQACAABYhgAIAABgGQIgAACAZQiAAAAAliEAAgAAWIYACAAAYBkCIAAAgGUIgAAAAJbhUXAAAJRT5ekxduWpFlw6RgABAAAsQwAEAACwDAEQAADAMgRAAAAAyxAAAQAALMNVwAAAlBGupEV5wQggAACAZQiAAAAAliEAAgAAWIYACAAAYBkCIAAAgGW4ChgAgEvAlb2oiBgBBAAAsAwBEAAAwDIcAoYVSnqIpvGIET6qBAAA/2MEEAAAwDIEQAAAAMsQAAEAACxjxTmAvjz/i3PLKidu6wAAqMwYAQQAALAMARAAAMAyBEAAAADLEAABAAAsY8VFIBUZF5kAAIDSxgggAACAZQiAAAAAliEAAgAAWIYACAAAYBkCIAAAgGUIgAAAAJYhAAIAAFiGAAgAAGAZAiAAAIBlCIAAAACWIQACAABYhgAIAABgGQIgAACAZQiAAAAAliEAAgAAWIYACAAAYBkCIAAAgGUIgAAAAJYJ9HcB5dGG8eP9XcJF82XtJV124xEjfLZsAABw8RgBBAAAsAwBEAAAwDIEQAAAAMsQAAEAACxDAAQAALAMARAAAMAyBEAAAADLEAABAAAsQwAEAACwDAEQAADAMgRAAAAAyxAAAQAALEMABAAAsAwBEAAAwDIEQAAAAMsQAAEAACxDAAQAALBMoL8LAACUHrfDITn52x4orwoKCvxdgiQCIABUCk6nUwFutwqdzl9DIIByqaCgQCEhIXL6+Q81AiAAVALBwcGqffiwjL8LAXBeqampcjqdCg4O9msdBEAAqCQC3G5/lwDgAkJDQ/1dgiQuAgEAALAOARAAAMAyHAIuYxvGj/d3CQAAwHKMAAIAAFiGAAgAAGAZAiAAAIBlCIAAAACW4SIQ+AwXvAAAUD4xAggAAGAZAiAAAIBlCIAAAACWIQACAABYhgAIAABgGQIgAACAZQiAAAAAliEAAgAAWIYACAAAYBkCIAAAgGUIgAAAAJYhAAIAAFgm0N8FnOZ2OCQneRQor9wOh79LAACUEr8HQKfTqQC3W4VOJ//BAOVcSEiInPyhBgAVnt8DYHBwsGofPizj70IAXFCjRo0UHBzs7zIAAJfI7wFQkgLcbn+XAKAYCH8AUDlwLAcAAMAyBEAAAADLEAABAAAsQwAEAACwDAEQAADAMgRAAAAAyxAAAQAALEMABAAAsAwBEAAAwDIEQAAAAMsQAAEAACxDAAQAALAMARAAAMAyBEAAAADLEAABAAAsQwAEAACwDAEQAADAMgRAAAAAyxAAAQAALEMABAAAsAwBEAAAwDIEQAAAAMsQAAEAACxDAAQAALAMARAAAMAyBEAAAADLEAABAAAsQwAEAACwDAEQAADAMgRAAAAAyxAAAQAALEMABAAAsAwBEAAAwDIEQAAAAMsQAAEAACxDAAQAALAMARAAAMAyBEAAAADLEAABAAAsQwAEAACwDAEQAADAMgRAAAAAyxAAAQAALEMABAAAsAwBEAAAwDIEQAAAAMsQAAEAACxDAAQAALAMARAAAMAyBEAAAADLEAABAAAsQwAEAACwDAEQAADAMgRAAAAAyxAAAQAALEMABAAAsAwBEAAAwDIEQAAAAMsQAAEAACxDAAQAALAMARAAAMAyBEAAAADLEAABAAAsQwAEAACwDAEQAADAMgRAAAAAyxAAAQAALEMABAAAsAwBEAAAwDIEQAAAAMsQAAEAACxDAAQAALAMARAAAMAyBEAAAADLEAABAAAsQwAEAACwDAEQAADAMgRAAAAAyxAAAQAALEMABAAAsAwBEAAAwDIEQAAAAMsQAAEAACxDAAQAALAMARAAAMAyBEAAAADLEAABAAAsQwAEAACwDAEQAADAMgRAAAAAyxAAAQAALEMABAAAsAwBEAAAwDIEQAAAAMsQAAEAACxDAAQAALAMARAAAMAyBEAAAADLEAABAAAsQwAEAACwDAEQAADAMgRAAAAAyxAAAQAALEMABAAAsAwBEAAAwDIEQAAAAMsQAAEAACxDAAQAALAMARAAAMAyBEAAAADLEAABAAAsQwAEAACwDAEQAADAMgRAAAAAyxAAAQAALEMABAAAsAwBEAAAwDIEQAAAAMsQAAEAACxDAAQAALAMARAAAMAyBEAAAADLEAABAAAsQwAEAACwDAEQAADAMgRAAAAAyxAAAQAALEMABAAAsAwBEAAAwDIEQAAAAMsQAAEAACxDAAQAALAMARAAAMAyBEAAAADLEAABAAAsQwAEAACwDAEQAADAMgRAAAAAyxAAAQAALEMABAAAsAwBEAAAwDIEQAAAAMsQAAEAACxDAAQAALBMYHEaGWMkSUeOHPFJEcfz832yXACly1d9wJnLPt3fVDa+7kcl+lKgIigv/WixAuDRo0clSQkJCZdQFoAKLzLS56s4evSoIstgPWWNfhSApHLTjzpMMWKi2+3Wvn37VK1aNTkcjlIrUPo1rSYkJGjv3r2KiIgo1WXDf9ivlY+v96kxRkePHlVcXJyczsp3doov+1GJ71xlxD6tnHy5X0vSjxZrBNDpdCo+Pr5UijuXiIgIf
sErIfZr5ePLfVoZR/5OK4t+VOI7VxmxTysnX+3X4vajle/PbAAAAJwXARAAAMAyfg+AISEhGjVqlEJCQvxdCkoR+7XyYZ+Wb+yfyod9WjmVl/1arItAAAAAUHn4fQQQAAAAZYsACAAAYBkCIAAAgGUIgAAAAJbxewCcPn26GjRooNDQULVo0UL/+c9//F0SzmH06NFyOBxeP40aNfLMz8vL08CBA1WzZk1VrVpV3bt31//+9z+vZezZs0ddunRReHi46tSpo+HDh6ugoKCsP4q1Vq5cqdtuu01xcXFyOBxasGCB13xjjEaOHKnY2FiFhYWpQ4cO2rZtm1ebX375RT179lRERISioqKUkZGhY8eOebX57rvv1LZtW4WGhiohIUGTJ0/29UezGv1oxUE/WjlUhr7UrwFw7ty5GjJkiEaNGqX//ve/SktLU8eOHZWbm+vPsnAejRs31v79+z0/q1at8sx7+OGH9dFHH+mdd97RF198oX379unOO+/0zC8sLFSXLl3kcrn05Zdf6tVXX9WcOXM0cuRIf3wUKx0/flxpaWmaPn16kfMnT56sadOm6aWXXtLq1atVpUoVdezYUXl5eZ42PXv21IYNG7Rs2TJ9/PHHWrlypfr16+eZf+TIEd18882qX7++1q5dq2eeeUajR4/WP//5T59/PhvRj1Y89KMVX6XoS40fXXvttWbgwIGe14WFhSYuLs5MmDDBj1XhXEaNGmXS0tKKnHfo0CETFBRk3nnnHc+0TZs2GUnmq6++MsYYs2jRIuN0Os2BAwc8bWbMmGEiIiLMqVOnfFo7zibJzJ8/3/Pa7XabmJgY88wzz3imHTp0yISEhJi33nrLGGPMxo0bjSTzzTffeNosXrzYOBwO88MPPxhjjHnxxRdN9erVvfbpo48+apKTk338iexEP1qx0I9WPhW1L/XbCKDL5dLatWvVoUMHzzSn06kOHTroq6++8ldZuIBt27YpLi5Ol112mXr27Kk9e/ZIktauXav8/Hyv/dmoUSPVq1fPsz+/+uorpaamKjo62tOmY8eOOnLkiDZs2FC2HwRnycnJ0YEDB7z2YWRkpFq0aOG1D6OiotS8eXNPmw4dOsjpdGr16tWeNtdff72Cg4M9bTp27KgtW7bo4MGDZfRp7EA/WjHRj1ZuFaUv9VsA/Omnn1RYWOj1SyxJ0dHROnDggJ+qwvm0aNFCc+bM0ZIlSzRjxgzl5OSobdu2Onr0qA4cOKDg4GBFRUV5vefM/XngwIEi9/fpefCv0/vgfN/JAwcOqE6dOl7zAwMDVaNGDfazH9CPVjz0o5VfRelLAy95CbBG586dPf9u0qSJWrRoofr162vevHkKCwvzY2UAUDHQj6K88NsIYK1atRQQEHDW1U3/+9//FBMT46eqUBJRUVG64oortH37dsXExMjlcunQoUNebc7cnzExMUXu79Pz4F+n98H5vpMxMTFnXVxQUFCgX375hf3sB/SjFR/9aOVTUfpSvwXA4OBgNWvWTJ999plnmtvt1meffaZWrVr5qyyUwLFjx7Rjxw7FxsaqWbNmCgoK8tqfW7Zs0Z49ezz7s1WrVlq/fr3XL/2yZcsUERGhK6+8sszrh7fExETFxMR47cMjR45o9erVXvvw0KFDWrt2rafN8uXL5Xa71aJFC0+blStXKj8/39Nm2bJlSk5OVvXq1cvo09iBfrTiox+tfCpMX1oql5JcpLffftuEhISYOXPmmI0bN5p+/fqZqKgor6ubUH4MHTrUrFixwuTk5JisrCzToUMHU6tWLZObm2uMMaZ///6mXr16Zvny5WbNmjWmVatWplWrVp73FxQUmKuuusrcfPPNJjs72yxZssTUrl3bPP744/76SNY5evSo+fbbb823335rJJlnn33WfPvtt2b37t3GGGMmTpxooqKizAcffGC+++4707VrV5OYmGhOnjzpWUanTp1Menq6Wb16tVm1apVJSkoy99xzj2f+oUOHTHR0tLn33nvN999/b95++20THh5uZs6cWeaf1wb0oxUL/WjlUBn6Ur8GQGOMeeGFF0y9evVMcHCwufbaa83XX3/t75JwDj169DCxsbEmODjY1K1b1/To0cNs377dM//kyZNmwIABpnr16iY8PNzccccdZv/+/V7L2LVrl+ncubMJCwsztWrVMkOHDjX5+fll/VGs9fnnnxtJZ/307t3bGPPr7QuefPJJEx0dbUJCQsyNN95otmzZ4rWMn3/+2dxzzz2matWqJiIiwtx3333m6NGjXm3WrVtn2rRpY0JCQkzdunXNxIkTy+ojWol+tOKgH60cKkNf6jDGmEsfRwQAAEBF4fdHwQEAAKBsEQABAAAsQwAEAACwDAEQAADAMgRAAAAAyxAAAQAALEMABAAAsAwB0EKjR49W06ZN/V1GiTVo0EDPPffcJS1jzpw5ioqK8ryuqNsCgH9V1L6DfhSnEQArgBUrVsjhcJz1gPCLNWzYMK9nFNrMV9ti5cqVuu222xQXFyeHw6EFCxaU+joAFB/9qO/4altMmDBB11xzjapVq6Y6deqoW7du2rJlS6mvx1YEQIsYY1RQUKCqVauqZs2al7SsMx9OXRrt/KU0tkVRjh8/rrS0NE2fPr3Ulw3Af+hHz+arfvSLL77QwIED9fXXX2vZsmXKz8/XzTffrOPHj5f6umxEACwF7du3V2ZmpjIzMxUZGalatWrpySef1JlP2Tt48KB69eql6tWrKzw8XJ07d9a2bds883fv3q3bbrtN1atXV5UqVdS4cWMtWrRIu3bt0g033CBJql69uhwOh/r06SNJcrvdmjBhghITExUWFqa0tDS9++67nmWe/ot38eLFatasmUJCQrRq1aqzhuvdbrfGjh2r+Ph4hYSEqGnTplqyZIln/q5du+RwODR37ly1a9dOoaGhevPNN4vcFg6HQzNmzNDtt9+uKlWqaPz48SosLFRGRoanzuTkZD3//PNe7+vTp4+6deumKVOmKDY2VjVr1tTAgQPP2/G98sorioqKOu9fnnPmzFG9evUUHh6uO+64Qz///LPX/N9vi9N1PP3004qOjlZUVJTGjh2rgoICDR8+XDVq1FB8fLxmz559znVKUufOnTVu3Djdcccd520H4Ff0o7+hH/3VkiVL1KdPHzVu3FhpaWmaM2eO9uzZo7Vr1573fSimUnuqsMXatWtnqlatah566CGzefNm88Ybb5jw8HDzz3/+09Pm9ttvNykpKWblypUmOzvbdOzY0TRs2NC4XC5jjDFdunQxN910k/nuu+/Mjh07zEcffWS++OILU1BQYN577z0jyWzZssXs37/fHDp0yBhjzLhx40yjRo3MkiVLzI4dO8zs2bNNSEiIWbFihTHmt4dVN2nSxCxdutRs377d/Pzzz2bUqFEmLS3NU9uzzz5rIiIizFtvvWU2b95sHnnkERMUFGS2bt1qjDEmJyfHSDINGjQw7733ntm5c6fZt29fkdtCkqlTp46ZNWuW2bFjh9m9e7dxuVxm5MiR5ptvvjE7d+70bJ+5c+d63te7d28TERFh+vfv
bzZt2mQ++uijs7Zh/fr1zd///ndjjDGTJk0yNWvWNKtXrz7nfvn666+N0+k0kyZNMlu2bDHPP/+8iYqKMpGRkZ42v98WvXv3NtWqVTMDBw40mzdvNv/617+MJNOxY0czfvx4s3XrVvPUU0+ZoKAgs3fv3vP8Vnhvk/nz5xerLWAr+tHf0I8Wbdu2bUaSWb9+fbHfg3MjAJaCdu3amZSUFON2uz3THn30UZOSkmKMMWbr1q1GksnKyvLM/+mnn0xYWJiZN2+eMcaY1NRUM3r06CKXf7oDOnjwoGdaXl6eCQ8PN19++aVX24yMDHPPPfd4vW/BggVebX7/ZY2LizPjx4/3anPNNdeYAQMGGGN+67iee+65C24LSWbw4MEXbDdw4EDTvXt3z+vevXub+vXrm4KCAs+0u+66y/To0cPz+nTH9cgjj5jY2Fjz/fffn3cd99xzj7nlllu8pvXo0eOCHVf9+vVNYWGhZ1pycrJp27at53VBQYGpUqWKeeutty74OY0hAALFQT/6G/rRsxUWFpouXbqY1q1bF6s9LiywDAcbK7WWLVvK4XB4Xrdq1UpTp05VYWGhNm3apMDAQLVo0cIzv2bNmkpOTtamTZskSYMGDdKDDz6opUuXqkOHDurevbuaNGlyzvVt375dJ06c0E033eQ13eVyKT093Wta8+bNz7mcI0eOaN++fWrdurXX9NatW2vdunXFXs6F2k2fPl2zZs3Snj17dPLkSblcrrOuGmvcuLECAgI8r2NjY7V+/XqvNlOnTtXx48e1Zs0aXXbZZeetY9OmTWcdgm3VqpXXYZmiNG7cWE7nb2dHREdH66qrrvK8DggIUM2aNZWbm3ve5QAoGfrR87ezuR8dOHCgvv/+e61atapY7XFhnANYTvTt21c7d+7Uvffeq/Xr16t58+Z64YUXztn+2LFjkqSFCxcqOzvb87Nx40av81ckqUqVKqVSY3GX8/t2b7/9toYNG6aMjAwtXbpU2dnZuu++++RyubzaBQUFeb12OBxyu91e09q2bavCwkLNmzfvIj5B8RRVR3FqA+Bf9KOVsx/NzMzUxx9/rM8//1zx8fGlWqfNCIClZPXq1V6vv/76ayUlJSkgIEApKSkqKCjwavPzzz9ry5YtuvLKKz3TEhIS1L9/f73//vsaOnSoXn75ZUlScHCwJKmwsNDT9sorr1RISIj27Nmjhg0bev0kJCQUu+6IiAjFxcUpKyvLa3pWVpZXbZciKytL1113nQYMGKD09HQ1bNhQO3bsuKhlXXvttVq8eLGefvppTZky5bxtU1JSitwvAMon+tFzs7EfNcYoMzNT8+fP1/Lly5WYmFgm67UFh4BLyZ49ezRkyBA98MAD+u9//6sXXnhBU6dOlSQlJSWpa9euuv/++zVz5kxVq1ZNjz32mOrWrauuXbtKkgYPHqzOnTvriiuu0MGDB/X5558rJSVFklS/fn05HA59/PHHuuWWWxQWFqZq1app2LBhevjhh+V2u9WmTRsdPnxYWVlZioiIUO/evYtd+/DhwzVq1Chdfvnlatq0qWbPnq3s7OxzXqFWUklJSXrttdf0ySefKDExUa+//rq++eabi/4yX3fddVq0aJE6d+6swMBADR48uMh2gwYNUuvWrTVlyhR17dpVn3zyyQUPW5SWY8eOafv27Z7XOTk5ys7OVo0aNVSvXr0yqQGoaOhHz83GfnTgwIH697//rQ8++EDVqlXTgQMHJEmRkZEKCwsrkxoqM0YAS0mvXr108uRJXXvttRo4cKAeeugh9evXzzN/9uzZatasmW699Va1atVKxhgtWrTIMyReWFiogQMHKiUlRZ06ddIVV1yhF198UZJUt25djRkzRo899piio6OVmZkpSXrqqaf05JNPasKECZ73LVy4sMQdwqBBgzRkyBANHTpUqampWrJkiT788EMlJSWVyrZ54IEHdOedd6pHjx5q0aKFfv75Zw0YMOCSltmmTRstXLhQTzzxxDkP8bRs2VIvv/yynn/+eaWlpWnp0qV64oknLmm9xbVmzRqlp6d7ziMaMmSI0tPTNXLkyDJZP1AR0Y+em4396IwZM3T48GG1b99esbGxnp+5c+eWyforO4cxZ9xkCRelffv2atq06SU/XgcAbEU/CpQtRgABAAAsQwAEAACwDIeAAQAALMMIIAAAgGUIgAAAAJYhAAIAAFiGAAgAAGAZAiAAAIBlCIAAAACWIQACAABYhgAIAABgGQIgAACAZf4fknFmeJXkNz4AAAAASUVORK5CYII=\n",
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAoAAAAHACAYAAAAyfdnSAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjkuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8hTgPZAAAACXBIWXMAAA9hAAAPYQGoP6dpAAAwlElEQVR4nO3de5xN9f7H8ffeM2bGMGNc52LcYjIa14iELic1SRKSI6dGOYnGUS6JyiXlUqFQISnqiC6USOIUKZf5RUS5XzIKKcUgc93f3x9lZzOYMbNnz8z39Xw85vGw9/rutT57rT1f7/mu9d3LYYwxAgAAgDWcvi4AAAAABYsACAAAYBkCIAAAgGUIgAAAAJYhAAIAAFiGAAgAAGAZAiAAAIBlCIAAAACW8c9JI5fLpQMHDigkJEQOh8PbNQGwkDFGx48fV1RUlJzO4ve3Kf0oAG/LTT+aowB44MABValSJV+KA4AL2b9/v6Kjo31dRr6jHwVQUHLSj+YoAIaEhLhXGBoamvfKAOAsKSkpqlKliru/KW7oRwF4W2760RwFwNOnK0JDQ+m4AHhVcT09Sj8KoKDkpB8tfhfaAAAA4IIIgAAAAJYhAAIAAFiGAAgAAGAZAiAAAIBlCIAAAACWIQACAABYhgAIAABgGQIgAACAZQiAAAAAliEAAgAAWIYACAAAYBkCIAAAgGUIgAAAAJYhAAIAAFjG39cFFEYr27fPcdtrFyzwYiUAkHP0XQByihFAAAAAyxAAAQAALEMABAAAsAwBEAAAwDIEQAAAAMsQAAEAACxDAAQAALAMARAAAMAyBEAAAADLFIo7gaSnp8vlcvm6DLdMZ85zcWpqqhcrAQoXp9OpgIAAX5cBAMgjnwfA9PR0bdu2TWlpab4uxe3nsmVz3Hbz5s1erAQoXAIDAxUbG0sIBIAizucB0OVyKS0tTf7+/vL393k5kiT/XIxGBgUFebESoPDIzMxUWlpaoRqtBwBcmsKRuCT5+/sXmlEFv1z8B1dYagYKQmZmpq9LAADkAyaBAAAAWIYACAAAYBkCIAAAgGUIgAAAAJYhAAIAAFiGAAgAAGAZAiAAAIBlCIAAAACWKTRfBI3sfT9qVK7axz3xhJcqAQAAxQUjgAAAAJYhAAIAAFiGAAgAAGAZAiAAAIBlCsUkkF1Tpsjf5ZKfy5Wj9kx0AAAAuHSMAAIAAFiGAAgAAGAZAiAAAIBlCIAAAACWKRSTQAD4Tk7vNpPldKp6YqKXqwEAFARGAAEAACxDAAQAALAMARAAAMAyBEAAAADLEAABAAAsUyRnAed01uJp3DoOAADgb4wAAgAAWIYACAAAYBkCIAAAgGUIgAAAAJYhAAIAAFiGAAgAAGAZAiAAAIBlCIAAAACWIQACAABYhgAIAABgGQIgAACAZQiAAAAAliEAAgAAWIYACAAAYBkCIAAAgGUIgAAAAJYhAAIAAFiGAAgAAGAZAiAAAIBlCIAAAACWIQACAABYhgAIAABgGQIgAACAZQiAAAAAliEAAgAAWIYACAAAYBkCIAAAgGUIgAAAAJYhAAIAAFiGAAgAAGAZAiAAAIBlCIAAAACWIQACAABYhgAIAABgGQIgAACAZfx9XQAAIH9kOZ0yOWybmprq1VoAnJ/T6VRAQIBPayAAAkAxkJ6erl/KlFGWM2cndjZv3uzligCcT2BgoGJjY30aAgmABez7UaN8XQKAYsjlcinL6ZTTGDnNxccBg4KCCqAqAGfLzMxUWlqaXC6XT+sgAAJAMeI0Rn45+I/F16efAJtlZmb6ugQmgQAAANiGAAgAAGAZAiAAAIBlCIAAAACWIQACAABYhgAIAABgGQIgAACAZQiAAAAAluGLoIF8kJs7vMQ98YQXK+FuMwCAi2MEEAAAwDIEQAAAAMsQAAEAACxDAAQAALAMARAAAMAyzAIGssFMWgBAccYIIAAAgGUIgAAAAJYhAAIAAFiGAAgAAGAZKyaBcEE/AADA3xgBBAAAsAwBEAAAwDIEQAAAAMsQAAEAACxDAAQAALAMARAAAMAyBEAAAADLEAABAAAsQwAEAACwDAEQAADAMlbcCg75I7e31It74gkvVQIAAPKCEUAAAADLEAABAAAsQwAEAACwDAEQAADAMkwCgRVyO4EFAIDijBFAAAAAyzACCADId3xtFFC4MQIIAABgGQIgAACAZQiAAAAAluEaQKCAcW0UCgM+h4DdGAEEAACwDAEQAADAMgRAAAAAyxAAAQAALMMkkDziFmMAULCYwALkHSOAAAAAliEAAgAAWIYACAAAYBmuAQQAAEWON6/Bt+G6UQIgvIYLtQEAKJw4BQwAAGAZAiAAAIBlCIAAAACW4RpAAACAPMjNNe9ZTqeqJyZ6sZqcYQQQAADAMowAFjO5+SuEWbcAANiJEUAAAADLMAIIACjW+E5S32C/F26MAAIAAFiGAAgAAGAZTgGjSPLmPSABACjuCIAAgIvijy6geOEUMAAAgGUIgAAAAJYhAAIAAFiGAAgAAGAZAiAAAIBlCIAAAACWIQACAABYhgAIAABgGQIgAACAZQiAAAAAliEAAgAAWIYACAAAYBkCIAAAgGX8fV0AAABF2fejRuW4bdwTT3ixEiDnGAEEAACwDAEQAADAMgRAAAAAy3ANIADA53JzHZ1NcrtfuMYQOcUIIAAAgGUIgAAAAJYhAAIAAFiGAAgAAGAZJoEAAACcwYZJSYwAAgAAWIYACAAAYBlOAaPQsGHIHQCAwoAACABAASnKf+gW5dpxLk4BAwAAWIYACAAAYBkCIAAAgGUIgAAAAJZhEojFuKAXAAA7MQIIAABgGQIgAACAZQiAAAAAluEaQAAALFWYrgUvTLXYgAAIFHJ0igCA/MYpYAAAAMsQAAEAACxDAAQAALAMARAAAMAyBEAAAADLEAABAAAsQwAEAACwDAEQAADAMgRAAAAAyxAAAQAALEMABAAAsAwBEAAAwDIEQAAAAMsQAAEAACxDAAQAALCMv68LAAAA+eP7UaN8XQKKCEYAAQAALEMABAAAsAwBEAAAwDIEQAAAAMsUmkkgLodDcpJHgcLK5XD4ugQAQD7xeQB0Op3yc7mU5XTyHwxQyAUGBsrJH2oAUOT5PAAGBASo4rFjMr4uBMBFxcbGKiAgwNdlAADyyOcBUJL8XC5flwAgBwh/AFA8cC4HAADAMgRAAAAAyxAAAQAALEMABAAAsAwBEAAAwDIEQAAAAMsQAAEAACxDAAQAALAMARAAAMAyBEAAAADLEAABAAAsQwAEAACwDAEQAADAMgRAAAAAyxAAAQAALEMABAAAsAwBEAAAwDIEQAAAAMsQAAEAACxDAAQAALAMARAAAMAyBEAAAADLEAABAAAsQwAEAACwDAEQAADAMgRAAAAAy/j7ugAAQP5xORySk7/t8yI9PT1X7bPY38gFl8Ph6xIkEQABoFhwOp3yc7mU5XQWmv9giqrU1NRctc8kACKXAgMD5fTx54YACADFQEBAgCoeOybj60KKgXr16uWq/fHff/dSJSiuYmNjFRA
Q4NMaCIAAUEz4uVy+LqFYCAoKylV7f/Y7csnX4U9iEggAAIB1GAEEAOAMK9u393UJgNcxAggAAGAZAiAAAIBlCIAAAACWIQACAABYhgAIAABgGQIgAACAZQiAAAAAliEAAgAAWIYACAAAYBkCIAAAgGUIgAAAAJYhAAIAAFiGAAgAAGAZAiAAAIBlCIAAAACWIQACAABYhgAIAABgGQIgAACAZQiAAAAAliEAAgAAWIYACAAAYBkCIAAAgGUIgAAAAJYhAAIAAFiGAAgAAGAZAiAAAIBlCIAAAACWIQACAABYhgAIAABgGQIgAACAZQiAAAAAliEAAgAAWIYACAAAYBkCIAAAgGUIgAAAAJYhAAIAAFiGAAgAAGAZAiAAAIBlCIAAAACWIQACAABYhgAIAABgGQIgAACAZQiAAAAAliEAAgAAWIYACAAAYBkCIAAAgGUIgAAAAJYhAAIAAFiGAAgAAGAZAiAAAIBlCIAAAACWIQACAABYhgAIAABgGQIgAACAZQiAAAAAliEAAgAAWIYACAAAYBkCIAAAgGUIgAAAAJYhAAIAAFiGAAgAAGAZAiAAAIBlCIAAAACWIQACAABYhgAIAABgGQIgAACAZQiAAAAAliEAAgAAWIYACAAAYBkCIAAAgGUIgAAAAJYhAAIAAFiGAAgAAGAZAiAAAIBlCIAAAACWIQACAABYhgAIAABgGQIgAACAZfzzc2VZWVnKyMjI9esc5cvnZxlA8WKMzIkTUnq6rysBABQT+RIAjTE6dOiQjh49ekmvL52QkB9lAMWWycxU2oYNyvjyS1+XAgAoBvIlAJ4Of5UqVVJwcLAcDkeuXn/SyZlo4HyMpLSsLP0SGChJhEAAQJ7lOQBmZWW5w1/5SzyVm+Hnl9cygGIt0M9PKldOPzdqpIykJE4HAwDyJM9Db6ev+QsODs5zMQDOL9DPTw5/fzlKl/Z1KQCAIi7fzr3m9rQvgNxx/4bxuwYAyCMuvgMAALAMATAbvQYNUtfevX1dBgAAgFfk6/cAnm1l+/Y5apeVlqYmL73kzVIKtdGTJunjZcu0auHCC7bbunOnRr34ojZ+/72Sf/pJYx5/XIn33VdAVQIAgOKi2IwAplswK/KPU6dUvUoVjRg4UOEVK/q6HAAAUEQV2QB4a7duGvDUU3rsmWdUvWlTdbj/fknSS6+/rqvbtlVE/fqq06qV+g0frhMnT7pfN3vePFW58kr978sv1SQ+XpENGqjD/ffr0OHD593W+k2bVKNpU70wbVq2y9PT0zXgqacUc801qhgXp7jrrtP4qVPdy4+mpKjP44+rRtOmqtywoW675x5t3rrVXc/YyZO1eds2hcbEKDQmRrPnzct2O43r19czgwfrzttuU2BAQK73GQAAgOTlU8DeNueDD9Sja1ctmzvX/ZzD4dBzQ4eqWnS0fti/X/1HjNDQ557TC0895W7zR2qqJr/2ml4dN05Op1MPDBigJ8aO1YwJE87Zxhdr1uhfiYkaOWiQ7vvnP7OtY+qbb+qTzz7TrIkTFR0VpZ8OHtSPBw+6lyf85z8KCgrSvBkzFBoSojfmzlW7hAR9s3SpOrZtqy07d+p/K1fqo1mzJEmhISH5tYsAAADOUaQDYM1q1fT0Y495PHfmNXHVoqM19JFH9Mjw4R4BMCMjQy+MHKnLqlWTJPW85x49m801iAuXLtWDgwZp8qhR6tS27Xnr2H/ggGpWr67mTZrI4XCoauXK7mVr1q3T+k2btHvtWgX+dSeHUYMHa9GyZVqwZInu++c/VSo4WP5+fpzWBQAABaJIB8CGdeue89zyVas0Ydo07dizR8dPnFBmZqZS09L0x6lTCi5ZUpIUXLKkO/xJUkTFivrlyBGP9az79lstWb5cb02erNtuuumCdXTr1Entu3fXlTffrNatWumWG27Qja1aSZI2b9umE3/8oepNm3q85lRqqvYmJ1/S+wYAAMiLIh0ATwe60/b9+KPu6tlTPe6+W0P79VPZsDCtXbdOiY8/rvSMDHf7Ev6eb9vhcMgY4/FcjapVVS4sTG+9/77ir79eJUqUOG8dDePitPnzz7Vs5UqtWL1a3R9+WNdfc43eeuklnTx5UhEVK+rj2bPPeV0Yp3oBAIAPFOkAeLaN330nlzEaPWSInM4/57d8sHjxJa2rfNmymv3yy7q1WzclPPywZk2ceMEQGBoSok5t26pT27Zqf8st6nj//frt6FE1iIvTz7/+Kn8/P1WLjs72tQElSijL5bqkOgEAAHKryM4Czs5l1aopIyNDU998U3uTkzXnww/1+pw5l7y+iuXLa9Fbb2nH7t26v18/ZWZmZtvupddf13sLF2rH7t3auXevPvzkE4VXrKiw0FDd0KKFmjZqpLt799ZnX36pfT/+qKRvvtHICRP0zebNkqSqlStr348/atOWLTry229KS0vLdjvp6enatGWLNm3ZovSMDB38+Wdt2rJFu/ftu+T3CAAA7OPVEcBrFyzIUbvju3bly/bq1amj0Y8/rhenT9dT48frmquu0vCBA/Xgo49e8jrDK1bUorfe0q3/+pf+PWCAZkyYID8/P482pUuV0sTp07V73z75OZ26sn59vT99unsU8v3p0zVywgQ9NGSIfv3tN4VXqKBrrrpKlSpUkCS1j4/XwqVLdds99+hoSoqmjB2rbp06nVPLwcOH1fKML9eeNGOGJs2YoZZNm2pxNqeYAQAAsuMwZ1/8lo2UlBSVKVNGx44dU2hoqMey1NRU7d27VzVq1FBQUNAlFZFfARAoztKzspR86JBOzJolc9akpYKS0z/qLsWF+pnioCDeX07vvgTAt7zVl+amnylWp4ABAABwcQRAAAAAyxAAAQAALEMABAAAsAwBEAAAwDIEQAAAAMsQAAEAACxDAAQAALAMAbCI+DIpSaExMTqaklLg2x49aZJatGtX4NvNqezqGz1pkmpefbVCY2K0aNmyAq1n348/KjQmRpu2bCnQ7QIAkFNeDYAZGRlKTU296E9aeroyznOf3aLMl6HNJn179NBHb77pfrx91y6NnTxZE59+WjtXr9ZN115boPVER0Zq5+rVuuLyyyXxOQAAFD5euxdwRkaGtm/frlOnTl20bdovvygwIEA1o6NVwt+rtyeGj2RkZKhEiRJeWXfpUqVUulQp9+M9ycmSpLatW8vhcFzyei+1Zj8/P4VXrHjJ2wUAwNu8NgKYlZWlU6dOyd/fX0FBQRf88fPzU1p6ulwuV47X73K5NH7qVNW74QZVqltX17Rrpw8/+USSZIzR7QkJuuO++3T6Vse/HT2q2JYt9cyLL0r6e1RmyfLlan7bbaoYF6d/3HmntuzY4bGdNevWKb5rV1WqW1d1WrXSoyNH6uQff7iXp6Wladhzz6lOq1aqcMUVanDjjXrzvfe078cf1fZf/5IkVW3cWKExMeo1aNBFaz/t0xUr1Oimm1Spbl21/de/lPzjjxfcH9mddjyakqLQmBh9mZTk8Z5XrF6t6zp0UHi9emp9113auWePx7omTJummldfraiGDZU4ZIjS0tLO2d6sd99Vk/
h4VYyLU+P4eE2fPfucWuZ9/LHa3H23KsbF6d2PPvJazWeeAh49aZK6PPigJKnM5ZcrNCbGvc/HTp6s2JYtVeGKK9SiXTstW7nyojX3GjRIXXv31rgpU1Tz6qtV5corNXbyZGVmZurJsWNVtUkTxbZsqf++/3627+t8n4O3P/hA1a666px927V3bz0wcOC5BxgAgHzk9WsA/f39FRAQcMEffz+/XK93/NSpmvPBB3ph5EglLV6sxO7d9cDAgfoqKUkOh0NTn31W32zerCmzZkmS+g0bpsjwcA3u08djPUOffVajBg/WivnzVaFcOXV58EFlZGRIkvbs26eOPXro9ptv1upFizRz4kStXb9eA596yv36BwcN0vuLFum5oUP19ZIlmvj00yoVHKzoyEj996WXJEnrly7VztWr9ezQoRetXZJ+PHhQ/0pMVJt//EOrPvpI93burOHjxuV+55/HyAkTNGrwYH0xf778/fz00JAh7mXzFy/WmEmTNHzAAH0xf77CK1bUa2eEO0l6Z8ECjZo4UcP699fXS5ZoeP/+eubFFzV7/nyPdiPGjVPvhAR9vWSJbmzVyms1n6lvjx6aMnasJGnn6tXauXq1JOmVWbP00uuv65nBg7Vm0SLd2KqV/tmrl3b98MNFa165Zo0OHj6sJW+/rdFDhmj0pEnq3LOnwsqU0efvv6/7u3bVw8OG6aeDB8+p53yfgw5t2siVlaXFn3/ubvvLkSP6dMUK3XPnnXnaVwAAXEyRPN+alpam8VOnasGsWWrWqJEkqUbVqlqzfr1enztXLZs1U1REhCY+/bQefPRRHf71Vy394gt9uWCB/M86xTz4P//RP1q2lCRN/Wskb+GyZep4662aMG2a7mrXTon33SdJqlW9up4bOlRtunXTCyNHav+BA5q/eLEWzJypG1q0cNdxWtmwMElSxfLlFRYamuPaZ7z9tmpUrarRf4WcmMsu05YdO/TCq6/my/4b1r+/WjZrJknq9+CD6vzAA0pNS1NQYKBemTlT93TurHs7d3a3XbF6tcdI1ehJkzRq8GDdHh8vSapepYq27dqlN+bOVbeOHd3teickuNt4s+YzlS5VSmX+2tdnnoadPGOGHunZU3fedpskaeSgQVqZlKRXZs7UhBEjLlhz2bAwPT90qJxOp2Iuu0wvvvaaTp06pYG9e0uSBvTqpRdefVVr1q93r/80Pz+/bD8HknRnu3b677x56tCmjaQ/g3V0ZKRa/fU+AQDwliIZAPckJ+uPU6d0R/fuHs+nZ2Sofp067scd2rTRwqVLNWHaNL3w1FOqVb36Oetq+lcIk6RyYWGKqVFD23fvliRt3rZN32/bpncXLnS3McbI5XJp3/79+n7HDvn5+all06b5Wvv23bvVpEGD89aZV3Vr13b/O+KvkPTLkSOqEhWlHbt36/6uXc/Z9pdr10qSTv7xh/YmJ6vP44+r75NPuttkZmYqNCTE43VX1qtXIDVfTMrx4zr488+6+sorPZ6/+sor9d22bR7PZVdzbK1acjr/HiyvVL686vw1wUP6M+SVCwvTL0eO5OzN/KX7XXfp+k6ddODQIUVFRGj2/Pnq1rFjnq5bBAAgJ4pkADxx8qQk6b1XX1VkRITHssCAAPe//zh1Shu//15+fn7avW9frrdz8uRJ3ffPf6pXQsI5y6pERmrPJawzp7Xn1umAcvqaR0nK/OtU9tn8z5jYcDps5PT6y9PXP0565hk1adjQY5mf0/OKguCSJQtFzbmRXc1nTwRxOBznTFZyOBy5rqdBXJzqxcZqzocf6h8tW2rrzp16b/r03BcNAEAuFckAGFurlgIDArT/4EH3acHsPDFmjJwOh+a99prufOABxV9/va5r3tyjzdcbN7pHkX4/dky7fvhBtWvWlPTnf9Dbd+9WzWrVsl3/FbVry+Vy6av/+z/3KeAzBfwVHLKysnJVe+2aNbX4s8/OqfNCKpQrJ0k69MsvOj12uGnr1gu+JjuX16ypdd9+q7s7dMh225UqVFBkeLh+2L9fXdq3z/X6z5RfNV9MaEiIIsPDtfabbzz2+dpvvlHj+vXzfXtny+5zcNq9nTvrlVmzdODnn3X9NdcoOjLS6/UAAFAkA2BI6dL6T48eGjJ6tFwul5o3bqyUEye0dv16hZQurW4dO2rJ8uV66/339b/33lPDuDg9/O9/q9egQVq9aJHKlinjXtezL72kcmFhqlShgkZOmKDyZcvqttatJUn9evbUjZ07a8BTTymhc2cFBwdr+65d+nzVKo0fPlzVoqN1d4cOShwyRM8NHaq6sbHaf+CAfjlyRB1vvVVVoqLkcDi0ZPlyxV93nYKCgnJU+/1du2ryjBl6cuxYJdx1lzZ89905EyzOVjIoSFc1bKgXpk1T9eho/XLkiJ5+4YVc79veCQnq/dhjurJuXTVr3FjvfvSRtu3cqepVqrjbPN63rwY9/bRCQ0LU+tprlZ6erg2bN+toSor63H9/jreVXzXnRN8ePTRm0iTVqFpV9evU0X/nzdPmrVv12vjxXtnembL7HJz+2prOt9+uJ599VrPeeUfTnn/e67UAACAVwCzgzMxMpaenX/AnM5uRkYsZ2q+fBj30kCZMm6ar2rRRx/vv16crVqh6dLR+PXJEfR5/XEP69lXDuDhJf4aWShUqqN+wYR7rGTFwoB575hlde8cd+vmXX/TOtGkK+OtUbN3YWC2ePVu79u7VLXffrVbt22vUxImKrFTJ/foXRo5U+1tuUf8RI9QkPl7/eeIJ/fHXadKoiAg93revRowbp5rNm7tnD1+odunPwPDWSy9p0f/+p2vatdPrc+ZoeP/+F90nr4wZo8ysLF3boYMGjxqlof365Xq/dmrbVoMSEzX0ued0XYcO2v/TT+px990ebRLuukuTR43S7Hnz1LxtW7Xp1k2z589Xtb/qz438qDkneickKPH++/XEmDG6+rbb9L+VKzV36tRsrwvNb+f7HEhSmZAQ3R4fr1KlSrn/8AAAwNsc5swLsM4jJSVFZcqU0bFjxxR6xixGSUpNTdXevXtVo0YNBQUFuZ8v7F8E/WVS0p/fr7d+vcfMTKCgtbv3XsXWqqXnz/rj5GzpWVlKPnRIJ2bNksnlhJP8cu2CBV5b94X6meKgIN7fyjxelgGgYHirL81NP+O1tFWiRAnVrl072+ueznZizx45nU7uAgKr/H7smL5KStKXSUkeX0UDAIC3eTVxlShRIke30srIw+xXoKhq1b69jh47ppGPPqqYyy7zdTkAAItYO+TWqlkzpezc6esyYLHvVqzwdQkAAEt5fRIIAAAAChcCIAAAgGXyLQDmYDIxgDxw/4bxuwYAyKM8B8DTkzxOf/cdAO9Iy8qSycyUOXHC16UAAIq4PE8C8fPzU1hYmA4fPixJCg4OzvXN7NMv4YugAVsY/Rn+fvntN6Vt2CClp/u6JABAEZcvs4AjIiIkyR0Ccyv1El8H2MJkZiptwwZlfPmlr0sBABQD+RIAHQ6HIiMjValSJWVkZOT69V9zD1Tg/Iz587QvI38AgHySr98D6OfnJz8/v
1y/zle3tQIAALARXwMDAABgGQIgAACAZQiAAAAAlsnRNYCnv+Q5JSXFK0WcvISJIwAKnrf6gDPXXVy/VN7b/ahEXwoUFd7qB3LTj+YoAB4/flySVKVKlTyUBaDIK1PG65s4fvy4yhTAdgoa/SgANy/3cTnpRx0mBzHR5XLpwIEDCgkJyfWXPF9MSkqKqlSpov379ys0NDRf1w3f4bgWP94+psYYHT9+XFFRUXI6i9/VKd7sRyV+54ojjmnx5M3jmpt+NEcjgE6nU9HR0flS3PmEhobyAS+GOK7FjzePaXEc+TutIPpRid+54ohjWjx567jmtB8tfn9mAwAA4IIIgAAAAJbxeQAMDAzU8OHDFRgY6OtSkI84rsUPx7Rw4/gUPxzT4qmwHNccTQIBAABA8eHzEUAAAAAULAIgAACAZQiAAAAAliEAAgAAWMbnAfDll19W9erVFRQUpGbNmun//u//fF0SzmPEiBFyOBweP7Gxse7lqampSkxMVPny5VW6dGl16tRJP//8s8c6kpOT1bZtWwUHB6tSpUp69NFHlZmZWdBvxVorV65Uu3btFBUVJYfDoQ8//NBjuTFGw4YNU2RkpEqWLKnWrVtr586dHm1+++03devWTaGhoQoLC1OPHj104sQJjzabNm1Sq1atFBQUpCpVqui5557z9luzGv1o0UE/WjwUh77UpwHwnXfeUf/+/TV8+HB98803atCggeLj43X48GFfloULiIuL08GDB90/X331lXtZv379tHDhQr333nv64osvdODAAXXs2NG9PCsrS23btlV6erpWr16tWbNmaebMmRo2bJgv3oqVTp48qQYNGujll1/Odvlzzz2nSZMmaerUqUpKSlKpUqUUHx+v1NRUd5tu3brp+++/17Jly7Ro0SKtXLlSPXv2dC9PSUnRzTffrGrVqmn9+vV6/vnnNWLECL366qtef382oh8teuhHi75i0ZcaH2ratKlJTEx0P87KyjJRUVFmzJgxPqwK5zN8+HDToEGDbJcdPXrUlChRwrz33nvu57Zu3WokmTVr1hhjjFm8eLFxOp3m0KFD7jZTpkwxoaGhJi0tzau141ySzAcffOB+7HK5TEREhHn++efdzx09etQEBgaaOXPmGGOM2bJli5Fkvv76a3ebTz75xDgcDvPTTz8ZY4x55ZVXTNmyZT2O6WOPPWZq167t5XdkJ/rRooV+tPgpqn2pz0YA09PTtX79erVu3dr9nNPpVOvWrbVmzRpflYWL2Llzp6KionTZZZepW7duSk5OliStX79eGRkZHsczNjZWVatWdR/PNWvWqF69egoPD3e3iY+PV0pKir7//vuCfSM4x969e3Xo0CGPY1imTBk1a9bM4xiGhYWpSZMm7jatW7eW0+lUUlKSu821116rgIAAd5v4+Hht375dv//+ewG9GzvQjxZN9KPFW1HpS30WAH/99VdlZWV5fIglKTw8XIcOHfJRVbiQZs2aaebMmVqyZImmTJmivXv3qlWrVjp+/LgOHTqkgIAAhYWFebzmzON56NChbI/36WXwrdPH4EK/k4cOHVKlSpU8lvv7+6tcuXIcZx+gHy166EeLv6LSl/rneQ2wRps2bdz/rl+/vpo1a6Zq1arp3XffVcmSJX1YGQAUDfSjKCx8NgJYoUIF+fn5nTO76eeff1ZERISPqkJuhIWF6fLLL9euXbsUERGh9PR0HT161KPNmcczIiIi2+N9ehl86/QxuNDvZERExDmTCzIzM/Xbb79xnH2AfrToox8tfopKX+qzABgQEKDGjRvrs88+cz/ncrn02WefqXnz5r4qC7lw4sQJ7d69W5GRkWrcuLFKlCjhcTy3b9+u5ORk9/Fs3ry5Nm/e7PGhX7ZsmUJDQ3XFFVcUeP3wVKNGDUVERHgcw5SUFCUlJXkcw6NHj2r9+vXuNp9//rlcLpeaNWvmbrNy5UplZGS42yxbtky1a9dW2bJlC+jd2IF+tOijHy1+ikxfmi9TSS7R3LlzTWBgoJk5c6bZsmWL6dmzpwkLC/OY3YTCY8CAAWbFihVm7969ZtWqVaZ169amQoUK5vDhw8YYY3r16mWqVq1qPv/8c7Nu3TrTvHlz07x5c/frMzMzTd26dc3NN99sNm7caJYsWWIqVqxohgwZ4qu3ZJ3jx4+bDRs2mA0bNhhJZsKECWbDhg1m3759xhhjxo4da8LCwsyCBQvMpk2bTPv27U2NGjXMqVOn3Ou45ZZbTKNGjUxSUpL56quvTExMjOnatat7+dGjR014eLi55557zHfffWfmzp1rgoODzbRp0wr8/dqAfrRooR8tHopDX+rTAGiMMZMnTzZVq1Y1AQEBpmnTpmbt2rW+Lgnn0aVLFxMZGWkCAgJM5cqVTZcuXcyuXbvcy0+dOmUeeughU7ZsWRMcHGw6dOhgDh486LGOH374wbRp08aULFnSVKhQwQwYMMBkZGQU9Fux1vLly42kc34SEhKMMX9+fcHQoUNNeHi4CQwMNDfeeKPZvn27xzqOHDliunbtakqXLm1CQ0PNfffdZ44fP+7R5ttvvzUtW7Y0gYGBpnLlymbs2LEF9RatRD9adNCPFg/FoS91GGNM3scRAQAAUFT4/FZwAAAAKFgEQAAAAMsQAAEAACxDAAQAALAMARAAAMAyBEAAAADLEAABAAAsQwC00IgRI9SwYUNfl5Fr1atX14svvpindcycOVNhYWHux0V1XwDwraLad9CP4jQCYBGwYsUKORyOc24QfqkGDhzocY9Cm3lrX6xcuVLt2rVTVFSUHA6HPvzww3zfBoCcox/1Hm/tizFjxuiqq65SSEiIKlWqpDvuuEPbt2/P9+3YigBoEWOMMjMzVbp0aZUvXz5P6zrz5tT50c5X8mNfZOfkyZNq0KCBXn755XxfNwDfoR89l7f60S+++EKJiYlau3atli1bpoyMDN188806efJkvm/LRgTAfHD99derT58+6tOnj8qUKaMKFSpo6NChOvMue7///rvuvfdelS1bVsHBwWrTpo127tzpXr5v3z61a9dOZcuWValSpRQXF6fFixfrhx9+0A033CBJKlu2rBwOh7p37y5JcrlcGjNmjGrUqKGSJUuqQYMGev/9993rPP0X7yeffKLGjRsrMDBQX3311TnD9S6XSyNHjlR0dLQCAwPVsGFDLVmyxL38hx9+kMPh0DvvvKPrrrtOQUFBmj17drb7wuFwaMqUKbr99ttVqlQpjRo1SllZWerRo4e7ztq1a2vixIker+vevbvuuOMOjRs3TpGRkSpfvrwSExMv2PG99tprCgsLu+BfnjNnzlTVqlUVHBysDh066MiRIx7Lz94Xp+sYPXq0wsPDFRYWppEjRyozM1OPPvqoypUrp+joaL3xxhvn3aYktWnTRs8884w6dOhwwXYA/kQ/+jf60T8tWbJE3bt3V1xcnBo0aKCZM2cqOTlZ69evv+DrkEP5dldhi1133XWmdOnS5uGHHzbbtm0z//3vf01wcLB59dVX3W1uv/12U6dOHbNy5UqzceNGEx8fb2rVqmXS09ONMca0
bdvW3HTTTWbTpk1m9+7dZuHCheaLL74wmZmZZt68eUaS2b59uzl48KA5evSoMcaYZ555xsTGxpolS5aY3bt3mzfeeMMEBgaaFStWGGP+vll1/fr1zdKlS82uXbvMkSNHzPDhw02DBg3ctU2YMMGEhoaaOXPmmG3btplBgwaZEiVKmB07dhhjjNm7d6+RZKpXr27mzZtn9uzZYw4cOJDtvpBkKlWqZF5//XWze/dus2/fPpOenm6GDRtmvv76a7Nnzx73/nnnnXfcr0tISDChoaGmV69eZuvWrWbhwoXn7MNq1aqZF154wRhjzLPPPmvKly9vkpKSzntc1q5da5xOp3n22WfN9u3bzcSJE01YWJgpU6aMu83Z+yIhIcGEhISYxMREs23bNjNjxgwjycTHx5tRo0aZHTt2mKefftqUKFHC7N+//wKfCs998sEHH+SoLWAr+tG/0Y9mb+fOnUaS2bx5c45fg/MjAOaD6667ztSpU8e4XC73c4899pipU6eOMcaYHTt2GElm1apV7uW//vqrKVmypHn33XeNMcbUq1fPjBgxItv1n+6Afv/9d/dzqampJjg42KxevdqjbY8ePUzXrl09Xvfhhx96tDn7lzUqKsqMGjXKo81VV11lHnroIWPM3x3Xiy++eNF9Ick88sgjF22XmJhoOnXq5H6ckJBgqlWrZjIzM93Pde7c2XTp0sX9+HTHNWjQIBMZGWm+++67C26ja9eu5tZbb/V4rkuXLhftuKpVq2aysrLcz9WuXdu0atXK/TgzM9OUKlXKzJkz56Lv0xgCIJAT9KN/ox89V1ZWlmnbtq1p0aJFjtrj4vwLcLCxWLv66qvlcDjcj5s3b67x48crKytLW7dulb+/v5o1a+ZeXr58edWuXVtbt26VJPXt21e9e/fW0qVL1bp1a3Xq1En169c/7/Z27dqlP/74QzfddJPH8+np6WrUqJHHc02aNDnvelJSUnTgwAG1aNHC4/kWLVro22+/zfF6Ltbu5Zdf1uuvv67k5GSdOnVK6enp58wai4uLk5+fn/txZGSkNm/e7NFm/PjxOnnypNatW6fLLrvsgnVs3br1nFOwzZs39zgtk524uDg5nX9fHREeHq66deu6H/v5+al8+fI6fPjwBdcDIHfoRy/czuZ+NDExUd99952++uqrHLXHxXENYCHx73//W3v27NE999yjzZs3q0mTJpo8efJ52584cUKS9PHHH2vjxo3uny1btnhcvyJJpUqVypcac7qes9vNnTtXAwcOVI8ePbR06VJt3LhR9913n9LT0z3alShRwuOxw+GQy+XyeK5Vq1bKysrSu+++ewnvIGeyqyMntQHwLfrR4tmP9unTR4sWLdLy5csVHR2dr3XajACYT5KSkjwer127VjExMfLz81OdOnWUmZnp0ebIkSPavn27rrjiCvdzVapUUa9evTR//nwNGDBA06dPlyQFBARIkrKystxtr7jiCgUGBio5OVm1atXy+KlSpUqO6w4NDVVUVJRWrVrl8fyqVas8asuLVatW6ZprrtFDDz2kRo0aqVatWtq9e/clratp06b65JNPNHr0aI0bN+6CbevUqZPtcQFQONGPnp+N/agxRn369NEHH3ygzz//XDVq1CiQ7dqCU8D5JDk5Wf3799eDDz6ob775RpMnT9b48eMlSTExMWrfvr0eeOABTZs2TSEhIRo8eLAqV66s9u3bS5IeeeQRtWnTRpdffrl+//13LV++XHXq1JEkVatWTQ6HQ4sWLdKtt96qkiVLKiQkRAMHDlS/fv3kcrnUsmVLHTt2TKtWrVJoaKgSEhJyXPujjz6q4cOHq2bNmmrYsKHeeOMNbdy48bwz1HIrJiZGb775pj799FPVqFFDb731lr7++utL/mW+5pprtHjxYrVp00b+/v565JFHsm3Xt29ftWjRQuPGjVP79u316aefXvS0RX45ceKEdu3a5X68d+9ebdy4UeXKlVPVqlULpAagqKEfPT8b+9HExES9/fbbWrBggUJCQnTo0CFJUpkyZVSyZMkCqaE4YwQwn9x77706deqUmjZtqsTERD388MPq2bOne/kbb7yhxo0b67bbblPz5s1ljNHixYvdQ+JZWVlKTExUnTp1dMstt+jyyy/XK6+8IkmqXLmynnrqKQ0ePFjh4eHq06ePJOnpp5/W0KFDNWbMGPfrPv7441x3CH379lX//v01YMAA1atXT0uWLNFHH32kmJiYfNk3Dz74oDp27KguXbqoWbNmOnLkiB566KE8rbNly5b6+OOP9eSTT573FM/VV1+t6dOna+LEiWrQoIGWLl2qJ598Mk/bzal169apUaNG7uuI+vfvr0aNGmnYsGEFsn2gKKIfPT8b+9EpU6bo2LFjuv766xUZGen+eeeddwpk+8Wdw5gzvmQJl+T6669Xw4YN83x7HQCwFf0oULAYAQQAALAMARAAAMAynAIGAACwDCOAAAAAliEAAgAAWIYACAAAYBkCIAAAgGUIgAAAAJYhAAIAAFiGAAgAAGAZAiAAAIBlCIAAAACW+X/Nq86ejfJ5pQAAAABJRU5ErkJggg==\n",
"text/plain": [
"