diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 8cabf0089..e344f2395 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -14,12 +14,18 @@ on: branches: - master - 'feature/*' - types: [opened, synchronize, reopened, ready_for_review] + types: [ opened, synchronize, reopened, ready_for_review ] # Trigger workflow once per week schedule: - cron: '0 0 * * *' # Trigger the workflow on manual dispatch workflow_dispatch: + inputs: + tmate_enabled: + type: boolean + description: 'Enable tmate debugging?' + required: false + default: false jobs: @@ -29,13 +35,13 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: - os: [ubuntu-latest] + os: [ ubuntu-latest ] python-version: [ '3.7', '3.8', '3.9', '3.10' ] include: # Run macos and windows tests on only one python version - - os: windows-latest - python-version: '3.9' # PyTorch doesn't yet have 3.10 support on Windows (https://pytorch.org/get-started/locally/#windows-python) - - os: macos-latest - python-version: '3.10' + - os: windows-latest + python-version: '3.9' # PyTorch doesn't yet have 3.10 support on Windows (https://pytorch.org/get-started/locally/#windows-python) + - os: macos-latest + python-version: '3.10' steps: - name: Checkout code @@ -51,12 +57,18 @@ jobs: python -m pip install --upgrade pip setuptools wheel python -m pip install --upgrade --upgrade-strategy eager -r requirements/dev.txt python -m pip install --upgrade --upgrade-strategy eager -e . - if [ "$RUNNER_OS" != "Windows" ] && [ ${{ matrix.python }} < '3.10' ]; then # Skip Prophet tests on Windows as installation complex. Skip on Python 3.10 as not supported. - python -m pip install --upgrade --upgrade-strategy eager -e .[prophet] + if [ "$RUNNER_OS" == "Linux" ]; then # Currently, we only support KeOps on Linux. + python -m pip install --upgrade --upgrade-strategy eager -e .[keops] fi - python -m pip install --upgrade --upgrade-strategy eager -e .[tensorflow,torch] + python -m pip install --upgrade --upgrade-strategy eager -e .[prophet,tensorflow,torch] python -m pip freeze + - name: Setup tmate session + uses: mxschmitt/action-tmate@v3 + if: ${{ github.event_name == 'workflow_dispatch' && inputs.tmate_enabled }} + with: + limit-access-to-actor: true + - name: Lint with flake8 run: | flake8 alibi_detect @@ -67,12 +79,15 @@ jobs: - name: Test with pytest run: | + if [ "$RUNNER_OS" == "macOS" ]; then # Avoid numba/OpenMP segfault in CVMDrift (https://github.com/SeldonIO/alibi-detect/issues/648) + export NUMBA_THREADING_LAYER="workqueue" + fi pytest alibi_detect - name: Upload coverage to Codecov if: ${{ success() }} run: | - codecov + codecov -F ${{ matrix.os }}-${{ matrix.python-version }} - name: Build Python package run: | diff --git a/.github/workflows/test_all_notebooks.yml b/.github/workflows/test_all_notebooks.yml index 73b0026b6..9ab63f0e8 100644 --- a/.github/workflows/test_all_notebooks.yml +++ b/.github/workflows/test_all_notebooks.yml @@ -41,10 +41,7 @@ jobs: python -m pip install --upgrade pip setuptools wheel python -m pip install --upgrade --upgrade-strategy eager -r requirements/dev.txt -r testing/requirements.txt python -m pip install --upgrade --upgrade-strategy eager -e . - if [ "$RUNNER_OS" != "Windows" ] && [ ${{ matrix.python }} < '3.10' ]; then # Skip Prophet tests on Windows as installation complex. Skip on Python 3.10 as not supported. 
- python -m pip install --upgrade --upgrade-strategy eager -e .[prophet] - fi - python -m pip install --upgrade --upgrade-strategy eager -e .[torch,tensorflow] + python -m pip install --upgrade --upgrade-strategy eager -e .[prophet,torch,tensorflow] python -m pip freeze - name: Run notebooks diff --git a/.github/workflows/test_changed_notebooks.yml b/.github/workflows/test_changed_notebooks.yml index fa275da43..6637e68bb 100644 --- a/.github/workflows/test_changed_notebooks.yml +++ b/.github/workflows/test_changed_notebooks.yml @@ -7,17 +7,17 @@ defaults: shell: bash # To override PowerShell on Windows on: + # Trigger the workflow on push or PR to any branch push: - branches: - - master paths: - 'doc/source/examples/**/*.ipynb' pull_request: - branches: - - master paths: - 'doc/source/examples/**/*.ipynb' + # don't trigger for draft PRs types: [ opened, synchronize, reopened, ready_for_review ] + # Trigger the workflow on manual dispatch + workflow_dispatch: jobs: test_changed_notebooks: @@ -56,10 +56,7 @@ jobs: python -m pip install --upgrade pip setuptools wheel python -m pip install --upgrade --upgrade-strategy eager -r requirements/dev.txt -r testing/requirements.txt python -m pip install --upgrade --upgrade-strategy eager -e . - if [ "$RUNNER_OS" != "Windows" ] && [ ${{ matrix.python }} < '3.10' ]; then # Skip Prophet tests on Windows as installation complex. Skip on Python 3.10 as not supported. - python -m pip install --upgrade --upgrade-strategy eager -e .[prophet] - fi - python -m pip install --upgrade --upgrade-strategy eager -e .[torch,tensorflow] + python -m pip install --upgrade --upgrade-strategy eager -e .[prophet,torch,tensorflow] python -m pip freeze - name: Run notebooks diff --git a/CHANGELOG.md b/CHANGELOG.md index c8e8a28e0..17e380194 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,22 @@ # Change Log +## v0.11.0dev +[Full Changelog](https://github.com/SeldonIO/alibi-detect/compare/v0.10.3...master) + +### Added +- **New feature** MMD drift detector has been extended with a [KeOps](https://www.kernel-operations.io/keops/index.html) backend to scale and speed up the detector. +See the [documentation](https://docs.seldon.io/projects/alibi-detect/en/latest/cd/methods/mmddrift.html) and [example notebook](https://docs.seldon.io/projects/alibi-detect/en/latest/examples/cd_mmd_keops.html) for more info ([#548](https://github.com/SeldonIO/alibi-detect/pull/548)). +- If a `categories_per_feature` dictionary is not passed to `TabularDrift`, a warning is now raised to inform the user that all features are assumed to be numerical ([#606](https://github.com/SeldonIO/alibi-detect/pull/606)). + +### Changed +- Minimum `prophet` version bumped to `1.1.0` (used by `OutlierProphet`). This upgrade removes the dependency on `pystan` as `cmdstanpy` is used instead. This version also comes with pre-built wheels for all major platforms and Python versions, making both installation and testing easier ([#627](https://github.com/SeldonIO/alibi-detect/pull/627)). +- **Breaking change** The configuration field `config_spec` has been removed. In order to load detectors serialized from previous Alibi Detect versions, the field will need to be deleted from the detector's `config.toml` file. However, in any case, serialization compatibility across Alibi Detect versions is not currently guaranteed. ([#641](https://github.com/SeldonIO/alibi-detect/pull/641)). + + +### Development +- UTF-8 decoding is enforced when `README.md` is opened by `setup.py`.
This is to prevent pip install errors on systems with `PYTHONIOENCODING` set to use other encoders ([#605](https://github.com/SeldonIO/alibi-detect/pull/605)). +- Skip specific save/load tests that require downloading remote artefacts if the relevant URI(s) is/are down ([#607](https://github.com/SeldonIO/alibi-detect/pull/607)). + ## v0.10.4 ## [v0.10.4](https://github.com/SeldonIO/alibi-detect/tree/v0.10.4) (2022-10-21) [Full Changelog](https://github.com/SeldonIO/alibi-detect/compare/v0.10.3...v0.10.4) @@ -8,14 +25,12 @@ - Fixed an incorrect default value for the `alternative` kwarg in the `FETDrift` detector ([#661](https://github.com/SeldonIO/alibi-detect/pull/661)). - Fixed an issue with `ClassifierDrift` returning incorrect prediction probabilities when `train_size` given ([#662](https://github.com/SeldonIO/alibi-detect/pull/662)). -## v0.10.3 ## [v0.10.3](https://github.com/SeldonIO/alibi-detect/tree/v0.10.3) (2022-08-17) [Full Changelog](https://github.com/SeldonIO/alibi-detect/compare/v0.10.2...v0.10.3) ### Fixed - Fix to allow `config.toml` files to be loaded when the [meta] field is not present ([#591](https://github.com/SeldonIO/alibi-detect/pull/591)). -## v0.10.2 ## [v0.10.2](https://github.com/SeldonIO/alibi-detect/tree/v0.10.2) (2022-08-16) [Full Changelog](https://github.com/SeldonIO/alibi-detect/compare/v0.10.1...v0.10.2) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 28d15a0fc..a10e54b53 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -36,15 +36,51 @@ options are defined in `setup.cfg`. ## Docstrings We adhere to the `numpy` style docstrings (https://numpydoc.readthedocs.io/en/stable/format.html) with the exception of ommiting argument types in docstrings in favour of type hints in function -and class signatures. If you're using a `PyCharm`, you can configure this under -`File -> Settings -> Tools -> Python Integrated Tools -> Docstrings`. +and class signatures. If you use an IDE, you may be able to configure it to assist you with writing +docstrings in the correct format. For `PyCharm`, you can configure this under +`File -> Settings -> Tools -> Python Integrated Tools -> Docstrings`. For `Visual Studio Code`, you can obtain +docstring generator extensions from the [Visual Studio Marketplace](https://marketplace.visualstudio.com/). + +When documenting Python classes, we adhere to the convention of including docstrings in their `__init__` method, +rather than as a class level docstring. Docstrings should only be included at the class-level if a class does +not possess an `__init__` method, for example because it is a static class. ## Building documentation We use `sphinx` for building documentation. You can call `make build_docs` from the project root, the docs will be built under `doc/_build/html`. ## CI -All PRs triger a Github Actions build to run linting, type checking, tests, and build docs. +All PRs trigger a GitHub Actions build to run linting, type checking, tests, and build docs. The status of each +GitHub Action can be viewed on the [actions page](https://github.com/SeldonIO/alibi-detect/actions). + +### Debugging via CI + +For various reasons, CI runs might occasionally fail. They can often be debugged locally, but sometimes it is helpful +to debug them in the exact environment seen during CI. For this purpose, there is the facility to ssh directly into +the CI GitHub Action runner. + +#### Instructions + +1. Go to the "CI" workflows section on the Alibi Detect GitHub Actions page. + +2.
Click on "Run Workflow", and select the "Enable tmate debugging" toggle. + +3. Select the workflow once it starts, and then select the build of interest (e.g. `ubuntu-latest, 3.10`). + +4. Once the workflow reaches the `Setup tmate session` step, click on the toggle to expand it. + +5. Copy the `ssh` command that is printed there, e.g. `ssh TaHAR65KFbjbSdrkwxNz996TL@nyc1.tmate.io`. + +6. Run the ssh command locally. Assuming your ssh keys are properly set up for GitHub, you should now be inside the GitHub Action runner. + +7. The tmate session is opened after the Python and pip installs are completed, so you should be ready to run `alibi-detect` and debug as required. + +#### Additional notes + +- If the registered public SSH key is not your default private SSH key, you will need to specify the path manually, like so: `ssh -i <path-to-private-key> <ssh-command>`. +- Once you have finished debugging, you can continue the workflow (i.e. let the full CI build run) by running `touch continue` whilst in the root directory (`~/work/alibi-detect/alibi-detect`). This will close the tmate session. +- This new capability is currently temperamental on the `MacOS` build due to [this issue](https://github.com/mxschmitt/action-tmate/issues/69). If the MacOS build fails, all the builds fail. If this happens, it is +recommended to retrigger only the workflow build of interest, e.g. `ubuntu-latest, 3.10`, and then follow the instructions above from step 3. ## Optional Dependencies @@ -104,4 +140,4 @@ replaced with an instance of the MissingDependency class. For example: ... ``` - Developers can use `make repl tox-env=` to run a python REPL with the specified optional dependency -installed. This is to allow manual testing. \ No newline at end of file +installed. This is to allow manual testing. diff --git a/README.md b/README.md index c62f574f6..edd68d3c3 100644 --- a/README.md +++ b/README.md @@ -79,7 +79,7 @@ The package, `alibi-detect` can be installed from: pip install git+https://github.com/SeldonIO/alibi-detect.git ``` -- To install with the tensorflow backend: +- To install with the TensorFlow backend: ```bash pip install alibi-detect[tensorflow] ``` @@ -89,6 +89,11 @@ The package, `alibi-detect` can be installed from: pip install alibi-detect[torch] ``` +- To install with the KeOps backend: + ```bash + pip install alibi-detect[keops] + ``` + - To use the `Prophet` time series outlier detector: ```bash @@ -181,8 +186,8 @@ The following tables show the advised use cases for each algorithm. The column * #### TensorFlow and PyTorch support -The drift detectors support TensorFlow and PyTorch backends. Alibi Detect does not install these as default. See the -[installation options](#installation-and-usage) for more details. +The drift detectors support TensorFlow, PyTorch and (where applicable) [KeOps](https://www.kernel-operations.io/keops/index.html) backends. +However, Alibi Detect does not install these by default. See the [installation options](#installation-and-usage) for more details.
```python from alibi_detect.cd import MMDDrift @@ -198,6 +203,13 @@ cd = MMDDrift(x_ref, backend='pytorch', p_val=.05) preds = cd.predict(x) ``` +Or in KeOps: + +```python +cd = MMDDrift(x_ref, backend='keops', p_val=.05) +preds = cd.predict(x) +``` + #### Built-in preprocessing steps Alibi Detect also comes with various preprocessing steps such as randomly initialized encoders, pretrained text diff --git a/alibi_detect/base.py b/alibi_detect/base.py index db1ef8067..aae6896eb 100644 --- a/alibi_detect/base.py +++ b/alibi_detect/base.py @@ -4,7 +4,7 @@ import numpy as np from typing import Dict, Any, Optional from typing_extensions import Protocol, runtime_checkable -from alibi_detect.version import __version__, __config_spec__ +from alibi_detect.version import __version__ DEFAULT_META = { @@ -119,16 +119,14 @@ def get_config(self) -> dict: # TODO - move to BaseDetector once config save/lo if self.config is not None: # Get config (stored in top-level self) cfg = self.config - # Get low-level nested detector (if needed) - detector = self._detector if hasattr(self, '_detector') else self # type: ignore[attr-defined] - detector = detector._detector if hasattr(detector, '_detector') else detector # type: ignore[attr-defined] # Add large artefacts back to config for key in LARGE_ARTEFACTS: - if key in cfg: # self.config is validated, therefore if a key is not in cfg, it isn't valid to insert - cfg[key] = getattr(detector, key) + if key in cfg and hasattr(self._nested_detector, key): + cfg[key] = getattr(self._nested_detector, key) # Set x_ref_preprocessed flag - preprocess_at_init = getattr(detector, 'preprocess_at_init', True) # If no preprocess_at_init, always true! - cfg['x_ref_preprocessed'] = preprocess_at_init and detector.preprocess_fn is not None + # If no preprocess_at_init, always true! + preprocess_at_init = getattr(self._nested_detector, 'preprocess_at_init', True) + cfg['x_ref_preprocessed'] = preprocess_at_init and self._nested_detector.preprocess_fn is not None return cfg else: raise NotImplementedError('Getting a config (or saving via a config file) is not yet implemented for this' @@ -175,7 +173,6 @@ def _set_config(self, inputs): # TODO - move to BaseDetector once config save/l 'name': name, 'meta': { 'version': __version__, - 'config_spec': __config_spec__, } } @@ -185,17 +182,26 @@ def _set_config(self, inputs): # TODO - move to BaseDetector once config save/l # Overwrite any large artefacts with None to save memory. They'll be added back by get_config() for key in LARGE_ARTEFACTS: - if key in inputs: + if key in inputs and hasattr(self._nested_detector, key): inputs[key] = None self.config.update(inputs) + @property + def _nested_detector(self): + """ + The low-level nested detector. + """ + detector = self._detector if hasattr(self, '_detector') else self # type: ignore[attr-defined] + detector = detector._detector if hasattr(detector, '_detector') else detector # type: ignore[attr-defined] + return detector + @runtime_checkable class Detector(Protocol): """Type Protocol for all detectors. - Used for typing legacy save and load functionality in `alibi_detect.saving.tensorflow._saving.py`. + Used for typing legacy save and load functionality in `alibi_detect.saving._tensorflow.saving.py`. Note: This exists to distinguish between detectors with and without support for config saving and loading. 
Once all diff --git a/alibi_detect/cd/base.py b/alibi_detect/cd/base.py index 997366ddf..f32f682e5 100644 --- a/alibi_detect/cd/base.py +++ b/alibi_detect/cd/base.py @@ -601,11 +601,6 @@ def preprocess(self, x: Union[np.ndarray, list]) -> Tuple[np.ndarray, np.ndarray else: return self.x_ref, x # type: ignore[return-value] - @abstractmethod - def kernel_matrix(self, x: Union['torch.Tensor', 'tf.Tensor'], y: Union['torch.Tensor', 'tf.Tensor']) \ - -> Union['torch.Tensor', 'tf.Tensor']: - pass - @abstractmethod def score(self, x: Union[np.ndarray, list]) -> Tuple[float, float, float]: pass diff --git a/alibi_detect/cd/classifier.py b/alibi_detect/cd/classifier.py index 89b5277ae..180320931 100644 --- a/alibi_detect/cd/classifier.py +++ b/alibi_detect/cd/classifier.py @@ -1,7 +1,7 @@ import numpy as np from typing import Callable, Dict, Optional, Union from alibi_detect.utils.frameworks import has_pytorch, has_tensorflow, \ - BackendValidator + BackendValidator, Framework from alibi_detect.base import DriftConfigMixin @@ -149,9 +149,9 @@ def __init__( backend = backend.lower() BackendValidator( - backend_options={'tensorflow': ['tensorflow'], - 'pytorch': ['pytorch'], - 'sklearn': ['sklearn']}, + backend_options={Framework.TENSORFLOW: [Framework.TENSORFLOW], + Framework.PYTORCH: [Framework.PYTORCH], + Framework.SKLEARN: [Framework.SKLEARN]}, construct_name=self.__class__.__name__ ).verify_backend(backend) @@ -162,13 +162,13 @@ def __init__( pop_kwargs += ['optimizer'] [kwargs.pop(k, None) for k in pop_kwargs] - if backend == 'tensorflow': + if backend == Framework.TENSORFLOW: pop_kwargs = ['device', 'dataloader', 'use_calibration', 'calibration_kwargs', 'use_oob'] [kwargs.pop(k, None) for k in pop_kwargs] if dataset is None: kwargs.update({'dataset': TFDataset}) self._detector = ClassifierDriftTF(*args, **kwargs) # type: ignore - elif backend == 'pytorch': + elif backend == Framework.PYTORCH: pop_kwargs = ['use_calibration', 'calibration_kwargs', 'use_oob'] [kwargs.pop(k, None) for k in pop_kwargs] if dataset is None: diff --git a/alibi_detect/cd/context_aware.py b/alibi_detect/cd/context_aware.py index 4f0093481..bb02c2ad3 100644 --- a/alibi_detect/cd/context_aware.py +++ b/alibi_detect/cd/context_aware.py @@ -1,7 +1,7 @@ import logging import numpy as np from typing import Callable, Dict, Optional, Union, Tuple -from alibi_detect.utils.frameworks import has_pytorch, has_tensorflow, BackendValidator +from alibi_detect.utils.frameworks import has_pytorch, has_tensorflow, BackendValidator, Framework from alibi_detect.utils.warnings import deprecated_alias from alibi_detect.base import DriftConfigMixin @@ -93,8 +93,8 @@ def __init__( backend = backend.lower() BackendValidator( - backend_options={'tensorflow': ['tensorflow'], - 'pytorch': ['pytorch']}, + backend_options={Framework.TENSORFLOW: [Framework.TENSORFLOW], + Framework.PYTORCH: [Framework.PYTORCH]}, construct_name=self.__class__.__name__ ).verify_backend(backend) @@ -104,7 +104,7 @@ def __init__( [kwargs.pop(k, None) for k in pop_kwargs] if x_kernel is None or c_kernel is None: - if backend == 'tensorflow': + if backend == Framework.TENSORFLOW: from alibi_detect.utils.tensorflow.kernels import GaussianRBF else: from alibi_detect.utils.pytorch.kernels import GaussianRBF # type: ignore[no-redef] @@ -113,7 +113,7 @@ def __init__( if c_kernel is None: kwargs.update({'c_kernel': GaussianRBF}) - if backend == 'tensorflow' and has_tensorflow: + if backend == Framework.TENSORFLOW: kwargs.pop('device', None) self._detector = 
ContextMMDDriftTF(*args, **kwargs) # type: ignore else: diff --git a/alibi_detect/cd/keops/__init__.py b/alibi_detect/cd/keops/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/alibi_detect/cd/keops/learned_kernel.py b/alibi_detect/cd/keops/learned_kernel.py new file mode 100644 index 000000000..e3073713d --- /dev/null +++ b/alibi_detect/cd/keops/learned_kernel.py @@ -0,0 +1,342 @@ +from copy import deepcopy +from functools import partial +from tqdm import tqdm +import numpy as np +from pykeops.torch import LazyTensor +import torch +import torch.nn as nn +from torch.utils.data import DataLoader +from typing import Callable, Dict, List, Optional, Union, Tuple +from alibi_detect.cd.base import BaseLearnedKernelDrift +from alibi_detect.utils.pytorch import get_device, predict_batch +from alibi_detect.utils.pytorch.data import TorchDataset +from alibi_detect.utils.frameworks import Framework + + +class LearnedKernelDriftKeops(BaseLearnedKernelDrift): + def __init__( + self, + x_ref: Union[np.ndarray, list], + kernel: Union[nn.Module, nn.Sequential], + p_val: float = .05, + x_ref_preprocessed: bool = False, + preprocess_at_init: bool = True, + update_x_ref: Optional[Dict[str, int]] = None, + preprocess_fn: Optional[Callable] = None, + n_permutations: int = 100, + batch_size_permutations: int = 1000000, + var_reg: float = 1e-5, + reg_loss_fn: Callable = (lambda kernel: 0), + train_size: Optional[float] = .75, + retrain_from_scratch: bool = True, + optimizer: torch.optim.Optimizer = torch.optim.Adam, # type: ignore + learning_rate: float = 1e-3, + batch_size: int = 32, + batch_size_predict: int = 1000000, + preprocess_batch_fn: Optional[Callable] = None, + epochs: int = 3, + num_workers: int = 4, + verbose: int = 0, + train_kwargs: Optional[dict] = None, + device: Optional[str] = None, + dataset: Callable = TorchDataset, + dataloader: Callable = DataLoader, + input_shape: Optional[tuple] = None, + data_type: Optional[str] = None + ) -> None: + """ + Maximum Mean Discrepancy (MMD) data drift detector where the kernel is trained to maximise an + estimate of the test power. The kernel is trained on a split of the reference and test instances + and then the MMD is evaluated on held out instances and a permutation test is performed. + + For details see Liu et al (2020): Learning Deep Kernels for Non-Parametric Two-Sample Tests + (https://arxiv.org/abs/2002.09116) + + Parameters + ---------- + x_ref + Data used as reference distribution. + kernel + Trainable PyTorch module that returns a similarity between two instances. + p_val + p-value used for the significance of the test. + x_ref_preprocessed + Whether the given reference data `x_ref` has been preprocessed yet. If `x_ref_preprocessed=True`, only + the test data `x` will be preprocessed at prediction time. If `x_ref_preprocessed=False`, the reference + data will also be preprocessed. + preprocess_at_init + Whether to preprocess the reference data when the detector is instantiated. Otherwise, the reference + data will be preprocessed at prediction time. Only applies if `x_ref_preprocessed=False`. + update_x_ref + Reference data can optionally be updated to the last n instances seen by the detector + or via reservoir sampling with size n. For the former, the parameter equals {'last': n} while + for reservoir sampling {'reservoir_sampling': n} is passed. + preprocess_fn + Function to preprocess the data before applying the kernel. 
+ n_permutations + The number of permutations to use in the permutation test once the MMD has been computed. + batch_size_permutations + KeOps computes the n_permutations of the MMD^2 statistics in chunks of batch_size_permutations. + var_reg + Constant added to the estimated variance of the MMD for stability. + reg_loss_fn + The regularisation term reg_loss_fn(kernel) is added to the loss function being optimized. + train_size + Optional fraction (float between 0 and 1) of the dataset used to train the kernel. + The drift is detected on `1 - train_size`. + retrain_from_scratch + Whether the kernel should be retrained from scratch for each set of test data or whether + it should instead continue training from where it left off on the previous set. + optimizer + Optimizer used during training of the kernel. + learning_rate + Learning rate used by optimizer. + batch_size + Batch size used during training of the kernel. + batch_size_predict + Batch size used for the trained drift detector predictions. + preprocess_batch_fn + Optional batch preprocessing function. For example to convert a list of objects to a batch which can be + processed by the kernel. + epochs + Number of training epochs for the kernel. Corresponds to the smaller of the reference and test sets. + num_workers + Number of workers for the dataloader. + verbose + Verbosity level during the training of the kernel. 0 is silent, 1 a progress bar. + train_kwargs + Optional additional kwargs when training the kernel. + device + Device type used. The default None tries to use the GPU and falls back on CPU if needed. + Can be specified by passing either 'cuda', 'gpu' or 'cpu'. Relevant for 'pytorch' and 'keops' backends. + dataset + Dataset object used during training. + dataloader + Dataloader object used during training. Only relevant for 'pytorch' backend. + input_shape + Shape of input data. + data_type + Optionally specify the data type (tabular, image or time-series). Added to metadata. 
+ """ + super().__init__( + x_ref=x_ref, + p_val=p_val, + x_ref_preprocessed=x_ref_preprocessed, + preprocess_at_init=preprocess_at_init, + update_x_ref=update_x_ref, + preprocess_fn=preprocess_fn, + n_permutations=n_permutations, + train_size=train_size, + retrain_from_scratch=retrain_from_scratch, + input_shape=input_shape, + data_type=data_type + ) + self.meta.update({'backend': Framework.KEOPS.value}) + + # Set device, define model and training kwargs + self.device = get_device(device) + self.original_kernel = kernel + self.kernel = deepcopy(kernel) + + # Check kernel format + self.has_proj = hasattr(self.kernel, 'proj') and isinstance(self.kernel.proj, nn.Module) + self.has_kernel_b = hasattr(self.kernel, 'kernel_b') and isinstance(self.kernel.kernel_b, nn.Module) + + # Define kwargs for dataloader and trainer + self.dataset = dataset + self.dataloader = partial(dataloader, batch_size=batch_size, shuffle=True, + drop_last=True, num_workers=num_workers) + self.train_kwargs = {'optimizer': optimizer, 'epochs': epochs, 'preprocess_fn': preprocess_batch_fn, + 'reg_loss_fn': reg_loss_fn, 'learning_rate': learning_rate, 'verbose': verbose} + if isinstance(train_kwargs, dict): + self.train_kwargs.update(train_kwargs) + + self.j_hat = LearnedKernelDriftKeops.JHat( + self.kernel, var_reg, self.has_proj, self.has_kernel_b).to(self.device) + + # Set prediction and permutation batch sizes + self.batch_size_predict = batch_size_predict + self.batch_size_perms = batch_size_permutations + self.n_batches = 1 + (n_permutations - 1) // batch_size_permutations + + class JHat(nn.Module): + """ + A module that wraps around the kernel. When passed a batch of reference and batch of test + instances it returns an estimate of a correlate of test power. + Equation 4 of https://arxiv.org/abs/2002.09116 + """ + def __init__(self, kernel: nn.Module, var_reg: float, has_proj: bool, has_kernel_b: bool): + super().__init__() + self.kernel = kernel + self.has_proj = has_proj + self.has_kernel_b = has_kernel_b + self.var_reg = var_reg + + def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor: + n = len(x) + if self.has_proj and isinstance(self.kernel.proj, nn.Module): + x_proj, y_proj = self.kernel.proj(x), self.kernel.proj(y) + else: + x_proj, y_proj = x, y + x2_proj, x_proj = LazyTensor(x_proj[None, :, :]), LazyTensor(x_proj[:, None, :]) + y2_proj, y_proj = LazyTensor(y_proj[None, :, :]), LazyTensor(y_proj[:, None, :]) + if self.has_kernel_b: + x2, x = LazyTensor(x[None, :, :]), LazyTensor(x[:, None, :]) + y2, y = LazyTensor(y[None, :, :]), LazyTensor(y[:, None, :]) + else: + x, x2, y, y2 = None, None, None, None + + k_xy = self.kernel(x_proj, y2_proj, x, y2) + k_xx = self.kernel(x_proj, x2_proj, x, x2) + k_yy = self.kernel(y_proj, y2_proj, y, y2) + h_mat = k_xx + k_yy - k_xy - k_xy.t() + + h_i = h_mat.sum(1).squeeze(-1) + h = h_i.sum() + mmd2_est = (h - n) / (n * (n - 1)) + var_est = 4 * h_i.square().sum() / (n ** 3) - 4 * h.square() / (n ** 4) + reg_var_est = var_est + self.var_reg + + return mmd2_est/reg_var_est.sqrt() + + def score(self, x: Union[np.ndarray, list]) -> Tuple[float, float, float]: + """ + Compute the p-value resulting from a permutation test using the maximum mean discrepancy + as a distance measure between the reference data and the data to be tested. The kernel + used within the MMD is first trained to maximise an estimate of the resulting test power. + + Parameters + ---------- + x + Batch of instances. 
+ + Returns + ------- + p-value obtained from the permutation test, the MMD^2 between the reference and test set, + and the MMD^2 threshold above which drift is flagged. + """ + x_ref, x_cur = self.preprocess(x) + (x_ref_tr, x_cur_tr), (x_ref_te, x_cur_te) = self.get_splits(x_ref, x_cur) + dl_ref_tr, dl_cur_tr = self.dataloader(self.dataset(x_ref_tr)), self.dataloader(self.dataset(x_cur_tr)) + + self.kernel = deepcopy(self.original_kernel) if self.retrain_from_scratch else self.kernel + self.kernel = self.kernel.to(self.device) + train_args = [self.j_hat, (dl_ref_tr, dl_cur_tr), self.device] + LearnedKernelDriftKeops.trainer(*train_args, **self.train_kwargs) # type: ignore + + m, n = len(x_ref_te), len(x_cur_te) + if isinstance(x_ref_te, np.ndarray) and isinstance(x_cur_te, np.ndarray): + x_all = torch.from_numpy(np.concatenate([x_ref_te, x_cur_te], axis=0)).float() + else: + x_all = x_ref_te + x_cur_te # type: ignore[assignment] + + perms = [torch.randperm(m + n) for _ in range(self.n_permutations)] + mmd2, mmd2_permuted = self._mmd2(x_all, perms, m, n) + if self.device.type == 'cuda': + mmd2, mmd2_permuted = mmd2.cpu(), mmd2_permuted.cpu() + p_val = (mmd2 <= mmd2_permuted).float().mean() + + idx_threshold = int(self.p_val * len(mmd2_permuted)) + distance_threshold = torch.sort(mmd2_permuted, descending=True).values[idx_threshold] + return p_val.numpy().item(), mmd2.numpy().item(), distance_threshold.numpy() + + def _mmd2(self, x_all: Union[list, torch.Tensor], perms: List[torch.Tensor], m: int, n: int) \ + -> Tuple[torch.Tensor, torch.Tensor]: + """ + Batched (across the permutations) MMD^2 computation for the original test statistic and the permutations. + + Parameters + ---------- + x_all + Concatenated reference and test instances. + perms + List with permutation vectors. + m + Number of reference instances. + n + Number of test instances. + + Returns + ------- + MMD^2 statistic for the original and permuted reference and test sets. 
+ """ + preprocess_batch_fn = self.train_kwargs['preprocess_fn'] + if isinstance(preprocess_batch_fn, Callable): # type: ignore[arg-type] + x_all = preprocess_batch_fn(x_all) # type: ignore[operator] + if self.has_proj: + x_all_proj = predict_batch(x_all, self.kernel.proj, device=self.device, batch_size=self.batch_size_predict, + dtype=x_all.dtype if isinstance(x_all, torch.Tensor) else torch.float32) + else: + x_all_proj = x_all + + x, x2, y, y2 = None, None, None, None + k_xx, k_yy, k_xy = [], [], [] + for batch in range(self.n_batches): + i, j = batch * self.batch_size_perms, (batch + 1) * self.batch_size_perms + # Stack a batch of permuted reference and test tensors and their projections + x_proj = torch.cat([x_all_proj[perm[:m]][None, :, :] for perm in perms[i:j]], 0) + y_proj = torch.cat([x_all_proj[perm[m:]][None, :, :] for perm in perms[i:j]], 0) + if self.has_kernel_b: + x = torch.cat([x_all[perm[:m]][None, :, :] for perm in perms[i:j]], 0) + y = torch.cat([x_all[perm[m:]][None, :, :] for perm in perms[i:j]], 0) + if batch == 0: + x_proj = torch.cat([x_all_proj[None, :m, :], x_proj], 0) + y_proj = torch.cat([x_all_proj[None, m:, :], y_proj], 0) + if self.has_kernel_b: + x = torch.cat([x_all[None, :m, :], x], 0) # type: ignore[call-overload] + y = torch.cat([x_all[None, m:, :], y], 0) # type: ignore[call-overload] + x_proj, y_proj = x_proj.to(self.device), y_proj.to(self.device) + if self.has_kernel_b: + x, y = x.to(self.device), y.to(self.device) + + # Batch-wise kernel matrix computation over the permutations + with torch.no_grad(): + x2_proj, x_proj = LazyTensor(x_proj[:, None, :, :]), LazyTensor(x_proj[:, :, None, :]) + y2_proj, y_proj = LazyTensor(y_proj[:, None, :, :]), LazyTensor(y_proj[:, :, None, :]) + if self.has_kernel_b: + x2, x = LazyTensor(x[:, None, :, :]), LazyTensor(x[:, :, None, :]) + y2, y = LazyTensor(y[:, None, :, :]), LazyTensor(y[:, :, None, :]) + k_xy.append(self.kernel(x_proj, y2_proj, x, y2).sum(1).sum(1).squeeze(-1)) + k_xx.append(self.kernel(x_proj, x2_proj, x, x2).sum(1).sum(1).squeeze(-1)) + k_yy.append(self.kernel(y_proj, y2_proj, y, y2).sum(1).sum(1).squeeze(-1)) + + c_xx, c_yy, c_xy = 1 / (m * (m - 1)), 1 / (n * (n - 1)), 2. / (m * n) + # Note that the MMD^2 estimates assume that the diagonal of the kernel matrix consists of 1's + stats = c_xx * (torch.cat(k_xx) - m) + c_yy * (torch.cat(k_yy) - n) - c_xy * torch.cat(k_xy) + return stats[0], stats[1:] + + @staticmethod + def trainer( + j_hat: JHat, + dataloaders: Tuple[DataLoader, DataLoader], + device: torch.device, + optimizer: Callable = torch.optim.Adam, + learning_rate: float = 1e-3, + preprocess_fn: Callable = None, + epochs: int = 20, + reg_loss_fn: Callable = (lambda kernel: 0), + verbose: int = 1, + ) -> None: + """ + Train the kernel to maximise an estimate of test power using minibatch gradient descent. + """ + optimizer = optimizer(j_hat.parameters(), lr=learning_rate) + j_hat.train() + loss_ma = 0. 
+ for epoch in range(epochs): + dl_ref, dl_cur = dataloaders + dl = tqdm(enumerate(zip(dl_ref, dl_cur)), total=min(len(dl_ref), len(dl_cur))) if verbose == 1 else \ + enumerate(zip(dl_ref, dl_cur)) + for step, (x_ref, x_cur) in dl: + if isinstance(preprocess_fn, Callable): # type: ignore + x_ref, x_cur = preprocess_fn(x_ref), preprocess_fn(x_cur) + x_ref, x_cur = x_ref.to(device), x_cur.to(device) + optimizer.zero_grad() # type: ignore + estimate = j_hat(x_ref, x_cur) + loss = -estimate + reg_loss_fn(j_hat.kernel) # ascent + loss.backward() + optimizer.step() # type: ignore + if verbose == 1: + loss_ma = loss_ma + (loss.item() - loss_ma) / (step + 1) + dl.set_description(f'Epoch {epoch + 1}/{epochs}') + dl.set_postfix(dict(loss=loss_ma)) diff --git a/alibi_detect/cd/keops/mmd.py b/alibi_detect/cd/keops/mmd.py new file mode 100644 index 000000000..5b1a2fdc0 --- /dev/null +++ b/alibi_detect/cd/keops/mmd.py @@ -0,0 +1,183 @@ +import logging +import numpy as np +from pykeops.torch import LazyTensor +import torch +from typing import Callable, Dict, List, Optional, Tuple, Union +from alibi_detect.cd.base import BaseMMDDrift +from alibi_detect.utils.keops.kernels import GaussianRBF +from alibi_detect.utils.pytorch import get_device +from alibi_detect.utils.frameworks import Framework + +logger = logging.getLogger(__name__) + + +class MMDDriftKeops(BaseMMDDrift): + def __init__( + self, + x_ref: Union[np.ndarray, list], + p_val: float = .05, + x_ref_preprocessed: bool = False, + preprocess_at_init: bool = True, + update_x_ref: Optional[Dict[str, int]] = None, + preprocess_fn: Optional[Callable] = None, + kernel: Callable = GaussianRBF, + sigma: Optional[np.ndarray] = None, + configure_kernel_from_x_ref: bool = True, + n_permutations: int = 100, + batch_size_permutations: int = 1000000, + device: Optional[str] = None, + input_shape: Optional[tuple] = None, + data_type: Optional[str] = None + ) -> None: + """ + Maximum Mean Discrepancy (MMD) data drift detector using a permutation test. + + Parameters + ---------- + x_ref + Data used as reference distribution. + p_val + p-value used for the significance of the permutation test. + x_ref_preprocessed + Whether the given reference data `x_ref` has been preprocessed yet. If `x_ref_preprocessed=True`, only + the test data `x` will be preprocessed at prediction time. If `x_ref_preprocessed=False`, the reference + data will also be preprocessed. + preprocess_at_init + Whether to preprocess the reference data when the detector is instantiated. Otherwise, the reference + data will be preprocessed at prediction time. Only applies if `x_ref_preprocessed=False`. + update_x_ref + Reference data can optionally be updated to the last n instances seen by the detector + or via reservoir sampling with size n. For the former, the parameter equals {'last': n} while + for reservoir sampling {'reservoir_sampling': n} is passed. + preprocess_fn + Function to preprocess the data before computing the data drift metrics. + kernel + Kernel used for the MMD computation, defaults to Gaussian RBF kernel. + sigma + Optionally set the GaussianRBF kernel bandwidth. Can also pass multiple bandwidth values as an array. + The kernel evaluation is then averaged over those bandwidths. + configure_kernel_from_x_ref + Whether to already configure the kernel bandwidth from the reference data. + n_permutations + Number of permutations used in the permutation test. + batch_size_permutations + KeOps computes the n_permutations of the MMD^2 statistics in chunks of batch_size_permutations. 
+ device + Device type used. The default None tries to use the GPU and falls back on CPU if needed. + Can be specified by passing either 'cuda', 'gpu' or 'cpu'. + input_shape + Shape of input data. + data_type + Optionally specify the data type (tabular, image or time-series). Added to metadata. + """ + super().__init__( + x_ref=x_ref, + p_val=p_val, + x_ref_preprocessed=x_ref_preprocessed, + preprocess_at_init=preprocess_at_init, + update_x_ref=update_x_ref, + preprocess_fn=preprocess_fn, + sigma=sigma, + configure_kernel_from_x_ref=configure_kernel_from_x_ref, + n_permutations=n_permutations, + input_shape=input_shape, + data_type=data_type + ) + self.meta.update({'backend': Framework.KEOPS.value}) + + # set device + self.device = get_device(device) + + # initialize kernel + sigma = torch.from_numpy(sigma).to(self.device) if isinstance(sigma, # type: ignore[assignment] + np.ndarray) else None + self.kernel = kernel(sigma).to(self.device) if kernel == GaussianRBF else kernel + + # set the correct MMD^2 function based on the batch size for the permutations + self.batch_size = batch_size_permutations + self.n_batches = 1 + (n_permutations - 1) // batch_size_permutations + + # infer the kernel bandwidth from the reference data + if isinstance(sigma, torch.Tensor): + self.infer_sigma = False + elif self.infer_sigma: + x = torch.from_numpy(self.x_ref).to(self.device) + _ = self.kernel(LazyTensor(x[:, None, :]), LazyTensor(x[None, :, :]), infer_sigma=self.infer_sigma) + self.infer_sigma = False + else: + self.infer_sigma = True + + def _mmd2(self, x_all: torch.Tensor, perms: List[torch.Tensor], m: int, n: int) \ + -> Tuple[torch.Tensor, torch.Tensor]: + """ + Batched (across the permutations) MMD^2 computation for the original test statistic and the permutations. + + Parameters + ---------- + x_all + Concatenated reference and test instances. + perms + List with permutation vectors. + m + Number of reference instances. + n + Number of test instances. + + Returns + ------- + MMD^2 statistic for the original and permuted reference and test sets. + """ + k_xx, k_yy, k_xy = [], [], [] + for batch in range(self.n_batches): + i, j = batch * self.batch_size, (batch + 1) * self.batch_size + # construct stacked tensors with a batch of permutations for the reference set x and test set y + x = torch.cat([x_all[perm[:m]][None, :, :] for perm in perms[i:j]], 0) + y = torch.cat([x_all[perm[m:]][None, :, :] for perm in perms[i:j]], 0) + if batch == 0: + x = torch.cat([x_all[None, :m, :], x], 0) + y = torch.cat([x_all[None, m:, :], y], 0) + x, y = x.to(self.device), y.to(self.device) + + # batch-wise kernel matrix computation over the permutations + k_xy.append(self.kernel( + LazyTensor(x[:, :, None, :]), LazyTensor(y[:, None, :, :]), self.infer_sigma).sum(1).sum(1).squeeze(-1)) + k_xx.append(self.kernel( + LazyTensor(x[:, :, None, :]), LazyTensor(x[:, None, :, :])).sum(1).sum(1).squeeze(-1)) + k_yy.append(self.kernel( + LazyTensor(y[:, :, None, :]), LazyTensor(y[:, None, :, :])).sum(1).sum(1).squeeze(-1)) + c_xx, c_yy, c_xy = 1 / (m * (m - 1)), 1 / (n * (n - 1)), 2. 
/ (m * n) + # Note that the MMD^2 estimates assume that the diagonal of the kernel matrix consists of 1's + stats = c_xx * (torch.cat(k_xx) - m) + c_yy * (torch.cat(k_yy) - n) - c_xy * torch.cat(k_xy) + return stats[0], stats[1:] + + def score(self, x: Union[np.ndarray, list]) -> Tuple[float, float, float]: + """ + Compute the p-value resulting from a permutation test using the maximum mean discrepancy + as a distance measure between the reference data and the data to be tested. + + Parameters + ---------- + x + Batch of instances. + + Returns + ------- + p-value obtained from the permutation test, the MMD^2 between the reference and test set, + and the MMD^2 threshold above which drift is flagged. + """ + x_ref, x = self.preprocess(x) + x_ref = torch.from_numpy(x_ref).float() # type: ignore[assignment] + x = torch.from_numpy(x).float() # type: ignore[assignment] + # compute kernel matrix, MMD^2 and apply permutation test + m, n = x_ref.shape[0], x.shape[0] # type: ignore[union-attr] + perms = [torch.randperm(m + n) for _ in range(self.n_permutations)] + # TODO - Rethink typings (related to https://github.com/SeldonIO/alibi-detect/issues/540) + x_all = torch.cat([x_ref, x], 0) # type: ignore[list-item] + mmd2, mmd2_permuted = self._mmd2(x_all, perms, m, n) + if self.device.type == 'cuda': + mmd2, mmd2_permuted = mmd2.cpu(), mmd2_permuted.cpu() + p_val = (mmd2 <= mmd2_permuted).float().mean() + # compute distance threshold + idx_threshold = int(self.p_val * len(mmd2_permuted)) + distance_threshold = torch.sort(mmd2_permuted, descending=True).values[idx_threshold] + return p_val.numpy().item(), mmd2.numpy().item(), distance_threshold.numpy() diff --git a/alibi_detect/cd/keops/tests/test_learned_kernel_keops.py b/alibi_detect/cd/keops/tests/test_learned_kernel_keops.py new file mode 100644 index 000000000..646027fe3 --- /dev/null +++ b/alibi_detect/cd/keops/tests/test_learned_kernel_keops.py @@ -0,0 +1,130 @@ +from itertools import product +import numpy as np +import pytest +import torch +import torch.nn as nn +from typing import Callable, Optional, Union +from alibi_detect.utils.frameworks import has_keops +from alibi_detect.utils.pytorch import GaussianRBF as GaussianRBFTorch +from alibi_detect.utils.pytorch import mmd2_from_kernel_matrix +if has_keops: + from alibi_detect.cd.keops.learned_kernel import LearnedKernelDriftKeops + from alibi_detect.utils.keops import GaussianRBF + from pykeops.torch import LazyTensor + +n = 50 # number of instances used for the reference and test data samples in the tests + + +if has_keops: + class MyKernel(nn.Module): + def __init__(self, n_features: int, proj: bool): + super().__init__() + sigma = .1 + self.kernel = GaussianRBF(trainable=True, sigma=torch.Tensor([sigma])) + self.has_proj = proj + if proj: + self.proj = nn.Linear(n_features, 2) + self.kernel_b = GaussianRBF(trainable=True, sigma=torch.Tensor([sigma])) + + def forward(self, x_proj: LazyTensor, y_proj: LazyTensor, x: Optional[LazyTensor] = None, + y: Optional[LazyTensor] = None) -> LazyTensor: + similarity = self.kernel(x_proj, y_proj) + if self.has_proj: + similarity = similarity + self.kernel_b(x, y) + return similarity + + +# test List[Any] inputs to the detector +def identity_fn(x: Union[torch.Tensor, list]) -> torch.Tensor: + if isinstance(x, list): + return torch.from_numpy(np.array(x)) + else: + return x + + +p_val = [.05] +n_features = [4] +preprocess_at_init = [True, False] +update_x_ref = [None, {'reservoir_sampling': 1000}] +preprocess_fn = [None, identity_fn] +n_permutations = [10] 
+batch_size_permutations = [5, 1000000] +train_size = [.5] +retrain_from_scratch = [True] +batch_size_predict = [1000000] +preprocess_batch = [None, identity_fn] +has_proj = [True, False] +tests_lkdrift = list(product(p_val, n_features, preprocess_at_init, update_x_ref, preprocess_fn, + n_permutations, batch_size_permutations, train_size, retrain_from_scratch, + batch_size_predict, preprocess_batch, has_proj)) +n_tests = len(tests_lkdrift) + + +@pytest.fixture +def lkdrift_params(request): + return tests_lkdrift[request.param] + + +@pytest.mark.skipif(not has_keops, reason='Skipping since pykeops is not installed.') +@pytest.mark.parametrize('lkdrift_params', list(range(n_tests)), indirect=True) +def test_lkdrift(lkdrift_params): + p_val, n_features, preprocess_at_init, update_x_ref, preprocess_fn, \ + n_permutations, batch_size_permutations, train_size, retrain_from_scratch, \ + batch_size_predict, preprocess_batch, has_proj = lkdrift_params + + np.random.seed(0) + torch.manual_seed(0) + + kernel = MyKernel(n_features, has_proj) + x_ref = np.random.randn(*(n, n_features)).astype(np.float32) + x_test1 = np.ones_like(x_ref) + to_list = False + if preprocess_batch is not None and preprocess_fn is None: + to_list = True + x_ref = [_ for _ in x_ref] + update_x_ref = None + + cd = LearnedKernelDriftKeops( + x_ref=x_ref, + kernel=kernel, + p_val=p_val, + preprocess_at_init=preprocess_at_init, + update_x_ref=update_x_ref, + preprocess_fn=preprocess_fn, + n_permutations=n_permutations, + batch_size_permutations=batch_size_permutations, + train_size=train_size, + retrain_from_scratch=retrain_from_scratch, + batch_size_predict=batch_size_predict, + preprocess_batch_fn=preprocess_batch, + batch_size=32, + epochs=1 + ) + + x_test0 = x_ref.copy() + preds_0 = cd.predict(x_test0) + assert cd.n == len(x_test0) + len(x_ref) + assert preds_0['data']['is_drift'] == 0 + + if to_list: + x_test1 = [_ for _ in x_test1] + preds_1 = cd.predict(x_test1) + assert cd.n == len(x_test1) + len(x_test0) + len(x_ref) + assert preds_1['data']['is_drift'] == 1 + assert preds_0['data']['distance'] < preds_1['data']['distance'] + + # ensure the keops MMD^2 estimate matches the pytorch implementation for the same kernel + if not isinstance(x_ref, list) and update_x_ref is None and not has_proj: + if isinstance(preprocess_fn, Callable): + x_ref, x_test1 = cd.preprocess(x_test1) + n_ref, n_test = x_ref.shape[0], x_test1.shape[0] + x_all = torch.from_numpy(np.concatenate([x_ref, x_test1], axis=0)).float() + perms = [torch.randperm(n_ref + n_test) for _ in range(n_permutations)] + mmd2 = cd._mmd2(x_all, perms, n_ref, n_test)[0] + + if isinstance(preprocess_batch, Callable): + x_all = preprocess_batch(x_all) + kernel = GaussianRBFTorch(sigma=cd.kernel.kernel.sigma) + kernel_mat = kernel(x_all, x_all) + mmd2_torch = mmd2_from_kernel_matrix(kernel_mat, n_test) + np.testing.assert_almost_equal(mmd2, mmd2_torch, decimal=6) diff --git a/alibi_detect/cd/keops/tests/test_mmd_keops.py b/alibi_detect/cd/keops/tests/test_mmd_keops.py new file mode 100644 index 000000000..a64a78173 --- /dev/null +++ b/alibi_detect/cd/keops/tests/test_mmd_keops.py @@ -0,0 +1,120 @@ +from functools import partial +from itertools import product +import numpy as np +import pytest +import torch +import torch.nn as nn +from typing import Callable, List +from alibi_detect.utils.frameworks import has_keops +from alibi_detect.utils.pytorch import GaussianRBF, mmd2_from_kernel_matrix +from alibi_detect.cd.pytorch.preprocess import HiddenOutput, preprocess_drift +if 
has_keops: + from alibi_detect.cd.keops.mmd import MMDDriftKeops + +n, n_hidden, n_classes = 500, 10, 5 + + +class MyModel(nn.Module): + def __init__(self, n_features: int): + super().__init__() + self.dense1 = nn.Linear(n_features, 20) + self.dense2 = nn.Linear(20, 2) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = nn.ReLU()(self.dense1(x)) + return self.dense2(x) + + +# test List[Any] inputs to the detector +def preprocess_list(x: List[np.ndarray]) -> np.ndarray: + return np.concatenate(x, axis=0) + + +n_features = [10] +n_enc = [None, 3] +preprocess = [ + (None, None), + (preprocess_drift, {'model': HiddenOutput, 'layer': -1}), + (preprocess_list, None) +] +update_x_ref = [{'last': 750}, {'reservoir_sampling': 750}, None] +preprocess_at_init = [True, False] +n_permutations = [10] +batch_size_permutations = [10, 1000000] +configure_kernel_from_x_ref = [True, False] +tests_mmddrift = list(product(n_features, n_enc, preprocess, n_permutations, preprocess_at_init, update_x_ref, + batch_size_permutations, configure_kernel_from_x_ref)) +n_tests = len(tests_mmddrift) + + +@pytest.fixture +def mmd_params(request): + return tests_mmddrift[request.param] + + +@pytest.mark.skipif(not has_keops, reason='Skipping since pykeops is not installed.') +@pytest.mark.parametrize('mmd_params', list(range(n_tests)), indirect=True) +def test_mmd(mmd_params): + n_features, n_enc, preprocess, n_permutations, preprocess_at_init, update_x_ref, \ + batch_size_permutations, configure_kernel_from_x_ref = mmd_params + + np.random.seed(0) + torch.manual_seed(0) + + x_ref = np.random.randn(n * n_features).reshape(n, n_features).astype(np.float32) + preprocess_fn, preprocess_kwargs = preprocess + to_list = False + if hasattr(preprocess_fn, '__name__') and preprocess_fn.__name__ == 'preprocess_list': + if not preprocess_at_init: + return + to_list = True + x_ref = [_[None, :] for _ in x_ref] + elif isinstance(preprocess_fn, Callable) and 'layer' in list(preprocess_kwargs.keys()) \ + and preprocess_kwargs['model'].__name__ == 'HiddenOutput': + model = MyModel(n_features) + layer = preprocess_kwargs['layer'] + preprocess_fn = partial(preprocess_fn, model=HiddenOutput(model=model, layer=layer)) + else: + preprocess_fn = None + + cd = MMDDriftKeops( + x_ref=x_ref, + p_val=.05, + preprocess_at_init=preprocess_at_init if isinstance(preprocess_fn, Callable) else False, + update_x_ref=update_x_ref, + preprocess_fn=preprocess_fn, + configure_kernel_from_x_ref=configure_kernel_from_x_ref, + n_permutations=n_permutations, + batch_size_permutations=batch_size_permutations + ) + x = x_ref.copy() + preds = cd.predict(x, return_p_val=True) + assert preds['data']['is_drift'] == 0 and preds['data']['p_val'] >= cd.p_val + if isinstance(update_x_ref, dict): + k = list(update_x_ref.keys())[0] + assert cd.n == len(x) + len(x_ref) + assert cd.x_ref.shape[0] == min(update_x_ref[k], len(x) + len(x_ref)) + + x_h1 = np.random.randn(n * n_features).reshape(n, n_features).astype(np.float32) + if to_list: + x_h1 = [_[None, :] for _ in x_h1] + preds = cd.predict(x_h1, return_p_val=True) + if preds['data']['is_drift'] == 1: + assert preds['data']['p_val'] < preds['data']['threshold'] == cd.p_val + assert preds['data']['distance'] > preds['data']['distance_threshold'] + else: + assert preds['data']['p_val'] >= preds['data']['threshold'] == cd.p_val + assert preds['data']['distance'] <= preds['data']['distance_threshold'] + + # ensure the keops MMD^2 estimate matches the pytorch implementation for the same kernel + if not isinstance(x_ref, 
list) and update_x_ref is None: + p_val, mmd2, distance_threshold = cd.score(x_h1) + kernel = GaussianRBF(sigma=cd.kernel.sigma) + if isinstance(preprocess_fn, Callable): + x_ref, x_h1 = cd.preprocess(x_h1) + x_ref = torch.from_numpy(x_ref).float() + x_h1 = torch.from_numpy(x_h1).float() + x_all = torch.cat([x_ref, x_h1], 0) + kernel_mat = kernel(x_all, x_all) + mmd2_torch = mmd2_from_kernel_matrix(kernel_mat, x_h1.shape[0]) + np.testing.assert_almost_equal(mmd2, mmd2_torch, decimal=6) diff --git a/alibi_detect/cd/learned_kernel.py b/alibi_detect/cd/learned_kernel.py index 308b7a2fa..59288c7ed 100644 --- a/alibi_detect/cd/learned_kernel.py +++ b/alibi_detect/cd/learned_kernel.py @@ -1,6 +1,6 @@ import numpy as np from typing import Callable, Dict, Optional, Union -from alibi_detect.utils.frameworks import has_pytorch, has_tensorflow, BackendValidator +from alibi_detect.utils.frameworks import has_pytorch, has_tensorflow, has_keops, BackendValidator, Framework from alibi_detect.utils.warnings import deprecated_alias from alibi_detect.base import DriftConfigMixin @@ -13,6 +13,9 @@ from alibi_detect.cd.tensorflow.learned_kernel import LearnedKernelDriftTF from alibi_detect.utils.tensorflow.data import TFDataset +if has_keops: + from alibi_detect.cd.keops.learned_kernel import LearnedKernelDriftKeops + class LearnedKernelDrift(DriftConfigMixin): @deprecated_alias(preprocess_x_ref='preprocess_at_init') @@ -27,6 +30,7 @@ def __init__( update_x_ref: Optional[Dict[str, int]] = None, preprocess_fn: Optional[Callable] = None, n_permutations: int = 100, + batch_size_permutations: int = 1000000, var_reg: float = 1e-5, reg_loss_fn: Callable = (lambda kernel: 0), train_size: Optional[float] = .75, @@ -34,6 +38,7 @@ def __init__( optimizer: Optional[Callable] = None, learning_rate: float = 1e-3, batch_size: int = 32, + batch_size_predict: int = 1000000, preprocess_batch_fn: Optional[Callable] = None, epochs: int = 3, verbose: int = 0, @@ -52,7 +57,6 @@ def __init__( For details see Liu et al (2020): Learning Deep Kernels for Non-Parametric Two-Sample Tests (https://arxiv.org/abs/2002.09116) - Parameters ---------- x_ref @@ -78,6 +82,9 @@ def __init__( Function to preprocess the data before applying the kernel. n_permutations The number of permutations to use in the permutation test once the MMD has been computed. + batch_size_permutations + KeOps computes the n_permutations of the MMD^2 statistics in chunks of batch_size_permutations. + Only relevant for 'keops' backend. var_reg Constant added to the estimated variance of the MMD for stability. reg_loss_fn @@ -94,6 +101,8 @@ def __init__( Learning rate used by optimizer. batch_size Batch size used during training of the kernel. + batch_size_predict + Batch size used for the trained drift detector predictions. Only relevant for 'keops' backend. preprocess_batch_fn Optional batch preprocessing function. For example to convert a list of objects to a batch which can be processed by the kernel. @@ -105,11 +114,11 @@ def __init__( Optional additional kwargs when training the kernel. device Device type used. The default None tries to use the GPU and falls back on CPU if needed. - Can be specified by passing either 'cuda', 'gpu' or 'cpu'. Only relevant for 'pytorch' backend. + Can be specified by passing either 'cuda', 'gpu' or 'cpu'. Relevant for 'pytorch' and 'keops' backends. dataset Dataset object used during training. dataloader - Dataloader object used during training. Only relevant for 'pytorch' backend. + Dataloader object used during training. 
Relevant for 'pytorch' and 'keops' backends. input_shape Shape of input data. data_type @@ -122,8 +131,9 @@ def __init__( backend = backend.lower() BackendValidator( - backend_options={'tensorflow': ['tensorflow'], - 'pytorch': ['pytorch']}, + backend_options={Framework.TENSORFLOW: [Framework.TENSORFLOW], + Framework.PYTORCH: [Framework.PYTORCH], + Framework.KEOPS: [Framework.KEOPS]}, construct_name=self.__class__.__name__ ).verify_backend(backend) @@ -134,18 +144,25 @@ def __init__( pop_kwargs += ['optimizer'] [kwargs.pop(k, None) for k in pop_kwargs] - if backend == 'tensorflow' and has_tensorflow: - pop_kwargs = ['device', 'dataloader'] + if backend == Framework.TENSORFLOW: + pop_kwargs = ['device', 'dataloader', 'batch_size_permutations', 'batch_size_predict'] [kwargs.pop(k, None) for k in pop_kwargs] if dataset is None: kwargs.update({'dataset': TFDataset}) - self._detector = LearnedKernelDriftTF(*args, **kwargs) # type: ignore + detector = LearnedKernelDriftTF else: if dataset is None: kwargs.update({'dataset': TorchDataset}) if dataloader is None: kwargs.update({'dataloader': DataLoader}) - self._detector = LearnedKernelDriftTorch(*args, **kwargs) # type: ignore + if backend == Framework.PYTORCH: + pop_kwargs = ['batch_size_permutations', 'batch_size_predict'] + [kwargs.pop(k, None) for k in pop_kwargs] + detector = LearnedKernelDriftTorch + else: + detector = LearnedKernelDriftKeops + + self._detector = detector(*args, **kwargs) # type: ignore self.meta = self._detector.meta def predict(self, x: Union[np.ndarray, list], return_p_val: bool = True, diff --git a/alibi_detect/cd/lsdd.py b/alibi_detect/cd/lsdd.py index d7264898b..e8a45d30f 100644 --- a/alibi_detect/cd/lsdd.py +++ b/alibi_detect/cd/lsdd.py @@ -1,6 +1,6 @@ import numpy as np from typing import Callable, Dict, Optional, Union, Tuple -from alibi_detect.utils.frameworks import has_pytorch, has_tensorflow, BackendValidator +from alibi_detect.utils.frameworks import has_pytorch, has_tensorflow, BackendValidator, Framework from alibi_detect.utils.warnings import deprecated_alias from alibi_detect.base import DriftConfigMixin @@ -82,8 +82,8 @@ def __init__( backend = backend.lower() BackendValidator( - backend_options={'tensorflow': ['tensorflow'], - 'pytorch': ['pytorch']}, + backend_options={Framework.TENSORFLOW: [Framework.TENSORFLOW], + Framework.PYTORCH: [Framework.PYTORCH]}, construct_name=self.__class__.__name__ ).verify_backend(backend) @@ -92,7 +92,7 @@ def __init__( pop_kwargs = ['self', 'x_ref', 'backend', '__class__'] [kwargs.pop(k, None) for k in pop_kwargs] - if backend == 'tensorflow' and has_tensorflow: + if backend == Framework.TENSORFLOW: kwargs.pop('device', None) self._detector = LSDDDriftTF(*args, **kwargs) # type: ignore else: diff --git a/alibi_detect/cd/lsdd_online.py b/alibi_detect/cd/lsdd_online.py index 57a38b3b9..d8d3d5bf6 100644 --- a/alibi_detect/cd/lsdd_online.py +++ b/alibi_detect/cd/lsdd_online.py @@ -1,6 +1,6 @@ import numpy as np from typing import Any, Callable, Dict, Optional, Union -from alibi_detect.utils.frameworks import has_pytorch, has_tensorflow, BackendValidator +from alibi_detect.utils.frameworks import has_pytorch, has_tensorflow, BackendValidator, Framework from alibi_detect.base import DriftConfigMixin if has_pytorch: from alibi_detect.cd.pytorch.lsdd_online import LSDDDriftOnlineTorch @@ -83,8 +83,8 @@ def __init__( backend = backend.lower() BackendValidator( - backend_options={'tensorflow': ['tensorflow'], - 'pytorch': ['pytorch']}, + backend_options={Framework.TENSORFLOW: 
[Framework.TENSORFLOW], + Framework.PYTORCH: [Framework.PYTORCH]}, construct_name=self.__class__.__name__ ).verify_backend(backend) @@ -93,7 +93,7 @@ def __init__( pop_kwargs = ['self', 'x_ref', 'ert', 'window_size', 'backend', '__class__'] [kwargs.pop(k, None) for k in pop_kwargs] - if backend == 'tensorflow' and has_tensorflow: + if backend == Framework.TENSORFLOW: kwargs.pop('device', None) self._detector = LSDDDriftOnlineTF(*args, **kwargs) # type: ignore else: diff --git a/alibi_detect/cd/mmd.py b/alibi_detect/cd/mmd.py index 4391f2ccd..3a0c289a5 100644 --- a/alibi_detect/cd/mmd.py +++ b/alibi_detect/cd/mmd.py @@ -1,7 +1,7 @@ import logging import numpy as np from typing import Callable, Dict, Optional, Union, Tuple -from alibi_detect.utils.frameworks import has_pytorch, has_tensorflow, BackendValidator +from alibi_detect.utils.frameworks import has_pytorch, has_tensorflow, has_keops, BackendValidator, Framework from alibi_detect.utils.warnings import deprecated_alias from alibi_detect.base import DriftConfigMixin @@ -11,6 +11,9 @@ if has_tensorflow: from alibi_detect.cd.tensorflow.mmd import MMDDriftTF +if has_keops and has_pytorch: + from alibi_detect.cd.keops.mmd import MMDDriftKeops + logger = logging.getLogger(__name__) @@ -29,6 +32,7 @@ def __init__( sigma: Optional[np.ndarray] = None, configure_kernel_from_x_ref: bool = True, n_permutations: int = 100, + batch_size_permutations: int = 1000000, device: Optional[str] = None, input_shape: Optional[tuple] = None, data_type: Optional[str] = None @@ -66,6 +70,9 @@ def __init__( Whether to already configure the kernel bandwidth from the reference data. n_permutations Number of permutations used in the permutation test. + batch_size_permutations + KeOps computes the n_permutations of the MMD^2 statistics in chunks of batch_size_permutations. + Only relevant for 'keops' backend. device Device type used. The default None tries to use the GPU and falls back on CPU if needed. Can be specified by passing either 'cuda', 'gpu' or 'cpu'. Only relevant for 'pytorch' backend. 
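As a usage illustration of the new backend and batching argument described above (not part of the patch; requires the optional pykeops dependency):

import numpy as np
from alibi_detect.cd import MMDDrift

# With the 'keops' backend the permutation test is evaluated in chunks of
# `batch_size_permutations`, keeping memory bounded for large reference sets.
x_ref = np.random.randn(1000, 32).astype(np.float32)
x_test = np.random.randn(200, 32).astype(np.float32)

cd = MMDDrift(x_ref, backend='keops', n_permutations=100, batch_size_permutations=1_000_000)
preds = cd.predict(x_test)
print(preds['data']['is_drift'], preds['data']['p_val'])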
@@ -81,28 +88,35 @@ def __init__( backend = backend.lower() BackendValidator( - backend_options={'tensorflow': ['tensorflow'], - 'pytorch': ['pytorch']}, + backend_options={Framework.TENSORFLOW: [Framework.TENSORFLOW], + Framework.PYTORCH: [Framework.PYTORCH], + Framework.KEOPS: [Framework.KEOPS]}, construct_name=self.__class__.__name__ ).verify_backend(backend) kwargs = locals() args = [kwargs['x_ref']] pop_kwargs = ['self', 'x_ref', 'backend', '__class__'] + if backend == Framework.TENSORFLOW: + pop_kwargs += ['device', 'batch_size_permutations'] + detector = MMDDriftTF + elif backend == Framework.PYTORCH: + pop_kwargs += ['batch_size_permutations'] + detector = MMDDriftTorch + else: + detector = MMDDriftKeops [kwargs.pop(k, None) for k in pop_kwargs] if kernel is None: - if backend == 'tensorflow': + if backend == Framework.TENSORFLOW: from alibi_detect.utils.tensorflow.kernels import GaussianRBF - else: + elif backend == Framework.PYTORCH: from alibi_detect.utils.pytorch.kernels import GaussianRBF # type: ignore + else: + from alibi_detect.utils.keops.kernels import GaussianRBF # type: ignore kwargs.update({'kernel': GaussianRBF}) - if backend == 'tensorflow' and has_tensorflow: - kwargs.pop('device', None) - self._detector = MMDDriftTF(*args, **kwargs) # type: ignore - else: - self._detector = MMDDriftTorch(*args, **kwargs) # type: ignore + self._detector = detector(*args, **kwargs) # type: ignore self.meta = self._detector.meta def predict(self, x: Union[np.ndarray, list], return_p_val: bool = True, return_distance: bool = True) \ diff --git a/alibi_detect/cd/mmd_online.py b/alibi_detect/cd/mmd_online.py index 0308c5ad1..075877fe8 100644 --- a/alibi_detect/cd/mmd_online.py +++ b/alibi_detect/cd/mmd_online.py @@ -1,6 +1,6 @@ import numpy as np from typing import Any, Callable, Dict, Optional, Union -from alibi_detect.utils.frameworks import has_pytorch, has_tensorflow, BackendValidator +from alibi_detect.utils.frameworks import has_pytorch, has_tensorflow, BackendValidator, Framework from alibi_detect.base import DriftConfigMixin if has_pytorch: @@ -76,8 +76,8 @@ def __init__( backend = backend.lower() BackendValidator( - backend_options={'tensorflow': ['tensorflow'], - 'pytorch': ['pytorch']}, + backend_options={Framework.TENSORFLOW: [Framework.TENSORFLOW], + Framework.PYTORCH: [Framework.PYTORCH]}, construct_name=self.__class__.__name__ ).verify_backend(backend) @@ -87,13 +87,13 @@ def __init__( [kwargs.pop(k, None) for k in pop_kwargs] if kernel is None: - if backend == 'tensorflow': + if backend == Framework.TENSORFLOW: from alibi_detect.utils.tensorflow.kernels import GaussianRBF else: from alibi_detect.utils.pytorch.kernels import GaussianRBF # type: ignore kwargs.update({'kernel': GaussianRBF}) - if backend == 'tensorflow' and has_tensorflow: + if backend == Framework.TENSORFLOW: kwargs.pop('device', None) self._detector = MMDDriftOnlineTF(*args, **kwargs) # type: ignore else: diff --git a/alibi_detect/cd/model_uncertainty.py b/alibi_detect/cd/model_uncertainty.py index 9804d7c55..fb2527b83 100644 --- a/alibi_detect/cd/model_uncertainty.py +++ b/alibi_detect/cd/model_uncertainty.py @@ -6,7 +6,7 @@ from alibi_detect.cd.chisquare import ChiSquareDrift from alibi_detect.cd.preprocess import classifier_uncertainty, regressor_uncertainty from alibi_detect.cd.utils import encompass_batching, encompass_shuffling_and_batch_filling -from alibi_detect.utils.frameworks import BackendValidator +from alibi_detect.utils.frameworks import BackendValidator, Framework from alibi_detect.base 
import DriftConfigMixin logger = logging.getLogger(__name__) @@ -85,8 +85,8 @@ def __init__( if backend: backend = backend.lower() - BackendValidator(backend_options={'tensorflow': ['tensorflow'], - 'pytorch': ['pytorch'], + BackendValidator(backend_options={Framework.TENSORFLOW: [Framework.TENSORFLOW], + Framework.PYTORCH: [Framework.PYTORCH], None: []}, construct_name=self.__class__.__name__).verify_backend(backend) @@ -238,8 +238,8 @@ def __init__( if backend: backend = backend.lower() - BackendValidator(backend_options={'tensorflow': ['tensorflow'], - 'pytorch': ['pytorch'], + BackendValidator(backend_options={Framework.TENSORFLOW: [Framework.TENSORFLOW], + Framework.PYTORCH: [Framework.PYTORCH], None: []}, construct_name=self.__class__.__name__).verify_backend(backend) @@ -247,10 +247,10 @@ def __init__( model_fn = model else: if uncertainty_type == 'mc_dropout': - if backend == 'pytorch': + if backend == Framework.PYTORCH: from alibi_detect.cd.pytorch.utils import activate_train_mode_for_dropout_layers model = activate_train_mode_for_dropout_layers(model) - elif backend == 'tensorflow': + elif backend == Framework.TENSORFLOW: logger.warning( "MC dropout being applied to tensorflow model. May not be suitable if model contains" "non-dropout layers with different train and inference time behaviour" @@ -268,7 +268,7 @@ def __init__( max_len=max_len ) - if uncertainty_type == 'mc_dropout' and backend == 'tensorflow': + if uncertainty_type == 'mc_dropout' and backend == Framework.TENSORFLOW: # To average over possible batchnorm effects as all layers evaluated in training mode. model_fn = encompass_shuffling_and_batch_filling(model_fn, batch_size=batch_size) diff --git a/alibi_detect/cd/pytorch/classifier.py b/alibi_detect/cd/pytorch/classifier.py index 924105131..9280f2f5c 100644 --- a/alibi_detect/cd/pytorch/classifier.py +++ b/alibi_detect/cd/pytorch/classifier.py @@ -12,6 +12,7 @@ from alibi_detect.utils.pytorch.data import TorchDataset from alibi_detect.utils.pytorch.prediction import predict_batch from alibi_detect.utils.warnings import deprecated_alias +from alibi_detect.utils.frameworks import Framework class ClassifierDriftTorch(BaseClassifierDrift): @@ -138,7 +139,7 @@ def __init__( if preds_type not in ['probs', 'logits']: raise ValueError("'preds_type' should be 'probs' or 'logits'") - self.meta.update({'backend': 'pytorch'}) + self.meta.update({'backend': Framework.PYTORCH.value}) # set device, define model and training kwargs self.device = get_device(device) diff --git a/alibi_detect/cd/pytorch/context_aware.py b/alibi_detect/cd/pytorch/context_aware.py index 337b41922..7b63357ee 100644 --- a/alibi_detect/cd/pytorch/context_aware.py +++ b/alibi_detect/cd/pytorch/context_aware.py @@ -6,6 +6,7 @@ from alibi_detect.utils.pytorch import get_device from alibi_detect.utils.pytorch.kernels import GaussianRBF from alibi_detect.utils.warnings import deprecated_alias +from alibi_detect.utils.frameworks import Framework from alibi_detect.cd._domain_clf import _SVCDomainClf from tqdm import tqdm @@ -101,7 +102,7 @@ def __init__( data_type=data_type, verbose=verbose, ) - self.meta.update({'backend': 'pytorch'}) + self.meta.update({'backend': Framework.PYTORCH.value}) # set device self.device = get_device(device) diff --git a/alibi_detect/cd/pytorch/learned_kernel.py b/alibi_detect/cd/pytorch/learned_kernel.py index bc1ad59d9..2178d018c 100644 --- a/alibi_detect/cd/pytorch/learned_kernel.py +++ b/alibi_detect/cd/pytorch/learned_kernel.py @@ -11,6 +11,7 @@ from 
alibi_detect.utils.pytorch.distance import mmd2_from_kernel_matrix, batch_compute_kernel_matrix from alibi_detect.utils.pytorch.data import TorchDataset from alibi_detect.utils.warnings import deprecated_alias +from alibi_detect.utils.frameworks import Framework class LearnedKernelDriftTorch(BaseLearnedKernelDrift): @@ -124,7 +125,7 @@ def __init__( input_shape=input_shape, data_type=data_type ) - self.meta.update({'backend': 'pytorch'}) + self.meta.update({'backend': Framework.PYTORCH.value}) # set device, define model and training kwargs self.device = get_device(device) diff --git a/alibi_detect/cd/pytorch/lsdd.py b/alibi_detect/cd/pytorch/lsdd.py index 953f6381a..d62bcf60c 100644 --- a/alibi_detect/cd/pytorch/lsdd.py +++ b/alibi_detect/cd/pytorch/lsdd.py @@ -6,6 +6,7 @@ from alibi_detect.utils.pytorch.kernels import GaussianRBF from alibi_detect.utils.pytorch.distance import permed_lsdds from alibi_detect.utils.warnings import deprecated_alias +from alibi_detect.utils.frameworks import Framework class LSDDDriftTorch(BaseLSDDDrift): @@ -83,7 +84,7 @@ def __init__( input_shape=input_shape, data_type=data_type ) - self.meta.update({'backend': 'pytorch'}) + self.meta.update({'backend': Framework.PYTORCH.value}) # set device self.device = get_device(device) diff --git a/alibi_detect/cd/pytorch/lsdd_online.py b/alibi_detect/cd/pytorch/lsdd_online.py index 256cda282..92c1401c7 100644 --- a/alibi_detect/cd/pytorch/lsdd_online.py +++ b/alibi_detect/cd/pytorch/lsdd_online.py @@ -5,6 +5,7 @@ from alibi_detect.cd.base_online import BaseMultiDriftOnline from alibi_detect.utils.pytorch import get_device from alibi_detect.utils.pytorch import GaussianRBF, permed_lsdds, quantile +from alibi_detect.utils.frameworks import Framework class LSDDDriftOnlineTorch(BaseMultiDriftOnline): @@ -82,7 +83,7 @@ def __init__( input_shape=input_shape, data_type=data_type ) - self.meta.update({'backend': 'pytorch'}) + self.meta.update({'backend': Framework.PYTORCH.value}) self.n_kernel_centers = n_kernel_centers self.lambda_rd_max = lambda_rd_max diff --git a/alibi_detect/cd/pytorch/mmd.py b/alibi_detect/cd/pytorch/mmd.py index 6f279d09b..666942b6c 100644 --- a/alibi_detect/cd/pytorch/mmd.py +++ b/alibi_detect/cd/pytorch/mmd.py @@ -7,6 +7,7 @@ from alibi_detect.utils.pytorch.distance import mmd2_from_kernel_matrix from alibi_detect.utils.pytorch.kernels import GaussianRBF from alibi_detect.utils.warnings import deprecated_alias +from alibi_detect.utils.frameworks import Framework logger = logging.getLogger(__name__) @@ -81,7 +82,7 @@ def __init__( input_shape=input_shape, data_type=data_type ) - self.meta.update({'backend': 'pytorch'}) + self.meta.update({'backend': Framework.PYTORCH.value}) # set device self.device = get_device(device) diff --git a/alibi_detect/cd/pytorch/mmd_online.py b/alibi_detect/cd/pytorch/mmd_online.py index 3ea77c165..808fe5c5d 100644 --- a/alibi_detect/cd/pytorch/mmd_online.py +++ b/alibi_detect/cd/pytorch/mmd_online.py @@ -6,6 +6,7 @@ from alibi_detect.utils.pytorch import get_device from alibi_detect.utils.pytorch.kernels import GaussianRBF from alibi_detect.utils.pytorch import zero_diag, quantile +from alibi_detect.utils.frameworks import Framework class MMDDriftOnlineTorch(BaseMultiDriftOnline): @@ -75,7 +76,7 @@ def __init__( input_shape=input_shape, data_type=data_type ) - self.meta.update({'backend': 'pytorch'}) + self.meta.update({'backend': Framework.PYTORCH.value}) # set device self.device = get_device(device) diff --git a/alibi_detect/cd/sklearn/classifier.py 
b/alibi_detect/cd/sklearn/classifier.py index 2352bf8bf..48a6e7080 100644 --- a/alibi_detect/cd/sklearn/classifier.py +++ b/alibi_detect/cd/sklearn/classifier.py @@ -8,6 +8,7 @@ from sklearn.ensemble import RandomForestClassifier from alibi_detect.cd.base import BaseClassifierDrift from alibi_detect.utils.warnings import deprecated_alias +from alibi_detect.utils.frameworks import Framework logger = logging.getLogger(__name__) @@ -112,7 +113,7 @@ def __init__( if preds_type not in ['probs', 'scores']: raise ValueError("'preds_type' should be 'probs' or 'scores'") - self.meta.update({'backend': 'sklearn'}) + self.meta.update({'backend': Framework.SKLEARN.value}) self.original_model = model self.use_calibration = use_calibration self.calibration_kwargs = dict() if calibration_kwargs is None else calibration_kwargs diff --git a/alibi_detect/cd/spot_the_diff.py b/alibi_detect/cd/spot_the_diff.py index d24be2fc7..9c5c63a92 100644 --- a/alibi_detect/cd/spot_the_diff.py +++ b/alibi_detect/cd/spot_the_diff.py @@ -1,6 +1,6 @@ import numpy as np from typing import Callable, Dict, Optional, Union -from alibi_detect.utils.frameworks import has_pytorch, has_tensorflow, BackendValidator +from alibi_detect.utils.frameworks import has_pytorch, has_tensorflow, BackendValidator, Framework from alibi_detect.base import DriftConfigMixin if has_pytorch: @@ -127,8 +127,8 @@ def __init__( backend = backend.lower() BackendValidator( - backend_options={'tensorflow': ['tensorflow'], - 'pytorch': ['pytorch']}, + backend_options={Framework.TENSORFLOW: [Framework.TENSORFLOW], + Framework.PYTORCH: [Framework.PYTORCH]}, construct_name=self.__class__.__name__ ).verify_backend(backend) kwargs = locals() @@ -138,7 +138,7 @@ def __init__( pop_kwargs += ['optimizer'] [kwargs.pop(k, None) for k in pop_kwargs] - if backend == 'tensorflow' and has_tensorflow: + if backend == Framework.TENSORFLOW: pop_kwargs = ['device', 'dataloader'] [kwargs.pop(k, None) for k in pop_kwargs] if dataset is None: diff --git a/alibi_detect/cd/tabular.py b/alibi_detect/cd/tabular.py index 94730771a..941de2e02 100644 --- a/alibi_detect/cd/tabular.py +++ b/alibi_detect/cd/tabular.py @@ -3,6 +3,7 @@ from typing import Callable, Dict, List, Optional, Tuple, Union from alibi_detect.cd.base import BaseUnivariateDrift from alibi_detect.utils.warnings import deprecated_alias +import warnings class TabularDrift(BaseUnivariateDrift): @@ -88,7 +89,7 @@ def __init__( self._set_config(locals()) self.alternative = alternative - self.x_ref_categories, self.cat_vars = {}, [] # no categorical features assumed present + # Parse categories_per_feature dict if isinstance(categories_per_feature, dict): vals = list(categories_per_feature.values()) int_types = (int, np.int16, np.int32, np.int64) @@ -106,6 +107,11 @@ def __init__( 'Dict[int, NoneType], Dict[int, int], Dict[int, List[int]]') self.x_ref_categories = categories_per_feature self.cat_vars = list(self.x_ref_categories.keys()) + # No categories_per_feature dict so assume no categorical features present + else: + self.x_ref_categories, self.cat_vars = {}, [] + warnings.warn('No `categories_per_feature` dict provided so all features are assumed to be numerical. 
' + '`KSDrift` will be applied to all features.') def feature_score(self, x_ref: np.ndarray, x: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: """ diff --git a/alibi_detect/cd/tensorflow/classifier.py b/alibi_detect/cd/tensorflow/classifier.py index a40980b40..58911ec78 100644 --- a/alibi_detect/cd/tensorflow/classifier.py +++ b/alibi_detect/cd/tensorflow/classifier.py @@ -10,6 +10,7 @@ from alibi_detect.utils.tensorflow.misc import clone_model from alibi_detect.utils.tensorflow.prediction import predict_batch from alibi_detect.utils.warnings import deprecated_alias +from alibi_detect.utils.frameworks import Framework class ClassifierDriftTF(BaseClassifierDrift): @@ -129,7 +130,7 @@ def __init__( if preds_type not in ['probs', 'logits']: raise ValueError("'preds_type' should be 'probs' or 'logits'") - self.meta.update({'backend': 'tensorflow'}) + self.meta.update({'backend': Framework.TENSORFLOW.value}) # define and compile classifier model self.original_model = model diff --git a/alibi_detect/cd/tensorflow/context_aware.py b/alibi_detect/cd/tensorflow/context_aware.py index 1ee5b94e0..6f9b773e4 100644 --- a/alibi_detect/cd/tensorflow/context_aware.py +++ b/alibi_detect/cd/tensorflow/context_aware.py @@ -6,6 +6,7 @@ from alibi_detect.cd.base import BaseContextMMDDrift from alibi_detect.utils.tensorflow.kernels import GaussianRBF from alibi_detect.utils.warnings import deprecated_alias +from alibi_detect.utils.frameworks import Framework from alibi_detect.cd._domain_clf import _SVCDomainClf from tqdm import tqdm @@ -97,7 +98,7 @@ def __init__( data_type=data_type, verbose=verbose ) - self.meta.update({'backend': 'tensorflow'}) + self.meta.update({'backend': Framework.TENSORFLOW.value}) # initialize kernel self.x_kernel = x_kernel(init_sigma_fn=_sigma_median_diag) if x_kernel == GaussianRBF else x_kernel diff --git a/alibi_detect/cd/tensorflow/learned_kernel.py b/alibi_detect/cd/tensorflow/learned_kernel.py index 9a06f05f6..a838b21f7 100644 --- a/alibi_detect/cd/tensorflow/learned_kernel.py +++ b/alibi_detect/cd/tensorflow/learned_kernel.py @@ -7,6 +7,7 @@ from alibi_detect.utils.tensorflow.misc import clone_model from alibi_detect.utils.tensorflow.distance import mmd2_from_kernel_matrix, batch_compute_kernel_matrix from alibi_detect.utils.warnings import deprecated_alias +from alibi_detect.utils.frameworks import Framework class LearnedKernelDriftTF(BaseLearnedKernelDrift): @@ -113,7 +114,7 @@ def __init__( input_shape=input_shape, data_type=data_type ) - self.meta.update({'backend': 'tensorflow'}) + self.meta.update({'backend': Framework.TENSORFLOW.value}) # define and compile kernel self.original_kernel = kernel diff --git a/alibi_detect/cd/tensorflow/lsdd.py b/alibi_detect/cd/tensorflow/lsdd.py index 404767799..ef0335ae9 100644 --- a/alibi_detect/cd/tensorflow/lsdd.py +++ b/alibi_detect/cd/tensorflow/lsdd.py @@ -5,6 +5,7 @@ from alibi_detect.utils.tensorflow.kernels import GaussianRBF from alibi_detect.utils.tensorflow.distance import permed_lsdds from alibi_detect.utils.warnings import deprecated_alias +from alibi_detect.utils.frameworks import Framework class LSDDDriftTF(BaseLSDDDrift): @@ -78,7 +79,7 @@ def __init__( input_shape=input_shape, data_type=data_type ) - self.meta.update({'backend': 'tensorflow'}) + self.meta.update({'backend': Framework.TENSORFLOW.value}) if self.preprocess_at_init or self.preprocess_fn is None or self.x_ref_preprocessed: x_ref = tf.convert_to_tensor(self.x_ref) diff --git a/alibi_detect/cd/tensorflow/lsdd_online.py 
b/alibi_detect/cd/tensorflow/lsdd_online.py index 97baea3b9..540884c5f 100644 --- a/alibi_detect/cd/tensorflow/lsdd_online.py +++ b/alibi_detect/cd/tensorflow/lsdd_online.py @@ -4,6 +4,7 @@ from typing import Any, Callable, Optional, Union from alibi_detect.cd.base_online import BaseMultiDriftOnline from alibi_detect.utils.tensorflow import GaussianRBF, quantile, permed_lsdds +from alibi_detect.utils.frameworks import Framework class LSDDDriftOnlineTF(BaseMultiDriftOnline): @@ -77,7 +78,7 @@ def __init__( input_shape=input_shape, data_type=data_type ) - self.meta.update({'backend': 'tensorflow'}) + self.meta.update({'backend': Framework.TENSORFLOW.value}) self.n_kernel_centers = n_kernel_centers self.lambda_rd_max = lambda_rd_max diff --git a/alibi_detect/cd/tensorflow/mmd.py b/alibi_detect/cd/tensorflow/mmd.py index 1de8d908a..977e1d18c 100644 --- a/alibi_detect/cd/tensorflow/mmd.py +++ b/alibi_detect/cd/tensorflow/mmd.py @@ -6,6 +6,7 @@ from alibi_detect.utils.tensorflow.distance import mmd2_from_kernel_matrix from alibi_detect.utils.tensorflow.kernels import GaussianRBF from alibi_detect.utils.warnings import deprecated_alias +from alibi_detect.utils.frameworks import Framework logger = logging.getLogger(__name__) @@ -76,7 +77,7 @@ def __init__( input_shape=input_shape, data_type=data_type ) - self.meta.update({'backend': 'tensorflow'}) + self.meta.update({'backend': Framework.TENSORFLOW.value}) # initialize kernel if isinstance(sigma, np.ndarray): diff --git a/alibi_detect/cd/tensorflow/mmd_online.py b/alibi_detect/cd/tensorflow/mmd_online.py index 8552802a6..3d4a6b57a 100644 --- a/alibi_detect/cd/tensorflow/mmd_online.py +++ b/alibi_detect/cd/tensorflow/mmd_online.py @@ -5,6 +5,7 @@ from alibi_detect.cd.base_online import BaseMultiDriftOnline from alibi_detect.utils.tensorflow.kernels import GaussianRBF from alibi_detect.utils.tensorflow import zero_diag, quantile, subset_matrix +from alibi_detect.utils.frameworks import Framework class MMDDriftOnlineTF(BaseMultiDriftOnline): @@ -70,7 +71,7 @@ def __init__( input_shape=input_shape, data_type=data_type ) - self.meta.update({'backend': 'tensorflow'}) + self.meta.update({'backend': Framework.TENSORFLOW.value}) # initialize kernel if isinstance(sigma, np.ndarray): diff --git a/alibi_detect/cd/tests/test_learned_kernel.py b/alibi_detect/cd/tests/test_learned_kernel.py index 86cb960d2..e14565bf6 100644 --- a/alibi_detect/cd/tests/test_learned_kernel.py +++ b/alibi_detect/cd/tests/test_learned_kernel.py @@ -7,6 +7,10 @@ from alibi_detect.cd import LearnedKernelDrift from alibi_detect.cd.pytorch.learned_kernel import LearnedKernelDriftTorch from alibi_detect.cd.tensorflow.learned_kernel import LearnedKernelDriftTF +from alibi_detect.utils.frameworks import has_keops +if has_keops: + from alibi_detect.cd.keops.learned_kernel import LearnedKernelDriftKeops + from pykeops.torch import LazyTensor n, n_features = 100, 5 @@ -37,7 +41,16 @@ def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor: return torch.einsum('ji,ki->jk', self.dense(x), self.dense(y)) -tests_lkdrift = ['tensorflow', 'pytorch', 'PyToRcH', 'mxnet'] +if has_keops: + class MyKernelKeops(nn.Module): + def __init__(self): + super().__init__() + + def forward(self, x: LazyTensor, y: LazyTensor) -> LazyTensor: + return (- ((x - y) ** 2).sum(-1)).exp() + + +tests_lkdrift = ['tensorflow', 'pytorch', 'keops', 'PyToRcH', 'mxnet'] n_tests = len(tests_lkdrift) @@ -53,6 +66,8 @@ def test_lkdrift(lkdrift_params): kernel = MyKernelTorch(n_features) elif backend.lower() == 
'tensorflow': kernel = MyKernelTF(n_features) + elif has_keops and backend.lower() == 'keops': + kernel = MyKernelKeops() else: kernel = None x_ref = np.random.randn(*(n, n_features)) @@ -61,10 +76,15 @@ def test_lkdrift(lkdrift_params): cd = LearnedKernelDrift(x_ref=x_ref, kernel=kernel, backend=backend) except NotImplementedError: cd = None + except ImportError: + assert not has_keops + cd = None if backend.lower() == 'pytorch': assert isinstance(cd._detector, LearnedKernelDriftTorch) elif backend.lower() == 'tensorflow': assert isinstance(cd._detector, LearnedKernelDriftTF) + elif has_keops and backend.lower() == 'keops': + assert isinstance(cd._detector, LearnedKernelDriftKeops) else: assert cd is None diff --git a/alibi_detect/cd/tests/test_mmd.py b/alibi_detect/cd/tests/test_mmd.py index 33e776e14..c070dcaeb 100644 --- a/alibi_detect/cd/tests/test_mmd.py +++ b/alibi_detect/cd/tests/test_mmd.py @@ -3,10 +3,13 @@ from alibi_detect.cd import MMDDrift from alibi_detect.cd.pytorch.mmd import MMDDriftTorch from alibi_detect.cd.tensorflow.mmd import MMDDriftTF +from alibi_detect.utils.frameworks import has_keops +if has_keops: + from alibi_detect.cd.keops.mmd import MMDDriftKeops n, n_features = 100, 5 -tests_mmddrift = ['tensorflow', 'pytorch', 'PyToRcH', 'mxnet'] +tests_mmddrift = ['tensorflow', 'pytorch', 'keops', 'PyToRcH', 'mxnet'] n_tests = len(tests_mmddrift) @@ -18,16 +21,18 @@ def mmddrift_params(request): @pytest.mark.parametrize('mmddrift_params', list(range(n_tests)), indirect=True) def test_mmddrift(mmddrift_params): backend = mmddrift_params - x_ref = np.random.randn(*(n, n_features)) + x_ref = np.random.randn(*(n, n_features)).astype('float32') try: cd = MMDDrift(x_ref=x_ref, backend=backend) - except NotImplementedError: + except (NotImplementedError, ImportError): cd = None if backend.lower() == 'pytorch': assert isinstance(cd._detector, MMDDriftTorch) elif backend.lower() == 'tensorflow': assert isinstance(cd._detector, MMDDriftTF) + elif backend.lower() == 'keops' and has_keops: + assert isinstance(cd._detector, MMDDriftKeops) else: assert cd is None diff --git a/alibi_detect/cd/utils.py b/alibi_detect/cd/utils.py index 33959c67c..f8f19e9b0 100644 --- a/alibi_detect/cd/utils.py +++ b/alibi_detect/cd/utils.py @@ -4,6 +4,7 @@ import numpy as np from alibi_detect.utils.sampling import reservoir_sampling +from alibi_detect.utils.frameworks import Framework logger = logging.getLogger(__name__) @@ -63,9 +64,9 @@ def encompass_batching( backend = backend.lower() kwargs = {'batch_size': batch_size, 'tokenizer': tokenizer, 'max_len': max_len, 'preprocess_batch_fn': preprocess_batch_fn} - if backend == 'tensorflow': + if backend == Framework.TENSORFLOW: from alibi_detect.cd.tensorflow.preprocess import preprocess_drift - elif backend == 'pytorch': + elif backend == Framework.PYTORCH: from alibi_detect.cd.pytorch.preprocess import preprocess_drift # type: ignore[no-redef] kwargs['device'] = device else: diff --git a/alibi_detect/datasets.py b/alibi_detect/datasets.py index 507a25ba7..f3015e06a 100644 --- a/alibi_detect/datasets.py +++ b/alibi_detect/datasets.py @@ -11,6 +11,7 @@ from alibi_detect.utils.data import Bunch from alibi_detect.utils.url import _join_url from requests import RequestException +from urllib.error import URLError from scipy.io import arff from sklearn.datasets import fetch_kddcup99 @@ -59,7 +60,11 @@ def fetch_kdd(target: list = ['dos', 'r2l', 'u2r', 'probe'], """ # fetch raw data - data_raw = fetch_kddcup99(subset=None, data_home=None, percent10=percent10) + 
try: + data_raw = fetch_kddcup99(subset=None, data_home=None, percent10=percent10) + except URLError: + logger.exception("Could not connect, URL may be out of service") + raise # specify columns cols = ['duration', 'protocol_type', 'service', 'flag', 'src_bytes', 'dst_bytes', diff --git a/alibi_detect/od/prophet.py b/alibi_detect/od/prophet.py index 2c0d90865..787517791 100644 --- a/alibi_detect/od/prophet.py +++ b/alibi_detect/od/prophet.py @@ -1,4 +1,4 @@ -from fbprophet import Prophet +from prophet import Prophet import logging import pandas as pd from typing import Dict, List, Union diff --git a/alibi_detect/od/tests/test_prophet.py b/alibi_detect/od/tests/test_prophet.py index ef1c7169c..9ccd2c11d 100644 --- a/alibi_detect/od/tests/test_prophet.py +++ b/alibi_detect/od/tests/test_prophet.py @@ -33,10 +33,10 @@ def prophet_params(request): @pytest.mark.parametrize('prophet_params', list(range(n_tests)), indirect=True) def test_prophet(prophet_params): - fbprophet = pytest.importorskip('fbprophet', reason="Prophet tests skipped as Prophet not installed") + prophet = pytest.importorskip('prophet', reason="Prophet tests skipped as Prophet not installed") growth, return_instance_score, return_forecast = prophet_params od = OutlierProphet(growth=growth) - assert isinstance(od.model, fbprophet.forecaster.Prophet) + assert isinstance(od.model, prophet.forecaster.Prophet) assert od.meta == {'name': 'OutlierProphet', 'detector_type': 'outlier', 'data_type': 'time-series', 'online': False, 'version': __version__} if growth == 'logistic': diff --git a/alibi_detect/saving/_sklearn/__init__.py b/alibi_detect/saving/_sklearn/__init__.py new file mode 100644 index 000000000..59b424eb6 --- /dev/null +++ b/alibi_detect/saving/_sklearn/__init__.py @@ -0,0 +1,7 @@ +from alibi_detect.saving._sklearn.saving import save_model_config as save_model_config_sk +from alibi_detect.saving._sklearn.loading import load_model as load_model_sk + +__all__ = [ + "save_model_config_sk", + "load_model_sk" +] diff --git a/alibi_detect/saving/_sklearn/loading.py b/alibi_detect/saving/_sklearn/loading.py new file mode 100644 index 000000000..b6ed23912 --- /dev/null +++ b/alibi_detect/saving/_sklearn/loading.py @@ -0,0 +1,26 @@ +import os +from pathlib import Path +from typing import Union + +import joblib +from sklearn.base import BaseEstimator + + +def load_model(filepath: Union[str, os.PathLike], + ) -> BaseEstimator: + """ + Load scikit-learn (or xgboost) model. Models are assumed to be a subclass of :class:`~sklearn.base.BaseEstimator`. + This includes xgboost models following the scikit-learn API + (see https://xgboost.readthedocs.io/en/latest/python/python_api.html#module-xgboost.sklearn). + + Parameters + ---------- + filepath + Saved model directory. + + Returns + ------- + Loaded model. + """ + model_dir = Path(filepath) + return joblib.load(model_dir.joinpath('model.joblib')) diff --git a/alibi_detect/saving/_sklearn/saving.py b/alibi_detect/saving/_sklearn/saving.py new file mode 100644 index 000000000..b903bc2f7 --- /dev/null +++ b/alibi_detect/saving/_sklearn/saving.py @@ -0,0 +1,69 @@ +import logging +import os +from pathlib import Path +from typing import Union +import joblib +from sklearn.base import BaseEstimator + +from alibi_detect.utils.frameworks import Framework + +logger = logging.getLogger(__name__) + + +def save_model_config(model: BaseEstimator, + base_path: Path, + local_path: Path = Path('.')) -> dict: + """ + Save a scikit-learn (or xgboost) model to a config dictionary. 
+ Models are assumed to be a subclass of :class:`~sklearn.base.BaseEstimator`. This includes xgboost models + following the scikit-learn API + (see https://xgboost.readthedocs.io/en/latest/python/python_api.html#module-xgboost.sklearn). + + Parameters + ---------- + model + The model to save. + base_path + Base filepath to save to (the location of the `config.toml` file). + local_path + A local (relative) filepath to append to base_path. + + Returns + ------- + The model config dict. + """ + filepath = base_path.joinpath(local_path) + save_model(model, filepath=filepath, save_dir='model') + cfg_model = { + 'flavour': Framework.SKLEARN.value, + 'src': local_path.joinpath('model') + } + return cfg_model + + +def save_model(model: BaseEstimator, + filepath: Union[str, os.PathLike], + save_dir: Union[str, os.PathLike] = 'model') -> None: + """ + Save scikit-learn (and xgboost) models. Models are assumed to be a subclass of :class:`~sklearn.base.BaseEstimator`. + This includes xgboost models following the scikit-learn API + (see https://xgboost.readthedocs.io/en/latest/python/python_api.html#module-xgboost.sklearn). + + Parameters + ---------- + model + The tf.keras.Model to save. + filepath + Save directory. + save_dir + Name of folder to save to within the filepath directory. + """ + # create folder to save model in + model_path = Path(filepath).joinpath(save_dir) + if not model_path.is_dir(): + logger.warning('Directory {} does not exist and is now created.'.format(model_path)) + model_path.mkdir(parents=True, exist_ok=True) + + # save model + model_path = model_path.joinpath('model.joblib') + joblib.dump(model, model_path) diff --git a/alibi_detect/saving/_sklearn/tests/test_saving_sk.py b/alibi_detect/saving/_sklearn/tests/test_saving_sk.py new file mode 100644 index 000000000..3bc588553 --- /dev/null +++ b/alibi_detect/saving/_sklearn/tests/test_saving_sk.py @@ -0,0 +1,32 @@ +from pytest_cases import param_fixture, parametrize, parametrize_with_cases + +from alibi_detect.saving.tests.datasets import ContinuousData +from alibi_detect.saving.tests.models import classifier_model, xgb_classifier_model + +from alibi_detect.saving.loading import _load_model_config +from alibi_detect.saving.saving import _path2str, _save_model_config +from alibi_detect.saving.schemas import ModelConfig + +backend = param_fixture("backend", ['sklearn']) + + +@parametrize_with_cases("data", cases=ContinuousData.data_synthetic_nd, prefix='data_') +@parametrize('model', [classifier_model, xgb_classifier_model]) +def test_save_model_sk(data, model, tmp_path): + """ + Unit test for _save_model_config and _load_model_config with scikit-learn and xgboost model. 
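For reference, the round trip performed by these new scikit-learn helpers can be reproduced stand-alone: the estimator is dumped to '<dir>/model/model.joblib' with joblib and later reloaded from the same path. A sketch using a hypothetical target directory:

from pathlib import Path

import joblib
from sklearn.linear_model import LogisticRegression

# Hypothetical directory layout matching save_model/load_model above.
model_dir = Path('my_detector/model')
model_dir.mkdir(parents=True, exist_ok=True)

clf = LogisticRegression().fit([[0.0], [1.0]], [0, 1])
joblib.dump(clf, model_dir / 'model.joblib')

clf_loaded = joblib.load(model_dir / 'model.joblib')
assert isinstance(clf_loaded, LogisticRegression)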
+ """ + # Save model + filepath = tmp_path + cfg_model, _ = _save_model_config(model, base_path=filepath) + cfg_model = _path2str(cfg_model) + cfg_model = ModelConfig(**cfg_model).dict() + assert tmp_path.joinpath('model').is_dir() + assert tmp_path.joinpath('model/model.joblib').is_file() + + # Adjust config + cfg_model['src'] = tmp_path.joinpath('model') # Need to manually set to absolute path here + + # Load model + model_load = _load_model_config(cfg_model) + assert isinstance(model_load, type(model)) diff --git a/alibi_detect/saving/tensorflow/__init__.py b/alibi_detect/saving/_tensorflow/__init__.py similarity index 60% rename from alibi_detect/saving/tensorflow/__init__.py rename to alibi_detect/saving/_tensorflow/__init__.py index 3d4f1f84c..e2f8220a5 100644 --- a/alibi_detect/saving/tensorflow/__init__.py +++ b/alibi_detect/saving/_tensorflow/__init__.py @@ -2,7 +2,7 @@ load_detector_legacy, load_kernel_config_tf, load_embedding_tf, load_model_tf, load_optimizer_tf, \ prep_model_and_emb_tf = import_optional( - 'alibi_detect.saving.tensorflow._loading', + 'alibi_detect.saving._tensorflow.loading', names=['load_detector_legacy', 'load_kernel_config', 'load_embedding', @@ -11,11 +11,23 @@ 'prep_model_and_emb']) save_detector_legacy, save_model_config_tf = import_optional( - 'alibi_detect.saving.tensorflow._saving', + 'alibi_detect.saving._tensorflow.saving', names=['save_detector_legacy', 'save_model_config'] ) get_tf_dtype = import_optional( - 'alibi_detect.saving.tensorflow._conversions', + 'alibi_detect.saving._tensorflow.conversions', names=['get_tf_dtype'] ) + +__all__ = [ + "load_detector_legacy", + "load_kernel_config_tf", + "load_embedding_tf", + "load_model_tf", + "load_optimizer_tf", + "prep_model_and_emb_tf", + "save_detector_legacy", + "save_model_config_tf", + "get_tf_dtype" +] diff --git a/alibi_detect/saving/tensorflow/_conversions.py b/alibi_detect/saving/_tensorflow/conversions.py similarity index 100% rename from alibi_detect/saving/tensorflow/_conversions.py rename to alibi_detect/saving/_tensorflow/conversions.py diff --git a/alibi_detect/saving/tensorflow/_loading.py b/alibi_detect/saving/_tensorflow/loading.py similarity index 97% rename from alibi_detect/saving/tensorflow/_loading.py rename to alibi_detect/saving/_tensorflow/loading.py index 1ce7c43f6..0f28bc7e6 100644 --- a/alibi_detect/saving/tensorflow/_loading.py +++ b/alibi_detect/saving/_tensorflow/loading.py @@ -25,6 +25,7 @@ OutlierVAE, OutlierVAEGMM, SpectralResidual) from alibi_detect.od.llr import build_model from alibi_detect.utils.tensorflow.kernels import DeepKernel +from alibi_detect.utils.frameworks import Framework # Below imports are used for legacy loading, and will be removed (or moved to utils/loading.py) in the future from alibi_detect.version import __version__ from alibi_detect.base import Detector @@ -69,7 +70,7 @@ def load_model(filepath: Union[str, os.PathLike], return model -def prep_model_and_emb(model: Optional[Callable], emb: Optional[TransformerEmbedding]) -> Callable: +def prep_model_and_emb(model: Callable, emb: Optional[TransformerEmbedding]) -> Callable: """ Function to perform final preprocessing of model (and/or embedding) before it is passed to preprocess_drift. @@ -78,25 +79,17 @@ def prep_model_and_emb(model: Optional[Callable], emb: Optional[TransformerEmbed model A compatible model. emb - A text embedding model. + An optional text embedding model. Returns ------- The final model ready to passed to preprocess_drift. 
""" - # If a model exists, process it (and embedding) - if model is not None: - model = model.encoder if isinstance(model, UAE) else model # This is to avoid nesting UAE's already a UAE - if emb is not None: - model = _Encoder(emb, mlp=model) - model = UAE(encoder_net=model) - # If no model exists, store embedding as model - else: - model = emb - if model is None: - raise ValueError("A 'model' and/or `embedding` must be specified when " - "preprocess_fn='preprocess_drift'") - + # Process model (and embedding) + model = model.encoder if isinstance(model, UAE) else model # This is to avoid nesting UAE's already a UAE + if emb is not None: + model = _Encoder(emb, mlp=model) + model = UAE(encoder_net=model) return model @@ -222,7 +215,7 @@ def load_detector_legacy(filepath: Union[str, os.PathLike], suffix: str, **kwarg warnings.warn('Trying to load detector from an older version.' 'This may lead to breaking code or invalid results.') - if 'backend' in list(meta_dict.keys()) and meta_dict['backend'] == 'pytorch': + if 'backend' in list(meta_dict.keys()) and meta_dict['backend'] == Framework.PYTORCH: raise NotImplementedError('Detectors with PyTorch backend are not yet supported.') detector_name = meta_dict['name'] diff --git a/alibi_detect/saving/tensorflow/_saving.py b/alibi_detect/saving/_tensorflow/saving.py similarity index 98% rename from alibi_detect/saving/tensorflow/_saving.py rename to alibi_detect/saving/_tensorflow/saving.py index 8eeb6df66..3630c6861 100644 --- a/alibi_detect/saving/tensorflow/_saving.py +++ b/alibi_detect/saving/_tensorflow/saving.py @@ -22,16 +22,17 @@ from alibi_detect.utils._types import Literal from alibi_detect.utils.tensorflow.kernels import GaussianRBF from alibi_detect.utils.missing_optional_dependency import MissingDependency +from alibi_detect.utils.frameworks import Framework logger = logging.getLogger(__name__) def save_model_config(model: Callable, base_path: Path, - input_shape: tuple, + input_shape: Optional[tuple], local_path: Path = Path('.')) -> Tuple[dict, Optional[dict]]: """ - Save a model to a config dictionary. When a model has a text embedding model contained within it, + Save a TensorFlow model to a config dictionary. When a model has a text embedding model contained within it, this is extracted and saved separately. 
Parameters @@ -53,6 +54,9 @@ def save_model_config(model: Callable, cfg_embed = None # type: Optional[Dict[str, Any]] if isinstance(model, UAE): if isinstance(model.encoder.layers[0], TransformerEmbedding): # if UAE contains embedding and encoder + if input_shape is None: + raise ValueError('Cannot save combined embedding and model when `input_shape` is None.') + # embedding embed = model.encoder.layers[0] cfg_embed = save_embedding_config(embed, base_path, local_path.joinpath('embedding')) @@ -78,7 +82,10 @@ def save_model_config(model: Callable, if model is not None: filepath = base_path.joinpath(local_path) save_model(model, filepath=filepath, save_dir='model') - cfg_model = {'src': local_path.joinpath('model')} + cfg_model = { + 'flavour': Framework.TENSORFLOW.value, + 'src': local_path.joinpath('model') + } return cfg_model, cfg_embed @@ -142,6 +149,7 @@ def save_embedding_config(embed: TransformerEmbedding, cfg_embed.update({'type': embed.emb_type}) cfg_embed.update({'layers': embed.hs_emb.keywords['layers']}) cfg_embed.update({'src': local_path}) + cfg_embed.update({'flavour': Framework.TENSORFLOW.value}) # Save embedding model logger.info('Saving embedding model to {}.'.format(filepath)) @@ -150,6 +158,9 @@ def save_embedding_config(embed: TransformerEmbedding, return cfg_embed +####################################################################################################### +# TODO: Everything below here is legacy saving code, and will be removed in the future +####################################################################################################### def save_embedding_legacy(embed: TransformerEmbedding, embed_args: dict, filepath: Path) -> None: @@ -177,9 +188,6 @@ def save_embedding_legacy(embed: TransformerEmbedding, dill.dump(embed_args, f) -####################################################################################################### -# TODO: Everything below here is legacy saving code, and will be removed in the future -####################################################################################################### def save_detector_legacy(detector, filepath): detector_name = detector.meta['name'] diff --git a/alibi_detect/saving/_tensorflow/tests/test_saving_tf.py b/alibi_detect/saving/_tensorflow/tests/test_saving_tf.py new file mode 100644 index 000000000..0d96e9b75 --- /dev/null +++ b/alibi_detect/saving/_tensorflow/tests/test_saving_tf.py @@ -0,0 +1,64 @@ +from pytest_cases import param_fixture, parametrize, parametrize_with_cases + +from alibi_detect.saving.tests.datasets import ContinuousData +from alibi_detect.saving.tests.models import encoder_model + +from alibi_detect.cd.tensorflow import HiddenOutput as HiddenOutput_tf +from alibi_detect.saving.loading import _load_model_config, _load_optimizer_config +from alibi_detect.saving.saving import _path2str, _save_model_config +from alibi_detect.saving.schemas import ModelConfig + +backend = param_fixture("backend", ['tensorflow']) + + +def test_load_optimizer_tf(backend): + "Test the tensorflow _load_optimizer_config." 
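As context for the optimizer test that follows: the {'class_name', 'config'} dictionary it builds is the standard Keras optimizer serialization format, which tf.keras can rebuild directly; presumably load_optimizer_tf is a thin wrapper around this mechanism (an assumption, sketched below).

import tensorflow as tf

# Assumed equivalent of what _load_optimizer_config / load_optimizer_tf do internally.
cfg_opt = {
    'class_name': 'Adam',
    'config': {'name': 'Adam', 'learning_rate': 0.01, 'epsilon': 1e-7, 'amsgrad': False},
}
optimizer = tf.keras.optimizers.deserialize(cfg_opt)
assert isinstance(optimizer, tf.keras.optimizers.Adam)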
+ class_name = 'Adam' + learning_rate = 0.01 + epsilon = 1e-7 + amsgrad = False + + # Load + cfg_opt = { + 'class_name': class_name, + 'config': { + 'name': class_name, + 'learning_rate': learning_rate, + 'epsilon': epsilon, + 'amsgrad': amsgrad + } + } + optimizer = _load_optimizer_config(cfg_opt, backend=backend) + assert type(optimizer).__name__ == class_name + assert optimizer.learning_rate == learning_rate + assert optimizer.epsilon == epsilon + assert optimizer.amsgrad == amsgrad + + +@parametrize_with_cases("data", cases=ContinuousData.data_synthetic_nd, prefix='data_') +@parametrize('model', [encoder_model]) +@parametrize('layer', [None, -1]) +def test_save_model_tf(data, model, layer, tmp_path): + """ + Unit test for _save_model_config and _load_model_config with tensorflow model. + """ + # Save model + filepath = tmp_path + input_shape = (data[0].shape[1],) + cfg_model, _ = _save_model_config(model, base_path=filepath, input_shape=input_shape) + cfg_model = _path2str(cfg_model) + cfg_model = ModelConfig(**cfg_model).dict() + assert tmp_path.joinpath('model').is_dir() + assert tmp_path.joinpath('model/model.h5').is_file() + + # Adjust config + cfg_model['src'] = tmp_path.joinpath('model') # Need to manually set to absolute path here + if layer is not None: + cfg_model['layer'] = layer + + # Load model + model_load = _load_model_config(cfg_model) + if layer is None: + assert isinstance(model_load, type(model)) + else: + assert isinstance(model_load, HiddenOutput_tf) diff --git a/alibi_detect/saving/loading.py b/alibi_detect/saving/loading.py index 2d53dd3ac..0d316731a 100644 --- a/alibi_detect/saving/loading.py +++ b/alibi_detect/saving/loading.py @@ -1,4 +1,3 @@ -# TODO - Need to modularise torch and tensorflow imports and use. e.g. has_tensorflow and has_pytorch etc import logging import os from functools import partial @@ -12,10 +11,13 @@ from transformers import AutoTokenizer from alibi_detect.saving.registry import registry -from alibi_detect.saving.tensorflow import load_detector_legacy, load_embedding_tf, load_kernel_config_tf, \ +from alibi_detect.saving._tensorflow import load_detector_legacy, load_embedding_tf, load_kernel_config_tf, \ load_model_tf, load_optimizer_tf, prep_model_and_emb_tf, get_tf_dtype +from alibi_detect.saving._sklearn import load_model_sk from alibi_detect.saving.validate import validate_config from alibi_detect.base import Detector, ConfigurableDetector +from alibi_detect.utils.frameworks import has_tensorflow, has_pytorch, Framework +from alibi_detect.saving.schemas import supported_models_tf, supported_models_torch if TYPE_CHECKING: import tensorflow as tf @@ -127,9 +129,9 @@ def _load_detector_config(filepath: Union[str, os.PathLike]) -> ConfigurableDete logger.info('Validated resolved config.') # Backend - backend = cfg.pop('backend') # popping so that cfg left as kwargs + `name` when passed to _init_detector - if backend.lower() != 'tensorflow': - raise NotImplementedError('Loading detectors with PyTorch or sklearn backend is not yet supported.') + backend = cfg.get('backend', None) + if backend is not None and backend.lower() not in (Framework.TENSORFLOW, Framework.SKLEARN): + raise NotImplementedError('Loading detectors with pytorch or keops backend is not yet supported.') # Init detector from config logger.info('Instantiating detector.') @@ -160,7 +162,7 @@ def _init_detector(cfg: dict) -> ConfigurableDetector: return detector -def _load_kernel_config(cfg: dict, backend: str = 'tensorflow') -> Callable: +def _load_kernel_config(cfg: dict, 
backend: str = Framework.TENSORFLOW) -> Callable: """ Loads a kernel from a kernel config dict. @@ -177,7 +179,7 @@ def _load_kernel_config(cfg: dict, backend: str = 'tensorflow') -> Callable: ------- The kernel. """ - if backend == 'tensorflow': + if backend == Framework.TENSORFLOW: kernel = load_kernel_config_tf(cfg) else: kernel = None @@ -185,8 +187,7 @@ def _load_kernel_config(cfg: dict, backend: str = 'tensorflow') -> Callable: return kernel -def _load_preprocess_config(cfg: dict, - backend: Optional[str] = 'tensorflow') -> Optional[Callable]: +def _load_preprocess_config(cfg: dict) -> Optional[Callable]: """ This function builds a preprocess_fn from the preprocess dict in a detector config dict. The dict format is expected to match that generated by serialize_preprocess in alibi_detect.utils.saving (also see pydantic schema). @@ -196,8 +197,6 @@ def _load_preprocess_config(cfg: dict, ---------- cfg A preprocess_fn config dict. (see pydantic schemas). - backend - The backend. Returns ------- @@ -216,14 +215,20 @@ def _load_preprocess_config(cfg: dict, emb = kwargs.pop('embedding') # embedding passed to preprocess_drift as `model` therefore remove # Backend specifics - if backend == 'tensorflow': + if has_tensorflow and isinstance(model, supported_models_tf): model = prep_model_and_emb_tf(model, emb) kwargs.pop('device') - elif backend == 'pytorch': # TODO - once optional deps implemented + elif has_pytorch and isinstance(model, supported_models_torch): raise NotImplementedError('Loading preprocess_fn for PyTorch not yet supported.') # device = cfg['device'] # TODO - device should be set already - check # kwargs.update({'model': kwargs['model'].to(device)}) # TODO - need .to(device) here? # kwargs.update({'device': device}) + elif model is None: + kwargs.pop('device') + model = emb + if model is None: + raise ValueError("A 'model' and/or `embedding` must be specified when " + "preprocess_fn='preprocess_drift'") kwargs.update({'model': model}) else: kwargs = cfg['kwargs'] # If generic callable, kwargs is cfg['kwargs'] @@ -235,18 +240,14 @@ def _load_preprocess_config(cfg: dict, return partial(preprocess_fn, **kwargs) -def _load_model_config(cfg: dict, - backend: str) -> Callable: +def _load_model_config(cfg: dict) -> Callable: """ - Loads TensorFlow, PyTorch and scikit-learn models (currently only TensorFlow supported), from a model config - dict. + Loads supported models from a model config dict. Parameters ---------- cfg Model config dict. (see pydantic model schemas). - backend - The backend. Returns ------- @@ -254,6 +255,7 @@ def _load_model_config(cfg: dict, """ # Load model + flavour = cfg['flavour'] src = cfg['src'] custom_obj = cfg['custom_objects'] layer = cfg['layer'] @@ -262,15 +264,17 @@ def _load_model_config(cfg: dict, raise FileNotFoundError("The `src` field is not a recognised directory. 
It should be a directory containing " "a compatible model.") - if backend == 'tensorflow': + if flavour == Framework.TENSORFLOW: model = load_model_tf(src, load_dir='.', custom_objects=custom_obj, layer=layer) + elif flavour == Framework.SKLEARN: + model = load_model_sk(src) else: - raise NotImplementedError('Loading of non-tensorflow models not currently supported') + raise NotImplementedError('Loading of PyTorch models not currently supported') return model -def _load_embedding_config(cfg: dict, backend: str) -> Callable: # TODO: Could type return more tightly +def _load_embedding_config(cfg: dict) -> Callable: # TODO: Could type return more tightly """ Load a pre-trained text embedding from an embedding config dict. @@ -278,8 +282,6 @@ def _load_embedding_config(cfg: dict, backend: str) -> Callable: # TODO: Could ---------- cfg An embedding config dict. (see the pydantic schemas). - backend - The backend. Returns ------- @@ -288,7 +290,8 @@ def _load_embedding_config(cfg: dict, backend: str) -> Callable: # TODO: Could src = cfg['src'] layers = cfg['layers'] typ = cfg['type'] - if backend == 'tensorflow': + flavour = cfg['flavour'] + if flavour == Framework.TENSORFLOW: emb = load_embedding_tf(src, embedding_type=typ, layers=layers) else: raise NotImplementedError('Loading of non-tensorflow embedding models not currently supported') @@ -332,7 +335,7 @@ def _load_optimizer_config(cfg: dict, ------- The loaded optimizer. """ - if backend == 'tensorflow': + if backend == Framework.TENSORFLOW: optimizer = load_optimizer_tf(cfg) else: raise NotImplementedError('Loading of non-tensorflow optimizers not currently supported') @@ -490,17 +493,17 @@ def resolve_config(cfg: dict, config_dir: Optional[Path]) -> dict: # are not resolved into objects here, since they are yet to undergo a further validation step). Instead, only # their components, such as `src`, are resolved above. 
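Throughout these loading changes, string literals such as 'tensorflow' are replaced by members of the Framework enum from alibi_detect.utils.frameworks. Its definition is not shown in this diff; since members are compared directly against lowercase strings and `.value` is stored in detector metadata, it is presumably a str-valued Enum along these lines (an assumption):

from enum import Enum


class Framework(str, Enum):
    # Assumed shape of alibi_detect.utils.frameworks.Framework (not part of this diff).
    # The str mixin makes Framework.PYTORCH == 'pytorch' evaluate to True, so existing
    # lowercase-string comparisons in the detectors keep working unchanged.
    PYTORCH = 'pytorch'
    TENSORFLOW = 'tensorflow'
    KEOPS = 'keops'
    SKLEARN = 'sklearn'


assert Framework.KEOPS == 'keops'
assert Framework.PYTORCH.value == 'pytorch'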
elif isinstance(src, dict): - backend = cfg.get('backend', 'tensorflow') + backend = cfg.get('backend', Framework.TENSORFLOW) if key[-1] in ('model', 'proj'): - obj = _load_model_config(src, backend) + obj = _load_model_config(src) elif key[-1] == 'embedding': - obj = _load_embedding_config(src, backend) + obj = _load_embedding_config(src) elif key[-1] == 'tokenizer': obj = _load_tokenizer_config(src) elif key[-1] == 'optimizer': obj = _load_optimizer_config(src, backend) elif key[-1] == 'preprocess_fn': - obj = _load_preprocess_config(src, backend) + obj = _load_preprocess_config(src) elif key[-1] in ('kernel', 'x_kernel', 'c_kernel'): obj = _load_kernel_config(src, backend) diff --git a/alibi_detect/saving/saving.py b/alibi_detect/saving/saving.py index 975fe2523..05e80831e 100644 --- a/alibi_detect/saving/saving.py +++ b/alibi_detect/saving/saving.py @@ -4,7 +4,7 @@ import warnings from functools import partial from pathlib import Path -from typing import Callable, Optional, Tuple, Union +from typing import Callable, Optional, Tuple, Union, Any import dill import numpy as np import toml @@ -13,9 +13,11 @@ from alibi_detect.saving._typing import VALID_DETECTORS from alibi_detect.saving.loading import _replace, validate_config from alibi_detect.saving.registry import registry -from alibi_detect.saving.schemas import SupportedModels -from alibi_detect.saving.tensorflow import save_detector_legacy, save_model_config_tf +from alibi_detect.utils._types import supported_models_all, supported_models_tf, supported_models_sklearn +from alibi_detect.utils.frameworks import Framework from alibi_detect.base import Detector, ConfigurableDetector +from alibi_detect.saving._tensorflow import save_detector_legacy, save_model_config_tf +from alibi_detect.saving._sklearn import save_model_config_sk # do not extend pickle dispatch table so as not to change pickle behaviour dill.extend(use_dill=False) @@ -46,8 +48,8 @@ def save_detector( if legacy: warnings.warn('The `legacy` option will be removed in a future version.', DeprecationWarning) - if 'backend' in list(detector.meta.keys()) and detector.meta['backend'] in ['pytorch', 'sklearn']: - raise NotImplementedError('Saving detectors with PyTorch or sklearn backend is not yet supported.') + if 'backend' in list(detector.meta.keys()) and detector.meta['backend'] in [Framework.PYTORCH, Framework.KEOPS]: + raise NotImplementedError('Saving detectors with pytorch or keops backend is not yet supported.') # TODO: Replace .__args__ w/ typing.get_args() once Python 3.7 dropped (and remove type ignore below) detector_name = detector.__class__.__name__ @@ -123,9 +125,9 @@ def _save_detector_config(detector: ConfigurableDetector, filepath: Union[str, o File path to save serialized artefacts to. 
""" # Get backend, input_shape and detector_name - backend = detector.meta.get('backend', 'tensorflow') - if backend != 'tensorflow': - raise NotImplementedError("Currently, saving is only supported with backend='tensorflow'.") + backend = detector.meta.get('backend', None) + if backend not in (None, Framework.TENSORFLOW, Framework.SKLEARN): + raise NotImplementedError("Currently, saving is only supported with backend='tensorflow' and 'sklearn'.") detector_name = detector.__class__.__name__ # Process file paths @@ -157,7 +159,7 @@ def _save_detector_config(detector: ConfigurableDetector, filepath: Union[str, o preprocess_fn = cfg.get('preprocess_fn', None) if preprocess_fn is not None: logger.info('Saving the preprocess_fn function.') - preprocess_cfg = _save_preprocess_config(preprocess_fn, backend, cfg['input_shape'], filepath) + preprocess_cfg = _save_preprocess_config(preprocess_fn, cfg['input_shape'], filepath) cfg['preprocess_fn'] = preprocess_cfg # Serialize kernels @@ -167,13 +169,13 @@ def _save_detector_config(detector: ConfigurableDetector, filepath: Union[str, o cfg[kernel_str] = _save_kernel_config(kernel, filepath, Path(kernel_str)) if 'proj' in cfg[kernel_str]: # serialise proj from DeepKernel - do here as need input_shape cfg[kernel_str]['proj'], _ = _save_model_config(cfg[kernel_str]['proj'], base_path=filepath, - input_shape=cfg['input_shape'], backend=backend) + input_shape=cfg['input_shape']) # ClassifierDrift and SpotTheDiffDrift specific artefacts. # Serialize detector model model = cfg.get('model', None) if model is not None: - model_cfg, _ = _save_model_config(model, base_path=filepath, input_shape=cfg['input_shape'], backend=backend) + model_cfg, _ = _save_model_config(model, base_path=filepath, input_shape=cfg['input_shape']) cfg['model'] = model_cfg # Serialize dataset @@ -232,7 +234,6 @@ def write_config(cfg: dict, filepath: Union[str, os.PathLike]): def _save_preprocess_config(preprocess_fn: Callable, - backend: str, input_shape: Optional[tuple], filepath: Path) -> dict: """ @@ -243,8 +244,6 @@ def _save_preprocess_config(preprocess_fn: Callable, ---------- preprocess_fn The preprocess function to be serialized. - backend - Specifies the detectors backend (if it has one). Either `'tensorflow'`, `'pytorch'` or `None`. input_shape Input shape for a model (if a model exists). filepath @@ -266,8 +265,8 @@ def _save_preprocess_config(preprocess_fn: Callable, kwargs = {} for k, v in func_kwargs.items(): # Model/embedding - if isinstance(v, SupportedModels): - cfg_model, cfg_embed = _save_model_config(v, filepath, input_shape, backend, local_path) + if isinstance(v, supported_models_all): + cfg_model, cfg_embed = _save_model_config(v, filepath, input_shape, local_path) kwargs.update({k: cfg_model}) if cfg_embed is not None: kwargs.update({'embedding': cfg_embed}) @@ -390,10 +389,9 @@ def _int2str_keys(dikt: dict) -> dict: return dikt_copy -def _save_model_config(model: Callable, +def _save_model_config(model: Any, base_path: Path, - input_shape: tuple, - backend: str, + input_shape: Optional[tuple] = None, path: Path = Path('.')) -> Tuple[dict, Optional[dict]]: """ Save a model to a config dictionary. When a model has a text embedding model contained within it, @@ -407,8 +405,6 @@ def _save_model_config(model: Callable, Base filepath to save to. input_shape The input dimensions of the model (after the optional embedding has been applied). - backend - The backend. path A local (relative) filepath to append to base_path. 
@@ -416,10 +412,12 @@ def _save_model_config(model: Callable, ------- A tuple containing the model and embedding config dicts. """ - if backend == 'tensorflow': + if isinstance(model, supported_models_tf): return save_model_config_tf(model, base_path, input_shape, path) + elif isinstance(model, supported_models_sklearn): + return save_model_config_sk(model, base_path, path), None else: - raise NotImplementedError("Saving of pytorch models is not yet implemented.") + raise NotImplementedError("Support for saving the given model is not yet implemented") def _save_tokenizer_config(tokenizer: PreTrainedTokenizerBase, diff --git a/alibi_detect/saving/schemas.py b/alibi_detect/saving/schemas.py index f5a8f9d33..fc37b3745 100644 --- a/alibi_detect/saving/schemas.py +++ b/alibi_detect/saving/schemas.py @@ -13,34 +13,15 @@ For detector pydantic models, the fields match the corresponding detector's args/kwargs. Refer to the detector's api docs for a full description of each arg/kwarg. """ - from typing import Callable, Dict, List, Optional, Type, Union, Any -# TODO - conditional checks depending on backend etc -# TODO - consider validating output of get_config calls import numpy as np from pydantic import BaseModel, validator -from alibi_detect.cd.tensorflow import UAE as UAE_tf -from alibi_detect.cd.tensorflow import HiddenOutput as HiddenOutput_tf -from alibi_detect.utils._types import Literal, NDArray -from alibi_detect.utils.frameworks import has_tensorflow, has_pytorch - -# Define supported models for each optional dependency -SupportedModels_tf, SupportedModels_torch, SupportedModels_sklearn = (), (), () # type: ignore -if has_tensorflow: - import tensorflow as tf - SupportedModels_tf = (tf.keras.Model, UAE_tf, HiddenOutput_tf) # type: ignore -if has_pytorch: - # import torch - SupportedModels_torch = () # type: ignore # TODO - fill - -# import sklearn -# SupportedModels_sklearn = () # type: ignore # TODO - fill - -# Build SupportedModels - a tuple of all possible models for use in isinstance() etc. -SupportedModels = SupportedModels_tf + SupportedModels_torch + SupportedModels_sklearn -# TODO - could define a Union with fwdrefs here, for use in mypy type annotations in saving.py etc +from alibi_detect.utils.frameworks import Framework +from alibi_detect.utils._types import (Literal, NDArray, supported_models_all, supported_models_tf, + supported_models_sklearn, supported_models_torch, supported_optimizers_tf, + supported_optimizers_torch, supported_optimizers_all) # Custom validators (defined here for reuse in multiple pydantic models) @@ -52,19 +33,59 @@ def coerce_int2list(value: int) -> List[int]: return value -def validate_model(model: Callable, values: dict) -> Callable: - """Validator to check the model is compatible with the given backend""" - backend = values['backend'] - if backend == 'tensorflow' and not isinstance(model, SupportedModels_tf): - raise ValueError('A TensorFlow backend is not available for this model') - elif backend == 'pytorch' and not isinstance(model, SupportedModels_torch): - raise ValueError('A PyTorch backend is not available for this model') - elif backend == 'sklearn' and not isinstance(model, SupportedModels_sklearn): - raise ValueError('A sklearn backend is not available for this model') - return model +class SupportedModel: + """ + Pydantic custom type to check the model is one of the supported types (conditional on what optional deps + are installed). 
+ """ + @classmethod + def __get_validators__(cls): + yield cls.validate_model + + @classmethod + def validate_model(cls, model: Any, values: dict) -> Any: + backend = values['backend'] + err_msg = f"`backend={backend}` but the `model` doesn't appear to be a {backend} supported model, "\ + f"or {backend} is not installed." + if backend == Framework.TENSORFLOW and not isinstance(model, supported_models_tf): + raise TypeError(err_msg) + elif backend == Framework.PYTORCH and not isinstance(model, supported_models_torch): + raise TypeError(err_msg) + elif backend == Framework.SKLEARN and not isinstance(model, supported_models_sklearn): + raise TypeError(f"`backend={backend}` but the `model` doesn't appear to be a {backend} supported model.") + elif isinstance(model, supported_models_all): # If model supported and no `backend` incompatibility + return model + else: # Catch any other unexpected issues + raise TypeError('The model is not recognised as a supported type.') + + +class SupportedOptimizer: + """ + Pydantic custom type to check the optimizer is one of the supported types (conditional on what optional deps + are installed). + """ + @classmethod + def __get_validators__(cls): + yield cls.validate_optimizer + + @classmethod + def validate_optimizer(cls, optimizer: Any, values: dict) -> Any: + backend = values['backend'] + err_msg = f"`backend={backend}` but the `optimizer` doesn't appear to be a {backend} supported model, "\ + f"or {backend} is not installed." + if backend == Framework.TENSORFLOW and not isinstance(optimizer, supported_optimizers_tf): + raise TypeError(err_msg) + elif backend == Framework.PYTORCH and not isinstance(optimizer, supported_optimizers_torch): + raise TypeError(err_msg) + elif isinstance(optimizer, supported_optimizers_all): # If optimizer supported and no `backend` incompatibility + return optimizer + else: # Catch any other unexpected issues + raise TypeError('The model is not recognised as a supported type.') + +# TODO - We could add validator to check `model` and `embedding` type when chained together. Leave this until refactor +# of preprocess_drift. -# TODO - we could add another validator to check given "backend" against what optional deps are installed? # Custom BaseModel so that we can set default config class CustomBaseModel(BaseModel): @@ -88,7 +109,6 @@ class Config: class MetaData(CustomBaseModel): version: str - config_spec: str version_warning: bool = False @@ -98,8 +118,6 @@ class DetectorConfig(CustomBaseModel): """ name: str "Name of the detector e.g. `MMDDrift`." - backend: Literal['tensorflow', 'pytorch', 'sklearn'] = 'tensorflow' - "The detector backend." meta: Optional[MetaData] = None "Config metadata. Should not be edited." # Note: Although not all detectors have a backend, we define in base class as `backend` also determines @@ -118,9 +136,15 @@ class ModelConfig(CustomBaseModel): .. code-block :: toml [model] + flavour = "tensorflow" src = "model/" layer = -1 """ + flavour: Literal['tensorflow', 'pytorch', 'sklearn'] + """ + Whether the model is a `tensorflow`, `pytorch` or `sklearn` model. XGBoost models following the scikit-learn API + are also included under `sklearn`. + """ src: str """ Filepath to directory storing the model (relative to the `config.toml` file, or absolute). At present, @@ -136,7 +160,9 @@ class ModelConfig(CustomBaseModel): layer: Optional[int] = None """ Optional index of hidden layer to extract. If not `None`, a - :class:`~alibi_detect.cd.tensorflow.preprocess.HiddenOutput` model is returned. 
+ :class:`~alibi_detect.cd.tensorflow.preprocess.HiddenOutput` or + :class:`~alibi_detect.cd.pytorch.preprocess.HiddenOutput` model is returned (dependent on `flavour`). + Only applies to 'tensorflow' and 'pytorch' models. """ @@ -147,16 +173,21 @@ class EmbeddingConfig(CustomBaseModel): Examples -------- - Using the hidden states at the output of each layer of the + Using the hidden states at the output of each layer of a TensorFlow `BERT base `_ model as text embeddings: .. code-block :: toml [embedding] + flavour = "tensorflow" src = "bert-base-cased" type = "hidden_state" layers = [-1, -2, -3, -4, -5, -6, -7, -8] """ + flavour: Literal['tensorflow', 'pytorch'] = 'tensorflow' + """ + Whether the embedding model is a `tensorflow` or `pytorch` model. + """ type: Literal['pooler_output', 'last_hidden_state', 'hidden_state', 'hidden_state_cls'] """ The type of embedding to be loaded. See `embedding_type` in @@ -627,6 +658,7 @@ class MMDDriftConfig(DriftDetectorConfig): Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the :class:`~alibi_detect.cd.MMDDrift` documentation for a description of each field. """ + backend: Literal['tensorflow', 'pytorch', 'keops'] = 'tensorflow' p_val: float = .05 preprocess_at_init: bool = True update_x_ref: Optional[Dict[str, int]] = None @@ -634,6 +666,7 @@ class MMDDriftConfig(DriftDetectorConfig): sigma: Optional[NDArray[np.float32]] = None configure_kernel_from_x_ref: bool = True n_permutations: int = 100 + batch_size_permutations: int = 1000000 device: Optional[Literal['cpu', 'cuda']] = None @@ -645,6 +678,7 @@ class MMDDriftConfigResolved(DriftDetectorConfigResolved): Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the :class:`~alibi_detect.cd.MMDDrift` documentation for a description of each field. """ + backend: Literal['tensorflow', 'pytorch', 'keops'] = 'tensorflow' p_val: float = .05 preprocess_at_init: bool = True update_x_ref: Optional[Dict[str, int]] = None @@ -652,6 +686,7 @@ class MMDDriftConfigResolved(DriftDetectorConfigResolved): sigma: Optional[NDArray[np.float32]] = None configure_kernel_from_x_ref: bool = True n_permutations: int = 100 + batch_size_permutations: int = 1000000 device: Optional[Literal['cpu', 'cuda']] = None @@ -663,6 +698,7 @@ class LSDDDriftConfig(DriftDetectorConfig): Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the :class:`~alibi_detect.cd.LSDDDrift` documentation for a description of each field. """ + backend: Literal['tensorflow', 'pytorch'] = 'tensorflow' p_val: float = .05 preprocess_at_init: bool = True update_x_ref: Optional[Dict[str, int]] = None @@ -681,6 +717,7 @@ class LSDDDriftConfigResolved(DriftDetectorConfigResolved): Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the :class:`~alibi_detect.cd.LSDDDrift` documentation for a description of each field. """ + backend: Literal['tensorflow', 'pytorch'] = 'tensorflow' p_val: float = .05 preprocess_at_init: bool = True update_x_ref: Optional[Dict[str, int]] = None @@ -700,6 +737,7 @@ class ClassifierDriftConfig(DriftDetectorConfig): Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the :class:`~alibi_detect.cd.ClassifierDrift` documentation for a description of each field. 
""" + backend: Literal['tensorflow', 'pytorch', 'sklearn'] = 'tensorflow' p_val: float = .05 preprocess_at_init: bool = True update_x_ref: Optional[Dict[str, int]] = None @@ -735,10 +773,11 @@ class ClassifierDriftConfigResolved(DriftDetectorConfigResolved): Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the :class:`~alibi_detect.cd.ClassifierDrift` documentation for a description of each field. """ + backend: Literal['tensorflow', 'pytorch', 'sklearn'] = 'tensorflow' p_val: float = .05 preprocess_at_init: bool = True update_x_ref: Optional[Dict[str, int]] = None - model: Optional[Callable] = None + model: Optional[SupportedModel] = None preds_type: Literal['probs', 'logits'] = 'probs' binarize_preds: bool = False reg_loss_fn: Optional[Callable] = None @@ -746,7 +785,7 @@ class ClassifierDriftConfigResolved(DriftDetectorConfigResolved): n_folds: Optional[int] = None retrain_from_scratch: bool = True seed: int = 0 - optimizer: Optional['tf.keras.optimizers.Optimizer'] = None + optimizer: Optional[SupportedOptimizer] = None learning_rate: float = 1e-3 batch_size: int = 32 preprocess_batch_fn: Optional[Callable] = None @@ -760,9 +799,6 @@ class ClassifierDriftConfigResolved(DriftDetectorConfigResolved): calibration_kwargs: Optional[dict] = None use_oob: bool = False - # validators - _validate_model = validator('model', allow_reuse=True, pre=True)(validate_model) - class SpotTheDiffDriftConfig(DriftDetectorConfig): """ @@ -773,6 +809,7 @@ class SpotTheDiffDriftConfig(DriftDetectorConfig): Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the :class:`~alibi_detect.cd.SpotTheDiffDrift` documentation for a description of each field. """ + backend: Literal['tensorflow', 'pytorch'] = 'tensorflow' p_val: float = .05 binarize_preds: bool = False train_size: Optional[float] = .75 @@ -804,13 +841,14 @@ class SpotTheDiffDriftConfigResolved(DriftDetectorConfigResolved): Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the :class:`~alibi_detect.cd.SpotTheDiffDrift` documentation for a description of each field. """ + backend: Literal['tensorflow', 'pytorch'] = 'tensorflow' p_val: float = .05 binarize_preds: bool = False train_size: Optional[float] = .75 n_folds: Optional[int] = None retrain_from_scratch: bool = True seed: int = 0 - optimizer: Optional['tf.keras.optimizers.Optimizer'] = None + optimizer: Optional[SupportedOptimizer] = None learning_rate: float = 1e-3 batch_size: int = 32 preprocess_batch_fn: Optional[Callable] = None @@ -835,11 +873,13 @@ class LearnedKernelDriftConfig(DriftDetectorConfig): Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the :class:`~alibi_detect.cd.LearnedKernelDrift` documentation for a description of each field. 
""" + backend: Literal['tensorflow', 'pytorch', 'keops'] = 'tensorflow' p_val: float = .05 kernel: Union[str, DeepKernelConfig] preprocess_at_init: bool = True update_x_ref: Optional[Dict[str, int]] = None n_permutations: int = 100 + batch_size_permutations: int = 1000000 var_reg: float = 1e-5 reg_loss_fn: Optional[str] = None train_size: Optional[float] = .75 @@ -847,6 +887,7 @@ class LearnedKernelDriftConfig(DriftDetectorConfig): optimizer: Optional[Union[str, OptimizerConfig]] = None learning_rate: float = 1e-3 batch_size: int = 32 + batch_size_predict: int = 1000000 preprocess_batch_fn: Optional[str] = None epochs: int = 3 verbose: int = 0 @@ -865,18 +906,21 @@ class LearnedKernelDriftConfigResolved(DriftDetectorConfigResolved): Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the :class:`~alibi_detect.cd.LearnedKernelDrift` documentation for a description of each field. """ + backend: Literal['tensorflow', 'pytorch', 'keops'] = 'tensorflow' p_val: float = .05 kernel: Optional[Callable] = None preprocess_at_init: bool = True update_x_ref: Optional[Dict[str, int]] = None n_permutations: int = 100 + batch_size_permutations: int = 1000000 var_reg: float = 1e-5 reg_loss_fn: Optional[Callable] = None train_size: Optional[float] = .75 retrain_from_scratch: bool = True - optimizer: Optional['tf.keras.optimizers.Optimizer'] = None + optimizer: Optional[SupportedOptimizer] = None learning_rate: float = 1e-3 batch_size: int = 32 + batch_size_predict: int = 1000000 preprocess_batch_fn: Optional[Callable] = None epochs: int = 3 verbose: int = 0 @@ -895,6 +939,7 @@ class ContextMMDDriftConfig(DriftDetectorConfig): Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the :class:`~alibi_detect.cd.ContextMMDDrift` documentation for a description of each field. """ + backend: Literal['tensorflow', 'pytorch'] = 'tensorflow' p_val: float = .05 c_ref: str preprocess_at_init: bool = True @@ -917,6 +962,7 @@ class ContextMMDDriftConfigResolved(DriftDetectorConfigResolved): Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the :class:`~alibi_detect.cd.MMDDrift` documentation for a description of each field. """ + backend: Literal['tensorflow', 'pytorch'] = 'tensorflow' p_val: float = .05 c_ref: np.ndarray preprocess_at_init: bool = True @@ -940,6 +986,7 @@ class MMDDriftOnlineConfig(DriftDetectorConfig): Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the :class:`~alibi_detect.cd.MMDDriftOnline` documentation for a description of each field. """ + backend: Literal['tensorflow', 'pytorch'] = 'tensorflow' ert: float window_size: int kernel: Optional[Union[str, KernelConfig]] = None @@ -958,6 +1005,7 @@ class MMDDriftOnlineConfigResolved(DriftDetectorConfigResolved): Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the :class:`~alibi_detect.cd.MMDDriftOnline` documentation for a description of each field. """ + backend: Literal['tensorflow', 'pytorch'] = 'tensorflow' ert: float window_size: int kernel: Optional[Callable] = None @@ -976,6 +1024,7 @@ class LSDDDriftOnlineConfig(DriftDetectorConfig): Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the :class:`~alibi_detect.cd.LSDDDriftOnline` documentation for a description of each field. 
""" + backend: Literal['tensorflow', 'pytorch'] = 'tensorflow' ert: float window_size: int sigma: Optional[np.ndarray] = None @@ -995,6 +1044,7 @@ class LSDDDriftOnlineConfigResolved(DriftDetectorConfigResolved): Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the :class:`~alibi_detect.cd.LSDDDriftOnline` documentation for a description of each field. """ + backend: Literal['tensorflow', 'pytorch'] = 'tensorflow' ert: float window_size: int sigma: Optional[np.ndarray] = None @@ -1100,6 +1150,7 @@ class ClassifierUncertaintyDriftConfig(DetectorConfig): Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the :class:`~alibi_detect.cd.ClassifierUncertaintyDrift` documentation for a description of each field. """ + backend: Literal['tensorflow', 'pytorch'] = 'tensorflow' x_ref: str model: Union[str, ModelConfig] p_val: float = .05 @@ -1126,8 +1177,9 @@ class ClassifierUncertaintyDriftConfigResolved(DetectorConfig): Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the :class:`~alibi_detect.cd.ClassifierUncertaintyDrift` documentation for a description of each field. """ + backend: Literal['tensorflow', 'pytorch'] = 'tensorflow' x_ref: Union[np.ndarray, list] - model: Optional[Callable] = None + model: Optional[SupportedModel] = None p_val: float = .05 x_ref_preprocessed: bool = False update_x_ref: Optional[Dict[str, int]] = None @@ -1142,9 +1194,6 @@ class ClassifierUncertaintyDriftConfigResolved(DetectorConfig): input_shape: Optional[tuple] = None data_type: Optional[str] = None - # validators - _validate_model = validator('model', allow_reuse=True, pre=True)(validate_model) - class RegressorUncertaintyDriftConfig(DetectorConfig): """ @@ -1155,6 +1204,7 @@ class RegressorUncertaintyDriftConfig(DetectorConfig): Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the :class:`~alibi_detect.cd.RegressorUncertaintyDrift` documentation for a description of each field. """ + backend: Literal['tensorflow', 'pytorch'] = 'tensorflow' x_ref: str model: Union[str, ModelConfig] p_val: float = .05 @@ -1180,8 +1230,9 @@ class RegressorUncertaintyDriftConfigResolved(DetectorConfig): Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the :class:`~alibi_detect.cd.RegressorUncertaintyDrift` documentation for a description of each field. 
""" + backend: Literal['tensorflow', 'pytorch'] = 'tensorflow' x_ref: Union[np.ndarray, list] - model: Optional[Callable] = None + model: Optional[SupportedModel] = None p_val: float = .05 x_ref_preprocessed: bool = False update_x_ref: Optional[Dict[str, int]] = None @@ -1195,9 +1246,6 @@ class RegressorUncertaintyDriftConfigResolved(DetectorConfig): input_shape: Optional[tuple] = None data_type: Optional[str] = None - # validators - _validate_model = validator('model', allow_reuse=True, pre=True)(validate_model) - # Unresolved schema dictionary (used in alibi_detect.utils.loading) DETECTOR_CONFIGS = { diff --git a/alibi_detect/saving/tests/datasets.py b/alibi_detect/saving/tests/datasets.py index 6f9cb607b..eadb83925 100644 --- a/alibi_detect/saving/tests/datasets.py +++ b/alibi_detect/saving/tests/datasets.py @@ -1,6 +1,8 @@ import numpy as np +import pytest from alibi_testing.data import get_movie_sentiment_data from pytest_cases import parametrize +from requests import RequestException # Note: If any of below cases become large, see https://smarie.github.io/python-pytest-cases/#c-caching-cases FLOAT = np.float32 @@ -63,4 +65,7 @@ def data_synthetic_nd(data_shape): class TextData: @staticmethod def movie_sentiment_data(): - return get_movie_sentiment_data() + try: + return get_movie_sentiment_data() + except RequestException: + pytest.skip('Movie sentiment dataset URL down') diff --git a/alibi_detect/saving/tests/models.py b/alibi_detect/saving/tests/models.py new file mode 100644 index 000000000..9857449ad --- /dev/null +++ b/alibi_detect/saving/tests/models.py @@ -0,0 +1,251 @@ +from functools import partial + +import numpy as np +import tensorflow as tf +import torch +from sklearn.ensemble import RandomForestClassifier +from xgboost import XGBClassifier + +from requests.exceptions import HTTPError + +import pytest +from pytest_cases import fixture, parametrize +from transformers import AutoTokenizer +from alibi_detect.cd.pytorch import preprocess_drift as preprocess_drift_pt +from alibi_detect.cd.tensorflow import UAE as UAE_tf +from alibi_detect.cd.tensorflow import preprocess_drift as preprocess_drift_tf +from alibi_detect.utils.pytorch.kernels import GaussianRBF as GaussianRBF_pt +from alibi_detect.utils.tensorflow.kernels import GaussianRBF as GaussianRBF_tf +from alibi_detect.utils.tensorflow.kernels import DeepKernel as DeepKernel_tf +from alibi_detect.models.pytorch import TransformerEmbedding as TransformerEmbedding_pt +from alibi_detect.models.tensorflow import TransformerEmbedding as TransformerEmbedding_tf +from alibi_detect.cd.pytorch import HiddenOutput as HiddenOutput_pt +from alibi_detect.cd.tensorflow import HiddenOutput as HiddenOutput_tf + +LATENT_DIM = 2 # Must be less than input_dim set in ./datasets.py +DEVICE = "cuda" if torch.cuda.is_available() else "cpu" + + +@fixture +def encoder_model(backend, current_cases): + """ + An untrained encoder of given input dimension and backend (this is a "custom" model, NOT an Alibi Detect UAE). 
+    """
+    _, _, data_params = current_cases["data"]
+    _, input_dim = data_params['data_shape']
+
+    if backend == 'tensorflow':
+        model = tf.keras.Sequential(
+            [
+                tf.keras.layers.InputLayer(input_shape=(input_dim,)),
+                tf.keras.layers.Dense(5, activation=tf.nn.relu),
+                tf.keras.layers.Dense(LATENT_DIM, activation=None)
+            ]
+        )
+    elif backend == 'pytorch':
+        raise NotImplementedError('`pytorch` tests not implemented.')
+    else:
+        pytest.skip('`encoder_model` only implemented for tensorflow and pytorch.')
+    return model
+
+
+@fixture
+def encoder_dropout_model(backend, current_cases):
+    """
+    An untrained encoder with dropout, of given input dimension and backend.
+
+    TODO: consolidate this model (and encoder_model above) with models like that in test_model_uncertainty.py
+    """
+    _, _, data_params = current_cases["data"]
+    _, input_dim = data_params['data_shape']
+
+    if backend == 'tensorflow':
+        model = tf.keras.Sequential(
+            [
+                tf.keras.layers.InputLayer(input_shape=(input_dim,)),
+                tf.keras.layers.Dense(5, activation=tf.nn.relu),
+                tf.keras.layers.Dropout(0.5),
+                tf.keras.layers.Dense(LATENT_DIM, activation=None)
+            ]
+        )
+    elif backend == 'pytorch':
+        raise NotImplementedError('`pytorch` tests not implemented.')
+    else:
+        pytest.skip('`encoder_dropout_model` only implemented for tensorflow and pytorch.')
+    return model
+
+
+@fixture
+def preprocess_custom(encoder_model):
+    """
+    Preprocess function with Untrained Autoencoder.
+    """
+    if isinstance(encoder_model, tf.keras.Model):
+        preprocess_fn = partial(preprocess_drift_tf, model=encoder_model)
+    else:
+        preprocess_fn = partial(preprocess_drift_pt, model=encoder_model)
+    return preprocess_fn
+
+
+@fixture
+def kernel(request, backend):
+    """
+    Gaussian RBF kernel for given backend. Settings are parametrised in the test function.
+    """
+    kernel = request.param
+    if kernel is None:
+        pass
+    elif isinstance(kernel, dict):  # dict of kwargs
+        if backend == 'tensorflow':
+            kernel = GaussianRBF_tf(**kernel)
+        elif backend == 'pytorch':
+            kernel = GaussianRBF_pt(**kernel)
+        else:
+            pytest.skip('`kernel` only implemented for tensorflow and pytorch.')
+    return kernel
+
+
+@fixture
+def deep_kernel(request, backend, encoder_model):
+    """
+    Deep kernel, built using the `encoder_model` fixture for the projection, and using the kernel_a, kernel_b and
+    eps options parametrised in the test function.
+    """
+    # Get DeepKernel options
+    kernel_a = request.param.get('kernel_a', 'rbf')
+    kernel_b = request.param.get('kernel_b', 'rbf')
+    eps = request.param.get('eps', 'trainable')
+
+    # Proj model (backend managed in encoder_model fixture)
+    proj = encoder_model
+
+    # Build DeepKernel
+    if backend == 'tensorflow':
+        kernel_a = GaussianRBF_tf(**kernel_a) if isinstance(kernel_a, dict) else kernel_a
+        kernel_b = GaussianRBF_tf(**kernel_b) if isinstance(kernel_b, dict) else kernel_b
+        deep_kernel = DeepKernel_tf(proj, kernel_a=kernel_a, kernel_b=kernel_b, eps=eps)
+    elif backend == 'pytorch':
+        raise NotImplementedError('`pytorch` tests not implemented.')
+    else:
+        pytest.skip('`deep_kernel` only implemented for tensorflow and pytorch.')
+    return deep_kernel
+
+
+@fixture
+def classifier_model(backend, current_cases):
+    """
+    Classification model with given input dimension and backend.
+ """ + _, _, data_params = current_cases["data"] + _, input_dim = data_params['data_shape'] + if backend == 'tensorflow': + inputs = tf.keras.Input(shape=(input_dim,)) + outputs = tf.keras.layers.Dense(2, activation=tf.nn.softmax)(inputs) + model = tf.keras.Model(inputs=inputs, outputs=outputs) + elif backend == 'pytorch': + raise NotImplementedError('`pytorch` tests not implemented.') + elif backend == 'sklearn': + model = RandomForestClassifier() + else: + pytest.skip('`classifier_model` only implemented for tensorflow, pytorch, and sklearn.') + return model + + +@fixture +def xgb_classifier_model(): + model = XGBClassifier() + return model + + +@fixture(unpack_into=('tokenizer, embedding, max_len, enc_dim')) +@parametrize('model_name, max_len', [('bert-base-cased', 100)]) +@parametrize('uae', [True, False]) +def nlp_embedding_and_tokenizer(model_name, max_len, uae, backend): + """ + A fixture to build nlp embedding and tokenizer models based on the HuggingFace pre-trained models. + """ + backend = 'tf' if backend == 'tensorflow' else 'pt' + + # Load tokenizer + try: + tokenizer = AutoTokenizer.from_pretrained(model_name) + except (OSError, HTTPError): + pytest.skip(f"Problem downloading {model_name} from huggingface.co") + X = 'A dummy string' # this will be padded to max_len + tokens = tokenizer(list(X[:5]), pad_to_max_length=True, + max_length=max_len, return_tensors=backend) + + # Load embedding model + emb_type = 'hidden_state' + n_layers = 8 + layers = [-_ for _ in range(1, n_layers + 1)] + enc_dim = 32 + + if backend == 'tf': + try: + embedding = TransformerEmbedding_tf(model_name, emb_type, layers) + except (OSError, HTTPError): + pytest.skip(f"Problem downloading {model_name} from huggingface.co") + if uae: + x_emb = embedding(tokens) + shape = (x_emb.shape[1],) + embedding = UAE_tf(input_layer=embedding, shape=shape, enc_dim=enc_dim) + else: + try: + embedding = TransformerEmbedding_pt(model_name, emb_type, layers) + except (OSError, HTTPError): + pytest.skip(f"Problem downloading {model_name} from huggingface.co") + if uae: + x_emb = embedding(tokens) + emb_dim = x_emb.shape[1] + device = torch.device(DEVICE) + embedding = torch.nn.Sequential( + embedding, + torch.nn.Linear(emb_dim, 256), + torch.nn.ReLU(), + torch.nn.Linear(256, enc_dim) + ).to(device).eval() + + return tokenizer, embedding, max_len, enc_dim + + +def preprocess_simple(x: np.ndarray): + """ + Simple function to test serialization of generic Python function within preprocess_fn. + """ + return x*2.0 + + +@fixture +def preprocess_nlp(embedding, tokenizer, max_len, backend): + """ + Preprocess function with Untrained Autoencoder. + """ + if backend == 'tensorflow': + preprocess_fn = partial(preprocess_drift_tf, model=embedding, tokenizer=tokenizer, + max_len=max_len, preprocess_batch_fn=preprocess_simple) + elif backend == 'pytorch': + preprocess_fn = partial(preprocess_drift_pt, model=embedding, tokenizer=tokenizer, max_len=max_len, + preprocess_batch_fn=preprocess_simple) + else: + pytest.skip('`preprocess_nlp` only implemented for tensorflow and pytorch.') + return preprocess_fn + + +@fixture +def preprocess_hiddenoutput(classifier_model, current_cases, backend): + """ + Preprocess function to extract the softmax layer of a classifier (with the HiddenOutput utility function). 
+ """ + _, _, data_params = current_cases["data"] + _, input_dim = data_params['data_shape'] + + if backend == 'tensorflow': + model = HiddenOutput_tf(classifier_model, layer=-1, input_shape=(None, input_dim)) + preprocess_fn = partial(preprocess_drift_tf, model=model) + elif backend == 'pytorch': + model = HiddenOutput_pt(classifier_model, layer=-1) + preprocess_fn = partial(preprocess_drift_pt, model=model) + else: + pytest.skip('`preprocess_hiddenoutput` only implemented for tensorflow and pytorch.') + return preprocess_fn diff --git a/alibi_detect/saving/tests/test_saving.py b/alibi_detect/saving/tests/test_saving.py index e40a71e10..c1b6b5a06 100644 --- a/alibi_detect/saving/tests/test_saving.py +++ b/alibi_detect/saving/tests/test_saving.py @@ -5,11 +5,11 @@ Internal functions such as save_kernel/load_kernel_config etc are also tested. """ # TODO future - test pytorch save/load functionality -# TODO (could/should also add tests to backend-specific submodules) from functools import partial from pathlib import Path from typing import Callable +import sklearn.base import toml import dill import numpy as np @@ -19,47 +19,44 @@ import torch from .datasets import BinData, CategoricalData, ContinuousData, MixedData, TextData +from .models import (encoder_model, preprocess_custom, preprocess_hiddenoutput, preprocess_simple, # noqa: F401 + preprocess_nlp, LATENT_DIM, classifier_model, kernel, deep_kernel, nlp_embedding_and_tokenizer, + embedding, tokenizer, max_len, enc_dim) + from alibi_detect.utils._random import fixed_seed from packaging import version -from pytest_cases import fixture, param_fixture, parametrize, parametrize_with_cases +from pytest_cases import param_fixture, parametrize, parametrize_with_cases from sklearn.model_selection import StratifiedKFold -from transformers import AutoTokenizer + from alibi_detect.cd import (ChiSquareDrift, ClassifierUncertaintyDrift, RegressorUncertaintyDrift, ClassifierDrift, FETDrift, KSDrift, LearnedKernelDrift, LSDDDrift, MMDDrift, SpotTheDiffDrift, TabularDrift, ContextMMDDrift, MMDDriftOnline, LSDDDriftOnline, CVMDriftOnline, FETDriftOnline) -from alibi_detect.cd.pytorch import HiddenOutput as HiddenOutput_pt -from alibi_detect.cd.pytorch import preprocess_drift as preprocess_drift_pt -from alibi_detect.cd.tensorflow import UAE as UAE_tf -from alibi_detect.cd.tensorflow import HiddenOutput as HiddenOutput_tf -from alibi_detect.cd.tensorflow import preprocess_drift as preprocess_drift_tf from alibi_detect.models.pytorch import TransformerEmbedding as TransformerEmbedding_pt from alibi_detect.models.tensorflow import TransformerEmbedding as TransformerEmbedding_tf from alibi_detect.saving import (load_detector, read_config, registry, resolve_config, save_detector, write_config) -from alibi_detect.saving.loading import (_get_nested_value, _load_model_config, _load_optimizer_config, _replace, +from alibi_detect.saving.loading import (_get_nested_value, _replace, _set_dtypes, _set_nested_value, _prepend_cfg_filepaths) from alibi_detect.saving.saving import _serialize_object from alibi_detect.saving.saving import (_path2str, _int2str_keys, _save_kernel_config, _save_model_config, _save_preprocess_config) from alibi_detect.saving.schemas import DeepKernelConfig, KernelConfig, ModelConfig, PreprocessConfig from alibi_detect.utils.pytorch.kernels import DeepKernel as DeepKernel_pt -from alibi_detect.utils.pytorch.kernels import GaussianRBF as GaussianRBF_pt from alibi_detect.utils.tensorflow.kernels import DeepKernel as DeepKernel_tf -from 
alibi_detect.utils.tensorflow.kernels import GaussianRBF as GaussianRBF_tf if version.parse(scipy.__version__) >= version.parse('1.7.0'): from alibi_detect.cd import CVMDrift -backend = param_fixture("backend", ['tensorflow']) +# TODO: We currently parametrize encoder_model etc (in models.py) with backend, so the same flavour of +# preprocessing is used as the detector backend. In the future we could decouple this in tests. +backend = param_fixture("backend", ['tensorflow', 'sklearn']) P_VAL = 0.05 ERT = 10 N_PERMUTATIONS = 10 N_BOOTSTRAPS = 100 WINDOW_SIZE = 5 -LATENT_DIM = 2 # Must be less than input_dim set in ./datasets.py -DEVICE = "cuda" if torch.cuda.is_available() else "cpu" REGISTERED_OBJECTS = registry.get_all() # Define a detector config dict @@ -75,201 +72,6 @@ # TODO - future: Some of the fixtures can/should be moved elsewhere (i.e. if they can be recycled for use elsewhere) -@fixture -def encoder_model(backend, current_cases): - """ - An untrained encoder of given input dimension and backend (this is a "custom" model, NOT an Alibi Detect UAE). - """ - _, _, data_params = current_cases["data"] - _, input_dim = data_params['data_shape'] - - if backend == 'tensorflow': - model = tf.keras.Sequential( - [ - tf.keras.layers.InputLayer(input_shape=(input_dim,)), - tf.keras.layers.Dense(5, activation=tf.nn.relu), - tf.keras.layers.Dense(LATENT_DIM, activation=None) - ] - ) - else: - raise NotImplementedError('`pytorch` tests not implemented.') - return model - - -@fixture -def encoder_dropout_model(backend, current_cases): - """ - An untrained encoder with dropout, of given input dimension and backend. - - TODO: consolidate this model (and encoder_model above) with models like that in test_model_uncertainty.py - """ - _, _, data_params = current_cases["data"] - _, input_dim = data_params['data_shape'] - - if backend == 'tensorflow': - model = tf.keras.Sequential( - [ - tf.keras.layers.InputLayer(input_shape=(input_dim,)), - tf.keras.layers.Dense(5, activation=tf.nn.relu), - tf.keras.layers.Dropout(0.5), - tf.keras.layers.Dense(LATENT_DIM, activation=None) - ] - ) - else: - raise NotImplementedError('`pytorch` tests not implemented.') - return model - - -@fixture -def preprocess_custom(encoder_model, backend): - """ - Preprocess function with Untrained Autoencoder. - """ - if backend == 'tensorflow': - preprocess_fn = partial(preprocess_drift_tf, model=encoder_model) - else: - preprocess_fn = partial(preprocess_drift_pt, model=encoder_model) - return preprocess_fn - - -@fixture -def kernel(request, backend): - """ - Gaussian RBF kernel for given backend. Settings are parametrised in the test function. - """ - kernel = request.param - if kernel is None: - pass - elif isinstance(kernel, dict): # dict of kwargs - if backend == 'tensorflow': - kernel = GaussianRBF_tf(**kernel) - elif backend == 'pytorch': - kernel = GaussianRBF_pt(**kernel) - return kernel - - -@fixture -def deep_kernel(request, backend, encoder_model): - """ - Deep kernel, built using the `encoder_model` fixture for the projection, and using the kernel_a and eps - parametrised in the test function. 
- """ - # Get DeepKernel options - kernel_a = request.param.get('kernel_a', 'rbf') - kernel_b = request.param.get('kernel_b', 'rbf') - eps = request.param.get('eps', 'trainable') - - # Proj model (backend managed in encoder_model fixture) - proj = encoder_model - - # Build DeepKernel - if backend == 'tensorflow': - kernel_a = GaussianRBF_tf(**kernel_a) if isinstance(kernel_a, dict) else kernel_a - kernel_a = GaussianRBF_tf(**kernel_b) if isinstance(kernel_b, dict) else kernel_b - deep_kernel = DeepKernel_tf(proj, kernel_a=kernel_a, kernel_b=kernel_b, eps=eps) - elif backend == 'pytorch': - raise NotImplementedError('`pytorch` tests not implemented.') - else: - raise ValueError('`backend` not valid.') - return deep_kernel - - -@fixture -def classifier(backend, current_cases): - """ - Classification model with given input dimension and backend. - """ - _, _, data_params = current_cases["data"] - _, input_dim = data_params['data_shape'] - if backend == 'tensorflow': - inputs = tf.keras.Input(shape=(input_dim,)) - outputs = tf.keras.layers.Dense(2, activation=tf.nn.softmax)(inputs) - model = tf.keras.Model(inputs=inputs, outputs=outputs) - elif backend == 'pytorch': - raise NotImplementedError('`pytorch` tests not implemented.') - else: - raise ValueError('`backend` not valid.') - return model - - -@fixture(unpack_into=('tokenizer, embedding, max_len, enc_dim')) -@parametrize('model_name, max_len', [('bert-base-cased', 100)]) -@parametrize('uae', [True, False]) -def nlp_embedding_and_tokenizer(model_name, max_len, uae, backend): - """ - A fixture to build nlp embedding and tokenizer models based on the HuggingFace pre-trained models. - """ - backend = 'tf' if backend == 'tensorflow' else 'pt' - - # Load tokenizer - tokenizer = AutoTokenizer.from_pretrained(model_name) - X = 'A dummy string' # this will be padded to max_len - tokens = tokenizer(list(X[:5]), pad_to_max_length=True, - max_length=max_len, return_tensors=backend) - - # Load embedding model - emb_type = 'hidden_state' - n_layers = 8 - layers = [-_ for _ in range(1, n_layers + 1)] - enc_dim = 32 - - if backend == 'tf': - embedding = TransformerEmbedding_tf(model_name, emb_type, layers) - if uae: - x_emb = embedding(tokens) - shape = (x_emb.shape[1],) - embedding = UAE_tf(input_layer=embedding, shape=shape, enc_dim=enc_dim) - else: - embedding = TransformerEmbedding_pt(model_name, emb_type, layers) - if uae: - x_emb = embedding(tokens) - emb_dim = x_emb.shape[1] - device = torch.device(DEVICE) - embedding = torch.nn.Sequential( - embedding, - torch.nn.Linear(emb_dim, 256), - torch.nn.ReLU(), - torch.nn.Linear(256, enc_dim) - ).to(device).eval() - - return tokenizer, embedding, max_len, enc_dim - - -def preprocess_simple(x: np.ndarray): - """ - Simple function to test serialization of generic Python function within preprocess_fn. - """ - return x*2.0 - - -@fixture -def preprocess_nlp(embedding, tokenizer, max_len, backend): - """ - Preprocess function with Untrained Autoencoder. - """ - if backend == 'tensorflow': - preprocess_fn = partial(preprocess_drift_tf, model=embedding, tokenizer=tokenizer, - max_len=max_len, preprocess_batch_fn=preprocess_simple) - else: - preprocess_fn = partial(preprocess_drift_pt, model=embedding, tokenizer=tokenizer, max_len=max_len, - preprocess_batch_fn=preprocess_simple) - return preprocess_fn - - -@fixture -def preprocess_hiddenoutput(classifier, backend): - """ - Preprocess function to extract the softmax layer of a classifier (with the HiddenOutput utility function). 
- """ - if backend == 'tensorflow': - model = HiddenOutput_tf(classifier, layer=-1) - preprocess_fn = partial(preprocess_drift_tf, model=model) - else: - model = HiddenOutput_pt(classifier, layer=-1) - preprocess_fn = partial(preprocess_drift_pt, model=model) - return preprocess_fn - - @parametrize('cfg', CFGS) def test_load_simple_config(cfg, tmp_path): """ @@ -300,7 +102,7 @@ def test_load_simple_config(cfg, tmp_path): @parametrize_with_cases("data", cases=ContinuousData, prefix='data_') def test_save_ksdrift(data, preprocess_fn, tmp_path): """ - Test KSDrift on continuous datasets, with UAE and classifier softmax output as preprocess_fn's. Only this + Test KSDrift on continuous datasets, with UAE and classifier_model softmax output as preprocess_fn's. Only this detector is tested with preprocessing strategies, as other detectors should see the same preprocess_fn output. Detector is saved and then loaded, with assertions checking that the reinstantiated detector is equivalent. @@ -325,11 +127,12 @@ def test_save_ksdrift(data, preprocess_fn, tmp_path): cd_load.predict(X_h0)['data']['p_val']) +@pytest.mark.skipif(backend == 'sklearn', reason="Don't test with sklearn preprocessing.") @parametrize('preprocess_fn', [preprocess_nlp]) @parametrize_with_cases("data", cases=TextData.movie_sentiment_data, prefix='data_') -def test_save_ksdrift_nlp(data, preprocess_fn, max_len, enc_dim, tmp_path): +def test_save_ksdrift_nlp(data, preprocess_fn, enc_dim, tmp_path): # noqa: F811 """ - Test KSDrift on continuous datasets, with UAE and classifier softmax output as preprocess_fn's. Only this + Test KSDrift on continuous datasets, with UAE and classifier_model softmax output as preprocess_fn's. Only this detector is tested with embedding and embedding+uae, as other detectors should see the same preprocessed data. Detector is saved and then loaded, with assertions checking that the reinstantiated detector is equivalent. @@ -340,7 +143,7 @@ def test_save_ksdrift_nlp(data, preprocess_fn, max_len, enc_dim, tmp_path): p_val=P_VAL, preprocess_fn=preprocess_fn, preprocess_at_init=True, - input_shape=(max_len,), + input_shape=(768,), # hardcoded to bert-base-cased for now ) save_detector(cd, tmp_path, legacy=False) cd_load = load_detector(tmp_path) @@ -393,12 +196,15 @@ def test_save_cvmdrift(data, preprocess_custom, tmp_path): ], indirect=True ) @parametrize_with_cases("data", cases=ContinuousData, prefix='data_') -def test_save_mmddrift(data, kernel, preprocess_custom, backend, tmp_path, seed): +def test_save_mmddrift(data, kernel, preprocess_custom, backend, tmp_path, seed): # noqa: F811 """ Test MMDDrift on continuous datasets, with UAE as preprocess_fn. Detector is saved and then loaded, with assertions checking that the reinstantiated detector is equivalent. """ + if backend not in ('tensorflow', 'pytorch', 'keops'): + pytest.skip("Detector doesn't have this backend") + # Init detector and make predictions X_ref, X_h0 = data with fixed_seed(seed): @@ -441,6 +247,9 @@ def test_save_lsdddrift(data, preprocess_at_init, backend, tmp_path, seed): Detector is saved and then loaded, with assertions checking that the reinstantiated detector is equivalent. """ + if backend not in ('tensorflow', 'pytorch'): + pytest.skip("Detector doesn't have this backend") + preprocess_fn = preprocess_simple # TODO - TensorFlow based preprocessors currently cause in-deterministic behaviour with LSDD permutations. Replace # preprocess_simple with parametrized preprocess_fn's once above issue resolved. 
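
The sklearn branch added to `test_save_classifierdrift` further down exercises the new ability to save and reload detectors built with `backend='sklearn'`. As a rough usage sketch (not part of the diff; the reference data and output directory below are made up):

```python
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from alibi_detect.cd import ClassifierDrift
from alibi_detect.saving import save_detector, load_detector

x_ref = np.random.randn(100, 4).astype(np.float32)  # made-up reference data
cd = ClassifierDrift(x_ref, model=RandomForestClassifier(), backend='sklearn', p_val=0.05, n_folds=5)
save_detector(cd, 'clf_drift_detector')   # config-based saving now covers the sklearn backend
cd_loaded = load_detector('clf_drift_detector')
```
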
@@ -562,13 +371,16 @@ def test_save_tabulardrift(data, tmp_path): @parametrize_with_cases("data", cases=ContinuousData, prefix='data_') -def test_save_classifierdrift(data, classifier, backend, tmp_path, seed): +def test_save_classifierdrift(data, classifier_model, backend, tmp_path, seed): # noqa: F811 """ Test ClassifierDrift on continuous datasets.""" + if backend not in ('tensorflow', 'pytorch', 'sklearn'): + pytest.skip("Detector doesn't have this backend") + # Init detector and predict X_ref, X_h0 = data with fixed_seed(seed): cd = ClassifierDrift(X_ref, - model=classifier, + model=classifier_model, p_val=P_VAL, n_folds=5, backend=backend, @@ -585,9 +397,12 @@ def test_save_classifierdrift(data, classifier, backend, tmp_path, seed): np.testing.assert_array_equal(X_ref, cd_load._detector.x_ref) assert isinstance(cd_load._detector.skf, StratifiedKFold) assert cd_load._detector.p_val == P_VAL - assert isinstance(cd_load._detector.train_kwargs, dict) + if backend != 'sklearn': + assert isinstance(cd_load._detector.train_kwargs, dict) if backend == 'tensorflow': assert isinstance(cd_load._detector.model, tf.keras.Model) + elif backend == 'sklearn': + assert isinstance(cd_load._detector.model, sklearn.base.BaseEstimator) else: pass # TODO # TODO - detector still not deterministic, investigate in future @@ -596,12 +411,15 @@ def test_save_classifierdrift(data, classifier, backend, tmp_path, seed): @parametrize_with_cases("data", cases=ContinuousData, prefix='data_') -def test_save_spotthediff(data, classifier, backend, tmp_path, seed): +def test_save_spotthediff(data, classifier_model, backend, tmp_path, seed): # noqa: F811 """ Test SpotTheDiffDrift on continuous datasets. Detector is saved and then loaded, with assertions checking that the reinstantiated detector is equivalent. """ + if backend not in ('tensorflow', 'pytorch'): + pytest.skip("Detector doesn't have this backend") + # Init detector and predict X_ref, X_h0 = data with fixed_seed(seed): @@ -637,12 +455,15 @@ def test_save_spotthediff(data, classifier, backend, tmp_path, seed): ], indirect=True ) @parametrize_with_cases("data", cases=ContinuousData, prefix='data_') -def test_save_learnedkernel(data, deep_kernel, backend, tmp_path, seed): +def test_save_learnedkernel(data, deep_kernel, backend, tmp_path, seed): # noqa: F811 """ Test LearnedKernelDrift on continuous datasets. Detector is saved and then loaded, with assertions checking that the reinstantiated detector is equivalent. """ + if backend not in ('tensorflow', 'pytorch', 'keops'): + pytest.skip("Detector doesn't have this backend") + # Init detector and predict X_ref, X_h0 = data with fixed_seed(seed): @@ -677,12 +498,15 @@ def test_save_learnedkernel(data, deep_kernel, backend, tmp_path, seed): ], indirect=True ) @parametrize_with_cases("data", cases=ContinuousData, prefix='data_') -def test_save_contextmmddrift(data, kernel, backend, tmp_path, seed): +def test_save_contextmmddrift(data, kernel, backend, tmp_path, seed): # noqa: F811 """ Test ContextMMDDrift on continuous datasets, with UAE as preprocess_fn. Detector is saved and then loaded, with assertions checking that the reinstantiated detector is equivalent. 
""" + if backend not in ('tensorflow', 'pytorch'): + pytest.skip("Detector doesn't have this backend") + # Init detector and make predictions X_ref, X_h0 = data C_ref, C_h0 = (X_ref[:, 0] + 1).reshape(-1, 1), (X_h0[:, 0] + 1).reshape(-1, 1) @@ -721,13 +545,16 @@ def test_save_contextmmddrift(data, kernel, backend, tmp_path, seed): @parametrize_with_cases("data", cases=ContinuousData, prefix='data_') -def test_save_classifieruncertaintydrift(data, classifier, backend, tmp_path, seed): +def test_save_classifieruncertaintydrift(data, classifier_model, backend, tmp_path, seed): # noqa: F811 """ Test ClassifierDrift on continuous datasets.""" + if backend not in ('tensorflow', 'pytorch'): + pytest.skip("Detector doesn't have this backend") + # Init detector and predict X_ref, X_h0 = data with fixed_seed(seed): cd = ClassifierUncertaintyDrift(X_ref, - model=classifier, + model=classifier_model, p_val=P_VAL, backend=backend, preds_type='probs', @@ -751,6 +578,9 @@ def test_save_classifieruncertaintydrift(data, classifier, backend, tmp_path, se @parametrize('regressor', [encoder_model]) def test_save_regressoruncertaintydrift(data, regressor, backend, tmp_path, seed): """ Test RegressorDrift on continuous datasets.""" + if backend not in ('tensorflow', 'pytorch'): + pytest.skip("Detector doesn't have this backend") + # Init detector and predict X_ref, X_h0 = data with fixed_seed(seed): @@ -781,12 +611,15 @@ def test_save_regressoruncertaintydrift(data, regressor, backend, tmp_path, seed ], indirect=True ) @parametrize_with_cases("data", cases=ContinuousData, prefix='data_') -def test_save_onlinemmddrift(data, kernel, preprocess_custom, backend, tmp_path, seed): +def test_save_onlinemmddrift(data, kernel, preprocess_custom, backend, tmp_path, seed): # noqa: F811 """ Test MMDDriftOnline on continuous datasets, with UAE as preprocess_fn. Detector is saved and then loaded, with assertions checking that the reinstantiated detector is equivalent. """ + if backend not in ('tensorflow', 'pytorch'): + pytest.skip("Detector doesn't have this backend") + # Init detector and make predictions X_ref, X_h0 = data @@ -833,6 +666,9 @@ def test_save_onlinelsdddrift(data, preprocess_custom, backend, tmp_path, seed): Detector is saved and then loaded, with assertions checking that the reinstantiated detector is equivalent. """ + if backend not in ('tensorflow', 'pytorch'): + pytest.skip("Detector doesn't have this backend") + # Init detector and make predictions X_ref, X_h0 = data @@ -878,6 +714,9 @@ def test_save_onlinecvmdrift(data, preprocess_custom, tmp_path, seed): Detector is saved and then loaded, with assertions checking that the reinstantiated detector is equivalent. """ + if backend not in ('tensorflow', 'pytorch'): + pytest.skip("Detector doesn't have this backend") + # Init detector and make predictions X_ref, X_h0 = data @@ -920,6 +759,9 @@ def test_save_onlinefetdrift(data, tmp_path, seed): Detector is saved and then loaded, with assertions checking that the reinstantiated detector is equivalent. """ + if backend not in ('tensorflow', 'pytorch'): + pytest.skip("Detector doesn't have this backend") + # Init detector and make predictions X_ref, X_h0 = data @@ -1010,7 +852,7 @@ def test_version_warning(data, tmp_path): {'sigma': None, 'trainable': True, 'init_sigma_fn': None}, ], indirect=True ) -def test_save_kernel(kernel, backend, tmp_path): +def test_save_kernel(kernel, backend, tmp_path): # noqa: F811 """ Unit test for _save/_load_kernel_config, when kernel is a GaussianRBF kernel. 
@@ -1053,7 +895,7 @@ def test_save_kernel(kernel, backend, tmp_path): {'kernel_a': {'trainable': True}, 'kernel_b': 'rbf', 'eps': 0.01}, # Explicit kernel_a, fixed eps ], indirect=True ) -def test_save_deepkernel(data, deep_kernel, backend, tmp_path): +def test_save_deepkernel(data, deep_kernel, backend, tmp_path): # noqa: F811 """ Unit test for _save/_load_kernel_config, when kernel is a DeepKernel kernel. @@ -1061,19 +903,19 @@ def test_save_deepkernel(data, deep_kernel, backend, tmp_path): """ # Get data dim X, _ = data - input_dim = X.shape[1] + input_shape = (X.shape[1],) # Save kernel to config filepath = tmp_path filename = 'mykernel' cfg_kernel = _save_kernel_config(deep_kernel, filepath, filename) - cfg_kernel['proj'], _ = _save_model_config(cfg_kernel['proj'], base_path=filepath, input_shape=input_dim, - backend=backend) + cfg_kernel['proj'], _ = _save_model_config(cfg_kernel['proj'], base_path=filepath, input_shape=input_shape) cfg_kernel = _path2str(cfg_kernel) - cfg_kernel['proj'] = ModelConfig(**cfg_kernel['proj']).dict() # Pass thru ModelConfig to set `custom_objects` etc + cfg_kernel['proj'] = ModelConfig(**cfg_kernel['proj']).dict() # Pass thru ModelConfig to set `layers` etc cfg_kernel = DeepKernelConfig(**cfg_kernel).dict() # pydantic validation assert cfg_kernel['proj']['src'] == 'model' assert cfg_kernel['proj']['custom_objects'] is None + assert cfg_kernel['proj']['layer'] is None # Resolve and load config cfg = {'kernel': cfg_kernel, 'backend': backend} @@ -1105,11 +947,8 @@ def test_save_preprocess(data, preprocess_fn, tmp_path, backend): # Save preprocess_fn to config filepath = tmp_path X_ref, X_h0 = data - input_dim = X_ref.shape[1] - cfg_preprocess = _save_preprocess_config(preprocess_fn, - backend=backend, - input_shape=input_dim, - filepath=filepath) + input_shape = (X_ref.shape[1],) + cfg_preprocess = _save_preprocess_config(preprocess_fn, input_shape=input_shape, filepath=filepath) cfg_preprocess = _path2str(cfg_preprocess) cfg_preprocess = PreprocessConfig(**cfg_preprocess).dict() # pydantic validation assert cfg_preprocess['src'] == '@cd.' + backend + '.preprocess.preprocess_drift' @@ -1126,7 +965,7 @@ def test_save_preprocess(data, preprocess_fn, tmp_path, backend): @parametrize('preprocess_fn', [preprocess_nlp]) @parametrize_with_cases("data", cases=TextData.movie_sentiment_data, prefix='data_') -def test_save_preprocess_nlp(data, preprocess_fn, max_len, tmp_path, backend): +def test_save_preprocess_nlp(data, preprocess_fn, tmp_path, backend): """ Unit test for _save_preprocess_config and _load_preprocess_config, with text data. 
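
As with the model config, `_save_preprocess_config` no longer takes a `backend` argument and `input_shape` is now a tuple. A minimal sketch of the updated call (the encoder model and output directory are hypothetical):

```python
from functools import partial
from pathlib import Path
import tensorflow as tf
from alibi_detect.cd.tensorflow import preprocess_drift
from alibi_detect.saving.saving import _save_preprocess_config

encoder = tf.keras.Sequential([tf.keras.layers.Dense(2, input_shape=(4,))])  # hypothetical encoder
preprocess_fn = partial(preprocess_drift, model=encoder)

out_dir = Path('my_detector')
out_dir.mkdir(exist_ok=True)
cfg_preprocess = _save_preprocess_config(preprocess_fn, input_shape=(4,), filepath=out_dir)
```
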
@@ -1136,8 +975,7 @@ def test_save_preprocess_nlp(data, preprocess_fn, max_len, tmp_path, backend): # Save preprocess_fn to config filepath = tmp_path cfg_preprocess = _save_preprocess_config(preprocess_fn, - backend=backend, - input_shape=max_len, + input_shape=(768,), # hardcoded to bert-base-cased for now filepath=filepath) cfg_preprocess = _path2str(cfg_preprocess) cfg_preprocess = PreprocessConfig(**cfg_preprocess).dict() # pydantic validation @@ -1156,69 +994,14 @@ def test_save_preprocess_nlp(data, preprocess_fn, max_len, tmp_path, backend): assert isinstance(preprocess_fn_load.keywords['tokenizer'], type(preprocess_fn.keywords['tokenizer'])) assert isinstance(preprocess_fn_load.keywords['model'], type(preprocess_fn.keywords['model'])) if isinstance(preprocess_fn.keywords['model'], (TransformerEmbedding_tf, TransformerEmbedding_pt)): - embedding = preprocess_fn.keywords['model'] - embedding_load = preprocess_fn_load.keywords['model'] + emb = preprocess_fn.keywords['model'] + emb_load = preprocess_fn_load.keywords['model'] else: - embedding = preprocess_fn.keywords['model'].encoder.layers[0] - embedding_load = preprocess_fn_load.keywords['model'].encoder.layers[0] - assert isinstance(embedding_load.model, type(embedding.model)) - assert embedding_load.emb_type == embedding.emb_type - assert embedding_load.hs_emb.keywords['layers'] == embedding.hs_emb.keywords['layers'] - - -@parametrize_with_cases("data", cases=ContinuousData.data_synthetic_nd, prefix='data_') -@parametrize('model', [encoder_model]) -@parametrize('layer', [None, -1]) -def test_save_model(data, model, layer, backend, tmp_path): - """ - Unit test for _save_model_config and _load_model_config. - """ - # Save model - filepath = tmp_path - input_dim = data[0].shape[1] - cfg_model, _ = _save_model_config(model, base_path=filepath, input_shape=input_dim, backend=backend) - cfg_model = _path2str(cfg_model) - cfg_model = ModelConfig(**cfg_model).dict() - assert tmp_path.joinpath('model').is_dir() - assert tmp_path.joinpath('model/model.h5').is_file() - - # Adjust config - cfg_model['src'] = tmp_path.joinpath('model') # Need to manually set to absolute path here - if layer is not None: - cfg_model['layer'] = layer - - # Load model - model_load = _load_model_config(cfg_model, backend=backend) - if layer is None: - assert isinstance(model_load, type(model)) - else: - assert isinstance(model_load, (HiddenOutput_tf, HiddenOutput_pt)) - - -def test_save_optimizer(backend): - class_name = 'Adam' - learning_rate = 0.01 - epsilon = 1e-7 - amsgrad = False - - if backend == 'tensorflow': - # Load - cfg_opt = { - 'class_name': class_name, - 'config': { - 'name': class_name, - 'learning_rate': learning_rate, - 'epsilon': epsilon, - 'amsgrad': amsgrad - } - } - optimizer = _load_optimizer_config(cfg_opt, backend=backend) - assert type(optimizer).__name__ == class_name - assert optimizer.learning_rate == learning_rate - assert optimizer.epsilon == epsilon - assert optimizer.amsgrad == amsgrad - - # TODO - pytorch + emb = preprocess_fn.keywords['model'].encoder.layers[0] + emb_load = preprocess_fn_load.keywords['model'].encoder.layers[0] + assert isinstance(emb_load.model, type(emb.model)) + assert emb_load.emb_type == emb.emb_type + assert emb_load.hs_emb.keywords['layers'] == emb.hs_emb.keywords['layers'] def test_nested_value(): @@ -1347,6 +1130,9 @@ def test_set_dtypes(backend): dtype = 'tf.float32' elif backend == 'pytorch': dtype = 'torch.float32' + else: + pytest.skip('Only test set_dtypes for tensorflow and pytorch.') + cfg = { 
'preprocess_fn': { 'dtype': dtype diff --git a/alibi_detect/saving/tests/test_validate.py b/alibi_detect/saving/tests/test_validate.py index 19780d69f..21bc3a250 100644 --- a/alibi_detect/saving/tests/test_validate.py +++ b/alibi_detect/saving/tests/test_validate.py @@ -4,18 +4,17 @@ from alibi_detect.saving import validate_config from alibi_detect.saving.saving import X_REF_FILENAME -from alibi_detect.version import __config_spec__, __version__ +from alibi_detect.version import __version__ from copy import deepcopy # Define a detector config dict mmd_cfg = { 'meta': { 'version': __version__, - 'config_spec': __config_spec__, }, 'name': 'MMDDrift', 'x_ref': np.array([[-0.30074928], [1.50240758], [0.43135768], [2.11295779], [0.79684913]]), - 'p_val': 0.05 + 'p_val': 0.05, } # Define a detector config dict without meta (as simple as it gets!) @@ -32,7 +31,6 @@ def test_validate_config(cfg): # Check cfg is returned with correct metadata meta = cfg_full.get('meta') # pop as don't want to compare meta to cfg in next bit assert meta['version'] == __version__ - assert meta['config_spec'] == __config_spec__ assert not meta.pop('version_warning') # pop this one to remove from next check # Check remaining values of items in cfg unchanged @@ -45,19 +43,13 @@ def test_validate_config(cfg): _ = validate_config(cfg_unres) assert not cfg.get('meta').get('version_warning') - # Check warning raised and warning field added if version or config_spec different + # Check warning raised and warning field added if version different cfg_err = cfg.copy() cfg_err['meta']['version'] = '0.1.x' with pytest.warns(Warning): # error will be raised if a warning IS NOT raised cfg_err = validate_config(cfg_err, resolved=True) assert cfg_err.get('meta').get('version_warning') - cfg_err = cfg.copy() - cfg_err['meta']['config_spec'] = '0.x' - with pytest.warns(Warning): # error will be raised if a warning IS NOT raised - cfg_err = validate_config(cfg_err, resolved=True) - assert cfg_err.get('meta').get('version_warning') - # Check ValueError raised if name unrecognised cfg_err = cfg.copy() cfg_err['name'] = 'MMDDriftWrong' diff --git a/alibi_detect/saving/validate.py b/alibi_detect/saving/validate.py index 235a74965..672ee7431 100644 --- a/alibi_detect/saving/validate.py +++ b/alibi_detect/saving/validate.py @@ -2,7 +2,7 @@ from alibi_detect.saving.schemas import ( # type: ignore[attr-defined] DETECTOR_CONFIGS, DETECTOR_CONFIGS_RESOLVED) -from alibi_detect.version import __config_spec__, __version__ +from alibi_detect.version import __version__ def validate_config(cfg: dict, resolved: bool = False) -> dict: @@ -41,7 +41,6 @@ def validate_config(cfg: dict, resolved: bool = False) -> dict: meta = {} if meta is None else meta # Needed because pydantic sets meta=None if it is missing from the config version_warning = meta.get('version_warning', False) version = meta.get('version', None) - config_spec = meta.get('config_spec', None) # Raise warning if config file already contains a version_warning if version_warning: @@ -54,11 +53,4 @@ def validate_config(cfg: dict, resolved: bool = False) -> dict: f'{__version__}. This may lead to breaking code or invalid results.') cfg['meta'].update({'version_warning': True}) - # Check config specification version - if config_spec is not None and config_spec != __config_spec__: - warnings.warn(f'Config has specification {version} when the installed ' - f'alibi-detect version expects specification {__config_spec__}.' 
- 'This may lead to breaking code or invalid results.') - cfg['meta'].update({'version_warning': True}) - return cfg diff --git a/alibi_detect/tests/test_datasets.py b/alibi_detect/tests/test_datasets.py index fbe324e19..26b02c6a9 100644 --- a/alibi_detect/tests/test_datasets.py +++ b/alibi_detect/tests/test_datasets.py @@ -2,6 +2,7 @@ import pandas as pd import pytest from requests import RequestException +from urllib.error import URLError from alibi_detect.datasets import fetch_kdd, fetch_ecg, corruption_types_cifar10c, fetch_cifar10c, \ fetch_attack, fetch_nab, get_list_nab from alibi_detect.utils.data import Bunch @@ -24,7 +25,7 @@ def test_fetch_kdd(return_X_y): keep_cols = np.random.choice(keep_cols_list, 5, replace=False) try: data = fetch_kdd(target=target, keep_cols=keep_cols, percent10=True, return_X_y=return_X_y) - except RequestException: + except URLError: pytest.skip('KDD dataset URL down') if return_X_y: assert isinstance(data, tuple) @@ -53,13 +54,19 @@ def test_fetch_ecg(return_X_y): # CIFAR-10-C dataset -corruption_list = corruption_types_cifar10c() +try: + corruption_list = corruption_types_cifar10c() +except RequestException: + corruption_list = None +@pytest.mark.skipif(corruption_list is None, reason="CIFAR-10-C dataset URL is down") def test_types_cifar10c(): + print(corruption_list) assert len(corruption_list) == 19 +@pytest.mark.skipif(corruption_list is None, reason="CIFAR-10-C dataset URL is down") @pytest.mark.parametrize('return_X_y', [True, False]) def test_fetch_cifar10c(return_X_y): corruption = list(np.random.choice(corruption_list, 5, replace=False)) diff --git a/alibi_detect/tests/test_dep_management.py b/alibi_detect/tests/test_dep_management.py index 4ee06fd8f..e6f3fde89 100644 --- a/alibi_detect/tests/test_dep_management.py +++ b/alibi_detect/tests/test_dep_management.py @@ -66,8 +66,8 @@ def test_cd_torch_dependencies(opt_dep): dependency_map = defaultdict(lambda: ['default']) for dependency, relations in [ - ("HiddenOutput", ['torch']), - ("preprocess_drift", ['torch']) + ("HiddenOutput", ['torch', 'keops']), + ("preprocess_drift", ['torch', 'keops']) ]: dependency_map[dependency] = relations from alibi_detect.cd import pytorch as cd_pytorch @@ -156,8 +156,8 @@ def test_torch_model_dependencies(opt_dep): dependency_map = defaultdict(lambda: ['default']) for dependency, relations in [ - ("TransformerEmbedding", ['torch']), - ("trainer", ['torch']), + ("TransformerEmbedding", ['torch', 'keops']), + ("trainer", ['torch', 'keops']), ]: dependency_map[dependency] = relations from alibi_detect.models import pytorch as torch_models @@ -190,7 +190,7 @@ def test_fetching_utils_dependencies(opt_dep): def test_saving_tf_dependencies(opt_dep): - """Tests that the alibi_detect.saving.tensorflow module correctly protects against uninstalled optional + """Tests that the alibi_detect.saving._tensorflow module correctly protects against uninstalled optional dependencies. 
""" @@ -208,7 +208,7 @@ def test_saving_tf_dependencies(opt_dep): ('get_tf_dtype', ['tensorflow']) ]: dependency_map[dependency] = relations - from alibi_detect.saving import tensorflow as tf_saving + from alibi_detect.saving import _tensorflow as tf_saving check_correct_dependencies(tf_saving, dependency_map, opt_dep) @@ -255,20 +255,34 @@ def test_torch_utils_dependencies(opt_dep): dependency_map = defaultdict(lambda: ['default']) for dependency, relations in [ - ("batch_compute_kernel_matrix", ['torch']), - ("mmd2", ['torch']), - ("mmd2_from_kernel_matrix", ['torch']), - ("squared_pairwise_distance", ['torch']), - ("GaussianRBF", ['torch']), - ("DeepKernel", ['torch']), - ("permed_lsdds", ['torch']), - ("predict_batch", ['torch']), - ("predict_batch_transformer", ['torch']), - ("quantile", ['torch']), - ("zero_diag", ['torch']), - ("TorchDataset", ['torch']), - ("get_device", ['torch']), + ("batch_compute_kernel_matrix", ['torch', 'keops']), + ("mmd2", ['torch', 'keops']), + ("mmd2_from_kernel_matrix", ['torch', 'keops']), + ("squared_pairwise_distance", ['torch', 'keops']), + ("GaussianRBF", ['torch', 'keops']), + ("DeepKernel", ['torch', 'keops']), + ("permed_lsdds", ['torch', 'keops']), + ("predict_batch", ['torch', 'keops']), + ("predict_batch_transformer", ['torch', 'keops']), + ("quantile", ['torch', 'keops']), + ("zero_diag", ['torch', 'keops']), + ("TorchDataset", ['torch', 'keops']), + ("get_device", ['torch', 'keops']), ]: dependency_map[dependency] = relations from alibi_detect.utils import pytorch as pytorch_utils check_correct_dependencies(pytorch_utils, dependency_map, opt_dep) + + +def test_keops_utils_dependencies(opt_dep): + """Tests that the keops utils module correctly protects against uninstalled optional dependencies. + """ + + dependency_map = defaultdict(lambda: ['default']) + for dependency, relations in [ + ("GaussianRBF", ['keops']), + ("DeepKernel", ['keops']), + ]: + dependency_map[dependency] = relations + from alibi_detect.utils import keops as keops_utils + check_correct_dependencies(keops_utils, dependency_map, opt_dep) diff --git a/alibi_detect/utils/_types.py b/alibi_detect/utils/_types.py index 6cf5bf09d..5a3a38cbd 100644 --- a/alibi_detect/utils/_types.py +++ b/alibi_detect/utils/_types.py @@ -6,6 +6,8 @@ import numpy as np from numpy.lib import NumpyVersion from pydantic.fields import ModelField +from sklearn.base import BaseEstimator # import here (instead of later) since sklearn currently a core dep +from alibi_detect.utils.frameworks import has_tensorflow, has_pytorch # Literal for typing if sys.version_info >= (3, 8): @@ -51,3 +53,19 @@ def _validate(cls: Type, val: Any, field: ModelField) -> np.ndarray: return np.asarray(val, dtype=dtype_field.type_) else: return np.asarray(val) + + +# Optional dep dependent tuples of types +supported_models_tf, supported_models_torch, supported_models_sklearn = (), (), () # type: ignore +supported_optimizers_tf, supported_optimizers_torch = (), () # type: ignore +if has_tensorflow: + import tensorflow as tf + supported_models_tf = (tf.keras.Model, ) # type: ignore + supported_optimizers_tf = (tf.keras.optimizers.Optimizer, ) # type: ignore +if has_pytorch: + import torch + supported_models_torch = (torch.nn.Module, torch.nn.Sequential) # type: ignore + supported_optimizers_torch = (torch.optim.Optimizer, ) # type: ignore +supported_models_sklearn = (BaseEstimator, ) # type: ignore +supported_models_all = supported_models_tf + supported_models_torch + supported_models_sklearn +supported_optimizers_all = 
supported_optimizers_tf + supported_optimizers_torch diff --git a/alibi_detect/utils/frameworks.py b/alibi_detect/utils/frameworks.py index b1def72af..233f6cf26 100644 --- a/alibi_detect/utils/frameworks.py +++ b/alibi_detect/utils/frameworks.py @@ -1,5 +1,14 @@ from .missing_optional_dependency import ERROR_TYPES from typing import Optional, List, Dict, Iterable +from enum import Enum + + +class Framework(str, Enum): + PYTORCH = 'pytorch' + TENSORFLOW = 'tensorflow' + KEOPS = 'keops' + SKLEARN = 'sklearn' + try: import tensorflow as tf # noqa @@ -14,12 +23,19 @@ except ImportError: has_pytorch = False +try: + import pykeops # noqa + import torch # noqa + has_keops = True +except ImportError: + has_keops = False # Map from backend name to boolean value indicating its presence HAS_BACKEND = { 'tensorflow': has_tensorflow, 'pytorch': has_pytorch, - 'sklearn': True + 'sklearn': True, + 'keops': has_keops, } diff --git a/alibi_detect/utils/keops/__init__.py b/alibi_detect/utils/keops/__init__.py new file mode 100644 index 000000000..36dc22971 --- /dev/null +++ b/alibi_detect/utils/keops/__init__.py @@ -0,0 +1,12 @@ +from alibi_detect.utils.missing_optional_dependency import import_optional + + +GaussianRBF, DeepKernel = import_optional( + 'alibi_detect.utils.keops.kernels', + names=['GaussianRBF', 'DeepKernel'] +) + +__all__ = [ + "GaussianRBF", + "DeepKernel" +] diff --git a/alibi_detect/utils/keops/kernels.py b/alibi_detect/utils/keops/kernels.py new file mode 100644 index 000000000..5e1f6bb53 --- /dev/null +++ b/alibi_detect/utils/keops/kernels.py @@ -0,0 +1,178 @@ +from pykeops.torch import LazyTensor +import torch +import torch.nn as nn +from typing import Callable, Optional, Union + + +def sigma_mean(x: LazyTensor, y: LazyTensor, dist: LazyTensor, n_min: int = 100) -> torch.Tensor: + """ + Set bandwidth to the mean distance between instances x and y. + + Parameters + ---------- + x + LazyTensor of instances with dimension [Nx, 1, features] or [batch_size, Nx, 1, features]. + The singleton dimension is necessary for broadcasting. + y + LazyTensor of instances with dimension [1, Ny, features] or [batch_size, 1, Ny, features]. + The singleton dimension is necessary for broadcasting. + dist + LazyTensor with dimensions [Nx, Ny] or [batch_size, Nx, Ny] containing the + pairwise distances between `x` and `y`. + n_min + In order to check whether x equals y after squeezing the singleton dimensions, we check if the + diagonal of the distance matrix (which is a lazy tensor from which the diagonal cannot be directly extracted) + consists of all zeros. We do this by computing the k-min distances and k-argmin indices over the + columns of the distance matrix. We then check if the distances on the diagonal of the distance matrix + are all zero or not. If they are all zero, then we do not use these distances (zeros) when computing + the mean pairwise distance as bandwidth. If Nx becomes very large, it is advised to set `n_min` + to a low enough value to avoid OOM issues. By default we set it to 100 instances. + + Returns + ------- + The computed bandwidth, `sigma`. 
+ """ + batched = len(dist.shape) == 3 + if not batched: + nx, ny = dist.shape + axis = 1 + else: + batch_size, nx, ny = dist.shape + axis = 2 + n_mean = nx * ny + if nx == ny: + n_min = min(n_min, nx) if isinstance(n_min, int) else nx + d_min, id_min = dist.Kmin_argKmin(n_min, axis=axis) + if batched: + d_min, id_min = d_min[0], id_min[0] # first instance in permutation test contains the original data + rows, cols = torch.where(id_min.cpu() == torch.arange(nx)[:, None]) + if (d_min[rows, cols] == 0.).all(): + n_mean = nx * (nx - 1) + dist_sum = dist.sum(1).sum(1)[0] if batched else dist.sum(1).sum().unsqueeze(-1) + sigma = (.5 * dist_sum / n_mean) ** .5 + return sigma + + +class GaussianRBF(nn.Module): + def __init__( + self, + sigma: Optional[torch.Tensor] = None, + init_sigma_fn: Callable = None, + trainable: bool = False + ) -> None: + """ + Gaussian RBF kernel: k(x,y) = exp(-(1/(2*sigma^2)||x-y||^2). A forward pass takes + a batch of instances x and y and returns the kernel matrix. + x can be of shape [Nx, 1, features] or [batch_size, Nx, 1, features]. + y can be of shape [1, Ny, features] or [batch_size, 1, Ny, features]. + The returned kernel matrix can be of shape [Nx, Ny] or [batch_size, Nx, Ny]. + x, y and the returned kernel matrix are all lazy tensors. + + Parameters + ---------- + sigma + Bandwidth used for the kernel. Needn't be specified if being inferred or trained. + Can pass multiple values to eval kernel with and then average. + init_sigma_fn + Function used to compute the bandwidth `sigma`. Used when `sigma` is to be inferred. + The function's signature should match :py:func:`~alibi_detect.utils.keops.kernels.sigma_mean`, + meaning that it should take in the lazy tensors `x`, `y` and `dist` and return a tensor `sigma`. + trainable + Whether or not to track gradients w.r.t. `sigma` to allow it to be trained. + """ + super().__init__() + init_sigma_fn = sigma_mean if init_sigma_fn is None else init_sigma_fn + if sigma is None: + self.log_sigma = nn.Parameter(torch.empty(1), requires_grad=trainable) + self.init_required = True + else: + sigma = sigma.reshape(-1) # [Ns,] + self.log_sigma = nn.Parameter(sigma.log(), requires_grad=trainable) + self.init_required = False + self.init_sigma_fn = init_sigma_fn + self.trainable = trainable + + @property + def sigma(self) -> torch.Tensor: + return self.log_sigma.exp() + + def forward(self, x: LazyTensor, y: LazyTensor, infer_sigma: bool = False) -> LazyTensor: + + dist = ((x - y) ** 2).sum(-1) + + if infer_sigma or self.init_required: + if self.trainable and infer_sigma: + raise ValueError("Gradients cannot be computed w.r.t. an inferred sigma value") + sigma = self.init_sigma_fn(x, y, dist) + with torch.no_grad(): + self.log_sigma.copy_(sigma.log().clone()) + self.init_required = False + + gamma = 1. / (2. * self.sigma ** 2) + gamma = LazyTensor(gamma[None, None, :]) if len(dist.shape) == 2 else LazyTensor(gamma[None, None, None, :]) + kernel_mat = (- gamma * dist).exp() + if len(dist.shape) < len(gamma.shape): + kernel_mat = kernel_mat.sum(-1) / len(self.sigma) + return kernel_mat + + +class DeepKernel(nn.Module): + def __init__( + self, + proj: nn.Module, + kernel_a: nn.Module = GaussianRBF(trainable=True), + kernel_b: Optional[nn.Module] = GaussianRBF(trainable=True), + eps: Union[float, str] = 'trainable' + ) -> None: + """ + Computes similarities as k(x,y) = (1-eps)*k_a(proj(x), proj(y)) + eps*k_b(x,y). 
+ A forward pass takes an already projected batch of instances x_proj and y_proj and optionally + (if k_b is present) a batch of instances x and y and returns the kernel matrix. + x_proj can be of shape [Nx, 1, features_proj] or [batch_size, Nx, 1, features_proj]. + y_proj can be of shape [1, Ny, features_proj] or [batch_size, 1, Ny, features_proj]. + x can be of shape [Nx, 1, features] or [batch_size, Nx, 1, features]. + y can be of shape [1, Ny, features] or [batch_size, 1, Ny, features]. + The returned kernel matrix can be of shape [Nx, Ny] or [batch_size, Nx, Ny]. + x, y and the returned kernel matrix are all lazy tensors. + + Parameters + ---------- + proj + The projection to be applied to the inputs before applying kernel_a + kernel_a + The kernel to apply to the projected inputs. Defaults to a Gaussian RBF with trainable bandwidth. + kernel_b + The kernel to apply to the raw inputs. Defaults to a Gaussian RBF with trainable bandwidth. + Set to None in order to use only the deep component (i.e. eps=0). + eps + The proportion (in [0,1]) of weight to assign to the kernel applied to raw inputs. This can be + either specified or set to 'trainable'. Only relavent if kernel_b is not None. + """ + super().__init__() + + self.kernel_a = kernel_a + self.kernel_b = kernel_b + self.proj = proj + if kernel_b is not None: + self._init_eps(eps) + + def _init_eps(self, eps: Union[float, str]) -> None: + if isinstance(eps, float): + if not 0 < eps < 1: + raise ValueError("eps should be in (0,1)") + self.logit_eps = nn.Parameter(torch.tensor(eps).logit(), requires_grad=False) + elif eps == 'trainable': + self.logit_eps = nn.Parameter(torch.tensor(0.)) + else: + raise NotImplementedError("eps should be 'trainable' or a float in (0,1)") + + @property + def eps(self) -> torch.Tensor: + return self.logit_eps.sigmoid() if self.kernel_b is not None else torch.tensor(0.) 
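+    # forward() below combines the two kernels: k(x, y) = (1 - eps) * kernel_a(x_proj, y_proj) + eps * kernel_b(x, y)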
+ + def forward(self, x_proj: LazyTensor, y_proj: LazyTensor, x: Optional[LazyTensor] = None, + y: Optional[LazyTensor] = None) -> LazyTensor: + similarity = self.kernel_a(x_proj, y_proj) + if self.kernel_b is not None: + similarity = (1-self.eps)*similarity + self.eps*self.kernel_b(x, y) + return similarity diff --git a/alibi_detect/utils/keops/tests/test_kernels_keops.py b/alibi_detect/utils/keops/tests/test_kernels_keops.py new file mode 100644 index 000000000..b25554818 --- /dev/null +++ b/alibi_detect/utils/keops/tests/test_kernels_keops.py @@ -0,0 +1,121 @@ +from itertools import product +import numpy as np +from alibi_detect.utils.frameworks import has_keops +import pytest +import torch +import torch.nn as nn +if has_keops: + from pykeops.torch import LazyTensor + from alibi_detect.utils.keops import DeepKernel, GaussianRBF + +sigma = [None, np.array([1.]), np.array([1., 2.])] +n_features = [5, 10] +n_instances = [(100, 100), (100, 75)] +batch_size = [None, 5] +trainable = [True, False] +tests_gk = list(product(sigma, n_features, n_instances, batch_size, trainable)) +n_tests_gk = len(tests_gk) + + +@pytest.fixture +def gaussian_kernel_params(request): + return tests_gk[request.param] + + +@pytest.mark.skipif(not has_keops, reason='Skipping since pykeops is not installed.') +@pytest.mark.parametrize('gaussian_kernel_params', list(range(n_tests_gk)), indirect=True) +def test_gaussian_kernel(gaussian_kernel_params): + sigma, n_features, n_instances, batch_size, trainable = gaussian_kernel_params + + xshape, yshape = (n_instances[0], n_features), (n_instances[1], n_features) + if batch_size: + xshape = (batch_size, ) + xshape + yshape = (batch_size, ) + yshape + sigma = sigma if sigma is None else torch.from_numpy(sigma).float() + x = torch.from_numpy(np.random.random(xshape)).float() + y = torch.from_numpy(np.random.random(yshape)).float() + if batch_size: + x_lazy, y_lazy = LazyTensor(x[:, :, None, :]), LazyTensor(y[:, None, :, :]) + x_lazy2 = LazyTensor(x[:, None, :, :]) + else: + x_lazy, y_lazy = LazyTensor(x[:, None, :]), LazyTensor(y[None, :, :]) + x_lazy2 = LazyTensor(x[None, :, :]) + + kernel = GaussianRBF(sigma=sigma, trainable=trainable) + infer_sigma = True if sigma is None else False + if trainable and infer_sigma: + with pytest.raises(ValueError): + kernel(x_lazy, y_lazy, infer_sigma=infer_sigma) + else: + k_xy = kernel(x_lazy, y_lazy, infer_sigma=infer_sigma) + k_xx = kernel(x_lazy, x_lazy2, infer_sigma=infer_sigma) + k_xy_shape = n_instances + k_xx_shape = (n_instances[0], n_instances[0]) + axis = 1 + if batch_size: + k_xy_shape = (batch_size, ) + k_xy_shape + k_xx_shape = (batch_size, ) + k_xx_shape + axis = 2 + assert k_xy.shape == k_xy_shape and k_xx.shape == k_xx_shape + k_xx_argmax = k_xx.argmax(axis=axis) + k_xx_min, k_xy_min = k_xx.min(axis=axis), k_xy.min(axis=axis) + if batch_size: + k_xx_argmax, k_xx_min, k_xy_min = k_xx_argmax[0], k_xx_min[0], k_xy_min[0] + assert (torch.arange(n_instances[0]) == k_xx_argmax.cpu().view(-1)).all() + assert (k_xx_min >= 0.).all() and (k_xy_min >= 0.).all() + + +if has_keops: + class MyKernel(nn.Module): + def __init__(self): + super().__init__() + + def forward(self, x: LazyTensor, y: LazyTensor) -> LazyTensor: + return (- ((x - y) ** 2).sum(-1)).exp() + + +n_features = [5] +n_instances = [(100, 100), (100, 75)] +kernel_a = ['GaussianRBF', 'MyKernel'] +kernel_b = ['GaussianRBF', 'MyKernel', None] +eps = [0.5, 'trainable'] +tests_dk = list(product(n_features, n_instances, kernel_a, kernel_b, eps)) +n_tests_dk = len(tests_dk) + + 
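For quick reference, the new KeOps kernels can also be exercised directly outside of the parametrized tests. A minimal sketch, with random data and sizes that are purely illustrative: the singleton dimensions follow the `GaussianRBF` docstring, and since `sigma` is not specified the bandwidth is inferred via the mean-distance heuristic on the first call.

```python
import numpy as np
import torch
from pykeops.torch import LazyTensor
from alibi_detect.utils.keops import GaussianRBF

# Lazy tensors with the singleton dimensions required for broadcasting:
# x -> [Nx, 1, features], y -> [1, Ny, features]
x = torch.from_numpy(np.random.random((100, 5))).float()
y = torch.from_numpy(np.random.random((75, 5))).float()
x_lazy, y_lazy = LazyTensor(x[:, None, :]), LazyTensor(y[None, :, :])

kernel = GaussianRBF()         # no sigma given, so it is inferred on the first forward pass
k_xy = kernel(x_lazy, y_lazy)  # lazy [100, 75] kernel matrix
print(k_xy.shape, kernel.sigma)
```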
+@pytest.fixture +def deep_kernel_params(request): + return tests_dk[request.param] + + +@pytest.mark.skipif(not has_keops, reason='Skipping since pykeops is not installed.') +@pytest.mark.parametrize('deep_kernel_params', list(range(n_tests_dk)), indirect=True) +def test_deep_kernel(deep_kernel_params): + n_features, n_instances, kernel_a, kernel_b, eps = deep_kernel_params + + proj = nn.Linear(n_features, n_features) + kernel_a = MyKernel() if kernel_a == 'MyKernel' else GaussianRBF(trainable=True) + if kernel_b == 'MyKernel': + kernel_b = MyKernel() + elif kernel_b == 'GaussianRBF': + kernel_b = GaussianRBF(trainable=True) + kernel = DeepKernel(proj, kernel_a=kernel_a, kernel_b=kernel_b, eps=eps) + + xshape, yshape = (n_instances[0], n_features), (n_instances[1], n_features) + x = torch.as_tensor(np.random.random(xshape).astype('float32')) + y = torch.as_tensor(np.random.random(yshape).astype('float32')) + x_proj, y_proj = kernel.proj(x), kernel.proj(y) + x2_proj, x_proj = LazyTensor(x_proj[None, :, :]), LazyTensor(x_proj[:, None, :]) + y2_proj, y_proj = LazyTensor(y_proj[None, :, :]), LazyTensor(y_proj[:, None, :]) + if kernel_b: + x2, x = LazyTensor(x[None, :, :]), LazyTensor(x[:, None, :]) + y2, y = LazyTensor(y[None, :, :]), LazyTensor(y[:, None, :]) + else: + x, x2, y, y2 = None, None, None, None + + k_xy = kernel(x_proj, y2_proj, x, y2) + k_yx = kernel(y_proj, x2_proj, y, x2) + k_xx = kernel(x_proj, x2_proj, x, x2) + assert k_xy.shape == n_instances and k_xx.shape == (xshape[0], xshape[0]) + assert (k_xx.Kmin_argKmin(1, axis=1)[0] > 0.).all() + assert (torch.abs(k_xy.sum(1).sum(1) - k_yx.t().sum(1).sum(1)) < 1e-5).all() diff --git a/alibi_detect/utils/missing_optional_dependency.py b/alibi_detect/utils/missing_optional_dependency.py index af52409cc..832923a24 100644 --- a/alibi_detect/utils/missing_optional_dependency.py +++ b/alibi_detect/utils/missing_optional_dependency.py @@ -20,21 +20,22 @@ """Mapping used to ensure correct pip install message is generated if a missing optional dependency is detected. This dict is used to control two behaviours: - 1. When we import objects from missing dependencies we check that any `ModuleNotFoundError` or `ImportError` + +1. When we import objects from missing dependencies we check that any `ModuleNotFoundError` or `ImportError` corresponds to a missing optional dependency by checking the name of the missing dependency is in `ERROR_TYPES`. We then map this name to the corresponding optional dependency bucket that will resolve the issue. - 2. Some optional dependencies have multiple names such as `torch` and `pytorch`, instead of enforcing a single +2. Some optional dependencies have multiple names such as `torch` and `pytorch`, instead of enforcing a single naming convention across the whole code base we instead use `ERROR_TYPES` to capture both cases. This is done right before the pip install message is issued as this is the most robust place to capture these differences. 
""" ERROR_TYPES = { - "fbprophet": 'prophet', - "holidays": 'prophet', - "pystan": 'prophet', + "prophet": 'prophet', "tensorflow_probability": 'tensorflow', "tensorflow": 'tensorflow', "torch": 'torch', - "pytorch": 'torch' + "pytorch": 'torch', + "keops": 'keops', + "pykeops": 'keops', } @@ -105,7 +106,7 @@ def import_optional(module_name: str, names: Optional[List[str]] = None) -> Any: return module except (ImportError, ModuleNotFoundError) as err: if err.name is None: - raise TypeError() + raise err dep_name, *_ = err.name.split('.') if str(dep_name) not in ERROR_TYPES: raise err diff --git a/alibi_detect/version.py b/alibi_detect/version.py index 87e12cf5c..714b54047 100644 --- a/alibi_detect/version.py +++ b/alibi_detect/version.py @@ -2,10 +2,5 @@ # 1) we don't load dependencies by storing it in __init__.py # 2) we can import it in setup.py for the same reason # 3) we can import it into your module module -__version__ = "0.10.4" -# Define the config specification version. This is distinct to the library version above. It is only updated when -# any detector config schema is updated, such that loading a previous config spec cannot be guaranteed to work. -# The minor version number is associated with minor changes such as adding/removing/changing kwarg's, whilst the major -# number is reserved for significant changes to the config layout. -__config_spec__ = "0.1" +__version__ = "0.11.0dev" diff --git a/codecov.yml b/codecov.yml index ca612fcc6..12d264eac 100644 --- a/codecov.yml +++ b/codecov.yml @@ -23,3 +23,7 @@ comment: layout: "reach,diff,flags,tree" behavior: default require_changes: no + +ignore: + - "**/tests/*" # ignore anything in tests/ directories + - "**/test_*.py" # ignore test_*.py files even if they are located elsewhere diff --git a/doc/source/cd/methods/learnedkerneldrift.ipynb b/doc/source/cd/methods/learnedkerneldrift.ipynb index 0f4528c6d..3ae9790d8 100644 --- a/doc/source/cd/methods/learnedkerneldrift.ipynb +++ b/doc/source/cd/methods/learnedkerneldrift.ipynb @@ -34,12 +34,11 @@ "\n", "* `x_ref`: Data used as reference distribution.\n", "\n", - "* `kernel`: A differentiable **TensorFlow** or **PyTorch** module that takes two instances as input and returns a scalar notion of similarity as output.\n", - "\n", + "* `kernel`: A differentiable **TensorFlow** or **PyTorch** module that takes two sets of instances as inputs and returns a kernel similarity matrix as output.\n", "\n", "Keyword arguments:\n", "\n", - "* `backend`: Specify the backend (*tensorflow* or *pytorch*). This depends on the framework of the `kernel`. Defaults to *tensorflow*.\n", + "* `backend`: **TensorFlow**, **PyTorch** and [**KeOps**](https://github.com/getkeops/keops) implementations of the learned kernel detector are available. The backend can be specified as *tensorflow*, *pytorch* or *keops*. Defaults to *tensorflow*.\n", "\n", "* `p_val`: p-value threshold used for the significance of the test.\n", "\n", @@ -55,11 +54,11 @@ "\n", "* `var_reg`: Constant added to the estimated variance of the MMD for stability.\n", "\n", - "* `reg_loss_fn`: The regularisation term reg_loss_fn(kernel) is added to the loss function being optimized.\n", + "* `reg_loss_fn`: The regularisation term *reg_loss_fn(kernel)* is added to the loss function being optimized.\n", "\n", "* `train_size`: Optional fraction (float between 0 and 1) of the dataset used to train the classifier. 
The drift is detected on *1 - train_size*.\n", "\n", - "* `retrain_from_scratch`: Whether the kernel should be retrained from scratch for each set of test data or whether it should instead continue training from where it left off on the previous set.\n", + "* `retrain_from_scratch`: Whether the kernel should be retrained from scratch for each set of test data or whether it should instead continue training from where it left off on the previous set. Defaults to *True*.\n", "\n", "* `optimizer`: Optimizer used during training of the kernel. From `torch.optim` for PyTorch and `tf.keras.optimizers` for TensorFlow.\n", "\n", @@ -75,7 +74,7 @@ "\n", "* `train_kwargs`: Optional additional kwargs for the built-in TensorFlow (`from alibi_detect.models.tensorflow import trainer`) or PyTorch (`from alibi_detect.models.pytorch import trainer`) trainer functions.\n", "\n", - "* `dataset`: Dataset object used during training of the kernel. Defaults to `alibi_detect.utils.pytorch.TorchDataset` (an instance of `torch.utils.data.Dataset`) for the PyTorch backend and `alibi_detect.utils.tensorflow.TFDataset` (an instance of `tf.keras.utils.Sequence`) for the TensorFlow backend. For PyTorch, the dataset should only take the windows x_ref and x_test as input, so when e.g. *TorchDataset* is passed to the detector at initialisation, during training *TorchDataset(x_ref, x_test)* is used. For TensorFlow, the dataset is an instance of `tf.keras.utils.Sequence`, so when e.g. *TFDataset* is passed to the detector at initialisation, during training *TFDataset(x_ref, x_test, batch_size=batch_size, shuffle=True)* is used. x_ref and x_test can be of type np.ndarray or List[Any].\n", + "* `dataset`: Dataset object used during training of the kernel. Defaults to `alibi_detect.utils.pytorch.TorchDataset` (an instance of `torch.utils.data.Dataset`) for the PyTorch and KeOps backends and `alibi_detect.utils.tensorflow.TFDataset` (an instance of `tf.keras.utils.Sequence`) for the TensorFlow backend. For PyTorch or KeOps, the dataset should only take the windows x_ref and x_test as input, so when e.g. *TorchDataset* is passed to the detector at initialisation, during training *TorchDataset(x_ref, x_test)* is used. For TensorFlow, the dataset is an instance of `tf.keras.utils.Sequence`, so when e.g. *TFDataset* is passed to the detector at initialisation, during training *TFDataset(x_ref, x_test, batch_size=batch_size, shuffle=True)* is used. x_ref and x_test can be of type np.ndarray or List[Any].\n", "\n", "* `input_shape`: Shape of input data.\n", "\n", @@ -88,9 +87,15 @@ "\n", "* `dataloader`: Dataloader object used during training of the kernel. Defaults to `torch.utils.data.DataLoader`. The dataloader is not initialized yet, this is done during init off the detector using the `batch_size`. Custom dataloaders can be passed as well, e.g. for graph data we can use `torch_geometric.data.DataLoader`.\n", "\n", + "Additional KeOps keyword arguments:\n", + "\n", + "* `batch_size_permutations`: KeOps computes the `n_permutations` of the MMD^2 statistics in chunks of `batch_size_permutations`. Defaults to 1,000,000.\n", + "\n", + "* `batch_size_predict`: Batch size used for the trained drift detector predictions. Defaults to 1,000,000.\n", + "\n", "### Defining the kernel\n", "\n", - "Any differentiable *Pytorch* or *TensorFlow* module that takes as input two instances and outputs a scalar (representing similarity) can be used as the kernel for this drift detector. 
However, in order to ensure that MMD=0 implies no-drift the kernel should satify a *characteristic* property. This can be guarenteed by defining a kernel as $$k(x,y)=(1-\\epsilon)*k_a(\\Phi(x), \\Phi(y)) + \\epsilon*k_b(x,y),$$ where $\\Phi$ is a learnable projection, $k_a$ and $k_b$ are simple characteristic kernels (such as a [Gaussian RBF](https://en.wikipedia.org/wiki/Radial_basis_function_kernel)), and $\\epsilon>0$ is a small constant. By letting $\\Phi$ be very flexible we can learn powerful kernels in this manner.\n", + "Any differentiable *Pytorch* or *TensorFlow* module that takes as input two instances and outputs a scalar (representing similarity) can be used as the kernel for this drift detector. However, in order to ensure that MMD=0 implies no-drift the kernel should satify a *characteristic* property. This can be guaranteed by defining a kernel as $$k(x,y)=(1-\\epsilon)*k_a(\\Phi(x), \\Phi(y)) + \\epsilon*k_b(x,y),$$ where $\\Phi$ is a learnable projection, $k_a$ and $k_b$ are simple characteristic kernels (such as a [Gaussian RBF](https://en.wikipedia.org/wiki/Radial_basis_function_kernel)), and $\\epsilon>0$ is a small constant. By letting $\\Phi$ be very flexible we can learn powerful kernels in this manner.\n", "\n", "This is easily implemented using the `DeepKernel` class provided in `alibi_detect`. We demonstrate below how we might define a convolutional kernel for images using *Pytorch*. By default `GaussianRBF` kernels are used for $k_a$ and $k_b$ and here we specify $\\epsilon=0.01$, but we could alternatively set `eps='trainable'`.\n", "\n", @@ -113,6 +118,22 @@ "kernel = DeepKernel(proj, eps=0.01)\n", "```\n", "\n", + "It is important to note that, if `retrain_from_scratch=True` and we have not initialised the kernel bandwidth `sigma` for the default `GaussianRBF` kernel $k_a$ and optionally also for $k_b$, we will initialise `sigma` using a median (*PyTorch* and *TensorFlow*) or mean (*KeOps*) bandwidth heuristic for every detector prediction. For KeOps detectors specifically, this could form a computational bottleneck and should be avoided by already specifying a bandwidth in advance. 
To do this, we can leverage the library's built-in heuristics:\n", + "\n", + "```python\n", + "from alibi_detect.utils.pytorch.kernels import sigma_median, GaussianRBF\n", + "\n", + "# example usage\n", + "x, y = torch.randn(*shape), torch.randn(*shape)\n", + "dist = ((x[:, None, :] - y[None, :, :]) ** 2).sum(-1) # distance used for the GaussianRBF kernel\n", + "sigma = sigma_median(x, y, dist)\n", + "kernel_b = GaussianRBF(sigma=sigma, trainable=True)\n", + "\n", + "# equivalent TensorFlow and KeOps functions\n", + "from alibi_detect.utils.tensorflow.kernels import sigma_median\n", + "from alibi_detect.utils.keops.kernels import sigma_mean\n", + "```\n", + "\n", "### Instantiating the detector\n", "\n", "Instantiating the detector is then as simple as passing the reference data and the kernel as follows:\n", @@ -123,8 +144,16 @@ "cd = LearnedKernelDrift(x_ref, kernel, backend='pytorch', p_val=.05, epochs=10, batch_size=32)\n", "```\n", "\n", + "We could have alternatively defined the kernel and instantiated the detector using *KeOps*:\n", + "\n", + "```python\n", + "from alibi_detect.utils.keops import DeepKernel\n", + "\n", + "kernel = DeepKernel(proj, eps=0.01)\n", + "cd = LearnedKernelDrift(x_ref, kernel, backend='keops', p_val=.05, epochs=10, batch_size=32)\n", + "```\n", "\n", - "We could have alternatively defined the kernel and instantiated the detector using *TensorFlow*:\n", + "Or by using *TensorFlow* as the backend:\n", "\n", "```python\n", "import tensorflow as tf\n", @@ -190,7 +219,11 @@ "\n", "### Image\n", "\n", - "[Drift detection on CIFAR10](../../examples/cd_clf_cifar10.ipynb)" + "[Drift detection on CIFAR10](../../examples/cd_clf_cifar10.ipynb)\n", + "\n", + "### Tabular\n", + "\n", + "[Scaling up drift detection with KeOps](../../examples/cd_mmd_keops.ipynb)" ] } ], diff --git a/doc/source/cd/methods/mmddrift.ipynb b/doc/source/cd/methods/mmddrift.ipynb index 3dc2e1d99..2f68cb664 100644 --- a/doc/source/cd/methods/mmddrift.ipynb +++ b/doc/source/cd/methods/mmddrift.ipynb @@ -2,14 +2,22 @@ "cells": [ { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "[source](../../api/alibi_detect.cd.mmd.rst)" ] }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "# Maximum Mean Discrepancy\n", "\n", @@ -30,7 +38,11 @@ }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "## Usage\n", "\n", @@ -44,7 +56,7 @@ "\n", "Keyword arguments:\n", "\n", - "* `backend`: Both **TensorFlow** and **PyTorch** implementations of the MMD detector as well as various preprocessing steps are available. Specify the backend (*tensorflow* or *pytorch*). Defaults to *tensorflow*.\n", + "* `backend`: **TensorFlow**, **PyTorch** and [**KeOps**](https://github.com/getkeops/keops) implementations of the MMD detector are available. Specify the backend (*tensorflow*, *pytorch* or *keops*). Defaults to *tensorflow*.\n", "\n", "* `p_val`: p-value used for significance of the permutation test.\n", "\n", @@ -56,11 +68,11 @@ "\n", "* `preprocess_fn`: Function to preprocess the data before computing the data drift metrics. Typically a dimensionality reduction technique.\n", "\n", - "* `kernel`: Kernel used when computing the MMD. 
Defaults to a Gaussian RBF kernel (`from alibi_detect.utils.pytorch import GaussianRBF` or `from alibi_detect.utils.tensorflow import GaussianRBF` dependent on the backend used).\n", + "* `kernel`: Kernel used when computing the MMD. Defaults to a Gaussian RBF kernel (`from alibi_detect.utils.pytorch import GaussianRBF`, `from alibi_detect.utils.tensorflow import GaussianRBF` or `from alibi_detect.utils.keops import GaussianRBF` dependent on the backend used). Note that for the KeOps backend, the diagonal entries of the kernel matrices `kernel(x_ref, x_ref)` and `kernel(x_test, x_test)` should be equal to 1. This is compliant with the default Gaussian RBF kernel.\n", "\n", "* `sigma`: Optional bandwidth for the kernel as a `np.ndarray`. We can also average over a number of different bandwidths, e.g. `np.array([.5, 1., 1.5])`.\n", "\n", - "* `configure_kernel_from_x_ref`: If `sigma` is not specified, the detector can infer it via a heuristic and set `sigma` to the median pairwise distance between 2 samples. If `configure_kernel_from_x_ref` is *True*, we can already set `sigma` at initialization of the detector by inferring it from `x_ref`, speeding up the prediction step. If set to *False*, `sigma` is computed separately for each test batch at prediction time.\n", + "* `configure_kernel_from_x_ref`: If `sigma` is not specified, the detector can infer it via a heuristic and set `sigma` to the median (*TensorFlow* and *PyTorch*) or the mean pairwise distance between 2 samples (*KeOps*) by default. If `configure_kernel_from_x_ref` is *True*, we can already set `sigma` at initialization of the detector by inferring it from `x_ref`, speeding up the prediction step. If set to *False*, `sigma` is computed separately for each test batch at prediction time.\n", "\n", "* `n_permutations`: Number of permutations used in the permutation test.\n", "\n", @@ -73,23 +85,22 @@ "\n", "* `device`: *cuda* or *gpu* to use the GPU and *cpu* for the CPU. If the device is not specified, the detector will try to leverage the GPU if possible and otherwise fall back on CPU.\n", "\n", + "Additional KeOps keyword arguments:\n", "\n", - "Initialized drift detector example:\n", + "* `batch_size_permutations`: KeOps computes the `n_permutations` of the MMD^2 statistics in chunks of `batch_size_permutations`. Defaults to 1,000,000.\n", + "\n", + "Initialized drift detector examples for each of the available backends:\n", "\n", "\n", "```python\n", "from alibi_detect.cd import MMDDrift\n", "\n", - "cd = MMDDrift(x_ref, backend='tensorflow', p_val=.05)\n", - "```\n", - "\n", - "The same detector in PyTorch:\n", - "\n", - "```python\n", - "cd = MMDDrift(x_ref, backend='pytorch', p_val=.05)\n", + "cd_tf = MMDDrift(x_ref, backend='tensorflow', p_val=.05)\n", + "cd_torch = MMDDrift(x_ref, backend='pytorch', p_val=.05)\n", + "cd_keops = MMDDrift(x_ref, backend='keops', p_val=.05)\n", "```\n", "\n", - "We can also easily add preprocessing functions for both frameworks. The following example uses a randomly initialized image encoder in PyTorch:\n", + "We can also easily add preprocessing functions for the *TensorFlow* and *PyTorch* frameworks. Note that we can also combine for instance a PyTorch preprocessing step with a KeOps detector. 
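For the KeOps backend specifically, the additional `batch_size_permutations` argument can also be passed at initialisation. A short sketch, with illustrative values and a randomly generated reference batch standing in for real data:

```python
import numpy as np
from alibi_detect.cd import MMDDrift

x_ref = np.random.randn(1000, 10).astype(np.float32)  # illustrative reference batch

# KeOps computes the n_permutations MMD^2 statistics in chunks of batch_size_permutations
cd_keops = MMDDrift(
    x_ref,
    backend='keops',
    p_val=.05,
    n_permutations=100,
    batch_size_permutations=1_000_000,
)
```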
The following example uses a randomly initialized image encoder in PyTorch:\n", "\n", "```python\n", "from functools import partial\n", @@ -158,7 +169,11 @@ }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "### Detect Drift\n", "\n", @@ -184,7 +199,11 @@ }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "## Examples\n", "\n", @@ -196,6 +215,10 @@ "\n", "[Drift detection on CIFAR10](../../examples/cd_mmd_cifar10.ipynb)\n", "\n", + "### Tabular\n", + "\n", + "[Scaling up drift detection with KeOps](../../examples/cd_mmd_keops.ipynb)\n", + "\n", "### Text\n", "\n", "[Text drift detection on IMDB movie reviews](../../examples/cd_text_imdb.ipynb)" @@ -207,7 +230,7 @@ "hash": "ffba93b5284319fb7a107c8eacae647f441487dcc7e0323a4c0d3feb66ea8c5e" }, "kernelspec": { - "display_name": "Python 3 (ipykernel)", + "display_name": "Python 3", "language": "python", "name": "python3" }, @@ -221,9 +244,9 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.11" + "version": "3.7.6" } }, "nbformat": 4, "nbformat_minor": 4 -} +} \ No newline at end of file diff --git a/doc/source/cd/methods/onlinecvmdrift.ipynb b/doc/source/cd/methods/onlinecvmdrift.ipynb index a5bfd80e8..71e670087 100644 --- a/doc/source/cd/methods/onlinecvmdrift.ipynb +++ b/doc/source/cd/methods/onlinecvmdrift.ipynb @@ -17,6 +17,18 @@ "\n", "The online [Cramér-von Mises](https://en.wikipedia.org/wiki/Cram%C3%A9r%E2%80%93von_Mises_criterion) detector is a non-parametric method for online drift detection on continuous data. Like the [offline Cramér-von Mises](cvmdrift.ipynb) detector, it applies a univariate Cramér-von Mises (CVM) test to each feature. This detector is an adaptation of that proposed in [this paper](https://www.tandfonline.com/doi/abs/10.1080/00224065.2012.11917887) by Ross et al. .\n", "\n", + "
\n", + "\n", + "**Warning**\n", + " \n", + "This detector is multi-threaded, with Numba used to parallelise over the simulated streams. There is a [known issue](https://github.com/SeldonIO/alibi-detect/issues/648) on MacOS, where Numba's default OpenMP [threading layer](https://numba.readthedocs.io/en/stable/user/threading-layer.html?highlight=thread_id#the-threading-layers) causes segfaults. A workaround is to use the slightly less performant `workqueue` threading layer on MacOS by setting the `NUMBA_THREADING_LAYER` enviroment variable or running:\n", + " \n", + "```python\n", + "from numba import config\n", + "config.THREADING_LAYER = 'workqueue'\n", + "```\n", + "
\n", + "\n", "### Threshold configuration\n", "Online detectors assume the reference data is large and fixed and operate on single data points at a time (rather than batches). These data points are passed into the test-windows, and a two-sample test-statistic between the reference data and test-window is computed at each time-step. When the test-statistic exceeds a preconfigured threshold, drift is detected. Configuration of the thresholds requires specification of the expected run-time (ERT) which specifies how many time-steps that the detector, on average, should run for in the absence of drift before making a false detection. Thresholds are then configured to target this ERT by simulating `n_bootstraps` number of streams of length `t_max = 2*max(window_sizes) - 1`. Conveniently, the non-parametric nature of the detector means that thresholds depend only on $M$, the length of the reference data set. Therefore, for multivariate data, configuration is only as costly as the univariate case.\n", "\n", diff --git a/doc/source/conf.py b/doc/source/conf.py index 98630dfad..cd4bf6319 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -110,7 +110,8 @@ "numba", "pydantic", "toml", - "catalogue" + "catalogue", + "pykeops" ] # Napoleon settings diff --git a/doc/source/examples/cd_clf_adult.ipynb b/doc/source/examples/cd_clf_adult.ipynb index 265b7c84f..7aedc4976 100644 --- a/doc/source/examples/cd_clf_adult.ipynb +++ b/doc/source/examples/cd_clf_adult.ipynb @@ -206,10 +206,10 @@ "# define numerical standard scaler.\n", "num_transf = StandardScaler()\n", "\n", - "# define categorical one-hot encoder with first columns dropped.\n", + "# define categorical one-hot encoder.\n", "cat_transf = OneHotEncoder(\n", " categories=[range(len(x)) for x in adult.category_map.values()],\n", - " drop=\"first\",\n", + " handle_unknown=\"ignore\"\n", ")\n", "\n", "# Define column transformer\n", @@ -286,7 +286,7 @@ "text": [ "H0\n", "Drift? No!\n", - "p-value: 0.665\n", + "p-value: 0.681\n", "\n", "H1\n", "Drift? Yes!\n", @@ -357,6 +357,7 @@ "# define model\n", "model = GradientBoostingClassifier()\n", "\n", + "\n", "# define drift detector\n", "detector = ClassifierDrift(\n", " x_ref=x_ref,\n", @@ -401,7 +402,7 @@ "text": [ "H0\n", "Drift? No!\n", - "p-value: 0.359\n", + "p-value: 0.457\n", "\n", "H1\n", "Drift? Yes!\n", @@ -483,14 +484,14 @@ "text": [ "H0\n", "Drift? No!\n", - "p-value: 0.905\n", + "p-value: 0.670\n", "\n", "H1\n", "Drift? Yes!\n", "p-value: 0.000\n", "\n", - "CPU times: user 8.68 s, sys: 42.2 ms, total: 8.72 s\n", - "Wall time: 8.72 s\n" + "CPU times: user 5.13 s, sys: 4.92 ms, total: 5.14 s\n", + "Wall time: 5.13 s\n" ] } ], @@ -531,14 +532,14 @@ "text": [ "H0\n", "Drift? No!\n", - "p-value: 0.952\n", + "p-value: 0.905\n", "\n", "H1\n", "Drift? 
Yes!\n", "p-value: 0.000\n", "\n", - "CPU times: user 2.02 s, sys: 12.5 ms, total: 2.03 s\n", - "Wall time: 2.03 s\n" + "CPU times: user 1.39 s, sys: 18.3 ms, total: 1.41 s\n", + "Wall time: 1.41 s\n" ] } ], diff --git a/doc/source/examples/cd_mmd_keops.ipynb b/doc/source/examples/cd_mmd_keops.ipynb new file mode 100644 index 000000000..3b5ea1f57 --- /dev/null +++ b/doc/source/examples/cd_mmd_keops.ipynb @@ -0,0 +1,638 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Scaling up drift detection with KeOps\n", + "\n", + "## Introduction\n", + "\n", + "A number of convenient and powerful kernel-based drift detectors such as the [MMD detector](https://docs.seldon.io/projects/alibi-detect/en/stable/cd/methods/mmddrift.html) ([Gretton et al., 2012](https://jmlr.csail.mit.edu/papers/v13/gretton12a.html)) or the [learned kernel MMD detector](https://docs.seldon.io/projects/alibi-detect/en/stable/cd/methods/learnedkerneldrift.html) ([Liu et al., 2020](https://arxiv.org/abs/2002.09116)) do not scale favourably with increasing dataset size $n$, leading to quadratic complexity $\\mathcal{O}(n^2)$ for naive implementations. As a result, we can quickly run into memory issues by having to store the $[N_\\text{ref} + N_\\text{test}, N_\\text{ref} + N_\\text{test}]$ kernel matrix (on the GPU if applicable) used for an efficient implementation of the permutation test. Note that $N_\\text{ref}$ is the reference data size and $N_\\text{test}$ the test data size.\n", + "\n", + "We can however drastically speed up and scale up kernel-based drift detectors to large dataset sizes by working with symbolic kernel matrices instead and leverage the [KeOps](https://www.kernel-operations.io/keops/index.html) library to do so. For the user of $\\texttt{Alibi Detect}$ the only thing that changes is the specification of the detector's backend, e.g. for the MMD detector:\n", + "\n", + "\n", + "```python\n", + "from alibi_detect.cd import MMDDrift\n", + "\n", + "detector_torch = MMDDrift(x_ref, backend='pytorch')\n", + "detector_keops = MMDDrift(x_ref, backend='keops')\n", + "```\n", + "\n", + "In this notebook we will run a few simple benchmarks to illustrate the speed and memory improvements from using KeOps over vanilla PyTorch on the GPU (1x RTX 2080 Ti) for both the standard MMD and learned kernel MMD detectors.\n", + "\n", + "## Data\n", + "\n", + "We randomly sample points from the standard normal distribution and run the detectors with PyTorch and KeOps backends for the following settings:\n", + "\n", + "- $N_\\text{ref}, N_\\text{test} = [2, 5, 10, 20, 50, 100]$ (batch sizes in '000s)\n", + "- $D = [2, 10, 50]$\n", + "\n", + "Where $D$ denotes the number of features.\n", + "\n", + "## Requirements\n", + "\n", + "The notebook requires [PyTorch](https://pytorch.org/) and KeOps to be installed. 
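If KeOps is already installed, its bindings can be checked with pykeops' built-in self-test. A sketch; the `test_torch_bindings` helper is provided by pykeops itself, not by alibi-detect, and is not used elsewhere in this notebook:

```python
import pykeops

# Compiles and runs a tiny KeOps formula to confirm the PyTorch bindings work
pykeops.test_torch_bindings()
```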
Once PyTorch is installed, KeOps can be installed via pip:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!pip install pykeops" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Before we start let’s fix the random seeds for reproducibility:" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import numpy as np\n", + "import torch\n", + "\n", + "def set_seed(seed: int) -> None:\n", + " torch.manual_seed(seed)\n", + " torch.cuda.manual_seed(seed)\n", + " np.random.seed(seed)\n", + "\n", + "set_seed(2022)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "## Vanilla PyTorch vs. KeOps comparison\n", + "\n", + "### Utility functions\n", + "\n", + "First we define some utility functions to run the experiments:" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "from alibi_detect.cd import MMDDrift, LearnedKernelDrift\n", + "from alibi_detect.utils.keops.kernels import DeepKernel as DeepKernelKeops\n", + "from alibi_detect.utils.keops.kernels import GaussianRBF as GaussianRBFKeops\n", + "from alibi_detect.utils.pytorch.kernels import DeepKernel as DeepKernelTorch\n", + "from alibi_detect.utils.pytorch.kernels import GaussianRBF as GaussianRBFTorch\n", + "import matplotlib.pyplot as plt\n", + "from scipy.stats import kstest\n", + "from timeit import default_timer as timer\n", + "import torch.nn as nn\n", + "import torch.nn.functional as F\n", + "\n", + "\n", + "class Projection(nn.Module):\n", + " def __init__(self, d_in: int, d_out: int = 2):\n", + " super().__init__()\n", + " self.lin1 = nn.Linear(d_in, d_out)\n", + " self.lin2 = nn.Linear(d_out, d_out)\n", + " \n", + " def forward(self, x):\n", + " return self.lin2(F.relu(self.lin1(x)))\n", + " \n", + "\n", + "def eval_detector(p_vals: np.ndarray, threshold: float, is_drift: bool, t_mean: float, t_std: float) -> dict:\n", + " \"\"\" In case of drifted data (ground truth) it returns the detector's power.\n", + " In case of no drift, it computes the false positive rate (FPR) and whether the p-values\n", + " are uniformly distributed U[0,1] which is checked via a KS test. \"\"\"\n", + " results = {'power': None, 'fpr': None, 'ks': None}\n", + " below_p_val_threshold = (p_vals <= threshold).mean()\n", + " if is_drift:\n", + " results['power'] = below_p_val_threshold\n", + " else:\n", + " results['fpr'] = below_p_val_threshold\n", + " stat_ks, p_val_ks = kstest(p_vals, 'uniform')\n", + " results['ks'] = {'p_val': p_val_ks, 'stat': stat_ks}\n", + " results['p_vals'] = p_vals\n", + " results['time'] = {'mean': t_mean, 'stdev': t_std}\n", + " return results\n", + "\n", + "\n", + "def experiment(detector: str, backend: str, n_runs: int, n_ref: int, n_test: int, n_features: int, \n", + " mu: float = 0.) -> dict:\n", + " \"\"\" Runs the experiment n_runs times, each time with newly sampled reference and test data.\n", + " Returns the p-values for each test as well as the mean and standard deviations of the runtimes. 
\"\"\"\n", + " p_vals, t_detect = [], []\n", + " for _ in range(n_runs):\n", + " # Sample reference and test data\n", + " x_ref = np.random.randn(*(n_ref, n_features)).astype(np.float32)\n", + " x_test = np.random.randn(*(n_test, n_features)).astype(np.float32) + mu\n", + " \n", + " # Initialise detector, make and log predictions\n", + " p_val = .05\n", + " dd_kwargs = dict(p_val=p_val, backend=backend, n_permutations=100)\n", + " if detector == 'mmd':\n", + " dd = MMDDrift(x_ref, **dd_kwargs)\n", + " elif detector == 'learned_kernel':\n", + " d_out, sigma = 2, .1\n", + " proj = Projection(n_features, d_out)\n", + " Kernel = GaussianRBFKeops if backend == 'keops' else GaussianRBFTorch\n", + " kernel_a = Kernel(trainable=True, sigma = torch.Tensor([sigma]))\n", + " kernel_b = Kernel(trainable=True, sigma = torch.Tensor([sigma]))\n", + " device = torch.device('cuda')\n", + " DeepKernel = DeepKernelKeops if backend == 'keops' else DeepKernelTorch\n", + " deep_kernel = DeepKernel(proj, kernel_a, kernel_b, eps=.01).to(device)\n", + " if backend == 'pytorch' and n_ref + n_test > 20000:\n", + " batch_size = 10000\n", + " else:\n", + " batch_size = 1000000\n", + " dd_kwargs.update(\n", + " dict(\n", + " epochs=2, train_size=.75, batch_size=batch_size, batch_size_predict=1000000\n", + " )\n", + " )\n", + " dd = LearnedKernelDrift(x_ref, deep_kernel, **dd_kwargs)\n", + " start = timer()\n", + " pred = dd.predict(x_test)\n", + " end = timer()\n", + " \n", + " if _ > 0: # first run reserved for KeOps compilation\n", + " t_detect.append(end - start)\n", + " p_vals.append(pred['data']['p_val'])\n", + " \n", + " del dd, x_ref, x_test\n", + " torch.cuda.empty_cache()\n", + " \n", + " p_vals = np.array(p_vals)\n", + " t_mean, t_std = np.array(t_detect).mean(), np.array(t_detect).std()\n", + " results = eval_detector(p_vals, p_val, mu != 0., t_mean, t_std)\n", + " return results\n", + "\n", + "\n", + "def format_results(experiments: dict, n_features: list, backends: list, max_batch_size: int = 1e10) -> dict:\n", + " T = {'batch_size': None, 'keops': None, 'pytorch': None}\n", + " T['batch_size'] = np.unique([experiments['keops'][_]['n_ref'] for _ in experiments['keops'].keys()])\n", + " T['batch_size'] = list(T['batch_size'][T['batch_size'] <= max_batch_size])\n", + " T['keops'] = {f: [] for f in n_features}\n", + " T['pytorch'] = {f: [] for f in n_features}\n", + "\n", + " for backend in backends:\n", + " for f in T[backend].keys():\n", + " for bs in T['batch_size']:\n", + " for k, v in experiments[backend].items():\n", + " if f == v['n_features'] and bs == v['n_ref']:\n", + " T[backend][f].append(results[backend][k]['time']['mean'])\n", + "\n", + " for k, v in T['keops'].items(): # apply padding\n", + " n_pad = len(v) - len(T['pytorch'][k])\n", + " T['pytorch'][k] += [np.nan for _ in range(n_pad)]\n", + " return T\n", + "\n", + "\n", + "def plot_absolute_time(experiments: dict, results: dict, n_features: list, y_scale: str = 'linear', \n", + " detector: str = 'MMD', max_batch_size: int = 1e10):\n", + " T = format_results(experiments, n_features, ['keops', 'pytorch'], max_batch_size)\n", + " colors = ['b', 'g', 'r', 'c', 'm', 'y', 'b']\n", + " legend, n_c = [], 0\n", + " for f in n_features:\n", + " plt.plot(T['batch_size'], T['keops'][f], linestyle='solid', color=colors[n_c]);\n", + " legend.append(f'keops - {f}')\n", + " plt.plot(T['batch_size'], T['pytorch'][f], linestyle='dashed', color=colors[n_c]);\n", + " legend.append(f'pytorch - {f}')\n", + " n_c += 1\n", + " plt.title(f'{detector} drift detection 
time for 100 permutations')\n", + " plt.legend(legend, loc=(1.1,.1));\n", + " plt.xlabel('Batch size');\n", + " plt.ylabel('Time (s)');\n", + " plt.yscale(y_scale);\n", + " plt.show();\n", + "\n", + "\n", + "def plot_relative_time(experiments: dict, results: dict, n_features: list, y_scale: str = 'linear',\n", + " detector: str = 'MMD', max_batch_size: int = 1e10):\n", + " T = format_results(experiments, n_features, ['keops', 'pytorch'], max_batch_size)\n", + " colors = ['b', 'g', 'r', 'c', 'm', 'y', 'b']\n", + " legend, n_c = [], 0\n", + " for f in n_features:\n", + " t_keops, t_torch = T['keops'][f], T['pytorch'][f]\n", + " ratio = [tt / tk for tt, tk in zip(t_torch, t_keops)]\n", + " plt.plot(T['batch_size'], ratio, linestyle='solid', color=colors[n_c]);\n", + " legend.append(f'pytorch/keops - {f}')\n", + " n_c += 1\n", + " plt.title(f'{detector} drift detection pytorch/keops time ratio for 100 permutations')\n", + " plt.legend(legend, loc=(1.1,.1));\n", + " plt.xlabel('Batch size');\n", + " plt.ylabel('time pytorch / keops');\n", + " plt.yscale(y_scale);\n", + " plt.show();" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "As detailed earlier, we will compare the PyTorch with the KeOps implementation of the MMD and learned kernel MMD detectors for a variety of reference and test data batch sizes as well as different feature dimensions. Note that for the PyTorch implementation, the portion of the kernel matrix for the reference data itself can already be computed at initialisation of the detector. This computation will not be included when we record the detector's prediction time. Since use cases where $N_\\text{ref} >> N_\\text{test}$ are quite common, we will also test for this specific setting. The key reason is that we cannot amortise this computation for the KeOps detector since we are working with lazily evaluated symbolic matrices.\n", + "\n", + "### MMD detector\n", + "\n", + "#### 1. $N_\\text{ref} = N_\\text{test}$\n", + "\n", + "Note that for KeOps we could further increase the number of instances in the reference and test sets (e.g. to 500,000) without running into memory issues." 
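As a rough illustration of that headroom, a single large-batch prediction with the KeOps backend might look as follows. The sizes are illustrative and assume a GPU with sufficient memory; the call mirrors the `MMDDrift` usage in the experiments below:

```python
import numpy as np
from alibi_detect.cd import MMDDrift

x_ref = np.random.randn(200_000, 10).astype(np.float32)
x_test = np.random.randn(200_000, 10).astype(np.float32)

cd = MMDDrift(x_ref, backend='keops', p_val=.05, n_permutations=100)
pred = cd.predict(x_test)
print(pred['data']['p_val'])
```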
+ ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "experiments_eq = {\n", + " 'keops': {\n", + " 0: {'n_ref': 2000, 'n_test': 2000, 'n_runs': 5, 'n_features': 2},\n", + " 1: {'n_ref': 5000, 'n_test': 5000, 'n_runs': 5, 'n_features': 2},\n", + " 2: {'n_ref': 10000, 'n_test': 10000, 'n_runs': 5, 'n_features': 2},\n", + " 3: {'n_ref': 20000, 'n_test': 20000, 'n_runs': 5, 'n_features': 2},\n", + " 4: {'n_ref': 50000, 'n_test': 50000, 'n_runs': 5, 'n_features': 2},\n", + " 5: {'n_ref': 100000, 'n_test': 100000, 'n_runs': 5, 'n_features': 2},\n", + " 6: {'n_ref': 2000, 'n_test': 2000, 'n_runs': 5, 'n_features': 10},\n", + " 7: {'n_ref': 5000, 'n_test': 5000, 'n_runs': 5, 'n_features': 10},\n", + " 8: {'n_ref': 10000, 'n_test': 10000, 'n_runs': 5, 'n_features': 10},\n", + " 9: {'n_ref': 20000, 'n_test': 20000, 'n_runs': 5, 'n_features': 10},\n", + " 10: {'n_ref': 50000, 'n_test': 50000, 'n_runs': 5, 'n_features': 10},\n", + " 11: {'n_ref': 100000, 'n_test': 100000, 'n_runs': 5, 'n_features': 10},\n", + " 12: {'n_ref': 2000, 'n_test': 2000, 'n_runs': 5, 'n_features': 50},\n", + " 13: {'n_ref': 5000, 'n_test': 5000, 'n_runs': 5, 'n_features': 50},\n", + " 14: {'n_ref': 10000, 'n_test': 10000, 'n_runs': 5, 'n_features': 50},\n", + " 15: {'n_ref': 20000, 'n_test': 20000, 'n_runs': 5, 'n_features': 50},\n", + " 16: {'n_ref': 50000, 'n_test': 50000, 'n_runs': 5, 'n_features': 50},\n", + " 17: {'n_ref': 100000, 'n_test': 100000, 'n_runs': 5, 'n_features': 50}\n", + " },\n", + " 'pytorch': { # runs OOM after 10k instances in ref and test sets\n", + " 0: {'n_ref': 2000, 'n_test': 2000, 'n_runs': 5, 'n_features': 2},\n", + " 1: {'n_ref': 5000, 'n_test': 5000, 'n_runs': 5, 'n_features': 2},\n", + " 2: {'n_ref': 10000, 'n_test': 10000, 'n_runs': 5, 'n_features': 2},\n", + " 3: {'n_ref': 2000, 'n_test': 2000, 'n_runs': 5, 'n_features': 10},\n", + " 4: {'n_ref': 5000, 'n_test': 5000, 'n_runs': 5, 'n_features': 10},\n", + " 5: {'n_ref': 10000, 'n_test': 10000, 'n_runs': 5, 'n_features': 10},\n", + " 6: {'n_ref': 2000, 'n_test': 2000, 'n_runs': 5, 'n_features': 50},\n", + " 7: {'n_ref': 5000, 'n_test': 5000, 'n_runs': 5, 'n_features': 50},\n", + " 8: {'n_ref': 10000, 'n_test': 10000, 'n_runs': 5, 'n_features': 50}\n", + " }\n", + "}" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "backends = ['keops', 'pytorch']\n", + "results = {backend: {} for backend in backends}\n", + "\n", + "for backend in backends:\n", + " exps = experiments_eq[backend]\n", + " for i, exp in exps.items():\n", + " results[backend][i] = experiment(\n", + " 'mmd', backend, exp['n_runs'], exp['n_ref'], exp['n_test'], exp['n_features']\n", + " )" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Below we visualise the runtimes of the different experiments. We can make the following observations:\n", + "\n", + "- The relative **speed** improvements of KeOps over vanilla PyTorch increase with increasing batch size.\n", + "\n", + "- Due to the explicit kernel computation and storage, the PyTorch detector runs out-of-memory after a little over 10,000 instances in each of the reference and test sets while KeOps keeps **scaling** up without any issues.\n", + "\n", + "- The relative speed improvements decline with growing **feature dimension**. 
Note however that we would not recommend using an (untrained) MMD detector on very high-dimensional data in the first place.\n",
+    "\n",
+    "The plots show both the absolute and relative (PyTorch / KeOps) mean prediction times for the MMD drift detector for different feature dimensions $[2, 10, 50]$."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "image/png": "<base64-encoded PNG omitted: absolute mean prediction times for feature dimensions [2, 10, 50]>",
+      "text/plain": [
+       "<Figure>"
+      ]
+     },
+     "metadata": {
+      "needs_background": "light"
+     },
+     "output_type": "display_data"
+    }
+   ],
+   "source": [
+    "n_features = [2, 10, 50]\n",
+    "max_batch_size = 100000\n",
+    "\n",
+    "plot_absolute_time(experiments_eq, results, n_features, max_batch_size=max_batch_size)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 6,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "image/png": "<base64-encoded PNG omitted: relative (PyTorch / KeOps) mean prediction times>",
+      "text/plain": [
+       "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "plot_relative_time(experiments_eq, results, n_features, max_batch_size=max_batch_size)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The difference between KeOps and PyTorch is even more striking when we only look at $[2, 10]$ features:" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAgAAAAEWCAYAAAAQHy/hAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjIsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8li6FKAAAgAElEQVR4nOzdeXhN1/rA8e/KIILEGCSIGBJCqEqqaClNpCrGKlXE2NK6bV3qdri3t9W5v6vUUDW1qhRVWq2aqgiiqBqKIGoKIkEMkUHms35/7JOKNEgiyc7wfp7nPHL23mevd59znPWutdfeS2mtEUIIIUTZYmN2AEIIIYQoepIACCGEEGWQJABCCCFEGSQJgBBCCFEGSQIghBBClEGSAAghhBBlkCQAJYRSSiulGt9h/Wyl1H+zPH9eKXVRKZWglKqej/I8rGXa5TfmgmI9hoZFUI67tSzbIiirllJqm1IqXik1ubDLE7mnlPq3Uupzs+MQorBJApCNUipCKZWqlKqRbfl+a4XoYX2+wPq8V7btPrEuH2Z9PkwplWGtWBKUUqeVUl8qpbwKMm6t9XNa63etZdoDU4BArXUlrfWVuyUQ98J6jNsLaF9blFLPZF1mPYZTBbH/bGVFKKUCspRz1lpWRkGXlYNRwGXAWWv98r3uTCnlqpRapZSKyvo9zbLeQSk1XykVp5S6oJQan229v1IqXCl1QykVopSqf68xmUEpNVEp9XUetu+klIrMukxr/YHW+pnbvUaI0kISgJydBp7OfKKUagFUyGG7P4EhWbazA/oDJ7Ntt1NrXQmoDAQAScBepZRPQQSbQ4u1FlAeOFwQ+xeFoj5wROfjTly36ZWxAOuBvrd52UTA01puZ+AVpVRX6/5qAN8D/wWqAXuAZXmNK6+KQ++SEGWa1loeWR5ABPAG8HuWZR8D/wE04GFdtsC6/CJQ1bqsO7AO2A4Msy4bBmzPoZzVwIo7xPEvIBqIAkZYy26cpexZwFogESOpWAC8B3hZl2kgAdgMbLM+T7QueyqH8mytx3MZOAX8w/oaO+v6ysAX1pjOW8uyBbyBZCDDuu9Y6/YO1v2dtb5HswHHLOX1Av4A4jASpq7A+9b9JFv39al126zHXhlYCMQAZ6yflU3W99pa7jWMRO7x27y/izAqzSRrWa8AHtmOeYv1OHdYt/kJqA4stsb9e+b3wbp9U+AX4CpwDOh/m7IXAGlAqnW/Adb3a6r1846y/u1g3b4TEAm8ClwAFt3he2NHlu9pluVRGD1Cmc/fBb6x/j0K2JFlXUXr+9L0Dv9HXgeOWN/nL4HyWdZ3t362sdb3rmW2174KHARSrPFGYHzfD2J8R7/ASGLXAfHARm7+H+sEROYQTwDGdyjV+t4mAAes64cDR637OgWMznacFuv2CYAbRrL0dZb998RIpmOt3wnvbGVPsMZ+HSNxKm9dVwPj/3ms9TsRivW7Kg95FIeH6QEUt0eWH5NjGJWbrfXHtz5/TwDeA+YCz1uXfYvRc5CbBGAEcPE2MXTFqDR9rD9SS/h7AnAdeAijF6d8ZjzW9R5kqcisy/56/W3KfA4IB+phtAJDuLUyXAnMscZTE9id5Yf0b8cIfAKssu7LCaPy/NC6ro01/i7W+OtgrWysP7DPZNtX1mNfCPxo3acHRi/MyCxxpAHPWj+35zEqPnWnzzrL81veN2ssJ4BGGInHEWt5ARgV10LgS+u2FYFzGJWNHXA/RjLV7DZl//V5WZ+/A+yyvrcuGBXnu9Z1nYB04P8wEgXHnPZp3fZvCQBQ1bqsVpZlTwKHrH9PA2Zl208Y0PcO71tYlu/Kr9z87t0PXAIetH4GQ63bO2R57R/W1zpmWbYLo9KvY339Puu+ymMksW9leS9yTACsf08kS+VtXRZk/QwV8AhwA2h9h/39tQ9uJtRdAHuMRPEEUC5L2bsxEodqGInGc9Z1H2IkvvbWRwdu812UhzzMeMgpgNtbhNG93wXjP/X522y3EBiilKqC8ePyQy73H4Xxg5GT/hgVS5jWOhHjBym7H7XWv2qtLVrr5FyWeSf9gala63Na66sYP16AMWAN6Ab8U2udqLW+hFHBD8hpR0ophdGqHKe1vqq1jgc+yLL9SGC+1voXa/zntdbhdwvQeqpjAPC61jpeax0BTAaCs2x2Rms9Txvn8b8CXDEqlvz6Umt9Umt9HaNFelJrvVFrnQ4sx6ikwGj1Rmitv9Rap2ut9wPfAf1yWc4g4B2t9SWtdQzwdrbjsmBUgila66Q8HkMl67/Xsyy7jpFEZa6/zq2yrs/Jp1m+K+9z85TZKGCO1vo3rXWG1vorjJZ+2yyvnW59bdbjmKG1vqi1Po/RUv5Na73f+t1eyc33Oc+01musn6HWWm8FNmBUxrnxFLDG+l1Nw+hdcgTaZzueKOt78RPQyro8DeP7V19rnaa1DtVay+QrotiQc3C3twij67wBRiWfI631dqWUC8YpgtVa6ySj/rurOhjdgjlxA/ZmeX4mh23O5aaQPHDLts+sZdbHaMFEZzk2mzvE4IIxZmJvlu0VRosQjNbf2nzEWMMaR9bYzmC8l5kuZP6htb5hLb8S+Xcxy99JOTzP3Hd94EGlVGyW9XYY36PccOPvx+WW5XnMPSR6CdZ/nTFOr2T+HZ9lvXO212Rdn5Ps35XMWOsDQ5VSL2ZZX45bjyWn701u3+c8U0o9DryF0Zq3wfhuHsrly2/5XLTWFqXUOW7zncPoXcg81kkYyfsG6/dwrtb6o3wcghCFQnoAbkNrfQbjHHI3jAFSd/I18DJ3SBRy0AejpZOTaIxKMpN7TiHmoazcuFOZ5zBacTW01lWsD2etdfPbxHIZ40e7eZbtK2tjIGTm/hrdJo47HddljFZV1hHq7ty+d+ZuCvI9PAdszXK8VbRxRcHzuXx9FH8/rqgsz/Mdq9b6Gsbne1+Wxfdxc5Do4azrlFIVMT6fOw0izf5dyYz1HPB+tvehgtZ6adaQ8nckgNEd/9eAXGuvkMvt9q2UcsDoifkY4xRIFY
zkU+W0fQ5u+VysvVv1yMV3ztpL9bLWuiHGOILxSin/u71OiKIiCcCdjQQetXbD38l0jFMF2+60kVLKVinVQCk1A+Pc49u32fRbYJhSqplSqgJG6+VeXQTudC39t8BLSqm6SqmqwGuZK7TW0RjdppOVUs5KKRulVCOl1CNZ9l1XKVXOur0FmAd8opSqCaCUqqOUesy6/RfAcOulZzbWdU3vFqe1W/9b4H2llJP1UrXxGAlYftztPcmL1YCXUipYKWVvfTyglPLO5euXAm8opVyso/LfJI/HpZQqjzFGAMDB+jzTQuv+q1rf62cxxiGA0cXuo5Tqa33Nm8DBu5yW+Yf1u1INo/cr86qBecBzSqkHlaGiUipIKXWn0wl58SdQ3rpPe4xBoA5Z1l8EPJRSmb9t5azrY4B0a29AYLbtqyulKt+mvG+BIOt31R4j0U/BGKNxR0qp7kqpxtak4TrGAFdLbg9UiMImCcAdWM8b7snFdle11pvucH6vnVIqAWPk+BaM7tUHtNY5dkNqrddhjALfjDHgaHN+4s9mIvCVUipWKdU/h/XzgJ+BAxgDsLL3egzB+DHNHPm9AuP8Jtb4DgMXlFKXrctetca+SykVhzGSu4n1+HZjDJb7BOOHcSs3W1nTgCeVUteUUtNziPNFjFbgKYzBlkuA+bl7C/7mQ4xKMVYpNSGf+wCM1h5GxTIAo9V4gZuD9nLjPYzL7w5idE/vsy7Li8wrGsAY0Jn1HPtbGFdbnMF4vydprddbY4/BuHzwfYzP9kFuM74jiyUYSeEp637fs+5rD0Zy8al1XycwBmcWCOtYjDHA5xit8ESMQbqZllv/vaKU2mf9XF7CqMivAQMxBqdm7i8cI/k6Zf0eZD1Vgdb6GDAYmIHRA9UD6KG1Ts1FuJ4Y3/sEYCfwmdY6JG9HLEThUTImRQiRF0qpCIwrNTaaHYsQIv+kB0AIIYQogyQBEEIIIcogOQUghBBClEHSAyCEEEKUQSXiRkA1atTQHh4eZochhBAlyt69ey9rrV3uvqUoi0pEAuDh4cGePXe9Gk8IIUQWSqmc7iIqBCCnAIQQQogySRIAIYQQogwqtARAKTVfKXVJKRWWw7qXlVLaestTIYQQQhSxwuwBWIAxr/0tlFL1MG6ZerYQyxZCCCHEHRRaAqC13kbO091+ArxCwc9mJ4QQQohcKtIxAEqpXsB5rfWBXGw7Sim1Rym1JyYmpgiiE0IIIcqOIksArNPa/htjqtG70lrP1Vr7aa39XFzkMlYhhBCiIBVlD0AjoAFwwDqbWF1gn1KqdhHGIIQQJUJCagIvrXuJ2ORYs0MRpVSRJQBa60Na65paaw+ttQfGHN6ttdYXiioGIYQoCWKTYwlcFMjM32fy69lfzQ5HlFKFeRngUmAn0EQpFamUGllYZQkhRGkRkxjDo189yp6oPXz75LcEeQWZHZIopQrtVsBa66fvst6jsMoWQoiS6Hzcebos6sLp2NOsenoVXRv/7UpqIQpMiZgLQAghSrvT104TsCiAS4mXWD9oPY94PGJ2SKKUkwRACCFMFn45nICFAdxIu8GmIZtoU6eN2SGJMkASACGEMNEfF/4gcFEgSim2DNtCy1otzQ5JlBEyGZAQQphkV+QuOn/VmfJ25QkdHiqVvyhSkgAIIYQJQk6HELAwgOqO1QkdHopXdS+zQxJljCQAQghRxNb8uYZuS7rhUcWD0OGh1K9S3+yQRBkkCYAQQhSh5YeX03tZb5q7NGfLsC24OrmaHZIooyQBEEKIIvLl/i8Z8N0A2tZty6Yhm6hRoYbZIYkyTBIAIYQoAp/u/pQRq0bg38Cf9YPWU7l8ZbNDEmWcJABCCFHIPgz9kBfXvUjvpr356emfqFiuotkhCSEJgBBCFBatNf/e9G/+vfnfDGwxkG+f/BYHOwezwxICkBsBCSFEobBoC/9c/09m7J7BqNaj+CzoM2xtbM0OS4i/SAIghBAFLMOSwTM/PcOCPxbwcruXmdRlEkops8MS4haSAAghRAFKzUhl8PeDWX5kORMfmcibj7wplb8oliQBEEKIApKUlkS/5f1Yc3wNH3f5mJfbv2x2SELcliQAQghRAOJT4un1TS+2RGxhdtBsRvuNNjskIe5IEgAhhLhH15Ku0W1JN34//zsL+yxkcMvBZockxF1JAiCEEPfgUuIlAhcFcvTyUZb3W04f7z5mhyRErkgCIIQQ+RQZF0mXRV04E3uGn57+icBGgWaHJESuSQIghBD5cOraKfwX+nPlxhV+HvwzHep3MDskIfKk0O4EqJSar5S6pJQKy7JsklIqXCl1UCm1UilVpbDKF0KIwnI05igdvuxAXEocm4dulspflEiFeSvgBUDXbMt+AXy01i2BP4HXC7F8IYQocPuj99NxQUcyLBlsHbYVPzc/s0MSIl8KLQHQWm8DrmZbtkFrnW59uguoW1jlCyFEQdtxbgedv+pMBfsKhA4Pxaemj9khCZFvZk4GNAJYd7uVSqlRSqk9Sqk9MTExRRiWEEL83aZTmwhcFEjNijUJHR6KZ3VPs0MS4p6YkgAopf4DpAOLb7eN1nqu1tpPa+3n4uJSdMEJIUQ2Px37iaAlQTSo2oBtw7fhXtnd7JCEuGdFfhWAUmoY0B3w11rroi5fCCHyYlnYMgavHEyr2q1YP2g91StUNzskIQpEkfYAKKW6Aq8APbXWN4qybCGEyKsv9n3B0989Tbu67dg0ZJNU/qJUKczLAJcCO4EmSqlIpdRI4FPACfhFKfWHUmp2YZUvhBD3YtquaTzz0zMENgpk/eD1ODs4mx2SEAWq0E4BaK2fzmHxF4VVnhBCFAStNR+EfsAbIW/Qp2kflvZdioOdg9lhCVHg5E6AQghhpbXm9U2v83+//h/BLYOZ32s+djbyMylKJ/lmCyEEYNEWXlz7Ip/t+YznfJ9jZtBMbJSZV0oLUbgkARBClHnplnSeWfUMXx34igntJvC/Lv9DKWV2WEIUKkkAhBBlWmpGKgO/G8h3R7/jnU7v8EbHN6TyF2WCJABCiDIrKS2Jvt/2Zd2JdUwJnMK4duPMDkmIIiMJgBCiTIpPiafH0h5sO7ONud3n8qzvs2aHJESRkgRACFHmXE26yuOLH2dv1F4WP7GYp1vkdNWyEKWbJABCiDLlYsJFAr8OJPxyON/1/45eTXuZHZIQppAEQAhRZpy7fo6ARQFExkWy+unVdGnUxeyQhDCNJABCiDLh5NWT+C/051ryNTYM3sBD7g+ZHZIQppIEQAhR6h2+dJgui7qQmpHK5iGb8XXzNTskIUwnt7kSQpRqe6P28siCRwDYOmyrVP5CWEkCIIQotX49+yuPLnyUSuUqsW34NprXbG52SEIUG5IACCFKpY2nNhL4dSC1K9UmdHgojas1NjskIYoVSQCEEKXOj+E/ErQkiMbVGrNt2DbqVa5ndkhCFDuSAAghSpWlh5bS99u+tKrdipChIdSqVMvskIQoliQBEEKUGvP2zmPQ94N42P1hNgZvpJpjNbNDEqLYkgRACFEqfLLzE0atHkXXxl1ZO2gtTg5OZ
ockRLEmCYAQokTTWvPu1ncZv2E8fb378sOAH6hgX8HssIQo9gotAVBKzVdKXVJKhWVZVk0p9YtS6rj136qFVb4QovTTWvPqxld5c8ubDL1vKN88+Q3lbMuZHZYQJUJh9gAsALpmW/YasElr7Qlssj4XQog8s2gLY9aMYdKOSYzxG8P8XvOxs5GbmwqRW4WWAGittwFXsy3uBXxl/fsroHdhlS+EKL3SLekM+2EYs/fO5tWHXuXTbp9io+SMphB5UdTpci2tdbT17wvAba/PUUqNAkYBuLu7F0FoQoiSICU9hYHfD+T7o9/zXuf3+HeHf6OUMjssIUoc01JmrbUG9B3Wz9Va+2mt/VxcXIowMiFEcXUj7Qa9vunF90e/Z+pjU/lPx/9I5S9EPhV1D8BFpZSr1jpaKeUKXCri8oUQJVRcShzdl3Rn+9ntfNHzC0bcP8LskIQo0Yq6B2AVMNT691DgxyIuXwhRAl25cYWAhQHsjNzJ0r5LpfIXogAUWg+AUmop0AmooZSKBN4CPgK+VUqNBM4A/QurfCFE6XAh4QJdFnXh+JXjfN//e3o06WF2SEKUCoWWAGitn77NKv/CKlMIUbqcvX6WgIUBnI8/z5qBa/BvKD8fQhQUuWhWCFEsHb9ynIBFAVxPvs4vwb/Qvl57s0MSolSRBEAIUeyEXQojYGEAGTqDkKEh3O96v9khCVHqyJ0zhBDFyp6oPTyy4BFsbWzZOmyrVP5CFBJJAIQQxUbomVAe/epRnB2cCR0eSjOXZmaHJESpJQmAEKJY2HByA499/RhuTm6EDg+lYdWGZockRKkmCYAQwnQrj66kx9IeeFX3YtvwbdR1rmt2SEKUepIACCFM9fXBr+m3vB+tXVsTMjSEmhVrmh2SEGWCJABCCNPM2TOHISuH0LF+RzYM3kBVx6pmhyREmSEJgBDCFJN3TOa5Nc/RzbMbawauwcnByeyQhChTJAEQQhQprTUTt0xkwi8T6NesH98/9T2O9o5mhyVEmSM3AhJCFBmtNRM2TGDKrikMbzWceT3mYWtja3ZYQpRJkgAIIYpEhiWDMWvGMHffXF5s8yJTu07FRkknpBBmkQRACFHo0jLSGPbjMJYcWsLrD7/O+4++j1LK7LCEKNMkARBCFKqU9BSeWvEUPx77kQ8e/YDXO7xudkhCCCQBEEIUosTURPos68Mvp35hxuMzeKHNC2aHJISwkgRACFEoridfp/vS7uw4t4P5Pecz/P7hZockhMhCEgAhRIG7fOMyXb/uyoGLB/im7zf0a97P7JCEENlIAiCEKFDR8dF0WdSFE1dP8MNTPxDkFWR2SEKIHEgCIIQoMGdiz+C/0J8LCRdYN2gdnRt0NjskIcRtSAIghCgQf175k4CFAcSnxrNxyEba1m1rdkhCiDsw5S4cSqlxSqnDSqkwpdRSpVR5M+IQQhSMgxcP0vHLjiSnJxMyNEQqfyFKgLsmAEqpdkqpmUqpg0qpGKXUWaXUWqXUP5RSlfNaoFKqDvAS4Ke19gFsgQF5D10IURzsPr+bTgs6YWdjx7bh22hVu5XZIQkhcuGOCYBSah3wDPAz0BVwBZoBbwDlgR+VUj3zUa4d4KiUsgMqAFH52IcQwmRbI7biv9CfKuWrEDo8lKY1mpodkhAil+42BiBYa30527IEYJ/1MVkpVSMvBWqtzyulPgbOAknABq31huzbKaVGAaMA3N3d81KEEKIIrD+xnj7L+tCgSgN+Cf6FOs51zA5JCJEHd+wByKz8lVIVlTJm7VBKeSmleiql7LNuk1tKqapAL6AB4AZUVEoNzqHsuVprP621n4uLS16KEEIUsu+OfEfPpT3xruHN1mFbpfIXogTK7SDAbUB56/n7DUAwsCCfZQYAp7XWMVrrNOB7oH0+9yWEKGILDyyk/4r++Ln5sXnoZlwqSoIuREmU2wRAaa1vAE8An2mt+wHN81nmWaCtUqqCMqYD8weO5nNfQogiNOv3WQz9YSidPDqxIXgDVcpXMTskIUQ+5ToBUEq1AwYBa6zLbPNToNb6N2AFxhiCQ9YY5uZnX0KIojPp10mMWTuGHl49WDNwDZXKVTI7JCHEPcjtjYDGAq8DK7XWh5VSDYGQ/BaqtX4LeCu/rxdCFB2tNW+GvMl7oe/xVPOnWNRnEfa29maHJYS4R7lKALTW2zDGAWQ+P4VxLb8QohTTWjP+5/FM/W0qI+8fyZzuc7C1yVfnnxCimLnbfQDmKaVa3GZdRaXUCKXUoMIJTQhhpgxLBqN+GsXU36Yy9sGxzO0xVyp/IUqRu/UAzAT+a00CwoAYjBsAeQLOwHxgcaFGKIQocmkZaQz5YQjfhH3DGx3e4J3O72CM2RVClBZ3TAC01n8A/ZVSlQA/jDsBJgFHtdbHiiA+IUQRS05P5qkVT7Hq2Co+8v+IVx9+1eyQhBCFILdjABKALYUbihDCbAmpCfT+pjebTm9iZreZjHlgjNkhCSEKiUwHLIQAIDY5lqAlQeyK3MWCXgsY2mqo2SEJIQqRJABCCGISY3js68cIuxTGsieX8WSzJ80OSQhRyPKUACilKljvCCiEKCWi4qMIWBjA6djT/DjgRx73fNzskIQQRSBXdwJUSrVXSh0Bwq3P71NKfVaokQkhCl1EbAQdvuzAubhzrBu0Tip/IcqQ3N4K+BPgMeAKgNb6ANCxsIISQhS+8MvhPDz/Ya4lXWNj8EY6eXQyOyQhRBHK9SkArfW5bNcBZxR8OEKIonDgwgG6LOqCUootw7bQslZLs0MSQhSx3PYAnFNKtQe0UspeKTUBmcFPiBJpV+QuOn3VCQc7B7YN2yaVvxBlVG4TgOeAfwB1gPNAK+tzIUQJsiViCwELA6juWJ3Q4aE0qdHE7JCEECbJ7Y2ALmNMBSyEKKHWHl9L32/70rBqQ34J/gU3JzezQxJCmChXCYBSqgHwIuCR9TVa656FE5YQoiAtP7ycQd8PokWtFvw8+GdqVKhhdkhCCJPldhDgD8AXwE+ApfDCEUIUtAV/LGDkqpG0q9uONQPXULl8ZbNDEkIUA7lNAJK11tMLNRIhRIGbuXsmL6x7gYCGAfzw1A9ULFfR7JCEEMVEbhOAaUqpt4ANQErmQq31vkKJSghxzz7a/hGvb3qdnk16suzJZZS3K292SEKIYiS3CUALIBh4lJunALT1uRCiGNFa88bmN/hg+wc87fM0X/X+Cntbe7PDEkIUM7lNAPoBDbXWqYUZjBDi3li0hXHrxzF993Sebf0ss4JmYWtja3ZYQohiKLf3AQgDqhRUoUqpKkqpFUqpcKXUUaVUu4LatxBlVYYlg2dWPcP03dMZ13Ycc7rPkcpfCHFbue0BqAKEK6V+59YxAPm9DHAasF5r/aRSqhxQIZ/7EUIAqRmpBK8M5tvD3/JmxzeZ2Gki2W7dLYQQt8htAvBWQRWolKqMMZHQMADraQU5tSBEPiWnJ9NveT9W/7maSV0mMaH9BLNDEkKUALm9E+DWAiyzARADfKmUug/YC4zVWidm3UgpNQoYBeDu7l6AxQtReiSkJtDrm16EnA5hVtAs
nvN7zuyQhBAlxB3HACiltlv/jVdKxWV5xCul4vJZph3QGpiltb4fSARey76R1nqu1tpPa+3n4uKSz6KEKL2uJV2jy6IubI3YysI+C6XyF0Lkyd16ACoCaK2dCrDMSCBSa/2b9fkKckgAhBC3F5MYQ+DXgRy+dJjl/ZbTx7uP2SEJIUqYu10FoAu6QK31BYzphTOnIfMHjhR0OUKUVufjztNxQUeOXT7GT0//JJW/ECJf7tYDUFMpNf52K7XWU/JZ7ovAYusVAKeA4fncjxBlyulrp/Ff6M/lG5dZP3g9Het3NDskIUQJdbcEwBaoBBTo9URa6z8Av4LcpxCl3dGYowQsCiA5PZlNQzbxQJ0HzA5JCFGC3S0BiNZav1MkkQghbmt/9H4Cvw7EVtmyZegWWtRqYXZIQogS7m5jAOROIkKYbOe5nXT+qjOOdo6EDg+Vyl8IUSDulgD4F0kUQogcbT69mS6LuuBS0YXQ4aF4Vvc0OyQhRClxxwRAa321qAIRQtxq9Z+r6ba4Gw2qNmDbsG3Ur1Lf7JCEEKVIbicDEkIUoWVhy+izrA8tarVgy9AtuDq5mh2SEKKUye1cAEKIIjJ//3ye/elZHqr3EKsHrsbZwdnskITIt71799a0s7P7HPBBGp1FzQKEpaenP+Pr63sp+0pJAIQoRqb/Np2x68cS2CiQlU+tpIK9TJQpSjY7O7vPa9eu7e3i4nLNxsamwG8uJ27PYrGomJiYZhcuXPgc+NvsvZKNCVFMfBD6AWPXj6VP0z6sGrBKKn9RWvi4uLjESeVf9GxsbLSLi8t1jN6Xv5EeACFMprXm35v+zUe/fsTgloP5steX2NnIf01RathI5W8e63ufY2NffmWEMJFFW3hp3UvM/H0mo31H81nQZ9go6ZgTQhQ++aURwiTplnRGrhrJzN9n8nK7l5kVNEsqfyEK2LFjx8p5eno2NzuO2+nZs5kKG5UAACAASURBVGcDDw8PH09Pz+b9+vXzSElJKbIb8MmvjRAmSM1I5envnmbBHwt4u9PbTOoyCaXkxptClDWDBg26eurUqbBjx44dTk5OVlOnTq1RVGVLAiBEEUtKS6LPsj6sOLKCyYGTefORN6XyF6IIHDlypJy3t3ezrVu3VkhPT2f06NF1fXx8vL28vJpNmjSpBoDFYmH06NF1PT09m3t5eTWbN29eVYDVq1c7+fn5NenUqVNjDw8Pn4EDB7pnZGSQnp5O3759PTK3f/vtt2vmJaannnrquo2NDTY2Nvj5+SVGRkaWK4xjz4mMARCiCMWnxNPzm55sjdjKnO5zGOU7yuyQhCgyI0ZQLyyMAr28xceHG/Pnc+5u2x04cMBhwIABjebPn3+6Xbt2SR9//HGNypUrZ4SFhR1NSkpSDzzwQNMePXrE7dq1q8KhQ4ccjx49ejg6OtquTZs23oGBgQkAhw4dqrh///4wLy+v1I4dO3ouXLiwauPGjVOio6Ptjx8/fhjg8uXLtvk5jpSUFLVs2bLqU6ZMueuxFBRJAIQoIteSrvH44sfZE7WHr5/4moEtBpodkhBlwtWrV+169+7deMWKFSd9fX2TATZu3OgcHh5eYdWqVVUB4uPjbY8cOVI+NDTUqX///lft7OyoV69e+oMPPpiwffv2CpUrV7a0aNEisVmzZqkA/fv3vxoaGlqpe/fucefOnXMYOnRovR49elzv06dPXH5iHDp0qHvbtm0TunbtmlBwR35nkgAIUQQuJlwk8OtAwi+Hs6L/Cno37W12SEIUudy01AuDk5NThpubW2pISEilzARAa60mT558tm/fvrdU2GvWrKl8u/1kP1WnlMLFxSUjLCzsyMqVK51nz57tsmzZsmrLly+PyNwmPT0dHx+fZgBdu3aNnTp1alT2/b788suuly9ftvv5559P3tuR5o2MARCikEXGRdJxQUeOXznO6qdXS+UvRBGzt7fX69atO7l06dLqs2fPrgbQpUuX67NmzXLJHHV/8OBBh7i4OJuOHTvGr1ixolp6ejpRUVF2u3fvrtShQ4dEME4BhIeHl8vIyGDFihXVOnToEB8dHW2XkZHBsGHDYj/88MPzhw4duuUUh52dHeHh4UfCw8OP5FT5T5kypcbmzZsr//DDD6dsbfN19iDfpAdAiEJ08upJAhYFcDXpKhuCN/Cw+8NmhyREmeTs7Gz5+eefT3Tq1MnLyckpY9y4cZcjIiIcWrRo4a21VtWqVUtbu3btyeDg4NgdO3ZU8vb2bq6U0m+//Xaku7t7+sGDB/Hx8Ul87rnn3CMiIsq3b98+Ljg4OHb37t2OI0eO9LBYLArgnXfeicxLXK+88kp9V1fXFD8/P2+A7t27X/v444+jC+M9yE4SACEKyZGYIwQsDCA1I5XNQzbj6+ZrdkhClDlNmjRJzRygV6NGjYywsLCjmes+/fTT88D57K+ZM2dOJPC3itzJySkjJCTkRNZl7dq1Szpy5MjR7NvmVnp6+t78vvZeySkAIQrBvuh9dPyyIxrN1mFbpfIX+XLjBmRkmB2FKK1MSwCUUrZKqf1KqdVmxSBEYfj17K90/qozFctVJHR4KM1rFtubkIliRms4cgQ++QQeewyqVYOdO82OSgB07949Pnvrv6Qz8xTAWOAoIJOdi1Jj46mN9PqmF3Wc6rBpyCbqVa5ndkiimLt+HTZtgvXrjcc56zh5b28YMwZcXMyNT5RepiQASqm6QBDwPjDejBiEKGirjq2i3/J+NKnehA3BG6hdqbbZIYliyGKBP/64WeHv2GF08zs5QUAAvPGG0fqvX9/sSEVpZ1YPwFTgFcDpdhsopUYBowDc3d2LKCwh8mfpoaUErwzG182XdYPWUc2xmtkhiWIkJgZ++cWo8H/+GS5dMpa3bg2vvmpU+O3agb29uXGKsqXIEwClVHfgktZ6r1Kq0+2201rPBeYC+Pn5yVzSotj6fN/njPppFB3qd2D106txcrhtXivKiPR0+O23m638vXuN8/vVqxuVfdeuEBgItWqZHakoy8wYBPgQ0FMpFQF8AzyqlPrahDiEuGdTd03l2Z+e5bHGj7Fu0Dqp/MuwyEj44gvo1w9q1ICHH4YPPoBy5eDtt2H3brh4ERYvhuBgqfyLs9dee63Azt+NHz/e7c0338z3p71jxw7HVq1aNW3cuPEtkxMVhCLvAdBavw68DmDtAZigtR5c1HEIcS+01rwf+j7/DfkvT3g/wZInluBg52B2WKIIpaRAaOjNVv7hw8byOnXgySeNVr6/P1QtsJ9rUVSmT5/u+tFHH13I7fYWiwWtNYVxJ79KlSpZFi1adLpFixYpERER9g888IB3nz594mrUqHHPF4jKfQCEyCOtNa9tfI3/hvyX4JbBLHtymVT+ZcSJE/Dpp9C9u3GJXpcuMGMG1K4NkybBoUPGKP7PPzeSAKn8zXfs2LFyDRo0aN6zZ88GDRs2bN61a9eG8fHxNqtWrXIKCAholLndypUrnbt06dJozJgxdVJSUmyaNm3arGfPng0AJk6cWMvT07O5p6dn83feeadm5n49PDx8+vTp4+Hl5dX85Mm
T5VasWOHcrFkz7yZNmjRr166dV+a+jx496timTZsmdevWbfHee+/labrgli1bprRo0SIFwMPDI61atWrp0dHRBdJ4N/VOgFrrLcAWM2MQIi8s2sILa19g1p5ZPO/3PJ92+xQbJXl0aZWQAFu23Gzln7RO1dK4MYwYYbTyO3WCihXNjLJkadOGJtmXPfEEV197jZj4eGz8/fHMvn7wYC6/9BJXoqOx69WLRlnX7d7NsbuVGRERUX7OnDkRgYGBif369fOYNGmSy8SJEy+OHTvWPSoqys7NzS19/vz51YcPH3554MCB1xcsWFAzPDz8CEBoaGiFJUuWVN+7d+9RrTW+vr7e/v7+8TVq1Mg4e/aswxdffHHa398/Iioqyu6FF17w2LJlS3jTpk1TL168+Fd3wIkTJ8rv2LHjWGxsrK23t7fPv/71rxgHB4c8j20LCQmpkJaWppo1a5aS19fmRH65hMiFtIw0lhxawgPzHmDWnlm80v4VZnabKZV/KaO10YqfNMnovq9eHXr0gC+/NK7L//RTOH7ceMyYAUFBUvmXBLVr104NDAxMBAgODr6yY8eOSjY2NvTv3//KvHnzql2+fNl23759lfr163c9+2u3bNlSqVu3brHOzs6WypUrW4KCgq6FhIQ4Abi6uqb6+/snWrer2KZNm/imTZumAtSqVeuvLvrAwMBYR0dH7erqml6tWrW0yMjIPDe+z5w5Yz98+PCG8+bNiyioUw0yF4AQdxCbHMu8vfOYvns6kXGRNKnehAW9FjDkviF/mxpUlEzXrsHGjTdb+VHW+dpatICXXjJa+Q8/DA5ylqdA3KnF7uSE5U7rXV1Jz02LP7ucpvEFeP75568EBQU1Ll++vO7Ro8c1+zxeh1mhQgVLbrbL2tq3tbUlPT39loAWLlxY5YMPPnADmDt3bkTHjh1vZF1/9epVm8cff7zxW2+9dT4z4SgIkgAIkYPT104z7bdpfLH/CxJSE+js0ZnZQbN53PNxafWXcBkZxmV5mRX+b78ZN+epUsU4p595iV7dumZHKgpKdHR0uY0bN1YMCAhIXLx4cbX27dsngHFOvVatWmmTJ092Xb9+/Z+Z29vZ2emUlBTl4OCgO3funDBixAiPd99994LWmrVr11ZdsGDBqexldOrUKXH8+PH1w8PDy2WeAsjaC3AnQ4YMiR0yZEhsTuuSk5NVUFBQ4wEDBlwZPnz4tfy+BzmRBECILHZF7mLKzil8d/Q7bJQNA3wGMK7tOFq7tjY7NHEPLlyADRuMCn/DBrhyBZQCPz/4z3+MSr9NG7CTX8RSycPDI3nGjBk1R40aVcHT0zN5woQJMZnrBgwYcGXmzJl2rVu3Ts5cNmjQoBhvb+9mPj4+N1atWnV64MCBV1q3bu0NEBwcHPPQQw8lHTt2rFzWMtzc3NKnT58e0adPn8YWi4Xq1aun7dix4/i9xj5//vyqv//+e6Vr167ZLVmypIZ12en27dsn3eu+ldbF/x47fn5+es+ePWaHIUqpDEsGP4T/wJRdU9hxbgdVyldhtO9oXmjzAnWdpRlYEqWlGbfY/flno9Lfv99YXrOmUdl37Wq09mvUMDfOwqaU2qu19jMzhgMHDkTcd999l80q/9ixY+W6d+/umTklcHZDhgxxv//++2+MGzfOtBgL24EDB2rcd999HtmXS74ryqyE1ATm75/P1F1TOR17mgZVGjC963SG3z+cSuUqmR2eyKMzZ25262/aBPHxRou+fXvjhjxdu8J994GNnMERVs2bN/d2dHS0zJkz55zZsZhBEoB7kJamaTT6dV7y78+EQdJFXFJExkXy6e5PmbN3DrHJsbSv155JXSbRu2lvbG0K/kYeonAkJcG2bTcr/fBwY7m7Ozz9tFHhP/ooVK5sbpzCXE2aNEm9Xev/8OHDR4s6nuJEEoB78M7CrZyr/3+cTPYAJAEo7vZH72fKril8E/YNFm2hr3dfxrcbT9u6bc0OTeSC1nDs2M0JdbZsgeRkY3R+p04werRR6TdpYpzfF0LcmSQA96LcDaomtOPjwUPNjkTchkVbWHt8LVN2TiEkIoRK5SrxwgMv8NKDL9GgagOzwxN3ERcHmzffbOWfOWMsb9LkZoXfsSNUqGBunEKURJIA3IN3g7vxLt3MDkPkICktiUUHF/HJrk8IvxxOXee6/C/gfzzr+yxVylcxOzxxGxYLHDhwc/Der78aM+tVqgQBAfD668Zseh4eZkcqRMknCUA+/XfBBsb27kCNKo5mhyKyuJhwkc9+/4zP9nzG5RuXae3amsVPLKZfs37Y28pk68XR5cvwyy83u/YvXjSWt2oFEyYYrfx27YxZ9YQQBUfGw+bDj9tO897px3l61vtmhyKsDl86zDOrnqH+1Pq8u+1d2tdrz5ahW9jz7B4GthgolX8xkp4OO3fCW2/Bgw8al+YNHAirVxuD9hYsMO7Gt38/fPghPPKIVP4i/44dO1bO09Ozudlx3M4HH3zg4u7u7qOU8s06yY/FYmHYsGH13N3dfby8vJpt3769wE90SQ9APry8YjJUsWXG0OfNDqVM01qz6fQmJu+czPoT63G0c2R4q+GMazcOr+ped9+BKDLnz9/s1t+40bj9ro2NkQBMnGi08n19oRBmUxWiWHvkkUcS+vbte/3RRx+9ZZKk5cuXVz516lT5iIiIsJCQkIpjxoxxP3jwYHhBli09AHn0W1gMJ53n01IH09StjtnhlEkp6Sl89cdXtJrTii6LurA/ej/vdn6Xs+POMqv7LKn8i4GUFGPw3iuvQMuWxm11R440zun37g3LlkFMjHGznjffNO7CJ5W/KGxHjhwp5+3t3Wzr1q0V0tPTGT16dF0fHx9vLy+vZpMmTaoBRst79OjRdT09PZt7eXk1mzdvXlWA1atXO/n5+TXp1KlTYw8PD5+BAwe6Z2RkkJ6eTt++fT0yt3/77bfzNN3vQw89lNSkSZPU7Mt//PHHKoMGDbpiY2ODv79/YlxcnN2ZM2cKtCtTegDy6B9fzYCKycx46l9mh1LmXE26yuw9s/l096dEJ0TjU9OH+T3nM7DFQBzsZKYWs508ebOVv3kzJCaCvT106AD/+5/RyvfxkUv0yrIRP46oF3YprEC7sn1q+tyY32v+XW/kc+DAAYcBAwY0mj9//ul27dolffzxxzUqV66cERYWdjQpKUk98MADTXv06BG3a9euCocOHXI8evTo4ejoaLs2bdp4BwYGJgAcOnSo4v79+8O8vLxSO3bs6Llw4cKqjRs3TomOjrbPvNfA5cuXCySVjY6Otvfw8PgrMXB1dU09c+aMff369dMKYv8gCUCeWCwQkbqX+ra96Ojd1OxwyozjV44zdddUFhxYwI20GwQ2CmRB7wV0adhFZuQzUWKicS1+5iV6J04Yyxs0gKFDjQq/c2djBL8QZrp69apd7969G69YseKkr69vMsDGjRudw8PDK6xataoqQHx8vO2RI0fKh4aGOvXv3/+qnZ0d9erVS3/wwQcTtm/fXqFy5cqWFi1aJDZr1iwVoH///ldDQ0Mrde/ePe7cuXMOQ4
cOrdejR4/rffr0iTPzWPNCEoA8sLGBmKmruZpQYLMxitvQWrP97HYm75zMqmOrsLe1Z1CLQYxvNx6fmj5mh1cmaQ1Hjtys8Ldtg9RUcHQ0Bu+NHWtcote4sbTyRc5y01IvDE5OThlubm6pISEhlTITAK21mjx58tm+ffveUmGvWbPmtveOzGlaYRcXl4ywsLAjK1eudJ49e7bLsmXLqi1fvjwic5v09HR8fHyaAXTt2jV26tSpUbmJ2dXVNS0iIuKv4a/R0dHlCrL1DzIGINdi49I4HBGDUorqTtKkKSzplnS+CfuGNp+3oeOCjmw/u53/dPgPZ/55hvm95kvlX8RiY2HFCnjmGeMWuz4+xqV50dHw4ovG5XtXrxoj+F94ATw9pfIXxY+9vb1et27dyaVLl1afPXt2NYAuXbpcnzVrlktKSooCOHjwoENcXJxNx44d41esWFEtPT2dqKgou927d1fq0KFDIhinAMLDw8tlZGSwYsWKah06dIiPjo62y8jIYNiwYbEffvjh+UOHDt1yisPOzo7w8PAj4eHhR3Jb+QP07NkzdvHixdUtFgubNm2q6OTklFHQCYD0AOTSmFlLWRr/HL/020PAfc3MDqfUuZ58nc/3fc703dM5e/0sXtW9mBU0iyH3DaGCvdzmrahYLLBv381W/q5dkJFh3E8/IMC4dO+xx6BePbMjFSJvnJ2dLT///POJTp06eTk5OWWMGzfuckREhEOLFi28tdaqWrVqaWvXrj0ZHBwcu2PHjkre3t7NlVL67bffjnR3d08/ePAgPj4+ic8995x7RERE+fbt28cFBwfH7t6923HkyJEeFotFAbzzzjuReYnrvffeqzljxozaV65csb/vvvuade7c+fqyZcvO9O/f//qaNWsq169f38fR0dHy+eefRxT0e1Lk0wErpeoBC4FagAbmaq2n3ek1Zk8HnJxiwemVljiUsyH+fwfkvHMBOhN7hmm/TePzfZ8TnxrPI/Uf4eV2LxPkFYSNkg6qonDpEmzYcPNGPJetk6L6+d2cOvfBB42Z9UTJItMBF5zVq1c7TZ48uVZISMgJs2PJq+I0HXA68LLWep9SygnYq5T6RWt9xIRYcuX1+WtJr3aYsQ0WSeVfQHaf383knZP57sh3ADzl8xTj247H183X5MhKv7Q0o2Wf2crft89Y7uJys8Lv0sW4QY8QovQq8gRAax0NRFv/jldKHQXqAMUyAbBYYO7h/8O+gjsfDHzK7HBKtAxLBquOrWLKrilsP7udyg6VGd9uPC+2eZF6laVPuTCdPXvrjXji4ozr7tu3h/feMyr9++83BroKIf6ue/fu8d27d483O46CZGqnnlLKA7gf+C2HdaOAUQDu7u5FGldW328L54bLdgZXn0Y5O7mdbH4kpiby5R9fMnXXVE5eO4lHFQ+mPjaVEfePwMnByezwSqXkZGOUfmalf8SaXterB089ZVT4/v7GuX0hRNlkWgKglKoEfAf8U2v9t+smtdZzgblgjAEo4vD+8mSnpqytdIR23uYlISVVVHwUM36bwZy9c7iWfI22ddvyUcBH9G7aGzsbOaFckLSG48dvdutv2QJJSeDgYEyXO3KkUel7e8sofSGEwZRfYaWUPUblv1hr/b0ZMeRGapqFcvY2PO7nbXYoJcqBCweYsmsKSw8tJUNn0KdpH8a3G0/7eu3NDq1UiY+HkJCblf7p08ZyLy949lmjwn/kEaggF1EIIXJQ5AmAMkbRfQEc1VpPKery88Lr5VHYOaRx/H8LZPDfXVi0hfUn1jNl5xQ2nd5ERfuKPO/3PGPbjqVh1YZmh1cqaA0HD96s8H/91RjQV7Gi0Z3/r38Zl+g1lLdbCJELZgz5eQgIBh5VSv1hfXQzIY472rIvkjNVFlKtQmWp/O8gOT2ZeXvn4fOZD0FLggi/HM7/Bfwf58adY9rj06Tyv0dXrxoT5wwfDnXqQKtW8Nprxmx648cbPQBXr8KPP8Lzz0vlL0RBeO2112oX1L7Gjx/v9uabb9a6l3106NDB08nJqVXnzp0bZ10eHh5ermXLlk3d3d19goKCGiYnJ+epsjLjKoDtQLGvUV9c8glUtPBZ8HizQymWLiVeYtbvs5j5+0xibsTQqnYrFvVZRP/m/SlnK5O351dGBvz++83Be7t3G1eiVK0KgYFGt35gILi5mR2pEKXX9OnTXT/66KMLud3eYrGgtca2kKa0nDBhwoXExESbefPmuWRdPn78+LovvPDCxVGjRl0bOHCg+7Rp02q8+uqrMbndr1z0k4PDp64S5jAHz5QB+DX2MDucYuVozFFG/TQK90/cmbh1Ig/WfZDNQzazb9Q+BrccLJV/PkRHw4IFMGCAce19u3bw9tvGuv/+F3buNKbO/eYbGDZMKn8h8uLYsWPlGjRo0Lxnz54NGjZs2Lxr164N4+PjbVatWuUUEBDQKHO7lStXOnfp0qXRmDFj6qSkpNg0bdq0Wc+ePRsATJw4sZanp2dzT0/P5u+8807NzP16eHj49OnTx8PLy6v5yZMny61YscK5WbNm3k2aNGnWrl27v+YlP3r0qGObNm2a1K1bt8V7772X5zts9OrVK97Z2dmSdZnFYmHnzp1Ow4cPvwYwYsSIKz/99FOVvOxXhmLn4Pn5n0G5RD4JesXsUIoFrTWbT29myq4prD2+lvJ25RnWahj/bPtPmtaQWRHzKjUVduy4eS7/wAFjee3a0LOn0coPCIDq1c2NU4jC0GZemybZlz3h/cTV1x5+LSY+Jd7Gf6G/Z/b1g1sOvvzSgy9diY6Ptuv1Ta9GWdftfnb3sbuVGRERUX7OnDkRgYGBif369fOYNGmSy8SJEy+OHTvWPSoqys7NzS19/vz51YcPH3554MCB1xcsWFAzPDz8CEBoaGiFJUuWVN+7d+9RrTW+vr7e/v7+8TVq1Mg4e/aswxdffHHa398/Iioqyu6FF17w2LJlS3jTpk1TL168+Fd3wIkTJ8rv2LHjWGxsrK23t7fPv/71rxgHB4d7urrt4sWLdk5OThn29sbl6R4eHqkXL17MUwtMEoAcfPbsM3y63o0gv5Zmh2Kq1IxUloUtY8quKfxx4Q9qVqzJ253e5nm/53Gp6HL3HYi/nD59s8LfvBkSEoxb6z78MHz0kVHpt2wpl+gJURhq166dGhgYmAgQHBx8Zfr06TVtbGwu9u/f/8q8efOq/eMf/7iyb9++St9///3p7K/dsmVLpW7dusVmtsCDgoKuhYSEOPXr1y/W1dU11d/fP9G6XcU2bdrEN23aNBWgVq1aGZn7CAwMjHV0dNSOjo7p1apVS4uMjLRr1KhRgU7skx+SAOTAp35tZo8eYXYYprmadJW5e+cyY/cMouKjaObSjM97fM6gloMob1fe7PBKhBs3YOvWm5X+n38ayz08IDjYqPA7dwYnuQ+SKGPu1GJ3cnCy3Gm9q5Nrem5a/NnlNI0vwPPPP38lKCiocfny5XWPHj2uZbamc6tChQqWu28FWVv7tra2pKen3xLQwoULq3zwwQduAHPnzo3o2LHjjbvts1atWunx8
fG2aWlp2NvbExERUa5WrVqpeYlfxgBkkZiUTt2xT/PZ6u1mh2KKk1dP8uLaF6n3ST1e3/Q6zV2as27QOsKeD2Nk65FS+d+B1sbd9j75xLgUr1o16NYN5s6FRo1g2jQ4dgxOnYLPPjO6+qXyF6JoREdHl9u4cWNFgMWLF1dr3759AoCHh0darVq10iZPnuw6atSovyYssrOz05nTBHfu3Dlh7dq1VeLj423i4uJs1q5dW7Vz585/uyVwp06dEnfv3u0UHh5eDiDrKYC7GTJkSGzmlMG5qfwBbGxsaNu2bfyXX35ZFWD+/PnVu3fvHpvbMkF6AG4x/vMVnK/2DTGJZeee/1prdpzbweSdk/kh/AfsbOwY2GIg49uNp2Wtsn0K5G6uX4dNm2628s+dM5Y3awZjxhit/A4dwNHR3DiFKOs8PDySZ8yYUXPUqFEVPD09kydMmPDXSPkBAwZcmTlzpl3r1q2TM5cNGjQoxtvbu5mPj8+NVatWnR44cOCV1q1bewMEBwfHPPTQQ0nHjh275Xy7m5tb+vTp0yP69OnT2GKxUL169bQdO3YcL4j4fX19m5w6dap8UlKSba1atVp+9tlnEX379o2bPHly5FNPPdXovffeq9O8efMbY8eOzdOsi0U+HXB+FMV0wBkZmoovtwa7ZBL/dxjbUj4rSrolne+Pfs/knZPZfX43VctX5Xm/53mhzQu4OrmaHV6xk5oKZ87AyZPG7Hnr1xsD+TIywNnZGLTXtavR+jdx6gohbiHTARuj9bt37+55/PjxwzmtHzJkiPv9999/Y9y4cSV+yuLbKU7TARdL7y75hZSqfzC69heluvKPS4nji31fMO23aZy5fobG1Rozs9tMht43lIrlKpodnqni4owKPqfHuXPG9fiZWreGV181Kv22bSGPpw6FEMVA8+bNvR0dHS1z5sw5Z3YsZpAEAOP87Se/f4StoxuThw4yO5xCcfb6Wab/Np15++YRlxJHB/cOTOs6je5e3bG1KZybVxQ3WhvX3Get2E+duvn35Wz5f40axvn7hx4y/s18NG1qrBNCFH9NmjRJvV3r//Dhw0eLOp7iRBIAwGLRBLr3orqTMxXLO5gdToHaE7WHyTsns/zwcgD6Ne/H+LbjeaDOAyZHVjhSUyEi4taKPWtln5R0c1sbG6O7vlEj6NPn1kq+USOja18Icc8sFotF2djYFP/zzaWQxWJRQI5XK0gCANjaKpZPGGt2GAUmEIJRnwAADTJJREFUw5LB6j9XM3nnZELPhuLs4Mw/2/6Tlx58CffKJf8EdV666h0djfvjN2pk3EI3awVfvz6UkxsXClHYwmJiYpq5uLhclySgaFksFhUTE1MZCMtpfZlPAOb89Duhxw8wc1QwlSuV7NZ/YmoiXx34ik92fcKJqyeoX7k+UwKnMLL1SJwdSk5ztiC66jMrfVdXubmOEGZKT09/5sKFC59fuHDBB7n0vKhZgLD09PRnclpZphOAL9bu47lfH8PO4szEuF5UrlQy724XHR/Np7s/Zfbe2VxNukqbOm1Y9uQynvB+Ajub4vkRZx1Vn9uu+oYNpateiJLG19f3EtDT7DjE3xXP2qEIzF+3j2e2BWCX4cyvz26hsVvJqvxjk2PZG7WXRQcXseTQEtIt6fRu2puX271M+3rti8UUxtJVL4QQxVeZTAC+XL+PkVtvVv5tvDzMDumOridfZ1/0PvZG72VP1B72Ru/lxNUTAFSwr8Bo39H8s+0/aVSt0V32VLDutas+s8KXrnohhCh6ZTIB2HkyDPuMKoQ+s7nYVf5xKXHsj97/V0W/J2oPx6/evJlU/cr18XXzZUSrEfi6+fJgnQepXL5yocWT1676evVyHlXfsCFULrwwhRBC5FGZuhPgtfhkqjoZ97OPTUiiSiVz79EanxLP/gv72Ru1lz3Re9gbtZc/r/yJxvhM6jnXw8/ND19XX+NfN19qVCj4C9Dz21Wf/SFd9UIUL8XhToCi+CozPQBfb9zP0J978L92C3n5iUeLvPJPSE3gjwt/3FLZh18O/6uyr+tcF19XXwa3HIyvqy++br7UrFizQMrO3lWf/Rr523XVt2//90peuuqFEKJ0KBMJwOJN+xmyMQAbKvFgE49CL+9G2g3+uPDHLd344ZfDsWijKe3m5Iafmx8DfAb81cKvVanWPZUpXfVCCCHyotQnAEtC9hP8SwA2GZUIGRrCw80bFti+tdbEpcRxJObILQP0jsQc+auyr12pNn5ufvRr1u+vyv5uk+1obVTY167d/REVlftR9ZnPPTykq14IIco6UxIApVRXYBpgC3yutf6oMMrZdvA0g382Kv/NQ0Lo4JP7yt+iLcQkxhAZF8n5+PPGv3HniYy3/mtdnpCa8NdralasiZ+bH080fYLWrr40q+JH+TS3vyrr2KPw847cVeypqXeOz9kZqlaFWrWkq14IIUTeFXkCoJSyBWYCXYBI4Hel1Cqt9ZGCLqt98/q0tR/NB0+P5IGmrlxIuMD15OvEpcRxLek6VxKtj4RrRMZFERkXyYXE81y4EcnllCjSddot+7PBFiflhpOuQ8WMFjRO64pDSl3KJTTG9qLf/7d3rzF6VHUcx78/WtrdhfbZriCpW2iLgqaSWKDBIkKgNFyKsb7ApAZDQQlRQryACAi+0HhBJUSIRgLlYg0CUggCKSEVSkgILW2x0HJfKEoRKJfeBAJL+fvinIXputt2l92d3ZnfJzl5Zs6cmTnnOdPOec6cncPbr7bTsVGs2AibNkFnZy8ZI92cG410E+8K7e3br3cPra3ps9GA0ZXvuzEzs8FUxm3kMKAjIp4HkHQTMBcY8AbArF9czMPvX80xt14Kt+/gbgzQ2QxbJuVwZPrc2p7X0+cHb32SzTGKzaQu9OZmaGqCceM+ukFPmbzjm3hXGD8+PYs3MzMrQxkNgHagOPfyeuCL3RNJOhM4E2C//fo3gc2BEz7Pf547mSaNp3m3Bi27NWgZ3WDPHMbt3mD82AaNsa20tTRoaRFNTenG3hV6Wm9qglH1mEHXzMwqath2JEfEVcBVkN4D0J9jLPjeKcApA5ktMzOzSiijE/olYN/C+qQcZ2ZmZkOkjAbACuAASVMljQHmAXeUkA8zM7PaGvJHABHxvqSzgXtIfwZ4bUQ8PtT5MDMzq7NSxgBExGJgcRnnNjMzs3IeAZiZmVnJ3AAwMzOrITcAzMzMasgNADMzsxpSRL/esTOkJL0G/GsnyfYCXt9JmipyuevF5a6fj1P2yRGx90BmxqpjRDQAdoWklRExo+x8DDWXu15c7vqpc9ltcPkRgJmZWQ25AWBmZlZDVWoAXFV2BkricteLy10/dS67DaLKjAEwMzOzXVelHgAzMzPbRW4AmJmZ1dCIbwBIOkHS05I6JF1Qdn76Q9K+kpZKekLS45K+n+PbJC2R9Gz+nJDjJemKXObHJB1SONb8nP5ZSfML8YdKWpP3uUKShr6kPZM0StI/Jd2V16dKWp7zenOeNhpJY/N6R94+pXCMC3P805KOL8QPy+tDUqukRZKekvSkpMPrUN+Sfpiv
8bWSbpTUVNX6lnStpA2S1hbiBr2OezuH2f+JiBEbSNMJPwfsD4wBHgWmlZ2vfpRjInBIXh4HPANMA34LXJDjLwB+k5fnAHcDAmYCy3N8G/B8/pyQlyfkbQ/ntMr7nlh2uQvlPwf4K3BXXv8bMC8vXwl8Ny+fBVyZl+cBN+flabnuxwJT8zUxajhfH8CfgTPy8higter1DbQD64DmQj2fVtX6Bo4CDgHWFuIGvY57O4eDQ/cw0nsADgM6IuL5iHgPuAmYW3Ke+iwiXo6IR/LyVuBJ0n+Wc0k3CvLn1/LyXGBhJMuAVkkTgeOBJRHxZkRsBJYAJ+Rt4yNiWUQEsLBwrFJJmgScBCzI6wJmAYtyku7l7vo+FgHH5vRzgZsi4t2IWAd0kK6NYXl9SGqQbg7XAETEexGxiRrUN2kK8mZJo4EW4GUqWt8R8QDwZrfooajj3s5htp2R3gBoB14srK/PcSNW7uY8GFgO7BMRL+dNrwD75OXeyr2j+PU9xA8Hvwd+DHyQ1z8BbIqI9/N6Ma8fli9v35zT9/X7KNtU4DXguvzoY4GkPah4fUfES8ClwL9JN/7NwCqqX99FQ1HHvZ3DbDsjvQFQKZL2BG4FfhARW4rbciu/Un+zKekrwIaIWFV2XobYaFLX8J8i4mDgLVJX7YcqWt8TSL9OpwKfAvYATig1UyUaijqu4nVkA2ekNwBeAvYtrE/KcSOOpN1JN/8bIuK2HP1q7uojf27I8b2Ve0fxk3qIL9sRwFclvUDqrp0FXE7q/hyd0xTz+mH58vYG8AZ9/z7Kth5YHxHL8/oiUoOg6vU9G1gXEa9FRCdwG+kaqHp9Fw1FHfd2DrPtjPQGwArggDyKeAxpoNAdJeepz/JzzWuAJyPissKmO4CuUb/zgb8X4k/NI4dnAptzl989wHGSJuRfW8cB9+RtWyTNzOc6tXCs0kTEhRExKSKmkOruvog4BVgKnJyTdS931/dxck4fOX5eHjU+FTiANEBqWF4fEfEK8KKkz+aoY4EnqHh9k7r+Z0pqyfnqKnel67uboajj3s5htr2yRyF+3EAaPfsMafTvRWXnp59l+DKpm+4xYHUOc0jPO+8FngX+AbTl9AL+mMu8BphRONa3SIOiOoDTC/EzgLV5nz+Q3wI5XAJwNB/9FcD+pP/QO4BbgLE5vimvd+Tt+xf2vyiX7WkKI96H6/UBTAdW5jq/nTTCu/L1DfwMeCrn7S+kkfyVrG/gRtJYh05Sr8+3h6KOezuHg0P34FcBm5mZ1dBIfwRgZmZm/eAGgJmZWQ25AWBmZlZDbgCYmZnVkBsAZmZmNeQGgFWapG2SVkt6VNIjkr60k/Stks7ahePeL2lGP/O0WFJrf/Y1MxsobgBY1b0TEdMj4gvAhcCvd5K+lTQL3aCJiDmRJv8xMyuNGwBWJ+OBjZDmXZB0b+4VWCOpa9a4S4BP516D3+W05+c0j0q6pHC8r0t6WNIzko7sfjJJEyU9kI+1tiuNpBck7SXpO3nbaknrJC3N24+T9FDO2y15jggzswHlFwFZpUnaRnqzWhMwEZgVEau6pqONiC2S9gKWkV4pO5n0RsKD8v4nAj8FZkfE25LaIuJNSfcDqyLiXElzgHMiYna3c58LNEXELyWNyufbmuc+mBERr+d0uwP3keZxf4j0jvwTI+ItSeeT3oz388H8nsysfkbvPInZiPZOREwHkHQ4sFDSQaRXr/5K0lGkqYjb6Xna1NnAdRHxNkBEFOd375q0aRUwpYd9VwDX5hv87RGxupc8Xk56z/2deYbEacCD6RXvjCE1CszMBpQbAFYbEfFQ/rW/N+md8XsDh0ZEZ/5V3tTHQ76bP7fRw7+liHggNzBOAq6XdFlELCymkXQaqdfh7K4oYElEfKOPeTEz6xOPAbDakPQ5YBRpStkGsCHf/I8h3YQBtgLjCrstAU6X1JKP0daH800GXo2Iq4EFpCl/i9sPBX4EfDMiPsjRy4AjJH0mp9lD0oF9K6mZ2c65B8CqrllSV9e7gPkRsU3SDcCdktaQZuV7CiAi3pD0oKS1wN0RcZ6k6cBKSe8Bi4Gf7OK5jwbOk9QJ/Jc0ZWvR2UAbsDR396+MiDNyr8CNksbmdBeTZrgzMxswHgRoZmZWQ34EYGZmVkNuAJiZmdWQGwBmZmY15AaAmZlZDbkBYGZmVkNuAJiZmdWQGwBmZmY19D83l2qfJjqqcgAAAABJRU5ErkJggg==\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "plot_absolute_time(experiments_eq, results, [2, 10], max_batch_size=max_batch_size)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### 2. $N_\\text{ref} >> N_\\text{test}$\n", + "\n", + "Now we check whether the speed improvements still hold when $N_\\text{ref} >> N_\\text{test}$ ($N_\\text{ref} / N_\\text{test} = 10$) and a large part of the kernel can already be computed at initialisation time of the PyTorch (but not the KeOps) detector." + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [], + "source": [ + "experiments_neq = {\n", + " 'keops': {\n", + " 0: {'n_ref': 2000, 'n_test': 200, 'n_runs': 10, 'n_features': 2},\n", + " 1: {'n_ref': 5000, 'n_test': 500, 'n_runs': 10, 'n_features': 2},\n", + " 2: {'n_ref': 10000, 'n_test': 1000, 'n_runs': 10, 'n_features': 2},\n", + " 3: {'n_ref': 20000, 'n_test': 2000, 'n_runs': 10, 'n_features': 2},\n", + " 4: {'n_ref': 50000, 'n_test': 5000, 'n_runs': 10, 'n_features': 2},\n", + " 5: {'n_ref': 100000, 'n_test': 10000, 'n_runs': 10, 'n_features': 2}\n", + " },\n", + " 'pytorch': {\n", + " 0: {'n_ref': 2000, 'n_test': 200, 'n_runs': 10, 'n_features': 2},\n", + " 1: {'n_ref': 5000, 'n_test': 500, 'n_runs': 10, 'n_features': 2},\n", + " 2: {'n_ref': 10000, 'n_test': 1000, 'n_runs': 10, 'n_features': 2}\n", + " }\n", + "}" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [], + "source": [ + "results = {backend: {} for backend in backends}\n", + "\n", + "for backend in backends:\n", + " exps = experiments_neq[backend]\n", + " for i, exp in exps.items():\n", + " results[backend][i] = experiment(\n", + " 'mmd', backend, exp['n_runs'], exp['n_ref'], exp['n_test'], exp['n_features']\n", + " )" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The below plots illustrate that KeOps indeed still provides large speed ups over PyTorch. The x-axis shows the reference batch size $N_\\text{ref}$. Note that $N_\\text{ref} / N_\\text{test} = 10$." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAgQAAAEWCAYAAAAZ9I+bAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjIsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8li6FKAAAgAElEQVR4nO3dd5xU1fnH8c9DB0GKoIKICwrSFIQNFhAxCqIUK4oFEbuJmqgxUWPUmFgSS4xYsCFqglGxBBU0ioj4A6WoSBEEkbb0vhSBZZ/fH+eujOt2dvZu+b5fr3nN3P7cmdm9z5xz7jnm7oiIiEjFVinuAERERCR+SghERERECYGIiIgoIRARERGUEIiIiAhKCERERAQlBOWOmbmZHZbH8mFm9qeE6WvMbJWZbTGz/YpwvJTomFWKGnNxic6hRQkcp1l0rMolcKwDzOwTM0s3s4eSfTwpODO7zcyejTsOkeKihKCIzGyRme00s4bZ5n8ZXSBToukR0fTp2db7RzT/kmj6EjPbHV1otpjZ92b2vJm1Ks643f1qd/9LdMyqwMNAL3ev7e7r8kso9kZ0jp8W074+NrPLE+dF57CwOPaf7ViLzOzkhOMsiY61u7iPlYMrgbXAvu5+097uzMwam9loM1ue+D1NWF7dzIab2WYzW2lmN2ZbfpKZzTWzbWY23swO2duY4mBmd5nZvwqxfg8zW5Y4z93vdffLc9tGpKxRQrB3vgfOz5owsyOAWjms9y1wccJ6VYBzge+yrTfZ3WsDdYGTge3AdDNrXxzB5vCL9gCgBjC7OPYvSXEIMMeL0INYLqU2mcB7wNm5bHYX0DI67onA782sd7S/hsAbwJ+ABsA04JXCxlVYpaH0SaRCcHc9ivAAFgG3A1MT5j0I/BFwICWaNyKavwqoH83rC4wFPgUuieZdAnyaw3HeAUblEcfNwApgOXBpdOzDEo79JDAG2EpIMkYAfwVaRfMc2AJ8BHwSTW+N5p2Xw/EqR+ezFlgI/Drapkq0vC7wXBRTWnSsykAb4Adgd7TvjdH61aP9LYneo2FAzYTjnQ58BWwmJFC9gXui/fwQ7euxaN3Ec68LvAisARZHn1WlxPc6Ou4GQmJ3ai7v70uEi+j26Fi/B1KynfPH0XlOitZ5G9gP+HcU99Ss70O0fmvgA2A9MA84N5djjwB2ATuj/Z4cvV+PRJ/38uh19Wj9HsAy4A/ASuClPL43VUj4nibMX04oMcqa/gvwn+j1lcCkhGX7RO9L6zz+Rm4F5kTv8/NAjYTlfaPPdmP03h2Zbds/AF8DO6J4FxG+718TvqPPEZLasUA68CF7/sZ6AMtyiOdkwndoZ/TebgFmRMuHAN9E+1oIXJXtPDOj9bcATQjJ078S9t+fkFxvjL4TbbId+3dR7JsIiVSNaFlDwt/5xug7MZHou6qHHiX5iD2AsvpI+Ocyj3Cxqxz9Mz6EnycEfwWeBq6J5r1KKFkoSEJwKbAqlxh6Ey6i7aN/WiP5eUKwCehKKA2qkRVPtDyFhAtbNO/H7XM55tXAXOBgwq/E8fz04vgm8FQUz/7AlIR/rD87R+AfwOhoX3UIF9P7omVdovh7RvEfRHTxif7hXp5tX4nn/iLw32ifKYRSmssS4tgFXBF9btcQLoSW12edMP2T9y2KZQFwKCERmRMd72TChexF4Plo3X2ApYSLTxXgKEJy1TaXY//4eUXTdwOfRe9tI8KF9C/Rsh5ABvA3QuJQM6d9Ruv+LCEA6kfzDkiYdw4wM3r9T+DJbPuZBZydx/s2K+G78n/s+e4dBawGjo4+g8HR+tUTtv0q2rZmwrzPCEnAQdH2X0T7qkFIau9MeC9yTAii13eRcDGP5vWJPkMDTgC2AZ3y2N+P+2BPgt0TqEpIHBcA1RKOPYWQSDQgJB5XR8vuIyTCVaPH8eTyXdRDj2Q+VGWw914iVAf0JPyRp+Wy3ovAxWZWj/DP5q0C7n854R9ITs4lXGhmuftWwj+o7P7r7v/n7pnu/kMBj5mXc4FH3H2pu68n/DMDQgM44DTgt+6+1d1XEy74A3PakZkZ4VfnDe6+3t3TgXsT1r8MGO7uH0Txp7n73PwCjKpGBgK3unu6uy8CHgIGJay22N2f8dAO4AWgMeFCU1TPu/t37r6J8Iv1O3f/0N0zgNcIFy0Iv4oXufvz7p7h7l8CrwMDCnicC4G73X21u68B/pztvDIJF8Ud7r69kOdQO3relDBvEyGpylq+iZ9KXJ6TxxK+K/ewp4rtSuApd//c3Xe7+wuEkoBjErZ9NNo28TyGuvsqd08j/JL+3N2/jL7bb7LnfS40d383+gzd3ScA/yNcnAviPODd6Lu6i1D6VBM4Ltv5LI/ei7eBjtH8XYTv3yHuvsvdJ7q7BpmREqe6ub33EqGovTnhop8jd//UzBoRqhTecfft4XqYr4MIxYg5aQJMT5henMM6SwtykEJokm2ficc8hPALZ0XCuVXKI4ZGhDYX0xPWN8IvRgi/DscUIcaGURyJsS0mvJdZVma9cPdt0fFrU3SrEl5vz2E6a9+HAEeb2caE5VUI36OCaMLPz6tJwvSavUj8tkTP+xKqY7Jepycs3zfbNonLc5L9u5IV6yHAYDO7LmF5NX56Ljl9bwr6PheamZ0K3En4tV+J8N2cWcDNf/K5uHummS0ll+8cofQh61wfICTz/4u+h0+7+/1FOAWRvaISgr3k7osJddCnERpc5eVfwE3kkTjk4EzCL6GcrCBcNLM0yynEQhyrIPI65lLCr7yG7l4veuzr7u1yiWUt4Z94u4T163poWJm1v0NziSOv81pL+NWV2AK+GbmX3uSnON/DpcCEhPOt5+GOhWsKuP1yfn5eyxOmixyru28gfL4dEmZ3YE+j09mJy8xsH8Lnk1ej1OzflaxYlwL3ZHsfarn7y4khFe1MgFB8/2MD36jUqFFu+zaz6oSSmgcJVSb1CMmo5bR+Dn7yuUSlXwdTgO9cVIp1k7u3ILRDuNHMTspvO5HipoSgeFwG/DIqts/Lo4SqhU/yWsnMKptZczMbSqi7/HMuq74KXGJmbc2sFuHXzd5aBeR1L/+rwPVm1tTM6gO3ZC1w9xWEYtaHzGxfM6tkZoea2QkJ+25qZtWi9TOBZ4B/mNn+AGZ2kJmdEq3/HDAkutWtUrSsdX5xRtUArwL3mFmd6Na4GwkJWVHk954UxjtAKzMbZGZVo8cvzKxNAbd/GbjdzBpFrf7voJDnZWY1CG0MAKpH01lejPZfP3qvryC0Y4BQJN/ezM6OtrkD+DqfapxfR9+VBoTSsay7Ep4Brjazoy3Yx8z6mFle1Q+F8S1QI9pnVUKj0uoJy1cBKWaW9T+wWrR8DZARlRb0yrb+fmZWN5fjvQr0ib6rVQmJ/w5CG488mVlfMzssSiI2ERrMZ
hb0REWKixKCYhDVO04rwHrr3X1cHvWDx5rZFkLL9I8JxbG/cPcciy3dfSyhlflHhAZMHxUl/mzuAl4ws41mdm4Oy58B3gdmEBp0ZS8VuZjwzzWrZfkoQv0oUXyzgZVmtjaa94co9s/MbDOhpfjh0flNITS++wfhH+UE9vwK+ydwjpltMLNHc4jzOsKvxIWExpsjgeEFewt+5j7CRXKjmf2uiPsAwq9BwoVmIOFX5Ur2NAIsiL8Sbvf7mlCc/UU0rzCy7piA0EA0sY7+TsLdHIsJ7/cD7v5eFPsawu2K9xA+26PJpX1IgpGEJHFhtN+/RvuaRkg2Hov2tYDQ2LNYRG05fgU8S/iVvpXQ6DfLa9HzOjP7Ivpcridc2DcAFxAau2btby4hGVsYfQ8SqzZw93nARcBQQglVP6Cfu+8sQLgtCd/7LcBk4Al3H1+4MxbZe6a2KyKSDGa2iHAnyIdxxyIi+VMJgYiIiCghEBEREVUZiIiICCohEBEREcpZx0QNGzb0lJSUuMMQESkzpk+fvtbdG+W/ppR35SohSElJYdq0fO/+ExGRiJnl1MOpVECqMhARERElBCIiIqKEQERERFBCICIiIighEBEREZQQiIiICEoIREREBCUEIiIighKCEvH993DssTBhQtyRiIiI5EwJQQmoUwcuvBAOOijuSERERHJWrrouLq0aNoRrr407ChERkdwlrYTAzIab2Wozm5XL8pvN7KvoMcvMdptZg2jZIjObGS0r84MTLF4Ms2eDRpoWEZHSKplVBiOA3rktdPcH3L2ju3cEbgUmuPv6hFVOjJanJjHGEvGPf8Axx4BZ3JGIiIjkLGkJgbt/AqzPd8XgfODlZMUSt7Q0tR8QEZHSLfZGhWZWi1CS8HrCbAf+Z2bTzezKfLa/0symmdm0NWvWJDPUIlu+XAmBiIiUbrEnBEA/4P+yVRd0c/dOwKnAr82se24bu/vT7p7q7qmNGjVKdqxFohICEREp7UpDQjCQbNUF7p4WPa8G3gS6xBBXscjMVAmBiIiUfrHedmhmdYETgIsS5u0DVHL39Oh1L+DumELca+7w6qtw6KFxRyIiIpK7pCUEZvYy0ANoaGbLgDuBqgDuPixa7Uzgf+6+NWHTA4A3LTTJrwKMdPf3khVnslWuDGecEXcUIiIieUtaQuDu5xdgnRGE2xMT5y0EOiQnqpK3dCnMmQPHHw+1asUdjYiISM5KQxuCcm3sWOjdG9atizsSERGR3CkhSLK0tNAh0YEHxh2JiIhI7pQQJFlaGhxwAFStGnckIiIiuVNCkGTLl0OTJnFHISIikjclBEmmTolEJJlGjICLLgp9nojsDQ1/nGQvvqhBjUSk+GVmwu23w333wUknwfbtsM8+cUclZZkSgiTrUG5uoBSR0mLbNhg8GEaNgiuugMcfVzsl2XuqMkiidevg2Wdh2bK4IxGR8mLlSujRA15/HR58EJ56SsmAFA8lBEn0zTche589O+5IRKQ8mDkTjj46/E954w246SZVSUrxUUKQRGlp4VmNCkVkb40dC127QkYGTJyoLtGl+CkhSKLly8OzEgIR2RtDh0LfvnDYYTBlCnTqFHdEUh4pIUiitDSoWRPq1Ys7EhEpizIy4Lrr4PrrQ0LwySf6gSHJo4QgibL6IFAdn4gU1ubN0L8/PPYY3HhjaDNQu3bcUUl5ptsOk+ixx2D9+rijEJGyZsmSUCIwZw4MGwZXXRV3RFIRKCFIov32Cw8RkYKaMiWUDPzwQ2hI2LNn3BFJRaEqgyRxh7/8BT77LO5IRKSsGDUKTjgBatWCSZOUDEjJUkKQJBs2wB13wOTJcUciIqWde+iCeMAAOOqo8EOibdu4o5KKRlUGSaI+CESkIHbuhCuvhBdegPPPh+HDoUaNuKOSikglBEmihEBE8rNuHfTqFZKBO++Ef/9byYDEJ2kJgZkNN7PVZjYrl+U9zGyTmX0VPe5IWNbbzOaZ2QIzuyVZMSZTVqdETZrEG4eIlE7z58Oxx4ZqxX/9C+66S7coS7ySWWUwAngMeDGPdSa6e9/EGWZWGXgc6AksA6aa2Wh3n5OsQJNBCYGI5GbCBDjrLKhUCcaNg27d4o5IJIklBO7+CVCUu/C7AAvcfaG77wT+A5xerMGVgNtug9WroXr1uCMRkdJkxIhw98D++8PnnysZkNIj7jYEx5rZDDMba2btonkHAUsT1lkWzcuRmV1pZtPMbNqaNWuSGWuhVKoEjRrFHYWIlBaZmeGHwpAh0L17qCpo0SLuqET2iDMh+AI4xN07AEOBt4qyE3d/2t1T3T21USm6At99d2ggJCKybRucd164tfCKK0KHQxrjREqb2BICd9/s7lui12OAqmbWEEgDDk5YtWk0r0x54gn4+OO4oxCRuK1cCT16wOuvw4MPwlNPQdWqcUcl8nOx9UNgZgcCq9zdzawLITlZB2wEWppZc0IiMBC4IK44i2LXrtB+QLccilRsM2eGMQnWrg2DE51xRtwRieQuaQmBmb0M9AAamtky4E6gKoC7DwPOAa4xswxgOzDQ3R3IMLNrgfeBysBwd5+drDiTYeXK0POYEgKRimvs2FBNUKcOTJwInTrFHZFI3pKWELj7+fksf4xwW2JOy8YAY5IRV0lQp0QiFdtjj8FvfgNHHglvvw1Nm8YdkUj+4r7LoFzasAGqVVNCIFLRZGTAddeFR9++oWRAyYCUFRrLIAlOPTUMXeoedyQiUlI2b4aBA0NVwY03wt//DpUrxx2VSMEpIUgSM3VDKlJRLFkSSgTmzIFhw+Cqq+KOSKTwlBAkwQMPhEFL7r8/7khEJNmmTIH+/WH79lA60LNn3BGJFI0SgiQYOxZ27Ig7ChFJtlGjYNAgOPBA+OgjaNs27ohEik6NCpMgLU0NCkXKM/fQ6+CAAXDUUWFMAiUDUtYpIShm7koIRMqznTvh0kvDuATnnx9KBvbfP+6oRPaeEoJilp4OW7dq2GOR8mjdOujVK4xYeOedYbySGjXijkqkeKgNQTHbsAEOPRRSUuKORESK0/z50KcPLF4M//oXXHhh3BGJFC8lBMXskENgwYK4oxCR4jRhApx1VhjWfNw46NYt7ohEip+qDERE8jBiRLiVcP/94bPPlAxI+aWEoJg991z455GREXckIrI3MjPhj3+EIUOge3eYNClUB4qUV6oyKGZffgnTpkEVvbMiZdb27XDxxaGfgSuugMcfh6pV445KJLl02SpmuuVQpGxbuRJOPx2mToUHHwzjEqgbcqkIlBAUs+XLlRCIlFUzZ4YxCdauhTfegDPOiDsikZKjNgTFTCUEImXT2LHQtWto/zNxopIBqXiUEBQjd2jdGjp0iDsSESmMxx4LJQOHHhq6Ie7UKe6IREqeqgyKkRl8+GHcUYhIQWVkhDYCQ4dCv34wciTUrh13VCLxSFoJgZkNN7PVZjYrl+UXmtnXZjbTzCaZWYeEZYui+V+Z2bRkxSgiFVd6emg8OHQo3HADvPmmkgGp2JJZZTAC6J3H8u+BE9z9COAvwNPZlp/o7h3dPTVJ8RW7sWOhTRv4
9tu4IxGRvCxZEtoLvP8+DBsGDz8MlSvHHZVIvJJWZeDun5hZSh7LJyVMfgY0TVYsJeX772HuXKhTJ+5IRCQ3U6ZA//6hr4GxY0NHYiJSehoVXgaMTZh24H9mNt3MrsxrQzO70symmdm0NWvWJDXI/KSlhV8ZGgpVpHQaNQpOOAFq1oTJk5UMiCSKPSEwsxMJCcEfEmZ3c/dOwKnAr82se27bu/vT7p7q7qmNGjVKcrR5S0uDxo1V9ChS2rjDfffBgAFw1FHhToK2beOOSqR0iTUhMLMjgWeB0919XdZ8d0+LnlcDbwJd4omwcJYvhyZN4o5CRBLt3AmXXgq33QYDB8JHH6kUTyQnsd12aGbNgDeAQe7+bcL8fYBK7p4eve4F3B1TmIWSmqr+zkVKk/Xrw7DFEybAHXfAXXepG2KR3CQtITCzl4EeQEMzWwbcCVQFcPdhwB3AfsATFv5CM6I7Cg4A3ozmVQFGuvt7yYqzON17b9wRiEiW+fOhTx9YvBj+9S+48MK4IxIp3ZJ5l8H5+Sy/HLg8h/kLgTLX1597eNavD5H4TZgQSgYqVYJx46Bbt7gjEin9Ym9UWF4sWBBuN3zzzbgjEanYRowIdw/svz989pmSAZGCUkJQTNLSYOtW2HffuCMRqZgyM+GPf4QhQ+D442HSpDA2gYgUjMYyKCZpaeFZIx2KlLzt2+Hii0M/A5dfDk88oQa+IoWlhKCYLF8enpUQiJSslSvDmARTp8IDD8BNN6ktj0hRKCEoJmlpoQ2Bui0WKTkzZ4Zhi9euhTfegDPOiDsikbJLCUEx6dIFqlePOwqRimPsWDjvvJCET5wInTrFHZFI2aZGhcXkggvgb3+LOwqRiuGxx0LJwKGHhm6IlQyI7D0lBMVk8+Y9fRGISHJkZMD118N114VOhyZOhKZlfpxUkdJBCUExyMyEhg3h9tvjjkSk/EpPD40Hhw6FG24IfX7Urh13VCLlh9oQFIO1a2HXLjjwwLgjESmfliwJVQRz5sCTT8LVV8cdkUj5o4SgGKgPApHkmTIF+vcPfQ2MGQO9esUdkUj5pCqDYqA+CESSY9QoOOEEqFkTJk9WMiCSTEoIikFWCUGTJvHGIVJeuMN998GAAXDUUeFOgrZt445KpHxTlUEx6NgRbr1VbQhEisPOnXDVVWGQooED4fnnoUaNuKMSKf/yTQjM7FjgIuB4oDGwHZgFvAv8y903JTXCMqBLl/AQkb2zfn0YtnjCBLjjDrjrLnVDLFJS8kwIzGwssBz4L3APsBqoAbQCTgT+a2YPu/voZAdami1eDHXrQr16cUciUnbNnx/6Fli8GF56CS66KO6IRCqW/EoIBrn72mzztgBfRI+HzKxhUiIrQ/r3h5QU+O9/445EpGyaMCGUDJjBuHHQrVvcEYlUPHk2KsxKBsxsHzOrFL1uZWb9zaxq4joVWVqaGhSKFNULL0DPnrD//qHxoJIBkXgU9C6DT4AaZnYQ8D9gEDAiWUGVJT/8AOvW6ZZDkcLKzIQ//hEuuQSOPx4mTQpjE4hIPAqaEJi7bwPOAp5w9wFAu3w3MhtuZqvNbFYuy83MHjWzBWb2tZl1Slg22MzmR4/BBYyzxK1YEZ6VEIgU3PbtYaTCe++Fyy+H996D+vXjjkqkYitwQhDdbXAh4e4CgMoF2G4E0DuP5acCLaPHlcCT0cEaAHcCRwNdgDvNrFT+u1AvhSKFs3Il9OgBr78ODzwATz8NVavGHZWIFDQh+A1wK/Cmu882sxbA+Pw2cvdPgPV5rHI68KIHnwH1zKwxcArwgbuvd/cNwAfknVjEpnlzeOIJOPLIuCMRKf1mzoSjjw7Pr78Ov/udbisUKS0K1DFRdGH/JGF6IXB9MRz/IGBpwvSyaF5u83/GzK4klC7QrFmzYgipcA46CK65psQPK1LmvPcenHtuGKFw4kTo3DnuiEQkUZ4lBGb2jJkdkcuyfczsUjO7MDmhFYy7P+3uqe6e2qhRoxI//rx5MHt2iR9WpEx57LHQx8Chh4bBipQMiJQ++ZUQPA78KUoKZgFrCB0TtQT2BYYD/96L46cBBydMN43mpQE9ss3/eC+OkzR33QVTp8KCBXFHIlL6ZGTAjTfC0KHQrx+MHBlKCESk9MkzIXD3r4Bzzaw2kMqerou/cfd5xXD80cC1ZvYfQgPCTe6+wszeB+5NaEjYi9CGodRJS1ODQpGcpKeHsQjGjIEbbggNCCsXpCmyiMSioG0ItlCEX+hm9jLhl35DM1tGuHMgq0OjYcAY4DRgAbANGBItW29mfwGmRru6293zapwYm7S00EhKRPZYsgT69oU5c+DJJ+Hqq+OOSETyk9TRDt39/HyWO/DrXJYNJ1RJlFruKiEQyW7q1FA9sH17KB3o1SvuiESkIAp626HkYMMG2LFD3RaLZHn9dTjhBKhZM/Q8qGRApOwoVEJgZrWSFUhZVLMmvP12GNxIpCJzh/vug3POgY4dw5gE7fLty1RESpMCJQRmdpyZzQHmRtMdzOyJpEZWBtSsGepJ1f+6VGQ7d8Kll8Jtt4VGhB99FAYqEpGypaAlBP8g9B64DsDdZwDdkxVUWTFvHrz7bviHKFIRrV8fqgVGjIA77gi3FdaoEXdUIlIUBa4ycPel2WbtLuZYypxXXw0lBO5xRyJS8ubPh2OOgcmT4aWX4M9/VjfEImVZQe8yWGpmxwFuZlUJYxt8k7ywyoa0NGjYEKpXjzsSkZI1YQKcdVZIAMaNg27d4o5IRPZWQUsIribcHngQoRfBjuRyu2BFolsOpSJ64QXo2RMaNQqNB5UMiJQPBe2YaC1h6GNJsHy5EgKpODIz4U9/gnvvhV/+EkaNgvqlclByESmKAiUEZtYcuA5ISdzG3Sv0DXdpaRqkRSqG7dvh4otDEnD55WHI76pV445KRIpTQdsQvAU8B7wNZCYvnLLl/fehlnpmkHJu5Uo4/fTQA+EDD8BNN6nxoEh5VNCE4Ad3fzSpkZRBHTrEHYFIcs2cGe6kWbMm9EJ45plxRyQiyVLQRoX/NLM7zexYM+uU9UhqZKXckiUwbBisWhV3JCLJ8d570LUr7NoFEycqGRAp7wpaQnAEMAj4JXuqDDyarpCmToVrrgn3YR9wQNzRiBSvxx+H66+HI46Ad96Bpk3jjkhEkq2gCcEAoIW7q0++SFpaeNZdBlKeZGTAjTfC0KGhquDll6F27bijEpGSUNAqg1lAvWQGUtakpUG1aqFjIpHyID09NB4cOhRuuAHeekvJgEhFUtASgnrAXDObCuzImlmRbztMSwvDHqu1tZQHS5aEEoE5c+DJJ+Hqq+OOSERKWkETgjuTGkUZtHx5SAhEyrqpU6Ffv9DXwJgxYbAiEal4CtpT4YRkB1LWjBoFW7bEHYXI3nn9dRg0KDSMHTcO2rWLOyIRiUuebQjM7NPoOd3MNic80s1sc347N7PeZjbPzBaY2S05LP+HmX0VPb41s40Jy3YnLBtdlJNLpgYNoFmzuKMQKRp3uP9+OOec0J/G558rGRCp6PIrIdgHwN3rFHbHZlYZeBzoCSwDppr
ZaHefk7WOu9+QsP51wFEJu9ju7h0Le9ySsGXLnn+mHUtlhCK527kztBF4/nkYOBCGD4eaNeOOSkTilt9dBr4X++4CLHD3hdHtiv8BTs9j/fOBl/fieCVm6VK45x74psIPAC1lzfr1cMopIRm44w4YOVLJgIgE+ZUQ7G9mN+a20N0fzmPbg4ClCdPLgKNzWtHMDgGaAx8lzK5hZtOADOB+d38rl22vBK4EaFZCZfhZfRCoUaGUJfPnQ58+sHgxvPQSXHRR3BGJSGmSX0JQGagNJPvmuoHAKHffnTDvEHdPM7MWwEdmNtPdv8u+obs/DTwNkJqaujclGgWmTomkrJkwAc46K9wmO24cdOsWd0QiUtrklxCscPe7i7jvNODghOmm0bycDAR+nTjD3dOi54Vm9jGhfcHPEoI4LF8enpUQSFnwwgtwxRXQogW8+y4cemjcEYlIaZRfG4K9KRmYCrQ0s+ZmVo1w0f/Z3QJm1hqoD0xOmFffzKpHrxsCXYE52beNy8qVUL++6l6ldMvMhNtvh0sugeOPh8mTlQyISO7ySwhOKuqO3T0DuBZ4H/gGeNXdZ4VF6NcAACAASURBVJvZ3WaW2MPhQOA/7p5Y3N8GmGZmM4DxhDYEpSYheOQRWLQo7ihEcrd9e7iD4J574LLLwsiF9evHHZWIlGb20+tw2ZaamurTpk2LOwyRWK1cGcYkmDoV/vY3+N3v1MW25M7Mprt7atxxSPwKOriRJPjtb+GNN+KOQuTnZs6Eo48Oz6+/DjffrGRARApGCUEh7d4Njz0GX3wRdyQiP/Xee9C1K+zaBRMnwplnxh2RiJQlSggKadWqkBToDgMpTR5/PPQx0KIFTJkCnTvHHZGIlDVKCApJfRBIabJ7N/zmN3DttXDaafDpp9C0adxRiUhZpISgkLL6IFAvhRK39PTQePDRR0O7lrfegtq1445KRMqqAg1/LHukp4f+B1RCIHFasgT69YPZs+HJJ8NgRSIie0MlBIV00UWwdSsceGDckUhFNXUqdOkS+sIYM0bJgIgUDyUERWCmW7kkHq+/DiecEEqpJk2CXr3ijkhEygslBIX0hz/AX/4SdxRS0bjD/ffDOedAhw7w+efQrl3cUYlIeaI2BIX0zjvQunXcUUhFsnNnqBZ4/nk477zwrHE0RKS4qYSgkNLSdIeBlJz16+GUU0IS8Kc/wciRSgZEJDlUQlAIW7fCpk26w0BKxvz50LdvaDz44oswaFDcEYlIeaaEoBCy+iBQQiDJNmECnHVWaLw6bhx06xZ3RCJS3qnKoBC2bYNWreCQQ+KORMqzF16Anj2hUaPQeFDJgIiUBJUQFEKHDjBvXtxRSHmVmQl33AH33AO//CWMGgX168cdlYhUFEoIREqB7dth8GB47TW47LLQ+2DVqnFHJSIViaoMCuH++6F//7ijkPJm1So48cRQIvD3v8MzzygZEJGSpxKCQpg+Hb79Nu4opDyZOTPcSbBmTeiF8Mwz445IRCoqlRAUQlqa7jCQ4vPee9C1K+zaBZ98omRAROKV1ITAzHqb2TwzW2Bmt+Sw/BIzW2NmX0WPyxOWDTaz+dFjcDLjLCglBFJcHn8c+vSBFi1gyhRITY07IhGp6JJWZWBmlYHHgZ7AMmCqmY129znZVn3F3a/Ntm0D4E4gFXBgerTthmTFm5/MTFixQr0Uyt7ZvRtuvBEefTRUFbz8MtSuHXdUIiLJLSHoAixw94XuvhP4D3B6Abc9BfjA3ddHScAHQO8kxVkg27ZB9+5w5JFxRiFlWXo6nH56SAZ++1t46y0lAyJSeiSzUeFBwNKE6WXA0Tmsd7aZdQe+BW5w96W5bJtjYb2ZXQlcCdCsWbNiCDtntWvDhx8mbfdSzi1ZAv36wezZ8MQTcM01cUckIvJTcTcqfBtIcfcjCaUALxR2B+7+tLununtqo0aNij1Akb01dSocfXQYk+Ddd5UMiEjplMyEIA04OGG6aTTvR+6+zt13RJPPAp0Lum1JGzECWraEtWvjjELKmtdfhxNOgBo1YNKkMHKhiEhplMyEYCrQ0syam1k1YCAwOnEFM2ucMNkf+CZ6/T7Qy8zqm1l9oFc0LzYLF8J330G9enFGIWWFe+jI6pxzQpfXn38O7drFHZWISO6S1obA3TPM7FrChbwyMNzdZ5vZ3cA0dx8NXG9m/YEMYD1wSbTtejP7CyGpALjb3dcnK9aCSEuDAw6AKurKSfIxcyb89a/w6qtw3nnw/PNQs2bcUYmI5M3cPe4Yik1qaqpPmzYtKfvu1CkMNDNuXFJ2L2VcRgaMHg1Dh8LHH4cqgltvhdtvh0pxt9QRyYOZTXd39YQhsTcqLBO2bIEZM+C44+KOREqbdevgb3+DQw+Fs88OVUt/+xssWxZGLlQyICJlhQrAC2DrVrj0UujVK+5IpLSYMSOUBvz73/DDD2FwokceCbcWqlpJRMoi/esqgAMOCCPQScWWkRE6Exo6NIw9ULMmXHwxXHstHHFE3NGJiOwdJQQFsGxZ6LJYxb8V09q1ISF88klYuhRSUuCBB0KpUYMGcUcnUj5Mnz59/ypVqjwLtEfV2cmQCczKyMi4vHPnzqtzWkEJQT7coWNHGDAgXBCk4vjyy1AaMHIk7NgBJ50Upvv2hcqV445OpHypUqXKswceeGCbRo0abahUqVL5ae1eSmRmZtqaNWvarly58lnCbf4/o4QgH/Pnh4ZjnTvnv66Ufbt2hWqBRx+FTz+FWrXgkktCtUD79nFHJ1KutVcykDyVKlXyRo0abVq5cmWu/8mUEORj0qTwfOyx8cYhybVmTagWeOKJ0OdE8+bw0EMwZEi43VREkq6SkoHkit7fXKtjlBDkY/JkqFsX2rSJOxJJhi++CNUAL78cqgVOPjlUDZ12mqoFRKRiUcONfEyaBMccowaF5cmuXfDKK9CtW6gKeu210EBw9mz44INw66CSAZGKZd68edVatmxZajsY79+/f/OUlJT2LVu2bDdgwICUHTt2WHEfQ5e5fNx7L9x8c9xRSHFYvTp0KZySAgMHwooV8PDD4S6SJ56Atm3jjlBEJGcXXnjh+oULF86aN2/e7B9++MEeeeSRhsV9DCUE+ejXL7Qul7Jr+nQYPBgOPhj+9KfQOPCdd0KD0Rtu0IBVIvJTc+bMqdamTZu2EyZMqJWRkcFVV13VtH379m1atWrV9oEHHmgIkJmZyVVXXdW0ZcuW7Vq1atX2mWeeqQ/wzjvv1ElNTT28R48eh6WkpLS/4IILmu3evZuMjAzOPvvslKz1//znP+9fmJjOO++8TZUqVaJSpUqkpqZuXbZsWbXiPm+1IcjD//1fuO2wW7e4I5HC2rUrDD386KOhHcg++8AVV4S7BVq3jjs6EcnLpZdy8KxZ1CrOfbZvz7bhw1ma33ozZsyoPnDgwEOHDx/+/bHHHrv9wQcfbFi3bt3ds2bN+mb79u32i1/8onW/fv02f/bZZ7VmzpxZ85tvvpm9YsWKKl26dGnTq1evLQAzZ87c58svv5zVqlWrnd27d2/54osv1j/ssMN2rFixour8+f
NnA6xdu7ZIFZM7duywV155Zb+HH34433MpLCUEebj7bli5MnRTK2XDqlXw1FMwbFioEjj00NCl8CWXhMahIiK5Wb9+fZUzzjjjsFGjRn3XuXPnHwA+/PDDfefOnVtr9OjR9QHS09Mrz5kzp8bEiRPrnHvuueurVKnCwQcfnHH00Udv+fTTT2vVrVs384gjjtjatm3bnQDnnnvu+okTJ9bu27fv5qVLl1YfPHjwwf369dt05plnbi5KjIMHD252zDHHbOndu/eW4jvzQAlBLjIz4bPP4Pzz445ECmLq1FAa8OqrsHMnnHIKPPss9O6tBqEiZU1BfsknQ506dXY3adJk5/jx42tnJQTubg899NCSs88++ycX8HfffTfXnxhm9rPpRo0a7Z41a9acN998c99hw4Y1euWVVxq89tpri7LWycjIoH379m0BevfuvfGRRx5Znn2/N910U+O1a9dWef/997/buzPNmf5V5mLOHNi8Wf0PlGY7d4ZeBI85Brp0CR0KXXUVzJ0L770Xbh1UMiAiBVW1alUfO3bsdy+//PJ+w4YNawDQs2fPTU8++WSjrFb9X3/9dfXNmzdX6t69e/qoUaMaZGRksHz58ipTpkypffzxx2+FUGUwd+7cart372bUqFENjj/++PQVK1ZU2b17N5dccsnG++67L23mzJk/qRKpUqUKc+fOnTN37tw5OSUDDz/8cMOPPvqo7ltvvbWwcpJug1IJQS4mTw7PGvK49Fm5ck+1wMqV0LJlKB0YPBj23Tfu6ESkLNt3330z33///QU9evRoVadOnd033HDD2kWLFlU/4ogj2ri7NWjQYNeYMWO+GzRo0MZJkybVbtOmTTsz8z//+c/LmjVrlvH111/Tvn37rVdffXWzRYsW1TjuuOM2Dxo0aOOUKVNqXnbZZSmZmZkGcPfddy8rTFy///3vD2ncuPGO1NTUNgB9+/bd8OCDD64oznNXQpCLyZOhYUM47LC4I5Esn38eOhF69dXQaPDUU+H668Ow1CoJEJG9cfjhh+/MavDXsGHD3bNmzfoma9ljjz2WBqRl3+app55aBvzswl6nTp3d48ePX5A479hjj90+Z86cb7KvW1AZGRnTi7ptQSkhyMUTT8Dvfw9W7F0/SGHs2BE6Dho6FKZMgTp14Jpr4Ne/hlat4o5ORKT8SOrvKjPrbWbzzGyBmd2Sw/IbzWyOmX1tZuPM7JCEZbvN7KvoMTqZceakRg3dnhanFSvgzjvhkENg0CDYtCkkBWlp8M9/KhkQkdKpb9++6dlLB8qKpJUQmFll4HGgJ6FIZaqZjXb3OQmrfQmkuvs2M7sG+DtwXrRsu7t3TFZ8eZk8Gd58E/7wB9hvvzgiqJjcQ7XAo4+GUoHdu0PDwOuug549VS0gIpJMyfwX2wVY4O4L3X0n8B/g9MQV3H28u2+LJj8DmiYxngJ7993QpW2NGnFHUjHs2AEvvRTuFDj22PD+X3stfPtt6FHwlFOUDIiIJFsy2xAcBD+5l3QZcHQe618GjE2YrmFm04AM4H53fyunjczsSuBKgGbNmu1VwFkmT4YOHULvdpI8y5eHOwWeeiqMM9C6NTz+OFx8MdSuHXd0IiIVS6loVGhmFwGpwAkJsw9x9zQzawF8ZGYz3f1nnTG4+9PA0wCpqal7PZZ2RkYoth4yZG/3JDlxDwnX0KEwalSoFujbN1QLnHyyGnGKiMQlmQWxacDBCdNNyeG2DTM7Gfgj0N/dd2TNd/e06Hkh8DFwVBJj/dGsWbB1q/ofKG4//AAvvAC/+AV07Qpjx4ZbBufPh9GjQxsBJQMiUpbccsstBxbXvm688cYmd9xxxwFF3X7SpEk1O3bs2Pqwww77yWBLhZHMhGAq0NLMmptZNWAg8JO7BczsKOApQjKwOmF+fTOrHr1uCHQFEhsjJk1aGjRooB4Ki0taGtx+OzRrFsYT2LYt3NK5bBk89FAYa0BEpCx69NFHGxdm/czMTHbv3p2UWGrXrp350ksvfb9gwYLZ//vf/+bfdtttBxd2AKWkJQTungFcC7wPfAO86u6zzexuM+sfrfYAUBt4LdvthW2AaWY2AxhPaENQIglBnz6wdm243U2Kxj2MFHneeeF9vPfekGB9+CHMnh36EVAbAREpTebNm1etefPm7fr379+8RYsW7Xr37t0iPT290ujRo+ucfPLJP/50efPNN/ft2bPnob/61a8O2rFjR6XWrVu37d+/f3OAu+6664CWLVu2a9myZbu77757/6z9pqSktD/zzDNTWrVq1e67776rNmrUqH3btm3b5vDDD2977LHH/ngT9TfffFOzS5cuhzdt2vSIv/71r4UaHvnII4/cccQRR+wASElJ2dWgQYOMFStWFKpZQFLbELj7GGBMtnl3JLw+OZftJgFHJDO2vKjoumh++AFefjm0D/jyS6hXD377W/jVr6BFi7ijE5GypEsXDs8+76yzWH/LLaxJT6fSSSfRMvvyiy5i7fXXs27FCqqcfjo/KX+cMoV5+R1z0aJFNZ566qlFvXr12jpgwICUBx54oNFdd9216je/+U2z5cuXV2nSpEnG8OHD9xsyZMjaCy64YNOIESP2nzt37hyAiRMn1ho5cuR+06dP/8bd6dy5c5uTTjopvWHDhruXLFlS/bnnnvv+pJNOWrR8+fIq1157bcrHH388t3Xr1jtXrVr146/4BQsW1Jg0adK8jRs3Vm7Tpk37m2++eU316tUL3TZu/PjxtXbt2mVt27bdkf/ae+hmrgSrVkGbNmFgHCm4pUvhttvg4IPh0kvDoEPDhoVqgQcfVDIgImXDgQceuLNXr15bAQYNGrRu0qRJtStVqsS555677plnnmmwdu3ayl988UXtAQMGbMq+7ccff1z7tNNO27jvvvtm1q1bN7NPnz4bxo8fXwegcePGO0866aSt0Xr7dOnSJb1169Y7AQ444IAf6xB69eq1sWbNmt64ceOMBg0a7Fq2bFmhf7QvXry46pAhQ1o888wziwo7CFKpuMugtJg8OYyUV6dO3JGUfu7w6aehE6E33wzT/fuHuwVOPFGlLCKyd/L6RV+nDpl5LW/cmIyClAhkl9OwxQDXXHPNuj59+hxWo0YN79ev34aqVasWar+1atXKLMh6iaUBlStXJiMj4ycBvfjii/XuvffeJgBPP/30ou7du29LXL5+/fpKp5566mF33nlnWlYCUhgqIUgweTJUrQqdO8cdSem1fTsMHw5HHQXdu8O4cXDjjfDddyEx+OUvlQyISNm0YsWKah9++OE+AP/+978bHHfccVsg1MkfcMABux566KHGV1555dqs9atUqeJZwyKfeOKJW8aMGVMvPT290ubNmyuNGTOm/oknnpie/Rg9evTYOmXKlDpz586tBpBYZZCfiy++eGPWEMnZk4EffvjB+vTpc9jAgQPXDRkyZENRzl8JQYJJk6BTJ/VQmJMlS+DWW0O1wGWXQWYmPP10qBb4+98hJSXuCEVE9k5KSsoPQ4cO3b9FixbtNm7cWOV3v/vdmqxlAwcOXNe4ceOdnTp1+iFr3oUXX
rimTZs2bfv379+8W7du2y644IJ1nTp1atO5c+c2gwYNWtO1a9ft2Y/RpEmTjEcffXTRmWeeedjhhx/e9swzzyyWStXhw4fXnzp1au2RI0c2bN26ddvWrVu3nTRpUs3C7MPc97ovn1IjNTXVp02bVqRtd+6EunVDC/iHHy7mwMood/jkk9BI8M03w7wzzgjVAiecoJIAkfLAzKa7e2rcccyYMWNRhw4d1ua/ZnLMmzevWt++fVtmDYGc3cUXX9zsqKOO2nbDDTfEFmNxmDFjRsMOHTqk5LRMbQgi6enhNrneveOOJH7btsHIkSER+Prr0C/DzTeHZEm3Y4pIRdOuXbs2NWvWzHzqqaeW5r922aWEILLffjBiRNxRxGvx4tBp0LPPwvr1cOSR4fX550OtWnFHJyKSPIcffvjO3EoHZs+e/U1JxxMHJQSRNWugYcOKVwzuDhMmhLsF/vvfcP5nnBG6FT7++Ir3fohIbDIzMzOtUqVK5aceu5TJzMw0INc7HtSoMJKaCpdfHncUJWfbNnjmmVAKcOKJoa3A738PCxeGQYe6d1cyICIlataaNWvqRhctKWaZmZm2Zs2ausCs3NZRCQGhv/0lS8LFsbxbtGhPtcCGDdCxIzz3XKgWqFmo9qgiIsUnIyPj8pUrVz67cuXK9ujHajJkArMyMjJy/emrhIDQ/wCU3xEO3WH8+FAt8Pbb4Zf/WWeFaoGuXVUSICLx69y582qgf74rStIoISD0P1CjBnToEHckxSM9HWbMgOnT4YsvwvktWBDaSNxyC1x9dehPQEREJIsSAkIJwS9+AdWqxR1J4W3YEAYS+uKLPY9vvw2lAgAHHhg6W/rjH2HgQHW6JCIiOVNCQOiBr5BjQMRi9eqfXvi/+AK+/37P8mbNwsX/wgvDc6dO0LhQo3WLiEhFpYSAMChPaeIOy5f//OK/bNmedQ49NJRqXHVVuPAfdVSoEhARESkKJQQxcw8dAmXV92c9Vq8Oy82gdevQVXDWr/6OHaFevXjjFhGR8kUJQQnKzAyN+7L/8t8QjUtVpQq0awd9+uy5+B95JNSuHW/cIiJS/ikhKGYZGaFo//vvwz3/ixaF1wsXhpb/W7aE9apVCxf7AQP2XPyPOEKN/kREJB5KCApp9+5Qv591oU98XrQIli4N62SpVAmaNg3DA19yyZ6Lf9u2ULVqHGcgIiLyc0lNCMysN/BPoDLwrLvfn215deBFoDOwDjjP3RdFy24FLgN2A9e7+/vJjBXCEMjLl4dHWtrPn5cuDT0a7tr10+2aNAkX/K5dw3Pz5nuemzYtm7cziohIxZK0hMDMKgOPAz2BZcBUMxvt7nMSVrsM2ODuh5nZQOBvwHlm1hYYCLQDmgAfmlkrd99NMcvMDK31ly4NAxxlV61auOAfdFAY72DAgJ9e9Js1UzG/iIiUfcksIegCLHD3hQBm9h/gdCAxITgduCt6PQp4zMwsmv8fd98BfG9mC6L9TS7uICtVCsX3v/jFngt/4vN++6lrXxERKf+SmRAcBCxNmF4GHJ3bOu6eYWabgP2i+Z9l2/agnA5iZlcCVwI0a9asSIG+9FKRNhMRESk3yvyIUu7+tLununtqo0aN4g5HRESkTEpmQpAGJA6h0zSal+M6ZlYFqEtoXFiQbUVERKSYJDMhmAq0NLPmZlaN0EhwdLZ1RgODo9fnAB+5u0fzB5pZdTNrDrQEpiQxVhERkQotaW0IojYB1wLvE247HO7us83sbmCau48GngNeihoNrickDUTrvUpogJgB/DoZdxiIiIhIYJ41Tm45kJqa6tOmTYs7DBGRMsPMprt7atxxSPzKfKNCERER2XtKCEREREQJgYiIiJSzNgRmtgZYnM9qDYG1JRBOaaPzrlh03hXL3pz3Ie6uTlykfCUEBWFm0ypiAxqdd8Wi865YKup5S/FSlYGIiIgoIRAREZGKmRA8HXcAMdF5Vyw674qlop63FKMK14ZAREREfq4ilhCIiIhINkoIREREpOIkBGbW28zmmdkCM7sl7niKwswONrPxZjbHzGab2W+i+Q3M7AMzmx8914/mm5k9Gp3z12bWKWFfg6P155vZ4IT5nc1sZrTNo2ZmJX+mOTOzymb2pZm9E003N7PPo1hfiUbVJBol85Vo/udmlpKwj1uj+fPM7JSE+aXy+2Fm9cxslJnNNbNvzOzYivB5m9kN0Xd8lpm9bGY1yuPnbWbDzWy1mc1KmJf0zze3Y0gF5+7l/kEYbfE7oAVQDZgBtI07riKcR2OgU/S6DvAt0Bb4O3BLNP8W4G/R69OAsYABxwCfR/MbAAuj5/rR6/rRsinRuhZte2rc551w/jcCI4F3oulXgYHR62HANdHrXwHDotcDgVei122jz7460Dz6TlQuzd8P4AXg8uh1NaBeef+8gYOA74GaCZ/zJeXx8wa6A52AWQnzkv755nYMPSr2o6KUEHQBFrj7QnffCfwHOD3mmArN3Ve4+xfR63TgG8I/z9MJFw6i5zOi16cDL3rwGVDPzBoDpwAfuPt6d98AfAD0jpbt6+6fubsDLybsK1Zm1hToAzwbTRvwS2BUtEr28856P0YBJ0Xrnw78x913uPv3wALCd6NUfj/MrC7hgvEcgLvvdPeNVIDPmzA0e00zqwLUAlZQDj9vd/+EMPR7opL4fHM7hlRgFSUhOAhYmjC9LJpXZkXFokcBnwMHuPuKaNFK4IDodW7nndf8ZTnMLw0eAX4PZEbT+wEb3T0jmk6M9cfzi5ZvitYv7PsRt+bAGuD5qKrkWTPbh3L+ebt7GvAgsISQCGwCplP+P+8sJfH55nYMqcAqSkJQrphZbeB14LfuvjlxWfRLoFzdS2pmfYHV7j497lhKWBVCcfKT7n4UsJVQvPujcvp51yf8gm0ONAH2AXrHGlRMSuLzLY/fISmaipIQpAEHJ0w3jeaVOWZWlZAM/Nvd34hmr4qKB4meV0fzczvvvOY3zWF+3LoC/c1sEaF495fAPwlFplWidRJj/fH8ouV1gXUU/v2I2zJgmbt/Hk2PIiQI5f3zPhn43t3XuPsu4A3Cd6C8f95ZSuLzze0YUoFVlIRgKtAyaqVcjdDwaHTMMRVaVC/6HPCNuz+csGg0kNWyeDDw34T5F0etk48BNkXFhO8DvcysfvRrrBfwfrRss5kdEx3r4oR9xcbdb3X3pu6eQvjsPnL3C4HxwDnRatnPO+v9OCda36P5A6NW6c2BloRGV6Xy++HuK4GlZnZ4NOskYA7l/PMmVBUcY2a1oriyzrtcf94JSuLzze0YUpHF3aqxpB6EFrrfEloX/zHueIp4Dt0IRXtfA19Fj9MI9aXjgPnAh0CDaH0DHo/OeSaQmrCvSwmNrBYAQxLmpwKzom0eI+rNsrQ8gB7sucugBeEf/ALgNaB6NL9GNL0gWt4iYfs/Ruc2j4QW9aX1+wF0BKZFn/lbhFbk5f7zBv4MzI1ie4lwp0C5+7yBlwntJHYRSoQuK4nPN7dj6FGxH+q6WERERCpMlYGIiIjkQQmBiIiIKCEQERERJQQiIiKCEgIRERFBCYEI
ZrbbzL4ysxlm9oWZHZfP+vXM7FcF2O/HZpZaxJjGmFm9omwrIlIUSghEYLu7d3T3DsCtwH35rF+PMMJe0rj7aR4GMhIRKRFKCER+al9gA4QxI8xsXFRqMNPMskbEux84NCpVeCBa9w/ROjPM7P6E/Q0wsylm9q2ZHZ/9YGbW2Mw+ifY1K2sdM1tkZg3N7Opo2Vdm9r2ZjY+W9zKzyVFsr0XjW4iIFJk6JpIKz8x2E3p+qwE0Bn7p7tOzht51981m1hD4jND97SGE3hLbR9ufCvwJONndt5lZA3dfb2YfA9Pd/SYzOw240d1Pznbsm4Aa7n6PmVWOjpcejduQ6u5ro/WqAh8RxrGfTOjf/1R332pmfyD02nd3Mt8nESnfquS/iki5t93dOwKY2bHAi2bWntBV7L1m1p0w7PJB5DxM7MnA8+6+DcDdE8e3zxqAajqQksO2U4Hh0QX/LXf/KpcY/0noo//taPTHtsD/hS7qqUZIEkREikwJgUgCd58clQY0IvR33wjo7O67ol/tNQq5yx3R825y+Htz90+ihKMPMMLMHnb3FxPXMbNLCKUS12bNAj5w9/MLGYuISK7UhkAkgZm1BioThs+tC6yOkoETCRdlgHSgTsJmHwBDzKxWtI8GhTjeIcAqd38GeJYwvHHi8s7A74CL3D0zmv0Z0NXMDovW2cfMWhXuTEVEfkolBCJQ08yyiuoNGOzuu83s38DbZjaTMOLgXAB3X2dm/2dms4Cx7n6zmXUEppnZTmAMcFsBj90DuNnMdgFbCEPUJroWaACMj6oHprn75VGpwctmVj1a73bC6H0iIkWiRoUiIiKiKgMRERFRQiAiIiIoIRAREn3ulAAAACBJREFUERGUEIiIiAhKCERERAQlBCIiIoISAhEREQH+H/2kSP0zoJtaAAAAAElFTkSuQmCC\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "plot_absolute_time(experiments_neq, results, [2], max_batch_size=max_batch_size)" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAhsAAAEWCAYAAADPUVX+AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjIsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8li6FKAAAgAElEQVR4nO3dd5gUVdbH8e8hiwQFERFFUCQpIsJiwndFlBV0FVdhWRMqioo5rbq65uyadRUDYo6rgoouSjCjEkSiCoooGSSDpDnvH/eONrMTeobpqQm/z/P001230unq6urTt27dMndHREREJFMqJR2AiIiIlG9KNkRERCSjlGyIiIhIRinZEBERkYxSsiEiIiIZpWRDREREMqrMJhtm5mbWPJ/xj5jZP1OGzzazBWa2yszqF2F9TeM6qxQ15uIS38OuScdRUsxssJndVIjp8903Shsze8fM+iYdRzrMrEnc/yoXYd6tzOxNM1tuZq9kIj4pGjM7wcyGJx2HlF8FJhtmNsvM1pvZdjnKJ8SDetM4PDgOH51junti+Slx+BQz2xQPWKvM7Acze9LMWhTbuwLc/Sx3vzGusypwN9DN3Wu5+5JM/iDF9/hxMS1rtJmdnloW38P3xbH84lAaEjEz29/MPk1q/ekys+vM7NnUMnfv7u5PJRVTfuL3/9DsYXefHfe/TUVY3HFAQ6C+u/cqhtiqmdmrMUY3s4NzjDczu93MlsTH7WZmKeP3NrNxZrYmPu+9pTElobDHm9y+r+7+nLt3y0yEIunXbPwA/C17wMzaAjVzme5b4OSU6aoAvYGZOab7zN1rAXWBQ4G1wDgz2zP90POWy7+uhkANYEpxLF+KVzElKUcAw4phORVGAsnhLsC37r6xsDPmE+vHwInA/FzG9Qd6Au2AvYA/A2fG5VUDhgDPAtsCTwFDYnnGxASozNYoixSZu+f7AGYBVwNfppT9C7gKcKBpLBscyxcA28ayI4F3CAeEU2LZKcDHuaznLeDVfOK4DJgHzAVOi+tunrLuhwk/NqsJCcxg4CagRSxzYBUwEvgwDq+OZX/NZX2V4/tZDHwPnBPnqRLH1wWeiDHNieuqDLQGfgU2xWUvi9NXj8ubHbfRI8BWKes7GvgKWEFIzg4Hbo7L+TUu68E4bep7rws8DSwCfoyfVaXUbR3Xu5SQNHYv4LO+Epgap38SqBHHTQb+nDJt1bht2sf3lL19VwH7ExLZq2NMC2OMdeO8TeP0/eK8H8byzsCnwDLgJ37fZwYDDwFvAyuBz4HdcsQ+Htgnl+3TOS7r4DjcCngP+AX4BuidsoyCtuUnwIPAcmA60DVl3lMI+8nKuJ1PyGX7Hg6sBzbE7TQxlo8GTs+xnnvidvgeOCCW/xS3Zd+UZea7X+VYf+qylxD22d0I34kl8fN8DtgmTv8MkEX4M7AK+HvKZ5f9PdgRGBq35wzgjDzWfX2O996PIuwj+ey7P2d/xillnwL9U4b7AWPi626E762ljJ8NHJ7H8kcDtwJfEL6jQ4B6KeP34/d9d2JqLHHem+O2Xws0j2U3xXlWAW8C9eP2XwF8ye/H1s22eeo+Q97HmyOACXFZPwHX5XifOb+vp5ByXCbsc18S9vUvgQNyrPvG+H5WAsOB7eK4GoQEbkncFl8CDQv6ndGj/D8KniD8AB1KODC3Jvyg/kz4l5Iz2bgJeBQ4O5a9TKgRSSfZOA1YkEcMhxMOpHsCWwPP87/JxnLgQMIBrEZ2PHF8bl/W3+bPY51nEX5QdgbqAaPY/CD7OjAwxrM94SB0Zl7vkXCAHxqXVZtwcLk1jusU4z8sxt8YaBXHjSb+EOUWO+EAPSQusymhdqlfShwbgDPi53Y2IVmzPN7zLEJSkf2eP0nZhn8HXkqZ9mhgUj7b9zTCj8+uQC3gNeCZHNM/HbffVoT9aWXcX6oSDrx7p3y+S+J2qkI4IL+Ysq5GpPxwZG8fwn7zE9Aplm8dh0+Ny2lP+IFtk+a23AhcFOP7a/zM6sXlrgBapsSzRx7b+Drg2Rxlv33GKes5NX5mNxF+HB4iJBbd4naqVdB+lcu6s5d9Xnz/W8XtdFhcdgNCIn5vzu9/yvBmn3Wc/t+E79zehETtkHTeO4XcRwo4TuWWbCwH9k0Z7gisjK8vAt7JMf1bwCV5LH80YR/LPgb9J/u9EL6vS4AehO/vYXG4Qcq8s4E94navGstmEJK9uoQE/1vCsbZKfN9P5vP9yrnP5DzeHAy0jfHsRTh+9sxneb8tg7AvLQVOirH8LQ7XT1n3TMIfua3i8G1x3JmEfbAmYf/tANQp6HdGj/L/KEx13jOEUySHAdMIX7zcPA2cbGbbAH8E3khz+XMJO3luehO+eJPdfTXhoJXTEHf/xN2z3P3XNNeZn96Eg+5P7v4L4V8NAGbWkHBgudDdV7v7QsJBv09uC4rnifsDF7n7L+6+ErglZfp+wCB3fy/GP8fdpxcUYDxd1Ae40t1Xuvss4C7CQSLbj+7+mIdz7E8Rfggb5rPYB1Pe8838fvrsWaCHmdWJwycR9om8nADc7e7fu/sqQo1JnxzV4dfF7bcWOB54391fcPcN7r7E3b9KmfZ1d//CQxX8c4Qftmw9gHfd3VPKehGSwe7u/kUsOxKY5e5PuvtGd59A+NHolea2XEjYJza4+0uEBPyIOC4L2NPMtnL3ee6+JafsfogxbgJeIiR/N7j7OncfTqghaJ7GfpWbue7+QHz/a919Rtzv1rn7IkLbpj+mE6SZ7UxI8C9391/j5/U4KadSC1DYfaSwahESjmzLgVpxu+Uclz2+dj7LeyblGPRPoHfcb04Ehrn7sPj9fQ8YS9gvsw129ylxu2+IZU+6+0x3X06oAZ7p7u/HffwVQjJcJO4+2t0nxXi+Bl4gzc+VsE9/5+7PxHhfIPzx+nPKNE+6+7fxc3mZ37+PGwh/FJq7+yZ3H+fuK4r6PqT8KMw522cI/2KaERKKXLn7x2bWgHCa5S13X5vSJis/jQlVsbnZERiXMvxjLtP8lM5KCmHHHMtMXecuhH8n81LeW6V8YmhAyPTHpbZPI2T+EH5MitLeYLsYR2psPxK2ZbbfzmW7+5q4/lr5LDPne94xzjvXzD4BjjWz14HuwAX5LGfHXOKqwuaJTuq6duZ/2/akSj0nv4bN30MPQm1XqguBp919ckrZLsC+ZrYspawKYd9OZ1vOyZHQ/Ajs6O6rzey
vwKXAE3E7XZJOwpiHBSmv1wK4e86yWhS8X+Vms300Js73AQcRfmgrEf7FpmNHIDvJyfYjoQYh3fkLs48U1iqgTspwHWCVu7uZ5RyXPX4lecv53ahK2G92ISSsqT/GVQm1obnNmy3nZ5rbZ1wkZrYvcBuhJqYaoeYq3SuAcn4ukM9xhc2/j88Qvssvxj+czwJXpSRYUkGlXbPh7j8SzkX3IFR35udZ4BLySUpycQzwUR7j5hF24GxNcguxEOtKR37r/AlYRzhPuU181HH3PfKIZTHh4LFHyvR1PTSSzV7ebnnEkd/7Wkz4J7FLjjjzqnVKR873PDdl+CnCv7hehEa+2evJLca5ucS1kc0PqKnz5bcN8hSvNPojoR1Gql5ATzNLTYh+Aj5I+Qy28XBlxdmkty0b2+aZ82/bx93/6+6HEWqOpgOP5RFyce6nBe1X6az/lljW1t3rED5fy2f6VHOBemaWWhtQmP2vsPtIYU0hNA7N1o7fG4lPAfbK8XnuRf6NyHN+NzYQPoOfCLUeqfvV1u5+W8r0W/I+Vsfn1Eb5OxSw7OcJp9d2dve6hLY8ls/0qXJ+LpDm5xpr/a539zaEdh9Hkn5Nl5RjhW0V3Y9wPnZ1AdPdTzjd8mF+E5lZZTNrZmYPEM4xXp/HpC8Dp5hZGzOrCVxbuLBztYBwrjgvLwPnm9lOZrYtcEX2CHefR2gUdZeZ1TGzSma2m5llV1MuAHbKbtnu7lmEH597zGx7ADNrbGZ/itM/AZxqZl3jshqbWauC4ozV7C8DN5tZbTPbBbiYkOwV1TnxPdcj1E69lDLuDWAfQo1GaiK5iHAaITXOF4CL4udbi/Cj9pLnfSXCc8ChZtbbzKqYWf00L0XsDHydS1XtXKArcIGZnR3L3gJamNlJZlY1Pv5gZq3T3JbbE/aJqmbWi9CGaZiZNTSzo81sa0ISuipuj9wsAJoWxxUJaexX6ahNiHe5mTUmNMTOGW9e+99PhAaOt5pZDTPbi3CMSHf/K+w+8j/MrLqZ1YiD1WIc2T+qTwMXx22yI+EP0OA4bjShUeX5cRnnxvKR+azuxJRj0A2EBu2bCO/3z2b2p3hMq2FmB5vZTum+j/zE01tz4vorm9lpbJ6Yb3a8iWoTap1+NbNOhNOU2XL7vqYaRvieHB+/i38F2hC+P/kysy5m1jaeXlpBSMjy+i5IBVKoA148vzg2jel+cfcROaqcU+0fqzFXEL70dYA/uPukPJb3DnAv4UAwg/wPCOm6DnjKzJaZWe9cxj8G/JfQsnw8/1ubczKhejL7yo1XCf9qifFNAeab2eJYdnmMfYyZrQDeB1rG9/cFoUHgPYTzxh/w+z+L+4DjzGypmd2fS5znEf75fE9oiPs8MCi9TZCr5wmJ1PeE0xq/daYVz8/+h3Aq7bWU8jXE1vZxe+4XY8g+9fYDocX8eXmt1N1nE2rNLiGcTvuKzf+V5iXPS17jMrsCV5jZ6bG6vxuhTcNcQlXw7YQqZih4W34O7E74N3szcJy7LyF8jy6Oy/yFUNNyNrnLrspeYmbj03h/Bclzv0rT9YQEcjnhap+c+/mtwNXxc700l/n/RmhwOJfQaPpad38/zXUXah/JwzeE2p3GhO/rWn7/7gwkNFacRGj4/HYsw93XEy6LPZlw1cRphAaU6/NZ1zOEZGU+oUHs+XFZPxEaTP+D8EP+EyFpK85LXM+Iy1xCaGia2qdMbsebAcANZrYSuIaQSBPjze37Ssr4JYQaiUvi+v4OHOnuiynYDoRj4QpC274PyL9tl1QQlnc+IBWNmc0itHDP88fCzK4BWrj7iSUWWD7MbCrhR39qhtdzCmHbdM7keqR0MrPRhKtPHk86FpGyKPGut6XsiKdW+rH5FRqJidXGT2c60RARkS2jnuwkLWZ2BqF6+B13z7ctTklx9/U5GuGJiEgppNMoIiIiklGq2RAREZGMKhNtNrbbbjtv2rRp0mGIiJQp48aNW+zuDZKOQ6RMJBtNmzZl7NgCr7gVEZEUZpZbb8siJU6nUURERCSjlGyIiIhIRinZEBERkYxSsiEiIiIZpWRDREREMkrJhoiIiGSUkg0RERHJKCUbIiKlkDs8/TS88krSkYhsOSUbIiKlzLRp0KUL9O0Lzz6bdDQiW07JhohIKbF2LVx9NbRrBxMnwsCB8PrrSUclsuUy3l25mc0CVgKbgI3u3tHM6gEvAU2BWUBvd1+a6VhEREqrd9+Fc86B77+Hk06Cf/0Ltt8+6ahEikdJ1Wx0cfe93b1jHL4CGOHuuwMj4rCISIUzdy707g3du0PVqjByZGiroURDypOkTqMcDTwVXz8F9EwoDhGRRGzaBPffD61awdChcOON4dRJly5JRyZS/Eoi2XBguJmNM7P+sayhu8+Lr+cDDXPOZGb9zWysmY1dtGhRCYQpIlIyxo6FTp3gggtg//1h8uTQVqN69aQjE8mMkkg2Orv7PkB34Bwz+7/Uke7uhISEHOWPuntHd+/YoEGDEghTRCSzli+H884LicbcufDSS6GtRvPmSUcmklkZTzbcfU58Xgi8DnQCFphZI4D4vDDTcYiIJMU9JBatWsFDD8G558L06aGthlnS0YlkXkaTDTPb2sxqZ78GugGTgaFA3zhZX2BIJuMQEUnKjBlw+OHQpw80bgxffBHaatStm3RkIiUn05e+NgRet5C6VwGed/d3zexL4GUz6wf8CPTOcBwiIiVq3Tq44w64+WaoVg0eeADOPhsqV046MpGSl9Fkw92/B9rlUr4E6JrJdYuIJGXkyJBYfPttOFVyzz2w445JRyWSHPUgKiJSTBYsCB1yde0KGzfCO++EthpKNKSiU7IhIrKFsrJC1+KtWoXk4uqrw+Wshx+edGQipUPGuysXESnPJk6Es86CMWPg4IPh4YdD0iEiv1PNhohIEaxaBZdcAh06wMyZoYvxkSOVaIjkRjUbIiKF4A5DhoTOuX7+Gfr3h1tvhXr1ko5MpPRSzYaISJp+/BGOPhqOOQa23RY++SS01VCiIZI/JRsiIgXYsCH0mdGmDYwYEW7/Pm4cHHBA0pGJlA06jSIiko+PPw59ZkyeDD17wn33QZMmSUclUraoZkNEJBdLlsDpp8NBB4UbqA0ZAq+/rkRDpCiUbIiIpHCHwYOhZcvwfNllMHUqHHVU0pGJlF06jSIiEk2dGk6ZfPhhaI/xyCPQtm3SUYmUfarZEJEKb80a+Mc/oF07mDQJHnsMPvpIiYZIcVHNhohUaMOGwbnnwg8/QN++cOed0KBB0lGJlC+q2RCRCmnOHOjVC444AqpXh1GjQhsNJRoixU/JhohUKBs3hstXW7WCt96Cm28O9zc5+OCkIxMpv3QaRUQqjC++CDdNmzAh3JH1oYdg112Tjkqk/FPNhoiUe8uWwTnnwH77wYIF8Moroa2GEg2RkqFkQ0TKLXd44YVwyuSRR8LN06ZNg+OOA7OkoxOpOHQaRUTKpe++gwED4P33oWPHUJOxzz5JRyVSMalmQ0TKlXXr4PrrQx8ZX3wBDz4IY8Yo0RBJkmo2RKTcGDEi1GZ8+y
... <remainder of base64-encoded PNG omitted: relative (PyTorch/KeOps) prediction-time plot for the MMD detector>\n",
+     "text/plain": [
+      "<Figure>"
+     ]
+    },
+    "metadata": {
+     "needs_background": "light"
+    },
+    "output_type": "display_data"
+   }
+  ],
+  "source": [
+   "plot_relative_time(experiments_neq, results, [2], max_batch_size=max_batch_size)"
+  ]
+ },
+ {
+  "cell_type": "markdown",
+  "metadata": {},
+  "source": [
+   "### Learned kernel MMD detector\n",
+   "\n",
+   "We conduct experiments similar to those for the MMD detector, with $N_\\text{ref} = N_\\text{test}$ and `n_features=50`. We use a deep learned kernel with an MLP followed by Gaussian RBF kernels, projecting the input features onto a `d_out=2`-dimensional space. Since the learned kernel detector computes the kernel matrix in a batch-wise manner, we can also scale up the number of instances for the PyTorch backend without running out of memory."
+  ]
+ },
+ {
+  "cell_type": "code",
+  "execution_count": 12,
+  "metadata": {},
+  "outputs": [],
+  "source": [
+   "experiments_eq = {\n",
+   "    'keops': {\n",
+   "        0: {'n_ref': 2000, 'n_test': 2000, 'n_runs': 3, 'n_features': 50},\n",
+   "        1: {'n_ref': 5000, 'n_test': 5000, 'n_runs': 3, 'n_features': 50},\n",
+   "        2: {'n_ref': 10000, 'n_test': 10000, 'n_runs': 3, 'n_features': 50},\n",
+   "        3: {'n_ref': 20000, 'n_test': 20000, 'n_runs': 3, 'n_features': 50},\n",
+   "        4: {'n_ref': 50000, 'n_test': 50000, 'n_runs': 3, 'n_features': 50},\n",
+   "        5: {'n_ref': 100000, 'n_test': 100000, 'n_runs': 3, 'n_features': 50}\n",
+   "    },\n",
+   "    'pytorch': {\n",
+   "        0: {'n_ref': 2000, 'n_test': 2000, 'n_runs': 3, 'n_features': 50},\n",
+   "        1: {'n_ref': 5000, 'n_test': 5000, 'n_runs': 3, 'n_features': 50},\n",
+   "        2: {'n_ref': 10000, 'n_test': 10000, 'n_runs': 3, 'n_features': 50},\n",
+   "        3: {'n_ref': 20000, 'n_test': 20000, 'n_runs': 3, 'n_features': 50},\n",
+   "        4: {'n_ref': 50000, 'n_test': 50000, 'n_runs': 3, 'n_features': 50},\n",
+   "        5: {'n_ref': 100000, 'n_test': 100000, 'n_runs': 3, 'n_features': 50}\n",
+   "    }\n",
+   "}"
+  ]
+ },
+ {
+  "cell_type": "code",
+  "execution_count": 13,
+  "metadata": {
+   "scrolled": true
+  },
+  "outputs": [],
+  "source": [
+   "results = {backend: {} for backend in backends}\n",
+   "\n",
+   "for backend in backends:\n",
+   "    exps = experiments_eq[backend]\n",
+   "    for i, exp in exps.items():\n",
+   "        results[backend][i] = experiment(\n",
+   "            'learned_kernel', backend, exp['n_runs'], exp['n_ref'], exp['n_test'], exp['n_features']\n",
+   "        )"
+  ]
+ },
+ {
+  "cell_type": "markdown",
+  "metadata": {},
+  "source": [
+   "We again plot the absolute and relative (PyTorch / KeOps) mean prediction times for the learned kernel MMD drift detector with `n_features=50`:"
+  ]
+ },
+ {
+  "cell_type": "code",
+  "execution_count": 14,
+  "metadata": {},
+  "outputs": [
+   {
+    "data": {
+     "image/png": "<base64-encoded PNG omitted: absolute prediction times for the learned kernel MMD detector>\n",
"iVBORw0KGgoAAAANSUhEUgAAAgcAAAEWCAYAAADywzSYAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjIsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8li6FKAAAgAElEQVR4nO3deXxU1fnH8c8Twr4jiCxCQNlxBalotShIUVS0Vkq1iLtWbV1b6eJaq/6q2Lr83EWlihvVioh1xe2HikDZBBREkCVA2PclyfP749zoJCZkIZM7Sb7v12tec+fc7bkzk9xnzjn3XHN3RERERPKkxR2AiIiIpBYlByIiIpKPkgMRERHJR8mBiIiI5KPkQERERPJRciAiIiL5KDmoAszMzezAPcx/2MxuSHj9azNbZWZbzGyfMuwvI9pnelljLi/RMXSsgP20i/ZVowL21dLMPjSzzWY2Ktn7k5Izsz+a2eNxxyGSbEoOSsHMFpvZLjNrXqD8v9HJMiN6/VT0ekiB5f4elZ8bvT7XzHKik84WM/vGzJ40s87lGbe7X+ruf4n2WRO4Bxjo7g3cfW1xycXeiI7x43La1vtmdmFiWXQMi8pj+wX2tdjMBiTs59toXznlva9CXAysARq5+7V7uzEza2Vm481sReL3NGF+bTMbbWabzGylmV1TYH5/M5tvZtvMbJKZtd/bmOJgZjeb2TOlWL6fmS1LLHP32939wqLWEakqlByU3jfAL/NemNlBQL1ClvsKOCdhuXRgKPB1geU+cfcGQGNgALAdmGZmPcsj2EJ+6bYE6gBflMf2JSnaA3O9DCOUFVGbkwv8BzijiNVuBjpF+z0O+L2ZDYq21xx4GbgBaAZMBV4obVyllQq1UiLVmrvrUcIHsBj4M/B5QtndwJ8ABzKisqei8lVA06jsZOAN4GPg3KjsXODjQvYzARi3hzh+B2QCK4Dzo30fmLDvh4CJwFZCwvEUcBvQOSpzYAvwHvBh9HprVPaLQvZXIzqeNcAi4PJonfRofmPgiSim5dG+agDdgB1ATrTtDdHytaPtfRu9Rw8DdRP2NwSYAWwiJFODgL9G29kRbeuBaNnEY28MjAGygCXRZ5WW+F5H+11PSPJOLOL9/SfhhLo92tfvgYwCx/x+dJyTo2VeA/YBno3i/jzv+xAt3xV4G1gHfAkMLWLfTwG7gV3RdgdE79c/os97RTRdO1q+H7AMuB5YCfxzD9+bdBK+pwnlKwg1SXmv/wI8H01fDExOmFc/el+67uFv5A/A3Oh9fhKokzD/5Oiz3RC9dwcXWPd6YBawM4p3MeH7PovwHX2CkOC+AWwG3uH7v7F+wLJC4hlA+A7tit7bLcDMaP55wLxoW4uASwocZ260/BagNSGReiZh+6cSEu0N0XeiW4F9XxfFvpGQVNWJ5jUn/J1viL4THxF9V/XQIxUesQdQmR4J/2i+JJz4akT/mNvzw+TgNuBR4NdR2YuEGoeSJAfnA6uKiGEQ4YTaM/oHNpYfJgcbgaMJNUN18uKJ5meQcJKLyr5bv4h9XgrMB/Yn/HqcRP4T5SvAI1E8+wJTEv7J/uAYgb8D46NtNSScWO+I5vWJ4j8hir8N0Yko+ud7YYFtJR77GODVaJsZhNqbCxLi2A1cFH1uvyacFG1Pn3XC63zvWxTLQuAAQlIyN9rfAMJJbQzwZLRsfWAp4USUDhxGSLS6F7Hv7z6v6PWtwKfRe9uCcFL9SzSvH5AN/A8hiahb2DajZX+QHABNo7KWCWU/B2ZH0/cCDxXYzhzgjD28b3MSviv/x/ffvcOA1cCPos9gRLR87YR1Z0Tr1k0o+5SQELSJ1p8ebasOIcG9KeG9KDQ5iKZvJuHEHpUNjj5DA34CbAMO38P2vtsG3yfbJwA1CUnkQqBWwr6nEJKKZoQk5NJo3h2EpLhm9DiGIr6LeugRx0PNCmXzT0KTwQmEP/jlRSw3BjjHzJoQ/vH8u4TbX0H4Z1KYoYSTzhx330r4Z1XQq+7+f+6e6+47SrjPPRkK/MPdl7r7OsI/NiB0ngNOAq5y963uvppw8h9W2IbMzAi/Rq9293Xuvhm4PWH5C4DR7v52FP9yd59fXIBR88kw4A/uvtndFwOjgOEJiy1x98c89Bt4GmhFOOmU1ZPu/rW7byT8kv3a3d9x92zgJcIJDMKv5cXu/qS7Z7v7f4F/AWeWcD9nA7e6+2p3zwJuKXBcuYQT5E53317KY2gQPW9MKNtISLDy5m8kv8T5hXkg4bvyV75vhrsYeMTdP3P3HHd/mlBDcGTCuvdF6yYex/3uvsrdlxN+YX/m7v+Nvtuv8P37XGru/nr0Gbq7fwC8RThRl8QvgNej7+puQq1UXeCoAsezInovXgMOjcp3E75/7d19t7t/5O660Y2kDLXrlc0/CdXxHQgJQKHc/WMza0Fodpjg7tvDubFYbQhVjYVpDUxLeL2kkGWWlmQnpdC6wDYT99me8MsnM+HY0vYQQwtCH41pCcsb4ZckhF+NE8sQY/MojsTYlhDeyzwr8ybcfVu0/waU3aqE6e2FvM7bdnvgR2a2IWF+OuF7VBKt+eFxtU54nbUXSeCW6LkRockmb3pzwvxGBdZJnF+Ygt+VvFjbAyPM7DcJ82uR/1gK+96U9H0uNTM7EbiJUAuQRvhuzi7h6vk+F3fPNbOlFPGdI9RK5B3rXYTE/q3oe/iou99ZhkMQSQrVHJSBuy8htFmfROistSfPANeyhySiEKcTfiEVJpNwAs3TrrAQS7GvktjTPpcSfv01d/cm0aORu/coIpY1hH/oPRKWb+yhU2be9g4oIo49Hdcawq+xxJ707Si6Vqc45fkeLgU+SDjeJh6ufPh1CddfwQ+Pa0XC6zLH6u7rCZ/vIQnFh/B9h9UvEueZWX3C57OnDq0Fvyt5sS4F/lrgfajn7s8lhlS2IwFCFf93nYOj2qQWRW3bzGoTanDuJjSrNCEkplbY8oXI97lEtWL7U4LvXFS7da27dyT0W7jGzPoXt55IRVFyUHYXAMdHVft7ch+h+eHDPS1kZjXMrIOZ3U9o67yliEVfBM41s+5mVo/wq2dvrQL2NFbAi8BvzaytmTUFRubNcPdMQlXsKDNrZGZpZnaAmf0kYdttzaxWtHwu8BjwdzPbF8DM2pjZT6PlnwDOiy6fS4vmdS0uzqip4EXgr2bWMLrc7hpCclYWxb0npTEB6Gxmw82sZvQ4wsy6lXD954A/m1mL6OqBGynlcZlZHUKfBIDa0es8Y6LtN43e64sI/R4gVNv3NLMzonVuBGYV09RzefRdaUaoNcu7uuEx4FIz+5EF9c1ssJntqYmiNL4C6kTbrEnokFo7Yf4qIMPM8v7v1YrmZwHZUS3CwALL72NmjYvY34vA4Oi7WpPwI2AnoU/IHpnZyWZ2YJRQbCR0ts0t6YGKJJuSgzKK2imnlmC5de7+7h7aE/ua2RZCD/f3CVW2R7h7oVWb7v4Gobf6e4TOT++VJf4CbgaeNrMNZja0kPmPAW8CMwmdwQrWlpxD+Eeb10N9HKE9lS
i+L4CVZrYmKrs+iv1TM9tE6HHeJTq+KYSOe38n/NP8gO9/nd0L/NzM1pvZfYXE+RvCr8dFhI6fY4HRJXsLfuAOwglzg5ldV8ZtAOFXIuGkM4zwa3Ml33cgLInbCJcQziJUeU+Pykoj78oLCJ1LE9v0byJcFbKE8H7f5e7/iWLPIlwC+VfCZ/sjiuhPkmAsIWFcFG33tmhbUwmJxwPRthYSOoqWi6jvx2XA44Rf71sJHYbzvBQ9rzWz6dHn8lvCSX49cBaho2ze9uYTErNF0fcgsfkDd/8S+BVwP6Hm6hTgFHffVYJwOxG+91uAT4AH3X1S6Y5YJHlMfWBEpLyY2WLCFSXvxB2LiJSdag5EREQkHyUHIiIiko+aFURERCQf1RyIiIhIPpV6EKTmzZt7RkZG3GGIiFQq06ZNW+PuLYpfUqqrSp0cZGRkMHVqsVcTiohIAjMrbGRVke+oWUFERETyUXIgIiIi+Sg5EBERkXyUHIiIiEg+Sg5EREQkHyUHIiIiko+SAxEREclHyYGISCXiDqtXxx2FVHVKDkREKpHRo+Gww2DlyrgjkaqsUo+QKCJS3Zx9NuzaBS1bxh2JVGWqORARSXHuMGoUrF8PderAr38NZnFHJVWZkgMRkRSWmwuXXw7XXQfPPBN3NFJdqFlBRCRF5ebCpZfCY4/B9dfDFVfEHZFUF6o5EBFJQbm5cNFFITH44x/hjjvUlCAVR8mBiEgKWrMG3n0XbrgBbrtNiYFULDUriIikkJyckAjsuy/MmAFNmsQdkVRHqjkQEUkR2dlwzjlw8cXhCgUlBhIXJQciIikgOxt+9SsYOxYOPFDNCBKvpCUHZra/mU0ys7lm9oWZXRmV32xmy81sRvQ4KWGdP5jZQjP70sx+mqzYRERSye7d8MtfwgsvwN/+BiNHxh2RVHfJ7HOQDVzr7tPNrCEwzczejub93d3vTlzYzLoDw4AeQGvgHTPr7O45SYxRRCR2554L48bBPffA1VfHHY1IEpMDd88EMqPpzWY2D2izh1WGAM+7+07gGzNbCPQBPklWjCIiqWDECOjbV+MYSOqokD4HZpYBHAZ8FhVdYWazzGy0mTWNytoASxNWW8aekwkRkUpr5054880wPXCgEgNJLUlPDsysAfAv4Cp33wQ8BBwAHEqoWRhVyu1dbGZTzWxqVlZWuccrIpJsO3bA6afDSSfBV1/FHY3IDyU1OTCzmoTE4Fl3fxnA3Ve5e4675wKPEZoOAJYD+yes3jYqy8fdH3X33u7eu0WLFskMX0Sk3G3fDkOGwBtvwMMPQ+fOcUck8kPJvFrBgCeAee5+T0J5q4TFTgfmRNPjgWFmVtvMOgCdgCnJik9EpKJt2wanngpvvw1PPBGGRxZJRcm8WuFoYDgw28xmRGV/BH5pZocCDiwGLgFw9y/M7EVgLuFKh8t1pYKIVCX//je89x489VQY7EgkVZm7xx1DmfXu3dunTp0adxgiIiU2Zw707BlvDGY2zd17xxuFpDKNkCgikkSbN4c+BtOnh9dxJwYiJaHkQEQkSTZtgkGD4PXXYdGiuKMRKTndlVFEJAk2bgyJwdSpYVjkM86IOyKRklNyICJSzjZuDAMbTZ8OL74YxjQQqUzUrCAiUs7q1IG2beFf/1JiIJWTag5ERMrJ2rXgDs2bh8RApLJSciAiUg7WrIEBA6BuXZg8Gczijkik7JQciIjspaws6N8fFiyA8eOVGEjlp+RARGQvrFoVEoNFi2DChDAtUtkpORAR2QsXXQTffBPGMjjuuLijESkfSg5ERPbCgw/CkiVw9NFxRyJSfnQpo4hIKS1fDiNHQk5OuGRRiYFUNUoORERKYelS+MlPQo3BggVxRyOSHEoORERKaMmSkBhkZcFbb0HXrnFHJJIc6nMgIlICixeHDofr18Pbb0OfPnFHJJI8Sg5EREpg6VLYvRvefRd69Yo7GpHkUnIgIrIHW7ZAgwZwzDGwcGG4b4JIVac+ByIiRfjqK+jWDUaPDq+VGEh1oZoDEZFCzJ8Pxx8P2dlwxBFxRyNSsZQciIgUMG9e6HzoDpMmQY8ecUckUrHUrCAikmDdOujXL9w86f33lRhI9aTkQEQkQbNmcNNNITHo1i3uaETioWYFERFgxgzYsQOOPBIuuyzuaETipeRARKq96dNhwABo0wZmzoQ01alKNac/ARGp1qZOhf79oWFDePVVJQYioORARKqxKVNCjUGTJvDBB9CxY9wRiaQGJQciUm099BDss09IDDIy4o5GJHWoz4GIVDvu4VLFRx+FtWthv/3ijkgktSSt5sDM9jezSWY218y+MLMro/JmZva2mS2InptG5WZm95nZQjObZWaHJys2Eam+Pv4Y+vaF1auhZk0lBiKFSWazQjZwrbt3B44ELjez7sBI4F137wS8G70GOBHoFD0uBh5KYmwiUg198AEMGgQbN0JOTtzRiKSupCUH7p7p7tOj6c3APKANMAR4OlrsaeC0aHoIMMaDT4EmZtYqWfGJSPUyaRKcdBK0bx+mW+m/i0iRKqRDopllAIcBnwEt3T0zmrUSaBlNtwGWJqy2LCoruK2LzWyqmU3NyspKWswiUnV8+CEMHgwdOoTEQE0JInuW9OTAzBoA/wKucvdNifPc3QEvzfbc/VF37+3uvVu0aFGOkYpIVdWpU0gOJk2CffeNOxqR1JfU5MDMahISg2fd/eWoeFVec0H0vDoqXw7sn7B626hMRKRMpk4Nt1xu1Qpeegn0e0KkZJJ5tYIBTwDz3P2ehFnjgRHR9Ajg1YTyc6KrFo4ENiY0P4iIlMqECXD00XDLLXFHIlL5JHOcg6OB4cBsM5sRlf0RuBN40cwuAJYAQ6N5E4GTgIXANuC8JMYmIlXY+PHw85/DIYfANdfEHY1I5ZO05MDdPwasiNn9C1negcuTFY+IVA+vvAJDh0KvXvCf/4ShkUWkdDR8sohUGRs3wgUXwBFHwJtvKjEQKSsNnywiVUbjxvD229C5c7jLooiUjWoORKTSGzsW7rsvTPfqpcRAZG8pORCRSu2f/4Thw0Nfg+zsuKMRqRqUHIhIpfXUUzBiBPTrFy5dTFdDqUi5UHIgIpXS6NFw/vkwYAC89hrUrx93RCJVh5IDEamUtm2Dn/4UXn0V6tWLOxqRqkXJgYhUKqtWhecrroDXX4e6deONR6QqUnIgIpXGAw/AgQfCrFnhdZr+g4kkhf60RKRSuPde+M1voH9/6No17mhEqjYlByKS8u65B666Cn72s3B3xVq14o5IpGpTciAiKW3CBLj2WjjzTHj+eahZM+6IRKo+JQciktIGDQp9DcaOVWIgUlGUHIhISnrwQVixIgxsdPnlGuBIpCIpORCRlOION98cEoIHH4w7GpHqSbm4iKQMd7jxRrjtNjj3XLjllrgjEqmeVHMgIinBHf70p5AYXHghPPEE1KgRd1Qi1ZOSAxFJCVu3hnskXHIJPPKIBjgSiZOaFUQkVu6QkwMNGsDHH0PDhkoMROKmP0ERiY07XH11GMMgOxsaN1ZiIJIK9GcoIrFwh9/+NgyLnJGh/
gUiqUTJgYhUuNzccKniAw+E0Q/vuQfM4o5KRPIoORCRCvf738NDD8H118NddykxEEk16pAoIhVu6NDQv+DPf1ZiIJKKVHMgIhUiJwdefz1M9+kDN9ygxEAkVRWbHJhZXzP7XzObZWZZZvatmU00s8vNrHFFBCkilVtOThjx8OST4ZNP4o5GRIqzx+TAzN4ALgTeBAYBrYDuwJ+BOsCrZnZqsoMUkcorOxuGD4dnngmjH/btG3dEIlKc4vocDHf3NQXKtgDTo8coM2uelMhEpNLLzoZf/QpeeAHuuANGjow7IhEpiT3WHOQlBmZW38zSounOZnaqmdVMXKYgMxttZqvNbE5C2c1mttzMZkSPkxLm/cHMFprZl2b20/I4OBGJ16RJITG46y4lBiKVSUmvVvgQOMbMmgJvAZ8DvwDO3sM6TwEPAGMKlP/d3e9OLDCz7sAwoAfQGnjHzDq7e04J4xORFHTCCTBrFhx0UNyRiEhplPRqBXP3bcDPgAfd/UzCibxI7v4hsK6E2x8CPO/uO939G2Ah0KeE64pICtm5E4YNg/feC6+VGIhUPiVODsysL6GmILoYibIOdnpFdOXD6KgmAqANsDRhmWVRWWGBXGxmU81salZWVhlDEJFk2LEDzjgjNCV89VXc0YhIWZU0ObgS+APwirt/YWYdgUll2N9DwAHAoUAmMKq0G3D3R929t7v3btGiRRlCEJFk2LEDTj89jGXw0ENw6aVxRyQiZVWiPgdRE8GHCa8XAb8t7c7cfVXetJk9BkyIXi4H9k9YtG1UJiKVwI4dMGQIvPUWPPooXHRR3BGJyN4obpyDx8ys0BbD6AqG881sT50SC67TKuHl6UDelQzjgWFmVtvMOgCdgCkl3a6IxKtmTWjTBp54QomBSFVQXM3B/wI3RAnCHCCLMPhRJ6ARMBp4trAVzew5oB/Q3MyWATcB/czsUMCBxcAlAFFTxYvAXCAbuFxXKoikvq1bYcOG7xMDDYcsUjWYuxe/kFkDoDdhhMTtwDx3/zLJsRWrd+/ePnXq1LjDEKmWtmwJwyFnZsLs2VCrVtwRSUmZ2TR37x13HJK6StrnYAvwfnJDEZHKYvNmGDwYJk8OwyIrMRCpWnTLZhEplU2b4MQT4bPP4Lnn4Mwz445IRMqbkgMRKZVrr4UpU8JYBmecEXc0IpIMJR3nAAAzq5esQESkcrjzzjCWgRIDkaqrRMmBmR1lZnOB+dHrQ8zswaRGJiIpY906uO66MDTyPvvAwIFxRyQiyVTSmoO/Az8F1gK4+0zg2GQFJSKpY+1aGDAA7r8fpk+POxoRqQglblZw96UFijQOgUgVt2YN9O8Pc+fCv/8NffvGHZGIVISSdkhcamZHAW5mNQn3WpiXvLBEJG5ZWSExWLAAxo9XU4JIdVLSmoNLgcsJd0pcTrhx0uXJCkpE4peZGfoaTJigxECkuinpIEhrCLdrFpEqbssWaNAADj4YFi6EOnXijkhEKlpJr1boYGb3mNnLZjY+75Hs4ESkYq1YAb17w//8T3itxECkeippn4N/A08ArwG5yQtHROKyfDkcd1xoTjj66LijEZE4lTQ52OHu9yU1EhGJzdKlITFYvRrefBOOOiruiEQkTiVNDu41s5uAt4CdeYXurqueRSq5HTvg+OPD1QlvvQVHHhl3RCISt5ImBwcBw4Hj+b5ZwaPXIlKJ1akDN94IXbpAnz5xRyMiqaCkycGZQEd335XMYESk4nz9NXzzTRj9cPjwuKMRkVRS0uRgDtAEWJ3EWESkgixcGPoYuOtyRRH5oZImB02A+Wb2Ofn7HJyalKhEJGm++iokBrt2wTvvKDEQkR8qaXJwU1KjEJEKMX9+6HyYnQ2TJkHPnnFHJCKpqKQjJH6Q7EBEJPmeegpyckJi0KNH3NGISKra4wiJZvZx9LzZzDYlPDab2aaKCVFE9pZ7eL79dpg2TYmBiOxZccMn1wdw94bu3ijh0dDdG1VAfCKyl2bNCpcoLl4MaWnQtm3cEYlIqiuuWcErJAoRSYoZM8KlinXqwO7dcUcjIpVFccnBvmZ2TVEz3f2eco5HRMrJ9OkhMWjQIPQxOOCAuCMSkcqiuOSgBtAAsAqIRUTKycyZ0L8/NGoUEoOOHeOOSEQqk+KSg0x3v7VCIhGRctO+fag1uOsuyMiIOxoRqWyKSw5UYyBSicyaBZ06QZMm8NJLcUcjIpVVcVcr9K+QKERkr02eDD/+MVx5ZdyRiEhlt8fkwN3XlXXDZjbazFab2ZyEsmZm9raZLYiem0blZmb3mdlCM5tlZoeXdb8i1dFHH8FPfwqtWsFNGs9URPZScTUHe+MpYFCBspHAu+7eCXg3eg1wItApelwMPJTEuESqlA8+gBNPDOMXvP8+tGkTd0QiUtklLTlw9w+BgjUPQ4Cno+mngdMSysd48CnQxMxaJSs2kapi1y4YMSJ0QJw0KdQciIjsrZLeeKm8tHT3zGh6JdAymm4DLE1YbllUlkkBZnYxoXaBdu3aJS9SkUqgVi2YMAH23Tc8RETKQzKbFfbI3Z0yjMDo7o+6e293792iRYskRCaS+t58E26+OdwzoWdPJQYiUr4qOjlYlddcED2vjsqXA/snLNc2KhORAiZOhCFD4N//hm3b4o5GRKqiik4OxgMjoukRwKsJ5edEVy0cCWxMaH4QkciECXD66eGuiu+9B/Xrxx2RiFRFSetzYGbPAf2A5ma2DLgJuBN40cwuAJYAQ6PFJwInAQuBbcB5yYpLpLJ69VU480w45BB46y1o2jTuiESkqkpacuDuvyxi1g8GVor6H1yerFhEqoLt2+GII+D118MIiCIiyRJbh0QRKZnVUc+cYcPCYEdKDEQk2ZQciKSwF14IN0764IPwOk1/sSJSAfSvRiRFjR0LZ50VmhJ69Yo7GhGpTpQciKSgZ56B4cPh2GPDpYsNGsQdkYhUJ0oORFLMZ5/BOedAv36h86EuVxSRiqbkQCTF9OkDDzwAr70G9erFHY2IVEdKDkRSxJgxsGABmMFllykxEJH4KDkQSQEPPRTurvi3v8UdiYiIkgOR2D3wQKgpOOWUMC0iEjclByIx+sc/4De/gdNOg3HjoHbtuCMSEVFyIBKb7Gx46SU44wx48UWoVSvuiEREgqTdW0FEirZ7N9SsCf/5D9SpE6ZFRFKFag5EKtjtt8MJJ8C2bdCwoRIDEUk9Sg5EKtBf/gJ/+hO0aaNmBBFJXUoORCqAO9x8M9x4YxgWecwYSFejnoikKCUHIhXg7rvhllvg3HPhySehRo24IxIRKZp+u4hUgJNPhqwsuPNO3XZZRFKf/k2JJIk7TJgQnrt1C6MfKjEQkcpA/6pEksAdrrsujHr46qtxRyMiUjpqVhApZ+5w9dVw771h9MMhQ+KOSESkdFRzIFKO3OG3vw2JwVVXhWezuKMSESkdJQci5WjGjHCHxeuug3vuUWIgIpWTmhVEysGOHeHyxMMOg+nT4aCDlBiISOWl5EBkL8ydC489Bk8/DY88AmeeCQcfHHdUIqlv2rRp+6anpz8O9ES12BUtF5iT
nZ19Ya9evVYXtoCSA5FSysmBsWPh0Ufh44/DvRF+9jPo2DHuyEQqj/T09Mf322+/bi1atFiflpbmccdTneTm5lpWVlb3lStXPg6cWtgySg5ESmj1ath33zBWwW23hc6Hd90FI0ZAixZxRydS6fRUYhCPtLQ0b9GixcaVK1f2LGoZJQcie7BtG7z4YqglmDcPli+HevVg0iRo1Ur9CkT2QpoSg/hE732RzTlq5xEpxDffwBVXQOvWcN55sG4d3HBDqC2AUK7EQESqqliSAzNbbGazzWyGmU2NypqZ2dtmtiB6bhpHbFJ9bd0Kq1aF6awsePzxcE+EDz4ItQbXXAP168cbo4iUjy+//LJWp06desQdR1HOOOOMjDZt2hzUtWvX7l27du0+efLkugC5ubmce+65+7dr165n586du3/88cf1krH/OJsVjnP3NQmvRwLvuvudZiJXTPYAABjOSURBVDYyen19PKFJdTJjRmg2eOYZ+MUvwtUHRxwBmZnQVCmqiMTktttuW3beeeetTyx76aWXGi9atKjO4sWL50yaNKn+ZZdd1m7WrFnzy3vfqdSsMAR4Opp+GjgtxlikGhg7Fvr0CWMTPPkknHZaaEKA0GSgxECkepg7d26tbt26df/ggw/qZWdnc8kll7Tt2bNnt86dO3e/6667mkP4xX7JJZe07dSpU4/OnTt3f+yxx5oCTJgwoWHv3r279OvX78CMjIyeZ511VrucnByys7M544wzMvKWv+WWW/Ytj1hfffXVJmefffbatLQ0+vfvv3XTpk3pS5YsqVke204UV82BA2+ZmQOPuPujQEt3z4zmrwRaFraimV0MXAzQrl27iohVqpCZM8M4BGbwySewfTvcdx/86ldKBkTicv757D9nDuVaPd6zJ9tGj2ZpccvNnDmz9rBhww4YPXr0N3379t1+9913N2/cuHHOnDlz5m3fvt2OOOKIrqeccsqmTz/9tN7s2bPrzps374vMzMz0Pn36dBs4cOAWgNmzZ9f/73//O6dz5867jj322E5jxoxpeuCBB+7MzMysuWDBgi8A1qxZU6O0x3DLLbe0ueOOO1odc8wxmx944IFldevW9czMzJoZGRm78pZp1arVriVLltRs37797tJuf0/iqjn4sbsfDpwIXG5mxybOdHcnJBA/4O6Puntvd+/dQtePSQls3hyaDXr3hkMPhQ8/DOV33QWzZoWbIykxEKl+1q1bl37aaacd+Mwzzyzq27fvdoB33nmn0YsvvrhP165dux922GHd1q9fnz537tw6H330UcOhQ4euS09PZ//998/+0Y9+tCWvvf+ggw7a2r17913p6ekMHTp03UcffdSga9euO5cuXVp7xIgR+48bN65R06ZNc0oT2z333LN80aJFc2bOnDlv/fr1NW644Yb9kvEeFCWWmgN3Xx49rzazV4A+wCoza+XumWbWCih01CaRklq3DkaODM0HW7eGIY0feAAOOSTMr1Mn3vhEJCjJL/xkaNiwYU7r1q13TZo0qUGvXr12ALi7jRo16tszzjhjU+Kyr7/+euOitmMFLl0yM1q0aJEzZ86cua+88kqjhx9+uMULL7zQ7KWXXlqct0x2djY9e/bsDjBo0KAN//jHP1YkbiOvJqBu3bp+/vnnrx01alRLgFatWu1evHhxrbzlMjMza5V3rQHEUHNgZvXNrGHeNDAQmAOMB0ZEi40AXq3o2KTy27gRpk0L0w0awDvvhE6Gn34amhQuvxyaNIk3RhFJDTVr1vQ33njj6+eee26fhx9+uBnACSecsPGhhx5qsXPnTgOYNWtW7U2bNqUde+yxm8eNG9csOzubFStWpE+ZMqXBMcccsxVCs8L8+fNr5eTkMG7cuGbHHHPM5szMzPScnBzOPffcDXfcccfy2bNn52s2SU9PZ/78+XPnz58/t2BiAJDXjyA3N5eXX365Sbdu3bYDnHrqqRueffbZfXJzc3n33XfrN2zYMCcZyUEcNQctgVeiTCsdGOvu/zGzz4EXzewCYAkwNIbYpBJyhylTQtPB88+H0Qq//hpq1YKvvoJ0DfUlIkVo1KhR7ptvvrmwX79+nRs2bJhz9dVXr1m8eHHtgw46qJu7W7NmzXZPnDjx6+HDh2+YPHlyg27duvUwM7/llluWtWvXLnvWrFn07Nlz66WXXtpu8eLFdY466qhNw4cP3zBlypS6F1xwQUZubq4B3HrrrctKE9cvfvGLDuvWrUt3d+vevfu2MWPGLAEYOnToxtdff71x+/bte9atWzf38ccfX5yEtwVzr7wDVPXu3dunTp0adxgSo4kT4Q9/CH0H6teHX/4SLr449C/QIEUihTOzae7eO84YZs6cufiQQw5ZU/ySqW3ChAkNR40a1XLSpEkL446ltGbOnNn8kEMOyShsnn5TSaXiHpoI2rWDNm0gNzfc+OiRR0Ji0LBh3BGKiFR+qTTOgUiR1q8PlxwedBAcdRQ8/HAoHzwYpk4NtQVKDESkop188smbK2OtQXFUcyApzR0uugiefRZ27AiDFj3+eOhkCGo6EBFJBtUcSMpZty50LIRw8jcLIxf+97/w2WdwwQXhSgQREUkO1RxISnCHjz4KVxyMGwc7d4Zago4dw70ORESk4qjmQGI3YwZ06wY/+QlMmBCaEWbODImBiIhUPCUHUuHcYdIkeO+98DojA1q3hqeeghUr4P77w/0PRERSyciRI8ttCONrrrmm9Y033ljoPYRKqkaNGr3ybul8/PHHH5hXPn/+/FoHH3xw13bt2vUcPHhwxx07dpS6d5aSA6kwq1eH+xl06QLHHw9//Wsob9IkJAojRkC9pNyZXERk7913332tSrN8bm4uOTmluqVCqdSuXTs3b5TF995777srJq655pq2V1xxxapvv/12TuPGjbPvvffe5qXdtpIDqRA33QRt28Lvfw8tW8KYMaEJQUQkDl9++WWtDh069Dj11FM7dOzYscegQYM6bt68OW38+PENBwwYcEDecq+88kqjE0444YDLLruszc6dO9O6du3a/dRTT+0AcPPNN7fs1KlTj06dOvW49dZb983bbkZGRs/TTz89o3Pnzj2+/vrrWuPGjWvUvXv3bl26dOnet2/fznnbnjdvXt0+ffp0adu27UG33XZbudzSOTc3l08++aTheeedtx7g/PPPX/vaa6+VetB4dUiUpFi1KjQTXHQRNGsW+hRcfnl43b173NGJSKrp04cuBct+9jPWjRxJ1ubNpPXvT6eC83/1K9b89reszcwkfcgQDkicN2UKXxa3z8WLF9d55JFHFg8cOHDrmWeemXHXXXe1uPnmm1ddeeWV7VasWJHeunXr7NGjR+9z3nnnrTnrrLM2PvXUU/vOnz9/LsBHH31Ub+zYsftMmzZtnrvTq1evbv3799/cvHnznG+//bb2E0888U3//v0Xr1ixIv2KK67IeP/99+d37dp116pVq767dfPChQvrTJ48+csNGzbU6NatW8/f/e53WbVr1y7xsMW7du1K69mzZ7caNWr4ddddt3L48OEbVq1ald6wYcOcmjVrApC
RkbFr1apVtYrZ1A+o5kDKTW4uvPUW/PznoZZg5Eh4++0wb9gw+PvflRiISOrYb7/9dg0cOHArwPDhw9dOnjy5QVpaGkOHDl372GOPNVuzZk2N6dOnNzjzzDM3Flz3/fffb3DSSSdtaNSoUW7jxo1zBw8evH7SpEkNAVq1arWrf//+W6Pl6vfp02dz165ddwG0bNnyu3aGgQMHbqhbt663atUqu1mzZruXLVtWqh/sCxYsmDVnzpx5zz333KKRI0fu/8UXX9Tem/cjkWoOpFxs3gyHHgqLFsE++8CVV8KFF0LXrnFHJiKVwZ5+6TdsSO6e5rdqRXZJagoKKuxWywC//vWv1w4ePPjAOnXq+CmnnLI+71d4SdWrVy+3JMsl1hLUqFGD7OzsfAGNGTOmye23394a4NFHH1187LHHbkuc36FDh90A3bt333XkkUdunjJlSr0RI0as37x5c43du3dTs2ZNFi9eXKtly5a7SnUAqOZAyig3F958E+6+O7xu2BBOOw2eew6WLw/lSgxEJJVlZmbWeuedd+oDPPvss82OOuqoLQAZGRm7W7ZsuXvUqFGtLr744u9uDpWenu55t3I+7rjjtkycOLHJ5s2b0zZt2pQ2ceLEpscdd9zmgvvo16/f1ilTpjScP39+LYDEZoXinHPOORvyOhwWTAyysrJqbN++3aLjSJ86dWqDgw8+eHtaWhpHHnnk5ieffLIpwOjRo/c5+eSTN5T2vVFyIKWyYgXcdlsYg2DQoNBUsGNHmDdqVGg+qF1uFVsiIsmTkZGx4/7779+3Y8eOPTZs2JB+3XXXZeXNGzZs2NpWrVrtOvzww3fklZ199tlZ3bp1637qqad2+PGPf7ztrLPOWnv44Yd369WrV7fhw4dnHX300dsL7qN169bZ99133+LTTz/9wC5dunQ//fTTy2UElxkzZtQ55JBDunXp0qX7T37yk85XXXXVyl69eu0AGDVq1LL7779/v3bt2vVcv359+pVXXlnqu1/qls1SYmPHwjnnQE4O9O8fbnY0ZIiSAZHKRrdsDlcVnHzyyZ0WLFjwRWHzzznnnHaHHXbYtquvvrrS31a6KLpls5TJsmUwenS4C+KAAfDjH8Pvfhf6EhxwQPHri4hURj169OhWt27d3EceeWRp3LHERcmB5JOTA2+8Ee5x8PrroW/Bn/8ckoN27eCOO+KOUERk73Xp0mVXUbUGX3zxxbyKjifVKDmQfI4/Hj78EPbbL1yKeMEFuseBiCRFbm5urqWlpVXetu1KLDc314Air6pQh8RqLDsbXn01dCLcuTOUXXEFvPwyfPttGN5YiYGIJMmcrKysxtFJSipQbm6uZWVlNQbmFLWMag6qocWL4YknQn+CFSugVSv46is46CA488y4oxOR6iA7O/vClStXPr5y5cqe6IdqRcsF5mRnZ19Y1AJKDqqZL74ISQDAiSfCgw/C4MGQrm+CiFSgXr16rQZOjTsOKZxOCVXcN9/A44+Hk/8tt4Thi++5B372s9DBUEREpCAlB1XQxo3hngaPPRbudZCWBr/8ZZhnBlddFW98IiKS2tTOU4llZ8PcufD88/DHP8LWraH89ttD34H58+HWW2HJEnjmmXhjFRGRykM1B5XEqlXQoAHUrx9qBa6/PvQf2BXdTiM9PVx1cPDBYRTDgQOhXz+oUeJRvEVERAIlBylo3Tp47TWYNev7x+rV8Mor4eZGDRrAvvuGgYkOPjg8unaFWtEdu3v0CA8RSW25ubB79/eP7Oz8r/c0LyMDOneO+wikqlJyEBP3MDxxYgJw2mnwi1/AmjVw7rlQpw707Aknn/x9EgDQty/85z+xhi9S4dzLdhLd23nJ3FduiW7sW7jrr4c77yy/91ckkZKDCrBlC8yZE6r4jzgi3MWwdWtYv/77ZTIy4JhjwvQBB4T+AgceqGYBKRv3709AcZz0krG9nJyKe//S0qBmze8f6en5XxdVXq9e8euUZnt7mrf//hX3fkj1k3LJgZkNAu4FagCPu3tK58busHlzGExo167vf93ffjt8/jnMng1ffx3KTjkFxo8PNQKXXhr+uA8+ONQONG78/TZr1IAuXSr+WCob9/DIyQm/wHJzv58uaVl5r5MKJ9G81xXFrGwnvTp1yn4SLa8TbFHz0tRVW6q5lEoOzKwG8L/ACcAy4HMzG+/uc8tzP2vWhBEBIZxcEp/zpnfuhG3bYMMGyMqCtWvDP91Bg8L8e++FadNC/4C8oYcPOCCUu8MLL4R12reHPn3Cc7t28NJLYf7BB4fnJUvCWAR5J7rc3D1PFzc/cbosJ8NUOMGWdJ3KerfxspzA6tcv/5NoeZ1gVbslUvWkVHIA9AEWuvsiADN7HhgClGtyMGkSDB1atnXvvrvoeV9/HfoHJFq+HCZPLtu+yluNGuEXUVra99OFlRU3v7h10tPLf5vltU6y4yjuBFujRvilLSKSylItOWgDJN4/exnwo8QFzOxi4GKAdmUc4u+YY+Caa2DGjB9Wc55/fljm88/Dib1JE9hnH2jePDzq1w//3Ev6SEvL/7w306VdL/HkpROSiIiUVKolB8Vy90eBRwF69+5dporl/faDUaP2vMyAAWXZsoiISOWXat1ulgOJfXDbRmUiIiJSQVItOfgc6GRmHcysFjAMGB9zTCIiItVKSjUruHu2mV0BvEm4lHG0u38Rc1giIiLVSkolBwDuPhGYGHccIiIi1VWqNSuIiIhIzJQciIiISD5KDkRERCQfJQciIiKSj3llHaAeMLMsYEkxizUH1lRAOKlGx139VNdj13GXXnt3b1GewUjVUqmTg5Iws6nu3jvuOCqajrv6qa7HruMWKX9qVhAREZF8lByIiIhIPtUhOXg07gBiouOufqrrseu4RcpZle9zICIiIqVTHWoOREREpBSUHIiIiEg+VTo5MLNBZvalmS00s5Fxx1NaZra/mU0ys7lm9oWZXRmVNzOzt81sQfTcNCo3M7svOt5ZZnZ4wrZGRMsvMLMRCeW9zGx2tM59ZmYVf6SFM7MaZvZfM5sQve5gZp9Fsb4Q3dYbM6sdvV4Yzc9I2MYfovIvzeynCeUp+90wsyZmNs7M5pvZPDPrWx0+czO7OvqezzGz58ysTlX8zM1stJmtNrM5CWVJ/3yL2odIody9Sj4It3z+GugI1AJmAt3jjquUx9AKODyabgh8BXQH/gaMjMpHAv8TTZ8EvAEYcCTwWVTeDFgUPTeNpptG86ZEy1q07olxH3fC8V8DjAUmRK9fBIZF0w8Dv46mLwMejqaHAS9E092jz7020CH6PtRI9e8G8DRwYTRdC2hS1T9zoA3wDVA34bM+typ+5sCxwOHAnISypH++Re1DDz0Ke1TlmoM+wEJ3X+Tuu4DngSExx1Qq7p7p7tOj6c3APMI/0SGEEwjR82nR9BBgjAefAk3MrBXwU+Btd1/n7uuBt4FB0bxG7v6puzswJmFbsTKztsBg4PHotQHHA+OiRQoed977MQ7oHy0/BHje3Xe6+zfAQsL3ImW/G2bWmHDyeALA3Xe5+waqwWdOuIV8XTNLB+
oBmVTBz9zdPwTWFSiuiM+3qH2I/EBVTg7aAEsTXi+LyiqlqNr0MOAzoKW7Z0azVgIto+mijnlP5csKKU8F/wB+D+RGr/cBNrh7dvQ6Mdbvji+avzFavrTvRyroAGQBT0ZNKo+bWX2q+Gfu7suBu4FvCUnBRmAa1eMzh4r5fIvah8gPVOXkoMowswbAv4Cr3H1T4rzo10GVuh7VzE4GVrv7tLhjiUE6ocr5IXc/DNhKqAL+ThX9zJsSftl2AFoD9YFBsQYVk4r4fKvid0jKV1VODpYD+ye8bhuVVSpmVpOQGDzr7i9Hxaui6kOi59VReVHHvKfytoWUx+1o4FQzW0yo/j0euJdQpZoeLZMY63fHF81vDKyl9O9HKlgGLHP3z6LX4wjJQlX/zAcA37h7lrvvBl4mfA+qw2cOFfP5FrUPkR+oysnB50CnqLdzLUKnpfExx1QqURvqE8A8d78nYdZ4IK938gjg1YTyc6IezkcCG6NqxDeBgWbWNPqFNhB4M5q3ycyOjPZ1TsK2YuPuf3D3tu6eQfjc3nP3s4FJwM+jxQoed9778fNoeY/Kh0U92zsAnQidtVL2u+HuK4GlZtYlKuoPzKWKf+aE5oQjzaxeFFfecVf5zzxSEZ9vUfsQ+aG4e0Qm80Ho6fsVoZfyn+KOpwzx/5hQ9TcLmBE9TiK0rb4LLADeAZpFyxvwv9HxzgZ6J2zrfELnrIXAeQnlvYE50ToPEI2amSoPoB/fX63QkfCPfiHwElA7Kq8TvV4Yze+YsP6fomP7koRe+an83QAOBaZGn/u/Cb3Rq/xnDtwCzI9i+yfhioMq95kDzxH6Vewm1BRdUBGfb1H70EOPwh4aPllERETyqcrNCiIiIlIGSg5EREQkHyUHIiIiko+SAxEREclHyYGIiIjko+RAqiUzyzGzGWY208ymm9lRxSzfxMwuK8F23zez3mWMaaKZNSnLuiIi5UnJgVRX2939UHc/BPgDcEcxyzch3Akwadz9JA83WRIRiZWSAxFoBKyHcB8LM3s3qk2YbWZ5d+67Ezggqm24K1r2+miZmWZ2Z8L2zjSzKWb2lZkdU3BnZtbKzD6MtjUnbxkzW2xmzc3s0mjeDDP7xswmRfMHmtknUWwvRffcEBEpdxoESaolM8shjDhXB2gFHO/u0/JuF+zum8ysOfApYQje9oSRGntG658I3AAMcPdtZtbM3deZ2fvANHe/1sxOAq5x9wEF9n0tUMfd/2pmNaL9bY7uJdHb3ddEy9UE3gP+BnxCuN/Aie6+1cyuJ4wWeGsy3ycRqZ7Si19EpEra7u6HAphZX2CMmfUkDFd7u5kdS7hddBsKv7XtAOBJd98G4O7rEubl3SBrGpBRyLqfA6Ojk/+/3X1GETHeS7hnwGvRnSq7A/8XhsynFiFhEBEpd0oOpNpz90+iWoIWhPH3WwC93H139Gu+Tik3uTN6zqGQvzF3/zBKPgYDT5nZPe4+JnEZMzuXUFtxRV4R8La7/7KUsYiIlJr6HEi1Z2ZdgRqEW/42BlZHicFxhBM0wGagYcJqbwPnmVm9aBvNSrG/9sAqd38MeJxwS+bE+b2A64BfuXtuVPwpcLSZHRgtU9/MOpfuSEVESkY1B1Jd1TWzvOp8A0a4e46ZPQu8ZmazCXdGnA/g7mvN7P/MbA7whrv/zswOBaaa2S5gIvDHEu67H/A7M9sNbCHcVjfRFUAzYFLUhDDV3S+MahOeM7Pa0XJ/JtxlUESkXKlDooiIiOSjZgURERHJR8mBiIiI5KPkQERERPJRciAiIiL5KDkQERGRfJQciIiISD5KDkRERCSf/wfym1pzeYC0sAAAAABJRU5ErkJggg==\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "max_batch_size = 100000\n", + "\n", + "plot_absolute_time(experiments_eq, results, [50], max_batch_size=max_batch_size)" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAiIAAAEWCAYAAABbt/wMAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjIsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8li6FKAAAgAElEQVR4nO3dd5gUVdbH8e8hgyRBFhGEwUAYsyJrXAOKKAgqopgxYQ5g3HXXdV3X8LrmHEDEhCsmRFwjiFlBRAEBUREkSc4Cw5z3j3tH29kJPTg9NeH3eZ5+ZupWOhW6+/StW7fM3RERERFJQrWkAxAREZGqS4mIiIiIJEaJiIiIiCRGiYiIiIgkRomIiIiIJEaJiIiIiCSmwiYiZuZmtl0R4x80s7+lDJ9nZgvMbJWZNd2E9WXFddbY1JhLS9yGbZKOo6yY2RAzu6EE0xd5bpQ3ZvaamZ2WdBzpMLPW8fyrvgnz1jWzV8xsuZk9l4n4ZNOY2Ulm9kbScUjVVGwiYmYzzWy9mW2Rr3xC/MDPisND4nCvfNPdEcv7xeF+ZrYxfpitMrPvzewxM2tXalsFuPu57v7PuM6awO1AV3ev7+6LM/llFbfx/VJa1hgzOyu1LG7Dd6Wx/NJQHpI0M9vbzD5Mav3pMrPrzOzJ1DJ3P9zdH08qpqLE9/8hecPuPiuefxs3YXHHAs2Bpu7epxRiq2Vmw2OMbmYH5htvZnaLmS2Or1vMzFLG72pm481sTfy76++NKQkl/bwp6P3q7k+5e9fMRChStHRrRL4HTsgbMLOdgHoFTDcdODVluhrAccC3+ab7yN3rA42AQ4C1wHgz2zH90AtXwK+15kAdYHJpLF9KVyklMN2BUaWwnCojgcSxDTDd3XNKOmMRsb4PnAzML2Bcf+AoYBdgZ+BI4Jy4vFrAy8CTwObA48DLsTxjYnJUYWuiRTLC3Yt8ATOBvwKfpZT9G7gGcCArlg2J5QuAzWNZD+A1wodFv1jWD3i/gPWMBIYXEccVwDxgLnBGXPd2Ket+gPBFtJqQ3AwBbgDaxTIHVgHvAGPj8OpYdnwB66set2cR8B1wQZynRhzfCBgUY5oT11Ud6Aj8DGyMy14Wp68dlzcr7qMHgbop6+sFfAGsICRu3YB/xeX8HJd1b5w2ddsbAUOBhcAP8VhVS93Xcb1LCQnl4cUc6z8DU+L0jwF14rhJwJEp09aM+2a3uE15+3cVsDchyf1rjOmnGGOjOG9WnP7MOO/YWL4f8CGwDJjNr+fMEOA+4FVgJfAJsG2+2D8Hdi9g/+wXl3VgHO4AvAksAaYBx6Uso7h9+QFwL7AcmAp0SZm3H+E8WRn380kF7N9uwHpgQ9xPE2P5GOCsfOu5I+6H74B9YvnsuC9PS1lmkedVvvWnLnsx4ZzdlvCeWByP51NA4zj9E0Au4YfCKuDKlGOX9z7YChgR9+cM4OxC1v2PfNt+JptwjhRx7v6Yd4xTyj4E+qcMnwl8HP/vSnjfWsr4WUC3QpY/BrgJ+JTwHn0ZaJIyfi9+PXcnpsYS5/1X3Pdrge1i2Q1xnlXAK0DTuP9XAJ/x62frb/Z56jlD4Z833YEJcVmzgevybWf+92s/Uj6XCefcZ4Rz/TNgn3zr/mfcnpXAG8AWcVwdQnK3OO6Lz4DmxX3P6FW1X8VPEL6cDiF8aHckfNn+SPh1kz8RuQF4GDgvlv2HUJOSTiJyBrCgkBi6ET5kdwQ2A57mfxOR5cC+hA+3OnnxxPEFvZF/mb+QdZ5L+LLZGmgCjOa3H8AvAg/FeP5A+IA6p7BtJHz4j4jLakD44Lkpjusc4z80xt8S6BDHjSF+SRUUO+HD++W4zCxCrdSZKXFsAM6Ox+08QiJnhWzzTELCkbfNH6TswyuBZ1Om7QV8VcT+PYPwxbQNUB94AXgi3/RD4/6rSzifVsbzpSbhQ3nXlOO7OO6nGoQP62Ep62pBypdK3v4hnDezgc6xfLM4fHpczm6EL9/sNPdlDjAgxnd8PGZN4nJXAO1T4tmhkH18HfBkvrJfjnHKek6Px+wGwhfHfYSko2vcT/WLO68KWHfesi+K21837qdD47KbEZL0O/O//1OGf3Os4/T3E95zuxKSuIPT2XZKeI4U8zlVUCKyHPhjynAnYGX8fwDwWr7pRwKXFbL8MYRzLO8z6Pm8bSG8XxcDRxDev4fG4WYp884Cdoj7vWYsm0FIBBsRkv/phM/aGnG7Hyvi/ZX/nMn/eXMgsFOMZ2fC5+dRRSzvl2UQzqWlwCkxlhPicNOUdX9L+JFXNw7fHMedQzgH6xHO3z2AhsV9z+hVtV8lqSJ8gnDZ5VDga8KbsiBDgVPNrDFwAPBSmsufS3gDFOQ4wptykruvJnyg5feyu3/g7rnu/nOa6yzKcYQP5NnuvoTwawgAM2tO+NC51N1Xu/tPhC+EvgUtKF6X7g8McPcl7r4SuDFl+jOBwe7+Zox/jrtPLS7AeAmqL/Bnd1/p7jOB2wgfIHl+cPdHPFzTf5zwJdm8iMXem7LN/+LXS3JPAkeYWcM4fArhnCjMScDt7v6du68i1LT0zVfFfl3cf2uBE4G33P0Zd9/g7ovd/YuUaV909089VOs/RfjSy3ME8F9395SyPoRE8XB3/zSW9QBmuvtj7p7j7hMIXyh90tyXPxHOiQ3u/iwhOe8ex+UCO5pZXXef5+6/5zLg9zHGjcCzhMTwendf5+5vEGoWtkvjvCrIXHe/J27/WnefEc+7de6+kNCW6oB0gjSzrQnJ/1Xu/nM8Xo+Scnm2GCU9R0qqPiEZybMcqB/3W/5xeeMbFLG8J1I+g/4GHBfPm5OBUe4+Kr5/3wTGEc7LPEPcfXLc7xti2WPu/q27LyfUHH/r7m/Fc/w5QqK8Sdx9jLt/FeP5EniGNI8r4Zz+xt2fiPE+Q/hRdmTKNI+5+/R4XP7Dr+/HDYQfEdu5+0Z3H+/uKzZ1O6RqKMk14icIv37aEpKNArn7+2bWjHDpZqS7r01pH1aUloTq3YJsBYxPGf6hgGlmp7OSEtgq3zJT19mG8KtmXsq2VSsihmaEXwjjU9vKEX4xQPii2ZT2DVvEOFJj+4GwL/P8cu3c3dfE9dcvYpn5t3mrOO9cM/sA6G1mLwKHA5cUsZytCoirBr9NglLXtTX/25YoVWobgDX8dhuOINSSpboUGOru
k1LK2gB/NLNlKWU1COd2OvtyTr5k5wdgK3dfbWbHA5cDg+J+uiydZLIQC1L+Xwvg7vnL6lP8eVWQ35yjMam+C9if8CVcjfDrNx1bAXkJUJ4fCDUP6c5fknOkpFYBDVOGGwKr3N3NLP+4vPErKVz+90ZNwnnThpDMpn5R1yTUohY0b578x7SgY7xJzOyPwM2EGpxahBqvdO9Uyn9coIjPFX77fnyC8F4eFn+MPglck5J8ifyPtGtE3P0HwrXvIwhVqEV5EriMIhKWAhwNvFfIuHmEkztP64JCLMG60lHUOmcD6wjXRRvHV0N336GQWBYRPlh2SJm+kYcGu3nL27aQOIrarkWEXyBt8sVZWG1VOvJv89yU4ccJv/76EBoc562noBjnFhBXDr/9sE2dr6h9UKh4R9QBhHYfqfoAR5lZarI0G3g35Rg09nAHyHmkty9b2m+z6l/2j7u/7u6HEmqcpgKPFBJyaZ6nxZ1X6az/xli2k7s3JBxfK2L6VHOBJmaWWotQkvOvpOdISU0mNFTNswu/NlifDOyc73juTNEN2vO/NzYQjsFsQm1J6nm1mbvfnDL979mO1fFv6g0CWxaz7KcJl+y2dvdGhLZDVsT0qfIfF0jzuMbawn+4ezahnUkP0q8hkyqqpK23zyRc/11dzHR3Ey7hjC1qIjOrbmZtzewewjXNfxQy6X+AfmaWbWb1gL+XLOwCLSBcmy7Mf4CLzayVmW0OXJ03wt3nERpo3WZmDc2smplta2Z5VZ8LgFZ5LfDdPZfwxXSHmf0BwMxamtlhcfpBwOlm1iUuq6WZdSguzlh1/x/gX2bWwMzaAAMJieCmuiBucxNCrdazKeNeAnYn1ISkJpkLCZcmUuN8BhgQj299whfes174HRNPAYeY2XFmVsPMmqZ5O+V+wJcFVP/OBboAl5jZebFsJNDOzE4xs5rxtaeZdUxzX/6BcE7UNLM+hDZTo8ysuZn1MrPNCAnqqrg/CrIAyCqNOyfSOK/S0YAQ73Iza0loFJ4/3sLOv9mExpY3mVkdM9uZ8BmR7vlX0nPkf5hZbTOrEwdrxTjyvnCHAgPjPtmK8ONoSBw3htDA8+K4jAtj+TtFrO7klM+g6wmN6zcStvdIMzssfqbVMbMDzaxVuttRlHjJbE5cf3UzO4PfJu2/+byJGhBqq342s86ES595Cnq/phpFeJ+cGN+LxwPZhPdPkczsIDPbKV6yWkFI1gp7L4gAJUxE4vXMcWlMt8Td385XjZ1q71g1uoLwgdAQ2NPdvypkea8BdxI+JGZQ9IdFuq4DHjezZWZ2XAHjHwFeJ7SA/5z/rQU6lVDlmXeHyXDCr2FifJOB+Wa2KJZdFWP/2MxWAG8B7eP2fUponHgH4Tr1u/z6i+Qu4FgzW2pmdxcQ50WEX0zfERoFPw0MTm8XFOhpQpL1HeFSyS8dicXrwc8TLs+9kFK+hnhXQNyfe8UY8i7nfU9o2X9RYSt191mE2rbLCJfovuC3v2YLU+htu3GZXYCrzeyseAmhK6ENxVxC9fIthGprKH5ffgJsT/gV/C/gWHdfTHgfDYzLXEKooTmPguVVjy82s8/T2L7iFHpepekfhORyOeGupPzn+U3AX+NxvbyA+U8gNH6cS2jA/Xd3fyvNdZfoHCnENEKtUEvC+3Utv753HiI0nPyK0Aj71ViGu68n3Np7KuHujjMIjTnXF7GuJwiJzHxC49yL47JmExpv/4XwJT+bkNCV5m26Z8dlLiY0ek3tM6egz5vzgevNbCVwLSHJJsZb0PuVlPGLCTUZl8X1XQn0cPdFFG9LwmfhCkJbwncpui2ZyC93GYhgZjMJLfEL/SIxs2uBdu5+cpkFVgQzm0JICKZkeD39CPtmv0yuR8onMxtDuEvm0aRjEalsEu+uXCqOeLnmTH57J0liYlX00EwnISIikjnq4U/SYmZnE6qcX3P3Itv+lBV3X5+vQaCIiFQwujQjIiIiiVGNiIiIiCSmQrQR2WKLLTwrKyvpMEREKpTx48cvcvdmScchUpQKkYhkZWUxblyxdw2LiEgKMyuoF2qRckWXZkRERCQxSkREREQkMUpEREREJDFKRERERCQxSkREREQkMUpEREREJDFKRERERCQxSkRERMqhn3+GQYPgmWeSjkQks5SIiIiUIwsWwN//Dq1bw1lnwbBhSUckkllKREREyoGJE6Ffv5CAXH89/PGP8Pbb8NJLSUcmklkVoot3EZHKKDcXXn0V7rgDRo+GevVCLcgll0C7dklHJ1I2lIiIiJSxVatgyBC46y6YMQNatYJbbglJSJMmSUcnUraUiIiIlJHZs+Gee+CRR2DZMujcOTRG7d0batZMOjqRZCgRERHJsI8/hjvvhOHDwT0kHgMGwN57Jx2ZSPKUiIiIZEBODrzwQmj/8fHH0LAhXHopXHQRtGmTdHQi5YcSERGRUrRsWbj0cs894VLMttvC3XeHO2IaNEg6OpHyR4mIiEgpmDEjND597DFYvRoOOCAkIz16QPXqSUcnUn4pERER2UTuMGZMuPwyciTUqAEnnBAuwey2W9LRiVQMSkREREpo3brQ4+mdd8IXX8AWW8A118D550OLFklHJ1KxKBEREUnTwoXwwANw//2hK/bs7NAe5KSToG7dpKMTqZiUiIiIFGPSpFD78eSToTakW7dw++2hh4JZ0tGJVGxKRERECpCbC//9b0hA3nwz1Hj06xe6X+/YMenoRCoPJSIiIinWrIGhQ8MdMFOnhjYf//oXnHMONG2adHQilU/GExEzqw6MA+a4ew8zawsMA5oC44FT3H19puMQESnKnDlw333w0EOwZAnssUe4FNOnD9SqlXR0IpVXtTJYxyXA1ynDtwB3uPt2wFLgzDKIQUSkQOPGhcamWVlw882h/4+xY+Gzz0K5khCRzMpoImJmrYDuwKNx2ICDgeFxkseBozIZg4hIfhs3wvPPw/77w557wiuvwIUXhk7JXnghlKsRqkjZyPSlmTuBK4G8jo2bAsvcPScO/wi0zHAMIiIArFgBgwaFLtdnzgy1ILffDmeeGZ4FIyJlL2OJiJn1AH5y9/FmduAmzN8f6A/QunXrUo5ORKqS774LycfgwbByJey3H9x2G/Tqpe7XRZKWyRqRfYGeZnYEUAdoCNwFNDazGrFWpBUwp6CZ3f1h4GGATp06eQbjFJFKyB3efz90v/7yy1CtGhx3XOj/o1OnpKMTkTwZayPi7n9291bungX0Bd5x95OA0cCxcbLTgJczFYOIVD3r14e7XfbcE/70J3j3XbjqqnAp5qmnlISIlDdJ9CNyFTDMzG4AJgCDEohBRCqZxYvDrbf33Qdz50L79qE79lNPhXr1ko5ORApTJomIu48BxsT/vwM6l8V6RaTy+/rr0PnY0KGwdm3odv3RR+Gww8LlGBEp39SzqohUOO6h2/U77gjdsNeuDSefDJdeCjvumHR0IlISSkREpMJYuza087jzTpg8GZo3h+uvh3PPhWbNko5ORDaFEhERKffmzYP774cHH4RFi2CXXWDIEOjbN9SGiEjFpURERMqtL74
Il1+eeQZycuDII8PttwccoJ5PRSoLJSIiUq5s3AgjR4YE5N13YbPNwpNvL74Ytt8+6ehEpLQpERGRcmHlynC55a674NtvoXVruPVWOOssaNw46ehEJFOUiIhIon74Ae65J9xyu3w57LUX3HgjHHMM1NAnlEilp7e5iJQ5d/joo3D55YUXQnuPY48Nt9/utVfS0YlIWVIiIiJlZsMGeP75kIB8+ik0agSXXQYXXhguxYhI1aNEREQybulSePhhuPde+PFH2G678P9pp0H9+klHJyJJUiIiIhkzfXpofDpkCKxZAwcfHPoD6d5d3a+LSKBERERKlTu8807o/XTkSKhVC048MbT/2GWXpKMTkfJGiYiIlIqffw4dj915J3z5Zehy/dpr4bzzYMstk45ORMorJSIi8rssWBC6Xr//fvjpp/DQuUGDQi1InTpJRyci5Z0SERHZJF99Fe5+eeopWL8ejjgidL/epYu6XxeR9CkREZG05ebCa6+FBOTtt6FuXTjzTLjkEmjfPunoRKQiUiIiIsVavRoefzzcATN9OrRsCTfdBP37Q5MmSUcnIhWZEhERKdTs2XDffaEPkKVLYc894emnQy+oNWsmHZ2IVAZKRETkf3z6abj88txz4Xbco48O7T/22UftP0SkdCkREREAcnLgxRfD7bcffggNG4a2HxdeCG3bJh2diFRWSkREqrjly8OTb++5JzwJd5ttQjJy+ukhGRERySQlIiJV1Lffwt13w+DBsGoV/OlPIQE58kioXj3p6ESkqlAiIlKFuMPYsaH9x4gRUKMG9O0bul/fffekoxORqkiJiEgVsH49PPtsSEAmTICmTeEvf4Hzz4ettko6OhGpypSIiFRiixaF7tfvuw/mz4eOHeGhh+Dkk6FevaSjExHZhETEzKoB9d19RQbiEZFSMGVKaO/xxBPhYXSHHQZDhkDXrrr9VkTKl2rpTGRmT5tZQzPbDJgETDGzKzIbmoiUhDv8978h6dhhh5CEnHIKTJ78a7mSEBEpb9JKRIDsWANyFPAa0BY4JWNRiUja1q4NPZ/usAMcfnh4GN0NN4ReUR9+GLKzk45QRKRw6V6aqWlmNQmJyL3uvsHMPINxiUgx5s4NbT8eeggWL4bddoOhQ+H446FWraSjExFJT7qJyEPATGAiMNbM2gBqIyKSgPHjQ/uPZ58NvaH26hW6X99/f116EZGKJ61ExN3vBu5OKfrBzA7KTEgikt/GjaHfjzvugPfeg/r14bzz4OKLYdttk45ORGTTpZWImFlT4O/AfoAD7wPXA4szF5qIrFgBjz0WekD97jto0wZuuw3OPBMaNUo6OhGR3y/dxqrDgIVAb+DY+P+zmQpKpKqbORMGDoSttw69nrZoEZ6EO2NGKFcSIiKVRbptRFq4+z9Thm8ws+MzEZBIVeUOH3wQ2n+8+CJUqwZ9+oREpHPnpKMTEcmMdBORN8ysL/CfOHws8HpmQhKpWjZsCLUdd9wB48bB5pvDFVfAhRdCq1ZJRyciklnpJiJnA5cCT8bhasBqMzsHcHfXw8JFSmjJktDPx733wpw50K4d3H8/nHoqbLZZ0tGJiJSNdO+aabApCzezOsBYoHZc13B3/7uZtSW0O2kKjAdOcff1m7IOkYpm2rRw+eXxx0NnZF26hL5ADj88XI4REalK0n7WjJn1BP4UB8e4+8g0ZlsHHOzuq2KHaO+b2WvAQOAOdx9mZg8CZwIPlDB2kQrDHd5+O1x+GTUKateGk04K7T922inp6EREkpPus2ZuBi4BpsTXJWZ2U3HzebAqDtaMLwcOBobH8scJPbaKVDo//wyDBsHOO8Ohh4Y2INddB7NmhXIlISJS1aVbI3IEsKu75wKY2ePABODPxc1oZtUJl1+2A+4DvgWWuXtOnORHoGUJ4xYp1xYsCO09HngAFi4Michjj0HfvlCnTtLRiYiUH2lfmgEaA0vi/2n3YuDuG4Fdzawx8CLQIZ35zKw/0B+gdevWJQhTJDkTJ4bLL888E+6G6dEjXH456CB1vy4iUpB0E5GbgAlmNhowQluRq0uyIndfFuffG2hsZjVirUgrYE4B0z8MPAzQqVMnPWBPyq3cXHj11ZCAjB4N9erB2WfDJZfA9tsnHZ2ISPmW7l0zz5jZGGDPWHSVu88vbj4zawZsiElIXeBQ4BZgNKEvkmHAacDLmxC7SKJWrYIhQ+Cuu0KPp61awS23hCRk882Tjk5EpGJIt7GqAV0I7URGALXMLJ2+HlsAo83sS+Az4M14t81VwEAzm0G4hXfQJkUvkoDZs+HKK0P36xddBE2bwrBh4VkwV16pJEREpCTSvTRzP5BLuNvlemAl8Dy/1pAUyN2/BHYroPw7QJ1WS4XzzDNw+umh/Ufv3jBgAOy9d9JRiYhUXOkmIn90993NbAKAuy81s1oZjEukXHGHG2+Ev/4V9t8fnngiPAlXRER+n3QTkQ3xNlyHX9p+5GYsKpFyZMMGOPdcGDw4dEI2aFDokExERH6/dDuUvptw6+0fzOxfwPuEO2lEKrVly0LX64MHw9/+FmpClISIiJSedO+aecrMxhMarBpwlLt/ndHIRBL2ww9wxBEwfXrojKxfv6QjEhGpfNJKRMzsTHcfBExNKbvZ3UvUl4hIRfHZZ3DkkaGL9tdfh4MPTjoiEZHKKd1LM73N7KS8ATO7D2iWmZBEkvXyy3DAAVC3Lnz0kZIQEZFMSrexam9ghJnlAt0Iz4o5M3NhiZQ999A52cCBsOeeMGIENG+edFQiIpVbkTUiZtbEzJoAdYGzgCsJfYj8I5aLVAobN8LFF4d+QY46KnTVriRERCTziqsRGU+4ZddS/naPLwe2yWh0ImVg1So44QQYORIuuyx00169etJRiYhUDUUmIu7etqwCEUnC3LnhCbkTJ8L998N55yUdkYhI1ZJuGxGRSuerr8LtuUuXwiuvhP9FRKRspXvXjEil8vrrsO++kJsL77+vJEREJCnFNVatWVaBiJSVhx+G7t2hbVv45BPYddekIxIRqbqKqxH5yMxeMrNzzSyrDOIRyZjcXLj6ajjnHDj00FAT0qpV0lGJiFRtxTVW7RQTkG7AnWbWkvCcmdeAd919XcYjFCkFa9fCaafBc8+FB9jdcw/UUAspEZHEFdtGxN1nuvuD7n4UsA/wCnAI8J6ZvZrpAEV+r4ULoUuXkITcemu4O0ZJiIhI+VCij2N33wC8E1/EGhKRcmvatNAQde5cGD4cevdOOiIREUn1u34Xuvuc0gpEpLSNHRt6Sa1RI/SUutdeSUckIiL56fZdqZSeeio0SG3eHD7+WEmIiEh5pUREKhV3uP56OPlk2Gcf+PBD2EYPIhARKbfSujRjZu2AK4A2qfO4ux6QLuXG+vXQvz88/jiccgo8+ijUqpV0VCIiUpR024g8BzwIPAJszFw4Iptm6dLQEHX0aLjuOrj2WjBLOioRESlOuolIjrs/kNFIRDbR99+HO2O+/RaeeCJclhERkYqhyETEzJrEf18xs/OBF4FfOjFz9yUZjE2kWJ98Aj17woYN8OabcMABSU
ckIiIlUVyNyHjAgbxK7itSxjmgZoCSmBdegJNOgq22gldfhQ4dko5IRERKqrgu3tuWVSAi6XKH22+HK66AP/4RRoyAZs2SjkpERDZFWrfvmtkFZtY4ZXjzeKlGpEzl5MAFF8Dll4fGqe+8oyRERKQiS7cfkbPdfVnegLsvBc7OTEgiBVu5Enr1ggcegCuvhGefhbp1k45KRER+j3TvmqluZubuDmBm1QH10CBlZs4c6NEDvvoKHnoo9BciIiIVX7qJyOvAs2b2UBw+B/hvZkIS+a2JE6F7d1ixAkaOhG7dko5IRERKS7qJyJVAf+C8OPwm8GhGIhJJ8dprcNxx0LgxvP8+7Lxz0hGJiEhpKjYRiZdhhrr7SYTeVUXKxIMPwoUXhuTjlVegZcukIxIRkdJWbGNVd98ItDEztQmRMpGbG27NPe+8cBlm7FglISIilVW6l2a+Az4wsxHA6rxCd789I1FJlbV2bXhg3fPPh9t077wTaqR7loqISIWT7kf8t/FVDWiQuXCkKluwINye++mnocOySy/Vg+tERCq7tBIRd/8HgJnVj8OrMhmUVD1ffx0eXLdgQei6/aijko5IRETKQro9q+5oZhOAycBkMxtvZjsUM8/WZjbazKaY2WQzuySWNzGzN83sm/h389+/GVKRjR4N++wDa9bAu+8qCRERqUrS7Vn1YWCgu7dx9zbAZcAjxcyTA1zm7tnAXsAFZpYNXA287e7bA2/HYamihg6Fww4LD6775BPYc8+kIxIRkbKUbiKymbuPzhtw9zHAZkXN4O7z3P3z+P9K4GugJdALeDxO9jig3z/OsREAAB1DSURBVL9VkDtcdx2cdhrsvz988AFkZSUdlYiIlLW075oxs78BT8Thkwl30qTFzLKA3YBPgObuPi+Omg80L2Se/oRO1GjdunW6q5IKYN06OOssePJJ6NcvdNleSzeHi4hUSenWiJwBNANeAJ4HtgBOT2fG2MD1eeBSd1+ROi4+u8YLms/dH3b3Tu7eqZker1ppLFkCXbuGJOSGG2DwYCUhIiJVWbo1Ioe4+8WpBWbWB3iuqJnMrCYhCXnK3V+IxQvMrIW7zzOzFsBPJQ1aKqZvvw3PjPn+e3j6aTjhhKQjEhGRpKVbI/LnNMt+YWYGDAK+ztfx2QjgtPj/acDLacYgFdhHH8Fee8HChfDWW0pCREQkKLJGxMwOB44AWprZ3SmjGhLuiinKvsApwFdm9kUs+wtwM/AfMzsT+AE4blMCl4rjuefg1FNDN+2jRkG7dklHJCIi5UVxl2bmAuOAnsD4lPKVwICiZnT394HC+sXskm6AUnG5w623wlVXhX5CXn4Zttgi6ahERKQ8KTIRcfeJwEQzWwqMdPfcsglLKrqcnPCsmIcfhuOPhyFDoE6dpKMSEZHyJt02IscB35jZ/5lZh0wGJBXfihXQo0dIQv7859AwVUmIiIgUJN1nzZxsZg2BE4AhZubAY8AzsbMyEQBmzw53xkyZAo88EvoLERERKUy6NSLEPkCGA8OAFsDRwOdmdlGGYpMKZsKEcGfMDz/Aa68pCRERkeKl+9C7nmb2IjAGqAl0dvfDgV0Iz52RKm7kyNBVe/Xqobv2Qw9NOiIREakI0u3QrDdwh7uPTS109zXxNlypwu67Dy6+GHbbDV55BVq0SDoiERGpKNKqEXH304DpsWbkSDPbMmXc2xmLTsq1jRth4EC48MLQLuTdd5WEiIhIyaR7aeZM4FPgGOBY4GMzOyOTgUn5tmYNHHss3HFHqA158UXYrMjnMYuIiPyvdC/NXAns5u6LAcysKfAhMDhTgUn5NX8+9OwJ48fDXXeFRERERGRTpJuILCb0pppnZSyTKmby5HAZZuHCUAvSs2fSEYmISEWWbiIyA/jEzF4GHOgFfGlmAwHyPdROKqm334bevaFuXRg7FvbYI+mIRESkoks3Efk2vvLkPTG3QemGI+XVY49B//7Qvn14cF3r1klHJCIilUG6Pav+I9OBSPnkDtdeCzfcEPoGee45aNQo6ahERKSySLdGRKqgdevgjDPCs2LOOgvuvx9q1kw6KhERqUyUiEiBFi+Go46C99+Hm26Cq64Cs6SjEhGRykaJiPyPGTPgiCNg1iwYNgyOPz7piEREpLJKt0Ozdmb2tplNisM7m9lfMxuaJOGDD8KD65YsCXfJKAkREZFMSvfpu48AfwY2ALj7l0DfTAUlyXj2WejSBZo0gY8/hn33TToiERGp7NJNROq5+6f5ynJKOxhJhntoB9K3L3TuDB99BNttl3RUIiJSFaTbRmSRmW1L6MwMMzsWmJexqKTMbNgA550HgwbBiSfC4MFQu3bSUYmISFWRbiJyAfAw0MHM5gDfAydnLCopE8uXQ58+8Oab8Ne/wvXX684YEREpW+l2aPYdcIiZbQZUc/eVxc0j5dusWeGZMVOnhlqQ009POiIREamK0kpEzKwxcCqQBdSw+LPZ3fXc1Qpo/Hjo0QPWroX//jc0UBUREUlCupdmRgEfA18BuZkLRzJtxAg44QRo1izcnpudnXREIiJSlaWbiNRx94EZjUQy7u674dJLoVOnkJBsuWXSEYmISFWX7u27T5jZ2WbWwsya5L0yGpmUmo0b4ZJLwqtXLxgzRkmIiIiUD+nWiKwHbgWuId7CG/9uk4mgpPSsXh1uyx0xAgYOhP/7P6hePemoREREgnQTkcuA7dx9USaDkdI1bx4ceSRMmAD33gsXXJB0RCIiIr+VbiIyA1iTyUCkdE2aFG7PXbw41IZ07550RCIiIv8r3URkNfCFmY0G1uUV6vbd8umNN0JHZfXrw3vvwW67JR2RiIhIwdJNRF6KLynnHn0Uzj0XdtgBXn0VWrVKOiIREZHCpduz6uOZDkR+n9zc0E37TTdBt27hSboNGyYdlYiISNGKTETM7D/ufpyZfcWvd8v8wt13zlhkkraff4Z+/ULycc45oWFqjXTrukRERBJU3NfVJfFvj0wHIptm0aLQN8iHH4Zbcy+/XA+uExGRiqPIDs3cfV7893x3/yH1BZyf+fCkKD/8AHvvDZ9/Ds89B1dcoSREREQqlnR7Vj20gLLDSzMQKZkFC+CQQ0KNyDvvwLHHJh2RiIhIyRWZiJjZebF9SHsz+zLl9T3wZXELN7PBZvaTmU1KKWtiZm+a2Tfx7+a/fzOqlmXL4LDDYO5cGDUq1IqIiIhURMXViDwNHAmMiH/zXnu4+8lpLH8I0C1f2dXA2+6+PfB2HJY0rV4dOif7+mt46SUlISIiUrEV2VjV3ZcDy4ETNmXh7j7WzLLyFfcCDoz/Pw6MAa7alOVXNevXQ+/e8PHHoU3IoQVdMBMREalA0m0jUpqapzSCnQ80L2giM+tvZuPMbNzChQvLLrpyauNGOPlkeP310GnZMcckHZGIiMjvl0Qi8gt3dwronySOe9jdO7l7p2bNmpVxZOWLe+gf5Lnn4Pbb4fTTk45IRESkdCSRiCwwsxYA8e9PCcRQYbjDlVfCoEHwt7/BgAFJRyQiIlJ6kkhERgCnxf9PA15OIIYK46ab4N//hosug
n/8I+loRERESldGExEzewb4iHD7749mdiZwM3ComX0DHBKHpQAPPADXXAOnnAJ33qnOykREpPLJ6BNJ3L2wu226ZHK9lcHTT8MFF0DPnuGyTLVEW/OIiIhkhr7eyqGRI+HUU+HAA8OD7GrWTDoiERGRzFAiUs68+y706QO77w4vvwx16iQdkYiISOYoESlHxo2DI4+EbbaB116DBg2SjkhERCSzlIiUE19/Dd26QdOm8MYb4a+IiEhlp0SkHJg5M3TXXrMmvPUWtGyZdEQiIiJlI6N3zUjx5s8PSciaNaF9yLbbJh2RiIhI2VEikqClS6FrV5g3L9SE7LRT0hGJiIiULSUiCVm9Grp3h2nT4NVXYa+9ko5IRESk7CkRScC6dXD00fDJJ+FBdoccknREIiIiyVAiUsZycuCkk+DNN+Gxx+CYY5KOSEQks8aPH/+HGjVqPArsiG6SqGpygUk5OTln7bHHHgU+5FaJSBlyh3POgeefhzvugH79ko5IRCTzatSo8eiWW27ZsVmzZkurVavmSccjZSc3N9cWLlyYPX/+/EeBngVNo8y0jLjD5ZfD4MFw7bVw6aVJRyQiUmZ2bNas2QolIVVPtWrVvFmzZssJtWEFT1OG8VRp//oX3H47XHQRXHdd0tGIiJSpakpCqq547AvNN5SIlIF774W//S08yO7OO8Es6YhERETKByUiGfbkk6EWpFcvGDQIqmmPi4iUe1dfffWWpbWsgQMHbnXttdc2L2z8iSee2PqNN97YrHPnzu3Hjh1br7TWW5pGjhzZoEGDBrt26NAhu0OHDtmXX355i7xxw4cPb5iVlbVj69atd/zLX/5S4v2mr8UMGjEiNEg9+GAYNgxqqGmwiEiFcPfdd7cofqpf5ebmsnHjxk1a1+eff17/4IMPXr1JM5ehTp06rZo6deqUqVOnTvn3v/89DyAnJ4cBAwa0HjVq1PTp06dPfv7555uMHz++RM+NVyKSIaNHw3HHwR57wEsvQZ0SHRYRESkt06ZNq9W2bdsdevbs2XabbbbZoVu3btusXLmy2ogRIxoccsghvzxY48UXX2x46KGHbnv++ee3XLduXbUOHTpk9+zZsy3Adddd13z77bffYfvtt9/h+uuv/0PecrOysnY8+uijs9q1a7fDt99+W2v48OENs7OzO7Zv3z577733bpe37K+//rpu586d27dq1WqnG2644Q955Z9//nmdbbbZ5ucaKb9UN27cSO/evbMuvvjirQBeeOGFhrvuumuH7Ozsjocffvg2y5cvrwbw8ssvN+jYsWN2u3btsvv06ZO1du1aA2jZsuVO5557bqt27dpl77TTTh0nTZpUG2Dw4MGbb7/99ju0b98+u1OnTu1LY9+OGTNmszZt2qzLzs5eX6dOHT/mmGOWDB8+vHFJlqHf6Bnw2WfQsydstx2MGgUNGiQdkYhI+XDGGWw9aRKlevlhxx1ZM3gws4uaZubMmXUeeuihmV27dl3dp0+frFtvvbXZddddt+CSSy5pPXfu3BpbbbVVzuDBg5uefvrpi0488cTlQ4YM+cPUqVOnALz33nv1nn766abjx4//2t3ZY489Onbp0mXlFltssXHWrFm1Bw0a9H2XLl1mzp07t8aFF16YNWbMmKkdOnRYv2DBgup5658xY0adDz/8cNqyZcuqd+zYcccrrrhiYe3atX3EiBGNunbtujxvug0bNthRRx3VNjs7e+0tt9wyf968eTVuvPHGFmPHjp3esGHD3GuuuWbLf/7zn82vv/76+eecc07bN954Y9rOO++87uijj8669dZbm1177bU/ATRq1Chn+vTpU+69996mF1100dajR4+ecfPNN7d44403prdt23bDokWLqv/vXirahAkT6rdv3z67efPmG26//fbZnTp1+nn27Nm1WrZsuT5vmlatWq3/5JNP6pdkuaoRKWVTpkC3btCsGbzxBjRtmnREIiKy5ZZbru/atetqgFNOOWXxhx9+WL9atWocd9xxix955JEmixYtqv7555/X79Onz/L8844ZM6b+EUccsaxhw4a5jRo1yu3evfvS0aNHNwBo0aLF+i5duqyO023WuXPnlR06dFgP0Lx581+u1XTt2nVZ3bp1vUWLFjlNmjTZ8OOPP9YAeOuttxoeddRRK/KmO//889vkJSF5y/z222/rdO7cuUOHDh2yhw0b1nTWrFm1Jk6cWKdVq1brdt5553UA/fr1W/z+++//8rP3tNNOWwJw9tlnL5kwYUJ9CJdWTjrppKzbbrtti5ycnBLtv3322Wf1Dz/88OW0adOmXHDBBT/17t17uxItoAiqESlF338fnqRbq1boOXWrrZKOSESkfCmu5iJTLN/tinnD55133uLu3btvV6dOHT/yyCOX1qxZs0TLrVevXm4609WuXfuX25erV69OTk6OrVy5stqKFSuqZ2Vlbcgb16lTp1XvvfdewzVr1iyoV6+euzv77bffildeeeX71OV99NFHdYtaX7WUOyPMzAGefvrpWe+8885mI0aMaLTHHntkjx8/fsqWW275S7J00UUXtXzzzTcbAeTVBuVp0qTJL9t5/PHHLx84cGDrefPm1dh6663Xz5kzp1beuB9//PE3NSTpUI1IKZk3LyQha9eGJGTbbYufR0REysa8efNqvfXWW5sBPPXUU0322WefVQBZWVkbmjdvvuG2225r0b9//0V509eoUcPXrVtnAAcddNCqUaNGNY6JQ7VRo0ZtftBBB63Mv44DDzxw9aefftpg6tSptQBSL80U5NVXX22w3377/WY555xzzqKuXbsu79Gjx7YbNmzgwAMPXD1u3Lj6ee08VqxYUe3LL7+svcsuu/w8Z86cWnnlQ4cObbr//vv/sqyhQ4c2ARg0aNDmu+2222qAyZMn1z744INX33nnnXM333zznO+++65W6rrvueeeOXmNUfPHOmvWrBq5uSEXGT16dL3c3FyaN2+ec8ABB6yeOXNmnalTp9b6+eef7YUXXmjSu3fvZUVtd36qESkFS5bAYYfB/Pnw9tuwY6H9x4mISBKysrJ+vueee/7Qv3//ettvv/3Pl19++cK8cX379l1833331dh9991/zis76aSTFnbs2DF7xx13XDNixIjvTzzxxMW77757R4BTTjll4b777rt22rRpv/ki32qrrXLuvvvumUcfffR2ubm5NG3adMOHH374TWExjRo1qtFxxx23NH/5ddddt2DAgAHVjznmmLYvvfTS9w899NDMvn37brN+/XoD+Pvf/z5n5513Xvfggw/O7NOnz7YbN25kl112WZO6TUuXLq3erl277Fq1avmwYcO+AxgwYECrmTNn1nZ322+//Vbstddea9Pdf08++eTmgwcP/kP16tW9Tp06uUOHDv2uWrVqVKtWjdtuu21Wt27d2m3cuJETTzxxUadOnX4ufom/Mvfy39ldp06dfNy4cUmHUaCVK6FrV/j889AwtUuXpCMSEQnMbLy7d0o6jokTJ87cZZddFhU/ZWZMmzatVo8ePbb/5ptvJhc0/tRTT2292267rRkwYECZ
xpidnd1xwoQJU1Mv25SGli1b7jRu3LivW7RoUbKGIBk0ceLELXbZZZesgsapRuR3mDULjjwSJk+G555TEiIiUtHssMMOHevWrZv70EMPlXnblSlTpnxd1ussj5SIbKJPPw236K5dG2pCunZNOiIRESlI+/bt1xdWGzJ58uRKlwzMmTPnq6RjKAk1Vt0Ew4fDAQdAvXrw0UdKQkREipGbm5urp2xVUfHYF3p3kRKREnCHG2+EPn1g993hk08gOzvpqEREyr1JCxcubKRkpOrJzc21hQsXNgImFTaNLs2kad066N8fhg6Fk06CRx9Vt+0iIunIyck5a/78+Y/Onz9/R/QDuKrJBSbl5OScVdgESkTSsGgRHH00vP8+XH89/PWvYMrrRUTSsscee/wE9Ew6DimflIgUY+pU6N4d5swJT9A9/vikIxIREak8lIgU4a234NhjoXZtGDMG9tor6YhEREQqF12rK8RDD4WH1229dbhVV0mIiIhI6VMiks/GjTBwIJx7brgt94MPoE2bpKMSERGpnJSIpFi1KjRKveMOuPhiGDECGjZMOioREZHKS21EotmzQ3ftkybBfffB+ecnHZGIiEjlV+UTkdxcePddOPFEWLMGXn01PElXREREMi+xSzNm1s3MppnZDDO7uqzW6w7Tp8MDD4Q7Ypo1g4MPDp2TffihkhAREZGylEiNiJlVB+4DDgV+BD4zsxHuPiUT65szB95+G955J/z98cdQvvXW4cF1XbpAjx7QuHEm1i4iIiKFSerSTGdghrt/B2Bmw4BeQKkmIl99FZ4LM21aGG7aFA46KCQeXbrAdtuph1QREZEkJZWItARmpwz/CPwxdQIz6w/0B2jduvUmrWTrrWHbbeHss0PisfPOUE33CYmIiJQb5baxqrs/DDwM0KlTJ9+UZTRuHBqfioiISPmUVP3AHGDrlOFWsUxERESqkKQSkc+A7c2srZnVAvoCIxKKRURERBKSyKUZd88xswuB14HqwGB3n5xELCIiIpKcxNqIuPsoYFRS6xcREZHk6R4SERERSYwSEREREUmMEhERERFJjBIRERERSYy5b1JfYWXKzBYCPxQz2RbAojIIp7ypqtsNVXfbtd1Vy+/Z7jbu3qw0gxEpbRUiEUmHmY1z905Jx1HWqup2Q9Xddm131VJVt1uqDl2aERERkcQoEREREZHEVKZE5OGkA0hIVd1uqLrbru2uWqrqdksVUWnaiIiIiEjFU5lqRERERKSCUSIiIiIiiakUiYiZdTOzaWY2w8yuTjqeTWFmW5vZaDObYmaTzeySWN7EzN40s2/i381juZnZ3XGbvzSz3VOWdVqc/hszOy2lfA8z+yrOc7eZWdlv6f8ys+pmNsHMRsbhtmb2SYzzWTOrFctrx+EZcXxWyjL+HMunmdlhKeXl9twws8ZmNtzMpprZ12a2dxU53gPiOT7JzJ4xszqV8Zib2WAz+8nMJqWUZfz4FrYOkXLL3Sv0C6gOfAtsA9QCJgLZSce1CdvRAtg9/t8AmA5kA/8HXB3LrwZuif8fAbwGGLAX8EksbwJ8F/9uHv/fPI77NE5rcd7Dk97uGNdA4GlgZBz+D9A3/v8gcF78/3zgwfh/X+DZ+H92PO61gbbxfKhe3s8N4HHgrPh/LaBxZT/eQEvge6BuyrHuVxmPOfAnYHdgUkpZxo9vYevQS6/y+qoMNSKdgRnu/p27rweGAb0SjqnE3H2eu38e/18JfE340O5F+MIi/j0q/t8LGOrBx0BjM2sBHAa86e5L3H0p8CbQLY5r6O4fu7sDQ1OWlRgzawV0Bx6NwwYcDAyPk+Tf5rx9MRzoEqfvBQxz93Xu/j0wg3BelNtzw8waEb6oBgG4+3p3X0YlP95RDaCumdUA6gHzqITH3N3HAkvyFZfF8S1sHSLlUmVIRFoCs1OGf4xlFVasft4N+ARo7u7z4qj5QPP4f2HbXVT5jwWUJ+1O4EogNw43BZa5e04cTo3zl22L45fH6Uu6L8qDtsBC4LF4WepRM9uMSn683X0O8G9gFiEBWQ6Mp2occyib41vYOkTKpcqQiFQqZlYfeB641N1XpI6Lv3wqzf3WZtYD+MndxycdSwJqEKrtH3D33YDVhGr0X1S24w0Q2yv0IiRiWwGbAd0SDSohZXF8K+M5JJVPZUhE5gBbpwy3imUVjpnVJCQhT7n7C7F4QayGJf79KZYXtt1FlbcqoDxJ+wI9zWwmoQr9YOAuQrV0jThNapy/bFsc3whYTMn3RXnwI/Cju38Sh4cTEpPKfLwBDgG+d/eF7r4BeIFwHlSFYw5lc3wLW4dIuVQZEpHPgO1jq/tahAZtIxKOqcTide9BwNfufnvKqBFAXkv504CXU8pPja3t9wKWx+rY14GuZrZ5/PXZFXg9jlthZnvFdZ2asqxEuPuf3b2Vu2cRjts77n4SMBo4Nk6Wf5vz9sWxcXqP5X3jHRZtge0JDfnK7bnh7vOB2WbWPhZ1AaZQiY93NAvYy8zqxbjytrvSH/OoLI5vYesQKZ+Sbi1bGi9Ci/PphNby1yQdzyZuw36EKtQvgS/i6wjC9fC3gW+At4AmcXoD7ovb/BXQKWVZZxAa780ATk8p7wRMivPcS+xZtzy8gAP59a6ZbQhfKjOA54DasbxOHJ4Rx2+TMv81cbumkXJ3SHk+N4BdgXHxmL9EuCui0h9v4B/A1BjbE4Q7XyrdMQeeIbSD2UCoATuzLI5vYevQS6/y+lIX7yIiIpKYynBpRkRERCooJSIiIiKSGCUiIiIikhglIiIiIpIYJSIiIiKSGCUiUqmZ2UYz+8LMJprZ52a2TzHTNzaz89NY7hgz67SJMY0ys8abMq+ISGWjREQqu7Xuvqu77wL8GbipmOkbE574mjHufoSHB9yJiFR5SkSkKmkILIXwTB8zezvWknxlZnlPaL0Z2DbWotwap70qTjPRzG5OWV4fM/vUzKab2f75V2ZmLcxsbFzWpLxpzGymmW1hZufGcV+Y2fdmNjqO72pmH8XYnovPHxIRqZTUoZlUama2kdBTZR2gBXCwu4/PewS9u68wsy2AjwndhLch9PC6Y5z/cOBvwCHuvsbMmrj7EjMbA4x398vM7AhgoLsfkm/dlwF13P1fZlY9rm9lfLZOJ3dfFKerCbwD/B/wEeH5K4e7+2ozu4rQy+j1mdxPIiJJqVH8JCIV2lp33xXAzPYGhprZjoQutW80sz8BuYRHqBf0uPRDgMfcfQ2Auy9JGZf3YMLxQFYB834GDI6Jxkvu/kUhMd5FeIbKK/GJxNnAB+ERItQiJCciIpWSEhGpMtz9o1j70YzwPJJmwB7uviHWUtQp4SLXxb8bKeC95O5jY6LTHRhiZre7+9DUacysH6EW5sK8IuBNdz+hhLGIiFRIaiMiVYaZdQCqEx4j3wj4KSYhBxGSAYCVQIOU2d4ETjezenEZTUqwvjbAAnd/BHgU2D3f+D2Ay4GT3T03Fn8M7Gtm28VpNjOzdiXbUhGRikM1IlLZ1TWzvEsiBpzm7hvN7CngFTP7ivAE3KkA7r7YzD4ws0nAa+5
+hZntCowzs/XAKOAvaa77QOAKM9sArCI8qj3VhUATYHS8DDPO3c+KtSTPmFntON1fCU+TFRGpdNRYVURERBKjSzMiIiKSGCUiIiIikhglIiIiIpIYJSIiIiKSGCUiIiIikhglIiIiIpIYJSIiIiKSmP8Hrauy2dHrJQoAAAAASUVORK5CYII=\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "plot_relative_time(experiments_eq, results, [50], max_batch_size=max_batch_size)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Conclusion\n", + "\n", + "As illustrated in the experiments, KeOps allows you to drastically speed up and scale up drift detection to larger datasets without running into memory issues. The speed benefit of KeOps over the PyTorch (or TensorFlow) MMD detectors decrease as the number of features increases. Note though that it is not advised to apply the (untrained) MMD detector to very high-dimensional data in the first place and that we can apply dimensionality reduction via the deep kernel for the learned kernel MMD detector." + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python [conda env:detect] *", + "language": "python", + "name": "conda-env-detect-py" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.6" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/doc/source/overview/config_files.md b/doc/source/overview/config_files.md index 3802524c1..669e60c76 100644 --- a/doc/source/overview/config_files.md +++ b/doc/source/overview/config_files.md @@ -461,9 +461,6 @@ artefacts before attempting the sometimes time-consuming operation of instantiat %```python %{'name': {'title': 'Name', 'type': 'string'}, % 'version': {'title': 'Version', 'default': '0.8.1dev', 'type': 'string'}, -% 'config_spec': {'title': 'Config Spec', -% 'default': '0.1.0dev', -% 'type': 'string'}, % 'backend': {'title': 'Backend', % 'default': 'tensorflow', % 'enum': ['tensorflow', 'pytorch'], diff --git a/doc/source/overview/getting_started.md b/doc/source/overview/getting_started.md index 922a17543..9e987686f 100644 --- a/doc/source/overview/getting_started.md +++ b/doc/source/overview/getting_started.md @@ -155,6 +155,32 @@ The TensorFlow installation is required to use the following detectors: ``` ```` +````{tab-item} KeOps +:sync: label-keops +:class-label: sd-pt-0 + +```{div} sd-mb-1 +Installation with [KeOps](https://www.kernel-operations.io) backend. +``` + +```bash +pip install alibi-detect[keops] +``` + +```{div} sd-mb-1 +The KeOps installation is required to use the KeOps backend for the following detectors: +- [MMDDrift](../cd/methods/mmddrift.ipynb) +``` + +```{note} +KeOps requires a C++ compiler compatible with `std=c++11`, for example `g++ >=7` or `clang++ >=8`, and a +[Cuda toolkit](https://developer.nvidia.com/cuda-toolkit) installation. For more detailed version requirements +and testing instructions for KeOps, see the +[KeOps docs](https://www.kernel-operations.io/keops/python/installation.html). **Currently, the KeOps backend is +only officially supported on Linux.** +``` +```` + ````{tab-item} Prophet :class-label: sd-pt-0 @@ -199,9 +225,10 @@ mamba install -c conda-forge alibi-detect [Alibi Detect](https://github.com/SeldonIO/alibi-detect) is an open source Python library focused on **outlier**, **adversarial** and **drift** detection. The package aims to cover both -online and offline detectors for tabular data, text, images and time series. -Both **TensorFlow** and **PyTorch** backends are supported for drift detection. Alibi-Detect does not install these as -default. 
See [installation options](#installation) for more details. +online and offline detectors for tabular data, text, images and time series. **TensorFlow**, **PyTorch** +and (where applicable) [KeOps](https://www.kernel-operations.io/keops/index.html) backends are supported +for drift detection. Alibi-Detect does not install these as default. See [installation options](#installation) +for more details. To get a list of respectively the latest outlier, adversarial and drift detection algorithms, you can type: diff --git a/doc/source/overview/saving.md b/doc/source/overview/saving.md index 07a075e59..38095e0c0 100644 --- a/doc/source/overview/saving.md +++ b/doc/source/overview/saving.md @@ -56,8 +56,8 @@ for the remaining detectors is in the [Roadmap](roadmap.md). | Detector | Legacy save/load | Config save/load | |:-------------------------------------------------------------------------------|:----------------:|:----------------:| | [Kolmogorov-Smirnov](../cd/methods/ksdrift.ipynb) | ✅ | ✅ | -| [Cramér-von Mises](../cd/methods/cvmdrift.ipynb) | ✅ | ✅ | -| [Fisher's Exact Test](../cd/methods/fetdrift.ipynb) | ✅ | ✅ | +| [Cramér-von Mises](../cd/methods/cvmdrift.ipynb) | ❌ | ✅ | +| [Fisher's Exact Test](../cd/methods/fetdrift.ipynb) | ❌ | ✅ | | [Least-Squares Density Difference](../cd/methods/lsdddrift.ipynb) | ❌ | ✅ | | [Maximum Mean Discrepancy](../cd/methods/mmddrift.ipynb) | ✅ | ✅ | | [Learned Kernel MMD](../cd/methods/learnedkerneldrift.ipynb) | ❌ | ✅ | @@ -98,5 +98,46 @@ for the remaining detectors is in the [Roadmap](roadmap.md). ```` ```{note} -Saving/loading of detectors using PyTorch models and/or a PyTorch backend is currently not supported. +For detectors with backends, or using preprocessing, save/load support is currently limited to TensorFlow models and backends. ``` + +(supported_models)= +## Supported ML models + +Alibi Detect drift detectors offer the option to perform [preprocessing](../cd/background.md#input-preprocessing) +with user-defined machine learning models: + +```python +model = ... # TensorFlow model; tf.keras.Model or tf.keras.Sequential +preprocess_fn = partial(preprocess_drift, model=model, batch_size=128) +cd = MMDDrift(x_ref, backend='tensorflow', p_val=.05, preprocess_fn=preprocess_fn) +``` + +Additionally, some detectors are built upon models directly, +for example the [Classifier](../cd/methods/classifierdrift.ipynb) drift detector requires a `model` to be passed +as an argument: + +```python +cd = ClassifierDrift(x_ref, model, p_val=.05, preds_type='probs') +``` + +In order for a detector to be saveable and loadable, any models contained within it (or referenced within a +[detector configuration file](config_files.md#specifying-artefacts)) must fall within the family of supported models +documented below. + +### TensorFlow models + +Alibi Detect supports serialization of any TensorFlow model that can be serialized to the +[HDF5](https://www.tensorflow.org/guide/keras/save_and_serialize#keras_h5_format) format. +Custom objects should be pre-registered with +[register_keras_serializable](https://www.tensorflow.org/api_docs/python/tf/keras/utils/register_keras_serializable). + +### Scikit-learn + +Scikit-learn models are serialized using [joblib](https://joblib.readthedocs.io/en/latest/persistence.html). +Any scikit-learn model that is a subclass of {py:class}`sklearn.base.BaseEstimator` is supported, including +[xgboost](https://xgboost.readthedocs.io/en/latest/python/python_api.html#module-xgboost.sklearn) models following +the scikit-learn API. 
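The scikit-learn support described above can be sanity-checked end to end. Below is a minimal sketch of wrapping a scikit-learn model in a drift detector and round-tripping it through `save_detector`/`load_detector`; it assumes the `ClassifierDrift` sklearn backend behaves as documented, and the `my_detector` path is purely illustrative.

```python
import numpy as np
from sklearn.ensemble import RandomForestClassifier

from alibi_detect.cd import ClassifierDrift
from alibi_detect.saving import save_detector, load_detector

# Reference data and a scikit-learn model (any sklearn.base.BaseEstimator subclass)
x_ref = np.random.randn(1000, 10)
model = RandomForestClassifier(n_estimators=100)

# Classifier-based drift detector built directly on the sklearn model
cd = ClassifierDrift(x_ref, model, backend='sklearn', p_val=.05, preds_type='probs')

# The sklearn model inside the detector is serialized with joblib as part of the saved detector
save_detector(cd, 'my_detector')
cd = load_detector('my_detector')
```

If a custom estimator fails this round trip, it most likely falls outside the `BaseEstimator` family described above.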
+ + +%### PyTorch diff --git a/examples/README.md b/examples/README.md index 409059976..c0f053119 100644 --- a/examples/README.md +++ b/examples/README.md @@ -1,7 +1,7 @@ Example notebooks are stored in `../doc/source/examples/`. This directory contains symbolic links to the notebooks, which serve as shortcuts. For example, -the `cd_mol.ipynb` notebook can be opened by running `jupyer-notebook examples/cd_mol.ipynb` in +the `cd_mol.ipynb` notebook can be opened by running `jupyter-notebook examples/cd_mol.ipynb` in the root directory. Note: The symbolic links might not work on some Windows versions. diff --git a/examples/cd_mmd_keops.ipynb b/examples/cd_mmd_keops.ipynb new file mode 120000 index 000000000..fddcc9f46 --- /dev/null +++ b/examples/cd_mmd_keops.ipynb @@ -0,0 +1 @@ +../doc/source/examples/cd_mmd_keops.ipynb \ No newline at end of file diff --git a/licenses/license_info.csv b/licenses/license_info.csv index 6de26e1de..39e8fda36 100644 --- a/licenses/license_info.csv +++ b/licenses/license_info.csv @@ -49,4 +49,4 @@ "transformers","4.23.1","Apache Software License" "typing-extensions","4.4.0","Python Software Foundation License" "urllib3","1.26.12","MIT License" -"zipp","3.9.0","MIT License" \ No newline at end of file +"zipp","3.9.0","MIT License" diff --git a/requirements/dev.txt b/requirements/dev.txt index 114f0b9f4..1a4d10dee 100644 --- a/requirements/dev.txt +++ b/requirements/dev.txt @@ -6,7 +6,7 @@ types-requests>=2.25, <3.0 types-toml>=0.10, <1.0 # testing pytest>=5.3.5, <8.0.0 -pytest-cov>=2.6.1, <4.0.0 +pytest-cov>=2.6.1, <5.0.0 pytest-xdist>=1.28.0, <3.0.0 # for distributed testing, currently unused (see setup.cfg) pytest_cases>=3.6.8, <4.0.0 pytest-randomly>=3.5.0, <4.0.0 @@ -14,8 +14,8 @@ pytest-custom_exit_code>=0.3.0 # for notebook tests pytest-timeout>=1.4.2, <3.0.0 # for notebook tests jupytext>=1.12.0, <2.0.0 # for notebook tests ipykernel>=5.1.0, <7.0.0 # for notebook tests -nbconvert>=6.0.7, <7.0.0 # for notebook tests -ipywidgets>=7.6.5, <8.0.0 # for notebook tests +nbconvert>=6.0.7, <8.0.0 # for notebook tests +ipywidgets>=7.6.5, <9.0.0 # for notebook tests alibi-testing @ git+https://github.com/SeldonIO/alibi-testing@master#egg=alibi-testing # pre-trained models for testing # other pre-commit>=1.20.0, <3.0.0 @@ -23,3 +23,4 @@ tox>=3.21.0, <4.0.0 # used to generate licence info via `make licenses` twine>3.2.0, <4.0.0 # 4.x causes deps clashes with testing/requirements.txt, as requires rich>=12.0.0 -> requires typing-extensions>=4.0.0 -> too high for spacy and thinc! packaging>=19.0, <22.0 # Used to check scipy version for CVMDrift test. Can be removed once python 3.6 support dropped (and scipy lower bound >=1.7.0). 
codecov>=2.0.15, <3.0.0 +xgboost>=1.3.2, <2.0.0 # Install for use in testing since we support serialization of xgboost models under the sklearn API diff --git a/requirements/docs.txt b/requirements/docs.txt index 435aadc29..5895093aa 100644 --- a/requirements/docs.txt +++ b/requirements/docs.txt @@ -1,13 +1,13 @@ # dependencies for building docs, separate from dev.txt as this is also used for builds on readthedocs.org # core dependencies sphinx>=4.2.0, <5.1.0 -sphinx-autodoc-typehints>=1.12.0, <2.0.0 +sphinx-autodoc-typehints>=1.12.0, <1.19.3 # limited due to https://github.com/tox-dev/sphinx-autodoc-typehints/issues/259 and 260 sphinx-rtd-theme>=1.0.0, <2.0.0 sphinxcontrib-apidoc>=0.3.0, <0.4.0 sphinxcontrib-bibtex>=2.1.0, <3.0.0 myst-parser>=0.14, <0.19 nbsphinx>=0.8.5, <0.9.0 -sphinx_design==0.2.0 # Pinning for now as sphinx_design is v.new and still in flux. +sphinx_design==0.3.0 # Pinning for now as sphinx_design is v.new and still in flux. ipykernel>=5.1.0, <7.0.0 # required for executing notebooks via nbsphinx ipython>=7.2.0, <9.0.0 # required for executing notebooks nbsphinx # pandoc diff --git a/setup.cfg b/setup.cfg index 67491ae8d..926613e4d 100644 --- a/setup.cfg +++ b/setup.cfg @@ -11,7 +11,6 @@ addopts = --tb native -W ignore --cov=alibi_detect - --cov-append --randomly-dont-reorganize --randomly-seed=0 #-n auto @@ -44,6 +43,7 @@ envlist= tensorflow torch prophet + keops all # tox test environment for generating licenses @@ -113,6 +113,17 @@ extras= commands = {env:COMMAND:pytest --no-cov alibi_detect/tests/test_dep_management.py --opt-dep=prophet} +# tox test environment for testing keops optional dependency imports +[testenv:keops] +basepython = python +deps = pytest + pytest-cov + pytest-randomly +extras= + keops +commands = + {env:COMMAND:pytest --no-cov alibi_detect/tests/test_dep_management.py --opt-dep=keops} + # environment for testing imports with all optional dependencies installed [testenv:all] basepython = python diff --git a/setup.py b/setup.py index c56e392a0..1385d3368 100644 --- a/setup.py +++ b/setup.py @@ -2,7 +2,7 @@ def readme(): - with open("README.md") as f: + with open("README.md", encoding="utf-8") as f: return f.read() @@ -11,24 +11,25 @@ def readme(): extras_require = { "prophet": [ - "fbprophet>=0.5, <0.7", - "holidays==0.9.11", - "pystan<3.0" + "prophet>=1.1.0, <2.0.0", ], "torch": [ "torch>=1.7.0, <1.13.0" ], # https://github.com/SeldonIO/alibi-detect/issues/375 and 387 "tensorflow": [ - "tensorflow_probability>=0.8.0, <0.18.0", - "tensorflow>=2.2.0, !=2.6.0, !=2.6.1, <2.10.0", # https://github.com/SeldonIO/alibi-detect/issues/375 and 387 + "tensorflow_probability>=0.8.0, <0.19.0", + "tensorflow>=2.2.0, !=2.6.0, !=2.6.1, <2.11.0", # https://github.com/SeldonIO/alibi-detect/issues/375 and 387 ], - 'all': [ - "fbprophet>=0.5, <0.7", - "holidays==0.9.11", - "pystan<3.0", - "tensorflow_probability>=0.8.0, <0.18.0", - "tensorflow>=2.2.0, !=2.6.0, !=2.6.1, <2.10.0", # https://github.com/SeldonIO/alibi-detect/issues/375 and 387 + "keops": [ + "pykeops>=2.0.0, <2.2.0", + "torch>=1.7.0, <1.13.0" + ], + "all": [ + "prophet>=1.1.0, <2.0.0", + "tensorflow_probability>=0.8.0, <0.19.0", + "tensorflow>=2.2.0, !=2.6.0, !=2.6.1, <2.11.0", # https://github.com/SeldonIO/alibi-detect/issues/375 and 387 + "pykeops>=2.0.0, <2.2.0", "torch>=1.7.0, <1.13.0" ], } @@ -63,7 +64,7 @@ def readme(): "pydantic>=1.8.0, <2.0.0", "toml>=0.10.1, <1.0.0", # STC, see https://discuss.python.org/t/adopting-recommending-a-toml-parser/4068 "catalogue>=2.0.0, <3.0.0", - "numba>=0.50.0, 
!=0.54.0, <0.56.0", # Avoid 0.54 due to: https://github.com/SeldonIO/alibi/issues/466 + "numba>=0.50.0, !=0.54.0, <0.57.0", # Avoid 0.54 due to: https://github.com/SeldonIO/alibi/issues/466 "typing-extensions>=3.7.4.3" ], extras_require=extras_require, diff --git a/testing/test_notebooks.py b/testing/test_notebooks.py index 48a94c264..d885f4c9c 100644 --- a/testing/test_notebooks.py +++ b/testing/test_notebooks.py @@ -38,6 +38,7 @@ 'cd_context_20newsgroup.ipynb', 'cd_context_ecg.ipynb', 'cd_text_imdb.ipynb', + 'cd_mmd_keops.ipynb', # the following requires a k8s cluster 'alibi_detect_deploy.ipynb', # the following require downloading large datasets
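The packaging changes above (the new `keops` extra in `setup.py`, the dedicated tox environment, and the listing of `cd_mmd_keops.ipynb` alongside other resource-heavy notebooks in `testing/test_notebooks.py`) all serve the KeOps MMD backend introduced earlier. A minimal smoke test on a Linux machine, assuming `backend='keops'` is the option name exposed by this changeset and using purely synthetic data, could look like:

```python
import numpy as np
from alibi_detect.cd import MMDDrift

# Sample sizes at which the dense PyTorch/TensorFlow kernel matrices become
# memory-hungry, but which KeOps evaluates lazily
x_ref = np.random.randn(10_000, 50).astype(np.float32)
x_test = np.random.randn(10_000, 50).astype(np.float32)

# Requires `pip install alibi-detect[keops]`; currently Linux-only (see getting_started.md)
cd = MMDDrift(x_ref, backend='keops', p_val=.05)
preds = cd.predict(x_test)
print(preds['data']['is_drift'], preds['data']['p_val'])
```

Since the p-value comes from a permutation test over the combined reference and test sets, the `predict` call may take a little while on CPU-only hosts.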