diff --git a/autofit/__init__.py b/autofit/__init__.py
index a02199d53..4852f150a 100644
--- a/autofit/__init__.py
+++ b/autofit/__init__.py
@@ -57,7 +57,7 @@
 from .mapper.prior_model.annotation import AnnotationPriorModel
 from .mapper.prior_model.collection import Collection
 from .mapper.prior_model.prior_model import Model
-from .mapper.prior_model.prior_model import Model
+from .mapper.prior_model.array import Array
 from .non_linear.search.abstract_search import NonLinearSearch
 from .non_linear.analysis.visualize import Visualizer
 from .non_linear.analysis.analysis import Analysis
diff --git a/autofit/mapper/model_object.py b/autofit/mapper/model_object.py
index aea48e872..05be46ef7 100644
--- a/autofit/mapper/model_object.py
+++ b/autofit/mapper/model_object.py
@@ -150,6 +150,7 @@ def from_dict(
         from autofit.mapper.prior_model.collection import Collection
         from autofit.mapper.prior_model.prior_model import Model
         from autofit.mapper.prior.abstract import Prior
+        from autofit.mapper.prior.gaussian import GaussianPrior
         from autofit.mapper.prior.tuple_prior import TuplePrior
         from autofit.mapper.prior.arithmetic.compound import Compound
         from autofit.mapper.prior.arithmetic.compound import ModifiedPrior
@@ -234,7 +235,10 @@ def get_class_path():
                 f"Could not find type for class path {class_path}. Defaulting to Instance placeholder."
             )
             instance = ModelInstance()
+        elif type_ == "array":
+            from autofit.mapper.prior_model.array import Array
+            return Array.from_dict(d)
         else:
             try:
                 return Prior.from_dict(d, loaded_ids=loaded_ids)
@@ -276,6 +280,7 @@ def dict(self) -> dict:
         from autofit.mapper.prior_model.collection import Collection
         from autofit.mapper.prior_model.prior_model import Model
         from autofit.mapper.prior.tuple_prior import TuplePrior
+        from autofit.mapper.prior_model.array import Array
 
         if isinstance(self, Collection):
             type_ = "collection"
@@ -285,6 +290,8 @@ def dict(self) -> dict:
             type_ = "model"
         elif isinstance(self, TuplePrior):
             type_ = "tuple_prior"
+        elif isinstance(self, Array):
+            type_ = "array"
         else:
             raise AssertionError(
                 f"{self.__class__.__name__} cannot be serialised to dict"
diff --git a/autofit/mapper/prior/abstract.py b/autofit/mapper/prior/abstract.py
index fd8669fc3..a1e3c30db 100644
--- a/autofit/mapper/prior/abstract.py
+++ b/autofit/mapper/prior/abstract.py
@@ -1,5 +1,4 @@
 import itertools
-import os
 import random
 from abc import ABC, abstractmethod
 from copy import copy
@@ -115,7 +114,6 @@ def factor(self):
         return self.message.factor
 
     def assert_within_limits(self, value):
-
         if jax_wrapper.use_jax:
             return
diff --git a/autofit/mapper/prior_model/array.py b/autofit/mapper/prior_model/array.py
new file mode 100644
index 000000000..c37c786b2
--- /dev/null
+++ b/autofit/mapper/prior_model/array.py
@@ -0,0 +1,215 @@
+from typing import Tuple, Dict, Optional, Union
+
+from autoconf.dictable import from_dict
+from .abstract import AbstractPriorModel
+from autofit.mapper.prior.abstract import Prior
+import numpy as np
+
+from autofit.jax_wrapper import register_pytree_node_class
+
+
+@register_pytree_node_class
+class Array(AbstractPriorModel):
+    def __init__(
+        self,
+        shape: Tuple[int, ...],
+        prior: Optional[Prior] = None,
+    ):
+        """
+        An array of priors.
+
+        Parameters
+        ----------
+        shape
+            The shape of the array, which may have any number of dimensions.
+        prior
+            The prior of every entry in the array.
+        """
+        super().__init__()
+        self.shape = shape
+        self.indices = list(np.ndindex(*shape))
+
+        if prior is not None:
+            for index in self.indices:
+                self[index] = prior.new()
+
+    @staticmethod
+    def _make_key(index: Union[int, Tuple[int, ...]]) -> str:
+        """
+        Make a key for the prior.
+
+        This is so an index (e.g. (1, 2)) can be used to access a
+        prior (e.g. prior_1_2).
+
+        Parameters
+        ----------
+        index
+            The index of an element in an array.
+
+        Returns
+        -------
+        The attribute name for the prior.
+        """
+        if isinstance(index, int):
+            suffix = str(index)
+        else:
+            suffix = "_".join(map(str, index))
+        return f"prior_{suffix}"
+
+    def _instance_for_arguments(
+        self,
+        arguments: Dict[Prior, float],
+        ignore_assertions: bool = False,
+    ) -> np.ndarray:
+        """
+        Create an array where the prior at each index is replaced with a
+        concrete value.
+
+        Parameters
+        ----------
+        arguments
+            The arguments to replace the priors with.
+        ignore_assertions
+            Whether to ignore assertions in the priors.
+
+        Returns
+        -------
+        The array with the priors replaced.
+        """
+        array = np.zeros(self.shape)
+        for index in self.indices:
+            value = self[index]
+            try:
+                value = value.instance_for_arguments(
+                    arguments,
+                    ignore_assertions,
+                )
+            except AttributeError:
+                pass
+
+            array[index] = value
+        return array
+
+    def __setitem__(
+        self,
+        index: Union[int, Tuple[int, ...]],
+        value: Union[float, Prior],
+    ):
+        """
+        Set the value at an index.
+
+        Parameters
+        ----------
+        index
+            The index of the prior.
+        value
+            The new value.
+        """
+        setattr(
+            self,
+            self._make_key(index),
+            value,
+        )
+
+    def __getitem__(
+        self,
+        index: Union[int, Tuple[int, ...]],
+    ) -> Union[float, Prior]:
+        """
+        Get the value at an index.
+
+        Parameters
+        ----------
+        index
+            The index of the value.
+
+        Returns
+        -------
+        The value at the index.
+        """
+        return getattr(
+            self,
+            self._make_key(index),
+        )
+
+    @classmethod
+    def from_dict(
+        cls,
+        d,
+        reference: Optional[Dict[str, str]] = None,
+        loaded_ids: Optional[dict] = None,
+    ) -> "Array":
+        """
+        Create an array from a dictionary.
+
+        Parameters
+        ----------
+        d
+            The dictionary.
+        reference
+            A dictionary of references.
+        loaded_ids
+            A dictionary of loaded ids.
+
+        Returns
+        -------
+        The array.
+        """
+        arguments = d["arguments"]
+        shape = from_dict(arguments["shape"])
+        array = cls(shape)
+        for key, value in arguments.items():
+            if key.startswith("prior"):
+                setattr(array, key, from_dict(value))
+
+        return array
+
+    def tree_flatten(self):
+        """
+        Flatten this array model as a PyTree.
+        """
+        members = [self[index] for index in self.indices]
+        return members, (self.shape,)
+
+    @classmethod
+    def tree_unflatten(cls, aux_data, children):
+        """
+        Unflatten a PyTree into an array model.
+        """
+        (shape,) = aux_data
+        instance = cls(shape)
+        for index, child in zip(instance.indices, children):
+            instance[index] = child
+
+        return instance
+
+    @property
+    def prior_class_dict(self):
+        return {
+            **{
+                prior: cls
+                for prior_model in self.direct_prior_model_tuples
+                for prior, cls in prior_model[1].prior_class_dict.items()
+            },
+            **{prior: np.ndarray for _, prior in self.direct_prior_tuples},
+        }
+
+    def gaussian_prior_model_for_arguments(self, arguments: Dict[Prior, Prior]):
+        """
+        Returns a new Array with a set of Gaussian priors based on
+        tuples provided by a previous nonlinear search.
+
+        Parameters
+        ----------
+        arguments
+            Tuples providing the mean and sigma of gaussians
+
+        Returns
+        -------
+        A new Array populated with Gaussian priors
+        """
+        new_array = Array(self.shape)
+        for index in self.indices:
+            new_array[index] = self[index].gaussian_prior_model_for_arguments(arguments)
+
+        return new_array
diff --git a/docs/cookbooks/model.rst b/docs/cookbooks/model.rst
index 1f9081d75..84590ccca 100644
--- a/docs/cookbooks/model.rst
+++ b/docs/cookbooks/model.rst
@@ -10,6 +10,8 @@ This cookbook provides an overview of basic model composition tools.
 
 **Contents:**
 
+**Models:**
+
 It first describes how to use the ``af.Model`` object to define models with a single model component from single
 Python classes, with the following sections:
 
@@ -21,6 +23,8 @@ Python classes, with the following sections:
 - **Tuple Parameters (Model)**: Defining model components with parameters that are tuples.
 - **Json Output (Model)**: Output a model in human readable text via a .json file and loading it back again.
 
+**Collections:**
+
 It then describes how to use the ``af.Collection`` object to define models with many model components from multiple
 Python classes, with the following sections:
 
@@ -31,6 +35,19 @@ Python classes, with the following sections:
 - **Json Output (Collection)**: Output a collection in human readable text via a .json file and loading it back again.
 - **Extensible Models (Collection)**: Using collections to extend models with new model components, including the use of Python dictionaries and lists.
 
+**Arrays:**
+
+The cookbook next describes how to compose models using NumPy arrays via the ``af.Array`` object, where each entry of
+the array is a free parameter, therefore offering maximum flexibility over the number of free parameters. This has
+the following sections:
+
+ - **Model Composition (af.Array)**: Composing models using NumPy arrays and ``af.Array``.
+ - **Prior Customization (af.Array)**: How to customize the priors of a NumPy array model.
+ - **Instances (af.Array)**: Create an instance of a NumPy array model via input parameters.
+ - **Model Customization (af.Array)**: Customize a NumPy array model (e.g. fixing parameters or linking them to one another).
+ - **Json Output (af.Array)**: Output a NumPy array model in human readable text via a .json file and loading it back again.
+ - **Extensible Models (af.Array)**: Using NumPy arrays to compose models with a flexible number of parameters.
+
 Python Class Template
 ---------------------
 
@@ -718,7 +735,7 @@ Python dictionaries can easily be saved to hard disk as a ``.json`` file.
 
 This means we can save any **PyAutoFit** model to hard-disk.
 
-Checkout the file ``autofit_workspace/*/model/jsons/model.json`` to see the model written as a .json.
+Checkout the file ``autofit_workspace/*/model/jsons/collection.json`` to see the model written as a .json.
 
 .. code-block:: python
 
@@ -910,6 +927,321 @@ This gives the following output:
 
     normalization (Gaussian) = 5.0
     sigma (Gaussian) = 6.0
 
+Model Composition (af.Array)
+----------------------------
+
+Models can be composed using NumPy arrays, where each element of the array is a free parameter.
+
+This offers a lot more flexibility than using ``Model`` and ``Collection`` objects, as the number of parameters in the
+model is chosen on initialization via the input of the ``shape`` attribute.
+
+For many use cases, this flexibility is key to ensuring model composition is as easy as possible, for example when
+part of the model being fitted is a matrix of parameters which may change shape depending on the dataset being
+fitted.
+
+To compose models using NumPy arrays, we use the ``af.Array`` object.
+
+.. code-block:: python
+
+    model = af.Array(
+        shape=(2, 2),
+        prior=af.GaussianPrior(mean=0.0, sigma=1.0),
+    )
+
+Each element of the array is a free parameter, which for ``shape=(2,2)`` means the model has 4 free parameters.
+
+.. code-block:: python
+
+    print(f"Model Total Free Parameters = {model.total_free_parameters}")
+
+The ``info`` attribute of the model gives information on all of the parameters and their priors.
+
+.. code-block:: python
+
+    print(model.info)
+
+This gives the following output:
+
+.. code-block:: bash
+
+    Total Free Parameters = 4
+
+    model          Array (N=4)
+        indices    list (N=0)
+
+    shape          (2, 2)
+    indices
+        0          (0, 0)
+        1          (0, 1)
+        2          (1, 0)
+        3          (1, 1)
+    prior_0_0      GaussianPrior [124], mean = 0.0, sigma = 1.0
+    prior_0_1      GaussianPrior [125], mean = 0.0, sigma = 1.0
+    prior_1_0      GaussianPrior [126], mean = 0.0, sigma = 1.0
+    prior_1_1      GaussianPrior [127], mean = 0.0, sigma = 1.0
+
+Prior Customization (af.Array)
+------------------------------
+
+The prior of every parameter in the array is set via the ``prior`` input above.
+
+NumPy array models do not currently support default priors via config files, so all priors must be manually specified.
+
+The prior of every parameter in the array can be customized via normal NumPy array indexing:
+
+.. code-block:: python
+
+    model = af.Array(shape=(2, 2), prior=af.GaussianPrior(mean=0.0, sigma=1.0))
+
+    model[0, 0] = af.UniformPrior(lower_limit=0.0, upper_limit=1.0)
+    model[0, 1] = af.LogUniformPrior(lower_limit=1e-4, upper_limit=1e4)
+    model[1, 0] = af.GaussianPrior(mean=0.0, sigma=2.0)
+
+The ``info`` attribute shows the customized priors.
+
+.. code-block:: python
+
+    print(model.info)
+
+The output is as follows:
+
+.. code-block:: bash
+
+    Total Free Parameters = 4
+
+    model          Array (N=4)
+        indices    list (N=0)
+
+    shape          (2, 2)
+    indices
+        0          (0, 0)
+        1          (0, 1)
+        2          (1, 0)
+        3          (1, 1)
+    prior_0_0      UniformPrior [133], lower_limit = 0.0, upper_limit = 1.0
+    prior_0_1      LogUniformPrior [134], lower_limit = 0.0001, upper_limit = 10000.0
+    prior_1_0      GaussianPrior [135], mean = 0.0, sigma = 2.0
+    prior_1_1      GaussianPrior [132], mean = 0.0, sigma = 1.0
+
+Instances (af.Array)
+--------------------
+
+Instances of NumPy array model components can be created, where an input ``vector`` of parameters is mapped to create
+an instance of the model, which for ``af.Array`` is a NumPy array.
+
+If the priors of the NumPy array are not customized, the ordering of parameters runs from element [0,0] to [0,1] to
+[1,0] to [1,1], as shown by the ``paths`` attribute.
+
+.. code-block:: python
+
+    model = af.Array(
+        shape=(2, 2),
+        prior=af.GaussianPrior(mean=0.0, sigma=1.0),
+    )
+
+    print(model.paths)
+
+The output is as follows:
+
+.. code-block:: bash
+
+    ['prior_0_0', 'prior_0_1', 'prior_1_0', 'prior_1_1']
+
+An instance can then be created by passing a vector of parameters to the model via the ``instance_from_vector``
+method.
+
+The ``instance`` created is a NumPy array, where each element is the value passed in the vector.
+
+.. code-block:: python
+
+    instance = model.instance_from_vector(vector=[0.0, 1.0, 2.0, 3.0])
+
+    print("\nModel Instance:")
+    print(instance)
+
+The output is as follows:
+
+.. code-block:: bash
+
+    Model Instance:
+    [[0. 1.]
+     [2. 3.]]
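+
+Because the instance is a plain NumPy array, it can be used directly in numerical code, for example inside the
+``log_likelihood_function`` of an ``Analysis`` class. The following is a minimal sketch of this pattern, mirroring
+the ``Analysis`` class used in **PyAutoFit**'s tests of ``af.Array``; the 2x2 ``data`` values are arbitrary
+placeholders for a real dataset.
+
+.. code-block:: python
+
+    import numpy as np
+
+    class Analysis(af.Analysis):
+        def log_likelihood_function(self, instance):
+            # instance is a np.ndarray, so it can be compared to the data directly.
+            data = np.array([[0.1, 0.2], [0.3, 0.4]])  # placeholder values
+            return -float(np.mean((data - instance) ** 2))
+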
+Prior customization changes the order of the parameters. Therefore, if you customize the priors of the NumPy array,
+you must check the ordering of the parameters in the ``paths`` attribute before passing a vector to the
+``instance_from_vector`` method.
+
+.. code-block:: python
+
+    model[0, 0] = af.UniformPrior(lower_limit=0.0, upper_limit=1.0)
+    model[0, 1] = af.LogUniformPrior(lower_limit=1e-4, upper_limit=1e4)
+    model[1, 0] = af.GaussianPrior(mean=0.0, sigma=2.0)
+
+    print(model.paths)
+
+The output is as follows:
+
+.. code-block:: bash
+
+    [('prior_1_1',), ('prior_0_0',), ('prior_0_1',), ('prior_1_0',)]
+
+If we create an instance from a vector and print its values for this customized model:
+
+.. code-block:: python
+
+    instance = model.instance_from_vector(vector=[0.0, 1.0, 2.0, 3.0])
+
+    print("\nModel Instance:")
+    print(instance)
+
+The output is as follows:
+
+.. code-block:: bash
+
+    Model Instance:
+    [[1. 2.]
+     [3. 0.]]
+
+Model Customization (af.Array)
+------------------------------
+
+The model customization API for NumPy array models is the same as for ``af.Model`` and ``af.Collection`` objects.
+
+.. code-block:: python
+
+    model = af.Array(
+        shape=(2, 2),
+        prior=af.GaussianPrior(mean=0.0, sigma=1.0),
+    )
+
+    model[0, 0] = 50.0
+    model[0, 1] = model[1, 0]
+    model.add_assertion(model[1, 1] > 0.0)
+
+    print(model.info)
+
+The output is as follows:
+
+.. code-block:: bash
+
+    Total Free Parameters = 2
+
+    model                    Array (N=2)
+        indices              list (N=0)
+
+    shape                    (2, 2)
+    indices
+        0                    (0, 0)
+        1                    (0, 1)
+        2                    (1, 0)
+        3                    (1, 1)
+    prior_0_0                50.0
+    prior_0_1 - prior_1_0    GaussianPrior [147], mean = 0.0, sigma = 1.0
+    prior_1_1                GaussianPrior [148], mean = 0.0, sigma = 1.0
+
+Json Output (af.Array)
+----------------------
+
+An ``Array`` has a ``dict`` method, which expresses all information about the model as a Python dictionary.
+
+By printing this dictionary we can therefore get a concise summary of the model.
+
+.. code-block:: python
+
+    model = af.Array(
+        shape=(2, 2),
+        prior=af.GaussianPrior(mean=0.0, sigma=1.0),
+    )
+
+    print(model.dict())
+
+Python dictionaries can easily be saved to hard disk as a ``.json`` file.
+
+This means we can save any **PyAutoFit** model to hard-disk.
+
+Checkout the file ``autofit_workspace/*/model/jsons/array.json`` to see the model written as a .json.
+
+.. code-block:: python
+
+    model_path = path.join("scripts", "model", "jsons")
+
+    os.makedirs(model_path, exist_ok=True)
+
+    model_file = path.join(model_path, "array.json")
+
+    with open(model_file, "w+") as f:
+        json.dump(model.dict(), f, indent=4)
+
+We can load the model from its ``.json`` file, meaning that one can easily save a model to hard disk and load it
+elsewhere.
+
+.. code-block:: python
+
+    model = af.Array.from_json(file=model_file)
+
+    print(f"\n Model via Json Prior Count = {model.prior_count}")
+
+Extensible Models (af.Array)
+----------------------------
+
+For ``Model`` objects, the number of parameters is fixed to those listed in the input Python class when the model is
+created.
+
+For ``Collection`` objects, the use of dictionaries and lists allows the number of parameters to be extended, but it
+is still tied to the input Python classes when the model is created.
+
+For ``Array`` objects, the number of parameters is fully customizable: you choose the shape of the array, and
+therefore the number of parameters in the model, when you create it.
+
+This makes ``Array`` objects the most extensible and flexible way to compose models.
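+
+For example, because the shape is chosen on initialization, it can be set at runtime to match the dataset being
+fitted. The sketch below assumes a hypothetical ``data`` array standing in for a real dataset:
+
+.. code-block:: python
+
+    import numpy as np
+
+    # Placeholder for a dataset loaded from hard disk; its shape is arbitrary.
+    data = np.zeros(shape=(5, 3))
+
+    # The model has one free parameter per data entry, whatever shape the dataset takes.
+    model = af.Array(
+        shape=data.shape,
+        prior=af.GaussianPrior(mean=0.0, sigma=1.0),
+    )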
+
+You can also combine ``Array`` objects with ``Collection`` objects to create models with a mix of fixed and extensible
+parameters.
+
+.. code-block:: python
+
+    model = af.Collection(
+        gaussian=Gaussian,
+        array=af.Array(shape=(3, 2), prior=af.GaussianPrior(mean=0.0, sigma=1.0)),
+    )
+
+    model.gaussian.sigma = 2.0
+    model.array[0, 0] = 1.0
+
+    print(model.info)
+
+The output is as follows:
+
+.. code-block:: bash
+
+    Total Free Parameters = 7
+
+    model                Collection (N=7)
+        gaussian         Gaussian (N=2)
+        array            Array (N=5)
+            indices      list (N=0)
+
+    gaussian
+        centre           UniformPrior [165], lower_limit = 0.0, upper_limit = 100.0
+        normalization    LogUniformPrior [166], lower_limit = 1e-06, upper_limit = 1000000.0
+        sigma            2.0
+    array
+        shape            (3, 2)
+        indices
+            0            (0, 0)
+            1            (0, 1)
+            2            (1, 0)
+            3            (1, 1)
+            4            (2, 0)
+            5            (2, 1)
+        prior_0_0        1.0
+        prior_0_1        GaussianPrior [160], mean = 0.0, sigma = 1.0
+        prior_1_0        GaussianPrior [161], mean = 0.0, sigma = 1.0
+        prior_1_1        GaussianPrior [162], mean = 0.0, sigma = 1.0
+        prior_2_0        GaussianPrior [163], mean = 0.0, sigma = 1.0
+        prior_2_1        GaussianPrior [164], mean = 0.0, sigma = 1.0
+
 
 Wrap Up
 -------
diff --git a/docs/overview/scientific_workflow.rst b/docs/overview/scientific_workflow.rst
index f6f1ed9be..ff670803a 100644
--- a/docs/overview/scientific_workflow.rst
+++ b/docs/overview/scientific_workflow.rst
@@ -525,7 +525,7 @@ For simpler scenarios, adjustments might include:
 In more intricate cases, models might involve numerous parameters and complex compositions of multiple model
 components.
 
 **PyAutoFit** offers a sophisticated model composition API designed to handle these complexities. It provides
-tools for constructing elaborate models using lists of Python classes and hierarchical structures of Python classes.
+tools for constructing elaborate models using lists of Python classes, NumPy arrays, and hierarchical structures of Python classes.
 
 For a detailed exploration of these capabilities, you can refer to the
 `model cookbook <https://pyautofit.readthedocs.io/en/latest/cookbooks/model.html>`_, which provides comprehensive
diff --git a/docs/overview/the_basics.rst b/docs/overview/the_basics.rst
index a67e9302c..da3d213d5 100644
--- a/docs/overview/the_basics.rst
+++ b/docs/overview/the_basics.rst
@@ -298,13 +298,13 @@ Analysis
 
 We now tell **PyAutoFit** how to fit the model to the data.
 
-We define an `Analysis` class, which includes:
+We define an ``Analysis`` class, which includes:
 
-- An `__init__` constructor that takes `data` and `noise_map` as inputs (this can be extended with additional elements necessary for fitting the model to the data).
+- An ``__init__`` constructor that takes ``data`` and ``noise_map`` as inputs (this can be extended with additional elements necessary for fitting the model to the data).
 
-- A `log_likelihood_function` that defines how to fit an `instance` of the model to the data and return a log likelihood value.
+- A ``log_likelihood_function`` that defines how to fit an ``instance`` of the model to the data and return a log likelihood value.
 
-Read the comments and docstrings of the `Analysis` class in detail for a full description of how the analysis works.
+Read the comments and docstrings of the ``Analysis`` class in detail for a full description of how the analysis works.
 
 .. code-block:: python
 
@@ -623,7 +623,7 @@ examples demonstrating more complex model-fitting tasks.
 This includes cookbooks, which provide a concise reference guide to the **PyAutoFit** API for advanced model-fitting:
 
-- [Model Cookbook](https://pyautofit.readthedocs.io/en/latest/cookbooks/model.html): Learn how to compose complex models using multiple Python classes, lists, dictionaries, and customize their parameterization.
+- [Model Cookbook](https://pyautofit.readthedocs.io/en/latest/cookbooks/model.html): Learn how to compose complex models using multiple Python classes, lists, dictionaries, and NumPy arrays, and customize their parameterization.
 
 - [Analysis Cookbook](https://pyautofit.readthedocs.io/en/latest/cookbooks/search.html): Customize the analysis with model-specific output and visualization to gain deeper insights into your model fits.
diff --git a/test_autofit/mapper/functionality/__init__.py b/test_autofit/mapper/functionality/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/test_autofit/mapper/test_by_path.py b/test_autofit/mapper/functionality/test_by_path.py
similarity index 100%
rename from test_autofit/mapper/test_by_path.py
rename to test_autofit/mapper/functionality/test_by_path.py
diff --git a/test_autofit/mapper/test_explicit_width_modifier.py b/test_autofit/mapper/functionality/test_explicit_width_modifier.py
similarity index 100%
rename from test_autofit/mapper/test_explicit_width_modifier.py
rename to test_autofit/mapper/functionality/test_explicit_width_modifier.py
diff --git a/test_autofit/mapper/test_from_data_names.py b/test_autofit/mapper/functionality/test_from_data_names.py
similarity index 100%
rename from test_autofit/mapper/test_from_data_names.py
rename to test_autofit/mapper/functionality/test_from_data_names.py
diff --git a/test_autofit/mapper/test_has.py b/test_autofit/mapper/functionality/test_has.py
similarity index 100%
rename from test_autofit/mapper/test_has.py
rename to test_autofit/mapper/functionality/test_has.py
diff --git a/test_autofit/mapper/test_take_attributes.py b/test_autofit/mapper/functionality/test_take_attributes.py
similarity index 100%
rename from test_autofit/mapper/test_take_attributes.py
rename to test_autofit/mapper/functionality/test_take_attributes.py
diff --git a/test_autofit/mapper/test_array.py b/test_autofit/mapper/test_array.py
new file mode 100644
index 000000000..836eb91f9
--- /dev/null
+++ b/test_autofit/mapper/test_array.py
@@ -0,0 +1,201 @@
+import pytest
+import numpy as np
+
+import autofit as af
+from autoconf.dictable import to_dict
+
+
+@pytest.fixture
+def array():
+    return af.Array(
+        shape=(2, 2),
+        prior=af.GaussianPrior(mean=0.0, sigma=1.0),
+    )
+
+
+@pytest.fixture
+def array_3d():
+    return af.Array(
+        shape=(2, 2, 2),
+        prior=af.GaussianPrior(mean=0.0, sigma=1.0),
+    )
+
+
+def test_prior_count(array):
+    assert array.prior_count == 4
+
+
+def test_prior_count_3d(array_3d):
+    assert array_3d.prior_count == 8
+
+
+def test_instance(array):
+    instance = array.instance_from_prior_medians()
+    assert (instance == [[0.0, 0.0], [0.0, 0.0]]).all()
+
+
+def test_instance_3d(array_3d):
+    instance = array_3d.instance_from_prior_medians()
+    assert (
+        instance
+        == [
+            [[0.0, 0.0], [0.0, 0.0]],
+            [[0.0, 0.0], [0.0, 0.0]],
+        ]
+    ).all()
+
+
+def test_modify_prior(array):
+    array[0, 0] = 1.0
+    assert array.prior_count == 3
+    assert (
+        array.instance_from_prior_medians()
+        == [
+            [1.0, 0.0],
+            [0.0, 0.0],
+        ]
+    ).all()
+
+
+def test_correlation(array):
+    array[0, 0] = array[1, 1]
+    array[0, 1] = array[1, 0]
+
+    instance = array.random_instance()
+
+    assert instance[0, 0] == instance[1, 1]
+    assert instance[0, 1] == instance[1, 0]
+
+
+@pytest.fixture
+def array_dict():
+    return {
+        "arguments": {
+            "indices": {
+                "type": "list",
+                "values": [
+                    {"type": "tuple", "values": [0, 0]},
+                    {"type": "tuple", "values": [0, 1]},
+                    {"type": "tuple", "values": [1, 0]},
+                    {"type": "tuple", "values": [1, 1]},
+                ],
+            },
+            "prior_0_0": {
+                "lower_limit": float("-inf"),
+                "mean": 0.0,
+                "sigma": 1.0,
+                "type": "Gaussian",
+                "upper_limit": float("inf"),
+            },
+            "prior_0_1": {
+                "lower_limit": float("-inf"),
+                "mean": 0.0,
+                "sigma": 1.0,
+                "type": "Gaussian",
+                "upper_limit": float("inf"),
+            },
+            "prior_1_0": {
+                "lower_limit": float("-inf"),
+                "mean": 0.0,
+                "sigma": 1.0,
+                "type": "Gaussian",
+                "upper_limit": float("inf"),
+            },
+            "prior_1_1": {
+                "lower_limit": float("-inf"),
+                "mean": 0.0,
+                "sigma": 1.0,
+                "type": "Gaussian",
+                "upper_limit": float("inf"),
+            },
+            "shape": {"type": "tuple", "values": [2, 2]},
+        },
+        "type": "array",
+    }
+
+
+def test_to_dict(array, array_dict, remove_ids):
+    assert remove_ids(to_dict(array)) == array_dict
+
+
+def test_from_dict(array_dict):
+    array = af.AbstractPriorModel.from_dict(array_dict)
+    assert array.prior_count == 4
+    assert (
+        array.instance_from_prior_medians()
+        == [
+            [0.0, 0.0],
+            [0.0, 0.0],
+        ]
+    ).all()
+
+
+@pytest.fixture
+def array_1d():
+    return af.Array(
+        shape=(2,),
+        prior=af.GaussianPrior(mean=0.0, sigma=1.0),
+    )
+
+
+def test_1d_array(array_1d):
+    assert array_1d.prior_count == 2
+    assert (array_1d.instance_from_prior_medians() == [0.0, 0.0]).all()
+
+
+def test_1d_array_modify_prior(array_1d):
+    array_1d[0] = 1.0
+    assert array_1d.prior_count == 1
+    assert (array_1d.instance_from_prior_medians() == [1.0, 0.0]).all()
+
+
+def test_tree_flatten(array):
+    children, aux = array.tree_flatten()
+    assert len(children) == 4
+    assert aux == ((2, 2),)
+
+    new_array = af.Array.tree_unflatten(aux, children)
+    assert new_array.prior_count == 4
+    assert (
+        new_array.instance_from_prior_medians()
+        == [
+            [0.0, 0.0],
+            [0.0, 0.0],
+        ]
+    ).all()
+
+
+class Analysis(af.Analysis):
+    def log_likelihood_function(self, instance):
+        return -float(
+            np.mean(
+                (
+                    np.array(
+                        [
+                            [0.1, 0.2],
+                            [0.3, 0.4],
+                        ]
+                    )
+                    - instance
+                )
+                ** 2
+            )
+        )
+
+
+def test_optimisation():
+    array = af.Array(
+        shape=(2, 2),
+        prior=af.UniformPrior(
+            lower_limit=0.0,
+            upper_limit=1.0,
+        ),
+    )
+    result = af.DynestyStatic().fit(model=array, analysis=Analysis())
+
+    posterior = result.model
+    array[0, 0] = posterior[0, 0]
+    array[0, 1] = posterior[0, 1]
+
+    result = af.DynestyStatic().fit(model=array, analysis=Analysis())
+    assert isinstance(result.instance, np.ndarray)
diff --git a/test_autofit/serialise/test_samples.py b/test_autofit/serialise/test_samples.py
index 12e039ef4..ccb8fa2b5 100644
--- a/test_autofit/serialise/test_samples.py
+++ b/test_autofit/serialise/test_samples.py
@@ -45,7 +45,6 @@ def test_summary(summary, model, sample):
     assert summary.model is model
     assert summary.max_log_likelihood_sample == sample
 
-
 @pytest.fixture(name="summary_dict")
 def make_summary_dict():
     return {
@@ -123,6 +122,7 @@ def make_summary_dict():
     }
 
 
+
 def test_dict(summary, summary_dict, remove_ids):
     assert remove_ids(to_dict(summary)) == summary_dict