From fe4d81fef9ac4a52215941b00100fc28670f0580 Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Tue, 21 Mar 2023 18:26:33 +0000 Subject: [PATCH 1/8] v2023.3.21.4 --- autofit/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autofit/__init__.py b/autofit/__init__.py index 693822f3f..ab8f9ec3b 100644 --- a/autofit/__init__.py +++ b/autofit/__init__.py @@ -109,4 +109,4 @@ def save_abc(pickler, obj): conf.instance.register(__file__) -__version__ = "2023.3.21.3" +__version__ = "2023.3.21.4" From 5d1ca26e628747de3fd847fe414ed0b602962cba Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Tue, 21 Mar 2023 18:45:14 +0000 Subject: [PATCH 2/8] bump --- autofit/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autofit/__init__.py b/autofit/__init__.py index ab8f9ec3b..5d65d6ab6 100644 --- a/autofit/__init__.py +++ b/autofit/__init__.py @@ -109,4 +109,4 @@ def save_abc(pickler, obj): conf.instance.register(__file__) -__version__ = "2023.3.21.4" +__version__ = "2023.3.21.5" From 5e68c6b057b739a4b24b988a10086362f3e79cce Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Tue, 21 Mar 2023 18:46:17 +0000 Subject: [PATCH 3/8] bump to __version__ = 2023.3.21.5 --- autofit/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autofit/__init__.py b/autofit/__init__.py index c71782ffb..98f9e8e01 100644 --- a/autofit/__init__.py +++ b/autofit/__init__.py @@ -109,4 +109,4 @@ def save_abc(pickler, obj): conf.instance.register(__file__) -__version__ = "2023.1.15.1" +__version__ = "2023.3.21.5" From 426f999f8d1f991dfebe56cd74dae71fb0282ad5 Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Mon, 27 Mar 2023 15:40:04 +0100 Subject: [PATCH 4/8] docs --- autofit/config/general.yaml | 3 +++ test_autofit/config/general.yaml | 3 +++ 2 files changed, 6 insertions(+) diff --git a/autofit/config/general.yaml b/autofit/config/general.yaml index a217d5189..6440c29f7 100644 --- a/autofit/config/general.yaml +++ b/autofit/config/general.yaml @@ -3,6 +3,9 @@ analysis: hpc: hpc_mode: false # If True, use HPC mode, which disables GUI visualization, logging to screen and other settings which are not suited to running on a super computer. iterations_per_update: 5000 # The number of iterations between every update (visualization, results output, etc) in HPC mode. +inversion: + check_reconstruction: true # If True, the inversion's reconstruction is checked to ensure the solution of a meshs's mapper is not an invalid solution where the values are all the same. + reconstruction_vmax_factor: 0.5 # Plots of an Inversion's reconstruction use the reconstructed data's bright value multiplied by this factor. model: ignore_prior_limits: false # If ``True`` the limits applied to priors will be ignored, where limits set upper / lower limits. This stops PriorLimitException's from being raised. output: diff --git a/test_autofit/config/general.yaml b/test_autofit/config/general.yaml index 06ea56184..90c934f6e 100644 --- a/test_autofit/config/general.yaml +++ b/test_autofit/config/general.yaml @@ -3,6 +3,9 @@ analysis: hpc: hpc_mode: false # If True, use HPC mode, which disables GUI visualization, logging to screen and other settings which are not suited to running on a super computer. iterations_per_update: 5000 # The number of iterations between every update (visualization, results output, etc) in HPC mode. 
+inversion: + check_reconstruction: true # If True, the inversion's reconstruction is checked to ensure the solution of a meshs's mapper is not an invalid solution where the values are all the same. + reconstruction_vmax_factor: 0.5 # Plots of an Inversion's reconstruction use the reconstructed data's bright value multiplied by this factor. model: ignore_prior_limits: false # If ``True`` the limits applied to priors will be ignored, where limits set upper / lower limits. This stops PriorLimitException's from being raised. output: From 1f1c31cf7c9d0e6858c787ba4b95c274d5acd8da Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Mon, 27 Mar 2023 15:42:38 +0100 Subject: [PATCH 5/8] v2023.3.21.5 --- autofit/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autofit/__init__.py b/autofit/__init__.py index 5d65d6ab6..3076e0fe4 100644 --- a/autofit/__init__.py +++ b/autofit/__init__.py @@ -109,4 +109,4 @@ def save_abc(pickler, obj): conf.instance.register(__file__) -__version__ = "2023.3.21.5" +__version__ = "2023.3.27.1" From f45665293012f725f44b631323d3122ce108d809 Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Mon, 27 Mar 2023 15:43:29 +0100 Subject: [PATCH 6/8] merge --- README.rst | 4 + autofit/__init__.py | 5 +- autofit/config/general.yaml | 3 + autofit/config/non_linear/optimize.yaml | 2 +- autofit/config/visualize/plots_search.yaml | 4 +- autofit/database/model/fit.py | 288 ++++-------------- autofit/example/analysis.py | 22 +- autofit/example/model.py | 44 ++- autofit/graphical/laplace/newton.py | 98 +++--- autofit/interpolator.py | 15 +- autofit/mapper/model.py | 80 ++++- autofit/mapper/model_object.py | 19 +- autofit/mapper/prior_model/abstract.py | 37 ++- autofit/non_linear/abstract_search.py | 72 +++++ autofit/non_linear/analysis/combined.py | 47 ++- autofit/non_linear/analysis/indexed.py | 12 +- autofit/non_linear/grid/sensitivity.py | 226 ++++++-------- autofit/non_linear/initializer.py | 7 +- autofit/non_linear/nest/dynesty/abstract.py | 9 +- autofit/non_linear/nest/dynesty/dynamic.py | 5 +- autofit/non_linear/nest/dynesty/plotter.py | 40 +-- autofit/non_linear/nest/dynesty/static.py | 5 +- autofit/non_linear/nest/ultranest/plotter.py | 43 ++- autofit/plot/samples_plotters.py | 34 +++ docs/overview/multi_datasets.rst | 17 ++ docs/requirements.txt | 2 +- optional_requirements.txt | 2 +- requirements.txt | 2 +- test_autofit/analysis/test_free_parameter.py | 106 ++----- test_autofit/config/general.yaml | 4 +- test_autofit/conftest.py | 59 +--- test_autofit/mapper/model/test_json.py | 140 ++------- .../mapper/model/test_model_instance.py | 65 +++- test_autofit/mapper/test_has.py | 155 ++++------ .../grid/test_sensitivity/test_results.py | 30 ++ .../grid/test_sensitivity/test_run.py | 27 +- .../non_linear/test_fit_sequential.py | 75 +++++ test_autofit/test_interpolator.py | 158 ++++++++-- 38 files changed, 1032 insertions(+), 931 deletions(-) create mode 100644 test_autofit/non_linear/grid/test_sensitivity/test_results.py create mode 100644 test_autofit/non_linear/test_fit_sequential.py diff --git a/README.rst b/README.rst index 41506226d..f0b24a30c 100644 --- a/README.rst +++ b/README.rst @@ -24,6 +24,10 @@ PyAutoFit: Classy Probabilistic Programming `Introduction on Binder `_ | `HowToFit `_ + +.. + _ One day make these BOLD with a colon like my fellowsahip proposa,s where the first is Model Composition & Fitting: Tools for composing a complex model and fitting it with dynesty... 
+ PyAutoFit is a Python based probabilistic programming language for the fully Bayesian analysis of extremely large datasets which: diff --git a/autofit/__init__.py b/autofit/__init__.py index 98f9e8e01..3036d1f9d 100644 --- a/autofit/__init__.py +++ b/autofit/__init__.py @@ -85,7 +85,7 @@ from .example.model import Gaussian from .text import formatter from .text import samples_text -from .interpolator import LinearInterpolator +from .interpolator import LinearInterpolator, SplineInterpolator from .tools import util @@ -109,4 +109,5 @@ def save_abc(pickler, obj): conf.instance.register(__file__) -__version__ = "2023.3.21.5" +__version__ = "2023.3.27.1" + diff --git a/autofit/config/general.yaml b/autofit/config/general.yaml index a217d5189..6440c29f7 100644 --- a/autofit/config/general.yaml +++ b/autofit/config/general.yaml @@ -3,6 +3,9 @@ analysis: hpc: hpc_mode: false # If True, use HPC mode, which disables GUI visualization, logging to screen and other settings which are not suited to running on a super computer. iterations_per_update: 5000 # The number of iterations between every update (visualization, results output, etc) in HPC mode. +inversion: + check_reconstruction: true # If True, the inversion's reconstruction is checked to ensure the solution of a meshs's mapper is not an invalid solution where the values are all the same. + reconstruction_vmax_factor: 0.5 # Plots of an Inversion's reconstruction use the reconstructed data's bright value multiplied by this factor. model: ignore_prior_limits: false # If ``True`` the limits applied to priors will be ignored, where limits set upper / lower limits. This stops PriorLimitException's from being raised. output: diff --git a/autofit/config/non_linear/optimize.yaml b/autofit/config/non_linear/optimize.yaml index 8cf3d0fcf..007c93c63 100644 --- a/autofit/config/non_linear/optimize.yaml +++ b/autofit/config/non_linear/optimize.yaml @@ -5,7 +5,7 @@ # - PySwarms: https://github.com/ljvmiranda921/pyswarms / https://pyswarms.readthedocs.io/en/latest/index.html # Settings in the [search], [run] and [options] entries are specific to each nested algorithm and should be -# determined by consulting that MCMC method's own readthedocs. +# determined by consulting that optimizers method's own readthedocs. PySwarmsGlobal: run: diff --git a/autofit/config/visualize/plots_search.yaml b/autofit/config/visualize/plots_search.yaml index a536bb7b0..bb312a35d 100644 --- a/autofit/config/visualize/plots_search.yaml +++ b/autofit/config/visualize/plots_search.yaml @@ -1,6 +1,6 @@ dynesty: - cornerplot: true - cornerpoints: false # Output Dynesty cornerplot figure during a non-linear search fit? + cornerplot: true # Output Dynesty cornerplot figure during a non-linear search fit? + cornerpoints: false # Output Dynesty cornerpoints figure during a non-linear search fit? runplot: true # Output Dynesty runplot figure during a non-linear search fit? traceplot: true # Output Dynesty traceplot figure during a non-linear search fit? 
emcee: diff --git a/autofit/database/model/fit.py b/autofit/database/model/fit.py index c1b6a18f9..1951a92ac 100644 --- a/autofit/database/model/fit.py +++ b/autofit/database/model/fit.py @@ -18,48 +18,26 @@ class Pickle(Base): def __init__(self, **kwargs): super().__init__(**kwargs) - id = sa.Column( - sa.Integer, - primary_key=True - ) + id = sa.Column(sa.Integer, primary_key=True) - name = sa.Column( - sa.String - ) - string = sa.Column( - sa.String - ) - fit_id = sa.Column( - sa.String, - sa.ForeignKey( - "fit.id" - ) - ) - fit = sa.orm.relationship( - "Fit", - uselist=False - ) + name = sa.Column(sa.String) + string = sa.Column(sa.String) + fit_id = sa.Column(sa.String, sa.ForeignKey("fit.id")) + fit = sa.orm.relationship("Fit", uselist=False) @property def value(self): """ The unpickled object """ - if isinstance( - self.string, - str - ): + if isinstance(self.string, str): return self.string - return pickle.loads( - self.string - ) + return pickle.loads(self.string) @value.setter def value(self, value): try: - self.string = pickle.dumps( - value - ) + self.string = pickle.dumps(value) except pickle.PicklingError: pass @@ -67,24 +45,13 @@ def value(self, value): class Info(Base): __tablename__ = "info" - id = sa.Column( - sa.Integer, - primary_key=True - ) + id = sa.Column(sa.Integer, primary_key=True) key = sa.Column(sa.String) value = sa.Column(sa.String) - fit_id = sa.Column( - sa.String, - sa.ForeignKey( - "fit.id" - ) - ) - fit = sa.orm.relationship( - "Fit", - uselist=False - ) + fit_id = sa.Column(sa.String, sa.ForeignKey("fit.id")) + fit = sa.orm.relationship("Fit", uselist=False) def try_none(func): @@ -101,24 +68,13 @@ def wrapper(*args, **kwargs): class NamedInstance(Base): __tablename__ = "named_instance" - id = sa.Column( - sa.Integer, - primary_key=True - ) + id = sa.Column(sa.Integer, primary_key=True) name = sa.Column(sa.String) - instance_id = sa.Column( - sa.Integer, - sa.ForeignKey( - "object.id" - ) - ) + instance_id = sa.Column(sa.Integer, sa.ForeignKey("object.id")) __instance = sa.orm.relationship( - "Object", - uselist=False, - backref="named_instance", - foreign_keys=[instance_id] + "Object", uselist=False, backref="named_instance", foreign_keys=[instance_id] ) @property @@ -131,20 +87,10 @@ def instance(self): @instance.setter def instance(self, instance): - self.__instance = Object.from_object( - instance - ) - - fit_id = sa.Column( - sa.String, - sa.ForeignKey( - "fit.id" - ) - ) - fit = sa.orm.relationship( - "Fit", - uselist=False - ) + self.__instance = Object.from_object(instance) + + fit_id = sa.Column(sa.String, sa.ForeignKey("fit.id")) + fit = sa.orm.relationship("Fit", uselist=False) # noinspection PyProtectedMember @@ -167,56 +113,36 @@ def __getitem__(self, item: str): Raises a KeyError if no such instance exists. """ - return self._get_named_instance( - item - ).instance + return self._get_named_instance(item).instance def __setitem__(self, key: str, value): """ Set an instance for a given name """ try: - named_instance = self._get_named_instance( - key - ) + named_instance = self._get_named_instance(key) except KeyError: - named_instance = NamedInstance( - name=key - ) - self.fit._named_instances.append( - named_instance - ) + named_instance = NamedInstance(name=key) + self.fit._named_instances.append(named_instance) named_instance.instance = value - def _get_named_instance( - self, - item: str - ) -> "NamedInstance": + def _get_named_instance(self, item: str) -> "NamedInstance": """ Retrieve a NamedInstance by its name. 
""" for named_instance in self.fit._named_instances: if named_instance.name == item: return named_instance - raise KeyError( - f"Instance {item} not found" - ) + raise KeyError(f"Instance {item} not found") class Fit(Base): __tablename__ = "fit" - id = sa.Column( - sa.String, - primary_key=True, - ) - is_complete = sa.Column( - sa.Boolean - ) + id = sa.Column(sa.String, primary_key=True,) + is_complete = sa.Column(sa.Boolean) - _named_instances: List[NamedInstance] = sa.orm.relationship( - "NamedInstance" - ) + _named_instances: List[NamedInstance] = sa.orm.relationship("NamedInstance") @property @try_none @@ -228,45 +154,23 @@ def instance(self): @instance.setter def instance(self, instance): - self.__instance = Object.from_object( - instance - ) + self.__instance = Object.from_object(instance) @property def named_instances(self): - return NamedInstancesWrapper( - self - ) + return NamedInstancesWrapper(self) - _info: List[Info] = sa.orm.relationship( - "Info" - ) + _info: List[Info] = sa.orm.relationship("Info") - def __init__( - self, - **kwargs - ): - super().__init__( - **kwargs - ) + def __init__(self, **kwargs): + super().__init__(**kwargs) - max_log_likelihood = sa.Column( - sa.Float - ) + max_log_likelihood = sa.Column(sa.Float) - parent_id = sa.Column( - sa.String, - sa.ForeignKey( - "fit.id" - ) - ) + parent_id = sa.Column(sa.String, sa.ForeignKey("fit.id")) children: List["Fit"] = sa.orm.relationship( - "Fit", - backref=sa.orm.backref( - 'parent', - remote_side=[id] - ) + "Fit", backref=sa.orm.backref("parent", remote_side=[id]) ) @property @@ -276,13 +180,9 @@ def best_fit(self) -> "Fit": the highest log likelihood. """ if not self.is_grid_search: - raise TypeError( - f"Fit {self.id} is not a grid search" - ) + raise TypeError(f"Fit {self.id} is not a grid search") if len(self.children) == 0: - raise TypeError( - f"Grid search fit {self.id} has no children" - ) + raise TypeError(f"Grid search fit {self.id} has no children") best_fit = None max_log_likelihood = float("-inf") @@ -294,26 +194,14 @@ def best_fit(self) -> "Fit": return best_fit - is_grid_search = sa.Column( - sa.Boolean - ) + is_grid_search = sa.Column(sa.Boolean) - unique_tag = sa.Column( - sa.String - ) - name = sa.Column( - sa.String - ) - path_prefix = sa.Column( - sa.String - ) + unique_tag = sa.Column(sa.String) + name = sa.Column(sa.String) + path_prefix = sa.Column(sa.String) _samples = sa.orm.relationship( - Object, - uselist=False, - foreign_keys=[ - Object.samples_for_id - ] + Object, uselist=False, foreign_keys=[Object.samples_for_id] ) @property @@ -323,29 +211,16 @@ def samples(self) -> Samples: @samples.setter def samples(self, samples): - self._samples = Object.from_object( - samples - ) + self._samples = Object.from_object(samples) @property def info(self): - return { - info.key: info.value - for info - in self._info - } + return {info.key: info.value for info in self._info} @info.setter def info(self, info): if info is not None: - self._info = [ - Info( - key=key, - value=value - ) - for key, value - in info.items() - ] + self._info = [Info(key=key, value=value) for key, value in info.items()] @property @try_none @@ -357,14 +232,9 @@ def model(self) -> AbstractPriorModel: @model.setter def model(self, model: AbstractPriorModel): - self.__model = Object.from_object( - model - ) + self.__model = Object.from_object(model) - pickles: List[Pickle] = sa.orm.relationship( - "Pickle", - lazy="joined" - ) + pickles: List[Pickle] = sa.orm.relationship("Pickle", lazy="joined") def __getitem__(self, item: 
str): """ @@ -385,10 +255,7 @@ def __getitem__(self, item: str): for p in self.pickles: if p.name == item: return p.value - return getattr( - self, - item - ) + return getattr(self, item) def __contains__(self, item): for p in self.pickles: @@ -396,11 +263,7 @@ def __contains__(self, item): return True return False - def __setitem__( - self, - key: str, - value - ): + def __setitem__(self, key: str, value): """ Add a pickle. @@ -414,32 +277,15 @@ def __setitem__( value A string, bytes or object """ - new = Pickle( - name=key - ) - if isinstance( - value, - (str, bytes) - ): + new = Pickle(name=key) + if isinstance(value, (str, bytes)): new.string = value else: new.value = value - self.pickles = [ - p - for p - in self.pickles - if p.name != key - ] + [ - new - ] + self.pickles = [p for p in self.pickles if p.name != key] + [new] def __delitem__(self, key): - self.pickles = [ - p - for p - in self.pickles - if p.name != key - ] + self.pickles = [p for p in self.pickles if p.name != key] def value(self, name: str): try: @@ -447,38 +293,20 @@ def value(self, name: str): except AttributeError: return None - model_id = sa.Column( - sa.Integer, - sa.ForeignKey( - "object.id" - ) - ) + model_id = sa.Column(sa.Integer, sa.ForeignKey("object.id")) __model = sa.orm.relationship( - "Object", - uselist=False, - backref="fit_model", - foreign_keys=[model_id] + "Object", uselist=False, backref="fit_model", foreign_keys=[model_id] ) - instance_id = sa.Column( - sa.Integer, - sa.ForeignKey( - "object.id" - ) - ) + instance_id = sa.Column(sa.Integer, sa.ForeignKey("object.id")) __instance = sa.orm.relationship( - "Object", - uselist=False, - backref="fit_instance", - foreign_keys=[instance_id] + "Object", uselist=False, backref="fit_instance", foreign_keys=[instance_id] ) @classmethod def all(cls, session): - return session.query( - cls - ).all() + return session.query(cls).all() def __str__(self): return self.id diff --git a/autofit/example/analysis.py b/autofit/example/analysis.py index b0bee1e46..5c3d90cf7 100644 --- a/autofit/example/analysis.py +++ b/autofit/example/analysis.py @@ -44,13 +44,16 @@ def log_likelihood_function(self, instance: af.ModelInstance) -> float: """ xvalues = np.arange(self.data.shape[0]) + model_data_1d = np.zeros(self.data.shape[0]) try: - model_data_1d = sum( - profile.model_data_1d_via_xvalues_from(xvalues=xvalues) for profile in instance - ) + for profile in instance: + try: + model_data_1d += profile.model_data_1d_via_xvalues_from(xvalues=xvalues) + except AttributeError: + pass except TypeError: - model_data_1d = instance.model_data_1d_via_xvalues_from(xvalues=xvalues) + model_data_1d += instance.model_data_1d_via_xvalues_from(xvalues=xvalues) residual_map = self.data - model_data_1d chi_squared_map = (residual_map / self.noise_map) ** 2.0 @@ -83,13 +86,16 @@ def visualize(self, paths: af.DirectoryPaths, instance: af.ModelInstance, during """ xvalues = np.arange(self.data.shape[0]) + model_data_1d = np.zeros(self.data.shape[0]) try: - model_data_1d = sum( - profile.model_data_1d_via_xvalues_from(xvalues=xvalues) for profile in instance - ) + for profile in instance: + try: + model_data_1d += profile.model_data_1d_via_xvalues_from(xvalues=xvalues) + except AttributeError: + pass except TypeError: - model_data_1d = instance.model_data_1d_via_xvalues_from(xvalues=xvalues) + model_data_1d += instance.model_data_1d_via_xvalues_from(xvalues=xvalues) plt.errorbar( x=xvalues, diff --git a/autofit/example/model.py b/autofit/example/model.py index 936b59d15..eb18c9b2c 100644 --- 
a/autofit/example/model.py +++ b/autofit/example/model.py @@ -1,7 +1,8 @@ import math -import numpy as np from typing import Dict +import numpy as np + from autoconf.dictable import Dictable """ @@ -17,9 +18,9 @@ class Gaussian(Dictable): def __init__( self, - centre:float=0.0, # <- PyAutoFit recognises these constructor arguments - normalization:float=0.1, # <- are the Gaussian`s model parameters. - sigma:float=0.01, + centre: float = 0.0, # <- PyAutoFit recognises these constructor arguments + normalization: float = 0.1, # <- are the Gaussian`s model parameters. + sigma: float = 0.01, ): """ Represents a 1D `Gaussian` profile, which may be treated as a model-component of PyAutoFit the @@ -38,7 +39,7 @@ def __init__( self.normalization = normalization self.sigma = sigma - def model_data_1d_via_xvalues_from(self, xvalues:np.ndarray) -> np.ndarray: + def model_data_1d_via_xvalues_from(self, xvalues: np.ndarray) -> np.ndarray: """ Calculate the normalization of the profile on a 1D grid of Cartesian x coordinates. @@ -56,7 +57,7 @@ def model_data_1d_via_xvalues_from(self, xvalues:np.ndarray) -> np.ndarray: np.exp(-0.5 * np.square(np.divide(transformed_xvalues, self.sigma))), ) - def __call__(self, xvalues:np.ndarray) -> np.ndarray: + def __call__(self, xvalues: np.ndarray) -> np.ndarray: """ For certain graphical models, the `__call__` function is overwritten for producing the model-fit. We include this here so these examples work, but it should not be important for most PyAutoFit users. @@ -82,33 +83,24 @@ def dict(self) -> Dict: """ return super().dict() - def inverse( - self, - y - ): + def inverse(self, y): """ For graphical models, the inverse of the Gaussian is used to test certain aspects of the calculation. """ - a = self.normalization / ( - y * self.sigma * math.sqrt(2 * math.pi) - ) + a = self.normalization / (y * self.sigma * math.sqrt(2 * math.pi)) - b = 2 * math.log( - a - ) - - return self.centre + self.sigma * math.sqrt( - b - ) + b = 2 * math.log(a) + + return self.centre + self.sigma * math.sqrt(b) class Exponential(Dictable): def __init__( self, - centre:float=0.0, # <- PyAutoFit recognises these constructor arguments are the model - normalization:float=0.1, # <- parameters of the Gaussian. - rate:float=0.01, + centre: float = 0.0, # <- PyAutoFit recognises these constructor arguments are the model + normalization: float = 0.1, # <- parameters of the Gaussian. + rate: float = 0.01, ): """ Represents a 1D Exponential profile, which may be treated as a model-component of PyAutoFit the @@ -127,7 +119,7 @@ def __init__( self.normalization = normalization self.rate = rate - def model_data_1d_via_xvalues_from(self, xvalues:np.ndarray) -> np.ndarray: + def model_data_1d_via_xvalues_from(self, xvalues: np.ndarray) -> np.ndarray: """ Calculate the 1D Gaussian profile on a 1D grid of Cartesian x coordinates. @@ -143,7 +135,7 @@ def model_data_1d_via_xvalues_from(self, xvalues:np.ndarray) -> np.ndarray: self.rate, np.exp(-1.0 * self.rate * abs(transformed_xvalues)) ) - def __call__(self, xvalues:np.ndarray) -> np.ndarray: + def __call__(self, xvalues: np.ndarray) -> np.ndarray: """ Calculate the 1D Gaussian profile on a 1D grid of Cartesian x coordinates. @@ -169,4 +161,4 @@ def dict(self) -> Dict: The `Gaussian` type and model parameters as a dictionary. 
""" - return super().dict() \ No newline at end of file + return super().dict() diff --git a/autofit/graphical/laplace/newton.py b/autofit/graphical/laplace/newton.py index d667ca2eb..b65f6eea2 100644 --- a/autofit/graphical/laplace/newton.py +++ b/autofit/graphical/laplace/newton.py @@ -20,6 +20,7 @@ def gradient_ascent(state: OptimisationState, **kwargs) -> VariableData: def newton_direction(state: OptimisationState, **kwargs) -> VariableData: return state.hessian.ldiv(state.gradient) + def newton_abs_direction(state: OptimisationState, d=1e-6, **kwargs) -> VariableData: posdef = state.hessian.abs().diagonalupdate(state.parameters.full_like(d)) return posdef.ldiv(state.gradient) @@ -34,7 +35,7 @@ def newton_abs_direction(state: OptimisationState, d=1e-6, **kwargs) -> Variable def sr1_update( - state1: OptimisationState, state: OptimisationState, mintol=1e-8, **kwargs + state1: OptimisationState, state: OptimisationState, mintol=1e-8, **kwargs ) -> OptimisationState: yk = VariableData.sub(state1.gradient, state.gradient) dk = VariableData.sub(state1.parameters, state.parameters) @@ -57,7 +58,7 @@ def sr1_update( def diag_sr1_update( - state1: OptimisationState, state: OptimisationState, tol=1e-8, **kwargs + state1: OptimisationState, state: OptimisationState, tol=1e-8, **kwargs ) -> OptimisationState: yk = VariableData.sub(state1.gradient, state.gradient) dk = VariableData.sub(state1.parameters, state.parameters) @@ -76,7 +77,7 @@ def diag_sr1_update( def diag_sr1_update_( - state1: OptimisationState, state: OptimisationState, tol=1e-8, **kwargs + state1: OptimisationState, state: OptimisationState, tol=1e-8, **kwargs ) -> OptimisationState: yk = VariableData.sub(state1.gradient, state.gradient) dk = VariableData.sub(state1.parameters, state.parameters) @@ -99,7 +100,7 @@ def diag_sr1_update_( def diag_sr1_bfgs_update( - state1: OptimisationState, state: OptimisationState, **kwargs + state1: OptimisationState, state: OptimisationState, **kwargs ) -> OptimisationState: yk = VariableData.sub(state1.gradient, state.gradient) dk = VariableData.sub(state1.parameters, state.parameters) @@ -109,9 +110,7 @@ def diag_sr1_bfgs_update( def bfgs1_update( - state1: OptimisationState, - state: OptimisationState, - **kwargs, + state1: OptimisationState, state: OptimisationState, **kwargs, ) -> OptimisationState: """ y_k = g_{k+1} - g{k} @@ -139,9 +138,7 @@ def bfgs1_update( def bfgs_update( - state1: OptimisationState, - state: OptimisationState, - **kwargs, + state1: OptimisationState, state: OptimisationState, **kwargs, ) -> OptimisationState: yk = VariableData.sub(state1.gradient, state.gradient) dk = VariableData.sub(state1.parameters, state.parameters) @@ -158,9 +155,7 @@ def bfgs_update( def quasi_deterministic_update( - state1: OptimisationState, - state: OptimisationState, - **kwargs, + state1: OptimisationState, state: OptimisationState, **kwargs, ) -> OptimisationState: dk = VariableData.sub(state1.parameters, state.parameters) zk = VariableData.sub( @@ -179,9 +174,7 @@ def quasi_deterministic_update( def diag_quasi_deterministic_update( - state1: OptimisationState, - state: OptimisationState, - **kwargs, + state1: OptimisationState, state: OptimisationState, **kwargs, ) -> OptimisationState: dk = VariableData.sub(state1.parameters, state.parameters) zk = VariableData.sub( @@ -191,7 +184,7 @@ def diag_quasi_deterministic_update( zk2 = zk ** 2 zk4 = (zk2 ** 2).sum() alpha = (dk.dot(Bxk.dot(dk)) - zk.dot(Bzk.dot(zk))) / zk4 - state1.det_hessian = Bzk.diagonalupdate(alpha * zk2) + 
state1.det_hessian = Bzk.diagonalupdate(float(alpha) * zk2) return state1 @@ -202,10 +195,7 @@ def __init__(self, quasi_newton_update, det_quasi_newton_update): self.det_quasi_newton_update = det_quasi_newton_update def __call__( - self, - state1: OptimisationState, - state: OptimisationState, - **kwargs, + self, state1: OptimisationState, state: OptimisationState, **kwargs, ) -> OptimisationState: # Only update estimate if a step has been taken @@ -225,28 +215,28 @@ def __call__( def take_step( - state: OptimisationState, - old_state: Optional[OptimisationState] = None, - *, - search_direction=newton_abs_direction, - calc_line_search=line_search, - search_direction_kws: Optional[Dict[str, Any]] = None, - line_search_kws: Optional[Dict[str, Any]] = None, + state: OptimisationState, + old_state: Optional[OptimisationState] = None, + *, + search_direction=newton_abs_direction, + calc_line_search=line_search, + search_direction_kws: Optional[Dict[str, Any]] = None, + line_search_kws: Optional[Dict[str, Any]] = None, ) -> Tuple[Optional[float], OptimisationState]: state.search_direction = search_direction(state, **(search_direction_kws or {})) return calc_line_search(state, old_state, **(line_search_kws or {})) def take_quasi_newton_step( - state: OptimisationState, - old_state: Optional[OptimisationState] = None, - *, - search_direction=newton_abs_direction, - calc_line_search=line_search, - quasi_newton_update=full_bfgs_update, - search_direction_kws: Optional[Dict[str, Any]] = None, - line_search_kws: Optional[Dict[str, Any]] = None, - quasi_newton_kws: Optional[Dict[str, Any]] = None, + state: OptimisationState, + old_state: Optional[OptimisationState] = None, + *, + search_direction=newton_abs_direction, + calc_line_search=line_search, + quasi_newton_update=full_bfgs_update, + search_direction_kws: Optional[Dict[str, Any]] = None, + line_search_kws: Optional[Dict[str, Any]] = None, + quasi_newton_kws: Optional[Dict[str, Any]] = None, ) -> Tuple[Optional[float], OptimisationState]: """ """ state.search_direction = search_direction(state, **(search_direction_kws or {})) @@ -314,7 +304,7 @@ def ngev_condition(state, old_state, maxgev=10000, **kwargs): def check_stop_conditions( - stepsize, state, old_state, stop_conditions, **stop_kws + stepsize, state, old_state, stop_conditions, **stop_kws ) -> Optional[Tuple[bool, str]]: if stepsize is None: return False, "abnormal termination of line search" @@ -328,20 +318,20 @@ def check_stop_conditions( def optimise_quasi_newton( - state: OptimisationState, - old_state: Optional[OptimisationState] = None, - *, - max_iter=100, - search_direction=newton_abs_direction, - calc_line_search=line_search, - quasi_newton_update=bfgs_update, - stop_conditions=stop_conditions, - search_direction_kws: Optional[Dict[str, Any]] = None, - line_search_kws: Optional[Dict[str, Any]] = None, - quasi_newton_kws: Optional[Dict[str, Any]] = None, - stop_kws: Optional[Dict[str, Any]] = None, - callback: Optional[_OPT_CALLBACK] = None, - **kwargs, + state: OptimisationState, + old_state: Optional[OptimisationState] = None, + *, + max_iter=100, + search_direction=newton_abs_direction, + calc_line_search=line_search, + quasi_newton_update=bfgs_update, + stop_conditions=stop_conditions, + search_direction_kws: Optional[Dict[str, Any]] = None, + line_search_kws: Optional[Dict[str, Any]] = None, + quasi_newton_kws: Optional[Dict[str, Any]] = None, + stop_kws: Optional[Dict[str, Any]] = None, + callback: Optional[_OPT_CALLBACK] = None, + **kwargs, ) -> Tuple[OptimisationState, 
Status]: success = True updated = False @@ -356,7 +346,9 @@ def optimise_quasi_newton( success, message = stop break - with LogWarnings(logger=_log_projection_warnings, action='always') as caught_warnings: + with LogWarnings( + logger=_log_projection_warnings, action="always" + ) as caught_warnings: try: stepsize, state1 = take_quasi_newton_step( state, diff --git a/autofit/interpolator.py b/autofit/interpolator.py index b291243b9..a5481c60a 100644 --- a/autofit/interpolator.py +++ b/autofit/interpolator.py @@ -1,9 +1,11 @@ import copy from abc import ABC, abstractmethod +from scipy.interpolate import CubicSpline from typing import List, Dict, cast from scipy.stats import stats +from autoconf.dictable import Dictable from autofit.mapper.model import ModelInstance @@ -76,7 +78,7 @@ def __init__(self, path: InterpolatorPath, value: float): self.value = value -class AbstractInterpolator(ABC): +class AbstractInterpolator(Dictable, ABC): def __init__(self, instances: List[ModelInstance]): """ A TimeSeries allows interpolation on any variable. @@ -207,3 +209,14 @@ class LinearInterpolator(AbstractInterpolator): def _interpolate(x, y, value): slope, intercept, r, p, std_err = stats.linregress(x, y) return slope * value + intercept + + +class SplineInterpolator(AbstractInterpolator): + """ + Interpolate data with a piecewise cubic polynomial which is twice continuously differentiable + """ + + @staticmethod + def _interpolate(x, y, value): + f = CubicSpline(x, y) + return f(value) diff --git a/autofit/mapper/model.py b/autofit/mapper/model.py index 867c07664..ffa12f513 100644 --- a/autofit/mapper/model.py +++ b/autofit/mapper/model.py @@ -221,7 +221,35 @@ def direct_tuples_with_type(self, class_type): ) @frozen_cache - def model_tuples_with_type(self, cls, include_zero_dimension=False): + def models_with_type( + self, cls: Union[Type, Tuple[Type, ...]], include_zero_dimension=False, + ) -> List["AbstractModel"]: + """ + Return all models of a given type in the model tree. + + Parameters + ---------- + cls + The type to find instances of + include_zero_dimension + If true, include models with zero dimensions + + Returns + ------- + A list of models of the given type + """ + # noinspection PyTypeChecker + return [ + t[1] + for t in self.model_tuples_with_type( + cls, include_zero_dimension=include_zero_dimension + ) + ] + + @frozen_cache + def model_tuples_with_type( + self, cls: Union[Type, Tuple[Type, ...]], include_zero_dimension=False + ): """ All models of the class in this model which have at least one free parameter, recursively. 
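
The `models_with_type` helper added in the model.py hunk above can be shown with a minimal sketch (illustrative only, not from the patch itself; it assumes the `af.Collection` / `af.Model` API and the example `Gaussian` / `Exponential` classes shipped with autofit):

import autofit as af
from autofit.example.model import Gaussian, Exponential

# A collection containing two free model components.
model = af.Collection(gaussian=af.Model(Gaussian), exponential=af.Model(Exponential))

# Only sub-models whose class is (a subclass of) Gaussian and which have at least
# one free parameter are returned, without the (path, model) tuples that
# model_tuples_with_type gives.
gaussian_models = model.models_with_type(Gaussian)
print(len(gaussian_models))  # 1
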
@@ -241,7 +269,9 @@ def model_tuples_with_type(self, cls, include_zero_dimension=False): return [ (path, model) - for path, model in self.attribute_tuples_with_type(Model) + for path, model in self.attribute_tuples_with_type( + Model, ignore_children=False + ) if issubclass(model.cls, cls) and (include_zero_dimension or model.prior_count > 0) ] @@ -339,14 +369,11 @@ class ModelInstance(AbstractModel): @DynamicAttrs """ - def __init__(self, items=None): + __dictable_type__ = "instance" + + def __init__(self, child_items=None): super().__init__() - if isinstance(items, list): - for i, item in enumerate(items): - self[i] = item - if isinstance(items, dict): - for key, value in items.items(): - self[key] = value + self.child_items = child_items def __eq__(self, other): return self.__dict__ == other.__dict__ @@ -361,6 +388,19 @@ def __getitem__(self, item): def __setitem__(self, key, value): self.__dict__[key] = value + @property + def child_items(self): + return self.dict + + @child_items.setter + def child_items(self, child_items): + if isinstance(child_items, list): + for i, item in enumerate(child_items): + self[i] = item + if isinstance(child_items, dict): + for key, value in child_items.items(): + self[key] = value + def items(self): return self.dict.items() @@ -382,8 +422,26 @@ def values(self): def __len__(self): return len(self.values()) - def as_model(self, model_classes=tuple()): + def as_model( + self, + model_classes: Union[type, Iterable[type]] = tuple(), + excluded_classes: Union[type, Iterable[type]] = tuple(), + ): + """ + Convert this instance to a model + + Parameters + ---------- + model_classes + The classes to convert to models + excluded_classes + The classes to exclude from conversion + + Returns + ------- + A model + """ from autofit.mapper.prior_model.abstract import AbstractPriorModel - return AbstractPriorModel.from_instance(self, model_classes) + return AbstractPriorModel.from_instance(self, model_classes, excluded_classes,) diff --git a/autofit/mapper/model_object.py b/autofit/mapper/model_object.py index 19b01e116..25557fec6 100644 --- a/autofit/mapper/model_object.py +++ b/autofit/mapper/model_object.py @@ -48,7 +48,7 @@ def replacing_for_path(self, path: Tuple[str, ...], value) -> "ModelObject": new = copy.deepcopy(self) obj = new for key in path[:-1]: - obj = getattr(new, key) + obj = getattr(obj, key) setattr(obj, path[-1], value) return new @@ -117,13 +117,22 @@ def from_dict(d): instance = Model(get_class(d.pop("class_path"))) elif type_ == "collection": instance = Collection() - elif type_ == "instance": - cls = get_class(d.pop("class_path")) - instance = object.__new__(cls) elif type_ == "tuple_prior": instance = TuplePrior() + elif type_ == "dict": + return {key: ModelObject.from_dict(value) for key, value in d.items()} + elif type_ == "instance": + d.pop("type") + cls = get_class(d.pop("class_path")) + return cls( + **{key: ModelObject.from_dict(value) for key, value in d.items()} + ) else: - return Prior.from_dict(d) + try: + return Prior.from_dict(d) + except KeyError: + cls = get_class(type_) + instance = object.__new__(cls) d.pop("type") diff --git a/autofit/mapper/prior_model/abstract.py b/autofit/mapper/prior_model/abstract.py index 14ea28aec..9f337972b 100644 --- a/autofit/mapper/prior_model/abstract.py +++ b/autofit/mapper/prior_model/abstract.py @@ -444,9 +444,7 @@ def assert_no_assertions(obj): try: item = copy.copy(source) if isinstance(item, dict): - from autofit.mapper.prior_model.collection import ( - Collection, - ) + from 
autofit.mapper.prior_model.collection import Collection item = Collection(item) for attribute in path: @@ -1008,13 +1006,20 @@ def random_instance(self, ignore_prior_limits=False): @staticmethod @DynamicRecursionCache() - def from_instance(instance, model_classes=tuple()): + def from_instance( + instance, + model_classes: Union[type, Iterable[type]] = tuple(), + exclude_classes: Union[type, Iterable[type]] = tuple(), + ): """ - Recursively create an prior object model from an object model. + Recursively create a prior object model from an object model. Parameters ---------- model_classes + A tuple of classes that should be converted to a prior model + exclude_classes + A tuple of classes that should not be converted to a prior model instance A dictionary, list, class instance or model instance Returns @@ -1024,12 +1029,18 @@ def from_instance(instance, model_classes=tuple()): """ from autofit.mapper.prior_model import collection + if isinstance(instance, exclude_classes): + return instance if isinstance(instance, (Prior, AbstractPriorModel)): return instance elif isinstance(instance, list): result = collection.Collection( [ - AbstractPriorModel.from_instance(item, model_classes=model_classes) + AbstractPriorModel.from_instance( + item, + model_classes=model_classes, + exclude_classes=exclude_classes, + ) for item in instance ] ) @@ -1042,14 +1053,18 @@ def from_instance(instance, model_classes=tuple()): result, key, AbstractPriorModel.from_instance( - value, model_classes=model_classes + value, + model_classes=model_classes, + exclude_classes=exclude_classes, ), ) elif isinstance(instance, dict): result = collection.Collection( { key: AbstractPriorModel.from_instance( - value, model_classes=model_classes + value, + model_classes=model_classes, + exclude_classes=exclude_classes, ) for key, value in instance.items() } @@ -1064,7 +1079,9 @@ def from_instance(instance, model_classes=tuple()): instance.__class__, **{ key: AbstractPriorModel.from_instance( - value, model_classes=model_classes + value, + model_classes=model_classes, + exclude_classes=exclude_classes, ) for key, value in instance.__dict__.items() if key != "cls" @@ -1072,7 +1089,7 @@ def from_instance(instance, model_classes=tuple()): ) except AttributeError: return instance - if any([isinstance(instance, cls) for cls in model_classes]): + if isinstance(instance, model_classes): return result.as_model() return result diff --git a/autofit/non_linear/abstract_search.py b/autofit/non_linear/abstract_search.py index 1411e67dc..37d319311 100644 --- a/autofit/non_linear/abstract_search.py +++ b/autofit/non_linear/abstract_search.py @@ -22,6 +22,7 @@ FactorApproximation, ) from autofit.graphical.utils import Status +from autofit.mapper.prior_model.collection import Collection from autofit.non_linear.initializer import Initializer from autofit.non_linear.parallel import SneakyPool from autofit.non_linear.paths.abstract import AbstractPaths @@ -30,6 +31,8 @@ from autofit.non_linear.result import Result from autofit.non_linear.timer import Timer from .analysis import Analysis +from .analysis.combined import CombinedResult +from .analysis.indexed import IndexCollectionAnalysis from .paths.null import NullPaths from ..graphical.declarative.abstract import PriorFactor from ..graphical.expectation_propagation import AbstractFactorOptimiser @@ -445,6 +448,75 @@ def resample_figure_of_merit(self): """ return -np.inf + def fit_sequential( + self, + model, + analysis: IndexCollectionAnalysis, + info=None, + pickle_files=None, + 
log_likelihood_cap=None, + ) -> CombinedResult: + """ + Fit multiple analyses contained within the analysis sequentially. + + This can be useful for avoiding very high dimensional parameter spaces. + + Parameters + ---------- + log_likelihood_cap + analysis + Multiple analyses that are fit sequentially + model + An object that represents possible instances of some model with a + given dimensionality which is the number of free dimensions of the + model. + info + Optional dictionary containing information about the fit that can be loaded by the aggregator. + pickle_files : [str] + Optional list of strings specifying the path and filename of .pickle files, that are copied to each + model-fits pickles folder so they are accessible via the Aggregator. + + Returns + ------- + An object combining the results of each individual optimisation. + + Raises + ------ + AssertionError + If the model has 0 dimensions. + ValueError + If the analysis is not a combined analysis + """ + results = [] + + _paths = self.paths + original_name = self.paths.name or "analysis" + + model = analysis.modify_model(model=model) + + try: + if not isinstance(model, Collection): + model = [model for _ in range(len(analysis.analyses))] + except AttributeError: + raise ValueError( + f"Analysis with type {type(analysis)} is not supported by fit_sequential" + ) + + for i, (model, analysis) in enumerate(zip(model, analysis.analyses)): + self.paths = copy.copy(_paths) + self.paths.name = f"{original_name}/{i}" + results.append( + self.fit( + model=model, + analysis=analysis, + info=info, + pickle_files=pickle_files, + log_likelihood_cap=log_likelihood_cap, + ) + ) + self.paths = _paths + return CombinedResult(results) + def fit( self, model, diff --git a/autofit/non_linear/analysis/combined.py b/autofit/non_linear/analysis/combined.py index dcdff72d9..4bb792222 100644 --- a/autofit/non_linear/analysis/combined.py +++ b/autofit/non_linear/analysis/combined.py @@ -1,5 +1,5 @@ import logging -from typing import Union +from typing import Union, List from autoconf import conf from autofit.mapper.prior.abstract import Prior @@ -13,6 +13,39 @@ logger = logging.getLogger(__name__) +class CombinedResult: + def __init__(self, results: List[Result]): + """ + A `Result` object that is composed of multiple `Result` objects. This is used to combine the results of + multiple `Analysis` objects into a single `Result` object, for example when performing a model-fitting + analysis where there are multiple datasets. + + Parameters + ---------- + results + The list of `Result` objects that are combined into this `CombinedResult` object. + """ + self.child_results = results + + def __getattr__(self, item: str): + """ + Get an attribute of the first `Result` object in the list of `Result` objects. + """ + return getattr(self.child_results[0], item) + + def __iter__(self): + return iter(self.child_results) + + def __len__(self): + return len(self.child_results) + + def __getitem__(self, item: int) -> Result: + """ + Get a `Result` object from the list of `Result` objects. 
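
A rough usage sketch of `fit_sequential` and the `CombinedResult` it returns (illustrative only, not from the patch itself; it assumes the example `Analysis` and `Gaussian` classes shipped with autofit, and that summing analyses produces the combined analysis whose `.analyses` sequence `fit_sequential` iterates over):

import numpy as np
import autofit as af
from autofit.example.analysis import Analysis
from autofit.example.model import Gaussian

# Two simple 1D datasets simulated from the example Gaussian model.
xvalues = np.arange(100)
signal = Gaussian(centre=50.0, normalization=25.0, sigma=10.0).model_data_1d_via_xvalues_from(xvalues=xvalues)
noise_map = np.full(100, 2.0)
analysis = Analysis(data=signal + np.random.normal(0.0, 2.0, 100), noise_map=noise_map) + Analysis(
    data=signal + np.random.normal(0.0, 2.0, 100), noise_map=noise_map
)

search = af.DynestyStatic(name="sequential_fit")

# Each child analysis is fitted one after another; the return value is a CombinedResult.
result = search.fit_sequential(model=af.Model(Gaussian), analysis=analysis)

print(len(result))                                    # one child Result per analysis
print(result[0].max_log_likelihood_instance.centre)   # index into the child results
for child in result:                                  # or iterate over them
    print(child.max_log_likelihood_instance.sigma)
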
+ """ + return self.child_results[item] + + class CombinedAnalysis(Analysis): def __new__(cls, *analyses, **kwargs): from .model_analysis import ModelAnalysis, CombinedModelAnalysis @@ -199,15 +232,15 @@ def func(child_paths, analysis): def make_result(self, samples, model, sigma=1.0, use_errors=True, use_widths=False): child_results = [ analysis.make_result( - samples, model, sigma=1.0, use_errors=True, use_widths=False + samples, + model, + sigma=sigma, + use_errors=use_errors, + use_widths=use_widths, ) for analysis in self.analyses ] - result = self.analyses[0].make_result( - samples=samples, model=model, sigma=1.0, use_errors=True, use_widths=False - ) - result.child_results = child_results - return result + return CombinedResult(child_results) def __len__(self): return len(self.analyses) diff --git a/autofit/non_linear/analysis/indexed.py b/autofit/non_linear/analysis/indexed.py index fd56a148f..da7f9200d 100644 --- a/autofit/non_linear/analysis/indexed.py +++ b/autofit/non_linear/analysis/indexed.py @@ -1,7 +1,7 @@ import logging from .analysis import Analysis -from .combined import CombinedAnalysis +from .combined import CombinedAnalysis, CombinedResult from ..paths.abstract import AbstractPaths from autofit.mapper.prior_model.collection import Collection @@ -80,15 +80,7 @@ def make_result(self, samples, model, sigma=1.0, use_errors=True, use_widths=Fal ) for model, analysis in zip(model, self.analyses) ] - result = self.analyses[0].make_result( - samples=samples, - model=model, - sigma=sigma, - use_errors=use_errors, - use_widths=use_widths, - ) - result.child_results = child_results - return result + return CombinedResult(child_results) def modify_before_fit(self, paths: AbstractPaths, model: Collection): """ diff --git a/autofit/non_linear/grid/sensitivity.py b/autofit/non_linear/grid/sensitivity.py index 99a86d6eb..eaefc20dc 100644 --- a/autofit/non_linear/grid/sensitivity.py +++ b/autofit/non_linear/grid/sensitivity.py @@ -18,12 +18,7 @@ class JobResult(AbstractJobResult): - def __init__( - self, - number: int, - result: Result, - perturbed_result: Result - ): + def __init__(self, number: int, result: Result, perturbed_result: Result): """ The result of a single sensitivity comparison @@ -55,14 +50,14 @@ class Job(AbstractJob): use_instance = False def __init__( - self, - analysis_factory: "AnalysisFactory", - model: AbstractPriorModel, - perturbation_model: AbstractPriorModel, - base_instance: ModelInstance, - perturbation_instance: ModelInstance, - search: NonLinearSearch, - number: int, + self, + analysis_factory: "AnalysisFactory", + model: AbstractPriorModel, + perturbation_model: AbstractPriorModel, + base_instance: ModelInstance, + perturbation_instance: ModelInstance, + search: NonLinearSearch, + number: int, ): """ Job to run non-linear searches comparing how well a model and a model with a perturbation @@ -79,9 +74,7 @@ def __init__( search A non-linear search """ - super().__init__( - number=number - ) + super().__init__(number=number) self.analysis_factory = analysis_factory self.model = model @@ -90,15 +83,9 @@ def __init__( self.base_instance = base_instance self.perturbation_instance = perturbation_instance - self.search = search.copy_with_paths( - search.paths.for_sub_analysis( - "[base]", - ) - ) + self.search = search.copy_with_paths(search.paths.for_sub_analysis("[base]",)) self.perturbed_search = search.copy_with_paths( - search.paths.for_sub_analysis( - "[perturbed]", - ) + search.paths.for_sub_analysis("[perturbed]",) ) @cached_property @@ -126,27 +113,26 
@@ def perform(self) -> JobResult: perturbed_result = self.perturbation_model_func(perturbed_model=perturbed_model) return JobResult( - number=self.number, - result=result, - perturbed_result=perturbed_result + number=self.number, result=result, perturbed_result=perturbed_result ) def base_model_func(self): - return self.search.fit( - model=self.model, - analysis=self.analysis - ) + return self.search.fit(model=self.model, analysis=self.analysis) def perturbation_model_func(self, perturbed_model): - return self.perturbed_search.fit( - model=perturbed_model, - analysis=self.analysis - ) + return self.perturbed_search.fit(model=perturbed_model, analysis=self.analysis) class SensitivityResult: - def __init__(self, results: List[JobResult]): + """ + The result of a sensitivity mapping + + Parameters + ---------- + results + The results of each sensitivity job + """ self.results = sorted(results) def __getitem__(self, item): @@ -158,21 +144,41 @@ def __iter__(self): def __len__(self): return len(self.results) + @property + def log_likelihoods_base(self) -> List[float]: + """ + The log likelihoods of the base model for each sensitivity fit + """ + return [result.log_likelihood_base for result in self.results] -class Sensitivity: + @property + def log_likelihoods_perturbed(self) -> List[float]: + """ + The log likelihoods of the perturbed model for each sensitivity fit + """ + return [result.log_likelihood_perturbed for result in self.results] + @property + def log_likelihood_differences(self) -> List[float]: + """ + The log likelihood differences between the base and perturbed models + """ + return [result.log_likelihood_difference for result in self.results] + + +class Sensitivity: def __init__( - self, - base_model: AbstractPriorModel, - perturbation_model: AbstractPriorModel, - simulation_instance, - simulate_function: Callable, - analysis_class: Type[Analysis], - search: NonLinearSearch, - job_cls: ClassVar = Job, - number_of_steps: Union[Tuple[int], int] = 4, - number_of_cores: int = 2, - limit_scale: int = 1, + self, + base_model: AbstractPriorModel, + perturbation_model: AbstractPriorModel, + simulation_instance, + simulate_function: Callable, + analysis_class: Type[Analysis], + search: NonLinearSearch, + job_cls: ClassVar = Job, + number_of_steps: Union[Tuple[int], int] = 4, + number_of_cores: int = 2, + limit_scale: int = 1, ): """ Perform sensitivity mapping to evaluate whether a perturbation @@ -212,9 +218,7 @@ def __init__( A scale of 0.5 means priors have limits smaller than the grid square with width half a grid square. """ - self.logger = logging.getLogger( - f"Sensitivity ({search.name})" - ) + self.logger = logging.getLogger(f"Sensitivity ({search.name})") self.logger.info("Creating") @@ -243,7 +247,9 @@ def step_size(self): The size of a step in any given dimension in hyper space. 
""" if isinstance(self.number_of_steps, tuple): - return tuple([1 / number_of_steps for number_of_steps in self.number_of_steps]) + return tuple( + [1 / number_of_steps for number_of_steps in self.number_of_steps] + ) return 1 / self.number_of_steps def run(self) -> SensitivityResult: @@ -253,19 +259,20 @@ def run(self) -> SensitivityResult: """ self.logger.info("Running") + self.search.paths.save_unique_tag(is_grid_search=True) + headers = [ "index", *self._headers, "log_likelihood_base", "log_likelihood_perturbed", - "log_likelihood_difference" + "log_likelihood_difference", ] physical_values = list(self._physical_values) results = list() for result in Process.run_jobs( - self._make_jobs(), - number_of_cores=self.number_of_cores + self._make_jobs(), number_of_cores=self.number_of_cores ): if isinstance(result, Exception): raise result @@ -273,17 +280,12 @@ def run(self) -> SensitivityResult: results.append(result) results = sorted(results) - os.makedirs( - self.search.paths.output_path, - exist_ok=True - ) + os.makedirs(self.search.paths.output_path, exist_ok=True) with open(self.results_path, "w+") as f: writer = csv.writer(f) writer.writerow(headers) for result_ in results: - values = physical_values[ - result_.number - ] + values = physical_values[result_.number] writer.writerow( padding(item) for item in [ @@ -292,15 +294,18 @@ def run(self) -> SensitivityResult: result_.log_likelihood_base, result_.log_likelihood_perturbed, result_.log_likelihood_difference, - ]) + ] + ) + + result = SensitivityResult(results) + + self.search.paths.save_object("result", result) return SensitivityResult(results) @property def results_path(self): - return Path( - self.search.paths.output_path - ) / "results.csv" + return Path(self.search.paths.output_path) / "results.csv" @property def _lists(self) -> List[List[float]]: @@ -309,10 +314,7 @@ def _lists(self) -> List[List[float]]: the perturbation_model and create the individual perturbations. """ - return make_lists( - self.perturbation_model.prior_count, - step_size=self.step_size - ) + return make_lists(self.perturbation_model.prior_count, step_size=self.step_size) @property def _physical_values(self) -> List[List[float]]: @@ -321,14 +323,10 @@ def _physical_values(self) -> List[List[float]]: """ return [ [ - prior.value_for( - unit_value + prior.value_for(unit_value) + for prior, unit_value in zip( + self.perturbation_model.priors_ordered_by_id, unit_values ) - for prior, unit_value - in zip( - self.perturbation_model.priors_ordered_by_id, - unit_values - ) ] for unit_values in self._lists ] @@ -350,36 +348,23 @@ def _labels(self) -> Generator[str, None, None]: """ for list_ in self._lists: strings = list() - for value, prior_tuple in zip( - list_, - self.perturbation_model.prior_tuples - ): + for value, prior_tuple in zip(list_, self.perturbation_model.prior_tuples): path, prior = prior_tuple - value = prior.value_for( - value - ) - strings.append( - f"{path}_{value}" - ) + value = prior.value_for(value) + strings.append(f"{path}_{value}") yield "_".join(strings) @property - def _perturbation_instances(self) -> Generator[ - ModelInstance, None, None - ]: + def _perturbation_instances(self) -> Generator[ModelInstance, None, None]: """ A list of instances each of which defines a perturbation to be applied to the image. 
""" for list_ in self._lists: - yield self.perturbation_model.instance_from_unit_vector( - list_ - ) + yield self.perturbation_model.instance_from_unit_vector(list_) @property - def _perturbation_models(self) -> Generator[ - AbstractPriorModel, None, None - ]: + def _perturbation_models(self) -> Generator[AbstractPriorModel, None, None]: """ A list of models representing a perturbation at each grid square. @@ -395,29 +380,21 @@ def _perturbation_models(self) -> Generator[ prior.value_for(min(1.0, centre + half_step)), ) for centre, prior in zip( - list_, - self.perturbation_model.priors_ordered_by_id + list_, self.perturbation_model.priors_ordered_by_id ) ] yield self.perturbation_model.with_limits(limits) @property - def _searches(self) -> Generator[ - NonLinearSearch, None, None - ]: + def _searches(self) -> Generator[NonLinearSearch, None, None]: """ A list of non-linear searches, each of which is applied to one perturbation. """ for label in self._labels: - yield self._search_instance( - label - ) + yield self._search_instance(label) - def _search_instance( - self, - name_path: str - ) -> NonLinearSearch: + def _search_instance(self, name_path: str) -> NonLinearSearch: """ Create a search instance, distinguished by its name @@ -432,9 +409,7 @@ def _search_instance( """ paths = self.search.paths search_instance = self.search.copy_with_paths( - paths.for_sub_analysis( - name_path, - ) + paths.for_sub_analysis(name_path,) ) return search_instance @@ -446,15 +421,9 @@ def _make_jobs(self) -> Generator[Job, None, None]: Each job fits a perturbed image with the original model and a model which includes a perturbation. """ - for number, ( - perturbation_instance, - perturbation_model, - search - ) in enumerate(zip( - self._perturbation_instances, - self._perturbation_models, - self._searches - )): + for number, (perturbation_instance, perturbation_model, search) in enumerate( + zip(self._perturbation_instances, self._perturbation_models, self._searches) + ): instance = copy(self.instance) instance.perturbation = perturbation_instance @@ -469,16 +438,13 @@ def _make_jobs(self) -> Generator[Job, None, None]: base_instance=self.instance, perturbation_instance=perturbation_instance, search=search, - number=number + number=number, ) class AnalysisFactory: def __init__( - self, - instance, - simulate_function, - analysis_class, + self, instance, simulate_function, analysis_class, ): """ Callable to delay simulation such that it is performed @@ -489,9 +455,5 @@ def __init__( self.analysis_class = analysis_class def __call__(self): - dataset = self.simulate_function( - self.instance - ) - return self.analysis_class( - dataset - ) + dataset = self.simulate_function(self.instance) + return self.analysis_class(dataset) diff --git a/autofit/non_linear/initializer.py b/autofit/non_linear/initializer.py index 3b118b0d0..6c15840e4 100644 --- a/autofit/non_linear/initializer.py +++ b/autofit/non_linear/initializer.py @@ -1,9 +1,10 @@ import configparser import logging +import os import random from abc import ABC, abstractmethod from typing import Dict, Tuple, List -import os + import numpy as np from autofit import exc @@ -81,7 +82,9 @@ def samples_from_model( except exc.FitException: pass - if np.allclose(a=figures_of_merit_list[0], b=figures_of_merit_list[1:]): + if total_points > 1 and np.allclose( + a=figures_of_merit_list[0], b=figures_of_merit_list[1:] + ): raise exc.InitializerException( """ The initial samples all have the same figure of merit (e.g. log likelihood values). 
diff --git a/autofit/non_linear/nest/dynesty/abstract.py b/autofit/non_linear/nest/dynesty/abstract.py index cf86716f2..9fd639c22 100644 --- a/autofit/non_linear/nest/dynesty/abstract.py +++ b/autofit/non_linear/nest/dynesty/abstract.py @@ -5,7 +5,6 @@ import numpy as np from dynesty import NestedSampler, DynamicNestedSampler -from dynesty.pool import Pool from autoconf import conf from autofit import exc @@ -137,6 +136,8 @@ def _fit( set of accepted samples of the fit. """ + from dynesty.pool import Pool + fitness_function = self.fitness_function_from_model_and_analysis( model=model, analysis=analysis, log_likelihood_cap=log_likelihood_cap, ) @@ -160,7 +161,7 @@ def _fit( if conf.instance["non_linear"]["nest"][self.__class__.__name__][ "parallel" - ]["force_x1_cpu"] or self.kwargs.get("force_x1_cpu"): + ].get("force_x1_cpu") or self.kwargs.get("force_x1_cpu"): raise RuntimeError @@ -375,12 +376,12 @@ def sampler_from( model: AbstractPriorModel, fitness_function, checkpoint_exists: bool, - pool: Optional["Pool"], + pool: Optional, queue_size: Optional[int], ): raise NotImplementedError() - def check_pool(self, uses_pool: bool, pool: Pool): + def check_pool(self, uses_pool: bool, pool): if (uses_pool and pool is None) or (not uses_pool and pool is not None): raise exc.SearchException( diff --git a/autofit/non_linear/nest/dynesty/dynamic.py b/autofit/non_linear/nest/dynesty/dynamic.py index 8bb67feb7..9afa7acbb 100644 --- a/autofit/non_linear/nest/dynesty/dynamic.py +++ b/autofit/non_linear/nest/dynesty/dynamic.py @@ -1,9 +1,6 @@ from __future__ import annotations from typing import TYPE_CHECKING, Optional -if TYPE_CHECKING: - from dynesty.pool import Pool - from dynesty.dynesty import DynamicNestedSampler from autofit.non_linear.nest.dynesty.samples import SamplesDynesty from autofit.mapper.prior_model.abstract import AbstractPriorModel @@ -102,7 +99,7 @@ def sampler_from( model: AbstractPriorModel, fitness_function, checkpoint_exists : bool, - pool: Optional[Pool], + pool: Optional, queue_size: Optional[int] ): """ diff --git a/autofit/non_linear/nest/dynesty/plotter.py b/autofit/non_linear/nest/dynesty/plotter.py index 0bb96294f..d14e884ef 100644 --- a/autofit/non_linear/nest/dynesty/plotter.py +++ b/autofit/non_linear/nest/dynesty/plotter.py @@ -1,47 +1,11 @@ from dynesty import plotting as dyplot -from functools import wraps -import logging from autofit.plot import SamplesPlotter -from autofit.plot.samples_plotters import skip_plot_in_test_mode - -logger = logging.getLogger(__name__) - -def log_value_error(func): - @wraps(func) - def wrapper(self, *args, **kwargs): - - try: - return func(self, *args, **kwargs) - except ValueError: - self.log_plot_exception(func.__name__) - - return wrapper +from autofit.plot.samples_plotters import skip_plot_in_test_mode +from autofit.plot.samples_plotters import log_value_error class DynestyPlotter(SamplesPlotter): - - @staticmethod - def log_plot_exception(plot_name : str): - """ - Plotting the results of a ``dynesty`` model-fit before they have converged on an - accurate estimate of the posterior can lead the ``dynesty`` plotting routines - to raise a ``ValueError``. - - This exception is caught in each of the plotting methods below, and this - function is used to log the behaviour. 
- - Parameters - ---------- - plot_name - The name of the ``dynesty`` plot which raised a ``ValueError`` - """ - - logger.info( - f"Dynesty unable to produce {plot_name} visual: posterior estimate therefore" - "not yet sufficient for this model-fit is not yet robust enough to do this. Visual" - "should be produced in later update, once posterior estimate is updated." - ) @skip_plot_in_test_mode def boundplot(self, **kwargs): diff --git a/autofit/non_linear/nest/dynesty/static.py b/autofit/non_linear/nest/dynesty/static.py index 8ec31b3ce..c9955c184 100644 --- a/autofit/non_linear/nest/dynesty/static.py +++ b/autofit/non_linear/nest/dynesty/static.py @@ -1,9 +1,6 @@ from __future__ import annotations from typing import TYPE_CHECKING, Optional -if TYPE_CHECKING: - from dynesty.pool import Pool - from dynesty import NestedSampler as StaticSampler from autofit.database.sqlalchemy_ import sa from autofit.mapper.prior_model.abstract import AbstractPriorModel @@ -106,7 +103,7 @@ def sampler_from( model: AbstractPriorModel, fitness_function, checkpoint_exists : bool, - pool: Optional[Pool], + pool: Optional, queue_size: Optional[int] ): """ diff --git a/autofit/non_linear/nest/ultranest/plotter.py b/autofit/non_linear/nest/ultranest/plotter.py index c6d83d431..6adeac060 100644 --- a/autofit/non_linear/nest/ultranest/plotter.py +++ b/autofit/non_linear/nest/ultranest/plotter.py @@ -1,9 +1,18 @@ from autofit.plot import SamplesPlotter +from autofit.plot.samples_plotters import skip_plot_in_test_mode +from autofit.plot.samples_plotters import log_value_error class UltraNestPlotter(SamplesPlotter): + @skip_plot_in_test_mode + @log_value_error def cornerplot(self, **kwargs): + """ + Plots the in-built ``ultranest`` plot ``cornerplot``. + + This figure plots a corner plot of the 1-D and 2-D marginalized posteriors. + """ from ultranest import plot @@ -15,32 +24,38 @@ def cornerplot(self, **kwargs): self.output.to_figure(structure=None, auto_filename="cornerplot") self.close() + @skip_plot_in_test_mode + @log_value_error def runplot(self, **kwargs): + """ + Plots the in-built ``ultranest`` plot ``runplot``. + This figure plots live points, ln(likelihood), ln(weight), and ln(evidence) vs. ln(prior volume). + """ from ultranest import plot - try: - plot.runplot( - results=self.samples.results_internal, - **kwargs - ) - except KeyError: - pass + plot.runplot( + results=self.samples.results_internal, + **kwargs + ) self.output.to_figure(structure=None, auto_filename="runplot") self.close() + @skip_plot_in_test_mode + @log_value_error def traceplot(self, **kwargs): + """ + Plots the in-built ``ultranest`` plot ``traceplot``. + This figure plots traces and marginalized posteriors for each parameter. 
+ """ from ultranest import plot - try: - plot.traceplot( - results=self.samples.results_internal, - **kwargs - ) - except KeyError: - pass + plot.traceplot( + results=self.samples.results_internal, + **kwargs + ) self.output.to_figure(structure=None, auto_filename="traceplot") self.close() \ No newline at end of file diff --git a/autofit/plot/samples_plotters.py b/autofit/plot/samples_plotters.py index f4d1a0d09..93f127cec 100644 --- a/autofit/plot/samples_plotters.py +++ b/autofit/plot/samples_plotters.py @@ -1,9 +1,23 @@ import matplotlib.pyplot as plt from functools import wraps +import logging import os from autofit.plot.output import Output +logger = logging.getLogger(__name__) + +def log_value_error(func): + + @wraps(func) + def wrapper(self, *args, **kwargs): + + try: + return func(self, *args, **kwargs) + except (ValueError, KeyError, AttributeError, AssertionError): + self.log_plot_exception(func.__name__) + + return wrapper def skip_plot_in_test_mode(func): """ @@ -65,6 +79,26 @@ def close(self): if plt.fignum_exists(num=1): plt.close() + def log_plot_exception(self, plot_name : str): + """ + Plotting the results of a ``dynesty`` model-fit before they have converged on an + accurate estimate of the posterior can lead the ``dynesty`` plotting routines + to raise a ``ValueError``. + + This exception is caught in each of the plotting methods below, and this + function is used to log the behaviour. + + Parameters + ---------- + plot_name + The name of the ``dynesty`` plot which raised a ``ValueError`` + """ + + logger.info( + f"{self.__class__.__name__} unable to produce {plot_name} visual: posterior estimate therefore" + "not yet sufficient for this model-fit is not yet robust enough to do this. Visual" + "should be produced in later update, once posterior estimate is updated." + ) class MCMCPlotter(SamplesPlotter): diff --git a/docs/overview/multi_datasets.rst b/docs/overview/multi_datasets.rst index 441664585..4bc2d3e60 100644 --- a/docs/overview/multi_datasets.rst +++ b/docs/overview/multi_datasets.rst @@ -189,6 +189,23 @@ We can again fit this model as per usual: result_list = search.fit(model=model, analysis=analysis) +Individual Sequential Searches +------------------------------ + +The API above is used to create a model with free parameters across ``Analysis`` objects, which are all fit +simultaneously using a summed ``log_likelihood_function`` and single non-linear search. + +Each ``Analysis`` can be fitted one-by-one, using a series of multiple non-linear searches, using +the ``fit_sequential`` method: + +.. code-block:: python + + result_list = search.fit_sequential(model=model, analysis=analysis) + +The benefit of this method is for complex high dimensionality models (e.g. when many parameters are passed +to `` analysis.with_free_parameters``, it breaks the fit down into a series of lower dimensionality non-linear +searches that may convergence on a solution more reliably. 
+ Variable Model With Relationships --------------------------------- diff --git a/docs/requirements.txt b/docs/requirements.txt index 93c7fbbd9..d6e56a19f 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,7 +1,7 @@ corner==2.2.1 decorator>=4.2.1 dill>=0.3.1.1 -dynesty==2.0.2 +dynesty==2.1.0 typing-inspect>=0.4.0 emcee>=3.0.2 gprof2dot==2021.2.21 diff --git a/optional_requirements.txt b/optional_requirements.txt index 529a59a5f..a3ccbe750 100644 --- a/optional_requirements.txt +++ b/optional_requirements.txt @@ -2,4 +2,4 @@ getdist==1.4 jax==0.3.1 jaxlib==0.3.0 ultranest==3.5.5 -zeus-mcmc==2.4.1 +zeus-mcmc==2.5.4 diff --git a/requirements.txt b/requirements.txt index e12c665b6..f8bca9fa7 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,7 +1,7 @@ corner==2.2.1 decorator>=4.2.1 dill>=0.3.1.1 -dynesty==2.0.2 +dynesty==2.1.0 typing-inspect>=0.4.0 emcee>=3.1.3 matplotlib diff --git a/test_autofit/analysis/test_free_parameter.py b/test_autofit/analysis/test_free_parameter.py index 3dc430541..80cc6c1f4 100644 --- a/test_autofit/analysis/test_free_parameter.py +++ b/test_autofit/analysis/test_free_parameter.py @@ -14,13 +14,13 @@ def test_copy(): assert collection.prior_count == model.prior_count -def test_log_likelihood( - modified, - combined_analysis -): - assert combined_analysis.log_likelihood_function( - modified.instance_from_prior_medians() - ) == 2 +def test_log_likelihood(modified, combined_analysis): + assert ( + combined_analysis.log_likelihood_function( + modified.instance_from_prior_medians() + ) + == 2 + ) def test_analyses_example(Analysis): @@ -33,98 +33,59 @@ def test_analyses_example(Analysis): ]: copy = model.copy() copy.centre = prior - analyses.append( - Analysis() - ) + analyses.append(Analysis()) -@pytest.fixture( - name="combined_analysis" -) +@pytest.fixture(name="combined_analysis") def make_combined_analysis(model, Analysis): - return (Analysis() + Analysis()).with_free_parameters( - model.centre - ) + return (Analysis() + Analysis()).with_free_parameters(model.centre) def test_multiple_free_parameters(model, Analysis): combined_analysis = (Analysis() + Analysis()).with_free_parameters( - model.centre, - model.sigma + model.centre, model.sigma ) first, second = combined_analysis.modify_model(model) assert first.centre is not second.centre assert first.sigma is not second.sigma -def test_add_free_parameter( - combined_analysis -): - assert isinstance( - combined_analysis, - FreeParameterAnalysis - ) +def test_add_free_parameter(combined_analysis): + assert isinstance(combined_analysis, FreeParameterAnalysis) -@pytest.fixture( - name="modified" -) -def make_modified( - model, - combined_analysis -): +@pytest.fixture(name="modified") +def make_modified(model, combined_analysis): return combined_analysis.modify_model(model) -def test_modify_model( - modified -): +def test_modify_model(modified): assert isinstance(modified, af.Collection) assert len(modified) == 2 -def test_modified_models( - modified -): +def test_modified_models(modified): first, second = modified - assert isinstance( - first.sigma, - af.Prior - ) + assert isinstance(first.sigma, af.Prior) assert first.sigma == second.sigma assert first.centre != second.centre -@pytest.fixture( - name="result" -) +@pytest.fixture(name="result") def make_result( - combined_analysis, - model, + combined_analysis, model, ): optimizer = MockOptimizer() - return optimizer.fit( - model, - combined_analysis - ) + return optimizer.fit(model, combined_analysis) -@pytest.fixture( - autouse=True -) 
+@pytest.fixture(autouse=True) def do_remove_output(remove_output): yield remove_output() -def test_result_type(result, Result): - assert isinstance(result, Result) - - for result_ in result: - assert isinstance(result_, Result) - - def test_integration(result): result_1, result_2 = result @@ -133,24 +94,16 @@ def test_integration(result): def test_tuple_prior(model, Analysis): - model.centre = af.TuplePrior( - centre_0=af.UniformPrior() - ) - combined = (Analysis() + Analysis()).with_free_parameters( - model.centre - ) + model.centre = af.TuplePrior(centre_0=af.UniformPrior()) + combined = (Analysis() + Analysis()).with_free_parameters(model.centre) first, second = combined.modify_model(model) assert first.centre.centre_0 != second.centre.centre_0 def test_prior_model(model, Analysis): - model = af.Collection( - model=model - ) - combined = (Analysis() + Analysis()).with_free_parameters( - model.model - ) + model = af.Collection(model=model) + combined = (Analysis() + Analysis()).with_free_parameters(model.model) modified = combined.modify_model(model) first = modified[0].model second = modified[1].model @@ -162,12 +115,7 @@ def test_prior_model(model, Analysis): def test_split_samples(modified): samples = af.Samples( - modified, - af.Sample.from_lists( - modified, - [[1, 2, 3, 4]], - [1], [1], [1] - ), + modified, af.Sample.from_lists(modified, [[1, 2, 3, 4]], [1], [1], [1]), ) combined = samples.max_log_likelihood() diff --git a/test_autofit/config/general.yaml b/test_autofit/config/general.yaml index b547327a4..90c934f6e 100644 --- a/test_autofit/config/general.yaml +++ b/test_autofit/config/general.yaml @@ -3,11 +3,13 @@ analysis: hpc: hpc_mode: false # If True, use HPC mode, which disables GUI visualization, logging to screen and other settings which are not suited to running on a super computer. iterations_per_update: 5000 # The number of iterations between every update (visualization, results output, etc) in HPC mode. +inversion: + check_reconstruction: true # If True, the inversion's reconstruction is checked to ensure the solution of a meshs's mapper is not an invalid solution where the values are all the same. + reconstruction_vmax_factor: 0.5 # Plots of an Inversion's reconstruction use the reconstructed data's bright value multiplied by this factor. model: ignore_prior_limits: false # If ``True`` the limits applied to priors will be ignored, where limits set upper / lower limits. This stops PriorLimitException's from being raised. output: force_pickle_overwrite: false # If True pickle files output by a search (e.g. samples.pickle) are recreated when a new model-fit is performed. - identifier_version: 4 info_whitespace_length: 80 # Length of whitespace between the parameter names and values in the model.info / result.info log_level: INFO # The level of information output by logging. log_to_file: false # If True, outputs the non-linear search log to a file (and not printed to screen). 
diff --git a/test_autofit/conftest.py b/test_autofit/conftest.py index 4cb8a84ca..650dc80a4 100644 --- a/test_autofit/conftest.py +++ b/test_autofit/conftest.py @@ -13,37 +13,24 @@ from autofit import fixtures from autofit.database.model import sa -if sys.platform == 'darwin': - multiprocessing.set_start_method('forkserver') +if sys.platform == "darwin": + multiprocessing.set_start_method("forkserver") directory = Path(__file__).parent -@pytest.fixture( - name="test_directory", - scope="session" -) +@pytest.fixture(name="test_directory", scope="session") def make_test_directory(): return directory -@pytest.fixture( - name="output_directory", - scope="session" -) -def make_output_directory( - test_directory -): +@pytest.fixture(name="output_directory", scope="session") +def make_output_directory(test_directory): return test_directory / "output" -@pytest.fixture( - name="remove_output", - scope="session" -) -def make_remove_output( - output_directory -): +@pytest.fixture(name="remove_output", scope="session") +def make_remove_output(output_directory): def remove_output(): try: for item in os.listdir(output_directory): @@ -51,27 +38,18 @@ def remove_output(): item_path = output_directory / item if item_path.is_dir(): shutil.rmtree( - item_path, - ignore_errors=True, + item_path, ignore_errors=True, ) else: - os.remove( - item_path - ) - except FileExistsError: + os.remove(item_path) + except (FileExistsError, FileNotFoundError): pass return remove_output -@pytest.fixture( - autouse=True, - scope="session" -) -def do_remove_output( - output_directory, - remove_output -): +@pytest.fixture(autouse=True, scope="session") +def do_remove_output(output_directory, remove_output): yield remove_output() @@ -93,7 +71,7 @@ def make_plot_patch(monkeypatch): @pytest.fixture(name="session") def make_session(): - engine = sa.create_engine('sqlite://') + engine = sa.create_engine("sqlite://") session = sa.orm.sessionmaker(bind=engine)() db.Base.metadata.create_all(engine) yield session @@ -101,10 +79,7 @@ def make_session(): engine.dispose() -@pytest.fixture( - autouse=True, - scope="session" -) +@pytest.fixture(autouse=True, scope="session") def remove_logs(): yield for d, _, files in os.walk(directory): @@ -117,13 +92,11 @@ def remove_logs(): def set_config_path(): conf.instance.push( new_path=path.join(directory, "config"), - output_path=path.join(directory, "output") + output_path=path.join(directory, "output"), ) -@pytest.fixture( - name="model_gaussian_x1" -) +@pytest.fixture(name="model_gaussian_x1") def make_model_gaussian_x1(): return fixtures.make_model_gaussian_x1() diff --git a/test_autofit/mapper/model/test_json.py b/test_autofit/mapper/model/test_json.py index 74619c54e..ebbd4f4ca 100644 --- a/test_autofit/mapper/model/test_json.py +++ b/test_autofit/mapper/model/test_json.py @@ -5,54 +5,37 @@ import autofit as af -@pytest.fixture( - name="model_dict" -) + +@pytest.fixture(name="model_dict") def make_model_dict(): return { "type": "model", "class_path": "autofit.example.model.Gaussian", - "centre": {'lower_limit': 0.0, 'type': 'Uniform', 'upper_limit': 2.0}, - "normalization": {'lower_limit': 0.0, 'type': 'Uniform', 'upper_limit': 1.0}, - "sigma": {'lower_limit': 0.0, 'type': 'Uniform', 'upper_limit': 1.0}, + "centre": {"lower_limit": 0.0, "type": "Uniform", "upper_limit": 2.0}, + "normalization": {"lower_limit": 0.0, "type": "Uniform", "upper_limit": 1.0}, + "sigma": {"lower_limit": 0.0, "type": "Uniform", "upper_limit": 1.0}, } -@pytest.fixture( - name="instance_dict" -) 
+@pytest.fixture(name="instance_dict") def make_instance_dict(): return { "type": "instance", "class_path": "autofit.example.model.Gaussian", "centre": 0.0, "normalization": 0.1, - "sigma": 0.01 + "sigma": 0.01, } -@pytest.fixture( - name="collection_dict" -) -def make_collection_dict( - model_dict -): - return { - "gaussian": model_dict, - "type": "collection" - } +@pytest.fixture(name="collection_dict") +def make_collection_dict(model_dict): + return {"gaussian": model_dict, "type": "collection"} -@pytest.fixture( - name="model" -) +@pytest.fixture(name="model") def make_model(): - return af.Model( - af.Gaussian, - centre=af.UniformPrior( - upper_limit=2.0 - ) - ) + return af.Model(af.Gaussian, centre=af.UniformPrior(upper_limit=2.0)) class TestTuple: @@ -61,118 +44,61 @@ def test_tuple_prior(self): tuple_prior.tup_0 = 0 tuple_prior.tup_1 = 1 - result = af.Model.from_dict( - tuple_prior.dict() - ) - assert isinstance( - result, - af.TuplePrior - ) + result = af.Model.from_dict(tuple_prior.dict()) + assert isinstance(result, af.TuplePrior) def test_model_with_tuple(self): tuple_model = af.Model(af.m.MockWithTuple) tuple_model.instance_from_prior_medians() model_dict = tuple_model.dict() - model = af.Model.from_dict( - model_dict - ) + model = af.Model.from_dict(model_dict) instance = model.instance_from_prior_medians() assert instance.tup == (0.5, 0.5) class TestFromDict: - def test_model_from_dict( - self, - model_dict - ): - model = af.Model.from_dict( - model_dict - ) + def test_model_from_dict(self, model_dict): + model = af.Model.from_dict(model_dict) assert model.cls == af.Gaussian assert model.prior_count == 3 assert model.centre.upper_limit == 2.0 - def test_instance_from_dict( - self, - instance_dict - ): - instance = af.Model.from_dict( - instance_dict - ) - assert isinstance( - instance, - af.Gaussian - ) + def test_instance_from_dict(self, instance_dict): + instance = af.Model.from_dict(instance_dict) + assert isinstance(instance, af.Gaussian) assert instance.centre == 0.0 assert instance.normalization == 0.1 assert instance.sigma == 0.01 - def test_collection_from_dict( - self, - collection_dict - ): - collection = af.Model.from_dict( - collection_dict - ) - assert isinstance( - collection, - af.Collection - ) + def test_collection_from_dict(self, collection_dict): + collection = af.Model.from_dict(collection_dict) + assert isinstance(collection, af.Collection) assert len(collection) == 1 class TestToDict: - def test_model_priors( - self, - model, - model_dict - ): + def test_model_priors(self, model, model_dict): assert model.dict() == model_dict - def test_model_floats( - self, - instance_dict - ): - model = af.Model( - af.Gaussian, - centre=0.0, - normalization=0.1, - sigma=0.01 - ) + def test_model_floats(self, instance_dict): + model = af.Model(af.Gaussian, centre=0.0, normalization=0.1, sigma=0.01) assert model.dict() == instance_dict - def test_collection( - self, - model, - collection_dict - ): - collection = af.Collection( - gaussian=model - ) + def test_collection(self, model, collection_dict): + collection = af.Collection(gaussian=model) assert collection.dict() == collection_dict - def test_collection_instance( - self, - instance_dict - ): - collection = af.Collection( - gaussian=af.Gaussian() - ) - assert collection.dict() == { - "gaussian": instance_dict, - "type": "collection" - } + def test_collection_instance(self, instance_dict): + collection = af.Collection(gaussian=af.Gaussian()) + assert collection.dict() == {"gaussian": instance_dict, "type": 
"collection"} class TestFromJson: - def test__from_json(self, model_dict): - model = af.Model.from_dict( - model_dict - ) + model = af.Model.from_dict(model_dict) model_file = Path(__file__).parent / "model.json" @@ -190,4 +116,4 @@ def test__from_json(self, model_dict): assert model.prior_count == 3 assert model.centre.upper_limit == 2.0 - os.remove(model_file) \ No newline at end of file + os.remove(model_file) diff --git a/test_autofit/mapper/model/test_model_instance.py b/test_autofit/mapper/model/test_model_instance.py index 61e1227a9..05e29ee37 100644 --- a/test_autofit/mapper/model/test_model_instance.py +++ b/test_autofit/mapper/model/test_model_instance.py @@ -2,6 +2,7 @@ import autofit as af + @pytest.fixture(name="mock_components_1") def make_mock_components_1(): return af.m.MockComponents() @@ -42,12 +43,23 @@ def test_as_model(self, instance): def test_object_for_path(self, instance, mock_components_1, mock_components_2): assert instance.object_for_path(("mock_components_2",)) is mock_components_2 - assert instance.object_for_path(("sub", "mock_components_1")) is mock_components_1 - assert instance.object_for_path(("sub", "sub", "mock_components_1")) is mock_components_1 - setattr(instance.object_for_path(("mock_components_2",)), "mock_components", mock_components_1) + assert ( + instance.object_for_path(("sub", "mock_components_1")) is mock_components_1 + ) + assert ( + instance.object_for_path(("sub", "sub", "mock_components_1")) + is mock_components_1 + ) + setattr( + instance.object_for_path(("mock_components_2",)), + "mock_components", + mock_components_1, + ) assert mock_components_2.mock_components is mock_components_1 - def test_path_instance_tuples_for_class(self, instance, mock_components_1, mock_components_2): + def test_path_instance_tuples_for_class( + self, instance, mock_components_1, mock_components_2 + ): result = instance.path_instance_tuples_for_class(af.m.MockComponents) assert result[0] == (("mock_components_2",), mock_components_2) assert result[1] == (("sub", "mock_components_1"), mock_components_1) @@ -59,8 +71,7 @@ def test_simple_model(self): mapper.mock_class = af.m.MockClassx2 model_map = mapper.instance_from_unit_vector( - [1.0, 1.0], - ignore_prior_limits=True + [1.0, 1.0], ignore_prior_limits=True ) assert isinstance(model_map.mock_class, af.m.MockClassx2) @@ -74,8 +85,7 @@ def test_two_object_model(self): mapper.mock_class_2 = af.m.MockClassx2 model_map = mapper.instance_from_unit_vector( - [1.0, 0.0, 0.0, 1.0], - ignore_prior_limits=True + [1.0, 0.0, 0.0, 1.0], ignore_prior_limits=True ) assert isinstance(model_map.mock_class_1, af.m.MockClassx2) @@ -154,3 +164,42 @@ def test_match_tuple(self): assert model_map.mock_profile.one_tuple == (1.0, 1.0) assert model_map.mock_profile.two == 0.0 + + +class Child(af.Gaussian): + pass + + +class Child2(af.Gaussian): + pass + + +@pytest.fixture(name="exclude_instance") +def make_excluded_instance(): + return af.ModelInstance( + {"child": Child(), "gaussian": af.Gaussian(), "child2": Child2(),} + ) + + +def test_single_argument(exclude_instance): + model = exclude_instance.as_model(af.Gaussian) + + assert isinstance(model.gaussian, af.Model) + assert isinstance(model.child, af.Model) + assert isinstance(model.child2, af.Model) + + +def test_filter_child(exclude_instance): + model = exclude_instance.as_model(af.Gaussian, excluded_classes=Child) + + assert isinstance(model.gaussian, af.Model) + assert not isinstance(model.child, af.Model) + assert isinstance(model.child2, af.Model) + + +def 
test_filter_multiple(exclude_instance): + model = exclude_instance.as_model(af.Gaussian, excluded_classes=(Child, Child2),) + + assert isinstance(model.gaussian, af.Model) + assert not isinstance(model.child, af.Model) + assert not isinstance(model.child2, af.Model) diff --git a/test_autofit/mapper/test_has.py b/test_autofit/mapper/test_has.py index 226591572..b5cd8803b 100644 --- a/test_autofit/mapper/test_has.py +++ b/test_autofit/mapper/test_has.py @@ -1,130 +1,103 @@ +import pytest + import autofit as af +from autofit.example import Exponential + class GaussianChild(af.Gaussian): pass def test_inheritance(): - collection = af.Collection( - first=af.Model( - GaussianChild - ), - second=GaussianChild() - ) + collection = af.Collection(first=af.Model(GaussianChild), second=GaussianChild()) - assert collection.has_instance( - af.Gaussian - ) - assert collection.has_model( - af.Gaussian - ) - - -def test_embedded(): - collection = af.Collection( - model=af.Model( - af.Gaussian, - centre=GaussianChild - ) - ) - assert collection.has_model( - GaussianChild - ) + assert collection.has_instance(af.Gaussian) + assert collection.has_model(af.Gaussian) def test_no_free_parameters(): collection = af.Collection( - gaussian=af.Model( - af.Gaussian, - centre=1.0, - normalization=1.0, - sigma=1.0, - ) + gaussian=af.Model(af.Gaussian, centre=1.0, normalization=1.0, sigma=1.0,) ) assert collection.prior_count == 0 - assert collection.has_model( - af.Gaussian - ) is False + assert collection.has_model(af.Gaussian) is False def test_instance(): - collection = af.Collection( - gaussian=af.Gaussian() - ) + collection = af.Collection(gaussian=af.Gaussian()) - assert collection.has_instance( - af.Gaussian - ) is True - assert collection.has_model( - af.Gaussian - ) is False + assert collection.has_instance(af.Gaussian) is True + assert collection.has_model(af.Gaussian) is False def test_model(): - collection = af.Collection( - gaussian=af.Model( - af.Gaussian - ) - ) + collection = af.Collection(gaussian=af.Model(af.Gaussian)) - assert collection.has_model( - af.Gaussian - ) is True - assert collection.has_instance( - af.Gaussian - ) is False + assert collection.has_model(af.Gaussian) is True + assert collection.has_instance(af.Gaussian) is False def test_both(): - collection = af.Collection( - gaussian=af.Model( - af.Gaussian - ), - gaussian_2=af.Gaussian() - ) + collection = af.Collection(gaussian=af.Model(af.Gaussian), gaussian_2=af.Gaussian()) - assert collection.has_model( - af.Gaussian - ) is True - assert collection.has_instance( - af.Gaussian - ) is True + assert collection.has_model(af.Gaussian) is True + assert collection.has_instance(af.Gaussian) is True def test_embedded(): - collection = af.Collection( - gaussian=af.Model( - af.Gaussian, - centre=af.Gaussian() - ), - ) + collection = af.Collection(gaussian=af.Model(af.Gaussian, centre=af.Gaussian()),) - assert collection.has_model( - af.Gaussian - ) is True - assert collection.has_instance( - af.Gaussian - ) is True + assert collection.has_model(af.Gaussian) is True + assert collection.has_instance(af.Gaussian) is True def test_is_only_model(): collection = af.Collection( - gaussian=af.Model( - af.Gaussian - ), - gaussian_2=af.Model( - af.Gaussian - ) + gaussian=af.Model(af.Gaussian), gaussian_2=af.Model(af.Gaussian) ) - assert collection.is_only_model( - af.Gaussian - ) is True + assert collection.is_only_model(af.Gaussian) is True + + collection.other = af.Model(af.m.MockClassx2) + + assert collection.is_only_model(af.Gaussian) is False + - 
collection.other = af.Model( - af.m.MockClassx2 +@pytest.fixture(name="collection") +def make_collection(): + return af.Collection( + gaussian=af.Model(af.Gaussian), exponential=af.Model(Exponential), ) - assert collection.is_only_model( - af.Gaussian - ) is False + +def test_models(collection): + assert collection.models_with_type(af.Gaussian) == [collection.gaussian] + + +def test_multiple_types(collection): + assert collection.models_with_type((af.Gaussian, Exponential)) == [ + collection.gaussian, + collection.exponential, + ] + + +class Galaxy: + def __init__(self, child): + self.child = child + + +def test_instances_with_type(): + model = af.Collection(galaxy=Galaxy(child=af.Model(af.Gaussian))) + assert model.models_with_type(af.Gaussian) == [model.galaxy.child] + + +class DelaunayBrightnessImage: + pass + + +def test_model_attributes_with_type(): + mesh = af.Model(DelaunayBrightnessImage) + mesh.pixels = af.UniformPrior(lower_limit=5.0, upper_limit=10.0) + pixelization = af.Model(af.Gaussian, mesh=mesh) + + assert pixelization.models_with_type(DelaunayBrightnessImage) == [mesh] diff --git a/test_autofit/non_linear/grid/test_sensitivity/test_results.py b/test_autofit/non_linear/grid/test_sensitivity/test_results.py new file mode 100644 index 000000000..7a9a79bb4 --- /dev/null +++ b/test_autofit/non_linear/grid/test_sensitivity/test_results.py @@ -0,0 +1,30 @@ +from autofit.non_linear.grid.sensitivity import SensitivityResult, JobResult +import pytest + + +class Result: + def __init__(self, log_likelihood): + self.log_likelihood = log_likelihood + + +@pytest.fixture(name="job_result") +def make_result(): + return JobResult( + number=0, + result=Result(log_likelihood=1.0), + perturbed_result=Result(log_likelihood=2.0), + ) + + +def test_job_result(job_result): + assert job_result.log_likelihood_base == 1.0 + assert job_result.log_likelihood_perturbed == 2.0 + assert job_result.log_likelihood_difference == 1.0 + + +def test_result(job_result): + result = SensitivityResult(results=[job_result]) + + assert result.log_likelihoods_base == [1.0] + assert result.log_likelihoods_perturbed == [2.0] + assert result.log_likelihood_differences == [1.0] diff --git a/test_autofit/non_linear/grid/test_sensitivity/test_run.py b/test_autofit/non_linear/grid/test_sensitivity/test_run.py index 163f3bfb7..d133faa41 100644 --- a/test_autofit/non_linear/grid/test_sensitivity/test_run.py +++ b/test_autofit/non_linear/grid/test_sensitivity/test_run.py @@ -3,24 +3,21 @@ from autoconf.conf import with_config -@with_config( - "general", - "model", - "ignore_prior_limits", - value=True -) -def test_sensitivity( - sensitivity -): +@with_config("general", "model", "ignore_prior_limits", value=True) +def test_sensitivity(sensitivity): results = sensitivity.run() assert len(results) == 8 - path = Path( - sensitivity.search.paths.output_path - ) / "results.csv" + output_path = Path(sensitivity.search.paths.output_path) + + assert (output_path / ".is_grid_search").exists() + path = output_path / "results.csv" assert path.exists() with open(path) as f: all_lines = set(f) - assert 'index,centre,normalization,sigma,log_likelihood_base,log_likelihood_perturbed,log_likelihood_difference\n' in all_lines - assert ' 0, 0.25, 0.25, 0.25, 2.0, 2.0, 0.0\n' in all_lines - assert ' 1, 0.25, 0.25, 0.75, 2.0, 2.0, 0.0\n' in all_lines + assert ( + "index,centre,normalization,sigma,log_likelihood_base,log_likelihood_perturbed,log_likelihood_difference\n" + in all_lines + ) + assert " 0, 0.25, 0.25, 0.25, 2.0, 2.0, 0.0\n" in all_lines 
+ assert " 1, 0.25, 0.25, 0.75, 2.0, 2.0, 0.0\n" in all_lines diff --git a/test_autofit/non_linear/test_fit_sequential.py b/test_autofit/non_linear/test_fit_sequential.py new file mode 100644 index 000000000..7f1d3faa0 --- /dev/null +++ b/test_autofit/non_linear/test_fit_sequential.py @@ -0,0 +1,75 @@ +import os +from pathlib import Path +from random import random + +import pytest + +import autofit as af +from autofit.non_linear.analysis.combined import CombinedResult + + +class Analysis(af.Analysis): + def log_likelihood_function(self, instance): + return -random() + + +@pytest.fixture(name="search") +def make_search(): + return af.LBFGS(name="test_lbfgs") + + +@pytest.fixture(name="model") +def make_model(): + return af.Model(af.Gaussian) + + +@pytest.fixture(name="analysis") +def make_analysis(): + return Analysis() + + +def count_output(paths): + return len(os.listdir(Path(str(paths)).parent)) + + +def test_with_model(analysis, model, search): + combined_analysis = sum([analysis.with_model(model) for _ in range(10)]) + + result = search.fit_sequential(model=model, analysis=combined_analysis) + + assert count_output(search.paths) == 10 + assert len(result.child_results) == 10 + + +@pytest.fixture(name="combined_analysis") +def make_combined_analysis(analysis): + return sum([analysis for _ in range(10)]) + + +def test_combined_analysis(combined_analysis, model, search): + search.fit_sequential(model=model, analysis=combined_analysis) + + assert count_output(search.paths) == 10 + + +def test_with_free_parameter(combined_analysis, model, search): + combined_analysis = combined_analysis.with_free_parameters([model.centre]) + search.fit_sequential( + model=model, analysis=combined_analysis, + ) + + assert count_output(search.paths) == 10 + + +def test_singular_analysis(analysis, model, search): + with pytest.raises(ValueError): + search.fit_sequential(model=model, analysis=analysis) + + +# noinspection PyTypeChecker +def test_index_combined_result(): + combined_result = CombinedResult([0, 1, 2]) + + assert combined_result[0] == 0 + assert combined_result[1] == 1 + assert combined_result[2] == 2 diff --git a/test_autofit/test_interpolator.py b/test_autofit/test_interpolator.py index 2e0de50bd..87953636a 100644 --- a/test_autofit/test_interpolator.py +++ b/test_autofit/test_interpolator.py @@ -1,43 +1,76 @@ import pytest import autofit as af +from autoconf.dictable import as_dict def test_trivial(): - instance = af.ModelInstance(items=dict(t=1)) - time_series = af.LinearInterpolator([instance]) + instance = af.ModelInstance(dict(t=1)) + linear_interpolator = af.LinearInterpolator([instance]) - result = time_series[time_series.t == 1] + result = linear_interpolator[linear_interpolator.t == 1] assert result is instance -@pytest.fixture(name="time_series") -def make_time_series(): - return af.LinearInterpolator( - [ - af.ModelInstance( - items=dict( - t=1.0, - gaussian=af.Gaussian(centre=0.0, normalization=1.0, sigma=-1.0), - ) - ), +@pytest.fixture(name="model_instance") +def make_model_instance(): + return af.ModelInstance( + dict(t=1.0, gaussian=af.Gaussian(centre=0.0, normalization=1.0, sigma=-1.0),) + ) + + +@pytest.fixture(name="instances") +def make_instances(model_instance): + return [ + model_instance, + af.ModelInstance( + dict( + t=2.0, gaussian=af.Gaussian(centre=1.0, normalization=2.0, sigma=-2.0), + ) + ), + ] + + +@pytest.fixture(name="linear_interpolator") +def make_linear_interpolator(instances): + return af.LinearInterpolator(instances) + + +def 
test_spline_interpolator(instances): + interpolator = af.SplineInterpolator(instances) + + result = interpolator[interpolator.t == 1.5] + + assert result.t == 1.5 + assert result.gaussian.centre == 0.5 + + +def test_smooth_spline_interpolator(instances): + interpolator = af.SplineInterpolator( + instances + + [ af.ModelInstance( - items=dict( - t=2.0, - gaussian=af.Gaussian(centre=1.0, normalization=2.0, sigma=-2.0), + dict( + t=3.0, + gaussian=af.Gaussian(centre=4.0, normalization=3.0, sigma=-3.0), ) ), ] ) + result = interpolator[interpolator.t == 1.5] + + assert result.t == 1.5 + assert result.gaussian.centre < 0.5 + @pytest.mark.parametrize( "t, centre", [(0.0, -1.0), (1.0, 0.0), (1.5, 0.5), (2.0, 1.0), (3.0, 2.0)] ) -def test_linear(t, centre, time_series): +def test_linear(t, centre, linear_interpolator): - result = time_series[time_series.t == t] + result = linear_interpolator[linear_interpolator.t == t] assert result.t == t assert result.gaussian.centre == centre @@ -46,10 +79,95 @@ def test_linear(t, centre, time_series): @pytest.mark.parametrize("sigma", [-0.5, 0.0, 0.5, 1.0]) -def test_alternate_attribute(time_series, sigma): +def test_alternate_attribute(linear_interpolator, sigma): - result = time_series[time_series.gaussian.sigma == sigma] + result = linear_interpolator[linear_interpolator.gaussian.sigma == sigma] assert result.gaussian.sigma == sigma assert result.t == -sigma assert result.gaussian.normalization == -sigma + + +def test_deeper_attributes(): + collection = af.Collection( + model=af.Model(af.Gaussian, centre=0.0, normalization=1.0, sigma=-1.0,) + ) + + instance_1 = af.Collection( + t=1.0, collection=collection, + ).instance_from_prior_medians() + instance_2 = af.Collection( + t=2.0, collection=collection, + ).instance_from_prior_medians() + + linear_interpolator = af.LinearInterpolator([instance_1, instance_2]) + + result = linear_interpolator[linear_interpolator.t == 1.5] + + assert result.collection.model.centre == 0.0 + assert result.collection.model.normalization == 1.0 + assert result.collection.model.sigma == -1.0 + + +def test_to_dict(linear_interpolator, linear_interpolator_dict): + assert linear_interpolator.dict() == linear_interpolator_dict + + +def test_from_dict(linear_interpolator_dict): + interpolator = af.LinearInterpolator.from_dict(linear_interpolator_dict) + assert interpolator[interpolator.t == 1.5].t == 1.5 + + +@pytest.fixture(name="instance_dict") +def make_instance_dict(): + return { + "child_items": { + "gaussian": { + "centre": 0.0, + "normalization": 1.0, + "sigma": -1.0, + "type": "autofit.example.model.Gaussian", + }, + "t": 1.0, + "type": "dict", + }, + "type": "autofit.mapper.model.ModelInstance", + } + + +@pytest.fixture(name="linear_interpolator_dict") +def make_linear_interpolator_dict(instance_dict): + return { + "instances": [ + instance_dict, + { + "child_items": { + "gaussian": { + "centre": 1.0, + "normalization": 2.0, + "sigma": -2.0, + "type": "autofit.example.model.Gaussian", + }, + "t": 2.0, + "type": "dict", + }, + "type": "autofit.mapper.model.ModelInstance", + }, + ], + "type": "autofit.interpolator.LinearInterpolator", + } + + +def test_instance_as_dict(model_instance, instance_dict): + assert as_dict(model_instance) == instance_dict + + +def test_instance_from_dict(model_instance, instance_dict): + instance = af.ModelInstance.from_dict(instance_dict) + assert instance.t == 1.0 + + gaussian = instance.gaussian + assert isinstance(gaussian, af.Gaussian) + assert gaussian.centre == 0.0 + assert gaussian.normalization 
== 1.0 + assert gaussian.sigma == -1.0 From 9768792f82dfd9531bbe33e813c8135d58540ae9 Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Mon, 27 Mar 2023 15:46:39 +0100 Subject: [PATCH 7/8] scipy requirementr --- requirements.txt | 2 +- setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements.txt b/requirements.txt index f8bca9fa7..249405018 100644 --- a/requirements.txt +++ b/requirements.txt @@ -10,6 +10,6 @@ pyprojroot==0.2.0 pyswarms==1.3.0 h5py>=2.10.0 SQLAlchemy==1.3.20 -scipy>=1.5.1 +scipy>=1.5.4,<=1.8.1 astunparse==1.6.3 xxhash==3.0.0 \ No newline at end of file diff --git a/setup.py b/setup.py index 25e1731d5..2953bb067 100644 --- a/setup.py +++ b/setup.py @@ -12,7 +12,7 @@ with open(join(this_dir, "requirements.txt")) as f: requirements = f.read().split("\n") -version = environ.get("VERSION", "1.0.dev0") +version = environ.get("VERSION", "2023.3.27.1") requirements.extend([ f'autoconf=={version}' ]) From 3143ab5ee24a3d5c91cc7205c8ee38e5ac402b11 Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Thu, 6 Apr 2023 13:56:36 +0100 Subject: [PATCH 8/8] version bump --- setup.py | 2 +- test_autofit/config/general.yaml | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/setup.py b/setup.py index 2953bb067..25e1731d5 100644 --- a/setup.py +++ b/setup.py @@ -12,7 +12,7 @@ with open(join(this_dir, "requirements.txt")) as f: requirements = f.read().split("\n") -version = environ.get("VERSION", "2023.3.27.1") +version = environ.get("VERSION", "1.0.dev0") requirements.extend([ f'autoconf=={version}' ]) diff --git a/test_autofit/config/general.yaml b/test_autofit/config/general.yaml index 90c934f6e..66197119a 100644 --- a/test_autofit/config/general.yaml +++ b/test_autofit/config/general.yaml @@ -24,6 +24,7 @@ profiling: should_profile: false # If True, the ``profile_log_likelihood_function()`` function of an analysis class is called throughout a model-fit, profiling run times. repeats: 1 # The number of repeat function calls used to measure run-times when profiling. test: - check_preloads: false # If True, perform a sanity check that the likelihood using preloads is identical to the likelihood not using preloads. + check_preloads: false + preloads_check_threshold: 0.1 # If the figure of merit of a fit with and without preloads is greater than this threshold, the check preload test fails and an exception raised for a model-fit. # If True, perform a sanity check that the likelihood using preloads is identical to the likelihood not using preloads. exception_override: false parallel_profile: false