From 48565fa5c93789b0a13705b29e934f45444df9ee Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Sun, 27 Nov 2022 15:41:01 +0000 Subject: [PATCH 001/226] docs --- autofit/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autofit/__init__.py b/autofit/__init__.py index b5615ba8d..dfadb4c4c 100644 --- a/autofit/__init__.py +++ b/autofit/__init__.py @@ -1,5 +1,5 @@ import abc -import pickle +import pickle from dill import register From a0eec11d9a281655914d4f8eb5200c934e01f759 Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Sun, 27 Nov 2022 15:41:05 +0000 Subject: [PATCH 002/226] docs --- autofit/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autofit/__init__.py b/autofit/__init__.py index dfadb4c4c..b5615ba8d 100644 --- a/autofit/__init__.py +++ b/autofit/__init__.py @@ -1,5 +1,5 @@ import abc -import pickle +import pickle from dill import register From 5838b023a0559cfc495564234aebfb679c98c0d3 Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Sun, 27 Nov 2022 15:44:34 +0000 Subject: [PATCH 003/226] erm --- .github/workflows/main.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index eaa288d44..b8ea09abf 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -47,6 +47,7 @@ jobs: echo "Branch $BRANCH did not exist in $PACKAGE" else echo "Branch $BRANCH did exist in $PACKAGE" + git fetch git checkout $BRANCH fi popd From 6cd0a6d0c83f012029021e8ac087607960144ce0 Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Mon, 28 Nov 2022 10:13:40 +0000 Subject: [PATCH 004/226] docs --- CITATIONS.rst | 13 ++ __init__.py | 0 .../cookbook_3_multiple_datasets.rst | 4 +- docs/general/configs.rst | 181 +----------------- docs/general/workspace.rst | 7 +- docs/overview/multi_datasets.rst | 2 +- files/citation.tex | 4 - 7 files changed, 31 insertions(+), 180 deletions(-) create mode 100644 CITATIONS.rst delete mode 100644 __init__.py diff --git a/CITATIONS.rst b/CITATIONS.rst new file mode 100644 index 000000000..f1621b5e8 --- /dev/null +++ b/CITATIONS.rst @@ -0,0 +1,13 @@ +.. _references: + +Citations & References +====================== + +The bibtex entries for **PyAutoFit** and its affiliated software packages can be found +`here `_, with example text for citing **PyAutoFit** +in `.tex format here `_ format here and +`.md format here `_. As shown in the examples, we +would greatly appreciate it if you mention **PyAutoFit** by name and include a link to our GitHub page! + +**PyAutoFit** is published in the `Journal of Open Source Software `_ and its +entry in the above .bib file is under the key ``pyautofit``. \ No newline at end of file diff --git a/__init__.py b/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/docs/cookbooks/cookbook_3_multiple_datasets.rst b/docs/cookbooks/cookbook_3_multiple_datasets.rst index 04e57f571..e8f1b2475 100644 --- a/docs/cookbooks/cookbook_3_multiple_datasets.rst +++ b/docs/cookbooks/cookbook_3_multiple_datasets.rst @@ -87,7 +87,7 @@ This means that the model has 7 free parameters in total, the shared ``centre`` analysis = sum(analysis_list) analysis = analysis.with_free_parameters( - *[model.gaussian.sigma] + model.gaussian.sigma] ) The code above does not immediately update the model. @@ -139,7 +139,7 @@ We can make multiple parameters free by simply adding them to the input list abo analysis = sum(analysis_list) analysis = analysis.with_free_parameters( - *[model.gaussian.sigma, model.gaussian.centre] + model.gaussian.sigma, model.gaussian.centre] ) Variable Parameters As Relationship diff --git a/docs/general/configs.rst b/docs/general/configs.rst index 39ef9770c..ad7ae5670 100644 --- a/docs/general/configs.rst +++ b/docs/general/configs.rst @@ -1,11 +1,12 @@ -.. _configs: - Configs ======= -The ``autofit_workspace`` includes configuration files that customize the behaviour of the non-linear search's, -visualization and other aspects of **PyAutoFit**. Here, we describe how to configure **PyAutoFit** to use the configs -and describe every configuration file complete with input parameters. +**PyAutoFit** uses a number of configuration files that customize the default behaviour of the non-linear searches, +visualization and other aspects of **PyAutoFit**. + +Descriptions of every configuration file and their input parameters are provided in the ``README.rst`` in +the `config directory of the workspace `_ + Setup ----- @@ -13,176 +14,14 @@ Setup By default, **PyAutoFit** looks for the config files in a ``config`` folder in the current working directory, which is why we run autofit scripts from the ``autofit_workspace`` directory. -The configuration path can also be set manually in a script using **PyAutoConf** and the following command (the path -to the ``output`` folder where the results of a non-linear search are stored is also set below): +The configuration path can also be set manually in a script using the project **PyAutoConf** and the following +command (the path to the ``output`` folder where the results of a non-linear search are stored is also set below): .. code-block:: bash from autoconf import conf conf.instance.push( - config_path="path/to/config", output_path=f"path/to/output" + config_path="path/to/config", + output_path=f"path/to/output" ) - -general.ini ------------ - -This config file is found at ``autofit_workspace/config/general.ini`` and contains the following sections and variables: - -[output] - log_file -> str - The file name the logged output is written to (in the non-linear search output folder). - log_level -> str - The level of logging. - model_results_decimal_places -> int - The number of decimal places the estimated values and errors of all parameters in the ``model.results`` file are - output to. - remove_files -> bool - If `True`, all output files of a non-linear search (e.g. samples, samples_backup, model.results, images, etc.) - are deleted once the model-fit has completed. - - A .zip file of all output is always created before files are removed, thus results are not lost with this - option turned on. If **PyAutoFit** does not find the output files of a model-fit (because they were removed) but - does find this .zip file, it will unzip the contents and continue the analysis as if the files were - there all along. - - This feature was implemented because super-computers often have a limit on the number of files allowed per - user and the large number of files output by **PyAutoFit** can exceed this limit. By removing files the - number of files is restricted only to the .zip files. - grid_results_interval -> int - For a ``GridSearch`` this interval sets after how many samples on the grid output is - performed for. A ``grid_results_interval`` of -1 turns off output. - -non_linear ----------- - -These config files are found at ``autofit_workspace/config/non_linear`` and they contain the default settings used by -every non-linear search. The ``[search]``, ``[settings]`` and ``[initialize]`` sections of the non-linear configs -contains settings specific to certain non-linear search's, and the documentation for these variables should be found -by inspecting the`API Documentation `_ of the relevent -non-linear search object. - -The following config sections and variables are generic across all non-linear search configs (e.g. -``config/non_linear/nest/DynestyStatic.ini``, ``config/non_linear/mcmc/Emcee.ini``, etc.): - -[updates] - iterations_per_update -> int - The number of iterations of the non-linear search performed between every 'update', where an update performs - visualization of the maximum log likelihood model, backing-up of the samples, output of the ``model.results`` - file and logging. - visualize_every_update -> int - For every ``visualize_every_update`` updates visualization is performed and output to the hard-disk during the - non-linear using the maximum log likelihood model. A ``visualization_interval`` of -1 turns off on-the-fly - visualization. - backup_every_update -> int - For every ``backup_every_update`` the results of the non-linear search in the samples foler and backed up into the - samples_backup folder. A ``backup_every_update`` of -1 turns off backups during the non-linear search (it is still - performed when the non-linear search terminates). - model_results_every_update -> int - For every ``model_results_every_update`` the model.results file is updated with the maximum log likelihood model - and parameter estimates with errors at 1 an 3 sigma confidence. A ``model_results_every_update`` of -1 turns off - the model.results file being updated during the model-fit (it is still performed when the non-linear search - terminates). - log_every_update -> int - For every ``log_every_update`` the log file is updated with the output of the Python interpreter. A - ``log_every_update`` of -1 turns off logging during the model-fit. - -[printing] - silence -> bool - If `True`, the default print output of the non-linear search is silenced and not printed by the Python - interpreter. - -[parallel] - number_of_cores -> int - For non-linear search's that support parallel procesing via the Python ``multiprocesing`` module, the number of - cores the parallel run uses. If ``number_of_cores=1``, the model-fit is performed in serial omitting the use - of the ``multiprocessing`` module. - -visualize ---------- - -These config files are found at ``autofit_workspace/config/visualize`` and they contain the default settings used by -visualization in **PyAutoFit**. The ``general.ini`` config contains the following sections and variables: - -[general] - backend -> str - The ``matploblib backend`` used for visualization (see - https://gist.github.com/CMCDragonkai/4e9464d9f32f5893d837f3de2c43daa4 for a description of backends). - - If you use an invalid backend for your computer, **PyAutoFit** may crash without an error or reset your machine. - The following backends have worked for **PyAutoFit** users: - - TKAgg (default) - - Qt5Agg (works on new MACS) - - Qt4Agg - - WXAgg - - WX - - Agg (outputs to .fits / .png but doesn't'display figures during a run on your computer screen) - -priors ------- - -These config files are found at ``autofit_workspace/config/priors`` and they contain the default priors and related -variables for every model-component in a project, using ``.json`` format files (as opposed to ``.ini`` for most config files). - -The autofit_workspace`` contains example ``prior`` files for the 1D ``data`` fitting problem. An example entry of the -json configs for the ``sigma`` parameter of the ``Gaussian`` class is as follows: - -.. code-block:: bash - - "Gaussian": { - "sigma": { - "type": "Uniform", - "lower_limit": 0.0, - "upper_limit": 30.0, - "width_modifier": { - "type": "Absolute", - "value": 0.2 - }, - "gaussian_limits": { - "lower": 0.0, - "upper": "inf" - } - }, - -The sections of this example config set the following: - -json config - type -> Prior - The default prior given to this parameter which is used by the non-linear search. In the example above, a - ``UniformPrior`` is used with ``lower_limit`` of 0.0 and ``upper_limit`` of 30.0. A ``GaussianPrior`` could be used by - putting "``Gaussian``" in the "``type``" box, with "``mean``" and "``sigma``" used to set the default values. Any prior can be - set in an analogous fashion (see the example configs). - width_modifier - When the results of a search are linked to a subsequent search to set up the priors of its non-linear search, - this entry describes how the ``Prior`` is passed. For a full description of prior passing, checkout the examples - in ``autofit_workspace/notebooks/features/search_chaining``. - gaussian_limits - When the results of a search are linked to a subsequent search, they are passed using a ``GaussianPrior``. The - ``gaussian_limits`` set the physical lower and upper limits of this ``GaussianPrior``, such that parameter samples - can not go beyond these limits. - -notation --------- - -The notation configs define the labels of every model-component parameter and its derived quantities, which are -used when visualizing results (for example labeling the axis of the PDF triangle plots output by a non-linear search). -Two examples using the 1D ``data`` fitting example for the config file **label.ini** are: - -[label] - centre -> str - The label given to that parameter for non-linear search plots using that parameter, e.g. the PDF plots. For - example, if centre=x, the plot axis will be labeled 'x'. - -[superscript] - Gaussian -> str - The subscript used on certain plots that show the results of different model-components. For example, if - Gaussian=g, plots where the Gaussian are plotted will have a subscript g. - -The **label_format.ini** config file specifies the format certain parameters are output as in output files like the -*model.results* file. \ No newline at end of file diff --git a/docs/general/workspace.rst b/docs/general/workspace.rst index 19c6331c8..ace5c9a30 100644 --- a/docs/general/workspace.rst +++ b/docs/general/workspace.rst @@ -8,6 +8,9 @@ when you installed **PyAutoFit**. If you didn't, checkout the `installation instructions `_ for how to downloaded and configure the workspace. +The ``README.rst`` files distributed throughout the workspace describe every folder and file, and specify if +examples are for beginner or advanced users. + New users should begin by checking out the following parts of the workspace. HowToFit @@ -26,8 +29,8 @@ Scripts / Notebooks There are numerous example describing how perform model-fitting with **PyAutoFit** and providing an overview of its advanced model-fitting features. All examples are provided as Python scripts and Jupyter notebooks. -A full description of the scripts available is given on -the `autofit workspace GitHub page `_. +Descriptions of every configuration file and their input parameters are provided in the ``README.rst`` in +the `config directory of the workspace `_ Config ------ diff --git a/docs/overview/multi_datasets.rst b/docs/overview/multi_datasets.rst index 09ca587a6..49eeb63ae 100644 --- a/docs/overview/multi_datasets.rst +++ b/docs/overview/multi_datasets.rst @@ -168,7 +168,7 @@ To do that, we interface a model with a summed list of analysis objects model = af.Collection(gaussian=af.Model(Gaussian)) analysis = analysis.with_free_parameters( - *[model.gaussian.sigma] + model.gaussian.sigma] ) We code above updates the model using the summed ``Analysis ``objects to compose a model where: diff --git a/files/citation.tex b/files/citation.tex index d4af70e7c..a3936681e 100644 --- a/files/citation.tex +++ b/files/citation.tex @@ -30,10 +30,6 @@ \section*{Software Citations} \href{https://github.com/numpy/numpy}{\textt{NumPy}} \citep{numpy} -\item -\href{https://github.com/JohannesBuchner/PyMultiNest}{\textt{PyMultiNest}} -\citep{multinest, pymultinest} - \item \href{https://github.com/ljvmiranda921/pyswarms}{\textt{PySwarms}} \citep{pyswarms} From 698061663125a57b0906fd0bf832f040f46011ca Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Mon, 28 Nov 2022 10:19:24 +0000 Subject: [PATCH 005/226] change config --- test_autofit/config/visualize/plots.ini | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test_autofit/config/visualize/plots.ini b/test_autofit/config/visualize/plots.ini index d855c794b..9511644ba 100644 --- a/test_autofit/config/visualize/plots.ini +++ b/test_autofit/config/visualize/plots.ini @@ -1,3 +1,3 @@ [samples] -corner=True -progress=True \ No newline at end of file +corner=False +progress=False \ No newline at end of file From 47395ad02f0777a6c5936cbf9be7067c2de5a11c Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Mon, 28 Nov 2022 10:21:09 +0000 Subject: [PATCH 006/226] docs --- docs/cookbooks/cookbook_3_multiple_datasets.rst | 4 ++-- docs/overview/multi_datasets.rst | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/cookbooks/cookbook_3_multiple_datasets.rst b/docs/cookbooks/cookbook_3_multiple_datasets.rst index e8f1b2475..a43c38bc8 100644 --- a/docs/cookbooks/cookbook_3_multiple_datasets.rst +++ b/docs/cookbooks/cookbook_3_multiple_datasets.rst @@ -87,7 +87,7 @@ This means that the model has 7 free parameters in total, the shared ``centre`` analysis = sum(analysis_list) analysis = analysis.with_free_parameters( - model.gaussian.sigma] + model.gaussian.sigma ) The code above does not immediately update the model. @@ -139,7 +139,7 @@ We can make multiple parameters free by simply adding them to the input list abo analysis = sum(analysis_list) analysis = analysis.with_free_parameters( - model.gaussian.sigma, model.gaussian.centre] + model.gaussian.sigma, model.gaussian.centre ) Variable Parameters As Relationship diff --git a/docs/overview/multi_datasets.rst b/docs/overview/multi_datasets.rst index 49eeb63ae..76ad6d069 100644 --- a/docs/overview/multi_datasets.rst +++ b/docs/overview/multi_datasets.rst @@ -168,7 +168,7 @@ To do that, we interface a model with a summed list of analysis objects model = af.Collection(gaussian=af.Model(Gaussian)) analysis = analysis.with_free_parameters( - model.gaussian.sigma] + model.gaussian.sigma ) We code above updates the model using the summed ``Analysis ``objects to compose a model where: From cd5ba6cf9998610e4b02c1b9ea85fb76070377fb Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Mon, 28 Nov 2022 10:29:21 +0000 Subject: [PATCH 007/226] commented out unit test test_parallel_laplace --- .../regression/test_linear_regression.py | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/test_autofit/graphical/regression/test_linear_regression.py b/test_autofit/graphical/regression/test_linear_regression.py index ce8b73a9a..3b6b06042 100644 --- a/test_autofit/graphical/regression/test_linear_regression.py +++ b/test_autofit/graphical/regression/test_linear_regression.py @@ -119,18 +119,18 @@ def test_laplace( check_model_approx(mean_field, a_, b_, z_, x_, y_) -def test_parallel_laplace( - model_approx, a_, b_, x_, y_, z_, -): - laplace = LaplaceOptimiser() - opt = ParallelEPOptimiser( - model_approx.factor_graph, - n_cores=len(model_approx.factors) + 1, - default_optimiser=laplace, - ) - model_approx = opt.run(model_approx) - mean_field = model_approx.mean_field - check_model_approx(mean_field, a_, b_, z_, x_, y_) +# def test_parallel_laplace( +# model_approx, a_, b_, x_, y_, z_, +# ): +# laplace = LaplaceOptimiser() +# opt = ParallelEPOptimiser( +# model_approx.factor_graph, +# n_cores=len(model_approx.factors) + 1, +# default_optimiser=laplace, +# ) +# model_approx = opt.run(model_approx) +# mean_field = model_approx.mean_field +# check_model_approx(mean_field, a_, b_, z_, x_, y_) def _test_laplace_jac( From 84b3042284ddffe44eac9a77505cee3f6e761c1b Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Mon, 28 Nov 2022 11:08:46 +0000 Subject: [PATCH 008/226] updated MANIFEST.in file --- MANIFEST.in | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/MANIFEST.in b/MANIFEST.in index 4bbbcf074..2e5f0f0aa 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,19 +1,22 @@ # MANIFEST.in exclude .gitignore -exclude .coverage -exclude .travis.yml -exclude docs include README.rst include setup.cfg -recursive-include autofit/config * +include CITATIONS.rst +include LICENSE +include requirements.txt +include optional_requirements.txt + prune .cache prune .git prune build prune dist + recursive-exclude *.egg-info * -recursive-include tests * -include requirements.txt -recursive-include data * + +recursive-include autofit/config * + +exclude docs global-exclude test_autofit recursive-exclude test_autofit * \ No newline at end of file From 9e864bd71f752142c482852d7fdea8551e2b187c Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 28 Nov 2022 11:25:22 +0000 Subject: [PATCH 009/226] default value for model dist when exception thrown --- autofit/graphical/expectation_propagation/optimiser.py | 1 + 1 file changed, 1 insertion(+) diff --git a/autofit/graphical/expectation_propagation/optimiser.py b/autofit/graphical/expectation_propagation/optimiser.py index 2c5ccd782..60bce13c8 100644 --- a/autofit/graphical/expectation_propagation/optimiser.py +++ b/autofit/graphical/expectation_propagation/optimiser.py @@ -123,6 +123,7 @@ def factor_step(factor_approx, optimiser): status = Status( False, (f"Factor: {factor} experienced error {e}",), StatusFlag.FAILURE, ) + new_model_dist = factor_approx.model_dist factor_logger.debug(status) return new_model_dist, status From 22a55988b9647e1b8f5c97810af401419a2bfb34 Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 28 Nov 2022 14:23:27 +0000 Subject: [PATCH 010/226] converted config to yaml --- autofit/config/general.ini | 33 -- autofit/config/general.yaml | 27 ++ autofit/config/non_linear.yaml | 304 ++++++++++++++++++ autofit/config/non_linear/mcmc/Emcee.ini | 34 -- autofit/config/non_linear/mcmc/Zeus.ini | 44 --- .../config/non_linear/nest/DynestyDynamic.ini | 45 --- .../config/non_linear/nest/DynestyStatic.ini | 42 --- autofit/config/non_linear/nest/UltraNest.ini | 60 ---- autofit/config/non_linear/optimize/Drawer.ini | 22 -- autofit/config/non_linear/optimize/LBFGS.ini | 33 -- .../non_linear/optimize/PySwarmsGlobal.ini | 32 -- .../non_linear/optimize/PySwarmsLocal.ini | 34 -- autofit/config/notation.yaml | 23 ++ autofit/config/notation/label.ini | 14 - autofit/config/notation/label_format.ini | 8 - autofit/config/visualize.yaml | 169 ++++++++++ autofit/config/visualize/doc_general | 17 - autofit/config/visualize/general.ini | 2 - autofit/config/visualize/include.ini | 3 - autofit/config/visualize/mat_wrap/Axis.ini | 3 - autofit/config/visualize/mat_wrap/Cmap.ini | 15 - .../config/visualize/mat_wrap/Colorbar.ini | 7 - .../visualize/mat_wrap/ColorbarTickParams.ini | 5 - autofit/config/visualize/mat_wrap/Figure.ini | 7 - autofit/config/visualize/mat_wrap/Legend.ini | 7 - .../config/visualize/mat_wrap/TickParams.ini | 5 - autofit/config/visualize/mat_wrap/Title.ini | 5 - autofit/config/visualize/mat_wrap/XLabel.ini | 5 - autofit/config/visualize/mat_wrap/XTicks.ini | 5 - autofit/config/visualize/mat_wrap/YLabel.ini | 5 - autofit/config/visualize/mat_wrap/YTicks.ini | 5 - .../config/visualize/mat_wrap_1d/AXVLine.ini | 5 - .../config/visualize/mat_wrap_1d/YXPlot.ini | 5 - .../visualize/mat_wrap_2d/ArrayOverlay.ini | 5 - .../config/visualize/mat_wrap_2d/GridPlot.ini | 5 - .../visualize/mat_wrap_2d/GridScatter.ini | 9 - .../visualize/mat_wrap_2d/PatchOverlay.ini | 7 - .../mat_wrap_2d/VectorFieldQuiver.ini | 17 - .../visualize/mat_wrap_2d/VoronoiDrawer.ini | 9 - autofit/config/visualize/plots_search.ini | 28 -- 40 files changed, 523 insertions(+), 587 deletions(-) delete mode 100644 autofit/config/general.ini create mode 100644 autofit/config/general.yaml create mode 100644 autofit/config/non_linear.yaml delete mode 100644 autofit/config/non_linear/mcmc/Emcee.ini delete mode 100644 autofit/config/non_linear/mcmc/Zeus.ini delete mode 100644 autofit/config/non_linear/nest/DynestyDynamic.ini delete mode 100644 autofit/config/non_linear/nest/DynestyStatic.ini delete mode 100644 autofit/config/non_linear/nest/UltraNest.ini delete mode 100644 autofit/config/non_linear/optimize/Drawer.ini delete mode 100644 autofit/config/non_linear/optimize/LBFGS.ini delete mode 100644 autofit/config/non_linear/optimize/PySwarmsGlobal.ini delete mode 100644 autofit/config/non_linear/optimize/PySwarmsLocal.ini create mode 100644 autofit/config/notation.yaml delete mode 100644 autofit/config/notation/label.ini delete mode 100644 autofit/config/notation/label_format.ini create mode 100644 autofit/config/visualize.yaml delete mode 100644 autofit/config/visualize/doc_general delete mode 100644 autofit/config/visualize/general.ini delete mode 100644 autofit/config/visualize/include.ini delete mode 100644 autofit/config/visualize/mat_wrap/Axis.ini delete mode 100644 autofit/config/visualize/mat_wrap/Cmap.ini delete mode 100644 autofit/config/visualize/mat_wrap/Colorbar.ini delete mode 100644 autofit/config/visualize/mat_wrap/ColorbarTickParams.ini delete mode 100644 autofit/config/visualize/mat_wrap/Figure.ini delete mode 100644 autofit/config/visualize/mat_wrap/Legend.ini delete mode 100644 autofit/config/visualize/mat_wrap/TickParams.ini delete mode 100644 autofit/config/visualize/mat_wrap/Title.ini delete mode 100644 autofit/config/visualize/mat_wrap/XLabel.ini delete mode 100644 autofit/config/visualize/mat_wrap/XTicks.ini delete mode 100644 autofit/config/visualize/mat_wrap/YLabel.ini delete mode 100644 autofit/config/visualize/mat_wrap/YTicks.ini delete mode 100644 autofit/config/visualize/mat_wrap_1d/AXVLine.ini delete mode 100644 autofit/config/visualize/mat_wrap_1d/YXPlot.ini delete mode 100644 autofit/config/visualize/mat_wrap_2d/ArrayOverlay.ini delete mode 100644 autofit/config/visualize/mat_wrap_2d/GridPlot.ini delete mode 100644 autofit/config/visualize/mat_wrap_2d/GridScatter.ini delete mode 100644 autofit/config/visualize/mat_wrap_2d/PatchOverlay.ini delete mode 100644 autofit/config/visualize/mat_wrap_2d/VectorFieldQuiver.ini delete mode 100644 autofit/config/visualize/mat_wrap_2d/VoronoiDrawer.ini delete mode 100644 autofit/config/visualize/plots_search.ini diff --git a/autofit/config/general.ini b/autofit/config/general.ini deleted file mode 100644 index 526711e6e..000000000 --- a/autofit/config/general.ini +++ /dev/null @@ -1,33 +0,0 @@ -[output] -log_to_file=False -log_file=output.log -log_level=INFO -samples_to_csv=False -model_results_decimal_places=3 -info_whitespace_length=80 -remove_files=False -force_pickle_overwrite=False -identifier_version=4 - -[hpc] -hpc_mode=False -iterations_per_update=5000 - -[model] -ignore_prior_limits=False - -[profiling] -should_profile=False -repeats=1 -parallel_profile=False - -[parallel] -warn_environment_variables=True - -[analysis] -n_cores=1 - -[test] -parallel_profile=False -check_preloads=False -exception_override=False \ No newline at end of file diff --git a/autofit/config/general.yaml b/autofit/config/general.yaml new file mode 100644 index 000000000..784083c80 --- /dev/null +++ b/autofit/config/general.yaml @@ -0,0 +1,27 @@ +analysis: + n_cores: 1 +hpc: + hpc_mode: false + iterations_per_update: 5000 +model: + ignore_prior_limits: false +output: + force_pickle_overwrite: false + identifier_version: 4 + info_whitespace_length: 80 + log_file: output.log + log_level: INFO + log_to_file: false + model_results_decimal_places: 3 + remove_files: false + samples_to_csv: false +parallel: + warn_environment_variables: true +profiling: + parallel_profile: false + repeats: 1 + should_profile: false +test: + check_preloads: false + exception_override: false + parallel_profile: false diff --git a/autofit/config/non_linear.yaml b/autofit/config/non_linear.yaml new file mode 100644 index 000000000..9dc6a70ab --- /dev/null +++ b/autofit/config/non_linear.yaml @@ -0,0 +1,304 @@ +mcmc: + Emcee: + auto_correlations: + change_threshold: 0.01 + check_for_convergence: true + check_size: 100 + required_length: 50 + initialize: + ball_lower_limit: 0.49 + ball_upper_limit: 0.51 + method: ball + parallel: + number_of_cores: 1 + printing: + silence: false + prior_passer: + sigma: 3.0 + use_errors: true + use_widths: true + run: + nsteps: 2000 + search: + nwalkers: 50 + updates: + iterations_per_update: 500 + log_every_update: 1 + model_results_every_update: 1 + remove_state_files_at_end: true + visualize_every_update: 1 + Zeus: + auto_correlations: + change_threshold: 0.01 + check_for_convergence: true + check_size: 100 + required_length: 50 + initialize: + ball_lower_limit: 0.49 + ball_upper_limit: 0.51 + method: ball + parallel: + number_of_cores: 1 + printing: + silence: false + prior_passer: + sigma: 3.0 + use_errors: true + use_widths: true + run: + check_walkers: true + light_mode: false + maxiter: 10000 + maxsteps: 10000 + mu: 1.0 + nsteps: 2000 + patience: 5 + shuffle_ensemble: true + tolerance: 0.05 + tune: true + vectorize: false + search: + nwalkers: 50 + updates: + iterations_per_update: 500 + log_every_update: 1 + model_results_every_update: 1 + remove_state_files_at_end: true + visualize_every_update: 1 +nest: + DynestyDynamic: + initialize: + method: prior + parallel: + force_x1_cpu: false + number_of_cores: 1 + printing: + silence: false + prior_passer: + sigma: 3.0 + use_errors: true + use_widths: true + run: + dlogz_init: 0.01 + logl_max_init: .inf + maxcall: null + maxcall_init: null + maxiter: null + maxiter_init: null + n_effective: .inf + n_effective_init: .inf + nlive_init: 500 + search: + bootstrap: null + bound: multi + enlarge: null + facc: 0.2 + first_update: null + fmove: 0.9 + max_move: 100 + sample: auto + slices: 5 + update_interval: null + walks: 5 + updates: + iterations_per_update: 2500 + log_every_update: 1 + model_results_every_update: 1 + remove_state_files_at_end: true + visualize_every_update: 1 + DynestyStatic: + initialize: + method: prior + parallel: + force_x1_cpu: false + number_of_cores: 1 + printing: + silence: false + prior_passer: + sigma: 3.0 + use_errors: true + use_widths: true + run: + dlogz: null + logl_max: .inf + maxcall: null + maxiter: null + n_effective: null + search: + bootstrap: null + bound: multi + enlarge: null + facc: 0.2 + first_update: null + fmove: 0.9 + max_move: 100 + nlive: 50 + sample: auto + slices: 5 + update_interval: null + walks: 5 + updates: + iterations_per_update: 5000 + log_every_update: 1 + model_results_every_update: 1 + remove_state_files_at_end: true + visualize_every_update: 1 + UltraNest: + initialize: + method: prior + parallel: + number_of_cores: 1 + printing: + silence: false + prior_passer: + sigma: 3.0 + use_errors: true + use_widths: true + run: + cluster_num_live_points: 40 + dkl: 0.5 + dlogz: 0.5 + frac_remain: 0.01 + insertion_test_window: 10 + insertion_test_zscore_threshold: 2 + lepsilon: 0.001 + log_interval: null + max_iters: null + max_ncalls: null + max_num_improvement_loops: -1.0 + min_ess: 400 + min_num_live_points: 400 + show_status: true + update_interval_ncall: null + update_interval_volume_fraction: 0.8 + viz_callback: auto + search: + draw_multiple: true + ndraw_max: 65536 + ndraw_min: 128 + num_bootstraps: 30 + num_test_samples: 2 + resume: true + run_num: null + storage_backend: hdf5 + vectorized: false + warmstart_max_tau: -1.0 + stepsampler: + adaptive_nsteps: false + log: false + max_nsteps: 1000 + nsteps: 25 + region_filter: false + scale: 1.0 + stepsampler_cls: null + updates: + iterations_per_update: 5000 + log_every_update: 1 + model_results_every_update: 1 + remove_state_files_at_end: true + visualize_every_update: 1 +optimize: + Drawer: + initialize: + ball_lower_limit: 0.49 + ball_upper_limit: 0.51 + method: ball + printing: + silence: false + prior_passer: + sigma: 3.0 + use_errors: true + use_widths: true + search: + total_draws: 50 + updates: + iterations_per_update: 500 + log_every_update: 1 + model_results_every_update: 1 + remove_state_files_at_end: true + visualize_every_update: 1 + LBFGS: + initialize: + ball_lower_limit: 0.49 + ball_upper_limit: 0.51 + method: ball + options: + disp: false + eps: 1.0e-08 + ftol: 2.220446049250313e-09 + gtol: 1.0e-05 + iprint: -1.0 + maxcor: 10 + maxfun: 15000 + maxiter: 15000 + maxls: 20 + printing: + silence: false + prior_passer: + sigma: 3.0 + use_errors: true + use_widths: true + search: + tol: null + updates: + iterations_per_update: 500 + log_every_update: 1 + model_results_every_update: 1 + remove_state_files_at_end: true + visualize_every_update: 1 + PySwarmsGlobal: + initialize: + ball_lower_limit: 0.49 + ball_upper_limit: 0.51 + method: ball + parallel: + number_of_cores: 1 + printing: + silence: false + prior_passer: + sigma: 3.0 + use_errors: true + use_widths: true + run: + iters: 2000 + search: + cognitive: 0.5 + ftol: -.inf + inertia: 0.9 + n_particles: 50 + social: 0.3 + updates: + iterations_per_update: 500 + log_every_update: 1 + model_results_every_update: 1 + remove_state_files_at_end: true + visualize_every_update: 1 + PySwarmsLocal: + initialize: + ball_lower_limit: 0.49 + ball_upper_limit: 0.51 + method: ball + parallel: + number_of_cores: 1 + printing: + silence: false + prior_passer: + sigma: 3.0 + use_errors: true + use_widths: true + run: + iters: 2000 + search: + cognitive: 0.5 + ftol: -.inf + inertia: 0.9 + minkowski_p_norm: 2 + n_particles: 50 + number_of_k_neighbors: 3 + social: 0.3 + updates: + iterations_per_update: 500 + log_every_update: 1 + model_results_every_update: 1 + remove_state_files_at_end: true + visualize_every_update: 1 diff --git a/autofit/config/non_linear/mcmc/Emcee.ini b/autofit/config/non_linear/mcmc/Emcee.ini deleted file mode 100644 index 34e3e2159..000000000 --- a/autofit/config/non_linear/mcmc/Emcee.ini +++ /dev/null @@ -1,34 +0,0 @@ -[search] -nwalkers=50 - -[run] -nsteps=2000 - -[initialize] -method=ball -ball_lower_limit=0.49 -ball_upper_limit=0.51 - -[auto_correlations] -check_for_convergence=True -check_size=100 -required_length=50 -change_threshold=0.01 - -[updates] -iterations_per_update=500 -visualize_every_update=1 -model_results_every_update=1 -log_every_update=1 -remove_state_files_at_end=True - -[printing] -silence=False - -[prior_passer] -sigma=3.0 -use_errors=True -use_widths=True - -[parallel] -number_of_cores=1 \ No newline at end of file diff --git a/autofit/config/non_linear/mcmc/Zeus.ini b/autofit/config/non_linear/mcmc/Zeus.ini deleted file mode 100644 index 4d65a4e01..000000000 --- a/autofit/config/non_linear/mcmc/Zeus.ini +++ /dev/null @@ -1,44 +0,0 @@ -[search] -nwalkers=50 - -[run] -nsteps=2000 -tune=True -tolerance=0.05 -patience=5 -maxsteps=10000 -mu=1.0 -maxiter=10000 -vectorize=False -check_walkers=True -shuffle_ensemble=True -light_mode=False - -[initialize] -method=ball -ball_lower_limit=0.49 -ball_upper_limit=0.51 - -[auto_correlations] -check_for_convergence=True -check_size=100 -required_length=50 -change_threshold=0.01 - -[updates] -iterations_per_update=500 -visualize_every_update=1 -model_results_every_update=1 -log_every_update=1 -remove_state_files_at_end=True - -[printing] -silence=False - -[prior_passer] -sigma=3.0 -use_errors=True -use_widths=True - -[parallel] -number_of_cores=1 \ No newline at end of file diff --git a/autofit/config/non_linear/nest/DynestyDynamic.ini b/autofit/config/non_linear/nest/DynestyDynamic.ini deleted file mode 100644 index c8bc920a1..000000000 --- a/autofit/config/non_linear/nest/DynestyDynamic.ini +++ /dev/null @@ -1,45 +0,0 @@ -[search] -bound=multi -sample=auto -bootstrap=None -enlarge=None -update_interval=None -first_update=None -walks=5 -facc=0.2 -slices=5 -fmove=0.9 -max_move=100 - -[run] -nlive_init=500 -maxiter_init=None -maxcall_init=None -dlogz_init=0.01 -logl_max_init=inf -n_effective_init=inf -maxiter=None -maxcall=None -n_effective=inf - -[updates] -iterations_per_update=2500 -visualize_every_update=1 -model_results_every_update=1 -log_every_update=1 -remove_state_files_at_end=True - -[initialize] -method=prior - -[printing] -silence=False - -[prior_passer] -sigma=3.0 -use_errors=True -use_widths=True - -[parallel] -number_of_cores=1 -force_x1_cpu=False \ No newline at end of file diff --git a/autofit/config/non_linear/nest/DynestyStatic.ini b/autofit/config/non_linear/nest/DynestyStatic.ini deleted file mode 100644 index f3b1554cd..000000000 --- a/autofit/config/non_linear/nest/DynestyStatic.ini +++ /dev/null @@ -1,42 +0,0 @@ -[search] -nlive=50 -bound=multi -sample=auto -bootstrap=None -enlarge=None -update_interval=None -first_update=None -walks=5 -facc=0.2 -slices=5 -fmove=0.9 -max_move=100 - -[run] -maxiter=None -maxcall=None -dlogz=None -logl_max=inf -n_effective=None - -[updates] -iterations_per_update=5000 -visualize_every_update=1 -model_results_every_update=1 -log_every_update=1 -remove_state_files_at_end=True - -[initialize] -method=prior - -[printing] -silence=False - -[prior_passer] -sigma=3.0 -use_errors=True -use_widths=True - -[parallel] -number_of_cores=1 -force_x1_cpu=False \ No newline at end of file diff --git a/autofit/config/non_linear/nest/UltraNest.ini b/autofit/config/non_linear/nest/UltraNest.ini deleted file mode 100644 index 25132f34b..000000000 --- a/autofit/config/non_linear/nest/UltraNest.ini +++ /dev/null @@ -1,60 +0,0 @@ -[search] -resume=True -run_num=None -num_test_samples=2 -draw_multiple=True -num_bootstraps=30 -vectorized=False -ndraw_min=128 -ndraw_max=65536 -storage_backend=hdf5 -warmstart_max_tau=-1 - -[run] -update_interval_volume_fraction=0.8 -update_interval_ncall=None -log_interval=None -show_status=True -viz_callback=auto -dlogz=0.5 -dKL=0.5 -frac_remain=0.01 -Lepsilon=0.001 -min_ess=400 -max_iters=None -max_ncalls=None -max_num_improvement_loops=-1 -min_num_live_points=400 -cluster_num_live_points=40 -insertion_test_window=10 -insertion_test_zscore_threshold=2 - -[stepsampler] -stepsampler_cls=None -nsteps=25 -scale=1.0 -adaptive_nsteps=False -max_nsteps=1000 -region_filter=False -log=False - -[updates] -iterations_per_update=5000 -visualize_every_update=1 -model_results_every_update=1 -log_every_update=1 -remove_state_files_at_end=True - -[initialize] -method=prior - -[printing] -silence=False - -[prior_passer] -sigma=3.0 -use_errors=True -use_widths=True - -[parallel] -number_of_cores=1 \ No newline at end of file diff --git a/autofit/config/non_linear/optimize/Drawer.ini b/autofit/config/non_linear/optimize/Drawer.ini deleted file mode 100644 index 27c7cbdc9..000000000 --- a/autofit/config/non_linear/optimize/Drawer.ini +++ /dev/null @@ -1,22 +0,0 @@ -[search] -total_draws=50 - -[initialize] -method=ball -ball_lower_limit=0.49 -ball_upper_limit=0.51 - -[updates] -iterations_per_update=500 -visualize_every_update=1 -model_results_every_update=1 -log_every_update=1 -remove_state_files_at_end=True - -[printing] -silence=False - -[prior_passer] -sigma=3.0 -use_errors=True -use_widths=True \ No newline at end of file diff --git a/autofit/config/non_linear/optimize/LBFGS.ini b/autofit/config/non_linear/optimize/LBFGS.ini deleted file mode 100644 index 2bf493180..000000000 --- a/autofit/config/non_linear/optimize/LBFGS.ini +++ /dev/null @@ -1,33 +0,0 @@ -[search] -tol=None - -[options] -maxcor=10 -ftol=2.220446049250313e-09 -eps=1e-08 -gtol=1e-05 -maxfun=15000 -maxiter=15000 -iprint=-1 -maxls=20 -disp=False - -[initialize] -method=ball -ball_lower_limit=0.49 -ball_upper_limit=0.51 - -[updates] -iterations_per_update=500 -visualize_every_update=1 -model_results_every_update=1 -log_every_update=1 -remove_state_files_at_end=True - -[printing] -silence=False - -[prior_passer] -sigma=3.0 -use_errors=True -use_widths=True \ No newline at end of file diff --git a/autofit/config/non_linear/optimize/PySwarmsGlobal.ini b/autofit/config/non_linear/optimize/PySwarmsGlobal.ini deleted file mode 100644 index 770268e85..000000000 --- a/autofit/config/non_linear/optimize/PySwarmsGlobal.ini +++ /dev/null @@ -1,32 +0,0 @@ -[search] -n_particles=50 -cognitive=0.5 -social=0.3 -inertia=0.9 -ftol=-inf - -[run] -iters=2000 - -[initialize] -method=ball -ball_lower_limit=0.49 -ball_upper_limit=0.51 - -[updates] -iterations_per_update=500 -visualize_every_update=1 -model_results_every_update=1 -log_every_update=1 -remove_state_files_at_end=True - -[printing] -silence=False - -[prior_passer] -sigma=3.0 -use_errors=True -use_widths=True - -[parallel] -number_of_cores=1 \ No newline at end of file diff --git a/autofit/config/non_linear/optimize/PySwarmsLocal.ini b/autofit/config/non_linear/optimize/PySwarmsLocal.ini deleted file mode 100644 index 00c9a0cbc..000000000 --- a/autofit/config/non_linear/optimize/PySwarmsLocal.ini +++ /dev/null @@ -1,34 +0,0 @@ -[search] -n_particles=50 -cognitive=0.5 -social=0.3 -inertia=0.9 -number_of_k_neighbors=3 -minkowski_p_norm=2 -ftol=-inf - -[run] -iters=2000 - -[initialize] -method=ball -ball_lower_limit=0.49 -ball_upper_limit=0.51 - -[updates] -iterations_per_update=500 -visualize_every_update=1 -model_results_every_update=1 -log_every_update=1 -remove_state_files_at_end=True - -[printing] -silence=False - -[prior_passer] -sigma=3.0 -use_errors=True -use_widths=True - -[parallel] -number_of_cores=1 \ No newline at end of file diff --git a/autofit/config/notation.yaml b/autofit/config/notation.yaml new file mode 100644 index 000000000..0a5c8ccfd --- /dev/null +++ b/autofit/config/notation.yaml @@ -0,0 +1,23 @@ +label: + label: + centre: x + normalization: norm + parameter0: a + parameter1: b + parameter2: c + rate: \lambda + sigma: \sigma + superscript: + exponential: e + gaussian: g + modelcomponent0: M0 + modelcomponent1: M1 +label_format: + format: + centre: '{:.2f}' + normalization: '{:.2f}' + parameter0: '{:.2f}' + parameter1: '{:.2f}' + parameter2: '{:.2f}' + rate: '{:.2f}' + sigma: '{:.2f}' diff --git a/autofit/config/notation/label.ini b/autofit/config/notation/label.ini deleted file mode 100644 index b737d092b..000000000 --- a/autofit/config/notation/label.ini +++ /dev/null @@ -1,14 +0,0 @@ -[label] -parameter0=a -parameter1=b -parameter2=c -centre=x -normalization=norm -sigma=\sigma -rate=\lambda - -[superscript] -ModelComponent0=M0 -ModelComponent1=M1 -Gaussian=g -Exponential=e \ No newline at end of file diff --git a/autofit/config/notation/label_format.ini b/autofit/config/notation/label_format.ini deleted file mode 100644 index 95d2c985d..000000000 --- a/autofit/config/notation/label_format.ini +++ /dev/null @@ -1,8 +0,0 @@ -[format] -parameter0={:.2f} -parameter1={:.2f} -parameter2={:.2f} -centre={:.2f} -normalization={:.2f} -sigma={:.2f} -rate={:.2f} \ No newline at end of file diff --git a/autofit/config/visualize.yaml b/autofit/config/visualize.yaml new file mode 100644 index 000000000..814ae182a --- /dev/null +++ b/autofit/config/visualize.yaml @@ -0,0 +1,169 @@ +general: + general: + backend: default +include: + include_2d: {} +mat_wrap: + Axis: + figure: {} + subplot: {} + Cmap: + figure: + cmap: default + linscale: 0.01 + linthresh: 0.05 + norm: linear + vmax: null + vmin: null + subplot: + cmap: default + linscale: 0.01 + linthresh: 0.05 + norm: linear + vmax: null + vmin: null + Colorbar: + figure: + fraction: 0.047 + pad: 0.01 + subplot: + fraction: 0.047 + pad: 0.01 + ColorbarTickParams: + figure: + labelsize: 10 + subplot: + labelsize: 10 + Figure: + figure: + aspect: square + figsize: (7,7) + subplot: + aspect: square + figsize: auto + Legend: + figure: + fontsize: 12 + include: true + subplot: + fontsize: 12 + include: true + TickParams: + figure: + labelsize: 16 + subplot: + labelsize: 10 + Title: + figure: + fontsize: 16 + subplot: + fontsize: 10 + XLabel: + figure: + fontsize: 16 + subplot: + fontsize: 10 + XTicks: + figure: + fontsize: 16 + subplot: + fontsize: 10 + YLabel: + figure: + fontsize: 16 + subplot: + fontsize: 10 + YTicks: + figure: + fontsize: 16 + subplot: + fontsize: 10 +mat_wrap_1d: + AXVLine: + figure: + c: k + subplot: + c: k + YXPlot: + figure: + c: k + subplot: + c: k +mat_wrap_2d: + ArrayOverlay: + figure: + alpha: 0.5 + subplot: + alpha: 0.5 + GridPlot: + figure: + c: k + subplot: + c: k + GridScatter: + figure: + c: k + marker: . + s: 1 + subplot: + c: k + marker: . + s: 1 + PatchOverlay: + figure: + edgecolor: c + facecolor: null + subplot: + edgecolor: c + facecolor: null + VectorFieldQuiver: + figure: + alpha: 1.0 + angles: xy + headlength: 0 + headwidth: 1 + linewidth: 5 + pivot: middle + units: xy + subplot: + alpha: 1.0 + angles: xy + headlength: 0 + headwidth: 1 + linewidth: 5 + pivot: middle + units: xy + VoronoiDrawer: + figure: + alpha: 0.7 + edgecolor: k + linewidth: 0.3 + subplot: + alpha: 0.7 + edgecolor: k + linewidth: 0.3 +plots_search: + dynesty: + cornerplot: true + cornerpoints: false + runplot: true + traceplot: true + emcee: + corner: true + likelihood_series: true + time_series: true + trajectories: true + pyswarms: + contour: true + cost_history: true + time_series: true + trajectories: true + ultranest: + cornerplot: true + runplot: true + traceplot: true + zeus: + corner: true + likelihood_series: true + time_series: true + trajectories: true diff --git a/autofit/config/visualize/doc_general b/autofit/config/visualize/doc_general deleted file mode 100644 index 33db17f87..000000000 --- a/autofit/config/visualize/doc_general +++ /dev/null @@ -1,17 +0,0 @@ -A description of the general.ini visualization config. - -[general] - backend -> str - The matploblib backend used for visualization (see - https://gist.github.com/CMCDragonkai/4e9464d9f32f5893d837f3de2c43daa4 for a description of backends). - - If you use an invalid backend for your computer, PyAutoLens may crash without an error or reset your machine. - There may be no better way to find the right backend than simple trial and error. The following backends have - worked for PyAutoLens users: - - TKAgg (default) - Qt5Agg (works on new MACS) - Qt4Agg - WXAgg - WX - Agg (outputs to .fits / .png but doesn't'display figures during a run on your computer screen) \ No newline at end of file diff --git a/autofit/config/visualize/general.ini b/autofit/config/visualize/general.ini deleted file mode 100644 index a0108f6b4..000000000 --- a/autofit/config/visualize/general.ini +++ /dev/null @@ -1,2 +0,0 @@ -[general] -backend=default \ No newline at end of file diff --git a/autofit/config/visualize/include.ini b/autofit/config/visualize/include.ini deleted file mode 100644 index 1955c51e8..000000000 --- a/autofit/config/visualize/include.ini +++ /dev/null @@ -1,3 +0,0 @@ -[include_2d] - - diff --git a/autofit/config/visualize/mat_wrap/Axis.ini b/autofit/config/visualize/mat_wrap/Axis.ini deleted file mode 100644 index dbfb9f790..000000000 --- a/autofit/config/visualize/mat_wrap/Axis.ini +++ /dev/null @@ -1,3 +0,0 @@ -[figure] - -[subplot] \ No newline at end of file diff --git a/autofit/config/visualize/mat_wrap/Cmap.ini b/autofit/config/visualize/mat_wrap/Cmap.ini deleted file mode 100644 index 4573973d3..000000000 --- a/autofit/config/visualize/mat_wrap/Cmap.ini +++ /dev/null @@ -1,15 +0,0 @@ -[figure] -cmap=default -norm=linear -vmin=None -vmax=None -linthresh=0.05 -linscale=0.01 - -[subplot] -cmap=default -norm=linear -vmin=None -vmax=None -linthresh=0.05 -linscale=0.01 \ No newline at end of file diff --git a/autofit/config/visualize/mat_wrap/Colorbar.ini b/autofit/config/visualize/mat_wrap/Colorbar.ini deleted file mode 100644 index c9a45eb72..000000000 --- a/autofit/config/visualize/mat_wrap/Colorbar.ini +++ /dev/null @@ -1,7 +0,0 @@ -[figure] -fraction=0.047 -pad=0.01 - -[subplot] -fraction=0.047 -pad=0.01 \ No newline at end of file diff --git a/autofit/config/visualize/mat_wrap/ColorbarTickParams.ini b/autofit/config/visualize/mat_wrap/ColorbarTickParams.ini deleted file mode 100644 index c75fecf87..000000000 --- a/autofit/config/visualize/mat_wrap/ColorbarTickParams.ini +++ /dev/null @@ -1,5 +0,0 @@ -[figure] -labelsize=10 - -[subplot] -labelsize=10 \ No newline at end of file diff --git a/autofit/config/visualize/mat_wrap/Figure.ini b/autofit/config/visualize/mat_wrap/Figure.ini deleted file mode 100644 index 3793ee8e5..000000000 --- a/autofit/config/visualize/mat_wrap/Figure.ini +++ /dev/null @@ -1,7 +0,0 @@ -[figure] -figsize=(7,7) -aspect=square - -[subplot] -figsize=auto -aspect=square \ No newline at end of file diff --git a/autofit/config/visualize/mat_wrap/Legend.ini b/autofit/config/visualize/mat_wrap/Legend.ini deleted file mode 100644 index 6d934996d..000000000 --- a/autofit/config/visualize/mat_wrap/Legend.ini +++ /dev/null @@ -1,7 +0,0 @@ -[figure] -include=True -fontsize=12 - -[subplot] -include=True -fontsize=12 \ No newline at end of file diff --git a/autofit/config/visualize/mat_wrap/TickParams.ini b/autofit/config/visualize/mat_wrap/TickParams.ini deleted file mode 100644 index 6fe9b8364..000000000 --- a/autofit/config/visualize/mat_wrap/TickParams.ini +++ /dev/null @@ -1,5 +0,0 @@ -[figure] -labelsize=16 - -[subplot] -labelsize=10 \ No newline at end of file diff --git a/autofit/config/visualize/mat_wrap/Title.ini b/autofit/config/visualize/mat_wrap/Title.ini deleted file mode 100644 index 653c210a6..000000000 --- a/autofit/config/visualize/mat_wrap/Title.ini +++ /dev/null @@ -1,5 +0,0 @@ -[figure] -fontsize=16 - -[subplot] -fontsize=10 \ No newline at end of file diff --git a/autofit/config/visualize/mat_wrap/XLabel.ini b/autofit/config/visualize/mat_wrap/XLabel.ini deleted file mode 100644 index 653c210a6..000000000 --- a/autofit/config/visualize/mat_wrap/XLabel.ini +++ /dev/null @@ -1,5 +0,0 @@ -[figure] -fontsize=16 - -[subplot] -fontsize=10 \ No newline at end of file diff --git a/autofit/config/visualize/mat_wrap/XTicks.ini b/autofit/config/visualize/mat_wrap/XTicks.ini deleted file mode 100644 index 653c210a6..000000000 --- a/autofit/config/visualize/mat_wrap/XTicks.ini +++ /dev/null @@ -1,5 +0,0 @@ -[figure] -fontsize=16 - -[subplot] -fontsize=10 \ No newline at end of file diff --git a/autofit/config/visualize/mat_wrap/YLabel.ini b/autofit/config/visualize/mat_wrap/YLabel.ini deleted file mode 100644 index 653c210a6..000000000 --- a/autofit/config/visualize/mat_wrap/YLabel.ini +++ /dev/null @@ -1,5 +0,0 @@ -[figure] -fontsize=16 - -[subplot] -fontsize=10 \ No newline at end of file diff --git a/autofit/config/visualize/mat_wrap/YTicks.ini b/autofit/config/visualize/mat_wrap/YTicks.ini deleted file mode 100644 index 653c210a6..000000000 --- a/autofit/config/visualize/mat_wrap/YTicks.ini +++ /dev/null @@ -1,5 +0,0 @@ -[figure] -fontsize=16 - -[subplot] -fontsize=10 \ No newline at end of file diff --git a/autofit/config/visualize/mat_wrap_1d/AXVLine.ini b/autofit/config/visualize/mat_wrap_1d/AXVLine.ini deleted file mode 100644 index 83b47a984..000000000 --- a/autofit/config/visualize/mat_wrap_1d/AXVLine.ini +++ /dev/null @@ -1,5 +0,0 @@ -[figure] -c=k - -[subplot] -c=k \ No newline at end of file diff --git a/autofit/config/visualize/mat_wrap_1d/YXPlot.ini b/autofit/config/visualize/mat_wrap_1d/YXPlot.ini deleted file mode 100644 index 83b47a984..000000000 --- a/autofit/config/visualize/mat_wrap_1d/YXPlot.ini +++ /dev/null @@ -1,5 +0,0 @@ -[figure] -c=k - -[subplot] -c=k \ No newline at end of file diff --git a/autofit/config/visualize/mat_wrap_2d/ArrayOverlay.ini b/autofit/config/visualize/mat_wrap_2d/ArrayOverlay.ini deleted file mode 100644 index 61974a2bf..000000000 --- a/autofit/config/visualize/mat_wrap_2d/ArrayOverlay.ini +++ /dev/null @@ -1,5 +0,0 @@ -[figure] -alpha=0.5 - -[subplot] -alpha=0.5 \ No newline at end of file diff --git a/autofit/config/visualize/mat_wrap_2d/GridPlot.ini b/autofit/config/visualize/mat_wrap_2d/GridPlot.ini deleted file mode 100644 index 83b47a984..000000000 --- a/autofit/config/visualize/mat_wrap_2d/GridPlot.ini +++ /dev/null @@ -1,5 +0,0 @@ -[figure] -c=k - -[subplot] -c=k \ No newline at end of file diff --git a/autofit/config/visualize/mat_wrap_2d/GridScatter.ini b/autofit/config/visualize/mat_wrap_2d/GridScatter.ini deleted file mode 100644 index 90ef9f97d..000000000 --- a/autofit/config/visualize/mat_wrap_2d/GridScatter.ini +++ /dev/null @@ -1,9 +0,0 @@ -[figure] -s=1 -marker=. -c=k - -[subplot] -s=1 -marker=. -c=k \ No newline at end of file diff --git a/autofit/config/visualize/mat_wrap_2d/PatchOverlay.ini b/autofit/config/visualize/mat_wrap_2d/PatchOverlay.ini deleted file mode 100644 index 682e0204f..000000000 --- a/autofit/config/visualize/mat_wrap_2d/PatchOverlay.ini +++ /dev/null @@ -1,7 +0,0 @@ -[figure] -facecolor=none -edgecolor=c - -[subplot] -facecolor=none -edgecolor=c \ No newline at end of file diff --git a/autofit/config/visualize/mat_wrap_2d/VectorFieldQuiver.ini b/autofit/config/visualize/mat_wrap_2d/VectorFieldQuiver.ini deleted file mode 100644 index 195128eda..000000000 --- a/autofit/config/visualize/mat_wrap_2d/VectorFieldQuiver.ini +++ /dev/null @@ -1,17 +0,0 @@ -[figure] -headlength=0 -pivot=middle -linewidth=5 -units=xy -angles=xy -headwidth=1 -alpha=1. - -[subplot] -headlength=0 -pivot=middle -linewidth=5 -units=xy -angles=xy -headwidth=1 -alpha=1. diff --git a/autofit/config/visualize/mat_wrap_2d/VoronoiDrawer.ini b/autofit/config/visualize/mat_wrap_2d/VoronoiDrawer.ini deleted file mode 100644 index 5c2583764..000000000 --- a/autofit/config/visualize/mat_wrap_2d/VoronoiDrawer.ini +++ /dev/null @@ -1,9 +0,0 @@ -[figure] -linewidth=0.3 -edgecolor=k -alpha=0.7 - -[subplot] -linewidth=0.3 -edgecolor=k -alpha=0.7 diff --git a/autofit/config/visualize/plots_search.ini b/autofit/config/visualize/plots_search.ini deleted file mode 100644 index 4e970099a..000000000 --- a/autofit/config/visualize/plots_search.ini +++ /dev/null @@ -1,28 +0,0 @@ -[dynesty] -cornerplot=True -runplot=True -traceplot=True -cornerpoints=False - -[ultranest] -cornerplot=True -runplot=True -traceplot=True - -[emcee] -corner=True -trajectories=True -likelihood_series=True -time_series=True - -[zeus] -corner=True -trajectories=True -likelihood_series=True -time_series=True - -[pyswarms] -contour=True -cost_history=True -trajectories=True -time_series=True \ No newline at end of file From cee6d18896a68530b7102d728309a672f0e8fada Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 28 Nov 2022 14:38:52 +0000 Subject: [PATCH 011/226] convert all priors to YAML --- autofit/config/priors/Exponential.json | 41 --------- autofit/config/priors/Exponential.yaml | 30 +++++++ autofit/config/priors/Gaussian.json | 51 ----------- autofit/config/priors/Gaussian.yaml | 37 ++++++++ autofit/config/priors/GaussianKurtosis.json | 64 -------------- autofit/config/priors/GaussianKurtosis.yaml | 47 +++++++++++ .../config/priors/MultiLevelGaussians.json | 15 ---- .../config/priors/MultiLevelGaussians.yaml | 10 +++ autofit/config/priors/model.json | 84 ------------------- autofit/config/priors/model.yaml | 62 ++++++++++++++ autofit/config/priors/prior.json | 12 --- autofit/config/priors/prior.yaml | 7 ++ autofit/config/priors/profiles.json | 84 ------------------- autofit/config/priors/profiles.yaml | 62 ++++++++++++++ autofit/config/priors/template.json | 84 ------------------- autofit/config/priors/template.yaml | 62 ++++++++++++++ 16 files changed, 317 insertions(+), 435 deletions(-) delete mode 100644 autofit/config/priors/Exponential.json create mode 100644 autofit/config/priors/Exponential.yaml delete mode 100644 autofit/config/priors/Gaussian.json create mode 100644 autofit/config/priors/Gaussian.yaml delete mode 100644 autofit/config/priors/GaussianKurtosis.json create mode 100644 autofit/config/priors/GaussianKurtosis.yaml delete mode 100644 autofit/config/priors/MultiLevelGaussians.json create mode 100644 autofit/config/priors/MultiLevelGaussians.yaml delete mode 100644 autofit/config/priors/model.json create mode 100644 autofit/config/priors/model.yaml delete mode 100644 autofit/config/priors/prior.json create mode 100644 autofit/config/priors/prior.yaml delete mode 100644 autofit/config/priors/profiles.json create mode 100644 autofit/config/priors/profiles.yaml delete mode 100644 autofit/config/priors/template.json create mode 100644 autofit/config/priors/template.yaml diff --git a/autofit/config/priors/Exponential.json b/autofit/config/priors/Exponential.json deleted file mode 100644 index 707d66aab..000000000 --- a/autofit/config/priors/Exponential.json +++ /dev/null @@ -1,41 +0,0 @@ -{ - "centre": { - "type": "Uniform", - "lower_limit": 0.0, - "upper_limit": 100.0, - "width_modifier": { - "type": "Absolute", - "value": 20.0 - }, - "gaussian_limits": { - "lower": "-inf", - "upper": "inf" - } - }, - "normalization": { - "type": "LogUniform", - "lower_limit": 1e-06, - "upper_limit": 1000000.0, - "width_modifier": { - "type": "Relative", - "value": 0.5 - }, - "gaussian_limits": { - "lower": 0.0, - "upper": "inf" - } - }, - "rate": { - "type": "Uniform", - "lower_limit": 0.0, - "upper_limit": 1.0, - "width_modifier": { - "type": "Relative", - "value": 0.5 - }, - "gaussian_limits": { - "lower": 0.0, - "upper": "inf" - } - } -} \ No newline at end of file diff --git a/autofit/config/priors/Exponential.yaml b/autofit/config/priors/Exponential.yaml new file mode 100644 index 000000000..c4c387009 --- /dev/null +++ b/autofit/config/priors/Exponential.yaml @@ -0,0 +1,30 @@ +centre: + gaussian_limits: + lower: -inf + upper: inf + lower_limit: 0.0 + type: Uniform + upper_limit: 100.0 + width_modifier: + type: Absolute + value: 20.0 +normalization: + gaussian_limits: + lower: 0.0 + upper: inf + lower_limit: 1.0e-06 + type: LogUniform + upper_limit: 1000000.0 + width_modifier: + type: Relative + value: 0.5 +rate: + gaussian_limits: + lower: 0.0 + upper: inf + lower_limit: 0.0 + type: Uniform + upper_limit: 1.0 + width_modifier: + type: Relative + value: 0.5 diff --git a/autofit/config/priors/Gaussian.json b/autofit/config/priors/Gaussian.json deleted file mode 100644 index 1854bc22b..000000000 --- a/autofit/config/priors/Gaussian.json +++ /dev/null @@ -1,51 +0,0 @@ -{ - "centre": { - "type": "Uniform", - "lower_limit": 0.0, - "upper_limit": 100.0, - "width_modifier": { - "type": "Absolute", - "value": 20.0 - }, - "gaussian_limits": { - "lower": "-inf", - "upper": "inf" - } - }, - "normalization": { - "type": "LogUniform", - "lower_limit": 1e-06, - "upper_limit": 1000000.0, - "width_modifier": { - "type": "Relative", - "value": 0.5 - }, - "gaussian_limits": { - "lower": 0.0, - "upper": "inf" - } - }, - "sigma": { - "type": "Uniform", - "lower_limit": 0.0, - "upper_limit": 25.0, - "width_modifier": { - "type": "Relative", - "value": 0.5 - }, - "gaussian_limits": { - "lower": 0.0, - "upper": "inf" - } - }, - "GaussianPrior": { - "lower_limit": { - "type": "Constant", - "value": "-inf" - }, - "upper_limit": { - "type": "Constant", - "value": "inf" - } - } -} \ No newline at end of file diff --git a/autofit/config/priors/Gaussian.yaml b/autofit/config/priors/Gaussian.yaml new file mode 100644 index 000000000..4e8419ae3 --- /dev/null +++ b/autofit/config/priors/Gaussian.yaml @@ -0,0 +1,37 @@ +GaussianPrior: + lower_limit: + type: Constant + value: -inf + upper_limit: + type: Constant + value: inf +centre: + gaussian_limits: + lower: -inf + upper: inf + lower_limit: 0.0 + type: Uniform + upper_limit: 100.0 + width_modifier: + type: Absolute + value: 20.0 +normalization: + gaussian_limits: + lower: 0.0 + upper: inf + lower_limit: 1.0e-06 + type: LogUniform + upper_limit: 1000000.0 + width_modifier: + type: Relative + value: 0.5 +sigma: + gaussian_limits: + lower: 0.0 + upper: inf + lower_limit: 0.0 + type: Uniform + upper_limit: 25.0 + width_modifier: + type: Relative + value: 0.5 diff --git a/autofit/config/priors/GaussianKurtosis.json b/autofit/config/priors/GaussianKurtosis.json deleted file mode 100644 index 3c56aa9d1..000000000 --- a/autofit/config/priors/GaussianKurtosis.json +++ /dev/null @@ -1,64 +0,0 @@ -{ - "centre": { - "type": "Uniform", - "lower_limit": 0.0, - "upper_limit": 100.0, - "width_modifier": { - "type": "Absolute", - "value": 20.0 - }, - "gaussian_limits": { - "lower": "-inf", - "upper": "inf" - } - }, - "normalization": { - "type": "LogUniform", - "lower_limit": 1e-06, - "upper_limit": 1000000.0, - "width_modifier": { - "type": "Relative", - "value": 0.5 - }, - "gaussian_limits": { - "lower": 0.0, - "upper": "inf" - } - }, - "sigma": { - "type": "Uniform", - "lower_limit": 0.0, - "upper_limit": 25.0, - "width_modifier": { - "type": "Relative", - "value": 0.5 - }, - "gaussian_limits": { - "lower": 0.0, - "upper": "inf" - } - }, - "kurtosis": { - "type": "Uniform", - "lower_limit": 0.0, - "upper_limit": 100.0, - "width_modifier": { - "type": "Absolute", - "value": 20.0 - }, - "gaussian_limits": { - "lower": "-inf", - "upper": "inf" - } - }, - "GaussianPrior": { - "lower_limit": { - "type": "Constant", - "value": "-inf" - }, - "upper_limit": { - "type": "Constant", - "value": "inf" - } - } -} \ No newline at end of file diff --git a/autofit/config/priors/GaussianKurtosis.yaml b/autofit/config/priors/GaussianKurtosis.yaml new file mode 100644 index 000000000..739de5d5c --- /dev/null +++ b/autofit/config/priors/GaussianKurtosis.yaml @@ -0,0 +1,47 @@ +GaussianPrior: + lower_limit: + type: Constant + value: -inf + upper_limit: + type: Constant + value: inf +centre: + gaussian_limits: + lower: -inf + upper: inf + lower_limit: 0.0 + type: Uniform + upper_limit: 100.0 + width_modifier: + type: Absolute + value: 20.0 +kurtosis: + gaussian_limits: + lower: -inf + upper: inf + lower_limit: 0.0 + type: Uniform + upper_limit: 100.0 + width_modifier: + type: Absolute + value: 20.0 +normalization: + gaussian_limits: + lower: 0.0 + upper: inf + lower_limit: 1.0e-06 + type: LogUniform + upper_limit: 1000000.0 + width_modifier: + type: Relative + value: 0.5 +sigma: + gaussian_limits: + lower: 0.0 + upper: inf + lower_limit: 0.0 + type: Uniform + upper_limit: 25.0 + width_modifier: + type: Relative + value: 0.5 diff --git a/autofit/config/priors/MultiLevelGaussians.json b/autofit/config/priors/MultiLevelGaussians.json deleted file mode 100644 index 901930cbe..000000000 --- a/autofit/config/priors/MultiLevelGaussians.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "higher_level_centre": { - "type": "Uniform", - "lower_limit": 0.0, - "upper_limit": 100.0, - "width_modifier": { - "type": "Absolute", - "value": 20.0 - }, - "gaussian_limits": { - "lower": "-inf", - "upper": "inf" - } - } -} \ No newline at end of file diff --git a/autofit/config/priors/MultiLevelGaussians.yaml b/autofit/config/priors/MultiLevelGaussians.yaml new file mode 100644 index 000000000..1356bbe2a --- /dev/null +++ b/autofit/config/priors/MultiLevelGaussians.yaml @@ -0,0 +1,10 @@ +higher_level_centre: + gaussian_limits: + lower: -inf + upper: inf + lower_limit: 0.0 + type: Uniform + upper_limit: 100.0 + width_modifier: + type: Absolute + value: 20.0 diff --git a/autofit/config/priors/model.json b/autofit/config/priors/model.json deleted file mode 100644 index 2fcd1512d..000000000 --- a/autofit/config/priors/model.json +++ /dev/null @@ -1,84 +0,0 @@ -{ - "Gaussian": { - "centre": { - "type": "Uniform", - "lower_limit": 0.0, - "upper_limit": 100.0, - "width_modifier": { - "type": "Absolute", - "value": 20.0 - }, - "gaussian_limits": { - "lower": "-inf", - "upper": "inf" - } - }, - "normalization": { - "type": "LogUniform", - "lower_limit": 1.0e-6, - "upper_limit": 1000000.0, - "width_modifier": { - "type": "Relative", - "value": 0.5 - }, - "gaussian_limits": { - "lower": 0.0, - "upper": "inf" - } - }, - "sigma": { - "type": "Uniform", - "lower_limit": 0.0, - "upper_limit": 25.0, - "width_modifier": { - "type": "Relative", - "value": 0.5 - }, - "gaussian_limits": { - "lower": 0.0, - "upper": "inf" - } - } - }, - "Exponential": { - "centre": { - "type": "Uniform", - "lower_limit": 0.0, - "upper_limit": 100.0, - "width_modifier": { - "type": "Absolute", - "value": 20.0 - }, - "gaussian_limits": { - "lower": "-inf", - "upper": "inf" - } - }, - "normalization": { - "type": "LogUniform", - "lower_limit": 1.0e-6, - "upper_limit": 1000000.0, - "width_modifier": { - "type": "Relative", - "value": 0.5 - }, - "gaussian_limits": { - "lower": 0.0, - "upper": "inf" - } - }, - "rate": { - "type": "Uniform", - "lower_limit": 0.0, - "upper_limit": 10.0, - "width_modifier": { - "type": "Relative", - "value": 0.5 - }, - "gaussian_limits": { - "lower": 0.0, - "upper": "inf" - } - } - } -} \ No newline at end of file diff --git a/autofit/config/priors/model.yaml b/autofit/config/priors/model.yaml new file mode 100644 index 000000000..7809624cf --- /dev/null +++ b/autofit/config/priors/model.yaml @@ -0,0 +1,62 @@ +Exponential: + centre: + gaussian_limits: + lower: -inf + upper: inf + lower_limit: 0.0 + type: Uniform + upper_limit: 100.0 + width_modifier: + type: Absolute + value: 20.0 + normalization: + gaussian_limits: + lower: 0.0 + upper: inf + lower_limit: 1.0e-06 + type: LogUniform + upper_limit: 1000000.0 + width_modifier: + type: Relative + value: 0.5 + rate: + gaussian_limits: + lower: 0.0 + upper: inf + lower_limit: 0.0 + type: Uniform + upper_limit: 10.0 + width_modifier: + type: Relative + value: 0.5 +Gaussian: + centre: + gaussian_limits: + lower: -inf + upper: inf + lower_limit: 0.0 + type: Uniform + upper_limit: 100.0 + width_modifier: + type: Absolute + value: 20.0 + normalization: + gaussian_limits: + lower: 0.0 + upper: inf + lower_limit: 1.0e-06 + type: LogUniform + upper_limit: 1000000.0 + width_modifier: + type: Relative + value: 0.5 + sigma: + gaussian_limits: + lower: 0.0 + upper: inf + lower_limit: 0.0 + type: Uniform + upper_limit: 25.0 + width_modifier: + type: Relative + value: 0.5 diff --git a/autofit/config/priors/prior.json b/autofit/config/priors/prior.json deleted file mode 100644 index 0f7dab2a6..000000000 --- a/autofit/config/priors/prior.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "gaussian.GaussianPrior": { - "lower_limit": { - "type": "Constant", - "value": "-inf" - }, - "upper_limit": { - "type": "Constant", - "value": "inf" - } - } -} \ No newline at end of file diff --git a/autofit/config/priors/prior.yaml b/autofit/config/priors/prior.yaml new file mode 100644 index 000000000..4cc72c4ae --- /dev/null +++ b/autofit/config/priors/prior.yaml @@ -0,0 +1,7 @@ +gaussian.GaussianPrior: + lower_limit: + type: Constant + value: -inf + upper_limit: + type: Constant + value: inf diff --git a/autofit/config/priors/profiles.json b/autofit/config/priors/profiles.json deleted file mode 100644 index 2fcd1512d..000000000 --- a/autofit/config/priors/profiles.json +++ /dev/null @@ -1,84 +0,0 @@ -{ - "Gaussian": { - "centre": { - "type": "Uniform", - "lower_limit": 0.0, - "upper_limit": 100.0, - "width_modifier": { - "type": "Absolute", - "value": 20.0 - }, - "gaussian_limits": { - "lower": "-inf", - "upper": "inf" - } - }, - "normalization": { - "type": "LogUniform", - "lower_limit": 1.0e-6, - "upper_limit": 1000000.0, - "width_modifier": { - "type": "Relative", - "value": 0.5 - }, - "gaussian_limits": { - "lower": 0.0, - "upper": "inf" - } - }, - "sigma": { - "type": "Uniform", - "lower_limit": 0.0, - "upper_limit": 25.0, - "width_modifier": { - "type": "Relative", - "value": 0.5 - }, - "gaussian_limits": { - "lower": 0.0, - "upper": "inf" - } - } - }, - "Exponential": { - "centre": { - "type": "Uniform", - "lower_limit": 0.0, - "upper_limit": 100.0, - "width_modifier": { - "type": "Absolute", - "value": 20.0 - }, - "gaussian_limits": { - "lower": "-inf", - "upper": "inf" - } - }, - "normalization": { - "type": "LogUniform", - "lower_limit": 1.0e-6, - "upper_limit": 1000000.0, - "width_modifier": { - "type": "Relative", - "value": 0.5 - }, - "gaussian_limits": { - "lower": 0.0, - "upper": "inf" - } - }, - "rate": { - "type": "Uniform", - "lower_limit": 0.0, - "upper_limit": 10.0, - "width_modifier": { - "type": "Relative", - "value": 0.5 - }, - "gaussian_limits": { - "lower": 0.0, - "upper": "inf" - } - } - } -} \ No newline at end of file diff --git a/autofit/config/priors/profiles.yaml b/autofit/config/priors/profiles.yaml new file mode 100644 index 000000000..7809624cf --- /dev/null +++ b/autofit/config/priors/profiles.yaml @@ -0,0 +1,62 @@ +Exponential: + centre: + gaussian_limits: + lower: -inf + upper: inf + lower_limit: 0.0 + type: Uniform + upper_limit: 100.0 + width_modifier: + type: Absolute + value: 20.0 + normalization: + gaussian_limits: + lower: 0.0 + upper: inf + lower_limit: 1.0e-06 + type: LogUniform + upper_limit: 1000000.0 + width_modifier: + type: Relative + value: 0.5 + rate: + gaussian_limits: + lower: 0.0 + upper: inf + lower_limit: 0.0 + type: Uniform + upper_limit: 10.0 + width_modifier: + type: Relative + value: 0.5 +Gaussian: + centre: + gaussian_limits: + lower: -inf + upper: inf + lower_limit: 0.0 + type: Uniform + upper_limit: 100.0 + width_modifier: + type: Absolute + value: 20.0 + normalization: + gaussian_limits: + lower: 0.0 + upper: inf + lower_limit: 1.0e-06 + type: LogUniform + upper_limit: 1000000.0 + width_modifier: + type: Relative + value: 0.5 + sigma: + gaussian_limits: + lower: 0.0 + upper: inf + lower_limit: 0.0 + type: Uniform + upper_limit: 25.0 + width_modifier: + type: Relative + value: 0.5 diff --git a/autofit/config/priors/template.json b/autofit/config/priors/template.json deleted file mode 100644 index 4f23c6bfe..000000000 --- a/autofit/config/priors/template.json +++ /dev/null @@ -1,84 +0,0 @@ -{ - "ModelComponent0": { - "parameter0": { - "type": "Uniform", - "lower_limit": 0.0, - "upper_limit": 1.0, - "width_modifier": { - "type": "Absolute", - "value": 20.0 - }, - "gaussian_limits": { - "lower": "-inf", - "upper": "inf" - } - }, - "parameter1": { - "type": "LogUniform", - "lower_limit": 1e-06, - "upper_limit": 1e6, - "width_modifier": { - "type": "Relative", - "value": 0.5 - }, - "gaussian_limits": { - "lower": 0.0, - "upper": "inf" - } - }, - "parameter2": { - "type": "Uniform", - "lower_limit": 0.0, - "upper_limit": 25.0, - "width_modifier": { - "type": "Relative", - "value": 0.5 - }, - "gaussian_limits": { - "lower": 0.0, - "upper": "inf" - } - } - }, - "ModelComponent1": { - "parameter0": { - "type": "Uniform", - "lower_limit": 0.0, - "upper_limit": 1.0, - "width_modifier": { - "type": "Absolute", - "value": 20.0 - }, - "gaussian_limits": { - "lower": "-inf", - "upper": "inf" - } - }, - "parameter1": { - "type": "LogUniform", - "lower_limit": 1e-06, - "upper_limit": 1e6, - "width_modifier": { - "type": "Relative", - "value": 0.5 - }, - "gaussian_limits": { - "lower": 0.0, - "upper": "inf" - } - }, - "parameter2": { - "type": "Uniform", - "lower_limit": 0.0, - "upper_limit": 1.0, - "width_modifier": { - "type": "Relative", - "value": 0.5 - }, - "gaussian_limits": { - "lower": 0.0, - "upper": "inf" - } - } - } -} \ No newline at end of file diff --git a/autofit/config/priors/template.yaml b/autofit/config/priors/template.yaml new file mode 100644 index 000000000..82f5513c9 --- /dev/null +++ b/autofit/config/priors/template.yaml @@ -0,0 +1,62 @@ +ModelComponent0: + parameter0: + gaussian_limits: + lower: -inf + upper: inf + lower_limit: 0.0 + type: Uniform + upper_limit: 1.0 + width_modifier: + type: Absolute + value: 20.0 + parameter1: + gaussian_limits: + lower: 0.0 + upper: inf + lower_limit: 1.0e-06 + type: LogUniform + upper_limit: 1000000.0 + width_modifier: + type: Relative + value: 0.5 + parameter2: + gaussian_limits: + lower: 0.0 + upper: inf + lower_limit: 0.0 + type: Uniform + upper_limit: 25.0 + width_modifier: + type: Relative + value: 0.5 +ModelComponent1: + parameter0: + gaussian_limits: + lower: -inf + upper: inf + lower_limit: 0.0 + type: Uniform + upper_limit: 1.0 + width_modifier: + type: Absolute + value: 20.0 + parameter1: + gaussian_limits: + lower: 0.0 + upper: inf + lower_limit: 1.0e-06 + type: LogUniform + upper_limit: 1000000.0 + width_modifier: + type: Relative + value: 0.5 + parameter2: + gaussian_limits: + lower: 0.0 + upper: inf + lower_limit: 0.0 + type: Uniform + upper_limit: 1.0 + width_modifier: + type: Relative + value: 0.5 From ade76eb74bd5e7dd771f334becb821e2222dc4ef Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 28 Nov 2022 16:45:14 +0000 Subject: [PATCH 012/226] extra parameters to zip and pass to method in combined analysis --- autofit/non_linear/analysis/combined.py | 140 ++++++------------------ 1 file changed, 33 insertions(+), 107 deletions(-) diff --git a/autofit/non_linear/analysis/combined.py b/autofit/non_linear/analysis/combined.py index 213ec4cbd..d1ba416ce 100644 --- a/autofit/non_linear/analysis/combined.py +++ b/autofit/non_linear/analysis/combined.py @@ -10,18 +10,14 @@ from autofit.non_linear.result import Result from .analysis import Analysis -logger = logging.getLogger( - __name__ -) +logger = logging.getLogger(__name__) class CombinedAnalysis(Analysis): def __new__(cls, *analyses, **kwargs): from .model_analysis import ModelAnalysis, CombinedModelAnalysis - if any( - isinstance(analysis, ModelAnalysis) - for analysis in analyses - ): + + if any(isinstance(analysis, ModelAnalysis) for analysis in analyses): return object.__new__(CombinedModelAnalysis) return object.__new__(cls) @@ -44,13 +40,7 @@ def __init__(self, *analyses: Analysis): self.analyses = analyses self._n_cores = None self._log_likelihood_function = None - self.n_cores = conf.instance[ - "general" - ][ - "analysis" - ][ - "n_cores" - ] + self.n_cores = conf.instance["general"]["analysis"]["n_cores"] @property def n_cores(self): @@ -66,10 +56,7 @@ def n_cores(self, n_cores: int): """ self._n_cores = n_cores if self.n_cores > 1: - analysis_pool = AnalysisPool( - self.analyses, - self.n_cores - ) + analysis_pool = AnalysisPool(self.analyses, self.n_cores) self._log_likelihood_function = analysis_pool else: self._log_likelihood_function = self._summed_log_likelihood @@ -89,16 +76,13 @@ def _summed_log_likelihood(self, instance) -> float: A combined log likelihood """ return sum( - analysis.log_likelihood_function( - instance - ) - for analysis in self.analyses + analysis.log_likelihood_function(instance) for analysis in self.analyses ) def log_likelihood_function(self, instance): return self._log_likelihood_function(instance) - def _for_each_analysis(self, func, paths): + def _for_each_analysis(self, func, paths, *args): """ Convenience function to call an underlying function for each analysis with a paths object with an integer attached to the @@ -111,45 +95,23 @@ def _for_each_analysis(self, func, paths): paths An object describing the paths for saving data (e.g. hard-disk directories or entries in sqlite database). """ - for i, analysis in enumerate(self.analyses): - child_paths = paths.for_sub_analysis( - analysis_name=f"analyses/analysis_{i}" - ) - func(child_paths, analysis) + for (i, analysis), *args in zip(enumerate(self.analyses), *args): + child_paths = paths.for_sub_analysis(analysis_name=f"analyses/analysis_{i}") + func(child_paths, analysis, *args) def save_attributes_for_aggregator(self, paths: AbstractPaths): def func(child_paths, analysis): - analysis.save_attributes_for_aggregator( - child_paths, - ) + analysis.save_attributes_for_aggregator(child_paths,) - self._for_each_analysis( - func, - paths - ) + self._for_each_analysis(func, paths) - def save_results_for_aggregator( - self, - paths: AbstractPaths, - result:Result - ): - def func(child_paths, analysis): - analysis.save_results_for_aggregator( - paths=child_paths, - result=result - ) + def save_results_for_aggregator(self, paths: AbstractPaths, result: Result): + def func(child_paths, analysis, result_): + analysis.save_results_for_aggregator(paths=child_paths, result=result_) - self._for_each_analysis( - func, - paths - ) + self._for_each_analysis(func, paths, result) - def visualize( - self, - paths: AbstractPaths, - instance, - during_analysis - ): + def visualize(self, paths: AbstractPaths, instance, during_analysis): """ Visualise the instance according to each analysis. @@ -167,21 +129,12 @@ def visualize( """ def func(child_paths, analysis): - analysis.visualize( - child_paths, - instance, - during_analysis - ) + analysis.visualize(child_paths, instance, during_analysis) - self._for_each_analysis( - func, - paths - ) + self._for_each_analysis(func, paths) def profile_log_likelihood_function( - self, - paths: AbstractPaths, - instance, + self, paths: AbstractPaths, instance, ): """ Profile the log likelihood function of the maximum likelihood model instance using each analysis. @@ -198,35 +151,20 @@ def profile_log_likelihood_function( def func(child_paths, analysis): analysis.profile_log_likelihood_function( - child_paths, - instance, + child_paths, instance, ) - self._for_each_analysis( - func, - paths - ) + self._for_each_analysis(func, paths) - def make_result( - self, - samples, - model, - sigma=1.0, - use_errors=True, - use_widths=False - ): + def make_result(self, samples, model, sigma=1.0, use_errors=True, use_widths=False): child_results = [ analysis.make_result( - samples, - model, - sigma=1.0, use_errors=True, use_widths=False - ) for analysis in self.analyses + samples, model, sigma=1.0, use_errors=True, use_widths=False + ) + for analysis in self.analyses ] result = self.analyses[0].make_result( - samples=samples, - model=model, - sigma=1.0, use_errors=True, use_widths=False - + samples=samples, model=model, sigma=1.0, use_errors=True, use_widths=False ) result.child_results = child_results return result @@ -249,22 +187,12 @@ def __add__(self, other: Analysis): ------- An overarching analysis """ - if isinstance( - other, - CombinedAnalysis - ): - return type(self)( - *self.analyses, - *other.analyses - ) - return type(self)( - *self.analyses, - other - ) + if isinstance(other, CombinedAnalysis): + return type(self)(*self.analyses, *other.analyses) + return type(self)(*self.analyses, other) def with_free_parameters( - self, - *free_parameters: Union[Prior, TuplePrior, AbstractPriorModel] + self, *free_parameters: Union[Prior, TuplePrior, AbstractPriorModel] ): """ Set some parameters as free parameters. The are priors which vary @@ -280,7 +208,5 @@ def with_free_parameters( An analysis with freely varying parameters. """ from .free_parameter import FreeParameterAnalysis - return FreeParameterAnalysis( - *self.analyses, - free_parameters=free_parameters - ) + + return FreeParameterAnalysis(*self.analyses, free_parameters=free_parameters) From 9872d1e1384776053e2daf5e557c1ebba9535d0a Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Tue, 29 Nov 2022 13:48:28 +0000 Subject: [PATCH 013/226] documented configs --- autofit/config/README.rst | 15 ++ autofit/config/doc | 36 --- autofit/config/doc_general | 34 --- autofit/config/general.yaml | 32 +-- autofit/config/non_linear.yaml | 304 ------------------------ autofit/config/non_linear/README.rst | 9 + autofit/config/non_linear/mcmc.yaml | 70 ++++++ autofit/config/non_linear/nest.yaml | 132 ++++++++++ autofit/config/non_linear/optimize.yaml | 105 ++++++++ autofit/config/notation.yaml | 18 ++ autofit/config/visualize.yaml | 179 ++------------ docs/installation/conda.rst | 2 +- docs/installation/pip.rst | 2 +- 13 files changed, 386 insertions(+), 552 deletions(-) create mode 100644 autofit/config/README.rst delete mode 100644 autofit/config/doc delete mode 100644 autofit/config/doc_general delete mode 100644 autofit/config/non_linear.yaml create mode 100644 autofit/config/non_linear/README.rst create mode 100644 autofit/config/non_linear/mcmc.yaml create mode 100644 autofit/config/non_linear/nest.yaml create mode 100644 autofit/config/non_linear/optimize.yaml diff --git a/autofit/config/README.rst b/autofit/config/README.rst new file mode 100644 index 000000000..0264f9953 --- /dev/null +++ b/autofit/config/README.rst @@ -0,0 +1,15 @@ +The ``config`` folder contains configuration files which customize default **PyAutoLens**. + +Folders +------- + +- ``priors``: Configs defining default priors assumed on every lens model component and set of parameters. + +Files +----- + +- ``general.yaml``: Customizes general **PyAutoLens** settings. +- ``non-linear.yaml``: Configs for default non-linear search (e.g. MCMC, nested sampling) settings. +- ``logging.yaml``: Customizes the logging behaviour of **PyAutoLens**. +- ``visualize.yaml``: Configs defining what images are output by a lens model fit. +- ``notation.yaml``: Configs defining labels and formatting of model parameters when used for visualization. diff --git a/autofit/config/doc b/autofit/config/doc deleted file mode 100644 index ab6b191dd..000000000 --- a/autofit/config/doc +++ /dev/null @@ -1,36 +0,0 @@ -A general description of configs in PyAutoFit is provided here, checkout their associated doc file for a more - detailed description of every config. - -general.ini: - Customizes general PyAutoFit settings such as the frequency of logging and backing up. - -priors: - Customize the priors of every model-component in your project (see doc_priors). - - default: - The default priors used on every model component in PyAutoFit (e.g. light proiles, mass profiles, etc.), for - example if they use a UniformPrior or GausssianPrior and the range of values sampled. - limit: - The limits between which a parameter may be sampled. For example, the axis-ratio of light and mass profiles has - limits 0.0 -> 1.0 to prevent unphysical models. - width: - The width of the GaussianPrior used for each parameter if its priors are initialized via linking from a previous - search. - - -visualize: - general.ini: - General visualization setting like the matplotlib backend. - - -non-Linear: - Customize the default `NonLinearSearch` settings used by PyAutoFit (see doc_non_linear). - - -label.ini: - The labels used for every model parameter on certain figures (e.g. the label for centres are y an x and for an - axis ratio is q). - -label_format.ini: - The format of certain results output by PyAutoFit (e.g. the mass format is {:.4e}, meaning it is output as an - exponential to 4 dp). \ No newline at end of file diff --git a/autofit/config/doc_general b/autofit/config/doc_general deleted file mode 100644 index 43ecba714..000000000 --- a/autofit/config/doc_general +++ /dev/null @@ -1,34 +0,0 @@ -A description of the general.ini config. - -[output] - log_to_file -> bool - If `True`, output is logged to a file as opposed to the command line. - log_file -> str - The file name the logged output is written to (in the `NonLinearSearch` output folder). - log_level -> str - The level of logging. - model_results_decimal_places -> int - The number of decimal places the estimated values and errors of all parameters in the model.results file are - output to. - remove_files -> bool - If True, all output files of a `NonLinearSearch` (e.g. samples, samples_backup, model.results, images, etc.) - are deleted once the model-fit has completed. - A .zip file of all output is always created before files are removed, thus results are not lost with this - option turned on. If PyAutoFit does not find the output files of a model-fit (because they were removed) but - does find this .zip file, it will unzip the contents and continue the analysis as if the files were - there all along. - This feature was implemented because super-computers often have a limit on the number of files allowed per - user and the large number of files output by PyAutoFit can exceed this limit. By removing files the - number of files is restricted only to the .zip files. - force_pickle_overwrite -> bool - If `True`, the .pickle files of a resumed PyAutoFit run are overwritten for a fit even if the model-fit is - completed and skipped. This is used so that results can be refreshed with new pickles, so that changes to - source-code are refelected in `Aggregator` use. - -[hoc] - hpc_mode -> bool - If `True`, high performance computing mode is used, which adjusts many settings automatically which are - desireable for using PyAutoFit on super computers (e.g. minimizes file output, minimizes visualization, etc.). - iterations_per_update -> int - The number of iterations used per update in hpc mode, as it may be desireable to perform less iterations for - runs on super computpers that can often have much longer run times. \ No newline at end of file diff --git a/autofit/config/general.yaml b/autofit/config/general.yaml index 784083c80..e590d5091 100644 --- a/autofit/config/general.yaml +++ b/autofit/config/general.yaml @@ -1,27 +1,27 @@ analysis: - n_cores: 1 + n_cores: 1 # The number of cores a parallelized sum of Analysis classes uses by default. hpc: - hpc_mode: false - iterations_per_update: 5000 + hpc_mode: false # If True, use HPC mode, which disables GUI visualization, logging to screen and other settings which are not suited to running on a super computer. + iterations_per_update: 5000 # The number of iterations between every update (visualization, results output, etc) in HPC mode. model: - ignore_prior_limits: false + ignore_prior_limits: false # If ``True`` the limits applied to priors will be ignored, where limits set upper / lower limits. This stops PriorLimitException's from being raised. output: - force_pickle_overwrite: false + force_pickle_overwrite: false # If True pickle files output by a search (e.g. samples.pickle) are recreated when a new model-fit is performed. identifier_version: 4 - info_whitespace_length: 80 - log_file: output.log - log_level: INFO - log_to_file: false - model_results_decimal_places: 3 - remove_files: false + info_whitespace_length: 80 # Length of whitespace between the parameter names and values in the model.info / result.info + log_level: INFO # The level of information output by logging. + log_to_file: false # If True, outputs the non-linear search log to a file (and not printed to screen). + log_file: output.log # The name of the file the logged output is written to (in the non-linear search output folder) + model_results_decimal_places: 3 # Number of decimal places estimated parameter values / errors are output in model.results. + remove_files: false # If True, all output files of a non-linear search (e.g. samples, visualization, etc.) are deleted once the model-fit has completed, such that only the .zip file remains. samples_to_csv: false parallel: - warn_environment_variables: true + warn_environment_variables: true # If True, a warning is displayed when the search's number of CPU > 1 and enviromment variables related to threading are also > 1. profiling: - parallel_profile: false - repeats: 1 - should_profile: false + parallel_profile: false # If True, the parallelization of the fit is profiled outputting a cPython graph. + should_profile: false # If True, the ``profile_log_likelihood_function()`` function of an analysis class is called throughout a model-fit, profiling run times. + repeats: 1 # The number of repeat function calls used to measure run-times when profiling. test: - check_preloads: false + check_preloads: false # If True, perform a sanity check that the likelihood using preloads is identical to the likelihood not using preloads. exception_override: false parallel_profile: false diff --git a/autofit/config/non_linear.yaml b/autofit/config/non_linear.yaml deleted file mode 100644 index 9dc6a70ab..000000000 --- a/autofit/config/non_linear.yaml +++ /dev/null @@ -1,304 +0,0 @@ -mcmc: - Emcee: - auto_correlations: - change_threshold: 0.01 - check_for_convergence: true - check_size: 100 - required_length: 50 - initialize: - ball_lower_limit: 0.49 - ball_upper_limit: 0.51 - method: ball - parallel: - number_of_cores: 1 - printing: - silence: false - prior_passer: - sigma: 3.0 - use_errors: true - use_widths: true - run: - nsteps: 2000 - search: - nwalkers: 50 - updates: - iterations_per_update: 500 - log_every_update: 1 - model_results_every_update: 1 - remove_state_files_at_end: true - visualize_every_update: 1 - Zeus: - auto_correlations: - change_threshold: 0.01 - check_for_convergence: true - check_size: 100 - required_length: 50 - initialize: - ball_lower_limit: 0.49 - ball_upper_limit: 0.51 - method: ball - parallel: - number_of_cores: 1 - printing: - silence: false - prior_passer: - sigma: 3.0 - use_errors: true - use_widths: true - run: - check_walkers: true - light_mode: false - maxiter: 10000 - maxsteps: 10000 - mu: 1.0 - nsteps: 2000 - patience: 5 - shuffle_ensemble: true - tolerance: 0.05 - tune: true - vectorize: false - search: - nwalkers: 50 - updates: - iterations_per_update: 500 - log_every_update: 1 - model_results_every_update: 1 - remove_state_files_at_end: true - visualize_every_update: 1 -nest: - DynestyDynamic: - initialize: - method: prior - parallel: - force_x1_cpu: false - number_of_cores: 1 - printing: - silence: false - prior_passer: - sigma: 3.0 - use_errors: true - use_widths: true - run: - dlogz_init: 0.01 - logl_max_init: .inf - maxcall: null - maxcall_init: null - maxiter: null - maxiter_init: null - n_effective: .inf - n_effective_init: .inf - nlive_init: 500 - search: - bootstrap: null - bound: multi - enlarge: null - facc: 0.2 - first_update: null - fmove: 0.9 - max_move: 100 - sample: auto - slices: 5 - update_interval: null - walks: 5 - updates: - iterations_per_update: 2500 - log_every_update: 1 - model_results_every_update: 1 - remove_state_files_at_end: true - visualize_every_update: 1 - DynestyStatic: - initialize: - method: prior - parallel: - force_x1_cpu: false - number_of_cores: 1 - printing: - silence: false - prior_passer: - sigma: 3.0 - use_errors: true - use_widths: true - run: - dlogz: null - logl_max: .inf - maxcall: null - maxiter: null - n_effective: null - search: - bootstrap: null - bound: multi - enlarge: null - facc: 0.2 - first_update: null - fmove: 0.9 - max_move: 100 - nlive: 50 - sample: auto - slices: 5 - update_interval: null - walks: 5 - updates: - iterations_per_update: 5000 - log_every_update: 1 - model_results_every_update: 1 - remove_state_files_at_end: true - visualize_every_update: 1 - UltraNest: - initialize: - method: prior - parallel: - number_of_cores: 1 - printing: - silence: false - prior_passer: - sigma: 3.0 - use_errors: true - use_widths: true - run: - cluster_num_live_points: 40 - dkl: 0.5 - dlogz: 0.5 - frac_remain: 0.01 - insertion_test_window: 10 - insertion_test_zscore_threshold: 2 - lepsilon: 0.001 - log_interval: null - max_iters: null - max_ncalls: null - max_num_improvement_loops: -1.0 - min_ess: 400 - min_num_live_points: 400 - show_status: true - update_interval_ncall: null - update_interval_volume_fraction: 0.8 - viz_callback: auto - search: - draw_multiple: true - ndraw_max: 65536 - ndraw_min: 128 - num_bootstraps: 30 - num_test_samples: 2 - resume: true - run_num: null - storage_backend: hdf5 - vectorized: false - warmstart_max_tau: -1.0 - stepsampler: - adaptive_nsteps: false - log: false - max_nsteps: 1000 - nsteps: 25 - region_filter: false - scale: 1.0 - stepsampler_cls: null - updates: - iterations_per_update: 5000 - log_every_update: 1 - model_results_every_update: 1 - remove_state_files_at_end: true - visualize_every_update: 1 -optimize: - Drawer: - initialize: - ball_lower_limit: 0.49 - ball_upper_limit: 0.51 - method: ball - printing: - silence: false - prior_passer: - sigma: 3.0 - use_errors: true - use_widths: true - search: - total_draws: 50 - updates: - iterations_per_update: 500 - log_every_update: 1 - model_results_every_update: 1 - remove_state_files_at_end: true - visualize_every_update: 1 - LBFGS: - initialize: - ball_lower_limit: 0.49 - ball_upper_limit: 0.51 - method: ball - options: - disp: false - eps: 1.0e-08 - ftol: 2.220446049250313e-09 - gtol: 1.0e-05 - iprint: -1.0 - maxcor: 10 - maxfun: 15000 - maxiter: 15000 - maxls: 20 - printing: - silence: false - prior_passer: - sigma: 3.0 - use_errors: true - use_widths: true - search: - tol: null - updates: - iterations_per_update: 500 - log_every_update: 1 - model_results_every_update: 1 - remove_state_files_at_end: true - visualize_every_update: 1 - PySwarmsGlobal: - initialize: - ball_lower_limit: 0.49 - ball_upper_limit: 0.51 - method: ball - parallel: - number_of_cores: 1 - printing: - silence: false - prior_passer: - sigma: 3.0 - use_errors: true - use_widths: true - run: - iters: 2000 - search: - cognitive: 0.5 - ftol: -.inf - inertia: 0.9 - n_particles: 50 - social: 0.3 - updates: - iterations_per_update: 500 - log_every_update: 1 - model_results_every_update: 1 - remove_state_files_at_end: true - visualize_every_update: 1 - PySwarmsLocal: - initialize: - ball_lower_limit: 0.49 - ball_upper_limit: 0.51 - method: ball - parallel: - number_of_cores: 1 - printing: - silence: false - prior_passer: - sigma: 3.0 - use_errors: true - use_widths: true - run: - iters: 2000 - search: - cognitive: 0.5 - ftol: -.inf - inertia: 0.9 - minkowski_p_norm: 2 - n_particles: 50 - number_of_k_neighbors: 3 - social: 0.3 - updates: - iterations_per_update: 500 - log_every_update: 1 - model_results_every_update: 1 - remove_state_files_at_end: true - visualize_every_update: 1 diff --git a/autofit/config/non_linear/README.rst b/autofit/config/non_linear/README.rst new file mode 100644 index 000000000..11774c406 --- /dev/null +++ b/autofit/config/non_linear/README.rst @@ -0,0 +1,9 @@ +The ``non_linear`` folder contains configuration files which customize the default behaviour of non-linear searches in +**PyAutoLens**. + +Files +----- + +- ``mcmc.yaml``: Settings default behaviour of MCMC non-linear searches (e.g. Emcee). +- ``nest.yaml``: Settings default behaviour of nested sampler non-linear searches (e.g. Dynesty). +- ``optimizer.yaml``: Settings default behaviour of optimizer non-linear searches (e.g. PySwarms). \ No newline at end of file diff --git a/autofit/config/non_linear/mcmc.yaml b/autofit/config/non_linear/mcmc.yaml new file mode 100644 index 000000000..e570d6eda --- /dev/null +++ b/autofit/config/non_linear/mcmc.yaml @@ -0,0 +1,70 @@ +# Configuration files that customize the default behaviour of non-linear searches. + +# **PyAutoFit** supports the following MCMC algorithms: + +# - Emcee: https://github.com/dfm/emcee / https://emcee.readthedocs.io/en/stable/ +# - Zeus: https://github.com/minaskar/zeus / https://zeus-mcmc.readthedocs.io/en/latest/ + +# Settings in the [search] and [run] entries are specific to each nested algorithm and should be determined by +# consulting that MCMC method's own readthedocs. + +Emcee: + run: + nsteps: 2000 + search: + nwalkers: 50 + auto_correlations: + change_threshold: 0.01 # The threshold value by which if the change in auto_correlations is below sampling will be terminated early. + check_for_convergence: true # Whether the auto-correlation lengths of the Emcee samples are checked to determine the stopping criteria. If `True`, Emcee may stop before nsteps are performed. + check_size: 100 # The length of the samples used to check the auto-correlation lengths (from the latest sample backwards). + required_length: 50 # The length an auto_correlation chain must be for it to be used to evaluate whether its change threshold is sufficiently small to terminate sampling early. + initialize: # The method used to generate where walkers are initialized in parameter space {prior | ball}. + method: ball # priors: samples are initialized by randomly drawing from each parameter's prior. ball: samples are initialized by randomly drawing unit values from a narrow uniform distribution. + ball_lower_limit: 0.49 # The lower limit of the uniform distribution unit values are drawn from when initializing walkers using the ball method. + ball_upper_limit: 0.51 # The upper limit of the uniform distribution unit values are drawn from when initializing walkers using the ball method. + parallel: + number_of_cores: 1 # The number of cores the search is parallelized over by default, using Python multiprocessing. + printing: + silence: false # If True, the default print output of the non-linear search is silcened and not printed by the Python interpreter. + prior_passer: + sigma: 3.0 # For non-linear search chaining and model prior passing, the sigma value of the inferred model parameter used as the sigma of the passed Gaussian prior. + use_errors: true # If True, the errors of the previous model's results are used when passing priors. + use_widths: true # If True the width of the model parameters defined in the priors config file are used. + updates: + iterations_per_update: 500 # The number of iterations of the non-linear search performed between every 'update', where an update performs tasks like outputting model.results. + remove_state_files_at_end: true # Whether to remove the savestate of the seach (e.g. the Emcee hdf5 file) at the end to save hard-disk space (results are still stored as PyAutoFit pickles and loadable). +Zeus: + run: + check_walkers: true + light_mode: false + maxiter: 10000 + maxsteps: 10000 + mu: 1.0 + nsteps: 2000 + patience: 5 + shuffle_ensemble: true + tolerance: 0.05 + tune: true + vectorize: false + search: + nwalkers: 50 + auto_correlations: + change_threshold: 0.01 # The threshold value by which if the change in auto_correlations is below sampling will be terminated early. + check_for_convergence: true # Whether the auto-correlation lengths of the Emcee samples are checked to determine the stopping criteria. If `True`, Emcee may stop before nsteps are performed. + check_size: 100 # The length of the samples used to check the auto-correlation lengths (from the latest sample backwards). + required_length: 50 # The length an auto_correlation chain must be for it to be used to evaluate whether its change threshold is sufficiently small to terminate sampling early. + initialize: # The method used to generate where walkers are initialized in parameter space {prior | ball}. + method: ball # priors: samples are initialized by randomly drawing from each parameter's prior. ball: samples are initialized by randomly drawing unit values from a narrow uniform distribution. + ball_lower_limit: 0.49 # The lower limit of the uniform distribution unit values are drawn from when initializing walkers using the ball method. + ball_upper_limit: 0.51 # The upper limit of the uniform distribution unit values are drawn from when initializing walkers using the ball method. + parallel: + number_of_cores: 1 # The number of cores the search is parallelized over by default, using Python multiprocessing. + printing: + silence: false # If True, the default print output of the non-linear search is silenced and not printed by the Python interpreter. + prior_passer: + sigma: 3.0 # For non-linear search chaining and model prior passing, the sigma value of the inferred model parameter used as the sigma of the passed Gaussian prior. + use_errors: true # If True, the errors of the previous model's results are used when passing priors. + use_widths: true # If True the width of the model parameters defined in the priors config file are used. + updates: + iterations_per_update: 500 # The number of iterations of the non-linear search performed between every 'update', where an update performs tasks like outputting model.results. + remove_state_files_at_end: true # Whether to remove the savestate of the seach (e.g. the Emcee hdf5 file) at the end to save hard-disk space (results are still stored as PyAutoFit pickles and loadable). \ No newline at end of file diff --git a/autofit/config/non_linear/nest.yaml b/autofit/config/non_linear/nest.yaml new file mode 100644 index 000000000..2660ae8a2 --- /dev/null +++ b/autofit/config/non_linear/nest.yaml @@ -0,0 +1,132 @@ +# Configuration files that customize the default behaviour of non-linear searches. + +# **PyAutoFit** supports the following nested sampling algorithms: + +# - Dynesty: https://github.com/joshspeagle/dynesty / https://dynesty.readthedocs.io/en/latest/index.html +# - UltraNest: https://github.com/JohannesBuchner/UltraNest / https://johannesbuchner.github.io/UltraNest/readme.html + +# Settings in the [search] and [run] entries are specific to each nested algorithm and should be determined by +# consulting that MCMC method's own readthedocs. + +DynestyStatic: + search: + bootstrap: null + bound: multi + enlarge: null + facc: 0.2 + first_update: null + fmove: 0.9 + max_move: 100 + nlive: 50 + sample: auto + slices: 5 + update_interval: null + walks: 5 + run: + dlogz: null + logl_max: .inf + maxcall: null + maxiter: null + n_effective: null + initialize: # The method used to generate where walkers are initialized in parameter space {prior}. + method: prior # priors: samples are initialized by randomly drawing from each parameter's prior. + parallel: + number_of_cores: 1 # The number of cores the search is parallelized over by default, using Python multiprocessing. + force_x1_cpu: false # Force Dynesty to not use Python multiprocessing Pool, which can fix issues on certain operating systems. + printing: + silence: false # If True, the default print output of the non-linear search is silenced and not printed by the Python interpreter. + prior_passer: + sigma: 3.0 # For non-linear search chaining and model prior passing, the sigma value of the inferred model parameter used as the sigma of the passed Gaussian prior. + use_errors: true # If True, the errors of the previous model's results are used when passing priors. + use_widths: true # If True the width of the model parameters defined in the priors config file are used. + updates: + iterations_per_update: 500 # The number of iterations of the non-linear search performed between every 'update', where an update performs tasks like outputting model.results. + remove_state_files_at_end: true # Whether to remove the savestate of the seach (e.g. the Emcee hdf5 file) at the end to save hard-disk space (results are still stored as PyAutoFit pickles and loadable). +DynestyDynamic: + search: + bootstrap: null + bound: multi + enlarge: null + facc: 0.2 + first_update: null + fmove: 0.9 + max_move: 100 + sample: auto + slices: 5 + update_interval: null + walks: 5 + run: + dlogz_init: 0.01 + logl_max_init: .inf + maxcall: null + maxcall_init: null + maxiter: null + maxiter_init: null + n_effective: .inf + n_effective_init: .inf + nlive_init: 500 + initialize: # The method used to generate where walkers are initialized in parameter space {prior}. + method: prior # priors: samples are initialized by randomly drawing from each parameter's prior. + parallel: + number_of_cores: 1 # The number of cores the search is parallelized over by default, using Python multiprocessing. + force_x1_cpu: false # Force Dynesty to not use Python multiprocessing Pool, which can fix issues on certain operating systems. + printing: + silence: false # If True, the default print output of the non-linear search is silenced and not printed by the Python interpreter. + prior_passer: + sigma: 3.0 # For non-linear search chaining and model prior passing, the sigma value of the inferred model parameter used as the sigma of the passed Gaussian prior. + use_errors: true # If True, the errors of the previous model's results are used when passing priors. + use_widths: true # If True the width of the model parameters defined in the priors config file are used. + updates: + iterations_per_update: 500 # The number of iterations of the non-linear search performed between every 'update', where an update performs tasks like outputting model.results. + remove_state_files_at_end: true # Whether to remove the savestate of the seach (e.g. the Emcee hdf5 file) at the end to save hard-disk space (results are still stored as PyAutoFit pickles and loadable). +UltraNest: + search: + draw_multiple: true + ndraw_max: 65536 + ndraw_min: 128 + num_bootstraps: 30 + num_test_samples: 2 + resume: true + run_num: null + storage_backend: hdf5 + vectorized: false + warmstart_max_tau: -1.0 + run: + cluster_num_live_points: 40 + dkl: 0.5 + dlogz: 0.5 + frac_remain: 0.01 + insertion_test_window: 10 + insertion_test_zscore_threshold: 2 + lepsilon: 0.001 + log_interval: null + max_iters: null + max_ncalls: null + max_num_improvement_loops: -1.0 + min_ess: 400 + min_num_live_points: 400 + show_status: true + update_interval_ncall: null + update_interval_volume_fraction: 0.8 + viz_callback: auto + stepsampler: + adaptive_nsteps: false + log: false + max_nsteps: 1000 + nsteps: 25 + region_filter: false + scale: 1.0 + stepsampler_cls: null + initialize: # The method used to generate where walkers are initialized in parameter space {prior}. + method: prior # priors: samples are initialized by randomly drawing from each parameter's prior. + parallel: + number_of_cores: 1 # The number of cores the search is parallelized over by default, using Python multiprocessing. + printing: + silence: false # If True, the default print output of the non-linear search is silenced and not printed by the Python interpreter. + prior_passer: + sigma: 3.0 # For non-linear search chaining and model prior passing, the sigma value of the inferred model parameter used as the sigma of the passed Gaussian prior. + use_errors: true # If True, the errors of the previous model's results are used when passing priors. + use_widths: true # If True the width of the model parameters defined in the priors config file are used. + updates: + iterations_per_update: 500 # The number of iterations of the non-linear search performed between every 'update', where an update performs tasks like outputting model.results. + remove_state_files_at_end: true # Whether to remove the savestate of the seach (e.g. the Emcee hdf5 file) at the end to save hard-disk space (results are still stored as PyAutoFit pickles and loadable). \ No newline at end of file diff --git a/autofit/config/non_linear/optimize.yaml b/autofit/config/non_linear/optimize.yaml new file mode 100644 index 000000000..8cf3d0fcf --- /dev/null +++ b/autofit/config/non_linear/optimize.yaml @@ -0,0 +1,105 @@ +# Configuration files that customize the default behaviour of non-linear searches. + +# **PyAutoFit** supports the following optimizer algorithms: + +# - PySwarms: https://github.com/ljvmiranda921/pyswarms / https://pyswarms.readthedocs.io/en/latest/index.html + +# Settings in the [search], [run] and [options] entries are specific to each nested algorithm and should be +# determined by consulting that MCMC method's own readthedocs. + +PySwarmsGlobal: + run: + iters: 2000 + search: + cognitive: 0.5 + ftol: -.inf + inertia: 0.9 + n_particles: 50 + social: 0.3 + initialize: # The method used to generate where walkers are initialized in parameter space {prior | ball}. + method: ball # priors: samples are initialized by randomly drawing from each parameter's prior. ball: samples are initialized by randomly drawing unit values from a narrow uniform distribution. + ball_lower_limit: 0.49 # The lower limit of the uniform distribution unit values are drawn from when initializing walkers using the ball method. + ball_upper_limit: 0.51 # The upper limit of the uniform distribution unit values are drawn from when initializing walkers using the ball method. + parallel: + number_of_cores: 1 # The number of cores the search is parallelized over by default, using Python multiprocessing. + printing: + silence: false # If True, the default print output of the non-linear search is silcened and not printed by the Python interpreter. + prior_passer: + sigma: 3.0 # For non-linear search chaining and model prior passing, the sigma value of the inferred model parameter used as the sigma of the passed Gaussian prior. + use_errors: true # If True, the errors of the previous model's results are used when passing priors. + use_widths: true # If True the width of the model parameters defined in the priors config file are used. + updates: + iterations_per_update: 500 # The number of iterations of the non-linear search performed between every 'update', where an update performs tasks like outputting model.results. + remove_state_files_at_end: true # Whether to remove the savestate of the seach (e.g. the Emcee hdf5 file) at the end to save hard-disk space (results are still stored as PyAutoFit pickles and loadable). +PySwarmsLocal: + run: + iters: 2000 + search: + cognitive: 0.5 + ftol: -.inf + inertia: 0.9 + minkowski_p_norm: 2 + n_particles: 50 + number_of_k_neighbors: 3 + social: 0.3 + initialize: # The method used to generate where walkers are initialized in parameter space {prior | ball}. + method: ball # priors: samples are initialized by randomly drawing from each parameter's prior. ball: samples are initialized by randomly drawing unit values from a narrow uniform distribution. + ball_lower_limit: 0.49 # The lower limit of the uniform distribution unit values are drawn from when initializing walkers using the ball method. + ball_upper_limit: 0.51 # The upper limit of the uniform distribution unit values are drawn from when initializing walkers using the ball method. + parallel: + number_of_cores: 1 # The number of cores the search is parallelized over by default, using Python multiprocessing. + printing: + silence: false # If True, the default print output of the non-linear search is silcened and not printed by the Python interpreter. + prior_passer: + sigma: 3.0 # For non-linear search chaining and model prior passing, the sigma value of the inferred model parameter used as the sigma of the passed Gaussian prior. + use_errors: true # If True, the errors of the previous model's results are used when passing priors. + use_widths: true # If True the width of the model parameters defined in the priors config file are used. + updates: + iterations_per_update: 500 # The number of iterations of the non-linear search performed between every 'update', where an update performs tasks like outputting model.results. + remove_state_files_at_end: true # Whether to remove the savestate of the seach (e.g. the Emcee hdf5 file) at the end to save hard-disk space (results are still stored as PyAutoFit pickles and loadable). +LBFGS: + search: + tol: null + options: + disp: false + eps: 1.0e-08 + ftol: 2.220446049250313e-09 + gtol: 1.0e-05 + iprint: -1.0 + maxcor: 10 + maxfun: 15000 + maxiter: 15000 + maxls: 20 + initialize: # The method used to generate where walkers are initialized in parameter space {prior | ball}. + method: ball # priors: samples are initialized by randomly drawing from each parameter's prior. ball: samples are initialized by randomly drawing unit values from a narrow uniform distribution. + ball_lower_limit: 0.49 # The lower limit of the uniform distribution unit values are drawn from when initializing walkers using the ball method. + ball_upper_limit: 0.51 # The upper limit of the uniform distribution unit values are drawn from when initializing walkers using the ball method. + parallel: + number_of_cores: 1 # The number of cores the search is parallelized over by default, using Python multiprocessing. + printing: + silence: false # If True, the default print output of the non-linear search is silcened and not printed by the Python interpreter. + prior_passer: + sigma: 3.0 # For non-linear search chaining and model prior passing, the sigma value of the inferred model parameter used as the sigma of the passed Gaussian prior. + use_errors: true # If True, the errors of the previous model's results are used when passing priors. + use_widths: true # If True the width of the model parameters defined in the priors config file are used. + updates: + iterations_per_update: 500 # The number of iterations of the non-linear search performed between every 'update', where an update performs tasks like outputting model.results. + remove_state_files_at_end: true # Whether to remove the savestate of the seach (e.g. the Emcee hdf5 file) at the end to save hard-disk space (results are still stored as PyAutoFit pickles and loadable). +Drawer: + search: + total_draws: 50 + initialize: # The method used to generate where walkers are initialized in parameter space {prior | ball}. + method: ball # priors: samples are initialized by randomly drawing from each parameter's prior. ball: samples are initialized by randomly drawing unit values from a narrow uniform distribution. + ball_lower_limit: 0.49 # The lower limit of the uniform distribution unit values are drawn from when initializing walkers using the ball method. + ball_upper_limit: 0.51 # The upper limit of the uniform distribution unit values are drawn from when initializing walkers using the ball method. + parallel: + number_of_cores: 1 # The number of cores the search is parallelized over by default, using Python multiprocessing. + printing: + silence: false # If True, the default print output of the non-linear search is silcened and not printed by the Python interpreter. + prior_passer: + sigma: 3.0 # For non-linear search chaining and model prior passing, the sigma value of the inferred model parameter used as the sigma of the passed Gaussian prior. + use_errors: true # If True, the errors of the previous model's results are used when passing priors. + use_widths: true # If True the width of the model parameters defined in the priors config file are used. + updates: + iterations_per_update: 500 # The number of iterations of the non-linear search performed between every 'update', where an update performs tasks like outputting model.results. + remove_state_files_at_end: true # Whether to remove the savestate of the seach (e.g. the Emcee hdf5 file) at the end to save hard-disk space (results are still stored as PyAutoFit pickles and loadable). \ No newline at end of file diff --git a/autofit/config/notation.yaml b/autofit/config/notation.yaml index 0a5c8ccfd..ec55b854a 100644 --- a/autofit/config/notation.yaml +++ b/autofit/config/notation.yaml @@ -1,3 +1,16 @@ +# The notation configs define the labels of every model parameter and its derived quantities, which are used when +# visualizing results (for example labeling the axis of the PDF triangle plots output by a non-linear search). + + +# label: The label given to the `centre` parameter of `Gaussian` model-components. This is used +# in plots like PDF corner plots. + +# For example, if `centre=x`, the plot axis will be labeled 'x'. + +# superscript: the superscript used on certain plots that show the results of different model-components. + +# For example, if `Gaussian=g`, plots where the parameters of the Gaussian model-component have superscript `g`. + label: label: centre: x @@ -12,6 +25,11 @@ label: gaussian: g modelcomponent0: M0 modelcomponent1: M1 + +# label_format: The format certain parameters are output as in output files like the `model.results` file. + +# For example, if `centre={:.2d}`, the format of the centre parameter in results files will use this Python format. + label_format: format: centre: '{:.2f}' diff --git a/autofit/config/visualize.yaml b/autofit/config/visualize.yaml index 814ae182a..1d792e4b0 100644 --- a/autofit/config/visualize.yaml +++ b/autofit/config/visualize.yaml @@ -1,169 +1,28 @@ general: general: - backend: default -include: - include_2d: {} -mat_wrap: - Axis: - figure: {} - subplot: {} - Cmap: - figure: - cmap: default - linscale: 0.01 - linthresh: 0.05 - norm: linear - vmax: null - vmin: null - subplot: - cmap: default - linscale: 0.01 - linthresh: 0.05 - norm: linear - vmax: null - vmin: null - Colorbar: - figure: - fraction: 0.047 - pad: 0.01 - subplot: - fraction: 0.047 - pad: 0.01 - ColorbarTickParams: - figure: - labelsize: 10 - subplot: - labelsize: 10 - Figure: - figure: - aspect: square - figsize: (7,7) - subplot: - aspect: square - figsize: auto - Legend: - figure: - fontsize: 12 - include: true - subplot: - fontsize: 12 - include: true - TickParams: - figure: - labelsize: 16 - subplot: - labelsize: 10 - Title: - figure: - fontsize: 16 - subplot: - fontsize: 10 - XLabel: - figure: - fontsize: 16 - subplot: - fontsize: 10 - XTicks: - figure: - fontsize: 16 - subplot: - fontsize: 10 - YLabel: - figure: - fontsize: 16 - subplot: - fontsize: 10 - YTicks: - figure: - fontsize: 16 - subplot: - fontsize: 10 -mat_wrap_1d: - AXVLine: - figure: - c: k - subplot: - c: k - YXPlot: - figure: - c: k - subplot: - c: k -mat_wrap_2d: - ArrayOverlay: - figure: - alpha: 0.5 - subplot: - alpha: 0.5 - GridPlot: - figure: - c: k - subplot: - c: k - GridScatter: - figure: - c: k - marker: . - s: 1 - subplot: - c: k - marker: . - s: 1 - PatchOverlay: - figure: - edgecolor: c - facecolor: null - subplot: - edgecolor: c - facecolor: null - VectorFieldQuiver: - figure: - alpha: 1.0 - angles: xy - headlength: 0 - headwidth: 1 - linewidth: 5 - pivot: middle - units: xy - subplot: - alpha: 1.0 - angles: xy - headlength: 0 - headwidth: 1 - linewidth: 5 - pivot: middle - units: xy - VoronoiDrawer: - figure: - alpha: 0.7 - edgecolor: k - linewidth: 0.3 - subplot: - alpha: 0.7 - edgecolor: k - linewidth: 0.3 + backend: default # The matploblib backend used for visualization. `default` uses the system default, can specifiy specific backend (e.g. TKAgg, Qt5Agg, WXAgg). plots_search: dynesty: cornerplot: true - cornerpoints: false - runplot: true - traceplot: true + cornerpoints: false # Whether to output the Dynesty cornerplot figure during a non-linear search fit. + runplot: true # Whether to output the Dynesty runplot figure during a non-linear search fit. + traceplot: true # Whether to output the Dynesty traceplot figure during a non-linear search fit. emcee: - corner: true - likelihood_series: true - time_series: true - trajectories: true + corner: true # Whether to output the Emcee corner figure during a non-linear search fit. + likelihood_series: true # Whether to output the Emcee likelihood series figure during a non-linear search fit. + time_series: true # Whether to output the Emcee time series figure during a non-linear search fit. + trajectories: true # Whether to output the Emcee trajectories figure during a non-linear search fit. pyswarms: - contour: true - cost_history: true - time_series: true - trajectories: true + contour: true # Whether to output the PySwarms contour figure during a non-linear search fit. + cost_history: true # Whether to output the PySwarms cost_history figure during a non-linear search fit. + time_series: true # Whether to output the PySwarms time_series figure during a non-linear search fit. + trajectories: true # Whether to output the PySwarms trajectories figure during a non-linear search fit. ultranest: - cornerplot: true - runplot: true - traceplot: true + cornerplot: true # Whether to output the Ultranest cornerplot figure during a non-linear search fit. + runplot: true # Whether to output the Ultranest runplot figure during a non-linear search fit. + traceplot: true # Whether to output the Ultranest traceplot figure during a non-linear search fit. zeus: - corner: true - likelihood_series: true - time_series: true - trajectories: true + corner: true # Whether to output the Zeus corner figure during a non-linear search fit. + likelihood_series: true # Whether to output the Zeus likelihood series figure during a non-linear search fit. + time_series: true # Whether to output the Zeus time series figure during a non-linear search fit. + trajectories: true # Whether to output the Zeus trajectories figure during a non-linear search fit. diff --git a/docs/installation/conda.rst b/docs/installation/conda.rst index 7fbf189c6..92e483db1 100644 --- a/docs/installation/conda.rst +++ b/docs/installation/conda.rst @@ -26,7 +26,7 @@ the installation has clean dependencies): .. code-block:: bash - pip install autofit==2022.11.26.11 + pip install autofit Next, clone the ``autofit workspace`` (the line ``--depth 1`` clones only the most recent branch on the ``autofit_workspace``, reducing the download size): diff --git a/docs/installation/pip.rst b/docs/installation/pip.rst index 410403bb1..e5f86e8c2 100644 --- a/docs/installation/pip.rst +++ b/docs/installation/pip.rst @@ -12,7 +12,7 @@ the installation has clean dependencies): .. code-block:: bash - pip install autofit==2022.11.26.11 + pip install autofit If this raises no errors **PyAutoFit** is installed! If there is an error check out the `troubleshooting section `_. From 1a047e781726d7f4cf0ca81959016fcbc00cc81b Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Tue, 29 Nov 2022 13:51:44 +0000 Subject: [PATCH 014/226] removed every_update config parameters --- autofit/non_linear/abstract_search.py | 17 +++-------------- 1 file changed, 3 insertions(+), 14 deletions(-) diff --git a/autofit/non_linear/abstract_search.py b/autofit/non_linear/abstract_search.py index e4212d813..21ca4fe7c 100644 --- a/autofit/non_linear/abstract_search.py +++ b/autofit/non_linear/abstract_search.py @@ -158,20 +158,12 @@ def __init__( "iterations_per_update" ] - self.log_every_update = self._config("updates", "log_every_update") - self.visualize_every_update = self._config("updates", "visualize_every_update",) - self.model_results_every_update = self._config( - "updates", "model_results_every_update", - ) self.remove_state_files_at_end = self._config( "updates", "remove_state_files_at_end", ) self.iterations = 0 - self.should_visualize = IntervalCounter(self.visualize_every_update) - self.should_output_model_results = IntervalCounter( - self.model_results_every_update - ) + self.should_profile = conf.instance["general"]["profiling"]["should_profile"] self.silence = self._config("printing", "silence") @@ -662,9 +654,6 @@ def perform_update(self, model, analysis, during_analysis): 1) Visualize the maximum log likelihood model. 2) Output the model results to the model.reults file. - These task are performed every n updates, set by the relevent *task_every_update* variable, for example - *visualize_every_update* - Parameters ---------- model : ModelMapper @@ -696,7 +685,7 @@ def perform_update(self, model, analysis, during_analysis): except exc.FitException: return samples - if self.should_visualize() or not during_analysis: + if not during_analysis: self.logger.debug("Visualizing") analysis.visualize( paths=self.paths, instance=instance, during_analysis=during_analysis @@ -708,7 +697,7 @@ def perform_update(self, model, analysis, during_analysis): paths=self.paths, instance=instance, ) - if self.should_output_model_results() or not during_analysis: + if not during_analysis: self.logger.debug("Outputting model result") try: From 8580f593bf183733c668bd229a157a386395095d Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Tue, 29 Nov 2022 13:58:11 +0000 Subject: [PATCH 015/226] GridSearch.yaml --- autofit/config/non_linear/GridSearch.yaml | 5 +++++ test_autofit/config/non_linear/GridSearch.ini | 6 ++++-- 2 files changed, 9 insertions(+), 2 deletions(-) create mode 100644 autofit/config/non_linear/GridSearch.yaml diff --git a/autofit/config/non_linear/GridSearch.yaml b/autofit/config/non_linear/GridSearch.yaml new file mode 100644 index 000000000..cf3f56693 --- /dev/null +++ b/autofit/config/non_linear/GridSearch.yaml @@ -0,0 +1,5 @@ +# The settings of a parallelized grid search of non-linear searches. + +parallel: + number_of_cores: 3 # The number of cores the search is parallelized over by default, using Python multiprocessing. + step_size: 0.1 # The default step size of each grid search parameter, in terms of unit values of the priors. \ No newline at end of file diff --git a/test_autofit/config/non_linear/GridSearch.ini b/test_autofit/config/non_linear/GridSearch.ini index 93bf2fdce..945c545e5 100644 --- a/test_autofit/config/non_linear/GridSearch.ini +++ b/test_autofit/config/non_linear/GridSearch.ini @@ -1,3 +1,5 @@ +# The settings of a parallelized grid search of non-linear searches. + [general] -number_of_cores=3 -step_size=0.1 \ No newline at end of file +number_of_cores=3 # The number of cores the search is parallelized over by default, using Python multiprocessing. +step_size=0.1 # The default step size of each grid search parameter, in terms of unit values of the priors. \ No newline at end of file From a44516c1313e4f8e264ac1bf7573b155ad82c06f Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Tue, 29 Nov 2022 14:11:14 +0000 Subject: [PATCH 016/226] updates to test_autofit/config to use yaml --- test_autofit/config/general.ini | 32 ------------------- test_autofit/config/general.yaml | 27 ++++++++++++++++ test_autofit/config/visualize.yaml | 28 ++++++++++++++++ test_autofit/config/visualize/doc_general | 17 ---------- test_autofit/config/visualize/general.ini | 2 -- test_autofit/config/visualize/include.ini | 3 -- .../config/visualize/mat_wrap/Axis.ini | 3 -- .../config/visualize/mat_wrap/Cmap.ini | 15 --------- .../config/visualize/mat_wrap/Colorbar.ini | 7 ---- .../visualize/mat_wrap/ColorbarTickParams.ini | 5 --- .../config/visualize/mat_wrap/Figure.ini | 7 ---- .../config/visualize/mat_wrap/Legend.ini | 7 ---- .../config/visualize/mat_wrap/TickParams.ini | 5 --- .../config/visualize/mat_wrap/Title.ini | 5 --- .../config/visualize/mat_wrap/XLabel.ini | 5 --- .../config/visualize/mat_wrap/XTicks.ini | 5 --- .../config/visualize/mat_wrap/YLabel.ini | 5 --- .../config/visualize/mat_wrap/YTicks.ini | 5 --- .../config/visualize/mat_wrap_1d/AXVLine.ini | 5 --- .../config/visualize/mat_wrap_1d/YXPlot.ini | 5 --- .../visualize/mat_wrap_2d/ArrayOverlay.ini | 5 --- .../config/visualize/mat_wrap_2d/GridPlot.ini | 5 --- .../visualize/mat_wrap_2d/GridScatter.ini | 9 ------ .../visualize/mat_wrap_2d/PatchOverlay.ini | 7 ---- .../mat_wrap_2d/VectorFieldQuiver.ini | 17 ---------- .../visualize/mat_wrap_2d/VoronoiDrawer.ini | 9 ------ test_autofit/config/visualize/plots.ini | 3 -- 27 files changed, 55 insertions(+), 193 deletions(-) delete mode 100644 test_autofit/config/general.ini create mode 100644 test_autofit/config/general.yaml create mode 100644 test_autofit/config/visualize.yaml delete mode 100644 test_autofit/config/visualize/doc_general delete mode 100644 test_autofit/config/visualize/general.ini delete mode 100644 test_autofit/config/visualize/include.ini delete mode 100644 test_autofit/config/visualize/mat_wrap/Axis.ini delete mode 100644 test_autofit/config/visualize/mat_wrap/Cmap.ini delete mode 100644 test_autofit/config/visualize/mat_wrap/Colorbar.ini delete mode 100644 test_autofit/config/visualize/mat_wrap/ColorbarTickParams.ini delete mode 100644 test_autofit/config/visualize/mat_wrap/Figure.ini delete mode 100644 test_autofit/config/visualize/mat_wrap/Legend.ini delete mode 100644 test_autofit/config/visualize/mat_wrap/TickParams.ini delete mode 100644 test_autofit/config/visualize/mat_wrap/Title.ini delete mode 100644 test_autofit/config/visualize/mat_wrap/XLabel.ini delete mode 100644 test_autofit/config/visualize/mat_wrap/XTicks.ini delete mode 100644 test_autofit/config/visualize/mat_wrap/YLabel.ini delete mode 100644 test_autofit/config/visualize/mat_wrap/YTicks.ini delete mode 100644 test_autofit/config/visualize/mat_wrap_1d/AXVLine.ini delete mode 100644 test_autofit/config/visualize/mat_wrap_1d/YXPlot.ini delete mode 100644 test_autofit/config/visualize/mat_wrap_2d/ArrayOverlay.ini delete mode 100644 test_autofit/config/visualize/mat_wrap_2d/GridPlot.ini delete mode 100644 test_autofit/config/visualize/mat_wrap_2d/GridScatter.ini delete mode 100644 test_autofit/config/visualize/mat_wrap_2d/PatchOverlay.ini delete mode 100644 test_autofit/config/visualize/mat_wrap_2d/VectorFieldQuiver.ini delete mode 100644 test_autofit/config/visualize/mat_wrap_2d/VoronoiDrawer.ini delete mode 100644 test_autofit/config/visualize/plots.ini diff --git a/test_autofit/config/general.ini b/test_autofit/config/general.ini deleted file mode 100644 index aacbe0b8d..000000000 --- a/test_autofit/config/general.ini +++ /dev/null @@ -1,32 +0,0 @@ -[output] -log_to_file=False -log_file=output.log -log_level=INFO -samples_to_csv=False -model_results_decimal_places=3 -info_whitespace_length=80 -remove_files=True -force_pickle_overwrite=False -identifier_version=3 - -[hpc] -hpc_mode=False -iterations_per_update=5000 - -[model] -ignore_prior_limits=False - -[profiling] -should_profile=False -repeats=3 -parallel_profile=False - -[parallel] -warn_environment_variables=True - -[analysis] -n_cores=1 - -[test] -check_preloads=False -disable_positions_lh_inversion_check=False \ No newline at end of file diff --git a/test_autofit/config/general.yaml b/test_autofit/config/general.yaml new file mode 100644 index 000000000..34d83f5ce --- /dev/null +++ b/test_autofit/config/general.yaml @@ -0,0 +1,27 @@ +analysis: + n_cores: 1 # The number of cores a parallelized sum of Analysis classes uses by default. +hpc: + hpc_mode: false # If True, use HPC mode, which disables GUI visualization, logging to screen and other settings which are not suited to running on a super computer. + iterations_per_update: 5000 # The number of iterations between every update (visualization, results output, etc) in HPC mode. +model: + ignore_prior_limits: false # If ``True`` the limits applied to priors will be ignored, where limits set upper / lower limits. This stops PriorLimitException's from being raised. +output: + force_pickle_overwrite: false # If True pickle files output by a search (e.g. samples.pickle) are recreated when a new model-fit is performed. + identifier_version: 4 + info_whitespace_length: 80 # Length of whitespace between the parameter names and values in the model.info / result.info + log_level: INFO # The level of information output by logging. + log_to_file: false # If True, outputs the non-linear search log to a file (and not printed to screen). + log_file: output.log # The name of the file the logged output is written to (in the non-linear search output folder) + model_results_decimal_places: 3 # Number of decimal places estimated parameter values / errors are output in model.results. + remove_files: false # If True, all output files of a non-linear search (e.g. samples, visualization, etc.) are deleted once the model-fit has completed, such that only the .zip file remains. + samples_to_csv: false +parallel: + warn_environment_variables: false # If True, a warning is displayed when the search's number of CPU > 1 and enviromment variables related to threading are also > 1. +profiling: + parallel_profile: false # If True, the parallelization of the fit is profiled outputting a cPython graph. + should_profile: false # If True, the ``profile_log_likelihood_function()`` function of an analysis class is called throughout a model-fit, profiling run times. + repeats: 1 # The number of repeat function calls used to measure run-times when profiling. +test: + check_preloads: false # If True, perform a sanity check that the likelihood using preloads is identical to the likelihood not using preloads. + exception_override: false + parallel_profile: false diff --git a/test_autofit/config/visualize.yaml b/test_autofit/config/visualize.yaml new file mode 100644 index 000000000..1d792e4b0 --- /dev/null +++ b/test_autofit/config/visualize.yaml @@ -0,0 +1,28 @@ +general: + general: + backend: default # The matploblib backend used for visualization. `default` uses the system default, can specifiy specific backend (e.g. TKAgg, Qt5Agg, WXAgg). +plots_search: + dynesty: + cornerplot: true + cornerpoints: false # Whether to output the Dynesty cornerplot figure during a non-linear search fit. + runplot: true # Whether to output the Dynesty runplot figure during a non-linear search fit. + traceplot: true # Whether to output the Dynesty traceplot figure during a non-linear search fit. + emcee: + corner: true # Whether to output the Emcee corner figure during a non-linear search fit. + likelihood_series: true # Whether to output the Emcee likelihood series figure during a non-linear search fit. + time_series: true # Whether to output the Emcee time series figure during a non-linear search fit. + trajectories: true # Whether to output the Emcee trajectories figure during a non-linear search fit. + pyswarms: + contour: true # Whether to output the PySwarms contour figure during a non-linear search fit. + cost_history: true # Whether to output the PySwarms cost_history figure during a non-linear search fit. + time_series: true # Whether to output the PySwarms time_series figure during a non-linear search fit. + trajectories: true # Whether to output the PySwarms trajectories figure during a non-linear search fit. + ultranest: + cornerplot: true # Whether to output the Ultranest cornerplot figure during a non-linear search fit. + runplot: true # Whether to output the Ultranest runplot figure during a non-linear search fit. + traceplot: true # Whether to output the Ultranest traceplot figure during a non-linear search fit. + zeus: + corner: true # Whether to output the Zeus corner figure during a non-linear search fit. + likelihood_series: true # Whether to output the Zeus likelihood series figure during a non-linear search fit. + time_series: true # Whether to output the Zeus time series figure during a non-linear search fit. + trajectories: true # Whether to output the Zeus trajectories figure during a non-linear search fit. diff --git a/test_autofit/config/visualize/doc_general b/test_autofit/config/visualize/doc_general deleted file mode 100644 index 33db17f87..000000000 --- a/test_autofit/config/visualize/doc_general +++ /dev/null @@ -1,17 +0,0 @@ -A description of the general.ini visualization config. - -[general] - backend -> str - The matploblib backend used for visualization (see - https://gist.github.com/CMCDragonkai/4e9464d9f32f5893d837f3de2c43daa4 for a description of backends). - - If you use an invalid backend for your computer, PyAutoLens may crash without an error or reset your machine. - There may be no better way to find the right backend than simple trial and error. The following backends have - worked for PyAutoLens users: - - TKAgg (default) - Qt5Agg (works on new MACS) - Qt4Agg - WXAgg - WX - Agg (outputs to .fits / .png but doesn't'display figures during a run on your computer screen) \ No newline at end of file diff --git a/test_autofit/config/visualize/general.ini b/test_autofit/config/visualize/general.ini deleted file mode 100644 index a0108f6b4..000000000 --- a/test_autofit/config/visualize/general.ini +++ /dev/null @@ -1,2 +0,0 @@ -[general] -backend=default \ No newline at end of file diff --git a/test_autofit/config/visualize/include.ini b/test_autofit/config/visualize/include.ini deleted file mode 100644 index 1955c51e8..000000000 --- a/test_autofit/config/visualize/include.ini +++ /dev/null @@ -1,3 +0,0 @@ -[include_2d] - - diff --git a/test_autofit/config/visualize/mat_wrap/Axis.ini b/test_autofit/config/visualize/mat_wrap/Axis.ini deleted file mode 100644 index dbfb9f790..000000000 --- a/test_autofit/config/visualize/mat_wrap/Axis.ini +++ /dev/null @@ -1,3 +0,0 @@ -[figure] - -[subplot] \ No newline at end of file diff --git a/test_autofit/config/visualize/mat_wrap/Cmap.ini b/test_autofit/config/visualize/mat_wrap/Cmap.ini deleted file mode 100644 index 4573973d3..000000000 --- a/test_autofit/config/visualize/mat_wrap/Cmap.ini +++ /dev/null @@ -1,15 +0,0 @@ -[figure] -cmap=default -norm=linear -vmin=None -vmax=None -linthresh=0.05 -linscale=0.01 - -[subplot] -cmap=default -norm=linear -vmin=None -vmax=None -linthresh=0.05 -linscale=0.01 \ No newline at end of file diff --git a/test_autofit/config/visualize/mat_wrap/Colorbar.ini b/test_autofit/config/visualize/mat_wrap/Colorbar.ini deleted file mode 100644 index c9a45eb72..000000000 --- a/test_autofit/config/visualize/mat_wrap/Colorbar.ini +++ /dev/null @@ -1,7 +0,0 @@ -[figure] -fraction=0.047 -pad=0.01 - -[subplot] -fraction=0.047 -pad=0.01 \ No newline at end of file diff --git a/test_autofit/config/visualize/mat_wrap/ColorbarTickParams.ini b/test_autofit/config/visualize/mat_wrap/ColorbarTickParams.ini deleted file mode 100644 index c75fecf87..000000000 --- a/test_autofit/config/visualize/mat_wrap/ColorbarTickParams.ini +++ /dev/null @@ -1,5 +0,0 @@ -[figure] -labelsize=10 - -[subplot] -labelsize=10 \ No newline at end of file diff --git a/test_autofit/config/visualize/mat_wrap/Figure.ini b/test_autofit/config/visualize/mat_wrap/Figure.ini deleted file mode 100644 index 3793ee8e5..000000000 --- a/test_autofit/config/visualize/mat_wrap/Figure.ini +++ /dev/null @@ -1,7 +0,0 @@ -[figure] -figsize=(7,7) -aspect=square - -[subplot] -figsize=auto -aspect=square \ No newline at end of file diff --git a/test_autofit/config/visualize/mat_wrap/Legend.ini b/test_autofit/config/visualize/mat_wrap/Legend.ini deleted file mode 100644 index 6d934996d..000000000 --- a/test_autofit/config/visualize/mat_wrap/Legend.ini +++ /dev/null @@ -1,7 +0,0 @@ -[figure] -include=True -fontsize=12 - -[subplot] -include=True -fontsize=12 \ No newline at end of file diff --git a/test_autofit/config/visualize/mat_wrap/TickParams.ini b/test_autofit/config/visualize/mat_wrap/TickParams.ini deleted file mode 100644 index 6fe9b8364..000000000 --- a/test_autofit/config/visualize/mat_wrap/TickParams.ini +++ /dev/null @@ -1,5 +0,0 @@ -[figure] -labelsize=16 - -[subplot] -labelsize=10 \ No newline at end of file diff --git a/test_autofit/config/visualize/mat_wrap/Title.ini b/test_autofit/config/visualize/mat_wrap/Title.ini deleted file mode 100644 index 653c210a6..000000000 --- a/test_autofit/config/visualize/mat_wrap/Title.ini +++ /dev/null @@ -1,5 +0,0 @@ -[figure] -fontsize=16 - -[subplot] -fontsize=10 \ No newline at end of file diff --git a/test_autofit/config/visualize/mat_wrap/XLabel.ini b/test_autofit/config/visualize/mat_wrap/XLabel.ini deleted file mode 100644 index 653c210a6..000000000 --- a/test_autofit/config/visualize/mat_wrap/XLabel.ini +++ /dev/null @@ -1,5 +0,0 @@ -[figure] -fontsize=16 - -[subplot] -fontsize=10 \ No newline at end of file diff --git a/test_autofit/config/visualize/mat_wrap/XTicks.ini b/test_autofit/config/visualize/mat_wrap/XTicks.ini deleted file mode 100644 index 653c210a6..000000000 --- a/test_autofit/config/visualize/mat_wrap/XTicks.ini +++ /dev/null @@ -1,5 +0,0 @@ -[figure] -fontsize=16 - -[subplot] -fontsize=10 \ No newline at end of file diff --git a/test_autofit/config/visualize/mat_wrap/YLabel.ini b/test_autofit/config/visualize/mat_wrap/YLabel.ini deleted file mode 100644 index 653c210a6..000000000 --- a/test_autofit/config/visualize/mat_wrap/YLabel.ini +++ /dev/null @@ -1,5 +0,0 @@ -[figure] -fontsize=16 - -[subplot] -fontsize=10 \ No newline at end of file diff --git a/test_autofit/config/visualize/mat_wrap/YTicks.ini b/test_autofit/config/visualize/mat_wrap/YTicks.ini deleted file mode 100644 index 653c210a6..000000000 --- a/test_autofit/config/visualize/mat_wrap/YTicks.ini +++ /dev/null @@ -1,5 +0,0 @@ -[figure] -fontsize=16 - -[subplot] -fontsize=10 \ No newline at end of file diff --git a/test_autofit/config/visualize/mat_wrap_1d/AXVLine.ini b/test_autofit/config/visualize/mat_wrap_1d/AXVLine.ini deleted file mode 100644 index 83b47a984..000000000 --- a/test_autofit/config/visualize/mat_wrap_1d/AXVLine.ini +++ /dev/null @@ -1,5 +0,0 @@ -[figure] -c=k - -[subplot] -c=k \ No newline at end of file diff --git a/test_autofit/config/visualize/mat_wrap_1d/YXPlot.ini b/test_autofit/config/visualize/mat_wrap_1d/YXPlot.ini deleted file mode 100644 index 83b47a984..000000000 --- a/test_autofit/config/visualize/mat_wrap_1d/YXPlot.ini +++ /dev/null @@ -1,5 +0,0 @@ -[figure] -c=k - -[subplot] -c=k \ No newline at end of file diff --git a/test_autofit/config/visualize/mat_wrap_2d/ArrayOverlay.ini b/test_autofit/config/visualize/mat_wrap_2d/ArrayOverlay.ini deleted file mode 100644 index 61974a2bf..000000000 --- a/test_autofit/config/visualize/mat_wrap_2d/ArrayOverlay.ini +++ /dev/null @@ -1,5 +0,0 @@ -[figure] -alpha=0.5 - -[subplot] -alpha=0.5 \ No newline at end of file diff --git a/test_autofit/config/visualize/mat_wrap_2d/GridPlot.ini b/test_autofit/config/visualize/mat_wrap_2d/GridPlot.ini deleted file mode 100644 index 83b47a984..000000000 --- a/test_autofit/config/visualize/mat_wrap_2d/GridPlot.ini +++ /dev/null @@ -1,5 +0,0 @@ -[figure] -c=k - -[subplot] -c=k \ No newline at end of file diff --git a/test_autofit/config/visualize/mat_wrap_2d/GridScatter.ini b/test_autofit/config/visualize/mat_wrap_2d/GridScatter.ini deleted file mode 100644 index 90ef9f97d..000000000 --- a/test_autofit/config/visualize/mat_wrap_2d/GridScatter.ini +++ /dev/null @@ -1,9 +0,0 @@ -[figure] -s=1 -marker=. -c=k - -[subplot] -s=1 -marker=. -c=k \ No newline at end of file diff --git a/test_autofit/config/visualize/mat_wrap_2d/PatchOverlay.ini b/test_autofit/config/visualize/mat_wrap_2d/PatchOverlay.ini deleted file mode 100644 index 682e0204f..000000000 --- a/test_autofit/config/visualize/mat_wrap_2d/PatchOverlay.ini +++ /dev/null @@ -1,7 +0,0 @@ -[figure] -facecolor=none -edgecolor=c - -[subplot] -facecolor=none -edgecolor=c \ No newline at end of file diff --git a/test_autofit/config/visualize/mat_wrap_2d/VectorFieldQuiver.ini b/test_autofit/config/visualize/mat_wrap_2d/VectorFieldQuiver.ini deleted file mode 100644 index 195128eda..000000000 --- a/test_autofit/config/visualize/mat_wrap_2d/VectorFieldQuiver.ini +++ /dev/null @@ -1,17 +0,0 @@ -[figure] -headlength=0 -pivot=middle -linewidth=5 -units=xy -angles=xy -headwidth=1 -alpha=1. - -[subplot] -headlength=0 -pivot=middle -linewidth=5 -units=xy -angles=xy -headwidth=1 -alpha=1. diff --git a/test_autofit/config/visualize/mat_wrap_2d/VoronoiDrawer.ini b/test_autofit/config/visualize/mat_wrap_2d/VoronoiDrawer.ini deleted file mode 100644 index 5c2583764..000000000 --- a/test_autofit/config/visualize/mat_wrap_2d/VoronoiDrawer.ini +++ /dev/null @@ -1,9 +0,0 @@ -[figure] -linewidth=0.3 -edgecolor=k -alpha=0.7 - -[subplot] -linewidth=0.3 -edgecolor=k -alpha=0.7 diff --git a/test_autofit/config/visualize/plots.ini b/test_autofit/config/visualize/plots.ini deleted file mode 100644 index 9511644ba..000000000 --- a/test_autofit/config/visualize/plots.ini +++ /dev/null @@ -1,3 +0,0 @@ -[samples] -corner=False -progress=False \ No newline at end of file From f6d6654dea01ea6e5be2d7ecea20be3ed1f0b8bd Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Tue, 29 Nov 2022 14:21:14 +0000 Subject: [PATCH 017/226] notation capitlization fixed --- autofit/config/notation.yaml | 8 +- test_autofit/config/non_linear.yaml | 356 ++++++++++++ test_autofit/config/non_linear/GridSearch.ini | 5 - test_autofit/config/non_linear/mcmc/Emcee.ini | 34 -- test_autofit/config/non_linear/mcmc/Zeus.ini | 44 -- .../config/non_linear/mock/MockOptimizer.ini | 17 - .../config/non_linear/mock/MockSearch.ini | 19 - .../config/non_linear/mock/MockSearchBC.ini | 19 - test_autofit/config/non_linear/mock/NLO.ini | 14 - .../config/non_linear/nest/DynestyDynamic.ini | 43 -- .../config/non_linear/nest/DynestyStatic.ini | 42 -- .../config/non_linear/nest/MultiNest.ini | 35 -- .../non_linear/optimize/DownhillSimplex.ini | 26 - .../config/non_linear/optimize/Drawer.ini | 22 - .../config/non_linear/optimize/LBFGS.ini | 33 -- .../non_linear/optimize/PySwarmsGlobal.ini | 32 - .../non_linear/optimize/PySwarmsLocal.ini | 34 -- test_autofit/config/notation.yaml | 41 ++ test_autofit/config/notation/label.ini | 29 - test_autofit/config/notation/label_format.ini | 11 - test_autofit/config/priors/mock_model.json | 547 ------------------ test_autofit/config/priors/mock_model.yaml | 410 +++++++++++++ test_autofit/config/priors/model.json | 43 -- test_autofit/config/priors/model.yaml | 31 + test_autofit/config/priors/prior.json | 12 - test_autofit/config/priors/prior.yaml | 7 + 26 files changed, 849 insertions(+), 1065 deletions(-) create mode 100644 test_autofit/config/non_linear.yaml delete mode 100644 test_autofit/config/non_linear/GridSearch.ini delete mode 100644 test_autofit/config/non_linear/mcmc/Emcee.ini delete mode 100644 test_autofit/config/non_linear/mcmc/Zeus.ini delete mode 100644 test_autofit/config/non_linear/mock/MockOptimizer.ini delete mode 100644 test_autofit/config/non_linear/mock/MockSearch.ini delete mode 100644 test_autofit/config/non_linear/mock/MockSearchBC.ini delete mode 100644 test_autofit/config/non_linear/mock/NLO.ini delete mode 100644 test_autofit/config/non_linear/nest/DynestyDynamic.ini delete mode 100644 test_autofit/config/non_linear/nest/DynestyStatic.ini delete mode 100644 test_autofit/config/non_linear/nest/MultiNest.ini delete mode 100644 test_autofit/config/non_linear/optimize/DownhillSimplex.ini delete mode 100644 test_autofit/config/non_linear/optimize/Drawer.ini delete mode 100644 test_autofit/config/non_linear/optimize/LBFGS.ini delete mode 100644 test_autofit/config/non_linear/optimize/PySwarmsGlobal.ini delete mode 100644 test_autofit/config/non_linear/optimize/PySwarmsLocal.ini create mode 100644 test_autofit/config/notation.yaml delete mode 100644 test_autofit/config/notation/label.ini delete mode 100644 test_autofit/config/notation/label_format.ini delete mode 100644 test_autofit/config/priors/mock_model.json create mode 100644 test_autofit/config/priors/mock_model.yaml delete mode 100644 test_autofit/config/priors/model.json create mode 100644 test_autofit/config/priors/model.yaml delete mode 100644 test_autofit/config/priors/prior.json create mode 100644 test_autofit/config/priors/prior.yaml diff --git a/autofit/config/notation.yaml b/autofit/config/notation.yaml index ec55b854a..e42a62b70 100644 --- a/autofit/config/notation.yaml +++ b/autofit/config/notation.yaml @@ -21,10 +21,10 @@ label: rate: \lambda sigma: \sigma superscript: - exponential: e - gaussian: g - modelcomponent0: M0 - modelcomponent1: M1 + Exponential: e + Gaussian: g + ModelComponent0: M0 + ModelComponent1: M1 # label_format: The format certain parameters are output as in output files like the `model.results` file. diff --git a/test_autofit/config/non_linear.yaml b/test_autofit/config/non_linear.yaml new file mode 100644 index 000000000..b663a8935 --- /dev/null +++ b/test_autofit/config/non_linear.yaml @@ -0,0 +1,356 @@ +GridSearch: + general: + number_of_cores: '3 # The number of cores the search is parallelized over by default, + using Python multiprocessing.' + step_size: '0.1 # The default step size of each grid search parameter, in + terms of unit values of the priors.' +mock: + MockOptimizer: + initialize: + method: prior + printing: + silence: false + prior_passer: + sigma: 3.0 + use_errors: true + use_widths: true + updates: + iterations_per_update: 2500 + remove_state_files_at_end: true + MockSearch: + initialize: + method: prior + printing: + silence: false + prior_passer: + sigma: 3.0 + use_errors: true + use_widths: true + search: {} + updates: + iterations_per_update: 2500 + remove_state_files_at_end: true + MockSearchBC: + initialize: + method: prior + printing: + silence: false + prior_passer: + sigma: 3.0 + use_errors: true + use_widths: true + search: {} + updates: + iterations_per_update: 2500 + remove_state_files_at_end: true + NLO: + printing: + silence: false + prior_passer: + sigma: 3.0 + use_errors: true + use_widths: true + updates: + iterations_per_update: 2500 + remove_state_files_at_end: true +mcmc: + Emcee: + auto_correlations: + change_threshold: 0.01 + check_for_convergence: true + check_size: 100 + required_length: 50 + initialize: + ball_lower_limit: 0.49 + ball_upper_limit: 0.51 + method: prior + parallel: + number_of_cores: 1 + printing: + silence: false + prior_passer: + sigma: 3.0 + use_errors: true + use_widths: true + run: + nsteps: 2000 + search: + nwalkers: 50 + updates: + iterations_per_update: 2500 + remove_state_files_at_end: true + Zeus: + auto_correlations: + change_threshold: 0.01 + check_for_convergence: true + check_size: 100 + required_length: 50 + initialize: + ball_lower_limit: 0.49 + ball_upper_limit: 0.51 + method: prior + parallel: + number_of_cores: 1 + printing: + silence: false + prior_passer: + sigma: 3.0 + use_errors: true + use_widths: true + run: + check_walkers: true + light_mode: false + maxiter: 10000 + maxsteps: 10000 + mu: 1.0 + nsteps: 2000 + patience: 5 + shuffle_ensemble: true + tolerance: 0.05 + tune: true + vectorize: false + search: + nwalkers: 50 + updates: + iterations_per_update: 500 + log_every_update: 1 + model_results_every_update: 1 + remove_state_files_at_end: true + visualize_every_update: 1 +nest: + DynestyDynamic: + initialize: + method: prior + parallel: + number_of_cores: 4 + printing: + silence: false + prior_passer: + sigma: 3.0 + use_errors: true + use_widths: true + run: + dlogz_init: 0.01 + logl_max_init: .inf + maxcall: null + maxcall_init: null + maxiter: null + maxiter_init: null + n_effective_init: .inf + nlive_init: 5 + search: + bootstrap: 1 + bound: balls + enlarge: 2 + facc: 0.6 + fmove: 0.8 + logl_max: .inf + max_move: 101 + sample: rwalk + slices: 6 + update_interval: 2.0 + walks: 26 + updates: + iterations_per_update: 501 + log_every_update: 1 + model_results_every_update: 1 + remove_state_files_at_end: true + visualize_every_update: 1 + DynestyStatic: + initialize: + method: prior + parallel: + force_x1_cpu: false + number_of_cores: 1 + printing: + silence: false + prior_passer: + sigma: 3.0 + use_errors: true + use_widths: true + run: + dlogz: null + logl_max: .inf + maxcall: null + maxiter: null + n_effective: 0 + search: + bootstrap: null + bound: multi + enlarge: null + facc: 0.5 + first_update: null + fmove: 0.9 + max_move: 100 + nlive: 150 + sample: auto + slices: 5 + update_interval: null + walks: 5 + updates: + iterations_per_update: 500 + log_every_update: 1 + model_results_every_update: 1 + remove_state_files_at_end: true + visualize_every_update: 1 + MultiNest: + printing: + silence: false + prior_passer: + sigma: 3.0 + use_errors: true + use_widths: true + search: + const_efficiency_mode: false + context: 0 + evidence_tolerance: 0.5 + importance_nested_sampling: true + init_mpi: false + log_zero: -1.0e+100 + max_iter: 0 + max_modes: 100 + mode_tolerance: -1.0e+89 + multimodal: true + n_iter_before_update: 100 + n_live_points: 50 + null_log_evidence: -1.0e+90 + resume: true + sampling_efficiency: 0.6 + seed: -1.0 + verbose: false + write_output: true + updates: + iterations_per_update: 2500 + log_every_update: 1 + model_results_every_update: 1 + remove_state_files_at_end: true + should_update_sym: 250 + visualize_every_update: 1 +optimize: + DownhillSimplex: + initialize: + method: prior + printing: + silence: false + prior_passer: + sigma: 3.0 + use_errors: true + use_widths: true + search: + disp: 1 + ftol: 0.0001 + full_output: 0 + maxfun: null + maxiter: null + retall: 0 + xtol: 0.0001 + updates: + iterations_per_update: 11 + log_every_update: 1 + model_results_every_update: 1 + remove_state_files_at_end: true + visualize_every_update: 1 + Drawer: + initialize: + ball_lower_limit: 0.49 + ball_upper_limit: 0.51 + method: prior + printing: + silence: false + prior_passer: + sigma: 3.0 + use_errors: true + use_widths: true + search: + total_draws: 10 + updates: + iterations_per_update: 500 + log_every_update: 1 + model_results_every_update: 1 + remove_state_files_at_end: true + visualize_every_update: 1 + LBFGS: + initialize: + ball_lower_limit: 0.49 + ball_upper_limit: 0.51 + method: prior + options: + disp: false + eps: 1.0e-08 + ftol: 2.220446049250313e-09 + gtol: 1.0e-05 + iprint: -1.0 + maxcor: 10 + maxfun: 15000 + maxiter: 15000 + maxls: 20 + printing: + silence: false + prior_passer: + sigma: 3.0 + use_errors: true + use_widths: true + search: + tol: null + updates: + iterations_per_update: 11 + log_every_update: 1 + model_results_every_update: 1 + remove_state_files_at_end: true + visualize_every_update: 1 + PySwarmsGlobal: + initialize: + ball_lower_limit: 0.49 + ball_upper_limit: 0.51 + method: prior + parallel: + number_of_cores: 1 + printing: + silence: false + prior_passer: + sigma: 3.0 + use_errors: true + use_widths: true + run: + iters: 2000 + search: + cognitive: 0.1 + ftol: -.inf + inertia: 0.3 + n_particles: 50 + social: 0.2 + updates: + iterations_per_update: 11 + log_every_update: 1 + model_results_every_update: 1 + remove_state_files_at_end: true + visualize_every_update: 1 + PySwarmsLocal: + initialize: + ball_lower_limit: 0.49 + ball_upper_limit: 0.51 + method: prior + parallel: + number_of_cores: 1 + printing: + silence: false + prior_passer: + sigma: 3.0 + use_errors: true + use_widths: true + run: + iters: 2000 + search: + cognitive: 0.1 + ftol: -.inf + inertia: 0.3 + minkowski_p_norm: 2 + n_particles: 50 + number_of_k_neighbors: 3 + social: 0.2 + updates: + iterations_per_update: 11 + log_every_update: 1 + model_results_every_update: 1 + remove_state_files_at_end: true + visualize_every_update: 1 diff --git a/test_autofit/config/non_linear/GridSearch.ini b/test_autofit/config/non_linear/GridSearch.ini deleted file mode 100644 index 945c545e5..000000000 --- a/test_autofit/config/non_linear/GridSearch.ini +++ /dev/null @@ -1,5 +0,0 @@ -# The settings of a parallelized grid search of non-linear searches. - -[general] -number_of_cores=3 # The number of cores the search is parallelized over by default, using Python multiprocessing. -step_size=0.1 # The default step size of each grid search parameter, in terms of unit values of the priors. \ No newline at end of file diff --git a/test_autofit/config/non_linear/mcmc/Emcee.ini b/test_autofit/config/non_linear/mcmc/Emcee.ini deleted file mode 100644 index cb085ab01..000000000 --- a/test_autofit/config/non_linear/mcmc/Emcee.ini +++ /dev/null @@ -1,34 +0,0 @@ -[search] -nwalkers=50 - -[run] -nsteps=2000 - -[initialize] -method=prior -ball_lower_limit=0.49 -ball_upper_limit=0.51 - -[auto_correlations] -check_for_convergence=True -check_size=100 -required_length=50 -change_threshold=0.01 - -[updates] -iterations_per_update=2500 -visualize_every_update=1 -model_results_every_update=1 -log_every_update=1 -remove_state_files_at_end=True - -[printing] -silence=False - -[prior_passer] -sigma=3.0 -use_errors=True -use_widths=True - -[parallel] -number_of_cores=1 \ No newline at end of file diff --git a/test_autofit/config/non_linear/mcmc/Zeus.ini b/test_autofit/config/non_linear/mcmc/Zeus.ini deleted file mode 100644 index ad6f94afb..000000000 --- a/test_autofit/config/non_linear/mcmc/Zeus.ini +++ /dev/null @@ -1,44 +0,0 @@ -[search] -nwalkers=50 - -[run] -nsteps=2000 -tune=True -tolerance=0.05 -patience=5 -maxsteps=10000 -mu=1.0 -maxiter=10000 -vectorize=False -check_walkers=True -shuffle_ensemble=True -light_mode=False - -[initialize] -method=prior -ball_lower_limit=0.49 -ball_upper_limit=0.51 - -[auto_correlations] -check_for_convergence=True -check_size=100 -required_length=50 -change_threshold=0.01 - -[updates] -iterations_per_update=500 -visualize_every_update=1 -model_results_every_update=1 -log_every_update=1 -remove_state_files_at_end=True - -[printing] -silence=False - -[prior_passer] -sigma=3.0 -use_errors=True -use_widths=True - -[parallel] -number_of_cores=1 \ No newline at end of file diff --git a/test_autofit/config/non_linear/mock/MockOptimizer.ini b/test_autofit/config/non_linear/mock/MockOptimizer.ini deleted file mode 100644 index 0fed061b3..000000000 --- a/test_autofit/config/non_linear/mock/MockOptimizer.ini +++ /dev/null @@ -1,17 +0,0 @@ -[updates] -iterations_per_update=2500 -visualize_every_update=1 -model_results_every_update=1 -log_every_update=1 -remove_state_files_at_end=True - -[initialize] -method=prior - -[printing] -silence=False - -[prior_passer] -sigma=3.0 -use_errors=True -use_widths=True \ No newline at end of file diff --git a/test_autofit/config/non_linear/mock/MockSearch.ini b/test_autofit/config/non_linear/mock/MockSearch.ini deleted file mode 100644 index 51768b708..000000000 --- a/test_autofit/config/non_linear/mock/MockSearch.ini +++ /dev/null @@ -1,19 +0,0 @@ -[search] - -[updates] -iterations_per_update=2500 -visualize_every_update=1 -model_results_every_update=1 -log_every_update=1 -remove_state_files_at_end=True - -[initialize] -method=prior - -[printing] -silence=False - -[prior_passer] -sigma=3.0 -use_errors=True -use_widths=True diff --git a/test_autofit/config/non_linear/mock/MockSearchBC.ini b/test_autofit/config/non_linear/mock/MockSearchBC.ini deleted file mode 100644 index 51768b708..000000000 --- a/test_autofit/config/non_linear/mock/MockSearchBC.ini +++ /dev/null @@ -1,19 +0,0 @@ -[search] - -[updates] -iterations_per_update=2500 -visualize_every_update=1 -model_results_every_update=1 -log_every_update=1 -remove_state_files_at_end=True - -[initialize] -method=prior - -[printing] -silence=False - -[prior_passer] -sigma=3.0 -use_errors=True -use_widths=True diff --git a/test_autofit/config/non_linear/mock/NLO.ini b/test_autofit/config/non_linear/mock/NLO.ini deleted file mode 100644 index 02b65ce44..000000000 --- a/test_autofit/config/non_linear/mock/NLO.ini +++ /dev/null @@ -1,14 +0,0 @@ -[updates] -iterations_per_update=2500 -visualize_every_update=1 -model_results_every_update=1 -log_every_update=1 -remove_state_files_at_end=True - -[printing] -silence=False - -[prior_passer] -sigma=3.0 -use_errors=True -use_widths=True \ No newline at end of file diff --git a/test_autofit/config/non_linear/nest/DynestyDynamic.ini b/test_autofit/config/non_linear/nest/DynestyDynamic.ini deleted file mode 100644 index 78d82292d..000000000 --- a/test_autofit/config/non_linear/nest/DynestyDynamic.ini +++ /dev/null @@ -1,43 +0,0 @@ -[search] -facc=0.6 -bound=balls -sample=rwalk -bootstrap=1 -enlarge=2 -update_interval=2.0 -walks=26 -slices=6 -fmove=0.8 -max_move=101 -logl_max=inf - -[run] -nlive_init=5 -maxiter=None -maxcall=None -maxiter_init=None -maxcall_init=None -dlogz_init=0.01 -logl_max_init=inf -n_effective_init=inf - -[updates] -iterations_per_update=501 -visualize_every_update=1 -model_results_every_update=1 -log_every_update=1 -remove_state_files_at_end=True - -[initialize] -method=prior - -[printing] -silence=False - -[prior_passer] -sigma=3.0 -use_errors=True -use_widths=True - -[parallel] -number_of_cores=4 \ No newline at end of file diff --git a/test_autofit/config/non_linear/nest/DynestyStatic.ini b/test_autofit/config/non_linear/nest/DynestyStatic.ini deleted file mode 100644 index 07e633123..000000000 --- a/test_autofit/config/non_linear/nest/DynestyStatic.ini +++ /dev/null @@ -1,42 +0,0 @@ -[search] -nlive=150 -facc=0.5 -bound=multi -sample=auto -bootstrap=None -enlarge=None -update_interval=None -first_update=None -walks=5 -slices=5 -fmove=0.9 -max_move=100 - -[run] -maxiter=None -maxcall=None -dlogz=None -logl_max=inf -n_effective=0 - -[updates] -iterations_per_update=500 -visualize_every_update=1 -model_results_every_update=1 -log_every_update=1 -remove_state_files_at_end=True - -[initialize] -method=prior - -[printing] -silence=False - -[prior_passer] -sigma=3.0 -use_errors=True -use_widths=True - -[parallel] -number_of_cores=1 -force_x1_cpu=False \ No newline at end of file diff --git a/test_autofit/config/non_linear/nest/MultiNest.ini b/test_autofit/config/non_linear/nest/MultiNest.ini deleted file mode 100644 index 11cf424c0..000000000 --- a/test_autofit/config/non_linear/nest/MultiNest.ini +++ /dev/null @@ -1,35 +0,0 @@ -[search] -n_live_points=50 -sampling_efficiency=0.6 -const_efficiency_mode=False -multimodal=True -importance_nested_sampling=True -evidence_tolerance=0.5 -max_modes=100 -mode_tolerance=-1e89 -max_iter=0 -n_iter_before_update=100 -null_log_evidence=-1e90 -seed=-1 -verbose=False -resume=True -context=0 -write_output=True -log_zero=-1e100 -init_MPI=False - -[updates] -iterations_per_update=2500 -visualize_every_update=1 -model_results_every_update=1 -log_every_update=1 -remove_state_files_at_end=True -should_update_sym=250 - -[printing] -silence=False - -[prior_passer] -sigma=3.0 -use_errors=True -use_widths=True \ No newline at end of file diff --git a/test_autofit/config/non_linear/optimize/DownhillSimplex.ini b/test_autofit/config/non_linear/optimize/DownhillSimplex.ini deleted file mode 100644 index d35007d58..000000000 --- a/test_autofit/config/non_linear/optimize/DownhillSimplex.ini +++ /dev/null @@ -1,26 +0,0 @@ -[search] -xtol=1e-4 -ftol=1e-4 -maxiter=None -maxfun=None -full_output=0 -disp=1 -retall=0 - -[initialize] -method=prior - -[updates] -iterations_per_update=11 -visualize_every_update=1 -model_results_every_update=1 -log_every_update=1 -remove_state_files_at_end=True - -[printing] -silence=False - -[prior_passer] -sigma=3.0 -use_errors=True -use_widths=True \ No newline at end of file diff --git a/test_autofit/config/non_linear/optimize/Drawer.ini b/test_autofit/config/non_linear/optimize/Drawer.ini deleted file mode 100644 index c43cb4f3e..000000000 --- a/test_autofit/config/non_linear/optimize/Drawer.ini +++ /dev/null @@ -1,22 +0,0 @@ -[search] -total_draws=10 - -[initialize] -method=prior -ball_lower_limit=0.49 -ball_upper_limit=0.51 - -[updates] -iterations_per_update=500 -visualize_every_update=1 -model_results_every_update=1 -log_every_update=1 -remove_state_files_at_end=True - -[printing] -silence=False - -[prior_passer] -sigma=3.0 -use_errors=True -use_widths=True \ No newline at end of file diff --git a/test_autofit/config/non_linear/optimize/LBFGS.ini b/test_autofit/config/non_linear/optimize/LBFGS.ini deleted file mode 100644 index 3856a7d48..000000000 --- a/test_autofit/config/non_linear/optimize/LBFGS.ini +++ /dev/null @@ -1,33 +0,0 @@ -[search] -tol=None - -[options] -maxcor=10 -ftol=2.220446049250313e-09 -eps=1e-08 -gtol=1e-05 -maxfun=15000 -maxiter=15000 -iprint=-1 -maxls=20 -disp=False - -[initialize] -method=prior -ball_lower_limit=0.49 -ball_upper_limit=0.51 - -[updates] -iterations_per_update=11 -visualize_every_update=1 -model_results_every_update=1 -log_every_update=1 -remove_state_files_at_end=True - -[printing] -silence=False - -[prior_passer] -sigma=3.0 -use_errors=True -use_widths=True \ No newline at end of file diff --git a/test_autofit/config/non_linear/optimize/PySwarmsGlobal.ini b/test_autofit/config/non_linear/optimize/PySwarmsGlobal.ini deleted file mode 100644 index 93b609229..000000000 --- a/test_autofit/config/non_linear/optimize/PySwarmsGlobal.ini +++ /dev/null @@ -1,32 +0,0 @@ -[search] -n_particles=50 -cognitive=0.1 -social=0.2 -inertia=0.3 -ftol=-inf - -[run] -iters=2000 - -[initialize] -method=prior -ball_lower_limit=0.49 -ball_upper_limit=0.51 - -[updates] -iterations_per_update=11 -visualize_every_update=1 -model_results_every_update=1 -log_every_update=1 -remove_state_files_at_end=True - -[printing] -silence=False - -[prior_passer] -sigma=3.0 -use_errors=True -use_widths=True - -[parallel] -number_of_cores=1 \ No newline at end of file diff --git a/test_autofit/config/non_linear/optimize/PySwarmsLocal.ini b/test_autofit/config/non_linear/optimize/PySwarmsLocal.ini deleted file mode 100644 index 25fb98f76..000000000 --- a/test_autofit/config/non_linear/optimize/PySwarmsLocal.ini +++ /dev/null @@ -1,34 +0,0 @@ -[search] -n_particles=50 -cognitive=0.1 -social=0.2 -inertia=0.3 -number_of_k_neighbors=3 -minkowski_p_norm=2 -ftol=-inf - -[run] -iters=2000 - -[initialize] -method=prior -ball_lower_limit=0.49 -ball_upper_limit=0.51 - -[updates] -iterations_per_update=11 -visualize_every_update=1 -model_results_every_update=1 -log_every_update=1 -remove_state_files_at_end=True - -[printing] -silence=False - -[prior_passer] -sigma=3.0 -use_errors=True -use_widths=True - -[parallel] -number_of_cores=1 \ No newline at end of file diff --git a/test_autofit/config/notation.yaml b/test_autofit/config/notation.yaml new file mode 100644 index 000000000..46ca66172 --- /dev/null +++ b/test_autofit/config/notation.yaml @@ -0,0 +1,41 @@ +label: + label: + contribution_factor: \omega0 + effective_radius: R_{\rm eff} + four: four_label + gamma: \gamma + normalization: I + one: one_label + one_tuple_0: one_tuple_0_label + one_tuple_1: one_tuple_1_label + param0: p0 + parameter: p + sersic_index: n + three: three_label + tup_x2_0: x + tup_x2_1: y + two: two_label + superscript: + class: c + galaxy: g + gaussian: g + mockchildtuplex2: l + mockclassrelativewidth: r + MockClassx2: o + MockClassx2FormatExp: o + MockClassx2Tuple: a + MockClassx4: a + MockComponents: a + MockParent: gp +label_format: + format: + mass: '{:.2f}' + one: '{:.2f}' + param0: '{:.2f}' + param00: '{:.2f}' + param000: '{:.2f}' + param11: '{:.4f}' + param12: '{:.2e}' + radius: '{:.2f}' + two: '{:.2f}' + two_exp: '{:.2e}' diff --git a/test_autofit/config/notation/label.ini b/test_autofit/config/notation/label.ini deleted file mode 100644 index 9da3146e1..000000000 --- a/test_autofit/config/notation/label.ini +++ /dev/null @@ -1,29 +0,0 @@ -[label] -parameter=p -param0=p0 -one=one_label -two=two_label -three=three_label -four=four_label -one_tuple_0=one_tuple_0_label -one_tuple_1=one_tuple_1_label -tup_x2_0=x -tup_x2_1=y -gamma=\gamma -contribution_factor=\omega0 -normalization=I -effective_radius=R_{\rm eff} -sersic_index=n - -[superscript] -MockClassx2=o -MockClassx2FormatExp=o -MockClassx4=a -MockClassx2Tuple=a -MockComponents=a -MockClassRelativeWidth=r -MockChildTuplex2=l -Gaussian=g -MockParent=gp -Galaxy=g -Class=c diff --git a/test_autofit/config/notation/label_format.ini b/test_autofit/config/notation/label_format.ini deleted file mode 100644 index 3a2c15ed5..000000000 --- a/test_autofit/config/notation/label_format.ini +++ /dev/null @@ -1,11 +0,0 @@ -[format] -param0={:.2f} -param00={:.2f} -param000={:.2f} -param11={:.4f} -param12={:.2e} -one={:.2f} -two={:.2f} -two_exp={:.2e} -radius={:.2f} -mass={:.2f} diff --git a/test_autofit/config/priors/mock_model.json b/test_autofit/config/priors/mock_model.json deleted file mode 100644 index 4fe69d806..000000000 --- a/test_autofit/config/priors/mock_model.json +++ /dev/null @@ -1,547 +0,0 @@ -{ - "MockClassx2": { - "one": { - "type": "Uniform", - "lower_limit": 0.0, - "upper_limit": 1.0, - "width_modifier": { - "type": "Absolute", - "value": 1.0 - }, - "gaussian_limits": { - "lower": 0.0, - "upper": 1.0 - } - }, - "two": { - "type": "Uniform", - "lower_limit": 0.0, - "upper_limit": 2.0, - "width_modifier": { - "type": "Absolute", - "value": 2.0 - }, - "gaussian_limits": { - "lower": 0.0, - "upper": 2.0 - } - } - }, - "MockClassx2FormatExp": { - "one": { - "type": "Uniform", - "lower_limit": 0.0, - "upper_limit": 1.0, - "width_modifier": { - "type": "Absolute", - "value": 1.0 - }, - "gaussian_limits": { - "lower": 0.0, - "upper": 1.0 - } - }, - "two_exp": { - "type": "Uniform", - "lower_limit": 0.0, - "upper_limit": 2.0, - "width_modifier": { - "type": "Absolute", - "value": 2.0 - }, - "gaussian_limits": { - "lower": 0.0, - "upper": 2.0 - } - } -}, - "MockClassx2NoSuperScript": { - "one": { - "type": "Uniform", - "lower_limit": 0.0, - "upper_limit": 1.0, - "width_modifier": { - "type": "Absolute", - "value": 1.0 - }, - "gaussian_limits": { - "lower": 0.0, - "upper": 1.0 - } - }, - "two": { - "type": "Uniform", - "lower_limit": 0.0, - "upper_limit": 2.0, - "width_modifier": { - "type": "Absolute", - "value": 2.0 - }, - "gaussian_limits": { - "lower": 0.0, - "upper": 2.0 - } - } - }, - "MockClassx2Tuple": { - "one_tuple_0": { - "type": "Uniform", - "lower_limit": 0.0, - "upper_limit": 1.0, - "width_modifier": { - "type": "Absolute", - "value": 0.2 - }, - "gaussian_limits": { - "lower": 0.0, - "upper": 1.0 - } - }, - "one_tuple_1": { - "type": "Uniform", - "lower_limit": 0.0, - "upper_limit": 2.0, - "width_modifier": { - "type": "Absolute", - "value": 0.2 - }, - "gaussian_limits": { - "lower": 0.0, - "upper": 2.0 - } - } - }, - "MockClassx3TupleFloat": { - "one_tuple_0": { - "type": "Uniform", - "lower_limit": 0.0, - "upper_limit": 1.0, - "width_modifier": { - "type": "Absolute", - "value": 0.2 - }, - "gaussian_limits": { - "lower": 0.0, - "upper": 1.0 - } - }, - "one_tuple_1": { - "type": "Uniform", - "lower_limit": 0.0, - "upper_limit": 2.0, - "width_modifier": { - "type": "Absolute", - "value": 0.2 - }, - "gaussian_limits": { - "lower": 0.0, - "upper": 2.0 - } - }, - "two": { - "type": "Uniform", - "lower_limit": 0.0, - "upper_limit": 2.0, - "width_modifier": { - "type": "Absolute", - "value": 0.2 - }, - "gaussian_limits": { - "lower": 0.0, - "upper": 2.0 - } - } - }, - "MockClassx4": { - "one": { - "type": "Uniform", - "lower_limit": -120.0, - "upper_limit": 120.0, - "width_modifier": { - "type": "Absolute", - "value": 1.0 - }, - "gaussian_limits": { - "lower": -120.0, - "upper": 120.0 - } - }, - "two": { - "type": "Uniform", - "lower_limit": -120.0, - "upper_limit": 120.0, - "width_modifier": { - "type": "Absolute", - "value": 2.0 - }, - "gaussian_limits": { - "lower": -120.0, - "upper": 120.0 - } - }, - "three": { - "type": "Uniform", - "lower_limit": -120.0, - "upper_limit": 120.0, - "width_modifier": { - "type": "Absolute", - "value": 2.0 - }, - "gaussian_limits": { - "lower": -120.0, - "upper": 120.0 - } - }, - "four": { - "type": "Uniform", - "lower_limit": -120.0, - "upper_limit": 120.0, - "width_modifier": { - "type": "Absolute", - "value": 2.0 - }, - "gaussian_limits": { - "lower": -120.0, - "upper": 120.0 - } - } - }, - "MockClassInf": { - "one": { - "type": "Uniform", - "lower_limit": "-inf", - "upper_limit": 0.0, - "width_modifier": { - "type": "Absolute", - "value": 0.2 - }, - "gaussian_limits": { - "lower": 0.0, - "upper": 1.0 - } - }, - "two": { - "type": "Uniform", - "lower_limit": 0.0, - "upper_limit": "inf", - "width_modifier": { - "type": "Absolute", - "value": 0.2 - }, - "gaussian_limits": { - "lower": 0.0, - "upper": 1.0 - } - } - }, - "MockPositionClass": { - "position_0": { - "type": "Uniform", - "lower_limit": 0.0, - "upper_limit": 1.0 - }, - "position_1": { - "type": "Uniform", - "lower_limit": 0.0, - "upper_limit": 1.0 - } - }, - "MockDistanceClass": { - "two": { - "type": "Uniform", - "lower_limit": 0.0, - "upper_limit": 1.0, - "width_modifier": { - "type": "Absolute", - "value": 0.2 - }, - "gaussian_limits": { - "lower": 0.0, - "upper": 1.0 - } - }, - "one": { - "type": "Uniform", - "lower_limit": 0.0, - "upper_limit": 1.0, - "width_modifier": { - "type": "Absolute", - "value": 0.2 - }, - "gaussian_limits": { - "lower": 0.0, - "upper": 1.0 - } - } - }, - "MockComponents": { - "parameter": { - "type": "Uniform", - "lower_limit": 0.0, - "upper_limit": 1.0, - "width_modifier": { - "type": "Absolute", - "value": 0.2 - }, - "gaussian_limits": { - "lower": 0.0, - "upper": 1.0 - } - } - }, - "MockClassRelativeWidth": { - "one": { - "type": "Uniform", - "lower_limit": 0.0, - "upper_limit": 1.0, - "width_modifier": { - "type": "Absolute", - "value": 0.1 - }, - "gaussian_limits": { - "lower": 0.0, - "upper": 1.0 - } - }, - "two": { - "type": "Uniform", - "lower_limit": 0.0, - "upper_limit": 1.0, - "width_modifier": { - "type": "Absolute", - "value": 0.5 - }, - "gaussian_limits": { - "lower": 0.0, - "upper": 1.0 - } - }, - "three": { - "type": "Uniform", - "lower_limit": 0.0, - "upper_limit": 1.0, - "width_modifier": { - "type": "Absolute", - "value": 1.0 - }, - "gaussian_limits": { - "lower": 0.0, - "upper": 1.0 - } - } - }, - "MockDeferredClass": { - "one": { - "type": "Uniform", - "lower_limit": 0.0, - "upper_limit": 1.0 - }, - "two": { - "type": "Deferred" - } - }, - "MockWithFloat": { - "value": { - "type": "Uniform", - "lower_limit": 0.0, - "upper_limit": 1.0, - "width_modifier": { - "type": "Absolute", - "value": 0.2 - }, - "gaussian_limits": { - "lower": 0.0, - "upper": 1.0 - } - } - }, - "MockWithTuple": { - "tup_0": { - "type": "Uniform", - "lower_limit": 0.0, - "upper_limit": 1.0, - "width_modifier": { - "type": "Absolute", - "value": 0.2 - }, - "gaussian_limits": { - "lower": 0.0, - "upper": 1.0 - } - }, - "tup_1": { - "type": "Uniform", - "lower_limit": 0.0, - "upper_limit": 1.0, - "width_modifier": { - "type": "Absolute", - "value": 0.2 - }, - "gaussian_limits": { - "lower": 0.0, - "upper": 1.0 - } - } - }, - "MockOverload": { - "one": { - "type": "Uniform", - "lower_limit": 0.0, - "upper_limit": 2.0 - }, - "two": { - "type": "Uniform", - "lower_limit": 0.0, - "upper_limit": 2.0 - } - }, - "MockChildTuple": { - "tup_0": { - "type": "Uniform", - "lower_limit": 0.0, - "upper_limit": 1.0, - "width_modifier": { - "type": "Absolute", - "value": 0.2 - }, - "gaussian_limits": { - "lower": 0.0, - "upper": 1.0 - } - }, - "tup_1": { - "type": "Uniform", - "lower_limit": 0.0, - "upper_limit": 1.0, - "width_modifier": { - "type": "Absolute", - "value": 0.2 - }, - "gaussian_limits": { - "lower": 0.0, - "upper": 1.0 - } - } - }, - "MockChildTuplex2": { - "tup_0": { - "type": "Uniform", - "lower_limit": 0.0, - "upper_limit": 1.0, - "width_modifier": { - "type": "Absolute", - "value": 0.2 - }, - "gaussian_limits": { - "lower": 0.0, - "upper": 1.0 - } - }, - "tup_1": { - "type": "Uniform", - "lower_limit": 0.0, - "upper_limit": 1.0, - "width_modifier": { - "type": "Absolute", - "value": 0.2 - }, - "gaussian_limits": { - "lower": 0.0, - "upper": 1.0 - } - }, - "one": { - "type": "Uniform", - "lower_limit": 0.0, - "upper_limit": 2.0, - "width_modifier": { - "type": "Absolute", - "value": 0.2 - }, - "gaussian_limits": { - "lower": 0.0, - "upper": 1.0 - } - }, - "two": { - "type": "Uniform", - "lower_limit": 0.0, - "upper_limit": 2.0, - "width_modifier": { - "type": "Absolute", - "value": 0.2 - }, - "gaussian_limits": { - "lower": 0.0, - "upper": 1.0 - } - } - }, - "MockChildTuplex3": { - "tup_0": { - "type": "Uniform", - "lower_limit": 0.0, - "upper_limit": 1.0, - "width_modifier": { - "type": "Absolute", - "value": 0.2 - }, - "gaussian_limits": { - "lower": 0.0, - "upper": 1.0 - } - }, - "tup_1": { - "type": "Uniform", - "lower_limit": 0.0, - "upper_limit": 1.0, - "width_modifier": { - "type": "Absolute", - "value": 0.2 - }, - "gaussian_limits": { - "lower": 0.0, - "upper": 1.0 - } - }, - "one": { - "type": "Uniform", - "lower_limit": 0.0, - "upper_limit": 2.0, - "width_modifier": { - "type": "Absolute", - "value": 0.2 - }, - "gaussian_limits": { - "lower": 0.0, - "upper": 1.0 - } - }, - "two": { - "type": "Uniform", - "lower_limit": 0.0, - "upper_limit": 2.0, - "width_modifier": { - "type": "Absolute", - "value": 0.2 - }, - "gaussian_limits": { - "lower": 0.0, - "upper": 1.0 - } - }, - "three": { - "type": "Uniform", - "lower_limit": 0.0, - "upper_limit": 2.0, - "width_modifier": { - "type": "Absolute", - "value": 0.2 - }, - "gaussian_limits": { - "lower": 0.0, - "upper": 1.0 - } - } - } -} \ No newline at end of file diff --git a/test_autofit/config/priors/mock_model.yaml b/test_autofit/config/priors/mock_model.yaml new file mode 100644 index 000000000..74cbc268a --- /dev/null +++ b/test_autofit/config/priors/mock_model.yaml @@ -0,0 +1,410 @@ +MockChildTuple: + tup_0: + gaussian_limits: + lower: 0.0 + upper: 1.0 + lower_limit: 0.0 + type: Uniform + upper_limit: 1.0 + width_modifier: + type: Absolute + value: 0.2 + tup_1: + gaussian_limits: + lower: 0.0 + upper: 1.0 + lower_limit: 0.0 + type: Uniform + upper_limit: 1.0 + width_modifier: + type: Absolute + value: 0.2 +MockChildTuplex2: + one: + gaussian_limits: + lower: 0.0 + upper: 1.0 + lower_limit: 0.0 + type: Uniform + upper_limit: 2.0 + width_modifier: + type: Absolute + value: 0.2 + tup_0: + gaussian_limits: + lower: 0.0 + upper: 1.0 + lower_limit: 0.0 + type: Uniform + upper_limit: 1.0 + width_modifier: + type: Absolute + value: 0.2 + tup_1: + gaussian_limits: + lower: 0.0 + upper: 1.0 + lower_limit: 0.0 + type: Uniform + upper_limit: 1.0 + width_modifier: + type: Absolute + value: 0.2 + two: + gaussian_limits: + lower: 0.0 + upper: 1.0 + lower_limit: 0.0 + type: Uniform + upper_limit: 2.0 + width_modifier: + type: Absolute + value: 0.2 +MockChildTuplex3: + one: + gaussian_limits: + lower: 0.0 + upper: 1.0 + lower_limit: 0.0 + type: Uniform + upper_limit: 2.0 + width_modifier: + type: Absolute + value: 0.2 + three: + gaussian_limits: + lower: 0.0 + upper: 1.0 + lower_limit: 0.0 + type: Uniform + upper_limit: 2.0 + width_modifier: + type: Absolute + value: 0.2 + tup_0: + gaussian_limits: + lower: 0.0 + upper: 1.0 + lower_limit: 0.0 + type: Uniform + upper_limit: 1.0 + width_modifier: + type: Absolute + value: 0.2 + tup_1: + gaussian_limits: + lower: 0.0 + upper: 1.0 + lower_limit: 0.0 + type: Uniform + upper_limit: 1.0 + width_modifier: + type: Absolute + value: 0.2 + two: + gaussian_limits: + lower: 0.0 + upper: 1.0 + lower_limit: 0.0 + type: Uniform + upper_limit: 2.0 + width_modifier: + type: Absolute + value: 0.2 +MockClassInf: + one: + gaussian_limits: + lower: 0.0 + upper: 1.0 + lower_limit: -inf + type: Uniform + upper_limit: 0.0 + width_modifier: + type: Absolute + value: 0.2 + two: + gaussian_limits: + lower: 0.0 + upper: 1.0 + lower_limit: 0.0 + type: Uniform + upper_limit: inf + width_modifier: + type: Absolute + value: 0.2 +MockClassRelativeWidth: + one: + gaussian_limits: + lower: 0.0 + upper: 1.0 + lower_limit: 0.0 + type: Uniform + upper_limit: 1.0 + width_modifier: + type: Absolute + value: 0.1 + three: + gaussian_limits: + lower: 0.0 + upper: 1.0 + lower_limit: 0.0 + type: Uniform + upper_limit: 1.0 + width_modifier: + type: Absolute + value: 1.0 + two: + gaussian_limits: + lower: 0.0 + upper: 1.0 + lower_limit: 0.0 + type: Uniform + upper_limit: 1.0 + width_modifier: + type: Absolute + value: 0.5 +MockClassx2: + one: + gaussian_limits: + lower: 0.0 + upper: 1.0 + lower_limit: 0.0 + type: Uniform + upper_limit: 1.0 + width_modifier: + type: Absolute + value: 1.0 + two: + gaussian_limits: + lower: 0.0 + upper: 2.0 + lower_limit: 0.0 + type: Uniform + upper_limit: 2.0 + width_modifier: + type: Absolute + value: 2.0 +MockClassx2FormatExp: + one: + gaussian_limits: + lower: 0.0 + upper: 1.0 + lower_limit: 0.0 + type: Uniform + upper_limit: 1.0 + width_modifier: + type: Absolute + value: 1.0 + two_exp: + gaussian_limits: + lower: 0.0 + upper: 2.0 + lower_limit: 0.0 + type: Uniform + upper_limit: 2.0 + width_modifier: + type: Absolute + value: 2.0 +MockClassx2NoSuperScript: + one: + gaussian_limits: + lower: 0.0 + upper: 1.0 + lower_limit: 0.0 + type: Uniform + upper_limit: 1.0 + width_modifier: + type: Absolute + value: 1.0 + two: + gaussian_limits: + lower: 0.0 + upper: 2.0 + lower_limit: 0.0 + type: Uniform + upper_limit: 2.0 + width_modifier: + type: Absolute + value: 2.0 +MockClassx2Tuple: + one_tuple_0: + gaussian_limits: + lower: 0.0 + upper: 1.0 + lower_limit: 0.0 + type: Uniform + upper_limit: 1.0 + width_modifier: + type: Absolute + value: 0.2 + one_tuple_1: + gaussian_limits: + lower: 0.0 + upper: 2.0 + lower_limit: 0.0 + type: Uniform + upper_limit: 2.0 + width_modifier: + type: Absolute + value: 0.2 +MockClassx3TupleFloat: + one_tuple_0: + gaussian_limits: + lower: 0.0 + upper: 1.0 + lower_limit: 0.0 + type: Uniform + upper_limit: 1.0 + width_modifier: + type: Absolute + value: 0.2 + one_tuple_1: + gaussian_limits: + lower: 0.0 + upper: 2.0 + lower_limit: 0.0 + type: Uniform + upper_limit: 2.0 + width_modifier: + type: Absolute + value: 0.2 + two: + gaussian_limits: + lower: 0.0 + upper: 2.0 + lower_limit: 0.0 + type: Uniform + upper_limit: 2.0 + width_modifier: + type: Absolute + value: 0.2 +MockClassx4: + four: + gaussian_limits: + lower: -120.0 + upper: 120.0 + lower_limit: -120.0 + type: Uniform + upper_limit: 120.0 + width_modifier: + type: Absolute + value: 2.0 + one: + gaussian_limits: + lower: -120.0 + upper: 120.0 + lower_limit: -120.0 + type: Uniform + upper_limit: 120.0 + width_modifier: + type: Absolute + value: 1.0 + three: + gaussian_limits: + lower: -120.0 + upper: 120.0 + lower_limit: -120.0 + type: Uniform + upper_limit: 120.0 + width_modifier: + type: Absolute + value: 2.0 + two: + gaussian_limits: + lower: -120.0 + upper: 120.0 + lower_limit: -120.0 + type: Uniform + upper_limit: 120.0 + width_modifier: + type: Absolute + value: 2.0 +MockComponents: + parameter: + gaussian_limits: + lower: 0.0 + upper: 1.0 + lower_limit: 0.0 + type: Uniform + upper_limit: 1.0 + width_modifier: + type: Absolute + value: 0.2 +MockDeferredClass: + one: + lower_limit: 0.0 + type: Uniform + upper_limit: 1.0 + two: + type: Deferred +MockDistanceClass: + one: + gaussian_limits: + lower: 0.0 + upper: 1.0 + lower_limit: 0.0 + type: Uniform + upper_limit: 1.0 + width_modifier: + type: Absolute + value: 0.2 + two: + gaussian_limits: + lower: 0.0 + upper: 1.0 + lower_limit: 0.0 + type: Uniform + upper_limit: 1.0 + width_modifier: + type: Absolute + value: 0.2 +MockOverload: + one: + lower_limit: 0.0 + type: Uniform + upper_limit: 2.0 + two: + lower_limit: 0.0 + type: Uniform + upper_limit: 2.0 +MockPositionClass: + position_0: + lower_limit: 0.0 + type: Uniform + upper_limit: 1.0 + position_1: + lower_limit: 0.0 + type: Uniform + upper_limit: 1.0 +MockWithFloat: + value: + gaussian_limits: + lower: 0.0 + upper: 1.0 + lower_limit: 0.0 + type: Uniform + upper_limit: 1.0 + width_modifier: + type: Absolute + value: 0.2 +MockWithTuple: + tup_0: + gaussian_limits: + lower: 0.0 + upper: 1.0 + lower_limit: 0.0 + type: Uniform + upper_limit: 1.0 + width_modifier: + type: Absolute + value: 0.2 + tup_1: + gaussian_limits: + lower: 0.0 + upper: 1.0 + lower_limit: 0.0 + type: Uniform + upper_limit: 1.0 + width_modifier: + type: Absolute + value: 0.2 diff --git a/test_autofit/config/priors/model.json b/test_autofit/config/priors/model.json deleted file mode 100644 index a271526a2..000000000 --- a/test_autofit/config/priors/model.json +++ /dev/null @@ -1,43 +0,0 @@ -{ - "Gaussian": { - "centre": { - "type": "Uniform", - "lower_limit": 0.0, - "upper_limit": 1.0, - "width_modifier": { - "type": "Absolute", - "value": 1.0 - }, - "gaussian_limits": { - "lower": 0.0, - "upper": 1.0 - } - }, - "normalization": { - "type": "Uniform", - "lower_limit": 0.0, - "upper_limit": 1.0, - "width_modifier": { - "type": "Absolute", - "value": 1.0 - }, - "gaussian_limits": { - "lower": 0.0, - "upper": 1.0 - } - }, - "sigma": { - "type": "Uniform", - "lower_limit": 0.0, - "upper_limit": 1.0, - "width_modifier": { - "type": "Absolute", - "value": 1.0 - }, - "gaussian_limits": { - "lower": 0.0, - "upper": 1.0 - } - } - } -} \ No newline at end of file diff --git a/test_autofit/config/priors/model.yaml b/test_autofit/config/priors/model.yaml new file mode 100644 index 000000000..5ca637328 --- /dev/null +++ b/test_autofit/config/priors/model.yaml @@ -0,0 +1,31 @@ +Gaussian: + centre: + gaussian_limits: + lower: 0.0 + upper: 1.0 + lower_limit: 0.0 + type: Uniform + upper_limit: 1.0 + width_modifier: + type: Absolute + value: 1.0 + normalization: + gaussian_limits: + lower: 0.0 + upper: 1.0 + lower_limit: 0.0 + type: Uniform + upper_limit: 1.0 + width_modifier: + type: Absolute + value: 1.0 + sigma: + gaussian_limits: + lower: 0.0 + upper: 1.0 + lower_limit: 0.0 + type: Uniform + upper_limit: 1.0 + width_modifier: + type: Absolute + value: 1.0 diff --git a/test_autofit/config/priors/prior.json b/test_autofit/config/priors/prior.json deleted file mode 100644 index 0f7dab2a6..000000000 --- a/test_autofit/config/priors/prior.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "gaussian.GaussianPrior": { - "lower_limit": { - "type": "Constant", - "value": "-inf" - }, - "upper_limit": { - "type": "Constant", - "value": "inf" - } - } -} \ No newline at end of file diff --git a/test_autofit/config/priors/prior.yaml b/test_autofit/config/priors/prior.yaml new file mode 100644 index 000000000..4cc72c4ae --- /dev/null +++ b/test_autofit/config/priors/prior.yaml @@ -0,0 +1,7 @@ +gaussian.GaussianPrior: + lower_limit: + type: Constant + value: -inf + upper_limit: + type: Constant + value: inf From 715730485b2e7e968f551ba42008c0a6b88be203 Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Tue, 29 Nov 2022 14:35:45 +0000 Subject: [PATCH 018/226] remove MOckSearchBC --- autofit/non_linear/abstract_search.py | 6 +- autofit/non_linear/mcmc/auto_correlations.py | 2 +- .../non_linear/nest/ultranest/ultranest.py | 2 +- autofit/non_linear/optimize/lbfgs/lbfgs.py | 2 +- test_autofit/config/non_linear.yaml | 20 +-- .../config/non_linear/GridSearch.yaml | 5 + test_autofit/config/non_linear/NLO.yaml | 0 test_autofit/config/non_linear/README.rst | 9 ++ test_autofit/config/non_linear/mcmc.yaml | 66 +++++++++ test_autofit/config/non_linear/mock.yaml | 44 ++++++ test_autofit/config/non_linear/nest.yaml | 108 +++++++++++++++ test_autofit/config/non_linear/optimize.yaml | 127 ++++++++++++++++++ 12 files changed, 366 insertions(+), 25 deletions(-) create mode 100644 test_autofit/config/non_linear/GridSearch.yaml create mode 100644 test_autofit/config/non_linear/NLO.yaml create mode 100644 test_autofit/config/non_linear/README.rst create mode 100644 test_autofit/config/non_linear/mcmc.yaml create mode 100644 test_autofit/config/non_linear/mock.yaml create mode 100644 test_autofit/config/non_linear/nest.yaml create mode 100644 test_autofit/config/non_linear/optimize.yaml diff --git a/autofit/non_linear/abstract_search.py b/autofit/non_linear/abstract_search.py index 21ca4fe7c..28510ce7c 100644 --- a/autofit/non_linear/abstract_search.py +++ b/autofit/non_linear/abstract_search.py @@ -591,7 +591,7 @@ def _class_config(self) -> Dict: @property def config_dict_search(self) -> Dict: - config_dict = copy.copy(self._class_config["search"]._dict) + config_dict = copy.copy(self._class_config["search"]) for key, value in config_dict.items(): try: @@ -604,7 +604,7 @@ def config_dict_search(self) -> Dict: @property def config_dict_run(self) -> Dict: - config_dict = copy.copy(self._class_config["run"]._dict) + config_dict = copy.copy(self._class_config["run"]) for key, value in config_dict.items(): try: @@ -624,7 +624,7 @@ def config_dict_run(self) -> Dict: @property def config_dict_settings(self) -> Dict: - return self._class_config["settings"]._dict + return self._class_config["settings"] @property def config_type(self): diff --git a/autofit/non_linear/mcmc/auto_correlations.py b/autofit/non_linear/mcmc/auto_correlations.py index 57cbbb575..3fb98dd4b 100644 --- a/autofit/non_linear/mcmc/auto_correlations.py +++ b/autofit/non_linear/mcmc/auto_correlations.py @@ -42,7 +42,7 @@ def __init__( def update_via_config(self, config): - config_dict = config._dict + config_dict = config self.check_for_convergence = self.check_for_convergence if self.check_for_convergence is not None else config_dict["check_for_convergence"] self.check_size = self.check_size or config_dict["check_size"] diff --git a/autofit/non_linear/nest/ultranest/ultranest.py b/autofit/non_linear/nest/ultranest/ultranest.py index baa0792cd..16e8953d4 100644 --- a/autofit/non_linear/nest/ultranest/ultranest.py +++ b/autofit/non_linear/nest/ultranest/ultranest.py @@ -106,7 +106,7 @@ def config_dict_with_test_mode_settings_from(self, config_dict): @property def config_dict_stepsampler(self): - config_dict = copy.copy(self.config_type[self.__class__.__name__]["stepsampler"]._dict) + config_dict = copy.copy(self.config_type[self.__class__.__name__]["stepsampler"]) for key, value in config_dict.items(): try: diff --git a/autofit/non_linear/optimize/lbfgs/lbfgs.py b/autofit/non_linear/optimize/lbfgs/lbfgs.py index d6923d2f5..6a62362b9 100644 --- a/autofit/non_linear/optimize/lbfgs/lbfgs.py +++ b/autofit/non_linear/optimize/lbfgs/lbfgs.py @@ -75,7 +75,7 @@ def __init__( @property def config_dict_options(self): - config_dict = copy.copy(self._class_config["options"]._dict) + config_dict = copy.copy(self._class_config["options"]) for key, value in config_dict.items(): try: diff --git a/test_autofit/config/non_linear.yaml b/test_autofit/config/non_linear.yaml index b663a8935..613f05e04 100644 --- a/test_autofit/config/non_linear.yaml +++ b/test_autofit/config/non_linear.yaml @@ -1,9 +1,4 @@ -GridSearch: - general: - number_of_cores: '3 # The number of cores the search is parallelized over by default, - using Python multiprocessing.' - step_size: '0.1 # The default step size of each grid search parameter, in - terms of unit values of the priors.' + mock: MockOptimizer: initialize: @@ -30,19 +25,6 @@ mock: updates: iterations_per_update: 2500 remove_state_files_at_end: true - MockSearchBC: - initialize: - method: prior - printing: - silence: false - prior_passer: - sigma: 3.0 - use_errors: true - use_widths: true - search: {} - updates: - iterations_per_update: 2500 - remove_state_files_at_end: true NLO: printing: silence: false diff --git a/test_autofit/config/non_linear/GridSearch.yaml b/test_autofit/config/non_linear/GridSearch.yaml new file mode 100644 index 000000000..eb03a4d07 --- /dev/null +++ b/test_autofit/config/non_linear/GridSearch.yaml @@ -0,0 +1,5 @@ +general: + number_of_cores: '3 # The number of cores the search is parallelized over by default, + using Python multiprocessing.' + step_size: '0.1 # The default step size of each grid search parameter, in + terms of unit values of the priors.' \ No newline at end of file diff --git a/test_autofit/config/non_linear/NLO.yaml b/test_autofit/config/non_linear/NLO.yaml new file mode 100644 index 000000000..e69de29bb diff --git a/test_autofit/config/non_linear/README.rst b/test_autofit/config/non_linear/README.rst new file mode 100644 index 000000000..11774c406 --- /dev/null +++ b/test_autofit/config/non_linear/README.rst @@ -0,0 +1,9 @@ +The ``non_linear`` folder contains configuration files which customize the default behaviour of non-linear searches in +**PyAutoLens**. + +Files +----- + +- ``mcmc.yaml``: Settings default behaviour of MCMC non-linear searches (e.g. Emcee). +- ``nest.yaml``: Settings default behaviour of nested sampler non-linear searches (e.g. Dynesty). +- ``optimizer.yaml``: Settings default behaviour of optimizer non-linear searches (e.g. PySwarms). \ No newline at end of file diff --git a/test_autofit/config/non_linear/mcmc.yaml b/test_autofit/config/non_linear/mcmc.yaml new file mode 100644 index 000000000..eb02f6f1b --- /dev/null +++ b/test_autofit/config/non_linear/mcmc.yaml @@ -0,0 +1,66 @@ +Emcee: + auto_correlations: + change_threshold: 0.01 + check_for_convergence: true + check_size: 100 + required_length: 50 + initialize: + ball_lower_limit: 0.49 + ball_upper_limit: 0.51 + method: prior + parallel: + number_of_cores: 1 + printing: + silence: false + prior_passer: + sigma: 3.0 + use_errors: true + use_widths: true + run: + nsteps: 2000 + search: + nwalkers: 50 + updates: + iterations_per_update: 2500 + log_every_update: 1 + model_results_every_update: 1 + remove_state_files_at_end: true + visualize_every_update: 1 +Zeus: + auto_correlations: + change_threshold: 0.01 + check_for_convergence: true + check_size: 100 + required_length: 50 + initialize: + ball_lower_limit: 0.49 + ball_upper_limit: 0.51 + method: prior + parallel: + number_of_cores: 1 + printing: + silence: false + prior_passer: + sigma: 3.0 + use_errors: true + use_widths: true + run: + check_walkers: true + light_mode: false + maxiter: 10000 + maxsteps: 10000 + mu: 1.0 + nsteps: 2000 + patience: 5 + shuffle_ensemble: true + tolerance: 0.05 + tune: true + vectorize: false + search: + nwalkers: 50 + updates: + iterations_per_update: 500 + log_every_update: 1 + model_results_every_update: 1 + remove_state_files_at_end: true + visualize_every_update: 1 \ No newline at end of file diff --git a/test_autofit/config/non_linear/mock.yaml b/test_autofit/config/non_linear/mock.yaml new file mode 100644 index 000000000..483842a26 --- /dev/null +++ b/test_autofit/config/non_linear/mock.yaml @@ -0,0 +1,44 @@ +MockOptimizer: + initialize: + method: prior + printing: + silence: false + prior_passer: + sigma: 3.0 + use_errors: true + use_widths: true + updates: + iterations_per_update: 2500 + log_every_update: 1 + model_results_every_update: 1 + remove_state_files_at_end: true + visualize_every_update: 1 +MockSearch: + initialize: + method: prior + printing: + silence: false + prior_passer: + sigma: 3.0 + use_errors: true + use_widths: true + search: {} + updates: + iterations_per_update: 2500 + log_every_update: 1 + model_results_every_update: 1 + remove_state_files_at_end: true + visualize_every_update: 1 +NLO: + printing: + silence: false + prior_passer: + sigma: 3.0 + use_errors: true + use_widths: true + updates: + iterations_per_update: 2500 + log_every_update: 1 + model_results_every_update: 1 + remove_state_files_at_end: true + visualize_every_update: 1 \ No newline at end of file diff --git a/test_autofit/config/non_linear/nest.yaml b/test_autofit/config/non_linear/nest.yaml new file mode 100644 index 000000000..33f44a2a5 --- /dev/null +++ b/test_autofit/config/non_linear/nest.yaml @@ -0,0 +1,108 @@ +DynestyDynamic: + initialize: + method: prior + parallel: + number_of_cores: 4 + printing: + silence: false + prior_passer: + sigma: 3.0 + use_errors: true + use_widths: true + run: + dlogz_init: 0.01 + logl_max_init: .inf + maxcall: null + maxcall_init: null + maxiter: null + maxiter_init: null + n_effective_init: .inf + nlive_init: 5 + search: + bootstrap: 1 + bound: balls + enlarge: 2 + facc: 0.6 + fmove: 0.8 + logl_max: .inf + max_move: 101 + sample: rwalk + slices: 6 + update_interval: 2.0 + walks: 26 + updates: + iterations_per_update: 501 + log_every_update: 1 + model_results_every_update: 1 + remove_state_files_at_end: true + visualize_every_update: 1 +DynestyStatic: + initialize: + method: prior + parallel: + force_x1_cpu: false + number_of_cores: 1 + printing: + silence: false + prior_passer: + sigma: 3.0 + use_errors: true + use_widths: true + run: + dlogz: null + logl_max: .inf + maxcall: null + maxiter: null + n_effective: 0 + search: + bootstrap: null + bound: multi + enlarge: null + facc: 0.5 + first_update: null + fmove: 0.9 + max_move: 100 + nlive: 150 + sample: auto + slices: 5 + update_interval: null + walks: 5 + updates: + iterations_per_update: 500 + log_every_update: 1 + model_results_every_update: 1 + remove_state_files_at_end: true + visualize_every_update: 1 +MultiNest: + printing: + silence: false + prior_passer: + sigma: 3.0 + use_errors: true + use_widths: true + search: + const_efficiency_mode: false + context: 0 + evidence_tolerance: 0.5 + importance_nested_sampling: true + init_mpi: false + log_zero: -1.0e+100 + max_iter: 0 + max_modes: 100 + mode_tolerance: -1.0e+89 + multimodal: true + n_iter_before_update: 100 + n_live_points: 50 + null_log_evidence: -1.0e+90 + resume: true + sampling_efficiency: 0.6 + seed: -1.0 + verbose: false + write_output: true + updates: + iterations_per_update: 2500 + log_every_update: 1 + model_results_every_update: 1 + remove_state_files_at_end: true + should_update_sym: 250 + visualize_every_update: 1 \ No newline at end of file diff --git a/test_autofit/config/non_linear/optimize.yaml b/test_autofit/config/non_linear/optimize.yaml new file mode 100644 index 000000000..7a2201180 --- /dev/null +++ b/test_autofit/config/non_linear/optimize.yaml @@ -0,0 +1,127 @@ +DownhillSimplex: + initialize: + method: prior + printing: + silence: false + prior_passer: + sigma: 3.0 + use_errors: true + use_widths: true + search: + disp: 1 + ftol: 0.0001 + full_output: 0 + maxfun: null + maxiter: null + retall: 0 + xtol: 0.0001 + updates: + iterations_per_update: 11 + log_every_update: 1 + model_results_every_update: 1 + remove_state_files_at_end: true + visualize_every_update: 1 +Drawer: + initialize: + ball_lower_limit: 0.49 + ball_upper_limit: 0.51 + method: prior + printing: + silence: false + prior_passer: + sigma: 3.0 + use_errors: true + use_widths: true + search: + total_draws: 10 + updates: + iterations_per_update: 500 + log_every_update: 1 + model_results_every_update: 1 + remove_state_files_at_end: true + visualize_every_update: 1 +LBFGS: + initialize: + ball_lower_limit: 0.49 + ball_upper_limit: 0.51 + method: prior + options: + disp: false + eps: 1.0e-08 + ftol: 2.220446049250313e-09 + gtol: 1.0e-05 + iprint: -1.0 + maxcor: 10 + maxfun: 15000 + maxiter: 15000 + maxls: 20 + printing: + silence: false + prior_passer: + sigma: 3.0 + use_errors: true + use_widths: true + search: + tol: null + updates: + iterations_per_update: 11 + log_every_update: 1 + model_results_every_update: 1 + remove_state_files_at_end: true + visualize_every_update: 1 +PySwarmsGlobal: + initialize: + ball_lower_limit: 0.49 + ball_upper_limit: 0.51 + method: prior + parallel: + number_of_cores: 1 + printing: + silence: false + prior_passer: + sigma: 3.0 + use_errors: true + use_widths: true + run: + iters: 2000 + search: + cognitive: 0.1 + ftol: -.inf + inertia: 0.3 + n_particles: 50 + social: 0.2 + updates: + iterations_per_update: 11 + log_every_update: 1 + model_results_every_update: 1 + remove_state_files_at_end: true + visualize_every_update: 1 +PySwarmsLocal: + initialize: + ball_lower_limit: 0.49 + ball_upper_limit: 0.51 + method: prior + parallel: + number_of_cores: 1 + printing: + silence: false + prior_passer: + sigma: 3.0 + use_errors: true + use_widths: true + run: + iters: 2000 + search: + cognitive: 0.1 + ftol: -.inf + inertia: 0.3 + minkowski_p_norm: 2 + n_particles: 50 + number_of_k_neighbors: 3 + social: 0.2 + updates: + iterations_per_update: 11 + log_every_update: 1 + model_results_every_update: 1 + remove_state_files_at_end: true + visualize_every_update: 1 From 83321e64b719b9419d2c40a54b10952aa9cb18d1 Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Tue, 29 Nov 2022 15:02:28 +0000 Subject: [PATCH 019/226] disable test visualization --- test_autofit/config/non_linear.yaml | 11 ---- test_autofit/config/non_linear/NLO.yaml | 0 test_autofit/config/non_linear/mock.yaml | 76 ++++++++++-------------- test_autofit/config/notation.yaml | 10 ++-- test_autofit/config/visualize.yaml | 36 +++++------ 5 files changed, 55 insertions(+), 78 deletions(-) delete mode 100644 test_autofit/config/non_linear/NLO.yaml diff --git a/test_autofit/config/non_linear.yaml b/test_autofit/config/non_linear.yaml index 613f05e04..ef8bb77c0 100644 --- a/test_autofit/config/non_linear.yaml +++ b/test_autofit/config/non_linear.yaml @@ -1,4 +1,3 @@ - mock: MockOptimizer: initialize: @@ -25,16 +24,6 @@ mock: updates: iterations_per_update: 2500 remove_state_files_at_end: true - NLO: - printing: - silence: false - prior_passer: - sigma: 3.0 - use_errors: true - use_widths: true - updates: - iterations_per_update: 2500 - remove_state_files_at_end: true mcmc: Emcee: auto_correlations: diff --git a/test_autofit/config/non_linear/NLO.yaml b/test_autofit/config/non_linear/NLO.yaml deleted file mode 100644 index e69de29bb..000000000 diff --git a/test_autofit/config/non_linear/mock.yaml b/test_autofit/config/non_linear/mock.yaml index 483842a26..fbd7f0f6c 100644 --- a/test_autofit/config/non_linear/mock.yaml +++ b/test_autofit/config/non_linear/mock.yaml @@ -1,44 +1,32 @@ -MockOptimizer: - initialize: - method: prior - printing: - silence: false - prior_passer: - sigma: 3.0 - use_errors: true - use_widths: true - updates: - iterations_per_update: 2500 - log_every_update: 1 - model_results_every_update: 1 - remove_state_files_at_end: true - visualize_every_update: 1 -MockSearch: - initialize: - method: prior - printing: - silence: false - prior_passer: - sigma: 3.0 - use_errors: true - use_widths: true - search: {} - updates: - iterations_per_update: 2500 - log_every_update: 1 - model_results_every_update: 1 - remove_state_files_at_end: true - visualize_every_update: 1 -NLO: - printing: - silence: false - prior_passer: - sigma: 3.0 - use_errors: true - use_widths: true - updates: - iterations_per_update: 2500 - log_every_update: 1 - model_results_every_update: 1 - remove_state_files_at_end: true - visualize_every_update: 1 \ No newline at end of file +mock_search: + MockOptimizer: + initialize: + method: prior + printing: + silence: false + prior_passer: + sigma: 3.0 + use_errors: true + use_widths: true + updates: + iterations_per_update: 2500 + log_every_update: 1 + model_results_every_update: 1 + remove_state_files_at_end: true + visualize_every_update: 1 + MockSearch: + initialize: + method: prior + printing: + silence: false + prior_passer: + sigma: 3.0 + use_errors: true + use_widths: true + search: {} + updates: + iterations_per_update: 2500 + log_every_update: 1 + model_results_every_update: 1 + remove_state_files_at_end: true + visualize_every_update: 1 \ No newline at end of file diff --git a/test_autofit/config/notation.yaml b/test_autofit/config/notation.yaml index 46ca66172..195a2a97c 100644 --- a/test_autofit/config/notation.yaml +++ b/test_autofit/config/notation.yaml @@ -16,11 +16,11 @@ label: tup_x2_1: y two: two_label superscript: - class: c - galaxy: g - gaussian: g - mockchildtuplex2: l - mockclassrelativewidth: r + Class: c + Galaxy: g + Gaussian: g + MockChildTuplex2: l + MockClassRelativeWidth: r MockClassx2: o MockClassx2FormatExp: o MockClassx2Tuple: a diff --git a/test_autofit/config/visualize.yaml b/test_autofit/config/visualize.yaml index 1d792e4b0..17ccf5964 100644 --- a/test_autofit/config/visualize.yaml +++ b/test_autofit/config/visualize.yaml @@ -3,26 +3,26 @@ general: backend: default # The matploblib backend used for visualization. `default` uses the system default, can specifiy specific backend (e.g. TKAgg, Qt5Agg, WXAgg). plots_search: dynesty: - cornerplot: true + cornerplot: false cornerpoints: false # Whether to output the Dynesty cornerplot figure during a non-linear search fit. - runplot: true # Whether to output the Dynesty runplot figure during a non-linear search fit. - traceplot: true # Whether to output the Dynesty traceplot figure during a non-linear search fit. + runplot: false # Whether to output the Dynesty runplot figure during a non-linear search fit. + traceplot: false # Whether to output the Dynesty traceplot figure during a non-linear search fit. emcee: - corner: true # Whether to output the Emcee corner figure during a non-linear search fit. - likelihood_series: true # Whether to output the Emcee likelihood series figure during a non-linear search fit. - time_series: true # Whether to output the Emcee time series figure during a non-linear search fit. - trajectories: true # Whether to output the Emcee trajectories figure during a non-linear search fit. + corner: false # Whether to output the Emcee corner figure during a non-linear search fit. + likelihood_series: false # Whether to output the Emcee likelihood series figure during a non-linear search fit. + time_series: false # Whether to output the Emcee time series figure during a non-linear search fit. + trajectories: false # Whether to output the Emcee trajectories figure during a non-linear search fit. pyswarms: - contour: true # Whether to output the PySwarms contour figure during a non-linear search fit. - cost_history: true # Whether to output the PySwarms cost_history figure during a non-linear search fit. - time_series: true # Whether to output the PySwarms time_series figure during a non-linear search fit. - trajectories: true # Whether to output the PySwarms trajectories figure during a non-linear search fit. + contour: false # Whether to output the PySwarms contour figure during a non-linear search fit. + cost_history: false # Whether to output the PySwarms cost_history figure during a non-linear search fit. + time_series: false # Whether to output the PySwarms time_series figure during a non-linear search fit. + trajectories: false # Whether to output the PySwarms trajectories figure during a non-linear search fit. ultranest: - cornerplot: true # Whether to output the Ultranest cornerplot figure during a non-linear search fit. - runplot: true # Whether to output the Ultranest runplot figure during a non-linear search fit. - traceplot: true # Whether to output the Ultranest traceplot figure during a non-linear search fit. + cornerplot: false # Whether to output the Ultranest cornerplot figure during a non-linear search fit. + runplot: false # Whether to output the Ultranest runplot figure during a non-linear search fit. + traceplot: false # Whether to output the Ultranest traceplot figure during a non-linear search fit. zeus: - corner: true # Whether to output the Zeus corner figure during a non-linear search fit. - likelihood_series: true # Whether to output the Zeus likelihood series figure during a non-linear search fit. - time_series: true # Whether to output the Zeus time series figure during a non-linear search fit. - trajectories: true # Whether to output the Zeus trajectories figure during a non-linear search fit. + corner: false # Whether to output the Zeus corner figure during a non-linear search fit. + likelihood_series: false # Whether to output the Zeus likelihood series figure during a non-linear search fit. + time_series: false # Whether to output the Zeus time series figure during a non-linear search fit. + trajectories: false # Whether to output the Zeus trajectories figure during a non-linear search fit. From 982499ac2149b3fda66654afcf6e309e79b4e5e3 Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Tue, 29 Nov 2022 15:07:29 +0000 Subject: [PATCH 020/226] update config to fix unit tests --- test_autofit/config/general.yaml | 2 +- test_autofit/database/identifier/test_converter.py | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/test_autofit/config/general.yaml b/test_autofit/config/general.yaml index 34d83f5ce..b547327a4 100644 --- a/test_autofit/config/general.yaml +++ b/test_autofit/config/general.yaml @@ -13,7 +13,7 @@ output: log_to_file: false # If True, outputs the non-linear search log to a file (and not printed to screen). log_file: output.log # The name of the file the logged output is written to (in the non-linear search output folder) model_results_decimal_places: 3 # Number of decimal places estimated parameter values / errors are output in model.results. - remove_files: false # If True, all output files of a non-linear search (e.g. samples, visualization, etc.) are deleted once the model-fit has completed, such that only the .zip file remains. + remove_files: true # If True, all output files of a non-linear search (e.g. samples, visualization, etc.) are deleted once the model-fit has completed, such that only the .zip file remains. samples_to_csv: false parallel: warn_environment_variables: false # If True, a warning is displayed when the search's number of CPU > 1 and enviromment variables related to threading are also > 1. diff --git a/test_autofit/database/identifier/test_converter.py b/test_autofit/database/identifier/test_converter.py index 3e0bf6734..eeed4759f 100644 --- a/test_autofit/database/identifier/test_converter.py +++ b/test_autofit/database/identifier/test_converter.py @@ -6,7 +6,6 @@ import autofit as af from autoconf.conf import output_path_for_test -from autofit import Gaussian from autofit import conf from autofit.database import Fit from autofit.tools.update_identifiers import ( From 229fca7d8e05b193708c5e9f6a0802f6f3069642 Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Tue, 29 Nov 2022 17:56:48 +0000 Subject: [PATCH 021/226] short docs --- autofit/config/visualize.yaml | 36 +++++++++++++++++------------------ 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/autofit/config/visualize.yaml b/autofit/config/visualize.yaml index 1d792e4b0..69f33d0b5 100644 --- a/autofit/config/visualize.yaml +++ b/autofit/config/visualize.yaml @@ -4,25 +4,25 @@ general: plots_search: dynesty: cornerplot: true - cornerpoints: false # Whether to output the Dynesty cornerplot figure during a non-linear search fit. - runplot: true # Whether to output the Dynesty runplot figure during a non-linear search fit. - traceplot: true # Whether to output the Dynesty traceplot figure during a non-linear search fit. + cornerpoints: false # Output Dynesty cornerplot figure during a non-linear search fit? + runplot: true # Output Dynesty runplot figure during a non-linear search fit? + traceplot: true # Output Dynesty traceplot figure during a non-linear search fit? emcee: - corner: true # Whether to output the Emcee corner figure during a non-linear search fit. - likelihood_series: true # Whether to output the Emcee likelihood series figure during a non-linear search fit. - time_series: true # Whether to output the Emcee time series figure during a non-linear search fit. - trajectories: true # Whether to output the Emcee trajectories figure during a non-linear search fit. + corner: true # Output Emcee corner figure during a non-linear search fit? + likelihood_series: true # Output Emcee likelihood series figure during a non-linear search fit? + time_series: true # Output Emcee time series figure during a non-linear search fit? + trajectories: true # Output Emcee trajectories figure during a non-linear search fit? pyswarms: - contour: true # Whether to output the PySwarms contour figure during a non-linear search fit. - cost_history: true # Whether to output the PySwarms cost_history figure during a non-linear search fit. - time_series: true # Whether to output the PySwarms time_series figure during a non-linear search fit. - trajectories: true # Whether to output the PySwarms trajectories figure during a non-linear search fit. + contour: true # Output PySwarms contour figure during a non-linear search fit? + cost_history: true # Output PySwarms cost_history figure during a non-linear search fit? + time_series: true # Output PySwarms time_series figure during a non-linear search fit? + trajectories: true # Output PySwarms trajectories figure during a non-linear search fit? ultranest: - cornerplot: true # Whether to output the Ultranest cornerplot figure during a non-linear search fit. - runplot: true # Whether to output the Ultranest runplot figure during a non-linear search fit. - traceplot: true # Whether to output the Ultranest traceplot figure during a non-linear search fit. + cornerplot: true # Output Ultranest cornerplot figure during a non-linear search fit? + runplot: true # Output Ultranest runplot figure during a non-linear search fit? + traceplot: true # Output Ultranest traceplot figure during a non-linear search fit? zeus: - corner: true # Whether to output the Zeus corner figure during a non-linear search fit. - likelihood_series: true # Whether to output the Zeus likelihood series figure during a non-linear search fit. - time_series: true # Whether to output the Zeus time series figure during a non-linear search fit. - trajectories: true # Whether to output the Zeus trajectories figure during a non-linear search fit. + corner: true # Output Zeus corner figure during a non-linear search fit? + likelihood_series: true # Output Zeus likelihood series figure during a non-linear search fit? + time_series: true # Output Zeus time series figure during a non-linear search fit? + trajectories: true # Output Zeus trajectories figure during a non-linear search fit? From 5ebb6f2241da2568a03654aec876e0f4d094f04b Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Tue, 29 Nov 2022 18:36:11 +0000 Subject: [PATCH 022/226] moved checkpreloads option to autogalaxy --- autofit/config/general.yaml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/autofit/config/general.yaml b/autofit/config/general.yaml index e590d5091..d50164de8 100644 --- a/autofit/config/general.yaml +++ b/autofit/config/general.yaml @@ -14,7 +14,7 @@ output: log_file: output.log # The name of the file the logged output is written to (in the non-linear search output folder) model_results_decimal_places: 3 # Number of decimal places estimated parameter values / errors are output in model.results. remove_files: false # If True, all output files of a non-linear search (e.g. samples, visualization, etc.) are deleted once the model-fit has completed, such that only the .zip file remains. - samples_to_csv: false + samples_to_csv: true # If True, non-linear search samples are written to a .csv file. parallel: warn_environment_variables: true # If True, a warning is displayed when the search's number of CPU > 1 and enviromment variables related to threading are also > 1. profiling: @@ -22,6 +22,5 @@ profiling: should_profile: false # If True, the ``profile_log_likelihood_function()`` function of an analysis class is called throughout a model-fit, profiling run times. repeats: 1 # The number of repeat function calls used to measure run-times when profiling. test: - check_preloads: false # If True, perform a sanity check that the likelihood using preloads is identical to the likelihood not using preloads. exception_override: false parallel_profile: false From 0b267173b3d4342493cafe48ec23027ba0cf9f7d Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Wed, 30 Nov 2022 11:48:56 +0000 Subject: [PATCH 023/226] restructure visualize config --- autofit/config/README.rst | 2 +- autofit/config/notation.yaml | 4 ++-- autofit/config/visualize.yaml | 28 ---------------------- autofit/config/visualize/README.rst | 7 ++++++ autofit/config/visualize/general.yaml | 2 ++ autofit/config/visualize/plots_search.yaml | 24 +++++++++++++++++++ 6 files changed, 36 insertions(+), 31 deletions(-) delete mode 100644 autofit/config/visualize.yaml create mode 100644 autofit/config/visualize/README.rst create mode 100644 autofit/config/visualize/general.yaml create mode 100644 autofit/config/visualize/plots_search.yaml diff --git a/autofit/config/README.rst b/autofit/config/README.rst index 0264f9953..7151fb9c0 100644 --- a/autofit/config/README.rst +++ b/autofit/config/README.rst @@ -4,6 +4,7 @@ Folders ------- - ``priors``: Configs defining default priors assumed on every lens model component and set of parameters. +- ``visualize``: Configs defining what images are output by a lens model fit. Files ----- @@ -11,5 +12,4 @@ Files - ``general.yaml``: Customizes general **PyAutoLens** settings. - ``non-linear.yaml``: Configs for default non-linear search (e.g. MCMC, nested sampling) settings. - ``logging.yaml``: Customizes the logging behaviour of **PyAutoLens**. -- ``visualize.yaml``: Configs defining what images are output by a lens model fit. - ``notation.yaml``: Configs defining labels and formatting of model parameters when used for visualization. diff --git a/autofit/config/notation.yaml b/autofit/config/notation.yaml index e42a62b70..ce9328103 100644 --- a/autofit/config/notation.yaml +++ b/autofit/config/notation.yaml @@ -2,11 +2,11 @@ # visualizing results (for example labeling the axis of the PDF triangle plots output by a non-linear search). -# label: The label given to the `centre` parameter of `Gaussian` model-components. This is used -# in plots like PDF corner plots. +# label: The label given to the each parameter, for plots like PDF corner plots. # For example, if `centre=x`, the plot axis will be labeled 'x'. + # superscript: the superscript used on certain plots that show the results of different model-components. # For example, if `Gaussian=g`, plots where the parameters of the Gaussian model-component have superscript `g`. diff --git a/autofit/config/visualize.yaml b/autofit/config/visualize.yaml deleted file mode 100644 index 69f33d0b5..000000000 --- a/autofit/config/visualize.yaml +++ /dev/null @@ -1,28 +0,0 @@ -general: - general: - backend: default # The matploblib backend used for visualization. `default` uses the system default, can specifiy specific backend (e.g. TKAgg, Qt5Agg, WXAgg). -plots_search: - dynesty: - cornerplot: true - cornerpoints: false # Output Dynesty cornerplot figure during a non-linear search fit? - runplot: true # Output Dynesty runplot figure during a non-linear search fit? - traceplot: true # Output Dynesty traceplot figure during a non-linear search fit? - emcee: - corner: true # Output Emcee corner figure during a non-linear search fit? - likelihood_series: true # Output Emcee likelihood series figure during a non-linear search fit? - time_series: true # Output Emcee time series figure during a non-linear search fit? - trajectories: true # Output Emcee trajectories figure during a non-linear search fit? - pyswarms: - contour: true # Output PySwarms contour figure during a non-linear search fit? - cost_history: true # Output PySwarms cost_history figure during a non-linear search fit? - time_series: true # Output PySwarms time_series figure during a non-linear search fit? - trajectories: true # Output PySwarms trajectories figure during a non-linear search fit? - ultranest: - cornerplot: true # Output Ultranest cornerplot figure during a non-linear search fit? - runplot: true # Output Ultranest runplot figure during a non-linear search fit? - traceplot: true # Output Ultranest traceplot figure during a non-linear search fit? - zeus: - corner: true # Output Zeus corner figure during a non-linear search fit? - likelihood_series: true # Output Zeus likelihood series figure during a non-linear search fit? - time_series: true # Output Zeus time series figure during a non-linear search fit? - trajectories: true # Output Zeus trajectories figure during a non-linear search fit? diff --git a/autofit/config/visualize/README.rst b/autofit/config/visualize/README.rst new file mode 100644 index 000000000..200fa3c53 --- /dev/null +++ b/autofit/config/visualize/README.rst @@ -0,0 +1,7 @@ +The ``config`` folder contains configuration files which customize default **PyAutoLens**. + +Files +----- + +- ``general.yaml``: Customizes general visualization settings (e.g. the matplotlib backend). +- ``plots_search.yaml``: Customize which non-linear search figures are output during a model-fit. \ No newline at end of file diff --git a/autofit/config/visualize/general.yaml b/autofit/config/visualize/general.yaml new file mode 100644 index 000000000..f23eed209 --- /dev/null +++ b/autofit/config/visualize/general.yaml @@ -0,0 +1,2 @@ +general: + backend: default # The matploblib backend used for visualization. `default` uses the system default, can specifiy specific backend (e.g. TKAgg, Qt5Agg, WXAgg). \ No newline at end of file diff --git a/autofit/config/visualize/plots_search.yaml b/autofit/config/visualize/plots_search.yaml new file mode 100644 index 000000000..a536bb7b0 --- /dev/null +++ b/autofit/config/visualize/plots_search.yaml @@ -0,0 +1,24 @@ +dynesty: + cornerplot: true + cornerpoints: false # Output Dynesty cornerplot figure during a non-linear search fit? + runplot: true # Output Dynesty runplot figure during a non-linear search fit? + traceplot: true # Output Dynesty traceplot figure during a non-linear search fit? +emcee: + corner: true # Output Emcee corner figure during a non-linear search fit? + likelihood_series: true # Output Emcee likelihood series figure during a non-linear search fit? + time_series: true # Output Emcee time series figure during a non-linear search fit? + trajectories: true # Output Emcee trajectories figure during a non-linear search fit? +pyswarms: + contour: true # Output PySwarms contour figure during a non-linear search fit? + cost_history: true # Output PySwarms cost_history figure during a non-linear search fit? + time_series: true # Output PySwarms time_series figure during a non-linear search fit? + trajectories: true # Output PySwarms trajectories figure during a non-linear search fit? +ultranest: + cornerplot: true # Output Ultranest cornerplot figure during a non-linear search fit? + runplot: true # Output Ultranest runplot figure during a non-linear search fit? + traceplot: true # Output Ultranest traceplot figure during a non-linear search fit? +zeus: + corner: true # Output Zeus corner figure during a non-linear search fit? + likelihood_series: true # Output Zeus likelihood series figure during a non-linear search fit? + time_series: true # Output Zeus time series figure during a non-linear search fit? + trajectories: true # Output Zeus trajectories figure during a non-linear search fit? \ No newline at end of file From 31267f6b8d5b192e89e0c3a68f62dd2e9c40ca8f Mon Sep 17 00:00:00 2001 From: Richard Date: Fri, 2 Dec 2022 08:44:56 +0000 Subject: [PATCH 024/226] removing identifier version... --- autofit/config/general.yaml | 1 - autofit/mapper/identifier.py | 174 ++++++++--------------------------- 2 files changed, 38 insertions(+), 137 deletions(-) diff --git a/autofit/config/general.yaml b/autofit/config/general.yaml index d50164de8..a217d5189 100644 --- a/autofit/config/general.yaml +++ b/autofit/config/general.yaml @@ -7,7 +7,6 @@ model: ignore_prior_limits: false # If ``True`` the limits applied to priors will be ignored, where limits set upper / lower limits. This stops PriorLimitException's from being raised. output: force_pickle_overwrite: false # If True pickle files output by a search (e.g. samples.pickle) are recreated when a new model-fit is performed. - identifier_version: 4 info_whitespace_length: 80 # Length of whitespace between the parameter names and values in the model.info / result.info log_level: INFO # The level of information output by logging. log_to_file: false # If True, outputs the non-linear search log to a file (and not printed to screen). diff --git a/autofit/mapper/identifier.py b/autofit/mapper/identifier.py index e2dc441c9..fc1a78817 100644 --- a/autofit/mapper/identifier.py +++ b/autofit/mapper/identifier.py @@ -3,11 +3,10 @@ from hashlib import md5 from typing import Optional -from autoconf import conf -# floats are rounded to this increment so floating point errors -# have no impact on identifier value from autoconf.class_path import get_class_path +# floats are rounded to this increment so floating point errors +# have no impact on identifier value RESOLUTION = 1e-8 @@ -19,11 +18,7 @@ class IdentifierField: must be recomputed prior to use. """ - def __set_name__( - self, - owner: object, - name: str - ): + def __set_name__(self, owner: object, name: str): """ Called on instantiation @@ -35,17 +30,9 @@ def __set_name__( The name of the attribute """ self.private = f"_{name}" - setattr( - owner, - self.private, - None - ) + setattr(owner, self.private, None) - def __get__( - self, - obj: object, - objtype=None - ) -> Optional: + def __get__(self, obj: object, objtype=None) -> Optional: """ Retrieve the value of this field. @@ -59,10 +46,7 @@ def __get__( ------- The value (or None if it has not been set) """ - return getattr( - obj, - self.private - ) + return getattr(obj, self.private) def __set__(self, obj, value): """ @@ -76,43 +60,19 @@ def __set__(self, obj, value): A new value for the attribute """ obj._identifier = None - setattr( - obj, - self.private, - value - ) + setattr(obj, self.private, value) class Identifier: - def __init__(self, obj, version=None): + def __init__(self, obj): """ Wraps an object and recursively generates an identifier The version can be set in general.ini (output/identifier_version). It can be overridden by specifying it explicitly in the constructor. - - The version determines how the identifier is generated. - - Version History - --------------- - 1 - Original version - 2 - Accounts for the class of objects - 3 - Include class path to distinguish prior models - 4 - __exclude_identifier_fields__ can be used to exclude specific fields - """ - self._identifier_version = version or conf.instance[ - "general" - ][ - "output" - ][ - "identifier_version" - ] - self.hash_list = list() - self._add_value_to_hash_list( - obj - ) + self._add_value_to_hash_list(obj) @property def description(self): @@ -133,122 +93,64 @@ def _add_value_to_hash_list(self, value): An object """ from .model_object import ModelObject - if self._identifier_version >= 3: - if inspect.isclass(value): - self.add_value_to_hash_list( - get_class_path(value) - ) - return - if isinstance( - value, Exception - ): + if inspect.isclass(value): + self.add_value_to_hash_list(get_class_path(value)) + return + + if isinstance(value, Exception): raise value if hasattr(value, "__dict__"): - if self._identifier_version >= 2: - if hasattr(value, "__class__"): - self.add_value_to_hash_list( - value.__class__.__name__ - ) + if hasattr(value, "__class__"): + self.add_value_to_hash_list(value.__class__.__name__) d = value.__dict__ - if hasattr( - value, - "__identifier_fields__" - ): + if hasattr(value, "__identifier_fields__"): fields = value.__identifier_fields__ try: - d = { - k: getattr(value, k) - for k in fields - } + d = {k: getattr(value, k) for k in fields} except AttributeError as e: raise AssertionError( f"Missing identifier fields for {type(value)}" ) from e - elif hasattr( - value, - "__class__" - ) and not inspect.isclass( - value - ) and not isinstance( - value, - ModelObject + elif ( + hasattr(value, "__class__") + and not inspect.isclass(value) + and not isinstance(value, ModelObject) ): - args = inspect.getfullargspec( - value.__class__ - ).args - d = { - k: v - for k, v - in d.items() - if k in args - } - if self._identifier_version >= 4 and hasattr( - value, - "__exclude_identifier_fields__" - ): + args = inspect.getfullargspec(value.__class__).args + d = {k: v for k, v in d.items() if k in args} + if hasattr(value, "__exclude_identifier_fields__"): excluded_fields = value.__exclude_identifier_fields__ - d = { - k: v - for k, v - in d.items() - if k not in excluded_fields - } - self.add_value_to_hash_list( - d - ) - elif isinstance( - value, dict - ): + d = {k: v for k, v in d.items() if k not in excluded_fields} + self.add_value_to_hash_list(d) + elif isinstance(value, dict): for key, value in value.items(): if not (key.startswith("_") or key in ("id", "paths")): self.hash_list.append(key) - self.add_value_to_hash_list( - value - ) - elif isinstance( - value, float - ): + self.add_value_to_hash_list(value) + elif isinstance(value, float): try: - value = RESOLUTION * round( - value / RESOLUTION - ) + value = RESOLUTION * round(value / RESOLUTION) except OverflowError: pass - self.hash_list.append( - str(value) - ) - elif isinstance( - value, - (str, int, bool) - ): - self.hash_list.append( - str(value) - ) + self.hash_list.append(str(value)) + elif isinstance(value, (str, int, bool)): + self.hash_list.append(str(value)) elif isinstance(value, Iterable): for value in value: - self.add_value_to_hash_list( - value - ) + self.add_value_to_hash_list(value) def add_value_to_hash_list(self, value): - if isinstance( - value, - property - ): + if isinstance(value, property): return - self._add_value_to_hash_list( - value - ) + self._add_value_to_hash_list(value) def __str__(self): - return md5(".".join( - self.hash_list - ).encode("utf-8")).hexdigest() + return md5(".".join(self.hash_list).encode("utf-8")).hexdigest() def __repr__(self): return f"<{self.__class__.__name__} {self}>" From 203e15fde095751213ae8e0468d47013b9fe1557 Mon Sep 17 00:00:00 2001 From: Richard Date: Fri, 2 Dec 2022 08:52:23 +0000 Subject: [PATCH 025/226] fixing tests... --- .../database/identifier/test_converter.py | 175 ++------ .../database/identifier/test_identifiers.py | 393 +++++++----------- 2 files changed, 173 insertions(+), 395 deletions(-) diff --git a/test_autofit/database/identifier/test_converter.py b/test_autofit/database/identifier/test_converter.py index eeed4759f..973f97aa9 100644 --- a/test_autofit/database/identifier/test_converter.py +++ b/test_autofit/database/identifier/test_converter.py @@ -11,114 +11,48 @@ from autofit.tools.update_identifiers import ( update_directory_identifiers, update_database_identifiers, - update_identifiers_from_dict + update_identifiers_from_dict, ) -output_directory = Path( - __file__ -).parent / "output" +output_directory = Path(__file__).parent / "output" -@output_path_for_test( - output_directory, -) -def test_directory( - old_directory_paths -): +@output_path_for_test(output_directory,) +def test_directory(old_directory_paths): old_directory_paths.save_all() - assert listdir( - output_directory / "name" - ) == [ - old_directory_paths.identifier - ] + assert listdir(output_directory / "name") == [old_directory_paths.identifier] - conf.instance["general"]["output"]["identifier_version"] = 3 - update_directory_identifiers( - output_directory - ) + update_directory_identifiers(output_directory) - filename, = listdir( - output_directory / "name" - ) + (filename,) = listdir(output_directory / "name") identifier, suffix = filename.split(".") assert identifier != old_directory_paths.identifier assert suffix == "zip" -@pytest.fixture( - name="old_directory_paths" -) +@pytest.fixture(name="old_directory_paths") def make_old_directory_paths(): - conf.instance["general"]["output"]["identifier_version"] = 1 - search = af.DynestyStatic( - name="name" - ) - search.paths.model = af.PriorModel( - af.Gaussian - ) + search = af.DynestyStatic(name="name") + search.paths.model = af.PriorModel(af.Gaussian) return search.paths -@output_path_for_test( - output_directory -) -def test_update_identifiers_from_dict_no_change(): - search = af.DynestyStatic( - name="name" - ) - search.paths.model = af.PriorModel( - af.Gaussian - ) - old_directory_paths = search.paths - - old_directory_paths.save_all() - old_directory_paths.zip_remove() - - update_identifiers_from_dict( - output_directory, - {} - ) - - filename, = listdir( - output_directory / "name" - ) - - identifier, suffix = filename.split(".") - assert identifier == old_directory_paths.identifier - assert suffix == "zip" - - -@output_path_for_test( - output_directory -) +@output_path_for_test(output_directory) def test_update_identifiers_from_dict(): - search = af.DynestyStatic( - name="name" - ) - search.paths.model = af.PriorModel( - af.Gaussian - ) + search = af.DynestyStatic(name="name") + search.paths.model = af.PriorModel(af.Gaussian) old_directory_paths = search.paths - initial_length = len( - old_directory_paths._identifier.hash_list - ) + initial_length = len(old_directory_paths._identifier.hash_list) old_directory_paths.save_all() old_directory_paths.zip_remove() - update_identifiers_from_dict( - output_directory, - { - "normalization": "magnitude" - } - ) + update_identifiers_from_dict(output_directory, {"normalization": "magnitude"}) - filename, = listdir( - output_directory / "name" - ) + (filename,) = listdir(output_directory / "name") identifier, suffix = filename.split(".") assert identifier != old_directory_paths.identifier @@ -128,9 +62,7 @@ def test_update_identifiers_from_dict(): with zipfile.ZipFile(output_directory / "name" / filename, "r") as f: f.extractall(unzipped) - with open( - unzipped / ".identifier" - ) as f: + with open(unzipped / ".identifier") as f: lines = f.read().split("\n") assert "normalization" not in lines assert "magnitude" in lines @@ -138,83 +70,36 @@ def test_update_identifiers_from_dict(): assert len(lines) == initial_length -@output_path_for_test( - output_directory, -) -def test_zipped_no_change( - old_directory_paths -): +@output_path_for_test(output_directory,) +def test_zipped_no_change(old_directory_paths): old_directory_paths.save_all() old_directory_paths.zip_remove() - update_directory_identifiers( - output_directory - ) + update_directory_identifiers(output_directory) - filename, = listdir( - output_directory / "name" - ) + (filename,) = listdir(output_directory / "name") identifier, suffix = filename.split(".") assert identifier == old_directory_paths.identifier assert suffix == "zip" -@output_path_for_test( - output_directory, -) -def test_zipped( - old_directory_paths -): +@output_path_for_test(output_directory,) +def test_zipped(old_directory_paths): old_directory_paths.save_all() old_directory_paths.zip_remove() - assert listdir( - output_directory / "name" - ) == [ - f"{old_directory_paths.identifier}.zip" - ] - - conf.instance["general"]["output"]["identifier_version"] = 3 - update_directory_identifiers( - output_directory - ) - - filename, = listdir( - output_directory / "name" - ) - - identifier, suffix = filename.split(".") - assert identifier != old_directory_paths.identifier - assert suffix == "zip" + assert listdir(output_directory / "name") == [ + f"{old_directory_paths.identifier}.zip" + ] def test_database(session): - conf.instance["general"]["output"]["identifier_version"] = 1 - search = af.DynestyStatic( - name="name", - session=session - ) - search.paths.model = af.PriorModel( - af.Gaussian - ) + search = af.DynestyStatic(name="name", session=session) + search.paths.model = af.PriorModel(af.Gaussian) search.paths.save_all( - search_config_dict=search.config_dict_search, - info={}, - pickle_files=[] + search_config_dict=search.config_dict_search, info={}, pickle_files=[] ) - fit, = Fit.all( - session=session - ) + (fit,) = Fit.all(session=session) assert fit.id == search.paths.identifier - - conf.instance["general"]["output"]["identifier_version"] = 3 - update_database_identifiers( - session - ) - - fit, = Fit.all( - session=session - ) - assert fit.id != search.paths.identifier diff --git a/test_autofit/database/identifier/test_identifiers.py b/test_autofit/database/identifier/test_identifiers.py index 1912c0771..6925cab0d 100644 --- a/test_autofit/database/identifier/test_identifiers.py +++ b/test_autofit/database/identifier/test_identifiers.py @@ -6,51 +6,15 @@ from autofit.mapper.model_object import Identifier -def set_version(version): - conf.instance[ - "general" - ][ - "output" - ][ - "identifier_version" - ] = version - - -@pytest.fixture( - autouse=True -) -def set_high_version(): - set_version(99) - - -def test_identifier_version(): - set_version(1) - identifier = Identifier(af.Gaussian()) - - set_version(2) - assert identifier != Identifier(af.Gaussian()) - - assert identifier == Identifier( - af.Gaussian(), - version=1 - ) - - def test_unique_tag_is_used(): - identifier = af.DynestyStatic( - "name", - unique_tag="tag" - ).paths._identifier + identifier = af.DynestyStatic("name", unique_tag="tag").paths._identifier assert "tag" in identifier.hash_list def test_class_path(): - identifier = Identifier( - Class, - version=3 - ) - string, = identifier.hash_list + identifier = Identifier(Class,) + (string,) = identifier.hash_list assert "test_autofit.database.identifier.test_identifiers.Class" in string @@ -84,22 +48,12 @@ def __init__(self): def test_exclude_identifier_fields(): - other = ExcludeClass( - three=4 - ) - assert Identifier( - other - ) == Identifier( - ExcludeClass() - ) + other = ExcludeClass(three=4) + assert Identifier(other) == Identifier(ExcludeClass()) other.__exclude_identifier_fields__ = tuple() - assert Identifier( - other - ) != Identifier( - ExcludeClass() - ) + assert Identifier(other) != Identifier(ExcludeClass()) def test_numpy_array(): @@ -109,20 +63,14 @@ def test_numpy_array(): def test_hash_list(): identifier = Identifier(Class()) - assert identifier.hash_list == [ - "Class", "one", "1", "two", "2" - ] + assert identifier.hash_list == ["Class", "one", "1", "two", "2"] def test_constructor_only(): attribute = AttributeClass() attribute.attribute = 1 - assert Identifier( - AttributeClass() - ) == Identifier( - attribute - ) + assert Identifier(AttributeClass()) == Identifier(attribute) def test_exclude_does_no_effect_constructor(): @@ -130,11 +78,7 @@ def test_exclude_does_no_effect_constructor(): attribute.__exclude_identifier_fields__ = tuple() attribute.attribute = 1 - assert Identifier( - AttributeClass() - ) == Identifier( - attribute - ) + assert Identifier(AttributeClass()) == Identifier(attribute) class PrivateClass: @@ -143,9 +87,7 @@ def __init__(self, argument): def test_private_not_included(): - instance = PrivateClass( - argument="one" - ) + instance = PrivateClass(argument="one") identifier = str(Identifier(instance)) instance._argument = "two" @@ -156,28 +98,16 @@ def test_missing_field(): instance = Class() instance.__identifier_fields__ = ("five",) - with pytest.raises( - AssertionError - ): - Identifier( - instance - ) + with pytest.raises(AssertionError): + Identifier(instance) def test_change_class(): gaussian_0 = af.Model( - af.Gaussian, - normalization=af.UniformPrior( - lower_limit=1e-6, - upper_limit=1e6 - ) + af.Gaussian, normalization=af.UniformPrior(lower_limit=1e-6, upper_limit=1e6) ) gaussian_1 = af.Model( - af.Gaussian, - normalization=af.LogUniformPrior( - lower_limit=1e-6, - upper_limit=1e6 - ) + af.Gaussian, normalization=af.LogUniformPrior(lower_limit=1e-6, upper_limit=1e6) ) assert Identifier(gaussian_0) != Identifier(gaussian_1) @@ -185,9 +115,7 @@ def test_change_class(): def test_tiny_change(): # noinspection PyTypeChecker - instance = Class( - one=1.0 - ) + instance = Class(one=1.0) identifier = str(Identifier(instance)) instance.one += 1e-9 @@ -198,47 +126,29 @@ def test_tiny_change(): def test_infinity(): # noinspection PyTypeChecker - instance = Class( - one=float("inf") - ) + instance = Class(one=float("inf")) str(Identifier(instance)) def test_identifier_fields(): other = Class(three=4) - assert Identifier( - Class() - ) == Identifier( - other - ) + assert Identifier(Class()) == Identifier(other) other.__identifier_fields__ = ("one", "two", "three") - assert Identifier( - Class() - ) != Identifier( - other - ) + assert Identifier(Class()) != Identifier(other) def test_unique_tag(): search = af.m.MockSearch() - search.fit( - model=af.Model( - af.Gaussian - ), - analysis=af.m.MockAnalysis() - ) + search.fit(model=af.Model(af.Gaussian), analysis=af.m.MockAnalysis()) identifier = search.paths.identifier search = af.m.MockSearch(unique_tag="dataset") search.fit( - model=af.Model( - af.Gaussian - ), - analysis=af.m.MockAnalysis(), + model=af.Model(af.Gaussian), analysis=af.m.MockAnalysis(), ) assert search.paths.identifier != identifier @@ -247,66 +157,46 @@ def test_unique_tag(): def test_prior(): identifier = af.UniformPrior().identifier assert identifier == af.UniformPrior().identifier - assert identifier != af.UniformPrior( - lower_limit=0.5 - ).identifier - assert identifier != af.UniformPrior( - upper_limit=0.5 - ).identifier + assert identifier != af.UniformPrior(lower_limit=0.5).identifier + assert identifier != af.UniformPrior(upper_limit=0.5).identifier def test_model(): - identifier = af.PriorModel( - af.Gaussian, - centre=af.UniformPrior() - ).identifier - assert identifier == af.PriorModel( - af.Gaussian, - centre=af.UniformPrior() - ).identifier - assert identifier != af.PriorModel( - af.Gaussian, - centre=af.UniformPrior( - upper_limit=0.5 - ) - ).identifier + identifier = af.PriorModel(af.Gaussian, centre=af.UniformPrior()).identifier + assert identifier == af.PriorModel(af.Gaussian, centre=af.UniformPrior()).identifier + assert ( + identifier + != af.PriorModel( + af.Gaussian, centre=af.UniformPrior(upper_limit=0.5) + ).identifier + ) def test_collection(): identifier = af.CollectionPriorModel( - gaussian=af.PriorModel( - af.Gaussian, - centre=af.UniformPrior() - ) - ).identifier - assert identifier == af.CollectionPriorModel( - gaussian=af.PriorModel( - af.Gaussian, - centre=af.UniformPrior() - ) - ).identifier - assert identifier != af.CollectionPriorModel( - gaussian=af.PriorModel( - af.Gaussian, - centre=af.UniformPrior( - upper_limit=0.5 - ) - ) + gaussian=af.PriorModel(af.Gaussian, centre=af.UniformPrior()) ).identifier + assert ( + identifier + == af.CollectionPriorModel( + gaussian=af.PriorModel(af.Gaussian, centre=af.UniformPrior()) + ).identifier + ) + assert ( + identifier + != af.CollectionPriorModel( + gaussian=af.PriorModel(af.Gaussian, centre=af.UniformPrior(upper_limit=0.5)) + ).identifier + ) def test_instance(): - identifier = af.CollectionPriorModel( - gaussian=af.Gaussian() - ).identifier - assert identifier == af.CollectionPriorModel( - gaussian=af.Gaussian() - ).identifier - assert identifier != af.CollectionPriorModel( - gaussian=af.Gaussian( - centre=0.5 - ) - ).identifier + identifier = af.CollectionPriorModel(gaussian=af.Gaussian()).identifier + assert identifier == af.CollectionPriorModel(gaussian=af.Gaussian()).identifier + assert ( + identifier + != af.CollectionPriorModel(gaussian=af.Gaussian(centre=0.5)).identifier + ) def test__identifier_description(): @@ -315,7 +205,9 @@ def test__identifier_description(): af.Gaussian, centre=af.UniformPrior(lower_limit=0.0, upper_limit=1.0), normalization=af.LogUniformPrior(lower_limit=0.001, upper_limit=0.01), - sigma=af.GaussianPrior(mean=0.5, sigma=2.0, lower_limit=-1.0, upper_limit=1.0), + sigma=af.GaussianPrior( + mean=0.5, sigma=2.0, lower_limit=-1.0, upper_limit=1.0 + ), ) ) @@ -325,63 +217,63 @@ def test__identifier_description(): i = 0 - assert description[i] == "CollectionPriorModel"; + assert description[i] == "CollectionPriorModel" i += 1 - assert description[i] == "item_number"; + assert description[i] == "item_number" i += 1 - assert description[i] == "0"; + assert description[i] == "0" i += 1 - assert description[i] == "gaussian"; + assert description[i] == "gaussian" i += 1 - assert description[i] == "PriorModel"; + assert description[i] == "PriorModel" i += 1 - assert description[i] == "cls"; + assert description[i] == "cls" i += 1 - assert description[i] == "autofit.example.model.Gaussian"; + assert description[i] == "autofit.example.model.Gaussian" i += 1 - assert description[i] == "centre"; + assert description[i] == "centre" i += 1 - assert description[i] == "UniformPrior"; + assert description[i] == "UniformPrior" i += 1 - assert description[i] == "lower_limit"; + assert description[i] == "lower_limit" i += 1 - assert description[i] == "0.0"; + assert description[i] == "0.0" i += 1 - assert description[i] == "upper_limit"; + assert description[i] == "upper_limit" i += 1 - assert description[i] == "1.0"; + assert description[i] == "1.0" i += 1 - assert description[i] == "normalization"; + assert description[i] == "normalization" i += 1 - assert description[i] == "LogUniformPrior"; + assert description[i] == "LogUniformPrior" i += 1 - assert description[i] == "lower_limit"; + assert description[i] == "lower_limit" i += 1 - assert description[i] == "0.001"; + assert description[i] == "0.001" i += 1 - assert description[i] == "upper_limit"; + assert description[i] == "upper_limit" i += 1 - assert description[i] == "0.01"; + assert description[i] == "0.01" i += 1 - assert description[i] == "sigma"; + assert description[i] == "sigma" i += 1 - assert description[i] == "GaussianPrior"; + assert description[i] == "GaussianPrior" i += 1 - assert description[i] == "lower_limit"; + assert description[i] == "lower_limit" i += 1 - assert description[i] == "-1.0"; + assert description[i] == "-1.0" i += 1 - assert description[i] == "upper_limit"; + assert description[i] == "upper_limit" i += 1 - assert description[i] == "1.0"; + assert description[i] == "1.0" i += 1 - assert description[i] == "mean"; + assert description[i] == "mean" i += 1 - assert description[i] == "0.5"; + assert description[i] == "0.5" i += 1 - assert description[i] == "sigma"; + assert description[i] == "sigma" i += 1 - assert description[i] == "2.0"; + assert description[i] == "2.0" i += 1 @@ -391,7 +283,9 @@ def test__identifier_description__after_model_and_instance(): af.Gaussian, centre=af.UniformPrior(lower_limit=0.0, upper_limit=1.0), normalization=af.LogUniformPrior(lower_limit=0.001, upper_limit=0.01), - sigma=af.GaussianPrior(mean=0.5, sigma=2.0, lower_limit=-1.0, upper_limit=1.0), + sigma=af.GaussianPrior( + mean=0.5, sigma=2.0, lower_limit=-1.0, upper_limit=1.0 + ), ) ) @@ -399,15 +293,11 @@ def test__identifier_description__after_model_and_instance(): samples = af.m.MockSamples( max_log_likelihood_instance=max_log_likelihood_instance, - gaussian_tuples=[(1.0, 2.0), (3.0, 4.0), (5.0, 6.0)] + gaussian_tuples=[(1.0, 2.0), (3.0, 4.0), (5.0, 6.0)], ) result = af.Result( - samples=samples, - model=model, - sigma=1.0, - use_errors=True, - use_widths=False + samples=samples, model=model, sigma=1.0, use_errors=True, use_widths=False ) model.gaussian.centre = result.model.gaussian.centre @@ -416,7 +306,9 @@ def test__identifier_description__after_model_and_instance(): identifier = Identifier([model]) description = identifier.description - assert description == """CollectionPriorModel + assert ( + description + == """CollectionPriorModel item_number 0 gaussian @@ -445,6 +337,7 @@ def test__identifier_description__after_model_and_instance(): 0.5 sigma 2.0""" + ) def test__identifier_description__after_take_attributes(): @@ -453,7 +346,9 @@ def test__identifier_description__after_take_attributes(): af.Gaussian, centre=af.UniformPrior(lower_limit=0.0, upper_limit=1.0), normalization=af.LogUniformPrior(lower_limit=0.001, upper_limit=0.01), - sigma=af.GaussianPrior(mean=0.5, sigma=2.0, lower_limit=-1.0, upper_limit=1.0), + sigma=af.GaussianPrior( + mean=0.5, sigma=2.0, lower_limit=-1.0, upper_limit=1.0 + ), ) ) @@ -467,87 +362,85 @@ def test__identifier_description__after_take_attributes(): i = 0 - assert description[i] == "CollectionPriorModel"; + assert description[i] == "CollectionPriorModel" i += 1 - assert description[i] == "item_number"; + assert description[i] == "item_number" i += 1 - assert description[i] == "0"; + assert description[i] == "0" i += 1 - assert description[i] == "gaussian"; + assert description[i] == "gaussian" i += 1 - assert description[i] == "PriorModel"; + assert description[i] == "PriorModel" i += 1 - assert description[i] == "cls"; + assert description[i] == "cls" i += 1 - assert description[i] == "autofit.example.model.Gaussian"; + assert description[i] == "autofit.example.model.Gaussian" i += 1 - assert description[i] == "centre"; + assert description[i] == "centre" i += 1 - assert description[i] == "UniformPrior"; + assert description[i] == "UniformPrior" i += 1 - assert description[i] == "lower_limit"; + assert description[i] == "lower_limit" i += 1 - assert description[i] == "0.0"; + assert description[i] == "0.0" i += 1 - assert description[i] == "upper_limit"; + assert description[i] == "upper_limit" i += 1 - assert description[i] == "1.0"; + assert description[i] == "1.0" i += 1 - assert description[i] == "normalization"; + assert description[i] == "normalization" i += 1 - assert description[i] == "LogUniformPrior"; + assert description[i] == "LogUniformPrior" i += 1 - assert description[i] == "lower_limit"; + assert description[i] == "lower_limit" i += 1 - assert description[i] == "0.001"; + assert description[i] == "0.001" i += 1 - assert description[i] == "upper_limit"; + assert description[i] == "upper_limit" i += 1 - assert description[i] == "0.01"; + assert description[i] == "0.01" i += 1 - assert description[i] == "sigma"; + assert description[i] == "sigma" i += 1 - assert description[i] == "GaussianPrior"; + assert description[i] == "GaussianPrior" i += 1 - assert description[i] == "lower_limit"; + assert description[i] == "lower_limit" i += 1 - assert description[i] == "-1.0"; + assert description[i] == "-1.0" i += 1 - assert description[i] == "upper_limit"; + assert description[i] == "upper_limit" i += 1 - assert description[i] == "1.0"; + assert description[i] == "1.0" i += 1 - assert description[i] == "mean"; + assert description[i] == "mean" i += 1 - assert description[i] == "0.5"; + assert description[i] == "0.5" i += 1 - assert description[i] == "sigma"; + assert description[i] == "sigma" i += 1 - assert description[i] == "2.0"; + assert description[i] == "2.0" i += 1 def test_dynesty_static(): - assert Identifier( - af.DynestyStatic() - ).hash_list == [ - 'DynestyStatic', - 'nlive', - '150', - 'bound', - 'multi', - 'sample', - 'auto', - 'bootstrap', - 'enlarge', - 'walks', - '5', - 'facc', - '0.5', - 'slices', - '5', - 'fmove', - '0.9', - 'max_move', - '100' - ] + assert Identifier(af.DynestyStatic()).hash_list == [ + "DynestyStatic", + "nlive", + "150", + "bound", + "multi", + "sample", + "auto", + "bootstrap", + "enlarge", + "walks", + "5", + "facc", + "0.5", + "slices", + "5", + "fmove", + "0.9", + "max_move", + "100", + ] From a5883869f82dfaeb4b6fbfabbb4743875c92898f Mon Sep 17 00:00:00 2001 From: Richard Date: Fri, 2 Dec 2022 09:05:24 +0000 Subject: [PATCH 026/226] remove broken test --- test_autofit/database/identifier/test_converter.py | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/test_autofit/database/identifier/test_converter.py b/test_autofit/database/identifier/test_converter.py index 973f97aa9..4d4f9e6ce 100644 --- a/test_autofit/database/identifier/test_converter.py +++ b/test_autofit/database/identifier/test_converter.py @@ -6,11 +6,9 @@ import autofit as af from autoconf.conf import output_path_for_test -from autofit import conf from autofit.database import Fit from autofit.tools.update_identifiers import ( update_directory_identifiers, - update_database_identifiers, update_identifiers_from_dict, ) @@ -23,14 +21,6 @@ def test_directory(old_directory_paths): assert listdir(output_directory / "name") == [old_directory_paths.identifier] - update_directory_identifiers(output_directory) - - (filename,) = listdir(output_directory / "name") - - identifier, suffix = filename.split(".") - assert identifier != old_directory_paths.identifier - assert suffix == "zip" - @pytest.fixture(name="old_directory_paths") def make_old_directory_paths(): From 5798791facca9672e42b55aae0e9cc35ff205e31 Mon Sep 17 00:00:00 2001 From: Richard Date: Fri, 2 Dec 2022 11:22:23 +0000 Subject: [PATCH 027/226] skip writing visualisation output if paths is type NullPaths --- autofit/non_linear/abstract_search.py | 3 +- autofit/non_linear/mcmc/emcee/emcee.py | 73 ++++++------ autofit/non_linear/mcmc/zeus/zeus.py | 76 ++++++------ autofit/non_linear/nest/dynesty/abstract.py | 125 +++++++++++--------- 4 files changed, 146 insertions(+), 131 deletions(-) diff --git a/autofit/non_linear/abstract_search.py b/autofit/non_linear/abstract_search.py index 28510ce7c..f1bf081e7 100644 --- a/autofit/non_linear/abstract_search.py +++ b/autofit/non_linear/abstract_search.py @@ -33,7 +33,6 @@ from .paths.null import NullPaths from ..graphical.declarative.abstract import PriorFactor from ..graphical.expectation_propagation import AbstractFactorOptimiser -from ..tools.util import IntervalCounter logger = logging.getLogger(__name__) @@ -677,7 +676,7 @@ def perform_update(self, model, analysis, during_analysis): self.paths.save_object("samples", samples) - if not during_analysis: + if not during_analysis and not isinstance(self.paths, NullPaths): self.plot_results(samples=samples) try: diff --git a/autofit/non_linear/mcmc/emcee/emcee.py b/autofit/non_linear/mcmc/emcee/emcee.py index f83ad5b5b..7273009e0 100644 --- a/autofit/non_linear/mcmc/emcee/emcee.py +++ b/autofit/non_linear/mcmc/emcee/emcee.py @@ -6,7 +6,6 @@ import numpy as np from autoconf import conf -from autofit import exc from autofit.database.sqlalchemy_ import sa from autofit.mapper.model_mapper import ModelMapper from autofit.mapper.prior_model.abstract import AbstractPriorModel @@ -20,22 +19,20 @@ class Emcee(AbstractMCMC): - __identifier_fields__ = ( - "nwalkers", - ) + __identifier_fields__ = ("nwalkers",) def __init__( - self, - name: Optional[str] = None, - path_prefix: Optional[str] = None, - unique_tag: Optional[str] = None, - prior_passer: Optional[PriorPasser] = None, - initializer: Optional[Initializer] = None, - auto_correlations_settings=AutoCorrelationsSettings(), - iterations_per_update: int = None, - number_of_cores: int = None, - session: Optional[sa.orm.Session] = None, - **kwargs + self, + name: Optional[str] = None, + path_prefix: Optional[str] = None, + unique_tag: Optional[str] = None, + prior_passer: Optional[PriorPasser] = None, + initializer: Optional[Initializer] = None, + auto_correlations_settings=AutoCorrelationsSettings(), + iterations_per_update: int = None, + number_of_cores: int = None, + session: Optional[sa.orm.Session] = None, + **kwargs ): """ An Emcee non-linear search. @@ -93,7 +90,6 @@ def __init__( self.logger.debug("Creating Emcee Search") class Fitness(AbstractMCMC.Fitness): - def figure_of_merit_from(self, parameter_list): """ The figure of merit is the value that the `NonLinearSearch` uses to sample parameter space. `Emcee` @@ -123,17 +119,13 @@ def _fit(self, model: AbstractPriorModel, analysis, log_likelihood_cap=None): model=model, analysis=analysis ) - pool = self.make_sneaky_pool( - fitness_function - ) + pool = self.make_sneaky_pool(fitness_function) emcee_sampler = emcee.EnsembleSampler( nwalkers=self.config_dict_search["nwalkers"], ndim=model.prior_count, log_prob_fn=fitness_function.__call__, - backend=emcee.backends.HDFBackend( - filename=self.backend_filename - ), + backend=emcee.backends.HDFBackend(filename=self.backend_filename), pool=pool, ) @@ -149,11 +141,17 @@ def _fit(self, model: AbstractPriorModel, analysis, log_likelihood_cap=None): else: iterations_remaining = self.config_dict_run["nsteps"] - total_iterations - self.logger.info("Existing Emcee samples found, resuming non-linear search.") + self.logger.info( + "Existing Emcee samples found, resuming non-linear search." + ) except AttributeError: - unit_parameter_lists, parameter_lists, log_posterior_list = self.initializer.samples_from_model( + ( + unit_parameter_lists, + parameter_lists, + log_posterior_list, + ) = self.initializer.samples_from_model( total_points=emcee_sampler.nwalkers, model=model, fitness_function=fitness_function, @@ -177,11 +175,11 @@ def _fit(self, model: AbstractPriorModel, analysis, log_likelihood_cap=None): iterations = self.iterations_per_update for sample in emcee_sampler.sample( - initial_state=emcee_state, - iterations=iterations, - progress=True, - skip_initial_state_check=True, - store=True, + initial_state=emcee_state, + iterations=iterations, + progress=True, + skip_initial_state_check=True, + store=True, ): pass @@ -209,14 +207,16 @@ def config_dict_with_test_mode_settings_from(self, config_dict): "nsteps": 10, } - def fitness_function_from_model_and_analysis(self, model, analysis, log_likelihood_cap=None): + def fitness_function_from_model_and_analysis( + self, model, analysis, log_likelihood_cap=None + ): return Emcee.Fitness( paths=self.paths, model=model, analysis=analysis, samples_from_model=self.samples_from, - log_likelihood_cap=log_likelihood_cap + log_likelihood_cap=log_likelihood_cap, ) @property @@ -232,9 +232,7 @@ def backend(self) -> emcee.backends.HDFBackend: """ if os.path.isfile(self.backend_filename): - return emcee.backends.HDFBackend( - filename=self.backend_filename - ) + return emcee.backends.HDFBackend(filename=self.backend_filename) else: raise FileNotFoundError( "The file emcee.hdf does not exist at the path " @@ -247,17 +245,18 @@ def samples_from(self, model): model=model, results_internal=self.backend, auto_correlation_settings=self.auto_correlations_settings, - time=self.timer.time + time=self.timer.time, ) def plot_results(self, samples): - def should_plot(name): return conf.instance["visualize"]["plots_search"]["emcee"][name] plotter = EmceePlotter( samples=samples, - output=Output(path=path.join(self.paths.image_path, "search"), format="png") + output=Output( + path=path.join(self.paths.image_path, "search"), format="png" + ), ) if should_plot("corner"): diff --git a/autofit/non_linear/mcmc/zeus/zeus.py b/autofit/non_linear/mcmc/zeus/zeus.py index 0f4fdb318..20ddff04e 100644 --- a/autofit/non_linear/mcmc/zeus/zeus.py +++ b/autofit/non_linear/mcmc/zeus/zeus.py @@ -2,18 +2,17 @@ from typing import Optional import numpy as np -from autofit.database.sqlalchemy_ import sa from autoconf import conf -from autofit import exc +from autofit.database.sqlalchemy_ import sa from autofit.mapper.model_mapper import ModelMapper from autofit.mapper.prior_model.abstract import AbstractPriorModel -from autofit.non_linear.mcmc.abstract_mcmc import AbstractMCMC -from autofit.non_linear.mcmc.auto_correlations import AutoCorrelationsSettings -from autofit.non_linear.mcmc.zeus.samples import SamplesZeus from autofit.non_linear.abstract_search import PriorPasser from autofit.non_linear.initializer import Initializer +from autofit.non_linear.mcmc.abstract_mcmc import AbstractMCMC +from autofit.non_linear.mcmc.auto_correlations import AutoCorrelationsSettings from autofit.non_linear.mcmc.zeus.plotter import ZeusPlotter +from autofit.non_linear.mcmc.zeus.samples import SamplesZeus from autofit.plot.output import Output @@ -24,21 +23,21 @@ class Zeus(AbstractMCMC): "tolerance", "patience", "mu", - "light_mode" + "light_mode", ) def __init__( - self, - name: Optional[str] = None, - path_prefix: Optional[str] = None, - unique_tag: Optional[str] = None, - prior_passer: Optional[PriorPasser] = None, - initializer: Optional[Initializer] = None, - auto_correlations_settings=AutoCorrelationsSettings(), - iterations_per_update: int = None, - number_of_cores: int = None, - session: Optional[sa.orm.Session] = None, - **kwargs + self, + name: Optional[str] = None, + path_prefix: Optional[str] = None, + unique_tag: Optional[str] = None, + prior_passer: Optional[PriorPasser] = None, + initializer: Optional[Initializer] = None, + auto_correlations_settings=AutoCorrelationsSettings(), + iterations_per_update: int = None, + number_of_cores: int = None, + session: Optional[sa.orm.Session] = None, + **kwargs ): """ An Zeus non-linear search. @@ -97,7 +96,6 @@ def __init__( self.logger.debug("Creating Zeus Search") class Fitness(AbstractMCMC.Fitness): - def figure_of_merit_from(self, parameter_list): """ The figure of merit is the value that the `NonLinearSearch` uses to sample parameter space. @@ -164,7 +162,9 @@ def _fit(self, model: AbstractPriorModel, analysis, log_likelihood_cap=None): else: iterations_remaining = self.config_dict_run["nsteps"] - total_iterations - self.logger.info("Existing Zeus samples found, resuming non-linear search.") + self.logger.info( + "Existing Zeus samples found, resuming non-linear search." + ) else: @@ -177,7 +177,11 @@ def _fit(self, model: AbstractPriorModel, analysis, log_likelihood_cap=None): zeus_sampler.ncall_total = 0 - unit_parameter_lists, parameter_lists, log_posterior_list = self.initializer.samples_from_model( + ( + unit_parameter_lists, + parameter_lists, + log_posterior_list, + ) = self.initializer.samples_from_model( total_points=zeus_sampler.nwalkers, model=model, fitness_function=fitness_function, @@ -202,20 +206,17 @@ def _fit(self, model: AbstractPriorModel, analysis, log_likelihood_cap=None): iterations = self.iterations_per_update for sample in zeus_sampler.sample( - start=zeus_state, - log_prob0=log_posterior_list, - iterations=iterations, - progress=True, + start=zeus_state, + log_prob0=log_posterior_list, + iterations=iterations, + progress=True, ): pass zeus_sampler.ncall_total += zeus_sampler.ncall - self.paths.save_object( - "zeus", - zeus_sampler - ) + self.paths.save_object("zeus", zeus_sampler) zeus_state = zeus_sampler.get_last_sample() log_posterior_list = zeus_sampler.get_last_log_prob() @@ -252,14 +253,16 @@ def config_dict_with_test_mode_settings_from(self, config_dict): "nsteps": 10, } - def fitness_function_from_model_and_analysis(self, model, analysis, log_likelihood_cap=None): + def fitness_function_from_model_and_analysis( + self, model, analysis, log_likelihood_cap=None + ): return Zeus.Fitness( paths=self.paths, model=model, analysis=analysis, samples_from_model=self.samples_from, - log_likelihood_cap=log_likelihood_cap + log_likelihood_cap=log_likelihood_cap, ) def samples_from(self, model): @@ -279,23 +282,22 @@ def samples_from(self, model): results_internal=self.zeus_pickled, model=model, auto_correlation_settings=self.auto_correlations_settings, - time=self.timer.time + time=self.timer.time, ) @property def zeus_pickled(self): - return self.paths.load_object( - "zeus" - ) + return self.paths.load_object("zeus") def plot_results(self, samples): - def should_plot(name): return conf.instance["visualize"]["plots_search"]["emcee"][name] plotter = ZeusPlotter( samples=samples, - output=Output(path=path.join(self.paths.image_path, "search"), format="png") + output=Output( + path=path.join(self.paths.image_path, "search"), format="png" + ), ) if should_plot("corner"): @@ -309,5 +311,3 @@ def should_plot(name): if should_plot("time_series"): plotter.time_series() - - diff --git a/autofit/non_linear/nest/dynesty/abstract.py b/autofit/non_linear/nest/dynesty/abstract.py index 90e65f25c..5dffaba2a 100644 --- a/autofit/non_linear/nest/dynesty/abstract.py +++ b/autofit/non_linear/nest/dynesty/abstract.py @@ -1,12 +1,14 @@ import os from abc import ABC -from dynesty.pool import Pool -from dynesty import NestedSampler, DynamicNestedSampler from os import path from typing import Optional, Tuple, Union import numpy as np +from dynesty import NestedSampler, DynamicNestedSampler +from dynesty.pool import Pool + from autoconf import conf +from autofit import exc from autofit.database.sqlalchemy_ import sa from autofit.mapper.prior_model.abstract import AbstractPriorModel from autofit.non_linear.abstract_search import PriorPasser @@ -14,13 +16,10 @@ from autofit.non_linear.nest.dynesty.plotter import DynestyPlotter from autofit.plot.output import Output -from autofit import exc - def prior_transform(cube, model): phys_cube = model.vector_from_unit_vector( - unit_vector=cube, - ignore_prior_limits=True + unit_vector=cube, ignore_prior_limits=True ) for i in range(len(phys_cube)): @@ -30,17 +29,16 @@ def prior_transform(cube, model): class AbstractDynesty(AbstractNest, ABC): - def __init__( - self, - name: str = "", - path_prefix: str = "", - unique_tag: Optional[str] = None, - prior_passer: PriorPasser = None, - iterations_per_update: int = None, - number_of_cores: int = None, - session: Optional[sa.orm.Session] = None, - **kwargs + self, + name: str = "", + path_prefix: str = "", + unique_tag: Optional[str] = None, + prior_passer: PriorPasser = None, + iterations_per_update: int = None, + number_of_cores: int = None, + session: Optional[sa.orm.Session] = None, + **kwargs, ): """ A Dynesty non-linear search. @@ -85,7 +83,7 @@ def __init__( iterations_per_update=iterations_per_update, number_of_cores=number_of_cores, session=session, - **kwargs + **kwargs, ) self.logger.debug(f"Creating {self.__class__.__name__} Search") @@ -105,10 +103,10 @@ def history_save(self): pass def _fit( - self, - model: AbstractPriorModel, - analysis, - log_likelihood_cap: Optional[float] = None + self, + model: AbstractPriorModel, + analysis, + log_likelihood_cap: Optional[float] = None, ): """ Fit a model using Dynesty and the Analysis class which contains the data and returns the log likelihood from @@ -140,15 +138,17 @@ def _fit( """ fitness_function = self.fitness_function_from_model_and_analysis( - model=model, - analysis=analysis, - log_likelihood_cap=log_likelihood_cap, + model=model, analysis=analysis, log_likelihood_cap=log_likelihood_cap, ) if os.path.exists(self.checkpoint_file): - self.logger.info("Existing Dynesty samples found, resuming non-linear search.") + self.logger.info( + "Existing Dynesty samples found, resuming non-linear search." + ) else: - self.logger.info("No Dynesty samples found, beginning new non-linear search. ") + self.logger.info( + "No Dynesty samples found, beginning new non-linear search. " + ) finished = False @@ -158,15 +158,17 @@ def _fit( try: - if conf.instance["non_linear"]["nest"][self.__class__.__name__]["parallel"]["force_x1_cpu"]: + if conf.instance["non_linear"]["nest"][self.__class__.__name__][ + "parallel" + ]["force_x1_cpu"]: raise RuntimeError with Pool( - njobs=self.number_of_cores, - loglike=fitness_function, - prior_transform=prior_transform, - logl_args=(model, fitness_function), - ptform_args=(model,) + njobs=self.number_of_cores, + loglike=fitness_function, + prior_transform=prior_transform, + logl_args=(model, fitness_function), + ptform_args=(model,), ) as pool: sampler = self.sampler_from( @@ -198,14 +200,16 @@ def _fit( fitness_function=fitness_function, checkpoint_exists=checkpoint_exists, pool=None, - queue_size=None + queue_size=None, ) finished = self.run_sampler(sampler=sampler) self.perform_update(model=model, analysis=analysis, during_analysis=True) - def iterations_from(self, sampler: Union[NestedSampler, DynamicNestedSampler]) -> Tuple[int, int]: + def iterations_from( + self, sampler: Union[NestedSampler, DynamicNestedSampler] + ) -> Tuple[int, int]: """ Returns the next number of iterations that a dynesty call will use and the total number of iterations that have been performed so far. @@ -265,14 +269,17 @@ def run_sampler(self, sampler: Union[NestedSampler, DynamicNestedSampler]): maxcall=iterations, print_progress=not self.silence, checkpoint_file=self.checkpoint_file, - **config_dict_run + **config_dict_run, ) iterations_after_run = np.sum(sampler.results.ncall) - return total_iterations == iterations_after_run or total_iterations == self.config_dict_run["maxcall"] + return ( + total_iterations == iterations_after_run + or total_iterations == self.config_dict_run["maxcall"] + ) - def write_uses_pool(self, uses_pool : bool) -> str: + def write_uses_pool(self, uses_pool: bool) -> str: """ If a Dynesty fit does not use a parallel pool, and is then resumed using one, this causes significant slow down. @@ -310,11 +317,7 @@ def config_dict_with_test_mode_settings_from(self, config_dict): "maxcall": 1, } - def live_points_init_from( - self, - model, - fitness_function - ): + def live_points_init_from(self, model, fitness_function): """ By default, dynesty live points are generated via the sampler's in-built initialization. @@ -332,14 +335,22 @@ def live_points_init_from( """ if os.environ.get("PYAUTOFIT_TEST_MODE") == "1": - unit_parameters, parameters, log_likelihood_list = self.initializer.samples_from_model( + ( + unit_parameters, + parameters, + log_likelihood_list, + ) = self.initializer.samples_from_model( total_points=self.total_live_points, model=model, fitness_function=fitness_function, ) - init_unit_parameters = np.zeros(shape=(self.total_live_points, model.prior_count)) - init_parameters = np.zeros(shape=(self.total_live_points, model.prior_count)) + init_unit_parameters = np.zeros( + shape=(self.total_live_points, model.prior_count) + ) + init_parameters = np.zeros( + shape=(self.total_live_points, model.prior_count) + ) init_log_likelihood_list = np.zeros(shape=(self.total_live_points)) for i in range(len(parameters)): @@ -347,7 +358,11 @@ def live_points_init_from( init_parameters[i, :] = np.asarray(parameters[i]) init_log_likelihood_list[i] = np.asarray(log_likelihood_list[i]) - live_points = [init_unit_parameters, init_parameters, init_log_likelihood_list] + live_points = [ + init_unit_parameters, + init_parameters, + init_log_likelihood_list, + ] blobs = np.asarray(self.total_live_points * [False]) @@ -356,16 +371,16 @@ def live_points_init_from( return live_points def sampler_from( - self, - model: AbstractPriorModel, - fitness_function, - checkpoint_exists : bool, - pool: Optional["Pool"], - queue_size: Optional[int] + self, + model: AbstractPriorModel, + fitness_function, + checkpoint_exists: bool, + pool: Optional["Pool"], + queue_size: Optional[int], ): raise NotImplementedError() - def check_pool(self, uses_pool : bool, pool: Pool): + def check_pool(self, uses_pool: bool, pool: Pool): if (uses_pool and pool is None) or (not uses_pool and pool is not None): raise exc.SearchException( @@ -412,7 +427,9 @@ def should_plot(name): plotter = DynestyPlotter( samples=samples, - output=Output(path=path.join(self.paths.image_path, "search"), format="png") + output=Output( + path=path.join(self.paths.image_path, "search"), format="png" + ), ) if should_plot("cornerplot"): From 7aecc980b7077a80ab6ef6f7df07cd90f8a50367 Mon Sep 17 00:00:00 2001 From: Richard Date: Fri, 2 Dec 2022 11:30:47 +0000 Subject: [PATCH 028/226] temp dir samples path --- autofit/non_linear/paths/null.py | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/autofit/non_linear/paths/null.py b/autofit/non_linear/paths/null.py index c3ef00700..052e96b00 100644 --- a/autofit/non_linear/paths/null.py +++ b/autofit/non_linear/paths/null.py @@ -1,3 +1,4 @@ +import tempfile from typing import Optional from .abstract import AbstractPaths @@ -11,6 +12,11 @@ class NullPaths(AbstractPaths): def __init__(self): super().__init__() self.objects = dict() + self._samples_path = tempfile.mkdtemp() + + @property + def samples_path(self) -> str: + return self._samples_path @AbstractPaths.parent.setter def parent(self, parent): @@ -21,11 +27,11 @@ def is_grid_search(self) -> bool: return False def create_child( - self, - name: Optional[str] = None, - path_prefix: Optional[str] = None, - is_identifier_in_paths: Optional[bool] = None, - identifier: Optional[str] = None + self, + name: Optional[str] = None, + path_prefix: Optional[str] = None, + is_identifier_in_paths: Optional[bool] = None, + identifier: Optional[str] = None, ) -> "AbstractPaths": return NullPaths() From cb53d88870b8805c7929aaa853cd46c4f09b029b Mon Sep 17 00:00:00 2001 From: Richard Date: Fri, 2 Dec 2022 11:31:22 +0000 Subject: [PATCH 029/226] fix import --- autofit/non_linear/nest/abstract_nest.py | 49 ++++++++++++------------ 1 file changed, 25 insertions(+), 24 deletions(-) diff --git a/autofit/non_linear/nest/abstract_nest.py b/autofit/non_linear/nest/abstract_nest.py index 76a468bc0..7afb9b55c 100644 --- a/autofit/non_linear/nest/abstract_nest.py +++ b/autofit/non_linear/nest/abstract_nest.py @@ -1,26 +1,29 @@ from typing import Optional from autoconf import conf -from autofit import exc from autofit.database.sqlalchemy_ import sa -from autofit.non_linear.abstract_search import IntervalCounter from autofit.non_linear.abstract_search import NonLinearSearch from autofit.non_linear.abstract_search import PriorPasser -from autofit.non_linear.initializer import InitializerPrior, AbstractInitializer, SpecificRangeInitializer +from autofit.non_linear.initializer import ( + InitializerPrior, + AbstractInitializer, + SpecificRangeInitializer, +) +from autofit.tools.util import IntervalCounter class AbstractNest(NonLinearSearch): def __init__( - self, - name: Optional[str] = None, - path_prefix: Optional[str] = None, - unique_tag: Optional[str] = None, - prior_passer: Optional[PriorPasser] = None, - iterations_per_update: Optional[int] = None, - number_of_cores: Optional[int] = None, - session: Optional[sa.orm.Session] = None, - initializer: Optional[AbstractInitializer] = None, - **kwargs + self, + name: Optional[str] = None, + path_prefix: Optional[str] = None, + unique_tag: Optional[str] = None, + prior_passer: Optional[PriorPasser] = None, + iterations_per_update: Optional[int] = None, + number_of_cores: Optional[int] = None, + session: Optional[sa.orm.Session] = None, + initializer: Optional[AbstractInitializer] = None, + **kwargs ): """ Abstract class of a nested sampling `NonLinearSearch` (e.g. MultiNest, Dynesty). @@ -42,7 +45,9 @@ def __init__( An SQLAlchemy session instance so the results of the model-fit are written to an SQLite database. """ if isinstance(initializer, SpecificRangeInitializer): - raise ValueError("SpecificRangeInitializer cannot be used for nested sampling") + raise ValueError( + "SpecificRangeInitializer cannot be used for nested sampling" + ) super().__init__( name=name, @@ -58,20 +63,14 @@ def __init__( class Fitness(NonLinearSearch.Fitness): def __init__( - self, - paths, - analysis, - model, - samples_from_model, - log_likelihood_cap=None + self, paths, analysis, model, samples_from_model, log_likelihood_cap=None ): - super().__init__( paths=paths, analysis=analysis, model=model, samples_from_model=samples_from_model, - log_likelihood_cap=log_likelihood_cap + log_likelihood_cap=log_likelihood_cap, ) self.stagger_accepted_samples = 0 @@ -89,12 +88,14 @@ def figure_of_merit_from(self, parameter_list): def config_type(self): return conf.instance["non_linear"]["nest"] - def fitness_function_from_model_and_analysis(self, model, analysis, log_likelihood_cap=None): + def fitness_function_from_model_and_analysis( + self, model, analysis, log_likelihood_cap=None + ): return self.__class__.Fitness( paths=self.paths, model=model, analysis=analysis, samples_from_model=self.samples_from, - log_likelihood_cap=log_likelihood_cap + log_likelihood_cap=log_likelihood_cap, ) From 156fad741e959196fd7ab2fa099b6208cb121208 Mon Sep 17 00:00:00 2001 From: Richard Date: Fri, 2 Dec 2022 11:34:36 +0000 Subject: [PATCH 030/226] skip save summary for null paths --- autofit/non_linear/paths/null.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/autofit/non_linear/paths/null.py b/autofit/non_linear/paths/null.py index 052e96b00..19b970849 100644 --- a/autofit/non_linear/paths/null.py +++ b/autofit/non_linear/paths/null.py @@ -14,6 +14,9 @@ def __init__(self): self.objects = dict() self._samples_path = tempfile.mkdtemp() + def save_summary(self, samples, log_likelihood_function_time): + pass + @property def samples_path(self) -> str: return self._samples_path From d47f1ad07183ee701124a063960fd6af71db89ff Mon Sep 17 00:00:00 2001 From: Richard Date: Fri, 2 Dec 2022 11:35:47 +0000 Subject: [PATCH 031/226] still create paths if path prefix found --- autofit/non_linear/abstract_search.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autofit/non_linear/abstract_search.py b/autofit/non_linear/abstract_search.py index f1bf081e7..e8b40f2dd 100644 --- a/autofit/non_linear/abstract_search.py +++ b/autofit/non_linear/abstract_search.py @@ -126,7 +126,7 @@ def __init__( save_all_samples=kwargs.get("save_all_samples", False), unique_tag=unique_tag, ) - elif name is not None: + elif name is not None or path_prefix is not None: logger.debug("Session not found. Using directory output.") paths = DirectoryPaths( name=name, path_prefix=path_prefix, unique_tag=unique_tag From ee27de9c004a09d9fbe9f1939fe1a07991386a15 Mon Sep 17 00:00:00 2001 From: Richard Date: Fri, 2 Dec 2022 11:40:56 +0000 Subject: [PATCH 032/226] looser constraint on path prefix --- autofit/non_linear/abstract_search.py | 2 +- test_autofit/non_linear/paths/test_paths.py | 34 ++++++--------------- 2 files changed, 10 insertions(+), 26 deletions(-) diff --git a/autofit/non_linear/abstract_search.py b/autofit/non_linear/abstract_search.py index e8b40f2dd..9f9643693 100644 --- a/autofit/non_linear/abstract_search.py +++ b/autofit/non_linear/abstract_search.py @@ -126,7 +126,7 @@ def __init__( save_all_samples=kwargs.get("save_all_samples", False), unique_tag=unique_tag, ) - elif name is not None or path_prefix is not None: + elif name is not None or path_prefix: logger.debug("Session not found. Using directory output.") paths = DirectoryPaths( name=name, path_prefix=path_prefix, unique_tag=unique_tag diff --git a/test_autofit/non_linear/paths/test_paths.py b/test_autofit/non_linear/paths/test_paths.py index 3293b69ef..5ec115964 100644 --- a/test_autofit/non_linear/paths/test_paths.py +++ b/test_autofit/non_linear/paths/test_paths.py @@ -8,13 +8,11 @@ from autoconf.conf import output_path_for_test from autofit.non_linear.paths.null import NullPaths + def test_null_paths(): search = af.DynestyStatic() - assert isinstance( - search.paths, - NullPaths - ) + assert isinstance(search.paths, NullPaths) class TestPathDecorator: @@ -41,47 +39,33 @@ def test_paths_argument(self): self.assert_paths_as_expected(search.paths) def test_combination_argument(self): - search = af.m.MockSearch("other", ) + search = af.m.MockSearch("other",) search.paths = af.DirectoryPaths(name="name") self.assert_paths_as_expected(search.paths) -output_path = Path( - __file__ -).parent / "path" +output_path = Path(__file__).parent / "path" -@pytest.fixture( - name="model" -) +@pytest.fixture(name="model") def make_model(): - return af.Model( - af.Gaussian - ) + return af.Model(af.Gaussian) -@output_path_for_test( - output_path -) +@output_path_for_test(output_path) def test_identifier_file(model): paths = af.DirectoryPaths() paths.model = model paths.search = af.DynestyStatic() paths.save_all({}, {}, []) - assert os.path.exists( - output_path / paths.identifier / ".identifier" - ) + assert os.path.exists(output_path / paths.identifier / ".identifier") def test_serialize(model): paths = af.DirectoryPaths() paths.model = model - pickled_paths = pickle.loads( - pickle.dumps( - paths - ) - ) + pickled_paths = pickle.loads(pickle.dumps(paths)) assert pickled_paths.model is not None From 21fbd35569c93e87f1a246304ab607ceb4b59dee Mon Sep 17 00:00:00 2001 From: Richard Date: Fri, 2 Dec 2022 12:15:25 +0000 Subject: [PATCH 033/226] width modifier can be set explicitly on a prior --- autofit/mapper/prior/abstract.py | 8 +- autofit/mapper/prior/width_modifier.py | 16 +- autofit/mapper/prior_model/abstract.py | 590 ++++++------------ .../mapper/test_explicit_width_modifier.py | 11 + 4 files changed, 210 insertions(+), 415 deletions(-) create mode 100644 test_autofit/mapper/test_explicit_width_modifier.py diff --git a/autofit/mapper/prior/abstract.py b/autofit/mapper/prior/abstract.py index 3dab77a41..8db7d1f22 100644 --- a/autofit/mapper/prior/abstract.py +++ b/autofit/mapper/prior/abstract.py @@ -39,6 +39,8 @@ class attribute. "The upper limit of a prior must be greater than its lower limit" ) + self.width_modifier = None + @property def lower_unit_limit(self) -> float: """ @@ -91,8 +93,10 @@ def factor(self): return self.message.factor def assert_within_limits(self, value): - if conf.instance["general"]["model"]["ignore_prior_limits"] or \ - os.environ.get("PYAUTOFIT_TEST_MODE") == "1": + if ( + conf.instance["general"]["model"]["ignore_prior_limits"] + or os.environ.get("PYAUTOFIT_TEST_MODE") == "1" + ): return if not (self.lower_limit <= value <= self.upper_limit): raise exc.PriorLimitException( diff --git a/autofit/mapper/prior/width_modifier.py b/autofit/mapper/prior/width_modifier.py index 0e7f5f11d..4d73d9d32 100644 --- a/autofit/mapper/prior/width_modifier.py +++ b/autofit/mapper/prior/width_modifier.py @@ -1,16 +1,15 @@ import inspect import logging import sys +from abc import ABC, abstractmethod from autoconf import conf from autoconf.exc import ConfigException -logger = logging.getLogger( - __name__ -) +logger = logging.getLogger(__name__) -class WidthModifier: +class WidthModifier(ABC): def __init__(self, value): self.value = float(value) @@ -27,15 +26,16 @@ def from_dict(cls, width_modifier_dict): value=width_modifier_dict["value"] ) + @abstractmethod + def __call__(self, mean): + pass + @property def dict(self): return {"type": self.name_of_class(), "value": self.value} @staticmethod - def for_class_and_attribute_name( - cls: type, - attribute_name: str - ) -> "WidthModifier": + def for_class_and_attribute_name(cls: type, attribute_name: str) -> "WidthModifier": """ Search prior configuration for a WidthModifier. diff --git a/autofit/mapper/prior_model/abstract.py b/autofit/mapper/prior_model/abstract.py index acd53b1cd..d992f1de8 100644 --- a/autofit/mapper/prior_model/abstract.py +++ b/autofit/mapper/prior_model/abstract.py @@ -21,7 +21,11 @@ from autofit.mapper.prior.tuple_prior import TuplePrior from autofit.mapper.prior.width_modifier import WidthModifier from autofit.mapper.prior_model.attribute_pair import DeferredNameValue -from autofit.mapper.prior_model.attribute_pair import cast_collection, PriorNameValue, InstanceNameValue +from autofit.mapper.prior_model.attribute_pair import ( + cast_collection, + PriorNameValue, + InstanceNameValue, +) from autofit.mapper.prior_model.recursion import DynamicRecursionCache from autofit.mapper.prior_model.util import PriorModelNameValue from autofit.text import formatter as frm @@ -29,9 +33,7 @@ from autofit.tools.util import split_paths from autofit.tools.util import info_whitespace -logger = logging.getLogger( - __name__ -) +logger = logging.getLogger(__name__) class Limits: @@ -49,18 +51,20 @@ def wrapper(s, arguments): # noinspection PyProtectedMember failed_assertions = [ assertion - for assertion - in s._assertions - if assertion is False or assertion is not True and not assertion.instance_for_arguments(arguments, ) + for assertion in s._assertions + if assertion is False + or assertion is not True + and not assertion.instance_for_arguments(arguments,) ] number_of_failed_assertions = len(failed_assertions) if number_of_failed_assertions > 0: - name_string = "\n".join([ - assertion.name - for assertion - in failed_assertions - if hasattr(assertion, "name") and assertion.name is not None - ]) + name_string = "\n".join( + [ + assertion.name + for assertion in failed_assertions + if hasattr(assertion, "name") and assertion.name is not None + ] + ) if not conf.instance["general"]["test"]["exception_override"]: raise exc.FitException( f"{number_of_failed_assertions} assertions failed!\n{name_string}" @@ -92,13 +96,9 @@ def __init__(self, model_: "AbstractPriorModel"): ---------- model_ """ - tuple_priors = model_.path_instance_tuples_for_class( - TuplePrior - ) + tuple_priors = model_.path_instance_tuples_for_class(TuplePrior) try: - self.tuple_paths, _ = zip( - *tuple_priors - ) + self.tuple_paths, _ = zip(*tuple_priors) except ValueError: self.tuple_paths = None @@ -113,10 +113,7 @@ def __call__(self, path): class MeanField: - def __init__( - self, - prior_model: "AbstractPriorModel" - ): + def __init__(self, prior_model: "AbstractPriorModel"): """ Implements same interface as graphical code @@ -133,15 +130,10 @@ def __getitem__(self, item): for prior in self.prior_model.priors: if prior == item: return prior - raise KeyError( - f"Could not find {item} in model" - ) + raise KeyError(f"Could not find {item} in model") -def paths_to_tree( - paths: List[Tuple[str, ...]], - tree: Optional[dict] = None -) -> dict: +def paths_to_tree(paths: List[Tuple[str, ...]], tree: Optional[dict] = None) -> dict: """ Recursively convert a list of paths to a tree structure where common paths are matched. @@ -183,10 +175,7 @@ def paths_to_tree( if first not in tree: child = dict() tree[first] = child - tree[first] = paths_to_tree( - [rest], - tree=tree[first] - ) + tree[first] = paths_to_tree([rest], tree=tree[first]) return tree @@ -210,16 +199,10 @@ def without_attributes(self) -> "AbstractModel": without_attributes = copy.copy(self) for key in self.__dict__: if not (key.startswith("_") or key in ("cls", "id")): - delattr( - without_attributes, - key - ) + delattr(without_attributes, key) return without_attributes - def _with_paths( - self, - tree: dict - ) -> "AbstractModel": + def _with_paths(self, tree: dict) -> "AbstractModel": """ Recursively generate a copy of this model retaining only objects specified by the tree. @@ -240,32 +223,14 @@ def _with_paths( with_paths = self.without_attributes() for name, subtree in tree.items(): # noinspection PyProtectedMember - new_value = getattr( - self, - name - ) - if isinstance( - new_value, - ( - AbstractPriorModel, - TuplePrior, - ) - ): - new_value = new_value._with_paths( - subtree - ) - setattr( - with_paths, - name, - new_value - ) + new_value = getattr(self, name) + if isinstance(new_value, (AbstractPriorModel, TuplePrior,)): + new_value = new_value._with_paths(subtree) + setattr(with_paths, name, new_value) return with_paths @split_paths - def with_paths( - self, - paths: List[Tuple[str, ...]] - ) -> "AbstractModel": + def with_paths(self, paths: List[Tuple[str, ...]]) -> "AbstractModel": """ Recursively generate a copy of this model retaining only objects specified by the list of paths. @@ -280,14 +245,9 @@ def with_paths( ------- A copy of this model with a subset of attributes """ - return self._with_paths( - paths_to_tree(paths) - ) + return self._with_paths(paths_to_tree(paths)) - def _without_paths( - self, - tree: dict - ) -> "AbstractModel": + def _without_paths(self, tree: dict) -> "AbstractModel": """ Recursively generate a copy of this model removing objects specified by the tree. @@ -306,37 +266,16 @@ def _without_paths( for name, subtree in tree.items(): # noinspection PyProtectedMember if len(subtree) == 0: - delattr( - without_paths, - name - ) + delattr(without_paths, name) else: - new_value = getattr( - without_paths, - name - ) - if isinstance( - new_value, - ( - AbstractPriorModel, - TuplePrior, - ) - ): - new_value = new_value._without_paths( - subtree - ) - setattr( - without_paths, - name, - new_value - ) + new_value = getattr(without_paths, name) + if isinstance(new_value, (AbstractPriorModel, TuplePrior,)): + new_value = new_value._without_paths(subtree) + setattr(without_paths, name, new_value) return without_paths @split_paths - def without_paths( - self, - paths: List[Tuple[str, ...]] - ) -> "AbstractModel": + def without_paths(self, paths: List[Tuple[str, ...]]) -> "AbstractModel": """ Recursively generate a copy of this model retaining only objects not specified by the list of paths. @@ -351,14 +290,9 @@ def without_paths( ------- A copy of this model with a subset of attributes """ - return self._without_paths( - paths_to_tree(paths) - ) + return self._without_paths(paths_to_tree(paths)) - def index( - self, - path: Tuple[str, ...] - ) -> int: + def index(self, path: Tuple[str, ...]) -> int: """ Retrieve the index of a given path in the model """ @@ -369,9 +303,7 @@ def mean_field(self) -> MeanField: """ Implements the same interface as the graphical code """ - return MeanField( - self - ) + return MeanField(self) @classmethod def from_json(cls, file: str): @@ -434,18 +366,14 @@ def from_object(t, *args, **kwargs): obj.__init__(t, **kwargs) elif isinstance(t, list) or isinstance(t, dict): from autofit.mapper.prior_model import collection - obj = object.__new__( - collection.CollectionPriorModel - ) + + obj = object.__new__(collection.CollectionPriorModel) obj.__init__(t) else: obj = t return obj - def take_attributes( - self, - source: object - ): + def take_attributes(self, source: object): """ Take all attributes with a matching path from the source prior model. @@ -470,23 +398,19 @@ def assert_no_assertions(obj): assert_no_assertions(self) - for path, _ in sum(map( - self.path_instance_tuples_for_class, - (Prior, float, TuplePrior)), - [] + for path, _ in sum( + map(self.path_instance_tuples_for_class, (Prior, float, TuplePrior)), [] ): try: item = copy.copy(source) if isinstance(item, dict): - from autofit.mapper.prior_model.collection import CollectionPriorModel + from autofit.mapper.prior_model.collection import ( + CollectionPriorModel, + ) + item = CollectionPriorModel(item) for attribute in path: - item = copy.copy( - getattr( - item, - attribute - ) - ) + item = copy.copy(getattr(item, attribute)) target = self for attribute in path[:-1]: @@ -518,14 +442,10 @@ def instance_from_unit_vector(self, unit_vector, ignore_prior_limits=False): exc.FitException If any assertion attached to this object returns False. """ - exception_tuples = self.attribute_tuples_with_type( - ConfigException - ) + exception_tuples = self.attribute_tuples_with_type(ConfigException) if len(exception_tuples) > 0: for name, exception in exception_tuples: - logger.exception( - f"Could not load {name} because:\n\n{exception}" - ) + logger.exception(f"Could not load {name} because:\n\n{exception}") names = [name for name, _ in exception_tuples] raise ConfigException( f"No configuration was found for some attributes ({', '.join(names)})" @@ -541,8 +461,7 @@ def instance_from_unit_vector(self, unit_vector, ignore_prior_limits=False): lambda prior_tuple, unit: ( prior_tuple.prior, prior_tuple.prior.value_for( - unit, - ignore_prior_limits=ignore_prior_limits, + unit, ignore_prior_limits=ignore_prior_limits, ), ), self.prior_tuples_ordered_by_id, @@ -550,9 +469,7 @@ def instance_from_unit_vector(self, unit_vector, ignore_prior_limits=False): ) ) - return self.instance_for_arguments( - arguments, - ) + return self.instance_for_arguments(arguments,) @property @cast_collection(PriorNameValue) @@ -564,10 +481,12 @@ def unique_prior_tuples(self): prior_tuple_dict: [(Prior, PriorTuple)] The set of all priors associated with this mapper """ - return list({ - prior_tuple[1]: prior_tuple - for prior_tuple in self.attribute_tuples_with_type(Prior) - }.values()) + return list( + { + prior_tuple[1]: prior_tuple + for prior_tuple in self.attribute_tuples_with_type(Prior) + }.values() + ) @property @cast_collection(PriorNameValue) @@ -585,15 +504,9 @@ def prior_tuples_ordered_by_id(self): @property def priors_ordered_by_id(self): - return [ - prior for _, prior in self.prior_tuples_ordered_by_id - ] + return [prior for _, prior in self.prior_tuples_ordered_by_id] - def vector_from_unit_vector( - self, - unit_vector, - ignore_prior_limits=False - ): + def vector_from_unit_vector(self, unit_vector, ignore_prior_limits=False): """ Parameters ---------- @@ -612,15 +525,16 @@ def vector_from_unit_vector( return list( map( lambda prior_tuple, unit: prior_tuple.prior.value_for( - unit, - ignore_prior_limits=ignore_prior_limits + unit, ignore_prior_limits=ignore_prior_limits ), self.prior_tuples_ordered_by_id, unit_vector, ) ) - def random_unit_vector_within_limits(self, lower_limit: float = 0.0, upper_limit: float = 1.0) -> List[float]: + def random_unit_vector_within_limits( + self, lower_limit: float = 0.0, upper_limit: float = 1.0 + ) -> List[float]: """ Generate a random vector of unit values by drawing uniform random values between 0 and 1. @@ -632,14 +546,13 @@ def random_unit_vector_within_limits(self, lower_limit: float = 0.0, upper_limit return [ random.uniform( max(lower_limit, prior.lower_unit_limit), - min(upper_limit, prior.upper_unit_limit)) + min(upper_limit, prior.upper_unit_limit), + ) for prior in self.priors_ordered_by_id ] def random_vector_from_priors_within_limits( - self, - lower_limit: float = 0.0, - upper_limit: float = 1.0 + self, lower_limit: float = 0.0, upper_limit: float = 1.0 ) -> List[float]: """ Returns a random vector of physical values by drawing uniform random values between lower and upper limits @@ -669,17 +582,14 @@ def random_vector_from_priors_within_limits( vector = [] for prior in self.priors_ordered_by_id: - vector.append(prior.random( - lower_limit=lower_limit, - upper_limit=upper_limit, - )) + vector.append( + prior.random(lower_limit=lower_limit, upper_limit=upper_limit,) + ) return vector def random_instance_from_priors_within_limits( - self, - lower_limit: float = 0.0, - upper_limit: float = 1.0 + self, lower_limit: float = 0.0, upper_limit: float = 1.0 ): """ Returns a random instance of physical values by drawing uniform random values between lower and upper limits @@ -705,8 +615,7 @@ def random_instance_from_priors_within_limits( """ vector = self.random_vector_from_priors_within_limits( - lower_limit=lower_limit, - upper_limit=upper_limit + lower_limit=lower_limit, upper_limit=upper_limit ) return self.instance_from_vector(vector=vector) @@ -720,7 +629,9 @@ def random_vector_from_priors(self): physical_values: [float] A list of physical values constructed by taking random values from each prior. """ - return self.random_vector_from_priors_within_limits(lower_limit=0.0, upper_limit=1.0) + return self.random_vector_from_priors_within_limits( + lower_limit=0.0, upper_limit=1.0 + ) @property def physical_values_from_prior_medians(self): @@ -733,11 +644,7 @@ def physical_values_from_prior_medians(self): """ return self.vector_from_unit_vector([0.5] * len(self.unique_prior_tuples)) - def instance_from_vector( - self, - vector, - ignore_prior_limits=False - ): + def instance_from_vector(self, vector, ignore_prior_limits=False): """ Returns a ModelInstance, which has an attribute and class instance corresponding to every `PriorModel` attributed to this instance. @@ -771,27 +678,21 @@ def instance_from_vector( for prior, value in arguments.items(): prior.assert_within_limits(value) - return self.instance_for_arguments( - arguments, - ) + return self.instance_for_arguments(arguments,) def has_instance(self, cls) -> bool: """ True iff this model contains an instance of type cls, recursively. """ - return len( - self.attribute_tuples_with_type(cls) - ) > 0 + return len(self.attribute_tuples_with_type(cls)) > 0 def has_model(self, cls) -> bool: """ True iff this model contains a PriorModel of type cls, recursively. """ - return len( - self.model_tuples_with_type(cls) - ) > 0 + return len(self.model_tuples_with_type(cls)) > 0 def is_only_model(self, cls) -> bool: """ @@ -800,12 +701,11 @@ def is_only_model(self, cls) -> bool: of type cls, recursively. """ from .prior_model import PriorModel + cls_models = self.model_tuples_with_type(cls) other_models = [ - value for _, value - in self.attribute_tuples_with_type( - PriorModel - ) + value + for _, value in self.attribute_tuples_with_type(PriorModel) if value.prior_count > 0 ] return len(cls_models) > 0 and len(cls_models) == len(other_models) @@ -879,13 +779,7 @@ def gaussian_prior_model_for_arguments(self, arguments): raise NotImplementedError() def mapper_from_gaussian_tuples( - self, - tuples, - a=None, - r=None, - use_errors=True, - use_widths=True, - no_limits=False + self, tuples, a=None, r=None, use_errors=True, use_widths=True, no_limits=False ): """ The widths of the new priors are taken from the @@ -933,7 +827,10 @@ def mapper_from_gaussian_tuples( if name.isdigit(): name = self.path_for_prior(prior_tuple.prior)[-2] - width_modifier = WidthModifier.for_class_and_attribute_name(cls, name) + width_modifier = ( + prior.width_modifier + or WidthModifier.for_class_and_attribute_name(cls, name) + ) if a is not None and r is not None: raise exc.PriorException( @@ -950,10 +847,7 @@ def mapper_from_gaussian_tuples( limits = (float("-inf"), float("inf")) else: try: - limits = Limits.for_class_and_attributes_name( - cls, - name - ) + limits = Limits.for_class_and_attributes_name(cls, name) except ConfigException: limits = prior.limits @@ -964,22 +858,18 @@ def mapper_from_gaussian_tuples( elif use_errors and use_widths: sigma = max(tuples[i][1], width) else: - raise exc.PriorException("use_passed_errors and use_widths are both False, meaning there is no " - "way to pass priors to set up the new model's Gaussian Priors.") + raise exc.PriorException( + "use_passed_errors and use_widths are both False, meaning there is no " + "way to pass priors to set up the new model's Gaussian Priors." + ) - new_prior = GaussianPrior( - mean, - sigma, - *limits - ) + new_prior = GaussianPrior(mean, sigma, *limits) new_prior.id = prior.id arguments[prior] = new_prior return self.mapper_from_prior_arguments(arguments) - def with_limits( - self, limits: List[Tuple[float, float]] - ) -> "AbstractPriorModel": + def with_limits(self, limits: List[Tuple[float, float]]) -> "AbstractPriorModel": """ Create a new instance of this model where each prior is updated to lie between new limits. @@ -993,12 +883,12 @@ def with_limits( ------- A new model with updated limits """ - return self.mapper_from_prior_arguments({ - prior: prior.with_limits(*prior_limits) - for prior, prior_limits in zip( - self.priors_ordered_by_id, limits - ) - }) + return self.mapper_from_prior_arguments( + { + prior: prior.with_limits(*prior_limits) + for prior, prior_limits in zip(self.priors_ordered_by_id, limits) + } + ) def instance_from_prior_medians(self, ignore_prior_limits=False): """ @@ -1009,12 +899,12 @@ def instance_from_prior_medians(self, ignore_prior_limits=False): A list of physical values """ return self.instance_from_unit_vector( - unit_vector=[0.5] * self.prior_count, ignore_prior_limits=ignore_prior_limits + unit_vector=[0.5] * self.prior_count, + ignore_prior_limits=ignore_prior_limits, ) def log_prior_list_from_vector( - self, - vector: [float], + self, vector: [float], ): """ Compute the log priors of every parameter in a vector, using the Prior of every parameter. @@ -1030,7 +920,9 @@ def log_prior_list_from_vector( """ return list( map( - lambda prior_tuple, value: prior_tuple.prior.log_prior_from_value(value=value), + lambda prior_tuple, value: prior_tuple.prior.log_prior_from_value( + value=value + ), self.prior_tuples_ordered_by_id, vector, ) @@ -1044,12 +936,11 @@ def random_instance(self, ignore_prior_limits=False): if ignore_prior_limits: return self.instance_from_unit_vector( unit_vector=[random.random() for _ in range(self.prior_count)], - ignore_prior_limits=ignore_prior_limits + ignore_prior_limits=ignore_prior_limits, ) - return self.instance_for_arguments({ - prior: prior.random() - for prior in self.priors - }) + return self.instance_for_arguments( + {prior: prior.random() for prior in self.priors} + ) @staticmethod @DynamicRecursionCache() @@ -1068,6 +959,7 @@ def from_instance(instance, model_classes=tuple()): A concrete child of an abstract prior model """ from autofit.mapper.prior_model import collection + if isinstance(instance, (Prior, AbstractPriorModel)): return instance elif isinstance(instance, list): @@ -1079,6 +971,7 @@ def from_instance(instance, model_classes=tuple()): ) elif isinstance(instance, model.ModelInstance): from autofit.mapper import model_mapper + result = model_mapper.ModelMapper() for key, value in instance.dict.items(): setattr( @@ -1097,10 +990,7 @@ def from_instance(instance, model_classes=tuple()): for key, value in instance.items() } ) - elif isinstance( - instance, - (np.ndarray, types.FunctionType) - ): + elif isinstance(instance, (np.ndarray, types.FunctionType)): return instance else: from .prior_model import PriorModel @@ -1124,10 +1014,10 @@ def from_instance(instance, model_classes=tuple()): def items(self): return ( - self.direct_prior_tuples - + self.direct_instance_tuples - + self.direct_prior_model_tuples - + self.direct_tuple_priors + self.direct_prior_tuples + + self.direct_instance_tuples + + self.direct_prior_model_tuples + + self.direct_tuple_priors ) @property @@ -1189,15 +1079,12 @@ def prior_tuples(self): priors: [(String, Prior))] """ # noinspection PyUnresolvedReferences - return self.attribute_tuples_with_type( - Prior, - ignore_children=True - ) + return self.attribute_tuples_with_type(Prior, ignore_children=True) def __eq__(self, other): return ( - isinstance(other, AbstractPriorModel) - and self.direct_prior_model_tuples == other.direct_prior_model_tuples + isinstance(other, AbstractPriorModel) + and self.direct_prior_model_tuples == other.direct_prior_model_tuples ) @property @@ -1221,14 +1108,12 @@ def prior_class_dict(self): return d def _instance_for_arguments( - self, - arguments: Dict[Prior, float], + self, arguments: Dict[Prior, float], ): raise NotImplementedError() def instance_for_arguments( - self, - arguments, + self, arguments, ): """ Returns an instance of the model for a set of arguments @@ -1243,14 +1128,9 @@ def instance_for_arguments( An instance of the class """ logger.debug(f"Creating an instance for arguments") - return self._instance_for_arguments( - arguments, - ) + return self._instance_for_arguments(arguments,) - def path_for_name( - self, - name: str - ) -> Tuple[str, ...]: + def path_for_name(self, name: str) -> Tuple[str, ...]: """ Find the path to a prior in the model that matches a given name. @@ -1279,14 +1159,7 @@ def path_for_name( """ def _explode_path(path_): - return tuple( - string - for part in path_ - for string - in part.split( - "_" - ) - ) + return tuple(string for part in path_ for string in part.split("_")) exploded = tuple(name.split("_")) for path, _ in self.path_priors_tuples: @@ -1294,23 +1167,16 @@ def _explode_path(path_): if exploded_path == exploded: return path - for path, prior_tuple in self.path_instance_tuples_for_class( - TuplePrior - ): + for path, prior_tuple in self.path_instance_tuples_for_class(TuplePrior): for name, prior in prior_tuple.prior_tuples: total_path = path[:-1] + (name,) - exploded_path = _explode_path( - total_path - ) + exploded_path = _explode_path(total_path) if exploded_path == exploded: return path + (name,) - raise AssertionError( - f"No path was found matching {name}" - ) + raise AssertionError(f"No path was found matching {name}") def instance_from_prior_name_arguments( - self, - prior_name_arguments: Dict[str, float] + self, prior_name_arguments: Dict[str, float] ): """ Instantiate the model from the names of priors and @@ -1327,16 +1193,14 @@ def instance_from_prior_name_arguments( ------- An instance of the model """ - return self.instance_from_path_arguments({ - self.path_for_name(name): value - for name, value - in prior_name_arguments.items() - }) + return self.instance_from_path_arguments( + { + self.path_for_name(name): value + for name, value in prior_name_arguments.items() + } + ) - def instance_from_path_arguments( - self, - path_arguments: Dict[Tuple[str], float] - ): + def instance_from_path_arguments(self, path_arguments: Dict[Tuple[str], float]): """ Create an instance from a dictionary mapping paths to tuples to corresponding values. @@ -1354,15 +1218,9 @@ def instance_from_path_arguments( An instance of the model """ arguments = { - self.object_for_path( - path - ): value - for path, value - in path_arguments.items() + self.object_for_path(path): value for path, value in path_arguments.items() } - return self._instance_for_arguments( - arguments - ) + return self._instance_for_arguments(arguments) @property def prior_count(self) -> int: @@ -1377,16 +1235,10 @@ def priors(self): @property def _prior_id_map(self): - return { - prior.id: prior - for prior - in self.priors - } + return {prior.id: prior for prior in self.priors} def prior_with_id(self, prior_id): - return self._prior_id_map[ - prior_id - ] + return self._prior_id_map[prior_id] def name_for_prior(self, prior): for prior_model_name, prior_model in self.direct_prior_model_tuples: @@ -1446,16 +1298,9 @@ def paths(self) -> List[Path]: A list of paths to all the priors in the model, ordered by their ids """ - return [ - path - for path, _ - in self.path_priors_tuples - ] + return [path for path, _ in self.path_priors_tuples] - def sort_priors_alphabetically( - self, - priors: Iterable[Prior] - ) -> List[Prior]: + def sort_priors_alphabetically(self, priors: Iterable[Prior]) -> List[Prior]: """ Sort priors by their paths according to this model. @@ -1468,10 +1313,7 @@ def sort_priors_alphabetically( ------- Those priors sorted alphabetically by path. """ - return sorted( - priors, - key=lambda prior: self.path_for_prior(prior) - ) + return sorted(priors, key=lambda prior: self.path_for_prior(prior)) def path_for_prior(self, prior: Prior) -> Optional[Path]: """ @@ -1511,18 +1353,11 @@ def path_float_tuples(self): @property def unique_prior_paths(self): - return [ - item[0] for item in - self.unique_path_prior_tuples - ] + return [item[0] for item in self.unique_path_prior_tuples] @property def unique_path_prior_tuples(self): - unique = { - item[1]: item - for item - in self.path_priors_tuples - }.values() + unique = {item[1]: item for item in self.path_priors_tuples}.values() return sorted(unique, key=lambda item: item[1].id) @property @@ -1543,7 +1378,8 @@ def prior_prior_model_dict(self): def log_prior_list_from(self, parameter_lists: List[List]) -> List: return [ - sum(self.log_prior_list_from_vector(vector=vector)) for vector in parameter_lists + sum(self.log_prior_list_from_vector(vector=vector)) + for vector in parameter_lists ] @property @@ -1554,23 +1390,20 @@ def info(self) -> str: This information is extracted from each priors *model_info* property. """ - formatter = TextFormatter( - line_length=info_whitespace() - ) + formatter = TextFormatter(line_length=info_whitespace()) for t in self.path_instance_tuples_for_class( - ( - Prior, float, tuple - ), - ignore_children=True + (Prior, float, tuple), ignore_children=True ): formatter.add(*t) - return '\n\n'.join([ - f"Total Free Parameters = {self.prior_count}", - f"{self.parameterization}", - formatter.text - ]) + return "\n\n".join( + [ + f"Total Free Parameters = {self.prior_count}", + f"{self.parameterization}", + formatter.text, + ] + ) @property def order_no(self) -> str: @@ -1586,10 +1419,7 @@ def order_no(self) -> str: values = [ str(float(value)) for _, value in sorted( - self.path_instance_tuples_for_class(( - Prior, float - )), - key=lambda t: t[0] + self.path_instance_tuples_for_class((Prior, float)), key=lambda t: t[0] ) ] return ":".join(values) @@ -1602,23 +1432,14 @@ def parameterization(self) -> str: """ from .prior_model import PriorModel - formatter = TextFormatter( - line_length=info_whitespace() - ) + formatter = TextFormatter(line_length=info_whitespace()) for t in self.path_instance_tuples_for_class( - ( - Prior, - float, - tuple, - ), - ignore_children=True + (Prior, float, tuple,), ignore_children=True ): for i in range(len(t[0])): path = t[0][:i] - obj = self.object_for_path( - path - ) + obj = self.object_for_path(path) if isinstance(obj, TuplePrior): continue if isinstance(obj, AbstractPriorModel): @@ -1630,9 +1451,7 @@ def parameterization(self) -> str: else: name = type(obj).__name__ - formatter.add( - ("model",) + path, f"{name} (N={n})" - ) + formatter.add(("model",) + path, f"{name} (N={n})") return formatter.text @@ -1656,12 +1475,8 @@ def all_paths_prior_tuples(self) -> List[Tuple[Tuple[Path], Prior]]: for path, prior in self.path_priors_tuples: prior_paths_dict[prior] += (path,) return sorted( - [ - (paths, prior) - for prior, paths - in prior_paths_dict.items() - ], - key=lambda item: item[1].id + [(paths, prior) for prior, paths in prior_paths_dict.items()], + key=lambda item: item[1].id, ) @property @@ -1682,15 +1497,8 @@ def all_name_prior_tuples(self) -> List[Tuple[Tuple[str], Prior]]: """ path_modifier = TuplePathModifier(self) return [ - ( - tuple( - "_".join(path_modifier(path)) - for path in paths - ), - prior - ) - for paths, prior - in self.all_paths_prior_tuples + (tuple("_".join(path_modifier(path)) for path in paths), prior) + for paths, prior in self.all_paths_prior_tuples ] @property @@ -1701,20 +1509,11 @@ def model_component_and_parameter_names(self) -> List[str]: model_mapper. Latex tags are properties of each model class.""" prior_paths = self.unique_prior_paths - tuple_filter = TuplePathModifier( - self - ) + tuple_filter = TuplePathModifier(self) - prior_paths = list(map( - tuple_filter, - prior_paths - )) + prior_paths = list(map(tuple_filter, prior_paths)) - return [ - "_".join(path) - for path - in prior_paths - ] + return ["_".join(path) for path in prior_paths] @property def parameter_names(self) -> List[str]: @@ -1741,7 +1540,9 @@ def parameter_labels(self) -> List[str]: parameter_labels = [] for parameter_name in self.parameter_names: - parameter_label = frm.convert_name_to_label(parameter_name=parameter_name, name_to_label=True) + parameter_label = frm.convert_name_to_label( + parameter_name=parameter_name, name_to_label=True + ) parameter_labels.append(parameter_label) return parameter_labels @@ -1768,27 +1569,17 @@ def superscripts(self) -> List[str]: prior_paths = self.unique_prior_paths - tuple_filter = TuplePathModifier( - self - ) + tuple_filter = TuplePathModifier(self) - prior_paths = map( - tuple_filter, - prior_paths - ) + prior_paths = map(tuple_filter, prior_paths) - superscripts = [ - path[-2] if len(path) > 1 else path[0] - for path - in prior_paths - ] + superscripts = [path[-2] if len(path) > 1 else path[0] for path in prior_paths] return [ - superscript - if not superscript_overwrite - else superscript_overwrite - for superscript, superscript_overwrite - in zip(superscripts, self.superscripts_overwrite_via_config) + superscript if not superscript_overwrite else superscript_overwrite + for superscript, superscript_overwrite in zip( + superscripts, self.superscripts_overwrite_via_config + ) ] @property @@ -1815,13 +1606,7 @@ def superscripts_overwrite_via_config(self) -> List[str]: for prior_name, prior in self.prior_tuples_ordered_by_id: cls = self.prior_class_dict[prior] try: - superscript = conf.instance[ - "notation" - ][ - "label" - ][ - "superscript" - ][ + superscript = conf.instance["notation"]["label"]["superscript"][ cls.__name__ ] @@ -1846,8 +1631,7 @@ def parameter_labels_with_superscripts(self) -> List[str]: return [ f"{label}^{{\\rm {superscript}}}" - for label, superscript in - zip(self.parameter_labels, self.superscripts) + for label, superscript in zip(self.parameter_labels, self.superscripts) ] @property @@ -1862,11 +1646,7 @@ def parameter_labels_with_superscripts_latex(self) -> List[str]: cornerplot. """ - return [ - f"${label}$" - for label in - self.parameter_labels_with_superscripts - ] + return [f"${label}$" for label in self.parameter_labels_with_superscripts] def transfer_classes(instance, mapper, model_classes=None): @@ -1889,7 +1669,7 @@ def transfer_classes(instance, mapper, model_classes=None): try: mapper_value = getattr(mapper, key) if isinstance(mapper_value, Prior) or isinstance( - mapper_value, AnnotationPriorModel + mapper_value, AnnotationPriorModel ): setattr(mapper, key, instance_value) continue diff --git a/test_autofit/mapper/test_explicit_width_modifier.py b/test_autofit/mapper/test_explicit_width_modifier.py new file mode 100644 index 000000000..a3b0ca818 --- /dev/null +++ b/test_autofit/mapper/test_explicit_width_modifier.py @@ -0,0 +1,11 @@ +import autofit as af + + +def test_explicit_width_modifier(): + model = af.Model(af.Gaussian) + model.centre.width_modifier = af.RelativeWidthModifier(2.0) + + updated = model.mapper_from_gaussian_tuples([(1.0, 1.0), (1.0, 1.0), (1.0, 1.0),]) + + assert updated.centre.sigma == 2.0 + assert updated.normalization.sigma == 1.0 From c0f2e79cc1392320beac048318d5348feab85621 Mon Sep 17 00:00:00 2001 From: Richard Date: Fri, 2 Dec 2022 12:17:57 +0000 Subject: [PATCH 034/226] propagate width modifier --- autofit/mapper/prior_model/abstract.py | 1 + .../mapper/test_explicit_width_modifier.py | 24 +++++++++++++++---- 2 files changed, 20 insertions(+), 5 deletions(-) diff --git a/autofit/mapper/prior_model/abstract.py b/autofit/mapper/prior_model/abstract.py index d992f1de8..930824360 100644 --- a/autofit/mapper/prior_model/abstract.py +++ b/autofit/mapper/prior_model/abstract.py @@ -865,6 +865,7 @@ def mapper_from_gaussian_tuples( new_prior = GaussianPrior(mean, sigma, *limits) new_prior.id = prior.id + new_prior.width_modifier = prior.width_modifier arguments[prior] = new_prior return self.mapper_from_prior_arguments(arguments) diff --git a/test_autofit/mapper/test_explicit_width_modifier.py b/test_autofit/mapper/test_explicit_width_modifier.py index a3b0ca818..843e4ccdf 100644 --- a/test_autofit/mapper/test_explicit_width_modifier.py +++ b/test_autofit/mapper/test_explicit_width_modifier.py @@ -1,11 +1,25 @@ +import pytest + import autofit as af -def test_explicit_width_modifier(): +@pytest.fixture(name="width_modifier") +def make_width_modifier(): + return af.RelativeWidthModifier(2.0) + + +@pytest.fixture(name="updated_model") +def make_updated_model(width_modifier): model = af.Model(af.Gaussian) - model.centre.width_modifier = af.RelativeWidthModifier(2.0) + model.centre.width_modifier = width_modifier + + return model.mapper_from_gaussian_tuples([(1.0, 1.0), (1.0, 1.0), (1.0, 1.0),]) + + +def test_explicit_width_modifier(updated_model): + assert updated_model.centre.sigma == 2.0 + assert updated_model.normalization.sigma == 1.0 - updated = model.mapper_from_gaussian_tuples([(1.0, 1.0), (1.0, 1.0), (1.0, 1.0),]) - assert updated.centre.sigma == 2.0 - assert updated.normalization.sigma == 1.0 +def test_propagation(updated_model, width_modifier): + assert updated_model.centre.width_modifier is width_modifier From adb0d14d7fb8066e3a11ce265e09880828c8a51f Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Fri, 2 Dec 2022 12:29:40 +0000 Subject: [PATCH 035/226] readme --- autofit/config/priors/README.rst | 37 ++++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) create mode 100644 autofit/config/priors/README.rst diff --git a/autofit/config/priors/README.rst b/autofit/config/priors/README.rst new file mode 100644 index 000000000..9f1fa6f57 --- /dev/null +++ b/autofit/config/priors/README.rst @@ -0,0 +1,37 @@ +The prior config files contain the default priors and related variables for every light profile and mass profile +when it is used as a model. + +They appear as follows: + +.. code-block:: bash + + Gaussian: + centre: + type: Uniform + lower_limit: 0.0 + upper_limit: 100.0 + width_modifier: + type: Absolute + value: 20.0 + gaussian_limits: + lower: -inf + upper: inf + +The sections of this example config set the following: + + type {Uniform, Gaussian, LogUniform} + The default prior given to this parameter when used by the non-linear search. In the example above, a + UniformPrior is used with lower_limit of 0.0 and upper_limit of 4.0. A GaussianPrior could be used by + putting "Gaussian" in the "type" box, with "mean" and "sigma" used to set the default values. Any prior can be + set in an analogous fashion (see the example configs). + width_modifier + When the results of a search are passed to a subsequent search to set up the priors of its non-linear search, + this entry describes how the Prior is passed. For a full description of prior passing, checkout the examples + in 'autolens_workspace/examples/complex/linking'. + gaussian_limits + When the results of a search are passed to a subsequent search, they are passed using a GaussianPrior. The + gaussian_limits set the physical lower and upper limits of this GaussianPrior, such that parameter samples + can not go beyond these limits. + +The files ``template_module.yaml`` and ``TemplateObject.yaml`` give templates one can use to set up prior default +configs for your own model components. \ No newline at end of file From 044399ffb940c3db8d540ffd3477bd80951503b2 Mon Sep 17 00:00:00 2001 From: Richard Date: Fri, 2 Dec 2022 15:27:28 +0000 Subject: [PATCH 036/226] fic --- autofit/non_linear/paths/abstract.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autofit/non_linear/paths/abstract.py b/autofit/non_linear/paths/abstract.py index 2dd0d6749..684e4e6d4 100644 --- a/autofit/non_linear/paths/abstract.py +++ b/autofit/non_linear/paths/abstract.py @@ -328,7 +328,7 @@ def load_samples_info(self): def _save_search(self, config_dict): with open_(path.join(self.output_path, "search.json"), "w+") as f: - json.dump(config_dict, f, indent=4) + json.dump(dict(config_dict), f, indent=4) def save_summary(self, samples, log_likelihood_function_time): From 2f93ed1be209f04dab50c8ccabb14050e1c52c7f Mon Sep 17 00:00:00 2001 From: Richard Date: Fri, 2 Dec 2022 15:46:38 +0000 Subject: [PATCH 037/226] make maxcall optional --- autofit/non_linear/nest/dynesty/abstract.py | 119 +++++++++++--------- 1 file changed, 68 insertions(+), 51 deletions(-) diff --git a/autofit/non_linear/nest/dynesty/abstract.py b/autofit/non_linear/nest/dynesty/abstract.py index 90e65f25c..f43b8d6e9 100644 --- a/autofit/non_linear/nest/dynesty/abstract.py +++ b/autofit/non_linear/nest/dynesty/abstract.py @@ -19,8 +19,7 @@ def prior_transform(cube, model): phys_cube = model.vector_from_unit_vector( - unit_vector=cube, - ignore_prior_limits=True + unit_vector=cube, ignore_prior_limits=True ) for i in range(len(phys_cube)): @@ -30,17 +29,16 @@ def prior_transform(cube, model): class AbstractDynesty(AbstractNest, ABC): - def __init__( - self, - name: str = "", - path_prefix: str = "", - unique_tag: Optional[str] = None, - prior_passer: PriorPasser = None, - iterations_per_update: int = None, - number_of_cores: int = None, - session: Optional[sa.orm.Session] = None, - **kwargs + self, + name: str = "", + path_prefix: str = "", + unique_tag: Optional[str] = None, + prior_passer: PriorPasser = None, + iterations_per_update: int = None, + number_of_cores: int = None, + session: Optional[sa.orm.Session] = None, + **kwargs, ): """ A Dynesty non-linear search. @@ -85,7 +83,7 @@ def __init__( iterations_per_update=iterations_per_update, number_of_cores=number_of_cores, session=session, - **kwargs + **kwargs, ) self.logger.debug(f"Creating {self.__class__.__name__} Search") @@ -105,10 +103,10 @@ def history_save(self): pass def _fit( - self, - model: AbstractPriorModel, - analysis, - log_likelihood_cap: Optional[float] = None + self, + model: AbstractPriorModel, + analysis, + log_likelihood_cap: Optional[float] = None, ): """ Fit a model using Dynesty and the Analysis class which contains the data and returns the log likelihood from @@ -140,15 +138,17 @@ def _fit( """ fitness_function = self.fitness_function_from_model_and_analysis( - model=model, - analysis=analysis, - log_likelihood_cap=log_likelihood_cap, + model=model, analysis=analysis, log_likelihood_cap=log_likelihood_cap, ) if os.path.exists(self.checkpoint_file): - self.logger.info("Existing Dynesty samples found, resuming non-linear search.") + self.logger.info( + "Existing Dynesty samples found, resuming non-linear search." + ) else: - self.logger.info("No Dynesty samples found, beginning new non-linear search. ") + self.logger.info( + "No Dynesty samples found, beginning new non-linear search. " + ) finished = False @@ -158,15 +158,17 @@ def _fit( try: - if conf.instance["non_linear"]["nest"][self.__class__.__name__]["parallel"]["force_x1_cpu"]: + if conf.instance["non_linear"]["nest"][self.__class__.__name__][ + "parallel" + ]["force_x1_cpu"]: raise RuntimeError with Pool( - njobs=self.number_of_cores, - loglike=fitness_function, - prior_transform=prior_transform, - logl_args=(model, fitness_function), - ptform_args=(model,) + njobs=self.number_of_cores, + loglike=fitness_function, + prior_transform=prior_transform, + logl_args=(model, fitness_function), + ptform_args=(model,), ) as pool: sampler = self.sampler_from( @@ -198,14 +200,16 @@ def _fit( fitness_function=fitness_function, checkpoint_exists=checkpoint_exists, pool=None, - queue_size=None + queue_size=None, ) finished = self.run_sampler(sampler=sampler) self.perform_update(model=model, analysis=analysis, during_analysis=True) - def iterations_from(self, sampler: Union[NestedSampler, DynamicNestedSampler]) -> Tuple[int, int]: + def iterations_from( + self, sampler: Union[NestedSampler, DynamicNestedSampler] + ) -> Tuple[int, int]: """ Returns the next number of iterations that a dynesty call will use and the total number of iterations that have been performed so far. @@ -229,7 +233,7 @@ def iterations_from(self, sampler: Union[NestedSampler, DynamicNestedSampler]) - except AttributeError: total_iterations = 0 - if self.config_dict_run["maxcall"] is not None: + if self.config_dict_run.get("maxcall") is not None: iterations = self.config_dict_run["maxcall"] - total_iterations return iterations, total_iterations @@ -265,14 +269,17 @@ def run_sampler(self, sampler: Union[NestedSampler, DynamicNestedSampler]): maxcall=iterations, print_progress=not self.silence, checkpoint_file=self.checkpoint_file, - **config_dict_run + **config_dict_run, ) iterations_after_run = np.sum(sampler.results.ncall) - return total_iterations == iterations_after_run or total_iterations == self.config_dict_run["maxcall"] + return ( + total_iterations == iterations_after_run + or total_iterations == self.config_dict_run["maxcall"] + ) - def write_uses_pool(self, uses_pool : bool) -> str: + def write_uses_pool(self, uses_pool: bool) -> str: """ If a Dynesty fit does not use a parallel pool, and is then resumed using one, this causes significant slow down. @@ -310,11 +317,7 @@ def config_dict_with_test_mode_settings_from(self, config_dict): "maxcall": 1, } - def live_points_init_from( - self, - model, - fitness_function - ): + def live_points_init_from(self, model, fitness_function): """ By default, dynesty live points are generated via the sampler's in-built initialization. @@ -332,14 +335,22 @@ def live_points_init_from( """ if os.environ.get("PYAUTOFIT_TEST_MODE") == "1": - unit_parameters, parameters, log_likelihood_list = self.initializer.samples_from_model( + ( + unit_parameters, + parameters, + log_likelihood_list, + ) = self.initializer.samples_from_model( total_points=self.total_live_points, model=model, fitness_function=fitness_function, ) - init_unit_parameters = np.zeros(shape=(self.total_live_points, model.prior_count)) - init_parameters = np.zeros(shape=(self.total_live_points, model.prior_count)) + init_unit_parameters = np.zeros( + shape=(self.total_live_points, model.prior_count) + ) + init_parameters = np.zeros( + shape=(self.total_live_points, model.prior_count) + ) init_log_likelihood_list = np.zeros(shape=(self.total_live_points)) for i in range(len(parameters)): @@ -347,7 +358,11 @@ def live_points_init_from( init_parameters[i, :] = np.asarray(parameters[i]) init_log_likelihood_list[i] = np.asarray(log_likelihood_list[i]) - live_points = [init_unit_parameters, init_parameters, init_log_likelihood_list] + live_points = [ + init_unit_parameters, + init_parameters, + init_log_likelihood_list, + ] blobs = np.asarray(self.total_live_points * [False]) @@ -356,16 +371,16 @@ def live_points_init_from( return live_points def sampler_from( - self, - model: AbstractPriorModel, - fitness_function, - checkpoint_exists : bool, - pool: Optional["Pool"], - queue_size: Optional[int] + self, + model: AbstractPriorModel, + fitness_function, + checkpoint_exists: bool, + pool: Optional["Pool"], + queue_size: Optional[int], ): raise NotImplementedError() - def check_pool(self, uses_pool : bool, pool: Pool): + def check_pool(self, uses_pool: bool, pool: Pool): if (uses_pool and pool is None) or (not uses_pool and pool is not None): raise exc.SearchException( @@ -412,7 +427,9 @@ def should_plot(name): plotter = DynestyPlotter( samples=samples, - output=Output(path=path.join(self.paths.image_path, "search"), format="png") + output=Output( + path=path.join(self.paths.image_path, "search"), format="png" + ), ) if should_plot("cornerplot"): From 0b87123bfebf3d87de89df85c48c042ca8304a50 Mon Sep 17 00:00:00 2001 From: Richard Date: Fri, 2 Dec 2022 15:48:12 +0000 Subject: [PATCH 038/226] make maxcall optional --- autofit/non_linear/nest/dynesty/abstract.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autofit/non_linear/nest/dynesty/abstract.py b/autofit/non_linear/nest/dynesty/abstract.py index f43b8d6e9..c82c24713 100644 --- a/autofit/non_linear/nest/dynesty/abstract.py +++ b/autofit/non_linear/nest/dynesty/abstract.py @@ -276,7 +276,7 @@ def run_sampler(self, sampler: Union[NestedSampler, DynamicNestedSampler]): return ( total_iterations == iterations_after_run - or total_iterations == self.config_dict_run["maxcall"] + or total_iterations == self.config_dict_run.get("maxcall") ) def write_uses_pool(self, uses_pool: bool) -> str: From 8d959845f10946c16da43305aa62ba75beafc6b6 Mon Sep 17 00:00:00 2001 From: Richard Date: Fri, 2 Dec 2022 15:52:17 +0000 Subject: [PATCH 039/226] try-except --- autofit/non_linear/nest/dynesty/abstract.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/autofit/non_linear/nest/dynesty/abstract.py b/autofit/non_linear/nest/dynesty/abstract.py index c82c24713..d282ede7d 100644 --- a/autofit/non_linear/nest/dynesty/abstract.py +++ b/autofit/non_linear/nest/dynesty/abstract.py @@ -260,7 +260,10 @@ def run_sampler(self, sampler: Union[NestedSampler, DynamicNestedSampler]): """ config_dict_run = self.config_dict_run - config_dict_run.pop("maxcall") + try: + config_dict_run.pop("maxcall") + except KeyError: + pass iterations, total_iterations = self.iterations_from(sampler=sampler) From 988d9dc5fcc487bd47cdae02f9b51b793055b50c Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Fri, 2 Dec 2022 17:03:00 +0000 Subject: [PATCH 040/226] error message --- autofit/config/priors/README.rst | 4 +- autofit/non_linear/abstract_search.py | 2 +- test_autofit/config/non_linear.yaml | 327 ------------------ .../non_linear/optimize/test_pyswarms.py | 209 ++++++----- 4 files changed, 107 insertions(+), 435 deletions(-) delete mode 100644 test_autofit/config/non_linear.yaml diff --git a/autofit/config/priors/README.rst b/autofit/config/priors/README.rst index 9f1fa6f57..fab4dcd04 100644 --- a/autofit/config/priors/README.rst +++ b/autofit/config/priors/README.rst @@ -1,5 +1,5 @@ -The prior config files contain the default priors and related variables for every light profile and mass profile -when it is used as a model. +The prior config files contain the default priors and related variables for every model component when it is used as a +model. They appear as follows: diff --git a/autofit/non_linear/abstract_search.py b/autofit/non_linear/abstract_search.py index 28510ce7c..9afc679c8 100644 --- a/autofit/non_linear/abstract_search.py +++ b/autofit/non_linear/abstract_search.py @@ -228,7 +228,7 @@ def __init__( If you "know what you are doing" and do not want these environment variables to be set to one, you can disable this warning by changing the following entry in the config files: - `config -> general.ini -> [parallel] -> warn_environment_variable=False` + `config -> general.yaml -> parallel: -> warn_environment_variable=False` """ ) ) diff --git a/test_autofit/config/non_linear.yaml b/test_autofit/config/non_linear.yaml deleted file mode 100644 index ef8bb77c0..000000000 --- a/test_autofit/config/non_linear.yaml +++ /dev/null @@ -1,327 +0,0 @@ -mock: - MockOptimizer: - initialize: - method: prior - printing: - silence: false - prior_passer: - sigma: 3.0 - use_errors: true - use_widths: true - updates: - iterations_per_update: 2500 - remove_state_files_at_end: true - MockSearch: - initialize: - method: prior - printing: - silence: false - prior_passer: - sigma: 3.0 - use_errors: true - use_widths: true - search: {} - updates: - iterations_per_update: 2500 - remove_state_files_at_end: true -mcmc: - Emcee: - auto_correlations: - change_threshold: 0.01 - check_for_convergence: true - check_size: 100 - required_length: 50 - initialize: - ball_lower_limit: 0.49 - ball_upper_limit: 0.51 - method: prior - parallel: - number_of_cores: 1 - printing: - silence: false - prior_passer: - sigma: 3.0 - use_errors: true - use_widths: true - run: - nsteps: 2000 - search: - nwalkers: 50 - updates: - iterations_per_update: 2500 - remove_state_files_at_end: true - Zeus: - auto_correlations: - change_threshold: 0.01 - check_for_convergence: true - check_size: 100 - required_length: 50 - initialize: - ball_lower_limit: 0.49 - ball_upper_limit: 0.51 - method: prior - parallel: - number_of_cores: 1 - printing: - silence: false - prior_passer: - sigma: 3.0 - use_errors: true - use_widths: true - run: - check_walkers: true - light_mode: false - maxiter: 10000 - maxsteps: 10000 - mu: 1.0 - nsteps: 2000 - patience: 5 - shuffle_ensemble: true - tolerance: 0.05 - tune: true - vectorize: false - search: - nwalkers: 50 - updates: - iterations_per_update: 500 - log_every_update: 1 - model_results_every_update: 1 - remove_state_files_at_end: true - visualize_every_update: 1 -nest: - DynestyDynamic: - initialize: - method: prior - parallel: - number_of_cores: 4 - printing: - silence: false - prior_passer: - sigma: 3.0 - use_errors: true - use_widths: true - run: - dlogz_init: 0.01 - logl_max_init: .inf - maxcall: null - maxcall_init: null - maxiter: null - maxiter_init: null - n_effective_init: .inf - nlive_init: 5 - search: - bootstrap: 1 - bound: balls - enlarge: 2 - facc: 0.6 - fmove: 0.8 - logl_max: .inf - max_move: 101 - sample: rwalk - slices: 6 - update_interval: 2.0 - walks: 26 - updates: - iterations_per_update: 501 - log_every_update: 1 - model_results_every_update: 1 - remove_state_files_at_end: true - visualize_every_update: 1 - DynestyStatic: - initialize: - method: prior - parallel: - force_x1_cpu: false - number_of_cores: 1 - printing: - silence: false - prior_passer: - sigma: 3.0 - use_errors: true - use_widths: true - run: - dlogz: null - logl_max: .inf - maxcall: null - maxiter: null - n_effective: 0 - search: - bootstrap: null - bound: multi - enlarge: null - facc: 0.5 - first_update: null - fmove: 0.9 - max_move: 100 - nlive: 150 - sample: auto - slices: 5 - update_interval: null - walks: 5 - updates: - iterations_per_update: 500 - log_every_update: 1 - model_results_every_update: 1 - remove_state_files_at_end: true - visualize_every_update: 1 - MultiNest: - printing: - silence: false - prior_passer: - sigma: 3.0 - use_errors: true - use_widths: true - search: - const_efficiency_mode: false - context: 0 - evidence_tolerance: 0.5 - importance_nested_sampling: true - init_mpi: false - log_zero: -1.0e+100 - max_iter: 0 - max_modes: 100 - mode_tolerance: -1.0e+89 - multimodal: true - n_iter_before_update: 100 - n_live_points: 50 - null_log_evidence: -1.0e+90 - resume: true - sampling_efficiency: 0.6 - seed: -1.0 - verbose: false - write_output: true - updates: - iterations_per_update: 2500 - log_every_update: 1 - model_results_every_update: 1 - remove_state_files_at_end: true - should_update_sym: 250 - visualize_every_update: 1 -optimize: - DownhillSimplex: - initialize: - method: prior - printing: - silence: false - prior_passer: - sigma: 3.0 - use_errors: true - use_widths: true - search: - disp: 1 - ftol: 0.0001 - full_output: 0 - maxfun: null - maxiter: null - retall: 0 - xtol: 0.0001 - updates: - iterations_per_update: 11 - log_every_update: 1 - model_results_every_update: 1 - remove_state_files_at_end: true - visualize_every_update: 1 - Drawer: - initialize: - ball_lower_limit: 0.49 - ball_upper_limit: 0.51 - method: prior - printing: - silence: false - prior_passer: - sigma: 3.0 - use_errors: true - use_widths: true - search: - total_draws: 10 - updates: - iterations_per_update: 500 - log_every_update: 1 - model_results_every_update: 1 - remove_state_files_at_end: true - visualize_every_update: 1 - LBFGS: - initialize: - ball_lower_limit: 0.49 - ball_upper_limit: 0.51 - method: prior - options: - disp: false - eps: 1.0e-08 - ftol: 2.220446049250313e-09 - gtol: 1.0e-05 - iprint: -1.0 - maxcor: 10 - maxfun: 15000 - maxiter: 15000 - maxls: 20 - printing: - silence: false - prior_passer: - sigma: 3.0 - use_errors: true - use_widths: true - search: - tol: null - updates: - iterations_per_update: 11 - log_every_update: 1 - model_results_every_update: 1 - remove_state_files_at_end: true - visualize_every_update: 1 - PySwarmsGlobal: - initialize: - ball_lower_limit: 0.49 - ball_upper_limit: 0.51 - method: prior - parallel: - number_of_cores: 1 - printing: - silence: false - prior_passer: - sigma: 3.0 - use_errors: true - use_widths: true - run: - iters: 2000 - search: - cognitive: 0.1 - ftol: -.inf - inertia: 0.3 - n_particles: 50 - social: 0.2 - updates: - iterations_per_update: 11 - log_every_update: 1 - model_results_every_update: 1 - remove_state_files_at_end: true - visualize_every_update: 1 - PySwarmsLocal: - initialize: - ball_lower_limit: 0.49 - ball_upper_limit: 0.51 - method: prior - parallel: - number_of_cores: 1 - printing: - silence: false - prior_passer: - sigma: 3.0 - use_errors: true - use_widths: true - run: - iters: 2000 - search: - cognitive: 0.1 - ftol: -.inf - inertia: 0.3 - minkowski_p_norm: 2 - n_particles: 50 - number_of_k_neighbors: 3 - social: 0.2 - updates: - iterations_per_update: 11 - log_every_update: 1 - model_results_every_update: 1 - remove_state_files_at_end: true - visualize_every_update: 1 diff --git a/test_autofit/non_linear/optimize/test_pyswarms.py b/test_autofit/non_linear/optimize/test_pyswarms.py index 4787688fb..b8d79cd42 100644 --- a/test_autofit/non_linear/optimize/test_pyswarms.py +++ b/test_autofit/non_linear/optimize/test_pyswarms.py @@ -7,108 +7,107 @@ pytestmark = pytest.mark.filterwarnings("ignore::FutureWarning") -class TestPySwarmsGlobalConfig: - def test__loads_from_config_file_correct(self): - pso = af.PySwarmsGlobal( - prior_passer=af.PriorPasser(sigma=2.0, use_errors=False, use_widths=False), - n_particles=51, - iters=2001, - cognitive=0.4, - social=0.5, - inertia=0.6, - initializer=af.InitializerBall(lower_limit=0.2, upper_limit=0.8), - iterations_per_update=10, - number_of_cores=2, - ) - - assert pso.prior_passer.sigma == 2.0 - assert pso.prior_passer.use_errors is False - assert pso.prior_passer.use_widths is False - assert pso.config_dict_search["n_particles"] == 51 - assert pso.config_dict_search["cognitive"] == 0.4 - assert pso.config_dict_run["iters"] == 2001 - assert isinstance(pso.initializer, af.InitializerBall) - assert pso.initializer.lower_limit == 0.2 - assert pso.initializer.upper_limit == 0.8 - assert pso.iterations_per_update == 10 - assert pso.number_of_cores == 2 - - pso = af.PySwarmsGlobal() - - assert pso.prior_passer.sigma == 3.0 - assert pso.prior_passer.use_errors is True - assert pso.prior_passer.use_widths is True - assert pso.config_dict_search["n_particles"] == 50 - assert pso.config_dict_search["cognitive"] == 0.1 - assert pso.config_dict_run["iters"] == 2000 - assert isinstance(pso.initializer, af.InitializerPrior) - assert pso.iterations_per_update == 11 - assert pso.number_of_cores == 1 - - pso = af.PySwarmsLocal( - prior_passer=af.PriorPasser(sigma=2.0, use_errors=False, use_widths=False), - n_particles=51, - iters=2001, - cognitive=0.4, - social=0.5, - inertia=0.6, - number_of_k_neighbors=4, - minkowski_p_norm=1, - initializer=af.InitializerBall(lower_limit=0.2, upper_limit=0.8), - iterations_per_update=10, - number_of_cores=2, - ) - - assert pso.prior_passer.sigma == 2.0 - assert pso.prior_passer.use_errors is False - assert pso.prior_passer.use_widths is False - assert pso.config_dict_search["n_particles"] == 51 - assert pso.config_dict_search["cognitive"] == 0.4 - assert pso.config_dict_run["iters"] == 2001 - assert isinstance(pso.initializer, af.InitializerBall) - assert pso.initializer.lower_limit == 0.2 - assert pso.initializer.upper_limit == 0.8 - assert pso.iterations_per_update == 10 - assert pso.number_of_cores == 2 - - pso = af.PySwarmsLocal() - - assert pso.prior_passer.sigma == 3.0 - assert pso.prior_passer.use_errors is True - assert pso.prior_passer.use_widths is True - assert pso.config_dict_search["n_particles"] == 50 - assert pso.config_dict_search["cognitive"] == 0.1 - assert pso.config_dict_run["iters"] == 2000 - assert isinstance(pso.initializer, af.InitializerPrior) - assert pso.iterations_per_update == 11 - assert pso.number_of_cores == 1 - - def test__samples_from_model(self): - pyswarms = af.PySwarmsGlobal() - pyswarms.paths = af.DirectoryPaths(path_prefix=path.join("non_linear", "pyswarms")) - pyswarms.paths._identifier = "tag" - - model = af.ModelMapper(mock_class=af.m.MockClassx3) - model.mock_class.one = af.LogUniformPrior(lower_limit=1e-8, upper_limit=100.0) - model.mock_class.two = af.LogUniformPrior(lower_limit=1e-8, upper_limit=100.0) - model.mock_class.three = af.LogUniformPrior(lower_limit=1e-8, upper_limit=100.0) - # model.mock_class.four = af.LogUniformPrior(lower_limit=1e-8, upper_limit=100.0) - - samples = pyswarms.samples_from(model=model) - - assert isinstance(samples.parameter_lists, list) - assert isinstance(samples.parameter_lists[0], list) - assert isinstance(samples.log_likelihood_list, list) - assert isinstance(samples.log_prior_list, list) - assert isinstance(samples.log_posterior_list, list) - - assert samples.parameter_lists[0] == pytest.approx( - [50.1254, 1.04626, 10.09456], 1.0e-4 - ) - - assert samples.log_likelihood_list[0] == pytest.approx(-5071.80777, 1.0e-4) - assert samples.log_posterior_list[0] == pytest.approx(-5070.73298, 1.0e-4) - assert samples.weight_list[0] == 1.0 - - assert len(samples.parameter_lists) == 500 - assert len(samples.log_likelihood_list) == 500 +def test__loads_from_config_file_correct(): + pso = af.PySwarmsGlobal( + prior_passer=af.PriorPasser(sigma=2.0, use_errors=False, use_widths=False), + n_particles=51, + iters=2001, + cognitive=0.4, + social=0.5, + inertia=0.6, + initializer=af.InitializerBall(lower_limit=0.2, upper_limit=0.8), + iterations_per_update=10, + number_of_cores=2, + ) + + assert pso.prior_passer.sigma == 2.0 + assert pso.prior_passer.use_errors is False + assert pso.prior_passer.use_widths is False + assert pso.config_dict_search["n_particles"] == 51 + assert pso.config_dict_search["cognitive"] == 0.4 + assert pso.config_dict_run["iters"] == 2001 + assert isinstance(pso.initializer, af.InitializerBall) + assert pso.initializer.lower_limit == 0.2 + assert pso.initializer.upper_limit == 0.8 + assert pso.iterations_per_update == 10 + assert pso.number_of_cores == 2 + + pso = af.PySwarmsGlobal() + + assert pso.prior_passer.sigma == 3.0 + assert pso.prior_passer.use_errors is True + assert pso.prior_passer.use_widths is True + assert pso.config_dict_search["n_particles"] == 51 + assert pso.config_dict_search["cognitive"] == 0.1 + assert pso.config_dict_run["iters"] == 2000 + assert isinstance(pso.initializer, af.InitializerPrior) + assert pso.iterations_per_update == 11 + assert pso.number_of_cores == 1 + + pso = af.PySwarmsLocal( + prior_passer=af.PriorPasser(sigma=2.0, use_errors=False, use_widths=False), + n_particles=51, + iters=2001, + cognitive=0.4, + social=0.5, + inertia=0.6, + number_of_k_neighbors=4, + minkowski_p_norm=1, + initializer=af.InitializerBall(lower_limit=0.2, upper_limit=0.8), + iterations_per_update=10, + number_of_cores=2, + ) + + assert pso.prior_passer.sigma == 2.0 + assert pso.prior_passer.use_errors is False + assert pso.prior_passer.use_widths is False + assert pso.config_dict_search["n_particles"] == 51 + assert pso.config_dict_search["cognitive"] == 0.4 + assert pso.config_dict_run["iters"] == 2001 + assert isinstance(pso.initializer, af.InitializerBall) + assert pso.initializer.lower_limit == 0.2 + assert pso.initializer.upper_limit == 0.8 + assert pso.iterations_per_update == 10 + assert pso.number_of_cores == 2 + + pso = af.PySwarmsLocal() + + assert pso.prior_passer.sigma == 3.0 + assert pso.prior_passer.use_errors is True + assert pso.prior_passer.use_widths is True + assert pso.config_dict_search["n_particles"] == 51 + assert pso.config_dict_search["cognitive"] == 0.1 + assert pso.config_dict_run["iters"] == 2000 + assert isinstance(pso.initializer, af.InitializerPrior) + assert pso.iterations_per_update == 11 + assert pso.number_of_cores == 1 + +def test__samples_from_model(): + pyswarms = af.PySwarmsGlobal() + pyswarms.paths = af.DirectoryPaths(path_prefix=path.join("non_linear", "pyswarms")) + pyswarms.paths._identifier = "tag" + + model = af.ModelMapper(mock_class=af.m.MockClassx3) + model.mock_class.one = af.LogUniformPrior(lower_limit=1e-8, upper_limit=100.0) + model.mock_class.two = af.LogUniformPrior(lower_limit=1e-8, upper_limit=100.0) + model.mock_class.three = af.LogUniformPrior(lower_limit=1e-8, upper_limit=100.0) + # model.mock_class.four = af.LogUniformPrior(lower_limit=1e-8, upper_limit=100.0) + + samples = pyswarms.samples_from(model=model) + + assert isinstance(samples.parameter_lists, list) + assert isinstance(samples.parameter_lists[0], list) + assert isinstance(samples.log_likelihood_list, list) + assert isinstance(samples.log_prior_list, list) + assert isinstance(samples.log_posterior_list, list) + + assert samples.parameter_lists[0] == pytest.approx( + [50.1254, 1.04626, 10.09456], 1.0e-4 + ) + + assert samples.log_likelihood_list[0] == pytest.approx(-5071.80777, 1.0e-4) + assert samples.log_posterior_list[0] == pytest.approx(-5070.73298, 1.0e-4) + assert samples.weight_list[0] == 1.0 + + assert len(samples.parameter_lists) == 500 + assert len(samples.log_likelihood_list) == 500 From 138b648fdbb760727c732c78d4b1045250e5a1fc Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Fri, 2 Dec 2022 17:11:32 +0000 Subject: [PATCH 041/226] remove multinest --- .../GridSearch.yaml | 0 .../README.rst | 0 .../mcmc.yaml | 7 +--- .../mock.yaml | 0 .../nest.yaml | 38 +------------------ .../optimize.yaml | 3 -- 6 files changed, 3 insertions(+), 45 deletions(-) rename test_autofit/config/{non_linear => non_linsadsdasear}/GridSearch.yaml (100%) rename test_autofit/config/{non_linear => non_linsadsdasear}/README.rst (100%) rename test_autofit/config/{non_linear => non_linsadsdasear}/mcmc.yaml (85%) rename test_autofit/config/{non_linear => non_linsadsdasear}/mock.yaml (100%) rename test_autofit/config/{non_linear => non_linsadsdasear}/nest.yaml (57%) rename test_autofit/config/{non_linear => non_linsadsdasear}/optimize.yaml (91%) diff --git a/test_autofit/config/non_linear/GridSearch.yaml b/test_autofit/config/non_linsadsdasear/GridSearch.yaml similarity index 100% rename from test_autofit/config/non_linear/GridSearch.yaml rename to test_autofit/config/non_linsadsdasear/GridSearch.yaml diff --git a/test_autofit/config/non_linear/README.rst b/test_autofit/config/non_linsadsdasear/README.rst similarity index 100% rename from test_autofit/config/non_linear/README.rst rename to test_autofit/config/non_linsadsdasear/README.rst diff --git a/test_autofit/config/non_linear/mcmc.yaml b/test_autofit/config/non_linsadsdasear/mcmc.yaml similarity index 85% rename from test_autofit/config/non_linear/mcmc.yaml rename to test_autofit/config/non_linsadsdasear/mcmc.yaml index eb02f6f1b..fe8cc614f 100644 --- a/test_autofit/config/non_linear/mcmc.yaml +++ b/test_autofit/config/non_linsadsdasear/mcmc.yaml @@ -19,7 +19,7 @@ Emcee: run: nsteps: 2000 search: - nwalkers: 50 + nwalkers: 51 updates: iterations_per_update: 2500 log_every_update: 1 @@ -60,7 +60,4 @@ Zeus: nwalkers: 50 updates: iterations_per_update: 500 - log_every_update: 1 - model_results_every_update: 1 - remove_state_files_at_end: true - visualize_every_update: 1 \ No newline at end of file + remove_state_files_at_end: true \ No newline at end of file diff --git a/test_autofit/config/non_linear/mock.yaml b/test_autofit/config/non_linsadsdasear/mock.yaml similarity index 100% rename from test_autofit/config/non_linear/mock.yaml rename to test_autofit/config/non_linsadsdasear/mock.yaml diff --git a/test_autofit/config/non_linear/nest.yaml b/test_autofit/config/non_linsadsdasear/nest.yaml similarity index 57% rename from test_autofit/config/non_linear/nest.yaml rename to test_autofit/config/non_linsadsdasear/nest.yaml index 33f44a2a5..2d11ed050 100644 --- a/test_autofit/config/non_linear/nest.yaml +++ b/test_autofit/config/non_linsadsdasear/nest.yaml @@ -69,40 +69,4 @@ DynestyStatic: walks: 5 updates: iterations_per_update: 500 - log_every_update: 1 - model_results_every_update: 1 - remove_state_files_at_end: true - visualize_every_update: 1 -MultiNest: - printing: - silence: false - prior_passer: - sigma: 3.0 - use_errors: true - use_widths: true - search: - const_efficiency_mode: false - context: 0 - evidence_tolerance: 0.5 - importance_nested_sampling: true - init_mpi: false - log_zero: -1.0e+100 - max_iter: 0 - max_modes: 100 - mode_tolerance: -1.0e+89 - multimodal: true - n_iter_before_update: 100 - n_live_points: 50 - null_log_evidence: -1.0e+90 - resume: true - sampling_efficiency: 0.6 - seed: -1.0 - verbose: false - write_output: true - updates: - iterations_per_update: 2500 - log_every_update: 1 - model_results_every_update: 1 - remove_state_files_at_end: true - should_update_sym: 250 - visualize_every_update: 1 \ No newline at end of file + remove_state_files_at_end: true \ No newline at end of file diff --git a/test_autofit/config/non_linear/optimize.yaml b/test_autofit/config/non_linsadsdasear/optimize.yaml similarity index 91% rename from test_autofit/config/non_linear/optimize.yaml rename to test_autofit/config/non_linsadsdasear/optimize.yaml index 7a2201180..bbc2b3b41 100644 --- a/test_autofit/config/non_linear/optimize.yaml +++ b/test_autofit/config/non_linsadsdasear/optimize.yaml @@ -36,10 +36,7 @@ Drawer: total_draws: 10 updates: iterations_per_update: 500 - log_every_update: 1 - model_results_every_update: 1 remove_state_files_at_end: true - visualize_every_update: 1 LBFGS: initialize: ball_lower_limit: 0.49 From 1561a448952575a17ed2a2eb60d777239b62a3b6 Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Fri, 2 Dec 2022 17:11:50 +0000 Subject: [PATCH 042/226] non linear config --- .../config/{non_linsadsdasear => non_linear}/GridSearch.yaml | 0 test_autofit/config/{non_linsadsdasear => non_linear}/README.rst | 0 test_autofit/config/{non_linsadsdasear => non_linear}/mcmc.yaml | 0 test_autofit/config/{non_linsadsdasear => non_linear}/mock.yaml | 0 test_autofit/config/{non_linsadsdasear => non_linear}/nest.yaml | 0 .../config/{non_linsadsdasear => non_linear}/optimize.yaml | 0 6 files changed, 0 insertions(+), 0 deletions(-) rename test_autofit/config/{non_linsadsdasear => non_linear}/GridSearch.yaml (100%) rename test_autofit/config/{non_linsadsdasear => non_linear}/README.rst (100%) rename test_autofit/config/{non_linsadsdasear => non_linear}/mcmc.yaml (100%) rename test_autofit/config/{non_linsadsdasear => non_linear}/mock.yaml (100%) rename test_autofit/config/{non_linsadsdasear => non_linear}/nest.yaml (100%) rename test_autofit/config/{non_linsadsdasear => non_linear}/optimize.yaml (100%) diff --git a/test_autofit/config/non_linsadsdasear/GridSearch.yaml b/test_autofit/config/non_linear/GridSearch.yaml similarity index 100% rename from test_autofit/config/non_linsadsdasear/GridSearch.yaml rename to test_autofit/config/non_linear/GridSearch.yaml diff --git a/test_autofit/config/non_linsadsdasear/README.rst b/test_autofit/config/non_linear/README.rst similarity index 100% rename from test_autofit/config/non_linsadsdasear/README.rst rename to test_autofit/config/non_linear/README.rst diff --git a/test_autofit/config/non_linsadsdasear/mcmc.yaml b/test_autofit/config/non_linear/mcmc.yaml similarity index 100% rename from test_autofit/config/non_linsadsdasear/mcmc.yaml rename to test_autofit/config/non_linear/mcmc.yaml diff --git a/test_autofit/config/non_linsadsdasear/mock.yaml b/test_autofit/config/non_linear/mock.yaml similarity index 100% rename from test_autofit/config/non_linsadsdasear/mock.yaml rename to test_autofit/config/non_linear/mock.yaml diff --git a/test_autofit/config/non_linsadsdasear/nest.yaml b/test_autofit/config/non_linear/nest.yaml similarity index 100% rename from test_autofit/config/non_linsadsdasear/nest.yaml rename to test_autofit/config/non_linear/nest.yaml diff --git a/test_autofit/config/non_linsadsdasear/optimize.yaml b/test_autofit/config/non_linear/optimize.yaml similarity index 100% rename from test_autofit/config/non_linsadsdasear/optimize.yaml rename to test_autofit/config/non_linear/optimize.yaml From 0c89fc7f0caa45470e366a8a6e6c40ade8587255 Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Mon, 5 Dec 2022 10:33:03 +0000 Subject: [PATCH 043/226] rename mock.yaml to mock_search.yaml --- test_autofit/config/non_linear/mcmc.yaml | 2 +- test_autofit/config/non_linear/mock.yaml | 32 ------------------- .../config/non_linear/mock_search.yaml | 25 +++++++++++++++ 3 files changed, 26 insertions(+), 33 deletions(-) delete mode 100644 test_autofit/config/non_linear/mock.yaml create mode 100644 test_autofit/config/non_linear/mock_search.yaml diff --git a/test_autofit/config/non_linear/mcmc.yaml b/test_autofit/config/non_linear/mcmc.yaml index fe8cc614f..8f31d2e47 100644 --- a/test_autofit/config/non_linear/mcmc.yaml +++ b/test_autofit/config/non_linear/mcmc.yaml @@ -19,7 +19,7 @@ Emcee: run: nsteps: 2000 search: - nwalkers: 51 + nwalkers: 50 updates: iterations_per_update: 2500 log_every_update: 1 diff --git a/test_autofit/config/non_linear/mock.yaml b/test_autofit/config/non_linear/mock.yaml deleted file mode 100644 index fbd7f0f6c..000000000 --- a/test_autofit/config/non_linear/mock.yaml +++ /dev/null @@ -1,32 +0,0 @@ -mock_search: - MockOptimizer: - initialize: - method: prior - printing: - silence: false - prior_passer: - sigma: 3.0 - use_errors: true - use_widths: true - updates: - iterations_per_update: 2500 - log_every_update: 1 - model_results_every_update: 1 - remove_state_files_at_end: true - visualize_every_update: 1 - MockSearch: - initialize: - method: prior - printing: - silence: false - prior_passer: - sigma: 3.0 - use_errors: true - use_widths: true - search: {} - updates: - iterations_per_update: 2500 - log_every_update: 1 - model_results_every_update: 1 - remove_state_files_at_end: true - visualize_every_update: 1 \ No newline at end of file diff --git a/test_autofit/config/non_linear/mock_search.yaml b/test_autofit/config/non_linear/mock_search.yaml new file mode 100644 index 000000000..4ce5d7d8a --- /dev/null +++ b/test_autofit/config/non_linear/mock_search.yaml @@ -0,0 +1,25 @@ +MockOptimizer: + initialize: + method: prior + printing: + silence: false + prior_passer: + sigma: 3.0 + use_errors: true + use_widths: true + updates: + iterations_per_update: 2500 + remove_state_files_at_end: true +MockSearch: + initialize: + method: prior + printing: + silence: false + prior_passer: + sigma: 3.0 + use_errors: true + use_widths: true + search: {} + updates: + iterations_per_update: 2500 + remove_state_files_at_end: true \ No newline at end of file From 9e5c165026d429b0744cedd5a40abcc392ed8211 Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Tue, 6 Dec 2022 17:59:58 +0000 Subject: [PATCH 044/226] remove during_analysis clauses --- autofit/non_linear/abstract_search.py | 34 +++++++++++++-------------- 1 file changed, 16 insertions(+), 18 deletions(-) diff --git a/autofit/non_linear/abstract_search.py b/autofit/non_linear/abstract_search.py index 28c9d187c..98d8891d7 100644 --- a/autofit/non_linear/abstract_search.py +++ b/autofit/non_linear/abstract_search.py @@ -676,7 +676,7 @@ def perform_update(self, model, analysis, during_analysis): self.paths.save_object("samples", samples) - if not during_analysis and not isinstance(self.paths, NullPaths): + if not isinstance(self.paths, NullPaths): self.plot_results(samples=samples) try: @@ -684,11 +684,10 @@ def perform_update(self, model, analysis, during_analysis): except exc.FitException: return samples - if not during_analysis: - self.logger.debug("Visualizing") - analysis.visualize( - paths=self.paths, instance=instance, during_analysis=during_analysis - ) + self.logger.debug("Visualizing") + analysis.visualize( + paths=self.paths, instance=instance, during_analysis=during_analysis + ) if self.should_profile: self.logger.debug("Profiling Maximum Likelihood Model") @@ -696,20 +695,19 @@ def perform_update(self, model, analysis, during_analysis): paths=self.paths, instance=instance, ) - if not during_analysis: - self.logger.debug("Outputting model result") - try: + self.logger.debug("Outputting model result") + try: - start = time.time() - analysis.log_likelihood_function(instance=instance) - log_likelihood_function_time = time.time() - start + start = time.time() + analysis.log_likelihood_function(instance=instance) + log_likelihood_function_time = time.time() - start - self.paths.save_summary( - samples=samples, - log_likelihood_function_time=log_likelihood_function_time, - ) - except exc.FitException: - pass + self.paths.save_summary( + samples=samples, + log_likelihood_function_time=log_likelihood_function_time, + ) + except exc.FitException: + pass if not during_analysis and self.remove_state_files_at_end: self.logger.debug("Removing state files") From 1448030b67a0c5292fb909a11c564649b2d0ef49 Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Tue, 6 Dec 2022 18:35:24 +0000 Subject: [PATCH 045/226] dynestyu now uses initializer even if not in test mode --- autofit/non_linear/nest/dynesty/abstract.py | 59 ++++++++++----------- 1 file changed, 29 insertions(+), 30 deletions(-) diff --git a/autofit/non_linear/nest/dynesty/abstract.py b/autofit/non_linear/nest/dynesty/abstract.py index 932b256c5..762db3f0f 100644 --- a/autofit/non_linear/nest/dynesty/abstract.py +++ b/autofit/non_linear/nest/dynesty/abstract.py @@ -336,42 +336,41 @@ def live_points_init_from(self, model, fitness_function): ------- """ - if os.environ.get("PYAUTOFIT_TEST_MODE") == "1": - - ( - unit_parameters, - parameters, - log_likelihood_list, - ) = self.initializer.samples_from_model( - total_points=self.total_live_points, - model=model, - fitness_function=fitness_function, - ) - init_unit_parameters = np.zeros( - shape=(self.total_live_points, model.prior_count) - ) - init_parameters = np.zeros( - shape=(self.total_live_points, model.prior_count) - ) - init_log_likelihood_list = np.zeros(shape=(self.total_live_points)) + ( + unit_parameters, + parameters, + log_likelihood_list, + ) = self.initializer.samples_from_model( + total_points=self.total_live_points, + model=model, + fitness_function=fitness_function, + ) + + init_unit_parameters = np.zeros( + shape=(self.total_live_points, model.prior_count) + ) + init_parameters = np.zeros( + shape=(self.total_live_points, model.prior_count) + ) + init_log_likelihood_list = np.zeros(shape=(self.total_live_points)) - for i in range(len(parameters)): - init_unit_parameters[i, :] = np.asarray(unit_parameters[i]) - init_parameters[i, :] = np.asarray(parameters[i]) - init_log_likelihood_list[i] = np.asarray(log_likelihood_list[i]) + for i in range(len(parameters)): + init_unit_parameters[i, :] = np.asarray(unit_parameters[i]) + init_parameters[i, :] = np.asarray(parameters[i]) + init_log_likelihood_list[i] = np.asarray(log_likelihood_list[i]) - live_points = [ - init_unit_parameters, - init_parameters, - init_log_likelihood_list, - ] + live_points = [ + init_unit_parameters, + init_parameters, + init_log_likelihood_list, + ] - blobs = np.asarray(self.total_live_points * [False]) + blobs = np.asarray(self.total_live_points * [False]) - live_points.append(blobs) + live_points.append(blobs) - return live_points + return live_points def sampler_from( self, From f8bac61a9b7fefc27e876744aa988f8f19717e07 Mon Sep 17 00:00:00 2001 From: Richard Date: Fri, 9 Dec 2022 09:16:41 +0000 Subject: [PATCH 046/226] correct name from yaml config --- test_autofit/config/non_linear/{mock_search.yaml => mock.yaml} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename test_autofit/config/non_linear/{mock_search.yaml => mock.yaml} (100%) diff --git a/test_autofit/config/non_linear/mock_search.yaml b/test_autofit/config/non_linear/mock.yaml similarity index 100% rename from test_autofit/config/non_linear/mock_search.yaml rename to test_autofit/config/non_linear/mock.yaml From 037c3025aaf4e23b4359198bf8942d2026741b1a Mon Sep 17 00:00:00 2001 From: Richard Date: Fri, 9 Dec 2022 09:29:28 +0000 Subject: [PATCH 047/226] deep copy and cache config so modification only has search instance scope --- autofit/non_linear/abstract_search.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/autofit/non_linear/abstract_search.py b/autofit/non_linear/abstract_search.py index 98d8891d7..234e1970b 100644 --- a/autofit/non_linear/abstract_search.py +++ b/autofit/non_linear/abstract_search.py @@ -12,7 +12,7 @@ import numpy as np -from autoconf import conf +from autoconf import conf, cached_property from autofit import exc from autofit.database.sqlalchemy_ import sa from autofit.graphical import ( @@ -587,10 +587,10 @@ def config_dict_with_test_mode_settings_from(self, config_dict: Dict) -> Dict: def _class_config(self) -> Dict: return self.config_type[self.__class__.__name__] - @property + @cached_property def config_dict_search(self) -> Dict: - config_dict = copy.copy(self._class_config["search"]) + config_dict = copy.deepcopy(self._class_config["search"]) for key, value in config_dict.items(): try: @@ -600,10 +600,10 @@ def config_dict_search(self) -> Dict: return config_dict - @property + @cached_property def config_dict_run(self) -> Dict: - config_dict = copy.copy(self._class_config["run"]) + config_dict = copy.deepcopy(self._class_config["run"]) for key, value in config_dict.items(): try: From 18b5c201c25ecfcf28d5c09bf01d903e0d0f3365 Mon Sep 17 00:00:00 2001 From: Richard Date: Fri, 9 Dec 2022 09:33:01 +0000 Subject: [PATCH 048/226] same for lbfgs --- autofit/non_linear/optimize/lbfgs/lbfgs.py | 71 ++++++++++------------ 1 file changed, 33 insertions(+), 38 deletions(-) diff --git a/autofit/non_linear/optimize/lbfgs/lbfgs.py b/autofit/non_linear/optimize/lbfgs/lbfgs.py index 6a62362b9..9aad2c1f6 100644 --- a/autofit/non_linear/optimize/lbfgs/lbfgs.py +++ b/autofit/non_linear/optimize/lbfgs/lbfgs.py @@ -1,5 +1,6 @@ from typing import Optional +from autoconf import cached_property from autofit.database.sqlalchemy_ import sa from autofit.mapper.prior_model.abstract import AbstractPriorModel @@ -19,15 +20,15 @@ class LBFGS(AbstractOptimizer): __identifier_fields__ = () def __init__( - self, - name: Optional[str] = None, - path_prefix: Optional[str] = None, - unique_tag: Optional[str] = None, - prior_passer: Optional[PriorPasser] = None, - initializer: Optional[AbstractInitializer] = None, - iterations_per_update: int = None, - session: Optional[sa.orm.Session] = None, - **kwargs + self, + name: Optional[str] = None, + path_prefix: Optional[str] = None, + unique_tag: Optional[str] = None, + prior_passer: Optional[PriorPasser] = None, + initializer: Optional[AbstractInitializer] = None, + iterations_per_update: int = None, + session: Optional[sa.orm.Session] = None, + **kwargs ): """ A L-BFGS scipy non-linear search. @@ -72,10 +73,10 @@ def __init__( self.logger.debug("Creating LBFGS Search") - @property + @cached_property def config_dict_options(self): - config_dict = copy.copy(self._class_config["options"]) + config_dict = copy.deepcopy(self._class_config["options"]) for key, value in config_dict.items(): try: @@ -86,10 +87,10 @@ def config_dict_options(self): return config_dict def _fit( - self, - model: AbstractPriorModel, - analysis: Analysis, - log_likelihood_cap: Optional[float] = None + self, + model: AbstractPriorModel, + analysis: Analysis, + log_likelihood_cap: Optional[float] = None, ): """ Fit a model using the scipy L-BFGS method and the Analysis class which contains the data and returns the log @@ -117,21 +118,27 @@ def _fit( x0 = self.paths.load_object("x0") total_iterations = self.paths.load_object("total_iterations") - self.logger.info("Existing LBGFS samples found, resuming non-linear search.") + self.logger.info( + "Existing LBGFS samples found, resuming non-linear search." + ) else: - unit_parameter_lists, parameter_lists, log_posterior_list = self.initializer.samples_from_model( - total_points=1, - model=model, - fitness_function=fitness_function, + ( + unit_parameter_lists, + parameter_lists, + log_posterior_list, + ) = self.initializer.samples_from_model( + total_points=1, model=model, fitness_function=fitness_function, ) x0 = np.asarray(parameter_lists[0]) total_iterations = 0 - self.logger.info("No LBFGS samples found, beginning new non-linear search. ") + self.logger.info( + "No LBFGS samples found, beginning new non-linear search. " + ) maxiter = self.config_dict_options.get("maxiter", 1e8) @@ -156,18 +163,12 @@ def _fit( total_iterations += lbfgs.nit - self.paths.save_object( - "total_iterations", - total_iterations - ) + self.paths.save_object("total_iterations", total_iterations) self.paths.save_object( "log_posterior", - fitness_function.log_posterior_from(parameter_list=lbfgs.x) - ) - self.paths.save_object( - "x0", - lbfgs.x + fitness_function.log_posterior_from(parameter_list=lbfgs.x), ) + self.paths.save_object("x0", lbfgs.x) self.perform_update( model=model, analysis=analysis, during_analysis=True @@ -180,18 +181,12 @@ def _fit( self.logger.info("L-BFGS sampling complete.") - def samples_from( - self, - model: AbstractPriorModel - ): + def samples_from(self, model: AbstractPriorModel): return SamplesLBFGS.from_results_internal( model=model, results_internal=self.paths.load_object("x0"), log_posterior_list=np.array([self.paths.load_object("log_posterior")]), total_iterations=self.paths.load_object("total_iterations"), - time=self.timer.time + time=self.timer.time, ) - - - From 2e7acec139904f6dd1aa416451881a0b396b74f7 Mon Sep 17 00:00:00 2001 From: Richard Date: Fri, 9 Dec 2022 09:35:29 +0000 Subject: [PATCH 049/226] undo changes to pyswarms test --- test_autofit/non_linear/optimize/test_pyswarms.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/test_autofit/non_linear/optimize/test_pyswarms.py b/test_autofit/non_linear/optimize/test_pyswarms.py index b8d79cd42..027f012ab 100644 --- a/test_autofit/non_linear/optimize/test_pyswarms.py +++ b/test_autofit/non_linear/optimize/test_pyswarms.py @@ -37,7 +37,7 @@ def test__loads_from_config_file_correct(): assert pso.prior_passer.sigma == 3.0 assert pso.prior_passer.use_errors is True assert pso.prior_passer.use_widths is True - assert pso.config_dict_search["n_particles"] == 51 + assert pso.config_dict_search["n_particles"] == 50 assert pso.config_dict_search["cognitive"] == 0.1 assert pso.config_dict_run["iters"] == 2000 assert isinstance(pso.initializer, af.InitializerPrior) @@ -75,13 +75,14 @@ def test__loads_from_config_file_correct(): assert pso.prior_passer.sigma == 3.0 assert pso.prior_passer.use_errors is True assert pso.prior_passer.use_widths is True - assert pso.config_dict_search["n_particles"] == 51 + assert pso.config_dict_search["n_particles"] == 50 assert pso.config_dict_search["cognitive"] == 0.1 assert pso.config_dict_run["iters"] == 2000 assert isinstance(pso.initializer, af.InitializerPrior) assert pso.iterations_per_update == 11 assert pso.number_of_cores == 1 + def test__samples_from_model(): pyswarms = af.PySwarmsGlobal() pyswarms.paths = af.DirectoryPaths(path_prefix=path.join("non_linear", "pyswarms")) From 415234602a1d9a58b20fa0ff12f3a1d445867020 Mon Sep 17 00:00:00 2001 From: Richard Date: Fri, 9 Dec 2022 11:13:17 +0000 Subject: [PATCH 050/226] ensure result has correct type for model analysis --- autofit/non_linear/analysis/model_analysis.py | 35 ++++++++++--------- test_autofit/analysis/test_regression.py | 34 +++++++++++++----- 2 files changed, 44 insertions(+), 25 deletions(-) diff --git a/autofit/non_linear/analysis/model_analysis.py b/autofit/non_linear/analysis/model_analysis.py index b9432c591..b36c22927 100644 --- a/autofit/non_linear/analysis/model_analysis.py +++ b/autofit/non_linear/analysis/model_analysis.py @@ -5,11 +5,7 @@ class ModelAnalysis(Analysis): - def __init__( - self, - analysis: Analysis, - model: AbstractPriorModel - ): + def __init__(self, analysis: Analysis, model: AbstractPriorModel): """ Comprises a model and an analysis that can be applied to instances of that model. @@ -27,12 +23,18 @@ def __getattr__(self, item): def log_likelihood_function(self, instance): return self.analysis.log_likelihood_function(instance) + def make_result(self, samples, model, sigma=1.0, use_errors=True, use_widths=False): + return self.analysis.make_result( + samples=samples, + model=model, + sigma=sigma, + use_errors=use_errors, + use_widths=use_widths, + ) + class CombinedModelAnalysis(IndexCollectionAnalysis): - def modify_model( - self, - model: AbstractPriorModel - ) -> CollectionPriorModel: + def modify_model(self, model: AbstractPriorModel) -> CollectionPriorModel: """ Creates a collection with one model for each analysis. For each ModelAnalysis the model is used; for other analyses the default model is used. @@ -46,10 +48,11 @@ def modify_model( ------- A collection of models, one for each analysis. """ - return CollectionPriorModel([ - analysis.analysis.model if isinstance( - analysis.analysis, - ModelAnalysis - ) else model - for analysis in self.analyses - ]) + return CollectionPriorModel( + [ + analysis.analysis.model + if isinstance(analysis.analysis, ModelAnalysis) + else model + for analysis in self.analyses + ] + ) diff --git a/test_autofit/analysis/test_regression.py b/test_autofit/analysis/test_regression.py index 84af1d456..6be6bb7ed 100644 --- a/test_autofit/analysis/test_regression.py +++ b/test_autofit/analysis/test_regression.py @@ -1,16 +1,32 @@ import pickle +import autofit as af from autofit.non_linear.analysis import CombinedAnalysis def test_pickle(Analysis): analysis = Analysis() + Analysis() - loaded = pickle.loads( - pickle.dumps( - analysis - ) - ) - assert isinstance( - loaded, - CombinedAnalysis - ) + loaded = pickle.loads(pickle.dumps(analysis)) + assert isinstance(loaded, CombinedAnalysis) + + +class MyResult(af.Result): + pass + + +class MyAnalysis(af.Analysis): + def log_likelihood_function(self, instance): + pass + + def make_result(self, samples, model, sigma=1.0, use_errors=True, use_widths=False): + return MyResult(model=model, samples=samples) + + +def test_result_type(): + model = af.Model(af.Gaussian) + + analysis = MyAnalysis().with_model(model) + + result = analysis.make_result(None, model) + + assert isinstance(result, MyResult) From 30723c83f91209df7f09f429282d42114c2aac21 Mon Sep 17 00:00:00 2001 From: Richard Date: Fri, 9 Dec 2022 11:13:45 +0000 Subject: [PATCH 051/226] docs --- autofit/non_linear/analysis/model_analysis.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/autofit/non_linear/analysis/model_analysis.py b/autofit/non_linear/analysis/model_analysis.py index b36c22927..d196e6555 100644 --- a/autofit/non_linear/analysis/model_analysis.py +++ b/autofit/non_linear/analysis/model_analysis.py @@ -24,6 +24,9 @@ def log_likelihood_function(self, instance): return self.analysis.log_likelihood_function(instance) def make_result(self, samples, model, sigma=1.0, use_errors=True, use_widths=False): + """ + Return the correct type of result by calling the underlying analysis. + """ return self.analysis.make_result( samples=samples, model=model, From 56d42dde8eae2b0429435b55d49c8117046267ba Mon Sep 17 00:00:00 2001 From: Richard Date: Fri, 9 Dec 2022 14:29:37 +0000 Subject: [PATCH 052/226] set object for path --- autofit/mapper/prior_model/abstract.py | 15 ++++++++++ test_autofit/mapper/model/test_cast.py | 41 ++++++++++++++++++++++++++ 2 files changed, 56 insertions(+) create mode 100644 test_autofit/mapper/model/test_cast.py diff --git a/autofit/mapper/prior_model/abstract.py b/autofit/mapper/prior_model/abstract.py index 930824360..590ff376c 100644 --- a/autofit/mapper/prior_model/abstract.py +++ b/autofit/mapper/prior_model/abstract.py @@ -191,6 +191,21 @@ def __init__(self, label=None): super().__init__(label=label) self._assertions = list() + def cast(self, value_dict, new_class): + from .prior_model import PriorModel + + for path, prior_model in self.path_instance_tuples_for_class(PriorModel): + pass + + def replacing_for_path(self, path, value): + new = copy.deepcopy(self) + obj = new + for key in path[:-1]: + obj = getattr(new, key) + + setattr(obj, path[-1], value) + return new + def without_attributes(self) -> "AbstractModel": """ Returns a copy of this object with all priors, prior models and diff --git a/test_autofit/mapper/model/test_cast.py b/test_autofit/mapper/model/test_cast.py new file mode 100644 index 000000000..1caba099c --- /dev/null +++ b/test_autofit/mapper/model/test_cast.py @@ -0,0 +1,41 @@ +import pytest + +import autofit as af + + +class A: + def __init__(self, a): + self.a = a + + +class B: + def __init__(self, a, b): + self.a = a + self.b = b + + +@pytest.fixture(name="prior") +def make_prior(): + return af.UniformPrior() + + +@pytest.fixture(name="model_a") +def make_model_a(prior): + return af.Model(A, a=prior) + + +@pytest.fixture(name="collection_a") +def make_collection_a(model_a): + return af.Collection(a=model_a) + + +def test_cast(collection_a, model_a, prior): + result = collection_a.cast({model_a: {"b": 2}}, B) + assert result.a.cls is B + assert result.a.b == 2 + assert result.a.a is prior + + +def test_replace_for_path(collection_a): + collection = collection_a.replacing_for_path(("a", "a"), 3) + assert collection.a.a == 3 From 601c5a26b3c9e29cfde5c7a9107d6bb001c0927f Mon Sep 17 00:00:00 2001 From: Richard Date: Fri, 9 Dec 2022 14:44:58 +0000 Subject: [PATCH 053/226] working implementation of cast --- autofit/mapper/prior_model/abstract.py | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/autofit/mapper/prior_model/abstract.py b/autofit/mapper/prior_model/abstract.py index 590ff376c..5ae6e906f 100644 --- a/autofit/mapper/prior_model/abstract.py +++ b/autofit/mapper/prior_model/abstract.py @@ -194,8 +194,24 @@ def __init__(self, label=None): def cast(self, value_dict, new_class): from .prior_model import PriorModel + updated = self + for path, prior_model in self.path_instance_tuples_for_class(PriorModel): - pass + try: + value_dict = value_dict[prior_model] + argument_dict = dict( + **dict(prior_model.direct_prior_tuples), + **dict(prior_model.direct_tuples_with_type(float)), + **value_dict, + ) + updated = updated.replacing_for_path( + path, PriorModel(new_class, **argument_dict) + ) + + except KeyError: + pass + + return updated def replacing_for_path(self, path, value): new = copy.deepcopy(self) From f0f3d337a43ebd5adbe9f21c781dc97c076b1a43 Mon Sep 17 00:00:00 2001 From: Richard Date: Fri, 9 Dec 2022 14:48:00 +0000 Subject: [PATCH 054/226] supporting casting multiple values --- autofit/mapper/prior_model/abstract.py | 4 ++-- test_autofit/mapper/model/test_cast.py | 7 +++++++ 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/autofit/mapper/prior_model/abstract.py b/autofit/mapper/prior_model/abstract.py index 5ae6e906f..9b33c3c5a 100644 --- a/autofit/mapper/prior_model/abstract.py +++ b/autofit/mapper/prior_model/abstract.py @@ -199,11 +199,11 @@ def cast(self, value_dict, new_class): for path, prior_model in self.path_instance_tuples_for_class(PriorModel): try: value_dict = value_dict[prior_model] - argument_dict = dict( + argument_dict = { **dict(prior_model.direct_prior_tuples), **dict(prior_model.direct_tuples_with_type(float)), **value_dict, - ) + } updated = updated.replacing_for_path( path, PriorModel(new_class, **argument_dict) ) diff --git a/test_autofit/mapper/model/test_cast.py b/test_autofit/mapper/model/test_cast.py index 1caba099c..eb0cd9bc8 100644 --- a/test_autofit/mapper/model/test_cast.py +++ b/test_autofit/mapper/model/test_cast.py @@ -36,6 +36,13 @@ def test_cast(collection_a, model_a, prior): assert result.a.a is prior +def test_cast_both(collection_a, model_a): + result = collection_a.cast({model_a: {"a": 1, "b": 2}}, B) + + assert result.a.a == 1 + assert result.a.b == 2 + + def test_replace_for_path(collection_a): collection = collection_a.replacing_for_path(("a", "a"), 3) assert collection.a.a == 3 From cf2731c5035dc0b10571e69c9ff51926978c8d0c Mon Sep 17 00:00:00 2001 From: Richard Date: Fri, 9 Dec 2022 15:00:10 +0000 Subject: [PATCH 055/226] allow casting of multiple --- autofit/mapper/prior_model/abstract.py | 4 ++-- test_autofit/mapper/model/test_cast.py | 15 +++++++++++++++ 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/autofit/mapper/prior_model/abstract.py b/autofit/mapper/prior_model/abstract.py index 9b33c3c5a..9ea847695 100644 --- a/autofit/mapper/prior_model/abstract.py +++ b/autofit/mapper/prior_model/abstract.py @@ -198,11 +198,11 @@ def cast(self, value_dict, new_class): for path, prior_model in self.path_instance_tuples_for_class(PriorModel): try: - value_dict = value_dict[prior_model] + model_value_dict = value_dict[prior_model] argument_dict = { **dict(prior_model.direct_prior_tuples), **dict(prior_model.direct_tuples_with_type(float)), - **value_dict, + **model_value_dict, } updated = updated.replacing_for_path( path, PriorModel(new_class, **argument_dict) diff --git a/test_autofit/mapper/model/test_cast.py b/test_autofit/mapper/model/test_cast.py index eb0cd9bc8..bdcdf5ae1 100644 --- a/test_autofit/mapper/model/test_cast.py +++ b/test_autofit/mapper/model/test_cast.py @@ -29,6 +29,21 @@ def make_collection_a(model_a): return af.Collection(a=model_a) +def test_cast_multiple(prior): + model_a_1 = af.Model(A, a=prior) + model_a_2 = af.Model(A, a=prior) + + collection = af.Collection(a_1=model_a_1, a_2=model_a_2) + + collection = collection.cast({model_a_1: {"b": 1}, model_a_2: {"b": 2}}, B) + + assert collection.a_1.cls is B + assert collection.a_2.cls is B + + assert collection.a_1.b == 1 + assert collection.a_2.b == 2 + + def test_cast(collection_a, model_a, prior): result = collection_a.cast({model_a: {"b": 2}}, B) assert result.a.cls is B From 77c9b5de0ac0bc36897949f2aefb674149694adc Mon Sep 17 00:00:00 2001 From: Richard Date: Fri, 9 Dec 2022 15:13:58 +0000 Subject: [PATCH 056/226] docs --- autofit/mapper/prior_model/abstract.py | 36 ++++++++++++++++++++++++-- 1 file changed, 34 insertions(+), 2 deletions(-) diff --git a/autofit/mapper/prior_model/abstract.py b/autofit/mapper/prior_model/abstract.py index 9ea847695..74744692d 100644 --- a/autofit/mapper/prior_model/abstract.py +++ b/autofit/mapper/prior_model/abstract.py @@ -191,7 +191,25 @@ def __init__(self, label=None): super().__init__(label=label) self._assertions = list() - def cast(self, value_dict, new_class): + def cast( + self, value_dict: Dict["AbstractModel":dict], new_class: type, + ) -> "AbstractPriorModel": + """ + Cast models to a new type. Allows selected models in within this + model to be given a new type and new arguments. + + Parameters + ---------- + value_dict + A dictionary mapping models to dictionaries of argument overrides + new_class + A new class to which specified models should be converted + + Returns + ------- + A model where specified child models have been updated to a new class + and new arguments + """ from .prior_model import PriorModel updated = self @@ -213,7 +231,21 @@ def cast(self, value_dict, new_class): return updated - def replacing_for_path(self, path, value): + def replacing_for_path(self, path: Tuple[str, ...], value) -> "AbstractModel": + """ + Create a new model replacing the value for a given path with a new value + + Parameters + ---------- + path + A path indicating the sequence of names used to address an object + value + A value that should replace the object at the given path + + Returns + ------- + A copy of this with an updated value + """ new = copy.deepcopy(self) obj = new for key in path[:-1]: From 156f6f7cab69ef39c8c1f1f308429adf911a322c Mon Sep 17 00:00:00 2001 From: Richard Date: Fri, 9 Dec 2022 15:23:31 +0000 Subject: [PATCH 057/226] fix --- autofit/mapper/prior_model/abstract.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autofit/mapper/prior_model/abstract.py b/autofit/mapper/prior_model/abstract.py index 74744692d..741bb5034 100644 --- a/autofit/mapper/prior_model/abstract.py +++ b/autofit/mapper/prior_model/abstract.py @@ -192,7 +192,7 @@ def __init__(self, label=None): self._assertions = list() def cast( - self, value_dict: Dict["AbstractModel":dict], new_class: type, + self, value_dict: Dict["AbstractModel", dict], new_class: type, ) -> "AbstractPriorModel": """ Cast models to a new type. Allows selected models in within this From 843c58b87c06cd87e08e7b89592fd68f13e72825 Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 12 Dec 2022 12:25:13 +0000 Subject: [PATCH 058/226] separate id series for priors --- autofit/mapper/prior/abstract.py | 7 +- test_autofit/mapper/model/test_regression.py | 75 +++++++------------- 2 files changed, 32 insertions(+), 50 deletions(-) diff --git a/autofit/mapper/prior/abstract.py b/autofit/mapper/prior/abstract.py index 8db7d1f22..1829700e1 100644 --- a/autofit/mapper/prior/abstract.py +++ b/autofit/mapper/prior/abstract.py @@ -1,7 +1,8 @@ +import itertools +import os import random from abc import ABC, abstractmethod from copy import copy -import os from typing import Union, Tuple from autoconf import conf @@ -16,6 +17,8 @@ class Prior(Variable, ABC, ArithmeticMixin): __database_args__ = ("lower_limit", "upper_limit", "id_") + _ids = itertools.count() + def __init__(self, message, lower_limit=0.0, upper_limit=1.0, id_=None): """ An object used to mappers a unit value to an attribute value for a specific @@ -28,6 +31,8 @@ class attribute. upper_limit: Float The highest value this prior can return """ + if id_ is None: + id_ = next(self._ids) super().__init__(id_=id_) self.message = message message.id_ = id_ diff --git a/test_autofit/mapper/model/test_regression.py b/test_autofit/mapper/model/test_regression.py index 1715e17ac..86ebd7107 100644 --- a/test_autofit/mapper/model/test_regression.py +++ b/test_autofit/mapper/model/test_regression.py @@ -11,74 +11,52 @@ def __init__(self, argument): def test_config_error(): - model = af.Model( - SomeWeirdClass - ) + model = af.Model(SomeWeirdClass) with pytest.raises(ConfigException): - print(Identifier([ - model - ])) + print(Identifier([model])) def test_mapper_from_prior_arguments_simple_collection(): old = af.UniformPrior() new = af.UniformPrior() - collection = af.Collection( - value=old - ) - collection = collection.mapper_from_prior_arguments({ - old: new - }) + collection = af.Collection(value=old) + collection = collection.mapper_from_prior_arguments({old: new}) assert collection.value == new def test_direct_instances_only(): - child = af.Model( - af.Gaussian, - centre=0.0, - normalization=0.1, - sigma=0.01, - ) + child = af.Model(af.Gaussian, centre=0.0, normalization=0.1, sigma=0.01,) child.constant = 1.0 - model = af.Model( - af.Gaussian, - centre=child, - normalization=0.1, - sigma=0.01, - ) + model = af.Model(af.Gaussian, centre=child, normalization=0.1, sigma=0.01,) new_model = model.gaussian_prior_model_for_arguments({}) assert not hasattr(new_model, "constant") def test_function_from_instance(): - assert af.PriorModel.from_instance( - test_function_from_instance - ) is test_function_from_instance + assert ( + af.PriorModel.from_instance(test_function_from_instance) + is test_function_from_instance + ) def test_as_model_tuples(): model = af.Model(af.m.MockWithTuple) - assert isinstance( - model.tup.tup_0, - af.UniformPrior - ) - assert isinstance( - model.tup.tup_1, - af.UniformPrior - ) + assert isinstance(model.tup.tup_0, af.UniformPrior) + assert isinstance(model.tup.tup_1, af.UniformPrior) instance = model.instance_from_prior_medians() assert instance.tup == (0.5, 0.5) - model = af.AbstractPriorModel.from_instance( - instance - ) + model = af.AbstractPriorModel.from_instance(instance) assert model.tup == (0.5, 0.5) - assert """tup (0.5, 0.5)""" in model.info + assert ( + """tup (0.5, 0.5)""" + in model.info + ) def test_info_prints_number_of_parameters(): @@ -101,25 +79,24 @@ def test_set_centre(): def test_passing_priors(): - model = af.Model( - af.m.MockWithTuple - ) + model = af.Model(af.m.MockWithTuple) - new_model = model.mapper_from_gaussian_tuples([ - (1, 1), - (1, 1), - ]) + new_model = model.mapper_from_gaussian_tuples([(1, 1), (1, 1),]) assert isinstance(new_model.tup_0, af.GaussianPrior) assert isinstance(new_model.tup_1, af.GaussianPrior) def test_passing_fixed(): - model = af.Model( - af.m.MockWithTuple - ) + model = af.Model(af.m.MockWithTuple) model.tup_0 = 0.1 model.tup_1 = 2.0 new_model = model.mapper_from_gaussian_tuples([]) assert new_model.tup_0 == 0.1 assert new_model.tup_1 == 2.0 + + +def test_independent_ids(): + prior = af.UniformPrior() + af.ModelInstance() + assert af.UniformPrior().id == prior.id + 1 From 104e8242e4a0cc87c3c847978f82d7758b20aefb Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 12 Dec 2022 12:29:02 +0000 Subject: [PATCH 059/226] fixing tests... --- .../functionality/test_model_info.py | 25 ++---- test_autofit/graphical/global/test_global.py | 61 +++++++------- .../graphical/global/test_hierarchical.py | 78 +++++------------- .../graphical/info/test_hierarchical.py | 21 ++--- test_autofit/graphical/info/test_info.py | 82 ++++++++----------- 5 files changed, 101 insertions(+), 166 deletions(-) diff --git a/test_autofit/graphical/functionality/test_model_info.py b/test_autofit/graphical/functionality/test_model_info.py index 548ea3a9e..a56722574 100644 --- a/test_autofit/graphical/functionality/test_model_info.py +++ b/test_autofit/graphical/functionality/test_model_info.py @@ -6,29 +6,21 @@ from autofit import graphical as g -@pytest.fixture( - name="analysis_factor" -) +@pytest.fixture(name="analysis_factor") def make_analysis_factor(): return g.AnalysisFactor( - prior_model=af.PriorModel( - af.Gaussian - ), + prior_model=af.PriorModel(af.Gaussian), analysis=af.m.MockAnalysis(), - name="AnalysisFactor0" + name="AnalysisFactor0", ) -@pytest.fixture( - autouse=True -) +@pytest.fixture(autouse=True) def reset_ids(): - af.ModelObject._ids = itertools.count() + af.Prior._ids = itertools.count() -@pytest.fixture( - name="info" -) +@pytest.fixture(name="info") def make_info(): return """AnalysisFactor0 @@ -41,8 +33,5 @@ def make_info(): sigma UniformPrior [3], lower_limit = 0.0, upper_limit = 1.0""" -def test_analysis_factor( - analysis_factor, - info -): +def test_analysis_factor(analysis_factor, info): assert analysis_factor.info == info diff --git a/test_autofit/graphical/global/test_global.py b/test_autofit/graphical/global/test_global.py index e14f0864c..6eeccabd3 100644 --- a/test_autofit/graphical/global/test_global.py +++ b/test_autofit/graphical/global/test_global.py @@ -6,17 +6,15 @@ import autofit.graphical as g -@pytest.fixture( - autouse=True -) +@pytest.fixture(autouse=True) def reset_ids(): - af.ModelObject._ids = itertools.count() + af.Prior._ids = itertools.count() -def test_info( - model_factor -): - assert model_factor.global_prior_model.info == """PriorFactors +def test_info(model_factor): + assert ( + model_factor.global_prior_model.info + == """PriorFactors PriorFactor0 (AnalysisFactor0.one) UniformPrior [0], lower_limit = 0.0, upper_limit = 1.0 @@ -25,14 +23,13 @@ def test_info( AnalysisFactor0 one (PriorFactor0) UniformPrior [0], lower_limit = 0.0, upper_limit = 1.0""" + ) -def test_results( - model_factor -): - assert model_factor.graph.make_results_text( - model_factor.global_prior_model - ) == """PriorFactors +def test_results(model_factor): + assert ( + model_factor.graph.make_results_text(model_factor.global_prior_model) + == """PriorFactors PriorFactor0 (AnalysisFactor0.one) 0.5 @@ -41,32 +38,31 @@ def test_results( AnalysisFactor0 one (PriorFactor0) 0.5""" + ) class TestGlobalLikelihood: @pytest.mark.parametrize("unit_value, likelihood", [(0.5, 0.0), (0.0, -0.25)]) def test_single_factor(self, model_factor, unit_value, likelihood): assert ( - model_factor.log_likelihood_function( - model_factor.global_prior_model.instance_from_unit_vector( - [unit_value], - ignore_prior_limits=True, - )[0] - ) - == likelihood + model_factor.log_likelihood_function( + model_factor.global_prior_model.instance_from_unit_vector( + [unit_value], ignore_prior_limits=True, + )[0] + ) + == likelihood ) @pytest.mark.parametrize("unit_value, likelihood", [(0.5, 0.0), (0.0, -0.5)]) def test_collection(self, model_factor, unit_value, likelihood): collection = g.FactorGraphModel(model_factor, model_factor) assert ( - collection.log_likelihood_function( - collection.global_prior_model.instance_from_unit_vector( - [unit_value], - ignore_prior_limits=True - ) + collection.log_likelihood_function( + collection.global_prior_model.instance_from_unit_vector( + [unit_value], ignore_prior_limits=True ) - == likelihood + ) + == likelihood ) @pytest.mark.parametrize( @@ -76,13 +72,12 @@ def test_two_factor(self, model_factor, model_factor_2, unit_vector, likelihood) collection = g.FactorGraphModel(model_factor, model_factor_2) assert ( - collection.log_likelihood_function( - collection.global_prior_model.instance_from_unit_vector( - unit_vector, - ignore_prior_limits=True - ) + collection.log_likelihood_function( + collection.global_prior_model.instance_from_unit_vector( + unit_vector, ignore_prior_limits=True ) - == likelihood + ) + == likelihood ) def test_global_search(self, model_factor, model_factor_2): diff --git a/test_autofit/graphical/global/test_hierarchical.py b/test_autofit/graphical/global/test_hierarchical.py index 717af392c..95d13b929 100644 --- a/test_autofit/graphical/global/test_hierarchical.py +++ b/test_autofit/graphical/global/test_hierarchical.py @@ -6,55 +6,36 @@ from autofit import graphical as g -@pytest.fixture( - name="graph" -) +@pytest.fixture(name="graph") def make_graph( - model_factor, - model_factor_2, + model_factor, model_factor_2, ): hierarchical_factor = g.HierarchicalFactor( af.GaussianPrior, - mean=af.GaussianPrior( - mean=0.5, - sigma=0.1 - ), - sigma=af.GaussianPrior( - mean=1.0, - sigma=0.01 - ) + mean=af.GaussianPrior(mean=0.5, sigma=0.1), + sigma=af.GaussianPrior(mean=1.0, sigma=0.01), ) - hierarchical_factor.add_drawn_variable( - model_factor.one - ) - hierarchical_factor.add_drawn_variable( - model_factor_2.one - ) + hierarchical_factor.add_drawn_variable(model_factor.one) + hierarchical_factor.add_drawn_variable(model_factor_2.one) - return g.FactorGraphModel( - hierarchical_factor, - model_factor, - model_factor - ) + return g.FactorGraphModel(hierarchical_factor, model_factor, model_factor) -@pytest.fixture( - name="model" -) +@pytest.fixture(name="model") def make_model(graph): return graph.global_prior_model -@pytest.fixture( - autouse=True -) +@pytest.fixture(autouse=True) def reset_ids(): - af.ModelObject._ids = itertools.count() + af.Prior._ids = itertools.count() def test_info(model): - assert model.info == """PriorFactors + assert ( + model.info + == """PriorFactors PriorFactor0 (HierarchicalFactor0) GaussianPrior [5], mean = 1.0, sigma = 0.01 PriorFactor1 (HierarchicalFactor0) GaussianPrior [4], mean = 0.5, sigma = 0.1 @@ -82,25 +63,18 @@ def test_info(model): AnalysisFactor0.one, PriorFactor3 UniformPrior [0], lower_limit = 0.0, upper_limit = 1.0 PriorFactor2 UniformPrior [2], lower_limit = 0.0, upper_limit = 1.0""" + ) def test_instance(model): assert model.prior_count == 4 - instance = model.instance_from_unit_vector( - [0.1, 0.2, 0.3, 0.4] - ) + instance = model.instance_from_unit_vector([0.1, 0.2, 0.3, 0.4]) dist_model_1 = instance[0].distribution_model - assert isinstance( - dist_model_1, - af.GaussianPrior - ) + assert isinstance(dist_model_1, af.GaussianPrior) assert instance[0].drawn_prior == 0.1 dist_model_2 = instance[0].distribution_model - assert isinstance( - dist_model_2, - af.GaussianPrior - ) + assert isinstance(dist_model_2, af.GaussianPrior) assert instance[1].drawn_prior == 0.2 assert dist_model_1 == dist_model_2 @@ -112,20 +86,12 @@ def test_instance(model): ([0.1, 0.2, 0.3, 0.5], -2.248), ([0.1, 0.2, 0.4, 0.8], -2.280), ([0.1, 0.2, 0.3, 0.3], -2.239), - ] + ], ) def test_likelihood( - graph, - model, - unit_vector, - likelihood, + graph, model, unit_vector, likelihood, ): - instance = model.instance_from_unit_vector( - unit_vector - ) - assert graph.log_likelihood_function( - instance - ) == pytest.approx( - likelihood, - rel=0.001 + instance = model.instance_from_unit_vector(unit_vector) + assert graph.log_likelihood_function(instance) == pytest.approx( + likelihood, rel=0.001 ) diff --git a/test_autofit/graphical/info/test_hierarchical.py b/test_autofit/graphical/info/test_hierarchical.py index ded7709b6..8d57170cc 100644 --- a/test_autofit/graphical/info/test_hierarchical.py +++ b/test_autofit/graphical/info/test_hierarchical.py @@ -19,8 +19,7 @@ def make_model_factor_2(): @pytest.fixture(name="hierarchical_factor") def make_hierarchical_factor( - model_factor_1, - model_factor_2, + model_factor_1, model_factor_2, ): hierarchical_factor = g.HierarchicalFactor( af.GaussianPrior, @@ -35,9 +34,7 @@ def make_hierarchical_factor( @pytest.fixture(name="hierarchical_model") def make_factor_graph_model( - model_factor_1, - model_factor_2, - hierarchical_factor, + model_factor_1, model_factor_2, hierarchical_factor, ): return g.FactorGraphModel(model_factor_1, model_factor_2, hierarchical_factor) @@ -51,11 +48,9 @@ def make_graph(hierarchical_model): return hierarchical_model.graph -@pytest.fixture( - autouse=True -) +@pytest.fixture(autouse=True) def reset_ids(): - af.ModelObject._ids = itertools.count() + af.Prior._ids = itertools.count() def test_info_for_hierarchical_factor(graph): @@ -63,8 +58,8 @@ def test_info_for_hierarchical_factor(graph): graph.hierarchical_factors[0] ) assert ( - info - == """HierarchicalFactor0 + info + == """HierarchicalFactor0 mean (HierarchicalFactor0, PriorFactor1) GaussianPrior [8], mean = 100, sigma = 10 sigma (HierarchicalFactor0, PriorFactor0) GaussianPrior [9], mean = 10, sigma = 5 @@ -79,8 +74,8 @@ def test_info_for_hierarchical_factor(graph): def test_graph_info(graph): info = graph.info assert ( - info - == """PriorFactors + info + == """PriorFactors PriorFactor0 (HierarchicalFactor0) GaussianPrior [9], mean = 10, sigma = 5 PriorFactor1 (HierarchicalFactor0) GaussianPrior [8], mean = 100, sigma = 10 diff --git a/test_autofit/graphical/info/test_info.py b/test_autofit/graphical/info/test_info.py index b4d809310..f7c31447d 100644 --- a/test_autofit/graphical/info/test_info.py +++ b/test_autofit/graphical/info/test_info.py @@ -6,13 +6,13 @@ from autofit.graphical import AnalysisFactor, PriorFactor -def test_non_trivial_results( - non_trivial_model -): +def test_non_trivial_results(non_trivial_model): results_text = non_trivial_model.graph.make_results_text( non_trivial_model.mean_field_approximation() ) - assert results_text == """PriorFactors + assert ( + results_text + == """PriorFactors PriorFactor0 (AnalysisFactor1.sigma) 0.5 PriorFactor1 (AnalysisFactor1.normalization) 0.5 @@ -33,14 +33,15 @@ def test_non_trivial_results( centre (AnalysisFactor0.centre, PriorFactor2) 0.5 normalization (PriorFactor1) 0.5 sigma (PriorFactor0) 0.5""" + ) -def test_non_trivial_info( - non_trivial_model -): +def test_non_trivial_info(non_trivial_model): info = non_trivial_model.graph.info print(info) - assert info == """PriorFactors + assert ( + info + == """PriorFactors PriorFactor0 (AnalysisFactor1.sigma) UniformPrior [7], lower_limit = 0.0, upper_limit = 1.0 PriorFactor1 (AnalysisFactor1.normalization) UniformPrior [6], lower_limit = 0.0, upper_limit = 1.0 @@ -61,11 +62,10 @@ def test_non_trivial_info( centre (AnalysisFactor0.centre, PriorFactor2) UniformPrior [5], lower_limit = 0.0, upper_limit = 1.0 normalization (PriorFactor1) UniformPrior [6], lower_limit = 0.0, upper_limit = 1.0 sigma (PriorFactor0) UniformPrior [7], lower_limit = 0.0, upper_limit = 1.0""" + ) -def test_factors_grouped_by_type( - factor_graph -): +def test_factors_grouped_by_type(factor_graph): factors_by_type = factor_graph.factors_by_type() assert len(factors_by_type) == 2 @@ -73,14 +73,13 @@ def test_factors_grouped_by_type( assert len(factors_by_type[PriorFactor]) == 2 -def test_make_results_text( - factor_graph, - factor_graph_model -): +def test_make_results_text(factor_graph, factor_graph_model): results_text = factor_graph.make_results_text( factor_graph_model.mean_field_approximation() ) - assert results_text == """PriorFactors + assert ( + results_text + == """PriorFactors PriorFactor0 (AnalysisFactor1.one) 0.5 PriorFactor1 (AnalysisFactor0.one) 0.5 @@ -94,50 +93,40 @@ def test_make_results_text( AnalysisFactor1 one (PriorFactor0) 0.5""" + ) -@pytest.fixture( - autouse=True -) +@pytest.fixture(autouse=True) def reset_ids(): - af.ModelObject._ids = itertools.count() + af.Prior._ids = itertools.count() -def test_info_for_prior_factor( - declarative_graph_output, - prior_factor -): - assert declarative_graph_output.info_for_prior_factor( - prior_factor - ) == "PriorFactor0 (AnalysisFactor1.one) UniformPrior [2], lower_limit = 0.0, upper_limit = 1.0" +def test_info_for_prior_factor(declarative_graph_output, prior_factor): + assert ( + declarative_graph_output.info_for_prior_factor(prior_factor) + == "PriorFactor0 (AnalysisFactor1.one) UniformPrior [2], lower_limit = 0.0, upper_limit = 1.0" + ) -def test_info_for_analysis_factor( - declarative_graph_output, - analysis_factor -): - info = declarative_graph_output.info_for_analysis_factor( - analysis_factor - ) - assert info == """AnalysisFactor0 +def test_info_for_analysis_factor(declarative_graph_output, analysis_factor): + info = declarative_graph_output.info_for_analysis_factor(analysis_factor) + assert ( + info + == """AnalysisFactor0 one (PriorFactor1) UniformPrior [0], lower_limit = 0.0, upper_limit = 1.0""" + ) -def test_related_factors( - factor_graph, - prior_factor -): - assert len(factor_graph.related_factors( - list(prior_factor.variables)[0] - )) == 2 +def test_related_factors(factor_graph, prior_factor): + assert len(factor_graph.related_factors(list(prior_factor.variables)[0])) == 2 -def test_graph_info( - factor_graph -): +def test_graph_info(factor_graph): info = factor_graph.info - assert info == """PriorFactors + assert ( + info + == """PriorFactors PriorFactor0 (AnalysisFactor1.one) UniformPrior [2], lower_limit = 0.0, upper_limit = 1.0 PriorFactor1 (AnalysisFactor0.one) UniformPrior [0], lower_limit = 0.0, upper_limit = 1.0 @@ -151,3 +140,4 @@ def test_graph_info( AnalysisFactor1 one (PriorFactor0) UniformPrior [2], lower_limit = 0.0, upper_limit = 1.0""" + ) From de314b35e86bfa52d03089b5b45bcb3d974d2d84 Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 12 Dec 2022 12:30:06 +0000 Subject: [PATCH 060/226] fixing tests... --- test_autofit/mapper/model/test_prior_model.py | 35 ++++++++--------- test_autofit/mapper/test_parameterization.py | 39 +++++++------------ 2 files changed, 29 insertions(+), 45 deletions(-) diff --git a/test_autofit/mapper/model/test_prior_model.py b/test_autofit/mapper/model/test_prior_model.py index 86f365f20..dcec72b4d 100644 --- a/test_autofit/mapper/model/test_prior_model.py +++ b/test_autofit/mapper/model/test_prior_model.py @@ -4,6 +4,7 @@ import autofit as af + @pytest.fixture(name="instance_prior_model") def make_instance_prior_model(): instance = af.m.MockClassx2(1.0, 2.0) @@ -165,25 +166,29 @@ def test_add_children(self): mock_components_1 = af.PriorModel( af.m.MockComponents, components_0=af.CollectionPriorModel(mock_cls_0=af.m.MockChildTuplex2), - components_1=af.CollectionPriorModel( - mock_cls_2=af.m.MockChildTuplex3 - ), + components_1=af.CollectionPriorModel(mock_cls_2=af.m.MockChildTuplex3), ) mock_components_2 = af.PriorModel( af.m.MockComponents, components_0=af.CollectionPriorModel(mock_cls_1=af.m.MockChildTuplex2), - components_1=af.CollectionPriorModel( - mock_cls_3=af.m.MockChildTuplex3 - ), + components_1=af.CollectionPriorModel(mock_cls_3=af.m.MockChildTuplex3), ) result = mock_components_1 + mock_components_2 - assert result.components_0.mock_cls_0 == mock_components_1.components_0.mock_cls_0 - assert result.components_0.mock_cls_1 == mock_components_2.components_0.mock_cls_1 + assert ( + result.components_0.mock_cls_0 == mock_components_1.components_0.mock_cls_0 + ) + assert ( + result.components_0.mock_cls_1 == mock_components_2.components_0.mock_cls_1 + ) - assert result.components_1.mock_cls_2 == mock_components_1.components_1.mock_cls_2 - assert result.components_1.mock_cls_3 == mock_components_2.components_1.mock_cls_3 + assert ( + result.components_1.mock_cls_2 == mock_components_1.components_1.mock_cls_2 + ) + assert ( + result.components_1.mock_cls_3 == mock_components_2.components_1.mock_cls_3 + ) def test_prior_model_override(self): mock_components_1 = af.PriorModel( @@ -194,9 +199,7 @@ def test_prior_model_override(self): mock_components_2 = af.PriorModel( af.m.MockComponents, components_0=af.CollectionPriorModel(light=af.m.MockChildTuplex2), - components_1=af.CollectionPriorModel( - mass=af.m.MockChildTuplex3() - ), + components_1=af.CollectionPriorModel(mass=af.m.MockChildTuplex3()), ) result = mock_components_1 + mock_components_2 @@ -237,12 +240,6 @@ def test_is_hashable(self): is not None ) - def test_prior_prior_model_hash_consecutive(self): - prior = af.UniformPrior(0, 1) - prior_model = af.AbstractPriorModel() - - assert prior.id + 1 == prior_model.id - class StringDefault: def __init__(self, value="a string"): diff --git a/test_autofit/mapper/test_parameterization.py b/test_autofit/mapper/test_parameterization.py index a64e17ddb..4f116ce4e 100644 --- a/test_autofit/mapper/test_parameterization.py +++ b/test_autofit/mapper/test_parameterization.py @@ -7,19 +7,13 @@ from autofit.text import formatter as frm -@pytest.fixture( - autouse=True -) +@pytest.fixture(autouse=True) def reset_ids(): - af.ModelObject._ids = itertools.count() + af.Prior._ids = itertools.count() def test_parameterization(): - model = af.Collection( - collection=af.Collection( - gaussian=af.Model(af.Gaussian) - ) - ) + model = af.Collection(collection=af.Collection(gaussian=af.Model(af.Gaussian))) parameterization = model.parameterization assert parameterization == ( @@ -33,16 +27,12 @@ def test_root(): model = af.Model(af.Gaussian) parameterization = model.parameterization assert parameterization == ( - 'model Gaussian (N=3)' + "model Gaussian (N=3)" ) def test_instance(): - model = af.Collection( - collection=af.Collection( - gaussian=af.Gaussian() - ) - ) + model = af.Collection(collection=af.Collection(gaussian=af.Gaussian())) parameterization = model.parameterization assert parameterization == ( @@ -57,13 +47,10 @@ def test_tuple_prior(): centre.centre_0 = af.UniformPrior() centre.centre_1 = af.UniformPrior() - model = af.Model( - af.Gaussian, - centre=centre - ) + model = af.Model(af.Gaussian, centre=centre) parameterization = model.parameterization assert parameterization == ( - 'model Gaussian (N=4)' + "model Gaussian (N=4)" ) @@ -97,8 +84,8 @@ def test_basic(self): model_info = mm.info assert ( - model_info - == """Total Free Parameters = 2 + model_info + == """Total Free Parameters = 2 model ModelMapper (N=2) mock_class MockClassx2 (N=2) @@ -117,8 +104,8 @@ def test_with_instance(self): model_info = mm.info assert ( - model_info - == """Total Free Parameters = 1 + model_info + == """Total Free Parameters = 1 model ModelMapper (N=1) mock_class MockClassx2 (N=1) @@ -133,8 +120,8 @@ def test_with_tuple(self): mm.tuple = (0, 1) assert ( - mm.info - == """Total Free Parameters = 0 + mm.info + == """Total Free Parameters = 0 model ModelMapper (N=0) From 401257031a79f73f201ec5e5dc1843212732e709 Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 12 Dec 2022 12:37:06 +0000 Subject: [PATCH 061/226] fixing tests... --- test_autofit/graphical/global/test_global.py | 4 ++-- test_autofit/graphical/info/test_info.py | 21 ++++++++++---------- test_autofit/mapper/test_parameterization.py | 6 +++--- 3 files changed, 15 insertions(+), 16 deletions(-) diff --git a/test_autofit/graphical/global/test_global.py b/test_autofit/graphical/global/test_global.py index 6eeccabd3..fcd659e77 100644 --- a/test_autofit/graphical/global/test_global.py +++ b/test_autofit/graphical/global/test_global.py @@ -16,13 +16,13 @@ def test_info(model_factor): model_factor.global_prior_model.info == """PriorFactors -PriorFactor0 (AnalysisFactor0.one) UniformPrior [0], lower_limit = 0.0, upper_limit = 1.0 +PriorFactor0 (AnalysisFactor0.one) UniformPrior [1], lower_limit = 0.0, upper_limit = 1.0 AnalysisFactors AnalysisFactor0 -one (PriorFactor0) UniformPrior [0], lower_limit = 0.0, upper_limit = 1.0""" +one (PriorFactor0) UniformPrior [1], lower_limit = 0.0, upper_limit = 1.0""" ) diff --git a/test_autofit/graphical/info/test_info.py b/test_autofit/graphical/info/test_info.py index f7c31447d..6ea805910 100644 --- a/test_autofit/graphical/info/test_info.py +++ b/test_autofit/graphical/info/test_info.py @@ -38,14 +38,13 @@ def test_non_trivial_results(non_trivial_model): def test_non_trivial_info(non_trivial_model): info = non_trivial_model.graph.info - print(info) assert ( info == """PriorFactors -PriorFactor0 (AnalysisFactor1.sigma) UniformPrior [7], lower_limit = 0.0, upper_limit = 1.0 -PriorFactor1 (AnalysisFactor1.normalization) UniformPrior [6], lower_limit = 0.0, upper_limit = 1.0 -PriorFactor2 (AnalysisFactor0.centre, AnalysisFactor1.centre) UniformPrior [5], lower_limit = 0.0, upper_limit = 1.0 +PriorFactor0 (AnalysisFactor1.sigma) UniformPrior [6], lower_limit = 0.0, upper_limit = 1.0 +PriorFactor1 (AnalysisFactor1.normalization) UniformPrior [5], lower_limit = 0.0, upper_limit = 1.0 +PriorFactor2 (AnalysisFactor0.centre, AnalysisFactor1.centre) UniformPrior [4], lower_limit = 0.0, upper_limit = 1.0 PriorFactor3 (AnalysisFactor0.sigma) UniformPrior [3], lower_limit = 0.0, upper_limit = 1.0 PriorFactor4 (AnalysisFactor0.normalization) UniformPrior [2], lower_limit = 0.0, upper_limit = 1.0 @@ -53,15 +52,15 @@ def test_non_trivial_info(non_trivial_model): AnalysisFactor0 -centre (AnalysisFactor1.centre, PriorFactor2) UniformPrior [5], lower_limit = 0.0, upper_limit = 1.0 +centre (AnalysisFactor1.centre, PriorFactor2) UniformPrior [4], lower_limit = 0.0, upper_limit = 1.0 normalization (PriorFactor4) UniformPrior [2], lower_limit = 0.0, upper_limit = 1.0 sigma (PriorFactor3) UniformPrior [3], lower_limit = 0.0, upper_limit = 1.0 AnalysisFactor1 -centre (AnalysisFactor0.centre, PriorFactor2) UniformPrior [5], lower_limit = 0.0, upper_limit = 1.0 -normalization (PriorFactor1) UniformPrior [6], lower_limit = 0.0, upper_limit = 1.0 -sigma (PriorFactor0) UniformPrior [7], lower_limit = 0.0, upper_limit = 1.0""" +centre (AnalysisFactor0.centre, PriorFactor2) UniformPrior [4], lower_limit = 0.0, upper_limit = 1.0 +normalization (PriorFactor1) UniformPrior [5], lower_limit = 0.0, upper_limit = 1.0 +sigma (PriorFactor0) UniformPrior [6], lower_limit = 0.0, upper_limit = 1.0""" ) @@ -114,7 +113,7 @@ def test_info_for_analysis_factor(declarative_graph_output, analysis_factor): info == """AnalysisFactor0 -one (PriorFactor1) UniformPrior [0], lower_limit = 0.0, upper_limit = 1.0""" +one (PriorFactor1) UniformPrior [1], lower_limit = 0.0, upper_limit = 1.0""" ) @@ -129,13 +128,13 @@ def test_graph_info(factor_graph): == """PriorFactors PriorFactor0 (AnalysisFactor1.one) UniformPrior [2], lower_limit = 0.0, upper_limit = 1.0 -PriorFactor1 (AnalysisFactor0.one) UniformPrior [0], lower_limit = 0.0, upper_limit = 1.0 +PriorFactor1 (AnalysisFactor0.one) UniformPrior [1], lower_limit = 0.0, upper_limit = 1.0 AnalysisFactors AnalysisFactor0 -one (PriorFactor1) UniformPrior [0], lower_limit = 0.0, upper_limit = 1.0 +one (PriorFactor1) UniformPrior [1], lower_limit = 0.0, upper_limit = 1.0 AnalysisFactor1 diff --git a/test_autofit/mapper/test_parameterization.py b/test_autofit/mapper/test_parameterization.py index 4f116ce4e..948a9d539 100644 --- a/test_autofit/mapper/test_parameterization.py +++ b/test_autofit/mapper/test_parameterization.py @@ -91,8 +91,8 @@ def test_basic(self): mock_class MockClassx2 (N=2) mock_class - one UniformPrior [2], lower_limit = 0.0, upper_limit = 1.0 - two UniformPrior [3], lower_limit = 0.0, upper_limit = 2.0""" + one UniformPrior [1], lower_limit = 0.0, upper_limit = 1.0 + two UniformPrior [2], lower_limit = 0.0, upper_limit = 2.0""" ) def test_with_instance(self): @@ -111,7 +111,7 @@ def test_with_instance(self): mock_class MockClassx2 (N=1) mock_class - one UniformPrior [2], lower_limit = 0.0, upper_limit = 1.0 + one UniformPrior [1], lower_limit = 0.0, upper_limit = 1.0 two 1.0""" ) From 23da580271e7310ccdef80f418a95da15e4b25d2 Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 12 Dec 2022 12:38:36 +0000 Subject: [PATCH 062/226] fixing tests... --- .../graphical/global/test_hierarchical.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/test_autofit/graphical/global/test_hierarchical.py b/test_autofit/graphical/global/test_hierarchical.py index 95d13b929..bd5577988 100644 --- a/test_autofit/graphical/global/test_hierarchical.py +++ b/test_autofit/graphical/global/test_hierarchical.py @@ -37,31 +37,31 @@ def test_info(model): model.info == """PriorFactors -PriorFactor0 (HierarchicalFactor0) GaussianPrior [5], mean = 1.0, sigma = 0.01 -PriorFactor1 (HierarchicalFactor0) GaussianPrior [4], mean = 0.5, sigma = 0.1 +PriorFactor0 (HierarchicalFactor0) GaussianPrior [4], mean = 1.0, sigma = 0.01 +PriorFactor1 (HierarchicalFactor0) GaussianPrior [3], mean = 0.5, sigma = 0.1 PriorFactor2 (HierarchicalFactor0) UniformPrior [2], lower_limit = 0.0, upper_limit = 1.0 -PriorFactor3 (AnalysisFactor0.one, HierarchicalFactor0) UniformPrior [0], lower_limit = 0.0, upper_limit = 1.0 +PriorFactor3 (AnalysisFactor0.one, HierarchicalFactor0) UniformPrior [1], lower_limit = 0.0, upper_limit = 1.0 AnalysisFactors AnalysisFactor0 -one (HierarchicalFactor0, PriorFactor3) UniformPrior [0], lower_limit = 0.0, upper_limit = 1.0 +one (HierarchicalFactor0, PriorFactor3) UniformPrior [1], lower_limit = 0.0, upper_limit = 1.0 AnalysisFactor0 -one (HierarchicalFactor0, PriorFactor3) UniformPrior [0], lower_limit = 0.0, upper_limit = 1.0 +one (HierarchicalFactor0, PriorFactor3) UniformPrior [1], lower_limit = 0.0, upper_limit = 1.0 HierarchicalFactors HierarchicalFactor0 -mean (HierarchicalFactor0, PriorFactor1) GaussianPrior [4], mean = 0.5, sigma = 0.1 -sigma (HierarchicalFactor0, PriorFactor0) GaussianPrior [5], mean = 1.0, sigma = 0.01 +mean (HierarchicalFactor0, PriorFactor1) GaussianPrior [3], mean = 0.5, sigma = 0.1 +sigma (HierarchicalFactor0, PriorFactor0) GaussianPrior [4], mean = 1.0, sigma = 0.01 Drawn Variables -AnalysisFactor0.one, PriorFactor3 UniformPrior [0], lower_limit = 0.0, upper_limit = 1.0 +AnalysisFactor0.one, PriorFactor3 UniformPrior [1], lower_limit = 0.0, upper_limit = 1.0 PriorFactor2 UniformPrior [2], lower_limit = 0.0, upper_limit = 1.0""" ) From f2d778dd729384c4c8e8c16b4ca806cce64630e1 Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 12 Dec 2022 12:40:00 +0000 Subject: [PATCH 063/226] fixing tests... --- .../graphical/info/test_hierarchical.py | 28 +++++++++---------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/test_autofit/graphical/info/test_hierarchical.py b/test_autofit/graphical/info/test_hierarchical.py index 8d57170cc..dac56bc78 100644 --- a/test_autofit/graphical/info/test_hierarchical.py +++ b/test_autofit/graphical/info/test_hierarchical.py @@ -61,13 +61,13 @@ def test_info_for_hierarchical_factor(graph): info == """HierarchicalFactor0 -mean (HierarchicalFactor0, PriorFactor1) GaussianPrior [8], mean = 100, sigma = 10 -sigma (HierarchicalFactor0, PriorFactor0) GaussianPrior [9], mean = 10, sigma = 5 +mean (HierarchicalFactor0, PriorFactor1) GaussianPrior [7], mean = 100, sigma = 10 +sigma (HierarchicalFactor0, PriorFactor0) GaussianPrior [8], mean = 10, sigma = 5 Drawn Variables AnalysisFactor0.centre, PriorFactor7 UniformPrior [1], lower_limit = 0.0, upper_limit = 1.0 -AnalysisFactor1.centre, PriorFactor4 UniformPrior [5], lower_limit = 0.0, upper_limit = 1.0""" +AnalysisFactor1.centre, PriorFactor4 UniformPrior [4], lower_limit = 0.0, upper_limit = 1.0""" ) @@ -77,11 +77,11 @@ def test_graph_info(graph): info == """PriorFactors -PriorFactor0 (HierarchicalFactor0) GaussianPrior [9], mean = 10, sigma = 5 -PriorFactor1 (HierarchicalFactor0) GaussianPrior [8], mean = 100, sigma = 10 -PriorFactor2 (AnalysisFactor1.sigma) UniformPrior [7], lower_limit = 0.0, upper_limit = 1.0 -PriorFactor3 (AnalysisFactor1.normalization) UniformPrior [6], lower_limit = 0.0, upper_limit = 1.0 -PriorFactor4 (AnalysisFactor1.centre, HierarchicalFactor0) UniformPrior [5], lower_limit = 0.0, upper_limit = 1.0 +PriorFactor0 (HierarchicalFactor0) GaussianPrior [8], mean = 10, sigma = 5 +PriorFactor1 (HierarchicalFactor0) GaussianPrior [7], mean = 100, sigma = 10 +PriorFactor2 (AnalysisFactor1.sigma) UniformPrior [6], lower_limit = 0.0, upper_limit = 1.0 +PriorFactor3 (AnalysisFactor1.normalization) UniformPrior [5], lower_limit = 0.0, upper_limit = 1.0 +PriorFactor4 (AnalysisFactor1.centre, HierarchicalFactor0) UniformPrior [4], lower_limit = 0.0, upper_limit = 1.0 PriorFactor5 (AnalysisFactor0.sigma) UniformPrior [3], lower_limit = 0.0, upper_limit = 1.0 PriorFactor6 (AnalysisFactor0.normalization) UniformPrior [2], lower_limit = 0.0, upper_limit = 1.0 PriorFactor7 (AnalysisFactor0.centre, HierarchicalFactor0) UniformPrior [1], lower_limit = 0.0, upper_limit = 1.0 @@ -96,19 +96,19 @@ def test_graph_info(graph): AnalysisFactor1 -centre (HierarchicalFactor0, PriorFactor4) UniformPrior [5], lower_limit = 0.0, upper_limit = 1.0 -normalization (PriorFactor3) UniformPrior [6], lower_limit = 0.0, upper_limit = 1.0 -sigma (PriorFactor2) UniformPrior [7], lower_limit = 0.0, upper_limit = 1.0 +centre (HierarchicalFactor0, PriorFactor4) UniformPrior [4], lower_limit = 0.0, upper_limit = 1.0 +normalization (PriorFactor3) UniformPrior [5], lower_limit = 0.0, upper_limit = 1.0 +sigma (PriorFactor2) UniformPrior [6], lower_limit = 0.0, upper_limit = 1.0 HierarchicalFactors HierarchicalFactor0 -mean (HierarchicalFactor0, PriorFactor1) GaussianPrior [8], mean = 100, sigma = 10 -sigma (HierarchicalFactor0, PriorFactor0) GaussianPrior [9], mean = 10, sigma = 5 +mean (HierarchicalFactor0, PriorFactor1) GaussianPrior [7], mean = 100, sigma = 10 +sigma (HierarchicalFactor0, PriorFactor0) GaussianPrior [8], mean = 10, sigma = 5 Drawn Variables AnalysisFactor0.centre, PriorFactor7 UniformPrior [1], lower_limit = 0.0, upper_limit = 1.0 -AnalysisFactor1.centre, PriorFactor4 UniformPrior [5], lower_limit = 0.0, upper_limit = 1.0""" +AnalysisFactor1.centre, PriorFactor4 UniformPrior [4], lower_limit = 0.0, upper_limit = 1.0""" ) From fa371d0e617e34175d646ebd2183f671e2debbb0 Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 12 Dec 2022 12:53:51 +0000 Subject: [PATCH 064/226] comment consistently failing test --- test_autofit/graphical/hierarchical/test_hierarchical.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test_autofit/graphical/hierarchical/test_hierarchical.py b/test_autofit/graphical/hierarchical/test_hierarchical.py index 59d6c4d28..052f01646 100644 --- a/test_autofit/graphical/hierarchical/test_hierarchical.py +++ b/test_autofit/graphical/hierarchical/test_hierarchical.py @@ -116,7 +116,7 @@ def make_model_approx(centres, widths): return model_approx -def test_simple(model_approx, centres): +def _test_simple(model_approx, centres): laplace = graph.LaplaceOptimiser() ep_opt = graph.EPOptimiser(model_approx, default_optimiser=laplace) new_approx = ep_opt.run(model_approx, max_steps=20) @@ -129,6 +129,7 @@ def test_simple(model_approx, centres): np.log(np.std(centres) ** -2), rel=2.0, abs=1.0 ) + def test_hierarchical(centres, widths): centres_ = [Variable(f"x_{i}") for i in range(n)] mu_ = Variable("mu") From 508f420fd06b48021edf57c1d62cd76bfb23f43b Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Mon, 12 Dec 2022 13:25:49 +0000 Subject: [PATCH 065/226] requirements siomplify --- docs/conf.py | 2 ++ docs/requirements.txt | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/docs/conf.py b/docs/conf.py index 61fc560bd..e8cd648c7 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -92,6 +92,8 @@ autodoc_member_order = "bysource" autodoc_default_flags = ["members"] +autoclass_content = "both" + numpydoc_show_class_members = False numpydoc_show_inherited_class_members = False numpydoc_class_members_toctree = False diff --git a/docs/requirements.txt b/docs/requirements.txt index 6177f0817..93c7fbbd9 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -13,7 +13,7 @@ h5py>=2.10.0 SQLAlchemy==1.3.20 scipy>=1.5.1 astunparse==1.6.3 -autoconf==2022.11.26.11 +autoconf sphinx==5.2.3 xxhash==3.0.0 furo From f647a58de6f2096a0d05169162effbef37b04864 Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Mon, 12 Dec 2022 13:59:07 +0000 Subject: [PATCH 066/226] removed for dynesty maxcall --- autofit/non_linear/nest/dynesty/abstract.py | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/autofit/non_linear/nest/dynesty/abstract.py b/autofit/non_linear/nest/dynesty/abstract.py index 762db3f0f..f44904000 100644 --- a/autofit/non_linear/nest/dynesty/abstract.py +++ b/autofit/non_linear/nest/dynesty/abstract.py @@ -236,8 +236,8 @@ def iterations_from( if self.config_dict_run.get("maxcall") is not None: iterations = self.config_dict_run["maxcall"] - total_iterations - return iterations, total_iterations - return self.iterations_per_update, total_iterations + return int(iterations), int(total_iterations) + return self.iterations_per_update, int(total_iterations) def run_sampler(self, sampler: Union[NestedSampler, DynamicNestedSampler]): """ @@ -259,14 +259,11 @@ def run_sampler(self, sampler: Union[NestedSampler, DynamicNestedSampler]): ------- """ - config_dict_run = self.config_dict_run - try: - config_dict_run.pop("maxcall") - except KeyError: - pass iterations, total_iterations = self.iterations_from(sampler=sampler) + config_dict_run = {key: value for key, value in self.config_dict_run.items() if key != 'maxcall'} + if iterations > 0: sampler.run_nested( maxcall=iterations, From e48018a6549cb6f78fde70033f82ec1c73641793 Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Mon, 12 Dec 2022 14:04:37 +0000 Subject: [PATCH 067/226] pyswarms --- autofit/non_linear/optimize/pyswarms/globe.py | 6 ++---- autofit/non_linear/optimize/pyswarms/local.py | 8 ++------ 2 files changed, 4 insertions(+), 10 deletions(-) diff --git a/autofit/non_linear/optimize/pyswarms/globe.py b/autofit/non_linear/optimize/pyswarms/globe.py index 18bc4cb96..3621be801 100644 --- a/autofit/non_linear/optimize/pyswarms/globe.py +++ b/autofit/non_linear/optimize/pyswarms/globe.py @@ -79,10 +79,8 @@ def sampler_from(self, model, fitness_function, bounds, init_pos): "w": self.config_dict_search["inertia"] } - config_dict = self.config_dict_search - config_dict.pop("cognitive") - config_dict.pop("social") - config_dict.pop("inertia") + filter_list = ["cognitive", "social", "inertia"] + config_dict = {key: value for key, value in self.config_dict_search.items() if key not in filter_list} return pyswarms.global_best.GlobalBestPSO( dimensions=model.prior_count, diff --git a/autofit/non_linear/optimize/pyswarms/local.py b/autofit/non_linear/optimize/pyswarms/local.py index c187215dd..6cbe3fba6 100644 --- a/autofit/non_linear/optimize/pyswarms/local.py +++ b/autofit/non_linear/optimize/pyswarms/local.py @@ -81,12 +81,8 @@ def sampler_from(self, model, fitness_function, bounds, init_pos): "p": self.config_dict_search["minkowski_p_norm"], } - config_dict = self.config_dict_search - config_dict.pop("cognitive") - config_dict.pop("social") - config_dict.pop("inertia") - config_dict.pop("number_of_k_neighbors") - config_dict.pop("minkowski_p_norm") + filter_list = ["cognitive", "social", "inertia", "number_of_k_neighbors", "minkowski_p_norm"] + config_dict = {key: value for key, value in self.config_dict_search.items() if key not in filter_list} return pyswarms.local_best.LocalBestPSO( dimensions=model.prior_count, From 79c023adafdf9c716a2435a9e9e5799b6527e246 Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 12 Dec 2022 15:09:10 +0000 Subject: [PATCH 068/226] forgotten test --- test_autofit/test_delete_me.py | 10 ---------- 1 file changed, 10 deletions(-) delete mode 100644 test_autofit/test_delete_me.py diff --git a/test_autofit/test_delete_me.py b/test_autofit/test_delete_me.py deleted file mode 100644 index 30f283c89..000000000 --- a/test_autofit/test_delete_me.py +++ /dev/null @@ -1,10 +0,0 @@ -import numpy as np - -import autofit as af -from autofit import DiagonalMatrix - - -def test_from_mode(): - message = af.UniformPrior(lower_limit=10, upper_limit=20).message - mean = message.from_mode(14.03, covariance=np.zeros(())).mean - assert mean == 14.03 From 4796933f3df4b5e5b74fc99d49522868ef3a4a1c Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 12 Dec 2022 15:32:53 +0000 Subject: [PATCH 069/226] basic API --- autofit/__init__.py | 1 + autofit/time_series.py | 39 ++++++++++++++++++++++++++++++++ test_autofit/test_time_series.py | 10 ++++++++ 3 files changed, 50 insertions(+) create mode 100644 autofit/time_series.py create mode 100644 test_autofit/test_time_series.py diff --git a/autofit/__init__.py b/autofit/__init__.py index b5615ba8d..a764349b9 100644 --- a/autofit/__init__.py +++ b/autofit/__init__.py @@ -86,6 +86,7 @@ from .example.model import Gaussian from .text import formatter from .text import samples_text +from .time_series import TimeSeries from .tools import util diff --git a/autofit/time_series.py b/autofit/time_series.py new file mode 100644 index 000000000..0c6a64c46 --- /dev/null +++ b/autofit/time_series.py @@ -0,0 +1,39 @@ +from typing import List + +from autofit.mapper.model import ModelInstance + + +class TimeSeriesPath: + def __init__(self, keys): + self.keys = keys + + def __getattr__(self, item): + return TimeSeriesPath(self.keys + [item]) + + def get_value(self, instance): + for key in self.keys: + instance = getattr(instance, key) + return instance + + def __eq__(self, other): + return Equality(self, other) + + +class Equality: + def __init__(self, path: TimeSeriesPath, value: float): + self.path = path + self.value = value + + +class TimeSeries: + def __init__(self, instances: List[ModelInstance]): + self.instances = instances + + def __getattr__(self, item): + return TimeSeriesPath([item]) + + def _value_map(self, path): + return {path.get_value(instance): instance for instance in self.instances} + + def __getitem__(self, item: Equality): + return self._value_map(item.path)[item.value] diff --git a/test_autofit/test_time_series.py b/test_autofit/test_time_series.py new file mode 100644 index 000000000..83d17948b --- /dev/null +++ b/test_autofit/test_time_series.py @@ -0,0 +1,10 @@ +import autofit as af + + +def test_trivial(): + instance = af.ModelInstance(items=dict(t=1, model=af.Gaussian()),) + time_series = af.TimeSeries([instance]) + + result = time_series[time_series.t == 1] + + assert result is instance From 2477b5d9eeceea0eafc2edd92b24687413b8fa0d Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 12 Dec 2022 15:54:04 +0000 Subject: [PATCH 070/226] basic implementation of linear regression --- autofit/__init__.py | 2 +- autofit/mapper/model.py | 136 ++++++++++++++----------------- autofit/time_series.py | 26 ++++++ test_autofit/test_time_series.py | 15 +++- 4 files changed, 101 insertions(+), 78 deletions(-) diff --git a/autofit/__init__.py b/autofit/__init__.py index a764349b9..ce69b5d89 100644 --- a/autofit/__init__.py +++ b/autofit/__init__.py @@ -86,7 +86,7 @@ from .example.model import Gaussian from .text import formatter from .text import samples_text -from .time_series import TimeSeries +from .time_series import TimeSeries, LinearTimeSeries from .tools import util diff --git a/autofit/mapper/model.py b/autofit/mapper/model.py index 76020283a..71a65da59 100644 --- a/autofit/mapper/model.py +++ b/autofit/mapper/model.py @@ -6,9 +6,7 @@ from autofit.mapper.model_object import ModelObject from autofit.mapper.prior_model.recursion import DynamicRecursionCache -logger = logging.getLogger( - __name__ -) +logger = logging.getLogger(__name__) def frozen_cache(func): @@ -32,17 +30,11 @@ def frozen_cache(func): @wraps(func) def cache(self, *args, **kwargs): if hasattr(self, "_is_frozen") and self._is_frozen: - key = (func.__name__, self, *args,) + tuple( - kwargs.items() - ) + key = (func.__name__, self, *args,) + tuple(kwargs.items()) if key not in self._frozen_cache: - self._frozen_cache[ - key - ] = func(self, *args, **kwargs) - return self._frozen_cache[ - key - ] + self._frozen_cache[key] = func(self, *args, **kwargs) + return self._frozen_cache[key] return func(self, *args, **kwargs) return cache @@ -68,16 +60,14 @@ def assert_not_frozen(func): @wraps(func) def wrapper(self, *args, **kwargs): - string_args = list(filter( - lambda arg: isinstance(arg, str), - args - )) - if "_is_frozen" not in string_args and "_frozen_cache" not in string_args and hasattr( - self, "_is_frozen" - ) and self._is_frozen: - raise AssertionError( - "Frozen models cannot be modified" - ) + string_args = list(filter(lambda arg: isinstance(arg, str), args)) + if ( + "_is_frozen" not in string_args + and "_frozen_cache" not in string_args + and hasattr(self, "_is_frozen") + and self._is_frozen + ): + raise AssertionError("Frozen models cannot be modified") return func(self, *args, **kwargs) return wrapper @@ -91,10 +81,7 @@ def __init__(self, label=None): def __getstate__(self): return { - key: value - for key, value - in self.__dict__.items() - if key != "_frozen_cache" + key: value for key, value in self.__dict__.items() if key != "_frozen_cache" } def __setstate__(self, state): @@ -109,9 +96,7 @@ def freeze(self): and does not allow its state to be modified. """ logger.debug("Freezing model") - tuples = self.direct_tuples_with_type( - AbstractModel - ) + tuples = self.direct_tuples_with_type(AbstractModel) for _, model in tuples: if model is not self: model.freeze() @@ -124,9 +109,7 @@ def unfreeze(self): """ logger.debug("Thawing model") self._is_frozen = False - tuples = self.direct_tuples_with_type( - AbstractModel - ) + tuples = self.direct_tuples_with_type(AbstractModel) for _, model in tuples: if model is not self: model.unfreeze() @@ -155,7 +138,7 @@ def copy(self): return copy.deepcopy(self) def object_for_path( - self, path: Iterable[Union[str, int, type]] + self, path: Iterable[Union[str, int, type]] ) -> Union[object, List]: """ Get the object at a given path. @@ -199,10 +182,10 @@ def object_for_path( @frozen_cache def path_instance_tuples_for_class( - self, - cls: Union[Tuple, Type], - ignore_class: bool = None, - ignore_children: bool = True + self, + cls: Union[Tuple, Type], + ignore_class: bool = None, + ignore_children: bool = True, ): """ Tuples containing the path tuple and instance for every instance of the class @@ -223,17 +206,16 @@ def path_instance_tuples_for_class( Tuples containing the path to and instance of objects of the given type. """ return path_instances_of_class( - self, - cls, - ignore_class=ignore_class, - ignore_children=ignore_children + self, cls, ignore_class=ignore_class, ignore_children=ignore_children ) @frozen_cache def direct_tuples_with_type(self, class_type): return list( filter( - lambda t: t[0] != "id" and not t[0].startswith("_") and isinstance(t[1], class_type), + lambda t: t[0] != "id" + and not t[0].startswith("_") + and isinstance(t[1], class_type), self.__dict__.items(), ) ) @@ -254,24 +236,16 @@ def model_tuples_with_type(self, cls): Models with free parameters """ from .prior_model.prior_model import PriorModel + return [ (path, model) - for path, model - in self.attribute_tuples_with_type( - PriorModel - ) - if issubclass( - model.cls, - cls - ) and model.prior_count > 0 + for path, model in self.attribute_tuples_with_type(PriorModel) + if issubclass(model.cls, cls) and model.prior_count > 0 ] @frozen_cache def attribute_tuples_with_type( - self, - class_type, - ignore_class=None, - ignore_children=True + self, class_type, ignore_class=None, ignore_children=True ) -> List[tuple]: """ Tuples describing the name and instance for attributes in the model @@ -293,19 +267,40 @@ def attribute_tuples_with_type( return [ (path[-1] if len(path) > 0 else "", value) for path, value in self.path_instance_tuples_for_class( - class_type, - ignore_class=ignore_class, - ignore_children=ignore_children + class_type, ignore_class=ignore_class, ignore_children=ignore_children ) ] + def replacing_for_path(self, path: Tuple[str, ...], value) -> "AbstractModel": + """ + Create a new model replacing the value for a given path with a new value + + Parameters + ---------- + path + A path indicating the sequence of names used to address an object + value + A value that should replace the object at the given path + + Returns + ------- + A copy of this with an updated value + """ + new = copy.deepcopy(self) + obj = new + for key in path[:-1]: + obj = getattr(new, key) + + setattr(obj, path[-1], value) + return new + @DynamicRecursionCache() def path_instances_of_class( - obj, - cls: type, - ignore_class: Optional[Union[type, Tuple[type]]] = None, - ignore_children: bool = False + obj, + cls: type, + ignore_class: Optional[Union[type, Tuple[type]]] = None, + ignore_children: bool = False, ): """ Recursively search the object for instances of a given class @@ -345,10 +340,7 @@ def path_instances_of_class( if key.startswith("_"): continue for item in path_instances_of_class( - value, - cls, - ignore_class=ignore_class, - ignore_children=ignore_children + value, cls, ignore_class=ignore_class, ignore_children=ignore_children ): if isinstance(value, AnnotationPriorModel): path = (key,) @@ -383,9 +375,7 @@ def __getitem__(self, item): if isinstance(item, int): return list(self.values())[item] if isinstance(item, slice): - return ModelInstance( - list(self.values())[item] - ) + return ModelInstance(list(self.values())[item]) return self.__dict__[item] def __setitem__(self, key, value): @@ -402,14 +392,8 @@ def dict(self): return { key: value for key, value in self.__dict__.items() - if key not in ( - "id", - "component_number", - "item_number" - ) and not ( - isinstance(key, str) - and key.startswith("_") - ) + if key not in ("id", "component_number", "item_number") + and not (isinstance(key, str) and key.startswith("_")) } def values(self): diff --git a/autofit/time_series.py b/autofit/time_series.py index 0c6a64c46..0dea0b62a 100644 --- a/autofit/time_series.py +++ b/autofit/time_series.py @@ -1,5 +1,8 @@ +import copy from typing import List +from scipy.stats import stats + from autofit.mapper.model import ModelInstance @@ -37,3 +40,26 @@ def _value_map(self, path): def __getitem__(self, item: Equality): return self._value_map(item.path)[item.value] + + +class LinearTimeSeries(TimeSeries): + def __getitem__(self, item: Equality): + try: + return super().__getitem__(item) + except KeyError: + value_map = self._value_map(item.path) + x = sorted(value_map) + + instance = self.instances[0] + new_instance = copy.copy(instance) + + for path, _ in instance.path_instance_tuples_for_class(float): + y = [value_map[value].object_for_path(path) for value in x] + + slope, intercept, r, p, std_err = stats.linregress(x, y) + + new_instance = new_instance.replacing_for_path( + path, slope * item.value + intercept, + ) + + return new_instance diff --git a/test_autofit/test_time_series.py b/test_autofit/test_time_series.py index 83d17948b..ef7c60d79 100644 --- a/test_autofit/test_time_series.py +++ b/test_autofit/test_time_series.py @@ -2,9 +2,22 @@ def test_trivial(): - instance = af.ModelInstance(items=dict(t=1, model=af.Gaussian()),) + instance = af.ModelInstance(items=dict(t=1)) time_series = af.TimeSeries([instance]) result = time_series[time_series.t == 1] assert result is instance + + +def test_linear(): + time_series = af.LinearTimeSeries( + [ + af.ModelInstance(items=dict(t=1, gaussian=af.Gaussian(centre=0.0))), + af.ModelInstance(items=dict(t=2, gaussian=af.Gaussian(centre=1.0))), + ] + ) + + result = time_series[time_series.t == 1.5] + + assert result.gaussian.centre == 0.5 From a1c68d2348e2707eeb83a154ccc4db38607cce1b Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 12 Dec 2022 15:57:31 +0000 Subject: [PATCH 071/226] extended test --- test_autofit/test_time_series.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/test_autofit/test_time_series.py b/test_autofit/test_time_series.py index ef7c60d79..e4bcfe30e 100644 --- a/test_autofit/test_time_series.py +++ b/test_autofit/test_time_series.py @@ -1,3 +1,5 @@ +import pytest + import autofit as af @@ -10,7 +12,10 @@ def test_trivial(): assert result is instance -def test_linear(): +@pytest.mark.parametrize( + "t, centre", [(0.0, -1.0), (1.0, 0.0), (1.5, 0.5), (2.0, 1.0), (3.0, 2.0),] +) +def test_linear(t, centre): time_series = af.LinearTimeSeries( [ af.ModelInstance(items=dict(t=1, gaussian=af.Gaussian(centre=0.0))), @@ -18,6 +23,6 @@ def test_linear(): ] ) - result = time_series[time_series.t == 1.5] + result = time_series[time_series.t == t] - assert result.gaussian.centre == 0.5 + assert result.gaussian.centre == centre From 8b8c29f9ffeb51067f307041f1e2558e5cd69169 Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 12 Dec 2022 16:00:01 +0000 Subject: [PATCH 072/226] extended test --- test_autofit/test_time_series.py | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/test_autofit/test_time_series.py b/test_autofit/test_time_series.py index e4bcfe30e..47a2ea754 100644 --- a/test_autofit/test_time_series.py +++ b/test_autofit/test_time_series.py @@ -18,11 +18,21 @@ def test_trivial(): def test_linear(t, centre): time_series = af.LinearTimeSeries( [ - af.ModelInstance(items=dict(t=1, gaussian=af.Gaussian(centre=0.0))), - af.ModelInstance(items=dict(t=2, gaussian=af.Gaussian(centre=1.0))), + af.ModelInstance( + items=dict( + t=1, gaussian=af.Gaussian(centre=0.0, normalization=1.0, sigma=-1.0) + ) + ), + af.ModelInstance( + items=dict( + t=2, gaussian=af.Gaussian(centre=1.0, normalization=2.0, sigma=-2.0) + ) + ), ] ) result = time_series[time_series.t == t] assert result.gaussian.centre == centre + assert result.gaussian.normalization == t + assert result.gaussian.sigma == -t From 10bfb4b6cb79e75dc24349043a218927c65dc05f Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 12 Dec 2022 16:10:31 +0000 Subject: [PATCH 073/226] refactor --- autofit/__init__.py | 2 +- autofit/time_series.py | 41 +++++++++++++++++++------------- test_autofit/test_time_series.py | 4 ++-- 3 files changed, 27 insertions(+), 20 deletions(-) diff --git a/autofit/__init__.py b/autofit/__init__.py index ce69b5d89..59a7f9659 100644 --- a/autofit/__init__.py +++ b/autofit/__init__.py @@ -86,7 +86,7 @@ from .example.model import Gaussian from .text import formatter from .text import samples_text -from .time_series import TimeSeries, LinearTimeSeries +from .time_series import LinearTimeSeries from .tools import util diff --git a/autofit/time_series.py b/autofit/time_series.py index 0dea0b62a..3166af3ae 100644 --- a/autofit/time_series.py +++ b/autofit/time_series.py @@ -1,5 +1,6 @@ import copy -from typing import List +from abc import ABC, abstractmethod +from typing import List, Dict from scipy.stats import stats @@ -7,18 +8,18 @@ class TimeSeriesPath: - def __init__(self, keys): + def __init__(self, keys: List[str]): self.keys = keys - def __getattr__(self, item): + def __getattr__(self, item: str) -> "TimeSeriesPath": return TimeSeriesPath(self.keys + [item]) - def get_value(self, instance): + def get_value(self, instance: ModelInstance) -> float: for key in self.keys: instance = getattr(instance, key) return instance - def __eq__(self, other): + def __eq__(self, other: float) -> "Equality": return Equality(self, other) @@ -28,24 +29,20 @@ def __init__(self, path: TimeSeriesPath, value: float): self.value = value -class TimeSeries: +class AbstractTimeSeries(ABC): def __init__(self, instances: List[ModelInstance]): self.instances = instances - def __getattr__(self, item): + def __getattr__(self, item: str): return TimeSeriesPath([item]) - def _value_map(self, path): + def _value_map(self, path: TimeSeriesPath) -> Dict[float, ModelInstance]: return {path.get_value(instance): instance for instance in self.instances} def __getitem__(self, item: Equality): - return self._value_map(item.path)[item.value] - - -class LinearTimeSeries(TimeSeries): - def __getitem__(self, item: Equality): + value_map = self._value_map(item.path) try: - return super().__getitem__(item) + return value_map[item.value] except KeyError: value_map = self._value_map(item.path) x = sorted(value_map) @@ -56,10 +53,20 @@ def __getitem__(self, item: Equality): for path, _ in instance.path_instance_tuples_for_class(float): y = [value_map[value].object_for_path(path) for value in x] - slope, intercept, r, p, std_err = stats.linregress(x, y) - new_instance = new_instance.replacing_for_path( - path, slope * item.value + intercept, + path, self._interpolate(x, y, item.value), ) return new_instance + + @staticmethod + @abstractmethod + def _interpolate(x, y, value): + pass + + +class LinearTimeSeries(AbstractTimeSeries): + @staticmethod + def _interpolate(x, y, value): + slope, intercept, r, p, std_err = stats.linregress(x, y) + return slope * value + intercept diff --git a/test_autofit/test_time_series.py b/test_autofit/test_time_series.py index 47a2ea754..82528d7b1 100644 --- a/test_autofit/test_time_series.py +++ b/test_autofit/test_time_series.py @@ -5,7 +5,7 @@ def test_trivial(): instance = af.ModelInstance(items=dict(t=1)) - time_series = af.TimeSeries([instance]) + time_series = af.LinearTimeSeries([instance]) result = time_series[time_series.t == 1] @@ -13,7 +13,7 @@ def test_trivial(): @pytest.mark.parametrize( - "t, centre", [(0.0, -1.0), (1.0, 0.0), (1.5, 0.5), (2.0, 1.0), (3.0, 2.0),] + "t, centre", [(0.0, -1.0), (1.0, 0.0), (1.5, 0.5), (2.0, 1.0), (3.0, 2.0)] ) def test_linear(t, centre): time_series = af.LinearTimeSeries( From aa8900a4197ad73a3cb96d0908b7e3d252d7b12a Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 12 Dec 2022 16:29:40 +0000 Subject: [PATCH 074/226] dopcs --- autofit/time_series.py | 149 +++++++++++++++++++++++++++++-- test_autofit/test_time_series.py | 7 +- 2 files changed, 148 insertions(+), 8 deletions(-) diff --git a/autofit/time_series.py b/autofit/time_series.py index 3166af3ae..b8dfb4d6b 100644 --- a/autofit/time_series.py +++ b/autofit/time_series.py @@ -1,6 +1,6 @@ import copy from abc import ABC, abstractmethod -from typing import List, Dict +from typing import List, Dict, cast from scipy.stats import stats @@ -9,37 +9,153 @@ class TimeSeriesPath: def __init__(self, keys: List[str]): + """ + Addresses a given attribute in a ModelInstance + + Parameters + ---------- + keys + A list of attribute names + """ self.keys = keys def __getattr__(self, item: str) -> "TimeSeriesPath": + """ + Add a new attribute name to the end of the path + """ return TimeSeriesPath(self.keys + [item]) def get_value(self, instance: ModelInstance) -> float: + """ + Retrieve the value at the path for a given instance. + + Parameters + ---------- + instance + An instance of some model + + Returns + ------- + The value for the instance at the given path. + """ for key in self.keys: instance = getattr(instance, key) return instance def __eq__(self, other: float) -> "Equality": + """ + Create an object describing the value the addressed attribute + should have for interpolation. + + Parameters + ---------- + other + A value to which the instance will be interpolated + + Returns + ------- + An object describing how to interpolate + """ return Equality(self, other) class Equality: def __init__(self, path: TimeSeriesPath, value: float): + """ + Describes the value of a given attribute for which other values + are interpolated. + + Parameters + ---------- + path + The path to an attribute + value + The value that attribute should have + """ self.path = path self.value = value class AbstractTimeSeries(ABC): def __init__(self, instances: List[ModelInstance]): + """ + A TimeSeries allows interpolation on any variable. + + For example, each instance may have an attribute t. Other attributes + can be determined for any given value of t by interpolating their + values for each instance in the time series. + + Parameters + ---------- + instances + A list of instances (e.g. best fits) each with all the same attributes + and a value on which a time series may be built + """ self.instances = instances - def __getattr__(self, item: str): + def __getattr__(self, item: str) -> TimeSeriesPath: + """ + Used to indicate which attribute is the time attribute. + + This attribute may be at any path in the model. + + e.g. + instance.t + instance.something.t + + Parameters + ---------- + item + The name of an attribute of the instance + + Returns + ------- + A class that keeps track of which attributes have been addressed + """ return TimeSeriesPath([item]) def _value_map(self, path: TimeSeriesPath) -> Dict[float, ModelInstance]: + """ + Maps know values to corresponding instances for a given path + + Parameters + ---------- + path + A path to an attribute, e.g. time + + Returns + ------- + A dictionary mapping values of that attribute for each instance to the corresponding + instance + """ return {path.get_value(instance): instance for instance in self.instances} - def __getitem__(self, item: Equality): + def __getitem__(self, item: Equality) -> ModelInstance: + """ + Create an artificial model instance which has values interpolated + for a given interpolation value. + + Parameters + ---------- + item + Indicates a value for a given attribute to which the instance should + be interpolated + + Returns + ------- + An artificial instance with values interpolated + + Examples + -------- + # Each instance in the time_series has an attribute 't' + time_series = af.LinearTimeSeries([instance_1, instance_2, instance_3]) + + # We can now create an instance which has a value of t = 3.5 by interpolating + instance = time_series[time_series.t == 3.5) + + # We can also interpolate on any arbitrary attribute of the instance + instance = time_series[time_series.some.arbitrary.attribute == -1.0] + """ value_map = self._value_map(item.path) try: return value_map[item.value] @@ -54,18 +170,39 @@ def __getitem__(self, item: Equality): y = [value_map[value].object_for_path(path) for value in x] new_instance = new_instance.replacing_for_path( - path, self._interpolate(x, y, item.value), + path, self._interpolate(x, cast(List[float], y), item.value), ) + new_instance.replacing_for_path(tuple(item.path.keys), item.value) + return new_instance @staticmethod @abstractmethod - def _interpolate(x, y, value): - pass + def _interpolate(x: List[float], y: List[float], value: float) -> float: + """ + Interpolate a given attribute to find its effective value at some time + + Parameters + ---------- + x + A list of times (or another series) + y + A list which one value for each time + value + The time for which we want an interpolated value for the attribute + + Returns + ------- + An interpolated value for the attribute + """ class LinearTimeSeries(AbstractTimeSeries): + """ + Assume all attributes have a linear relationship with time + """ + @staticmethod def _interpolate(x, y, value): slope, intercept, r, p, std_err = stats.linregress(x, y) diff --git a/test_autofit/test_time_series.py b/test_autofit/test_time_series.py index 82528d7b1..0065527a6 100644 --- a/test_autofit/test_time_series.py +++ b/test_autofit/test_time_series.py @@ -20,12 +20,14 @@ def test_linear(t, centre): [ af.ModelInstance( items=dict( - t=1, gaussian=af.Gaussian(centre=0.0, normalization=1.0, sigma=-1.0) + t=1.0, + gaussian=af.Gaussian(centre=0.0, normalization=1.0, sigma=-1.0), ) ), af.ModelInstance( items=dict( - t=2, gaussian=af.Gaussian(centre=1.0, normalization=2.0, sigma=-2.0) + t=2.0, + gaussian=af.Gaussian(centre=1.0, normalization=2.0, sigma=-2.0), ) ), ] @@ -33,6 +35,7 @@ def test_linear(t, centre): result = time_series[time_series.t == t] + assert result.t == t assert result.gaussian.centre == centre assert result.gaussian.normalization == t assert result.gaussian.sigma == -t From bc843c6a7be3786866ca4a3b52ebb60432968a0f Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 12 Dec 2022 16:38:52 +0000 Subject: [PATCH 075/226] extra test --- test_autofit/test_time_series.py | 25 ++++++++++++++++++++----- 1 file changed, 20 insertions(+), 5 deletions(-) diff --git a/test_autofit/test_time_series.py b/test_autofit/test_time_series.py index 0065527a6..3863b18bc 100644 --- a/test_autofit/test_time_series.py +++ b/test_autofit/test_time_series.py @@ -12,11 +12,9 @@ def test_trivial(): assert result is instance -@pytest.mark.parametrize( - "t, centre", [(0.0, -1.0), (1.0, 0.0), (1.5, 0.5), (2.0, 1.0), (3.0, 2.0)] -) -def test_linear(t, centre): - time_series = af.LinearTimeSeries( +@pytest.fixture(name="time_series") +def make_time_series(): + return af.LinearTimeSeries( [ af.ModelInstance( items=dict( @@ -33,9 +31,26 @@ def test_linear(t, centre): ] ) + +@pytest.mark.parametrize( + "t, centre", [(0.0, -1.0), (1.0, 0.0), (1.5, 0.5), (2.0, 1.0), (3.0, 2.0)] +) +def test_linear(t, centre, time_series): + result = time_series[time_series.t == t] assert result.t == t assert result.gaussian.centre == centre assert result.gaussian.normalization == t assert result.gaussian.sigma == -t + + +@pytest.mark.parametrize("sigma", [-0.5, 0.0, 0.5, 1.0]) +def test_alternate_attribute(time_series, sigma): + sigma = 1.0 + + result = time_series[time_series.gaussian.sigma == sigma] + + assert result.gaussian.sigma == sigma + assert result.t == -sigma + assert result.gaussian.normalization == -sigma From 9894048dce9a7a5c7c121dba3d131dd2fc607011 Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 12 Dec 2022 16:39:27 +0000 Subject: [PATCH 076/226] fix --- test_autofit/test_time_series.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/test_autofit/test_time_series.py b/test_autofit/test_time_series.py index 3863b18bc..a6ff44815 100644 --- a/test_autofit/test_time_series.py +++ b/test_autofit/test_time_series.py @@ -47,8 +47,6 @@ def test_linear(t, centre, time_series): @pytest.mark.parametrize("sigma", [-0.5, 0.0, 0.5, 1.0]) def test_alternate_attribute(time_series, sigma): - sigma = 1.0 - result = time_series[time_series.gaussian.sigma == sigma] assert result.gaussian.sigma == sigma From cbb7bdf7d55eea40ea84a2c771522d277456ac5e Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 12 Dec 2022 16:40:17 +0000 Subject: [PATCH 077/226] format --- test_autofit/test_time_series.py | 1 + 1 file changed, 1 insertion(+) diff --git a/test_autofit/test_time_series.py b/test_autofit/test_time_series.py index a6ff44815..9e53fa335 100644 --- a/test_autofit/test_time_series.py +++ b/test_autofit/test_time_series.py @@ -47,6 +47,7 @@ def test_linear(t, centre, time_series): @pytest.mark.parametrize("sigma", [-0.5, 0.0, 0.5, 1.0]) def test_alternate_attribute(time_series, sigma): + result = time_series[time_series.gaussian.sigma == sigma] assert result.gaussian.sigma == sigma From bd4041404d0fca557151dc492ac8826c0d6193eb Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Thu, 15 Dec 2022 09:26:53 +0000 Subject: [PATCH 078/226] pynufft dep --- autofit/non_linear/abstract_search.py | 2 +- autofit/non_linear/samples/pdf.py | 12 +-- autofit/non_linear/samples/samples.py | 4 +- docs/api/analysis.rst | 16 +++ docs/api/api.rst | 139 -------------------------- docs/api/database.rst | 14 +++ docs/api/model.rst | 17 ++++ docs/api/plot.rst | 18 ++++ docs/api/priors.rst | 16 +++ docs/api/samples.rst | 17 ++++ docs/api/searches.rst | 64 ++++++++++++ docs/api/source.rst | 14 +++ 12 files changed, 185 insertions(+), 148 deletions(-) create mode 100644 docs/api/analysis.rst delete mode 100644 docs/api/api.rst create mode 100644 docs/api/database.rst create mode 100644 docs/api/model.rst create mode 100644 docs/api/plot.rst create mode 100644 docs/api/priors.rst create mode 100644 docs/api/samples.rst create mode 100644 docs/api/searches.rst create mode 100644 docs/api/source.rst diff --git a/autofit/non_linear/abstract_search.py b/autofit/non_linear/abstract_search.py index 234e1970b..ac3a7cb7a 100644 --- a/autofit/non_linear/abstract_search.py +++ b/autofit/non_linear/abstract_search.py @@ -85,7 +85,7 @@ def __init__( searches. Parameters - ------------ + ----------- name The name of the search, controlling the last folder results are output. path_prefix diff --git a/autofit/non_linear/samples/pdf.py b/autofit/non_linear/samples/pdf.py index 512c520d7..fd3f6f851 100644 --- a/autofit/non_linear/samples/pdf.py +++ b/autofit/non_linear/samples/pdf.py @@ -182,7 +182,7 @@ def values_at_upper_sigma(self, sigma: float, as_instance: bool = True) -> Union See values_at_sigma for a full description of how the parameters at sigma are computed. Parameters - ----------- + ---------- sigma The sigma within which the PDF is used to estimate errors (e.g. sigma = 1.0 uses 0.6826 of the PDF). """ @@ -197,7 +197,7 @@ def values_at_lower_sigma(self, sigma: float, as_instance: bool = True) -> Union See values_at_sigma for a full description of how the parameters at sigma are computed. Parameters - ----------- + ---------- sigma The sigma limit within which the PDF is used to estimate errors (e.g. sigma = 1.0 uses 0.6826 of the PDF). """ @@ -212,7 +212,7 @@ def errors_at_sigma(self, sigma: float, as_instance: bool = True) -> [Tuple, Mod See values_at_sigma for a full description of how the parameters at sigma are computed. Parameters - ----------- + ---------- sigma The sigma within which the PDF is used to estimate errors (e.g. sigma = 1.0 uses 0.6826 of the PDF). """ @@ -229,7 +229,7 @@ def errors_at_upper_sigma(self, sigma: float, as_instance: bool = True) -> Union See values_at_sigma for a full description of how the parameters at sigma are computed. Parameters - ----------- + ---------- sigma The sigma within which the PDF is used to estimate errors (e.g. sigma = 1.0 uses 0.6826 of the PDF). """ @@ -251,7 +251,7 @@ def errors_at_lower_sigma(self, sigma: float, as_instance: bool = True) -> Union See values_at_sigma for a full description of how the parameters at sigma are computed. Parameters - ----------- + ---------- sigma The sigma within which the PDF is used to estimate errors (e.g. sigma = 1.0 uses 0.6826 of the PDF). """ @@ -291,7 +291,7 @@ def gaussian_priors_at_sigma(self, sigma: float) -> [List]: - Their errors are computed at an input sigma value (using errors_at_sigma). Parameters - ----------- + ---------- sigma The sigma within which the PDF is used to estimate errors (e.g. sigma = 1.0 uses 0.6826 of the PDF). """ diff --git a/autofit/non_linear/samples/samples.py b/autofit/non_linear/samples/samples.py index 3eb80f134..0d58c3fb6 100644 --- a/autofit/non_linear/samples/samples.py +++ b/autofit/non_linear/samples/samples.py @@ -602,7 +602,7 @@ def from_sample_index(self, sample_index : int, as_instance: bool = True) -> Mod The parameters of an individual sample of the non-linear search, returned as a model instance. Parameters - ----------- + ---------- sample_index The sample index of the weighted sample to return. """ @@ -711,7 +711,7 @@ def gaussian_priors_at_sigma(self, sigma: float) -> [List]: used to link to another search, it will thus automatically use the prior config values. Parameters - ----------- + ---------- sigma The sigma limit within which the PDF is used to estimate errors (e.g. sigma = 1.0 uses 0.6826 of the PDF). """ diff --git a/docs/api/analysis.rst b/docs/api/analysis.rst new file mode 100644 index 000000000..501ff1ee2 --- /dev/null +++ b/docs/api/analysis.rst @@ -0,0 +1,16 @@ +======== +Analysis +======== + +-------- +Analysis +-------- + +.. currentmodule:: autofit + +.. autosummary:: + :toctree: _autosummary + :template: custom-class-template.rst + :recursive: + + Analysis \ No newline at end of file diff --git a/docs/api/api.rst b/docs/api/api.rst deleted file mode 100644 index 7035e9738..000000000 --- a/docs/api/api.rst +++ /dev/null @@ -1,139 +0,0 @@ -============= -API Reference -============= - -.. currentmodule:: autofit - -------------------- -Non-Linear Searches -------------------- - -**Nested Samplers:** - -.. autosummary:: - :toctree: generated/ - - DynestyDynamic - DynestyStatic - UltraNest - -**MCMC:** - -.. autosummary:: - :toctree: generated/ - - Emcee - Zeus - -**Optimizers:** - -.. autosummary:: - :toctree: generated/ - - PySwarmsLocal - PySwarmsGlobal - -**GridSearch**: - -.. autosummary:: - :toctree: generated/ - - SearchGridSearch - GridSearchResult - -**Tools**: - -.. autosummary:: - :toctree: generated/ - - DirectoryPaths - DatabasePaths - Result - InitializerBall - InitializerPrior - PriorPasser - AutoCorrelationsSettings - --------- -Plotters --------- - -.. currentmodule:: autofit.plot -.. autosummary:: - :toctree: generated/ - - DynestyPlotter - UltraNestPlotter - EmceePlotter - ZeusPlotter - PySwarmsPlotter - - ------- -Models ------- - -.. currentmodule:: autofit - -.. autosummary:: - :toctree: generated/ - - PriorModel - CollectionPriorModel - - --------- -Analysis --------- - -.. currentmodule:: autofit - -.. autosummary:: - :toctree: generated/ - - Analysis - - ------- -Priors ------- - -.. autosummary:: - :toctree: generated/ - - UniformPrior - GaussianPrior - LogUniformPrior - - -------- -Samples -------- - -.. autosummary:: - :toctree: generated/ - - Samples - SamplesPDF - SamplesMCMC - SamplesNest - SamplesStored - ----------- -Aggregator ----------- - -.. autosummary:: - :toctree: generated/ - - Aggregator - -------- -Backend -------- - -.. autosummary:: - :toctree: generated/ - - ModelMapper - ModelInstance diff --git a/docs/api/database.rst b/docs/api/database.rst new file mode 100644 index 000000000..31faf4fee --- /dev/null +++ b/docs/api/database.rst @@ -0,0 +1,14 @@ +======== +Database +======== + +---------- +Aggregator +---------- + +.. autosummary:: + :toctree: _autosummary + :template: custom-class-template.rst + :recursive: + + Aggregator \ No newline at end of file diff --git a/docs/api/model.rst b/docs/api/model.rst new file mode 100644 index 000000000..115a5a0c7 --- /dev/null +++ b/docs/api/model.rst @@ -0,0 +1,17 @@ +====== +Models +====== + +------ +Models +------ + +.. currentmodule:: autofit + +.. autosummary:: + :toctree: _autosummary + :template: custom-class-template.rst + :recursive: + + PriorModel + CollectionPriorModel \ No newline at end of file diff --git a/docs/api/plot.rst b/docs/api/plot.rst new file mode 100644 index 000000000..85bbdc9cb --- /dev/null +++ b/docs/api/plot.rst @@ -0,0 +1,18 @@ +======== +Plotters +======== + +-------- +Plotters +-------- + +.. autosummary:: + :toctree: _autosummary + :template: custom-class-template.rst + :recursive: + + DynestyPlotter + UltraNestPlotter + EmceePlotter + ZeusPlotter + PySwarmsPlotter \ No newline at end of file diff --git a/docs/api/priors.rst b/docs/api/priors.rst new file mode 100644 index 000000000..0fa94dc3d --- /dev/null +++ b/docs/api/priors.rst @@ -0,0 +1,16 @@ +------ +Priors +------ + +Priors +------ + +.. autosummary:: + :toctree: _autosummary + :template: custom-class-template.rst + :recursive: + + UniformPrior + GaussianPrior + LogUniformPrior + LogGaussianPrior \ No newline at end of file diff --git a/docs/api/samples.rst b/docs/api/samples.rst new file mode 100644 index 000000000..8c99bc81e --- /dev/null +++ b/docs/api/samples.rst @@ -0,0 +1,17 @@ +======= +Samples +======= + +Samples +------- + +.. autosummary:: + :toctree: _autosummary + :template: custom-class-template.rst + :recursive: + + Samples + SamplesPDF + SamplesMCMC + SamplesNest + SamplesStored \ No newline at end of file diff --git a/docs/api/searches.rst b/docs/api/searches.rst new file mode 100644 index 000000000..914a0adfd --- /dev/null +++ b/docs/api/searches.rst @@ -0,0 +1,64 @@ +=================== +Non=Linear Searches +=================== + +Nested Samplers +--------------- + +.. autosummary:: + :toctree: _autosummary + :template: custom-class-template.rst + :recursive: + + DynestyDynamic + DynestyStatic + UltraNest + +MCMC +---- + +.. autosummary:: + :toctree: _autosummary + :template: custom-class-template.rst + :recursive: + + Emcee + Zeus + +Optimizers +---------- + +.. autosummary:: + :toctree: _autosummary + :template: custom-class-template.rst + :recursive: + + PySwarmsLocal + PySwarmsGlobal + +Tools +----- + +.. autosummary:: + :toctree: _autosummary + :template: custom-class-template.rst + :recursive: + + DirectoryPaths + DatabasePaths + Result + InitializerBall + InitializerPrior + PriorPasser + AutoCorrelationsSettings + +GridSearch +---------- + +.. autosummary:: + :toctree: _autosummary + :template: custom-class-template.rst + :recursive: + + SearchGridSearch + GridSearchResult diff --git a/docs/api/source.rst b/docs/api/source.rst new file mode 100644 index 000000000..a0d9c18dd --- /dev/null +++ b/docs/api/source.rst @@ -0,0 +1,14 @@ +=========== +Source Code +=========== + +Model Mapping +------------- + +.. autosummary:: + :toctree: _autosummary + :template: custom-class-template.rst + :recursive: + + ModelMapper + ModelInstance From 9234505d1f10873cd8af3f0e413d65c5da070ba9 Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Thu, 15 Dec 2022 14:39:08 +0000 Subject: [PATCH 079/226] test mode initial samples now accoun for assrtions --- autofit/non_linear/initializer.py | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/autofit/non_linear/initializer.py b/autofit/non_linear/initializer.py index 754079bfd..6544fd2c1 100644 --- a/autofit/non_linear/initializer.py +++ b/autofit/non_linear/initializer.py @@ -129,13 +129,18 @@ def samples_in_test_mode( figure_of_merit = -1.0e99 while point_index < total_points: - unit_parameter_list = self._generate_unit_parameter_list(model) - parameter_list = model.vector_from_unit_vector(unit_vector=unit_parameter_list) - unit_parameter_lists.append(unit_parameter_list) - parameter_lists.append(parameter_list) - figure_of_merit_list.append(figure_of_merit) - figure_of_merit *= 10.0 - point_index += 1 + + try: + unit_parameter_list = self._generate_unit_parameter_list(model) + parameter_list = model.vector_from_unit_vector(unit_vector=unit_parameter_list) + model.instance_from_vector(vector=parameter_list) + unit_parameter_lists.append(unit_parameter_list) + parameter_lists.append(parameter_list) + figure_of_merit_list.append(figure_of_merit) + figure_of_merit *= 10.0 + point_index += 1 + except exc.FitException: + pass return unit_parameter_lists, parameter_lists, figure_of_merit_list From b7a664763cacb45b6f63be32ece83b483805e93b Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Thu, 15 Dec 2022 14:51:18 +0000 Subject: [PATCH 080/226] fix API docs build --- docs/index.rst | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/docs/index.rst b/docs/index.rst index 4932a88e4..f24fcbd65 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -219,7 +219,14 @@ provides new users with a more detailed introduction to **PyAutoFit**. :maxdepth: 1 :hidden: - api/api + api/model + api/priors + api/analysis + api/searches + api/plot + api/samples + api/database + api/source .. toctree:: :caption: Features: From e14e472e2f6e17c39615186650af5bfec543cf20 Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Thu, 15 Dec 2022 15:00:15 +0000 Subject: [PATCH 081/226] docs of analysis.rst --- docs/api/analysis.rst | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/docs/api/analysis.rst b/docs/api/analysis.rst index 501ff1ee2..d623ace4a 100644 --- a/docs/api/analysis.rst +++ b/docs/api/analysis.rst @@ -2,6 +2,15 @@ Analysis ======== +The ``Analysis`` object is used to define the ``log_likelihood_function`` of your model-fitting problem, and acts +as an interface between the data and the non-linear search. + +In addition to the API documentation below, checkout the ``overview`` scripts on the ``autofit_workspace`` for a +tutorial in creating an ``Analysis`` for a model-fit: + +- https://github.com/Jammy2211/autofit_workspace/blob/release/notebooks/overview/simple/fit.ipynb +- https://github.com/Jammy2211/autofit_workspace/blob/release/notebooks/overview/complex/fit.ipynb + -------- Analysis -------- From 66237b922a217d8e975c7302d4ddc5ec5772d4c1 Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Thu, 15 Dec 2022 16:03:22 +0000 Subject: [PATCH 082/226] description of each api doc --- docs/api/analysis.rst | 9 +++++---- docs/api/database.rst | 10 ++++++++++ docs/api/model.rst | 14 ++++++++++++++ docs/api/plot.rst | 9 +++++++++ docs/api/priors.rst | 16 ++++++++++++++-- docs/api/samples.rst | 12 ++++++++++++ docs/api/searches.rst | 23 +++++++++++++++++++++++ docs/api/source.rst | 8 ++++++++ docs/overview/non_linear_search.rst | 2 ++ 9 files changed, 97 insertions(+), 6 deletions(-) diff --git a/docs/api/analysis.rst b/docs/api/analysis.rst index d623ace4a..baff175ac 100644 --- a/docs/api/analysis.rst +++ b/docs/api/analysis.rst @@ -5,11 +5,12 @@ Analysis The ``Analysis`` object is used to define the ``log_likelihood_function`` of your model-fitting problem, and acts as an interface between the data and the non-linear search. -In addition to the API documentation below, checkout the ``overview`` scripts on the ``autofit_workspace`` for a -tutorial in creating an ``Analysis`` for a model-fit: +**Examples / Tutorials:** -- https://github.com/Jammy2211/autofit_workspace/blob/release/notebooks/overview/simple/fit.ipynb -- https://github.com/Jammy2211/autofit_workspace/blob/release/notebooks/overview/complex/fit.ipynb +- `readthedocs: example using Analysis object `_. +- `autofit_workspace: simple tutorial `_ +- `autofit_workspace: complex tutorial `_ +- `HowToFit: introduction chapter (detailed step-by-step examples) `_ -------- Analysis diff --git a/docs/api/database.rst b/docs/api/database.rst index 31faf4fee..b2578deb2 100644 --- a/docs/api/database.rst +++ b/docs/api/database.rst @@ -2,6 +2,16 @@ Database ======== +PyAutoFit's database feature outputs all model-fitting results as a sqlite3 (https://docs.python.org/3/library/sqlite3.html) +relational database, such that all results can be efficiently loaded into a Jupyter notebook or Python script for +inspection, analysis and interpretation. + +**Examples / Tutorials:** + +- `readthedocs: example using database functionality `_ +- `autofit_workspace: tutorial using database `_ +- `HowToFit: database chapter (detailed step-by-step examples) `_ + ---------- Aggregator ---------- diff --git a/docs/api/model.rst b/docs/api/model.rst index 115a5a0c7..cc57c8217 100644 --- a/docs/api/model.rst +++ b/docs/api/model.rst @@ -2,6 +2,20 @@ Models ====== +Model objects are used for composing models that are fitted to data. + +It is recommended the `model API cookbooks `_ are used for guidance on building complex model. + +**Examples / Tutorials:** + +- `Model API Cookbooks (recommended) `_. + +- `readthedocs: example using Model object `_. +- `readthedocs: example using Collection object `_. +- `autofit_workspace: simple tutorial `_ +- `autofit_workspace: complex tutorial `_ +- `HowToFit: introduction chapter (detailed step-by-step examples) `_ + ------ Models ------ diff --git a/docs/api/plot.rst b/docs/api/plot.rst index 85bbdc9cb..8c4b93974 100644 --- a/docs/api/plot.rst +++ b/docs/api/plot.rst @@ -2,6 +2,15 @@ Plotters ======== +The ``Plotter`` objects are used to create non-linear search specific visualization of every search algorithm supported +by **PyAutoFit**. + +**Examples / Tutorials:** + +- `readthedocs: non-linear search example `_ +- `autofit_workspace: plot tutorials `_ +- `HowToFit: introduction chapter (detailed step-by-step examples) `_ + -------- Plotters -------- diff --git a/docs/api/priors.rst b/docs/api/priors.rst index 0fa94dc3d..e62b1164c 100644 --- a/docs/api/priors.rst +++ b/docs/api/priors.rst @@ -1,6 +1,18 @@ ------- +====== Priors ------- +====== + +The priors of every model-fit are customized using `Prior` objects. + +**Examples / Tutorials:** + +- `Model API Cookbooks (recommended) `_. + +- `readthedocs: example using Model object `_. +- `readthedocs: example using Collection object `_. +- `autofit_workspace: simple tutorial `_ +- `autofit_workspace: complex tutorial `_ +- `HowToFit: introduction chapter (detailed step-by-step examples) `_ Priors ------ diff --git a/docs/api/samples.rst b/docs/api/samples.rst index 8c99bc81e..1f6aa92f3 100644 --- a/docs/api/samples.rst +++ b/docs/api/samples.rst @@ -2,6 +2,18 @@ Samples ======= +Every sample of a model-fit and non-liner search are stored in a ``Samples`` object, which can be manipulated to +inspect the results in detail (e.g. perform parameter estimation with errors). + +For example, for an MCMC model-fit, the ``Samples`` objects contains every sample of walker. + +**Examples / Tutorials:** + +- `readthedocs: example on using results `_. +- `autofit_workspace: simple results tutorial `_ +- `autofit_workspace: complex result tutorial `_ +- `HowToFit: introduction chapter (detailed step-by-step examples) `_ + Samples ------- diff --git a/docs/api/searches.rst b/docs/api/searches.rst index 914a0adfd..b758b5cc3 100644 --- a/docs/api/searches.rst +++ b/docs/api/searches.rst @@ -2,6 +2,18 @@ Non=Linear Searches =================== +A non-linear search is an algorithm which fits a model to data. + +**PyAutoFit** currently supports three types of non-linear search algorithms: nested samplers, +Markov Chain Monte Carlo (MCMC) and optimizers. + +**Examples / Tutorials:** + +- `readthedocs: example using non-linear searches `_. +- `autofit_workspace: simple tutorial `_ +- `autofit_workspace: complex tutorial `_ +- `HowToFit: introduction chapter (detailed step-by-step examples) `_ + Nested Samplers --------------- @@ -36,6 +48,9 @@ Optimizers PySwarmsLocal PySwarmsGlobal +There are also a number of tools which are used to customize the behaviour of non-linear searches in **PyAutoFit**, +including directory output structure, parameter sample initialization and MCMC auto correlation analysis. + Tools ----- @@ -52,6 +67,14 @@ Tools PriorPasser AutoCorrelationsSettings +**PyAutoFit** can perform a parallelized grid-search of non-linear searches, where a subset of parameters in the +model are fitted in over a discrete grid. + +**Examples / Tutorials:** + +- `readthedocs: example using a non-linear search grid search `_. +- `autofit_workspace: example using a non-linear search grid search `_ + GridSearch ---------- diff --git a/docs/api/source.rst b/docs/api/source.rst index a0d9c18dd..62722b906 100644 --- a/docs/api/source.rst +++ b/docs/api/source.rst @@ -2,9 +2,17 @@ Source Code =========== +This page provided API docs for functionality which is typically not used by users, but is used internally in the +**PyAutoFit** source code. + +These docs are intended for developers, or users doing non-standard computations using internal **PyAutoFit** objects. + Model Mapping ------------- +These tools are used internally by **PyAutoFit** to map input lists of values (e.g. a unit cube of parameter values) +to model instances. + .. autosummary:: :toctree: _autosummary :template: custom-class-template.rst diff --git a/docs/overview/non_linear_search.rst b/docs/overview/non_linear_search.rst index 0b5aa86ef..581e41f47 100644 --- a/docs/overview/non_linear_search.rst +++ b/docs/overview/non_linear_search.rst @@ -3,6 +3,8 @@ Non-linear Search ================= +A non-linear search is an algorithm which fits a model to data. + **PyAutoFit** currently supports three types of non-linear search algorithms: - **Optimizers**: ``PySwarms``. From 625e774167bfa17a6f5f14e0e1d3bcc00b8bd569 Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Fri, 16 Dec 2022 19:48:15 +0000 Subject: [PATCH 083/226] docstrings of UniformPrior and GaussianPrior --- autofit/mapper/prior/gaussian.py | 49 ++++++++++++++++++++--- autofit/mapper/prior/uniform.py | 45 +++++++++++++++++---- autofit/mapper/prior_model/collection.py | 39 ++++++++++++++++++ autofit/mapper/prior_model/prior_model.py | 41 +++++++++++++++++-- docs/api/database.rst | 2 + docs/api/model.rst | 4 +- docs/api/plot.rst | 2 + docs/api/priors.rst | 2 + docs/api/samples.rst | 2 + docs/api/searches.rst | 10 ++++- docs/api/source.rst | 2 + 11 files changed, 178 insertions(+), 20 deletions(-) diff --git a/autofit/mapper/prior/gaussian.py b/autofit/mapper/prior/gaussian.py index d732abf28..b3dfb1158 100644 --- a/autofit/mapper/prior/gaussian.py +++ b/autofit/mapper/prior/gaussian.py @@ -3,7 +3,6 @@ class GaussianPrior(Prior): - """A prior with a gaussian distribution""" __identifier_fields__ = ( "lower_limit", @@ -14,12 +13,44 @@ class GaussianPrior(Prior): def __init__( self, - mean, - sigma, - lower_limit=float("-inf"), - upper_limit=float("inf"), + mean: float, + sigma : float, + lower_limit : float = float("-inf"), + upper_limit : float = float("inf"), ): + """ + A prior with a uniform distribution, defined between a lower limit and upper limit. + + The conversion of an input unit value, ``u``, to a physical value, ``p``, via the prior is as follows: + + .. math:: + + p = \mu + (\sigma * sqrt(2) * erfcinv(2.0 * (1.0 - u)) + + For example for ``prior = UniformPrior(lower_limit=0.0, upper_limit=2.0)``, an + input ``prior.value_for(unit=0.5)`` is equal to 1.0. + + [Rich describe how this is done via message] + + Parameters + ---------- + mean + The mean of the Gaussian distribution defining the prior. + sigma + The sigma value of the Gaussian distribution defining the prior. + lower_limit + A lower limit of the Gaussian distribution; physical values below this value are rejected. + upper_limit + A upper limit of the Gaussian distribution; physical values below this value are rejected. + + Examples + -------- + + prior = af.GaussianPrior(mean=1.0, sigma=2.0, lower_limit=0.0, upper_limit=2.0) + + physical_value = prior.value_for(unit=0.5) + """ super().__init__( lower_limit=lower_limit, upper_limit=upper_limit, @@ -44,10 +75,16 @@ def with_limits( Note that these limits are not strict so exceptions will not be raised for values outside of the limits. + This function is typically used in prior passing, where the + result of a model-fit are used to create new Gaussian priors + centred on the previously estimated median PDF model. + Parameters ---------- lower_limit + The lower limit of the new Gaussian prior. upper_limit + The upper limit of the new Gaussian Prior. Returns ------- @@ -66,5 +103,5 @@ def dict(self) -> dict: return {**prior_dict, "mean": self.mean, "sigma": self.sigma} @property - def parameter_string(self): + def parameter_string(self) -> str: return f"mean = {self.mean}, sigma = {self.sigma}" diff --git a/autofit/mapper/prior/uniform.py b/autofit/mapper/prior/uniform.py index 2292897be..5f1a33094 100644 --- a/autofit/mapper/prior/uniform.py +++ b/autofit/mapper/prior/uniform.py @@ -6,13 +6,42 @@ class UniformPrior(Prior): - """A prior with a uniform distribution between a lower and upper limit""" __identifier_fields__ = ("lower_limit", "upper_limit") def __init__( - self, lower_limit=0.0, upper_limit=1.0, id_=None, + self, + lower_limit : float = 0.0, + upper_limit : float = 1.0, + id_ = None, ): + """ + A prior with a uniform distribution, defined between a lower limit and upper limit. + + The conversion of an input unit value via the prior is as follows: + + .. math:: + + For example for ``prior = UniformPrior(lower_limit=0.0, upper_limit=2.0)``, an + input ``prior.value_for(unit=0.5)`` is equal to 1.0. + + [Rich describe how this is done via message] + + Parameters + ---------- + lower_limit + The lower limit of the uniform distribution defining the prior. + upper_limit + The upper limit of the uniform distribution defining the prior. + + Examples + -------- + + prior = af.UniformPrior(lower_limit=0.0, upper_limit=2.0) + + physical_value = prior.value_for(unit=0.2) + """ + lower_limit = float(lower_limit) upper_limit = float(upper_limit) @@ -52,17 +81,19 @@ def logpdf(self, x): def parameter_string(self) -> str: return f"lower_limit = {self.lower_limit}, upper_limit = {self.upper_limit}" - def value_for(self, unit, ignore_prior_limits=False): + def value_for(self, unit : float, ignore_prior_limits : bool = False) -> float: """ + Returns a physical value from an input unit value according to the limits of the uniform prior. Parameters ---------- - unit: Float - A unit hypercube value between 0 and 1 + unit + A unit value between 0 and 1. + Returns ------- - value: Float - A value for the attribute between the upper and lower limits + value + The unit value mapped to a physical value according to the prior. """ return round( super().value_for(unit, ignore_prior_limits=ignore_prior_limits), 14 diff --git a/autofit/mapper/prior_model/collection.py b/autofit/mapper/prior_model/collection.py index 5e7718cce..a5c68cb09 100644 --- a/autofit/mapper/prior_model/collection.py +++ b/autofit/mapper/prior_model/collection.py @@ -86,6 +86,28 @@ def as_model(self): def __init__(self, *arguments, **kwargs): """ + The object multiple Python classes are input into to create model-components, which has free parameters that + are fitted by a non-linear search. + + Multiple Python classes can be input into a `Collection` in order to compose high dimensional models made of + multiple model-components. + + The ``Collection`` object is highly flexible, and can create models from many input Python data structures + (e.g. a list of classes, dictionary of classes, hierarchy of classes). + + For a complete description of the model composition API, see the **PyAutoFit** model API cookbooks: + + https://pyautofit.readthedocs.io/en/latest/cookbooks/cookbook_1_basics.html + + The Python class input into a ``Model`` to create a model component is written using the following format: + + - The name of the class is the name of the model component (e.g. ``Gaussian``). + - The input arguments of the constructor are the parameters of the mode (e.g. ``centre``, ``normalization`` and ``sigma``). + - The default values of the input arguments tell PyAutoFit whether a parameter is a single-valued float or a + multi-valued tuple. + + [Rich document more clearly] + A prior model used to represent a list of prior models for convenience. Arguments are flexibly converted into a collection. @@ -94,6 +116,23 @@ def __init__(self, *arguments, **kwargs): ---------- arguments Classes, prior models, instances or priors + + Examples + -------- + + class Gaussian: + + def __init__( + self, + centre=0.0, # <- PyAutoFit recognises these + normalization=0.1, # <- constructor arguments are + sigma=0.01, # <- the Gaussian's parameters. + ): + self.centre = centre + self.normalization = normalization + self.sigma = sigma + + model = af.Collection(gaussian_0=Gaussian, gaussian_1=Gaussian) """ super().__init__() self.item_number = 0 diff --git a/autofit/mapper/prior_model/prior_model.py b/autofit/mapper/prior_model/prior_model.py index 925616e03..d571574b6 100644 --- a/autofit/mapper/prior_model/prior_model.py +++ b/autofit/mapper/prior_model/prior_model.py @@ -19,9 +19,6 @@ class PriorModel(AbstractPriorModel): - """Object comprising class and associated priors - @DynamicAttrs - """ @property def name(self): @@ -50,10 +47,46 @@ def __add__(self, other): def __init__(self, cls, **kwargs): """ + The object a Python class is input into to create a model-component, which has free parameters that are fitted + by a non-linear search. + + The ``Model`` object is flexible, and can create models from many input Python data structures + (e.g. a list of classes, dictionary of classes, hierarchy of classes). + + For a complete description of the model composition API, see the **PyAutoFit** model API cookbooks: + + https://pyautofit.readthedocs.io/en/latest/cookbooks/cookbook_1_basics.html + + The Python class input into a ``Model`` to create a model component is written using the following format: + + - The name of the class is the name of the model component (e.g. ``Gaussian``). + - The input arguments of the constructor are the parameters of the mode (e.g. ``centre``, ``normalization`` and ``sigma``). + - The default values of the input arguments tell PyAutoFit whether a parameter is a single-valued float or a + multi-valued tuple. + + [Rich explain everything else] + Parameters ---------- - cls: class + cls The class associated with this instance + + Examples + -------- + + class Gaussian: + + def __init__( + self, + centre=0.0, # <- PyAutoFit recognises these + normalization=0.1, # <- constructor arguments are + sigma=0.01, # <- the Gaussian's parameters. + ): + self.centre = centre + self.normalization = normalization + self.sigma = sigma + + model = af.Model(Gaussian) """ super().__init__( label=namer(cls.__name__) diff --git a/docs/api/database.rst b/docs/api/database.rst index b2578deb2..da55b1ee6 100644 --- a/docs/api/database.rst +++ b/docs/api/database.rst @@ -16,6 +16,8 @@ inspection, analysis and interpretation. Aggregator ---------- +.. currentmodule:: autofit + .. autosummary:: :toctree: _autosummary :template: custom-class-template.rst diff --git a/docs/api/model.rst b/docs/api/model.rst index cc57c8217..2ae7c06bc 100644 --- a/docs/api/model.rst +++ b/docs/api/model.rst @@ -27,5 +27,5 @@ Models :template: custom-class-template.rst :recursive: - PriorModel - CollectionPriorModel \ No newline at end of file + Model + Collection \ No newline at end of file diff --git a/docs/api/plot.rst b/docs/api/plot.rst index 8c4b93974..ad553ae6b 100644 --- a/docs/api/plot.rst +++ b/docs/api/plot.rst @@ -15,6 +15,8 @@ by **PyAutoFit**. Plotters -------- +.. currentmodule:: autofit.plot + .. autosummary:: :toctree: _autosummary :template: custom-class-template.rst diff --git a/docs/api/priors.rst b/docs/api/priors.rst index e62b1164c..aae145d55 100644 --- a/docs/api/priors.rst +++ b/docs/api/priors.rst @@ -17,6 +17,8 @@ The priors of every model-fit are customized using `Prior` objects. Priors ------ +.. currentmodule:: autofit + .. autosummary:: :toctree: _autosummary :template: custom-class-template.rst diff --git a/docs/api/samples.rst b/docs/api/samples.rst index 1f6aa92f3..ff833e7a2 100644 --- a/docs/api/samples.rst +++ b/docs/api/samples.rst @@ -17,6 +17,8 @@ For example, for an MCMC model-fit, the ``Samples`` objects contains every sampl Samples ------- +.. currentmodule:: autofit + .. autosummary:: :toctree: _autosummary :template: custom-class-template.rst diff --git a/docs/api/searches.rst b/docs/api/searches.rst index b758b5cc3..fcfe4e34f 100644 --- a/docs/api/searches.rst +++ b/docs/api/searches.rst @@ -1,5 +1,5 @@ =================== -Non=Linear Searches +Non-Linear Searches =================== A non-linear search is an algorithm which fits a model to data. @@ -17,6 +17,8 @@ Markov Chain Monte Carlo (MCMC) and optimizers. Nested Samplers --------------- +.. currentmodule:: autofit + .. autosummary:: :toctree: _autosummary :template: custom-class-template.rst @@ -29,6 +31,8 @@ Nested Samplers MCMC ---- +.. currentmodule:: autofit + .. autosummary:: :toctree: _autosummary :template: custom-class-template.rst @@ -40,6 +44,8 @@ MCMC Optimizers ---------- +.. currentmodule:: autofit + .. autosummary:: :toctree: _autosummary :template: custom-class-template.rst @@ -54,6 +60,8 @@ including directory output structure, parameter sample initialization and MCMC a Tools ----- +.. currentmodule:: autofit + .. autosummary:: :toctree: _autosummary :template: custom-class-template.rst diff --git a/docs/api/source.rst b/docs/api/source.rst index 62722b906..d22330054 100644 --- a/docs/api/source.rst +++ b/docs/api/source.rst @@ -13,6 +13,8 @@ Model Mapping These tools are used internally by **PyAutoFit** to map input lists of values (e.g. a unit cube of parameter values) to model instances. +.. currentmodule:: autofit + .. autosummary:: :toctree: _autosummary :template: custom-class-template.rst From e041b960157b7c950ba9fd4fdbd952995dd2ccab Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Fri, 16 Dec 2022 19:50:18 +0000 Subject: [PATCH 084/226] eamples for above priors --- autofit/mapper/prior/uniform.py | 7 +++++++ autofit/messages/normal.py | 23 +++++++++++++++++------ 2 files changed, 24 insertions(+), 6 deletions(-) diff --git a/autofit/mapper/prior/uniform.py b/autofit/mapper/prior/uniform.py index 5f1a33094..d9d3c1ff5 100644 --- a/autofit/mapper/prior/uniform.py +++ b/autofit/mapper/prior/uniform.py @@ -94,6 +94,13 @@ def value_for(self, unit : float, ignore_prior_limits : bool = False) -> float: ------- value The unit value mapped to a physical value according to the prior. + + Examples + -------- + + prior = af.UniformPrior(lower_limit=0.0, upper_limit=2.0) + + physical_value = prior.value_for(unit=0.2) """ return round( super().value_for(unit, ignore_prior_limits=ignore_prior_limits), 14 diff --git a/autofit/messages/normal.py b/autofit/messages/normal.py index 99ab9fcfb..f08537fa8 100644 --- a/autofit/messages/normal.py +++ b/autofit/messages/normal.py @@ -157,16 +157,26 @@ def logpdf_gradient_hessian(self, x: np.ndarray) -> Tuple[np.ndarray, np.ndarray __default_fields__ = ("log_norm", "id_") - def value_for(self, unit): + def value_for(self, unit : float) -> float: """ + Returns a physical value from an input unit value according to the Gaussian distribution of the prior. + Parameters ---------- - unit: Float - A unit hypercube value between 0 and 1 + unit + A unit value between 0 and 1. + Returns ------- - value: Float - A value for the attribute biased to the gaussian distribution + value + The unit value mapped to a physical value according to the prior. + + Examples + -------- + + prior = af.GaussianPrior(mean=1.0, sigma=2.0, lower_limit=0.0, upper_limit=2.0) + + physical_value = prior.value_for(unit=0.5) """ return self.mean + (self.sigma * math.sqrt(2) * erfcinv(2.0 * (1.0 - unit))) @@ -198,7 +208,8 @@ def __repr__(self): class NaturalNormal(NormalMessage): - """Identical to the NormalMessage but allows non-normalised values, + """ + Identical to the NormalMessage but allows non-normalised values, e.g negative or infinite variances """ From 8bf82889bde4dbbe9aa8793406798ffd98c2962c Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Fri, 16 Dec 2022 20:07:29 +0000 Subject: [PATCH 085/226] log gaussian / uniform priors --- autofit/mapper/prior/abstract.py | 4 +- autofit/mapper/prior/gaussian.py | 2 +- autofit/mapper/prior/log_gaussian.py | 53 ++++++++++++++--- autofit/mapper/prior/log_uniform.py | 88 +++++++++++++++++++++++++--- autofit/mapper/prior/uniform.py | 8 +-- autofit/messages/normal.py | 2 +- 6 files changed, 132 insertions(+), 25 deletions(-) diff --git a/autofit/mapper/prior/abstract.py b/autofit/mapper/prior/abstract.py index 1829700e1..54cedf637 100644 --- a/autofit/mapper/prior/abstract.py +++ b/autofit/mapper/prior/abstract.py @@ -141,11 +141,11 @@ def value_for(self, unit: float, ignore_prior_limits=False) -> float: Parameters ---------- unit - A hypercube value between 0 and 1. + A unit value between 0 and 1. Returns ------- - A physical value. + A physical value, mapped from the unit value accoridng to the prior. """ result = self.message.value_for(unit) if not ignore_prior_limits: diff --git a/autofit/mapper/prior/gaussian.py b/autofit/mapper/prior/gaussian.py index b3dfb1158..8da8ab0c0 100644 --- a/autofit/mapper/prior/gaussian.py +++ b/autofit/mapper/prior/gaussian.py @@ -28,7 +28,7 @@ def __init__( p = \mu + (\sigma * sqrt(2) * erfcinv(2.0 * (1.0 - u)) - For example for ``prior = UniformPrior(lower_limit=0.0, upper_limit=2.0)``, an + For example for ``prior = GaussianPrior(mean=1.0, sigma=2.0)``, an input ``prior.value_for(unit=0.5)`` is equal to 1.0. [Rich describe how this is done via message] diff --git a/autofit/mapper/prior/log_gaussian.py b/autofit/mapper/prior/log_gaussian.py index b942ae5df..1eae77276 100644 --- a/autofit/mapper/prior/log_gaussian.py +++ b/autofit/mapper/prior/log_gaussian.py @@ -5,13 +5,49 @@ class LogGaussianPrior(Prior): - """A prior with a log gaussian distribution""" __identifier_fields__ = ("lower_limit", "upper_limit", "mean", "sigma") def __init__( - self, mean, sigma, lower_limit=0.0, upper_limit=float("inf"), id_=None, + self, + mean : float, + sigma : float, + lower_limit : float = 0.0, + upper_limit : float = float("inf"), + id_=None, ): + """ + A prior with a log base 10 uniform distribution, defined between a lower limit and upper limit. + + The conversion of an input unit value, ``u``, to a physical value, ``p``, via the prior is as follows: + + .. math:: + + p = \mu + (\sigma * sqrt(2) * erfcinv(2.0 * (1.0 - u)) + + For example for ``prior = LogGaussianPrior(mean=1.0, sigma=2.0)``, an + input ``prior.value_for(unit=0.5)`` is equal to 1.0. + + [Rich describe how this is done via message] + + Parameters + ---------- + mean + The mean of the Gaussian distribution defining the prior. + sigma + The sigma value of the Gaussian distribution defining the prior. + lower_limit + A lower limit of the Gaussian distribution; physical values below this value are rejected. + upper_limit + A upper limit of the Gaussian distribution; physical values below this value are rejected. + + Examples + -------- + + prior = af.LogGaussianPrior(mean=1.0, sigma=2.0, lower_limit=0.0, upper_limit=2.0) + + physical_value = prior.value_for(unit=0.5) + """ lower_limit = float(lower_limit) upper_limit = float(upper_limit) @@ -38,21 +74,22 @@ def _new_for_base_message(self, message): id_=self.instance().id, ) - def value_for(self, unit, ignore_prior_limits=False): + def value_for(self, unit : float, ignore_prior_limits : bool = False) -> float: """ + Return a physical value for a value between 0 and 1 with the transformation + described by this prior. Parameters ---------- - unit: Float - A unit hypercube value between 0 and 1 + unit + A unit value between 0 and 1. Returns ------- - value: Float - A value for the attribute biased to the gaussian distribution + A physical value, mapped from the unit value accoridng to the prior. """ return super().value_for(unit, ignore_prior_limits=ignore_prior_limits) @property - def parameter_string(self): + def parameter_string(self) -> str: return f"mean = {self.mean}, sigma = {self.sigma}" diff --git a/autofit/mapper/prior/log_uniform.py b/autofit/mapper/prior/log_uniform.py index cc75f01b8..d748cd792 100644 --- a/autofit/mapper/prior/log_uniform.py +++ b/autofit/mapper/prior/log_uniform.py @@ -8,11 +8,40 @@ class LogUniformPrior(Prior): - """A prior with a uniform distribution between a lower and upper limit""" def __init__( - self, lower_limit=1e-6, upper_limit=1.0, id_=None, + self, + lower_limit : float =1e-6, + upper_limit : float = 1.0, + id_=None, ): + """ + A prior with a log base 10 uniform distribution, defined between a lower limit and upper limit. + + The conversion of an input unit value, ``u``, to a physical value, ``p``, via the prior is as follows: + + .. math:: + + For example for ``prior = LogUniformPrior(lower_limit=10.0, upper_limit=1000.0)``, an + input ``prior.value_for(unit=0.5)`` is equal to 100.0. + + [Rich describe how this is done via message] + + Parameters + ---------- + lower_limit + The lower limit of the log10 uniform distribution defining the prior. + upper_limit + The upper limit of the log10 uniform distribution defining the prior. + + Examples + -------- + + prior = af.LogUniformPrior(lower_limit=0.0, upper_limit=2.0) + + physical_value = prior.value_for(unit=0.2) + """ + if lower_limit <= 0.0: raise exc.PriorException( "The lower limit of a LogUniformPrior cannot be zero or negative." @@ -34,26 +63,69 @@ def __init__( ) @classmethod - def with_limits(cls, lower_limit: float, upper_limit: float): + def with_limits(cls, lower_limit: float, upper_limit: float) -> "LogUniformPrior": + """ + Create a new log 10 uniform prior centred between two limits + with sigma distance between this limits. + + Note that these limits are not strict so exceptions will not + be raised for values outside of the limits. + + This function is typically used in prior passing, where the + result of a model-fit are used to create new Gaussian priors + centred on the previously estimated median PDF model. + + Parameters + ---------- + lower_limit + The lower limit of the new LogUniform prior. + upper_limit + The upper limit of the new LogUniform Prior. + + Returns + ------- + A new LogUniform. + """ return cls(lower_limit=max(0.000001, lower_limit), upper_limit=upper_limit,) __identifier_fields__ = ("lower_limit", "upper_limit") @staticmethod - def log_prior_from_value(value): + def log_prior_from_value(value) -> float: """ Returns the log prior of a physical value, so the log likelihood of a model evaluation can be converted to a - posterior as log_prior + log_likelihood. + posterior as log_prior + log_likelihood. - This is used by Emcee in the log likelihood function evaluation. + This is used by certain non-linear searches (e.g. Emcee) in the log likelihood function evaluation. Parameters ---------- value : float - The physical value of this prior's corresponding parameter in a `NonLinearSearch` sample.""" + The physical value of this prior's corresponding parameter in a `NonLinearSearch` sample. + """ return 1.0 / value - def value_for(self, unit: float, ignore_prior_limits=False) -> float: + def value_for(self, unit: float, ignore_prior_limits : bool = False) -> float: + """ + Returns a physical value from an input unit value according to the limits of the log10 uniform prior. + + Parameters + ---------- + unit + A unit value between 0 and 1. + + Returns + ------- + value + The unit value mapped to a physical value according to the prior. + + Examples + -------- + + prior = af.LogUniformPrior(lower_limit=0.0, upper_limit=2.0) + + physical_value = prior.value_for(unit=0.2) + """ return super().value_for(unit, ignore_prior_limits=ignore_prior_limits) @property diff --git a/autofit/mapper/prior/uniform.py b/autofit/mapper/prior/uniform.py index d9d3c1ff5..ec84b9c6c 100644 --- a/autofit/mapper/prior/uniform.py +++ b/autofit/mapper/prior/uniform.py @@ -18,7 +18,7 @@ def __init__( """ A prior with a uniform distribution, defined between a lower limit and upper limit. - The conversion of an input unit value via the prior is as follows: + The conversion of an input unit value, ``u``, to a physical value, ``p``, via the prior is as follows: .. math:: @@ -113,10 +113,8 @@ def log_prior_from_value(value): Returns the log prior of a physical value, so the log likelihood of a model evaluation can be converted to a posterior as log_prior + log_likelihood. - This is used by Emcee in the log likelihood function evaluation. + This is used by certain non-linear searches (e.g. Emcee) in the log likelihood function evaluation. - NOTE: For a UniformPrior this is always zero, provided the value is between the lower and upper limit. Given - this is check for when the instance is made (in the *instance_from_vector* function), we thus can simply return - zero in this function. + For a UniformPrior this is always zero, provided the value is between the lower and upper limit. """ return 0.0 diff --git a/autofit/messages/normal.py b/autofit/messages/normal.py index f08537fa8..6c01f93cf 100644 --- a/autofit/messages/normal.py +++ b/autofit/messages/normal.py @@ -184,7 +184,7 @@ def log_prior_from_value(self, value): """ Returns the log prior of a physical value, so the log likelihood of a model evaluation can be converted to a posterior as log_prior + log_likelihood. - This is used by Emcee in the log likelihood function evaluation. + This is used by certain non-linear searches (e.g. Emcee) in the log likelihood function evaluation. Parameters ---------- value From 0674c48b6749740a66a5a698adcd832c7edfab11 Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Sat, 17 Dec 2022 14:07:58 +0000 Subject: [PATCH 086/226] fix ultranes dict --- autofit/non_linear/nest/ultranest/ultranest.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/autofit/non_linear/nest/ultranest/ultranest.py b/autofit/non_linear/nest/ultranest/ultranest.py index 16e8953d4..53dbb16f8 100644 --- a/autofit/non_linear/nest/ultranest/ultranest.py +++ b/autofit/non_linear/nest/ultranest/ultranest.py @@ -215,10 +215,14 @@ def prior_transform(cube): if iterations > 0: - config_dict_run = self.config_dict_run - config_dict_run.pop("max_ncalls") - config_dict_run["dKL"] = config_dict_run.pop("dkl") - config_dict_run["Lepsilon"] = config_dict_run.pop("lepsilon") + filter_list = ["max_ncalls", "dkl", "lepsilon"] + config_dict_run = { + key: value for key, value + in self.config_dict_run.items() + if key + not in filter_list + } + config_dict_run["update_interval_ncall"] = iterations sampler.run( From 35720cbacc305efc056817d4d64fcf1ccfc0956a Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Sun, 18 Dec 2022 08:37:43 +0000 Subject: [PATCH 087/226] dynesty input can specify force_x1_cpu --- autofit/non_linear/nest/dynesty/abstract.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/autofit/non_linear/nest/dynesty/abstract.py b/autofit/non_linear/nest/dynesty/abstract.py index f44904000..cf86716f2 100644 --- a/autofit/non_linear/nest/dynesty/abstract.py +++ b/autofit/non_linear/nest/dynesty/abstract.py @@ -160,7 +160,8 @@ def _fit( if conf.instance["non_linear"]["nest"][self.__class__.__name__][ "parallel" - ]["force_x1_cpu"]: + ]["force_x1_cpu"] or self.kwargs.get("force_x1_cpu"): + raise RuntimeError with Pool( From 8696041b3a29890aec8dbf4d5c9a53ff81a98579 Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Sun, 18 Dec 2022 19:51:51 +0000 Subject: [PATCH 088/226] fix Zeus test mode --- autofit/non_linear/initializer.py | 5 +++-- autofit/non_linear/mcmc/zeus/zeus.py | 1 + 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/autofit/non_linear/initializer.py b/autofit/non_linear/initializer.py index 6544fd2c1..663f2544f 100644 --- a/autofit/non_linear/initializer.py +++ b/autofit/non_linear/initializer.py @@ -29,7 +29,8 @@ def samples_from_model( total_points: int, model: AbstractPriorModel, fitness_function, - use_prior_medians: bool = False + use_prior_medians: bool = False, + test_mode_samples : bool = True ): """ Generate the initial points of the non-linear search, by randomly drawing unit values from a uniform @@ -44,7 +45,7 @@ def samples_from_model( of free dimensions of the model. """ - if os.environ.get("PYAUTOFIT_TEST_MODE") == "1": + if os.environ.get("PYAUTOFIT_TEST_MODE") == "1" and test_mode_samples: return self.samples_in_test_mode(total_points=total_points, model=model) logger.info("Generating initial samples of model, which are subject to prior limits and other constraints.") diff --git a/autofit/non_linear/mcmc/zeus/zeus.py b/autofit/non_linear/mcmc/zeus/zeus.py index 20ddff04e..bdd6e64ec 100644 --- a/autofit/non_linear/mcmc/zeus/zeus.py +++ b/autofit/non_linear/mcmc/zeus/zeus.py @@ -185,6 +185,7 @@ def _fit(self, model: AbstractPriorModel, analysis, log_likelihood_cap=None): total_points=zeus_sampler.nwalkers, model=model, fitness_function=fitness_function, + test_mode_samples=False ) zeus_state = np.zeros(shape=(zeus_sampler.nwalkers, model.prior_count)) From 49f09a5e4aedaa4680e636b6fc1d4284a9082b28 Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Mon, 19 Dec 2022 10:46:25 +0000 Subject: [PATCH 089/226] reviee --- autofit/mapper/prior/gaussian.py | 14 +++++++------- autofit/mapper/prior_model/prior_model.py | 4 +++- 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/autofit/mapper/prior/gaussian.py b/autofit/mapper/prior/gaussian.py index 8da8ab0c0..95541cb5a 100644 --- a/autofit/mapper/prior/gaussian.py +++ b/autofit/mapper/prior/gaussian.py @@ -12,12 +12,11 @@ class GaussianPrior(Prior): ) def __init__( - self, - mean: float, - sigma : float, - lower_limit : float = float("-inf"), - upper_limit : float = float("inf"), - + self, + mean: float, + sigma : float, + lower_limit : float = float("-inf"), + upper_limit : float = float("inf"), ): """ A prior with a uniform distribution, defined between a lower limit and upper limit. @@ -31,7 +30,8 @@ def __init__( For example for ``prior = GaussianPrior(mean=1.0, sigma=2.0)``, an input ``prior.value_for(unit=0.5)`` is equal to 1.0. - [Rich describe how this is done via message] + The mapping is performed using the message functionality, where a message represents the distirubtion + of this prior. Parameters ---------- diff --git a/autofit/mapper/prior_model/prior_model.py b/autofit/mapper/prior_model/prior_model.py index d571574b6..47c4afdae 100644 --- a/autofit/mapper/prior_model/prior_model.py +++ b/autofit/mapper/prior_model/prior_model.py @@ -19,7 +19,9 @@ class PriorModel(AbstractPriorModel): - + """ + @DynamicAttrs + """ @property def name(self): return self.cls.__name__ From a47195ac95dadc36b5a943a5c7d4ce44a93c2516 Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 19 Dec 2022 10:55:35 +0000 Subject: [PATCH 090/226] rename to interpolator --- autofit/__init__.py | 2 +- autofit/{time_series.py => interpolator.py} | 18 +++++++++--------- ...est_time_series.py => test_interpolator.py} | 4 ++-- 3 files changed, 12 insertions(+), 12 deletions(-) rename autofit/{time_series.py => interpolator.py} (92%) rename test_autofit/{test_time_series.py => test_interpolator.py} (93%) diff --git a/autofit/__init__.py b/autofit/__init__.py index 59a7f9659..eaffa431c 100644 --- a/autofit/__init__.py +++ b/autofit/__init__.py @@ -86,7 +86,7 @@ from .example.model import Gaussian from .text import formatter from .text import samples_text -from .time_series import LinearTimeSeries +from .interpolator import LinearInterpolator from .tools import util diff --git a/autofit/time_series.py b/autofit/interpolator.py similarity index 92% rename from autofit/time_series.py rename to autofit/interpolator.py index b8dfb4d6b..b291243b9 100644 --- a/autofit/time_series.py +++ b/autofit/interpolator.py @@ -7,7 +7,7 @@ from autofit.mapper.model import ModelInstance -class TimeSeriesPath: +class InterpolatorPath: def __init__(self, keys: List[str]): """ Addresses a given attribute in a ModelInstance @@ -19,11 +19,11 @@ def __init__(self, keys: List[str]): """ self.keys = keys - def __getattr__(self, item: str) -> "TimeSeriesPath": + def __getattr__(self, item: str) -> "InterpolatorPath": """ Add a new attribute name to the end of the path """ - return TimeSeriesPath(self.keys + [item]) + return InterpolatorPath(self.keys + [item]) def get_value(self, instance: ModelInstance) -> float: """ @@ -60,7 +60,7 @@ def __eq__(self, other: float) -> "Equality": class Equality: - def __init__(self, path: TimeSeriesPath, value: float): + def __init__(self, path: InterpolatorPath, value: float): """ Describes the value of a given attribute for which other values are interpolated. @@ -76,7 +76,7 @@ def __init__(self, path: TimeSeriesPath, value: float): self.value = value -class AbstractTimeSeries(ABC): +class AbstractInterpolator(ABC): def __init__(self, instances: List[ModelInstance]): """ A TimeSeries allows interpolation on any variable. @@ -93,7 +93,7 @@ def __init__(self, instances: List[ModelInstance]): """ self.instances = instances - def __getattr__(self, item: str) -> TimeSeriesPath: + def __getattr__(self, item: str) -> InterpolatorPath: """ Used to indicate which attribute is the time attribute. @@ -112,9 +112,9 @@ def __getattr__(self, item: str) -> TimeSeriesPath: ------- A class that keeps track of which attributes have been addressed """ - return TimeSeriesPath([item]) + return InterpolatorPath([item]) - def _value_map(self, path: TimeSeriesPath) -> Dict[float, ModelInstance]: + def _value_map(self, path: InterpolatorPath) -> Dict[float, ModelInstance]: """ Maps know values to corresponding instances for a given path @@ -198,7 +198,7 @@ def _interpolate(x: List[float], y: List[float], value: float) -> float: """ -class LinearTimeSeries(AbstractTimeSeries): +class LinearInterpolator(AbstractInterpolator): """ Assume all attributes have a linear relationship with time """ diff --git a/test_autofit/test_time_series.py b/test_autofit/test_interpolator.py similarity index 93% rename from test_autofit/test_time_series.py rename to test_autofit/test_interpolator.py index 9e53fa335..2e0de50bd 100644 --- a/test_autofit/test_time_series.py +++ b/test_autofit/test_interpolator.py @@ -5,7 +5,7 @@ def test_trivial(): instance = af.ModelInstance(items=dict(t=1)) - time_series = af.LinearTimeSeries([instance]) + time_series = af.LinearInterpolator([instance]) result = time_series[time_series.t == 1] @@ -14,7 +14,7 @@ def test_trivial(): @pytest.fixture(name="time_series") def make_time_series(): - return af.LinearTimeSeries( + return af.LinearInterpolator( [ af.ModelInstance( items=dict( From 7ad29b49054be58f5da5f3484d94d8eb550500c5 Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 19 Dec 2022 10:59:14 +0000 Subject: [PATCH 091/226] recursive has --- autofit/mapper/model.py | 6 ++++++ test_autofit/mapper/model/test_has.py | 18 ++++++++++++++++++ 2 files changed, 24 insertions(+) create mode 100644 test_autofit/mapper/model/test_has.py diff --git a/autofit/mapper/model.py b/autofit/mapper/model.py index 71a65da59..012a4c7a9 100644 --- a/autofit/mapper/model.py +++ b/autofit/mapper/model.py @@ -220,6 +220,12 @@ def direct_tuples_with_type(self, class_type): ) ) + def has(self, cls: Union[Type, Tuple[Type, ...]]) -> bool: + """ + Does this instance have an attribute which is of type cls? + """ + return len(self.attribute_tuples_with_type(cls)) > 0 + @frozen_cache def model_tuples_with_type(self, cls): """ diff --git a/test_autofit/mapper/model/test_has.py b/test_autofit/mapper/model/test_has.py new file mode 100644 index 000000000..0531413bb --- /dev/null +++ b/test_autofit/mapper/model/test_has.py @@ -0,0 +1,18 @@ +import pytest + +import autofit as af + + +@pytest.fixture(name="model") +def make_prior_model(): + return af.Model(af.Gaussian) + + +def test_model_has(model): + assert model.has(af.Prior) + + +def test_collection_has(model): + collection = af.Collection(gaussian=model) + + assert collection.has(af.Prior) From 742b4ffbfd4149e0c185f41793848363a35a3d21 Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Mon, 19 Dec 2022 11:27:10 +0000 Subject: [PATCH 092/226] doc --- CITATIONS.rst | 6 +++--- CODE_OF_CONDUCT.md | 4 ++-- docs/features/database.rst | 2 +- docs/general/adding_a_model_component.rst | 6 +++--- docs/general/citations.rst | 6 +++--- docs/overview/non_linear_search.rst | 6 +++--- docs/overview/result.rst | 2 +- docs/science_examples/astronomy.rst | 6 +++--- 8 files changed, 19 insertions(+), 19 deletions(-) diff --git a/CITATIONS.rst b/CITATIONS.rst index f1621b5e8..e4f5798e8 100644 --- a/CITATIONS.rst +++ b/CITATIONS.rst @@ -4,9 +4,9 @@ Citations & References ====================== The bibtex entries for **PyAutoFit** and its affiliated software packages can be found -`here `_, with example text for citing **PyAutoFit** -in `.tex format here `_ format here and -`.md format here `_. As shown in the examples, we +`here `_, with example text for citing **PyAutoFit** +in `.tex format here `_ format here and +`.md format here `_. As shown in the examples, we would greatly appreciate it if you mention **PyAutoFit** by name and include a link to our GitHub page! **PyAutoFit** is published in the `Journal of Open Source Software `_ and its diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index a6a46e490..d8befc974 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -302,7 +302,7 @@ the situation is not yet resolved. ## License -This code of conduct has been adapted from [*NUMFOCUS code of conduct*](https://github.com/numfocus/numfocus/blob/master/manual/numfocus-coc.md#the-short-version), -which is adapted from numerous sources, including the [*Geek Feminism wiki, created by the Ada Initiative and other volunteers, which is under a Creative Commons Zero license*](http://geekfeminism.wikia.com/wiki/Fiterence_anti-harassment/Policy), the [*Contributor Covenant version 1.2.0*](http://contributor-covenant.org/version/1/2/0/), the [*Bokeh Code of Conduct*](https://github.com/bokeh/bokeh/blob/master/CODE_OF_CONDUCT.md), the [*SciPy Code of Conduct*](https://github.com/jupyter/governance/blob/master/conduct/enforcement.md), the [*Carpentries Code of Conduct*](https://docs.carpentries.org/topic_folders/policies/code-of-conduct.html#enforcement-manual), and the [*NeurIPS Code of Conduct*](https://neurips.cc/public/CodeOfConduct). +This code of conduct has been adapted from [*NUMFOCUS code of conduct*](https://github.com/numfocus/numfocus/blob/main/manual/numfocus-coc.md#the-short-version), +which is adapted from numerous sources, including the [*Geek Feminism wiki, created by the Ada Initiative and other volunteers, which is under a Creative Commons Zero license*](http://geekfeminism.wikia.com/wiki/Fiterence_anti-harassment/Policy), the [*Contributor Covenant version 1.2.0*](http://contributor-covenant.org/version/1/2/0/), the [*Bokeh Code of Conduct*](https://github.com/bokeh/bokeh/blob/main/CODE_OF_CONDUCT.md), the [*SciPy Code of Conduct*](https://github.com/jupyter/governance/blob/main/conduct/enforcement.md), the [*Carpentries Code of Conduct*](https://docs.carpentries.org/topic_folders/policies/code-of-conduct.html#enforcement-manual), and the [*NeurIPS Code of Conduct*](https://neurips.cc/public/CodeOfConduct). **PyAutoFit Code of Conduct is licensed under the [Creative Commons Attribution 3.0 Unported License](https://creativecommons.org/licenses/by/3.0/).** \ No newline at end of file diff --git a/docs/features/database.rst b/docs/features/database.rst index a7f5b4eb4..97dc06363 100644 --- a/docs/features/database.rst +++ b/docs/features/database.rst @@ -151,7 +151,7 @@ Wrap Up ------- If you'd like to see the ``Aggregator`` in action, checkout the -`database example `_ on the +`database example `_ on the ``autofit_workspace``. The Database Chapter of the `HowToFit lecture series `_ diff --git a/docs/general/adding_a_model_component.rst b/docs/general/adding_a_model_component.rst index b7f6252a2..ddd5cce73 100644 --- a/docs/general/adding_a_model_component.rst +++ b/docs/general/adding_a_model_component.rst @@ -52,7 +52,7 @@ Every model-component must have a ``.json`` config file in the ``autofit_workspa that **PyAutoFit** knows the default priors to associate with the model-component. If we do not manually override priors, these are the priors that will be used by default when a model-fit is performed. -Next, inspect the `TemplateObject.json `_ configuration file in ``autofit_workspace/config/priors``. You should see +Next, inspect the `TemplateObject.json `_ configuration file in ``autofit_workspace/config/priors``. You should see the following ``.json`` text: .. code-block:: python @@ -84,7 +84,7 @@ The ``lower_limit`` and ``upper_limit`` of a ``GaussianPrior`` define the bounda physically allowed. If a model-component is given a value outside these limits during model-fitting the model is instantly resampled and discarded. -We can easily adapt this template for our ``LinearFit`` model component. First, copy and paste the `TemplateObject.json `_ +We can easily adapt this template for our ``LinearFit`` model component. First, copy and paste the `TemplateObject.json `_ file to create a new file called ``LinearFit.json``. **PyAutoFit** matches the name of the class to the name of the configuration file, therefore it is a requirement that @@ -144,7 +144,7 @@ classes, we receive the same configuration error as before. This is because if a model-component is contained in a Python module, the prior configuration file must be named after that ``module`` and structured to contain Python class itself. -Open the file ``autofit_workspace/config/priors/template_module.json``, (https://github.com/Jammy2211/autofit_workspace/blob/master/config/priors/template_module.json) which reads as follows: +Open the file ``autofit_workspace/config/priors/template_module.json``, (https://github.com/Jammy2211/autofit_workspace/blob/main/config/priors/template_module.json) which reads as follows: .. code-block:: python diff --git a/docs/general/citations.rst b/docs/general/citations.rst index f1621b5e8..e4f5798e8 100644 --- a/docs/general/citations.rst +++ b/docs/general/citations.rst @@ -4,9 +4,9 @@ Citations & References ====================== The bibtex entries for **PyAutoFit** and its affiliated software packages can be found -`here `_, with example text for citing **PyAutoFit** -in `.tex format here `_ format here and -`.md format here `_. As shown in the examples, we +`here `_, with example text for citing **PyAutoFit** +in `.tex format here `_ format here and +`.md format here `_. As shown in the examples, we would greatly appreciate it if you mention **PyAutoFit** by name and include a link to our GitHub page! **PyAutoFit** is published in the `Journal of Open Source Software `_ and its diff --git a/docs/overview/non_linear_search.rst b/docs/overview/non_linear_search.rst index 0b5aa86ef..21790ecf0 100644 --- a/docs/overview/non_linear_search.rst +++ b/docs/overview/non_linear_search.rst @@ -25,7 +25,7 @@ We've seen that we can call a non-linear search as follows: However, ``Emcee`` has many settings associated with it (the number of walkers, the number of steps they take, etc.). Above, we did not pass them to the ``Emcee`` constructor and they use the default values found in the ``autofit_workspace`` configuration files ``autofit_workspace/config/non_linear/mcmc/Emcee.ini``, which can be -viewed at this `link `_. +viewed at this `link `_. Of course, we can manually specify all of the parameters instead: @@ -56,7 +56,7 @@ A number of these parameters are not part of the ``emcee`` package, but addition - Auto correlation lengths can be checked during sampling and used to determine whether the MCMC chains have converged, terminating ``emcee`` before all ``nwalkers`` have taken all ``nsteps``, as discussed at this `link `_. The nested sampling algorithm ``dynesty`` has its own config file for default settings, which are at -this `link `_. +this `link `_. ``DynestyStatic`` parameters can be manually specified as follows: .. code-block:: python @@ -162,5 +162,5 @@ Wrap-Up ------- We are always looking to add more non-linear searches to **PyAutoFit**. If you are the developer of a package check out -our `contributions section `_ and please +our `contributions section `_ and please contact us! \ No newline at end of file diff --git a/docs/overview/result.rst b/docs/overview/result.rst index 67b1487ae..11b366b73 100644 --- a/docs/overview/result.rst +++ b/docs/overview/result.rst @@ -330,6 +330,6 @@ Wrap-Up ------- More information on the ``Result`` class can be found at the -`results examples `_ on +`results examples `_ on the ``autofit_workspace``. More details are provided in tutorial 7 or chapter 1 of the `HowToFit lecture series `_ \ No newline at end of file diff --git a/docs/science_examples/astronomy.rst b/docs/science_examples/astronomy.rst index 69cf1c941..37340e3c6 100644 --- a/docs/science_examples/astronomy.rst +++ b/docs/science_examples/astronomy.rst @@ -26,7 +26,7 @@ Multi-Level Models We therefore need a model which contains separate model-components for every galaxy, and where each galaxy contains separate model-components describing its light and mass. A multi-level representation of this model is as follows: -.. image:: https://github.com/rhayes777/PyAutoFit/blob/master/docs/overview/image/lens_model.png?raw=true +.. image:: https://github.com/rhayes777/PyAutoFit/blob/main/docs/overview/image/lens_model.png?raw=true :width: 600 :alt: Alternative text @@ -263,7 +263,7 @@ modeling features to compose and fits models of arbitrary complexity and dimensi To illustrate this further, consider the following dataset which is called a **strong lens galaxy cluster**: -.. image:: https://github.com/rhayes777/PyAutoFit/blob/master/docs/overview/image/cluster_example.jpg?raw=true +.. image:: https://github.com/rhayes777/PyAutoFit/blob/main/docs/overview/image/cluster_example.jpg?raw=true :width: 600 :alt: Alternative text @@ -307,7 +307,7 @@ example: Here is an illustration of this model's graph: -.. image:: https://github.com/rhayes777/PyAutoFit/blob/master/docs/overview/image/lens_model_cluster.png?raw=true +.. image:: https://github.com/rhayes777/PyAutoFit/blob/main/docs/overview/image/lens_model_cluster.png?raw=true :width: 600 :alt: Alternative text From 5b6c27a21cb330e967a6de9be5cdec3b1fe686ad Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 19 Dec 2022 11:30:21 +0000 Subject: [PATCH 093/226] test existing methods --- test_autofit/mapper/model/test_has.py | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/test_autofit/mapper/model/test_has.py b/test_autofit/mapper/model/test_has.py index 0531413bb..8e481fd24 100644 --- a/test_autofit/mapper/model/test_has.py +++ b/test_autofit/mapper/model/test_has.py @@ -8,11 +8,24 @@ def make_prior_model(): return af.Model(af.Gaussian) +@pytest.fixture(name="collection") +def make_collection(model): + return af.Collection(gaussian=model) + + def test_model_has(model): - assert model.has(af.Prior) + assert model.has_instance(af.Prior) + + +def test_collection_has(collection): + assert collection.has_instance(af.Prior) + +def test_collection_has_model(collection): + assert collection.has_model(af.Gaussian) -def test_collection_has(model): - collection = af.Collection(gaussian=model) - assert collection.has(af.Prior) +def test_collection_of_collection(collection): + collection = af.Collection(collection=collection) + assert collection.has_instance(af.Prior) + assert collection.has_model(af.Gaussian) From 8a093ce8ab37b7f1db16dc56c1bc2d99b50d05bc Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 19 Dec 2022 11:30:31 +0000 Subject: [PATCH 094/226] optional argument --- autofit/mapper/model.py | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/autofit/mapper/model.py b/autofit/mapper/model.py index 012a4c7a9..ec16f2556 100644 --- a/autofit/mapper/model.py +++ b/autofit/mapper/model.py @@ -220,14 +220,8 @@ def direct_tuples_with_type(self, class_type): ) ) - def has(self, cls: Union[Type, Tuple[Type, ...]]) -> bool: - """ - Does this instance have an attribute which is of type cls? - """ - return len(self.attribute_tuples_with_type(cls)) > 0 - @frozen_cache - def model_tuples_with_type(self, cls): + def model_tuples_with_type(self, cls, check_prior_count=True): """ All models of the class in this model which have at least one free parameter, recursively. @@ -236,6 +230,8 @@ def model_tuples_with_type(self, cls): ---------- cls The type of the model + check_prior_count + If true, only return models with at least one free parameter Returns ------- @@ -246,7 +242,8 @@ def model_tuples_with_type(self, cls): return [ (path, model) for path, model in self.attribute_tuples_with_type(PriorModel) - if issubclass(model.cls, cls) and model.prior_count > 0 + if issubclass(model.cls, cls) + and (not check_prior_count or model.prior_count > 0) ] @frozen_cache From cc2b34c632795cecd894aacd4504f976742e6e9d Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 19 Dec 2022 11:31:26 +0000 Subject: [PATCH 095/226] revert to master version of model --- autofit/mapper/model.py | 141 ++++++++++++++++++++++------------------ 1 file changed, 77 insertions(+), 64 deletions(-) diff --git a/autofit/mapper/model.py b/autofit/mapper/model.py index ec16f2556..76020283a 100644 --- a/autofit/mapper/model.py +++ b/autofit/mapper/model.py @@ -6,7 +6,9 @@ from autofit.mapper.model_object import ModelObject from autofit.mapper.prior_model.recursion import DynamicRecursionCache -logger = logging.getLogger(__name__) +logger = logging.getLogger( + __name__ +) def frozen_cache(func): @@ -30,11 +32,17 @@ def frozen_cache(func): @wraps(func) def cache(self, *args, **kwargs): if hasattr(self, "_is_frozen") and self._is_frozen: - key = (func.__name__, self, *args,) + tuple(kwargs.items()) + key = (func.__name__, self, *args,) + tuple( + kwargs.items() + ) if key not in self._frozen_cache: - self._frozen_cache[key] = func(self, *args, **kwargs) - return self._frozen_cache[key] + self._frozen_cache[ + key + ] = func(self, *args, **kwargs) + return self._frozen_cache[ + key + ] return func(self, *args, **kwargs) return cache @@ -60,14 +68,16 @@ def assert_not_frozen(func): @wraps(func) def wrapper(self, *args, **kwargs): - string_args = list(filter(lambda arg: isinstance(arg, str), args)) - if ( - "_is_frozen" not in string_args - and "_frozen_cache" not in string_args - and hasattr(self, "_is_frozen") - and self._is_frozen - ): - raise AssertionError("Frozen models cannot be modified") + string_args = list(filter( + lambda arg: isinstance(arg, str), + args + )) + if "_is_frozen" not in string_args and "_frozen_cache" not in string_args and hasattr( + self, "_is_frozen" + ) and self._is_frozen: + raise AssertionError( + "Frozen models cannot be modified" + ) return func(self, *args, **kwargs) return wrapper @@ -81,7 +91,10 @@ def __init__(self, label=None): def __getstate__(self): return { - key: value for key, value in self.__dict__.items() if key != "_frozen_cache" + key: value + for key, value + in self.__dict__.items() + if key != "_frozen_cache" } def __setstate__(self, state): @@ -96,7 +109,9 @@ def freeze(self): and does not allow its state to be modified. """ logger.debug("Freezing model") - tuples = self.direct_tuples_with_type(AbstractModel) + tuples = self.direct_tuples_with_type( + AbstractModel + ) for _, model in tuples: if model is not self: model.freeze() @@ -109,7 +124,9 @@ def unfreeze(self): """ logger.debug("Thawing model") self._is_frozen = False - tuples = self.direct_tuples_with_type(AbstractModel) + tuples = self.direct_tuples_with_type( + AbstractModel + ) for _, model in tuples: if model is not self: model.unfreeze() @@ -138,7 +155,7 @@ def copy(self): return copy.deepcopy(self) def object_for_path( - self, path: Iterable[Union[str, int, type]] + self, path: Iterable[Union[str, int, type]] ) -> Union[object, List]: """ Get the object at a given path. @@ -182,10 +199,10 @@ def object_for_path( @frozen_cache def path_instance_tuples_for_class( - self, - cls: Union[Tuple, Type], - ignore_class: bool = None, - ignore_children: bool = True, + self, + cls: Union[Tuple, Type], + ignore_class: bool = None, + ignore_children: bool = True ): """ Tuples containing the path tuple and instance for every instance of the class @@ -206,22 +223,23 @@ def path_instance_tuples_for_class( Tuples containing the path to and instance of objects of the given type. """ return path_instances_of_class( - self, cls, ignore_class=ignore_class, ignore_children=ignore_children + self, + cls, + ignore_class=ignore_class, + ignore_children=ignore_children ) @frozen_cache def direct_tuples_with_type(self, class_type): return list( filter( - lambda t: t[0] != "id" - and not t[0].startswith("_") - and isinstance(t[1], class_type), + lambda t: t[0] != "id" and not t[0].startswith("_") and isinstance(t[1], class_type), self.__dict__.items(), ) ) @frozen_cache - def model_tuples_with_type(self, cls, check_prior_count=True): + def model_tuples_with_type(self, cls): """ All models of the class in this model which have at least one free parameter, recursively. @@ -230,25 +248,30 @@ def model_tuples_with_type(self, cls, check_prior_count=True): ---------- cls The type of the model - check_prior_count - If true, only return models with at least one free parameter Returns ------- Models with free parameters """ from .prior_model.prior_model import PriorModel - return [ (path, model) - for path, model in self.attribute_tuples_with_type(PriorModel) - if issubclass(model.cls, cls) - and (not check_prior_count or model.prior_count > 0) + for path, model + in self.attribute_tuples_with_type( + PriorModel + ) + if issubclass( + model.cls, + cls + ) and model.prior_count > 0 ] @frozen_cache def attribute_tuples_with_type( - self, class_type, ignore_class=None, ignore_children=True + self, + class_type, + ignore_class=None, + ignore_children=True ) -> List[tuple]: """ Tuples describing the name and instance for attributes in the model @@ -270,40 +293,19 @@ def attribute_tuples_with_type( return [ (path[-1] if len(path) > 0 else "", value) for path, value in self.path_instance_tuples_for_class( - class_type, ignore_class=ignore_class, ignore_children=ignore_children + class_type, + ignore_class=ignore_class, + ignore_children=ignore_children ) ] - def replacing_for_path(self, path: Tuple[str, ...], value) -> "AbstractModel": - """ - Create a new model replacing the value for a given path with a new value - - Parameters - ---------- - path - A path indicating the sequence of names used to address an object - value - A value that should replace the object at the given path - - Returns - ------- - A copy of this with an updated value - """ - new = copy.deepcopy(self) - obj = new - for key in path[:-1]: - obj = getattr(new, key) - - setattr(obj, path[-1], value) - return new - @DynamicRecursionCache() def path_instances_of_class( - obj, - cls: type, - ignore_class: Optional[Union[type, Tuple[type]]] = None, - ignore_children: bool = False, + obj, + cls: type, + ignore_class: Optional[Union[type, Tuple[type]]] = None, + ignore_children: bool = False ): """ Recursively search the object for instances of a given class @@ -343,7 +345,10 @@ def path_instances_of_class( if key.startswith("_"): continue for item in path_instances_of_class( - value, cls, ignore_class=ignore_class, ignore_children=ignore_children + value, + cls, + ignore_class=ignore_class, + ignore_children=ignore_children ): if isinstance(value, AnnotationPriorModel): path = (key,) @@ -378,7 +383,9 @@ def __getitem__(self, item): if isinstance(item, int): return list(self.values())[item] if isinstance(item, slice): - return ModelInstance(list(self.values())[item]) + return ModelInstance( + list(self.values())[item] + ) return self.__dict__[item] def __setitem__(self, key, value): @@ -395,8 +402,14 @@ def dict(self): return { key: value for key, value in self.__dict__.items() - if key not in ("id", "component_number", "item_number") - and not (isinstance(key, str) and key.startswith("_")) + if key not in ( + "id", + "component_number", + "item_number" + ) and not ( + isinstance(key, str) + and key.startswith("_") + ) } def values(self): From 4e917e66e767518cb226a3283a86a3382c959772 Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 19 Dec 2022 11:39:07 +0000 Subject: [PATCH 096/226] promoted method --- autofit/mapper/model_object.py | 82 ++++++++++++-------------- autofit/mapper/prior_model/abstract.py | 23 -------- 2 files changed, 37 insertions(+), 68 deletions(-) diff --git a/autofit/mapper/model_object.py b/autofit/mapper/model_object.py index 4b0c40cc0..76d629ed7 100644 --- a/autofit/mapper/model_object.py +++ b/autofit/mapper/model_object.py @@ -1,3 +1,4 @@ +import copy import itertools from typing import Type, Union, Tuple @@ -13,9 +14,7 @@ def next_id(cls): return next(cls._ids) def __init__( - self, - id_=None, - label=None, + self, id_=None, label=None, ): """ A generic object in AutoFit @@ -31,6 +30,29 @@ def __init__( self.id = id_ or self.next_id() self._label = label + def replacing_for_path(self, path: Tuple[str, ...], value) -> "ModelObject": + """ + Create a new model replacing the value for a given path with a new value + + Parameters + ---------- + path + A path indicating the sequence of names used to address an object + value + A value that should replace the object at the given path + + Returns + ------- + A copy of this with an updated value + """ + new = copy.deepcopy(self) + obj = new + for key in path[:-1]: + obj = getattr(new, key) + + setattr(obj, path[-1], value) + return new + def has(self, cls: Union[Type, Tuple[Type, ...]]) -> bool: """ Does this instance have an attribute which is of type cls? @@ -86,25 +108,17 @@ def from_dict(d): from autofit.mapper.prior.abstract import Prior from autofit.mapper.prior.tuple_prior import TuplePrior - if not isinstance( - d, dict - ): + if not isinstance(d, dict): return d type_ = d["type"] if type_ == "model": - instance = PriorModel( - get_class( - d.pop("class_path") - ) - ) + instance = PriorModel(get_class(d.pop("class_path"))) elif type_ == "collection": instance = CollectionPriorModel() elif type_ == "instance": - cls = get_class( - d.pop("class_path") - ) + cls = get_class(d.pop("class_path")) instance = object.__new__(cls) elif type_ == "tuple_prior": instance = TuplePrior() @@ -114,11 +128,7 @@ def from_dict(d): d.pop("type") for key, value in d.items(): - setattr( - instance, - key, - AbstractPriorModel.from_dict(value) - ) + setattr(instance, key, AbstractPriorModel.from_dict(value)) return instance def dict(self) -> dict: @@ -130,43 +140,25 @@ def dict(self) -> dict: from autofit.mapper.prior_model.prior_model import PriorModel from autofit.mapper.prior.tuple_prior import TuplePrior - if isinstance( - self, - CollectionPriorModel - ): + if isinstance(self, CollectionPriorModel): type_ = "collection" - elif isinstance( - self, - AbstractPriorModel - ) and self.prior_count == 0: + elif isinstance(self, AbstractPriorModel) and self.prior_count == 0: type_ = "instance" - elif isinstance( - self, - PriorModel - ): + elif isinstance(self, PriorModel): type_ = "model" - elif isinstance( - self, - TuplePrior - ): + elif isinstance(self, TuplePrior): type_ = "tuple_prior" else: raise AssertionError( f"{self.__class__.__name__} cannot be serialised to dict" ) - dict_ = { - "type": type_ - } + dict_ = {"type": type_} for key, value in self._dict.items(): try: - if not isinstance( - value, ModelObject - ): - value = AbstractPriorModel.from_instance( - value - ) + if not isinstance(value, ModelObject): + value = AbstractPriorModel.from_instance(value) value = value.dict() except AttributeError: pass @@ -181,5 +173,5 @@ def _dict(self): key: value for key, value in self.__dict__.items() if key not in ("component_number", "item_number", "id", "cls") - and not key.startswith("_") + and not key.startswith("_") } diff --git a/autofit/mapper/prior_model/abstract.py b/autofit/mapper/prior_model/abstract.py index 741bb5034..593729218 100644 --- a/autofit/mapper/prior_model/abstract.py +++ b/autofit/mapper/prior_model/abstract.py @@ -231,29 +231,6 @@ def cast( return updated - def replacing_for_path(self, path: Tuple[str, ...], value) -> "AbstractModel": - """ - Create a new model replacing the value for a given path with a new value - - Parameters - ---------- - path - A path indicating the sequence of names used to address an object - value - A value that should replace the object at the given path - - Returns - ------- - A copy of this with an updated value - """ - new = copy.deepcopy(self) - obj = new - for key in path[:-1]: - obj = getattr(new, key) - - setattr(obj, path[-1], value) - return new - def without_attributes(self) -> "AbstractModel": """ Returns a copy of this object with all priors, prior models and From 63f7dfd0bc93883b4f3ada1f3c83158bcfc691a1 Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 19 Dec 2022 12:00:07 +0000 Subject: [PATCH 097/226] modify combined before fit --- autofit/non_linear/analysis/combined.py | 18 ++++++++++++++++++ test_autofit/analysis/test_regression.py | 15 +++++++++++++++ 2 files changed, 33 insertions(+) diff --git a/autofit/non_linear/analysis/combined.py b/autofit/non_linear/analysis/combined.py index d1ba416ce..5d6014b5b 100644 --- a/autofit/non_linear/analysis/combined.py +++ b/autofit/non_linear/analysis/combined.py @@ -42,6 +42,24 @@ def __init__(self, *analyses: Analysis): self._log_likelihood_function = None self.n_cores = conf.instance["general"]["analysis"]["n_cores"] + def __getitem__(self, item): + return self.analyses[item] + + def modify_before_fit(self, paths: AbstractPaths, model: AbstractPriorModel): + """ + Modify the analysis before fitting. + + Parameters + ---------- + paths + An object describing the paths for saving data (e.g. hard-disk directories or entries in sqlite database). + model + The model which is to be fitted. + """ + return CombinedAnalysis( + *(analysis.modify_before_fit(paths, model) for analysis in self.analyses) + ) + @property def n_cores(self): return self._n_cores diff --git a/test_autofit/analysis/test_regression.py b/test_autofit/analysis/test_regression.py index 6be6bb7ed..571254fa4 100644 --- a/test_autofit/analysis/test_regression.py +++ b/test_autofit/analysis/test_regression.py @@ -15,12 +15,19 @@ class MyResult(af.Result): class MyAnalysis(af.Analysis): + def __init__(self): + self.is_modified = False + def log_likelihood_function(self, instance): pass def make_result(self, samples, model, sigma=1.0, use_errors=True, use_widths=False): return MyResult(model=model, samples=samples) + def modify_before_fit(self, paths, model): + self.is_modified = True + return self + def test_result_type(): model = af.Model(af.Gaussian) @@ -30,3 +37,11 @@ def test_result_type(): result = analysis.make_result(None, model) assert isinstance(result, MyResult) + + +def test_combined_before_fit(): + analysis = MyAnalysis() + MyAnalysis() + + analysis = analysis.modify_before_fit(None, None) + + assert analysis[0].is_modified From ec86d8ff50fbff008c00faf8b94e5a4112307206 Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 19 Dec 2022 12:03:53 +0000 Subject: [PATCH 098/226] modify after fit --- autofit/non_linear/analysis/combined.py | 22 +++++++++++++++++++ test_autofit/analysis/test_regression.py | 28 +++++++++++++++++++----- 2 files changed, 44 insertions(+), 6 deletions(-) diff --git a/autofit/non_linear/analysis/combined.py b/autofit/non_linear/analysis/combined.py index 5d6014b5b..dcdff72d9 100644 --- a/autofit/non_linear/analysis/combined.py +++ b/autofit/non_linear/analysis/combined.py @@ -60,6 +60,28 @@ def modify_before_fit(self, paths: AbstractPaths, model: AbstractPriorModel): *(analysis.modify_before_fit(paths, model) for analysis in self.analyses) ) + def modify_after_fit( + self, paths: AbstractPaths, model: AbstractPriorModel, result: Result + ): + """ + Modify the analysis after fitting. + + Parameters + ---------- + paths + An object describing the paths for saving data (e.g. hard-disk directories or entries in sqlite database). + model + The model which is to be fitted. + result + The result of the fit. + """ + return CombinedAnalysis( + *( + analysis.modify_after_fit(paths, model, result) + for analysis in self.analyses + ) + ) + @property def n_cores(self): return self._n_cores diff --git a/test_autofit/analysis/test_regression.py b/test_autofit/analysis/test_regression.py index 571254fa4..6c8296632 100644 --- a/test_autofit/analysis/test_regression.py +++ b/test_autofit/analysis/test_regression.py @@ -1,5 +1,7 @@ import pickle +import pytest + import autofit as af from autofit.non_linear.analysis import CombinedAnalysis @@ -16,7 +18,8 @@ class MyResult(af.Result): class MyAnalysis(af.Analysis): def __init__(self): - self.is_modified = False + self.is_modified_before = False + self.is_modified_after = False def log_likelihood_function(self, instance): pass @@ -25,7 +28,11 @@ def make_result(self, samples, model, sigma=1.0, use_errors=True, use_widths=Fal return MyResult(model=model, samples=samples) def modify_before_fit(self, paths, model): - self.is_modified = True + self.is_modified_before = True + return self + + def modify_after_fit(self, paths, model, result): + self.is_modified_after = True return self @@ -39,9 +46,18 @@ def test_result_type(): assert isinstance(result, MyResult) -def test_combined_before_fit(): - analysis = MyAnalysis() + MyAnalysis() +@pytest.fixture(name="combined_analysis") +def make_combined_analysis(): + return MyAnalysis() + MyAnalysis() + + +def test_combined_before_fit(combined_analysis): + combined_analysis = combined_analysis.modify_before_fit(None, None) + + assert combined_analysis[0].is_modified_before + - analysis = analysis.modify_before_fit(None, None) +def test_combined_after_fit(combined_analysis): + combined_analysis = combined_analysis.modify_after_fit(None, None, None) - assert analysis[0].is_modified + assert combined_analysis[0].is_modified_after From 9830e2c768f7c1de278bcb341f7545f36305667c Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 19 Dec 2022 12:10:08 +0000 Subject: [PATCH 099/226] ensuring modify model is called elsewhere --- autofit/non_linear/analysis/free_parameter.py | 53 +++++++------------ autofit/non_linear/analysis/model_analysis.py | 4 +- 2 files changed, 22 insertions(+), 35 deletions(-) diff --git a/autofit/non_linear/analysis/free_parameter.py b/autofit/non_linear/analysis/free_parameter.py index 5e8440899..6bd351986 100644 --- a/autofit/non_linear/analysis/free_parameter.py +++ b/autofit/non_linear/analysis/free_parameter.py @@ -8,17 +8,11 @@ from .analysis import Analysis from .indexed import IndexCollectionAnalysis -logger = logging.getLogger( - __name__ -) +logger = logging.getLogger(__name__) class FreeParameterAnalysis(IndexCollectionAnalysis): - def __init__( - self, - *analyses: Analysis, - free_parameters: Tuple[Prior, ...] - ): + def __init__(self, *analyses: Analysis, free_parameters: Tuple[Prior, ...]): """ A combined analysis with free parameters. @@ -35,30 +29,17 @@ def __init__( """ super().__init__(*analyses) self.free_parameters = [ - parameter for parameter - in free_parameters - if isinstance( - parameter, - Prior - ) + parameter for parameter in free_parameters if isinstance(parameter, Prior) ] # noinspection PyUnresolvedReferences self.free_parameters += [ prior - for parameter - in free_parameters - if isinstance( - parameter, - (AbstractPriorModel, TuplePrior) - ) - for prior - in parameter.priors + for parameter in free_parameters + if isinstance(parameter, (AbstractPriorModel, TuplePrior)) + for prior in parameter.priors ] - def modify_model( - self, - model: AbstractPriorModel - ) -> AbstractPriorModel: + def modify_model(self, model: AbstractPriorModel) -> AbstractPriorModel: """ Create prior models where free parameters are replaced with new priors. Return those prior models as a collection. @@ -77,10 +58,16 @@ def modify_model( A new model with all the same priors except for those associated with free parameters. """ - return CollectionPriorModel([ - model.mapper_from_partial_prior_arguments({ - free_parameter: free_parameter.new() - for free_parameter in self.free_parameters - }) - for _ in self.analyses - ]) + return CollectionPriorModel( + [ + analysis.modify_model( + model.mapper_from_partial_prior_arguments( + { + free_parameter: free_parameter.new() + for free_parameter in self.free_parameters + } + ) + ) + for analysis in self.analyses + ] + ) diff --git a/autofit/non_linear/analysis/model_analysis.py b/autofit/non_linear/analysis/model_analysis.py index d196e6555..d6b1108b9 100644 --- a/autofit/non_linear/analysis/model_analysis.py +++ b/autofit/non_linear/analysis/model_analysis.py @@ -53,9 +53,9 @@ def modify_model(self, model: AbstractPriorModel) -> CollectionPriorModel: """ return CollectionPriorModel( [ - analysis.analysis.model + analysis.modify_model(analysis.analysis.model) if isinstance(analysis.analysis, ModelAnalysis) - else model + else analysis.modify_model(model) for analysis in self.analyses ] ) From 8a0b1fde90b61bd38f9c55607900746cdd1dc33a Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 19 Dec 2022 12:23:18 +0000 Subject: [PATCH 100/226] zip models for child analyses --- autofit/non_linear/analysis/combined.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/autofit/non_linear/analysis/combined.py b/autofit/non_linear/analysis/combined.py index dcdff72d9..b4f5813db 100644 --- a/autofit/non_linear/analysis/combined.py +++ b/autofit/non_linear/analysis/combined.py @@ -199,9 +199,9 @@ def func(child_paths, analysis): def make_result(self, samples, model, sigma=1.0, use_errors=True, use_widths=False): child_results = [ analysis.make_result( - samples, model, sigma=1.0, use_errors=True, use_widths=False + samples, child_model, sigma=1.0, use_errors=True, use_widths=False ) - for analysis in self.analyses + for child_model, analysis in zip(model, self.analyses) ] result = self.analyses[0].make_result( samples=samples, model=model, sigma=1.0, use_errors=True, use_widths=False From e56e9175b90b92fa96387ea825b44d29cecb06fb Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Mon, 19 Dec 2022 12:26:18 +0000 Subject: [PATCH 101/226] rst now used to render pypi --- README.rst | 1 - docs/features/graphical.rst | 6 +++--- docs/features/search_chaining.rst | 10 +++++----- docs/features/search_grid_search.rst | 6 +++--- docs/features/sensitivity_mapping.rst | 10 +++++----- docs/overview/model_complex.rst | 2 +- docs/overview/model_fit.rst | 2 +- docs/overview/multi_datasets.rst | 18 +++++++++--------- docs/overview/result.rst | 2 +- docs/science_examples/astronomy.rst | 2 +- setup.py | 2 +- 11 files changed, 30 insertions(+), 31 deletions(-) diff --git a/README.rst b/README.rst index 9c3eb864b..42300a433 100644 --- a/README.rst +++ b/README.rst @@ -59,7 +59,6 @@ noisy 1D data. Here's the ``data`` (black) and the model (red) we'll fit: .. image:: https://raw.githubusercontent.com/rhayes777/PyAutoFit/master/files/toy_model_fit.png :width: 400 - :alt: Alternative text We define our model, a 1D Gaussian by writing a Python class using the format below: diff --git a/docs/features/graphical.rst b/docs/features/graphical.rst index 0bfb82853..34fca3c90 100644 --- a/docs/features/graphical.rst +++ b/docs/features/graphical.rst @@ -54,15 +54,15 @@ We begin by loading noisy 1D data containing 3 Gaussian's. This is what our three Gaussians look like: -.. image:: https://raw.githubusercontent.com/rhayes777/PyAutoFit/master/docs/features/images/gaussian_x1_1__low_snr.png +.. image:: https://raw.githubusercontent.com/rhayes777/PyAutoFit/main/docs/features/images/gaussian_x1_1__low_snr.png :width: 600 :alt: Alternative text -.. image:: https://raw.githubusercontent.com/rhayes777/PyAutoFit/master/docs/features/images/gaussian_x1_2__low_snr.png +.. image:: https://raw.githubusercontent.com/rhayes777/PyAutoFit/main/docs/features/images/gaussian_x1_2__low_snr.png :width: 600 :alt: Alternative text -.. image:: https://raw.githubusercontent.com/rhayes777/PyAutoFit/master/docs/features/images/gaussian_x1_3__low_snr.png +.. image:: https://raw.githubusercontent.com/rhayes777/PyAutoFit/main/docs/features/images/gaussian_x1_3__low_snr.png :width: 600 :alt: Alternative text diff --git a/docs/features/search_chaining.rst b/docs/features/search_chaining.rst index ef6132853..ed0b22753 100644 --- a/docs/features/search_chaining.rst +++ b/docs/features/search_chaining.rst @@ -29,7 +29,7 @@ Data In this example we demonstrate search chaining using the example data where there are two ``Gaussians`` that are visibly split: -.. image:: https://raw.githubusercontent.com/rhayes777/PyAutoFit/master/docs/features/images/gaussian_x2_split.png +.. image:: https://raw.githubusercontent.com/rhayes777/PyAutoFit/main/docs/features/images/gaussian_x2_split.png :width: 600 :alt: Alternative text @@ -52,7 +52,7 @@ Search 1 To fit the left ``Gaussian``, our first ``analysis`` receive only half data removing the right ``Gaussian``. Note that this give a speed-up in log likelihood evaluation. -.. image:: https://raw.githubusercontent.com/rhayes777/PyAutoFit/master/docs/features/images/gaussian_x2_left.png +.. image:: https://raw.githubusercontent.com/rhayes777/PyAutoFit/main/docs/features/images/gaussian_x2_left.png :width: 600 :alt: Alternative text @@ -94,7 +94,7 @@ to achieve a fast model-fit (had we fitted the more complex model right away we By plotting the result we can see we have fitted the left ``Gaussian`` reasonably well. -.. image:: https://raw.githubusercontent.com/rhayes777/PyAutoFit/master/docs/features/images/gaussian_x2_left_fit.png +.. image:: https://raw.githubusercontent.com/rhayes777/PyAutoFit/main/docs/features/images/gaussian_x2_left_fit.png :width: 600 :alt: Alternative text @@ -151,7 +151,7 @@ and use a low number of live points to achieve a fast model-fit. We can now see our model has successfully fitted both Gaussian's: -.. image:: https://raw.githubusercontent.com/rhayes777/PyAutoFit/master/docs/features/images/gaussian_x2_right_fit.png +.. image:: https://raw.githubusercontent.com/rhayes777/PyAutoFit/main/docs/features/images/gaussian_x2_right_fit.png :width: 600 :alt: Alternative text @@ -210,7 +210,7 @@ This gives the following output: We can now see our model has successfully fitted both Gaussians simultaneously: -.. image:: https://raw.githubusercontent.com/rhayes777/PyAutoFit/master/docs/features/images/gaussian_x2_fit.png +.. image:: https://raw.githubusercontent.com/rhayes777/PyAutoFit/main/docs/features/images/gaussian_x2_fit.png :width: 600 :alt: Alternative text diff --git a/docs/features/search_grid_search.rst b/docs/features/search_grid_search.rst index dcb4e26d2..d94bab352 100644 --- a/docs/features/search_grid_search.rst +++ b/docs/features/search_grid_search.rst @@ -28,7 +28,7 @@ In this example we will demonstrate the search grid search feature, again using in noisy data. This 1D data includes a small feature to the right of the central ``Gaussian``, a second ``Gaussian`` centred on pixel 70. -.. image:: https://raw.githubusercontent.com/rhayes777/PyAutoFit/master/docs/features/images/gaussian_x1_with_feature.png +.. image:: https://raw.githubusercontent.com/rhayes777/PyAutoFit/main/docs/features/images/gaussian_x1_with_feature.png :width: 600 :alt: Alternative text @@ -57,7 +57,7 @@ which the non linear search may miss. The image below shows a fit where we failed to detect the feature: -.. image:: https://raw.githubusercontent.com/rhayes777/PyAutoFit/master/docs/features/images/gaussian_x1_with_feature_fit_no_feature.png +.. image:: https://raw.githubusercontent.com/rhayes777/PyAutoFit/main/docs/features/images/gaussian_x1_with_feature_fit_no_feature.png :width: 600 :alt: Alternative text @@ -116,7 +116,7 @@ This shows a peak evidence value on the 4th cell of grid-search, where the ``Uni 60 -> 80 and therefore included the Gaussian feature. By plotting this model-fit we can see it has successfully detected the feature. -.. image:: https://raw.githubusercontent.com/rhayes777/PyAutoFit/master/docs/features/images/gaussian_x1_with_feature_fit_feature.png +.. image:: https://raw.githubusercontent.com/rhayes777/PyAutoFit/main/docs/features/images/gaussian_x1_with_feature_fit_feature.png :width: 600 :alt: Alternative text diff --git a/docs/features/sensitivity_mapping.rst b/docs/features/sensitivity_mapping.rst index f38e8ffce..81db00695 100644 --- a/docs/features/sensitivity_mapping.rst +++ b/docs/features/sensitivity_mapping.rst @@ -23,7 +23,7 @@ To illustrate sensitivity mapping we will again use the example of fitting 1D Ga includes a small feature to the right of the central ``Gaussian``, a second ``Gaussian`` centred on pixel 70. -.. image:: https://raw.githubusercontent.com/rhayes777/PyAutoFit/master/docs/features/images/gaussian_x1_with_feature.png +.. image:: https://raw.githubusercontent.com/rhayes777/PyAutoFit/main/docs/features/images/gaussian_x1_with_feature.png :width: 600 :alt: Alternative text @@ -182,11 +182,11 @@ gaussian features. Here are what the two most extreme simulated datasets look like, corresponding to the highest and lowest normalization values -.. image:: https://raw.githubusercontent.com/rhayes777/PyAutoFit/master/docs/features/images/sensitivity_data_low.png +.. image:: https://raw.githubusercontent.com/rhayes777/PyAutoFit/main/docs/features/images/sensitivity_data_low.png :width: 600 :alt: Alternative text -.. image:: https://raw.githubusercontent.com/rhayes777/PyAutoFit/master/docs/features/images/sensitivity_data_high.png +.. image:: https://raw.githubusercontent.com/rhayes777/PyAutoFit/main/docs/features/images/sensitivity_data_high.png :width: 600 :alt: Alternative text @@ -229,11 +229,11 @@ full example script on the ``autofit_workspace``). Here are what the fits to the two most extreme simulated datasets look like, for the models including the Gaussian feature. -.. image:: https://raw.githubusercontent.com/rhayes777/PyAutoFit/master/docs/features/images/sensitivity_data_low_fit.png +.. image:: https://raw.githubusercontent.com/rhayes777/PyAutoFit/main/docs/features/images/sensitivity_data_low_fit.png :width: 600 :alt: Alternative text -.. image:: https://raw.githubusercontent.com/rhayes777/PyAutoFit/master/docs/features/images/sensitivity_data_high_fit.png +.. image:: https://raw.githubusercontent.com/rhayes777/PyAutoFit/main/docs/features/images/sensitivity_data_high_fit.png :width: 600 :alt: Alternative text diff --git a/docs/overview/model_complex.rst b/docs/overview/model_complex.rst index c14ff7b80..879e4b259 100644 --- a/docs/overview/model_complex.rst +++ b/docs/overview/model_complex.rst @@ -12,7 +12,7 @@ Data The example ``data`` with errors (black), including the model-fit we'll perform (red) and individual ``Gaussian`` (blue dashed) and ``Exponential`` (orange dashed) components are shown below: -.. image:: https://raw.githubusercontent.com/rhayes777/PyAutoFit/master/docs/images/toy_model_fit_x2.png +.. image:: https://raw.githubusercontent.com/rhayes777/PyAutoFit/main/docs/images/toy_model_fit_x2.png :width: 600 :alt: Alternative text diff --git a/docs/overview/model_fit.rst b/docs/overview/model_fit.rst index a8ad0204f..b5676707a 100644 --- a/docs/overview/model_fit.rst +++ b/docs/overview/model_fit.rst @@ -17,7 +17,7 @@ Data The example ``data`` with errors (black) and the model-fit (red), are shown below: -.. image:: https://raw.githubusercontent.com/rhayes777/PyAutoFit/master/docs/images/toy_model_fit.png +.. image:: https://raw.githubusercontent.com/rhayes777/PyAutoFit/main/docs/images/toy_model_fit.png :width: 600 :alt: Alternative text diff --git a/docs/overview/multi_datasets.rst b/docs/overview/multi_datasets.rst index 76ad6d069..441664585 100644 --- a/docs/overview/multi_datasets.rst +++ b/docs/overview/multi_datasets.rst @@ -25,15 +25,15 @@ datasets simultaneously. Each dataset has a different noise realization, meaning that performing a simultaneously fit will offer improved constraints over individual fits. -.. image:: https://raw.githubusercontent.com/rhayes777/PyAutoFit/master/docs/images/gaussian_0.png +.. image:: https://raw.githubusercontent.com/rhayes777/PyAutoFit/main/docs/images/gaussian_0.png :width: 600 :alt: Alternative text -.. image:: https://raw.githubusercontent.com/rhayes777/PyAutoFit/master/docs/images/gaussian_1.png +.. image:: https://raw.githubusercontent.com/rhayes777/PyAutoFit/main/docs/images/gaussian_1.png :width: 600 :alt: Alternative text -.. image:: https://raw.githubusercontent.com/rhayes777/PyAutoFit/master/docs/images/gaussian_2.png +.. image:: https://raw.githubusercontent.com/rhayes777/PyAutoFit/main/docs/images/gaussian_2.png :width: 600 :alt: Alternative text @@ -119,15 +119,15 @@ as ``max_log_likelihood_instance``. Inspection of the results show tht the model was successfully fitted to all three datasets: -.. image:: https://raw.githubusercontent.com/rhayes777/PyAutoFit/master/docs/images/gaussian_model_0.png +.. image:: https://raw.githubusercontent.com/rhayes777/PyAutoFit/main/docs/images/gaussian_model_0.png :width: 600 :alt: Alternative text -.. image:: https://raw.githubusercontent.com/rhayes777/PyAutoFit/master/docs/images/gaussian_model_1.png +.. image:: https://raw.githubusercontent.com/rhayes777/PyAutoFit/main/docs/images/gaussian_model_1.png :width: 600 :alt: Alternative text -.. image:: https://raw.githubusercontent.com/rhayes777/PyAutoFit/master/docs/images/gaussian_model_2.png +.. image:: https://raw.githubusercontent.com/rhayes777/PyAutoFit/main/docs/images/gaussian_model_2.png :width: 600 :alt: Alternative text @@ -143,15 +143,15 @@ The model parameterization therefore needs to change in order to account for thi Lets look at an example of a dataset of 3 1D Gaussians where the signal varies across the datasets: -.. image:: https://raw.githubusercontent.com/rhayes777/PyAutoFit/master/docs/images/gaussian_model_vary_0.png +.. image:: https://raw.githubusercontent.com/rhayes777/PyAutoFit/main/docs/images/gaussian_model_vary_0.png :width: 600 :alt: Alternative text -.. image:: https://raw.githubusercontent.com/rhayes777/PyAutoFit/master/docs/images/gaussian_model_vary_1.png +.. image:: https://raw.githubusercontent.com/rhayes777/PyAutoFit/main/docs/images/gaussian_model_vary_1.png :width: 600 :alt: Alternative text -.. image:: https://raw.githubusercontent.com/rhayes777/PyAutoFit/master/docs/images/gaussian_model_vary_2.png +.. image:: https://raw.githubusercontent.com/rhayes777/PyAutoFit/main/docs/images/gaussian_model_vary_2.png :width: 600 :alt: Alternative text diff --git a/docs/overview/result.rst b/docs/overview/result.rst index 11b366b73..a4896618f 100644 --- a/docs/overview/result.rst +++ b/docs/overview/result.rst @@ -251,7 +251,7 @@ make a corner plot of the probability density function (PDF): Here is an example of how a PDF estimated for a model appears: -.. image:: https://raw.githubusercontent.com/rhayes777/PyAutoFit/master/docs/images/cornerplot.png +.. image:: https://raw.githubusercontent.com/rhayes777/PyAutoFit/main/docs/images/cornerplot.png :width: 600 :alt: Alternative text diff --git a/docs/science_examples/astronomy.rst b/docs/science_examples/astronomy.rst index 37340e3c6..baad54c2e 100644 --- a/docs/science_examples/astronomy.rst +++ b/docs/science_examples/astronomy.rst @@ -9,7 +9,7 @@ off of our astronomy software `PyAutoLens Date: Mon, 19 Dec 2022 12:36:36 +0000 Subject: [PATCH 102/226] RTD badge --- README.rst | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/README.rst b/README.rst index 42300a433..65f3eacec 100644 --- a/README.rst +++ b/README.rst @@ -4,10 +4,14 @@ PyAutoFit: Classy Probabilistic Programming .. |binder| image:: https://mybinder.org/badge_logo.svg :target: https://mybinder.org/v2/gh/Jammy2211/autofit_workspace/HEAD +.. |RTD| image:: https://readthedocs.org/projects/pyautofit/badge/?version=latest + :target: https://pyautofit.readthedocs.io/en/latest/?badge=latest + :alt: Documentation Status + .. |JOSS| image:: https://joss.theoj.org/papers/10.21105/joss.02550/status.svg :target: https://doi.org/10.21105/joss.02550 -|binder| |JOSS| +|binder| |RTD| |JOSS| `Installation Guide `_ | `readthedocs `_ | From c03f5d39de3dc6aa3bc10e07c25b41d05167390e Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 19 Dec 2022 12:46:52 +0000 Subject: [PATCH 103/226] fixes for free parameter analysis --- autofit/non_linear/analysis/combined.py | 4 +- autofit/non_linear/analysis/free_parameter.py | 39 ++++++ test_autofit/non_linear/test_analysis.py | 132 ++++-------------- 3 files changed, 67 insertions(+), 108 deletions(-) diff --git a/autofit/non_linear/analysis/combined.py b/autofit/non_linear/analysis/combined.py index b4f5813db..dcdff72d9 100644 --- a/autofit/non_linear/analysis/combined.py +++ b/autofit/non_linear/analysis/combined.py @@ -199,9 +199,9 @@ def func(child_paths, analysis): def make_result(self, samples, model, sigma=1.0, use_errors=True, use_widths=False): child_results = [ analysis.make_result( - samples, child_model, sigma=1.0, use_errors=True, use_widths=False + samples, model, sigma=1.0, use_errors=True, use_widths=False ) - for child_model, analysis in zip(model, self.analyses) + for analysis in self.analyses ] result = self.analyses[0].make_result( samples=samples, model=model, sigma=1.0, use_errors=True, use_widths=False diff --git a/autofit/non_linear/analysis/free_parameter.py b/autofit/non_linear/analysis/free_parameter.py index 6bd351986..365122f41 100644 --- a/autofit/non_linear/analysis/free_parameter.py +++ b/autofit/non_linear/analysis/free_parameter.py @@ -7,6 +7,8 @@ from autofit.mapper.prior_model.collection import CollectionPriorModel from .analysis import Analysis from .indexed import IndexCollectionAnalysis +from ..paths.abstract import AbstractPaths + logger = logging.getLogger(__name__) @@ -71,3 +73,40 @@ def modify_model(self, model: AbstractPriorModel) -> AbstractPriorModel: for analysis in self.analyses ] ) + + def modify_before_fit(self, paths: AbstractPaths, model: AbstractPriorModel): + """ + Modify the analysis before fitting. + + Parameters + ---------- + paths + An object describing the paths for saving data (e.g. hard-disk directories or entries in sqlite database). + model + The model which is to be fitted. + """ + return FreeParameterAnalysis( + *(analysis.modify_before_fit(paths, model) for analysis in self.analyses), + free_parameters=tuple(self.free_parameters), + ) + + def modify_after_fit(self, paths: AbstractPaths, model: AbstractPriorModel, result): + """ + Modify the analysis after fitting. + + Parameters + ---------- + paths + An object describing the paths for saving data (e.g. hard-disk directories or entries in sqlite database). + model + The model which is to be fitted. + result + The result of the fit. + """ + return FreeParameterAnalysis( + *( + analysis.modify_after_fit(paths, model, result) + for analysis in self.analyses + ), + free_parameters=tuple(self.free_parameters), + ) diff --git a/test_autofit/non_linear/test_analysis.py b/test_autofit/non_linear/test_analysis.py index 4a40954e5..be4a878ea 100644 --- a/test_autofit/non_linear/test_analysis.py +++ b/test_autofit/non_linear/test_analysis.py @@ -18,23 +18,12 @@ def __init__(self): def log_likelihood_function(self, instance): return -1 - def visualize( - self, - paths: AbstractPaths, - instance, - during_analysis - ): + def visualize(self, paths: AbstractPaths, instance, during_analysis): self.did_visualise = True - os.makedirs( - paths.image_path - ) + os.makedirs(paths.image_path) open(f"{paths.image_path}/image.png", "w+").close() - def profile_log_likelihood_function( - self, - paths: AbstractPaths, - instance - ): + def profile_log_likelihood_function(self, paths: AbstractPaths, instance): self.did_profile = True @@ -42,9 +31,7 @@ def test_visualise(): analysis_1 = Analysis() analysis_2 = Analysis() - (analysis_1 + analysis_2).visualize( - af.DirectoryPaths(), None, None - ) + (analysis_1 + analysis_2).visualize(af.DirectoryPaths(), None, None) assert analysis_1.did_visualise is True assert analysis_2.did_visualise is True @@ -66,38 +53,20 @@ def test_make_result(): analysis_1 = Analysis() analysis_2 = Analysis() - result = (analysis_1 + analysis_2).make_result( - samples=None, model=None - ) + result = (analysis_1 + analysis_2).make_result(samples=None, model=[None, None]) assert len(result) == 2 def test_add_analysis(): - assert (Analysis() + Analysis()).log_likelihood_function( - None - ) == -2 + assert (Analysis() + Analysis()).log_likelihood_function(None) == -2 @pytest.mark.parametrize( - "number, first, second", - [ - (3, 2, 1), - (4, 2, 2), - (5, 3, 2), - (6, 3, 3), - (7, 4, 3), - ] + "number, first, second", [(3, 2, 1), (4, 2, 2), (5, 3, 2), (6, 3, 3), (7, 4, 3),] ) -def test_analysis_pool( - number, - first, - second -): - pool = AnalysisPool( - number * [Analysis()], - 2 - ) +def test_analysis_pool(number, first, second): + pool = AnalysisPool(number * [Analysis()], 2) process_1, process_2 = pool.processes @@ -105,21 +74,13 @@ def test_analysis_pool( assert len(process_2.analyses) == second -@with_config( - "general", "analysis", "n_cores", - value=2 -) -@pytest.mark.parametrize( - "number", - list(range(1, 3)) -) +@with_config("general", "analysis", "n_cores", value=2) +@pytest.mark.parametrize("number", list(range(1, 3))) def test_two_cores(number): analysis = Analysis() for _ in range(number - 1): analysis += Analysis() - assert analysis.log_likelihood_function( - None - ) == -number + assert analysis.log_likelihood_function(None) == -number def test_still_flat(): @@ -133,90 +94,49 @@ def test_still_flat(): def test_sum_analyses(): - combined = sum( - Analysis() - for _ in range(5) - ) + combined = sum(Analysis() for _ in range(5)) assert len(combined) == 5 -@pytest.fixture( - name="search" -) +@pytest.fixture(name="search") def make_search(): - return af.m.MockSearch( - "search_name" - ) + return af.m.MockSearch("search_name") -def test_child_paths( - search -): +def test_child_paths(search): paths = search.paths - sub_paths = SubDirectoryPaths( - paths, - analysis_name="analysis_0" - ) + sub_paths = SubDirectoryPaths(paths, analysis_name="analysis_0") assert sub_paths.output_path == f"{paths.output_path}/analysis_0" -@pytest.fixture( - name="multi_analysis" -) +@pytest.fixture(name="multi_analysis") def make_multi_analysis(): return Analysis() + Analysis() -@pytest.fixture( - name="multi_search" -) -def make_multi_search( - search, - multi_analysis -): +@pytest.fixture(name="multi_search") +def make_multi_search(search, multi_analysis): search.paths.remove_files = False - search.fit( - af.Model( - af.Gaussian - ), - multi_analysis - ) + search.fit(af.Model(af.Gaussian), multi_analysis) search.paths.save_all({}, {}, []) return search -@with_config( - "general", - "output", - "remove_files", - value=False -) -def test_visualise( - multi_search, - multi_analysis -): - multi_analysis.visualize( - multi_search.paths, - af.Gaussian(), - True - ) +@with_config("general", "output", "remove_files", value=False) +def test_visualise(multi_search, multi_analysis): + multi_analysis.visualize(multi_search.paths, af.Gaussian(), True) search_path = Path(multi_search.paths.output_path) assert search_path.exists() assert (search_path / "analyses/analysis_0/image/image.png").exists() assert (search_path / "analyses/analysis_1/image/image.png").exists() -def test_set_number_of_cores( - multi_analysis -): +def test_set_number_of_cores(multi_analysis): multi_analysis.n_cores = 1 assert multi_analysis._log_likelihood_function.__name__ == "_summed_log_likelihood" multi_analysis.n_cores = 2 - assert isinstance( - multi_analysis._log_likelihood_function, - AnalysisPool - ) + assert isinstance(multi_analysis._log_likelihood_function, AnalysisPool) multi_analysis.n_cores = 1 assert multi_analysis._log_likelihood_function.__name__ == "_summed_log_likelihood" From b30681f0e14d4f5823a69fba9da414ebd89b4aa1 Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Mon, 19 Dec 2022 12:50:43 +0000 Subject: [PATCH 104/226] build badges --- .github/workflows/main.yml | 2 +- README.rst | 8 +++++++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index b8ea09abf..06611eead 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -1,4 +1,4 @@ -name: Run unittests +name: Tests on: [push] diff --git a/README.rst b/README.rst index 65f3eacec..41506226d 100644 --- a/README.rst +++ b/README.rst @@ -8,10 +8,16 @@ PyAutoFit: Classy Probabilistic Programming :target: https://pyautofit.readthedocs.io/en/latest/?badge=latest :alt: Documentation Status +.. |Tests| image:: https://github.com/rhayes777/PyAutoFit/actions/workflows/main.yml/badge.svg + :target: https://github.com/rhayes777/PyAutoFit/actions + +.. |Build| image:: https://github.com/rhayes777/PyAutoBuild/actions/workflows/release.yml/badge.svg + :target: https://github.com/rhayes777/PyAutoBuild/actions + .. |JOSS| image:: https://joss.theoj.org/papers/10.21105/joss.02550/status.svg :target: https://doi.org/10.21105/joss.02550 -|binder| |RTD| |JOSS| +|binder| |Tests| |Build| |RTD| |JOSS| `Installation Guide `_ | `readthedocs `_ | From 66d9a9809be30a926e8eb34a750b9b094e3be262 Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 19 Dec 2022 12:56:48 +0000 Subject: [PATCH 105/226] combined has method --- autofit/mapper/prior_model/abstract.py | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/autofit/mapper/prior_model/abstract.py b/autofit/mapper/prior_model/abstract.py index 741bb5034..03b817d66 100644 --- a/autofit/mapper/prior_model/abstract.py +++ b/autofit/mapper/prior_model/abstract.py @@ -6,7 +6,7 @@ import types from collections import defaultdict from functools import wraps -from typing import Tuple, Optional, Dict, List, Iterable, Generator +from typing import Tuple, Optional, Dict, List, Iterable, Generator, Union, Type import numpy as np @@ -30,8 +30,8 @@ from autofit.mapper.prior_model.util import PriorModelNameValue from autofit.text import formatter as frm from autofit.text.formatter import TextFormatter -from autofit.tools.util import split_paths from autofit.tools.util import info_whitespace +from autofit.tools.util import split_paths logger = logging.getLogger(__name__) @@ -743,6 +743,20 @@ def instance_from_vector(self, vector, ignore_prior_limits=False): return self.instance_for_arguments(arguments,) + def has(self, cls: Union[Type, Tuple[Type, ...]]) -> bool: + """ + Parameters + ---------- + cls + The type to check for + + Returns + ------- + True iff this model contains an instance or model with the given + type recursively. + """ + return self.has_instance(cls) or self.has_model(cls) + def has_instance(self, cls) -> bool: """ True iff this model contains an instance of type From 23ebe425aedb337b833e5424c95916c5b38fa1ea Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 19 Dec 2022 13:44:37 +0000 Subject: [PATCH 106/226] fix stochastic test by adding limit --- .../graphical/hierarchical/test_embedded.py | 47 ++++--------------- 1 file changed, 10 insertions(+), 37 deletions(-) diff --git a/test_autofit/graphical/hierarchical/test_embedded.py b/test_autofit/graphical/hierarchical/test_embedded.py index 45325a296..904a43ee2 100644 --- a/test_autofit/graphical/hierarchical/test_embedded.py +++ b/test_autofit/graphical/hierarchical/test_embedded.py @@ -19,7 +19,7 @@ def make_centre_model(): return g.HierarchicalFactor( af.GaussianPrior, mean=af.GaussianPrior(mean=100, sigma=10), - sigma=af.GaussianPrior(mean=10, sigma=5), + sigma=af.GaussianPrior(mean=10, sigma=5, lower_limit=0), ) @@ -54,11 +54,7 @@ def make_centre(centre_model): def generate_data(centres): data = [] for centre in centres: - gaussian = af.Gaussian( - centre=centre, - normalization=20, - sigma=5, - ) + gaussian = af.Gaussian(centre=centre, normalization=20, sigma=5,) data.append(gaussian(x)) return data @@ -89,44 +85,21 @@ def test_full_fit(centre_model, data, centres): for i, y in enumerate(data): prior_model = af.PriorModel( af.Gaussian, - centre=af.GaussianPrior( - mean=100, - sigma=1 - ), + centre=af.GaussianPrior(mean=100, sigma=1), intensity=20, normalization=20, - sigma=5 - ) - graph.add( - g.AnalysisFactor( - prior_model, - analysis=Analysis( - x=x, - y=y - ) - ) - ) - centre_model.add_drawn_variable( - prior_model.centre + sigma=5, ) + graph.add(g.AnalysisFactor(prior_model, analysis=Analysis(x=x, y=y))) + centre_model.add_drawn_variable(prior_model.centre) graph.add(centre_model) optimiser = g.LaplaceOptimiser() - collection = graph.optimise( - optimiser, - max_steps=10 - ).model - - for gaussian, centre in zip( - collection.with_prefix( - "AnalysisFactor" - ), - centres - ): + collection = graph.optimise(optimiser, max_steps=10).model + + for gaussian, centre in zip(collection.with_prefix("AnalysisFactor"), centres): assert gaussian.instance_from_prior_medians().centre == pytest.approx( - centre, - abs=0.1 + centre, abs=0.1 ) - From 48938d1ceea005e993085d7d4a38825c78ce6080 Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Mon, 19 Dec 2022 13:56:10 +0000 Subject: [PATCH 107/226] python requires --- docs/index.rst | 11 +++++++++-- setup.py | 9 +++------ 2 files changed, 12 insertions(+), 8 deletions(-) diff --git a/docs/index.rst b/docs/index.rst index f24fcbd65..8bd8c160f 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -37,9 +37,16 @@ for example: How does PyAutoFit Work? ======================== +To illustrate the **PyAutoFit** API, we'll use an illustrative toy model of fitting a one-dimensional Gaussian to +noisy 1D data. Here's the ``data`` (black) and the model (red) we'll fit: + +.. image:: https://raw.githubusercontent.com/rhayes777/PyAutoFit/master/files/toy_model_fit.png + :width: 400 + Model components are written as Python classes, allowing **PyAutoFit** to define the *model* and -associated *parameters* in an expressive way that is tied to the modeling software's API. Here is a simple example of -how a *model* representing a 1D Gaussian is written: +associated *parameters* in an expressive way that is tied to the modeling software's API. + +Here is a simple example of how a *model* representing a 1D Gaussian is written: .. code-block:: python diff --git a/setup.py b/setup.py index 1d591850a..25e1731d5 100644 --- a/setup.py +++ b/setup.py @@ -43,15 +43,12 @@ def config_packages(directory): "License :: OSI Approved :: MIT License", "Natural Language :: English", "Operating System :: OS Independent", - "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.2", - "Programming Language :: Python :: 3.3", - "Programming Language :: Python :: 3.4", - "Programming Language :: Python :: 3.5", - "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", ], + python_requires='>=3.7', keywords="cli", packages=find_packages(exclude=["docs", "test_autofit", "test_autofit*"]) + config_packages('autofit/config'), install_requires=requirements, From 16bac1b879fac109f586e890526b7c02d6076a92 Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 19 Dec 2022 14:09:13 +0000 Subject: [PATCH 108/226] include 0 dimension models when using has --- autofit/mapper/model.py | 118 +++++++++---------------- autofit/mapper/prior_model/abstract.py | 17 +++- test_autofit/mapper/model/test_has.py | 9 ++ 3 files changed, 63 insertions(+), 81 deletions(-) diff --git a/autofit/mapper/model.py b/autofit/mapper/model.py index 76020283a..1ed0d8b8d 100644 --- a/autofit/mapper/model.py +++ b/autofit/mapper/model.py @@ -6,9 +6,7 @@ from autofit.mapper.model_object import ModelObject from autofit.mapper.prior_model.recursion import DynamicRecursionCache -logger = logging.getLogger( - __name__ -) +logger = logging.getLogger(__name__) def frozen_cache(func): @@ -32,17 +30,11 @@ def frozen_cache(func): @wraps(func) def cache(self, *args, **kwargs): if hasattr(self, "_is_frozen") and self._is_frozen: - key = (func.__name__, self, *args,) + tuple( - kwargs.items() - ) + key = (func.__name__, self, *args,) + tuple(kwargs.items()) if key not in self._frozen_cache: - self._frozen_cache[ - key - ] = func(self, *args, **kwargs) - return self._frozen_cache[ - key - ] + self._frozen_cache[key] = func(self, *args, **kwargs) + return self._frozen_cache[key] return func(self, *args, **kwargs) return cache @@ -68,16 +60,14 @@ def assert_not_frozen(func): @wraps(func) def wrapper(self, *args, **kwargs): - string_args = list(filter( - lambda arg: isinstance(arg, str), - args - )) - if "_is_frozen" not in string_args and "_frozen_cache" not in string_args and hasattr( - self, "_is_frozen" - ) and self._is_frozen: - raise AssertionError( - "Frozen models cannot be modified" - ) + string_args = list(filter(lambda arg: isinstance(arg, str), args)) + if ( + "_is_frozen" not in string_args + and "_frozen_cache" not in string_args + and hasattr(self, "_is_frozen") + and self._is_frozen + ): + raise AssertionError("Frozen models cannot be modified") return func(self, *args, **kwargs) return wrapper @@ -91,10 +81,7 @@ def __init__(self, label=None): def __getstate__(self): return { - key: value - for key, value - in self.__dict__.items() - if key != "_frozen_cache" + key: value for key, value in self.__dict__.items() if key != "_frozen_cache" } def __setstate__(self, state): @@ -109,9 +96,7 @@ def freeze(self): and does not allow its state to be modified. """ logger.debug("Freezing model") - tuples = self.direct_tuples_with_type( - AbstractModel - ) + tuples = self.direct_tuples_with_type(AbstractModel) for _, model in tuples: if model is not self: model.freeze() @@ -124,9 +109,7 @@ def unfreeze(self): """ logger.debug("Thawing model") self._is_frozen = False - tuples = self.direct_tuples_with_type( - AbstractModel - ) + tuples = self.direct_tuples_with_type(AbstractModel) for _, model in tuples: if model is not self: model.unfreeze() @@ -155,7 +138,7 @@ def copy(self): return copy.deepcopy(self) def object_for_path( - self, path: Iterable[Union[str, int, type]] + self, path: Iterable[Union[str, int, type]] ) -> Union[object, List]: """ Get the object at a given path. @@ -199,10 +182,10 @@ def object_for_path( @frozen_cache def path_instance_tuples_for_class( - self, - cls: Union[Tuple, Type], - ignore_class: bool = None, - ignore_children: bool = True + self, + cls: Union[Tuple, Type], + ignore_class: bool = None, + ignore_children: bool = True, ): """ Tuples containing the path tuple and instance for every instance of the class @@ -223,23 +206,22 @@ def path_instance_tuples_for_class( Tuples containing the path to and instance of objects of the given type. """ return path_instances_of_class( - self, - cls, - ignore_class=ignore_class, - ignore_children=ignore_children + self, cls, ignore_class=ignore_class, ignore_children=ignore_children ) @frozen_cache def direct_tuples_with_type(self, class_type): return list( filter( - lambda t: t[0] != "id" and not t[0].startswith("_") and isinstance(t[1], class_type), + lambda t: t[0] != "id" + and not t[0].startswith("_") + and isinstance(t[1], class_type), self.__dict__.items(), ) ) @frozen_cache - def model_tuples_with_type(self, cls): + def model_tuples_with_type(self, cls, include_zero_dimension=False): """ All models of the class in this model which have at least one free parameter, recursively. @@ -248,30 +230,25 @@ def model_tuples_with_type(self, cls): ---------- cls The type of the model + include_zero_dimension + If true, include models with 0 free parameters Returns ------- Models with free parameters """ from .prior_model.prior_model import PriorModel + return [ (path, model) - for path, model - in self.attribute_tuples_with_type( - PriorModel - ) - if issubclass( - model.cls, - cls - ) and model.prior_count > 0 + for path, model in self.attribute_tuples_with_type(PriorModel) + if issubclass(model.cls, cls) + and (include_zero_dimension or model.prior_count > 0) ] @frozen_cache def attribute_tuples_with_type( - self, - class_type, - ignore_class=None, - ignore_children=True + self, class_type, ignore_class=None, ignore_children=True ) -> List[tuple]: """ Tuples describing the name and instance for attributes in the model @@ -293,19 +270,17 @@ def attribute_tuples_with_type( return [ (path[-1] if len(path) > 0 else "", value) for path, value in self.path_instance_tuples_for_class( - class_type, - ignore_class=ignore_class, - ignore_children=ignore_children + class_type, ignore_class=ignore_class, ignore_children=ignore_children ) ] @DynamicRecursionCache() def path_instances_of_class( - obj, - cls: type, - ignore_class: Optional[Union[type, Tuple[type]]] = None, - ignore_children: bool = False + obj, + cls: type, + ignore_class: Optional[Union[type, Tuple[type]]] = None, + ignore_children: bool = False, ): """ Recursively search the object for instances of a given class @@ -345,10 +320,7 @@ def path_instances_of_class( if key.startswith("_"): continue for item in path_instances_of_class( - value, - cls, - ignore_class=ignore_class, - ignore_children=ignore_children + value, cls, ignore_class=ignore_class, ignore_children=ignore_children ): if isinstance(value, AnnotationPriorModel): path = (key,) @@ -383,9 +355,7 @@ def __getitem__(self, item): if isinstance(item, int): return list(self.values())[item] if isinstance(item, slice): - return ModelInstance( - list(self.values())[item] - ) + return ModelInstance(list(self.values())[item]) return self.__dict__[item] def __setitem__(self, key, value): @@ -402,14 +372,8 @@ def dict(self): return { key: value for key, value in self.__dict__.items() - if key not in ( - "id", - "component_number", - "item_number" - ) and not ( - isinstance(key, str) - and key.startswith("_") - ) + if key not in ("id", "component_number", "item_number") + and not (isinstance(key, str) and key.startswith("_")) } def values(self): diff --git a/autofit/mapper/prior_model/abstract.py b/autofit/mapper/prior_model/abstract.py index e628e847d..bdacf42fe 100644 --- a/autofit/mapper/prior_model/abstract.py +++ b/autofit/mapper/prior_model/abstract.py @@ -730,9 +730,11 @@ def has(self, cls: Union[Type, Tuple[Type, ...]]) -> bool: Returns ------- True iff this model contains an instance or model with the given - type recursively. + type recursively. Includes models which have zero priors. """ - return self.has_instance(cls) or self.has_model(cls) + return self.has_instance(cls) or self.has_model( + cls, include_zero_dimension=True + ) def has_instance(self, cls) -> bool: """ @@ -741,12 +743,19 @@ def has_instance(self, cls) -> bool: """ return len(self.attribute_tuples_with_type(cls)) > 0 - def has_model(self, cls) -> bool: + def has_model(self, cls, include_zero_dimension=False) -> bool: """ True iff this model contains a PriorModel of type cls, recursively. """ - return len(self.model_tuples_with_type(cls)) > 0 + return ( + len( + self.model_tuples_with_type( + cls, include_zero_dimension=include_zero_dimension, + ) + ) + > 0 + ) def is_only_model(self, cls) -> bool: """ diff --git a/test_autofit/mapper/model/test_has.py b/test_autofit/mapper/model/test_has.py index 8e481fd24..b7a986176 100644 --- a/test_autofit/mapper/model/test_has.py +++ b/test_autofit/mapper/model/test_has.py @@ -29,3 +29,12 @@ def test_collection_of_collection(collection): collection = af.Collection(collection=collection) assert collection.has_instance(af.Prior) assert collection.has_model(af.Gaussian) + + +def test_has_0_dimension(): + collection = af.Collection( + gaussian=af.Model(af.Gaussian, centre=0.0, normalization=0.1, sigma=0.01,) + ) + + assert not collection.has_model(af.Gaussian) + assert collection.has(af.Gaussian) From f89bcb07a5f5b1dabb26792b5c9d40991d00f80d Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Mon, 19 Dec 2022 14:41:38 +0000 Subject: [PATCH 109/226] fix identifer test --- autofit/__init__.py | 8 +- autofit/database/model/model.py | 16 ++-- autofit/database/model/prior.py | 16 ++-- autofit/graphical/declarative/abstract.py | 6 +- autofit/graphical/declarative/collection.py | 6 +- .../declarative/factor/hierarchical.py | 8 +- autofit/graphical/declarative/factor/prior.py | 6 +- autofit/graphical/declarative/result.py | 6 +- autofit/graphical/mean_field.py | 8 +- autofit/mapper/__init__.py | 4 +- autofit/mapper/model.py | 8 +- autofit/mapper/model_mapper.py | 8 +- autofit/mapper/model_object.py | 16 ++-- autofit/mapper/prior_model/abstract.py | 44 ++++----- autofit/mapper/prior_model/annotation.py | 4 +- autofit/mapper/prior_model/collection.py | 8 +- autofit/mapper/prior_model/prior_model.py | 18 ++-- autofit/non_linear/analysis/free_parameter.py | 4 +- autofit/non_linear/analysis/model_analysis.py | 6 +- docs/overview/model_complex.rst | 2 +- .../database/identifier/test_converter.py | 6 +- .../database/identifier/test_identifiers.py | 48 +++++----- .../pickles/model.pickle | Bin 743 -> 728 bytes .../pickles/samples.pickle | Bin 867672 -> 867657 bytes test_autofit/database/test_serialize.py | 12 +-- .../functionality/test_model_info.py | 2 +- test_autofit/graphical/gaussian/conftest.py | 2 +- test_autofit/graphical/gaussian/model.py | 2 +- .../graphical/gaussian/test_declarative.py | 6 +- .../graphical/gaussian/test_other_priors.py | 4 +- .../graphical/hierarchical/test_embedded.py | 4 +- .../graphical/regression/test_identifier.py | 5 +- test_autofit/mapper/model/test_freeze.py | 6 +- .../mapper/model/test_model_instance.py | 2 +- .../mapper/model/test_model_mapper.py | 56 +++++------ test_autofit/mapper/model/test_overloading.py | 4 +- test_autofit/mapper/model/test_prior_model.py | 88 +++++++++--------- test_autofit/mapper/model/test_regression.py | 2 +- test_autofit/mapper/prior/test_prior.py | 4 +- test_autofit/mapper/test_abstract.py | 4 +- test_autofit/mapper/test_from_data_names.py | 4 +- test_autofit/mapper/test_has.py | 26 +++--- test_autofit/mapper/test_parameterization.py | 8 +- test_autofit/mapper/test_take_attributes.py | 32 +++---- .../grid/test_sensitivity/conftest.py | 8 +- .../non_linear/test_abstract_search.py | 4 +- test_autofit/non_linear/test_initializer.py | 8 +- test_autofit/test_equality.py | 10 +- 48 files changed, 280 insertions(+), 279 deletions(-) diff --git a/autofit/__init__.py b/autofit/__init__.py index eaffa431c..34e58aa0d 100644 --- a/autofit/__init__.py +++ b/autofit/__init__.py @@ -55,10 +55,10 @@ from .mapper.prior_model.attribute_pair import InstanceNameValue from .mapper.prior_model.attribute_pair import PriorNameValue from .mapper.prior_model.attribute_pair import cast_collection -from .mapper.prior_model.collection import CollectionPriorModel -from .mapper.prior_model.collection import CollectionPriorModel as Collection -from .mapper.prior_model.prior_model import PriorModel -from .mapper.prior_model.prior_model import PriorModel as Model +from .mapper.prior_model.collection import Collection +from .mapper.prior_model.collection import Collection +from .mapper.prior_model.prior_model import Model +from .mapper.prior_model.prior_model import Model from .mapper.prior_model.util import PriorModelNameValue from .non_linear.abstract_search import NonLinearSearch from .non_linear.abstract_search import PriorPasser diff --git a/autofit/database/model/model.py b/autofit/database/model/model.py index 196d87ff7..b3d8dbf40 100644 --- a/autofit/database/model/model.py +++ b/autofit/database/model/model.py @@ -95,9 +95,9 @@ def from_object( An instance of a concrete child of this class """ - from autofit.mapper.prior_model.prior_model import PriorModel + from autofit.mapper.prior_model.prior_model import Model from autofit.mapper.prior.abstract import Prior - from autofit.mapper.prior_model.collection import CollectionPriorModel + from autofit.mapper.prior_model.collection import Collection if source is None or isinstance( source, @@ -110,9 +110,9 @@ def from_object( ): from .instance import NoneInstance instance = NoneInstance() - elif isinstance(source, PriorModel): - from .prior import PriorModel - instance = PriorModel._from_object( + elif isinstance(source, Model): + from .prior import Model + instance = Model._from_object( source ) elif isinstance(source, Prior): @@ -130,9 +130,9 @@ def from_object( instance = Collection._from_object( source ) - elif isinstance(source, (CollectionPriorModel, dict)): - from .prior import CollectionPriorModel - instance = CollectionPriorModel._from_object( + elif isinstance(source, (Collection, dict)): + from .prior import Collection + instance = Collection._from_object( source ) elif isinstance(source, str): diff --git a/autofit/database/model/prior.py b/autofit/database/model/prior.py index 1e975c90e..17394e749 100644 --- a/autofit/database/model/prior.py +++ b/autofit/database/model/prior.py @@ -9,7 +9,7 @@ from .model import Object -class CollectionPriorModel(Object): +class Collection(Object): """ A collection """ @@ -32,7 +32,7 @@ class CollectionPriorModel(Object): def _from_object( cls, source: Union[ - collection.CollectionPriorModel, + collection.Collection, list, dict ] @@ -40,19 +40,19 @@ def _from_object( instance = cls() if not isinstance( source, - collection.CollectionPriorModel + collection.Collection ): - source = collection.CollectionPriorModel( + source = collection.Collection( source ) instance._add_children( source.items() ) - instance.cls = collection.CollectionPriorModel + instance.cls = collection.Collection return instance -class PriorModel(Object): +class Model(Object): """ A prior model """ @@ -74,7 +74,7 @@ class PriorModel(Object): @classmethod def _from_object( cls, - model: prior_model.PriorModel, + model: prior_model.Model, ): instance = cls() instance.cls = model.cls @@ -82,7 +82,7 @@ def _from_object( return instance def _make_instance(self): - instance = object.__new__(prior_model.PriorModel) + instance = object.__new__(prior_model.Model) instance.cls = self.cls instance._assertions = [] return instance diff --git a/autofit/graphical/declarative/abstract.py b/autofit/graphical/declarative/abstract.py index e16440714..ccb8056cb 100644 --- a/autofit/graphical/declarative/abstract.py +++ b/autofit/graphical/declarative/abstract.py @@ -9,7 +9,7 @@ from autofit.graphical.expectation_propagation import EPMeanField, EPOptimiser from autofit.mapper.model import ModelInstance from autofit.mapper.prior.abstract import Prior -from autofit.mapper.prior_model.collection import CollectionPriorModel +from autofit.mapper.prior_model.collection import Collection from autofit.mapper.variable import Plate from autofit.messages.normal import NormalMessage from autofit.non_linear.analysis import Analysis @@ -255,14 +255,14 @@ def visualize( ) @property - def global_prior_model(self) -> CollectionPriorModel: + def global_prior_model(self) -> Collection: """ A collection of prior models, with one model for each factor. """ return GlobalPriorModel(self) -class GlobalPriorModel(CollectionPriorModel): +class GlobalPriorModel(Collection): def __init__( self, factor: AbstractDeclarativeFactor diff --git a/autofit/graphical/declarative/collection.py b/autofit/graphical/declarative/collection.py index b1c3d2fe5..9bad8fb29 100644 --- a/autofit/graphical/declarative/collection.py +++ b/autofit/graphical/declarative/collection.py @@ -36,11 +36,11 @@ def __init__( @property def prior_model(self): """ - Construct a CollectionPriorModel comprising the prior models described + Construct a Collection comprising the prior models described in each model factor """ - from autofit.mapper.prior_model.collection import CollectionPriorModel - return CollectionPriorModel({ + from autofit.mapper.prior_model.collection import Collection + return Collection({ factor.name: factor.prior_model for factor in self.model_factors diff --git a/autofit/graphical/declarative/factor/hierarchical.py b/autofit/graphical/declarative/factor/hierarchical.py index d13e0be5e..7d524ecca 100644 --- a/autofit/graphical/declarative/factor/hierarchical.py +++ b/autofit/graphical/declarative/factor/hierarchical.py @@ -2,8 +2,8 @@ from autofit.mapper.model import ModelInstance from autofit.mapper.prior.abstract import Prior -from autofit.mapper.prior_model.collection import CollectionPriorModel -from autofit.mapper.prior_model.prior_model import PriorModel +from autofit.mapper.prior_model.collection import Collection +from autofit.mapper.prior_model.prior_model import Model from autofit.mapper.variable import Plate from autofit.messages import NormalMessage from autofit.non_linear.paths.abstract import AbstractPaths @@ -11,7 +11,7 @@ from .abstract import AbstractModelFactor -class HierarchicalFactor(PriorModel): +class HierarchicalFactor(Model): _plates: Tuple[Plate, ...] = () def __init__( @@ -165,7 +165,7 @@ def __init__( prior_variable_dict["argument"] = drawn_prior super().__init__( - prior_model=CollectionPriorModel( + prior_model=Collection( distribution_model=distribution_model, drawn_prior=drawn_prior ), factor=Factor(distribution_model), diff --git a/autofit/graphical/declarative/factor/prior.py b/autofit/graphical/declarative/factor/prior.py index 01b266a11..093219466 100644 --- a/autofit/graphical/declarative/factor/prior.py +++ b/autofit/graphical/declarative/factor/prior.py @@ -1,6 +1,6 @@ from autofit.graphical.factor_graphs.factor import FactorKW from autofit.mapper.prior.abstract import Prior -from autofit.mapper.prior_model.collection import CollectionPriorModel +from autofit.mapper.prior_model.collection import Collection from autofit.non_linear.analysis import Analysis from autofit.tools.namer import namer @@ -34,11 +34,11 @@ def _unique_representation(self): ) @property - def prior_model(self) -> CollectionPriorModel: + def prior_model(self) -> Collection: """ A trivial prior model to conform to the expected interface. """ - return CollectionPriorModel(self.prior) + return Collection(self.prior) @property def analysis(self) -> "PriorFactor": diff --git a/autofit/graphical/declarative/result.py b/autofit/graphical/declarative/result.py index 198b68703..ca567d196 100644 --- a/autofit/graphical/declarative/result.py +++ b/autofit/graphical/declarative/result.py @@ -5,7 +5,7 @@ from autofit.graphical.expectation_propagation.ep_mean_field import EPMeanField from autofit.graphical.factor_graphs.factor import Factor from autofit.mapper.prior_model.abstract import AbstractPriorModel -from autofit.mapper.prior_model.collection import CollectionPriorModel +from autofit.mapper.prior_model.collection import Collection from autofit.non_linear.result import Result, AbstractResult from autofit.non_linear.samples.samples import Samples @@ -91,13 +91,13 @@ def __init__( self.updated_ep_mean_field = updated_ep_mean_field @property - def model(self) -> CollectionPriorModel: + def model(self) -> Collection: """ A collection populated with messages representing the posteriors of the EP Optimisation. Each item in the collection represents a single factor in the optimisation. """ - collection = CollectionPriorModel({ + collection = Collection({ factor.name: factor.prior_model for factor in self.declarative_factor.model_factors diff --git a/autofit/graphical/mean_field.py b/autofit/graphical/mean_field.py index e694ce6d6..b42200c07 100755 --- a/autofit/graphical/mean_field.py +++ b/autofit/graphical/mean_field.py @@ -18,7 +18,7 @@ LogWarnings, ) from autofit.mapper.prior.abstract import Prior -from autofit.mapper.prior_model.collection import CollectionPriorModel +from autofit.mapper.prior_model.collection import Collection from autofit.mapper.variable import ( Variable, Plate, @@ -45,7 +45,7 @@ def is_message(message): # Does this need to be a Factor? -class MeanField(CollectionPriorModel, Dict[Variable, AbstractMessage], Factor): +class MeanField(Collection, Dict[Variable, AbstractMessage], Factor): """For a factor with multiple variables, this class represents the the mean field approximation to that factor, @@ -80,7 +80,7 @@ def __init__( ): dict.__init__(self, dists) Factor.__init__(self, self._logpdf, *self, arg_names=[]) - CollectionPriorModel.__init__(self) + Collection.__init__(self) if isinstance(dists, MeanField): self.log_norm = dists.log_norm @@ -255,7 +255,7 @@ def precision(self, variables=None): @property def arguments(self) -> Dict[Variable, Prior]: """ - Arguments that can be used to update a PriorModel + Arguments that can be used to update a Model """ return {v: dist for v, dist in self.items()} diff --git a/autofit/mapper/__init__.py b/autofit/mapper/__init__.py index 15fff51dd..15319e48a 100644 --- a/autofit/mapper/__init__.py +++ b/autofit/mapper/__init__.py @@ -1,3 +1,3 @@ from .prior.abstract import Prior -from .prior_model.collection import CollectionPriorModel -from .prior_model.prior_model import PriorModel +from .prior_model.collection import Collection +from .prior_model.prior_model import Model diff --git a/autofit/mapper/model.py b/autofit/mapper/model.py index 1ed0d8b8d..867c07664 100644 --- a/autofit/mapper/model.py +++ b/autofit/mapper/model.py @@ -164,7 +164,7 @@ def object_for_path( if isinstance(name, int): instance = instance[name] elif isinstance(name, type): - from autofit.mapper.prior_model.prior_model import PriorModel + from autofit.mapper.prior_model.prior_model import Model instances = [ instance @@ -172,7 +172,7 @@ def object_for_path( ] instances += [ instance - for _, instance in self.path_instance_tuples_for_class(PriorModel) + for _, instance in self.path_instance_tuples_for_class(Model) if issubclass(instance.cls, name) ] instance = ModelInstance(instances) @@ -237,11 +237,11 @@ def model_tuples_with_type(self, cls, include_zero_dimension=False): ------- Models with free parameters """ - from .prior_model.prior_model import PriorModel + from .prior_model.prior_model import Model return [ (path, model) - for path, model in self.attribute_tuples_with_type(PriorModel) + for path, model in self.attribute_tuples_with_type(Model) if issubclass(model.cls, cls) and (include_zero_dimension or model.prior_count > 0) ] diff --git a/autofit/mapper/model_mapper.py b/autofit/mapper/model_mapper.py index 708a81431..a35a06c5a 100644 --- a/autofit/mapper/model_mapper.py +++ b/autofit/mapper/model_mapper.py @@ -1,11 +1,11 @@ import os -from autofit.mapper.prior_model.collection import CollectionPriorModel +from autofit.mapper.prior_model.collection import Collection path = os.path.dirname(os.path.realpath(__file__)) -class ModelMapper(CollectionPriorModel): +class ModelMapper(Collection): """ A mapper of priors formed by passing in classes to be reconstructed @@ -28,7 +28,7 @@ class ModelMapper(CollectionPriorModel): mapper.gaussian = al.lp.Gaussian mapper.any_class = SomeClass - A `PriorModel` instance is created each time we add a class to the mapper. We + A `Model` instance is created each time we add a class to the mapper. We can access those models using # the mapper attributes: sersic_model = mapper.sersic @@ -71,7 +71,7 @@ def prior_prior_model_dict(self): Returns ------- - prior_prior_model_dict: {Prior: PriorModel} + prior_prior_model_dict: {Prior: Model} A dictionary mapping priors to associated prior models. Each prior will only have one prior model; if a prior is shared by two prior models then one of those prior models will be in this dictionary. diff --git a/autofit/mapper/model_object.py b/autofit/mapper/model_object.py index 76d629ed7..19b01e116 100644 --- a/autofit/mapper/model_object.py +++ b/autofit/mapper/model_object.py @@ -103,8 +103,8 @@ def from_dict(d): An instance """ from autofit.mapper.prior_model.abstract import AbstractPriorModel - from autofit.mapper.prior_model.collection import CollectionPriorModel - from autofit.mapper.prior_model.prior_model import PriorModel + from autofit.mapper.prior_model.collection import Collection + from autofit.mapper.prior_model.prior_model import Model from autofit.mapper.prior.abstract import Prior from autofit.mapper.prior.tuple_prior import TuplePrior @@ -114,9 +114,9 @@ def from_dict(d): type_ = d["type"] if type_ == "model": - instance = PriorModel(get_class(d.pop("class_path"))) + instance = Model(get_class(d.pop("class_path"))) elif type_ == "collection": - instance = CollectionPriorModel() + instance = Collection() elif type_ == "instance": cls = get_class(d.pop("class_path")) instance = object.__new__(cls) @@ -136,15 +136,15 @@ def dict(self) -> dict: A dictionary representation of this object """ from autofit.mapper.prior_model.abstract import AbstractPriorModel - from autofit.mapper.prior_model.collection import CollectionPriorModel - from autofit.mapper.prior_model.prior_model import PriorModel + from autofit.mapper.prior_model.collection import Collection + from autofit.mapper.prior_model.prior_model import Model from autofit.mapper.prior.tuple_prior import TuplePrior - if isinstance(self, CollectionPriorModel): + if isinstance(self, Collection): type_ = "collection" elif isinstance(self, AbstractPriorModel) and self.prior_count == 0: type_ = "instance" - elif isinstance(self, PriorModel): + elif isinstance(self, Model): type_ = "model" elif isinstance(self, TuplePrior): type_ = "tuple_prior" diff --git a/autofit/mapper/prior_model/abstract.py b/autofit/mapper/prior_model/abstract.py index bdacf42fe..14ea28aec 100644 --- a/autofit/mapper/prior_model/abstract.py +++ b/autofit/mapper/prior_model/abstract.py @@ -210,11 +210,11 @@ def cast( A model where specified child models have been updated to a new class and new arguments """ - from .prior_model import PriorModel + from .prior_model import Model updated = self - for path, prior_model in self.path_instance_tuples_for_class(PriorModel): + for path, prior_model in self.path_instance_tuples_for_class(Model): try: model_value_dict = value_dict[prior_model] argument_dict = { @@ -223,7 +223,7 @@ def cast( **model_value_dict, } updated = updated.replacing_for_path( - path, PriorModel(new_class, **argument_dict) + path, Model(new_class, **argument_dict) ) except KeyError: @@ -364,7 +364,7 @@ def from_json(cls, file: str): Returns ------- object - The model, which may be a `Collection` of `PriorModel` objects or a single `PriorModel`. + The model, which may be a `Collection` of `Model` objects or a single `Model`. """ with open(file) as json_file: @@ -400,14 +400,14 @@ def name(self): @staticmethod def from_object(t, *args, **kwargs): if inspect.isclass(t): - from .prior_model import PriorModel + from .prior_model import Model - obj = object.__new__(PriorModel) + obj = object.__new__(Model) obj.__init__(t, **kwargs) elif isinstance(t, list) or isinstance(t, dict): from autofit.mapper.prior_model import collection - obj = object.__new__(collection.CollectionPriorModel) + obj = object.__new__(collection.Collection) obj.__init__(t) else: obj = t @@ -445,10 +445,10 @@ def assert_no_assertions(obj): item = copy.copy(source) if isinstance(item, dict): from autofit.mapper.prior_model.collection import ( - CollectionPriorModel, + Collection, ) - item = CollectionPriorModel(item) + item = Collection(item) for attribute in path: item = copy.copy(getattr(item, attribute)) @@ -464,7 +464,7 @@ def assert_no_assertions(obj): def instance_from_unit_vector(self, unit_vector, ignore_prior_limits=False): """ Returns a ModelInstance, which has an attribute and class instance corresponding - to every `PriorModel` attributed to this instance. + to every `Model` attributed to this instance. This method takes as input a unit vector of parameter values, converting each to physical values via their priors. Parameters @@ -687,7 +687,7 @@ def physical_values_from_prior_medians(self): def instance_from_vector(self, vector, ignore_prior_limits=False): """ Returns a ModelInstance, which has an attribute and class instance corresponding - to every `PriorModel` attributed to this instance. + to every `Model` attributed to this instance. This method takes as input a physical vector of parameter values, thus omitting the use of priors. Parameters @@ -745,7 +745,7 @@ def has_instance(self, cls) -> bool: def has_model(self, cls, include_zero_dimension=False) -> bool: """ - True iff this model contains a PriorModel of type + True iff this model contains a Model of type cls, recursively. """ return ( @@ -759,16 +759,16 @@ def has_model(self, cls, include_zero_dimension=False) -> bool: def is_only_model(self, cls) -> bool: """ - True iff this model contains at least one PriorModel + True iff this model contains at least one Model of type cls and contains no PriorModels that are not of type cls, recursively. """ - from .prior_model import PriorModel + from .prior_model import Model cls_models = self.model_tuples_with_type(cls) other_models = [ value - for _, value in self.attribute_tuples_with_type(PriorModel) + for _, value in self.attribute_tuples_with_type(Model) if value.prior_count > 0 ] return len(cls_models) > 0 and len(cls_models) == len(other_models) @@ -1027,7 +1027,7 @@ def from_instance(instance, model_classes=tuple()): if isinstance(instance, (Prior, AbstractPriorModel)): return instance elif isinstance(instance, list): - result = collection.CollectionPriorModel( + result = collection.Collection( [ AbstractPriorModel.from_instance(item, model_classes=model_classes) for item in instance @@ -1046,7 +1046,7 @@ def from_instance(instance, model_classes=tuple()): ), ) elif isinstance(instance, dict): - result = collection.CollectionPriorModel( + result = collection.Collection( { key: AbstractPriorModel.from_instance( value, model_classes=model_classes @@ -1057,10 +1057,10 @@ def from_instance(instance, model_classes=tuple()): elif isinstance(instance, (np.ndarray, types.FunctionType)): return instance else: - from .prior_model import PriorModel + from .prior_model import Model try: - result = PriorModel( + result = Model( instance.__class__, **{ key: AbstractPriorModel.from_instance( @@ -1429,7 +1429,7 @@ def prior_prior_model_dict(self): """ Returns ------- - prior_prior_model_dict: {Prior: PriorModel} + prior_prior_model_dict: {Prior: Model} A dictionary mapping priors to associated prior models. Each prior will only have one prior model; if a prior is shared by two prior models then one of those prior models will be in this dictionary. @@ -1494,7 +1494,7 @@ def parameterization(self) -> str: Describes the path to each of the PriorModels, its class and its number of free parameters """ - from .prior_model import PriorModel + from .prior_model import Model formatter = TextFormatter(line_length=info_whitespace()) @@ -1510,7 +1510,7 @@ def parameterization(self) -> str: n = obj.prior_count else: n = 0 - if isinstance(obj, PriorModel): + if isinstance(obj, Model): name = obj.cls.__name__ else: name = type(obj).__name__ diff --git a/autofit/mapper/prior_model/annotation.py b/autofit/mapper/prior_model/annotation.py index a7be98b50..b2207526e 100644 --- a/autofit/mapper/prior_model/annotation.py +++ b/autofit/mapper/prior_model/annotation.py @@ -1,8 +1,8 @@ from autofit.mapper.prior.arithmetic import ArithmeticMixin -from autofit.mapper.prior_model.prior_model import PriorModel, Prior +from autofit.mapper.prior_model.prior_model import Model, Prior -class AnnotationPriorModel(PriorModel, ArithmeticMixin): +class AnnotationPriorModel(Model, ArithmeticMixin): def __init__(self, cls, parent_class, true_argument_name, **kwargs): self.parent_class = parent_class self.true_argument_name = true_argument_name diff --git a/autofit/mapper/prior_model/collection.py b/autofit/mapper/prior_model/collection.py index a5c68cb09..d0a548f28 100644 --- a/autofit/mapper/prior_model/collection.py +++ b/autofit/mapper/prior_model/collection.py @@ -6,7 +6,7 @@ from autofit.mapper.prior_model.abstract import check_assertions -class CollectionPriorModel(AbstractPriorModel): +class Collection(AbstractPriorModel): def name_for_prior(self, prior: Prior) -> str: """ Construct a name for the prior. This is the path taken @@ -67,7 +67,7 @@ def with_prefix( Filter members of the collection, only returning those that start with a given prefix as a new collection. """ - return CollectionPriorModel({ + return Collection({ key: value for key, value in self.items() @@ -77,7 +77,7 @@ def with_prefix( }) def as_model(self): - return CollectionPriorModel({ + return Collection({ key: value.as_model() if isinstance(value, AbstractPriorModel) else value @@ -236,7 +236,7 @@ def gaussian_prior_model_for_arguments(self, arguments): ------- A new collection """ - collection = CollectionPriorModel() + collection = Collection() for key, value in self.items(): if key in ( diff --git a/autofit/mapper/prior_model/prior_model.py b/autofit/mapper/prior_model/prior_model.py index 47c4afdae..285147818 100644 --- a/autofit/mapper/prior_model/prior_model.py +++ b/autofit/mapper/prior_model/prior_model.py @@ -18,7 +18,7 @@ class_args_dict = dict() -class PriorModel(AbstractPriorModel): +class Model(AbstractPriorModel): """ @DynamicAttrs """ @@ -34,7 +34,7 @@ def __repr__(self): return f"<{self.__class__.__name__} {self}>" def as_model(self): - return PriorModel(self.cls) + return Model(self.cls) def __hash__(self): return self.id @@ -133,15 +133,15 @@ def __init__( keyword_arg = kwargs[arg] if isinstance(keyword_arg, (list, dict)): from autofit.mapper.prior_model.collection import ( - CollectionPriorModel, + Collection, ) - ls = CollectionPriorModel(keyword_arg) + ls = Collection(keyword_arg) setattr(self, arg, ls) else: if inspect.isclass(keyword_arg): - keyword_arg = PriorModel(keyword_arg) + keyword_arg = Model(keyword_arg) setattr(self, arg, keyword_arg) elif arg in defaults and isinstance(defaults[arg], tuple): tuple_prior = TuplePrior() @@ -163,7 +163,7 @@ def __init__( elif hasattr(spec, "__args__") and type(None) in spec.__args__: setattr(self, arg, None) else: - setattr(self, arg, PriorModel(annotations[arg])) + setattr(self, arg, Model(annotations[arg])) else: prior = self.make_prior(arg) if isinstance( @@ -177,7 +177,7 @@ def __init__( for key, value in kwargs.items(): if not hasattr(self, key): setattr( - self, key, PriorModel(value) if inspect.isclass(value) else value + self, key, Model(value) if inspect.isclass(value) else value ) def dict(self): @@ -200,7 +200,7 @@ def constructor_argument_names(self): def __eq__(self, other): return ( - isinstance(other, PriorModel) + isinstance(other, Model) and self.cls == other.cls and self.prior_tuples == other.prior_tuples ) @@ -360,7 +360,7 @@ def _instance_for_arguments(self, arguments: {ModelObject: object}): and not key == "cls" and not key.startswith("_") ): - if isinstance(value, PriorModel): + if isinstance(value, Model): value = value.instance_for_arguments(arguments) elif isinstance(value, Prior): value = arguments[value] diff --git a/autofit/non_linear/analysis/free_parameter.py b/autofit/non_linear/analysis/free_parameter.py index 365122f41..c16959a6b 100644 --- a/autofit/non_linear/analysis/free_parameter.py +++ b/autofit/non_linear/analysis/free_parameter.py @@ -4,7 +4,7 @@ from autofit.mapper.prior.abstract import Prior from autofit.mapper.prior.tuple_prior import TuplePrior from autofit.mapper.prior_model.abstract import AbstractPriorModel -from autofit.mapper.prior_model.collection import CollectionPriorModel +from autofit.mapper.prior_model.collection import Collection from .analysis import Analysis from .indexed import IndexCollectionAnalysis from ..paths.abstract import AbstractPaths @@ -60,7 +60,7 @@ def modify_model(self, model: AbstractPriorModel) -> AbstractPriorModel: A new model with all the same priors except for those associated with free parameters. """ - return CollectionPriorModel( + return Collection( [ analysis.modify_model( model.mapper_from_partial_prior_arguments( diff --git a/autofit/non_linear/analysis/model_analysis.py b/autofit/non_linear/analysis/model_analysis.py index d6b1108b9..118b43e43 100644 --- a/autofit/non_linear/analysis/model_analysis.py +++ b/autofit/non_linear/analysis/model_analysis.py @@ -1,5 +1,5 @@ from autofit.mapper.prior_model.abstract import AbstractPriorModel -from autofit.mapper.prior_model.collection import CollectionPriorModel +from autofit.mapper.prior_model.collection import Collection from .analysis import Analysis from .indexed import IndexCollectionAnalysis @@ -37,7 +37,7 @@ def make_result(self, samples, model, sigma=1.0, use_errors=True, use_widths=Fal class CombinedModelAnalysis(IndexCollectionAnalysis): - def modify_model(self, model: AbstractPriorModel) -> CollectionPriorModel: + def modify_model(self, model: AbstractPriorModel) -> Collection: """ Creates a collection with one model for each analysis. For each ModelAnalysis the model is used; for other analyses the default model is used. @@ -51,7 +51,7 @@ def modify_model(self, model: AbstractPriorModel) -> CollectionPriorModel: ------- A collection of models, one for each analysis. """ - return CollectionPriorModel( + return Collection( [ analysis.modify_model(analysis.analysis.model) if isinstance(analysis.analysis, ModelAnalysis) diff --git a/docs/overview/model_complex.rst b/docs/overview/model_complex.rst index 879e4b259..db6cbd9ea 100644 --- a/docs/overview/model_complex.rst +++ b/docs/overview/model_complex.rst @@ -200,7 +200,7 @@ This gives the following output: Maximum Log Likelihood -38.90532783 Maximum Log Posterior -38.90532783 - model CollectionPriorModel (N=6) + model Collection (N=6) gaussian Gaussian (N=3) exponential Exponential (N=3) diff --git a/test_autofit/database/identifier/test_converter.py b/test_autofit/database/identifier/test_converter.py index 4d4f9e6ce..36106a452 100644 --- a/test_autofit/database/identifier/test_converter.py +++ b/test_autofit/database/identifier/test_converter.py @@ -25,14 +25,14 @@ def test_directory(old_directory_paths): @pytest.fixture(name="old_directory_paths") def make_old_directory_paths(): search = af.DynestyStatic(name="name") - search.paths.model = af.PriorModel(af.Gaussian) + search.paths.model = af.Model(af.Gaussian) return search.paths @output_path_for_test(output_directory) def test_update_identifiers_from_dict(): search = af.DynestyStatic(name="name") - search.paths.model = af.PriorModel(af.Gaussian) + search.paths.model = af.Model(af.Gaussian) old_directory_paths = search.paths initial_length = len(old_directory_paths._identifier.hash_list) @@ -86,7 +86,7 @@ def test_zipped(old_directory_paths): def test_database(session): search = af.DynestyStatic(name="name", session=session) - search.paths.model = af.PriorModel(af.Gaussian) + search.paths.model = af.Model(af.Gaussian) search.paths.save_all( search_config_dict=search.config_dict_search, info={}, pickle_files=[] ) diff --git a/test_autofit/database/identifier/test_identifiers.py b/test_autofit/database/identifier/test_identifiers.py index 6925cab0d..7c983c36e 100644 --- a/test_autofit/database/identifier/test_identifiers.py +++ b/test_autofit/database/identifier/test_identifiers.py @@ -162,46 +162,46 @@ def test_prior(): def test_model(): - identifier = af.PriorModel(af.Gaussian, centre=af.UniformPrior()).identifier - assert identifier == af.PriorModel(af.Gaussian, centre=af.UniformPrior()).identifier + identifier = af.Model(af.Gaussian, centre=af.UniformPrior()).identifier + assert identifier == af.Model(af.Gaussian, centre=af.UniformPrior()).identifier assert ( identifier - != af.PriorModel( + != af.Model( af.Gaussian, centre=af.UniformPrior(upper_limit=0.5) ).identifier ) def test_collection(): - identifier = af.CollectionPriorModel( - gaussian=af.PriorModel(af.Gaussian, centre=af.UniformPrior()) + identifier = af.Collection( + gaussian=af.Model(af.Gaussian, centre=af.UniformPrior()) ).identifier assert ( identifier - == af.CollectionPriorModel( - gaussian=af.PriorModel(af.Gaussian, centre=af.UniformPrior()) + == af.Collection( + gaussian=af.Model(af.Gaussian, centre=af.UniformPrior()) ).identifier ) assert ( identifier - != af.CollectionPriorModel( - gaussian=af.PriorModel(af.Gaussian, centre=af.UniformPrior(upper_limit=0.5)) + != af.Collection( + gaussian=af.Model(af.Gaussian, centre=af.UniformPrior(upper_limit=0.5)) ).identifier ) def test_instance(): - identifier = af.CollectionPriorModel(gaussian=af.Gaussian()).identifier - assert identifier == af.CollectionPriorModel(gaussian=af.Gaussian()).identifier + identifier = af.Collection(gaussian=af.Gaussian()).identifier + assert identifier == af.Collection(gaussian=af.Gaussian()).identifier assert ( identifier - != af.CollectionPriorModel(gaussian=af.Gaussian(centre=0.5)).identifier + != af.Collection(gaussian=af.Gaussian(centre=0.5)).identifier ) def test__identifier_description(): - model = af.CollectionPriorModel( - gaussian=af.PriorModel( + model = af.Collection( + gaussian=af.Model( af.Gaussian, centre=af.UniformPrior(lower_limit=0.0, upper_limit=1.0), normalization=af.LogUniformPrior(lower_limit=0.001, upper_limit=0.01), @@ -217,7 +217,7 @@ def test__identifier_description(): i = 0 - assert description[i] == "CollectionPriorModel" + assert description[i] == "Collection" i += 1 assert description[i] == "item_number" i += 1 @@ -225,7 +225,7 @@ def test__identifier_description(): i += 1 assert description[i] == "gaussian" i += 1 - assert description[i] == "PriorModel" + assert description[i] == "Model" i += 1 assert description[i] == "cls" i += 1 @@ -278,8 +278,8 @@ def test__identifier_description(): def test__identifier_description__after_model_and_instance(): - model = af.CollectionPriorModel( - gaussian=af.PriorModel( + model = af.Collection( + gaussian=af.Model( af.Gaussian, centre=af.UniformPrior(lower_limit=0.0, upper_limit=1.0), normalization=af.LogUniformPrior(lower_limit=0.001, upper_limit=0.01), @@ -308,11 +308,11 @@ def test__identifier_description__after_model_and_instance(): description = identifier.description assert ( description - == """CollectionPriorModel + == """Collection item_number 0 gaussian -PriorModel +Model cls autofit.example.model.Gaussian centre @@ -341,8 +341,8 @@ def test__identifier_description__after_model_and_instance(): def test__identifier_description__after_take_attributes(): - model = af.CollectionPriorModel( - gaussian=af.PriorModel( + model = af.Collection( + gaussian=af.Model( af.Gaussian, centre=af.UniformPrior(lower_limit=0.0, upper_limit=1.0), normalization=af.LogUniformPrior(lower_limit=0.001, upper_limit=0.01), @@ -362,7 +362,7 @@ def test__identifier_description__after_take_attributes(): i = 0 - assert description[i] == "CollectionPriorModel" + assert description[i] == "Collection" i += 1 assert description[i] == "item_number" i += 1 @@ -370,7 +370,7 @@ def test__identifier_description__after_take_attributes(): i += 1 assert description[i] == "gaussian" i += 1 - assert description[i] == "PriorModel" + assert description[i] == "Model" i += 1 assert description[i] == "cls" i += 1 diff --git a/test_autofit/database/mass_sie__source_sersic/phase_mass[sie]_source[bulge]/settings__imaging[grid_sub_2]__lens[pos_off]/dynesty_static[nlive_50__bound_multi_vol_dec_0.5_vol_check_2.0__enlarge_1.0__sample_rwalk_walks_5_facc_0.2]/pickles/model.pickle b/test_autofit/database/mass_sie__source_sersic/phase_mass[sie]_source[bulge]/settings__imaging[grid_sub_2]__lens[pos_off]/dynesty_static[nlive_50__bound_multi_vol_dec_0.5_vol_check_2.0__enlarge_1.0__sample_rwalk_walks_5_facc_0.2]/pickles/model.pickle index 98fb59d2ac11ce8adb2f79d1b842b7f8461cf6ca..a083ca53336ba6ab938a515816fad7507b5b2fac 100644 GIT binary patch delta 13 VcmaFPdV_Vs%!!LVH-7xa2mmk;2MGWG delta 30 jcmcb?`kZybOs;^U%={wX{FKz3i8DP}!Mu%+zA*v-!zv93 diff --git a/test_autofit/database/mass_sie__source_sersic/phase_mass[sie]_source[bulge]/settings__imaging[grid_sub_2]__lens[pos_off]/dynesty_static[nlive_50__bound_multi_vol_dec_0.5_vol_check_2.0__enlarge_1.0__sample_rwalk_walks_5_facc_0.2]/pickles/samples.pickle b/test_autofit/database/mass_sie__source_sersic/phase_mass[sie]_source[bulge]/settings__imaging[grid_sub_2]__lens[pos_off]/dynesty_static[nlive_50__bound_multi_vol_dec_0.5_vol_check_2.0__enlarge_1.0__sample_rwalk_walks_5_facc_0.2]/pickles/samples.pickle index 6d4ad98e3a73380cdd0d616ea85df9893545d186..de4c9be1a92c46aa1d5afadc136d62faac09d7e9 100644 GIT binary patch delta 68 zcmccd%jD!QlLNWrjED|gL delta 85 zcmX^4%jCu{lL np.array: """ - This function takes an instance created by the PriorModel and computes the + This function takes an instance created by the Model and computes the likelihood that it fits the data. """ y_model = instance(self.x) diff --git a/test_autofit/graphical/gaussian/test_declarative.py b/test_autofit/graphical/gaussian/test_declarative.py index be47c0eb4..00a80c92b 100644 --- a/test_autofit/graphical/gaussian/test_declarative.py +++ b/test_autofit/graphical/gaussian/test_declarative.py @@ -27,7 +27,7 @@ def make_factor_model( Note that the normalization prior is shared. """ - prior_model = af.PriorModel( + prior_model = af.Model( Gaussian, centre=af.GaussianPrior(mean=50, sigma=20), normalization=normalization_prior, @@ -122,7 +122,7 @@ def test_gaussian(): x = np.arange(n_observations) y = make_data(Gaussian(centre=50.0, normalization=25.0, sigma=10.0), x) - prior_model = af.PriorModel( + prior_model = af.Model( Gaussian, centre=af.GaussianPrior(mean=50, sigma=20), normalization=af.GaussianPrior(mean=25, sigma=10), @@ -141,7 +141,7 @@ def test_gaussian(): @pytest.fixture(name="prior_model") def make_prior_model(): - return af.PriorModel(Gaussian) + return af.Model(Gaussian) @pytest.fixture(name="likelihood_model") diff --git a/test_autofit/graphical/gaussian/test_other_priors.py b/test_autofit/graphical/gaussian/test_other_priors.py index b636e299c..97978e2f0 100644 --- a/test_autofit/graphical/gaussian/test_other_priors.py +++ b/test_autofit/graphical/gaussian/test_other_priors.py @@ -15,7 +15,7 @@ def make_factor_model( Gaussian(centre=centre, normalization=normalization, sigma=sigma), x ) - prior_model = af.PriorModel( + prior_model = af.Model( Gaussian, centre=af.UniformPrior(lower_limit=10, upper_limit=100), normalization=normalization_prior, @@ -91,7 +91,7 @@ def _test_gaussian(): x = np.arange(n_observations) y = make_data(Gaussian(centre=50.0, normalization=25.0, sigma=10.0), x) - prior_model = af.PriorModel( + prior_model = af.Model( Gaussian, # centre=af.GaussianPrior(mean=50, sigma=10), # normalization=af.GaussianPrior(mean=25, sigma=10), diff --git a/test_autofit/graphical/hierarchical/test_embedded.py b/test_autofit/graphical/hierarchical/test_embedded.py index 904a43ee2..6872c8a06 100644 --- a/test_autofit/graphical/hierarchical/test_embedded.py +++ b/test_autofit/graphical/hierarchical/test_embedded.py @@ -70,7 +70,7 @@ def test_generate_data(data): def test_model_factor(data, centres): y = data[0] centre_argument = af.GaussianPrior(mean=50, sigma=20) - prior_model = af.PriorModel( + prior_model = af.Model( af.Gaussian, centre=centre_argument, normalization=20, sigma=5 ) factor = g.AnalysisFactor(prior_model, analysis=Analysis(x=x, y=y)) @@ -83,7 +83,7 @@ def test_model_factor(data, centres): def test_full_fit(centre_model, data, centres): graph = g.FactorGraphModel() for i, y in enumerate(data): - prior_model = af.PriorModel( + prior_model = af.Model( af.Gaussian, centre=af.GaussianPrior(mean=100, sigma=1), intensity=20, diff --git a/test_autofit/graphical/regression/test_identifier.py b/test_autofit/graphical/regression/test_identifier.py index c6e3ab12d..3f5fedf2b 100644 --- a/test_autofit/graphical/regression/test_identifier.py +++ b/test_autofit/graphical/regression/test_identifier.py @@ -118,14 +118,15 @@ def make_model(): def test_model_identifier( model ): - assert str(model) == "8f342d3d4f2a60639700c295378b8b9f" + + assert str(model) == "9929b2be4248f0d116f5c1c034bda870" def test_model_identifier_fields( model ): assert model.hash_list == [ - 'PriorModel', + 'Model', 'cls', 'autofit.example.model.Gaussian', 'centre', diff --git a/test_autofit/mapper/model/test_freeze.py b/test_autofit/mapper/model/test_freeze.py index 67418a561..e5d288563 100644 --- a/test_autofit/mapper/model/test_freeze.py +++ b/test_autofit/mapper/model/test_freeze.py @@ -7,7 +7,7 @@ name="frozen_model" ) def make_frozen_model(): - model = af.PriorModel( + model = af.Model( af.Gaussian ) @@ -19,8 +19,8 @@ def make_frozen_model(): name="frozen_collection" ) def make_frozen_collection(): - model = af.CollectionPriorModel( - gaussian=af.PriorModel( + model = af.Collection( + gaussian=af.Model( af.Gaussian ) ) diff --git a/test_autofit/mapper/model/test_model_instance.py b/test_autofit/mapper/model/test_model_instance.py index 4988df063..61e1227a9 100644 --- a/test_autofit/mapper/model/test_model_instance.py +++ b/test_autofit/mapper/model/test_model_instance.py @@ -37,7 +37,7 @@ def test_iterable(self, instance): def test_as_model(self, instance): model = instance.as_model() assert isinstance(model, af.ModelMapper) - assert isinstance(model.mock_components_2, af.PriorModel) + assert isinstance(model.mock_components_2, af.Model) assert model.mock_components_2.cls == af.m.MockComponents def test_object_for_path(self, instance, mock_components_1, mock_components_2): diff --git a/test_autofit/mapper/model/test_model_mapper.py b/test_autofit/mapper/model/test_model_mapper.py index e209bd6ee..7fd017c43 100644 --- a/test_autofit/mapper/model/test_model_mapper.py +++ b/test_autofit/mapper/model/test_model_mapper.py @@ -8,12 +8,12 @@ @pytest.fixture(name="initial_model") def make_initial_model(): - return af.PriorModel(af.m.MockClassx2) + return af.Model(af.m.MockClassx2) class TestParamNames: def test_has_prior(self): - prior_model = af.PriorModel(af.m.MockClassx2) + prior_model = af.Model(af.m.MockClassx2) assert "one" == prior_model.name_for_prior(prior_model.one) @@ -78,10 +78,10 @@ def test_parameter_name_list(self): def test_parameter_name_distinction(self): mm = af.ModelMapper() - mm.ls = af.CollectionPriorModel( + mm.ls = af.Collection( [ - af.PriorModel(af.m.MockClassRelativeWidth), - af.PriorModel(af.m.MockClassRelativeWidth), + af.Model(af.m.MockClassRelativeWidth), + af.Model(af.m.MockClassRelativeWidth), ] ) assert mm.model_component_and_parameter_names == [ @@ -147,10 +147,10 @@ def test__parameter_labels_with_superscripts_latex(self): ] def test_name_for_prior(self): - ls = af.CollectionPriorModel( + ls = af.Collection( [ af.m.MockClassRelativeWidth(1, 2, 3), - af.PriorModel(af.m.MockClassRelativeWidth), + af.Model(af.m.MockClassRelativeWidth), ] ) assert ls.name_for_prior(ls[1].one) == "1_one" @@ -204,7 +204,7 @@ def test_attribute(self): mm.cls_1 = af.m.MockClassx2 assert 1 == len(mm.prior_model_tuples) - assert isinstance(mm.cls_1, af.PriorModel) + assert isinstance(mm.cls_1, af.Model) def test__instance_from_unit_vector(self): mapper = af.ModelMapper(mock_cls=af.m.MockClassx2Tuple) @@ -420,7 +420,7 @@ def test__from_prior_medians__one_model__set_one_parameter_to_another(self): def test_log_prior_list_from_vector(self): mapper = af.ModelMapper() - mapper.mock_class = af.PriorModel(af.m.MockClassx2) + mapper.mock_class = af.Model(af.m.MockClassx2) mapper.mock_class.one = af.GaussianPrior(mean=1.0, sigma=2.0) mapper.mock_class.two = af.LogUniformPrior(lower_limit=1e-8, upper_limit=10.0) @@ -430,7 +430,7 @@ def test_log_prior_list_from_vector(self): def test_random_unit_vector_within_limits(self): mapper = af.ModelMapper() - mapper.mock_class = af.PriorModel(af.m.MockClassx2) + mapper.mock_class = af.Model(af.m.MockClassx2) random.seed(1) @@ -446,7 +446,7 @@ def test_random_vector_from_prior_within_limits(self): random.seed(1) mapper = af.ModelMapper() - mapper.mock_class = af.PriorModel(af.m.MockClassx2) + mapper.mock_class = af.Model(af.m.MockClassx2) vector = mapper.random_vector_from_priors_within_limits( lower_limit=0.499999, upper_limit=0.500001 @@ -501,7 +501,7 @@ def test_random_vector_from_prior(self): def test_vector_from_prior_medians(self): mapper = af.ModelMapper() - mapper.mock_class = af.PriorModel(af.m.MockClassx2) + mapper.mock_class = af.Model(af.m.MockClassx2) assert mapper.physical_values_from_prior_medians == [0.5, 1.0] @@ -572,8 +572,8 @@ class TestArguments: def test_same_argument_name(self): mapper = af.ModelMapper() - mapper.one = af.PriorModel(af.m.MockClassx2) - mapper.two = af.PriorModel(af.m.MockClassx2) + mapper.one = af.Model(af.m.MockClassx2) + mapper.two = af.Model(af.m.MockClassx2) instance = mapper.instance_from_vector([0.1, 0.2, 0.3, 0.4]) @@ -585,7 +585,7 @@ def test_same_argument_name(self): class TestIndependentPriorModel: def test_associate_prior_model(self): - prior_model = af.PriorModel(af.m.MockClassx2) + prior_model = af.Model(af.m.MockClassx2) mapper = af.ModelMapper() @@ -601,8 +601,8 @@ def test_associate_prior_model(self): @pytest.fixture(name="list_prior_model") def make_list_prior_model(): - return af.CollectionPriorModel( - [af.PriorModel(af.m.MockClassx2), af.PriorModel(af.m.MockClassx2)] + return af.Collection( + [af.Model(af.m.MockClassx2), af.Model(af.m.MockClassx2)] ) @@ -661,14 +661,14 @@ def test_prior_results_for_gaussian_tuples__include_override_from_width_file( def test_automatic_boxing(self): mapper = af.ModelMapper() - mapper.list = [af.PriorModel(af.m.MockClassx2), af.PriorModel(af.m.MockClassx2)] + mapper.list = [af.Model(af.m.MockClassx2), af.Model(af.m.MockClassx2)] - assert isinstance(mapper.list, af.CollectionPriorModel) + assert isinstance(mapper.list, af.Collection) @pytest.fixture(name="mock_with_instance") def make_mock_with_instance(): - mock_with_instance = af.PriorModel(af.m.MockClassx2) + mock_with_instance = af.Model(af.m.MockClassx2) mock_with_instance.one = 3.0 return mock_with_instance @@ -695,7 +695,7 @@ def test_instance_prior_reconstruction(self, mock_with_instance): def test__instance_in_config(self): mapper = af.ModelMapper() - mock_with_instance = af.PriorModel(af.m.MockClassx2Instance, one=3) + mock_with_instance = af.Model(af.m.MockClassx2Instance, one=3) mapper.mock_class = mock_with_instance @@ -705,23 +705,23 @@ def test__instance_in_config(self): assert instance.mock_class.two == 0.5 def test__set_float(self): - prior_model = af.PriorModel(af.m.MockClassx2) + prior_model = af.Model(af.m.MockClassx2) prior_model.one = 3 prior_model.two = 4.0 assert prior_model.one == 3 assert prior_model.two == 4.0 def test__list_prior_model_instances(self, mapper): - prior_model = af.PriorModel(af.m.MockClassx2) + prior_model = af.Model(af.m.MockClassx2) prior_model.one = 3.0 prior_model.two = 4.0 mapper.mock_list = [prior_model] - assert isinstance(mapper.mock_list, af.CollectionPriorModel) + assert isinstance(mapper.mock_list, af.Collection) assert len(mapper.instance_tuples) == 2 def test__set_for_tuple_prior(self): - prior_model = af.PriorModel(af.m.MockChildTuplex3) + prior_model = af.Model(af.m.MockChildTuplex3) prior_model.tup_0 = 1.0 prior_model.tup_1 = 2.0 prior_model.one = 1.0 @@ -739,14 +739,14 @@ def make_mock_config(): @pytest.fixture(name="mapper_with_one") def make_mapper_with_one(): mapper = af.ModelMapper() - mapper.one = af.PriorModel(af.m.MockClassx2) + mapper.one = af.Model(af.m.MockClassx2) return mapper @pytest.fixture(name="mapper_with_list") def make_mapper_with_list(): mapper = af.ModelMapper() - mapper.list = [af.PriorModel(af.m.MockClassx2), af.PriorModel(af.m.MockClassx2)] + mapper.list = [af.Model(af.m.MockClassx2), af.Model(af.m.MockClassx2)] return mapper @@ -781,7 +781,7 @@ def test_prior_classes_list(self, mapper_with_list): def test_no_override(self): mapper = af.ModelMapper() - mapper.one = af.PriorModel(af.m.MockClassx2) + mapper.one = af.Model(af.m.MockClassx2) af.ModelMapper() diff --git a/test_autofit/mapper/model/test_overloading.py b/test_autofit/mapper/model/test_overloading.py index 030b07976..31af52da6 100644 --- a/test_autofit/mapper/model/test_overloading.py +++ b/test_autofit/mapper/model/test_overloading.py @@ -1,7 +1,7 @@ import autofit as af def test_constructor(): - prior_model = af.PriorModel(af.m.MockOverload) + prior_model = af.Model(af.m.MockOverload) assert prior_model.prior_count == 1 @@ -12,7 +12,7 @@ def test_constructor(): def test_alternative(): - prior_model = af.PriorModel(af.m.MockOverload.with_two) + prior_model = af.Model(af.m.MockOverload.with_two) assert prior_model.prior_count == 1 diff --git a/test_autofit/mapper/model/test_prior_model.py b/test_autofit/mapper/model/test_prior_model.py index dcec72b4d..874b36d69 100644 --- a/test_autofit/mapper/model/test_prior_model.py +++ b/test_autofit/mapper/model/test_prior_model.py @@ -110,7 +110,7 @@ def test_instance(self, instance_prior_model): def test_complex(self, complex_prior_model): assert complex_prior_model.cls == af.m.MockComplexClass assert complex_prior_model.prior_count == 0 - assert isinstance(complex_prior_model.simple, af.PriorModel) + assert isinstance(complex_prior_model.simple, af.Model) assert complex_prior_model.simple.cls == af.m.MockClassx2 assert complex_prior_model.simple.one == 1.0 @@ -120,58 +120,58 @@ def test_complex(self, complex_prior_model): assert new_instance.simple.one == 1.0 def test_list(self, list_prior_model): - assert isinstance(list_prior_model, af.CollectionPriorModel) - assert isinstance(list_prior_model[0], af.PriorModel) + assert isinstance(list_prior_model, af.Collection) + assert isinstance(list_prior_model[0], af.Model) assert list_prior_model[0].one == 1.0 def test_dict(self): instance = {"simple": af.m.MockClassx2(1.0, 2.0)} prior_model = af.AbstractPriorModel.from_instance(instance) - assert isinstance(prior_model, af.CollectionPriorModel) - assert isinstance(prior_model.simple, af.PriorModel) + assert isinstance(prior_model, af.Collection) + assert isinstance(prior_model.simple, af.Model) assert prior_model.simple.one == 1.0 new_instance = prior_model.instance_for_arguments({}) assert isinstance(new_instance.simple, af.m.MockClassx2) prior_model = af.AbstractPriorModel.from_instance(new_instance) - assert isinstance(prior_model, af.CollectionPriorModel) - assert isinstance(prior_model.simple, af.PriorModel) + assert isinstance(prior_model, af.Collection) + assert isinstance(prior_model.simple, af.Model) assert prior_model.simple.one == 1.0 class TestSum: def test_add_prior_models(self): - mock_cls_0 = af.PriorModel(af.m.MockChildTuplex2) - mock_cls_1 = af.PriorModel(af.m.MockChildTuplex2) + mock_cls_0 = af.Model(af.m.MockChildTuplex2) + mock_cls_1 = af.Model(af.m.MockChildTuplex2) mock_cls_0.one = 1.0 mock_cls_1.two = 0.0 result = mock_cls_0 + mock_cls_1 - assert isinstance(result, af.PriorModel) + assert isinstance(result, af.Model) assert result.cls == af.m.MockChildTuplex2 assert isinstance(result.one, af.Prior) assert isinstance(result.two, af.Prior) def test_fail_for_mismatch(self): - mock_cls_0 = af.PriorModel(af.m.MockChildTuplex2) - mock_cls_1 = af.PriorModel(af.m.MockChildTuplex3) + mock_cls_0 = af.Model(af.m.MockChildTuplex2) + mock_cls_1 = af.Model(af.m.MockChildTuplex3) with pytest.raises(TypeError): mock_cls_0 + mock_cls_1 def test_add_children(self): - mock_components_1 = af.PriorModel( + mock_components_1 = af.Model( af.m.MockComponents, - components_0=af.CollectionPriorModel(mock_cls_0=af.m.MockChildTuplex2), - components_1=af.CollectionPriorModel(mock_cls_2=af.m.MockChildTuplex3), + components_0=af.Collection(mock_cls_0=af.m.MockChildTuplex2), + components_1=af.Collection(mock_cls_2=af.m.MockChildTuplex3), ) - mock_components_2 = af.PriorModel( + mock_components_2 = af.Model( af.m.MockComponents, - components_0=af.CollectionPriorModel(mock_cls_1=af.m.MockChildTuplex2), - components_1=af.CollectionPriorModel(mock_cls_3=af.m.MockChildTuplex3), + components_0=af.Collection(mock_cls_1=af.m.MockChildTuplex2), + components_1=af.Collection(mock_cls_3=af.m.MockChildTuplex3), ) result = mock_components_1 + mock_components_2 @@ -191,15 +191,15 @@ def test_add_children(self): ) def test_prior_model_override(self): - mock_components_1 = af.PriorModel( + mock_components_1 = af.Model( af.m.MockComponents, - components_0=af.CollectionPriorModel(light=af.m.MockChildTuplex2()), - components_1=af.CollectionPriorModel(mass=af.m.MockChildTuplex3), + components_0=af.Collection(light=af.m.MockChildTuplex2()), + components_1=af.Collection(mass=af.m.MockChildTuplex3), ) - mock_components_2 = af.PriorModel( + mock_components_2 = af.Model( af.m.MockComponents, - components_0=af.CollectionPriorModel(light=af.m.MockChildTuplex2), - components_1=af.CollectionPriorModel(mass=af.m.MockChildTuplex3()), + components_0=af.Collection(light=af.m.MockChildTuplex2), + components_1=af.Collection(mass=af.m.MockChildTuplex3()), ) result = mock_components_1 + mock_components_2 @@ -234,7 +234,7 @@ def test_prior_linking(self): class TestHashing: def test_is_hashable(self): assert hash(af.AbstractPriorModel()) is not None - assert hash(af.PriorModel(af.m.MockClassx2)) is not None + assert hash(af.Model(af.m.MockClassx2)) is not None assert ( hash(af.AnnotationPriorModel(af.m.MockClassx2, af.m.MockClassx2, "one")) is not None @@ -248,7 +248,7 @@ def __init__(self, value="a string"): class TestStringArguments: def test_string_default(self): - prior_model = af.PriorModel(StringDefault) + prior_model = af.Model(StringDefault) assert prior_model.prior_count == 0 assert prior_model.instance_for_arguments({}).value == "a string" @@ -256,15 +256,15 @@ def test_string_default(self): class TestPriorModelArguments: def test_list_arguments(self): - prior_model = af.PriorModel(af.m.MockListClass) + prior_model = af.Model(af.m.MockListClass) assert prior_model.prior_count == 0 - prior_model = af.PriorModel(af.m.MockListClass, ls=[af.m.MockClassx2]) + prior_model = af.Model(af.m.MockListClass, ls=[af.m.MockClassx2]) assert prior_model.prior_count == 2 - prior_model = af.PriorModel( + prior_model = af.Model( af.m.MockListClass, ls=[af.m.MockClassx2, af.m.MockClassx2] ) @@ -272,12 +272,12 @@ def test_list_arguments(self): def test_float_argument(self): prior = af.UniformPrior(0.5, 2.0) - prior_model = af.PriorModel(af.m.MockComponents, parameter=prior) + prior_model = af.Model(af.m.MockComponents, parameter=prior) assert prior_model.prior_count == 1 assert prior_model.priors[0] is prior - prior_model = af.PriorModel(af.m.MockComponents, parameter=4.0) + prior_model = af.Model(af.m.MockComponents, parameter=4.0) assert prior_model.prior_count == 0 assert prior_model.parameter == 4.0 @@ -285,7 +285,7 @@ def test_float_argument(self): assert instance.parameter == 4.0 def test_arbitrary_keyword_arguments(self): - prior_model = af.PriorModel( + prior_model = af.Model( af.m.MockComponents, mock_cls_0=af.m.MockChildTuplex2, mock_cls_1=af.m.MockChildTuplex3, @@ -300,7 +300,7 @@ def test_arbitrary_keyword_arguments(self): class TestCase: def test_complex_class(self): - prior_model = af.PriorModel(af.m.MockComplexClass) + prior_model = af.Model(af.m.MockComplexClass) assert hasattr(prior_model, "simple") assert prior_model.simple.prior_count == 2 @@ -317,7 +317,7 @@ def test_create_instance(self): def test_instantiate_with_list_arguments(self): mapper = af.ModelMapper() - mapper.list_object = af.PriorModel( + mapper.list_object = af.Model( af.m.MockListClass, ls=[af.m.MockClassx2, af.m.MockClassx2] ) @@ -336,7 +336,7 @@ def test_instantiate_with_list_arguments(self): def test_mix_instances_and_models(self): mapper = af.ModelMapper() - mapper.list_object = af.PriorModel( + mapper.list_object = af.Model( af.m.MockListClass, ls=[af.m.MockClassx2, af.m.MockClassx2(1, 2)] ) @@ -353,7 +353,7 @@ def test_mix_instances_and_models(self): class TestCollectionPriorModel: def test_keyword_arguments(self): - prior_model = af.CollectionPriorModel( + prior_model = af.Collection( one=af.m.MockClassx2, two=af.m.MockClassx2(1, 2) ) @@ -371,7 +371,7 @@ def test_keyword_arguments(self): assert instance.two.two == 2 def test_mix_instances_in_grouped_list_prior_model(self): - prior_model = af.CollectionPriorModel( + prior_model = af.Collection( [af.m.MockClassx2, af.m.MockClassx2(1, 2)] ) @@ -393,13 +393,13 @@ def test_mix_instances_in_grouped_list_prior_model(self): assert len(prior_model.prior_class_dict) == 2 def test_list_in_grouped_list_prior_model(self): - prior_model = af.CollectionPriorModel([[af.m.MockClassx2]]) + prior_model = af.Collection([[af.m.MockClassx2]]) assert len(prior_model.direct_prior_model_tuples) == 1 assert prior_model.prior_count == 2 def test_list_prior_model_with_dictionary(self, simple_model): - assert isinstance(simple_model.simple, af.PriorModel) + assert isinstance(simple_model.simple, af.Model) def test_override_with_instance(self, simple_model): simple_instance = af.m.MockClassx2(1, 2) @@ -410,13 +410,13 @@ def test_override_with_instance(self, simple_model): assert simple_model.simple == simple_instance def test_names_of_priors(self): - collection = af.CollectionPriorModel([af.UniformPrior(), af.UniformPrior()]) + collection = af.Collection([af.UniformPrior(), af.UniformPrior()]) assert collection.name_for_prior(collection[0]) == "0" @pytest.fixture(name="simple_model") def make_simple_model(): - return af.CollectionPriorModel({"simple": af.m.MockClassx2}) + return af.Collection({"simple": af.m.MockClassx2}) class TestCopy: @@ -425,13 +425,13 @@ def test_simple(self, simple_model): assert copy.deepcopy(simple_model).prior_count == simple_model.prior_count def test_embedded(self, simple_model): - model = af.CollectionPriorModel(simple=simple_model) + model = af.Collection(simple=simple_model) assert copy.deepcopy(model).prior_count == model.prior_count def test_circular(self): - one = af.PriorModel(af.m.MockClassx2) + one = af.Model(af.m.MockClassx2) - one.one = af.PriorModel(af.m.MockClassx2) + one.one = af.Model(af.m.MockClassx2) one.one.one = one # noinspection PyUnresolvedReferences diff --git a/test_autofit/mapper/model/test_regression.py b/test_autofit/mapper/model/test_regression.py index 86ebd7107..4ee4eb1d2 100644 --- a/test_autofit/mapper/model/test_regression.py +++ b/test_autofit/mapper/model/test_regression.py @@ -38,7 +38,7 @@ def test_direct_instances_only(): def test_function_from_instance(): assert ( - af.PriorModel.from_instance(test_function_from_instance) + af.Model.from_instance(test_function_from_instance) is test_function_from_instance ) diff --git a/test_autofit/mapper/prior/test_prior.py b/test_autofit/mapper/prior/test_prior.py index b13f39f5e..614554003 100644 --- a/test_autofit/mapper/prior/test_prior.py +++ b/test_autofit/mapper/prior/test_prior.py @@ -215,8 +215,8 @@ def test_instance_plus_instance(self): def test_mapper_plus_mapper(self): one = af.ModelMapper() two = af.ModelMapper() - one.a = af.PriorModel(af.m.MockClassx2) - two.b = af.PriorModel(af.m.MockClassx2) + one.a = af.Model(af.m.MockClassx2) + two.b = af.Model(af.m.MockClassx2) three = one + two diff --git a/test_autofit/mapper/test_abstract.py b/test_autofit/mapper/test_abstract.py index 7755bebe4..9735a29e5 100644 --- a/test_autofit/mapper/test_abstract.py +++ b/test_autofit/mapper/test_abstract.py @@ -5,7 +5,7 @@ def test_transfer_tuples(): model = af.ModelMapper() instance = af.ModelInstance() - model.profile = af.PriorModel(af.m.MockClassx2Tuple) + model.profile = af.Model(af.m.MockClassx2Tuple) assert model.prior_count == 2 result = model.copy_with_fixed_priors(instance) @@ -16,7 +16,7 @@ def test_transfer_tuples(): result = model.copy_with_fixed_priors(instance) assert result.prior_count == 0 assert result.profile.one_tuple == (0.0, 0.0) - assert isinstance(result.profile, af.PriorModel) + assert isinstance(result.profile, af.Model) instance = result.instance_from_unit_vector([]) assert result.profile.one_tuple == (0.0, 0.0) diff --git a/test_autofit/mapper/test_from_data_names.py b/test_autofit/mapper/test_from_data_names.py index 120b02d1f..5f5f93769 100644 --- a/test_autofit/mapper/test_from_data_names.py +++ b/test_autofit/mapper/test_from_data_names.py @@ -7,7 +7,7 @@ @pytest.fixture(name="collection") def make_collection(): - return af.Collection({name: af.PriorModel(af.m.MockClassx2) for name in names}) + return af.Collection({name: af.Model(af.m.MockClassx2) for name in names}) def test_prior_count(collection): @@ -20,7 +20,7 @@ def test_children(collection, name): def test_replace(collection): - collection.one = af.PriorModel(af.m.MockClassx4) + collection.one = af.Model(af.m.MockClassx4) assert collection.one.prior_count == 4 assert collection.prior_count == 8 diff --git a/test_autofit/mapper/test_has.py b/test_autofit/mapper/test_has.py index 1d8bfb699..226591572 100644 --- a/test_autofit/mapper/test_has.py +++ b/test_autofit/mapper/test_has.py @@ -5,7 +5,7 @@ class GaussianChild(af.Gaussian): def test_inheritance(): - collection = af.CollectionPriorModel( + collection = af.Collection( first=af.Model( GaussianChild ), @@ -33,7 +33,7 @@ def test_embedded(): def test_no_free_parameters(): - collection = af.CollectionPriorModel( + collection = af.Collection( gaussian=af.Model( af.Gaussian, centre=1.0, @@ -48,7 +48,7 @@ def test_no_free_parameters(): def test_instance(): - collection = af.CollectionPriorModel( + collection = af.Collection( gaussian=af.Gaussian() ) @@ -61,8 +61,8 @@ def test_instance(): def test_model(): - collection = af.CollectionPriorModel( - gaussian=af.PriorModel( + collection = af.Collection( + gaussian=af.Model( af.Gaussian ) ) @@ -76,8 +76,8 @@ def test_model(): def test_both(): - collection = af.CollectionPriorModel( - gaussian=af.PriorModel( + collection = af.Collection( + gaussian=af.Model( af.Gaussian ), gaussian_2=af.Gaussian() @@ -92,8 +92,8 @@ def test_both(): def test_embedded(): - collection = af.CollectionPriorModel( - gaussian=af.PriorModel( + collection = af.Collection( + gaussian=af.Model( af.Gaussian, centre=af.Gaussian() ), @@ -108,11 +108,11 @@ def test_embedded(): def test_is_only_model(): - collection = af.CollectionPriorModel( - gaussian=af.PriorModel( + collection = af.Collection( + gaussian=af.Model( af.Gaussian ), - gaussian_2=af.PriorModel( + gaussian_2=af.Model( af.Gaussian ) ) @@ -121,7 +121,7 @@ def test_is_only_model(): af.Gaussian ) is True - collection.other = af.PriorModel( + collection.other = af.Model( af.m.MockClassx2 ) diff --git a/test_autofit/mapper/test_parameterization.py b/test_autofit/mapper/test_parameterization.py index 948a9d539..081aca0d2 100644 --- a/test_autofit/mapper/test_parameterization.py +++ b/test_autofit/mapper/test_parameterization.py @@ -17,8 +17,8 @@ def test_parameterization(): parameterization = model.parameterization assert parameterization == ( - """model CollectionPriorModel (N=3) - collection CollectionPriorModel (N=3) + """model Collection (N=3) + collection Collection (N=3) gaussian Gaussian (N=3)""" ) @@ -36,8 +36,8 @@ def test_instance(): parameterization = model.parameterization assert parameterization == ( - """model CollectionPriorModel (N=0) - collection CollectionPriorModel (N=0) + """model Collection (N=0) + collection Collection (N=0) gaussian Gaussian (N=0)""" ) diff --git a/test_autofit/mapper/test_take_attributes.py b/test_autofit/mapper/test_take_attributes.py index e6ef762f5..156df8c4b 100644 --- a/test_autofit/mapper/test_take_attributes.py +++ b/test_autofit/mapper/test_take_attributes.py @@ -7,7 +7,7 @@ name="target_gaussian" ) def make_target_gaussian(): - return af.PriorModel( + return af.Model( af.Gaussian ) @@ -23,7 +23,7 @@ def make_prior(): name="source_gaussian" ) def make_source_gaussian(prior): - return af.PriorModel( + return af.Model( af.Gaussian, centre=prior ) @@ -81,10 +81,10 @@ def test_in_collection( target_gaussian, prior ): - target = af.CollectionPriorModel( + target = af.Collection( gaussian=target_gaussian ) - source = af.CollectionPriorModel( + source = af.Collection( gaussian=source_gaussian ) target.take_attributes( @@ -143,10 +143,10 @@ def test_tuple_in_collection( ): source_gaussian.centre = (prior, 1.0) - source = af.CollectionPriorModel( + source = af.Collection( gaussian=source_gaussian ) - target = af.CollectionPriorModel( + target = af.Collection( gaussian=target_gaussian ) @@ -163,10 +163,10 @@ def test_tuple_in_instance_in_collection( centre=(prior, 1.0) ) - source = af.CollectionPriorModel( + source = af.Collection( gaussian=source_gaussian ) - target = af.CollectionPriorModel( + target = af.Collection( gaussian=target_gaussian ) @@ -182,7 +182,7 @@ def test_source_is_dict( source = dict( gaussian=source_gaussian ) - target = af.CollectionPriorModel( + target = af.Collection( gaussian=target_gaussian ) target.take_attributes(source) @@ -195,12 +195,12 @@ def test_target_is_dict( target_gaussian, prior ): - source = af.CollectionPriorModel( - collection=af.CollectionPriorModel( + source = af.Collection( + collection=af.Collection( gaussian=source_gaussian ) ) - target = af.CollectionPriorModel( + target = af.Collection( collection=dict( gaussian=target_gaussian ) @@ -217,7 +217,7 @@ def test_missing_from_source( target_gaussian.centre = prior target_gaussian.take_attributes( - af.CollectionPriorModel() + af.Collection() ) assert target_gaussian.centre == prior @@ -227,10 +227,10 @@ def test_unlabelled_in_collection( target_gaussian, prior ): - target = af.CollectionPriorModel( + target = af.Collection( [target_gaussian] ) - source = af.CollectionPriorModel( + source = af.Collection( [source_gaussian] ) target.take_attributes( @@ -256,7 +256,7 @@ def test_missing_from_origin( target_gaussian ): target_gaussian.take_attributes( - af.CollectionPriorModel() + af.Collection() ) diff --git a/test_autofit/non_linear/grid/test_sensitivity/conftest.py b/test_autofit/non_linear/grid/test_sensitivity/conftest.py index b49fffb4c..783ef8f27 100644 --- a/test_autofit/non_linear/grid/test_sensitivity/conftest.py +++ b/test_autofit/non_linear/grid/test_sensitivity/conftest.py @@ -28,7 +28,7 @@ def log_likelihood_function(self, instance): name="perturbation_model" ) def make_perturbation_model(): - return af.PriorModel(af.Gaussian) + return af.Model(af.Gaussian) @pytest.fixture( @@ -51,7 +51,7 @@ def make_sensitivity( return s.Sensitivity( simulation_instance=instance, base_model=af.Collection( - gaussian=af.PriorModel(af.Gaussian) + gaussian=af.Model(af.Gaussian) ), perturbation_model=perturbation_model, simulate_function=image_function, @@ -84,9 +84,9 @@ def make_job( # noinspection PyTypeChecker return s.Job( model=af.Collection( - gaussian=af.PriorModel(af.Gaussian) + gaussian=af.Model(af.Gaussian) ), - perturbation_model=af.PriorModel(af.Gaussian), + perturbation_model=af.Model(af.Gaussian), base_instance=base_instance, perturbation_instance=instance, analysis_factory=MockAnalysisFactory(Analysis(image)), diff --git a/test_autofit/non_linear/test_abstract_search.py b/test_autofit/non_linear/test_abstract_search.py index 5c21575d5..6f2ec7175 100644 --- a/test_autofit/non_linear/test_abstract_search.py +++ b/test_autofit/non_linear/test_abstract_search.py @@ -16,7 +16,7 @@ def make_mapper(): @pytest.fixture(name="mock_list") def make_mock_list(): - return [af.PriorModel(af.m.MockClassx4), af.PriorModel(af.m.MockClassx4)] + return [af.Model(af.m.MockClassx4), af.Model(af.m.MockClassx4)] @pytest.fixture(name="result") @@ -72,7 +72,7 @@ def test_raises(self, result): class TestLabels: def test_param_names(self): - model = af.PriorModel(af.m.MockClassx4) + model = af.Model(af.m.MockClassx4) assert [ "one", "two", diff --git a/test_autofit/non_linear/test_initializer.py b/test_autofit/non_linear/test_initializer.py index 25540b9b2..413ed6ef4 100644 --- a/test_autofit/non_linear/test_initializer.py +++ b/test_autofit/non_linear/test_initializer.py @@ -21,7 +21,7 @@ def figure_of_merit_from(self, parameter_list): class TestInitializePrior: def test__samples_from_model__sample_via_priors(self): - model = af.PriorModel(af.m.MockClassx4) + model = af.Model(af.m.MockClassx4) model.one = af.UniformPrior(lower_limit=0.099, upper_limit=0.101) model.two = af.UniformPrior(lower_limit=0.199, upper_limit=0.201) model.three = af.UniformPrior(lower_limit=0.299, upper_limit=0.301) @@ -54,7 +54,7 @@ def test__samples_from_model__sample_via_priors(self): assert figure_of_merit_list == [1.0, 2.0] def test__samples_from_model__raise_exception_if_all_likelihoods_identical(self): - model = af.PriorModel(af.m.MockClassx4) + model = af.Model(af.m.MockClassx4) initializer = af.InitializerPrior() @@ -68,7 +68,7 @@ def test__samples_in_test_mode(self): os.environ["PYAUTOFIT_TEST_MODE"] = "1" - model = af.PriorModel(af.m.MockClassx4) + model = af.Model(af.m.MockClassx4) model.one = af.UniformPrior(lower_limit=0.099, upper_limit=0.101) model.two = af.UniformPrior(lower_limit=0.199, upper_limit=0.201) model.three = af.UniformPrior(lower_limit=0.299, upper_limit=0.301) @@ -104,7 +104,7 @@ def test__samples_in_test_mode(self): class TestInitializeBall: def test__ball__samples_sample_centre_of_priors(self): - model = af.PriorModel(af.m.MockClassx4) + model = af.Model(af.m.MockClassx4) model.one = af.UniformPrior(lower_limit=0.0, upper_limit=1.0) model.two = af.UniformPrior(lower_limit=0.0, upper_limit=2.0) model.three = af.UniformPrior(lower_limit=0.0, upper_limit=3.0) diff --git a/test_autofit/test_equality.py b/test_autofit/test_equality.py index 45ecfd309..c0c8d4a95 100644 --- a/test_autofit/test_equality.py +++ b/test_autofit/test_equality.py @@ -7,7 +7,7 @@ @pytest.fixture(name="prior_model") def make_prior_model(): - return af.PriorModel(af.m.MockClassx2Tuple) + return af.Model(af.m.MockClassx2Tuple) class TestCase: @@ -20,7 +20,7 @@ def test_prior_model(self, prior_model): assert prior_model != prior_model_copy def test_list_prior_model(self, prior_model): - list_prior_model = af.CollectionPriorModel([prior_model]) + list_prior_model = af.Collection([prior_model]) list_prior_model_copy = deepcopy(list_prior_model) assert list_prior_model == list_prior_model_copy @@ -40,10 +40,10 @@ def test_model_mapper(self, prior_model): assert model_mapper != model_mapper_copy def test_non_trivial_equality(self): - mock_components = af.PriorModel( + mock_components = af.Model( af.m.MockComponents, - components_0=af.CollectionPriorModel(mock_cls_0=af.m.MockChildTuplex2), - components_1=af.CollectionPriorModel( + components_0=af.Collection(mock_cls_0=af.m.MockChildTuplex2), + components_1=af.Collection( mock_cls_2=af.m.MockChildTuplex3 ), ) From b5c806e94e82716485e1310d9695a42214707d80 Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Mon, 19 Dec 2022 14:44:05 +0000 Subject: [PATCH 110/226] remove double import --- autofit/__init__.py | 1 - test_autofit/graphical/regression/test_identifier.py | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/autofit/__init__.py b/autofit/__init__.py index 34e58aa0d..a7ef52b2c 100644 --- a/autofit/__init__.py +++ b/autofit/__init__.py @@ -56,7 +56,6 @@ from .mapper.prior_model.attribute_pair import PriorNameValue from .mapper.prior_model.attribute_pair import cast_collection from .mapper.prior_model.collection import Collection -from .mapper.prior_model.collection import Collection from .mapper.prior_model.prior_model import Model from .mapper.prior_model.prior_model import Model from .mapper.prior_model.util import PriorModelNameValue diff --git a/test_autofit/graphical/regression/test_identifier.py b/test_autofit/graphical/regression/test_identifier.py index 3f5fedf2b..5a2a0132b 100644 --- a/test_autofit/graphical/regression/test_identifier.py +++ b/test_autofit/graphical/regression/test_identifier.py @@ -118,7 +118,7 @@ def make_model(): def test_model_identifier( model ): - + assert str(model) == "9929b2be4248f0d116f5c1c034bda870" From c00ba54b9840ed8b926f88b850254212759e4ef9 Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 19 Dec 2022 15:04:14 +0000 Subject: [PATCH 111/226] zip analysis with model to ensure model not collection passed to modify methods for combined analyses --- autofit/non_linear/abstract_search.py | 2 +- autofit/non_linear/analysis/combined.py | 14 +++++++++----- autofit/non_linear/analysis/free_parameter.py | 13 +++++++++---- test_autofit/analysis/test_regression.py | 4 ++-- 4 files changed, 21 insertions(+), 12 deletions(-) diff --git a/autofit/non_linear/abstract_search.py b/autofit/non_linear/abstract_search.py index ac3a7cb7a..cb11c901f 100644 --- a/autofit/non_linear/abstract_search.py +++ b/autofit/non_linear/abstract_search.py @@ -498,7 +498,7 @@ class represented by model M and gives a score for their fitness. self.paths.unique_tag = self.unique_tag self.paths.restore() - analysis = analysis.modify_before_fit(paths=self.paths, model=model) + analysis = analysis.modify_before_fit(paths=self.paths, model=model,) if not self.paths.is_complete or self.force_pickle_overwrite: self.logger.info("Saving path info") diff --git a/autofit/non_linear/analysis/combined.py b/autofit/non_linear/analysis/combined.py index dcdff72d9..fc0abe18f 100644 --- a/autofit/non_linear/analysis/combined.py +++ b/autofit/non_linear/analysis/combined.py @@ -5,6 +5,7 @@ from autofit.mapper.prior.abstract import Prior from autofit.mapper.prior.tuple_prior import TuplePrior from autofit.mapper.prior_model.abstract import AbstractPriorModel +from autofit.mapper.prior_model.collection import CollectionPriorModel from autofit.non_linear.analysis.multiprocessing import AnalysisPool from autofit.non_linear.paths.abstract import AbstractPaths from autofit.non_linear.result import Result @@ -45,7 +46,7 @@ def __init__(self, *analyses: Analysis): def __getitem__(self, item): return self.analyses[item] - def modify_before_fit(self, paths: AbstractPaths, model: AbstractPriorModel): + def modify_before_fit(self, paths: AbstractPaths, model: CollectionPriorModel): """ Modify the analysis before fitting. @@ -57,11 +58,14 @@ def modify_before_fit(self, paths: AbstractPaths, model: AbstractPriorModel): The model which is to be fitted. """ return CombinedAnalysis( - *(analysis.modify_before_fit(paths, model) for analysis in self.analyses) + *( + analysis.modify_before_fit(paths, model_) + for analysis, model_ in zip(self.analyses, model) + ) ) def modify_after_fit( - self, paths: AbstractPaths, model: AbstractPriorModel, result: Result + self, paths: AbstractPaths, model: CollectionPriorModel, result: Result ): """ Modify the analysis after fitting. @@ -77,8 +81,8 @@ def modify_after_fit( """ return CombinedAnalysis( *( - analysis.modify_after_fit(paths, model, result) - for analysis in self.analyses + analysis.modify_after_fit(paths, model_, result) + for analysis, model_ in zip(self.analyses, model) ) ) diff --git a/autofit/non_linear/analysis/free_parameter.py b/autofit/non_linear/analysis/free_parameter.py index 365122f41..5b2039645 100644 --- a/autofit/non_linear/analysis/free_parameter.py +++ b/autofit/non_linear/analysis/free_parameter.py @@ -74,7 +74,7 @@ def modify_model(self, model: AbstractPriorModel) -> AbstractPriorModel: ] ) - def modify_before_fit(self, paths: AbstractPaths, model: AbstractPriorModel): + def modify_before_fit(self, paths: AbstractPaths, model: CollectionPriorModel): """ Modify the analysis before fitting. @@ -86,11 +86,16 @@ def modify_before_fit(self, paths: AbstractPaths, model: AbstractPriorModel): The model which is to be fitted. """ return FreeParameterAnalysis( - *(analysis.modify_before_fit(paths, model) for analysis in self.analyses), + *( + analysis.modify_before_fit(paths, model_) + for analysis, model_ in zip(self.analyses, model) + ), free_parameters=tuple(self.free_parameters), ) - def modify_after_fit(self, paths: AbstractPaths, model: AbstractPriorModel, result): + def modify_after_fit( + self, paths: AbstractPaths, model: CollectionPriorModel, result + ): """ Modify the analysis after fitting. @@ -106,7 +111,7 @@ def modify_after_fit(self, paths: AbstractPaths, model: AbstractPriorModel, resu return FreeParameterAnalysis( *( analysis.modify_after_fit(paths, model, result) - for analysis in self.analyses + for analysis, model_ in zip(self.analyses, model) ), free_parameters=tuple(self.free_parameters), ) diff --git a/test_autofit/analysis/test_regression.py b/test_autofit/analysis/test_regression.py index 6c8296632..501dc5a56 100644 --- a/test_autofit/analysis/test_regression.py +++ b/test_autofit/analysis/test_regression.py @@ -52,12 +52,12 @@ def make_combined_analysis(): def test_combined_before_fit(combined_analysis): - combined_analysis = combined_analysis.modify_before_fit(None, None) + combined_analysis = combined_analysis.modify_before_fit(None, [None]) assert combined_analysis[0].is_modified_before def test_combined_after_fit(combined_analysis): - combined_analysis = combined_analysis.modify_after_fit(None, None, None) + combined_analysis = combined_analysis.modify_after_fit(None, [None], None) assert combined_analysis[0].is_modified_after From 37d26ee5ec0443ff5ee388be84c9870a456b0980 Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 19 Dec 2022 15:13:56 +0000 Subject: [PATCH 112/226] indexed, not all combined, analyses require model-analysis zipping --- autofit/non_linear/analysis/combined.py | 14 ++-- autofit/non_linear/analysis/indexed.py | 100 +++++++++++++----------- 2 files changed, 59 insertions(+), 55 deletions(-) diff --git a/autofit/non_linear/analysis/combined.py b/autofit/non_linear/analysis/combined.py index fc0abe18f..dcdff72d9 100644 --- a/autofit/non_linear/analysis/combined.py +++ b/autofit/non_linear/analysis/combined.py @@ -5,7 +5,6 @@ from autofit.mapper.prior.abstract import Prior from autofit.mapper.prior.tuple_prior import TuplePrior from autofit.mapper.prior_model.abstract import AbstractPriorModel -from autofit.mapper.prior_model.collection import CollectionPriorModel from autofit.non_linear.analysis.multiprocessing import AnalysisPool from autofit.non_linear.paths.abstract import AbstractPaths from autofit.non_linear.result import Result @@ -46,7 +45,7 @@ def __init__(self, *analyses: Analysis): def __getitem__(self, item): return self.analyses[item] - def modify_before_fit(self, paths: AbstractPaths, model: CollectionPriorModel): + def modify_before_fit(self, paths: AbstractPaths, model: AbstractPriorModel): """ Modify the analysis before fitting. @@ -58,14 +57,11 @@ def modify_before_fit(self, paths: AbstractPaths, model: CollectionPriorModel): The model which is to be fitted. """ return CombinedAnalysis( - *( - analysis.modify_before_fit(paths, model_) - for analysis, model_ in zip(self.analyses, model) - ) + *(analysis.modify_before_fit(paths, model) for analysis in self.analyses) ) def modify_after_fit( - self, paths: AbstractPaths, model: CollectionPriorModel, result: Result + self, paths: AbstractPaths, model: AbstractPriorModel, result: Result ): """ Modify the analysis after fitting. @@ -81,8 +77,8 @@ def modify_after_fit( """ return CombinedAnalysis( *( - analysis.modify_after_fit(paths, model_, result) - for analysis, model_ in zip(self.analyses, model) + analysis.modify_after_fit(paths, model, result) + for analysis in self.analyses ) ) diff --git a/autofit/non_linear/analysis/indexed.py b/autofit/non_linear/analysis/indexed.py index 6b63f2930..822e398b2 100644 --- a/autofit/non_linear/analysis/indexed.py +++ b/autofit/non_linear/analysis/indexed.py @@ -4,9 +4,9 @@ from .combined import CombinedAnalysis from ..paths.abstract import AbstractPaths -logger = logging.getLogger( - __name__ -) +from autofit.mapper.prior_model.collection import CollectionPriorModel + +logger = logging.getLogger(__name__) class IndexedAnalysis: @@ -31,40 +31,20 @@ def log_likelihood_function(self, instance): """ Compute the log likelihood by taking the instance at the index """ - return self.analysis.log_likelihood_function( - instance[self.index] - ) + return self.analysis.log_likelihood_function(instance[self.index]) def visualize(self, paths: AbstractPaths, instance, during_analysis): - return self.analysis.visualize( - paths, instance[self.index], during_analysis - ) + return self.analysis.visualize(paths, instance[self.index], during_analysis) def profile_log_likelihood_function(self, paths: AbstractPaths, instance): - return self.profile_log_likelihood_function( - paths, instance[self.index] - ) + return self.profile_log_likelihood_function(paths, instance[self.index]) def __getattr__(self, item): - return getattr( - self.analysis, - item - ) + return getattr(self.analysis, item) - def make_result( - self, - samples, - model, - sigma=3.0, - use_errors=True, - use_widths=True - ): + def make_result(self, samples, model, sigma=3.0, use_errors=True, use_widths=True): return self.analysis.make_result( - samples, - model, - sigma=sigma, - use_errors=use_errors, - use_widths=use_widths, + samples, model, sigma=sigma, use_errors=use_errors, use_widths=use_widths, ) @@ -79,23 +59,14 @@ def __init__(self, *analyses): analyses A list of analyses each with a separate model """ - super().__init__(*[ - IndexedAnalysis( - analysis, - index, - ) - for index, analysis - in enumerate(analyses) - ]) - - def make_result( - self, - samples, - model, - sigma=1.0, - use_errors=True, - use_widths=False - ): + super().__init__( + *[ + IndexedAnalysis(analysis, index,) + for index, analysis in enumerate(analyses) + ] + ) + + def make_result(self, samples, model, sigma=1.0, use_errors=True, use_widths=False): """ Associate each model with an analysis when creating the result. """ @@ -118,3 +89,40 @@ def make_result( ) result.child_results = child_results return result + + def modify_before_fit(self, paths: AbstractPaths, model: CollectionPriorModel): + """ + Modify the analysis before fitting. + + Parameters + ---------- + paths + An object describing the paths for saving data (e.g. hard-disk directories or entries in sqlite database). + model + The model which is to be fitted. + """ + return CombinedAnalysis( + *(analysis.modify_before_fit(paths, model) for analysis in self.analyses) + ) + + def modify_after_fit( + self, paths: AbstractPaths, model: CollectionPriorModel, result + ): + """ + Modify the analysis after fitting. + + Parameters + ---------- + paths + An object describing the paths for saving data (e.g. hard-disk directories or entries in sqlite database). + model + The model which is to be fitted. + result + The result of the fit. + """ + return CombinedAnalysis( + *( + analysis.modify_after_fit(paths, model, result) + for analysis in self.analyses + ) + ) From 89e74a556c63c06c5e74ff14b938cb65115c8538 Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 19 Dec 2022 15:16:41 +0000 Subject: [PATCH 113/226] CollectionPriorModel -> Collection --- autofit/non_linear/analysis/free_parameter.py | 6 ++---- autofit/non_linear/analysis/indexed.py | 8 +++----- 2 files changed, 5 insertions(+), 9 deletions(-) diff --git a/autofit/non_linear/analysis/free_parameter.py b/autofit/non_linear/analysis/free_parameter.py index 209be11b3..8497f0997 100644 --- a/autofit/non_linear/analysis/free_parameter.py +++ b/autofit/non_linear/analysis/free_parameter.py @@ -74,7 +74,7 @@ def modify_model(self, model: AbstractPriorModel) -> AbstractPriorModel: ] ) - def modify_before_fit(self, paths: AbstractPaths, model: CollectionPriorModel): + def modify_before_fit(self, paths: AbstractPaths, model: Collection): """ Modify the analysis before fitting. @@ -93,9 +93,7 @@ def modify_before_fit(self, paths: AbstractPaths, model: CollectionPriorModel): free_parameters=tuple(self.free_parameters), ) - def modify_after_fit( - self, paths: AbstractPaths, model: CollectionPriorModel, result - ): + def modify_after_fit(self, paths: AbstractPaths, model: Collection, result): """ Modify the analysis after fitting. diff --git a/autofit/non_linear/analysis/indexed.py b/autofit/non_linear/analysis/indexed.py index 822e398b2..fd56a148f 100644 --- a/autofit/non_linear/analysis/indexed.py +++ b/autofit/non_linear/analysis/indexed.py @@ -4,7 +4,7 @@ from .combined import CombinedAnalysis from ..paths.abstract import AbstractPaths -from autofit.mapper.prior_model.collection import CollectionPriorModel +from autofit.mapper.prior_model.collection import Collection logger = logging.getLogger(__name__) @@ -90,7 +90,7 @@ def make_result(self, samples, model, sigma=1.0, use_errors=True, use_widths=Fal result.child_results = child_results return result - def modify_before_fit(self, paths: AbstractPaths, model: CollectionPriorModel): + def modify_before_fit(self, paths: AbstractPaths, model: Collection): """ Modify the analysis before fitting. @@ -105,9 +105,7 @@ def modify_before_fit(self, paths: AbstractPaths, model: CollectionPriorModel): *(analysis.modify_before_fit(paths, model) for analysis in self.analyses) ) - def modify_after_fit( - self, paths: AbstractPaths, model: CollectionPriorModel, result - ): + def modify_after_fit(self, paths: AbstractPaths, model: Collection, result): """ Modify the analysis after fitting. From 735a290b9e9e55f5005edd25038aaf27f67e7720 Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 19 Dec 2022 15:53:18 +0000 Subject: [PATCH 114/226] try uploading code coverage --- .github/workflows/main.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 06611eead..75899dae6 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -68,7 +68,9 @@ jobs: export PYTHONPATH=$PYTHONPATH:$ROOT_DIR/PyAutoConf export PYTHONPATH=$PYTHONPATH:$ROOT_DIR/PyAutoFit pushd PyAutoFit - pytest + pytest --cov + - name: Upload coverage to Codecov + uses: codecov/codecov-action@v3 - name: Slack send if: ${{ failure() }} id: slack From 588eba8da1986a4bc76f15ab135907e2f674f7d5 Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 19 Dec 2022 15:59:32 +0000 Subject: [PATCH 115/226] fixing cov --- .github/workflows/main.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 75899dae6..0086d6aa8 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -58,7 +58,7 @@ jobs: pip3 install --upgrade pip pip3 install setuptools pip3 install wheel - pip3 install pytest + pip3 install pytest coverage pytest-cov pip3 install -r PyAutoConf/requirements.txt pip3 install -r PyAutoFit/requirements.txt pip3 install -r PyAutoFit/build_requirements.txt @@ -68,7 +68,7 @@ jobs: export PYTHONPATH=$PYTHONPATH:$ROOT_DIR/PyAutoConf export PYTHONPATH=$PYTHONPATH:$ROOT_DIR/PyAutoFit pushd PyAutoFit - pytest --cov + pytest --cov autofit - name: Upload coverage to Codecov uses: codecov/codecov-action@v3 - name: Slack send From a2b612966520656cf68f0e1b72a73db2caa369fb Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 19 Dec 2022 16:13:14 +0000 Subject: [PATCH 116/226] output an xml --- .github/workflows/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 0086d6aa8..0c18743e4 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -68,7 +68,7 @@ jobs: export PYTHONPATH=$PYTHONPATH:$ROOT_DIR/PyAutoConf export PYTHONPATH=$PYTHONPATH:$ROOT_DIR/PyAutoFit pushd PyAutoFit - pytest --cov autofit + pytest --cov autofit --cov-report xml:coverage.xml - name: Upload coverage to Codecov uses: codecov/codecov-action@v3 - name: Slack send From 5dec2d830b3a685007949168098c02ae723c5f95 Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 19 Dec 2022 16:57:10 +0000 Subject: [PATCH 117/226] convert string annotations to builtin types --- autofit/mapper/prior_model/prior_model.py | 87 ++++++++++------------- 1 file changed, 38 insertions(+), 49 deletions(-) diff --git a/autofit/mapper/prior_model/prior_model.py b/autofit/mapper/prior_model/prior_model.py index 285147818..d6f0b4e32 100644 --- a/autofit/mapper/prior_model/prior_model.py +++ b/autofit/mapper/prior_model/prior_model.py @@ -1,3 +1,4 @@ +import builtins import copy import inspect import logging @@ -22,6 +23,7 @@ class Model(AbstractPriorModel): """ @DynamicAttrs """ + @property def name(self): return self.cls.__name__ @@ -90,30 +92,27 @@ def __init__( model = af.Model(Gaussian) """ - super().__init__( - label=namer(cls.__name__) - if inspect.isclass(cls) - else None - ) + super().__init__(label=namer(cls.__name__) if inspect.isclass(cls) else None) if cls is self: return if not (inspect.isclass(cls) or inspect.isfunction(cls)): - raise AssertionError( - f"{cls} is not a class or function" - ) + raise AssertionError(f"{cls} is not a class or function") self.cls = cls try: annotations = inspect.getfullargspec(cls).annotations + for key, value in annotations.items(): + if isinstance(value, str): + annotations[key] = getattr(builtins, value) except TypeError: annotations = dict() try: arg_spec = inspect.getfullargspec(cls) defaults = dict( - zip(arg_spec.args[-len(arg_spec.defaults):], arg_spec.defaults) + zip(arg_spec.args[-len(arg_spec.defaults) :], arg_spec.defaults) ) except TypeError: defaults = {} @@ -132,9 +131,7 @@ def __init__( if arg in kwargs: keyword_arg = kwargs[arg] if isinstance(keyword_arg, (list, dict)): - from autofit.mapper.prior_model.collection import ( - Collection, - ) + from autofit.mapper.prior_model.collection import Collection ls = Collection(keyword_arg) @@ -151,7 +148,7 @@ def __init__( tuple_prior, attribute_name, self.make_prior(attribute_name) ) setattr(self, arg, tuple_prior) - elif arg in annotations and annotations[arg] != float: + elif arg in annotations and annotations[arg] is not float: spec = annotations[arg] # noinspection PyUnresolvedReferences @@ -159,6 +156,7 @@ def __init__( from autofit.mapper.prior_model.annotation import ( AnnotationPriorModel, ) + setattr(self, arg, AnnotationPriorModel(spec, cls, arg)) elif hasattr(spec, "__args__") and type(None) in spec.__args__: setattr(self, arg, None) @@ -166,27 +164,19 @@ def __init__( setattr(self, arg, Model(annotations[arg])) else: prior = self.make_prior(arg) - if isinstance( - prior, - ConfigException - ) and hasattr( - cls, "__default_fields__" - ) and arg in cls.__default_fields__: + if ( + isinstance(prior, ConfigException) + and hasattr(cls, "__default_fields__") + and arg in cls.__default_fields__ + ): prior = defaults[arg] setattr(self, arg, prior) for key, value in kwargs.items(): if not hasattr(self, key): - setattr( - self, key, Model(value) if inspect.isclass(value) else value - ) + setattr(self, key, Model(value) if inspect.isclass(value) else value) def dict(self): - return { - "class_path": get_class_path( - self.cls - ), - **super().dict() - } + return {"class_path": get_class_path(self.cls), **super().dict()} # noinspection PyAttributeOutsideInit @property @@ -200,9 +190,9 @@ def constructor_argument_names(self): def __eq__(self, other): return ( - isinstance(other, Model) - and self.cls == other.cls - and self.prior_tuples == other.prior_tuples + isinstance(other, Model) + and self.cls == other.cls + and self.prior_tuples == other.prior_tuples ) def make_prior(self, attribute_name): @@ -250,12 +240,12 @@ def __setattr__(self, key, value): pass if key not in ( - "component_number", - "phase_property_position", - "mapping_name", - "id", - "_is_frozen", - "_frozen_cache" + "component_number", + "phase_property_position", + "mapping_name", + "id", + "_is_frozen", + "_frozen_cache", ): try: if "_" in key: @@ -275,10 +265,11 @@ def __setattr__(self, key, value): def __getattr__(self, item): try: - if "_" in item and item not in ( - "_is_frozen", - "tuple_prior_tuples" - ) and not item.startswith("_"): + if ( + "_" in item + and item not in ("_is_frozen", "tuple_prior_tuples") + and not item.startswith("_") + ): return getattr( [v for k, v in self.tuple_prior_tuples if item.split("_")[0] == k][ 0 @@ -325,7 +316,7 @@ def _instance_for_arguments(self, arguments: {ModelObject: object}): prior_model = prior_model_tuple.prior_model model_arguments[ prior_model_tuple.name - ] = prior_model.instance_for_arguments(arguments, ) + ] = prior_model.instance_for_arguments(arguments,) prior_arguments = dict() @@ -333,9 +324,7 @@ def _instance_for_arguments(self, arguments: {ModelObject: object}): try: prior_arguments[name] = arguments[prior] except KeyError as e: - raise KeyError( - f"No argument given for prior {name}" - ) from e + raise KeyError(f"No argument given for prior {name}") from e constructor_arguments = { **attribute_arguments, @@ -355,10 +344,10 @@ def _instance_for_arguments(self, arguments: {ModelObject: object}): for key, value in self.__dict__.items(): if ( - not hasattr(result, key) - and not isinstance(value, Prior) - and not key == "cls" - and not key.startswith("_") + not hasattr(result, key) + and not isinstance(value, Prior) + and not key == "cls" + and not key.startswith("_") ): if isinstance(value, Model): value = value.instance_for_arguments(arguments) From 7c324197f4a18f404738f89a377f005179334576 Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Tue, 20 Dec 2022 10:29:58 +0000 Subject: [PATCH 118/226] try / exception updated with logger --- autofit/non_linear/nest/dynesty/plotter.py | 69 ++++++++++++++++------ 1 file changed, 51 insertions(+), 18 deletions(-) diff --git a/autofit/non_linear/nest/dynesty/plotter.py b/autofit/non_linear/nest/dynesty/plotter.py index 67833d9d4..8a6eec889 100644 --- a/autofit/non_linear/nest/dynesty/plotter.py +++ b/autofit/non_linear/nest/dynesty/plotter.py @@ -1,8 +1,10 @@ from dynesty import plotting as dyplot +import logging from autofit.plot import SamplesPlotter from autofit.plot.samples_plotters import skip_plot_in_test_mode +logger = logging.getLogger(__name__) class DynestyPlotter(SamplesPlotter): @@ -33,14 +35,24 @@ def cornerbound(self, **kwargs): @skip_plot_in_test_mode def cornerplot(self, **kwargs): - dyplot.cornerplot( - results=self.samples.results_internal, - labels=self.model.parameter_labels_with_superscripts_latex, - **kwargs - ) + try: - self.output.to_figure(structure=None, auto_filename="cornerplot") - self.close() + dyplot.cornerplot( + results=self.samples.results_internal, + labels=self.model.parameter_labels_with_superscripts_latex, + **kwargs + ) + + self.output.to_figure(structure=None, auto_filename="cornerplot") + self.close() + + except ValueError: + + logger.info( + "Dynesty unable to produce cornerplot visual: posterior estimate therefore" + "not yet sufficient for this model-fit is not yet robust enough to do this. Visual" + "should be produced in later update, once posterior estimate is updated." + ) @skip_plot_in_test_mode def cornerpoints(self, **kwargs): @@ -53,10 +65,15 @@ def cornerpoints(self, **kwargs): ) self.output.to_figure(structure=None, auto_filename="cornerpoints") + self.close() + except ValueError: - pass - self.close() + logger.info( + "Dynesty unable to produce cornerpoints visual: posterior estimate therefore" + "not yet sufficient for this model-fit is not yet robust enough to do this. Visual" + "should be produced in later update, once posterior estimate is updated." + ) @skip_plot_in_test_mode def runplot(self, **kwargs): @@ -66,19 +83,35 @@ def runplot(self, **kwargs): results=self.samples.results_internal, **kwargs ) + + self.output.to_figure(structure=None, auto_filename="runplot") + self.close() + except ValueError: - pass - self.output.to_figure(structure=None, auto_filename="runplot") - self.close() + logger.info( + "Dynesty unable to produce runplot visual: posterior estimate therefore" + "not yet sufficient for this model-fit is not yet robust enough to do this. Visual" + "should be produced in later update, once posterior estimate is updated." + ) @skip_plot_in_test_mode def traceplot(self, **kwargs): - dyplot.traceplot( - results=self.samples.results_internal, - **kwargs - ) + try: + + dyplot.traceplot( + results=self.samples.results_internal, + **kwargs + ) + + self.output.to_figure(structure=None, auto_filename="traceplot") + self.close() + + except ValueError: - self.output.to_figure(structure=None, auto_filename="traceplot") - self.close() \ No newline at end of file + logger.info( + "Dynesty unable to produce traceplot visual: posterior estimate therefore" + "not yet sufficient for this model-fit is not yet robust enough to do this. Visual" + "should be produced in later update, once posterior estimate is updated." + ) \ No newline at end of file From f3b5bb5edfeff94d4b43e35fbf90870c7dc1be6e Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Tue, 20 Dec 2022 10:31:37 +0000 Subject: [PATCH 119/226] refactor --- autofit/non_linear/nest/dynesty/plotter.py | 33 +++++++++------------- 1 file changed, 13 insertions(+), 20 deletions(-) diff --git a/autofit/non_linear/nest/dynesty/plotter.py b/autofit/non_linear/nest/dynesty/plotter.py index 8a6eec889..f6625b454 100644 --- a/autofit/non_linear/nest/dynesty/plotter.py +++ b/autofit/non_linear/nest/dynesty/plotter.py @@ -7,6 +7,15 @@ logger = logging.getLogger(__name__) class DynestyPlotter(SamplesPlotter): + + @staticmethod + def log_plot_exception(plot_name : str): + + logger.info( + f"Dynesty unable to produce {plot_name} visual: posterior estimate therefore" + "not yet sufficient for this model-fit is not yet robust enough to do this. Visual" + "should be produced in later update, once posterior estimate is updated." + ) @skip_plot_in_test_mode def boundplot(self, **kwargs): @@ -48,11 +57,7 @@ def cornerplot(self, **kwargs): except ValueError: - logger.info( - "Dynesty unable to produce cornerplot visual: posterior estimate therefore" - "not yet sufficient for this model-fit is not yet robust enough to do this. Visual" - "should be produced in later update, once posterior estimate is updated." - ) + self.log_plot_exception(plot_name="cornerplot") @skip_plot_in_test_mode def cornerpoints(self, **kwargs): @@ -69,11 +74,7 @@ def cornerpoints(self, **kwargs): except ValueError: - logger.info( - "Dynesty unable to produce cornerpoints visual: posterior estimate therefore" - "not yet sufficient for this model-fit is not yet robust enough to do this. Visual" - "should be produced in later update, once posterior estimate is updated." - ) + self.log_plot_exception(plot_name="cornerpoints") @skip_plot_in_test_mode def runplot(self, **kwargs): @@ -89,11 +90,7 @@ def runplot(self, **kwargs): except ValueError: - logger.info( - "Dynesty unable to produce runplot visual: posterior estimate therefore" - "not yet sufficient for this model-fit is not yet robust enough to do this. Visual" - "should be produced in later update, once posterior estimate is updated." - ) + self.log_plot_exception(plot_name="runplot") @skip_plot_in_test_mode def traceplot(self, **kwargs): @@ -110,8 +107,4 @@ def traceplot(self, **kwargs): except ValueError: - logger.info( - "Dynesty unable to produce traceplot visual: posterior estimate therefore" - "not yet sufficient for this model-fit is not yet robust enough to do this. Visual" - "should be produced in later update, once posterior estimate is updated." - ) \ No newline at end of file + self.log_plot_exception(plot_name="traceplot") \ No newline at end of file From 1e0f09efa07ff03b427cf25e5a298e7521d9dd35 Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Tue, 20 Dec 2022 10:37:04 +0000 Subject: [PATCH 120/226] docs --- autofit/non_linear/nest/dynesty/plotter.py | 47 +++++++++++++++++++++- 1 file changed, 46 insertions(+), 1 deletion(-) diff --git a/autofit/non_linear/nest/dynesty/plotter.py b/autofit/non_linear/nest/dynesty/plotter.py index f6625b454..392d7d436 100644 --- a/autofit/non_linear/nest/dynesty/plotter.py +++ b/autofit/non_linear/nest/dynesty/plotter.py @@ -10,6 +10,19 @@ class DynestyPlotter(SamplesPlotter): @staticmethod def log_plot_exception(plot_name : str): + """ + Plotting the results of a ``dynesty`` model-fit before they have converged on an + accurate estimate of the posterior can lead the ``dynesty`` plotting routines + to raise a ``ValueError``. + + This exception is caught in each of the plotting methods below, and this + function is used to log the behaviour. + + Parameters + ---------- + plot_name + The name of the ``dynesty`` plot which raised a ``ValueError`` + """ logger.info( f"Dynesty unable to produce {plot_name} visual: posterior estimate therefore" @@ -19,6 +32,14 @@ def log_plot_exception(plot_name : str): @skip_plot_in_test_mode def boundplot(self, **kwargs): + """ + Plots the in-built ``dynesty`` plot ``boundplot``. + + This figure plots the bounding distribution used to propose either (1) live points + at a given iteration or (2) a specific dead point during + the course of a run, projected onto the two dimensions specified + by `dims`. + """ dyplot.boundplot( results=self.samples.results_internal, @@ -31,7 +52,14 @@ def boundplot(self, **kwargs): @skip_plot_in_test_mode def cornerbound(self, **kwargs): - + """ + Plots the in-built ``dynesty`` plot ``cornerbound``. + + This figure plots the bounding distribution used to propose either (1) live points + at a given iteration or (2) a specific dead point during + the course of a run, projected onto all pairs of dimensions. + """ + dyplot.cornerbound( results=self.samples.results_internal, labels=self.model.parameter_labels_with_superscripts_latex, @@ -43,7 +71,11 @@ def cornerbound(self, **kwargs): @skip_plot_in_test_mode def cornerplot(self, **kwargs): + """ + Plots the in-built ``dynesty`` plot ``cornerplot``. + This figure plots a corner plot of the 1-D and 2-D marginalized posteriors. + """ try: dyplot.cornerplot( @@ -61,7 +93,11 @@ def cornerplot(self, **kwargs): @skip_plot_in_test_mode def cornerpoints(self, **kwargs): + """ + Plots the in-built ``dynesty`` plot ``cornerpoints``. + This figure plots a (sub-)corner plot of (weighted) samples. + """ try: dyplot.cornerpoints( results=self.samples.results_internal, @@ -78,7 +114,12 @@ def cornerpoints(self, **kwargs): @skip_plot_in_test_mode def runplot(self, **kwargs): + """ + Plots the in-built ``dynesty`` plot ``runplot``. + This figure plots live points, ln(likelihood), ln(weight), and ln(evidence) + as a function of ln(prior volume). + """ try: dyplot.runplot( results=self.samples.results_internal, @@ -94,7 +135,11 @@ def runplot(self, **kwargs): @skip_plot_in_test_mode def traceplot(self, **kwargs): + """ + Plots the in-built ``dynesty`` plot ``traceplot``. + This figure plots traces and marginalized posteriors for each parameter. + """ try: dyplot.traceplot( From 20f3d8f52917fc5a98431742f147bc76b5268184 Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Tue, 20 Dec 2022 13:20:30 +0000 Subject: [PATCH 121/226] doc --- autofit/non_linear/abstract_search.py | 1 + autofit/non_linear/paths/abstract.py | 52 +++++++++++++++++++++++++++ 2 files changed, 53 insertions(+) diff --git a/autofit/non_linear/abstract_search.py b/autofit/non_linear/abstract_search.py index cb11c901f..f96350aea 100644 --- a/autofit/non_linear/abstract_search.py +++ b/autofit/non_linear/abstract_search.py @@ -570,6 +570,7 @@ class represented by model M and gives a score for their fitness. self.logger.info("Removing zip file") self.paths.zip_remove() + self.paths.zip_remove_nuclear() return result @abstractmethod diff --git a/autofit/non_linear/paths/abstract.py b/autofit/non_linear/paths/abstract.py index 684e4e6d4..af14d02aa 100644 --- a/autofit/non_linear/paths/abstract.py +++ b/autofit/non_linear/paths/abstract.py @@ -244,6 +244,58 @@ def _zip(self): except FileNotFoundError: pass + def zip_remove_nuclear(self): + """ + When multiple model-fits are performed using the same `path_prefix` and `name`, + the results are populated in the same folder with different unique identifiers. + + By accident, one may perform runs where additional results are placed + in these folders which are not wanted for the subsequent analysis. Removing these + results from the directory can be cumbersome, as determining the unwanted results + based on their unique identifier requires visually inspecting them. + + These unwanted results can also make manipulating the results via the database + have issues. + + When a run is performed in nuclear mode, all results in every folder are + deleted except the results corresponding to the unique idenfier of this run. + + For example, suppose a folder has 5 results, 4 of which are unwanted and 1 which is + wanted. If nuclear mode runs, and the model-fit is set up correctly such that the + identifier created corresponds to the wanted result, all 4 unwanted results + will be deleted. + + To enable nuclear mode, one should set the environment + variable ``PYAUTOFIT_NUCLEAR_MODE=1``. Nuclear model is dangerous, and must be + used with CAUTION AND CARE! + + Returns + ------- + + """ + if os.environ.get("PYAUTOFIT_NUCLEAR_MODE") == "1" : + + file_path = os.path.split(self.output_path)[0] + + file_list = os.listdir(file_path) + file_list = [file for file in file_list if self.identifier not in file] + + for file in file_list: + + file_to_remove = path.join(file_path, file) + + try: + os.remove(file_to_remove) + logger.info(f"NUCLEAR MODE -- Removed {file_to_remove}") + except (IsADirectoryError, FileNotFoundError): + pass + + try: + shutil.rmtree(file_to_remove) + logger.info(f"NUCLEAR MODE -- Removed {file_to_remove}") + except (NotADirectoryError, FileNotFoundError): + pass + def restore(self): """ Copy files from the ``.zip`` file to the samples folder. From 3f3f0bc14ac68b6e251b55f23ad21cee24b8b976 Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Tue, 20 Dec 2022 13:42:28 +0000 Subject: [PATCH 122/226] nuclear mode works on grid search --- autofit/non_linear/abstract_search.py | 1 + autofit/non_linear/grid/grid_search/__init__.py | 4 ++++ autofit/non_linear/paths/abstract.py | 5 +---- 3 files changed, 6 insertions(+), 4 deletions(-) diff --git a/autofit/non_linear/abstract_search.py b/autofit/non_linear/abstract_search.py index f96350aea..32013102f 100644 --- a/autofit/non_linear/abstract_search.py +++ b/autofit/non_linear/abstract_search.py @@ -571,6 +571,7 @@ class represented by model M and gives a score for their fitness. self.logger.info("Removing zip file") self.paths.zip_remove() self.paths.zip_remove_nuclear() + return result @abstractmethod diff --git a/autofit/non_linear/grid/grid_search/__init__.py b/autofit/non_linear/grid/grid_search/__init__.py index b2cd11bca..465d9bdb9 100644 --- a/autofit/non_linear/grid/grid_search/__init__.py +++ b/autofit/non_linear/grid/grid_search/__init__.py @@ -1,6 +1,7 @@ import copy import csv import logging +import os from os import path from typing import List, Tuple, Union, Type, Optional, Dict @@ -190,6 +191,7 @@ def fit( self.logger.info( "Running grid search..." ) + process_class = Process if self.parallel else Sequential # noinspection PyArgumentList return self._fit( @@ -294,10 +296,12 @@ def write_results(): return builder() def save_metadata(self): + self.paths.save_parent_identifier() self.paths.save_unique_tag( is_grid_search=True ) + self.paths.zip_remove_nuclear() def make_jobs(self, model, analysis, grid_priors, info: Optional[Dict] = None): grid_priors = model.sort_priors_alphabetically( diff --git a/autofit/non_linear/paths/abstract.py b/autofit/non_linear/paths/abstract.py index af14d02aa..79c27c594 100644 --- a/autofit/non_linear/paths/abstract.py +++ b/autofit/non_linear/paths/abstract.py @@ -268,11 +268,8 @@ def zip_remove_nuclear(self): To enable nuclear mode, one should set the environment variable ``PYAUTOFIT_NUCLEAR_MODE=1``. Nuclear model is dangerous, and must be used with CAUTION AND CARE! - - Returns - ------- - """ + if os.environ.get("PYAUTOFIT_NUCLEAR_MODE") == "1" : file_path = os.path.split(self.output_path)[0] From c6c40319c360185ec9219e5b4e4562e770b9cd10 Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Tue, 20 Dec 2022 13:52:15 +0000 Subject: [PATCH 123/226] bypass internal nuclear mode for grid search --- autofit/non_linear/abstract_search.py | 5 ++++- autofit/non_linear/grid/grid_search/job.py | 7 ++++++- autofit/non_linear/paths/abstract.py | 2 +- 3 files changed, 11 insertions(+), 3 deletions(-) diff --git a/autofit/non_linear/abstract_search.py b/autofit/non_linear/abstract_search.py index 32013102f..f10886aef 100644 --- a/autofit/non_linear/abstract_search.py +++ b/autofit/non_linear/abstract_search.py @@ -452,6 +452,7 @@ def fit( info=None, pickle_files=None, log_likelihood_cap=None, + bypass_nuclear : bool = False ) -> Union["Result", List["Result"]]: """ Fit a model, M with some function f that takes instances of the @@ -570,7 +571,9 @@ class represented by model M and gives a score for their fitness. self.logger.info("Removing zip file") self.paths.zip_remove() - self.paths.zip_remove_nuclear() + + if not bypass_nuclear: + self.paths.zip_remove_nuclear() return result diff --git a/autofit/non_linear/grid/grid_search/job.py b/autofit/non_linear/grid/grid_search/job.py index 9f94a4175..72035f071 100644 --- a/autofit/non_linear/grid/grid_search/job.py +++ b/autofit/non_linear/grid/grid_search/job.py @@ -45,7 +45,12 @@ def __init__(self, search_instance, model, analysis, arguments, index, info: Opt self.info = info def perform(self): - result = self.search_instance.fit(model=self.model, analysis=self.analysis, info=self.info) + result = self.search_instance.fit( + model=self.model, + analysis=self.analysis, + info=self.info, + bypass_nuclear=True + ) result_list_row = [ self.index, *[ diff --git a/autofit/non_linear/paths/abstract.py b/autofit/non_linear/paths/abstract.py index 79c27c594..34f20dd90 100644 --- a/autofit/non_linear/paths/abstract.py +++ b/autofit/non_linear/paths/abstract.py @@ -270,7 +270,7 @@ def zip_remove_nuclear(self): used with CAUTION AND CARE! """ - if os.environ.get("PYAUTOFIT_NUCLEAR_MODE") == "1" : + if os.environ.get("PYAUTOFIT_NUCLEAR_MODE") == "1": file_path = os.path.split(self.output_path)[0] From 19d9f5ba385121a6183cb82511cd311648349361 Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Tue, 20 Dec 2022 13:59:23 +0000 Subject: [PATCH 124/226] docs --- autofit/non_linear/paths/abstract.py | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/autofit/non_linear/paths/abstract.py b/autofit/non_linear/paths/abstract.py index 34f20dd90..1447325d8 100644 --- a/autofit/non_linear/paths/abstract.py +++ b/autofit/non_linear/paths/abstract.py @@ -250,24 +250,29 @@ def zip_remove_nuclear(self): the results are populated in the same folder with different unique identifiers. By accident, one may perform runs where additional results are placed - in these folders which are not wanted for the subsequent analysis. Removing these - results from the directory can be cumbersome, as determining the unwanted results - based on their unique identifier requires visually inspecting them. + in these folders which are not wanted for the subsequent analysis. + + Removing these results from the directory can be cumbersome, as determining + the unwanted results based on their unique identifier requires visually inspecting + them. These unwanted results can also make manipulating the results via the database - have issues. + problematic, as one may need to again filter based on unique identifier. When a run is performed in nuclear mode, all results in every folder are - deleted except the results corresponding to the unique idenfier of this run. + deleted except the results corresponding to the unique identifier of that run. + + Therefore, provided the user is 100% certain that the run corresponds to the + results they want to keep, nuclear mode can be used to remove all unwanted results. For example, suppose a folder has 5 results, 4 of which are unwanted and 1 which is wanted. If nuclear mode runs, and the model-fit is set up correctly such that the identifier created corresponds to the wanted result, all 4 unwanted results will be deleted. - To enable nuclear mode, one should set the environment - variable ``PYAUTOFIT_NUCLEAR_MODE=1``. Nuclear model is dangerous, and must be - used with CAUTION AND CARE! + To enable nuclear mode, set the environment variable ``PYAUTOFIT_NUCLEAR_MODE=1``. + + Nuclear model is dangerous, and must be used with CAUTION AND CARE! """ if os.environ.get("PYAUTOFIT_NUCLEAR_MODE") == "1": From 41c576de681de2e7a22d2a7c22ebb82d7c9a62f8 Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Tue, 20 Dec 2022 20:53:49 +0000 Subject: [PATCH 125/226] doc format --- autofit/non_linear/abstract_search.py | 2 +- docs/api/analysis.rst | 5 +++-- docs/api/plot.rst | 2 +- docs/api/priors.rst | 2 +- 4 files changed, 6 insertions(+), 5 deletions(-) diff --git a/autofit/non_linear/abstract_search.py b/autofit/non_linear/abstract_search.py index f10886aef..9807556cb 100644 --- a/autofit/non_linear/abstract_search.py +++ b/autofit/non_linear/abstract_search.py @@ -85,7 +85,7 @@ def __init__( searches. Parameters - ----------- + ---------- name The name of the search, controlling the last folder results are output. path_prefix diff --git a/docs/api/analysis.rst b/docs/api/analysis.rst index baff175ac..10caf35b5 100644 --- a/docs/api/analysis.rst +++ b/docs/api/analysis.rst @@ -2,8 +2,9 @@ Analysis ======== -The ``Analysis`` object is used to define the ``log_likelihood_function`` of your model-fitting problem, and acts -as an interface between the data and the non-linear search. +The ``Analysis`` object defines the ``log_likelihood_function`` of your model-fitting problem. + +It acts as an interface between the data, model and the non-linear search. **Examples / Tutorials:** diff --git a/docs/api/plot.rst b/docs/api/plot.rst index ad553ae6b..171c01c74 100644 --- a/docs/api/plot.rst +++ b/docs/api/plot.rst @@ -2,7 +2,7 @@ Plotters ======== -The ``Plotter`` objects are used to create non-linear search specific visualization of every search algorithm supported +Create figures and subplots of non-linear search specific visualization of every search algorithm supported by **PyAutoFit**. **Examples / Tutorials:** diff --git a/docs/api/priors.rst b/docs/api/priors.rst index aae145d55..a57d9e0fd 100644 --- a/docs/api/priors.rst +++ b/docs/api/priors.rst @@ -2,7 +2,7 @@ Priors ====== -The priors of every model-fit are customized using `Prior` objects. +The priors of parameters of every component of a mdoel, which is fitted to data, are customized using ``Prior`` objects. **Examples / Tutorials:** From c14761e8d27917a08f11be3c4ffd0509b1d48a70 Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Wed, 21 Dec 2022 16:09:54 +0000 Subject: [PATCH 126/226] template --- docs/_templates/custom-class-template.rst | 37 ++++++++++++ docs/_templates/custom_module_template.rst | 66 ++++++++++++++++++++++ 2 files changed, 103 insertions(+) create mode 100644 docs/_templates/custom-class-template.rst create mode 100644 docs/_templates/custom_module_template.rst diff --git a/docs/_templates/custom-class-template.rst b/docs/_templates/custom-class-template.rst new file mode 100644 index 000000000..4eca390e2 --- /dev/null +++ b/docs/_templates/custom-class-template.rst @@ -0,0 +1,37 @@ +{{ fullname | escape | underline}} + +.. currentmodule:: {{ module }} + +.. autoclass:: {{ objname }} + :members: + :show-inheritance: + :exclude-members: ndarray, __init__ + :special-members: __call__, __add__, __mul__ + + {% block methods %} + {% if methods %} + .. rubric:: {{ _('Methods') }} + + .. autosummary:: + :nosignatures: + :exclude-members: __init__ + {% for item in methods %} + {%- if not item.startswith('_') %} + ~{{ name }}.{{ item }} + {%- endif -%} + {%- endfor %} + {% endif %} + {% endblock %} + + {% block attributes %} + {% if attributes %} + .. rubric:: {{ _('Attributes') }} + + .. autosummary:: + {% for item in attributes %} + {%- if not item.startswith('_') %} + ~{{ name }}.{{ item }} + {%- endif -%} + {%- endfor %} + {% endif %} + {% endblock %} \ No newline at end of file diff --git a/docs/_templates/custom_module_template.rst b/docs/_templates/custom_module_template.rst new file mode 100644 index 000000000..fd46b9053 --- /dev/null +++ b/docs/_templates/custom_module_template.rst @@ -0,0 +1,66 @@ +{{ fullname | escape | underline}} + +.. automodule:: {{ fullname }} + + {% block attributes %} + {% if attributes %} + .. rubric:: Module attributes + + .. autosummary:: + :toctree: + {% for item in attributes %} + {{ item }} + {%- endfor %} + {% endif %} + {% endblock %} + + {% block functions %} + {% if functions %} + .. rubric:: {{ _('Functions') }} + + .. autosummary:: + :toctree: + :nosignatures: + {% for item in functions %} + {{ item }} + {%- endfor %} + {% endif %} + {% endblock %} + + {% block classes %} + {% if classes %} + .. rubric:: {{ _('Classes') }} + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + :nosignatures: + {% for item in classes %} + {{ item }} + {%- endfor %} + {% endif %} + {% endblock %} + + {% block exceptions %} + {% if exceptions %} + .. rubric:: {{ _('Exceptions') }} + + .. autosummary:: + :toctree: + {% for item in exceptions %} + {{ item }} + {%- endfor %} + {% endif %} + {% endblock %} + +{% block modules %} +{% if modules %} +.. autosummary:: + :toctree: + :template: custom-module-template.rst + :recursive: +{% for item in modules %} + {{ item }} +{%- endfor %} +{% endif %} +{% endblock %} \ No newline at end of file From 98ab19d86fa6a713827110548f7dbd85c1d8c3ea Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Wed, 21 Dec 2022 16:36:06 +0000 Subject: [PATCH 127/226] change template --- docs/_templates/custom-class-template.rst | 3 +-- docs/conf.py | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/docs/_templates/custom-class-template.rst b/docs/_templates/custom-class-template.rst index 4eca390e2..14b3b95f3 100644 --- a/docs/_templates/custom-class-template.rst +++ b/docs/_templates/custom-class-template.rst @@ -5,7 +5,7 @@ .. autoclass:: {{ objname }} :members: :show-inheritance: - :exclude-members: ndarray, __init__ + :exclude-members: ndarray, __init__, __new__ :special-members: __call__, __add__, __mul__ {% block methods %} @@ -14,7 +14,6 @@ .. autosummary:: :nosignatures: - :exclude-members: __init__ {% for item in methods %} {%- if not item.startswith('_') %} ~{{ name }}.{{ item }} diff --git a/docs/conf.py b/docs/conf.py index e8cd648c7..a77132aaf 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -66,7 +66,7 @@ # -- Options for extlinks ---------------------------------------------------- extlinks = { - "pypi": ("https://pypi.org/project/%s/", ""), + "pypi": ("https://pypi.org/project/%s/", "%s"), } # -- Options for intersphinx ------------------------------------------------- From b10e81606eb1760e2048e74835187318f5a8d8d1 Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Wed, 21 Dec 2022 16:37:51 +0000 Subject: [PATCH 128/226] dynamic type for dynesty Pool --- autofit/non_linear/nest/dynesty/dynamic.py | 8 ++++++-- autofit/non_linear/nest/dynesty/static.py | 8 ++++++-- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/autofit/non_linear/nest/dynesty/dynamic.py b/autofit/non_linear/nest/dynesty/dynamic.py index 20340a8bd..8bb67feb7 100644 --- a/autofit/non_linear/nest/dynesty/dynamic.py +++ b/autofit/non_linear/nest/dynesty/dynamic.py @@ -1,4 +1,8 @@ -from typing import Optional +from __future__ import annotations +from typing import TYPE_CHECKING, Optional + +if TYPE_CHECKING: + from dynesty.pool import Pool from dynesty.dynesty import DynamicNestedSampler from autofit.non_linear.nest.dynesty.samples import SamplesDynesty @@ -98,7 +102,7 @@ def sampler_from( model: AbstractPriorModel, fitness_function, checkpoint_exists : bool, - pool: Optional["Pool"], + pool: Optional[Pool], queue_size: Optional[int] ): """ diff --git a/autofit/non_linear/nest/dynesty/static.py b/autofit/non_linear/nest/dynesty/static.py index 9ef9fa9be..8ec31b3ce 100644 --- a/autofit/non_linear/nest/dynesty/static.py +++ b/autofit/non_linear/nest/dynesty/static.py @@ -1,4 +1,8 @@ -from typing import Optional +from __future__ import annotations +from typing import TYPE_CHECKING, Optional + +if TYPE_CHECKING: + from dynesty.pool import Pool from dynesty import NestedSampler as StaticSampler from autofit.database.sqlalchemy_ import sa @@ -102,7 +106,7 @@ def sampler_from( model: AbstractPriorModel, fitness_function, checkpoint_exists : bool, - pool: Optional["Pool"], + pool: Optional[Pool], queue_size: Optional[int] ): """ From 779cca00b075933b22d558dfffc6b00976261e64 Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Wed, 21 Dec 2022 17:05:44 +0000 Subject: [PATCH 129/226] docstring alignemnts --- autofit/database/__init__.py | 16 +++++++--------- autofit/non_linear/abstract_search.py | 16 ++++++++-------- 2 files changed, 15 insertions(+), 17 deletions(-) diff --git a/autofit/database/__init__.py b/autofit/database/__init__.py index 51745a16a..c2e409514 100644 --- a/autofit/database/__init__.py +++ b/autofit/database/__init__.py @@ -24,16 +24,14 @@ def open_database( To connect to a postgres database one must be created. 1) Install postgres https://www.postgresql.org/download/ - 2) Install the python postgres client - psycopg2==2.9.1 - 2) Create a user - createuser autofit - 3) Create a database - createdb -O autofit autofit + 2) Install the python postgres client ``psycopg2==2.9.1`` + 2) Create a user ``createuser autofit`` + 3) Create a database ``createdb -O autofit autofit`` 4) Open that database using this function - open_database( - "postgresql://autofit@localhost/autofit" - ) + + open_database( + "postgresql://autofit@localhost/autofit" + ) Note that the above instructions create a database called autofit with a user called autofit. You can create a new database for the diff --git a/autofit/non_linear/abstract_search.py b/autofit/non_linear/abstract_search.py index 9807556cb..49bacc606 100644 --- a/autofit/non_linear/abstract_search.py +++ b/autofit/non_linear/abstract_search.py @@ -794,8 +794,8 @@ def __init__(self, sigma, use_errors, use_widths): By invoking the 'model' attribute, the prior is passed following 3 rules: - 1) The new parameter uses a GaussianPrior. A GaussianPrior is ideal, as the 1D pdf results we compute at - the end of a search are easily summarized as a Gaussian. + 1) The new parameter uses a GaussianPrior. A ``GaussianPrior`` is ideal, as the 1D pdf results we compute at + the end of a search are easily summarized as a Gaussian. 2) The mean of the GaussianPrior is the median PDF value of the parameter estimated in search 1. @@ -827,14 +827,14 @@ def __init__(self, sigma, use_errors, use_widths): There are two ways a value is specified using the priors/width file: 1) Absolute: In this case, the error assumed on the parameter is the value given in the config file. For - example, if for the width on the parameter of a model component the width modifier reads "Absolute" with - a value 0.05. This means if the error on the parameter was less than 0.05 in the previous search, the - sigma of its GaussianPrior in this search will be 0.05. + example, if for the width on the parameter of a model component the width modifier reads "Absolute" with + a value 0.05. This means if the error on the parameter was less than 0.05 in the previous search, the + sigma of its GaussianPrior in this search will be 0.05. 2) Relative: In this case, the error assumed on the parameter is the % of the value of the estimate value - given in the config file. For example, if the parameter estimated in the previous search was 2.0, and the - relative error in the config file reads "Relative" with a value 0.5, then the sigma of the GaussianPrior - will be 50% of this value, i.e. sigma = 0.5 * 2.0 = 1.0. + given in the config file. For example, if the parameter estimated in the previous search was 2.0, and the + relative error in the config file reads "Relative" with a value 0.5, then the sigma of the GaussianPrior + will be 50% of this value, i.e. sigma = 0.5 * 2.0 = 1.0. The PriorPasser allows us to customize at what sigma the error values the model results are computed at to compute the passed sigma values and customizes whether the widths in the config file, these computed errors, From 78d24ee7431ac1e4a07392341cc77c911479eb2c Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Wed, 21 Dec 2022 17:20:25 +0000 Subject: [PATCH 130/226] more docstring alignment --- .../expectation_propagation/ep_mean_field.py | 8 ++++---- autofit/non_linear/optimize/drawer/drawer.py | 12 ++++++------ autofit/non_linear/samples/samples.py | 2 +- 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/autofit/graphical/expectation_propagation/ep_mean_field.py b/autofit/graphical/expectation_propagation/ep_mean_field.py index 032311428..abb2ab8ff 100644 --- a/autofit/graphical/expectation_propagation/ep_mean_field.py +++ b/autofit/graphical/expectation_propagation/ep_mean_field.py @@ -194,9 +194,9 @@ def factor_approximation(self, factor: Factor) -> FactorApproximation: - The factor - The factor's variable distributions - The cavity distribution, which is the product of the distributions - for each variable for all other factors + for each variable for all other factors - The model distribution, which is the product of the distributions - for each variable for all factors + for each variable for all factors Parameters ---------- @@ -486,9 +486,9 @@ def factor_approximation(self, factor: Factor) -> FactorApproximation: - The factor - The factor's variable distributions - The cavity distribution, which is the product of the distributions - for each variable for all other factors + for each variable for all other factors - The model distribution, which is the product of the distributions - for each variable for all factors + for each variable for all factors Parameters ---------- diff --git a/autofit/non_linear/optimize/drawer/drawer.py b/autofit/non_linear/optimize/drawer/drawer.py index bd9bec610..ca48fed03 100644 --- a/autofit/non_linear/optimize/drawer/drawer.py +++ b/autofit/non_linear/optimize/drawer/drawer.py @@ -40,16 +40,16 @@ def __init__( Whilst this is not the typical use case of a non-linear search, it has certain niche applications, for example: - Given a model one can determine how much variation there is in the log likelihood / log posterior values. - By visualizing this as a histogram one can therefore quantify the behaviour of that - model's `log_likelihood_function`. + By visualizing this as a histogram one can therefore quantify the behaviour of that + model's `log_likelihood_function`. - If the `log_likelihood_function` of a model is stochastic (e.g. different values of likelihood may be - computed for an identical model due to randomness in the likelihood evaluation) this search can quantify - the behaviour of that stochasticity. + computed for an identical model due to randomness in the likelihood evaluation) this search can quantify + the behaviour of that stochasticity. - For advanced modeling tools, for example sensitivity mapping performed via the `Sensitivity` object, - the `Drawer` search may be sufficient to perform the overall modeling task, without the need of performing - an actual parameter space search. + the `Drawer` search may be sufficient to perform the overall modeling task, without the need of performing + an actual parameter space search. The drawer search itself is performed by simply reusing the functionality of the `AbstractInitializer` object. Whereas this is normally used to initialize a non-linear search, for the drawer it performed all log diff --git a/autofit/non_linear/samples/samples.py b/autofit/non_linear/samples/samples.py index 0d58c3fb6..afbae6419 100644 --- a/autofit/non_linear/samples/samples.py +++ b/autofit/non_linear/samples/samples.py @@ -708,7 +708,7 @@ def gaussian_priors_at_sigma(self, sigma: float) -> [List]: - The mean is given by maximum log likelihood model values. - Their errors are omitted, as this information is not available from an search. When these priors are - used to link to another search, it will thus automatically use the prior config values. + used to link to another search, it will thus automatically use the prior config values. Parameters ---------- From 965a13d4a93af57e0045ef17af0f7f737034ee2d Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 9 Jan 2023 09:57:51 +0000 Subject: [PATCH 131/226] writing test for multi analysis prior passing --- test_autofit/mapper/test_multi_pass.py | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) create mode 100644 test_autofit/mapper/test_multi_pass.py diff --git a/test_autofit/mapper/test_multi_pass.py b/test_autofit/mapper/test_multi_pass.py new file mode 100644 index 000000000..83c4c288f --- /dev/null +++ b/test_autofit/mapper/test_multi_pass.py @@ -0,0 +1,19 @@ +from random import random + +import autofit as af + + +class Analysis(af.Analysis): + def log_likelihood_function(self, instance): + return -random() + + +def test_integration(): + search = af.LBFGS() + analysis = sum([Analysis() for _ in range(10)]) + + model = af.Collection(gaussian=af.Gaussian) + + result = search.fit(model=model, analysis=analysis) + + print(result.model) From 71d1afae5f6da1239a197fa9824e2f14200cc01b Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 9 Jan 2023 09:58:29 +0000 Subject: [PATCH 132/226] only test allclose for initializer when n_points > 1 --- autofit/non_linear/initializer.py | 69 ++++++++++++++----------------- 1 file changed, 32 insertions(+), 37 deletions(-) diff --git a/autofit/non_linear/initializer.py b/autofit/non_linear/initializer.py index 663f2544f..6c15840e4 100644 --- a/autofit/non_linear/initializer.py +++ b/autofit/non_linear/initializer.py @@ -1,18 +1,17 @@ import configparser import logging +import os import random from abc import ABC, abstractmethod from typing import Dict, Tuple, List -import os + import numpy as np from autofit import exc from autofit.mapper.prior.abstract import Prior from autofit.mapper.prior_model.abstract import AbstractPriorModel -logger = logging.getLogger( - __name__ -) +logger = logging.getLogger(__name__) class AbstractInitializer(ABC): @@ -25,12 +24,12 @@ def _generate_unit_parameter_list(self, model): pass def samples_from_model( - self, - total_points: int, - model: AbstractPriorModel, - fitness_function, - use_prior_medians: bool = False, - test_mode_samples : bool = True + self, + total_points: int, + model: AbstractPriorModel, + fitness_function, + use_prior_medians: bool = False, + test_mode_samples: bool = True, ): """ Generate the initial points of the non-linear search, by randomly drawing unit values from a uniform @@ -48,7 +47,9 @@ def samples_from_model( if os.environ.get("PYAUTOFIT_TEST_MODE") == "1" and test_mode_samples: return self.samples_in_test_mode(total_points=total_points, model=model) - logger.info("Generating initial samples of model, which are subject to prior limits and other constraints.") + logger.info( + "Generating initial samples of model, which are subject to prior limits and other constraints." + ) unit_parameter_lists = [] parameter_lists = [] @@ -62,7 +63,9 @@ def samples_from_model( else: unit_parameter_list = [0.5] * model.prior_count - parameter_list = model.vector_from_unit_vector(unit_vector=unit_parameter_list) + parameter_list = model.vector_from_unit_vector( + unit_vector=unit_parameter_list + ) try: figure_of_merit = fitness_function.figure_of_merit_from( @@ -79,7 +82,9 @@ def samples_from_model( except exc.FitException: pass - if np.allclose(a=figures_of_merit_list[0],b=figures_of_merit_list[1:]): + if total_points > 1 and np.allclose( + a=figures_of_merit_list[0], b=figures_of_merit_list[1:] + ): raise exc.InitializerException( """ The initial samples all have the same figure of merit (e.g. log likelihood values). @@ -96,11 +101,7 @@ def samples_from_model( return unit_parameter_lists, parameter_lists, figures_of_merit_list - def samples_in_test_mode( - self, - total_points: int, - model: AbstractPriorModel - ): + def samples_in_test_mode(self, total_points: int, model: AbstractPriorModel): """ Generate the initial points of the non-linear search in test mode. Like normal, test model draws points, by randomly drawing unit values from a uniform distribution between the ball_lower_limit and ball_upper_limit @@ -119,7 +120,9 @@ def samples_in_test_mode( of free dimensions of the model. """ - logger.warning(f"TEST MODE ON: SAMPLES BEING ASSIGNED ABRITRARY LARGE LIKELIHOODS") + logger.warning( + f"TEST MODE ON: SAMPLES BEING ASSIGNED ABRITRARY LARGE LIKELIHOODS" + ) unit_parameter_lists = [] parameter_lists = [] @@ -133,7 +136,9 @@ def samples_in_test_mode( try: unit_parameter_list = self._generate_unit_parameter_list(model) - parameter_list = model.vector_from_unit_vector(unit_vector=unit_parameter_list) + parameter_list = model.vector_from_unit_vector( + unit_vector=unit_parameter_list + ) model.instance_from_vector(vector=parameter_list) unit_parameter_lists.append(unit_parameter_list) parameter_lists.append(parameter_list) @@ -148,10 +153,10 @@ def samples_in_test_mode( class SpecificRangeInitializer(AbstractInitializer): def __init__( - self, - parameter_dict: Dict[Prior, Tuple[float, float]], - lower_limit=0.0, - upper_limit=1.0 + self, + parameter_dict: Dict[Prior, Tuple[float, float]], + lower_limit=0.0, + upper_limit=1.0, ): """ Initializer that allows the range of possible starting points for each prior @@ -199,9 +204,7 @@ def _generate_unit_parameter_list(self, model: AbstractPriorModel) -> List[float lower = self.lower_limit upper = self.upper_limit - value = prior.unit_value_for( - prior.random(lower, upper) - ) + value = prior.unit_value_for(prior.random(lower, upper)) unit_parameter_list.append(value) @@ -209,11 +212,7 @@ def _generate_unit_parameter_list(self, model: AbstractPriorModel) -> List[float class Initializer(AbstractInitializer): - def __init__( - self, - lower_limit: float, - upper_limit: float - ): + def __init__(self, lower_limit: float, upper_limit: float): """ The Initializer creates the initial set of samples in non-linear parameter space that can be passed into a `NonLinearSearch` to define where to begin sampling. @@ -273,11 +272,7 @@ def __init__(self): class InitializerBall(Initializer): - def __init__( - self, - lower_limit: float, - upper_limit: float - ): + def __init__(self, lower_limit: float, upper_limit: float): """ The Initializer creates the initial set of samples in non-linear parameter space that can be passed into a `NonLinearSearch` to define where to begin sampling. From 7ed92ac0fde72fa659c40cdfc8fafd277ecaa623 Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 9 Jan 2023 11:21:14 +0000 Subject: [PATCH 133/226] removed test --- test_autofit/analysis/test_free_parameter.py | 106 +++++-------------- 1 file changed, 27 insertions(+), 79 deletions(-) diff --git a/test_autofit/analysis/test_free_parameter.py b/test_autofit/analysis/test_free_parameter.py index 3dc430541..80cc6c1f4 100644 --- a/test_autofit/analysis/test_free_parameter.py +++ b/test_autofit/analysis/test_free_parameter.py @@ -14,13 +14,13 @@ def test_copy(): assert collection.prior_count == model.prior_count -def test_log_likelihood( - modified, - combined_analysis -): - assert combined_analysis.log_likelihood_function( - modified.instance_from_prior_medians() - ) == 2 +def test_log_likelihood(modified, combined_analysis): + assert ( + combined_analysis.log_likelihood_function( + modified.instance_from_prior_medians() + ) + == 2 + ) def test_analyses_example(Analysis): @@ -33,98 +33,59 @@ def test_analyses_example(Analysis): ]: copy = model.copy() copy.centre = prior - analyses.append( - Analysis() - ) + analyses.append(Analysis()) -@pytest.fixture( - name="combined_analysis" -) +@pytest.fixture(name="combined_analysis") def make_combined_analysis(model, Analysis): - return (Analysis() + Analysis()).with_free_parameters( - model.centre - ) + return (Analysis() + Analysis()).with_free_parameters(model.centre) def test_multiple_free_parameters(model, Analysis): combined_analysis = (Analysis() + Analysis()).with_free_parameters( - model.centre, - model.sigma + model.centre, model.sigma ) first, second = combined_analysis.modify_model(model) assert first.centre is not second.centre assert first.sigma is not second.sigma -def test_add_free_parameter( - combined_analysis -): - assert isinstance( - combined_analysis, - FreeParameterAnalysis - ) +def test_add_free_parameter(combined_analysis): + assert isinstance(combined_analysis, FreeParameterAnalysis) -@pytest.fixture( - name="modified" -) -def make_modified( - model, - combined_analysis -): +@pytest.fixture(name="modified") +def make_modified(model, combined_analysis): return combined_analysis.modify_model(model) -def test_modify_model( - modified -): +def test_modify_model(modified): assert isinstance(modified, af.Collection) assert len(modified) == 2 -def test_modified_models( - modified -): +def test_modified_models(modified): first, second = modified - assert isinstance( - first.sigma, - af.Prior - ) + assert isinstance(first.sigma, af.Prior) assert first.sigma == second.sigma assert first.centre != second.centre -@pytest.fixture( - name="result" -) +@pytest.fixture(name="result") def make_result( - combined_analysis, - model, + combined_analysis, model, ): optimizer = MockOptimizer() - return optimizer.fit( - model, - combined_analysis - ) + return optimizer.fit(model, combined_analysis) -@pytest.fixture( - autouse=True -) +@pytest.fixture(autouse=True) def do_remove_output(remove_output): yield remove_output() -def test_result_type(result, Result): - assert isinstance(result, Result) - - for result_ in result: - assert isinstance(result_, Result) - - def test_integration(result): result_1, result_2 = result @@ -133,24 +94,16 @@ def test_integration(result): def test_tuple_prior(model, Analysis): - model.centre = af.TuplePrior( - centre_0=af.UniformPrior() - ) - combined = (Analysis() + Analysis()).with_free_parameters( - model.centre - ) + model.centre = af.TuplePrior(centre_0=af.UniformPrior()) + combined = (Analysis() + Analysis()).with_free_parameters(model.centre) first, second = combined.modify_model(model) assert first.centre.centre_0 != second.centre.centre_0 def test_prior_model(model, Analysis): - model = af.Collection( - model=model - ) - combined = (Analysis() + Analysis()).with_free_parameters( - model.model - ) + model = af.Collection(model=model) + combined = (Analysis() + Analysis()).with_free_parameters(model.model) modified = combined.modify_model(model) first = modified[0].model second = modified[1].model @@ -162,12 +115,7 @@ def test_prior_model(model, Analysis): def test_split_samples(modified): samples = af.Samples( - modified, - af.Sample.from_lists( - modified, - [[1, 2, 3, 4]], - [1], [1], [1] - ), + modified, af.Sample.from_lists(modified, [[1, 2, 3, 4]], [1], [1], [1]), ) combined = samples.max_log_likelihood() From 3ff42c9ccf2cf868ce0e3b51e183f4c5b8bf9eb0 Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 9 Jan 2023 11:21:50 +0000 Subject: [PATCH 134/226] explicit combined result class --- autofit/non_linear/analysis/combined.py | 20 +++++++++++++++----- autofit/non_linear/analysis/indexed.py | 12 ++---------- 2 files changed, 17 insertions(+), 15 deletions(-) diff --git a/autofit/non_linear/analysis/combined.py b/autofit/non_linear/analysis/combined.py index dcdff72d9..e41102fbb 100644 --- a/autofit/non_linear/analysis/combined.py +++ b/autofit/non_linear/analysis/combined.py @@ -13,6 +13,20 @@ logger = logging.getLogger(__name__) +class CombinedResult: + def __init__(self, results): + self.child_results = results + + def __getattr__(self, item): + return getattr(self.child_results[0], item) + + def __iter__(self): + return iter(self.child_results) + + def __len__(self): + return len(self.child_results) + + class CombinedAnalysis(Analysis): def __new__(cls, *analyses, **kwargs): from .model_analysis import ModelAnalysis, CombinedModelAnalysis @@ -203,11 +217,7 @@ def make_result(self, samples, model, sigma=1.0, use_errors=True, use_widths=Fal ) for analysis in self.analyses ] - result = self.analyses[0].make_result( - samples=samples, model=model, sigma=1.0, use_errors=True, use_widths=False - ) - result.child_results = child_results - return result + return CombinedResult(child_results) def __len__(self): return len(self.analyses) diff --git a/autofit/non_linear/analysis/indexed.py b/autofit/non_linear/analysis/indexed.py index fd56a148f..da7f9200d 100644 --- a/autofit/non_linear/analysis/indexed.py +++ b/autofit/non_linear/analysis/indexed.py @@ -1,7 +1,7 @@ import logging from .analysis import Analysis -from .combined import CombinedAnalysis +from .combined import CombinedAnalysis, CombinedResult from ..paths.abstract import AbstractPaths from autofit.mapper.prior_model.collection import Collection @@ -80,15 +80,7 @@ def make_result(self, samples, model, sigma=1.0, use_errors=True, use_widths=Fal ) for model, analysis in zip(model, self.analyses) ] - result = self.analyses[0].make_result( - samples=samples, - model=model, - sigma=sigma, - use_errors=use_errors, - use_widths=use_widths, - ) - result.child_results = child_results - return result + return CombinedResult(child_results) def modify_before_fit(self, paths: AbstractPaths, model: Collection): """ From 78f2137da4be790d59af67ede7f536f3d4b28e17 Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 9 Jan 2023 11:38:22 +0000 Subject: [PATCH 135/226] started thinking about how to actually implement miulti analysis passing --- test_autofit/mapper/test_multi_pass.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/test_autofit/mapper/test_multi_pass.py b/test_autofit/mapper/test_multi_pass.py index 83c4c288f..83c9cf309 100644 --- a/test_autofit/mapper/test_multi_pass.py +++ b/test_autofit/mapper/test_multi_pass.py @@ -1,6 +1,8 @@ from random import random import autofit as af +from autofit import mock as m +from autofit.non_linear.analysis.combined import CombinedResult class Analysis(af.Analysis): @@ -17,3 +19,9 @@ def test_integration(): result = search.fit(model=model, analysis=analysis) print(result.model) + + +def test_from_combined(): + combined_result = CombinedResult( + [m.MockResult(model=af.Gaussian()) for _ in range(10)] + ) From b847e6638d5cbccfb08f06800ca2300c74ce78a6 Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 9 Jan 2023 12:51:21 +0000 Subject: [PATCH 136/226] use within prior limits instead --- autofit/non_linear/initializer.py | 66 ++++++++++++++----------------- 1 file changed, 29 insertions(+), 37 deletions(-) diff --git a/autofit/non_linear/initializer.py b/autofit/non_linear/initializer.py index 663f2544f..bae7ddef5 100644 --- a/autofit/non_linear/initializer.py +++ b/autofit/non_linear/initializer.py @@ -10,9 +10,7 @@ from autofit.mapper.prior.abstract import Prior from autofit.mapper.prior_model.abstract import AbstractPriorModel -logger = logging.getLogger( - __name__ -) +logger = logging.getLogger(__name__) class AbstractInitializer(ABC): @@ -25,12 +23,12 @@ def _generate_unit_parameter_list(self, model): pass def samples_from_model( - self, - total_points: int, - model: AbstractPriorModel, - fitness_function, - use_prior_medians: bool = False, - test_mode_samples : bool = True + self, + total_points: int, + model: AbstractPriorModel, + fitness_function, + use_prior_medians: bool = False, + test_mode_samples: bool = True, ): """ Generate the initial points of the non-linear search, by randomly drawing unit values from a uniform @@ -48,7 +46,9 @@ def samples_from_model( if os.environ.get("PYAUTOFIT_TEST_MODE") == "1" and test_mode_samples: return self.samples_in_test_mode(total_points=total_points, model=model) - logger.info("Generating initial samples of model, which are subject to prior limits and other constraints.") + logger.info( + "Generating initial samples of model, which are subject to prior limits and other constraints." + ) unit_parameter_lists = [] parameter_lists = [] @@ -62,7 +62,9 @@ def samples_from_model( else: unit_parameter_list = [0.5] * model.prior_count - parameter_list = model.vector_from_unit_vector(unit_vector=unit_parameter_list) + parameter_list = model.vector_from_unit_vector( + unit_vector=unit_parameter_list + ) try: figure_of_merit = fitness_function.figure_of_merit_from( @@ -79,7 +81,7 @@ def samples_from_model( except exc.FitException: pass - if np.allclose(a=figures_of_merit_list[0],b=figures_of_merit_list[1:]): + if np.allclose(a=figures_of_merit_list[0], b=figures_of_merit_list[1:]): raise exc.InitializerException( """ The initial samples all have the same figure of merit (e.g. log likelihood values). @@ -96,11 +98,7 @@ def samples_from_model( return unit_parameter_lists, parameter_lists, figures_of_merit_list - def samples_in_test_mode( - self, - total_points: int, - model: AbstractPriorModel - ): + def samples_in_test_mode(self, total_points: int, model: AbstractPriorModel): """ Generate the initial points of the non-linear search in test mode. Like normal, test model draws points, by randomly drawing unit values from a uniform distribution between the ball_lower_limit and ball_upper_limit @@ -119,7 +117,9 @@ def samples_in_test_mode( of free dimensions of the model. """ - logger.warning(f"TEST MODE ON: SAMPLES BEING ASSIGNED ABRITRARY LARGE LIKELIHOODS") + logger.warning( + f"TEST MODE ON: SAMPLES BEING ASSIGNED ABRITRARY LARGE LIKELIHOODS" + ) unit_parameter_lists = [] parameter_lists = [] @@ -133,7 +133,9 @@ def samples_in_test_mode( try: unit_parameter_list = self._generate_unit_parameter_list(model) - parameter_list = model.vector_from_unit_vector(unit_vector=unit_parameter_list) + parameter_list = model.vector_from_unit_vector( + unit_vector=unit_parameter_list + ) model.instance_from_vector(vector=parameter_list) unit_parameter_lists.append(unit_parameter_list) parameter_lists.append(parameter_list) @@ -148,10 +150,10 @@ def samples_in_test_mode( class SpecificRangeInitializer(AbstractInitializer): def __init__( - self, - parameter_dict: Dict[Prior, Tuple[float, float]], - lower_limit=0.0, - upper_limit=1.0 + self, + parameter_dict: Dict[Prior, Tuple[float, float]], + lower_limit=0.0, + upper_limit=1.0, ): """ Initializer that allows the range of possible starting points for each prior @@ -199,9 +201,7 @@ def _generate_unit_parameter_list(self, model: AbstractPriorModel) -> List[float lower = self.lower_limit upper = self.upper_limit - value = prior.unit_value_for( - prior.random(lower, upper) - ) + value = prior.unit_value_for(prior.random(lower, upper)) unit_parameter_list.append(value) @@ -209,11 +209,7 @@ def _generate_unit_parameter_list(self, model: AbstractPriorModel) -> List[float class Initializer(AbstractInitializer): - def __init__( - self, - lower_limit: float, - upper_limit: float - ): + def __init__(self, lower_limit: float, upper_limit: float): """ The Initializer creates the initial set of samples in non-linear parameter space that can be passed into a `NonLinearSearch` to define where to begin sampling. @@ -252,7 +248,7 @@ def from_config(cls, config): ) def _generate_unit_parameter_list(self, model): - return model.random_unit_vector_within_limits( + return model.random_vector_from_priors_within_limits( lower_limit=self.lower_limit, upper_limit=self.upper_limit ) @@ -273,11 +269,7 @@ def __init__(self): class InitializerBall(Initializer): - def __init__( - self, - lower_limit: float, - upper_limit: float - ): + def __init__(self, lower_limit: float, upper_limit: float): """ The Initializer creates the initial set of samples in non-linear parameter space that can be passed into a `NonLinearSearch` to define where to begin sampling. From ad31f8dcdd5f438ccb08d1ecc5b112508bc38374 Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 9 Jan 2023 12:57:25 +0000 Subject: [PATCH 137/226] remove invalid unit limits for uniform prior --- autofit/mapper/prior/uniform.py | 21 ++------------------- autofit/non_linear/initializer.py | 2 +- 2 files changed, 3 insertions(+), 20 deletions(-) diff --git a/autofit/mapper/prior/uniform.py b/autofit/mapper/prior/uniform.py index ec84b9c6c..f06ebebad 100644 --- a/autofit/mapper/prior/uniform.py +++ b/autofit/mapper/prior/uniform.py @@ -10,10 +10,7 @@ class UniformPrior(Prior): __identifier_fields__ = ("lower_limit", "upper_limit") def __init__( - self, - lower_limit : float = 0.0, - upper_limit : float = 1.0, - id_ = None, + self, lower_limit: float = 0.0, upper_limit: float = 1.0, id_=None, ): """ A prior with a uniform distribution, defined between a lower limit and upper limit. @@ -55,20 +52,6 @@ def __init__( message, lower_limit=lower_limit, upper_limit=upper_limit, id_=id_, ) - @property - def lower_unit_limit(self) -> float: - """ - The lower limit for this prior in unit vector space - """ - return 0.0 - - @property - def upper_unit_limit(self) -> float: - """ - The upper limit for this prior in unit vector space - """ - return 1.0 - def logpdf(self, x): # TODO: handle x as a numpy array if x == self.lower_limit: @@ -81,7 +64,7 @@ def logpdf(self, x): def parameter_string(self) -> str: return f"lower_limit = {self.lower_limit}, upper_limit = {self.upper_limit}" - def value_for(self, unit : float, ignore_prior_limits : bool = False) -> float: + def value_for(self, unit: float, ignore_prior_limits: bool = False) -> float: """ Returns a physical value from an input unit value according to the limits of the uniform prior. diff --git a/autofit/non_linear/initializer.py b/autofit/non_linear/initializer.py index bae7ddef5..3b118b0d0 100644 --- a/autofit/non_linear/initializer.py +++ b/autofit/non_linear/initializer.py @@ -248,7 +248,7 @@ def from_config(cls, config): ) def _generate_unit_parameter_list(self, model): - return model.random_vector_from_priors_within_limits( + return model.random_unit_vector_within_limits( lower_limit=self.lower_limit, upper_limit=self.upper_limit ) From a01b250202eed1a099071c25f6be1590d2f2dfa3 Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 9 Jan 2023 13:22:29 +0000 Subject: [PATCH 138/226] fixed test --- test_autofit/mapper/prior/test_limits.py | 111 +++++------------------ 1 file changed, 25 insertions(+), 86 deletions(-) diff --git a/test_autofit/mapper/prior/test_limits.py b/test_autofit/mapper/prior/test_limits.py index 5e512c1c7..5f94a5197 100644 --- a/test_autofit/mapper/prior/test_limits.py +++ b/test_autofit/mapper/prior/test_limits.py @@ -5,55 +5,32 @@ from autofit.exc import PriorLimitException -@pytest.fixture( - name="prior" -) +@pytest.fixture(name="prior") def make_prior(): - return af.GaussianPrior( - mean=3.0, - sigma=5.0, - lower_limit=0.0 - ) + return af.GaussianPrior(mean=3.0, sigma=5.0, lower_limit=0.0) def test_intrinsic_lower_limit(prior): - with pytest.raises( - PriorLimitException - ): + with pytest.raises(PriorLimitException): prior.value_for(0.0) def test_optional(prior): - prior.value_for( - 0.0, - ignore_prior_limits=True - ) + prior.value_for(0.0, ignore_prior_limits=True) -@pytest.fixture( - name="model" -) +@pytest.fixture(name="model") def make_model(prior): - return af.Model( - af.Gaussian, - centre=prior - ) + return af.Model(af.Gaussian, centre=prior) def test_vector_from_unit_vector(model): - with pytest.raises( - PriorLimitException - ): - model.vector_from_unit_vector([ - 0, 0, 0 - ]) + with pytest.raises(PriorLimitException): + model.vector_from_unit_vector([0, 0, 0]) def test_vector_ignore_limits(model): - model.vector_from_unit_vector( - [0, 0, 0], - ignore_prior_limits=True - ) + model.vector_from_unit_vector([0, 0, 0], ignore_prior_limits=True) @pytest.mark.parametrize( @@ -61,45 +38,23 @@ def test_vector_ignore_limits(model): [ af.LogUniformPrior(), af.UniformPrior(), - af.GaussianPrior( - mean=0, - sigma=1, - lower_limit=0.0, - upper_limit=1.0, - ) - ] + af.GaussianPrior(mean=0, sigma=1, lower_limit=0.0, upper_limit=1.0,), + ], ) -@pytest.mark.parametrize( - "value", - [-1.0, 2.0] -) -def test_all_priors( - prior, - value -): - with pytest.raises( - PriorLimitException - ): +@pytest.mark.parametrize("value", [-1.0, 2.0]) +def test_all_priors(prior, value): + with pytest.raises(PriorLimitException): prior.value_for(value) - prior.value_for( - value, - ignore_prior_limits=True - ) + prior.value_for(value, ignore_prior_limits=True) @pytest.fixture(name="limitless_prior") def make_limitless_prior(): - return af.GaussianPrior( - mean=1.0, - sigma=2.0, - ) + return af.GaussianPrior(mean=1.0, sigma=2.0,) -@pytest.mark.parametrize( - "value", - np.arange(0, 1, 0.1) -) +@pytest.mark.parametrize("value", np.arange(0, 1, 0.1)) def test_invert_limits(value, limitless_prior): value = float(value) assert limitless_prior.message.cdf( @@ -108,28 +63,15 @@ def test_invert_limits(value, limitless_prior): def test_unit_limits(): - prior = af.GaussianPrior( - mean=1.0, - sigma=2.0, - lower_limit=-10, - upper_limit=5, - ) + prior = af.GaussianPrior(mean=1.0, sigma=2.0, lower_limit=-10, upper_limit=5,) EPSILON = 0.00001 - assert prior.value_for( - prior.lower_unit_limit - ) - assert prior.value_for( - prior.upper_unit_limit - EPSILON - ) + assert prior.value_for(prior.lower_unit_limit) + assert prior.value_for(prior.upper_unit_limit - EPSILON) with pytest.raises(PriorLimitException): - prior.value_for( - prior.lower_unit_limit - EPSILON - ) + prior.value_for(prior.lower_unit_limit - EPSILON) with pytest.raises(PriorLimitException): - prior.value_for( - prior.upper_unit_limit + EPSILON - ) + prior.value_for(prior.upper_unit_limit + EPSILON) def test_infinite_limits(limitless_prior): @@ -138,9 +80,6 @@ def test_infinite_limits(limitless_prior): def test_uniform_prior(): - uniform_prior = af.UniformPrior( - lower_limit=1.0, - upper_limit=2.0, - ) - assert uniform_prior.lower_unit_limit == 0.0 - assert uniform_prior.upper_unit_limit == 1.0 + uniform_prior = af.UniformPrior(lower_limit=1.0, upper_limit=2.0,) + assert uniform_prior.lower_unit_limit == pytest.approx(0.0) + assert uniform_prior.upper_unit_limit == pytest.approx(1.0) From 38fcaad841dc6946dd3d4bbcfc3e4e9548276afe Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 9 Jan 2023 14:50:13 +0000 Subject: [PATCH 139/226] running 10 separate analyses --- autofit/non_linear/abstract_search.py | 31 ++++++++++++++ test_autofit/conftest.py | 59 +++++++------------------- test_autofit/mapper/test_multi_pass.py | 14 ++++-- 3 files changed, 57 insertions(+), 47 deletions(-) diff --git a/autofit/non_linear/abstract_search.py b/autofit/non_linear/abstract_search.py index cb11c901f..ac75077cf 100644 --- a/autofit/non_linear/abstract_search.py +++ b/autofit/non_linear/abstract_search.py @@ -30,6 +30,7 @@ from autofit.non_linear.result import Result from autofit.non_linear.timer import Timer from .analysis import Analysis +from .analysis.indexed import IndexCollectionAnalysis from .paths.null import NullPaths from ..graphical.declarative.abstract import PriorFactor from ..graphical.expectation_propagation import AbstractFactorOptimiser @@ -445,6 +446,36 @@ def resample_figure_of_merit(self): """ return -np.inf + def fit_sequential( + self, + model, + analysis: IndexCollectionAnalysis, + info=None, + pickle_files=None, + log_likelihood_cap=None, + ): + results = [] + + _paths = self.paths + original_name = self.paths.name or "analysis" + + for i, (model, analysis) in enumerate( + zip(analysis.modify_model(model=model), analysis.analyses) + ): + self.paths = copy.copy(_paths) + self.paths.name = f"{original_name}/{i}" + results.append( + self.fit( + model=model, + analysis=analysis, + info=info, + pickle_files=pickle_files, + log_likelihood_cap=log_likelihood_cap, + ) + ) + self.paths = _paths + return results + def fit( self, model, diff --git a/test_autofit/conftest.py b/test_autofit/conftest.py index 4cb8a84ca..650dc80a4 100644 --- a/test_autofit/conftest.py +++ b/test_autofit/conftest.py @@ -13,37 +13,24 @@ from autofit import fixtures from autofit.database.model import sa -if sys.platform == 'darwin': - multiprocessing.set_start_method('forkserver') +if sys.platform == "darwin": + multiprocessing.set_start_method("forkserver") directory = Path(__file__).parent -@pytest.fixture( - name="test_directory", - scope="session" -) +@pytest.fixture(name="test_directory", scope="session") def make_test_directory(): return directory -@pytest.fixture( - name="output_directory", - scope="session" -) -def make_output_directory( - test_directory -): +@pytest.fixture(name="output_directory", scope="session") +def make_output_directory(test_directory): return test_directory / "output" -@pytest.fixture( - name="remove_output", - scope="session" -) -def make_remove_output( - output_directory -): +@pytest.fixture(name="remove_output", scope="session") +def make_remove_output(output_directory): def remove_output(): try: for item in os.listdir(output_directory): @@ -51,27 +38,18 @@ def remove_output(): item_path = output_directory / item if item_path.is_dir(): shutil.rmtree( - item_path, - ignore_errors=True, + item_path, ignore_errors=True, ) else: - os.remove( - item_path - ) - except FileExistsError: + os.remove(item_path) + except (FileExistsError, FileNotFoundError): pass return remove_output -@pytest.fixture( - autouse=True, - scope="session" -) -def do_remove_output( - output_directory, - remove_output -): +@pytest.fixture(autouse=True, scope="session") +def do_remove_output(output_directory, remove_output): yield remove_output() @@ -93,7 +71,7 @@ def make_plot_patch(monkeypatch): @pytest.fixture(name="session") def make_session(): - engine = sa.create_engine('sqlite://') + engine = sa.create_engine("sqlite://") session = sa.orm.sessionmaker(bind=engine)() db.Base.metadata.create_all(engine) yield session @@ -101,10 +79,7 @@ def make_session(): engine.dispose() -@pytest.fixture( - autouse=True, - scope="session" -) +@pytest.fixture(autouse=True, scope="session") def remove_logs(): yield for d, _, files in os.walk(directory): @@ -117,13 +92,11 @@ def remove_logs(): def set_config_path(): conf.instance.push( new_path=path.join(directory, "config"), - output_path=path.join(directory, "output") + output_path=path.join(directory, "output"), ) -@pytest.fixture( - name="model_gaussian_x1" -) +@pytest.fixture(name="model_gaussian_x1") def make_model_gaussian_x1(): return fixtures.make_model_gaussian_x1() diff --git a/test_autofit/mapper/test_multi_pass.py b/test_autofit/mapper/test_multi_pass.py index 83c9cf309..8c45eff4b 100644 --- a/test_autofit/mapper/test_multi_pass.py +++ b/test_autofit/mapper/test_multi_pass.py @@ -1,3 +1,5 @@ +import os +from pathlib import Path from random import random import autofit as af @@ -11,14 +13,18 @@ def log_likelihood_function(self, instance): def test_integration(): - search = af.LBFGS() - analysis = sum([Analysis() for _ in range(10)]) + search = af.LBFGS(name="test_lbfgs") model = af.Collection(gaussian=af.Gaussian) - result = search.fit(model=model, analysis=analysis) + n_analyses = 10 - print(result.model) + analysis = Analysis() + analysis = sum([analysis.with_model(model) for _ in range(n_analyses)]) + + result = search.fit_sequential(model=model, analysis=analysis) + + assert len(os.listdir(Path(str(search.paths)).parent)) == n_analyses def test_from_combined(): From f6aa30ec010a40669e416a171c79dc707f918ce3 Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 9 Jan 2023 14:50:47 +0000 Subject: [PATCH 140/226] removed other test --- test_autofit/mapper/test_multi_pass.py | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/test_autofit/mapper/test_multi_pass.py b/test_autofit/mapper/test_multi_pass.py index 8c45eff4b..af681fbab 100644 --- a/test_autofit/mapper/test_multi_pass.py +++ b/test_autofit/mapper/test_multi_pass.py @@ -22,12 +22,6 @@ def test_integration(): analysis = Analysis() analysis = sum([analysis.with_model(model) for _ in range(n_analyses)]) - result = search.fit_sequential(model=model, analysis=analysis) + search.fit_sequential(model=model, analysis=analysis) assert len(os.listdir(Path(str(search.paths)).parent)) == n_analyses - - -def test_from_combined(): - combined_result = CombinedResult( - [m.MockResult(model=af.Gaussian()) for _ in range(10)] - ) From 3f22d134fa05e4c89ab242e749e4d8e0abe50fce Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 9 Jan 2023 14:51:38 +0000 Subject: [PATCH 141/226] fit sequential test module name --- .../test_multi_pass.py => non_linear/test_fit_sequential.py} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename test_autofit/{mapper/test_multi_pass.py => non_linear/test_fit_sequential.py} (100%) diff --git a/test_autofit/mapper/test_multi_pass.py b/test_autofit/non_linear/test_fit_sequential.py similarity index 100% rename from test_autofit/mapper/test_multi_pass.py rename to test_autofit/non_linear/test_fit_sequential.py From ed0a4cfebb16f186686b2c46f7834f028d253155 Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 9 Jan 2023 15:11:41 +0000 Subject: [PATCH 142/226] handle many data single model case --- autofit/non_linear/abstract_search.py | 4 +++ .../non_linear/test_fit_sequential.py | 36 +++++++++++++------ 2 files changed, 30 insertions(+), 10 deletions(-) diff --git a/autofit/non_linear/abstract_search.py b/autofit/non_linear/abstract_search.py index ac75077cf..6e6142b6f 100644 --- a/autofit/non_linear/abstract_search.py +++ b/autofit/non_linear/abstract_search.py @@ -22,6 +22,7 @@ FactorApproximation, ) from autofit.graphical.utils import Status +from autofit.mapper.prior_model.collection import Collection from autofit.non_linear.initializer import Initializer from autofit.non_linear.parallel import SneakyPool from autofit.non_linear.paths.abstract import AbstractPaths @@ -459,6 +460,9 @@ def fit_sequential( _paths = self.paths original_name = self.paths.name or "analysis" + if not isinstance(model, Collection): + model = [model for _ in range(len(analysis.analyses))] + for i, (model, analysis) in enumerate( zip(analysis.modify_model(model=model), analysis.analyses) ): diff --git a/test_autofit/non_linear/test_fit_sequential.py b/test_autofit/non_linear/test_fit_sequential.py index af681fbab..526ac9ee4 100644 --- a/test_autofit/non_linear/test_fit_sequential.py +++ b/test_autofit/non_linear/test_fit_sequential.py @@ -2,9 +2,9 @@ from pathlib import Path from random import random +import pytest + import autofit as af -from autofit import mock as m -from autofit.non_linear.analysis.combined import CombinedResult class Analysis(af.Analysis): @@ -12,16 +12,32 @@ def log_likelihood_function(self, instance): return -random() -def test_integration(): - search = af.LBFGS(name="test_lbfgs") +@pytest.fixture(name="search") +def make_search(): + return af.LBFGS(name="test_lbfgs") + + +@pytest.fixture(name="model") +def make_model(): + return af.Model(af.Gaussian) + + +@pytest.fixture(name="analysis") +def make_analysis(): + return Analysis() + + +def test_with_model(analysis, model, search): + combined_analysis = sum([analysis.with_model(model) for _ in range(10)]) + + search.fit_sequential(model=model, analysis=combined_analysis) - model = af.Collection(gaussian=af.Gaussian) + assert len(os.listdir(Path(str(search.paths)).parent)) == 10 - n_analyses = 10 - analysis = Analysis() - analysis = sum([analysis.with_model(model) for _ in range(n_analyses)]) +def test_combined_analysis(analysis, model, search): + combined_analysis = sum([analysis for _ in range(10)]) - search.fit_sequential(model=model, analysis=analysis) + search.fit_sequential(model=model, analysis=combined_analysis) - assert len(os.listdir(Path(str(search.paths)).parent)) == n_analyses + assert len(os.listdir(Path(str(search.paths)).parent)) == 10 From 6bbd023d534ae1bf3580a76069776578d31963e3 Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 9 Jan 2023 15:21:09 +0000 Subject: [PATCH 143/226] support free parameter analysis/fix --- autofit/non_linear/abstract_search.py | 5 ++--- test_autofit/non_linear/test_fit_sequential.py | 16 ++++++++++++++-- 2 files changed, 16 insertions(+), 5 deletions(-) diff --git a/autofit/non_linear/abstract_search.py b/autofit/non_linear/abstract_search.py index 6e6142b6f..477df1964 100644 --- a/autofit/non_linear/abstract_search.py +++ b/autofit/non_linear/abstract_search.py @@ -460,12 +460,11 @@ def fit_sequential( _paths = self.paths original_name = self.paths.name or "analysis" + model = analysis.modify_model(model=model) if not isinstance(model, Collection): model = [model for _ in range(len(analysis.analyses))] - for i, (model, analysis) in enumerate( - zip(analysis.modify_model(model=model), analysis.analyses) - ): + for i, (model, analysis) in enumerate(zip(model, analysis.analyses)): self.paths = copy.copy(_paths) self.paths.name = f"{original_name}/{i}" results.append( diff --git a/test_autofit/non_linear/test_fit_sequential.py b/test_autofit/non_linear/test_fit_sequential.py index 526ac9ee4..13ec5796a 100644 --- a/test_autofit/non_linear/test_fit_sequential.py +++ b/test_autofit/non_linear/test_fit_sequential.py @@ -35,9 +35,21 @@ def test_with_model(analysis, model, search): assert len(os.listdir(Path(str(search.paths)).parent)) == 10 -def test_combined_analysis(analysis, model, search): - combined_analysis = sum([analysis for _ in range(10)]) +@pytest.fixture(name="combined_analysis") +def make_combined_analysis(analysis): + return sum([analysis for _ in range(10)]) + +def test_combined_analysis(combined_analysis, model, search): search.fit_sequential(model=model, analysis=combined_analysis) assert len(os.listdir(Path(str(search.paths)).parent)) == 10 + + +def test_with_free_parameter(combined_analysis, model, search): + combined_analysis = combined_analysis.with_free_parameters([model.centre]) + search.fit_sequential( + model=model, analysis=combined_analysis, + ) + + assert len(os.listdir(Path(str(search.paths)).parent)) == 10 From 4cd89932db5f190f0909b36e4ac8a7db75590bb4 Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 9 Jan 2023 15:28:13 +0000 Subject: [PATCH 144/226] return combined results/improve tests --- autofit/non_linear/abstract_search.py | 3 ++- autofit/non_linear/analysis/combined.py | 6 +++++- test_autofit/non_linear/test_fit_sequential.py | 13 +++++++++---- 3 files changed, 16 insertions(+), 6 deletions(-) diff --git a/autofit/non_linear/abstract_search.py b/autofit/non_linear/abstract_search.py index 477df1964..ee168414f 100644 --- a/autofit/non_linear/abstract_search.py +++ b/autofit/non_linear/abstract_search.py @@ -31,6 +31,7 @@ from autofit.non_linear.result import Result from autofit.non_linear.timer import Timer from .analysis import Analysis +from .analysis.combined import CombinedResult from .analysis.indexed import IndexCollectionAnalysis from .paths.null import NullPaths from ..graphical.declarative.abstract import PriorFactor @@ -477,7 +478,7 @@ def fit_sequential( ) ) self.paths = _paths - return results + return CombinedResult(results) def fit( self, diff --git a/autofit/non_linear/analysis/combined.py b/autofit/non_linear/analysis/combined.py index e41102fbb..93d8ed371 100644 --- a/autofit/non_linear/analysis/combined.py +++ b/autofit/non_linear/analysis/combined.py @@ -213,7 +213,11 @@ def func(child_paths, analysis): def make_result(self, samples, model, sigma=1.0, use_errors=True, use_widths=False): child_results = [ analysis.make_result( - samples, model, sigma=1.0, use_errors=True, use_widths=False + samples, + model, + sigma=sigma, + use_errors=use_errors, + use_widths=use_widths, ) for analysis in self.analyses ] diff --git a/test_autofit/non_linear/test_fit_sequential.py b/test_autofit/non_linear/test_fit_sequential.py index 13ec5796a..3dcd8b94d 100644 --- a/test_autofit/non_linear/test_fit_sequential.py +++ b/test_autofit/non_linear/test_fit_sequential.py @@ -27,12 +27,17 @@ def make_analysis(): return Analysis() +def count_output(paths): + return len(os.listdir(Path(str(paths)).parent)) + + def test_with_model(analysis, model, search): combined_analysis = sum([analysis.with_model(model) for _ in range(10)]) - search.fit_sequential(model=model, analysis=combined_analysis) + result = search.fit_sequential(model=model, analysis=combined_analysis) - assert len(os.listdir(Path(str(search.paths)).parent)) == 10 + assert count_output(search.paths) == 10 + assert len(result.child_results) == 10 @pytest.fixture(name="combined_analysis") @@ -43,7 +48,7 @@ def make_combined_analysis(analysis): def test_combined_analysis(combined_analysis, model, search): search.fit_sequential(model=model, analysis=combined_analysis) - assert len(os.listdir(Path(str(search.paths)).parent)) == 10 + assert count_output(search.paths) == 10 def test_with_free_parameter(combined_analysis, model, search): @@ -52,4 +57,4 @@ def test_with_free_parameter(combined_analysis, model, search): model=model, analysis=combined_analysis, ) - assert len(os.listdir(Path(str(search.paths)).parent)) == 10 + assert count_output(search.paths) == 10 From 7c8c1d3392d528c98ca27c52cf174c1396a2bad6 Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 9 Jan 2023 15:32:28 +0000 Subject: [PATCH 145/226] raise exception for normal analysis --- autofit/non_linear/abstract_search.py | 10 ++++++++-- test_autofit/non_linear/test_fit_sequential.py | 6 ++++++ 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/autofit/non_linear/abstract_search.py b/autofit/non_linear/abstract_search.py index ee168414f..e6846791f 100644 --- a/autofit/non_linear/abstract_search.py +++ b/autofit/non_linear/abstract_search.py @@ -462,8 +462,14 @@ def fit_sequential( original_name = self.paths.name or "analysis" model = analysis.modify_model(model=model) - if not isinstance(model, Collection): - model = [model for _ in range(len(analysis.analyses))] + + try: + if not isinstance(model, Collection): + model = [model for _ in range(len(analysis.analyses))] + except AttributeError: + raise ValueError( + f"Analysis with type {type(analysis)} is not supported by fit_sequential" + ) for i, (model, analysis) in enumerate(zip(model, analysis.analyses)): self.paths = copy.copy(_paths) diff --git a/test_autofit/non_linear/test_fit_sequential.py b/test_autofit/non_linear/test_fit_sequential.py index 3dcd8b94d..df8ed131c 100644 --- a/test_autofit/non_linear/test_fit_sequential.py +++ b/test_autofit/non_linear/test_fit_sequential.py @@ -5,6 +5,7 @@ import pytest import autofit as af +from autofit.non_linear.analysis.combined import CombinedResult class Analysis(af.Analysis): @@ -58,3 +59,8 @@ def test_with_free_parameter(combined_analysis, model, search): ) assert count_output(search.paths) == 10 + + +def test_singular_analysis(analysis, model, search): + with pytest.raises(ValueError): + search.fit_sequential(model=model, analysis=analysis) From a0bbe5e9c4fbe7946a956646581e2220a88f8c00 Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 9 Jan 2023 15:35:05 +0000 Subject: [PATCH 146/226] docs --- autofit/non_linear/abstract_search.py | 33 ++++++++++++++++++++++++++- 1 file changed, 32 insertions(+), 1 deletion(-) diff --git a/autofit/non_linear/abstract_search.py b/autofit/non_linear/abstract_search.py index e6846791f..d2a163712 100644 --- a/autofit/non_linear/abstract_search.py +++ b/autofit/non_linear/abstract_search.py @@ -455,7 +455,38 @@ def fit_sequential( info=None, pickle_files=None, log_likelihood_cap=None, - ): + ) -> CombinedResult: + """ + Fit multiple analyses contained within the analysis sequentially. + + This can be useful for avoiding very high dimensional parameter spaces. + + Parameters + ---------- + log_likelihood_cap + analysis + Multiple analyses that are fit sequentially + model + An object that represents possible instances of some model with a + given dimensionality which is the number of free dimensions of the + model. + info + Optional dictionary containing information about the fit that can be loaded by the aggregator. + pickle_files : [str] + Optional list of strings specifying the path and filename of .pickle files, that are copied to each + model-fits pickles folder so they are accessible via the Aggregator. + + Returns + ------- + An object combining the results of each individual optimisation. + + Raises + ------ + AssertionError + If the model has 0 dimensions. + ValueError + If the analysis is not a combined analysis + """ results = [] _paths = self.paths From 57631095c5d3ee6c9be745539986c4b168e01c1b Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 9 Jan 2023 15:54:00 +0000 Subject: [PATCH 147/226] uncommented test --- .../regression/test_linear_regression.py | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/test_autofit/graphical/regression/test_linear_regression.py b/test_autofit/graphical/regression/test_linear_regression.py index 3b6b06042..ce8b73a9a 100644 --- a/test_autofit/graphical/regression/test_linear_regression.py +++ b/test_autofit/graphical/regression/test_linear_regression.py @@ -119,18 +119,18 @@ def test_laplace( check_model_approx(mean_field, a_, b_, z_, x_, y_) -# def test_parallel_laplace( -# model_approx, a_, b_, x_, y_, z_, -# ): -# laplace = LaplaceOptimiser() -# opt = ParallelEPOptimiser( -# model_approx.factor_graph, -# n_cores=len(model_approx.factors) + 1, -# default_optimiser=laplace, -# ) -# model_approx = opt.run(model_approx) -# mean_field = model_approx.mean_field -# check_model_approx(mean_field, a_, b_, z_, x_, y_) +def test_parallel_laplace( + model_approx, a_, b_, x_, y_, z_, +): + laplace = LaplaceOptimiser() + opt = ParallelEPOptimiser( + model_approx.factor_graph, + n_cores=len(model_approx.factors) + 1, + default_optimiser=laplace, + ) + model_approx = opt.run(model_approx) + mean_field = model_approx.mean_field + check_model_approx(mean_field, a_, b_, z_, x_, y_) def _test_laplace_jac( From 9b75a1ccd38c6de308bff5f9b6c862faafcd8e7a Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 9 Jan 2023 16:11:15 +0000 Subject: [PATCH 148/226] remove default root file handler --- autofit/config/logging.yaml | 5 ----- 1 file changed, 5 deletions(-) diff --git a/autofit/config/logging.yaml b/autofit/config/logging.yaml index 18acfbae3..9897e4d68 100644 --- a/autofit/config/logging.yaml +++ b/autofit/config/logging.yaml @@ -7,11 +7,6 @@ handlers: level: INFO stream: ext://sys.stdout formatter: formatter - file: - class: logging.FileHandler - level: INFO - filename: root.log - formatter: formatter root: level: INFO From 888728d50cbec3f94d9888f5e060dce955397c43 Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Tue, 10 Jan 2023 08:41:21 +0000 Subject: [PATCH 149/226] change all to wrappers in plot dynesty --- autofit/non_linear/nest/dynesty/plotter.py | 91 ++++++++++------------ 1 file changed, 43 insertions(+), 48 deletions(-) diff --git a/autofit/non_linear/nest/dynesty/plotter.py b/autofit/non_linear/nest/dynesty/plotter.py index 392d7d436..0bb96294f 100644 --- a/autofit/non_linear/nest/dynesty/plotter.py +++ b/autofit/non_linear/nest/dynesty/plotter.py @@ -1,4 +1,5 @@ from dynesty import plotting as dyplot +from functools import wraps import logging from autofit.plot import SamplesPlotter @@ -6,6 +7,18 @@ logger = logging.getLogger(__name__) +def log_value_error(func): + + @wraps(func) + def wrapper(self, *args, **kwargs): + + try: + return func(self, *args, **kwargs) + except ValueError: + self.log_plot_exception(func.__name__) + + return wrapper + class DynestyPlotter(SamplesPlotter): @staticmethod @@ -70,49 +83,41 @@ def cornerbound(self, **kwargs): self.close() @skip_plot_in_test_mode + @log_value_error def cornerplot(self, **kwargs): """ Plots the in-built ``dynesty`` plot ``cornerplot``. This figure plots a corner plot of the 1-D and 2-D marginalized posteriors. """ - try: - - dyplot.cornerplot( - results=self.samples.results_internal, - labels=self.model.parameter_labels_with_superscripts_latex, - **kwargs - ) - - self.output.to_figure(structure=None, auto_filename="cornerplot") - self.close() - - except ValueError: + dyplot.cornerplot( + results=self.samples.results_internal, + labels=self.model.parameter_labels_with_superscripts_latex, + **kwargs + ) - self.log_plot_exception(plot_name="cornerplot") + self.output.to_figure(structure=None, auto_filename="cornerplot") + self.close() @skip_plot_in_test_mode + @log_value_error def cornerpoints(self, **kwargs): """ Plots the in-built ``dynesty`` plot ``cornerpoints``. This figure plots a (sub-)corner plot of (weighted) samples. """ - try: - dyplot.cornerpoints( - results=self.samples.results_internal, - labels=self.model.parameter_labels_with_superscripts_latex, - **kwargs - ) - - self.output.to_figure(structure=None, auto_filename="cornerpoints") - self.close() - - except ValueError: + dyplot.cornerpoints( + results=self.samples.results_internal, + labels=self.model.parameter_labels_with_superscripts_latex, + **kwargs + ) - self.log_plot_exception(plot_name="cornerpoints") + self.output.to_figure(structure=None, auto_filename="cornerpoints") + self.close() @skip_plot_in_test_mode + @log_value_error def runplot(self, **kwargs): """ Plots the in-built ``dynesty`` plot ``runplot``. @@ -120,36 +125,26 @@ def runplot(self, **kwargs): This figure plots live points, ln(likelihood), ln(weight), and ln(evidence) as a function of ln(prior volume). """ - try: - dyplot.runplot( - results=self.samples.results_internal, - **kwargs - ) - - self.output.to_figure(structure=None, auto_filename="runplot") - self.close() - - except ValueError: + dyplot.runplot( + results=self.samples.results_internal, + **kwargs + ) - self.log_plot_exception(plot_name="runplot") + self.output.to_figure(structure=None, auto_filename="runplot") + self.close() @skip_plot_in_test_mode + @log_value_error def traceplot(self, **kwargs): """ Plots the in-built ``dynesty`` plot ``traceplot``. This figure plots traces and marginalized posteriors for each parameter. """ - try: - - dyplot.traceplot( - results=self.samples.results_internal, - **kwargs - ) - - self.output.to_figure(structure=None, auto_filename="traceplot") - self.close() - - except ValueError: + dyplot.traceplot( + results=self.samples.results_internal, + **kwargs + ) - self.log_plot_exception(plot_name="traceplot") \ No newline at end of file + self.output.to_figure(structure=None, auto_filename="traceplot") + self.close() \ No newline at end of file From 1182f4f8473e5b0222089772e5e4e240e1bb2f19 Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Tue, 10 Jan 2023 08:49:06 +0000 Subject: [PATCH 150/226] review --- autofit/non_linear/abstract_search.py | 7 +++++-- autofit/non_linear/grid/grid_search/job.py | 2 +- autofit/non_linear/paths/abstract.py | 2 +- 3 files changed, 7 insertions(+), 4 deletions(-) diff --git a/autofit/non_linear/abstract_search.py b/autofit/non_linear/abstract_search.py index 9807556cb..918235450 100644 --- a/autofit/non_linear/abstract_search.py +++ b/autofit/non_linear/abstract_search.py @@ -452,7 +452,7 @@ def fit( info=None, pickle_files=None, log_likelihood_cap=None, - bypass_nuclear : bool = False + bypass_nuclear_if_on : bool = False ) -> Union["Result", List["Result"]]: """ Fit a model, M with some function f that takes instances of the @@ -478,6 +478,9 @@ class represented by model M and gives a score for their fitness. pickle_files : [str] Optional list of strings specifying the path and filename of .pickle files, that are copied to each model-fits pickles folder so they are accessible via the Aggregator. + bypass_nuclear_if_on + If nuclear mode is on (environment variable "PYAUTOFIT_NUCLEAR_MODE=1") passing this as True will + bypass it. Returns ------- @@ -572,7 +575,7 @@ class represented by model M and gives a score for their fitness. self.logger.info("Removing zip file") self.paths.zip_remove() - if not bypass_nuclear: + if not bypass_nuclear_if_on: self.paths.zip_remove_nuclear() return result diff --git a/autofit/non_linear/grid/grid_search/job.py b/autofit/non_linear/grid/grid_search/job.py index 72035f071..0172010d9 100644 --- a/autofit/non_linear/grid/grid_search/job.py +++ b/autofit/non_linear/grid/grid_search/job.py @@ -49,7 +49,7 @@ def perform(self): model=self.model, analysis=self.analysis, info=self.info, - bypass_nuclear=True + bypass_nuclear_if_on=True ) result_list_row = [ self.index, diff --git a/autofit/non_linear/paths/abstract.py b/autofit/non_linear/paths/abstract.py index 1447325d8..d77496181 100644 --- a/autofit/non_linear/paths/abstract.py +++ b/autofit/non_linear/paths/abstract.py @@ -272,7 +272,7 @@ def zip_remove_nuclear(self): To enable nuclear mode, set the environment variable ``PYAUTOFIT_NUCLEAR_MODE=1``. - Nuclear model is dangerous, and must be used with CAUTION AND CARE! + Nuclear mode is dangerous, and must be used with CAUTION AND CARE! """ if os.environ.get("PYAUTOFIT_NUCLEAR_MODE") == "1": From d0f97c8a4ec9a4503166242a9f5894e2ead53f6d Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Sat, 14 Jan 2023 15:26:24 +0000 Subject: [PATCH 151/226] bump to 2.5.4 --- optional_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/optional_requirements.txt b/optional_requirements.txt index 529a59a5f..a3ccbe750 100644 --- a/optional_requirements.txt +++ b/optional_requirements.txt @@ -2,4 +2,4 @@ getdist==1.4 jax==0.3.1 jaxlib==0.3.0 ultranest==3.5.5 -zeus-mcmc==2.4.1 +zeus-mcmc==2.5.4 From 3ae0412b819eb239f160551fccfebdba9f6cbfe3 Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Sat, 14 Jan 2023 15:54:42 +0000 Subject: [PATCH 152/226] add description of sequential searches --- docs/overview/multi_datasets.rst | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/docs/overview/multi_datasets.rst b/docs/overview/multi_datasets.rst index 441664585..4bc2d3e60 100644 --- a/docs/overview/multi_datasets.rst +++ b/docs/overview/multi_datasets.rst @@ -189,6 +189,23 @@ We can again fit this model as per usual: result_list = search.fit(model=model, analysis=analysis) +Individual Sequential Searches +------------------------------ + +The API above is used to create a model with free parameters across ``Analysis`` objects, which are all fit +simultaneously using a summed ``log_likelihood_function`` and single non-linear search. + +Each ``Analysis`` can be fitted one-by-one, using a series of multiple non-linear searches, using +the ``fit_sequential`` method: + +.. code-block:: python + + result_list = search.fit_sequential(model=model, analysis=analysis) + +The benefit of this method is for complex high dimensionality models (e.g. when many parameters are passed +to `` analysis.with_free_parameters``, it breaks the fit down into a series of lower dimensionality non-linear +searches that may convergence on a solution more reliably. + Variable Model With Relationships --------------------------------- From fe915e9251294231908f62b67bc905e44f18c1a3 Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Sat, 14 Jan 2023 16:00:38 +0000 Subject: [PATCH 153/226] Revert "feature/fit sequential" --- autofit/non_linear/abstract_search.py | 72 ------------ autofit/non_linear/analysis/combined.py | 26 +---- autofit/non_linear/analysis/indexed.py | 12 +- autofit/non_linear/initializer.py | 7 +- test_autofit/analysis/test_free_parameter.py | 106 +++++++++++++----- test_autofit/conftest.py | 59 +++++++--- .../non_linear/test_fit_sequential.py | 66 ----------- 7 files changed, 140 insertions(+), 208 deletions(-) delete mode 100644 test_autofit/non_linear/test_fit_sequential.py diff --git a/autofit/non_linear/abstract_search.py b/autofit/non_linear/abstract_search.py index 37d319311..1411e67dc 100644 --- a/autofit/non_linear/abstract_search.py +++ b/autofit/non_linear/abstract_search.py @@ -22,7 +22,6 @@ FactorApproximation, ) from autofit.graphical.utils import Status -from autofit.mapper.prior_model.collection import Collection from autofit.non_linear.initializer import Initializer from autofit.non_linear.parallel import SneakyPool from autofit.non_linear.paths.abstract import AbstractPaths @@ -31,8 +30,6 @@ from autofit.non_linear.result import Result from autofit.non_linear.timer import Timer from .analysis import Analysis -from .analysis.combined import CombinedResult -from .analysis.indexed import IndexCollectionAnalysis from .paths.null import NullPaths from ..graphical.declarative.abstract import PriorFactor from ..graphical.expectation_propagation import AbstractFactorOptimiser @@ -448,75 +445,6 @@ def resample_figure_of_merit(self): """ return -np.inf - def fit_sequential( - self, - model, - analysis: IndexCollectionAnalysis, - info=None, - pickle_files=None, - log_likelihood_cap=None, - ) -> CombinedResult: - """ - Fit multiple analyses contained within the analysis sequentially. - - This can be useful for avoiding very high dimensional parameter spaces. - - Parameters - ---------- - log_likelihood_cap - analysis - Multiple analyses that are fit sequentially - model - An object that represents possible instances of some model with a - given dimensionality which is the number of free dimensions of the - model. - info - Optional dictionary containing information about the fit that can be loaded by the aggregator. - pickle_files : [str] - Optional list of strings specifying the path and filename of .pickle files, that are copied to each - model-fits pickles folder so they are accessible via the Aggregator. - - Returns - ------- - An object combining the results of each individual optimisation. - - Raises - ------ - AssertionError - If the model has 0 dimensions. - ValueError - If the analysis is not a combined analysis - """ - results = [] - - _paths = self.paths - original_name = self.paths.name or "analysis" - - model = analysis.modify_model(model=model) - - try: - if not isinstance(model, Collection): - model = [model for _ in range(len(analysis.analyses))] - except AttributeError: - raise ValueError( - f"Analysis with type {type(analysis)} is not supported by fit_sequential" - ) - - for i, (model, analysis) in enumerate(zip(model, analysis.analyses)): - self.paths = copy.copy(_paths) - self.paths.name = f"{original_name}/{i}" - results.append( - self.fit( - model=model, - analysis=analysis, - info=info, - pickle_files=pickle_files, - log_likelihood_cap=log_likelihood_cap, - ) - ) - self.paths = _paths - return CombinedResult(results) - def fit( self, model, diff --git a/autofit/non_linear/analysis/combined.py b/autofit/non_linear/analysis/combined.py index 93d8ed371..dcdff72d9 100644 --- a/autofit/non_linear/analysis/combined.py +++ b/autofit/non_linear/analysis/combined.py @@ -13,20 +13,6 @@ logger = logging.getLogger(__name__) -class CombinedResult: - def __init__(self, results): - self.child_results = results - - def __getattr__(self, item): - return getattr(self.child_results[0], item) - - def __iter__(self): - return iter(self.child_results) - - def __len__(self): - return len(self.child_results) - - class CombinedAnalysis(Analysis): def __new__(cls, *analyses, **kwargs): from .model_analysis import ModelAnalysis, CombinedModelAnalysis @@ -213,15 +199,15 @@ def func(child_paths, analysis): def make_result(self, samples, model, sigma=1.0, use_errors=True, use_widths=False): child_results = [ analysis.make_result( - samples, - model, - sigma=sigma, - use_errors=use_errors, - use_widths=use_widths, + samples, model, sigma=1.0, use_errors=True, use_widths=False ) for analysis in self.analyses ] - return CombinedResult(child_results) + result = self.analyses[0].make_result( + samples=samples, model=model, sigma=1.0, use_errors=True, use_widths=False + ) + result.child_results = child_results + return result def __len__(self): return len(self.analyses) diff --git a/autofit/non_linear/analysis/indexed.py b/autofit/non_linear/analysis/indexed.py index da7f9200d..fd56a148f 100644 --- a/autofit/non_linear/analysis/indexed.py +++ b/autofit/non_linear/analysis/indexed.py @@ -1,7 +1,7 @@ import logging from .analysis import Analysis -from .combined import CombinedAnalysis, CombinedResult +from .combined import CombinedAnalysis from ..paths.abstract import AbstractPaths from autofit.mapper.prior_model.collection import Collection @@ -80,7 +80,15 @@ def make_result(self, samples, model, sigma=1.0, use_errors=True, use_widths=Fal ) for model, analysis in zip(model, self.analyses) ] - return CombinedResult(child_results) + result = self.analyses[0].make_result( + samples=samples, + model=model, + sigma=sigma, + use_errors=use_errors, + use_widths=use_widths, + ) + result.child_results = child_results + return result def modify_before_fit(self, paths: AbstractPaths, model: Collection): """ diff --git a/autofit/non_linear/initializer.py b/autofit/non_linear/initializer.py index 6c15840e4..3b118b0d0 100644 --- a/autofit/non_linear/initializer.py +++ b/autofit/non_linear/initializer.py @@ -1,10 +1,9 @@ import configparser import logging -import os import random from abc import ABC, abstractmethod from typing import Dict, Tuple, List - +import os import numpy as np from autofit import exc @@ -82,9 +81,7 @@ def samples_from_model( except exc.FitException: pass - if total_points > 1 and np.allclose( - a=figures_of_merit_list[0], b=figures_of_merit_list[1:] - ): + if np.allclose(a=figures_of_merit_list[0], b=figures_of_merit_list[1:]): raise exc.InitializerException( """ The initial samples all have the same figure of merit (e.g. log likelihood values). diff --git a/test_autofit/analysis/test_free_parameter.py b/test_autofit/analysis/test_free_parameter.py index 80cc6c1f4..3dc430541 100644 --- a/test_autofit/analysis/test_free_parameter.py +++ b/test_autofit/analysis/test_free_parameter.py @@ -14,13 +14,13 @@ def test_copy(): assert collection.prior_count == model.prior_count -def test_log_likelihood(modified, combined_analysis): - assert ( - combined_analysis.log_likelihood_function( - modified.instance_from_prior_medians() - ) - == 2 - ) +def test_log_likelihood( + modified, + combined_analysis +): + assert combined_analysis.log_likelihood_function( + modified.instance_from_prior_medians() + ) == 2 def test_analyses_example(Analysis): @@ -33,59 +33,98 @@ def test_analyses_example(Analysis): ]: copy = model.copy() copy.centre = prior - analyses.append(Analysis()) + analyses.append( + Analysis() + ) -@pytest.fixture(name="combined_analysis") +@pytest.fixture( + name="combined_analysis" +) def make_combined_analysis(model, Analysis): - return (Analysis() + Analysis()).with_free_parameters(model.centre) + return (Analysis() + Analysis()).with_free_parameters( + model.centre + ) def test_multiple_free_parameters(model, Analysis): combined_analysis = (Analysis() + Analysis()).with_free_parameters( - model.centre, model.sigma + model.centre, + model.sigma ) first, second = combined_analysis.modify_model(model) assert first.centre is not second.centre assert first.sigma is not second.sigma -def test_add_free_parameter(combined_analysis): - assert isinstance(combined_analysis, FreeParameterAnalysis) +def test_add_free_parameter( + combined_analysis +): + assert isinstance( + combined_analysis, + FreeParameterAnalysis + ) -@pytest.fixture(name="modified") -def make_modified(model, combined_analysis): +@pytest.fixture( + name="modified" +) +def make_modified( + model, + combined_analysis +): return combined_analysis.modify_model(model) -def test_modify_model(modified): +def test_modify_model( + modified +): assert isinstance(modified, af.Collection) assert len(modified) == 2 -def test_modified_models(modified): +def test_modified_models( + modified +): first, second = modified - assert isinstance(first.sigma, af.Prior) + assert isinstance( + first.sigma, + af.Prior + ) assert first.sigma == second.sigma assert first.centre != second.centre -@pytest.fixture(name="result") +@pytest.fixture( + name="result" +) def make_result( - combined_analysis, model, + combined_analysis, + model, ): optimizer = MockOptimizer() - return optimizer.fit(model, combined_analysis) + return optimizer.fit( + model, + combined_analysis + ) -@pytest.fixture(autouse=True) +@pytest.fixture( + autouse=True +) def do_remove_output(remove_output): yield remove_output() +def test_result_type(result, Result): + assert isinstance(result, Result) + + for result_ in result: + assert isinstance(result_, Result) + + def test_integration(result): result_1, result_2 = result @@ -94,16 +133,24 @@ def test_integration(result): def test_tuple_prior(model, Analysis): - model.centre = af.TuplePrior(centre_0=af.UniformPrior()) - combined = (Analysis() + Analysis()).with_free_parameters(model.centre) + model.centre = af.TuplePrior( + centre_0=af.UniformPrior() + ) + combined = (Analysis() + Analysis()).with_free_parameters( + model.centre + ) first, second = combined.modify_model(model) assert first.centre.centre_0 != second.centre.centre_0 def test_prior_model(model, Analysis): - model = af.Collection(model=model) - combined = (Analysis() + Analysis()).with_free_parameters(model.model) + model = af.Collection( + model=model + ) + combined = (Analysis() + Analysis()).with_free_parameters( + model.model + ) modified = combined.modify_model(model) first = modified[0].model second = modified[1].model @@ -115,7 +162,12 @@ def test_prior_model(model, Analysis): def test_split_samples(modified): samples = af.Samples( - modified, af.Sample.from_lists(modified, [[1, 2, 3, 4]], [1], [1], [1]), + modified, + af.Sample.from_lists( + modified, + [[1, 2, 3, 4]], + [1], [1], [1] + ), ) combined = samples.max_log_likelihood() diff --git a/test_autofit/conftest.py b/test_autofit/conftest.py index 650dc80a4..4cb8a84ca 100644 --- a/test_autofit/conftest.py +++ b/test_autofit/conftest.py @@ -13,24 +13,37 @@ from autofit import fixtures from autofit.database.model import sa -if sys.platform == "darwin": - multiprocessing.set_start_method("forkserver") +if sys.platform == 'darwin': + multiprocessing.set_start_method('forkserver') directory = Path(__file__).parent -@pytest.fixture(name="test_directory", scope="session") +@pytest.fixture( + name="test_directory", + scope="session" +) def make_test_directory(): return directory -@pytest.fixture(name="output_directory", scope="session") -def make_output_directory(test_directory): +@pytest.fixture( + name="output_directory", + scope="session" +) +def make_output_directory( + test_directory +): return test_directory / "output" -@pytest.fixture(name="remove_output", scope="session") -def make_remove_output(output_directory): +@pytest.fixture( + name="remove_output", + scope="session" +) +def make_remove_output( + output_directory +): def remove_output(): try: for item in os.listdir(output_directory): @@ -38,18 +51,27 @@ def remove_output(): item_path = output_directory / item if item_path.is_dir(): shutil.rmtree( - item_path, ignore_errors=True, + item_path, + ignore_errors=True, ) else: - os.remove(item_path) - except (FileExistsError, FileNotFoundError): + os.remove( + item_path + ) + except FileExistsError: pass return remove_output -@pytest.fixture(autouse=True, scope="session") -def do_remove_output(output_directory, remove_output): +@pytest.fixture( + autouse=True, + scope="session" +) +def do_remove_output( + output_directory, + remove_output +): yield remove_output() @@ -71,7 +93,7 @@ def make_plot_patch(monkeypatch): @pytest.fixture(name="session") def make_session(): - engine = sa.create_engine("sqlite://") + engine = sa.create_engine('sqlite://') session = sa.orm.sessionmaker(bind=engine)() db.Base.metadata.create_all(engine) yield session @@ -79,7 +101,10 @@ def make_session(): engine.dispose() -@pytest.fixture(autouse=True, scope="session") +@pytest.fixture( + autouse=True, + scope="session" +) def remove_logs(): yield for d, _, files in os.walk(directory): @@ -92,11 +117,13 @@ def remove_logs(): def set_config_path(): conf.instance.push( new_path=path.join(directory, "config"), - output_path=path.join(directory, "output"), + output_path=path.join(directory, "output") ) -@pytest.fixture(name="model_gaussian_x1") +@pytest.fixture( + name="model_gaussian_x1" +) def make_model_gaussian_x1(): return fixtures.make_model_gaussian_x1() diff --git a/test_autofit/non_linear/test_fit_sequential.py b/test_autofit/non_linear/test_fit_sequential.py deleted file mode 100644 index df8ed131c..000000000 --- a/test_autofit/non_linear/test_fit_sequential.py +++ /dev/null @@ -1,66 +0,0 @@ -import os -from pathlib import Path -from random import random - -import pytest - -import autofit as af -from autofit.non_linear.analysis.combined import CombinedResult - - -class Analysis(af.Analysis): - def log_likelihood_function(self, instance): - return -random() - - -@pytest.fixture(name="search") -def make_search(): - return af.LBFGS(name="test_lbfgs") - - -@pytest.fixture(name="model") -def make_model(): - return af.Model(af.Gaussian) - - -@pytest.fixture(name="analysis") -def make_analysis(): - return Analysis() - - -def count_output(paths): - return len(os.listdir(Path(str(paths)).parent)) - - -def test_with_model(analysis, model, search): - combined_analysis = sum([analysis.with_model(model) for _ in range(10)]) - - result = search.fit_sequential(model=model, analysis=combined_analysis) - - assert count_output(search.paths) == 10 - assert len(result.child_results) == 10 - - -@pytest.fixture(name="combined_analysis") -def make_combined_analysis(analysis): - return sum([analysis for _ in range(10)]) - - -def test_combined_analysis(combined_analysis, model, search): - search.fit_sequential(model=model, analysis=combined_analysis) - - assert count_output(search.paths) == 10 - - -def test_with_free_parameter(combined_analysis, model, search): - combined_analysis = combined_analysis.with_free_parameters([model.centre]) - search.fit_sequential( - model=model, analysis=combined_analysis, - ) - - assert count_output(search.paths) == 10 - - -def test_singular_analysis(analysis, model, search): - with pytest.raises(ValueError): - search.fit_sequential(model=model, analysis=analysis) From 7b4e8956a588da782ed9d2d7dbcb722405d78f36 Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Sun, 15 Jan 2023 17:40:06 +0000 Subject: [PATCH 154/226] version bump --- autofit/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autofit/__init__.py b/autofit/__init__.py index a7ef52b2c..c71782ffb 100644 --- a/autofit/__init__.py +++ b/autofit/__init__.py @@ -109,4 +109,4 @@ def save_abc(pickler, obj): conf.instance.register(__file__) -__version__ = "2022.11.26.11" +__version__ = "2023.1.15.1" From 2721d341c8888354f161e3bd8434f67b3a55ff50 Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 16 Jan 2023 08:37:28 +0000 Subject: [PATCH 155/226] implemented models_with_type --- autofit/example/model.py | 44 +++++------ autofit/mapper/model.py | 16 +++- test_autofit/mapper/test_has.py | 133 ++++++++++---------------------- 3 files changed, 75 insertions(+), 118 deletions(-) diff --git a/autofit/example/model.py b/autofit/example/model.py index 936b59d15..eb18c9b2c 100644 --- a/autofit/example/model.py +++ b/autofit/example/model.py @@ -1,7 +1,8 @@ import math -import numpy as np from typing import Dict +import numpy as np + from autoconf.dictable import Dictable """ @@ -17,9 +18,9 @@ class Gaussian(Dictable): def __init__( self, - centre:float=0.0, # <- PyAutoFit recognises these constructor arguments - normalization:float=0.1, # <- are the Gaussian`s model parameters. - sigma:float=0.01, + centre: float = 0.0, # <- PyAutoFit recognises these constructor arguments + normalization: float = 0.1, # <- are the Gaussian`s model parameters. + sigma: float = 0.01, ): """ Represents a 1D `Gaussian` profile, which may be treated as a model-component of PyAutoFit the @@ -38,7 +39,7 @@ def __init__( self.normalization = normalization self.sigma = sigma - def model_data_1d_via_xvalues_from(self, xvalues:np.ndarray) -> np.ndarray: + def model_data_1d_via_xvalues_from(self, xvalues: np.ndarray) -> np.ndarray: """ Calculate the normalization of the profile on a 1D grid of Cartesian x coordinates. @@ -56,7 +57,7 @@ def model_data_1d_via_xvalues_from(self, xvalues:np.ndarray) -> np.ndarray: np.exp(-0.5 * np.square(np.divide(transformed_xvalues, self.sigma))), ) - def __call__(self, xvalues:np.ndarray) -> np.ndarray: + def __call__(self, xvalues: np.ndarray) -> np.ndarray: """ For certain graphical models, the `__call__` function is overwritten for producing the model-fit. We include this here so these examples work, but it should not be important for most PyAutoFit users. @@ -82,33 +83,24 @@ def dict(self) -> Dict: """ return super().dict() - def inverse( - self, - y - ): + def inverse(self, y): """ For graphical models, the inverse of the Gaussian is used to test certain aspects of the calculation. """ - a = self.normalization / ( - y * self.sigma * math.sqrt(2 * math.pi) - ) + a = self.normalization / (y * self.sigma * math.sqrt(2 * math.pi)) - b = 2 * math.log( - a - ) - - return self.centre + self.sigma * math.sqrt( - b - ) + b = 2 * math.log(a) + + return self.centre + self.sigma * math.sqrt(b) class Exponential(Dictable): def __init__( self, - centre:float=0.0, # <- PyAutoFit recognises these constructor arguments are the model - normalization:float=0.1, # <- parameters of the Gaussian. - rate:float=0.01, + centre: float = 0.0, # <- PyAutoFit recognises these constructor arguments are the model + normalization: float = 0.1, # <- parameters of the Gaussian. + rate: float = 0.01, ): """ Represents a 1D Exponential profile, which may be treated as a model-component of PyAutoFit the @@ -127,7 +119,7 @@ def __init__( self.normalization = normalization self.rate = rate - def model_data_1d_via_xvalues_from(self, xvalues:np.ndarray) -> np.ndarray: + def model_data_1d_via_xvalues_from(self, xvalues: np.ndarray) -> np.ndarray: """ Calculate the 1D Gaussian profile on a 1D grid of Cartesian x coordinates. @@ -143,7 +135,7 @@ def model_data_1d_via_xvalues_from(self, xvalues:np.ndarray) -> np.ndarray: self.rate, np.exp(-1.0 * self.rate * abs(transformed_xvalues)) ) - def __call__(self, xvalues:np.ndarray) -> np.ndarray: + def __call__(self, xvalues: np.ndarray) -> np.ndarray: """ Calculate the 1D Gaussian profile on a 1D grid of Cartesian x coordinates. @@ -169,4 +161,4 @@ def dict(self) -> Dict: The `Gaussian` type and model parameters as a dictionary. """ - return super().dict() \ No newline at end of file + return super().dict() diff --git a/autofit/mapper/model.py b/autofit/mapper/model.py index 867c07664..b2c8434f6 100644 --- a/autofit/mapper/model.py +++ b/autofit/mapper/model.py @@ -221,7 +221,21 @@ def direct_tuples_with_type(self, class_type): ) @frozen_cache - def model_tuples_with_type(self, cls, include_zero_dimension=False): + def models_with_type( + self, cls: Union[Type, Tuple[Type, ...]], include_zero_dimension=False + ): + # noinspection PyTypeChecker + return [ + t[1] + for t in self.model_tuples_with_type( + cls, include_zero_dimension=include_zero_dimension + ) + ] + + @frozen_cache + def model_tuples_with_type( + self, cls: Union[Type, Tuple[Type, ...]], include_zero_dimension=False + ): """ All models of the class in this model which have at least one free parameter, recursively. diff --git a/test_autofit/mapper/test_has.py b/test_autofit/mapper/test_has.py index 226591572..ffdf09445 100644 --- a/test_autofit/mapper/test_has.py +++ b/test_autofit/mapper/test_has.py @@ -1,130 +1,81 @@ +import pytest + import autofit as af +from autofit.example import Exponential + class GaussianChild(af.Gaussian): pass def test_inheritance(): - collection = af.Collection( - first=af.Model( - GaussianChild - ), - second=GaussianChild() - ) + collection = af.Collection(first=af.Model(GaussianChild), second=GaussianChild()) - assert collection.has_instance( - af.Gaussian - ) - assert collection.has_model( - af.Gaussian - ) - - -def test_embedded(): - collection = af.Collection( - model=af.Model( - af.Gaussian, - centre=GaussianChild - ) - ) - assert collection.has_model( - GaussianChild - ) + assert collection.has_instance(af.Gaussian) + assert collection.has_model(af.Gaussian) def test_no_free_parameters(): collection = af.Collection( - gaussian=af.Model( - af.Gaussian, - centre=1.0, - normalization=1.0, - sigma=1.0, - ) + gaussian=af.Model(af.Gaussian, centre=1.0, normalization=1.0, sigma=1.0,) ) assert collection.prior_count == 0 - assert collection.has_model( - af.Gaussian - ) is False + assert collection.has_model(af.Gaussian) is False def test_instance(): - collection = af.Collection( - gaussian=af.Gaussian() - ) + collection = af.Collection(gaussian=af.Gaussian()) - assert collection.has_instance( - af.Gaussian - ) is True - assert collection.has_model( - af.Gaussian - ) is False + assert collection.has_instance(af.Gaussian) is True + assert collection.has_model(af.Gaussian) is False def test_model(): - collection = af.Collection( - gaussian=af.Model( - af.Gaussian - ) - ) + collection = af.Collection(gaussian=af.Model(af.Gaussian)) - assert collection.has_model( - af.Gaussian - ) is True - assert collection.has_instance( - af.Gaussian - ) is False + assert collection.has_model(af.Gaussian) is True + assert collection.has_instance(af.Gaussian) is False def test_both(): - collection = af.Collection( - gaussian=af.Model( - af.Gaussian - ), - gaussian_2=af.Gaussian() - ) + collection = af.Collection(gaussian=af.Model(af.Gaussian), gaussian_2=af.Gaussian()) - assert collection.has_model( - af.Gaussian - ) is True - assert collection.has_instance( - af.Gaussian - ) is True + assert collection.has_model(af.Gaussian) is True + assert collection.has_instance(af.Gaussian) is True def test_embedded(): - collection = af.Collection( - gaussian=af.Model( - af.Gaussian, - centre=af.Gaussian() - ), - ) + collection = af.Collection(gaussian=af.Model(af.Gaussian, centre=af.Gaussian()),) - assert collection.has_model( - af.Gaussian - ) is True - assert collection.has_instance( - af.Gaussian - ) is True + assert collection.has_model(af.Gaussian) is True + assert collection.has_instance(af.Gaussian) is True def test_is_only_model(): collection = af.Collection( - gaussian=af.Model( - af.Gaussian - ), - gaussian_2=af.Model( - af.Gaussian - ) + gaussian=af.Model(af.Gaussian), gaussian_2=af.Model(af.Gaussian) ) - assert collection.is_only_model( - af.Gaussian - ) is True + assert collection.is_only_model(af.Gaussian) is True - collection.other = af.Model( - af.m.MockClassx2 + collection.other = af.Model(af.m.MockClassx2) + + assert collection.is_only_model(af.Gaussian) is False + + +@pytest.fixture(name="collection") +def make_collection(): + return af.Collection( + gaussian=af.Model(af.Gaussian), exponential=af.Model(Exponential), ) - assert collection.is_only_model( - af.Gaussian - ) is False + +def test_models(collection): + assert collection.models_with_type(af.Gaussian) == [collection.gaussian] + + +def test_multiple_types(collection): + assert collection.models_with_type((af.Gaussian, Exponential)) == [ + collection.gaussian, + collection.exponential, + ] From 1eb353c5b90b263c955892fc66d86a1d052f32d8 Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 16 Jan 2023 08:38:28 +0000 Subject: [PATCH 156/226] docs --- autofit/mapper/model.py | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/autofit/mapper/model.py b/autofit/mapper/model.py index b2c8434f6..98d67e894 100644 --- a/autofit/mapper/model.py +++ b/autofit/mapper/model.py @@ -222,8 +222,22 @@ def direct_tuples_with_type(self, class_type): @frozen_cache def models_with_type( - self, cls: Union[Type, Tuple[Type, ...]], include_zero_dimension=False - ): + self, cls: Union[Type, Tuple[Type, ...]], include_zero_dimension=False, + ) -> List["AbstractModel"]: + """ + Return all models of a given type in the model tree. + + Parameters + ---------- + cls + The type to find instances of + include_zero_dimension + If true, include models with zero dimensions + + Returns + ------- + A list of models of the given type + """ # noinspection PyTypeChecker return [ t[1] From 71d50588b2b798d5e47b29c400cba29fea4df457 Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 16 Jan 2023 09:04:54 +0000 Subject: [PATCH 157/226] Revert "Revert "feature/fit sequential"" This reverts commit fe915e9251294231908f62b67bc905e44f18c1a3. --- autofit/non_linear/abstract_search.py | 72 ++++++++++++ autofit/non_linear/analysis/combined.py | 26 ++++- autofit/non_linear/analysis/indexed.py | 12 +- autofit/non_linear/initializer.py | 7 +- test_autofit/analysis/test_free_parameter.py | 106 +++++------------- test_autofit/conftest.py | 59 +++------- .../non_linear/test_fit_sequential.py | 66 +++++++++++ 7 files changed, 208 insertions(+), 140 deletions(-) create mode 100644 test_autofit/non_linear/test_fit_sequential.py diff --git a/autofit/non_linear/abstract_search.py b/autofit/non_linear/abstract_search.py index 1411e67dc..37d319311 100644 --- a/autofit/non_linear/abstract_search.py +++ b/autofit/non_linear/abstract_search.py @@ -22,6 +22,7 @@ FactorApproximation, ) from autofit.graphical.utils import Status +from autofit.mapper.prior_model.collection import Collection from autofit.non_linear.initializer import Initializer from autofit.non_linear.parallel import SneakyPool from autofit.non_linear.paths.abstract import AbstractPaths @@ -30,6 +31,8 @@ from autofit.non_linear.result import Result from autofit.non_linear.timer import Timer from .analysis import Analysis +from .analysis.combined import CombinedResult +from .analysis.indexed import IndexCollectionAnalysis from .paths.null import NullPaths from ..graphical.declarative.abstract import PriorFactor from ..graphical.expectation_propagation import AbstractFactorOptimiser @@ -445,6 +448,75 @@ def resample_figure_of_merit(self): """ return -np.inf + def fit_sequential( + self, + model, + analysis: IndexCollectionAnalysis, + info=None, + pickle_files=None, + log_likelihood_cap=None, + ) -> CombinedResult: + """ + Fit multiple analyses contained within the analysis sequentially. + + This can be useful for avoiding very high dimensional parameter spaces. + + Parameters + ---------- + log_likelihood_cap + analysis + Multiple analyses that are fit sequentially + model + An object that represents possible instances of some model with a + given dimensionality which is the number of free dimensions of the + model. + info + Optional dictionary containing information about the fit that can be loaded by the aggregator. + pickle_files : [str] + Optional list of strings specifying the path and filename of .pickle files, that are copied to each + model-fits pickles folder so they are accessible via the Aggregator. + + Returns + ------- + An object combining the results of each individual optimisation. + + Raises + ------ + AssertionError + If the model has 0 dimensions. + ValueError + If the analysis is not a combined analysis + """ + results = [] + + _paths = self.paths + original_name = self.paths.name or "analysis" + + model = analysis.modify_model(model=model) + + try: + if not isinstance(model, Collection): + model = [model for _ in range(len(analysis.analyses))] + except AttributeError: + raise ValueError( + f"Analysis with type {type(analysis)} is not supported by fit_sequential" + ) + + for i, (model, analysis) in enumerate(zip(model, analysis.analyses)): + self.paths = copy.copy(_paths) + self.paths.name = f"{original_name}/{i}" + results.append( + self.fit( + model=model, + analysis=analysis, + info=info, + pickle_files=pickle_files, + log_likelihood_cap=log_likelihood_cap, + ) + ) + self.paths = _paths + return CombinedResult(results) + def fit( self, model, diff --git a/autofit/non_linear/analysis/combined.py b/autofit/non_linear/analysis/combined.py index dcdff72d9..93d8ed371 100644 --- a/autofit/non_linear/analysis/combined.py +++ b/autofit/non_linear/analysis/combined.py @@ -13,6 +13,20 @@ logger = logging.getLogger(__name__) +class CombinedResult: + def __init__(self, results): + self.child_results = results + + def __getattr__(self, item): + return getattr(self.child_results[0], item) + + def __iter__(self): + return iter(self.child_results) + + def __len__(self): + return len(self.child_results) + + class CombinedAnalysis(Analysis): def __new__(cls, *analyses, **kwargs): from .model_analysis import ModelAnalysis, CombinedModelAnalysis @@ -199,15 +213,15 @@ def func(child_paths, analysis): def make_result(self, samples, model, sigma=1.0, use_errors=True, use_widths=False): child_results = [ analysis.make_result( - samples, model, sigma=1.0, use_errors=True, use_widths=False + samples, + model, + sigma=sigma, + use_errors=use_errors, + use_widths=use_widths, ) for analysis in self.analyses ] - result = self.analyses[0].make_result( - samples=samples, model=model, sigma=1.0, use_errors=True, use_widths=False - ) - result.child_results = child_results - return result + return CombinedResult(child_results) def __len__(self): return len(self.analyses) diff --git a/autofit/non_linear/analysis/indexed.py b/autofit/non_linear/analysis/indexed.py index fd56a148f..da7f9200d 100644 --- a/autofit/non_linear/analysis/indexed.py +++ b/autofit/non_linear/analysis/indexed.py @@ -1,7 +1,7 @@ import logging from .analysis import Analysis -from .combined import CombinedAnalysis +from .combined import CombinedAnalysis, CombinedResult from ..paths.abstract import AbstractPaths from autofit.mapper.prior_model.collection import Collection @@ -80,15 +80,7 @@ def make_result(self, samples, model, sigma=1.0, use_errors=True, use_widths=Fal ) for model, analysis in zip(model, self.analyses) ] - result = self.analyses[0].make_result( - samples=samples, - model=model, - sigma=sigma, - use_errors=use_errors, - use_widths=use_widths, - ) - result.child_results = child_results - return result + return CombinedResult(child_results) def modify_before_fit(self, paths: AbstractPaths, model: Collection): """ diff --git a/autofit/non_linear/initializer.py b/autofit/non_linear/initializer.py index 3b118b0d0..6c15840e4 100644 --- a/autofit/non_linear/initializer.py +++ b/autofit/non_linear/initializer.py @@ -1,9 +1,10 @@ import configparser import logging +import os import random from abc import ABC, abstractmethod from typing import Dict, Tuple, List -import os + import numpy as np from autofit import exc @@ -81,7 +82,9 @@ def samples_from_model( except exc.FitException: pass - if np.allclose(a=figures_of_merit_list[0], b=figures_of_merit_list[1:]): + if total_points > 1 and np.allclose( + a=figures_of_merit_list[0], b=figures_of_merit_list[1:] + ): raise exc.InitializerException( """ The initial samples all have the same figure of merit (e.g. log likelihood values). diff --git a/test_autofit/analysis/test_free_parameter.py b/test_autofit/analysis/test_free_parameter.py index 3dc430541..80cc6c1f4 100644 --- a/test_autofit/analysis/test_free_parameter.py +++ b/test_autofit/analysis/test_free_parameter.py @@ -14,13 +14,13 @@ def test_copy(): assert collection.prior_count == model.prior_count -def test_log_likelihood( - modified, - combined_analysis -): - assert combined_analysis.log_likelihood_function( - modified.instance_from_prior_medians() - ) == 2 +def test_log_likelihood(modified, combined_analysis): + assert ( + combined_analysis.log_likelihood_function( + modified.instance_from_prior_medians() + ) + == 2 + ) def test_analyses_example(Analysis): @@ -33,98 +33,59 @@ def test_analyses_example(Analysis): ]: copy = model.copy() copy.centre = prior - analyses.append( - Analysis() - ) + analyses.append(Analysis()) -@pytest.fixture( - name="combined_analysis" -) +@pytest.fixture(name="combined_analysis") def make_combined_analysis(model, Analysis): - return (Analysis() + Analysis()).with_free_parameters( - model.centre - ) + return (Analysis() + Analysis()).with_free_parameters(model.centre) def test_multiple_free_parameters(model, Analysis): combined_analysis = (Analysis() + Analysis()).with_free_parameters( - model.centre, - model.sigma + model.centre, model.sigma ) first, second = combined_analysis.modify_model(model) assert first.centre is not second.centre assert first.sigma is not second.sigma -def test_add_free_parameter( - combined_analysis -): - assert isinstance( - combined_analysis, - FreeParameterAnalysis - ) +def test_add_free_parameter(combined_analysis): + assert isinstance(combined_analysis, FreeParameterAnalysis) -@pytest.fixture( - name="modified" -) -def make_modified( - model, - combined_analysis -): +@pytest.fixture(name="modified") +def make_modified(model, combined_analysis): return combined_analysis.modify_model(model) -def test_modify_model( - modified -): +def test_modify_model(modified): assert isinstance(modified, af.Collection) assert len(modified) == 2 -def test_modified_models( - modified -): +def test_modified_models(modified): first, second = modified - assert isinstance( - first.sigma, - af.Prior - ) + assert isinstance(first.sigma, af.Prior) assert first.sigma == second.sigma assert first.centre != second.centre -@pytest.fixture( - name="result" -) +@pytest.fixture(name="result") def make_result( - combined_analysis, - model, + combined_analysis, model, ): optimizer = MockOptimizer() - return optimizer.fit( - model, - combined_analysis - ) + return optimizer.fit(model, combined_analysis) -@pytest.fixture( - autouse=True -) +@pytest.fixture(autouse=True) def do_remove_output(remove_output): yield remove_output() -def test_result_type(result, Result): - assert isinstance(result, Result) - - for result_ in result: - assert isinstance(result_, Result) - - def test_integration(result): result_1, result_2 = result @@ -133,24 +94,16 @@ def test_integration(result): def test_tuple_prior(model, Analysis): - model.centre = af.TuplePrior( - centre_0=af.UniformPrior() - ) - combined = (Analysis() + Analysis()).with_free_parameters( - model.centre - ) + model.centre = af.TuplePrior(centre_0=af.UniformPrior()) + combined = (Analysis() + Analysis()).with_free_parameters(model.centre) first, second = combined.modify_model(model) assert first.centre.centre_0 != second.centre.centre_0 def test_prior_model(model, Analysis): - model = af.Collection( - model=model - ) - combined = (Analysis() + Analysis()).with_free_parameters( - model.model - ) + model = af.Collection(model=model) + combined = (Analysis() + Analysis()).with_free_parameters(model.model) modified = combined.modify_model(model) first = modified[0].model second = modified[1].model @@ -162,12 +115,7 @@ def test_prior_model(model, Analysis): def test_split_samples(modified): samples = af.Samples( - modified, - af.Sample.from_lists( - modified, - [[1, 2, 3, 4]], - [1], [1], [1] - ), + modified, af.Sample.from_lists(modified, [[1, 2, 3, 4]], [1], [1], [1]), ) combined = samples.max_log_likelihood() diff --git a/test_autofit/conftest.py b/test_autofit/conftest.py index 4cb8a84ca..650dc80a4 100644 --- a/test_autofit/conftest.py +++ b/test_autofit/conftest.py @@ -13,37 +13,24 @@ from autofit import fixtures from autofit.database.model import sa -if sys.platform == 'darwin': - multiprocessing.set_start_method('forkserver') +if sys.platform == "darwin": + multiprocessing.set_start_method("forkserver") directory = Path(__file__).parent -@pytest.fixture( - name="test_directory", - scope="session" -) +@pytest.fixture(name="test_directory", scope="session") def make_test_directory(): return directory -@pytest.fixture( - name="output_directory", - scope="session" -) -def make_output_directory( - test_directory -): +@pytest.fixture(name="output_directory", scope="session") +def make_output_directory(test_directory): return test_directory / "output" -@pytest.fixture( - name="remove_output", - scope="session" -) -def make_remove_output( - output_directory -): +@pytest.fixture(name="remove_output", scope="session") +def make_remove_output(output_directory): def remove_output(): try: for item in os.listdir(output_directory): @@ -51,27 +38,18 @@ def remove_output(): item_path = output_directory / item if item_path.is_dir(): shutil.rmtree( - item_path, - ignore_errors=True, + item_path, ignore_errors=True, ) else: - os.remove( - item_path - ) - except FileExistsError: + os.remove(item_path) + except (FileExistsError, FileNotFoundError): pass return remove_output -@pytest.fixture( - autouse=True, - scope="session" -) -def do_remove_output( - output_directory, - remove_output -): +@pytest.fixture(autouse=True, scope="session") +def do_remove_output(output_directory, remove_output): yield remove_output() @@ -93,7 +71,7 @@ def make_plot_patch(monkeypatch): @pytest.fixture(name="session") def make_session(): - engine = sa.create_engine('sqlite://') + engine = sa.create_engine("sqlite://") session = sa.orm.sessionmaker(bind=engine)() db.Base.metadata.create_all(engine) yield session @@ -101,10 +79,7 @@ def make_session(): engine.dispose() -@pytest.fixture( - autouse=True, - scope="session" -) +@pytest.fixture(autouse=True, scope="session") def remove_logs(): yield for d, _, files in os.walk(directory): @@ -117,13 +92,11 @@ def remove_logs(): def set_config_path(): conf.instance.push( new_path=path.join(directory, "config"), - output_path=path.join(directory, "output") + output_path=path.join(directory, "output"), ) -@pytest.fixture( - name="model_gaussian_x1" -) +@pytest.fixture(name="model_gaussian_x1") def make_model_gaussian_x1(): return fixtures.make_model_gaussian_x1() diff --git a/test_autofit/non_linear/test_fit_sequential.py b/test_autofit/non_linear/test_fit_sequential.py new file mode 100644 index 000000000..df8ed131c --- /dev/null +++ b/test_autofit/non_linear/test_fit_sequential.py @@ -0,0 +1,66 @@ +import os +from pathlib import Path +from random import random + +import pytest + +import autofit as af +from autofit.non_linear.analysis.combined import CombinedResult + + +class Analysis(af.Analysis): + def log_likelihood_function(self, instance): + return -random() + + +@pytest.fixture(name="search") +def make_search(): + return af.LBFGS(name="test_lbfgs") + + +@pytest.fixture(name="model") +def make_model(): + return af.Model(af.Gaussian) + + +@pytest.fixture(name="analysis") +def make_analysis(): + return Analysis() + + +def count_output(paths): + return len(os.listdir(Path(str(paths)).parent)) + + +def test_with_model(analysis, model, search): + combined_analysis = sum([analysis.with_model(model) for _ in range(10)]) + + result = search.fit_sequential(model=model, analysis=combined_analysis) + + assert count_output(search.paths) == 10 + assert len(result.child_results) == 10 + + +@pytest.fixture(name="combined_analysis") +def make_combined_analysis(analysis): + return sum([analysis for _ in range(10)]) + + +def test_combined_analysis(combined_analysis, model, search): + search.fit_sequential(model=model, analysis=combined_analysis) + + assert count_output(search.paths) == 10 + + +def test_with_free_parameter(combined_analysis, model, search): + combined_analysis = combined_analysis.with_free_parameters([model.centre]) + search.fit_sequential( + model=model, analysis=combined_analysis, + ) + + assert count_output(search.paths) == 10 + + +def test_singular_analysis(analysis, model, search): + with pytest.raises(ValueError): + search.fit_sequential(model=model, analysis=analysis) From d722bb36c8b49ef48e721c3a9171a4105f8f2aa1 Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 16 Jan 2023 09:08:40 +0000 Subject: [PATCH 158/226] unused import --- test_autofit/non_linear/test_fit_sequential.py | 1 - 1 file changed, 1 deletion(-) diff --git a/test_autofit/non_linear/test_fit_sequential.py b/test_autofit/non_linear/test_fit_sequential.py index df8ed131c..ee9765fd4 100644 --- a/test_autofit/non_linear/test_fit_sequential.py +++ b/test_autofit/non_linear/test_fit_sequential.py @@ -5,7 +5,6 @@ import pytest import autofit as af -from autofit.non_linear.analysis.combined import CombinedResult class Analysis(af.Analysis): From 787ba5f5f25786dd5d31c4c1410fb4698ffc8f83 Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 16 Jan 2023 09:11:39 +0000 Subject: [PATCH 159/226] make combined result subscriptable --- autofit/non_linear/analysis/combined.py | 25 ++++++++++++++++--- .../non_linear/test_fit_sequential.py | 10 ++++++++ 2 files changed, 32 insertions(+), 3 deletions(-) diff --git a/autofit/non_linear/analysis/combined.py b/autofit/non_linear/analysis/combined.py index 93d8ed371..4bb792222 100644 --- a/autofit/non_linear/analysis/combined.py +++ b/autofit/non_linear/analysis/combined.py @@ -1,5 +1,5 @@ import logging -from typing import Union +from typing import Union, List from autoconf import conf from autofit.mapper.prior.abstract import Prior @@ -14,10 +14,23 @@ class CombinedResult: - def __init__(self, results): + def __init__(self, results: List[Result]): + """ + A `Result` object that is composed of multiple `Result` objects. This is used to combine the results of + multiple `Analysis` objects into a single `Result` object, for example when performing a model-fitting + analysis where there are multiple datasets. + + Parameters + ---------- + results + The list of `Result` objects that are combined into this `CombinedResult` object. + """ self.child_results = results - def __getattr__(self, item): + def __getattr__(self, item: str): + """ + Get an attribute of the first `Result` object in the list of `Result` objects. + """ return getattr(self.child_results[0], item) def __iter__(self): @@ -26,6 +39,12 @@ def __iter__(self): def __len__(self): return len(self.child_results) + def __getitem__(self, item: int) -> Result: + """ + Get a `Result` object from the list of `Result` objects. + """ + return self.child_results[item] + class CombinedAnalysis(Analysis): def __new__(cls, *analyses, **kwargs): diff --git a/test_autofit/non_linear/test_fit_sequential.py b/test_autofit/non_linear/test_fit_sequential.py index ee9765fd4..7f1d3faa0 100644 --- a/test_autofit/non_linear/test_fit_sequential.py +++ b/test_autofit/non_linear/test_fit_sequential.py @@ -5,6 +5,7 @@ import pytest import autofit as af +from autofit.non_linear.analysis.combined import CombinedResult class Analysis(af.Analysis): @@ -63,3 +64,12 @@ def test_with_free_parameter(combined_analysis, model, search): def test_singular_analysis(analysis, model, search): with pytest.raises(ValueError): search.fit_sequential(model=model, analysis=analysis) + + +# noinspection PyTypeChecker +def test_index_combined_result(): + combined_result = CombinedResult([0, 1, 2]) + + assert combined_result[0] == 0 + assert combined_result[1] == 1 + assert combined_result[2] == 2 From ac60b4ac6a50f41f204213dceb2ac831d06f9341 Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Mon, 16 Jan 2023 14:16:28 +0000 Subject: [PATCH 160/226] configs --- autofit/config/non_linear/optimize.yaml | 2 +- autofit/config/visualize/plots_search.yaml | 4 ++-- test_autofit/config/general.yaml | 1 - 3 files changed, 3 insertions(+), 4 deletions(-) diff --git a/autofit/config/non_linear/optimize.yaml b/autofit/config/non_linear/optimize.yaml index 8cf3d0fcf..007c93c63 100644 --- a/autofit/config/non_linear/optimize.yaml +++ b/autofit/config/non_linear/optimize.yaml @@ -5,7 +5,7 @@ # - PySwarms: https://github.com/ljvmiranda921/pyswarms / https://pyswarms.readthedocs.io/en/latest/index.html # Settings in the [search], [run] and [options] entries are specific to each nested algorithm and should be -# determined by consulting that MCMC method's own readthedocs. +# determined by consulting that optimizers method's own readthedocs. PySwarmsGlobal: run: diff --git a/autofit/config/visualize/plots_search.yaml b/autofit/config/visualize/plots_search.yaml index a536bb7b0..bb312a35d 100644 --- a/autofit/config/visualize/plots_search.yaml +++ b/autofit/config/visualize/plots_search.yaml @@ -1,6 +1,6 @@ dynesty: - cornerplot: true - cornerpoints: false # Output Dynesty cornerplot figure during a non-linear search fit? + cornerplot: true # Output Dynesty cornerplot figure during a non-linear search fit? + cornerpoints: false # Output Dynesty cornerpoints figure during a non-linear search fit? runplot: true # Output Dynesty runplot figure during a non-linear search fit? traceplot: true # Output Dynesty traceplot figure during a non-linear search fit? emcee: diff --git a/test_autofit/config/general.yaml b/test_autofit/config/general.yaml index b547327a4..06ea56184 100644 --- a/test_autofit/config/general.yaml +++ b/test_autofit/config/general.yaml @@ -7,7 +7,6 @@ model: ignore_prior_limits: false # If ``True`` the limits applied to priors will be ignored, where limits set upper / lower limits. This stops PriorLimitException's from being raised. output: force_pickle_overwrite: false # If True pickle files output by a search (e.g. samples.pickle) are recreated when a new model-fit is performed. - identifier_version: 4 info_whitespace_length: 80 # Length of whitespace between the parameter names and values in the model.info / result.info log_level: INFO # The level of information output by logging. log_to_file: false # If True, outputs the non-linear search log to a file (and not printed to screen). From 5cf343ca69a89faf28c0784e73a7ae34791cf482 Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Mon, 16 Jan 2023 15:16:33 +0000 Subject: [PATCH 161/226] make dynesty force_x1_cpu optional --- autofit/non_linear/nest/dynesty/abstract.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autofit/non_linear/nest/dynesty/abstract.py b/autofit/non_linear/nest/dynesty/abstract.py index cf86716f2..4bea32179 100644 --- a/autofit/non_linear/nest/dynesty/abstract.py +++ b/autofit/non_linear/nest/dynesty/abstract.py @@ -160,7 +160,7 @@ def _fit( if conf.instance["non_linear"]["nest"][self.__class__.__name__][ "parallel" - ]["force_x1_cpu"] or self.kwargs.get("force_x1_cpu"): + ].get("force_x1_cpu") or self.kwargs.get("force_x1_cpu"): raise RuntimeError From e8c58312c52fa23e0b92130f9325cefc4af88da9 Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Wed, 18 Jan 2023 09:59:56 +0000 Subject: [PATCH 162/226] ultranest module now uses try except decorrator, docs --- autofit/non_linear/nest/dynesty/plotter.py | 40 +----------------- autofit/non_linear/nest/ultranest/plotter.py | 43 +++++++++++++------- autofit/plot/samples_plotters.py | 34 ++++++++++++++++ 3 files changed, 65 insertions(+), 52 deletions(-) diff --git a/autofit/non_linear/nest/dynesty/plotter.py b/autofit/non_linear/nest/dynesty/plotter.py index 0bb96294f..d14e884ef 100644 --- a/autofit/non_linear/nest/dynesty/plotter.py +++ b/autofit/non_linear/nest/dynesty/plotter.py @@ -1,47 +1,11 @@ from dynesty import plotting as dyplot -from functools import wraps -import logging from autofit.plot import SamplesPlotter -from autofit.plot.samples_plotters import skip_plot_in_test_mode - -logger = logging.getLogger(__name__) - -def log_value_error(func): - @wraps(func) - def wrapper(self, *args, **kwargs): - - try: - return func(self, *args, **kwargs) - except ValueError: - self.log_plot_exception(func.__name__) - - return wrapper +from autofit.plot.samples_plotters import skip_plot_in_test_mode +from autofit.plot.samples_plotters import log_value_error class DynestyPlotter(SamplesPlotter): - - @staticmethod - def log_plot_exception(plot_name : str): - """ - Plotting the results of a ``dynesty`` model-fit before they have converged on an - accurate estimate of the posterior can lead the ``dynesty`` plotting routines - to raise a ``ValueError``. - - This exception is caught in each of the plotting methods below, and this - function is used to log the behaviour. - - Parameters - ---------- - plot_name - The name of the ``dynesty`` plot which raised a ``ValueError`` - """ - - logger.info( - f"Dynesty unable to produce {plot_name} visual: posterior estimate therefore" - "not yet sufficient for this model-fit is not yet robust enough to do this. Visual" - "should be produced in later update, once posterior estimate is updated." - ) @skip_plot_in_test_mode def boundplot(self, **kwargs): diff --git a/autofit/non_linear/nest/ultranest/plotter.py b/autofit/non_linear/nest/ultranest/plotter.py index c6d83d431..6adeac060 100644 --- a/autofit/non_linear/nest/ultranest/plotter.py +++ b/autofit/non_linear/nest/ultranest/plotter.py @@ -1,9 +1,18 @@ from autofit.plot import SamplesPlotter +from autofit.plot.samples_plotters import skip_plot_in_test_mode +from autofit.plot.samples_plotters import log_value_error class UltraNestPlotter(SamplesPlotter): + @skip_plot_in_test_mode + @log_value_error def cornerplot(self, **kwargs): + """ + Plots the in-built ``ultranest`` plot ``cornerplot``. + + This figure plots a corner plot of the 1-D and 2-D marginalized posteriors. + """ from ultranest import plot @@ -15,32 +24,38 @@ def cornerplot(self, **kwargs): self.output.to_figure(structure=None, auto_filename="cornerplot") self.close() + @skip_plot_in_test_mode + @log_value_error def runplot(self, **kwargs): + """ + Plots the in-built ``ultranest`` plot ``runplot``. + This figure plots live points, ln(likelihood), ln(weight), and ln(evidence) vs. ln(prior volume). + """ from ultranest import plot - try: - plot.runplot( - results=self.samples.results_internal, - **kwargs - ) - except KeyError: - pass + plot.runplot( + results=self.samples.results_internal, + **kwargs + ) self.output.to_figure(structure=None, auto_filename="runplot") self.close() + @skip_plot_in_test_mode + @log_value_error def traceplot(self, **kwargs): + """ + Plots the in-built ``ultranest`` plot ``traceplot``. + This figure plots traces and marginalized posteriors for each parameter. + """ from ultranest import plot - try: - plot.traceplot( - results=self.samples.results_internal, - **kwargs - ) - except KeyError: - pass + plot.traceplot( + results=self.samples.results_internal, + **kwargs + ) self.output.to_figure(structure=None, auto_filename="traceplot") self.close() \ No newline at end of file diff --git a/autofit/plot/samples_plotters.py b/autofit/plot/samples_plotters.py index f4d1a0d09..aa08ca52c 100644 --- a/autofit/plot/samples_plotters.py +++ b/autofit/plot/samples_plotters.py @@ -1,9 +1,23 @@ import matplotlib.pyplot as plt from functools import wraps +import logging import os from autofit.plot.output import Output +logger = logging.getLogger(__name__) + +def log_value_error(func): + + @wraps(func) + def wrapper(self, *args, **kwargs): + + try: + return func(self, *args, **kwargs) + except (ValueError, KeyError, AttributeError): + self.log_plot_exception(func.__name__) + + return wrapper def skip_plot_in_test_mode(func): """ @@ -65,6 +79,26 @@ def close(self): if plt.fignum_exists(num=1): plt.close() + def log_plot_exception(self, plot_name : str): + """ + Plotting the results of a ``dynesty`` model-fit before they have converged on an + accurate estimate of the posterior can lead the ``dynesty`` plotting routines + to raise a ``ValueError``. + + This exception is caught in each of the plotting methods below, and this + function is used to log the behaviour. + + Parameters + ---------- + plot_name + The name of the ``dynesty`` plot which raised a ``ValueError`` + """ + + logger.info( + f"{self.__class__.__name__} unable to produce {plot_name} visual: posterior estimate therefore" + "not yet sufficient for this model-fit is not yet robust enough to do this. Visual" + "should be produced in later update, once posterior estimate is updated." + ) class MCMCPlotter(SamplesPlotter): From 2e38718ee236926fc6794c06b498a0621ec1d0cc Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Wed, 18 Jan 2023 12:57:53 +0000 Subject: [PATCH 163/226] missing exception --- autofit/plot/samples_plotters.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autofit/plot/samples_plotters.py b/autofit/plot/samples_plotters.py index aa08ca52c..93f127cec 100644 --- a/autofit/plot/samples_plotters.py +++ b/autofit/plot/samples_plotters.py @@ -14,7 +14,7 @@ def wrapper(self, *args, **kwargs): try: return func(self, *args, **kwargs) - except (ValueError, KeyError, AttributeError): + except (ValueError, KeyError, AttributeError, AssertionError): self.log_plot_exception(func.__name__) return wrapper From 0d18f5b5f20967d2d2d478b78d23469ae218f74e Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 30 Jan 2023 10:33:57 +0000 Subject: [PATCH 164/226] find models with type recursively --- autofit/mapper/model.py | 4 +++- test_autofit/mapper/test_has.py | 22 ++++++++++++++++++++++ 2 files changed, 25 insertions(+), 1 deletion(-) diff --git a/autofit/mapper/model.py b/autofit/mapper/model.py index 98d67e894..84603aa07 100644 --- a/autofit/mapper/model.py +++ b/autofit/mapper/model.py @@ -269,7 +269,9 @@ def model_tuples_with_type( return [ (path, model) - for path, model in self.attribute_tuples_with_type(Model) + for path, model in self.attribute_tuples_with_type( + Model, ignore_children=False + ) if issubclass(model.cls, cls) and (include_zero_dimension or model.prior_count > 0) ] diff --git a/test_autofit/mapper/test_has.py b/test_autofit/mapper/test_has.py index ffdf09445..b5cd8803b 100644 --- a/test_autofit/mapper/test_has.py +++ b/test_autofit/mapper/test_has.py @@ -79,3 +79,25 @@ def test_multiple_types(collection): collection.gaussian, collection.exponential, ] + + +class Galaxy: + def __init__(self, child): + self.child = child + + +def test_instances_with_type(): + model = af.Collection(galaxy=Galaxy(child=af.Model(af.Gaussian))) + assert model.models_with_type(af.Gaussian) == [model.galaxy.child] + + +class DelaunayBrightnessImage: + pass + + +def test_model_attributes_with_type(): + mesh = af.Model(DelaunayBrightnessImage) + mesh.pixels = af.UniformPrior(lower_limit=5.0, upper_limit=10.0) + pixelization = af.Model(af.Gaussian, mesh=mesh) + + assert pixelization.models_with_type(DelaunayBrightnessImage) == [mesh] From 90e21dbaa302d3765b60d8b1e7a82ec8a82589e8 Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 30 Jan 2023 12:10:02 +0000 Subject: [PATCH 165/226] added result properties --- autofit/non_linear/grid/sensitivity.py | 203 ++++++------------ .../grid/test_sensitivity/test_results.py | 30 +++ 2 files changed, 101 insertions(+), 132 deletions(-) create mode 100644 test_autofit/non_linear/grid/test_sensitivity/test_results.py diff --git a/autofit/non_linear/grid/sensitivity.py b/autofit/non_linear/grid/sensitivity.py index 99a86d6eb..12af4fef0 100644 --- a/autofit/non_linear/grid/sensitivity.py +++ b/autofit/non_linear/grid/sensitivity.py @@ -18,12 +18,7 @@ class JobResult(AbstractJobResult): - def __init__( - self, - number: int, - result: Result, - perturbed_result: Result - ): + def __init__(self, number: int, result: Result, perturbed_result: Result): """ The result of a single sensitivity comparison @@ -55,14 +50,14 @@ class Job(AbstractJob): use_instance = False def __init__( - self, - analysis_factory: "AnalysisFactory", - model: AbstractPriorModel, - perturbation_model: AbstractPriorModel, - base_instance: ModelInstance, - perturbation_instance: ModelInstance, - search: NonLinearSearch, - number: int, + self, + analysis_factory: "AnalysisFactory", + model: AbstractPriorModel, + perturbation_model: AbstractPriorModel, + base_instance: ModelInstance, + perturbation_instance: ModelInstance, + search: NonLinearSearch, + number: int, ): """ Job to run non-linear searches comparing how well a model and a model with a perturbation @@ -79,9 +74,7 @@ def __init__( search A non-linear search """ - super().__init__( - number=number - ) + super().__init__(number=number) self.analysis_factory = analysis_factory self.model = model @@ -90,15 +83,9 @@ def __init__( self.base_instance = base_instance self.perturbation_instance = perturbation_instance - self.search = search.copy_with_paths( - search.paths.for_sub_analysis( - "[base]", - ) - ) + self.search = search.copy_with_paths(search.paths.for_sub_analysis("[base]",)) self.perturbed_search = search.copy_with_paths( - search.paths.for_sub_analysis( - "[perturbed]", - ) + search.paths.for_sub_analysis("[perturbed]",) ) @cached_property @@ -126,26 +113,17 @@ def perform(self) -> JobResult: perturbed_result = self.perturbation_model_func(perturbed_model=perturbed_model) return JobResult( - number=self.number, - result=result, - perturbed_result=perturbed_result + number=self.number, result=result, perturbed_result=perturbed_result ) def base_model_func(self): - return self.search.fit( - model=self.model, - analysis=self.analysis - ) + return self.search.fit(model=self.model, analysis=self.analysis) def perturbation_model_func(self, perturbed_model): - return self.perturbed_search.fit( - model=perturbed_model, - analysis=self.analysis - ) + return self.perturbed_search.fit(model=perturbed_model, analysis=self.analysis) class SensitivityResult: - def __init__(self, results: List[JobResult]): self.results = sorted(results) @@ -158,21 +136,32 @@ def __iter__(self): def __len__(self): return len(self.results) + @property + def log_likelihoods_base(self): + return [result.log_likelihood_base for result in self.results] + + @property + def log_likelihoods_perturbed(self): + return [result.log_likelihood_perturbed for result in self.results] + + @property + def log_likelihood_differences(self): + return [result.log_likelihood_difference for result in self.results] -class Sensitivity: +class Sensitivity: def __init__( - self, - base_model: AbstractPriorModel, - perturbation_model: AbstractPriorModel, - simulation_instance, - simulate_function: Callable, - analysis_class: Type[Analysis], - search: NonLinearSearch, - job_cls: ClassVar = Job, - number_of_steps: Union[Tuple[int], int] = 4, - number_of_cores: int = 2, - limit_scale: int = 1, + self, + base_model: AbstractPriorModel, + perturbation_model: AbstractPriorModel, + simulation_instance, + simulate_function: Callable, + analysis_class: Type[Analysis], + search: NonLinearSearch, + job_cls: ClassVar = Job, + number_of_steps: Union[Tuple[int], int] = 4, + number_of_cores: int = 2, + limit_scale: int = 1, ): """ Perform sensitivity mapping to evaluate whether a perturbation @@ -212,9 +201,7 @@ def __init__( A scale of 0.5 means priors have limits smaller than the grid square with width half a grid square. """ - self.logger = logging.getLogger( - f"Sensitivity ({search.name})" - ) + self.logger = logging.getLogger(f"Sensitivity ({search.name})") self.logger.info("Creating") @@ -243,7 +230,9 @@ def step_size(self): The size of a step in any given dimension in hyper space. """ if isinstance(self.number_of_steps, tuple): - return tuple([1 / number_of_steps for number_of_steps in self.number_of_steps]) + return tuple( + [1 / number_of_steps for number_of_steps in self.number_of_steps] + ) return 1 / self.number_of_steps def run(self) -> SensitivityResult: @@ -258,14 +247,13 @@ def run(self) -> SensitivityResult: *self._headers, "log_likelihood_base", "log_likelihood_perturbed", - "log_likelihood_difference" + "log_likelihood_difference", ] physical_values = list(self._physical_values) results = list() for result in Process.run_jobs( - self._make_jobs(), - number_of_cores=self.number_of_cores + self._make_jobs(), number_of_cores=self.number_of_cores ): if isinstance(result, Exception): raise result @@ -273,17 +261,12 @@ def run(self) -> SensitivityResult: results.append(result) results = sorted(results) - os.makedirs( - self.search.paths.output_path, - exist_ok=True - ) + os.makedirs(self.search.paths.output_path, exist_ok=True) with open(self.results_path, "w+") as f: writer = csv.writer(f) writer.writerow(headers) for result_ in results: - values = physical_values[ - result_.number - ] + values = physical_values[result_.number] writer.writerow( padding(item) for item in [ @@ -292,15 +275,14 @@ def run(self) -> SensitivityResult: result_.log_likelihood_base, result_.log_likelihood_perturbed, result_.log_likelihood_difference, - ]) + ] + ) return SensitivityResult(results) @property def results_path(self): - return Path( - self.search.paths.output_path - ) / "results.csv" + return Path(self.search.paths.output_path) / "results.csv" @property def _lists(self) -> List[List[float]]: @@ -309,10 +291,7 @@ def _lists(self) -> List[List[float]]: the perturbation_model and create the individual perturbations. """ - return make_lists( - self.perturbation_model.prior_count, - step_size=self.step_size - ) + return make_lists(self.perturbation_model.prior_count, step_size=self.step_size) @property def _physical_values(self) -> List[List[float]]: @@ -321,14 +300,10 @@ def _physical_values(self) -> List[List[float]]: """ return [ [ - prior.value_for( - unit_value + prior.value_for(unit_value) + for prior, unit_value in zip( + self.perturbation_model.priors_ordered_by_id, unit_values ) - for prior, unit_value - in zip( - self.perturbation_model.priors_ordered_by_id, - unit_values - ) ] for unit_values in self._lists ] @@ -350,36 +325,23 @@ def _labels(self) -> Generator[str, None, None]: """ for list_ in self._lists: strings = list() - for value, prior_tuple in zip( - list_, - self.perturbation_model.prior_tuples - ): + for value, prior_tuple in zip(list_, self.perturbation_model.prior_tuples): path, prior = prior_tuple - value = prior.value_for( - value - ) - strings.append( - f"{path}_{value}" - ) + value = prior.value_for(value) + strings.append(f"{path}_{value}") yield "_".join(strings) @property - def _perturbation_instances(self) -> Generator[ - ModelInstance, None, None - ]: + def _perturbation_instances(self) -> Generator[ModelInstance, None, None]: """ A list of instances each of which defines a perturbation to be applied to the image. """ for list_ in self._lists: - yield self.perturbation_model.instance_from_unit_vector( - list_ - ) + yield self.perturbation_model.instance_from_unit_vector(list_) @property - def _perturbation_models(self) -> Generator[ - AbstractPriorModel, None, None - ]: + def _perturbation_models(self) -> Generator[AbstractPriorModel, None, None]: """ A list of models representing a perturbation at each grid square. @@ -395,29 +357,21 @@ def _perturbation_models(self) -> Generator[ prior.value_for(min(1.0, centre + half_step)), ) for centre, prior in zip( - list_, - self.perturbation_model.priors_ordered_by_id + list_, self.perturbation_model.priors_ordered_by_id ) ] yield self.perturbation_model.with_limits(limits) @property - def _searches(self) -> Generator[ - NonLinearSearch, None, None - ]: + def _searches(self) -> Generator[NonLinearSearch, None, None]: """ A list of non-linear searches, each of which is applied to one perturbation. """ for label in self._labels: - yield self._search_instance( - label - ) + yield self._search_instance(label) - def _search_instance( - self, - name_path: str - ) -> NonLinearSearch: + def _search_instance(self, name_path: str) -> NonLinearSearch: """ Create a search instance, distinguished by its name @@ -432,9 +386,7 @@ def _search_instance( """ paths = self.search.paths search_instance = self.search.copy_with_paths( - paths.for_sub_analysis( - name_path, - ) + paths.for_sub_analysis(name_path,) ) return search_instance @@ -446,15 +398,9 @@ def _make_jobs(self) -> Generator[Job, None, None]: Each job fits a perturbed image with the original model and a model which includes a perturbation. """ - for number, ( - perturbation_instance, - perturbation_model, - search - ) in enumerate(zip( - self._perturbation_instances, - self._perturbation_models, - self._searches - )): + for number, (perturbation_instance, perturbation_model, search) in enumerate( + zip(self._perturbation_instances, self._perturbation_models, self._searches) + ): instance = copy(self.instance) instance.perturbation = perturbation_instance @@ -469,16 +415,13 @@ def _make_jobs(self) -> Generator[Job, None, None]: base_instance=self.instance, perturbation_instance=perturbation_instance, search=search, - number=number + number=number, ) class AnalysisFactory: def __init__( - self, - instance, - simulate_function, - analysis_class, + self, instance, simulate_function, analysis_class, ): """ Callable to delay simulation such that it is performed @@ -489,9 +432,5 @@ def __init__( self.analysis_class = analysis_class def __call__(self): - dataset = self.simulate_function( - self.instance - ) - return self.analysis_class( - dataset - ) + dataset = self.simulate_function(self.instance) + return self.analysis_class(dataset) diff --git a/test_autofit/non_linear/grid/test_sensitivity/test_results.py b/test_autofit/non_linear/grid/test_sensitivity/test_results.py new file mode 100644 index 000000000..7a9a79bb4 --- /dev/null +++ b/test_autofit/non_linear/grid/test_sensitivity/test_results.py @@ -0,0 +1,30 @@ +from autofit.non_linear.grid.sensitivity import SensitivityResult, JobResult +import pytest + + +class Result: + def __init__(self, log_likelihood): + self.log_likelihood = log_likelihood + + +@pytest.fixture(name="job_result") +def make_result(): + return JobResult( + number=0, + result=Result(log_likelihood=1.0), + perturbed_result=Result(log_likelihood=2.0), + ) + + +def test_job_result(job_result): + assert job_result.log_likelihood_base == 1.0 + assert job_result.log_likelihood_perturbed == 2.0 + assert job_result.log_likelihood_difference == 1.0 + + +def test_result(job_result): + result = SensitivityResult(results=[job_result]) + + assert result.log_likelihoods_base == [1.0] + assert result.log_likelihoods_perturbed == [2.0] + assert result.log_likelihood_differences == [1.0] From 55c310ad945ecfbc792fcf22d1a538aead254e42 Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 30 Jan 2023 12:10:15 +0000 Subject: [PATCH 166/226] black formatting of fit module --- autofit/database/model/fit.py | 288 +++++++--------------------------- 1 file changed, 58 insertions(+), 230 deletions(-) diff --git a/autofit/database/model/fit.py b/autofit/database/model/fit.py index c1b6a18f9..1951a92ac 100644 --- a/autofit/database/model/fit.py +++ b/autofit/database/model/fit.py @@ -18,48 +18,26 @@ class Pickle(Base): def __init__(self, **kwargs): super().__init__(**kwargs) - id = sa.Column( - sa.Integer, - primary_key=True - ) + id = sa.Column(sa.Integer, primary_key=True) - name = sa.Column( - sa.String - ) - string = sa.Column( - sa.String - ) - fit_id = sa.Column( - sa.String, - sa.ForeignKey( - "fit.id" - ) - ) - fit = sa.orm.relationship( - "Fit", - uselist=False - ) + name = sa.Column(sa.String) + string = sa.Column(sa.String) + fit_id = sa.Column(sa.String, sa.ForeignKey("fit.id")) + fit = sa.orm.relationship("Fit", uselist=False) @property def value(self): """ The unpickled object """ - if isinstance( - self.string, - str - ): + if isinstance(self.string, str): return self.string - return pickle.loads( - self.string - ) + return pickle.loads(self.string) @value.setter def value(self, value): try: - self.string = pickle.dumps( - value - ) + self.string = pickle.dumps(value) except pickle.PicklingError: pass @@ -67,24 +45,13 @@ def value(self, value): class Info(Base): __tablename__ = "info" - id = sa.Column( - sa.Integer, - primary_key=True - ) + id = sa.Column(sa.Integer, primary_key=True) key = sa.Column(sa.String) value = sa.Column(sa.String) - fit_id = sa.Column( - sa.String, - sa.ForeignKey( - "fit.id" - ) - ) - fit = sa.orm.relationship( - "Fit", - uselist=False - ) + fit_id = sa.Column(sa.String, sa.ForeignKey("fit.id")) + fit = sa.orm.relationship("Fit", uselist=False) def try_none(func): @@ -101,24 +68,13 @@ def wrapper(*args, **kwargs): class NamedInstance(Base): __tablename__ = "named_instance" - id = sa.Column( - sa.Integer, - primary_key=True - ) + id = sa.Column(sa.Integer, primary_key=True) name = sa.Column(sa.String) - instance_id = sa.Column( - sa.Integer, - sa.ForeignKey( - "object.id" - ) - ) + instance_id = sa.Column(sa.Integer, sa.ForeignKey("object.id")) __instance = sa.orm.relationship( - "Object", - uselist=False, - backref="named_instance", - foreign_keys=[instance_id] + "Object", uselist=False, backref="named_instance", foreign_keys=[instance_id] ) @property @@ -131,20 +87,10 @@ def instance(self): @instance.setter def instance(self, instance): - self.__instance = Object.from_object( - instance - ) - - fit_id = sa.Column( - sa.String, - sa.ForeignKey( - "fit.id" - ) - ) - fit = sa.orm.relationship( - "Fit", - uselist=False - ) + self.__instance = Object.from_object(instance) + + fit_id = sa.Column(sa.String, sa.ForeignKey("fit.id")) + fit = sa.orm.relationship("Fit", uselist=False) # noinspection PyProtectedMember @@ -167,56 +113,36 @@ def __getitem__(self, item: str): Raises a KeyError if no such instance exists. """ - return self._get_named_instance( - item - ).instance + return self._get_named_instance(item).instance def __setitem__(self, key: str, value): """ Set an instance for a given name """ try: - named_instance = self._get_named_instance( - key - ) + named_instance = self._get_named_instance(key) except KeyError: - named_instance = NamedInstance( - name=key - ) - self.fit._named_instances.append( - named_instance - ) + named_instance = NamedInstance(name=key) + self.fit._named_instances.append(named_instance) named_instance.instance = value - def _get_named_instance( - self, - item: str - ) -> "NamedInstance": + def _get_named_instance(self, item: str) -> "NamedInstance": """ Retrieve a NamedInstance by its name. """ for named_instance in self.fit._named_instances: if named_instance.name == item: return named_instance - raise KeyError( - f"Instance {item} not found" - ) + raise KeyError(f"Instance {item} not found") class Fit(Base): __tablename__ = "fit" - id = sa.Column( - sa.String, - primary_key=True, - ) - is_complete = sa.Column( - sa.Boolean - ) + id = sa.Column(sa.String, primary_key=True,) + is_complete = sa.Column(sa.Boolean) - _named_instances: List[NamedInstance] = sa.orm.relationship( - "NamedInstance" - ) + _named_instances: List[NamedInstance] = sa.orm.relationship("NamedInstance") @property @try_none @@ -228,45 +154,23 @@ def instance(self): @instance.setter def instance(self, instance): - self.__instance = Object.from_object( - instance - ) + self.__instance = Object.from_object(instance) @property def named_instances(self): - return NamedInstancesWrapper( - self - ) + return NamedInstancesWrapper(self) - _info: List[Info] = sa.orm.relationship( - "Info" - ) + _info: List[Info] = sa.orm.relationship("Info") - def __init__( - self, - **kwargs - ): - super().__init__( - **kwargs - ) + def __init__(self, **kwargs): + super().__init__(**kwargs) - max_log_likelihood = sa.Column( - sa.Float - ) + max_log_likelihood = sa.Column(sa.Float) - parent_id = sa.Column( - sa.String, - sa.ForeignKey( - "fit.id" - ) - ) + parent_id = sa.Column(sa.String, sa.ForeignKey("fit.id")) children: List["Fit"] = sa.orm.relationship( - "Fit", - backref=sa.orm.backref( - 'parent', - remote_side=[id] - ) + "Fit", backref=sa.orm.backref("parent", remote_side=[id]) ) @property @@ -276,13 +180,9 @@ def best_fit(self) -> "Fit": the highest log likelihood. """ if not self.is_grid_search: - raise TypeError( - f"Fit {self.id} is not a grid search" - ) + raise TypeError(f"Fit {self.id} is not a grid search") if len(self.children) == 0: - raise TypeError( - f"Grid search fit {self.id} has no children" - ) + raise TypeError(f"Grid search fit {self.id} has no children") best_fit = None max_log_likelihood = float("-inf") @@ -294,26 +194,14 @@ def best_fit(self) -> "Fit": return best_fit - is_grid_search = sa.Column( - sa.Boolean - ) + is_grid_search = sa.Column(sa.Boolean) - unique_tag = sa.Column( - sa.String - ) - name = sa.Column( - sa.String - ) - path_prefix = sa.Column( - sa.String - ) + unique_tag = sa.Column(sa.String) + name = sa.Column(sa.String) + path_prefix = sa.Column(sa.String) _samples = sa.orm.relationship( - Object, - uselist=False, - foreign_keys=[ - Object.samples_for_id - ] + Object, uselist=False, foreign_keys=[Object.samples_for_id] ) @property @@ -323,29 +211,16 @@ def samples(self) -> Samples: @samples.setter def samples(self, samples): - self._samples = Object.from_object( - samples - ) + self._samples = Object.from_object(samples) @property def info(self): - return { - info.key: info.value - for info - in self._info - } + return {info.key: info.value for info in self._info} @info.setter def info(self, info): if info is not None: - self._info = [ - Info( - key=key, - value=value - ) - for key, value - in info.items() - ] + self._info = [Info(key=key, value=value) for key, value in info.items()] @property @try_none @@ -357,14 +232,9 @@ def model(self) -> AbstractPriorModel: @model.setter def model(self, model: AbstractPriorModel): - self.__model = Object.from_object( - model - ) + self.__model = Object.from_object(model) - pickles: List[Pickle] = sa.orm.relationship( - "Pickle", - lazy="joined" - ) + pickles: List[Pickle] = sa.orm.relationship("Pickle", lazy="joined") def __getitem__(self, item: str): """ @@ -385,10 +255,7 @@ def __getitem__(self, item: str): for p in self.pickles: if p.name == item: return p.value - return getattr( - self, - item - ) + return getattr(self, item) def __contains__(self, item): for p in self.pickles: @@ -396,11 +263,7 @@ def __contains__(self, item): return True return False - def __setitem__( - self, - key: str, - value - ): + def __setitem__(self, key: str, value): """ Add a pickle. @@ -414,32 +277,15 @@ def __setitem__( value A string, bytes or object """ - new = Pickle( - name=key - ) - if isinstance( - value, - (str, bytes) - ): + new = Pickle(name=key) + if isinstance(value, (str, bytes)): new.string = value else: new.value = value - self.pickles = [ - p - for p - in self.pickles - if p.name != key - ] + [ - new - ] + self.pickles = [p for p in self.pickles if p.name != key] + [new] def __delitem__(self, key): - self.pickles = [ - p - for p - in self.pickles - if p.name != key - ] + self.pickles = [p for p in self.pickles if p.name != key] def value(self, name: str): try: @@ -447,38 +293,20 @@ def value(self, name: str): except AttributeError: return None - model_id = sa.Column( - sa.Integer, - sa.ForeignKey( - "object.id" - ) - ) + model_id = sa.Column(sa.Integer, sa.ForeignKey("object.id")) __model = sa.orm.relationship( - "Object", - uselist=False, - backref="fit_model", - foreign_keys=[model_id] + "Object", uselist=False, backref="fit_model", foreign_keys=[model_id] ) - instance_id = sa.Column( - sa.Integer, - sa.ForeignKey( - "object.id" - ) - ) + instance_id = sa.Column(sa.Integer, sa.ForeignKey("object.id")) __instance = sa.orm.relationship( - "Object", - uselist=False, - backref="fit_instance", - foreign_keys=[instance_id] + "Object", uselist=False, backref="fit_instance", foreign_keys=[instance_id] ) @classmethod def all(cls, session): - return session.query( - cls - ).all() + return session.query(cls).all() def __str__(self): return self.id From 36d53836f24f5f3c8a5f19c6f2e59299d3473b29 Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 30 Jan 2023 12:11:21 +0000 Subject: [PATCH 167/226] docs --- autofit/non_linear/grid/sensitivity.py | 23 ++++++++++++++++++++--- 1 file changed, 20 insertions(+), 3 deletions(-) diff --git a/autofit/non_linear/grid/sensitivity.py b/autofit/non_linear/grid/sensitivity.py index 12af4fef0..9bf4bbddb 100644 --- a/autofit/non_linear/grid/sensitivity.py +++ b/autofit/non_linear/grid/sensitivity.py @@ -125,6 +125,14 @@ def perturbation_model_func(self, perturbed_model): class SensitivityResult: def __init__(self, results: List[JobResult]): + """ + The result of a sensitivity mapping + + Parameters + ---------- + results + The results of each sensitivity job + """ self.results = sorted(results) def __getitem__(self, item): @@ -137,15 +145,24 @@ def __len__(self): return len(self.results) @property - def log_likelihoods_base(self): + def log_likelihoods_base(self) -> List[float]: + """ + The log likelihoods of the base model for each sensitivity fit + """ return [result.log_likelihood_base for result in self.results] @property - def log_likelihoods_perturbed(self): + def log_likelihoods_perturbed(self) -> List[float]: + """ + The log likelihoods of the perturbed model for each sensitivity fit + """ return [result.log_likelihood_perturbed for result in self.results] @property - def log_likelihood_differences(self): + def log_likelihood_differences(self) -> List[float]: + """ + The log likelihood differences between the base and perturbed models + """ return [result.log_likelihood_difference for result in self.results] From 8090c300477aefc5c29053cda6b3192ef80fae1d Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 30 Jan 2023 14:30:20 +0000 Subject: [PATCH 168/226] output .is_grid_search for senstivity --- autofit/non_linear/grid/sensitivity.py | 6 +++++ .../grid/test_sensitivity/test_run.py | 27 +++++++++---------- 2 files changed, 18 insertions(+), 15 deletions(-) diff --git a/autofit/non_linear/grid/sensitivity.py b/autofit/non_linear/grid/sensitivity.py index 9bf4bbddb..eaefc20dc 100644 --- a/autofit/non_linear/grid/sensitivity.py +++ b/autofit/non_linear/grid/sensitivity.py @@ -259,6 +259,8 @@ def run(self) -> SensitivityResult: """ self.logger.info("Running") + self.search.paths.save_unique_tag(is_grid_search=True) + headers = [ "index", *self._headers, @@ -295,6 +297,10 @@ def run(self) -> SensitivityResult: ] ) + result = SensitivityResult(results) + + self.search.paths.save_object("result", result) + return SensitivityResult(results) @property diff --git a/test_autofit/non_linear/grid/test_sensitivity/test_run.py b/test_autofit/non_linear/grid/test_sensitivity/test_run.py index 163f3bfb7..d133faa41 100644 --- a/test_autofit/non_linear/grid/test_sensitivity/test_run.py +++ b/test_autofit/non_linear/grid/test_sensitivity/test_run.py @@ -3,24 +3,21 @@ from autoconf.conf import with_config -@with_config( - "general", - "model", - "ignore_prior_limits", - value=True -) -def test_sensitivity( - sensitivity -): +@with_config("general", "model", "ignore_prior_limits", value=True) +def test_sensitivity(sensitivity): results = sensitivity.run() assert len(results) == 8 - path = Path( - sensitivity.search.paths.output_path - ) / "results.csv" + output_path = Path(sensitivity.search.paths.output_path) + + assert (output_path / ".is_grid_search").exists() + path = output_path / "results.csv" assert path.exists() with open(path) as f: all_lines = set(f) - assert 'index,centre,normalization,sigma,log_likelihood_base,log_likelihood_perturbed,log_likelihood_difference\n' in all_lines - assert ' 0, 0.25, 0.25, 0.25, 2.0, 2.0, 0.0\n' in all_lines - assert ' 1, 0.25, 0.25, 0.75, 2.0, 2.0, 0.0\n' in all_lines + assert ( + "index,centre,normalization,sigma,log_likelihood_base,log_likelihood_perturbed,log_likelihood_difference\n" + in all_lines + ) + assert " 0, 0.25, 0.25, 0.25, 2.0, 2.0, 0.0\n" in all_lines + assert " 1, 0.25, 0.25, 0.75, 2.0, 2.0, 0.0\n" in all_lines From 98dd2498babed713de6eeb28aeb10c17b7f807f2 Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Tue, 7 Feb 2023 18:45:58 +0000 Subject: [PATCH 169/226] Extend example Analysis for temporal use case --- autofit/example/analysis.py | 22 +++++++++++++-------- autofit/graphical/laplace/line_search.py | 4 ++-- autofit/graphical/laplace/newton.py | 2 +- autofit/mapper/prior/log_gaussian.py | 2 +- autofit/messages/abstract.py | 4 ++-- autofit/messages/beta.py | 2 +- autofit/non_linear/nest/dynesty/abstract.py | 7 ++++--- autofit/non_linear/nest/dynesty/dynamic.py | 5 +---- autofit/non_linear/nest/dynesty/static.py | 5 +---- 9 files changed, 27 insertions(+), 26 deletions(-) diff --git a/autofit/example/analysis.py b/autofit/example/analysis.py index b0bee1e46..5c3d90cf7 100644 --- a/autofit/example/analysis.py +++ b/autofit/example/analysis.py @@ -44,13 +44,16 @@ def log_likelihood_function(self, instance: af.ModelInstance) -> float: """ xvalues = np.arange(self.data.shape[0]) + model_data_1d = np.zeros(self.data.shape[0]) try: - model_data_1d = sum( - profile.model_data_1d_via_xvalues_from(xvalues=xvalues) for profile in instance - ) + for profile in instance: + try: + model_data_1d += profile.model_data_1d_via_xvalues_from(xvalues=xvalues) + except AttributeError: + pass except TypeError: - model_data_1d = instance.model_data_1d_via_xvalues_from(xvalues=xvalues) + model_data_1d += instance.model_data_1d_via_xvalues_from(xvalues=xvalues) residual_map = self.data - model_data_1d chi_squared_map = (residual_map / self.noise_map) ** 2.0 @@ -83,13 +86,16 @@ def visualize(self, paths: af.DirectoryPaths, instance: af.ModelInstance, during """ xvalues = np.arange(self.data.shape[0]) + model_data_1d = np.zeros(self.data.shape[0]) try: - model_data_1d = sum( - profile.model_data_1d_via_xvalues_from(xvalues=xvalues) for profile in instance - ) + for profile in instance: + try: + model_data_1d += profile.model_data_1d_via_xvalues_from(xvalues=xvalues) + except AttributeError: + pass except TypeError: - model_data_1d = instance.model_data_1d_via_xvalues_from(xvalues=xvalues) + model_data_1d += instance.model_data_1d_via_xvalues_from(xvalues=xvalues) plt.errorbar( x=xvalues, diff --git a/autofit/graphical/laplace/line_search.py b/autofit/graphical/laplace/line_search.py index 4f3e3e0f4..e2ab51125 100644 --- a/autofit/graphical/laplace/line_search.py +++ b/autofit/graphical/laplace/line_search.py @@ -34,7 +34,7 @@ def __init__(self, state, param_shapes): @classmethod def from_state(cls, state): - param_shapes = FlattenArrays.from_arrays(state.parameters) + param_shapes = FlattenArrays.from_arrays(state.params) return cls(state, param_shapes) def make_state(self, x): @@ -58,7 +58,7 @@ def _func_gradient(self, x): @property def parameters(self): - return self.param_shapes.flatten(self.state.parameters) + return self.param_shapes.flatten(self.state.params) class OptimisationState: diff --git a/autofit/graphical/laplace/newton.py b/autofit/graphical/laplace/newton.py index d667ca2eb..c97f044bd 100644 --- a/autofit/graphical/laplace/newton.py +++ b/autofit/graphical/laplace/newton.py @@ -266,7 +266,7 @@ def xtol_condition(state, old_state, xtol=1e-6, ord=None, **kwargs): if not old_state: return - dx = VariableData.sub(state.parameters, old_state.parameters).vecnorm(ord=ord) + dx = VariableData.sub(state.params, old_state.params).vecnorm(ord=ord) if dx < xtol: return True, f"Minimum parameter change tolerance achieved, {dx} < {xtol}" diff --git a/autofit/mapper/prior/log_gaussian.py b/autofit/mapper/prior/log_gaussian.py index 1eae77276..aa49c8cf6 100644 --- a/autofit/mapper/prior/log_gaussian.py +++ b/autofit/mapper/prior/log_gaussian.py @@ -68,7 +68,7 @@ def _new_for_base_message(self, message): projection. """ return LogGaussianPrior( - *message.parameters, + *message.params, lower_limit=self.lower_limit, upper_limit=self.upper_limit, id_=self.instance().id, diff --git a/autofit/messages/abstract.py b/autofit/messages/abstract.py index 2eff25c16..249fb8739 100644 --- a/autofit/messages/abstract.py +++ b/autofit/messages/abstract.py @@ -142,7 +142,7 @@ def __getitem__(self, index) -> "AbstractMessage": def __setitem__(self, index, value): self._reset_cache() - for param0, param1 in zip(self.parameters, value.parameters): + for param0, param1 in zip(self.parameters, value.params): param0[index] = param1 def merge(self, index, value): @@ -150,7 +150,7 @@ def merge(self, index, value): return cls( *( update_array(param0, index, param1) - for param0, param1 in zip(self.parameters, value.parameters) + for param0, param1 in zip(self.parameters, value.params) ) ) diff --git a/autofit/messages/beta.py b/autofit/messages/beta.py index 09abae38b..da8028f21 100644 --- a/autofit/messages/beta.py +++ b/autofit/messages/beta.py @@ -152,7 +152,7 @@ def kl(self, dist): if self._support != dist._support: raise TypeError('Support does not match') - aP, bP = dist.parameters + aP, bP = dist.params aQ, bQ = self.parameters return ( betaln(aQ, bQ) - betaln(aP, bP) diff --git a/autofit/non_linear/nest/dynesty/abstract.py b/autofit/non_linear/nest/dynesty/abstract.py index 4bea32179..9fd639c22 100644 --- a/autofit/non_linear/nest/dynesty/abstract.py +++ b/autofit/non_linear/nest/dynesty/abstract.py @@ -5,7 +5,6 @@ import numpy as np from dynesty import NestedSampler, DynamicNestedSampler -from dynesty.pool import Pool from autoconf import conf from autofit import exc @@ -137,6 +136,8 @@ def _fit( set of accepted samples of the fit. """ + from dynesty.pool import Pool + fitness_function = self.fitness_function_from_model_and_analysis( model=model, analysis=analysis, log_likelihood_cap=log_likelihood_cap, ) @@ -375,12 +376,12 @@ def sampler_from( model: AbstractPriorModel, fitness_function, checkpoint_exists: bool, - pool: Optional["Pool"], + pool: Optional, queue_size: Optional[int], ): raise NotImplementedError() - def check_pool(self, uses_pool: bool, pool: Pool): + def check_pool(self, uses_pool: bool, pool): if (uses_pool and pool is None) or (not uses_pool and pool is not None): raise exc.SearchException( diff --git a/autofit/non_linear/nest/dynesty/dynamic.py b/autofit/non_linear/nest/dynesty/dynamic.py index 8bb67feb7..9afa7acbb 100644 --- a/autofit/non_linear/nest/dynesty/dynamic.py +++ b/autofit/non_linear/nest/dynesty/dynamic.py @@ -1,9 +1,6 @@ from __future__ import annotations from typing import TYPE_CHECKING, Optional -if TYPE_CHECKING: - from dynesty.pool import Pool - from dynesty.dynesty import DynamicNestedSampler from autofit.non_linear.nest.dynesty.samples import SamplesDynesty from autofit.mapper.prior_model.abstract import AbstractPriorModel @@ -102,7 +99,7 @@ def sampler_from( model: AbstractPriorModel, fitness_function, checkpoint_exists : bool, - pool: Optional[Pool], + pool: Optional, queue_size: Optional[int] ): """ diff --git a/autofit/non_linear/nest/dynesty/static.py b/autofit/non_linear/nest/dynesty/static.py index 8ec31b3ce..c9955c184 100644 --- a/autofit/non_linear/nest/dynesty/static.py +++ b/autofit/non_linear/nest/dynesty/static.py @@ -1,9 +1,6 @@ from __future__ import annotations from typing import TYPE_CHECKING, Optional -if TYPE_CHECKING: - from dynesty.pool import Pool - from dynesty import NestedSampler as StaticSampler from autofit.database.sqlalchemy_ import sa from autofit.mapper.prior_model.abstract import AbstractPriorModel @@ -106,7 +103,7 @@ def sampler_from( model: AbstractPriorModel, fitness_function, checkpoint_exists : bool, - pool: Optional[Pool], + pool: Optional, queue_size: Optional[int] ): """ From bcc4e013126cf9f544d711382b6b39ad55c2e239 Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Tue, 7 Feb 2023 18:52:11 +0000 Subject: [PATCH 170/226] fix bug due to renaming of parameters to params --- autofit/graphical/laplace/line_search.py | 4 ++-- autofit/graphical/laplace/newton.py | 2 +- autofit/mapper/prior/log_gaussian.py | 2 +- autofit/messages/abstract.py | 4 ++-- autofit/messages/beta.py | 2 +- 5 files changed, 7 insertions(+), 7 deletions(-) diff --git a/autofit/graphical/laplace/line_search.py b/autofit/graphical/laplace/line_search.py index e2ab51125..4f3e3e0f4 100644 --- a/autofit/graphical/laplace/line_search.py +++ b/autofit/graphical/laplace/line_search.py @@ -34,7 +34,7 @@ def __init__(self, state, param_shapes): @classmethod def from_state(cls, state): - param_shapes = FlattenArrays.from_arrays(state.params) + param_shapes = FlattenArrays.from_arrays(state.parameters) return cls(state, param_shapes) def make_state(self, x): @@ -58,7 +58,7 @@ def _func_gradient(self, x): @property def parameters(self): - return self.param_shapes.flatten(self.state.params) + return self.param_shapes.flatten(self.state.parameters) class OptimisationState: diff --git a/autofit/graphical/laplace/newton.py b/autofit/graphical/laplace/newton.py index c97f044bd..d667ca2eb 100644 --- a/autofit/graphical/laplace/newton.py +++ b/autofit/graphical/laplace/newton.py @@ -266,7 +266,7 @@ def xtol_condition(state, old_state, xtol=1e-6, ord=None, **kwargs): if not old_state: return - dx = VariableData.sub(state.params, old_state.params).vecnorm(ord=ord) + dx = VariableData.sub(state.parameters, old_state.parameters).vecnorm(ord=ord) if dx < xtol: return True, f"Minimum parameter change tolerance achieved, {dx} < {xtol}" diff --git a/autofit/mapper/prior/log_gaussian.py b/autofit/mapper/prior/log_gaussian.py index aa49c8cf6..1eae77276 100644 --- a/autofit/mapper/prior/log_gaussian.py +++ b/autofit/mapper/prior/log_gaussian.py @@ -68,7 +68,7 @@ def _new_for_base_message(self, message): projection. """ return LogGaussianPrior( - *message.params, + *message.parameters, lower_limit=self.lower_limit, upper_limit=self.upper_limit, id_=self.instance().id, diff --git a/autofit/messages/abstract.py b/autofit/messages/abstract.py index 249fb8739..2eff25c16 100644 --- a/autofit/messages/abstract.py +++ b/autofit/messages/abstract.py @@ -142,7 +142,7 @@ def __getitem__(self, index) -> "AbstractMessage": def __setitem__(self, index, value): self._reset_cache() - for param0, param1 in zip(self.parameters, value.params): + for param0, param1 in zip(self.parameters, value.parameters): param0[index] = param1 def merge(self, index, value): @@ -150,7 +150,7 @@ def merge(self, index, value): return cls( *( update_array(param0, index, param1) - for param0, param1 in zip(self.parameters, value.params) + for param0, param1 in zip(self.parameters, value.parameters) ) ) diff --git a/autofit/messages/beta.py b/autofit/messages/beta.py index da8028f21..09abae38b 100644 --- a/autofit/messages/beta.py +++ b/autofit/messages/beta.py @@ -152,7 +152,7 @@ def kl(self, dist): if self._support != dist._support: raise TypeError('Support does not match') - aP, bP = dist.params + aP, bP = dist.parameters aQ, bQ = self.parameters return ( betaln(aQ, bQ) - betaln(aP, bP) From b713264d52c81b06addc337a357deda571857d2c Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 13 Feb 2023 10:09:44 +0000 Subject: [PATCH 171/226] fix handling deeper attributes in interpolation --- autofit/mapper/model_object.py | 2 +- test_autofit/test_interpolator.py | 21 +++++++++++++++++++++ 2 files changed, 22 insertions(+), 1 deletion(-) diff --git a/autofit/mapper/model_object.py b/autofit/mapper/model_object.py index 19b01e116..efd7806cb 100644 --- a/autofit/mapper/model_object.py +++ b/autofit/mapper/model_object.py @@ -48,7 +48,7 @@ def replacing_for_path(self, path: Tuple[str, ...], value) -> "ModelObject": new = copy.deepcopy(self) obj = new for key in path[:-1]: - obj = getattr(new, key) + obj = getattr(obj, key) setattr(obj, path[-1], value) return new diff --git a/test_autofit/test_interpolator.py b/test_autofit/test_interpolator.py index 2e0de50bd..94766e0bd 100644 --- a/test_autofit/test_interpolator.py +++ b/test_autofit/test_interpolator.py @@ -53,3 +53,24 @@ def test_alternate_attribute(time_series, sigma): assert result.gaussian.sigma == sigma assert result.t == -sigma assert result.gaussian.normalization == -sigma + + +def test_deeper_attributes(): + collection = af.Collection( + model=af.Model(af.Gaussian, centre=0.0, normalization=1.0, sigma=-1.0,) + ) + + instance_1 = af.Collection( + t=1.0, collection=collection, + ).instance_from_prior_medians() + instance_2 = af.Collection( + t=2.0, collection=collection, + ).instance_from_prior_medians() + + time_series = af.LinearInterpolator([instance_1, instance_2]) + + result = time_series[time_series.t == 1.5] + + assert result.collection.model.centre == 0.0 + assert result.collection.model.normalization == 1.0 + assert result.collection.model.sigma == -1.0 From 572363fc18935e0f33e8119eca33acc231d37041 Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Mon, 13 Feb 2023 15:36:36 +0000 Subject: [PATCH 172/226] README comment --- README.rst | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/README.rst b/README.rst index 41506226d..f0b24a30c 100644 --- a/README.rst +++ b/README.rst @@ -24,6 +24,10 @@ PyAutoFit: Classy Probabilistic Programming `Introduction on Binder `_ | `HowToFit `_ + +.. + _ One day make these BOLD with a colon like my fellowsahip proposa,s where the first is Model Composition & Fitting: Tools for composing a complex model and fitting it with dynesty... + PyAutoFit is a Python based probabilistic programming language for the fully Bayesian analysis of extremely large datasets which: From e53937b8c10ffca301b6b0e8d0c7b757f0d5c2d7 Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 13 Feb 2023 17:03:54 +0000 Subject: [PATCH 173/226] cubic spline interpolator --- autofit/__init__.py | 2 +- autofit/interpolator.py | 12 ++++++++ test_autofit/test_interpolator.py | 51 ++++++++++++++++++++++++------- 3 files changed, 53 insertions(+), 12 deletions(-) diff --git a/autofit/__init__.py b/autofit/__init__.py index c71782ffb..dc86fb675 100644 --- a/autofit/__init__.py +++ b/autofit/__init__.py @@ -85,7 +85,7 @@ from .example.model import Gaussian from .text import formatter from .text import samples_text -from .interpolator import LinearInterpolator +from .interpolator import LinearInterpolator, SplineInterpolator from .tools import util diff --git a/autofit/interpolator.py b/autofit/interpolator.py index b291243b9..a448ff4fa 100644 --- a/autofit/interpolator.py +++ b/autofit/interpolator.py @@ -1,5 +1,6 @@ import copy from abc import ABC, abstractmethod +from scipy.interpolate import CubicSpline from typing import List, Dict, cast from scipy.stats import stats @@ -207,3 +208,14 @@ class LinearInterpolator(AbstractInterpolator): def _interpolate(x, y, value): slope, intercept, r, p, std_err = stats.linregress(x, y) return slope * value + intercept + + +class SplineInterpolator(AbstractInterpolator): + """ + Interpolate data with a piecewise cubic polynomial which is twice continuously differentiable + """ + + @staticmethod + def _interpolate(x, y, value): + f = CubicSpline(x, y) + return f(value) diff --git a/test_autofit/test_interpolator.py b/test_autofit/test_interpolator.py index 94766e0bd..165649132 100644 --- a/test_autofit/test_interpolator.py +++ b/test_autofit/test_interpolator.py @@ -12,25 +12,54 @@ def test_trivial(): assert result is instance +@pytest.fixture(name="instances") +def make_instances(): + return [ + af.ModelInstance( + items=dict( + t=1.0, gaussian=af.Gaussian(centre=0.0, normalization=1.0, sigma=-1.0), + ) + ), + af.ModelInstance( + items=dict( + t=2.0, gaussian=af.Gaussian(centre=1.0, normalization=2.0, sigma=-2.0), + ) + ), + ] + + @pytest.fixture(name="time_series") -def make_time_series(): - return af.LinearInterpolator( - [ - af.ModelInstance( - items=dict( - t=1.0, - gaussian=af.Gaussian(centre=0.0, normalization=1.0, sigma=-1.0), - ) - ), +def make_time_series(instances): + return af.LinearInterpolator(instances) + + +def test_spline_interpolator(instances): + interpolator = af.SplineInterpolator(instances) + + result = interpolator[interpolator.t == 1.5] + + assert result.t == 1.5 + assert result.gaussian.centre == 0.5 + + +def test_smooth_spline_interpolator(instances): + interpolator = af.SplineInterpolator( + instances + + [ af.ModelInstance( items=dict( - t=2.0, - gaussian=af.Gaussian(centre=1.0, normalization=2.0, sigma=-2.0), + t=3.0, + gaussian=af.Gaussian(centre=4.0, normalization=3.0, sigma=-3.0), ) ), ] ) + result = interpolator[interpolator.t == 1.5] + + assert result.t == 1.5 + assert result.gaussian.centre < 0.5 + @pytest.mark.parametrize( "t, centre", [(0.0, -1.0), (1.0, 0.0), (1.5, 0.5), (2.0, 1.0), (3.0, 2.0)] From d117da650e58b6f8a3511b3d1db06db2ab827431 Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 20 Feb 2023 14:59:29 +0000 Subject: [PATCH 174/226] rename and test --- autofit/interpolator.py | 3 ++- test_autofit/test_interpolator.py | 24 ++++++++++++++---------- 2 files changed, 16 insertions(+), 11 deletions(-) diff --git a/autofit/interpolator.py b/autofit/interpolator.py index a448ff4fa..a5481c60a 100644 --- a/autofit/interpolator.py +++ b/autofit/interpolator.py @@ -5,6 +5,7 @@ from scipy.stats import stats +from autoconf.dictable import Dictable from autofit.mapper.model import ModelInstance @@ -77,7 +78,7 @@ def __init__(self, path: InterpolatorPath, value: float): self.value = value -class AbstractInterpolator(ABC): +class AbstractInterpolator(Dictable, ABC): def __init__(self, instances: List[ModelInstance]): """ A TimeSeries allows interpolation on any variable. diff --git a/test_autofit/test_interpolator.py b/test_autofit/test_interpolator.py index 165649132..23c9b5f94 100644 --- a/test_autofit/test_interpolator.py +++ b/test_autofit/test_interpolator.py @@ -5,9 +5,9 @@ def test_trivial(): instance = af.ModelInstance(items=dict(t=1)) - time_series = af.LinearInterpolator([instance]) + linear_interpolator = af.LinearInterpolator([instance]) - result = time_series[time_series.t == 1] + result = linear_interpolator[linear_interpolator.t == 1] assert result is instance @@ -28,8 +28,8 @@ def make_instances(): ] -@pytest.fixture(name="time_series") -def make_time_series(instances): +@pytest.fixture(name="linear_interpolator") +def make_linear_interpolator(instances): return af.LinearInterpolator(instances) @@ -64,9 +64,9 @@ def test_smooth_spline_interpolator(instances): @pytest.mark.parametrize( "t, centre", [(0.0, -1.0), (1.0, 0.0), (1.5, 0.5), (2.0, 1.0), (3.0, 2.0)] ) -def test_linear(t, centre, time_series): +def test_linear(t, centre, linear_interpolator): - result = time_series[time_series.t == t] + result = linear_interpolator[linear_interpolator.t == t] assert result.t == t assert result.gaussian.centre == centre @@ -75,9 +75,9 @@ def test_linear(t, centre, time_series): @pytest.mark.parametrize("sigma", [-0.5, 0.0, 0.5, 1.0]) -def test_alternate_attribute(time_series, sigma): +def test_alternate_attribute(linear_interpolator, sigma): - result = time_series[time_series.gaussian.sigma == sigma] + result = linear_interpolator[linear_interpolator.gaussian.sigma == sigma] assert result.gaussian.sigma == sigma assert result.t == -sigma @@ -96,10 +96,14 @@ def test_deeper_attributes(): t=2.0, collection=collection, ).instance_from_prior_medians() - time_series = af.LinearInterpolator([instance_1, instance_2]) + linear_interpolator = af.LinearInterpolator([instance_1, instance_2]) - result = time_series[time_series.t == 1.5] + result = linear_interpolator[linear_interpolator.t == 1.5] assert result.collection.model.centre == 0.0 assert result.collection.model.normalization == 1.0 assert result.collection.model.sigma == -1.0 + + +def test_to_dict(linear_interpolator): + assert linear_interpolator.dict() == {} From 2326c59d99120cd1dd225589228d7895c21a7019 Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 20 Feb 2023 15:18:36 +0000 Subject: [PATCH 175/226] instance to dict --- autofit/mapper/model.py | 14 ++++++---- test_autofit/test_interpolator.py | 46 +++++++++++++++++++++++++------ 2 files changed, 46 insertions(+), 14 deletions(-) diff --git a/autofit/mapper/model.py b/autofit/mapper/model.py index 84603aa07..891923f7c 100644 --- a/autofit/mapper/model.py +++ b/autofit/mapper/model.py @@ -369,13 +369,13 @@ class ModelInstance(AbstractModel): @DynamicAttrs """ - def __init__(self, items=None): + def __init__(self, child_items=None): super().__init__() - if isinstance(items, list): - for i, item in enumerate(items): + if isinstance(child_items, list): + for i, item in enumerate(child_items): self[i] = item - if isinstance(items, dict): - for key, value in items.items(): + if isinstance(child_items, dict): + for key, value in child_items.items(): self[key] = value def __eq__(self, other): @@ -391,6 +391,10 @@ def __getitem__(self, item): def __setitem__(self, key, value): self.__dict__[key] = value + @property + def child_items(self): + return self.dict + def items(self): return self.dict.items() diff --git a/test_autofit/test_interpolator.py b/test_autofit/test_interpolator.py index 23c9b5f94..647fab440 100644 --- a/test_autofit/test_interpolator.py +++ b/test_autofit/test_interpolator.py @@ -1,10 +1,11 @@ import pytest import autofit as af +from autoconf.dictable import as_dict def test_trivial(): - instance = af.ModelInstance(items=dict(t=1)) + instance = af.ModelInstance(dict(t=1)) linear_interpolator = af.LinearInterpolator([instance]) result = linear_interpolator[linear_interpolator.t == 1] @@ -12,16 +13,19 @@ def test_trivial(): assert result is instance +@pytest.fixture(name="model_instance") +def make_model_instance(): + return af.ModelInstance( + dict(t=1.0, gaussian=af.Gaussian(centre=0.0, normalization=1.0, sigma=-1.0),) + ) + + @pytest.fixture(name="instances") -def make_instances(): +def make_instances(model_instance): return [ + model_instance, af.ModelInstance( - items=dict( - t=1.0, gaussian=af.Gaussian(centre=0.0, normalization=1.0, sigma=-1.0), - ) - ), - af.ModelInstance( - items=dict( + dict( t=2.0, gaussian=af.Gaussian(centre=1.0, normalization=2.0, sigma=-2.0), ) ), @@ -47,7 +51,7 @@ def test_smooth_spline_interpolator(instances): instances + [ af.ModelInstance( - items=dict( + dict( t=3.0, gaussian=af.Gaussian(centre=4.0, normalization=3.0, sigma=-3.0), ) @@ -107,3 +111,27 @@ def test_deeper_attributes(): def test_to_dict(linear_interpolator): assert linear_interpolator.dict() == {} + + +@pytest.fixture(name="instance_dict") +def make_instance_dict(): + return { + "child_items": { + "gaussian": { + "centre": 0.0, + "normalization": 1.0, + "sigma": -1.0, + "type": "autofit.example.model.Gaussian", + }, + "t": 1.0, + }, + "type": "autofit.mapper.model.ModelInstance", + } + + +def test_instance_as_dict(model_instance, instance_dict): + assert as_dict(model_instance) == instance_dict + + +def test_instance_from_dict(model_instance, instance_dict): + assert af.ModelInstance.from_dict(instance_dict) == model_instance From e160254660e9fefaa09796b50ab6d173255e3238 Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 20 Feb 2023 15:37:22 +0000 Subject: [PATCH 176/226] instances from dict --- autofit/mapper/model.py | 18 ++++++++++++------ autofit/mapper/model_object.py | 2 ++ test_autofit/test_interpolator.py | 16 +++++++++++++--- 3 files changed, 27 insertions(+), 9 deletions(-) diff --git a/autofit/mapper/model.py b/autofit/mapper/model.py index 891923f7c..744c41a19 100644 --- a/autofit/mapper/model.py +++ b/autofit/mapper/model.py @@ -369,14 +369,11 @@ class ModelInstance(AbstractModel): @DynamicAttrs """ + __dictable_type__ = "instance" + def __init__(self, child_items=None): super().__init__() - if isinstance(child_items, list): - for i, item in enumerate(child_items): - self[i] = item - if isinstance(child_items, dict): - for key, value in child_items.items(): - self[key] = value + self.child_items = child_items def __eq__(self, other): return self.__dict__ == other.__dict__ @@ -395,6 +392,15 @@ def __setitem__(self, key, value): def child_items(self): return self.dict + @child_items.setter + def child_items(self, child_items): + if isinstance(child_items, list): + for i, item in enumerate(child_items): + self[i] = item + if isinstance(child_items, dict): + for key, value in child_items.items(): + self[key] = value + def items(self): return self.dict.items() diff --git a/autofit/mapper/model_object.py b/autofit/mapper/model_object.py index efd7806cb..10dbf7174 100644 --- a/autofit/mapper/model_object.py +++ b/autofit/mapper/model_object.py @@ -122,6 +122,8 @@ def from_dict(d): instance = object.__new__(cls) elif type_ == "tuple_prior": instance = TuplePrior() + elif type_ == "dict": + return {key: ModelObject.from_dict(value) for key, value in d.items()} else: return Prior.from_dict(d) diff --git a/test_autofit/test_interpolator.py b/test_autofit/test_interpolator.py index 647fab440..dc6f7ccc8 100644 --- a/test_autofit/test_interpolator.py +++ b/test_autofit/test_interpolator.py @@ -119,13 +119,16 @@ def make_instance_dict(): "child_items": { "gaussian": { "centre": 0.0, + "class_path": "autofit.example.model.Gaussian", "normalization": 1.0, "sigma": -1.0, - "type": "autofit.example.model.Gaussian", + "type": "instance", }, "t": 1.0, + "type": "dict", }, - "type": "autofit.mapper.model.ModelInstance", + "class_path": "autofit.mapper.model.ModelInstance", + "type": "instance", } @@ -134,4 +137,11 @@ def test_instance_as_dict(model_instance, instance_dict): def test_instance_from_dict(model_instance, instance_dict): - assert af.ModelInstance.from_dict(instance_dict) == model_instance + instance = af.ModelInstance.from_dict(instance_dict) + assert instance.t == 1.0 + + gaussian = instance.gaussian + assert isinstance(gaussian, af.Gaussian) + assert gaussian.centre == 0.0 + assert gaussian.normalization == 1.0 + assert gaussian.sigma == -1.0 From 70e2f62dfa3cd99330ffcd7ca8f13ae7399820d8 Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 20 Feb 2023 15:47:37 +0000 Subject: [PATCH 177/226] ensure interpolators can also be serialised as a JSON --- autofit/mapper/model_object.py | 9 ++++---- test_autofit/test_interpolator.py | 38 ++++++++++++++++++++++++++----- 2 files changed, 37 insertions(+), 10 deletions(-) diff --git a/autofit/mapper/model_object.py b/autofit/mapper/model_object.py index 10dbf7174..2c2998dbb 100644 --- a/autofit/mapper/model_object.py +++ b/autofit/mapper/model_object.py @@ -117,15 +117,16 @@ def from_dict(d): instance = Model(get_class(d.pop("class_path"))) elif type_ == "collection": instance = Collection() - elif type_ == "instance": - cls = get_class(d.pop("class_path")) - instance = object.__new__(cls) elif type_ == "tuple_prior": instance = TuplePrior() elif type_ == "dict": return {key: ModelObject.from_dict(value) for key, value in d.items()} else: - return Prior.from_dict(d) + try: + return Prior.from_dict(d) + except KeyError: + cls = get_class(type_) + instance = object.__new__(cls) d.pop("type") diff --git a/test_autofit/test_interpolator.py b/test_autofit/test_interpolator.py index dc6f7ccc8..87953636a 100644 --- a/test_autofit/test_interpolator.py +++ b/test_autofit/test_interpolator.py @@ -109,8 +109,13 @@ def test_deeper_attributes(): assert result.collection.model.sigma == -1.0 -def test_to_dict(linear_interpolator): - assert linear_interpolator.dict() == {} +def test_to_dict(linear_interpolator, linear_interpolator_dict): + assert linear_interpolator.dict() == linear_interpolator_dict + + +def test_from_dict(linear_interpolator_dict): + interpolator = af.LinearInterpolator.from_dict(linear_interpolator_dict) + assert interpolator[interpolator.t == 1.5].t == 1.5 @pytest.fixture(name="instance_dict") @@ -119,16 +124,37 @@ def make_instance_dict(): "child_items": { "gaussian": { "centre": 0.0, - "class_path": "autofit.example.model.Gaussian", "normalization": 1.0, "sigma": -1.0, - "type": "instance", + "type": "autofit.example.model.Gaussian", }, "t": 1.0, "type": "dict", }, - "class_path": "autofit.mapper.model.ModelInstance", - "type": "instance", + "type": "autofit.mapper.model.ModelInstance", + } + + +@pytest.fixture(name="linear_interpolator_dict") +def make_linear_interpolator_dict(instance_dict): + return { + "instances": [ + instance_dict, + { + "child_items": { + "gaussian": { + "centre": 1.0, + "normalization": 2.0, + "sigma": -2.0, + "type": "autofit.example.model.Gaussian", + }, + "t": 2.0, + "type": "dict", + }, + "type": "autofit.mapper.model.ModelInstance", + }, + ], + "type": "autofit.interpolator.LinearInterpolator", } From fe0566ecf14db9a31ef02b07601399cfbd02f9c7 Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 6 Mar 2023 09:28:07 +0000 Subject: [PATCH 178/226] fix broken test --- autofit/mapper/model_object.py | 6 ++ test_autofit/mapper/model/test_json.py | 140 ++++++------------------- 2 files changed, 39 insertions(+), 107 deletions(-) diff --git a/autofit/mapper/model_object.py b/autofit/mapper/model_object.py index 2c2998dbb..25557fec6 100644 --- a/autofit/mapper/model_object.py +++ b/autofit/mapper/model_object.py @@ -121,6 +121,12 @@ def from_dict(d): instance = TuplePrior() elif type_ == "dict": return {key: ModelObject.from_dict(value) for key, value in d.items()} + elif type_ == "instance": + d.pop("type") + cls = get_class(d.pop("class_path")) + return cls( + **{key: ModelObject.from_dict(value) for key, value in d.items()} + ) else: try: return Prior.from_dict(d) diff --git a/test_autofit/mapper/model/test_json.py b/test_autofit/mapper/model/test_json.py index 74619c54e..ebbd4f4ca 100644 --- a/test_autofit/mapper/model/test_json.py +++ b/test_autofit/mapper/model/test_json.py @@ -5,54 +5,37 @@ import autofit as af -@pytest.fixture( - name="model_dict" -) + +@pytest.fixture(name="model_dict") def make_model_dict(): return { "type": "model", "class_path": "autofit.example.model.Gaussian", - "centre": {'lower_limit': 0.0, 'type': 'Uniform', 'upper_limit': 2.0}, - "normalization": {'lower_limit': 0.0, 'type': 'Uniform', 'upper_limit': 1.0}, - "sigma": {'lower_limit': 0.0, 'type': 'Uniform', 'upper_limit': 1.0}, + "centre": {"lower_limit": 0.0, "type": "Uniform", "upper_limit": 2.0}, + "normalization": {"lower_limit": 0.0, "type": "Uniform", "upper_limit": 1.0}, + "sigma": {"lower_limit": 0.0, "type": "Uniform", "upper_limit": 1.0}, } -@pytest.fixture( - name="instance_dict" -) +@pytest.fixture(name="instance_dict") def make_instance_dict(): return { "type": "instance", "class_path": "autofit.example.model.Gaussian", "centre": 0.0, "normalization": 0.1, - "sigma": 0.01 + "sigma": 0.01, } -@pytest.fixture( - name="collection_dict" -) -def make_collection_dict( - model_dict -): - return { - "gaussian": model_dict, - "type": "collection" - } +@pytest.fixture(name="collection_dict") +def make_collection_dict(model_dict): + return {"gaussian": model_dict, "type": "collection"} -@pytest.fixture( - name="model" -) +@pytest.fixture(name="model") def make_model(): - return af.Model( - af.Gaussian, - centre=af.UniformPrior( - upper_limit=2.0 - ) - ) + return af.Model(af.Gaussian, centre=af.UniformPrior(upper_limit=2.0)) class TestTuple: @@ -61,118 +44,61 @@ def test_tuple_prior(self): tuple_prior.tup_0 = 0 tuple_prior.tup_1 = 1 - result = af.Model.from_dict( - tuple_prior.dict() - ) - assert isinstance( - result, - af.TuplePrior - ) + result = af.Model.from_dict(tuple_prior.dict()) + assert isinstance(result, af.TuplePrior) def test_model_with_tuple(self): tuple_model = af.Model(af.m.MockWithTuple) tuple_model.instance_from_prior_medians() model_dict = tuple_model.dict() - model = af.Model.from_dict( - model_dict - ) + model = af.Model.from_dict(model_dict) instance = model.instance_from_prior_medians() assert instance.tup == (0.5, 0.5) class TestFromDict: - def test_model_from_dict( - self, - model_dict - ): - model = af.Model.from_dict( - model_dict - ) + def test_model_from_dict(self, model_dict): + model = af.Model.from_dict(model_dict) assert model.cls == af.Gaussian assert model.prior_count == 3 assert model.centre.upper_limit == 2.0 - def test_instance_from_dict( - self, - instance_dict - ): - instance = af.Model.from_dict( - instance_dict - ) - assert isinstance( - instance, - af.Gaussian - ) + def test_instance_from_dict(self, instance_dict): + instance = af.Model.from_dict(instance_dict) + assert isinstance(instance, af.Gaussian) assert instance.centre == 0.0 assert instance.normalization == 0.1 assert instance.sigma == 0.01 - def test_collection_from_dict( - self, - collection_dict - ): - collection = af.Model.from_dict( - collection_dict - ) - assert isinstance( - collection, - af.Collection - ) + def test_collection_from_dict(self, collection_dict): + collection = af.Model.from_dict(collection_dict) + assert isinstance(collection, af.Collection) assert len(collection) == 1 class TestToDict: - def test_model_priors( - self, - model, - model_dict - ): + def test_model_priors(self, model, model_dict): assert model.dict() == model_dict - def test_model_floats( - self, - instance_dict - ): - model = af.Model( - af.Gaussian, - centre=0.0, - normalization=0.1, - sigma=0.01 - ) + def test_model_floats(self, instance_dict): + model = af.Model(af.Gaussian, centre=0.0, normalization=0.1, sigma=0.01) assert model.dict() == instance_dict - def test_collection( - self, - model, - collection_dict - ): - collection = af.Collection( - gaussian=model - ) + def test_collection(self, model, collection_dict): + collection = af.Collection(gaussian=model) assert collection.dict() == collection_dict - def test_collection_instance( - self, - instance_dict - ): - collection = af.Collection( - gaussian=af.Gaussian() - ) - assert collection.dict() == { - "gaussian": instance_dict, - "type": "collection" - } + def test_collection_instance(self, instance_dict): + collection = af.Collection(gaussian=af.Gaussian()) + assert collection.dict() == {"gaussian": instance_dict, "type": "collection"} class TestFromJson: - def test__from_json(self, model_dict): - model = af.Model.from_dict( - model_dict - ) + model = af.Model.from_dict(model_dict) model_file = Path(__file__).parent / "model.json" @@ -190,4 +116,4 @@ def test__from_json(self, model_dict): assert model.prior_count == 3 assert model.centre.upper_limit == 2.0 - os.remove(model_file) \ No newline at end of file + os.remove(model_file) From e24072de3937627ed28cbabe9dd3b298c1fd9455 Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 6 Mar 2023 09:44:47 +0000 Subject: [PATCH 179/226] format --- autofit/mapper/prior_model/abstract.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/autofit/mapper/prior_model/abstract.py b/autofit/mapper/prior_model/abstract.py index 14ea28aec..b19cc35ad 100644 --- a/autofit/mapper/prior_model/abstract.py +++ b/autofit/mapper/prior_model/abstract.py @@ -444,9 +444,7 @@ def assert_no_assertions(obj): try: item = copy.copy(source) if isinstance(item, dict): - from autofit.mapper.prior_model.collection import ( - Collection, - ) + from autofit.mapper.prior_model.collection import Collection item = Collection(item) for attribute in path: @@ -1010,7 +1008,7 @@ def random_instance(self, ignore_prior_limits=False): @DynamicRecursionCache() def from_instance(instance, model_classes=tuple()): """ - Recursively create an prior object model from an object model. + Recursively create a prior object model from an object model. Parameters ---------- From 41def0b656544bea9ec352b7e0cc2585086b39ae Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 6 Mar 2023 09:56:42 +0000 Subject: [PATCH 180/226] permit single type instead of tuple as argument --- autofit/mapper/prior_model/abstract.py | 2 +- .../mapper/model/test_model_instance.py | 38 +++++++++++++++---- 2 files changed, 31 insertions(+), 9 deletions(-) diff --git a/autofit/mapper/prior_model/abstract.py b/autofit/mapper/prior_model/abstract.py index b19cc35ad..86740d54d 100644 --- a/autofit/mapper/prior_model/abstract.py +++ b/autofit/mapper/prior_model/abstract.py @@ -1070,7 +1070,7 @@ def from_instance(instance, model_classes=tuple()): ) except AttributeError: return instance - if any([isinstance(instance, cls) for cls in model_classes]): + if isinstance(instance, model_classes): return result.as_model() return result diff --git a/test_autofit/mapper/model/test_model_instance.py b/test_autofit/mapper/model/test_model_instance.py index 61e1227a9..61c031318 100644 --- a/test_autofit/mapper/model/test_model_instance.py +++ b/test_autofit/mapper/model/test_model_instance.py @@ -2,6 +2,7 @@ import autofit as af + @pytest.fixture(name="mock_components_1") def make_mock_components_1(): return af.m.MockComponents() @@ -42,12 +43,23 @@ def test_as_model(self, instance): def test_object_for_path(self, instance, mock_components_1, mock_components_2): assert instance.object_for_path(("mock_components_2",)) is mock_components_2 - assert instance.object_for_path(("sub", "mock_components_1")) is mock_components_1 - assert instance.object_for_path(("sub", "sub", "mock_components_1")) is mock_components_1 - setattr(instance.object_for_path(("mock_components_2",)), "mock_components", mock_components_1) + assert ( + instance.object_for_path(("sub", "mock_components_1")) is mock_components_1 + ) + assert ( + instance.object_for_path(("sub", "sub", "mock_components_1")) + is mock_components_1 + ) + setattr( + instance.object_for_path(("mock_components_2",)), + "mock_components", + mock_components_1, + ) assert mock_components_2.mock_components is mock_components_1 - def test_path_instance_tuples_for_class(self, instance, mock_components_1, mock_components_2): + def test_path_instance_tuples_for_class( + self, instance, mock_components_1, mock_components_2 + ): result = instance.path_instance_tuples_for_class(af.m.MockComponents) assert result[0] == (("mock_components_2",), mock_components_2) assert result[1] == (("sub", "mock_components_1"), mock_components_1) @@ -59,8 +71,7 @@ def test_simple_model(self): mapper.mock_class = af.m.MockClassx2 model_map = mapper.instance_from_unit_vector( - [1.0, 1.0], - ignore_prior_limits=True + [1.0, 1.0], ignore_prior_limits=True ) assert isinstance(model_map.mock_class, af.m.MockClassx2) @@ -74,8 +85,7 @@ def test_two_object_model(self): mapper.mock_class_2 = af.m.MockClassx2 model_map = mapper.instance_from_unit_vector( - [1.0, 0.0, 0.0, 1.0], - ignore_prior_limits=True + [1.0, 0.0, 0.0, 1.0], ignore_prior_limits=True ) assert isinstance(model_map.mock_class_1, af.m.MockClassx2) @@ -154,3 +164,15 @@ def test_match_tuple(self): assert model_map.mock_profile.one_tuple == (1.0, 1.0) assert model_map.mock_profile.two == 0.0 + + +def test_as_model_filter(): + class Child(af.Gaussian): + pass + + instance = af.ModelInstance({"child": Child(), "gaussian": af.Gaussian(),}) + + model = instance.as_model(af.Gaussian) + + assert isinstance(model.gaussian, af.Model) + assert isinstance(model.child, af.Model) From ad4f0eb527e4121d8aa125f178af70d20011ea28 Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 6 Mar 2023 10:19:14 +0000 Subject: [PATCH 181/226] excluded classes --- autofit/mapper/model.py | 6 +++-- autofit/mapper/prior_model/abstract.py | 24 +++++++++++++++---- .../mapper/model/test_model_instance.py | 19 +++++++++++---- 3 files changed, 38 insertions(+), 11 deletions(-) diff --git a/autofit/mapper/model.py b/autofit/mapper/model.py index 744c41a19..480feb4a8 100644 --- a/autofit/mapper/model.py +++ b/autofit/mapper/model.py @@ -422,8 +422,10 @@ def values(self): def __len__(self): return len(self.values()) - def as_model(self, model_classes=tuple()): + def as_model( + self, model_classes=tuple(), excluded_classes=tuple(), + ): from autofit.mapper.prior_model.abstract import AbstractPriorModel - return AbstractPriorModel.from_instance(self, model_classes) + return AbstractPriorModel.from_instance(self, model_classes, excluded_classes,) diff --git a/autofit/mapper/prior_model/abstract.py b/autofit/mapper/prior_model/abstract.py index 86740d54d..0f0aa44cf 100644 --- a/autofit/mapper/prior_model/abstract.py +++ b/autofit/mapper/prior_model/abstract.py @@ -1006,7 +1006,9 @@ def random_instance(self, ignore_prior_limits=False): @staticmethod @DynamicRecursionCache() - def from_instance(instance, model_classes=tuple()): + def from_instance( + instance, model_classes=tuple(), exclude_classes=tuple(), + ): """ Recursively create a prior object model from an object model. @@ -1022,12 +1024,18 @@ def from_instance(instance, model_classes=tuple()): """ from autofit.mapper.prior_model import collection + if isinstance(instance, exclude_classes): + return instance if isinstance(instance, (Prior, AbstractPriorModel)): return instance elif isinstance(instance, list): result = collection.Collection( [ - AbstractPriorModel.from_instance(item, model_classes=model_classes) + AbstractPriorModel.from_instance( + item, + model_classes=model_classes, + exclude_classes=exclude_classes, + ) for item in instance ] ) @@ -1040,14 +1048,18 @@ def from_instance(instance, model_classes=tuple()): result, key, AbstractPriorModel.from_instance( - value, model_classes=model_classes + value, + model_classes=model_classes, + exclude_classes=exclude_classes, ), ) elif isinstance(instance, dict): result = collection.Collection( { key: AbstractPriorModel.from_instance( - value, model_classes=model_classes + value, + model_classes=model_classes, + exclude_classes=exclude_classes, ) for key, value in instance.items() } @@ -1062,7 +1074,9 @@ def from_instance(instance, model_classes=tuple()): instance.__class__, **{ key: AbstractPriorModel.from_instance( - value, model_classes=model_classes + value, + model_classes=model_classes, + exclude_classes=exclude_classes, ) for key, value in instance.__dict__.items() if key != "cls" diff --git a/test_autofit/mapper/model/test_model_instance.py b/test_autofit/mapper/model/test_model_instance.py index 61c031318..efd57c534 100644 --- a/test_autofit/mapper/model/test_model_instance.py +++ b/test_autofit/mapper/model/test_model_instance.py @@ -166,13 +166,24 @@ def test_match_tuple(self): assert model_map.mock_profile.two == 0.0 -def test_as_model_filter(): - class Child(af.Gaussian): - pass +class Child(af.Gaussian): + pass - instance = af.ModelInstance({"child": Child(), "gaussian": af.Gaussian(),}) +@pytest.fixture(name="instance") +def make_instance(): + return af.ModelInstance({"child": Child(), "gaussian": af.Gaussian()}) + + +def test_single_argument(instance): model = instance.as_model(af.Gaussian) assert isinstance(model.gaussian, af.Model) assert isinstance(model.child, af.Model) + + +def test_filter_child(instance): + model = instance.as_model(af.Gaussian, excluded_classes=Child) + + assert isinstance(model.gaussian, af.Model) + assert not isinstance(model.child, af.Model) From 2f0d3da09f011368548380fb8106186af1022f13 Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 6 Mar 2023 10:21:16 +0000 Subject: [PATCH 182/226] more testing --- .../mapper/model/test_model_instance.py | 30 ++++++++++++++----- 1 file changed, 23 insertions(+), 7 deletions(-) diff --git a/test_autofit/mapper/model/test_model_instance.py b/test_autofit/mapper/model/test_model_instance.py index efd57c534..05e29ee37 100644 --- a/test_autofit/mapper/model/test_model_instance.py +++ b/test_autofit/mapper/model/test_model_instance.py @@ -170,20 +170,36 @@ class Child(af.Gaussian): pass -@pytest.fixture(name="instance") -def make_instance(): - return af.ModelInstance({"child": Child(), "gaussian": af.Gaussian()}) +class Child2(af.Gaussian): + pass + +@pytest.fixture(name="exclude_instance") +def make_excluded_instance(): + return af.ModelInstance( + {"child": Child(), "gaussian": af.Gaussian(), "child2": Child2(),} + ) -def test_single_argument(instance): - model = instance.as_model(af.Gaussian) + +def test_single_argument(exclude_instance): + model = exclude_instance.as_model(af.Gaussian) assert isinstance(model.gaussian, af.Model) assert isinstance(model.child, af.Model) + assert isinstance(model.child2, af.Model) + + +def test_filter_child(exclude_instance): + model = exclude_instance.as_model(af.Gaussian, excluded_classes=Child) + + assert isinstance(model.gaussian, af.Model) + assert not isinstance(model.child, af.Model) + assert isinstance(model.child2, af.Model) -def test_filter_child(instance): - model = instance.as_model(af.Gaussian, excluded_classes=Child) +def test_filter_multiple(exclude_instance): + model = exclude_instance.as_model(af.Gaussian, excluded_classes=(Child, Child2),) assert isinstance(model.gaussian, af.Model) assert not isinstance(model.child, af.Model) + assert not isinstance(model.child2, af.Model) From 8bbf30d39ca16e1576b8d39d4653137b654553f5 Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 6 Mar 2023 10:23:55 +0000 Subject: [PATCH 183/226] docs --- autofit/mapper/model.py | 18 +++++++++++++++++- autofit/mapper/prior_model/abstract.py | 7 ++++++- 2 files changed, 23 insertions(+), 2 deletions(-) diff --git a/autofit/mapper/model.py b/autofit/mapper/model.py index 480feb4a8..ffa12f513 100644 --- a/autofit/mapper/model.py +++ b/autofit/mapper/model.py @@ -423,8 +423,24 @@ def __len__(self): return len(self.values()) def as_model( - self, model_classes=tuple(), excluded_classes=tuple(), + self, + model_classes: Union[type, Iterable[type]] = tuple(), + excluded_classes: Union[type, Iterable[type]] = tuple(), ): + """ + Convert this instance to a model + + Parameters + ---------- + model_classes + The classes to convert to models + excluded_classes + The classes to exclude from conversion + + Returns + ------- + A model + """ from autofit.mapper.prior_model.abstract import AbstractPriorModel diff --git a/autofit/mapper/prior_model/abstract.py b/autofit/mapper/prior_model/abstract.py index 0f0aa44cf..9f337972b 100644 --- a/autofit/mapper/prior_model/abstract.py +++ b/autofit/mapper/prior_model/abstract.py @@ -1007,7 +1007,9 @@ def random_instance(self, ignore_prior_limits=False): @staticmethod @DynamicRecursionCache() def from_instance( - instance, model_classes=tuple(), exclude_classes=tuple(), + instance, + model_classes: Union[type, Iterable[type]] = tuple(), + exclude_classes: Union[type, Iterable[type]] = tuple(), ): """ Recursively create a prior object model from an object model. @@ -1015,6 +1017,9 @@ def from_instance( Parameters ---------- model_classes + A tuple of classes that should be converted to a prior model + exclude_classes + A tuple of classes that should not be converted to a prior model instance A dictionary, list, class instance or model instance Returns From d96a85cf8782b088f8d9e153e8bb0a11a20ed615 Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 6 Mar 2023 14:15:09 +0000 Subject: [PATCH 184/226] fix jax in laplace by casting array to float --- autofit/graphical/laplace/newton.py | 98 +++++++++++++---------------- 1 file changed, 45 insertions(+), 53 deletions(-) diff --git a/autofit/graphical/laplace/newton.py b/autofit/graphical/laplace/newton.py index d667ca2eb..b65f6eea2 100644 --- a/autofit/graphical/laplace/newton.py +++ b/autofit/graphical/laplace/newton.py @@ -20,6 +20,7 @@ def gradient_ascent(state: OptimisationState, **kwargs) -> VariableData: def newton_direction(state: OptimisationState, **kwargs) -> VariableData: return state.hessian.ldiv(state.gradient) + def newton_abs_direction(state: OptimisationState, d=1e-6, **kwargs) -> VariableData: posdef = state.hessian.abs().diagonalupdate(state.parameters.full_like(d)) return posdef.ldiv(state.gradient) @@ -34,7 +35,7 @@ def newton_abs_direction(state: OptimisationState, d=1e-6, **kwargs) -> Variable def sr1_update( - state1: OptimisationState, state: OptimisationState, mintol=1e-8, **kwargs + state1: OptimisationState, state: OptimisationState, mintol=1e-8, **kwargs ) -> OptimisationState: yk = VariableData.sub(state1.gradient, state.gradient) dk = VariableData.sub(state1.parameters, state.parameters) @@ -57,7 +58,7 @@ def sr1_update( def diag_sr1_update( - state1: OptimisationState, state: OptimisationState, tol=1e-8, **kwargs + state1: OptimisationState, state: OptimisationState, tol=1e-8, **kwargs ) -> OptimisationState: yk = VariableData.sub(state1.gradient, state.gradient) dk = VariableData.sub(state1.parameters, state.parameters) @@ -76,7 +77,7 @@ def diag_sr1_update( def diag_sr1_update_( - state1: OptimisationState, state: OptimisationState, tol=1e-8, **kwargs + state1: OptimisationState, state: OptimisationState, tol=1e-8, **kwargs ) -> OptimisationState: yk = VariableData.sub(state1.gradient, state.gradient) dk = VariableData.sub(state1.parameters, state.parameters) @@ -99,7 +100,7 @@ def diag_sr1_update_( def diag_sr1_bfgs_update( - state1: OptimisationState, state: OptimisationState, **kwargs + state1: OptimisationState, state: OptimisationState, **kwargs ) -> OptimisationState: yk = VariableData.sub(state1.gradient, state.gradient) dk = VariableData.sub(state1.parameters, state.parameters) @@ -109,9 +110,7 @@ def diag_sr1_bfgs_update( def bfgs1_update( - state1: OptimisationState, - state: OptimisationState, - **kwargs, + state1: OptimisationState, state: OptimisationState, **kwargs, ) -> OptimisationState: """ y_k = g_{k+1} - g{k} @@ -139,9 +138,7 @@ def bfgs1_update( def bfgs_update( - state1: OptimisationState, - state: OptimisationState, - **kwargs, + state1: OptimisationState, state: OptimisationState, **kwargs, ) -> OptimisationState: yk = VariableData.sub(state1.gradient, state.gradient) dk = VariableData.sub(state1.parameters, state.parameters) @@ -158,9 +155,7 @@ def bfgs_update( def quasi_deterministic_update( - state1: OptimisationState, - state: OptimisationState, - **kwargs, + state1: OptimisationState, state: OptimisationState, **kwargs, ) -> OptimisationState: dk = VariableData.sub(state1.parameters, state.parameters) zk = VariableData.sub( @@ -179,9 +174,7 @@ def quasi_deterministic_update( def diag_quasi_deterministic_update( - state1: OptimisationState, - state: OptimisationState, - **kwargs, + state1: OptimisationState, state: OptimisationState, **kwargs, ) -> OptimisationState: dk = VariableData.sub(state1.parameters, state.parameters) zk = VariableData.sub( @@ -191,7 +184,7 @@ def diag_quasi_deterministic_update( zk2 = zk ** 2 zk4 = (zk2 ** 2).sum() alpha = (dk.dot(Bxk.dot(dk)) - zk.dot(Bzk.dot(zk))) / zk4 - state1.det_hessian = Bzk.diagonalupdate(alpha * zk2) + state1.det_hessian = Bzk.diagonalupdate(float(alpha) * zk2) return state1 @@ -202,10 +195,7 @@ def __init__(self, quasi_newton_update, det_quasi_newton_update): self.det_quasi_newton_update = det_quasi_newton_update def __call__( - self, - state1: OptimisationState, - state: OptimisationState, - **kwargs, + self, state1: OptimisationState, state: OptimisationState, **kwargs, ) -> OptimisationState: # Only update estimate if a step has been taken @@ -225,28 +215,28 @@ def __call__( def take_step( - state: OptimisationState, - old_state: Optional[OptimisationState] = None, - *, - search_direction=newton_abs_direction, - calc_line_search=line_search, - search_direction_kws: Optional[Dict[str, Any]] = None, - line_search_kws: Optional[Dict[str, Any]] = None, + state: OptimisationState, + old_state: Optional[OptimisationState] = None, + *, + search_direction=newton_abs_direction, + calc_line_search=line_search, + search_direction_kws: Optional[Dict[str, Any]] = None, + line_search_kws: Optional[Dict[str, Any]] = None, ) -> Tuple[Optional[float], OptimisationState]: state.search_direction = search_direction(state, **(search_direction_kws or {})) return calc_line_search(state, old_state, **(line_search_kws or {})) def take_quasi_newton_step( - state: OptimisationState, - old_state: Optional[OptimisationState] = None, - *, - search_direction=newton_abs_direction, - calc_line_search=line_search, - quasi_newton_update=full_bfgs_update, - search_direction_kws: Optional[Dict[str, Any]] = None, - line_search_kws: Optional[Dict[str, Any]] = None, - quasi_newton_kws: Optional[Dict[str, Any]] = None, + state: OptimisationState, + old_state: Optional[OptimisationState] = None, + *, + search_direction=newton_abs_direction, + calc_line_search=line_search, + quasi_newton_update=full_bfgs_update, + search_direction_kws: Optional[Dict[str, Any]] = None, + line_search_kws: Optional[Dict[str, Any]] = None, + quasi_newton_kws: Optional[Dict[str, Any]] = None, ) -> Tuple[Optional[float], OptimisationState]: """ """ state.search_direction = search_direction(state, **(search_direction_kws or {})) @@ -314,7 +304,7 @@ def ngev_condition(state, old_state, maxgev=10000, **kwargs): def check_stop_conditions( - stepsize, state, old_state, stop_conditions, **stop_kws + stepsize, state, old_state, stop_conditions, **stop_kws ) -> Optional[Tuple[bool, str]]: if stepsize is None: return False, "abnormal termination of line search" @@ -328,20 +318,20 @@ def check_stop_conditions( def optimise_quasi_newton( - state: OptimisationState, - old_state: Optional[OptimisationState] = None, - *, - max_iter=100, - search_direction=newton_abs_direction, - calc_line_search=line_search, - quasi_newton_update=bfgs_update, - stop_conditions=stop_conditions, - search_direction_kws: Optional[Dict[str, Any]] = None, - line_search_kws: Optional[Dict[str, Any]] = None, - quasi_newton_kws: Optional[Dict[str, Any]] = None, - stop_kws: Optional[Dict[str, Any]] = None, - callback: Optional[_OPT_CALLBACK] = None, - **kwargs, + state: OptimisationState, + old_state: Optional[OptimisationState] = None, + *, + max_iter=100, + search_direction=newton_abs_direction, + calc_line_search=line_search, + quasi_newton_update=bfgs_update, + stop_conditions=stop_conditions, + search_direction_kws: Optional[Dict[str, Any]] = None, + line_search_kws: Optional[Dict[str, Any]] = None, + quasi_newton_kws: Optional[Dict[str, Any]] = None, + stop_kws: Optional[Dict[str, Any]] = None, + callback: Optional[_OPT_CALLBACK] = None, + **kwargs, ) -> Tuple[OptimisationState, Status]: success = True updated = False @@ -356,7 +346,9 @@ def optimise_quasi_newton( success, message = stop break - with LogWarnings(logger=_log_projection_warnings, action='always') as caught_warnings: + with LogWarnings( + logger=_log_projection_warnings, action="always" + ) as caught_warnings: try: stepsize, state1 = take_quasi_newton_step( state, From 7dd2ac70fcc730a23eb0388c259e367820b15842 Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Tue, 21 Mar 2023 17:59:27 +0000 Subject: [PATCH 185/226] autofit version --- autofit/__init__.py | 2 +- docs/requirements.txt | 2 +- requirements.txt | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/autofit/__init__.py b/autofit/__init__.py index dc86fb675..693822f3f 100644 --- a/autofit/__init__.py +++ b/autofit/__init__.py @@ -109,4 +109,4 @@ def save_abc(pickler, obj): conf.instance.register(__file__) -__version__ = "2023.1.15.1" +__version__ = "2023.3.21.3" diff --git a/docs/requirements.txt b/docs/requirements.txt index 93c7fbbd9..d6e56a19f 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,7 +1,7 @@ corner==2.2.1 decorator>=4.2.1 dill>=0.3.1.1 -dynesty==2.0.2 +dynesty==2.1.0 typing-inspect>=0.4.0 emcee>=3.0.2 gprof2dot==2021.2.21 diff --git a/requirements.txt b/requirements.txt index e12c665b6..f8bca9fa7 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,7 +1,7 @@ corner==2.2.1 decorator>=4.2.1 dill>=0.3.1.1 -dynesty==2.0.2 +dynesty==2.1.0 typing-inspect>=0.4.0 emcee>=3.1.3 matplotlib From fe4d81fef9ac4a52215941b00100fc28670f0580 Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Tue, 21 Mar 2023 18:26:33 +0000 Subject: [PATCH 186/226] v2023.3.21.4 --- autofit/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autofit/__init__.py b/autofit/__init__.py index 693822f3f..ab8f9ec3b 100644 --- a/autofit/__init__.py +++ b/autofit/__init__.py @@ -109,4 +109,4 @@ def save_abc(pickler, obj): conf.instance.register(__file__) -__version__ = "2023.3.21.3" +__version__ = "2023.3.21.4" From 5d1ca26e628747de3fd847fe414ed0b602962cba Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Tue, 21 Mar 2023 18:45:14 +0000 Subject: [PATCH 187/226] bump --- autofit/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autofit/__init__.py b/autofit/__init__.py index ab8f9ec3b..5d65d6ab6 100644 --- a/autofit/__init__.py +++ b/autofit/__init__.py @@ -109,4 +109,4 @@ def save_abc(pickler, obj): conf.instance.register(__file__) -__version__ = "2023.3.21.4" +__version__ = "2023.3.21.5" From 5e68c6b057b739a4b24b988a10086362f3e79cce Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Tue, 21 Mar 2023 18:46:17 +0000 Subject: [PATCH 188/226] bump to __version__ = 2023.3.21.5 --- autofit/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autofit/__init__.py b/autofit/__init__.py index c71782ffb..98f9e8e01 100644 --- a/autofit/__init__.py +++ b/autofit/__init__.py @@ -109,4 +109,4 @@ def save_abc(pickler, obj): conf.instance.register(__file__) -__version__ = "2023.1.15.1" +__version__ = "2023.3.21.5" From 426f999f8d1f991dfebe56cd74dae71fb0282ad5 Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Mon, 27 Mar 2023 15:40:04 +0100 Subject: [PATCH 189/226] docs --- autofit/config/general.yaml | 3 +++ test_autofit/config/general.yaml | 3 +++ 2 files changed, 6 insertions(+) diff --git a/autofit/config/general.yaml b/autofit/config/general.yaml index a217d5189..6440c29f7 100644 --- a/autofit/config/general.yaml +++ b/autofit/config/general.yaml @@ -3,6 +3,9 @@ analysis: hpc: hpc_mode: false # If True, use HPC mode, which disables GUI visualization, logging to screen and other settings which are not suited to running on a super computer. iterations_per_update: 5000 # The number of iterations between every update (visualization, results output, etc) in HPC mode. +inversion: + check_reconstruction: true # If True, the inversion's reconstruction is checked to ensure the solution of a meshs's mapper is not an invalid solution where the values are all the same. + reconstruction_vmax_factor: 0.5 # Plots of an Inversion's reconstruction use the reconstructed data's bright value multiplied by this factor. model: ignore_prior_limits: false # If ``True`` the limits applied to priors will be ignored, where limits set upper / lower limits. This stops PriorLimitException's from being raised. output: diff --git a/test_autofit/config/general.yaml b/test_autofit/config/general.yaml index 06ea56184..90c934f6e 100644 --- a/test_autofit/config/general.yaml +++ b/test_autofit/config/general.yaml @@ -3,6 +3,9 @@ analysis: hpc: hpc_mode: false # If True, use HPC mode, which disables GUI visualization, logging to screen and other settings which are not suited to running on a super computer. iterations_per_update: 5000 # The number of iterations between every update (visualization, results output, etc) in HPC mode. +inversion: + check_reconstruction: true # If True, the inversion's reconstruction is checked to ensure the solution of a meshs's mapper is not an invalid solution where the values are all the same. + reconstruction_vmax_factor: 0.5 # Plots of an Inversion's reconstruction use the reconstructed data's bright value multiplied by this factor. model: ignore_prior_limits: false # If ``True`` the limits applied to priors will be ignored, where limits set upper / lower limits. This stops PriorLimitException's from being raised. output: From 1f1c31cf7c9d0e6858c787ba4b95c274d5acd8da Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Mon, 27 Mar 2023 15:42:38 +0100 Subject: [PATCH 190/226] v2023.3.21.5 --- autofit/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autofit/__init__.py b/autofit/__init__.py index 5d65d6ab6..3076e0fe4 100644 --- a/autofit/__init__.py +++ b/autofit/__init__.py @@ -109,4 +109,4 @@ def save_abc(pickler, obj): conf.instance.register(__file__) -__version__ = "2023.3.21.5" +__version__ = "2023.3.27.1" From f45665293012f725f44b631323d3122ce108d809 Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Mon, 27 Mar 2023 15:43:29 +0100 Subject: [PATCH 191/226] merge --- README.rst | 4 + autofit/__init__.py | 5 +- autofit/config/general.yaml | 3 + autofit/config/non_linear/optimize.yaml | 2 +- autofit/config/visualize/plots_search.yaml | 4 +- autofit/database/model/fit.py | 288 ++++-------------- autofit/example/analysis.py | 22 +- autofit/example/model.py | 44 ++- autofit/graphical/laplace/newton.py | 98 +++--- autofit/interpolator.py | 15 +- autofit/mapper/model.py | 80 ++++- autofit/mapper/model_object.py | 19 +- autofit/mapper/prior_model/abstract.py | 37 ++- autofit/non_linear/abstract_search.py | 72 +++++ autofit/non_linear/analysis/combined.py | 47 ++- autofit/non_linear/analysis/indexed.py | 12 +- autofit/non_linear/grid/sensitivity.py | 226 ++++++-------- autofit/non_linear/initializer.py | 7 +- autofit/non_linear/nest/dynesty/abstract.py | 9 +- autofit/non_linear/nest/dynesty/dynamic.py | 5 +- autofit/non_linear/nest/dynesty/plotter.py | 40 +-- autofit/non_linear/nest/dynesty/static.py | 5 +- autofit/non_linear/nest/ultranest/plotter.py | 43 ++- autofit/plot/samples_plotters.py | 34 +++ docs/overview/multi_datasets.rst | 17 ++ docs/requirements.txt | 2 +- optional_requirements.txt | 2 +- requirements.txt | 2 +- test_autofit/analysis/test_free_parameter.py | 106 ++----- test_autofit/config/general.yaml | 4 +- test_autofit/conftest.py | 59 +--- test_autofit/mapper/model/test_json.py | 140 ++------- .../mapper/model/test_model_instance.py | 65 +++- test_autofit/mapper/test_has.py | 155 ++++------ .../grid/test_sensitivity/test_results.py | 30 ++ .../grid/test_sensitivity/test_run.py | 27 +- .../non_linear/test_fit_sequential.py | 75 +++++ test_autofit/test_interpolator.py | 158 ++++++++-- 38 files changed, 1032 insertions(+), 931 deletions(-) create mode 100644 test_autofit/non_linear/grid/test_sensitivity/test_results.py create mode 100644 test_autofit/non_linear/test_fit_sequential.py diff --git a/README.rst b/README.rst index 41506226d..f0b24a30c 100644 --- a/README.rst +++ b/README.rst @@ -24,6 +24,10 @@ PyAutoFit: Classy Probabilistic Programming `Introduction on Binder `_ | `HowToFit `_ + +.. + _ One day make these BOLD with a colon like my fellowsahip proposa,s where the first is Model Composition & Fitting: Tools for composing a complex model and fitting it with dynesty... + PyAutoFit is a Python based probabilistic programming language for the fully Bayesian analysis of extremely large datasets which: diff --git a/autofit/__init__.py b/autofit/__init__.py index 98f9e8e01..3036d1f9d 100644 --- a/autofit/__init__.py +++ b/autofit/__init__.py @@ -85,7 +85,7 @@ from .example.model import Gaussian from .text import formatter from .text import samples_text -from .interpolator import LinearInterpolator +from .interpolator import LinearInterpolator, SplineInterpolator from .tools import util @@ -109,4 +109,5 @@ def save_abc(pickler, obj): conf.instance.register(__file__) -__version__ = "2023.3.21.5" +__version__ = "2023.3.27.1" + diff --git a/autofit/config/general.yaml b/autofit/config/general.yaml index a217d5189..6440c29f7 100644 --- a/autofit/config/general.yaml +++ b/autofit/config/general.yaml @@ -3,6 +3,9 @@ analysis: hpc: hpc_mode: false # If True, use HPC mode, which disables GUI visualization, logging to screen and other settings which are not suited to running on a super computer. iterations_per_update: 5000 # The number of iterations between every update (visualization, results output, etc) in HPC mode. +inversion: + check_reconstruction: true # If True, the inversion's reconstruction is checked to ensure the solution of a meshs's mapper is not an invalid solution where the values are all the same. + reconstruction_vmax_factor: 0.5 # Plots of an Inversion's reconstruction use the reconstructed data's bright value multiplied by this factor. model: ignore_prior_limits: false # If ``True`` the limits applied to priors will be ignored, where limits set upper / lower limits. This stops PriorLimitException's from being raised. output: diff --git a/autofit/config/non_linear/optimize.yaml b/autofit/config/non_linear/optimize.yaml index 8cf3d0fcf..007c93c63 100644 --- a/autofit/config/non_linear/optimize.yaml +++ b/autofit/config/non_linear/optimize.yaml @@ -5,7 +5,7 @@ # - PySwarms: https://github.com/ljvmiranda921/pyswarms / https://pyswarms.readthedocs.io/en/latest/index.html # Settings in the [search], [run] and [options] entries are specific to each nested algorithm and should be -# determined by consulting that MCMC method's own readthedocs. +# determined by consulting that optimizers method's own readthedocs. PySwarmsGlobal: run: diff --git a/autofit/config/visualize/plots_search.yaml b/autofit/config/visualize/plots_search.yaml index a536bb7b0..bb312a35d 100644 --- a/autofit/config/visualize/plots_search.yaml +++ b/autofit/config/visualize/plots_search.yaml @@ -1,6 +1,6 @@ dynesty: - cornerplot: true - cornerpoints: false # Output Dynesty cornerplot figure during a non-linear search fit? + cornerplot: true # Output Dynesty cornerplot figure during a non-linear search fit? + cornerpoints: false # Output Dynesty cornerpoints figure during a non-linear search fit? runplot: true # Output Dynesty runplot figure during a non-linear search fit? traceplot: true # Output Dynesty traceplot figure during a non-linear search fit? emcee: diff --git a/autofit/database/model/fit.py b/autofit/database/model/fit.py index c1b6a18f9..1951a92ac 100644 --- a/autofit/database/model/fit.py +++ b/autofit/database/model/fit.py @@ -18,48 +18,26 @@ class Pickle(Base): def __init__(self, **kwargs): super().__init__(**kwargs) - id = sa.Column( - sa.Integer, - primary_key=True - ) + id = sa.Column(sa.Integer, primary_key=True) - name = sa.Column( - sa.String - ) - string = sa.Column( - sa.String - ) - fit_id = sa.Column( - sa.String, - sa.ForeignKey( - "fit.id" - ) - ) - fit = sa.orm.relationship( - "Fit", - uselist=False - ) + name = sa.Column(sa.String) + string = sa.Column(sa.String) + fit_id = sa.Column(sa.String, sa.ForeignKey("fit.id")) + fit = sa.orm.relationship("Fit", uselist=False) @property def value(self): """ The unpickled object """ - if isinstance( - self.string, - str - ): + if isinstance(self.string, str): return self.string - return pickle.loads( - self.string - ) + return pickle.loads(self.string) @value.setter def value(self, value): try: - self.string = pickle.dumps( - value - ) + self.string = pickle.dumps(value) except pickle.PicklingError: pass @@ -67,24 +45,13 @@ def value(self, value): class Info(Base): __tablename__ = "info" - id = sa.Column( - sa.Integer, - primary_key=True - ) + id = sa.Column(sa.Integer, primary_key=True) key = sa.Column(sa.String) value = sa.Column(sa.String) - fit_id = sa.Column( - sa.String, - sa.ForeignKey( - "fit.id" - ) - ) - fit = sa.orm.relationship( - "Fit", - uselist=False - ) + fit_id = sa.Column(sa.String, sa.ForeignKey("fit.id")) + fit = sa.orm.relationship("Fit", uselist=False) def try_none(func): @@ -101,24 +68,13 @@ def wrapper(*args, **kwargs): class NamedInstance(Base): __tablename__ = "named_instance" - id = sa.Column( - sa.Integer, - primary_key=True - ) + id = sa.Column(sa.Integer, primary_key=True) name = sa.Column(sa.String) - instance_id = sa.Column( - sa.Integer, - sa.ForeignKey( - "object.id" - ) - ) + instance_id = sa.Column(sa.Integer, sa.ForeignKey("object.id")) __instance = sa.orm.relationship( - "Object", - uselist=False, - backref="named_instance", - foreign_keys=[instance_id] + "Object", uselist=False, backref="named_instance", foreign_keys=[instance_id] ) @property @@ -131,20 +87,10 @@ def instance(self): @instance.setter def instance(self, instance): - self.__instance = Object.from_object( - instance - ) - - fit_id = sa.Column( - sa.String, - sa.ForeignKey( - "fit.id" - ) - ) - fit = sa.orm.relationship( - "Fit", - uselist=False - ) + self.__instance = Object.from_object(instance) + + fit_id = sa.Column(sa.String, sa.ForeignKey("fit.id")) + fit = sa.orm.relationship("Fit", uselist=False) # noinspection PyProtectedMember @@ -167,56 +113,36 @@ def __getitem__(self, item: str): Raises a KeyError if no such instance exists. """ - return self._get_named_instance( - item - ).instance + return self._get_named_instance(item).instance def __setitem__(self, key: str, value): """ Set an instance for a given name """ try: - named_instance = self._get_named_instance( - key - ) + named_instance = self._get_named_instance(key) except KeyError: - named_instance = NamedInstance( - name=key - ) - self.fit._named_instances.append( - named_instance - ) + named_instance = NamedInstance(name=key) + self.fit._named_instances.append(named_instance) named_instance.instance = value - def _get_named_instance( - self, - item: str - ) -> "NamedInstance": + def _get_named_instance(self, item: str) -> "NamedInstance": """ Retrieve a NamedInstance by its name. """ for named_instance in self.fit._named_instances: if named_instance.name == item: return named_instance - raise KeyError( - f"Instance {item} not found" - ) + raise KeyError(f"Instance {item} not found") class Fit(Base): __tablename__ = "fit" - id = sa.Column( - sa.String, - primary_key=True, - ) - is_complete = sa.Column( - sa.Boolean - ) + id = sa.Column(sa.String, primary_key=True,) + is_complete = sa.Column(sa.Boolean) - _named_instances: List[NamedInstance] = sa.orm.relationship( - "NamedInstance" - ) + _named_instances: List[NamedInstance] = sa.orm.relationship("NamedInstance") @property @try_none @@ -228,45 +154,23 @@ def instance(self): @instance.setter def instance(self, instance): - self.__instance = Object.from_object( - instance - ) + self.__instance = Object.from_object(instance) @property def named_instances(self): - return NamedInstancesWrapper( - self - ) + return NamedInstancesWrapper(self) - _info: List[Info] = sa.orm.relationship( - "Info" - ) + _info: List[Info] = sa.orm.relationship("Info") - def __init__( - self, - **kwargs - ): - super().__init__( - **kwargs - ) + def __init__(self, **kwargs): + super().__init__(**kwargs) - max_log_likelihood = sa.Column( - sa.Float - ) + max_log_likelihood = sa.Column(sa.Float) - parent_id = sa.Column( - sa.String, - sa.ForeignKey( - "fit.id" - ) - ) + parent_id = sa.Column(sa.String, sa.ForeignKey("fit.id")) children: List["Fit"] = sa.orm.relationship( - "Fit", - backref=sa.orm.backref( - 'parent', - remote_side=[id] - ) + "Fit", backref=sa.orm.backref("parent", remote_side=[id]) ) @property @@ -276,13 +180,9 @@ def best_fit(self) -> "Fit": the highest log likelihood. """ if not self.is_grid_search: - raise TypeError( - f"Fit {self.id} is not a grid search" - ) + raise TypeError(f"Fit {self.id} is not a grid search") if len(self.children) == 0: - raise TypeError( - f"Grid search fit {self.id} has no children" - ) + raise TypeError(f"Grid search fit {self.id} has no children") best_fit = None max_log_likelihood = float("-inf") @@ -294,26 +194,14 @@ def best_fit(self) -> "Fit": return best_fit - is_grid_search = sa.Column( - sa.Boolean - ) + is_grid_search = sa.Column(sa.Boolean) - unique_tag = sa.Column( - sa.String - ) - name = sa.Column( - sa.String - ) - path_prefix = sa.Column( - sa.String - ) + unique_tag = sa.Column(sa.String) + name = sa.Column(sa.String) + path_prefix = sa.Column(sa.String) _samples = sa.orm.relationship( - Object, - uselist=False, - foreign_keys=[ - Object.samples_for_id - ] + Object, uselist=False, foreign_keys=[Object.samples_for_id] ) @property @@ -323,29 +211,16 @@ def samples(self) -> Samples: @samples.setter def samples(self, samples): - self._samples = Object.from_object( - samples - ) + self._samples = Object.from_object(samples) @property def info(self): - return { - info.key: info.value - for info - in self._info - } + return {info.key: info.value for info in self._info} @info.setter def info(self, info): if info is not None: - self._info = [ - Info( - key=key, - value=value - ) - for key, value - in info.items() - ] + self._info = [Info(key=key, value=value) for key, value in info.items()] @property @try_none @@ -357,14 +232,9 @@ def model(self) -> AbstractPriorModel: @model.setter def model(self, model: AbstractPriorModel): - self.__model = Object.from_object( - model - ) + self.__model = Object.from_object(model) - pickles: List[Pickle] = sa.orm.relationship( - "Pickle", - lazy="joined" - ) + pickles: List[Pickle] = sa.orm.relationship("Pickle", lazy="joined") def __getitem__(self, item: str): """ @@ -385,10 +255,7 @@ def __getitem__(self, item: str): for p in self.pickles: if p.name == item: return p.value - return getattr( - self, - item - ) + return getattr(self, item) def __contains__(self, item): for p in self.pickles: @@ -396,11 +263,7 @@ def __contains__(self, item): return True return False - def __setitem__( - self, - key: str, - value - ): + def __setitem__(self, key: str, value): """ Add a pickle. @@ -414,32 +277,15 @@ def __setitem__( value A string, bytes or object """ - new = Pickle( - name=key - ) - if isinstance( - value, - (str, bytes) - ): + new = Pickle(name=key) + if isinstance(value, (str, bytes)): new.string = value else: new.value = value - self.pickles = [ - p - for p - in self.pickles - if p.name != key - ] + [ - new - ] + self.pickles = [p for p in self.pickles if p.name != key] + [new] def __delitem__(self, key): - self.pickles = [ - p - for p - in self.pickles - if p.name != key - ] + self.pickles = [p for p in self.pickles if p.name != key] def value(self, name: str): try: @@ -447,38 +293,20 @@ def value(self, name: str): except AttributeError: return None - model_id = sa.Column( - sa.Integer, - sa.ForeignKey( - "object.id" - ) - ) + model_id = sa.Column(sa.Integer, sa.ForeignKey("object.id")) __model = sa.orm.relationship( - "Object", - uselist=False, - backref="fit_model", - foreign_keys=[model_id] + "Object", uselist=False, backref="fit_model", foreign_keys=[model_id] ) - instance_id = sa.Column( - sa.Integer, - sa.ForeignKey( - "object.id" - ) - ) + instance_id = sa.Column(sa.Integer, sa.ForeignKey("object.id")) __instance = sa.orm.relationship( - "Object", - uselist=False, - backref="fit_instance", - foreign_keys=[instance_id] + "Object", uselist=False, backref="fit_instance", foreign_keys=[instance_id] ) @classmethod def all(cls, session): - return session.query( - cls - ).all() + return session.query(cls).all() def __str__(self): return self.id diff --git a/autofit/example/analysis.py b/autofit/example/analysis.py index b0bee1e46..5c3d90cf7 100644 --- a/autofit/example/analysis.py +++ b/autofit/example/analysis.py @@ -44,13 +44,16 @@ def log_likelihood_function(self, instance: af.ModelInstance) -> float: """ xvalues = np.arange(self.data.shape[0]) + model_data_1d = np.zeros(self.data.shape[0]) try: - model_data_1d = sum( - profile.model_data_1d_via_xvalues_from(xvalues=xvalues) for profile in instance - ) + for profile in instance: + try: + model_data_1d += profile.model_data_1d_via_xvalues_from(xvalues=xvalues) + except AttributeError: + pass except TypeError: - model_data_1d = instance.model_data_1d_via_xvalues_from(xvalues=xvalues) + model_data_1d += instance.model_data_1d_via_xvalues_from(xvalues=xvalues) residual_map = self.data - model_data_1d chi_squared_map = (residual_map / self.noise_map) ** 2.0 @@ -83,13 +86,16 @@ def visualize(self, paths: af.DirectoryPaths, instance: af.ModelInstance, during """ xvalues = np.arange(self.data.shape[0]) + model_data_1d = np.zeros(self.data.shape[0]) try: - model_data_1d = sum( - profile.model_data_1d_via_xvalues_from(xvalues=xvalues) for profile in instance - ) + for profile in instance: + try: + model_data_1d += profile.model_data_1d_via_xvalues_from(xvalues=xvalues) + except AttributeError: + pass except TypeError: - model_data_1d = instance.model_data_1d_via_xvalues_from(xvalues=xvalues) + model_data_1d += instance.model_data_1d_via_xvalues_from(xvalues=xvalues) plt.errorbar( x=xvalues, diff --git a/autofit/example/model.py b/autofit/example/model.py index 936b59d15..eb18c9b2c 100644 --- a/autofit/example/model.py +++ b/autofit/example/model.py @@ -1,7 +1,8 @@ import math -import numpy as np from typing import Dict +import numpy as np + from autoconf.dictable import Dictable """ @@ -17,9 +18,9 @@ class Gaussian(Dictable): def __init__( self, - centre:float=0.0, # <- PyAutoFit recognises these constructor arguments - normalization:float=0.1, # <- are the Gaussian`s model parameters. - sigma:float=0.01, + centre: float = 0.0, # <- PyAutoFit recognises these constructor arguments + normalization: float = 0.1, # <- are the Gaussian`s model parameters. + sigma: float = 0.01, ): """ Represents a 1D `Gaussian` profile, which may be treated as a model-component of PyAutoFit the @@ -38,7 +39,7 @@ def __init__( self.normalization = normalization self.sigma = sigma - def model_data_1d_via_xvalues_from(self, xvalues:np.ndarray) -> np.ndarray: + def model_data_1d_via_xvalues_from(self, xvalues: np.ndarray) -> np.ndarray: """ Calculate the normalization of the profile on a 1D grid of Cartesian x coordinates. @@ -56,7 +57,7 @@ def model_data_1d_via_xvalues_from(self, xvalues:np.ndarray) -> np.ndarray: np.exp(-0.5 * np.square(np.divide(transformed_xvalues, self.sigma))), ) - def __call__(self, xvalues:np.ndarray) -> np.ndarray: + def __call__(self, xvalues: np.ndarray) -> np.ndarray: """ For certain graphical models, the `__call__` function is overwritten for producing the model-fit. We include this here so these examples work, but it should not be important for most PyAutoFit users. @@ -82,33 +83,24 @@ def dict(self) -> Dict: """ return super().dict() - def inverse( - self, - y - ): + def inverse(self, y): """ For graphical models, the inverse of the Gaussian is used to test certain aspects of the calculation. """ - a = self.normalization / ( - y * self.sigma * math.sqrt(2 * math.pi) - ) + a = self.normalization / (y * self.sigma * math.sqrt(2 * math.pi)) - b = 2 * math.log( - a - ) - - return self.centre + self.sigma * math.sqrt( - b - ) + b = 2 * math.log(a) + + return self.centre + self.sigma * math.sqrt(b) class Exponential(Dictable): def __init__( self, - centre:float=0.0, # <- PyAutoFit recognises these constructor arguments are the model - normalization:float=0.1, # <- parameters of the Gaussian. - rate:float=0.01, + centre: float = 0.0, # <- PyAutoFit recognises these constructor arguments are the model + normalization: float = 0.1, # <- parameters of the Gaussian. + rate: float = 0.01, ): """ Represents a 1D Exponential profile, which may be treated as a model-component of PyAutoFit the @@ -127,7 +119,7 @@ def __init__( self.normalization = normalization self.rate = rate - def model_data_1d_via_xvalues_from(self, xvalues:np.ndarray) -> np.ndarray: + def model_data_1d_via_xvalues_from(self, xvalues: np.ndarray) -> np.ndarray: """ Calculate the 1D Gaussian profile on a 1D grid of Cartesian x coordinates. @@ -143,7 +135,7 @@ def model_data_1d_via_xvalues_from(self, xvalues:np.ndarray) -> np.ndarray: self.rate, np.exp(-1.0 * self.rate * abs(transformed_xvalues)) ) - def __call__(self, xvalues:np.ndarray) -> np.ndarray: + def __call__(self, xvalues: np.ndarray) -> np.ndarray: """ Calculate the 1D Gaussian profile on a 1D grid of Cartesian x coordinates. @@ -169,4 +161,4 @@ def dict(self) -> Dict: The `Gaussian` type and model parameters as a dictionary. """ - return super().dict() \ No newline at end of file + return super().dict() diff --git a/autofit/graphical/laplace/newton.py b/autofit/graphical/laplace/newton.py index d667ca2eb..b65f6eea2 100644 --- a/autofit/graphical/laplace/newton.py +++ b/autofit/graphical/laplace/newton.py @@ -20,6 +20,7 @@ def gradient_ascent(state: OptimisationState, **kwargs) -> VariableData: def newton_direction(state: OptimisationState, **kwargs) -> VariableData: return state.hessian.ldiv(state.gradient) + def newton_abs_direction(state: OptimisationState, d=1e-6, **kwargs) -> VariableData: posdef = state.hessian.abs().diagonalupdate(state.parameters.full_like(d)) return posdef.ldiv(state.gradient) @@ -34,7 +35,7 @@ def newton_abs_direction(state: OptimisationState, d=1e-6, **kwargs) -> Variable def sr1_update( - state1: OptimisationState, state: OptimisationState, mintol=1e-8, **kwargs + state1: OptimisationState, state: OptimisationState, mintol=1e-8, **kwargs ) -> OptimisationState: yk = VariableData.sub(state1.gradient, state.gradient) dk = VariableData.sub(state1.parameters, state.parameters) @@ -57,7 +58,7 @@ def sr1_update( def diag_sr1_update( - state1: OptimisationState, state: OptimisationState, tol=1e-8, **kwargs + state1: OptimisationState, state: OptimisationState, tol=1e-8, **kwargs ) -> OptimisationState: yk = VariableData.sub(state1.gradient, state.gradient) dk = VariableData.sub(state1.parameters, state.parameters) @@ -76,7 +77,7 @@ def diag_sr1_update( def diag_sr1_update_( - state1: OptimisationState, state: OptimisationState, tol=1e-8, **kwargs + state1: OptimisationState, state: OptimisationState, tol=1e-8, **kwargs ) -> OptimisationState: yk = VariableData.sub(state1.gradient, state.gradient) dk = VariableData.sub(state1.parameters, state.parameters) @@ -99,7 +100,7 @@ def diag_sr1_update_( def diag_sr1_bfgs_update( - state1: OptimisationState, state: OptimisationState, **kwargs + state1: OptimisationState, state: OptimisationState, **kwargs ) -> OptimisationState: yk = VariableData.sub(state1.gradient, state.gradient) dk = VariableData.sub(state1.parameters, state.parameters) @@ -109,9 +110,7 @@ def diag_sr1_bfgs_update( def bfgs1_update( - state1: OptimisationState, - state: OptimisationState, - **kwargs, + state1: OptimisationState, state: OptimisationState, **kwargs, ) -> OptimisationState: """ y_k = g_{k+1} - g{k} @@ -139,9 +138,7 @@ def bfgs1_update( def bfgs_update( - state1: OptimisationState, - state: OptimisationState, - **kwargs, + state1: OptimisationState, state: OptimisationState, **kwargs, ) -> OptimisationState: yk = VariableData.sub(state1.gradient, state.gradient) dk = VariableData.sub(state1.parameters, state.parameters) @@ -158,9 +155,7 @@ def bfgs_update( def quasi_deterministic_update( - state1: OptimisationState, - state: OptimisationState, - **kwargs, + state1: OptimisationState, state: OptimisationState, **kwargs, ) -> OptimisationState: dk = VariableData.sub(state1.parameters, state.parameters) zk = VariableData.sub( @@ -179,9 +174,7 @@ def quasi_deterministic_update( def diag_quasi_deterministic_update( - state1: OptimisationState, - state: OptimisationState, - **kwargs, + state1: OptimisationState, state: OptimisationState, **kwargs, ) -> OptimisationState: dk = VariableData.sub(state1.parameters, state.parameters) zk = VariableData.sub( @@ -191,7 +184,7 @@ def diag_quasi_deterministic_update( zk2 = zk ** 2 zk4 = (zk2 ** 2).sum() alpha = (dk.dot(Bxk.dot(dk)) - zk.dot(Bzk.dot(zk))) / zk4 - state1.det_hessian = Bzk.diagonalupdate(alpha * zk2) + state1.det_hessian = Bzk.diagonalupdate(float(alpha) * zk2) return state1 @@ -202,10 +195,7 @@ def __init__(self, quasi_newton_update, det_quasi_newton_update): self.det_quasi_newton_update = det_quasi_newton_update def __call__( - self, - state1: OptimisationState, - state: OptimisationState, - **kwargs, + self, state1: OptimisationState, state: OptimisationState, **kwargs, ) -> OptimisationState: # Only update estimate if a step has been taken @@ -225,28 +215,28 @@ def __call__( def take_step( - state: OptimisationState, - old_state: Optional[OptimisationState] = None, - *, - search_direction=newton_abs_direction, - calc_line_search=line_search, - search_direction_kws: Optional[Dict[str, Any]] = None, - line_search_kws: Optional[Dict[str, Any]] = None, + state: OptimisationState, + old_state: Optional[OptimisationState] = None, + *, + search_direction=newton_abs_direction, + calc_line_search=line_search, + search_direction_kws: Optional[Dict[str, Any]] = None, + line_search_kws: Optional[Dict[str, Any]] = None, ) -> Tuple[Optional[float], OptimisationState]: state.search_direction = search_direction(state, **(search_direction_kws or {})) return calc_line_search(state, old_state, **(line_search_kws or {})) def take_quasi_newton_step( - state: OptimisationState, - old_state: Optional[OptimisationState] = None, - *, - search_direction=newton_abs_direction, - calc_line_search=line_search, - quasi_newton_update=full_bfgs_update, - search_direction_kws: Optional[Dict[str, Any]] = None, - line_search_kws: Optional[Dict[str, Any]] = None, - quasi_newton_kws: Optional[Dict[str, Any]] = None, + state: OptimisationState, + old_state: Optional[OptimisationState] = None, + *, + search_direction=newton_abs_direction, + calc_line_search=line_search, + quasi_newton_update=full_bfgs_update, + search_direction_kws: Optional[Dict[str, Any]] = None, + line_search_kws: Optional[Dict[str, Any]] = None, + quasi_newton_kws: Optional[Dict[str, Any]] = None, ) -> Tuple[Optional[float], OptimisationState]: """ """ state.search_direction = search_direction(state, **(search_direction_kws or {})) @@ -314,7 +304,7 @@ def ngev_condition(state, old_state, maxgev=10000, **kwargs): def check_stop_conditions( - stepsize, state, old_state, stop_conditions, **stop_kws + stepsize, state, old_state, stop_conditions, **stop_kws ) -> Optional[Tuple[bool, str]]: if stepsize is None: return False, "abnormal termination of line search" @@ -328,20 +318,20 @@ def check_stop_conditions( def optimise_quasi_newton( - state: OptimisationState, - old_state: Optional[OptimisationState] = None, - *, - max_iter=100, - search_direction=newton_abs_direction, - calc_line_search=line_search, - quasi_newton_update=bfgs_update, - stop_conditions=stop_conditions, - search_direction_kws: Optional[Dict[str, Any]] = None, - line_search_kws: Optional[Dict[str, Any]] = None, - quasi_newton_kws: Optional[Dict[str, Any]] = None, - stop_kws: Optional[Dict[str, Any]] = None, - callback: Optional[_OPT_CALLBACK] = None, - **kwargs, + state: OptimisationState, + old_state: Optional[OptimisationState] = None, + *, + max_iter=100, + search_direction=newton_abs_direction, + calc_line_search=line_search, + quasi_newton_update=bfgs_update, + stop_conditions=stop_conditions, + search_direction_kws: Optional[Dict[str, Any]] = None, + line_search_kws: Optional[Dict[str, Any]] = None, + quasi_newton_kws: Optional[Dict[str, Any]] = None, + stop_kws: Optional[Dict[str, Any]] = None, + callback: Optional[_OPT_CALLBACK] = None, + **kwargs, ) -> Tuple[OptimisationState, Status]: success = True updated = False @@ -356,7 +346,9 @@ def optimise_quasi_newton( success, message = stop break - with LogWarnings(logger=_log_projection_warnings, action='always') as caught_warnings: + with LogWarnings( + logger=_log_projection_warnings, action="always" + ) as caught_warnings: try: stepsize, state1 = take_quasi_newton_step( state, diff --git a/autofit/interpolator.py b/autofit/interpolator.py index b291243b9..a5481c60a 100644 --- a/autofit/interpolator.py +++ b/autofit/interpolator.py @@ -1,9 +1,11 @@ import copy from abc import ABC, abstractmethod +from scipy.interpolate import CubicSpline from typing import List, Dict, cast from scipy.stats import stats +from autoconf.dictable import Dictable from autofit.mapper.model import ModelInstance @@ -76,7 +78,7 @@ def __init__(self, path: InterpolatorPath, value: float): self.value = value -class AbstractInterpolator(ABC): +class AbstractInterpolator(Dictable, ABC): def __init__(self, instances: List[ModelInstance]): """ A TimeSeries allows interpolation on any variable. @@ -207,3 +209,14 @@ class LinearInterpolator(AbstractInterpolator): def _interpolate(x, y, value): slope, intercept, r, p, std_err = stats.linregress(x, y) return slope * value + intercept + + +class SplineInterpolator(AbstractInterpolator): + """ + Interpolate data with a piecewise cubic polynomial which is twice continuously differentiable + """ + + @staticmethod + def _interpolate(x, y, value): + f = CubicSpline(x, y) + return f(value) diff --git a/autofit/mapper/model.py b/autofit/mapper/model.py index 867c07664..ffa12f513 100644 --- a/autofit/mapper/model.py +++ b/autofit/mapper/model.py @@ -221,7 +221,35 @@ def direct_tuples_with_type(self, class_type): ) @frozen_cache - def model_tuples_with_type(self, cls, include_zero_dimension=False): + def models_with_type( + self, cls: Union[Type, Tuple[Type, ...]], include_zero_dimension=False, + ) -> List["AbstractModel"]: + """ + Return all models of a given type in the model tree. + + Parameters + ---------- + cls + The type to find instances of + include_zero_dimension + If true, include models with zero dimensions + + Returns + ------- + A list of models of the given type + """ + # noinspection PyTypeChecker + return [ + t[1] + for t in self.model_tuples_with_type( + cls, include_zero_dimension=include_zero_dimension + ) + ] + + @frozen_cache + def model_tuples_with_type( + self, cls: Union[Type, Tuple[Type, ...]], include_zero_dimension=False + ): """ All models of the class in this model which have at least one free parameter, recursively. @@ -241,7 +269,9 @@ def model_tuples_with_type(self, cls, include_zero_dimension=False): return [ (path, model) - for path, model in self.attribute_tuples_with_type(Model) + for path, model in self.attribute_tuples_with_type( + Model, ignore_children=False + ) if issubclass(model.cls, cls) and (include_zero_dimension or model.prior_count > 0) ] @@ -339,14 +369,11 @@ class ModelInstance(AbstractModel): @DynamicAttrs """ - def __init__(self, items=None): + __dictable_type__ = "instance" + + def __init__(self, child_items=None): super().__init__() - if isinstance(items, list): - for i, item in enumerate(items): - self[i] = item - if isinstance(items, dict): - for key, value in items.items(): - self[key] = value + self.child_items = child_items def __eq__(self, other): return self.__dict__ == other.__dict__ @@ -361,6 +388,19 @@ def __getitem__(self, item): def __setitem__(self, key, value): self.__dict__[key] = value + @property + def child_items(self): + return self.dict + + @child_items.setter + def child_items(self, child_items): + if isinstance(child_items, list): + for i, item in enumerate(child_items): + self[i] = item + if isinstance(child_items, dict): + for key, value in child_items.items(): + self[key] = value + def items(self): return self.dict.items() @@ -382,8 +422,26 @@ def values(self): def __len__(self): return len(self.values()) - def as_model(self, model_classes=tuple()): + def as_model( + self, + model_classes: Union[type, Iterable[type]] = tuple(), + excluded_classes: Union[type, Iterable[type]] = tuple(), + ): + """ + Convert this instance to a model + + Parameters + ---------- + model_classes + The classes to convert to models + excluded_classes + The classes to exclude from conversion + + Returns + ------- + A model + """ from autofit.mapper.prior_model.abstract import AbstractPriorModel - return AbstractPriorModel.from_instance(self, model_classes) + return AbstractPriorModel.from_instance(self, model_classes, excluded_classes,) diff --git a/autofit/mapper/model_object.py b/autofit/mapper/model_object.py index 19b01e116..25557fec6 100644 --- a/autofit/mapper/model_object.py +++ b/autofit/mapper/model_object.py @@ -48,7 +48,7 @@ def replacing_for_path(self, path: Tuple[str, ...], value) -> "ModelObject": new = copy.deepcopy(self) obj = new for key in path[:-1]: - obj = getattr(new, key) + obj = getattr(obj, key) setattr(obj, path[-1], value) return new @@ -117,13 +117,22 @@ def from_dict(d): instance = Model(get_class(d.pop("class_path"))) elif type_ == "collection": instance = Collection() - elif type_ == "instance": - cls = get_class(d.pop("class_path")) - instance = object.__new__(cls) elif type_ == "tuple_prior": instance = TuplePrior() + elif type_ == "dict": + return {key: ModelObject.from_dict(value) for key, value in d.items()} + elif type_ == "instance": + d.pop("type") + cls = get_class(d.pop("class_path")) + return cls( + **{key: ModelObject.from_dict(value) for key, value in d.items()} + ) else: - return Prior.from_dict(d) + try: + return Prior.from_dict(d) + except KeyError: + cls = get_class(type_) + instance = object.__new__(cls) d.pop("type") diff --git a/autofit/mapper/prior_model/abstract.py b/autofit/mapper/prior_model/abstract.py index 14ea28aec..9f337972b 100644 --- a/autofit/mapper/prior_model/abstract.py +++ b/autofit/mapper/prior_model/abstract.py @@ -444,9 +444,7 @@ def assert_no_assertions(obj): try: item = copy.copy(source) if isinstance(item, dict): - from autofit.mapper.prior_model.collection import ( - Collection, - ) + from autofit.mapper.prior_model.collection import Collection item = Collection(item) for attribute in path: @@ -1008,13 +1006,20 @@ def random_instance(self, ignore_prior_limits=False): @staticmethod @DynamicRecursionCache() - def from_instance(instance, model_classes=tuple()): + def from_instance( + instance, + model_classes: Union[type, Iterable[type]] = tuple(), + exclude_classes: Union[type, Iterable[type]] = tuple(), + ): """ - Recursively create an prior object model from an object model. + Recursively create a prior object model from an object model. Parameters ---------- model_classes + A tuple of classes that should be converted to a prior model + exclude_classes + A tuple of classes that should not be converted to a prior model instance A dictionary, list, class instance or model instance Returns @@ -1024,12 +1029,18 @@ def from_instance(instance, model_classes=tuple()): """ from autofit.mapper.prior_model import collection + if isinstance(instance, exclude_classes): + return instance if isinstance(instance, (Prior, AbstractPriorModel)): return instance elif isinstance(instance, list): result = collection.Collection( [ - AbstractPriorModel.from_instance(item, model_classes=model_classes) + AbstractPriorModel.from_instance( + item, + model_classes=model_classes, + exclude_classes=exclude_classes, + ) for item in instance ] ) @@ -1042,14 +1053,18 @@ def from_instance(instance, model_classes=tuple()): result, key, AbstractPriorModel.from_instance( - value, model_classes=model_classes + value, + model_classes=model_classes, + exclude_classes=exclude_classes, ), ) elif isinstance(instance, dict): result = collection.Collection( { key: AbstractPriorModel.from_instance( - value, model_classes=model_classes + value, + model_classes=model_classes, + exclude_classes=exclude_classes, ) for key, value in instance.items() } @@ -1064,7 +1079,9 @@ def from_instance(instance, model_classes=tuple()): instance.__class__, **{ key: AbstractPriorModel.from_instance( - value, model_classes=model_classes + value, + model_classes=model_classes, + exclude_classes=exclude_classes, ) for key, value in instance.__dict__.items() if key != "cls" @@ -1072,7 +1089,7 @@ def from_instance(instance, model_classes=tuple()): ) except AttributeError: return instance - if any([isinstance(instance, cls) for cls in model_classes]): + if isinstance(instance, model_classes): return result.as_model() return result diff --git a/autofit/non_linear/abstract_search.py b/autofit/non_linear/abstract_search.py index 1411e67dc..37d319311 100644 --- a/autofit/non_linear/abstract_search.py +++ b/autofit/non_linear/abstract_search.py @@ -22,6 +22,7 @@ FactorApproximation, ) from autofit.graphical.utils import Status +from autofit.mapper.prior_model.collection import Collection from autofit.non_linear.initializer import Initializer from autofit.non_linear.parallel import SneakyPool from autofit.non_linear.paths.abstract import AbstractPaths @@ -30,6 +31,8 @@ from autofit.non_linear.result import Result from autofit.non_linear.timer import Timer from .analysis import Analysis +from .analysis.combined import CombinedResult +from .analysis.indexed import IndexCollectionAnalysis from .paths.null import NullPaths from ..graphical.declarative.abstract import PriorFactor from ..graphical.expectation_propagation import AbstractFactorOptimiser @@ -445,6 +448,75 @@ def resample_figure_of_merit(self): """ return -np.inf + def fit_sequential( + self, + model, + analysis: IndexCollectionAnalysis, + info=None, + pickle_files=None, + log_likelihood_cap=None, + ) -> CombinedResult: + """ + Fit multiple analyses contained within the analysis sequentially. + + This can be useful for avoiding very high dimensional parameter spaces. + + Parameters + ---------- + log_likelihood_cap + analysis + Multiple analyses that are fit sequentially + model + An object that represents possible instances of some model with a + given dimensionality which is the number of free dimensions of the + model. + info + Optional dictionary containing information about the fit that can be loaded by the aggregator. + pickle_files : [str] + Optional list of strings specifying the path and filename of .pickle files, that are copied to each + model-fits pickles folder so they are accessible via the Aggregator. + + Returns + ------- + An object combining the results of each individual optimisation. + + Raises + ------ + AssertionError + If the model has 0 dimensions. + ValueError + If the analysis is not a combined analysis + """ + results = [] + + _paths = self.paths + original_name = self.paths.name or "analysis" + + model = analysis.modify_model(model=model) + + try: + if not isinstance(model, Collection): + model = [model for _ in range(len(analysis.analyses))] + except AttributeError: + raise ValueError( + f"Analysis with type {type(analysis)} is not supported by fit_sequential" + ) + + for i, (model, analysis) in enumerate(zip(model, analysis.analyses)): + self.paths = copy.copy(_paths) + self.paths.name = f"{original_name}/{i}" + results.append( + self.fit( + model=model, + analysis=analysis, + info=info, + pickle_files=pickle_files, + log_likelihood_cap=log_likelihood_cap, + ) + ) + self.paths = _paths + return CombinedResult(results) + def fit( self, model, diff --git a/autofit/non_linear/analysis/combined.py b/autofit/non_linear/analysis/combined.py index dcdff72d9..4bb792222 100644 --- a/autofit/non_linear/analysis/combined.py +++ b/autofit/non_linear/analysis/combined.py @@ -1,5 +1,5 @@ import logging -from typing import Union +from typing import Union, List from autoconf import conf from autofit.mapper.prior.abstract import Prior @@ -13,6 +13,39 @@ logger = logging.getLogger(__name__) +class CombinedResult: + def __init__(self, results: List[Result]): + """ + A `Result` object that is composed of multiple `Result` objects. This is used to combine the results of + multiple `Analysis` objects into a single `Result` object, for example when performing a model-fitting + analysis where there are multiple datasets. + + Parameters + ---------- + results + The list of `Result` objects that are combined into this `CombinedResult` object. + """ + self.child_results = results + + def __getattr__(self, item: str): + """ + Get an attribute of the first `Result` object in the list of `Result` objects. + """ + return getattr(self.child_results[0], item) + + def __iter__(self): + return iter(self.child_results) + + def __len__(self): + return len(self.child_results) + + def __getitem__(self, item: int) -> Result: + """ + Get a `Result` object from the list of `Result` objects. + """ + return self.child_results[item] + + class CombinedAnalysis(Analysis): def __new__(cls, *analyses, **kwargs): from .model_analysis import ModelAnalysis, CombinedModelAnalysis @@ -199,15 +232,15 @@ def func(child_paths, analysis): def make_result(self, samples, model, sigma=1.0, use_errors=True, use_widths=False): child_results = [ analysis.make_result( - samples, model, sigma=1.0, use_errors=True, use_widths=False + samples, + model, + sigma=sigma, + use_errors=use_errors, + use_widths=use_widths, ) for analysis in self.analyses ] - result = self.analyses[0].make_result( - samples=samples, model=model, sigma=1.0, use_errors=True, use_widths=False - ) - result.child_results = child_results - return result + return CombinedResult(child_results) def __len__(self): return len(self.analyses) diff --git a/autofit/non_linear/analysis/indexed.py b/autofit/non_linear/analysis/indexed.py index fd56a148f..da7f9200d 100644 --- a/autofit/non_linear/analysis/indexed.py +++ b/autofit/non_linear/analysis/indexed.py @@ -1,7 +1,7 @@ import logging from .analysis import Analysis -from .combined import CombinedAnalysis +from .combined import CombinedAnalysis, CombinedResult from ..paths.abstract import AbstractPaths from autofit.mapper.prior_model.collection import Collection @@ -80,15 +80,7 @@ def make_result(self, samples, model, sigma=1.0, use_errors=True, use_widths=Fal ) for model, analysis in zip(model, self.analyses) ] - result = self.analyses[0].make_result( - samples=samples, - model=model, - sigma=sigma, - use_errors=use_errors, - use_widths=use_widths, - ) - result.child_results = child_results - return result + return CombinedResult(child_results) def modify_before_fit(self, paths: AbstractPaths, model: Collection): """ diff --git a/autofit/non_linear/grid/sensitivity.py b/autofit/non_linear/grid/sensitivity.py index 99a86d6eb..eaefc20dc 100644 --- a/autofit/non_linear/grid/sensitivity.py +++ b/autofit/non_linear/grid/sensitivity.py @@ -18,12 +18,7 @@ class JobResult(AbstractJobResult): - def __init__( - self, - number: int, - result: Result, - perturbed_result: Result - ): + def __init__(self, number: int, result: Result, perturbed_result: Result): """ The result of a single sensitivity comparison @@ -55,14 +50,14 @@ class Job(AbstractJob): use_instance = False def __init__( - self, - analysis_factory: "AnalysisFactory", - model: AbstractPriorModel, - perturbation_model: AbstractPriorModel, - base_instance: ModelInstance, - perturbation_instance: ModelInstance, - search: NonLinearSearch, - number: int, + self, + analysis_factory: "AnalysisFactory", + model: AbstractPriorModel, + perturbation_model: AbstractPriorModel, + base_instance: ModelInstance, + perturbation_instance: ModelInstance, + search: NonLinearSearch, + number: int, ): """ Job to run non-linear searches comparing how well a model and a model with a perturbation @@ -79,9 +74,7 @@ def __init__( search A non-linear search """ - super().__init__( - number=number - ) + super().__init__(number=number) self.analysis_factory = analysis_factory self.model = model @@ -90,15 +83,9 @@ def __init__( self.base_instance = base_instance self.perturbation_instance = perturbation_instance - self.search = search.copy_with_paths( - search.paths.for_sub_analysis( - "[base]", - ) - ) + self.search = search.copy_with_paths(search.paths.for_sub_analysis("[base]",)) self.perturbed_search = search.copy_with_paths( - search.paths.for_sub_analysis( - "[perturbed]", - ) + search.paths.for_sub_analysis("[perturbed]",) ) @cached_property @@ -126,27 +113,26 @@ def perform(self) -> JobResult: perturbed_result = self.perturbation_model_func(perturbed_model=perturbed_model) return JobResult( - number=self.number, - result=result, - perturbed_result=perturbed_result + number=self.number, result=result, perturbed_result=perturbed_result ) def base_model_func(self): - return self.search.fit( - model=self.model, - analysis=self.analysis - ) + return self.search.fit(model=self.model, analysis=self.analysis) def perturbation_model_func(self, perturbed_model): - return self.perturbed_search.fit( - model=perturbed_model, - analysis=self.analysis - ) + return self.perturbed_search.fit(model=perturbed_model, analysis=self.analysis) class SensitivityResult: - def __init__(self, results: List[JobResult]): + """ + The result of a sensitivity mapping + + Parameters + ---------- + results + The results of each sensitivity job + """ self.results = sorted(results) def __getitem__(self, item): @@ -158,21 +144,41 @@ def __iter__(self): def __len__(self): return len(self.results) + @property + def log_likelihoods_base(self) -> List[float]: + """ + The log likelihoods of the base model for each sensitivity fit + """ + return [result.log_likelihood_base for result in self.results] -class Sensitivity: + @property + def log_likelihoods_perturbed(self) -> List[float]: + """ + The log likelihoods of the perturbed model for each sensitivity fit + """ + return [result.log_likelihood_perturbed for result in self.results] + @property + def log_likelihood_differences(self) -> List[float]: + """ + The log likelihood differences between the base and perturbed models + """ + return [result.log_likelihood_difference for result in self.results] + + +class Sensitivity: def __init__( - self, - base_model: AbstractPriorModel, - perturbation_model: AbstractPriorModel, - simulation_instance, - simulate_function: Callable, - analysis_class: Type[Analysis], - search: NonLinearSearch, - job_cls: ClassVar = Job, - number_of_steps: Union[Tuple[int], int] = 4, - number_of_cores: int = 2, - limit_scale: int = 1, + self, + base_model: AbstractPriorModel, + perturbation_model: AbstractPriorModel, + simulation_instance, + simulate_function: Callable, + analysis_class: Type[Analysis], + search: NonLinearSearch, + job_cls: ClassVar = Job, + number_of_steps: Union[Tuple[int], int] = 4, + number_of_cores: int = 2, + limit_scale: int = 1, ): """ Perform sensitivity mapping to evaluate whether a perturbation @@ -212,9 +218,7 @@ def __init__( A scale of 0.5 means priors have limits smaller than the grid square with width half a grid square. """ - self.logger = logging.getLogger( - f"Sensitivity ({search.name})" - ) + self.logger = logging.getLogger(f"Sensitivity ({search.name})") self.logger.info("Creating") @@ -243,7 +247,9 @@ def step_size(self): The size of a step in any given dimension in hyper space. """ if isinstance(self.number_of_steps, tuple): - return tuple([1 / number_of_steps for number_of_steps in self.number_of_steps]) + return tuple( + [1 / number_of_steps for number_of_steps in self.number_of_steps] + ) return 1 / self.number_of_steps def run(self) -> SensitivityResult: @@ -253,19 +259,20 @@ def run(self) -> SensitivityResult: """ self.logger.info("Running") + self.search.paths.save_unique_tag(is_grid_search=True) + headers = [ "index", *self._headers, "log_likelihood_base", "log_likelihood_perturbed", - "log_likelihood_difference" + "log_likelihood_difference", ] physical_values = list(self._physical_values) results = list() for result in Process.run_jobs( - self._make_jobs(), - number_of_cores=self.number_of_cores + self._make_jobs(), number_of_cores=self.number_of_cores ): if isinstance(result, Exception): raise result @@ -273,17 +280,12 @@ def run(self) -> SensitivityResult: results.append(result) results = sorted(results) - os.makedirs( - self.search.paths.output_path, - exist_ok=True - ) + os.makedirs(self.search.paths.output_path, exist_ok=True) with open(self.results_path, "w+") as f: writer = csv.writer(f) writer.writerow(headers) for result_ in results: - values = physical_values[ - result_.number - ] + values = physical_values[result_.number] writer.writerow( padding(item) for item in [ @@ -292,15 +294,18 @@ def run(self) -> SensitivityResult: result_.log_likelihood_base, result_.log_likelihood_perturbed, result_.log_likelihood_difference, - ]) + ] + ) + + result = SensitivityResult(results) + + self.search.paths.save_object("result", result) return SensitivityResult(results) @property def results_path(self): - return Path( - self.search.paths.output_path - ) / "results.csv" + return Path(self.search.paths.output_path) / "results.csv" @property def _lists(self) -> List[List[float]]: @@ -309,10 +314,7 @@ def _lists(self) -> List[List[float]]: the perturbation_model and create the individual perturbations. """ - return make_lists( - self.perturbation_model.prior_count, - step_size=self.step_size - ) + return make_lists(self.perturbation_model.prior_count, step_size=self.step_size) @property def _physical_values(self) -> List[List[float]]: @@ -321,14 +323,10 @@ def _physical_values(self) -> List[List[float]]: """ return [ [ - prior.value_for( - unit_value + prior.value_for(unit_value) + for prior, unit_value in zip( + self.perturbation_model.priors_ordered_by_id, unit_values ) - for prior, unit_value - in zip( - self.perturbation_model.priors_ordered_by_id, - unit_values - ) ] for unit_values in self._lists ] @@ -350,36 +348,23 @@ def _labels(self) -> Generator[str, None, None]: """ for list_ in self._lists: strings = list() - for value, prior_tuple in zip( - list_, - self.perturbation_model.prior_tuples - ): + for value, prior_tuple in zip(list_, self.perturbation_model.prior_tuples): path, prior = prior_tuple - value = prior.value_for( - value - ) - strings.append( - f"{path}_{value}" - ) + value = prior.value_for(value) + strings.append(f"{path}_{value}") yield "_".join(strings) @property - def _perturbation_instances(self) -> Generator[ - ModelInstance, None, None - ]: + def _perturbation_instances(self) -> Generator[ModelInstance, None, None]: """ A list of instances each of which defines a perturbation to be applied to the image. """ for list_ in self._lists: - yield self.perturbation_model.instance_from_unit_vector( - list_ - ) + yield self.perturbation_model.instance_from_unit_vector(list_) @property - def _perturbation_models(self) -> Generator[ - AbstractPriorModel, None, None - ]: + def _perturbation_models(self) -> Generator[AbstractPriorModel, None, None]: """ A list of models representing a perturbation at each grid square. @@ -395,29 +380,21 @@ def _perturbation_models(self) -> Generator[ prior.value_for(min(1.0, centre + half_step)), ) for centre, prior in zip( - list_, - self.perturbation_model.priors_ordered_by_id + list_, self.perturbation_model.priors_ordered_by_id ) ] yield self.perturbation_model.with_limits(limits) @property - def _searches(self) -> Generator[ - NonLinearSearch, None, None - ]: + def _searches(self) -> Generator[NonLinearSearch, None, None]: """ A list of non-linear searches, each of which is applied to one perturbation. """ for label in self._labels: - yield self._search_instance( - label - ) + yield self._search_instance(label) - def _search_instance( - self, - name_path: str - ) -> NonLinearSearch: + def _search_instance(self, name_path: str) -> NonLinearSearch: """ Create a search instance, distinguished by its name @@ -432,9 +409,7 @@ def _search_instance( """ paths = self.search.paths search_instance = self.search.copy_with_paths( - paths.for_sub_analysis( - name_path, - ) + paths.for_sub_analysis(name_path,) ) return search_instance @@ -446,15 +421,9 @@ def _make_jobs(self) -> Generator[Job, None, None]: Each job fits a perturbed image with the original model and a model which includes a perturbation. """ - for number, ( - perturbation_instance, - perturbation_model, - search - ) in enumerate(zip( - self._perturbation_instances, - self._perturbation_models, - self._searches - )): + for number, (perturbation_instance, perturbation_model, search) in enumerate( + zip(self._perturbation_instances, self._perturbation_models, self._searches) + ): instance = copy(self.instance) instance.perturbation = perturbation_instance @@ -469,16 +438,13 @@ def _make_jobs(self) -> Generator[Job, None, None]: base_instance=self.instance, perturbation_instance=perturbation_instance, search=search, - number=number + number=number, ) class AnalysisFactory: def __init__( - self, - instance, - simulate_function, - analysis_class, + self, instance, simulate_function, analysis_class, ): """ Callable to delay simulation such that it is performed @@ -489,9 +455,5 @@ def __init__( self.analysis_class = analysis_class def __call__(self): - dataset = self.simulate_function( - self.instance - ) - return self.analysis_class( - dataset - ) + dataset = self.simulate_function(self.instance) + return self.analysis_class(dataset) diff --git a/autofit/non_linear/initializer.py b/autofit/non_linear/initializer.py index 3b118b0d0..6c15840e4 100644 --- a/autofit/non_linear/initializer.py +++ b/autofit/non_linear/initializer.py @@ -1,9 +1,10 @@ import configparser import logging +import os import random from abc import ABC, abstractmethod from typing import Dict, Tuple, List -import os + import numpy as np from autofit import exc @@ -81,7 +82,9 @@ def samples_from_model( except exc.FitException: pass - if np.allclose(a=figures_of_merit_list[0], b=figures_of_merit_list[1:]): + if total_points > 1 and np.allclose( + a=figures_of_merit_list[0], b=figures_of_merit_list[1:] + ): raise exc.InitializerException( """ The initial samples all have the same figure of merit (e.g. log likelihood values). diff --git a/autofit/non_linear/nest/dynesty/abstract.py b/autofit/non_linear/nest/dynesty/abstract.py index cf86716f2..9fd639c22 100644 --- a/autofit/non_linear/nest/dynesty/abstract.py +++ b/autofit/non_linear/nest/dynesty/abstract.py @@ -5,7 +5,6 @@ import numpy as np from dynesty import NestedSampler, DynamicNestedSampler -from dynesty.pool import Pool from autoconf import conf from autofit import exc @@ -137,6 +136,8 @@ def _fit( set of accepted samples of the fit. """ + from dynesty.pool import Pool + fitness_function = self.fitness_function_from_model_and_analysis( model=model, analysis=analysis, log_likelihood_cap=log_likelihood_cap, ) @@ -160,7 +161,7 @@ def _fit( if conf.instance["non_linear"]["nest"][self.__class__.__name__][ "parallel" - ]["force_x1_cpu"] or self.kwargs.get("force_x1_cpu"): + ].get("force_x1_cpu") or self.kwargs.get("force_x1_cpu"): raise RuntimeError @@ -375,12 +376,12 @@ def sampler_from( model: AbstractPriorModel, fitness_function, checkpoint_exists: bool, - pool: Optional["Pool"], + pool: Optional, queue_size: Optional[int], ): raise NotImplementedError() - def check_pool(self, uses_pool: bool, pool: Pool): + def check_pool(self, uses_pool: bool, pool): if (uses_pool and pool is None) or (not uses_pool and pool is not None): raise exc.SearchException( diff --git a/autofit/non_linear/nest/dynesty/dynamic.py b/autofit/non_linear/nest/dynesty/dynamic.py index 8bb67feb7..9afa7acbb 100644 --- a/autofit/non_linear/nest/dynesty/dynamic.py +++ b/autofit/non_linear/nest/dynesty/dynamic.py @@ -1,9 +1,6 @@ from __future__ import annotations from typing import TYPE_CHECKING, Optional -if TYPE_CHECKING: - from dynesty.pool import Pool - from dynesty.dynesty import DynamicNestedSampler from autofit.non_linear.nest.dynesty.samples import SamplesDynesty from autofit.mapper.prior_model.abstract import AbstractPriorModel @@ -102,7 +99,7 @@ def sampler_from( model: AbstractPriorModel, fitness_function, checkpoint_exists : bool, - pool: Optional[Pool], + pool: Optional, queue_size: Optional[int] ): """ diff --git a/autofit/non_linear/nest/dynesty/plotter.py b/autofit/non_linear/nest/dynesty/plotter.py index 0bb96294f..d14e884ef 100644 --- a/autofit/non_linear/nest/dynesty/plotter.py +++ b/autofit/non_linear/nest/dynesty/plotter.py @@ -1,47 +1,11 @@ from dynesty import plotting as dyplot -from functools import wraps -import logging from autofit.plot import SamplesPlotter -from autofit.plot.samples_plotters import skip_plot_in_test_mode - -logger = logging.getLogger(__name__) - -def log_value_error(func): - @wraps(func) - def wrapper(self, *args, **kwargs): - - try: - return func(self, *args, **kwargs) - except ValueError: - self.log_plot_exception(func.__name__) - - return wrapper +from autofit.plot.samples_plotters import skip_plot_in_test_mode +from autofit.plot.samples_plotters import log_value_error class DynestyPlotter(SamplesPlotter): - - @staticmethod - def log_plot_exception(plot_name : str): - """ - Plotting the results of a ``dynesty`` model-fit before they have converged on an - accurate estimate of the posterior can lead the ``dynesty`` plotting routines - to raise a ``ValueError``. - - This exception is caught in each of the plotting methods below, and this - function is used to log the behaviour. - - Parameters - ---------- - plot_name - The name of the ``dynesty`` plot which raised a ``ValueError`` - """ - - logger.info( - f"Dynesty unable to produce {plot_name} visual: posterior estimate therefore" - "not yet sufficient for this model-fit is not yet robust enough to do this. Visual" - "should be produced in later update, once posterior estimate is updated." - ) @skip_plot_in_test_mode def boundplot(self, **kwargs): diff --git a/autofit/non_linear/nest/dynesty/static.py b/autofit/non_linear/nest/dynesty/static.py index 8ec31b3ce..c9955c184 100644 --- a/autofit/non_linear/nest/dynesty/static.py +++ b/autofit/non_linear/nest/dynesty/static.py @@ -1,9 +1,6 @@ from __future__ import annotations from typing import TYPE_CHECKING, Optional -if TYPE_CHECKING: - from dynesty.pool import Pool - from dynesty import NestedSampler as StaticSampler from autofit.database.sqlalchemy_ import sa from autofit.mapper.prior_model.abstract import AbstractPriorModel @@ -106,7 +103,7 @@ def sampler_from( model: AbstractPriorModel, fitness_function, checkpoint_exists : bool, - pool: Optional[Pool], + pool: Optional, queue_size: Optional[int] ): """ diff --git a/autofit/non_linear/nest/ultranest/plotter.py b/autofit/non_linear/nest/ultranest/plotter.py index c6d83d431..6adeac060 100644 --- a/autofit/non_linear/nest/ultranest/plotter.py +++ b/autofit/non_linear/nest/ultranest/plotter.py @@ -1,9 +1,18 @@ from autofit.plot import SamplesPlotter +from autofit.plot.samples_plotters import skip_plot_in_test_mode +from autofit.plot.samples_plotters import log_value_error class UltraNestPlotter(SamplesPlotter): + @skip_plot_in_test_mode + @log_value_error def cornerplot(self, **kwargs): + """ + Plots the in-built ``ultranest`` plot ``cornerplot``. + + This figure plots a corner plot of the 1-D and 2-D marginalized posteriors. + """ from ultranest import plot @@ -15,32 +24,38 @@ def cornerplot(self, **kwargs): self.output.to_figure(structure=None, auto_filename="cornerplot") self.close() + @skip_plot_in_test_mode + @log_value_error def runplot(self, **kwargs): + """ + Plots the in-built ``ultranest`` plot ``runplot``. + This figure plots live points, ln(likelihood), ln(weight), and ln(evidence) vs. ln(prior volume). + """ from ultranest import plot - try: - plot.runplot( - results=self.samples.results_internal, - **kwargs - ) - except KeyError: - pass + plot.runplot( + results=self.samples.results_internal, + **kwargs + ) self.output.to_figure(structure=None, auto_filename="runplot") self.close() + @skip_plot_in_test_mode + @log_value_error def traceplot(self, **kwargs): + """ + Plots the in-built ``ultranest`` plot ``traceplot``. + This figure plots traces and marginalized posteriors for each parameter. + """ from ultranest import plot - try: - plot.traceplot( - results=self.samples.results_internal, - **kwargs - ) - except KeyError: - pass + plot.traceplot( + results=self.samples.results_internal, + **kwargs + ) self.output.to_figure(structure=None, auto_filename="traceplot") self.close() \ No newline at end of file diff --git a/autofit/plot/samples_plotters.py b/autofit/plot/samples_plotters.py index f4d1a0d09..93f127cec 100644 --- a/autofit/plot/samples_plotters.py +++ b/autofit/plot/samples_plotters.py @@ -1,9 +1,23 @@ import matplotlib.pyplot as plt from functools import wraps +import logging import os from autofit.plot.output import Output +logger = logging.getLogger(__name__) + +def log_value_error(func): + + @wraps(func) + def wrapper(self, *args, **kwargs): + + try: + return func(self, *args, **kwargs) + except (ValueError, KeyError, AttributeError, AssertionError): + self.log_plot_exception(func.__name__) + + return wrapper def skip_plot_in_test_mode(func): """ @@ -65,6 +79,26 @@ def close(self): if plt.fignum_exists(num=1): plt.close() + def log_plot_exception(self, plot_name : str): + """ + Plotting the results of a ``dynesty`` model-fit before they have converged on an + accurate estimate of the posterior can lead the ``dynesty`` plotting routines + to raise a ``ValueError``. + + This exception is caught in each of the plotting methods below, and this + function is used to log the behaviour. + + Parameters + ---------- + plot_name + The name of the ``dynesty`` plot which raised a ``ValueError`` + """ + + logger.info( + f"{self.__class__.__name__} unable to produce {plot_name} visual: posterior estimate therefore" + "not yet sufficient for this model-fit is not yet robust enough to do this. Visual" + "should be produced in later update, once posterior estimate is updated." + ) class MCMCPlotter(SamplesPlotter): diff --git a/docs/overview/multi_datasets.rst b/docs/overview/multi_datasets.rst index 441664585..4bc2d3e60 100644 --- a/docs/overview/multi_datasets.rst +++ b/docs/overview/multi_datasets.rst @@ -189,6 +189,23 @@ We can again fit this model as per usual: result_list = search.fit(model=model, analysis=analysis) +Individual Sequential Searches +------------------------------ + +The API above is used to create a model with free parameters across ``Analysis`` objects, which are all fit +simultaneously using a summed ``log_likelihood_function`` and single non-linear search. + +Each ``Analysis`` can be fitted one-by-one, using a series of multiple non-linear searches, using +the ``fit_sequential`` method: + +.. code-block:: python + + result_list = search.fit_sequential(model=model, analysis=analysis) + +The benefit of this method is for complex high dimensionality models (e.g. when many parameters are passed +to `` analysis.with_free_parameters``, it breaks the fit down into a series of lower dimensionality non-linear +searches that may convergence on a solution more reliably. + Variable Model With Relationships --------------------------------- diff --git a/docs/requirements.txt b/docs/requirements.txt index 93c7fbbd9..d6e56a19f 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,7 +1,7 @@ corner==2.2.1 decorator>=4.2.1 dill>=0.3.1.1 -dynesty==2.0.2 +dynesty==2.1.0 typing-inspect>=0.4.0 emcee>=3.0.2 gprof2dot==2021.2.21 diff --git a/optional_requirements.txt b/optional_requirements.txt index 529a59a5f..a3ccbe750 100644 --- a/optional_requirements.txt +++ b/optional_requirements.txt @@ -2,4 +2,4 @@ getdist==1.4 jax==0.3.1 jaxlib==0.3.0 ultranest==3.5.5 -zeus-mcmc==2.4.1 +zeus-mcmc==2.5.4 diff --git a/requirements.txt b/requirements.txt index e12c665b6..f8bca9fa7 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,7 +1,7 @@ corner==2.2.1 decorator>=4.2.1 dill>=0.3.1.1 -dynesty==2.0.2 +dynesty==2.1.0 typing-inspect>=0.4.0 emcee>=3.1.3 matplotlib diff --git a/test_autofit/analysis/test_free_parameter.py b/test_autofit/analysis/test_free_parameter.py index 3dc430541..80cc6c1f4 100644 --- a/test_autofit/analysis/test_free_parameter.py +++ b/test_autofit/analysis/test_free_parameter.py @@ -14,13 +14,13 @@ def test_copy(): assert collection.prior_count == model.prior_count -def test_log_likelihood( - modified, - combined_analysis -): - assert combined_analysis.log_likelihood_function( - modified.instance_from_prior_medians() - ) == 2 +def test_log_likelihood(modified, combined_analysis): + assert ( + combined_analysis.log_likelihood_function( + modified.instance_from_prior_medians() + ) + == 2 + ) def test_analyses_example(Analysis): @@ -33,98 +33,59 @@ def test_analyses_example(Analysis): ]: copy = model.copy() copy.centre = prior - analyses.append( - Analysis() - ) + analyses.append(Analysis()) -@pytest.fixture( - name="combined_analysis" -) +@pytest.fixture(name="combined_analysis") def make_combined_analysis(model, Analysis): - return (Analysis() + Analysis()).with_free_parameters( - model.centre - ) + return (Analysis() + Analysis()).with_free_parameters(model.centre) def test_multiple_free_parameters(model, Analysis): combined_analysis = (Analysis() + Analysis()).with_free_parameters( - model.centre, - model.sigma + model.centre, model.sigma ) first, second = combined_analysis.modify_model(model) assert first.centre is not second.centre assert first.sigma is not second.sigma -def test_add_free_parameter( - combined_analysis -): - assert isinstance( - combined_analysis, - FreeParameterAnalysis - ) +def test_add_free_parameter(combined_analysis): + assert isinstance(combined_analysis, FreeParameterAnalysis) -@pytest.fixture( - name="modified" -) -def make_modified( - model, - combined_analysis -): +@pytest.fixture(name="modified") +def make_modified(model, combined_analysis): return combined_analysis.modify_model(model) -def test_modify_model( - modified -): +def test_modify_model(modified): assert isinstance(modified, af.Collection) assert len(modified) == 2 -def test_modified_models( - modified -): +def test_modified_models(modified): first, second = modified - assert isinstance( - first.sigma, - af.Prior - ) + assert isinstance(first.sigma, af.Prior) assert first.sigma == second.sigma assert first.centre != second.centre -@pytest.fixture( - name="result" -) +@pytest.fixture(name="result") def make_result( - combined_analysis, - model, + combined_analysis, model, ): optimizer = MockOptimizer() - return optimizer.fit( - model, - combined_analysis - ) + return optimizer.fit(model, combined_analysis) -@pytest.fixture( - autouse=True -) +@pytest.fixture(autouse=True) def do_remove_output(remove_output): yield remove_output() -def test_result_type(result, Result): - assert isinstance(result, Result) - - for result_ in result: - assert isinstance(result_, Result) - - def test_integration(result): result_1, result_2 = result @@ -133,24 +94,16 @@ def test_integration(result): def test_tuple_prior(model, Analysis): - model.centre = af.TuplePrior( - centre_0=af.UniformPrior() - ) - combined = (Analysis() + Analysis()).with_free_parameters( - model.centre - ) + model.centre = af.TuplePrior(centre_0=af.UniformPrior()) + combined = (Analysis() + Analysis()).with_free_parameters(model.centre) first, second = combined.modify_model(model) assert first.centre.centre_0 != second.centre.centre_0 def test_prior_model(model, Analysis): - model = af.Collection( - model=model - ) - combined = (Analysis() + Analysis()).with_free_parameters( - model.model - ) + model = af.Collection(model=model) + combined = (Analysis() + Analysis()).with_free_parameters(model.model) modified = combined.modify_model(model) first = modified[0].model second = modified[1].model @@ -162,12 +115,7 @@ def test_prior_model(model, Analysis): def test_split_samples(modified): samples = af.Samples( - modified, - af.Sample.from_lists( - modified, - [[1, 2, 3, 4]], - [1], [1], [1] - ), + modified, af.Sample.from_lists(modified, [[1, 2, 3, 4]], [1], [1], [1]), ) combined = samples.max_log_likelihood() diff --git a/test_autofit/config/general.yaml b/test_autofit/config/general.yaml index b547327a4..90c934f6e 100644 --- a/test_autofit/config/general.yaml +++ b/test_autofit/config/general.yaml @@ -3,11 +3,13 @@ analysis: hpc: hpc_mode: false # If True, use HPC mode, which disables GUI visualization, logging to screen and other settings which are not suited to running on a super computer. iterations_per_update: 5000 # The number of iterations between every update (visualization, results output, etc) in HPC mode. +inversion: + check_reconstruction: true # If True, the inversion's reconstruction is checked to ensure the solution of a meshs's mapper is not an invalid solution where the values are all the same. + reconstruction_vmax_factor: 0.5 # Plots of an Inversion's reconstruction use the reconstructed data's bright value multiplied by this factor. model: ignore_prior_limits: false # If ``True`` the limits applied to priors will be ignored, where limits set upper / lower limits. This stops PriorLimitException's from being raised. output: force_pickle_overwrite: false # If True pickle files output by a search (e.g. samples.pickle) are recreated when a new model-fit is performed. - identifier_version: 4 info_whitespace_length: 80 # Length of whitespace between the parameter names and values in the model.info / result.info log_level: INFO # The level of information output by logging. log_to_file: false # If True, outputs the non-linear search log to a file (and not printed to screen). diff --git a/test_autofit/conftest.py b/test_autofit/conftest.py index 4cb8a84ca..650dc80a4 100644 --- a/test_autofit/conftest.py +++ b/test_autofit/conftest.py @@ -13,37 +13,24 @@ from autofit import fixtures from autofit.database.model import sa -if sys.platform == 'darwin': - multiprocessing.set_start_method('forkserver') +if sys.platform == "darwin": + multiprocessing.set_start_method("forkserver") directory = Path(__file__).parent -@pytest.fixture( - name="test_directory", - scope="session" -) +@pytest.fixture(name="test_directory", scope="session") def make_test_directory(): return directory -@pytest.fixture( - name="output_directory", - scope="session" -) -def make_output_directory( - test_directory -): +@pytest.fixture(name="output_directory", scope="session") +def make_output_directory(test_directory): return test_directory / "output" -@pytest.fixture( - name="remove_output", - scope="session" -) -def make_remove_output( - output_directory -): +@pytest.fixture(name="remove_output", scope="session") +def make_remove_output(output_directory): def remove_output(): try: for item in os.listdir(output_directory): @@ -51,27 +38,18 @@ def remove_output(): item_path = output_directory / item if item_path.is_dir(): shutil.rmtree( - item_path, - ignore_errors=True, + item_path, ignore_errors=True, ) else: - os.remove( - item_path - ) - except FileExistsError: + os.remove(item_path) + except (FileExistsError, FileNotFoundError): pass return remove_output -@pytest.fixture( - autouse=True, - scope="session" -) -def do_remove_output( - output_directory, - remove_output -): +@pytest.fixture(autouse=True, scope="session") +def do_remove_output(output_directory, remove_output): yield remove_output() @@ -93,7 +71,7 @@ def make_plot_patch(monkeypatch): @pytest.fixture(name="session") def make_session(): - engine = sa.create_engine('sqlite://') + engine = sa.create_engine("sqlite://") session = sa.orm.sessionmaker(bind=engine)() db.Base.metadata.create_all(engine) yield session @@ -101,10 +79,7 @@ def make_session(): engine.dispose() -@pytest.fixture( - autouse=True, - scope="session" -) +@pytest.fixture(autouse=True, scope="session") def remove_logs(): yield for d, _, files in os.walk(directory): @@ -117,13 +92,11 @@ def remove_logs(): def set_config_path(): conf.instance.push( new_path=path.join(directory, "config"), - output_path=path.join(directory, "output") + output_path=path.join(directory, "output"), ) -@pytest.fixture( - name="model_gaussian_x1" -) +@pytest.fixture(name="model_gaussian_x1") def make_model_gaussian_x1(): return fixtures.make_model_gaussian_x1() diff --git a/test_autofit/mapper/model/test_json.py b/test_autofit/mapper/model/test_json.py index 74619c54e..ebbd4f4ca 100644 --- a/test_autofit/mapper/model/test_json.py +++ b/test_autofit/mapper/model/test_json.py @@ -5,54 +5,37 @@ import autofit as af -@pytest.fixture( - name="model_dict" -) + +@pytest.fixture(name="model_dict") def make_model_dict(): return { "type": "model", "class_path": "autofit.example.model.Gaussian", - "centre": {'lower_limit': 0.0, 'type': 'Uniform', 'upper_limit': 2.0}, - "normalization": {'lower_limit': 0.0, 'type': 'Uniform', 'upper_limit': 1.0}, - "sigma": {'lower_limit': 0.0, 'type': 'Uniform', 'upper_limit': 1.0}, + "centre": {"lower_limit": 0.0, "type": "Uniform", "upper_limit": 2.0}, + "normalization": {"lower_limit": 0.0, "type": "Uniform", "upper_limit": 1.0}, + "sigma": {"lower_limit": 0.0, "type": "Uniform", "upper_limit": 1.0}, } -@pytest.fixture( - name="instance_dict" -) +@pytest.fixture(name="instance_dict") def make_instance_dict(): return { "type": "instance", "class_path": "autofit.example.model.Gaussian", "centre": 0.0, "normalization": 0.1, - "sigma": 0.01 + "sigma": 0.01, } -@pytest.fixture( - name="collection_dict" -) -def make_collection_dict( - model_dict -): - return { - "gaussian": model_dict, - "type": "collection" - } +@pytest.fixture(name="collection_dict") +def make_collection_dict(model_dict): + return {"gaussian": model_dict, "type": "collection"} -@pytest.fixture( - name="model" -) +@pytest.fixture(name="model") def make_model(): - return af.Model( - af.Gaussian, - centre=af.UniformPrior( - upper_limit=2.0 - ) - ) + return af.Model(af.Gaussian, centre=af.UniformPrior(upper_limit=2.0)) class TestTuple: @@ -61,118 +44,61 @@ def test_tuple_prior(self): tuple_prior.tup_0 = 0 tuple_prior.tup_1 = 1 - result = af.Model.from_dict( - tuple_prior.dict() - ) - assert isinstance( - result, - af.TuplePrior - ) + result = af.Model.from_dict(tuple_prior.dict()) + assert isinstance(result, af.TuplePrior) def test_model_with_tuple(self): tuple_model = af.Model(af.m.MockWithTuple) tuple_model.instance_from_prior_medians() model_dict = tuple_model.dict() - model = af.Model.from_dict( - model_dict - ) + model = af.Model.from_dict(model_dict) instance = model.instance_from_prior_medians() assert instance.tup == (0.5, 0.5) class TestFromDict: - def test_model_from_dict( - self, - model_dict - ): - model = af.Model.from_dict( - model_dict - ) + def test_model_from_dict(self, model_dict): + model = af.Model.from_dict(model_dict) assert model.cls == af.Gaussian assert model.prior_count == 3 assert model.centre.upper_limit == 2.0 - def test_instance_from_dict( - self, - instance_dict - ): - instance = af.Model.from_dict( - instance_dict - ) - assert isinstance( - instance, - af.Gaussian - ) + def test_instance_from_dict(self, instance_dict): + instance = af.Model.from_dict(instance_dict) + assert isinstance(instance, af.Gaussian) assert instance.centre == 0.0 assert instance.normalization == 0.1 assert instance.sigma == 0.01 - def test_collection_from_dict( - self, - collection_dict - ): - collection = af.Model.from_dict( - collection_dict - ) - assert isinstance( - collection, - af.Collection - ) + def test_collection_from_dict(self, collection_dict): + collection = af.Model.from_dict(collection_dict) + assert isinstance(collection, af.Collection) assert len(collection) == 1 class TestToDict: - def test_model_priors( - self, - model, - model_dict - ): + def test_model_priors(self, model, model_dict): assert model.dict() == model_dict - def test_model_floats( - self, - instance_dict - ): - model = af.Model( - af.Gaussian, - centre=0.0, - normalization=0.1, - sigma=0.01 - ) + def test_model_floats(self, instance_dict): + model = af.Model(af.Gaussian, centre=0.0, normalization=0.1, sigma=0.01) assert model.dict() == instance_dict - def test_collection( - self, - model, - collection_dict - ): - collection = af.Collection( - gaussian=model - ) + def test_collection(self, model, collection_dict): + collection = af.Collection(gaussian=model) assert collection.dict() == collection_dict - def test_collection_instance( - self, - instance_dict - ): - collection = af.Collection( - gaussian=af.Gaussian() - ) - assert collection.dict() == { - "gaussian": instance_dict, - "type": "collection" - } + def test_collection_instance(self, instance_dict): + collection = af.Collection(gaussian=af.Gaussian()) + assert collection.dict() == {"gaussian": instance_dict, "type": "collection"} class TestFromJson: - def test__from_json(self, model_dict): - model = af.Model.from_dict( - model_dict - ) + model = af.Model.from_dict(model_dict) model_file = Path(__file__).parent / "model.json" @@ -190,4 +116,4 @@ def test__from_json(self, model_dict): assert model.prior_count == 3 assert model.centre.upper_limit == 2.0 - os.remove(model_file) \ No newline at end of file + os.remove(model_file) diff --git a/test_autofit/mapper/model/test_model_instance.py b/test_autofit/mapper/model/test_model_instance.py index 61e1227a9..05e29ee37 100644 --- a/test_autofit/mapper/model/test_model_instance.py +++ b/test_autofit/mapper/model/test_model_instance.py @@ -2,6 +2,7 @@ import autofit as af + @pytest.fixture(name="mock_components_1") def make_mock_components_1(): return af.m.MockComponents() @@ -42,12 +43,23 @@ def test_as_model(self, instance): def test_object_for_path(self, instance, mock_components_1, mock_components_2): assert instance.object_for_path(("mock_components_2",)) is mock_components_2 - assert instance.object_for_path(("sub", "mock_components_1")) is mock_components_1 - assert instance.object_for_path(("sub", "sub", "mock_components_1")) is mock_components_1 - setattr(instance.object_for_path(("mock_components_2",)), "mock_components", mock_components_1) + assert ( + instance.object_for_path(("sub", "mock_components_1")) is mock_components_1 + ) + assert ( + instance.object_for_path(("sub", "sub", "mock_components_1")) + is mock_components_1 + ) + setattr( + instance.object_for_path(("mock_components_2",)), + "mock_components", + mock_components_1, + ) assert mock_components_2.mock_components is mock_components_1 - def test_path_instance_tuples_for_class(self, instance, mock_components_1, mock_components_2): + def test_path_instance_tuples_for_class( + self, instance, mock_components_1, mock_components_2 + ): result = instance.path_instance_tuples_for_class(af.m.MockComponents) assert result[0] == (("mock_components_2",), mock_components_2) assert result[1] == (("sub", "mock_components_1"), mock_components_1) @@ -59,8 +71,7 @@ def test_simple_model(self): mapper.mock_class = af.m.MockClassx2 model_map = mapper.instance_from_unit_vector( - [1.0, 1.0], - ignore_prior_limits=True + [1.0, 1.0], ignore_prior_limits=True ) assert isinstance(model_map.mock_class, af.m.MockClassx2) @@ -74,8 +85,7 @@ def test_two_object_model(self): mapper.mock_class_2 = af.m.MockClassx2 model_map = mapper.instance_from_unit_vector( - [1.0, 0.0, 0.0, 1.0], - ignore_prior_limits=True + [1.0, 0.0, 0.0, 1.0], ignore_prior_limits=True ) assert isinstance(model_map.mock_class_1, af.m.MockClassx2) @@ -154,3 +164,42 @@ def test_match_tuple(self): assert model_map.mock_profile.one_tuple == (1.0, 1.0) assert model_map.mock_profile.two == 0.0 + + +class Child(af.Gaussian): + pass + + +class Child2(af.Gaussian): + pass + + +@pytest.fixture(name="exclude_instance") +def make_excluded_instance(): + return af.ModelInstance( + {"child": Child(), "gaussian": af.Gaussian(), "child2": Child2(),} + ) + + +def test_single_argument(exclude_instance): + model = exclude_instance.as_model(af.Gaussian) + + assert isinstance(model.gaussian, af.Model) + assert isinstance(model.child, af.Model) + assert isinstance(model.child2, af.Model) + + +def test_filter_child(exclude_instance): + model = exclude_instance.as_model(af.Gaussian, excluded_classes=Child) + + assert isinstance(model.gaussian, af.Model) + assert not isinstance(model.child, af.Model) + assert isinstance(model.child2, af.Model) + + +def test_filter_multiple(exclude_instance): + model = exclude_instance.as_model(af.Gaussian, excluded_classes=(Child, Child2),) + + assert isinstance(model.gaussian, af.Model) + assert not isinstance(model.child, af.Model) + assert not isinstance(model.child2, af.Model) diff --git a/test_autofit/mapper/test_has.py b/test_autofit/mapper/test_has.py index 226591572..b5cd8803b 100644 --- a/test_autofit/mapper/test_has.py +++ b/test_autofit/mapper/test_has.py @@ -1,130 +1,103 @@ +import pytest + import autofit as af +from autofit.example import Exponential + class GaussianChild(af.Gaussian): pass def test_inheritance(): - collection = af.Collection( - first=af.Model( - GaussianChild - ), - second=GaussianChild() - ) + collection = af.Collection(first=af.Model(GaussianChild), second=GaussianChild()) - assert collection.has_instance( - af.Gaussian - ) - assert collection.has_model( - af.Gaussian - ) - - -def test_embedded(): - collection = af.Collection( - model=af.Model( - af.Gaussian, - centre=GaussianChild - ) - ) - assert collection.has_model( - GaussianChild - ) + assert collection.has_instance(af.Gaussian) + assert collection.has_model(af.Gaussian) def test_no_free_parameters(): collection = af.Collection( - gaussian=af.Model( - af.Gaussian, - centre=1.0, - normalization=1.0, - sigma=1.0, - ) + gaussian=af.Model(af.Gaussian, centre=1.0, normalization=1.0, sigma=1.0,) ) assert collection.prior_count == 0 - assert collection.has_model( - af.Gaussian - ) is False + assert collection.has_model(af.Gaussian) is False def test_instance(): - collection = af.Collection( - gaussian=af.Gaussian() - ) + collection = af.Collection(gaussian=af.Gaussian()) - assert collection.has_instance( - af.Gaussian - ) is True - assert collection.has_model( - af.Gaussian - ) is False + assert collection.has_instance(af.Gaussian) is True + assert collection.has_model(af.Gaussian) is False def test_model(): - collection = af.Collection( - gaussian=af.Model( - af.Gaussian - ) - ) + collection = af.Collection(gaussian=af.Model(af.Gaussian)) - assert collection.has_model( - af.Gaussian - ) is True - assert collection.has_instance( - af.Gaussian - ) is False + assert collection.has_model(af.Gaussian) is True + assert collection.has_instance(af.Gaussian) is False def test_both(): - collection = af.Collection( - gaussian=af.Model( - af.Gaussian - ), - gaussian_2=af.Gaussian() - ) + collection = af.Collection(gaussian=af.Model(af.Gaussian), gaussian_2=af.Gaussian()) - assert collection.has_model( - af.Gaussian - ) is True - assert collection.has_instance( - af.Gaussian - ) is True + assert collection.has_model(af.Gaussian) is True + assert collection.has_instance(af.Gaussian) is True def test_embedded(): - collection = af.Collection( - gaussian=af.Model( - af.Gaussian, - centre=af.Gaussian() - ), - ) + collection = af.Collection(gaussian=af.Model(af.Gaussian, centre=af.Gaussian()),) - assert collection.has_model( - af.Gaussian - ) is True - assert collection.has_instance( - af.Gaussian - ) is True + assert collection.has_model(af.Gaussian) is True + assert collection.has_instance(af.Gaussian) is True def test_is_only_model(): collection = af.Collection( - gaussian=af.Model( - af.Gaussian - ), - gaussian_2=af.Model( - af.Gaussian - ) + gaussian=af.Model(af.Gaussian), gaussian_2=af.Model(af.Gaussian) ) - assert collection.is_only_model( - af.Gaussian - ) is True + assert collection.is_only_model(af.Gaussian) is True + + collection.other = af.Model(af.m.MockClassx2) + + assert collection.is_only_model(af.Gaussian) is False + - collection.other = af.Model( - af.m.MockClassx2 +@pytest.fixture(name="collection") +def make_collection(): + return af.Collection( + gaussian=af.Model(af.Gaussian), exponential=af.Model(Exponential), ) - assert collection.is_only_model( - af.Gaussian - ) is False + +def test_models(collection): + assert collection.models_with_type(af.Gaussian) == [collection.gaussian] + + +def test_multiple_types(collection): + assert collection.models_with_type((af.Gaussian, Exponential)) == [ + collection.gaussian, + collection.exponential, + ] + + +class Galaxy: + def __init__(self, child): + self.child = child + + +def test_instances_with_type(): + model = af.Collection(galaxy=Galaxy(child=af.Model(af.Gaussian))) + assert model.models_with_type(af.Gaussian) == [model.galaxy.child] + + +class DelaunayBrightnessImage: + pass + + +def test_model_attributes_with_type(): + mesh = af.Model(DelaunayBrightnessImage) + mesh.pixels = af.UniformPrior(lower_limit=5.0, upper_limit=10.0) + pixelization = af.Model(af.Gaussian, mesh=mesh) + + assert pixelization.models_with_type(DelaunayBrightnessImage) == [mesh] diff --git a/test_autofit/non_linear/grid/test_sensitivity/test_results.py b/test_autofit/non_linear/grid/test_sensitivity/test_results.py new file mode 100644 index 000000000..7a9a79bb4 --- /dev/null +++ b/test_autofit/non_linear/grid/test_sensitivity/test_results.py @@ -0,0 +1,30 @@ +from autofit.non_linear.grid.sensitivity import SensitivityResult, JobResult +import pytest + + +class Result: + def __init__(self, log_likelihood): + self.log_likelihood = log_likelihood + + +@pytest.fixture(name="job_result") +def make_result(): + return JobResult( + number=0, + result=Result(log_likelihood=1.0), + perturbed_result=Result(log_likelihood=2.0), + ) + + +def test_job_result(job_result): + assert job_result.log_likelihood_base == 1.0 + assert job_result.log_likelihood_perturbed == 2.0 + assert job_result.log_likelihood_difference == 1.0 + + +def test_result(job_result): + result = SensitivityResult(results=[job_result]) + + assert result.log_likelihoods_base == [1.0] + assert result.log_likelihoods_perturbed == [2.0] + assert result.log_likelihood_differences == [1.0] diff --git a/test_autofit/non_linear/grid/test_sensitivity/test_run.py b/test_autofit/non_linear/grid/test_sensitivity/test_run.py index 163f3bfb7..d133faa41 100644 --- a/test_autofit/non_linear/grid/test_sensitivity/test_run.py +++ b/test_autofit/non_linear/grid/test_sensitivity/test_run.py @@ -3,24 +3,21 @@ from autoconf.conf import with_config -@with_config( - "general", - "model", - "ignore_prior_limits", - value=True -) -def test_sensitivity( - sensitivity -): +@with_config("general", "model", "ignore_prior_limits", value=True) +def test_sensitivity(sensitivity): results = sensitivity.run() assert len(results) == 8 - path = Path( - sensitivity.search.paths.output_path - ) / "results.csv" + output_path = Path(sensitivity.search.paths.output_path) + + assert (output_path / ".is_grid_search").exists() + path = output_path / "results.csv" assert path.exists() with open(path) as f: all_lines = set(f) - assert 'index,centre,normalization,sigma,log_likelihood_base,log_likelihood_perturbed,log_likelihood_difference\n' in all_lines - assert ' 0, 0.25, 0.25, 0.25, 2.0, 2.0, 0.0\n' in all_lines - assert ' 1, 0.25, 0.25, 0.75, 2.0, 2.0, 0.0\n' in all_lines + assert ( + "index,centre,normalization,sigma,log_likelihood_base,log_likelihood_perturbed,log_likelihood_difference\n" + in all_lines + ) + assert " 0, 0.25, 0.25, 0.25, 2.0, 2.0, 0.0\n" in all_lines + assert " 1, 0.25, 0.25, 0.75, 2.0, 2.0, 0.0\n" in all_lines diff --git a/test_autofit/non_linear/test_fit_sequential.py b/test_autofit/non_linear/test_fit_sequential.py new file mode 100644 index 000000000..7f1d3faa0 --- /dev/null +++ b/test_autofit/non_linear/test_fit_sequential.py @@ -0,0 +1,75 @@ +import os +from pathlib import Path +from random import random + +import pytest + +import autofit as af +from autofit.non_linear.analysis.combined import CombinedResult + + +class Analysis(af.Analysis): + def log_likelihood_function(self, instance): + return -random() + + +@pytest.fixture(name="search") +def make_search(): + return af.LBFGS(name="test_lbfgs") + + +@pytest.fixture(name="model") +def make_model(): + return af.Model(af.Gaussian) + + +@pytest.fixture(name="analysis") +def make_analysis(): + return Analysis() + + +def count_output(paths): + return len(os.listdir(Path(str(paths)).parent)) + + +def test_with_model(analysis, model, search): + combined_analysis = sum([analysis.with_model(model) for _ in range(10)]) + + result = search.fit_sequential(model=model, analysis=combined_analysis) + + assert count_output(search.paths) == 10 + assert len(result.child_results) == 10 + + +@pytest.fixture(name="combined_analysis") +def make_combined_analysis(analysis): + return sum([analysis for _ in range(10)]) + + +def test_combined_analysis(combined_analysis, model, search): + search.fit_sequential(model=model, analysis=combined_analysis) + + assert count_output(search.paths) == 10 + + +def test_with_free_parameter(combined_analysis, model, search): + combined_analysis = combined_analysis.with_free_parameters([model.centre]) + search.fit_sequential( + model=model, analysis=combined_analysis, + ) + + assert count_output(search.paths) == 10 + + +def test_singular_analysis(analysis, model, search): + with pytest.raises(ValueError): + search.fit_sequential(model=model, analysis=analysis) + + +# noinspection PyTypeChecker +def test_index_combined_result(): + combined_result = CombinedResult([0, 1, 2]) + + assert combined_result[0] == 0 + assert combined_result[1] == 1 + assert combined_result[2] == 2 diff --git a/test_autofit/test_interpolator.py b/test_autofit/test_interpolator.py index 2e0de50bd..87953636a 100644 --- a/test_autofit/test_interpolator.py +++ b/test_autofit/test_interpolator.py @@ -1,43 +1,76 @@ import pytest import autofit as af +from autoconf.dictable import as_dict def test_trivial(): - instance = af.ModelInstance(items=dict(t=1)) - time_series = af.LinearInterpolator([instance]) + instance = af.ModelInstance(dict(t=1)) + linear_interpolator = af.LinearInterpolator([instance]) - result = time_series[time_series.t == 1] + result = linear_interpolator[linear_interpolator.t == 1] assert result is instance -@pytest.fixture(name="time_series") -def make_time_series(): - return af.LinearInterpolator( - [ - af.ModelInstance( - items=dict( - t=1.0, - gaussian=af.Gaussian(centre=0.0, normalization=1.0, sigma=-1.0), - ) - ), +@pytest.fixture(name="model_instance") +def make_model_instance(): + return af.ModelInstance( + dict(t=1.0, gaussian=af.Gaussian(centre=0.0, normalization=1.0, sigma=-1.0),) + ) + + +@pytest.fixture(name="instances") +def make_instances(model_instance): + return [ + model_instance, + af.ModelInstance( + dict( + t=2.0, gaussian=af.Gaussian(centre=1.0, normalization=2.0, sigma=-2.0), + ) + ), + ] + + +@pytest.fixture(name="linear_interpolator") +def make_linear_interpolator(instances): + return af.LinearInterpolator(instances) + + +def test_spline_interpolator(instances): + interpolator = af.SplineInterpolator(instances) + + result = interpolator[interpolator.t == 1.5] + + assert result.t == 1.5 + assert result.gaussian.centre == 0.5 + + +def test_smooth_spline_interpolator(instances): + interpolator = af.SplineInterpolator( + instances + + [ af.ModelInstance( - items=dict( - t=2.0, - gaussian=af.Gaussian(centre=1.0, normalization=2.0, sigma=-2.0), + dict( + t=3.0, + gaussian=af.Gaussian(centre=4.0, normalization=3.0, sigma=-3.0), ) ), ] ) + result = interpolator[interpolator.t == 1.5] + + assert result.t == 1.5 + assert result.gaussian.centre < 0.5 + @pytest.mark.parametrize( "t, centre", [(0.0, -1.0), (1.0, 0.0), (1.5, 0.5), (2.0, 1.0), (3.0, 2.0)] ) -def test_linear(t, centre, time_series): +def test_linear(t, centre, linear_interpolator): - result = time_series[time_series.t == t] + result = linear_interpolator[linear_interpolator.t == t] assert result.t == t assert result.gaussian.centre == centre @@ -46,10 +79,95 @@ def test_linear(t, centre, time_series): @pytest.mark.parametrize("sigma", [-0.5, 0.0, 0.5, 1.0]) -def test_alternate_attribute(time_series, sigma): +def test_alternate_attribute(linear_interpolator, sigma): - result = time_series[time_series.gaussian.sigma == sigma] + result = linear_interpolator[linear_interpolator.gaussian.sigma == sigma] assert result.gaussian.sigma == sigma assert result.t == -sigma assert result.gaussian.normalization == -sigma + + +def test_deeper_attributes(): + collection = af.Collection( + model=af.Model(af.Gaussian, centre=0.0, normalization=1.0, sigma=-1.0,) + ) + + instance_1 = af.Collection( + t=1.0, collection=collection, + ).instance_from_prior_medians() + instance_2 = af.Collection( + t=2.0, collection=collection, + ).instance_from_prior_medians() + + linear_interpolator = af.LinearInterpolator([instance_1, instance_2]) + + result = linear_interpolator[linear_interpolator.t == 1.5] + + assert result.collection.model.centre == 0.0 + assert result.collection.model.normalization == 1.0 + assert result.collection.model.sigma == -1.0 + + +def test_to_dict(linear_interpolator, linear_interpolator_dict): + assert linear_interpolator.dict() == linear_interpolator_dict + + +def test_from_dict(linear_interpolator_dict): + interpolator = af.LinearInterpolator.from_dict(linear_interpolator_dict) + assert interpolator[interpolator.t == 1.5].t == 1.5 + + +@pytest.fixture(name="instance_dict") +def make_instance_dict(): + return { + "child_items": { + "gaussian": { + "centre": 0.0, + "normalization": 1.0, + "sigma": -1.0, + "type": "autofit.example.model.Gaussian", + }, + "t": 1.0, + "type": "dict", + }, + "type": "autofit.mapper.model.ModelInstance", + } + + +@pytest.fixture(name="linear_interpolator_dict") +def make_linear_interpolator_dict(instance_dict): + return { + "instances": [ + instance_dict, + { + "child_items": { + "gaussian": { + "centre": 1.0, + "normalization": 2.0, + "sigma": -2.0, + "type": "autofit.example.model.Gaussian", + }, + "t": 2.0, + "type": "dict", + }, + "type": "autofit.mapper.model.ModelInstance", + }, + ], + "type": "autofit.interpolator.LinearInterpolator", + } + + +def test_instance_as_dict(model_instance, instance_dict): + assert as_dict(model_instance) == instance_dict + + +def test_instance_from_dict(model_instance, instance_dict): + instance = af.ModelInstance.from_dict(instance_dict) + assert instance.t == 1.0 + + gaussian = instance.gaussian + assert isinstance(gaussian, af.Gaussian) + assert gaussian.centre == 0.0 + assert gaussian.normalization == 1.0 + assert gaussian.sigma == -1.0 From 9768792f82dfd9531bbe33e813c8135d58540ae9 Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Mon, 27 Mar 2023 15:46:39 +0100 Subject: [PATCH 192/226] scipy requirementr --- requirements.txt | 2 +- setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements.txt b/requirements.txt index f8bca9fa7..249405018 100644 --- a/requirements.txt +++ b/requirements.txt @@ -10,6 +10,6 @@ pyprojroot==0.2.0 pyswarms==1.3.0 h5py>=2.10.0 SQLAlchemy==1.3.20 -scipy>=1.5.1 +scipy>=1.5.4,<=1.8.1 astunparse==1.6.3 xxhash==3.0.0 \ No newline at end of file diff --git a/setup.py b/setup.py index 25e1731d5..2953bb067 100644 --- a/setup.py +++ b/setup.py @@ -12,7 +12,7 @@ with open(join(this_dir, "requirements.txt")) as f: requirements = f.read().split("\n") -version = environ.get("VERSION", "1.0.dev0") +version = environ.get("VERSION", "2023.3.27.1") requirements.extend([ f'autoconf=={version}' ]) From 3143ab5ee24a3d5c91cc7205c8ee38e5ac402b11 Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Thu, 6 Apr 2023 13:56:36 +0100 Subject: [PATCH 193/226] version bump --- setup.py | 2 +- test_autofit/config/general.yaml | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/setup.py b/setup.py index 2953bb067..25e1731d5 100644 --- a/setup.py +++ b/setup.py @@ -12,7 +12,7 @@ with open(join(this_dir, "requirements.txt")) as f: requirements = f.read().split("\n") -version = environ.get("VERSION", "2023.3.27.1") +version = environ.get("VERSION", "1.0.dev0") requirements.extend([ f'autoconf=={version}' ]) diff --git a/test_autofit/config/general.yaml b/test_autofit/config/general.yaml index 90c934f6e..66197119a 100644 --- a/test_autofit/config/general.yaml +++ b/test_autofit/config/general.yaml @@ -24,6 +24,7 @@ profiling: should_profile: false # If True, the ``profile_log_likelihood_function()`` function of an analysis class is called throughout a model-fit, profiling run times. repeats: 1 # The number of repeat function calls used to measure run-times when profiling. test: - check_preloads: false # If True, perform a sanity check that the likelihood using preloads is identical to the likelihood not using preloads. + check_preloads: false + preloads_check_threshold: 0.1 # If the figure of merit of a fit with and without preloads is greater than this threshold, the check preload test fails and an exception raised for a model-fit. # If True, perform a sanity check that the likelihood using preloads is identical to the likelihood not using preloads. exception_override: false parallel_profile: false From 32b0fdbb601ef656d121b06219760808bc56ce64 Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Thu, 6 Apr 2023 15:25:12 +0100 Subject: [PATCH 194/226] added method visualize_combined --- autofit/__init__.py | 1 + autofit/graphical/declarative/abstract.py | 5 +++++ .../graphical/declarative/factor/analysis.py | 5 +++++ autofit/non_linear/abstract_search.py | 3 +++ autofit/non_linear/analysis/analysis.py | 3 +++ autofit/non_linear/analysis/combined.py | 19 +++++++++++++++++++ autofit/non_linear/analysis/indexed.py | 3 +++ 7 files changed, 39 insertions(+) diff --git a/autofit/__init__.py b/autofit/__init__.py index 0eaf5aba3..9e0fb7abb 100644 --- a/autofit/__init__.py +++ b/autofit/__init__.py @@ -62,6 +62,7 @@ from .non_linear.abstract_search import NonLinearSearch from .non_linear.abstract_search import PriorPasser from .non_linear.analysis.analysis import Analysis +from .non_linear.analysis.combined import CombinedAnalysis from .non_linear.grid.grid_search import GridSearchResult from .non_linear.initializer import InitializerBall from .non_linear.initializer import InitializerPrior diff --git a/autofit/graphical/declarative/abstract.py b/autofit/graphical/declarative/abstract.py index ccb8056cb..bb70af18e 100644 --- a/autofit/graphical/declarative/abstract.py +++ b/autofit/graphical/declarative/abstract.py @@ -253,6 +253,11 @@ def visualize( instance, during_analysis ) + model_factor.visualize_combined( + paths, + instance, + during_analysis + ) @property def global_prior_model(self) -> Collection: diff --git a/autofit/graphical/declarative/factor/analysis.py b/autofit/graphical/declarative/factor/analysis.py index 346f94125..5682b5053 100644 --- a/autofit/graphical/declarative/factor/analysis.py +++ b/autofit/graphical/declarative/factor/analysis.py @@ -129,6 +129,11 @@ def visualize( instance, during_analysis ) + self.analysis.visualize_combined( + paths, + instance, + during_analysis + ) def log_likelihood_function( self, diff --git a/autofit/non_linear/abstract_search.py b/autofit/non_linear/abstract_search.py index 37d319311..0253939d9 100644 --- a/autofit/non_linear/abstract_search.py +++ b/autofit/non_linear/abstract_search.py @@ -768,6 +768,9 @@ def perform_update(self, model, analysis, during_analysis): analysis.visualize( paths=self.paths, instance=instance, during_analysis=during_analysis ) + analysis.visualize_combined( + paths=self.paths, instance=instance, during_analysis=during_analysis + ) if self.should_profile: self.logger.debug("Profiling Maximum Likelihood Model") diff --git a/autofit/non_linear/analysis/analysis.py b/autofit/non_linear/analysis/analysis.py index c3ee402f3..f7fd433a9 100644 --- a/autofit/non_linear/analysis/analysis.py +++ b/autofit/non_linear/analysis/analysis.py @@ -44,6 +44,9 @@ def log_likelihood_function(self, instance): def visualize(self, paths: AbstractPaths, instance, during_analysis): pass + def visualize_combined(self, paths: AbstractPaths, instance, during_analysis): + pass + def save_attributes_for_aggregator(self, paths: AbstractPaths): pass diff --git a/autofit/non_linear/analysis/combined.py b/autofit/non_linear/analysis/combined.py index 4bb792222..5cb8016d0 100644 --- a/autofit/non_linear/analysis/combined.py +++ b/autofit/non_linear/analysis/combined.py @@ -206,6 +206,25 @@ def func(child_paths, analysis): self._for_each_analysis(func, paths) + def visualize_combined(self, paths: AbstractPaths, instance, during_analysis): + """ + Visualise the instance using images and quantities which are shared across all analyses. + + For example, each Analysis may have a different dataset, where the fit to each dataset is intended to all + be plotted on the same matplotlib subplot. This function can be overwritten to allow the visualization of such + a plot. + + Parameters + ---------- + paths + An object describing the paths for saving data (e.g. hard-disk directories or entries in sqlite database). + instance + The maximum likelihood instance of the model so far in the non-linear search. + during_analysis + Is this visualisation during analysis? + """ + pass + def profile_log_likelihood_function( self, paths: AbstractPaths, instance, ): diff --git a/autofit/non_linear/analysis/indexed.py b/autofit/non_linear/analysis/indexed.py index da7f9200d..43064cdf0 100644 --- a/autofit/non_linear/analysis/indexed.py +++ b/autofit/non_linear/analysis/indexed.py @@ -36,6 +36,9 @@ def log_likelihood_function(self, instance): def visualize(self, paths: AbstractPaths, instance, during_analysis): return self.analysis.visualize(paths, instance[self.index], during_analysis) + def visualize_combined(self, paths: AbstractPaths, instance, during_analysis): + return self.analysis.visualize_combined(paths, instance[self.index], during_analysis) + def profile_log_likelihood_function(self, paths: AbstractPaths, instance): return self.profile_log_likelihood_function(paths, instance[self.index]) From 8e5cd86088ea65e105690fd1fb5cf02da1dfda40 Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Thu, 6 Apr 2023 15:32:21 +0100 Subject: [PATCH 195/226] added unit test, call visualize_combined of first analyis object --- autofit/non_linear/analysis/combined.py | 2 +- test_autofit/non_linear/test_analysis.py | 16 ++++++++++++++++ 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/autofit/non_linear/analysis/combined.py b/autofit/non_linear/analysis/combined.py index 5cb8016d0..620a15263 100644 --- a/autofit/non_linear/analysis/combined.py +++ b/autofit/non_linear/analysis/combined.py @@ -223,7 +223,7 @@ def visualize_combined(self, paths: AbstractPaths, instance, during_analysis): during_analysis Is this visualisation during analysis? """ - pass + self.analyses[0].visualize_combined(paths, instance, during_analysis) def profile_log_likelihood_function( self, paths: AbstractPaths, instance, diff --git a/test_autofit/non_linear/test_analysis.py b/test_autofit/non_linear/test_analysis.py index be4a878ea..a5c621381 100644 --- a/test_autofit/non_linear/test_analysis.py +++ b/test_autofit/non_linear/test_analysis.py @@ -13,6 +13,7 @@ class Analysis(af.Analysis): def __init__(self): self.did_visualise = False + self.did_visualise_combined = False self.did_profile = False def log_likelihood_function(self, instance): @@ -23,6 +24,10 @@ def visualize(self, paths: AbstractPaths, instance, during_analysis): os.makedirs(paths.image_path) open(f"{paths.image_path}/image.png", "w+").close() + def visualize_combined(self, paths: AbstractPaths, instance, during_analysis): + + self.did_visualise_combined = True + def profile_log_likelihood_function(self, paths: AbstractPaths, instance): self.did_profile = True @@ -37,6 +42,17 @@ def test_visualise(): assert analysis_2.did_visualise is True +def test_visualise_combined(): + analysis_1 = Analysis() + analysis_2 = Analysis() + + (analysis_1 + analysis_2).visualize_combined(af.DirectoryPaths(), None, None) + + assert analysis_1.did_visualise_combined is True + assert analysis_2.did_visualise_combined is False + + + def test__profile_log_likelihood(): analysis_1 = Analysis() analysis_2 = Analysis() From 09b4d800f9dfa95ba4102a21cdfc797aa0f4f9b8 Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Thu, 6 Apr 2023 15:33:11 +0100 Subject: [PATCH 196/226] docs --- autofit/non_linear/analysis/combined.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/autofit/non_linear/analysis/combined.py b/autofit/non_linear/analysis/combined.py index 620a15263..37ff8e438 100644 --- a/autofit/non_linear/analysis/combined.py +++ b/autofit/non_linear/analysis/combined.py @@ -214,6 +214,9 @@ def visualize_combined(self, paths: AbstractPaths, instance, during_analysis): be plotted on the same matplotlib subplot. This function can be overwritten to allow the visualization of such a plot. + Only the first analysis is used to visualize the combined results, where it is assumed that it uses the + `analyses` property to access the other analyses and perform visualization. + Parameters ---------- paths From a2217f884ea00853f35ead5c5685052ebbd237c9 Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Thu, 6 Apr 2023 16:42:35 +0100 Subject: [PATCH 197/226] analyses passed around correct --- autofit/non_linear/abstract_search.py | 5 +++-- autofit/non_linear/analysis/analysis.py | 2 +- autofit/non_linear/analysis/combined.py | 2 +- autofit/non_linear/analysis/indexed.py | 2 +- test_autofit/non_linear/test_analysis.py | 2 +- 5 files changed, 7 insertions(+), 6 deletions(-) diff --git a/autofit/non_linear/abstract_search.py b/autofit/non_linear/abstract_search.py index 0253939d9..e10189737 100644 --- a/autofit/non_linear/abstract_search.py +++ b/autofit/non_linear/abstract_search.py @@ -22,6 +22,7 @@ FactorApproximation, ) from autofit.graphical.utils import Status +from autofit.graphical.utils import Status from autofit.mapper.prior_model.collection import Collection from autofit.non_linear.initializer import Initializer from autofit.non_linear.parallel import SneakyPool @@ -765,10 +766,10 @@ def perform_update(self, model, analysis, during_analysis): return samples self.logger.debug("Visualizing") - analysis.visualize( + analysis.visualize_combined( paths=self.paths, instance=instance, during_analysis=during_analysis ) - analysis.visualize_combined( + analysis.visualize( paths=self.paths, instance=instance, during_analysis=during_analysis ) diff --git a/autofit/non_linear/analysis/analysis.py b/autofit/non_linear/analysis/analysis.py index f7fd433a9..b604c0d2f 100644 --- a/autofit/non_linear/analysis/analysis.py +++ b/autofit/non_linear/analysis/analysis.py @@ -44,7 +44,7 @@ def log_likelihood_function(self, instance): def visualize(self, paths: AbstractPaths, instance, during_analysis): pass - def visualize_combined(self, paths: AbstractPaths, instance, during_analysis): + def visualize_combined(self, analyses, paths: AbstractPaths, instance, during_analysis): pass def save_attributes_for_aggregator(self, paths: AbstractPaths): diff --git a/autofit/non_linear/analysis/combined.py b/autofit/non_linear/analysis/combined.py index 37ff8e438..c5f50f17c 100644 --- a/autofit/non_linear/analysis/combined.py +++ b/autofit/non_linear/analysis/combined.py @@ -226,7 +226,7 @@ def visualize_combined(self, paths: AbstractPaths, instance, during_analysis): during_analysis Is this visualisation during analysis? """ - self.analyses[0].visualize_combined(paths, instance, during_analysis) + self.analyses[0].visualize_combined(analyses=self.analyses, paths=paths, instance=instance, during_analysis=during_analysis) def profile_log_likelihood_function( self, paths: AbstractPaths, instance, diff --git a/autofit/non_linear/analysis/indexed.py b/autofit/non_linear/analysis/indexed.py index 43064cdf0..2cfa23a10 100644 --- a/autofit/non_linear/analysis/indexed.py +++ b/autofit/non_linear/analysis/indexed.py @@ -36,7 +36,7 @@ def log_likelihood_function(self, instance): def visualize(self, paths: AbstractPaths, instance, during_analysis): return self.analysis.visualize(paths, instance[self.index], during_analysis) - def visualize_combined(self, paths: AbstractPaths, instance, during_analysis): + def visualize_combined(self, analyses, paths: AbstractPaths, instance, during_analysis): return self.analysis.visualize_combined(paths, instance[self.index], during_analysis) def profile_log_likelihood_function(self, paths: AbstractPaths, instance): diff --git a/test_autofit/non_linear/test_analysis.py b/test_autofit/non_linear/test_analysis.py index a5c621381..3cc05be81 100644 --- a/test_autofit/non_linear/test_analysis.py +++ b/test_autofit/non_linear/test_analysis.py @@ -24,7 +24,7 @@ def visualize(self, paths: AbstractPaths, instance, during_analysis): os.makedirs(paths.image_path) open(f"{paths.image_path}/image.png", "w+").close() - def visualize_combined(self, paths: AbstractPaths, instance, during_analysis): + def visualize_combined(self, analyses, paths: AbstractPaths, instance, during_analysis): self.did_visualise_combined = True From 16b0a34a760f040966463bda46462a9160ac6a29 Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Thu, 6 Apr 2023 19:58:18 +0100 Subject: [PATCH 198/226] fix integration test --- autofit/non_linear/abstract_search.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/autofit/non_linear/abstract_search.py b/autofit/non_linear/abstract_search.py index e10189737..bd5a28f6a 100644 --- a/autofit/non_linear/abstract_search.py +++ b/autofit/non_linear/abstract_search.py @@ -766,12 +766,12 @@ def perform_update(self, model, analysis, during_analysis): return samples self.logger.debug("Visualizing") - analysis.visualize_combined( - paths=self.paths, instance=instance, during_analysis=during_analysis - ) analysis.visualize( paths=self.paths, instance=instance, during_analysis=during_analysis ) + analysis.visualize_combined( + analyses=None, paths=self.paths, instance=instance, during_analysis=during_analysis + ) if self.should_profile: self.logger.debug("Profiling Maximum Likelihood Model") From 674dafb0442015ea9341c2e3acbbff16002c3007 Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Fri, 7 Apr 2023 16:30:06 +0100 Subject: [PATCH 199/226] fix missing input --- autofit/non_linear/analysis/combined.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autofit/non_linear/analysis/combined.py b/autofit/non_linear/analysis/combined.py index c5f50f17c..f4cee25d9 100644 --- a/autofit/non_linear/analysis/combined.py +++ b/autofit/non_linear/analysis/combined.py @@ -206,7 +206,7 @@ def func(child_paths, analysis): self._for_each_analysis(func, paths) - def visualize_combined(self, paths: AbstractPaths, instance, during_analysis): + def visualize_combined(self, analyses : List["Analysis"], paths: AbstractPaths, instance, during_analysis): """ Visualise the instance using images and quantities which are shared across all analyses. From 3911a4e77b80e9265a05fa154fdaee78fa2e46a2 Mon Sep 17 00:00:00 2001 From: Other Date: Mon, 17 Apr 2023 10:07:40 +0100 Subject: [PATCH 200/226] use fork instead of forkserver to make multiprocessing work on newer macs --- test_autofit/conftest.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/test_autofit/conftest.py b/test_autofit/conftest.py index 650dc80a4..f6b913622 100644 --- a/test_autofit/conftest.py +++ b/test_autofit/conftest.py @@ -14,7 +14,7 @@ from autofit.database.model import sa if sys.platform == "darwin": - multiprocessing.set_start_method("forkserver") + multiprocessing.set_start_method("fork") directory = Path(__file__).parent @@ -38,7 +38,8 @@ def remove_output(): item_path = output_directory / item if item_path.is_dir(): shutil.rmtree( - item_path, ignore_errors=True, + item_path, + ignore_errors=True, ) else: os.remove(item_path) From 818f684aceeaa1f0e117fe502e0125aa95e6e8c8 Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 17 Apr 2023 10:27:32 +0100 Subject: [PATCH 201/226] refactor(combined.py): extract a private method `_for_each_analysis` to reduce code duplication test(combined.py): add `paths` fixture to tests and update tests to use it --- autofit/non_linear/analysis/combined.py | 35 ++++++++++++++++-------- test_autofit/analysis/test_regression.py | 15 +++++++--- 2 files changed, 34 insertions(+), 16 deletions(-) diff --git a/autofit/non_linear/analysis/combined.py b/autofit/non_linear/analysis/combined.py index 4bb792222..0544ae54c 100644 --- a/autofit/non_linear/analysis/combined.py +++ b/autofit/non_linear/analysis/combined.py @@ -89,9 +89,11 @@ def modify_before_fit(self, paths: AbstractPaths, model: AbstractPriorModel): model The model which is to be fitted. """ - return CombinedAnalysis( - *(analysis.modify_before_fit(paths, model) for analysis in self.analyses) - ) + + def func(child_paths, analysis): + return analysis.modify_before_fit(child_paths, model) + + return CombinedAnalysis(*self._for_each_analysis(func, paths)) def modify_after_fit( self, paths: AbstractPaths, model: AbstractPriorModel, result: Result @@ -108,11 +110,12 @@ def modify_after_fit( result The result of the fit. """ + + def func(child_paths, analysis, result_): + return analysis.modify_after_fit(child_paths, model, result_) + return CombinedAnalysis( - *( - analysis.modify_after_fit(paths, model, result) - for analysis in self.analyses - ) + *self._for_each_analysis(func, paths, result.child_results) ) @property @@ -155,7 +158,7 @@ def _summed_log_likelihood(self, instance) -> float: def log_likelihood_function(self, instance): return self._log_likelihood_function(instance) - def _for_each_analysis(self, func, paths, *args): + def _for_each_analysis(self, func, paths, *args) -> List[Union[Result, Analysis]]: """ Convenience function to call an underlying function for each analysis with a paths object with an integer attached to the @@ -168,13 +171,18 @@ def _for_each_analysis(self, func, paths, *args): paths An object describing the paths for saving data (e.g. hard-disk directories or entries in sqlite database). """ + results = [] for (i, analysis), *args in zip(enumerate(self.analyses), *args): child_paths = paths.for_sub_analysis(analysis_name=f"analyses/analysis_{i}") - func(child_paths, analysis, *args) + results.append(func(child_paths, analysis, *args)) + + return results def save_attributes_for_aggregator(self, paths: AbstractPaths): def func(child_paths, analysis): - analysis.save_attributes_for_aggregator(child_paths,) + analysis.save_attributes_for_aggregator( + child_paths, + ) self._for_each_analysis(func, paths) @@ -207,7 +215,9 @@ def func(child_paths, analysis): self._for_each_analysis(func, paths) def profile_log_likelihood_function( - self, paths: AbstractPaths, instance, + self, + paths: AbstractPaths, + instance, ): """ Profile the log likelihood function of the maximum likelihood model instance using each analysis. @@ -224,7 +234,8 @@ def profile_log_likelihood_function( def func(child_paths, analysis): analysis.profile_log_likelihood_function( - child_paths, instance, + child_paths, + instance, ) self._for_each_analysis(func, paths) diff --git a/test_autofit/analysis/test_regression.py b/test_autofit/analysis/test_regression.py index 501dc5a56..cef9c35d3 100644 --- a/test_autofit/analysis/test_regression.py +++ b/test_autofit/analysis/test_regression.py @@ -51,13 +51,20 @@ def make_combined_analysis(): return MyAnalysis() + MyAnalysis() -def test_combined_before_fit(combined_analysis): - combined_analysis = combined_analysis.modify_before_fit(None, [None]) +@pytest.fixture(name="paths") +def make_paths(): + return af.DirectoryPaths() + + +def test_combined_before_fit(combined_analysis, paths): + combined_analysis = combined_analysis.modify_before_fit(paths, [None]) assert combined_analysis[0].is_modified_before -def test_combined_after_fit(combined_analysis): - combined_analysis = combined_analysis.modify_after_fit(None, [None], None) +def test_combined_after_fit(combined_analysis, paths): + result = combined_analysis.make_result(None, None) + + combined_analysis = combined_analysis.modify_after_fit(paths, [None], result) assert combined_analysis[0].is_modified_after From 421e7d6530b7bc8dd4801d77af5e550033adc542 Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Mon, 17 Apr 2023 12:48:20 +0100 Subject: [PATCH 202/226] docs --- .../pickles/attributes.pickle | Bin 4253 -> 6066 bytes 1 file changed, 0 insertions(+), 0 deletions(-) diff --git a/test_autofit/database/mass_sie__source_sersic/phase_mass[sie]_source[bulge]/settings__imaging[grid_sub_2]__lens[pos_off]/dynesty_static[nlive_50__bound_multi_vol_dec_0.5_vol_check_2.0__enlarge_1.0__sample_rwalk_walks_5_facc_0.2]/pickles/attributes.pickle b/test_autofit/database/mass_sie__source_sersic/phase_mass[sie]_source[bulge]/settings__imaging[grid_sub_2]__lens[pos_off]/dynesty_static[nlive_50__bound_multi_vol_dec_0.5_vol_check_2.0__enlarge_1.0__sample_rwalk_walks_5_facc_0.2]/pickles/attributes.pickle index 99b855508c14e4b443f2309c48c0b44e081e6796..c536394934b2d07843e23967c2b4ca94c8ad453c 100644 GIT binary patch literal 6066 zcmb_gTXPf16}E&d%ff}hYshAcZ1u7gkXcZ`nEMiNyu=s>+km+wYcti7+Uk+#q8-iH zh+W7Z;ALO(GgAAI*Udg8m6yD1RsKb)Qu`P3o$emZ#WFZmQFXWaa{6?C=kz&!rhoe3 z`_fN8e2;W=MEy{!@hJ2i;q~fWyDJ>q6ZNj-JyEw^-nPAVoqOEr_w1gYSe*~Uz-~mL zFdgWg(L=9^8}fM5@43F?xBI&P1n-4`-|g22{ip>(4_1Hc@bGusZM68p7Y}qF${fp* zwd!6#_u}n;{dxKoRW#+Owb~LifOOo8T(w9b>TcwOHV*=BwovXhxx)iZXr?N1Q*VX+ zuGx_8DQ;a$6QlkJxwd==W#MxaD=RDa@i$1B#_~vrfyP4R4*rasPNI5yKu+9{fUBW# z>GsdXXQ=$`E#WeNfBY6bD>qSGK_0bM#MTxni+3t7F5mv z3<9LGwcd0Ue_2Iy!U{yw_n?fZN&6e5fr28>4tw>lBJPEDXhc_em}z3t@`#?fP-qNT zXz3JY8)4-UuqhlV)G#NGv~$S1N9RkPlD~}ACeLWf z{Zv-l!1!8PUGlsVa&^IXyME6OMMlIqQ!QI$Yb3NqI_?8MQ??rLDv4wXNOdg`TlUL$ zu{(?-&<#XScp=S|57NuYhomKWLCz{|pMU0M66}7@TH`i z=oy{G!9GwYbNK=pNk}RC8PwgT9w~Qu&=b^%_4g)*{KqupGn(!OtCgG9*%*p*HTT(n5C?yUm`>wM7DQ3i&iS z#fmwsDLF)qG`vF(VtiFswD}&4;1Q!#)#RKWP3Tz-ToUD4vqV|WW zoWb9@rgmtI9Tc>?@vA6EokNZyq54px()SYDDcA;41{2vG(xH|LRNrr3V<2R;R)PVO zk08_1NWaQM*gC_`V;;I8m}% zKS+QSpZx3@=03ZKB8P&mVq&W{MK>%d4yp7H3XNaw4pGIz#@LJL*BBj&BIkD(V*r)5 z-R1`daU`Z@HE>9foWQahlanY3P{!pZNx{o<2^)D{R0MP+a^LVq1$yZjsr^s zXL+BP#9dm4HF*$2PF)VgZ4*saC}?O0oWhO$?}tvgFf!5^Lt#{}I*c)@{`9SUD(0EH zC0yR?7r;ubOk|Gaz;;($|u0D?l&S^Qx;{j~;CD*yYtKcSy) zD2622R z7#gX(B6dUHGYv0p#GYa$IWe`)DjJ!y$IPrKeg@j-FrjGLCA$MhFiaKgFc|fr=l~~g7 zi{h!?z=Uu_6VHC$j_=SxkD<#1ddF7v#L-sonmO#Lo}>|w^*us1-VU{lC#YDfZSJ~U zU8O3Tp9Tri^Z5x#>)gT0yx-ACbBEP5@2KRAlNF|U<+ zdPK)QFE2SCu?osF6DVHnP&h|IBgbD=+${$m9cXPLFW%XCfxig~239!=ycQZz@yE>B ziLcIM;MpGhcyQb%_>0<8jEtau%(A^5OKe46Q@!29@1&o`&vMDTt$s#L=#3O6x146n zO}fE!kWM7shP%asHchN$UUTI~odD+{G^Z)^7T2?E?#1_HIA!-JzrW{nmNbT zIllMU9ILIbqjC2fyL*>?#vFfljs7Tt^V+D1m*ETr2NCJ(7;o`zXt{n%I2OI3Q`|(p k@T4AS<01NGUyoQ_9!h*qs^?gu8CUOX*c{1cC2G|F3+h&`J^%m! literal 4253 zcmb7|eQ;FO6~NhLlWam_Fa~NJ%3N?tHVE4g0t66~bfF<^*#roL)`T~2cW?H-&HGs2 zN5Y~|YXO1287J^s8QO7LYCD3I5z8O`Q`(AB1wlkgDV@oPlvcp<;i#P!g`RuwzJ1vU zb(&#z-`;obxxaJnx#yhAlYt*?A3KWu*&)#~H5sc$I3s1SEU7r0A)Rqgv)&2Wr3uYYG+9e!9eccJn7Wq9hVx_aq+`df_?9f1 z-xig4Qj9EI>P-#^?Mj`~+Vt-J>UHN^_uEB6dlJ_<_Bhp2m>M026-zcHQP)LSpx8)= zvZ#XsyNHRvchbyeFdgytHac6kJnbxZLebH-?_nnvi!Hegf9xYV%5>t9VDsC?uYNz_ z?@i8Tkh5X^I9>mR_rHoCdiO9}p8=g5d?z;l_-q)6;oe@SwK=$ER?`#z9JB+fsDMGi z4p~$aeKTeOlhBz^Kntm`%LN@LG!>j+B}|HxcZj7*rV)P35>-<&!I0oG_i=|49kt1c z_~H&H_tlbdTO+}L+}v~H)bXNb#{@3}236qLRjz}%KJMHH=&CvvO)N;dj+0hGip!YK z*x`_{UD@UhucUf*I6bg)z9_&)vD`1I4rYF%`&led3k@nk8ptdRMj1rY&HONF5bZI7 ztf?t)XqIS~2)&xFhzz$x5RzI#5FC3B*FO)LSjk{rG%X#Y0m&ptM5-mrD5FWLiA4Ap zH#8YS3$4St7N#ac6cT4!@V#m|b>oj^HCEi%?8NQS!ea3CWb$?DHd0gIv}a!ZB-DMa zgFB5(2h%Genv&5B$;3Isb5UC?&`83t_1AOfpje1Q)N{MTc3Fpxd!@C7lg5CfGdeb~ zY8nn%%2knNJr1cO^~|FRa4Xovy5~?bmwK|B^k6a{B)dhj0KR%e(zM{%@A z8@f8_qde{SRp5!R9Vl=bDSR!6q}Sz+JW2wGx@vY)?xQ!&g9{IRfC(YlJY^;U z0h$qY11B8ry$u}kdoE%Pe>)F}nT)Qjg_LDtIg4h7>t`cbN)dB<7Q_57V>iaWAb<37 z>{)Q42hkGV_$2ing)q$h{|Y8ARRDy$xgD(wcTK)MW!#w{+vhyK6?Z8|FQu4e3Qmay zsM|Q|4_(x;o&*ZP9PUq{c){)o58e)fHaCVK(H4WnnwlILM1y2Icf%mpz1`J~xcrDH zG9?A`7~=Misjqq}G4!H;H2Wka!-Nb8QFrH~dj3HB>fLPz@<%np)Rw!`Df``~ zzWtr{Mr&wVe(IeAvHyIdbCJg1z4|e@>~%K^d<2RK(JURx z5P{Rr4%AV$O(AlOyoGbeJxFCxi16JHeoS6IcK*9n1rgcY$W~%Y{j~9N>6{;|zF4q3 z&7a)QS!m|*APG6;MWrw4*rStBGC9egdBB`?1EvICp-_@jT)OA-i_M=s_l-h|{?mLB zI2SjL{-waJ%Txo3!LL`E=MVSheBDMllWJD zrE6o?jB^DfEys^zwh)lqz8*L-_|&?A0+L8~$(ri*FO4AaC`PwZj9^MiiNjE?ag^&W z%02FT6asn9GyZgUW#!ebf9Ha}N=WFEDM8_YUnZ(9it0ud%N8|=YG>qZ7 zp;~!kXrLiBrNHw|9r2~7ZtVGtkhCAxp(*l_pX~bCwPhN_3a=zSAPjIi8uIBXkmD5h zu_s>Sf>Z*9QrxREKlRmLT5uA94$u5MAL4wzg)-N)k+z7{k5i05@#U9(d?x-OkWGMf zTpE7@>RMR>>NBJdR)rbVq&(E*VNjKx9110J_3i1u-Z*l8;@P!dqA`@$So>uf>pu9J zHRqS;$gr&YIrOAB7gJ*dRmB#x)v9JV(6*KdbZex3tXj?%=)F{EKLzNV1o#cj$|EPH zu4o%W$Elc%4FkrberU+p0G}82*_ECe_VmSE8udZ@L_5t_CT_a2?3MjZZm4>D;s!0v zGhZBq9m(}hcMIha66sR#6@kLxmLDiUx2P*%7u)D7*w|x-)-0cL;Pp3L|NCN44P$t~ z)Kqv-`2!F5n7ap|bUQqzJK)1jvlV>GJOn){twEsq!>ubX{N~NG?}KK+11PL}g$4;6 z*v0h^!(9byfg(`*>6zCK9{K!0dmeZRw17)A3wletD9ye(?xP-VH^uE4j>{&@Ls-Q+ z+@q-8D}lXLJ2&qW3J&DMCC{I$Eg!#v3hYsT5qKgzG_fER<^@=C`5y)(m;o-dAFp!k zaxCKnT;J4wO*VPHP7@b8EwLwloGTXPB+)7=N@;u>m;XuGd*Pj2R0oCjl&B~oh=7~!p{<&RC@(&gO?``}*4 zDjIwOq_gM7GP;kLUd8D$kP_M2jh|fV0O_s&D2KO*Y zQ&D~0jM>vr%aoRBXx7{Yc$zU2g*rsYHfVKQTU+fk)G0zQhZ<0*vlD(SnuZoFLJuKX z8|a{4v<9jvc*1MV+FHko+xHT Date: Mon, 17 Apr 2023 12:58:48 +0100 Subject: [PATCH 203/226] fix graphical un9it tests --- autofit/graphical/declarative/abstract.py | 1 + autofit/graphical/declarative/factor/analysis.py | 8 ++------ 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/autofit/graphical/declarative/abstract.py b/autofit/graphical/declarative/abstract.py index bb70af18e..320e0f075 100644 --- a/autofit/graphical/declarative/abstract.py +++ b/autofit/graphical/declarative/abstract.py @@ -254,6 +254,7 @@ def visualize( during_analysis ) model_factor.visualize_combined( + None, paths, instance, during_analysis diff --git a/autofit/graphical/declarative/factor/analysis.py b/autofit/graphical/declarative/factor/analysis.py index 5682b5053..b188ab2bc 100644 --- a/autofit/graphical/declarative/factor/analysis.py +++ b/autofit/graphical/declarative/factor/analysis.py @@ -125,14 +125,10 @@ def visualize( Is this visualisation during analysis? """ self.analysis.visualize( - paths, - instance, - during_analysis + paths=paths, instance=instance, during_analysis=during_analysis ) self.analysis.visualize_combined( - paths, - instance, - during_analysis + analyses=None, paths=paths, instance=instance, during_analysis=during_analysis ) def log_likelihood_function( From c097059db904917d6195b18379eaa04a2db6ee31 Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Mon, 17 Apr 2023 13:04:16 +0100 Subject: [PATCH 204/226] fix another test --- test_autofit/non_linear/test_analysis.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test_autofit/non_linear/test_analysis.py b/test_autofit/non_linear/test_analysis.py index 3cc05be81..066f52868 100644 --- a/test_autofit/non_linear/test_analysis.py +++ b/test_autofit/non_linear/test_analysis.py @@ -46,7 +46,7 @@ def test_visualise_combined(): analysis_1 = Analysis() analysis_2 = Analysis() - (analysis_1 + analysis_2).visualize_combined(af.DirectoryPaths(), None, None) + (analysis_1 + analysis_2).visualize_combined(None, af.DirectoryPaths(), None, None) assert analysis_1.did_visualise_combined is True assert analysis_2.did_visualise_combined is False From 82d61b43e131dc781c236355a34eaea65ba827f4 Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Thu, 20 Apr 2023 15:37:41 +0100 Subject: [PATCH 205/226] added visualize_before_fit methods to Analysis and Combined --- autofit/non_linear/abstract_search.py | 9 ++++- autofit/non_linear/analysis/analysis.py | 6 ++++ autofit/non_linear/analysis/combined.py | 45 ++++++++++++++++++++++++- 3 files changed, 58 insertions(+), 2 deletions(-) diff --git a/autofit/non_linear/abstract_search.py b/autofit/non_linear/abstract_search.py index bd5a28f6a..bf0f4e04d 100644 --- a/autofit/non_linear/abstract_search.py +++ b/autofit/non_linear/abstract_search.py @@ -575,7 +575,14 @@ class represented by model M and gives a score for their fitness. self.paths.unique_tag = self.unique_tag self.paths.restore() - analysis = analysis.modify_before_fit(paths=self.paths, model=model,) + analysis = analysis.modify_before_fit(paths=self.paths, model=model) + + analysis.visualize_before_fit( + paths=self.paths, + ) + analysis.visualize_before_fit_combined( + analyses=None, paths=self.paths, + ) if not self.paths.is_complete or self.force_pickle_overwrite: self.logger.info("Saving path info") diff --git a/autofit/non_linear/analysis/analysis.py b/autofit/non_linear/analysis/analysis.py index b604c0d2f..a70d949d7 100644 --- a/autofit/non_linear/analysis/analysis.py +++ b/autofit/non_linear/analysis/analysis.py @@ -41,6 +41,12 @@ def with_model(self, model): def log_likelihood_function(self, instance): raise NotImplementedError() + def visualize_before_fit(self, paths: AbstractPaths): + pass + + def visualize_before_fit_combined(self, analyses, paths: AbstractPaths): + pass + def visualize(self, paths: AbstractPaths, instance, during_analysis): pass diff --git a/autofit/non_linear/analysis/combined.py b/autofit/non_linear/analysis/combined.py index 4423d7e38..6684d6ea6 100644 --- a/autofit/non_linear/analysis/combined.py +++ b/autofit/non_linear/analysis/combined.py @@ -192,6 +192,44 @@ def func(child_paths, analysis, result_): self._for_each_analysis(func, paths, result) + def visualize_before_fit(self, paths: AbstractPaths): + """ + Visualise the model before fitting. + + Visualisation output is distinguished by using an integer suffix + for each analysis path. + + Parameters + ---------- + paths + An object describing the paths for saving data (e.g. hard-disk directories or entries in sqlite database). + """ + def func(child_paths, analysis): + analysis.visualize(child_paths) + + self._for_each_analysis(func, paths) + + def visualize_before_fit_combined(self, analyses, paths: AbstractPaths): + """ + Visualise images and quantities which are shared across all analyses. + + For example, each Analysis may have a different dataset, where the data in each dataset is intended to all + be plotted on the same matplotlib subplot. This function can be overwritten to allow the visualization of such + a plot. + + Only the first analysis is used to visualize the combined results, where it is assumed that it uses the + `analyses` property to access the other analyses and perform visualization. + + Parameters + ---------- + paths + An object describing the paths for saving data (e.g. hard-disk directories or entries in sqlite database). + """ + self.analyses[0].visualize_combined( + analyses=self.analyses, + paths=paths, + ) + def visualize(self, paths: AbstractPaths, instance, during_analysis): """ Visualise the instance according to each analysis. @@ -234,7 +272,12 @@ def visualize_combined(self, analyses : List["Analysis"], paths: AbstractPaths, during_analysis Is this visualisation during analysis? """ - self.analyses[0].visualize_combined(analyses=self.analyses, paths=paths, instance=instance, during_analysis=during_analysis) + self.analyses[0].visualize_combined( + analyses=self.analyses, + paths=paths, + instance=instance, + during_analysis=during_analysis + ) def profile_log_likelihood_function( self, From d3d888bd13bdde28aa9d5f65b260b3678d233ac1 Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Thu, 20 Apr 2023 16:06:09 +0100 Subject: [PATCH 206/226] unit test added for visualize_before_fit --- autofit/non_linear/abstract_search.py | 4 ++-- autofit/non_linear/analysis/analysis.py | 4 ++-- autofit/non_linear/analysis/combined.py | 9 +++++---- test_autofit/non_linear/test_analysis.py | 15 +++++++++++++++ 4 files changed, 24 insertions(+), 8 deletions(-) diff --git a/autofit/non_linear/abstract_search.py b/autofit/non_linear/abstract_search.py index bf0f4e04d..8d8e0a532 100644 --- a/autofit/non_linear/abstract_search.py +++ b/autofit/non_linear/abstract_search.py @@ -578,10 +578,10 @@ class represented by model M and gives a score for their fitness. analysis = analysis.modify_before_fit(paths=self.paths, model=model) analysis.visualize_before_fit( - paths=self.paths, + paths=self.paths, model=model, ) analysis.visualize_before_fit_combined( - analyses=None, paths=self.paths, + analyses=None, paths=self.paths, model=model, ) if not self.paths.is_complete or self.force_pickle_overwrite: diff --git a/autofit/non_linear/analysis/analysis.py b/autofit/non_linear/analysis/analysis.py index a70d949d7..d44ddd75e 100644 --- a/autofit/non_linear/analysis/analysis.py +++ b/autofit/non_linear/analysis/analysis.py @@ -41,10 +41,10 @@ def with_model(self, model): def log_likelihood_function(self, instance): raise NotImplementedError() - def visualize_before_fit(self, paths: AbstractPaths): + def visualize_before_fit(self, paths: AbstractPaths, model: AbstractPriorModel): pass - def visualize_before_fit_combined(self, analyses, paths: AbstractPaths): + def visualize_before_fit_combined(self, analyses, paths: AbstractPaths, model: AbstractPriorModel): pass def visualize(self, paths: AbstractPaths, instance, during_analysis): diff --git a/autofit/non_linear/analysis/combined.py b/autofit/non_linear/analysis/combined.py index 6684d6ea6..6d3b1feed 100644 --- a/autofit/non_linear/analysis/combined.py +++ b/autofit/non_linear/analysis/combined.py @@ -192,7 +192,7 @@ def func(child_paths, analysis, result_): self._for_each_analysis(func, paths, result) - def visualize_before_fit(self, paths: AbstractPaths): + def visualize_before_fit(self, paths: AbstractPaths, model: AbstractPriorModel): """ Visualise the model before fitting. @@ -205,11 +205,11 @@ def visualize_before_fit(self, paths: AbstractPaths): An object describing the paths for saving data (e.g. hard-disk directories or entries in sqlite database). """ def func(child_paths, analysis): - analysis.visualize(child_paths) + analysis.visualize_before_fit(child_paths, model) self._for_each_analysis(func, paths) - def visualize_before_fit_combined(self, analyses, paths: AbstractPaths): + def visualize_before_fit_combined(self, analyses, paths: AbstractPaths, model: AbstractPriorModel): """ Visualise images and quantities which are shared across all analyses. @@ -225,9 +225,10 @@ def visualize_before_fit_combined(self, analyses, paths: AbstractPaths): paths An object describing the paths for saving data (e.g. hard-disk directories or entries in sqlite database). """ - self.analyses[0].visualize_combined( + self.analyses[0].visualize_before_fit_combined( analyses=self.analyses, paths=paths, + model=model, ) def visualize(self, paths: AbstractPaths, instance, during_analysis): diff --git a/test_autofit/non_linear/test_analysis.py b/test_autofit/non_linear/test_analysis.py index 066f52868..38e7c942a 100644 --- a/test_autofit/non_linear/test_analysis.py +++ b/test_autofit/non_linear/test_analysis.py @@ -19,6 +19,11 @@ def __init__(self): def log_likelihood_function(self, instance): return -1 + def visualize_before_fit(self, paths: AbstractPaths, model): + self.did_visualise = True + os.makedirs(paths.image_path) + open(f"{paths.image_path}/image.png", "w+").close() + def visualize(self, paths: AbstractPaths, instance, during_analysis): self.did_visualise = True os.makedirs(paths.image_path) @@ -32,6 +37,16 @@ def profile_log_likelihood_function(self, paths: AbstractPaths, instance): self.did_profile = True +def test_visualise_before_fit(): + analysis_1 = Analysis() + analysis_2 = Analysis() + + (analysis_1 + analysis_2).visualize_before_fit(af.DirectoryPaths(), None) + + assert analysis_1.did_visualise is True + assert analysis_2.did_visualise is True + + def test_visualise(): analysis_1 = Analysis() analysis_2 = Analysis() From 2cae575d001e429aa4a109c81e46136835d254d2 Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Thu, 20 Apr 2023 16:07:26 +0100 Subject: [PATCH 207/226] unit test added for visualize_before_fit_combined --- autofit/non_linear/analysis/analysis.py | 4 ++-- test_autofit/non_linear/test_analysis.py | 15 +++++++++++++++ 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/autofit/non_linear/analysis/analysis.py b/autofit/non_linear/analysis/analysis.py index d44ddd75e..f5b1f3f06 100644 --- a/autofit/non_linear/analysis/analysis.py +++ b/autofit/non_linear/analysis/analysis.py @@ -44,10 +44,10 @@ def log_likelihood_function(self, instance): def visualize_before_fit(self, paths: AbstractPaths, model: AbstractPriorModel): pass - def visualize_before_fit_combined(self, analyses, paths: AbstractPaths, model: AbstractPriorModel): + def visualize(self, paths: AbstractPaths, instance, during_analysis): pass - def visualize(self, paths: AbstractPaths, instance, during_analysis): + def visualize_before_fit_combined(self, analyses, paths: AbstractPaths, model: AbstractPriorModel): pass def visualize_combined(self, analyses, paths: AbstractPaths, instance, during_analysis): diff --git a/test_autofit/non_linear/test_analysis.py b/test_autofit/non_linear/test_analysis.py index 38e7c942a..64f13451c 100644 --- a/test_autofit/non_linear/test_analysis.py +++ b/test_autofit/non_linear/test_analysis.py @@ -29,6 +29,10 @@ def visualize(self, paths: AbstractPaths, instance, during_analysis): os.makedirs(paths.image_path) open(f"{paths.image_path}/image.png", "w+").close() + def visualize_before_fit_combined(self, analyses, paths, model): + + self.did_visualise_combined = True + def visualize_combined(self, analyses, paths: AbstractPaths, instance, during_analysis): self.did_visualise_combined = True @@ -57,6 +61,17 @@ def test_visualise(): assert analysis_2.did_visualise is True +def test_visualise_before_fit_combined(): + + analysis_1 = Analysis() + analysis_2 = Analysis() + + (analysis_1 + analysis_2).visualize_before_fit_combined(None, af.DirectoryPaths(), None) + + assert analysis_1.did_visualise_combined is True + assert analysis_2.did_visualise_combined is False + + def test_visualise_combined(): analysis_1 = Analysis() analysis_2 = Analysis() From 8faf0fa9836ee6ea3aec0f769536d95a949aa0b9 Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Thu, 20 Apr 2023 16:13:26 +0100 Subject: [PATCH 208/226] to dos for where rich adds code --- autofit/graphical/declarative/abstract.py | 2 ++ autofit/non_linear/analysis/indexed.py | 2 ++ 2 files changed, 4 insertions(+) diff --git a/autofit/graphical/declarative/abstract.py b/autofit/graphical/declarative/abstract.py index 320e0f075..0262fc9e6 100644 --- a/autofit/graphical/declarative/abstract.py +++ b/autofit/graphical/declarative/abstract.py @@ -224,6 +224,8 @@ def optimise( updated_ep_mean_field=updated_ep_mean_field, ) + # TODO : Visualize method before fit? + def visualize( self, paths: AbstractPaths, diff --git a/autofit/non_linear/analysis/indexed.py b/autofit/non_linear/analysis/indexed.py index 2cfa23a10..f6bea9e31 100644 --- a/autofit/non_linear/analysis/indexed.py +++ b/autofit/non_linear/analysis/indexed.py @@ -33,6 +33,8 @@ def log_likelihood_function(self, instance): """ return self.analysis.log_likelihood_function(instance[self.index]) + # TODO : Add before fit methods here? + def visualize(self, paths: AbstractPaths, instance, during_analysis): return self.analysis.visualize(paths, instance[self.index], during_analysis) From 1746e2f337c3b5545b5a59ceb79e2feb34c95f42 Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Tue, 9 May 2023 17:12:33 +0100 Subject: [PATCH 209/226] added method --- autofit/non_linear/analysis/analysis.py | 34 +++++++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/autofit/non_linear/analysis/analysis.py b/autofit/non_linear/analysis/analysis.py index f5b1f3f06..c0fb986f8 100644 --- a/autofit/non_linear/analysis/analysis.py +++ b/autofit/non_linear/analysis/analysis.py @@ -1,6 +1,8 @@ import logging from abc import ABC +from autoconf import conf + from autofit.mapper.prior_model.abstract import AbstractPriorModel from autofit.non_linear.paths.abstract import AbstractPaths from autofit.non_linear.result import Result @@ -38,6 +40,38 @@ def with_model(self, model): model=model ) + def should_visualize(self, paths: AbstractPaths) -> bool: + """ + Whether a visualize method should continue and perform visualization, or be terminated early. + + If a model-fit has already completed, the default behaviour is for visualization to be bypassed in order + to make model-fits run faster. However, visualization can be forced to run via + the `force_visualization_overwrite`, for example if a user wants to plot additional images that were not + output on the original run. + + PyAutoFit test mode also disables visualization, irrespective of the `force_visualization_overwite` + config input. + + Parameters + ---------- + paths + The PyAutoFit paths object which manages all paths, e.g. where the non-linear search outputs are stored, + visualization and the pickled objects used by the aggregator output by this function. + + + Returns + ------- + A bool determining whether visualization should be performed or not. + """ + + if os.environ.get("PYAUTOFIT_TEST_MODE") == "1": + return False + + if paths.is_complete and not conf.instance["general"]["output"]["force_visualize_overwrite"]: + return False + + return True + def log_likelihood_function(self, instance): raise NotImplementedError() From 36f1f31e5a89d22efdbaecf75907d4f44bad61ef Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Tue, 9 May 2023 17:20:27 +0100 Subject: [PATCH 210/226] added save for aggregator method to example analysis --- autofit/example/analysis.py | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/autofit/example/analysis.py b/autofit/example/analysis.py index 5c3d90cf7..a7608d213 100644 --- a/autofit/example/analysis.py +++ b/autofit/example/analysis.py @@ -114,3 +114,28 @@ def visualize(self, paths: af.DirectoryPaths, instance: af.ModelInstance, during os.makedirs(paths.image_path, exist_ok=True) plt.savefig(path.join(paths.image_path, "model_fit.png")) plt.clf() + + def save_attributes_for_aggregator(self, paths: af.DirectoryPaths): + """ + Before the model-fit via the non-linear search begins, this routine saves attributes of the `Analysis` object + to the `pickles` folder such that they can be loaded after the analysis using PyAutoFit's database and + aggregator tools. + + For this analysis the following are output: + + - The dataset's data. + - The dataset's noise-map. + + It is common for these attributes to be loaded by many of the template aggregator functions given in the + `aggregator` modules. For example, when using the database tools to reperform a fit, this will by default + load the dataset, settings and other attributes necessary to perform a fit using the attributes output by + this function. + + Parameters + ---------- + paths + The PyAutoFit paths object which manages all paths, e.g. where the non-linear search outputs are stored, + visualization, and the pickled objects used by the aggregator output by this function. + """ + paths.save_object("data", self.data) + paths.save_object("noise_map", self.noise_map) \ No newline at end of file From c98d42a8db960ee3da9d99fc138cdef17643d39b Mon Sep 17 00:00:00 2001 From: Richard Date: Wed, 10 May 2023 10:05:24 +0100 Subject: [PATCH 211/226] ignore assertions when ignoring prior limits --- autofit/mapper/prior_model/abstract.py | 138 ++++++++++++++------- autofit/mapper/prior_model/collection.py | 96 ++++++-------- autofit/mapper/prior_model/prior_model.py | 6 +- autofit/non_linear/paths/database.py | 4 +- test_autofit/non_linear/test_regression.py | 50 ++++---- 5 files changed, 161 insertions(+), 133 deletions(-) diff --git a/autofit/mapper/prior_model/abstract.py b/autofit/mapper/prior_model/abstract.py index 9f337972b..f2bb558a8 100644 --- a/autofit/mapper/prior_model/abstract.py +++ b/autofit/mapper/prior_model/abstract.py @@ -45,36 +45,6 @@ def for_class_and_attributes_name(cls, attribute_name): return limit_dict["lower"], limit_dict["upper"] -def check_assertions(func): - @wraps(func) - def wrapper(s, arguments): - # noinspection PyProtectedMember - failed_assertions = [ - assertion - for assertion in s._assertions - if assertion is False - or assertion is not True - and not assertion.instance_for_arguments(arguments,) - ] - number_of_failed_assertions = len(failed_assertions) - if number_of_failed_assertions > 0: - name_string = "\n".join( - [ - assertion.name - for assertion in failed_assertions - if hasattr(assertion, "name") and assertion.name is not None - ] - ) - if not conf.instance["general"]["test"]["exception_override"]: - raise exc.FitException( - f"{number_of_failed_assertions} assertions failed!\n{name_string}" - ) - - return func(s, arguments) - - return wrapper - - class TuplePathModifier: def __init__(self, model_: "AbstractPriorModel"): """ @@ -191,8 +161,46 @@ def __init__(self, label=None): super().__init__(label=label) self._assertions = list() + def check_assertions(self, arguments: Dict[Prior, float]): + """ + Check that all assertions are satisfied by the given arguments. + + Parameters + ---------- + arguments + A dictionary mapping priors to values + + Raises + ------ + FitException + If any assertion is not satisfied + """ + failed_assertions = [ + assertion + for assertion in self._assertions + if assertion is False + or assertion is not True + and not assertion.instance_for_arguments( + arguments, + ) + ] + number_of_failed_assertions = len(failed_assertions) + if number_of_failed_assertions > 0: + name_string = "\n".join( + [ + assertion.name + for assertion in failed_assertions + if hasattr(assertion, "name") and assertion.name is not None + ] + ) + raise exc.FitException( + f"{number_of_failed_assertions} assertions failed!\n{name_string}" + ) + def cast( - self, value_dict: Dict["AbstractModel", dict], new_class: type, + self, + value_dict: Dict["AbstractModel", dict], + new_class: type, ) -> "AbstractPriorModel": """ Cast models to a new type. Allows selected models in within this @@ -264,7 +272,13 @@ def _with_paths(self, tree: dict) -> "AbstractModel": for name, subtree in tree.items(): # noinspection PyProtectedMember new_value = getattr(self, name) - if isinstance(new_value, (AbstractPriorModel, TuplePrior,)): + if isinstance( + new_value, + ( + AbstractPriorModel, + TuplePrior, + ), + ): new_value = new_value._with_paths(subtree) setattr(with_paths, name, new_value) return with_paths @@ -309,7 +323,13 @@ def _without_paths(self, tree: dict) -> "AbstractModel": delattr(without_paths, name) else: new_value = getattr(without_paths, name) - if isinstance(new_value, (AbstractPriorModel, TuplePrior,)): + if isinstance( + new_value, + ( + AbstractPriorModel, + TuplePrior, + ), + ): new_value = new_value._without_paths(subtree) setattr(without_paths, name, new_value) return without_paths @@ -499,7 +519,8 @@ def instance_from_unit_vector(self, unit_vector, ignore_prior_limits=False): lambda prior_tuple, unit: ( prior_tuple.prior, prior_tuple.prior.value_for( - unit, ignore_prior_limits=ignore_prior_limits, + unit, + ignore_prior_limits=ignore_prior_limits, ), ), self.prior_tuples_ordered_by_id, @@ -507,7 +528,10 @@ def instance_from_unit_vector(self, unit_vector, ignore_prior_limits=False): ) ) - return self.instance_for_arguments(arguments,) + return self.instance_for_arguments( + arguments, + ignore_assertions=ignore_prior_limits, + ) @property @cast_collection(PriorNameValue) @@ -621,7 +645,10 @@ def random_vector_from_priors_within_limits( for prior in self.priors_ordered_by_id: vector.append( - prior.random(lower_limit=lower_limit, upper_limit=upper_limit,) + prior.random( + lower_limit=lower_limit, + upper_limit=upper_limit, + ) ) return vector @@ -660,7 +687,7 @@ def random_instance_from_priors_within_limits( @property def random_vector_from_priors(self): - """ Generate a random vector of physical values by drawing uniform random values between 0 and 1 and using + """Generate a random vector of physical values by drawing uniform random values between 0 and 1 and using the model priors to map them from unit values to physical values. Returns ------- @@ -716,7 +743,9 @@ def instance_from_vector(self, vector, ignore_prior_limits=False): for prior, value in arguments.items(): prior.assert_within_limits(value) - return self.instance_for_arguments(arguments,) + return self.instance_for_arguments( + arguments, + ) def has(self, cls: Union[Type, Tuple[Type, ...]]) -> bool: """ @@ -749,7 +778,8 @@ def has_model(self, cls, include_zero_dimension=False) -> bool: return ( len( self.model_tuples_with_type( - cls, include_zero_dimension=include_zero_dimension, + cls, + include_zero_dimension=include_zero_dimension, ) ) > 0 @@ -966,7 +996,8 @@ def instance_from_prior_medians(self, ignore_prior_limits=False): ) def log_prior_list_from_vector( - self, vector: [float], + self, + vector: [float], ): """ Compute the log priors of every parameter in a vector, using the Prior of every parameter. @@ -1189,27 +1220,39 @@ def prior_class_dict(self): return d def _instance_for_arguments( - self, arguments: Dict[Prior, float], + self, + arguments: Dict[Prior, float], ): raise NotImplementedError() def instance_for_arguments( - self, arguments, + self, + arguments: Dict[Prior, float], + ignore_assertions: bool = False, ): """ Returns an instance of the model for a set of arguments Parameters ---------- - arguments: {Prior: float} + arguments Dictionary mapping priors to attribute analysis_path and value pairs + ignore_assertions + If True, assertions will not be checked Returns ------- An instance of the class """ + if not ( + conf.instance["general"]["test"]["exception_override"] or ignore_assertions + ): + self.check_assertions(arguments) + logger.debug(f"Creating an instance for arguments") - return self._instance_for_arguments(arguments,) + return self._instance_for_arguments( + arguments, + ) def path_for_name(self, name: str) -> Tuple[str, ...]: """ @@ -1516,7 +1559,12 @@ def parameterization(self) -> str: formatter = TextFormatter(line_length=info_whitespace()) for t in self.path_instance_tuples_for_class( - (Prior, float, tuple,), ignore_children=True + ( + Prior, + float, + tuple, + ), + ignore_children=True, ): for i in range(len(t[0])): path = t[0][:i] diff --git a/autofit/mapper/prior_model/collection.py b/autofit/mapper/prior_model/collection.py index d0a548f28..f4d3485de 100644 --- a/autofit/mapper/prior_model/collection.py +++ b/autofit/mapper/prior_model/collection.py @@ -3,7 +3,6 @@ from autofit.mapper.model import ModelInstance, assert_not_frozen from autofit.mapper.prior.abstract import Prior from autofit.mapper.prior_model.abstract import AbstractPriorModel -from autofit.mapper.prior_model.abstract import check_assertions class Collection(AbstractPriorModel): @@ -40,11 +39,7 @@ def __len__(self): return len(self.values) def __str__(self): - return "\n".join( - f"{key} = {value}" - for key, value - in self.items() - ) + return "\n".join(f"{key} = {value}" for key, value in self.items()) def __hash__(self): return self.id @@ -59,30 +54,24 @@ def values(self): def items(self): return self._dict.items() - def with_prefix( - self, - prefix: str - ): + def with_prefix(self, prefix: str): """ Filter members of the collection, only returning those that start with a given prefix as a new collection. """ - return Collection({ - key: value - for key, value - in self.items() - if key.startswith( - prefix - ) - }) + return Collection( + {key: value for key, value in self.items() if key.startswith(prefix)} + ) def as_model(self): - return Collection({ - key: value.as_model() - if isinstance(value, AbstractPriorModel) - else value - for key, value in self.dict().items() - }) + return Collection( + { + key: value.as_model() + if isinstance(value, AbstractPriorModel) + else value + for key, value in self.dict().items() + } + ) def __init__(self, *arguments, **kwargs): """ @@ -198,30 +187,6 @@ def remove(self, item): if value == item: del self.__dict__[key] - @check_assertions - def _instance_for_arguments(self, arguments): - """ - Parameters - ---------- - arguments: {Prior: float} - A dictionary of arguments - - Returns - ------- - model_instances: [object] - A list of instances constructed from the list of prior models. - """ - result = ModelInstance() - for key, value in self.__dict__.items(): - if key.startswith("_"): - continue - if isinstance(value, AbstractPriorModel): - value = value.instance_for_arguments(arguments) - elif isinstance(value, Prior): - value = arguments[value] - setattr(result, key, value) - return result - def gaussian_prior_model_for_arguments(self, arguments): """ Create a new collection, updating its priors according to the argument @@ -239,24 +204,39 @@ def gaussian_prior_model_for_arguments(self, arguments): collection = Collection() for key, value in self.items(): - if key in ( - "component_number", - "item_number", - "id" - ) or key.startswith( - "_" - ): + if key in ("component_number", "item_number", "id") or key.startswith("_"): continue if isinstance(value, AbstractPriorModel): - collection[key] = value.gaussian_prior_model_for_arguments( - arguments - ) + collection[key] = value.gaussian_prior_model_for_arguments(arguments) if isinstance(value, Prior): collection[key] = arguments[value] return collection + def _instance_for_arguments(self, arguments): + """ + Parameters + ---------- + arguments: {Prior: float} + A dictionary of arguments + + Returns + ------- + model_instances: [object] + A list of instances constructed from the list of prior models. + """ + result = ModelInstance() + for key, value in self.__dict__.items(): + if key.startswith("_"): + continue + if isinstance(value, AbstractPriorModel): + value = value.instance_for_arguments(arguments) + elif isinstance(value, Prior): + value = arguments[value] + setattr(result, key, value) + return result + @property def prior_class_dict(self): return { diff --git a/autofit/mapper/prior_model/prior_model.py b/autofit/mapper/prior_model/prior_model.py index d6f0b4e32..23c7630db 100644 --- a/autofit/mapper/prior_model/prior_model.py +++ b/autofit/mapper/prior_model/prior_model.py @@ -11,7 +11,6 @@ from autofit.mapper.prior.deferred import DeferredInstance from autofit.mapper.prior.tuple_prior import TuplePrior from autofit.mapper.prior_model.abstract import AbstractPriorModel -from autofit.mapper.prior_model.abstract import check_assertions from autofit.tools.namer import namer logger = logging.getLogger(__name__) @@ -287,7 +286,6 @@ def is_deferred_arguments(self): return len(self.direct_deferred_tuples) > 0 # noinspection PyUnresolvedReferences - @check_assertions def _instance_for_arguments(self, arguments: {ModelObject: object}): """ Returns an instance of the associated class for a set of arguments @@ -316,7 +314,9 @@ def _instance_for_arguments(self, arguments: {ModelObject: object}): prior_model = prior_model_tuple.prior_model model_arguments[ prior_model_tuple.name - ] = prior_model.instance_for_arguments(arguments,) + ] = prior_model.instance_for_arguments( + arguments, + ) prior_arguments = dict() diff --git a/autofit/non_linear/paths/database.py b/autofit/non_linear/paths/database.py index cb1eecf86..c4a1bb5f7 100644 --- a/autofit/non_linear/paths/database.py +++ b/autofit/non_linear/paths/database.py @@ -85,7 +85,9 @@ def create_child( """ self.fit.is_grid_search = True if self.fit.instance is None: - self.fit.instance = self.model.instance_from_prior_medians() + self.fit.instance = self.model.instance_from_prior_medians( + ignore_prior_limits=True + ) child = type(self)( session=self.session, name=name or self.name, diff --git a/test_autofit/non_linear/test_regression.py b/test_autofit/non_linear/test_regression.py index dba6c0e65..c14222581 100644 --- a/test_autofit/non_linear/test_regression.py +++ b/test_autofit/non_linear/test_regression.py @@ -4,6 +4,7 @@ import pytest import autofit as af +from autofit import exc def test_no_priors(): @@ -13,39 +14,36 @@ def test_no_priors(): search.fit(model, af.Analysis()) -@pytest.fixture( - name="optimizer" -) +@pytest.fixture(name="optimizer") def make_optimizer(): - return af.DynestyStatic( - "name" - ) + return af.DynestyStatic("name") -def test_serialize_optimiser( - optimizer -): - optimizer = pickle.loads( - pickle.dumps( - optimizer - ) - ) +def test_serialize_optimiser(optimizer): + optimizer = pickle.loads(pickle.dumps(optimizer)) assert optimizer.name == "name" -def test_serialize_grid_search( - optimizer -): - grid_search = af.SearchGridSearch( - optimizer - ) +def test_serialize_grid_search(optimizer): + grid_search = af.SearchGridSearch(optimizer) assert grid_search.logger.name == "GridSearch (name)" assert "logger" not in grid_search.__getstate__() - dumped = dill.dumps( - grid_search - ) - loaded = dill.loads( - dumped - ) + dumped = dill.dumps(grid_search) + loaded = dill.loads(dumped) assert loaded.logger is not None + + +def test_skip_assertions(): + one = af.Model(af.Gaussian) + two = af.Model(af.Gaussian) + model = af.Collection( + one=one, + two=two, + ) + model.add_assertion(one.centre < two.centre) + + with pytest.raises(exc.FitException): + model.instance_from_prior_medians() + + model.instance_from_prior_medians(ignore_prior_limits=True) From c0a07c69fbac4a9b51999317c38445b58b564be5 Mon Sep 17 00:00:00 2001 From: Richard Date: Wed, 10 May 2023 10:21:17 +0100 Subject: [PATCH 212/226] allow already existing directories for test analysis --- test_autofit/non_linear/test_analysis.py | 28 +++++++++++++++--------- 1 file changed, 18 insertions(+), 10 deletions(-) diff --git a/test_autofit/non_linear/test_analysis.py b/test_autofit/non_linear/test_analysis.py index 64f13451c..fdceca3f1 100644 --- a/test_autofit/non_linear/test_analysis.py +++ b/test_autofit/non_linear/test_analysis.py @@ -21,20 +21,20 @@ def log_likelihood_function(self, instance): def visualize_before_fit(self, paths: AbstractPaths, model): self.did_visualise = True - os.makedirs(paths.image_path) + os.makedirs(paths.image_path, exist_ok=True) open(f"{paths.image_path}/image.png", "w+").close() def visualize(self, paths: AbstractPaths, instance, during_analysis): self.did_visualise = True - os.makedirs(paths.image_path) + os.makedirs(paths.image_path, exist_ok=True) open(f"{paths.image_path}/image.png", "w+").close() def visualize_before_fit_combined(self, analyses, paths, model): - self.did_visualise_combined = True - def visualize_combined(self, analyses, paths: AbstractPaths, instance, during_analysis): - + def visualize_combined( + self, analyses, paths: AbstractPaths, instance, during_analysis + ): self.did_visualise_combined = True def profile_log_likelihood_function(self, paths: AbstractPaths, instance): @@ -62,11 +62,12 @@ def test_visualise(): def test_visualise_before_fit_combined(): - analysis_1 = Analysis() analysis_2 = Analysis() - (analysis_1 + analysis_2).visualize_before_fit_combined(None, af.DirectoryPaths(), None) + (analysis_1 + analysis_2).visualize_before_fit_combined( + None, af.DirectoryPaths(), None + ) assert analysis_1.did_visualise_combined is True assert analysis_2.did_visualise_combined is False @@ -82,13 +83,13 @@ def test_visualise_combined(): assert analysis_2.did_visualise_combined is False - def test__profile_log_likelihood(): analysis_1 = Analysis() analysis_2 = Analysis() (analysis_1 + analysis_2).profile_log_likelihood_function( - af.DirectoryPaths(), None, + af.DirectoryPaths(), + None, ) assert analysis_1.did_profile is True @@ -109,7 +110,14 @@ def test_add_analysis(): @pytest.mark.parametrize( - "number, first, second", [(3, 2, 1), (4, 2, 2), (5, 3, 2), (6, 3, 3), (7, 4, 3),] + "number, first, second", + [ + (3, 2, 1), + (4, 2, 2), + (5, 3, 2), + (6, 3, 3), + (7, 4, 3), + ], ) def test_analysis_pool(number, first, second): pool = AnalysisPool(number * [Analysis()], 2) From cd308647022dacb18e4e2d070f1eeb4bf4fed185 Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Wed, 10 May 2023 12:43:42 +0100 Subject: [PATCH 213/226] review --- autofit/non_linear/analysis/analysis.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/autofit/non_linear/analysis/analysis.py b/autofit/non_linear/analysis/analysis.py index c0fb986f8..2ee6ebdbb 100644 --- a/autofit/non_linear/analysis/analysis.py +++ b/autofit/non_linear/analysis/analysis.py @@ -1,5 +1,6 @@ import logging from abc import ABC +import os from autoconf import conf @@ -67,10 +68,7 @@ def should_visualize(self, paths: AbstractPaths) -> bool: if os.environ.get("PYAUTOFIT_TEST_MODE") == "1": return False - if paths.is_complete and not conf.instance["general"]["output"]["force_visualize_overwrite"]: - return False - - return True + return not paths.is_complete or conf.instance["general"]["output"]["force_visualize_overwrite"] def log_likelihood_function(self, instance): raise NotImplementedError() From b3aab4d9123e3e5e6cd80d4e6a7d126ec72217a6 Mon Sep 17 00:00:00 2001 From: Richard Date: Wed, 10 May 2023 12:52:37 +0100 Subject: [PATCH 214/226] load a list of objects from analyses directory --- autofit/aggregator/search_output.py | 37 +++++++++---------- .../aggregator/search_output/metadata | 0 .../aggregator/test_child_analysis.py | 10 +++++ 3 files changed, 28 insertions(+), 19 deletions(-) create mode 100644 test_autofit/aggregator/search_output/metadata create mode 100644 test_autofit/aggregator/test_child_analysis.py diff --git a/autofit/aggregator/search_output.py b/autofit/aggregator/search_output.py index 68fbc6e26..33cd500b6 100644 --- a/autofit/aggregator/search_output.py +++ b/autofit/aggregator/search_output.py @@ -2,6 +2,7 @@ import os import pickle from os import path +from pathlib import Path import dill @@ -16,14 +17,9 @@ def _create_file_handle(*args, **kwargs): using dill and return None instead. """ try: - return original_create_file_handle( - *args, **kwargs - ) + return original_create_file_handle(*args, **kwargs) except pickle.UnpicklingError as e: - if not isinstance( - e.args[0], - FileNotFoundError - ): + if not isinstance(e.args[0], FileNotFoundError): raise e logging.warning( f"Could not create a handler for {e.args[0].filename} as it does not exist" @@ -34,6 +30,11 @@ def _create_file_handle(*args, **kwargs): dill._dill._create_filehandle = _create_file_handle +class ChildAnalysis: + def __init__(self, directory: Path): + self.directory = directory + + class SearchOutput: """ @DynamicAttrs @@ -54,14 +55,16 @@ def __init__(self, directory: str): self.file_path = os.path.join(directory, "metadata") with open(self.file_path) as f: self.text = f.read() - pairs = [ - line.split("=") - for line - in self.text.split("\n") - if "=" in line - ] + pairs = [line.split("=") for line in self.text.split("\n") if "=" in line] self.__dict__.update({pair[0]: pair[1] for pair in pairs}) + @property + def child_analyses(self): + """ + A list of child analyses loaded from the analyses directory + """ + return list(map(ChildAnalysis, Path(self.directory).glob("analyses/*"))) + @property def pickle_path(self): return path.join(self.directory, "pickles") @@ -79,9 +82,7 @@ def mask(self): """ A pickled mask object """ - with open( - os.path.join(self.pickle_path, "mask.pickle"), "rb" - ) as f: + with open(os.path.join(self.pickle_path, "mask.pickle"), "rb") as f: return dill.load(f) def __getattr__(self, item): @@ -91,9 +92,7 @@ def __getattr__(self, item): dataset.pickle, meta_dataset.pickle etc. """ try: - with open( - os.path.join(self.pickle_path, f"{item}.pickle"), "rb" - ) as f: + with open(os.path.join(self.pickle_path, f"{item}.pickle"), "rb") as f: return pickle.load(f) except FileNotFoundError: pass diff --git a/test_autofit/aggregator/search_output/metadata b/test_autofit/aggregator/search_output/metadata new file mode 100644 index 000000000..e69de29bb diff --git a/test_autofit/aggregator/test_child_analysis.py b/test_autofit/aggregator/test_child_analysis.py new file mode 100644 index 000000000..9595fd5e0 --- /dev/null +++ b/test_autofit/aggregator/test_child_analysis.py @@ -0,0 +1,10 @@ +from pathlib import Path + +from autofit import SearchOutput + + +def test_child_analysis(): + directory = Path(__file__).parent / "search_output" + search_output = SearchOutput(str(directory)) + + assert len(search_output.child_analyses) == 2 From 3b5b0b5fba5af75b121259c72179785220d58035 Mon Sep 17 00:00:00 2001 From: Richard Date: Wed, 10 May 2023 14:02:30 +0100 Subject: [PATCH 215/226] load pickles for child analysis in directory --- autofit/aggregator/search_output.py | 48 +++++++++--------- .../analysis_0/pickles/example.pickle | Bin 0 -> 26 bytes .../analysis_1/pickles/example.pickle | Bin 0 -> 26 bytes .../aggregator/test_child_analysis.py | 14 ++++- 4 files changed, 36 insertions(+), 26 deletions(-) create mode 100644 test_autofit/aggregator/search_output/analyses/analysis_0/pickles/example.pickle create mode 100644 test_autofit/aggregator/search_output/analyses/analysis_1/pickles/example.pickle diff --git a/autofit/aggregator/search_output.py b/autofit/aggregator/search_output.py index 33cd500b6..cea2c9a8d 100644 --- a/autofit/aggregator/search_output.py +++ b/autofit/aggregator/search_output.py @@ -30,12 +30,28 @@ def _create_file_handle(*args, **kwargs): dill._dill._create_filehandle = _create_file_handle -class ChildAnalysis: +class Output: def __init__(self, directory: Path): self.directory = directory + @property + def pickle_path(self): + return self.directory / "pickles" + + def __getattr__(self, item): + """ + Attempt to load a pickle by the same name from the search output directory. -class SearchOutput: + dataset.pickle, meta_dataset.pickle etc. + """ + try: + with open(self.pickle_path / f"{item}.pickle", "rb") as f: + return pickle.load(f) + except FileNotFoundError: + pass + + +class SearchOutput(Output): """ @DynamicAttrs """ @@ -49,7 +65,7 @@ def __init__(self, directory: str): directory The directory of the search """ - self.directory = directory + super().__init__(Path(directory)) self.__search = None self.__model = None self.file_path = os.path.join(directory, "metadata") @@ -63,18 +79,14 @@ def child_analyses(self): """ A list of child analyses loaded from the analyses directory """ - return list(map(ChildAnalysis, Path(self.directory).glob("analyses/*"))) - - @property - def pickle_path(self): - return path.join(self.directory, "pickles") + return list(map(Output, Path(self.directory).glob("analyses/*"))) @property def model_results(self) -> str: """ Reads the model.results file """ - with open(os.path.join(self.directory, "model.results")) as f: + with open(self.directory / "model.results") as f: return f.read() @property @@ -82,21 +94,9 @@ def mask(self): """ A pickled mask object """ - with open(os.path.join(self.pickle_path, "mask.pickle"), "rb") as f: + with open(self.pickle_path / "mask.pickle", "rb") as f: return dill.load(f) - def __getattr__(self, item): - """ - Attempt to load a pickle by the same name from the search output directory. - - dataset.pickle, meta_dataset.pickle etc. - """ - try: - with open(os.path.join(self.pickle_path, f"{item}.pickle"), "rb") as f: - return pickle.load(f) - except FileNotFoundError: - pass - @property def header(self) -> str: """ @@ -113,7 +113,7 @@ def search(self) -> abstract_search.NonLinearSearch: """ if self.__search is None: try: - with open(os.path.join(self.pickle_path, "search.pickle"), "r+b") as f: + with open(self.pickle_path / "search.pickle", "r+b") as f: self.__search = pickle.loads(f.read()) except (FileNotFoundError, ModuleNotFoundError) as e: print(self.pickle_path) @@ -127,7 +127,7 @@ def model(self): """ if self.__model is None: try: - with open(os.path.join(self.pickle_path, "model.pickle"), "r+b") as f: + with open(self.pickle_path / "model.pickle", "r+b") as f: self.__model = pickle.loads(f.read()) except (FileNotFoundError, ModuleNotFoundError) as e: print(self.pickle_path) diff --git a/test_autofit/aggregator/search_output/analyses/analysis_0/pickles/example.pickle b/test_autofit/aggregator/search_output/analyses/analysis_0/pickles/example.pickle new file mode 100644 index 0000000000000000000000000000000000000000..040236d214c0f1ba9aa2339553c85d9b0aa10c42 GIT binary patch literal 26 ccmZo*naa-q0X^IqsX00M3g!7lIVn^007 Date: Wed, 10 May 2023 14:21:44 +0100 Subject: [PATCH 216/226] child_analysis_values method --- autofit/aggregator/aggregator.py | 76 +++++++++---------- .../aggregator/test_child_analysis.py | 23 +++++- 2 files changed, 54 insertions(+), 45 deletions(-) diff --git a/autofit/aggregator/aggregator.py b/autofit/aggregator/aggregator.py index 4b9c05da8..5820d4828 100755 --- a/autofit/aggregator/aggregator.py +++ b/autofit/aggregator/aggregator.py @@ -48,12 +48,7 @@ def filter(self, *predicates) -> "AggregatorGroup": ------- A collection of groups of the same length with each group having the same or fewer members. """ - return AggregatorGroup( - [ - group.filter(*predicates) - for group in self.groups - ] - ) + return AggregatorGroup([group.filter(*predicates) for group in self.groups]) def __getitem__(self, item): return self.groups[item] @@ -75,10 +70,7 @@ def values(self, name: str) -> List[List]: ------- A list of lists of values. """ - return [ - group.values(name) - for group in self.groups - ] + return [group.values(name) for group in self.groups] class AbstractAggregator: @@ -98,23 +90,15 @@ def remove_unzipped(self): Removes the unzipped output directory for each phase. """ for phase in self.search_outputs: - split_path = path.split(phase.directory)[0] unzipped_path = path.join(split_path) - rmtree( - unzipped_path, - ignore_errors=True - ) + rmtree(unzipped_path, ignore_errors=True) def __getitem__( - self, - item: Union[slice, int] - ) -> Union[ - "AbstractAggregator", - SearchOutput - ]: + self, item: Union[slice, int] + ) -> Union["AbstractAggregator", SearchOutput]: """ If an index is passed in then a specific phase output is returned. @@ -130,9 +114,7 @@ def __getitem__( An aggregator or phase """ if isinstance(item, slice): - return AbstractAggregator( - self.search_outputs[item] - ) + return AbstractAggregator(self.search_outputs[item]) return self.search_outputs[item] def __len__(self): @@ -184,11 +166,28 @@ def values(self, name: str) -> Iterator: ------- A generator of values for the attribute """ - return map( - lambda phase: getattr( - phase, name - ), - self.search_outputs + return map(lambda phase: getattr(phase, name), self.search_outputs) + + def child_analysis_values(self, name: str) -> Iterator[List]: + """ + Get values with a given name from the child analyses of each search in + this aggregator. + + Parameters + ---------- + name + The name of an attribute expected to be associated with + child analysis output. If a pickle file with this name + is in the child analysis output directory then that pickle + will be loaded. + + Returns + ------- + A generator of values for the attribute + """ + return ( + [getattr(analysis, name) for analysis in phase.child_analyses] + for phase in self.search_outputs ) def map(self, func): @@ -204,10 +203,7 @@ def map(self, func): ------- A generator of results """ - return map( - func, - self.search_outputs - ) + return map(func, self.search_outputs) def group_by(self, field: str) -> AggregatorGroup: """ @@ -241,11 +237,7 @@ def model_results(self) -> str: class Aggregator(AbstractAggregator): - def __init__( - self, - directory: Union[str, os.PathLike], - completed_only=False - ): + def __init__(self, directory: Union[str, os.PathLike], completed_only=False): """ Class to aggregate phase results for all subdirectories in a given directory. @@ -276,9 +268,7 @@ def __init__( f.extractall(path.join(root, filename[:-4])) except zipfile.BadZipFile: raise zipfile.BadZipFile( - f"File is not a zip file: \n " - f"{root} \n" - f"{filename}" + f"File is not a zip file: \n " f"{root} \n" f"{filename}" ) for root, _, filenames in os.walk(directory): @@ -289,5 +279,7 @@ def __init__( if len(search_outputs) == 0: print(f"\nNo search_outputs found in {directory}\n") else: - print(f"\n A total of {str(len(search_outputs))} search_outputs and results were found.") + print( + f"\n A total of {str(len(search_outputs))} search_outputs and results were found." + ) super().__init__(search_outputs) diff --git a/test_autofit/aggregator/test_child_analysis.py b/test_autofit/aggregator/test_child_analysis.py index 8a31cf2ea..0104d2446 100644 --- a/test_autofit/aggregator/test_child_analysis.py +++ b/test_autofit/aggregator/test_child_analysis.py @@ -3,12 +3,21 @@ import pytest from autofit import SearchOutput +from autofit.aggregator import Aggregator + + +@pytest.fixture(name="directory") +def make_directory(): + return Path(__file__).parent + + +@pytest.fixture(name="search_output") +def make_search_output(directory): + return SearchOutput(directory / "search_output") @pytest.fixture(name="child_analyses") -def make_child_analyses(): - directory = Path(__file__).parent / "search_output" - search_output = SearchOutput(str(directory)) +def make_child_analyses(search_output): return search_output.child_analyses @@ -18,3 +27,11 @@ def test_child_analysis(child_analyses): def test_child_analysis_pickles(child_analyses): assert child_analyses[0].example == "hello world" + + +def test_child_analysis_values(directory): + aggregator = Aggregator(directory) + + assert list(aggregator.child_analysis_values("example")) == [ + ["hello world", "hello world"] + ] From ef413a5e84e44cd03494558fed152a88f890de94 Mon Sep 17 00:00:00 2001 From: Richard Date: Wed, 10 May 2023 14:35:52 +0100 Subject: [PATCH 217/226] add analyses as child fits to the aggregator, with associated pickles --- autofit/database/aggregator/scrape.py | 149 ++++++++------------------ 1 file changed, 45 insertions(+), 104 deletions(-) diff --git a/autofit/database/aggregator/scrape.py b/autofit/database/aggregator/scrape.py index 8264c8de9..49958f7b9 100644 --- a/autofit/database/aggregator/scrape.py +++ b/autofit/database/aggregator/scrape.py @@ -8,14 +8,10 @@ from ..sqlalchemy_ import sa from ...mapper.model_object import Identifier -logger = logging.getLogger( - __name__ -) +logger = logging.getLogger(__name__) -def _parent_identifier( - directory: str -) -> Optional[str]: +def _parent_identifier(directory: str) -> Optional[str]: """ Read the parent identifier for a fit in a directory. @@ -29,11 +25,7 @@ def _parent_identifier( class Scraper: - def __init__( - self, - directory: Union[Path, str], - session: sa.orm.Session - ): + def __init__(self, directory: Union[Path, str], session: sa.orm.Session): """ Facilitates scraping of data output into a directory into the database. @@ -54,13 +46,9 @@ def scrape(self): add them to the session """ for fit in self._fits(): - self.session.add( - fit - ) + self.session.add(fit) for grid_search in self._grid_searches(): - self.session.add( - grid_search - ) + self.session.add(grid_search) def _fits(self): """ @@ -71,25 +59,15 @@ def _fits(self): ------- Generator yielding Fit database objects """ - logger.info( - f"Scraping directory {self.directory}" - ) + logger.info(f"Scraping directory {self.directory}") from autofit.aggregator.aggregator import Aggregator as ClassicAggregator - aggregator = ClassicAggregator( - self.directory - ) - logger.info( - f"{len(aggregator)} searches found" - ) - for item in aggregator: - is_complete = os.path.exists( - f"{item.directory}/.completed" - ) + aggregator = ClassicAggregator(self.directory) + logger.info(f"{len(aggregator)} searches found") + for item in aggregator: + is_complete = os.path.exists(f"{item.directory}/.completed") - parent_identifier = _parent_identifier( - directory=item.directory - ) + parent_identifier = _parent_identifier(directory=item.directory) model = item.model samples = item.samples @@ -101,7 +79,8 @@ def _fits(self): f"{item.search.paths.path_prefix} " f"{item.search.unique_tag} " f"{item.search.name} " - f"{identifier} ") + f"{identifier} " + ) try: instance = samples.max_log_likelihood() @@ -109,12 +88,8 @@ def _fits(self): instance = None try: - fit = self._retrieve_model_fit( - item - ) - logger.warning( - f"Fit already existed with identifier {identifier}" - ) + fit = self._retrieve_model_fit(item) + logger.warning(f"Fit already existed with identifier {identifier}") except sa.orm.exc.NoResultFound: try: log_likelihood = samples.max_log_likelihood_sample.log_likelihood @@ -129,20 +104,21 @@ def _fits(self): is_complete=is_complete, info=item.info, max_log_likelihood=log_likelihood, - parent_id=parent_identifier + parent_id=parent_identifier, ) pickle_path = Path(item.pickle_path) - _add_pickles( - fit, - pickle_path - ) + _add_pickles(fit, pickle_path) + for i, child_analysis in enumerate(item.child_analyses): + child_fit = m.Fit( + id=f"{identifier}_{i}", + parent_id=identifier, + ) + _add_pickles(child_fit, child_analysis.pickle_path) yield fit - def _grid_searches( - self - ): + def _grid_searches(self): """ Retrieve grid searches recursively from an output directory by searching for the .is_grid_search file. @@ -154,49 +130,34 @@ def _grid_searches( Fit objects representing grid searches with child fits associated """ from autofit.aggregator.aggregator import Aggregator as ClassicAggregator + for root, _, filenames in os.walk(self.directory): if ".is_grid_search" in filenames: path = Path(root) is_complete = (path / ".completed").exists() - with open( - path / ".is_grid_search" - ) as f: + with open(path / ".is_grid_search") as f: unique_tag = f.read() grid_search = m.Fit( id=path.name, unique_tag=unique_tag, is_grid_search=True, - parent_id=_parent_identifier( - root - ), - is_complete=is_complete + parent_id=_parent_identifier(root), + is_complete=is_complete, ) pickle_path = path / "pickles" - _add_pickles( - grid_search, - pickle_path - ) + _add_pickles(grid_search, pickle_path) - aggregator = ClassicAggregator( - root - ) + aggregator = ClassicAggregator(root) for item in aggregator: - fit = self._retrieve_model_fit( - item - ) - grid_search.children.append( - fit - ) + fit = self._retrieve_model_fit(item) + grid_search.children.append(fit) yield grid_search - def _retrieve_model_fit( - self, - item - ) -> m.Fit: + def _retrieve_model_fit(self, item) -> m.Fit: """ Retrieve a Fit, if one exists, corresponding to a given SearchOutput @@ -214,18 +175,12 @@ def _retrieve_model_fit( NoResultFound If no fit is found with the identifier """ - return self.session.query( - m.Fit - ).filter( - m.Fit.id == _make_identifier( - item - ) - ).one() + return ( + self.session.query(m.Fit).filter(m.Fit.id == _make_identifier(item)).one() + ) -def _make_identifier( - item -) -> str: +def _make_identifier(item) -> str: """ Create a unique identifier for a SearchOutput. @@ -243,17 +198,10 @@ def _make_identifier( """ search = item.search model = item.model - return str(Identifier([ - search, - model, - search.unique_tag - ])) + return str(Identifier([search, model, search.unique_tag])) -def _add_pickles( - fit: m.Fit, - pickle_path: Path -): +def _add_pickles(fit: m.Fit, pickle_path: Path): """ Load pickles from the path and add them to the database. @@ -265,26 +213,19 @@ def _add_pickles( The path in which the pickles are stored """ try: - filenames = os.listdir( - pickle_path - ) + filenames = os.listdir(pickle_path) except FileNotFoundError as e: logger.exception(e) filenames = [] for filename in filenames: - try: - with open( - pickle_path / filename, - "r+b" - ) as f: - fit[ - filename.split(".")[0] - ] = pickle.load(f) + with open(pickle_path / filename, "r+b") as f: + fit[filename.split(".")[0]] = pickle.load(f) except (pickle.UnpicklingError, ModuleNotFoundError) as e: - if filename == "dynesty.pickle": continue - raise pickle.UnpicklingError(f"Failed to unpickle: {pickle_path} {filename}") from e + raise pickle.UnpicklingError( + f"Failed to unpickle: {pickle_path} {filename}" + ) from e From 3dae87a2b9adb55aae9a511482c468ac6fca3009 Mon Sep 17 00:00:00 2001 From: Richard Date: Wed, 10 May 2023 14:36:25 +0100 Subject: [PATCH 218/226] renamed child_values method --- autofit/aggregator/aggregator.py | 2 +- test_autofit/aggregator/test_child_analysis.py | 4 +--- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/autofit/aggregator/aggregator.py b/autofit/aggregator/aggregator.py index 5820d4828..e0225cf64 100755 --- a/autofit/aggregator/aggregator.py +++ b/autofit/aggregator/aggregator.py @@ -168,7 +168,7 @@ def values(self, name: str) -> Iterator: """ return map(lambda phase: getattr(phase, name), self.search_outputs) - def child_analysis_values(self, name: str) -> Iterator[List]: + def child_values(self, name: str) -> Iterator[List]: """ Get values with a given name from the child analyses of each search in this aggregator. diff --git a/test_autofit/aggregator/test_child_analysis.py b/test_autofit/aggregator/test_child_analysis.py index 0104d2446..15e805d21 100644 --- a/test_autofit/aggregator/test_child_analysis.py +++ b/test_autofit/aggregator/test_child_analysis.py @@ -32,6 +32,4 @@ def test_child_analysis_pickles(child_analyses): def test_child_analysis_values(directory): aggregator = Aggregator(directory) - assert list(aggregator.child_analysis_values("example")) == [ - ["hello world", "hello world"] - ] + assert list(aggregator.child_values("example")) == [["hello world", "hello world"]] From 34cecb14039730a0d86163331bb72a07790b4016 Mon Sep 17 00:00:00 2001 From: Richard Date: Wed, 10 May 2023 14:45:16 +0100 Subject: [PATCH 219/226] example pickles --- .../aggregator/search_output/pickles/info.pickle | 1 + .../search_output/pickles/model.pickle | Bin 0 -> 2375 bytes .../search_output/pickles/search.pickle | Bin 0 -> 4859 bytes 3 files changed, 1 insertion(+) create mode 100644 test_autofit/aggregator/search_output/pickles/info.pickle create mode 100644 test_autofit/aggregator/search_output/pickles/model.pickle create mode 100644 test_autofit/aggregator/search_output/pickles/search.pickle diff --git a/test_autofit/aggregator/search_output/pickles/info.pickle b/test_autofit/aggregator/search_output/pickles/info.pickle new file mode 100644 index 000000000..e2ecf720d --- /dev/null +++ b/test_autofit/aggregator/search_output/pickles/info.pickle @@ -0,0 +1 @@ +€}”. \ No newline at end of file diff --git a/test_autofit/aggregator/search_output/pickles/model.pickle b/test_autofit/aggregator/search_output/pickles/model.pickle new file mode 100644 index 0000000000000000000000000000000000000000..7d4395509a74435dedfb97adf47c64bda26754e0 GIT binary patch literal 2375 zcmaJ@-EJF26t)wu<2Y^7LLwy83J?Ws5KbbM5~QLggkn@TR@9~vT%gf-ckDA_|Ja#L znlu$ZYEvRc+;q5u8@TT?z#H%ieFN^fC}(DW>?jdl$(}uP=FFV)edim0SNrAqY{h=m zTQrr?kjqX$!) z?&)4t)rn6B%-4N2N2m}iF#&}BN~<|8SwO-x7_db5DylJ}sSuoE__zP(7!ya*%GokU z)5Q_MIfi;~3TD)7HxIU=p@ExCyS~tBsff$&(;)U)$7ZHgqwvN^s_wE-CQPeKh4*AJ zI%&v}bpz5(tB@?=hC#m5{X0mXrTGc`tVA%OQLYBx?t!4ry1Rdej zo8^v~MPSi`I1`hcYzP(83%TRnb6&g$CZk5giC#y$U`v_;fcH4=ab&!>PmR!`h+aRe!DhT)kV- z{g!I@(TIe$*`~QiP%`{zk0r$C0hjoe8jZ@|q(0K^7QC#QF-@?~Buj(_2`TKt9;~7J zwdH##KVmtOz>;M{0-uFq8P(3dnBP2Eu}lYu7gIf|59?I~LMj4O{=rIW@n>JqhM!;zN%2M?W2?^Vxa$fi0(2ua|gCTxK1^sZzriZbzdt zWgIRW{dQ2l=TAgk)$p3Sh%rux_y-!^fGgQG2=Bmq@Nqs~@BW!95^QwWFJxo*1a9W@ z8>btzZM!q`{JU&vAg4XcRzBrC%9=RC5}~RZ(}#UnYr{ua_9q%R{GOp$0E{RW5F?6?Me)X?gXi66 ziw`Cd{=>lL2tEp^R+rC&Yy79+d~r#iyl^w5>WEa`2C@h(6w9(p_L{*bgdgBH%gpZj em8@jl^~G#3$6{? literal 0 HcmV?d00001 diff --git a/test_autofit/aggregator/search_output/pickles/search.pickle b/test_autofit/aggregator/search_output/pickles/search.pickle new file mode 100644 index 0000000000000000000000000000000000000000..e149e73fad516bfa66321156afe2958e30c374a6 GIT binary patch literal 4859 zcmcgwU5r~t6}ETn-HpB3O_xnjq7b!BTX|9C-3?7?6d(y3FsZX9Bq@l8YPi04>>20! zUibcFf4UVQrAZcss3-#xRa+jA2oh3+1gdyKyzqcT1xP%#5>j4x;-L?`@ld`q_s6!f z<$*}p()HY#GjqO%l^tct&Jsw7NXN!Ke>S1I> z($;GK`DEfs@_tt@eOJGxYidqbKIvNyOCrhL&zhosw0wWb?jl;mdAuS!p^nBD6s6v9M70^xEINe zkx9{=s#Myq{o-;YR^0aVXD*9y8=EN@uuH7Kii5`-Pgh6yKE>L+NsD{UCctdu)_ zpBQ23lvXo+Ym=oDt1Dn#S@T^Nrjfn}4atc2nHTrlbjKo=XEK$5-Ma5^7YErcjy{$t zfNvUE>wQ|SH&Sy#2pif8zvEe<_eYPOZBB1`nW$zJyI66}VJmQerk&hF~k+T6gmbJ7s+DX9tNXW$&FfJuxBUmx8XRu zYC<}?S;Tp+)kZwF;ICE~@_>RQgky=-WW}m8DXi#a~U5I?252CAtJRe3m)h> z#)&7@!A$6}a0fnQlyE`uS#>lc8)aGt;1d97@j}K*O(A_m2#O_eEP>;6f)->JHJ8#S zQa%UB>+>+Q2=b2S=O;5EG}a&ZA$J(i(F+Y5wQw~Xb7wu>nsct?gg~()51p5X|O91G0~Q~f#uNl zirLQ0E6odt3zL?B@KBDLqm5>jnHB7qAMQxz?ozzoQHvK%YQG|6C)z{ca!3ohkrV}m zf+~l$g%r~``^~gYm`Z8`tv;D6CCP0*u-QWH)(RQd>iR&Kbqkl?%+(?8sfr_A+?1XG zUuFj!_GMNOSX&^gse{NWP>Jmayl%2wvZQ7i3IjN96vTE!swo>n7~ONS-Oa-5dac;j zw{)#pToS)gWhdGiaH5{*yr6e)-_n<{G^vW8)2&wP#gFhYk7gm#?cKGRR~PR$9&Fz@ zqk+m1Xc3s5_t zz6DQAWvi_7NchfGdVL*SL2)2c4B6FWHRuNOv)R0jEK2%nPBjboPUG8HTihw^6n7?e zCU;6ZWvBR7=}z%VLAPpZ#`U{MP6#bzHHT?&N}P`LO}$h@$X1YtPzOak2(@@$d_arh z9pv2SmtO*0gv)*;c(6>ycDWZWqeL_>^qXD^StBih>H}16>Y@y{E-Pq9VZYn(lhjKwd+FVppV z4~N>4iL-?AFL`i^KRisC2jxS=?K(#bhw*fs?tLo#WtXL61fox#!dZZ?CNLIumtaeV1(>vV2OH^c!0i<>-B#X)#0?f zr~WxD@|hJej!q7D816&HP;;U1W7k2Sj8+_owX3E>{CcK^p1QVS@;w^s1?0kx>?SCg z7LRH_`3fx{eNyjIJr4~12D(8Al;1#=%_ak~`3cbd8NO|NIku-WzEgn@-leHc#Z8syLo^oz1eEPqc)fg7N^#sA$q=6|tep zrpYla&@>HQ*+z?d$q(fwb=tU5O%<0BsU}U~*CZ+wSfmC@RiNJRgKOb{{((@3(nw(HZ14}pi8M8ZQwke;G@*Dy(2IO6oFmUvt|A)aoD z+Q;fjJezAt)SHet);uAe6vxG<_f&*VEuFs$x2F;-ZrA?< D4B;&v literal 0 HcmV?d00001 From 15c085954d78a7e044e16103ea54bc2d39026d02 Mon Sep 17 00:00:00 2001 From: Richard Date: Wed, 10 May 2023 15:11:43 +0100 Subject: [PATCH 220/226] getting child values but also including analyses as fits --- autofit/database/aggregator/aggregator.py | 210 ++++++------------ autofit/database/aggregator/scrape.py | 2 +- autofit/database/query/condition.py | 49 ++-- autofit/database/query/query/attribute.py | 42 +--- .../aggregator/test_child_analysis.py | 11 + 5 files changed, 99 insertions(+), 215 deletions(-) diff --git a/autofit/database/aggregator/aggregator.py b/autofit/database/aggregator/aggregator.py index 2e2dac4ab..ba53ad329 100644 --- a/autofit/database/aggregator/aggregator.py +++ b/autofit/database/aggregator/aggregator.py @@ -10,9 +10,7 @@ from ..query.query import AbstractQuery, Attribute from ..query.query.attribute import BestFitQuery -logger = logging.getLogger( - __name__ -) +logger = logging.getLogger(__name__) class NullPredicate(AbstractQuery): @@ -62,10 +60,7 @@ class FitQuery(Query): """ @staticmethod - def for_name(name: str) -> Union[ - AbstractQuery, - Attribute - ]: + def for_name(name: str) -> Union[AbstractQuery, Attribute]: """ Create a query based on some attribute of the Fit. @@ -83,12 +78,8 @@ def for_name(name: str) -> Union[ aggregator.fit.name == 'example name' """ if name not in m.fit_attributes: - raise AttributeError( - f"Fit has no attribute {name}" - ) - if m.fit_attributes[ - name - ].type.python_type == bool: + raise AttributeError(f"Fit has no attribute {name}") + if m.fit_attributes[name].type.python_type == bool: return q.BA(name) return q.A(name) @@ -130,10 +121,24 @@ def values(self, name: str) -> list: return values + def child_values(self, name: str) -> List[list]: + """ + Retrieve the value associated with each fit with the given + parameter name + + Parameters + ---------- + name + The name of some pickle, such as 'samples' + + Returns + ------- + A list of objects, one for each fit + """ + return [[analysis[name] for analysis in fit.children] for fit in self] + def __iter__(self): - return iter( - self.fits - ) + return iter(self.fits) def __len__(self): return len(self.fits) @@ -146,13 +151,13 @@ def __eq__(self, other): class Aggregator(AbstractAggregator): def __init__( - self, - session: sa.orm.Session, - filename: Optional[str] = None, - predicate: AbstractQuery = NullPredicate(), - offset=0, - limit=None, - order_bys=None + self, + session: sa.orm.Session, + filename: Optional[str] = None, + predicate: AbstractQuery = NullPredicate(), + offset=0, + limit=None, + order_bys=None, ): """ Query results from an intermediary SQLite database. @@ -173,11 +178,7 @@ def __init__( self._limit = limit self._order_bys = order_bys or list() - def order_by( - self, - item: Attribute, - reverse=False - ) -> "Aggregator": + def order_by(self, item: Attribute, reverse=False) -> "Aggregator": """ Order the results by a given attribute of the search. Can be applied multiple times with the first application taking precedence. @@ -201,9 +202,7 @@ def order_by( """ if reverse: item = Reverse(item) - return self._new_with( - order_bys=self._order_bys + [item] - ) + return self._new_with(order_bys=self._order_bys + [item]) @property def search(self) -> FitQuery: @@ -231,9 +230,7 @@ def fits(self) -> List[m.Fit]: match the aggregator's predicate. """ if self._fits is None: - self._fits = self._fits_for_query( - self._predicate.fit_query - ) + self._fits = self._fits_for_query(self._predicate.fit_query) return self._fits def map(self, func): @@ -289,14 +286,12 @@ def query(self, predicate: AbstractQuery) -> "Aggregator": >>> aggregator.filter((lens.bulge == SersicCore) & (lens.disk == Sersic)) >>> aggregator.filter((lens.bulge == SersicCore) | (lens.disk == Sersic)) """ - return self._new_with( - predicate=self._predicate & predicate - ) + return self._new_with(predicate=self._predicate & predicate) def _new_with( - self, - type_=None, - **kwargs, + self, + type_=None, + **kwargs, ) -> "Aggregator": """ Create a new instance with the same attribute values except @@ -319,23 +314,17 @@ def _new_with( "filename": self.filename, "predicate": self._predicate, "order_bys": self._order_bys, - **kwargs + **kwargs, } type_ = type_ or type(self) - return type_( - **kwargs - ) + return type_(**kwargs) def __getitem__(self, item): offset = self._offset limit = self._limit - if isinstance( - item, int - ): + if isinstance(item, int): return self.fits[item] - elif isinstance( - item, slice - ): + elif isinstance(item, slice): if item.start is not None: if item.start >= 0: offset += item.start @@ -346,15 +335,9 @@ def __getitem__(self, item): limit = len(self) - item.stop - offset else: limit = len(self) + item.stop - return self._new_with( - offset=offset, - limit=limit - ) + return self._new_with(offset=offset, limit=limit) - def _fits_for_query( - self, - query: str - ) -> List[m.Fit]: + def _fits_for_query(self, query: str) -> List[m.Fit]: """ Execute a raw SQL query and return a Fit object for each Fit id returned by the query @@ -369,53 +352,21 @@ def _fits_for_query( A list of fit objects, one for each id returned by the query """ - logger.debug( - f"Executing query: {query}" - ) - fit_ids = { - row[0] - for row - in self.session.execute( - query - ) - } + logger.debug(f"Executing query: {query}") + fit_ids = {row[0] for row in self.session.execute(query)} - logger.info( - f"{len(fit_ids)} fit(s) found matching query" - ) - query = self.session.query( - m.Fit - ).filter( - m.Fit.id.in_( - fit_ids - ) - ) + logger.info(f"{len(fit_ids)} fit(s) found matching query") + query = self.session.query(m.Fit).filter(m.Fit.id.in_(fit_ids)) for order_by in self._order_bys: - attribute = getattr( - m.Fit, - order_by.attribute - ) - - if isinstance( - order_by, - Reverse - ): + attribute = getattr(m.Fit, order_by.attribute) + + if isinstance(order_by, Reverse): attribute = sa.desc(attribute) - query = query.order_by( - attribute - ) - - return query.offset( - self._offset - ).limit( - self._limit - ).all() - - def add_directory( - self, - directory: str, - auto_commit=True - ): + query = query.order_by(attribute) + + return query.offset(self._offset).limit(self._limit).all() + + def add_directory(self, directory: str, auto_commit=True): """ Recursively search a directory for autofit results and add them to this database. @@ -437,21 +388,14 @@ def add_directory( A directory containing autofit results embedded in a file structure """ - scraper = Scraper( - directory, - self.session - ) + scraper = Scraper(directory, self.session) scraper.scrape() if auto_commit: self.session.commit() @classmethod - def from_database( - cls, - filename: str, - completed_only: bool = False - ) -> "Aggregator": + def from_database(cls, filename: str, completed_only: bool = False) -> "Aggregator": """ Create an instance from a sqlite database file. @@ -468,17 +412,11 @@ def from_database( An aggregator connected to the database specified by the file. """ from autofit.database import open_database - session = open_database( - str(filename) - ) - aggregator = Aggregator( - session, - filename - ) + + session = open_database(str(filename)) + aggregator = Aggregator(session, filename) if completed_only: - return aggregator( - aggregator.search.is_complete - ) + return aggregator(aggregator.search.is_complete) return aggregator def grid_searches(self) -> "GridSearchAggregator": @@ -493,7 +431,7 @@ def grid_searches(self) -> "GridSearchAggregator": self._new_with( type_=GridSearchAggregator, predicate=self._predicate & self.search.is_grid_search, - order_bys=[Attribute("id")] + order_bys=[Attribute("id")], ), ) @@ -506,10 +444,7 @@ def best_fits(self) -> "GridSearchAggregator": Best fits are initially implicitly ordered by their parent id """ return self._new_with( - predicate=BestFitQuery( - self._predicate - ), - order_bys=[Attribute("parent_id")] + predicate=BestFitQuery(self._predicate), order_bys=[Attribute("parent_id")] ) def children(self) -> "GridSearchAggregator": @@ -520,16 +455,10 @@ def children(self) -> "GridSearchAggregator": Children are initially implicitly ordered by their parent id """ return self._new_with( - predicate=q.ChildQuery( - self._predicate - ), - order_bys=[Attribute("parent_id")] + predicate=q.ChildQuery(self._predicate), order_bys=[Attribute("parent_id")] ) - def cell_number( - self, - number: int - ) -> "CellAggregator": + def cell_number(self, number: int) -> "CellAggregator": """ Create an aggregator for accessing all values for child fits with a given index, ordered by parameter values. @@ -543,18 +472,11 @@ def cell_number( ------- An aggregator comprising fits for a given cell for each grid search """ - return CellAggregator( - number, - self - ) + return CellAggregator(number, self) class CellAggregator(AbstractAggregator): - def __init__( - self, - number: int, - aggregator: GridSearchAggregator - ): + def __init__(self, number: int, aggregator: GridSearchAggregator): """ Aggregator for accessing data for a specific fit number in each grid search. @@ -582,7 +504,7 @@ def fits(self) -> List[m.Fit]: self._fits.append( sorted( fit.children, - key=lambda f: f.model.order_no if f.model is not None else 0 + key=lambda f: f.model.order_no if f.model is not None else 0, )[self.number] ) return self._fits diff --git a/autofit/database/aggregator/scrape.py b/autofit/database/aggregator/scrape.py index 49958f7b9..3af869abc 100644 --- a/autofit/database/aggregator/scrape.py +++ b/autofit/database/aggregator/scrape.py @@ -112,9 +112,9 @@ def _fits(self): for i, child_analysis in enumerate(item.child_analyses): child_fit = m.Fit( id=f"{identifier}_{i}", - parent_id=identifier, ) _add_pickles(child_fit, child_analysis.pickle_path) + fit.children.append(child_fit) yield fit diff --git a/autofit/database/query/condition.py b/autofit/database/query/condition.py index 7baa7cf18..8ab5834cb 100644 --- a/autofit/database/query/condition.py +++ b/autofit/database/query/condition.py @@ -5,11 +5,7 @@ class Table: - def __init__( - self, - name: str, - abbreviation: Optional[str] = None - ): + def __init__(self, name: str, abbreviation: Optional[str] = None): """ A table containing some type of object in the database. @@ -75,36 +71,25 @@ def __str__(self): The condition written as SQL """ - def __and__( - self, - other: - "AbstractCondition" - ): + def __and__(self, other: "AbstractCondition"): """ Combine this and another query with an AND statement. - + Simplification is applied so that the query will execute as fast as possible. """ from .junction import And - return And( - self, - other - ) - def __or__( - self, - other: "AbstractCondition" - ): + return And(self, other) + + def __or__(self, other: "AbstractCondition"): """ Combine this and another query with an AND statement. Simplification is applied so that the query will execute as fast as possible. """ from .junction import Or - return Or( - self, - other - ) + + return Or(self, other) def __hash__(self): return hash(str(self)) @@ -123,7 +108,6 @@ def __repr__(self): class NoneCondition(AbstractCondition): - @property def tables(self) -> Set[Table]: return {none_table} @@ -133,11 +117,7 @@ def __str__(self): class AbstractValueCondition(AbstractCondition, ABC): - def __init__( - self, - symbol: str, - value - ): + def __init__(self, symbol: str, value): """ A condition which compares the named column to a value @@ -238,9 +218,7 @@ def class_path(self) -> str: """ The full import path of the type """ - return get_class_path( - self.cls - ) + return get_class_path(self.cls) class AttributeCondition(AbstractCondition, ABC): @@ -260,14 +238,13 @@ def __str__(self): class EqualityAttributeCondition(AttributeCondition): @property def value(self): - if isinstance( - self._value, - str - ): + if isinstance(self._value, str): return f"'{self._value}'" return self._value def __str__(self): + if self._value is None: + return f"{self.attribute} IS NULL" return f"{self.attribute} = {self.value}" diff --git a/autofit/database/query/query/attribute.py b/autofit/database/query/query/attribute.py index cdac408e5..681350b1c 100644 --- a/autofit/database/query/query/attribute.py +++ b/autofit/database/query/query/attribute.py @@ -24,11 +24,7 @@ def __init__(self, attribute: str): """ self.attribute = attribute - def _make_query( - self, - cls, - value - ) -> AttributeQuery: + def _make_query(self, cls, value) -> AttributeQuery: """ Create a query against this attribute @@ -43,51 +39,33 @@ def _make_query( ------- A query on ids of the fit table """ - return AttributeQuery( - cls( - attribute=self.attribute, - value=value - ) - ) + return AttributeQuery(cls(attribute=self.attribute, value=value)) def __eq__(self, other) -> AttributeQuery: """ Check whether an attribute, such as a search name, is equal to some value """ - return self._make_query( - cls=c.EqualityAttributeCondition, - value=other - ) + return self._make_query(cls=c.EqualityAttributeCondition, value=other) def in_(self, item: str) -> AttributeQuery: """ Check whether an attribute is contained within a substring """ - return self._make_query( - cls=c.InAttributeCondition, - value=item - ) + return self._make_query(cls=c.InAttributeCondition, value=item) def contains(self, item: str) -> AttributeQuery: """ Check whether an attribute, such as a search name, contains some string """ - return self._make_query( - cls=c.ContainsAttributeCondition, - value=item - ) + return self._make_query(cls=c.ContainsAttributeCondition, value=item) class BooleanAttribute(Attribute, AttributeQuery): def __init__(self, attribute): super().__init__(attribute) - super(AttributeQuery, self).__init__( - c.AttributeCondition( - attribute - ) - ) + super(AttributeQuery, self).__init__(c.AttributeCondition(attribute)) def __hash__(self): return hash(str(self)) @@ -95,9 +73,7 @@ def __hash__(self): class ChildQuery(AttributeQuery): def __init__(self, predicate: AbstractQuery): - super().__init__( - predicate - ) + super().__init__(predicate) @property def condition(self): @@ -106,9 +82,7 @@ def condition(self): class BestFitQuery(ChildQuery): def __init__(self, predicate: AbstractQuery): - super().__init__( - predicate - ) + super().__init__(predicate) @property def fit_query(self) -> str: diff --git a/test_autofit/aggregator/test_child_analysis.py b/test_autofit/aggregator/test_child_analysis.py index 15e805d21..6d68e30b4 100644 --- a/test_autofit/aggregator/test_child_analysis.py +++ b/test_autofit/aggregator/test_child_analysis.py @@ -4,6 +4,7 @@ from autofit import SearchOutput from autofit.aggregator import Aggregator +import autofit as af @pytest.fixture(name="directory") @@ -33,3 +34,13 @@ def test_child_analysis_values(directory): aggregator = Aggregator(directory) assert list(aggregator.child_values("example")) == [["hello world", "hello world"]] + + +def test_database_aggregator(directory, session): + aggregator = af.Aggregator(session) + aggregator.add_directory(directory) + assert list(aggregator.child_values("example")) == [ + ["hello world", "hello world"], + [], + [], + ] From bd172dbe633f7249b809631f7e44805b230a137b Mon Sep 17 00:00:00 2001 From: Richard Date: Wed, 10 May 2023 15:31:03 +0100 Subject: [PATCH 221/226] top level only --- autofit/database/aggregator/aggregator.py | 37 ++++++++++++++++++- .../aggregator/test_child_analysis.py | 2 - 2 files changed, 36 insertions(+), 3 deletions(-) diff --git a/autofit/database/aggregator/aggregator.py b/autofit/database/aggregator/aggregator.py index ba53ad329..9c57e5444 100644 --- a/autofit/database/aggregator/aggregator.py +++ b/autofit/database/aggregator/aggregator.py @@ -158,6 +158,7 @@ def __init__( offset=0, limit=None, order_bys=None, + top_level_only=True, ): """ Query results from an intermediary SQLite database. @@ -169,6 +170,17 @@ def __init__( session A session for communicating with the database. filename + The path to the database file. If None, the database is in memory. + predicate + A predicate to filter the results by. + offset + The number of results to skip + limit + The maximum number of results to return + order_bys + A list of attributes to order the results by + top_level_only + If True, only return the top level fits """ self.session = session self.filename = filename @@ -177,6 +189,7 @@ def __init__( self._offset = offset self._limit = limit self._order_bys = order_bys or list() + self._top_level_only = top_level_only def order_by(self, item: Attribute, reverse=False) -> "Aggregator": """ @@ -364,7 +377,10 @@ def _fits_for_query(self, query: str) -> List[m.Fit]: attribute = sa.desc(attribute) query = query.order_by(attribute) - return query.offset(self._offset).limit(self._limit).all() + fits = query.offset(self._offset).limit(self._limit).all() + if self._top_level_only: + return [fit for fit in fits if fit.parent is None] + return fits def add_directory(self, directory: str, auto_commit=True): """ @@ -437,6 +453,25 @@ def grid_searches(self) -> "GridSearchAggregator": class GridSearchAggregator(Aggregator): + def __init__( + self, + session: sa.orm.Session, + filename: Optional[str] = None, + predicate: AbstractQuery = NullPredicate(), + offset=0, + limit=None, + order_bys=None, + ): + super().__init__( + session=session, + filename=filename, + predicate=predicate, + offset=offset, + limit=limit, + order_bys=order_bys, + top_level_only=False, + ) + def best_fits(self) -> "GridSearchAggregator": """ The best fit from each of the grid searches diff --git a/test_autofit/aggregator/test_child_analysis.py b/test_autofit/aggregator/test_child_analysis.py index 6d68e30b4..43324edad 100644 --- a/test_autofit/aggregator/test_child_analysis.py +++ b/test_autofit/aggregator/test_child_analysis.py @@ -41,6 +41,4 @@ def test_database_aggregator(directory, session): aggregator.add_directory(directory) assert list(aggregator.child_values("example")) == [ ["hello world", "hello world"], - [], - [], ] From f0dddefa4778555e30d629590ae17ad7858e352d Mon Sep 17 00:00:00 2001 From: Richard Date: Wed, 10 May 2023 15:37:19 +0100 Subject: [PATCH 222/226] top level only --- autofit/database/aggregator/aggregator.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/autofit/database/aggregator/aggregator.py b/autofit/database/aggregator/aggregator.py index 9c57e5444..d8bb446b2 100644 --- a/autofit/database/aggregator/aggregator.py +++ b/autofit/database/aggregator/aggregator.py @@ -411,7 +411,12 @@ def add_directory(self, directory: str, auto_commit=True): self.session.commit() @classmethod - def from_database(cls, filename: str, completed_only: bool = False) -> "Aggregator": + def from_database( + cls, + filename: str, + completed_only: bool = False, + top_level_only: bool = True, + ) -> "Aggregator": """ Create an instance from a sqlite database file. @@ -420,8 +425,11 @@ def from_database(cls, filename: str, completed_only: bool = False) -> "Aggregat Parameters ---------- completed_only + If True only completed fits are returned filename The name of the database file. + top_level_only + If True only top level fits are returned Returns ------- @@ -430,7 +438,7 @@ def from_database(cls, filename: str, completed_only: bool = False) -> "Aggregat from autofit.database import open_database session = open_database(str(filename)) - aggregator = Aggregator(session, filename) + aggregator = Aggregator(session, filename, top_level_only=top_level_only) if completed_only: return aggregator(aggregator.search.is_complete) return aggregator From a64b168c160036b05330fbcde66446c186ccf09d Mon Sep 17 00:00:00 2001 From: Richard Date: Wed, 10 May 2023 15:55:31 +0100 Subject: [PATCH 223/226] fixed scraping --- autofit/database/aggregator/aggregator.py | 5 +- .../grid/test_paths/test_indicators.py | 166 +++++------------- 2 files changed, 44 insertions(+), 127 deletions(-) diff --git a/autofit/database/aggregator/aggregator.py b/autofit/database/aggregator/aggregator.py index d8bb446b2..fa2788fe9 100644 --- a/autofit/database/aggregator/aggregator.py +++ b/autofit/database/aggregator/aggregator.py @@ -327,6 +327,7 @@ def _new_with( "filename": self.filename, "predicate": self._predicate, "order_bys": self._order_bys, + "top_level_only": self._top_level_only, **kwargs, } type_ = type_ or type(self) @@ -456,6 +457,7 @@ def grid_searches(self) -> "GridSearchAggregator": type_=GridSearchAggregator, predicate=self._predicate & self.search.is_grid_search, order_bys=[Attribute("id")], + top_level_only=False, ), ) @@ -469,6 +471,7 @@ def __init__( offset=0, limit=None, order_bys=None, + top_level_only=False, ): super().__init__( session=session, @@ -477,7 +480,7 @@ def __init__( offset=offset, limit=limit, order_bys=order_bys, - top_level_only=False, + top_level_only=top_level_only, ) def best_fits(self) -> "GridSearchAggregator": diff --git a/test_autofit/non_linear/grid/test_paths/test_indicators.py b/test_autofit/non_linear/grid/test_paths/test_indicators.py index f989049ca..f998d5058 100644 --- a/test_autofit/non_linear/grid/test_paths/test_indicators.py +++ b/test_autofit/non_linear/grid/test_paths/test_indicators.py @@ -6,46 +6,25 @@ from autoconf.conf import output_path_for_test from autofit.database.aggregator.scrape import Scraper -output_directory = Path( - __file__ -).parent / "output" +output_directory = Path(__file__).parent / "output" -@pytest.fixture( - name="parent_search" -) -@output_path_for_test( - output_directory -) +@pytest.fixture(name="parent_search") +@output_path_for_test(output_directory) def make_parent_search(model_gaussian_x1): - search = af.m.MockSearch( - name="parent" - ) + search = af.m.MockSearch(name="parent") search.paths.model = model_gaussian_x1 return search -@pytest.fixture( - name="database_parent_search" -) -def make_database_parent_search( - session -): - return af.DynestyStatic( - session=session - ) +@pytest.fixture(name="database_parent_search") +def make_database_parent_search(session): + return af.DynestyStatic(session=session) -def _make_grid_search( - mapper, - parent_search, - session=None -): +def _make_grid_search(mapper, parent_search, session=None): search = af.SearchGridSearch( - search=af.m.MockOptimizer( - session=session - ), - number_of_steps=2 + search=af.m.MockOptimizer(session=session), number_of_steps=2 ) search.fit( model=mapper, @@ -54,150 +33,85 @@ def _make_grid_search( mapper.component.one_tuple.one_tuple_0, mapper.component.one_tuple.one_tuple_1, ], - parent=parent_search + parent=parent_search, ) return search -@pytest.fixture( - name="grid_search" -) -def make_grid_search( - mapper, - parent_search -): - search = _make_grid_search( - mapper, - parent_search - ) +@pytest.fixture(name="grid_search") +def make_grid_search(mapper, parent_search): + search = _make_grid_search(mapper, parent_search) search.paths.save_all() return search -@pytest.fixture( - name="database_grid_search" -) -def make_database_grid_search( - mapper, - database_parent_search, - session -): - return _make_grid_search( - mapper, - database_parent_search, - session=session - ) +@pytest.fixture(name="database_grid_search") +def make_database_grid_search(mapper, database_parent_search, session): + return _make_grid_search(mapper, database_parent_search, session=session) class TestMiscombination: - def test_directory_for_database( - self, - parent_search, - session, - mapper - ): + def test_directory_for_database(self, parent_search, session, mapper): with pytest.raises(TypeError): - _make_grid_search( - mapper, - parent_search, - session - ) + _make_grid_search(mapper, parent_search, session) class TestDirectory: - def test_parent_search( - self, - grid_search, - parent_search - ): + def test_parent_search(self, grid_search, parent_search): grid_paths = grid_search.paths parent_paths = parent_search.paths assert parent_paths is grid_paths.parent - with open( - grid_paths._parent_identifier_path - ) as f: + with open(grid_paths._parent_identifier_path) as f: assert f.read() == parent_paths.identifier - def test_is_grid_search( - self, - grid_search - ): + def test_is_grid_search(self, grid_search): assert grid_search.paths.is_grid_search -@output_path_for_test( - output_directory -) -def test_scrape( - grid_search, - parent_search, - model_gaussian_x1, - session -): +@output_path_for_test(output_directory) +def test_scrape(grid_search, parent_search, model_gaussian_x1, session): grid_search.fit( model=model_gaussian_x1, analysis=af.m.MockAnalysis(), parent=parent_search, - grid_priors=[model_gaussian_x1.centre] - ) - parent_search.fit( - model=model_gaussian_x1, - analysis=af.m.MockAnalysis() + grid_priors=[model_gaussian_x1.centre], ) + parent_search.fit(model=model_gaussian_x1, analysis=af.m.MockAnalysis()) parent_search.paths.save_all() - Scraper( - directory=output_directory, - session=session - ).scrape() + Scraper(directory=output_directory, session=session).scrape() - aggregator = af.Aggregator(session) - assert list(aggregator.query( - aggregator.search.id == grid_search.paths.identifier - ))[0].parent.id == parent_search.paths.identifier + aggregator = af.Aggregator(session, top_level_only=False) + assert ( + list(aggregator.query(aggregator.search.id == grid_search.paths.identifier))[ + 0 + ].parent.id + == parent_search.paths.identifier + ) assert len(aggregator.values("max_log_likelihood")) > 0 assert list(aggregator.grid_searches())[0].is_complete -@output_path_for_test( - output_directory -) -def test_incomplete( - grid_search, - session -): +@output_path_for_test(output_directory) +def test_incomplete(grid_search, session): grid_search.save_metadata() - Scraper( - directory=output_directory, - session=session - ).scrape() + Scraper(directory=output_directory, session=session).scrape() session.commit() - aggregator = af.Aggregator( - session - ) - aggregator = aggregator( - aggregator.search.is_complete - ) + aggregator = af.Aggregator(session) + aggregator = aggregator(aggregator.search.is_complete) assert len(aggregator) == 0 class TestDatabase: - def test_parent_search( - self, - database_grid_search, - database_parent_search - ): + def test_parent_search(self, database_grid_search, database_parent_search): parent_paths = database_parent_search.paths assert parent_paths is database_grid_search.paths.parent assert database_grid_search.paths.fit.parent_id == parent_paths.identifier - def test_is_grid_search( - self, - database_grid_search - ): + def test_is_grid_search(self, database_grid_search): assert database_grid_search.paths.is_grid_search From 9e71196db24e1ec88fcf8e360768904b4b931054 Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Wed, 10 May 2023 18:38:51 +0100 Subject: [PATCH 224/226] visualization config --- autofit/config/general.yaml | 1 + autofit/non_linear/samples/pdf.py | 4 ++++ 2 files changed, 5 insertions(+) diff --git a/autofit/config/general.yaml b/autofit/config/general.yaml index 6440c29f7..e2c9e5982 100644 --- a/autofit/config/general.yaml +++ b/autofit/config/general.yaml @@ -10,6 +10,7 @@ model: ignore_prior_limits: false # If ``True`` the limits applied to priors will be ignored, where limits set upper / lower limits. This stops PriorLimitException's from being raised. output: force_pickle_overwrite: false # If True pickle files output by a search (e.g. samples.pickle) are recreated when a new model-fit is performed. + force_visualize_overwrite: false # If True, visualization images output by a search (e.g. subplots of the fit) are recreated when a new model-fit is performed. info_whitespace_length: 80 # Length of whitespace between the parameter names and values in the model.info / result.info log_level: INFO # The level of information output by logging. log_to_file: false # If True, outputs the non-linear search log to a file (and not printed to screen). diff --git a/autofit/non_linear/samples/pdf.py b/autofit/non_linear/samples/pdf.py index fd3f6f851..095f10874 100644 --- a/autofit/non_linear/samples/pdf.py +++ b/autofit/non_linear/samples/pdf.py @@ -322,6 +322,10 @@ def draw_randomly_via_pdf(self, as_instance: bool = True) -> Union[List, ModelIn The draw is weighted by the sample weights to ensure that the sample is drawn from the PDF (which is important for non-linear searches like nested sampling). """ + + print(self.sample_list) + print(self.weight_list) + sample_index = np.random.choice(a=range(len(self.sample_list)), p=self.weight_list) return self.parameter_lists[sample_index][:] From e1d0e1867a8c6416a9181b7c4233221ded7d2151 Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 15 May 2023 08:57:05 +0100 Subject: [PATCH 225/226] support child_values api --- autofit/aggregator/aggregator.py | 5 +---- autofit/aggregator/search_output.py | 6 ++++++ autofit/database/aggregator/aggregator.py | 2 +- autofit/database/model/fit.py | 11 ++++++++++- test_autofit/aggregator/test_child_analysis.py | 13 ++++++++++++- 5 files changed, 30 insertions(+), 7 deletions(-) diff --git a/autofit/aggregator/aggregator.py b/autofit/aggregator/aggregator.py index e0225cf64..885528735 100755 --- a/autofit/aggregator/aggregator.py +++ b/autofit/aggregator/aggregator.py @@ -185,10 +185,7 @@ def child_values(self, name: str) -> Iterator[List]: ------- A generator of values for the attribute """ - return ( - [getattr(analysis, name) for analysis in phase.child_analyses] - for phase in self.search_outputs - ) + return (phase.child_values(name) for phase in self.search_outputs) def map(self, func): """ diff --git a/autofit/aggregator/search_output.py b/autofit/aggregator/search_output.py index cea2c9a8d..860fb2234 100644 --- a/autofit/aggregator/search_output.py +++ b/autofit/aggregator/search_output.py @@ -120,6 +120,12 @@ def search(self) -> abstract_search.NonLinearSearch: logging.exception(e) return self.__search + def child_values(self, key): + """ + Get the values of a given key for all children + """ + return [getattr(child, key) for child in self.child_analyses] + @property def model(self): """ diff --git a/autofit/database/aggregator/aggregator.py b/autofit/database/aggregator/aggregator.py index fa2788fe9..814f9be7f 100644 --- a/autofit/database/aggregator/aggregator.py +++ b/autofit/database/aggregator/aggregator.py @@ -135,7 +135,7 @@ def child_values(self, name: str) -> List[list]: ------- A list of objects, one for each fit """ - return [[analysis[name] for analysis in fit.children] for fit in self] + return [fit.child_values(name) for fit in self] def __iter__(self): return iter(self.fits) diff --git a/autofit/database/model/fit.py b/autofit/database/model/fit.py index 1951a92ac..519f3f8d0 100644 --- a/autofit/database/model/fit.py +++ b/autofit/database/model/fit.py @@ -139,7 +139,10 @@ def _get_named_instance(self, item: str) -> "NamedInstance": class Fit(Base): __tablename__ = "fit" - id = sa.Column(sa.String, primary_key=True,) + id = sa.Column( + sa.String, + primary_key=True, + ) is_complete = sa.Column(sa.Boolean) _named_instances: List[NamedInstance] = sa.orm.relationship("NamedInstance") @@ -173,6 +176,12 @@ def __init__(self, **kwargs): "Fit", backref=sa.orm.backref("parent", remote_side=[id]) ) + def child_values(self, key): + """ + Get the values of a given key for all children + """ + return [child[key] for child in self.children] + @property def best_fit(self) -> "Fit": """ diff --git a/test_autofit/aggregator/test_child_analysis.py b/test_autofit/aggregator/test_child_analysis.py index 43324edad..574363ca4 100644 --- a/test_autofit/aggregator/test_child_analysis.py +++ b/test_autofit/aggregator/test_child_analysis.py @@ -34,11 +34,22 @@ def test_child_analysis_values(directory): aggregator = Aggregator(directory) assert list(aggregator.child_values("example")) == [["hello world", "hello world"]] + assert list(aggregator)[0].child_values("example") == ["hello world", "hello world"] -def test_database_aggregator(directory, session): +@pytest.fixture(name="aggregator") +def make_aggregator(session, directory): aggregator = af.Aggregator(session) aggregator.add_directory(directory) + return aggregator + + +def test_database_aggregator(aggregator): assert list(aggregator.child_values("example")) == [ ["hello world", "hello world"], ] + + +def test_child_values(aggregator): + fit, *_ = list(aggregator) + assert fit.child_values("example") == ["hello world", "hello world"] From 822fed546856b6a7c3f7eb466bdf376b32217882 Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 15 May 2023 13:45:50 +0100 Subject: [PATCH 226/226] consistent variable names --- autofit/aggregator/search_output.py | 4 ++-- autofit/database/model/fit.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/autofit/aggregator/search_output.py b/autofit/aggregator/search_output.py index 860fb2234..c781c4d16 100644 --- a/autofit/aggregator/search_output.py +++ b/autofit/aggregator/search_output.py @@ -120,11 +120,11 @@ def search(self) -> abstract_search.NonLinearSearch: logging.exception(e) return self.__search - def child_values(self, key): + def child_values(self, name): """ Get the values of a given key for all children """ - return [getattr(child, key) for child in self.child_analyses] + return [getattr(child, name) for child in self.child_analyses] @property def model(self): diff --git a/autofit/database/model/fit.py b/autofit/database/model/fit.py index 519f3f8d0..286e40f3d 100644 --- a/autofit/database/model/fit.py +++ b/autofit/database/model/fit.py @@ -176,11 +176,11 @@ def __init__(self, **kwargs): "Fit", backref=sa.orm.backref("parent", remote_side=[id]) ) - def child_values(self, key): + def child_values(self, name): """ Get the values of a given key for all children """ - return [child[key] for child in self.children] + return [child[name] for child in self.children] @property def best_fit(self) -> "Fit":