From ad3070901cc661035df48cf509ecd3502cb6e2f4 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Sat, 23 Nov 2024 14:44:47 -0500
Subject: [PATCH 01/43] build(deps): bump pypa/cibuildwheel from 2.21 to 2.22
 (#4408)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Bumps [pypa/cibuildwheel](https://github.com/pypa/cibuildwheel) from 2.21 to 2.22.
Release notes

Sourced from pypa/cibuildwheel's releases.

- Version 2.22.0
- v2.21.3
- v2.21.2
  - Note: the default manylinux image is scheduled to change from manylinux2014 to manylinux_2_28 in a cibuildwheel release on or after 6th May 2025 - you can set the value now to avoid getting upgraded if you want (see the pinning sketch after this list). (#1992)
- Version 2.21.1
Changelog

Sourced from pypa/cibuildwheel's changelog.

- v2.22.0 (23 November 2024)
- v2.21.3 (9 October 2024)
- v2.21.2 (2 October 2024)
  - Note: the default manylinux image is scheduled to change from manylinux2014 to manylinux_2_28 in a cibuildwheel release on or after 6th May 2025 - you can set the value now to avoid getting upgraded if you want. (#1992)
- v2.21.1 (16 September 2024)

... (truncated)

Commits
[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=pypa/cibuildwheel&package-manager=github_actions&previous-version=2.21&new-version=2.22)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)

Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.

---
Dependabot commands and options

You can trigger Dependabot actions by commenting on this PR:

- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually
- `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency
- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
 .github/workflows/build_wheel.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/build_wheel.yml b/.github/workflows/build_wheel.yml
index db5745e241..8788bc929b 100644
--- a/.github/workflows/build_wheel.yml
+++ b/.github/workflows/build_wheel.yml
@@ -86,7 +86,7 @@ jobs:
           rm -rf .git
         if: matrix.dp_pkg_name == 'deepmd-kit-cu11'
       - name: Build wheels
-        uses: pypa/cibuildwheel@v2.21
+        uses: pypa/cibuildwheel@v2.22
         env:
           CIBW_BUILD_VERBOSITY: 1
           CIBW_ARCHS: all

From e7ad8dca6ea9d0fcaf83fcee231a1f1358443169 Mon Sep 17 00:00:00 2001
From: Duo <50307526+iProzd@users.noreply.github.com>
Date: Tue, 26 Nov 2024 10:05:59 +0800
Subject: [PATCH 02/43] chore(pt): update multitask example (#4419)

## Summary by CodeRabbit

- **New Features**
  - Updated multi-task model configuration with a new descriptor for enhanced representation learning.
  - Introduced additional parameters for model initialization and attention mechanisms.
- **Bug Fixes**
  - Replaced outdated descriptor references in model configurations to ensure compatibility with new settings.
---
 .../pytorch_example/input_torch.json          | 69 ++++++++++++++-----
 1 file changed, 50 insertions(+), 19 deletions(-)

diff --git a/examples/water_multi_task/pytorch_example/input_torch.json b/examples/water_multi_task/pytorch_example/input_torch.json
index 04d848538d..f0d9a67ea5 100644
--- a/examples/water_multi_task/pytorch_example/input_torch.json
+++ b/examples/water_multi_task/pytorch_example/input_torch.json
@@ -6,23 +6,54 @@
             "O",
             "H"
         ],
-        "sea_descriptor_1": {
-            "type": "se_e2_a",
-            "sel": [
-                46,
-                92
-            ],
-            "rcut_smth": 0.50,
-            "rcut": 6.00,
-            "neuron": [
-                25,
-                50,
-                100
-            ],
-            "resnet_dt": false,
-            "axis_neuron": 16,
-            "type_one_side": true,
-            "seed": 1,
+        "dpa2_descriptor": {
+            "type": "dpa2",
+            "repinit": {
+                "tebd_dim": 8,
+                "rcut": 6.0,
+                "rcut_smth": 0.5,
+                "nsel": 120,
+                "neuron": [
+                    25,
+                    50,
+                    100
+                ],
+                "axis_neuron": 12,
+                "activation_function": "tanh",
+                "three_body_sel": 48,
+                "three_body_rcut": 4.0,
+                "three_body_rcut_smth": 3.5,
+                "use_three_body": true
+            },
+            "repformer": {
+                "rcut": 4.0,
+                "rcut_smth": 3.5,
+                "nsel": 48,
+                "nlayers": 6,
+                "g1_dim": 128,
+                "g2_dim": 32,
+                "attn2_hidden": 32,
+                "attn2_nhead": 4,
+                "attn1_hidden": 128,
+                "attn1_nhead": 4,
+                "axis_neuron": 4,
+                "update_h2": false,
+                "update_g1_has_conv": true,
+                "update_g1_has_grrg": true,
+                "update_g1_has_drrd": true,
+                "update_g1_has_attn": false,
+                "update_g2_has_g1g1": false,
+                "update_g2_has_attn": true,
+                "update_style": "res_residual",
+                "update_residual": 0.01,
+                "update_residual_init": "norm",
+                "attn2_has_gate": true,
+                "use_sqrt_nnei": true,
+                "g1_out_conv": true,
+                "g1_out_mlp": true
+            },
+            "precision": "float64",
+            "add_tebd_to_repinit_out": false,
             "_comment": " that's all"
         },
         "_comment": "that's all"
@@ -30,7 +61,7 @@
     "model_dict": {
         "water_1": {
             "type_map": "type_map_all",
-            "descriptor": "sea_descriptor_1",
+            "descriptor": "dpa2_descriptor",
             "fitting_net": {
                 "neuron": [
                     240,
@@ -44,7 +75,7 @@
         },
         "water_2": {
             "type_map": "type_map_all",
-            "descriptor": "sea_descriptor_1",
+            "descriptor": "dpa2_descriptor",
             "fitting_net": {
                 "neuron": [
                     240,
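For context, the patch above relies on deepmd-kit's multi-task configuration, in which entries defined once in the shared section are referenced by key from each branch in `model_dict` — that is why a single rename to `dpa2_descriptor` updates both water models. A trimmed sketch of that mechanism (the `shared_dict` key name and the elided descriptor body are assumptions for illustration; the real values are in the diff above):

```json
{
    "model": {
        "shared_dict": {
            "type_map_all": ["O", "H"],
            "dpa2_descriptor": {"type": "dpa2", "_comment": "full body as in the diff above"}
        },
        "model_dict": {
            "water_1": {"type_map": "type_map_all", "descriptor": "dpa2_descriptor"},
            "water_2": {"type_map": "type_map_all", "descriptor": "dpa2_descriptor"}
        }
    }
}
```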
From f343a3b212edab5525502e0261f3068c0b6fb1f6 Mon Sep 17 00:00:00 2001
From: Anyang Peng <137014849+anyangml@users.noreply.github.com>
Date: Wed, 27 Nov 2024 02:14:18 +0800
Subject: [PATCH 03/43] Fix: add model_def_script to ZBL (#4423)

## Summary by CodeRabbit

- **New Features**
  - Enhanced model definition handling for improved encapsulation and consistency across different model types.
- **Bug Fixes**
  - Ensured that model definition scripts are correctly set to a JSON string representation for all model instances.
---
 deepmd/pt/model/model/__init__.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/deepmd/pt/model/model/__init__.py b/deepmd/pt/model/model/__init__.py
index a46909622b..f2e03fb99e 100644
--- a/deepmd/pt/model/model/__init__.py
+++ b/deepmd/pt/model/model/__init__.py
@@ -196,7 +196,7 @@ def get_zbl_model(model_params):
     rmax = model_params["sw_rmax"]
     atom_exclude_types = model_params.get("atom_exclude_types", [])
     pair_exclude_types = model_params.get("pair_exclude_types", [])
-    return DPZBLModel(
+    model = DPZBLModel(
         dp_model,
         pt_model,
         rmin,
@@ -205,6 +205,8 @@ def get_zbl_model(model_params):
         atom_exclude_types=atom_exclude_types,
         pair_exclude_types=pair_exclude_types,
     )
+    model.model_def_script = json.dumps(model_params)
+    return model


 def _can_be_converted_to_float(value) -> Optional[bool]:

From 3cdf407b0b811ae6ee2d6a1168113ba15de04b8d Mon Sep 17 00:00:00 2001
From: Anyang Peng <137014849+anyangml@users.noreply.github.com>
Date: Wed, 27 Nov 2024 15:02:31 +0800
Subject: [PATCH 04/43] Feat: add pairtab compression (#4432)

## Summary by CodeRabbit

- **New Features**
  - Introduced a new method `enable_compression` in the PairTabAtomicModel class, indicating that the model does not support compression settings.
- **Documentation**
  - Added docstring for the `enable_compression` method to clarify its purpose.

---------

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
---
 deepmd/dpmodel/atomic_model/pairtab_atomic_model.py  | 11 +++++++++++
 deepmd/pt/model/atomic_model/pairtab_atomic_model.py | 11 +++++++++++
 2 files changed, 22 insertions(+)

diff --git a/deepmd/dpmodel/atomic_model/pairtab_atomic_model.py b/deepmd/dpmodel/atomic_model/pairtab_atomic_model.py
index aefdbf7f1c..a4bffe508d 100644
--- a/deepmd/dpmodel/atomic_model/pairtab_atomic_model.py
+++ b/deepmd/dpmodel/atomic_model/pairtab_atomic_model.py
@@ -425,3 +425,14 @@ def is_aparam_nall(self) -> bool:
         If False, the shape is (nframes, nloc, ndim).
         """
         return False
+
+    def enable_compression(
+        self,
+        min_nbor_dist: float,
+        table_extrapolate: float = 5,
+        table_stride_1: float = 0.01,
+        table_stride_2: float = 0.1,
+        check_frequency: int = -1,
+    ) -> None:
+        """Pairtab model does not support compression."""
+        pass

diff --git a/deepmd/pt/model/atomic_model/pairtab_atomic_model.py b/deepmd/pt/model/atomic_model/pairtab_atomic_model.py
index c535808d83..0d3b2c0c41 100644
--- a/deepmd/pt/model/atomic_model/pairtab_atomic_model.py
+++ b/deepmd/pt/model/atomic_model/pairtab_atomic_model.py
@@ -484,3 +484,14 @@ def is_aparam_nall(self) -> bool:
         If False, the shape is (nframes, nloc, ndim).
         """
         return False
+
+    def enable_compression(
+        self,
+        min_nbor_dist: float,
+        table_extrapolate: float = 5,
+        table_stride_1: float = 0.01,
+        table_stride_2: float = 0.1,
+        check_frequency: int = -1,
+    ) -> None:
+        """Pairtab model does not support compression."""
+        pass
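The point of adding a no-op `enable_compression` here is interface uniformity: callers can request compression on any atomic model without special-casing pair-table models. A minimal caller sketch (the `compress_if_possible` helper is hypothetical and not part of the patch; the signature matches the diff above):

```python
# Hypothetical helper: every atomic model now exposes enable_compression()
# with the same signature, so no isinstance() checks are needed;
# PairTabAtomicModel simply ignores the request (its tables stay uncompressed).
def compress_if_possible(atomic_model, min_nbor_dist: float) -> None:
    atomic_model.enable_compression(
        min_nbor_dist,
        table_extrapolate=5,
        table_stride_1=0.01,
        table_stride_2=0.1,
        check_frequency=-1,
    )
```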
From 4a45fe5d37097fcdf5d0e33d23d8b2674984bdc9 Mon Sep 17 00:00:00 2001
From: HydrogenSulfate <490868991@qq.com>
Date: Wed, 27 Nov 2024 16:20:48 +0800
Subject: [PATCH 05/43] pd: support paddle backend and `water/se_e2_a` (#4302)

Split into several pull requests.

1. Add core modules of the paddle backend (`deepmd.pd.*`) and related backend module unit tests.
2. Support training/testing/freeze (C++ inference will be supported in a subsequent pull request) for the example water/se_e2_a.
3. Add se_e2_a related unit tests.

Related PR to be merged:

- [x]

## Accuracy test

### pytorch

![image](https://github.com/user-attachments/assets/cea8f313-4a57-4575-b55a-b6cf577654a2)

### paddle:

```log
deepmd.utils.batch_size    Adjust batch size from 1024 to 2048
deepmd.utils.batch_size    Adjust batch size from 2048 to 4096
deepmd.entrypoints.test    # number of test data : 30 ,
deepmd.entrypoints.test    Energy MAE         : 7.467160e-02 eV
deepmd.entrypoints.test    Energy RMSE        : 8.981154e-02 eV
deepmd.entrypoints.test    Energy MAE/Natoms  : 3.889146e-04 eV
deepmd.entrypoints.test    Energy RMSE/Natoms : 4.677685e-04 eV
deepmd.entrypoints.test    Force  MAE         : 4.495974e-02 eV/A
deepmd.entrypoints.test    Force  RMSE        : 5.883696e-02 eV/A
deepmd.entrypoints.test    Virial MAE         : 4.683873e+00 eV
deepmd.entrypoints.test    Virial RMSE        : 6.298489e+00 eV
deepmd.entrypoints.test    Virial MAE/Natoms  : 2.439517e-02 eV
deepmd.entrypoints.test    Virial RMSE/Natoms : 3.280463e-02 eV
```

## Summary by CodeRabbit

## Release Notes

- **New Features**
  - Introduced support for PaddlePaddle in the DeePMD framework, enhancing model training and evaluation capabilities.
  - Added new backend options and configuration files for multitask models.
  - Implemented new classes and methods for handling Paddle-specific functionalities, including descriptor calculations and model evaluations.
  - Enhanced the command-line interface to include Paddle as a backend option.
  - Expanded the functionality for managing Paddle dependencies and configurations in the testing framework.
- **Bug Fixes**
  - Improved error handling and robustness in various components across the framework.
- **Tests**
  - Expanded the test suite to include Paddle-specific tests, ensuring consistency and reliability across different backends.
  - Introduced unit tests for new functionalities related to Paddle, including model evaluations and descriptor calculations.
  - Added tests to validate force gradient calculations and smoothness properties in models.
  - Implemented tests for neighbor statistics and region transformations, ensuring accuracy in calculations.
- **Documentation**
  - Updated documentation across multiple modules to reflect new features and usage instructions.
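As a rough sketch of the workflow this PR enables (command flags follow the existing `dp` CLI conventions; the file names are placeholders, not taken from the PR):

```sh
dp --pd train input.json            # train water/se_e2_a with the Paddle backend
dp --pd freeze -o frozen_model      # export a Paddle inference model
dp --pd test -m model.pd -s ./data  # produces an accuracy log like the one above
```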
--------- Signed-off-by: HydrogenSulfate <490868991@qq.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .github/workflows/test_cuda.yml | 1 + .github/workflows/test_python.yml | 1 + backend/find_paddle.py | 133 ++ deepmd/backend/paddle.py | 124 ++ deepmd/dpmodel/model/make_model.py | 2 +- deepmd/main.py | 3 +- deepmd/pd/__init__.py | 11 + deepmd/pd/entrypoints/__init__.py | 1 + deepmd/pd/entrypoints/main.py | 543 ++++++++ deepmd/pd/infer/__init__.py | 1 + deepmd/pd/infer/deep_eval.py | 537 +++++++ deepmd/pd/infer/inference.py | 64 + deepmd/pd/loss/__init__.py | 12 + deepmd/pd/loss/ener.py | 428 ++++++ deepmd/pd/loss/loss.py | 63 + deepmd/pd/model/__init__.py | 1 + deepmd/pd/model/atomic_model/__init__.py | 31 + .../model/atomic_model/base_atomic_model.py | 579 ++++++++ .../pd/model/atomic_model/dp_atomic_model.py | 333 +++++ .../model/atomic_model/energy_atomic_model.py | 17 + deepmd/pd/model/descriptor/__init__.py | 22 + deepmd/pd/model/descriptor/base_descriptor.py | 8 + deepmd/pd/model/descriptor/descriptor.py | 219 +++ deepmd/pd/model/descriptor/env_mat.py | 87 ++ deepmd/pd/model/descriptor/se_a.py | 715 ++++++++++ deepmd/pd/model/model/__init__.py | 144 ++ deepmd/pd/model/model/dp_model.py | 64 + deepmd/pd/model/model/ener_model.py | 135 ++ deepmd/pd/model/model/frozen.py | 182 +++ deepmd/pd/model/model/make_model.py | 614 ++++++++ deepmd/pd/model/model/model.py | 55 + deepmd/pd/model/model/transform_output.py | 262 ++++ deepmd/pd/model/network/__init__.py | 1 + deepmd/pd/model/network/init.py | 458 ++++++ deepmd/pd/model/network/mlp.py | 328 +++++ deepmd/pd/model/network/network.py | 325 +++++ deepmd/pd/model/task/__init__.py | 16 + deepmd/pd/model/task/base_fitting.py | 8 + deepmd/pd/model/task/ener.py | 85 ++ deepmd/pd/model/task/fitting.py | 506 +++++++ deepmd/pd/model/task/invar_fitting.py | 183 +++ deepmd/pd/model/task/task.py | 1 + deepmd/pd/train/__init__.py | 1 + deepmd/pd/train/training.py | 1240 +++++++++++++++++ deepmd/pd/train/wrapper.py | 213 +++ deepmd/pd/utils/__init__.py | 11 + deepmd/pd/utils/auto_batch_size.py | 60 + deepmd/pd/utils/dataloader.py | 339 +++++ deepmd/pd/utils/dataset.py | 57 + deepmd/pd/utils/decomp.py | 247 ++++ deepmd/pd/utils/dp_random.py | 14 + deepmd/pd/utils/env.py | 107 ++ deepmd/pd/utils/env_mat_stat.py | 235 ++++ deepmd/pd/utils/exclude_mask.py | 164 +++ deepmd/pd/utils/finetune.py | 200 +++ deepmd/pd/utils/multi_task.py | 162 +++ deepmd/pd/utils/neighbor_stat.py | 197 +++ deepmd/pd/utils/nlist.py | 535 +++++++ deepmd/pd/utils/preprocess.py | 18 + deepmd/pd/utils/region.py | 119 ++ deepmd/pd/utils/serialization.py | 55 + deepmd/pd/utils/stat.py | 604 ++++++++ deepmd/pd/utils/update_sel.py | 14 + deepmd/pd/utils/utils.py | 179 +++ deepmd/pt/model/network/network.py | 24 +- deepmd/utils/batch_size.py | 20 +- deepmd/utils/data.py | 15 + pyproject.toml | 14 +- source/tests/consistent/common.py | 70 +- source/tests/consistent/descriptor/common.py | 32 + .../consistent/descriptor/test_se_e2_a.py | 26 + source/tests/consistent/fitting/common.py | 3 + source/tests/consistent/fitting/test_ener.py | 52 + source/tests/consistent/model/common.py | 15 + source/tests/consistent/model/test_ener.py | 56 + source/tests/consistent/test_activation.py | 15 + source/tests/consistent/test_neighbor_stat.py | 5 + .../tests/consistent/test_type_embedding.py | 15 + source/tests/pd/__init__.py | 6 + source/tests/pd/common.py | 263 ++++ source/tests/pd/conftest.py | 9 + source/tests/pd/model/__init__.py | 1 + 
source/tests/pd/model/test_autodiff.py | 263 ++++ source/tests/pd/model/test_descriptor.py | 195 +++ source/tests/pd/model/test_dp_atomic_model.py | 235 ++++ source/tests/pd/model/test_dp_model.py | 633 +++++++++ source/tests/pd/model/test_embedding_net.py | 217 +++ source/tests/pd/model/test_ener_fitting.py | 150 ++ source/tests/pd/model/test_env_mat.py | 187 +++ source/tests/pd/model/test_exclusion_mask.py | 70 + source/tests/pd/model/test_fitting_net.py | 148 ++ source/tests/pd/model/test_force_grad.py | 111 ++ source/tests/pd/model/test_forward_lower.py | 208 +++ source/tests/pd/model/test_get_model.py | 113 ++ source/tests/pd/model/test_jit.py | 83 ++ source/tests/pd/model/test_mlp.py | 283 ++++ source/tests/pd/model/test_model.py | 433 ++++++ source/tests/pd/model/test_nlist.py | 304 ++++ source/tests/pd/model/test_null_input.py | 94 ++ source/tests/pd/model/test_permutation.py | 489 +++++++ source/tests/pd/model/test_region.py | 62 + source/tests/pd/model/test_rot.py | 234 ++++ source/tests/pd/model/test_rotation.py | 113 ++ .../tests/pd/model/test_saveload_se_e2_a.py | 138 ++ source/tests/pd/model/test_se_e2_a.py | 137 ++ source/tests/pd/model/test_smooth.py | 172 +++ source/tests/pd/model/test_trans.py | 168 +++ .../model/water/data/data_0/set.000/box.npy | Bin 0 -> 3008 bytes .../model/water/data/data_0/set.000/coord.npy | Bin 0 -> 184448 bytes .../water/data/data_0/set.000/energy.npy | Bin 0 -> 448 bytes .../model/water/data/data_0/set.000/force.npy | Bin 0 -> 184448 bytes .../tests/pd/model/water/data/data_0/type.raw | 192 +++ .../pd/model/water/data/data_0/type_map.raw | 2 + .../model/water/data/single/set.000/box.npy | Bin 0 -> 164 bytes .../model/water/data/single/set.000/coord.npy | Bin 0 -> 2432 bytes .../water/data/single/set.000/energy.npy | Bin 0 -> 132 bytes .../model/water/data/single/set.000/force.npy | Bin 0 -> 2432 bytes .../tests/pd/model/water/data/single/type.raw | 192 +++ .../pd/model/water/data/single/type_map.raw | 2 + source/tests/pd/model/water/multitask.json | 140 ++ source/tests/pd/model/water/se_atten.json | 83 ++ source/tests/pd/model/water/se_e2_a.json | 77 + source/tests/pd/test_auto_batch_size.py | 38 + source/tests/pd/test_change_bias.py | 150 ++ source/tests/pd/test_decomp.py | 131 ++ source/tests/pd/test_dp_show.py | 219 +++ source/tests/pd/test_finetune.py | 379 +++++ source/tests/pd/test_loss.py | 585 ++++++++ source/tests/pd/test_lr.py | 106 ++ source/tests/pd/test_multitask.py | 224 +++ source/tests/pd/test_neighbor_stat.py | 69 + source/tests/pd/test_sampler.py | 114 ++ source/tests/pd/test_training.py | 176 +++ source/tests/pd/test_update_sel.py | 194 +++ source/tests/pd/test_utils.py | 35 + source/tests/pd/water | 1 + 136 files changed, 21039 insertions(+), 25 deletions(-) create mode 100644 backend/find_paddle.py create mode 100644 deepmd/backend/paddle.py create mode 100644 deepmd/pd/__init__.py create mode 100644 deepmd/pd/entrypoints/__init__.py create mode 100644 deepmd/pd/entrypoints/main.py create mode 100644 deepmd/pd/infer/__init__.py create mode 100644 deepmd/pd/infer/deep_eval.py create mode 100644 deepmd/pd/infer/inference.py create mode 100644 deepmd/pd/loss/__init__.py create mode 100644 deepmd/pd/loss/ener.py create mode 100644 deepmd/pd/loss/loss.py create mode 100644 deepmd/pd/model/__init__.py create mode 100644 deepmd/pd/model/atomic_model/__init__.py create mode 100644 deepmd/pd/model/atomic_model/base_atomic_model.py create mode 100644 deepmd/pd/model/atomic_model/dp_atomic_model.py create mode 100644 
deepmd/pd/model/atomic_model/energy_atomic_model.py create mode 100644 deepmd/pd/model/descriptor/__init__.py create mode 100644 deepmd/pd/model/descriptor/base_descriptor.py create mode 100644 deepmd/pd/model/descriptor/descriptor.py create mode 100644 deepmd/pd/model/descriptor/env_mat.py create mode 100644 deepmd/pd/model/descriptor/se_a.py create mode 100644 deepmd/pd/model/model/__init__.py create mode 100644 deepmd/pd/model/model/dp_model.py create mode 100644 deepmd/pd/model/model/ener_model.py create mode 100644 deepmd/pd/model/model/frozen.py create mode 100644 deepmd/pd/model/model/make_model.py create mode 100644 deepmd/pd/model/model/model.py create mode 100644 deepmd/pd/model/model/transform_output.py create mode 100644 deepmd/pd/model/network/__init__.py create mode 100644 deepmd/pd/model/network/init.py create mode 100644 deepmd/pd/model/network/mlp.py create mode 100644 deepmd/pd/model/network/network.py create mode 100644 deepmd/pd/model/task/__init__.py create mode 100644 deepmd/pd/model/task/base_fitting.py create mode 100644 deepmd/pd/model/task/ener.py create mode 100644 deepmd/pd/model/task/fitting.py create mode 100644 deepmd/pd/model/task/invar_fitting.py create mode 100644 deepmd/pd/model/task/task.py create mode 100644 deepmd/pd/train/__init__.py create mode 100644 deepmd/pd/train/training.py create mode 100644 deepmd/pd/train/wrapper.py create mode 100644 deepmd/pd/utils/__init__.py create mode 100644 deepmd/pd/utils/auto_batch_size.py create mode 100644 deepmd/pd/utils/dataloader.py create mode 100644 deepmd/pd/utils/dataset.py create mode 100644 deepmd/pd/utils/decomp.py create mode 100644 deepmd/pd/utils/dp_random.py create mode 100644 deepmd/pd/utils/env.py create mode 100644 deepmd/pd/utils/env_mat_stat.py create mode 100644 deepmd/pd/utils/exclude_mask.py create mode 100644 deepmd/pd/utils/finetune.py create mode 100644 deepmd/pd/utils/multi_task.py create mode 100644 deepmd/pd/utils/neighbor_stat.py create mode 100644 deepmd/pd/utils/nlist.py create mode 100644 deepmd/pd/utils/preprocess.py create mode 100644 deepmd/pd/utils/region.py create mode 100644 deepmd/pd/utils/serialization.py create mode 100644 deepmd/pd/utils/stat.py create mode 100644 deepmd/pd/utils/update_sel.py create mode 100644 deepmd/pd/utils/utils.py create mode 100644 source/tests/pd/__init__.py create mode 100644 source/tests/pd/common.py create mode 100644 source/tests/pd/conftest.py create mode 100644 source/tests/pd/model/__init__.py create mode 100644 source/tests/pd/model/test_autodiff.py create mode 100644 source/tests/pd/model/test_descriptor.py create mode 100644 source/tests/pd/model/test_dp_atomic_model.py create mode 100644 source/tests/pd/model/test_dp_model.py create mode 100644 source/tests/pd/model/test_embedding_net.py create mode 100644 source/tests/pd/model/test_ener_fitting.py create mode 100644 source/tests/pd/model/test_env_mat.py create mode 100644 source/tests/pd/model/test_exclusion_mask.py create mode 100644 source/tests/pd/model/test_fitting_net.py create mode 100644 source/tests/pd/model/test_force_grad.py create mode 100644 source/tests/pd/model/test_forward_lower.py create mode 100644 source/tests/pd/model/test_get_model.py create mode 100644 source/tests/pd/model/test_jit.py create mode 100644 source/tests/pd/model/test_mlp.py create mode 100644 source/tests/pd/model/test_model.py create mode 100644 source/tests/pd/model/test_nlist.py create mode 100644 source/tests/pd/model/test_null_input.py create mode 100644 source/tests/pd/model/test_permutation.py 
create mode 100644 source/tests/pd/model/test_region.py create mode 100644 source/tests/pd/model/test_rot.py create mode 100644 source/tests/pd/model/test_rotation.py create mode 100644 source/tests/pd/model/test_saveload_se_e2_a.py create mode 100644 source/tests/pd/model/test_se_e2_a.py create mode 100644 source/tests/pd/model/test_smooth.py create mode 100644 source/tests/pd/model/test_trans.py create mode 100644 source/tests/pd/model/water/data/data_0/set.000/box.npy create mode 100644 source/tests/pd/model/water/data/data_0/set.000/coord.npy create mode 100644 source/tests/pd/model/water/data/data_0/set.000/energy.npy create mode 100644 source/tests/pd/model/water/data/data_0/set.000/force.npy create mode 100644 source/tests/pd/model/water/data/data_0/type.raw create mode 100644 source/tests/pd/model/water/data/data_0/type_map.raw create mode 100644 source/tests/pd/model/water/data/single/set.000/box.npy create mode 100644 source/tests/pd/model/water/data/single/set.000/coord.npy create mode 100644 source/tests/pd/model/water/data/single/set.000/energy.npy create mode 100644 source/tests/pd/model/water/data/single/set.000/force.npy create mode 100644 source/tests/pd/model/water/data/single/type.raw create mode 100644 source/tests/pd/model/water/data/single/type_map.raw create mode 100644 source/tests/pd/model/water/multitask.json create mode 100644 source/tests/pd/model/water/se_atten.json create mode 100644 source/tests/pd/model/water/se_e2_a.json create mode 100644 source/tests/pd/test_auto_batch_size.py create mode 100644 source/tests/pd/test_change_bias.py create mode 100644 source/tests/pd/test_decomp.py create mode 100644 source/tests/pd/test_dp_show.py create mode 100644 source/tests/pd/test_finetune.py create mode 100644 source/tests/pd/test_loss.py create mode 100644 source/tests/pd/test_lr.py create mode 100644 source/tests/pd/test_multitask.py create mode 100644 source/tests/pd/test_neighbor_stat.py create mode 100644 source/tests/pd/test_sampler.py create mode 100644 source/tests/pd/test_training.py create mode 100644 source/tests/pd/test_update_sel.py create mode 100644 source/tests/pd/test_utils.py create mode 120000 source/tests/pd/water diff --git a/.github/workflows/test_cuda.yml b/.github/workflows/test_cuda.yml index 4dbdc5acb9..db0dfb6c61 100644 --- a/.github/workflows/test_cuda.yml +++ b/.github/workflows/test_cuda.yml @@ -51,6 +51,7 @@ jobs: - run: | export PYTORCH_ROOT=$(python -c 'import torch;print(torch.__path__[0])') export TENSORFLOW_ROOT=$(python -c 'import importlib,pathlib;print(pathlib.Path(importlib.util.find_spec("tensorflow").origin).parent)') + source/install/uv_with_retry.sh pip install --system --pre paddlepaddle-gpu -i https://www.paddlepaddle.org.cn/packages/nightly/cu123/ source/install/uv_with_retry.sh pip install --system -v -e .[gpu,test,lmp,cu12,torch,jax] mpi4py env: DP_VARIANT: cuda diff --git a/.github/workflows/test_python.yml b/.github/workflows/test_python.yml index e30a19c8b1..e153164232 100644 --- a/.github/workflows/test_python.yml +++ b/.github/workflows/test_python.yml @@ -31,6 +31,7 @@ jobs: export PYTORCH_ROOT=$(python -c 'import torch;print(torch.__path__[0])') source/install/uv_with_retry.sh pip install --system -e .[test,jax] mpi4py source/install/uv_with_retry.sh pip install --system horovod --no-build-isolation + source/install/uv_with_retry.sh pip install --system --pre "paddlepaddle" -i https://www.paddlepaddle.org.cn/packages/nightly/cpu/ env: # Please note that uv has some issues with finding # existing TensorFlow 
package. Currently, it uses diff --git a/backend/find_paddle.py b/backend/find_paddle.py new file mode 100644 index 0000000000..bc54cdcaa5 --- /dev/null +++ b/backend/find_paddle.py @@ -0,0 +1,133 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import importlib +import os +import site +from functools import ( + lru_cache, +) +from importlib.machinery import ( + FileFinder, +) +from importlib.util import ( + find_spec, +) +from pathlib import ( + Path, +) +from sysconfig import ( + get_path, +) +from typing import ( + Optional, + Union, +) + + +@lru_cache +def find_paddle() -> tuple[Optional[str], list[str]]: + """Find PaddlePadle library. + + Tries to find PaddlePadle in the order of: + + 1. Environment variable `PADDLE_ROOT` if set + 2. The current Python environment. + 3. user site packages directory if enabled + 4. system site packages directory (purelib) + + Considering the default PaddlePadle package still uses old CXX11 ABI, we + cannot install it automatically. + + Returns + ------- + str, optional + PaddlePadle library path if found. + list of str + Paddle requirement if not found. Empty if found. + """ + if os.environ.get("DP_ENABLE_PADDLE", "0") == "0": + return None, [] + requires = [] + pd_spec = None + + if (pd_spec is None or not pd_spec) and os.environ.get("PADDLE_ROOT") is not None: + site_packages = Path(os.environ.get("PADDLE_ROOT")).parent.absolute() + pd_spec = FileFinder(str(site_packages)).find_spec("paddle") + + # get paddle spec + # note: isolated build will not work for backend + if pd_spec is None or not pd_spec: + pd_spec = find_spec("paddle") + + if not pd_spec and site.ENABLE_USER_SITE: + # first search TF from user site-packages before global site-packages + site_packages = site.getusersitepackages() + if site_packages: + pd_spec = FileFinder(site_packages).find_spec("paddle") + + if not pd_spec: + # purelib gets site-packages path + site_packages = get_path("purelib") + if site_packages: + pd_spec = FileFinder(site_packages).find_spec("paddle") + + # get install dir from spec + try: + pd_install_dir = pd_spec.submodule_search_locations[0] # type: ignore + # AttributeError if ft_spec is None + # TypeError if submodule_search_locations are None + # IndexError if submodule_search_locations is an empty list + except (AttributeError, TypeError, IndexError): + pd_install_dir = None + requires.extend(get_pd_requirement()["paddle"]) + return pd_install_dir, requires + + +@lru_cache +def get_pd_requirement(pd_version: str = "") -> dict: + """Get PaddlePadle requirement when Paddle is not installed. + + If pd_version is not given and the environment variable `PADDLE_VERSION` is set, use it as the requirement. + + Parameters + ---------- + pd_version : str, optional + Paddle version + + Returns + ------- + dict + PaddlePadle requirement. + """ + if pd_version is None: + return {"paddle": []} + if pd_version == "": + pd_version = os.environ.get("PADDLE_VERSION", "") + + return { + "paddle": [ + "paddlepaddle>=3.0.0b1" if pd_version != "" else "paddlepaddle>=3.0.0b1", + ], + } + + +@lru_cache +def get_pd_version(pd_path: Optional[Union[str, Path]]) -> str: + """Get Paddle version from a Paddle Python library path. + + Parameters + ---------- + pd_path : str or Path + Paddle Python library path, e.g. 
"/python3.10/site-packages/paddle/" + + Returns + ------- + str + version + """ + if pd_path is None or pd_path == "": + return "" + version_file = Path(pd_path) / "version" / "__init__.py" + spec = importlib.util.spec_from_file_location("paddle.version", version_file) + module = importlib.util.module_from_spec(spec) + spec.loader.exec_module(module) + return module.full_version diff --git a/deepmd/backend/paddle.py b/deepmd/backend/paddle.py new file mode 100644 index 0000000000..b1f664e76a --- /dev/null +++ b/deepmd/backend/paddle.py @@ -0,0 +1,124 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from importlib.util import ( + find_spec, +) +from typing import ( + TYPE_CHECKING, + Callable, + ClassVar, +) + +from deepmd.backend.backend import ( + Backend, +) + +if TYPE_CHECKING: + from argparse import ( + Namespace, + ) + + from deepmd.infer.deep_eval import ( + DeepEvalBackend, + ) + from deepmd.utils.neighbor_stat import ( + NeighborStat, + ) + + +@Backend.register("pd") +@Backend.register("paddle") +class PaddleBackend(Backend): + """Paddle backend.""" + + name = "Paddle" + """The formal name of the backend.""" + features: ClassVar[Backend.Feature] = ( + Backend.Feature.ENTRY_POINT + | Backend.Feature.DEEP_EVAL + | Backend.Feature.NEIGHBOR_STAT + | Backend.Feature.IO + ) + """The features of the backend.""" + suffixes: ClassVar[list[str]] = [".json", ".pd"] + """The suffixes of the backend.""" + + def is_available(self) -> bool: + """Check if the backend is available. + + Returns + ------- + bool + Whether the backend is available. + """ + return find_spec("paddle") is not None + + @property + def entry_point_hook(self) -> Callable[["Namespace"], None]: + """The entry point hook of the backend. + + Returns + ------- + Callable[[Namespace], None] + The entry point hook of the backend. + """ + from deepmd.pd.entrypoints.main import main as deepmd_main + + return deepmd_main + + @property + def deep_eval(self) -> type["DeepEvalBackend"]: + """The Deep Eval backend of the backend. + + Returns + ------- + type[DeepEvalBackend] + The Deep Eval backend of the backend. + """ + from deepmd.pd.infer.deep_eval import DeepEval as DeepEvalPD + + return DeepEvalPD + + @property + def neighbor_stat(self) -> type["NeighborStat"]: + """The neighbor statistics of the backend. + + Returns + ------- + type[NeighborStat] + The neighbor statistics of the backend. + """ + from deepmd.pd.utils.neighbor_stat import ( + NeighborStat, + ) + + return NeighborStat + + @property + def serialize_hook(self) -> Callable[[str], dict]: + """The serialize hook to convert the model file to a dictionary. + + Returns + ------- + Callable[[str], dict] + The serialize hook of the backend. + """ + from deepmd.pd.utils.serialization import ( + serialize_from_file, + ) + + return serialize_from_file + + @property + def deserialize_hook(self) -> Callable[[str, dict], None]: + """The deserialize hook to convert the dictionary to a model file. + + Returns + ------- + Callable[[str, dict], None] + The deserialize hook of the backend. + """ + from deepmd.pd.utils.serialization import ( + deserialize_to_file, + ) + + return deserialize_to_file diff --git a/deepmd/dpmodel/model/make_model.py b/deepmd/dpmodel/model/make_model.py index fbf2c6e21f..70ddbe09b8 100644 --- a/deepmd/dpmodel/model/make_model.py +++ b/deepmd/dpmodel/model/make_model.py @@ -457,7 +457,7 @@ def format_nlist( Returns ------- - formated_nlist + formatted_nlist the formatted nlist. 
""" diff --git a/deepmd/main.py b/deepmd/main.py index b3daf75963..097588ca0a 100644 --- a/deepmd/main.py +++ b/deepmd/main.py @@ -99,9 +99,10 @@ def main_parser() -> argparse.ArgumentParser: formatter_class=RawTextArgumentDefaultsHelpFormatter, epilog=textwrap.dedent( """\ - Use --tf or --pt to choose the backend: + Use --tf, --pt or --pd to choose the backend: dp --tf train input.json dp --pt train input.json + dp --pd train input.json """ ), ) diff --git a/deepmd/pd/__init__.py b/deepmd/pd/__init__.py new file mode 100644 index 0000000000..c3b2e96ef2 --- /dev/null +++ b/deepmd/pd/__init__.py @@ -0,0 +1,11 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later + +# import customized OPs globally + +from deepmd.utils.entry_point import ( + load_entry_point, +) + +load_entry_point("deepmd.pd") + +__all__ = [] diff --git a/deepmd/pd/entrypoints/__init__.py b/deepmd/pd/entrypoints/__init__.py new file mode 100644 index 0000000000..6ceb116d85 --- /dev/null +++ b/deepmd/pd/entrypoints/__init__.py @@ -0,0 +1 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later diff --git a/deepmd/pd/entrypoints/main.py b/deepmd/pd/entrypoints/main.py new file mode 100644 index 0000000000..19653d6ea7 --- /dev/null +++ b/deepmd/pd/entrypoints/main.py @@ -0,0 +1,543 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import argparse +import copy +import json +import logging +from pathlib import ( + Path, +) +from typing import ( + Optional, + Union, +) + +import h5py +import paddle +import paddle.distributed as dist +import paddle.distributed.fleet as fleet +import paddle.version + +from deepmd import ( + __version__, +) +from deepmd.common import ( + expand_sys_str, +) +from deepmd.loggers.loggers import ( + set_log_handles, +) +from deepmd.main import ( + parse_args, +) +from deepmd.pd.infer import ( + inference, +) +from deepmd.pd.model.model import ( + BaseModel, +) +from deepmd.pd.train import ( + training, +) +from deepmd.pd.train.wrapper import ( + ModelWrapper, +) +from deepmd.pd.utils.dataloader import ( + DpLoaderSet, +) +from deepmd.pd.utils.env import ( + DEVICE, +) +from deepmd.pd.utils.finetune import ( + get_finetune_rules, +) +from deepmd.pd.utils.multi_task import ( + preprocess_shared_params, +) +from deepmd.pd.utils.stat import ( + make_stat_input, +) +from deepmd.pd.utils.utils import ( + to_numpy_array, +) +from deepmd.utils.argcheck import ( + normalize, +) +from deepmd.utils.compat import ( + update_deepmd_input, +) +from deepmd.utils.data_system import ( + get_data, + process_systems, +) +from deepmd.utils.path import ( + DPPath, +) +from deepmd.utils.summary import SummaryPrinter as BaseSummaryPrinter + +log = logging.getLogger(__name__) + + +def get_trainer( + config, + init_model=None, + restart_model=None, + finetune_model=None, + force_load=False, + init_frz_model=None, + shared_links=None, + finetune_links=None, +): + multi_task = "model_dict" in config.get("model", {}) + + # Initialize DDP + world_size = dist.get_world_size() + if world_size > 1: + assert paddle.version.nccl() != "0" + fleet.init(is_collective=True) + + def prepare_trainer_input_single( + model_params_single, data_dict_single, rank=0, seed=None + ): + training_dataset_params = data_dict_single["training_data"] + validation_dataset_params = data_dict_single.get("validation_data", None) + validation_systems = ( + validation_dataset_params["systems"] if validation_dataset_params else None + ) + training_systems = training_dataset_params["systems"] + training_systems = process_systems(training_systems) + if validation_systems is not 
None: + validation_systems = process_systems(validation_systems) + + # stat files + stat_file_path_single = data_dict_single.get("stat_file", None) + if rank != 0: + stat_file_path_single = None + elif stat_file_path_single is not None: + if not Path(stat_file_path_single).exists(): + if stat_file_path_single.endswith((".h5", ".hdf5")): + with h5py.File(stat_file_path_single, "w") as f: + pass + else: + Path(stat_file_path_single).mkdir() + stat_file_path_single = DPPath(stat_file_path_single, "a") + + # validation and training data + # avoid the same batch sequence among devices + rank_seed = (seed + rank) % (2**32) if seed is not None else None + validation_data_single = ( + DpLoaderSet( + validation_systems, + validation_dataset_params["batch_size"], + model_params_single["type_map"], + seed=rank_seed, + ) + if validation_systems + else None + ) + train_data_single = DpLoaderSet( + training_systems, + training_dataset_params["batch_size"], + model_params_single["type_map"], + seed=rank_seed, + ) + return ( + train_data_single, + validation_data_single, + stat_file_path_single, + ) + + rank = dist.get_rank() if dist.is_available() and dist.is_initialized() else 0 + data_seed = config["training"].get("seed", None) + if not multi_task: + ( + train_data, + validation_data, + stat_file_path, + ) = prepare_trainer_input_single( + config["model"], + config["training"], + rank=rank, + seed=data_seed, + ) + else: + train_data, validation_data, stat_file_path = {}, {}, {} + for model_key in config["model"]["model_dict"]: + ( + train_data[model_key], + validation_data[model_key], + stat_file_path[model_key], + ) = prepare_trainer_input_single( + config["model"]["model_dict"][model_key], + config["training"]["data_dict"][model_key], + rank=rank, + seed=data_seed, + ) + + trainer = training.Trainer( + config, + train_data, + stat_file_path=stat_file_path, + validation_data=validation_data, + init_model=init_model, + restart_model=restart_model, + finetune_model=finetune_model, + force_load=force_load, + shared_links=shared_links, + finetune_links=finetune_links, + init_frz_model=init_frz_model, + ) + return trainer + + +class SummaryPrinter(BaseSummaryPrinter): + """Summary printer for Paddle.""" + + def is_built_with_cuda(self) -> bool: + """Check if the backend is built with CUDA.""" + return paddle.device.is_compiled_with_cuda() + + def is_built_with_rocm(self) -> bool: + """Check if the backend is built with ROCm.""" + return paddle.device.is_compiled_with_rocm() + + def get_compute_device(self) -> str: + """Get Compute device.""" + return str(DEVICE) + + def get_ngpus(self) -> int: + """Get the number of GPUs.""" + return paddle.device.cuda.device_count() + + def get_backend_info(self) -> dict: + """Get backend information.""" + op_info = {} + return { + "Backend": "Paddle", + "PD ver": f"v{paddle.__version__}-g{paddle.version.commit[:11]}", + "Enable custom OP": False, + **op_info, + } + + +def train( + input_file: str, + init_model: Optional[str], + restart: Optional[str], + finetune: Optional[str], + init_frz_model: Optional[str], + model_branch: str, + skip_neighbor_stat: bool = False, + use_pretrain_script: bool = False, + force_load: bool = False, + output: str = "out.json", +): + log.info("Configuration path: %s", input_file) + SummaryPrinter()() + with open(input_file) as fin: + config = json.load(fin) + # ensure suffix, as in the command line help, we say "path prefix of checkpoint files" + if init_model is not None and not init_model.endswith(".pd"): + init_model += ".pd" + if restart 
is not None and not restart.endswith(".pd"): + restart += ".pd" + + # update multitask config + multi_task = "model_dict" in config["model"] + shared_links = None + if multi_task: + config["model"], shared_links = preprocess_shared_params(config["model"]) + # handle the special key + assert ( + "RANDOM" not in config["model"]["model_dict"] + ), "Model name can not be 'RANDOM' in multi-task mode!" + + # update fine-tuning config + finetune_links = None + if finetune is not None: + config["model"], finetune_links = get_finetune_rules( + finetune, + config["model"], + model_branch=model_branch, + change_model_params=use_pretrain_script, + ) + # update init_model or init_frz_model config if necessary + if (init_model is not None or init_frz_model is not None) and use_pretrain_script: + if init_model is not None: + init_state_dict = paddle.load(init_model) + if "model" in init_state_dict: + init_state_dict = init_state_dict["model"] + config["model"] = init_state_dict["_extra_state"]["model_params"] + else: + raise NotImplementedError("init_frz_model is not supported yet") + + # argcheck + config = update_deepmd_input(config, warning=True, dump="input_v2_compat.json") + config = normalize(config, multi_task=multi_task) + + # do neighbor stat + min_nbor_dist = None + if not skip_neighbor_stat: + log.info( + "Calculate neighbor statistics... (add --skip-neighbor-stat to skip this step)" + ) + + if not multi_task: + type_map = config["model"].get("type_map") + train_data = get_data( + config["training"]["training_data"], 0, type_map, None + ) + config["model"], min_nbor_dist = BaseModel.update_sel( + train_data, type_map, config["model"] + ) + else: + min_nbor_dist = {} + for model_item in config["model"]["model_dict"]: + type_map = config["model"]["model_dict"][model_item].get("type_map") + train_data = get_data( + config["training"]["data_dict"][model_item]["training_data"], + 0, + type_map, + None, + ) + config["model"]["model_dict"][model_item], min_nbor_dist[model_item] = ( + BaseModel.update_sel( + train_data, type_map, config["model"]["model_dict"][model_item] + ) + ) + + with open(output, "w") as fp: + json.dump(config, fp, indent=4) + + trainer = get_trainer( + config, + init_model, + restart, + finetune, + force_load, + init_frz_model, + shared_links=shared_links, + finetune_links=finetune_links, + ) + # save min_nbor_dist + if min_nbor_dist is not None: + if not multi_task: + trainer.model.min_nbor_dist = min_nbor_dist + else: + for model_item in min_nbor_dist: + trainer.model[model_item].min_nbor_dist = min_nbor_dist[model_item] + trainer.run() + + +def freeze( + model: str, + output: str = "frozen_model.json", + head: Optional[str] = None, +): + paddle.set_flags( + { + "FLAGS_save_cf_stack_op": 1, + "FLAGS_prim_enable_dynamic": 1, + "FLAGS_enable_pir_api": 1, + } + ) + model = inference.Tester(model, head=head).model + model.eval() + from paddle.static import ( + InputSpec, + ) + + """ + ** coord [None, natoms, 3] paddle.float64 + ** atype [None, natoms] paddle.int64 + ** nlist [None, natoms, nnei] paddle.int32 + """ + # NOTE: 'FLAGS_save_cf_stack_op', 'FLAGS_prim_enable_dynamic' and + # 'FLAGS_enable_pir_api' shoule be enabled when freezing model. 
+ jit_model = paddle.jit.to_static( + model.forward_lower, + full_graph=True, + input_spec=[ + InputSpec([-1, -1, 3], dtype="float64", name="coord"), + InputSpec([-1, -1], dtype="int32", name="atype"), + InputSpec([-1, -1, -1], dtype="int32", name="nlist"), + ], + ) + if output.endswith(".json"): + output = output[:-5] + paddle.jit.save( + jit_model, + path=output, + skip_prune_program=True, + ) + log.info( + f"Paddle inference model has been exported to: {output}.json and {output}.pdiparams" + ) + + +def change_bias( + input_file: str, + mode: str = "change", + bias_value: Optional[list] = None, + datafile: Optional[str] = None, + system: str = ".", + numb_batch: int = 0, + model_branch: Optional[str] = None, + output: Optional[str] = None, +): + if input_file.endswith(".pd"): + old_state_dict = paddle.load(input_file) + model_state_dict = copy.deepcopy(old_state_dict.get("model", old_state_dict)) + model_params = model_state_dict["_extra_state"]["model_params"] + else: + raise RuntimeError( + "Paddle now do not support change bias directly from a freezed model file" + "Please provided a checkpoint file with a .pd extension" + ) + multi_task = "model_dict" in model_params + bias_adjust_mode = "change-by-statistic" if mode == "change" else "set-by-statistic" + if multi_task: + assert ( + model_branch is not None + ), "For multitask model, the model branch must be set!" + assert model_branch in model_params["model_dict"], ( + f"For multitask model, the model branch must be in the 'model_dict'! " + f"Available options are : {list(model_params['model_dict'].keys())}." + ) + log.info(f"Changing out bias for model {model_branch}.") + model = training.get_model_for_wrapper(model_params) + type_map = ( + model_params["type_map"] + if not multi_task + else model_params["model_dict"][model_branch]["type_map"] + ) + model_to_change = model if not multi_task else model[model_branch] + if input_file.endswith(".pd"): + wrapper = ModelWrapper(model) + wrapper.set_state_dict(old_state_dict["model"]) + else: + raise NotImplementedError("Only support .pd file") + + if bias_value is not None: + # use user-defined bias + assert model_to_change.model_type in [ + "ener" + ], "User-defined bias is only available for energy model!" + assert ( + len(bias_value) == len(type_map) + ), f"The number of elements in the bias should be the same as that in the type_map: {type_map}." + old_bias = model_to_change.get_out_bias() + bias_to_set = paddle.to_tensor( + bias_value, dtype=old_bias.dtype, place=old_bias.place + ).reshape(old_bias.shape) + model_to_change.set_out_bias(bias_to_set) + log.info( + f"Change output bias of {type_map!s} " + f"from {to_numpy_array(old_bias).reshape(-1)!s} " + f"to {to_numpy_array(bias_to_set).reshape(-1)!s}." 
+ ) + updated_model = model_to_change + else: + # calculate bias on given systems + if datafile is not None: + with open(datafile) as datalist: + all_sys = datalist.read().splitlines() + else: + all_sys = expand_sys_str(system) + data_systems = process_systems(all_sys) + data_single = DpLoaderSet( + data_systems, + 1, + type_map, + ) + mock_loss = training.get_loss( + {"inference": True}, 1.0, len(type_map), model_to_change + ) + data_requirement = mock_loss.label_requirement + data_requirement += training.get_additional_data_requirement(model_to_change) + data_single.add_data_requirement(data_requirement) + nbatches = numb_batch if numb_batch != 0 else float("inf") + sampled_data = make_stat_input( + data_single.systems, + data_single.dataloaders, + nbatches, + ) + updated_model = training.model_change_out_bias( + model_to_change, sampled_data, _bias_adjust_mode=bias_adjust_mode + ) + + if not multi_task: + model = updated_model + else: + model[model_branch] = updated_model + + if input_file.endswith(".pd"): + output_path = ( + output if output is not None else input_file.replace(".pd", "_updated.pd") + ) + wrapper = ModelWrapper(model) + if "model" in old_state_dict: + old_state_dict["model"] = wrapper.state_dict() + old_state_dict["model"]["_extra_state"] = model_state_dict["_extra_state"] + else: + old_state_dict = wrapper.state_dict() + old_state_dict["_extra_state"] = model_state_dict["_extra_state"] + paddle.save(old_state_dict, output_path) + else: + raise NotImplementedError("Only support .pd file now") + + log.info(f"Saved model to {output_path}") + + +def main(args: Optional[Union[list[str], argparse.Namespace]] = None): + if not isinstance(args, argparse.Namespace): + FLAGS = parse_args(args=args) + else: + FLAGS = args + + set_log_handles( + FLAGS.log_level, + Path(FLAGS.log_path) if FLAGS.log_path else None, + mpi_log=None, + ) + log.debug("Log handles were successfully set") + log.info("DeePMD version: %s", __version__) + + if FLAGS.command == "train": + train( + input_file=FLAGS.INPUT, + init_model=FLAGS.init_model, + restart=FLAGS.restart, + finetune=FLAGS.finetune, + init_frz_model=FLAGS.init_frz_model, + model_branch=FLAGS.model_branch, + skip_neighbor_stat=FLAGS.skip_neighbor_stat, + use_pretrain_script=FLAGS.use_pretrain_script, + force_load=FLAGS.force_load, + output=FLAGS.output, + ) + elif FLAGS.command == "freeze": + if Path(FLAGS.checkpoint_folder).is_dir(): + checkpoint_path = Path(FLAGS.checkpoint_folder) + latest_ckpt_file = (checkpoint_path / "checkpoint").read_text() + FLAGS.model = str(checkpoint_path.joinpath(latest_ckpt_file)) + else: + FLAGS.model = FLAGS.checkpoint_folder + FLAGS.output = str(Path(FLAGS.output).with_suffix(".json")) + freeze(model=FLAGS.model, output=FLAGS.output, head=FLAGS.head) + elif FLAGS.command == "change-bias": + change_bias( + input_file=FLAGS.INPUT, + mode=FLAGS.mode, + bias_value=FLAGS.bias_value, + datafile=FLAGS.datafile, + system=FLAGS.system, + numb_batch=FLAGS.numb_batch, + model_branch=FLAGS.model_branch, + output=FLAGS.output, + ) + else: + raise RuntimeError(f"Invalid command {FLAGS.command}!") + + +if __name__ == "__main__": + main() diff --git a/deepmd/pd/infer/__init__.py b/deepmd/pd/infer/__init__.py new file mode 100644 index 0000000000..6ceb116d85 --- /dev/null +++ b/deepmd/pd/infer/__init__.py @@ -0,0 +1 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later diff --git a/deepmd/pd/infer/deep_eval.py b/deepmd/pd/infer/deep_eval.py new file mode 100644 index 0000000000..a2f8510f28 --- /dev/null +++ 
b/deepmd/pd/infer/deep_eval.py @@ -0,0 +1,537 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Optional, + Union, +) + +import numpy as np +import paddle + +from deepmd.dpmodel.common import PRECISION_DICT as NP_PRECISION_DICT +from deepmd.dpmodel.output_def import ( + ModelOutputDef, + OutputVariableCategory, + OutputVariableDef, +) +from deepmd.infer.deep_eval import DeepEval as DeepEvalWrapper +from deepmd.infer.deep_eval import ( + DeepEvalBackend, +) +from deepmd.infer.deep_pot import ( + DeepPot, +) +from deepmd.pd.model.model import ( + get_model, +) +from deepmd.pd.train.wrapper import ( + ModelWrapper, +) +from deepmd.pd.utils.auto_batch_size import ( + AutoBatchSize, +) +from deepmd.pd.utils.env import ( + DEVICE, + GLOBAL_PD_FLOAT_PRECISION, + RESERVED_PRECISON_DICT, + enable_prim, +) +from deepmd.pd.utils.utils import ( + to_numpy_array, + to_paddle_tensor, +) + +if TYPE_CHECKING: + import ase.neighborlist + + +class DeepEval(DeepEvalBackend): + """Paddle backend implementation of DeepEval. + + Parameters + ---------- + model_file : Path + The name of the frozen model file. + output_def : ModelOutputDef + The output definition of the model. + *args : list + Positional arguments. + auto_batch_size : bool or int or AutomaticBatchSize, default: False + If True, automatic batch size will be used. If int, it will be used + as the initial batch size. + neighbor_list : ase.neighborlist.NewPrimitiveNeighborList, optional + The ASE neighbor list class to produce the neighbor list. If None, the + neighbor list will be built natively in the model. + **kwargs : dict + Keyword arguments. + """ + + def __init__( + self, + model_file: str, + output_def: ModelOutputDef, + *args: Any, + auto_batch_size: Union[bool, int, AutoBatchSize] = True, + neighbor_list: Optional["ase.neighborlist.NewPrimitiveNeighborList"] = None, + head: Optional[Union[str, int]] = None, + **kwargs: Any, + ): + enable_prim(True) + self.output_def = output_def + self.model_path = model_file + if str(self.model_path).endswith(".pd"): + state_dict = paddle.load(model_file) + if "model" in state_dict: + state_dict = state_dict["model"] + self.input_param = state_dict["_extra_state"]["model_params"] + self.model_def_script = self.input_param + self.multi_task = "model_dict" in self.input_param + if self.multi_task: + model_keys = list(self.input_param["model_dict"].keys()) + if isinstance(head, int): + head = model_keys[0] + assert ( + head is not None + ), f"Head must be set for multitask model! Available heads are: {model_keys}" + assert ( + head in model_keys + ), f"No head named {head} in model! Available heads are: {model_keys}" + self.input_param = self.input_param["model_dict"][head] + state_dict_head = {"_extra_state": state_dict["_extra_state"]} + for item in state_dict: + if f"model.{head}." 
in item: + state_dict_head[ + item.replace(f"model.{head}.", "model.Default.") + ] = state_dict[item].clone() + state_dict = state_dict_head + model = get_model(self.input_param).to(DEVICE) + # model = paddle.jit.to_static(model) + self.dp = ModelWrapper(model) + self.dp.set_state_dict(state_dict) + else: + # self.dp = paddle.jit.load(self.model_path.split(".json")[0]) + raise ValueError(f"Unknown model file format: {self.model_path}!") + self.rcut = self.dp.model["Default"].get_rcut() + self.type_map = self.dp.model["Default"].get_type_map() + if isinstance(auto_batch_size, bool): + if auto_batch_size: + self.auto_batch_size = AutoBatchSize() + else: + self.auto_batch_size = None + elif isinstance(auto_batch_size, int): + self.auto_batch_size = AutoBatchSize(auto_batch_size) + elif isinstance(auto_batch_size, AutoBatchSize): + self.auto_batch_size = auto_batch_size + else: + raise TypeError("auto_batch_size should be bool, int, or AutoBatchSize") + self._has_spin = getattr(self.dp.model["Default"], "has_spin", False) + if callable(self._has_spin): + self._has_spin = self._has_spin() + + def get_rcut(self) -> float: + """Get the cutoff radius of this model.""" + return self.rcut + + def get_ntypes(self) -> int: + """Get the number of atom types of this model.""" + return len(self.type_map) + + def get_type_map(self) -> list[str]: + """Get the type map (element name of the atom types) of this model.""" + return self.type_map + + def get_dim_fparam(self) -> int: + """Get the number (dimension) of frame parameters of this DP.""" + return self.dp.model["Default"].get_dim_fparam() + + def get_dim_aparam(self) -> int: + """Get the number (dimension) of atomic parameters of this DP.""" + return self.dp.model["Default"].get_dim_aparam() + + def get_intensive(self) -> bool: + return self.dp.model["Default"].get_intensive() + + @property + def model_type(self) -> type["DeepEvalWrapper"]: + """The the evaluator of the model type.""" + model_output_type = self.dp.model["Default"].model_output_type() + if "energy" in model_output_type: + return DeepPot + else: + raise RuntimeError("Unknown model type") + + def get_sel_type(self) -> list[int]: + """Get the selected atom types of this model. + + Only atoms with selected atom types have atomic contribution + to the result of the model. + If returning an empty list, all atom types are selected. + """ + return self.dp.model["Default"].get_sel_type() + + def get_numb_dos(self) -> int: + """Get the number of DOS.""" + return self.dp.model["Default"].get_numb_dos() + + def get_task_dim(self) -> int: + """Get the output dimension.""" + return self.dp.model["Default"].get_task_dim() + + def get_has_efield(self): + """Check if the model has efield.""" + return False + + def get_ntypes_spin(self): + """Get the number of spin atom types of this model. Only used in old implement.""" + return 0 + + def get_has_spin(self): + """Check if the model has spin atom types.""" + return self._has_spin + + def eval( + self, + coords: np.ndarray, + cells: Optional[np.ndarray], + atom_types: np.ndarray, + atomic: bool = False, + fparam: Optional[np.ndarray] = None, + aparam: Optional[np.ndarray] = None, + **kwargs: Any, + ) -> dict[str, np.ndarray]: + """Evaluate the energy, force and virial by using this DP. + + Parameters + ---------- + coords + The coordinates of atoms. + The array should be of size nframes x natoms x 3 + cells + The cell of the region. + If None then non-PBC is assumed, otherwise using PBC. 
+ The array should be of size nframes x 9 + atom_types + The atom types + The list should contain natoms ints + atomic + Calculate the atomic energy and virial + fparam + The frame parameter. + The array can be of size : + - nframes x dim_fparam. + - dim_fparam. Then all frames are assumed to be provided with the same fparam. + aparam + The atomic parameter + The array can be of size : + - nframes x natoms x dim_aparam. + - natoms x dim_aparam. Then all frames are assumed to be provided with the same aparam. + - dim_aparam. Then all frames and atoms are provided with the same aparam. + **kwargs + Other parameters + + Returns + ------- + output_dict : dict + The output of the evaluation. The keys are the names of the output + variables, and the values are the corresponding output arrays. + """ + # convert all of the input to numpy array + atom_types = np.array(atom_types, dtype=np.int32) + coords = np.array(coords) + if cells is not None: + cells = np.array(cells) + natoms, numb_test = self._get_natoms_and_nframes( + coords, atom_types, len(atom_types.shape) > 1 + ) + request_defs = self._get_request_defs(atomic) + if "spin" not in kwargs or kwargs["spin"] is None: + out = self._eval_func(self._eval_model, numb_test, natoms)( + coords, cells, atom_types, fparam, aparam, request_defs + ) + else: + out = self._eval_func(self._eval_model_spin, numb_test, natoms)( + coords, + cells, + atom_types, + np.array(kwargs["spin"]), + fparam, + aparam, + request_defs, + ) + return dict( + zip( + [x.name for x in request_defs], + out, + ) + ) + + def _get_request_defs(self, atomic: bool) -> list[OutputVariableDef]: + """Get the requested output definitions. + + When atomic is True, all output_def are requested. + When atomic is False, only energy (tensor), force, and virial + are requested. + + Parameters + ---------- + atomic : bool + Whether to request the atomic output. + + Returns + ------- + list[OutputVariableDef] + The requested output definitions. + """ + if atomic: + return list(self.output_def.var_defs.values()) + else: + return [ + x + for x in self.output_def.var_defs.values() + if x.category + in ( + OutputVariableCategory.OUT, + OutputVariableCategory.REDU, + OutputVariableCategory.DERV_R, + OutputVariableCategory.DERV_C_REDU, + ) + ] + + def _eval_func(self, inner_func: Callable, numb_test: int, natoms: int) -> Callable: + """Wrapper method with auto batch size. 
+ + Parameters + ---------- + inner_func : Callable + the method to be wrapped + numb_test : int + number of tests + natoms : int + number of atoms + + Returns + ------- + Callable + the wrapper + """ + if self.auto_batch_size is not None: + + def eval_func(*args, **kwargs): + return self.auto_batch_size.execute_all( + inner_func, numb_test, natoms, *args, **kwargs + ) + + else: + eval_func = inner_func + return eval_func + + def _get_natoms_and_nframes( + self, + coords: np.ndarray, + atom_types: np.ndarray, + mixed_type: bool = False, + ) -> tuple[int, int]: + if mixed_type: + natoms = len(atom_types[0]) + else: + natoms = len(atom_types) + if natoms == 0: + assert coords.size == 0 + else: + coords = np.reshape(np.array(coords), [-1, natoms * 3]) + nframes = coords.shape[0] + return natoms, nframes + + def _eval_model( + self, + coords: np.ndarray, + cells: Optional[np.ndarray], + atom_types: np.ndarray, + fparam: Optional[np.ndarray], + aparam: Optional[np.ndarray], + request_defs: list[OutputVariableDef], + ): + model = self.dp.to(DEVICE) + prec = NP_PRECISION_DICT[RESERVED_PRECISON_DICT[GLOBAL_PD_FLOAT_PRECISION]] + + nframes = coords.shape[0] + if len(atom_types.shape) == 1: + natoms = len(atom_types) + atom_types = np.tile(atom_types, nframes).reshape([nframes, -1]) + else: + natoms = len(atom_types[0]) + + coord_input = paddle.to_tensor( + coords.reshape([nframes, natoms, 3]).astype(prec), + dtype=GLOBAL_PD_FLOAT_PRECISION, + place=DEVICE, + ) + type_input = paddle.to_tensor( + atom_types.astype(NP_PRECISION_DICT[RESERVED_PRECISON_DICT[paddle.int64]]), + dtype=paddle.int64, + place=DEVICE, + ) + if cells is not None: + box_input = paddle.to_tensor( + cells.reshape([nframes, 3, 3]), + dtype=GLOBAL_PD_FLOAT_PRECISION, + place=DEVICE, + ) + else: + box_input = None + if fparam is not None: + fparam_input = to_paddle_tensor( + fparam.reshape([nframes, self.get_dim_fparam()]) + ) + else: + fparam_input = None + if aparam is not None: + aparam_input = to_paddle_tensor( + aparam.reshape([nframes, natoms, self.get_dim_aparam()]) + ) + else: + aparam_input = None + do_atomic_virial = any( + x.category == OutputVariableCategory.DERV_C for x in request_defs + ) + batch_output = model( + coord_input, + type_input, + box=box_input, + do_atomic_virial=do_atomic_virial, + fparam=fparam_input, + aparam=aparam_input, + ) + if isinstance(batch_output, tuple): + batch_output = batch_output[0] + + results = [] + for odef in request_defs: + pd_name = self._OUTDEF_DP2BACKEND[odef.name] + if pd_name in batch_output: + shape = self._get_output_shape(odef, nframes, natoms) + out = batch_output[pd_name].reshape(shape).numpy() + results.append(out) + else: + shape = self._get_output_shape(odef, nframes, natoms) + results.append( + np.full(np.abs(shape), np.nan, dtype=prec) + ) # this is kinda hacky + return tuple(results) + + def _eval_model_spin( + self, + coords: np.ndarray, + cells: Optional[np.ndarray], + atom_types: np.ndarray, + spins: np.ndarray, + fparam: Optional[np.ndarray], + aparam: Optional[np.ndarray], + request_defs: list[OutputVariableDef], + ): + raise NotImplementedError("_eval_model_spin is not supported yet.") + + def _get_output_shape(self, odef, nframes, natoms): + if odef.category == OutputVariableCategory.DERV_C_REDU: + # virial + return [nframes, *odef.shape[:-1], 9] + elif odef.category == OutputVariableCategory.REDU: + # energy + return [nframes, *odef.shape, 1] + elif odef.category == OutputVariableCategory.DERV_C: + # atom_virial + return [nframes, *odef.shape[:-1], natoms, 9] + 
elif odef.category == OutputVariableCategory.DERV_R: + # force + return [nframes, *odef.shape[:-1], natoms, 3] + elif odef.category == OutputVariableCategory.OUT: + # atom_energy, atom_tensor + # Something wrong here? + # return [nframes, *shape, natoms, 1] + return [nframes, natoms, *odef.shape, 1] + else: + raise RuntimeError("unknown category") + + def eval_typeebd(self) -> np.ndarray: + """Evaluate output of type embedding network by using this model. + + Returns + ------- + np.ndarray + The output of type embedding network. The shape is [ntypes, o_size] or [ntypes + 1, o_size], + where ntypes is the number of types, and o_size is the number of nodes + in the output layer. If there are multiple type embedding networks, + these outputs will be concatenated along the second axis. + + Raises + ------ + KeyError + If the model does not enable type embedding. + + See Also + -------- + deepmd.pd.model.network.network.TypeEmbedNetConsistent : + The type embedding network. + """ + raise NotImplementedError("eval_typeebd is not supported yet.") + + def get_model_def_script(self) -> str: + """Get model definition script.""" + return self.model_def_script + + def eval_descriptor( + self, + coords: np.ndarray, + cells: Optional[np.ndarray], + atom_types: np.ndarray, + fparam: Optional[np.ndarray] = None, + aparam: Optional[np.ndarray] = None, + **kwargs: Any, + ) -> np.ndarray: + """Evaluate descriptors by using this DP. + + Parameters + ---------- + coords + The coordinates of atoms. + The array should be of size nframes x natoms x 3 + cells + The cell of the region. + If None then non-PBC is assumed, otherwise using PBC. + The array should be of size nframes x 9 + atom_types + The atom types + The list should contain natoms ints + fparam + The frame parameter. + The array can be of size : + - nframes x dim_fparam. + - dim_fparam. Then all frames are assumed to be provided with the same fparam. + aparam + The atomic parameter + The array can be of size : + - nframes x natoms x dim_aparam. + - natoms x dim_aparam. Then all frames are assumed to be provided with the same aparam. + - dim_aparam. Then all frames and atoms are provided with the same aparam. + + Returns + ------- + descriptor + Descriptors. + """ + model = self.dp.model["Default"] + model.set_eval_descriptor_hook(True) + self.eval( + coords, + cells, + atom_types, + atomic=False, + fparam=fparam, + aparam=aparam, + **kwargs, + ) + descriptor = model.eval_descriptor() + model.set_eval_descriptor_hook(False) + return to_numpy_array(descriptor) diff --git a/deepmd/pd/infer/inference.py b/deepmd/pd/infer/inference.py new file mode 100644 index 0000000000..ae1b8e8516 --- /dev/null +++ b/deepmd/pd/infer/inference.py @@ -0,0 +1,64 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import logging +from copy import ( + deepcopy, +) + +import paddle + +from deepmd.pd.model.model import ( + get_model, +) +from deepmd.pd.train.wrapper import ( + ModelWrapper, +) +from deepmd.pd.utils.env import ( + DEVICE, + JIT, +) + +log = logging.getLogger(__name__) + + +class Tester: + def __init__( + self, + model_ckpt, + head=None, + ): + """Construct a DeePMD tester. + + Args: + - config: The Dict-like configuration with training options. + """ + # Model + state_dict = paddle.load(model_ckpt) + if "model" in state_dict: + state_dict = state_dict["model"] + model_params = state_dict["_extra_state"]["model_params"] + self.multi_task = "model_dict" in model_params + if self.multi_task: + assert head is not None, "Head must be specified in multitask mode!" 
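Both the evaluator earlier in this file and the `Tester` here collapse a multitask checkpoint onto the single `Default` head by renaming state-dict keys, as in the loop just below. A sketch of that renaming on a plain dict (real tensor values would also be cloned, which is skipped here):

```python
def select_head(state_dict: dict, head: str) -> dict:
    """Keep '_extra_state' plus one head's entries, renamed to 'Default'."""
    out = {"_extra_state": state_dict["_extra_state"]}
    prefix = f"model.{head}."
    for key, value in state_dict.items():
        if prefix in key:
            out[key.replace(prefix, "model.Default.")] = value
    return out


ckpt = {
    "_extra_state": {"model_params": {}},
    "model.water_1.descriptor.w": 1.0,
    "model.water_2.descriptor.w": 2.0,
}
print(select_head(ckpt, "water_1"))
# {'_extra_state': {...}, 'model.Default.descriptor.w': 1.0}
```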
+ self.head = head + assert head in model_params["model_dict"], ( + f"Specified head {head} not found in model {model_ckpt}! " + f"Available ones are {list(model_params['model_dict'].keys())}." + ) + model_params = model_params["model_dict"][head] + state_dict_head = {"_extra_state": state_dict["_extra_state"]} + for item in state_dict: + if f"model.{head}." in item: + state_dict_head[ + item.replace(f"model.{head}.", "model.Default.") + ] = state_dict[item].clone() + state_dict = state_dict_head + + self.model_params = deepcopy(model_params) + self.model = get_model(model_params).to(DEVICE) + + # Model Wrapper + self.wrapper = ModelWrapper(self.model) # inference only + if JIT: + raise NotImplementedError + # self.wrapper = paddle.jit.to_static(self.wrapper) + self.wrapper.set_state_dict(state_dict) diff --git a/deepmd/pd/loss/__init__.py b/deepmd/pd/loss/__init__.py new file mode 100644 index 0000000000..0e978b95c2 --- /dev/null +++ b/deepmd/pd/loss/__init__.py @@ -0,0 +1,12 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from .ener import ( + EnergyStdLoss, +) +from .loss import ( + TaskLoss, +) + +__all__ = [ + "EnergyStdLoss", + "TaskLoss", +] diff --git a/deepmd/pd/loss/ener.py b/deepmd/pd/loss/ener.py new file mode 100644 index 0000000000..7c5d848b45 --- /dev/null +++ b/deepmd/pd/loss/ener.py @@ -0,0 +1,428 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from typing import ( + Optional, +) + +import paddle +import paddle.nn.functional as F + +from deepmd.pd.loss.loss import ( + TaskLoss, +) +from deepmd.pd.utils import ( + decomp, + env, +) +from deepmd.pd.utils.env import ( + GLOBAL_PD_FLOAT_PRECISION, +) +from deepmd.utils.data import ( + DataRequirementItem, +) + + +class EnergyStdLoss(TaskLoss): + def __init__( + self, + starter_learning_rate=1.0, + start_pref_e=0.0, + limit_pref_e=0.0, + start_pref_f=0.0, + limit_pref_f=0.0, + start_pref_v=0.0, + limit_pref_v=0.0, + start_pref_ae: float = 0.0, + limit_pref_ae: float = 0.0, + start_pref_pf: float = 0.0, + limit_pref_pf: float = 0.0, + relative_f: Optional[float] = None, + enable_atom_ener_coeff: bool = False, + start_pref_gf: float = 0.0, + limit_pref_gf: float = 0.0, + numb_generalized_coord: int = 0, + use_l1_all: bool = False, + inference=False, + **kwargs, + ): + r"""Construct a layer to compute loss on energy, force and virial. + + Parameters + ---------- + starter_learning_rate : float + The learning rate at the start of the training. + start_pref_e : float + The prefactor of energy loss at the start of the training. + limit_pref_e : float + The prefactor of energy loss at the end of the training. + start_pref_f : float + The prefactor of force loss at the start of the training. + limit_pref_f : float + The prefactor of force loss at the end of the training. + start_pref_v : float + The prefactor of virial loss at the start of the training. + limit_pref_v : float + The prefactor of virial loss at the end of the training. + start_pref_ae : float + The prefactor of atomic energy loss at the start of the training. + limit_pref_ae : float + The prefactor of atomic energy loss at the end of the training. + start_pref_pf : float + The prefactor of atomic prefactor force loss at the start of the training. + limit_pref_pf : float + The prefactor of atomic prefactor force loss at the end of the training. + relative_f : float + If provided, relative force error will be used in the loss. 
The difference + of force will be normalized by the magnitude of the force in the label with + a shift given by relative_f + enable_atom_ener_coeff : bool + if true, the energy will be computed as \sum_i c_i E_i + start_pref_gf : float + The prefactor of generalized force loss at the start of the training. + limit_pref_gf : float + The prefactor of generalized force loss at the end of the training. + numb_generalized_coord : int + The dimension of generalized coordinates. + use_l1_all : bool + Whether to use L1 loss, if False (default), it will use L2 loss. + inference : bool + If true, it will output all losses found in output, ignoring the pre-factors. + **kwargs + Other keyword arguments. + """ + super().__init__() + self.starter_learning_rate = starter_learning_rate + self.has_e = (start_pref_e != 0.0 and limit_pref_e != 0.0) or inference + self.has_f = (start_pref_f != 0.0 and limit_pref_f != 0.0) or inference + self.has_v = (start_pref_v != 0.0 and limit_pref_v != 0.0) or inference + self.has_ae = (start_pref_ae != 0.0 and limit_pref_ae != 0.0) or inference + self.has_pf = (start_pref_pf != 0.0 and limit_pref_pf != 0.0) or inference + self.has_gf = start_pref_gf != 0.0 and limit_pref_gf != 0.0 + + self.start_pref_e = start_pref_e + self.limit_pref_e = limit_pref_e + self.start_pref_f = start_pref_f + self.limit_pref_f = limit_pref_f + self.start_pref_v = start_pref_v + self.limit_pref_v = limit_pref_v + self.start_pref_ae = start_pref_ae + self.limit_pref_ae = limit_pref_ae + self.start_pref_pf = start_pref_pf + self.limit_pref_pf = limit_pref_pf + self.start_pref_gf = start_pref_gf + self.limit_pref_gf = limit_pref_gf + self.relative_f = relative_f + self.enable_atom_ener_coeff = enable_atom_ener_coeff + self.numb_generalized_coord = numb_generalized_coord + if self.has_gf and self.numb_generalized_coord < 1: + raise RuntimeError( + "When generalized force loss is used, the dimension of generalized coordinates should be larger than 0" + ) + self.use_l1_all = use_l1_all + self.inference = inference + + def forward(self, input_dict, model, label, natoms, learning_rate, mae=False): + """Return loss on energy and force. + + Parameters + ---------- + input_dict : dict[str, paddle.Tensor] + Model inputs. + model : paddle.nn.Layer + Model to be used to output the predictions. + label : dict[str, paddle.Tensor] + Labels. + natoms : int + The local atom number. + + Returns + ------- + model_pred: dict[str, paddle.Tensor] + Model predictions. + loss: paddle.Tensor + Loss for model to minimize. + more_loss: dict[str, paddle.Tensor] + Other losses for display. 
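In `forward` below, each prefactor is interpolated linearly in the learning rate: it starts at its `start_pref_*` value and reaches its `limit_pref_*` value as the learning rate decays toward zero. A small sketch of that schedule:

```python
def loss_prefactor(start: float, limit: float, lr: float, lr0: float) -> float:
    """Interpolate a loss prefactor along the learning-rate decay."""
    coef = lr / lr0
    return limit + (start - limit) * coef


# force prefactor decaying from 1000 to 1 as the LR decays from 1e-3 to 1e-5
for lr in (1e-3, 1e-4, 1e-5):
    print(lr, loss_prefactor(start=1000.0, limit=1.0, lr=lr, lr0=1e-3))
# 0.001 -> 1000.0, 0.0001 -> 100.9, 1e-05 -> 10.99
```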
+ """ + model_pred = model(**input_dict) + coef = learning_rate / self.starter_learning_rate + pref_e = self.limit_pref_e + (self.start_pref_e - self.limit_pref_e) * coef + pref_f = self.limit_pref_f + (self.start_pref_f - self.limit_pref_f) * coef + pref_v = self.limit_pref_v + (self.start_pref_v - self.limit_pref_v) * coef + pref_ae = self.limit_pref_ae + (self.start_pref_ae - self.limit_pref_ae) * coef + pref_pf = self.limit_pref_pf + (self.start_pref_pf - self.limit_pref_pf) * coef + pref_gf = self.limit_pref_gf + (self.start_pref_gf - self.limit_pref_gf) * coef + + loss = paddle.zeros([1], dtype=env.GLOBAL_PD_FLOAT_PRECISION).to(env.DEVICE)[0] + more_loss = {} + # more_loss['log_keys'] = [] # showed when validation on the fly + # more_loss['test_keys'] = [] # showed when doing dp test + atom_norm = 1.0 / natoms + if self.has_e and "energy" in model_pred and "energy" in label: + energy_pred = model_pred["energy"] + energy_label = label["energy"] + if self.enable_atom_ener_coeff and "atom_energy" in model_pred: + atom_ener_pred = model_pred["atom_energy"] + # when ener_coeff (\nu) is defined, the energy is defined as + # E = \sum_i \nu_i E_i + # instead of the sum of atomic energies. + # + # A case is that we want to train reaction energy + # A + B -> C + D + # E = - E(A) - E(B) + E(C) + E(D) + # A, B, C, D could be put far away from each other + atom_ener_coeff = label["atom_ener_coeff"] + atom_ener_coeff = atom_ener_coeff.reshape(atom_ener_pred.shape) + energy_pred = paddle.sum(atom_ener_coeff * atom_ener_pred, axis=1) + find_energy = label.get("find_energy", 0.0) + pref_e = pref_e * find_energy + if not self.use_l1_all: + l2_ener_loss = paddle.mean(paddle.square(energy_pred - energy_label)) + if not self.inference: + more_loss["l2_ener_loss"] = self.display_if_exist( + l2_ener_loss.detach(), find_energy + ) + loss += atom_norm * (pref_e * l2_ener_loss) + rmse_e = l2_ener_loss.sqrt() * atom_norm + more_loss["rmse_e"] = self.display_if_exist( + rmse_e.detach(), find_energy + ) + # more_loss['log_keys'].append('rmse_e') + else: # use l1 and for all atoms + l1_ener_loss = F.l1_loss( + energy_pred.reshape([-1]), + energy_label.reshape([-1]), + reduction="sum", + ) + loss += pref_e * l1_ener_loss + more_loss["mae_e"] = self.display_if_exist( + F.l1_loss( + energy_pred.reshape([-1]), + energy_label.reshape([-1]), + reduction="mean", + ).detach(), + find_energy, + ) + # more_loss['log_keys'].append('rmse_e') + if mae: + mae_e = paddle.mean(paddle.abs(energy_pred - energy_label)) * atom_norm + more_loss["mae_e"] = self.display_if_exist(mae_e.detach(), find_energy) + mae_e_all = paddle.mean(paddle.abs(energy_pred - energy_label)) + more_loss["mae_e_all"] = self.display_if_exist( + mae_e_all.detach(), find_energy + ) + + if ( + (self.has_f or self.has_pf or self.relative_f or self.has_gf) + and "force" in model_pred + and "force" in label + ): + find_force = label.get("find_force", 0.0) + pref_f = pref_f * find_force + force_pred = model_pred["force"] + force_label = label["force"] + diff_f = (force_label - force_pred).reshape([-1]) + + if self.relative_f is not None: + force_label_3 = force_label.reshape([-1, 3]) + # norm_f = force_label_3.norm(axis=1, keepdim=True) + self.relative_f + norm_f = ( + decomp.norm(force_label_3, axis=1, keepdim=True) + self.relative_f + ) + diff_f_3 = diff_f.reshape([-1, 3]) + diff_f_3 = diff_f_3 / norm_f + diff_f = diff_f_3.reshape([-1]) + + if self.has_f: + if not self.use_l1_all: + l2_force_loss = paddle.mean(paddle.square(diff_f)) + if not self.inference: + 
more_loss["l2_force_loss"] = self.display_if_exist( + l2_force_loss.detach(), find_force + ) + loss += (pref_f * l2_force_loss).to(GLOBAL_PD_FLOAT_PRECISION) + rmse_f = l2_force_loss.sqrt() + more_loss["rmse_f"] = self.display_if_exist( + rmse_f.detach(), find_force + ) + else: + l1_force_loss = F.l1_loss(force_label, force_pred, reduction="none") + more_loss["mae_f"] = self.display_if_exist( + l1_force_loss.mean().detach(), find_force + ) + l1_force_loss = l1_force_loss.sum(-1).mean(-1).sum() + loss += (pref_f * l1_force_loss).to(GLOBAL_PD_FLOAT_PRECISION) + if mae: + mae_f = paddle.mean(paddle.abs(diff_f)) + more_loss["mae_f"] = self.display_if_exist( + mae_f.detach(), find_force + ) + + if self.has_pf and "atom_pref" in label: + atom_pref = label["atom_pref"] + find_atom_pref = label.get("find_atom_pref", 0.0) + pref_pf = pref_pf * find_atom_pref + atom_pref_reshape = atom_pref.reshape([-1]) + l2_pref_force_loss = (paddle.square(diff_f) * atom_pref_reshape).mean() + if not self.inference: + more_loss["l2_pref_force_loss"] = self.display_if_exist( + l2_pref_force_loss.detach(), find_atom_pref + ) + loss += (pref_pf * l2_pref_force_loss).to(GLOBAL_PD_FLOAT_PRECISION) + rmse_pf = l2_pref_force_loss.sqrt() + more_loss["rmse_pf"] = self.display_if_exist( + rmse_pf.detach(), find_atom_pref + ) + + if self.has_gf and "drdq" in label: + drdq = label["drdq"] + find_drdq = label.get("find_drdq", 0.0) + pref_gf = pref_gf * find_drdq + force_reshape_nframes = force_pred.reshape([-1, natoms * 3]) + force_label_reshape_nframes = force_label.reshape([-1, natoms * 3]) + drdq_reshape = drdq.reshape( + [-1, natoms * 3, self.numb_generalized_coord] + ) + + # gen_force_label = paddle.einsum( + # "bij,bi->bj", drdq_reshape, force_label_reshape_nframes + # ) + gen_force_label = ( + drdq_reshape * force_label_reshape_nframes.unsqueeze(-1) + ).sum([-2]) + + # gen_force = paddle.einsum( + # "bij,bi->bj", drdq_reshape, force_reshape_nframes + # ) + gen_force = (drdq_reshape * force_reshape_nframes.unsqueeze(-1)).sum( + [-2] + ) + + diff_gen_force = gen_force_label - gen_force + l2_gen_force_loss = paddle.square(diff_gen_force).mean() + if not self.inference: + more_loss["l2_gen_force_loss"] = self.display_if_exist( + l2_gen_force_loss.detach(), find_drdq + ) + loss += (pref_gf * l2_gen_force_loss).to(GLOBAL_PD_FLOAT_PRECISION) + rmse_gf = l2_gen_force_loss.sqrt() + more_loss["rmse_gf"] = self.display_if_exist( + rmse_gf.detach(), find_drdq + ) + + if self.has_v and "virial" in model_pred and "virial" in label: + find_virial = label.get("find_virial", 0.0) + pref_v = pref_v * find_virial + diff_v = label["virial"] - model_pred["virial"].reshape([-1, 9]) + l2_virial_loss = paddle.mean(paddle.square(diff_v)) + if not self.inference: + more_loss["l2_virial_loss"] = self.display_if_exist( + l2_virial_loss.detach(), find_virial + ) + loss += atom_norm * (pref_v * l2_virial_loss) + rmse_v = l2_virial_loss.sqrt() * atom_norm + more_loss["rmse_v"] = self.display_if_exist(rmse_v.detach(), find_virial) + if mae: + mae_v = paddle.mean(paddle.abs(diff_v)) * atom_norm + more_loss["mae_v"] = self.display_if_exist(mae_v.detach(), find_virial) + + if self.has_ae and "atom_energy" in model_pred and "atom_ener" in label: + atom_ener = model_pred["atom_energy"] + atom_ener_label = label["atom_ener"] + find_atom_ener = label.get("find_atom_ener", 0.0) + pref_ae = pref_ae * find_atom_ener + atom_ener_reshape = atom_ener.reshape([-1]) + atom_ener_label_reshape = atom_ener_label.reshape([-1]) + l2_atom_ener_loss = paddle.square( + 
atom_ener_label_reshape - atom_ener_reshape + ).mean() + if not self.inference: + more_loss["l2_atom_ener_loss"] = self.display_if_exist( + l2_atom_ener_loss.detach(), find_atom_ener + ) + loss += (pref_ae * l2_atom_ener_loss).to(GLOBAL_PD_FLOAT_PRECISION) + rmse_ae = l2_atom_ener_loss.sqrt() + more_loss["rmse_ae"] = self.display_if_exist( + rmse_ae.detach(), find_atom_ener + ) + + if not self.inference: + more_loss["rmse"] = paddle.sqrt(loss.detach()) + return model_pred, loss, more_loss + + @property + def label_requirement(self) -> list[DataRequirementItem]: + """Return data label requirements needed for this loss calculation.""" + label_requirement = [] + if self.has_e: + label_requirement.append( + DataRequirementItem( + "energy", + ndof=1, + atomic=False, + must=False, + high_prec=True, + ) + ) + if self.has_f: + label_requirement.append( + DataRequirementItem( + "force", + ndof=3, + atomic=True, + must=False, + high_prec=False, + ) + ) + if self.has_v: + label_requirement.append( + DataRequirementItem( + "virial", + ndof=9, + atomic=False, + must=False, + high_prec=False, + ) + ) + if self.has_ae: + label_requirement.append( + DataRequirementItem( + "atom_ener", + ndof=1, + atomic=True, + must=False, + high_prec=False, + ) + ) + if self.has_pf: + label_requirement.append( + DataRequirementItem( + "atom_pref", + ndof=1, + atomic=True, + must=False, + high_prec=False, + repeat=3, + ) + ) + if self.has_gf > 0: + label_requirement.append( + DataRequirementItem( + "drdq", + ndof=self.numb_generalized_coord * 3, + atomic=True, + must=False, + high_prec=False, + ) + ) + if self.enable_atom_ener_coeff: + label_requirement.append( + DataRequirementItem( + "atom_ener_coeff", + ndof=1, + atomic=True, + must=False, + high_prec=False, + default=1.0, + ) + ) + return label_requirement diff --git a/deepmd/pd/loss/loss.py b/deepmd/pd/loss/loss.py new file mode 100644 index 0000000000..f825f9ff61 --- /dev/null +++ b/deepmd/pd/loss/loss.py @@ -0,0 +1,63 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from abc import ( + ABC, + abstractmethod, +) + +import paddle + +from deepmd.utils.data import ( + DataRequirementItem, +) +from deepmd.utils.plugin import ( + make_plugin_registry, +) + + +class TaskLoss(paddle.nn.Layer, ABC, make_plugin_registry("loss")): + def __init__(self, **kwargs): + """Construct loss.""" + super().__init__() + + def forward(self, input_dict, model, label, natoms, learning_rate): + """Return loss .""" + raise NotImplementedError + + @property + @abstractmethod + def label_requirement(self) -> list[DataRequirementItem]: + """Return data label requirements needed for this loss calculation.""" + pass + + @staticmethod + def display_if_exist(loss: paddle.Tensor, find_property: float) -> paddle.Tensor: + """Display NaN if labeled property is not found. + + Parameters + ---------- + loss : paddle.Tensor + the loss tensor + find_property : float + whether the property is found + """ + return loss if bool(find_property) else paddle.to_tensor(float("nan")) + + @classmethod + def get_loss(cls, loss_params: dict) -> "TaskLoss": + """Get the loss module by the parameters. + + By default, all the parameters are directly passed to the constructor. + If not, override this method. 
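`get_loss` relies on the plugin registry mixed in by `make_plugin_registry`. The block below is a self-contained sketch of the registry pattern, not deepmd's implementation; the `LossRegistry` name and the `type`-popping convention are hypothetical, chosen to mirror the `register`/`get_class_by_type` interface:

```python
class LossRegistry:
    _registry: dict[str, type] = {}

    @classmethod
    def register(cls, name: str):
        def decorator(subclass: type) -> type:
            cls._registry[name] = subclass
            return subclass
        return decorator

    @classmethod
    def get_class_by_type(cls, name: str) -> type:
        return cls._registry[name]

    @classmethod
    def get_loss(cls, params: dict) -> "LossRegistry":
        params = dict(params)
        loss_type = params.pop("type", "ener")  # illustrative convention
        return cls.get_class_by_type(loss_type)(**params)


@LossRegistry.register("ener")
class EnergyLoss(LossRegistry):
    def __init__(self, start_pref_e: float = 0.02, **kwargs) -> None:
        self.start_pref_e = start_pref_e


loss = LossRegistry.get_loss({"type": "ener", "start_pref_e": 0.1})
print(type(loss).__name__, loss.start_pref_e)  # EnergyLoss 0.1
```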
+
+        Parameters
+        ----------
+        loss_params : dict
+            The loss parameters
+
+        Returns
+        -------
+        TaskLoss
+            The loss module
+        """
+        loss = cls(**loss_params)
+        return loss
diff --git a/deepmd/pd/model/__init__.py b/deepmd/pd/model/__init__.py
new file mode 100644
index 0000000000..6ceb116d85
--- /dev/null
+++ b/deepmd/pd/model/__init__.py
@@ -0,0 +1 @@
+# SPDX-License-Identifier: LGPL-3.0-or-later
diff --git a/deepmd/pd/model/atomic_model/__init__.py b/deepmd/pd/model/atomic_model/__init__.py
new file mode 100644
index 0000000000..68a7cc8f79
--- /dev/null
+++ b/deepmd/pd/model/atomic_model/__init__.py
@@ -0,0 +1,31 @@
+# SPDX-License-Identifier: LGPL-3.0-or-later
+"""The atomic model provides the prediction of some property on each
+atom. Atomic models are not supposed to be directly accessed
+by users; they provide a convenient interface for the
+implementation of models.
+
+Taking the energy models for example, the developers only need to
+implement the atomic energy prediction via an atomic model, and the
+model can be automatically made by the `deepmd.dpmodel.make_model`
+method. The `DPModel` is made by
+```
+DPModel = make_model(DPAtomicModel)
+```
+
+"""
+
+from .base_atomic_model import (
+    BaseAtomicModel,
+)
+from .dp_atomic_model import (
+    DPAtomicModel,
+)
+from .energy_atomic_model import (
+    DPEnergyAtomicModel,
+)
+
+__all__ = [
+    "BaseAtomicModel",
+    "DPAtomicModel",
+    "DPEnergyAtomicModel",
+]
diff --git a/deepmd/pd/model/atomic_model/base_atomic_model.py b/deepmd/pd/model/atomic_model/base_atomic_model.py
new file mode 100644
index 0000000000..1100813fb4
--- /dev/null
+++ b/deepmd/pd/model/atomic_model/base_atomic_model.py
@@ -0,0 +1,579 @@
+# SPDX-License-Identifier: LGPL-3.0-or-later
+
+import copy
+import logging
+from typing import (
+    Callable,
+    Optional,
+    Union,
+)
+
+import numpy as np
+import paddle
+
+from deepmd.dpmodel.atomic_model import (
+    make_base_atomic_model,
+)
+from deepmd.dpmodel.output_def import (
+    FittingOutputDef,
+    OutputVariableDef,
+)
+from deepmd.pd.utils import (
+    AtomExcludeMask,
+    PairExcludeMask,
+    env,
+)
+from deepmd.pd.utils.nlist import (
+    extend_input_and_build_neighbor_list,
+)
+from deepmd.pd.utils.stat import (
+    compute_output_stats,
+)
+from deepmd.pd.utils.utils import (
+    to_numpy_array,
+    to_paddle_tensor,
+)
+from deepmd.utils.finetune import (
+    get_index_between_two_maps,
+    map_atom_exclude_types,
+    map_pair_exclude_types,
+)
+from deepmd.utils.path import (
+    DPPath,
+)
+
+log = logging.getLogger(__name__)
+dtype = env.GLOBAL_PD_FLOAT_PRECISION
+device = env.DEVICE
+
+BaseAtomicModel_ = make_base_atomic_model(paddle.Tensor)
+
+
+class BaseAtomicModel(paddle.nn.Layer, BaseAtomicModel_):
+    """The base of atomic model.
+
+    Parameters
+    ----------
+    type_map
+        Mapping atom type to the name (str) of the type.
+        For example `type_map[1]` gives the name of the type 1.
+    atom_exclude_types
+        Exclude the atomic contribution of the given types
+    pair_exclude_types
+        Exclude the pair of atoms of the given types from computing the output
+        of the atomic model. Implemented by removing the pairs from the nlist.
+    rcond : float, optional
+        The condition number for the regression of atomic energy.
+    preset_out_bias : Dict[str, list[Optional[paddle.Tensor]]], optional
+        Specifying atomic energy contribution in vacuum. Given by key:value pairs.
+        The value is a list specifying the bias; the elements can be None or an np.array of the output shape.
+        For example: [None, [2.]] means type 0 is not set, type 1 is set to [2.]
+ The `set_davg_zero` key in the descriptor should be set. + + """ + + def __init__( + self, + type_map: list[str], + atom_exclude_types: list[int] = [], + pair_exclude_types: list[tuple[int, int]] = [], + rcond: Optional[float] = None, + preset_out_bias: Optional[dict[str, np.ndarray]] = None, + ): + paddle.nn.Layer.__init__(self) + BaseAtomicModel_.__init__(self) + self.type_map = type_map + self.reinit_atom_exclude(atom_exclude_types) + self.reinit_pair_exclude(pair_exclude_types) + self.rcond = rcond + self.preset_out_bias = preset_out_bias + + def init_out_stat(self): + """Initialize the output bias.""" + ntypes = self.get_ntypes() + self.bias_keys: list[str] = list(self.fitting_output_def().keys()) + self.max_out_size = max( + [self.atomic_output_def()[kk].size for kk in self.bias_keys] + ) + self.n_out = len(self.bias_keys) + out_bias_data = self._default_bias() + out_std_data = self._default_std() + self.register_buffer("out_bias", out_bias_data) + self.register_buffer("out_std", out_std_data) + + def set_out_bias(self, out_bias: paddle.Tensor) -> None: + self.out_bias = out_bias + + def __setitem__(self, key, value): + if key in ["out_bias"]: + self.out_bias = value + elif key in ["out_std"]: + self.out_std = value + else: + raise KeyError(key) + + def __getitem__(self, key): + if key in ["out_bias"]: + return self.out_bias + elif key in ["out_std"]: + return self.out_std + else: + raise KeyError(key) + + def get_type_map(self) -> list[str]: + """Get the type map.""" + return self.type_map + + def reinit_atom_exclude( + self, + exclude_types: list[int] = [], + ): + self.atom_exclude_types = exclude_types + if exclude_types == []: + self.atom_excl = None + else: + self.atom_excl = AtomExcludeMask(self.get_ntypes(), self.atom_exclude_types) + + def reinit_pair_exclude( + self, + exclude_types: list[tuple[int, int]] = [], + ): + self.pair_exclude_types = exclude_types + if exclude_types == []: + self.pair_excl = None + else: + self.pair_excl = PairExcludeMask(self.get_ntypes(), self.pair_exclude_types) + + # to make jit happy... + def make_atom_mask( + self, + atype: paddle.Tensor, + ) -> paddle.Tensor: + """The atoms with type < 0 are treated as virtual atoms, + which serves as place-holders for multi-frame calculations + with different number of atoms in different frames. + + Parameters + ---------- + atype + Atom types. >= 0 for real atoms <0 for virtual atoms. + + Returns + ------- + mask + True for real atoms and False for virtual atoms. + + """ + # supposed to be supported by all backends + return atype >= 0 + + def atomic_output_def(self) -> FittingOutputDef: + old_def = self.fitting_output_def() + old_list = list(old_def.get_data().values()) + return FittingOutputDef( + old_list # noqa:RUF005 + + [ + OutputVariableDef( + name="mask", + shape=[1], + reducible=False, + r_differentiable=False, + c_differentiable=False, + ) + ] + ) + + def forward_common_atomic( + self, + extended_coord: paddle.Tensor, + extended_atype: paddle.Tensor, + nlist: paddle.Tensor, + mapping: Optional[paddle.Tensor] = None, + fparam: Optional[paddle.Tensor] = None, + aparam: Optional[paddle.Tensor] = None, + comm_dict: Optional[dict[str, paddle.Tensor]] = None, + ) -> dict[str, paddle.Tensor]: + """Common interface for atomic inference. + + This method accept extended coordinates, extended atom typs, neighbor list, + and predict the atomic contribution of the fit property. 
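In the body that follows, pair exclusion is implemented purely on the neighbor list: excluded pairs are overwritten with -1, the value already used for padding, so downstream code ignores them. A paddle sketch with made-up numbers:

```python
import paddle

nlist = paddle.to_tensor([[[1, 2, -1]]])     # nf x nloc x nsel, -1 pads
pair_mask = paddle.to_tensor([[[1, 0, 1]]])  # 0 marks an excluded pair
nlist = paddle.where(pair_mask == 1, nlist, paddle.full_like(nlist, -1))
print(nlist.numpy())  # [[[ 1 -1 -1]]]
```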
+ + Parameters + ---------- + extended_coord + extended coordinates, shape: nf x (nall x 3) + extended_atype + extended atom typs, shape: nf x nall + for a type < 0 indicating the atomic is virtual. + nlist + neighbor list, shape: nf x nloc x nsel + mapping + extended to local index mapping, shape: nf x nall + fparam + frame parameters, shape: nf x dim_fparam + aparam + atomic parameter, shape: nf x nloc x dim_aparam + comm_dict + The data needed for communication for parallel inference. + + Returns + ------- + ret_dict + dict of output atomic properties. + should implement the definition of `fitting_output_def`. + ret_dict["mask"] of shape nf x nloc will be provided. + ret_dict["mask"][ff,ii] == 1 indicating the ii-th atom of the ff-th frame is real. + ret_dict["mask"][ff,ii] == 0 indicating the ii-th atom of the ff-th frame is virtual. + + """ + _, nloc, _ = nlist.shape + atype = extended_atype[:, :nloc] + + if self.pair_excl is not None: + pair_mask = self.pair_excl(nlist, extended_atype) + # exclude neighbors in the nlist + nlist = paddle.where(pair_mask == 1, nlist, -1) + + ext_atom_mask = self.make_atom_mask(extended_atype) + ret_dict = self.forward_atomic( + extended_coord, + paddle.where( + ext_atom_mask, extended_atype, paddle.zeros_like(extended_atype) + ), + nlist, + mapping=mapping, + fparam=fparam, + aparam=aparam, + comm_dict=comm_dict, + ) + ret_dict = self.apply_out_stat(ret_dict, atype) + + # nf x nloc + atom_mask = ext_atom_mask[:, :nloc].astype(paddle.int32) + if self.atom_excl is not None: + atom_mask *= self.atom_excl(atype) + + for kk in ret_dict.keys(): + out_shape = ret_dict[kk].shape + out_shape2 = 1 + for ss in out_shape[2:]: + out_shape2 *= ss + ret_dict[kk] = ( + ret_dict[kk].reshape([out_shape[0], out_shape[1], out_shape2]) + * atom_mask.unsqueeze(2).astype(ret_dict[kk].dtype) + ).reshape(out_shape) + ret_dict["mask"] = atom_mask + + return ret_dict + + def forward( + self, + extended_coord: paddle.Tensor, + extended_atype: paddle.Tensor, + nlist: paddle.Tensor, + mapping: Optional[paddle.Tensor] = None, + fparam: Optional[paddle.Tensor] = None, + aparam: Optional[paddle.Tensor] = None, + comm_dict: Optional[dict[str, paddle.Tensor]] = None, + ) -> dict[str, paddle.Tensor]: + return self.forward_common_atomic( + extended_coord, + extended_atype, + nlist, + mapping=mapping, + fparam=fparam, + aparam=aparam, + comm_dict=comm_dict, + ) + + def change_type_map( + self, type_map: list[str], model_with_new_type_stat=None + ) -> None: + """Change the type related params to new ones, according to `type_map` and the original one in the model. + If there are new types in `type_map`, statistics will be updated accordingly to `model_with_new_type_stat` for these new types. 
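The body below pads the per-type bias tables with neutral rows for unseen types and then reorders the type axis by a remap index. A simplified paddle sketch of that tensor surgery; deepmd's `get_index_between_two_maps` actually addresses the appended rows by negative indices, which is glossed over here:

```python
import paddle

out_bias = paddle.to_tensor([[[1.0], [2.0]]])  # [n_out=1, ntypes=2, size=1]
# new type map: old type 1 first, then old type 0, then one brand-new type
remap_index = [1, 0, 2]
n_new = 3

extend = paddle.zeros([1, n_new - out_bias.shape[1], 1], dtype=out_bias.dtype)
out_bias = paddle.concat([out_bias, extend], axis=1)  # zero bias for new types
out_bias = out_bias[:, remap_index, :]                # reorder to the new map
print(out_bias.numpy().reshape(-1))                   # [2. 1. 0.]
```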
+ """ + remap_index, has_new_type = get_index_between_two_maps(self.type_map, type_map) + self.type_map = type_map + self.reinit_atom_exclude( + map_atom_exclude_types(self.atom_exclude_types, remap_index) + ) + self.reinit_pair_exclude( + map_pair_exclude_types(self.pair_exclude_types, remap_index) + ) + if has_new_type: + extend_shape = [ + self.out_bias.shape[0], + len(type_map), + *list(self.out_bias.shape[2:]), + ] + extend_bias = paddle.zeros(extend_shape, dtype=self.out_bias.dtype).to( + device=self.out_bias.place + ) + self.out_bias = paddle.concat([self.out_bias, extend_bias], axis=1) + extend_std = paddle.ones(extend_shape, dtype=self.out_std.dtype).to( + device=self.out_std.place + ) + self.out_std = paddle.concat([self.out_std, extend_std], axis=1) + self.out_bias = self.out_bias[:, remap_index, :] + self.out_std = self.out_std[:, remap_index, :] + + def serialize(self) -> dict: + return { + "type_map": self.type_map, + "atom_exclude_types": self.atom_exclude_types, + "pair_exclude_types": self.pair_exclude_types, + "rcond": self.rcond, + "preset_out_bias": self.preset_out_bias, + "@variables": { + "out_bias": to_numpy_array(self.out_bias), + "out_std": to_numpy_array(self.out_std), + }, + } + + @classmethod + def deserialize(cls, data: dict) -> "BaseAtomicModel": + data = copy.deepcopy(data) + variables = data.pop("@variables", None) + variables = ( + {"out_bias": None, "out_std": None} if variables is None else variables + ) + obj = cls(**data) + obj["out_bias"] = ( + to_paddle_tensor(variables["out_bias"]) + if variables["out_bias"] is not None + else obj._default_bias() + ) + obj["out_std"] = ( + to_paddle_tensor(variables["out_std"]) + if variables["out_std"] is not None + else obj._default_std() + ) + return obj + + def compute_or_load_stat( + self, + merged: Union[Callable[[], list[dict]], list[dict]], + stat_file_path: Optional[DPPath] = None, + ): + """ + Compute the output statistics (e.g. energy bias) for the fitting net from packed data. + + Parameters + ---------- + merged : Union[Callable[[], list[dict]], list[dict]] + - list[dict]: A list of data samples from various data systems. + Each element, `merged[i]`, is a data dictionary containing `keys`: `paddle.Tensor` + originating from the `i`-th data system. + - Callable[[], list[dict]]: A lazy function that returns data samples in the above format + only when needed. Since the sampling process can be slow and memory-intensive, + the lazy function helps by only sampling once. + stat_file_path : Optional[DPPath] + The path to the stat file. + + """ + raise NotImplementedError + + def compute_or_load_out_stat( + self, + merged: Union[Callable[[], list[dict]], list[dict]], + stat_file_path: Optional[DPPath] = None, + ): + """ + Compute the output statistics (e.g. energy bias) for the fitting net from packed data. + + Parameters + ---------- + merged : Union[Callable[[], list[dict]], list[dict]] + - list[dict]: A list of data samples from various data systems. + Each element, `merged[i]`, is a data dictionary containing `keys`: `paddle.Tensor` + originating from the `i`-th data system. + - Callable[[], list[dict]]: A lazy function that returns data samples in the above format + only when needed. Since the sampling process can be slow and memory-intensive, + the lazy function helps by only sampling once. + stat_file_path : Optional[DPPath] + The path to the stat file. 
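The `merged` argument is either a materialized list of samples or a zero-argument callable; callers such as `DPAtomicModel.compute_or_load_stat` further below wrap the callable with `functools.lru_cache` so the expensive sampling runs at most once. A sketch of the pattern:

```python
import functools
from typing import Callable, Union


def expensive_sampling() -> list[dict]:
    print("sampling...")  # pretend this reads frames from disk
    return [{"energy": [-1.0]}, {"energy": [-2.0]}]


def resolve(merged: Union[Callable[[], list[dict]], list[dict]]) -> list[dict]:
    return merged() if callable(merged) else merged


lazy = functools.lru_cache(maxsize=None)(expensive_sampling)
resolve(lazy)                   # prints "sampling..." once
resolve(lazy)                   # cached: no second print
resolve([{"energy": [-3.0]}])   # a materialized list passes through
```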
+ + """ + self.change_out_bias( + merged, + stat_file_path=stat_file_path, + bias_adjust_mode="set-by-statistic", + ) + + def apply_out_stat( + self, + ret: dict[str, paddle.Tensor], + atype: paddle.Tensor, + ): + """Apply the stat to each atomic output. + The developer may override the method to define how the bias is applied + to the atomic output of the model. + + Parameters + ---------- + ret + The returned dict by the forward_atomic method + atype + The atom types. nf x nloc + + """ + out_bias, out_std = self._fetch_out_stat(self.bias_keys) + for kk in self.bias_keys: + # nf x nloc x odims, out_bias: ntypes x odims + ret[kk] = ret[kk] + out_bias[kk][atype] + return ret + + def change_out_bias( + self, + sample_merged, + stat_file_path: Optional[DPPath] = None, + bias_adjust_mode="change-by-statistic", + ) -> None: + """Change the output bias according to the input data and the pretrained model. + + Parameters + ---------- + sample_merged : Union[Callable[[], list[dict]], list[dict]] + - list[dict]: A list of data samples from various data systems. + Each element, `merged[i]`, is a data dictionary containing `keys`: `paddle.Tensor` + originating from the `i`-th data system. + - Callable[[], list[dict]]: A lazy function that returns data samples in the above format + only when needed. Since the sampling process can be slow and memory-intensive, + the lazy function helps by only sampling once. + bias_adjust_mode : str + The mode for changing output bias : ['change-by-statistic', 'set-by-statistic'] + 'change-by-statistic' : perform predictions on labels of target dataset, + and do least square on the errors to obtain the target shift as bias. + 'set-by-statistic' : directly use the statistic output bias in the target dataset. + stat_file_path : Optional[DPPath] + The path to the stat file. 
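For energies, the 'set-by-statistic' mode amounts to a least-squares fit of per-type biases: with a per-frame matrix of type counts `A` and total energies `E`, solve `A @ b ≈ E` for `b`. The real work happens inside `compute_output_stats` (not shown in this hunk, and it also handles preset biases and the output std), but the idea can be sketched with numpy:

```python
import numpy as np

# 3 frames of an O/H system: columns = count of each type per frame
type_counts = np.array([[2, 4], [1, 2], [3, 6]], dtype=float)  # [nframes, ntypes]
energies = np.array([-30.0, -15.0, -45.0])                     # [nframes]

bias, *_ = np.linalg.lstsq(type_counts, energies, rcond=None)  # rcond mirrors self.rcond
print(bias)  # per-type energy bias, here approximately [-3., -6.]
```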
+ """ + if bias_adjust_mode == "change-by-statistic": + delta_bias, out_std = compute_output_stats( + sample_merged, + self.get_ntypes(), + keys=list(self.atomic_output_def().keys()), + stat_file_path=stat_file_path, + model_forward=self._get_forward_wrapper_func(), + rcond=self.rcond, + preset_bias=self.preset_out_bias, + atomic_output=self.atomic_output_def(), + ) + self._store_out_stat(delta_bias, out_std, add=True) + elif bias_adjust_mode == "set-by-statistic": + bias_out, std_out = compute_output_stats( + sample_merged, + self.get_ntypes(), + keys=list(self.atomic_output_def().keys()), + stat_file_path=stat_file_path, + rcond=self.rcond, + preset_bias=self.preset_out_bias, + atomic_output=self.atomic_output_def(), + ) + self._store_out_stat(bias_out, std_out) + else: + raise RuntimeError("Unknown bias_adjust_mode mode: " + bias_adjust_mode) + + def _get_forward_wrapper_func(self) -> Callable[..., paddle.Tensor]: + """Get a forward wrapper of the atomic model for output bias calculation.""" + + def model_forward(coord, atype, box, fparam=None, aparam=None): + with ( + paddle.no_grad() + ): # it's essential for pure paddle forward function to use auto_batchsize + ( + extended_coord, + extended_atype, + mapping, + nlist, + ) = extend_input_and_build_neighbor_list( + coord, + atype, + self.get_rcut(), + self.get_sel(), + mixed_types=self.mixed_types(), + box=box, + ) + atomic_ret = self.forward_common_atomic( + extended_coord, + extended_atype, + nlist, + mapping=mapping, + fparam=fparam, + aparam=aparam, + ) + return {kk: vv.detach() for kk, vv in atomic_ret.items()} + + return model_forward + + def _default_bias(self): + ntypes = self.get_ntypes() + return paddle.zeros([self.n_out, ntypes, self.max_out_size], dtype=dtype).to( + device=device + ) + + def _default_std(self): + ntypes = self.get_ntypes() + return paddle.ones([self.n_out, ntypes, self.max_out_size], dtype=dtype).to( + device=device + ) + + def _varsize( + self, + shape: list[int], + ) -> int: + output_size = 1 + len_shape = len(shape) + for i in range(len_shape): + output_size *= shape[i] + return output_size + + def _get_bias_index( + self, + kk: str, + ) -> int: + res: list[int] = [] + for i, e in enumerate(self.bias_keys): + if e == kk: + res.append(i) + assert len(res) == 1 + return res[0] + + def _store_out_stat( + self, + out_bias: dict[str, paddle.Tensor], + out_std: dict[str, paddle.Tensor], + add: bool = False, + ): + ntypes = self.get_ntypes() + out_bias_data = paddle.clone(self.out_bias) + out_std_data = paddle.clone(self.out_std) + for kk in out_bias.keys(): + assert kk in out_std.keys() + idx = self._get_bias_index(kk) + size = self._varsize(self.atomic_output_def()[kk].shape) + if not add: + out_bias_data[idx, :, :size] = out_bias[kk].reshape([ntypes, size]) + else: + out_bias_data[idx, :, :size] += out_bias[kk].reshape([ntypes, size]) + out_std_data[idx, :, :size] = out_std[kk].reshape([ntypes, size]) + paddle.assign(out_bias_data, self.out_bias) + paddle.assign(out_std_data, self.out_std) + + def _fetch_out_stat( + self, + keys: list[str], + ) -> tuple[dict[str, paddle.Tensor], dict[str, paddle.Tensor]]: + ret_bias = {} + ret_std = {} + ntypes = self.get_ntypes() + for kk in keys: + idx = self._get_bias_index(kk) + isize = self._varsize(self.atomic_output_def()[kk].shape) + ret_bias[kk] = self.out_bias[idx, :, :isize].reshape( + [ntypes] + list(self.atomic_output_def()[kk].shape) # noqa: RUF005 + ) + ret_std[kk] = self.out_std[idx, :, :isize].reshape( + [ntypes] + list(self.atomic_output_def()[kk].shape) # 
noqa: RUF005 + ) + return ret_bias, ret_std diff --git a/deepmd/pd/model/atomic_model/dp_atomic_model.py b/deepmd/pd/model/atomic_model/dp_atomic_model.py new file mode 100644 index 0000000000..47b881e0cc --- /dev/null +++ b/deepmd/pd/model/atomic_model/dp_atomic_model.py @@ -0,0 +1,333 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import copy +import functools +import logging +from typing import ( + Optional, +) + +import paddle + +from deepmd.dpmodel import ( + FittingOutputDef, +) +from deepmd.pd.model.descriptor.base_descriptor import ( + BaseDescriptor, +) +from deepmd.pd.model.task.base_fitting import ( + BaseFitting, +) +from deepmd.utils.path import ( + DPPath, +) +from deepmd.utils.version import ( + check_version_compatibility, +) + +from .base_atomic_model import ( + BaseAtomicModel, +) + +log = logging.getLogger(__name__) + + +@BaseAtomicModel.register("standard") +class DPAtomicModel(BaseAtomicModel): + """Model give atomic prediction of some physical property. + + Parameters + ---------- + descriptor + Descriptor + fitting_net + Fitting net + type_map + Mapping atom type to the name (str) of the type. + For example `type_map[1]` gives the name of the type 1. + """ + + def __init__( + self, + descriptor, + fitting, + type_map: list[str], + **kwargs, + ): + super().__init__(type_map, **kwargs) + ntypes = len(type_map) + self.type_map = type_map + self.ntypes = ntypes + self.descriptor = descriptor + self.rcut = self.descriptor.get_rcut() + self.sel = self.descriptor.get_sel() + self.fitting_net = fitting + super().init_out_stat() + self.enable_eval_descriptor_hook = False + self.eval_descriptor_list = [] + + # register 'type_map' as buffer + def _string_to_array(s: str) -> list[int]: + return [ord(c) for c in s] + + self.register_buffer( + "buffer_type_map", + paddle.to_tensor(_string_to_array(" ".join(self.type_map)), dtype="int32"), + ) + self.buffer_type_map.name = "buffer_type_map" + if hasattr(self.descriptor, "has_message_passing"): + # register 'has_message_passing' as buffer(cast to int32 as problems may meets with vector) + self.register_buffer( + "buffer_has_message_passing", + paddle.to_tensor(self.descriptor.has_message_passing(), dtype="int32"), + ) + self.buffer_has_message_passing.name = "buffer_has_message_passing" + # register 'ntypes' as buffer + self.register_buffer( + "buffer_ntypes", paddle.to_tensor(self.ntypes, dtype="int32") + ) + self.buffer_ntypes.name = "buffer_ntypes" + # register 'rcut' as buffer + self.register_buffer( + "buffer_rcut", paddle.to_tensor(self.rcut, dtype="float64") + ) + self.buffer_rcut.name = "buffer_rcut" + if hasattr(self.fitting_net, "get_dim_fparam"): + # register 'dfparam' as buffer + self.register_buffer( + "buffer_dfparam", + paddle.to_tensor(self.fitting_net.get_dim_fparam(), dtype="int32"), + ) + self.buffer_dfparam.name = "buffer_dfparam" + if hasattr(self.fitting_net, "get_dim_aparam"): + # register 'daparam' as buffer + self.register_buffer( + "buffer_daparam", + paddle.to_tensor(self.fitting_net.get_dim_aparam(), dtype="int32"), + ) + self.buffer_daparam.name = "buffer_daparam" + # register 'aparam_nall' as buffer + self.register_buffer( + "buffer_aparam_nall", + paddle.to_tensor(False, dtype="int32"), + ) + self.buffer_aparam_nall.name = "buffer_aparam_nall" + + eval_descriptor_list: list[paddle.Tensor] + + def set_eval_descriptor_hook(self, enable: bool) -> None: + """Set the hook for evaluating descriptor and clear the cache for descriptor list.""" + self.enable_eval_descriptor_hook = enable + 
self.eval_descriptor_list = [] + + def eval_descriptor(self) -> paddle.Tensor: + """Evaluate the descriptor.""" + return paddle.concat(self.eval_descriptor_list) + + def fitting_output_def(self) -> FittingOutputDef: + """Get the output def of the fitting net.""" + return ( + self.fitting_net.output_def() + if self.fitting_net is not None + else self.coord_denoise_net.output_def() + ) + + def get_rcut(self) -> float: + """Get the cut-off radius.""" + return self.rcut + + def get_sel(self) -> list[int]: + """Get the neighbor selection.""" + return self.sel + + def mixed_types(self) -> bool: + """If true, the model + 1. assumes total number of atoms aligned across frames; + 2. uses a neighbor list that does not distinguish different atomic types. + + If false, the model + 1. assumes total number of atoms of each atom type aligned across frames; + 2. uses a neighbor list that distinguishes different atomic types. + + """ + return self.descriptor.mixed_types() + + def change_type_map( + self, type_map: list[str], model_with_new_type_stat=None + ) -> None: + """Change the type related params to new ones, according to `type_map` and the original one in the model. + If there are new types in `type_map`, statistics will be updated accordingly to `model_with_new_type_stat` for these new types. + """ + super().change_type_map( + type_map=type_map, model_with_new_type_stat=model_with_new_type_stat + ) + self.type_map = type_map + self.ntypes = len(type_map) + self.descriptor.change_type_map( + type_map=type_map, + model_with_new_type_stat=model_with_new_type_stat.descriptor + if model_with_new_type_stat is not None + else None, + ) + self.fitting_net.change_type_map(type_map=type_map) + + def has_message_passing(self) -> bool: + """Returns whether the atomic model has message passing.""" + return self.descriptor.has_message_passing() + + def need_sorted_nlist_for_lower(self) -> bool: + """Returns whether the atomic model needs sorted nlist when using `forward_lower`.""" + return self.descriptor.need_sorted_nlist_for_lower() + + def serialize(self) -> dict: + dd = BaseAtomicModel.serialize(self) + dd.update( + { + "@class": "Model", + "@version": 2, + "type": "standard", + "type_map": self.type_map, + "descriptor": self.descriptor.serialize(), + "fitting": self.fitting_net.serialize(), + } + ) + return dd + + @classmethod + def deserialize(cls, data) -> "DPAtomicModel": + data = copy.deepcopy(data) + check_version_compatibility(data.pop("@version", 1), 2, 1) + data.pop("@class", None) + data.pop("type", None) + descriptor_obj = BaseDescriptor.deserialize(data.pop("descriptor")) + fitting_obj = BaseFitting.deserialize(data.pop("fitting")) + data["descriptor"] = descriptor_obj + data["fitting"] = fitting_obj + obj = super().deserialize(data) + return obj + + def forward_atomic( + self, + extended_coord, + extended_atype, + nlist, + mapping: Optional[paddle.Tensor] = None, + fparam: Optional[paddle.Tensor] = None, + aparam: Optional[paddle.Tensor] = None, + comm_dict: Optional[dict[str, paddle.Tensor]] = None, + ) -> dict[str, paddle.Tensor]: + """Return atomic prediction. + + Parameters + ---------- + extended_coord + coordinates in extended region + extended_atype + atomic type in extended region + nlist + neighbor list. nf x nloc x nsel + mapping + mapps the extended indices to local indices + fparam + frame parameter. nf x ndf + aparam + atomic parameter. nf x nloc x nda + + Returns + ------- + result_dict + the result dict, defined by the `FittingOutputDef`. 
+ + """ + nframes, nloc, nnei = nlist.shape + atype = extended_atype[:, :nloc] + if self.do_grad_r() or self.do_grad_c(): + extended_coord.stop_gradient = False + descriptor, rot_mat, g2, h2, sw = self.descriptor( + extended_coord, + extended_atype, + nlist, + mapping=mapping, + comm_dict=comm_dict, + ) + assert descriptor is not None + if self.enable_eval_descriptor_hook: + self.eval_descriptor_list.append(descriptor) + # energy, force + fit_ret = self.fitting_net( + descriptor, + atype, + gr=rot_mat, + g2=g2, + h2=h2, + fparam=fparam, + aparam=aparam, + ) + return fit_ret + + def get_out_bias(self) -> paddle.Tensor: + return self.out_bias + + def compute_or_load_stat( + self, + sampled_func, + stat_file_path: Optional[DPPath] = None, + ): + """ + Compute or load the statistics parameters of the model, + such as mean and standard deviation of descriptors or the energy bias of the fitting net. + When `sampled` is provided, all the statistics parameters will be calculated (or re-calculated for update), + and saved in the `stat_file_path`(s). + When `sampled` is not provided, it will check the existence of `stat_file_path`(s) + and load the calculated statistics parameters. + + Parameters + ---------- + sampled_func + The lazy sampled function to get data frames from different data systems. + stat_file_path + The dictionary of paths to the statistics files. + """ + if stat_file_path is not None and self.type_map is not None: + # descriptors and fitting net with different type_map + # should not share the same parameters + stat_file_path /= " ".join(self.type_map) + + @functools.lru_cache + def wrapped_sampler(): + sampled = sampled_func() + if self.pair_excl is not None: + pair_exclude_types = self.pair_excl.get_exclude_types() + for sample in sampled: + sample["pair_exclude_types"] = list(pair_exclude_types) + if self.atom_excl is not None: + atom_exclude_types = self.atom_excl.get_exclude_types() + for sample in sampled: + sample["atom_exclude_types"] = list(atom_exclude_types) + return sampled + + self.descriptor.compute_input_stats(wrapped_sampler, stat_file_path) + self.compute_or_load_out_stat(wrapped_sampler, stat_file_path) + + def get_dim_fparam(self) -> int: + """Get the number (dimension) of frame parameters of this atomic model.""" + return self.fitting_net.get_dim_fparam() + + def get_dim_aparam(self) -> int: + """Get the number (dimension) of atomic parameters of this atomic model.""" + return self.fitting_net.get_dim_aparam() + + def get_sel_type(self) -> list[int]: + """Get the selected atom types of this model. + + Only atoms with selected atom types have atomic contribution + to the result of the model. + If returning an empty list, all atom types are selected. + """ + return self.fitting_net.get_sel_type() + + def is_aparam_nall(self) -> bool: + """Check whether the shape of atomic parameters is (nframes, nall, ndim). + + If False, the shape is (nframes, nloc, ndim). 
+ """ + return False diff --git a/deepmd/pd/model/atomic_model/energy_atomic_model.py b/deepmd/pd/model/atomic_model/energy_atomic_model.py new file mode 100644 index 0000000000..708ec9db7f --- /dev/null +++ b/deepmd/pd/model/atomic_model/energy_atomic_model.py @@ -0,0 +1,17 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from deepmd.pd.model.task.ener import ( + EnergyFittingNet, + InvarFitting, +) + +from .dp_atomic_model import ( + DPAtomicModel, +) + + +class DPEnergyAtomicModel(DPAtomicModel): + def __init__(self, descriptor, fitting, type_map, **kwargs): + assert isinstance(fitting, EnergyFittingNet) or isinstance( + fitting, InvarFitting + ) + super().__init__(descriptor, fitting, type_map, **kwargs) diff --git a/deepmd/pd/model/descriptor/__init__.py b/deepmd/pd/model/descriptor/__init__.py new file mode 100644 index 0000000000..654643959b --- /dev/null +++ b/deepmd/pd/model/descriptor/__init__.py @@ -0,0 +1,22 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from .base_descriptor import ( + BaseDescriptor, +) +from .descriptor import ( + DescriptorBlock, +) +from .env_mat import ( + prod_env_mat, +) +from .se_a import ( + DescrptBlockSeA, + DescrptSeA, +) + +__all__ = [ + "BaseDescriptor", + "DescriptorBlock", + "DescrptBlockSeA", + "DescrptSeA", + "prod_env_mat", +] diff --git a/deepmd/pd/model/descriptor/base_descriptor.py b/deepmd/pd/model/descriptor/base_descriptor.py new file mode 100644 index 0000000000..8f0b799f87 --- /dev/null +++ b/deepmd/pd/model/descriptor/base_descriptor.py @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import paddle + +from deepmd.dpmodel.descriptor import ( + make_base_descriptor, +) + +BaseDescriptor = make_base_descriptor(paddle.Tensor, "forward") diff --git a/deepmd/pd/model/descriptor/descriptor.py b/deepmd/pd/model/descriptor/descriptor.py new file mode 100644 index 0000000000..36de5b1948 --- /dev/null +++ b/deepmd/pd/model/descriptor/descriptor.py @@ -0,0 +1,219 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import logging +from abc import ( + ABC, + abstractmethod, +) +from typing import ( + Callable, + Optional, + Union, +) + +import paddle + +from deepmd.pd.utils import ( + env, +) +from deepmd.pd.utils.env_mat_stat import ( + EnvMatStatSe, +) +from deepmd.utils.env_mat_stat import ( + StatItem, +) +from deepmd.utils.path import ( + DPPath, +) +from deepmd.utils.plugin import ( + make_plugin_registry, +) + +log = logging.getLogger(__name__) + + +class DescriptorBlock(paddle.nn.Layer, ABC, make_plugin_registry("DescriptorBlock")): + """The building block of descriptor. + Given the input descriptor, provide with the atomic coordinates, + atomic types and neighbor list, calculate the new descriptor. 
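`DescriptorBlock.__new__` below dispatches on the `type` keyword, so constructing the base class returns the registered subclass. A self-contained sketch of that pattern; the registry interface mirrors `make_plugin_registry`, and all names here are hypothetical:

```python
class Block:
    _registry: dict[str, type] = {}

    @classmethod
    def register(cls, name: str):
        def decorator(sub: type) -> type:
            cls._registry[name] = sub
            return sub
        return decorator

    def __new__(cls, *args, **kwargs):
        # dispatch on the "type" keyword only when the base class is constructed
        if cls is Block:
            try:
                cls = cls._registry[kwargs["type"]]
            except KeyError as e:
                raise KeyError("the type of Block should be set by `type`") from e
        return super().__new__(cls)

    def __init__(self, **kwargs) -> None:
        self.kwargs = kwargs


@Block.register("se_e2_a")
class SeA(Block):
    pass


print(type(Block(type="se_e2_a")).__name__)  # SeA
```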
+ """ + + local_cluster = False + + def __new__(cls, *args, **kwargs): + if cls is DescriptorBlock: + try: + descrpt_type = kwargs["type"] + except KeyError as e: + raise KeyError( + "the type of DescriptorBlock should be set by `type`" + ) from e + cls = cls.get_class_by_type(descrpt_type) + return super().__new__(cls) + + @abstractmethod + def get_rcut(self) -> float: + """Returns the cut-off radius.""" + pass + + @abstractmethod + def get_rcut_smth(self) -> float: + """Returns the radius where the neighbor information starts to smoothly decay to 0.""" + pass + + @abstractmethod + def get_nsel(self) -> int: + """Returns the number of selected atoms in the cut-off radius.""" + pass + + @abstractmethod + def get_sel(self) -> list[int]: + """Returns the number of selected atoms for each type.""" + pass + + @abstractmethod + def get_ntypes(self) -> int: + """Returns the number of element types.""" + pass + + @abstractmethod + def get_dim_out(self) -> int: + """Returns the output dimension.""" + pass + + @abstractmethod + def get_dim_in(self) -> int: + """Returns the input dimension.""" + pass + + @abstractmethod + def get_dim_emb(self) -> int: + """Returns the embedding dimension.""" + pass + + @abstractmethod + def get_env_protection(self) -> float: + """Returns the protection of building environment matrix.""" + pass + + def compute_input_stats( + self, + merged: Union[Callable[[], list[dict]], list[dict]], + path: Optional[DPPath] = None, + ): + """ + Compute the input statistics (e.g. mean and stddev) for the descriptors from packed data. + + Parameters + ---------- + merged : Union[Callable[[], list[dict]], list[dict]] + - list[dict]: A list of data samples from various data systems. + Each element, `merged[i]`, is a data dictionary containing `keys`: `paddle.Tensor` + originating from the `i`-th data system. + - Callable[[], list[dict]]: A lazy function that returns data samples in the above format + only when needed. Since the sampling process can be slow and memory-intensive, + the lazy function helps by only sampling once. + path : Optional[DPPath] + The path to the stat file. + + """ + raise NotImplementedError + + def get_stats(self) -> dict[str, StatItem]: + """Get the statistics of the descriptor.""" + raise NotImplementedError + + def share_params(self, base_class, shared_level, resume=False): + """ + Share the parameters of self to the base_class with shared_level during multitask training. + If not start from checkpoint (resume is False), + some separated parameters (e.g. mean and stddev) will be re-calculated across different classes. + """ + assert ( + self.__class__ == base_class.__class__ + ), "Only descriptors of the same type can share params!" 
+ if shared_level == 0: + # link buffers + if hasattr(self, "mean"): + if not resume: + # in case of change params during resume + base_env = EnvMatStatSe(base_class) + base_env.stats = base_class.stats + for kk in base_class.get_stats(): + base_env.stats[kk] += self.get_stats()[kk] + mean, stddev = base_env() + if not base_class.set_davg_zero: + paddle.assign( + paddle.to_tensor(mean).to(device=env.DEVICE), + base_class.mean, + ) + paddle.assign( + paddle.to_tensor(stddev).to(device=env.DEVICE), + base_class.stddev, + ) + # must share, even if not do stat + self.mean = base_class.mean + self.stddev = base_class.stddev + # self.set_state_dict(base_class.state_dict()) # this does not work, because it only inits the model + # the following will successfully link all the params except buffers + for item in self._sub_layers: + self._sub_layers[item] = base_class._sub_layers[item] + else: + raise NotImplementedError + + @abstractmethod + def forward( + self, + nlist: paddle.Tensor, + extended_coord: paddle.Tensor, + extended_atype: paddle.Tensor, + extended_atype_embd: Optional[paddle.Tensor] = None, + mapping: Optional[paddle.Tensor] = None, + ): + """Calculate DescriptorBlock.""" + pass + + @abstractmethod + def has_message_passing(self) -> bool: + """Returns whether the descriptor block has message passing.""" + + @abstractmethod + def need_sorted_nlist_for_lower(self) -> bool: + """Returns whether the descriptor block needs sorted nlist when using `forward_lower`.""" + + +def extend_descrpt_stat(des, type_map, des_with_stat=None): + r""" + Extend the statistics of a descriptor block with types from newly provided `type_map`. + + After extending, the type related dimension of the extended statistics will have a length of + `len(old_type_map) + len(type_map)`, where `old_type_map` represents the type map in `des`. + The `get_index_between_two_maps()` function can then be used to correctly select statistics for types + from `old_type_map` or `type_map`. + Positive indices from 0 to `len(old_type_map) - 1` will select old statistics of types in `old_type_map`, + while negative indices from `-len(type_map)` to -1 will select new statistics of types in `type_map`. + + Parameters + ---------- + des : DescriptorBlock + The descriptor block to be extended. + type_map : list[str] + The name of each type of atoms to be extended. + des_with_stat : DescriptorBlock, Optional + The descriptor block has additional statistics of types from newly provided `type_map`. + If None, the default statistics will be used. + Otherwise, the statistics provided in this DescriptorBlock will be used. 
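+
+    Examples
+    --------
+    A minimal sketch (objects hypothetical): extending a block whose statistics
+    cover two types by one extra type grows ``davg``/``dstd`` along the type axis:
+
+    >>> des["davg"].shape[0]  # doctest: +SKIP
+    2
+    >>> extend_descrpt_stat(des, ["Na"])  # doctest: +SKIP
+    >>> des["davg"].shape[0]  # doctest: +SKIP
+    3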
+ + """ + if des_with_stat is not None: + extend_davg = des_with_stat["davg"] + extend_dstd = des_with_stat["dstd"] + else: + extend_shape = [len(type_map), *list(des["davg"].shape[1:])] + extend_davg = paddle.zeros(extend_shape, dtype=des["davg"].dtype).to( + device=des["davg"].place + ) + extend_dstd = paddle.ones(extend_shape, dtype=des["dstd"].dtype).to( + device=des["dstd"].place + ) + des["davg"] = paddle.concat([des["davg"], extend_davg], axis=0) + des["dstd"] = paddle.concat([des["dstd"], extend_dstd], axis=0) diff --git a/deepmd/pd/model/descriptor/env_mat.py b/deepmd/pd/model/descriptor/env_mat.py new file mode 100644 index 0000000000..3a9daec1e8 --- /dev/null +++ b/deepmd/pd/model/descriptor/env_mat.py @@ -0,0 +1,87 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later + +import paddle + +from deepmd.pd.utils import ( + decomp, +) +from deepmd.pd.utils.preprocess import ( + compute_smooth_weight, +) + + +def _make_env_mat( + nlist, + coord, + rcut: float, + ruct_smth: float, + radial_only: bool = False, + protection: float = 0.0, +): + """Make smooth environment matrix.""" + bsz, natoms, nnei = nlist.shape + coord = coord.reshape([bsz, -1, 3]) + nall = coord.shape[1] + mask = nlist >= 0 + # nlist = nlist * mask ## this impl will contribute nans in Hessian calculation. + nlist = paddle.where(mask, nlist, nall - 1) + coord_l = coord[:, :natoms].reshape([bsz, -1, 1, 3]) + index = nlist.reshape([bsz, -1]).unsqueeze(-1).expand([-1, -1, 3]) + # coord_r = paddle.take_along_axis(coord, axis=1, indices=index) + coord_r = decomp.take_along_axis(coord, axis=1, indices=index) + coord_r = coord_r.reshape([bsz, natoms, nnei, 3]) + diff = coord_r - coord_l + # length = paddle.linalg.norm(diff, axis=-1, keepdim=True) + length = decomp.norm(diff, axis=-1, keepdim=True) + # for index 0 nloc atom + length = length + (~mask.unsqueeze(-1)).astype(length.dtype) + t0 = 1 / (length + protection) + t1 = diff / (length + protection) ** 2 + weight = compute_smooth_weight(length, ruct_smth, rcut) + weight = weight * mask.unsqueeze(-1).astype(weight.dtype) + if radial_only: + env_mat = t0 * weight + else: + env_mat = paddle.concat([t0.astype(t1.dtype), t1], axis=-1) * weight + return env_mat, diff * mask.unsqueeze(-1).astype(diff.dtype), weight + + +def prod_env_mat( + extended_coord, + nlist, + atype, + mean, + stddev, + rcut: float, + rcut_smth: float, + radial_only: bool = False, + protection: float = 0.0, +): + """Generate smooth environment matrix from atom coordinates and other context. + + Args: + - extended_coord: Copied atom coordinates with shape [nframes, nall*3]. + - atype: Atom types with shape [nframes, nloc]. + - mean: Average value of descriptor per element type with shape [len(sec), nnei, 4 or 1]. + - stddev: Standard deviation of descriptor per element type with shape [len(sec), nnei, 4 or 1]. + - rcut: Cut-off radius. + - rcut_smth: Smooth hyper-parameter for pair force & energy. + - radial_only: Whether to return a full description or a radial-only descriptor. + - protection: Protection parameter to prevent division by zero errors during calculations. + + Returns + ------- + - env_mat: Shape is [nframes, natoms[1]*nnei*4]. 
+ """ + _env_mat_se_a, diff, switch = _make_env_mat( + nlist, + extended_coord, + rcut, + rcut_smth, + radial_only, + protection=protection, + ) # shape [n_atom, dim, 4 or 1] + t_avg = mean[atype] # [n_atom, dim, 4 or 1] + t_std = stddev[atype] # [n_atom, dim, 4 or 1] + env_mat_se_a = (_env_mat_se_a - t_avg) / t_std + return env_mat_se_a, diff, switch diff --git a/deepmd/pd/model/descriptor/se_a.py b/deepmd/pd/model/descriptor/se_a.py new file mode 100644 index 0000000000..180d6f0a3f --- /dev/null +++ b/deepmd/pd/model/descriptor/se_a.py @@ -0,0 +1,715 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import itertools +from typing import ( + Callable, + ClassVar, + Optional, + Union, +) + +import numpy as np +import paddle + +from deepmd.dpmodel.utils.seed import ( + child_seed, +) +from deepmd.pd.model.descriptor import ( + DescriptorBlock, + prod_env_mat, +) +from deepmd.pd.utils import ( + env, +) +from deepmd.pd.utils.env import ( + PRECISION_DICT, + RESERVED_PRECISON_DICT, +) +from deepmd.pd.utils.env_mat_stat import ( + EnvMatStatSe, +) +from deepmd.pd.utils.update_sel import ( + UpdateSel, +) +from deepmd.utils.data_system import ( + DeepmdDataSystem, +) +from deepmd.utils.env_mat_stat import ( + StatItem, +) +from deepmd.utils.path import ( + DPPath, +) +from deepmd.utils.version import ( + check_version_compatibility, +) + +try: + from typing import ( + Final, + ) +except ImportError: + pass + +from deepmd.dpmodel.utils import EnvMat as DPEnvMat +from deepmd.pd.model.network.mlp import ( + EmbeddingNet, + NetworkCollection, +) +from deepmd.pd.utils.exclude_mask import ( + PairExcludeMask, +) + +from .base_descriptor import ( + BaseDescriptor, +) + + +@BaseDescriptor.register("se_e2_a") +@BaseDescriptor.register("se_a") +class DescrptSeA(BaseDescriptor, paddle.nn.Layer): + def __init__( + self, + rcut, + rcut_smth, + sel, + neuron=[25, 50, 100], + axis_neuron=16, + set_davg_zero: bool = False, + activation_function: str = "tanh", + precision: str = "float64", + resnet_dt: bool = False, + exclude_types: list[tuple[int, int]] = [], + env_protection: float = 0.0, + type_one_side: bool = True, + trainable: bool = True, + seed: Optional[Union[int, list[int]]] = None, + ntypes: Optional[int] = None, # to be compat with input + type_map: Optional[list[str]] = None, + # not implemented + spin=None, + ): + del ntypes + if spin is not None: + raise NotImplementedError("old implementation of spin is not supported.") + super().__init__() + self.type_map = type_map + self.compress = False + self.sea = DescrptBlockSeA( + rcut, + rcut_smth, + sel, + neuron=neuron, + axis_neuron=axis_neuron, + set_davg_zero=set_davg_zero, + activation_function=activation_function, + precision=precision, + resnet_dt=resnet_dt, + exclude_types=exclude_types, + env_protection=env_protection, + type_one_side=type_one_side, + trainable=trainable, + seed=seed, + ) + + def get_rcut(self) -> float: + """Returns the cut-off radius.""" + return self.sea.get_rcut() + + def get_rcut_smth(self) -> float: + """Returns the radius where the neighbor information starts to smoothly decay to 0.""" + return self.sea.get_rcut_smth() + + def get_nsel(self) -> int: + """Returns the number of selected atoms in the cut-off radius.""" + return self.sea.get_nsel() + + def get_sel(self) -> list[int]: + """Returns the number of selected atoms for each type.""" + return self.sea.get_sel() + + def get_ntypes(self) -> int: + """Returns the number of element types.""" + return self.sea.get_ntypes() + + def get_type_map(self) -> list[str]: + 
"""Get the name to each type of atoms.""" + return self.type_map + + def get_dim_out(self) -> int: + """Returns the output dimension.""" + return self.sea.get_dim_out() + + def get_dim_emb(self) -> int: + """Returns the output dimension.""" + return self.sea.get_dim_emb() + + def mixed_types(self): + """Returns if the descriptor requires a neighbor list that distinguish different + atomic types or not. + """ + return self.sea.mixed_types() + + def has_message_passing(self) -> bool: + """Returns whether the descriptor has message passing.""" + return self.sea.has_message_passing() + + def need_sorted_nlist_for_lower(self) -> bool: + """Returns whether the descriptor needs sorted nlist when using `forward_lower`.""" + return self.sea.need_sorted_nlist_for_lower() + + def get_env_protection(self) -> float: + """Returns the protection of building environment matrix.""" + return self.sea.get_env_protection() + + def share_params(self, base_class, shared_level, resume=False): + """ + Share the parameters of self to the base_class with shared_level during multitask training. + If not start from checkpoint (resume is False), + some separated parameters (e.g. mean and stddev) will be re-calculated across different classes. + """ + assert ( + self.__class__ == base_class.__class__ + ), "Only descriptors of the same type can share params!" + # For SeA descriptors, the user-defined share-level + # shared_level: 0 + # share all parameters in sea + if shared_level == 0: + self.sea.share_params(base_class.sea, 0, resume=resume) + # Other shared levels + else: + raise NotImplementedError + + @property + def dim_out(self): + """Returns the output dimension of this descriptor.""" + return self.sea.dim_out + + def change_type_map( + self, type_map: list[str], model_with_new_type_stat=None + ) -> None: + """Change the type related params to new ones, according to `type_map` and the original one in the model. + If there are new types in `type_map`, statistics will be updated accordingly to `model_with_new_type_stat` for these new types. + """ + raise NotImplementedError( + "Descriptor se_e2_a does not support changing for type related params!" + "This feature is currently not implemented because it would require additional work to support the non-mixed-types case. " + "We may consider adding this support in the future if there is a clear demand for it." + ) + + def compute_input_stats( + self, + merged: Union[Callable[[], list[dict]], list[dict]], + path: Optional[DPPath] = None, + ): + """ + Compute the input statistics (e.g. mean and stddev) for the descriptors from packed data. + + Parameters + ---------- + merged : Union[Callable[[], list[dict]], list[dict]] + - list[dict]: A list of data samples from various data systems. + Each element, `merged[i]`, is a data dictionary containing `keys`: `paddle.Tensor` + originating from the `i`-th data system. + - Callable[[], list[dict]]: A lazy function that returns data samples in the above format + only when needed. Since the sampling process can be slow and memory-intensive, + the lazy function helps by only sampling once. + path : Optional[DPPath] + The path to the stat file. 
+ + """ + return self.sea.compute_input_stats(merged, path) + + def reinit_exclude( + self, + exclude_types: list[tuple[int, int]] = [], + ): + """Update the type exclusions.""" + self.sea.reinit_exclude(exclude_types) + + def forward( + self, + coord_ext: paddle.Tensor, + atype_ext: paddle.Tensor, + nlist: paddle.Tensor, + mapping: Optional[paddle.Tensor] = None, + comm_dict: Optional[dict[str, paddle.Tensor]] = None, + ): + """Compute the descriptor. + + Parameters + ---------- + coord_ext + The extended coordinates of atoms. shape: nf x (nallx3) + atype_ext + The extended aotm types. shape: nf x nall + nlist + The neighbor list. shape: nf x nloc x nnei + mapping + The index mapping, not required by this descriptor. + comm_dict + The data needed for communication for parallel inference. + + Returns + ------- + descriptor + The descriptor. shape: nf x nloc x (ng x axis_neuron) + gr + The rotationally equivariant and permutationally invariant single particle + representation. shape: nf x nloc x ng x 3 + g2 + The rotationally invariant pair-partical representation. + this descriptor returns None + h2 + The rotationally equivariant pair-partical representation. + this descriptor returns None + sw + The smooth switch function. + + """ + return self.sea.forward(nlist, coord_ext, atype_ext, None, mapping) + + def set_stat_mean_and_stddev( + self, + mean: paddle.Tensor, + stddev: paddle.Tensor, + ) -> None: + """Update mean and stddev for descriptor.""" + self.sea.mean = mean + self.sea.stddev = stddev + + def get_stat_mean_and_stddev(self) -> tuple[paddle.Tensor, paddle.Tensor]: + """Get mean and stddev for descriptor.""" + return self.sea.mean, self.sea.stddev + + def serialize(self) -> dict: + obj = self.sea + return { + "@class": "Descriptor", + "type": "se_e2_a", + "@version": 2, + "rcut": obj.rcut, + "rcut_smth": obj.rcut_smth, + "sel": obj.sel, + "neuron": obj.neuron, + "axis_neuron": obj.axis_neuron, + "resnet_dt": obj.resnet_dt, + "set_davg_zero": obj.set_davg_zero, + "activation_function": obj.activation_function, + # make deterministic + "precision": RESERVED_PRECISON_DICT[obj.prec], + "embeddings": obj.filter_layers.serialize(), + "env_mat": DPEnvMat(obj.rcut, obj.rcut_smth).serialize(), + "exclude_types": obj.exclude_types, + "env_protection": obj.env_protection, + "@variables": { + "davg": obj["davg"].numpy(), + "dstd": obj["dstd"].numpy(), + }, + "type_map": self.type_map, + ## to be updated when the options are supported. + "trainable": True, + "type_one_side": obj.type_one_side, + "spin": None, + } + + @classmethod + def deserialize(cls, data: dict) -> "DescrptSeA": + data = data.copy() + check_version_compatibility(data.pop("@version", 1), 2, 1) + data.pop("@class", None) + data.pop("type", None) + variables = data.pop("@variables") + embeddings = data.pop("embeddings") + env_mat = data.pop("env_mat") + obj = cls(**data) + + def t_cvt(xx): + return paddle.to_tensor(xx, dtype=obj.sea.prec).to(device=env.DEVICE) + + obj.sea["davg"] = t_cvt(variables["davg"]) + obj.sea["dstd"] = t_cvt(variables["dstd"]) + obj.sea.filter_layers = NetworkCollection.deserialize(embeddings) + return obj + + @classmethod + def update_sel( + cls, + train_data: DeepmdDataSystem, + type_map: Optional[list[str]], + local_jdata: dict, + ) -> tuple[dict, Optional[float]]: + """Update the selection and perform neighbor statistics. 
+ + Parameters + ---------- + train_data : DeepmdDataSystem + data used to do neighbor statistics + type_map : list[str], optional + The name of each type of atoms + local_jdata : dict + The local data refer to the current class + + Returns + ------- + dict + The updated local data + float + The minimum distance between two atoms + """ + local_jdata_cpy = local_jdata.copy() + min_nbor_dist, local_jdata_cpy["sel"] = UpdateSel().update_one_sel( + train_data, type_map, local_jdata_cpy["rcut"], local_jdata_cpy["sel"], False + ) + return local_jdata_cpy, min_nbor_dist + + +@DescriptorBlock.register("se_e2_a") +class DescrptBlockSeA(DescriptorBlock): + ndescrpt: Final[int] + __constants__: ClassVar[list] = ["ndescrpt"] + lower: dict[str, int] + upper: dict[str, int] + table_data: dict[str, paddle.Tensor] + table_config: list[Union[int, float]] + + def __init__( + self, + rcut, + rcut_smth, + sel, + neuron=[25, 50, 100], + axis_neuron=16, + set_davg_zero: bool = False, + activation_function: str = "tanh", + precision: str = "float64", + resnet_dt: bool = False, + exclude_types: list[tuple[int, int]] = [], + env_protection: float = 0.0, + type_one_side: bool = True, + trainable: bool = True, + seed: Optional[Union[int, list[int]]] = None, + **kwargs, + ): + """Construct an embedding net of type `se_a`. + + Args: + - rcut: Cut-off radius. + - rcut_smth: Smooth hyper-parameter for pair force & energy. + - sel: For each element type, how many atoms is selected as neighbors. + - filter_neuron: Number of neurons in each hidden layers of the embedding net. + - axis_neuron: Number of columns of the sub-matrix of the embedding matrix. + """ + super().__init__() + self.rcut = float(rcut) + self.rcut_smth = float(rcut_smth) + self.neuron = neuron + self.filter_neuron = self.neuron + self.axis_neuron = axis_neuron + self.set_davg_zero = set_davg_zero + self.activation_function = activation_function + self.precision = precision + self.prec = PRECISION_DICT[self.precision] + self.resnet_dt = resnet_dt + self.env_protection = env_protection + self.ntypes = len(sel) + self.type_one_side = type_one_side + self.seed = seed + # order matters, placed after the assignment of self.ntypes + self.reinit_exclude(exclude_types) + + self.sel = sel + # should be on CPU to avoid D2H, as it is used as slice index + self.sec = [0, *np.cumsum(self.sel).tolist()] + self.split_sel = self.sel + self.nnei = sum(sel) + self.ndescrpt = self.nnei * 4 + + wanted_shape = (self.ntypes, self.nnei, 4) + mean = paddle.zeros(wanted_shape, dtype=self.prec).to(device=env.DEVICE) + stddev = paddle.ones(wanted_shape, dtype=self.prec).to(device=env.DEVICE) + self.register_buffer("mean", mean) + self.register_buffer("stddev", stddev) + + # add for compression + self.compress = False + self.lower = {} + self.upper = {} + self.table_data = {} + self.table_config = [] + + ndim = 1 if self.type_one_side else 2 + filter_layers = NetworkCollection( + ndim=ndim, ntypes=len(sel), network_type="embedding_network" + ) + for ii, embedding_idx in enumerate( + itertools.product(range(self.ntypes), repeat=ndim) + ): + filter_layers[embedding_idx] = EmbeddingNet( + 1, + self.filter_neuron, + activation_function=self.activation_function, + precision=self.precision, + resnet_dt=self.resnet_dt, + seed=child_seed(self.seed, ii), + ) + self.filter_layers = filter_layers + self.stats = None + # set trainable + self.trainable = trainable + for param in self.parameters(): + param.stop_gradient = not trainable + + def get_rcut(self) -> float: + """Returns the cut-off 
radius.""" + return self.rcut + + def get_rcut_smth(self) -> float: + """Returns the radius where the neighbor information starts to smoothly decay to 0.""" + return self.rcut_smth + + def get_nsel(self) -> int: + """Returns the number of selected atoms in the cut-off radius.""" + return sum(self.sel) + + def get_sel(self) -> list[int]: + """Returns the number of selected atoms for each type.""" + return self.sel + + def get_ntypes(self) -> int: + """Returns the number of element types.""" + return self.ntypes + + def get_dim_out(self) -> int: + """Returns the output dimension.""" + return self.dim_out + + def get_dim_rot_mat_1(self) -> int: + """Returns the first dimension of the rotation matrix. The rotation is of shape dim_1 x 3.""" + return self.filter_neuron[-1] + + def get_dim_emb(self) -> int: + """Returns the output dimension.""" + return self.neuron[-1] + + def get_dim_in(self) -> int: + """Returns the input dimension.""" + return self.dim_in + + def mixed_types(self) -> bool: + """If true, the descriptor + 1. assumes total number of atoms aligned across frames; + 2. requires a neighbor list that does not distinguish different atomic types. + + If false, the descriptor + 1. assumes total number of atoms of each atom type aligned across frames; + 2. requires a neighbor list that distinguishes different atomic types. + + """ + return False + + def get_env_protection(self) -> float: + """Returns the protection of building environment matrix.""" + return self.env_protection + + @property + def dim_out(self): + """Returns the output dimension of this descriptor.""" + return self.filter_neuron[-1] * self.axis_neuron + + @property + def dim_in(self): + """Returns the atomic input dimension of this descriptor.""" + return 0 + + def __setitem__(self, key, value): + if key in ("avg", "data_avg", "davg"): + self.mean = value + elif key in ("std", "data_std", "dstd"): + self.stddev = value + else: + raise KeyError(key) + + def __getitem__(self, key): + if key in ("avg", "data_avg", "davg"): + return self.mean + elif key in ("std", "data_std", "dstd"): + return self.stddev + else: + raise KeyError(key) + + def compute_input_stats( + self, + merged: Union[Callable[[], list[dict]], list[dict]], + path: Optional[DPPath] = None, + ): + """ + Compute the input statistics (e.g. mean and stddev) for the descriptors from packed data. + + Parameters + ---------- + merged : Union[Callable[[], list[dict]], list[dict]] + - list[dict]: A list of data samples from various data systems. + Each element, `merged[i]`, is a data dictionary containing `keys`: `paddle.Tensor` + originating from the `i`-th data system. + - Callable[[], list[dict]]: A lazy function that returns data samples in the above format + only when needed. Since the sampling process can be slow and memory-intensive, + the lazy function helps by only sampling once. + path : Optional[DPPath] + The path to the stat file. 
+ + """ + env_mat_stat = EnvMatStatSe(self) + if path is not None: + path = path / env_mat_stat.get_hash() + if path is None or not path.is_dir(): + if callable(merged): + # only get data for once + sampled = merged() + else: + sampled = merged + else: + sampled = [] + env_mat_stat.load_or_compute_stats(sampled, path) + self.stats = env_mat_stat.stats + mean, stddev = env_mat_stat() + if not self.set_davg_zero: + paddle.assign(paddle.to_tensor(mean).to(device=env.DEVICE), self.mean) # pylint: disable=no-explicit-dtype + paddle.assign(paddle.to_tensor(stddev).to(device=env.DEVICE), self.stddev) # pylint: disable=no-explicit-dtype + + def get_stats(self) -> dict[str, StatItem]: + """Get the statistics of the descriptor.""" + if self.stats is None: + raise RuntimeError( + "The statistics of the descriptor has not been computed." + ) + return self.stats + + def reinit_exclude( + self, + exclude_types: list[tuple[int, int]] = [], + ): + self.exclude_types = exclude_types + self.emask = PairExcludeMask(self.ntypes, exclude_types=exclude_types) + + def enable_compression( + self, + table_data, + table_config, + lower, + upper, + ) -> None: + self.compress = True + self.table_data = table_data + self.table_config = table_config + self.lower = lower + self.upper = upper + + def forward( + self, + nlist: paddle.Tensor, + extended_coord: paddle.Tensor, + extended_atype: paddle.Tensor, + extended_atype_embd: Optional[paddle.Tensor] = None, + mapping: Optional[paddle.Tensor] = None, + ): + """Calculate decoded embedding for each atom. + + Args: + - coord: Tell atom coordinates with shape [nframes, natoms[1]*3]. + - atype: Tell atom types with shape [nframes, natoms[1]]. + - natoms: Tell atom count and element count. Its shape is [2+self.ntypes]. + - box: Tell simulation box with shape [nframes, 9]. + + Returns + ------- + - `paddle.Tensor`: descriptor matrix with shape [nframes, natoms[0]*self.filter_neuron[-1]*self.axis_neuron]. + """ + del extended_atype_embd, mapping + nf = nlist.shape[0] + nloc = nlist.shape[1] + atype: paddle.Tensor = extended_atype[:, :nloc] + dmatrix, diff, sw = prod_env_mat( + extended_coord, + nlist, + atype, + self.mean, + self.stddev, + self.rcut, + self.rcut_smth, + protection=self.env_protection, + ) + + dmatrix = dmatrix.reshape([-1, self.nnei, 4]) + dmatrix = dmatrix.astype(self.prec) + nfnl = dmatrix.shape[0] + # pre-allocate a shape to pass jit + xyz_scatter = paddle.zeros( + [nfnl, 4, self.filter_neuron[-1]], + dtype=self.prec, + ).to(extended_coord.place) + # nfnl x nnei + exclude_mask = self.emask(nlist, extended_atype).reshape([nfnl, self.nnei]) + for embedding_idx, ll in enumerate(self.filter_layers.networks): + if self.type_one_side: + ii = embedding_idx + ti = -1 + # paddle.jit is not happy with slice(None) + # ti_mask = paddle.ones(nfnl, dtype=paddle.bool, device=dmatrix.place) + # applying a mask seems to cause performance degradation + ti_mask = None + else: + # ti: center atom type, ii: neighbor type... + ii = embedding_idx // self.ntypes + ti = embedding_idx % self.ntypes + ti_mask = atype.flatten() == ti + # nfnl x nt + if ti_mask is not None: + mm = exclude_mask[ti_mask, self.sec[ii] : self.sec[ii + 1]] + else: + mm = exclude_mask[:, self.sec[ii] : self.sec[ii + 1]] + # nfnl x nt x 4 + if ti_mask is not None: + rr = dmatrix[ti_mask, self.sec[ii] : self.sec[ii + 1], :] + else: + rr = dmatrix[:, self.sec[ii] : self.sec[ii + 1], :] + if self.compress: + raise NotImplementedError( + "Compressed environment is not implemented yet." 
+ ) + else: + if rr.numel() > 0: + rr = rr * mm.unsqueeze(2).astype(rr.dtype) + ss = rr[:, :, :1] + # nfnl x nt x ng + gg = ll.forward(ss) + # nfnl x 4 x ng + gr = paddle.matmul(rr.transpose([0, 2, 1]), gg) + if ti_mask is not None: + xyz_scatter[ti_mask] += gr + else: + xyz_scatter += gr + + xyz_scatter /= self.nnei + xyz_scatter_1 = xyz_scatter.transpose([0, 2, 1]) + rot_mat: paddle.Tensor = xyz_scatter_1[:, :, 1:4] + xyz_scatter_2 = xyz_scatter[:, :, 0 : self.axis_neuron] + result = paddle.matmul( + xyz_scatter_1, xyz_scatter_2 + ) # shape is [nframes*nall, self.filter_neuron[-1], self.axis_neuron] + result = result.reshape([nf, nloc, self.filter_neuron[-1] * self.axis_neuron]) + rot_mat = rot_mat.reshape([nf, nloc] + list(rot_mat.shape[1:])) # noqa:RUF005 + return ( + result.astype(env.GLOBAL_PD_FLOAT_PRECISION), + rot_mat.astype(env.GLOBAL_PD_FLOAT_PRECISION), + None, + None, + sw, + ) + + def has_message_passing(self) -> bool: + """Returns whether the descriptor block has message passing.""" + return False + + def need_sorted_nlist_for_lower(self) -> bool: + """Returns whether the descriptor block needs sorted nlist when using `forward_lower`.""" + return False diff --git a/deepmd/pd/model/model/__init__.py b/deepmd/pd/model/model/__init__.py new file mode 100644 index 0000000000..990ee51348 --- /dev/null +++ b/deepmd/pd/model/model/__init__.py @@ -0,0 +1,144 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +"""The model that takes the coordinates, cell and atom types as input +and predicts some property. The models are automatically generated from +atomic models by the `deepmd.dpmodel.make_model` method. + +The `make_model` method does the reduction, auto-differentiation and +communication of the atomic properties according to output variable +definition `deepmd.dpmodel.OutputVariableDef`. + +All models should be inherited from :class:`deepmd.pd.model.model.model.BaseModel`. +Models generated by `make_model` have already done it. +""" + +import copy +import json + +import numpy as np + +from deepmd.pd.model.descriptor.base_descriptor import ( + BaseDescriptor, +) +from deepmd.pd.model.task import ( + BaseFitting, +) + +from .dp_model import ( + DPModelCommon, +) +from .ener_model import ( + EnergyModel, +) +from .frozen import ( + FrozenModel, +) +from .make_model import ( + make_model, +) +from .model import ( + BaseModel, +) + + +def _get_standard_model_components(model_params, ntypes): + # descriptor + model_params["descriptor"]["ntypes"] = ntypes + model_params["descriptor"]["type_map"] = copy.deepcopy(model_params["type_map"]) + descriptor = BaseDescriptor(**model_params["descriptor"]) + # fitting + fitting_net = model_params.get("fitting_net", {}) + fitting_net["type"] = fitting_net.get("type", "ener") + fitting_net["ntypes"] = descriptor.get_ntypes() + fitting_net["type_map"] = copy.deepcopy(model_params["type_map"]) + fitting_net["mixed_types"] = descriptor.mixed_types() + if fitting_net["type"] in ["dipole", "polar"]: + fitting_net["embedding_width"] = descriptor.get_dim_emb() + fitting_net["dim_descrpt"] = descriptor.get_dim_out() + grad_force = "direct" not in fitting_net["type"] + if not grad_force: + fitting_net["out_dim"] = descriptor.get_dim_emb() + if "ener" in fitting_net["type"]: + fitting_net["return_energy"] = True + fitting = BaseFitting(**fitting_net) + return descriptor, fitting, fitting_net["type"] + + +def _can_be_converted_to_float(value): + try: + float(value) + return True + except (TypeError, ValueError): + # return false for any failure... 
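+        # e.g. _can_be_converted_to_float("1e-3") is True, while None raises
+        # TypeError and "meV" raises ValueError, so both report False here.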
+ return False + + +def _convert_preset_out_bias_to_array(preset_out_bias, type_map): + if preset_out_bias is not None: + for kk in preset_out_bias: + if len(preset_out_bias[kk]) != len(type_map): + raise ValueError( + "length of the preset_out_bias should be the same as the type_map" + ) + for jj in range(len(preset_out_bias[kk])): + if preset_out_bias[kk][jj] is not None: + if isinstance(preset_out_bias[kk][jj], list): + bb = preset_out_bias[kk][jj] + elif _can_be_converted_to_float(preset_out_bias[kk][jj]): + bb = [float(preset_out_bias[kk][jj])] + else: + raise ValueError( + f"unsupported type/value of the {jj}th element of " + f"preset_out_bias['{kk}'] " + f"{type(preset_out_bias[kk][jj])}" + ) + preset_out_bias[kk][jj] = np.array(bb) + return preset_out_bias + + +def get_standard_model(model_params): + model_params_old = model_params + model_params = copy.deepcopy(model_params) + ntypes = len(model_params["type_map"]) + descriptor, fitting, fitting_net_type = _get_standard_model_components( + model_params, ntypes + ) + atom_exclude_types = model_params.get("atom_exclude_types", []) + pair_exclude_types = model_params.get("pair_exclude_types", []) + preset_out_bias = model_params.get("preset_out_bias") + preset_out_bias = _convert_preset_out_bias_to_array( + preset_out_bias, model_params["type_map"] + ) + + if fitting_net_type in ["ener", "direct_force_ener"]: + modelcls = EnergyModel + else: + raise RuntimeError(f"Unknown fitting type: {fitting_net_type}") + + model = modelcls( + descriptor=descriptor, + fitting=fitting, + type_map=model_params["type_map"], + atom_exclude_types=atom_exclude_types, + pair_exclude_types=pair_exclude_types, + preset_out_bias=preset_out_bias, + ) + model.model_def_script = json.dumps(model_params_old) + return model + + +def get_model(model_params): + model_type = model_params.get("type", "standard") + if model_type == "standard": + return get_standard_model(model_params) + else: + return BaseModel.get_class_by_type(model_type).get_model(model_params) + + +__all__ = [ + "BaseModel", + "get_model", + "DPModelCommon", + "EnergyModel", + "FrozenModel", + "make_model", +] diff --git a/deepmd/pd/model/model/dp_model.py b/deepmd/pd/model/model/dp_model.py new file mode 100644 index 0000000000..e014be5b68 --- /dev/null +++ b/deepmd/pd/model/model/dp_model.py @@ -0,0 +1,64 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from typing import ( + Optional, +) + +import paddle + +from deepmd.pd.model.descriptor.base_descriptor import ( + BaseDescriptor, +) +from deepmd.utils.data_system import ( + DeepmdDataSystem, +) + + +class DPModelCommon: + """A base class to implement common methods for all the Models.""" + + @classmethod + def update_sel( + cls, + train_data: DeepmdDataSystem, + type_map: Optional[list[str]], + local_jdata: dict, + ) -> tuple[dict, Optional[float]]: + """Update the selection and perform neighbor statistics. 
+ + Parameters + ---------- + train_data : DeepmdDataSystem + data used to do neighbor statistics + type_map : list[str], optional + The name of each type of atoms + local_jdata : dict + The local data refer to the current class + + Returns + ------- + dict + The updated local data + float + The minimum distance between two atoms + """ + local_jdata_cpy = local_jdata.copy() + local_jdata_cpy["descriptor"], min_nbor_dist = BaseDescriptor.update_sel( + train_data, type_map, local_jdata["descriptor"] + ) + return local_jdata_cpy, min_nbor_dist + + def get_fitting_net(self): + """Get the fitting network.""" + return self.atomic_model.fitting_net + + def get_descriptor(self): + """Get the descriptor.""" + return self.atomic_model.descriptor + + def set_eval_descriptor_hook(self, enable: bool) -> None: + """Set the hook for evaluating descriptor and clear the cache for descriptor list.""" + self.atomic_model.set_eval_descriptor_hook(enable) + + def eval_descriptor(self) -> paddle.Tensor: + """Evaluate the descriptor.""" + return self.atomic_model.eval_descriptor() diff --git a/deepmd/pd/model/model/ener_model.py b/deepmd/pd/model/model/ener_model.py new file mode 100644 index 0000000000..3f3db4a527 --- /dev/null +++ b/deepmd/pd/model/model/ener_model.py @@ -0,0 +1,135 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from copy import ( + deepcopy, +) +from typing import ( + Optional, +) + +import paddle + +from deepmd.pd.model.atomic_model import ( + DPEnergyAtomicModel, +) +from deepmd.pd.model.model.model import ( + BaseModel, +) + +from .dp_model import ( + DPModelCommon, +) +from .make_model import ( + make_model, +) + +DPEnergyModel_ = make_model(DPEnergyAtomicModel) + + +@BaseModel.register("ener") +class EnergyModel(DPModelCommon, DPEnergyModel_): + model_type = "ener" + + def __init__( + self, + *args, + **kwargs, + ): + DPModelCommon.__init__(self) + DPEnergyModel_.__init__(self, *args, **kwargs) + + def translated_output_def(self): + out_def_data = self.model_output_def().get_data() + output_def = { + "atom_energy": deepcopy(out_def_data["energy"]), + "energy": deepcopy(out_def_data["energy_redu"]), + } + if self.do_grad_r("energy"): + output_def["force"] = deepcopy(out_def_data["energy_derv_r"]) + output_def["force"].squeeze(-2) + if self.do_grad_c("energy"): + output_def["virial"] = deepcopy(out_def_data["energy_derv_c_redu"]) + output_def["virial"].squeeze(-2) + output_def["atom_virial"] = deepcopy(out_def_data["energy_derv_c"]) + output_def["atom_virial"].squeeze(-3) + if "mask" in out_def_data: + output_def["mask"] = deepcopy(out_def_data["mask"]) + return output_def + + def forward( + self, + coord, + atype, + box: Optional[paddle.Tensor] = None, + fparam: Optional[paddle.Tensor] = None, + aparam: Optional[paddle.Tensor] = None, + do_atomic_virial: bool = False, + ) -> dict[str, paddle.Tensor]: + model_ret = self.forward_common( + coord, + atype, + box, + fparam=fparam, + aparam=aparam, + do_atomic_virial=do_atomic_virial, + ) + if self.get_fitting_net() is not None: + model_predict = {} + model_predict["atom_energy"] = model_ret["energy"] + model_predict["energy"] = model_ret["energy_redu"] + if self.do_grad_r("energy"): + model_predict["force"] = model_ret["energy_derv_r"].squeeze(-2) + if self.do_grad_c("energy"): + model_predict["virial"] = model_ret["energy_derv_c_redu"].squeeze(-2) + if do_atomic_virial: + model_predict["atom_virial"] = model_ret["energy_derv_c"].squeeze( + -3 + ) + else: + model_predict["force"] = model_ret["dforce"] + if "mask" in model_ret: + 
model_predict["mask"] = model_ret["mask"] + else: + model_predict = model_ret + model_predict["updated_coord"] += coord + return model_predict + + def forward_lower( + self, + extended_coord, + extended_atype, + nlist, + mapping: Optional[paddle.Tensor] = None, + fparam: Optional[paddle.Tensor] = None, + aparam: Optional[paddle.Tensor] = None, + do_atomic_virial: bool = False, + comm_dict: Optional[dict[str, paddle.Tensor]] = None, + ): + model_ret = self.forward_common_lower( + extended_coord, + extended_atype, + nlist, + mapping, + fparam=fparam, + aparam=aparam, + do_atomic_virial=do_atomic_virial, + comm_dict=comm_dict, + extra_nlist_sort=self.need_sorted_nlist_for_lower(), + ) + if self.get_fitting_net() is not None: + model_predict = {} + model_predict["atom_energy"] = model_ret["energy"] + model_predict["energy"] = model_ret["energy_redu"] + if self.do_grad_r("energy"): + model_predict["extended_force"] = model_ret["energy_derv_r"].squeeze(-2) + if self.do_grad_c("energy"): + model_predict["virial"] = model_ret["energy_derv_c_redu"].squeeze(-2) + if do_atomic_virial: + model_predict["extended_virial"] = model_ret[ + "energy_derv_c" + ].squeeze(-3) + else: + assert model_ret["dforce"] is not None + model_predict["dforce"] = model_ret["dforce"] + else: + model_predict = model_ret + return model_predict diff --git a/deepmd/pd/model/model/frozen.py b/deepmd/pd/model/model/frozen.py new file mode 100644 index 0000000000..e8128c6bd1 --- /dev/null +++ b/deepmd/pd/model/model/frozen.py @@ -0,0 +1,182 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import json +from typing import ( + Optional, +) + +import paddle + +from deepmd.dpmodel.output_def import ( + FittingOutputDef, +) +from deepmd.pd.model.model.model import ( + BaseModel, +) +from deepmd.utils.data_system import ( + DeepmdDataSystem, +) + + +@BaseModel.register("frozen") +class FrozenModel(BaseModel): + """Load model from a frozen model, which cannot be trained. + + Parameters + ---------- + model_file : str + The path to the frozen model + """ + + def __init__(self, model_file: str, **kwargs): + super().__init__(**kwargs) + self.model_file = model_file + if model_file.endswith(".json"): + self.model = paddle.jit.load(model_file.split(".json")[0]) + else: + raise NotImplementedError( + f"Only support .json file, but received {model_file}" + ) + + def fitting_output_def(self) -> FittingOutputDef: + """Get the output def of developer implemented atomic models.""" + return self.model.fitting_output_def() + + def get_rcut(self) -> float: + """Get the cut-off radius.""" + return self.model.get_rcut() + + def get_type_map(self) -> list[str]: + """Get the type map.""" + return self.model.get_type_map() + + def get_sel(self) -> list[int]: + """Returns the number of selected atoms for each type.""" + return self.model.get_sel() + + def get_dim_fparam(self) -> int: + """Get the number (dimension) of frame parameters of this atomic model.""" + return self.model.get_dim_fparam() + + def get_dim_aparam(self) -> int: + """Get the number (dimension) of atomic parameters of this atomic model.""" + return self.model.get_dim_aparam() + + def get_sel_type(self) -> list[int]: + """Get the selected atom types of this model. + + Only atoms with selected atom types have atomic contribution + to the result of the model. + If returning an empty list, all atom types are selected. + """ + return self.model.get_sel_type() + + def is_aparam_nall(self) -> bool: + """Check whether the shape of atomic parameters is (nframes, nall, ndim). 
+ + If False, the shape is (nframes, nloc, ndim). + """ + return self.model.is_aparam_nall() + + def mixed_types(self) -> bool: + """If true, the model + 1. assumes total number of atoms aligned across frames; + 2. uses a neighbor list that does not distinguish different atomic types. + + If false, the model + 1. assumes total number of atoms of each atom type aligned across frames; + 2. uses a neighbor list that distinguishes different atomic types. + + """ + return self.model.mixed_types() + + def has_message_passing(self) -> bool: + """Returns whether the descriptor has message passing.""" + return self.model.has_message_passing() + + def need_sorted_nlist_for_lower(self) -> bool: + """Returns whether the model needs sorted nlist when using `forward_lower`.""" + return self.model.need_sorted_nlist_for_lower() + + def forward( + self, + coord, + atype, + box: Optional[paddle.Tensor] = None, + fparam: Optional[paddle.Tensor] = None, + aparam: Optional[paddle.Tensor] = None, + do_atomic_virial: bool = False, + ) -> dict[str, paddle.Tensor]: + return self.model.forward( + coord, + atype, + box=box, + fparam=fparam, + aparam=aparam, + do_atomic_virial=do_atomic_virial, + ) + + def get_model_def_script(self) -> str: + """Get the model definition script.""" + # try to use the original script instead of "frozen model" + # Note: this cannot change the script of the parent model + # it may still try to load hard-coded filename, which might + # be a problem + return self.model.get_model_def_script() + + def get_min_nbor_dist(self) -> Optional[float]: + """Get the minimum neighbor distance.""" + return self.model.get_min_nbor_dist() + + def serialize(self) -> dict: + from deepmd.pd.model.model import ( + get_model, + ) + + # try to recover the original model + model_def_script = json.loads(self.get_model_def_script()) + model = get_model(model_def_script) + model.set_state_dict(self.model.state_dict()) + return model.serialize() + + @classmethod + def deserialize(cls, data: dict): + raise RuntimeError("Should not touch here.") + + def get_nnei(self) -> int: + """Returns the total number of selected neighboring atoms in the cut-off radius.""" + return self.model.get_nnei() + + def get_nsel(self) -> int: + """Returns the total number of selected neighboring atoms in the cut-off radius.""" + return self.model.get_nsel() + + @classmethod + def update_sel( + cls, + train_data: DeepmdDataSystem, + type_map: Optional[list[str]], + local_jdata: dict, + ) -> tuple[dict, Optional[float]]: + """Update the selection and perform neighbor statistics. 
+
+        Parameters
+        ----------
+        train_data : DeepmdDataSystem
+            data used to do neighbor statistics
+        type_map : list[str], optional
+            The name of each type of atoms
+        local_jdata : dict
+            The local data referring to the current class
+
+        Returns
+        -------
+        dict
+            The updated local data
+        float
+            The minimum distance between two atoms
+        """
+        return local_jdata, None
+
+    def model_output_type(self) -> str:
+        """Get the output type for the model."""
+        return self.model.model_output_type()
diff --git a/deepmd/pd/model/model/make_model.py b/deepmd/pd/model/model/make_model.py
new file mode 100644
index 0000000000..67b46d4d87
--- /dev/null
+++ b/deepmd/pd/model/model/make_model.py
@@ -0,0 +1,614 @@
+# SPDX-License-Identifier: LGPL-3.0-or-later
+from typing import (
+    Optional,
+)
+
+import paddle
+
+from deepmd.dpmodel import (
+    ModelOutputDef,
+)
+from deepmd.dpmodel.output_def import (
+    FittingOutputDef,
+    OutputVariableCategory,
+    OutputVariableOperation,
+    check_operation_applied,
+)
+from deepmd.pd.model.atomic_model.base_atomic_model import (
+    BaseAtomicModel,
+)
+from deepmd.pd.model.model.model import (
+    BaseModel,
+)
+from deepmd.pd.model.model.transform_output import (
+    communicate_extended_output,
+    fit_output_to_model_output,
+)
+from deepmd.pd.utils import (
+    decomp,
+)
+from deepmd.pd.utils.env import (
+    GLOBAL_PD_ENER_FLOAT_PRECISION,
+    GLOBAL_PD_FLOAT_PRECISION,
+    PRECISION_DICT,
+    RESERVED_PRECISON_DICT,
+)
+from deepmd.pd.utils.nlist import (
+    extend_input_and_build_neighbor_list,
+    nlist_distinguish_types,
+)
+from deepmd.utils.path import (
+    DPPath,
+)
+
+
+def make_model(T_AtomicModel: type[BaseAtomicModel]):
+    """Make a model as a derived class of an atomic model.
+
+    The model provides two interfaces.
+
+    1. the `forward_common_lower`, that takes extended coordinates, atypes and neighbor list,
+    and outputs the atomic property and derivatives (if required) on the extended region.
+
+    2. the `forward_common`, that takes coordinates, atypes and cell and predicts
+    the atomic and reduced property, and derivatives (if required) on the local region.
+
+    Parameters
+    ----------
+    T_AtomicModel
+        The atomic model.
+
+    Returns
+    -------
+    CM
+        The model.
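+
+    Examples
+    --------
+    A minimal sketch mirroring `EnergyModel` in ener_model.py (mixin order
+    illustrative):
+
+    >>> DPEnergyModel_ = make_model(DPEnergyAtomicModel)  # doctest: +SKIP
+    >>> class EnergyModel(DPModelCommon, DPEnergyModel_):  # doctest: +SKIP
+    ...     model_type = "ener"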
+ + """ + + class CM(BaseModel): + def __init__( + self, + *args, + # underscore to prevent conflict with normal inputs + atomic_model_: Optional[T_AtomicModel] = None, + **kwargs, + ): + super().__init__(*args, **kwargs) + if atomic_model_ is not None: + self.atomic_model: T_AtomicModel = atomic_model_ + else: + self.atomic_model: T_AtomicModel = T_AtomicModel(*args, **kwargs) + self.precision_dict = PRECISION_DICT + self.reverse_precision_dict = RESERVED_PRECISON_DICT + self.global_pd_float_precision = GLOBAL_PD_FLOAT_PRECISION + self.global_pd_ener_float_precision = GLOBAL_PD_ENER_FLOAT_PRECISION + + def model_output_def(self): + """Get the output def for the model.""" + return ModelOutputDef(self.atomic_output_def()) + + def model_output_type(self) -> list[str]: + """Get the output type for the model.""" + output_def = self.model_output_def() + var_defs = output_def.var_defs + # jit: Comprehension ifs are not supported yet + # type hint is critical for JIT + vars: list[str] = [] + for kk, vv in var_defs.items(): + # .value is critical for JIT + if vv.category == OutputVariableCategory.OUT.value: + vars.append(kk) + return vars + + def enable_compression( + self, + table_extrapolate: float = 5, + table_stride_1: float = 0.01, + table_stride_2: float = 0.1, + check_frequency: int = -1, + ) -> None: + """Call atomic_model enable_compression(). + + Parameters + ---------- + table_extrapolate + The scale of model extrapolation + table_stride_1 + The uniform stride of the first table + table_stride_2 + The uniform stride of the second table + check_frequency + The overflow check frequency + """ + self.atomic_model.enable_compression( + self.get_min_nbor_dist(), + table_extrapolate, + table_stride_1, + table_stride_2, + check_frequency, + ) + + def forward_common( + self, + coord, + atype, + box: Optional[paddle.Tensor] = None, + fparam: Optional[paddle.Tensor] = None, + aparam: Optional[paddle.Tensor] = None, + do_atomic_virial: bool = False, + ) -> dict[str, paddle.Tensor]: + """Return model prediction. + + Parameters + ---------- + coord + The coordinates of the atoms. + shape: nf x (nloc x 3) + atype + The type of atoms. shape: nf x nloc + box + The simulation box. shape: nf x 9 + fparam + frame parameter. nf x ndf + aparam + atomic parameter. nf x nloc x nda + do_atomic_virial + If calculate the atomic virial. + + Returns + ------- + ret_dict + The result dict of type dict[str,paddle.Tensor]. + The keys are defined by the `ModelOutputDef`. 
+ + """ + cc, bb, fp, ap, input_prec = self.input_type_cast( + coord, box=box, fparam=fparam, aparam=aparam + ) + del coord, box, fparam, aparam + ( + extended_coord, + extended_atype, + mapping, + nlist, + ) = extend_input_and_build_neighbor_list( + cc, + atype, + self.get_rcut(), + self.get_sel(), + mixed_types=self.mixed_types(), + box=bb, + ) + model_predict_lower = self.forward_common_lower( + extended_coord, + extended_atype, + nlist, + mapping, + do_atomic_virial=do_atomic_virial, + fparam=fp, + aparam=ap, + ) + model_predict = communicate_extended_output( + model_predict_lower, + self.model_output_def(), + mapping, + do_atomic_virial=do_atomic_virial, + ) + model_predict = self.output_type_cast(model_predict, input_prec) + return model_predict + + def get_out_bias(self) -> paddle.Tensor: + return self.atomic_model.get_out_bias() + + def set_out_bias(self, out_bias: paddle.Tensor) -> None: + self.atomic_model.set_out_bias(out_bias) + + def change_out_bias( + self, + merged, + bias_adjust_mode="change-by-statistic", + ) -> None: + """Change the output bias of atomic model according to the input data and the pretrained model. + + Parameters + ---------- + merged : Union[Callable[[], list[dict]], list[dict]] + - list[dict]: A list of data samples from various data systems. + Each element, `merged[i]`, is a data dictionary containing `keys`: `paddle.Tensor` + originating from the `i`-th data system. + - Callable[[], list[dict]]: A lazy function that returns data samples in the above format + only when needed. Since the sampling process can be slow and memory-intensive, + the lazy function helps by only sampling once. + bias_adjust_mode : str + The mode for changing output bias : ['change-by-statistic', 'set-by-statistic'] + 'change-by-statistic' : perform predictions on labels of target dataset, + and do least square on the errors to obtain the target shift as bias. + 'set-by-statistic' : directly use the statistic output bias in the target dataset. + """ + self.atomic_model.change_out_bias( + merged, + bias_adjust_mode=bias_adjust_mode, + ) + + def forward_common_lower( + self, + extended_coord, + extended_atype, + nlist, + mapping: Optional[paddle.Tensor] = None, + fparam: Optional[paddle.Tensor] = None, + aparam: Optional[paddle.Tensor] = None, + do_atomic_virial: bool = False, + comm_dict: Optional[dict[str, paddle.Tensor]] = None, + extra_nlist_sort: bool = False, + ): + """Return model prediction. Lower interface that takes + extended atomic coordinates and types, nlist, and mapping + as input, and returns the predictions on the extended region. + The predictions are not reduced. + + Parameters + ---------- + extended_coord + coordinates in extended region. nf x (nall x 3) + extended_atype + atomic type in extended region. nf x nall + nlist + neighbor list. nf x nloc x nsel. + mapping + mapps the extended indices to local indices. nf x nall. + fparam + frame parameter. nf x ndf + aparam + atomic parameter. nf x nloc x nda + do_atomic_virial + whether calculate atomic virial. + comm_dict + The data needed for communication for parallel inference. + extra_nlist_sort + whether to forcibly sort the nlist. + + Returns + ------- + result_dict + the result dict, defined by the `FittingOutputDef`. 
+ + """ + nframes, nall = extended_atype.shape[:2] + extended_coord = extended_coord.reshape([nframes, -1, 3]) + nlist = self.format_nlist( + extended_coord, extended_atype, nlist, extra_nlist_sort=extra_nlist_sort + ) + cc_ext, _, fp, ap, input_prec = self.input_type_cast( + extended_coord, fparam=fparam, aparam=aparam + ) + del extended_coord, fparam, aparam + atomic_ret = self.atomic_model.forward_common_atomic( + cc_ext, + extended_atype, + nlist, + mapping=mapping, + fparam=fp, + aparam=ap, + comm_dict=comm_dict, + ) + model_predict = fit_output_to_model_output( + atomic_ret, + self.atomic_output_def(), + cc_ext, + do_atomic_virial=do_atomic_virial, + create_graph=self.training, + ) + model_predict = self.output_type_cast(model_predict, input_prec) + return model_predict + + def input_type_cast( + self, + coord: paddle.Tensor, + box: Optional[paddle.Tensor] = None, + fparam: Optional[paddle.Tensor] = None, + aparam: Optional[paddle.Tensor] = None, + ) -> tuple[ + paddle.Tensor, + Optional[paddle.Tensor], + Optional[paddle.Tensor], + Optional[paddle.Tensor], + str, + ]: + """Cast the input data to global float type.""" + input_prec = self.reverse_precision_dict[coord.dtype] + ### + ### type checking would not pass jit, convert to coord prec anyway + ### + # for vv, kk in zip([fparam, aparam], ["frame", "atomic"]): + # if vv is not None and self.reverse_precision_dict[vv.dtype] != input_prec: + # log.warning( + # f"type of {kk} parameter {self.reverse_precision_dict[vv.dtype]}" + # " does not match" + # f" that of the coordinate {input_prec}" + # ) + _lst: list[Optional[paddle.Tensor]] = [ + vv.astype(coord.dtype) if vv is not None else None + for vv in [box, fparam, aparam] + ] + box, fparam, aparam = _lst + if ( + input_prec + == self.reverse_precision_dict[self.global_pd_float_precision] + ): + return coord, box, fparam, aparam, input_prec + else: + pp = self.global_pd_float_precision + return ( + coord.to(pp), + box.to(pp) if box is not None else None, + fparam.to(pp) if fparam is not None else None, + aparam.to(pp) if aparam is not None else None, + input_prec, + ) + + def output_type_cast( + self, + model_ret: dict[str, paddle.Tensor], + input_prec: str, + ) -> dict[str, paddle.Tensor]: + """Convert the model output to the input prec.""" + do_cast = ( + input_prec + != self.reverse_precision_dict[self.global_pd_float_precision] + ) + pp = self.precision_dict[input_prec] + odef = self.model_output_def() + for kk in odef.keys(): + if kk not in model_ret.keys(): + # do not return energy_derv_c if not do_atomic_virial + continue + if check_operation_applied(odef[kk], OutputVariableOperation.REDU): + model_ret[kk] = ( + model_ret[kk].to(self.global_pd_ener_float_precision) + if model_ret[kk] is not None + else None + ) + elif do_cast: + model_ret[kk] = ( + model_ret[kk].to(pp) if model_ret[kk] is not None else None + ) + return model_ret + + def format_nlist( + self, + extended_coord: paddle.Tensor, + extended_atype: paddle.Tensor, + nlist: paddle.Tensor, + extra_nlist_sort: bool = False, + ): + """Format the neighbor list. + + 1. If the number of neighbors in the `nlist` is equal to sum(self.sel), + it does nothong + + 2. If the number of neighbors in the `nlist` is smaller than sum(self.sel), + the `nlist` is pad with -1. + + 3. If the number of neighbors in the `nlist` is larger than sum(self.sel), + the nearest sum(sel) neighbors will be preserved. + + Known limitations: + + In the case of not self.mixed_types, the nlist is always formatted. 
+ May have side effact on the efficiency. + + Parameters + ---------- + extended_coord + coordinates in extended region. nf x nall x 3 + extended_atype + atomic type in extended region. nf x nall + nlist + neighbor list. nf x nloc x nsel + extra_nlist_sort + whether to forcibly sort the nlist. + + Returns + ------- + formatted_nlist + the formatted nlist. + + """ + mixed_types = self.mixed_types() + nlist = self._format_nlist( + extended_coord, + nlist, + sum(self.get_sel()), + extra_nlist_sort=extra_nlist_sort, + ) + if not mixed_types: + nlist = nlist_distinguish_types(nlist, extended_atype, self.get_sel()) + return nlist + + def _format_nlist( + self, + extended_coord: paddle.Tensor, + nlist: paddle.Tensor, + nnei: int, + extra_nlist_sort: bool = False, + ): + n_nf, n_nloc, n_nnei = nlist.shape + # nf x nall x 3 + extended_coord = extended_coord.reshape([n_nf, -1, 3]) + rcut = self.get_rcut() + + if n_nnei < nnei: + nlist = paddle.concat( + [ + nlist, + -1 + * paddle.ones( + [n_nf, n_nloc, nnei - n_nnei], + dtype=nlist.dtype, + ).to(nlist.place), + ], + axis=-1, + ) + + if n_nnei > nnei or extra_nlist_sort: + n_nf, n_nloc, n_nnei = nlist.shape + m_real_nei = nlist >= 0 + nlist = paddle.where(m_real_nei, nlist, paddle.zeros_like(nlist)) + # nf x nloc x 3 + coord0 = extended_coord[:, :n_nloc, :] + # nf x (nloc x nnei) x 3 + index = nlist.reshape([n_nf, n_nloc * n_nnei, 1]).expand([-1, -1, 3]) + coord1 = decomp.take_along_axis(extended_coord, axis=1, indices=index) + # nf x nloc x nnei x 3 + coord1 = coord1.reshape([n_nf, n_nloc, n_nnei, 3]) + # nf x nloc x nnei + # rr = paddle.linalg.norm(coord0[:, :, None, :] - coord1, axis=-1) + rr = decomp.norm(coord0[:, :, None, :] - coord1, axis=-1) + rr = paddle.where(m_real_nei, rr, float("inf")) + rr, nlist_mapping = ( + paddle.sort(rr, axis=-1), + paddle.argsort(rr, axis=-1), + ) + nlist = decomp.take_along_axis(nlist, axis=2, indices=nlist_mapping) + nlist = paddle.where(rr > rcut, paddle.full_like(nlist, -1), nlist) + nlist = nlist[..., :nnei] + else: # not extra_nlist_sort and n_nnei <= nnei: + pass # great! + assert nlist.shape[-1] == nnei + return nlist + + def do_grad_r( + self, + var_name: Optional[str] = None, + ) -> bool: + """Tell if the output variable `var_name` is r_differentiable. + if var_name is None, returns if any of the variable is r_differentiable. + """ + return self.atomic_model.do_grad_r(var_name) + + def do_grad_c( + self, + var_name: Optional[str] = None, + ) -> bool: + """Tell if the output variable `var_name` is c_differentiable. + if var_name is None, returns if any of the variable is c_differentiable. + """ + return self.atomic_model.do_grad_c(var_name) + + def change_type_map( + self, type_map: list[str], model_with_new_type_stat=None + ) -> None: + """Change the type related params to new ones, according to `type_map` and the original one in the model. + If there are new types in `type_map`, statistics will be updated accordingly to `model_with_new_type_stat` for these new types. 
+ """ + self.atomic_model.change_type_map( + type_map=type_map, + model_with_new_type_stat=model_with_new_type_stat.atomic_model + if model_with_new_type_stat is not None + else None, + ) + + def serialize(self) -> dict: + return self.atomic_model.serialize() + + @classmethod + def deserialize(cls, data) -> "CM": + return cls(atomic_model_=T_AtomicModel.deserialize(data)) + + def get_dim_fparam(self) -> int: + """Get the number (dimension) of frame parameters of this atomic model.""" + return self.atomic_model.get_dim_fparam() + + def get_dim_aparam(self) -> int: + """Get the number (dimension) of atomic parameters of this atomic model.""" + return self.atomic_model.get_dim_aparam() + + def get_sel_type(self) -> list[int]: + """Get the selected atom types of this model. + + Only atoms with selected atom types have atomic contribution + to the result of the model. + If returning an empty list, all atom types are selected. + """ + return self.atomic_model.get_sel_type() + + def is_aparam_nall(self) -> bool: + """Check whether the shape of atomic parameters is (nframes, nall, ndim). + + If False, the shape is (nframes, nloc, ndim). + """ + return self.atomic_model.is_aparam_nall() + + def get_rcut(self) -> float: + """Get the cut-off radius.""" + return self.atomic_model.get_rcut() + + def get_type_map(self) -> list[str]: + """Get the type map.""" + return self.atomic_model.get_type_map() + + def get_nsel(self) -> int: + """Returns the total number of selected neighboring atoms in the cut-off radius.""" + return self.atomic_model.get_nsel() + + def get_nnei(self) -> int: + """Returns the total number of selected neighboring atoms in the cut-off radius.""" + return self.atomic_model.get_nnei() + + def atomic_output_def(self) -> FittingOutputDef: + """Get the output def of the atomic model.""" + return self.atomic_model.atomic_output_def() + + def compute_or_load_stat( + self, + sampled_func, + stat_file_path: Optional[DPPath] = None, + ): + """Compute or load the statistics.""" + return self.atomic_model.compute_or_load_stat(sampled_func, stat_file_path) + + def get_sel(self) -> list[int]: + """Returns the number of selected atoms for each type.""" + return self.atomic_model.get_sel() + + def mixed_types(self) -> bool: + """If true, the model + 1. assumes total number of atoms aligned across frames; + 2. uses a neighbor list that does not distinguish different atomic types. + + If false, the model + 1. assumes total number of atoms of each atom type aligned across frames; + 2. uses a neighbor list that distinguishes different atomic types. 
+ + """ + return self.atomic_model.mixed_types() + + def has_message_passing(self) -> bool: + """Returns whether the model has message passing.""" + return self.atomic_model.has_message_passing() + + def need_sorted_nlist_for_lower(self) -> bool: + """Returns whether the model needs sorted nlist when using `forward_lower`.""" + return self.atomic_model.need_sorted_nlist_for_lower() + + def forward( + self, + coord, + atype, + box: Optional[paddle.Tensor] = None, + fparam: Optional[paddle.Tensor] = None, + aparam: Optional[paddle.Tensor] = None, + do_atomic_virial: bool = False, + ) -> dict[str, paddle.Tensor]: + # directly call the forward_common method when no specific transform rule + return self.forward_common( + coord, + atype, + box, + fparam=fparam, + aparam=aparam, + do_atomic_virial=do_atomic_virial, + ) + + return CM diff --git a/deepmd/pd/model/model/model.py b/deepmd/pd/model/model/model.py new file mode 100644 index 0000000000..06a2c6910f --- /dev/null +++ b/deepmd/pd/model/model/model.py @@ -0,0 +1,55 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from typing import ( + Optional, +) + +import paddle + +from deepmd.dpmodel.model.base_model import ( + make_base_model, +) +from deepmd.utils.path import ( + DPPath, +) + + +class BaseModel(paddle.nn.Layer, make_base_model()): + def __init__(self, *args, **kwargs): + """Construct a basic model for different tasks.""" + paddle.nn.Layer.__init__(self) + self.model_def_script = "" + self.min_nbor_dist = None + + def compute_or_load_stat( + self, + sampled_func, + stat_file_path: Optional[DPPath] = None, + ): + """ + Compute or load the statistics parameters of the model, + such as mean and standard deviation of descriptors or the energy bias of the fitting net. + When `sampled` is provided, all the statistics parameters will be calculated (or re-calculated for update), + and saved in the `stat_file_path`(s). + When `sampled` is not provided, it will check the existence of `stat_file_path`(s) + and load the calculated statistics parameters. + + Parameters + ---------- + sampled_func + The sampled data frames from different data systems. + stat_file_path + The path to the statistics files. + """ + raise NotImplementedError + + def get_model_def_script(self) -> str: + """Get the model definition script.""" + return self.model_def_script + + def get_min_nbor_dist(self) -> Optional[float]: + """Get the minimum distance between two atoms.""" + return self.min_nbor_dist + + def get_ntypes(self): + """Returns the number of element types.""" + return len(self.get_type_map()) diff --git a/deepmd/pd/model/model/transform_output.py b/deepmd/pd/model/model/transform_output.py new file mode 100644 index 0000000000..469bfd3168 --- /dev/null +++ b/deepmd/pd/model/model/transform_output.py @@ -0,0 +1,262 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later + +import paddle + +from deepmd.dpmodel import ( + FittingOutputDef, + ModelOutputDef, + OutputVariableDef, + get_deriv_name, + get_reduce_name, +) +from deepmd.pd.utils import ( + decomp, + env, +) + + +def atomic_virial_corr( + extended_coord: paddle.Tensor, + atom_energy: paddle.Tensor, +): + nall = extended_coord.shape[1] + nloc = atom_energy.shape[1] + coord, _ = paddle.split(extended_coord, [nloc, nall - nloc], axis=1) + # no derivative with respect to the loc coord. 
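+    # The correction below computes, per Cartesian component, the gradient of
+    # sum_i(x_i * E_i) with respect to the extended coordinates. Detaching the
+    # local coordinates keeps the explicit x_i factor out of the gradient, so
+    # only the implicit dependence through E_i(x) remains.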
+ coord = coord.detach() + ce = coord * atom_energy + sumce0, sumce1, sumce2 = paddle.split(paddle.sum(ce, axis=1), [1, 1, 1], axis=-1) + # faked_grad = paddle.ones_like(sumce0) + extended_virial_corr0 = paddle.autograd.grad( + [sumce0], + [extended_coord], + # grad_outputs=lst, + create_graph=False, + retain_graph=True, + )[0] + assert extended_virial_corr0 is not None + extended_virial_corr1 = paddle.autograd.grad( + [sumce1], + [extended_coord], + # grad_outputs=lst, + create_graph=False, + retain_graph=True, + )[0] + assert extended_virial_corr1 is not None + extended_virial_corr2 = paddle.autograd.grad( + [sumce2], + [extended_coord], + # grad_outputs=lst, + create_graph=False, + retain_graph=True, + )[0] + assert extended_virial_corr2 is not None + extended_virial_corr = paddle.concat( + [ + extended_virial_corr0.unsqueeze(-1), + extended_virial_corr1.unsqueeze(-1), + extended_virial_corr2.unsqueeze(-1), + ], + axis=-1, + ) + return extended_virial_corr + + +def task_deriv_one( + atom_energy: paddle.Tensor, + energy: paddle.Tensor, + extended_coord: paddle.Tensor, + do_virial: bool = True, + do_atomic_virial: bool = False, + create_graph: bool = True, +): + # faked_grad = paddle.ones_like(energy) + # lst = paddle.jit.annotate(List[Optional[paddle.Tensor]], [faked_grad]) + extended_force = paddle.autograd.grad( + [energy], + [extended_coord], + # grad_outputs=lst, + create_graph=create_graph, + retain_graph=True, + )[0] + assert extended_force is not None + extended_force = -extended_force + if do_virial: + extended_virial = extended_force.unsqueeze(-1) @ extended_coord.unsqueeze(-2) + # the correction sums to zero, which does not contribute to global virial + if do_atomic_virial: + extended_virial_corr = atomic_virial_corr(extended_coord, atom_energy) + extended_virial = extended_virial + extended_virial_corr + # to [...,3,3] -> [...,9] + extended_virial = extended_virial.reshape( + [*list(extended_virial.shape[:-2]), 9] + ) + else: + extended_virial = None + return extended_force, extended_virial + + +def get_leading_dims( + vv: paddle.Tensor, + vdef: OutputVariableDef, +): + """Get the dimensions of nf x nloc.""" + vshape = vv.shape + return list(vshape[: (len(vshape) - len(vdef.shape))]) + + +def take_deriv( + vv: paddle.Tensor, + svv: paddle.Tensor, + vdef: OutputVariableDef, + coord_ext: paddle.Tensor, + do_virial: bool = False, + do_atomic_virial: bool = False, + create_graph: bool = True, +): + size = 1 + for ii in vdef.shape: + size *= ii + vv1 = vv.reshape(list(get_leading_dims(vv, vdef)) + [size]) # noqa: RUF005 + svv1 = svv.reshape(list(get_leading_dims(svv, vdef)) + [size]) # noqa: RUF005 + split_vv1 = paddle.split(vv1, [1] * size, axis=-1) + split_svv1 = paddle.split(svv1, [1] * size, axis=-1) + split_ff, split_avir = [], [] + for vvi, svvi in zip(split_vv1, split_svv1): + # nf x nloc x 3, nf x nloc x 9 + ffi, aviri = task_deriv_one( + vvi, + svvi, + coord_ext, + do_virial=do_virial, + do_atomic_virial=do_atomic_virial, + create_graph=create_graph, + ) + # nf x nloc x 1 x 3, nf x nloc x 1 x 9 + ffi = ffi.unsqueeze(-2) + split_ff.append(ffi) + if do_virial: + assert aviri is not None + aviri = aviri.unsqueeze(-2) + split_avir.append(aviri) + # nf x nall x v_dim x 3, nf x nall x v_dim x 9 + out_lead_shape = list(coord_ext.shape[:-1]) + vdef.shape + ff = paddle.concat(split_ff, axis=-2).reshape(out_lead_shape + [3]) # noqa: RUF005 + if do_virial: + avir = paddle.concat(split_avir, axis=-2).reshape(out_lead_shape + [9]) # noqa: RUF005 + else: + avir = None + return ff, avir 
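+
+# A minimal sketch of how `take_deriv` is typically driven (the tensors below
+# are hypothetical placeholders, assuming an energy-like scalar output with
+# vdef.shape == [1]; this is illustration, not part of the module's API):
+#
+#     vv = atom_energy                 # nf x nloc x 1, per-atom fitting output
+#     svv = paddle.sum(vv, axis=1)     # nf x 1, reduced value that autograd differentiates
+#     ff, avir = take_deriv(vv, svv, vdef, coord_ext, do_virial=True)
+#     # ff:   nf x nall x 1 x 3, negative gradient (forces)
+#     # avir: nf x nall x 1 x 9, per-atom virial contributions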
+ + +def fit_output_to_model_output( + fit_ret: dict[str, paddle.Tensor], + fit_output_def: FittingOutputDef, + coord_ext: paddle.Tensor, + do_atomic_virial: bool = False, + create_graph: bool = True, +) -> dict[str, paddle.Tensor]: + """Transform the output of the fitting network to + the model output. + + """ + redu_prec = env.GLOBAL_PD_ENER_FLOAT_PRECISION + model_ret = dict(fit_ret.items()) + for kk, vv in fit_ret.items(): + vdef = fit_output_def[kk] + shap = vdef.shape + atom_axis = -(len(shap) + 1) + if vdef.reducible: + kk_redu = get_reduce_name(kk) + if vdef.intensive: + model_ret[kk_redu] = paddle.mean(vv.astype(redu_prec), axis=atom_axis) + else: + model_ret[kk_redu] = paddle.sum(vv.astype(redu_prec), axis=atom_axis) + if vdef.r_differentiable: + kk_derv_r, kk_derv_c = get_deriv_name(kk) + dr, dc = take_deriv( + vv, + model_ret[kk_redu], + vdef, + coord_ext, + do_virial=vdef.c_differentiable, + do_atomic_virial=do_atomic_virial, + create_graph=create_graph, + ) + model_ret[kk_derv_r] = dr + if vdef.c_differentiable: + assert dc is not None + model_ret[kk_derv_c] = dc + model_ret[kk_derv_c + "_redu"] = paddle.sum( + model_ret[kk_derv_c].astype(redu_prec), axis=1 + ) + return model_ret + + +def communicate_extended_output( + model_ret: dict[str, paddle.Tensor], + model_output_def: ModelOutputDef, + mapping: paddle.Tensor, # nf x nloc + do_atomic_virial: bool = False, +) -> dict[str, paddle.Tensor]: + """Transform the output of the model network defined on + local and ghost (extended) atoms to local atoms. + + """ + redu_prec = env.GLOBAL_PD_ENER_FLOAT_PRECISION + new_ret = {} + for kk in model_output_def.keys_outp(): + vv = model_ret[kk] + vdef = model_output_def[kk] + new_ret[kk] = vv + if vdef.reducible: + kk_redu = get_reduce_name(kk) + new_ret[kk_redu] = model_ret[kk_redu] + # nf x nloc + vldims = get_leading_dims(vv, vdef) + # nf x nall + mldims = list(mapping.shape) + kk_derv_r, kk_derv_c = get_deriv_name(kk) + if vdef.r_differentiable: + # vdim x 3 + derv_r_ext_dims = list(vdef.shape) + [3] # noqa:RUF005 + mapping = mapping.reshape(mldims + [1] * len(derv_r_ext_dims)).expand( + [-1] * len(mldims) + derv_r_ext_dims + ) + force = paddle.zeros(vldims + derv_r_ext_dims, dtype=vv.dtype).to( + device=vv.place + ) + # nf x nloc x nvar x 3 + new_ret[kk_derv_r] = decomp.scatter_reduce( + force, + 1, + index=mapping, + src=model_ret[kk_derv_r], + reduce="sum", + ) + if vdef.c_differentiable: + assert vdef.r_differentiable + derv_c_ext_dims = list(vdef.shape) + [9] # noqa:RUF005 + # nf x nloc x nvar x 3 -> nf x nloc x nvar x 9 + mapping = paddle.tile( + mapping, + [1] * (len(mldims) + len(vdef.shape)) + [3], + ) + virial = paddle.zeros(vldims + derv_c_ext_dims, dtype=vv.dtype).to( + device=vv.place + ) + # nf x nloc x nvar x 9 + new_ret[kk_derv_c] = decomp.scatter_reduce( + virial, + 1, + index=mapping, + src=model_ret[kk_derv_c], + reduce="sum", + ) + new_ret[kk_derv_c + "_redu"] = paddle.sum( + new_ret[kk_derv_c].to(redu_prec), axis=1 + ) + if not do_atomic_virial: + # pop atomic virial, because it is not correctly calculated. 
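+                        # only the reduced virial `kk_derv_c + "_redu"` computed
+                        # above is kept in that case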
+ new_ret.pop(kk_derv_c) + return new_ret diff --git a/deepmd/pd/model/network/__init__.py b/deepmd/pd/model/network/__init__.py new file mode 100644 index 0000000000..6ceb116d85 --- /dev/null +++ b/deepmd/pd/model/network/__init__.py @@ -0,0 +1 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later diff --git a/deepmd/pd/model/network/init.py b/deepmd/pd/model/network/init.py new file mode 100644 index 0000000000..dbdad56794 --- /dev/null +++ b/deepmd/pd/model/network/init.py @@ -0,0 +1,458 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later + +# Copyright (c) 2024 The PyTorch Authors. All rights reserved. +# +# This file includes source code from PyTorch of version v2.3.0, which is released under the BSD-3-Clause license. +# For more information about PyTorch, visit https://pytorch.org/. + + +# These no_grad_* functions are necessary as wrappers around the parts of these +# functions that use `with paddle.no_grad()`. The JIT doesn't support context +# managers, so these need to be implemented as builtins. Using these wrappers +# lets us keep those builtins small and re-usable. + +from __future__ import ( + annotations, +) + +import math +import warnings + +import paddle +from paddle import ( + Tensor, +) + +PaddleGenerator = paddle.base.libpaddle.Generator + + +def _no_grad_uniform_(tensor: paddle.Tensor, a, b, generator=None): + with paddle.no_grad(): + return tensor.uniform_(a, b) + + +def _no_grad_normal_(tensor: paddle.Tensor, mean, std, generator=None): + with paddle.no_grad(): + return tensor.normal_(mean, std) + + +def _no_grad_trunc_normal_(tensor: paddle.Tensor, mean, std, a, b, generator=None): + # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf + def norm_cdf(x): + # Computes standard normal cumulative distribution function + return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0 + + if (mean < a - 2 * std) or (mean > b + 2 * std): + warnings.warn( + "mean is more than 2 std from [a, b] in nn.init.trunc_normal_. " + "The distribution of values may be incorrect.", + stacklevel=2, + ) + + with paddle.no_grad(): + # Values are generated by using a truncated uniform distribution and + # then using the inverse CDF for the normal distribution. + # Get upper and lower cdf values + l = norm_cdf((a - mean) / std) + u = norm_cdf((b - mean) / std) + + # Uniformly fill tensor with values from [l, u], then translate to + # [2l-1, 2u-1]. + tensor.uniform_(2 * l - 1, 2 * u - 1) + + # Use inverse cdf transform for normal distribution to get truncated + # standard normal + tensor.erfinv_() + + # Transform to proper mean, std + tensor.multiply_(std * math.sqrt(2.0)) + tensor.add_(mean) + + # Clamp to ensure it's in the proper range + tensor.clip_(min=a, max=b) + return tensor + + +def _no_grad_zero_(tensor: paddle.Tensor): + with paddle.no_grad(): + return tensor.zero_() + + +def _no_grad_fill_(tensor: paddle.Tensor, val): + with paddle.no_grad(): + return tensor.fill_(val) + + +def calculate_gain(nonlinearity, param=None): + r"""Return the recommended gain value for the given nonlinearity function. 
+ + The values are as follows: + + ================= ==================================================== + nonlinearity gain + ================= ==================================================== + Linear / Identity :math:`1` + Conv{1,2,3}D :math:`1` + Sigmoid :math:`1` + Tanh :math:`\frac{5}{3}` + ReLU :math:`\sqrt{2}` + Leaky Relu :math:`\sqrt{\frac{2}{1 + \text{negative\_slope}^2}}` + SELU :math:`\frac{3}{4}` + ================= ==================================================== + + .. warning:: + In order to implement `Self-Normalizing Neural Networks`_ , + you should use ``nonlinearity='linear'`` instead of ``nonlinearity='selu'``. + This gives the initial weights a variance of ``1 / N``, + which is necessary to induce a stable fixed point in the forward pass. + In contrast, the default gain for ``SELU`` sacrifices the normalization + effect for more stable gradient flow in rectangular layers. + + Args: + nonlinearity: the non-linear function (`nn.functional` name) + param: optional parameter for the non-linear function + + Examples + -------- + >>> gain = nn.init.calculate_gain( + ... "leaky_relu", 0.2 + ... ) # leaky_relu with negative_slope=0.2 + + .. _Self-Normalizing Neural Networks: https://papers.nips.cc/paper/2017/hash/5d44ee6f2c3f71b73125876103c8f6c4-Abstract.html + """ + linear_fns = [ + "linear", + "conv1d", + "conv2d", + "conv3d", + "conv_transpose1d", + "conv_transpose2d", + "conv_transpose3d", + ] + if nonlinearity in linear_fns or nonlinearity == "sigmoid": + return 1 + elif nonlinearity == "tanh": + return 5.0 / 3 + elif nonlinearity == "relu": + return math.sqrt(2.0) + elif nonlinearity == "leaky_relu": + if param is None: + negative_slope = 0.01 + elif ( + not isinstance(param, bool) + and isinstance(param, int) + or isinstance(param, float) + ): + # True/False are instances of int, hence check above + negative_slope = param + else: + raise ValueError(f"negative_slope {param} not a valid number") + return math.sqrt(2.0 / (1 + negative_slope**2)) + elif nonlinearity == "selu": + return ( + 3.0 / 4 + ) # Value found empirically (https://github.com/pytorch/pytorch/pull/50664) + else: + raise ValueError(f"Unsupported nonlinearity {nonlinearity}") + + +def _calculate_fan_in_and_fan_out(tensor, reverse=False): + dimensions = tensor.ndim + if dimensions < 2: + raise ValueError( + "Fan in and fan out can not be computed for tensor with fewer than 2 dimensions" + ) + + if reverse: + num_input_fmaps, num_output_fmaps = tensor.shape[0], tensor.shape[1] + else: + num_input_fmaps, num_output_fmaps = tensor.shape[1], tensor.shape[0] + + receptive_field_size = 1 + if tensor.ndim > 2: + for s in tensor.shape[2:]: + receptive_field_size *= s + fan_in = num_input_fmaps * receptive_field_size + fan_out = num_output_fmaps * receptive_field_size + + return fan_in, fan_out + + +def _calculate_correct_fan(tensor, mode, reverse=False): + mode = mode.lower() + valid_modes = ["fan_in", "fan_out"] + if mode not in valid_modes: + raise ValueError(f"Mode {mode} not supported, please use one of {valid_modes}") + + fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor, reverse) + return fan_in if mode == "fan_in" else fan_out + + +def zeros_(tensor: Tensor) -> Tensor: + r"""Fill the input Tensor with the scalar value `0`. + + Args: + tensor: an n-dimensional `paddle.Tensor` + + Examples + -------- + >>> w = paddle.empty(3, 5) + >>> nn.init.zeros_(w) + """ + return _no_grad_zero_(tensor) + + +def ones_(tensor: Tensor) -> Tensor: + r"""Fill the input Tensor with the scalar value `1`. 
+ + Args: + tensor: an n-dimensional `paddle.Tensor` + + Examples + -------- + >>> w = paddle.empty(3, 5) + >>> nn.init.ones_(w) + """ + return _no_grad_fill_(tensor, 1.0) + + +def constant_(tensor: Tensor, val: float) -> Tensor: + r"""Fill the input Tensor with the value :math:`\text{val}`. + + Args: + tensor: an n-dimensional `paddle.Tensor` + val: the value to fill the tensor with + + Examples + -------- + >>> w = paddle.empty(3, 5) + >>> nn.init.constant_(w, 0.3) + """ + return _no_grad_fill_(tensor, val) + + +def normal_( + tensor: Tensor, + mean: float = 0.0, + std: float = 1.0, + generator: PaddleGenerator | None = None, +) -> Tensor: + r"""Fill the input Tensor with values drawn from the normal distribution. + + :math:`\mathcal{N}(\text{mean}, \text{std}^2)`. + + Args: + tensor: an n-dimensional `paddle.Tensor` + mean: the mean of the normal distribution + std: the standard deviation of the normal distribution + generator: the paddle Generator to sample from (default: None) + + Examples + -------- + >>> w = paddle.empty(3, 5) + >>> nn.init.normal_(w) + """ + return _no_grad_normal_(tensor, mean, std, generator) + + +def trunc_normal_( + tensor: Tensor, + mean: float = 0.0, + std: float = 1.0, + a: float = -2.0, + b: float = 2.0, + generator: PaddleGenerator | None = None, +) -> Tensor: + r"""Fill the input Tensor with values drawn from a truncated normal distribution. + + The values are effectively drawn from the + normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)` + with values outside :math:`[a, b]` redrawn until they are within + the bounds. The method used for generating the random values works + best when :math:`a \leq \text{mean} \leq b`. + + Args: + tensor: an n-dimensional `paddle.Tensor` + mean: the mean of the normal distribution + std: the standard deviation of the normal distribution + a: the minimum cutoff value + b: the maximum cutoff value + generator: the paddle Generator to sample from (default: None) + + Examples + -------- + >>> w = paddle.empty(3, 5) + >>> nn.init.trunc_normal_(w) + """ + return _no_grad_trunc_normal_(tensor, mean, std, a, b) + + +def kaiming_uniform_( + tensor: Tensor, + a: float = 0, + mode: str = "fan_in", + nonlinearity: str = "leaky_relu", + generator: PaddleGenerator | None = None, + reverse: bool = False, +): + r"""Fill the input `Tensor` with values using a Kaiming uniform distribution. + + The method is described in `Delving deep into rectifiers: Surpassing + human-level performance on ImageNet classification` - He, K. et al. (2015). + The resulting tensor will have values sampled from + :math:`\mathcal{U}(-\text{bound}, \text{bound})` where + + .. math:: + \text{bound} = \text{gain} \times \sqrt{\frac{3}{\text{fan\_mode}}} + + Also known as He initialization. + + Args: + tensor: an n-dimensional `paddle.Tensor` + a: the negative slope of the rectifier used after this layer (only + used with ``'leaky_relu'``) + mode: either ``'fan_in'`` (default) or ``'fan_out'``. Choosing ``'fan_in'`` + preserves the magnitude of the variance of the weights in the + forward pass. Choosing ``'fan_out'`` preserves the magnitudes in the + backwards pass. + nonlinearity: the non-linear function (`nn.functional` name), + recommended to use only with ``'relu'`` or ``'leaky_relu'`` (default). + generator: the paddle Generator to sample from (default: None) + reverse (bool, optional): Tensor data format order, False by default as + [fout, fin, ...].. Defaults to False. 
+ + Examples + -------- + >>> w = paddle.empty(3, 5) + >>> nn.init.kaiming_uniform_(w, mode="fan_in", nonlinearity="relu") + """ + if 0 in tensor.shape: + warnings.warn("Initializing zero-element tensors is a no-op") + return tensor + fan = _calculate_correct_fan(tensor, mode, reverse) + gain = calculate_gain(nonlinearity, a) + std = gain / math.sqrt(fan) + bound = math.sqrt(3.0) * std # Calculate uniform bounds from standard deviation + with paddle.no_grad(): + return tensor.uniform_(-bound, bound) + + +def kaiming_normal_( + tensor: Tensor, + a: float = 0, + mode: str = "fan_in", + nonlinearity: str = "leaky_relu", + generator: PaddleGenerator | None = None, + reverse: bool = False, +): + r"""Fill the input `Tensor` with values using a Kaiming normal distribution. + + The method is described in `Delving deep into rectifiers: Surpassing + human-level performance on ImageNet classification` - He, K. et al. (2015). + The resulting tensor will have values sampled from + :math:`\mathcal{N}(0, \text{std}^2)` where + + .. math:: + \text{std} = \frac{\text{gain}}{\sqrt{\text{fan\_mode}}} + + Also known as He initialization. + + Args: + tensor: an n-dimensional `paddle.Tensor` + a: the negative slope of the rectifier used after this layer (only + used with ``'leaky_relu'``) + mode: either ``'fan_in'`` (default) or ``'fan_out'``. Choosing ``'fan_in'`` + preserves the magnitude of the variance of the weights in the + forward pass. Choosing ``'fan_out'`` preserves the magnitudes in the + backwards pass. + nonlinearity: the non-linear function (`nn.functional` name), + recommended to use only with ``'relu'`` or ``'leaky_relu'`` (default). + generator: the paddle Generator to sample from (default: None) + reverse (bool, optional): Tensor data format order, False by default as + [fout, fin, ...].. Defaults to False. + + Examples + -------- + >>> w = paddle.empty(3, 5) + >>> nn.init.kaiming_normal_(w, mode="fan_out", nonlinearity="relu") + """ + if 0 in tensor.shape: + warnings.warn("Initializing zero-element tensors is a no-op") + return tensor + fan = _calculate_correct_fan(tensor, mode, reverse) + gain = calculate_gain(nonlinearity, a) + std = gain / math.sqrt(fan) + with paddle.no_grad(): + return tensor.normal_(0, std) + + +def xavier_uniform_( + tensor: Tensor, + gain: float = 1.0, + generator: PaddleGenerator | None = None, + reverse: bool = False, +) -> Tensor: + r"""Fill the input `Tensor` with values using a Xavier uniform distribution. + + The method is described in `Understanding the difficulty of training + deep feedforward neural networks` - Glorot, X. & Bengio, Y. (2010). + The resulting tensor will have values sampled from + :math:`\mathcal{U}(-a, a)` where + + .. math:: + a = \text{gain} \times \sqrt{\frac{6}{\text{fan\_in} + \text{fan\_out}}} + + Also known as Glorot initialization. + + Args: + tensor: an n-dimensional `paddle.Tensor` + gain: an optional scaling factor + generator: the paddle Generator to sample from (default: None) + reverse (bool, optional): Tensor data format order, False by default as + [fout, fin, ...].. Defaults to False. 
+ + Examples + -------- + >>> w = paddle.empty(3, 5) + >>> nn.init.xavier_uniform_(w, gain=nn.init.calculate_gain("relu")) + """ + fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor, reverse=reverse) + std = gain * math.sqrt(2.0 / float(fan_in + fan_out)) + a = math.sqrt(3.0) * std # Calculate uniform bounds from standard deviation + + return _no_grad_uniform_(tensor, -a, a, generator) + + +def xavier_normal_( + tensor: Tensor, + gain: float = 1.0, + generator: PaddleGenerator | None = None, + reverse: bool = False, +) -> Tensor: + r"""Fill the input `Tensor` with values using a Xavier normal distribution. + + The method is described in `Understanding the difficulty of training deep feedforward + neural networks` - Glorot, X. & Bengio, Y. (2010). The resulting tensor + will have values sampled from :math:`\mathcal{N}(0, \text{std}^2)` where + + .. math:: + \text{std} = \text{gain} \times \sqrt{\frac{2}{\text{fan\_in} + \text{fan\_out}}} + + Also known as Glorot initialization. + + Args: + tensor: an n-dimensional `paddle.Tensor` + gain: an optional scaling factor + generator: the paddle Generator to sample from (default: None) + reverse (bool, optional): Tensor data format order, False by + default as [fout, fin, ...]. Defaults to False. + + Examples + -------- + >>> w = paddle.empty(3, 5) + >>> nn.init.xavier_normal_(w) + """ + fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor, reverse=reverse) + std = gain * math.sqrt(2.0 / float(fan_in + fan_out)) + + return _no_grad_normal_(tensor, 0.0, std, generator) diff --git a/deepmd/pd/model/network/mlp.py b/deepmd/pd/model/network/mlp.py new file mode 100644 index 0000000000..370b0fa8fa --- /dev/null +++ b/deepmd/pd/model/network/mlp.py @@ -0,0 +1,328 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from __future__ import ( + annotations, +) + +from typing import ( + ClassVar, +) + +import numpy as np +import paddle +import paddle.nn as nn + +from deepmd.pd.utils import ( + env, +) + +device = env.DEVICE + +from deepmd.dpmodel.utils import ( + NativeLayer, +) +from deepmd.dpmodel.utils import NetworkCollection as DPNetworkCollection +from deepmd.dpmodel.utils import ( + make_embedding_network, + make_fitting_network, + make_multilayer_network, +) +from deepmd.pd.model.network.init import ( + PaddleGenerator, + kaiming_normal_, + normal_, + trunc_normal_, + xavier_uniform_, +) +from deepmd.pd.utils.env import ( + DEFAULT_PRECISION, + PRECISION_DICT, +) +from deepmd.pd.utils.utils import ( + ActivationFn, + get_generator, + to_numpy_array, + to_paddle_tensor, +) + + +def empty_t(shape, precision): + return paddle.empty(shape, dtype=precision).to(device=device) + + +class Identity(nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + xx: paddle.Tensor, + ) -> paddle.Tensor: + """The Identity operation layer.""" + return xx + + def serialize(self) -> dict: + return { + "@class": "Identity", + "@version": 1, + } + + @classmethod + def deserialize(cls, data: dict) -> Identity: + return Identity() + + +class MLPLayer(nn.Layer): + def __init__( + self, + num_in, + num_out, + bias: bool = True, + use_timestep: bool = False, + activation_function: str | None = None, + resnet: bool = False, + bavg: float = 0.0, + stddev: float = 1.0, + precision: str = DEFAULT_PRECISION, + init: str = "default", + seed: int | list[int] | None = None, + ): + super().__init__() + # only use_timestep when skip connection is established. 
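+        # (`idt` realizes the learnable time step of the ResNet update
+        # y = x + dt * phi(W x + b); it is only created when the output width
+        # equals the input width, or twice it, so the skip connection in
+        # `forward` is well-defined)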
+        self.use_timestep = use_timestep and (
+            num_out == num_in or num_out == num_in * 2
+        )
+        self.num_in = num_in
+        self.num_out = num_out
+        self.activate_name = activation_function
+        self.activate = ActivationFn(self.activate_name)
+        self.precision = precision
+        self.prec = PRECISION_DICT[self.precision]
+        self.matrix = self.create_parameter(
+            (num_in, num_out),
+            dtype=self.prec,
+            default_initializer=nn.initializer.Assign(
+                empty_t((num_in, num_out), self.prec)
+            ),
+        )
+        random_generator = get_generator(seed)
+        if bias:
+            self.bias = self.create_parameter(
+                [num_out],
+                dtype=self.prec,
+                default_initializer=nn.initializer.Assign(
+                    empty_t([num_out], self.prec)
+                ),
+            )
+        else:
+            self.bias = None
+        if self.use_timestep:
+            self.idt = self.create_parameter(
+                [num_out],
+                dtype=self.prec,
+                default_initializer=nn.initializer.Assign(
+                    empty_t([num_out], self.prec)
+                ),
+            )
+        else:
+            self.idt = None
+        self.resnet = resnet
+        if init == "default":
+            self._default_normal_init(
+                bavg=bavg, stddev=stddev, generator=random_generator
+            )
+        elif init == "trunc_normal":
+            self._trunc_normal_init(1.0, generator=random_generator)
+        elif init == "relu":
+            self._trunc_normal_init(2.0, generator=random_generator)
+        elif init == "glorot":
+            self._glorot_uniform_init(generator=random_generator)
+        elif init == "gating":
+            self._zero_init(bias)
+        elif init == "kaiming_normal":
+            self._normal_init(generator=random_generator)
+        elif init == "final":
+            self._zero_init(False)
+        else:
+            raise ValueError(f"Unknown initialization method: {init}")
+
+    def check_type_consistency(self):
+        precision = self.precision
+
+        def check_var(var):
+            if var is not None:
+                # assertion "float64" == "double" would fail
+                assert PRECISION_DICT[var.dtype.name] is PRECISION_DICT[precision]
+
+        check_var(self.matrix)
+        check_var(self.bias)
+        check_var(self.idt)
+
+    def dim_in(self) -> int:
+        return self.matrix.shape[0]
+
+    def dim_out(self) -> int:
+        return self.matrix.shape[1]
+
+    def _default_normal_init(
+        self,
+        bavg: float = 0.0,
+        stddev: float = 1.0,
+        generator: PaddleGenerator | None = None,
+    ):
+        normal_(
+            self.matrix.data,
+            std=stddev / np.sqrt(self.num_out + self.num_in),
+            generator=generator,
+        )
+        if self.bias is not None:
+            normal_(self.bias.data, mean=bavg, std=stddev, generator=generator)
+        if self.idt is not None:
+            normal_(self.idt.data, mean=0.1, std=0.001, generator=generator)
+
+    def _trunc_normal_init(self, scale=1.0, generator: PaddleGenerator | None = None):
+        # Constant from scipy.stats.truncnorm.std(a=-2, b=2, loc=0., scale=1.)
+        TRUNCATED_NORMAL_STDDEV_FACTOR = 0.87962566103423978
+        _, fan_in = self.matrix.shape
+        scale = scale / max(1, fan_in)
+        std = (scale**0.5) / TRUNCATED_NORMAL_STDDEV_FACTOR
+        trunc_normal_(self.matrix, mean=0.0, std=std, generator=generator)
+
+    def _glorot_uniform_init(self, generator: PaddleGenerator | None = None):
+        xavier_uniform_(self.matrix, gain=1, generator=generator)
+
+    def _zero_init(self, use_bias=True):
+        with paddle.no_grad():
+            self.matrix.fill_(0.0)
+            if use_bias and self.bias is not None:
+                with paddle.no_grad():
+                    self.bias.fill_(1.0)
+
+    def _normal_init(self, generator: PaddleGenerator | None = None):
+        kaiming_normal_(self.matrix, nonlinearity="linear", generator=generator)
+
+    def forward(
+        self,
+        xx: paddle.Tensor,
+    ) -> paddle.Tensor:
+        """One MLP layer used by DP model.
+
+        Parameters
+        ----------
+        xx : paddle.Tensor
+            The input.
+
+        Returns
+        -------
+        yy: paddle.Tensor
+            The output.
+ """ + ori_prec = xx.dtype + xx = xx.astype(self.prec) + yy = ( + paddle.matmul(xx, self.matrix) + self.bias + if self.bias is not None + else paddle.matmul(xx, self.matrix) + ) + yy = self.activate(yy).clone() + yy = yy * self.idt if self.idt is not None else yy + if self.resnet: + if xx.shape[-1] == yy.shape[-1]: + yy += xx + elif 2 * xx.shape[-1] == yy.shape[-1]: + yy += paddle.concat([xx, xx], axis=-1) + # else: + # yy = yy + yy = yy.astype(ori_prec) + return yy + + def serialize(self) -> dict: + """Serialize the layer to a dict. + + Returns + ------- + dict + The serialized layer. + """ + nl = NativeLayer( + self.matrix.shape[0], + self.matrix.shape[1], + bias=self.bias is not None, + use_timestep=self.idt is not None, + activation_function=self.activate_name, + resnet=self.resnet, + precision=self.precision, + ) + nl.w, nl.b, nl.idt = ( + to_numpy_array(self.matrix), + to_numpy_array(self.bias), + to_numpy_array(self.idt), + ) + return nl.serialize() + + @classmethod + def deserialize(cls, data: dict) -> MLPLayer: + """Deserialize the layer from a dict. + + Parameters + ---------- + data : dict + The dict to deserialize from. + """ + nl = NativeLayer.deserialize(data) + obj = cls( + nl["matrix"].shape[0], + nl["matrix"].shape[1], + bias=nl["bias"] is not None, + use_timestep=nl["idt"] is not None, + activation_function=nl["activation_function"], + resnet=nl["resnet"], + precision=nl["precision"], + ) + prec = PRECISION_DICT[obj.precision] + + def check_load_param(ss): + if nl[ss] is not None: + tensor = to_paddle_tensor(nl[ss]) + return paddle.create_parameter( + tensor.shape, + dtype=tensor.dtype, + default_initializer=nn.initializer.Assign(tensor), + ) + return None + + obj.matrix = check_load_param("matrix") + obj.bias = check_load_param("bias") + obj.idt = check_load_param("idt") + return obj + + +MLP_ = make_multilayer_network(MLPLayer, nn.Layer) + + +class MLP(MLP_): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.layers = paddle.nn.LayerList(self.layers) + + forward = MLP_.call + + +EmbeddingNet = make_embedding_network(MLP, MLPLayer) + +FittingNet = make_fitting_network(EmbeddingNet, MLP, MLPLayer) + + +class NetworkCollection(DPNetworkCollection, nn.Layer): + """Paddle implementation of NetworkCollection.""" + + NETWORK_TYPE_MAP: ClassVar[dict[str, type]] = { + "network": MLP, + "embedding_network": EmbeddingNet, + "fitting_network": FittingNet, + } + + def __init__(self, *args, **kwargs): + # init both two base classes + DPNetworkCollection.__init__(self, *args, **kwargs) + nn.Layer.__init__(self) + self.networks = self._networks = paddle.nn.LayerList(self._networks) diff --git a/deepmd/pd/model/network/network.py b/deepmd/pd/model/network/network.py new file mode 100644 index 0000000000..f118c234ab --- /dev/null +++ b/deepmd/pd/model/network/network.py @@ -0,0 +1,325 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from typing import ( + Optional, + Union, +) + +import paddle +import paddle.nn as nn + +from deepmd.dpmodel.utils.type_embed import ( + get_econf_tebd, +) +from deepmd.pd.model.network.mlp import ( + EmbeddingNet, +) +from deepmd.pd.utils import ( + env, +) +from deepmd.pd.utils.utils import ( + to_paddle_tensor, +) +from deepmd.utils.finetune import ( + get_index_between_two_maps, +) +from deepmd.utils.version import ( + check_version_compatibility, +) + + +def Tensor(*shape): + return paddle.empty(shape, dtype=env.GLOBAL_PD_FLOAT_PRECISION).to( + device=env.DEVICE + ) + + +class TypeEmbedNet(nn.Layer): + def __init__( + 
self,
+        type_nums,
+        embed_dim,
+        bavg=0.0,
+        stddev=1.0,
+        precision="default",
+        seed: Optional[Union[int, list[int]]] = None,
+        use_econf_tebd=False,
+        use_tebd_bias: bool = False,
+        type_map=None,
+    ):
+        """Construct a type embedding net."""
+        super().__init__()
+        self.type_nums = type_nums
+        self.embed_dim = embed_dim
+        self.bavg = bavg
+        self.stddev = stddev
+        self.use_econf_tebd = use_econf_tebd
+        self.use_tebd_bias = use_tebd_bias
+        self.type_map = type_map
+        self.embedding = TypeEmbedNetConsistent(
+            ntypes=self.type_nums,
+            neuron=[self.embed_dim],
+            padding=True,
+            activation_function="Linear",
+            use_econf_tebd=use_econf_tebd,
+            use_tebd_bias=use_tebd_bias,
+            type_map=type_map,
+            precision=precision,
+            seed=seed,
+        )
+        # init.normal_(self.embedding.weight[:-1], mean=bavg, std=stddev)
+
+    def forward(self, atype):
+        """
+        Args:
+            atype: Type of each input, [nframes, nloc] or [nframes, nloc, nnei].
+
+        Returns
+        -------
+        type_embedding:
+            The type embedding tensor, gathered for the given atom types.
+
+        """
+        return self.embedding(atype.place)[atype]
+
+    def share_params(self, base_class, shared_level, resume=False):
+        """
+        Share the parameters of self to the base_class with shared_level during multitask training.
+        If not starting from a checkpoint (resume is False),
+        some separated parameters (e.g. mean and stddev) will be re-calculated across different classes.
+        """
+        assert (
+            self.__class__ == base_class.__class__
+        ), "Only TypeEmbedNet of the same type can share params!"
+        if shared_level == 0:
+            # the following will successfully link all the params except buffers, which need manual linking.
+            for item in self._sub_layers:
+                self._sub_layers[item] = base_class._sub_layers[item]
+        else:
+            raise NotImplementedError
+
+    def change_type_map(
+        self, type_map: list[str], model_with_new_type_stat=None
+    ) -> None:
+        """Change the type related params to new ones, according to `type_map` and the original one in the model.
+        If there are new types in `type_map`, statistics for these new types will be updated according to `model_with_new_type_stat`.
+        """
+        self.embedding.change_type_map(type_map=type_map)
+
+
+class TypeEmbedNetConsistent(nn.Layer):
+    r"""Type embedding network that is consistent with other backends.
+
+    Parameters
+    ----------
+    ntypes : int
+        Number of atom types
+    neuron : list[int]
+        Number of neurons in each hidden layers of the embedding net
+    resnet_dt
+        Time-step `dt` in the resnet construction: y = x + dt * \phi (Wx + b)
+    activation_function
+        The activation function in the embedding net. Supported options are |ACTIVATION_FN|
+    precision
+        The precision of the embedding net parameters. Supported options are |PRECISION|
+    trainable
+        If the weights of embedding net are trainable.
+    seed
+        Random seed for initializing the network parameters.
+    padding
+        Concat the zero padding to the output, as the default embedding of empty type.
+    use_econf_tebd : bool, optional
+        Whether to use electronic configuration type embedding.
+    use_tebd_bias : bool, optional
+        Whether to use bias in the type embedding layer.
+    type_map : list[str], optional
+        A list of strings. Give the name to each type of atoms.
+    """
+
+    def __init__(
+        self,
+        *,
+        ntypes: int,
+        neuron: list[int],
+        resnet_dt: bool = False,
+        activation_function: str = "tanh",
+        precision: str = "default",
+        trainable: bool = True,
+        seed: Optional[Union[int, list[int]]] = None,
+        padding: bool = False,
+        use_econf_tebd: bool = False,
+        use_tebd_bias: bool = False,
+        type_map: Optional[list[str]] = None,
+    ):
+        """Construct a type embedding net."""
+        super().__init__()
+        self.ntypes = ntypes
+        self.neuron = neuron
+        self.seed = seed
+        self.resnet_dt = resnet_dt
+        self.precision = precision
+        self.prec = env.PRECISION_DICT[self.precision]
+        self.activation_function = str(activation_function)
+        self.trainable = trainable
+        self.padding = padding
+        self.use_econf_tebd = use_econf_tebd
+        self.use_tebd_bias = use_tebd_bias
+        self.type_map = type_map
+        self.econf_tebd = None
+        embed_input_dim = ntypes
+        if self.use_econf_tebd:
+            econf_tebd, embed_input_dim = get_econf_tebd(
+                self.type_map, precision=self.precision
+            )
+            self.econf_tebd = to_paddle_tensor(econf_tebd)
+        self.embedding_net = EmbeddingNet(
+            embed_input_dim,
+            self.neuron,
+            self.activation_function,
+            self.resnet_dt,
+            self.precision,
+            self.seed,
+            bias=self.use_tebd_bias,
+        )
+        for param in self.parameters():
+            param.stop_gradient = not trainable
+
+    def forward(self, device: str):
+        """Calculate the type embedding network.
+
+        Returns
+        -------
+        type_embedding: paddle.Tensor
+            The computed type embedding.
+        """
+        if not self.use_econf_tebd:
+            embed = self.embedding_net(
+                paddle.eye(self.ntypes, dtype=self.prec).to(device=device)
+            )
+        else:
+            assert self.econf_tebd is not None
+            embed = self.embedding_net(self.econf_tebd.to(device))
+        if self.padding:
+            embed = paddle.concat(
+                [
+                    embed,
+                    paddle.zeros([1, embed.shape[1]], dtype=self.prec).to(
+                        device=device
+                    ),
+                ]
+            )
+        return embed
+
+    def change_type_map(
+        self, type_map: list[str], model_with_new_type_stat=None
+    ) -> None:
+        """Change the type related params to new ones, according to `type_map` and the original one in the model.
+        If there are new types in `type_map`, statistics for these new types will be updated according to `model_with_new_type_stat`.
+        """
+        assert (
+            self.type_map is not None
+        ), "'type_map' must be defined when performing type changing!"
+        remap_index, has_new_type = get_index_between_two_maps(self.type_map, type_map)
+        if not self.use_econf_tebd:
+            do_resnet = self.neuron[0] in [
+                self.ntypes,
+                self.ntypes * 2,
+                len(type_map),
+                len(type_map) * 2,
+            ]
+            assert (
+                not do_resnet or self.activation_function == "Linear"
+            ), "'activation_function' must be 'Linear' when performing type changing on resnet structure!"
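+            # with a linear activation, the resnet-style first layer acts as
+            # (matrix + identity): the identity is added here, the rows are
+            # remapped to the new type order, and an identity of the new size
+            # is subtracted again afterwards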
+ first_layer_matrix = self.embedding_net.layers[0].matrix + eye_vector = paddle.eye(self.ntypes, dtype=self.prec).to( + device=first_layer_matrix.place + ) + # preprocess for resnet connection + if self.neuron[0] == self.ntypes: + first_layer_matrix += eye_vector + elif self.neuron[0] == self.ntypes * 2: + first_layer_matrix += paddle.concat([eye_vector, eye_vector], axis=-1) + + # randomly initialize params for the unseen types + if has_new_type: + extend_type_params = paddle.rand( + [len(type_map), first_layer_matrix.shape[-1]], + dtype=first_layer_matrix.dtype, + ).to(device=first_layer_matrix.place) + first_layer_matrix = paddle.concat( + [first_layer_matrix, extend_type_params], axis=0 + ) + + first_layer_matrix = first_layer_matrix[remap_index] + new_ntypes = len(type_map) + eye_vector = paddle.eye(new_ntypes, dtype=self.prec).to( + device=first_layer_matrix.place + ) + + if self.neuron[0] == new_ntypes: + first_layer_matrix -= eye_vector + elif self.neuron[0] == new_ntypes * 2: + first_layer_matrix -= paddle.concat([eye_vector, eye_vector], axis=-1) + + self.embedding_net.layers[0].num_in = new_ntypes + self.embedding_net.layers[0].matrix = self.create_parameter( + first_layer_matrix.shape, + dtype=first_layer_matrix.dtype, + default_initializer=nn.initializer.Assign(first_layer_matrix), + ) + else: + econf_tebd, embed_input_dim = get_econf_tebd( + type_map, precision=self.precision + ) + self.econf_tebd = to_paddle_tensor(econf_tebd) + self.type_map = type_map + self.ntypes = len(type_map) + + @classmethod + def deserialize(cls, data: dict): + """Deserialize the model. + + Parameters + ---------- + data : dict + The serialized data + + Returns + ------- + TypeEmbedNetConsistent + The deserialized model + """ + data = data.copy() + check_version_compatibility(data.pop("@version", 1), 2, 1) + data_cls = data.pop("@class") + assert data_cls == "TypeEmbedNet", f"Invalid class {data_cls}" + + embedding_net = EmbeddingNet.deserialize(data.pop("embedding")) + # compat with version 1 + if "use_tebd_bias" not in data: + data["use_tebd_bias"] = True + type_embedding_net = cls(**data) + type_embedding_net.embedding_net = embedding_net + return type_embedding_net + + def serialize(self) -> dict: + """Serialize the model. 
+ + Returns + ------- + dict + The serialized data + """ + return { + "@class": "TypeEmbedNet", + "@version": 2, + "ntypes": self.ntypes, + "neuron": self.neuron, + "resnet_dt": self.resnet_dt, + "precision": self.precision, + "activation_function": self.activation_function, + "trainable": self.trainable, + "padding": self.padding, + "use_econf_tebd": self.use_econf_tebd, + "use_tebd_bias": self.use_tebd_bias, + "type_map": self.type_map, + "embedding": self.embedding_net.serialize(), + } diff --git a/deepmd/pd/model/task/__init__.py b/deepmd/pd/model/task/__init__.py new file mode 100644 index 0000000000..ad616156c7 --- /dev/null +++ b/deepmd/pd/model/task/__init__.py @@ -0,0 +1,16 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from .base_fitting import ( + BaseFitting, +) +from .ener import ( + EnergyFittingNet, +) +from .fitting import ( + Fitting, +) + +__all__ = [ + "EnergyFittingNet", + "Fitting", + "BaseFitting", +] diff --git a/deepmd/pd/model/task/base_fitting.py b/deepmd/pd/model/task/base_fitting.py new file mode 100644 index 0000000000..9ad3b801cd --- /dev/null +++ b/deepmd/pd/model/task/base_fitting.py @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import paddle + +from deepmd.dpmodel.fitting import ( + make_base_fitting, +) + +BaseFitting = make_base_fitting(paddle.Tensor, fwd_method_name="forward") diff --git a/deepmd/pd/model/task/ener.py b/deepmd/pd/model/task/ener.py new file mode 100644 index 0000000000..ed0cfac69d --- /dev/null +++ b/deepmd/pd/model/task/ener.py @@ -0,0 +1,85 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import copy +import logging +from typing import ( + Optional, + Union, +) + +import paddle + +from deepmd.pd.model.task.fitting import ( + Fitting, + GeneralFitting, +) +from deepmd.pd.model.task.invar_fitting import ( + InvarFitting, +) +from deepmd.pd.utils import ( + env, +) +from deepmd.pd.utils.env import ( + DEFAULT_PRECISION, +) +from deepmd.utils.version import ( + check_version_compatibility, +) + +dtype = env.GLOBAL_PD_FLOAT_PRECISION +device = env.DEVICE + +log = logging.getLogger(__name__) + + +@Fitting.register("ener") +class EnergyFittingNet(InvarFitting): + def __init__( + self, + ntypes: int, + dim_descrpt: int, + neuron: list[int] = [128, 128, 128], + bias_atom_e: Optional[paddle.Tensor] = None, + resnet_dt: bool = True, + numb_fparam: int = 0, + numb_aparam: int = 0, + activation_function: str = "tanh", + precision: str = DEFAULT_PRECISION, + mixed_types: bool = True, + seed: Optional[Union[int, list[int]]] = None, + type_map: Optional[list[str]] = None, + **kwargs, + ): + super().__init__( + "energy", + ntypes, + dim_descrpt, + 1, + neuron=neuron, + bias_atom_e=bias_atom_e, + resnet_dt=resnet_dt, + numb_fparam=numb_fparam, + numb_aparam=numb_aparam, + activation_function=activation_function, + precision=precision, + mixed_types=mixed_types, + seed=seed, + type_map=type_map, + **kwargs, + ) + + @classmethod + def deserialize(cls, data: dict) -> "GeneralFitting": + data = copy.deepcopy(data) + check_version_compatibility(data.pop("@version", 1), 2, 1) + data.pop("var_name") + data.pop("dim_out") + return super().deserialize(data) + + def serialize(self) -> dict: + """Serialize the fitting to dict.""" + return { + **super().serialize(), + "type": "ener", + } + + exclude_types: list[int] diff --git a/deepmd/pd/model/task/fitting.py b/deepmd/pd/model/task/fitting.py new file mode 100644 index 0000000000..9008ef8af3 --- /dev/null +++ b/deepmd/pd/model/task/fitting.py @@ -0,0 +1,506 @@ +# SPDX-License-Identifier: 
LGPL-3.0-or-later
+import copy
+import logging
+from abc import (
+    abstractmethod,
+)
+from typing import (
+    Optional,
+    Union,
+)
+
+import numpy as np
+import paddle
+
+from deepmd.dpmodel.utils.seed import (
+    child_seed,
+)
+from deepmd.pd.model.network.mlp import (
+    FittingNet,
+    NetworkCollection,
+)
+from deepmd.pd.model.task.base_fitting import (
+    BaseFitting,
+)
+from deepmd.pd.utils import (
+    env,
+)
+from deepmd.pd.utils.env import (
+    DEFAULT_PRECISION,
+    PRECISION_DICT,
+)
+from deepmd.pd.utils.exclude_mask import (
+    AtomExcludeMask,
+)
+from deepmd.pd.utils.utils import (
+    to_numpy_array,
+    to_paddle_tensor,
+)
+from deepmd.utils.finetune import (
+    get_index_between_two_maps,
+    map_atom_exclude_types,
+)
+
+dtype = env.GLOBAL_PD_FLOAT_PRECISION
+device = env.DEVICE
+
+log = logging.getLogger(__name__)
+
+
+class Fitting(paddle.nn.Layer, BaseFitting):
+    # plugin moved to BaseFitting
+
+    def __new__(cls, *args, **kwargs):
+        if cls is Fitting:
+            return BaseFitting.__new__(BaseFitting, *args, **kwargs)
+        return super().__new__(cls)
+
+    def share_params(self, base_class, shared_level, resume=False):
+        """
+        Share the parameters of self to the base_class with shared_level during multitask training.
+        If not starting from a checkpoint (resume is False),
+        some separated parameters (e.g. mean and stddev) will be re-calculated across different classes.
+        """
+        assert (
+            self.__class__ == base_class.__class__
+        ), "Only fitting nets of the same type can share params!"
+        if shared_level == 0:
+            # link buffers
+            if hasattr(self, "bias_atom_e"):
+                self.bias_atom_e = base_class.bias_atom_e
+            # the following will successfully link all the params except buffers, which need manual linking.
+            for item in self._sub_layers:
+                self._sub_layers[item] = base_class._sub_layers[item]
+        elif shared_level == 1:
+            # share everything except the bias_atom_e
+            # the following will successfully link all the params except buffers, which need manual linking.
+            for item in self._sub_layers:
+                self._sub_layers[item] = base_class._sub_layers[item]
+        else:
+            raise NotImplementedError
+
+
+class GeneralFitting(Fitting):
+    """Construct a general fitting net.
+
+    Parameters
+    ----------
+    var_name : str
+        The atomic property to fit, 'energy', 'dipole', and 'polar'.
+    ntypes : int
+        Element count.
+    dim_descrpt : int
+        Embedding width per atom.
+    dim_out : int
+        The output dimension of the fitting net.
+    neuron : list[int]
+        Number of neurons in each hidden layers of the fitting net.
+    bias_atom_e : paddle.Tensor, optional
+        Average energy per atom for each element.
+    resnet_dt : bool
+        Using time-step in the ResNet construction.
+    numb_fparam : int
+        Number of frame parameters.
+    numb_aparam : int
+        Number of atomic parameters.
+    activation_function : str
+        Activation function.
+    precision : str
+        Numerical precision.
+    mixed_types : bool
+        If true, use a uniform fitting net for all atom types, otherwise use
+        different fitting nets for different atom types.
+    rcond : float, optional
+        The condition number for the regression of atomic energy.
+    seed : int, optional
+        Random seed.
+    exclude_types : list[int]
+        Atomic contributions of the excluded atom types are set zero.
+    trainable : Union[list[bool], bool]
+        If the parameters in the fitting net are trainable.
+        Now this only supports setting all the parameters in the fitting net to the same state.
+        When given as list[bool], trainable will be True only if all the boolean entries are True.
+    remove_vaccum_contribution : list[bool], optional
+        Remove the vacuum contribution before the bias is added. The list is
+        indexed by atom type: for `mixed_types` provide `[True]`; otherwise
+        provide a list of length `ntypes` indicating, for each atom type,
+        whether the vacuum contribution is removed.
+    type_map : list[str], optional
+        A list of strings. Give the name to each type of atoms.
+    use_aparam_as_mask : bool
+        If True, the aparam will not be used in the fitting net for embedding.
+    """
+
+    def __init__(
+        self,
+        var_name: str,
+        ntypes: int,
+        dim_descrpt: int,
+        neuron: list[int] = [128, 128, 128],
+        bias_atom_e: Optional[paddle.Tensor] = None,
+        resnet_dt: bool = True,
+        numb_fparam: int = 0,
+        numb_aparam: int = 0,
+        activation_function: str = "tanh",
+        precision: str = DEFAULT_PRECISION,
+        mixed_types: bool = True,
+        rcond: Optional[float] = None,
+        seed: Optional[Union[int, list[int]]] = None,
+        exclude_types: list[int] = [],
+        trainable: Union[bool, list[bool]] = True,
+        remove_vaccum_contribution: Optional[list[bool]] = None,
+        type_map: Optional[list[str]] = None,
+        use_aparam_as_mask: bool = False,
+        **kwargs,
+    ):
+        super().__init__()
+        self.var_name = var_name
+        self.ntypes = ntypes
+        self.dim_descrpt = dim_descrpt
+        self.neuron = neuron
+        self.mixed_types = mixed_types
+        self.resnet_dt = resnet_dt
+        self.numb_fparam = numb_fparam
+        self.numb_aparam = numb_aparam
+        self.activation_function = activation_function
+        self.precision = precision
+        self.prec = PRECISION_DICT[self.precision]
+        self.rcond = rcond
+        self.seed = seed
+        self.type_map = type_map
+        self.use_aparam_as_mask = use_aparam_as_mask
+        # order matters: this should be placed after the assignment of ntypes
+        self.reinit_exclude(exclude_types)
+        self.trainable = trainable
+        # TODO: add support for per-layer trainable settings
+        self.trainable = (
+            all(self.trainable) if isinstance(self.trainable, list) else self.trainable
+        )
+        self.remove_vaccum_contribution = remove_vaccum_contribution
+
+        net_dim_out = self._net_out_dim()
+        # init constants
+        if bias_atom_e is None:
+            bias_atom_e = np.zeros([self.ntypes, net_dim_out], dtype=np.float64)
+        bias_atom_e = paddle.to_tensor(bias_atom_e, dtype=self.prec).to(device=device)
+        bias_atom_e = bias_atom_e.reshape([self.ntypes, net_dim_out])
+        if not self.mixed_types:
+            assert self.ntypes == bias_atom_e.shape[0], "Element count mismatches!"
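+        # registered as a buffer so that it is saved and restored with the
+        # model state but is never updated by the optimizer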
+ self.register_buffer("bias_atom_e", bias_atom_e) + + if self.numb_fparam > 0: + self.register_buffer( + "fparam_avg", + paddle.zeros([self.numb_fparam], dtype=self.prec).to(device=device), + ) + self.register_buffer( + "fparam_inv_std", + paddle.ones([self.numb_fparam], dtype=self.prec).to(device=device), + ) + else: + self.fparam_avg, self.fparam_inv_std = None, None + if self.numb_aparam > 0: + self.register_buffer( + "aparam_avg", + paddle.zeros([self.numb_aparam], dtype=self.prec).to(device=device), + ) + self.register_buffer( + "aparam_inv_std", + paddle.ones([self.numb_aparam], dtype=self.prec).to(device=device), + ) + else: + self.aparam_avg, self.aparam_inv_std = None, None + + in_dim = ( + self.dim_descrpt + + self.numb_fparam + + (0 if self.use_aparam_as_mask else self.numb_aparam) + ) + + self.filter_layers = NetworkCollection( + 1 if not self.mixed_types else 0, + self.ntypes, + network_type="fitting_network", + networks=[ + FittingNet( + in_dim, + net_dim_out, + self.neuron, + self.activation_function, + self.resnet_dt, + self.precision, + bias_out=True, + seed=child_seed(self.seed, ii), + ) + for ii in range(self.ntypes if not self.mixed_types else 1) + ], + ) + # set trainable + for param in self.parameters(): + param.stop_gradient = not self.trainable + + def reinit_exclude( + self, + exclude_types: list[int] = [], + ): + self.exclude_types = exclude_types + self.emask = AtomExcludeMask(self.ntypes, self.exclude_types) + + def change_type_map( + self, type_map: list[str], model_with_new_type_stat=None + ) -> None: + """Change the type related params to new ones, according to `type_map` and the original one in the model. + If there are new types in `type_map`, statistics will be updated accordingly to `model_with_new_type_stat` for these new types. + """ + assert ( + self.type_map is not None + ), "'type_map' must be defined when performing type changing!" + assert self.mixed_types, "Only models in mixed types can perform type changing!" 
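+        # remap the per-type statistics to the new type order; entries for
+        # newly introduced types are zero-initialized below before reindexing
+        # with `remap_index`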
+ remap_index, has_new_type = get_index_between_two_maps(self.type_map, type_map) + self.type_map = type_map + self.ntypes = len(type_map) + self.reinit_exclude(map_atom_exclude_types(self.exclude_types, remap_index)) + if has_new_type: + extend_shape = [len(type_map), *list(self.bias_atom_e.shape[1:])] + extend_bias_atom_e = paddle.zeros( + extend_shape, + dtype=self.bias_atom_e.dtype, + ).to(device=self.bias_atom_e.place) + self.bias_atom_e = paddle.concat( + [self.bias_atom_e, extend_bias_atom_e], axis=0 + ) + self.bias_atom_e = self.bias_atom_e[remap_index] + + def serialize(self) -> dict: + """Serialize the fitting to dict.""" + return { + "@class": "Fitting", + "@version": 2, + "var_name": self.var_name, + "ntypes": self.ntypes, + "dim_descrpt": self.dim_descrpt, + "neuron": self.neuron, + "resnet_dt": self.resnet_dt, + "numb_fparam": self.numb_fparam, + "numb_aparam": self.numb_aparam, + "activation_function": self.activation_function, + "precision": self.precision, + "mixed_types": self.mixed_types, + "nets": self.filter_layers.serialize(), + "rcond": self.rcond, + "exclude_types": self.exclude_types, + "@variables": { + "bias_atom_e": to_numpy_array(self.bias_atom_e), + "fparam_avg": to_numpy_array(self.fparam_avg), + "fparam_inv_std": to_numpy_array(self.fparam_inv_std), + "aparam_avg": to_numpy_array(self.aparam_avg), + "aparam_inv_std": to_numpy_array(self.aparam_inv_std), + }, + "type_map": self.type_map, + # "tot_ener_zero": self.tot_ener_zero , + # "trainable": self.trainable , + # "atom_ener": self.atom_ener , + # "layer_name": self.layer_name , + # "spin": self.spin , + ## NOTICE: not supported by far + "tot_ener_zero": False, + "trainable": [self.trainable] * (len(self.neuron) + 1), + "layer_name": None, + "use_aparam_as_mask": self.use_aparam_as_mask, + "spin": None, + } + + @classmethod + def deserialize(cls, data: dict) -> "GeneralFitting": + data = copy.deepcopy(data) + variables = data.pop("@variables") + nets = data.pop("nets") + obj = cls(**data) + for kk in variables.keys(): + obj[kk] = to_paddle_tensor(variables[kk]) + obj.filter_layers = NetworkCollection.deserialize(nets) + return obj + + def get_dim_fparam(self) -> int: + """Get the number (dimension) of frame parameters of this atomic model.""" + return self.numb_fparam + + def get_dim_aparam(self) -> int: + """Get the number (dimension) of atomic parameters of this atomic model.""" + return self.numb_aparam + + # make jit happy + exclude_types: list[int] + + def get_sel_type(self) -> list[int]: + """Get the selected atom types of this model. + + Only atoms with selected atom types have atomic contribution + to the result of the model. + If returning an empty list, all atom types are selected. 
+ """ + # make jit happy + sel_type: list[int] = [] + for ii in range(self.ntypes): + if ii not in self.exclude_types: + sel_type.append(ii) + return sel_type + + def get_type_map(self) -> list[str]: + """Get the name to each type of atoms.""" + return self.type_map + + def __setitem__(self, key, value): + if key in ["bias_atom_e"]: + value = value.reshape([self.ntypes, self._net_out_dim()]) + self.bias_atom_e = value + elif key in ["fparam_avg"]: + self.fparam_avg = value + elif key in ["fparam_inv_std"]: + self.fparam_inv_std = value + elif key in ["aparam_avg"]: + self.aparam_avg = value + elif key in ["aparam_inv_std"]: + self.aparam_inv_std = value + elif key in ["scale"]: + self.scale = value + else: + raise KeyError(key) + + def __getitem__(self, key): + if key in ["bias_atom_e"]: + return self.bias_atom_e + elif key in ["fparam_avg"]: + return self.fparam_avg + elif key in ["fparam_inv_std"]: + return self.fparam_inv_std + elif key in ["aparam_avg"]: + return self.aparam_avg + elif key in ["aparam_inv_std"]: + return self.aparam_inv_std + elif key in ["scale"]: + return self.scale + else: + raise KeyError(key) + + @abstractmethod + def _net_out_dim(self): + """Set the FittingNet output dim.""" + pass + + def _extend_f_avg_std(self, xx: paddle.Tensor, nb: int) -> paddle.Tensor: + return paddle.tile(xx.reshape([1, self.numb_fparam]), [nb, 1]) + + def _extend_a_avg_std(self, xx: paddle.Tensor, nb: int, nloc: int) -> paddle.Tensor: + return paddle.tile(xx.reshape([1, 1, self.numb_aparam]), [nb, nloc, 1]) + + def _forward_common( + self, + descriptor: paddle.Tensor, + atype: paddle.Tensor, + gr: Optional[paddle.Tensor] = None, + g2: Optional[paddle.Tensor] = None, + h2: Optional[paddle.Tensor] = None, + fparam: Optional[paddle.Tensor] = None, + aparam: Optional[paddle.Tensor] = None, + ): + xx = descriptor + if self.remove_vaccum_contribution is not None: + # TODO: compute the input for vaccm when remove_vaccum_contribution is set + # Ideally, the input for vacuum should be computed; + # we consider it as always zero for convenience. + # Needs a compute_input_stats for vacuum passed from the + # descriptor. + xx_zeros = paddle.zeros_like(xx) + else: + xx_zeros = None + nf, nloc, nd = xx.shape + net_dim_out = self._net_out_dim() + + if nd != self.dim_descrpt: + raise ValueError( + f"get an input descriptor of dim {nd}," + f"which is not consistent with {self.dim_descrpt}." 
+            )
+        # check fparam dim, concatenate to input descriptor
+        if self.numb_fparam > 0:
+            assert fparam is not None, "fparam should not be None"
+            assert self.fparam_avg is not None
+            assert self.fparam_inv_std is not None
+            if fparam.shape[-1] != self.numb_fparam:
+                raise ValueError(
+                    f"got an input fparam of dim {fparam.shape[-1]}, "
+                    f"which is not consistent with {self.numb_fparam}."
+                )
+            fparam = fparam.reshape([nf, self.numb_fparam])
+            nb, _ = fparam.shape
+            t_fparam_avg = self._extend_f_avg_std(self.fparam_avg, nb)
+            t_fparam_inv_std = self._extend_f_avg_std(self.fparam_inv_std, nb)
+            fparam = (fparam - t_fparam_avg) * t_fparam_inv_std
+            fparam = paddle.tile(fparam.reshape([nf, 1, -1]), [1, nloc, 1])
+            xx = paddle.concat(
+                [xx, fparam],
+                axis=-1,
+            )
+            if xx_zeros is not None:
+                xx_zeros = paddle.concat(
+                    [xx_zeros, fparam],
+                    axis=-1,
+                )
+        # check aparam dim, concatenate to input descriptor
+        if self.numb_aparam > 0 and not self.use_aparam_as_mask:
+            assert aparam is not None, "aparam should not be None"
+            assert self.aparam_avg is not None
+            assert self.aparam_inv_std is not None
+            if aparam.shape[-1] != self.numb_aparam:
+                raise ValueError(
+                    f"got an input aparam of dim {aparam.shape[-1]}, "
+                    f"which is not consistent with {self.numb_aparam}."
+                )
+            aparam = aparam.reshape([nf, -1, self.numb_aparam])
+            nb, nloc, _ = aparam.shape
+            t_aparam_avg = self._extend_a_avg_std(self.aparam_avg, nb, nloc)
+            t_aparam_inv_std = self._extend_a_avg_std(self.aparam_inv_std, nb, nloc)
+            aparam = (aparam - t_aparam_avg) * t_aparam_inv_std
+            xx = paddle.concat(
+                [xx, aparam],
+                axis=-1,
+            )
+            if xx_zeros is not None:
+                xx_zeros = paddle.concat(
+                    [xx_zeros, aparam],
+                    axis=-1,
+                )
+
+        outs = paddle.zeros(
+            (nf, nloc, net_dim_out),
+            dtype=env.GLOBAL_PD_FLOAT_PRECISION,
+        ).to(device=descriptor.place)  # jit assertion
+        if self.mixed_types:
+            atom_property = self.filter_layers.networks[0](xx) + self.bias_atom_e[atype]
+            if xx_zeros is not None:
+                atom_property -= self.filter_layers.networks[0](xx_zeros)
+            outs = outs + atom_property  # Shape is [nframes, natoms[0], net_dim_out]
+        else:
+            for type_i, ll in enumerate(self.filter_layers.networks):
+                mask = (atype == type_i).unsqueeze(-1)
+                mask.stop_gradient = True
+                mask = paddle.tile(mask, (1, 1, net_dim_out))
+                atom_property = ll(xx)
+                if xx_zeros is not None:
+                    # must assert, otherwise jit is not happy
+                    assert self.remove_vaccum_contribution is not None
+                    if not (
+                        len(self.remove_vaccum_contribution) > type_i
+                        and not self.remove_vaccum_contribution[type_i]
+                    ):
+                        atom_property -= ll(xx_zeros)
+                atom_property = atom_property + self.bias_atom_e[type_i]
+                atom_property = atom_property * mask.astype(atom_property.dtype)
+                outs = (
+                    outs + atom_property
+                )  # Shape is [nframes, natoms[0], net_dim_out]
+        # nf x nloc
+        mask = self.emask(atype)
+        # nf x nloc x nod
+        outs = outs * mask[:, :, None].astype(outs.dtype)
+        return {self.var_name: outs.astype(env.GLOBAL_PD_FLOAT_PRECISION)}
diff --git a/deepmd/pd/model/task/invar_fitting.py b/deepmd/pd/model/task/invar_fitting.py
new file mode 100644
index 0000000000..b366fc1d2e
--- /dev/null
+++ b/deepmd/pd/model/task/invar_fitting.py
@@ -0,0 +1,183 @@
+# SPDX-License-Identifier: LGPL-3.0-or-later
+import copy
+import logging
+from typing import (
+    Optional,
+    Union,
+)
+
+import paddle
+
+from deepmd.dpmodel import (
+    FittingOutputDef,
+    OutputVariableDef,
+    fitting_check_output,
+)
+from deepmd.pd.model.task.fitting import (
+    GeneralFitting,
+)
+from deepmd.pd.utils import (
+    env,
+)
+from deepmd.pd.utils.env import (
+    DEFAULT_PRECISION,
+)
+from deepmd.utils.version import (
+    check_version_compatibility,
+)
+
+dtype = env.GLOBAL_PD_FLOAT_PRECISION
+device = env.DEVICE
+
+log = logging.getLogger(__name__)
+
+
+@GeneralFitting.register("invar")
+@fitting_check_output
+class InvarFitting(GeneralFitting):
+    """Construct a fitting net for energy.
+
+    Parameters
+    ----------
+    var_name : str
+        The atomic property to fit; one of 'energy', 'dipole', and 'polar'.
+    ntypes : int
+        Element count.
+    dim_descrpt : int
+        Embedding width per atom.
+    dim_out : int
+        The output dimension of the fitting net.
+    neuron : list[int]
+        Number of neurons in each hidden layer of the fitting net.
+    bias_atom_e : paddle.Tensor, optional
+        Average energy per atom for each element.
+    resnet_dt : bool
+        Using time-step in the ResNet construction.
+    numb_fparam : int
+        Number of frame parameters.
+    numb_aparam : int
+        Number of atomic parameters.
+    activation_function : str
+        Activation function.
+    precision : str
+        Numerical precision.
+    mixed_types : bool
+        If true, use a uniform fitting net for all atom types, otherwise use
+        different fitting nets for different atom types.
+    rcond : float, optional
+        The condition number for the regression of atomic energy.
+    seed : int, optional
+        Random seed.
+    exclude_types : list[int]
+        Atomic contributions of the excluded atom types are set zero.
+    atom_ener : list[Optional[paddle.Tensor]], optional
+        Specifying atomic energy contribution in vacuum.
+        The value is a list specifying the bias; the elements can be None or np.array of output shape.
+        For example: [None, [2.]] means type 0 is not set, type 1 is set to [2.].
+        The `set_davg_zero` key in the descriptor should be set.
+    type_map : list[str], optional
+        A list of strings. Give the name to each type of atoms.
+    use_aparam_as_mask : bool
+        If True, the aparam will not be used in fitting net for embedding.
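+
+    Examples
+    --------
+    A minimal, illustrative sketch; the shapes and sizes below are made up
+    for demonstration and are not taken from a real descriptor:
+
+    >>> import paddle
+    >>> fitting = InvarFitting("energy", ntypes=2, dim_descrpt=8, dim_out=1)
+    >>> descriptor = paddle.randn([1, 5, 8], dtype="float64")  # [nframes, nloc, dim_descrpt]
+    >>> atype = paddle.zeros([1, 5], dtype="int64")
+    >>> energy = fitting(descriptor, atype)["energy"]  # [nframes, nloc, dim_out]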
+ """ + + def __init__( + self, + var_name: str, + ntypes: int, + dim_descrpt: int, + dim_out: int, + neuron: list[int] = [128, 128, 128], + bias_atom_e: Optional[paddle.Tensor] = None, + resnet_dt: bool = True, + numb_fparam: int = 0, + numb_aparam: int = 0, + activation_function: str = "tanh", + precision: str = DEFAULT_PRECISION, + mixed_types: bool = True, + rcond: Optional[float] = None, + seed: Optional[Union[int, list[int]]] = None, + exclude_types: list[int] = [], + atom_ener: Optional[list[Optional[paddle.Tensor]]] = None, + type_map: Optional[list[str]] = None, + use_aparam_as_mask: bool = False, + **kwargs, + ): + self.dim_out = dim_out + self.atom_ener = atom_ener + super().__init__( + var_name=var_name, + ntypes=ntypes, + dim_descrpt=dim_descrpt, + neuron=neuron, + bias_atom_e=bias_atom_e, + resnet_dt=resnet_dt, + numb_fparam=numb_fparam, + numb_aparam=numb_aparam, + activation_function=activation_function, + precision=precision, + mixed_types=mixed_types, + rcond=rcond, + seed=seed, + exclude_types=exclude_types, + remove_vaccum_contribution=None + if atom_ener is None or len([x for x in atom_ener if x is not None]) == 0 + else [x is not None for x in atom_ener], + type_map=type_map, + use_aparam_as_mask=use_aparam_as_mask, + **kwargs, + ) + + def _net_out_dim(self): + """Set the FittingNet output dim.""" + return self.dim_out + + def serialize(self) -> dict: + data = super().serialize() + data["type"] = "invar" + data["dim_out"] = self.dim_out + data["atom_ener"] = self.atom_ener + return data + + @classmethod + def deserialize(cls, data: dict) -> "GeneralFitting": + data = copy.deepcopy(data) + check_version_compatibility(data.pop("@version", 1), 2, 1) + return super().deserialize(data) + + def output_def(self) -> FittingOutputDef: + return FittingOutputDef( + [ + OutputVariableDef( + self.var_name, + [self.dim_out], + reducible=True, + r_differentiable=True, + c_differentiable=True, + ), + ] + ) + + def forward( + self, + descriptor: paddle.Tensor, + atype: paddle.Tensor, + gr: Optional[paddle.Tensor] = None, + g2: Optional[paddle.Tensor] = None, + h2: Optional[paddle.Tensor] = None, + fparam: Optional[paddle.Tensor] = None, + aparam: Optional[paddle.Tensor] = None, + ): + """Based on embedding net output, alculate total energy. + + Args: + - inputs: Embedding matrix. Its shape is [nframes, natoms[0], self.dim_descrpt]. + - natoms: Tell atom count and element count. Its shape is [2+self.ntypes]. + + Returns + ------- + - `paddle.Tensor`: Total energy with shape [nframes, natoms[0]]. 
+ """ + return self._forward_common(descriptor, atype, gr, g2, h2, fparam, aparam) + + exclude_types: list[int] diff --git a/deepmd/pd/model/task/task.py b/deepmd/pd/model/task/task.py new file mode 100644 index 0000000000..6ceb116d85 --- /dev/null +++ b/deepmd/pd/model/task/task.py @@ -0,0 +1 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later diff --git a/deepmd/pd/train/__init__.py b/deepmd/pd/train/__init__.py new file mode 100644 index 0000000000..6ceb116d85 --- /dev/null +++ b/deepmd/pd/train/__init__.py @@ -0,0 +1 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later diff --git a/deepmd/pd/train/training.py b/deepmd/pd/train/training.py new file mode 100644 index 0000000000..09cf86ecdd --- /dev/null +++ b/deepmd/pd/train/training.py @@ -0,0 +1,1240 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import datetime +import functools +import logging +import time +from contextlib import ( + contextmanager, +) +from copy import ( + deepcopy, +) +from pathlib import ( + Path, +) +from typing import ( + Any, +) + +import numpy as np +import paddle +import paddle.distributed as dist +from paddle.distributed import ( + fleet, +) +from paddle.framework import ( + core, +) +from paddle.io import ( + DataLoader, +) + +from deepmd.common import ( + symlink_prefix_files, +) +from deepmd.dpmodel.utils.learning_rate import ( + LearningRateExp, +) +from deepmd.loggers.training import ( + format_training_message_per_task, +) +from deepmd.pd.loss import ( + EnergyStdLoss, + TaskLoss, +) +from deepmd.pd.model.model import ( + get_model, +) +from deepmd.pd.train.wrapper import ( + ModelWrapper, +) +from deepmd.pd.utils import ( + dp_random, +) +from deepmd.pd.utils.dataloader import ( + BufferedIterator, + get_weighted_sampler, +) +from deepmd.pd.utils.env import ( + DEVICE, + JIT, + NUM_WORKERS, + SAMPLER_RECORD, + enable_prim, +) +from deepmd.pd.utils.stat import ( + make_stat_input, +) +from deepmd.pd.utils.utils import ( + to_numpy_array, +) +from deepmd.utils.data import ( + DataRequirementItem, +) +from deepmd.utils.path import ( + DPH5Path, +) + +log = logging.getLogger(__name__) + +from typing import ( + Optional, +) + + +def format_training_message( + batch: int, + wall_time: float, + eta: Optional[int] = None, +): + msg = f"batch {batch:7d}: " f"total wall time = {wall_time:.2f} s" + if isinstance(eta, int): + msg += f", eta = {datetime.timedelta(seconds=int(eta))!s}" + return msg + + +class Trainer: + def __init__( + self, + config: dict[str, Any], + training_data, + stat_file_path=None, + validation_data=None, + init_model=None, + restart_model=None, + finetune_model=None, + force_load=False, + shared_links=None, + finetune_links=None, + init_frz_model=None, + ): + """Construct a DeePMD trainer. + + Args: + - config: The Dict-like configuration with training options. 
+ """ + enable_prim(True) + if init_model is not None: + resume_model = init_model + elif restart_model is not None: + resume_model = restart_model + elif finetune_model is not None: + resume_model = finetune_model + else: + resume_model = None + resuming = resume_model is not None + self.restart_training = restart_model is not None + model_params = config["model"] + training_params = config["training"] + self.multi_task = "model_dict" in model_params + self.finetune_links = finetune_links + self.finetune_update_stat = False + self.model_keys = ( + list(model_params["model_dict"]) if self.multi_task else ["Default"] + ) + self.rank = ( + dist.get_rank() if dist.is_available() and dist.is_initialized() else 0 + ) + self.world_size = ( + dist.get_world_size() + if dist.is_available() and dist.is_initialized() + else 1 + ) + self.num_model = len(self.model_keys) + + # Iteration config + self.num_steps = training_params["numb_steps"] + self.disp_file = training_params.get("disp_file", "lcurve.out") + self.disp_freq = training_params.get("disp_freq", 1000) + self.save_ckpt = training_params.get("save_ckpt", "model.ckpt") + self.save_freq = training_params.get("save_freq", 1000) + self.max_ckpt_keep = training_params.get("max_ckpt_keep", 5) + self.display_in_training = training_params.get("disp_training", True) + self.timing_in_training = training_params.get("time_training", True) + self.change_bias_after_training = training_params.get( + "change_bias_after_training", False + ) + self.lcurve_should_print_header = True + + def get_opt_param(params): + opt_type = params.get("opt_type", "Adam") + opt_param = { + "kf_blocksize": params.get("kf_blocksize", 5120), + "kf_start_pref_e": params.get("kf_start_pref_e", 1), + "kf_limit_pref_e": params.get("kf_limit_pref_e", 1), + "kf_start_pref_f": params.get("kf_start_pref_f", 1), + "kf_limit_pref_f": params.get("kf_limit_pref_f", 1), + } + return opt_type, opt_param + + def get_data_loader(_training_data, _validation_data, _training_params): + def get_dataloader_and_buffer(_data, _params): + if "auto_prob" in _training_params["training_data"]: + _sampler = get_weighted_sampler( + _data, _params["training_data"]["auto_prob"] + ) + elif "sys_probs" in _training_params["training_data"]: + _sampler = get_weighted_sampler( + _data, + _params["training_data"]["sys_probs"], + sys_prob=True, + ) + else: + _sampler = get_weighted_sampler(_data, "prob_sys_size") + + if _sampler is None: + log.warning( + "Sampler not specified!" + ) # None sampler will lead to a premature stop iteration. Replacement should be True in attribute of the sampler to produce expected number of items in one iteration. 
+ _dataloader = DataLoader( + _data, + batch_sampler=paddle.io.BatchSampler( + sampler=_sampler, + drop_last=False, + ), + num_workers=NUM_WORKERS + if dist.is_available() + else 0, # setting to 0 diverges the behavior of its iterator; should be >=1 + collate_fn=lambda batch: batch[0], # prevent extra conversion + # pin_memory=True, + ) + _data_buffered = BufferedIterator(iter(_dataloader)) + return _dataloader, _data_buffered + + training_dataloader, training_data_buffered = get_dataloader_and_buffer( + _training_data, _training_params + ) + + if _validation_data is not None: + ( + validation_dataloader, + validation_data_buffered, + ) = get_dataloader_and_buffer(_validation_data, _training_params) + valid_numb_batch = _training_params["validation_data"].get( + "numb_btch", 1 + ) + else: + validation_dataloader = None + validation_data_buffered = None + valid_numb_batch = 1 + return ( + training_dataloader, + training_data_buffered, + validation_dataloader, + validation_data_buffered, + valid_numb_batch, + ) + + def single_model_stat( + _model, + _data_stat_nbatch, + _training_data, + _validation_data, + _stat_file_path, + _data_requirement, + finetune_has_new_type=False, + ): + _data_requirement += get_additional_data_requirement(_model) + _training_data.add_data_requirement(_data_requirement) + if _validation_data is not None: + _validation_data.add_data_requirement(_data_requirement) + + @functools.lru_cache + def get_sample(): + sampled = make_stat_input( + _training_data.systems, + _training_data.dataloaders, + _data_stat_nbatch, + ) + return sampled + + if (not resuming or finetune_has_new_type) and self.rank == 0: + _model.compute_or_load_stat( + sampled_func=get_sample, + stat_file_path=_stat_file_path, + ) + if isinstance(_stat_file_path, DPH5Path): + _stat_file_path.root.close() + return get_sample + + def get_lr(lr_params): + assert ( + lr_params.get("type", "exp") == "exp" + ), "Only learning rate `exp` is supported!" + lr_params["stop_steps"] = self.num_steps - self.warmup_steps + lr_exp = LearningRateExp(**lr_params) + return lr_exp + + # Optimizer + if self.multi_task and training_params.get("optim_dict", None) is not None: + self.optim_dict = training_params.get("optim_dict") + missing_keys = [ + key for key in self.model_keys if key not in self.optim_dict + ] + assert ( + not missing_keys + ), f"These keys are not in optim_dict: {missing_keys}!" 
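+            # One optimizer type and parameter set is resolved per model branch.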
+ self.opt_type = {} + self.opt_param = {} + for model_key in self.model_keys: + self.opt_type[model_key], self.opt_param[model_key] = get_opt_param( + self.optim_dict[model_key] + ) + else: + self.opt_type, self.opt_param = get_opt_param(training_params) + + # Model + self.model = get_model_for_wrapper(model_params) + + # Loss + if not self.multi_task: + self.loss = get_loss( + config["loss"], + config["learning_rate"]["start_lr"], + len(model_params["type_map"]), + self.model, + ) + else: + self.loss = {} + for model_key in self.model_keys: + loss_param = config["loss_dict"][model_key] + if config.get("learning_rate_dict", None) is not None: + lr_param = config["learning_rate_dict"][model_key]["start_lr"] + else: + lr_param = config["learning_rate"]["start_lr"] + ntypes = len(model_params["model_dict"][model_key]["type_map"]) + self.loss[model_key] = get_loss( + loss_param, lr_param, ntypes, self.model[model_key] + ) + + # Data + if not self.multi_task: + self.get_sample_func = single_model_stat( + self.model, + model_params.get("data_stat_nbatch", 10), + training_data, + validation_data, + stat_file_path, + self.loss.label_requirement, + finetune_has_new_type=self.finetune_links["Default"].get_has_new_type() + if self.finetune_links is not None + else False, + ) + ( + self.training_dataloader, + self.training_data, + self.validation_dataloader, + self.validation_data, + self.valid_numb_batch, + ) = get_data_loader(training_data, validation_data, training_params) + training_data.print_summary( + "training", + to_numpy_array(self.training_dataloader.batch_sampler.sampler.weights), + ) + if validation_data is not None: + validation_data.print_summary( + "validation", + to_numpy_array( + self.validation_dataloader.batch_sampler.sampler.weights + ), + ) + else: + ( + self.training_dataloader, + self.training_data, + self.validation_dataloader, + self.validation_data, + self.valid_numb_batch, + self.get_sample_func, + ) = {}, {}, {}, {}, {}, {} + for model_key in self.model_keys: + self.get_sample_func[model_key] = single_model_stat( + self.model[model_key], + model_params["model_dict"][model_key].get("data_stat_nbatch", 10), + training_data[model_key], + validation_data[model_key], + stat_file_path[model_key], + self.loss[model_key].label_requirement, + finetune_has_new_type=self.finetune_links[ + model_key + ].get_has_new_type() + if self.finetune_links is not None + else False, + ) + ( + self.training_dataloader[model_key], + self.training_data[model_key], + self.validation_dataloader[model_key], + self.validation_data[model_key], + self.valid_numb_batch[model_key], + ) = get_data_loader( + training_data[model_key], + validation_data[model_key], + training_params["data_dict"][model_key], + ) + + training_data[model_key].print_summary( + f"training in {model_key}", + to_numpy_array( + self.training_dataloader[ + model_key + ].batch_sampler.sampler.weights + ), + ) + if ( + validation_data is not None + and validation_data[model_key] is not None + ): + validation_data[model_key].print_summary( + f"validation in {model_key}", + to_numpy_array( + self.validation_dataloader[ + model_key + ].batch_sampler.sampler.weights + ), + ) + + # Learning rate + self.warmup_steps = training_params.get("warmup_steps", 0) + self.gradient_max_norm = training_params.get("gradient_max_norm", 0.0) + assert ( + self.num_steps - self.warmup_steps > 0 or self.warmup_steps == 0 + ), "Warm up steps must be less than total training steps!" 
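+        # The resulting schedule (see warm_up_linear below) ramps linearly from 0
+        # to start_lr over warmup_steps, then decays exponentially over the
+        # remaining num_steps - warmup_steps steps; e.g. with warmup_steps=100,
+        # step 50 runs at 0.5 * start_lr (the numbers are illustrative only).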
+ if self.multi_task and config.get("learning_rate_dict", None) is not None: + self.lr_exp = {} + for model_key in self.model_keys: + self.lr_exp[model_key] = get_lr(config["learning_rate_dict"][model_key]) + else: + self.lr_exp = get_lr(config["learning_rate"]) + + # JIT + if JIT: + raise NotImplementedError( + "JIT is not supported yet when training with Paddle" + ) + self.model = paddle.jit.to_static(self.model) + + # Model Wrapper + self.wrapper = ModelWrapper(self.model, self.loss, model_params=model_params) + self.start_step = 0 + + # resuming and finetune + optimizer_state_dict = None + if resuming: + log.info(f"Resuming from {resume_model}.") + state_dict = paddle.load(resume_model) + if "model" in state_dict: + optimizer_state_dict = ( + state_dict["optimizer"] if finetune_model is None else None + ) + state_dict = state_dict["model"] + self.start_step = ( + state_dict["_extra_state"]["train_infos"]["step"] + if self.restart_training + else 0 + ) + if self.rank == 0: + if force_load: + input_keys = list(state_dict.keys()) + target_keys = list(self.wrapper.state_dict().keys()) + missing_keys = [ + item for item in target_keys if item not in input_keys + ] + if missing_keys: + target_state_dict = self.wrapper.state_dict() + slim_keys = [] + for item in missing_keys: + state_dict[item] = target_state_dict[item].clone().detach() + new_key = True + for slim_key in slim_keys: + if slim_key in item: + new_key = False + break + if new_key: + tmp_keys = ".".join(item.split(".")[:3]) + slim_keys.append(tmp_keys) + slim_keys = [i + ".*" for i in slim_keys] + log.warning( + f"Force load mode allowed! These keys are not in ckpt and will re-init: {slim_keys}" + ) + # update model params in the pretrained model + if finetune_model is not None: + new_state_dict = {} + target_state_dict = self.wrapper.state_dict() + # pretrained_model + pretrained_model = get_model_for_wrapper( + state_dict["_extra_state"]["model_params"] + ) + pretrained_model_wrapper = ModelWrapper(pretrained_model) + pretrained_model_wrapper.set_state_dict(state_dict) + # update type related params + for model_key in self.model_keys: + finetune_rule_single = self.finetune_links[model_key] + _model_key_from = finetune_rule_single.get_model_branch() + # skip if updated + if ( + finetune_rule_single.get_finetune_tmap() + != pretrained_model_wrapper.model[ + _model_key_from + ].get_type_map() + ): + model_with_new_type_stat = None + if finetune_rule_single.get_has_new_type(): + self.finetune_update_stat = True + model_with_new_type_stat = self.wrapper.model[model_key] + pretrained_model_wrapper.model[ + _model_key_from + ].change_type_map( + finetune_rule_single.get_finetune_tmap(), + model_with_new_type_stat=model_with_new_type_stat, + ) + state_dict = pretrained_model_wrapper.state_dict() + + def collect_single_finetune_params( + _model_key, + _finetune_rule_single, + _new_state_dict, + _origin_state_dict, + _random_state_dict, + ): + _new_fitting = _finetune_rule_single.get_random_fitting() + _model_key_from = _finetune_rule_single.get_model_branch() + target_keys = [ + i + for i in _random_state_dict.keys() + if i != "_extra_state" and f".{_model_key}." in i + ] + for item_key in target_keys: + if _new_fitting and (".descriptor." not in item_key): + # print(f'Keep {item_key} in old model!') + _new_state_dict[item_key] = ( + _random_state_dict[item_key].clone().detach() + ) + else: + new_key = item_key.replace( + f".{_model_key}.", f".{_model_key_from}." 
+ ) + # print(f'Replace {item_key} with {new_key} in pretrained_model!') + _new_state_dict[item_key] = ( + _origin_state_dict[new_key].clone().detach() + ) + + # collect model params from the pretrained model + for model_key in self.model_keys: + finetune_rule_single = self.finetune_links[model_key] + collect_single_finetune_params( + model_key, + finetune_rule_single, + new_state_dict, + state_dict, + target_state_dict, + ) + state_dict = new_state_dict + state_dict["_extra_state"] = self.wrapper.state_dict()[ + "_extra_state" + ] + + self.wrapper.set_state_dict(state_dict) + + # change bias for fine-tuning + if finetune_model is not None: + + def single_model_finetune( + _model, + _finetune_rule_single, + _sample_func, + ): + _model = model_change_out_bias( + _model, + _sample_func, + _bias_adjust_mode="change-by-statistic" + if not _finetune_rule_single.get_random_fitting() + else "set-by-statistic", + ) + return _model + + if not self.multi_task: + finetune_rule_single = self.finetune_links["Default"] + self.model = single_model_finetune( + self.model, finetune_rule_single, self.get_sample_func + ) + else: + for model_key in self.model_keys: + finetune_rule_single = self.finetune_links[model_key] + if not finetune_rule_single.get_resuming(): + log.info( + f"Model branch {model_key} will be fine-tuned. This may take a long time..." + ) + self.model[model_key] = single_model_finetune( + self.model[model_key], + finetune_rule_single, + self.get_sample_func[model_key], + ) + else: + log.info( + f"Model branch {model_key} will resume training." + ) + + if init_frz_model is not None: + frz_model = paddle.jit.load(init_frz_model) + self.model.set_state_dict(frz_model.state_dict()) + + # Multi-task share params + if shared_links is not None: + self.wrapper.share_params( + shared_links, + resume=(resuming and not self.finetune_update_stat) or self.rank != 0, + ) + + # TODO add lr warmups for multitask + # author: iProzd + def warm_up_linear(step, warmup_steps): + if step < warmup_steps: + return step / warmup_steps + else: + return self.lr_exp.value(step - warmup_steps) / self.lr_exp.start_lr + + # TODO add optimizers for multitask + # author: iProzd + if self.opt_type == "Adam": + self.scheduler = paddle.optimizer.lr.LambdaDecay( + learning_rate=self.lr_exp.start_lr, + lr_lambda=lambda step: warm_up_linear( + step + self.start_step, self.warmup_steps + ), + ) + self.optimizer = paddle.optimizer.Adam( + learning_rate=self.scheduler, parameters=self.wrapper.parameters() + ) + if optimizer_state_dict is not None and self.restart_training: + self.optimizer.set_state_dict(optimizer_state_dict) + else: + raise ValueError(f"Not supported optimizer type '{self.opt_type}'") + + if dist.is_available() and dist.is_initialized(): + # DDP will guarantee the model parameters are identical across all processes + self.wrapper = fleet.distributed_model( + self.wrapper, + # find_unused_parameters=True, + ) + self.optimizer = fleet.distributed_optimizer(self.optimizer) + + # Get model prob for multi-task + if self.multi_task: + self.model_prob = np.array([0.0 for key in self.model_keys]) + if training_params.get("model_prob", None) is not None: + model_prob = training_params["model_prob"] + for ii, model_key in enumerate(self.model_keys): + if model_key in model_prob: + self.model_prob[ii] += float(model_prob[model_key]) + else: + for ii, model_key in enumerate(self.model_keys): + self.model_prob[ii] += float(len(self.training_data[model_key])) + sum_prob = np.sum(self.model_prob) + assert sum_prob > 0.0, "Sum 
of model prob must be larger than 0!" + self.model_prob = self.model_prob / sum_prob + + # Tensorboard + self.enable_tensorboard = training_params.get("tensorboard", False) + self.tensorboard_log_dir = training_params.get("tensorboard_log_dir", "log") + self.tensorboard_freq = training_params.get("tensorboard_freq", 1) + self.enable_profiler = training_params.get("enable_profiler", False) + self.profiling = training_params.get("profiling", False) + self.profiling_file = training_params.get("profiling_file", "timeline.json") + + def run(self): + fout = ( + open( + self.disp_file, + mode="w" if not self.restart_training else "a", + buffering=1, + ) + if self.rank == 0 + else None + ) # line buffered + if SAMPLER_RECORD: + record_file = f"Sample_rank_{self.rank}.txt" + fout1 = open(record_file, mode="w", buffering=1) + log.info("Start to train %d steps.", self.num_steps) + if dist.is_available() and dist.is_initialized(): + log.info(f"Rank: {dist.get_rank()}/{dist.get_world_size()}") + if self.enable_tensorboard: + from tensorboardX import ( + SummaryWriter, + ) + + writer = SummaryWriter(log_dir=self.tensorboard_log_dir) + enable_profiling = self.enable_profiler or self.profiling + if enable_profiling: + core.nvprof_start() + core.nvprof_enable_record_event() + + def step(_step_id, task_key="Default"): + # Paddle Profiler + if enable_profiling: + core.nvprof_nvtx_push(f"Training step {_step_id}") + self.wrapper.train() + if isinstance(self.lr_exp, dict): + _lr = self.lr_exp[task_key] + else: + _lr = self.lr_exp + cur_lr = _lr.value(_step_id) + pref_lr = cur_lr + self.optimizer.clear_grad(set_to_zero=False) + input_dict, label_dict, log_dict = self.get_data( + is_train=True, task_key=task_key + ) + if SAMPLER_RECORD: + print_str = f"Step {_step_id}: sample system{log_dict['sid']} frame{log_dict['fid']}\n" + fout1.write(print_str) + fout1.flush() + if self.opt_type == "Adam": + cur_lr = self.scheduler.get_lr() + if _step_id < self.warmup_steps: + pref_lr = _lr.start_lr + else: + pref_lr = cur_lr + with nvprof_context(enable_profiling, "Forward pass"): + model_pred, loss, more_loss = self.wrapper( + **input_dict, + cur_lr=pref_lr, + label=label_dict, + task_key=task_key, + ) + + with nvprof_context(enable_profiling, "Backward pass"): + loss.backward() + + if self.gradient_max_norm > 0.0: + with nvprof_context(enable_profiling, "Gradient clip"): + grad_norm = paddle.nn.utils.clip_grad_norm_( + self.wrapper.parameters(), self.gradient_max_norm + ) + if not paddle.isfinite(grad_norm).all(): + # check local gradnorm single GPU case, trigger NanDetector + raise FloatingPointError("gradients are Nan/Inf") + + with nvprof_context(enable_profiling, "Adam update"): + self.optimizer.step() + + self.scheduler.step() + + if enable_profiling: + core.nvprof_nvtx_pop() + else: + raise ValueError(f"Not supported optimizer type '{self.opt_type}'") + + # Log and persist + display_step_id = _step_id + 1 + if self.display_in_training and ( + display_step_id % self.disp_freq == 0 or display_step_id == 1 + ): + self.wrapper.eval() + + def log_loss_train(_loss, _more_loss, _task_key="Default"): + results = {} + rmse_val = { + item: _more_loss[item] + for item in _more_loss + if "l2_" not in item + } + for item in sorted(rmse_val.keys()): + results[item] = rmse_val[item] + return results + + def log_loss_valid(_task_key="Default"): + single_results = {} + sum_natoms = 0 + if not self.multi_task: + valid_numb_batch = self.valid_numb_batch + else: + valid_numb_batch = self.valid_numb_batch[_task_key] + for ii in 
range(valid_numb_batch): + self.optimizer.clear_grad() + input_dict, label_dict, _ = self.get_data( + is_train=False, task_key=_task_key + ) + if input_dict == {}: + # no validation data + return {} + _, loss, more_loss = self.wrapper( + **input_dict, + cur_lr=pref_lr, + label=label_dict, + task_key=_task_key, + ) + # more_loss.update({"rmse": math.sqrt(loss)}) + natoms = int(input_dict["atype"].shape[-1]) + sum_natoms += natoms + for k, v in more_loss.items(): + if "l2_" not in k: + single_results[k] = ( + single_results.get(k, 0.0) + v * natoms + ) + results = {k: v / sum_natoms for k, v in single_results.items()} + return results + + if not self.multi_task: + train_results = log_loss_train(loss, more_loss) + valid_results = log_loss_valid() + if self.rank == 0: + log.info( + format_training_message_per_task( + batch=display_step_id, + task_name="trn", + rmse=train_results, + learning_rate=cur_lr, + ) + ) + if valid_results: + log.info( + format_training_message_per_task( + batch=display_step_id, + task_name="val", + rmse=valid_results, + learning_rate=None, + ) + ) + else: + train_results = {_key: {} for _key in self.model_keys} + valid_results = {_key: {} for _key in self.model_keys} + train_results[task_key] = log_loss_train( + loss, more_loss, _task_key=task_key + ) + for _key in self.model_keys: + if _key != task_key: + self.optimizer.clear_grad() + input_dict, label_dict, _ = self.get_data( + is_train=True, task_key=_key + ) + _, loss, more_loss = self.wrapper( + **input_dict, + cur_lr=pref_lr, + label=label_dict, + task_key=_key, + ) + train_results[_key] = log_loss_train( + loss, more_loss, _task_key=_key + ) + valid_results[_key] = log_loss_valid(_task_key=_key) + if self.rank == 0: + log.info( + format_training_message_per_task( + batch=display_step_id, + task_name=_key + "_trn", + rmse=train_results[_key], + learning_rate=cur_lr, + ) + ) + if valid_results[_key]: + log.info( + format_training_message_per_task( + batch=display_step_id, + task_name=_key + "_val", + rmse=valid_results[_key], + learning_rate=None, + ) + ) + + current_time = time.time() + train_time = current_time - self.t0 + self.t0 = current_time + if self.rank == 0 and self.timing_in_training: + eta = int( + (self.num_steps - _step_id - 1) / self.disp_freq * train_time + ) + log.info( + format_training_message( + batch=display_step_id, + wall_time=train_time, + eta=eta, + ) + ) + # the first training time is not accurate + if ( + (_step_id + 1 - self.start_step) > self.disp_freq + or self.num_steps - self.start_step < 2 * self.disp_freq + ): + self.total_train_time += train_time + + if fout: + if self.lcurve_should_print_header: + self.print_header(fout, train_results, valid_results) + self.lcurve_should_print_header = False + self.print_on_training( + fout, display_step_id, cur_lr, train_results, valid_results + ) + + if ( + ((_step_id + 1) % self.save_freq == 0 and _step_id != self.start_step) + or (_step_id + 1) == self.num_steps + ) and (self.rank == 0 or dist.get_rank() == 0): + # Handle the case if rank 0 aborted and re-assigned + self.latest_model = Path(self.save_ckpt + f"-{_step_id + 1}.pd") + + module = ( + self.wrapper.module + if dist.is_available() and dist.is_initialized() + else self.wrapper + ) + self.save_model(self.latest_model, lr=cur_lr, step=_step_id) + log.info(f"Saved model to {self.latest_model}") + symlink_prefix_files(self.latest_model.stem, self.save_ckpt) + with open("checkpoint", "w") as f: + f.write(str(self.latest_model)) + + # tensorboard + if self.enable_tensorboard and ( + 
display_step_id % self.tensorboard_freq == 0 or display_step_id == 1
+            ):
+                writer.add_scalar(f"{task_key}/lr", cur_lr, display_step_id)
+                writer.add_scalar(f"{task_key}/loss", loss, display_step_id)
+                for item in more_loss:
+                    writer.add_scalar(
+                        f"{task_key}/{item}", more_loss[item].item(), _step_id
+                    )
+
+        self.t0 = time.time()
+        self.total_train_time = 0.0
+        for step_id in range(self.num_steps):
+            if step_id < self.start_step:
+                continue
+            if self.multi_task:
+                chosen_index_list = dp_random.choice(
+                    np.arange(
+                        self.num_model, dtype=np.int32
+                    ),  # int32 should be enough for # models...
+                    p=np.array(self.model_prob),
+                    size=self.world_size,
+                    replace=True,
+                )
+                assert chosen_index_list.size == self.world_size
+                model_index = chosen_index_list[self.rank]
+                model_key = self.model_keys[model_index]
+            else:
+                model_key = "Default"
+            step(step_id, model_key)
+            if JIT:
+                break
+
+        if self.change_bias_after_training and (self.rank == 0 or dist.get_rank() == 0):
+            if not self.multi_task:
+                self.model = model_change_out_bias(
+                    self.model,
+                    self.get_sample_func,
+                    _bias_adjust_mode="change-by-statistic",
+                )
+            else:
+                for model_key in self.model_keys:
+                    self.model[model_key] = model_change_out_bias(
+                        self.model[model_key],
+                        self.get_sample_func[model_key],
+                        _bias_adjust_mode="change-by-statistic",
+                    )
+            self.latest_model = Path(self.save_ckpt + f"-{self.num_steps}.pd")
+            cur_lr = self.lr_exp.value(self.num_steps - 1)
+            self.save_model(self.latest_model, lr=cur_lr, step=self.num_steps - 1)
+            log.info(f"Saved model to {self.latest_model}")
+            symlink_prefix_files(self.latest_model.stem, self.save_ckpt)
+            with open("checkpoint", "w") as f:
+                f.write(str(self.latest_model))
+
+        if (
+            self.rank == 0 or dist.get_rank() == 0
+        ):  # Handle the case if rank 0 aborted and re-assigned
+            if self.num_steps == 0:
+                # when num_steps is 0, no checkpoint has been saved yet
+                self.latest_model = Path(self.save_ckpt + "-0.pd")
+                self.save_model(self.latest_model, lr=0, step=0)
+                log.info(f"Saved model to {self.latest_model}")
+                symlink_prefix_files(self.latest_model.stem, self.save_ckpt)
+                with open("checkpoint", "w") as f:
+                    f.write(str(self.latest_model))
+
+        elapsed_batch = self.num_steps - self.start_step
+        if self.timing_in_training and elapsed_batch // self.disp_freq > 0:
+            if self.start_step >= 2 * self.disp_freq:
+                log.info(
+                    "average training time: %.4f s/batch (excluding the first %d batches)",
+                    self.total_train_time
+                    / (
+                        elapsed_batch // self.disp_freq * self.disp_freq
+                        - self.disp_freq
+                    ),
+                    self.disp_freq,
+                )
+            else:
+                log.info(
+                    "average training time: %.4f s/batch",
+                    self.total_train_time
+                    / (elapsed_batch // self.disp_freq * self.disp_freq),
+                )
+
+        if JIT:
+            raise NotImplementedError(
+                "Paddle JIT saving during training is not supported yet."
+            )
+        log.info(f"Trained model has been saved to: {self.save_ckpt}")
+
+        if fout:
+            fout.close()
+        if SAMPLER_RECORD:
+            fout1.close()
+        if self.enable_tensorboard:
+            writer.close()
+        if enable_profiling:
+            core.nvprof_stop()
+            log.info(
+                "The nsys profiling traces have been saved to *.nsys-rep and *.sqlite "
+                "files, which can be viewed in the NVIDIA Nsight Systems software"
+            )
+
+    def save_model(self, save_path, lr=0.0, step=0):
+        module = (
+            self.wrapper.module
+            if dist.is_available() and dist.is_initialized()
+            else self.wrapper
+        )
+        module.train_infos["lr"] = float(lr)
+        module.train_infos["step"] = step
+        paddle.save(
+            {"model": module.state_dict(), "optimizer": self.optimizer.state_dict()},
+            str(save_path),
+        )
+        checkpoint_dir = save_path.parent
+        checkpoint_files = [
+            f
+            for f in checkpoint_dir.glob("*.pd")
+            if not f.is_symlink() and f.name.startswith(self.save_ckpt)
+        ]
+        if len(checkpoint_files) > self.max_ckpt_keep:
+            checkpoint_files.sort(key=lambda x: x.stat().st_mtime)
+            checkpoint_files[0].unlink()
+
+    def get_data(self, is_train=True, task_key="Default"):
+        if not self.multi_task:
+            if is_train:
+                try:
+                    batch_data = next(iter(self.training_data))
+                except StopIteration:
+                    # Refresh the status of the dataloader to start from a new epoch
+                    self.training_data = BufferedIterator(
+                        iter(self.training_dataloader)
+                    )
+                    batch_data = next(iter(self.training_data))
+            else:
+                if self.validation_data is None:
+                    return {}, {}, {}
+                try:
+                    batch_data = next(iter(self.validation_data))
+                except StopIteration:
+                    self.validation_data = BufferedIterator(
+                        iter(self.validation_dataloader)
+                    )
+                    batch_data = next(iter(self.validation_data))
+        else:
+            if is_train:
+                try:
+                    batch_data = next(iter(self.training_data[task_key]))
+                except StopIteration:
+                    # Refresh the status of the dataloader to start from a new epoch
+                    self.training_data[task_key] = BufferedIterator(
+                        iter(self.training_dataloader[task_key])
+                    )
+                    batch_data = next(iter(self.training_data[task_key]))
+            else:
+                if self.validation_data[task_key] is None:
+                    return {}, {}, {}
+                try:
+                    batch_data = next(iter(self.validation_data[task_key]))
+                except StopIteration:
+                    self.validation_data[task_key] = BufferedIterator(
+                        iter(self.validation_dataloader[task_key])
+                    )
+                    batch_data = next(iter(self.validation_data[task_key]))
+
+        for key in batch_data.keys():
+            if key == "sid" or key == "fid" or key == "box" or "find_" in key:
+                continue
+            elif not isinstance(batch_data[key], list):
+                if batch_data[key] is not None:
+                    batch_data[key] = batch_data[key].to(DEVICE)
+            else:
+                batch_data[key] = [item.to(DEVICE) for item in batch_data[key]]
+        # we may need a better way to classify which are inputs and which are labels
+        # now wrapper only supports the following inputs:
+        input_keys = [
+            "coord",
+            "atype",
+            "spin",
+            "box",
+            "fparam",
+            "aparam",
+        ]
+        input_dict = {item_key: None for item_key in input_keys}
+        label_dict = {}
+        for item_key in batch_data:
+            if item_key in input_keys:
+                input_dict[item_key] = batch_data[item_key]
+            else:
+                if item_key not in ["sid", "fid"]:
+                    label_dict[item_key] = batch_data[item_key]
+        log_dict = {}
+        if "fid" in batch_data:
+            log_dict["fid"] = batch_data["fid"]
+        log_dict["sid"] = batch_data["sid"]
+        return input_dict, label_dict, log_dict
+
+    def print_header(self, fout, train_results, valid_results):
+        train_keys = sorted(train_results.keys())
+        print_str = ""
+        print_str += "# %5s" % "step"
+        if not self.multi_task:
+            if valid_results:
+                prop_fmt = " %11s %11s"
+                for k in train_keys:
+                    print_str += prop_fmt % (k + "_val", k + "_trn")
+            else:
+                prop_fmt = " %11s"
+                for k in train_keys:
+                    print_str += prop_fmt % (k + "_trn")
+        else:
+            for model_key in self.model_keys:
+                if valid_results[model_key]:
+                    prop_fmt = " %11s %11s"
+                    for k in sorted(train_results[model_key].keys()):
+                        print_str += prop_fmt % (
+                            k + f"_val_{model_key}",
+                            k + f"_trn_{model_key}",
+                        )
+                else:
+                    prop_fmt = " %11s"
+                    for k in sorted(train_results[model_key].keys()):
+                        print_str += prop_fmt % (k + f"_trn_{model_key}")
+        print_str += " %8s\n" % "lr"
+        print_str += "# If there is no available reference data, rmse_*_{val,trn} will print nan\n"
+        fout.write(print_str)
+        fout.flush()
+
+    def print_on_training(self, fout, step_id, cur_lr, train_results, valid_results):
+        train_keys = sorted(train_results.keys())
+        print_str = ""
+        print_str += "%7d" % step_id
+        if not self.multi_task:
+            if valid_results:
+                prop_fmt = " %11.2e %11.2e"
+                for k in train_keys:
+                    print_str += prop_fmt % (valid_results[k], train_results[k])
+            else:
+                prop_fmt = " %11.2e"
+                for k in train_keys:
+                    print_str += prop_fmt % (train_results[k])
+        else:
+            for model_key in self.model_keys:
+                if valid_results[model_key]:
+                    prop_fmt = " %11.2e %11.2e"
+                    for k in sorted(valid_results[model_key].keys()):
+                        print_str += prop_fmt % (
+                            valid_results[model_key][k],
+                            train_results[model_key][k],
+                        )
+                else:
+                    prop_fmt = " %11.2e"
+                    for k in sorted(train_results[model_key].keys()):
+                        print_str += prop_fmt % (train_results[model_key][k])
+        print_str += f" {cur_lr:8.1e}\n"
+        fout.write(print_str)
+        fout.flush()
+
+
+def get_additional_data_requirement(_model):
+    additional_data_requirement = []
+    if _model.get_dim_fparam() > 0:
+        fparam_requirement_items = [
+            DataRequirementItem(
+                "fparam", _model.get_dim_fparam(), atomic=False, must=True
+            )
+        ]
+        additional_data_requirement += fparam_requirement_items
+    if _model.get_dim_aparam() > 0:
+        aparam_requirement_items = [
+            DataRequirementItem(
+                "aparam", _model.get_dim_aparam(), atomic=True, must=True
+            )
+        ]
+        additional_data_requirement += aparam_requirement_items
+    has_spin = getattr(_model, "has_spin", False)
+    if callable(has_spin):
+        has_spin = has_spin()
+    if has_spin:
+        spin_requirement_items = [
+            DataRequirementItem("spin", ndof=3, atomic=True, must=True)
+        ]
+        additional_data_requirement += spin_requirement_items
+    return additional_data_requirement
+
+
+def get_loss(loss_params, start_lr, _ntypes, _model):
+    loss_type = loss_params.get("type", "ener")
+    if loss_type == "ener":
+        loss_params["starter_learning_rate"] = start_lr
+        return EnergyStdLoss(**loss_params)
+    else:
+        loss_params["starter_learning_rate"] = start_lr
+        return TaskLoss.get_class_by_type(loss_type).get_loss(loss_params)
+
+
+def get_single_model(
+    _model_params,
+):
+    model = get_model(deepcopy(_model_params)).to(DEVICE)
+    return model
+
+
+def get_model_for_wrapper(_model_params):
+    if "model_dict" not in _model_params:
+        _model = get_single_model(
+            _model_params,
+        )
+    else:
+        _model = {}
+        model_keys = list(_model_params["model_dict"])
+        for _model_key in model_keys:
+            _model[_model_key] = get_single_model(
+                _model_params["model_dict"][_model_key],
+            )
+    return _model
+
+
+def model_change_out_bias(
+    _model,
+    _sample_func,
+    _bias_adjust_mode="change-by-statistic",
+):
+    old_bias = deepcopy(_model.get_out_bias())
+    _model.change_out_bias(
+        _sample_func,
+        bias_adjust_mode=_bias_adjust_mode,
+    )
+    new_bias = deepcopy(_model.get_out_bias())
+
+    model_type_map = _model.get_type_map()
+    log.info(
+        f"Change output bias of {model_type_map!s} "
+        f"from {to_numpy_array(old_bias).reshape(-1)!s} "
+        f"to {to_numpy_array(new_bias).reshape(-1)!s}."
+    )
+    return _model
+
+
+@contextmanager
+def nvprof_context(enable_profiler: bool, name: str):
+    if enable_profiler:
+        core.nvprof_nvtx_push(name)
+
+    try:
+        yield
+
+    finally:
+        if enable_profiler:
+            core.nvprof_nvtx_pop()
diff --git a/deepmd/pd/train/wrapper.py b/deepmd/pd/train/wrapper.py
new file mode 100644
index 0000000000..c3643f8372
--- /dev/null
+++ b/deepmd/pd/train/wrapper.py
@@ -0,0 +1,213 @@
+# SPDX-License-Identifier: LGPL-3.0-or-later
+from __future__ import (
+    annotations,
+)
+
+import logging
+from collections import (
+    OrderedDict,
+)
+from typing import (
+    Union,
+)
+
+import paddle
+
+_StateDict = Union[dict[str, paddle.Tensor], OrderedDict[str, paddle.Tensor]]
+
+
+log = logging.getLogger(__name__)
+
+
+class ModelWrapper(paddle.nn.Layer):
+    def __init__(
+        self,
+        model: paddle.nn.Layer | dict,
+        loss: paddle.nn.Layer | dict = None,
+        model_params=None,
+        shared_links=None,
+    ):
+        """Construct a DeePMD model wrapper.
+
+        Args:
+        - model: The model, or a dict of models for multitask training.
+        - loss: The loss module, or a dict of loss modules; None for inference only.
+        - model_params: The model configuration, stored as extra state.
+        - shared_links: Parameter-sharing rules for multitask training.
+        """
+        super().__init__()
+        self.model_params = model_params if model_params is not None else {}
+        self.train_infos = {
+            "lr": 0,
+            "step": 0,
+        }
+        self.multi_task = False
+        self.model = paddle.nn.LayerDict()
+        # Model
+        if isinstance(model, paddle.nn.Layer):
+            self.model["Default"] = model
+        elif isinstance(model, dict):
+            self.multi_task = True
+            for task_key in model:
+                assert isinstance(
+                    model[task_key], paddle.nn.Layer
+                ), f"{task_key} in model_dict is not a paddle.nn.Layer!"
+                self.model[task_key] = model[task_key]
+        # Loss
+        self.loss = None
+        if loss is not None:
+            self.loss = paddle.nn.LayerDict()
+            if isinstance(loss, paddle.nn.Layer):
+                self.loss["Default"] = loss
+            elif isinstance(loss, dict):
+                for task_key in loss:
+                    assert isinstance(
+                        loss[task_key], paddle.nn.Layer
+                    ), f"{task_key} in loss_dict is not a paddle.nn.Layer!"
+                    self.loss[task_key] = loss[task_key]
+        self.inference_only = self.loss is None
+
+    def share_params(self, shared_links, resume=False):
+        """
+        Share the parameters of classes following the rules defined in shared_links during multitask training.
+        If not starting from a checkpoint (resume is False),
+        some separate parameters (e.g. mean and stddev) will be recalculated across the different classes.
+        """
+        supported_types = ["descriptor", "fitting_net"]
+        for shared_item in shared_links:
+            class_name = shared_links[shared_item]["type"]
+            shared_base = shared_links[shared_item]["links"][0]
+            class_type_base = shared_base["shared_type"]
+            model_key_base = shared_base["model_key"]
+            shared_level_base = shared_base["shared_level"]
+            if "descriptor" in class_type_base:
+                if class_type_base == "descriptor":
+                    base_class = self.model[model_key_base].get_descriptor()
+                elif "hybrid" in class_type_base:
+                    raise NotImplementedError(
+                        "Hybrid descriptor is not implemented yet"
+                    )
+                else:
+                    raise RuntimeError(f"Unknown class_type {class_type_base}!")
+                for link_item in shared_links[shared_item]["links"][1:]:
+                    class_type_link = link_item["shared_type"]
+                    model_key_link = link_item["model_key"]
+                    shared_level_link = int(link_item["shared_level"])
+                    assert (
+                        shared_level_link >= shared_level_base
+                    ), "The shared_links must be sorted by shared_level!"
+                    assert (
+                        "descriptor" in class_type_link
+                    ), f"Class type mismatched: {class_type_base} vs {class_type_link}!"
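+                    # Each later link re-binds (part of) the base descriptor's
+                    # parameters; shared_level controls how much is shared.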
+ if class_type_link == "descriptor": + link_class = self.model[model_key_link].get_descriptor() + elif "hybrid" in class_type_link: + raise NotImplementedError( + "Hybrid descriptor is not implemented yet" + ) + else: + raise RuntimeError(f"Unknown class_type {class_type_link}!") + link_class.share_params( + base_class, shared_level_link, resume=resume + ) + log.warning( + f"Shared params of {model_key_base}.{class_type_base} and {model_key_link}.{class_type_link}!" + ) + else: + if hasattr(self.model[model_key_base], class_type_base): + base_class = self.model[model_key_base].__getattr__(class_type_base) + for link_item in shared_links[shared_item]["links"][1:]: + class_type_link = link_item["shared_type"] + model_key_link = link_item["model_key"] + shared_level_link = int(link_item["shared_level"]) + assert ( + shared_level_link >= shared_level_base + ), "The shared_links must be sorted by shared_level!" + assert ( + class_type_base == class_type_link + ), f"Class type mismatched: {class_type_base} vs {class_type_link}!" + link_class = self.model[model_key_link].__getattr__( + class_type_link + ) + link_class.share_params( + base_class, shared_level_link, resume=resume + ) + log.warning( + f"Shared params of {model_key_base}.{class_type_base} and {model_key_link}.{class_type_link}!" + ) + + def forward( + self, + coord, + atype, + spin: paddle.Tensor | None = None, + box: paddle.Tensor | None = None, + cur_lr: paddle.Tensor | None = None, + label: paddle.Tensor | None = None, + task_key: paddle.Tensor | None = None, + inference_only=False, + do_atomic_virial=False, + fparam: paddle.Tensor | None = None, + aparam: paddle.Tensor | None = None, + ): + if not self.multi_task: + task_key = "Default" + else: + assert ( + task_key is not None + ), f"Multitask model must specify the inference task! Supported tasks are {list(self.model.keys())}." 
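+        # Typical training-time call (illustrative; "my_task" is a hypothetical
+        # branch name from model_dict):
+        #   model_pred, loss, more_loss = wrapper(
+        #       coord, atype, box=box, cur_lr=lr, label=label, task_key="my_task"
+        #   )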
+ input_dict = { + "coord": coord, + "atype": atype, + "box": box, + "do_atomic_virial": do_atomic_virial, + "fparam": fparam, + "aparam": aparam, + } + has_spin = getattr(self.model[task_key], "has_spin", False) + if callable(has_spin): + has_spin = has_spin() + if has_spin: + input_dict["spin"] = spin + + if self.inference_only or inference_only: + model_pred = self.model[task_key](**input_dict) + return model_pred, None, None + else: + natoms = atype.shape[-1] + model_pred, loss, more_loss = self.loss[task_key]( + input_dict, + self.model[task_key], + label, + natoms=natoms, + learning_rate=cur_lr, + ) + return model_pred, loss, more_loss + + def load_state_dict( + self, + state_dict: _StateDict, + ) -> tuple[list[str], list[str]]: + self.set_extra_state(state_dict.pop("_extra_state")) + return super().set_state_dict(state_dict) + + def set_state_dict( + self, + state_dict: _StateDict, + ) -> tuple[list[str], list[str]]: + return self.load_state_dict(state_dict) + + def state_dict(self): + state_dict = super().state_dict() + extra_state = self.get_extra_state() + state_dict.update({"_extra_state": extra_state}) + return state_dict + + def set_extra_state(self, extra_state: dict): + self.model_params = extra_state["model_params"] + self.train_infos = extra_state["train_infos"] + return None + + def get_extra_state(self) -> dict: + extra_state = { + "model_params": self.model_params, + "train_infos": self.train_infos, + } + return extra_state diff --git a/deepmd/pd/utils/__init__.py b/deepmd/pd/utils/__init__.py new file mode 100644 index 0000000000..7e1043eda4 --- /dev/null +++ b/deepmd/pd/utils/__init__.py @@ -0,0 +1,11 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later + +from .exclude_mask import ( + AtomExcludeMask, + PairExcludeMask, +) + +__all__ = [ + "PairExcludeMask", + "AtomExcludeMask", +] diff --git a/deepmd/pd/utils/auto_batch_size.py b/deepmd/pd/utils/auto_batch_size.py new file mode 100644 index 0000000000..8cdb5ddea2 --- /dev/null +++ b/deepmd/pd/utils/auto_batch_size.py @@ -0,0 +1,60 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later + +import paddle + +from deepmd.utils.batch_size import AutoBatchSize as AutoBatchSizeBase + + +class AutoBatchSize(AutoBatchSizeBase): + """Auto batch size. + + Parameters + ---------- + initial_batch_size : int, default: 1024 + initial batch size (number of total atoms) when DP_INFER_BATCH_SIZE + is not set + factor : float, default: 2. + increased factor + + """ + + def __init__( + self, + initial_batch_size: int = 1024, + factor: float = 2.0, + ): + super().__init__( + initial_batch_size=initial_batch_size, + factor=factor, + ) + + def is_gpu_available(self) -> bool: + """Check if GPU is available. + + Returns + ------- + bool + True if GPU is available + """ + return paddle.device.cuda.device_count() > 0 + + def is_oom_error(self, e: Exception) -> bool: + """Check if the exception is an OOM error. + + Parameters + ---------- + e : Exception + Exception + """ + # several sources think CUSOLVER_STATUS_INTERNAL_ERROR is another out-of-memory error, + # such as https://github.com/JuliaGPU/CUDA.jl/issues/1924 + # (the meaningless error message should be considered as a bug in cusolver) + if isinstance(e, RuntimeError) and ( + "CUDA out of memory." 
in e.args[0] + or "CUDA driver error: out of memory" in e.args[0] + or "cusolver error: CUSOLVER_STATUS_INTERNAL_ERROR" in e.args[0] + ): + # Release all unoccupied cached memory + # paddle.device.cuda.empty_cache() + return True + return False diff --git a/deepmd/pd/utils/dataloader.py b/deepmd/pd/utils/dataloader.py new file mode 100644 index 0000000000..7a2bf4fe9c --- /dev/null +++ b/deepmd/pd/utils/dataloader.py @@ -0,0 +1,339 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import logging +import os +import queue +import time +from collections.abc import ( + Iterator, +) +from multiprocessing.dummy import ( + Pool, +) +from threading import ( + Thread, +) + +import h5py +import numpy as np +import paddle +import paddle.distributed as dist + +# import paddle.multiprocessing +from paddle.io import ( + BatchSampler, + DataLoader, + Dataset, + DistributedBatchSampler, + WeightedRandomSampler, +) +from paddle.io.dataloader.collate import ( + default_collate_fn, +) + +from deepmd.pd.utils import ( + env, +) +from deepmd.pd.utils.dataset import ( + DeepmdDataSetForLoader, +) +from deepmd.utils.data import ( + DataRequirementItem, +) +from deepmd.utils.data_system import ( + print_summary, + prob_sys_size_ext, + process_sys_probs, +) + +log = logging.getLogger(__name__) +# paddle.multiprocessing.set_sharing_strategy("file_system") + + +def setup_seed(seed): + paddle.seed(seed) + os.environ["FLAGS_cudnn_deterministic"] = "True" + + +class DpLoaderSet(Dataset): + """A dataset for storing DataLoaders to multiple Systems. + + Parameters + ---------- + sys_path + Path to the data system + batch_size + Max frame count in a batch. + type_map + Gives the name of different atom types + seed + Random seed for dataloader + shuffle + If the data are shuffled (Only effective in serial mode. 
Always shuffle in distributed data parallelism) + """ + + def __init__( + self, + systems, + batch_size, + type_map, + seed=None, + shuffle=True, + ): + if seed is not None: + setup_seed(seed) + if isinstance(systems, str): + with h5py.File(systems) as file: + systems = [os.path.join(systems, item) for item in file.keys()] + + self.systems: list[DeepmdDataSetForLoader] = [] + if len(systems) >= 100: + log.info(f"Constructing DataLoaders from {len(systems)} systems") + + def construct_dataset(system): + return DeepmdDataSetForLoader( + system=system, + type_map=type_map, + ) + + MAX_PROCESSES_NUM = 4 + processes = min( + os.cpu_count() + // ( + dist.get_world_size() + if dist.is_available() and dist.is_initialized() + else 1 + ), + MAX_PROCESSES_NUM, + ) + with Pool(processes) as pool: + self.systems = pool.map(construct_dataset, systems) + + self.sampler_list: list[DistributedBatchSampler] = [] + self.index = [] + self.total_batch = 0 + + self.dataloaders = [] + self.batch_sizes = [] + if isinstance(batch_size, str): + if batch_size == "auto": + rule = 32 + elif batch_size.startswith("auto:"): + rule = int(batch_size.split(":")[1]) + else: + rule = None + log.error("Unsupported batch size type") + for ii in self.systems: + ni = ii._natoms + bsi = rule // ni + if bsi * ni < rule: + bsi += 1 + self.batch_sizes.append(bsi) + elif isinstance(batch_size, list): + self.batch_sizes = batch_size + else: + self.batch_sizes = batch_size * np.ones(len(systems), dtype=int) + assert len(self.systems) == len(self.batch_sizes) + for system, batch_size in zip(self.systems, self.batch_sizes): + if dist.is_available() and dist.is_initialized(): + system_batch_sampler = DistributedBatchSampler( + system, + shuffle=( + (not (dist.is_available() and dist.is_initialized())) + and shuffle + ), + batch_size=int(batch_size), + ) + self.sampler_list.append(system_batch_sampler) + else: + system_batch_sampler = BatchSampler( + system, + shuffle=( + (not (dist.is_available() and dist.is_initialized())) + and shuffle + ), + batch_size=int(batch_size), + ) + self.sampler_list.append(system_batch_sampler) + system_dataloader = DataLoader( + dataset=system, + num_workers=0, # Should be 0 to avoid too many threads forked + batch_sampler=system_batch_sampler, + collate_fn=collate_batch, + use_buffer_reader=False, + places=["cpu"], + ) + self.dataloaders.append(system_dataloader) + self.index.append(len(system_dataloader)) + self.total_batch += len(system_dataloader) + + class LazyIter: + """Lazy iterator to prevent fetching data when iter(item).""" + + def __init__(self, item): + self.item = item + + def __iter__(self): + # directly return + return self + + def __next__(self): + if not isinstance(self.item, Iterator): + # make iterator here lazily + self.item = iter(self.item) + return next(self.item) + + self.iters = [] + for item in self.dataloaders: + self.iters.append(LazyIter(item)) + + def set_noise(self, noise_settings): + # noise_settings['noise_type'] # "trunc_normal", "normal", "uniform" + # noise_settings['noise'] # float, default 1.0 + # noise_settings['noise_mode'] # "prob", "fix_num" + # noise_settings['mask_num'] # if "fix_num", int + # noise_settings['mask_prob'] # if "prob", float + # noise_settings['same_mask'] # coord and type same mask? 
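+        # The settings dict is forwarded verbatim to every system dataset.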
+ for system in self.systems: + system.set_noise(noise_settings) + + def __len__(self): + return len(self.dataloaders) + + def __getitem__(self, idx): + # log.warning(str(paddle.distributed.get_rank())+" idx: "+str(idx)+" index: "+str(self.index[idx])) + try: + batch = next(self.iters[idx]) + except StopIteration: + self.iters[idx] = iter(self.dataloaders[idx]) + batch = next(self.iters[idx]) + batch["sid"] = idx + return batch + + def add_data_requirement(self, data_requirement: list[DataRequirementItem]): + """Add data requirement for each system in multiple systems.""" + for system in self.systems: + system.add_data_requirement(data_requirement) + + def print_summary( + self, + name: str, + prob: list[float], + ): + print_summary( + name, + len(self.systems), + [ss.system for ss in self.systems], + [ss._natoms for ss in self.systems], + self.batch_sizes, + [ + ss._data_system.get_sys_numb_batch(self.batch_sizes[ii]) + for ii, ss in enumerate(self.systems) + ], + prob, + [ss._data_system.pbc for ss in self.systems], + ) + + +_sentinel = object() +QUEUESIZE = 32 + + +class BackgroundConsumer(Thread): + def __init__(self, queue, source, max_len): + Thread.__init__(self) + self._queue = queue + self._source = source # Main DL iterator + self._max_len = max_len # + + def run(self): + for item in self._source: + self._queue.put(item) # Blocking if the queue is full + + # Signal the consumer we are done. + self._queue.put(_sentinel) + + +class BufferedIterator: + def __init__(self, iterable): + self._queue = queue.Queue(QUEUESIZE) + self._iterable = iterable + self._consumer = None + + self.start_time = time.time() + self.warning_time = None + self.total = len(iterable) + + def _create_consumer(self): + self._consumer = BackgroundConsumer(self._queue, self._iterable, self.total) + self._consumer.daemon = True + self._consumer.start() + + def __iter__(self): + return self + + def __len__(self): + return self.total + + def __next__(self): + # Create consumer if not created yet + if self._consumer is None: + self._create_consumer() + # Notify the user if there is a data loading bottleneck + if self._queue.qsize() < min(2, max(1, self._queue.maxsize // 2)): + if time.time() - self.start_time > 5 * 60: + if ( + self.warning_time is None + or time.time() - self.warning_time > 15 * 60 + ): + log.warning( + "Data loading buffer is empty or nearly empty. This may " + "indicate a data loading bottleneck, and increasing the " + "number of workers (--num-workers) may help." 
+                    )
+                    self.warning_time = time.time()
+
+        # Get next example
+        item = self._queue.get()
+        if isinstance(item, Exception):
+            raise item
+        if item is _sentinel:
+            raise StopIteration
+        return item
+
+
+def collate_batch(batch):
+    example = batch[0]
+    result = {}
+    for key in example.keys():
+        if "find_" in key:
+            result[key] = batch[0][key]
+        else:
+            if batch[0][key] is None:
+                result[key] = None
+            elif key == "fid":
+                result[key] = [d[key] for d in batch]
+            elif key == "type":
+                continue
+            else:
+                result[key] = default_collate_fn([d[key] for d in batch])
+    return result
+
+
+def get_weighted_sampler(training_data, prob_style, sys_prob=False):
+    if sys_prob is False:
+        if prob_style == "prob_uniform":
+            prob_v = 1.0 / float(training_data.__len__())
+            probs = [prob_v for ii in range(training_data.__len__())]
+        else:  # prob_sys_size;A:B:p1;C:D:p2 or prob_sys_size = prob_sys_size;0:nsys:1.0
+            if prob_style == "prob_sys_size":
+                style = f"prob_sys_size;0:{len(training_data)}:1.0"
+            else:
+                style = prob_style
+            probs = prob_sys_size_ext(style, len(training_data), training_data.index)
+    else:
+        probs = process_sys_probs(prob_style, training_data.index)
+    log.debug("Generated weighted sampler with prob array: " + str(probs))
+    # training_data.total_batch is the size of one epoch; increase it to avoid
+    # rebuilding the iterators too often
+    len_sampler = training_data.total_batch * max(env.NUM_WORKERS, 1)
+    sampler = WeightedRandomSampler(probs, len_sampler, replacement=True)
+    return sampler
diff --git a/deepmd/pd/utils/dataset.py b/deepmd/pd/utils/dataset.py
new file mode 100644
index 0000000000..1f0533d8fc
--- /dev/null
+++ b/deepmd/pd/utils/dataset.py
@@ -0,0 +1,57 @@
+# SPDX-License-Identifier: LGPL-3.0-or-later
+
+
+from typing import (
+    Optional,
+)
+
+from paddle.io import (
+    Dataset,
+)
+
+from deepmd.utils.data import (
+    DataRequirementItem,
+    DeepmdData,
+)
+
+
+class DeepmdDataSetForLoader(Dataset):
+    def __init__(self, system: str, type_map: Optional[list[str]] = None):
+        """Construct a DeePMD-style dataset for a single data system.
+
+        Args:
+        - system: Path to the data system.
+        - type_map: Atom types.
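+
+        Example (a sketch; the path and type map are hypothetical):
+            >>> ds = DeepmdDataSetForLoader("data/water/sys.000", type_map=["O", "H"])
+            >>> frame = ds[0]  # a dict of arrays for one frame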
+ """ + self.system = system + self._type_map = type_map + self._data_system = DeepmdData(sys_path=system, type_map=self._type_map) + self.mixed_type = self._data_system.mixed_type + self._ntypes = self._data_system.get_ntypes() + self._natoms = self._data_system.get_natoms() + self._natoms_vec = self._data_system.get_natoms_vec(self._ntypes) + + def __len__(self): + return self._data_system.nframes + + def __getitem__(self, index): + """Get a frame from the selected system.""" + b_data = self._data_system.get_item_paddle(index) + b_data["natoms"] = self._natoms_vec + return b_data + + def add_data_requirement(self, data_requirement: list[DataRequirementItem]): + """Add data requirement for this data system.""" + for data_item in data_requirement: + self._data_system.add( + data_item["key"], + data_item["ndof"], + atomic=data_item["atomic"], + must=data_item["must"], + high_prec=data_item["high_prec"], + type_sel=data_item["type_sel"], + repeat=data_item["repeat"], + default=data_item["default"], + dtype=data_item["dtype"], + output_natoms_for_type_sel=data_item["output_natoms_for_type_sel"], + ) diff --git a/deepmd/pd/utils/decomp.py b/deepmd/pd/utils/decomp.py new file mode 100644 index 0000000000..434301441a --- /dev/null +++ b/deepmd/pd/utils/decomp.py @@ -0,0 +1,247 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later + +# This file is used to implement some paddle functions with composite API, +# so as to support high-order differentation when double-backward is needed. +# For example: [norm] --decomposition--> [multiply, power, sum] +# This file will be removed when implmented functions are decomposed into primitive +# function in Paddle framework in the future. + +from __future__ import ( + annotations, +) + +import paddle + +__all__ = [ + "softmax", + "norm", + "take_along_axis", + "scatter_reduce", + "sec", + "masked_add_", +] + + +# decomposition for forward function +def softmax_decomp(x: paddle.Tensor, axis: int = -1) -> paddle.Tensor: + """Forward decompsition function of softmax. + + Parameters + ---------- + x : paddle.Tensor + Input. + axis : int, defaults: -1. + A dimension along which softmax will be computed. + + Returns + ------- + paddle.Tensor + Computed output. + """ + x_max = paddle.max(x, axis=axis, keepdim=True) + x = x - x_max + return paddle.exp(x) / paddle.sum(paddle.exp(x), axis=axis, keepdim=True) + + +def norm_decomp( + x: paddle.Tensor, p: float = 2, axis: bool = -1, keepdim: bool = False +) -> paddle.Tensor: + """Forward decompsition function of norm. + + Parameters + ---------- + x : paddle.Tensor + Input + p : float, default: 2 + Order of norm + axis : bool, default: -1 + Dimensions over which to compute the vector or matrix norm + keepdim : bool, default: False + If set to True, the reduced dimensions are retained in the result as dimensions + with size one + + Returns + ------- + paddle.Tensor + A real-valued tensor, even when A is complex. + """ + if p == 2 or p == 2.0: + # clip for negative indexing, or 1/(0^(k-1)) will cause inf in backward + return (x * x).sum(axis=axis, keepdim=keepdim) ** 0.5 + return (x.abs() ** p).sum(axis=axis, keepdim=keepdim) ** (1 / p) + + +def take_along_axis_decomp( + x: paddle.Tensor, indices: paddle.Tensor, axis: int, broadcast: bool = True +) -> paddle.Tensor: + """Forward decompsition function of take_along_axis. + + Parameters + ---------- + x : paddle.Tensor + The input tensor. + indices : paddle.Tensor + Indices to take along each 1d slice of array. + axis : int + The axis to take 1d slices along. 
+    broadcast : bool, default: True
+        Whether the indices broadcast.
+
+    Returns
+    -------
+    paddle.Tensor
+        Computed output.
+    """
+    # manually construct indices for gather_nd (ind_gather_nd.ndim == indices.ndim + 1,
+    # the last 1 represents the number of dimension(s) of indices)
+    ind_gather_nd = paddle.stack(
+        paddle.meshgrid(*[paddle.arange(v) for v in indices.shape], indexing="ij"),
+        axis=-1,
+    )
+    ind_gather_nd[..., axis] = indices
+    # compute output using constructed indices via gather_nd
+    out = paddle.gather_nd(x, ind_gather_nd)
+    return out
+
+
+def scatter_reduce_decomp(
+    input: paddle.Tensor,
+    axis: int,
+    index: paddle.Tensor,
+    src: paddle.Tensor,
+    reduce: str,
+) -> paddle.Tensor:
+    """Forward decomposition function of scatter_reduce.
+
+    Parameters
+    ----------
+    input : paddle.Tensor
+        Input tensor.
+    axis : int
+        The axis along which to index.
+    index : paddle.Tensor
+        The indices of elements to scatter and reduce.
+    src : paddle.Tensor
+        The source elements to scatter and reduce.
+    reduce : str
+        The reduction operation to apply for non-unique indices.
+        Supported modes: ("sum", "prod", "mean").
+
+    Returns
+    -------
+    paddle.Tensor
+        Computed output.
+    """
+    # reduce: "sum", "prod", "mean"
+    if reduce == "sum":
+        output = input.put_along_axis(
+            indices=index, values=src, axis=axis, reduce="add"
+        )
+    elif reduce == "mean":
+        output = input.put_along_axis(
+            indices=index, values=src, axis=axis, reduce="mean"
+        )
+    elif reduce == "prod":
+        output = input.put_along_axis(
+            indices=index, values=src, axis=axis, reduce="mul"
+        )
+    else:
+        raise NotImplementedError("only support mode in ['sum', 'prod', 'mean']!")
+    return output
+
+
+def sec(length: int, size: int) -> list[int]:
+    """Auxiliary function for decomposed functions.
+
+    If length is not divisible by size, the last chunk will be smaller.
+
+    Parameters
+    ----------
+    length : int
+        Length to be chunked.
+    size : int
+        Chunk size.
+
+    Returns
+    -------
+    list[int]
+        Chunked output list.
+    """
+    assert length > 0
+    assert size > 0
+    if length % size == 0:
+        return [size] * (length // size)
+    return [size] * (length // size) + [length % size]
+
+
+def masked_add__decomp(
+    x: paddle.Tensor, mask: paddle.Tensor, v: paddle.Tensor
+) -> paddle.Tensor:
+    """Forward decomposition function of masked_add_ (inplace operator).
+
+    Parameters
+    ----------
+    x : paddle.Tensor
+        Input tensor.
+    mask : paddle.Tensor
+        Mask tensor.
+    v : paddle.Tensor
+        Value to add.
+
+    Returns
+    -------
+    paddle.Tensor
+        Computed output.
+    """
+    assert mask.dtype == paddle.bool, f"mask must be bool type, but got {mask.dtype}"
+    # indices is bool mask
+    mask_coord = paddle.concat(
+        paddle.nonzero(mask, as_tuple=True),
+        axis=1,
+    )  # [nz, dim]
+    if not paddle.is_tensor(v):
+        v = paddle.full([mask_coord.shape[0]], v, dtype=x.dtype)
+    t = paddle.scatter_nd_add(
+        x,
+        mask_coord,
+        v,
+    )
+    paddle.assign(t, x)  # inplace update
+    return x
+
+
+def normalize_decomp(
+    x: paddle.Tensor,
+    p: float = 2,
+    axis: int = 1,
+    epsilon: float = 1e-12,
+) -> paddle.Tensor:
+    """Forward decomposition function of normalize.
+
+    Parameters
+    ----------
+    x : paddle.Tensor
+        Input tensor.
+    p : float, optional
+        Order of the norm, default: 2
+    axis : int, optional
+        Axis on which to perform normalization, default: 1
+    epsilon : float, optional
+        Epsilon value, default: 1e-12
+
+    Returns
+    -------
+    paddle.Tensor
+        Computed output.
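+
+    Example (a minimal sketch; the values are illustrative only):
+        >>> x = paddle.to_tensor([[3.0, 4.0]])
+        >>> normalize_decomp(x, p=2, axis=1)  # -> [[0.6, 0.8]]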
+ """ + return paddle.nn.functional.normalize(x, p, axis, epsilon) + # return x / norm(x, p=p, axis=axis, keepdim=True) + + +# alias for decomposed functions for convinience +normalize = normalize_decomp +masked_add_ = masked_add__decomp +scatter_reduce = scatter_reduce_decomp +take_along_axis = take_along_axis_decomp +norm = norm_decomp +softmax = softmax_decomp diff --git a/deepmd/pd/utils/dp_random.py b/deepmd/pd/utils/dp_random.py new file mode 100644 index 0000000000..e81488c506 --- /dev/null +++ b/deepmd/pd/utils/dp_random.py @@ -0,0 +1,14 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from deepmd.utils.random import ( + choice, + random, + seed, + shuffle, +) + +__all__ = [ + "choice", + "random", + "seed", + "shuffle", +] diff --git a/deepmd/pd/utils/env.py b/deepmd/pd/utils/env.py new file mode 100644 index 0000000000..4c104db374 --- /dev/null +++ b/deepmd/pd/utils/env.py @@ -0,0 +1,107 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import logging +import os + +import numpy as np +import paddle + +from deepmd.common import ( + VALID_PRECISION, +) +from deepmd.env import ( + GLOBAL_ENER_FLOAT_PRECISION, + GLOBAL_NP_FLOAT_PRECISION, + get_default_nthreads, + set_default_nthreads, +) + +SAMPLER_RECORD = os.environ.get("SAMPLER_RECORD", False) +try: + # only linux + ncpus = len(os.sched_getaffinity(0)) +except AttributeError: + ncpus = os.cpu_count() +NUM_WORKERS = int(os.environ.get("NUM_WORKERS", min(0, ncpus))) +# Make sure DDP uses correct device if applicable +LOCAL_RANK = paddle.distributed.get_rank() + +if os.environ.get("DEVICE") == "cpu" or paddle.device.cuda.device_count() <= 0: + DEVICE = "cpu" +else: + DEVICE = f"gpu:{LOCAL_RANK}" + +paddle.device.set_device(DEVICE) + +JIT = False +CACHE_PER_SYS = 5 # keep at most so many sets per sys in memory +ENERGY_BIAS_TRAINABLE = True + +PRECISION_DICT = { + "float16": paddle.float16, + "float32": paddle.float32, + "float64": paddle.float64, + "half": paddle.float16, + "single": paddle.float32, + "double": paddle.float64, + "int32": paddle.int32, + "int64": paddle.int64, + "bfloat16": paddle.bfloat16, + "bool": paddle.bool, +} +GLOBAL_PD_FLOAT_PRECISION = PRECISION_DICT[np.dtype(GLOBAL_NP_FLOAT_PRECISION).name] +GLOBAL_PD_ENER_FLOAT_PRECISION = PRECISION_DICT[ + np.dtype(GLOBAL_ENER_FLOAT_PRECISION).name +] +PRECISION_DICT["default"] = GLOBAL_PD_FLOAT_PRECISION +assert VALID_PRECISION.issubset(PRECISION_DICT.keys()) +# cannot automatically generated +RESERVED_PRECISON_DICT = { + paddle.float16: "float16", + paddle.float32: "float32", + paddle.float64: "float64", + paddle.int32: "int32", + paddle.int64: "int64", + paddle.bfloat16: "bfloat16", + paddle.bool: "bool", +} +assert set(PRECISION_DICT.values()) == set(RESERVED_PRECISON_DICT.keys()) +DEFAULT_PRECISION = "float64" + +# throw warnings if threads not set +set_default_nthreads() +inter_nthreads, intra_nthreads = get_default_nthreads() +# if inter_nthreads > 0: # the behavior of 0 is not documented +# os.environ['OMP_NUM_THREADS'] = str(inter_nthreads) +# if intra_nthreads > 0: +# os.environ['CPU_NUM'] = str(intra_nthreads) + + +def enable_prim(enable: bool = True): + """Enable running program in primitive C++ API in eager/static mode.""" + from paddle.framework import ( + core, + ) + + core.set_prim_eager_enabled(enable) + core._set_prim_all_enabled(enable) + log = logging.getLogger(__name__) + log.info(f"{'Enable' if enable else 'Disable'} prim in eager and static mode.") + + +__all__ = [ + "GLOBAL_ENER_FLOAT_PRECISION", + "GLOBAL_NP_FLOAT_PRECISION", + 
"GLOBAL_PD_FLOAT_PRECISION", + "GLOBAL_PD_ENER_FLOAT_PRECISION", + "DEFAULT_PRECISION", + "PRECISION_DICT", + "RESERVED_PRECISON_DICT", + "SAMPLER_RECORD", + "NUM_WORKERS", + "DEVICE", + "JIT", + "CACHE_PER_SYS", + "ENERGY_BIAS_TRAINABLE", + "LOCAL_RANK", + "enable_prim", +] diff --git a/deepmd/pd/utils/env_mat_stat.py b/deepmd/pd/utils/env_mat_stat.py new file mode 100644 index 0000000000..a37a9672f9 --- /dev/null +++ b/deepmd/pd/utils/env_mat_stat.py @@ -0,0 +1,235 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from collections.abc import ( + Iterator, +) +from typing import ( + TYPE_CHECKING, + Union, +) + +import numpy as np +import paddle + +from deepmd.common import ( + get_hash, +) +from deepmd.pd.model.descriptor.env_mat import ( + prod_env_mat, +) +from deepmd.pd.utils import ( + env, +) +from deepmd.pd.utils.exclude_mask import ( + PairExcludeMask, +) +from deepmd.pd.utils.nlist import ( + extend_input_and_build_neighbor_list, +) +from deepmd.utils.env_mat_stat import EnvMatStat as BaseEnvMatStat +from deepmd.utils.env_mat_stat import ( + StatItem, +) + +if TYPE_CHECKING: + from deepmd.pd.model.descriptor import ( + DescriptorBlock, + ) + + +class EnvMatStat(BaseEnvMatStat): + def compute_stat(self, env_mat: dict[str, paddle.Tensor]) -> dict[str, StatItem]: + """Compute the statistics of the environment matrix for a single system. + + Parameters + ---------- + env_mat : paddle.Tensor + The environment matrix. + + Returns + ------- + dict[str, StatItem] + The statistics of the environment matrix. + """ + stats = {} + for kk, vv in env_mat.items(): + stats[kk] = StatItem( + number=vv.numel().item(), + sum=vv.sum().item() if vv.numel().item() != 0 else paddle.zeros([]), + squared_sum=paddle.square(vv).sum().item() + if vv.numel().item() != 0 + else paddle.zeros([]), + ) + return stats + + +class EnvMatStatSe(EnvMatStat): + """Environmental matrix statistics for the se_a/se_r environmental matrix. + + Parameters + ---------- + descriptor : DescriptorBlock + The descriptor of the model. + """ + + def __init__(self, descriptor: "DescriptorBlock"): + super().__init__() + self.descriptor = descriptor + self.last_dim = ( + self.descriptor.ndescrpt // self.descriptor.nnei + ) # se_r=1, se_a=4 + + def iter( + self, data: list[dict[str, Union[paddle.Tensor, list[tuple[int, int]]]]] + ) -> Iterator[dict[str, StatItem]]: + """Get the iterator of the environment matrix. + + Parameters + ---------- + data : list[dict[str, Union[paddle.Tensor, list[tuple[int, int]]]]] + The data. + + Yields + ------ + dict[str, StatItem] + The statistics of the environment matrix. + """ + zero_mean = paddle.zeros( + [self.descriptor.get_ntypes(), self.descriptor.get_nsel(), self.last_dim], + dtype=env.GLOBAL_PD_FLOAT_PRECISION, + ).to(env.DEVICE) + one_stddev = paddle.ones( + [self.descriptor.get_ntypes(), self.descriptor.get_nsel(), self.last_dim], + dtype=env.GLOBAL_PD_FLOAT_PRECISION, + ).to(env.DEVICE) + if self.last_dim == 4: + radial_only = False + elif self.last_dim == 1: + radial_only = True + else: + raise ValueError( + "last_dim should be 1 for raial-only or 4 for full descriptor." 
+ ) + for system in data: + coord, atype, box, natoms = ( + system["coord"], + system["atype"], + system["box"], + system["natoms"], + ) + ( + extended_coord, + extended_atype, + mapping, + nlist, + ) = extend_input_and_build_neighbor_list( + coord, + atype, + self.descriptor.get_rcut(), + self.descriptor.get_sel(), + mixed_types=self.descriptor.mixed_types(), + box=box, + ) + env_mat, _, _ = prod_env_mat( + extended_coord, + nlist, + atype, + zero_mean, + one_stddev, + self.descriptor.get_rcut(), + self.descriptor.get_rcut_smth(), + radial_only, + protection=self.descriptor.get_env_protection(), + ) + # apply excluded_types + exclude_mask = self.descriptor.emask(nlist, extended_atype) + env_mat *= exclude_mask.unsqueeze(-1).astype(env_mat.dtype) + # reshape to nframes * nloc at the atom level, + # so nframes/mixed_type do not matter + env_mat = env_mat.reshape( + [ + coord.shape[0] * coord.shape[1], + self.descriptor.get_nsel(), + self.last_dim, + ] + ) + atype = atype.reshape([coord.shape[0] * coord.shape[1]]) + # (1, nloc) eq (ntypes, 1), so broadcast is possible + # shape: (ntypes, nloc) + type_idx = paddle.equal( + atype.reshape([1, -1]), + paddle.arange(self.descriptor.get_ntypes(), dtype=atype.dtype) + .to(device=env.DEVICE) + .reshape([-1, 1]), + ) + if "pair_exclude_types" in system: + # shape: (1, nloc, nnei) + exclude_mask = PairExcludeMask( + self.descriptor.get_ntypes(), system["pair_exclude_types"] + )(nlist, extended_atype).reshape( + [1, coord.shape[0] * coord.shape[1], -1] + ) + # shape: (ntypes, nloc, nnei) + type_idx = paddle.logical_and(type_idx.unsqueeze(-1), exclude_mask) + for type_i in range(self.descriptor.get_ntypes()): + dd = env_mat[type_idx[type_i]] + dd = dd.reshape([-1, self.last_dim]) # typen_atoms * unmasked_nnei, 4 + env_mats = {} + env_mats[f"r_{type_i}"] = dd[:, :1] + if self.last_dim == 4: + env_mats[f"a_{type_i}"] = dd[:, 1:] + yield self.compute_stat(env_mats) + + def get_hash(self) -> str: + """Get the hash of the environment matrix. + + Returns + ------- + str + The hash of the environment matrix. 
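+
+        Note: two descriptors with the same (type, ntypes, rcut, rcut_smth,
+        nsel, sel, mixed_types) settings yield the same hash; the hash is
+        presumably used upstream to match and reuse cached statistics.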
+ """ + dscpt_type = "se_a" if self.last_dim == 4 else "se_r" + return get_hash( + { + "type": dscpt_type, + "ntypes": self.descriptor.get_ntypes(), + "rcut": round(self.descriptor.get_rcut(), 2), + "rcut_smth": round(self.descriptor.rcut_smth, 2), + "nsel": self.descriptor.get_nsel(), + "sel": self.descriptor.get_sel(), + "mixed_types": self.descriptor.mixed_types(), + } + ) + + def __call__(self): + avgs = self.get_avg() + stds = self.get_std() + + all_davg = [] + all_dstd = [] + + for type_i in range(self.descriptor.get_ntypes()): + if self.last_dim == 4: + davgunit = [[avgs[f"r_{type_i}"], 0, 0, 0]] + dstdunit = [ + [ + stds[f"r_{type_i}"], + stds[f"a_{type_i}"], + stds[f"a_{type_i}"], + stds[f"a_{type_i}"], + ] + ] + elif self.last_dim == 1: + davgunit = [[avgs[f"r_{type_i}"]]] + dstdunit = [ + [ + stds[f"r_{type_i}"], + ] + ] + davg = np.tile(davgunit, [self.descriptor.get_nsel(), 1]) + dstd = np.tile(dstdunit, [self.descriptor.get_nsel(), 1]) + all_davg.append(davg) + all_dstd.append(dstd) + + mean = np.stack(all_davg) + stddev = np.stack(all_dstd) + return mean, stddev diff --git a/deepmd/pd/utils/exclude_mask.py b/deepmd/pd/utils/exclude_mask.py new file mode 100644 index 0000000000..088ac186a8 --- /dev/null +++ b/deepmd/pd/utils/exclude_mask.py @@ -0,0 +1,164 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later + +import numpy as np +import paddle + +from deepmd.pd.utils import ( + decomp, +) +from deepmd.pd.utils.utils import ( + to_paddle_tensor, +) + + +class AtomExcludeMask(paddle.nn.Layer): + """Computes the type exclusion mask for atoms.""" + + def __init__( + self, + ntypes: int, + exclude_types: list[int] = [], + ): + super().__init__() + self.reinit(ntypes, exclude_types) + + def reinit( + self, + ntypes: int, + exclude_types: list[int] = [], + ): + self.ntypes = ntypes + self.exclude_types = exclude_types + self.type_mask = np.array( + [1 if tt_i not in self.exclude_types else 0 for tt_i in range(ntypes)], + dtype=np.int32, + ) + self.type_mask = to_paddle_tensor(self.type_mask).reshape([-1]) + + def get_exclude_types(self): + return self.exclude_types + + def get_type_mask(self): + return self.type_mask + + def forward( + self, + atype: paddle.Tensor, + ) -> paddle.Tensor: + """Compute type exclusion mask for atoms. + + Parameters + ---------- + atype + The extended atom types. shape: nf x natom + + Returns + ------- + mask + The type exclusion mask for atoms. shape: nf x natom + Element [ff,ii] being 0 if type(ii) is excluded, + otherwise being 1. 
+ + """ + nf, natom = atype.shape + return self.type_mask[atype].reshape([nf, natom]).to(atype.place) + + +class PairExcludeMask(paddle.nn.Layer): + """Computes the type exclusion mask for atom pairs.""" + + def __init__( + self, + ntypes: int, + exclude_types: list[tuple[int, int]] = [], + ): + super().__init__() + self.reinit(ntypes, exclude_types) + + def reinit( + self, + ntypes: int, + exclude_types: list[tuple[int, int]] = [], + ): + self.ntypes = ntypes + self._exclude_types: set[tuple[int, int]] = set() + for tt in exclude_types: + assert len(tt) == 2 + self._exclude_types.add((tt[0], tt[1])) + self._exclude_types.add((tt[1], tt[0])) + # ntypes + 1 for nlist masks + self.type_mask = np.array( + [ + [ + 1 if (tt_i, tt_j) not in self._exclude_types else 0 + for tt_i in range(ntypes + 1) + ] + for tt_j in range(ntypes + 1) + ], + dtype=np.int32, + ) + # (ntypes+1 x ntypes+1) + self.type_mask = to_paddle_tensor(self.type_mask).reshape([-1]) + self.no_exclusion = len(self._exclude_types) == 0 + + def get_exclude_types(self): + return self._exclude_types + + # may have a better place for this method... + def forward( + self, + nlist: paddle.Tensor, + atype_ext: paddle.Tensor, + ) -> paddle.Tensor: + """Compute type exclusion mask. + + Parameters + ---------- + nlist + The neighbor list. shape: nf x nloc x nnei + atype_ext + The extended aotm types. shape: nf x nall + + Returns + ------- + mask + The type exclusion mask of shape: nf x nloc x nnei. + Element [ff,ii,jj] being 0 if type(ii), type(nlist[ff,ii,jj]) is excluded, + otherwise being 1. + + """ + if self.no_exclusion: + # safely return 1 if nothing is excluded. + return paddle.ones_like(nlist, dtype=paddle.int32).to(device=nlist.place) + nf, nloc, nnei = nlist.shape + nall = atype_ext.shape[1] + # add virtual atom of type ntypes. 
nf x nall+1 + ae = paddle.concat( + [ + atype_ext, + self.ntypes + * paddle.ones([nf, 1], dtype=atype_ext.dtype).to( + device=atype_ext.place + ), + ], + axis=-1, + ) + type_i = atype_ext[:, :nloc].reshape([nf, nloc]) * (self.ntypes + 1) + # nf x nloc x nnei + index = paddle.where(nlist == -1, nall, nlist).reshape([nf, nloc * nnei]) + # type_j = paddle.take_along_axis(ae, axis=1, indices=index).reshape( + # [nf, nloc, nnei] + # ) + type_j = decomp.take_along_axis(ae, axis=1, indices=index).reshape( + [nf, nloc, nnei] + ) + type_ij = type_i[:, :, None] + type_j + # nf x (nloc x nnei) + type_ij = type_ij.reshape([nf, nloc * nnei]) + mask = ( + self.type_mask[type_ij] + .reshape([nf, nloc, nnei]) + .to(atype_ext.place) + .astype("bool") + ) + return mask diff --git a/deepmd/pd/utils/finetune.py b/deepmd/pd/utils/finetune.py new file mode 100644 index 0000000000..edac72d9c9 --- /dev/null +++ b/deepmd/pd/utils/finetune.py @@ -0,0 +1,200 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import logging +from copy import ( + deepcopy, +) + +import paddle + +from deepmd.utils.finetune import ( + FinetuneRuleItem, +) + +log = logging.getLogger(__name__) + + +def get_finetune_rule_single( + _single_param_target, + _model_param_pretrained, + from_multitask=False, + model_branch="Default", + model_branch_from="", + change_model_params=False, +): + single_config = deepcopy(_single_param_target) + new_fitting = False + model_branch_chosen = "Default" + + if not from_multitask: + single_config_chosen = deepcopy(_model_param_pretrained) + if model_branch_from == "RANDOM": + # not ["", "RANDOM"], because single-from-single finetune uses pretrained fitting in default + new_fitting = True + else: + model_dict_params = _model_param_pretrained["model_dict"] + if model_branch_from in ["", "RANDOM"]: + model_branch_chosen = next(iter(model_dict_params.keys())) + new_fitting = True + log.warning( + "The fitting net will be re-init instead of using that in the pretrained model! " + "The bias_adjust_mode will be set-by-statistic!" + ) + else: + model_branch_chosen = model_branch_from + assert model_branch_chosen in model_dict_params, ( + f"No model branch named '{model_branch_chosen}'! " + f"Available ones are {list(model_dict_params.keys())}." + ) + single_config_chosen = deepcopy(model_dict_params[model_branch_chosen]) + old_type_map, new_type_map = ( + single_config_chosen["type_map"], + single_config["type_map"], + ) + finetune_rule = FinetuneRuleItem( + p_type_map=old_type_map, + type_map=new_type_map, + model_branch=model_branch_chosen, + random_fitting=new_fitting, + ) + if change_model_params: + trainable_param = { + "descriptor": single_config.get("descriptor", {}).get("trainable", True), + "fitting_net": single_config.get("fitting_net", {}).get("trainable", True), + } + single_config["descriptor"] = single_config_chosen["descriptor"] + if not new_fitting: + single_config["fitting_net"] = single_config_chosen["fitting_net"] + log.info( + f"Change the '{model_branch}' model configurations according to the model branch " + f"'{model_branch_chosen}' in the pretrained one..." 
+ ) + for net_type in trainable_param: + if net_type in single_config: + single_config[net_type]["trainable"] = trainable_param[net_type] + else: + single_config[net_type] = {"trainable": trainable_param[net_type]} + return single_config, finetune_rule + + +def get_finetune_rules( + finetune_model, model_config, model_branch="", change_model_params=True +): + """ + Get fine-tuning rules and (optionally) change the model_params according to the pretrained one. + + This function gets the fine-tuning rules and (optionally) changes input in different modes as follows: + 1. Single-task fine-tuning from a single-task pretrained model: + - The model will be fine-tuned based on the pretrained model. + - (Optional) Updates the model parameters based on the pretrained model. + 2. Single-task fine-tuning from a multi-task pretrained model: + - The model will be fine-tuned based on the selected branch in the pretrained model. + The chosen branch can be defined from the command-line or `finetune_head` input parameter. + If not defined, model parameters in the fitting network will be randomly initialized. + - (Optional) Updates the model parameters based on the selected branch in the pretrained model. + 3. Multi-task fine-tuning from a single-task pretrained model: + - The model in each branch will be fine-tuned or resumed based on the single branch ('Default') in the pretrained model. + The chosen branches can be defined from the `finetune_head` input parameter of each branch. + - If `finetune_head` is defined as 'Default', + it will be fine-tuned based on the single branch ('Default') in the pretrained model. + - If `finetune_head` is not defined and the model_key is 'Default', + it will resume from the single branch ('Default') in the pretrained model without fine-tuning. + - If `finetune_head` is not defined and the model_key is not 'Default', + it will be fine-tuned based on the single branch ('Default') in the pretrained model, + while model parameters in the fitting network of the branch will be randomly initialized. + - (Optional) Updates model parameters in each branch based on the single branch ('Default') in the pretrained model. + 4. Multi-task fine-tuning from a multi-task pretrained model: + - The model in each branch will be fine-tuned or resumed based on the chosen branches in the pretrained model. + The chosen branches can be defined from the `finetune_head` input parameter of each branch. + - If `finetune_head` is defined as one of the branches in the pretrained model, + it will be fine-tuned based on the chosen branch in the pretrained model. + - If `finetune_head` is not defined and the model_key is the same as one of those in the pretrained model, + it will resume from the model_key branch in the pretrained model without fine-tuning. + - If `finetune_head` is not defined and a new model_key is used, + it will be fine-tuned based on the chosen branch in the pretrained model, + while model parameters in the fitting network of the branch will be randomly initialized. + - (Optional) Updates model parameters in each branch based on the chosen branches in the pretrained model. + + Parameters + ---------- + finetune_model + The pretrained model. + model_config + The fine-tuning input parameters. + model_branch + The model branch chosen in command-line mode, only for single-task fine-tuning. + change_model_params + Whether to change the model parameters according to the pretrained one. + + Returns + ------- + model_config: + Updated model parameters. 
+ finetune_links: + Fine-tuning rules in a dict format, with `model_branch`: FinetuneRuleItem pairs. + """ + multi_task = "model_dict" in model_config + state_dict = paddle.load(finetune_model) + if "model" in state_dict: + state_dict = state_dict["model"] + last_model_params = state_dict["_extra_state"]["model_params"] + finetune_from_multi_task = "model_dict" in last_model_params + finetune_links = {} + if not multi_task: + # use command-line first + if model_branch == "" and "finetune_head" in model_config: + model_branch = model_config["finetune_head"] + model_config, finetune_rule = get_finetune_rule_single( + model_config, + last_model_params, + from_multitask=finetune_from_multi_task, + model_branch="Default", + model_branch_from=model_branch, + change_model_params=change_model_params, + ) + finetune_links["Default"] = finetune_rule + else: + assert model_branch == "", ( + "Multi-task fine-tuning does not support command-line branches chosen!" + "Please define the 'finetune_head' in each model params!" + ) + target_keys = model_config["model_dict"].keys() + if not finetune_from_multi_task: + pretrained_keys = ["Default"] + else: + pretrained_keys = last_model_params["model_dict"].keys() + for model_key in target_keys: + resuming = False + if ( + "finetune_head" in model_config["model_dict"][model_key] + and model_config["model_dict"][model_key]["finetune_head"] != "RANDOM" + ): + pretrained_key = model_config["model_dict"][model_key]["finetune_head"] + assert pretrained_key in pretrained_keys, ( + f"'{pretrained_key}' head chosen to finetune not exist in the pretrained model!" + f"Available heads are: {list(pretrained_keys)}" + ) + model_branch_from = pretrained_key + elif ( + "finetune_head" not in model_config["model_dict"][model_key] + and model_key in pretrained_keys + ): + # not do anything if not defined "finetune_head" in heads that exist in the pretrained model + # this will just do resuming + model_branch_from = model_key + resuming = True + else: + # if not defined "finetune_head" in new heads or "finetune_head" is "RANDOM", the fitting net will bre randomly initialized + model_branch_from = "RANDOM" + model_config["model_dict"][model_key], finetune_rule = ( + get_finetune_rule_single( + model_config["model_dict"][model_key], + last_model_params, + from_multitask=finetune_from_multi_task, + model_branch=model_key, + model_branch_from=model_branch_from, + change_model_params=change_model_params, + ) + ) + finetune_links[model_key] = finetune_rule + finetune_links[model_key].resuming = resuming + return model_config, finetune_links diff --git a/deepmd/pd/utils/multi_task.py b/deepmd/pd/utils/multi_task.py new file mode 100644 index 0000000000..680dc53c79 --- /dev/null +++ b/deepmd/pd/utils/multi_task.py @@ -0,0 +1,162 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from copy import ( + deepcopy, +) + +from deepmd.pd.model.descriptor import ( + BaseDescriptor, +) +from deepmd.pd.model.task import ( + BaseFitting, +) + + +def preprocess_shared_params(model_config): + """Preprocess the model params for multitask model, and generate the links dict for further sharing. + + Args: + model_config: Model params of multitask model. + + Returns + ------- + model_config: Preprocessed model params of multitask model. + Those string names are replaced with real params in `shared_dict` of model params. + shared_links: Dict of link infos for further sharing. + Each item, whose key must be in `shared_dict`, is a dict with following keys: + - "type": The real class type of this item. 
+ - "links": List of shared settings, each sub-item is a dict with following keys: + - "model_key": Model key in the `model_dict` to share this item. + - "shared_type": Type of this shard item. + - "shared_level": Shared level (int) of this item in this model. + Lower for more params to share, 0 means to share all params in this item. + This list are sorted by "shared_level". + For example, if one has `model_config` like this: + "model": { + "shared_dict": { + "my_type_map": ["foo", "bar"], + "my_des1": { + "type": "se_e2_a", + "neuron": [10, 20, 40] + }, + }, + "model_dict": { + "model_1": { + "type_map": "my_type_map", + "descriptor": "my_des1", + "fitting_net": { + "neuron": [100, 100, 100] + } + }, + "model_2": { + "type_map": "my_type_map", + "descriptor": "my_des1", + "fitting_net": { + "neuron": [100, 100, 100] + } + } + "model_3": { + "type_map": "my_type_map", + "descriptor": "my_des1:1", + "fitting_net": { + "neuron": [100, 100, 100] + } + } + } + } + The above config will init three model branches named `model_1` and `model_2` and `model_3`, + in which: + - `model_2` and `model_3` will have the same `type_map` as that in `model_1`. + - `model_2` will share all the parameters of `descriptor` with `model_1`, + while `model_3` will share part of parameters of `descriptor` with `model_1` + on human-defined share-level `1` (default is `0`, meaning share all the parameters). + - `model_1`, `model_2` and `model_3` have three different `fitting_net`s. + The returned `model_config` will automatically fulfill the input `model_config` as if there's no sharing, + and the `shared_links` will keep all the sharing information with looking: + { + 'my_des1': { + 'type': 'DescrptSeA', + 'links': [ + {'model_key': 'model_1', + 'shared_type': 'descriptor', + 'shared_level': 0}, + {'model_key': 'model_2', + 'shared_type': 'descriptor', + 'shared_level': 0}, + {'model_key': 'model_3', + 'shared_type': 'descriptor', + 'shared_level': 1} + ] + } + } + + """ + assert "model_dict" in model_config, "only multi-task model can use this method!" + supported_types = ["type_map", "descriptor", "fitting_net"] + shared_dict = model_config.get("shared_dict", {}) + shared_links = {} + type_map_keys = [] + + def replace_one_item(params_dict, key_type, key_in_dict, suffix="", index=None): + shared_type = key_type + shared_key = key_in_dict + shared_level = 0 + if ":" in key_in_dict: + shared_key = key_in_dict.split(":")[0] + shared_level = int(key_in_dict.split(":")[1]) + assert ( + shared_key in shared_dict + ), f"Appointed {shared_type} {shared_key} are not in the shared_dict! Please check the input params." 
+ if index is None: + params_dict[shared_type] = deepcopy(shared_dict[shared_key]) + else: + params_dict[index] = deepcopy(shared_dict[shared_key]) + if shared_type == "type_map": + if key_in_dict not in type_map_keys: + type_map_keys.append(key_in_dict) + else: + if shared_key not in shared_links: + class_name = get_class_name(shared_type, shared_dict[shared_key]) + shared_links[shared_key] = {"type": class_name, "links": []} + link_item = { + "model_key": model_key, + "shared_type": shared_type + suffix, + "shared_level": shared_level, + } + shared_links[shared_key]["links"].append(link_item) + + for model_key in model_config["model_dict"]: + model_params_item = model_config["model_dict"][model_key] + for item_key in model_params_item: + if item_key in supported_types: + item_params = model_params_item[item_key] + if isinstance(item_params, str): + replace_one_item(model_params_item, item_key, item_params) + elif item_params.get("type", "") == "hybrid": + for ii, hybrid_item in enumerate(item_params["list"]): + if isinstance(hybrid_item, str): + replace_one_item( + model_params_item[item_key]["list"], + item_key, + hybrid_item, + suffix=f"_hybrid_{ii}", + index=ii, + ) + for shared_key in shared_links: + shared_links[shared_key]["links"] = sorted( + shared_links[shared_key]["links"], + key=lambda x: x["shared_level"] + - ("spin" in model_config["model_dict"][x["model_key"]]) * 100, + ) + # little trick to make spin models in the front to be the base models, + # because its type embeddings are more general. + assert len(type_map_keys) == 1, "Multitask model must have only one type_map!" + return model_config, shared_links + + +def get_class_name(item_key, item_params): + if item_key == "descriptor": + return BaseDescriptor.get_class_by_type(item_params.get("type", "se_e2_a")) + elif item_key == "fitting_net": + return BaseFitting.get_class_by_type(item_params.get("type", "ener")) + else: + raise RuntimeError(f"Unknown class_name type {item_key}") diff --git a/deepmd/pd/utils/neighbor_stat.py b/deepmd/pd/utils/neighbor_stat.py new file mode 100644 index 0000000000..af39161e98 --- /dev/null +++ b/deepmd/pd/utils/neighbor_stat.py @@ -0,0 +1,197 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from collections.abc import ( + Iterator, +) +from typing import ( + Optional, +) + +import numpy as np +import paddle + +from deepmd.pd.utils.auto_batch_size import ( + AutoBatchSize, +) +from deepmd.pd.utils.env import ( + DEVICE, +) +from deepmd.pd.utils.nlist import ( + extend_coord_with_ghosts, +) +from deepmd.utils.data_system import ( + DeepmdDataSystem, +) +from deepmd.utils.neighbor_stat import NeighborStat as BaseNeighborStat + + +class NeighborStatOP(paddle.nn.Layer): + """Class for getting neighbor statistics data information. + + Parameters + ---------- + ntypes + The num of atom types + rcut + The cut-off radius + mixed_types : bool, optional + If True, treat neighbors of all types as a single type. + """ + + def __init__( + self, + ntypes: int, + rcut: float, + mixed_types: bool, + ) -> None: + super().__init__() + self.rcut = float(rcut) + self.ntypes = ntypes + self.mixed_types = mixed_types + + def forward( + self, + coord: paddle.Tensor, + atype: paddle.Tensor, + cell: Optional[paddle.Tensor], + ) -> tuple[paddle.Tensor, paddle.Tensor]: + """Calculate the neareest neighbor distance between atoms, maximum nbor size of + atoms and the output data range of the environment matrix. + + Parameters + ---------- + coord + The coordinates of atoms. + atype + The atom types. + cell + The cell. 
+ + Returns + ------- + paddle.Tensor + The minimal squared distance between two atoms, in the shape of (nframes,) + paddle.Tensor + The maximal number of neighbors + """ + nframes = coord.shape[0] + coord = coord.reshape([nframes, -1, 3]) + nloc = coord.shape[1] + coord = coord.reshape([nframes, nloc * 3]) + extend_coord, extend_atype, _ = extend_coord_with_ghosts( + coord, atype, cell, self.rcut + ) + + coord1 = extend_coord.reshape([nframes, -1]) + nall = coord1.shape[1] // 3 + coord0 = coord1[:, : nloc * 3] + diff: paddle.Tensor = coord1.reshape([nframes, -1, 3]).unsqueeze( + 1 + ) - coord0.reshape([nframes, -1, 3]).unsqueeze(2) + assert list(diff.shape) == [nframes, nloc, nall, 3] + # remove the diagonal elements + mask = paddle.eye(nloc, nall).to(dtype=paddle.bool, device=diff.place) + # diff[:, mask] = float("inf") + # diff.masked_fill_( + # paddle.broadcast_to(mask.unsqueeze([0, -1]), diff.shape), + # paddle.to_tensor(float("inf")), + # ) + diff[paddle.broadcast_to(mask.unsqueeze([0, -1]), diff.shape)] = float("inf") + rr2 = paddle.sum(paddle.square(diff), axis=-1) + min_rr2 = paddle.min(rr2, axis=-1) + # count the number of neighbors + if not self.mixed_types: + mask = rr2 < self.rcut**2 + nnei = paddle.zeros((nframes, nloc, self.ntypes), dtype=paddle.int64) + for ii in range(self.ntypes): + nnei[:, :, ii] = paddle.sum( + mask & ((extend_atype == ii)[:, None, :]), axis=-1 + ) + else: + mask = rr2 < self.rcut**2 + # virtual types (<0) are not counted + nnei = paddle.sum( + mask & ((extend_atype >= 0).unsqueeze(1)), axis=-1 + ).reshape([nframes, nloc, 1]) + max_nnei = paddle.max(nnei, axis=1) + return min_rr2, max_nnei + + +class NeighborStat(BaseNeighborStat): + """Neighbor statistics using pure NumPy. + + Parameters + ---------- + ntypes : int + The num of atom types + rcut : float + The cut-off radius + mixed_type : bool, optional, default=False + Treat all types as a single type. + """ + + def __init__( + self, + ntypes: int, + rcut: float, + mixed_type: bool = False, + ) -> None: + super().__init__(ntypes, rcut, mixed_type) + op = NeighborStatOP(ntypes, rcut, mixed_type) + # self.op = paddle.jit.to_static(op) + self.op = op + self.auto_batch_size = AutoBatchSize() + + def iterator( + self, data: DeepmdDataSystem + ) -> Iterator[tuple[np.ndarray, float, str]]: + """Abstract method for producing data. + + Yields + ------ + np.ndarray + The maximal number of neighbors + float + The squared minimal distance between two atoms + str + The directory of the data system + """ + for ii in range(len(data.system_dirs)): + for jj in data.data_systems[ii].dirs: + data_set = data.data_systems[ii] + data_set_data = data_set._load_set(jj) + minrr2, max_nnei = self.auto_batch_size.execute_all( + self._execute, + data_set_data["coord"].shape[0], + data_set.get_natoms(), + data_set_data["coord"], + data_set_data["type"], + data_set_data["box"] if data_set.pbc else None, + ) + yield np.max(max_nnei, axis=0), np.min(minrr2), jj + + def _execute( + self, + coord: np.ndarray, + atype: np.ndarray, + cell: Optional[np.ndarray], + ): + """Execute the operation. + + Parameters + ---------- + coord + The coordinates of atoms. + atype + The atom types. + cell + The cell. 
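+
+        Returns
+        -------
+        minrr2 : np.ndarray
+            The minimal squared distances between atoms, one value per frame.
+        max_nnei : np.ndarray
+            The maximal number of neighbors.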
+ """ + with paddle.no_grad(): + minrr2, max_nnei = self.op( + paddle.to_tensor(coord, place=DEVICE), + paddle.to_tensor(atype, place=DEVICE), + paddle.to_tensor(cell, place=DEVICE) if cell is not None else None, + ) + minrr2 = minrr2.numpy() + max_nnei = max_nnei.numpy() + return minrr2, max_nnei diff --git a/deepmd/pd/utils/nlist.py b/deepmd/pd/utils/nlist.py new file mode 100644 index 0000000000..44924ce07d --- /dev/null +++ b/deepmd/pd/utils/nlist.py @@ -0,0 +1,535 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from typing import ( + Optional, + Union, +) + +import paddle + +from deepmd.pd.utils import ( + decomp, + env, +) +from deepmd.pd.utils.region import ( + normalize_coord, + to_face_distance, +) + + +def extend_input_and_build_neighbor_list( + coord, + atype, + rcut: float, + sel: list[int], + mixed_types: bool = False, + box: Optional[paddle.Tensor] = None, +): + nframes, nloc = atype.shape[:2] + if box is not None: + box_gpu = box.to(coord.place) + coord_normalized = normalize_coord( + coord.reshape([nframes, nloc, 3]), + box_gpu.reshape([nframes, 3, 3]), + ) + else: + box_gpu = None + coord_normalized = coord.clone() + extended_coord, extended_atype, mapping = extend_coord_with_ghosts( + coord_normalized, atype, box_gpu, rcut, box + ) + nlist = build_neighbor_list( + extended_coord, + extended_atype, + nloc, + rcut, + sel, + distinguish_types=(not mixed_types), + ) + extended_coord = extended_coord.reshape([nframes, -1, 3]) + return extended_coord, extended_atype, mapping, nlist + + +def build_neighbor_list( + coord: paddle.Tensor, + atype: paddle.Tensor, + nloc: int, + rcut: float, + sel: Union[int, list[int]], + distinguish_types: bool = True, +) -> paddle.Tensor: + """Build neighbor list for a single frame. keeps nsel neighbors. + + Parameters + ---------- + coord : paddle.Tensor + exptended coordinates of shape [batch_size, nall x 3] + atype : paddle.Tensor + extended atomic types of shape [batch_size, nall] + if type < 0 the atom is treat as virtual atoms. + nloc : int + number of local atoms. + rcut : float + cut-off radius + sel : int or list[int] + maximal number of neighbors (of each type). + if distinguish_types==True, nsel should be list and + the length of nsel should be equal to number of + types. + distinguish_types : bool + distinguish different types. + + Returns + ------- + neighbor_list : paddle.Tensor + Neighbor list of shape [batch_size, nloc, nsel], the neighbors + are stored in an ascending order. If the number of + neighbors is less than nsel, the positions are masked + with -1. The neighbor list of an atom looks like + |------ nsel ------| + xx xx xx xx -1 -1 -1 + if distinguish_types==True and we have two types + |---- nsel[0] -----| |---- nsel[1] -----| + xx xx xx xx -1 -1 -1 xx xx xx -1 -1 -1 -1 + For virtual atoms all neighboring positions are filled with -1. + + """ + batch_size = coord.shape[0] + coord = coord.reshape([batch_size, -1]) + nall = coord.shape[1] // 3 + # fill virtual atoms with large coords so they are not neighbors of any + # real atom. 
+    if coord.numel() > 0:
+        xmax = paddle.max(coord) + 2.0 * rcut
+    else:
+        xmax = paddle.zeros([], dtype=coord.dtype).to(device=coord.place) + 2.0 * rcut
+    # nf x nall
+    is_vir = atype < 0
+    coord1 = paddle.where(
+        is_vir[:, :, None], xmax, coord.reshape([batch_size, nall, 3])
+    ).reshape([batch_size, nall * 3])
+    if isinstance(sel, int):
+        sel = [sel]
+    # nloc x 3
+    coord0 = coord1[:, : nloc * 3]
+    # nloc x nall x 3
+    diff = coord1.reshape([batch_size, -1, 3]).unsqueeze(1) - coord0.reshape(
+        [batch_size, -1, 3]
+    ).unsqueeze(2)
+    if paddle.in_dynamic_mode():
+        assert list(diff.shape) == [batch_size, nloc, nall, 3]
+    # nloc x nall
+    # rr = paddle.linalg.norm(diff, axis=-1)
+    rr = decomp.norm(diff, axis=-1)
+    # if central atom has two zero distances, sorting sometimes can not exclude itself
+    rr = rr - paddle.eye(nloc, nall, dtype=rr.dtype).to(device=rr.place).unsqueeze(0)
+    rr, nlist = paddle.sort(rr, axis=-1), paddle.argsort(rr, axis=-1)
+    # nloc x (nall-1)
+    rr = rr[:, :, 1:]
+    nlist = nlist[:, :, 1:]
+
+    return _trim_mask_distinguish_nlist(
+        is_vir, atype, rr, nlist, rcut, sel, distinguish_types
+    )
+
+
+def _trim_mask_distinguish_nlist(
+    is_vir_cntl: paddle.Tensor,
+    atype_neig: paddle.Tensor,
+    rr: paddle.Tensor,
+    nlist: paddle.Tensor,
+    rcut: float,
+    sel: list[int],
+    distinguish_types: bool,
+) -> paddle.Tensor:
+    """Trim the size of nlist, mask if any central atom is virtual, distinguish types if necessary."""
+    nsel = sum(sel)
+    # nloc x nsel
+    batch_size, nloc, nnei = rr.shape
+    if paddle.in_dynamic_mode():
+        assert batch_size == is_vir_cntl.shape[0]
+    if nsel <= nnei:
+        rr = rr[:, :, :nsel]
+        nlist = nlist[:, :, :nsel]
+    else:
+        rr = paddle.concat(
+            [
+                rr,
+                paddle.ones([batch_size, nloc, nsel - nnei]).to(
+                    device=rr.place, dtype=rr.dtype
+                )
+                + rcut,
+            ],
+            axis=-1,
+        )
+        nlist = paddle.concat(
+            [
+                nlist,
+                paddle.ones([batch_size, nloc, nsel - nnei], dtype=nlist.dtype).to(
+                    device=rr.place
+                ),
+            ],
+            axis=-1,
+        )
+    if paddle.in_dynamic_mode():
+        assert list(nlist.shape) == [batch_size, nloc, nsel]
+    nlist = paddle.where(
+        paddle.logical_or((rr > rcut), is_vir_cntl[:, :nloc, None]), -1, nlist
+    )
+    if distinguish_types:
+        return nlist_distinguish_types(nlist, atype_neig, sel)
+    else:
+        return nlist
+
+
+def build_directional_neighbor_list(
+    coord_cntl: paddle.Tensor,
+    atype_cntl: paddle.Tensor,
+    coord_neig: paddle.Tensor,
+    atype_neig: paddle.Tensor,
+    rcut: float,
+    sel: Union[int, list[int]],
+    distinguish_types: bool = True,
+) -> paddle.Tensor:
+    """Build directional neighbor list.
+
+    With each central atom, all the neighbor atoms in the cut-off radius will
+    be recorded in the neighbor list. The maximum number of neighbors is nsel.
+    If the real number of neighbors is larger than nsel, the neighbors will be
+    sorted by distance and the first nsel neighbors are kept.
+
+    Important: the central and neighboring atoms are assumed to be different atoms.
+
+    Parameters
+    ----------
+    coord_cntl : paddle.Tensor
+        coordinates of central atoms, assumed to be local atoms.
+        shape [batch_size, nloc_central x 3]
+    atype_cntl : paddle.Tensor
+        atomic types of central atoms. shape [batch_size, nloc_central]
+        if type < 0 the atom is treated as a virtual atom.
+    coord_neig : paddle.Tensor
+        extended coordinates of neighbor atoms. shape [batch_size, nall_neighbor x 3]
+    atype_neig : paddle.Tensor
+        extended atomic types of neighbor atoms. shape [batch_size, nall_neighbor]
+        if type < 0 the atom is treated as a virtual atom.
+    rcut : float
+        cut-off radius
+    sel : int or list[int]
+        maximal number of neighbors (of each type).
+        if distinguish_types==True, nsel should be list and
+        the length of nsel should be equal to number of
+        types.
+    distinguish_types : bool
+        distinguish different types.
+
+    Returns
+    -------
+    neighbor_list : paddle.Tensor
+        Neighbor list of shape [batch_size, nloc_central, nsel], the neighbors
+        are stored in an ascending order. If the number of neighbors is less than nsel,
+        the positions are masked with -1. The neighbor list of an atom looks like
+        |------ nsel ------|
+        xx xx xx xx -1 -1 -1
+        if distinguish_types==True and we have two types
+        |---- nsel[0] -----| |---- nsel[1] -----|
+        xx xx xx xx -1 -1 -1 xx xx xx -1 -1 -1 -1
+        For virtual atoms all neighboring positions are filled with -1.
+    """
+    batch_size = coord_cntl.shape[0]
+    coord_cntl = coord_cntl.reshape([batch_size, -1])
+    nloc_cntl = coord_cntl.shape[1] // 3
+    coord_neig = coord_neig.reshape([batch_size, -1])
+    nall_neig = coord_neig.shape[1] // 3
+    # fill virtual atoms with large coords so they are not neighbors of any
+    # real atom.
+    if coord_neig.numel() > 0:
+        xmax = paddle.max(coord_cntl) + 2.0 * rcut
+    else:
+        xmax = (
+            paddle.zeros([1], dtype=coord_neig.dtype).to(device=coord_neig.place)
+            + 2.0 * rcut
+        )
+    # nf x nloc
+    is_vir_cntl = atype_cntl < 0
+    # nf x nall
+    is_vir_neig = atype_neig < 0
+    # nf x nloc x 3
+    coord_cntl = coord_cntl.reshape([batch_size, nloc_cntl, 3])
+    # nf x nall x 3
+    coord_neig = paddle.where(
+        is_vir_neig[:, :, None], xmax, coord_neig.reshape([batch_size, nall_neig, 3])
+    ).reshape([batch_size, nall_neig, 3])
+    # nsel
+    if isinstance(sel, int):
+        sel = [sel]
+    # nloc x nall x 3
+    diff = coord_neig[:, None, :, :] - coord_cntl[:, :, None, :]
+    if paddle.in_dynamic_mode():
+        assert list(diff.shape) == [batch_size, nloc_cntl, nall_neig, 3]
+    # nloc x nall
+    # rr = paddle.linalg.norm(diff, axis=-1)
+    rr = decomp.norm(diff, axis=-1)
+    rr, nlist = paddle.sort(rr, axis=-1), paddle.argsort(rr, axis=-1)
+
+    # We assume that the central and neighbor atoms are different,
+    # thus we do not need to exclude self-neighbors.
+    # # if central atom has two zero distances, sorting sometimes can not exclude itself
+    # rr -= paddle.eye(nloc_cntl, nall_neig, dtype=rr.dtype, device=rr.place).unsqueeze(0)
+    # rr, nlist = paddle.sort(rr, axis=-1)
+    # # nloc x (nall-1)
+    # rr = rr[:, :, 1:]
+    # nlist = nlist[:, :, 1:]
+
+    return _trim_mask_distinguish_nlist(
+        is_vir_cntl, atype_neig, rr, nlist, rcut, sel, distinguish_types
+    )
+
+
+def nlist_distinguish_types(
+    nlist: paddle.Tensor,
+    atype: paddle.Tensor,
+    sel: list[int],
+):
+    """Given a nlist that does not distinguish atom types, return a nlist that
+    distinguishes atom types.
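+
+    Example (a sketch with hypothetical values): with sel=[2, 2], a row
+    [5, 7, 9] whose neighbor types are [0, 1, 0] becomes [5, 9, 7, -1]:
+    type-0 neighbors first, then type-1 neighbors, each group padded with -1.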
+ + """ + nf, nloc, nnei = nlist.shape + ret_nlist = [] + # nloc x nall + tmp_atype = paddle.tile(atype.unsqueeze(1), [1, nloc, 1]) + mask = nlist == -1 + # nloc x s(nsel) + # tnlist = paddle.take_along_axis( + # tmp_atype, + # axis=2, + # indices=nlist.masked_fill(mask, 0), + # ) + tnlist = decomp.take_along_axis( + tmp_atype, + axis=2, + indices=nlist.masked_fill(mask, 0), + ) + tnlist = tnlist.masked_fill(mask, -1) + snsel = tnlist.shape[2] + for ii, ss in enumerate(sel): + # nloc x s(nsel) + # to int because bool cannot be sort on GPU + pick_mask = (tnlist == ii).to(paddle.int64) + # nloc x s(nsel), stable sort, nearer neighbors first + pick_mask, imap = ( + paddle.sort(pick_mask, axis=-1, descending=True, stable=True), + paddle.argsort(pick_mask, axis=-1, descending=True, stable=True), + ) + # nloc x s(nsel) + # inlist = paddle.take_along_axis(nlist, axis=2, indices=imap) + inlist = decomp.take_along_axis(nlist, axis=2, indices=imap) + inlist = inlist.masked_fill(~(pick_mask.to(paddle.bool)), -1) + # nloc x nsel[ii] + ret_nlist.append(paddle.split(inlist, [ss, snsel - ss], axis=-1)[0]) + return paddle.concat(ret_nlist, axis=-1) + + +# build_neighbor_list = paddle.vmap( +# build_neighbor_list_lower, +# in_dims=(0,0,None,None,None), +# out_dims=(0), +# ) + + +def get_multiple_nlist_key( + rcut: float, + nsel: int, +) -> str: + return str(rcut) + "_" + str(nsel) + + +def build_multiple_neighbor_list( + coord: paddle.Tensor, + nlist: paddle.Tensor, + rcuts: list[float], + nsels: list[int], +) -> dict[str, paddle.Tensor]: + """Input one neighbor list, and produce multiple neighbor lists with + different cutoff radius and numbers of selection out of it. The + required rcuts and nsels should be smaller or equal to the input nlist. + + Parameters + ---------- + coord : paddle.Tensor + exptended coordinates of shape [batch_size, nall x 3] + nlist : paddle.Tensor + Neighbor list of shape [batch_size, nloc, nsel], the neighbors + should be stored in an ascending order. + rcuts : list[float] + list of cut-off radius in ascending order. + nsels : list[int] + maximal number of neighbors in ascending order. + + Returns + ------- + nlist_dict : dict[str, paddle.Tensor] + A dict of nlists, key given by get_multiple_nlist_key(rc, nsel) + value being the corresponding nlist. 
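+
+    Example (a sketch; the cutoffs and sizes are hypothetical and must not
+    exceed those of the input nlist):
+        >>> nlists = build_multiple_neighbor_list(coord, nlist, [2.0, 4.0], [20, 40])
+        >>> nl_inner = nlists[get_multiple_nlist_key(2.0, 20)]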
+ + """ + if paddle.in_dynamic_mode(): + assert len(rcuts) == len(nsels) + if len(rcuts) == 0: + return {} + nb, nloc, nsel = nlist.shape + if nsel < nsels[-1]: + pad = -paddle.ones( + [nb, nloc, nsels[-1] - nsel], + dtype=nlist.dtype, + ).to(device=nlist.place) + # nb x nloc x nsel + nlist = paddle.concat([nlist, pad], axis=-1) + if paddle.is_tensor(nsel): + nsel = paddle.to_tensor(nsels[-1], dtype=nsel.dtype) + else: + nsel = nsels[-1] + + # nb x nall x 3 + coord1 = coord.reshape([nb, -1, 3]) + nall = coord1.shape[1] + # nb x nloc x 3 + coord0 = coord1[:, :nloc, :] + nlist_mask = nlist == -1 + # nb x (nloc x nsel) x 3 + index = ( + nlist.masked_fill(nlist_mask, 0) + .reshape([nb, nloc * nsel]) + .unsqueeze(-1) + .expand([-1, -1, 3]) + ) + # nb x nloc x nsel x 3 + # coord2 = paddle.take_along_axis(coord1, axis=1, index=index).reshape( + # [nb, nloc, nsel, 3] + # ) + coord2 = decomp.take_along_axis(coord1, axis=1, indices=index).reshape( + [nb, nloc, nsel, 3] + ) + # nb x nloc x nsel x 3 + diff = coord2 - coord0[:, :, None, :] + # nb x nloc x nsel + # rr = paddle.linalg.norm(diff, axis=-1) + rr = decomp.norm(diff, axis=-1) + rr.masked_fill(nlist_mask, float("inf")) + nlist0 = nlist + ret = {} + for rc, ns in zip(rcuts[::-1], nsels[::-1]): + nlist0 = nlist0[:, :, :ns].masked_fill(rr[:, :, :ns] > rc, -1) + ret[get_multiple_nlist_key(rc, ns)] = nlist0 + return ret + + +def extend_coord_with_ghosts( + coord: paddle.Tensor, + atype: paddle.Tensor, + cell: Optional[paddle.Tensor], + rcut: float, + cell_cpu: Optional[paddle.Tensor] = None, +) -> tuple[paddle.Tensor, paddle.Tensor, paddle.Tensor]: + """Extend the coordinates of the atoms by appending peridoc images. + The number of images is large enough to ensure all the neighbors + within rcut are appended. + + Parameters + ---------- + coord : paddle.Tensor + original coordinates of shape [-1, nloc*3]. + atype : paddle.Tensor + atom type of shape [-1, nloc]. + cell : paddle.Tensor + simulation cell tensor of shape [-1, 9]. + rcut : float + the cutoff radius + cell_cpu : paddle.Tensor + cell on cpu for performance + + Returns + ------- + extended_coord: paddle.Tensor + extended coordinates of shape [-1, nall*3]. + extended_atype: paddle.Tensor + extended atom type of shape [-1, nall]. 
+    index_mapping: paddle.Tensor
+        mapping extended index to the local index
+
+    """
+    device = coord.place
+    nf, nloc = atype.shape[:2]
+    # int64 for index
+    aidx = paddle.tile(paddle.arange(nloc).to(device=device).unsqueeze(0), [nf, 1])  # pylint: disable=no-explicit-dtype
+    if cell is None:
+        nall = nloc
+        extend_coord = coord.clone()
+        extend_atype = atype.clone()
+        extend_aidx = aidx.clone()
+    else:
+        coord = coord.reshape([nf, nloc, 3])
+        cell = cell.reshape([nf, 3, 3])
+        cell_cpu = cell_cpu.reshape([nf, 3, 3]) if cell_cpu is not None else cell
+        # nf x 3
+        to_face = to_face_distance(cell_cpu)
+        # nf x 3
+        # *2: ghost copies on + and - directions
+        # +1: central cell
+        nbuff = paddle.ceil(rcut / to_face)
+        INT64_MIN = -9223372036854775808
+        nbuff = paddle.where(
+            paddle.isinf(nbuff),
+            paddle.full_like(nbuff, INT64_MIN, dtype=paddle.int64),
+            nbuff.astype(paddle.int64),
+        )
+        # 3
+        nbuff = paddle.amax(nbuff, axis=0)  # faster than paddle.max
+        nbuff_cpu = nbuff.cpu()
+        xi = (
+            paddle.arange(-nbuff_cpu[0], nbuff_cpu[0] + 1, 1).to(
+                dtype=env.GLOBAL_PD_FLOAT_PRECISION
+            )
+            # .cpu()
+        )  # pylint: disable=no-explicit-dtype
+        yi = (
+            paddle.arange(-nbuff_cpu[1], nbuff_cpu[1] + 1, 1).to(
+                dtype=env.GLOBAL_PD_FLOAT_PRECISION
+            )
+            # .cpu()
+        )  # pylint: disable=no-explicit-dtype
+        zi = (
+            paddle.arange(-nbuff_cpu[2], nbuff_cpu[2] + 1, 1).to(
+                dtype=env.GLOBAL_PD_FLOAT_PRECISION
+            )
+            # .cpu()
+        )  # pylint: disable=no-explicit-dtype
+        eye_3 = (
+            paddle.eye(3, dtype=env.GLOBAL_PD_FLOAT_PRECISION).to(
+                dtype=env.GLOBAL_PD_FLOAT_PRECISION
+            )
+            # .cpu()
+        )
+        xyz = xi.reshape([-1, 1, 1, 1]) * eye_3[0]
+        xyz = xyz + yi.reshape([1, -1, 1, 1]) * eye_3[1]
+        xyz = xyz + zi.reshape([1, 1, -1, 1]) * eye_3[2]
+        xyz = xyz.reshape([-1, 3])
+        # xyz = xyz.to(device=device)
+        # ns x 3
+        # shift_idx = xyz[paddle.argsort(paddle.norm(xyz, axis=1))]
+        shift_idx = xyz[paddle.argsort(decomp.norm(xyz, axis=1))]
+        ns, _ = shift_idx.shape
+        nall = ns * nloc
+        # nf x ns x 3
+        shift_vec = paddle.einsum("sd,fdk->fsk", shift_idx, cell)
+        # nf x ns x nloc x 3
+        extend_coord = coord[:, None, :, :] + shift_vec[:, :, None, :]
+        # nf x ns x nloc
+        extend_atype = paddle.tile(atype.unsqueeze(-2), [1, ns, 1])
+        # nf x ns x nloc
+        extend_aidx = paddle.tile(aidx.unsqueeze(-2), [1, ns, 1])
+    return (
+        extend_coord.reshape([nf, nall * 3]).to(device),
+        extend_atype.reshape([nf, nall]).to(device),
+        extend_aidx.reshape([nf, nall]).to(device),
+    )
diff --git a/deepmd/pd/utils/preprocess.py b/deepmd/pd/utils/preprocess.py
new file mode 100644
index 0000000000..3e047c1b8b
--- /dev/null
+++ b/deepmd/pd/utils/preprocess.py
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: LGPL-3.0-or-later
+import logging
+
+import paddle
+
+log = logging.getLogger(__name__)
+
+
+def compute_smooth_weight(distance, rmin: float, rmax: float):
+    """Compute smooth weight for descriptor elements."""
+    if rmin >= rmax:
+        raise ValueError("rmin should be less than rmax.")
+    min_mask = distance <= rmin
+    max_mask = distance >= rmax
+    mid_mask = paddle.logical_not(paddle.logical_or(min_mask, max_mask))
+    uu = (distance - rmin) / (rmax - rmin)
+    vv = uu * uu * uu * (-6 * uu * uu + 15 * uu - 10) + 1
+    return vv * mid_mask.astype(vv.dtype) + min_mask.astype(vv.dtype)
diff --git a/deepmd/pd/utils/region.py b/deepmd/pd/utils/region.py
new file mode 100644
index 0000000000..21927e3619
--- /dev/null
+++ b/deepmd/pd/utils/region.py
@@ -0,0 +1,119 @@
+# SPDX-License-Identifier: LGPL-3.0-or-later
+import paddle
+
+from deepmd.pd.utils import (
+    decomp,
+)
+
+
+def phys2inter(
+    coord: paddle.Tensor,
+    cell: paddle.Tensor,
+) -> paddle.Tensor:
+    """Convert physical coordinates to internal (direct) coordinates.
+
+    Parameters
+    ----------
+    coord : paddle.Tensor
+        physical coordinates of shape [*, na, 3].
+    cell : paddle.Tensor
+        simulation cell tensor of shape [*, 3, 3].
+
+    Returns
+    -------
+    inter_coord: paddle.Tensor
+        the internal coordinates
+
+    """
+    if paddle.in_dynamic_mode():
+        try:
+            rec_cell = paddle.linalg.inv(cell)
+        except Exception:
+            # singular cell: propagate NaNs instead of raising
+            rec_cell = paddle.full_like(cell, float("nan"))
+            rec_cell.stop_gradient = cell.stop_gradient
+    else:
+        rec_cell = paddle.linalg.inv(cell)
+    return paddle.matmul(coord, rec_cell)
+
+
+def inter2phys(
+    coord: paddle.Tensor,
+    cell: paddle.Tensor,
+) -> paddle.Tensor:
+    """Convert internal (direct) coordinates to physical coordinates.
+
+    Parameters
+    ----------
+    coord : paddle.Tensor
+        internal coordinates of shape [*, na, 3].
+    cell : paddle.Tensor
+        simulation cell tensor of shape [*, 3, 3].
+
+    Returns
+    -------
+    phys_coord: paddle.Tensor
+        the physical coordinates
+
+    """
+    return paddle.matmul(coord, cell)
+
+
+def to_face_distance(
+    cell: paddle.Tensor,
+) -> paddle.Tensor:
+    """Compute the to-face-distance of the simulation cell.
+
+    Parameters
+    ----------
+    cell : paddle.Tensor
+        simulation cell tensor of shape [*, 3, 3].
+
+    Returns
+    -------
+    dist: paddle.Tensor
+        the to face distances of shape [*, 3]
+
+    """
+    cshape = cell.shape
+    dist = b_to_face_distance(cell.reshape([-1, 3, 3]))
+    return dist.reshape(list(cshape[:-2]) + [3])  # noqa:RUF005
+
+
+def b_to_face_distance(cell):
+    volume = paddle.linalg.det(cell)
+    c_yz = paddle.cross(cell[:, 1], cell[:, 2], axis=-1)
+    # _h2yz = volume / paddle.linalg.norm(c_yz, axis=-1)
+    _h2yz = volume / decomp.norm(c_yz, axis=-1)
+    c_zx = paddle.cross(cell[:, 2], cell[:, 0], axis=-1)
+    # _h2zx = volume / paddle.linalg.norm(c_zx, axis=-1)
+    _h2zx = volume / decomp.norm(c_zx, axis=-1)
+    c_xy = paddle.cross(cell[:, 0], cell[:, 1], axis=-1)
+    # _h2xy = volume / paddle.linalg.norm(c_xy, axis=-1)
+    _h2xy = volume / decomp.norm(c_xy, axis=-1)
+    return paddle.stack([_h2yz, _h2zx, _h2xy], axis=1)
+
+
+# b_to_face_distance = paddle.vmap(
+#     _to_face_distance, in_dims=(0), out_dims=(0))
+
+
+def normalize_coord(
+    coord: paddle.Tensor,
+    cell: paddle.Tensor,
+) -> paddle.Tensor:
+    """Apply PBC according to the atomic coordinates.
+
+    Parameters
+    ----------
+    coord : paddle.Tensor
+        original coordinates of shape [*, na, 3].
+    cell : paddle.Tensor
+        simulation cell tensor of shape [*, 3, 3].
+
+    Returns
+    -------
+    wrapped_coord: paddle.Tensor
+        wrapped coordinates of shape [*, na, 3].
+
+    """
+    icoord = phys2inter(coord, cell)
+    icoord = paddle.remainder(icoord, paddle.full([], 1.0))
+    return inter2phys(icoord, cell)
diff --git a/deepmd/pd/utils/serialization.py b/deepmd/pd/utils/serialization.py
new file mode 100644
index 0000000000..0274608424
--- /dev/null
+++ b/deepmd/pd/utils/serialization.py
@@ -0,0 +1,55 @@
+# SPDX-License-Identifier: LGPL-3.0-or-later
+import json
+
+import paddle
+
+from deepmd.pd.model.model.model import (
+    BaseModel,
+)
+
+
+def serialize_from_file(model_file: str) -> dict:
+    """Serialize the model file to a dictionary.
+
+    Parameters
+    ----------
+    model_file : str
+        The model file to be serialized.
+
+    Returns
+    -------
+    dict
+        The serialized model data.
+    """
+    raise NotImplementedError("Paddle does not support jit.export yet.")
+
+
+def deserialize_to_file(model_file: str, data: dict) -> None:
+    """Deserialize the dictionary to a model file.
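+
+    Note: the model is rebuilt with ``BaseModel.deserialize`` and then
+    exported through ``paddle.jit.save``, so the ``.json`` suffix is
+    stripped and a Paddle inference program is written instead.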
+
+    Parameters
+    ----------
+    model_file : str
+        The model file to be saved.
+    data : dict
+        The dictionary to be deserialized.
+    """
+    if not model_file.endswith(".json"):
+        raise ValueError("Paddle backend only supports converting .json files")
+    model = BaseModel.deserialize(data["model"])
+    # keep the def script as a JSON string so that JIT is happy
+    model.model_def_script = json.dumps(data["model_def_script"])
+    if "min_nbor_dist" in data.get("@variables", {}):
+        model.min_nbor_dist = float(data["@variables"]["min_nbor_dist"])
+    # model = paddle.jit.to_static(model)
+    paddle.set_flags(
+        {
+            "FLAGS_save_cf_stack_op": 1,
+            "FLAGS_prim_enable_dynamic": 1,
+            "FLAGS_enable_pir_api": 1,
+        }
+    )
+    paddle.jit.save(
+        model,
+        model_file.split(".json")[0],
+    )
diff --git a/deepmd/pd/utils/stat.py b/deepmd/pd/utils/stat.py
new file mode 100644
index 0000000000..a8bdbd6415
--- /dev/null
+++ b/deepmd/pd/utils/stat.py
@@ -0,0 +1,604 @@
+# SPDX-License-Identifier: LGPL-3.0-or-later
+import logging
+from collections import (
+    defaultdict,
+)
+from typing import (
+    Callable,
+    Optional,
+    Union,
+)
+
+import numpy as np
+import paddle
+
+from deepmd.dpmodel.output_def import (
+    FittingOutputDef,
+)
+from deepmd.pd.utils import (
+    AtomExcludeMask,
+)
+from deepmd.pd.utils.auto_batch_size import (
+    AutoBatchSize,
+)
+from deepmd.pd.utils.utils import (
+    dict_to_device,
+    to_numpy_array,
+    to_paddle_tensor,
+)
+from deepmd.utils.out_stat import (
+    compute_stats_from_atomic,
+    compute_stats_from_redu,
+)
+from deepmd.utils.path import (
+    DPPath,
+)
+
+log = logging.getLogger(__name__)
+
+
+def make_stat_input(datasets, dataloaders, nbatches):
+    """Pack data for statistics.
+
+    Args:
+    - datasets: A list of datasets to analyze.
+    - dataloaders: The dataloaders corresponding to the datasets.
+    - nbatches: Batch count for collecting stats.
+
+    Returns
+    -------
+    - a list of dicts, each of which contains data from a system
+    """
+    lst = []
+    log.info(f"Packing data for statistics from {len(datasets)} systems")
+    for i in range(len(datasets)):
+        sys_stat = {}
+
+        iterator = iter(dataloaders[i])
+        numb_batches = min(nbatches, len(dataloaders[i]))
+        for _ in range(numb_batches):
+            try:
+                stat_data = next(iterator)
+            except StopIteration:
+                iterator = iter(dataloaders[i])
+                stat_data = next(iterator)
+            for dd in stat_data:
+                if stat_data[dd] is None:
+                    sys_stat[dd] = None
+                elif isinstance(stat_data[dd], paddle.Tensor):
+                    if dd not in sys_stat:
+                        sys_stat[dd] = []
+                    sys_stat[dd].append(stat_data[dd])
+                elif isinstance(stat_data[dd], np.float32):
+                    sys_stat[dd] = stat_data[dd]
+                else:
+                    pass
+
+        for key in sys_stat:
+            if isinstance(sys_stat[key], np.float32):
+                pass
+            elif sys_stat[key] is None or sys_stat[key][0] is None:
+                sys_stat[key] = None
+            elif isinstance(stat_data[dd], paddle.Tensor):
+                sys_stat[key] = paddle.concat(sys_stat[key], axis=0)
+        dict_to_device(sys_stat)
+        lst.append(sys_stat)
+    return lst
+
+
+def _restore_from_file(
+    stat_file_path: DPPath,
+    keys: list[str] = ["energy"],
+) -> tuple[Optional[dict], Optional[dict]]:
+    if stat_file_path is None:
+        return None, None
+    stat_files = [stat_file_path / f"bias_atom_{kk}" for kk in keys]
+    if all(not (ii.is_file()) for ii in stat_files):
+        return None, None
+    stat_files = [stat_file_path / f"std_atom_{kk}" for kk in keys]
+    if all(not (ii.is_file()) for ii in stat_files):
+        return None, None
+
+    ret_bias = {}
+    ret_std = {}
+    for kk in keys:
+        fp = stat_file_path / f"bias_atom_{kk}"
+        # only read the key that exists
+        if fp.is_file():
+            ret_bias[kk] = fp.load_numpy()
+    for kk in keys:
+        fp = stat_file_path / f"std_atom_{kk}"
+        # only read the key that exists
+        if fp.is_file():
+            ret_std[kk] = fp.load_numpy()
+    return ret_bias, ret_std
+
+
+def _save_to_file(
+    stat_file_path: DPPath,
+    bias_out: dict,
+    std_out: dict,
+):
+    assert stat_file_path is not None
+    stat_file_path.mkdir(exist_ok=True, parents=True)
+    for kk, vv in bias_out.items():
+        fp = stat_file_path / f"bias_atom_{kk}"
+        fp.save_numpy(vv)
+    for kk, vv in std_out.items():
+        fp = stat_file_path / f"std_atom_{kk}"
+        fp.save_numpy(vv)
+
+
+def _post_process_stat(
+    out_bias,
+    out_std,
+):
+    """Post process the statistics.
+
+    For global statistics, we do not have the std for each type of atoms,
+    thus fake the output std by ones for all the types.
+
+    """
+    new_std = {}
+    for kk, vv in out_bias.items():
+        new_std[kk] = np.ones_like(vv)
+    return out_bias, new_std
+
+
+def _compute_model_predict(
+    sampled: Union[Callable[[], list[dict]], list[dict]],
+    keys: list[str],
+    model_forward: Callable[..., paddle.Tensor],
+):
+    auto_batch_size = AutoBatchSize()
+    model_predict = {kk: [] for kk in keys}
+    for system in sampled:
+        nframes = system["coord"].shape[0]
+        coord, atype, box, natoms = (
+            system["coord"],
+            system["atype"],
+            system["box"],
+            system["natoms"],
+        )
+        fparam = system.get("fparam", None)
+        aparam = system.get("aparam", None)
+
+        def model_forward_auto_batch_size(*args, **kwargs):
+            return auto_batch_size.execute_all(
+                model_forward,
+                nframes,
+                system["atype"].shape[-1],
+                *args,
+                **kwargs,
+            )
+
+        sample_predict = model_forward_auto_batch_size(
+            coord, atype, box, fparam=fparam, aparam=aparam
+        )
+        for kk in keys:
+            model_predict[kk].append(
+                to_numpy_array(
+                    sample_predict[kk]  # nf x nloc x odims
+                )
+            )
+    return model_predict
+
+
+def _make_preset_out_bias(
+    ntypes: int,
+    ibias: list[Optional[np.ndarray]],
+) -> Optional[np.ndarray]:
+    """Make preset out bias.
+
+    output:
+        a np array of shape [ntypes, *(odim0, odim1, ...)] if any item is not None;
+        None if all items are None.
+    """
+    if len(ibias) != ntypes:
+        raise ValueError("the length of the preset bias list should be ntypes")
+    if all(ii is None for ii in ibias):
+        return None
+    for refb in ibias:
+        if refb is not None:
+            break
+    refb = np.array(refb)
+    nbias = [
+        np.full_like(refb, np.nan, dtype=np.float64) if ii is None else ii
+        for ii in ibias
+    ]
+    return np.array(nbias)
+
+
+def _fill_stat_with_global(
+    atomic_stat: Union[np.ndarray, None],
+    global_stat: np.ndarray,
+):
+    """This function is used to fill atomic stat with global stat.
+
+    If the atomic stat is None, use the global stat directly; if the atomic
+    stat is not None but has nan values (missing atypes), fill those entries
+    with the global stat.
+
+    Parameters
+    ----------
+    atomic_stat : Union[np.ndarray, None]
+        The atomic stat.
+    global_stat : np.ndarray
+        The global stat.
+    """
+    if atomic_stat is None:
+        return global_stat
+    else:
+        atomic_stat = atomic_stat.reshape(global_stat.shape)
+        return np.nan_to_num(
+            np.where(
+                np.isnan(atomic_stat) & ~np.isnan(global_stat), global_stat, atomic_stat
+            )
+        )
+
+
+def compute_output_stats(
+    merged: Union[Callable[[], list[dict]], list[dict]],
+    ntypes: int,
+    keys: Union[str, list[str]] = ["energy"],
+    stat_file_path: Optional[DPPath] = None,
+    rcond: Optional[float] = None,
+    preset_bias: Optional[dict[str, list[Optional[np.ndarray]]]] = None,
+    model_forward: Optional[Callable[..., paddle.Tensor]] = None,
+    atomic_output: Optional[FittingOutputDef] = None,
+):
+    """
+    Compute the output statistics (e.g. energy bias) for the fitting net from packed data.
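+
+    For global labels, the per-type bias solves a least-squares problem of the
+    form ``label[frame] ~ sum_t natoms[frame, t] * bias[t]`` (see
+    ``compute_stats_from_redu``); atomic labels are instead averaged per type
+    (see ``compute_stats_from_atomic``).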
+
+    Parameters
+    ----------
+    merged : Union[Callable[[], list[dict]], list[dict]]
+        - list[dict]: A list of data samples from various data systems.
+            Each element, `merged[i]`, is a data dictionary containing `keys`: `paddle.Tensor`
+            originating from the `i`-th data system.
+        - Callable[[], list[dict]]: A lazy function that returns data samples in the above format
+            only when needed. Since the sampling process can be slow and memory-intensive,
+            the lazy function helps by only sampling once.
+    ntypes : int
+        The number of atom types.
+    stat_file_path : DPPath, optional
+        The path to the stat file.
+    rcond : float, optional
+        The condition number for the regression of atomic energy.
+    preset_bias : dict[str, list[Optional[np.ndarray]]], optional
+        Specifying atomic energy contribution in vacuum. Given by key:value pairs.
+        The value is a list specifying the bias. The elements can be None or np.ndarray of output shape.
+        For example: [None, [2.]] means type 0 is not set, type 1 is set to [2.]
+        The `set_davg_zero` key in the descriptor should be set.
+    model_forward : Callable[..., paddle.Tensor], optional
+        The wrapped forward function of the atomic model.
+        If not None, the model will be utilized to generate the original energy prediction,
+        which will be subtracted from the energy label of the data.
+        The difference will then be used to calculate the delta complement energy bias for each type.
+    atomic_output : FittingOutputDef, optional
+        The output of the atomic model.
+    """
+    # try to restore the bias from the stat file
+    bias_atom_e, std_atom_e = _restore_from_file(stat_file_path, keys)
+
+    # failed to restore the bias from the stat file; compute it
+    if bias_atom_e is None:
+        # only get data once; sampled is a list of dict[str, paddle.Tensor]
+        sampled = merged() if callable(merged) else merged
+        if model_forward is not None:
+            model_pred = _compute_model_predict(sampled, keys, model_forward)
+        else:
+            model_pred = None
+
+        # remove the keys that are not in the sample
+        keys = [keys] if isinstance(keys, str) else keys
+        assert isinstance(keys, list)
+        new_keys = [
+            ii
+            for ii in keys
+            if (ii in sampled[0].keys()) or ("atom_" + ii in sampled[0].keys())
+        ]
+        del keys
+        keys = new_keys
+        # split systems based on the available labels
+        atomic_sampled_idx = defaultdict(list)
+        global_sampled_idx = defaultdict(list)
+
+        for kk in keys:
+            for idx, system in enumerate(sampled):
+                if (("find_atom_" + kk) in system) and (
+                    system["find_atom_" + kk] > 0.0
+                ):
+                    atomic_sampled_idx[kk].append(idx)
+                elif (("find_" + kk) in system) and (system["find_" + kk] > 0.0):
+                    global_sampled_idx[kk].append(idx)
+
+        # use index to gather model predictions for the corresponding systems.
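+        # `model_pred[kk]` is a list aligned with `sampled`; the global branch
+        # additionally sums over the atomic dimension so that the predictions
+        # match the reduced (per-frame) labels.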
+
+        model_pred_g = (
+            {
+                kk: [
+                    np.sum(vv[idx], axis=1) for idx in global_sampled_idx[kk]
+                ]  # sum atomic dim
+                for kk, vv in model_pred.items()
+            }
+            if model_pred
+            else None
+        )
+        model_pred_a = (
+            {
+                kk: [vv[idx] for idx in atomic_sampled_idx[kk]]
+                for kk, vv in model_pred.items()
+            }
+            if model_pred
+            else None
+        )
+
+        # concat all frames within those systems
+        model_pred_g = (
+            {
+                kk: np.concatenate(model_pred_g[kk])
+                for kk in model_pred_g.keys()
+                if len(model_pred_g[kk]) > 0
+            }
+            if model_pred
+            else None
+        )
+        model_pred_a = (
+            {
+                kk: np.concatenate(model_pred_a[kk])
+                for kk in model_pred_a.keys()
+                if len(model_pred_a[kk]) > 0
+            }
+            if model_pred
+            else None
+        )
+
+        # compute stat
+        bias_atom_g, std_atom_g = compute_output_stats_global(
+            sampled,
+            ntypes,
+            keys,
+            rcond,
+            preset_bias,
+            model_pred_g,
+            atomic_output,
+        )
+        bias_atom_a, std_atom_a = compute_output_stats_atomic(
+            sampled,
+            ntypes,
+            keys,
+            model_pred_a,
+        )
+
+        # merge global/atomic bias
+        bias_atom_e, std_atom_e = {}, {}
+        for kk in keys:
+            # use atomic bias whenever available
+            if kk in bias_atom_a:
+                bias_atom_e[kk] = bias_atom_a[kk]
+                std_atom_e[kk] = std_atom_a[kk]
+            else:
+                bias_atom_e[kk] = None
+                std_atom_e[kk] = None
+            # use global bias to fill missing atomic bias
+            if kk in bias_atom_g:
+                bias_atom_e[kk] = _fill_stat_with_global(
+                    bias_atom_e[kk], bias_atom_g[kk]
+                )
+                std_atom_e[kk] = _fill_stat_with_global(std_atom_e[kk], std_atom_g[kk])
+            if (bias_atom_e[kk] is None) or (std_atom_e[kk] is None):
+                raise RuntimeError("Failed to compute stat.")
+
+        if stat_file_path is not None:
+            _save_to_file(stat_file_path, bias_atom_e, std_atom_e)
+
+    bias_atom_e = {kk: to_paddle_tensor(vv) for kk, vv in bias_atom_e.items()}
+    std_atom_e = {kk: to_paddle_tensor(vv) for kk, vv in std_atom_e.items()}
+    return bias_atom_e, std_atom_e
+
+
+def compute_output_stats_global(
+    sampled: list[dict],
+    ntypes: int,
+    keys: list[str],
+    rcond: Optional[float] = None,
+    preset_bias: Optional[dict[str, list[Optional[paddle.Tensor]]]] = None,
+    model_pred: Optional[dict[str, np.ndarray]] = None,
+    atomic_output: Optional[FittingOutputDef] = None,
+):
+    """This function only handles stat computation from reduced global labels."""
+    # return directly if the model prediction is empty for global stats
+    if model_pred == {}:
+        return {}, {}
+
+    # get label dict from samples; for each key, only pick the systems with global labels.
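+    # systems advertise available labels through `find_<key>` flags;
+    # only systems with `find_<key>` > 0 contribute frames here.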
+    outputs = {
+        kk: [
+            system[kk]
+            for system in sampled
+            if kk in system and system.get(f"find_{kk}", 0) > 0
+        ]
+        for kk in keys
+    }
+
+    data_mixed_type = "real_natoms_vec" in sampled[0]
+    natoms_key = "natoms" if not data_mixed_type else "real_natoms_vec"
+    for system in sampled:
+        if "atom_exclude_types" in system:
+            type_mask = AtomExcludeMask(
+                ntypes, system["atom_exclude_types"]
+            ).get_type_mask()
+            system[natoms_key][:, 2:] *= type_mask.unsqueeze(0)
+
+    input_natoms = {
+        kk: [
+            item[natoms_key]
+            for item in sampled
+            if kk in item and item.get(f"find_{kk}", 0) > 0
+        ]
+        for kk in keys
+    }
+    # shape: (nframes, ndim)
+    merged_output = {
+        kk: to_numpy_array(paddle.concat(outputs[kk]))
+        for kk in keys
+        if len(outputs[kk]) > 0
+    }
+    # shape: (nframes, ntypes)
+
+    merged_natoms = {
+        kk: to_numpy_array(paddle.concat(input_natoms[kk])[:, 2:])
+        for kk in keys
+        if len(input_natoms[kk]) > 0
+    }
+    nf = {kk: merged_natoms[kk].shape[0] for kk in keys if kk in merged_natoms}
+    if preset_bias is not None:
+        assigned_atom_ener = {
+            kk: _make_preset_out_bias(ntypes, preset_bias[kk])
+            if kk in preset_bias.keys()
+            else None
+            for kk in keys
+        }
+    else:
+        assigned_atom_ener = {kk: None for kk in keys}
+
+    if model_pred is None:
+        stats_input = merged_output
+    else:
+        # subtract the model bias and output the delta bias
+
+        stats_input = {
+            kk: merged_output[kk] - model_pred[kk] for kk in keys if kk in merged_output
+        }
+
+    bias_atom_e = {}
+    std_atom_e = {}
+    for kk in keys:
+        if kk in stats_input:
+            if atomic_output is not None and atomic_output.get_data()[kk].intensive:
+                task_dim = stats_input[kk].shape[1]
+                assert merged_natoms[kk].shape == (nf[kk], ntypes)
+                stats_input[kk] = (
+                    merged_natoms[kk].sum(axis=1).reshape([-1, 1]) * stats_input[kk]
+                )
+                assert stats_input[kk].shape == (nf[kk], task_dim)
+            bias_atom_e[kk], std_atom_e[kk] = compute_stats_from_redu(
+                stats_input[kk],
+                merged_natoms[kk],
+                assigned_bias=assigned_atom_ener[kk],
+                rcond=rcond,
+            )
+        else:
+            # this key does not have global labels, skip it.
+            continue
+    bias_atom_e, std_atom_e = _post_process_stat(bias_atom_e, std_atom_e)
+
+    # unbias_e is only used to print the RMSE
+
+    if model_pred is None:
+        unbias_e = {
+            kk: merged_natoms[kk] @ bias_atom_e[kk].reshape([ntypes, -1])
+            for kk in bias_atom_e.keys()
+        }
+    else:
+        unbias_e = {
+            kk: model_pred[kk].reshape([nf[kk], -1])
+            + merged_natoms[kk] @ bias_atom_e[kk].reshape([ntypes, -1])
+            for kk in bias_atom_e.keys()
+        }
+    atom_numbs = {kk: merged_natoms[kk].sum(-1) for kk in bias_atom_e.keys()}
+
+    def rmse(x):
+        return np.sqrt(np.mean(np.square(x)))
+
+    for kk in bias_atom_e.keys():
+        rmse_ae = rmse(
+            (
+                unbias_e[kk].reshape([nf[kk], -1]).astype(merged_output[kk].dtype)
+                - merged_output[kk].reshape([nf[kk], -1])
+            )
+            / atom_numbs[kk][:, None].astype(merged_output[kk].dtype)
+        )
+        log.info(
+            f"RMSE of {kk} per atom after linear regression is: {rmse_ae} in the unit of {kk}."
+        )
+    return bias_atom_e, std_atom_e
+
+
+def compute_output_stats_atomic(
+    sampled: list[dict],
+    ntypes: int,
+    keys: list[str],
+    model_pred: Optional[dict[str, np.ndarray]] = None,
+):
+    # get label dict from samples; for each key, only pick the systems with atomic labels.
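+    # atomic labels live under `atom_<key>` with matching
+    # `find_atom_<key>` availability flags.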
+ outputs = { + kk: [ + system["atom_" + kk] + for system in sampled + if ("atom_" + kk) in system and system.get(f"find_atom_{kk}", 0) > 0 + ] + for kk in keys + } + natoms = { + kk: [ + system["atype"] + for system in sampled + if ("atom_" + kk) in system and system.get(f"find_atom_{kk}", 0) > 0 + ] + for kk in keys + } + # shape: (nframes, nloc, ndim) + merged_output = { + kk: to_numpy_array(paddle.concat(outputs[kk])) + for kk in keys + if len(outputs[kk]) > 0 + } + merged_natoms = { + kk: to_numpy_array(paddle.concat(natoms[kk])) + for kk in keys + if len(natoms[kk]) > 0 + } + # reshape merged data to [nf, nloc, ndim] + merged_output = { + kk: merged_output[kk].reshape((*merged_natoms[kk].shape, -1)) + for kk in merged_output + } + + if model_pred is None: + stats_input = merged_output + else: + # subtract the model bias and output the delta bias + stats_input = { + kk: merged_output[kk] - model_pred[kk].reshape(merged_output[kk].shape) + for kk in keys + if kk in merged_output + } + + bias_atom_e = {} + std_atom_e = {} + + for kk in keys: + if kk in stats_input: + bias_atom_e[kk], std_atom_e[kk] = compute_stats_from_atomic( + stats_input[kk], + merged_natoms[kk], + ) + # correction for missing types + missing_types = ntypes - merged_natoms[kk].max() - 1 + if missing_types > 0: + assert ( + bias_atom_e[kk].dtype is std_atom_e[kk].dtype + ), "bias and std should be of the same dtypes" + nan_padding = np.empty( + (missing_types, bias_atom_e[kk].shape[1]), + dtype=bias_atom_e[kk].dtype, + ) + nan_padding.fill(np.nan) + bias_atom_e[kk] = np.concatenate([bias_atom_e[kk], nan_padding], axis=0) + std_atom_e[kk] = np.concatenate([std_atom_e[kk], nan_padding], axis=0) + else: + # this key does not have atomic labels, skip it. + continue + return bias_atom_e, std_atom_e diff --git a/deepmd/pd/utils/update_sel.py b/deepmd/pd/utils/update_sel.py new file mode 100644 index 0000000000..32b8d66c73 --- /dev/null +++ b/deepmd/pd/utils/update_sel.py @@ -0,0 +1,14 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later + +from deepmd.pd.utils.neighbor_stat import ( + NeighborStat, +) +from deepmd.utils.update_sel import ( + BaseUpdateSel, +) + + +class UpdateSel(BaseUpdateSel): + @property + def neighbor_stat(self) -> type[NeighborStat]: + return NeighborStat diff --git a/deepmd/pd/utils/utils.py b/deepmd/pd/utils/utils.py new file mode 100644 index 0000000000..48732ff84e --- /dev/null +++ b/deepmd/pd/utils/utils.py @@ -0,0 +1,179 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from __future__ import ( + annotations, +) + +from typing import ( + TYPE_CHECKING, + overload, +) + +import ml_dtypes +import numpy as np +import paddle +import paddle.nn.functional as F + +from deepmd.dpmodel.common import PRECISION_DICT as NP_PRECISION_DICT + +from .env import ( + DEVICE, +) +from .env import PRECISION_DICT as PD_PRECISION_DICT + +if TYPE_CHECKING: + from deepmd.pd.model.network.init import ( + PaddleGenerator, + ) + + +class ActivationFn(paddle.nn.Layer): + def __init__(self, activation: str | None): + super().__init__() + self.activation: str = activation if activation is not None else "linear" + + def forward(self, x: paddle.Tensor) -> paddle.Tensor: + """Returns the tensor after applying activation function corresponding to `activation`.""" + if self.activation.lower() == "relu": + return F.relu(x) + elif self.activation.lower() == "gelu" or self.activation.lower() == "gelu_tf": + return F.gelu(x, approximate=True) + elif self.activation.lower() == "tanh": + return paddle.tanh(x) + elif self.activation.lower() 
== "relu6": + return F.relu6(x) + elif self.activation.lower() == "softplus": + return F.softplus(x) + elif self.activation.lower() == "sigmoid": + return F.sigmoid(x) + elif self.activation.lower() == "linear" or self.activation.lower() == "none": + return x + else: + raise RuntimeError(f"activation function {self.activation} not supported") + + +@overload +def to_numpy_array(xx: paddle.Tensor) -> np.ndarray: ... + + +@overload +def to_numpy_array(xx: None) -> None: ... + + +def to_numpy_array( + xx, +): + if xx is None: + return None + assert xx is not None + # Create a reverse mapping of PD_PRECISION_DICT + reverse_precision_dict = {v: k for k, v in PD_PRECISION_DICT.items()} + # Use the reverse mapping to find keys with the desired value + prec = reverse_precision_dict.get(xx.dtype, None) + prec = NP_PRECISION_DICT.get(prec, np.float64) + if prec is None: + raise ValueError(f"unknown precision {xx.dtype}") + if isinstance(xx, np.ndarray): + return xx.astype(prec) + if xx.dtype == paddle.bfloat16: + xx = xx.astype(paddle.get_default_dtype()) + return xx.numpy().astype(prec) + + +@overload +def to_paddle_tensor(xx: np.ndarray) -> paddle.Tensor: ... + + +@overload +def to_paddle_tensor(xx: None) -> None: ... + + +def to_paddle_tensor( + xx, +): + if xx is None: + return None + assert xx is not None + if not isinstance(xx, np.ndarray): + return xx + # Create a reverse mapping of NP_PRECISION_DICT + reverse_precision_dict = {v: k for k, v in NP_PRECISION_DICT.items()} + # Use the reverse mapping to find keys with the desired value + prec = reverse_precision_dict.get(xx.dtype.type, None) + prec = PD_PRECISION_DICT.get(prec, None) + if prec is None: + raise ValueError(f"unknown precision {xx.dtype}") + if xx.dtype == ml_dtypes.bfloat16: + xx = xx.astype(np.float32) + return paddle.to_tensor(xx, dtype=prec, place=DEVICE) + + +def dict_to_device(sample_dict): + for key in sample_dict: + if isinstance(sample_dict[key], list): + sample_dict[key] = [item.to(DEVICE) for item in sample_dict[key]] + if isinstance(sample_dict[key], np.float32): + sample_dict[key] = ( + paddle.ones(1, dtype=paddle.float32).to(device=DEVICE) + * sample_dict[key] + ) + else: + if sample_dict[key] is not None: + sample_dict[key] = sample_dict[key].to(DEVICE) + + +# https://github.com/numpy/numpy/blob/a4cddb60489f821a1a4dffc16cd5c69755d43bdb/numpy/random/bit_generator.pyx#L58-L63 +INIT_A = 0x43B0D7E5 +MULT_A = 0x931E8875 +MIX_MULT_L = 0xCA01F9DD +MIX_MULT_R = 0x4973F715 +XSHIFT = 16 + + +def hashmix(value: int, hash_const: list[int]): + value ^= INIT_A + hash_const[0] *= MULT_A + value *= INIT_A + # prevent overflow + hash_const[0] &= 0xFFFF_FFFF_FFFF_FFFF + value &= 0xFFFF_FFFF_FFFF_FFFF + value ^= value >> XSHIFT + return value + + +def mix(x: int, y: int): + result = MIX_MULT_L * x - MIX_MULT_R * y + # prevent overflow + result &= 0xFFFF_FFFF_FFFF_FFFF + result ^= result >> XSHIFT + return result + + +def mix_entropy(entropy_array: list[int]) -> int: + # https://github.com/numpy/numpy/blob/a4cddb60489f821a1a4dffc16cd5c69755d43bdb/numpy/random/bit_generator.pyx#L341-L374 + hash_const = [INIT_A] + mixer = hashmix(entropy_array[0], hash_const) + for i_src in range(1, len(entropy_array)): + mixer = mix(mixer, hashmix(entropy_array[i_src], hash_const)) + return mixer + + +def get_generator( + seed: int | list[int] | None = None, +) -> PaddleGenerator | None: + if seed is not None: + if isinstance(seed, list): + seed = mix_entropy(seed) + if DEVICE == "cpu": + generator = paddle.framework.core.default_cpu_generator() + elif 
DEVICE == "gpu": + generator = paddle.framework.core.default_cuda_generator(0) + elif DEVICE.startswith("gpu:"): + generator = paddle.framework.core.default_cuda_generator( + int(DEVICE.split("gpu:")[1]) + ) + else: + raise ValueError("DEVICE should be cpu or gpu or gpu:x") + generator.manual_seed(seed) + return generator + else: + return None diff --git a/deepmd/pt/model/network/network.py b/deepmd/pt/model/network/network.py index 353ed0c063..5b4d741a3b 100644 --- a/deepmd/pt/model/network/network.py +++ b/deepmd/pt/model/network/network.py @@ -1,5 +1,6 @@ # SPDX-License-Identifier: LGPL-3.0-or-later from typing import ( + Final, Optional, Union, ) @@ -8,29 +9,17 @@ import torch import torch.nn as nn import torch.nn.functional as F +import torch.utils.checkpoint +from deepmd.dpmodel.utils.type_embed import ( + get_econf_tebd, +) from deepmd.pt.model.network.mlp import ( EmbeddingNet, ) from deepmd.pt.utils import ( env, ) -from deepmd.utils.version import ( - check_version_compatibility, -) - -try: - from typing import ( - Final, - ) -except ImportError: - from torch.jit import Final - -import torch.utils.checkpoint - -from deepmd.dpmodel.utils.type_embed import ( - get_econf_tebd, -) from deepmd.pt.utils.utils import ( ActivationFn, to_torch_tensor, @@ -38,6 +27,9 @@ from deepmd.utils.finetune import ( get_index_between_two_maps, ) +from deepmd.utils.version import ( + check_version_compatibility, +) def Tensor(*shape): diff --git a/deepmd/utils/batch_size.py b/deepmd/utils/batch_size.py index 7cd125c97b..c1cbea4cda 100644 --- a/deepmd/utils/batch_size.py +++ b/deepmd/utils/batch_size.py @@ -175,7 +175,11 @@ def execute_with_batch_size( *[ ( vv[start_index:end_index, ...] - if array_api_compat.is_array_api_obj(vv) and vv.ndim > 1 + if ( + array_api_compat.is_array_api_obj(vv) + and vv.ndim > 1 + or str(vv.__class__) == "" + ) else vv ) for vv in args @@ -183,7 +187,11 @@ def execute_with_batch_size( **{ kk: ( vv[start_index:end_index, ...] - if array_api_compat.is_array_api_obj(vv) and vv.ndim > 1 + if ( + array_api_compat.is_array_api_obj(vv) + and vv.ndim > 1 + or str(vv.__class__) == "" + ) else vv ) for kk, vv in kwargs.items() @@ -222,6 +230,14 @@ def concate_result(r): if array_api_compat.is_array_api_obj(r[0]): xp = array_api_compat.array_namespace(r[0]) ret = xp.concat(r, axis=0) + elif str(r[0].__class__) == "": + try: + import paddle + except ModuleNotFoundError as e: + raise ModuleNotFoundError( + "The 'paddlepaddle' is required but not installed." + ) from e + ret = paddle.concat(r, axis=0) else: raise RuntimeError(f"Unexpected result type {type(r[0])}") return ret diff --git a/deepmd/utils/data.py b/deepmd/utils/data.py index 493a9d8d54..b93356bdbf 100644 --- a/deepmd/utils/data.py +++ b/deepmd/utils/data.py @@ -247,6 +247,21 @@ def get_item_torch(self, index: int) -> dict: frame["fid"] = index return frame + def get_item_paddle(self, index: int) -> dict: + """Get a single frame data . The frame is picked from the data system by index. The index is coded across all the sets. + + Parameters + ---------- + index + index of the frame + """ + i = bisect.bisect_right(self.prefix_sum, index) + frames = self._load_set(self.dirs[i]) + frame = self._get_subdata(frames, index - self.prefix_sum[i]) + frame = self.reformat_data_torch(frame) + frame["fid"] = index + return frame + def get_batch(self, batch_size: int) -> dict: """Get a batch of data with `batch_size` frames. The frames are randomly picked from the data system. 
diff --git a/pyproject.toml b/pyproject.toml index b9d8503b18..fd0c76839b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -224,12 +224,12 @@ replacement = '\1="https://github.com/deepmodeling/deepmd-kit/raw/master/\g<2>"' [tool.cibuildwheel] test-command = [ "python -m deepmd -h", - """python -c "import deepmd.tf;import deepmd.pt" """, + """python -c "import deepmd.tf;import deepmd.pt;import deepmd.pd" """, "dp -h", "dp_ipi", "pytest {project}/source/tests/tf/test_lammps.py" ] -test-extras = ["cpu", "test", "lmp", "ipi", "torch"] +test-extras = ["cpu", "test", "lmp", "ipi", "torch", "paddle"] build = ["cp311-*"] skip = ["*-win32", "*-manylinux_i686", "*-musllinux*"] # TODO: uncomment to use the latest image when CUDA 11 is deprecated @@ -248,6 +248,7 @@ PIP_PREFER_BINARY = "1" DP_LAMMPS_VERSION = "stable_29Aug2024_update1" DP_ENABLE_IPI = "1" DP_ENABLE_PYTORCH = "1" +DP_ENABLE_PADDLE = "1" # for unclear reason, when enabling PyTorch, OpenMP is found accidentally CMAKE_ARGS = "-DCMAKE_DISABLE_FIND_PACKAGE_OpenMP=1" @@ -284,6 +285,7 @@ PIP_PREFER_BINARY = "1" DP_LAMMPS_VERSION = "stable_29Aug2024_update1" DP_ENABLE_IPI = "1" DP_ENABLE_PYTORCH = "1" +DP_ENABLE_PADDLE = "1" MPI_HOME = "/usr/lib64/mpich" PATH = "/usr/lib64/mpich/bin:$PATH" # use CPU version of torch for building, which should also work for GPU @@ -294,7 +296,7 @@ UV_EXTRA_INDEX_URL = "https://download.pytorch.org/whl/cpu" CMAKE_PREFIX_PATH="/opt/python/cp311-cp311/" [tool.cibuildwheel.windows] -test-extras = ["cpu", "torch"] +test-extras = ["cpu", "torch", "paddle"] test-command = [ "python -m deepmd -h", "dp -h", @@ -302,6 +304,7 @@ test-command = [ [tool.cibuildwheel.windows.environment] PIP_PREFER_BINARY = "1" DP_ENABLE_PYTORCH = "1" +DP_ENABLE_PADDLE = "1" # One can run `tox` or `tox -e gpu` # to run pytest in an isolated environment @@ -407,10 +410,12 @@ convention = "numpy" banned-module-level-imports = [ "deepmd.tf", "deepmd.pt", + "deepmd.pd", "deepmd.jax", "tensorflow", "torch", "jax", + "paddle", ] [tool.ruff.lint.flake8-tidy-imports.banned-api] @@ -424,10 +429,13 @@ runtime-evaluated-base-classes = ["torch.nn.Module"] "deepmd/tf/**" = ["TID253"] "deepmd/pt/**" = ["TID253"] "deepmd/jax/**" = ["TID253"] +"deepmd/pd/**" = ["TID253"] "source/tests/tf/**" = ["TID253"] "source/tests/pt/**" = ["TID253"] "source/tests/jax/**" = ["TID253"] +"source/tests/pd/**" = ["TID253"] "source/tests/universal/pt/**" = ["TID253"] +"source/tests/universal/pd/**" = ["TID253"] "source/jax2tf_tests/**" = ["TID253"] "source/ipi/tests/**" = ["TID253"] "source/lmp/tests/**" = ["TID253"] diff --git a/source/tests/consistent/common.py b/source/tests/consistent/common.py index 358ac8d542..cb4dbed391 100644 --- a/source/tests/consistent/common.py +++ b/source/tests/consistent/common.py @@ -42,10 +42,11 @@ INSTALLED_TF = Backend.get_backend("tensorflow")().is_available() INSTALLED_PT = Backend.get_backend("pytorch")().is_available() INSTALLED_JAX = Backend.get_backend("jax")().is_available() +INSTALLED_PD = Backend.get_backend("paddle")().is_available() INSTALLED_ARRAY_API_STRICT = find_spec("array_api_strict") is not None -if os.environ.get("CI") and not (INSTALLED_TF and INSTALLED_PT): - raise ImportError("TensorFlow or PyTorch should be tested in the CI") +if os.environ.get("CI") and not (INSTALLED_TF and INSTALLED_PT and INSTALLED_PD): + raise ImportError("TensorFlow, PyTorch or Paddle should be tested in the CI") if INSTALLED_TF: @@ -66,6 +67,7 @@ "INSTALLED_TF", "INSTALLED_PT", "INSTALLED_JAX", + "INSTALLED_PD", 
"INSTALLED_ARRAY_API_STRICT", ] @@ -85,6 +87,8 @@ class CommonTest(ABC): """PyTorch model class.""" jax_class: ClassVar[Optional[type]] """JAX model class.""" + pd_class: ClassVar[Optional[type]] + """Paddle model class.""" array_api_strict_class: ClassVar[Optional[type]] args: ClassVar[Optional[Union[Argument, list[Argument]]]] """Arguments that maps to the `data`.""" @@ -97,6 +101,8 @@ class CommonTest(ABC): # we may usually skip jax before jax is fully supported skip_jax: ClassVar[bool] = True """Whether to skip the JAX model.""" + skip_pd: ClassVar[bool] = not INSTALLED_PD + """Whether to skip the Paddle model.""" skip_array_api_strict: ClassVar[bool] = True """Whether to skip the array_api_strict model.""" rtol = 1e-10 @@ -179,6 +185,16 @@ def eval_jax(self, jax_obj: Any) -> Any: """ raise NotImplementedError("Not implemented") + @abstractmethod + def eval_pd(self, pd_obj: Any) -> Any: + """Evaluate the return value of PD. + + Parameters + ---------- + pd_obj : Any + The object of PD + """ + def eval_array_api_strict(self, array_api_strict_obj: Any) -> Any: """Evaluate the return value of array_api_strict. @@ -195,6 +211,7 @@ class RefBackend(Enum): TF = 1 DP = 2 PT = 3 + PD = 4 JAX = 5 ARRAY_API_STRICT = 6 @@ -262,6 +279,11 @@ def get_jax_ret_serialization_from_cls(self, obj): data = obj.serialize() return ret, data + def get_pd_ret_serialization_from_cls(self, obj): + ret = self.eval_pd(obj) + data = obj.serialize() + return ret, data + def get_array_api_strict_ret_serialization_from_cls(self, obj): ret = self.eval_array_api_strict(obj) data = obj.serialize() @@ -280,6 +302,8 @@ def get_reference_backend(self): return self.RefBackend.PT if not self.skip_jax: return self.RefBackend.JAX + if not self.skip_pd: + return self.RefBackend.PD if not self.skip_array_api_strict: return self.RefBackend.ARRAY_API_STRICT raise ValueError("No available reference") @@ -298,6 +322,9 @@ def get_reference_ret_serialization(self, ref: RefBackend): if ref == self.RefBackend.JAX: obj = self.init_backend_cls(self.jax_class) return self.get_jax_ret_serialization_from_cls(obj) + if ref == self.RefBackend.PD: + obj = self.init_backend_cls(self.pd_class) + return self.get_pd_ret_serialization_from_cls(obj) if ref == self.RefBackend.ARRAY_API_STRICT: obj = self.init_backend_cls(self.array_api_strict_class) return self.get_array_api_strict_ret_serialization_from_cls(obj) @@ -459,6 +486,45 @@ def test_jax_self_consistent(self) -> None: else: self.assertEqual(rr1, rr2) + def test_pd_consistent_with_ref(self): + """Test whether PD and reference are consistent.""" + if self.skip_pd: + self.skipTest("Unsupported backend") + ref_backend = self.get_reference_backend() + if ref_backend == self.RefBackend.PD: + self.skipTest("Reference is self") + ret1, data1 = self.get_reference_ret_serialization(ref_backend) + ret1 = self.extract_ret(ret1, ref_backend) + obj = self.pd_class.deserialize(data1) + ret2 = self.eval_pd(obj) + ret2 = self.extract_ret(ret2, self.RefBackend.PD) + data2 = obj.serialize() + if obj.__class__.__name__.startswith(("Polar", "Dipole", "DOS")): + # tf, pd serialization mismatch + common_keys = set(data1.keys()) & set(data2.keys()) + data1 = {k: data1[k] for k in common_keys} + data2 = {k: data2[k] for k in common_keys} + np.testing.assert_equal(data1, data2) + for rr1, rr2 in zip(ret1, ret2): + np.testing.assert_allclose(rr1, rr2, rtol=self.rtol, atol=self.atol) + assert rr1.dtype == rr2.dtype, f"{rr1.dtype} != {rr2.dtype}" + + def test_pd_self_consistent(self): + """Test whether PD is self 
consistent.""" + if self.skip_pd: + self.skipTest("Unsupported backend") + obj1 = self.init_backend_cls(self.pd_class) + ret1, data1 = self.get_pd_ret_serialization_from_cls(obj1) + obj2 = self.pd_class.deserialize(data1) + ret2, data2 = self.get_pd_ret_serialization_from_cls(obj2) + np.testing.assert_equal(data1, data2) + for rr1, rr2 in zip(ret1, ret2): + if isinstance(rr1, np.ndarray) and isinstance(rr2, np.ndarray): + np.testing.assert_allclose(rr1, rr2, rtol=self.rtol, atol=self.atol) + assert rr1.dtype == rr2.dtype, f"{rr1.dtype} != {rr2.dtype}" + else: + self.assertEqual(rr1, rr2) + @unittest.skipIf(TEST_DEVICE != "cpu" and CI, "Only test on CPU.") def test_array_api_strict_consistent_with_ref(self) -> None: """Test whether array_api_strict and reference are consistent.""" diff --git a/source/tests/consistent/descriptor/common.py b/source/tests/consistent/descriptor/common.py index a469a22348..baa6e97d04 100644 --- a/source/tests/consistent/descriptor/common.py +++ b/source/tests/consistent/descriptor/common.py @@ -19,6 +19,7 @@ from ..common import ( INSTALLED_ARRAY_API_STRICT, INSTALLED_JAX, + INSTALLED_PD, INSTALLED_PT, INSTALLED_TF, ) @@ -43,6 +44,15 @@ if INSTALLED_ARRAY_API_STRICT: import array_api_strict +if INSTALLED_PD: + import paddle + + from deepmd.pd.utils.env import DEVICE as PD_DEVICE + from deepmd.pd.utils.nlist import build_neighbor_list as build_neighbor_list_pd + from deepmd.pd.utils.nlist import ( + extend_coord_with_ghosts as extend_coord_with_ghosts_pd, + ) + class DescriptorTest: """Useful utilities for descriptor tests.""" @@ -135,6 +145,28 @@ def eval_jax_descriptor( for x in jax_obj(ext_coords, ext_atype, nlist=nlist, mapping=mapping) ] + def eval_pd_descriptor( + self, pd_obj: Any, natoms, coords, atype, box, mixed_types: bool = False + ) -> Any: + ext_coords, ext_atype, mapping = extend_coord_with_ghosts_pd( + paddle.to_tensor(coords).to(PD_DEVICE).reshape([1, -1, 3]), + paddle.to_tensor(atype).to(PD_DEVICE).reshape([1, -1]), + paddle.to_tensor(box).to(PD_DEVICE).reshape([1, 3, 3]), + pd_obj.get_rcut(), + ) + nlist = build_neighbor_list_pd( + ext_coords, + ext_atype, + natoms[0], + pd_obj.get_rcut(), + pd_obj.get_sel(), + distinguish_types=(not mixed_types), + ) + return [ + x.detach().cpu().numpy() if paddle.is_tensor(x) else x + for x in pd_obj(ext_coords, ext_atype, nlist=nlist, mapping=mapping) + ] + def eval_array_api_strict_descriptor( self, array_api_strict_obj: Any, diff --git a/source/tests/consistent/descriptor/test_se_e2_a.py b/source/tests/consistent/descriptor/test_se_e2_a.py index a3ed19e8f3..a463960fb7 100644 --- a/source/tests/consistent/descriptor/test_se_e2_a.py +++ b/source/tests/consistent/descriptor/test_se_e2_a.py @@ -14,6 +14,7 @@ from ..common import ( INSTALLED_ARRAY_API_STRICT, INSTALLED_JAX, + INSTALLED_PD, INSTALLED_PT, INSTALLED_TF, CommonTest, @@ -31,6 +32,10 @@ from deepmd.tf.descriptor.se_a import DescrptSeA as DescrptSeATF else: DescrptSeATF = None +if INSTALLED_PD: + from deepmd.pd.model.descriptor.se_a import DescrptSeA as DescrptSeAPD +else: + DescrptSeAPD = None from deepmd.utils.argcheck import ( descrpt_se_a_args, ) @@ -122,6 +127,17 @@ def skip_jax(self) -> bool: ) = self.param return not type_one_side or not INSTALLED_JAX + @property + def skip_pd(self) -> bool: + ( + resnet_dt, + type_one_side, + excluded_types, + precision, + env_protection, + ) = self.param + return CommonTest.skip_pd + @property def skip_array_api_strict(self) -> bool: ( @@ -137,6 +153,7 @@ def skip_array_api_strict(self) -> bool: dp_class = 
DescrptSeADP
     pt_class = DescrptSeAPT
     jax_class = DescrptSeAJAX
+    pd_class = DescrptSeAPD
     array_api_strict_class = DescrptSeAArrayAPIStrict
     args = descrpt_se_a_args()
 
@@ -223,6 +240,15 @@ def eval_jax(self, jax_obj: Any) -> Any:
             self.box,
         )
 
+    def eval_pd(self, pd_obj: Any) -> Any:
+        return self.eval_pd_descriptor(
+            pd_obj,
+            self.natoms,
+            self.coords,
+            self.atype,
+            self.box,
+        )
+
     def eval_array_api_strict(self, array_api_strict_obj: Any) -> Any:
         return self.eval_array_api_strict_descriptor(
             array_api_strict_obj,
diff --git a/source/tests/consistent/fitting/common.py b/source/tests/consistent/fitting/common.py
index 95557d9ab8..1f6c2da565 100644
--- a/source/tests/consistent/fitting/common.py
+++ b/source/tests/consistent/fitting/common.py
@@ -2,6 +2,7 @@
 
 from ..common import (
+    INSTALLED_PD,
     INSTALLED_PT,
     INSTALLED_TF,
 )
@@ -13,6 +14,8 @@
         GLOBAL_TF_FLOAT_PRECISION,
         tf,
     )
+if INSTALLED_PD:
+    pass
 
 
 class FittingTest:
diff --git a/source/tests/consistent/fitting/test_ener.py b/source/tests/consistent/fitting/test_ener.py
index 1ef846dbcc..12fafa7ba8 100644
--- a/source/tests/consistent/fitting/test_ener.py
+++ b/source/tests/consistent/fitting/test_ener.py
@@ -17,6 +17,7 @@
 from ..common import (
     INSTALLED_ARRAY_API_STRICT,
     INSTALLED_JAX,
+    INSTALLED_PD,
     INSTALLED_PT,
     INSTALLED_TF,
     CommonTest,
@@ -37,6 +38,13 @@
     from deepmd.tf.fit.ener import EnerFitting as EnerFittingTF
 else:
     EnerFittingTF = object
+if INSTALLED_PD:
+    import paddle
+
+    from deepmd.pd.model.task.ener import EnergyFittingNet as EnerFittingPD
+    from deepmd.pd.utils.env import DEVICE as PD_DEVICE
+else:
+    EnerFittingPD = object
 from deepmd.utils.argcheck import (
     fitting_ener,
 )
@@ -115,10 +123,25 @@ def skip_array_api_strict(self) -> bool:
         # TypeError: The array_api_strict namespace does not support the dtype 'bfloat16'
         return not INSTALLED_ARRAY_API_STRICT or precision == "bfloat16"
 
+    @property
+    def skip_pd(self) -> bool:
+        (
+            resnet_dt,
+            precision,
+            mixed_types,
+            numb_fparam,
+            (numb_aparam, use_aparam_as_mask),
+            atom_ener,
+        ) = self.param
+        # Paddle does not support "bfloat16" in some kernels,
+        # so skip this in the CI test
+        return CommonTest.skip_pd or precision == "bfloat16"
+
     tf_class = EnerFittingTF
     dp_class = EnerFittingDP
     pt_class = EnerFittingPT
     jax_class = EnerFittingJAX
+    pd_class = EnerFittingPD
     array_api_strict_class = EnerFittingStrict
     args = fitting_ener()
 
@@ -252,6 +275,35 @@ def eval_array_api_strict(self, array_api_strict_obj: Any) -> Any:
             )["energy"]
         )
 
+    def eval_pd(self, pd_obj: Any) -> Any:
+        (
+            resnet_dt,
+            precision,
+            mixed_types,
+            numb_fparam,
+            (numb_aparam, use_aparam_as_mask),
+            atom_ener,
+        ) = self.param
+        return (
+            pd_obj(
+                paddle.to_tensor(self.inputs).to(device=PD_DEVICE),
+                paddle.to_tensor(self.atype.reshape([1, -1])).to(device=PD_DEVICE),
+                fparam=(
+                    paddle.to_tensor(self.fparam).to(device=PD_DEVICE)
+                    if numb_fparam
+                    else None
+                ),
+                aparam=(
+                    paddle.to_tensor(self.aparam).to(device=PD_DEVICE)
+                    if numb_aparam
+                    else None
+                ),
+            )["energy"]
+            .detach()
+            .cpu()
+            .numpy()
+        )
+
     def extract_ret(self, ret: Any, backend) -> tuple[np.ndarray, ...]:
         if backend == self.RefBackend.TF:
             # shape is not same
diff --git a/source/tests/consistent/model/common.py b/source/tests/consistent/model/common.py
index bb38abc5b6..7cf71000db 100644
--- a/source/tests/consistent/model/common.py
+++ b/source/tests/consistent/model/common.py
@@ -12,6 +12,7 @@
 
 from ..common import (
     INSTALLED_JAX,
+    INSTALLED_PD,
     INSTALLED_PT,
     INSTALLED_TF,
 )
@@ -29,6 +30,9 @@
 from deepmd.jax.env import (
     jnp,
 )
+if INSTALLED_PD:
+    from deepmd.pd.utils.utils import to_numpy_array as paddle_to_numpy
+    from deepmd.pd.utils.utils import to_paddle_tensor as numpy_to_paddle
 
 
 class ModelTest:
@@ -114,3 +118,14 @@ def assert_jax_array(arr):
                 do_atomic_virial=True,
             ).items()
         }
+
+    def eval_pd_model(self, pd_obj: Any, natoms, coords, atype, box) -> Any:
+        return {
+            kk: paddle_to_numpy(vv)
+            for kk, vv in pd_obj(
+                numpy_to_paddle(coords),
+                numpy_to_paddle(atype),
+                box=numpy_to_paddle(box),
+                do_atomic_virial=True,
+            ).items()
+        }
diff --git a/source/tests/consistent/model/test_ener.py b/source/tests/consistent/model/test_ener.py
index 4c50c08bef..d56b9a257b 100644
--- a/source/tests/consistent/model/test_ener.py
+++ b/source/tests/consistent/model/test_ener.py
@@ -24,6 +24,7 @@
 from ..common import (
     INSTALLED_JAX,
+    INSTALLED_PD,
     INSTALLED_PT,
     INSTALLED_TF,
     SKIP_FLAG,
@@ -45,6 +46,13 @@
     from deepmd.tf.model.ener import EnerModel as EnergyModelTF
 else:
     EnergyModelTF = None
+if INSTALLED_PD:
+    from deepmd.pd.model.model import get_model as get_model_pd
+    from deepmd.pd.model.model.ener_model import EnergyModel as EnergyModelPD
+    from deepmd.pd.utils.utils import to_numpy_array as paddle_to_numpy
+    from deepmd.pd.utils.utils import to_paddle_tensor as numpy_to_paddle
+else:
+    EnergyModelPD = None
 from deepmd.utils.argcheck import (
     model_args,
 )
@@ -106,7 +114,8 @@ def data(self) -> dict:
     tf_class = EnergyModelTF
     dp_class = EnergyModelDP
     pt_class = EnergyModelPT
     jax_class = EnergyModelJAX
+    pd_class = EnergyModelPD
     args = model_args()
 
     def get_reference_backend(self):
@@ -120,6 +130,8 @@
             return self.RefBackend.TF
         if not self.skip_jax:
             return self.RefBackend.JAX
+        if not self.skip_pd:
+            return self.RefBackend.PD
         if not self.skip_dp:
             return self.RefBackend.DP
         raise ValueError("No available reference")
@@ -146,6 +158,8 @@ def pass_data_to_cls(self, cls, data) -> Any:
             return model
         elif cls is EnergyModelJAX:
             return get_model_jax(data)
+        elif cls is EnergyModelPD:
+            return get_model_pd(data)
         return cls(**data, **self.additional_data)
 
     def setUp(self) -> None:
@@ -224,6 +238,15 @@ def eval_jax(self, jax_obj: Any) -> Any:
             self.box,
         )
 
+    def eval_pd(self, pd_obj: Any) -> Any:
+        return self.eval_pd_model(
+            pd_obj,
+            self.natoms,
+            self.coords,
+            self.atype,
+            self.box,
+        )
+
     def extract_ret(self, ret: Any, backend) -> tuple[np.ndarray, ...]:
         # shape not matched. ravel...
if backend is self.RefBackend.DP: @@ -258,6 +281,14 @@ def extract_ret(self, ret: Any, backend) -> tuple[np.ndarray, ...]: ret["energy_derv_c_redu"].ravel(), ret["energy_derv_c"].ravel(), ) + elif backend is self.RefBackend.PD: + return ( + ret["energy"].flatten(), + ret["atom_energy"].flatten(), + ret["force"].flatten(), + ret["virial"].flatten(), + ret["atom_virial"].flatten(), + ) raise ValueError(f"Unknown backend: {backend}") @@ -309,6 +340,7 @@ def data(self) -> dict: dp_class = EnergyModelDP pt_class = EnergyModelPT jax_class = EnergyModelJAX + pd_class = EnergyModelPD args = model_args() def get_reference_backend(self): @@ -322,6 +354,8 @@ def get_reference_backend(self): return self.RefBackend.JAX if not self.skip_dp: return self.RefBackend.DP + if not self.skip_pd: + return self.RefBackend.PD raise ValueError("No available reference") @property @@ -342,6 +376,8 @@ def pass_data_to_cls(self, cls, data) -> Any: return get_model_pt(data) elif cls is EnergyModelJAX: return get_model_jax(data) + elif cls is EnergyModelPD: + return get_model_pd(data) return cls(**data, **self.additional_data) def setUp(self) -> None: @@ -436,6 +472,18 @@ def eval_jax(self, jax_obj: Any) -> Any: ).items() } + def eval_pd(self, pd_obj: Any) -> Any: + return { + kk: paddle_to_numpy(vv) + for kk, vv in pd_obj.forward_lower( + numpy_to_paddle(self.extended_coord), + numpy_to_paddle(self.extended_atype), + numpy_to_paddle(self.nlist), + numpy_to_paddle(self.mapping), + do_atomic_virial=True, + ).items() + } + def extract_ret(self, ret: Any, backend) -> tuple[np.ndarray, ...]: # shape not matched. ravel... if backend is self.RefBackend.DP: @@ -462,4 +510,12 @@ def extract_ret(self, ret: Any, backend) -> tuple[np.ndarray, ...]: ret["energy_derv_c_redu"].ravel(), ret["energy_derv_c"].ravel(), ) + elif backend is self.RefBackend.PD: + return ( + ret["energy"].flatten(), + ret["atom_energy"].flatten(), + ret["extended_force"].flatten(), + ret["virial"].flatten(), + ret["extended_virial"].flatten(), + ) raise ValueError(f"Unknown backend: {backend}") diff --git a/source/tests/consistent/test_activation.py b/source/tests/consistent/test_activation.py index 2368b6c473..31351d4a9d 100644 --- a/source/tests/consistent/test_activation.py +++ b/source/tests/consistent/test_activation.py @@ -17,6 +17,7 @@ ) from .common import ( INSTALLED_JAX, + INSTALLED_PD, INSTALLED_PT, INSTALLED_TF, parameterized, @@ -37,6 +38,12 @@ from deepmd.jax.env import ( jnp, ) +if INSTALLED_PD: + from deepmd.pd.utils.utils import ActivationFn as ActivationFn_pd + from deepmd.pd.utils.utils import to_numpy_array as paddle_to_numpy + from deepmd.pd.utils.utils import ( + to_paddle_tensor, + ) @parameterized( @@ -83,3 +90,11 @@ def test_jax_consistent_with_ref(self) -> None: test = get_activation_fn_dp(self.activation)(input) self.assertTrue(isinstance(test, jnp.ndarray)) np.testing.assert_allclose(self.ref, np.from_dlpack(test), atol=1e-10) + + @unittest.skipUnless(INSTALLED_PD, "Paddle is not installed") + def test_pd_consistent_with_ref(self): + if INSTALLED_PD: + test = paddle_to_numpy( + ActivationFn_pd(self.activation)(to_paddle_tensor(self.random_input)) + ) + np.testing.assert_allclose(self.ref, test, atol=1e-10) diff --git a/source/tests/consistent/test_neighbor_stat.py b/source/tests/consistent/test_neighbor_stat.py index 573e367267..9c9f97045b 100644 --- a/source/tests/consistent/test_neighbor_stat.py +++ b/source/tests/consistent/test_neighbor_stat.py @@ -14,6 +14,7 @@ ) from .common import ( INSTALLED_JAX, + INSTALLED_PD, 
INSTALLED_PT, INSTALLED_TF, ) @@ -87,3 +88,7 @@ def test_neighbor_stat_dp(self) -> None: @unittest.skipUnless(INSTALLED_JAX, "jax is not installed") def test_neighbor_stat_jax(self) -> None: self.run_neighbor_stat("jax") + + @unittest.skipUnless(INSTALLED_PD, "paddle is not installed") + def test_neighbor_stat_pd(self): + self.run_neighbor_stat("paddle") diff --git a/source/tests/consistent/test_type_embedding.py b/source/tests/consistent/test_type_embedding.py index 1c56abea0c..9c1de0e8c5 100644 --- a/source/tests/consistent/test_type_embedding.py +++ b/source/tests/consistent/test_type_embedding.py @@ -17,6 +17,7 @@ from .common import ( INSTALLED_ARRAY_API_STRICT, INSTALLED_JAX, + INSTALLED_PD, INSTALLED_PT, INSTALLED_TF, CommonTest, @@ -45,6 +46,13 @@ from ..array_api_strict.utils.type_embed import TypeEmbedNet as TypeEmbedNetStrict else: TypeEmbedNetStrict = None +if INSTALLED_PD: + import paddle + + from deepmd.pd.model.network.network import TypeEmbedNetConsistent as TypeEmbedNetPD + from deepmd.pd.utils.env import DEVICE as PD_DEVICE +else: + TypeEmbedNetPD = object @parameterized( @@ -79,6 +87,7 @@ def data(self) -> dict: dp_class = TypeEmbedNetDP pt_class = TypeEmbedNetPT jax_class = TypeEmbedNetJAX + pd_class = TypeEmbedNetPD array_api_strict_class = TypeEmbedNetStrict args = type_embedding_args() skip_jax = not INSTALLED_JAX @@ -130,6 +139,12 @@ def eval_jax(self, jax_obj: Any) -> Any: raise ValueError("Output is numpy array") return [np.array(x) if isinstance(x, jnp.ndarray) else x for x in (out,)] + def eval_pd(self, pd_obj: Any) -> Any: + return [ + x.detach().cpu().numpy() if paddle.is_tensor(x) else x + for x in (pd_obj(device=PD_DEVICE),) + ] + def eval_array_api_strict(self, array_api_strict_obj: Any) -> Any: out = array_api_strict_obj() return [ diff --git a/source/tests/pd/__init__.py b/source/tests/pd/__init__.py new file mode 100644 index 0000000000..8d1616afb2 --- /dev/null +++ b/source/tests/pd/__init__.py @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from deepmd.pd.utils import ( + env, +) + +env.enable_prim(True) diff --git a/source/tests/pd/common.py b/source/tests/pd/common.py new file mode 100644 index 0000000000..59a9672330 --- /dev/null +++ b/source/tests/pd/common.py @@ -0,0 +1,263 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from typing import ( + Optional, + Union, +) + +import numpy as np +import paddle + +from deepmd.main import ( + main, +) +from deepmd.pd.utils.env import ( + DEVICE, + GLOBAL_PD_FLOAT_PRECISION, +) + + +def run_dp(cmd: str) -> int: + """Run DP directly from the entry point instead of the subprocess. + + It is quite slow to start DeePMD-kit with subprocess. + + Parameters + ---------- + cmd : str + The command to run. + + Returns + ------- + int + Always returns 0. 
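+
+    Examples
+    --------
+    >>> run_dp("dp train input.json")  # launches a real training run
+    0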
+ """ + cmds = cmd.split() + if cmds[0] == "dp": + cmds = cmds[1:] + else: + raise RuntimeError("The command is not dp") + + main(cmds) + return 0 + + +def eval_model( + model, + coords: Union[np.ndarray, paddle.Tensor], + cells: Optional[Union[np.ndarray, paddle.Tensor]], + atom_types: Union[np.ndarray, paddle.Tensor, list[int]], + spins: Optional[Union[np.ndarray, paddle.Tensor]] = None, + atomic: bool = False, + infer_batch_size: int = 2, + denoise: bool = False, +): + model = model.to(DEVICE) + energy_out = [] + atomic_energy_out = [] + force_out = [] + force_mag_out = [] + virial_out = [] + atomic_virial_out = [] + updated_coord_out = [] + logits_out = [] + err_msg = ( + f"All inputs should be the same format, " + f"but found {type(coords)}, {type(cells)}, {type(atom_types)} instead! " + ) + return_tensor = True + if isinstance(coords, paddle.Tensor): + if cells is not None: + assert isinstance(cells, paddle.Tensor), err_msg + if spins is not None: + assert isinstance(spins, paddle.Tensor), err_msg + assert isinstance(atom_types, paddle.Tensor) or isinstance(atom_types, list) + atom_types = paddle.to_tensor(atom_types, dtype=paddle.int32, place=DEVICE) + elif isinstance(coords, np.ndarray): + if cells is not None: + assert isinstance(cells, np.ndarray), err_msg + if spins is not None: + assert isinstance(spins, np.ndarray), err_msg + assert isinstance(atom_types, np.ndarray) or isinstance(atom_types, list) + atom_types = np.array(atom_types, dtype=np.int32) + return_tensor = False + + nframes = coords.shape[0] + if len(atom_types.shape) == 1: + natoms = len(atom_types) + if isinstance(atom_types, paddle.Tensor): + atom_types = paddle.tile(atom_types.unsqueeze(0), [nframes, 1]).reshape( + [nframes, -1] + ) + else: + atom_types = np.tile(atom_types, nframes).reshape(nframes, -1) + else: + natoms = len(atom_types[0]) + + coord_input = paddle.to_tensor( + coords.reshape([-1, natoms, 3]), dtype=GLOBAL_PD_FLOAT_PRECISION, place=DEVICE + ) + spin_input = None + if spins is not None: + spin_input = paddle.to_tensor( + spins.reshape([-1, natoms, 3]), + dtype=GLOBAL_PD_FLOAT_PRECISION, + place=DEVICE, + ) + has_spin = getattr(model, "has_spin", False) + if callable(has_spin): + has_spin = has_spin() + type_input = paddle.to_tensor(atom_types, dtype=paddle.int64, place=DEVICE) + box_input = None + if cells is None: + pbc = False + else: + pbc = True + box_input = paddle.to_tensor( + cells.reshape([-1, 3, 3]), dtype=GLOBAL_PD_FLOAT_PRECISION, place=DEVICE + ) + num_iter = int((nframes + infer_batch_size - 1) / infer_batch_size) + + for ii in range(num_iter): + batch_coord = coord_input[ii * infer_batch_size : (ii + 1) * infer_batch_size] + batch_atype = type_input[ii * infer_batch_size : (ii + 1) * infer_batch_size] + batch_box = None + batch_spin = None + if spin_input is not None: + batch_spin = spin_input[ii * infer_batch_size : (ii + 1) * infer_batch_size] + if pbc: + batch_box = box_input[ii * infer_batch_size : (ii + 1) * infer_batch_size] + input_dict = { + "coord": batch_coord, + "atype": batch_atype, + "box": batch_box, + "do_atomic_virial": atomic, + } + if has_spin: + input_dict["spin"] = batch_spin + batch_output = model(**input_dict) + if isinstance(batch_output, tuple): + batch_output = batch_output[0] + if not return_tensor: + if "energy" in batch_output: + energy_out.append(batch_output["energy"].numpy()) + if "atom_energy" in batch_output: + atomic_energy_out.append(batch_output["atom_energy"].numpy()) + if "force" in batch_output: + 
force_out.append(batch_output["force"].numpy()) + if "force_mag" in batch_output: + force_mag_out.append(batch_output["force_mag"].numpy()) + if "virial" in batch_output: + virial_out.append(batch_output["virial"].numpy()) + if "atom_virial" in batch_output: + atomic_virial_out.append(batch_output["atom_virial"].numpy()) + if "updated_coord" in batch_output: + updated_coord_out.append(batch_output["updated_coord"].numpy()) + if "logits" in batch_output: + logits_out.append(batch_output["logits"].numpy()) + else: + if "energy" in batch_output: + energy_out.append(batch_output["energy"]) + if "atom_energy" in batch_output: + atomic_energy_out.append(batch_output["atom_energy"]) + if "force" in batch_output: + force_out.append(batch_output["force"]) + if "force_mag" in batch_output: + force_mag_out.append(batch_output["force_mag"]) + if "virial" in batch_output: + virial_out.append(batch_output["virial"]) + if "atom_virial" in batch_output: + atomic_virial_out.append(batch_output["atom_virial"]) + if "updated_coord" in batch_output: + updated_coord_out.append(batch_output["updated_coord"]) + if "logits" in batch_output: + logits_out.append(batch_output["logits"]) + if not return_tensor: + energy_out = ( + np.concatenate(energy_out) if energy_out else np.zeros([nframes, 1]) # pylint: disable=no-explicit-dtype + ) + atomic_energy_out = ( + np.concatenate(atomic_energy_out) + if atomic_energy_out + else np.zeros([nframes, natoms, 1]) # pylint: disable=no-explicit-dtype + ) + force_out = ( + np.concatenate(force_out) if force_out else np.zeros([nframes, natoms, 3]) # pylint: disable=no-explicit-dtype + ) + force_mag_out = ( + np.concatenate(force_mag_out) + if force_mag_out + else np.zeros([nframes, natoms, 3]) # pylint: disable=no-explicit-dtype + ) + virial_out = ( + np.concatenate(virial_out) if virial_out else np.zeros([nframes, 3, 3]) # pylint: disable=no-explicit-dtype + ) + atomic_virial_out = ( + np.concatenate(atomic_virial_out) + if atomic_virial_out + else np.zeros([nframes, natoms, 3, 3]) # pylint: disable=no-explicit-dtype + ) + updated_coord_out = ( + np.concatenate(updated_coord_out) if updated_coord_out else None + ) + logits_out = np.concatenate(logits_out) if logits_out else None + else: + energy_out = ( + paddle.concat(energy_out) + if energy_out + else paddle.zeros([nframes, 1], dtype=GLOBAL_PD_FLOAT_PRECISION).to( + device=DEVICE + ) + ) + atomic_energy_out = ( + paddle.concat(atomic_energy_out) + if atomic_energy_out + else paddle.zeros([nframes, natoms, 1], dtype=GLOBAL_PD_FLOAT_PRECISION).to( + device=DEVICE + ) + ) + force_out = ( + paddle.concat(force_out) + if force_out + else paddle.zeros([nframes, natoms, 3], dtype=GLOBAL_PD_FLOAT_PRECISION).to( + device=DEVICE + ) + ) + force_mag_out = ( + paddle.concat(force_mag_out) + if force_mag_out + else paddle.zeros([nframes, natoms, 3], dtype=GLOBAL_PD_FLOAT_PRECISION).to( + device=DEVICE + ) + ) + virial_out = ( + paddle.concat(virial_out) + if virial_out + else paddle.zeros([nframes, 3, 3], dtype=GLOBAL_PD_FLOAT_PRECISION).to( + device=DEVICE + ) + ) + atomic_virial_out = ( + paddle.concat(atomic_virial_out) + if atomic_virial_out + else paddle.zeros( + [nframes, natoms, 3, 3], dtype=GLOBAL_PD_FLOAT_PRECISION + ).to(device=DEVICE) + ) + updated_coord_out = ( + paddle.concat(updated_coord_out) if updated_coord_out else None + ) + logits_out = paddle.concat(logits_out) if logits_out else None + if denoise: + return updated_coord_out, logits_out + else: + results_dict = { + "energy": energy_out, + "force": force_out, + 
"virial": virial_out, + } + if has_spin: + results_dict["force_mag"] = force_mag_out + if atomic: + results_dict["atom_energy"] = atomic_energy_out + results_dict["atom_virial"] = atomic_virial_out + return results_dict diff --git a/source/tests/pd/conftest.py b/source/tests/pd/conftest.py new file mode 100644 index 0000000000..530cb18907 --- /dev/null +++ b/source/tests/pd/conftest.py @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import paddle +import pytest + + +@pytest.fixture(scope="package", autouse=True) +def clear_cuda_memory(request): + yield + paddle.device.cuda.empty_cache() diff --git a/source/tests/pd/model/__init__.py b/source/tests/pd/model/__init__.py new file mode 100644 index 0000000000..6ceb116d85 --- /dev/null +++ b/source/tests/pd/model/__init__.py @@ -0,0 +1 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later diff --git a/source/tests/pd/model/test_autodiff.py b/source/tests/pd/model/test_autodiff.py new file mode 100644 index 0000000000..a056491fb3 --- /dev/null +++ b/source/tests/pd/model/test_autodiff.py @@ -0,0 +1,263 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import copy +import unittest + +import numpy as np +import paddle + +from deepmd.pd.model.model import ( + get_model, +) +from deepmd.pd.utils import ( + env, +) +from deepmd.pd.utils.utils import ( + to_numpy_array, +) + +from ...seed import ( + GLOBAL_SEED, +) + +dtype = paddle.float64 + +from ..common import ( + eval_model, +) +from .test_permutation import ( + model_dpa1, + model_dpa2, + model_hybrid, + model_se_e2_a, + model_spin, + model_zbl, +) + + +# from deepmd-kit repo +def finite_difference(f, x, delta=1e-6): + in_shape = x.shape + y0 = f(x) + out_shape = y0.shape + res = np.empty(out_shape + in_shape) + for idx in np.ndindex(*in_shape): + diff = np.zeros(in_shape) + diff[idx] += delta + y1p = f(x + diff) + y1n = f(x - diff) + res[(Ellipsis, *idx)] = (y1p - y1n) / (2 * delta) + return res + + +def stretch_box(old_coord, old_box, new_box): + ocoord = old_coord.reshape(-1, 3) + obox = old_box.reshape(3, 3) + nbox = new_box.reshape(3, 3) + ncoord = ocoord @ np.linalg.inv(obox) @ nbox + return ncoord.reshape(old_coord.shape) + + +class ForceTest: + def test( + self, + ): + env.enable_prim(True) + places = 5 + delta = 1e-5 + natoms = 5 + generator = paddle.seed(GLOBAL_SEED) + cell = paddle.rand([3, 3], dtype=dtype).to(device="cpu") + cell = (cell + cell.T) + 5.0 * paddle.eye(3).to(device="cpu") + coord = paddle.rand([natoms, 3], dtype=dtype).to(device="cpu") + coord = paddle.matmul(coord, cell) + spin = paddle.rand([natoms, 3], dtype=dtype).to(device="cpu") + atype = paddle.to_tensor([0, 0, 0, 1, 1]) + # assumes input to be numpy tensor + coord = coord.numpy() + spin = spin.numpy() + test_spin = getattr(self, "test_spin", False) + if not test_spin: + test_keys = ["energy", "force", "virial"] + else: + test_keys = ["energy", "force", "force_mag", "virial"] + + def np_infer_coord( + coord, + ): + result = eval_model( + self.model, + paddle.to_tensor(coord).to(device=env.DEVICE).unsqueeze(0), + cell.unsqueeze(0), + atype, + spins=paddle.to_tensor(spin).to(device=env.DEVICE).unsqueeze(0), + ) + # detach + ret = {key: to_numpy_array(result[key].squeeze(0)) for key in test_keys} + return ret + + def np_infer_spin( + spin, + ): + result = eval_model( + self.model, + paddle.to_tensor(coord).to(device=env.DEVICE).unsqueeze(0), + cell.unsqueeze(0), + atype, + spins=paddle.to_tensor(spin).to(device=env.DEVICE).unsqueeze(0), + ) + # detach + ret = {key: to_numpy_array(result[key].squeeze(0)) 
for key in test_keys} + return ret + + def ff_coord(_coord): + return np_infer_coord(_coord)["energy"] + + def ff_spin(_spin): + return np_infer_spin(_spin)["energy"] + + if not test_spin: + fdf = -finite_difference(ff_coord, coord, delta=delta).squeeze() + rff = np_infer_coord(coord)["force"] + np.testing.assert_almost_equal(fdf, rff, decimal=places) + else: + # real force + fdf = -finite_difference(ff_coord, coord, delta=delta).squeeze() + rff = np_infer_coord(coord)["force"] + np.testing.assert_almost_equal(fdf, rff, decimal=places) + # magnetic force + fdf = -finite_difference(ff_spin, spin, delta=delta).squeeze() + rff = np_infer_spin(spin)["force_mag"] + np.testing.assert_almost_equal(fdf, rff, decimal=places) + + +class VirialTest: + def test( + self, + ): + places = 5 + delta = 1e-4 + natoms = 5 + generator = paddle.seed(GLOBAL_SEED) + cell = paddle.rand([3, 3], dtype=dtype).to(device="cpu") + cell = (cell) + 5.0 * paddle.eye(3).to(device="cpu") + coord = paddle.rand([natoms, 3], dtype=dtype).to(device="cpu") + coord = paddle.matmul(coord, cell) + atype = paddle.to_tensor([0, 0, 0, 1, 1]) + # assumes input to be numpy tensor + coord = coord.numpy() + cell = cell.numpy() + test_keys = ["energy", "force", "virial"] + + def np_infer( + new_cell, + ): + result = eval_model( + self.model, + paddle.to_tensor(stretch_box(coord, cell, new_cell)) + .to(device="cpu") + .unsqueeze(0), + paddle.to_tensor(new_cell).to(device="cpu").unsqueeze(0), + atype, + ) + # detach + ret = {key: to_numpy_array(result[key].squeeze(0)) for key in test_keys} + # detach + return ret + + def ff(bb): + return np_infer(bb)["energy"] + + fdv = ( + -(finite_difference(ff, cell, delta=delta).transpose([0, 2, 1]) @ cell) + .squeeze() + .reshape([9]) + ) + rfv = np_infer(cell)["virial"] + np.testing.assert_almost_equal(fdv, rfv, decimal=places) + + +class TestEnergyModelSeAForce(unittest.TestCase, ForceTest): + def setUp(self): + model_params = copy.deepcopy(model_se_e2_a) + self.type_split = False + self.model = get_model(model_params).to(env.DEVICE) + + +class TestEnergyModelSeAVirial(unittest.TestCase, VirialTest): + def setUp(self): + model_params = copy.deepcopy(model_se_e2_a) + self.type_split = False + self.model = get_model(model_params).to(env.DEVICE) + + +@unittest.skip("Skip for not implemented yet") +class TestEnergyModelDPA1Force(unittest.TestCase, ForceTest): + def setUp(self): + model_params = copy.deepcopy(model_dpa1) + self.type_split = True + self.model = get_model(model_params).to(env.DEVICE) + + +@unittest.skip("Skip for not implemented yet") +class TestEnergyModelDPA1Virial(unittest.TestCase, VirialTest): + def setUp(self): + model_params = copy.deepcopy(model_dpa1) + self.type_split = True + self.model = get_model(model_params).to(env.DEVICE) + + +@unittest.skip("Skip for not implemented yet") +class TestEnergyModelDPA2Force(unittest.TestCase, ForceTest): + def setUp(self): + model_params = copy.deepcopy(model_dpa2) + self.type_split = True + self.model = get_model(model_params).to(env.DEVICE) + + +@unittest.skip("Skip for not implemented yet") +class TestEnergyModelDPAUniVirial(unittest.TestCase, VirialTest): + def setUp(self): + model_params = copy.deepcopy(model_dpa2) + self.type_split = True + self.model = get_model(model_params).to(env.DEVICE) + + +@unittest.skip("Skip for not implemented yet") +class TestEnergyModelHybridForce(unittest.TestCase, ForceTest): + def setUp(self): + model_params = copy.deepcopy(model_hybrid) + self.type_split = True + self.model = 
get_model(model_params).to(env.DEVICE) + + +@unittest.skip("Skip for not implemented yet") +class TestEnergyModelHybridVirial(unittest.TestCase, VirialTest): + def setUp(self): + model_params = copy.deepcopy(model_hybrid) + self.type_split = True + self.model = get_model(model_params).to(env.DEVICE) + + +@unittest.skip("Skip for not implemented yet") +class TestEnergyModelZBLForce(unittest.TestCase, ForceTest): + def setUp(self): + model_params = copy.deepcopy(model_zbl) + self.type_split = False + self.model = get_model(model_params).to(env.DEVICE) + + +@unittest.skip("Skip for not implemented yet") +class TestEnergyModelZBLVirial(unittest.TestCase, VirialTest): + def setUp(self): + model_params = copy.deepcopy(model_zbl) + self.type_split = False + self.model = get_model(model_params).to(env.DEVICE) + + +@unittest.skip("Skip for not implemented yet") +class TestEnergyModelSpinSeAForce(unittest.TestCase, ForceTest): + def setUp(self): + model_params = copy.deepcopy(model_spin) + self.type_split = False + self.test_spin = True + self.model = get_model(model_params).to(env.DEVICE) diff --git a/source/tests/pd/model/test_descriptor.py b/source/tests/pd/model/test_descriptor.py new file mode 100644 index 0000000000..10f2fd271b --- /dev/null +++ b/source/tests/pd/model/test_descriptor.py @@ -0,0 +1,195 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import os +import unittest + +import numpy as np +import paddle +import tensorflow.compat.v1 as tf + +tf.disable_eager_execution() + +import json +from pathlib import ( + Path, +) + +from deepmd.pd.model.descriptor import ( + prod_env_mat, +) +from deepmd.pd.utils import ( + decomp, + dp_random, + env, +) +from deepmd.pd.utils.dataset import ( + DeepmdDataSetForLoader, +) +from deepmd.pd.utils.env import ( + DEVICE, + GLOBAL_NP_FLOAT_PRECISION, + GLOBAL_PD_FLOAT_PRECISION, +) +from deepmd.pd.utils.nlist import ( + extend_input_and_build_neighbor_list, +) +from deepmd.tf.common import ( + expand_sys_str, +) +from deepmd.tf.env import ( + op_module, +) + +from ..test_finetune import ( + energy_data_requirement, +) +from .test_embedding_net import ( + get_single_batch, +) + +CUR_DIR = os.path.dirname(__file__) + + +def base_se_a(rcut, rcut_smth, sel, batch, mean, stddev): + g = tf.Graph() + with g.as_default(): + coord = tf.placeholder(GLOBAL_NP_FLOAT_PRECISION, [None, None]) + box = tf.placeholder(GLOBAL_NP_FLOAT_PRECISION, [None, None]) + atype = tf.placeholder(tf.int32, [None, None]) + natoms_vec = tf.placeholder(tf.int32, [None]) + default_mesh = tf.placeholder(tf.int32, [None]) + stat_descrpt, descrpt_deriv, rij, nlist = op_module.prod_env_mat_a( + coord, + atype, + natoms_vec, + box, + default_mesh, + tf.constant(mean), + tf.constant(stddev), + rcut_a=-1.0, + rcut_r=rcut, + rcut_r_smth=rcut_smth, + sel_a=sel, + sel_r=[0 for i in sel], + ) + + net_deriv_reshape = tf.ones_like(stat_descrpt) + force = op_module.prod_force_se_a( + net_deriv_reshape, + descrpt_deriv, + nlist, + natoms_vec, + n_a_sel=sum(sel), + n_r_sel=0, + ) + + with tf.Session(graph=g) as sess: + y = sess.run( + [stat_descrpt, force, nlist], + feed_dict={ + coord: batch["coord"], + box: batch["box"], + natoms_vec: batch["natoms"], + atype: batch["atype"], + default_mesh: np.array([0, 0, 0, 2, 2, 2]), + }, + ) + tf.reset_default_graph() + return y + + +class TestSeA(unittest.TestCase): + def setUp(self): + dp_random.seed(20) + with open(str(Path(__file__).parent / "water/se_e2_a.json")) as fin: + content = fin.read() + config = json.loads(content) + data_file = 
[str(Path(__file__).parent / "water/data/data_0")] + config["training"]["training_data"]["systems"] = data_file + config["training"]["validation_data"]["systems"] = data_file + model_config = config["model"] + self.rcut = model_config["descriptor"]["rcut"] + self.rcut_smth = model_config["descriptor"]["rcut_smth"] + self.sel = model_config["descriptor"]["sel"] + self.bsz = config["training"]["training_data"]["batch_size"] + self.systems = config["training"]["validation_data"]["systems"] + if isinstance(self.systems, str): + self.systems = expand_sys_str(self.systems) + ds = DeepmdDataSetForLoader( + self.systems[0], + model_config["type_map"], + ) + ds.add_data_requirement(energy_data_requirement) + self.np_batch, self.pd_batch = get_single_batch(ds) + self.sec = np.cumsum(self.sel) + self.ntypes = len(self.sel) + self.nnei = sum(self.sel) + + def test_consistency(self): + avg_zero = paddle.zeros( + [self.ntypes, self.nnei * 4], + dtype=GLOBAL_PD_FLOAT_PRECISION, + ).to(device=env.DEVICE) + std_ones = paddle.ones( + [self.ntypes, self.nnei * 4], + dtype=GLOBAL_PD_FLOAT_PRECISION, + ).to(device=env.DEVICE) + base_d, base_force, base_nlist = base_se_a( + rcut=self.rcut, + rcut_smth=self.rcut_smth, + sel=self.sel, + batch=self.np_batch, + mean=avg_zero.detach().cpu(), + stddev=std_ones.detach().cpu(), + ) + + pd_coord = self.pd_batch["coord"].to(env.DEVICE) + atype = self.pd_batch["atype"].to(env.DEVICE) + pd_coord.stop_gradient = False + ( + extended_coord, + extended_atype, + mapping, + nlist, + ) = extend_input_and_build_neighbor_list( + pd_coord, + self.pd_batch["atype"].to(env.DEVICE), + self.rcut, + self.sel, + mixed_types=False, + box=self.pd_batch["box"].to(env.DEVICE), + ) + my_d, _, _ = prod_env_mat( + extended_coord, + nlist, + atype, + avg_zero.reshape([-1, self.nnei, 4]).to(DEVICE), + std_ones.reshape([-1, self.nnei, 4]).to(DEVICE), + self.rcut, + self.rcut_smth, + ) + my_d.sum().backward() + bsz = pd_coord.shape[0] + my_force = pd_coord.grad.reshape([bsz, -1, 3]).cpu().detach().numpy() + base_force = base_force.reshape(bsz, -1, 3) + base_d = base_d.reshape(bsz, -1, self.nnei, 4) + my_d = my_d.reshape([bsz, -1, self.nnei, 4]).cpu().detach().numpy() + base_nlist = base_nlist.reshape(bsz, -1, self.nnei) + + mapping = mapping.cpu() + my_nlist = nlist.reshape([bsz, -1]).cpu() + mask = my_nlist == -1 + my_nlist = my_nlist * (~mask).astype(my_nlist.dtype) + my_nlist = decomp.take_along_axis(mapping, axis=-1, indices=my_nlist) + my_nlist = my_nlist * (~mask).astype(my_nlist.dtype) - mask.astype( + my_nlist.dtype + ) + my_nlist = my_nlist.cpu().reshape([bsz, -1, self.nnei]).numpy() + self.assertTrue(np.allclose(base_nlist, my_nlist)) + self.assertTrue(np.allclose(np.mean(base_d, axis=2), np.mean(my_d, axis=2))) + self.assertTrue(np.allclose(np.std(base_d, axis=2), np.std(my_d, axis=2))) + # descriptors may be different when there are multiple neighbors in the same distance + self.assertTrue(np.allclose(base_force, -my_force)) + + +if __name__ == "__main__": + unittest.main() diff --git a/source/tests/pd/model/test_dp_atomic_model.py b/source/tests/pd/model/test_dp_atomic_model.py new file mode 100644 index 0000000000..785bfa1076 --- /dev/null +++ b/source/tests/pd/model/test_dp_atomic_model.py @@ -0,0 +1,235 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import itertools +import unittest + +import numpy as np +import paddle + +from deepmd.dpmodel.atomic_model import DPAtomicModel as DPDPAtomicModel +from deepmd.dpmodel.descriptor import DescrptSeA as DPDescrptSeA +from 
deepmd.dpmodel.fitting import InvarFitting as DPInvarFitting +from deepmd.pd.model.atomic_model import ( + DPAtomicModel, +) +from deepmd.pd.model.descriptor.se_a import ( + DescrptSeA, +) +from deepmd.pd.model.task.ener import ( + InvarFitting, +) +from deepmd.pd.utils import ( + env, +) +from deepmd.pd.utils.utils import ( + to_numpy_array, + to_paddle_tensor, +) + +from .test_env_mat import ( + TestCaseSingleFrameWithNlist, + TestCaseSingleFrameWithNlistWithVirtual, +) + +dtype = env.GLOBAL_PD_FLOAT_PRECISION + + +class TestDPAtomicModel(unittest.TestCase, TestCaseSingleFrameWithNlist): + def setUp(self): + TestCaseSingleFrameWithNlist.setUp(self) + + def test_self_consistency(self): + nf, nloc, nnei = self.nlist.shape + ds = DescrptSeA( + self.rcut, + self.rcut_smth, + self.sel, + ).to(env.DEVICE) + ft = InvarFitting( + "energy", + self.nt, + ds.get_dim_out(), + 1, + mixed_types=ds.mixed_types(), + ).to(env.DEVICE) + type_map = ["foo", "bar"] + + # test the case of exclusion + for atom_excl, pair_excl in itertools.product([[], [1]], [[], [[0, 1]]]): + md0 = DPAtomicModel( + ds, + ft, + type_map=type_map, + ).to(env.DEVICE) + md0.reinit_atom_exclude(atom_excl) + md0.reinit_pair_exclude(pair_excl) + md1 = DPAtomicModel.deserialize(md0.serialize()).to(env.DEVICE) + args = [ + to_paddle_tensor(ii) + for ii in [self.coord_ext, self.atype_ext, self.nlist] + ] + ret0 = md0.forward_common_atomic(*args) + ret1 = md1.forward_common_atomic(*args) + np.testing.assert_allclose( + to_numpy_array(ret0["energy"]), + to_numpy_array(ret1["energy"]), + ) + + def test_dp_consistency(self): + nf, nloc, nnei = self.nlist.shape + ds = DPDescrptSeA( + self.rcut, + self.rcut_smth, + self.sel, + ) + ft = DPInvarFitting( + "energy", + self.nt, + ds.get_dim_out(), + 1, + mixed_types=ds.mixed_types(), + ) + type_map = ["foo", "bar"] + md0 = DPDPAtomicModel(ds, ft, type_map=type_map) + md1 = DPAtomicModel.deserialize(md0.serialize()).to(env.DEVICE) + args0 = [self.coord_ext, self.atype_ext, self.nlist] + args1 = [ + to_paddle_tensor(ii) for ii in [self.coord_ext, self.atype_ext, self.nlist] + ] + ret0 = md0.forward_common_atomic(*args0) + ret1 = md1.forward_common_atomic(*args1) + np.testing.assert_allclose( + ret0["energy"], + to_numpy_array(ret1["energy"]), + ) + + def test_jit(self): + nf, nloc, nnei = self.nlist.shape + ds = DescrptSeA( + self.rcut, + self.rcut_smth, + self.sel, + ).to(env.DEVICE) + ft = InvarFitting( + "energy", + self.nt, + ds.get_dim_out(), + 1, + mixed_types=ds.mixed_types(), + ).to(env.DEVICE) + type_map = ["foo", "bar"] + md0 = DPAtomicModel(ds, ft, type_map=type_map).to(env.DEVICE) + md0 = paddle.jit.to_static(md0) + self.assertEqual(md0.get_rcut(), self.rcut) + self.assertEqual(md0.get_type_map(), type_map) + + def test_excl_consistency(self): + type_map = ["foo", "bar"] + + # test the case of exclusion + for atom_excl, pair_excl in itertools.product([[], [1]], [[], [[0, 1]]]): + ds = DescrptSeA( + self.rcut, + self.rcut_smth, + self.sel, + ).to(env.DEVICE) + ft = InvarFitting( + "energy", + self.nt, + ds.get_dim_out(), + 1, + mixed_types=ds.mixed_types(), + ).to(env.DEVICE) + md0 = DPAtomicModel( + ds, + ft, + type_map=type_map, + ).to(env.DEVICE) + md1 = DPAtomicModel.deserialize(md0.serialize()).to(env.DEVICE) + + md0.reinit_atom_exclude(atom_excl) + md0.reinit_pair_exclude(pair_excl) + # hacking! 
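+            # md0 receives the exclusions through the DPAtomicModel
+            # reinit_* API above; md1 gets the same settings pushed directly
+            # into its descriptor and fitting submodules, so the two code
+            # paths can be checked against each other below.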
+ md1.descriptor.reinit_exclude(pair_excl) + md1.fitting_net.reinit_exclude(atom_excl) + + # check energy consistency + args = [ + to_paddle_tensor(ii) + for ii in [self.coord_ext, self.atype_ext, self.nlist] + ] + ret0 = md0.forward_common_atomic(*args) + ret1 = md1.forward_common_atomic(*args) + np.testing.assert_allclose( + to_numpy_array(ret0["energy"]), + to_numpy_array(ret1["energy"]), + ) + + # check output def + out_names = [vv.name for vv in md0.atomic_output_def().get_data().values()] + self.assertEqual(out_names, ["energy", "mask"]) + if atom_excl != []: + for ii in md0.atomic_output_def().get_data().values(): + if ii.name == "mask": + self.assertEqual(ii.shape, [1]) + self.assertFalse(ii.reducible) + self.assertFalse(ii.r_differentiable) + self.assertFalse(ii.c_differentiable) + + # check mask + if atom_excl == []: + pass + elif atom_excl == [1]: + self.assertIn("mask", ret0.keys()) + expected = np.array([1, 1, 0], dtype="int64") + expected = np.concatenate( + [expected, expected[self.perm[: self.nloc]]] + ).reshape(2, 3) + np.testing.assert_array_equal(to_numpy_array(ret0["mask"]), expected) + else: + raise ValueError(f"not expected atom_excl {atom_excl}") + + +class TestDPAtomicModelVirtualConsistency(unittest.TestCase): + def setUp(self): + self.case0 = TestCaseSingleFrameWithNlist() + self.case1 = TestCaseSingleFrameWithNlistWithVirtual() + self.case0.setUp() + self.case1.setUp() + + def test_virtual_consistency(self): + nf, _, _ = self.case0.nlist.shape + ds = DescrptSeA( + self.case0.rcut, + self.case0.rcut_smth, + self.case0.sel, + ) + ft = InvarFitting( + "energy", + self.case0.nt, + ds.get_dim_out(), + 1, + mixed_types=ds.mixed_types(), + ) + type_map = ["foo", "bar"] + md1 = DPAtomicModel(ds, ft, type_map=type_map).to(env.DEVICE) + + args0 = [self.case0.coord_ext, self.case0.atype_ext, self.case0.nlist] + args0 = [to_paddle_tensor(ii) for ii in args0] + args1 = [self.case1.coord_ext, self.case1.atype_ext, self.case1.nlist] + args1 = [to_paddle_tensor(ii) for ii in args1] + + ret0 = md1.forward_common_atomic(*args0) + ret1 = md1.forward_common_atomic(*args1) + + for dd in range(self.case0.nf): + np.testing.assert_allclose( + to_numpy_array(ret0["energy"])[dd], + to_numpy_array(ret1["energy"])[dd, self.case1.get_real_mapping[dd], :], + ) + expected_mask = np.array( + [ + [1, 0, 1, 1], + [1, 1, 0, 1], + ] + ) + np.testing.assert_equal(to_numpy_array(ret1["mask"]), expected_mask) diff --git a/source/tests/pd/model/test_dp_model.py b/source/tests/pd/model/test_dp_model.py new file mode 100644 index 0000000000..a281851f14 --- /dev/null +++ b/source/tests/pd/model/test_dp_model.py @@ -0,0 +1,633 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import unittest + +import numpy as np +import paddle + +from deepmd.dpmodel.descriptor import DescrptSeA as DPDescrptSeA +from deepmd.dpmodel.fitting import EnergyFittingNet as DPEnergyFittingNet +from deepmd.dpmodel.model.ener_model import EnergyModel as DPEnergyModel +from deepmd.pd.model.descriptor.se_a import ( + DescrptSeA, +) +from deepmd.pd.model.model import ( + EnergyModel, +) +from deepmd.pd.model.task.ener import ( + EnergyFittingNet, +) +from deepmd.pd.utils import ( + env, +) +from deepmd.pd.utils.nlist import ( + build_neighbor_list, + extend_coord_with_ghosts, + extend_input_and_build_neighbor_list, +) +from deepmd.pd.utils.utils import ( + to_numpy_array, + to_paddle_tensor, +) + +from ...seed import ( + GLOBAL_SEED, +) +from .test_env_mat import ( + TestCaseSingleFrameWithNlist, + TestCaseSingleFrameWithoutNlist, +) + 
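+# The tests below check the Paddle energy model three ways: against its own
+# serialized/deserialized copy (self consistency), against the reference
+# NumPy dpmodel implementation (dp consistency), and across float32/float64
+# inputs (precision consistency).
+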
+dtype = env.GLOBAL_PD_FLOAT_PRECISION + + +class TestDPModel(unittest.TestCase, TestCaseSingleFrameWithoutNlist): + def setUp(self): + TestCaseSingleFrameWithoutNlist.setUp(self) + + def test_self_consistency(self): + nf, nloc = self.atype.shape + ds = DescrptSeA( + self.rcut, + self.rcut_smth, + self.sel, + ).to(env.DEVICE) + ft = EnergyFittingNet( + self.nt, + ds.get_dim_out(), + mixed_types=ds.mixed_types(), + ).to(env.DEVICE) + type_map = ["foo", "bar"] + md0 = EnergyModel(ds, ft, type_map=type_map).to(env.DEVICE) + md1 = EnergyModel.deserialize(md0.serialize()).to(env.DEVICE) + args = [to_paddle_tensor(ii) for ii in [self.coord, self.atype, self.cell]] + ret0 = md0.forward_common(*args) + ret1 = md1.forward_common(*args) + np.testing.assert_allclose( + to_numpy_array(ret0["energy"]), + to_numpy_array(ret1["energy"]), + atol=self.atol, + ) + np.testing.assert_allclose( + to_numpy_array(ret0["energy_redu"]), + to_numpy_array(ret1["energy_redu"]), + atol=self.atol, + ) + np.testing.assert_allclose( + to_numpy_array(ret0["energy_derv_r"]), + to_numpy_array(ret1["energy_derv_r"]), + atol=self.atol, + ) + np.testing.assert_allclose( + to_numpy_array(ret0["energy_derv_c_redu"]), + to_numpy_array(ret1["energy_derv_c_redu"]), + atol=self.atol, + ) + ret0 = md0.forward_common(*args, do_atomic_virial=True) + ret1 = md1.forward_common(*args, do_atomic_virial=True) + np.testing.assert_allclose( + to_numpy_array(ret0["energy_derv_c"]), + to_numpy_array(ret1["energy_derv_c"]), + atol=self.atol, + ) + + coord_ext, atype_ext, mapping = extend_coord_with_ghosts( + to_paddle_tensor(self.coord), + to_paddle_tensor(self.atype), + to_paddle_tensor(self.cell), + self.rcut, + ) + nlist = build_neighbor_list( + coord_ext, + atype_ext, + self.nloc, + self.rcut, + self.sel, + distinguish_types=(not md0.mixed_types()), + ) + args = [coord_ext, atype_ext, nlist] + ret2 = md0.forward_common_lower(*args, do_atomic_virial=True) + # check the consistency between the reduced virial from + # forward_common and forward_common_lower + np.testing.assert_allclose( + to_numpy_array(ret0["energy_derv_c_redu"]), + to_numpy_array(ret2["energy_derv_c_redu"]), + atol=self.atol, + ) + + def test_dp_consistency(self): + nf, nloc = self.atype.shape + nfp, nap = 2, 3 + ds = DPDescrptSeA( + self.rcut, + self.rcut_smth, + self.sel, + ) + ft = DPEnergyFittingNet( + self.nt, + ds.get_dim_out(), + mixed_types=ds.mixed_types(), + numb_fparam=nfp, + numb_aparam=nap, + ) + type_map = ["foo", "bar"] + md0 = DPEnergyModel(ds, ft, type_map=type_map) + md1 = EnergyModel.deserialize(md0.serialize()).to(env.DEVICE) + + rng = np.random.default_rng(GLOBAL_SEED) + fparam = rng.normal(size=[self.nf, nfp]) + aparam = rng.normal(size=[self.nf, nloc, nap]) + args0 = [self.coord, self.atype, self.cell] + args1 = [to_paddle_tensor(ii) for ii in [self.coord, self.atype, self.cell]] + kwargs0 = {"fparam": fparam, "aparam": aparam} + kwargs1 = {kk: to_paddle_tensor(vv) for kk, vv in kwargs0.items()} + ret0 = md0.call(*args0, **kwargs0) + ret1 = md1.forward_common(*args1, **kwargs1) + np.testing.assert_allclose( + ret0["energy"], + to_numpy_array(ret1["energy"]), + atol=self.atol, + ) + np.testing.assert_allclose( + ret0["energy_redu"], + to_numpy_array(ret1["energy_redu"]), + atol=self.atol, + ) + + def test_dp_consistency_nopbc(self): + nf, nloc = self.atype.shape + nfp, nap = 2, 3 + ds = DPDescrptSeA( + self.rcut, + self.rcut_smth, + self.sel, + ) + ft = DPEnergyFittingNet( + self.nt, + ds.get_dim_out(), + mixed_types=ds.mixed_types(), + numb_fparam=nfp, 
+ numb_aparam=nap, + ) + type_map = ["foo", "bar"] + md0 = DPEnergyModel(ds, ft, type_map=type_map) + md1 = EnergyModel.deserialize(md0.serialize()).to(env.DEVICE) + + rng = np.random.default_rng(GLOBAL_SEED) + fparam = rng.normal(size=[self.nf, nfp]) + aparam = rng.normal(size=[self.nf, self.nloc, nap]) + args0 = [self.coord, self.atype] + args1 = [to_paddle_tensor(ii) for ii in args0] + kwargs0 = {"fparam": fparam, "aparam": aparam} + kwargs1 = {kk: to_paddle_tensor(vv) for kk, vv in kwargs0.items()} + ret0 = md0.call(*args0, **kwargs0) + ret1 = md1.forward_common(*args1, **kwargs1) + np.testing.assert_allclose( + ret0["energy"], + to_numpy_array(ret1["energy"]), + atol=self.atol, + ) + np.testing.assert_allclose( + ret0["energy_redu"], + to_numpy_array(ret1["energy_redu"]), + atol=self.atol, + ) + + def test_prec_consistency(self): + rng = np.random.default_rng(GLOBAL_SEED) + nf, nloc = self.atype.shape + ds = DPDescrptSeA( + self.rcut, + self.rcut_smth, + self.sel, + ) + ft = DPEnergyFittingNet( + self.nt, + ds.get_dim_out(), + mixed_types=ds.mixed_types(), + ) + nfp, nap = 2, 3 + type_map = ["foo", "bar"] + fparam = rng.normal(size=[self.nf, nfp]) + aparam = rng.normal(size=[self.nf, nloc, nap]) + + md0 = DPEnergyModel(ds, ft, type_map=type_map) + md1 = EnergyModel.deserialize(md0.serialize()).to(env.DEVICE) + + args64 = [to_paddle_tensor(ii) for ii in [self.coord, self.atype, self.cell]] + args64[0] = args64[0].to(paddle.float64) + args64[2] = args64[2].to(paddle.float64) + args32 = [to_paddle_tensor(ii) for ii in [self.coord, self.atype, self.cell]] + args32[0] = args32[0].to(paddle.float32) + args32[2] = args32[2].to(paddle.float32) + # fparam, aparam are converted to coordinate precision by model + fparam = to_paddle_tensor(fparam) + aparam = to_paddle_tensor(aparam) + + model_l_ret_64 = md1.forward_common(*args64, fparam=fparam, aparam=aparam) + model_l_ret_32 = md1.forward_common(*args32, fparam=fparam, aparam=aparam) + + for ii in model_l_ret_32.keys(): + if ii[-4:] == "redu": + self.assertEqual(model_l_ret_32[ii].dtype, paddle.float64) + else: + self.assertEqual(model_l_ret_32[ii].dtype, paddle.float32) + if ii != "mask": + self.assertEqual(model_l_ret_64[ii].dtype, paddle.float64) + else: + self.assertEqual(model_l_ret_64[ii].dtype, paddle.int32) + np.testing.assert_allclose( + to_numpy_array(model_l_ret_32[ii]), + to_numpy_array(model_l_ret_64[ii]), + atol=self.atol, + ) + + +class TestDPModelLower(unittest.TestCase, TestCaseSingleFrameWithNlist): + def setUp(self): + TestCaseSingleFrameWithNlist.setUp(self) + + def test_self_consistency(self): + nf, nloc, nnei = self.nlist.shape + ds = DescrptSeA( + self.rcut, + self.rcut_smth, + self.sel, + ).to(env.DEVICE) + ft = EnergyFittingNet( + self.nt, + ds.get_dim_out(), + mixed_types=ds.mixed_types(), + ).to(env.DEVICE) + type_map = ["foo", "bar"] + md0 = EnergyModel(ds, ft, type_map=type_map).to(env.DEVICE) + md1 = EnergyModel.deserialize(md0.serialize()).to(env.DEVICE) + args = [ + to_paddle_tensor(ii) for ii in [self.coord_ext, self.atype_ext, self.nlist] + ] + ret0 = md0.forward_common_lower(*args) + ret1 = md1.forward_common_lower(*args) + np.testing.assert_allclose( + to_numpy_array(ret0["energy"]), + to_numpy_array(ret1["energy"]), + atol=self.atol, + ) + np.testing.assert_allclose( + to_numpy_array(ret0["energy_redu"]), + to_numpy_array(ret1["energy_redu"]), + atol=self.atol, + ) + np.testing.assert_allclose( + to_numpy_array(ret0["energy_derv_r"]), + to_numpy_array(ret1["energy_derv_r"]), + atol=self.atol, + ) + 
np.testing.assert_allclose(
+            to_numpy_array(ret0["energy_derv_c_redu"]),
+            to_numpy_array(ret1["energy_derv_c_redu"]),
+            atol=self.atol,
+        )
+        ret0 = md0.forward_common_lower(*args, do_atomic_virial=True)
+        ret1 = md1.forward_common_lower(*args, do_atomic_virial=True)
+        np.testing.assert_allclose(
+            to_numpy_array(ret0["energy_derv_c"]),
+            to_numpy_array(ret1["energy_derv_c"]),
+            atol=self.atol,
+        )
+
+    def test_dp_consistency(self):
+        nf, nloc, nnei = self.nlist.shape
+        ds = DPDescrptSeA(
+            self.rcut,
+            self.rcut_smth,
+            self.sel,
+        )
+        ft = DPEnergyFittingNet(
+            self.nt,
+            ds.get_dim_out(),
+            mixed_types=ds.mixed_types(),
+        )
+        type_map = ["foo", "bar"]
+        md0 = DPEnergyModel(ds, ft, type_map=type_map)
+        md1 = EnergyModel.deserialize(md0.serialize()).to(env.DEVICE)
+        args0 = [self.coord_ext, self.atype_ext, self.nlist]
+        args1 = [
+            to_paddle_tensor(ii) for ii in [self.coord_ext, self.atype_ext, self.nlist]
+        ]
+        ret0 = md0.call_lower(*args0)
+        ret1 = md1.forward_common_lower(*args1)
+        np.testing.assert_allclose(
+            ret0["energy"],
+            to_numpy_array(ret1["energy"]),
+            atol=self.atol,
+        )
+        np.testing.assert_allclose(
+            ret0["energy_redu"],
+            to_numpy_array(ret1["energy_redu"]),
+            atol=self.atol,
+        )
+
+    def test_prec_consistency(self):
+        rng = np.random.default_rng(GLOBAL_SEED)
+        nf, nloc, nnei = self.nlist.shape
+        ds = DPDescrptSeA(
+            self.rcut,
+            self.rcut_smth,
+            self.sel,
+        )
+        ft = DPEnergyFittingNet(
+            self.nt,
+            ds.get_dim_out(),
+            mixed_types=ds.mixed_types(),
+        )
+        nfp, nap = 2, 3
+        type_map = ["foo", "bar"]
+        fparam = rng.normal(size=[self.nf, nfp])
+        aparam = rng.normal(size=[self.nf, nloc, nap])
+
+        md0 = DPEnergyModel(ds, ft, type_map=type_map)
+        md1 = EnergyModel.deserialize(md0.serialize()).to(env.DEVICE)
+
+        args64 = [
+            to_paddle_tensor(ii) for ii in [self.coord_ext, self.atype_ext, self.nlist]
+        ]
+        args64[0] = args64[0].to(paddle.float64)
+        args32 = [
+            to_paddle_tensor(ii) for ii in [self.coord_ext, self.atype_ext, self.nlist]
+        ]
+        args32[0] = args32[0].to(paddle.float32)
+        # fparam, aparam are converted to coordinate precision by model
+        fparam = to_paddle_tensor(fparam)
+        aparam = to_paddle_tensor(aparam)
+
+        model_l_ret_64 = md1.forward_common_lower(*args64, fparam=fparam, aparam=aparam)
+        model_l_ret_32 = md1.forward_common_lower(*args32, fparam=fparam, aparam=aparam)
+
+        for ii in model_l_ret_32.keys():
+            if ii[-4:] == "redu":
+                self.assertEqual(model_l_ret_32[ii].dtype, paddle.float64)
+            else:
+                self.assertEqual(model_l_ret_32[ii].dtype, paddle.float32)
+            if ii != "mask":
+                self.assertEqual(model_l_ret_64[ii].dtype, paddle.float64)
+            else:
+                self.assertEqual(model_l_ret_64[ii].dtype, paddle.int32)
+            np.testing.assert_allclose(
+                to_numpy_array(model_l_ret_32[ii]),
+                to_numpy_array(model_l_ret_64[ii]),
+                atol=self.atol,
+            )
+
+    def test_jit(self):
+        nf, nloc, nnei = self.nlist.shape
+        ds = DescrptSeA(
+            self.rcut,
+            self.rcut_smth,
+            self.sel,
+        ).to(env.DEVICE)
+        ft = EnergyFittingNet(
+            self.nt,
+            ds.get_dim_out(),
+            mixed_types=ds.mixed_types(),
+        ).to(env.DEVICE)
+        type_map = ["foo", "bar"]
+        md0 = EnergyModel(ds, ft, type_map=type_map).to(env.DEVICE)
+        md0 = paddle.jit.to_static(md0)
+        md0.get_rcut()
+        md0.get_type_map()
+
+
+class TestDPModelFormatNlist(unittest.TestCase):
+    def setUp(self):
+        # nloc == 3, nall == 5
+        self.nloc = 3
+        self.nall = 5
+        self.nf, self.nt = 1, 2
+        self.coord_ext = np.array(
+            [
+                [0, 0, 0],
+                [0, 1, 0],
+                [0, 0, 1],
+                [0, -2, 0],
+                [2.3, 0, 0],
+            ],
+            dtype=np.float64,
+        ).reshape([1, self.nall * 3])
+        # sel = [5, 2]
+        self.sel = 
[5, 2] + self.expected_nlist = np.array( + [ + [1, 3, -1, -1, -1, 2, -1], + [0, -1, -1, -1, -1, 2, -1], + [0, 1, -1, -1, -1, -1, -1], + ], + dtype="int64", + ).reshape([1, self.nloc, sum(self.sel)]) + self.atype_ext = np.array([0, 0, 1, 0, 1], dtype="int64").reshape( + [1, self.nall] + ) + self.rcut_smth = 0.4 + self.rcut = 2.0 + + nf, nloc, nnei = self.expected_nlist.shape + ds = DescrptSeA( + self.rcut, + self.rcut_smth, + self.sel, + ).to(env.DEVICE) + ft = EnergyFittingNet( + self.nt, + ds.get_dim_out(), + mixed_types=ds.mixed_types(), + ).to(env.DEVICE) + type_map = ["foo", "bar"] + self.md = EnergyModel(ds, ft, type_map=type_map).to(env.DEVICE) + + def test_nlist_eq(self): + # n_nnei == nnei + nlist = np.array( + [ + [1, 3, -1, -1, -1, 2, -1], + [0, -1, -1, -1, -1, 2, -1], + [0, 1, -1, -1, -1, -1, -1], + ], + dtype=np.int64, + ).reshape([1, self.nloc, -1]) + nlist1 = self.md.format_nlist( + to_paddle_tensor(self.coord_ext), + to_paddle_tensor(self.atype_ext), + to_paddle_tensor(nlist), + ) + np.testing.assert_equal(self.expected_nlist, to_numpy_array(nlist1)) + + def test_nlist_st(self): + # n_nnei < nnei + nlist = np.array( + [ + [1, 3, -1, 2], + [0, -1, -1, 2], + [0, 1, -1, -1], + ], + dtype=np.int64, + ).reshape([1, self.nloc, -1]) + nlist1 = self.md.format_nlist( + to_paddle_tensor(self.coord_ext), + to_paddle_tensor(self.atype_ext), + to_paddle_tensor(nlist), + ) + np.testing.assert_equal(self.expected_nlist, to_numpy_array(nlist1)) + + def test_nlist_lt(self): + # n_nnei > nnei + nlist = np.array( + [ + [1, 3, -1, -1, -1, 2, -1, -1, 4], + [0, -1, 4, -1, -1, 2, -1, 3, -1], + [0, 1, -1, -1, -1, 4, -1, -1, 3], + ], + dtype=np.int64, + ).reshape([1, self.nloc, -1]) + nlist1 = self.md.format_nlist( + to_paddle_tensor(self.coord_ext), + to_paddle_tensor(self.atype_ext), + to_paddle_tensor(nlist), + ) + np.testing.assert_equal(self.expected_nlist, to_numpy_array(nlist1)) + + +class TestEnergyModel(unittest.TestCase, TestCaseSingleFrameWithoutNlist): + def setUp(self): + TestCaseSingleFrameWithoutNlist.setUp(self) + + def test_self_consistency(self): + nf, nloc = self.atype.shape + ds = DescrptSeA( + self.rcut, + self.rcut_smth, + self.sel, + ).to(env.DEVICE) + ft = EnergyFittingNet( + self.nt, + ds.get_dim_out(), + mixed_types=ds.mixed_types(), + ).to(env.DEVICE) + type_map = ["foo", "bar"] + md0 = EnergyModel(ds, ft, type_map=type_map).to(env.DEVICE) + md1 = EnergyModel.deserialize(md0.serialize()).to(env.DEVICE) + args = [to_paddle_tensor(ii) for ii in [self.coord, self.atype, self.cell]] + ret0 = md0.forward(*args) + ret1 = md1.forward(*args) + np.testing.assert_allclose( + to_numpy_array(ret0["atom_energy"]), + to_numpy_array(ret1["atom_energy"]), + atol=self.atol, + ) + np.testing.assert_allclose( + to_numpy_array(ret0["energy"]), + to_numpy_array(ret1["energy"]), + atol=self.atol, + ) + np.testing.assert_allclose( + to_numpy_array(ret0["force"]), + to_numpy_array(ret1["force"]), + atol=self.atol, + ) + np.testing.assert_allclose( + to_numpy_array(ret0["virial"]), + to_numpy_array(ret1["virial"]), + atol=self.atol, + ) + ret0 = md0.forward(*args, do_atomic_virial=True) + ret1 = md1.forward(*args, do_atomic_virial=True) + np.testing.assert_allclose( + to_numpy_array(ret0["atom_virial"]), + to_numpy_array(ret1["atom_virial"]), + atol=self.atol, + ) + coord_ext, atype_ext, mapping, nlist = extend_input_and_build_neighbor_list( + to_paddle_tensor(self.coord), + to_paddle_tensor(self.atype), + self.rcut, + self.sel, + mixed_types=md0.mixed_types(), + box=to_paddle_tensor(self.cell), + 
) + args = [coord_ext, atype_ext, nlist] + ret2 = md0.forward_lower(*args, do_atomic_virial=True) + # check the consistency between the reduced virial from + # forward and forward_lower + np.testing.assert_allclose( + to_numpy_array(ret0["virial"]), + to_numpy_array(ret2["virial"]), + atol=self.atol, + ) + + +class TestEnergyModelLower(unittest.TestCase, TestCaseSingleFrameWithNlist): + def setUp(self): + TestCaseSingleFrameWithNlist.setUp(self) + + def test_self_consistency(self): + nf, nloc, nnei = self.nlist.shape + ds = DescrptSeA( + self.rcut, + self.rcut_smth, + self.sel, + ).to(env.DEVICE) + ft = EnergyFittingNet( + self.nt, + ds.get_dim_out(), + mixed_types=ds.mixed_types(), + ).to(env.DEVICE) + type_map = ["foo", "bar"] + md0 = EnergyModel(ds, ft, type_map=type_map).to(env.DEVICE) + md1 = EnergyModel.deserialize(md0.serialize()).to(env.DEVICE) + args = [ + to_paddle_tensor(ii) for ii in [self.coord_ext, self.atype_ext, self.nlist] + ] + ret0 = md0.forward_lower(*args) + ret1 = md1.forward_lower(*args) + np.testing.assert_allclose( + to_numpy_array(ret0["atom_energy"]), + to_numpy_array(ret1["atom_energy"]), + atol=self.atol, + ) + np.testing.assert_allclose( + to_numpy_array(ret0["energy"]), + to_numpy_array(ret1["energy"]), + atol=self.atol, + ) + np.testing.assert_allclose( + to_numpy_array(ret0["extended_force"]), + to_numpy_array(ret1["extended_force"]), + atol=self.atol, + ) + np.testing.assert_allclose( + to_numpy_array(ret0["virial"]), + to_numpy_array(ret1["virial"]), + atol=self.atol, + ) + ret0 = md0.forward_lower(*args, do_atomic_virial=True) + ret1 = md1.forward_lower(*args, do_atomic_virial=True) + np.testing.assert_allclose( + to_numpy_array(ret0["extended_virial"]), + to_numpy_array(ret1["extended_virial"]), + atol=self.atol, + ) + + def test_jit(self): + nf, nloc, nnei = self.nlist.shape + ds = DescrptSeA( + self.rcut, + self.rcut_smth, + self.sel, + ).to(env.DEVICE) + ft = EnergyFittingNet( + self.nt, + ds.get_dim_out(), + mixed_types=ds.mixed_types(), + ).to(env.DEVICE) + type_map = ["foo", "bar"] + md0 = EnergyModel(ds, ft, type_map=type_map).to(env.DEVICE) + md0 = paddle.jit.to_static(md0) + self.assertEqual(md0.get_rcut(), self.rcut) + self.assertEqual(md0.get_type_map(), type_map) diff --git a/source/tests/pd/model/test_embedding_net.py b/source/tests/pd/model/test_embedding_net.py new file mode 100644 index 0000000000..2dcc9f821b --- /dev/null +++ b/source/tests/pd/model/test_embedding_net.py @@ -0,0 +1,217 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import json +import os +import re +import unittest + +import numpy as np +import paddle +import tensorflow.compat.v1 as tf + +from deepmd.pd.utils import ( + env, +) + +tf.disable_eager_execution() + +from pathlib import ( + Path, +) + +from deepmd.pd.model.descriptor import ( + DescrptSeA, +) +from deepmd.pd.utils import ( + dp_random, +) +from deepmd.pd.utils.dataset import ( + DeepmdDataSetForLoader, +) +from deepmd.pd.utils.env import ( + DEVICE, + GLOBAL_NP_FLOAT_PRECISION, +) +from deepmd.pd.utils.nlist import ( + extend_input_and_build_neighbor_list, +) +from deepmd.tf.common import ( + expand_sys_str, +) +from deepmd.tf.descriptor import DescrptSeA as DescrptSeA_tf + +from ..test_finetune import ( + energy_data_requirement, +) + +CUR_DIR = os.path.dirname(__file__) + + +def gen_key(worb, depth, elemid): + return (worb, depth, elemid) + + +def get_single_batch(dataset, index=None): + if index is None: + index = dp_random.choice(np.arange(len(dataset))) + np_batch = dataset[index] + pd_batch = {} + + 
for key in [ + "coord", + "box", + "force", + "force_mag", + "energy", + "virial", + "atype", + "natoms", + ]: + if key in np_batch.keys(): + np_batch[key] = np.expand_dims(np_batch[key], axis=0) + pd_batch[key] = paddle.to_tensor(np_batch[key]).to(device=env.DEVICE) + if key in ["coord", "force", "force_mag"]: + np_batch[key] = np_batch[key].reshape(1, -1) + np_batch["natoms"] = np_batch["natoms"][0] + return np_batch, pd_batch + + +def base_se_a(descriptor, coord, atype, natoms, box): + g = tf.Graph() + with g.as_default(): + name_pfx = "d_sea_" + t_coord = tf.placeholder( + GLOBAL_NP_FLOAT_PRECISION, [None, None], name=name_pfx + "t_coord" + ) + t_atype = tf.placeholder(tf.int32, [None, None], name=name_pfx + "t_type") + t_natoms = tf.placeholder( + tf.int32, [descriptor.ntypes + 2], name=name_pfx + "t_natoms" + ) + t_box = tf.placeholder( + GLOBAL_NP_FLOAT_PRECISION, [None, None], name=name_pfx + "t_box" + ) + t_default_mesh = tf.placeholder(tf.int32, [None], name=name_pfx + "t_mesh") + t_embedding = descriptor.build( + t_coord, t_atype, t_natoms, t_box, t_default_mesh, input_dict={} + ) + fake_energy = tf.reduce_sum(t_embedding) + t_force = descriptor.prod_force_virial(fake_energy, t_natoms)[0] + t_vars = {} + for var in tf.global_variables(): + ms = re.findall(r"([a-z]+)_(\d)_(\d)", var.name) + if len(ms) == 1: + m = ms[0] + key = gen_key(worb=m[0], depth=int(m[1]), elemid=int(m[2])) + t_vars[key] = var + init_op = tf.global_variables_initializer() + + with tf.Session(graph=g) as sess: + sess.run(init_op) + embedding, force, values = sess.run( + [t_embedding, t_force, t_vars], + feed_dict={ + t_coord: coord, + t_atype: atype, + t_natoms: natoms, + t_box: box, + t_default_mesh: np.array([0, 0, 0, 2, 2, 2]), + }, + ) + tf.reset_default_graph() + return embedding, force, values + + +class TestSeA(unittest.TestCase): + def setUp(self): + dp_random.seed(0) + with open(str(Path(__file__).parent / "water/se_e2_a.json")) as fin: + content = fin.read() + config = json.loads(content) + data_file = [str(Path(__file__).parent / "water/data/data_0")] + config["training"]["training_data"]["systems"] = data_file + config["training"]["validation_data"]["systems"] = data_file + model_config = config["model"] + self.rcut = model_config["descriptor"]["rcut"] + self.rcut_smth = model_config["descriptor"]["rcut_smth"] + self.sel = model_config["descriptor"]["sel"] + self.bsz = config["training"]["training_data"]["batch_size"] + self.systems = config["training"]["validation_data"]["systems"] + if isinstance(self.systems, str): + self.systems = expand_sys_str(self.systems) + ds = DeepmdDataSetForLoader( + self.systems[0], + model_config["type_map"], + ) + ds.add_data_requirement(energy_data_requirement) + self.filter_neuron = model_config["descriptor"]["neuron"] + self.axis_neuron = model_config["descriptor"]["axis_neuron"] + self.np_batch, self.paddle_batch = get_single_batch(ds) + + def test_consistency(self): + dp_d = DescrptSeA_tf( + rcut=self.rcut, + rcut_smth=self.rcut_smth, + sel=self.sel, + neuron=self.filter_neuron, + axis_neuron=self.axis_neuron, + seed=1, + ) + dp_embedding, dp_force, dp_vars = base_se_a( + descriptor=dp_d, + coord=self.np_batch["coord"], + atype=self.np_batch["atype"], + natoms=self.np_batch["natoms"], + box=self.np_batch["box"], + ) + + # Reproduced + descriptor = DescrptSeA( + self.rcut, + self.rcut_smth, + self.sel, + neuron=self.filter_neuron, + axis_neuron=self.axis_neuron, + ).to(DEVICE) + for name, param in descriptor.named_parameters(): + ms = 
re.findall(r"(\d)\.layers\.(\d)\.([a-z]+)", name)
+            if len(ms) == 1:
+                m = ms[0]
+                key = gen_key(worb=m[2], depth=int(m[1]) + 1, elemid=int(m[0]))
+                var = dp_vars[key]
+                with paddle.no_grad():
+                    # Keep parameter values consistent between the two implementations
+                    paddle.assign(var, param)
+
+        pd_coord = self.paddle_batch["coord"].to(env.DEVICE)
+        pd_coord.stop_gradient = False
+
+        (
+            extended_coord,
+            extended_atype,
+            mapping,
+            nlist,
+        ) = extend_input_and_build_neighbor_list(
+            pd_coord,
+            self.paddle_batch["atype"].to(env.DEVICE),
+            self.rcut,
+            self.sel,
+            mixed_types=False,
+            box=self.paddle_batch["box"].to(env.DEVICE),
+        )
+        descriptor_out, _, _, _, _ = descriptor(
+            extended_coord,
+            extended_atype,
+            nlist,
+        )
+        my_embedding = descriptor_out.cpu().detach().numpy()
+        fake_energy = paddle.sum(descriptor_out)
+        fake_energy.backward()
+        my_force = -pd_coord.grad.cpu().numpy()
+
+        # Check
+        np.testing.assert_allclose(dp_embedding, my_embedding)
+        dp_force = dp_force.reshape(*my_force.shape)
+        np.testing.assert_allclose(dp_force, my_force)
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/source/tests/pd/model/test_ener_fitting.py b/source/tests/pd/model/test_ener_fitting.py
new file mode 100644
index 0000000000..dd13f139dc
--- /dev/null
+++ b/source/tests/pd/model/test_ener_fitting.py
@@ -0,0 +1,150 @@
+# SPDX-License-Identifier: LGPL-3.0-or-later
+import itertools
+import unittest
+
+import numpy as np
+import paddle
+
+from deepmd.dpmodel.fitting import InvarFitting as DPInvarFitting
+from deepmd.pd.model.descriptor.se_a import (
+    DescrptSeA,
+)
+from deepmd.pd.model.task.ener import (
+    InvarFitting,
+)
+from deepmd.pd.utils import (
+    env,
+)
+from deepmd.pd.utils.utils import (
+    to_numpy_array,
+)
+
+from ...seed import (
+    GLOBAL_SEED,
+)
+from .test_env_mat import (
+    TestCaseSingleFrameWithNlist,
+)
+
+dtype = env.GLOBAL_PD_FLOAT_PRECISION
+
+
+class TestInvarFitting(unittest.TestCase, TestCaseSingleFrameWithNlist):
+    def setUp(self):
+        TestCaseSingleFrameWithNlist.setUp(self)
+
+    def test_consistency(
+        self,
+    ):
+        rng = np.random.default_rng(GLOBAL_SEED)
+        nf, nloc, nnei = self.nlist.shape
+        dd0 = DescrptSeA(self.rcut, self.rcut_smth, self.sel).to(env.DEVICE)
+        rd0, _, _, _, _ = dd0(
+            paddle.to_tensor(self.coord_ext, dtype=dtype).to(device=env.DEVICE),
+            paddle.to_tensor(self.atype_ext, dtype="int64").to(device=env.DEVICE),
+            paddle.to_tensor(self.nlist, dtype="int64").to(device=env.DEVICE),
+        )
+        atype = paddle.to_tensor(self.atype_ext[:, :nloc], dtype="int64").to(
+            device=env.DEVICE
+        )
+
+        for od, mixed_types, nfp, nap, et, nn in itertools.product(
+            [1, 3],
+            [True, False],
+            [0, 3],
+            [0, 4],
+            [[], [0], [1]],
+            [[4, 4, 4], []],
+        ):
+            ft0 = InvarFitting(
+                "foo",
+                self.nt,
+                dd0.dim_out,
+                od,
+                numb_fparam=nfp,
+                numb_aparam=nap,
+                mixed_types=mixed_types,
+                exclude_types=et,
+                neuron=nn,
+                seed=GLOBAL_SEED,
+            ).to(env.DEVICE)
+            ft1 = DPInvarFitting.deserialize(ft0.serialize())
+            ft2 = InvarFitting.deserialize(ft0.serialize())
+
+            if nfp > 0:
+                ifp = paddle.to_tensor(
+                    rng.normal(size=(self.nf, nfp)), dtype=dtype, place=env.DEVICE
+                )
+            else:
+                ifp = None
+            if nap > 0:
+                iap = paddle.to_tensor(
+                    rng.normal(size=(self.nf, self.nloc, nap)),
+                    dtype=dtype,
+                    place=env.DEVICE,
+                )
+            else:
+                iap = None
+
+            ret0 = ft0(rd0, atype, fparam=ifp, aparam=iap)
+            ret1 = ft1(
+                rd0.detach().cpu().numpy(),
+                atype.detach().cpu().numpy(),
+                fparam=to_numpy_array(ifp),
+                aparam=to_numpy_array(iap),
+            )
+            ret2 = ft2(rd0, atype, fparam=ifp, aparam=iap)
+            np.testing.assert_allclose(
+                to_numpy_array(ret0["foo"]),
+                ret1["foo"],
+            )
+            np.testing.assert_allclose(
+                to_numpy_array(ret0["foo"]),
+                to_numpy_array(ret2["foo"]),
+            )
+            self.assertEqual(ft0.get_sel_type(), ft1.get_sel_type())
+
+    def test_jit(
+        self,
+    ):
+        for od, mixed_types, nfp, nap, et in itertools.product(
+            [1, 3],
+            [True, False],
+            [0, 3],
+            [0, 4],
+            [[], [0]],
+        ):
+            ft0 = InvarFitting(
+                "foo",
+                self.nt,
+                9,
+                od,
+                numb_fparam=nfp,
+                numb_aparam=nap,
+                mixed_types=mixed_types,
+                exclude_types=et,
+                seed=GLOBAL_SEED,
+            ).to(env.DEVICE)
+            paddle.jit.to_static(ft0)
+
+    def test_get_set(self):
+        ifn0 = InvarFitting(
+            "energy",
+            self.nt,
+            3,
+            1,
+            seed=GLOBAL_SEED,
+        )
+        rng = np.random.default_rng(GLOBAL_SEED)
+        foo = rng.normal([3, 4])
+        for ii in [
+            "bias_atom_e",
+            "fparam_avg",
+            "fparam_inv_std",
+            "aparam_avg",
+            "aparam_inv_std",
+        ]:
+            ifn0[ii] = paddle.to_tensor(foo, dtype=dtype).to(device=env.DEVICE)
+            np.testing.assert_allclose(
+                foo, np.reshape(ifn0[ii].detach().cpu().numpy(), foo.shape)
+            )
diff --git a/source/tests/pd/model/test_env_mat.py b/source/tests/pd/model/test_env_mat.py
new file mode 100644
index 0000000000..7cbc698264
--- /dev/null
+++ b/source/tests/pd/model/test_env_mat.py
@@ -0,0 +1,187 @@
+# SPDX-License-Identifier: LGPL-3.0-or-later
+import unittest
+
+import numpy as np
+import paddle
+
+from deepmd.dpmodel.utils import (
+    EnvMat,
+)
+from deepmd.pd.model.descriptor.env_mat import (
+    prod_env_mat,
+)
+from deepmd.pd.utils import (
+    env,
+)
+
+from ...seed import (
+    GLOBAL_SEED,
+)
+
+dtype = env.GLOBAL_PD_FLOAT_PRECISION
+
+
+class TestCaseSingleFrameWithNlist:
+    def setUp(self):
+        # nloc == 3, nall == 4
+        self.nloc = 3
+        self.nall = 4
+        self.nf, self.nt = 2, 2
+        self.coord_ext = np.array(
+            [
+                [0, 0, 0],
+                [0, 1, 0],
+                [0, 0, 1],
+                [0, -2, 0],
+            ],
+            dtype=np.float64,
+        ).reshape([1, self.nall, 3])
+        self.atype_ext = np.array([0, 0, 1, 0], dtype="int64").reshape([1, self.nall])
+        self.mapping = np.array([0, 1, 2, 0], dtype="int64").reshape([1, self.nall])
+        # sel = [5, 2]
+        self.sel = [5, 2]
+        self.sel_mix = [7]
+        self.natoms = [3, 3, 2, 1]
+        self.nlist = np.array(
+            [
+                [1, 3, -1, -1, -1, 2, -1],
+                [0, -1, -1, -1, -1, 2, -1],
+                [0, 1, -1, -1, -1, -1, -1],
+            ],
+            dtype="int64",
+        ).reshape([1, self.nloc, sum(self.sel)])
+        self.rcut = 2.2
+        self.rcut_smth = 0.4
+        # permutations
+        self.perm = np.array([2, 0, 1, 3], dtype=np.int32)
+        inv_perm = np.array([1, 2, 0, 3], dtype=np.int32)
+        # permute the coord and atype
+        self.coord_ext = np.concatenate(
+            [self.coord_ext, self.coord_ext[:, self.perm, :]], axis=0
+        ).reshape(self.nf, self.nall * 3)
+        self.atype_ext = np.concatenate(
+            [self.atype_ext, self.atype_ext[:, self.perm]], axis=0
+        )
+        self.mapping = np.concatenate(
+            [self.mapping, self.mapping[:, self.perm]], axis=0
+        )
+
+        # permute the nlist
+        nlist1 = self.nlist[:, self.perm[: self.nloc], :]
+        mask = nlist1 == -1
+        nlist1 = inv_perm[nlist1]
+        nlist1 = np.where(mask, -1, nlist1)
+        self.nlist = np.concatenate([self.nlist, nlist1], axis=0)
+        self.atol = 1e-12
+
+
+class TestCaseSingleFrameWithNlistWithVirtual:
+    def setUp(self):
+        # nloc == 4, nall == 5
+        self.nloc = 4
+        self.nall = 5
+        self.nf, self.nt = 2, 2
+        self.coord_ext = np.array(
+            [
+                [0, 0, 0],
+                [0, 0, 0],
+                [0, 1, 0],
+                [0, 0, 1],
+                [0, -2, 0],
+            ],
+            dtype=np.float64,
+        ).reshape([1, self.nall, 3])
+        self.atype_ext = np.array([0, -1, 0, 1, 0], dtype="int64").reshape(
+            [1, self.nall]
+        )
+        # sel = [5, 2]
+        self.sel = [5, 2]
+        self.sel_mix = [7]
+        self.natoms = [3, 3, 2, 1]
+        self.nlist = np.array(
+            [
+                [2, 4, -1, -1, -1, 3, -1],
+                [-1, -1, -1, -1, -1, -1, -1],
+                [0, -1, -1, -1, -1, 3, -1],
+                [0, 2, -1, -1, -1, -1, -1],
+            ],
+            dtype="int64",
+        ).reshape([1, self.nloc, sum(self.sel)])
+        self.rcut = 2.2
+        self.rcut_smth = 0.4
+        # permutations
+        self.perm = np.array([3, 0, 1, 2, 4], dtype=np.int32)
+        inv_perm = np.argsort(self.perm)
+        # permute the coord and atype
+        self.coord_ext = np.concatenate(
+            [self.coord_ext, self.coord_ext[:, self.perm, :]], axis=0
+        ).reshape(self.nf, self.nall * 3)
+        self.atype_ext = np.concatenate(
+            [self.atype_ext, self.atype_ext[:, self.perm]], axis=0
+        )
+        # permute the nlist
+        nlist1 = self.nlist[:, self.perm[: self.nloc], :]
+        mask = nlist1 == -1
+        nlist1 = inv_perm[nlist1]
+        nlist1 = np.where(mask, -1, nlist1)
+        self.nlist = np.concatenate([self.nlist, nlist1], axis=0)
+        self.get_real_mapping = np.array([[0, 2, 3], [0, 1, 3]], dtype=np.int32)
+        self.atol = 1e-12
+
+
+class TestCaseSingleFrameWithoutNlist:
+    def setUp(self):
+        # nloc == 3
+        self.nloc = 3
+        self.nf, self.nt = 1, 2
+        self.coord = np.array(
+            [
+                [0, 0, 0],
+                [0, 1, 0],
+                [0, 0, 1],
+            ],
+            dtype=np.float64,
+        ).reshape([1, self.nloc * 3])
+        self.atype = np.array([0, 0, 1], dtype="int64").reshape([1, self.nloc])
+        self.cell = 2.0 * np.eye(3).reshape([1, 9])
+        # sel = [16, 8]
+        self.sel = [16, 8]
+        self.sel_mix = [24]
+        self.natoms = [3, 3, 2, 1]
+        self.rcut = 2.2
+        self.rcut_smth = 0.4
+        self.atol = 1e-12
+
+
+# to be merged with the tf test case
+class TestEnvMat(unittest.TestCase, TestCaseSingleFrameWithNlist):
+    def setUp(self):
+        TestCaseSingleFrameWithNlist.setUp(self)
+
+    def test_consistency(
+        self,
+    ):
+        rng = np.random.default_rng(GLOBAL_SEED)
+        nf, nloc, nnei = self.nlist.shape
+        davg = rng.normal(size=(self.nt, nnei, 4))
+        dstd = rng.normal(size=(self.nt, nnei, 4))
+        dstd = 0.1 + np.abs(dstd)
+        em0 = EnvMat(self.rcut, self.rcut_smth)
+        mm0, diff0, ww0 = em0.call(
+            self.coord_ext, self.atype_ext, self.nlist, davg, dstd
+        )
+        mm1, diff1, ww1 = prod_env_mat(
+            paddle.to_tensor(self.coord_ext, dtype=dtype).to(device=env.DEVICE),
+            paddle.to_tensor(self.nlist, dtype="int64").to(device=env.DEVICE),
+            paddle.to_tensor(self.atype_ext[:, :nloc], dtype="int64").to(
+                device=env.DEVICE
+            ),
+            paddle.to_tensor(davg).to(device=env.DEVICE),
+            paddle.to_tensor(dstd).to(device=env.DEVICE),
+            self.rcut,
+            self.rcut_smth,
+        )
+        np.testing.assert_allclose(mm0, mm1.detach().cpu().numpy())
+        np.testing.assert_allclose(diff0, diff1.detach().cpu().numpy())
+        np.testing.assert_allclose(ww0, ww1.detach().cpu().numpy())
+        np.testing.assert_allclose(mm0[0][self.perm[: self.nloc]], mm0[1])
diff --git a/source/tests/pd/model/test_exclusion_mask.py b/source/tests/pd/model/test_exclusion_mask.py
new file mode 100644
index 0000000000..ff479ee7db
--- /dev/null
+++ b/source/tests/pd/model/test_exclusion_mask.py
@@ -0,0 +1,70 @@
+# SPDX-License-Identifier: LGPL-3.0-or-later
+import unittest
+
+import numpy as np
+
+from deepmd.pd.utils import (
+    env,
+)
+from deepmd.pd.utils.exclude_mask import (
+    AtomExcludeMask,
+    PairExcludeMask,
+)
+from deepmd.pd.utils.utils import (
+    to_numpy_array,
+    to_paddle_tensor,
+)
+
+from .test_env_mat import (
+    TestCaseSingleFrameWithNlist,
+)
+
+dtype = env.GLOBAL_PD_FLOAT_PRECISION
+
+
+class TestAtomExcludeMask(unittest.TestCase):
+    def test_build_type_exclude_mask(self):
+        nf = 2
+        nt = 3
+        exclude_types = [0, 2]
+        atype = np.array(
+            [
+                [0, 2, 1, 2, 0, 1, 0],
+                [1, 2, 0, 0, 2, 2, 1],
+            ],
+            dtype=np.int32,
+        ).reshape([nf, 
-1]) + expected_mask = np.array( + [ + [0, 0, 1, 0, 0, 1, 0], + [1, 0, 0, 0, 0, 0, 1], + ] + ).reshape([nf, -1]) + des = AtomExcludeMask(nt, exclude_types=exclude_types) + mask = des(to_paddle_tensor(atype)) + np.testing.assert_equal(to_numpy_array(mask), expected_mask) + + +# to be merged with the tf test case +class TestPairExcludeMask(unittest.TestCase, TestCaseSingleFrameWithNlist): + def setUp(self): + TestCaseSingleFrameWithNlist.setUp(self) + + def test_build_type_exclude_mask(self): + exclude_types = [[0, 1]] + expected_mask = np.array( + [ + [1, 1, 1, 1, 1, 0, 1], + [1, 1, 1, 1, 1, 0, 1], + [0, 0, 1, 1, 1, 1, 1], + [0, 0, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 0, 1], + [1, 1, 1, 1, 1, 0, 1], + ] + ).reshape(self.nf, self.nloc, sum(self.sel)) + des = PairExcludeMask(self.nt, exclude_types=exclude_types).to(env.DEVICE) + mask = des( + to_paddle_tensor(self.nlist), + to_paddle_tensor(self.atype_ext), + ) + np.testing.assert_equal(to_numpy_array(mask), expected_mask) diff --git a/source/tests/pd/model/test_fitting_net.py b/source/tests/pd/model/test_fitting_net.py new file mode 100644 index 0000000000..9a4d4d128f --- /dev/null +++ b/source/tests/pd/model/test_fitting_net.py @@ -0,0 +1,148 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import re +import unittest + +import numpy as np +import paddle +import tensorflow.compat.v1 as tf + +tf.disable_eager_execution() + +from deepmd.pd.model.task import ( + EnergyFittingNet, +) +from deepmd.pd.utils import ( + env, +) +from deepmd.pd.utils.env import ( + GLOBAL_NP_FLOAT_PRECISION, +) +from deepmd.tf.fit.ener import ( + EnerFitting, +) + +from ...seed import ( + GLOBAL_SEED, +) + + +class FakeDescriptor: + def __init__(self, ntypes, embedding_width): + self._ntypes = ntypes + self._dim_out = embedding_width + + def get_ntypes(self): + return self._ntypes + + def get_dim_out(self): + return self._dim_out + + +def gen_key(type_id, layer_id, w_or_b): + return (type_id, layer_id, w_or_b) + + +def base_fitting_net(dp_fn, embedding, natoms, atype): + g = tf.Graph() + with g.as_default(): + t_embedding = tf.placeholder(GLOBAL_NP_FLOAT_PRECISION, [None, None]) + t_natoms = tf.placeholder(tf.int32, [None]) + t_atype = tf.placeholder(tf.int32, [None, None]) + t_energy = dp_fn.build(t_embedding, t_natoms, {"atype": t_atype}) + init_op = tf.global_variables_initializer() + t_vars = {} + for var in tf.global_variables(): + key = None + matched = re.match(r"layer_(\d)_type_(\d)/([a-z]+)", var.name) + if matched: + key = gen_key( + type_id=matched.group(2), + layer_id=matched.group(1), + w_or_b=matched.group(3), + ) + else: + matched = re.match(r"final_layer_type_(\d)/([a-z]+)", var.name) + if matched: + key = gen_key( + type_id=matched.group(1), layer_id=-1, w_or_b=matched.group(2) + ) + if key is not None: + t_vars[key] = var + + with tf.Session(graph=g) as sess: + sess.run(init_op) + energy, values = sess.run( + [t_energy, t_vars], + feed_dict={ + t_embedding: embedding, + t_natoms: natoms, + t_atype: atype, + }, + ) + tf.reset_default_graph() + return energy, values + + +class TestFittingNet(unittest.TestCase): + def setUp(self): + nloc = 7 + self.embedding_width = 30 + self.natoms = np.array([nloc, nloc, 2, 5], dtype=np.int32) + rng = np.random.default_rng(GLOBAL_SEED) + self.embedding = rng.uniform(size=[4, nloc * self.embedding_width]) + self.ntypes = self.natoms.size - 2 + self.n_neuron = [32, 32, 32] + self.atype = np.zeros([4, nloc], dtype=np.int32) + cnt = 0 + for i in range(self.ntypes): + self.atype[:, cnt : cnt + self.natoms[i + 2]] = i + cnt 
+= self.natoms[i + 2] + + fake_d = FakeDescriptor(2, 30) + self.dp_fn = EnerFitting( + fake_d.get_ntypes(), fake_d.get_dim_out(), self.n_neuron + ) + self.dp_fn.bias_atom_e = rng.uniform(size=[self.ntypes]) + + def test_consistency(self): + dp_energy, values = base_fitting_net( + self.dp_fn, self.embedding, self.natoms, self.atype + ) + my_fn = EnergyFittingNet( + self.ntypes, + self.embedding_width, + neuron=self.n_neuron, + bias_atom_e=self.dp_fn.bias_atom_e, + mixed_types=False, + ).to(env.DEVICE) + for name, param in my_fn.named_parameters(): + matched = re.match( + r"filter_layers\.networks\.(\d).layers\.(\d)\.([a-z]+)", name + ) + key = None + if matched: + if int(matched.group(2)) == len(self.n_neuron): + layer_id = -1 + else: + layer_id = matched.group(2) + key = gen_key( + type_id=matched.group(1), + layer_id=layer_id, + w_or_b=matched.group(3), + ) + assert key is not None + var = values[key] + with paddle.no_grad(): + # Keep parameter value consistency between 2 implementations + paddle.assign(var, param) + embedding = paddle.to_tensor(self.embedding) + embedding = embedding.reshape([4, -1, self.embedding_width]) + atype = paddle.to_tensor(self.atype) + ret = my_fn(embedding.to(env.DEVICE), atype.to(env.DEVICE)) + my_energy = ret["energy"] + my_energy = my_energy.detach().cpu() + np.testing.assert_allclose(dp_energy, my_energy.numpy().reshape([-1])) + + +if __name__ == "__main__": + unittest.main() diff --git a/source/tests/pd/model/test_force_grad.py b/source/tests/pd/model/test_force_grad.py new file mode 100644 index 0000000000..d7b569ef38 --- /dev/null +++ b/source/tests/pd/model/test_force_grad.py @@ -0,0 +1,111 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import copy +import json +import unittest +from pathlib import ( + Path, +) +from typing import ( + Optional, +) + +import numpy as np +import paddle + +from deepmd.pd.model.model import ( + get_model, +) +from deepmd.pd.utils import ( + env, +) +from deepmd.utils.data import ( + DeepmdData, +) + +from ...seed import ( + GLOBAL_SEED, +) + + +class CheckSymmetry(DeepmdData): + def __init__( + self, + sys_path: str, + type_map: Optional[list[str]] = None, + ): + super().__init__(sys_path=sys_path, type_map=type_map) + self.add("energy", 1, atomic=False, must=False, high_prec=True) + self.add("force", 3, atomic=True, must=False, high_prec=False) + self.add("virial", 9, atomic=False, must=False, high_prec=False) + + def get_disturb(self, index, atom_index, axis_index, delta): + for i in range( + 0, len(self.dirs) + 1 + ): # note: if the data sets were merged, the prefix sum would not be needed here + if index < self.prefix_sum[i]: + break + frames = self._load_set(self.dirs[i - 1]) + tmp = copy.deepcopy(frames["coord"].reshape(self.nframes, -1, 3)) + tmp[:, atom_index, axis_index] += delta + frames["coord"] = tmp + frame = self._get_subdata(frames, index - self.prefix_sum[i - 1]) + frame = self.reformat_data_torch(frame) + return frame + + +def get_data(batch): + inputs = {} + for key in ["coord", "atype", "box"]: + inputs[key] = batch[key].unsqueeze(0).to(env.DEVICE) + return inputs + + +class TestForceGrad(unittest.TestCase): + def setUp(self): + with open(str(Path(__file__).parent / "water/se_e2_a.json")) as fin: + self.config = json.load(fin) + data_file = [str(Path(__file__).parent / "water/data/data_0")] + self.config["training"]["training_data"]["systems"] = data_file + self.config["training"]["validation_data"]["systems"] = data_file + self.system_index = 0 + self.batch_index = 0 + self.get_dataset(self.system_index,
self.batch_index) + self.get_model() + + def get_model(self): + self.model = get_model(self.config["model"]).to(env.DEVICE) + + def get_dataset(self, system_index=0, batch_index=0): + systems = self.config["training"]["training_data"]["systems"] + rcut = self.config["model"]["descriptor"]["rcut"] + sel = self.config["model"]["descriptor"]["sel"] + sec = paddle.cumsum(paddle.to_tensor(sel), axis=0) + type_map = self.config["model"]["type_map"] + self.dpdatasystem = CheckSymmetry( + sys_path=systems[system_index], type_map=type_map + ) + self.origin_batch = self.dpdatasystem.get_item_paddle(batch_index) + + @unittest.skip("it can be replaced by autodiff") + def test_force_grad(self, threshold=1e-2, delta0=1e-6, seed=20): + rng = np.random.default_rng(GLOBAL_SEED) + result0 = self.model(**get_data(self.origin_batch)) + np.random.default_rng(seed) + errors = np.zeros((self.dpdatasystem.natoms, 3)) + for atom_index in range(self.dpdatasystem.natoms): + for axis_index in range(3): + delta = rng.random() * delta0 + disturb_batch = self.dpdatasystem.get_disturb( + self.batch_index, atom_index, axis_index, delta + ) + disturb_result = self.model(**get_data(disturb_batch)) + disturb_force = -(disturb_result["energy"] - result0["energy"]) / delta + disturb_error = ( + result0["force"][0, atom_index, axis_index] - disturb_force + ) + errors[atom_index, axis_index] = disturb_error.detach().cpu().numpy() + self.assertTrue(np.abs(errors).max() < threshold, msg=str(np.abs(errors).max())) + + +if __name__ == "__main__": + unittest.main() diff --git a/source/tests/pd/model/test_forward_lower.py b/source/tests/pd/model/test_forward_lower.py new file mode 100644 index 0000000000..ac8d0f54fc --- /dev/null +++ b/source/tests/pd/model/test_forward_lower.py @@ -0,0 +1,208 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import copy +import unittest + +import numpy as np +import paddle + +from deepmd.pd.model.model import ( + get_model, +) +from deepmd.pd.utils import ( + decomp, + env, +) +from deepmd.pd.utils.nlist import ( + extend_input_and_build_neighbor_list, +) + +from ...seed import ( + GLOBAL_SEED, +) +from ..common import ( + eval_model, +) +from .test_permutation import ( # model_dpau, + model_dpa1, + model_dpa2, + model_se_e2_a, + model_spin, + model_zbl, +) + +dtype = paddle.float64 + + +def reduce_tensor(extended_tensor, mapping, nloc: int): + nframes, nall = extended_tensor.shape[:2] + ext_dims = extended_tensor.shape[2:] + reduced_tensor = paddle.zeros( + [nframes, nloc, *ext_dims], + dtype=extended_tensor.dtype, + ).to(device=extended_tensor.place) + mldims = list(mapping.shape) + mapping = mapping.reshape(mldims + [1] * len(ext_dims)).expand( + [-1] * len(mldims) + list(ext_dims) + ) + # nf x nloc x (*ext_dims) + reduced_tensor = decomp.scatter_reduce( + reduced_tensor, + 1, + index=mapping, + src=extended_tensor, + reduce="sum", + ) + return reduced_tensor + + +class ForwardLowerTest: + def test( + self, + ): + prec = self.prec + natoms = 5 + cell = 4.0 * paddle.eye(3, dtype=dtype).to(device=env.DEVICE) + generator = paddle.seed(GLOBAL_SEED) + coord = 3.0 * paddle.rand([natoms, 3], dtype=dtype).to(device=env.DEVICE) + spin = 0.5 * paddle.rand([natoms, 3], dtype=dtype).to(device=env.DEVICE) + atype = paddle.to_tensor([0, 0, 0, 1, 1], dtype=paddle.int64).to( + device=env.DEVICE + ) + test_spin = getattr(self, "test_spin", False) + if not test_spin: + test_keys = ["energy", "force", "virial"] + else: + test_keys = ["energy", "force", "force_mag"] + + result_forward = eval_model( + self.model, + 
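# whole-cell reference evaluation; forward_lower below must reproduce it +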
coord.unsqueeze(0), + cell.unsqueeze(0), + atype, + spins=spin.unsqueeze(0), + ) + ( + extended_coord, + extended_atype, + mapping, + nlist, + ) = extend_input_and_build_neighbor_list( + coord.unsqueeze(0), + atype.unsqueeze(0), + self.model.get_rcut() + 1.0 + if test_spin + else self.model.get_rcut(), # buffer region for spin nlist + self.model.get_sel(), + mixed_types=self.model.mixed_types(), + box=cell.unsqueeze(0), + ) + extended_spin = decomp.take_along_axis( + spin.unsqueeze(0), indices=mapping.unsqueeze(-1).tile((1, 1, 3)), axis=1 + ) + input_dict = { + "extended_coord": extended_coord, + "extended_atype": extended_atype, + "nlist": nlist, + "mapping": mapping, + "do_atomic_virial": False, + } + if test_spin: + input_dict["extended_spin"] = extended_spin + result_forward_lower = self.model.forward_lower(**input_dict) + for key in test_keys: + if key in ["energy"]: + np.testing.assert_allclose( + result_forward_lower[key].numpy(), + result_forward[key].numpy(), + rtol=prec, + atol=prec, + ) + elif key in ["force", "force_mag"]: + reduced_vv = reduce_tensor( + result_forward_lower[f"extended_{key}"], mapping, natoms + ) + np.testing.assert_allclose( + reduced_vv.numpy(), + result_forward[key].numpy(), + rtol=prec, + atol=prec, + ) + elif key == "virial": + if not hasattr(self, "test_virial") or self.test_virial: + np.testing.assert_allclose( + result_forward_lower[key].numpy(), + result_forward[key].numpy(), + rtol=prec, + atol=prec, + ) + else: + raise RuntimeError(f"Unexpected test key {key}") + + +class TestEnergyModelSeA(unittest.TestCase, ForwardLowerTest): + def setUp(self): + self.prec = 1e-10 + model_params = copy.deepcopy(model_se_e2_a) + self.model = get_model(model_params).to(env.DEVICE) + + +@unittest.skip("Skip for not implemented yet") +class TestEnergyModelDPA1(unittest.TestCase, ForwardLowerTest): + def setUp(self): + self.prec = 1e-10 + model_params = copy.deepcopy(model_dpa1) + self.model = get_model(model_params).to(env.DEVICE) + + +@unittest.skip("Skip for not implemented yet") +class TestEnergyModelDPA2(unittest.TestCase, ForwardLowerTest): + def setUp(self): + self.prec = 1e-10 + model_params = copy.deepcopy(model_dpa2) + self.model = get_model(model_params).to(env.DEVICE) + + +@unittest.skip("Skip for not implemented yet") +class TestEnergyModelZBL(unittest.TestCase, ForwardLowerTest): + def setUp(self): + self.prec = 1e-10 + model_params = copy.deepcopy(model_zbl) + self.model = get_model(model_params).to(env.DEVICE) + + +@unittest.skip("Skip for not implemented yet") +class TestEnergyModelSpinSeA(unittest.TestCase, ForwardLowerTest): + def setUp(self): + self.prec = 1e-10 + model_params = copy.deepcopy(model_spin) + self.test_spin = True + self.model = get_model(model_params).to(env.DEVICE) + + +@unittest.skip("Skip for not implemented yet") +class TestEnergyModelSpinDPA1(unittest.TestCase, ForwardLowerTest): + def setUp(self): + self.prec = 1e-10 + model_params = copy.deepcopy(model_spin) + model_params["descriptor"] = copy.deepcopy(model_dpa1)["descriptor"] + # double sel for virtual atoms to avoid large error + model_params["descriptor"]["sel"] *= 2 + self.test_spin = True + self.model = get_model(model_params).to(env.DEVICE) + + +@unittest.skip("Skip for not implemented yet") +class TestEnergyModelSpinDPA2(unittest.TestCase, ForwardLowerTest): + def setUp(self): + self.prec = 1e-10 + model_params = copy.deepcopy(model_spin) + model_params["descriptor"] = copy.deepcopy(model_dpa2)["descriptor"] + # double sel for virtual atoms to avoid large error + 
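# each spin-polarized atom brings a virtual atom, so the neighbor budget is roughly doubled +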
model_params["descriptor"]["repinit"]["nsel"] *= 2 + model_params["descriptor"]["repformer"]["nsel"] *= 2 + self.test_spin = True + self.model = get_model(model_params).to(env.DEVICE) + + +if __name__ == "__main__": + unittest.main() diff --git a/source/tests/pd/model/test_get_model.py b/source/tests/pd/model/test_get_model.py new file mode 100644 index 0000000000..7ace7c4e43 --- /dev/null +++ b/source/tests/pd/model/test_get_model.py @@ -0,0 +1,113 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import copy +import unittest + +import numpy as np +import paddle + +from deepmd.pd.model.model import ( + get_model, +) +from deepmd.pd.utils import ( + env, +) + +dtype = paddle.float64 + +model_se_e2_a = { + "type_map": ["O", "H", "B"], + "descriptor": { + "type": "se_e2_a", + "sel": [46, 92, 4], + "rcut_smth": 0.50, + "rcut": 4.00, + "neuron": [25, 50, 100], + "resnet_dt": False, + "axis_neuron": 16, + "seed": 1, + }, + "fitting_net": { + "neuron": [24, 24, 24], + "resnet_dt": True, + "seed": 1, + }, + "data_stat_nbatch": 20, + "atom_exclude_types": [1], + "pair_exclude_types": [[1, 2]], + "preset_out_bias": { + "energy": [ + None, + [1.0], + [3.0], + ] + }, +} + + +class TestGetModel(unittest.TestCase): + def test_model_attr(self): + model_params = copy.deepcopy(model_se_e2_a) + self.model = get_model(model_params).to(env.DEVICE) + atomic_model = self.model.atomic_model + self.assertEqual(atomic_model.type_map, ["O", "H", "B"]) + self.assertEqual( + atomic_model.preset_out_bias, + { + "energy": [ + None, + np.array([1.0]), + np.array([3.0]), + ] + }, + ) + self.assertEqual(atomic_model.atom_exclude_types, [1]) + self.assertEqual(atomic_model.pair_exclude_types, [[1, 2]]) + + def test_model_attr_energy_float(self): + model_params = copy.deepcopy(model_se_e2_a) + model_params["preset_out_bias"] = {"energy": ["1.", 3, None]} + self.model = get_model(model_params).to(env.DEVICE) + atomic_model = self.model.atomic_model + self.assertEqual(atomic_model.type_map, ["O", "H", "B"]) + self.assertEqual( + atomic_model.preset_out_bias, + { + "energy": [ + np.array([1.0]), + np.array([3.0]), + None, + ] + }, + ) + self.assertEqual(atomic_model.atom_exclude_types, [1]) + self.assertEqual(atomic_model.pair_exclude_types, [[1, 2]]) + + def test_model_attr_energy_unsupported_type(self): + model_params = copy.deepcopy(model_se_e2_a) + model_params["preset_out_bias"] = {"energy": [1.0 + 2.0j, 3, None]} + with self.assertRaises(ValueError): + self.model = get_model(model_params).to(env.DEVICE) + + def test_model_attr_energy_unsupported_value(self): + model_params = copy.deepcopy(model_se_e2_a) + model_params["preset_out_bias"] = {"energy": ["1.0 + 2.0j", 3, None]} + with self.assertRaises(ValueError): + self.model = get_model(model_params).to(env.DEVICE) + + def test_notset_model_attr(self): + model_params = copy.deepcopy(model_se_e2_a) + model_params.pop("atom_exclude_types") + model_params.pop("pair_exclude_types") + model_params.pop("preset_out_bias") + self.model = get_model(model_params).to(env.DEVICE) + atomic_model = self.model.atomic_model + self.assertEqual(atomic_model.type_map, ["O", "H", "B"]) + self.assertEqual(atomic_model.preset_out_bias, None) + self.assertEqual(atomic_model.atom_exclude_types, []) + self.assertEqual(atomic_model.pair_exclude_types, []) + + def test_preset_wrong_len(self): + model_params = copy.deepcopy(model_se_e2_a) + model_params["preset_out_bias"] = {"energy": [None]} + with self.assertRaises(ValueError): + self.model = get_model(model_params).to(env.DEVICE) diff --git 
a/source/tests/pd/model/test_jit.py b/source/tests/pd/model/test_jit.py new file mode 100644 index 0000000000..28ab499bf1 --- /dev/null +++ b/source/tests/pd/model/test_jit.py @@ -0,0 +1,83 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import json +import os +import shutil +import unittest +from copy import ( + deepcopy, +) +from pathlib import ( + Path, +) + +import paddle +from paddle.static import ( + InputSpec, +) + +from deepmd.pd.entrypoints.main import ( + get_trainer, +) +from deepmd.pd.infer import ( + inference, +) + +from .test_permutation import ( + model_se_e2_a, +) + + +class JITTest: + def test_jit(self): + trainer = get_trainer(deepcopy(self.config)) + trainer.run() + paddle.set_flags( + { + "FLAGS_save_cf_stack_op": 1, + "FLAGS_prim_enable_dynamic": 1, + "FLAGS_enable_pir_api": 1, + } + ) + model = paddle.jit.to_static( + inference.Tester("./model.pd").model, full_graph=True + ) + paddle.jit.save( + model, + "./frozen_model", + input_spec=[ + InputSpec([-1, -1, 3], dtype="float64"), + InputSpec([-1, -1], dtype="int32"), + InputSpec([-1, -1, -1], dtype="int32"), + ], + ) + + def tearDown(self): + for f in os.listdir("."): + if f.startswith("model") and f.endswith("pd"): + os.remove(f) + if f in ["lcurve.out", "frozen_model.json", "frozen_model.pdiparams"]: + os.remove(f) + if f in ["stat_files"]: + shutil.rmtree(f) + if f in ["checkpoint"]: + os.remove(f) + + +class TestEnergyModelSeA(unittest.TestCase, JITTest): + def setUp(self): + input_json = str(Path(__file__).parent / "water/se_atten.json") + with open(input_json) as f: + self.config = json.load(f) + data_file = [str(Path(__file__).parent / "water/data/data_0")] + self.config["training"]["training_data"]["systems"] = data_file + self.config["training"]["validation_data"]["systems"] = data_file + self.config["model"] = deepcopy(model_se_e2_a) + self.config["training"]["numb_steps"] = 10 + self.config["training"]["save_freq"] = 10 + + def tearDown(self): + JITTest.tearDown(self) + + +if __name__ == "__main__": + unittest.main() diff --git a/source/tests/pd/model/test_mlp.py b/source/tests/pd/model/test_mlp.py new file mode 100644 index 0000000000..90653644d3 --- /dev/null +++ b/source/tests/pd/model/test_mlp.py @@ -0,0 +1,283 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import itertools +import unittest + +import numpy as np +import paddle + +from deepmd.dpmodel.utils import EmbeddingNet as DPEmbeddingNet +from deepmd.dpmodel.utils import FittingNet as DPFittingNet +from deepmd.dpmodel.utils import ( + NativeLayer, + NativeNet, +) +from deepmd.pd.model.network.mlp import ( + MLP, + EmbeddingNet, + FittingNet, + MLPLayer, +) +from deepmd.pd.utils import ( + env, +) +from deepmd.pd.utils.env import ( + PRECISION_DICT, +) + + +def get_tols(prec): + if prec in ["single", "float32"]: + rtol, atol = 0.0, 1e-4 + elif prec in ["double", "float64"]: + rtol, atol = 0.0, 1e-12 + # elif prec in ["half", "float16"]: + # rtol, atol=1e-2, 0 + else: + raise ValueError(f"unknown prec {prec}") + return rtol, atol + + +class TestMLPLayer(unittest.TestCase): + def setUp(self): + self.test_cases = itertools.product( + [(5, 5), (5, 10), (5, 8), (8, 5)], # inp, out + [True, False], # bias + [True, False], # use time step + ["tanh", "none"], # activation + [True, False], # resnet + [None, [4], [3, 2]], # prefix shapes + ["float32", "double"], # precision + ) + + def test_match_native_layer( + self, + ): + for (ninp, nout), bias, ut, ac, resnet, ashp, prec in self.test_cases: + # input + inp_shap = [ninp] + if ashp is not None: + 
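# prepend extra leading axes so the layer is also exercised on batched input +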
inp_shap = ashp + inp_shap + rtol, atol = get_tols(prec) + dtype = PRECISION_DICT[prec] + xx = ( + paddle.arange(np.prod(inp_shap), dtype=dtype) + .to(device=env.DEVICE) + .reshape(inp_shap) + ) + # def mlp layer + ml = MLPLayer(ninp, nout, bias, ut, ac, resnet, precision=prec).to( + env.DEVICE + ) + # check consistency + nl = NativeLayer.deserialize(ml.serialize()) + np.testing.assert_allclose( + ml.forward(xx).detach().cpu().numpy(), + nl.call(xx.detach().cpu().numpy()), + rtol=rtol, + atol=atol, + err_msg=f"(i={ninp}, o={nout}) bias={bias} use_dt={ut} act={ac} resnet={resnet} prec={prec}", + ) + # check self-consistency + ml1 = MLPLayer.deserialize(ml.serialize()).to(env.DEVICE) + np.testing.assert_allclose( + ml.forward(xx).detach().cpu().numpy(), + ml1.forward(xx).detach().cpu().numpy(), + rtol=rtol, + atol=atol, + err_msg=f"(i={ninp}, o={nout}) bias={bias} use_dt={ut} act={ac} resnet={resnet} prec={prec}", + ) + + def test_jit(self): + for (ninp, nout), bias, ut, ac, resnet, _, prec in self.test_cases: + ml = MLPLayer(ninp, nout, bias, ut, ac, resnet, precision=prec) + model = paddle.jit.to_static(ml) + ml1 = MLPLayer.deserialize(ml.serialize()) + model = paddle.jit.to_static(ml1) + + +class TestMLP(unittest.TestCase): + def setUp(self): + self.test_cases = itertools.product( + [[2, 2, 4, 8], [1, 3, 3]], # inp and hiddens + [True, False], # bias + [True, False], # use time step + ["tanh", "none"], # activation + [True, False], # resnet + [None, [4], [3, 2]], # prefix shapes + ["float32", "double"], # precision + ) + + def test_match_native_net( + self, + ): + for ndims, bias, ut, ac, resnet, ashp, prec in self.test_cases: + # input + inp_shap = [ndims[0]] + if ashp is not None: + inp_shap = ashp + inp_shap + rtol, atol = get_tols(prec) + dtype = PRECISION_DICT[prec] + xx = ( + paddle.arange(np.prod(inp_shap), dtype=dtype) + .to(device=env.DEVICE) + .reshape(inp_shap) + ) + # def MLP + layers = [] + for ii in range(1, len(ndims)): + layers.append( + MLPLayer( + ndims[ii - 1], ndims[ii], bias, ut, ac, resnet, precision=prec + ).serialize() + ) + ml = MLP(layers).to(env.DEVICE) + # check consistency + nl = NativeNet.deserialize(ml.serialize()) + np.testing.assert_allclose( + ml.forward(xx).detach().cpu().numpy(), + nl.call(xx.detach().cpu().numpy()), + rtol=rtol, + atol=atol, + err_msg=f"net={ndims} bias={bias} use_dt={ut} act={ac} resnet={resnet} prec={prec}", + ) + # check self-consistency + ml1 = MLP.deserialize(ml.serialize()).to(env.DEVICE) + np.testing.assert_allclose( + ml.forward(xx).detach().cpu().numpy(), + ml1.forward(xx).detach().cpu().numpy(), + rtol=rtol, + atol=atol, + err_msg=f"net={ndims} bias={bias} use_dt={ut} act={ac} resnet={resnet} prec={prec}", + ) + + def test_jit(self): + for ndims, bias, ut, ac, resnet, _, prec in self.test_cases: + layers = [] + for ii in range(1, len(ndims)): + layers.append( + MLPLayer( + ndims[ii - 1], ndims[ii], bias, ut, ac, resnet, precision=prec + ).serialize() + ) + ml = MLP(layers) + model = paddle.jit.to_static(ml) + ml1 = MLP.deserialize(ml.serialize()) + model = paddle.jit.to_static(ml1) + + +class TestEmbeddingNet(unittest.TestCase): + def setUp(self): + self.test_cases = itertools.product( + [1, 3], # inp + [[24, 48, 96], [24, 36]], # and hiddens + ["tanh", "none"], # activation + [True, False], # resnet_dt + ["float32", "double"], # precision + ) + + def test_match_embedding_net( + self, + ): + for idim, nn, act, idt, prec in self.test_cases: + # input + rtol, atol = get_tols(prec) + dtype = PRECISION_DICT[prec] + xx =
paddle.arange(idim, dtype=dtype).to(device=env.DEVICE) + # def MLP + ml = EmbeddingNet(idim, nn, act, idt, prec).to(env.DEVICE) + # check consistency + nl = DPEmbeddingNet.deserialize(ml.serialize()) + np.testing.assert_allclose( + ml.forward(xx).detach().cpu().numpy(), + nl.call(xx.detach().cpu().numpy()), + rtol=rtol, + atol=atol, + err_msg=f"idim={idim} nn={nn} use_dt={idt} act={act} prec={prec}", + ) + # check self-consistency + ml1 = EmbeddingNet.deserialize(ml.serialize()).to(env.DEVICE) + np.testing.assert_allclose( + ml.forward(xx).detach().cpu().numpy(), + ml1.forward(xx).detach().cpu().numpy(), + rtol=rtol, + atol=atol, + err_msg=f"idim={idim} nn={nn} use_dt={idt} act={act} prec={prec}", + ) + + def test_jit( + self, + ): + for idim, nn, act, idt, prec in self.test_cases: + # def MLP + ml = EmbeddingNet(idim, nn, act, idt, prec).to(env.DEVICE) + ml1 = EmbeddingNet.deserialize(ml.serialize()).to(env.DEVICE) + model = paddle.jit.to_static(ml) + model = paddle.jit.to_static(ml1) + + +class TestFittingNet(unittest.TestCase): + def setUp(self): + self.test_cases = itertools.product( + [1, 3], # inp + [1, 5], # out + [[24, 48, 96], [24, 36]], # and hiddens + ["tanh", "none"], # activation + [True, False], # resnet_dt + ["float32", "double"], # precision + [True, False], # bias_out + ) + + def test_match_fitting_net( + self, + ): + for idim, odim, nn, act, idt, prec, ob in self.test_cases: + # input + rtol, atol = get_tols(prec) + dtype = PRECISION_DICT[prec] + xx = paddle.arange(idim, dtype=dtype).to(device=env.DEVICE) + # def MLP + ml = FittingNet( + idim, + odim, + neuron=nn, + activation_function=act, + resnet_dt=idt, + precision=prec, + bias_out=ob, + ).to(env.DEVICE) + # check consistency + nl = DPFittingNet.deserialize(ml.serialize()) + np.testing.assert_allclose( + ml.forward(xx).detach().cpu().numpy(), + nl.call(xx.detach().cpu().numpy()), + rtol=rtol, + atol=atol, + err_msg=f"idim={idim} nn={nn} use_dt={idt} act={act} prec={prec}", + ) + # check self-consistency + ml1 = FittingNet.deserialize(ml.serialize()).to(env.DEVICE) + np.testing.assert_allclose( + ml.forward(xx).detach().cpu().numpy(), + ml1.forward(xx).detach().cpu().numpy(), + rtol=rtol, + atol=atol, + err_msg=f"idim={idim} nn={nn} use_dt={idt} act={act} prec={prec}", + ) + + def test_jit( + self, + ): + for idim, odim, nn, act, idt, prec, ob in self.test_cases: + # def MLP + ml = FittingNet( + idim, + odim, + neuron=nn, + activation_function=act, + resnet_dt=idt, + precision=prec, + bias_out=ob, + ).to(env.DEVICE) + ml1 = FittingNet.deserialize(ml.serialize()).to(env.DEVICE) + model = paddle.jit.to_static(ml) + model = paddle.jit.to_static(ml1) diff --git a/source/tests/pd/model/test_model.py b/source/tests/pd/model/test_model.py new file mode 100644 index 0000000000..2566a9ce41 --- /dev/null +++ b/source/tests/pd/model/test_model.py @@ -0,0 +1,433 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import json +import unittest +from typing import ( + NamedTuple, +) + +import numpy as np +import paddle +import tensorflow.compat.v1 as tf + +from deepmd.pd.utils import ( + env, +) + +tf.disable_eager_execution() + +from pathlib import ( + Path, +) + +from deepmd.dpmodel.utils.learning_rate import LearningRateExp as MyLRExp +from deepmd.pd.loss import ( + EnergyStdLoss, +) +from deepmd.pd.model.model import ( + get_model, +) +from deepmd.pd.utils.dataloader import ( + DpLoaderSet, +) +from deepmd.pd.utils.env import ( + DEVICE, +) +from deepmd.tf.common import ( + expand_sys_str, +) +from deepmd.tf.descriptor import 
DescrptSeA as DescrptSeA_tf +from deepmd.tf.fit import ( + EnerFitting, +) +from deepmd.tf.loss import ( + EnerStdLoss, +) +from deepmd.tf.model import ( + EnerModel, +) +from deepmd.tf.utils.data_system import ( + DeepmdDataSystem, +) +from deepmd.tf.utils.learning_rate import ( + LearningRateExp, +) + +from ..test_finetune import ( + energy_data_requirement, +) + + +class VariableState(NamedTuple): + value: np.ndarray + gradient: np.ndarray + + +def paddle2tf(paddle_name, last_layer_id=None): + fields = paddle_name.split(".") + offset = int(fields[3] == "networks") + 1 + element_id = int(fields[2 + offset]) + if fields[1] == "descriptor": + if fields[2].startswith("compress_"): + return None + layer_id = int(fields[4 + offset]) + 1 + weight_type = fields[5 + offset] + ret = "filter_type_all/%s_%d_%d:0" % (weight_type, layer_id, element_id) + elif fields[1] == "fitting_net": + layer_id = int(fields[4 + offset]) + weight_type = fields[5 + offset] + if layer_id != last_layer_id: + ret = "layer_%d_type_%d/%s:0" % (layer_id, element_id, weight_type) + else: + ret = "final_layer_type_%d/%s:0" % (element_id, weight_type) + else: + raise RuntimeError(f"Unexpected parameter name: {paddle_name}") + return ret + + +class DpTrainer: + def __init__(self) -> None: + with open(str(Path(__file__).parent / "water/se_e2_a.json")) as fin: + content = fin.read() + config = json.loads(content) + data_file = [str(Path(__file__).parent / "water/data/data_0")] + config["training"]["training_data"]["systems"] = data_file + config["training"]["validation_data"]["systems"] = data_file + model_config = config["model"] + self.rcut = model_config["descriptor"]["rcut"] + self.rcut_smth = model_config["descriptor"]["rcut_smth"] + self.sel = model_config["descriptor"]["sel"] + self.systems = config["training"]["validation_data"]["systems"] + if isinstance(self.systems, str): + self.systems = expand_sys_str(self.systems) + self.batch_size = config["training"]["training_data"]["batch_size"] + self.type_map = model_config["type_map"] + self.filter_neuron = model_config["descriptor"]["neuron"] + self.axis_neuron = model_config["descriptor"]["axis_neuron"] + self.n_neuron = model_config["fitting_net"]["neuron"] + self.data_stat_nbatch = 3 + self.start_lr = 0.001 + self.stop_lr = 3.51e-8 + self.decay_steps = 500 + self.stop_steps = 1600 + self.start_pref_e = 1.0 + self.limit_pref_e = 2.0 + self.start_pref_f = 2.0 + self.limit_pref_f = 1.0 + self.ntypes = len(self.type_map) + + def get_intermediate_state(self, num_steps=1): + dp_model = self._get_dp_model() + dp_loss = self._get_dp_loss() + dp_lr = self._get_dp_lr() + dp_ds = self._get_dp_dataset() + dp_ds.add_data_requirements(dp_model.input_requirement) + dp_ds.add_data_requirements(dp_loss.label_requirement) + dp_model.data_stat(dp_ds) + + # Build graph + g = tf.Graph() + with g.as_default(): + place_holders = self._get_dp_placeholders(dp_ds) + model_pred = dp_model.build( + coord_=place_holders["coord"], + atype_=place_holders["type"], + natoms=place_holders["natoms_vec"], + box=place_holders["box"], + mesh=place_holders["default_mesh"], + input_dict=place_holders, + ) + global_step = tf.train.get_or_create_global_step() + learning_rate = dp_lr.build(global_step, self.stop_steps) + l2_l, _ = dp_loss.build( + learning_rate=learning_rate, + natoms=place_holders["natoms_vec"], + model_dict=model_pred, + label_dict=place_holders, + suffix="test", + ) + t_vars = tf.trainable_variables() + optimizer = tf.train.AdamOptimizer(learning_rate) + t_grad_and_vars = 
optimizer.compute_gradients(l2_l, t_vars) + train_op = optimizer.apply_gradients(t_grad_and_vars, global_step) + init_op = tf.global_variables_initializer() + t_heads = { + "loss": l2_l, + "energy": model_pred["energy"], + "force": model_pred["force"], + "virial": model_pred["virial"], + "atom_virial": model_pred["atom_virial"], + } + + # Get statistics of each component + stat_dict = { + "descriptor.mean": dp_model.descrpt.davg, + "descriptor.stddev": dp_model.descrpt.dstd, + "fitting_net.bias_atom_e": dp_model.fitting.bias_atom_e, + } + + # Get variables and their gradients + with tf.Session(graph=g) as sess: + sess.run(init_op) + for _ in range(num_steps): + batch = dp_ds.get_batch() + feeds = self._get_feed_dict(batch, place_holders) + sess.run(train_op, feed_dict=feeds) + + batch = dp_ds.get_batch() + feeds = self._get_feed_dict(batch, place_holders) + grads_and_vars, head_dict = sess.run( + [t_grad_and_vars, t_heads], feed_dict=feeds + ) + vs_dict = {} + for idx, one in enumerate(t_vars): + grad, var = grads_and_vars[idx] + vs_dict[one.name] = VariableState(var, grad) + + tf.reset_default_graph() + # Used for reproducing + return batch, head_dict, stat_dict, vs_dict + + def _get_dp_dataset(self): + data = DeepmdDataSystem( + systems=self.systems, + batch_size=self.batch_size, + test_size=1, + rcut=self.rcut, + type_map=self.type_map, + trn_all_set=True, + ) + return data + + def _get_dp_model(self): + dp_descrpt = DescrptSeA_tf( + rcut=self.rcut, + rcut_smth=self.rcut_smth, + sel=self.sel, + neuron=self.filter_neuron, + axis_neuron=self.axis_neuron, + ) + dp_fitting = EnerFitting( + dp_descrpt.get_ntypes(), dp_descrpt.get_dim_out(), neuron=self.n_neuron + ) + return EnerModel( + dp_descrpt, + dp_fitting, + type_map=self.type_map, + data_stat_nbatch=self.data_stat_nbatch, + ) + + def _get_dp_loss(self): + return EnerStdLoss( + starter_learning_rate=self.start_lr, + start_pref_e=self.start_pref_e, + limit_pref_e=self.limit_pref_e, + start_pref_f=self.start_pref_f, + limit_pref_f=self.limit_pref_f, + ) + + def _get_dp_lr(self): + return LearningRateExp( + start_lr=self.start_lr, stop_lr=self.stop_lr, decay_steps=self.decay_steps + ) + + def _get_dp_placeholders(self, dataset): + place_holders = {} + data_dict = dataset.get_data_dict() + for kk in data_dict.keys(): + if kk == "type": + continue + prec = tf.float64 + place_holders[kk] = tf.placeholder(prec, [None], name="t_" + kk) + place_holders["find_" + kk] = tf.placeholder( + tf.float32, name="t_find_" + kk + ) + place_holders["type"] = tf.placeholder(tf.int32, [None], name="t_type") + place_holders["natoms_vec"] = tf.placeholder( + tf.int32, [self.ntypes + 2], name="t_natoms" + ) + place_holders["default_mesh"] = tf.placeholder(tf.int32, [None], name="t_mesh") + place_holders["is_training"] = tf.placeholder(tf.bool) + return place_holders + + def _get_feed_dict(self, batch, place_holders): + feed_dict = {} + for kk in batch.keys(): + if kk == "find_type" or kk == "type": + continue + if "find_" in kk: + feed_dict[place_holders[kk]] = batch[kk] + else: + feed_dict[place_holders[kk]] = np.reshape(batch[kk], [-1]) + for ii in ["type"]: + feed_dict[place_holders[ii]] = np.reshape(batch[ii], [-1]) + for ii in ["natoms_vec", "default_mesh"]: + feed_dict[place_holders[ii]] = batch[ii] + feed_dict[place_holders["is_training"]] = True + return feed_dict + + +class TestEnergy(unittest.TestCase): + def setUp(self) -> None: + self.dp_trainer = DpTrainer() + self.wanted_step = 0 + for key in dir(self.dp_trainer): + if not key.startswith("_") or 
key == "get_intermediate_state": + value = getattr(self.dp_trainer, key) + setattr(self, key, value) + + def test_consistency(self) -> None: + batch, head_dict, stat_dict, vs_dict = self.dp_trainer.get_intermediate_state( + self.wanted_step + ) + # Build DeePMD graph + my_ds = DpLoaderSet(self.systems, self.batch_size, self.type_map) + my_ds.add_data_requirement(energy_data_requirement) + my_model = get_model( + model_params={ + "descriptor": { + "type": "se_e2_a", + "sel": self.sel, + "rcut_smth": self.rcut_smth, + "rcut": self.rcut, + "neuron": self.filter_neuron, + "axis_neuron": self.axis_neuron, + }, + "fitting_net": {"neuron": self.n_neuron, "mixed_types": False}, + "data_stat_nbatch": self.data_stat_nbatch, + "type_map": self.type_map, + }, + ) + my_model.to(DEVICE) + my_lr = MyLRExp(self.start_lr, self.stop_lr, self.decay_steps, self.stop_steps) + my_loss = EnergyStdLoss( + starter_learning_rate=self.start_lr, + start_pref_e=self.start_pref_e, + limit_pref_e=self.limit_pref_e, + start_pref_f=self.start_pref_f, + limit_pref_f=self.limit_pref_f, + ) + + # Keep statistics consistency between 2 implementations + my_em = my_model.get_descriptor() + mean = stat_dict["descriptor.mean"].reshape([self.ntypes, my_em.get_nsel(), 4]) + stddev = stat_dict["descriptor.stddev"].reshape( + [self.ntypes, my_em.get_nsel(), 4] + ) + my_em.set_stat_mean_and_stddev( + paddle.to_tensor(mean).to(device=DEVICE), + paddle.to_tensor(stddev).to(device=DEVICE), + ) + my_model.get_fitting_net().bias_atom_e = paddle.to_tensor( + stat_dict["fitting_net.bias_atom_e"], place=DEVICE + ) + + # Keep parameter value consistency between 2 implementations + for name, param in my_model.named_parameters(): + name = name.replace("sea.", "") + var_name = paddle2tf(name, last_layer_id=len(self.n_neuron)) + if var_name is None: + continue + var = vs_dict[var_name].value + with paddle.no_grad(): + src = paddle.to_tensor(var) + dst = param + # print(name) + # print(src.mean(), src.std()) + # print(dst.mean(), dst.std()) + paddle.assign(src, dst) + # Start forward computing + tmp = np.copy(batch["natoms_vec"]) + batch = my_ds.systems[0]._data_system._get_subdata(batch, 0) + batch = my_ds.systems[0]._data_system.reformat_data_torch(batch) + for key in ["coord", "atype", "box", "energy", "force"]: + batch[key] = paddle.to_tensor(batch[key]).to(device=env.DEVICE) + batch[key] = batch[key].unsqueeze(0) + batch["coord"].stop_gradient = False + batch["natoms_vec"] = tmp + batch["natoms"] = paddle.to_tensor( + batch["natoms_vec"], place=batch["coord"].place + ).unsqueeze(0) + model_input = { + "coord": batch["coord"].to(env.DEVICE), + "atype": batch["atype"].to(env.DEVICE), + "box": batch["box"].to(env.DEVICE), + "do_atomic_virial": True, + } + model_input_1 = { + "coord": batch["coord"].to(env.DEVICE), + "atype": batch["atype"].to(env.DEVICE), + "box": batch["box"].to(env.DEVICE), + "do_atomic_virial": False, + } + label = { + "energy": batch["energy"].to(env.DEVICE), + "find_energy": 1.0, + "force": batch["force"].to(env.DEVICE), + "find_force": 1.0, + } + cur_lr = my_lr.value(self.wanted_step) + model_predict, loss, _ = my_loss( + model_input, my_model, label, int(batch["natoms"][0, 0]), cur_lr + ) + model_predict_1 = my_model(**model_input_1) + p_energy, p_force, p_virial, p_atomic_virial = ( + model_predict["energy"], + model_predict["force"], + model_predict["virial"], + model_predict["atom_virial"], + ) + np.testing.assert_allclose( + head_dict["energy"], p_energy.reshape([-1]).cpu().detach().numpy() + ) + 
np.testing.assert_allclose( + head_dict["force"], + p_force.reshape(head_dict["force"].shape).cpu().detach().numpy(), + ) + rtol = 1e-5 + atol = 1e-8 + np.testing.assert_allclose( + head_dict["loss"], loss.cpu().detach().numpy(), rtol=rtol, atol=atol + ) + np.testing.assert_allclose( + head_dict["virial"], + p_virial.reshape(head_dict["virial"].shape).cpu().detach().numpy(), + ) + np.testing.assert_allclose( + head_dict["virial"], + model_predict_1["virial"] + .reshape([*head_dict["virial"].shape]) + .cpu() + .detach() + .numpy(), + ) + self.assertIsNone(model_predict_1.get("atom_virial", None)) + np.testing.assert_allclose( + head_dict["atom_virial"], + p_atomic_virial.reshape(head_dict["atom_virial"].shape) + .cpu() + .detach() + .numpy(), + ) + optimizer = paddle.optimizer.Adam(cur_lr, parameters=my_model.parameters()) + optimizer.clear_grad() + + def step(step_id) -> None: + bdata = self.training_data.get_trainning_batch() + optimizer.clear_grad() + + # Compare gradient for consistency + loss.backward() + + for name, param in my_model.named_parameters(): + name = name.replace("sea.", "") + var_name = paddle2tf(name, last_layer_id=len(self.n_neuron)) + if var_name is None: + continue + var_grad = vs_dict[var_name].gradient + param_grad = param.grad.cpu() + var_grad = paddle.to_tensor(var_grad).to(device="cpu") + assert np.allclose(var_grad, param_grad, rtol=rtol, atol=atol) + + +if __name__ == "__main__": + unittest.main() diff --git a/source/tests/pd/model/test_nlist.py b/source/tests/pd/model/test_nlist.py new file mode 100644 index 0000000000..0947355ac0 --- /dev/null +++ b/source/tests/pd/model/test_nlist.py @@ -0,0 +1,304 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import unittest + +import numpy as np +import paddle + +from deepmd.pd.utils import ( + env, +) +from deepmd.pd.utils.nlist import ( + build_directional_neighbor_list, + build_multiple_neighbor_list, + build_neighbor_list, + extend_coord_with_ghosts, + get_multiple_nlist_key, +) +from deepmd.pd.utils.region import ( + inter2phys, +) + +dtype = paddle.float64 + + +class TestNeighList(unittest.TestCase): + def setUp(self): + self.nf = 3 + self.nloc = 3 + self.ns = 5 * 5 * 3 + self.nall = self.ns * self.nloc + self.cell = paddle.to_tensor( + [[1, 0, 0], [0.4, 0.8, 0], [0.1, 0.3, 2.1]], dtype=dtype, place=env.DEVICE + ) + self.icoord = paddle.to_tensor( + [[0, 0, 0], [0, 0, 0], [0.5, 0.5, 0.1]], dtype=dtype, place=env.DEVICE + ) + self.atype = paddle.to_tensor([-1, 0, 1], dtype=paddle.int64).to( + device=env.DEVICE + ) + [self.cell, self.icoord, self.atype] = [ + ii.unsqueeze(0) for ii in [self.cell, self.icoord, self.atype] + ] + self.coord = inter2phys(self.icoord, self.cell).reshape([-1, self.nloc * 3]) + self.cell = self.cell.reshape([-1, 9]) + [self.cell, self.coord, self.atype] = [ + paddle.tile(ii, [self.nf, 1]) for ii in [self.cell, self.coord, self.atype] + ] + self.rcut = 1.01 + self.prec = 1e-10 + self.nsel = [10, 10] + # generated by preprocess.build_neighbor_list + # ref_nlist, _, _ = legacy_build_neighbor_list( + # 2, ecoord[0], eatype[0], + # self.rcut, + # paddle.to_tensor([10,20], dtype=paddle.int64), + # mapping[0], type_split=True, ) + self.ref_nlist = paddle.to_tensor( + [ + [-1] * sum(self.nsel), + [1, 1, 1, 1, 1, 1, -1, -1, -1, -1, 2, 2, 2, 2, -1, -1, -1, -1, -1, -1], + [1, 1, 1, 1, -1, -1, -1, -1, -1, -1, 2, 2, 2, 2, 2, 2, -1, -1, -1, -1], + ], + place=env.DEVICE, + ) + + def test_build_notype(self): + ecoord, eatype, mapping = extend_coord_with_ghosts( + self.coord, self.atype, self.cell,
self.rcut + ) + # test normal sel + nlist = build_neighbor_list( + ecoord, + eatype, + self.nloc, + self.rcut, + sum(self.nsel), + distinguish_types=False, + ) + nlist_mask = nlist[0] == -1 + nlist_loc = mapping[0][nlist[0]] + nlist_loc[nlist_mask] = -1 + np.testing.assert_allclose( + paddle.sort(nlist_loc, axis=-1).numpy(), + paddle.sort(self.ref_nlist, axis=-1).numpy(), + ) + # test a very large sel + nlist = build_neighbor_list( + ecoord, + eatype, + self.nloc, + self.rcut, + sum(self.nsel) + 300, # +300, real nnei==224 + distinguish_types=False, + ) + nlist_mask = nlist[0] == -1 + nlist_loc = mapping[0][nlist[0]] + nlist_loc[nlist_mask] = -1 + np.testing.assert_allclose( + paddle.sort(nlist_loc, descending=True, axis=-1)[ + :, : sum(self.nsel) + ].numpy(), + paddle.sort(self.ref_nlist, descending=True, axis=-1).numpy(), + ) + + def test_build_type(self): + ecoord, eatype, mapping = extend_coord_with_ghosts( + self.coord, self.atype, self.cell, self.rcut + ) + nlist = build_neighbor_list( + ecoord, + eatype, + self.nloc, + self.rcut, + self.nsel, + distinguish_types=True, + ) + np.testing.assert_allclose(nlist[0].numpy(), nlist[1].numpy()) + nlist_mask = nlist[0] == -1 + nlist_loc = mapping[0][nlist[0]] + nlist_loc[nlist_mask] = -1 + for ii in range(2): + np.testing.assert_allclose( + paddle.sort( + paddle.split(nlist_loc, (self.nsel), axis=-1)[ii], axis=-1 + ).numpy(), + paddle.sort( + paddle.split(self.ref_nlist, (self.nsel), axis=-1)[ii], axis=-1 + ).numpy(), + ) + + def test_build_multiple_nlist(self): + rcuts = [1.01, 2.01] + nsels = [20, 80] + ecoord, eatype, mapping = extend_coord_with_ghosts( + self.coord, self.atype, self.cell, max(rcuts) + ) + nlist1 = build_neighbor_list( + ecoord, + eatype, + self.nloc, + rcuts[1], + nsels[1] - 1, + distinguish_types=False, + ) + pad = -1 * paddle.ones([self.nf, self.nloc, 1], dtype=nlist1.dtype).to( + device=nlist1.place + ) + nlist2 = paddle.concat([nlist1, pad], axis=-1) + nlist0 = build_neighbor_list( + ecoord, + eatype, + self.nloc, + rcuts[0], + nsels[0], + distinguish_types=False, + ) + nlists = build_multiple_neighbor_list(ecoord, nlist1, rcuts, nsels) + for dd in range(2): + self.assertEqual( + nlists[get_multiple_nlist_key(rcuts[dd], nsels[dd])].shape[-1], + nsels[dd], + ) + np.testing.assert_allclose( + nlists[get_multiple_nlist_key(rcuts[0], nsels[0])].numpy(), + nlist0.numpy(), + ) + np.testing.assert_allclose( + nlists[get_multiple_nlist_key(rcuts[1], nsels[1])].numpy(), + nlist2.numpy(), + ) + + def test_extend_coord(self): + ecoord, eatype, mapping = extend_coord_with_ghosts( + self.coord, self.atype, self.cell, self.rcut + ) + # expected ncopy x nloc + self.assertEqual(list(ecoord.shape), [self.nf, self.nall * 3]) + self.assertEqual(list(eatype.shape), [self.nf, self.nall]) + self.assertEqual(list(mapping.shape), [self.nf, self.nall]) + # check the nloc part is identical with original coord + np.testing.assert_allclose( + ecoord[:, : self.nloc * 3].numpy(), + self.coord.numpy(), + rtol=self.prec, + atol=self.prec, + ) + # check the shift vectors are aligned with grid + shift_vec = ( + ecoord.reshape([-1, self.ns, self.nloc, 3]) + - self.coord.reshape([-1, self.nloc, 3])[:, None, :, :] + ) + shift_vec = shift_vec.reshape([-1, self.nall, 3]) + # hack!!! 
assumes identical cell across frames + shift_vec = paddle.matmul( + shift_vec, paddle.linalg.inv(self.cell.reshape([self.nf, 3, 3])[0]) + ) + # nf x nall x 3 + shift_vec = paddle.round(shift_vec) + # check: identical shift vecs + np.testing.assert_allclose( + shift_vec[0].numpy(), shift_vec[1].numpy(), rtol=self.prec, atol=self.prec + ) + # check: shift idx aligned with grid + mm, cc = paddle.unique(shift_vec[0][:, 0], axis=-1, return_counts=True) + np.testing.assert_allclose( + mm.numpy(), + paddle.to_tensor([-2, -1, 0, 1, 2], dtype=dtype) + .to(device=env.DEVICE) + .numpy(), + rtol=self.prec, + atol=self.prec, + ) + np.testing.assert_allclose( + cc.numpy(), + paddle.to_tensor( + [self.ns * self.nloc // 5] * 5, dtype=paddle.int64, place=env.DEVICE + ).numpy(), + rtol=self.prec, + atol=self.prec, + ) + mm, cc = paddle.unique(shift_vec[1][:, 1], axis=-1, return_counts=True) + np.testing.assert_allclose( + mm.numpy(), + paddle.to_tensor([-2, -1, 0, 1, 2], dtype=dtype).to(device=env.DEVICE), + rtol=self.prec, + atol=self.prec, + ) + np.testing.assert_allclose( + cc.numpy(), + paddle.to_tensor( + [self.ns * self.nloc // 5] * 5, dtype=paddle.int64, place=env.DEVICE + ), + rtol=self.prec, + atol=self.prec, + ) + mm, cc = paddle.unique(shift_vec[1][:, 2], axis=-1, return_counts=True) + np.testing.assert_allclose( + mm.numpy(), + paddle.to_tensor([-1, 0, 1], dtype=dtype).to(device=env.DEVICE).numpy(), + rtol=self.prec, + atol=self.prec, + ) + np.testing.assert_allclose( + cc.numpy(), + paddle.to_tensor( + [self.ns * self.nloc // 3] * 3, dtype=paddle.int64, place=env.DEVICE + ).numpy(), + rtol=self.prec, + atol=self.prec, + ) + + def test_build_directional_nlist(self): + """Directional nlist is tested against the standard nlist implementation.""" + ecoord, eatype, mapping = extend_coord_with_ghosts( + self.coord, self.atype, self.cell, self.rcut + ) + for distinguish_types, mysel in zip([True, False], [sum(self.nsel), 300]): + # full neighbor list + nlist_full = build_neighbor_list( + ecoord, + eatype, + self.nloc, + self.rcut, + sum(self.nsel), + distinguish_types=distinguish_types, + ) + # central as part of the system + nlist = build_directional_neighbor_list( + ecoord[:, 3:6], + eatype[:, 1:2], + paddle.concat( + [ + ecoord[:, 0:3], + paddle.zeros( + [self.nf, 3], + dtype=dtype, + ).to(device=env.DEVICE), # placeholder + ecoord[:, 6:], + ], + axis=1, + ), + paddle.concat( + [ + eatype[:, 0:1], + -1 + * paddle.ones( + [self.nf, 1], + dtype="int64", + ).to(device=env.DEVICE), # placeholder + eatype[:, 2:], + ], + axis=1, + ), + self.rcut, + mysel, + distinguish_types=distinguish_types, + ) + np.testing.assert_allclose(nlist[0].numpy(), nlist[1].numpy()) + np.testing.assert_allclose(nlist[0].numpy(), nlist[2].numpy()) + np.testing.assert_allclose( + paddle.sort(nlist[0], descending=True, axis=-1)[ + :, : sum(self.nsel) + ].numpy(), + paddle.sort(nlist_full[0][1:2], descending=True, axis=-1).numpy(), + ) diff --git a/source/tests/pd/model/test_null_input.py b/source/tests/pd/model/test_null_input.py new file mode 100644 index 0000000000..9bf0860265 --- /dev/null +++ b/source/tests/pd/model/test_null_input.py @@ -0,0 +1,94 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import copy +import unittest + +import numpy as np +import paddle + +from deepmd.pd.model.model import ( + get_model, +) +from deepmd.pd.utils import ( + env, +) +from deepmd.pd.utils.utils import ( + to_numpy_array, +) + +from ...seed import ( + GLOBAL_SEED, +) +from ..common import ( + eval_model, +) +from .test_permutation 
import ( + model_se_e2_a, +) + +dtype = paddle.float64 + + +class NullTest: + def test_nloc_1( + self, + ): + natoms = 1 + generator = paddle.seed(GLOBAL_SEED) + # paddle.seed(1000) + cell = paddle.rand([3, 3], dtype=dtype).to(device=env.DEVICE) + # large box to exclude images + cell = (cell + cell.T) + 100.0 * paddle.eye(3).to(device=env.DEVICE) + coord = paddle.rand([natoms, 3], dtype=dtype).to(device=env.DEVICE) + atype = paddle.to_tensor([0], dtype=paddle.int32).to(device=env.DEVICE) + test_keys = ["energy", "force", "virial"] + result = eval_model(self.model, coord.unsqueeze(0), cell.unsqueeze(0), atype) + ret0 = {key: result[key].squeeze(0) for key in test_keys} + prec = 1e-10 + expect_e_shape = [1] + expect_f = paddle.zeros([natoms, 3], dtype=dtype).to(device=env.DEVICE) + expect_v = paddle.zeros([9], dtype=dtype).to(device=env.DEVICE) + self.assertEqual(list(ret0["energy"].shape), expect_e_shape) + self.assertFalse(np.isnan(to_numpy_array(ret0["energy"])[0])) + np.testing.assert_allclose( + ret0["force"].numpy(), expect_f.numpy(), rtol=prec, atol=prec + ) + if not hasattr(self, "test_virial") or self.test_virial: + np.testing.assert_allclose( + ret0["virial"].numpy(), expect_v.numpy(), rtol=prec, atol=prec + ) + + def test_nloc_2_far( + self, + ): + natoms = 2 + generator = paddle.seed(GLOBAL_SEED) + cell = paddle.rand([3, 3], dtype=dtype).to(device=env.DEVICE) + # large box to exclude images + cell = (cell + cell.T) + 3000.0 * paddle.eye(3).to(device=env.DEVICE) + coord = paddle.rand([1, 3], dtype=dtype).to(device=env.DEVICE) + # 2 far-away atoms + coord = paddle.concat([coord, coord + 100.0], axis=0) + atype = paddle.to_tensor([0, 2], dtype=paddle.int32).to(device=env.DEVICE) + test_keys = ["energy", "force", "virial"] + result = eval_model(self.model, coord.unsqueeze(0), cell.unsqueeze(0), atype) + ret0 = {key: result[key].squeeze(0) for key in test_keys} + prec = 1e-10 + expect_e_shape = [1] + expect_f = paddle.zeros([natoms, 3], dtype=dtype).to(device=env.DEVICE) + expect_v = paddle.zeros([9], dtype=dtype).to(device=env.DEVICE) + self.assertEqual(list(ret0["energy"].shape), expect_e_shape) + self.assertFalse(np.isnan(to_numpy_array(ret0["energy"])[0])) + np.testing.assert_allclose( + ret0["force"].numpy(), expect_f.numpy(), rtol=prec, atol=prec + ) + if not hasattr(self, "test_virial") or self.test_virial: + np.testing.assert_allclose( + ret0["virial"].numpy(), expect_v.numpy(), rtol=prec, atol=prec + ) + + +class TestEnergyModelSeA(unittest.TestCase, NullTest): + def setUp(self): + model_params = copy.deepcopy(model_se_e2_a) + self.type_split = False + self.model = get_model(model_params).to(env.DEVICE) diff --git a/source/tests/pd/model/test_permutation.py b/source/tests/pd/model/test_permutation.py new file mode 100644 index 0000000000..8482ca7ffe --- /dev/null +++ b/source/tests/pd/model/test_permutation.py @@ -0,0 +1,489 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import copy +import os +import unittest + +import paddle + +from deepmd.pd.model.model import ( + get_model, +) +from deepmd.pd.utils import ( + env, +) + +from ...seed import ( + GLOBAL_SEED, +) +from ..common import ( + eval_model, +) + +CUR_DIR = os.path.dirname(__file__) + +dtype = paddle.float64 +import numpy as np + +model_se_e2_a = { + "type_map": ["O", "H", "B"], + "descriptor": { + "type": "se_e2_a", + "sel": [46, 92, 4], + "rcut_smth": 0.50, + "rcut": 4.00, + "neuron": [25, 50, 100], + "resnet_dt": False, + "axis_neuron": 16, + "seed": 1, + }, + "fitting_net": { + "neuron": [24, 24, 24], + 
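# resnet_dt: learn a timestep factor on the ResNet-style skip connections +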
"resnet_dt": True, + "seed": 1, + }, + "data_stat_nbatch": 20, +} + +model_dos = { + "type_map": ["O", "H", "B"], + "descriptor": { + "type": "se_e2_a", + "sel": [46, 92, 4], + "rcut_smth": 0.50, + "rcut": 4.00, + "neuron": [25, 50, 100], + "resnet_dt": False, + "axis_neuron": 16, + "seed": 1, + }, + "fitting_net": { + "neuron": [24, 24, 24], + "resnet_dt": True, + "seed": 1, + "type": "dos", + "numb_dos": 250, + }, + "data_stat_nbatch": 20, +} + +model_zbl = { + "type_map": ["O", "H", "B"], + "use_srtab": f"{CUR_DIR}/water/data/zbl_tab_potential/H2O_tab_potential.txt", + "smin_alpha": 0.1, + "sw_rmin": 0.2, + "sw_rmax": 4.0, + "descriptor": { + "type": "se_atten", + "sel": 40, + "rcut_smth": 0.5, + "rcut": 4.0, + "neuron": [25, 50, 100], + "axis_neuron": 16, + "attn": 64, + "attn_layer": 2, + "attn_dotr": True, + "attn_mask": False, + "activation_function": "tanh", + "scaling_factor": 1.0, + "normalize": False, + "temperature": 1.0, + "set_davg_zero": True, + "type_one_side": True, + "seed": 1, + }, + "fitting_net": { + "neuron": [24, 24, 24], + "resnet_dt": True, + "seed": 1, + }, + "data_stat_nbatch": 20, +} + + +model_spin = { + "type_map": ["O", "H", "B"], + "descriptor": { + "type": "se_e2_a", + "sel": [46, 92, 4], + "rcut_smth": 0.50, + "rcut": 4.00, + "neuron": [25, 50, 100], + "resnet_dt": False, + "axis_neuron": 16, + "seed": 1, + }, + "fitting_net": { + "neuron": [24, 24, 24], + "resnet_dt": True, + "seed": 1, + }, + "data_stat_nbatch": 20, + "spin": { + "use_spin": [True, False, False], + "virtual_scale": [0.3140], + "_comment": " that's all", + }, +} + +model_dpa2 = { + "type_map": ["O", "H", "B"], + "descriptor": { + "type": "dpa2", + "repinit": { + "rcut": 6.0, + "rcut_smth": 2.0, + "nsel": 100, + "neuron": [2, 4, 8], + "axis_neuron": 4, + "activation_function": "tanh", + }, + "repformer": { + "rcut": 4.0, + "rcut_smth": 0.5, + "nsel": 40, + "nlayers": 12, + "g1_dim": 8, + "g2_dim": 5, + "attn2_hidden": 3, + "attn2_nhead": 1, + "attn1_hidden": 5, + "attn1_nhead": 1, + "axis_neuron": 4, + "update_h2": False, + "update_g1_has_conv": True, + "update_g1_has_grrg": True, + "update_g1_has_drrd": True, + "update_g1_has_attn": True, + "update_g2_has_g1g1": True, + "update_g2_has_attn": True, + "attn2_has_gate": True, + }, + "seed": 1, + "add_tebd_to_repinit_out": False, + }, + "fitting_net": { + "neuron": [24, 24], + "resnet_dt": True, + "seed": 1, + }, +} + +model_dpa2tebd = { + "type_map": ["O", "H", "B"], + "descriptor": { + "type": "dpa2", + "repinit": { + "rcut": 6.0, + "rcut_smth": 0.5, + "nsel": 100, + "neuron": [2, 4, 8], + "axis_neuron": 4, + "activation_function": "tanh", + "three_body_sel": 40, + "three_body_rcut": 4.0, + "three_body_rcut_smth": 3.5, + "use_three_body": True, + }, + "repformer": { + "rcut": 4.0, + "rcut_smth": 0.5, + "nsel": 40, + "nlayers": 6, + "g1_dim": 8, + "g2_dim": 5, + "attn2_hidden": 3, + "attn2_nhead": 1, + "attn1_hidden": 5, + "attn1_nhead": 1, + "axis_neuron": 4, + "update_h2": False, + "update_g1_has_conv": True, + "update_g1_has_grrg": True, + "update_g1_has_drrd": True, + "update_g1_has_attn": False, + "update_g2_has_g1g1": False, + "update_g2_has_attn": True, + "update_style": "res_residual", + "update_residual": 0.01, + "update_residual_init": "norm", + "attn2_has_gate": True, + "use_sqrt_nnei": True, + "g1_out_conv": True, + "g1_out_mlp": True, + }, + "seed": 1, + "add_tebd_to_repinit_out": False, + }, + "fitting_net": { + "neuron": [24, 24], + "resnet_dt": True, + "seed": 1, + }, +} + +model_dpa1 = { + "type_map": ["O", "H", "B"], + 
"descriptor": { + "type": "se_atten", + "sel": 40, + "rcut_smth": 0.5, + "rcut": 4.0, + "neuron": [25, 50, 100], + "axis_neuron": 16, + "attn": 64, + "attn_layer": 2, + "attn_dotr": True, + "attn_mask": False, + "activation_function": "tanh", + "scaling_factor": 1.0, + "normalize": False, + "temperature": 1.0, + "set_davg_zero": True, + "type_one_side": True, + "seed": 1, + }, + "fitting_net": { + "neuron": [24, 24, 24], + "resnet_dt": True, + "seed": 1, + }, +} + + +model_hybrid = { + "type_map": ["O", "H", "B"], + "descriptor": { + "type": "hybrid", + "list": [ + { + "type": "se_atten", + "sel": 120, + "rcut_smth": 0.5, + "rcut": 6.0, + "neuron": [25, 50, 100], + "axis_neuron": 16, + "attn": 128, + "attn_layer": 0, + "attn_dotr": True, + "attn_mask": False, + "activation_function": "tanh", + "scaling_factor": 1.0, + "normalize": True, + "temperature": 1.0, + "seed": 1, + }, + { + "type": "dpa2", + "repinit": { + "rcut": 6.0, + "rcut_smth": 2.0, + "nsel": 30, + "neuron": [2, 4, 8], + "axis_neuron": 4, + "activation_function": "tanh", + }, + "repformer": { + "rcut": 4.0, + "rcut_smth": 0.5, + "nsel": 10, + "nlayers": 12, + "g1_dim": 8, + "g2_dim": 5, + "attn2_hidden": 3, + "attn2_nhead": 1, + "attn1_hidden": 5, + "attn1_nhead": 1, + "axis_neuron": 4, + "update_h2": False, + "update_g1_has_conv": True, + "update_g1_has_grrg": True, + "update_g1_has_drrd": True, + "update_g1_has_attn": True, + "update_g2_has_g1g1": True, + "update_g2_has_attn": True, + "attn2_has_gate": True, + }, + "seed": 1, + "add_tebd_to_repinit_out": False, + }, + ], + }, + "fitting_net": { + "neuron": [240, 240, 240], + "resnet_dt": True, + "seed": 1, + "_comment": " that's all", + }, + "_comment": " that's all", +} + +model_property = { + "type_map": ["H", "C", "N", "O"], + "descriptor": { + "type": "se_e2_a", + "sel": [3, 3, 3, 3], + "rcut_smth": 0.50, + "rcut": 4.00, + "neuron": [25, 50, 100], + "resnet_dt": False, + "axis_neuron": 16, + "seed": 1, + }, + "fitting_net": { + "type": "property", + "task_dim": 3, + "neuron": [24, 24, 24], + "resnet_dt": True, + "bias_method": "normal", + "intensive": True, + "seed": 1, + }, +} + + +class PermutationTest: + def test( + self, + ): + natoms = 5 + generator = paddle.seed(GLOBAL_SEED) + cell = paddle.rand([3, 3], dtype=dtype) + cell = (cell + cell.T) + 5.0 * paddle.eye(3) + coord = paddle.rand([natoms, 3], dtype=dtype) + spin = paddle.rand([natoms, 3], dtype=dtype) + coord = paddle.matmul(coord, cell) + atype = paddle.to_tensor([0, 0, 0, 1, 1], dtype=paddle.int32) + idx_perm = [1, 0, 4, 3, 2] + test_spin = getattr(self, "test_spin", False) + if not test_spin: + test_keys = ["energy", "force", "virial"] + else: + test_keys = ["energy", "force", "force_mag", "virial"] + result_0 = eval_model( + self.model, + coord.unsqueeze(0), + cell.unsqueeze(0), + atype, + spins=spin.unsqueeze(0), + ) + ret0 = {key: result_0[key].squeeze(0) for key in test_keys} + result_1 = eval_model( + self.model, + coord[idx_perm].unsqueeze(0), + cell.unsqueeze(0), + atype[idx_perm], + spins=spin[idx_perm].unsqueeze(0), + ) + ret1 = {key: result_1[key].squeeze(0) for key in test_keys} + prec = 1e-10 + for key in test_keys: + if key in ["energy"]: + np.testing.assert_allclose( + ret0[key].numpy(), ret1[key].numpy(), rtol=prec, atol=prec + ) + elif key in ["force", "force_mag"]: + np.testing.assert_allclose( + ret0[key][idx_perm].numpy(), ret1[key].numpy(), rtol=prec, atol=prec + ) + elif key == "virial": + if not hasattr(self, "test_virial") or self.test_virial: + np.testing.assert_allclose( + 
ret0[key], ret1[key], rtol=prec, atol=prec + ) + else: + raise RuntimeError(f"Unexpected test key {key}") + + +class TestEnergyModelSeA(unittest.TestCase, PermutationTest): + def setUp(self): + model_params = copy.deepcopy(model_se_e2_a) + self.type_split = False + self.model = get_model(model_params).to(env.DEVICE) + + +@unittest.skip("Skip for not implemented yet") +class TestDOSModelSeA(unittest.TestCase, PermutationTest): + def setUp(self): + model_params = copy.deepcopy(model_dos) + self.type_split = False + self.model = get_model(model_params).to(env.DEVICE) + + +@unittest.skip("Skip for not implemented yet") +class TestEnergyModelDPA1(unittest.TestCase, PermutationTest): + def setUp(self): + model_params = copy.deepcopy(model_dpa1) + self.type_split = True + self.model = get_model(model_params).to(env.DEVICE) + + +@unittest.skip("Skip for not implemented yet") +class TestEnergyModelDPA2(unittest.TestCase, PermutationTest): + def setUp(self): + model_params = copy.deepcopy(model_dpa2) + self.type_split = True + self.model = get_model(model_params).to(env.DEVICE) + + +@unittest.skip("Skip for not implemented yet") +class TestForceModelDPA2(unittest.TestCase, PermutationTest): + def setUp(self): + model_params = copy.deepcopy(model_dpa2) + model_params["fitting_net"]["type"] = "direct_force_ener" + self.type_split = True + self.test_virial = False + self.model = get_model(model_params).to(env.DEVICE) + + +@unittest.skip("Skip for not implemented yet") +class TestEnergyModelHybrid(unittest.TestCase, PermutationTest): + def setUp(self): + model_params = copy.deepcopy(model_hybrid) + self.type_split = True + self.model = get_model(model_params).to(env.DEVICE) + + +@unittest.skip("Skip for not implemented yet") +class TestForceModelHybrid(unittest.TestCase, PermutationTest): + def setUp(self): + model_params = copy.deepcopy(model_hybrid) + model_params["fitting_net"]["type"] = "direct_force_ener" + self.type_split = True + self.test_virial = False + self.model = get_model(model_params).to(env.DEVICE) + + +@unittest.skip("Skip for not implemented yet") +class TestEnergyModelZBL(unittest.TestCase, PermutationTest): + def setUp(self): + model_params = copy.deepcopy(model_zbl) + self.type_split = False + self.model = get_model(model_params).to(env.DEVICE) + + +@unittest.skip("Skip for not implemented yet") +class TestEnergyModelSpinSeA(unittest.TestCase, PermutationTest): + def setUp(self): + model_params = copy.deepcopy(model_spin) + self.type_split = False + self.test_spin = True + self.model = get_model(model_params).to(env.DEVICE) + + +# class TestEnergyFoo(unittest.TestCase): +# def test(self): +# model_params = model_dpau +# self.model = EnergyModelDPAUni(model_params).to(env.DEVICE) + +# natoms = 5 +# cell = paddle.rand([3, 3], dtype=dtype) +# cell = (cell + cell.T) + 5. 
* paddle.eye(3) +# coord = paddle.rand([natoms, 3], dtype=dtype) +# coord = paddle.matmul(coord, cell) +# atype = paddle.to_tensor([0, 0, 0, 1, 1]) +# idx_perm = [1, 0, 4, 3, 2] +# ret0 = infer_model(self.model, coord, cell, atype, type_split=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/source/tests/pd/model/test_region.py b/source/tests/pd/model/test_region.py new file mode 100644 index 0000000000..93fa82d8a5 --- /dev/null +++ b/source/tests/pd/model/test_region.py @@ -0,0 +1,62 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import unittest + +import numpy as np +import paddle + +from deepmd.pd.utils.region import ( + inter2phys, + to_face_distance, +) + +from ...seed import ( + GLOBAL_SEED, +) + +dtype = paddle.float64 + + +class TestRegion(unittest.TestCase): + def setUp(self): + self.cell = paddle.to_tensor( + [[1, 0, 0], [0.4, 0.8, 0], [0.1, 0.3, 2.1]], dtype=dtype, place="cpu" + ) + self.cell = self.cell.unsqueeze(0).unsqueeze(0) + self.cell = paddle.tile(self.cell, [4, 5, 1, 1]) + self.prec = 9e-8 + + def test_inter_to_phys(self): + generator = paddle.seed(GLOBAL_SEED) + inter = paddle.rand([4, 5, 3, 3], dtype=dtype).to(device="cpu") + phys = inter2phys(inter, self.cell) + for ii in range(4): + for jj in range(5): + expected_phys = paddle.matmul(inter[ii, jj], self.cell[ii, jj]) + np.testing.assert_allclose( + phys[ii, jj].numpy(), + expected_phys.numpy(), + rtol=self.prec, + atol=self.prec, + ) + + def test_to_face_dist(self): + cell0 = self.cell[0][0].numpy() + vol = np.linalg.det(cell0) + # area of surfaces xy, xz, yz + sxy = np.linalg.norm(np.cross(cell0[0], cell0[1])) + sxz = np.linalg.norm(np.cross(cell0[0], cell0[2])) + syz = np.linalg.norm(np.cross(cell0[1], cell0[2])) + # vol / area gives distance + dz = vol / sxy + dy = vol / sxz + dx = vol / syz + expected = paddle.to_tensor([dx, dy, dz], place="cpu") + dists = to_face_distance(self.cell) + for ii in range(4): + for jj in range(5): + np.testing.assert_allclose( + dists[ii][jj].numpy(), + expected.numpy(), + rtol=self.prec, + atol=self.prec, + ) diff --git a/source/tests/pd/model/test_rot.py b/source/tests/pd/model/test_rot.py new file mode 100644 index 0000000000..4d59117560 --- /dev/null +++ b/source/tests/pd/model/test_rot.py @@ -0,0 +1,234 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import copy +import unittest + +import numpy as np +import paddle + +from deepmd.pd.model.model import ( + get_model, +) +from deepmd.pd.utils import ( + env, +) + +from ...seed import ( + GLOBAL_SEED, +) +from ..common import ( + eval_model, +) +from .test_permutation import ( # model_dpau, + model_dos, + model_dpa1, + model_dpa2, + model_hybrid, + model_se_e2_a, + model_spin, + model_zbl, +) + +dtype = paddle.float64 + + +class RotTest: + def test( + self, + ): + generator = paddle.seed(GLOBAL_SEED) + prec = 1e-9 + natoms = 5 + cell = 10.0 * paddle.eye(3, dtype=dtype).to(device=env.DEVICE) + coord = 2 * paddle.rand([natoms, 3], dtype=dtype).to(device=env.DEVICE) + spin = 2 * paddle.rand([natoms, 3], dtype=dtype).to(device=env.DEVICE) + shift = paddle.to_tensor([4, 4, 4], dtype=dtype).to(device=env.DEVICE) + atype = paddle.to_tensor([0, 0, 0, 1, 1], dtype=paddle.int32).to( + device=env.DEVICE + ) + from scipy.stats import ( + special_ortho_group, + ) + + test_spin = getattr(self, "test_spin", False) + if not test_spin: + test_keys = ["energy", "force", "virial"] + else: + test_keys = ["energy", "force", "force_mag"] + rmat = paddle.to_tensor(special_ortho_group.rvs(3), dtype=dtype).to( + device=env.DEVICE + 
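# rmat is a random proper rotation from SO(3) (det = +1); an assumed clarifying comment, not upstream text +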
) + + # rotate only coord and shift to the center of cell + coord_rot = paddle.matmul(coord, rmat) + spin_rot = paddle.matmul(spin, rmat) + result_0 = eval_model( + self.model, + (coord + shift).unsqueeze(0), + cell.unsqueeze(0), + atype, + spins=spin.unsqueeze(0), + ) + ret0 = {key: result_0[key].squeeze(0) for key in test_keys} + result_1 = eval_model( + self.model, + (coord_rot + shift).unsqueeze(0), + cell.unsqueeze(0), + atype, + spins=spin_rot.unsqueeze(0), + ) + ret1 = {key: result_1[key].squeeze(0) for key in test_keys} + for key in test_keys: + if key in ["energy"]: + np.testing.assert_allclose( + ret0[key].numpy(), ret1[key].numpy(), rtol=prec, atol=prec + ) + elif key in ["force", "force_mag"]: + np.testing.assert_allclose( + paddle.matmul(ret0[key], rmat).numpy(), + ret1[key].numpy(), + rtol=prec, + atol=prec, + ) + elif key == "virial": + if not hasattr(self, "test_virial") or self.test_virial: + np.testing.assert_allclose( + paddle.matmul( + rmat.T, paddle.matmul(ret0[key].reshape([3, 3]), rmat) + ).numpy(), + ret1[key].reshape([3, 3]).numpy(), + rtol=prec, + atol=prec, + ) + else: + raise RuntimeError(f"Unexpected test key {key}") + # rotate coord and cell + paddle.seed(0) + cell = paddle.rand([3, 3], dtype=dtype).to(device=env.DEVICE) + cell = (cell + cell.T) + 5.0 * paddle.eye(3).to(device=env.DEVICE) + coord = paddle.rand([natoms, 3], dtype=dtype).to(device=env.DEVICE) + coord = paddle.matmul(coord, cell) + spin = paddle.rand([natoms, 3], dtype=dtype).to(device=env.DEVICE) + atype = paddle.to_tensor([0, 0, 0, 1, 1], dtype=paddle.int32).to( + device=env.DEVICE + ) + coord_rot = paddle.matmul(coord, rmat) + spin_rot = paddle.matmul(spin, rmat) + cell_rot = paddle.matmul(cell, rmat) + result_0 = eval_model( + self.model, + coord.unsqueeze(0), + cell.unsqueeze(0), + atype, + spins=spin.unsqueeze(0), + ) + ret0 = {key: result_0[key].squeeze(0) for key in test_keys} + result_1 = eval_model( + self.model, + coord_rot.unsqueeze(0), + cell_rot.unsqueeze(0), + atype, + spins=spin_rot.unsqueeze(0), + ) + ret1 = {key: result_1[key].squeeze(0) for key in test_keys} + for key in test_keys: + if key in ["energy"]: + np.testing.assert_allclose( + ret0[key].numpy(), ret1[key].numpy(), rtol=prec, atol=prec + ) + elif key in ["force", "force_mag"]: + np.testing.assert_allclose( + paddle.matmul(ret0[key], rmat).numpy(), + ret1[key].numpy(), + rtol=prec, + atol=prec, + ) + elif key == "virial": + if not hasattr(self, "test_virial") or self.test_virial: + np.testing.assert_allclose( + paddle.matmul( + rmat.T, paddle.matmul(ret0[key].reshape([3, 3]), rmat) + ).numpy(), + ret1[key].reshape([3, 3]).numpy(), + rtol=prec, + atol=prec, + ) + + +class TestEnergyModelSeA(unittest.TestCase, RotTest): + def setUp(self): + model_params = copy.deepcopy(model_se_e2_a) + self.type_split = False + self.model = get_model(model_params).to(env.DEVICE) + + +@unittest.skip("Skip for not implemented yet") +class TestDOSModelSeA(unittest.TestCase, RotTest): + def setUp(self): + model_params = copy.deepcopy(model_dos) + self.type_split = False + self.model = get_model(model_params).to(env.DEVICE) + + +@unittest.skip("Skip for not implemented yet") +class TestEnergyModelDPA1(unittest.TestCase, RotTest): + def setUp(self): + model_params = copy.deepcopy(model_dpa1) + self.type_split = True + self.model = get_model(model_params).to(env.DEVICE) + + +@unittest.skip("Skip for not implemented yet") +class TestEnergyModelDPA2(unittest.TestCase, RotTest): + def setUp(self): + model_params = copy.deepcopy(model_dpa2) + 
self.type_split = True + self.model = get_model(model_params).to(env.DEVICE) + + +@unittest.skip("Skip for not implemented yet") +class TestForceModelDPA2(unittest.TestCase, RotTest): + def setUp(self): + model_params = copy.deepcopy(model_dpa2) + model_params["fitting_net"]["type"] = "direct_force_ener" + self.type_split = True + self.test_virial = False + self.model = get_model(model_params).to(env.DEVICE) + + +@unittest.skip("Skip for not implemented yet") +class TestEnergyModelHybrid(unittest.TestCase, RotTest): + def setUp(self): + model_params = copy.deepcopy(model_hybrid) + self.type_split = True + self.model = get_model(model_params).to(env.DEVICE) + + +@unittest.skip("Skip for not implemented yet") +class TestForceModelHybrid(unittest.TestCase, RotTest): + def setUp(self): + model_params = copy.deepcopy(model_hybrid) + model_params["fitting_net"]["type"] = "direct_force_ener" + self.type_split = True + self.test_virial = False + self.model = get_model(model_params).to(env.DEVICE) + + +@unittest.skip("Skip for not implemented yet") +class TestEnergyModelZBL(unittest.TestCase, RotTest): + def setUp(self): + model_params = copy.deepcopy(model_zbl) + self.type_split = False + self.model = get_model(model_params).to(env.DEVICE) + + +@unittest.skip("Skip for not implemented yet") +class TestEnergyModelSpinSeA(unittest.TestCase, RotTest): + def setUp(self): + model_params = copy.deepcopy(model_spin) + self.type_split = False + self.test_spin = True + self.model = get_model(model_params).to(env.DEVICE) + + +if __name__ == "__main__": + unittest.main() diff --git a/source/tests/pd/model/test_rotation.py b/source/tests/pd/model/test_rotation.py new file mode 100644 index 0000000000..94e3442631 --- /dev/null +++ b/source/tests/pd/model/test_rotation.py @@ -0,0 +1,113 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import json +import unittest +from pathlib import ( + Path, +) +from typing import ( + Optional, +) + +import numpy as np +import paddle +from scipy.stats import ( + special_ortho_group, +) + +from deepmd.pd.model.model import ( + get_model, +) +from deepmd.pd.utils import ( + env, +) +from deepmd.utils.data import ( + DeepmdData, +) + + +class CheckSymmetry(DeepmdData): + def __init__( + self, + sys_path: str, + type_map: Optional[list[str]] = None, + ): + super().__init__(sys_path=sys_path, type_map=type_map) + self.add("energy", 1, atomic=False, must=False, high_prec=True) + self.add("force", 3, atomic=True, must=False, high_prec=False) + self.add("virial", 9, atomic=False, must=False, high_prec=False) + + def get_rotation(self, index, rotation_matrix): + for i in range( + 0, len(self.dirs) + 1 + ): # note: the prefix sum over set sizes locates the set holding this frame; if the sets could be merged, this lookup would not be needed + if index < self.prefix_sum[i]: + break + frames = self._load_set(self.dirs[i - 1]) + frames["coord"] = np.dot( + rotation_matrix, frames["coord"].reshape(-1, 3).T + ).T.reshape(self.nframes, -1) + frames["box"] = np.dot( + rotation_matrix, frames["box"].reshape(-1, 3).T + ).T.reshape(self.nframes, -1) + frames["force"] = np.dot( + rotation_matrix, frames["force"].reshape(-1, 3).T + ).T.reshape(self.nframes, -1) + frame = self._get_subdata(frames, index - self.prefix_sum[i - 1]) + frame = self.reformat_data_torch(frame) + return frame + + +def get_data(batch): + inputs = {} + for key in ["coord", "atype", "box"]: + inputs[key] = paddle.to_tensor(batch[key]).to(device=env.DEVICE) + inputs[key] = inputs[key].unsqueeze(0).to(env.DEVICE) + return inputs + + +class TestRotation(unittest.TestCase): + def setUp(self): + with open(str(Path(__file__).parent / "water/se_e2_a.json")) as fin: + self.config = json.load(fin) + data_file = [str(Path(__file__).parent / "water/data/data_0")] + self.config["training"]["training_data"]["systems"] = data_file + self.config["training"]["validation_data"]["systems"] = data_file + self.rotation = special_ortho_group.rvs(3) + device = paddle.get_device() + paddle.set_device("cpu") + self.get_dataset(0) + paddle.set_device(device) + self.get_model() + + def get_model(self): + self.model = get_model(self.config["model"]).to(env.DEVICE) + + def get_dataset(self, system_index=0, batch_index=0): + systems = self.config["training"]["training_data"]["systems"] + type_map = self.config["model"]["type_map"] + dpdatasystem = CheckSymmetry(sys_path=systems[system_index], type_map=type_map) + self.origin_batch = dpdatasystem.get_item_paddle(batch_index) + self.rotated_batch = dpdatasystem.get_rotation(batch_index, self.rotation) + + def test_rotation(self): + result1 = self.model(**get_data(self.origin_batch)) + result2 = self.model(**get_data(self.rotated_batch)) + rotation = paddle.to_tensor(self.rotation).to(env.DEVICE) + np.testing.assert_allclose(result1["energy"].numpy(), result2["energy"].numpy()) + if "force" in result1: + np.testing.assert_allclose( + result2["force"][0].numpy(), + paddle.matmul(rotation, result1["force"][0].T).T.numpy(), + ) + if "virial" in result1: + np.testing.assert_allclose( + result2["virial"][0].view([3, 3]).numpy(), + paddle.matmul( + paddle.matmul(rotation, result1["virial"][0].view([3, 3]).T), + rotation.T, + ).numpy(), + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/source/tests/pd/model/test_saveload_se_e2_a.py b/source/tests/pd/model/test_saveload_se_e2_a.py new file mode 100644 index 0000000000..c1c2ba2cdd --- /dev/null +++ b/source/tests/pd/model/test_saveload_se_e2_a.py @@ -0,0 +1,138 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import copy +import json +import os +import unittest +from pathlib import ( + Path, +) + +import numpy as np +import paddle +from paddle.io import ( + DataLoader, +) + +from deepmd.pd.loss import ( + EnergyStdLoss, +) +from deepmd.pd.model.model import ( + get_model, +) +from deepmd.pd.train.wrapper import ( + ModelWrapper, +) +from deepmd.pd.utils import ( + env, +) +from deepmd.pd.utils.dataloader import ( + BufferedIterator, + DpLoaderSet, +) +from deepmd.pd.utils.stat import ( + make_stat_input, +) +from deepmd.tf.common import ( + expand_sys_str, +) + + +def get_dataset(config): + model_config = config["model"] + rcut = model_config["descriptor"]["rcut"] + sel = model_config["descriptor"]["sel"] + systems = config["training"]["validation_data"]["systems"] + if isinstance(systems, str): + systems = expand_sys_str(systems) + batch_size = config["training"]["training_data"]["batch_size"] + type_map = model_config["type_map"] + + dataset = DpLoaderSet(systems, batch_size, type_map) + data_stat_nbatch = model_config.get("data_stat_nbatch", 10) + sampled = make_stat_input(dataset.systems, dataset.dataloaders, data_stat_nbatch) + return dataset, sampled + + +class TestSaveLoadSeA(unittest.TestCase): + def setUp(self): + input_json = str(Path(__file__).parent / "water/se_e2_a.json") + with open(input_json) as fin: + self.config = json.load(fin) + data_file = [str(Path(__file__).parent / "water/data/data_0")] + self.config["training"]["training_data"]["systems"] = data_file + self.config["training"]["validation_data"]["systems"] = data_file +
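# EnergyStdLoss is built from the "loss" section and also needs the starter learning rate, so copy it over from the learning-rate config +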
self.config["loss"]["starter_learning_rate"] = self.config["learning_rate"][ + "start_lr" + ] + self.dataset, self.sampled = get_dataset(self.config) + self.training_dataloader = DataLoader( + self.dataset, + batch_sampler=paddle.io.BatchSampler( + sampler=paddle.io.RandomSampler(self.dataset), + drop_last=False, + ), + num_workers=0, # setting to 0 diverges the behavior of its iterator; should be >=1 + collate_fn=lambda batch: batch[0], + ) + device = paddle.get_device() + paddle.set_device("cpu") + self.training_data = BufferedIterator(iter(self.training_dataloader)) + paddle.set_device(device) + self.loss = EnergyStdLoss(**self.config["loss"]) + self.cur_lr = 1 + self.task_key = "Default" + self.input_dict, self.label_dict = self.get_data() + self.start_lr = self.config["learning_rate"]["start_lr"] + + def get_model_result(self, read=False, model_file="tmp_model.pd"): + wrapper = self.create_wrapper() + optimizer = paddle.optimizer.Adam( + learning_rate=self.start_lr, parameters=wrapper.parameters() + ) + optimizer.clear_grad() + if read: + wrapper.set_state_dict(paddle.load(model_file)) + os.remove(model_file) + else: + paddle.save(wrapper.state_dict(), model_file) + result = wrapper( + **self.input_dict, + cur_lr=self.cur_lr, + label=self.label_dict, + task_key=self.task_key, + )[0] + return result + + def create_wrapper(self): + model_config = copy.deepcopy(self.config["model"]) + model = get_model(model_config).to(env.DEVICE) + return ModelWrapper(model, self.loss) + + def get_data(self): + try: + batch_data = next(iter(self.training_data)) + except StopIteration: + # Refresh the status of the dataloader to start from a new epoch + self.training_data = BufferedIterator(iter(self.training_dataloader)) + batch_data = next(iter(self.training_data)) + input_dict = {} + for item in ["coord", "atype", "box"]: + if item in batch_data: + input_dict[item] = batch_data[item].to(env.DEVICE) + else: + input_dict[item] = None + label_dict = {} + for item in ["energy", "force", "virial"]: + if item in batch_data: + label_dict[item] = batch_data[item].to(env.DEVICE) + return input_dict, label_dict + + def test_saveload(self): + result1 = self.get_model_result() + result2 = self.get_model_result(read=True) + for item in result1: + np.testing.assert_allclose(result1[item].numpy(), result2[item].numpy()) + + +if __name__ == "__main__": + unittest.main() diff --git a/source/tests/pd/model/test_se_e2_a.py b/source/tests/pd/model/test_se_e2_a.py new file mode 100644 index 0000000000..b1e6abe5ae --- /dev/null +++ b/source/tests/pd/model/test_se_e2_a.py @@ -0,0 +1,137 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import itertools +import unittest + +import numpy as np +import paddle + +from deepmd.dpmodel.descriptor import DescrptSeA as DPDescrptSeA +from deepmd.pd.model.descriptor.se_a import ( + DescrptSeA, +) +from deepmd.pd.utils import ( + env, +) +from deepmd.pd.utils.env import ( + PRECISION_DICT, +) + +from ...seed import ( + GLOBAL_SEED, +) +from .test_env_mat import ( + TestCaseSingleFrameWithNlist, +) +from .test_mlp import ( + get_tols, +) + +dtype = env.GLOBAL_PD_FLOAT_PRECISION + + +# to be merged with the tf test case +class TestDescrptSeA(unittest.TestCase, TestCaseSingleFrameWithNlist): + def setUp(self): + TestCaseSingleFrameWithNlist.setUp(self) + + def test_consistency( + self, + ): + rng = np.random.default_rng(GLOBAL_SEED) + nf, nloc, nnei = self.nlist.shape + davg = rng.normal(size=(self.nt, nnei, 4)) + dstd = rng.normal(size=(self.nt, nnei, 4)) + dstd = 0.1 + np.abs(dstd) + + 
for idt, prec, em in itertools.product( + [False, True], + ["float64", "float32"], + [[], [[0, 1]], [[1, 1]]], + ): + dtype = PRECISION_DICT[prec] + rtol, atol = get_tols(prec) + err_msg = f"idt={idt} prec={prec}" + # sea new impl + dd0 = DescrptSeA( + self.rcut, + self.rcut_smth, + self.sel, + precision=prec, + resnet_dt=idt, + exclude_types=em, + seed=GLOBAL_SEED, + ).to(env.DEVICE) + dd0.sea.mean = paddle.to_tensor(davg, dtype=dtype).to(device=env.DEVICE) + dd0.sea.dstd = paddle.to_tensor(dstd, dtype=dtype).to(device=env.DEVICE) + rd0, _, _, _, _ = dd0( + paddle.to_tensor(self.coord_ext, dtype=dtype).to(device=env.DEVICE), + paddle.to_tensor(self.atype_ext, dtype="int64").to(device=env.DEVICE), + paddle.to_tensor(self.nlist, dtype="int64").to(device=env.DEVICE), + ) + # serialization + dd1 = DescrptSeA.deserialize(dd0.serialize()) + rd1, gr1, _, _, sw1 = dd1( + paddle.to_tensor(self.coord_ext, dtype=dtype).to(device=env.DEVICE), + paddle.to_tensor(self.atype_ext, dtype="int64").to(device=env.DEVICE), + paddle.to_tensor(self.nlist, dtype="int64").to(device=env.DEVICE), + ) + np.testing.assert_allclose( + rd0.detach().cpu().numpy(), + rd1.detach().cpu().numpy(), + rtol=rtol, + atol=atol, + err_msg=err_msg, + ) + np.testing.assert_allclose( + rd0.detach().cpu().numpy()[0][self.perm[: self.nloc]], + rd0.detach().cpu().numpy()[1], + rtol=rtol, + atol=atol, + err_msg=err_msg, + ) + # dp impl + dd2 = DPDescrptSeA.deserialize(dd0.serialize()) + rd2, gr2, _, _, sw2 = dd2.call( + self.coord_ext, + self.atype_ext, + self.nlist, + ) + for aa, bb in zip([rd1, gr1, sw1], [rd2, gr2, sw2]): + np.testing.assert_allclose( + aa.detach().cpu().numpy(), + bb, + rtol=rtol, + atol=atol, + err_msg=err_msg, + ) + + def test_jit( + self, + ): + rng = np.random.default_rng(GLOBAL_SEED) + nf, nloc, nnei = self.nlist.shape + davg = rng.normal(size=(self.nt, nnei, 4)) + dstd = rng.normal(size=(self.nt, nnei, 4)) + dstd = 0.1 + np.abs(dstd) + + for idt, prec in itertools.product( + [False, True], + ["float64", "float32"], + ): + dtype = PRECISION_DICT[prec] + rtol, atol = get_tols(prec) + err_msg = f"idt={idt} prec={prec}" + # sea new impl + dd0 = DescrptSeA( + self.rcut, + self.rcut_smth, + self.sel, + precision=prec, + resnet_dt=idt, + seed=GLOBAL_SEED, + ) + dd0.sea.mean = paddle.to_tensor(davg, dtype=dtype).to(device=env.DEVICE) + dd0.sea.dstd = paddle.to_tensor(dstd, dtype=dtype).to(device=env.DEVICE) + dd1 = DescrptSeA.deserialize(dd0.serialize()) + model = paddle.jit.to_static(dd0) + model = paddle.jit.to_static(dd1) diff --git a/source/tests/pd/model/test_smooth.py b/source/tests/pd/model/test_smooth.py new file mode 100644 index 0000000000..7f77a6f188 --- /dev/null +++ b/source/tests/pd/model/test_smooth.py @@ -0,0 +1,172 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import copy +import unittest + +import numpy as np +import paddle + +from deepmd.pd.model.model import ( + get_model, +) +from deepmd.pd.utils import ( + env, +) + +from ...seed import ( + GLOBAL_SEED, +) +from ..common import ( + eval_model, +) +from .test_permutation import ( # model_dpau, + model_se_e2_a, +) + +dtype = paddle.float64 + + +class SmoothTest: + def test( + self, + ): + generator = paddle.seed(GLOBAL_SEED) + # displacement of atoms + epsilon = 1e-5 if self.epsilon is None else self.epsilon + # required prec. relative prec is not checked. 
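+ # i.e. only the absolute tolerance aprec is enforced below; rprec stays at 0.0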
+ rprec = 0.0 + aprec = 1e-5 if self.aprec is None else self.aprec + + natoms = 10 + cell = 8.6 * paddle.eye(3, dtype=dtype).to(device=env.DEVICE) + atype0 = paddle.arange(3, dtype=dtype).to(device=env.DEVICE) + atype1 = paddle.randint(0, 3, [natoms - 3]).to( + device=env.DEVICE, dtype=atype0.dtype + ) + atype = paddle.concat([atype0, atype1]).reshape([natoms]) + coord0 = ( + paddle.to_tensor( + [ + 0.0, + 0.0, + 0.0, + 4.0 - 0.5 * epsilon, + 0.0, + 0.0, + 0.0, + 4.0 - 0.5 * epsilon, + 0.0, + ], + dtype=dtype, + ) + .reshape([-1, 3]) + .to(device=env.DEVICE) + ) + coord1 = paddle.rand( + [natoms - coord0.shape[0], 3], + dtype=dtype, + ).to(device=env.DEVICE) + coord1 = paddle.matmul(coord1, cell) + coord = paddle.concat([coord0, coord1], axis=0) + spin = paddle.rand([natoms, 3], dtype=dtype).to(device=env.DEVICE) + coord0 = paddle.clone(coord) + coord1 = paddle.clone(coord) + coord1[1][0] += epsilon + coord2 = paddle.clone(coord) + coord2[2][1] += epsilon + coord3 = paddle.clone(coord) + coord3[1][0] += epsilon + coord3[2][1] += epsilon + test_spin = getattr(self, "test_spin", False) + if not test_spin: + test_keys = ["energy", "force", "virial"] + else: + test_keys = ["energy", "force", "force_mag", "virial"] + + result_0 = eval_model( + self.model, + coord0.unsqueeze(0), + cell.unsqueeze(0), + atype, + spins=spin.unsqueeze(0), + ) + ret0 = {key: result_0[key].squeeze(0) for key in test_keys} + result_1 = eval_model( + self.model, + coord1.unsqueeze(0), + cell.unsqueeze(0), + atype, + spins=spin.unsqueeze(0), + ) + ret1 = {key: result_1[key].squeeze(0) for key in test_keys} + result_2 = eval_model( + self.model, + coord2.unsqueeze(0), + cell.unsqueeze(0), + atype, + spins=spin.unsqueeze(0), + ) + ret2 = {key: result_2[key].squeeze(0) for key in test_keys} + result_3 = eval_model( + self.model, + coord3.unsqueeze(0), + cell.unsqueeze(0), + atype, + spins=spin.unsqueeze(0), + ) + ret3 = {key: result_3[key].squeeze(0) for key in test_keys} + + def compare(ret0, ret1): + for key in test_keys: + if key in ["energy"]: + np.testing.assert_allclose( + ret0[key].numpy(), ret1[key].numpy(), rtol=rprec, atol=aprec + ) + elif key in ["force", "force_mag"]: + # plus 1. to avoid the divided-by-zero issue + np.testing.assert_allclose( + (1.0 + ret0[key]).numpy(), + (1.0 + ret1[key]).numpy(), + rtol=rprec, + atol=aprec, + ) + elif key == "virial": + if not hasattr(self, "test_virial") or self.test_virial: + np.testing.assert_allclose( + (1.0 + ret0[key]).numpy(), + (1.0 + ret1[key]).numpy(), + rtol=rprec, + atol=aprec, + ) + else: + raise RuntimeError(f"Unexpected test key {key}") + + compare(ret0, ret1) + compare(ret1, ret2) + compare(ret0, ret3) + + +class TestEnergyModelSeA(unittest.TestCase, SmoothTest): + def setUp(self): + model_params = copy.deepcopy(model_se_e2_a) + self.type_split = False + self.model = get_model(model_params).to(env.DEVICE) + self.epsilon, self.aprec = None, None + + +# class TestEnergyFoo(unittest.TestCase): +# def test(self): +# model_params = model_dpau +# self.model = EnergyModelDPAUni(model_params).to(env.DEVICE) + +# natoms = 5 +# cell = paddle.rand([3, 3], dtype=dtype) +# cell = (cell + cell.T) + 5. 
* paddle.eye(3) +# coord = paddle.rand([natoms, 3], dtype=dtype) +# coord = paddle.matmul(coord, cell) +# atype = paddle.to_tensor([0, 0, 0, 1, 1]) +# idx_perm = [1, 0, 4, 3, 2] +# ret0 = infer_model(self.model, coord, cell, atype, type_split=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/source/tests/pd/model/test_trans.py b/source/tests/pd/model/test_trans.py new file mode 100644 index 0000000000..f69d2f5b83 --- /dev/null +++ b/source/tests/pd/model/test_trans.py @@ -0,0 +1,168 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import copy +import unittest + +import numpy as np +import paddle + +from deepmd.pd.model.model import ( + get_model, +) +from deepmd.pd.utils import ( + env, +) + +from ...seed import ( + GLOBAL_SEED, +) +from ..common import ( + eval_model, +) +from .test_permutation import ( # model_dpau, + model_dos, + model_dpa1, + model_dpa2, + model_hybrid, + model_se_e2_a, + model_spin, + model_zbl, +) + +dtype = paddle.float64 + + +class TransTest: + def test( + self, + ): + natoms = 5 + generator = paddle.seed(GLOBAL_SEED) + cell = paddle.rand([3, 3], dtype=dtype).to(device=env.DEVICE) + cell = (cell + cell.T) + 5.0 * paddle.eye(3).to(device=env.DEVICE) + coord = paddle.rand([natoms, 3], dtype=dtype).to(device=env.DEVICE) + coord = paddle.matmul(coord, cell) + spin = paddle.rand([natoms, 3], dtype=dtype).to(device=env.DEVICE) + atype = paddle.to_tensor([0, 0, 0, 1, 1], dtype=paddle.int32).to( + device=env.DEVICE + ) + shift = (paddle.rand([3], dtype=dtype).to(device=env.DEVICE) - 0.5) * 2.0 + coord_s = paddle.matmul( + paddle.remainder( + paddle.matmul(coord + shift, paddle.linalg.inv(cell)), paddle.ones([]) + ), + cell, + ) + test_spin = getattr(self, "test_spin", False) + if not test_spin: + test_keys = ["energy", "force", "virial"] + else: + test_keys = ["energy", "force", "force_mag", "virial"] + result_0 = eval_model( + self.model, + coord.unsqueeze(0), + cell.unsqueeze(0), + atype, + spins=spin.unsqueeze(0), + ) + ret0 = {key: result_0[key].squeeze(0) for key in test_keys} + result_1 = eval_model( + self.model, + coord_s.unsqueeze(0), + cell.unsqueeze(0), + atype, + spins=spin.unsqueeze(0), + ) + ret1 = {key: result_1[key].squeeze(0) for key in test_keys} + prec = 1e-7 + for key in test_keys: + if key in ["energy", "force", "force_mag"]: + np.testing.assert_allclose( + ret0[key].numpy(), ret1[key].numpy(), rtol=prec, atol=prec + ) + elif key == "virial": + if not hasattr(self, "test_virial") or self.test_virial: + np.testing.assert_allclose( + ret0[key].numpy(), ret1[key].numpy(), rtol=prec, atol=prec + ) + else: + raise RuntimeError(f"Unexpected test key {key}") + + +class TestEnergyModelSeA(unittest.TestCase, TransTest): + def setUp(self): + model_params = copy.deepcopy(model_se_e2_a) + self.type_split = False + self.model = get_model(model_params).to(env.DEVICE) + + +@unittest.skip("Skip for not implemented yet") +class TestDOSModelSeA(unittest.TestCase, TransTest): + def setUp(self): + model_params = copy.deepcopy(model_dos) + self.type_split = False + self.model = get_model(model_params).to(env.DEVICE) + + +@unittest.skip("Skip for not implemented yet") +class TestEnergyModelDPA1(unittest.TestCase, TransTest): + def setUp(self): + model_params = copy.deepcopy(model_dpa1) + self.type_split = True + self.model = get_model(model_params).to(env.DEVICE) + + +@unittest.skip("Skip for not implemented yet") +class TestEnergyModelDPA2(unittest.TestCase, TransTest): + def setUp(self): + model_params = copy.deepcopy(model_dpa2) + self.type_split = 
True + self.model = get_model(model_params).to(env.DEVICE) + + +@unittest.skip("Skip for not implemented yet") +class TestForceModelDPA2(unittest.TestCase, TransTest): + def setUp(self): + model_params = copy.deepcopy(model_dpa2) + model_params["fitting_net"]["type"] = "direct_force_ener" + self.type_split = True + self.test_virial = False + self.model = get_model(model_params).to(env.DEVICE) + + +@unittest.skip("Skip for not implemented yet") +class TestEnergyModelHybrid(unittest.TestCase, TransTest): + def setUp(self): + model_params = copy.deepcopy(model_hybrid) + self.type_split = True + self.model = get_model(model_params).to(env.DEVICE) + + +@unittest.skip("Skip for not implemented yet") +class TestForceModelHybrid(unittest.TestCase, TransTest): + def setUp(self): + model_params = copy.deepcopy(model_hybrid) + model_params["fitting_net"]["type"] = "direct_force_ener" + self.type_split = True + self.test_virial = False + self.model = get_model(model_params).to(env.DEVICE) + + +@unittest.skip("Skip for not implemented yet") +class TestEnergyModelZBL(unittest.TestCase, TransTest): + def setUp(self): + model_params = copy.deepcopy(model_zbl) + self.type_split = False + self.model = get_model(model_params).to(env.DEVICE) + + +@unittest.skip("Skip for not implemented yet") +class TestEnergyModelSpinSeA(unittest.TestCase, TransTest): + def setUp(self): + model_params = copy.deepcopy(model_spin) + self.type_split = False + self.test_spin = True + self.model = get_model(model_params).to(env.DEVICE) + + +if __name__ == "__main__": + unittest.main() diff --git a/source/tests/pd/model/water/data/data_0/set.000/box.npy b/source/tests/pd/model/water/data/data_0/set.000/box.npy new file mode 100644 index 0000000000000000000000000000000000000000..6ad2de625b40040a2d13248dd8b197a0f885bdc0 GIT binary patch literal 3008 zcmbR27wQ`j$;eQ~P_3SlTAW;@Zl$1ZlV+l>qoAIaUsO_*m=~X4l#&V(cT3DEP6dh= zXCxM+0{I#i2099snmP)#3giN=P+50J1|)!uk4+3o3j;`gR1G3Tu!RLSF@z3=(J&lM zhw$WpEv;Y^gKGdXK=Pw%5FvssEU<|obc}}KX!syf1GcchCWg>4ntn#Z2ay`Eg#|V- MgpSelGZe!I08k$V_W%F@ literal 0 HcmV?d00001 diff --git a/source/tests/pd/model/water/data/data_0/set.000/coord.npy b/source/tests/pd/model/water/data/data_0/set.000/coord.npy new file mode 100644 index 0000000000000000000000000000000000000000..8bd448b1254784551c11c2c238af183a8dc0a4f3 GIT binary patch literal 184448 zcmbT7_g~Hb|Nq;24-G99MN(;LoyYAS8d5S+nH91#GP0#XS}283QYwlvO4NBiZIVc4 z5hV#(8Rez;p7-ZZ_@1B6IX|7tc|0GF$NhG@U2hoyLH>cE;$rK>Hd)MH5It|Tg{z5$ zd!&QKNE3_56|2{*4v(6BWJy@ThOqoMYqw$6j$9Fbao~V zx+&1&ZXc>O{({6^dNj=q^y*n0(jHADR$PEo)glCLt;URdhS+22OS+!2biw;Q!W928 z2l)$V&oCybzCvDw{eYURB`@4H91ViUcxCsSz4>WGyTt7&>S8OV>Z(yoS`B907sBYI z5xq|^p)dUpIF0A#+^eor$Wk3li7T&h?=M8qMHO-MX+FVAvlKRRh#ob2??az=3V7<_ z<=s|bqJk#9v6H0xjUAkT`qA7+ynk;q92Od2?j}a%s$Mj8WfQmE@(wP(QlRgSM_3NkEJl%COrf^A52PSvLyW7%I2x&U@%jr35KVTJ$V#q$}5qAjVgvX zji&FnBgxj?jaH5Ef>8G~tlZ4$!krzMS>Z$JpSIJXZC;e>(T&Sf^KeW>hz~ZexOLSL zv^Vc0oW3ujh=B@xpZx_3ZPaP|`0o(xdH|akdBpbVl3KqKSx?+U3R};zo&s_D-timD zbA}1S?>J(#*#eq+TancLQlZ}?TJr&Ys`${tS^0Icp^?QnGt`{Bx;qgoJ`EvFp*>sT zI+HqgfUYd+gyUQXddP=xWqT)6PNM<;ac2=m4Vl2wmMO5*K^{~xG9OU&KY%HfwXyv8U^uxu~cA=_^n3}Fo_iIvZXVU?{u&1^EG zEz6(d#56rJ`BjaWg#|E}GL)A045K^4TxnU1D~H<$U~6GP&)-^8?v-HjkUYsIuV})% z<)v)ReoN}$%OJTk3Hxre^S)zapVr5 zY6>~*vBra+_dQxtZgH#3wsQtMUm@tZDW_0<2HyD|v}p866fSfmrPt{srW#KZKNs<< z)-K1Q)K3`5NTexphjCBbgZ{3+jQuhINVR5@yekXB%wg5t$U#5(8rb^(7^-~ ze~LIPfkmo@I5IQ^pTzp%^U8+KtSp6-&vE8tHCoWNZ#Jpp8}o0pATzg@&{00k+`eaH z?+ytv`*#rear$)HMV|)RLrGf2oqVorpx-mC=#cvu#IDSP@>c`ezGeghZ#Yu<{%D$K zAfQ*P-lF6A0{HDL#pE!^ 
zE;}%y+1KM##d(Nr8&1ZoQZVLwCbR6EO2I27>DD?U(lP}X;5(Bpwk+eD46cAXww}$} z?a6``Pov+hnb3-Hr2NpI$ku3ORendXWkQ#5!+BjQ5v{YkmnyAt^r98P-;i)zm3B<> zr#YJ^`o`Xi_;*TJ{raDs}^{ zIwwg}mq<`Yni;&6$5E)kENtI+4yCOUbm098gdGnjk9V6X=7k*XHolM1ld8C_uQRbY zFo8eRyPN6zdQo?*S8d0DGn0801DV0`xbHj}gI2|`<3)wSvqv6!^bQfH2PIpDHEZwV z`N&apw5%2*W>`?Q)j@h+y^*5bbNJ`cR@k~zk?xrsL*%zY*rraVsaroG&{&g-Qr;kQ z%Q}ix@`b`tJsOjrf~-y($mcFc|APhOepd~;3p}v9<}~`=e8kj$#?*Eo8xu;7c_?29 zqUm{Ztatr5SU8$fK&%lJn7OcLr9r5_)c~cE=UBSIjAm#3#K?OoO!b2}DX;Y-u3VGQ zo(9R5YBUV=z}+MpLlXu#(wa;Ef|Y2%=q>aMMq#$;ZkRsxW^uWW~u&N{fyrox}Q(PuO~C8R^O#V@f@_AxZH(UzyOB>nfZvo`?SB&Pn z{zj#-GRsPHpo^~=UcWA2t3S;`u>3A8To2ml{|e_WE79Zga^ze7lSu?bV(3OGrWWf$ z5or@}Z})4_y*q_GCsksms|{^y9YY7UM&Yl$KG_vqM8~_u$l6Evr@5ZO2XOJq~4!iSgnjj<3H|RFADOne>%x4%n>`mzF0cTn>rWD~m!zfI* z6p>N3xG!f&H=ls?5;6pX7e5kMZjML64AK0xrm@y5z=PsduCvYSKcjccbZnnsK;=&g zpkKHa3viknwa^8t7wFQ)XR7qjQ#6xEYS3^6ZC=g(1AZK?744B3lr_PFCcJyd{p#$+ z$_d?~Gm~?_dgv^wnj~nZ9$no+|%e3C9f{p1J=pRGc5L4(QcKq^HX@*%mci}`Sut9Z9aiN0n? z!(O!(y6Z<$l7bZdvsb3Udu1p#EQnSng=0&Y89AtBqI8lh7Oy&k-0zI6tCX>R_e9~x zwm4Mf+(a&^l2X-2_yp@?>8kxKIw?uG<;iPSQGFkS%Z=%?=h)i%VH=>f@d?I#`Hm$I ztVrh37hEX($y`<_P~74Gx|MB8nzFuLj^02+=5x4zxeqcM-{NeA3O$cIjSw|?EEf!=yBQXAsC6qj z8#b}ay~8Ny;WZ>?s|i;uiihW?5wtG;4fcj7pssf?3RKO=z5E4h?+s+<_sY}Uqqo`D z71c1Ul%sjiF0%4LvuID6CkajEXz&*WG8u6}V5aLzuUA zBeBK}yU07dn0h{)W!>#Tb?n|c&IgL2Q$)C}} zZvARp7!nGpaf9i~jN#<@Fdu8Rtf{Lo1MU)&uxrS0ipU~*x+jQ!l_@ddk{G<&FGH>C zyy%PQoOB(0lf4_&1g**$IPGOl!yS{cR4W1-65ny3o_S%pX#SD8t46bP&T@ah$q?xb z=cj13K-S<9q^3-zh3_3`%%78-RQO92R)0ip*8{;CnFhG+?8HT#3{Kf}C3&Piz#bg~ z(mnP>SbIW|`|)rjHp?6qJ;&N~rgtU8{7p&uMG!eP$g0^TeXyA|E3^)%YG)c&Y1P(4{!$ck`&Y9!{zQQK`pbRt6CD2 zE@w<~vAZaBp9jfn`|v$aFXPrOT~ZN?qLY!=(COw%_jBdwkw`=SC;tt9r_G~{gBh~o z%&C9JVYux&%#d~&2ZY{~cIO6rb0kafWLgANByPY-(SWk*-Xn<%#@UBa?CEkxO3%8> ze8Sq{ZKp+V zP?M(|4K*yq7L)Nz@%L!@*=a@Y*~4JBVIKYXtVzG7T)>Lv8bSS6Ti6|Spj$bT)V41l zIVtzq?OO(X!CM!BnWGjb`|&?`7uK@p)fp&U(1Ud*mssnAO>Ft6skEW68!P=k<4sjD zOZ??Zn`XFpcOewb;gK@Z^l?lT6ZhJQiKl&qfBhY(^nw}g ze`!MLhgl>oRtKcH(b_vMv^gamyQ)-a#fn;7cUg-;f&=)bodK79BS`D@6>PM_{fF6iq13Gv$LI_7^w8YQ#7 za{HxQ5Z)bA%MaYgEX^UbW3?Q`dkvxF-efAvnoiZv!JoTy3HBq!=)a(cY^F%7?R{!R z8k{t7hJV0Km!~UM{xrj3BA#!xA?IPIvEhp@3S;t6XW&2=%v)GohZ@I?K8D`L`#5z# zi?*B8!=O$P8}#PTi|KDXzGzxPMz+c zWX=!f&YL+y;oMAedM!iyo(999C7Es27|-i}TF&V&)j(tHL0q?&5>^HjV$-0ZbZksM z+xGM*yW8nYlfuMl?x+zIdCH1YToy<_4*BsiJ!Obdj$y0*#Ibs*09vvWN?Z{cKN7g?jsQJDEFCX#~JKT)<@CQ$j=PQpJ z#@@8ApONXki=1NV4ZP@8r43i}ge7aVD5ya68`}JNrSV(oi%c(UUVKEG{ZIA`Hr#(D zs!09T1lwD36t-s!R>Ul%8GH;m70b}t>PkG$ljg%hGZ6M?AHQx^FpG?LrFm16SVp5S zWq#d*sP_qUx=$WQ-byozgD&L#EZ;*u`Zac+(G`T6-9n`%QDOWZnA{&q`D-jcm0x16F6{tj;le26iYLnuq|L|O>78+6U*Lg77qX4DD zwwQZpD!EyctLhs2NXK!1N32C_>q&de6_{yq@gWD)x`rCzTAXn92!cK^LD+^uNx@hScHd9X+bw+$E5@JBUWxs?o-B z4gT1>O|);ya|my}!kZ{@oTWY7ej9fjJl>8Gj{S(7;D*E9tEnVp2bGPLBafB&80qK0 zTUW+ose2sXJ*JiwymX+1igxBPW;!)T=i+JJYDBCRXHq74RJZaF`f=dZxhW*FWgZwk7|f6WF+M zI~z4`zu>D$2x-6j#nSg%k;nB`(SLY|bxEb-@lII^bV$MU&>^(t=rFQbHd*}1k+9L$pgX4Hs6nd-qem#v9`z|?(>@Ko%LB;pMgerY(y>9h0qMqS zaP^oZ=`Kf+o`3WGLJq1j4a zH21a|Z(e%|#&fl(x;~j(7GOZL9F1tzr3HLXQVg{^{KErfapL_52v!rINK-tp_ILq&$)b&HM@eWu3r^Auaak@p07cB z&k{`NbVk&K3G9m6E}_@$6oIDUZ=msouzcrzr1MU6HU9+mJ+-780okPIw2mfbAK|SM zjUn!$NmGX8V(+|T2oo|guRofJU9=s!^Ju5 zPZ6rxDr&#fR^XU~CFMGYvhUkpGUNJ5^sGXH6myN}r`uV9;hHJ5DaW1vFu4%PgA)WJ z6DP9#MSgVbW*)}xvY{cRU!f>3h0v(&@ZU9$+tR8>5x*9pxl@_!CW<6v-e=T{&eVxR z1=KCI01^M@Q2(Qw=+el;<6|{2AFVF(*Y&0Wjvg&y%|R3QiWWw-#wxI z?@MrQSEOTQD+O}5LTI+StSCdQ#|si?cFBXuYD+p^4rD{gqL=@7ZW#tTwWu*pf=-9@ zbEl?AQ=hJYPblfa4uetnG<`A^o%f*(k4s#%NTa+D*Q5*EGKKqe^r&~5Dh->ylHYBz 
zktB|MM)1d%;7eaK<;_z$7wJLBA6o_86&f^jwFhp+Zlu$}apdbRO&M9I;rLLImy_Lu zxTb@=ct9*O9X*OnXO3k09)9%2Y#+SzchZSD3!dV$Js+TUt;ZsAI2fvB6 z+}2?i&b?&;xt27@s|QYVkFas4^6^!4&b^v-5lM2EByK26>o40NIn9-(xzDEj7H2wE zwGrR89)-v95p=tGCq^9`O`(V4>FqrMJu>)#h2L`Vp`;oukrup__$>O^as=L!m(q$e z*Kxi0HI8WMkXZF6n5A&o{NOA5)1XAvi>0ZarO=Y7r_7~Xivomy;5JH6Xs)7#1K<2e zxn71g&EAElW$RdDwGw@trp#a4r;1T2S?E=o&D}kijo4;0`chINsG1o}Wp}-(;$0t> z9kLR6Qh`jbV>)eltj%xUTZ5!wdp&Ml8p4h%5uNg>M1;Bc zXQoES5*H(Jt2Dj4;ZMnrB&kMpCQlF?5k8$e9q+G(lfFwaUd=fKZt6L7Sq7lvfG-8e z44_!*1s2UULEp?0gsc>4+~)`QWk*F?{JtMQ>*hyx@4rq+@H+S>^7#MkHK&({-#{l# zm-2pHK{m3+>o@E!k#G{7oOn2%~ z+8T8dFRBlt^?D~Subu*|Ri=Zx6zF+vF=ycU8#byl`BnBUB3&E_(|ePsH)ISAnBL)* z%f5y67af|asLgaWMOlBeGL0y)=g%v}P{ihkc+v6%MVP{AI=SW>yhRc7`)kQUEBf#+T+bmB@7 z-ikdGmTQ!85>Gjf+xHW3r95|jP!XotjiaV}*RZwFk*-cop$Efu(A6V_yklh;jH13k zw=A3fcAv$sL{F-A`GnwE8WiF1A5>0mBM&YF75_}hGCUb|JNsD2!(j+mKZDY>?D5J= z0gn_?A?ExOe@aKu&@;6-Se(tij8kDPDnYdPOeRDUWc4B#|DQS#Lm24SZ+Il z3V|-^UDKlEEpsU|kkO(G(RA&sHTlOZL|gG;{F^_5elE5`v)3@{+p~^_{&Jv?j;-hs z`HY`dm*HW$HUFZ+o7lHXXg-Lh^i3@oP`rTSZCbQYMUk%chz^DGPX9a$>kfrb@}5ByJao3mQ6`f7 z%vg8aZAe}>rVSC@f^E*HnbWsvbc?Byv!Xs-bu<;um_3I!+?&Juo0mdu?`nG|@FbBq&q z%7&8Eh{qW8C;`ef^{AY>?eMaM7b(+VF;D%haT!c95+T_8`B z-xUeQz4c|C3G3<1_g82Nt@MyvqfW6_Ymt2A1h%|r=QYfiLGrsP9W0ioEPogN-$OC# z@$}}kRR=IK${vPSXH(D!KkDDD$_r)hVcC6sQdJM)&dfEZ*j&+j`2_w%!YVr6^B)4n ze#6HKJ(Oh%D6e=Nf*OuvtFq{sxDt(vja%uDb{x$eD>^6ouS4QP8z&@jrij zU=a&O)4!fk0;3r-nWoKd94}dpz|tkq3JDdsZN2L;s;59$k`7Y)ewQ z_aP_BvqP8cr-p4YH2y{bA2)sk<|ir944HDw{auJ#J<}*-M?WmCs8iO)``{uYDLx<& z8nevluiajpPIN?g+8XTowwy+o2IJ~{C!G9u3W-l2z`e$T%G}Rl#N_?Lju}DJ7xuR{ z-N^t)M0eHC8*;Sx#s`-AEfC{fo3VCH8@z5P&=faGS~+`w&6=e{FV99$roA2oi>F|w zSvD3Oeu>DryK#bR;I1B-LDDyr=BoVi1=}cQ z`!!Venm#XkZMw-l3Z?A{Xs$X!oND`J=R>JzR26cJcvg|)^*yZ}k6m+x?T88}~ zqhgkoLv(q-1b0G(Ed62}tg{kfaYR7nRS&UfvmSLM|Hj-^7RXH4g5K6DL0L2CM`a8W zlv)t@AebsUZ(_HP6P;~xqT@0rp*35RCU;-P_qP)uxOxzehi9QH*odYCUWdNu2H z!)8w&rnD``{b%}|_;^=J6o1TBSeCP0I@e)5`wY`vWl5#q-Dy(G6|hz<`Z?`5vb#MY z`B#|=nmsAjy`K5Jjjb)8laKZYd8)P}&fu{-CAd|w-${*-(B6hjkuNl=dlUBkSdQ}O zbpDjNFX~QckjW_x%2sOQrln|7wboy5i)0;?MLwWIZ73Z{A4gNRlyR;{|DtNkUwm9K zopA#%@Tju~mBUQ<^%^UvH}M6=tayWI?J1m3ekyk*G72;^Nu{S9SY@2!r#xYi8v}pTJl_xYB+Vu+nr1~wmwv%k;>2fmkpzJJ$Mnc9dEuJMf!1P zQI07=^M>e>Y=SB!ThF7f7goS`lxQy3+JpI`{B+HSN_ct%(rHLRY+jjQ$%|bWd+QOx zCTo(yup2Om(7^#Z&sNk`31=!1q%~VmVW>po=OnPoKua{xOU!X<#z;}dV>IkLEMo02 zVT&$hte#C%;w(tDG94L<tlMIDNwG{9C`Q20_4()zK5?${n;19L6tT3jdg z`{oNhbj~0lax`rW`-+s>3AnvM2BmUCX?R#ZOF1vb7BtC`*TlWGIDQA$wKXYkUnQHq zCzLiV8&A9E{ekprX)=EA$y8PRN$U1g$n}3kE21v zuw<1Frw{Hz^b|b9`~44aX8vKc47SJG^LpIsePgMAX$W_G&04lp;VH@^CD6CUj0QAq zD80B6Dzk@BX~01kY@drF6GO7{Vf05bi~`$~JRDc-$D&3ZQkXKHNNqG(yiH?;Z6Z!i zDHSm@hmy75CiMRL-#V`5>kcgfs$^(cv98G5E#Yd{sSqz|&%^l~e*V0G`i5ZI-04cn zw2dp6-i15&e_@ngGMlyU54Od=!lPsze!9#i(sZ~D>FvMJFJZ>5-4o269$13vA4OPy z$dG1)NuzbF2Q}PUNZEbLFk93n}f(|tMB0ukTwQFI2%4M40;j}n{`Xr<*insh#zpMI_k zy{T{Es}x6nY|mm={8*Z7D@G-Sa&+8KmHNgmq1WO*Fi;yp`s0(pO5HGjK?C;5OrsM! 
zjPTLs5KCW|0^8D?SSsS*PAb$uEMN-Cb{%IH-fpztKn>lEAF*UeKc<_gBXPbpoa#?^b9g>x2rP_3yOy!JjuSY9Y~Yd*&x&XY7xxsg6Q zfr*V$lzjUPk_}g4pG!PuJ;+6HyD9BWdI6blXR%=6U6_ir?3}ty7_eH+l_hvm>jV=z zsydb>DmB2W$$~MV0qNI|rBxpuV4=J$#bzAFoSE|=aZ{RxpK_w!F$*bPJcO-l%0p|j z1o`!M3+JU)3Xi2OBe8c~@GIDhU9P&+sB;MA1MvvplX&TyYp}uAkR}{bp?<$hT;X+7 z5l7a}8O?r)k48nPJL^wHPbZR_vlO4H{uKug{=&^)k62=LJwnz&K<$cNf<@vXbk*E#pA#qN2=^m!hvC&JYSYL^X-TvV0nq?l=EB--JelNdg zY${@#2Ggz9>1-rk5)8&R37l9oD0>!J>68 z@`cZNN7{L`3_BBLa4Gj3;)hKlFOz*NdW|AmEI5Jx<=??WRLFX1DO@&+xXH<3^fyF_ z8@C}Ek6gP&v!f|JRsH0?N-7-h_ufJ4;OmIC96|rQdJ!Yx%+4mLP}-kybSG1j&do?h z?CQ&qS@jC7Z+2nPR|Wp?{vdjM;41>vK4VXVGlWSZ=0Q1+=FVGS&6-VVXMByn2%q?^6pSb?xx;uLx8F&n2*!LmP0qokr9{8%7M zmSYXs{7r7OsK6e(Dq9(MXD8k&XQQOulP=A^3Guu^q%}{6Rw$2#LFXo{k)J2Hrs_#2 z4gI0|;jx3Q_zksdM^n%liyw2#!NyZr=wk``dZlnQwMPQjuUQ<9Yv;#kjb z?0q6eF>5(Uio4-tL=Lwi--{y2fakrW@Y?zsG@IlhUSdF9oHZTZauGGsBkASz3uyXk zhrn+`D9d#e8CZ?*7-Od^kd)hrBb&u&nt~#i_i{WP8F`-hySzq2ekf{8bg5mG)fD?K zgSXZN?#Dy{f=hL%`r#n5QIhA!E6bBGN1Bf?=)vCfgSb#Rk!(+Rl9_x9r%V4~Z1#6} z8W#(Si)*n+QQ#BmKJ8fWlUm_N3edtNjeZIN@qQgmrX+H}MX9!7DG z=hNpTEn1`e6gHpE2%0p+sbaPT|5PdgT9=GT(`z^kx~Eb1Rp93NRdhW&3-7v*vy$e8 zG?wq?avQ%weaU%&*y3tPL|M_piJDa3XF-*xl1OrnCvkNfc!|7rg%xKmFIZT{5m~E0*B^R$dY~}DfSfeCj zl+!n`n)sNkvOcvCLh9e(^r^F`EZ1b)b^-o!p}>+u$iSh-R9LJm8Q1 zD9*1&c4rZ$&#|H7;ib5g^&9-g{}8j{G7gUHXJ3bPaGNeO`WN?>iaz*`R zO&XhJN^re^Kbu92Q+YNVx4J=mzBQe+cB0nb0hB6sM<8Rp9e+ltQi0P1T0Y=T8@9h; z`So>BKRg49d(DVq;?XcE0D}iLaaRKaFwaAt()`rvr{4jtHC)7Xq-ycoM!&@<(-Q3M zVpR8S4B=E8_oeMMjE4Nc?Xxz_WJoi5Uv@yB4dDh$ttPF~PMr9rPjBz|a9dMzIGK!5 zSg|V^H^0b}_Twoywtom+beus8Z)(xL2GK0G-(Ry;%P4EwZCF2aqI+XHa4cAkmhb4o!$aYuxx^accIIT2k_#<*#X8R1 zz_v)xz7ujdsp!XUH$~%GWF02l*P<`>FX7S^fLT$^g8R1|NCSVF+uD1`E7Bs*Z;rw} zugBv2w>S77?;;&!O5A{`N6|f*EX54yl_;az*snqd8$)o2uY!K>H;i_lgI->nqDIUl zz3i_Tv_Nz&b(-RqMvy4eyX#?BF`juVikcrf)R!(##g#h95@f>T#8aW_OHiA?J^m|f!K#cPsvKy=qYI!% zQf`#EM_-*uc-HzbP#9A3?|LqLHpRjwOOH#??~rMeMY z?U9QI+P`q^W)e5X$kapZ%VPSp{0T-r+XjmaW$Fw(0A(%%Mdz+@>y|9R)D#nPovcdz z|0(eOp1O3vL543gkfV27_M_stH?5i%K-?Bp{*~A_R4)38v5R9_gWg+&LHl?hQgJ98}+VBnTuuc>}uW}paZMw%C=MBfv@(!-|;UlC>6c>1p zxrPyBNW&XNnYFtOg{+S!)pS2Pq8rBFoqreOB2{Qk&26S^P>7Dm;ba~14xfMh!Jbfg z3ZCmv*G+s8*l$Pf=TD*Wix;-`RbzCY9Vt9~#ir*!77h}5!_6z-V}8CoMZdd(BTllg z)1N{9)3t@-V<%!zX+2hn*i%!rY$nmZ3tBJQaA|cd?1v7fT@!y|u+dYN&}B&8Rgp9< z+l-p#r(%O$2`W~}(!=?Q5Nwy>O_zm};UaYk5mP7WbZ;2FxQb8tvjnc1<7kj#A`+ji zryDyjL*v|QWQ%hDHI-VF;B!&bzqrlL2+T-M%mv$aX`-foFolk3#iA?e!rc~2 z(D`W`)%+865<=%dy&;$lT``K!nEu2={DU+%HmCsdiMhh|@FIL|)}g)4F)a6f2b-2R zgN7{oiC_Pu$U&z=5EJJ~fkG=Tq+b^5%hK4YUcg*w3VmqFhyM&?YWg5ab+TWXt)m}~ z6h{cWZ@AJL3c>hGA8_*NB$|KXx@b;xpdNJx`jU~4GgItn{_nH+Z|xG`V>54X-Ld>r=x)pqqT%-|7sw2u4p%z-imihNO>HB(Be!oTqbSQ9;x&ZIfh)Xp08 zt{+Bc8&5#%@@_b)>QM7f8yXgOmR)I0U=tff&!&iFUASM4%hvLs#q}@Ql;I~)q3i*5 zBT?41v=BQlrXXY85x3b`g3+Zs@Y_!~2 zN80OcMAAH$$?pp%`ObBm-_l>W6H_VF9a4|FA}gw?lBCojTJ&M#Zd&*&oIYzL@s9o5 z@o<|Yjruzsk?Zy#&V3qvO6f+svMeRV%h2wqFzOoEg-Vf!s1}}tbWx;?dFhxE4kJv#n|Y0TdyOv>>?j_-RU#||bLbFj~U({Xb0 z5B%MJ4TZ*{8E?}Q6c~PETY?R#z$b!!z8Xy%W~JgI&!c0Mhz$##2gmvLH2HowRb=Z@ zM2tSU$}h!byR(?zaYf+$cPbUCC1Hqa9Jy7UhR&zg= zAy#oZB{Wpy?F}o+cp^bNs#2U<5Qj6#3)6n|co(z}0NBEy6EGz1O`#=k3c{jrRqo@bB%ZVIBRd}A8k*QbPuq=`{(f2U=hB36KUPM1^D5&5+eq(_+u_{m~&2<)~e`}&FC-O zbWyetIc^R=)Vdg!$w~NLI*yji6L~1ZuW%K;9oXfmK|fk=ih2hBLH7I)%)Ttg&uWRL z3Dk{|=h_gw+?-AK8|Lxc%Nfq{=a4&5jXvkRW^xmwXkFt9lI_zYf#waUH9z&Zvg0A* zHYM^#xdY5H)smhk?_<9l<`Ny>jmdiJ$YnqmEl6X5lAg?bQ8jlV_CI(?9pe7p=h0hd zO?gTm;1#Y$(!M*WM=yd_ZcXDSD*NL@i~&!saj=Op&5x*i{VZP9w@|#6m<{o8y4!y{h zs5_DC40pVo-;1nR36j>PsH5ZE&MI=%4bkzl@;AyzMN*+o6&mt zxwwAj08XFzh2YUKNZL48KGj9+knvxJg*`kCE4#!zeS4q3q0N 
z1igEWpWmgZW~?@`qepOaO#*WmH@2a%_v9wts&K;Mt5kRae5nj3UD;kYy`x-*Fq z#8qie@m!27tQ92OT+DwR|G4&uh*b}Luoo}3%6lwLD8bBlV`|)R-$QD09Lrldk4Dyf zg4IHmp z@PMW(eQp_s*B1QxQ6EvZK$|ehRjY)e&Ih4(%xu9ahrvLl@4cdZkv!vI+)U}G65~0{1EBA z_GI3D0I|6#xHUnaG(Crq&+8ECiT}-(=bgn;FA3WBNJ%&#;sx@itRg4vWH^nphSZ%w zbSkbE(>CPdmC`BxR8uC3HB6~xo02Gtkmr}G=+OGecSfMCa31_b znm?oa7yH>G4!`z5PA=dn9!b^+Lv5bI#MOZ0nKVU+d zlbQ(Zza*&5iJ+094q8%;EDbMLBg=!4q?{jyx!XpPMeqT<)enS`Z3$XEi88l76=?oy z=ei?KAyc^(1Mf^}m&$&emAcFlj?bl;7pK*pk=}vS%RO*ds!qn0N7$O?#duWz4CRJz z5#y;wnNcF%FRYhwLS34cxQN_1ZCb6Gj1|tAka_qPvWwD@{PY`V@+6Wf8Z~Kr$RD_j z41|+W8Qyv13T9clQ~1$TWS!kcqs&B!#Pv_`;$+C>hbl!kpU34str8H-$wE;VxkEf#Lop6}64ExNFvCqT2_&sNDcu04wVW<6@AY^?) zjd>MV{8*jDC;G6AtQ&0H@Tv6nWfvqC_aV$%iX|D0rFVy9AaS6MnZzVPF*6DNzP{wK zvmWce3?}IXigb8wJDb;NgGu!wAa1n>1wV?#`Hm-;X%bFds%2=&8cjhiV`yVl0n#f? zX~Bs~ynE~q*@X$ndyxzK2}8*L%?rF*%|YW^JCXuUVE0{7KkUI9Ztpu+QqoZ5%Zr-X z_skmHU*F3vDTq2pGsaNKq>DIzOqVXrxQ@EJA$Zm?nA}8n&0B#CdvIk3OMRA$%Qo_4 z+AAe|Cgn<6#kZM>Y6B`$H(<`T5p?(dArUt;5nA0l`2dB9IC0mAB9v8WiJl_wGe?Qs z%6<5Vx0mr{(P0eknn5$geW-4EEw}LRPdu&c#gj4#wzj?vvpYT@Y_Ti9Yy28IEYX44 zTc2U}djU7++ClEgu~_^+j?Ob6$F~jR+Iuf5Es@ezw6EiAs+5_85JEyxRzgOlltg8w z60)*0qUV0h2&s%xX0lb1O}y9t{op&E`@Zh$JjU-trg^4wFM&-m2s`co6LwkV7MK$Iyq+_2Pch%Tbj48F9as;oSOCB$+so zL(@lOCmPb2Pm<*48%-(6k+{Cyn%XuWM8IV|$jfcR#}gqWWh;fNH!PUkq;!0>{s`ZB zdX#?b5@d(Bv$E<(OfDu#bghZsm$csC`F%YaKF6P(56DJ+)C-XQOWZqZM8kR3Lczik z2R)4`YHSq!=--cQM`dE&j2!$-t-+S7yD)UQHO=ZbkHQ_zsLi31-=9-Z8L|@@VNUGp zjh@wpn}(xAKZFkTtA|BdKe{zc zg0z@l{x1zZKr!?5V=^9F9#3 zg6xfr5FD!zs>%N!a-N`K`$H6rYrq$S-FW6Y6b~k-i{F~NQ`wlqLjQ&mwt(lpjT@{m z_qHC%53r`|I!ADNxgmWBz5)4983=!EN!3%0KT^dBqoVP`Mx7S@QKR`Pf?zF{Cb8-aarA^T1ecwL z(w<0KcGHE{->(*2>pL*er3c}=SFyukzj5r!8#MOn5xiWMl7q&5r0o5S;vFAE*(qy< zn%Wt7Jn9Ud@_VYxWksxNWmFXqO;4VwlDhU)Jo=jA8F%*G*}*}XIR;(A=kf7C~!K&GgE$4T`Wmn2A1@^FrB(K zjiBU}^Tdj#=aG}xf}Mxfk>%E%xc0-FmgYRd{4?tGYBcY*eVj$|U+sXxu+$s!tlyr4UY=!{EXeY)H>R)RsQ9=*d?oHeJRFjmH=qe+5cD zi?DM0A0b>KkYxTC(%*N7nKs=(&hU-wK#&$?>|i8w=sNZcQKC^VD-gIa1WPXK5-YN# z7hlIvfqw`4w5kkyd6)l&n|{^2^07ktg*du&<{>gBZpDVZ_VjP>G4xd#3w68cV%71h z5v@0XCVp0=Q&z1);4RK|2=x?8eY}H+Rfo~5ZX~(A38kqi>f-k^-yqej1BGhan4DHE z0)nJSGB`wR|7$+IoheD|-xKt4oaXat!m0{G3H|Ue4 zO$F!n;gNj{oBwPUEtH(-p`RTGSC^l#pE{6QTaI|O9v+W%`|5D-_$9ntYfMX1C8vr7OGxleeii-LuNY$`fMf_TiaQ{WVaX`dpauc^dW6)THO~pHaWW5t~=Xq5bk$ zc4p;Ja&z4RGvfVE*-JP%koQnK{=g$knPhh4qA}w(yDJPJV{Io=_c6h$>*Xv=-oM_mPHFE;7o4Styygd^G{+CYSs|$EzZGY1kEZQ?>)~@mjo!;E zl1um(R#~zTr=JT|O3Cgts=^ay0i8HqMV@t6p>xU za;`UGY+(!@^RDj+h z3V-~>D9{;7x&4fxpWwwNoz26=@1HSvq9h$KZbR3Puk6jKl}x!VQuNUJ4!fuE0FUD> z=~#!Yr{$k?ELV67HJyj(JI{m;x_v`?-D@_hP?t8%BFZ-~qhVh+!m8{T{5REM44+Sj zggQ}=W+ZJG(w72Pbm7^7c+k8VkbQiYh5HSrdyOTSJTj5)M^)p9>=*6@r-_eOa}2+fFDB<$l=oM%-Ik2+I+wu&V7x zLamE`;Apxs4W4TtM6h90d1yYS zh0KM0CTGe_KZFgcLG;b04K)D@G@?$4w#7SOnRW>50w)N|+dQe>n$PhXlH@qkk3956 zc$jKUZeCLVdEMyT)tf>$tVi0>Iarr@0y7UC$6>c2^fUShT2_C;ea@_DO)5bT{bpbE z9|~I6ohV^ugAkM5ADwfrB68sZB-R;H@D~TN=l$>g9hTH#nuF%(&AfYULyBDv6p|TE zM!{~Zr*bA%u28044IwmVniomUt7ZUL*%s0K-EZNit46XlZP@AK zf;;;LuqRC}^hd3dSw4FYw@gD?ua|!zH{TU1ryAiu;}Fi(=ur1P33^{Ul8R26)APMU zXwG95a^`1b`SP<+I`$sY{{pb5#E{y*PavzInw0tPGXhs?Atl-!zm7cjy3yf7(yxnA zz<7q}d?Qk$9wAavg*0c$ko?iD2={YgW>z*d)ySS21YPuV982A=HOci`DdZ!sdiCxJ z$Ju`aY5RU@3K?IHgwVrm(oJ3Q*1VTq@AI^TUlZH^pXaE^-3GH!zmR1zk*%-Z&hjpg zAa|cmlmz{P@7sawn_Dm)H&BDe_(>S3G#aZs;_-S{2yMG^4=VirHfPx%Y`pf2Md@!v znud%pZKfOP=bPidZg*Ev1l8!ZV5!KNGUm8Y$(4l|U}R2<{7=A^Yg-m2*=STf0!1%7 z$`5bE)6B=Hw0VUgZdW0=`Xf<)hTv1iwKo3Ej&!-gUe&e0*&!HHc4^V^W&R{J^b&s# z^=R*cz38%E#_za>)UUyqv&JUTT%$bp`12-o27bcr6O)9Khwpe^UlmKH`t>*+84tg) zUrO&RbKGgbY zfLKST1N~=pus$BM2bs=#3Nas;0< 
zqdN(sF?^aPO$?kuebm&+e}#a5zH3BHVQtWMoFbl5l#i%PE%KVG3irK=BG<>WkdzgR z)aGPdD(Hn`IXP_HFD;h#y#(*L!OX{~4oI=2(g}ZH6Jti&w^q@eE?;`Mz(XA3{Tio- z$Wg7tWmZtV4+RhV)5EWw*kvM1t@r=nq}MQNDse~FKS!Eu)quo~0BmzPh+oa_WK?#E z^-q|)MOkz;2GAxLl)7&FQ@QwGtVI|RimASN|ZcxAJ6J#u#GWt^xjXG z)ZXpHhjfCwj4HjX_yg7NzeH2d$f0s|7`MVhW>OjrCKvxTDJbozRV_~5@2D0}G1E-vPr zykk5kai|yzM%mD!ovIY6@|?w#&Be9(xq^aHf67yf#rVZPp>fcgLhkgS>v3P|x#dRc z+fp!M@Muc5dj_d-J0UDN4evv}aP6=!*-HL`Bk%F%@*L-sHD?gMN{(Kw+bC|hDo2-+ ze8s098-lv(G1jJs#ZK!-d$?y%#(y{aTpg-vI|QfFv-q3QkH)-npxSinD)~RRJVghu z;LvgzN^i93~X_wF?usD3W`aM6Yq@+^wETPjagNL*68xd0n^uX0STGSxQ! z5mJ7)BBsGi>}q%u@mqQ!rkv>hicm62_$NGS?1m)I8X8|#5CRlcX~|w0GM_m{yrnFj zwr^>{HtlbC7`2*>Rgo2KIBtcqXJ@g(@+*Qn88mN1)0I&xY1vb0TKZfBmmRkSwK1`1 zv&t9u?w`d@U2`JEy9ZdR%4k}!qX6FH*3l*}6RfsTWHiZ_Zhff|{KM{}s-KN#jcGj^ z9(hyjhl_Z7(~=r3^CcN`?`1{N)VcdG_;Dg_U3?S~ zD)n%hp+r*Wf8d<+0nAz}0sl1aODAhm^R;w}G}6G3mojAgs0)S0N<#A1Lbhn`C=xYG zlJ~j{>{@Qmj=xu@f|=>UHkI8hJ?ALeE(Qozo!R(S+?!&3k#uB*2VpYzbo+5{ ztIuurIcFUGZVnPZ7Ksq%+v#;rJ)6B6K9N>69DuQd3q@4&e4j*LXxIBfT1H#=yWN0J zO^!g%^LFS-jieo$ROtCUGn(u7Su`Yl0(`Q^(zkP*DU-MZWgm+0z19xTBYo+`)>oL| z^bi3%{1NDO5b4s|WKmPbbD2FDU*j)6q}2;cB;I4De_v=y7|2;eojhj7;R#bdrn%pxD5Blb{wU-lZle+-9 zkktsY=@5Hv$VHWn60LZzLj8j-3v%~bkS;%7oGM#^M?Bm6ka<$XM3BzUCgHZ?J49bm zCzFJOUJJbWx1Rf~$G2#SC!SqE^RHjV7q@qidUug^i@QWCr-X2ArxtabCua584Wr*C zQjOIb3Rtc|wY!ev&#-rbR{dbkUP~8m5pJ`^7wqWtN?Ba%oJr=isU(=dgy%|qMD9_(J6 zMJE&sP;ESfM3=rpIzpfP^Lo(jzMS_9S3qG6X9MId!Y|3wtjWO&lUXR~&h3j!{c6~> zhk1zT-HjD$w&c=QhVu$@S>!1Nc1mR`J?^>2{sr69;@Rz3@-&ILWfkL7aXW-u&J4{m zC!-Jg^g?PTHF6U+sdObR9yy34uO(pE$rFeiWI?YsMBBz(> zg*CKaLy@}FJ2Bbzpx1{CGyFO;nOsh5)8*PIXgY;5>nDEVN59^A#U!S%bHY}{XF9ST ztz+1jIFLTqg?e2mKF6w`@GklwHIn4t>4ATKvCTimQt+WXaoNS2P|jV#z0ZTo0aP?2QwI-Akiqr&^# z>x2b)-t5rRB~&@58Ci1aLVt21AB!w}3oU@}urFfSDN8YVmN7|&>e0ZnF5=>49T-$> zD^7E~2fs*7j5;uq7QG%#OQidX&5fGj)k~U2DJ~Ftw`tOb2U=t^Yn*tbIzQ*0y+hYL zN$PBW%&csK>CCn;wAb&&L+f5N?$=`6_KczrGg9d0e0ef;evF7^$A$fi(y;K*Qt`%3 zk6Du$=ymm4CiQhKJGyBDgjXY>ARmG`dxo*%P$@Q2q#@jwxeZIdeV*~TkFY0|^A&3E z!E>ECy$sky)7s)WU*?kd?4U8|SHyYDC-c#nkc*FlLixGW2?IXkoO7$ikW;g0L-05} zyUTsqPb;|g=7`sw{5eUCrX+O-IG4R(ue#6RvStHZfAbl)wg3}nc8H_~@E(wOkJsSO znrPiKkj9_Uq33tY*lgZ6mM*-GTyxH3Tx3aEIv=6%d>!lJY$Bt+R*9XpAaFXhPpXm6a(YG(>-N6`+s9_)?&hcr)V zHh8KNmHhUE-OK&#_o@(dUCqF^Ca!VzxsQND8q}OBL;k;YFr+C7hni|Ht~%~c?Kk4E zZ$>+2XO5x`S=aIBj2*cF`RCAaR})o(49IwwJGliNK~|Ru<(%1r3CV-e zlghc1>pdx|zY>#CkN0|Vc`hb2en-@)dsU4M-jsb_2`%-V2)G}O|CoV){SPBaX(FbC z-xU^&@kLUYHfbGFC(i@QVjCMRa@o>fd|cxRPERX`>GE(AaZkqZ#|5D({0j`j-eK-h z7nafcBJMaz)4=H$gb^~abbCM({x~a<;yJ~O53>UWUng$_?B(2tVndoEn+V!8h-^k_9pr2916$_qQe0(;%ARERek_{yS5poh5J#ys~JU0ilD1Fh}1kskm*D%n!e~gUUbC5r#T%V z*MG3+3%)eGC=2Y?I|gaxL?tSckuei&m!b36)-VmwS+Bp9whF`xSeToPwUXNl1zyda&UooRd`P zaZ5WYt{6hI$dC7@UWoSHb0u5PVHhmMxh1DYlBVfZGzC~w%Oq*>&S?Qi3pb#aZ`n`@ znt*VllQ0cGiKQ>>$f$WUWM|&Rh1VbW`F{ZpQwCxPZU{NY7(L%pC>RZPLY;L5%vSs2 zL*L#M9biLSZT6${hy^_?+K8C>~}D|HEXfn8iB zd#H5|3pIl%M9vt&Uq&;prVvb0;-#JlUg}6)W(qX+$u4Xbx3OM{a^kj) zyS%1!^bwq|NK@ymqpV`>9;`F_jc>N^Sf8{LY+KDFvRK-VtxX-cH*+UzS>{2{I+Ree zF=`BSD(y(Qo|r$NmG2@ahhu#Yc%ETBR61wuF6g{E5ycGNr) zq6gShO{OWmnzf&8QoDq|!#SHKR*edF`_L2V^RVK2$pXMrp6Q;{=Qf^AGiw3ujtdB zEL9pFLt>Y0I@Gy0K&;*8D_XNo!qz{OHXa*JPrOw5u7Kxw)%+T{Hy^UErRTBys50sI zXo^R^iK86X?-S6kDW3Et3%T8f)Ns-S!@Nz{>2dxDeQ$%*)_e>Z+|Bm9mO$lSY4J8E{_P1e z5S=Zk#<+uKG+jxCT)20^wq?_HnF&OH!^Fa-O9&e+LFyItENkHog#ERl^KW{fA1Ol) zpXI1BZZwq?x*|c=nXaiGN5DBnK)jLOF@uRB;DXGEfR_mKKg zns+6})2e?=^yYvW!abYdv0sDY+@z5pibAzV8ytoT2tI5;KW<9W?2+lr=b{$p`A(rT z^`^XU%{h)^i=kdAL*=v$HuBBF(Xv@&$}@_RUhjCeHv$U_%dp!!U!*_6hv#kAL8Cm0 zMsmJ^UgT@kA5`XD86C1suZH#7@2up3DYYE4AU8`l&YV}pLSHS~r2ifXt@0wt{Dp{{ 
z;Ykz9rD@OJDD2Hy&IUSLiLaXd_BwUY+Ux4kN=SdxV&b>wv2UIxC7d&5*0bL*o3W$F z;AIQ4lYhf4{;OB{AEFui=CgjUWsw*7fPLAYj9}jpL_S6QyyQ$@TUBzscZI2N{+sBX zr04KpTi(YAg4kPdRI8J$j+HYTH3JJVFAp&WH*?*3^~OfuNs~QQa^f~?Q=Vc-FJr|WU(1J z74triML1Ubio~mYmtd)qHdV}$C;d+!glmQ>^v20X{OHmtd{fSZ)UiM^2??R*m6wDl z`(8xyN+i+Sh#d*6!??CLIPFd1?xMw1)w>-IoNJ+~F~Lg%Qw5V+eH8xNjPb`b=*({m z6z&^C^QZ9cNr(#Bj(dp489i0qgE}#{cbeFuG#;0A?dX=PA0AzeppDOF;kn{!>hYTh zsRscpIVp@PPZ)@=+-c)q4bB_A zDr&owgEJMiC{EVq&)q3paQ?~0FVcbkWkb=~CSzDAbU^C6G09ETKxtSa6k1;+V#Ea` zcn_kV<#KBLWBN(8&5&}9DC5X27PuPLPeS?RexQJlmc7Ylaol3LQUv&VLM)q z+kjo45212`oUp%b6bY-=L(hCQJ=~pv*_)rEeTFnS2XH>O`Ez_IbHIG2Pdj;LamC(5 z8mf4K=YfnUdB9Vg_$1FFDnem*p6>?9`3Fy%4d_~4%I?lKCS}uBQG?KjeR;)my^^M) zcPnmVpsfxC96irc)TWTnRv&79{RY|RWT{QkO_aPXkfby%nBkQ|wzBv)Yc~$Ytmu)H zwc|9>Zds8{FIiHEQ-8r-E)jXPP`QO&D`s znl+qn#Rtg{e7;~#V-w7&|A*`Nz?uK&)K6kwsVRcoD zNF#(kcR7*d#SQFx?ok9JkHa1ddrF?M154`-P}NZ(Hrl!lj(IZF5F}6i?qms5IX9{L z$w={AshvpMybZ;*zO+bQ$6;d^Eh@k7HX0FROZ3p(RdA`wG|YN}=}r74&*wOXtjf z;L}V4s#}vzYMZA}R=`PdWlALezT)h!b4gSz@er;nhVk78Kd{lD3z{#~>5<=Tstp=} z$93jpwJHl2uX$koi?Mj{*^iRr)eyYmjF&|CI!H;?;zB0RENY&^GM?*A*m#FcE_0(t zk;)joQIf9uaF6+JIqT__ie7_1;aS^$jC#!3VEVo2)8nz!;9y3_C(Y-KI&(U9E(X!+ z*{D3-&Dj(i;LLM3xHp3a?9e5z(SNXefInn*=Hfu6yRa_pviK%;=h=A)UEyg3`R{6g^*-HcfbeSw&MkdvDqb6{kSbZdRo$ zbLL^9tcTaoykPMahgz?Vb0me|Kg+N#b}f4vcmaQZ_a&EeGM;&>^Vq(TkyOmH*6uSn zxAVziR_;8EHqV>K*jand?(d5ukNBRH&5`ulBqt7PPxa1%F zj$X#9)0Yv@(F}vf+#AX`FX**+(@(#C^qF(FM+8*!jGH7(3XJHEpF54vxr?WI*0gQ) z8pQJ%PCwg(dU2*wjsH0MEZ@Wo-t5N1u)i?e^T5OVr@k;=GoC7K3XtUJiC!=u-^6pA ze>NIHCiUWKp#W}E`cm*u1sdOYL+IFLK)Na>;-=u+s5RV;*=a$fkUEA6k7w+YQOHpND* zP&=T^w4f?aEE~HFWeeQs$>9Z@rKyDVlP2P8*aD=MSaW}} zoJC0QVO#rO6?AJ`pmF7@$iV*@vfTPoRInVKvoj~tznN4rE|Iguwu-Cjm*b3jJI>ei zg;vBF)Gu_U)?#%Ub4#9HOw^#q-$(P#h&SeKvZuRE{H|G?0C_du1qpGdiE`)Jtl{Cp zC)K0eKd-@>%HA}Np94nj7ucm!(KNC~&Z{GCEI+S);v4%58|joXEs8rp#sY; z_Mx(WoKt0Bk4=9$XIOhYm25C0wL2@39J?RZB?@%?RW@QnQiOp@DWqkoPj7?j5tFtA zX}x3N5+W;JSv!_&rtQVNb&IJX;XI1oH=)r+nVuD?QP{on_*-#-d3hSplzIzFEM5nn zRrlG6%Nk_#=K+#^Ckf?o30U0aO}4#xR&Zki9(hh@Gd^q(Z!5p%+4^{2)wDiav98`m zD2O_W8&S3-SG$k3JHKJmU8CvZ%kPljea%iig^SyId}#2pNkU{uFEoFsWM`h3Bc{}! zYTg_}^dx=yZYfQ7#m`yB_IRwAl_U(AIf#^=??l;sS#syhy|%o^_%z&xo{w{(;k=JH zeu52M6D}adX9eb+dBA6MhBcO!l+QB-={)~D=HYD|@#uoI_CxsiY!wzSwWN_sHNvoh zW6XGYE#6*NsehxqKhIe7P6j~>NIQOPqYvE_1U zD)q1uf9D0Zf4 zJo?Umt4AuL{+F(a;v<8A#C?z+qC&H*RAG=FNs41;lg$%pn)K-=&(xZFy_?wq%eYK& z?UDYFt&?~=QWW+U0gK4dT`Pn9XHrOqyyJ8*4BZ|c3|Cwhi>A#J!3<$sw%JFM(@_h18@ zhq#PNIUJHLa54ylR)4`&?q?Pm*8S64}b>B#6Kd%r@{hdq=ACIE9D2Z;g3vj9b zhRRnG6mU#|V!Cg^#$_F|x6q|u1qQT#>KZcp62P{TE-ia^6^lJ|gy~y>Kbu`C_SR>Z zDQv-&zz_kt${1PEOQli8xmO%N|0B;=ngzz)s;YvJFp1AQWB`_ze)mD7ucwCK{^ zH5S6=^kBOBV6reRJfG<{e`Ze074Ub`NYZ(G6+L}9+oeQ;$iWC>7feKJO_Sh$(v042 zIt|ykfAEs?&-)zU-0CBaG-Uc);eOH@lomMAkD7CE>`B4FSz=spn+GjzdkPP1!(itY ztcY*G@16!M9a4{vGmZ+m9|q7m*M5TIArlsMo^#=LTI2HmzSO73f|4!nKzeaM5{8_D z&G|A6aIvM*d}}h)oKE@b=7^tr7NK`|M!PzI+%LP+s#oENu&zR5CQ!Z5j#8fOhup3V zEIRZ@tiL-A!=9>=;Ur~BvMm;*rF!5x(?`6RYd@oYCi1@UDB5#~XzkdoLU~pfY%H|t z-mc*+=%^O88@__{&_UvNZb#)EUI zF?cr3eXmH>kINx3b}@4ce1!x`6vv*Eg)Q&v+g6@pcF7Ycs5T2pwLJ4b$qII1uSIzW zD@4*t65IndcTF5Bk!Gb# zLW~OCO_@urVrv@oERvS5F(ajMz&N!$T=M%1xu?k(xWj@ptX9!luIG(2c#3Rq4|JVe zfW`<(anc`ua*jWZ*>e_l~{2` zd5a-@lP*mkSWGxMHABf zSq^7;52VUJL_aAz&Ydx)?SFsceJW?eNS?;R1E0{ueT5ChZq)tT}=Z|EEad=lS}|UFcU>>(%%pn%dmT*cQ%& zF+SXhp!0m+<&@`)l2_qKP&edWow2Ve zz3h73EPVLRv-sXzGpo~~RMk2Rem55>H5KqxQxy-}!26#9-*3||jwT;Hgl5^-2%V-$ zS!H~`MZY6>zVQaT#&y=t-v%UkVj5_VJ|eUX$$xGeCY7!eT*89U_RpKMcmE<|NEAH! 
[GIT binary patch: base85-encoded literal data omitted (binary blob, not human-readable)]
zd@kaA2lwBlIkK(sd)PpSF!Fw^LY6y?=~K?{#t$3D(NU8H;%?7FcsyV-TY9&fbgIEP z+RZ)1b7fqp*zzZ)*q&p~HJmZra8FvFp+}#mIbrfvZ6a%;j*;)6nW;>}>q6-8R%-;A zj-*>wx8V0N7uQ-Z;9Wv@1k3x9)&d#I*m4OEbv;nL+P-db< z1^RhH<4iuCgw<2q1Q?i8ds%A1GWtoKWKKN z(bc}#QYuira;@Mq-js?G;bz3g( z9w)LT+y!LNJi;$`)+m<0E06c&k}&b(OqiE_m%KO=?$;#wAdR{51&5YDlb%{7z;Tl+ zDLfG|ey9_Dj4Gj=C&_eGu2ih)u8+MhdT?j)eiS)v$DyJTq(0>_nk>2 z=Y+zWe;5Cp$%kyV4^pQlVy5~$3h|4EhL#zg^sU9~-)|xJ(~@LA*JE1uG^ylk1pOOd zE14>2;(WUeO}-|}nZXa4VM7MyJ$i!Dqo>f{+L5yJ)M(J67DnxsbnECus(9>3Po~XB zd_*NWU3yXB%K~hwN*B)do=<^1ZT98pQyeQ_fIS5#pp<-9m{2^1N`J3M594IIwEZlu ze!sgL4z{M_wIlk#|8%+?5Fc@{_U8{avV z>C={`+f0Rb{`?EqiiwA7k$+u@r9ZmFHa3pp=dN{#?(ItUe-y~q;xPL$XENT(_z8Ec ztmyvFB(P*nYV9$M)-UGF0nwUf@&42m*D{RkA4uz-ofp~NtW65gGE6-j+Oktysf~u zMN{apWC2jeDh6jL`!*mAReblrA4D_$~;ysoCfXZ1qbKFkhr zm1@jFHIR1a{Szcx&R}rxG3kAqHbh_dqV~Vn5G7dC$ml$Jb|{H#w;dN}k6Hy!OBFh! zPy$CjFE`u4y)pxzA#;)j*{ZjrIbt>)s?LLKvlVSJPvbLzKg?lT1}yYIp9jY4G%v!D2BG>2(LN^?T z)@LtzAK8Hy4R_eXC|mk)bOKGB=0mabGmxXc6-z!iQda5~Tz%D_#x$>>bJrc{o3RXC zv|El{ZB6jsnIlZ9h@}ozgTStMdd+k3P7@x%)I*KT>ea}#?g}!j6(LMjr=2|v$>V+& z?Rq3-J-k$DPVHwH2UZ9zPlsT`xlqz8_<^vR^_W^z$$r%;(8~we!cN0<#j)g#R40cU$X_of>f?+~eHMvKIJwutwTl;%Z=+so^qRHM)8er#e7|BIp3 zUu*b1-Ism@DALVEiU=4Vj7=zzzRA}o`j~>+hjNt7IVovgvV0b7O#=&W`CV7eNAiep za_Vyk2R4?W(44>1ej4Lw1t`S26$PCSag1mA&h~BOxi8N2UYI8S`C&zEPR|9!!x!0t zzRyvu&hun$eD3?ima<=7M3;g&ZC_IZnKxx<;4`<^q6clS;k$2?=5}@ zbtjASYiPXtd{VJJAkOMogqC^V(b1JpJ?8Gl>G7V_YJM5oRkHN-Zzp0{3f)p$#~GQ9 zq!wAgyRffVon!>6UksxD+rF}Y3Er%)XC>zGOmXygZe^BvPat|kzdSx zHs-JyDURlx^Y&3}KwTD6gI}P%Z4bWZ^rGe5L#sPHh9>-RrN-ttG{)MA)NZBWK=U?K zODyTmrfGOw*qhexPoO)c)+8r=25;X1$UIz!(+Ywx%rAt-$a9A8++=cBZ9@>>KlS>i zM<2LvXr03*to`TD6l(bW<{;l!cAtg}#Y77FCP%^JxVy1WnUFHA7~~X5ISSh3zIg$P zOp2IBd|z?i+p~@D8l;jr-?NZ(MO$k5Tnej-KJ;uwCCfE<&yGhAr%O(9lz&l^{_O05 z{+S6?EgCGy{#g4HZxiU}TUm_SonbD$mRN^{Gt%sdgK~1kbGXH!rh3^gogcxBho1dpL7-)E8nx{VJ>uX^IyDJcm^`3 zIFoJOZLI8i36D9W#JXb+bmy%em5)2fayTzaVRk*6b+e~%w(f}&86Xn7f?3TOPD&JJ8IAJeaola zXukShJQ_a(d#_qjk#$d6Hh!Ae`IPUYe={KCX`|pQ}6RPG7p?S)qP#CoV7b;}Q@$n*D`8b+OX;c0*!w?T%y>*Ra`=5`soiKO2S>Ab zM;W%}V1c0Fegcmts|jnaw!%=~f%mCfk-E={oaYzQaQQi$J$qeDTQvb|P1Nb|=Ny>m z?}9?gAj;5qi}TBRa3{SA?VK`;W>n9B%S?Q~0q(~CA<=UuOjbH_ z9=|@d#lK?fYqV)?ODx&@IMXMGD3tTeMSL$i>IhzqI+GE?+Y3vnzp)uT_H4t%+EgU* zuKUZ(zrv4SBWYJn3EE>*`1h|Kai(WsK1h$$m@+MHY{s9M687$q3O#7;!95IH(0sBd zj0|*WPV^7hXYCc@F2=!hR45hPSEOeYi}tKc_QyR@RGsN1%`Ho3%g5K??g}%%ytpk` zw#0_semcPlJ7nN|p7S#@enRn$F&UX>u|HM*)NGn54j#7$drp0ljQstI9ojsC*4h^1 zc)0`h%$K3HX`O6}Lk4cOdkMXt_T=5%c*mNe-0nP$%bL4zB%m7S4P|KK$7C_}k2$sH4ix*i$RKja zH9U8<9C;soKMU-*#Z+!d;Z-U}@*5I_ zzpqEqsaxIP6W#*3xG2djT^lmW-G;E(HPAaK+Z5j?1)HwuQi7ZYcQW4*yd?6JJI6xY zdF%|&iRi+pAegpjjiBY*?g?H-JiE*}QiJ5$rB7CUgT*5SQuvT64u7f6c*qN?w|vpR?Fe?uccZqugrx^#Y18gGbpDPmwf=6zx3#MTpFiudeMy<nUORu$qwV3F0ertPe8s@mU{^L&~v3cm>Zk1tbP|+f#O}}SEmG( zQZpLQ^EbvzIG1H5pa0mt#r}XTi2CEjXG$8J7i@*8DVj7ijQ4Oy4xrf$vyiM(4YkR9 zR{Nt8+iN|kE7v&5gn z=(gKfQP=b&w))li&0ki^W-3RK+L7G|{^do>Z_3fCW@BvrJrUz~)HXUi=X~u6Goi8K zEfzEdkyF2)@KLg%^rb}-%kw!1P#;g%oQ`7G<4S}Y)S&f?2j(3mYW(sa>i=K z;=$*I&$y%QV5;b{rZ>F{QWM`iImI%}-(bo#7u3bt(T~d(qp?$fjiMX}65F(2~9Z)U&Bs$er>Ke($(jw5DD% zq)V0_Zi} zN1tX#?txmwa$(ELJUqXw+oo*6m(u2b{>I64!z8oRd*H_!7dN|QzrmFE3icZy17 z49Tn_NfaVuD3S;fk*TE2i3+u!r=lp4A$esMDpX`neCzl91solFuf5iNU)OnZSQQAP zpMiAdf)tLL^l)7oN3oavmW^(AqTiB*$SexyK05E`hWAB~LiTOWU<t(Xm_% zJGXurzs23{+1O=2kZQiM9KP*rYV9|W)L+EX{OdOKY-v2p?H$61aK=AjzC5oS2O6|u z31!_IKq_;)@JGrYZVS(1i~e1{cC8457`PkpPwXl2-4Ap-ZRG-19YyV~W#R}hR-VUTEcI3($s%92i(~$a zxrU@Z-;I3Q=Rm!A9H|U^2HX5tteH^-t8F^)UpJJ#O#O*Hw<|F1PZkDFWc#J^HcVQw zLg=^1o<7`Fp<#QF97B_;FMn7ZD5Mv6{D8$FQ1`OY2NF_nW 
z?fi9-Gye4iJ{M%^RL(6v;L;i{_k28cvu;~$*In_NWCwCNoPpJvd*EO2Nf>xE3Cj+d zk?bRNdTMSiysuTJhezy$Ol#)c{$~S~(utJ!cp8PKD+%ULA0l_%U;L107uSt@4=>W8 zvC~ro{&WI8>imGyBY(j4p$3Nj8AYbUCqp^w2p*KmkW|D5D9@Nh$Hnnf&pft@T2GN+ zmnE)KDnRn*9HDB!Htz3AFVdUeB?|oXR^%@mhZjd2u}I1j(+f;EyT<1tDH4We-6-P9NoL*Fw?grxNGsP9%G{{AlbU(SUl<63P)^flJqh;u@pGVVVjd8g^6`X4wR4=u`H^q*+-=-t~ z%N6nd=W{7I$D6Zeox|SMEEn|Y4_+*)c)@2yXA%j9wK`T%Oq zi=Y9G<}@f~G5+o>Mx|R1n-!%p7YcZ&wRR(EBkm5>eY%~mn6k-A2*l^R%CIvdYy5& zVl<@;ZNk;j+LV-Q%4PJ4=*+o#p5NLZ{kxxXo7(nrd(s2w{PPTC+_52e{lUo?_M@T1 zVR(62h0Z)Hy2gIDdwzngUum&nQrQ(P`^hr6$!FU=IEt!&A|!kNE;o>Oj_!EjL z?HlOM_uE|SvQ1p@?@{#W@pEy|;aBMVuTp$QXoBL;AvAY}96h{mNOg>L``s^`Dy5SI z``bG(SWS_x*d2ko%o(UkPNGX@Jt&KiCyj-FFoxaL##kN)Yl6{^Hana;C4;F&i!oOf z^eM_3`juXMUGy%f%QC)?p%EEdGZta{0q*d?N8H)v*SJy?HF! zl$If4E91LuF(YBB0#(GQ;9`g&#W~HPmCnP+E@2DieXhgC?p`QHFU0EW9`uyW%m##+ z(+&AfIKJP4JIyQ;-0v2zAHRSG6<I??k=j`W6U(zU&{m|s%BwTw0) z&$}A5IdvUfvTEkS5>2SBy8)YgCh~SC9AJCik7l?2L66-o=$>llMjce8`6X$5=l366 z>2$^fSbI-w)O8Les~H1hWIET-5=HCT|EXB#CEl`~t*iZJal}P88uRP|FDrMGQ=j;Y zi@^LX|`kzG>X5W;Y|?1M!<*3PE@C+ zA^e+|3}5C~y`6Fn1}`_^_ej=H(OLr~nGs~7`4*?vKg3b9Q+U;S6E;pyFz)pW{#d_3 zv~BGnK4;!?ZvXg)P!$iurZN*!9%4&7Z`Gj;`qaRDNfRz+Vf73r+LSzu4$qoQW8bk| z17m-bj54A^bc46#>ymR~?!&VG=!sbeG8eB)6CZ(R}#%5VZAzU$z zZe0D%axiz0ZTdv)dN6_S-LMlcZJ*+To+K@=dBlkx#go0td^)p5g;qD7Mu_o3zV_x> z)c=_#s41M~wz52j?#$ub*wQhisk;@2i`USQc4f3C`*IJ*jO0J=-pJ#5J9Jyq`3+Sy z`1!$^%H{CWZhI19mZ)m=P*9843X4k63*78l`h zn45ie0vYcZ&0H_?6wa9SHN7LbS&XH3Z|W`hmb*Cdf-wcTc~t(!j^ZNb&`vpP z8qzf!RozFBANd@MEfZi_J(%vet)X}yO}g6q7H`H5$IaNwh_w2TzxpGXhI&-tkKqam zd-(uAM+<1pS7Z)*1F~t`2S=rLQQvqAvNL1dm8XEygC+FDR)bazy^at2L;3V~sc76W zku>y~6HFoodn2PcBby8%YwSRA=W`J!r@J0YA8Cn4O5ecb76*!XoFYmM^g;ZPU@9r? zg6uVAx@9|bO z;)on8+JATs-h7p%)@>|1;e8!*Jp<{mqaD51-GF{EcJw=L2Z9yGz;nPgoY>_F>*IrH zQ|v!Psa0U^$!bKXzl7)Y5*T1<0tj~Og#s<+;k0ct~djVsz)sX6E zLgyzW;pC^S2oN_4QzSCj&CQe!hI~YdqN>2BsL+oiZo>TV*EsoA7aJv}lDlUJ&0A_H zyxs5^8Y87i=>8;rZK6zD<&!3eC6kxN0jGI<;AkGs&~$d>%XE;l)|d z_tC)k!b9B8Ogk>4Oh=q&as$Wy)QVf1UgM*-DM|j4q!Vq-;k;xU?KriR5`8m-6EioX zXve{F^`H_#&X7G37S!>NEI`aNNr~d{G4nlsl^{l{4HSNw*x#!-U8n5DQ>G0YgD6n+qWS6K>&OF9TUZPE5$!YkX4|>x27tmO-6|$?81kLp!bh)^j z-3vQ$__Zs}h&Q3;W+W$9;7Oh#`{C0upRp}!@Cz-N!b?&6H(4reEk|S4U(Ry8J)O)N zLFIi;P|s-MIP!c;N4!gS=A8D7wFX zfbjFKG$c)`F(6P0QN;sj@f~~mUQmX^_Jb+x;ALnobbyt$84dPkOktaIp7~*Vq8Axk zPM<~54F@G()JMwjW)f79iK zK5a$%<|QYL)BA#^n+G5-1k%PS-c;D|g!dcx8=tTAV8^TiQPJrNe2M>uod?02kBMS^ zw+@`GQzrYNuf@5QEqv^+;gATNg~n0?YCb*%B2P@pH40Oz^BdmjNpUCqN9JxaC5yK zNg8WY(_Jg(S4*YviHuP_c%=}0;1$b5=~KGuKW=zgB~0{f$aB+uY+NQw1!WRsQZt3z z^5SDZs zF=KM^Hu?_c%wUmH6DO#o#AeA|2Z9boZ{k0$X zo81S8pbET~{>y3EyyIWHiKyaR8~^#39maXy#G?LQa5mSbM;lDYy!;}LDBDuupS^f= z*pqS2?dj?YXWD|X6yI}7l)iE!=2oba*u|eF7J5>PWD6JfrWVI!!Z3E8IsK4JhR3LJ zkX*q$E+>N!GF*{1&($LH3w!w90vU3$?6=mR zf-stVK%3UjXbSlY{OuE$jju;Tsx$2#phgEw2hrTg$+X~xGwtjR7StSn;QFV2wAWw}6%V|L zu|Hj?(B?JjeB|j;*E{T4$?oL2_Gl{}L}RzJp5?F?+#t4>&)>ySY_&49o%}cxxp-vB z*JCrAvn)4wf>*3l)e?70G-j+5V_ALYoQB>(QXF$Gd|V|iZW)D-My)s^(EzQ9W~A;d z#rTdBDgWyLnh-pif}+$&w{ix;zq78sX%`A#&%{pFAsuctjqK&SQS9~_w#Mw;@oXFp z`uy^mnCe4LVY?6zH-{#!e#>0a{~=DRHK7eL9t% z{$f6uWrf%tyhs$~?u&C1hmnL-FCOpDh4yU;l#c(!8+$l&FI?V=KY9GbLf33=P;xe! 
zOMbwydpo!8O#eDO&9xmszQbYwOlxf9+~W=D5=)X8{l2 z=#%)f0_jB=pyKF06e~80moUEZmq-UBe*F(A3qnZF_&M%dfc^{jpd*(L!QzJ^saT%I z47Vuk=*eI_|1?NQ3?kDT_t6~i3fmd~e@4wMENz>C*8_rhRgnh;H|SBq^3z;H%75r- zSH~sBfB2j4Mpv1m;@x5+TDqQHiPsLne;@RznE8O;%TJ)1Pe#l-w!k=Jz=Ak4N%4F|{&!&`lb{uNM8K?PO97&hzP(=AH_-IY%CU#3v*P9tad{`FS-RP0S zDS5PQSF3oU;07}tAbsFQ^gOELj=i+&)bDq(dURY&_oOOqb_t)3Z||?)yU(3b?wGrjBr@DTR~4UhOCuK7dAE zPQ=0tP0r417e#narft{m1(DU4O@2;pO1MU^8Fty?PKTq`cVGoFCWx2gj3;2UAmf?h*wHC zn2Te8V4dd1_k>P??f5iw#VBy@Yp!GTR9nie&*bD+>+!X!6KK&I37XPkOO=r$xPua7 z=+;GVAtf&#Ri__t2hZ*1s-uF*IC=-Xc6hLTlP2x;_dh}8Nl2ZCG3+<_ z@URIK7}S9^ehTC?o=pi=tPv7EgC3|d=Th?tT*?#}e@7QirtTCb^%}LV-SD`O$Zb6= zApfI2eY|;E$Q!9bBTAMu+#x`mKvcN{JqOVd0?QnAp+uN@?YSVNO;)wl9C zpEW7&p9W(Z&J{Y+mXb+l7dpl~!(0nh44>e@>xG!XG_L|Sg)+3H#vbm+7cw`~YO37x z6Nf$iLxZ%H@cdRPtoj`j9QIt|{*pHpU1Y_{1d zURSL|+F@P!2;SpNTvy(&H2 zZbi4JmE!Zq_oDQrgLs2#4IJ|e!+{)Y8rv*MO^ZiyAOAapmyNn)702>a9X_Oc(u@q| z&Er%KWe%6P}^<9ODeLz5(u@z^u4IoaZ_N8kMd^Gi|fzh_eUwNIk=N zWtP`Sz9K&6H6N4SL@|f2Cd-i=KwO_3x>&jW@ED$dVYV3$bq-<0RpL-vh@LGLButve zZ5+(p;jD|ZIEJ~et6a$Ycb6!oWg^Kl|5v=<6(rksa(89yxz9!;sjHmvu5XN{pK5v} zvuFl>b$CH>&m>XeT1m1p55-+J|GXlaPT!dy)$a%Uj6U+@H5#07-q}3{JBzuTc)I_dk{@;1Hde>g3^a@oO0En7ae*e|K3t~XWfEbE8>L%TGcSww+x9+ zW2o%B6P0~X5b7hYW2dbaZPyFnLzA?~_@o*I>@X7s%~(n@)ejJMqXoT*wis`}li&4g z430|Q$G%Eg(z&RM_r|Mej&d?>h>@bGj3$=j2@#&iEyRT(hlIs8Qt;GuC!M)Gr`bN8 zPIhGC$GB}&FYAqEITuCh^V;}|!qy7eq90haq#svsIt7zu##3bKH7u_Jjoz}G`gw1r z^;X9PpDE7xlgildb_eL++v~Ve;!ZQ=Zy}D&gTFX@g9gitE!#K=`a@-?)oLbsw=tLf z@BaAO8ch8Ms^W{)GfqD22zKpOBYUS2w0~U{{6?MT`l_yq_r=DN!w_SHUb3T=w;w{| zRh8((^gOKD$ht?X*P`U;a0)3_r{f*dY0+}#EHY#r$1X>z>WG8fAs%t}E$H9sDd;Wq zrS%KbXpwCQ-KcyE#X3Eh{>RRGV+&zfxi{51)S_*`3d)qJhIe)qjBYEDZM-6VWWI-2 zV?y~;Nrq&9RGVc91-y=(PyLyf{JN15nJK69d|EidGZxYE1Ev)6F%nmQ)o_{j>Uj4A zU-3wz5Jbt&#p31+E@Jv)$geP?u1YDc#^aj!663tiKG%lf%>!xR31jYcQwWuYE@M5! zs}Obcag%pia#PQQ(7{15P+r54cCQwZvLyU(rb6doTE*h2s?%>N?B_teRv$ZHo5`Fpr_Z?d7eS8Y(>q=pA(U59}kSC zHsQj_H_PbEBX%A}en%CVz{J3p@z6(MawXf9Usa>D#_4dG5KR?36DTWGmPW>Q#ZvTWkKxke<@kdcXGF3mHRz4T zB=M=mdof|08{N~dM({LOx-s`KCGSY0@`VS5UFt??Y#BlB58U>B{{e=~#-QKou%eFeRr+taCEN zPJF@9kG9@%fyvQ$TuXIiPFOa(Z2!i2V-mg?zl2lgWBg%%!?m@96HJbB&Fr_Lq;j*cyy%Wg{&^ew4X?1#W7m7H^u23d`3g{DJ~_`Wa$ zGhTtdEtRIY6H$2f@GfWez(n|~uEIHvo68NXya369WG>@i6*xC_8uqEyt8x#x3&*FD zMjZRMW_`!4(~B#Hp7y553%j^kg>RY9VK^GPMxc5BI6CKZ1&?m_C+|Col%deXnN16V zsdaYwmZ!r=>^L6$k#A6%S#kYja>2 zVoP!|UodpoHC+DO2JO;2Fk-!&mmW8GJvKx9m-3#UmQu>;)n7yFpGELwbFHO*&UC8h zI@SeQP(R^3_|A9)%rRpBwgc_%(cmWh8pD-bISi9wjDLD>hWP6bH#+z91$Xu&kA@@C z82{RmPV^Mw@i|L;yI&&MoSp$!JuOPvp-SzadwE?~4bpi*f|~9%OgO#{XV3Ukc9S2e zR^H^VE%=MIE9Gctz*SDW@Bwc6zsABccfsii+w(u~#DjnJ5XRex4Z4f?!NdBv1APZ^ zVsAf6K1k@DGLgRMM9_CtMS8TO86N`W#a0KtqM|56_&h%fSLJM}%~ctri`o5Wd?cQ| zOQjKu!XaZcgu8Cc90pTt1^u7T5h7o}?|X3>8+C`!hNDC@DP~br;#&WWjnEu-@(F zM}6F!vk9V|`?h1j!`qk@rA3870?ULoaH(=nxscUy{J%a|9Cei-ml4LaYNQyg@JL<@U9;Nz2v+>VX`^dxvT?M$?#>;I+0#EtPmnUnXYVlw9WTG8CGYso*+ ziR?n#P;x(mbz(E|)N2v9Bh8QcdltYMf)+Jw&~pRen#Ze_ZYS1M@4Fz0lTLzWNpjdR@NDekD-sN?0fcPW)$~lWC~|& z+<>oHX592Kbx>rv(!eQexeN1xXkz3TGC1CYn{3yn^GS}6?H52hq8+&#mPy==Gj6y# zMjI-+A&j?p54y%yWaP&7yfWHod1r~ACHB1AXy#+SwGqc$r779OpYrAuG3Tr!sXjIm zj;8HE-G`CX$TAm}qZ6S)0!-Ad;gi;I8a$yJ=2gu|V6*J^S}nMAuMk<5vO<@dJKevu zgT`z+{9NMk#}$+TJLvJkp|D&F&w6zjf@#2dtLTuIDr_6t_yW$Eh|Rmx=? 
zx?w*pu;0&&HUtzRSi=L$6@n0VWem-~D1q-i+c=dio3T#eI{y4MA$?r|M*Gw;Z)Fmv zvzECwdniviRf zun=V}*(g2t6Uv=ASZS(9={I6Xm(B1V-1?69F5b9mosNMETE%4xJZM=#J~YEOl3w&l zMELe%q5gN2M;MT2>uJ_wTg%Y3cr2Vz#?L%|AkTfLEGN%W~ zh54Urm%%JU%o#7%5cY9*#GAX4Dn^Yz2miro+@Ee9tJn7Te{RLR0BXtz;Qq!NVUE%aBrf(w@#HYZYtBQvRDbeiE-a-Dr#Z7XNhpw!;jir+ zMpF+Nvpt#&&11fxZ69u7s-Y)cl^j9WST` zg2Xtg-HhDycGN$}L0C&C|BrchFF!S>s8LtAEA|)gU2!<1ZVx2wrQSrkH;_0`pO%}R zK$;(8Q`Z>N^=Bqzm={8uWUg@!vkxGidQmZ7Np$^h7++wplsmD9uMdDjDN1LN5PlZanYwY;ohTGY&-QFdv@!S z|IU91vE0KAcy37fCbQ{5v<*1~?1V+eXOg&X47#58}(M<<#zT3!=^&Nc&_!yXGoW`%=a~(S6MMia+9oa4JNdfojAhw2!QH>#~dWt}hcRV0h+?M(42{S;?V6pD_{0QzZo z4)+tx$abL|9mvs!*`6_|i$5reU}t%D(o8JEcYJpArtB(q-#avdB9a^^DJU0xtL#Zr z>Ns9#1R~1d4ovD6A$f@tg*|+X=!UEKS;w-%?;as&AL}#jRTd6~xs${1r+l~44DOrt z4SZT7iED{NsOau6dbza;&(@lg|H^b+h~0Q8Moa@6}lU8sG)?m-hx zgolr6vEb4|l!yXo%`rc!meu5YpDNPdUImh#-&yW`rxn7=UL+1(AT&E}q@O|$<{qoT z%JN;JakHGo_r0vKt8O222x!u`eF`|&GmCN$&7+ALtp64N5R2WK#o6UwP_t~Up#Nzw z#!B0f%0xxhp<%xI36W3@SxLVZ6ZTav;?^n8VE2T}e7)uu#*Xn9UlCW}31>@hhJJ)p zf)QnmOQ%JF5p>o%OVI7k!1TrPG)cS}!o-tcd`hbNEJG7Nv%JzBDO&w4f>IhXP^M@{ z6Q3}~L?Rnf$F9M7<{gR{dw|P1UBNqhY(+%OJG3#6%t+%3*e`76%GW5tzhstJ>ijG? z4{F7eIt%J=;K{9ZOGBb|2O{?~xAq}Rs+`I^w(f7Zj91L9o-lS5N2R!;d0_yo;-d?wUgB%Gv1azmX!@JMqz+`;b|y zO_$kw!UMYO3jJ^KSb zvv4f6UP~yqO}xzwh`Pt6I6EWkY9O^Tr}F-t4s>mZ0_FKS;Df3q@~8e42R$81Cf7IM z1k1bIoeiY1EPJBo?8G=3`hv{+?T}P+qv>Z(;4R~MdbU=h+gXxGQ`2{yDK%#zB^RO3n^b6%9>Vsqh1 z?3`^$x09KpYxzu4I@ZDIRh(kq3oV+b9ZWZU+-St%Zf@Dm3f%JaLtUvYE%&Pi7afZa zWlw}Y<~kWRgmvF{DN^3}TfFpA=2F}2BfR~78r_#SqUeu5nVW;=j7cv)aYviZGp>e2 z;}22B=f7AN){ARmYvI}0Bs#|S%Z<#}yh=TZlkcBXF1f`6*^@6Juatd;9A0o^r597G ze*~%h)2F!!6>!s8C%Pf`1QX&Dg$chE@m0s1+BTGM&VGzTYqcBW^H$Phli?^gUdr_| zcqt<1kITc!`p8vhD`x`v(s%9#Tb%DzZ44GKbvMxJ1gweT7?w_DkQD3kzzly z?$nn6dQtEL|J{|QZ$lNSaOF~Z{DHZ4W1T2j=O6|Z>Y}b`GR~OzlB|6@XVmP)YmC~2 zz+3N-DH=rOyI9sJ)fjhM?{Qh$E>!9@4(juJP}8hT@ges`Q(bmq%E>moOsK}j`OFP9 z{Vxvcji*_SRur~r0X2*4Y2S!Nc*1;BKVrHdhd6ARWI)T*B53+zJ^E26OK*5~w>GK6 zGQpA`Z{tIs{#}BH>r&>Ue~-y7H?i)E3dJ!lrhIw<+dXaOJo5+BEg3`l^G*-9_D0cG zwg;&Qy@S5$^}Oq@tw>+MTn3X=N$UPaOrCK^bmPbx!M|aIXhDMoU-Tdu?^a*%(ms5N zdBytCpGU_bUu97o8m&S9{IW*M@U>clS=TyIogTFU8ZzOV0rh)GD6oX=HzhF7G`8V z#l4X2&@=O=4-Opljtb*TjQcR`O)oaIyYNSATM_+Hkz7<(3BUYSlYb>+z|Iuo3_p_l z=sJ#~W^yQ(Vp*JCV|p&30JT9=>6z>znp(xUEN4Gpby2dH=5JZLwr!hGH^Ch(J6&nM z^((HucMygT4n<;R1R`TBp!Bw$Tk@`hJKj>r%P{BM*CT!6S&a{IpG+uZ`bYek%vk>> z+sS+OGJ3mXk1%fL7Pw`0;UIG(i$N_mkzuIpy-PcTeKWs20!=eOgp@3O=U$xQ33|bhE2ol(>H*Ox2mU zn`JlmSZjE$pR@us536x(`CaD4cA}~0-=chX1b4w7!Y zuLM&So8_#T>Bzi-D*uk)`9|ZsLLmzO=nQj~jHP3G<@nV&`NX`a1X^c4{nxYGVc; z@;4a90sYvlT%YofcJTX;DA18}20~}{SL8b8V8^szS{+HuwOh+qod1eJ{r)0+pAmPw zibuAy0*&aG#fOF>RCtm(XMWQ&nDQ7=})UptbpNdHlylgvzJ+F zRB74<$@n_ahlEbnXCfi(=qA)=TacHJ0qWyJC_5t%+cjb+lV$WK7S(az>Lcjbk4}DB zLO0&GG>L|Br?HkCD5Xo8?WV11==?NFH}s|5M>h*`qZ`rOs70kK9iofGVkd;M$tokPHHSfS;1TJMBAF(XG z?|hAL6C-e2&vMgOeh{Ax(M67bBfe`5r0jri;&HK=C{B97Izt^OIpsp)+!sg;dC9d} zYtXd-#`j~KJcZId&>nFFk*go#?}hC+^V*u$iGwI*EbCC;l%n+Zv3Tyya&gS@EM^%N zvlA&;-WS7o?Kdz~>m42)>R|c7KIFw8!_v3v*dJp{-JzCb9K4DiC1!C?Y%Hi}PXR97 zzggkGU;~P!2Gcpl(jRer7bd^9MZfPR#Dpc>?Gs+ypGYM-H1Qsn&X^*aEz;C}B!YYM zej3GVfUY*RvbiAJg9gZRk)>YD8$JLVwyEIF_I-G(JO@v&2GH`zyBMOWOR3xC>4jb> zhN_8BAA3xkDdR=q>88+TUf3ar#?v(a8q9q@oJPHP#p@_;f|Ro{jb5<{;=$t>Pv|T< zo?OJYu3=OjavCXYf9<5y2g5lRA(^Iy`04q4F56}8F>2-S+3G-=@p|~!$p{&tL0QSB zH15SU#HS3QR%O;j;KDI=xjlu}Iu-()I3TW`6r_)bx5wZfJ zx)YGby4$vr8}WTxD3Si=D77;2r$gZZrRyH*MbP zL@rNw{*C@?B#62&_16~eYuQ7r_x%p*+(5o)(?WV;)P}85%*kix%+Hfu%wLvsfRh*V zc(Ivdm(g+zWHW;UD)XpBN{cz_>#?f!2sifoOLPr(6f%sMLt?KZnO*CLqC=r1e|8i4 ztfHyieHF_?9N}6UleuNj@AAI~NYko$3&n>9Uc$-)BH}kH(#_k}jH$Dgyp3Jxgz`*b 
zx9BN~9uJ^DqnFSqkNa@nOy)I=Dta@Uwc{Q&*w9dEbzI={zGp|+Ra+w3Wdz#?enF+d352bZM)wjq!M$msXx4wq zyky}wywq&wytZZ{aLg}^m-6M_TI}JPJ*JSA_De(@{fjL@0(ZySk2F#rai`<7p(r~I zPY$JEx;@K;PP&N0Bm2=x6}J0^fk1-dx)*{34IdR;oG|yS7V3qYto%* z0%L(bJ#m6N{$DfxzGa@Of$DU05IbXB@1Qiti26wu!SjF*4t~|5=Gfunxi*+qY}~}v zZp(n-nZGbHD-!>{`oiPn;%Itvq814zv1s>`r{wZXe6`GCJm~#=!=xmX53-=?ifn5B zS6Xn&(xjklU*T|uBHPd8VE!Y2;swTOylW@~vU|k}1|n5uV(#0UPGV%A99>+ZBsgX* zr1_C@AF zOD9ooa}Z;Hc+mU$D%`KM#qy(h&>rnbvA#O6UmwFy+LDPaMonlFOOdrC`wiKmhG8*b zB#~YzR{H9TTa~T&qaa5oF86ZrWj=WM`vrzAdB(b{YD6<7Np{U0F8`fAO_z$K6-w-^ zG~0%ys}I3p)+6-h@50C{ReZQ%7>%$|qN6KbVaRePxasAi@{2S#j;lGU|xPMzNh?YoxhjjE~v2Qf%w~k>xp(sUe%eiuHeEuZD ziXT{A_XlI;E4ZWfzT`d04wE!*bA#r`X zwdlug5jn@KK;!6#csDtej;Y>- z)ZE15^cplZx4=&}7cJH6PlRnXSV}iPtU}dv z#_exg!+ePIv48Y#A#wItq&?K6_Xl+;oaIet|J9_ygSz<|+gjWjdO{tqn?|Ybo-O`ROSfn9sR)6p zQTR%qYcF1nduJYE?lmo1{izxYSK2{OqMVa&3lqOoA-sG35?qJ^JyhMty*)4pTV3BH za?4YgRvD4x-yW>#GsNm0IyAWdY;y54rx^oMAX~{8AU7%z;*;W>QlAF zGhB&T1as+ZoLF;`+wnY@OrC8=TKZC2_a_g}KKDRswH9g2(4o7Y_pqW|8n>nlqLAgL z6jQa1RtJ}GXtJU;3tyq3Xb7L5avqMw&LnZ{E52QujFk;KkO(rP;{0sxPX0fU(PRZm z$u$sd;_9(sq&8{j?&8e<=Q`31pbeoC6n;~dn&nvA+B=Be>==jZLGqYhzXM5o!?2vO z9JiP>qw22?O$~Sqt3XSfO&E{)`%}c>pGT00HlzQ)FYr~EOw2oph1*;xt4UEP>PbgV ze-k=1g!zA0S|HK&Fq$Tvg`tA||L4nvpu0Gi-H77qBY3si8%3!jdF!d(q&RW}lXi>PJ1xdyQ{n+D9!8C^gBeFy_vDu8h){p8W z{phVNGiWX>=QK{0BCjS18B5uW&S3!->yCg&>IR`)aVFk1Ns`MOUE2SlmY=p)nYMS^ z3(tO>hA)2wLh=*}|LRIJTDR~4R-J4XEkVD`!nj>IU+{4!V|Dc?3stjMu`cp`nEmU+ zelK%grrVPL{vrtfHJ!pA=5_Q?V&_DH3pGxQpb*AkyEwlF)A~j5SE3)otutPDz_=Op zKZa7Vr302+3!}{j>4?mVrnyo=#VK^ZTSOt<6NUL_j-tBd9V}uu(%&4`DV$1FGvy09t(Y(UuM#;{FJ#`1U|2IY z!5rrG%osKd&mY`}=i_k{GSnL9Mig;3>k?q~O^lVtjcHLYkFBgzTdj14ou6*BIBg(c z_8I#uzCwA6Hgg}DqC4a##3S#aY=j)W)MI(4sk2GP+lpQ^$5Fst9cmdIhk=aA?Wgnt zKQ$uZ9yO3Y`^}_cCnb8iy$9Y!df2{uH*)zjao&gkx}LQUdi@rV%)fgWr__NLS7d4S zb|VVgbqa1?i@DQLt~7S96>XCmfM|tj^hB&hUjH!;&hKyhp4~a<4h1U?!(SbhXuFt$ijq|w4Yj0x9R$p$LWq+2p8AX}ytXnffg$xT1L-`Tw%Chd_ zzcUULm^70Pi)^?%dgmGEPnvH0vg318o`|28E~f8G9^nS#%J?nRq0x<5*x!YRWjDCI z9d%fDRF3L?&J|*PV`%83KWJ!c#dRACUV4oKc_)m)=?^OrzTAbbDX{a=z;>HQhh}~dE;(>A$+-N{8{5D$4{3WOE?G_r#({W?#V+?=okGhzADfd zb!|?KGa&1ii|I_OKc#1zfhKIlnFL#!-Rln3pKjcol2p1V_)=KN1Ei)#L5e~0WYk=R zgtHUrUDG4B`%7T&q>Fe|T8?v96{+N=0`U!E=84=S{KXGBFV~_a&yJ>? z&f*-dNQunj#!)PDpZAV7r`%mi!a$t>dSf<7ux~z$ix<1NjVc$oS-E3r)!UOe6wHyG zwjNE1H^s%p@o3(Bn17zAN<+5h0mBjO_mEfdI27Xwdm`L>q5+z{*-rz6Kaa5qGI=d7<}ae zS5|I9ktVE@8uSiQ#_}}wKpD_i0Ud+>G=n)|y0ee*YB}5amHmTZQlvqnhvtgM`A5^! zL3&=AHrHUTpUQ1~sYX^$H$fqM8~l9aD?7ibp>~lPDL$5@(7^e^gpqHdRv#^-vhT4| z{#?X6vGYhSh@M0XymiDUoN?5qT-P7G)`|Wk^Gchfh6V{0n(OG{%pZvU-Hg0$1MK{L zQyg8R2bZ+FkPMcfH*V&*uy-N(7j7b_CTZI3BcSooETQaJDrPpX5~N>ba>EDu({Gc> zBI7oHvif!acm9u~^A5=IecO1_(vYMnrIIER?Ru``ETc$@mQc!;m4wWO3Pss8C?s1% zp?dB+p-A}}C7UEOWGnPuzxRKkb>G)@p2zX|cx)!U=t-E+=Y-_ap|GImtL-)gh9Qn;d8+Q#{bnr*{&~Ls;WS~ zWCPn-Yb11;_JGmv{!rKKOP2HH=|)C4i}5eQNxcDN`>qftcY0HXp#}vPE@4TLs?^oB zfZB=Y`E#ej(S8>a-wozomc{tFE`n|j*+j#Sd2nyZ8>~2+hDl%cqq~u!sPib83U6IS zH=kuREhrxoI~!4K*qu@|zC+6MeBMheCCXpaDcs14G*<4Sr#Xx9WXBskbW|c5n$ApD z?POgGmXRO#PQ5&miN5+%7`_do(hObk=N!&_QrwQOuk3{Le;Z(*YDjMy=CiwpxZAJH zi&PZk$#~!p!ete4&9;ejY2!Dc#_BL~<;^hi-YT}B#e=4ixgHv@R}4O-ZAzA3a|C z0Ec2V$$Z&WtVlV7TJE|Sbm1YC4D2a>$^@a$C{vjH*O$I1k0nj_FnV{UcZE+@F$$f* zrgRzb{zne{PcFlb2${-{d+bo6)}0b_5Z^d9!r3Mhx?8;H@o7f8 z65k14rOz=~PJ>PT-tEy zoh;|26yZCZ2Oia6-nYkn!K}~&AZi~e5;;fcWysc-M$A0 zL!C)weIb7J;5_5iDx@Vhi5|t8(S{!@X-fnnC$oM~t;xsrzZT>Z+Y8N8$I}C&M7n!# z1TB?&jwdcdFmHY?-}?^}Uvk#cl~Yf#?B812KeZN*##LcHpWSBfP@|6q2XJ!CZecg? 
z1JC6%hM0*}*xwmVL-#1~vr%vQrF%dcSi;x;TwW+8+1Yb*2*~+zLg6-|G!pSkS zkXR9p6@?*^It%WVF>|0und2-+ZYV1{IEA|R?nB9f9!VZJi08-o(gXcq@vQp^L=Mry zFGMq!vy;j3=Xr#j9!b}IRA}Ir4{V3tE|g^55PY39Xu*9mY^v9$v_^ZX5I-ZUl<(3$ z1kv`>Zjko(r8zxrK!1M@Zd`wY(mn35>~f=~^B?i&=PT$|6;vcIy$Z8Zobfe{0xLd>V5YF7Z&GUXd*OK-d9!X|own$Of zWEGEaeeTRG&k*MQkVF1+M%gbKk+H{`jr^`kG8753yStG+_@}t=v?+#H_2u^^1)8yK zy4cZ&JH}_t6qTYLCPC+`{04_U>{PmQ7s?ejH#ZqJVY2j;I|&x1a}UpzUeq?{8uKYIp~Y9i>GMr< zdZgrq-j?yW#GTP%_C(BR2oV01FQkA%1Mkqz~s}v@EBn zsuGU-zJnutzV~BSZz@@G674ZR*f&2-Dx1RhDn*W%a&aKuCTkl1vCWxgHS@+S+k zJ%`Yn>^$7xsZ8U3d6H-6N!;pmB@gbU?>6lO(mcn|sQH($&pHvV`nAxVY=O0tTZ}Xt^0aAAXBF`)omT1<%#b;rW2+Lqy4gZ-`qJAkIElhWDc+Sj@X@ms^5qpGF_i zY1u0{#p{#SpM^rpD-}xU_yxyvGsJb%meGs8-!QjL#8dTDNwVM8ia$o-&>K;Mikq^u zm-WV%ep6|dbOjxHCr4@3QWzf{FF0;Jjf*yG#EvUU!px6u6ur8VY4-J{wl#5>XO}>s zEu~Ds%ZYWV_|SdX!@>&t|6rG~S$Z$289!&*P;%pUq#x)D<7#v#R$DRmKI+AhF9xk$h2E4YSog+DX|beB1~w&zc_4O_l^CvkH?9Mv2>Ap z7j*d`e zs^!n>IWql7cB&$&nP~96Q$NyCm`?3O9BA~&IdEB&1CP_%BwB>xo-rtEJMT=$S&>;z z2R`8wrqDVp@^ z+7i-C{KZb@451yzD)8g|bYZ~;3#>Euq^bn&beP0vj7c3#?}R!@zWEB*#&@zqx|QhD z<}BPu5FGGOfr>|&5Z%{;`hDDwNmZTLyVr+eimDJR<3z6#gG9%v z1<1S@NXc6ZU_NdOO61FV2POntO&Hbne}?BL@4|RQAyiLXM$==?vR?8;@HFLo*8B{i zYWWoQX*&0+sJr6Jv;mZT$da_iR3P=K4t*WGAHBRwq1UWTssJB&X*jwNris2sf#nu zyujk7SSzXf0|6Ear~k zUpQ=Cj`L|GRE^B6@NnLQ$n}j_5X?EGi};-UeH5)}TuWx&D%2)>7-@Nlf<@7GOqm!d z9{VjX+4R4=&>cmr^$Vu<`b_+8UP(Crj7>jpAh}t7M>=8F6QLvZAOV?EE`PS__J)Y=QozhBjhrl+^FY+aqv@nDH;Cd0(J+;(TRmNG`M&Zn#0br2_qS6 z=4_rN#`oE1p7j_R{u_q}*{~+fP55TffzFZ~JmJi%Ze7Z>+9ZTBb`2)gw+rahH@^Rk z9EAm66VW6qOW!uT;VA!m<*R0Mr%``0SAPq6B|98TJc@~zYlP(|_`lJ=9KCen$ej1& zB5G<7Y2K4Q#A}e>6z-aMc!&uNedwa6It485jo72}IEz+?Y=>ULnp%mFTbYW0G2Zl9 zSDTdbr$KJ%D>mlcMDb?9aQ5-ZKGs^WA3^D{?i*#VL#fh*3qoz&O<$P8JHn4D9=wA0 zBnUS|x&y0_NU`1vp5 zIx4YByAaI21ar6#%50aa__D!-oYM@c;>cNcYezj;{#|y^(vrMC+fkv}8>p8V)4v-B z@o#=BEr(uAl8214z@vGu`}Tr=AS<$sa?Q(t+}%iy7~m>o#lDwr4E=h zE|kVCT1Ux+Ds(ra3BND*c003%XPo^b#9bHi+`ndxqP+iBvhY~rimbQ^sF#jLFL@0# z1P5}*N;vDKQ!Z_gxsHsge!_f<=a?onpe=D<@hrMOgvLl%{KF;j;Xxq3K$Gmo z?SN``DLxiXqG(H5I(_Fistda?u}3^r)o#SQohB61u?E`{obWL*1zJP>$U9pDNz!b| z!_o$f&uhi97e-_ikphQvl@+Fk1Ie~v2~#~{00*@o%zc=R z=|^V6&&K4PWmrB=nQ zc-e0tExw>cz1{e;HhwycJW47W<+;mudlJvWb>ZcDB2}+)7>u>2oKXYG%Htq%=h)Jh zzlX6xGYroZD={@R1jrjij+^W8Ip79P%qYUE_GcLNzdh2LcES0UJ^4-%g}Y;u*{9~S z@JlvD_Bk^;sXL4|g+9T{OGfma&)$S%TQJSfkfh7GtLN)sw%GA5dpY|E{1*1))61U~ zD>sdz&HpJQRO3AUD;bQgU;U`~&q>6KN%(N}p*ZQ|T->|Zo7yfY)9>JJ;pMUv35uY0*KDokwrbhn8zc1yO zSXNf?;B{j?{vtqtUtOU8mDiqf9KTLdXBQXwW$A?>~qNf_? 
zC3AmeM0gjg4x2&QcfH}+9z%OI>|wDcRbs3+nIy4E!VC9qbn{q>^ou5Eg+&ddJCizK zr)Ws#IqBrSZW@J}9uXyDcH@I@ce;N#1}?lOGqBx{9v}UOCr*lFCZ|N^@v|tzB^e!$ zhSPk`Bg^33zV$~YL%xCYPyQTbl(S37^G(K5`P=vts6|l|D{y~G1;dD3mK49CB6HDv zOd0$JGT?V$`K`>wnfJl3wW49;f7sM%Nhi$zpjrBzmBt%U@{&pPVd5Yf@p>z4x;V$- zw*pl)L}O9HV3G-*P47GnXq~PyeH@*PZ+utX@olqoK7URu9(4eVR1;}U?qyUbH{nf7 z4@!8fK=SvVqAF4eF)s(uW6n_7;lGNu8y;m5=S=B&=wo;_?5(&n$Q8xwT46e^WEH!4Ei4u)YCLr+~tF4Fftb`RMHTeXhNx1nwfhZ_r;e5)4}O)knJc>`q~m9 zC()0lcHEMNAAi7_Zw$>ztwdVUVDcR8D%#km!?Vnl7WO*`wfW&V7hb}hPaDu@gA<*(d_P*Y{cIPT-8hYAFHXcJnI+`3o}b5cW=Pil zJSCZubY8F+B}YSbcL?%^XW`qEcit{NMRc$(P0>oG-uEJCYo7~ZQQB(Ey{bf)WfSTC z>^q1m;2e+r?NBt5q45he>E7cQ+Ie;}dI&C*k&*`wy@A;1XaNVFa~dtQvQZxkrPnqs zN14h!jOAWgHnALu=XKz^J%o*Hb)n|SV7%?dp zJ|#lPdqH$Hf#;ig#nAeuNBA|f8l7L1NrQ9qQof(WhPoN-C-&U&{Ls)u-ca6XMQ1(I@n%|X-@1C0MDuTsJ^=qU*{*wsZ&K^UzMWF9I9m6@Qm|bs?;sT=y&ck*us)j{-&_Af2F{o z+id<4U1F6^^l8sK#1FQl(L2{ec~K%Bw{jLvKIi&7&Y>d*_$7P-FTw$M^&NIm4u#n*Oq zc$%Ds!&cr@SK>q!ogTs*RT+A3^A*P$W(zL!eQ5gmzDC%NKQ$Zs9Y;(YmxxHVg>=wZ(JQFbJK z+RnCT4nb zTc=>%sYDEe3+L%HNeV{o5~TKrAY)OF^jICLe0vDzOQP5eK9@D`=)ttMtVf4qCz|WD z=;8iK7VD6S=bDX}v*tf6Q8A!?Z+lXnw+>P!nD8@a2>sk_Kr_oYhc@&Wq>cZuAY=`O zA6zMf^Si-p?sTbHr9k@KXW~WvRb=;>NjHR=e!RiI_B{0m<+F@$AQF z@~JLI-*HYfW3ny9?AeP$M*RQ$vk*8k8`Y-Q;nnJjCsR!6<%#zw#ub?Ts6oB`D`@)M z#+UHlLiBS-Dt1*7r@2Kkx$f7Xtjzg8`X;2e*^b7}eu$?P2DCr$0BUA#gY7&+^3xth z9&^7)n%sXdFY6-+&*@E%SJg?wIs2jVmoi544*iZJ!=TT*Lw6raaY}w3s&#*f?V-W= zo~usHX$sU4qa`|D?m-@(hlxAyKfpTc2rNF#{f`(!MOTLj%I{^UV|;hA{PCbd*r!B; zetbvDm2sl{$(0oN@jFiLx{Z*tUnCDQE>_(9Jn;W#y;nG!py5t`oNt)HxfqcYV*MLl zUUi76Hx_C=USQzIB(bKy6&?f_Q$b%n_@&RLhjE;dcVi_fuIr7aO|6pWnmj9zoi1GB z`(nS^z0$>#FTiQBHO*WoM`r!>Xz<-Ma{0skG6PPCea~^PgE>Ddm}a8(%03)u@+GU` zA8}^BI&E31Mt$_>(ul9iQT&E;sg`f#e%I%0Y`=+^_k}3G`$@JXq)-^hJG4RVkI~qT z_e~WF;8&N!)W;dIqUyoy*V*~-@Oq2&V+WAL<`~n6+>JNkkI{Di8q|HPNZy@$cU=e( zdAjtFR~U__iAjF`dQP2nIBLt11=6rC{&nGuVFhxOB*_ ziR30zfZMNA>C#r7^Am4E?ZP)W^Lcjeg<3co8^f)P?|Xh|k#Fic8gk?q%i&p$`^)nEWZ$|PnE5;lRH~1oZpVvZ^=%+MF;JlZ zZBq<&az*n~6Tv>sikdF$L};c8jXX1!wzx{s_+S{tyK0LmL)OBs+LqR3@bgdebOe4Y zf#HeqV^Kii0Y9pT--KBKrN_5%x< zUXC3GlQGeA5a(GvK>YehZ0&j@`u$sk+_l|lALqZ-=iL_Uv}I`CAwO}zr|T$~n}VI& z$J2lm4+<$A;odEW&-Nfofq(T`hM6V_ms?RdOJ6*j$oGw%zwlG~852}$B&#RQbax%X zQH2vO;i0_-{XU`t{g*M+wt5=v2v?+G^Ga}g$m)vH*9~|vV~RNJs4~2=4QNfkO=kCG z7Oyw+_lE8&>hswU>1qWMP0f#zJp*nEI{IB`^xrA$Yc9h2IvW~!i93pUzutCnGA&*@ zotoOp#J_76qh&%*nzuWZCjNJk^MOZ`eBcXIzLq6VbuG%h6H62RtVeN(C;5!rh6z3R zyT88ZBXporVsZO&M04PZ>R(hII9#$W5`BWU&KHb3dczOCaWF zS&*$=G%3&F&Oz>-HJM_KFiinLnY)E`o)Rj3dj^X_`TYNF4QTvT6!nrJ$r&wL{5%_V zFHW%#?IBdaIXid8ctFZ~IY;W%C?~lJL0;ZM_O4B^|KLIQj`Spd&Ql8bQ56 z>QOgjBndCH_;xRo5=KwwfdD<4wo}48o@>-UZyu8L)Ae(JoU7kHQudKP$%V zmxIJb+Kx2hyFM+jdL&s>dIc+IJYbg|YST6D9{#tq1?x?%Xw;3ZIA)uUrA3B(r#gT- zR|V3d+x9pwIfr9bn$%ITTY7(Hcj@w;E6JHZgHF!1!Ra>c%Km;7r5>yC=xDR3Fpc-u z()6hK8t2x$mJ_SWbm^Fyoj6DKD-N3k;pAg~D)Qnr8~1I(v#%fd9pW9{jd~}|9fN2u`U=E=ublJh1iJ%_HaO=INxP}yXP`3rj=U&|3$e{%YbZ*t+${adb?TL zD>-!29!v58e4i7mNB`vVn6|1Ht(9FRqJZBQ6243HQmfhPzCjebaxXuN@XzVFJT*4S zBY9gqzbBasg93Hwj9oNRWd35%>Cq%JndbL2x~v>l zV-!(xbOgOP-Uh|=5`0k3M5$UKjF}AOd|E9w8}%V~1v~L=KRG@xdkA`M&3lbDv{u)S zN`}?J`?n5VEk6NWkCRAi*Q2o}gUPt|G@BlInYsMCh4=ks$m!1|X<~i=ZCqQ+4j;RT zrN3`VH}U;}jpt!pP~n{(%MNka_H>l;v*m+xD!i}VQ?%zZ;0eZ#Vwn3`tbW%EK4->| zszo3TQn5Q5imsYhNY38EuY6~^TE#OR54b~T_I5H_wUUnf zDiVLTcp{~-7eyIn;_i-2oRf8@Ld7TWTBSqFhrY#)m2*j*pOGHLn9#ldF$gx8VBCxa zP)!OYZ66=ruaw~(*2~bjB1w1+)^~c+vA#E$ zPrr1Wl>G#abI0-V&~RFNO_3%vzhsMT!gA0h!;P?C%gIN)#3f zYUX>{P1k+6*R@4(__h^o&3d%ldo3HpJEBFm+{uu4e_Kylkbc*GcI@L=y7!xjh9&1= zZnH_^KhKvX6igxUZXy0P^D&uj57MvIgtpd9%vsuAk-kQa(6t0xHh#dQ!2$H`Jf9OD 
zGNC8R_a&EJ#v^`LIITZ_2q!k>ple?tX6X*Wx|iJb;Qs-dqwb)S?-4Kmx`6j1ROxum zHu1>ZKGZy-o0$AbU>oh*@O@MO=GdE)@e5O$q{aIJ4*f{?NiniawxS2`7H3_GuF5Sc7_ zrbW_cKe6U*EYDS@A?COhbqz|z7Yh~q-QW&A&UXE<&>CasK4;?G!+6be-lm@&=vUKb zbQPXvMpGBFtk=)QQ(D8T3~2J zCjZ6JA@2WiTNnqw%xp|`;tcEv2^zS2JAZEsEzRspt#3ZT+1mlMT!cq_tFTbRmrmN` zpvZ7FWjQ|Md9?}@G-=XWe>uwIdDZj2RqW?u9kS!Ts(>fU@wI9N{ryjl97nfcae9bg z(l!I?-6v5;m;rhCEW!^L9~S82FD_mp$J_$OGlPV!D9gDoskgp>trsk5{V2v9RjZj{ za4_BHOoLhco^fsGFJ`amM>WNnV&~dQ&I62MSBJ}C(v=A6`)V`pa#rxd@0}>B%wq3Y zE{rD>NN>v;P+`C@81~~?kbk3T`oLzmWonSzbsy?*^M|$m1e#i0gNs8}!M(K>^*Ppf zqUJzvJ$@l-#YG(9_x45ai*Q`+Js$6l5f5E4q5rOE^WUFRmjCM}^qsrGvBa3Xn;oda zx)~P-51_jH2e6Yk=dZ?$I?wW+#lJ8*cJw9_-+zFkrV{O5ttJ@uvtXYdEuzNqyLfPJ zt6&&pM?)uW#jZOCu_)}dcquv+6SB-`dC#7-bGVh5>DG-tjUFVvm|llpzNYBQ`62O1 zVf0{wf!H4Z2t&`vP{GQjf?19{`SjBzyLF4i+(#>@@76D{efbTgdD&u5PRdi{qKGnFqu(c_pl*lK0)5G9>Xy87#Xh z0S9i+#N5Xw&@t9vHuH8$n-ooi&wR!g=4T>RmA!-8t8A&YqZVN{!)TX!Hffr!pdOk3 zi3|D_%#8x zn`Y4>vf?h&Mt1S|O)TE?1|50UG$btrsecDaV-|#xj>OpA|A-8hYwJ?va}BC&Qbyd* ziI^kz6v<9a`1GeYeX>&^2V+Mz`mH|Acbv&{YWnn7aXnsH6~S0rjcWh#Ib4;R7+JWS z=SNH_^2jgrY>vU=iv^gk_R9T=hZ~)?+m6tGE9nOJW`(`xo%yE&2TNj)R%hnceN75|9ioLWqhzG*SLJbO#Xd%=!u(M?eOk1md$!IqkongD;oAV(m<~kHu-fa^ePn5M`IwZFgyrBgS+0g7Kc(oJD4cNBE@P#|xe9Ja&gJ`z)&^F5ZU z_;KMPepYBha!Wn3jZ&nGbqWN%)W1xzc0cZI=}S+~kHA2a08;atNmojgNU=Ac4Zqtj z9r)}wtQKw&^KUOf+AZD*KdK5>{^#=TX2IV!jZ7Q@aK+c0DfXC3H?Nt9-8d&^=FSq~ zzv(5MXKq7><>cw<8B1EcJCpj>gwm6mePZ&Lwb<<-M`L$QhuXD#{1@azzy5TgLsge1 zwX0A+C(cRHnSrHI)>M6J4=Nsw#&5F?IGQ<<9`^sq)le3bH5%F9&_)mZ3WWX z?z53g|6~1Kw+Kz+hGT90U-S+(qUclwsCO>I>>IB!AnGo99yX?2m39=@?qy#S^l9(x z2-+5CL7g%gNFUAf=lo83&vHE~Qu~wB;$`$yc^Fv^e1n&Fc!o0P5N>7Nmn=>4q6Xdr zTvwGy2RE$2@5Z;V^p~X(n!1#B>j55L?St98OMZ&=rCZ6d^d^53v)R^32ft=eshP`b)%60}-LdU|AIUl>l85cTqnZ+|hO<(%N zz1uEBKV$EHMN$a06_%HKQ}0_YZ0xus)~W0RAE@9(T`)~_y^izK&B%1O9GU3pLe+K< z5_gT{cQd~Ka^`*4;@|LmGKx-Blpwo!7=53`fA`*PgyC(F7r&Qo`Memug;K0+y@0^S z!^vn`8{}r+#>x${6gl%D3Lc)sO`}f1ue$>s@;xNHDU`5JtN}CmzKZb-ul#Tu+Rw`1 z_i+G~U%HO+pe=Y-!?{>D>?l|}n0^$^Wm%Owk)EVU3%UF7h~Hq^@*$fks&aSE4+;Ki z4yMC1w(;I%Gi!-DC{8J#i#6O26u3u|g6v*W*gppN5a|`v`<$MgEN*<=GP~X+xQ+rzj&$hng4{{4}LQ_&irHmw&Ky&X{qyn1+my z6iROl!80`(=5X=4MBc8uxV-%fGE>`ySl3d{CLB(hmY=ccqbXHv-bkx`Ceh=qC8F%& zD9k?b6L4Nd*?-DVcZ$(0ey+8YQ6{seJ!!PX9P$sc<-@6g}1COTCqN^2m$) zmtJHEdVVbPLmbwR5|CP9LXQWR!Q!+5&U7R(xx04M{B{tM2KOXW-s9A*JI&5ShvUtN z4|t#{f1cISmwX3ol9(PmLjmog3IScOzWeJPv1E zJn`;vC>=VPg}@KG6n2k)O$U^)yT_7|o{=H7{Ax?b>MSwgmI6g?^rb#|e1EjTg#x38 z)2xESu<@~`Z$4+?G$s@->#t$+qAj=`WkM6B-AQ(5H3DjyFkx*g&W$a^ICT{KRl+_@x7v(*HAZyBr9bsaok)WluCPHD zc4I+nC#1P;?juw)gw@V5^yFbSE;Op5_^~1NYut_Gnf>uw_O@7edJ|-}>yeSo3vm-r)_3gJg zGj#wRH&CQJH*4;b%%oWpqsXf~MRe%30K=NTVQ&ha z6Y_n9apUKLn!6Ck&sY`fs@XlA1t>h*fbV~+aGq!T?!Nws=@n#JCfeSlZe|Bj*&_Kq0}=E&dgj|XGECk z_9x3fN3e1b&!fok_t+g5lIgjNDfdxf{o}UdT#GVyhG?<1=5C}=vWwZrl_SDD7?~e< z*6nmYR4%$Bclcp(i_#QK&EPEgh3X_ID~gXk_n>j_y~WR~ui;?l4!nODLeFA}GS2vd6Yl!{>>`rY68dfM9;JjzEux58}_%t5E_G5Z< zJKP2@eg)FFnbSydr7Bt3JVNBBo6@%vU!wTxZc+R75^PT5TigZ*5DA5ilO4}+)Uj2 z+J%)7>ri5F0%t}HBm2m|yj!bGdzQ=6nlW>!M-`t_Z+D~xYtnFMjvI`PQy`b;MPbtJ z_-!+VJ#}1-PX7knGVeu4g0EpfsXR_s{bO-afr8coEex^vjXEO(8u+}L&28O?Vu$C@ znOuW!KlMnTy+mU|54dFZr~i&mp%>Q&@qJP%Tm~PU0OJ&5D$ zK_-P7ZjNQ;Gt_C{iMyDasKwGl+;Qxj6K%W-n1SybzG4`^>t&nr^WH04dh*|Ep&>UN zEvwzA&71Q|>_?%$e;IQj@Gh1e~10xpe z(d8|EB->^~R~~L)^_=@Y*f$Ars@8OGX*Plebi>acN5t{+5pWqTM>A7m;b{Nv zls7F-oZ)pAudfv%t1OIGY_q4!!J<%+`-aaH<)|xf9;>tIPWxB9$I=s>LWgTC^)|1; z9QiMhTUH`vsvCsWe6Btw@G3@nX>vbRA4pb5(p0(WG<4}f} z&@vzO5__`Pua8=fAkvz)ALb`kQTN|7P$`aKTTdFYZB2&a@$o-#*Zra}&-M~@`wbzj zn?G^$mpL&pg`STJq1(fQ#Y3D=+jdx)_76;=YmFlIzLd}p_pi`-@eBWA6{#~Tnu7P} 
zLV930x&A0XbhPuxD~Ky3bF}j}0Q--$N*G%{=;atuJTR&&P_A zJ-ED+@6Qv<&`P>A9W&@5&q%cIlqDao8&zMw6S1!gg zRP;11W=5%wVqEf9>0KdD8npZh4oYPtcebBJ)ocTjJ?JU%>KM#g`28uBJKgW{Jbs={ z5~~Xcq-V(wS;aVW#Gaprtvb^Y%Ng_AHXMR1pDmdTmnW^4E7-CLoD1}Kll0AKd(w&Y z|9^k=<*!po?%)mVRduJ)#qQLPXZz*+E$E+L30&1AXn%VTP0gt=vNWKHb7iQ!=p-r> zjzU-QGDdgfy_BAUaLvb!@@ANj;f~eJXU%=Aw>!f+>~%?Rj|WkYTgYnDruP~|Vr&^Y4XSl4Q*o6=CW|&K)l}g>L_*K-pN6nxApT;<>RH zDqA5YuHffIC4RRuRi$1}ZV9QUw8_cKO&qni5!8MF`dvOWZXb7pZ!;5Ty?zI|pl;N0 zQ)FeAk3Z;rM!M`bKhzvSTWb2FM0DUZL;Cerh+ zC~|Y}P3J>CqPMqHqW`lUXD1&JrH3*BzM~JG`;QH~wm=e|m&Efs8_~MkjdvL@u+@9J z*e7Qr@qZ4p7{?50mh&UnC>T+}AvxOJ+ky(GZY7&vGpWb1U@<8o32X1Xgt~4d-)U_} z(;z3xdZ|LOv1$|=p-j>n)2MHqE1vflLF!!IyAe!=4&K zOz6#vCRUs`kY1@Rq}4`dWNbDL@+bG9Z?HDCmPX)d)z=ESoyk-&bQGPD=UMjPE$F6p z0IMRE!~yZ+iTyi(w`-Tsg4z2KTy+^M6=bONgepm2ok!SmZ?>FgSX`r3>EzziaCm5f zUTtqt7phOrqm+f=#SZAXGn~5a$dbXtEx2_#mbIK4DP{%k5Mt)r;@0*oSeAK7Ds7J9 z2j{OhwXa}z3-j3NWxW5N`xAA;hts>8uchTtzI@gaB6jK@hSGVF-T%Iysp@!9?V=1! z4je{nhI0P%4OeVBn~5f&Rr>an61}YjQg$iOf}viN&s`|d1xB>9V>Efs3c{gYk<^iX z21>VcvEgt9KEjW{AT2z}`Fa3y-MQC>XQjdxm}Sc&?# z|F@4+!qDF%D67Vs4&Ut%{<)v$+&O;tdlx2nS!qy^t}4BfFtK9aa&i&*+5`{<|SRJRTz-y@w{x+6KQ%MvwmznR2Ow$zR&Q)@sUOAOF5S z=kFM2wbRV0=>V+4lxg#bv-oeP2kmejMEz8HVFvFGydE}}OkVPHY~5mf=g*%_AqG^! zcU?=1CenlBDYWYF7?OMZ4}P-_qLt_4Oyn;MH8qo{Xx3>Y_%EXV++%pfpc((E^0R#Q zUv#ue;XJ>I6$*y5w2Zs_dTpl-2mG4?> z`}L$9M-GV_g5L3G-4`L;@+lk9`vq>k@`McM!TlV_S#AE;;a{y!i*Xs>ALe63|H0(A z!IJ)}In(z;Zi1|07EW)sple5mQbqP83Qo^tD^E8fJ|cz5%rvLi>PsMnOq@OON36Dr zL^gLdm!xP@!pagMd;VwCtAvW{RCveCRf2u>qsZWoJINeq7nJPz{-Q;TYRr06h_5(1 zzEXo6#&;LHJI2x1N7teL{TULi^pM)?mY_Xn81e!iWAMRl^oaAXx+TTZ;pjx#+`StG z*p~3#)dbOKwSS}!KNyv@!8526PF581H5KziQ12ery+WKlRBn&Ygb=U%9aEB#jq z-<)buc0-f|wsCh3KPNBD$i?HE4pf()L5XV<>4A2ssLxqKE$=z&zuzwU$e-QrdW_V@ z7r<(c7CDBs;K%s&B)2dC_skS&UiC_hHr2(f%Yxcn-(soORHoLl1A}b;aURD$3^g4>DpLp2zynk1 z@Uh`^dD$8&3^StP8G-2ZJBq4_hIDcJVAO@VlR-fO?LX;H`i5T+b~F-QGqQM(L|wd5 z?Mc0LFW_VEI7(vW_`|y*6A#E!9`C=3$bsz42Y^6)Qp4ml{^#XdbF z8b5zwd0NdZ7yc~7CEH-oj|YNHABa_8!cJsjNl%3an-*!b$2~0hLx(&p9{9+X!?-d8+8q>OWv?; zyc4W&Ybffpx3w3C_IU-%_txSBuV&PQ zD&zav@pM<0=Shr*iaIkcfVZp3$Fo?-m+4Eg*Y#=B(IC-z|1wILz&++y{~&9TJzC^J z>qRe2{Z@|ql}co=d>WPyoKBhi8U6E(4Bc*g1E2q^=sf(fdfzxs$P7`T$jlaslJT6+ zrIe8oDybADMM+yjDWpM4q$ouh4W;3mhUc6lEk&t>RMMoqi0F6!{sS-1bIx;L_x1U_ z-_xG+a?HvC%iqtR8m4vVV7Q5OA8^q|s_mr;8n&RFpS zyBQ;Dz{2y;=vkz3FSl!qS0@tJ`%(WnmB z{WWQ+{yiv4EhqB^FZNDVr+0(m;NUU?XJ(H_oDXA@_84LHI8%7s&V|$YpP1UKPm3<@ zz$d>^qUL)usHIzri`r<8dIK|(&-{u4uYTzJmxfLWC3@^}6tmBmkQo)E5)CD9#bK`!No(l@GRb<1%-UoOPpCwnt%-1?c?$Ku zEk^mJB@~c!hB3M?;aQ0s9o#;EvJ0v)=pTUj{Iox;=n)m(^j3MU=qO&X^%G0McA`gTfC_DU@@ zc1A(_l!DkwRfB#?&Ot_#0` zmN()1kb$(}2+Ko-mtlMI5ZdaZOIgv^`L%L^E*5!tSkGqS-T{^Te~A&SPj`qrW>x{~ zh?CrBo8gqrr^Cx~C%h}At|>2y$LfPxuIHN=G~H1zLA@i*qNu9Z+F(NtmC;ZZ}zk@j?fs<&8l12@f+m0WDnA+N6?^|yXk6T96j#b zCCo2!!+}c*G;`7ccwc7u#|m${-gO;^JPk=&x*iqn3FNYWE-Yscr;8ck$jg_*E1S{i zWAjP#1vR`@vBL6A88CnI8=@};RHmMeKpz!O?Y$Sbee!8;>KzUI-lR=lZnCsQ<1?o` zEf;oa>>e?Ixkg(VoAsd%t!|TtS`+IhuUtrp9~`MVV-?a?T*997!zop58$ReVKD>Ss zNsVAEv&EmVbVx1|)?J48p|8C9`x#VbaRK{ZEM(tb_RO$u!Iuw=RiV~{@g2927yFTW zG+&!?Y6j4hj#M(-(8_(P_=u5{*{ssOuOhC}2`VwO$aiHwDoxyuG1?iN$0;4Usrs4U z)2#~s*M)G-TEh>VybJn|htkV?$(;P9DRl7HM6ye4N5kiFWb81JPjLyPLD^P9Ps=fE znf#tBF)tHM`aXeHWgUU@SfY6gBq{GAhvBbX;rB1dC0L#@dUO3y(DfIx2>~?KPl_(P zXpxevH2*7P4OV-H(BF_mD9+e$dSUF=M>&f^rF0A*ncbNRg2lVS~9k;o;gjOZ%MKjgXnO=8m_<09X!r@%#AHFCu7}WtkT{ARiAG{m~<-Q1Qm*A z+4#h|QvPY>J3KoxN~qAOLPV?!F134*`*<5_=q}{vjJuC0ZRW5&`%9EEc?ivEW{yy0 zb-_k`0|ozlipU$UF<_${Mns=yoh zixxi}Py(&dlj#0{Ol-3pMZ3~cNaIgDIph@x*5#pi*-wfzhU_HS;B6RpPDIxRGnS*K zF5NxdhAYMY4|GjwR9hM59dUw{`%XVYwr> 
z#~Uas)rL?ymw77+@pqRd-HUX<^&^hd8n%I!-ePlN{wplyXCSqp4DW0m1<}HZRNHYC z8mpGGJWCDM>Q*B2rUKpDq(s?~g;vtW79fioVgB}Ani5s!}XKoZ-RuCB;;p51g6D^?rQxMUr!Yjz-~ z{&E_56v)xuh5B^*@DSc?%}hG5V6otH^#*HtW^>)gJUE%i>Evz}hxi~zl56^g-Lf^@ zkY_1K8$XCESfNGX`Q}hzUF8@4ZuH~+cU;$(qmB+=iY=NB6Ez>2@%5KUmXtL z9t8hI0gRz7P1yAuW5>_L&G=fh57wqZdijFkm40LqVM8TuT39#w8Vc)pF7~-Ph0PyB zx8`;son;UoOIG5zMhz||X_N3%l@529lmCnmt~#w04h>r5d_+wkMaENSv-joF8peg! zWicc)NU$=&_CGvzt zts}7JGrJpbFT}68xo8dyqyYbCu&>afM_U?Tki@*AKb#;pYAADCZwB|n1*&DvnB)~k zTjC7Srl^bWiDghr`i}M@eVW^vg_`KA;sX<>(=-on?wyGjW1?zP-4Mo<^?5F8n#?*C z;s%U)e-kmm2Bfs&C%%RFaSqD{(FUh+bfkp2UYrt;xcw3i4|$2Ope=At?BcZ)!>G1f zfmSHJz-#FVkWq<(Oj`;!?w2Di9k&PBhS4;@tqOZm+d$2oSouMbMpooNE4Yhgiwr2G z&W5fP+8~qpiKGwfke)$4a-N*8_!=LLpaq6xU-S`od(Xpe#uIL*pdobT7jgHN_>0xv{t#=ZXX_;IjhruXvK#+aKs@U>+>x9FcYsVe{Y-DAe#pX4o#c zjR~MP?0ML_L5&o9fM zD|(QnU?aYu??&NID!I`OO^CP`jCyS)s#o6$so!g1EIC@3d?*l~$_=SIPmY~aKJ$(% zq$$X3{(iW2|*y!0P+F;y?A8nRG z{m*zTdNrIbyt6`5;AARD+=2^%E2%X*4Th1K+_@7Wbb9;?{+s)AC_8v@j}Tk>q{sD>J2_;Vmw$dfQOCZmTt9Av@!&}42mRqrAGf2lxNPRjh@~?{bu4@Q z3(0Y9xRfqIxn=3-OkwwcLyReLodNwYSWoH7%eZh2WAbHpVIy~Ci$3M;#%xzZN>6

E`A>seQw}y3QWG>*%j~)z< zc#kpcuJ!bs$6V2|eBQpphQe;O@+rs78RM`XT?v~}bx)rVXhzGHmLe!~1of%xL!-i0 z^t#$oUY|W3ikU*MM}82Mq%4NiNhOkr_oGG!C$bv)lB+cqWAeCZ2p-B@KjC|X0I7f*zLf(j*Zf)hq{#>C*~!>^-P4Onp8ANTam1;J+AoqlZAUO%qv$?I@@2~pP|or%89tf zF)#Sc!R`27t|4xodIygE*87_G&D-ZC43U%2QB(u{B2X8SiO!R43mukYV{VqDM5}jP;hg#_pZlq*BrXP3# zRdp>g+|`WYt>e*cwTatk4E zg8Zpr8{^(EU&P`SY9wLji(UJ`Nj8Jomu~C~+Hg&k z%KzJgo!cu#6SWBAS@wGMFe&;fUCg@B-?>RI)C9GX3!<0<1Nf}^FZk52mMaZP!2#!X zEYV%dy`OuG`*~_IrO3X+g*WW)f4+c|W!y{djUt`~iqQX^2$4tUq2HS+^kmcn7+=+- zuXB5lJxI)*IG+xkZdd-}Qb)3HR)O8bw^(#1m~qFWJ82>k;nPvSutC z?SaqpG%5YCGv#aF!Prc7vTi;Dr|_w0TCYcKjRtg}Gms8#-O6n*&Vb*bUyQ}##apEf zuDG@)ih>S5!J5EOOq=-!r}LLXSvM2&mlz2`-d6l#=cc=zjA!unG2fEPdc!;C^AZ6P zv~y+}e5BkN7iuCkzH}6R@A!(yX;QQ@BZ_;rq7Z=}y09~@g_kH;PoXsuw7ZaHLseY3 zU9IM{MbLs&=~hhf(xsu#XTcy^gZyTOk}2bV>ULJ3*zkPC4~-7AT%0AG`@lM>R)Z)+ zsXxU2C&b?zlkm58F7A1)VtK?y?zQMU=icy+AO4BQdLLab_3v%qj0L5S=|^L3n2_$9 zIP$fdL}M>I2&YBkM#)cNkzQ| z_a?gIq-+WNZ!=Ct!3%Ev6-O>x{xr@Qe8a|TQe;zl4;8sjxU`$IsCxfJQMi!?*8h73 z&4kZb7Zbo$`|ZMon_6^r`3)rRV2o?FM~stP!>RmIC+)M#D4a1^fBhK&yY%f)Jz_-s zR&zM4$P)R?*-f2&#Ll=axV3aPIx_OGD{!JFU=0=coIw3f0TOZjDV_DL za@xSyPGj)q>2{x_Z&&`p%eIN>V+dB$mt3Br8wE%#Oya2a$aJSC$|C=!f5)EFYii zPHJjxm|4<~-ZFRG6@Csz`1sRwxhFWf=nO1b=BGA;-9h9zvHQvwxYSgOi)}fqC3hZY z3KVH?(RCq%%~b9a0tNFz1&rHq3x4qmaFaDAX@?P%qgRK22UTeLS9UkWJ}db$eX?SG zhz+5&e8@v5{^a&mn7|l$OFj+cTcp-fga2A_x-hpPl{@3i9 z+5NYdDmj0YrEaeX;ac%eM2O}HBTSy)%}X!5XW3^3-is)+lCKGPh~zUG)V@LB_arKl zX@)X2&UF!v`EOujUoTRP-lDu!30FP0@ahKo__Fd1Yux z)T!L?ChDXUsAo+>e37FC)YmB;Dj$dHLt z6rC9DNk2X2;N665cs(~G$7{0@KP;H+ns-szXBP@$_pGa0;_>?OF+?BI5;ShH{?E;O znA{Ub#mlp?WYJRuxyg}X^hfyhFM(@?h}#q`LmE@nsZc$eZ0aXqmtHgMS?~P);YJs? zhyOT(OVO0SpY1b`rK9=qX->k+oQAA?$Y1%m2}X*!%z>3AmR@lWkLDWC29Mt^F|ocB zUNV-#A`~cni!-fQ&@C==^`KkpRE2=c2hjOX1#eHy;5LsRPg%@Cvu3R;C9^K6gXc`7 zC`ZDu{z8TA9M%V(Zx8p@PvG7MQvT|%P<8Kt-n+xR`|bG<+03QbX)hU@r~s2K>T&qS zCe|f!B!{nUSRMWaPYe`s$@UUt>J{maYLPHqR)#da!-YP+kE@yb0+Vw_L7lOl2D-C% zjb=55Sq~+x7{&};n}ve62Bg}gNVQ!~RH>)Nf0J5<(0(K6z^L&wbo64D1+*7wCRN~C z({gVA2Xl(;IK?;%YhhU>aV@^q8sc7QIuxKr>NVa1I{VOBzC#F2xQEwM6W}s+G6_+P zAzt&BS4g^#T=T)SC8$SyX1^MBPgSB7{rd|)q*haiYa60}wqpOhk%+zF$Co&mx8^(N*#V!R;{VP0%EwGh*PlcoEMs*e)0w=k?4oA9M5>7N z5VShB!me@*)n3?5*5&tMnLUX_d6%IiIgnNqy@tHg4w~_0HfpOSs3$87BLjxOY@Z@R z90SN=_a{zzu^CQOFg}XOXLOwy&2r)AkXO{g-FV|6x@o(NBvwj7QQwe^Y8vo!bG+z% z^8s|MGo|-`neXS2h=MhgDX5n@#@J_7D7&8Qhl$9=-3;@u9f66t5y{E*aD~q%QtYK| z^umkHW}$7E5UdY1i)_ptVl3>Q?nd7CZy{cF9o1RiVp*mtn|sWv65*qenFomRX- zz8qf6fKN_G(=PyO_5gI!qv(?+bGGiaA)BA8xPRK3T=)PlGFdg0iXT`~#?({1+zUtA+p$1U>^hAm z-U#Qdw{zF;&!Q&%0x0}&pzX&Q|LwOT-rdT?I@2g#Yl0fhFEzzQ=0H4oVmvt-cHx6g z50)?Rq=si^m{{OP@13gPzwIa_p7bLv?{%nhnaDVew~^m*AJ0tYa1h@`L9{wWZn+|? 
zTt1k#PS>G@uk*OFX=2P=`i-N{`joZRgo^aOU~sqw<$Ih#?($oZV@@N9Hfj2uZbZ$a z*11HUW-dWHRr*m?&3pd_^wcJ>vqdY0yW4ZvuSIU%$>@pR4_DV8!k-K~%#PKe&4G$^ zIe(6@ZOJ2)Wy}}y7rw!chlCLWf+>UT>*Y?V3%T#^z{6n}bs0+wTCCSp^o%*k&Ugy1 z5;joyEat-s{D&ebLzGv69?DI?sOowYER~~kDI*c}B!(u0ucWd%Ioh7vf+^edg$3;F za875g;C`rp3mq_pBt00HCSQ?L&5y$MnHey7w-B6%3D^5uf}fs!Nu)e=0Hp;MiI1@z zO6XEsy0eh+FM`I9aO?n`^xRCYN=bs@rAbg?EYDrLb7AUPj#D35FW~bdWZqU{jKB&M z6>Xrxo^W`2Dbja~RX8_jDF*y=guQ+k%i~YL0|ml|(S}&SX^MAcq5m&(R_KDb{@c}c;D{z-_Xr50~rd!k1(a81^*EWQbL)N{f;FP=c2BPe5u#DF3s2Jz1<7LOJuFp=#!GIGsucYnBVH*#R^qI0rYSmy-JZ zvsk$CDfR}ck<|e;3M>|*`OhQn;Ok+e%$d-km2r5StAwX%!zj}4Df)Ki^Cus~g70*r zpN!Ra{zN3av(|7+BS_vUX;RzBs2Hy^khzV)nt|>@*;lWY5WjTj5eDAOsJQ$qZ;!@RG1AW zmER{|SC9tDm&W8NvZlJL%)^>*0RM5d%yqgv64Ad4)2$s=gEKhW5Z?XdZ>YL^kg;WdRt=N5O3zu4y4yoY5bb- z&lvw{AlWL#i@1YIwCqDS)Y!8!`ffa}<38e%r~y4o=8ATYJHS4Nsc_6Xgwrb+54A%V zy$9#eVBJMj`A&uwzrKy^Sd$7L#_>Folp^@flf@7_<_mh-z~=M-I(EYiXkSMrV-&EY zNm*oh!IOqXKj1eVV@}&!h2lLdXHi{YK|l7f-7@2i-B_AJgJ;d76Tzv%XXYcFk<)_M zKS%Mko$YZmrqXQ}DcasAL%*73NVjbbRd%M~{TS8@W@nwVLGpNWBozO3O`@44)!e;J zr}zN392~b`xlDaM>U)rYUB-vFyzmNc!fbgi*kKM%B)@~0aZBX4?&OBn?8Oa*Hta0V z#jQeJ#=vA=AZJs=8jWCw`}vd<%=!mDnYbEU4F_#Wk{B8ThSZ=IujS;^q)Ri6zeCkH z4L^@=LvPP}Ug1j^?H93U_xmzh*u^p^F~3lyu0*{oZ#*Ki5v|Q%Id#^b2@BCA*WS(K zBQJ0gj?AOjC@}wZ89(3N7ne<)$lJ9W&f(iYTTXI!w==eJe=Yu=Q4yDN=MWk<{u8IJ zTaS;kENR-<7u?$Tc{FDF6!OdJ!%ia=y6-)Re^%x}){;ql%gt?E-SJ1<48ajH{{>Qo z#w|2TT2l8?MY2yCfg6Jf4L?pW-={hKc+4}FW;aTjm`hFd7M8uTpwKW$!FKl!ED**} z!uj*4`^a_{qC&_!L}6swXxhKDkvZV%8Hem9Hf_I$o`KEaOw0JF0M;=Myw2ZP+by~x z^#Btc0Ivlsb6{>k1NSl}l8F7jGN$C*svI~JThXTy2YS3HkoLT{L`&Tj7%;xULEA}W z8#7?gDBY))bXXkxDYd=-WU9y8Stx%``48GvDzNO&U8&8FuU-90c4qh#` ze6gxGf9daRd{ny(v6y+E?n>b8v&A&*<03K>t5B?ODZH zDECznH|*3@ayQ%zXPLFsm^B&_L*>M!5o)|rObdTk=`~#L9pOzUKSWiGIh|Vm8#bMC z6xp6g`C4IgRw+T46`zi{tpd!sJ+zg(h$YXb(73)&EYI@?lbZUoT-at3C1vBSJ5iQw z1SvN$Ea*gp&R*Yqd%`jJ%5vPR05t(>cn%Js&&Q9|7k?4I0* z^d+n(V(~}h(Vl?Jb8I)e{~ENS2GTHgCX{*`N=*xF=}OcBN}Xd$bs0X`^7;UtkMBTp z-#qMWHm5M@HDuVNN!J=aV%A?rSY~d6`Wp!$;gT11`;=i=-(tEp59RMIJNNS(UvDiB?n8bc z6^y7CmmM35T@(J}=3Vy3n&Lp(P?3RopY=$SaUM-$j&pN-_b{LFTz-!^^Dz9E4fhrO z=t|0L(sFu?f!@yagyr}v@9%~7A|1AhI)N9(cIX>^6>2sPFts(NuJ?bTa_1_3x?RNF zuJ2IT!Q;dkRl&Q)k=8yRM&pvkb2k-y28Y6ib2C7I+`bI8&u-F;-(AMA46g zO3k$heQroP&%WT@ofrIAmV?`8pesn`yvFGP=Fsk#LcWo~baG%9Z=wGkD~?Fhu=Z5( zopc2nrYlGLPK65kYgSSIe~gJgs|wpYy*TGTPpIE>A8T)NMQl^E9d$Y;9Z6(Fu zQJ`Ff7Q7m2&Q%Wof>G87h4~eo+_2`+v{^o#o4vb{J8fl;j}bGFGtd|gW%iuZrV!4n zG?FjQuSUk(z2a9XUomg^a5{cWj$A60XuI7m=H6LGC(Vk50dWbCxZQ%Fj=hMgJ>% z!$|U{BI~>`AGoPHRaI@E2cu)qcGZe3R2IRa#TK_i;!&DBjk7pViCE8ak7AuJeMV&-($<#%6QCy0M zg9Y8>Zi0g#I5-rAKK#+Ng5c(tf5E`VIrrRiUz&y4nc{+#*h*}xvJO^!HEFA<%D8fE zRjK=%zPDbg`(+*H(_Ssm4{b8iToMZ0dt7rk9xY?(F)HkvRBV<{6J7|@i;6r?ij)nH%3~tFS7^O205m}P? 
z1-;~*U;16hGk4f8@y`bS2Y*iP(#^pzgmI6X3-)j9Jv5>}!S06nnf)?ZhF^on=>0NPm>82|tP literal 0 HcmV?d00001 diff --git a/source/tests/pd/model/water/data/data_0/set.000/force.npy b/source/tests/pd/model/water/data/data_0/set.000/force.npy new file mode 100644 index 0000000000000000000000000000000000000000..10b2ab83a233e3e9cd9d3ce05cda586295edadf4 GIT binary patch literal 184448 zcmbT7_g~Kc_s3h3jG_`H?L?uJk*?P{At58mCK94jR%A;<#<$PE@V$P!UO&BV*Y$dx=i}V(=OoUUGkxX)dAZGU+Xt;!xpDc1L1PUD zNxWK<%hqgM*?E2A>SgOzc78V>V`XVDa`dRdmIgZw z{(m1;D|>zH?I(dYakw}oqnxVJyNilNVHkYdiF=&C2jaQwV$6v*u%mwl2lm~~>51zAAHif}gxIU2K|KAzlv58BlW&59AbHabjot6kTJIVr za-zw5_k+T>+1$iZGQWBTU$~8w{>zL5gVl=|{WJK!*=&B0^ftN`m&E1Q=U}9J9NTGBl9x0M{pugltwa+z z+cDZff7eOAqcw#WPv67i0?bgm^f;RAo&o*N-)4D7W8AnypI5D%!t=*1hNLy0c)9Eu z9XOG|m;V)tfh#-^4|I{$U5er2Ltr<78j=)Oo`%Y~mNMJUKiRhE8CA)OdGyju=u?p)8p+P1?Z09Gw>P5ue`Be6Py{LU z-oOjy76@~#N5hUg#^{qCOP6O3;NG~7N2k}KR>f#EzWtE$_ooX+$AU;Pc=I6fOCmd^?EKWb;zYhj(6bB z+y~HHd`N8kaS}8ydV*OsiB6+m(JAXO@H*EWBaFLKtNSwi{$L?rx$g>ThioxGQ4BpA>p7K_D%asmM8yW?z?j&8*NN04Ibh3#%O6$crs=^ zGn0LswGlUUQ^OQb;N{&Q_yRY ztXB};YgY=><~$|)(Ec2`>;O!Q2}h%4XCTl?kIf2V;q<8z7_0V?JT88vrp8jM-I}CST#_J|ldp^%hooUgUpv3AjjW z8hctl#mJj;MW5F{VVS2dI~m>=wl&N|E7am7?e5~SSBBu7xE9(+>%fhd#JKw*j7B(-iszw+R;fbSJ3{_3ZvA|;?jate81=!RK*6tpneRGd$=R? z+a}EzrOBy&2ArHc55Gjr;`9}bW$PwRWU~jF7+JrNr_*te8+MJ;qGRya?`*W`UMFm~ z&*0j=L*YvMJQOTLq~Gr}P|*N+PIBypYs#NO%4j_lkJ!_OsbzxpDHB1Fizz)h5qi$A z0nNb%h<&U$wtq4F_*OHv?rJa@J!8S>+6?ym4lkjlfT>F+iO#Fyirf=`1PZ991u2Bht$u${$1_ijIEOu~Mt+jtk@pG_*=sOmzy z0+Pf$wHrcLuXuR&rdjxryMqlK2uC|@fyXmdW&Mg80RBCqwj_PDp5ukr7q1cZ^(Nt0 z!3yu01_()m^m$7BAzIt#68LtUmo7h#Gmrl{78PDiTL@#_5 zbBG%QBZP3L7+#XPiMlnXqiT0w_I&aVE6pRYzpVwFANfvftE?f4y8-`3#*ppNQa+QY z2fN#z!0)rMg137K$GjYYe_coN6Psh=#l9)n?d>gY`EJFTcAoSl&jj2*nd03Si^a47 zt>UJB?c$K$HA3DGtFl`*uY{wOqs8N;Z-t3yBk>G8#bd23WOG{$xO=7szn!(0T+i7+ zY};`fwRa;2xK5@p!$)9a@`*A#%vf&!4RGG0iaVb~^SfeKTHiR47a#gW4iyHNJ3bJ6 z3w}}bH<`eOnizJ|z#+%zJ88`K;}X+bq!%{^GXE;l8)@Z!UeMvAT$1Z6Ge9o?EN4N3e*inyQmhr)M_il%gk_4_(|#r zJ5BS4bit`p=Yhq}9L(K)7>Wn|lPob&m!y3-2kL41`1ZLH7T%4(AN%C+i=7JAcZsAC z*VOU&cNJDVxQ#t8PDBm&MzLV>ZRloZ$A>o`s4DPl z#nJe6_;McG3;3``DQ!~jh6COUykKJ*Xbip2{yhpX+wmgYj4Z_)kFC(&Gm$|%lO|Og zQ2A|%XsCXaoO?;=cYGYF=B>t@)5YSurxPetSU@-P<0RiJJA@(wJ)C+ym6g|~@`Vaz zaqqto*t_{7RUgWMoM#Na-!6#R82nGPh~iYDxyjN+o5X6$-lFD^EjiKiNM=}Lw(oNrW-HO#)s zmF5#!bN2$?ygHL3^=-Iv=ydcknu?SDy5KZ_Pp+#~=JBh?VC9x5p1GXFQMGzpc4weN zPKh5a?O{o!mA7#7$VVU`7knga-PjP)@J)*~Po0yO59OWI(YtQDOsoA2# zl6|;)Qat2M9>tb}P(&hd~IZM+M7q_?ZrQ`(BaMFS$ePw_vjJG z#YXDVk82g7{JFWT=N(grt@=3(dm1HPIgU6>)Z^&EH>5xFM&Qm{J`ghcD`~Azz^OIf z;&xj*IMdOCtH1nY)n&HeJT{z*Y-&KoAcHrS-l37L+i-jHJn_-r<&g7df9^+FlXqk3DQDD!I+`_7hSBZ)gqVr* z_{e%oZnBLQo`)OKg@iL?^m;uU(2T)3J4ceuczb@fBSNw)$y4kzR)tr0Gsf`M!@0tw zH|QTya!A;I35HENMI8?l#T?BDQi)rQr)|T)T5%e#dr~S1`{qF_KDLXE4?558y-(q> zUyhjds2wJo#PcG961%Y7EB8V?LB$olCX~uf3}?xu5N^DKC!~|g+}7} zKyTV^5m{!nDhIBt{wZw93*#%XrfmDiLwft&A7H<&G;D2(*k)l5YeF`V#hT%uU44*W z7aPHbjKMTnNeAz4`2lBX7%#Rs3A%EUNepo9#@wvOZy!uz$+sSy8}tZ_WCLkce={)scSbOKRV0+#{es1* zXT)d^Uo;$ikgVS?!L|-fwEgcFE$I7LQa@=1uE~m{$?kF3`5eLuV^g@d{v_TR76ET^ zBcQm?9}daf%U4xaV6TfAbog99K3{kmOD!FXXH8w7G-Ca-bc{kH&Kq}0?Dl;P#I1CO=c&CoUzjGCxEQcyq7L@| zSS|F}lF0e4{n5qS7kjwC!X5oL&NSZN4ExR%xdln>(dwD#i^d4Vl)wzr+%hol6UsH zdB1T;=1mHHewxb<{bG%mIxs9|87>T+&-HJV`0F1V6oYfgbD=AR_;^bVOTUY~=JvjOp8qS|YQ_NfukNEMj&^D$7db}-yT#ZIv;J=0n&OV0PA*#AXX#Cz7HXE1X#g4IXFzl=3;+;n6<|!3|*MENS z-}d_yVr&NMR1{g9-XR$O*bhb#T{)+P_?VT8L~dq=>kxnH zirD9CIp;1thdt$iU&=*7R|N$Otlz{N-+!UA8xMEhZ#_kRoPi#mx-hGofEO;@qlE9B z=jzZA?4=zB=Z1V1UPP|J$idyfY*h$4sN4rQIf@mV0S{gtCNUooj*p`4P*-*cH|6Sq zi`zK*z0<(1w*E8^KT^r1p1i@~G_O&;=W?hn) 
z`#3;=*K&?eD1@s~>g;~=Cl32{m8X94gqA@+xHhehD?jJ(n#+LeQ>&n=*FJhX<*V>z z{S_>auEG_+wqjqCg?zz$E)2C3c!|6dY_+`vznvcl+XgO!g@gMdCadr@^Kl$Mcqx3| zkp&Oj!f>lXJ2lB^bF53fxT9Z##BmIub=7fHTj|XH8~^L%Nr~vmE4ZP}kk9W-!s(6P zyeDuJAHH#f>`J>i6h-$#wHp?sO)`|JycS>QOyCJWcfgnQKd|({1Xdp4iicMX6VuM_ zpiSd1W0(6Ud1>`(a9U+1vuUXW{myd2&OR3xXNY;IO6wNz(pe4KFV5UtAOj$P?j1Fq^ z@#n@eP3Knf_^s~1O#@`#&f3$9EvgPz=o}6=E`l5S+d0?R1pBF_V0$C+Q1@UlvY!n_b!PvZeVRD+`*%qfmz6?EstE?2=)oKNE`n<*r)j_DMQKOL zbgmj&Dk^4OhROM19N^g#J?6U6;-(kSpb!Vvp3~?|4}b9e(o7Qr_n|m6SUfrPv*e?{ z2aRmGM=@98u)kd|Ec>Mo$@MpcjKTJxJjaP+*VWR2LFu%(d@()0r%rdy)zflyCEl$h z#{=wi!G3Ewnm-tfD!sDt;}YPD3e`tA2v!s(Sjn@UUnbx)&Ch{1$vXpNh_(?n8ie78!h~ zhJWw6W7@oKT)Y1`)qR`;56eFYp-1mS;u(T&AE%3f{+B`VtQAa*Yk^&^iSQ%(I5?;$ zNQN|w=D4r>Y3EjX)UX*yZ>#Q#FK4`kUUygFt9_NE6nF~;>h^OuVZXnwGORGo)>=I1f|@>(5z zv}2+B*mH3Gss_w2FJz(B(uqhwubIJ?)c%uu&WC6d~A1R*B7sXhrrsgMmA=G{jJ+jT9 z4F=2T_r(pIvcsLfT(`hjwd2xmD|+K)-2*T=GzMkPWB5qfIk?|_17=T-ht-bV#FU#) zVa&Y?9QE6YM>LW6A!w*j+`Aci#Pr0mt8!`B1_zdR&lCF3_ZQM$PsUphD`?N+8?^q& zZ}L}LC0s`Nj?cysjlbeRL>li^v?7f>2q4*CS$EF~8H`gP`#Bh4H{X*_SR7mrrxB@LYXkY z+vE?DdYT*d4nF{=dNfnOj+?Z7{xZH0xD+m@Ho)SmJE`Y7ZJ4>@tikyI!5=Vqo-VWq-hh>tn80D>8I%VycWK1=+Aqv9Hl6j%|<;0Vd8*(7!nXJ zU6t==uf08=Z0w?=c7aV1K zOMDc)N7$*N2zSB{L;cHZu;af(ad>irB=pBFUY|Lb!T%r7?~|atO%X4xxJ$G@RoL`x z8l@+Fp=Fs%u%>YyN~U)~qf17@*TQ??y~RhE`%95^HRZ*g1u67xcUSCF)FKY-871yn zZAm|TGa#+z1DTjApzbUQXN`|Qr`fMCX8u*UUI<+I;vb&a6GJ_!Wo&okEUQi5MF9a3 zaNnyBg)~2*xxLQ_-mfpxi4U9MT6zywTK5|wuP&$0&vU6-D~4xtSB#F8Qtp9Q^fD)( z0?l-IMMenP&fQ8wi;Br`*8%u=auK}WtIOd%`{A^l1<>UA9*@n^#1dU=!Dnnat}9fB z2j|X{VO9>GH9N^(rsjO~iVJ7W9L|bs$3oBc8N6D{llvC#5T74K;Y!7J8vZnoNPUlTouVF zU%;iUSFsiL(O=thFlNw2>CfKD_&c)*8auf{mdiO*^NOSJf2VP@>Iq&H@tY!EsG^;U z0p=MTr~2kysNX-V^Sw(E`Dd|YLk*7Y6Tk_xC$V|! z9?ZXYfCi8<58a@M>pu38wLSXEcg8S3IpBcN!%N8Pyf!2|4CJ+!^JxAeUl?Fmz}=JW z`0BV~9;2&^4Ug@xse8Q;xTl9h?sqBA^=!kGg@@T>=)KPO`Wn@A=>{vGE{DfQOQ3Py z9=J45BJ`SljE6otN@Yt4C(ZiH-*z^!V(bc*qY^maei_E^^+xv?C7ci(!yC2rS!V;AA3&NkW8(=wJ zkLpTi)8iL=@xO_k%qe~!wSUw^)#@&&R3ZbFn|(NK?hIM>%Bhs_u7bW%q1Y1F6$cz2 z#0AFBuq=KdZ+Wwn&95DY=lhh{XVgGi9rcx(Bc!x-i&BccU zBT(ncBHR*HASv4N2C4>o;QOS#kbLe8tWr8m4O{k8+(l~$RB#YJ9;$^4>Y8w{#E{qZ zHsIHbBk@48A8FPbuxFAcyY=bx%*OY4`{(o6J{_T|6j|@|eN29N8fUu4V^=3DiC&c% zj65mNIq7A%cV$f+ElKP>bENga_F!S3O zFo(`QZjgY{v=m!?+_@nkmJPe*(E#7$5V&I`@n=J-Y&JsM;cdcLM;l!E@ENH;=mFC` zkJ0C42~vk*1@yP*3f_G+uyFq#>Fo7#D0`to`y}0Y;`t`>Dencxd;do{yC>Kyj(IB8 z-FTHo6y(yR=X=S{b_nd9`dqZQ7msJBH^BGn8MMM9f@ZW=g7=jdBsbLnEOxaB&Rta5 zH<}?oQI7I^_T{cSGNJ42F#dUGpv+PAns7(lBR1P7veApNf~YzWU(9V5jt5@A%F8J1 zFVCj(UUJ<1Ljh#0ROKP7C!=FazF5BIJOv%DkO&WFK}%{VHVE%%-A`rSU~mk!jU#RFQSzk*6H8{z507?7>lLEfIv;K+|#w0n~?6;>9}n2ulY(Wq36@!QT1&1@X3 zAW5Q9R3csz{K+4j5GJq{R)3y) ztn-i-Z3#njXCotMJCmuc&vWi&(GJK~eYpVDGVbXbgodx1+4to@R@^leRR+E#&0&qOy48Xs zl3L5QuYW1Y{*fXMJD`U;HX+bvlme4mZ%gu)d?AII>o`_f4j-;M2RX~N`Owpgurq4C z^vwNUywXt-Qy-}E*m3%Zqrifh#5s4DZD&~L&t!R}Wi zM9G)gU$}P@{N6o-^28waQa#R}Zkj+goS}I$6iIVcH*Ov01I^Q}(Q2jLOz%vnJS~DZ zWYr7%f8&=stDrp+k}`-MQD2t*L zjhWbR)(U%f4Z?{Zp3*tT`fgOi-8Eclg4q%KNly*j@3^ z#Vu5CnM5}`xtMYH^WtNLYcTxCe=t?0LKr?bRlNAwoIS%1FI)8ADqLgW0R1I{ zFmu*1%=3(6xuOfa`Fb0@xG@(O?YxQeHuQv}*?lqdp$t|=SHYxChQF(K5kRep*ke?8 zJUsHQAlK@L4kkwsi?Z-hpIGj$x`^+2cH#W&w8=ju{HQ2@Gr-y*5HdWciE}ikFn<HDi#iYu2Nb((*}3jYEoavHuzTAIoIOC^u7ONd^>Y3 zEl7(MRNQ93?rUcFY4&YgKdBwuA0=~=)?7Ma8TcVQM%{*%wx}Cg7676#P$|&9Qb898ozH5;whokCPsX_vDAsiiL7A!}%$2ah3vS zE^i>;{6UoXu@Y8|J`Oc$|L9a!I2P4UhBKl(O6ufLrJIo~pZk$=ath++2e`(16sk!_ zz*3JJP&%&&eN|8_x;30M-kd_0Ug3DyIY(;J={IU`2k@-+6R7_F1)b@ffjDvJ2mO9| z0>fShVbVD*HZlk#-&r=Wpl@#~ks-Kg&c^K?4UqTfr06y<2^upOHb-ixu~nG`cx1$FUm)y#4tW1PKW%TKQ4`vA0gehqJT#1JcNFR!Q}OFA9ggFbE?g7 
znT5tqbgA&=2fY>fWatGxaK;DkWbTH3e~KZrWikifzXul%kHtGX!i7U;$8xu-5cG6C zLQZ4H^TP2;=&7mCS4Y;v&2x)O)}x+`fv&=rl}%@iYo zQ^9z`FY&TrG5Y@bBIy@%l?+elUb4FToU9wt*j&#JesxJDoiab%TO{JKoNf@-B?@Zg ztI&3SJs27tTUp>~yVB zaA(9bFc^4&tTZhl`fD%Q__HxoHThj9SKEWvCe)EhehMzB-9?dWF2SIP^}_i=1#%g1 z2s#dg;M)zGL4R=zyeU6JcYm$JuUAf!#aB)2fd9GTdu`xk>UswzWbPkB)-yZ?FAXV7&x zpoy@1=1(EFu?N2ool8$2yNV&tp9yvw6zRb;51P?)sxYKSPi#12%u|MBvc-&exT^92 z8cWnb(?P(H8(H*bNCC~Q`YY~<+)dZ+^yctyu51r^Kp)5tW1>J>$ zzF#@mWf|7Wm5FV&6~c(!*;S-<0;`I0UX7EKCo75!Q&)NjL zQp(`${Z+zr;RGb!Y?7YHXclz*b-2?-QOH^)j*HcxE2k3Zz4v}_e%%*mxt}2FZh-qf z`BVS8%i`tDHlXvQCmz519*j2Jr`V~6nBZb0Rd>7r-rCnwy%s9WHqn~p zPpOaTUSaRQzd~tzSZAgmi@UEJS>Byac}+|*Cm1FnLt6GoHOvoQ8+n1O?L zJm-Pf*WpjzRSu}Ah8eSZ2rXq6VA#4<@&J=Lq}2|atq0?=%qlFel5y~#u@rSASG4?< zAvye?lnV3~pz-P{&^uIt(RP~X_+X0U^Y>?BeMZovyzSyYon4*KyY24j< znxE<<(#r)TH@_UYwm;s|ap6fiW{AZXU{UQO8f#_A&i`(M`udU3HQP=|m{vutfz}*T zdkk+MNyk8+I(qAy$fir~k@BzJl=Uo`Q%r{lsVi*BK z?yVr<$u*k#CJ(xY4&$he#bD6*7V3I#;*9Huv3%gKLFi_Dr4&BeCqjZFr;)?QR<-tGFWp^FiXBgHyhGOqkk3sa#6uOUt%yV z*8~T|D@f1#ezEtrT*G&j_kiw_F5GKhgIMOS1-le>3-2|@LRL;N$j{sh9*=uUPIgzs z_yB9Jl<%y0-J^KYq%)#*QZ&<>I2!u1Sy1V8w>wp*;zIlfi?S85r$rcN&Ce5}A1Gtk zDRr(7yGY*J36No{*U4)q!gM1Ej=p{Z9&0M`)vYS{p+lehJXzDPTb zD#a5;6?|%0Zl^y=MzggMsJ-M7lr%YtWsjw>uOJ=oJ=w)-%S!NA_pN-;yccddUW>k` zO&s%3@_IeM_)gX_ zY#*Hvlz6xSa6jjZyrRCF2!;zGjkMY1tv4?*JuMzfX{NHzaa{E`q?6Nh!AI^zpm_Wk z6~ZXjpg9?lBhIbzu?4+t04YV^@We+mvzYNx3` z6=CnzGo0>}PZuZE@iJvC9&QuGL(+RYl&VDW(SYufsVDbC^5`<2p-={XzQ>@>D@N+H zrY|~9O~<}@%cZ~Gj&@L9IM89uj1=5)&>dxYnzHDJC!pBjwHWqy7XCQZ7bg46V{+ZW zli!aL|C#9F9_2V*UZ=tD^u5r2ycO$gyv_#IoxQn<4FBsB$``aXWtMSka9>CSA2eD+ z(UMP)Kjm&`FMNS-g-=ASfhK4%H3LsxK16b#eX#a<3?5pymj^7A(n0}`Oy!n2A!lX<}DQ8>lB@QX2X|~260u$Pcj((7fde?#9IF*n!R)m1<(EsTlaTk z|A;c-L(4uoS@0Nwi>#VfV6; zY@xQFTdu{^r{R4Ci+43t_Hq=fkJH7lLznn@T^b}?WU-_5Elz3fDLdV+0>c7wd3@i= z7}2yHL)8CKWDK$UaswV|a2Z1jqC}k$M?iRTKxmowOKk6T2+Cb%(zqo7c<2r|8j z;cXX1ZLJC#GJ;`Fx--g0oUk8g)C6ZcebwO&U1(Z$9^uS(DZXw2g_DP+UK^*wic0}x zyS_Jt@BR#pe_x2Na(nWVzZ3YDZVJWQZNqI{x}dQ89`(C>N8-QKn{MsyMW+Xv;apz@ zVZgOW@_G<1o-cXQ5 zx$x~nPh4c}Eg78k8s^H!>IkY4Lvt5osy6Hi9ZgdC}&o7`J6Tg8pIYP*3 z4}oQtJwElN z>=txcP{N8k!};mnmrNPs(c?ll%FDb3IVTe#wbP5mZJWbtMXNw-TO&-%7y`5QO~dRj zujuE!Lo{TPwS8nh7 zr^ah1-=>@WRmFQc$A!7W2$$#_q{Lofd|Jm6Z*-Y~Eo8a@h;ojMN_-s`UwF|6J@0&My=XVpmcP|lB8hcP> z;Wn`PtWRq|1BVSg0yAA!;+ODH?Atkm(saw1gTH+zm4Q<+C}GU%bOVe1!R$mv6H z?urLWTyFTzn$FS35{qYpbd8z&T;X zLJeMFp-$UfbJ=5dJ`|720H=!X^yQigKV5o&`aiD}O82!(8_hq#xI5u|(qDyJigobY zwsf{rPN0n&Ou4bm1JAo0fV9D_u=D9f$~lrE+FSJ|r;IFG=Jy5qtnJl(tTmN z=6P0A@B;a)%jn|6bZL(*K6yQntDD`yr^kEvdSHj(EF^H|BRTH)v==+vTts1<4SsUG zC_4B5$&9#show00PLbrmtQb)CnvZqccf@jq*I za6anHi$`SB(w|Hu8fvd9F-RlX22_xtgn>{OV2V;*9ZA)g))f`%Cv&?$EW z{`i-{c?*;sXlOmY`Vh+v=e_Xa!Ve(mJ{5!&O%#;1iIzLCs68y7X5ZJK>RJ26B`z;% z?-G4}6BRCal^sR5!906whuIY1&s8o^W;~DGpUd&RC9b zm!_dCX@G-v+findu`=fkcUbp{Jzl?~=8%=rO&Z{Jh8JHuh(|6@qK>CO$aqvPmG3N+ zY}acSct#TUyt5CFb#a3%^B`83J%VRHS1EaKeoM9+IN!aOhu8 zuGLdU6}9WUp<^+0OFAs{>b*x8cPx{uD>mY;jZ#$HpUByVw_w+&1-#|&UhblmfaRez zxckvcR_m-^&ZjLpcP>2Xs>yg-s5zXgyEvn6lOtM=sOt1^Yf!ixML)Hq^ul&6J-MaA z76p&!=*W4z`R`}?UNnjy7N%p!n~${7%}*%UlPr#&_K61Q6bgGLKZb2CJssS1QpiO1m}d0G zwO^8?CWjoIATPPvY0`}lG$xGqJM`f!c~_j6v6s?E zMGJ|!`GS9OqBQJ|DrN^i6uSnhaLSu>zU3bPs$-0B_P$&?sy+ke49>>HNE!U}i)M@8 z!MxAaeO27@N>PUo>~F((=J_dZGo4E9rmpJJKbr2%;GbOd{}&ZD1+ zL2!5XRq8uwE6mq0;%&}D!Tj7Fd{@<1h&`!^{+E7JMPiL~R!%DA&G`bMXGXAUz7cJo z?Iawo|0cEu1&~dCIlKRpqJF#=d5uh@9zJhj`p4(|*?FsAYq*1sq#r{M=YFu`K|i>& zca?P2AWwKO>k*ZAD`Sr`d-NMB<)3D;bn}-Xt?k_JiVN6CDpU4?zi~JGcA^V9mn;)B z#?7QDLbx#Mpdl29cI4vqh5~MWppl+-P%z3~Xs&L-~Gc*feNkTOo-CZkv5o>_s 
[base85-encoded git binary patch data omitted — not human-readable; no recoverable prose or code]
zUUV3V{`2W+%m_TRcO&`Qey4s{reWHoHL&d}(~&)EIO}bD$J?e(5V^gn7!>De1D9z4N@E#7R`td8&3uf-?Z+o17X2+To%@*8j2sUX_LYJgY-FMzfiTzM@;&7NFI1pkDqLPPc5-U ztf~H+uP2O!!r>XnCEvxVqn`;;V@cL%=t`fp^F?=`<@9T(2Y!5P!qrNd_-Rj*7DEUUhE2Lsa8NHpyCz)|TnjSlCI)Yg}^W_-}_whAp2HAJ=M838tihdmk<6fmbVN8>> zcb@o#gW9(WV`3JtNAD`(?t;NoJmIrgG`$BqpBTsYn*8a_E-kc$>B3Gm3-(_f!0TVB z@!a`Y+^^xSsQFd{Z9UJk&h$NKUgAnN;j`IVwU)ZO24TVNqvUznlY9O8CzwC9g}YVK zxzRI^x$umYCOZV05m2(5j@tr(2J)V@K16#Sew)V+|Rayw1(lhRws?_ zFaD2mKTYJDMOv7jcOP{11BAK%bZD=yHgDWJ1~2)xLcp~cC|aZn7+yr`t82u}6Jx|5 z1^?jI-W)PCJ0_lp^rPD+Zo_H$MP415}VsrN|OAmubwC843sB~g27jqob_1t^8az}0K3*kF5w z@Oh*L%q!|Hrp{6UYs;a~S>u=7;n#EAp_vDpKZc^wnqAx$tczpzrqLhWL~3e~vd05A za_Ql@_~W4hZq10{pT@|y4U6f``;)ZR*Pq(dO_>keC)4Ri@Y&@Snd5__Fyf^)>R($* z<>}dYqw6kSeqICXiaN04vQ6xnro{6-BE>c8eLbULVW6|%1kMJ-I%2SqnFPVC7&2Dg$*T?O^8 zdQYvEFCa*<3PzkvmOPj7FnID`u}0#z;cz1Nh|N&F;~wc>_y=>&y2`u%s)1LlKR~uk z0gX$YCFGw_<=2mcNp*RIaPFcJuSgGtuTQ-(%_vUTcv?@)Cp8XSc33>Jb~}%WS%t%H zE6Kd4)d~Ho4#?JZJ1j=M&Js;MwvybrOt#T)1~!~~CJvLBO&62y!u9Qk#X|v-vxTn0 z2c6?k?NlfH{@5Sg#tsM9@y_VA-yO#|S-`)Q7BKOp7R^!biIerGa?rD@!knsJFte#6 zua4OSo1ce};S7D0dp?Ds-?X`<*`D6sUW*ULrlN2!pYlwV#36@sNG5T~+6783_rfTM znY0gP97tq~FiSMuKMb|cN$%xWHsrK7QfTjS3y#kFMc%Xa3R|A65RY`xVB^?AG1=}W zOdPTl%7gu=SMZ8a>&9Obx2^8YGo@>HTF8f!+y2nkr2}MLs}1qBsVO?#jD<#SmbEQi z3!8uKq_MM(g6FhVFxvlTNp4OaJlBiGX>%;N_%Y8(i)a zhJ~$iaMDf@raGR5#$mVNYMl+beAmEc*B@-P;s>`(A{=*m2^u~tWw-Ftw>Uhc5xp5rY(7^kA*I}y3ReY+l8vo2r!P$7C$d6^Q z(`ZBAEF69O$EezRGaT5e%lm#r3YvOTnf~jF%h#Tg)tc&p^VNL%+ouiKp&B+X4}$Ni z$GEEd^O91XXu(#0#*cZQ|JpPu$u%Pq&*8Mm* zzeT)wE`wYRyP?czuvjSdV_#?n5(FqaPXD5dopzak@3u=+C%I&6Mrz{N7z4-eXC-Eu z(*|B(smU{py5gs8zbVppE`9vb2**}f^1~m;;E?>siFSlamGisV6!JJTv{gfzAqEM-&+g` zMt^0luA^x-*HZDDUE<3KcUqR$0%{qfVceEEg3BDqeLgi(SUb5$)X8d)4-M*pUf$ne z^Yk<-S|`wtYx8LOI0cm24&rH5Ge9Y!1N!#9#fkwp_)(VRdtR%Bxu1+Vwj><4ov%U1 zR~=}v-edW)VHwiQzzgzCB85=nWH|F5fyZKXUN7auGnWLwYxB++`fC8XyqB_f-IAc4 zKfon5e{9~b!2x|^!7wUUw96LQSSek8e%vs&oKzrr0(4=wlqs+cse?^9EE-Q&@5XF!~00ije&ab`pi4nnGP_x?iH-kOqFlr?mWz?P)N5|=DWgu zdi{Z6bL9Zk>G(saP4qd##0`0t#g~ue9IS5)7T*{_*o?8opT|R8*Uh`AIdxKbZ6@jZ!uv; z5}7pBf%WZgl>0XwvWD!%d(Y?bon8%~vb=;wZug}FWA=)&13g7vJQCI}3I#dE;M2jc z`0>k49O2oD2mD_^zj>>{@078WdGcqw;6Q#8sKz=*`zh*qHk=7rAS!Qe$0j4x$>eMR zw>%ESu>a?@Pt1oK<=t>;tP_4QY!LHmpHj*D)fc1ZED@G{ek6OfzdgOG*e-vPzFM}$ zM}yvZ2UGv)p+a#-ZQfnJTb^-n3+&vrUTks-1jpS+>G=LqxV6WFFCDzkdjAIU?jb>F zqw0rZ#2w*Ldwn*EzXeqlfwXvyhnRXInA`l_$+77g#YlVJB~RpJm)k(8ss)gpv=1DY zmJ278N5Q`L$=I{)3oe4eg@kkJR5M@~1%?kNqcLMe^NGE{=#3Q}u-PDV$w(2} zjXI2{o`&Js(tp@G(+0=gx=7o!%iu-XUSYutscsz-gF~gAldae1!xP&U(XZkEbZ>HMZsC$#iJcqN-AJ=dNIg&!^HYos&2u$xNFRSD3$c^v;s zn8_E7Zb0wfUQoGt1myHlA_Gl-QX4%?*75gH@^WOEZJ<3qJ?kXuetsd0>#>pkkv<;! 
z?Tn_Q zuJN2UJBibML%iH?n6PQ6CGHrRMT>GChy$HgVxjXBSo|=RZWrGm*-v9|vsP7Hx@kcR zQj~e0?SA4$XAF#2;#sEJim(^yczEh$Z2e}Z~0!oTI0=NS*>a_V$k8pvn|}ud{`|O>pJfdvj#ZqhyMIJG&~z?Mk8J zji2Dy%1RobF0lGI4MjrHFfg_(;Dc(t9jlEbzTS^>Si5Zs1kE3TuWSb@77dTZ*6<71 zX+%AIT)GUeKGgs-uu=HGA3|R(`~Z$f&-QlDMNkfb){H%TUay6BH@K3=_d#?^WgdQ7 zItIr2Bl;%}m)L;q$@a=zao^`#XFH7oBJq0I)C9u3D z6OV52m!GMRfHl8o@l_{@N#;Kgw(N7@$i@F*qe>|RzKN8)F|jOPcNM!1?9Cshcf@JS z>~Oc?DXOT*!soxwgRAW=Va?26;=`f)IBsk=uKV{6Ze^D6VSiO?BoKa3YkJ4#!#Wv@B2Rk)bZ-I6Cr*K=EV4R)9MDE~TH?n>o3WT!zhtvX~m$p?UxVK80d?gRWTKPymzFJ@G<=l)VW(DvWJ=12aBu z6SSmSvpnx8RrMQzcCWe#Q|i`|VzUc_TW`nR_W{c{?1F@iuY~U{mq7V&N7NYCi2gQG z)>>&N?p@tparan0_VVn{dj@FpJ1Z9sS*q-K_mdqzzm~y1KjUa-S1IF*!?<$KRor}L zH)cUG1@}HmH&XuJ6Q~kfCTxbCg&TyQgBQYz1{um+ev#F6Gd{589<4DEsQslCFhS}$ z-rf96u$Fd%zIJlr_6yWt{iA%27R&K-OgL1J-pSLA{sWKxy}4+K)c@#W45p54!gv>B zjw|j%Lm#&a*++kq?JZks^a!Ndzyt7OMmA43{RG?Rl+*CfDER)(l;tZOlKM7koLzpF zRM#E?waI|uA0?bz`i-0emBD9E5p~e|L;I{$aq*K}wsLOaZ4W-vhz}vKtw|Rajn6~N z^In`V>9TYW?L+&2#jw-#Xvx#<2}%aBbR;#DQ?k^r8}rzB{1#+-ET*Z#Ud=x+N4= zt;O)8me?}Ll+V_R(CI=JB`rv%-!&QHR(8h^&3zdPJz)G3$&=A{9u9uA6>=_j;HqhM zV$%a$hpA!m0=8uQu=QX(Q+i3jh8-$+SN8y$O zdtu0#nW*#10R`LJusg2|7eyvh{mm4bWOPPs6}`n(BedzK#EILWaUGg(9i_Hkd&HVX zGp<%Q6JnnCp!uWRap*`7*oKz0 z6w;#Cc4%vshqT^Mu)Fz4!6~yBaZ&wW zNR3!bz4{*&&l`^uQtza}z%$b@zG$R4=GsEkI`E2=HqFH}>uoS)(8^@=zU;wLk>-U$I|d`9dKa!9?0rY z1idSD@aNg(5I4k?9#630whxnd3VI4hB-YvKD{4F^uQL|fNHt`E9u~}856|Y>u?y=s z+BkgW?wuu;#~^!NyRSlU9XL_kZ@&#*AF3;n7`4=GO_?m#{tP{NtAe{!GU>s^0iYo} zPiy`jf~it2amlb`T-o9Qi!W>Nv1Da-Tposh#`Q3uo)wCRq_c%6dX#!Tbq`N6zW`yTb3lEf(vHCW+vUhWr_&ikcof136b zywWp5nw#Zu+p|zQXsUzftQ{fb##l_fw_28T$PMSe{6GitK8h2zTSAHMY)smAnM005 zN*Nwknj>>@ob+WTjBU%5GG9A{lDpOLxyG5B8_L1Tz5}>j_9MHWWwJZ%N>KQ*4qvS8 z$c0^>(rWiMNP0AYt|c9%dbcKQkN2s6*FzkU{0gewK5&tA7VUR#2svlWz}gStk`FzU zV^5mEsV|3xv-9(X5SLuqm8_3~(G`ACISbsIV)~>%+UjFcthnodE_D^+WeYFV8%{IhMFMpyWe{ww5e46pFzGCbiOE!_R_Tks| zLX)|I_C!YDhezu$V&OabJa-N`_jbn1s(Yd9eREhCUVnbTwOwLPl0BD{EXDk3dWt%~ zn=<{QJz%+Kd;T7$1=+oi!=o|(;RwCMV&~y~VS9c%g~gOSoFp0u8`t=e%ZSd9{i!Qw zb|mudKNu~NM_}dlGB%A_3ELl=a!&ivH2Jv>T$cKjW1Q5m<$WewUDALl8LQdXfFOS}UVYSv9aw788?>ljqbx~NQXa|oA#YT>wge#36q>4$I*DI1>f#6oG*3n z&00?rsDD7Akhi!E7S73s@w0RAt>t2L=&_!^j*aJImOp4>=R<7&_Bm!~ISBWqorboF zTZAq5CSY-I6*%-Pnui>DjRD`f@x-Pud{4%#K5>xPYLdofomw&U(H)^_);5|{L~Nus z3;+Jqhd*8_cquo4W3AI9uU;sBf3Xt7Y<6Q+?g&teXprWRhj8eOcrwdWM;)&KQRz!k z$(-GhRJy@kSQKy`>!TLoh`UPoV`@GOZgauAju-G>jwfw1t)}l0d*Gl{ALeL111%d@ zkodsDlWUT5f{#K#D3D^U0hG3t$!+EOIKa<=tK7Y@@PjHgYj;Jvy;phB*flJkn$LSo zbD+z+IWV$N1qTOIP>;$s`WtjYcGzXsNcX^E76uRGdmR>n|3yD!zdmFf5KWzYo2kge z3I_dEf@f0Q>F#-j=1tsy7ozsSyx}G^XZ=rkf|Db;IcngZ$uhB*7ejwfBYBUNn_-k` z3|j9@#>rA0e6TJYSBxLbZ*`_o2WhXRZ|gDYJ9#$@)KCXgr@g|qv$w=xmu_HUzl2jC zc44YpU5{FDqpxOJ& z@XFi>`Rn?Z|L0R+{DLBC`uCVhr5a#@QU>h|knVHcEGWjm3x?}zVcOP6K6lBQT}_iX zQ6*aZkk^&()mftBwP^W*f=a6D(^u?LcvJGY?1B?rHSt-dJ3IX6Eb+C2@!h9CG_GQ&KP}Iq#=_lC)}j5j!97evm5K~uYtJ*lVJ4gI}~^RIUHP)B3tCx z2d1QK<(VNH@L^*$jO+d8qPgoEp>A=L{QX&`>@5Sh-}5+F+TrQQzM6J$aZV-b#wSZG z<)Lt_?hI|*(@6V8Y3v`<0d(vxLHx>O+TDBqgB09>2LKdZu)jzIJ!+)Zn%3Ttkbt6la9~8;p%jlcA`B` zi1VQ?-@lTxeG{zydleSksDX+1w76;2Nh<5nS6HRi5kfpC$6k_~1a_a(|mYPadhoUHb>L&6#DY@{5 zPJlsL9yH(T6wTb4O*ihjpi1LK{_^f3#d~J6lVxXoy|e)OnBEqioq12D0i9&UUZ=>D z>|tkx0WDr9aS$vS7Ve7`mD^1Rlhco2mWeL-cZ!0w1E%7-)?ch5vyfOZqv+Jxt~e*8 zH|{xQOhvW@kYjs;hB`XXma|{s@6OYrp<61+XQ_!-x}OA}ywBXb(_c7NS|s*HNAb!Y zEl$q91~(7pQFvwwndo1m4<1GkWK_!clxFiZqZ{b2YoTa)kcGF}^@3#^r9Jb#H-u+j zrYXiWl%d2wzz(gkG(4^!j%{cZSDpJ0QaZk)n}sz{65@t?Yb6%TfS#OxxSQyekW3nB zox~u+KY+FFFyoA&n6p9IaY)bwu$n%OZzpQdjiGnoU{`Z=`XvXKx)s8Ki3OmneH;xR zo!Q2n$s5xwrkg5|9&%YU;G#dz-0I(dD4t- 
z%CPR2l>OONMkn=S!EM_g@qV*Egr3wDQ=9rI(siGKdFwey8I*|42a;jSkegC{u@Cfq z9|fz%d2m9m!7{IOFq~k9y|*dzwFYnSFU%;r|H43=-Q4?de zLh(!MU7UXEDz)2rfYwU0uGLpV$o_kK{#N{nYJ4Z-kg2g;xicJ;a?;6pRSOIlI9gU- z^%;gfxWw+S6X0>Pv3S}3Fm4PVjGsHE&>i>5@>@N^@!5nt7@4sMFEozh{6b|Z`}K#O zO&W}&b0Lw{U{k;>&K|df9WK71UB|lM@1HKxuI4EY zJ-JdykKMrI_PVo6#~gHBuIzXq?S#1XzcFMX`ISO?w#TSEFJ62v2n_P(VyDV-xy{+z z_}IXb7roF>dpD;Mo)t(QZTjgjyJ6}+nzn;8&D#&fFFNbwF4buM^MUQBq zrtApJyODu!zQ==~QYc$l4dLr*(pkpS23K0|VGY9vxVwEjoa1zr{Z;jFWLpZ&(-_H0 z1N3Q%O#*w}{eV|zzXR9xj(n^wf_FBzbNnJ-?7D_N_Xv@6DMR>NoPKQkl%_ktNIO zZ$ievYFe{tKdv?nhv~+7QmkY;Hmf{>hkMl(OY5aPgKscxF~5&L_P-UnKiffR)gN)K z?>Sob$^(+;If}_+&I#8~1kelBgIuAWE54P7QAAoh!BaDpM$3l47lYpXbFHPLa$q-V z_f)ERRK`P(e{xxVpaFd{+sI?LcH|fFYL3p&MUGsunevai&owB+s%X;!N* zc@Po%6nArs&7Hu{KCI%>HT$T>QQOgP#9@h(8c#!Abor)+Cd^|Sy8k>y81&@={!}dF z$xA!a!Pouyb-g)G&HW{u`L-Ce$6m(E569u9Z5!xf;BfNovIX^SOZ}&Xw;-nDd|6n( zbWSsIXZ5Rr^lC~W9n`3z&I61v*m4t^E&fc4bZj~KXu3SbZ8(ke{UW}o9E^51T{viQ z15`u^To`Cg&Cv__yY*S|VN8vDz>BwZW8G^?AAJ~>jWNZT&)rbfRY8k(UZT*O`#8{H zJWtd3k1Fa9@m2SBil%aN>E1mFD;s6ss}l4dJxrRBAowI&X%_W z#D=ZTyfShfs!dMi>fhbivKO#!KhT%HHWwfN~KKyEF$DO}uN&HV6Jmb=l zJ$gy*hSHC)V^pqiAhlXJBe8YgPSxi`*G|~!dYa_?*o=zPSIMXRuKe}2!DtyNc}A12 zgU=srdg;EO&}%Q~8uzE0FFJv~>sW?~UKsYlkbUb7VdE)H*zd2#9x-FE{LW^|$o<5F z4!N>Ir5qRfrBLfcb>4sH06d7E%;ncR(e)SG5C+zQ!69d8SsX<(qDDYrgp`f^@0*w! zvkQHG%@GcoCkd@LdeFTZtrC^Abiu+fh8vg4Wj3n%U}vSynNrTwFwjCAczg#;O;x6f z26t>bkk0*@D@L`}-jO+PDi+OK=g`@LdvLlzVwkvxieGM~h?Nl~@?}RKQ9#yM9OUHz zXXk0+`mRVxoh6^v(Ox)K{~_IZwSem{J3`o!Jo!0~<>H8JHSV&jmamrI7wQfKqVA(y zDE=`ScOE}URAbGz=2gL@>+itjavEyXPUhp5{is6XF68H5koNn#@#4#eDSX2NkX5;( z`Qy#pCiNPXv-W|CP9(lbaiY%>!%<`D5mEPXJ=tvOEsCLPdH#^B)V*mgd3>|!mnFR;<4QYnE0cR>t=Ko z#wQ0!b!QB&@rj24&!%ynmN6-f6JgTwzFc(TE)92yg6!_WV$cX@2tTnJGk?v3d5^om zU+Z*1F@Go~&p1SbjQ+rFiSID(%4EDP^OBrLhcR1y7;8?N&F^m!$}~NNKbJf5kPUx8 z2Oq$vnNuJ^`yS46Gv`NXyNZjKtsvjg(iuFm1CMu(p}husOL98~((#D*FuM7Jl;@RZ z?URmB)(@$ctdXu~$wQtXF}5h{*G zT+XFhNo?F}jQe62;{Sz~UNFOI^4q;kxHbd~=r-Vlu#&FkJ)48r!6&Xq;73gwO3Ho20Eelx} z2hC^R2-zR;pj@i5Ney!O;{>*InyXGm3dT^dI7n!2- zh0Zv-afYDt-BeVJ-N)@td-9i*5WZWZ14~?E*bh@=BYW%1KirxLD_V-g2bqb~$4eh~ zX6?i|3*XY2O_%6LW*Tg*+rSU~G+@5`8fZ0M;}(lzL2dGRVZo!Nc*W`qf4&+bj@asi zox(Sf?zmOBaug2Tyh zEKEEFXt*1dMODSdHKFw5U?r@Jjp8-GOX&2^Z*jPN;}NYIq1|$PS)+Am>_Wyl`697^Y21l{Y>N?{6lE|G+GFc9l_tnd;@N0UZVN0 zo|Y+xGpP^aSB9U^roSd8)}G^=KU&1&9rm$VOQZ0*eghWV@s#x$d4b=r?~7*9W;pxA zK-B$jGiHA2f~)Q(3O+$$)a~DF@H;pZ^X9(;r8QZCQlq-$8N38-PrI^_pOneo;K2KB zhM?u&eta)Zs@Igeu+Wgm-A5HeNPDIs4DKAXzQL|j{zPEMLZB!7N{7kAmrmmZIxALFO+^#T9m=)B`<{NF!L+bR`B zX`#qSMn;|cx{i!wlx!)=%HB#=rL>1Mv`|8!L`Zb*>pFxeTVy03E0OG!GJf~>_wRW; z`sUb|Lf^EAc{NvHT|o$JdUFgm&3-1Ew`2>9%6=d$oh@}Qg1bxnfs^R!IuAG8_ma6- zoD^~;4X&5^U0As`jGqimkoM@xpwEKIxH?pisuw0g_wq$}d!h==yksT2Jts*N_Z#t+ z_tVk)&k3-;Xh25~c(c~}jzavjDg0-w9IMt?V^pLSuc|qW`Jwkgy|y<#sCQ(ei=~2! 
zv;({HeJXZp_m@&StAhEqG~un#ml}F1=)sJ1p|om&Fr?2&@nZQB+8XqOmP#z_MFkdE zcqkR7#L4l~h+WX_-?fqp!R_JQqg1q${Ifo*hhc5)JB4O}5`%Ff-b!yTaoKu<_P@^L zopuYxe(VJ4>XSKXRSG;ga)=7_&%xT76bN|s6d$^ch3-GzQj}dWe3;T5{5;L@`I%Vg zUFqRaZy!k^W*dbp!?P0Ww?X_`RUtGdpXOxqwJ_s$cj~VmOg4YoVQarUMSAmP+A=r{ z=ay^YP{#+dA$tvZee@HyU-u2h|6D^SZbg&tlNa#Sxk-2&lS(r?N7D)4!6R2}+bc}J zoGaU&Gnri4NL?Ds-BKR1PzY?EBf8rjrn`ej;n%$qBk+1MjrLhX&5Aohe!~`aJ*FY~ z`9FnSLu^FFg}w+IPII5JhRO~;HoSj=D!P0Qy=eEkNs)2RpWC&6B&5{lb5Up{W?L^; ze7?T~r(T*$eZ@11-lPuRS0>3y$M_0=LsEpNwl!4Is4CuDvsBPu|BTv|pHtYJbcEXr z^XY4F9Nk*>jaE(S!4b`~A?N5LIHg#}Ro^D_kFX~4N;8AY=1FwN+LsO%y~TxH^2I=7 zi5>Z>6VJ_@B)uOa*=lbNR5{K=rS(xb-nIp=b*;gQ(515Z&a-LFTVviT>xC<$?@+bZ z46d8k3(N1nftx=2;o1Xp zEa|C5u;`T&I`vPc>(YCE`N4+@m8$~o{!~gMrCr3=2PO#~`%B5 zscW<_K0&Ct&{GuWIMZWmZ**;$D|Ok^A-G2_H7y)2*1nII^)GM5>jU0$Z!ZF9e6nePG6i{t$G0Iz8Sv7JSCn(3#ymxX;}}!FbXl z9+=uu{^NloSQR~lYbHhb!9ESAw6?)t4%MJ`F#_kOhvE)6jApx?_)~r?fk_iA8&F22 zhknaG-sy(k&0X=wAWxj_xR11-Jg1^<`IzEdP=d$3VRV!)#9p@*J>#qe<&Zf1*m9NP zkHnDwG+)%IIKbYYZi{Hm>*uqPMPUx4>fq33EEb1&}OaC-w|!{6ZJ^TOYzsT4oa1V*W&A5Dl@IsZ&cDOXv!X^7RLu9&uhXFn zy(gD=o-zeHAA}a$B)XVlflcp%7>;k_xmwFPE7pO^RgY1$c^BOIRG@X`O%Bh$GmY!Zy9_i<_%pd zO%jtEx^cAbJ838F50C#i01vy5hKzW7U?~%@=ev{}n45$S`=yBlM+wFo z6brL_1}m>=+l!{2+2T?`0mqgGft&R_QI`8vaCM7-gx%JHz1}^=FPS%+TB@Si=XV^i zZ6JP|;EyhAD(Ead5jPF8;g`1uOPe53Xxc2XSH?c07yAd|ibdVHOK1^&a*@Mi`>Pz7 zH=nOgm_vVN7U8Q8yXc{XFW<1ah1zQyabWLEsZY0%y1nRzcGCaRj+a~Ti~B0fHod{C zG!99O3l-&L`-8aW?rDm+=*b#>yEtQnB^7D}!tx$Oa}EZvP_P)S%3`^pyEksWeSmLE zTY^hw_u-Kzt+0o&IdxT4Qx--OAGv7G`t}##Sf;n=oT?7P8^^NVoR?%YVHjRKvYb2J zbbyL~2@5WRQ6XuTkaKHJn zXtFFCs>axT#^r$XAGd9Q>j z4cPyWfU8G3;L*(kz&pwXwndx}x(CZ?s+9rFjO~uKcY3kakIAe*y9)aB%7(o*XCQFn zMoI}f3$M!CW17h-4BM-N@u_A|c&;;!>D4IQzw}bfYq>&ql%}|3ptjQYP+R#V&3%Hi z_BdL$bTDRZI16JkAJC6?F*N+{NBAF~K`&pEPFEFDOuvB?bjbo8@(ZD5S`RR7SkGUz z$Mf_X4RH7EOLY8}4qN^!1x4XV?m00ER1fdsr{^{4%vGs3-)Jgl2B!i~Fw#_a`L7$MC5T-0d$nUWmh4HvQ$5W|{P`aj#&R zw8$a;cfQz9;uIxM+e-@?2XMYpl?K^mi0Oy5(amoH2ai6ErBAA0$?=z4rs%NXHU#BuqQR$S9PjxWCVo9m<3|m{{%8EjFxrXTTKoS$ z|FOdVGZ)S+;vF`_@a3dx(6RF7)HX4ST!%=$zwPmRDz@uw3tQ&|^JJ@OB@<_=@R6lmAOIFoQsPg} zDLMl)W_wfOwsT@|p%Gqv*jBlsn>$7~g~73GLq30MKNV)Yr%R2|lD@nR&AU|4f%PvG zucWT-hfzhY!#?VKlTFN!si6H8i?f()1UUi1*re zLIaDV;u7cEFmKaddKoki<~G>zI>jFRG4ZQdcI_Ah?Kn?E|M`HvjiKuVT)X}Mw)ZTdTb%fq@#Ox9I!LDik?({>A4Cl+E|(hC?H)&ZOTXz=6b$>jbv zoySynqc-`eSfyD*s;ogFZGm0dBsq9LJqga!%!IQ8u7jlmNPcvyEH?cK_gn6RCx1(O zS(ZM^0pGju(q$_6quySa>e`)sn{Nt9;jgIAg6Ab0>+@my@5iF&jx3mdyoF{hP=j~3 zRM_Zh8*G3C?)r2Eezg1!dGj4n{j3W1+PRRDMo+__OIe5+nIKPzqMnD{#nbEl%Dn4R z#Zf8^)KA`zlCw6EU)mwsOb=k^u_jQB0(Mi*eujPafgg230!pJ5Vu}5$2O)?FWWOk_!9OF)^?iD2g8Oy z=>iX2zj`;@J1@i=Qxot=kiP80w@TcU!SG<41GX+SVuiFpb})D%RexRrYp$B}vDiMC z_fy*L_!J7$+qGl+t}4`Nnr!Vav6rs(U%L0wV4yXiVp|RM}z0VXteT%5d8fg z8OQ)^e0SkOi{Z5T;%ao>+#X*}Hs!`G7huN76IAg}Vxc-egsO3W@l2KtqE#CV^zM(5 zPYdWx?N7mXi>mUD@-W|iuEx5TGr-CEufwP{Yk5-ddSTM}U3m9JCf<~ChuYnqqm$tl ze7nyQ*N+@5e{wz>PWCj!#iyl?^6n~X{wo8Ym@Sl3oFZPX%jV#^NFjXm6go0I2`Af* z;fXu1<0G$&)MnUWNY8B}3s-X&`fJP1*Wx&|4T_vq zi~d2R>$mq9+rUy3lit0?4}#lgbk z^)S5b1drQkqr7jK$2vYv^zi9Px|Y`gsw3`Uz}`KeI>8*09B#74-GTBMR-^G(brfVr zreOOuo-JUN;b&IR4VqYw} z{XK`BKaa-unMxR1_Y%Kkm@6k;_{^z~9}5zz0ykZ`MyuNYq`MB*ic*Q2Saa1(+5NaJ zKTjEjI|}Afmn)-5Ykz+%=qIvb|55ni_?(1?4%l|q|FVIJg7QEN{kZJ_o@bMwbh8sC zVqdd9eEet(S56(uA7(G$hU*fz=wPmR zzw05|aij+o8ktKB)QhCEtsMikp&;j6s{1W5^gq|q`(dFZU-p3}ZK>j%WB!mGnkMUi zsSOEkmNaCZtMJ3dmNpE|7O!3Xq{x?LQ{NyhdUp0a^%&U?+TXNf^$APh&s+<3vY0|= z6{k^*7kT)Pf7E?h2JP8qk4JyjVb^qN&(&Zx&oF&YqpdZC_g6G|by2+LeHH`YM?G1*qR#>0K z;;rg;G{PZFaZ_UegsqXzi=Wry$HNza=06pmOt`{v1+#eT=(&(soK9H?F(u+vqtDo zDRA~-d*Q-`gt#HtE-JHnUqrY**0KCgvClclb|(F2+utxoOc_Jcl~ 
z5MJ&Gr+Hen#A2R>g%vL;CnSS!uZl(6!=reL!5Gna zauYTC4W|05A++b?K%QbzPL*#}giyD9a$9~)7E_Zb*j|r>sKJLgFlr-`rl!(dt1GMS zHo@;*C2i(nFR0!7oicPAdCj6OoR+eUZ8wg#zqfumPMji4Tc9Lf%RxR);~W3;^!u^xzQQffuO@sap0xmh<_Yy^m7q! zPI-zt9WDs(As$vtI)@eA2Xc^&LWmtu1*`U0l55*%V(bt{tUR&=O|;`sJhmI(w;3;b zsZE8eAD_{dx=~c58A?Nvm*T*oW0Z3S1#LARm_!bZ_oR`#c_#?!QVN zaZ}Fj^+z~hY6syQuBHoa70|l-y0l+&iRQknflUrZut9l@Y%OXj{cwPgF&$}*o-T!F z4`9#6!K@-N&!lMp8NX+lD=!4dyk~LGV@bF~3Djle`Qi`D;8-sP$+kdHQllZdi=7hUS6F z1wEKHr!#hWwH1pNFT*QdA81{U7B;?HLkD+<@xKxCA>idj@zil=Iz7`#%IKb=Wh;i# zeeEo2KO;&U;Lu*I^R^}3+9zV%0mAha2I7C)5;4;&fgRuP!Q+#ri8HoLz|I@|IqQWA zk1y=TXS}7px|)=$Z21qQZDJhqUSEE-axedH*D|kL8m7lIu*#WTyeeWp?(}Wu&C5$r zI^XcX;pN=?aS*QH6#NrrBENl1kGrf7#@-K3p`5)VZd z(ATzKP^4CI0Vi@ncP|N6a|DxIL=yvW|_efZ3ZI0Sy z56OE$BqgrtLk~~N@Xmyua-Y_27%-@Wiw1ddK}M2z_3~3Rlg@rgFQdS{eLMur4x;}S z_Ct%3{doOMAh%BX#Q{|V`E`3qw>3`&mshrU*{o3va6OL8cMs<4-|F~j?lCkfO#-JA z?sTVJDB2{<;hSdNA^p);?^B>XP8>aX($Phx2KLwD(vuMF?{r3OY z0Agu3s8HCFVyKZ3m;Y55a}(W%*Mm;P3qsD-~AtT zG|-2TT?RZY_b3KePUE?C1M#iSAf&qXG)}D@XJ=k1`BCoyE4{YRqHnuluh(c!i7?>c z%()bE<08H?dsnYe zHi=)1NR~2RYvfr*M__)x7r1}z8@T=5pXGD(Fj`SB4x8TMFtuhd6~SQIc&7_jzjOxQ z^Zlg1(-TFzf&BP;E0j-~ijQlYL_>#Y@%u(&Zn`jqBcAjkt$)kOb#OQuuYE#W-)!K# zht~LDmLY$bw2og}NsL1EOukXO0GiGo!~?_U3nPF1hQ+zTp+T5Y#l|VtpEnn75NjbW(z z=nxw>pA=?|E#)Y=iQKQSfco?@q7G|;w@N+ja0`O6vq}6XwoDv5NtM<&XOOeUB3OB< zOc?sRn0^Hs(I>UBoWIf&$DMAEZ#S>PzTJMnxN!oUDNW?OvkDtcMzlwWZU1|OPU6g+OH4JZlOf|pUvAuIgzWS~|-*z(Q zM|F85k1PRQe|7Y}bPHuWz3@r@uk35^6mr(>XRlM=APJGKG3pQ0^3KC~G{49nR_F_?4lGN5#4)56c1Y2ATc*^@qF{Ew>{yF7=`@VdD z-$T`LOw&Y}`N$&}y(N!R9@tXMjudK$T#t2zn%t)SG1SIbY_2!uPKwUTU|s3GYSbM& z1uwxN2i4Ga;T=UUt5MwZt|#xU`NjRGEQ8%KcGypM6wU45z^>BPsc=t^o1q4bbNcWp zFGKF2-$l71CKkOuG|-U4J@EW_;zS`1+m4oW(XIpNHLb_EuS0mv&qP`{`wInL45P+Q znd0uK860Au5a&C1}!SUz6``Ta185YsjqR0Gc z=so;)QOSq8Zbq%>!E&#OflBjX8?nuD6JfvlX1?Rv5mT?fryq-7%Q7kpdGwriiFAhiHJ;}Gz6>caV&F~@T z(lfZf_iB9k_X`E5S<>4db4h1WPmJ4ti>7z*!Fi4LtdV1Zd)rDGFSv@o4aVZ_3_q+4 z?#OmG`g8X0-ViDJVZHSQ+|m62#%+zGm4{9$dLCO#t`9bp%$%9U3tZ0g*a73%Mw!FA zXSKlRa3uQ^;dmuf13i2Oh(;dkC~22BFLTc2M@>%L^-*6AKcFt)Mh)Tm`bgaUAsGG+ zUd{f?3VCMNIK2PwE;bo@;n{=()PHRY_}=i~|F#D6vZ=Dp{{#g(#7#Jxi~M4U)5UTTzBG-PP%f_#sJ0@VN`bW0!?&5 zn4f5%w7eNAu3}<)|J~TF=NcOKHv?3b=0iZja;)f916nR?cyO0eJmNS2N45#1byND_ zq@$kfU!l)`6SiW%ihq1wZ8bt=Pq;ibgtym>rQ2Nw^3*G6p@u>bQ?3_p56 zFw8DO&2Cp@caC)9qTWKFz9uM=BzSDS#~ZA zU$+aJH%Q#8T}gE9?mAIukoLaj+$B2|eG1L}Cw#v1TlA?4p;K;0FlbMuxZ1N`w)HU6 z#66YNvRI_Dcze;Q^FKJ;&$5p<{w$&jK^?mg}1mV-5MJ{UW2c}v&HSxLr}ikR_^{U7rW{|6Mn`S zqip4VVfUV~@IcDNy!rG&{2uJh+t+Q!>;Yd<%ecHIhS&XltBuTe8@5}r?sd1uv>>pPg zjeW3~!*b41!TVBJyXS(eY^*hXj(?&Uf8{icm-4|Kx6X%zga#P8sUPf_)2gUxJi$}! 
z-b08_EIql~o6Aln3ezS(f|}yt6q%e33*xOYx&I-y(8z`k^^1hS=zTE%-Z7zC?<2LW zI!`aHvY=}AY;y4!Ocu6};c%@J#T6eVhvX{gI42DHtm;7j<)7uaeI zfyTxU)TT6-2J~9b?jfr=?a+Q$^1M-;ve8JGyXvl}{$3j`kG~hD$LWe!|2bgv(^WXY z;D*$%{zsP<{1Sy2OGvxdnYsTB3Ln&->-P^Nr;ic*OY4@<;l*RPYxM{6eJ_g}Z|}ye zls4QLoj{8}I!U_|tKq-&N3_hc1OK)2V+R*?e*U;wke&V^{*}BvwNp9p_rnnSwUWf) zoZCfnY+1@=eP_mjvFZSZ9GWBzPIE1 zJIY{MQZVUTkfFB|IVe!0Vnw$O@aI;cKyl;-RktKrd4IN2axiP~5f%-sEle!dEXF0~^lZopx&Fi?{t*En+T z-x;B* z*Kewy@|=RRCCztP7GG66PHX1@&z#vFhJ+oVTT_oIF3okO!WVh`;8Bd|xZVhRZHlML zVH1G;9)M0vF!qgz!nmEgFefq$#h<&_^iMEtaIa!Zbt72YGhAXG&Bssus<4m#adh53 zKp3Lxj`({B_bwdGR(2=A&(4%%!fU}ZIv#z$?BayDKt5J2V~xeCl+J3>GjbEgCEOQA zY?_Sif+Y=9r5Z*&jDwJs@5HEh1HA8a7{XF^mz3Q*LB1uYph6SKfL?6lVZT`L^}HTB9ACv zihrvj=#FC=XO>DF%5@hwwj@eSFB_vg)@_OSqj4PWnzRqs=-lO}o$JNJg9dSH_+@w) zw+v<^T;n&{BYEeRzP!BLnG;f_U1RM?RDJqVSUJBtjvjObo^>)NP*6ZyG1z%5FcwqaucLcB)Dln0mT0FF?xp%*I=DmS9$3 zld#!Wm#x%JL7+vF?29GQ_z|zE#5{oqJ*=nLxHz18#er+>I?@=u_Cn?|D>`M72Ti}$ zij6n*@%x-Ta9+xV9JP-{t(kuv#AWYk`13}*-Ns)zVCD?k5#vsaeWYzI(d=URVgun| zup3u}=*f50Eid^K;?M1+?U{(a6KKhUt71dtY4|U-EC1DBNp1i7(~|!C$fULg4F2@O zOA`#_uUd^U_wgNEjQZ?gP^hSWppCm%E{D&tI#^o12tra#`LNXc@XQAf_L>N*X9jt35b zz1cl@Slc2VInoU-%#@3sS29s+t(tPdggksaIRY-enJDgbpGV_Qgkt-|wLG`lxFq#& z2`n9Tj_$N(La*F5Fzvpc{Nj!FXw_#8RvAB$JWRwXE9c7g+n8f&kT1O%rN&l?(k7tG zEyXgSt~I-nXJ+F7soV4uQMp6udfD`UuY=`d6sy@2!I z9_BZb4@2c4P1FqSDn>PT=DmJ_)aW10vv0+dSYVz0AtO{d>IOQpG3OoA?v99jOv4w9@JM;zRV{QVh)1nSnPuu7RJA=gS@% z^c8hehtX-T_2TtY)|}V38H(bJ&`vg+cBbplo$=cIPhEx0w@rZ9rkAAhxn68zxLYy9 z;RYBxs?c-K??OZm6&k!`Ae9_7#VcQSNdEE{xSygA2`&=na9X~&z1|(n=M{0c3}d`# z)`n;Fv&1~@LE_0Xolv|U%MZ5#rG_NKqm-qvTG|IR{_0C@);_@Uq()LWSmLPh<3Y1+ zFEVzLvZdA<-0e{fnQwbVKR+bO{5?{IWr_cUoU?(npwb@iO1-xsrv0%9PbyX}*2jKz z^%U@HA&#jFL%loC1*2L`xUcRAo$Nbd?Un8rsx9NgAJ5{)5)(cZzKFMWTSZsl3`i#| zY;ee;=clyf8*K?!-Dt$+^{L`7YaNV;vWC>gZ1JC+1ssSy0OFo@c;0Opdd;wu^;GI$ z)9Cgb@aGCIZ;6EnlOS{(cbn7|J;+XWg>s&Sz~5V=&|*Xm{gie&tuAMR&x~2nGO>UP zHmFj}n^G#Q^n&$m&%k?YeR0cubuPUC5cAsvBX@+s_FQ9{b?A<`c3f|qvq)m1-dK*Y z7j5OI8&081ff-bMcg00@xtw)%Iv#JEEzET=QoefXgUi1(L)X|4-1B28C$y;&d)ml_ zeVsBm^`wcI=4+0o9pBSwZyT}diqwB=E*0w>yYkjL4~NS&*T{NEKlwuc@eINOJX#P1 zek&s{L{5z7bqP;IpQV00{Mc17GRFr4`iJqiAK}owo-yEHJRk6Wg|eVCuw#Fq=;We? 
zAGV~iwaPsCxyR#_+x1h)BUa!cW6}vSMuJ7f8yp{$$^UvyQhE;d$6@Y$l*gk?>0!n} z`T*C3g#$(EKe7eh=D6Uas>$?e;s@$KLLIMMm^JvsGG!uYx?m3 zPW7}W?>~3xWYu0U*jx`2&96~-X%c3Q8L6DU?+-+-bY+9V`vv9eV>r0RAN7iN!#bOx zVvTOJP&Ypt|2t4bar5)}P~QTyTDX`CeWG!V)*aY1v^)0qNfXvyWPUC&07l;Whf&89 z`10rX5)V=G#vgmIs_PYW4vFT4`3~s#<}-rPPHd^b1H0w*1LvrnD@{dna%wFZr z=bbAdRjVD$bNh}q!@l4%gN2w;qNDt<;WY=v?!^NmUrE*8eAal`j-!GyC?mrgi(XH_ zH~9x~6|5n{`f|mzdmrHa#HIKmF^$(YOyu#udhrVRJn6Zg3Wc&g5F{}*CJjqL3ok=V z_8h@~I{R?Vw-EN)q$ywRCgnBrGI>=8f(!b4**b)ab|6xBEhNX$RC%@kCjg z2Iq8Kj-dgIWsNcMs8eo;7pzs;*Yr48%%8;(E~lXC{cVsfh{AZsA2jJ;E_C_&lU`L9 zq1zuld2cD({J^2TvigMuxjMhWnA}Pm^xtpnqqjd)rALsHp72jZaCMg4pwXo zfZY?XQRs68f4mhY^$G(qjOMUe;ccAz>pq`;^_fZwuF;gDp6paN9!Dk&S6#(V_^-EJb4fB(_-yocY`|bz0@T z6ueslxicoC>X8Iq+HwWv8`<&A8*==5`5oSxt%I81Hc6cN-coKf9&Kms=7Gz4uzZi7 zC>AHc$rsr?>BU3#$;#sCDz&6*{7;}pD;g6x74QDb^QFuS7w*E+9muyoJYlV^HmIsr5B%{P+)IxZ-lW^ocWZrikGmj_x*<^4X&Yfp zb^~nkxJ~PHltRCW+eOzq*o?@{WQwHO6;V5gAJ55;ahk`ueO+7G9B;# zw52iCCY%{(#(4vmQi;wUkj3w%Wf$l1>#;3WuW}f`>yJ{F~DS)VlRVJ(FTu<896NWVfhjVkCU2@}&8|2QL z{Je0AO*bANHkUsa7NX(7e=@()KKO9?Xe!?z?abZ|gGbc^`QOJxXi7gW?U*Er-cM?U zlBOWMP;!a7EGQPXR;tR~*0}KCr1$JGlIddyRpselNpN*qs^HN(36?Ba2IHKHq`plR z#dlZ$2M(U5YB}(u7#Gr;tW@~zjHL;tQD8Udm#}Z@6e^ieDl|OlB7BH?Lq2c)N?sp1 zASUSigGoJoN$*P<>)qN%&IPt`)^9!MFHQ%G8Lgs4K_q;tRLAkYC&^Sb0{mX&!<*qR zsdbw|VtUWx)a%t$Xuc0NI>mDR`XRXX%x4G~xnCIm>$JrEJw^^CAU3w6?Xe+ zvgOAaFex}1KE@Tm#_ZvAw!azw{$s?IW#y!2U@5FB6JX-R*?eu+HujnS5MyI^fXN_l zys&%%tCtRD_e^tY7;eo?C1WUE;zV}*k}KptF`}Tf!=UxH(c$It{gALK5t3Km1(k83 z^tkZ_EbK5)`7h-c9(C%%EtSJCDXSHi=<8`$8X8dndaXloyMq}Xau0!M?O^>Bz{^zd3Nyz{bC{K)-CjiDh_8@7yh$qvG@%puf%oCjByIH2>K2juD-gFgqJ zqyYtsXuGh1&CH*ZkDEPDTht3H)7#PO;w#*DLCGM*!G$T#gzr=RC~ zW6F4UcxD@c{qKg0+sBx~&Qpbu<=up%B-X{-lt8wx{eXGrzr!z`PMmfx48Q30#5Yru zY0u=&h?8^iW7p5{_t6TBd~X4%Jp|Ai?g3u*pK$Pu^?af&L5G!9w6QT4uDPY~wMz*! zZCI($qjE0pd)h{MFfx+|uc?O4ON`{bI**ngG~LDvdN1V<1}+$**#KAK>)3ui|aV`*NNv zJ=4oO;W(ud#`=DfyrT$?_qP)wZ1&=mU&EA3if6Ivw?Aa{B8)UTyTJ4ueO&$IINkNO z<&WZWT=Qf!f45vlo97hbmYH+#Z%Yz*ZUjE^{IfX7YbEyowSr;|kVB5Bqgk4?$(QFL zyx4sd_jwqIm&_gu2WKC`dp_wfAvc=l{JRQfDSyHGaWHA9C6mg5>2lMQqiB6|8-EEO zEqQn$IBReEm61^txX?O5JSv$ z<;jlfsRQ8S%wD`bx-08y_rQC`yYaL2aj3au z!B(9l@BfW0J}j{3r1gWa@l_=E`R#%}{UnyY{s4ZQ{SEzIYA7wz9I(Z4JKJpk$JzI1 zkwy7s!NGhf&Y5~pe3zMo$v;=%xePOMSZfGzs4i;m%fi-(DDgs;4nA#qg^LILfM=I- z(O>H8-o9aj;-Y-$@~MhUBD%BdmNe|^wgjh1k=o1o3JBJCP1|qAK~7=_r;ne8VXux; z&5ltJkXZ|@_7jpYK+Wq`nEN`ETu!pMq{&s0ulaya+%tY|Ofhk3AP!2pz+V~)VV6r21nz6(-sK`|<<&|)+)TV{;7h*JeS7Q7 zj(Bg~Ha@LWN~ff=#T!Eva%+EI=(QpWMo8JraJdJ6__+w}VrSBSI~=+11wZsRoq$XB zri!ZigZXPv9~kSphpmJT=v2H9-nBT3FP5K#@q8_Kj^2oa!=2 zka8Q=F(YWWZyFtu7{M-cwJ^lOoEtl*ajfxJ`lt+d=;-zYKD%xQyYPu%TV}>t9V{r} zOENmTXv^37o)taqq^-5fE$Hf0EdE$0z;u5#RGIQr+;-VdF!p{dB$>35V}~t*qWY|G zI(`5K_npM&=cmGhkRDJu{DPSCJCHOU+MsRjQ*^$jg8nU&xIVN7pFRvAyQYq~LU~s# z9{86|hE1h<=V4Hnrj2QFTi{*uL|Xq%lizG!53tJ)I^A5t6T6&b*V9Yb(%BLR*7s%k zK_&D%uL52tTtUNZ1vOP%71l4;#Zh}4c&EuGNw3%D;aP?_-_D4g&%dG_zU?tW*OVNa zA7J;}Z|LmrNw}&w9q$?s;MHd@pi#zhD0omJm=`WV<;ja;Sc)k`mRs`u1(&gF{&<=W ze!S{PkgPN_9+y_12hn;h7qoHX{fDMgZs{S~^y9s7CEymHKPrd88SR8VdyI(#6G?xu z7N&icaq;`!u*lF58$ZR;{I&rQ7PyM%xu=m^%R^H9DpEvC+qx_FX(-)7JM-1ygK$Wm zAK&SInLKmz#qtMddEK2e)UNA#pb9P2y|fsnY)VjEPf3Q?!(RwRW&jE2b;x(ChSIF` zEu5_WKZ?%8ovOBr!%?P6Bq1qLcrz4{!r9L{O%f^^D9uGlnrJ3dri@W2q=--oDTTAw zItV3c(43)B^Q1{def#?dT&{EN{p@G0^;`G-5`A8DayJ7@@!rRIg2%Uw{r)$a9k<*F z<%cxb?x=E@>-89SRGg#kf2PvIdNJ(QE*)m#ex3c9SS3=~wuOHbAY@OkW^#E~g~C zlT>IPAmYuQ1>oKFj?l%gVEYm?V3GVqT;TDFS^Qm&FQ@8PK)gA@o=r^Q#yXs&P=ym? 
z#=)z=nb>SJm%d~r5M>EI-w6?TVWK8$nkwc}C(ALT#RJ&}x$Tl#(>1jIV>B6Q?U#JB z{lcyPTZe{2UFdkY2)(w4v8(F8+4(3}QQ|3MXxOg9n>vaZnxDkTJ^k1KE|J+96|!Z_ zigz^1U~|svP)W@;oHckLDd~kX?bI(Qx8W<+Ck1g+!y>4)eIoc=y8ygfCo^;J;?GLL z@i{|!^50>4kQyhcN*%++3R#NK8{06#<|Ed5WHQcVJUcZuoCdg!qM=Q<(SBVAQ(QKW z)a4>X57q_2mEU=MQP6o*E%^lgZY}Ic;7E8=IEJoG=)ssZg0r&XF;(2im#jJw$bNj% zg#H~1VfbVXf-6wPB}Wiei6p*V!;=S7TtfMjKwOQl#f~b*xCklKyxN z<9%C)(ZryUuxP&v91^_czeX8zoqpHIdZ;z2sZOV<0mtY}e^W48D8>8Q4dl^qh0b>S zuv2o%pshBDGEb$U=i5c>6E{R^|KdOK*B6T1@^O7&+9QM;4f}a*(~sEw)&M5`UBGT0 zJPC3AjF@q2uyB6Vqu+peu&X7BpXC_>_HGG~Vpv2&XTCS>TG?Xv#r?~;)Aa}1n?(6O^9sOSx*K$uyT0j3Kdsit!BfYz1(0zs(T)T!{!h&9| znlG)Wf5;SK)!=GL0@}a5&+P?GI_}3{+?L1e)9j;o(yWm-K5>BOS=AJO%@0N{?+?L) zw}7bG5(gw`(@0^b=Zr}eJP9~Ml0}bb_BnwA=PnC>n=PS3y@`5?p23G)5zN{VOLk^O zcr|z#jYypa=cYZQ%{d|D&>#y=|E$T%Ium`Ln1bCz3#PfDn69qvCq8s_7khVgJ^LXo z#~BK8=vX-zdx!U@znaF3sh^_*J9s$V)+$MRtPXlj8)(?>7+RSrj|09vV*3hQNPD`b zc%s)9w&~m_W|}b&=XExMeugfKdZz>rZw!*&?|%vSp4~LO-UR+-#gS&44ag;Xk`P@XczV_-1Ecv17$# zm|1_0OrPGv`d#D6L!3^1y&o`-;1ITCUK;8r`eO&P5rf4i`X?~PT1x}LE^{D#{B8l; zJ2U9Yvd=7ST?uaCvY3779khF(#7<}_h_}{F61mM7LDoqFsYa)Z%NVE5KU$aJIrQMw^(ypG@jHK1@h)GIDg!&DHgxLBBg}cBO8>$QiCu{VrG{}(m^hMDr>%zU zGI=)fi4#psd{2YV93yA*2^5!o8!z7&1Mi+)=3N@=n4D67c6RbPR--I9%XSvCn=u=q z?E7H6vEvMQ)r5lLEDLyTa+KHgs=%}t5fC-zD8vZ&km2Bh&IWQ|W*7^O%E^LPdm-gE zrNN=+1Ep;V^KtViW9mD6E=7EPQ>D7>1RI`aKnHi;z{zLQ$jU5?_1xS{_Q@$s?2ske zS#*HiyZ!`W-fHT)qzh|2J6Mdam*57u3Gu$w6utKe^C(#Zlikz=|JZFD{oYz&{-5CA zy6u5Y-$E$SeIhg#Ex?pT3b1ut2sB%PXVJ`JyN|s2V4>p$)zpEPPz62d;SX9LZOGZ#r2Hn!;v1e zU-5z|C3>(mvLRG`zCQ#t$V-wo+p<3%izrUm|Jbp*50{&8mnAPfj-Q5Zql6@Vu)C(o z{oQ&G2Y+p0r#7~-L-X62=AcsCxhw@QB+a4Nbs;R>{x?%;m*vCeePkzIxWnZIMNDnU zbi6S=6&#e`Vv@`|7Bx44zoS@!u?^So>|saI^*;@$vZyxuC4(E4&>)mtL>6 z!6Anj?^kXG`!$2$lUXembWG#(W?pB*6rwn@i&JP>eHC7w@r_?U#DcXHD!{L~Wi-wt zk$;>Pz`7HDbJ8(jt9w*IG==TE*~ni4u0a*P@>uQQVnA z9~`bZ6lBMY!=yvb{8#6Vc(-{IW}ldbUOH0#bigk*=|&%F+qjH;Jr;p+jx8lEQiSLG z!*J;f8CbY)3!FXwk9#cFmqMbq^3Qzw3;$0DccXMP$#;yV(IadqWYjArdzFK4&#$1* zW}$Dl<0CVudX34h8!2E+INDEmk!gi9N2*#Q^+bW=GVym#M+qW+!WcP9dG1+J5Ag$cMlOS$i1i z_W^V@WyySJ9QGy!vz{>txMzlxy}h4?x*L6=x8D$+t-g+@UQXxh_o+Zdurb-4*2g77 z`og*`FPOs-Wk_G}my1~YR-!n>h#869D1CV(9f{e`)EXy2;jVTzwq`p;ubB=rSv5@0 zIwf(m^P^Kc+tJX0fIYM$jjc`W>oFeOh26>P)Axdfo~!6tzZCEr?hfWf9lXw-Zf>IC zVcyj=PH;}-v$(Ap(4v$_>-HOx^_J!2r|-;W`$;&_U2Wmq@W;{K2bo|AWs}#X3mG2; z8dz&gOAm!Z@n2QVi&CbiTFI_f##77NQcRc^AaEQ< ziI0u;qjxRuMRP;cq50iae)*f9sD8?h9j}{BY_k#!EjYmGyQ_iTsUXf`>HxU=$dbE% zH-vkz@jI5BX(9K02GZ&uXZeatno=e{BzRw9*=o;v=sl&v`UD12!OcIcw9|++n7eQ# zqB?H7Ukm4%l1n<-&2T-$l$F&Gz2qLS$hpFO$z>QxQqxh zre(Ewe76HU(7g(mW^AHL@4IkZdkj4n+LH>Z>Fkl5QKe$7Hr#0($tG)0hkBd=+K)~G zGZ6M)l_RLH`cd!-SwK;J#!1yp65!9t99l7Y8+Hq>6W57jv1WcW%I3>~)rjL@7x)Qt z#ty_r?_j=ZTPiEF2t~W%KX`hDI*5e+kK%++!0#O@-Y_hNU*sK4O`l@;;FqdW+r3d_ zX1S3)`#gX-MK@#6;9mZW`$&nxs(xfU@-TBzEal^6zhhHt6uHgK1-C(QRGlNP7DU6`@$T_X*6~H$m0*@Sc`vc%ws2tKR{HoGMtncP{8?jOtot& z{irvkelu_3hkiGhZR0@!)R01pYchGOuC64xW>p00vDuF z7imd*zn0;eO|Eo%;3jZ75d|B!cCoS67MLnzBP-gz;yd9^xAj0E=@wRl=`Stvth>dP z>lo963sx-nv?KkC_lBloUlQ@Ps3_#d7hcMt8;g(Qg@n&6_^6p!!FW0>xUNP;ccN&3 zjiMB?z3Ju?j*QnxDR56Yf9~mM@*3I2#^nlpQLR-V?Tv%h8L9NmN$6#3%qB_22$=rQ zn2L7$z@&L@*l)kQ;FlU9WOHA#^btM0pR}4?_;*>TrhGHMjSC{jp!@lNCY5+f+cq$Oo3^Taw+-4FWqviSzXw#17U(@)m+`?!K;s zoDcg7^+tc>{DUw${(|79?N27pHnGm-Tj*(tK2sbJM9;4$!}4|U!a355Bi?vH!Ri@! 
z{V?Lx>~W&rrw2Hhe^1$zR-RQAT;Y3)LTTS#JF>Gcg;TfFHD!MEq0 z$YQ>aXTJoe?veL9q41RtEm>xQ1<|{}KzSK|USdwcxz^CDdl^QJn@ghoGU5Q+?Ud*d zMm~9=OzEK*cjelFt5pRHQ8r=&pZS2RFgMSuzax1(>$!nhl?oJO7z$k&#$ zq@82vCL2%ox%F7-RE?{>Q_)9H1!g+$<>VFSvH5>$al23}%RB#No2)`H)ngi5XuHRH z8e8y@yU>5*c8V-yt=ZG;{kUb8A-TURgrnQcNc_wQhP`aV#{UdK-dGtI@7+sFwY5nu zznn8#`i37exrGm!V+m$wmNM&&8T^@f(PSFi#R61hFt4?Tbq;dI%rsZ(9cRJq{Thvx z@fP@Ntvy?jvO`o&{!BkbpOpU{V>>()sO?5K6Fo2$|L!=*E-86|sJ58%Ztu_i(C+2l zOdmkyBTisXv^Ra5;}6YG2a0v>=|DjL=k)GFD>FWK5yuOWv9ZnZtWxh0O77cHcKE)WqWPh=%%k3m`!P!UxN7GV8Oq)%rDalKZ&i9;o})@fyd>Uq)e|=VOUp z^H$!ktO-}5m|eW*M+psMaEDwj8$4jNkSjZl-LqwIK=}mmKpRCm+*tr;V-Dc0&u2xu zmL~A_p=a6EE2enQ_cb=Q-(#iq=W%h}e>5jBo*Q&w9^PD2flD`g(|x^k*0jfylYJ)`cJu`m3iG)v*7B41kY}0qyMyp^wG?_`VkFIFoQI?)6+dY4RMxDS|RS%y~;X{4}`w*^_-010&;t?0X?<4 zSe0}SUz%%81Km_XabLJZ5?{u$&7xqEko9^e-a|D*;skbKIR7d}lV*4-LH5j({6mxR znENS#GS!{9y4Qp0Q{;V?>L(&M#~P-XH-w#(|II{!Qgrc4gehNZIO7S1)D>a?B}&2! z)axhKOirXVD}UM=ttiFyMx)`F&PO<8q{$oImW3sbi^0EFhqH<+hGV^6xN)W)s3Zns zuDcdRO*_W23YFn^wm#%uJ%>s{hU&=u3i3J=0gi5RAYD?!BAt|>$4Zy)@f|KLYtjVG zb3IUIJCTka428z<>-efX7A9@~$5@vK@Jsz!)mfwfpIg+dUqA=SUc&SYW1_XPe5S)p zc<}5T`X}qd+L+^bG2k}qs$YYGordB?p3JiI2l^EDfs~nh$zpygm*ZZJ=>?WFedSgR z&-sWwbNYbe$XUFOh7G(*nMnQKjU(Cca0nW^8fW<*LO=abN}en)ZtaSLbZY|&p8Abj zs}zqC<0JSurjBmEQv{FU|^n&C}V; z`tw>k(P+(rw;v#-fWtKY{uDZud=>6KJ4&wHSa>x`0cMA#vKzr~V6xmE$nL0t!#^G2 z*;+05bTJ-hAM(b$tGOf{cw6vzOr{mFLC{}64*z_Z2>(iUvbR>FSaX9st`vL~6)G9D zz-nj2{I$BqE&+9=(F_%v@=q~8HVRrmAoSBkX=GGe+R?Zf{D=nL>BF@ zXs`(XW-{115mmmKv74#Jn3rDyA2!c}%?Dd)z*8@~h-CxlaDq0RN;pQ--c=)uMK;H< z7?;~b;{vfU4sh3%jD5%os%6(K6Jxbhr{3fg0-YBu})67o2 z8jWppM^IUK1gp3nD`aUzqE|osp{g*Ft`%0oh+UKT)*>V6@X}6JwNsulIwrBPf!(Z2 zEs|VaGWm<&oUufh6$J}!)_*Q)R1i0UJ6rvU#hGw0aHA<5y8H#l82!g4)vsk4J>Rfi z>k;g}p9=Y_*HhwpU9N0ZJ_{bMjJ7I<;Be(G{7{=rJqpQme_0XxKK?K>?+j$&{aqZl^g>k-+`# z_knS_V;N^Lnzu`S%M^4%QGbEJmX@zzf7e98&#qZa?fNJD+B_7#Pp!h5W40L7)0Y~= zQP`)x+Ah;GkP=s>;Hht;(YPlBGNxw`DwnW$!ELlKO@WnN8VmZ3Cvd8L8bsW$0msZG zycAp9Nr7=s2JHZ#7h=l0YWHSEG z3-ufa;($Jt)UUFZjo(?#t}j<-&xP4r?T>g~CcqB7UJI;|gvHQdHIKP`-47~OmEgR6 zE!v{q$2T~aV1<-j^#&bUWnq%Z6q&HKuNp4p=N>_T)qI+v99 z8Ixu80lwI?ocg)wgYke|bO==fUN{py#_z`AZwf#w_!>TW8AsCvcG5x_F*yB-VWCh2 zjaB$hu-9@MC%v zF0shLr|0MJ2{RwSF@@V~5Ld)>H6G!fK}Asba0tv@pGe<^>}NBVzvITb^kEzI1csI{ zBhC^X!6g@k9&hhCq!GZ5w-2$)u_?f=P=;XO<5lTTqX?nzdH$ECiodWtQsD0bw{ikEQC`>s-R;3~ZJ%pW|i_owu; zCgd#{!8Fugp>>TnSbj5Q?v)#9v-1x&X=^prx&2|S%SNKoog*wRXgKt(DPc{+Z*m_c zmf&kBeD@NUqHR_;Tevx#9u?n3ExApc@wae1zoMLFA3V$-tt-T6{gL3*?X=ydAZA5RfUP^e zG2@4GQKvV7zBQCnqQ(SV`S&lj+_uL1$vRMeE}6|pHU`t>TJY#uooI*AXq;I)iOsjT z$_`&=tkFN8yBe|rPF#0_uX9_tiViV3w5Y;=LDM0$_8Q2$zC)EWwd{bI5AHr3$A5Re zB{}&E`P0D-cz>}DciAmNa6a^<*Xnh6+{_ZH-f}cE<|zxAd6_S*Y+`NA9ccYH3d4tn zFr!P&{NbU0c+P1L^ZD(MeWJE8Yqil_*5EL{A=jG?s#%1Wmz~73?<;71yek9*aHJxZ zVSe?Qtmt<%DgN0(pAHq!5~m#0_gzFeKQ^&HyHeSVkuu_)n8tnuJz#fq_Hjl>$MdFF zzhU8}S@dB2e99B{GhL6ZhCYca9fo*@;$?>z}^h_cN(XaYh1eS5o7=^<_c* z%w7yWHkmC?8V=g=T3GWza4_m@N=>%Zd{u`8Qh7mG)~IZDfU?K>!eaXz?$}QqUZg(*fAqvKkHPO~cwH$J>rVx>lOpK1 z_YY2ZJROH!jHX3DelUZ{vrtm5E1hiOO8U3nFq?uobXoli?KvGvg|&zAboe)>?@R3tnz8*M=vnuif;^FisLDRws6LglPC{DGJ#zGh4xn(8gg1|Qra zDEKsQvE&CgPBjVA_Bn&>!5DLB4J)T zhzeHtg4u=`*t^=5=4O8u82aX7iBS=L*cD0+Hhz?+oXRb`UWX%*ga5OGjJ)Ua4PqVU zo{|P<4c+l=`Zn^sok&*C^@wenELm6A$o!Ta;|4iJb9K{)(=MAhlv-8bg>yGC_|;IF zW@=C8Pvnw#kOl>0X5s(;c4uQPAHOt|&z4^Zean@}yL=3cSYQdOugF90f~BChOz<@} z+j9-ihSGnUnrx`?w+z?Q>})h z$&#;BO{2hyNEW7TO`SW#c$vjTbad}N+-H#qcaEm8_7^^6 zuCyBqisIPpbtAdI5-FA)KLP@0n7kf7U&IFwt}x6Gz-5hw*iGE%W&34bCH_H239hojDMP(#s+Nf=F69Aqg8?-jHz|S z>o!Tewcjo@`khJ*Zk5rt zpg3qyR~7%C)3ftYE(;nVuvF{H&^uHMT{R`#cX!yxoz=sQL+^99-bz4xWG7wp 
z`OHT1-`UNX&v@WMPqk6$h>lxm9-(G3C4~Q&9eeCn{1TPXmG}L3X`JTWJ!x8DEA9O2XFm zPL^UlALR1x!lMjlFiQ(&gUo~Rm4P#-6;#dZpF1zPU2zJeWh13#CV8L`RZLAXDOkHD zkqvUX!+Kjy=>6?&f=8$uf9YIeYh^Yuk6sbj^pk<)hr)ZQrz18fb`}qEE}-Ca54x{w z!CYoWQo7eMc<0|FF#GHT55{zqSyf?YV;SZ$eq>tQK7DsYVMeOYqj* zQIMxtPI?R8=#n z_v$u&X7X-MON-G|w^sH=8UwXU$H3Bd1NQE}gUnf!&(?Ibvxn9{Y3BY-_@Z|JEKNqN z^$?gZGony?{2w-{;XF<~upG4>zY&hW-pYTEFR~+NuZX;6-V(h|a3J|dXW;LVua(kC z_gHi26Xx>Hnx!qWfJ!3^sA!15_I`ut?GOVl>sUWKf2#>NYg;wUh~PNI7BPw!c+y`X z$KkhiJJ+ef;Tc^|^2yqd^OEO4cTHcIlsXW4()-b=pKGz>$utZ-Z$NgfMVM7RRG0~C z@OLveR8|fsIXfUg>gU+M7BVY?wgzy*5(Hl3dWd{)C>k^bwzb zX2~6#GYmsJUUNN{10imU9~{5`7judOSnR+GdURq5Tk-u9r+D-aXOjJhyW0Jo8&W4? znWMKbe3`|mi=V?a;~C`l+Jst^5At^$$AZ_eVZdxVaK|VMu6E@}3XgvZSFNgW@yi?< z^|7qdsdp=gRJ)jJ^KhL1sg-wpoK8hY%T?-dQh0aZwt-6y-+!e>kCWYg( z4L&rcbrh3XyO(Db|Di+eNw~kb44!A+;0;pe(k%TrmbGaD2+mdfuQD7WvIfA9{^peb zz=8xK9t^U-4{9wXxO*J(BO3rLivf}<>AX$ro379&~EZxsCcZ3E%o)Zq5CD85jegFznQ zSgl-xfz2BzzF!VD`p$>HXXnEZg%Gkh#j{ZR3R3!}g%6&8Vca?mvYV1aY76h#)~=~% zw$B$)Vo5#8_u4>BXgk~dV3jRp~l^E!yXELfG}QV;*M(-us<7D|8opY&Sc z0Z>s*q5D@#`MkE*P&slx@j`yZapxj#LTw7Yyfy@iL=L#IH-|f0=FfUN(piFAF2q#z zWzHtI;oW&{mgAocUs8(zE2`L^PhYWS?G~7}JQDtsSiq?MxA2}>0lYTf2cN>)S@-;D zbhj#!x(o7Y>u;&xd9T4U<!W?5Be7U@sQ@MVr~~2RoI_LVDVi1>*fXaKB>Q;=fBJSa z$v@BHkIJ5c;FS6FDX}cT-xH+%yrd##zH*FpWu?I#><0lX*^^FXW}4D z5m{XIDhSj|q~(vQAmX%~^wkJ0nx{U1!pALQiJu>`F-_5&O>r*%9^1-}?VAL~;fhfD z?>E0Cv4nYy_`?M!gt2$&R?vU%eO9ZpfLhdlF#G4VoSln-kVlz}&Gw^E?UE}iFui~u zmrUWmXsqR`-U>6T@4E11^8_f;mlHBGqXllBEhQvov+b5kXt^xGs>8wzC;ljvJ{U|| z3kTw}DcP{z^a3-5Y{^IWBTOT2C!BYV;5}SNz^+Jj@uk)T(x2W4pLU)Qco62Kswj9y zmfCQYM;ay1H5zdDnn>YHlYxOUO}KVq5+Y)S(zZDL&&|g_*DBhyo1j&$ETe8f0FUPlGpbG_Cszm<=6HCF>qiy!(Ebu44sB zvjjJqtl;8(_7Nv;T8dup3iP^HW*7!g2=cR1v-W}BU zfIYnYm4?a9&UDOJh7UYF8oPZW*yMjhAbPT^X!P4(IC{QDDQUY9i~3Hhsv+C z!1RVQ<+zMwc?u~M>y^(=)bxRV%hxc4ImHw)C6hEiYQeI*kJvYTBjzg1h&C`aD2=Vf zbUj;ML#~;7^?v>cr__=Igga`|EL zajcksz(q&BVV6`0chA^DNk3~?po>5FyZC^r=P!1nz?mXjJOG7VrKm%~9&Ir38DBC$ z(LV-4^Q$1|dpI9fGL`MTnN3O`qw${R255c#jazgg02#51!!B#U=disUbMy*hEU1 zdzsx{-oWL3JcG-&jAE6!1JP;lO|0#{&o&Rc#KuRb!<}jjtb>se>*4+)DHW0;GfL9nHLvQ?+v%s+vGM`5V#f?VCaE9O_7d)424$%8 z)fZz5h5d}N=PFb@2Z(FN?!=BUn(3OYO$WZcjDEe=-V$He-7^p zskhRgXyFMOW8(+E{fx+Po*G>q0;G7}57q6>@O71tesf-ufTJ)-e%eH%oWnR9*99m9L_k3S&h~;=%t*^uaR-VTG9S#ul z)tI6hOPRM#0y~&9k{f8Xk`H#S8fnWV6=--mjM<73~L@Fs2L(HPv`; zITfrDSU3e2BFQv<1)cQYA~+J#IfW~hbm{sRd@RUR9gitNfy*wk(yRja4xWDROedWs zsmyw@JLEkXO$z05Fl(?T&Q6(0wcfdcxLu3$Be-9bmUYY80xPgIeu3iUp|#b7s~=Zpke@je9~|7=n_j;y0}7a(M8lQ z84GPJ2gfvvLEcImJOuueMr}VjIx&g2S@eYs%@Yo3CuO$&SEp#>yr~fVK*}<1t*52i z+i;Gf9_)7rWAl`sV6{k=AMj!`Z7_4gpCH50QP_wB2u-1*AA`BTb$sF5(Y z2|+k_%xMMuU{XCJQdK@jOXundejyt$9NNQP%|6czi&ydsZE7g1;J3sm zdOCY%yc1_C$3XkVKd6wFM*p59Q@{3^7yW9!OY@$D8|cwMdzg@ zlCiPbP`-eXD7BK@zD%L+zE_}+lTfc}q{6`XK<-kZDXtTCUD~HFzz0_svBw3IXkmdG zD7?Btu_rX)@LLWv)@v}uZ^xl1cRwabxeH;3VM$XCsBS(FO){(Tw)Hzu*=d5O5(l%>c|0HLHHnt~;5hSn!-Ra6 z8e1~7FMXV537efa;J8;?MKjBHaTW94FavFC)Og$%oFZ!J;@={E&*(0`E?&Z|e=wGw zWg?hPnOpIsWicK#234)0nXRoDA@K5ZDGi>_t;Hh;zM#vW z2Z+w*^ySMM*ejrny;|n54f%q{WJx!7Zh9Jf@J!@G-pE zR)PJu58!<=>>ws8nRj~D4e^E?6$niH{`)P!XQ*FHkG4DL{|H#;-H^bQB zDTa7WwuVZKVW!qtWxgHW zj;v&7?g;#%#C~GA$|{fyolbX8*;2E^AJLHKCm?G3MDd1)!=x(f58(JyXC;QF^GWNc zI$M~K!c`9LW|#atG1>APXWtph9+YJocC>L;s`Sci6_-TqZ0FLAbo9gpnowj! 
zd76KivWt{-Oqy;Zn-Q4u|mk>$Hf{;`gS;nZc;iT0yP zxPAJwA*DS6Y(kH)BbEwoC6_#G)(jnxuD1Ra@!x&e-uI^ z<7ZHFLN5Jsb)wA7qcBg$g~HNY$T0mFhoT|$bpKL#bEKUen!W@!9y!JbYROUoE|M%8 z{epdatP9d7ulT6VwqhJCp~wD%A#3hY_&!m1{!R{rWyu4@(RUNz)S-N~t2B~^_W#J^ z9ZOODh##D{@nsfOUyuKO%SkTCt7`sy+413DZa19^nKOn7AVL+#!`s28j0QXy$^TtWbp-ZNay_!GeL0CLh1|^K{0g{a zwI5%_b_o3AKJ4z5v7GnT68gJJ4w8$@QTKqrm$flx#^2YXyz*G;+O3sLsS`NfH}A9An<;F+VaVrAN^RO>J@`U9RwpUORLQkUL+K2Titq6f#; z42FND_LP0^CN?SO!h)s&(%kWV#oGm*)PJ_yL1( z_Cpct2t0zbtj{y850TjYL*NsC4rRUUFzz^=j(t;FBu@-9`6TP-?B>vsxMRc&4t$)iVj!*nnD z2`;hn!^mv_jEhiM^pFwgW3Sbm>`dByvMouOjSL8f~ z40nS~!(QU1S+n6-SPbdhDc<%-5Wjj5L9VMty|cjXo@u`-`(X?y%}MFY=2j zWu1xnc)n3lG<^R-aOpb&c#l4$>(K`U7#|B-;Kw%DT^B{_oD)@@i^QZ6XUXzUGq?Zh zA<%gt3->l1pqP=Cw7p?2s(YNHKTUqjdhB zXcQ`rmgSuq2f|7jV-A*qR+{Ei4|0tU6n4=_ZaXStV+ksBpOG)pFn9U$LZw4kC-Z&MCap@D0gfloAFXcdXaDCZySDK5jLsBY7rqfxiuQSryOHu|KsR9 z{IUMtKW*DWDMTapx| zgqF5aec!*|U*OS$-sd{!dcB@cgOk*6L+nTc zlV0wNv{{MTO%TT^MyUh>lfjKU&)bq&coCR_FV{mC9$?hSMj^2H5og^BxT0lWT26@~js%bbNz4kL1w@@4+tp z0yL6r!#5Mp(ts!aa9jA2)_pmQHf~;k@#6=#kkOChY<21IKeAzI}xT@LJ2Q7euM-Zv$eOkK1B1GfEvUb=vH;A{jA1~1`UC1Et~lf6*r za+7^ILk_1z%7Il+7u;MfA^aOA&K~VDLf@C=@L4X6$g1CArYawUB^#eX#KS7u{v`ta zKa8Sp;eYf^VhOeBm<>*cx1j8tGvr7k*L7%_M1qD}*`A*BwBLR^nc=6yENE$^!L}oe zM;GARDX05DLt0NKHOJL3zwV`1UwjFGE3Lu{0GsnVOKXJ zZd67_LW-zOB$rL^&&IO0+eGVAGHrM1q{Gp7h&TTVDl*^c!U$C~3|GNxR;lb}&up3y zQ$hCB7t+Xt!(YVn#V(>8u922CtJJ@pE`*sZ~O>@NG4xMDN)Nbq;N~Ol?1>{ik zBE~v5k6L(&&>_Jcy8ZDC&d+6npL*5FS}jkknB7jCt^oOw%yrcSV=+zXA!P=YsZ`)) zcJ&w@89K`t@oHc>o$Zqjbjs12tk){-x@7@S4`D;ib%&x%8hob25cRRn?x*JEYjA3`IEu#}Qo#Gw)TOo+|FviyR zb<|^75i?PFA`C58r&=jJSX=Xe=-PAcX>VhA#LQxM+*N{|&+EwdL)&0d>>a^#?p*g` zN2&SQI8ta81LOA>($s1N!OhKM@wd!Lsx)^#4!ea@zvWXj-4e3B>73R$yZAHo7KxCat&NlfbQNtWWYkCjG7>IvpE_qG^rv=*21g zDAk`NHe(}BmzWR#nrqmn7=Hh$wjMoMi0{?}6@ zi24vm{%f`1HOpyJ^aZBF&5n0HO9kMO8J7=!#f%PeY|3|QaQ&azbaCH1Hn>lZbzQ!XQCeUVM@w$X z949iVK^ixRWwUqA??S1{m=3Hjq9u3(uiTFy|(q`Q-<_nh@Za-@SZmGH`OuL7Rd&OyZU^q#h zbD#OXV*@QeScH@OHEDxLHdAJqj~}~}=>9$b;O4BetI&LfFQzPp zO_l`aFH|7YUz(tFz!Ca4e=b;$pM?$EmSD+XF8v|tgw+<=v@5)sJ>|Iz#zoJ?%(gss zX@xGVES-QQedpLud!%7`Vy^A}JI+i_yd=K+n*hx>rb5l~J;Y4RmoEGkAe7m666+72 z6Lf^XBE5=vbdQW2URdOU39~EVsb?&8SPf9pq=b(pf8oQS9GJ8C08U@l#)gL+LGk(~ zX8wx^c>1;s{gtOu?~90c9{5n*BZnb*@eZ(lZiLr%eWAHyCHY5VuL)EphcX^w9CJa_ z3_tSEL+_|NYR&yD_@a14aFpxi1bU~U{L64UW!Q(-%|C>y|6Nw4_3GL*I8c#4QW*uo?k>3HuqMtKCqZ3EBqsW3 zbNhD&hr8Fq#=GC?gGWl3a&rb)ZQ?v+{7)om;u~1N9SUx%ym6_8J-nYF#!q*?&TEtd zcpI05GYsQ#foC0r_s`|OQJaMqnr_qSW;t{Ru|S#jlUS`glV4qM3|9Qv0@fELV1`L4 z%Cag#8|QR5d$=0A@(P&yXSmMMTq)sw&N0|Np$0Aqng|}z1oha5)bIRJh`2hD8h*{d z{F@xFw#}NqW_dVWt}z3~`4y6ayz>~isFi$EUWOu50krS_M2FAcaZ_j>T*^3$&t)FL zwIOX#71t7O`CEdAoPBX$U>=hvl8?#aX*jJ{jW(@SLwVmdSiZ@a{`~5AB$H16~wXVT;l>w7Y6dWwjns ztB2gZU*ZG}SdheZk-Lc|+0KXt^4a$m-DI;zGcCImO#>wr@!RNG=F;g$Xt+KI6YfT0 zrDhC@*d6-+InH-kCP^Fjfyh30h0)p$=%m9S3Dm&-3a`(jf;HcN94?=i!i5v9p@**oMXi@?yB2(Bzx7jrQvLt=chf-a!Fw9+ z&8N{zo>JYPFR4b@UI7zx7-FAI!k`K3xWCCz)_L<~db&0YnA7q&_SHm)HHxK&_kU%q z*Z-u`AH>qI73r|ssDjk2&}Fvecw*zE-(;-aX%Z`xB~}(&d4Ck%kvPp0bbp~g{riO5 z_rW%{(fAO&Dt}Fu{OiX#Ke%jCh7I@+TaXJTJHc02OPUkLz~>MNyc}VM`<&h2=GOwA zoy!6A+Oz~cmu|;kMJb`)KRIapG65uPYe_OUbKH_}rX!t+#7IONqWY~N(Do9Iu8;I^)5S}enT8jLG`zW5gUdq3 z;)>EAq+rCD^T;oO1xE?0PAZ2t?xOs)PoHA2&J*gpXB-ZUafY)8O2DeWfoxmu0+~CN zna1lWylU6&P*nboacQ_fdTX;0?oY*m$kW6_lck0iDoOq0AK0~y!o=qbaNbo#S~rkH zU+k3RHzx^cMoz&o&=?=`{tVME8LC>2jF7%n2Iy$-u3a4mRs)3~ON* z4e2sPz@4X}X$FUK*1FCHVzZhkIk(pcsImn=dhg-67=T=2c{1u?nnvQH_(SEID`8 z2foeUi5ihbWa|?}9PRHRvgk4*z@;Xqz{EzPDxkJLyY8-|tcsJ%h;(2HmW)zeN5~P3_cBm7TC!Bk`Q2}FR zv#?W`Kq|L+(<^%$I3BDdN!%X8xp}$!h2<~OBo&3KvO3uHvYzbb9I}f}-68P>e#E+w 
zkBionAeYm}!z-RKhkHIyzs5~ud3gi84sXGBcL(V6l*Mut0=&JvpSiYmA&R`a$Hu4UQolce_-rTFXCN)Jd|86C*_h*}u(W{Rb zA30NI_=ixC{pAzM_}WFkCd;9$R4ezw&rmJlZB$B@0#gqiq%BIYI#QGzUGa(R`8*pQ zFK~fL9COUq=^QTMX1C6M7y3D24!Zw5h>NeKkSB-Mk^%QV)GfNWEx-OEF zI+jp*SHQV6%HT#x3;lO3hTHWRV%naq?DHq0;Oa7-KQ&#MoUOWuF>20GR%H$X?+nyG zS%NKB+(7$3N3fZBf|#9*1cPM`AW-IdNTuOSgrOH2U3&&uT3jdY>vZg zz`DMdRQMgSr&OOGIO_;J^b4it-}F!>jz=#%*G12=iNgEx2o~-r6O4-P zhvMjY_)6r6PzUMi0{{%jMo`F0nx(|8R2DXiulnUf1;Wgplw z^Tp(BVgyY(R}F3jj~TTkLp1!`QhM%-Dk|uYAy+!0$*s|~f&yhlh&NtKN)pSdV_^*w zaJYlKjuZ#Q=~dLMu8P?h zc(V*MwnmgLv#Y0LO;W+~qbkSu{78*t4-%KlQEYf(9=-kK86C`xWyKn$F->kPmbw~% ze?%~~c%Tp8^ghzY-%Ygo?P;9i`+)qm33yD*r}-X?@;fp5VW z+$5RF3-I^RUnDYa924teM22Qwq?0#ZgNsFFIA4Fm`x!`Qd4d)Y<}lBYyPpsq@7Dh88>uy-C$vm$GZ$jZBXqdRud#<5UtH8CZ$K2`lLR&1r(`T4pHgCy((JQPeUD z>A(DQ^h0qYJt^^oj!&uPjW0xKxfMvW%R1=GBigi6X(`utNg(e&Z6pmr)5*8%FPSlW z){z3yW#o2|v@kWHAJvU=$lUqL#LIFM89OZ%it6iOopwGEcaemIIcw11tbs7WRs=Tu zHiw`8Orbr5^F7NQV%m;OsPoJb5e&Ld6zsWV2l?}5s6w6xowDXFNIZE$``j#`uV+8m zuHa8bqs}2MO&|hUC$i7jmL?i1)4*q&F*02X&v~36FH2sN$d3j1DI<$WckG5ti%jZh z8b@yis}sLgbHeUQg*Xop?i`nb`jKgH?eKiO^LGzy>XGDo)SV%FGnSLIZLYNDvO6yP zR!646Gv=IZF8vlajeL5S!F)ZOA^4=EMRe@nu@!d~)A*n`y6xyRGUi1K?H5qGRTvB* zf8VjQw3m?F;7%g(_YGaTTLz2DZqn4X9@HU~^W44NjeiwB(Z7G}DG4#beZhB_*N=}8 z75_H^J&rLyTS^Yi&nA+VJ~=_QcP!qzWDUng9U((&5}F6^p!JdysW;DwHa!+FQd6$6 zQ=P`5G}lXs;x}=3t`uhLpKijq6|%Vj^?jd*dweUmf3=CG@!H@W>g>42LOL5N*(*Jyj z7=F^HGZ#Ol?>C7-`{V>tKQD(kjQLJ~wVz-Pt+(TR0n=gUfCn?!)gf3jB@!0IsbZ_) zBf3BM1QhD*g0~#+c~z?$l@+mNlj_IOm0OV=R+7Pk%{MUmjTu$z&w?-)dunB-4(vrQ zCVNa6e0cmB#qQ3dT?LzAAgz~I!RII_Otd^%a9BW{G&3RH9bA=p8nn!+c?pfD+ z>P+U|?{uDiDLuv8gLAvqLGa5UtdX!~HFBeg<-f0V@fI0q%5y@rs=(#?w?JOV0BP|7 z`r|FBQ>>x-_K`4BP))IE9Jw-Q3OH-bgM__dV8eZ<#=nSXW~*kehPEjPnQS#j&L@ z?t(gg-8Mi>FXY0hwbfWYW&`Bz8N)WUe4^)r3^B!OH*T>OgLT=KcpT)&6y_?-T7Lrc zzifgolOZzZ%yDjqzX~S5Zi82@M?te?zv+TBoQzs+@w$`xR_ zMLV9fE5;kG6LG<3JL1i$!M4kkz=c}^NN!6M=mu!v>Qqw@yS)s4th&S)(tR*lD1*Lz zh^%Q8oe2$~``HEJUJXITb$zn##VmAu{1pByeF$r^xV&MJKE9LR4BBrw=h4igcy#ko z_SDIjw5N{aWn3#Dfs4n1r2QA_mXeHSLoYCBR2+tnUE`TOJqTsn{Gnds32)4mt*G-j z0G>sglemD{Q0=b{tE3d@&($kn^2cX5_EfX2VZsmg@5c>T`#YLW+V4bWs_wz#8>aEy z(^J6gGJ_}d4TaOAayeFDF3t3Eg4?2R*kyhCRH{%!Xw&nb;IzYjRIa*00=`TK?;Q__ z)^<@?ty2j{XBFYuPg26GBEMk%j#B!1>o2JAJB2dqyBOyqvD75+Eshs);%{O17GVU=$HQUm0m9GSC`f(JCo<1dmCPxwCPoucyE#mT|gL*Y*^B(Me0z9uql<vlXzy+ulj=0NAOpE&gNE3M8S$1fh9kEf3OfXJ&InD#slB?ivZj`eY{ zyLO1EnPfqY%xT6>N(|G_M?rUlJUi^C22DRoc-K`pU-gXy8q=-ExUSEm8^3ll8@vN? 
zlJp(y6-MEb{*%P&ehB+?vJK6!l|sE)Gx4sA9p-Kc#n`faw2!}piR9Grz3OU^c>F2z zAt4d;?sI(CL!Rho{FrLbi(@ktCjmXrIbXNW!qL}8AU^U|QyO zRri&A_DG}tlLCl}=rr1{#AW>Ywy`m52B-&ngzOrvAp4G6kV+dV+t*x2$tPz{sN@8G_7`~inO|C8%C3?%PNU}>YTQy+~ zbxA!!^}?pv#tm&DPwu=WTYPR(VadOwW zosJieBfEb@pd8mp$Q`YwAu*HLdS7MIe^46Ts-BbQZ+F|Qv+Kp7EEjZreh?b&>B8uV z>EI*3kqJmQrN;t(vZw8ZTgWRoEvm?f)g^Qi(#VM zY)}}RkFqH-q<4zCFev>2>^M7vu3TP8F1VL~MotBeS2+n$sjmRH3Q=ooF`CDT;E&mZw8QrimD*fRLZ_T#b!VCg)K?kv9e1U{`0Z`B-w%GEE6;=yH^~uZ#8Hj5tlSFs zmyco7e)vP2mmVanbB59B4~WzbFRnw=NwkCSk&BZL;>1BO*5o4R&`7#NH>g{)62bK} zWU&fS(~w5z%|2{TNjuv$7)|7MucP|A&e97*v4Y%j)>Pq_EQIurgDDD*7*M0duUP#D zELK?vRFApCnYB0RifU0*YM(?`U5}%WZW{;+&MK404-WWr*Lu8PGLQVxNx=__^XUZ6 zr6smKnd%IDrx)|PAW8igwYcZSIt@N2lDe7n*uNC=`CT#0ziY~|vvSCod@VA;gzEt6 zRZ-oOF);nWcDnJKB8LCSVB-=*sIhY+o$-udcEv_Ksq=t6EHVjT2It*+I*Hy_Q)ZVx z+eQpFSi)&lc~ zWnj;&dvs~gMP@;%37&i5MuxbIOhvLf>~O7z4F^)e;e9Ja&*3;n9m}Y5(pF5KV1TD8 zAECm6D)Re_6>L%Lr%Psy!vd*RDi=45MN;8p-qknoi{nEayX zez2HLfo3ouaeEcWizBmf|05qXTe6FGz4L?m!xq?kH4cMMoy4#?x$HBWIHoK{gOnzn zVxM?=lZoE*n31PS5S1kh3)4m^2Wf-;9t)gMW=qpObLbU10!ODR!!55@BuhsW`^`NF ze(z;<-pJBlXGHMp{{4dJC0D8RRVnt)jY29DWyPwMa^H^ckE!bR4!9KKi$-dTiM@3x zlNl?8Db4d>RpB-==qyRClp@f8V@Gv6>_fp}TMXT6i|sczQ|r~+Xin>Fd?X)D>=ly9 zNw*&6(bO%RH_nUR7*%I2i60D_KcUhgdiZgrFOgnZ$-R3OlSB8!iAG2c*@cG@nqpy& zXFj<x>H_K#-r#M}L-c#H|A z>=)2Go@MlD@E7{-K`t6{Z^hN~5@GuLhvdU|FS2IXp4;ahrpYx7_5S>fy3SY!UZoK* z@>Cn^Z`Kj#q)DjrYA)8wBDvkaohN?z9TV$m4^(q4$ec(g>E;sTnX@z|OmbzDKifjY zgl-(0eSx`n^9)80q`=lk*#hB9S?Io^ip_eK^jPm%W`6fIlIS;!797&PW>v8gO(x1v zUv5V^LG1+g9ehoH3vC#WaXFBcH5n&I zktWYE{6Q(sVV0x?cWT1Oox_&}<(hGP`tCg(>OO%L3pAm%NMHD~xQWY?g`vXhwRoUE zf!TQ~!nR@JO!8M+0ZwQW^qp~-yt^HQma3s}T<&WEv1b zu~?y$sLN==p0gTs=e3ydI7RL~m@)n|)X-B5)D+h7k6<}{41RqP!la~Ez!mi>_yx7-^pQhfX zeh|Wuzi%aR)gy#I zJDH>oA7-B|{6MD|>qA}JH5h;KIg>GQ9AdE7GjPHgTts%P7sfKKy zauy?%6Ommqn`1&%;Nqi`QC8Xv*G&yZugy_RQ+_8#Udlt|r^WQUaS>F^|A@yn#eh}O zAdFZ0jjwdJVgJEmxZR=-yF#Xc+e&*<*}aTLq(+c7*96if=)~eqCz5HwP~~Y)(Q7Wp zt_tzTUvr8fRe2$L8Mjdp6;on5m$Gsv*OS$oq-pM|boRN7B%ShE0jBhnF=4MMGIQOb z(_$Q5u2YCBleeJU_6yh%IsrfJy$Rc&KOzfUTS=5!Fn!A!fnVoOGP~0T*S7f3drvst zYO)Qn{CJ*~y8MG?FJ-ts<5x=AMuBtS5q5glc$yTh4A!C6P_I|X&d6r)Wy27u+A_rO zdsyb+sV9t!ZV6)$DTV7L3I#7dE@Yzqt*QGSKNmikIlp;EH*b zbx&I(g$+0Kgbt2bP@=GuAI5Zlm$|0!&YWlvmG6&zs1Zv?n<&% zm6(UUgRv$94u1CJK=F|6N{=n%Q|o(b z(R+btzHB3(w9TMCs+1T`pH827Z$o$70I|DI(0iWQtoaQwoKZJ{n%}%g@@k9dQHPc6 z{!kIfYBOT0dF$9n^O^WhVJVC))q>C4jNxRfEbPhH$qarx&X$RvBlA9L5|^q8@cNA$ z7A1>gx$k81;P7$~y_5w%1HwsdRXCB^=YtWE;be8wR=5)Hg0(XyA$9mo)*&zkNI+ZKKWJ@0A^K{plqgst9mX1 zcSR)6+f0Cs53v1nu7JrT{%~GBintqmAQ$U}s9JHE+%~^}&8au>Kh1g=`8FNyDkl-? 
zJ2T+p<6m^)$YnBbN)vQcYodyj55B7a`m1^gu5U3Um(!&9hbxUSYl9*jJ?oDdeaDG- z)l9l0*97C0OL$UvkjP&;!~E6iqh~}lp?~>1@_Fzam29ddM;4tR9a%T%_Qh7jQ_`I7 zGMP&%EqzG4dM;yT--92+{qVe|ES78^3$pH+IPs}5ZC>?iq4@0Dmw+9&qoE_YJ2JQ>vw8Nk(D`|#S(QH)pR*n8S0!Z(9qqLLk z{Pf2M50o%rYdP{)^JLfw4yIca!ysu^E z`@3>kRB3*T#YGS#vCwig2L6SZ!tvN@rum#LJF>2a?i#+%W^><2_0Nj%=<{YAE;tGk ztVLks(F(Gl0jFVUY&h0FCBUJHsN};9{l#r58v8Pp$eD&qvg7{NwKyJ z6*7k$YIGY7FOp>}1mu4pDWE+4m@Bl*Y>~f-C;pn0sZ?kkABl z>#Ai)=zdV%zY?@N6rl9!Dolw~0=Jt182K|xFydSe0V8>=*z>P+Uw0j?b-#$N&Rlok zL=s;9GM?OB)C#k^oFG(C2}#*&!Bo4ME-*63olTb5xwnMP4ZI32i^8D!{S>-vW*2Tb zmP5r=$FRP$&y&$5!t*<%JSN^5DAUp&bYnZ&d@9^v>4V)Q~)FLWfysE9=S!z5<}U0|K?Kv!Z_BvL6qE|o=-M+xp7>EO7_sx zJ>b)L8%kBb@YV_=*>MuL;hqdj7XC3Nj^PK%qyHX|!}Go)9XxG=hq9K* zxX1SjvHCCrd-G+4k7M>g*?-#LQzI&@vgbJD4kChH!A>-6cthD@HNL?&b9mz{AxzfO zA%;%HWV@F<6bu}NGc|dzUS=jg_;~|)RawZc^yASy@$cm6uQN=soe8et=DIn*Zm@YZ zxgeaB2l-{jFzUTB~=XCH)}`@?AxmxB%1*F;(vBluvhOYh2i zkt!yVq&ld>>pe3BC95MK=uj`Ix~n9#dYOsd2CDEW^%ZrDtYUAyx=1Ef^un-V1F99w zgNHZ7K}_T&{g!A>^hH~^-A5=C%1!0}i*lub|0F=ffV)2*(1nQCJy=@s40ol7g34Ae zvh!~v?@de;)Sh?<84kT%CUg$Jj{0L^dKmn3;9;R^I&gMivW;W2TJ4BtKJ*`;%C(lX zIJFQxjod&avyCii$iO2rJlJxJ3vkajAOCcZg-FXD@;oXS@;1mqk{E#M0!3&UyiXQR zRKm+Ybl`--8uCST8Y=B71?Rk3DD&n6D$KH?-o~PQ`}5{FCvyoi(wBz@WoLNq$-5wG z*F3tj>!^prL4dGE2H(dQ{7;0j7kze^0^hmw}omVuSULv~m z9ha30zjBQgEK|TQD=!oOCmdTpUM(d|Ts4LLE+`LDl8BC%xWS{&Km##63d&4#xrlL^TuC~C{X23w8` zXnc=VHT+3zN5jC&(;l<8^|9SM_s}yZpU{P~Dp9^J4cf-15bZaubms0HYM3?`eI-v2 z89N{BZ`_SvcxfEVT#-$>!`&rg5<&CLI*8%A9D!r3*|Odjq|zXo1YF%or+cU4#Lgc; zW{nlbj>#iR$+~dFHHF=Cy8;^{YVkcf!d>f$^psE;-g6!MA0-y>z+w|lTE7Kp;tkkR zxq-B3@UZPw1jJvxM%YO$g7)z%@pTfVGY4eAb6A$VpO!}l!@Q~2*>Oz5Z8@S9euw>4 zxev83uOK&X$ikHfZF;QZHVzntkUj6;)1p6daD7)PbIJbmU28H`*H(}&?qPE z!%NA3#|GJDnH{`Tr8Hv8^bq%z=7Qq>vtLY*;uCHVgOEm7zz)Px9 zDAjn3UG{;?N*H#qEgiR*HAcClk=;g3HGdGb+(0guZd zhL;DBSA#QHbstrlwDmZ>Ilr9ll*ku&^-3~D+3`@LuEDt(gHR*M0A~)AzzW|~!d@R> zsGJ-wJbWE;gr*Sgo(wCS{|LG^CDTwXmP|jTh%e97Lf~>~wyQxNL%819PtKR>Znun{ zOaDcrt*y!IfIzZEc98w4zmGUDn=rh-m1vp*Vp9}e^goR}BQar8QVf;4QbpRHTQFAy zyJ$*;IChq0FeBg8P;L1?W{c8YYCe4lYFds%v%}NC|8WeN*gcNq6-?*8Kc+Nlf)?2@ zvV~fRc@u%N6FpX9Le)EGL*^VV$8ocpY_y3)7n?iiwokyW9X|npfM zuo%QQ8RE4u#l&?mgP5pxkPx*ytlYg>=p`4+IF&}To4UAOW!G`)WB3qUN2=IL!FNX7 zZ3>C{XUo%{dzGDaOA2RymxjKb6?ma6hknr)fdg+!iOLyooRwYAngs~Zzg-OY(W034 zk8{BmA4BV<>ELwX6r3crRNUn{UAy}*bJ%kuwtbz)^7ukFZ{9R)FWo^_bUZ)<(W8*O zF;`$@wE(8Q;hZ<4lDsn~Yp7AxJh-j2oje~?&u&uSxOlZSEE_K(DA%8jK0b29Je9kX zKKV)&+ix(Pd1l0+DHfAr803|(^jf?l@Gfd{?BQlAW+MUvi*so4(F^2LUk<$0xkOe) zucwn`Pmz2BK0P^VMta@2c{xo54cGl6c`J$u{2Rkm7AVs{{-??6AVp~4xiRlgMI!9u zJZ&Kpu}))@9hvP7ar+uc!=f2r>2D}}@@NbRC_>hlVaffOWp&1OSr~tpbDA^zaLM#4 zHnpaXB#bmNdI8Ebq-GOw$kd|$IM>Hs4S%BMXF-1&bg>Qb^U2>6SJ~bzD$p!!B$it9 znYNwR=p?<*b>1psWXqxJv@x}pp0NY6Gc6B;h95A~OpoJzCs$ebV+LHAvXUfADmND$edqi z5keG%TgpRW>-2DJ=KPP*Q6c~?aj2{Jgt{;*$*}7~99hs#BffDA5P~!~KpW*`IF4)z zw@bPBmJQx_gAPBIW3Ap?V0Jy6&ZALn%u~11m|h38No|N83JPJ(O=lo~3&+8(w&YmD zGF)ao6DM$f`?4krn8-|quexJ!rbh}bI`SX1DJT=I!XQ{Avy-N*swCHzr<0%R_1J7S z1B)VU@YmOwxI3TQ5uSRFIaB&*)+x$1|NOx0R`=#zS+kdU`+O!-&3D2Y*LIMMIS0?S zrm$C=JGoBS5t^=54PmcV!>pU)5NB}-`~Mk0|7Lyq*XSfPdKBXX;SQW;uSm+Ly3^Im`>=U3emDEU7vr!}*niNxLM{t>cc<#g-fJ^Z`|7@RBc)yq*rKW~*tenH=X3 zqx9>=2(YLP=k`)Dm=SiKyib&ej6WA)(ai!3w&nIJ;dy%F|PE?d3vzZ0SZ$XAFS#mV@+#=rc4eszxWd zejGnp8V0{E#}hH%v1^h%zeIOEDg{i(+R-)=`-G)kvoGUF=m6C(wx=s)x8N){o0eP{H=zH=O&`w+HK^t<$Yuo*Yiv{*6Iy;2YP1H zN7Ao+ji|k~CbjR6)3=e=shVLZx#koL_G_|n=}v9$u&fusSLxGQx4oMtebWw2N z@>0%kk6xlMKCd%GViYw`_1n(+bM+O#6YgiKZ#X3#nRm z7fDERgbUwRQhB8pki@Z->_*2zcvvL+D(n>fE)fN0Mb^-tl0{{o*O1OJADF!|8DPfc zkRJt!(3cYy!HQpbY~t2XGH^GMDK`_)HRCQ5>-0$ScFZEKL;aZU5uboRZPwC=nt!CO 
zzXc}VT}kK9=5zZ~JJk1?P8(c55t|iUhU<+lt~LD4dX^=FXqOCaT=$9Ycr(bVab3CA zv*+usIH?n{*W5f-G!f>iUn52z1f-C2OY-ELN$xZ*BjIup{UbHuvZ*QV{4t(Ctson% zbo1->UQtH#@?_pL|3_ruqhRPuU4XY{PeuKow@DJ`+6{4?L=H7|Fo8SrnXd6t%*=Ju z=zZVS)HCcJar{}%Tp#QsuALkkaJdDn&6q*nC|cnCieFqu-j75_R*<*$%{+Q<0j>yk zf;-h0VXUbp46Y4eRWMF zuzHF-xbO8N)o(+YxRy?yq(_zPmDfPSx37uph7^v!qKgY}2GG8eT6Wu&J*?Z+!&E_} zm^Q66!odx@X~sWC@=J%~Y0OuKAZZzp@skB*m1(f)r336M*~=R$EQW_c2~guz2Yc)u zFvkn-uv-GOn5iNmxK3RKs)X*?6=s4lkL4i!wJqD~(kHOWOhnPOdid>k6Klft9{;jl zxJ2m*$(|pLzRAbI*82{--0Cgw<%OHHdtiX*a5G`G)GV0cL`h|$KR#Nv67R&#Aw5c^ z#Jo9yjEWK>ulb6`kKc^#^`ng1mk|=$u!nw6;kde7=D@#t1?<)1x+}@@C_Zrm#+tpL zJ|p|V=l(ZxrTi58nafISPc9*klg?7bIlq}e`CRJ1!bm8Zc?u_$jFN@>x1j5YGS2wW zz@9&84Rb17vHaI>u5)z^)~)I#ma={@RLU{4jiOM*s+X+dbNPteF#NS+4LmPeLosa~ zzUpNdU-SDgU^5XG>N?t#WI1zh9?JMIZYsu2(}obo_q0oW8RVbMVcix2@ucMwv))VIxX>gG-yVj8 zOx+nSJFd^4WFJhlz8@oVq&UWk##h>Zs+P{Ojfa%83<_o*f>p_hSjF2&#^?-?vcNyo zeD)aZ3>=FR>RVbX2tooAGrZvi<5?~!T-PL)Awf8< zdLyoJ4S=xzT+&vk!2wFw&;*GqxX1h-yGe`VHovS!owP{2dwwN#+wX>Ft}h_{3k~t= zb}{&ABBU}pDfHvtGAIx^jCSE#tb|FDV62ZS-|K-Up7t?^(_<5vB)=G(U!RElFTG^& z$y8Y8vWV6n{6OyCE5N+ZSty#b8fJ5zNxhW$G*;9TP0pv1$Um=P2h{R~jM zJQ)*HUQjaA4K^O?CkyYSvcG)eNpt87tPSrYn>4n-{4d#X;(-o-k8WL?B~RgUSv1EtTD!&u0yc-R*Yc0j&?n{e!WUOMjq3==_BfDypQB{L z>ShQKkreu{RaEKR4&rX?P5$0W;FUg6hswPp*l>Ou@~!dXtR3&pD2M zR8g@=B=fYVlY{ftp>o^`_`F*iV-1dxgwXq(hu{J^2{L%dYbH#-zMFi?s$qBRRRE2+ zrI0)h>4~XUIIcVt{pPjfTIps|tvN{K?Oe%?%zwPN^UX|Q(+Z~Npzwbkop(S^|NF)@ zM1zKwGD>8YQbIlFzMn*9h)PyQ$==x_G?kJDg-W|fWR!Z&eJ3kaM5qXveQc6Fe&_r9 z^Zb3@=e+N6U9St=UN6O|w{|j_?j}8N5hfNqp!dZeVW4FYzIHdozQ%j$k&T%!Nizm? z#x>K4*CwL2Q40K8;0_c2MMJWVHrWap;+jd3;*k8ka4+*0go320I(HCva^(dZ9e}XW41Z( zg@x5AsMal%y)vEg(A#Fv-{C_mH-uuvqWj`%_vhm5`WB&I=kpY0pP-m23xlCjcBZ@S z^#0_VBr;hFk+v6;HE@@#@7yNK2oG{)4Wv^XOE#Y^K9~)W^euxk0#hVt-r} zc3m95aUIW;*cDqxXmUyMDAepZ7JFXFWzU`>EE-|SrF07)gzXhS{hmpB)AvaEMTtV~ zl5V`n;iTY|7yhWocNdO%9gkXrH-XodQ+({(4thFnfaEJ5 z$eUCnL08?E`vsN41Y@~)`kb?9{yvWG)E*a$AI!&@_mo)Ybs71dJOc+@;~-?=afqn7 z$nl@DDd78Ix@M)qdS{1I_|h>#H;)`T(mDYf&p7kLQ@6I_(=H>?&$N~rmfok`drv|{|1BJSN}yNEPtc*b z=cN1nvADCB#BNVYgz1OVg%M5P;rG5^G27T37wGQiWYeXbc<=)q{1M6XW75RN3Cl_C zP7*#_TSM=Sx?|jZedyey6Prwx_Pe(F;Foif^SyWC`0pBGhI=y{i#-8vmgy*h_ZZ+I zH4mPmAHg}AZyhzJOTL0VD&S%$@QKnq{QW2m2d*+#+$dNN_6yDlUsF;ccFi(y*`Xx5 zGNMNLF8&@nlZx8HgoPSm+}2)|pIE&XX7svD*^8saw{d+ywP6&tzU|Il68G$Ong`B2 zIu9O>o`hG$F?dCEmFkZZRE2#cKZz$7_I4nhq9lHL)r`OOjpOt}J+AuQ1g^i*;KtCo z5+CF!T-K?g>0P!^eg|{3c%DNKOgnPaurBC7;VIi_ByiX7cG&8+AD6xv!>T)Vc-gHv zV!Z8q=&(H$R8P+$pF8`};qD^1b*Y;D_jlo(Y3p$R^1UvWxGpt zz5C*g1^al(jwd{6a5nw-;ye!9J&jJsltY8kGLGqY402pT@a)TXuw`gJ9(l)>L-j7e z>1VHC{*x2Ml>hj( zS18o$jB~qwp*horV_{|l&%ZewJGt%`GGn&0&udHZq*oF3+^Qj#>CfTfDm|K8n=91# zctFP5tvI5r7yd7|MJHtsTh^VV3j_36ZRTqlo4uG1_~dfMrum|GM7oq)Isk6Ou0gl? 
zalA$H3Q8Vw>@uVQgIouQKixy{a%4K)?Hw#^$n0I7-~Arvn9M-GzwLO!!Bsr?wT`@g z>2CD%>dNC6|DcPDeMH%a^-wYSkkHk97$vw$z4oVmc;dM}jy`OH+r1n(>7X?(N|9J~ z89I>Z_Lbkn=2J!CWE@)Wj63bz*m}n;%(r@uN&AQK;cM-&`F02{Z-~HY2LDO3`BALc z>;+-V*K?J`5ru!M^6>_RTjsMEYSQbv7$92OK4F>pGmL}CmK~(WW;w~20WA96kK|ZAiw&hjxoq%%~ zzQvBG{(DT0O3&D-EdVDdP{>s6j%{s%2@U}L2)Oqx00 z*q%lYE{P;0g)7#Mw?GLEDtR|NAbg55-IHp=%uoI0GyNjLv2r7wvzj2be7i(d9&WJc zm(;T^Hl_2ZjL8;xf^K@c%U-ma_+DVUre({zq7}ZX2KW!tsE9boT0vgHU@=?64#2oJK6C_?I4Kj@IoV|UQOQv0>JA?Dg0d$PS=9YlqYabvuJ(yaS~%lORA@aQ|u(V61xlfW$VPqurCV7pSt{L@;@pcR)*g8zu{8T53%3i`O>?W%R;&-(XzV> zq|C>|5cl68>{xwN$Xh&^n)f7A>v1)jbnl@erfxSF7L-%8r<9v@QjW7@rqX)vTqrx6 z2}NJN!($y&oRMnA6JNKdql>=7#oyAuBQ;ix?_?;=ggfyVwNZFJ&y6N|IMZ44!*F>| zq;R+H1zm0l7m9sVv9`HXcpx!}-oLVjwm}DJ|M1?l-Xja0y#uk=i%>=7`8V)))p^*X zHV!_>wSiwPC zX^#w0#!jZzM)R?Lr~!Umk_Jj1#m9 z63KC`J<5Il%2pXVbMY?&&Qf&eJ|&BB#}geq9(tHe<_r|SDxDC+2A7ecfy9Wt5Js0? z9EI?dZak~gPUtamh3M7iz0=FgaQJgNm}2&I;AyXaQ+8u0{E*(o;*(bF^gso)M|B_r zk1-T*}&%^M~)%4zQCLe?%iXT|Prb8q6VZM@BvUoDT((&S-=a*6E zc^6@&`i%0XjNMpL=M4v_QvBL;1r)5b2Kle97`HNlPl;#v>meg~j@vA5KJfg8SC!tqpD2**gA^b_||IX7@6IjuG0Elx^YGua*HdTeylfY3h(c{fk)1-q74={I92vZ`cHY|J`a)W(??NZ@LqTq zzMglin!|BF(#sw;ttIui8@y6wmfT6#UTEGJ&24vH(46H*;LU0s4l6U{l6Lc;`jq5p zv@|E@H!=)a^^KMWXvybHIfwr{ht9s!z(moJ4NnBZe?!$pt1H)V4OjY68x&!&x?|E%cEZmF0T!e?$7Jj%AQGiAzvy= zP-RJsGTQ1@xR&>DRJ zV^0nvxz=9_s&K~amz}XX`48lT)WNWiZyq&Z&$qdD64J5UYa(j*_&1TO0e~>%+_q z(HWq9{;u#%dp^zHI*>w@lBu-b43EwF%H`8`q5J2zRMQy2%T}~N@L^UA{jq{iP7C3% zFb6ESRZgxW?$S3WTMCJugb%AV`G8*zSk3w(6c?|jg&hm%N{|J%{%en&?hoZBy$5ru zwU#_3Z5z*?ZUVMXiRPOf1;HqoQb*jw?Z)=d5)jQE4w`b~ssi$Gcj6i$89NlsAlj`2 zD^No0tyquWr>&uGL-cs(>RfV2j3D16PgK;~;)*qzv|-p=_%-4_IK*WNdm=w7I?GO? z?dI3we4m?Qtd%oG(<4#!#}uf~k+?RJ$87N|S7^T20sb0ALR#`{u~rdDch~x{BHuuq zrR9rzt0#(Y_b+C^-!qJ?Jnmsk#WB~O8~AAXV2_40}$_(0KC+KAfZ?6?zX4CBvS7xLdcgFd{G_Pnie za!;xygAKRn^6M?!Yfm4XbL*in@}V_;8@~#Ui=~Jjt+XQ1oJUr*iA{P2Tzj@$%sLau zFP5Lg?UFC~_7RCs)c2-%@7-}UxAGMO9Eah&0|#(fS})n5e_e6n(vFZEydGxfMMzrX zWc=0F4n`eHrqQIsopZ7v?bk5ed*>(^JP7A`8}`G7epa|DPy;VpcalAva6`Pa_Yl1{ zYoitY=F`AG_K;Ni557yf!<@dBFD#HT(#H+*Ih`G@Wyl5CvYNW|CMu{MmFv7n?}~}jOong?Wo%nL1q5O zAjGy30#_d4)k|uej*rO^=X@LnMhjD=9F&freL{^I*GPQx4}k$?e+)(>u;3pt+^)HUXqnxj+zh0&h5g) zs6ad+s&l7UJ7Gw+J^n6Q%KpmcoOUr&_`5`;L&`V7XjCi*-c97dl`OK$B`9=J2&3|+ zL;Hkc7}X&1UDZ@b8XZnK;dg1Ks+4ip*NA6JJApBa4~m1T2I2sd4S3764OWkHkm~N? 
zT;FdR^zMC=4qlUVQiBb#Px5HM69YNv;%WZ<5AIu%jsqJ{$aT3`?qo*T?O_cK>6xT6u_C1bty$&vd$D{){%OZ0EP9}HkU-{zQ-=JH*q>HO9q7#oySih?|=XO(bderl*xJ73g?;j8VvbRb?-7Xi{jTdBZ zTnINz3#Juo_TiIV-pCr4q`K_|9@L*mjXM?id*=i`X4;p&M1}Fu`qA7+@{tY7$-~dx zgPn}Ldda7|e#GWbFP^5-EPQ;V0vjs>#QyeC{NCm|ja%T!J=@Zu$D}e`n{)&@*np=7 zWBKyNCjQ=I2rgK61PT{B!x2yW@{%KK(X;11&I`B-Ur%2I1Fbx)>sm|60}c3F2P15e z5Ks{4b`i~B-v0y4?)yxfdEQ^*Xys#O<4JtmS%3$> zR5ASLI?j~5pHa?XVsY3Lr_Kl)zPo$`4_{%!@=LWspO!3KWMD%DZb_JT=^kx5 z6pgpE+<9)pLkLKaNqio2EIEFczWj44`*1sq{#!k6wERT{Bn?qT_nBf^4H?{QeeNeDhAc@J~WRM_mL9(|8k3m+H$T%^!HS9sozZ2G++ z8{W0g6Zcqtm*9HgpgTl{Us{*Jkn6MYtcc?MvF6-u=V;ngFL|b2kKom-5~p#|HL^aU zgy+^}lm3n;c>MHp+SJFG_Zdv3bd{a5)`)Ofb@X^>>gq4L7e)v)wF8@dp3kAVwfKmN zg(E9U6|?(ofQK9HvAC=f)P9fP%=B->b$>-2`ApWiQpp;#K#{agi+^OE16N5`cQgB= zqU~=xGJNhR=5`Aed`4^Fq?F6x^mYx$<1gH9e^!hr>4r06ddO~< zujglV1Msic1!#$3==QslxZ_(aw4c$3ugeV~UZF47T>qYSc55S>US8}S@JI5&EfhAC zjG%@o#@O+?AwhUgT;mu{W0GY2w9o%^O($UDY?(>^?;lcdP!FM^bDsE8IbIxP`w*Ck?-W$k>(ek8++uoDcS#NwyAP{Bnu#Vo~)pa(#@2rO7mMU{X)kU6aVuJ&_es)?jr&yZd`_a)s zS@6xf3MM}_Cg+9=;^hgwdG5e?eyEx%On;S%>x_4y>2ZXcQZINeMITCE9YV)mQS_&w z5(77OVPEY(!q&CX98sVPQ^ct_wtq0KHj*mg#t?C9#woF#yAA%B+(d1mm5~0{MNwtl zmrps{!-J!vag47MYhKW2)vc+}xb6pSF*FiS+hpPn>9xAj8$-2!xv~cdYWEYMB=uU^^(r}qp+aB7Q1To z=RmEa=yAZAn^xA6R;$FOEPF^heUt>{Gss#OwsUK_DnCD&Pg_T&3052{_E|l=sl0%;@0@XiKW^X{cNQ;hP94Ww}LnopC>>vDO z;Y2*38jXK1bi#g4zsuwk9z)<7KRz_?AN07_9j7EZ^7twbE?J|-A8dQj$o;*c)peA3 zrX1P$$`9e)!<|%X=P8t@4#AL5zlDx%~yw!OLyV(T~eRA ztqb(Kw+=E!ErU6}H}OrD6&`k8BY1D=%t7w^#K(X1xi=xZd#*;^tWV-R{g3o=h5;6+ ztY%s}o%YPS3g0CCP}fOe>~_W*;`2#@Z+jTXn^pv&I`mYb;Zqz>wMwPYwh zSk;kkJ?M?&M`*)^_VK*c_y7)(IM#C-JK*`{Cxnw~W~iZ>%42XiT?zZjmVJu&>YlTF zF6%sfHSf-$eorZ{G?#|g-V*RK1;4G*@8BeVZ5?0$ZCx{u3a6@BzZi{+E1GN2k&iGf58EV7Vnoo-ZuiV6{b9w^3lg`Z* zKZ%ocUa0gll=a^BA8kz2!OLSOL#U~z+-^nyCaayM4H2psV)}#NnFm(L$3w8kX&4@x z;*^lDPxId?v+}I5Xn$N8l3SBl(^*cQ=MKOQwTpbmtqfI`>SN6DJTCF`!2!>G$+Gqn zJ9ghj-3)8XK99?yOV&$pkjn^ax)m>eP;95g>n}rg$w)qSDwz$ukI?DxQC!y9gr{8E zgKmYntlc$QdK+zaUnHdyIsaq0WB7~+84RjyIX>?MJYv>7-7W~87KYP zML}!F^SBN9H2-D^<(f%4zP=;r@4F82pxMjN?B9FIw{A<@S6JYbO-mI1!)jz&LvK^y zv;GLj>Pe--RMxKUjo8PZsCwBOs7!u}qt`TvgQD#DzE=RB&bxrwo7Bio>na|(yn)Tt zCh@g+z}7?8IeTU*7iO4XRO=M5u^x-<_4dPv6Y&(Y#S>$iYN&f$2=i!gfKSlnWCh*np2=Z&{j%6>{d@F(fXkUhZ# zE~;vln-0@O&5|2XudYV_G)7@|+8C)uk0tf(N}|TT!*F)tHPQU`eE1jH1$;HXQu3Br zC(f*@l&`wR8_YK`m@&rfFTwp_${_3*frTTb4 zYa#1)uBXgDGX-gf#69iy(u!y$x;UyoiV4zAyw!qqd^_OrAD(bTyO~t-dvf@x+c14t zu`pxAU|yE!$})*#s@?VkhBr>;Czku*%k*84*8VBEANoQ|zaE6hd!z88aUjXMZ$!Tf znQ&R+4ZiX8W zCOm=uUk@m*6!&FUm2hylzMiiwz5>0%!_XnBn~?JRILEJEgi9sAr`NCtqJGFzT64Gy zuKT;2uZ)VvaE%ZlVX=~w4cm%?`zN#Uh{IU1ARV6_o=kuGBD9xpg;?|B_+P+npddRvrSa$0Hr?~ZiB^x-h7I);M#@l@TvN;EM@2*&Um5>NmEq> z`aXy3Q!3$jw+>>HumsYE#G$8#1O0h&51fW}z-yuoD_h@?_NbD!t6(5yqYtl2mg<`C z$MM@YZSH^9hTp~+;EK10xY{lmN7b0(vHB=FbZrQKZ76|Bn>&!@=s20vSV`;pqC)(a zF$%kQpCOHb{!lgSm$b9BB2nu(RSdG`6ob2ps(C|r;($6ZGSH_$gW)tQcp&*@&cdTR z9k8;G2W_5M!Q1@mWm!34X!m+Hgw)kI?dvrSRIMh_lYT1YT|>^WhGi-2^$i88(4!Ws z7E=Co4ArbY2+bp3L-3^*Ui!QX_jyb3_+~GB6i|hR9U?(KwE+^>q;cieL6~u)o&2g( zPqeRa!M86Z-|yZWc0A~VY0;MDZQ0NGsBW|9RO*ahRw&_GiSHiMrX|`h9Lwvzs&j(s z1bBP*GS9E;0Iz1Qz)|BHc|>kHUw^Ac{(szIhx{dmq`2Z?n@7CyQw8^$bO-8gE4%7bl;zf zqET*Xu+F^)AE?yfn}0`(BM$ClZ=Wc7`A{ErZ$8a~mglkSd~31IM#{N*CKm?TpxJ#wIsA|2R#I*w(2KOkV)FnnGaPkju&iHA#LK)s-jv=69ra+-|d0?TgMH#$4 z{DPlzw77onVD|g_5WfwzF7Ny596EmA%dIEtdDC!D9JTm14-GFCF84OLOY+C!!M zW7G|rsxnw&o=GjcQ95ZC-4Po<4dBoo7Uh<0Vf52~E>+mIp~cWR7&`Pao4=ETO~-MR zH)8=9{~d(OCO3&=cS>yO>x;>Jv?)$k?n9eueu3q!T3UK3gu8Xiq8eEue+Vi@of|WBlsRS+m{S`Lr4WbK8O?iKf|=!p}fZC z34Dp`f6zomhVb zO|JD44t|MX)3#fn^<5KB&GHm;tbRes5fOH7wB)`&Ci6kvI9jFrTFANWN6u2tuk|)< 
z7=GnHi5n&e7R?NouAQcBg{RPEiYYF;dWlA!+YAM*+ZENB`(fr)iM8i{13T_A<981W zAm)-cv?_;URp)m6;M+?Y*|QUEu3U!);TLtQvBfhvu5>S{9f$M^MgN)*7`{-_V#M|+ zcgxf#V?Avc_G2D4JE}0+Pf(~<3V6amOSq`jDCYjkfZFPw92lM{x+NA0lP>(El@&p> zY(SjgeLE5~OwQr6m;11N&;>CjbU%+>y$R;WIg|3nRM;Tb!NoSIFw(CQ0!${c{*pax z5*R@DE-m0NDWMPo@Xc<#wIsID}}mxJd+zQmf{ z-k2u#c-D{q?$1O`X|{E%-Dmh(QVFg%FLQe9e{lMZIfZT>OeZUq;YDHj~f?m(F_+-cqO5bFRXMV)O zH06VW?$Vdy)~^oKsYZYm;r3$H@9n7F!yRf?RZ*GsaEN-oNo+dm!K>}UvB+*ere9kP zADcD`0sW*sgmw=eqv(qNx$MXGUHb8!4_>@h-4GYqt)SMTXttRcMxntD&?@O>6-&19 zi-*%;>z6aoZqYD|4r`OuDWAhX%ccJRXakI??!ZYKrg6vF!%(R(gNHqS%32e5@(t4y zn4($=ZWCi<_RllO2T06sZO^9X^2GGX*TnIGJINt89`3pAlQglV{7Cjy%DU^u4dJ1< z=AUHe9B6=j3$1xaei(L{+80wHk`CzEz>xLRNLfvdeQx(Bzwd%Da!V3b=lvryt$}PV zWaHesl^`$d4aJe>l-0RHaJ~10w$EJ-c~%|ien=Fg)k<~TdRJ^Kcf}@)8Zm2%Gt8+j zgW&AnV0o!8%zJ;H-mEXAAbBykZ3t5&+4=LxKi_06XWN6_m)CHpb{Y7I{=DCTq+|0{N(z}RrK%qxao zF&n74(w0G9^k3t#^;APf2c^HYY`!{O{tM1} zTOmG3e^2LjeiF?vS!^{}hC*#DruzOAY}LI{FW&`sCgk$$5w`ODQBoiOcL4Ui-Ap@O zdX}qC=~y1WWW4lGm-N<>Z${EP;CQp&AY2^)t+T^LZKo%s6BLCfzbrx33l~|wJ|3+#!(n{VGeJXlJJ4I4Kw(8_g5##tep^Q5HB^w#zZteT6tQZLQ=tZ=#WxuAv)`#kle% z=-Fcoyq}UnMIo2@#>_}YnnUMu z!m)m^=k_^TADhLYKc3Q|m3cfmSD$?z#e#L$o~%{m!0(oK=Na}&s5>eIi-!*89jb*w zTx1>fSa${wPYU2#zam=yGM2v&HZI4OgFN4N3f9+3d*0|u%)8#T{I2pdbk!dzE*@IO zR)3Od_6n2o5ynz}vczyOmHO0>Uq?7;U1xSljKJSJJjL))4r282Srih`TT#&vK&!fc zf`!}E%ikJV$VYTMOV^Y&>A2Xx+(9#+v85ifwNA66V;rmWHsJQpPJ!$7;TY6S4ZA&g zOp}k*(wN(lXQ{axmtS9lCyaW)2{ldB#Uhrub-)J4Lh?+T0M)kA+(Bm^zkbjegT&bzv7!L=&Me1W zi6YxtnDO(5?o#fnKwGx8-2CkfYTpV64} zPbnvK1FHKr$_#HDVpAz^^blP{v)JR*y=8@5r%xy0cFt;+nH4#8-yRJwF1hf(b*8M6 z@lHHgZNqCi28f#iMv{4nfif-w=2BhbNvCh zYdcXKpEjFL1neZWDYM1AC2sInyC)s6zbc0M1o7}DQ;10$CszLSok!6wSU6DpS{4M{{gh`T`V@v{#2&EZX9=ioCo8}_Q0XiLd9YoImVg1g{%%*81L;M z^#soR*x@>=XddLi^Y3YNxeI;2l7dB=E@YK+Qt@Q|IfYs8TH4tsM7Z(M4wmifMHBaU zk=dG3*m7kx-MM2zF!MTmaUa6kD(O5pAW|{1{X!lo&f$vsZXoqpR2Cr_MkN6y9;i(uP z$f09y;M&f{_*AuqVm4&K#rPZKKBfosSel6*?Yj$^7q7$L$g%jhTRfZaGBLmEmXOeU zCAY0n#?MNkVqaVm{BRh=>s&oB`h1+2ea(wHjXp$hQyb3>Y=V)(Q#5&3FYG_1A%5Ex zDRBb^D?W@@Vy&B-0LJyAS5ay7Y49u9a`^x`Pi�(Y@f_>L0XCV;4>T=`M_(*(4S# zCP3l%8^V9R3aI&IPqNF^2X*s<^flED6JOsDI>zeov<4fljc0yW_0s9l+y2l?94Yh- zIYBJ>Ds+Z5LZ`6;esVQoU)7b=+rXR8AD=J2m>x|Z!*rB|pZ8*MsTm=`C_Q1%}&$9eEP0-CmnyLKM zfIqpr#W6pF zt+;kn0ffF(<^G*?uuHWiKU~y~@2_})d$X3{hkP%r$sQo(w55v{e*eUuu3`95@=K08 zmrkdZb;zs2xXdqiJS8kN3XMizob%4O+|6O05ZKZPIk}TDLg_MO`FG*J z6T0)~h(*|K%~ks9ZvgG@h}8CJ80)V*0Yk63;VQ?|Sli8ndV5q-cVji~|65 z%cXR?gC+Sch=wCgI-u$QQSmM0p5PeQm3$XwqV4!jxKLpt7ToI#aCR!r4vbZJo!BTG z?hr|zwFh$D)3L%m(;2wJd@(wQbjC+K0H>}hhk@!3$X5DJ`rbVXQ*&~0)5eQ@=7gv4 b&MKUje>noCsq0~*vKHCg%qvrw*9-m+vO>aZ literal 0 HcmV?d00001 diff --git a/source/tests/pd/model/water/data/data_0/type.raw b/source/tests/pd/model/water/data/data_0/type.raw new file mode 100644 index 0000000000..97e8fdfcf8 --- /dev/null +++ b/source/tests/pd/model/water/data/data_0/type.raw @@ -0,0 +1,192 @@ +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 diff --git a/source/tests/pd/model/water/data/data_0/type_map.raw b/source/tests/pd/model/water/data/data_0/type_map.raw new file mode 
100644 index 0000000000..e900768b1d --- /dev/null +++ b/source/tests/pd/model/water/data/data_0/type_map.raw @@ -0,0 +1,2 @@ +O +H diff --git a/source/tests/pd/model/water/data/single/set.000/box.npy b/source/tests/pd/model/water/data/single/set.000/box.npy new file mode 100644 index 0000000000000000000000000000000000000000..65897e0f9c5ec79e1a3182fc9b9bd6e00799244f GIT binary patch literal 164 zcmbR27wQ`j$;eQ~P_3SlTAW;@Zl$1ZlV+l>qoAIaUsO_*m=~X4l#&V(cT3DEP6dh= gXCxM+0{I$-ItrGWItsN4WCN~HS$9VUB!G<%0BRf_UjP6A literal 0 HcmV?d00001 diff --git a/source/tests/pd/model/water/data/single/set.000/coord.npy b/source/tests/pd/model/water/data/single/set.000/coord.npy new file mode 100644 index 0000000000000000000000000000000000000000..6e0594a8030bf6c9d7e3e2a4d1c7b9e5b421af1f GIT binary patch literal 2432 zcmbW0`yKSLG75FhocSMPePwHe zDG}%MkyhDAaw(@}QcH=XEYf6s|Ap`Kx98*6$Mbo}ea$LQ-wAR%4G4^>keQ2TcsCu|QUiNSf~zKtfgx(c`|H{sc96>7buPIJ#z zpzPcOmZ79gU#n@*lNTcNu2Do^iw>1Pe+Jzh3%HgJUl=YGk-6nWy0|J7)|wyKPT3Z| zQ`RT-TXL2ecU;AbQVn5sWgRjX+tc^Fn2BVzi0p8pyPG<3Niu<=6+>Ky7ZQzqQs)=u zZi4HGgx#t<%3kEHr4_xq(74Q)I29!dOX*-zEgPhm_DLWAZAm&#xmepcgVd%iCm+Rg z5V$$e#%reZXsZO;GNPKOV#x2>1Frvfj9z!c&8qo?&Hb=azljoA1*AIlxGVn#G1=9e z?m|u4q+-kO|LQW+xpM<^gY6MtFprcZj@0{YHJ0vHqa?L6NG@&06g?w49BD>&i?pTt zEdFGtxK><`nMtL?JzUa&FNHl6z})8wekpy)epN6g+t>`ePTdEu(Ow=_Dfm&UMwyRC zuwSOn2gE%Af60qqRel|pJ>5`g?MB%jJV^h@EZ%uu6W%|aO;eXzayd+iK1Ghf?xPQX z^;a>i(UYf{!&Nx&bTcyx)f7fc0`Pry3tSS$kP#^lnOz{+`bE>Re-xZ~a9=Qk!UKzNKeY-EOMFOFG7P&G1u8BXL&2oo z^mhA6WVKpR_RUb(8jiAD-vmg-%P2f+8yvaYMkd#c<)SFE9zmVEVSvBVF)?2I?+(0 zJAOM`2G1+1MNYq+zZXbCRE+r!;PNVLTwe*s9q3BH*BhK z?tVWKY9>-@;&k$rt4H>7UHEhfsATC>!mjP~`cV!m66@2Bh)x6s26O2h0<=9OGHa5f z2Y>&F`KHg=&S6z@X}rKC9Gi;B?PX{fua~moix@mUk5so!WFt4jNY~entdBgzhQ~AL zQ`IESKYlSy4jkalxjAELatG_r$Yjcgy(nedQRvBAQQK-|5_Zf+;-A{s_n!i;^cOR7 z&^eBjB00Kv)r~9-%h020MkcNr{CIK>4lVH@xA#9|!Ru5+saIg7TOxEWnb8cJHk^(B z9oA0y2PHGHvA9{0T$Q1^HNjIQj3 zOLr@8q>ziD=4rI?uo4+wdBh#mZ-VQT2)?KD3KT?pU{>Zy-@G9DBvIxCjdjqf)+K+* z1fge%3LV=#hJXqUUanhA(|hIVK)w{O5@*+N8$L($~brivU{;y-q1lLr3CR4Oj3jo{H*EVCOJ5AYw7YIdtiD$fgHCep{u!B7=JB8 zn(?fV(;t3{mzh6sE?XO+qUk{S>Kz!pYfqjDdrA54hC-Jd&8p9#$^jk{ z-LJ_$>Iw8OyhV5H2fWMQOOu?7k@A-|d4R)1hdpEo?!EN*ywEr_r*-vnU|%2+aF7u!4L6&5}*0RqBS=eR4C^%~GMD z3!gWX?aO^}WH$!C^Psd-sKPFg8rCA;8B|wnL}TJTIKm0Ze7qV0Z4q>e z?_mBDXJpN>oA#)8(XDn0Vdg1YXe4DJmZ7S_UdaEI=llEXRa1QB;QfOgaDhUmW z`H3|Sa6hj_LVpR2x@$4hID`hCPo{S92c&o_khgIPdD)~vwRRq@ZcT)Njy>K?-iyDc uuOO#;rYP*6E9Cv|<7JitZD`h|thghHADP2$ukfd>(&eo3oe~yBzs0}CO%F=| literal 0 HcmV?d00001 diff --git a/source/tests/pd/model/water/data/single/set.000/energy.npy b/source/tests/pd/model/water/data/single/set.000/energy.npy new file mode 100644 index 0000000000000000000000000000000000000000..a0a88fb78ae09feb17e41593d6d8f60084479320 GIT binary patch literal 132 zcmbR27wQ`j$;eQ~P_3SlTAW;@Zl$1ZlV+l>qoAIaUsO_*m=~X4l#&V(cT3DEP6dh= aXCxM+0{I$-I+{8PwF(pfu7z)39s>Z!l^&G< literal 0 HcmV?d00001 diff --git a/source/tests/pd/model/water/data/single/set.000/force.npy b/source/tests/pd/model/water/data/single/set.000/force.npy new file mode 100644 index 0000000000000000000000000000000000000000..d5b847a86e3a5eea0476a8cd93210965b3cb44b9 GIT binary patch literal 2432 zcmbWr`6HEi8-Q_J$WjbRqE1PNk|>It=e`v&Dq&icNE~IdG$aivOIeaFh7`&cO_sC_ za-Q#fM50Nwn50rusD@FQrlv*By#K;`{r0)CRM0X|tRlS1yxo z-V9@3F3L*O+z?Z39`n}XeQc;dGwD{f@e=<;_QT{AA zN4n7_Baz+5bwN0Cu!>%k#IeG}zhPPJ5bS-aMKLWYSabL!##vd~owXg|?OVLjsM~|} zXRc+D3**sMuZ{eRJE*Ah1Oi@Ni~dW z^JA4;FVGE>a@^#;4TF0H(i~qM(iXhKcjisJ(^1!B7=dn(9yxob*sW{Cwh!!Xg;dj@Qs41=vT6D{C!efq2^*6=Int2!< 
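The binary `.npy` payloads in this hunk carry no human-readable information beyond their sizes. For orientation, a rough sketch of the on-disk layout such a DeePMD-kit data system follows; the file names come from this patch, but the shapes and values are my assumptions for a 192-atom water box, not something the patch states:

```python
# Illustrative sketch only -- shapes are assumed, not taken from the patch.
import os

import numpy as np

nframes, natoms = 1, 192  # 64 O + 128 H, as in type.raw
os.makedirs("set.000", exist_ok=True)
np.savetxt("type.raw", [0] * 64 + [1] * 128, fmt="%d")  # per-atom type index
with open("type_map.raw", "w") as f:
    f.write("O\nH\n")  # maps type index -> element name
np.save("set.000/box.npy", np.eye(3).reshape(1, 9).repeat(nframes, 0))  # 3x3 cell per frame, flattened
np.save("set.000/coord.npy", np.zeros([nframes, natoms * 3]))  # Cartesian coordinates
np.save("set.000/energy.npy", np.zeros([nframes]))  # total-energy labels
np.save("set.000/force.npy", np.zeros([nframes, natoms * 3]))  # per-atom force labels
```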
diff --git a/source/tests/pd/model/water/data/single/type.raw b/source/tests/pd/model/water/data/single/type.raw
new file mode 100644
index 0000000000..97e8fdfcf8
--- /dev/null
+++ b/source/tests/pd/model/water/data/single/type.raw
@@ -0,0 +1,192 @@
[192 added lines, identical to data_0/type.raw above: "0" repeated 64 times, then "1" repeated 128 times]
diff --git a/source/tests/pd/model/water/data/single/type_map.raw b/source/tests/pd/model/water/data/single/type_map.raw
new file mode 100644
index 0000000000..e900768b1d
--- /dev/null
+++ b/source/tests/pd/model/water/data/single/type_map.raw
@@ -0,0 +1,2 @@
+O
+H
diff --git a/source/tests/pd/model/water/multitask.json b/source/tests/pd/model/water/multitask.json
new file mode 100644
index 0000000000..83524a8b77
--- /dev/null
+++ b/source/tests/pd/model/water/multitask.json
@@ -0,0 +1,140 @@
+{
+    "model": {
+        "shared_dict": {
+            "my_type_map": [
+                "O",
+                "H",
+                "B"
+            ],
+            "my_descriptor": {
+                "type": "se_e2_a",
+                "sel": [
+                    46,
+                    92
+                ],
+                "rcut_smth": 0.50,
+                "rcut": 6.00,
+                "neuron": [
+                    25,
+                    50,
+                    100
+                ],
+                "resnet_dt": false,
+                "axis_neuron": 16,
+                "seed": 1,
+                "_comment": " that's all"
+            },
+            "_comment": "that's all"
+        },
+        "model_dict": {
+            "model_1": {
+                "type_map": "my_type_map",
+                "descriptor": "my_descriptor",
+                "fitting_net": {
+                    "neuron": [
+                        240,
+                        240,
+                        240
+                    ],
+                    "resnet_dt": true,
+                    "seed": 1,
+                    "_comment": " that's all"
+                },
+                "data_stat_nbatch": 1
+            },
+            "model_2": {
+                "type_map": "my_type_map",
+                "descriptor": "my_descriptor",
+                "fitting_net": {
+                    "neuron": [
+                        240,
+                        240,
+                        240
+                    ],
+                    "resnet_dt": true,
+                    "seed": 1,
+                    "_comment": " that's all"
+                },
+                "data_stat_nbatch": 1
+            }
+        }
+    },
+    "learning_rate": {
+        "type": "exp",
+        "decay_steps": 5000,
+
"start_lr": 0.0002, + "decay_rate": 0.98, + "stop_lr": 3.51e-08, + "_comment": "that's all" + }, + "loss_dict": { + "model_1": { + "type": "ener", + "start_pref_e": 0.02, + "limit_pref_e": 1, + "start_pref_f": 1000, + "limit_pref_f": 1, + "start_pref_v": 0, + "limit_pref_v": 0 + }, + "model_2": { + "type": "ener", + "start_pref_e": 0.02, + "limit_pref_e": 1, + "start_pref_f": 1000, + "limit_pref_f": 1, + "start_pref_v": 0, + "limit_pref_v": 0 + } + }, + "training": { + "model_prob": { + "model_1": 0.5, + "model_2": 0.5 + }, + "data_dict": { + "model_1": { + "stat_file": "./stat_files/model_1.hdf5", + "training_data": { + "systems": [ + "pd/water/data/data_0" + ], + "batch_size": 1, + "_comment": "that's all" + }, + "validation_data": { + "systems": [ + "pd/water/data/data_0" + ], + "batch_size": 1, + "_comment": "that's all" + } + }, + "model_2": { + "stat_file": "./stat_files/model_2.hdf5", + "training_data": { + "systems": [ + "pd/water/data/data_0" + ], + "batch_size": 1, + "_comment": "that's all" + }, + "validation_data": { + "systems": [ + "pd/water/data/data_0" + ], + "batch_size": 1, + "_comment": "that's all" + } + } + }, + "numb_steps": 100000, + "warmup_steps": 0, + "gradient_max_norm": 5.0, + "seed": 10, + "disp_file": "lcurve.out", + "disp_freq": 100, + "save_freq": 100, + "_comment": "that's all" + } +} diff --git a/source/tests/pd/model/water/se_atten.json b/source/tests/pd/model/water/se_atten.json new file mode 100644 index 0000000000..70abf6759c --- /dev/null +++ b/source/tests/pd/model/water/se_atten.json @@ -0,0 +1,83 @@ +{ + "_comment": "that's all", + "model": { + "type_map": [ + "O", + "H" + ], + "descriptor": { + "type": "se_atten", + "sel": 40, + "rcut_smth": 0.5, + "rcut": 4.0, + "neuron": [ + 25, + 50, + 100 + ], + "axis_neuron": 16, + "type_one_side": true, + "attn": 64, + "attn_layer": 2, + "attn_dotr": true, + "attn_mask": false, + "activation_function": "tanh", + "scaling_factor": 1.0, + "normalize": false, + "temperature": 1.0, + "seed": 1 + }, + "fitting_net": { + "neuron": [ + 240, + 240, + 240 + ], + "resnet_dt": true, + "seed": 1, + "_comment": " that's all" + }, + "_comment": " that's all" + }, + "learning_rate": { + "type": "exp", + "decay_steps": 5000, + "start_lr": 0.001, + "stop_lr": 3.51e-08, + "_comment": "that's all" + }, + "loss": { + "type": "ener", + "start_pref_e": 0.02, + "limit_pref_e": 1, + "start_pref_f": 1000, + "limit_pref_f": 1, + "start_pref_v": 0, + "limit_pref_v": 0, + "_comment": " that's all" + }, + "training": { + "training_data": { + "systems": [ + "pd/water/data/data_0" + ], + "batch_size": 1, + "_comment": "that's all" + }, + "validation_data": { + "systems": [ + "pd/water/data/data_0" + ], + "batch_size": 1, + "numb_btch": 1, + "_comment": "that's all" + }, + "numb_steps": 1000000, + "seed": 10, + "disp_file": "lcurve.out", + "disp_freq": 100, + "save_freq": 1000, + "save_ckpt": "model", + "_comment": "that's all" + } +} diff --git a/source/tests/pd/model/water/se_e2_a.json b/source/tests/pd/model/water/se_e2_a.json new file mode 100644 index 0000000000..96f51ba5aa --- /dev/null +++ b/source/tests/pd/model/water/se_e2_a.json @@ -0,0 +1,77 @@ +{ + "model": { + "type_map": [ + "O", + "H" + ], + "descriptor": { + "type": "se_e2_a", + "sel": [ + 46, + 92 + ], + "rcut_smth": 0.50, + "rcut": 6.00, + "neuron": [ + 25, + 50, + 100 + ], + "resnet_dt": false, + "axis_neuron": 16, + "seed": 1, + "_comment": " that's all" + }, + "fitting_net": { + "neuron": [ + 240, + 240, + 240 + ], + "resnet_dt": true, + "seed": 1, + "_comment": " 
that's all" + }, + "data_stat_nbatch": 20, + "_comment": " that's all" + }, + "learning_rate": { + "type": "exp", + "decay_steps": 5000, + "start_lr": 0.001, + "stop_lr": 3.51e-8, + "_comment": "that's all" + }, + "loss": { + "type": "ener", + "start_pref_e": 0.02, + "limit_pref_e": 1, + "start_pref_f": 1000, + "limit_pref_f": 1, + "_comment": " that's all" + }, + "training": { + "training_data": { + "systems": [ + "pd/water/data/data_0" + ], + "batch_size": 1, + "_comment": "that's all" + }, + "validation_data": { + "systems": [ + "pd/water/data/data_0" + ], + "batch_size": 1, + "numb_btch": 3, + "_comment": "that's all" + }, + "numb_steps": 100000, + "seed": 10, + "disp_file": "lcurve.out", + "disp_freq": 100, + "save_freq": 10000, + "_comment": "that's all" + }, + "_comment": "that's all" +} diff --git a/source/tests/pd/test_auto_batch_size.py b/source/tests/pd/test_auto_batch_size.py new file mode 100644 index 0000000000..966333f47c --- /dev/null +++ b/source/tests/pd/test_auto_batch_size.py @@ -0,0 +1,38 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import unittest + +import numpy as np +import paddle + +from deepmd.pd.utils.auto_batch_size import ( + AutoBatchSize, +) + + +class TestAutoBatchSize(unittest.TestCase): + def test_execute_all(self): + dd0 = paddle.zeros((10000, 2, 1, 3, 4)) + dd1 = paddle.ones((10000, 2, 1, 3, 4)) + auto_batch_size = AutoBatchSize(256, 2.0) + + def func(dd1): + return paddle.zeros_like(dd1), paddle.ones_like(dd1) + + dd2 = auto_batch_size.execute_all(func, 10000, 2, dd1) + np.testing.assert_equal(dd0.numpy(), dd2[0].numpy()) + np.testing.assert_equal(dd1.numpy(), dd2[1].numpy()) + + def test_execute_all_dict(self): + dd0 = paddle.zeros((10000, 2, 1, 3, 4)) + dd1 = paddle.ones((10000, 2, 1, 3, 4)) + auto_batch_size = AutoBatchSize(256, 2.0) + + def func(dd1): + return { + "foo": paddle.zeros_like(dd1), + "bar": paddle.ones_like(dd1), + } + + dd2 = auto_batch_size.execute_all(func, 10000, 2, dd1) + np.testing.assert_equal(dd0.numpy(), dd2["foo"].numpy()) + np.testing.assert_equal(dd1.numpy(), dd2["bar"].numpy()) diff --git a/source/tests/pd/test_change_bias.py b/source/tests/pd/test_change_bias.py new file mode 100644 index 0000000000..2d87b739ff --- /dev/null +++ b/source/tests/pd/test_change_bias.py @@ -0,0 +1,150 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import json +import os +import shutil +import tempfile +import unittest +from copy import ( + deepcopy, +) +from pathlib import ( + Path, +) + +import numpy as np +import paddle + +from deepmd.pd.entrypoints.main import ( + get_trainer, +) +from deepmd.pd.train.training import ( + get_model_for_wrapper, + model_change_out_bias, +) +from deepmd.pd.train.wrapper import ( + ModelWrapper, +) +from deepmd.pd.utils.dataloader import ( + DpLoaderSet, +) +from deepmd.pd.utils.stat import ( + make_stat_input, +) +from deepmd.pd.utils.utils import ( + to_paddle_tensor, +) + +from .common import ( + run_dp, +) +from .model.test_permutation import ( + model_se_e2_a, +) +from .test_finetune import ( + energy_data_requirement, +) + +current_path = os.getcwd() + + +class TestChangeBias(unittest.TestCase): + def setUp(self): + input_json = str(Path(__file__).parent / "water/se_atten.json") + with open(input_json) as f: + self.config = json.load(f) + model_name = "change-bias-model.ckpt" + self.data_file = [str(Path(__file__).parent / "water/data/single")] + self.config["training"]["training_data"]["systems"] = self.data_file + self.config["training"]["validation_data"]["systems"] = self.data_file + 
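        # the three change-bias modes exercised below: -s recomputes the bias
        # from a data system, -f from a file listing systems, and -b applies
        # user-supplied per-type values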
self.config["model"] = deepcopy(model_se_e2_a) + self.config["training"]["numb_steps"] = 1 + self.config["training"]["save_freq"] = 1 + self.config["training"]["save_ckpt"] = model_name + self.trainer = get_trainer(deepcopy(self.config)) + self.trainer.run() + self.state_dict_trained = self.trainer.wrapper.model.state_dict() + data = DpLoaderSet( + self.data_file, + batch_size=1, + type_map=self.config["model"]["type_map"], + ) + data.add_data_requirement(energy_data_requirement) + self.sampled = make_stat_input( + data.systems, + data.dataloaders, + nbatches=1, + ) + self.model_path = Path(current_path) / (model_name + ".pd") + self.model_path_data_bias = Path(current_path) / ( + model_name + "data_bias" + ".pd" + ) + self.model_path_data_file_bias = Path(current_path) / ( + model_name + "data_file_bias" + ".pd" + ) + self.model_path_user_bias = Path(current_path) / ( + model_name + "user_bias" + ".pd" + ) + + def test_change_bias_with_data(self): + run_dp( + f"dp --pd change-bias {self.model_path!s} -s {self.data_file[0]} -o {self.model_path_data_bias!s}" + ) + state_dict = paddle.load(str(self.model_path_data_bias)) + model_params = state_dict["model"]["_extra_state"]["model_params"] + model_for_wrapper = get_model_for_wrapper(model_params) + wrapper = ModelWrapper(model_for_wrapper) + wrapper.set_state_dict(state_dict["model"]) + updated_bias = wrapper.model["Default"].get_out_bias() + expected_model = model_change_out_bias( + self.trainer.wrapper.model["Default"], + self.sampled, + _bias_adjust_mode="change-by-statistic", + ) + expected_bias = expected_model.get_out_bias() + np.testing.assert_allclose(updated_bias.numpy(), expected_bias.numpy()) + + def test_change_bias_with_data_sys_file(self): + tmp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".txt") + with open(tmp_file.name, "w") as f: + f.writelines([sys + "\n" for sys in self.data_file]) + run_dp( + f"dp --pd change-bias {self.model_path!s} -f {tmp_file.name} -o {self.model_path_data_file_bias!s}" + ) + state_dict = paddle.load(str(self.model_path_data_file_bias)) + model_params = state_dict["model"]["_extra_state"]["model_params"] + model_for_wrapper = get_model_for_wrapper(model_params) + wrapper = ModelWrapper(model_for_wrapper) + wrapper.set_state_dict(state_dict["model"]) + updated_bias = wrapper.model["Default"].get_out_bias() + expected_model = model_change_out_bias( + self.trainer.wrapper.model["Default"], + self.sampled, + _bias_adjust_mode="change-by-statistic", + ) + expected_bias = expected_model.get_out_bias() + np.testing.assert_allclose(updated_bias.numpy(), expected_bias.numpy()) + + def test_change_bias_with_user_defined(self): + user_bias = [0.1, 3.2, -0.5] + run_dp( + f"dp --pd change-bias {self.model_path!s} -b {' '.join([str(_) for _ in user_bias])} -o {self.model_path_user_bias!s}" + ) + state_dict = paddle.load(str(self.model_path_user_bias)) + model_params = state_dict["model"]["_extra_state"]["model_params"] + model_for_wrapper = get_model_for_wrapper(model_params) + wrapper = ModelWrapper(model_for_wrapper) + wrapper.set_state_dict(state_dict["model"]) + updated_bias = wrapper.model["Default"].get_out_bias() + expected_bias = to_paddle_tensor(np.array(user_bias)).reshape( + updated_bias.shape + ) + np.testing.assert_allclose(updated_bias.numpy(), expected_bias.numpy()) + + def tearDown(self): + for f in os.listdir("."): + if f.startswith("change-bias-model") and f.endswith(".pd"): + os.remove(f) + if f in ["lcurve.out"]: + os.remove(f) + if f in ["stat_files"]: + shutil.rmtree(f) diff --git 
a/source/tests/pd/test_decomp.py b/source/tests/pd/test_decomp.py new file mode 100644 index 0000000000..d8439ad994 --- /dev/null +++ b/source/tests/pd/test_decomp.py @@ -0,0 +1,131 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import unittest + +import numpy as np +import paddle + +from deepmd.pd.utils import ( + decomp, +) + +from ..seed import ( + GLOBAL_SEED, +) + + +class TestDecomp(unittest.TestCase): + def setUp(self): + paddle.seed(GLOBAL_SEED) + + def test_softmax_decomp(self): + raw_api = paddle.nn.functional.softmax + decomp_api = decomp.softmax + + raw_input = paddle.randn([100, 100], "float32") + raw_output = raw_api(raw_input) + decomp_output = decomp_api(raw_input) + + np.testing.assert_allclose( + raw_output.numpy(), + decomp_output.numpy(), + 1e-6, + 1e-8, + ) + + def test_norm_decomp(self): + raw_api = paddle.linalg.norm + decomp_api = decomp.norm + + raw_input = paddle.randn([100, 100], "float32") + raw_output = raw_api(raw_input, p=2, axis=-1) + decomp_output = decomp_api(raw_input, p=2, axis=-1) + + np.testing.assert_allclose( + raw_output.numpy(), + decomp_output.numpy(), + 1e-5, + 1e-8, + ) + + def test_take_along_axis_decomp(self): + raw_api = paddle.take_along_axis + decomp_api = decomp.take_along_axis + + raw_input = paddle.randn([100, 100], "float32") + raw_indices = paddle.randint(0, 100, [100, 2]) + raw_output = raw_api(raw_input, raw_indices, axis=-1) + decomp_output = decomp_api(raw_input, raw_indices, axis=-1) + + np.testing.assert_equal( + raw_output.numpy(), + decomp_output.numpy(), + ) + + def test_scatter_reduce_decomp(self): + raw_api = paddle.put_along_axis + decomp_api = decomp.scatter_reduce + raw_input = paddle.randn([100, 100], "float32") + axis = 0 + raw_index = paddle.randint(0, 100, [100, 100], "int64") + raw_values = paddle.randn([100, 100], "float32") + raw_output = raw_api(raw_input, raw_index, raw_values, axis=axis, reduce="add") + decomp_output = decomp_api( + raw_input, axis, raw_index, src=raw_values, reduce="sum" + ) + + np.testing.assert_allclose( + raw_output.numpy(), + decomp_output.numpy(), + 2e-5, + 1e-7, + ) + + def test_sec(self): + shape = [10, 3] + length = shape[0] + size = 3 + + split_sections = decomp.sec(length, size) + assert split_sections == [3, 3, 3, 1] + + def test_masked_add_(self): + decomp_api = decomp.masked_add_ + + raw_input = paddle.randn([10, 10], "float32") + raw_mask = paddle.randint(0, 2, [10, 10]).astype("bool") + add_values = paddle.randn([10, 10], "float32") + raw_output = raw_input.clone() + + for i in range(raw_input.shape[0]): + for j in range(raw_input.shape[1]): + if raw_mask[i][j]: + raw_output[i][j] += add_values[i][j] + + decomp_output = decomp_api(raw_input, raw_mask, add_values[raw_mask]) + + np.testing.assert_equal( + raw_output.numpy(), + decomp_output.numpy(), # inplace + ) + + np.testing.assert_equal( + raw_output.numpy(), + raw_input.numpy(), # inplace + ) + + def test_normalize_decomp(self): + raw_api = paddle.nn.functional.normalize + decomp_api = decomp.normalize_decomp + + raw_input = paddle.randn([100, 100], "float32") + axis = -1 + + raw_output = raw_api(raw_input, p=2, axis=axis) + decomp_output = decomp_api(raw_input, p=2, axis=axis) + + np.testing.assert_allclose( + raw_output.numpy(), + decomp_output.numpy(), # inplace + 1e-5, + 1e-8, + ) diff --git a/source/tests/pd/test_dp_show.py b/source/tests/pd/test_dp_show.py new file mode 100644 index 0000000000..c1c20ff3a1 --- /dev/null +++ b/source/tests/pd/test_dp_show.py @@ -0,0 +1,219 @@ +# SPDX-License-Identifier: 
LGPL-3.0-or-later
+import io
+import json
+import os
+import shutil
+import unittest
+from contextlib import (
+    redirect_stderr,
+)
+from copy import (
+    deepcopy,
+)
+from pathlib import (
+    Path,
+)
+
+from deepmd.pd.entrypoints.main import (
+    get_trainer,
+)
+from deepmd.pd.utils.multi_task import (
+    preprocess_shared_params,
+)
+
+from .common import (
+    run_dp,
+)
+from .model.test_permutation import (
+    model_se_e2_a,
+)
+
+
+class TestSingleTaskModel(unittest.TestCase):
+    def setUp(self):
+        input_json = str(Path(__file__).parent / "water/se_atten.json")
+        with open(input_json) as f:
+            self.config = json.load(f)
+        os.environ["FLAGS_prim_enable_dynamic"] = "1"
+        os.environ["FLAGS_enable_pir_api"] = "1"
+        self.config["training"]["numb_steps"] = 1
+        self.config["training"]["save_freq"] = 1
+        data_file = [str(Path(__file__).parent / "water/data/single")]
+        self.config["training"]["training_data"]["systems"] = data_file
+        self.config["training"]["validation_data"]["systems"] = data_file
+        self.config["model"] = deepcopy(model_se_e2_a)
+        self.config["model"]["type_map"] = ["O", "H", "Au"]
+        trainer = get_trainer(deepcopy(self.config))
+        trainer.run()
+        run_dp("dp --pd freeze")
+
+    def test_checkpoint(self):
+        INPUT = "model.pd"
+        ATTRIBUTES = "type-map descriptor fitting-net"
+        with redirect_stderr(io.StringIO()) as f:
+            run_dp(f"dp --pd show {INPUT} {ATTRIBUTES}")
+        results = f.getvalue().split("\n")[:-1]
+        assert "This is a singletask model" in results[-4]
+        assert "The type_map is ['O', 'H', 'Au']" in results[-3]
+        assert all(
+            s in results[-2]
+            for s in ("{'type': 'se_e2_a'", "'sel': [46, 92, 4]", "'rcut': 4.0")
+        )
+        assert (
+            "The fitting_net parameter is {'neuron': [24, 24, 24], 'resnet_dt': True, 'seed': 1}"
+            in results[-1]
+        )
+
+    @unittest.skip(
+        "Paddle does not support `dp --pd show` for frozen models (.json and .pdiparams files); "
+        "this will be supported in the future."
+    )
+    def test_frozen_model(self):
+        INPUT = "frozen_model.json"
+        ATTRIBUTES = "type-map descriptor fitting-net"
+        with redirect_stderr(io.StringIO()) as f:
+            run_dp(f"dp --pd show {INPUT} {ATTRIBUTES}")
+        results = f.getvalue().split("\n")[:-1]
+        assert "This is a singletask model" in results[-4]
+        assert "The type_map is ['O', 'H', 'Au']" in results[-3]
+        assert all(
+            s in results[-2]
+            for s in ("{'type': 'se_e2_a'", "'sel': [46, 92, 4]", "'rcut': 4.0")
+        )
+        assert (
+            "The fitting_net parameter is {'neuron': [24, 24, 24], 'resnet_dt': True, 'seed': 1}"
+            in results[-1]
+        )
+
+    def test_checkpoint_error(self):
+        INPUT = "model.pd"
+        ATTRIBUTES = "model-branch type-map descriptor fitting-net"
+        with self.assertRaisesRegex(
+            RuntimeError, "The 'model-branch' option requires a multitask model"
+        ):
+            run_dp(f"dp --pd show {INPUT} {ATTRIBUTES}")
+
+    def tearDown(self):
+        for f in os.listdir("."):
+            if f.startswith("model") and f.endswith("pd"):
+                os.remove(f)
+            if f in ["lcurve.out", "frozen_model.pd", "output.txt", "checkpoint"]:
+                os.remove(f)
+            if f in ["stat_files"]:
+                shutil.rmtree(f)
+
+
+class TestMultiTaskModel(unittest.TestCase):
+    def setUp(self):
+        input_json = str(Path(__file__).parent / "water/multitask.json")
+        with open(input_json) as f:
+            self.config = json.load(f)
+        self.config["model"]["shared_dict"]["my_descriptor"] = model_se_e2_a[
+            "descriptor"
+        ]
+        data_file = [str(Path(__file__).parent / "water/data/data_0")]
+        self.stat_files = "se_e2_a"
+        os.makedirs(self.stat_files, exist_ok=True)
+        self.config["training"]["data_dict"]["model_1"]["training_data"]["systems"] = (
+            data_file
+        )
+        self.config["training"]["data_dict"]["model_1"]["validation_data"][
+            "systems"
+        ] = data_file
+        self.config["training"]["data_dict"]["model_1"]["stat_file"] = (
+            f"{self.stat_files}/model_1"
+        )
+        self.config["training"]["data_dict"]["model_2"]["training_data"]["systems"] = (
+            data_file
+        )
+        self.config["training"]["data_dict"]["model_2"]["validation_data"][
+            "systems"
+        ] = data_file
+        self.config["training"]["data_dict"]["model_2"]["stat_file"] = (
+            f"{self.stat_files}/model_2"
+        )
+        self.config["model"]["model_dict"]["model_1"]["fitting_net"] = {
+            "neuron": [1, 2, 3],
+            "seed": 678,
+        }
+        self.config["model"]["model_dict"]["model_2"]["fitting_net"] = {
+            "neuron": [9, 8, 7],
+            "seed": 1111,
+        }
+        self.config["training"]["numb_steps"] = 1
+        self.config["training"]["save_freq"] = 1
+        self.origin_config = deepcopy(self.config)
+        self.config["model"], self.shared_links = preprocess_shared_params(
+            self.config["model"]
+        )
+        trainer = get_trainer(deepcopy(self.config), shared_links=self.shared_links)
+        trainer.run()
+        run_dp("dp --pd freeze --head model_1")
+
+    def test_checkpoint(self):
+        INPUT = "model.ckpt.pd"
+        ATTRIBUTES = "model-branch type-map descriptor fitting-net"
+        with redirect_stderr(io.StringIO()) as f:
+            run_dp(f"dp --pd show {INPUT} {ATTRIBUTES}")
+        results = f.getvalue().split("\n")[:-1]
+        assert "This is a multitask model" in results[-8]
+        assert (
+            "Available model branches are ['model_1', 'model_2', 'RANDOM'], "
+            "where 'RANDOM' means using a randomly initialized fitting net."
+            in results[-7]
+        )
+        assert "The type_map of branch model_1 is ['O', 'H', 'B']" in results[-6]
+        assert "The type_map of branch model_2 is ['O', 'H', 'B']" in results[-5]
+        assert all(
+            s in results[-4]
+            for s in (
+                "model_1",
+                "'type': 'se_e2_a'",
+                "'sel': [46, 92, 4]",
+                "'rcut_smth': 0.5",
+            )
+        )
+        assert all(
+            s in results[-3]
+            for s in (
+                "model_2",
+                "'type': 'se_e2_a'",
+                "'sel': [46, 92, 4]",
+                "'rcut_smth': 0.5",
+            )
+        )
+        assert (
+            "The fitting_net parameter of branch model_1 is {'neuron': [1, 2, 3], 'seed': 678}"
+            in results[-2]
+        )
+        assert (
+            "The fitting_net parameter of branch model_2 is {'neuron': [9, 8, 7], 'seed': 1111}"
+            in results[-1]
+        )
+
+    @unittest.skip(
+        "Paddle does not support `dp --pd show` for frozen models (.json and .pdiparams files); "
+        "this will be supported in the future."
+    )
+    def test_frozen_model(self):
+        INPUT = "frozen_model.json"
+        ATTRIBUTES = "type-map descriptor fitting-net"
+        with redirect_stderr(io.StringIO()) as f:
+            run_dp(f"dp --pd show {INPUT} {ATTRIBUTES}")
+        results = f.getvalue().split("\n")[:-1]
+        assert "This is a singletask model" in results[-4]
+        assert "The type_map is ['O', 'H', 'B']" in results[-3]
+        assert all(
+            s in results[-2]
+            for s in ("'type': 'se_e2_a'", "'sel': [46, 92, 4]", "'rcut_smth': 0.5")
+        )
+        assert (
+            "The fitting_net parameter is {'neuron': [1, 2, 3], 'seed': 678}"
+            in results[-1]
+        )
+
+    def tearDown(self):
+        for f in os.listdir("."):
+            if f.startswith("model") and f.endswith("pd"):
+                os.remove(f)
+            if f in [
+                "lcurve.out",
+                "frozen_model.json",
+                "frozen_model.pdiparams",
+                "checkpoint",
+                "output.txt",
+            ]:
+                os.remove(f)
+            if f in ["stat_files", self.stat_files]:
+                shutil.rmtree(f)
diff --git a/source/tests/pd/test_finetune.py b/source/tests/pd/test_finetune.py
new file mode 100644
index 0000000000..2c6cca83aa
--- /dev/null
+++ b/source/tests/pd/test_finetune.py
@@ -0,0 +1,379 @@
+# SPDX-License-Identifier: LGPL-3.0-or-later
+import json
+import os
+import shutil
+import tempfile
+import unittest
+from copy import (
+    deepcopy,
+)
+from pathlib import (
+    Path,
+)
+
+import numpy as np
+import paddle
+
+from deepmd.infer.deep_eval import (
+    DeepEval,
+)
+from deepmd.pd.entrypoints.main import (
+    get_trainer,
+)
+from deepmd.pd.model.model import (
+    get_model,
+)
+from deepmd.pd.utils import (
+    env,
+)
+from deepmd.pd.utils.dataloader import (
+    DpLoaderSet,
+)
+from deepmd.pd.utils.finetune import (
+    get_finetune_rules,
+)
+from deepmd.pd.utils.stat import (
+    make_stat_input,
+)
+from deepmd.pd.utils.utils import (
+    to_numpy_array,
+    to_paddle_tensor,
+)
+from deepmd.utils.data import (
+    DataRequirementItem,
+)
+
+from .model.test_permutation import (
+    model_dos,
+    model_dpa1,
+    model_dpa2,
+    model_se_e2_a,
+    model_zbl,
+)
+
+energy_data_requirement = [
+    DataRequirementItem(
+        "energy",
+        ndof=1,
+        atomic=False,
+        must=False,
+        high_prec=True,
+    ),
+    DataRequirementItem(
+        "force",
+        ndof=3,
+        atomic=True,
+        must=False,
+        high_prec=False,
+    ),
+    DataRequirementItem(
+        "virial",
+        ndof=9,
+        atomic=False,
+        must=False,
+        high_prec=False,
+    ),
+    DataRequirementItem(
+        "dos",
+        ndof=250,
+        atomic=False,
+        must=False,
+        high_prec=True,
+    ),
+    DataRequirementItem(
+        "atom_ener",
+        ndof=1,
+        atomic=True,
+        must=False,
+        high_prec=False,
+    ),
+    DataRequirementItem(
+        "atom_pref",
+        ndof=1,
+        atomic=True,
+        must=False,
+        high_prec=False,
+        repeat=3,
+    ),
+]
+
+
+class FinetuneTest:
+    @unittest.skip(
+        "Paddle does not support fine-tuning from frozen models (.json and .pdiparams files); "
+        "this will be supported in the future."
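+        # The test below validates "change-by-statistic" bias updating: the
+        # expected per-type shift is the least-squares solution of the energy
+        # residuals against per-type atom counts (np.linalg.lstsq further down).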
+ ) + def test_finetune_change_out_bias(self): + self.testkey = "energy" if self.testkey is None else self.testkey + # get data + data = DpLoaderSet( + self.data_file, + batch_size=1, + type_map=self.config["model"]["type_map"], + ) + data.add_data_requirement(energy_data_requirement) + sampled = make_stat_input( + data.systems, + data.dataloaders, + nbatches=1, + ) + # make sampled of multiple frames with different atom numbs + numb_atom = sampled[0]["atype"].shape[1] + small_numb_atom = numb_atom // 2 + small_atom_data = deepcopy(sampled[0]) + atomic_key = ["coord", "atype"] + for kk in atomic_key: + small_atom_data[kk] = small_atom_data[kk][:, :small_numb_atom] + scale_pref = float(small_numb_atom / numb_atom) + small_atom_data[self.testkey] *= scale_pref + small_atom_data["natoms"][:, :2] = small_numb_atom + small_atom_data["natoms"][:, 2:] = paddle.bincount( + small_atom_data["atype"][0], + minlength=small_atom_data["natoms"].shape[1] - 2, + ) + sampled = [sampled[0], small_atom_data] + + # get model + model = get_model(self.config["model"]).to(env.DEVICE) + atomic_model = model.atomic_model + atomic_model["out_bias"] = paddle.randn(atomic_model["out_bias"].shape) + energy_bias_before = to_numpy_array(atomic_model["out_bias"])[0] + + # prepare original model for test + dp = paddle.jit.to_static(model) + tmp_model = tempfile.NamedTemporaryFile(delete=False, suffix=".pd") + paddle.jit.save(dp, tmp_model.name) + dp = DeepEval(tmp_model.name) + origin_type_map = ["O", "H"] + full_type_map = ["O", "H", "B"] + + # change energy bias + model.atomic_model.change_out_bias( + sampled, + bias_adjust_mode="change-by-statistic", + ) + energy_bias_after = to_numpy_array(atomic_model["out_bias"])[0] + + # get ground-truth energy bias change + sorter = np.argsort(full_type_map) + idx_type_map = sorter[ + np.searchsorted(full_type_map, origin_type_map, sorter=sorter) + ] + ntest = 1 + atom_nums = np.tile( + np.bincount(to_numpy_array(sampled[0]["atype"][0]))[idx_type_map], + (ntest, 1), + ) + atom_nums_small = np.tile( + np.bincount(to_numpy_array(sampled[1]["atype"][0]))[idx_type_map], + (ntest, 1), + ) + atom_nums = np.concatenate([atom_nums, atom_nums_small], axis=0) + + energy = dp.eval( + to_numpy_array(sampled[0]["coord"][:ntest]), + to_numpy_array(sampled[0]["box"][:ntest]), + to_numpy_array(sampled[0]["atype"][0]), + )[0] + energy_small = dp.eval( + to_numpy_array(sampled[1]["coord"][:ntest]), + to_numpy_array(sampled[1]["box"][:ntest]), + to_numpy_array(sampled[1]["atype"][0]), + )[0] + energy_diff = to_numpy_array(sampled[0][self.testkey][:ntest]) - energy + energy_diff_small = ( + to_numpy_array(sampled[1][self.testkey][:ntest]) - energy_small + ) + energy_diff = np.concatenate([energy_diff, energy_diff_small], axis=0) + finetune_shift = ( + energy_bias_after[idx_type_map] - energy_bias_before[idx_type_map] + ).ravel() + ground_truth_shift = np.linalg.lstsq(atom_nums, energy_diff, rcond=None)[ + 0 + ].reshape(-1) + + # check values + np.testing.assert_almost_equal(finetune_shift, ground_truth_shift, decimal=10) + + self.tearDown() + + def test_finetune_change_type(self): + if not self.mixed_types: + # skip when not mixed_types + return + # get data + data = DpLoaderSet( + self.data_file, + batch_size=1, + type_map=self.config["model"]["type_map"], + ) + data.add_data_requirement(energy_data_requirement) + sampled = make_stat_input( + data.systems, + data.dataloaders, + nbatches=1, + ) + data_type_map = self.config["model"]["type_map"] + for [old_type_map, new_type_map] in [ + [["H", "X1", 
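+            # both remap directions are exercised: a padded five-element type
+            # map reduced to ["O", "H", "B"], and the reverse; "X1"/"X2" are
+            # dummy element names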
"X2", "O", "B"], ["O", "H", "B"]], + [["O", "H", "B"], ["H", "X1", "X2", "O", "B"]], + ]: + old_type_map_index = np.array( + [old_type_map.index(i) for i in data_type_map], dtype=np.int32 + ) + new_type_map_index = np.array( + [new_type_map.index(i) for i in data_type_map], dtype=np.int32 + ) + + # get pretrained model with old type map + config_old_type_map = deepcopy(self.config) + config_old_type_map["model"]["type_map"] = old_type_map + trainer = get_trainer(config_old_type_map) + trainer.run() + finetune_model = ( + config_old_type_map["training"].get("save_ckpt", "model.ckpt") + ".pd" + ) + + # finetune load the same type_map + config_old_type_map_finetune = deepcopy(self.config) + config_old_type_map_finetune["model"]["type_map"] = old_type_map + config_old_type_map_finetune["model"], finetune_links = get_finetune_rules( + finetune_model, + config_old_type_map_finetune["model"], + ) + trainer_finetune_old = get_trainer( + config_old_type_map_finetune, + finetune_model=finetune_model, + finetune_links=finetune_links, + ) + + # finetune load the slim type_map + config_new_type_map_finetune = deepcopy(self.config) + config_new_type_map_finetune["model"]["type_map"] = new_type_map + config_new_type_map_finetune["model"], finetune_links = get_finetune_rules( + finetune_model, + config_new_type_map_finetune["model"], + ) + trainer_finetune_new = get_trainer( + config_new_type_map_finetune, + finetune_model=finetune_model, + finetune_links=finetune_links, + ) + + # test consistency + ntest = 1 + prec = 1e-10 + model_old_result = trainer_finetune_old.model( + sampled[0]["coord"][:ntest], + to_paddle_tensor(old_type_map_index)[sampled[0]["atype"][:ntest]], + box=sampled[0]["box"][:ntest], + ) + model_new_result = trainer_finetune_new.model( + sampled[0]["coord"][:ntest], + to_paddle_tensor(new_type_map_index)[sampled[0]["atype"][:ntest]], + box=sampled[0]["box"][:ntest], + ) + test_keys = ["energy", "force", "virial"] + for key in test_keys: + np.testing.assert_allclose( + model_old_result[key].numpy(), + model_new_result[key].numpy(), + rtol=prec, + atol=prec, + ) + + self.tearDown() + + def tearDown(self): + for f in os.listdir("."): + if f.startswith("model") and f.endswith(".pd"): + os.remove(f) + if f in ["lcurve.out"]: + os.remove(f) + if f in ["stat_files"]: + shutil.rmtree(f) + + +class TestEnergyModelSeA(FinetuneTest, unittest.TestCase): + def setUp(self): + input_json = str(Path(__file__).parent / "water/se_atten.json") + with open(input_json) as f: + self.config = json.load(f) + self.data_file = [str(Path(__file__).parent / "water/data/single")] + self.config["training"]["training_data"]["systems"] = self.data_file + self.config["training"]["validation_data"]["systems"] = self.data_file + self.config["model"] = deepcopy(model_se_e2_a) + self.config["training"]["numb_steps"] = 1 + self.config["training"]["save_freq"] = 1 + self.mixed_types = False + self.testkey = None + + +@unittest.skip("Skip for not implemented yet") +class TestEnergyZBLModelSeA(FinetuneTest, unittest.TestCase): + def setUp(self): + input_json = str(Path(__file__).parent / "water/se_atten.json") + with open(input_json) as f: + self.config = json.load(f) + self.data_file = [str(Path(__file__).parent / "water/data/single")] + self.config["training"]["training_data"]["systems"] = self.data_file + self.config["training"]["validation_data"]["systems"] = self.data_file + self.config["model"] = deepcopy(model_zbl) + self.config["training"]["numb_steps"] = 1 + self.config["training"]["save_freq"] = 1 + self.mixed_types 
= False + self.testkey = None + + +@unittest.skip("Skip for not implemented yet") +class TestEnergyDOSModelSeA(FinetuneTest, unittest.TestCase): + def setUp(self): + input_json = str(Path(__file__).parent / "dos/input.json") + with open(input_json) as f: + self.config = json.load(f) + self.data_file = [str(Path(__file__).parent / "dos/data/global_system")] + self.config["training"]["training_data"]["systems"] = self.data_file + self.config["training"]["validation_data"]["systems"] = self.data_file + self.config["model"] = deepcopy(model_dos) + self.config["training"]["numb_steps"] = 1 + self.config["training"]["save_freq"] = 1 + self.mixed_types = False + self.testkey = "dos" + + +@unittest.skip("Skip for not implemented yet") +class TestEnergyModelDPA1(FinetuneTest, unittest.TestCase): + def setUp(self): + input_json = str(Path(__file__).parent / "water/se_atten.json") + with open(input_json) as f: + self.config = json.load(f) + self.data_file = [str(Path(__file__).parent / "water/data/single")] + self.config["training"]["training_data"]["systems"] = self.data_file + self.config["training"]["validation_data"]["systems"] = self.data_file + self.config["model"] = deepcopy(model_dpa1) + self.config["training"]["numb_steps"] = 1 + self.config["training"]["save_freq"] = 1 + self.mixed_types = True + self.testkey = None + + +@unittest.skip("Skip for not implemented yet") +class TestEnergyModelDPA2(FinetuneTest, unittest.TestCase): + def setUp(self): + input_json = str(Path(__file__).parent / "water/se_atten.json") + with open(input_json) as f: + self.config = json.load(f) + self.data_file = [str(Path(__file__).parent / "water/data/single")] + self.config["training"]["training_data"]["systems"] = self.data_file + self.config["training"]["validation_data"]["systems"] = self.data_file + self.config["model"] = deepcopy(model_dpa2) + self.config["model"]["descriptor"]["repformer"]["nlayers"] = 2 + + self.config["training"]["numb_steps"] = 1 + self.config["training"]["save_freq"] = 1 + self.mixed_types = True + self.testkey = None + + +if __name__ == "__main__": + unittest.main() diff --git a/source/tests/pd/test_loss.py b/source/tests/pd/test_loss.py new file mode 100644 index 0000000000..a7b8109e10 --- /dev/null +++ b/source/tests/pd/test_loss.py @@ -0,0 +1,585 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import os +import unittest + +import numpy as np +import paddle +import tensorflow.compat.v1 as tf + +tf.disable_eager_execution() +from pathlib import ( + Path, +) + +from deepmd.pd.loss import ( + EnergyStdLoss, +) +from deepmd.pd.utils.dataset import ( + DeepmdDataSetForLoader, +) +from deepmd.tf.loss.ener import ( + EnerStdLoss, +) +from deepmd.utils.data import ( + DataRequirementItem, +) + +from ..seed import ( + GLOBAL_SEED, +) +from .model.test_embedding_net import ( + get_single_batch, +) +from .test_finetune import ( + energy_data_requirement, +) + +CUR_DIR = os.path.dirname(__file__) + + +def get_batch(system, type_map, data_requirement): + dataset = DeepmdDataSetForLoader(system, type_map) + dataset.add_data_requirement(data_requirement) + np_batch, pd_batch = get_single_batch(dataset) + return np_batch, pd_batch + + +class LossCommonTest(unittest.TestCase): + def setUp(self): + self.cur_lr = 1.2 + if not self.spin: + self.system = str(Path(__file__).parent / "water/data/data_0") + self.type_map = ["H", "O"] + else: + self.system = str(Path(__file__).parent / "NiO/data/data_0") + self.type_map = ["Ni", "O"] + energy_data_requirement.append( + DataRequirementItem( + "force_mag", + 
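+                    # "force_mag" is the per-atom magnetic-force label used by
+                    # the spin (NiO) system; three components per atom, like
+                    # the ordinary force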
ndof=3, + atomic=True, + must=False, + high_prec=False, + ) + ) + # data + np_batch, pd_batch = get_batch( + self.system, self.type_map, energy_data_requirement + ) + natoms = np_batch["natoms"] + self.nloc = natoms[0] + nframes = np_batch["energy"].shape[0] + rng = np.random.default_rng(GLOBAL_SEED) + + if not self.spin: + l_energy, l_force, l_virial = ( + np_batch["energy"], + np_batch["force"], + np_batch["virial"], + ) + p_energy, p_force, p_virial = ( + np.ones_like(l_energy), + np.ones_like(l_force), + np.ones_like(l_virial), + ) + nloc = natoms[0] + batch_size = pd_batch["coord"].shape[0] + p_atom_energy = rng.random(size=[batch_size, nloc]) + l_atom_energy = rng.random(size=[batch_size, nloc]) + atom_pref = rng.random(size=[batch_size, nloc * 3]) + drdq = rng.random(size=[batch_size, nloc * 2 * 3]) + atom_ener_coeff = rng.random(size=[batch_size, nloc]) + # placeholders + l_force_real = l_force + l_force_mag = l_force + p_force_real = p_force + p_force_mag = p_force + else: + # data + np_batch, pd_batch = get_batch( + self.system, self.type_map, energy_data_requirement + ) + natoms = np_batch["natoms"] + self.nloc = natoms[0] + l_energy, l_force_real, l_force_mag, l_virial = ( + np_batch["energy"], + np_batch["force"], + np_batch["force_mag"], + np_batch["virial"], + ) + # merged force for tf old implement + l_force_merge_tf = np.concatenate( + [ + l_force_real.reshape([nframes, self.nloc, 3]), + l_force_mag.reshape([nframes, self.nloc, 3])[ + np_batch["atype"] == 0 + ].reshape([nframes, -1, 3]), + ], + axis=1, + ).reshape([nframes, -1]) + p_energy, p_force_real, p_force_mag, p_force_merge_tf, p_virial = ( + np.ones_like(l_energy), + np.ones_like(l_force_real), + np.ones_like(l_force_mag), + np.ones_like(l_force_merge_tf), + np.ones_like(l_virial), + ) + virt_nloc = (np_batch["atype"] == 0).sum(-1) + natoms_tf = np.concatenate([natoms, virt_nloc], axis=0) + natoms_tf[:2] += virt_nloc + nloc = natoms_tf[0] + batch_size = pd_batch["coord"].shape[0] + p_atom_energy = rng.random(size=[batch_size, nloc]) + l_atom_energy = rng.random(size=[batch_size, nloc]) + atom_pref = rng.random(size=[batch_size, nloc * 3]) + drdq = rng.random(size=[batch_size, nloc * 2 * 3]) + atom_ener_coeff = rng.random(size=[batch_size, nloc]) + self.nloc_tf = nloc + natoms = natoms_tf + l_force = l_force_merge_tf + p_force = p_force_merge_tf + + # tf + self.g = tf.Graph() + with self.g.as_default(): + t_cur_lr = tf.placeholder(shape=[], dtype=tf.float64) + t_natoms = tf.placeholder(shape=[None], dtype=tf.int32) + t_penergy = tf.placeholder(shape=[None, 1], dtype=tf.float64) + t_pforce = tf.placeholder(shape=[None, None], dtype=tf.float64) + t_pvirial = tf.placeholder(shape=[None, 9], dtype=tf.float64) + t_patom_energy = tf.placeholder(shape=[None, None], dtype=tf.float64) + t_lenergy = tf.placeholder(shape=[None, 1], dtype=tf.float64) + t_lforce = tf.placeholder(shape=[None, None], dtype=tf.float64) + t_lvirial = tf.placeholder(shape=[None, 9], dtype=tf.float64) + t_latom_energy = tf.placeholder(shape=[None, None], dtype=tf.float64) + t_atom_pref = tf.placeholder(shape=[None, None], dtype=tf.float64) + t_atom_ener_coeff = tf.placeholder(shape=[None, None], dtype=tf.float64) + t_drdq = tf.placeholder(shape=[None, None], dtype=tf.float64) + find_energy = tf.constant(1.0, dtype=tf.float64) + find_force = tf.constant(1.0, dtype=tf.float64) + find_virial = tf.constant(1.0 if not self.spin else 0.0, dtype=tf.float64) + find_atom_energy = tf.constant(1.0, dtype=tf.float64) + find_atom_pref = tf.constant(1.0, 
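+            # the find_* constants mirror the data loader's "label present"
+            # flags: 1.0 turns the corresponding loss term on, 0.0 turns it
+            # off (cf. find_virial above, which is 0.0 in the spin case)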
dtype=tf.float64) + find_drdq = tf.constant(1.0, dtype=tf.float64) + find_atom_ener_coeff = tf.constant(1.0, dtype=tf.float64) + model_dict = { + "energy": t_penergy, + "force": t_pforce, + "virial": t_pvirial, + "atom_ener": t_patom_energy, + } + label_dict = { + "energy": t_lenergy, + "force": t_lforce, + "virial": t_lvirial, + "atom_ener": t_latom_energy, + "atom_pref": t_atom_pref, + "drdq": t_drdq, + "atom_ener_coeff": t_atom_ener_coeff, + "find_energy": find_energy, + "find_force": find_force, + "find_virial": find_virial, + "find_atom_ener": find_atom_energy, + "find_atom_pref": find_atom_pref, + "find_drdq": find_drdq, + "find_atom_ener_coeff": find_atom_ener_coeff, + } + self.tf_loss_sess = self.tf_loss.build( + t_cur_lr, t_natoms, model_dict, label_dict, "" + ) + + self.feed_dict = { + t_cur_lr: self.cur_lr, + t_natoms: natoms, + t_penergy: p_energy, + t_pforce: p_force, + t_pvirial: p_virial.reshape([-1, 9]), + t_patom_energy: p_atom_energy, + t_lenergy: l_energy, + t_lforce: l_force, + t_lvirial: l_virial.reshape([-1, 9]), + t_latom_energy: l_atom_energy, + t_atom_pref: atom_pref, + t_drdq: drdq, + t_atom_ener_coeff: atom_ener_coeff, + } + # pd + if not self.spin: + self.model_pred = { + "energy": paddle.to_tensor(p_energy), + "force": paddle.to_tensor(p_force), + "virial": paddle.to_tensor(p_virial), + "atom_energy": paddle.to_tensor(p_atom_energy), + } + self.label = { + "energy": paddle.to_tensor(l_energy), + "find_energy": 1.0, + "force": paddle.to_tensor(l_force), + "find_force": 1.0, + "virial": paddle.to_tensor(l_virial), + "find_virial": 1.0, + "atom_ener": paddle.to_tensor(l_atom_energy), + "find_atom_ener": 1.0, + "atom_pref": paddle.to_tensor(atom_pref), + "find_atom_pref": 1.0, + "drdq": paddle.to_tensor(drdq), + "find_drdq": 1.0, + "atom_ener_coeff": paddle.to_tensor(atom_ener_coeff), + "find_atom_ener_coeff": 1.0, + } + self.label_absent = { + "energy": paddle.to_tensor(l_energy), + "force": paddle.to_tensor(l_force), + "virial": paddle.to_tensor(l_virial), + "atom_ener": paddle.to_tensor(l_atom_energy), + "atom_pref": paddle.to_tensor(atom_pref), + "drdq": paddle.to_tensor(drdq), + "atom_ener_coeff": paddle.to_tensor(atom_ener_coeff), + } + else: + self.model_pred = { + "energy": paddle.to_tensor(p_energy), + "force": paddle.to_tensor(p_force_real).reshape( + [nframes, self.nloc, 3] + ), + "force_mag": paddle.to_tensor(p_force_mag).reshape( + [nframes, self.nloc, 3] + ), + "mask_mag": paddle.to_tensor(np_batch["atype"] == 0).reshape( + [nframes, self.nloc, 1] + ), + "atom_energy": paddle.to_tensor(p_atom_energy), + } + self.label = { + "energy": paddle.to_tensor(l_energy), + "find_energy": 1.0, + "force": paddle.to_tensor(l_force_real).reshape( + [nframes, self.nloc, 3] + ), + "find_force": 1.0, + "force_mag": paddle.to_tensor(l_force_mag).reshape( + [nframes, self.nloc, 3] + ), + "find_force_mag": 1.0, + "atom_ener": paddle.to_tensor(l_atom_energy), + "find_atom_ener": 1.0, + "atom_ener_coeff": paddle.to_tensor(atom_ener_coeff), + "find_atom_ener_coeff": 1.0, + } + self.label_absent = { + "energy": paddle.to_tensor(l_energy), + "force": paddle.to_tensor(l_force_real).reshape( + [nframes, self.nloc, 3] + ), + "force_mag": paddle.to_tensor(l_force_mag).reshape( + [nframes, self.nloc, 3] + ), + "atom_ener": paddle.to_tensor(l_atom_energy), + "atom_ener_coeff": paddle.to_tensor(atom_ener_coeff), + } + self.natoms = pd_batch["natoms"] + + def tearDown(self) -> None: + tf.reset_default_graph() + return super().tearDown() + + +class TestEnerStdLoss(LossCommonTest): + 
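+    # How the prefactors enter (hedged recollection, not stated in this patch):
+    # each loss term is weighted roughly as
+    #     pref(lr) = limit_pref + (start_pref - limit_pref) * lr / start_lr
+    # so running the consistency check at cur_lr = 1.2 exercises the
+    # interpolation away from both endpoints.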
def setUp(self): + self.start_lr = 1.1 + self.start_pref_e = 0.02 + self.limit_pref_e = 1.0 + self.start_pref_f = 1000.0 + self.limit_pref_f = 1.0 + self.start_pref_v = 0.02 + self.limit_pref_v = 1.0 + # tf + self.tf_loss = EnerStdLoss( + self.start_lr, + self.start_pref_e, + self.limit_pref_e, + self.start_pref_f, + self.limit_pref_f, + self.start_pref_v, + self.limit_pref_v, + ) + # pd + self.pd_loss = EnergyStdLoss( + self.start_lr, + self.start_pref_e, + self.limit_pref_e, + self.start_pref_f, + self.limit_pref_f, + self.start_pref_v, + self.limit_pref_v, + ) + self.spin = False + super().setUp() + + def test_consistency(self): + with tf.Session(graph=self.g) as sess: + tf_loss, tf_more_loss = sess.run( + self.tf_loss_sess, feed_dict=self.feed_dict + ) + + def fake_model(): + return self.model_pred + + _, pd_loss, pd_more_loss = self.pd_loss( + {}, + fake_model, + self.label, + self.nloc, + self.cur_lr, + ) + _, pd_loss_absent, pd_more_loss_absent = self.pd_loss( + {}, + fake_model, + self.label_absent, + self.nloc, + self.cur_lr, + ) + pd_loss = pd_loss.detach().cpu() + pd_loss_absent = pd_loss_absent.detach().cpu() + self.assertTrue(np.allclose(tf_loss, pd_loss.numpy())) + self.assertTrue(np.allclose(0.0, pd_loss_absent.numpy())) + for key in ["ener", "force", "virial"]: + self.assertTrue( + np.allclose( + tf_more_loss[f"l2_{key}_loss"], pd_more_loss[f"l2_{key}_loss"] + ) + ) + self.assertTrue(np.isnan(pd_more_loss_absent[f"l2_{key}_loss"].numpy())) + + +class TestEnerStdLossAePfGf(LossCommonTest): + def setUp(self): + self.start_lr = 1.1 + self.start_pref_e = 0.02 + self.limit_pref_e = 1.0 + self.start_pref_f = 1000.0 + self.limit_pref_f = 1.0 + self.start_pref_v = 0.02 + self.limit_pref_v = 1.0 + self.start_pref_ae = 0.02 + self.limit_pref_ae = 1.0 + self.start_pref_pf = 0.02 + self.limit_pref_pf = 1.0 + self.start_pref_gf = 0.02 + self.limit_pref_gf = 1.0 + self.numb_generalized_coord = 2 + # tf + self.tf_loss = EnerStdLoss( + self.start_lr, + self.start_pref_e, + self.limit_pref_e, + self.start_pref_f, + self.limit_pref_f, + self.start_pref_v, + self.limit_pref_v, + self.start_pref_ae, + self.limit_pref_ae, + self.start_pref_pf, + self.limit_pref_pf, + start_pref_gf=self.start_pref_gf, + limit_pref_gf=self.limit_pref_gf, + numb_generalized_coord=self.numb_generalized_coord, + ) + # pd + self.pd_loss = EnergyStdLoss( + self.start_lr, + self.start_pref_e, + self.limit_pref_e, + self.start_pref_f, + self.limit_pref_f, + self.start_pref_v, + self.limit_pref_v, + self.start_pref_ae, + self.limit_pref_ae, + self.start_pref_pf, + self.limit_pref_pf, + start_pref_gf=self.start_pref_gf, + limit_pref_gf=self.limit_pref_gf, + numb_generalized_coord=self.numb_generalized_coord, + ) + self.spin = False + super().setUp() + + def test_consistency(self): + with tf.Session(graph=self.g) as sess: + tf_loss, tf_more_loss = sess.run( + self.tf_loss_sess, feed_dict=self.feed_dict + ) + + def fake_model(): + return self.model_pred + + _, pd_loss, pd_more_loss = self.pd_loss( + {}, + fake_model, + self.label, + self.nloc, + self.cur_lr, + ) + _, pd_loss_absent, pd_more_loss_absent = self.pd_loss( + {}, + fake_model, + self.label_absent, + self.nloc, + self.cur_lr, + ) + pd_loss = pd_loss.detach().cpu() + pd_loss_absent = pd_loss_absent.detach().cpu() + self.assertTrue(np.allclose(tf_loss, pd_loss.numpy())) + self.assertTrue(np.allclose(0.0, pd_loss_absent.numpy())) + for key in ["ener", "force", "virial", "atom_ener", "pref_force", "gen_force"]: + self.assertTrue( + np.allclose( + 
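+                    # per-key TF and Paddle losses must agree; with every label
+                    # absent the total Paddle loss collapses to 0 while each
+                    # per-key component is NaN (asserted just below)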
tf_more_loss[f"l2_{key}_loss"], pd_more_loss[f"l2_{key}_loss"] + ) + ) + self.assertTrue(np.isnan(pd_more_loss_absent[f"l2_{key}_loss"].numpy())) + + +class TestEnerStdLossAecoeff(LossCommonTest): + def setUp(self): + self.start_lr = 1.1 + self.start_pref_e = 0.02 + self.limit_pref_e = 1.0 + self.start_pref_f = 1000.0 + self.limit_pref_f = 1.0 + self.start_pref_v = 0.02 + self.limit_pref_v = 1.0 + # tf + self.tf_loss = EnerStdLoss( + self.start_lr, + self.start_pref_e, + self.limit_pref_e, + self.start_pref_f, + self.limit_pref_f, + self.start_pref_v, + self.limit_pref_v, + enable_atom_ener_coeff=True, + ) + # pd + self.pd_loss = EnergyStdLoss( + self.start_lr, + self.start_pref_e, + self.limit_pref_e, + self.start_pref_f, + self.limit_pref_f, + self.start_pref_v, + self.limit_pref_v, + enable_atom_ener_coeff=True, + ) + self.spin = False + super().setUp() + + def test_consistency(self): + with tf.Session(graph=self.g) as sess: + tf_loss, tf_more_loss = sess.run( + self.tf_loss_sess, feed_dict=self.feed_dict + ) + + def fake_model(): + return self.model_pred + + _, pd_loss, pd_more_loss = self.pd_loss( + {}, + fake_model, + self.label, + self.nloc, + self.cur_lr, + ) + _, pd_loss_absent, pd_more_loss_absent = self.pd_loss( + {}, + fake_model, + self.label_absent, + self.nloc, + self.cur_lr, + ) + pd_loss = pd_loss.detach().cpu() + pd_loss_absent = pd_loss_absent.detach().cpu() + self.assertTrue(np.allclose(tf_loss, pd_loss.numpy())) + self.assertTrue(np.allclose(0.0, pd_loss_absent.numpy())) + for key in ["ener", "force", "virial"]: + self.assertTrue( + np.allclose( + tf_more_loss[f"l2_{key}_loss"], pd_more_loss[f"l2_{key}_loss"] + ) + ) + self.assertTrue(np.isnan(pd_more_loss_absent[f"l2_{key}_loss"].numpy())) + + +class TestEnerStdLossRelativeF(LossCommonTest): + def setUp(self): + self.start_lr = 1.1 + self.start_pref_e = 0.02 + self.limit_pref_e = 1.0 + self.start_pref_f = 1000.0 + self.limit_pref_f = 1.0 + self.start_pref_v = 0.02 + self.limit_pref_v = 1.0 + # tf + self.tf_loss = EnerStdLoss( + self.start_lr, + self.start_pref_e, + self.limit_pref_e, + self.start_pref_f, + self.limit_pref_f, + self.start_pref_v, + self.limit_pref_v, + relative_f=0.1, + ) + # pd + self.pd_loss = EnergyStdLoss( + self.start_lr, + self.start_pref_e, + self.limit_pref_e, + self.start_pref_f, + self.limit_pref_f, + self.start_pref_v, + self.limit_pref_v, + relative_f=0.1, + ) + self.spin = False + super().setUp() + + def test_consistency(self): + with tf.Session(graph=self.g) as sess: + tf_loss, tf_more_loss = sess.run( + self.tf_loss_sess, feed_dict=self.feed_dict + ) + + def fake_model(): + return self.model_pred + + _, pd_loss, pd_more_loss = self.pd_loss( + {}, + fake_model, + self.label, + self.nloc, + self.cur_lr, + ) + _, pd_loss_absent, pd_more_loss_absent = self.pd_loss( + {}, + fake_model, + self.label_absent, + self.nloc, + self.cur_lr, + ) + pd_loss = pd_loss.detach().cpu() + pd_loss_absent = pd_loss_absent.detach().cpu() + self.assertTrue(np.allclose(tf_loss, pd_loss.numpy())) + self.assertTrue(np.allclose(0.0, pd_loss_absent.numpy())) + for key in ["ener", "force", "virial"]: + self.assertTrue( + np.allclose( + tf_more_loss[f"l2_{key}_loss"], pd_more_loss[f"l2_{key}_loss"] + ) + ) + self.assertTrue(np.isnan(pd_more_loss_absent[f"l2_{key}_loss"].numpy())) + + +if __name__ == "__main__": + unittest.main() diff --git a/source/tests/pd/test_lr.py b/source/tests/pd/test_lr.py new file mode 100644 index 0000000000..9607f982fd --- /dev/null +++ b/source/tests/pd/test_lr.py @@ -0,0 +1,106 @@ +# 
SPDX-License-Identifier: LGPL-3.0-or-later +import unittest + +import numpy as np +import tensorflow.compat.v1 as tf + +tf.disable_eager_execution() + +from deepmd.dpmodel.utils.learning_rate import ( + LearningRateExp, +) +from deepmd.tf.utils import ( + learning_rate, +) + + +class TestLearningRate(unittest.TestCase): + def setUp(self): + self.start_lr = 0.001 + self.stop_lr = 3.51e-8 + self.decay_steps = np.arange(400, 601, 100) + self.stop_steps = np.arange(500, 1600, 500) + + def test_consistency(self): + for decay_step in self.decay_steps: + for stop_step in self.stop_steps: + self.decay_step = decay_step + self.stop_step = stop_step + self.judge_it() + self.decay_rate_pd() + + def judge_it(self): + base_lr = learning_rate.LearningRateExp( + self.start_lr, self.stop_lr, self.decay_step + ) + g = tf.Graph() + with g.as_default(): + global_step = tf.placeholder(shape=[], dtype=tf.int32) + t_lr = base_lr.build(global_step, self.stop_step) + + my_lr = LearningRateExp( + self.start_lr, self.stop_lr, self.decay_step, self.stop_step + ) + with tf.Session(graph=g) as sess: + base_vals = [ + sess.run(t_lr, feed_dict={global_step: step_id}) + for step_id in range(self.stop_step) + if step_id % self.decay_step != 0 + ] + my_vals = [ + my_lr.value(step_id) + for step_id in range(self.stop_step) + if step_id % self.decay_step != 0 + ] + self.assertTrue(np.allclose(base_vals, my_vals)) + tf.reset_default_graph() + + def decay_rate_pd(self): + my_lr = LearningRateExp( + self.start_lr, self.stop_lr, self.decay_step, self.stop_step + ) + + default_ds = 100 if self.stop_step // 10 > 100 else self.stop_step // 100 + 1 + if self.decay_step >= self.stop_step: + self.decay_step = default_ds + decay_rate = np.exp( + np.log(self.stop_lr / self.start_lr) / (self.stop_step / self.decay_step) + ) + my_lr_decay = LearningRateExp( + self.start_lr, + 1e-10, + self.decay_step, + self.stop_step, + decay_rate=decay_rate, + ) + min_lr = 1e-5 + my_lr_decay_trunc = LearningRateExp( + self.start_lr, + min_lr, + self.decay_step, + self.stop_step, + decay_rate=decay_rate, + ) + my_vals = [ + my_lr.value(step_id) + for step_id in range(self.stop_step) + if step_id % self.decay_step != 0 + ] + my_vals_decay = [ + my_lr_decay.value(step_id) + for step_id in range(self.stop_step) + if step_id % self.decay_step != 0 + ] + my_vals_decay_trunc = [ + my_lr_decay_trunc.value(step_id) + for step_id in range(self.stop_step) + if step_id % self.decay_step != 0 + ] + self.assertTrue(np.allclose(my_vals_decay, my_vals)) + self.assertTrue( + np.allclose(my_vals_decay_trunc, np.clip(my_vals, a_min=min_lr, a_max=None)) + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/source/tests/pd/test_multitask.py b/source/tests/pd/test_multitask.py new file mode 100644 index 0000000000..e3d4cfa7de --- /dev/null +++ b/source/tests/pd/test_multitask.py @@ -0,0 +1,224 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import json +import os +import shutil +import unittest +from copy import ( + deepcopy, +) +from pathlib import ( + Path, +) + +import numpy as np + +from deepmd.pd.entrypoints.main import ( + get_trainer, +) +from deepmd.pd.utils.finetune import ( + get_finetune_rules, +) +from deepmd.pd.utils.multi_task import ( + preprocess_shared_params, +) +from deepmd.utils.argcheck import ( + normalize, +) +from deepmd.utils.compat import ( + update_deepmd_input, +) + +from .model.test_permutation import ( + model_se_e2_a, +) + + +def setUpModule(): + global multitask_template + multitask_template_json = str(Path(__file__).parent / 
"water/multitask.json") + with open(multitask_template_json) as f: + multitask_template = json.load(f) + + +class MultiTaskTrainTest: + def test_multitask_train(self): + # test multitask training + self.config = update_deepmd_input(self.config, warning=True) + self.config = normalize(self.config, multi_task=True) + trainer = get_trainer(deepcopy(self.config), shared_links=self.shared_links) + trainer.run() + # check model keys + self.assertEqual(len(trainer.wrapper.model), 2) + self.assertIn("model_1", trainer.wrapper.model) + self.assertIn("model_2", trainer.wrapper.model) + + # check shared parameters + multi_state_dict = trainer.wrapper.model.state_dict() + for state_key in multi_state_dict: + if "model_1" in state_key: + self.assertIn(state_key.replace("model_1", "model_2"), multi_state_dict) + if "model_2" in state_key: + self.assertIn(state_key.replace("model_2", "model_1"), multi_state_dict) + if "model_1.descriptor" in state_key: + np.testing.assert_allclose( + multi_state_dict[state_key].numpy(), + multi_state_dict[state_key.replace("model_1", "model_2")].numpy(), + ) + + # test multitask fine-tuning + # add model_3 + self.origin_config["model"]["model_dict"]["model_3"] = deepcopy( + self.origin_config["model"]["model_dict"]["model_2"] + ) + self.origin_config["loss_dict"]["model_3"] = deepcopy( + self.origin_config["loss_dict"]["model_2"] + ) + self.origin_config["training"]["model_prob"]["model_3"] = deepcopy( + self.origin_config["training"]["model_prob"]["model_2"] + ) + self.origin_config["training"]["data_dict"]["model_3"] = deepcopy( + self.origin_config["training"]["data_dict"]["model_2"] + ) + self.origin_config["training"]["data_dict"]["model_3"]["stat_file"] = ( + self.origin_config[ + "training" + ]["data_dict"]["model_3"]["stat_file"].replace("model_2", "model_3") + ) + + # add model_4 + self.origin_config["model"]["model_dict"]["model_4"] = deepcopy( + self.origin_config["model"]["model_dict"]["model_2"] + ) + self.origin_config["loss_dict"]["model_4"] = deepcopy( + self.origin_config["loss_dict"]["model_2"] + ) + self.origin_config["training"]["model_prob"]["model_4"] = deepcopy( + self.origin_config["training"]["model_prob"]["model_2"] + ) + self.origin_config["training"]["data_dict"]["model_4"] = deepcopy( + self.origin_config["training"]["data_dict"]["model_2"] + ) + self.origin_config["training"]["data_dict"]["model_4"]["stat_file"] = ( + self.origin_config[ + "training" + ]["data_dict"]["model_4"]["stat_file"].replace("model_2", "model_4") + ) + + # set finetune rules + # model_1 resuming from model_1 + # pass + + # model_2 fine-tuning from model_2 + self.origin_config["model"]["model_dict"]["model_2"]["finetune_head"] = ( + "model_2" + ) + + # new model_3 fine-tuning from model_2 + self.origin_config["model"]["model_dict"]["model_3"]["finetune_head"] = ( + "model_2" + ) + + # new model_4 fine-tuning with randomly initialized fitting net + # pass + + self.origin_config["model"], shared_links_finetune = preprocess_shared_params( + self.origin_config["model"] + ) + + finetune_model = self.config["training"].get("save_ckpt", "model.ckpt") + ".pd" + self.origin_config["model"], finetune_links = get_finetune_rules( + finetune_model, + self.origin_config["model"], + ) + self.origin_config = update_deepmd_input(self.origin_config, warning=True) + self.origin_config = normalize(self.origin_config, multi_task=True) + trainer_finetune = get_trainer( + deepcopy(self.origin_config), + finetune_model=finetune_model, + shared_links=shared_links_finetune, + 
finetune_links=finetune_links, + ) + + # check parameters + multi_state_dict_finetuned = trainer_finetune.wrapper.model.state_dict() + for state_key in multi_state_dict_finetuned: + if "model_1" in state_key: + np.testing.assert_allclose( + multi_state_dict[state_key].numpy(), + multi_state_dict_finetuned[state_key].numpy(), + ) + elif "model_2" in state_key and "out_bias" not in state_key: + np.testing.assert_allclose( + multi_state_dict[state_key].numpy(), + multi_state_dict_finetuned[state_key].numpy(), + ) + elif "model_3" in state_key and "out_bias" not in state_key: + np.testing.assert_allclose( + multi_state_dict[state_key.replace("model_3", "model_2")].numpy(), + multi_state_dict_finetuned[state_key].numpy(), + ) + elif ( + "model_4" in state_key + and "fitting_net" not in state_key + and "out_bias" not in state_key + ): + np.testing.assert_allclose( + multi_state_dict[state_key.replace("model_4", "model_2")].numpy(), + multi_state_dict_finetuned[state_key].numpy(), + ) + + # check running + trainer_finetune.run() + self.tearDown() + + def tearDown(self): + for f in os.listdir("."): + if f.startswith("model") and f.endswith(".pd"): + os.remove(f) + if f in ["lcurve.out"]: + os.remove(f) + if f in [self.stat_files]: + shutil.rmtree(f) + + +class TestMultiTaskSeA(unittest.TestCase, MultiTaskTrainTest): + def setUp(self): + multitask_se_e2_a = deepcopy(multitask_template) + multitask_se_e2_a["model"]["shared_dict"]["my_descriptor"] = model_se_e2_a[ + "descriptor" + ] + data_file = [str(Path(__file__).parent / "water/data/data_0")] + self.stat_files = "se_e2_a" + os.makedirs(self.stat_files, exist_ok=True) + self.config = multitask_se_e2_a + self.config["training"]["data_dict"]["model_1"]["training_data"]["systems"] = ( + data_file + ) + self.config["training"]["data_dict"]["model_1"]["validation_data"][ + "systems" + ] = data_file + self.config["training"]["data_dict"]["model_1"]["stat_file"] = ( + f"{self.stat_files}/model_1" + ) + self.config["training"]["data_dict"]["model_2"]["training_data"]["systems"] = ( + data_file + ) + self.config["training"]["data_dict"]["model_2"]["validation_data"][ + "systems" + ] = data_file + self.config["training"]["data_dict"]["model_2"]["stat_file"] = ( + f"{self.stat_files}/model_2" + ) + self.config["training"]["numb_steps"] = 1 + self.config["training"]["save_freq"] = 1 + self.origin_config = deepcopy(self.config) + self.config["model"], self.shared_links = preprocess_shared_params( + self.config["model"] + ) + + def tearDown(self) -> None: + MultiTaskTrainTest.tearDown(self) + + +if __name__ == "__main__": + unittest.main() diff --git a/source/tests/pd/test_neighbor_stat.py b/source/tests/pd/test_neighbor_stat.py new file mode 100644 index 0000000000..613150b7fc --- /dev/null +++ b/source/tests/pd/test_neighbor_stat.py @@ -0,0 +1,69 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import shutil +import unittest + +import dpdata +import numpy as np + +from deepmd.entrypoints.neighbor_stat import ( + neighbor_stat, +) + +from ..seed import ( + GLOBAL_SEED, +) + + +def gen_sys(nframes): + rng = np.random.default_rng(GLOBAL_SEED) + natoms = 1000 + data = {} + X, Y, Z = np.mgrid[0:2:3j, 0:2:3j, 0:2:3j] + positions = np.vstack([X.ravel(), Y.ravel(), Z.ravel()]).T # + 0.1 + data["coords"] = np.repeat(positions[np.newaxis, :, :], nframes, axis=0) + data["forces"] = rng.random([nframes, natoms, 3]) + data["cells"] = np.array([3.0, 0.0, 0.0, 0.0, 3.0, 0.0, 0.0, 0.0, 3.0]).reshape( + 1, 3, 3 + ) + data["energies"] = rng.random([nframes, 1]) + 
data["atom_names"] = ["TYPE"] + data["atom_numbs"] = [27] + data["atom_types"] = np.repeat(0, 27) + return data + + +class TestNeighborStat(unittest.TestCase): + def setUp(self): + data0 = gen_sys(1) + sys0 = dpdata.LabeledSystem() + sys0.data = data0 + sys0.to_deepmd_npy("system_0", set_size=1) + + def tearDown(self): + shutil.rmtree("system_0") + + def test_neighbor_stat(self): + for rcut in (0.0, 1.0, 2.0, 4.0): + for mixed_type in (True, False): + with self.subTest(rcut=rcut, mixed_type=mixed_type): + rcut += 1e-3 # prevent numerical errors + min_nbor_dist, max_nbor_size = neighbor_stat( + system="system_0", + rcut=rcut, + type_map=["TYPE", "NO_THIS_TYPE"], + mixed_type=mixed_type, + backend="paddle", + ) + upper = np.ceil(rcut) + 1 + X, Y, Z = np.mgrid[-upper:upper, -upper:upper, -upper:upper] + positions = np.vstack([X.ravel(), Y.ravel(), Z.ravel()]).T + # distance to (0,0,0) + distance = np.linalg.norm(positions, axis=1) + expected_neighbors = np.count_nonzero( + np.logical_and(distance > 0, distance <= rcut) + ) + self.assertAlmostEqual(min_nbor_dist, 1.0, 6) + ret = [expected_neighbors] + if not mixed_type: + ret.append(0) + np.testing.assert_array_equal(max_nbor_size, ret) diff --git a/source/tests/pd/test_sampler.py b/source/tests/pd/test_sampler.py new file mode 100644 index 0000000000..2af5a9c05c --- /dev/null +++ b/source/tests/pd/test_sampler.py @@ -0,0 +1,114 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import json +import os +import unittest +from pathlib import ( + Path, +) + +import numpy as np +import paddle +from paddle.io import ( + BatchSampler, + DataLoader, +) + +from deepmd.pd.utils.dataloader import ( + DpLoaderSet, + get_weighted_sampler, +) +from deepmd.tf.common import ( + expand_sys_str, +) +from deepmd.tf.utils import random as tf_random +from deepmd.tf.utils.data_system import ( + DeepmdDataSystem, +) + +CUR_DIR = os.path.dirname(__file__) + + +class TestSampler(unittest.TestCase): + def setUp(self): + with open(str(Path(__file__).parent / "water/se_e2_a.json")) as fin: + content = fin.read() + config = json.loads(content) + data_file = [str(Path(__file__).parent / "water/data/data_0")] + config["training"]["training_data"]["systems"] = data_file + config["training"]["validation_data"]["systems"] = data_file + model_config = config["model"] + self.rcut = model_config["descriptor"]["rcut"] + self.rcut_smth = model_config["descriptor"]["rcut_smth"] + self.sel = model_config["descriptor"]["sel"] + self.batch_size = config["training"]["training_data"]["batch_size"] + self.systems = config["training"]["validation_data"]["systems"] + if isinstance(self.systems, str): + self.systems = expand_sys_str(self.systems) + self.my_dataset = DpLoaderSet( + self.systems, + self.batch_size, + model_config["type_map"], + seed=10, + shuffle=False, + ) + + tf_random.seed(10) + self.dp_dataset = DeepmdDataSystem(self.systems, self.batch_size, 1, self.rcut) + + def test_sampler_debug_info(self): + dataloader = DataLoader( + self.my_dataset, + batch_sampler=BatchSampler( + get_weighted_sampler(self.my_dataset, prob_style="prob_sys_size"), + drop_last=False, + ), + num_workers=0, # setting to 0 diverges the behavior of its iterator; should be >=1 + # pin_memory=True, + ) + device = paddle.get_device() + paddle.set_device("cpu") + batch_data = next(iter(dataloader)) + paddle.set_device(device) + sid = batch_data["sid"] + fid = batch_data["fid"][0] + coord = batch_data["coord"].squeeze(0) + frame = self.my_dataset.systems[sid].__getitem__(fid) + 
self.assertTrue(np.allclose(coord, frame["coord"])) + + def test_auto_prob_uniform(self): + auto_prob_style = "prob_uniform" + sampler = get_weighted_sampler(self.my_dataset, prob_style=auto_prob_style) + my_probs = np.array(sampler.weights) + self.dp_dataset.set_sys_probs(auto_prob_style=auto_prob_style) + dp_probs = np.array(self.dp_dataset.sys_probs) + self.assertTrue(np.allclose(my_probs, dp_probs)) + + def test_auto_prob_sys_size(self): + auto_prob_style = "prob_sys_size" + sampler = get_weighted_sampler(self.my_dataset, prob_style=auto_prob_style) + my_probs = np.array(sampler.weights) + self.dp_dataset.set_sys_probs(auto_prob_style=auto_prob_style) + dp_probs = np.array(self.dp_dataset.sys_probs) + self.assertTrue(np.allclose(my_probs, dp_probs)) + + def test_auto_prob_sys_size_ext(self): + auto_prob_style = "prob_sys_size;0:1:0.2;1:3:0.8" + sampler = get_weighted_sampler(self.my_dataset, prob_style=auto_prob_style) + my_probs = np.array(sampler.weights) + self.dp_dataset.set_sys_probs(auto_prob_style=auto_prob_style) + dp_probs = np.array(self.dp_dataset.sys_probs) + self.assertTrue(np.allclose(my_probs, dp_probs)) + + def test_sys_probs(self): + sys_probs = [0.1, 0.4, 0.5] + sampler = get_weighted_sampler( + self.my_dataset, prob_style=sys_probs, sys_prob=True + ) + my_probs = np.array(sampler.weights) + self.dp_dataset.set_sys_probs(sys_probs=sys_probs) + dp_probs = np.array(self.dp_dataset.sys_probs) + self.assertTrue(np.allclose(my_probs, dp_probs)) + + +if __name__ == "__main__": + unittest.main() diff --git a/source/tests/pd/test_training.py b/source/tests/pd/test_training.py new file mode 100644 index 0000000000..d4e7309a65 --- /dev/null +++ b/source/tests/pd/test_training.py @@ -0,0 +1,176 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import json +import os +import shutil +import unittest +from copy import ( + deepcopy, +) +from pathlib import ( + Path, +) + +import numpy as np + +from deepmd.pd.entrypoints.main import ( + get_trainer, +) +from deepmd.pd.utils.finetune import ( + get_finetune_rules, +) + +from .model.test_permutation import ( + model_se_e2_a, +) + + +class DPTrainTest: + def test_dp_train(self): + # test training from scratch + trainer = get_trainer(deepcopy(self.config)) + trainer.run() + state_dict_trained = trainer.wrapper.model.state_dict() + + # test fine-tuning using same input + finetune_model = self.config["training"].get("save_ckpt", "model.ckpt") + ".pd" + self.config["model"], finetune_links = get_finetune_rules( + finetune_model, + self.config["model"], + ) + trainer_finetune = get_trainer( + deepcopy(self.config), + finetune_model=finetune_model, + finetune_links=finetune_links, + ) + + # test fine-tuning using empty input + self.config_empty = deepcopy(self.config) + if "descriptor" in self.config_empty["model"]: + self.config_empty["model"]["descriptor"] = {} + if "fitting_net" in self.config_empty["model"]: + self.config_empty["model"]["fitting_net"] = {} + self.config_empty["model"], finetune_links = get_finetune_rules( + finetune_model, + self.config_empty["model"], + change_model_params=True, + ) + trainer_finetune_empty = get_trainer( + deepcopy(self.config_empty), + finetune_model=finetune_model, + finetune_links=finetune_links, + ) + + # test fine-tuning using random fitting + self.config["model"], finetune_links = get_finetune_rules( + finetune_model, self.config["model"], model_branch="RANDOM" + ) + trainer_finetune_random = get_trainer( + deepcopy(self.config_empty), + finetune_model=finetune_model, + 
finetune_links=finetune_links, + ) + + # check parameters + state_dict_finetuned = trainer_finetune.wrapper.model.state_dict() + state_dict_finetuned_empty = trainer_finetune_empty.wrapper.model.state_dict() + state_dict_finetuned_random = trainer_finetune_random.wrapper.model.state_dict() + for state_key in state_dict_finetuned: + if "out_bias" not in state_key and "out_std" not in state_key: + np.testing.assert_allclose( + state_dict_trained[state_key].numpy(), + state_dict_finetuned[state_key].numpy(), + ) + np.testing.assert_allclose( + state_dict_trained[state_key].numpy(), + state_dict_finetuned_empty[state_key].numpy(), + ) + if "fitting_net" not in state_key: + np.testing.assert_allclose( + state_dict_trained[state_key].numpy(), + state_dict_finetuned_random[state_key].numpy(), + ) + + # check running + trainer_finetune.run() + trainer_finetune_empty.run() + trainer_finetune_random.run() + + def test_trainable(self): + fix_params = deepcopy(self.config) + fix_params["model"]["descriptor"]["trainable"] = False + fix_params["model"]["fitting_net"]["trainable"] = False + free_descriptor = hasattr(self, "not_all_grad") and self.not_all_grad + if free_descriptor: + # cannot set requires_grad to False for all parameters, + # because the input coord has no grad, so the loss would have no grad either; + # we only check trainable for the fitting net + fix_params["model"]["descriptor"]["trainable"] = True + trainer_fix = get_trainer(fix_params) + model_dict_before_training = deepcopy( + trainer_fix.model.get_fitting_net().state_dict() + ) + trainer_fix.run() + model_dict_after_training = deepcopy( + trainer_fix.model.get_fitting_net().state_dict() + ) + else: + trainer_fix = get_trainer(fix_params) + model_dict_before_training = deepcopy(trainer_fix.model.state_dict()) + trainer_fix.run() + model_dict_after_training = deepcopy(trainer_fix.model.state_dict()) + for key in model_dict_before_training: + np.testing.assert_allclose( + model_dict_before_training[key].numpy(), + model_dict_after_training[key].numpy(), + ) + + def tearDown(self): + for f in os.listdir("."): + if f.startswith("model") and f.endswith(".pd"): + os.remove(f) + if f in ["lcurve.out"]: + os.remove(f) + if f in ["stat_files"]: + shutil.rmtree(f) + + +class TestEnergyModelSeA(unittest.TestCase, DPTrainTest): + def setUp(self): + input_json = str(Path(__file__).parent / "water/se_atten.json") + with open(input_json) as f: + self.config = json.load(f) + data_file = [str(Path(__file__).parent / "water/data/data_0")] + self.config["training"]["training_data"]["systems"] = data_file + self.config["training"]["validation_data"]["systems"] = data_file + self.config["model"] = deepcopy(model_se_e2_a) + self.config["training"]["numb_steps"] = 1 + self.config["training"]["save_freq"] = 1 + + def tearDown(self) -> None: + DPTrainTest.tearDown(self) + + +class TestFparam(unittest.TestCase, DPTrainTest): + """Test if `fparam` can be loaded correctly.""" + + def setUp(self): + input_json = str(Path(__file__).parent / "water/se_atten.json") + with open(input_json) as f: + self.config = json.load(f) + data_file = [str(Path(__file__).parent / "water/data/data_0")] + self.config["training"]["training_data"]["systems"] = data_file + self.config["training"]["validation_data"]["systems"] = data_file + self.config["model"] = deepcopy(model_se_e2_a) + self.config["model"]["fitting_net"]["numb_fparam"] = 1 + self.config["training"]["numb_steps"] = 1 + self.config["training"]["save_freq"] = 1 + self.set_path = Path(__file__).parent / "water/data/data_0" / 
"set.000" + shutil.copyfile(self.set_path / "energy.npy", self.set_path / "fparam.npy") + + def tearDown(self) -> None: + (self.set_path / "fparam.npy").unlink(missing_ok=True) + DPTrainTest.tearDown(self) + + +if __name__ == "__main__": + unittest.main() diff --git a/source/tests/pd/test_update_sel.py b/source/tests/pd/test_update_sel.py new file mode 100644 index 0000000000..e7b1acf6ff --- /dev/null +++ b/source/tests/pd/test_update_sel.py @@ -0,0 +1,194 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import random +import unittest +from unittest.mock import ( + patch, +) + +from deepmd.pd.model.model.model import ( + BaseModel, +) +from deepmd.pd.utils.update_sel import ( + UpdateSel, +) + +from ..seed import ( + GLOBAL_SEED, +) + + +def update_sel(jdata): + type_map = jdata["model"].get("type_map") + train_data = None + jdata["model"], _ = BaseModel.update_sel(train_data, type_map, jdata["model"]) + return jdata + + +class TestTrain(unittest.TestCase): + def setUp(self) -> None: + self.update_sel = UpdateSel() + self.mock_min_nbor_dist = random.Random(GLOBAL_SEED).random() + return super().setUp() + + @patch("deepmd.pd.utils.update_sel.UpdateSel.get_nbor_stat") + def test_update_one_sel(self, sel_mock): + sel_mock.return_value = self.mock_min_nbor_dist, [10, 20] + + min_nbor_dist, sel = self.update_sel.update_one_sel(None, None, 6, "auto") + # self.assertEqual(descriptor['sel'], [11,22]) + self.assertEqual(sel, [12, 24]) + self.assertAlmostEqual(min_nbor_dist, self.mock_min_nbor_dist) + min_nbor_dist, sel = self.update_sel.update_one_sel(None, None, 6, "auto:1.5") + # self.assertEqual(descriptor['sel'], [15,30]) + self.assertEqual(sel, [16, 32]) + self.assertAlmostEqual(min_nbor_dist, self.mock_min_nbor_dist) + + @unittest.skip("Skip for not implemented yet") + @patch("deepmd.pd.utils.update_sel.UpdateSel.get_nbor_stat") + def test_update_sel_hybrid(self, sel_mock): + sel_mock.return_value = self.mock_min_nbor_dist, [10, 20] + + jdata = { + "model": { + "descriptor": { + "type": "hybrid", + "list": [ + {"type": "se_e2_a", "rcut": 6, "sel": "auto"}, + {"type": "se_e2_a", "rcut": 6, "sel": "auto:1.5"}, + ], + } + }, + "training": {"training_data": {}}, + } + expected_out = { + "model": { + "descriptor": { + "type": "hybrid", + "list": [ + {"type": "se_e2_a", "rcut": 6, "sel": [12, 24]}, + {"type": "se_e2_a", "rcut": 6, "sel": [16, 32]}, + ], + } + }, + "training": {"training_data": {}}, + } + jdata = update_sel(jdata) + self.assertEqual(jdata, expected_out) + + @patch("deepmd.pd.utils.update_sel.UpdateSel.get_nbor_stat") + def test_update_sel(self, sel_mock): + sel_mock.return_value = self.mock_min_nbor_dist, [10, 20] + + jdata = { + "model": {"descriptor": {"type": "se_e2_a", "rcut": 6, "sel": "auto"}}, + "training": {"training_data": {}}, + } + expected_out = { + "model": {"descriptor": {"type": "se_e2_a", "rcut": 6, "sel": [12, 24]}}, + "training": {"training_data": {}}, + } + jdata = update_sel(jdata) + self.assertEqual(jdata, expected_out) + + @unittest.skip("Skip for not implemented yet") + @patch("deepmd.pd.utils.update_sel.UpdateSel.get_nbor_stat") + def test_update_sel_atten_auto(self, sel_mock): + sel_mock.return_value = self.mock_min_nbor_dist, [25] + + jdata = { + "model": { + "descriptor": { + "type": "se_atten", + "sel": "auto", + "rcut": 6, + } + }, + "training": {"training_data": {}}, + } + expected_out = { + "model": { + "descriptor": { + "type": "se_atten", + "sel": 28, + "rcut": 6, + } + }, + "training": {"training_data": {}}, + } + jdata = update_sel(jdata) + 
self.assertEqual(jdata, expected_out) + + @unittest.skip("Skip for not implemented yet") + @patch("deepmd.pd.utils.update_sel.UpdateSel.get_nbor_stat") + def test_update_sel_atten_int(self, sel_mock): + sel_mock.return_value = self.mock_min_nbor_dist, [25] + + jdata = { + "model": { + "descriptor": { + "type": "se_atten", + "sel": 30, + "rcut": 6, + } + }, + "training": {"training_data": {}}, + } + expected_out = { + "model": { + "descriptor": { + "type": "se_atten", + "sel": 30, + "rcut": 6, + } + }, + "training": {"training_data": {}}, + } + jdata = update_sel(jdata) + self.assertEqual(jdata, expected_out) + + @unittest.skip("Skip for not implemented yet") + @patch("deepmd.pd.utils.update_sel.UpdateSel.get_nbor_stat") + def test_update_sel_atten_list(self, sel_mock): + sel_mock.return_value = self.mock_min_nbor_dist, [25] + + jdata = { + "model": { + "descriptor": { + "type": "se_atten", + "sel": 30, + "rcut": 6, + } + }, + "training": {"training_data": {}}, + } + expected_out = { + "model": { + "descriptor": { + "type": "se_atten", + "sel": 30, + "rcut": 6, + } + }, + "training": {"training_data": {}}, + } + jdata = update_sel(jdata) + self.assertEqual(jdata, expected_out) + + def test_skip_frozen(self): + jdata = { + "model": { + "type": "frozen", + }, + "training": {"training_data": {}}, + } + expected_out = jdata.copy() + jdata = update_sel(jdata) + self.assertEqual(jdata, expected_out) + + def test_wrap_up_4(self): + self.assertEqual(self.update_sel.wrap_up_4(12), 3 * 4) + self.assertEqual(self.update_sel.wrap_up_4(13), 4 * 4) + self.assertEqual(self.update_sel.wrap_up_4(14), 4 * 4) + self.assertEqual(self.update_sel.wrap_up_4(15), 4 * 4) + self.assertEqual(self.update_sel.wrap_up_4(16), 4 * 4) + self.assertEqual(self.update_sel.wrap_up_4(17), 5 * 4) diff --git a/source/tests/pd/test_utils.py b/source/tests/pd/test_utils.py new file mode 100644 index 0000000000..8d25cff964 --- /dev/null +++ b/source/tests/pd/test_utils.py @@ -0,0 +1,35 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import unittest + +import numpy as np +import paddle + +from deepmd.pd.utils.utils import ( + to_numpy_array, + to_paddle_tensor, +) + +from ..seed import ( + GLOBAL_SEED, +) + + +class TestCvt(unittest.TestCase): + def test_to_numpy(self): + rng = np.random.default_rng(GLOBAL_SEED) + foo = rng.normal([3, 4]) + for ptp, npp in zip( + [paddle.float16, paddle.float32, paddle.float64], + [np.float16, np.float32, np.float64], + ): + foo = foo.astype(npp) + bar = to_paddle_tensor(foo) + self.assertEqual(bar.dtype, ptp) + onk = to_numpy_array(bar) + self.assertEqual(onk.dtype, npp) + with self.assertRaises(ValueError) as ee: + foo = foo.astype(np.int8) + bar = to_paddle_tensor(foo) + with self.assertRaises(ValueError) as ee: + bar = to_paddle_tensor(foo) + bar = to_numpy_array(bar.int()) diff --git a/source/tests/pd/water b/source/tests/pd/water new file mode 120000 index 0000000000..9e74b75a82 --- /dev/null +++ b/source/tests/pd/water @@ -0,0 +1 @@ +model/water/ \ No newline at end of file From 4b92b6d8a57a543f8d826e6c87dd4dac6b722566 Mon Sep 17 00:00:00 2001 From: Chun Cai Date: Thu, 28 Nov 2024 02:00:30 +0800 Subject: [PATCH 06/43] Perf: print summary on rank 0 (#4434) ## Summary by CodeRabbit - **Bug Fixes** - Adjusted the summary printing functionality to ensure it only executes from the main process in distributed settings, preventing duplicate outputs. 
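Conceptually, the change below reduces to a small rank-0 guard around the printing call. A minimal sketch of that pattern, where the `emit` callable is a hypothetical stand-in for the real summary printer:

```python
import torch.distributed as dist


def print_once(emit) -> None:
    # dist.get_rank() is only valid after init_process_group(), so fall
    # back to rank 0 when the default process group is not initialized.
    rank = dist.get_rank() if dist.is_initialized() else 0
    if rank == 0:
        emit()
```

In a single-process run `dist.is_initialized()` is False and the summary still prints; under a distributed launcher only rank 0 emits it.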
--- deepmd/pt/utils/dataloader.py | 28 +++++++++++++++------------- 1 file changed, 15 insertions(+), 13 deletions(-) diff --git a/deepmd/pt/utils/dataloader.py b/deepmd/pt/utils/dataloader.py index 9920622792..2fea6b72d2 100644 --- a/deepmd/pt/utils/dataloader.py +++ b/deepmd/pt/utils/dataloader.py @@ -185,19 +185,21 @@ def print_summary( name: str, prob: list[float], ) -> None: - print_summary( - name, - len(self.systems), - [ss.system for ss in self.systems], - [ss._natoms for ss in self.systems], - self.batch_sizes, - [ - ss._data_system.get_sys_numb_batch(self.batch_sizes[ii]) - for ii, ss in enumerate(self.systems) - ], - prob, - [ss._data_system.pbc for ss in self.systems], - ) + rank = dist.get_rank() if dist.is_initialized() else 0 + if rank == 0: + print_summary( + name, + len(self.systems), + [ss.system for ss in self.systems], + [ss._natoms for ss in self.systems], + self.batch_sizes, + [ + ss._data_system.get_sys_numb_batch(self.batch_sizes[ii]) + for ii, ss in enumerate(self.systems) + ], + prob, + [ss._data_system.pbc for ss in self.systems], + ) _sentinel = object() From 037cf3f3add2a24ae7c5ecc412c8f1c7669a21ea Mon Sep 17 00:00:00 2001 From: Chun Cai Date: Thu, 28 Nov 2024 06:08:42 +0800 Subject: [PATCH 07/43] perf: optimize training loop (#4426) Improvements to the training process: * [`deepmd/pt/train/training.py`](diffhunk://#diff-a90c90dc0e6a17fbe2e930f91182805b83260484c9dc1cfac3331378ffa34935R659): Added a check that skips setting the model to training mode when it is already in that mode; profiling showed that recursively propagating the mode to every submodule takes measurable time. * [`deepmd/pt/train/training.py`](diffhunk://#diff-a90c90dc0e6a17fbe2e930f91182805b83260484c9dc1cfac3331378ffa34935L686-L690): Modified the gradient clipping function to include the `error_if_nonfinite` parameter, and removed the manual check for non-finite gradients and the associated exception raising. ## Summary by CodeRabbit - **New Features** - Improved training loop with enhanced error handling and control flow. - Updated gradient clipping logic for better error detection. - Refined logging functionality for training and validation results. - **Bug Fixes** - Prevented redundant training calls by adding conditional checks. - **Documentation** - Clarified method logic in the `Trainer` class without changing method signatures. 
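To see what the new clipping call does in isolation, here is a toy sketch; the `Linear` module and random loss are illustrative only, not the trainer's real wiring:

```python
import torch

model = torch.nn.Linear(4, 1)
loss = model(torch.randn(2, 4)).sum()
loss.backward()

# Clips gradients in place and raises a RuntimeError when the total
# gradient norm is NaN/Inf, so no separate torch.isfinite check is needed.
torch.nn.utils.clip_grad_norm_(
    model.parameters(), max_norm=1.0, error_if_nonfinite=True
)
```

Note that the exception type changes from the previously hand-raised `FloatingPointError` to the `RuntimeError` raised by PyTorch itself.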
--------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- deepmd/pt/train/training.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/deepmd/pt/train/training.py b/deepmd/pt/train/training.py index f74c4769bf..af6e48191d 100644 --- a/deepmd/pt/train/training.py +++ b/deepmd/pt/train/training.py @@ -656,7 +656,6 @@ def step(_step_id, task_key="Default") -> None: # PyTorch Profiler if self.enable_profiler or self.profiling: prof.step() - self.wrapper.train() if isinstance(self.lr_exp, dict): _lr = self.lr_exp[task_key] else: @@ -682,12 +681,11 @@ def step(_step_id, task_key="Default") -> None: ) loss.backward() if self.gradient_max_norm > 0.0: - grad_norm = torch.nn.utils.clip_grad_norm_( - self.wrapper.parameters(), self.gradient_max_norm + torch.nn.utils.clip_grad_norm_( + self.wrapper.parameters(), + self.gradient_max_norm, + error_if_nonfinite=True, ) - if not torch.isfinite(grad_norm).all(): - # check local gradnorm single GPU case, trigger NanDetector - raise FloatingPointError("gradients are Nan/Inf") with torch.device("cpu"): self.optimizer.step() self.scheduler.step() @@ -766,7 +764,7 @@ def fake_model(): if self.display_in_training and ( display_step_id % self.disp_freq == 0 or display_step_id == 1 ): - self.wrapper.eval() + self.wrapper.eval() # Will set to train mode before finishing validation def log_loss_train(_loss, _more_loss, _task_key="Default"): results = {} @@ -872,6 +870,7 @@ def log_loss_valid(_task_key="Default"): learning_rate=None, ) ) + self.wrapper.train() current_time = time.time() train_time = current_time - self.t0 @@ -927,6 +926,7 @@ def log_loss_valid(_task_key="Default"): f"{task_key}/{item}", more_loss[item], display_step_id ) + self.wrapper.train() self.t0 = time.time() self.total_train_time = 0.0 for step_id in range(self.num_steps): From f7e4cdfcbff021286bcbee7b7c1a4237abf99448 Mon Sep 17 00:00:00 2001 From: HydrogenSulfate <490868991@qq.com> Date: Thu, 28 Nov 2024 12:23:54 +0800 Subject: [PATCH 08/43] pd: skip certain UT and fix paddle ver in test_cuda.yml (#4439) In two unit tests under `pd/`, `paddle.jit.save` is called, which leads to occasional cuda error 709. Until this issue is resolved, temporarily mark these two unit tests as skipped (`pd/test_dp_show.py` and `pd/test_multitask.py`). ![image](https://github.com/user-attachments/assets/45af373f-27cf-4c31-915d-d47296426b6b) ![image](https://github.com/user-attachments/assets/e4413b7d-d530-4d9e-a2d2-f3695e12f9e3) ![image](https://github.com/user-attachments/assets/62f4a378-52c1-4e4d-ab23-a9b41d982c97) Meanwhile, the version of paddlepaddle-gpu in test_cuda.yml has been pinned to a specific nightly build. @njzjz ## Summary by CodeRabbit - **Bug Fixes** - Updated test classes to skip execution due to unresolved CUDA errors. - **Tests** - Introduced a new test class for multitask models. - Added assertions to validate multitask model configurations. - Retained cleanup methods in test classes to manage generated files. 
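In isolation, the class-level skip applied in the diff below looks like this (class and test names here are hypothetical):

```python
import unittest


@unittest.skip("Skip until solving cuda error 709 in jit.save")
class TestJitSave(unittest.TestCase):
    def test_save(self):
        # Never executed while the class-level skip is in place; the
        # runner reports every test in the class as skipped, not passed.
        self.fail("unreachable")


if __name__ == "__main__":
    unittest.main()
```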
--- .github/workflows/test_cuda.yml | 2 +- source/tests/pd/test_dp_show.py | 2 ++ source/tests/pd/test_multitask.py | 2 ++ 3 files changed, 5 insertions(+), 1 deletion(-) diff --git a/.github/workflows/test_cuda.yml b/.github/workflows/test_cuda.yml index db0dfb6c61..a5b7583656 100644 --- a/.github/workflows/test_cuda.yml +++ b/.github/workflows/test_cuda.yml @@ -51,7 +51,7 @@ jobs: - run: | export PYTORCH_ROOT=$(python -c 'import torch;print(torch.__path__[0])') export TENSORFLOW_ROOT=$(python -c 'import importlib,pathlib;print(pathlib.Path(importlib.util.find_spec("tensorflow").origin).parent)') - source/install/uv_with_retry.sh pip install --system --pre paddlepaddle-gpu -i https://www.paddlepaddle.org.cn/packages/nightly/cu123/ + source/install/uv_with_retry.sh pip install --system --pre https://paddle-whl.bj.bcebos.com/nightly/cu123/paddlepaddle-gpu/paddlepaddle_gpu-3.0.0.dev20241126-cp311-cp311-linux_x86_64.whl source/install/uv_with_retry.sh pip install --system -v -e .[gpu,test,lmp,cu12,torch,jax] mpi4py env: DP_VARIANT: cuda diff --git a/source/tests/pd/test_dp_show.py b/source/tests/pd/test_dp_show.py index c1c20ff3a1..9a856a6bff 100644 --- a/source/tests/pd/test_dp_show.py +++ b/source/tests/pd/test_dp_show.py @@ -29,6 +29,7 @@ ) +@unittest.skip("Skip until solving cuda error 709 in jit.save") class TestSingleTaskModel(unittest.TestCase): def setUp(self): input_json = str(Path(__file__).parent / "water/se_atten.json") @@ -101,6 +102,7 @@ def tearDown(self): shutil.rmtree(f) +@unittest.skip("Skip until solving cuda error 709 in jit.save") class TestMultiTaskModel(unittest.TestCase): def setUp(self): input_json = str(Path(__file__).parent / "water/multitask.json") diff --git a/source/tests/pd/test_multitask.py b/source/tests/pd/test_multitask.py index e3d4cfa7de..65210d07b3 100644 --- a/source/tests/pd/test_multitask.py +++ b/source/tests/pd/test_multitask.py @@ -40,6 +40,7 @@ def setUpModule(): multitask_template = json.load(f) +@unittest.skip("Skip until solving cuda error 709 in jit.save") class MultiTaskTrainTest: def test_multitask_train(self): # test multitask training @@ -181,6 +182,7 @@ def tearDown(self): shutil.rmtree(f) +@unittest.skip("Skip until solving cuda error 709 in jit.save") class TestMultiTaskSeA(unittest.TestCase, MultiTaskTrainTest): def setUp(self): multitask_se_e2_a = deepcopy(multitask_template) From a852aa9f4611356b89880e4a5a06b15b29e4a3fb Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Thu, 28 Nov 2024 00:31:50 -0500 Subject: [PATCH 09/43] fix: unmark `eval_pd` as `abstractmethod` (#4438) Many classes don't have this method and become abstract classes. It looks to me like these tests are skipped silently and don't throw errors. ## Summary by CodeRabbit - **Bug Fixes** - Updated the `eval_pd` method to provide a clearer indication of its implementation status by raising a `NotImplementedError`. - Changed the `skip_pd` class variable to always skip Paddle-related tests. - **Tests** - Adjusted the `skip_pd` property in the `TestSeA` and `TestEner` classes to directly reflect the installation status of Paddle. 
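A small self-contained illustration of the pitfall being fixed (class names are hypothetical): while `eval_pd` is an `@abstractmethod`, every subclass that does not override it is itself abstract and cannot be instantiated, so its tests never run; a concrete default that raises `NotImplementedError` keeps subclasses instantiable and only fails when the method is actually called.

```python
from abc import ABC, abstractmethod


class BackendA(ABC):
    @abstractmethod
    def eval_pd(self): ...


class NoPdA(BackendA):  # does not override eval_pd
    pass


try:
    NoPdA()
except TypeError as err:
    print(err)  # can't instantiate abstract class NoPdA ...


class BackendB(ABC):
    def eval_pd(self):
        # Concrete default: subclasses stay instantiable; only an
        # actual call to eval_pd raises.
        raise NotImplementedError("Not implemented")


class NoPdB(BackendB):
    pass


NoPdB()  # fine; eval_pd() would raise NotImplementedError if invoked
```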
--------- Signed-off-by: Jinzhe Zeng --- source/tests/consistent/common.py | 4 ++-- source/tests/consistent/descriptor/test_se_e2_a.py | 2 +- source/tests/consistent/fitting/test_ener.py | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/source/tests/consistent/common.py b/source/tests/consistent/common.py index cb4dbed391..a08e849c6c 100644 --- a/source/tests/consistent/common.py +++ b/source/tests/consistent/common.py @@ -101,7 +101,7 @@ class CommonTest(ABC): # we may usually skip jax before jax is fully supported skip_jax: ClassVar[bool] = True """Whether to skip the JAX model.""" - skip_pd: ClassVar[bool] = not INSTALLED_PD + skip_pd: ClassVar[bool] = True """Whether to skip the Paddle model.""" skip_array_api_strict: ClassVar[bool] = True """Whether to skip the array_api_strict model.""" @@ -185,7 +185,6 @@ def eval_jax(self, jax_obj: Any) -> Any: """ raise NotImplementedError("Not implemented") - @abstractmethod def eval_pd(self, pd_obj: Any) -> Any: """Evaluate the return value of PD. @@ -194,6 +193,7 @@ def eval_pd(self, pd_obj: Any) -> Any: pd_obj : Any The object of PD """ + raise NotImplementedError("Not implemented") def eval_array_api_strict(self, array_api_strict_obj: Any) -> Any: """Evaluate the return value of array_api_strict. diff --git a/source/tests/consistent/descriptor/test_se_e2_a.py b/source/tests/consistent/descriptor/test_se_e2_a.py index a463960fb7..8838696108 100644 --- a/source/tests/consistent/descriptor/test_se_e2_a.py +++ b/source/tests/consistent/descriptor/test_se_e2_a.py @@ -136,7 +136,7 @@ def skip_pd(self) -> bool: precision, env_protection, ) = self.param - return CommonTest.skip_pd + return not INSTALLED_PD @property def skip_array_api_strict(self) -> bool: diff --git a/source/tests/consistent/fitting/test_ener.py b/source/tests/consistent/fitting/test_ener.py index 12fafa7ba8..f5a79acabe 100644 --- a/source/tests/consistent/fitting/test_ener.py +++ b/source/tests/consistent/fitting/test_ener.py @@ -135,7 +135,7 @@ def skip_pd(self) -> bool: ) = self.param # Paddle do not support "bfloat16" in some kernels, # so skip this in CI test - return CommonTest.skip_pd or precision == "bfloat16" + return not INSTALLED_PD or precision == "bfloat16" tf_class = EnerFittingTF dp_class = EnerFittingDP From a6b61b9b9913eef4ffec2d047e55015dd811b259 Mon Sep 17 00:00:00 2001 From: Duo <50307526+iProzd@users.noreply.github.com> Date: Thu, 28 Nov 2024 17:12:12 +0800 Subject: [PATCH 10/43] feat(pt/dp): support case embedding and sharable fitting (#4417) ## Summary by CodeRabbit - **New Features** - Introduced a `set_case_embd` method across multiple atomic model classes to enhance case embedding functionality. - Added a `dim_case_embd` parameter in various fitting classes to support case-specific embedding dimensions. - Updated serialization methods to include `dim_case_embd` in the output. - Added a comprehensive JSON configuration for multitask models in water simulations. - Introduced a new function to validate case embedding dimensions in multi-task training. - Updated the `share_params` method in the `DescrptDPA2` class to streamline parameter sharing logic. - **Bug Fixes** - Improved version compatibility checks in deserialization methods across several classes. - **Documentation** - Enhanced documentation for multi-task training, emphasizing the transition to PyTorch and detailing configuration changes. - **Tests** - Updated test cases to incorporate new parameters and configurations related to case embeddings. 
- Introduced new tests for multitask learning configurations. --------- Signed-off-by: Duo <50307526+iProzd@users.noreply.github.com> Co-authored-by: coderabbitai[bot] <136622811+coderabbitai[bot]@users.noreply.github.com> --- .../dpmodel/atomic_model/dp_atomic_model.py | 7 + .../atomic_model/linear_atomic_model.py | 16 ++ .../atomic_model/make_base_atomic_model.py | 8 + .../atomic_model/pairtab_atomic_model.py | 9 + deepmd/dpmodel/fitting/dipole_fitting.py | 4 +- deepmd/dpmodel/fitting/dos_fitting.py | 4 +- deepmd/dpmodel/fitting/ener_fitting.py | 4 +- deepmd/dpmodel/fitting/general_fitting.py | 35 +++- deepmd/dpmodel/fitting/invar_fitting.py | 4 +- .../dpmodel/fitting/polarizability_fitting.py | 6 +- deepmd/dpmodel/fitting/property_fitting.py | 4 +- deepmd/dpmodel/model/make_model.py | 3 + .../pd/model/atomic_model/dp_atomic_model.py | 7 + deepmd/pd/model/model/make_model.py | 3 + deepmd/pd/model/task/ener.py | 4 +- deepmd/pd/model/task/fitting.py | 23 ++- deepmd/pd/model/task/invar_fitting.py | 7 +- .../pt/model/atomic_model/dp_atomic_model.py | 7 + .../model/atomic_model/linear_atomic_model.py | 16 ++ .../atomic_model/pairtab_atomic_model.py | 9 + deepmd/pt/model/descriptor/dpa2.py | 19 +-- deepmd/pt/model/model/make_model.py | 3 + deepmd/pt/model/task/dipole.py | 6 +- deepmd/pt/model/task/dos.py | 4 +- deepmd/pt/model/task/ener.py | 4 +- deepmd/pt/model/task/fitting.py | 53 +++++- deepmd/pt/model/task/invar_fitting.py | 6 +- deepmd/pt/model/task/polarizability.py | 8 +- deepmd/pt/model/task/property.py | 6 +- deepmd/pt/train/training.py | 32 +++- deepmd/pt/train/wrapper.py | 12 +- deepmd/tf/fit/dipole.py | 11 +- deepmd/tf/fit/dos.py | 12 +- deepmd/tf/fit/ener.py | 12 +- deepmd/tf/fit/polar.py | 11 +- deepmd/utils/argcheck.py | 40 +++++ doc/train/multi-task-training.md | 21 ++- .../pytorch_example/input_torch_sharefit.json | 155 ++++++++++++++++++ source/tests/common/test_examples.py | 1 + source/tests/pt/model/water/multitask.json | 3 +- .../pt/model/water/multitask_sharefit.json | 134 +++++++++++++++ source/tests/pt/test_multitask.py | 55 ++++++- .../universal/dpmodel/fitting/test_fitting.py | 15 +- 43 files changed, 732 insertions(+), 71 deletions(-) create mode 100644 examples/water_multi_task/pytorch_example/input_torch_sharefit.json create mode 100644 source/tests/pt/model/water/multitask_sharefit.json diff --git a/deepmd/dpmodel/atomic_model/dp_atomic_model.py b/deepmd/dpmodel/atomic_model/dp_atomic_model.py index 749fe6bbf9..2fa072cc78 100644 --- a/deepmd/dpmodel/atomic_model/dp_atomic_model.py +++ b/deepmd/dpmodel/atomic_model/dp_atomic_model.py @@ -65,6 +65,13 @@ def get_sel(self) -> list[int]: """Get the neighbor selection.""" return self.descriptor.get_sel() + def set_case_embd(self, case_idx: int): + """ + Set the case embedding of this atomic model by the given case_idx, + typically concatenated with the output of the descriptor and fed into the fitting net. + """ + self.fitting.set_case_embd(case_idx) + def mixed_types(self) -> bool: """If true, the model 1. 
assumes total number of atoms aligned across frames; diff --git a/deepmd/dpmodel/atomic_model/linear_atomic_model.py b/deepmd/dpmodel/atomic_model/linear_atomic_model.py index 9676b34bfd..8108292bd2 100644 --- a/deepmd/dpmodel/atomic_model/linear_atomic_model.py +++ b/deepmd/dpmodel/atomic_model/linear_atomic_model.py @@ -134,6 +134,14 @@ def get_model_rcuts(self) -> list[float]: def get_sel(self) -> list[int]: return [max([model.get_nsel() for model in self.models])] + def set_case_embd(self, case_idx: int): + """ + Set the case embedding of this atomic model by the given case_idx, + typically concatenated with the output of the descriptor and fed into the fitting net. + """ + for model in self.models: + model.set_case_embd(case_idx) + def get_model_nsels(self) -> list[int]: """Get the processed sels for each individual models. Not distinguishing types.""" return [model.get_nsel() for model in self.models] @@ -428,6 +436,14 @@ def deserialize(cls, data) -> "DPZBLLinearEnergyAtomicModel": data.pop("type", None) return super().deserialize(data) + def set_case_embd(self, case_idx: int): + """ + Set the case embedding of this atomic model by the given case_idx, + typically concatenated with the output of the descriptor and fed into the fitting net. + """ + # only set case_idx for dpmodel + self.models[0].set_case_embd(case_idx) + def _compute_weight( self, extended_coord: np.ndarray, diff --git a/deepmd/dpmodel/atomic_model/make_base_atomic_model.py b/deepmd/dpmodel/atomic_model/make_base_atomic_model.py index a4c38518a3..01caa7cd64 100644 --- a/deepmd/dpmodel/atomic_model/make_base_atomic_model.py +++ b/deepmd/dpmodel/atomic_model/make_base_atomic_model.py @@ -68,6 +68,14 @@ def get_sel(self) -> list[int]: """Returns the number of selected atoms for each type.""" pass + @abstractmethod + def set_case_embd(self, case_idx: int) -> None: + """ + Set the case embedding of this atomic model by the given case_idx, + typically concatenated with the output of the descriptor and fed into the fitting net. + """ + pass + def get_nsel(self) -> int: """Returns the total number of selected neighboring atoms in the cut-off radius.""" return sum(self.get_sel()) diff --git a/deepmd/dpmodel/atomic_model/pairtab_atomic_model.py b/deepmd/dpmodel/atomic_model/pairtab_atomic_model.py index a4bffe508d..0c35320e7f 100644 --- a/deepmd/dpmodel/atomic_model/pairtab_atomic_model.py +++ b/deepmd/dpmodel/atomic_model/pairtab_atomic_model.py @@ -120,6 +120,15 @@ def get_type_map(self) -> list[str]: def get_sel(self) -> list[int]: return [self.sel] + def set_case_embd(self, case_idx: int): + """ + Set the case embedding of this atomic model by the given case_idx, + typically concatenated with the output of the descriptor and fed into the fitting net. + """ + raise NotImplementedError( + "Case identification not supported for PairTabAtomicModel!" 
+ ) + def get_nsel(self) -> int: return self.sel diff --git a/deepmd/dpmodel/fitting/dipole_fitting.py b/deepmd/dpmodel/fitting/dipole_fitting.py index c872ef0555..fcaea43338 100644 --- a/deepmd/dpmodel/fitting/dipole_fitting.py +++ b/deepmd/dpmodel/fitting/dipole_fitting.py @@ -95,6 +95,7 @@ def __init__( resnet_dt: bool = True, numb_fparam: int = 0, numb_aparam: int = 0, + dim_case_embd: int = 0, rcond: Optional[float] = None, tot_ener_zero: bool = False, trainable: Optional[list[bool]] = None, @@ -130,6 +131,7 @@ def __init__( resnet_dt=resnet_dt, numb_fparam=numb_fparam, numb_aparam=numb_aparam, + dim_case_embd=dim_case_embd, rcond=rcond, tot_ener_zero=tot_ener_zero, trainable=trainable, @@ -159,7 +161,7 @@ def serialize(self) -> dict: @classmethod def deserialize(cls, data: dict) -> "GeneralFitting": data = data.copy() - check_version_compatibility(data.pop("@version", 1), 2, 1) + check_version_compatibility(data.pop("@version", 1), 3, 1) var_name = data.pop("var_name", None) assert var_name == "dipole" return super().deserialize(data) diff --git a/deepmd/dpmodel/fitting/dos_fitting.py b/deepmd/dpmodel/fitting/dos_fitting.py index b4b1ee3cb2..2f6df77eac 100644 --- a/deepmd/dpmodel/fitting/dos_fitting.py +++ b/deepmd/dpmodel/fitting/dos_fitting.py @@ -36,6 +36,7 @@ def __init__( resnet_dt: bool = True, numb_fparam: int = 0, numb_aparam: int = 0, + dim_case_embd: int = 0, bias_dos: Optional[np.ndarray] = None, rcond: Optional[float] = None, trainable: Union[bool, list[bool]] = True, @@ -60,6 +61,7 @@ def __init__( bias_atom=bias_dos, numb_fparam=numb_fparam, numb_aparam=numb_aparam, + dim_case_embd=dim_case_embd, rcond=rcond, trainable=trainable, activation_function=activation_function, @@ -73,7 +75,7 @@ def __init__( @classmethod def deserialize(cls, data: dict) -> "GeneralFitting": data = data.copy() - check_version_compatibility(data.pop("@version", 1), 2, 1) + check_version_compatibility(data.pop("@version", 1), 3, 1) data["numb_dos"] = data.pop("dim_out") data.pop("tot_ener_zero", None) data.pop("var_name", None) diff --git a/deepmd/dpmodel/fitting/ener_fitting.py b/deepmd/dpmodel/fitting/ener_fitting.py index 53bedb4cec..6435b6468f 100644 --- a/deepmd/dpmodel/fitting/ener_fitting.py +++ b/deepmd/dpmodel/fitting/ener_fitting.py @@ -32,6 +32,7 @@ def __init__( resnet_dt: bool = True, numb_fparam: int = 0, numb_aparam: int = 0, + dim_case_embd: int = 0, rcond: Optional[float] = None, tot_ener_zero: bool = False, trainable: Optional[list[bool]] = None, @@ -55,6 +56,7 @@ def __init__( resnet_dt=resnet_dt, numb_fparam=numb_fparam, numb_aparam=numb_aparam, + dim_case_embd=dim_case_embd, rcond=rcond, tot_ener_zero=tot_ener_zero, trainable=trainable, @@ -73,7 +75,7 @@ def __init__( @classmethod def deserialize(cls, data: dict) -> "GeneralFitting": data = data.copy() - check_version_compatibility(data.pop("@version", 1), 2, 1) + check_version_compatibility(data.pop("@version", 1), 3, 1) data.pop("var_name") data.pop("dim_out") return super().deserialize(data) diff --git a/deepmd/dpmodel/fitting/general_fitting.py b/deepmd/dpmodel/fitting/general_fitting.py index 2958a7d18d..c05d84c4a1 100644 --- a/deepmd/dpmodel/fitting/general_fitting.py +++ b/deepmd/dpmodel/fitting/general_fitting.py @@ -105,6 +105,7 @@ def __init__( resnet_dt: bool = True, numb_fparam: int = 0, numb_aparam: int = 0, + dim_case_embd: int = 0, bias_atom_e: Optional[np.ndarray] = None, rcond: Optional[float] = None, tot_ener_zero: bool = False, @@ -127,6 +128,7 @@ def __init__( self.resnet_dt = resnet_dt self.numb_fparam = 
numb_fparam self.numb_aparam = numb_aparam + self.dim_case_embd = dim_case_embd self.rcond = rcond self.tot_ener_zero = tot_ener_zero self.trainable = trainable @@ -171,11 +173,16 @@ def __init__( self.aparam_inv_std = np.ones(self.numb_aparam, dtype=self.prec) else: self.aparam_avg, self.aparam_inv_std = None, None + if self.dim_case_embd > 0: + self.case_embd = np.zeros(self.dim_case_embd, dtype=self.prec) + else: + self.case_embd = None # init networks in_dim = ( self.dim_descrpt + self.numb_fparam + (0 if self.use_aparam_as_mask else self.numb_aparam) + + self.dim_case_embd ) self.nets = NetworkCollection( 1 if not self.mixed_types else 0, @@ -222,6 +229,13 @@ def get_type_map(self) -> list[str]: """Get the name to each type of atoms.""" return self.type_map + def set_case_embd(self, case_idx: int): + """ + Set the case embedding of this fitting net by the given case_idx, + typically concatenated with the output of the descriptor and fed into the fitting net. + """ + self.case_embd = np.eye(self.dim_case_embd, dtype=self.prec)[case_idx] + def change_type_map( self, type_map: list[str], model_with_new_type_stat=None ) -> None: @@ -255,6 +269,8 @@ def __setitem__(self, key, value) -> None: self.aparam_avg = value elif key in ["aparam_inv_std"]: self.aparam_inv_std = value + elif key in ["case_embd"]: + self.case_embd = value elif key in ["scale"]: self.scale = value else: @@ -271,6 +287,8 @@ def __getitem__(self, key): return self.aparam_avg elif key in ["aparam_inv_std"]: return self.aparam_inv_std + elif key in ["case_embd"]: + return self.case_embd elif key in ["scale"]: return self.scale else: @@ -287,7 +305,7 @@ def serialize(self) -> dict: """Serialize the fitting to dict.""" return { "@class": "Fitting", - "@version": 2, + "@version": 3, "var_name": self.var_name, "ntypes": self.ntypes, "dim_descrpt": self.dim_descrpt, @@ -295,6 +313,7 @@ def serialize(self) -> dict: "resnet_dt": self.resnet_dt, "numb_fparam": self.numb_fparam, "numb_aparam": self.numb_aparam, + "dim_case_embd": self.dim_case_embd, "rcond": self.rcond, "activation_function": self.activation_function, "precision": self.precision, @@ -303,6 +322,7 @@ def serialize(self) -> dict: "nets": self.nets.serialize(), "@variables": { "bias_atom_e": to_numpy_array(self.bias_atom_e), + "case_embd": to_numpy_array(self.case_embd), "fparam_avg": to_numpy_array(self.fparam_avg), "fparam_inv_std": to_numpy_array(self.fparam_inv_std), "aparam_avg": to_numpy_array(self.aparam_avg), @@ -423,6 +443,19 @@ def _call_common( axis=-1, ) + if self.dim_case_embd > 0: + assert self.case_embd is not None + case_embd = xp.tile(xp.reshape(self.case_embd, [1, 1, -1]), [nf, nloc, 1]) + xx = xp.concat( + [xx, case_embd], + axis=-1, + ) + if xx_zeros is not None: + xx_zeros = xp.concat( + [xx_zeros, case_embd], + axis=-1, + ) + # calculate the prediction if not self.mixed_types: outs = xp.zeros( diff --git a/deepmd/dpmodel/fitting/invar_fitting.py b/deepmd/dpmodel/fitting/invar_fitting.py index 219589d9ee..b5d3a02d86 100644 --- a/deepmd/dpmodel/fitting/invar_fitting.py +++ b/deepmd/dpmodel/fitting/invar_fitting.py @@ -123,6 +123,7 @@ def __init__( resnet_dt: bool = True, numb_fparam: int = 0, numb_aparam: int = 0, + dim_case_embd: int = 0, bias_atom: Optional[np.ndarray] = None, rcond: Optional[float] = None, tot_ener_zero: bool = False, @@ -155,6 +156,7 @@ def __init__( resnet_dt=resnet_dt, numb_fparam=numb_fparam, numb_aparam=numb_aparam, + dim_case_embd=dim_case_embd, rcond=rcond, bias_atom_e=bias_atom, tot_ener_zero=tot_ener_zero, @@ -183,7 
+185,7 @@ def serialize(self) -> dict: @classmethod def deserialize(cls, data: dict) -> "GeneralFitting": data = data.copy() - check_version_compatibility(data.pop("@version", 1), 2, 1) + check_version_compatibility(data.pop("@version", 1), 3, 1) return super().deserialize(data) def _net_out_dim(self): diff --git a/deepmd/dpmodel/fitting/polarizability_fitting.py b/deepmd/dpmodel/fitting/polarizability_fitting.py index 021359a96e..0db6a23377 100644 --- a/deepmd/dpmodel/fitting/polarizability_fitting.py +++ b/deepmd/dpmodel/fitting/polarizability_fitting.py @@ -101,6 +101,7 @@ def __init__( resnet_dt: bool = True, numb_fparam: int = 0, numb_aparam: int = 0, + dim_case_embd: int = 0, rcond: Optional[float] = None, tot_ener_zero: bool = False, trainable: Optional[list[bool]] = None, @@ -150,6 +151,7 @@ def __init__( resnet_dt=resnet_dt, numb_fparam=numb_fparam, numb_aparam=numb_aparam, + dim_case_embd=dim_case_embd, rcond=rcond, tot_ener_zero=tot_ener_zero, trainable=trainable, @@ -187,7 +189,7 @@ def __getitem__(self, key): def serialize(self) -> dict: data = super().serialize() data["type"] = "polar" - data["@version"] = 3 + data["@version"] = 4 data["embedding_width"] = self.embedding_width data["fit_diag"] = self.fit_diag data["shift_diag"] = self.shift_diag @@ -198,7 +200,7 @@ def serialize(self) -> dict: @classmethod def deserialize(cls, data: dict) -> "GeneralFitting": data = data.copy() - check_version_compatibility(data.pop("@version", 1), 3, 1) + check_version_compatibility(data.pop("@version", 1), 4, 1) var_name = data.pop("var_name", None) assert var_name == "polar" return super().deserialize(data) diff --git a/deepmd/dpmodel/fitting/property_fitting.py b/deepmd/dpmodel/fitting/property_fitting.py index 18a56e3bf9..8b903af00e 100644 --- a/deepmd/dpmodel/fitting/property_fitting.py +++ b/deepmd/dpmodel/fitting/property_fitting.py @@ -78,6 +78,7 @@ def __init__( resnet_dt: bool = True, numb_fparam: int = 0, numb_aparam: int = 0, + dim_case_embd: int = 0, activation_function: str = "tanh", precision: str = DEFAULT_PRECISION, mixed_types: bool = True, @@ -99,6 +100,7 @@ def __init__( resnet_dt=resnet_dt, numb_fparam=numb_fparam, numb_aparam=numb_aparam, + dim_case_embd=dim_case_embd, rcond=rcond, trainable=trainable, activation_function=activation_function, @@ -111,7 +113,7 @@ def __init__( @classmethod def deserialize(cls, data: dict) -> "PropertyFittingNet": data = data.copy() - check_version_compatibility(data.pop("@version"), 2, 1) + check_version_compatibility(data.pop("@version"), 3, 1) data.pop("dim_out") data.pop("var_name") data.pop("tot_ener_zero") diff --git a/deepmd/dpmodel/model/make_model.py b/deepmd/dpmodel/model/make_model.py index 70ddbe09b8..ccad72c6a5 100644 --- a/deepmd/dpmodel/model/make_model.py +++ b/deepmd/dpmodel/model/make_model.py @@ -552,6 +552,9 @@ def serialize(self) -> dict: def deserialize(cls, data) -> "CM": return cls(atomic_model_=T_AtomicModel.deserialize(data)) + def set_case_embd(self, case_idx: int): + self.atomic_model.set_case_embd(case_idx) + def get_dim_fparam(self) -> int: """Get the number (dimension) of frame parameters of this atomic model.""" return self.atomic_model.get_dim_fparam() diff --git a/deepmd/pd/model/atomic_model/dp_atomic_model.py b/deepmd/pd/model/atomic_model/dp_atomic_model.py index 47b881e0cc..25a0f89d77 100644 --- a/deepmd/pd/model/atomic_model/dp_atomic_model.py +++ b/deepmd/pd/model/atomic_model/dp_atomic_model.py @@ -139,6 +139,13 @@ def get_sel(self) -> list[int]: """Get the neighbor selection.""" return self.sel + 
def set_case_embd(self, case_idx: int): + """ + Set the case embedding of this atomic model by the given case_idx, + typically concatenated with the output of the descriptor and fed into the fitting net. + """ + self.fitting_net.set_case_embd(case_idx) + def mixed_types(self) -> bool: """If true, the model 1. assumes total number of atoms aligned across frames; diff --git a/deepmd/pd/model/model/make_model.py b/deepmd/pd/model/model/make_model.py index 67b46d4d87..d5c5c6bd41 100644 --- a/deepmd/pd/model/model/make_model.py +++ b/deepmd/pd/model/model/make_model.py @@ -516,6 +516,9 @@ def serialize(self) -> dict: def deserialize(cls, data) -> "CM": return cls(atomic_model_=T_AtomicModel.deserialize(data)) + def set_case_embd(self, case_idx: int): + self.atomic_model.set_case_embd(case_idx) + def get_dim_fparam(self) -> int: """Get the number (dimension) of frame parameters of this atomic model.""" return self.atomic_model.get_dim_fparam() diff --git a/deepmd/pd/model/task/ener.py b/deepmd/pd/model/task/ener.py index ed0cfac69d..789ef75066 100644 --- a/deepmd/pd/model/task/ener.py +++ b/deepmd/pd/model/task/ener.py @@ -42,6 +42,7 @@ def __init__( resnet_dt: bool = True, numb_fparam: int = 0, numb_aparam: int = 0, + dim_case_embd: int = 0, activation_function: str = "tanh", precision: str = DEFAULT_PRECISION, mixed_types: bool = True, @@ -59,6 +60,7 @@ def __init__( resnet_dt=resnet_dt, numb_fparam=numb_fparam, numb_aparam=numb_aparam, + dim_case_embd=dim_case_embd, activation_function=activation_function, precision=precision, mixed_types=mixed_types, @@ -70,7 +72,7 @@ def __init__( @classmethod def deserialize(cls, data: dict) -> "GeneralFitting": data = copy.deepcopy(data) - check_version_compatibility(data.pop("@version", 1), 2, 1) + check_version_compatibility(data.pop("@version", 1), 3, 1) data.pop("var_name") data.pop("dim_out") return super().deserialize(data) diff --git a/deepmd/pd/model/task/fitting.py b/deepmd/pd/model/task/fitting.py index 9008ef8af3..375cf834cc 100644 --- a/deepmd/pd/model/task/fitting.py +++ b/deepmd/pd/model/task/fitting.py @@ -103,6 +103,9 @@ class GeneralFitting(Fitting): Number of frame parameters. numb_aparam : int Number of atomic parameters. + dim_case_embd : int + (Not supported yet) + Dimension of case specific embedding. activation_function : str Activation function. 
precision : str @@ -140,6 +143,7 @@ def __init__( resnet_dt: bool = True, numb_fparam: int = 0, numb_aparam: int = 0, + dim_case_embd: int = 0, activation_function: str = "tanh", precision: str = DEFAULT_PRECISION, mixed_types: bool = True, @@ -161,6 +165,10 @@ def __init__( self.resnet_dt = resnet_dt self.numb_fparam = numb_fparam self.numb_aparam = numb_aparam + self.dim_case_embd = dim_case_embd + if dim_case_embd > 0: + raise ValueError("dim_case_embd is not supported yet in PaddlePaddle.") + self.case_embd = None self.activation_function = activation_function self.precision = precision self.prec = PRECISION_DICT[self.precision] @@ -274,7 +282,7 @@ def serialize(self) -> dict: """Serialize the fitting to dict.""" return { "@class": "Fitting", - "@version": 2, + "@version": 3, "var_name": self.var_name, "ntypes": self.ntypes, "dim_descrpt": self.dim_descrpt, @@ -282,6 +290,7 @@ def serialize(self) -> dict: "resnet_dt": self.resnet_dt, "numb_fparam": self.numb_fparam, "numb_aparam": self.numb_aparam, + "dim_case_embd": self.dim_case_embd, "activation_function": self.activation_function, "precision": self.precision, "mixed_types": self.mixed_types, @@ -290,6 +299,7 @@ def serialize(self) -> dict: "exclude_types": self.exclude_types, "@variables": { "bias_atom_e": to_numpy_array(self.bias_atom_e), + "case_embd": None, "fparam_avg": to_numpy_array(self.fparam_avg), "fparam_inv_std": to_numpy_array(self.fparam_inv_std), "aparam_avg": to_numpy_array(self.aparam_avg), @@ -349,6 +359,13 @@ def get_type_map(self) -> list[str]: """Get the name to each type of atoms.""" return self.type_map + def set_case_embd(self, case_idx: int): + """ + Set the case embedding of this fitting net by the given case_idx, + typically concatenated with the output of the descriptor and fed into the fitting net. + """ + raise NotImplementedError("set_case_embd is not supported yet in PaddlePaddle.") + def __setitem__(self, key, value): if key in ["bias_atom_e"]: value = value.reshape([self.ntypes, self._net_out_dim()]) @@ -361,6 +378,8 @@ def __setitem__(self, key, value): self.aparam_avg = value elif key in ["aparam_inv_std"]: self.aparam_inv_std = value + elif key in ["case_embd"]: + self.case_embd = value elif key in ["scale"]: self.scale = value else: @@ -377,6 +396,8 @@ def __getitem__(self, key): return self.aparam_avg elif key in ["aparam_inv_std"]: return self.aparam_inv_std + elif key in ["case_embd"]: + return self.case_embd elif key in ["scale"]: return self.scale else: diff --git a/deepmd/pd/model/task/invar_fitting.py b/deepmd/pd/model/task/invar_fitting.py index b366fc1d2e..b92c862dc8 100644 --- a/deepmd/pd/model/task/invar_fitting.py +++ b/deepmd/pd/model/task/invar_fitting.py @@ -57,6 +57,9 @@ class InvarFitting(GeneralFitting): Number of frame parameters. numb_aparam : int Number of atomic parameters. + dim_case_embd : int + (Not supported yet) + Dimension of case specific embedding. activation_function : str Activation function. 
precision : str @@ -92,6 +95,7 @@ def __init__( resnet_dt: bool = True, numb_fparam: int = 0, numb_aparam: int = 0, + dim_case_embd: int = 0, activation_function: str = "tanh", precision: str = DEFAULT_PRECISION, mixed_types: bool = True, @@ -114,6 +118,7 @@ def __init__( resnet_dt=resnet_dt, numb_fparam=numb_fparam, numb_aparam=numb_aparam, + dim_case_embd=dim_case_embd, activation_function=activation_function, precision=precision, mixed_types=mixed_types, @@ -142,7 +147,7 @@ def serialize(self) -> dict: @classmethod def deserialize(cls, data: dict) -> "GeneralFitting": data = copy.deepcopy(data) - check_version_compatibility(data.pop("@version", 1), 2, 1) + check_version_compatibility(data.pop("@version", 1), 3, 1) return super().deserialize(data) def output_def(self) -> FittingOutputDef: diff --git a/deepmd/pt/model/atomic_model/dp_atomic_model.py b/deepmd/pt/model/atomic_model/dp_atomic_model.py index 2cdc97f934..c988d63213 100644 --- a/deepmd/pt/model/atomic_model/dp_atomic_model.py +++ b/deepmd/pt/model/atomic_model/dp_atomic_model.py @@ -93,6 +93,13 @@ def get_sel(self) -> list[int]: """Get the neighbor selection.""" return self.sel + def set_case_embd(self, case_idx: int): + """ + Set the case embedding of this atomic model by the given case_idx, + typically concatenated with the output of the descriptor and fed into the fitting net. + """ + self.fitting_net.set_case_embd(case_idx) + def mixed_types(self) -> bool: """If true, the model 1. assumes total number of atoms aligned across frames; diff --git a/deepmd/pt/model/atomic_model/linear_atomic_model.py b/deepmd/pt/model/atomic_model/linear_atomic_model.py index 3a6abccdf6..36c636ddfb 100644 --- a/deepmd/pt/model/atomic_model/linear_atomic_model.py +++ b/deepmd/pt/model/atomic_model/linear_atomic_model.py @@ -158,6 +158,14 @@ def get_model_rcuts(self) -> list[float]: def get_sel(self) -> list[int]: return [max([model.get_nsel() for model in self.models])] + def set_case_embd(self, case_idx: int): + """ + Set the case embedding of this atomic model by the given case_idx, + typically concatenated with the output of the descriptor and fed into the fitting net. + """ + for model in self.models: + model.set_case_embd(case_idx) + def get_model_nsels(self) -> list[int]: """Get the processed sels for each individual models. Not distinguishing types.""" return [model.get_nsel() for model in self.models] @@ -561,6 +569,14 @@ def serialize(self) -> dict: ) return dd + def set_case_embd(self, case_idx: int): + """ + Set the case embedding of this atomic model by the given case_idx, + typically concatenated with the output of the descriptor and fed into the fitting net. + """ + # only set case_idx for dpmodel + self.models[0].set_case_embd(case_idx) + @classmethod def deserialize(cls, data) -> "DPZBLLinearEnergyAtomicModel": data = data.copy() diff --git a/deepmd/pt/model/atomic_model/pairtab_atomic_model.py b/deepmd/pt/model/atomic_model/pairtab_atomic_model.py index 0d3b2c0c41..62b47afb32 100644 --- a/deepmd/pt/model/atomic_model/pairtab_atomic_model.py +++ b/deepmd/pt/model/atomic_model/pairtab_atomic_model.py @@ -141,6 +141,15 @@ def get_type_map(self) -> list[str]: def get_sel(self) -> list[int]: return [self.sel] + def set_case_embd(self, case_idx: int): + """ + Set the case embedding of this atomic model by the given case_idx, + typically concatenated with the output of the descriptor and fed into the fitting net. + """ + raise NotImplementedError( + "Case identification not supported for PairTabAtomicModel!" 
+ ) + def get_nsel(self) -> int: return self.sel diff --git a/deepmd/pt/model/descriptor/dpa2.py b/deepmd/pt/model/descriptor/dpa2.py index ebad588e32..c8e430960b 100644 --- a/deepmd/pt/model/descriptor/dpa2.py +++ b/deepmd/pt/model/descriptor/dpa2.py @@ -403,25 +403,8 @@ def share_params(self, base_class, shared_level, resume=False) -> None: ] self.repformers.share_params(base_class.repformers, 0, resume=resume) # shared_level: 1 - # share all parameters in type_embedding and repinit - elif shared_level == 1: - self._modules["type_embedding"] = base_class._modules["type_embedding"] - self.repinit.share_params(base_class.repinit, 0, resume=resume) - if self.use_three_body: - self.repinit_three_body.share_params( - base_class.repinit_three_body, 0, resume=resume - ) - # shared_level: 2 - # share all parameters in type_embedding and repformers - elif shared_level == 2: - self._modules["type_embedding"] = base_class._modules["type_embedding"] - self._modules["g1_shape_tranform"] = base_class._modules[ - "g1_shape_tranform" - ] - self.repformers.share_params(base_class.repformers, 0, resume=resume) - # shared_level: 3 # share all parameters in type_embedding - elif shared_level == 3: + elif shared_level == 1: self._modules["type_embedding"] = base_class._modules["type_embedding"] # Other shared levels else: diff --git a/deepmd/pt/model/model/make_model.py b/deepmd/pt/model/model/make_model.py index 83abf9ee4a..472eae5329 100644 --- a/deepmd/pt/model/model/make_model.py +++ b/deepmd/pt/model/model/make_model.py @@ -514,6 +514,9 @@ def serialize(self) -> dict: def deserialize(cls, data) -> "CM": return cls(atomic_model_=T_AtomicModel.deserialize(data)) + def set_case_embd(self, case_idx: int): + self.atomic_model.set_case_embd(case_idx) + @torch.jit.export def get_dim_fparam(self) -> int: """Get the number (dimension) of frame parameters of this atomic model.""" diff --git a/deepmd/pt/model/task/dipole.py b/deepmd/pt/model/task/dipole.py index c2db53288a..65b64220ae 100644 --- a/deepmd/pt/model/task/dipole.py +++ b/deepmd/pt/model/task/dipole.py @@ -51,6 +51,8 @@ class DipoleFittingNet(GeneralFitting): Number of frame parameters. numb_aparam : int Number of atomic parameters. + dim_case_embd : int + Dimension of case specific embedding. activation_function : str Activation function. 
precision : str @@ -81,6 +83,7 @@ def __init__( resnet_dt: bool = True, numb_fparam: int = 0, numb_aparam: int = 0, + dim_case_embd: int = 0, activation_function: str = "tanh", precision: str = DEFAULT_PRECISION, mixed_types: bool = True, @@ -103,6 +106,7 @@ def __init__( resnet_dt=resnet_dt, numb_fparam=numb_fparam, numb_aparam=numb_aparam, + dim_case_embd=dim_case_embd, activation_function=activation_function, precision=precision, mixed_types=mixed_types, @@ -128,7 +132,7 @@ def serialize(self) -> dict: @classmethod def deserialize(cls, data: dict) -> "GeneralFitting": data = data.copy() - check_version_compatibility(data.pop("@version", 1), 2, 1) + check_version_compatibility(data.pop("@version", 1), 3, 1) data.pop("var_name", None) return super().deserialize(data) diff --git a/deepmd/pt/model/task/dos.py b/deepmd/pt/model/task/dos.py index a71117e587..568ef81c92 100644 --- a/deepmd/pt/model/task/dos.py +++ b/deepmd/pt/model/task/dos.py @@ -47,6 +47,7 @@ def __init__( resnet_dt: bool = True, numb_fparam: int = 0, numb_aparam: int = 0, + dim_case_embd: int = 0, rcond: Optional[float] = None, bias_dos: Optional[torch.Tensor] = None, trainable: Union[bool, list[bool]] = True, @@ -73,6 +74,7 @@ def __init__( resnet_dt=resnet_dt, numb_fparam=numb_fparam, numb_aparam=numb_aparam, + dim_case_embd=dim_case_embd, activation_function=activation_function, precision=precision, mixed_types=mixed_types, @@ -99,7 +101,7 @@ def output_def(self) -> FittingOutputDef: @classmethod def deserialize(cls, data: dict) -> "DOSFittingNet": data = data.copy() - check_version_compatibility(data.pop("@version", 1), 2, 1) + check_version_compatibility(data.pop("@version", 1), 3, 1) data.pop("@class", None) data.pop("var_name", None) data.pop("tot_ener_zero", None) diff --git a/deepmd/pt/model/task/ener.py b/deepmd/pt/model/task/ener.py index 543d987e31..07351b33f6 100644 --- a/deepmd/pt/model/task/ener.py +++ b/deepmd/pt/model/task/ener.py @@ -50,6 +50,7 @@ def __init__( resnet_dt: bool = True, numb_fparam: int = 0, numb_aparam: int = 0, + dim_case_embd: int = 0, activation_function: str = "tanh", precision: str = DEFAULT_PRECISION, mixed_types: bool = True, @@ -67,6 +68,7 @@ def __init__( resnet_dt=resnet_dt, numb_fparam=numb_fparam, numb_aparam=numb_aparam, + dim_case_embd=dim_case_embd, activation_function=activation_function, precision=precision, mixed_types=mixed_types, @@ -78,7 +80,7 @@ def __init__( @classmethod def deserialize(cls, data: dict) -> "GeneralFitting": data = data.copy() - check_version_compatibility(data.pop("@version", 1), 2, 1) + check_version_compatibility(data.pop("@version", 1), 3, 1) data.pop("var_name") data.pop("dim_out") return super().deserialize(data) diff --git a/deepmd/pt/model/task/fitting.py b/deepmd/pt/model/task/fitting.py index fb0954979e..2486ab576f 100644 --- a/deepmd/pt/model/task/fitting.py +++ b/deepmd/pt/model/task/fitting.py @@ -64,14 +64,7 @@ def share_params(self, base_class, shared_level, resume=False) -> None: self.__class__ == base_class.__class__ ), "Only fitting nets of the same type can share params!" if shared_level == 0: - # link buffers - if hasattr(self, "bias_atom_e"): - self.bias_atom_e = base_class.bias_atom_e - # the following will successfully link all the params except buffers, which need manually link. 
- for item in self._modules: - self._modules[item] = base_class._modules[item] - elif shared_level == 1: - # only not share the bias_atom_e + # share all parameters except bias_atom_e and case_embd # the following will successfully link all the params except buffers, which need manually link. for item in self._modules: self._modules[item] = base_class._modules[item] @@ -102,6 +95,8 @@ class GeneralFitting(Fitting): Number of frame parameters. numb_aparam : int Number of atomic parameters. + dim_case_embd : int + Dimension of case specific embedding. activation_function : str Activation function. precision : str @@ -139,6 +134,7 @@ def __init__( resnet_dt: bool = True, numb_fparam: int = 0, numb_aparam: int = 0, + dim_case_embd: int = 0, activation_function: str = "tanh", precision: str = DEFAULT_PRECISION, mixed_types: bool = True, @@ -160,6 +156,7 @@ def __init__( self.resnet_dt = resnet_dt self.numb_fparam = numb_fparam self.numb_aparam = numb_aparam + self.dim_case_embd = dim_case_embd self.activation_function = activation_function self.precision = precision self.prec = PRECISION_DICT[self.precision] @@ -211,10 +208,20 @@ def __init__( else: self.aparam_avg, self.aparam_inv_std = None, None + if self.dim_case_embd > 0: + self.register_buffer( + "case_embd", + torch.zeros(self.dim_case_embd, dtype=self.prec, device=device), + # torch.eye(self.dim_case_embd, dtype=self.prec, device=device)[0], + ) + else: + self.case_embd = None + in_dim = ( self.dim_descrpt + self.numb_fparam + (0 if self.use_aparam_as_mask else self.numb_aparam) + + self.dim_case_embd ) self.filter_layers = NetworkCollection( @@ -274,7 +281,7 @@ def serialize(self) -> dict: """Serialize the fitting to dict.""" return { "@class": "Fitting", - "@version": 2, + "@version": 3, "var_name": self.var_name, "ntypes": self.ntypes, "dim_descrpt": self.dim_descrpt, @@ -282,6 +289,7 @@ "resnet_dt": self.resnet_dt, "numb_fparam": self.numb_fparam, "numb_aparam": self.numb_aparam, + "dim_case_embd": self.dim_case_embd, "activation_function": self.activation_function, "precision": self.precision, "mixed_types": self.mixed_types, @@ -290,6 +298,7 @@ "exclude_types": self.exclude_types, "@variables": { "bias_atom_e": to_numpy_array(self.bias_atom_e), + "case_embd": to_numpy_array(self.case_embd), "fparam_avg": to_numpy_array(self.fparam_avg), "fparam_inv_std": to_numpy_array(self.fparam_inv_std), "aparam_avg": to_numpy_array(self.aparam_avg), @@ -349,6 +358,15 @@ def get_type_map(self) -> list[str]: """Get the name to each type of atoms.""" return self.type_map + def set_case_embd(self, case_idx: int): + """ + Set the case embedding of this fitting net by the given case_idx, + typically concatenated with the output of the descriptor and fed into the fitting net.
+ """ + self.case_embd = torch.eye(self.dim_case_embd, dtype=self.prec, device=device)[ + case_idx + ] + def __setitem__(self, key, value) -> None: if key in ["bias_atom_e"]: value = value.view([self.ntypes, self._net_out_dim()]) @@ -361,6 +379,8 @@ def __setitem__(self, key, value) -> None: self.aparam_avg = value elif key in ["aparam_inv_std"]: self.aparam_inv_std = value + elif key in ["case_embd"]: + self.case_embd = value elif key in ["scale"]: self.scale = value else: @@ -377,6 +397,8 @@ def __getitem__(self, key): return self.aparam_avg elif key in ["aparam_inv_std"]: return self.aparam_inv_std + elif key in ["case_embd"]: + return self.case_embd elif key in ["scale"]: return self.scale else: @@ -475,6 +497,19 @@ def _forward_common( dim=-1, ) + if self.dim_case_embd > 0: + assert self.case_embd is not None + case_embd = torch.tile(self.case_embd.reshape([1, 1, -1]), [nf, nloc, 1]) + xx = torch.cat( + [xx, case_embd], + dim=-1, + ) + if xx_zeros is not None: + xx_zeros = torch.cat( + [xx_zeros, case_embd], + dim=-1, + ) + outs = torch.zeros( (nf, nloc, net_dim_out), dtype=self.prec, diff --git a/deepmd/pt/model/task/invar_fitting.py b/deepmd/pt/model/task/invar_fitting.py index 2579f5b9da..b1599eac60 100644 --- a/deepmd/pt/model/task/invar_fitting.py +++ b/deepmd/pt/model/task/invar_fitting.py @@ -56,6 +56,8 @@ class InvarFitting(GeneralFitting): Number of frame parameters. numb_aparam : int Number of atomic parameters. + dim_case_embd : int + Dimension of case specific embedding. activation_function : str Activation function. precision : str @@ -91,6 +93,7 @@ def __init__( resnet_dt: bool = True, numb_fparam: int = 0, numb_aparam: int = 0, + dim_case_embd: int = 0, activation_function: str = "tanh", precision: str = DEFAULT_PRECISION, mixed_types: bool = True, @@ -113,6 +116,7 @@ def __init__( resnet_dt=resnet_dt, numb_fparam=numb_fparam, numb_aparam=numb_aparam, + dim_case_embd=dim_case_embd, activation_function=activation_function, precision=precision, mixed_types=mixed_types, @@ -141,7 +145,7 @@ def serialize(self) -> dict: @classmethod def deserialize(cls, data: dict) -> "GeneralFitting": data = data.copy() - check_version_compatibility(data.pop("@version", 1), 2, 1) + check_version_compatibility(data.pop("@version", 1), 3, 1) return super().deserialize(data) def output_def(self) -> FittingOutputDef: diff --git a/deepmd/pt/model/task/polarizability.py b/deepmd/pt/model/task/polarizability.py index 8e07896e38..d9a421d635 100644 --- a/deepmd/pt/model/task/polarizability.py +++ b/deepmd/pt/model/task/polarizability.py @@ -53,6 +53,8 @@ class PolarFittingNet(GeneralFitting): Number of frame parameters. numb_aparam : int Number of atomic parameters. + dim_case_embd : int + Dimension of case specific embedding. activation_function : str Activation function. 
precision : str @@ -85,6 +87,7 @@ def __init__( resnet_dt: bool = True, numb_fparam: int = 0, numb_aparam: int = 0, + dim_case_embd: int = 0, activation_function: str = "tanh", precision: str = DEFAULT_PRECISION, mixed_types: bool = True, @@ -128,6 +131,7 @@ def __init__( resnet_dt=resnet_dt, numb_fparam=numb_fparam, numb_aparam=numb_aparam, + dim_case_embd=dim_case_embd, activation_function=activation_function, precision=precision, mixed_types=mixed_types, @@ -191,7 +195,7 @@ def change_type_map( def serialize(self) -> dict: data = super().serialize() data["type"] = "polar" - data["@version"] = 3 + data["@version"] = 4 data["embedding_width"] = self.embedding_width data["fit_diag"] = self.fit_diag data["shift_diag"] = self.shift_diag @@ -202,7 +206,7 @@ def serialize(self) -> dict: @classmethod def deserialize(cls, data: dict) -> "GeneralFitting": data = data.copy() - check_version_compatibility(data.pop("@version", 1), 3, 1) + check_version_compatibility(data.pop("@version", 1), 4, 1) data.pop("var_name", None) return super().deserialize(data) diff --git a/deepmd/pt/model/task/property.py b/deepmd/pt/model/task/property.py index 1c2b9e7c9c..dec0f1447b 100644 --- a/deepmd/pt/model/task/property.py +++ b/deepmd/pt/model/task/property.py @@ -60,6 +60,8 @@ class PropertyFittingNet(InvarFitting): Number of frame parameters. numb_aparam : int Number of atomic parameters. + dim_case_embd : int + Dimension of case specific embedding. activation_function : str Activation function. precision : str @@ -83,6 +85,7 @@ def __init__( resnet_dt: bool = True, numb_fparam: int = 0, numb_aparam: int = 0, + dim_case_embd: int = 0, activation_function: str = "tanh", precision: str = DEFAULT_PRECISION, mixed_types: bool = True, @@ -102,6 +105,7 @@ def __init__( resnet_dt=resnet_dt, numb_fparam=numb_fparam, numb_aparam=numb_aparam, + dim_case_embd=dim_case_embd, activation_function=activation_function, precision=precision, mixed_types=mixed_types, @@ -129,7 +133,7 @@ def output_def(self) -> FittingOutputDef: @classmethod def deserialize(cls, data: dict) -> "PropertyFittingNet": data = data.copy() - check_version_compatibility(data.pop("@version", 1), 2, 1) + check_version_compatibility(data.pop("@version", 1), 3, 1) data.pop("dim_out") data.pop("var_name") obj = super().deserialize(data) diff --git a/deepmd/pt/train/training.py b/deepmd/pt/train/training.py index af6e48191d..61683fd857 100644 --- a/deepmd/pt/train/training.py +++ b/deepmd/pt/train/training.py @@ -265,7 +265,7 @@ def get_lr(lr_params): self.opt_type, self.opt_param = get_opt_param(training_params) # Model - self.model = get_model_for_wrapper(model_params) + self.model = get_model_for_wrapper(model_params, resuming=resuming) # Loss if not self.multi_task: @@ -1267,7 +1267,7 @@ def get_single_model( return model -def get_model_for_wrapper(_model_params): +def get_model_for_wrapper(_model_params, resuming=False): if "model_dict" not in _model_params: _model = get_single_model( _model_params, @@ -1275,13 +1275,41 @@ def get_model_for_wrapper(_model_params): else: _model = {} model_keys = list(_model_params["model_dict"]) + do_case_embd, case_embd_index = get_case_embd_config(_model_params) for _model_key in model_keys: _model[_model_key] = get_single_model( _model_params["model_dict"][_model_key], ) + if do_case_embd and not resuming: + # only set case_embd when from scratch multitask training + _model[_model_key].set_case_embd(case_embd_index[_model_key]) return _model +def get_case_embd_config(_model_params): + assert ( + "model_dict" in 
_model_params + ), "Only support setting case embedding for multi-task model!" + model_keys = list(_model_params["model_dict"]) + sorted_model_keys = sorted(model_keys) + numb_case_embd_list = [ + _model_params["model_dict"][model_key] + .get("fitting_net", {}) + .get("dim_case_embd", 0) + for model_key in sorted_model_keys + ] + if not all(item == numb_case_embd_list[0] for item in numb_case_embd_list): + raise ValueError( + f"All models must have the same dimension of case embedding, while the settings are: {numb_case_embd_list}" + ) + if numb_case_embd_list[0] == 0: + return False, {} + case_embd_index = { + model_key: idx for idx, model_key in enumerate(sorted_model_keys) + } + return True, case_embd_index + + def model_change_out_bias( _model, _sample_func, diff --git a/deepmd/pt/train/wrapper.py b/deepmd/pt/train/wrapper.py index 48119caf19..f0253c283e 100644 --- a/deepmd/pt/train/wrapper.py +++ b/deepmd/pt/train/wrapper.py @@ -112,8 +112,10 @@ def share_params(self, shared_links, resume=False) -> None: f"Shared params of {model_key_base}.{class_type_base} and {model_key_link}.{class_type_link}!" ) else: - if hasattr(self.model[model_key_base], class_type_base): - base_class = self.model[model_key_base].__getattr__(class_type_base) + if hasattr(self.model[model_key_base].atomic_model, class_type_base): + base_class = self.model[model_key_base].atomic_model.__getattr__( + class_type_base + ) for link_item in shared_links[shared_item]["links"][1:]: class_type_link = link_item["shared_type"] model_key_link = link_item["model_key"] @@ -124,9 +126,9 @@ def share_params(self, shared_links, resume=False) -> None: assert ( class_type_base == class_type_link ), f"Class type mismatched: {class_type_base} vs {class_type_link}!" - link_class = self.model[model_key_link].__getattr__( - class_type_link - ) + link_class = self.model[ + model_key_link + ].atomic_model.__getattr__(class_type_link) link_class.share_params( base_class, shared_level_link, resume=resume ) diff --git a/deepmd/tf/fit/dipole.py b/deepmd/tf/fit/dipole.py index c05fa4b525..4428d06536 100644 --- a/deepmd/tf/fit/dipole.py +++ b/deepmd/tf/fit/dipole.py @@ -58,6 +58,8 @@ class DipoleFittingSeA(Fitting): Number of frame parameters numb_aparam Number of atomic parameters + dim_case_embd + Dimension of case specific embedding. sel_type : list[int] The atom types selected to have an atomic dipole prediction. If is None, all atoms are selected. 
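`get_case_embd_config` above sorts the branch names before assigning indices, so the branch-to-one-hot mapping is deterministic rather than dependent on dict insertion order, and it insists that every branch requests the same `dim_case_embd`. An illustrative run with hypothetical branch names:

```py
model_params = {
    "model_dict": {
        "water_2": {"fitting_net": {"dim_case_embd": 2}},
        "water_1": {"fitting_net": {"dim_case_embd": 2}},
    }
}
sorted_model_keys = sorted(model_params["model_dict"])  # ['water_1', 'water_2']
case_embd_index = {key: idx for idx, key in enumerate(sorted_model_keys)}
# {'water_1': 0, 'water_2': 1}; each branch later receives the matching
# one-hot row of torch.eye(dim_case_embd)
```

Note that `set_case_embd` is skipped when `resuming` is true, so a restarted or fine-tuned run keeps the case embeddings already stored in the checkpoint.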
seed : int @@ -84,6 +86,7 @@ def __init__( resnet_dt: bool = True, numb_fparam: int = 0, numb_aparam: int = 0, + dim_case_embd: int = 0, sel_type: Optional[list[int]] = None, seed: Optional[int] = None, activation_function: str = "tanh", @@ -119,10 +122,13 @@ def __init__( self.type_map = type_map self.numb_fparam = numb_fparam self.numb_aparam = numb_aparam + self.dim_case_embd = dim_case_embd if numb_fparam > 0: raise ValueError("numb_fparam is not supported in the dipole fitting") if numb_aparam > 0: raise ValueError("numb_aparam is not supported in the dipole fitting") + if dim_case_embd > 0: + raise ValueError("dim_case_embd is not supported in TensorFlow.") self.fparam_avg = None self.fparam_std = None self.fparam_inv_std = None @@ -385,7 +391,7 @@ def serialize(self, suffix: str) -> dict: data = { "@class": "Fitting", "type": "dipole", - "@version": 2, + "@version": 3, "ntypes": self.ntypes, "dim_descrpt": self.dim_descrpt, "embedding_width": self.dim_rot_mat_1, @@ -395,6 +401,7 @@ def serialize(self, suffix: str) -> dict: "resnet_dt": self.resnet_dt, "numb_fparam": self.numb_fparam, "numb_aparam": self.numb_aparam, + "dim_case_embd": self.dim_case_embd, "activation_function": self.activation_function_name, "precision": self.fitting_precision.name, "exclude_types": [], @@ -428,7 +435,7 @@ def deserialize(cls, data: dict, suffix: str): The deserialized model """ data = data.copy() - check_version_compatibility(data.pop("@version", 1), 2, 1) + check_version_compatibility(data.pop("@version", 1), 3, 1) fitting = cls(**data) fitting.fitting_net_variables = cls.deserialize_network( data["nets"], diff --git a/deepmd/tf/fit/dos.py b/deepmd/tf/fit/dos.py index 099cba0d12..1da0e55a92 100644 --- a/deepmd/tf/fit/dos.py +++ b/deepmd/tf/fit/dos.py @@ -74,6 +74,8 @@ class DOSFitting(Fitting): Number of frame parameter numb_aparam Number of atomic parameter + dim_case_embd + Dimension of case specific embedding. ! 
numb_dos (added) Number of gridpoints on which the DOS is evaluated (NEDOS in VASP) rcond @@ -111,6 +113,7 @@ def __init__( resnet_dt: bool = True, numb_fparam: int = 0, numb_aparam: int = 0, + dim_case_embd: int = 0, numb_dos: int = 300, rcond: Optional[float] = None, trainable: Optional[list[bool]] = None, @@ -132,6 +135,9 @@ def __init__( self.numb_fparam = numb_fparam self.numb_aparam = numb_aparam + self.dim_case_embd = dim_case_embd + if dim_case_embd > 0: + raise ValueError("dim_case_embd is not supported in TensorFlow.") self.numb_dos = numb_dos @@ -672,7 +678,7 @@ def deserialize(cls, data: dict, suffix: str = ""): The deserialized model """ data = data.copy() - check_version_compatibility(data.pop("@version", 1), 2, 1) + check_version_compatibility(data.pop("@version", 1), 3, 1) data["numb_dos"] = data.pop("dim_out") fitting = cls(**data) fitting.fitting_net_variables = cls.deserialize_network( @@ -699,7 +705,7 @@ def serialize(self, suffix: str = "") -> dict: data = { "@class": "Fitting", "type": "dos", - "@version": 2, + "@version": 3, "var_name": "dos", "ntypes": self.ntypes, "dim_descrpt": self.dim_descrpt, @@ -709,6 +715,7 @@ def serialize(self, suffix: str = "") -> dict: "resnet_dt": self.resnet_dt, "numb_fparam": self.numb_fparam, "numb_aparam": self.numb_aparam, + "dim_case_embd": self.dim_case_embd, "rcond": self.rcond, "trainable": self.trainable, "activation_function": self.activation_function, @@ -731,6 +738,7 @@ def serialize(self, suffix: str = "") -> dict: "fparam_inv_std": self.fparam_inv_std, "aparam_avg": self.aparam_avg, "aparam_inv_std": self.aparam_inv_std, + "case_embd": None, }, "type_map": self.type_map, } diff --git a/deepmd/tf/fit/ener.py b/deepmd/tf/fit/ener.py index 7a3ee8eade..068d3d8e35 100644 --- a/deepmd/tf/fit/ener.py +++ b/deepmd/tf/fit/ener.py @@ -117,6 +117,8 @@ class EnerFitting(Fitting): Number of frame parameter numb_aparam Number of atomic parameter + dim_case_embd + Dimension of case specific embedding. rcond The condition number for the regression of atomic energy. 
tot_ener_zero @@ -156,6 +158,7 @@ def __init__( resnet_dt: bool = True, numb_fparam: int = 0, numb_aparam: int = 0, + dim_case_embd: int = 0, rcond: Optional[float] = None, tot_ener_zero: bool = False, trainable: Optional[list[bool]] = None, @@ -190,6 +193,9 @@ def __init__( # .add("trainable", [list, bool], default = True) self.numb_fparam = numb_fparam self.numb_aparam = numb_aparam + self.dim_case_embd = dim_case_embd + if dim_case_embd > 0: + raise ValueError("dim_case_embd is not supported in TensorFlow.") self.n_neuron = neuron self.resnet_dt = resnet_dt self.rcond = rcond @@ -878,7 +884,7 @@ def deserialize(cls, data: dict, suffix: str = ""): The deserialized model """ data = data.copy() - check_version_compatibility(data.pop("@version", 1), 2, 1) + check_version_compatibility(data.pop("@version", 1), 3, 1) fitting = cls(**data) fitting.fitting_net_variables = cls.deserialize_network( data["nets"], @@ -904,7 +910,7 @@ def serialize(self, suffix: str = "") -> dict: data = { "@class": "Fitting", "type": "ener", - "@version": 2, + "@version": 3, "var_name": "energy", "ntypes": self.ntypes, "dim_descrpt": self.dim_descrpt + self.tebd_dim, @@ -914,6 +920,7 @@ def serialize(self, suffix: str = "") -> dict: "resnet_dt": self.resnet_dt, "numb_fparam": self.numb_fparam, "numb_aparam": self.numb_aparam, + "dim_case_embd": self.dim_case_embd, "rcond": self.rcond, "tot_ener_zero": self.tot_ener_zero, "trainable": self.trainable, @@ -945,6 +952,7 @@ def serialize(self, suffix: str = "") -> dict: "fparam_inv_std": self.fparam_inv_std, "aparam_avg": self.aparam_avg, "aparam_inv_std": self.aparam_inv_std, + "case_embd": None, }, "type_map": self.type_map, } diff --git a/deepmd/tf/fit/polar.py b/deepmd/tf/fit/polar.py index 2f1400e697..14dd6ee092 100644 --- a/deepmd/tf/fit/polar.py +++ b/deepmd/tf/fit/polar.py @@ -63,6 +63,8 @@ class PolarFittingSeA(Fitting): Number of frame parameters numb_aparam Number of atomic parameters + dim_case_embd + Dimension of case specific embedding. sel_type : list[int] The atom types selected to have an atomic polarizability prediction. If is None, all atoms are selected. 
fit_diag : bool @@ -95,6 +97,7 @@ def __init__( resnet_dt: bool = True, numb_fparam: int = 0, numb_aparam: int = 0, + dim_case_embd: int = 0, sel_type: Optional[list[int]] = None, fit_diag: bool = True, scale: Optional[list[float]] = None, @@ -162,10 +165,13 @@ def __init__( self.type_map = type_map self.numb_fparam = numb_fparam self.numb_aparam = numb_aparam + self.dim_case_embd = dim_case_embd if numb_fparam > 0: raise ValueError("numb_fparam is not supported in the dipole fitting") if numb_aparam > 0: raise ValueError("numb_aparam is not supported in the dipole fitting") + if dim_case_embd > 0: + raise ValueError("dim_case_embd is not supported in TensorFlow.") self.fparam_avg = None self.fparam_std = None self.fparam_inv_std = None @@ -578,7 +584,7 @@ def serialize(self, suffix: str) -> dict: data = { "@class": "Fitting", "type": "polar", - "@version": 3, + "@version": 4, "ntypes": self.ntypes, "dim_descrpt": self.dim_descrpt, "embedding_width": self.dim_rot_mat_1, @@ -588,6 +594,7 @@ def serialize(self, suffix: str) -> dict: "resnet_dt": self.resnet_dt, "numb_fparam": self.numb_fparam, "numb_aparam": self.numb_aparam, + "dim_case_embd": self.dim_case_embd, "activation_function": self.activation_function_name, "precision": self.fitting_precision.name, "exclude_types": [], @@ -625,7 +632,7 @@ def deserialize(cls, data: dict, suffix: str): """ data = data.copy() check_version_compatibility( - data.pop("@version", 1), 3, 1 + data.pop("@version", 1), 4, 1 ) # to allow PT version. fitting = cls(**data) fitting.fitting_net_variables = cls.deserialize_network( diff --git a/deepmd/utils/argcheck.py b/deepmd/utils/argcheck.py index d5419a38cd..5b57f15979 100644 --- a/deepmd/utils/argcheck.py +++ b/deepmd/utils/argcheck.py @@ -1433,6 +1433,7 @@ def descrpt_variant_type_args(exclude_hybrid: bool = False) -> Variant: def fitting_ener(): doc_numb_fparam = "The dimension of the frame parameter. If set to >0, file `fparam.npy` should be included to provided the input fparams." doc_numb_aparam = "The dimension of the atomic parameter. If set to >0, file `aparam.npy` should be included to provided the input aparams." + doc_dim_case_embd = "The dimension of the case embedding. When training or fine-tuning a multitask model with case embeddings, this number should be set to the number of model branches." doc_neuron = "The number of neurons in each hidden layers of the fitting net. When two hidden layers are of the same size, a skip connection is built." doc_activation_function = f'The activation function in the fitting net. Supported activation functions are {list_to_doc(ACTIVATION_FN_DICT.keys())} Note that "gelu" denotes the custom operator version, and "gelu_tf" denotes the TF standard version. If you set "None" or "none" here, no activation function will be used.' doc_precision = f"The precision of the fitting net parameters, supported options are {list_to_doc(PRECISION_DICT.keys())} Default follows the interface precision." @@ -1459,6 +1460,13 @@ def fitting_ener(): return [ Argument("numb_fparam", int, optional=True, default=0, doc=doc_numb_fparam), Argument("numb_aparam", int, optional=True, default=0, doc=doc_numb_aparam), + Argument( + "dim_case_embd", + int, + optional=True, + default=0, + doc=doc_only_pt_supported + doc_dim_case_embd, + ), Argument( "neuron", list[int], @@ -1509,6 +1517,7 @@ def fitting_ener(): def fitting_dos(): doc_numb_fparam = "The dimension of the frame parameter. If set to >0, file `fparam.npy` should be included to provided the input fparams."
doc_numb_aparam = "The dimension of the atomic parameter. If set to >0, file `aparam.npy` should be included to provided the input aparams." + doc_dim_case_embd = "The dimension of the case embedding embedding. When training or fine-tuning a multitask model with case embedding embeddings, this number should be set to the number of model branches." doc_neuron = "The number of neurons in each hidden layers of the fitting net. When two hidden layers are of the same size, a skip connection is built." doc_activation_function = f'The activation function in the fitting net. Supported activation functions are {list_to_doc(ACTIVATION_FN_DICT.keys())} Note that "gelu" denotes the custom operator version, and "gelu_tf" denotes the TF standard version. If you set "None" or "none" here, no activation function will be used.' doc_precision = f"The precision of the fitting net parameters, supported options are {list_to_doc(PRECISION_DICT.keys())} Default follows the interface precision." @@ -1525,6 +1534,13 @@ def fitting_dos(): return [ Argument("numb_fparam", int, optional=True, default=0, doc=doc_numb_fparam), Argument("numb_aparam", int, optional=True, default=0, doc=doc_numb_aparam), + Argument( + "dim_case_embd", + int, + optional=True, + default=0, + doc=doc_only_pt_supported + doc_dim_case_embd, + ), Argument( "neuron", list[int], optional=True, default=[120, 120, 120], doc=doc_neuron ), @@ -1556,6 +1572,7 @@ def fitting_dos(): def fitting_property(): doc_numb_fparam = "The dimension of the frame parameter. If set to >0, file `fparam.npy` should be included to provided the input fparams." doc_numb_aparam = "The dimension of the atomic parameter. If set to >0, file `aparam.npy` should be included to provided the input aparams." + doc_dim_case_embd = "The dimension of the case embedding embedding. When training or fine-tuning a multitask model with case embedding embeddings, this number should be set to the number of model branches." doc_neuron = "The number of neurons in each hidden layers of the fitting net. When two hidden layers are of the same size, a skip connection is built" doc_activation_function = f'The activation function in the fitting net. Supported activation functions are {list_to_doc(ACTIVATION_FN_DICT.keys())} Note that "gelu" denotes the custom operator version, and "gelu_tf" denotes the TF standard version. If you set "None" or "none" here, no activation function will be used.' doc_resnet_dt = 'Whether to use a "Timestep" in the skip connection' @@ -1567,6 +1584,13 @@ def fitting_property(): return [ Argument("numb_fparam", int, optional=True, default=0, doc=doc_numb_fparam), Argument("numb_aparam", int, optional=True, default=0, doc=doc_numb_aparam), + Argument( + "dim_case_embd", + int, + optional=True, + default=0, + doc=doc_only_pt_supported + doc_dim_case_embd, + ), Argument( "neuron", list[int], @@ -1597,6 +1621,7 @@ def fitting_property(): def fitting_polar(): doc_numb_fparam = "The dimension of the frame parameter. If set to >0, file `fparam.npy` should be included to provided the input fparams." doc_numb_aparam = "The dimension of the atomic parameter. If set to >0, file `aparam.npy` should be included to provided the input aparams." + doc_dim_case_embd = "The dimension of the case embedding embedding. When training or fine-tuning a multitask model with case embedding embeddings, this number should be set to the number of model branches." doc_neuron = "The number of neurons in each hidden layers of the fitting net. 
When two hidden layers are of the same size, a skip connection is built." doc_activation_function = f'The activation function in the fitting net. Supported activation functions are {list_to_doc(ACTIVATION_FN_DICT.keys())} Note that "gelu" denotes the custom operator version, and "gelu_tf" denotes the TF standard version. If you set "None" or "none" here, no activation function will be used.' doc_resnet_dt = 'Whether to use a "Timestep" in the skip connection' @@ -1625,6 +1650,13 @@ def fitting_polar(): default=0, doc=doc_only_pt_supported + doc_numb_aparam, ), + Argument( + "dim_case_embd", + int, + optional=True, + default=0, + doc=doc_only_pt_supported + doc_dim_case_embd, + ), Argument( "neuron", list[int], @@ -1667,6 +1699,7 @@ def fitting_polar(): def fitting_dipole(): doc_numb_fparam = "The dimension of the frame parameter. If set to >0, file `fparam.npy` should be included to provided the input fparams." doc_numb_aparam = "The dimension of the atomic parameter. If set to >0, file `aparam.npy` should be included to provided the input aparams." + doc_dim_case_embd = "The dimension of the case embedding. When training or fine-tuning a multitask model with case embeddings, this number should be set to the number of model branches." doc_neuron = "The number of neurons in each hidden layers of the fitting net. When two hidden layers are of the same size, a skip connection is built." doc_activation_function = f'The activation function in the fitting net. Supported activation functions are {list_to_doc(ACTIVATION_FN_DICT.keys())} Note that "gelu" denotes the custom operator version, and "gelu_tf" denotes the TF standard version. If you set "None" or "none" here, no activation function will be used.' doc_resnet_dt = 'Whether to use a "Timestep" in the skip connection' @@ -1688,6 +1721,13 @@ def fitting_dipole(): default=0, doc=doc_only_pt_supported + doc_numb_aparam, ), + Argument( + "dim_case_embd", + int, + optional=True, + default=0, + doc=doc_only_pt_supported + doc_dim_case_embd, + ), Argument( "neuron", list[int], diff --git a/doc/train/multi-task-training.md b/doc/train/multi-task-training.md index 9d5b71592e..51dffcc5f5 100644 --- a/doc/train/multi-task-training.md +++ b/doc/train/multi-task-training.md @@ -48,14 +48,27 @@ Specifically, there are several parts that need to be modified: - {ref}`model/model_dict `: The core definition of the model part and the explanation of sharing rules, starting with user-defined model name keys `model_key`, such as `my_model_1`. Each model part needs to align with the components of the single-task training {ref}`model `, but with the following sharing rules: -- - If you want to share the current model component with other tasks, which should be part of the {ref}`model/shared_dict `, + + - If you want to share the current model component with other tasks, which should be part of the {ref}`model/shared_dict `, you can directly fill in the corresponding `part_key`, such as `"descriptor": "my_descriptor", ` to replace the previous detailed parameters. Here, you can also specify the shared_level, such as `"descriptor": "my_descriptor:shared_level", ` - and use the user-defined integer `shared_level` in the code to share the corresponding module to varying degrees - (default is to share all parameters, i.e., `shared_level`=0). - The parts that are exclusive to each model can be written following the previous definition. + and use the user-defined integer `shared_level` in the code to share the corresponding module to varying degrees.
+ - For descriptors, `shared_level` can be set as follows: + - Valid `shared_level` values are 0-1, depending on the descriptor type + - Each level enables different sharing behaviors: + - Level 0: Shares all parameters (default) + - Level 1: Shares type embedding only + - Not all descriptors support all levels (e.g., se_a only supports level 0) + - For fitting nets, we only support the default `shared_level`=0, where all parameters will be shared except for `bias_atom_e` and `case_embd`. + - To conduct multitask training, there are two typical approaches: + 1. **Descriptor sharing only**: Share the descriptor with `shared_level`=0. See [here](../../examples/water_multi_task/pytorch_example/input_torch.json) for an example. + 2. **Descriptor and fitting network sharing with data identification**: + - Share the descriptor and the fitting network with `shared_level`=0. + - {ref}`dim_case_embd ` must be set to the number of model branches, which will distinguish different data tasks using a one-hot embedding. + - See [here](../../examples/water_multi_task/pytorch_example/input_torch_sharefit.json) for an example. + - The parts that are exclusive to each model can be written following the previous definition. - {ref}`loss_dict `: The loss settings corresponding to each task model, specified by the `model_key`. Each {ref}`loss_dict/model_key ` contains the corresponding loss settings, diff --git a/examples/water_multi_task/pytorch_example/input_torch_sharefit.json b/examples/water_multi_task/pytorch_example/input_torch_sharefit.json new file mode 100644 index 0000000000..2fc23007c6 --- /dev/null +++ b/examples/water_multi_task/pytorch_example/input_torch_sharefit.json @@ -0,0 +1,155 @@ +{ + "_comment": "that's all", + "model": { + "shared_dict": { + "type_map_all": [ + "O", + "H" + ], + "dpa2_descriptor": { + "type": "dpa2", + "repinit": { + "tebd_dim": 8, + "rcut": 6.0, + "rcut_smth": 0.5, + "nsel": 120, + "neuron": [ + 25, + 50, + 100 + ], + "axis_neuron": 12, + "activation_function": "tanh", + "three_body_sel": 48, + "three_body_rcut": 4.0, + "three_body_rcut_smth": 3.5, + "use_three_body": true + }, + "repformer": { + "rcut": 4.0, + "rcut_smth": 3.5, + "nsel": 48, + "nlayers": 6, + "g1_dim": 128, + "g2_dim": 32, + "attn2_hidden": 32, + "attn2_nhead": 4, + "attn1_hidden": 128, + "attn1_nhead": 4, + "axis_neuron": 4, + "update_h2": false, + "update_g1_has_conv": true, + "update_g1_has_grrg": true, + "update_g1_has_drrd": true, + "update_g1_has_attn": false, + "update_g2_has_g1g1": false, + "update_g2_has_attn": true, + "update_style": "res_residual", + "update_residual": 0.01, + "update_residual_init": "norm", + "attn2_has_gate": true, + "use_sqrt_nnei": true, + "g1_out_conv": true, + "g1_out_mlp": true + }, + "precision": "float64", + "add_tebd_to_repinit_out": false, + "_comment": " that's all" + }, + "shared_fit_with_id": { + "neuron": [ + 240, + 240, + 240 + ], + "resnet_dt": true, + "seed": 1, + "dim_case_embd": 2, + "_comment": " that's all" + }, + "_comment": "that's all" + }, + "model_dict": { + "water_1": { + "type_map": "type_map_all", + "descriptor": "dpa2_descriptor", + "fitting_net": "shared_fit_with_id" + }, + "water_2": { + "type_map": "type_map_all", + "descriptor": "dpa2_descriptor", + "fitting_net": "shared_fit_with_id" + } + } + }, + "learning_rate": { + "type": "exp", + "decay_steps": 5000, + "start_lr": 0.001, + "stop_lr": 3.51e-08, + "_comment": "that's all" + }, + "loss_dict": { + "water_1": { + "type": "ener", + "start_pref_e": 0.02, + "limit_pref_e": 1, + 
"start_pref_f": 1000, + "limit_pref_f": 1, + "start_pref_v": 0, + "limit_pref_v": 0 + }, + "water_2": { + "type": "ener", + "start_pref_e": 0.02, + "limit_pref_e": 1, + "start_pref_f": 1000, + "limit_pref_f": 1, + "start_pref_v": 0, + "limit_pref_v": 0 + } + }, + "training": { + "model_prob": { + "water_1": 0.5, + "water_2": 0.5 + }, + "data_dict": { + "water_1": { + "training_data": { + "systems": [ + "../../water/data/data_0/", + "../../water/data/data_1/", + "../../water/data/data_2/" + ], + "batch_size": 1, + "_comment": "that's all" + }, + "validation_data": { + "systems": [ + "../../water/data/data_3/" + ], + "batch_size": 1, + "_comment": "that's all" + } + }, + "water_2": { + "training_data": { + "systems": [ + "../../water/data/data_0/", + "../../water/data/data_1/", + "../../water/data/data_2/" + ], + "batch_size": 1, + "_comment": "that's all" + } + } + }, + "numb_steps": 100000, + "seed": 10, + "disp_file": "lcurve.out", + "disp_freq": 100, + "save_freq": 100, + "_comment": "that's all" + } +} diff --git a/source/tests/common/test_examples.py b/source/tests/common/test_examples.py index 068a91709c..1ddbb50db9 100644 --- a/source/tests/common/test_examples.py +++ b/source/tests/common/test_examples.py @@ -64,6 +64,7 @@ input_files_multi = ( p_examples / "water_multi_task" / "pytorch_example" / "input_torch.json", + p_examples / "water_multi_task" / "pytorch_example" / "input_torch_sharefit.json", ) diff --git a/source/tests/pt/model/water/multitask.json b/source/tests/pt/model/water/multitask.json index 06a4f88e55..e8d998e6f1 100644 --- a/source/tests/pt/model/water/multitask.json +++ b/source/tests/pt/model/water/multitask.json @@ -10,7 +10,8 @@ "type": "se_e2_a", "sel": [ 46, - 92 + 92, + 4 ], "rcut_smth": 0.50, "rcut": 6.00, diff --git a/source/tests/pt/model/water/multitask_sharefit.json b/source/tests/pt/model/water/multitask_sharefit.json new file mode 100644 index 0000000000..246b5992f7 --- /dev/null +++ b/source/tests/pt/model/water/multitask_sharefit.json @@ -0,0 +1,134 @@ +{ + "model": { + "shared_dict": { + "my_type_map": [ + "O", + "H", + "B" + ], + "my_descriptor": { + "type": "se_e2_a", + "sel": [ + 46, + 92, + 4 + ], + "rcut_smth": 0.50, + "rcut": 6.00, + "neuron": [ + 25, + 50, + 100 + ], + "resnet_dt": false, + "axis_neuron": 16, + "seed": 1, + "_comment": " that's all" + }, + "my_fitting": { + "dim_case_embd": 2, + "neuron": [ + 240, + 240, + 240 + ], + "resnet_dt": true, + "seed": 1, + "_comment": " that's all" + }, + "_comment": "that's all" + }, + "model_dict": { + "model_1": { + "type_map": "my_type_map", + "descriptor": "my_descriptor", + "fitting_net": "my_fitting", + "data_stat_nbatch": 1 + }, + "model_2": { + "type_map": "my_type_map", + "descriptor": "my_descriptor", + "fitting_net": "my_fitting", + "data_stat_nbatch": 1 + } + } + }, + "learning_rate": { + "type": "exp", + "decay_steps": 5000, + "start_lr": 0.0002, + "decay_rate": 0.98, + "stop_lr": 3.51e-08, + "_comment": "that's all" + }, + "loss_dict": { + "model_1": { + "type": "ener", + "start_pref_e": 0.02, + "limit_pref_e": 1, + "start_pref_f": 1000, + "limit_pref_f": 1, + "start_pref_v": 0, + "limit_pref_v": 0 + }, + "model_2": { + "type": "ener", + "start_pref_e": 0.02, + "limit_pref_e": 1, + "start_pref_f": 1000, + "limit_pref_f": 1, + "start_pref_v": 0, + "limit_pref_v": 0 + } + }, + "training": { + "model_prob": { + "model_1": 0.5, + "model_2": 0.5 + }, + "data_dict": { + "model_1": { + "stat_file": "./stat_files/model_1.hdf5", + "training_data": { + "systems": [ + "pt/water/data/data_0" + 
], + "batch_size": 1, + "_comment": "that's all" + }, + "validation_data": { + "systems": [ + "pt/water/data/data_0" + ], + "batch_size": 1, + "_comment": "that's all" + } + }, + "model_2": { + "stat_file": "./stat_files/model_2.hdf5", + "training_data": { + "systems": [ + "pt/water/data/data_0" + ], + "batch_size": 1, + "_comment": "that's all" + }, + "validation_data": { + "systems": [ + "pt/water/data/data_0" + ], + "batch_size": 1, + "_comment": "that's all" + } + } + }, + "numb_steps": 100000, + "warmup_steps": 0, + "gradient_max_norm": 5.0, + "seed": 10, + "disp_file": "lcurve.out", + "disp_freq": 100, + "save_freq": 100, + "_comment": "that's all" + } +} diff --git a/source/tests/pt/test_multitask.py b/source/tests/pt/test_multitask.py index a59d6f8e54..62964abad3 100644 --- a/source/tests/pt/test_multitask.py +++ b/source/tests/pt/test_multitask.py @@ -42,12 +42,20 @@ def setUpModule() -> None: with open(multitask_template_json) as f: multitask_template = json.load(f) + global multitask_sharefit_template + multitask_sharefit_template_json = str( + Path(__file__).parent / "water/multitask_sharefit.json" + ) + with open(multitask_sharefit_template_json) as f: + multitask_sharefit_template = json.load(f) + class MultiTaskTrainTest: def test_multitask_train(self) -> None: # test multitask training self.config = update_deepmd_input(self.config, warning=True) self.config = normalize(self.config, multi_task=True) + self.share_fitting = getattr(self, "share_fitting", False) trainer = get_trainer(deepcopy(self.config), shared_links=self.shared_links) trainer.run() # check model keys @@ -62,7 +70,12 @@ def test_multitask_train(self) -> None: self.assertIn(state_key.replace("model_1", "model_2"), multi_state_dict) if "model_2" in state_key: self.assertIn(state_key.replace("model_2", "model_1"), multi_state_dict) - if "model_1.descriptor" in state_key: + if ("model_1.atomic_model.descriptor" in state_key) or ( + self.share_fitting + and "model_1.atomic_model.fitting_net" in state_key + and "fitting_net.bias_atom_e" not in state_key + and "fitting_net.case_embd" not in state_key + ): torch.testing.assert_close( multi_state_dict[state_key], multi_state_dict[state_key.replace("model_1", "model_2")], @@ -223,6 +236,46 @@ def tearDown(self) -> None: MultiTaskTrainTest.tearDown(self) +class TestMultiTaskSeASharefit(unittest.TestCase, MultiTaskTrainTest): + def setUp(self) -> None: + multitask_se_e2_a = deepcopy(multitask_sharefit_template) + multitask_se_e2_a["model"]["shared_dict"]["my_descriptor"] = model_se_e2_a[ + "descriptor" + ] + data_file = [str(Path(__file__).parent / "water/data/data_0")] + self.stat_files = "se_e2_a_share_fit" + os.makedirs(self.stat_files, exist_ok=True) + self.config = multitask_se_e2_a + self.config["training"]["data_dict"]["model_1"]["training_data"]["systems"] = ( + data_file + ) + self.config["training"]["data_dict"]["model_1"]["validation_data"][ + "systems" + ] = data_file + self.config["training"]["data_dict"]["model_1"]["stat_file"] = ( + f"{self.stat_files}/model_1" + ) + self.config["training"]["data_dict"]["model_2"]["training_data"]["systems"] = ( + data_file + ) + self.config["training"]["data_dict"]["model_2"]["validation_data"][ + "systems" + ] = data_file + self.config["training"]["data_dict"]["model_2"]["stat_file"] = ( + f"{self.stat_files}/model_2" + ) + self.config["training"]["numb_steps"] = 1 + self.config["training"]["save_freq"] = 1 + self.origin_config = deepcopy(self.config) + self.config["model"], self.shared_links = preprocess_shared_params( 
+ self.config["model"] + ) + self.share_fitting = True + + def tearDown(self) -> None: + MultiTaskTrainTest.tearDown(self) + + class TestMultiTaskDPA1(unittest.TestCase, MultiTaskTrainTest): def setUp(self) -> None: multitask_DPA1 = deepcopy(multitask_template) diff --git a/source/tests/universal/dpmodel/fitting/test_fitting.py b/source/tests/universal/dpmodel/fitting/test_fitting.py index fe6ffd2e09..db199c02a3 100644 --- a/source/tests/universal/dpmodel/fitting/test_fitting.py +++ b/source/tests/universal/dpmodel/fitting/test_fitting.py @@ -39,7 +39,7 @@ def FittingParamEnergy( exclude_types=[], precision="float64", embedding_width=None, - numb_param=0, # test numb_fparam and numb_aparam together + numb_param=0, # test numb_fparam, numb_aparam and dim_case_embd together ): input_dict = { "ntypes": ntypes, @@ -51,6 +51,7 @@ def FittingParamEnergy( "precision": precision, "numb_fparam": numb_param, "numb_aparam": numb_param, + "dim_case_embd": numb_param, } return input_dict @@ -77,7 +78,7 @@ def FittingParamDos( exclude_types=[], precision="float64", embedding_width=None, - numb_param=0, # test numb_fparam and numb_aparam together + numb_param=0, # test numb_fparam, numb_aparam and dim_case_embd together ): input_dict = { "ntypes": ntypes, @@ -89,6 +90,7 @@ def FittingParamDos( "precision": precision, "numb_fparam": numb_param, "numb_aparam": numb_param, + "dim_case_embd": numb_param, } return input_dict @@ -115,7 +117,7 @@ def FittingParamDipole( exclude_types=[], precision="float64", embedding_width=None, - numb_param=0, # test numb_fparam and numb_aparam together + numb_param=0, # test numb_fparam, numb_aparam and dim_case_embd together ): assert ( embedding_width is not None @@ -131,6 +133,7 @@ def FittingParamDipole( "precision": precision, "numb_fparam": numb_param, "numb_aparam": numb_param, + "dim_case_embd": numb_param, } return input_dict @@ -157,7 +160,7 @@ def FittingParamPolar( exclude_types=[], precision="float64", embedding_width=None, - numb_param=0, # test numb_fparam and numb_aparam together + numb_param=0, # test numb_fparam, numb_aparam and dim_case_embd together ): assert embedding_width is not None, "embedding_width for polar fitting is required." 
input_dict = { @@ -171,6 +174,7 @@ def FittingParamPolar( "precision": precision, "numb_fparam": numb_param, "numb_aparam": numb_param, + "dim_case_embd": numb_param, } return input_dict @@ -197,7 +201,7 @@ def FittingParamProperty( exclude_types=[], precision="float64", embedding_width=None, - numb_param=0, # test numb_fparam and numb_aparam together + numb_param=0, # test numb_fparam, numb_aparam and dim_case_embd together ): input_dict = { "ntypes": ntypes, @@ -209,6 +213,7 @@ def FittingParamProperty( "precision": precision, "numb_fparam": numb_param, "numb_aparam": numb_param, + "dim_case_embd": numb_param, } return input_dict From a11f264b5c3794b381dbf7e07bf4b5467bf8fc7b Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Fri, 29 Nov 2024 10:34:50 +0800 Subject: [PATCH 11/43] [pre-commit.ci] pre-commit autoupdate (#4420) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.7.4 → v0.8.0](https://github.com/astral-sh/ruff-pre-commit/compare/v0.7.4...v0.8.0) - [github.com/pre-commit/mirrors-clang-format: v19.1.3 → v19.1.4](https://github.com/pre-commit/mirrors-clang-format/compare/v19.1.3...v19.1.4) --------- Signed-off-by: Jinzhe Zeng Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Jinzhe Zeng --- .pre-commit-config.yaml | 4 +- deepmd/__init__.py | 2 +- deepmd/common.py | 12 ++--- deepmd/dpmodel/__init__.py | 12 ++--- deepmd/dpmodel/atomic_model/__init__.py | 12 ++--- deepmd/dpmodel/common.py | 4 +- deepmd/dpmodel/descriptor/__init__.py | 8 ++-- deepmd/dpmodel/fitting/__init__.py | 6 +-- deepmd/dpmodel/model/__init__.py | 2 +- deepmd/dpmodel/utils/__init__.py | 26 +++++------ deepmd/entrypoints/test.py | 16 +++---- deepmd/env.py | 8 ++-- deepmd/infer/__init__.py | 4 +- deepmd/infer/deep_eval.py | 6 +-- deepmd/infer/model_devi.py | 8 ++-- deepmd/jax/descriptor/__init__.py | 8 ++-- deepmd/jax/env.py | 2 +- deepmd/jax/fitting/__init__.py | 2 +- deepmd/jax/model/__init__.py | 4 +- deepmd/pd/model/model/__init__.py | 2 +- deepmd/pd/model/network/init.py | 6 +-- deepmd/pd/model/task/__init__.py | 2 +- deepmd/pd/train/training.py | 6 +-- deepmd/pd/utils/__init__.py | 2 +- deepmd/pd/utils/decomp.py | 6 +-- deepmd/pd/utils/env.py | 16 +++---- deepmd/pt/cxx_op.py | 11 ++--- deepmd/pt/loss/__init__.py | 8 ++-- deepmd/pt/model/atomic_model/__init__.py | 8 ++-- deepmd/pt/model/descriptor/__init__.py | 14 +++--- deepmd/pt/model/model/__init__.py | 16 +++---- deepmd/pt/model/network/init.py | 6 +-- deepmd/pt/model/task/__init__.py | 6 +-- deepmd/pt/train/training.py | 6 +-- deepmd/pt/utils/__init__.py | 2 +- deepmd/pt/utils/env.py | 16 +++---- deepmd/pt/utils/plugin.py | 4 +- deepmd/tf/__init__.py | 10 ++-- deepmd/tf/common.py | 18 ++++---- deepmd/tf/descriptor/__init__.py | 2 +- deepmd/tf/entrypoints/__init__.py | 12 ++--- deepmd/tf/entrypoints/main.py | 2 +- deepmd/tf/env.py | 37 +++++++-------- deepmd/tf/fit/__init__.py | 4 +- deepmd/tf/fit/ener.py | 2 +- deepmd/tf/infer/__init__.py | 4 +- deepmd/tf/infer/deep_eval.py | 6 +-- deepmd/tf/infer/deep_polar.py | 2 +- deepmd/tf/infer/model_devi.py | 4 +- deepmd/tf/loss/__init__.py | 2 +- deepmd/tf/model/__init__.py | 8 ++-- deepmd/tf/model/model_stat.py | 2 +- deepmd/tf/nvnmd/entrypoints/__init__.py | 2 +- deepmd/tf/nvnmd/entrypoints/wrap.py | 2 +- deepmd/tf/nvnmd/fit/ener.py | 2 +- deepmd/tf/nvnmd/utils/__init__.py | 8 ++-- 
deepmd/tf/nvnmd/utils/config.py | 2 +- deepmd/tf/train/trainer.py | 27 +++++------ deepmd/tf/utils/argcheck.py | 6 +-- deepmd/tf/utils/data_system.py | 2 +- deepmd/tf/utils/errors.py | 2 +- deepmd/tf/utils/parallel_op.py | 2 +- deepmd/tf/utils/path.py | 4 +- deepmd/tf/utils/plugin.py | 4 +- deepmd/utils/batch_size.py | 11 ++--- deepmd/utils/data.py | 6 +-- deepmd/utils/data_system.py | 38 +++++++-------- deepmd/utils/econf_embd.py | 2 +- deepmd/utils/pair_tab.py | 7 ++- deepmd/utils/update_sel.py | 6 +-- doc/conf.py | 4 +- doc/getting-started/quick_start.ipynb | 8 ++-- source/install/build_tf.py | 4 +- source/lmp/tests/write_lmp_data.py | 46 +++++-------------- .../array_api_strict/descriptor/__init__.py | 4 +- source/tests/consistent/common.py | 9 ++-- source/tests/pd/model/test_model.py | 6 +-- source/tests/pt/model/test_model.py | 6 +-- source/tests/tf/common.py | 22 ++------- source/tests/tf/test_data_modifier.py | 2 +- source/tests/tf/test_deepmd_data_sys.py | 8 ++-- source/tests/tf/test_ewald.py | 2 +- source/tests/tf/test_gen_stat_data.py | 2 +- 83 files changed, 295 insertions(+), 359 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 7aa2012200..df101ff67b 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -29,7 +29,7 @@ repos: exclude: ^source/3rdparty - repo: https://github.com/astral-sh/ruff-pre-commit # Ruff version. - rev: v0.7.4 + rev: v0.8.0 hooks: - id: ruff args: ["--fix"] @@ -60,7 +60,7 @@ repos: - id: blacken-docs # C++ - repo: https://github.com/pre-commit/mirrors-clang-format - rev: v19.1.3 + rev: v19.1.4 hooks: - id: clang-format exclude: ^(source/3rdparty|source/lib/src/gpu/cudart/.+\.inc|.+\.ipynb$) diff --git a/deepmd/__init__.py b/deepmd/__init__.py index 6f2b65ba63..14d933da11 100644 --- a/deepmd/__init__.py +++ b/deepmd/__init__.py @@ -40,6 +40,6 @@ def DeepPotential(*args, **kwargs): __all__ = [ - "__version__", "DeepPotential", + "__version__", ] diff --git a/deepmd/common.py b/deepmd/common.py index 1e66113306..d109c64570 100644 --- a/deepmd/common.py +++ b/deepmd/common.py @@ -35,13 +35,13 @@ ) __all__ = [ - "select_idx_map", - "make_default_mesh", - "j_loader", + "VALID_ACTIVATION", + "VALID_PRECISION", "expand_sys_str", "get_np_precision", - "VALID_PRECISION", - "VALID_ACTIVATION", + "j_loader", + "make_default_mesh", + "select_idx_map", ] _PRECISION = Literal["default", "float16", "float32", "float64"] @@ -63,9 +63,9 @@ if TYPE_CHECKING: _DICT_VAL = TypeVar("_DICT_VAL") __all__ += [ + "_ACTIVATION", "_DICT_VAL", "_PRECISION", - "_ACTIVATION", ] diff --git a/deepmd/dpmodel/__init__.py b/deepmd/dpmodel/__init__.py index 111c2d6ced..b87d217c74 100644 --- a/deepmd/dpmodel/__init__.py +++ b/deepmd/dpmodel/__init__.py @@ -23,18 +23,18 @@ ) __all__ = [ - "DPModelCommon", - "PRECISION_DICT", "DEFAULT_PRECISION", - "NativeOP", - "ModelOutputDef", + "PRECISION_DICT", + "DPModelCommon", "FittingOutputDef", + "ModelOutputDef", + "NativeOP", "OutputVariableDef", - "model_check_output", "fitting_check_output", - "get_reduce_name", "get_deriv_name", "get_hessian_name", + "get_reduce_name", + "model_check_output", ] diff --git a/deepmd/dpmodel/atomic_model/__init__.py b/deepmd/dpmodel/atomic_model/__init__.py index 4f4ef32e03..3d90c738ae 100644 --- a/deepmd/dpmodel/atomic_model/__init__.py +++ b/deepmd/dpmodel/atomic_model/__init__.py @@ -44,14 +44,14 @@ ) __all__ = [ - "make_base_atomic_model", "BaseAtomicModel", "DPAtomicModel", - "DPEnergyAtomicModel", - "PairTabAtomicModel", - "LinearEnergyAtomicModel", - 
"DPZBLLinearEnergyAtomicModel", "DPDOSAtomicModel", - "DPPolarAtomicModel", "DPDipoleAtomicModel", + "DPEnergyAtomicModel", + "DPPolarAtomicModel", + "DPZBLLinearEnergyAtomicModel", + "LinearEnergyAtomicModel", + "PairTabAtomicModel", + "make_base_atomic_model", ] diff --git a/deepmd/dpmodel/common.py b/deepmd/dpmodel/common.py index 8353cc28e3..920364edc0 100644 --- a/deepmd/dpmodel/common.py +++ b/deepmd/dpmodel/common.py @@ -221,10 +221,10 @@ def safe_cast_array( __all__ = [ - "GLOBAL_NP_FLOAT_PRECISION", + "DEFAULT_PRECISION", "GLOBAL_ENER_FLOAT_PRECISION", + "GLOBAL_NP_FLOAT_PRECISION", "PRECISION_DICT", "RESERVED_PRECISON_DICT", - "DEFAULT_PRECISION", "NativeOP", ] diff --git a/deepmd/dpmodel/descriptor/__init__.py b/deepmd/dpmodel/descriptor/__init__.py index de22757647..8542168d91 100644 --- a/deepmd/dpmodel/descriptor/__init__.py +++ b/deepmd/dpmodel/descriptor/__init__.py @@ -28,13 +28,13 @@ ) __all__ = [ + "DescrptDPA1", + "DescrptDPA2", + "DescrptHybrid", "DescrptSeA", + "DescrptSeAttenV2", "DescrptSeR", "DescrptSeT", "DescrptSeTTebd", - "DescrptDPA1", - "DescrptSeAttenV2", - "DescrptDPA2", - "DescrptHybrid", "make_base_descriptor", ] diff --git a/deepmd/dpmodel/fitting/__init__.py b/deepmd/dpmodel/fitting/__init__.py index 2a35a68d35..5bdfff2571 100644 --- a/deepmd/dpmodel/fitting/__init__.py +++ b/deepmd/dpmodel/fitting/__init__.py @@ -22,11 +22,11 @@ ) __all__ = [ - "InvarFitting", - "make_base_fitting", + "DOSFittingNet", "DipoleFitting", "EnergyFittingNet", + "InvarFitting", "PolarFitting", - "DOSFittingNet", "PropertyFittingNet", + "make_base_fitting", ] diff --git a/deepmd/dpmodel/model/__init__.py b/deepmd/dpmodel/model/__init__.py index f2a884c497..37ef57b38b 100644 --- a/deepmd/dpmodel/model/__init__.py +++ b/deepmd/dpmodel/model/__init__.py @@ -29,9 +29,9 @@ ) __all__ = [ + "DPModelCommon", "EnergyModel", "PropertyModel", - "DPModelCommon", "SpinModel", "make_model", ] diff --git a/deepmd/dpmodel/utils/__init__.py b/deepmd/dpmodel/utils/__init__.py index 0ae70dc31d..f6834ec838 100644 --- a/deepmd/dpmodel/utils/__init__.py +++ b/deepmd/dpmodel/utils/__init__.py @@ -36,27 +36,27 @@ ) __all__ = [ - "EnvMat", - "make_multilayer_network", - "make_embedding_network", - "make_fitting_network", + "AtomExcludeMask", "EmbeddingNet", + "EnvMat", "FittingNet", "NativeLayer", "NativeNet", "NetworkCollection", - "load_dp_model", - "save_dp_model", - "traverse_model_dict", - "build_neighbor_list", - "nlist_distinguish_types", - "get_multiple_nlist_key", + "PairExcludeMask", "build_multiple_neighbor_list", + "build_neighbor_list", "extend_coord_with_ghosts", - "normalize_coord", + "get_multiple_nlist_key", "inter2phys", + "load_dp_model", + "make_embedding_network", + "make_fitting_network", + "make_multilayer_network", + "nlist_distinguish_types", + "normalize_coord", "phys2inter", + "save_dp_model", "to_face_distance", - "AtomExcludeMask", - "PairExcludeMask", + "traverse_model_dict", ] diff --git a/deepmd/entrypoints/test.py b/deepmd/entrypoints/test.py index 09863eb6ac..d9744246d7 100644 --- a/deepmd/entrypoints/test.py +++ b/deepmd/entrypoints/test.py @@ -704,9 +704,9 @@ def test_dos( frame_output = np.hstack((test_out, pred_out)) save_txt_file( - detail_path.with_suffix(".dos.out.%.d" % ii), + detail_path.with_suffix(f".dos.out.{ii}"), frame_output, - header="%s - %.d: data_dos pred_dos" % (system, ii), + header=f"{system} - {ii}: data_dos pred_dos", append=append_detail, ) @@ -718,9 +718,9 @@ def test_dos( frame_output = np.hstack((test_out, pred_out)) save_txt_file( - 
detail_path.with_suffix(".ados.out.%.d" % ii), + detail_path.with_suffix(f".ados.out.{ii}"), frame_output, - header="%s - %.d: data_ados pred_ados" % (system, ii), + header=f"{system} - {ii}: data_ados pred_ados", append=append_detail, ) @@ -860,9 +860,9 @@ def test_property( frame_output = np.hstack((test_out, pred_out)) save_txt_file( - detail_path.with_suffix(".property.out.%.d" % ii), + detail_path.with_suffix(f".property.out.{ii}"), frame_output, - header="%s - %.d: data_property pred_property" % (system, ii), + header=f"{system} - {ii}: data_property pred_property", append=append_detail, ) @@ -874,9 +874,9 @@ def test_property( frame_output = np.hstack((test_out, pred_out)) save_txt_file( - detail_path.with_suffix(".aproperty.out.%.d" % ii), + detail_path.with_suffix(f".aproperty.out.{ii}"), frame_output, - header="%s - %.d: data_aproperty pred_aproperty" % (system, ii), + header=f"{system} - {ii}: data_aproperty pred_aproperty", append=append_detail, ) diff --git a/deepmd/env.py b/deepmd/env.py index c1dd9d6f25..2c1241a36b 100644 --- a/deepmd/env.py +++ b/deepmd/env.py @@ -13,12 +13,12 @@ import deepmd.lib __all__ = [ - "GLOBAL_NP_FLOAT_PRECISION", - "GLOBAL_ENER_FLOAT_PRECISION", - "global_float_prec", "GLOBAL_CONFIG", - "SHARED_LIB_MODULE", + "GLOBAL_ENER_FLOAT_PRECISION", + "GLOBAL_NP_FLOAT_PRECISION", "SHARED_LIB_DIR", + "SHARED_LIB_MODULE", + "global_float_prec", ] log = logging.getLogger(__name__) diff --git a/deepmd/infer/__init__.py b/deepmd/infer/__init__.py index 8a8afb165a..a0330c4ffb 100644 --- a/deepmd/infer/__init__.py +++ b/deepmd/infer/__init__.py @@ -10,10 +10,10 @@ ) __all__ = [ - "DeepPot", - "calc_model_devi", "DeepEval", + "DeepPot", "DeepPotential", + "calc_model_devi", ] diff --git a/deepmd/infer/deep_eval.py b/deepmd/infer/deep_eval.py index 6e2b2942ed..159f9bdf60 100644 --- a/deepmd/infer/deep_eval.py +++ b/deepmd/infer/deep_eval.py @@ -503,8 +503,7 @@ def _standard_input(self, coords, cells, atom_types, fparam, aparam, mixed_type) fparam = np.tile(fparam.reshape([-1]), [nframes, 1]) else: raise RuntimeError( - "got wrong size of frame param, should be either %d x %d or %d" - % (nframes, fdim, fdim) + f"got wrong size of frame param, should be either {nframes} x {fdim} or {fdim}" ) if aparam is not None: fdim = self.get_dim_aparam() @@ -516,8 +515,7 @@ def _standard_input(self, coords, cells, atom_types, fparam, aparam, mixed_type) aparam = np.tile(aparam.reshape([-1]), [nframes, natoms]) else: raise RuntimeError( - "got wrong size of frame param, should be either %d x %d x %d or %d x %d or %d" - % (nframes, natoms, fdim, natoms, fdim, fdim) + f"got wrong size of frame param, should be either {nframes} x {natoms} x {fdim} or {natoms} x {fdim} or {fdim}" ) return coords, cells, atom_types, fparam, aparam, nframes, natoms diff --git a/deepmd/infer/model_devi.py b/deepmd/infer/model_devi.py index 304aabdadc..b3e3330835 100644 --- a/deepmd/infer/model_devi.py +++ b/deepmd/infer/model_devi.py @@ -204,16 +204,16 @@ def write_model_devi_out( assert devi.shape[1] == 8 else: assert devi.shape[1] > 8 - header = "%s\n%10s" % (header, "step") + header = f"{header}\n{'step':10s}" for item in "vf": - header += "%19s%19s%19s" % ( + header += "{:19s}{:19s}{:19s}".format( f"max_devi_{item}", f"min_devi_{item}", f"avg_devi_{item}", ) - header += "%19s" % "devi_e" + header += f'{"devi_e":19s}' if atomic: - header += "%19s" % "atm_devi_f(N)" + header += f"{'atm_devi_f(N)':19s}" with open(fname, "ab") as fp: np.savetxt( fp, diff --git a/deepmd/jax/descriptor/__init__.py 
b/deepmd/jax/descriptor/__init__.py index dc5282dd21..de6489e6cf 100644 --- a/deepmd/jax/descriptor/__init__.py +++ b/deepmd/jax/descriptor/__init__.py @@ -25,12 +25,12 @@ ) __all__ = [ + "DescrptDPA1", + "DescrptDPA2", + "DescrptHybrid", "DescrptSeA", + "DescrptSeAttenV2", "DescrptSeR", "DescrptSeT", "DescrptSeTTebd", - "DescrptDPA1", - "DescrptSeAttenV2", - "DescrptDPA2", - "DescrptHybrid", ] diff --git a/deepmd/jax/env.py b/deepmd/jax/env.py index 02e31ae66e..738b2bba18 100644 --- a/deepmd/jax/env.py +++ b/deepmd/jax/env.py @@ -18,7 +18,7 @@ __all__ = [ "jax", + "jax_export", "jnp", "nnx", - "jax_export", ] diff --git a/deepmd/jax/fitting/__init__.py b/deepmd/jax/fitting/__init__.py index 226a6d5b43..77133e2bac 100644 --- a/deepmd/jax/fitting/__init__.py +++ b/deepmd/jax/fitting/__init__.py @@ -7,8 +7,8 @@ ) __all__ = [ - "EnergyFittingNet", "DOSFittingNet", "DipoleFittingNet", + "EnergyFittingNet", "PolarFittingNet", ] diff --git a/deepmd/jax/model/__init__.py b/deepmd/jax/model/__init__.py index fd31999aab..79d5bb2b23 100644 --- a/deepmd/jax/model/__init__.py +++ b/deepmd/jax/model/__init__.py @@ -19,10 +19,10 @@ ) __all__ = [ - "EnergyModel", - "DPZBLLinearEnergyAtomicModel", "DOSModel", + "DPZBLLinearEnergyAtomicModel", "DipoleModel", + "EnergyModel", "PolarModel", "PropertyModel", ] diff --git a/deepmd/pd/model/model/__init__.py b/deepmd/pd/model/model/__init__.py index 990ee51348..cd758add6d 100644 --- a/deepmd/pd/model/model/__init__.py +++ b/deepmd/pd/model/model/__init__.py @@ -136,9 +136,9 @@ def get_model(model_params): __all__ = [ "BaseModel", - "get_model", "DPModelCommon", "EnergyModel", "FrozenModel", + "get_model", "make_model", ] diff --git a/deepmd/pd/model/network/init.py b/deepmd/pd/model/network/init.py index dbdad56794..83a16807d7 100644 --- a/deepmd/pd/model/network/init.py +++ b/deepmd/pd/model/network/init.py @@ -138,10 +138,8 @@ def calculate_gain(nonlinearity, param=None): elif nonlinearity == "leaky_relu": if param is None: negative_slope = 0.01 - elif ( - not isinstance(param, bool) - and isinstance(param, int) - or isinstance(param, float) + elif (not isinstance(param, bool) and isinstance(param, int)) or isinstance( + param, float ): # True/False are instances of int, hence check above negative_slope = param diff --git a/deepmd/pd/model/task/__init__.py b/deepmd/pd/model/task/__init__.py index ad616156c7..f3e39b3ff3 100644 --- a/deepmd/pd/model/task/__init__.py +++ b/deepmd/pd/model/task/__init__.py @@ -10,7 +10,7 @@ ) __all__ = [ + "BaseFitting", "EnergyFittingNet", "Fitting", - "BaseFitting", ] diff --git a/deepmd/pd/train/training.py b/deepmd/pd/train/training.py index 09cf86ecdd..17d369751f 100644 --- a/deepmd/pd/train/training.py +++ b/deepmd/pd/train/training.py @@ -1088,7 +1088,7 @@ def get_data(self, is_train=True, task_key="Default"): def print_header(self, fout, train_results, valid_results): train_keys = sorted(train_results.keys()) print_str = "" - print_str += "# %5s" % "step" + print_str += "# {:5s}".format("step") if not self.multi_task: if valid_results: prop_fmt = " %11s %11s" @@ -1111,7 +1111,7 @@ def print_header(self, fout, train_results, valid_results): prop_fmt = " %11s" for k in sorted(train_results[model_key].keys()): print_str += prop_fmt % (k + f"_trn_{model_key}") - print_str += " %8s\n" % "lr" + print_str += " {:8s}\n".format("lr") print_str += "# If there is no available reference data, rmse_*_{val,trn} will print nan\n" fout.write(print_str) fout.flush() @@ -1119,7 +1119,7 @@ def print_header(self, fout, train_results, 
valid_results): def print_on_training(self, fout, step_id, cur_lr, train_results, valid_results): train_keys = sorted(train_results.keys()) print_str = "" - print_str += "%7d" % step_id + print_str += f"{step_id:7d}" if not self.multi_task: if valid_results: prop_fmt = " %11.2e %11.2e" diff --git a/deepmd/pd/utils/__init__.py b/deepmd/pd/utils/__init__.py index 7e1043eda4..f90cf82249 100644 --- a/deepmd/pd/utils/__init__.py +++ b/deepmd/pd/utils/__init__.py @@ -6,6 +6,6 @@ ) __all__ = [ - "PairExcludeMask", "AtomExcludeMask", + "PairExcludeMask", ] diff --git a/deepmd/pd/utils/decomp.py b/deepmd/pd/utils/decomp.py index 434301441a..272c2deacb 100644 --- a/deepmd/pd/utils/decomp.py +++ b/deepmd/pd/utils/decomp.py @@ -13,12 +13,12 @@ import paddle __all__ = [ - "softmax", + "masked_add_", "norm", - "take_along_axis", "scatter_reduce", "sec", - "masked_add_", + "softmax", + "take_along_axis", ] diff --git a/deepmd/pd/utils/env.py b/deepmd/pd/utils/env.py index 4c104db374..6dbdc69f30 100644 --- a/deepmd/pd/utils/env.py +++ b/deepmd/pd/utils/env.py @@ -89,19 +89,19 @@ def enable_prim(enable: bool = True): __all__ = [ + "CACHE_PER_SYS", + "DEFAULT_PRECISION", + "DEVICE", + "ENERGY_BIAS_TRAINABLE", "GLOBAL_ENER_FLOAT_PRECISION", "GLOBAL_NP_FLOAT_PRECISION", - "GLOBAL_PD_FLOAT_PRECISION", "GLOBAL_PD_ENER_FLOAT_PRECISION", - "DEFAULT_PRECISION", + "GLOBAL_PD_FLOAT_PRECISION", + "JIT", + "LOCAL_RANK", + "NUM_WORKERS", "PRECISION_DICT", "RESERVED_PRECISON_DICT", "SAMPLER_RECORD", - "NUM_WORKERS", - "DEVICE", - "JIT", - "CACHE_PER_SYS", - "ENERGY_BIAS_TRAINABLE", - "LOCAL_RANK", "enable_prim", ] diff --git a/deepmd/pt/cxx_op.py b/deepmd/pt/cxx_op.py index b0653522b2..f7922a5c52 100644 --- a/deepmd/pt/cxx_op.py +++ b/deepmd/pt/cxx_op.py @@ -50,16 +50,11 @@ def load_library(module_name: str) -> bool: if PT_CXX11_ABI_FLAG != pt_cxx11_abi_flag: raise RuntimeError( "This deepmd-kit package was compiled with " - "CXX11_ABI_FLAG=%d, but PyTorch runtime was compiled " - "with CXX11_ABI_FLAG=%d. These two library ABIs are " - "incompatible and thus an error is raised when loading %s. " + f"CXX11_ABI_FLAG={PT_CXX11_ABI_FLAG}, but PyTorch runtime was compiled " + f"with CXX11_ABI_FLAG={pt_cxx11_abi_flag}. These two library ABIs are " + f"incompatible and thus an error is raised when loading {module_name}. " "You need to rebuild deepmd-kit against this PyTorch " "runtime." 
- % ( - PT_CXX11_ABI_FLAG, - pt_cxx11_abi_flag, - module_name, - ) ) from e # different versions may cause incompatibility, see TF diff --git a/deepmd/pt/loss/__init__.py b/deepmd/pt/loss/__init__.py index 78528bceaa..cae561a8a2 100644 --- a/deepmd/pt/loss/__init__.py +++ b/deepmd/pt/loss/__init__.py @@ -22,11 +22,11 @@ ) __all__ = [ + "DOSLoss", "DenoiseLoss", - "EnergyStdLoss", "EnergySpinLoss", - "TensorLoss", - "TaskLoss", - "DOSLoss", + "EnergyStdLoss", "PropertyLoss", + "TaskLoss", + "TensorLoss", ] diff --git a/deepmd/pt/model/atomic_model/__init__.py b/deepmd/pt/model/atomic_model/__init__.py index 2aa4b1cdb2..4da9bf781b 100644 --- a/deepmd/pt/model/atomic_model/__init__.py +++ b/deepmd/pt/model/atomic_model/__init__.py @@ -47,11 +47,11 @@ "BaseAtomicModel", "DPAtomicModel", "DPDOSAtomicModel", + "DPDipoleAtomicModel", "DPEnergyAtomicModel", - "DPPropertyAtomicModel", - "PairTabAtomicModel", - "LinearEnergyAtomicModel", "DPPolarAtomicModel", - "DPDipoleAtomicModel", + "DPPropertyAtomicModel", "DPZBLLinearEnergyAtomicModel", + "LinearEnergyAtomicModel", + "PairTabAtomicModel", ] diff --git a/deepmd/pt/model/descriptor/__init__.py b/deepmd/pt/model/descriptor/__init__.py index 4ffa937bcb..4a227918fe 100644 --- a/deepmd/pt/model/descriptor/__init__.py +++ b/deepmd/pt/model/descriptor/__init__.py @@ -43,18 +43,18 @@ __all__ = [ "BaseDescriptor", "DescriptorBlock", - "make_default_type_embedding", + "DescrptBlockRepformers", "DescrptBlockSeA", "DescrptBlockSeAtten", - "DescrptSeAttenV2", - "DescrptSeTTebd", "DescrptBlockSeTTebd", - "DescrptSeA", - "DescrptSeR", - "DescrptSeT", "DescrptDPA1", "DescrptDPA2", "DescrptHybrid", + "DescrptSeA", + "DescrptSeAttenV2", + "DescrptSeR", + "DescrptSeT", + "DescrptSeTTebd", + "make_default_type_embedding", "prod_env_mat", - "DescrptBlockRepformers", ] diff --git a/deepmd/pt/model/model/__init__.py b/deepmd/pt/model/model/__init__.py index f2e03fb99e..491a524da8 100644 --- a/deepmd/pt/model/model/__init__.py +++ b/deepmd/pt/model/model/__init__.py @@ -297,17 +297,17 @@ def get_model(model_params): __all__ = [ "BaseModel", - "get_model", + "DOSModel", "DPModelCommon", - "EnergyModel", + "DPZBLModel", "DipoleModel", - "PolarModel", - "DOSModel", + "EnergyModel", "FrozenModel", - "SpinModel", + "LinearEnergyModel", + "PolarModel", "SpinEnergyModel", - "DPZBLModel", - "make_model", + "SpinModel", + "get_model", "make_hessian_model", - "LinearEnergyModel", + "make_model", ] diff --git a/deepmd/pt/model/network/init.py b/deepmd/pt/model/network/init.py index fe3c034637..53e2c70892 100644 --- a/deepmd/pt/model/network/init.py +++ b/deepmd/pt/model/network/init.py @@ -130,10 +130,8 @@ def calculate_gain(nonlinearity, param=None): elif nonlinearity == "leaky_relu": if param is None: negative_slope = 0.01 - elif ( - not isinstance(param, bool) - and isinstance(param, int) - or isinstance(param, float) + elif (not isinstance(param, bool) and isinstance(param, int)) or isinstance( + param, float ): # True/False are instances of int, hence check above negative_slope = param diff --git a/deepmd/pt/model/task/__init__.py b/deepmd/pt/model/task/__init__.py index 02d852eab7..37ffec2725 100644 --- a/deepmd/pt/model/task/__init__.py +++ b/deepmd/pt/model/task/__init__.py @@ -29,14 +29,14 @@ ) __all__ = [ + "BaseFitting", + "DOSFittingNet", "DenoiseNet", "DipoleFittingNet", "EnergyFittingNet", "EnergyFittingNetDirect", "Fitting", - "BaseFitting", - "TypePredictNet", "PolarFittingNet", - "DOSFittingNet", "PropertyFittingNet", + "TypePredictNet", ] diff --git 
a/deepmd/pt/train/training.py b/deepmd/pt/train/training.py index 61683fd857..9e6f92b06d 100644 --- a/deepmd/pt/train/training.py +++ b/deepmd/pt/train/training.py @@ -1132,7 +1132,7 @@ def get_data(self, is_train=True, task_key="Default"): def print_header(self, fout, train_results, valid_results) -> None: train_keys = sorted(train_results.keys()) print_str = "" - print_str += "# %5s" % "step" + print_str += "# {:5s}".format("step") if not self.multi_task: if valid_results: prop_fmt = " %11s %11s" @@ -1155,7 +1155,7 @@ def print_header(self, fout, train_results, valid_results) -> None: prop_fmt = " %11s" for k in sorted(train_results[model_key].keys()): print_str += prop_fmt % (k + f"_trn_{model_key}") - print_str += " %8s\n" % "lr" + print_str += " {:8s}\n".format("lr") print_str += "# If there is no available reference data, rmse_*_{val,trn} will print nan\n" fout.write(print_str) fout.flush() @@ -1165,7 +1165,7 @@ def print_on_training( ) -> None: train_keys = sorted(train_results.keys()) print_str = "" - print_str += "%7d" % step_id + print_str += f"{step_id:7d}" if not self.multi_task: if valid_results: prop_fmt = " %11.2e %11.2e" diff --git a/deepmd/pt/utils/__init__.py b/deepmd/pt/utils/__init__.py index 7e1043eda4..f90cf82249 100644 --- a/deepmd/pt/utils/__init__.py +++ b/deepmd/pt/utils/__init__.py @@ -6,6 +6,6 @@ ) __all__ = [ - "PairExcludeMask", "AtomExcludeMask", + "PairExcludeMask", ] diff --git a/deepmd/pt/utils/env.py b/deepmd/pt/utils/env.py index 81dce669ff..6471fd80a8 100644 --- a/deepmd/pt/utils/env.py +++ b/deepmd/pt/utils/env.py @@ -75,18 +75,18 @@ torch.set_num_threads(intra_nthreads) __all__ = [ + "CACHE_PER_SYS", + "DEFAULT_PRECISION", + "DEVICE", + "ENERGY_BIAS_TRAINABLE", "GLOBAL_ENER_FLOAT_PRECISION", "GLOBAL_NP_FLOAT_PRECISION", - "GLOBAL_PT_FLOAT_PRECISION", "GLOBAL_PT_ENER_FLOAT_PRECISION", - "DEFAULT_PRECISION", + "GLOBAL_PT_FLOAT_PRECISION", + "JIT", + "LOCAL_RANK", + "NUM_WORKERS", "PRECISION_DICT", "RESERVED_PRECISON_DICT", "SAMPLER_RECORD", - "NUM_WORKERS", - "DEVICE", - "JIT", - "CACHE_PER_SYS", - "ENERGY_BIAS_TRAINABLE", - "LOCAL_RANK", ] diff --git a/deepmd/pt/utils/plugin.py b/deepmd/pt/utils/plugin.py index aa901c06e8..7a39ae498c 100644 --- a/deepmd/pt/utils/plugin.py +++ b/deepmd/pt/utils/plugin.py @@ -10,7 +10,7 @@ __all__ = [ "Plugin", - "VariantMeta", - "VariantABCMeta", "PluginVariant", + "VariantABCMeta", + "VariantMeta", ] diff --git a/deepmd/tf/__init__.py b/deepmd/tf/__init__.py index 80044b68cc..933729fde2 100644 --- a/deepmd/tf/__init__.py +++ b/deepmd/tf/__init__.py @@ -38,15 +38,15 @@ load_entry_point("deepmd") __all__ = [ + "DeepEval", + "DeepPotential", + "DipoleChargeModifier", "__version__", + "cluster", "descriptor", "fit", "loss", - "utils", - "cluster", "network", - "DeepEval", - "DeepPotential", - "DipoleChargeModifier", "nvnmd", + "utils", ] diff --git a/deepmd/tf/common.py b/deepmd/tf/common.py index df7842545a..ef257c5eae 100644 --- a/deepmd/tf/common.py +++ b/deepmd/tf/common.py @@ -43,22 +43,22 @@ ) __all__ = [ - # from deepmd.common - "select_idx_map", - "make_default_mesh", - "j_loader", - "expand_sys_str", - "get_np_precision", + "ACTIVATION_FN_DICT", # from self "PRECISION_DICT", + "cast_precision", + "clear_session", + "expand_sys_str", "gelu", "gelu_tf", - "ACTIVATION_FN_DICT", "get_activation_func", + "get_np_precision", "get_precision", + "j_loader", + "make_default_mesh", "safe_cast_tensor", - "cast_precision", - "clear_session", + # from deepmd.common + "select_idx_map", ] # define constants diff --git 
a/deepmd/tf/descriptor/__init__.py b/deepmd/tf/descriptor/__init__.py index 6ef6c50da5..4bdc988682 100644 --- a/deepmd/tf/descriptor/__init__.py +++ b/deepmd/tf/descriptor/__init__.py @@ -42,8 +42,8 @@ "DescrptHybrid", "DescrptLocFrame", "DescrptSeA", - "DescrptSeAEbdV2", "DescrptSeAEbd", + "DescrptSeAEbdV2", "DescrptSeAEf", "DescrptSeAEfLower", "DescrptSeAMask", diff --git a/deepmd/tf/entrypoints/__init__.py b/deepmd/tf/entrypoints/__init__.py index 9c3a8b31e1..bf8c51067e 100644 --- a/deepmd/tf/entrypoints/__init__.py +++ b/deepmd/tf/entrypoints/__init__.py @@ -34,15 +34,15 @@ ) __all__ = [ - "doc_train_input", - "freeze", - "test", - "train_dp", - "transfer", "compress", + "convert", "doc_train_input", + "doc_train_input", + "freeze", "make_model_devi", - "convert", "neighbor_stat", "start_dpgui", + "test", + "train_dp", + "transfer", ] diff --git a/deepmd/tf/entrypoints/main.py b/deepmd/tf/entrypoints/main.py index 799a6280f6..5058c51c17 100644 --- a/deepmd/tf/entrypoints/main.py +++ b/deepmd/tf/entrypoints/main.py @@ -35,7 +35,7 @@ train_nvnmd, ) -__all__ = ["main", "parse_args", "get_ll", "main_parser"] +__all__ = ["get_ll", "main", "main_parser", "parse_args"] def main(args: Optional[Union[list[str], argparse.Namespace]] = None) -> None: diff --git a/deepmd/tf/env.py b/deepmd/tf/env.py index 9a50f1281f..dac705b9c9 100644 --- a/deepmd/tf/env.py +++ b/deepmd/tf/env.py @@ -103,26 +103,26 @@ def filter(self, record) -> bool: tfv2 = None __all__ = [ + "ATTENTION_LAYER_PATTERN", + "EMBEDDING_NET_PATTERN", + "FITTING_NET_PATTERN", "GLOBAL_CONFIG", - "GLOBAL_TF_FLOAT_PRECISION", - "GLOBAL_NP_FLOAT_PRECISION", "GLOBAL_ENER_FLOAT_PRECISION", - "global_float_prec", - "global_cvt_2_tf_float", - "global_cvt_2_ener_float", + "GLOBAL_NP_FLOAT_PRECISION", + "GLOBAL_TF_FLOAT_PRECISION", "MODEL_VERSION", "SHARED_LIB_DIR", "SHARED_LIB_MODULE", - "default_tf_session_config", - "reset_default_tf_session_config", - "op_module", - "op_grads_module", + "TF_VERSION", "TRANSFER_PATTERN", - "FITTING_NET_PATTERN", - "EMBEDDING_NET_PATTERN", "TYPE_EMBEDDING_PATTERN", - "ATTENTION_LAYER_PATTERN", - "TF_VERSION", + "default_tf_session_config", + "global_cvt_2_ener_float", + "global_cvt_2_tf_float", + "global_float_prec", + "op_grads_module", + "op_module", + "reset_default_tf_session_config", "tf_py_version", ] @@ -349,16 +349,11 @@ def get_module(module_name: str) -> "ModuleType": if TF_CXX11_ABI_FLAG != tf_cxx11_abi_flag: raise RuntimeError( "This deepmd-kit package was compiled with " - "CXX11_ABI_FLAG=%d, but TensorFlow runtime was compiled " - "with CXX11_ABI_FLAG=%d. These two library ABIs are " - "incompatible and thus an error is raised when loading %s. " + f"CXX11_ABI_FLAG={TF_CXX11_ABI_FLAG}, but TensorFlow runtime was compiled " + f"with CXX11_ABI_FLAG={tf_cxx11_abi_flag}. These two library ABIs are " + f"incompatible and thus an error is raised when loading {module_name}. " "You need to rebuild deepmd-kit against this TensorFlow " "runtime." 
- % ( - TF_CXX11_ABI_FLAG, - tf_cxx11_abi_flag, - module_name, - ) ) from e # different versions may cause incompatibility diff --git a/deepmd/tf/fit/__init__.py b/deepmd/tf/fit/__init__.py index 29067eb987..d9af5a26ba 100644 --- a/deepmd/tf/fit/__init__.py +++ b/deepmd/tf/fit/__init__.py @@ -17,10 +17,10 @@ ) __all__ = [ + "DOSFitting", "DipoleFittingSeA", "EnerFitting", - "DOSFitting", + "Fitting", "GlobalPolarFittingSeA", "PolarFittingSeA", - "Fitting", ] diff --git a/deepmd/tf/fit/ener.py b/deepmd/tf/fit/ener.py index 068d3d8e35..d791a2eba0 100644 --- a/deepmd/tf/fit/ener.py +++ b/deepmd/tf/fit/ener.py @@ -222,7 +222,7 @@ def __init__( for at, ae in enumerate(atom_ener if atom_ener is not None else []): if ae is not None: self.atom_ener.append( - tf.constant(ae, GLOBAL_TF_FLOAT_PRECISION, name="atom_%d_ener" % at) + tf.constant(ae, GLOBAL_TF_FLOAT_PRECISION, name=f"atom_{at}_ener") ) else: self.atom_ener.append(None) diff --git a/deepmd/tf/infer/__init__.py b/deepmd/tf/infer/__init__.py index 3596ff033e..de8a77976e 100644 --- a/deepmd/tf/infer/__init__.py +++ b/deepmd/tf/infer/__init__.py @@ -35,13 +35,13 @@ ) __all__ = [ - "DeepPotential", + "DeepDOS", "DeepDipole", "DeepEval", "DeepGlobalPolar", "DeepPolar", "DeepPot", - "DeepDOS", + "DeepPotential", "DeepWFC", "DipoleChargeModifier", "EwaldRecp", diff --git a/deepmd/tf/infer/deep_eval.py b/deepmd/tf/infer/deep_eval.py index 9348b973b9..db4faffed9 100644 --- a/deepmd/tf/infer/deep_eval.py +++ b/deepmd/tf/infer/deep_eval.py @@ -817,8 +817,7 @@ def _prepare_feed_dict( fparam = np.tile(fparam.reshape([-1]), [nframes, 1]) else: raise RuntimeError( - "got wrong size of frame param, should be either %d x %d or %d" - % (nframes, fdim, fdim) + f"got wrong size of frame param, should be either {nframes} x {fdim} or {fdim}" ) if self.has_aparam: fdim = self.get_dim_aparam() @@ -830,8 +829,7 @@ def _prepare_feed_dict( aparam = np.tile(aparam.reshape([-1]), [nframes, natoms]) else: raise RuntimeError( - "got wrong size of frame param, should be either %d x %d x %d or %d x %d or %d" - % (nframes, natoms, fdim, natoms, fdim, fdim) + f"got wrong size of frame param, should be either {nframes} x {natoms} x {fdim} or {natoms} x {fdim} or {fdim}" ) # sort inputs diff --git a/deepmd/tf/infer/deep_polar.py b/deepmd/tf/infer/deep_polar.py index c3d42fd537..93b840c714 100644 --- a/deepmd/tf/infer/deep_polar.py +++ b/deepmd/tf/infer/deep_polar.py @@ -5,6 +5,6 @@ ) __all__ = [ - "DeepPolar", "DeepGlobalPolar", + "DeepPolar", ] diff --git a/deepmd/tf/infer/model_devi.py b/deepmd/tf/infer/model_devi.py index 4ee979ac67..e112232f45 100644 --- a/deepmd/tf/infer/model_devi.py +++ b/deepmd/tf/infer/model_devi.py @@ -9,10 +9,10 @@ ) __all__ = [ - "make_model_devi", "calc_model_devi", - "write_model_devi_out", "calc_model_devi_e", "calc_model_devi_f", "calc_model_devi_v", + "make_model_devi", + "write_model_devi_out", ] diff --git a/deepmd/tf/loss/__init__.py b/deepmd/tf/loss/__init__.py index 5098d04abf..0f8b4407de 100644 --- a/deepmd/tf/loss/__init__.py +++ b/deepmd/tf/loss/__init__.py @@ -12,9 +12,9 @@ ) __all__ = [ + "DOSLoss", "EnerDipoleLoss", "EnerSpinLoss", "EnerStdLoss", - "DOSLoss", "TensorLoss", ] diff --git a/deepmd/tf/model/__init__.py b/deepmd/tf/model/__init__.py index 85cc74781d..0a2201ba8f 100644 --- a/deepmd/tf/model/__init__.py +++ b/deepmd/tf/model/__init__.py @@ -26,14 +26,14 @@ ) __all__ = [ - "EnerModel", "DOSModel", "DipoleModel", - "GlobalPolarModel", - "PolarModel", - "WFCModel", + "EnerModel", "FrozenModel", + "GlobalPolarModel", 
"LinearEnergyModel", "PairTabModel", "PairwiseDPRc", + "PolarModel", + "WFCModel", ] diff --git a/deepmd/tf/model/model_stat.py b/deepmd/tf/model/model_stat.py index db70262d50..96c8b4a4af 100644 --- a/deepmd/tf/model/model_stat.py +++ b/deepmd/tf/model/model_stat.py @@ -8,7 +8,7 @@ ) __all__ = [ + "_make_all_stat_ref", # used by tests "make_stat_input", "merge_sys_stat", - "_make_all_stat_ref", # used by tests ] diff --git a/deepmd/tf/nvnmd/entrypoints/__init__.py b/deepmd/tf/nvnmd/entrypoints/__init__.py index b939d81d39..76f96baf2c 100644 --- a/deepmd/tf/nvnmd/entrypoints/__init__.py +++ b/deepmd/tf/nvnmd/entrypoints/__init__.py @@ -9,4 +9,4 @@ Wrap, ) -__all__ = ["save_weight", "MapTable", "Wrap"] +__all__ = ["MapTable", "Wrap", "save_weight"] diff --git a/deepmd/tf/nvnmd/entrypoints/wrap.py b/deepmd/tf/nvnmd/entrypoints/wrap.py index c4bee0acb5..ced97bdbf1 100755 --- a/deepmd/tf/nvnmd/entrypoints/wrap.py +++ b/deepmd/tf/nvnmd/entrypoints/wrap.py @@ -136,7 +136,7 @@ def wrap(self) -> None: d = e.extend_hex(d, w_full) # DEVELOP_DEBUG if jdata_sys["debug"]: - log.info("%s: %d x % d bit" % (k, h, w * 4)) + log.info(f"{k}: {h} x {w * 4} bit") FioTxt().save(f"nvnmd/wrap/h{k}.txt", d) datas[ii] = d # update h & w of nvnmd_cfg diff --git a/deepmd/tf/nvnmd/fit/ener.py b/deepmd/tf/nvnmd/fit/ener.py index 20adda395c..a4c15077ec 100644 --- a/deepmd/tf/nvnmd/fit/ener.py +++ b/deepmd/tf/nvnmd/fit/ener.py @@ -10,7 +10,7 @@ __all__ = [ "GLOBAL_TF_FLOAT_PRECISION", - "tf", "nvnmd_cfg", "one_layer_nvnmd", + "tf", ] diff --git a/deepmd/tf/nvnmd/utils/__init__.py b/deepmd/tf/nvnmd/utils/__init__.py index b4cb3d4799..a1d9ba322d 100644 --- a/deepmd/tf/nvnmd/utils/__init__.py +++ b/deepmd/tf/nvnmd/utils/__init__.py @@ -25,14 +25,14 @@ ) __all__ = [ - "nvnmd_args", - "nvnmd_cfg", "Encode", "FioBin", "FioDic", "FioTxt", - "one_layer", - "map_nvnmd", "get_filter_weight", "get_fitnet_weight", + "map_nvnmd", + "nvnmd_args", + "nvnmd_cfg", + "one_layer", ] diff --git a/deepmd/tf/nvnmd/utils/config.py b/deepmd/tf/nvnmd/utils/config.py index 8b35a1a2aa..41bd650b06 100644 --- a/deepmd/tf/nvnmd/utils/config.py +++ b/deepmd/tf/nvnmd/utils/config.py @@ -132,7 +132,7 @@ def init_config_by_version(self, version, max_nnei) -> None: r"""Initialize version-dependent parameters.""" self.version = version self.max_nnei = max_nnei - log.debug("#Set nvnmd version as %d " % self.version) + log.debug(f"#Set nvnmd version as {self.version} ") if self.version == 0: if self.max_nnei == 128: self.jdata_deepmd_input = jdata_deepmd_input_v0_ni128.copy() diff --git a/deepmd/tf/train/trainer.py b/deepmd/tf/train/trainer.py index c6affdef7b..1fe72bc5e3 100644 --- a/deepmd/tf/train/trainer.py +++ b/deepmd/tf/train/trainer.py @@ -180,7 +180,7 @@ def build(self, data=None, stop_batch=0, origin_type_map=None, suffix="") -> Non ), "Data in mixed_type format must use ener fitting!" if self.numb_fparam > 0: - log.info("training with %d frame parameter(s)" % self.numb_fparam) + log.info(f"training with {self.numb_fparam} frame parameter(s)") else: log.info("training without frame parameter") @@ -191,13 +191,12 @@ def build(self, data=None, stop_batch=0, origin_type_map=None, suffix="") -> Non single_data = data if self.ntypes < single_data.get_ntypes(): raise ValueError( - "The number of types of the training data is %d, but that of the " - "model is only %d. The latter must be no less than the former. " + f"The number of types of the training data is {single_data.get_ntypes()}, but that of the " + f"model is only {self.ntypes}. 
The latter must be no less than the former. " "You may need to reset one or both of them. Usually, the former " "is given by `model/type_map` in the training parameter (if set) " "or the maximum number in the training data. The latter is given " "by `model/descriptor/sel` in the training parameter." - % (single_data.get_ntypes(), self.ntypes) ) self.type_map = single_data.get_type_map() self.batch_size = data.get_batch_size() @@ -421,14 +420,12 @@ def train(self, train_data=None, valid_data=None) -> None: is_first_step = True self.cur_batch = cur_batch log.info( - "start training at lr %.2e (== %.2e), decay_step %d, decay_rate %f, final lr will be %.2e" - % ( - run_sess(self.sess, self.learning_rate), - self.lr.value(cur_batch), - self.lr.decay_steps_, - self.lr.decay_rate_, - self.lr.value(stop_batch), - ) + "start training at lr %.2e (== %.2e), decay_step %d, decay_rate %f, final lr will be %.2e", + run_sess(self.sess, self.learning_rate), + self.lr.value(cur_batch), + self.lr.decay_steps_, + self.lr.decay_rate_, + self.lr.value(stop_batch), ) prf_options = None @@ -693,7 +690,7 @@ def valid_on_the_fly( @staticmethod def print_header(fp, train_results, valid_results) -> None: print_str = "" - print_str += "# %5s" % "step" + print_str += "# {:5s}".format("step") if valid_results is not None: prop_fmt = " %11s %11s" for k in train_results.keys(): @@ -702,7 +699,7 @@ def print_header(fp, train_results, valid_results) -> None: prop_fmt = " %11s" for k in train_results.keys(): print_str += prop_fmt % (k + "_trn") - print_str += " %8s\n" % "lr" + print_str += " {:8s}\n".format("lr") print_str += "# If there is no available reference data, rmse_*_{val,trn} will print nan\n" fp.write(print_str) fp.flush() @@ -716,7 +713,7 @@ def print_on_training( cur_lr, ) -> None: print_str = "" - print_str += "%7d" % cur_batch + print_str += f"{cur_batch:7d}" if valid_results is not None: prop_fmt = " %11.2e %11.2e" for k in valid_results.keys(): diff --git a/deepmd/tf/utils/argcheck.py b/deepmd/tf/utils/argcheck.py index caec33c319..162234180a 100644 --- a/deepmd/tf/utils/argcheck.py +++ b/deepmd/tf/utils/argcheck.py @@ -11,10 +11,10 @@ ) __all__ = [ - "list_to_doc", - "normalize", + "gen_args", "gen_doc", "gen_json", - "gen_args", + "list_to_doc", + "normalize", "type_embedding_args", ] diff --git a/deepmd/tf/utils/data_system.py b/deepmd/tf/utils/data_system.py index da0cce28e8..8ca2007414 100644 --- a/deepmd/tf/utils/data_system.py +++ b/deepmd/tf/utils/data_system.py @@ -9,6 +9,6 @@ __all__ = [ "DeepmdDataSystem", - "process_sys_probs", "prob_sys_size_ext", + "process_sys_probs", ] diff --git a/deepmd/tf/utils/errors.py b/deepmd/tf/utils/errors.py index 5f7291c7ce..dcba19d3a3 100644 --- a/deepmd/tf/utils/errors.py +++ b/deepmd/tf/utils/errors.py @@ -13,7 +13,7 @@ class GraphWithoutTensorError(Exception): __all__ = [ - "OutOfMemoryError", "GraphTooLargeError", "GraphWithoutTensorError", + "OutOfMemoryError", ] diff --git a/deepmd/tf/utils/parallel_op.py b/deepmd/tf/utils/parallel_op.py index ce43ea8c15..c23f347dc1 100644 --- a/deepmd/tf/utils/parallel_op.py +++ b/deepmd/tf/utils/parallel_op.py @@ -59,7 +59,7 @@ def __init__( self.placeholders = [] self.ops = [] for ii in range(self.nthreads): - with tf.name_scope("task_%d" % ii) as scope: + with tf.name_scope(f"task_{ii}") as scope: placeholder, op = builder() self.placeholders.append(placeholder) self.ops.append(op) diff --git a/deepmd/tf/utils/path.py b/deepmd/tf/utils/path.py index 67990543ae..54d4da42bf 100644 --- a/deepmd/tf/utils/path.py +++ 
b/deepmd/tf/utils/path.py @@ -8,7 +8,7 @@ ) __all__ = [ - "DPPath", - "DPOSPath", "DPH5Path", + "DPOSPath", + "DPPath", ] diff --git a/deepmd/tf/utils/plugin.py b/deepmd/tf/utils/plugin.py index f2f0336691..2ed6d7e72e 100644 --- a/deepmd/tf/utils/plugin.py +++ b/deepmd/tf/utils/plugin.py @@ -10,7 +10,7 @@ __all__ = [ "Plugin", - "VariantMeta", - "VariantABCMeta", "PluginVariant", + "VariantABCMeta", + "VariantMeta", ] diff --git a/deepmd/utils/batch_size.py b/deepmd/utils/batch_size.py index c1cbea4cda..516c4d2ead 100644 --- a/deepmd/utils/batch_size.py +++ b/deepmd/utils/batch_size.py @@ -68,7 +68,7 @@ def __init__(self, initial_batch_size: int = 1024, factor: float = 2.0) -> None: log.warning( "You can use the environment variable DP_INFER_BATCH_SIZE to" "control the inference batch size (nframes * natoms). " - "The default value is %d." % initial_batch_size + f"The default value is {initial_batch_size}." ) self.factor = factor @@ -141,8 +141,7 @@ def _adjust_batch_size(self, factor: float) -> None: old_batch_size = self.current_batch_size self.current_batch_size = int(self.current_batch_size * factor) log.info( - "Adjust batch size from %d to %d" - % (old_batch_size, self.current_batch_size) + f"Adjust batch size from {old_batch_size} to {self.current_batch_size}" ) def execute_all( @@ -176,8 +175,7 @@ def execute_with_batch_size( ( vv[start_index:end_index, ...] if ( - array_api_compat.is_array_api_obj(vv) - and vv.ndim > 1 + (array_api_compat.is_array_api_obj(vv) and vv.ndim > 1) or str(vv.__class__) == "" ) else vv @@ -188,8 +186,7 @@ def execute_with_batch_size( kk: ( vv[start_index:end_index, ...] if ( - array_api_compat.is_array_api_obj(vv) - and vv.ndim > 1 + (array_api_compat.is_array_api_obj(vv) and vv.ndim > 1) or str(vv.__class__) == "" ) else vv diff --git a/deepmd/utils/data.py b/deepmd/utils/data.py index b93356bdbf..d87117fa24 100644 --- a/deepmd/utils/data.py +++ b/deepmd/utils/data.py @@ -789,10 +789,10 @@ def __getitem__(self, key: str): raise KeyError(key) return self.dict[key] - def __eq__(self, __value: object) -> bool: - if not isinstance(__value, DataRequirementItem): + def __eq__(self, value: object, /) -> bool: + if not isinstance(value, DataRequirementItem): return False - return self.dict == __value.dict + return self.dict == value.dict def __repr__(self) -> str: return f"DataRequirementItem({self.dict})" diff --git a/deepmd/utils/data_system.py b/deepmd/utils/data_system.py index 0e960d0ba1..a67047a034 100644 --- a/deepmd/utils/data_system.py +++ b/deepmd/utils/data_system.py @@ -216,19 +216,12 @@ def __init__( chk_ret = self.data_systems[ii].check_batch_size(self.batch_size[ii]) if chk_ret is not None and not is_auto_bs and not self.mixed_systems: warnings.warn( - "system %s required batch size is larger than the size of the dataset %s (%d > %d)" - % ( - self.system_dirs[ii], - chk_ret[0], - self.batch_size[ii], - chk_ret[1], - ) + f"system {self.system_dirs[ii]} required batch size is larger than the size of the dataset {chk_ret[0]} ({self.batch_size[ii]} > {chk_ret[1]})" ) chk_ret = self.data_systems[ii].check_test_size(self.test_size[ii]) if chk_ret is not None and not is_auto_bs and not self.mixed_systems: warnings.warn( - "system %s required test size is larger than the size of the dataset %s (%d > %d)" - % (self.system_dirs[ii], chk_ret[0], self.test_size[ii], chk_ret[1]) + f"system {self.system_dirs[ii]} required test size is larger than the size of the dataset {chk_ret[0]} ({self.test_size[ii]} > {chk_ret[1]})" ) def _load_test(self, ntests=-1) -> 
None: @@ -671,22 +664,25 @@ def print_summary( log.info( f"---Summary of DataSystem: {name:13s}-----------------------------------------------" ) - log.info("found %d system(s):" % nsystems) + log.info("found %d system(s):", nsystems) log.info( - ("{} ".format(_format_name_length("system", sys_width))) - + ("%6s %6s %6s %9s %3s" % ("natoms", "bch_sz", "n_bch", "prob", "pbc")) + "%s %6s %6s %6s %9s %3s", + _format_name_length("system", sys_width), + "natoms", + "bch_sz", + "n_bch", + "prob", + "pbc", ) for ii in range(nsystems): log.info( - "%s %6d %6d %6d %9.3e %3s" - % ( - _format_name_length(system_dirs[ii], sys_width), - natoms[ii], - batch_size[ii], - nbatches[ii], - sys_probs[ii], - "T" if pbc[ii] else "F", - ) + "%s %6d %6d %6d %9.3e %3s", + _format_name_length(system_dirs[ii], sys_width), + natoms[ii], + batch_size[ii], + nbatches[ii], + sys_probs[ii], + "T" if pbc[ii] else "F", ) log.info( "--------------------------------------------------------------------------------------" diff --git a/deepmd/utils/econf_embd.py b/deepmd/utils/econf_embd.py index ce0da9e8fe..5ff136b373 100644 --- a/deepmd/utils/econf_embd.py +++ b/deepmd/utils/econf_embd.py @@ -7,8 +7,8 @@ __all__ = [ "electronic_configuration_embedding", - "normalized_electronic_configuration_embedding", "make_econf_embedding", + "normalized_electronic_configuration_embedding", "transform_to_spin_rep", ] diff --git a/deepmd/utils/pair_tab.py b/deepmd/utils/pair_tab.py index 89f66cc994..05c68a5d67 100644 --- a/deepmd/utils/pair_tab.py +++ b/deepmd/utils/pair_tab.py @@ -64,10 +64,9 @@ def reinit(self, filename: str, rcut: Optional[float] = None) -> None: ncol = self.vdata.shape[1] - 1 n0 = (-1 + np.sqrt(1 + 8 * ncol)) * 0.5 self.ntypes = int(n0 + 0.1) - assert self.ntypes * (self.ntypes + 1) // 2 == ncol, ( - "number of volumes provided in %s does not match guessed number of types %d" - % (filename, self.ntypes) - ) + assert ( + self.ntypes * (self.ntypes + 1) // 2 == ncol + ), f"number of volumes provided in {filename} does not match guessed number of types {self.ntypes}" # check table data against rcut and update tab_file if needed, table upper boundary is used as rcut if not provided. self.rcut = rcut if rcut is not None else self.rmax diff --git a/deepmd/utils/update_sel.py b/deepmd/utils/update_sel.py index c47a1d2fc9..c9213de699 100644 --- a/deepmd/utils/update_sel.py +++ b/deepmd/utils/update_sel.py @@ -49,9 +49,9 @@ def update_one_sel( # we may skip warning for sel=0, where the user is likely # to exclude such type in the descriptor log.warning( - "sel of type %d is not enough! The expected value is " - "not less than %d, but you set it to %d. The accuracy" - " of your model may get worse." % (ii, tt, dd) + f"sel of type {ii} is not enough! The expected value is " + f"not less than {tt}, but you set it to {dd}. The accuracy" + " of your model may get worse." 
) return min_nbor_dist, sel diff --git a/doc/conf.py b/doc/conf.py index eca7665712..b266126c58 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -28,9 +28,7 @@ # -- Project information ----------------------------------------------------- project = "DeePMD-kit" -copyright = ( - "2017-%d, DeepModeling" % datetime.datetime.now(tz=datetime.timezone.utc).year -) +copyright = f"2017-{datetime.datetime.now(tz=datetime.timezone.utc).year}, DeepModeling" author = "DeepModeling" autoapi_dirs = ["../deepmd"] diff --git a/doc/getting-started/quick_start.ipynb b/doc/getting-started/quick_start.ipynb index ba8ac892bb..2ddb6a195e 100644 --- a/doc/getting-started/quick_start.ipynb +++ b/doc/getting-started/quick_start.ipynb @@ -1,5 +1,5 @@ { - "cells": [ + "cells": [ { "cell_type": "markdown", "id": "b22f597d-ec17-4ab9-8933-28e92af2438d", @@ -278,7 +278,7 @@ "\n", "# load data of abacus/md format\n", "data = dpdata.LabeledSystem(\"DeePMD-kit_Tutorial/00.data/abacus_md\", fmt=\"abacus/md\")\n", - "print(\"# the data contains %d frames\" % len(data))\n", + "print(f\"# the data contains {len(data)} frames\")\n", "\n", "# random choose 40 index for validation_data\n", "rng = np.random.default_rng()\n", @@ -295,8 +295,8 @@ "# all validation data put into directory:\"validation_data\"\n", "data_validation.to_deepmd_npy(\"DeePMD-kit_Tutorial/00.data/validation_data\")\n", "\n", - "print(\"# the training data contains %d frames\" % len(data_training))\n", - "print(\"# the validation data contains %d frames\" % len(data_validation))" + "print(f\"# the training data contains {len(data_training)} frames\")\n", + "print(f\"# the validation data contains {len(data_validation)} frames\")" ] }, { diff --git a/source/install/build_tf.py b/source/install/build_tf.py index d49458c032..d4fcc357e7 100755 --- a/source/install/build_tf.py +++ b/source/install/build_tf.py @@ -381,7 +381,7 @@ def call(commands: list[str], env={}, **kwargs) -> None: if exit_code: raise RuntimeError( - "Run %s failed, return code: %d" % (" ".join(commands), exit_code) + "Run {} failed, return code: {}".format(" ".join(commands), exit_code) ) @@ -791,7 +791,7 @@ def _build_opts(self) -> list[str]: "--config=opt", "--config=noaws", "--copt=-mtune=generic", - "--local_cpu_resources=%d" % CPU_COUNT, + f"--local_cpu_resources={CPU_COUNT}", ] if self.enable_mkl: # enable oneDNN diff --git a/source/lmp/tests/write_lmp_data.py b/source/lmp/tests/write_lmp_data.py index 747b42160d..e84fc481f0 100644 --- a/source/lmp/tests/write_lmp_data.py +++ b/source/lmp/tests/write_lmp_data.py @@ -9,16 +9,15 @@ def write_lmp_data(box, coord, type_list, file_name) -> None: ntype = np.unique(type_list).shape[0] with open(file_name, "w") as f: f.write(comment_lmp_data + "\n") - f.write("%d atoms\n" % (natom)) - f.write("%d atom types\n" % (ntype)) + f.write(f"{natom} atoms\n") + f.write(f"{ntype} atom types\n") f.write(f"{box[0]:.10e} {box[1]:.10e} xlo xhi\n") f.write(f"{box[2]:.10e} {box[3]:.10e} ylo yhi\n") f.write(f"{box[4]:.10e} {box[5]:.10e} zlo zhi\n") f.write(f"{box[6]:.10e} {box[7]:.10e} {box[8]:.10e} xy xz yz\n\nAtoms\n\n") for i in range(natom): f.write( - "%d %d %.10e %.10e %.10e\n" - % (i + 1, type_list[i], coord[i][0], coord[i][1], coord[i][2]) + f"{i + 1} {type_list[i]} {coord[i][0]:.10e} {coord[i][1]:.10e} {coord[i][2]:.10e}\n" ) f.write("\n") @@ -34,10 +33,10 @@ def write_lmp_data_full( nbond_list[i] = len(bond_list[i]) with open(file_name, "w") as f: f.write(comment_lmp_data + "\n") - f.write("%d atoms\n" % (natom)) - f.write("%d atom types\n" % (ntype)) - 
f.write("%d bonds\n" % (nbond_list.sum())) - f.write("%d bond types\n" % (nbond_type)) + f.write(f"{natom} atoms\n") + f.write(f"{ntype} atom types\n") + f.write(f"{nbond_list.sum()} bonds\n") + f.write(f"{nbond_type} bond types\n") f.write(f"{box[0]:.10e} {box[1]:.10e} xlo xhi\n") f.write(f"{box[2]:.10e} {box[3]:.10e} ylo yhi\n") f.write(f"{box[4]:.10e} {box[5]:.10e} zlo zhi\n") @@ -48,16 +47,7 @@ def write_lmp_data_full( f.write("\nAtoms\n\n") for i in range(natom): f.write( - "%d %d %d %.10e %.10e %.10e %.10e\n" - % ( - i + 1, - mol_list[i], - type_list[i], - charge[i], - coord[i][0], - coord[i][1], - coord[i][2], - ) + f"{i + 1} {mol_list[i]} {type_list[i]} {charge[i]:.10e} {coord[i][0]:.10e} {coord[i][1]:.10e} {coord[i][2]:.10e}\n" ) f.write("\nBonds\n\n") bond_count = 0 @@ -65,8 +55,7 @@ def write_lmp_data_full( for j in range(nbond_list[i]): bond_count += 1 f.write( - "%d %d %d %d\n" - % (bond_count, i + 1, bond_list[i][j][0], bond_list[i][j][1]) + f"{bond_count} {i + 1} {bond_list[i][j][0]} {bond_list[i][j][1]}\n" ) f.write("\n") @@ -79,25 +68,14 @@ def write_lmp_data_spin(box, coord, spin, type_list, file_name) -> None: sp_unit = np.where(sp_norm == 0, 1, sp_unit) with open(file_name, "w") as f: f.write(comment_lmp_data + "\n") - f.write("%d atoms\n" % (natom)) - f.write("%d atom types\n" % (ntype)) + f.write(f"{natom} atoms\n") + f.write(f"{ntype} atom types\n") f.write(f"{box[0]:.10e} {box[1]:.10e} xlo xhi\n") f.write(f"{box[2]:.10e} {box[3]:.10e} ylo yhi\n") f.write(f"{box[4]:.10e} {box[5]:.10e} zlo zhi\n") f.write(f"{box[6]:.10e} {box[7]:.10e} {box[8]:.10e} xy xz yz\n\nAtoms\n\n") for i in range(natom): f.write( - "%d %d %.10e %.10e %.10e %.10e %.10e %.10e %.10e\n" - % ( - i + 1, - type_list[i], - coord[i][0], - coord[i][1], - coord[i][2], - sp_unit[i][0], - sp_unit[i][1], - sp_unit[i][2], - sp_norm[i][0], - ) + f"{i + 1} {type_list[i]} {coord[i][0]:.10e} {coord[i][1]:.10e} {coord[i][2]:.10e} {sp_unit[i][0]:.10e} {sp_unit[i][1]:.10e} {sp_unit[i][2]:.10e} {sp_norm[i][0]:.10e}\n" ) f.write("\n") diff --git a/source/tests/array_api_strict/descriptor/__init__.py b/source/tests/array_api_strict/descriptor/__init__.py index 5667fed858..bd778e364d 100644 --- a/source/tests/array_api_strict/descriptor/__init__.py +++ b/source/tests/array_api_strict/descriptor/__init__.py @@ -13,8 +13,8 @@ ) __all__ = [ - "DescrptSeA", - "DescrptSeR", "DescrptDPA1", "DescrptHybrid", + "DescrptSeA", + "DescrptSeR", ] diff --git a/source/tests/consistent/common.py b/source/tests/consistent/common.py index a08e849c6c..7ee8477a3b 100644 --- a/source/tests/consistent/common.py +++ b/source/tests/consistent/common.py @@ -63,12 +63,13 @@ __all__ = [ - "CommonTest", - "INSTALLED_TF", - "INSTALLED_PT", + "INSTALLED_ARRAY_API_STRICT", "INSTALLED_JAX", "INSTALLED_PD", - "INSTALLED_ARRAY_API_STRICT", + "INSTALLED_PT", + "INSTALLED_TF", + "CommonTest", + "CommonTest", ] SKIP_FLAG = object() diff --git a/source/tests/pd/model/test_model.py b/source/tests/pd/model/test_model.py index 2566a9ce41..ce91fd3f21 100644 --- a/source/tests/pd/model/test_model.py +++ b/source/tests/pd/model/test_model.py @@ -71,14 +71,14 @@ def paddle2tf(paddle_name, last_layer_id=None): return None layer_id = int(fields[4 + offset]) + 1 weight_type = fields[5 + offset] - ret = "filter_type_all/%s_%d_%d:0" % (weight_type, layer_id, element_id) + ret = f"filter_type_all/{weight_type}_{layer_id}_{element_id}:0" elif fields[1] == "fitting_net": layer_id = int(fields[4 + offset]) weight_type = fields[5 + offset] if layer_id != last_layer_id: - 
ret = "layer_%d_type_%d/%s:0" % (layer_id, element_id, weight_type) + ret = f"layer_{layer_id}_type_{element_id}/{weight_type}:0" else: - ret = "final_layer_type_%d/%s:0" % (element_id, weight_type) + ret = f"final_layer_type_{element_id}/{weight_type}:0" else: raise RuntimeError(f"Unexpected parameter name: {paddle_name}") return ret diff --git a/source/tests/pt/model/test_model.py b/source/tests/pt/model/test_model.py index e0fb850ca8..eee0e9beef 100644 --- a/source/tests/pt/model/test_model.py +++ b/source/tests/pt/model/test_model.py @@ -71,14 +71,14 @@ def torch2tf(torch_name, last_layer_id=None): return None layer_id = int(fields[4 + offset]) + 1 weight_type = fields[5 + offset] - ret = "filter_type_all/%s_%d_%d:0" % (weight_type, layer_id, element_id) + ret = f"filter_type_all/{weight_type}_{layer_id}_{element_id}:0" elif fields[1] == "fitting_net": layer_id = int(fields[4 + offset]) weight_type = fields[5 + offset] if layer_id != last_layer_id: - ret = "layer_%d_type_%d/%s:0" % (layer_id, element_id, weight_type) + ret = f"layer_{layer_id}_type_{element_id}/{weight_type}:0" else: - ret = "final_layer_type_%d/%s:0" % (element_id, weight_type) + ret = f"final_layer_type_{element_id}/{weight_type}:0" else: raise RuntimeError(f"Unexpected parameter name: {torch_name}") return ret diff --git a/source/tests/tf/common.py b/source/tests/tf/common.py index 7e93af94c8..6095af5ebf 100644 --- a/source/tests/tf/common.py +++ b/source/tests/tf/common.py @@ -340,7 +340,7 @@ def force_test( c_force, force[0, idx * 3 + dd], places=places, - msg="force component [%d,%d] failed" % (idx, dd), + msg=f"force component [{idx},{dd}] failed", ) @@ -962,19 +962,12 @@ def __init__( chk_ret = self.data_systems[ii].check_batch_size(self.batch_size[ii]) if chk_ret is not None: raise RuntimeError( - "system %s required batch size %d is larger than the size %d of the dataset %s" - % ( - self.system_dirs[ii], - self.batch_size[ii], - chk_ret[1], - chk_ret[0], - ) + f"system {self.system_dirs[ii]} required batch size {self.batch_size[ii]} is larger than the size {chk_ret[1]} of the dataset {chk_ret[0]}" ) chk_ret = self.data_systems[ii].check_test_size(test_size) if chk_ret is not None: warnings.warn( - "WARNING: system %s required test size %d is larger than the size %d of the dataset %s" - % (self.system_dirs[ii], test_size, chk_ret[1], chk_ret[0]) + f"WARNING: system {self.system_dirs[ii]} required test size {test_size} is larger than the size {chk_ret[1]} of the dataset {chk_ret[0]}" ) if run_opt is not None: @@ -1026,16 +1019,11 @@ def print_summary(self) -> None: # width 65 sys_width = 42 tmp_msg += "---Summary of DataSystem-----------------------------------------\n" - tmp_msg += "find %d system(s):\n" % self.nsystems + tmp_msg += f"find {self.nsystems} system(s):\n" tmp_msg += "{} ".format(self.format_name_length("system", sys_width)) tmp_msg += "{} {} {}\n".format("natoms", "bch_sz", "n_bch") for ii in range(self.nsystems): - tmp_msg += "%s %6d %6d %5d\n" % ( - self.format_name_length(self.system_dirs[ii], sys_width), - self.natoms[ii], - self.batch_size[ii], - self.nbatches[ii], - ) + tmp_msg += f"{self.format_name_length(self.system_dirs[ii], sys_width)} {self.natoms[ii]:6d} {self.batch_size[ii]:6d} {self.nbatches[ii]:5d}\n" tmp_msg += "-----------------------------------------------------------------\n" # log.info(tmp_msg) diff --git a/source/tests/tf/test_data_modifier.py b/source/tests/tf/test_data_modifier.py index 12471f3817..db11fa5c2d 100644 --- a/source/tests/tf/test_data_modifier.py +++ 
b/source/tests/tf/test_data_modifier.py @@ -128,7 +128,7 @@ def _test_fv(self) -> None: vf[:, ii].ravel(), num_f.ravel(), places, - err_msg="dof %d does not match" % (ii), + err_msg=f"dof {ii} does not match", ) box3 = np.reshape(box, [nframes, 3, 3]) diff --git a/source/tests/tf/test_deepmd_data_sys.py b/source/tests/tf/test_deepmd_data_sys.py index 19abab1d21..9709e80097 100644 --- a/source/tests/tf/test_deepmd_data_sys.py +++ b/source/tests/tf/test_deepmd_data_sys.py @@ -37,12 +37,12 @@ def setUp(self) -> None: self.sys_name = [] self.nset = 3 for ii in range(self.nsys): - sys_name = "sys_%d" % ii + sys_name = f"sys_{ii}" self.sys_name.append(sys_name) os.makedirs(sys_name, exist_ok=True) np.savetxt(os.path.join(sys_name, "type.raw"), self.atom_type[ii], fmt="%d") for jj in range(self.nset): - set_name = os.path.join(sys_name, "set.%03d" % jj) + set_name = os.path.join(sys_name, f"set.{jj:03d}") os.makedirs(set_name, exist_ok=True) path = os.path.join(set_name, "coord.npy") val = rng.random([self.nframes[ii] + jj, self.natoms[ii] * 3]) @@ -58,7 +58,7 @@ def setUp(self) -> None: def tearDown(self) -> None: for ii in range(self.nsys): - sys_name = "sys_%d" % ii + sys_name = f"sys_{ii}" shutil.rmtree(sys_name) def test_ntypes(self) -> None: @@ -474,7 +474,7 @@ def _in_array(self, target, idx_map, ndof, array) -> None: find = True all_find.append(find) for idx, ii in enumerate(all_find): - self.assertTrue(ii, msg="does not find frame %d in array" % idx) + self.assertTrue(ii, msg=f"does not find frame {idx} in array") def test_sys_prob_floating_point_error(self) -> None: # test floating point error; See #1917 diff --git a/source/tests/tf/test_ewald.py b/source/tests/tf/test_ewald.py index c108c44fe7..270546fbc8 100644 --- a/source/tests/tf/test_ewald.py +++ b/source/tests/tf/test_ewald.py @@ -143,7 +143,7 @@ def test_force(self) -> None: c_force, force[:, idx * 3 + dd], places, - err_msg="force component [%d,%d] failed" % (idx, dd), + err_msg=f"force component [{idx},{dd}] failed", ) def test_virial(self) -> None: diff --git a/source/tests/tf/test_gen_stat_data.py b/source/tests/tf/test_gen_stat_data.py index d0fc3b6b3d..a49fe72f11 100644 --- a/source/tests/tf/test_gen_stat_data.py +++ b/source/tests/tf/test_gen_stat_data.py @@ -39,7 +39,7 @@ def gen_sys(nframes, atom_types): data["atom_names"] = [] data["atom_numbs"] = [] for ii in range(len(types)): - data["atom_names"].append("TYPE_%d" % ii) + data["atom_names"].append(f"TYPE_{ii}") data["atom_numbs"].append(np.sum(atom_types == ii)) data["atom_types"] = np.array(atom_types, dtype=int) return data From 03c6e49f4718f2dcb1bc9eadccc4e3c3f3b54e09 Mon Sep 17 00:00:00 2001 From: Chun Cai Date: Sat, 30 Nov 2024 11:05:20 +0800 Subject: [PATCH 12/43] chore: refactor training loop (#4435) ## Summary by CodeRabbit - **New Features** - Enhanced training loop to support multi-task training, allowing for more flexible model selection. - **Improvements** - Streamlined `step` function to accept only the step ID, simplifying its usage. - Adjusted logging and model saving mechanisms for consistency with the new training flow. - Improved random seed management for enhanced reproducibility in data processing. - Enhanced error handling in data retrieval to ensure seamless operation during data loading. - Added type hints for better clarity in data loader attributes. 
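For reference, the task-selection logic now lives inside `step` itself. Below is a minimal sketch of the idea, with plain NumPy standing in for deepmd's `dp_random` wrapper and with illustrative task keys and probabilities (none of these names come from the diff):

```python
# Sketch only: numpy stands in for deepmd.pt.utils.dp_random.
# Seeding once up front (what setup_seed now does via dp_random.seed)
# makes the sampled task sequence reproducible.
import numpy as np

rng = np.random.default_rng(42)

model_keys = ["water_1", "water_2"]  # hypothetical task keys
model_prob = np.array([0.7, 0.3])    # hypothetical sampling probabilities

start_step, num_steps = 0, 5
for step_id in range(start_step, num_steps):
    model_index = rng.choice(np.arange(len(model_keys), dtype=np.int_), p=model_prob)
    task_key = model_keys[model_index]
    print(step_id, task_key)  # the real loop trains the chosen task here
```

Since every rank seeds the same generator, each rank presumably draws the same task index at each step, which is what allows the earlier world-size-aware `choice` (with its per-rank indexing) to be dropped.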
--------- Signed-off-by: Chun Cai Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- deepmd/pt/train/training.py | 26 ++++++++------------------ deepmd/pt/utils/dataloader.py | 2 ++ 2 files changed, 10 insertions(+), 18 deletions(-) diff --git a/deepmd/pt/train/training.py b/deepmd/pt/train/training.py index 9e6f92b06d..72e84d577a 100644 --- a/deepmd/pt/train/training.py +++ b/deepmd/pt/train/training.py @@ -653,6 +653,12 @@ def run(self) -> None: prof.start() def step(_step_id, task_key="Default") -> None: + if self.multi_task: + model_index = dp_random.choice( + np.arange(self.num_model, dtype=np.int_), + p=self.model_prob, + ) + task_key = self.model_keys[model_index] # PyTorch Profiler if self.enable_profiler or self.profiling: prof.step() @@ -929,24 +935,8 @@ def log_loss_valid(_task_key="Default"): self.wrapper.train() self.t0 = time.time() self.total_train_time = 0.0 - for step_id in range(self.num_steps): - if step_id < self.start_step: - continue - if self.multi_task: - chosen_index_list = dp_random.choice( - np.arange( - self.num_model, dtype=np.int32 - ), # int32 should be enough for # models... - p=np.array(self.model_prob), - size=self.world_size, - replace=True, - ) - assert chosen_index_list.size == self.world_size - model_index = chosen_index_list[self.rank] - model_key = self.model_keys[model_index] - else: - model_key = "Default" - step(step_id, model_key) + for step_id in range(self.start_step, self.num_steps): + step(step_id) if JIT: break diff --git a/deepmd/pt/utils/dataloader.py b/deepmd/pt/utils/dataloader.py index 2fea6b72d2..d33b17b035 100644 --- a/deepmd/pt/utils/dataloader.py +++ b/deepmd/pt/utils/dataloader.py @@ -28,6 +28,7 @@ ) from deepmd.pt.utils import ( + dp_random, env, ) from deepmd.pt.utils.dataset import ( @@ -50,6 +51,7 @@ def setup_seed(seed) -> None: torch.manual_seed(seed) torch.cuda.manual_seed_all(seed) torch.backends.cudnn.deterministic = True + dp_random.seed(seed) class DpLoaderSet(Dataset): From db0a2a3048706f94b9f9376b1ae109cb40806c7c Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Sat, 30 Nov 2024 14:48:13 -0500 Subject: [PATCH 13/43] chore(tests): ensure the same result of frame 0 and 1 (#4442) Copied from https://github.com/njzjz/deepmd-gnn/pull/27. ## Summary by CodeRabbit - **Bug Fixes** - Enhanced the robustness of the testing framework to ensure consistent model output across multiple frames of input data. - Added assertions to validate output equivalence for the first and second frames. - **Tests** - Adjusted the testing methods to accommodate changes in input dimensionality and ensure proper validation of model behavior. 
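The assertion pattern can be reduced to a self-contained toy; a stand-in forward function replaces the real model below, and nothing here is the actual test code:

```python
# Toy version of the duplicated-frame consistency check.
import numpy as np

rng = np.random.default_rng(2024)
nf, natoms = 2, 5

# one random frame, repeated nf times along the frame axis
coord = 4.0 * rng.random([1, natoms, 3]).repeat(nf, 0).reshape([nf, -1])

def toy_forward(coord):
    # stand-in for module.forward(); any frame-wise reduction will do
    return coord.reshape(nf, natoms, 3).sum(axis=(1, 2))

out = toy_forward(coord)
# identical input frames must give identical outputs; a model that mixes
# frame indices internally fails this assertion
np.testing.assert_allclose(out[0], out[1], err_msg="frame 0 and 1 differ")
```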
--------- Signed-off-by: Jinzhe Zeng --- .../universal/common/cases/model/utils.py | 23 +++++++++++++------ 1 file changed, 16 insertions(+), 7 deletions(-) diff --git a/source/tests/universal/common/cases/model/utils.py b/source/tests/universal/common/cases/model/utils.py index 87c0209169..8fe6a131ef 100644 --- a/source/tests/universal/common/cases/model/utils.py +++ b/source/tests/universal/common/cases/model/utils.py @@ -119,7 +119,7 @@ def test_has_message_passing(self) -> None: def test_forward(self) -> None: """Test forward and forward_lower.""" test_spin = getattr(self, "test_spin", False) - nf = 1 + nf = 2 natoms = 5 aprec = ( 0 @@ -127,10 +127,10 @@ def test_forward(self) -> None: else self.aprec_dict["test_forward"] ) rng = np.random.default_rng(GLOBAL_SEED) - coord = 4.0 * rng.random([natoms, 3]).reshape([nf, -1]) - atype = np.array([0, 0, 0, 1, 1], dtype=int).reshape([nf, -1]) - spin = 0.5 * rng.random([natoms, 3]).reshape([nf, -1]) - cell = 6.0 * np.eye(3).reshape([nf, 9]) + coord = 4.0 * rng.random([1, natoms, 3]).repeat(nf, 0).reshape([nf, -1]) + atype = np.array([[0, 0, 0, 1, 1] * nf], dtype=int).reshape([nf, -1]) + spin = 0.5 * rng.random([1, natoms, 3]).repeat(nf, 0).reshape([nf, -1]) + cell = 6.0 * np.repeat(np.eye(3)[None, ...], nf, axis=0).reshape([nf, 9]) coord_ext, atype_ext, mapping, nlist = extend_input_and_build_neighbor_list( coord, atype, @@ -147,9 +147,9 @@ def test_forward(self) -> None: aparam = None fparam = None if self.module.get_dim_aparam() > 0: - aparam = rng.random([nf, natoms, self.module.get_dim_aparam()]) + aparam = rng.random([1, natoms, self.module.get_dim_aparam()]).repeat(nf, 0) if self.module.get_dim_fparam() > 0: - fparam = rng.random([nf, self.module.get_dim_fparam()]) + fparam = rng.random([1, self.module.get_dim_fparam()]).repeat(nf, 0) ret = [] ret_lower = [] for module in self.modules_to_test: @@ -183,6 +183,15 @@ def test_forward(self) -> None: ret_lower.append(module.forward_lower(**input_dict_lower)) for kk in ret[0]: + # ensure the first frame and the second frame are the same + if ret[0][kk] is not None: + np.testing.assert_allclose( + ret[0][kk][0], + ret[0][kk][1], + err_msg=f"compare {kk} between frame 0 and 1", + atol=aprec, + ) + subret = [] for rr in ret: if rr is not None: From 9b70351327b8657ca198053720fb6931611659df Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Sun, 1 Dec 2024 21:48:10 -0500 Subject: [PATCH 14/43] fix(tf): pass `type_one_side` & `exclude_types` to `DPTabulate` in `se_r` (#4446) Fix #4445. * Modify `DPTabulate` instance creation to include `self.type_one_side` and `self.exclude_types` ## Summary by CodeRabbit - **New Features** - Enhanced configurability for the `DescrptSeR` class, allowing users to customize compression behavior with new parameters. - Introduced optional parameters for improved management of atom types and interactions during the embedding process. - **Bug Fixes** - Added validation for excluded types to ensure proper handling within the compression logic. 
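The underlying bug class (a helper silently falling back to its own defaults because the caller never forwarded its settings) can be shown in miniature. The sketch below is hypothetical and does not use the real `DPTabulate` API:

```python
# Hypothetical stand-in for DPTabulate: decides which (i, j) type pairs
# get a compression table, using its own defaults unless told otherwise.
def make_tables(ntypes=2, *, type_one_side=False, exclude_types=()):
    pairs = [
        (i, j)
        for i in range(ntypes)
        for j in range(ntypes)
        if (i, j) not in exclude_types
    ]
    # with type_one_side, only the center-atom type matters
    return sorted({(i,) for i, _ in pairs}) if type_one_side else pairs

descriptor_cfg = {"type_one_side": True, "exclude_types": ((0, 1),)}

before = make_tables()                 # old call site: settings dropped
after = make_tables(**descriptor_cfg)  # fixed call site: settings forwarded
assert before != after                 # the defaults built mismatched tables
```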
--- deepmd/tf/descriptor/se_r.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/deepmd/tf/descriptor/se_r.py b/deepmd/tf/descriptor/se_r.py index d74b6df9ee..4cc867cf0d 100644 --- a/deepmd/tf/descriptor/se_r.py +++ b/deepmd/tf/descriptor/se_r.py @@ -356,6 +356,8 @@ def enable_compression( self.filter_neuron, graph, graph_def, + type_one_side=self.type_one_side, + exclude_types=self.exclude_types, activation_fn=self.filter_activation_fn, suffix=suffix, ) From 3917cf0be777f548b9bc4da63af97a78e8e9e952 Mon Sep 17 00:00:00 2001 From: Chun Cai Date: Tue, 3 Dec 2024 03:39:56 +0800 Subject: [PATCH 15/43] Perf: remove redundant checks on data integrity (#4433) Systems are aggregated here https://github.com/deepmodeling/deepmd-kit/blob/f343a3b212edab5525502e0261f3068c0b6fb1f6/deepmd/utils/data_system.py#L802 and later initialized here https://github.com/deepmodeling/deepmd-kit/blob/f343a3b212edab5525502e0261f3068c0b6fb1f6/deepmd/utils/data_system.py#L809-L810 This process will instantiate `DeepmdData` class, and it will perform data integrity checks https://github.com/deepmodeling/deepmd-kit/blob/e695a91ca6f7a1c9c830ab1c58b7b7a05db3da23/deepmd/utils/data.py#L80-L82 Besides, the checking process enumerates all items for all ranks, which is unnecessary and quite slow. So this PR removes this check. ## Summary by CodeRabbit - **New Features** - Enhanced flexibility in defining test sizes by allowing percentage input for the `test_size` parameter. - Introduced a new method to automatically compute test sizes based on the specified percentage of total data. - Improved path handling to accept both string and Path inputs, enhancing usability. - **Bug Fixes** - Improved error handling for invalid paths, ensuring users receive clear feedback when files are not found. - **Deprecation Notice** - The `get_test` method is now deprecated, with new logic implemented for loading test data when necessary. 
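The replacement pattern, in a minimal sketch (`SystemData` is an illustrative stand-in for `DeepmdData`): each system validates its own path once at construction time, instead of every rank pre-scanning the full system list up front. This is exactly the check the diff below moves into the constructor.

```python
from pathlib import Path

class SystemData:
    """Illustrative stand-in for DeepmdData's constructor-time checks."""

    def __init__(self, sys_path: str, set_prefix: str = "set") -> None:
        root = Path(sys_path)
        if not root.is_dir():
            raise FileNotFoundError(f"System {sys_path} is not found!")
        self.dirs = sorted(root.glob(set_prefix + ".*"))
        if not self.dirs:
            raise FileNotFoundError(f"No {set_prefix}.* is found in {sys_path}")
```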
--------- Signed-off-by: Chun Cai Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Jinzhe Zeng Co-authored-by: coderabbitai[bot] <136622811+coderabbitai[bot]@users.noreply.github.com> --- deepmd/utils/data.py | 2 ++ deepmd/utils/data_system.py | 22 ++-------------------- deepmd/utils/path.py | 12 ++++++------ 3 files changed, 10 insertions(+), 26 deletions(-) diff --git a/deepmd/utils/data.py b/deepmd/utils/data.py index d87117fa24..3d74c72bda 100644 --- a/deepmd/utils/data.py +++ b/deepmd/utils/data.py @@ -60,6 +60,8 @@ def __init__( ) -> None: """Constructor.""" root = DPPath(sys_path) + if not root.is_dir(): + raise FileNotFoundError(f"System {sys_path} is not found!") self.dirs = root.glob(set_prefix + ".*") if not len(self.dirs): raise FileNotFoundError(f"No {set_prefix}.* is found in {sys_path}") diff --git a/deepmd/utils/data_system.py b/deepmd/utils/data_system.py index a67047a034..439c6d7d09 100644 --- a/deepmd/utils/data_system.py +++ b/deepmd/utils/data_system.py @@ -28,9 +28,6 @@ from deepmd.utils.out_stat import ( compute_stats_from_redu, ) -from deepmd.utils.path import ( - DPPath, -) log = logging.getLogger(__name__) @@ -103,6 +100,8 @@ def __init__( del rcut self.system_dirs = systems self.nsystems = len(self.system_dirs) + if self.nsystems <= 0: + raise ValueError("No systems provided") self.data_systems = [] for ii in self.system_dirs: self.data_systems.append( @@ -751,23 +750,6 @@ def process_systems(systems: Union[str, list[str]]) -> list[str]: systems = expand_sys_str(systems) elif isinstance(systems, list): systems = systems.copy() - help_msg = "Please check your setting for data systems" - # check length of systems - if len(systems) == 0: - msg = "cannot find valid a data system" - log.fatal(msg) - raise OSError(msg, help_msg) - # roughly check all items in systems are valid - for ii in systems: - ii = DPPath(ii) - if not ii.is_dir(): - msg = f"dir {ii} is not a valid dir" - log.fatal(msg) - raise OSError(msg, help_msg) - if not (ii / "type.raw").is_file(): - msg = f"dir {ii} is not a valid data system dir" - log.fatal(msg) - raise OSError(msg, help_msg) return systems diff --git a/deepmd/utils/path.py b/deepmd/utils/path.py index 6c52caac1d..c542ccf661 100644 --- a/deepmd/utils/path.py +++ b/deepmd/utils/path.py @@ -14,6 +14,7 @@ from typing import ( ClassVar, Optional, + Union, ) import h5py @@ -157,19 +158,16 @@ class DPOSPath(DPPath): Parameters ---------- - path : str + path : Union[str, Path] path mode : str, optional mode, by default "r" """ - def __init__(self, path: str, mode: str = "r") -> None: + def __init__(self, path: Union[str, Path], mode: str = "r") -> None: super().__init__() self.mode = mode - if isinstance(path, Path): - self.path = path - else: - self.path = Path(path) + self.path = Path(path) def load_numpy(self) -> np.ndarray: """Load NumPy array. @@ -300,6 +298,8 @@ def __init__(self, path: str, mode: str = "r") -> None: # so we do not support file names containing #... 
s = path.split("#") self.root_path = s[0] + if not os.path.isfile(self.root_path): + raise FileNotFoundError(f"{self.root_path} not found") self.root = self._load_h5py(s[0], mode) # h5 path: default is the root path self._name = s[1] if len(s) > 1 else "/" From ce9aeb348932ffcbe9b1482300d57227061b529c Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 3 Dec 2024 12:06:56 +0800 Subject: [PATCH 16/43] [pre-commit.ci] pre-commit autoupdate (#4449) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.8.0 → v0.8.1](https://github.com/astral-sh/ruff-pre-commit/compare/v0.8.0...v0.8.1) - [github.com/scop/pre-commit-shfmt: v3.10.0-1 → v3.10.0-2](https://github.com/scop/pre-commit-shfmt/compare/v3.10.0-1...v3.10.0-2) - https://github.com/pylint-dev/pylint/: v3.3.1 → v3.3.2 Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index df101ff67b..b52d517d82 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -29,7 +29,7 @@ repos: exclude: ^source/3rdparty - repo: https://github.com/astral-sh/ruff-pre-commit # Ruff version. - rev: v0.8.0 + rev: v0.8.1 hooks: - id: ruff args: ["--fix"] @@ -74,7 +74,7 @@ repos: exclude: ^(source/3rdparty|\.github/workflows|\.clang-format) # Shell - repo: https://github.com/scop/pre-commit-shfmt - rev: v3.10.0-1 + rev: v3.10.0-2 hooks: - id: shfmt # CMake @@ -154,7 +154,7 @@ repos: exclude: .pre-commit-config.yaml|source/lmp # customized pylint rules - repo: https://github.com/pylint-dev/pylint/ - rev: v3.3.1 + rev: v3.3.2 hooks: - id: pylint entry: env PYTHONPATH=source/checker pylint From b4ade5ceac0272443b94b7469393f6ba49b4e21c Mon Sep 17 00:00:00 2001 From: Chun Cai Date: Mon, 9 Dec 2024 08:42:19 +0800 Subject: [PATCH 17/43] refactor: simplify dataset construction (#4437) ## Summary by CodeRabbit - **New Features** - Introduced a new function for dataset construction, enhancing data loading processes. - Added a method to improve pickling and unpickling capabilities for path handling classes. - **Bug Fixes** - Updated summary printing to prevent redundant output during distributed training. - **Refactor** - Simplified initialization of the BackgroundConsumer class. - Streamlined consumer thread and queue handling in the BufferedIterator class. 
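Condensed, the buffered-iterator shape this refactor lands on looks like the sketch below; the queue-size constant and the slow-loading warning from the actual patch are omitted. A daemon thread drains the source iterator into a bounded queue, and a `StopIteration` instance enqueued at the end acts as the sentinel: re-raising it from `__next__` terminates iteration cleanly, and any other exception from the worker propagates to the consumer.

```python
from queue import Queue
from threading import Thread

class BufferedIteratorSketch:
    def __init__(self, iterable, maxsize: int = 32) -> None:
        self._queue: Queue = Queue(maxsize)
        self._iterable = iterable
        Thread(target=self._fill, daemon=True).start()

    def _fill(self) -> None:
        for item in self._iterable:
            self._queue.put(item)  # blocks when the buffer is full
        self._queue.put(StopIteration())  # sentinel: source exhausted

    def __iter__(self):
        return self

    def __len__(self) -> int:
        return len(self._iterable)

    def __next__(self):
        item = self._queue.get()
        if isinstance(item, Exception):
            raise item  # StopIteration ends the loop; other errors surface
        return item
```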
--------- Signed-off-by: Chun Cai Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Han Wang <92130845+wanghan-iapcm@users.noreply.github.com> --- deepmd/pt/utils/dataloader.py | 65 +++++++++++++---------------------- deepmd/utils/path.py | 10 ++++++ 2 files changed, 34 insertions(+), 41 deletions(-) diff --git a/deepmd/pt/utils/dataloader.py b/deepmd/pt/utils/dataloader.py index d33b17b035..67e5195f6d 100644 --- a/deepmd/pt/utils/dataloader.py +++ b/deepmd/pt/utils/dataloader.py @@ -1,11 +1,13 @@ # SPDX-License-Identifier: LGPL-3.0-or-later import logging import os -import queue import time from multiprocessing.dummy import ( Pool, ) +from queue import ( + Queue, +) from threading import ( Thread, ) @@ -204,70 +206,51 @@ def print_summary( ) -_sentinel = object() -QUEUESIZE = 32 - - class BackgroundConsumer(Thread): - def __init__(self, queue, source, max_len) -> None: - Thread.__init__(self) + def __init__(self, queue, source) -> None: + super().__init__() + self.daemon = True self._queue = queue self._source = source # Main DL iterator - self._max_len = max_len # def run(self) -> None: for item in self._source: self._queue.put(item) # Blocking if the queue is full - # Signal the consumer we are done. - self._queue.put(_sentinel) + # Signal the consumer we are done; this should not happen for DataLoader + self._queue.put(StopIteration()) + + +QUEUESIZE = 32 class BufferedIterator: def __init__(self, iterable) -> None: - self._queue = queue.Queue(QUEUESIZE) + self._queue = Queue(QUEUESIZE) self._iterable = iterable - self._consumer = None - - self.start_time = time.time() - self.warning_time = None - self.total = len(iterable) - - def _create_consumer(self) -> None: - self._consumer = BackgroundConsumer(self._queue, self._iterable, self.total) - self._consumer.daemon = True + self._consumer = BackgroundConsumer(self._queue, self._iterable) self._consumer.start() + self.last_warning_time = time.time() def __iter__(self): return self def __len__(self) -> int: - return self.total + return len(self._iterable) def __next__(self): - # Create consumer if not created yet - if self._consumer is None: - self._create_consumer() - # Notify the user if there is a data loading bottleneck - if self._queue.qsize() < min(2, max(1, self._queue.maxsize // 2)): - if time.time() - self.start_time > 5 * 60: - if ( - self.warning_time is None - or time.time() - self.warning_time > 15 * 60 - ): - log.warning( - "Data loading buffer is empty or nearly empty. This may " - "indicate a data loading bottleneck, and increasing the " - "number of workers (--num-workers) may help." - ) - self.warning_time = time.time() - - # Get next example + start_wait = time.time() item = self._queue.get() + wait_time = time.time() - start_wait + if ( + wait_time > 1.0 and start_wait - self.last_warning_time > 15 * 60 + ): # Even for Multi-Task training, each step usually takes < 1s + log.warning( + f"Data loading is slow, waited {wait_time:.2f} seconds. Ignoring this warning for 15 minutes." 
+ ) + self.last_warning_time = start_wait if isinstance(item, Exception): raise item - if item is _sentinel: - raise StopIteration return item diff --git a/deepmd/utils/path.py b/deepmd/utils/path.py index c542ccf661..87a44aa70d 100644 --- a/deepmd/utils/path.py +++ b/deepmd/utils/path.py @@ -114,6 +114,10 @@ def is_file(self) -> bool: def is_dir(self) -> bool: """Check if self is directory.""" + @abstractmethod + def __getnewargs__(self): + """Return the arguments to be passed to __new__ when unpickling an instance.""" + @abstractmethod def __truediv__(self, key: str) -> "DPPath": """Used for / operator.""" @@ -169,6 +173,9 @@ def __init__(self, path: Union[str, Path], mode: str = "r") -> None: self.mode = mode self.path = Path(path) + def __getnewargs__(self): + return (self.path, self.mode) + def load_numpy(self) -> np.ndarray: """Load NumPy array. @@ -304,6 +311,9 @@ def __init__(self, path: str, mode: str = "r") -> None: # h5 path: default is the root path self._name = s[1] if len(s) > 1 else "/" + def __getnewargs__(self): + return (self.root_path, self.mode) + @classmethod @lru_cache(None) def _load_h5py(cls, path: str, mode: str = "r") -> h5py.File: From d162d0b47b51ea4aee56acbafb0c30fbc1dcdb82 Mon Sep 17 00:00:00 2001 From: HanswithCMY <167949665+HanswithCMY@users.noreply.github.com> Date: Mon, 9 Dec 2024 09:14:25 +0800 Subject: [PATCH 18/43] feat: add method to access fele in pppm/dplr (#4452) - I would like to modify the fele vector in pppm/dplr (e.g., by a fix plugin), which would be used to calculate the long-range interaction in fix dplr. - Therefore, I would like to add a new method to access the editable fele. This method would not change the current workflows of the lammps-dplr interface. ## Summary by CodeRabbit - **New Features** - Introduced a new method to access a vector of doubles, enhancing the class interface for better usability. This allows for external modification of the vector. - **Bug Fixes** - No bug fixes included in this release. - **Documentation** - Updated documentation to reflect the addition of the new method. --------- Signed-off-by: HanswithCMY <167949665+HanswithCMY@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Han Wang <92130845+wanghan-iapcm@users.noreply.github.com> --- source/lmp/pppm_dplr.h | 1 + 1 file changed, 1 insertion(+) diff --git a/source/lmp/pppm_dplr.h b/source/lmp/pppm_dplr.h index 1484a16e72..b7e221c686 100644 --- a/source/lmp/pppm_dplr.h +++ b/source/lmp/pppm_dplr.h @@ -28,6 +28,7 @@ class PPPMDPLR : public PPPM { ~PPPMDPLR() override {}; void init() override; const std::vector &get_fele() const { return fele; }; + std::vector &get_fele() { return fele; } protected: void compute(int, int) override; From ec3b83fb9e6d0bd453b8d9b23fcd33732982fac4 Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Sun, 8 Dec 2024 20:15:29 -0500 Subject: [PATCH 19/43] fix(cc): copy nloc atoms from neighbor list (#4459) Prevent that the size of the neighbor list is larger than nloc. ## Summary by CodeRabbit - **New Features** - Enhanced flexibility in copying neighbor list data with the addition of a `natoms` parameter. - Improved handling of neighbor list data in the `compute` methods across multiple classes. - **Bug Fixes** - Refined error handling in the `translate_error` method for better clarity on exceptions. - **Documentation** - Updated method documentation to reflect changes in parameters and usage. 
- **Style** - Adjusted code structure for better readability and maintainability. Signed-off-by: Jinzhe Zeng --- source/api_cc/include/common.h | 8 +++++++- source/api_cc/src/DeepPotJAX.cc | 2 +- source/api_cc/src/DeepPotPT.cc | 2 +- source/api_cc/src/DeepSpinPT.cc | 2 +- source/api_cc/src/common.cc | 5 +++-- 5 files changed, 13 insertions(+), 6 deletions(-) diff --git a/source/api_cc/include/common.h b/source/api_cc/include/common.h index 2bd0cf7135..612f699ea4 100644 --- a/source/api_cc/include/common.h +++ b/source/api_cc/include/common.h @@ -33,7 +33,13 @@ struct NeighborListData { std::vector firstneigh; public: - void copy_from_nlist(const InputNlist& inlist); + /** + * @brief Copy the neighbor list from an InputNlist. + * @param[in] inlist The input neighbor list. + * @param[in] natoms The number of atoms to copy. If natoms is -1, copy all + * atoms. + */ + void copy_from_nlist(const InputNlist& inlist, const int natoms = -1); void shuffle(const std::vector& fwd_map); void shuffle(const deepmd::AtomMap& map); void shuffle_exclude_empty(const std::vector& fwd_map); diff --git a/source/api_cc/src/DeepPotJAX.cc b/source/api_cc/src/DeepPotJAX.cc index 805380081d..07f8b9119b 100644 --- a/source/api_cc/src/DeepPotJAX.cc +++ b/source/api_cc/src/DeepPotJAX.cc @@ -566,7 +566,7 @@ void deepmd::DeepPotJAX::compute(std::vector& ener, input_list[1] = add_input(op, atype, atype_shape, data_tensor[1], status); // nlist if (ago == 0) { - nlist_data.copy_from_nlist(lmp_list); + nlist_data.copy_from_nlist(lmp_list, nall - nghost); nlist_data.shuffle_exclude_empty(fwd_map); } size_t max_size = 0; diff --git a/source/api_cc/src/DeepPotPT.cc b/source/api_cc/src/DeepPotPT.cc index 6910de3ccd..abd35eaf1e 100644 --- a/source/api_cc/src/DeepPotPT.cc +++ b/source/api_cc/src/DeepPotPT.cc @@ -169,7 +169,7 @@ void DeepPotPT::compute(ENERGYVTYPE& ener, at::Tensor atype_Tensor = torch::from_blob(atype_64.data(), {1, nall_real}, int_option).to(device); if (ago == 0) { - nlist_data.copy_from_nlist(lmp_list); + nlist_data.copy_from_nlist(lmp_list, nall - nghost); nlist_data.shuffle_exclude_empty(fwd_map); nlist_data.padding(); if (do_message_passing) { diff --git a/source/api_cc/src/DeepSpinPT.cc b/source/api_cc/src/DeepSpinPT.cc index aef2d60150..7421b623db 100644 --- a/source/api_cc/src/DeepSpinPT.cc +++ b/source/api_cc/src/DeepSpinPT.cc @@ -177,7 +177,7 @@ void DeepSpinPT::compute(ENERGYVTYPE& ener, torch::from_blob(atype_64.data(), {1, nall_real}, int_option).to(device); c10::optional mapping_tensor; if (ago == 0) { - nlist_data.copy_from_nlist(lmp_list); + nlist_data.copy_from_nlist(lmp_list, nall - nghost); nlist_data.shuffle_exclude_empty(fwd_map); nlist_data.padding(); if (do_message_passing) { diff --git a/source/api_cc/src/common.cc b/source/api_cc/src/common.cc index 5a4f05d75c..c51ae9a8b4 100644 --- a/source/api_cc/src/common.cc +++ b/source/api_cc/src/common.cc @@ -232,8 +232,9 @@ template void deepmd::select_real_atoms_coord( const int& nall, const bool aparam_nall); -void deepmd::NeighborListData::copy_from_nlist(const InputNlist& inlist) { - int inum = inlist.inum; +void deepmd::NeighborListData::copy_from_nlist(const InputNlist& inlist, + const int natoms) { + int inum = natoms >= 0 ? 
natoms : inlist.inum; ilist.resize(inum); jlist.resize(inum); memcpy(&ilist[0], inlist.ilist, inum * sizeof(int)); From e9ed26794c13739532713d5d9fe2288c8495f7f7 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 10 Dec 2024 11:04:27 +0800 Subject: [PATCH 20/43] [pre-commit.ci] pre-commit autoupdate (#4464) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.8.1 → v0.8.2](https://github.com/astral-sh/ruff-pre-commit/compare/v0.8.1...v0.8.2) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index b52d517d82..38f5abf616 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -29,7 +29,7 @@ repos: exclude: ^source/3rdparty - repo: https://github.com/astral-sh/ruff-pre-commit # Ruff version. - rev: v0.8.1 + rev: v0.8.2 hooks: - id: ruff args: ["--fix"] From e21f985a79e3178fb0cdc9c9a95a6cd08515c96e Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 17 Dec 2024 17:20:41 -0500 Subject: [PATCH 21/43] [pre-commit.ci] pre-commit autoupdate (#4473) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.8.2 → v0.8.3](https://github.com/astral-sh/ruff-pre-commit/compare/v0.8.2...v0.8.3) - [github.com/pre-commit/mirrors-clang-format: v19.1.4 → v19.1.5](https://github.com/pre-commit/mirrors-clang-format/compare/v19.1.4...v19.1.5) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 38f5abf616..9058decc21 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -29,7 +29,7 @@ repos: exclude: ^source/3rdparty - repo: https://github.com/astral-sh/ruff-pre-commit # Ruff version. - rev: v0.8.2 + rev: v0.8.3 hooks: - id: ruff args: ["--fix"] @@ -60,7 +60,7 @@ repos: - id: blacken-docs # C++ - repo: https://github.com/pre-commit/mirrors-clang-format - rev: v19.1.4 + rev: v19.1.5 hooks: - id: clang-format exclude: ^(source/3rdparty|source/lib/src/gpu/cudart/.+\.inc|.+\.ipynb$) From e8167ce562bf158135567b137c4afa0ba0492dba Mon Sep 17 00:00:00 2001 From: HydrogenSulfate <490868991@qq.com> Date: Wed, 18 Dec 2024 06:29:35 +0800 Subject: [PATCH 22/43] pd: support dpa1 (#4414) Summary of this PR: 1. upload DPA-1 related code 2. merge much develop code 3. add all eager composite operators except `softmax_grad`, `p_norm_grad`, `split_grad`, and `concat_grad` to the composite operator blacklist() to significantly improve model execution speed (reducing the time taken from 100% more than PyTorch to about 10% to 15% more). 
related PR: https://github.com/lanpa/tensorboardX/pull/728 ### Training curve: ![training_curves_comparison_eager_opt](https://github.com/user-attachments/assets/3b71fc99-5abf-4353-a61a-38737d3c7f2c) ### Accuracy test(left: paddle, right: torch): ![image](https://github.com/user-attachments/assets/a42b4bfd-c0f8-4eb8-85eb-ff1adf981dbb) Ralated optimization of Paddle framework: - [x] https://github.com/PaddlePaddle/Paddle/pull/69349 - [x] https://github.com/PaddlePaddle/Paddle/pull/69333 - [x] https://github.com/PaddlePaddle/Paddle/pull/69479 - [x] https://github.com/PaddlePaddle/Paddle/pull/69515 - [x] https://github.com/PaddlePaddle/Paddle/pull/69487 - [x] https://github.com/PaddlePaddle/Paddle/pull/69661 - [x] https://github.com/PaddlePaddle/Paddle/pull/69660 - [x] https://github.com/PaddlePaddle/Paddle/pull/69596 - [x] https://github.com/PaddlePaddle/Paddle/pull/69556 ## Summary by CodeRabbit ## Release Notes - **New Features** - Introduced several new classes for molecular descriptors, including `DescrptDPA1`, `DescrptBlockSeAtten`, and `LayerNorm`, enhancing the modeling capabilities for molecular simulations. - Added new JSON configuration files for model parameters and multitask models related to water simulations. - Implemented new test classes for validating the functionality of the `DPAtomicModel` and various descriptor classes. - Added new test classes for evaluating denoising models, including `TestDenoiseModelDPA1` and `TestDenoiseModelDPA2`. - Enhanced the `ModelWrapper` class to clarify the handling of model parameters and state management. - **Bug Fixes** - Improved internal logic for handling model state saving and loading, ensuring consistency in outputs. - **Documentation** - Enhanced type hints and return annotations across various classes and methods for better clarity. - **Tests** - Expanded the testing framework with new test cases for denoising models and descriptor functionalities, ensuring robust validation of features. - Activated previously skipped tests for energy models, improving test coverage. - Enhanced multitask training tests with new configuration handling and test classes. 
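As a concrete reading of the attention formula documented in the DPA-1 descriptor added below, here is a one-neighborhood numpy sketch: the usual `softmax(Q K^T / sqrt(d_k))` weights are gated elementwise by dot products of normalized relative coordinates, `r_hat @ r_hat.T`, before being applied to the values. This omits the smooth-type-embedding and temperature options of the full implementation; all names are illustrative.

```python
import numpy as np

def gated_attention(Q, K, V, r_hat):
    # Q, K: (nnei, d_k); V: (nnei, d_v); r_hat: (nnei, 3) unit vectors.
    d_k = Q.shape[-1]
    logits = Q @ K.T / np.sqrt(d_k)
    w = np.exp(logits - logits.max(axis=-1, keepdims=True))
    w /= w.sum(axis=-1, keepdims=True)  # standard softmax weights
    w = w * (r_hat @ r_hat.T)           # angular gate from the docstring
    return w @ V                        # (nnei, d_v)

# Tiny smoke test with random shapes.
rng = np.random.default_rng(0)
nnei, d_k, d_v = 4, 8, 6
r = rng.normal(size=(nnei, 3))
r_hat = r / np.linalg.norm(r, axis=-1, keepdims=True)
out = gated_attention(
    rng.normal(size=(nnei, d_k)),
    rng.normal(size=(nnei, d_k)),
    rng.normal(size=(nnei, d_v)),
    r_hat,
)
assert out.shape == (nnei, d_v)
```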
--------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- deepmd/pd/entrypoints/main.py | 18 +- deepmd/pd/loss/ener.py | 6 +- .../pd/model/atomic_model/dp_atomic_model.py | 38 +- deepmd/pd/model/descriptor/__init__.py | 6 + deepmd/pd/model/descriptor/dpa1.py | 689 +++++++++++ deepmd/pd/model/descriptor/env_mat.py | 9 +- deepmd/pd/model/descriptor/se_a.py | 147 ++- deepmd/pd/model/descriptor/se_atten.py | 1073 +++++++++++++++++ deepmd/pd/model/model/ener_model.py | 17 +- deepmd/pd/model/model/make_model.py | 18 +- deepmd/pd/model/network/layernorm.py | 165 +++ deepmd/pd/model/network/network.py | 25 +- deepmd/pd/model/task/fitting.py | 75 +- deepmd/pd/train/training.py | 109 +- deepmd/pd/train/wrapper.py | 16 +- deepmd/pd/utils/dataloader.py | 57 +- deepmd/pd/utils/decomp.py | 120 +- deepmd/pd/utils/env.py | 64 +- deepmd/pd/utils/exclude_mask.py | 20 +- deepmd/pd/utils/nlist.py | 28 +- deepmd/pd/utils/region.py | 13 +- deepmd/pd/utils/utils.py | 19 + .../tests/consistent/descriptor/test_dpa1.py | 44 + source/tests/consistent/model/test_dpa1.py | 28 + source/tests/pd/common.py | 8 + source/tests/pd/model/models/dpa1.json | 36 + source/tests/pd/model/models/dpa1.pd | Bin 0 -> 11329 bytes source/tests/pd/model/models/dpa2_tebd.pd | Bin 0 -> 537 bytes .../pd/model/test_atomic_model_atomic_stat.py | 431 +++++++ .../pd/model/test_atomic_model_global_stat.py | 510 ++++++++ source/tests/pd/model/test_autodiff.py | 2 - source/tests/pd/model/test_descriptor.py | 3 +- source/tests/pd/model/test_descriptor_dpa1.py | 387 ++++++ source/tests/pd/model/test_dpa1.py | 164 +++ source/tests/pd/model/test_env_mat.py | 6 +- source/tests/pd/model/test_forward_lower.py | 3 +- source/tests/pd/model/test_null_input.py | 8 + source/tests/pd/model/test_permutation.py | 23 +- .../pd/model/test_permutation_denoise.py | 109 ++ source/tests/pd/model/test_rot.py | 1 - source/tests/pd/model/test_rot_denoise.py | 124 ++ source/tests/pd/model/test_saveload_dpa1.py | 144 +++ source/tests/pd/model/test_smooth.py | 36 + source/tests/pd/model/test_trans.py | 1 - source/tests/pd/model/test_trans_denoise.py | 95 ++ .../pd/model/water/multitask_sharefit.json | 134 ++ source/tests/pd/test_decomp.py | 61 - source/tests/pd/test_finetune.py | 1 - source/tests/pd/test_multitask.py | 58 +- source/tests/pd/test_training.py | 33 +- 50 files changed, 4740 insertions(+), 442 deletions(-) create mode 100644 deepmd/pd/model/descriptor/dpa1.py create mode 100644 deepmd/pd/model/descriptor/se_atten.py create mode 100644 deepmd/pd/model/network/layernorm.py create mode 100644 source/tests/pd/model/models/dpa1.json create mode 100644 source/tests/pd/model/models/dpa1.pd create mode 100644 source/tests/pd/model/models/dpa2_tebd.pd create mode 100644 source/tests/pd/model/test_atomic_model_atomic_stat.py create mode 100644 source/tests/pd/model/test_atomic_model_global_stat.py create mode 100644 source/tests/pd/model/test_descriptor_dpa1.py create mode 100644 source/tests/pd/model/test_dpa1.py create mode 100644 source/tests/pd/model/test_permutation_denoise.py create mode 100644 source/tests/pd/model/test_rot_denoise.py create mode 100644 source/tests/pd/model/test_saveload_dpa1.py create mode 100644 source/tests/pd/model/test_trans_denoise.py create mode 100644 source/tests/pd/model/water/multitask_sharefit.json diff --git a/deepmd/pd/entrypoints/main.py b/deepmd/pd/entrypoints/main.py index 19653d6ea7..3fa66312e7 100644 --- a/deepmd/pd/entrypoints/main.py +++ b/deepmd/pd/entrypoints/main.py @@ -230,7 
+230,7 @@ def train( use_pretrain_script: bool = False, force_load: bool = False, output: str = "out.json", -): +) -> None: log.info("Configuration path: %s", input_file) SummaryPrinter()() with open(input_file) as fin: @@ -321,10 +321,18 @@ def train( # save min_nbor_dist if min_nbor_dist is not None: if not multi_task: - trainer.model.min_nbor_dist = min_nbor_dist + trainer.model.min_nbor_dist = paddle.to_tensor( + min_nbor_dist, + dtype=paddle.float64, + place=DEVICE, + ) else: for model_item in min_nbor_dist: - trainer.model[model_item].min_nbor_dist = min_nbor_dist[model_item] + trainer.model[model_item].min_nbor_dist = paddle.to_tensor( + min_nbor_dist[model_item], + dtype=paddle.float64, + place=DEVICE, + ) trainer.run() @@ -332,7 +340,7 @@ def freeze( model: str, output: str = "frozen_model.json", head: Optional[str] = None, -): +) -> None: paddle.set_flags( { "FLAGS_save_cf_stack_op": 1, @@ -383,7 +391,7 @@ def change_bias( numb_batch: int = 0, model_branch: Optional[str] = None, output: Optional[str] = None, -): +) -> None: if input_file.endswith(".pd"): old_state_dict = paddle.load(input_file) model_state_dict = copy.deepcopy(old_state_dict.get("model", old_state_dict)) diff --git a/deepmd/pd/loss/ener.py b/deepmd/pd/loss/ener.py index 7c5d848b45..73ad53601a 100644 --- a/deepmd/pd/loss/ener.py +++ b/deepmd/pd/loss/ener.py @@ -10,7 +10,6 @@ TaskLoss, ) from deepmd.pd.utils import ( - decomp, env, ) from deepmd.pd.utils.env import ( @@ -224,10 +223,7 @@ def forward(self, input_dict, model, label, natoms, learning_rate, mae=False): if self.relative_f is not None: force_label_3 = force_label.reshape([-1, 3]) - # norm_f = force_label_3.norm(axis=1, keepdim=True) + self.relative_f - norm_f = ( - decomp.norm(force_label_3, axis=1, keepdim=True) + self.relative_f - ) + norm_f = force_label_3.norm(axis=1, keepdim=True) + self.relative_f diff_f_3 = diff_f.reshape([-1, 3]) diff_f_3 = diff_f_3 / norm_f diff_f = diff_f_3.reshape([-1]) diff --git a/deepmd/pd/model/atomic_model/dp_atomic_model.py b/deepmd/pd/model/atomic_model/dp_atomic_model.py index 25a0f89d77..1089b93a68 100644 --- a/deepmd/pd/model/atomic_model/dp_atomic_model.py +++ b/deepmd/pd/model/atomic_model/dp_atomic_model.py @@ -1,5 +1,4 @@ # SPDX-License-Identifier: LGPL-3.0-or-later -import copy import functools import logging from typing import ( @@ -52,7 +51,7 @@ def __init__( fitting, type_map: list[str], **kwargs, - ): + ) -> None: super().__init__(type_map, **kwargs) ntypes = len(type_map) self.type_map = type_map @@ -201,7 +200,7 @@ def serialize(self) -> dict: @classmethod def deserialize(cls, data) -> "DPAtomicModel": - data = copy.deepcopy(data) + data = data.copy() check_version_compatibility(data.pop("@version", 1), 2, 1) data.pop("@class", None) data.pop("type", None) @@ -212,6 +211,37 @@ def deserialize(cls, data) -> "DPAtomicModel": obj = super().deserialize(data) return obj + def enable_compression( + self, + min_nbor_dist: float, + table_extrapolate: float = 5, + table_stride_1: float = 0.01, + table_stride_2: float = 0.1, + check_frequency: int = -1, + ) -> None: + """Call descriptor enable_compression(). 
+ + Parameters + ---------- + min_nbor_dist + The nearest distance between atoms + table_extrapolate + The scale of model extrapolation + table_stride_1 + The uniform stride of the first table + table_stride_2 + The uniform stride of the second table + check_frequency + The overflow check frequency + """ + self.descriptor.enable_compression( + min_nbor_dist, + table_extrapolate, + table_stride_1, + table_stride_2, + check_frequency, + ) + def forward_atomic( self, extended_coord, @@ -278,7 +308,7 @@ def compute_or_load_stat( self, sampled_func, stat_file_path: Optional[DPPath] = None, - ): + ) -> None: """ Compute or load the statistics parameters of the model, such as mean and standard deviation of descriptors or the energy bias of the fitting net. diff --git a/deepmd/pd/model/descriptor/__init__.py b/deepmd/pd/model/descriptor/__init__.py index 654643959b..7eaa0df85b 100644 --- a/deepmd/pd/model/descriptor/__init__.py +++ b/deepmd/pd/model/descriptor/__init__.py @@ -5,6 +5,10 @@ from .descriptor import ( DescriptorBlock, ) +from .dpa1 import ( + DescrptBlockSeAtten, + DescrptDPA1, +) from .env_mat import ( prod_env_mat, ) @@ -17,6 +21,8 @@ "BaseDescriptor", "DescriptorBlock", "DescrptBlockSeA", + "DescrptBlockSeAtten", + "DescrptDPA1", "DescrptSeA", "prod_env_mat", ] diff --git a/deepmd/pd/model/descriptor/dpa1.py b/deepmd/pd/model/descriptor/dpa1.py new file mode 100644 index 0000000000..f3f1ea26d6 --- /dev/null +++ b/deepmd/pd/model/descriptor/dpa1.py @@ -0,0 +1,689 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from typing import ( + Callable, + Optional, + Union, +) + +import paddle + +from deepmd.dpmodel.utils import EnvMat as DPEnvMat +from deepmd.dpmodel.utils.seed import ( + child_seed, +) +from deepmd.pd.model.network.mlp import ( + NetworkCollection, +) +from deepmd.pd.model.network.network import ( + TypeEmbedNet, + TypeEmbedNetConsistent, +) +from deepmd.pd.utils import ( + env, +) +from deepmd.pd.utils.env import ( + PRECISION_DICT, + RESERVED_PRECISON_DICT, +) +from deepmd.pd.utils.update_sel import ( + UpdateSel, +) +from deepmd.utils.data_system import ( + DeepmdDataSystem, +) +from deepmd.utils.finetune import ( + get_index_between_two_maps, + map_pair_exclude_types, +) +from deepmd.utils.path import ( + DPPath, +) +from deepmd.utils.version import ( + check_version_compatibility, +) + +from .base_descriptor import ( + BaseDescriptor, +) +from .descriptor import ( + extend_descrpt_stat, +) +from .se_atten import ( + DescrptBlockSeAtten, + NeighborGatedAttention, +) + + +@BaseDescriptor.register("dpa1") +@BaseDescriptor.register("se_atten") +class DescrptDPA1(BaseDescriptor, paddle.nn.Layer): + r"""Attention-based descriptor which is proposed in the pretrainable DPA-1[1] model. + + This descriptor, :math:`\mathcal{D}^i \in \mathbb{R}^{M \times M_{<}}`, is given by + + .. math:: + \mathcal{D}^i = \frac{1}{N_c^2}(\hat{\mathcal{G}}^i)^T \mathcal{R}^i (\mathcal{R}^i)^T \hat{\mathcal{G}}^i_<, + + where :math:`\hat{\mathcal{G}}^i` represents the embedding matrix:math:`\mathcal{G}^i` + after additional self-attention mechanism and :math:`\mathcal{R}^i` is defined by the full case in the se_e2_a descriptor. + Note that we obtain :math:`\mathcal{G}^i` using the type embedding method by default in this descriptor. 
+ + To perform the self-attention mechanism, the queries :math:`\mathcal{Q}^{i,l} \in \mathbb{R}^{N_c\times d_k}`, + keys :math:`\mathcal{K}^{i,l} \in \mathbb{R}^{N_c\times d_k}`, + and values :math:`\mathcal{V}^{i,l} \in \mathbb{R}^{N_c\times d_v}` are first obtained: + + .. math:: + \left(\mathcal{Q}^{i,l}\right)_{j}=Q_{l}\left(\left(\mathcal{G}^{i,l-1}\right)_{j}\right), + + .. math:: + \left(\mathcal{K}^{i,l}\right)_{j}=K_{l}\left(\left(\mathcal{G}^{i,l-1}\right)_{j}\right), + + .. math:: + \left(\mathcal{V}^{i,l}\right)_{j}=V_{l}\left(\left(\mathcal{G}^{i,l-1}\right)_{j}\right), + + where :math:`Q_{l}`, :math:`K_{l}`, :math:`V_{l}` represent three trainable linear transformations + that output the queries and keys of dimension :math:`d_k` and values of dimension :math:`d_v`, and :math:`l` + is the index of the attention layer. + The input embedding matrix to the attention layers, denoted by :math:`\mathcal{G}^{i,0}`, + is chosen as the two-body embedding matrix. + + Then the scaled dot-product attention method is adopted: + + .. math:: + A(\mathcal{Q}^{i,l}, \mathcal{K}^{i,l}, \mathcal{V}^{i,l}, \mathcal{R}^{i,l})=\varphi\left(\mathcal{Q}^{i,l}, \mathcal{K}^{i,l},\mathcal{R}^{i,l}\right)\mathcal{V}^{i,l}, + + where :math:`\varphi\left(\mathcal{Q}^{i,l}, \mathcal{K}^{i,l},\mathcal{R}^{i,l}\right) \in \mathbb{R}^{N_c\times N_c}` is attention weights. + In the original attention method, + one typically has :math:`\varphi\left(\mathcal{Q}^{i,l}, \mathcal{K}^{i,l}\right)=\mathrm{softmax}\left(\frac{\mathcal{Q}^{i,l} (\mathcal{K}^{i,l})^{T}}{\sqrt{d_{k}}}\right)`, + with :math:`\sqrt{d_{k}}` being the normalization temperature. + This is slightly modified to incorporate the angular information: + + .. math:: + \varphi\left(\mathcal{Q}^{i,l}, \mathcal{K}^{i,l},\mathcal{R}^{i,l}\right) = \mathrm{softmax}\left(\frac{\mathcal{Q}^{i,l} (\mathcal{K}^{i,l})^{T}}{\sqrt{d_{k}}}\right) \odot \hat{\mathcal{R}}^{i}(\hat{\mathcal{R}}^{i})^{T}, + + where :math:`\hat{\mathcal{R}}^{i} \in \mathbb{R}^{N_c\times 3}` denotes normalized relative coordinates, + :math:`\hat{\mathcal{R}}^{i}_{j} = \frac{\boldsymbol{r}_{ij}}{\lVert \boldsymbol{r}_{ij} \lVert}` + and :math:`\odot` means element-wise multiplication. + + Then layer normalization is added in a residual way to finally obtain the self-attention local embedding matrix + :math:`\hat{\mathcal{G}}^{i} = \mathcal{G}^{i,L_a}` after :math:`L_a` attention layers:[^1] + + .. math:: + \mathcal{G}^{i,l} = \mathcal{G}^{i,l-1} + \mathrm{LayerNorm}(A(\mathcal{Q}^{i,l}, \mathcal{K}^{i,l}, \mathcal{V}^{i,l}, \mathcal{R}^{i,l})). + + Parameters + ---------- + rcut: float + The cut-off radius :math:`r_c` + rcut_smth: float + From where the environment matrix should be smoothed :math:`r_s` + sel : list[int], int + list[int]: sel[i] specifies the maxmum number of type i atoms in the cut-off radius + int: the total maxmum number of atoms in the cut-off radius + ntypes : int + Number of element types + neuron : list[int] + Number of neurons in each hidden layers of the embedding net :math:`\mathcal{N}` + axis_neuron: int + Number of the axis neuron :math:`M_2` (number of columns of the sub-matrix of the embedding matrix) + tebd_dim: int + Dimension of the type embedding + tebd_input_mode: str + The input mode of the type embedding. Supported modes are ["concat", "strip"]. + - "concat": Concatenate the type embedding with the smoothed radial information as the union input for the embedding network. 
+ - "strip": Use a separated embedding network for the type embedding and combine the output with the radial embedding network output. + resnet_dt: bool + Time-step `dt` in the resnet construction: + y = x + dt * \phi (Wx + b) + trainable: bool + If the weights of this descriptors are trainable. + trainable_ln: bool + Whether to use trainable shift and scale weights in layer normalization. + ln_eps: float, Optional + The epsilon value for layer normalization. + type_one_side: bool + If 'False', type embeddings of both neighbor and central atoms are considered. + If 'True', only type embeddings of neighbor atoms are considered. + Default is 'False'. + attn: int + Hidden dimension of the attention vectors + attn_layer: int + Number of attention layers + attn_dotr: bool + If dot the angular gate to the attention weights + attn_mask: bool + (Only support False to keep consistent with other backend references.) + (Not used in this version. True option is not implemented.) + If mask the diagonal of attention weights + exclude_types : list[list[int]] + The excluded pairs of types which have no interaction with each other. + For example, `[[0, 1]]` means no interaction between type 0 and type 1. + env_protection: float + Protection parameter to prevent division by zero errors during environment matrix calculations. + set_davg_zero: bool + Set the shift of embedding net input to zero. + activation_function: str + The activation function in the embedding net. Supported options are |ACTIVATION_FN| + precision: str + The precision of the embedding net parameters. Supported options are |PRECISION| + scaling_factor: float + The scaling factor of normalization in calculations of attention weights. + If `temperature` is None, the scaling of attention weights is (N_dim * scaling_factor)**0.5 + normalize: bool + Whether to normalize the hidden vectors in attention weights calculation. + temperature: float + If not None, the scaling of attention weights is `temperature` itself. + smooth_type_embedding: bool + Whether to use smooth process in attention weights calculation. + concat_output_tebd: bool + Whether to concat type embedding at the output of the descriptor. + stripped_type_embedding: bool, Optional + (Deprecated, kept only for compatibility.) + Whether to strip the type embedding into a separate embedding network. + Setting this parameter to `True` is equivalent to setting `tebd_input_mode` to 'strip'. + Setting it to `False` is equivalent to setting `tebd_input_mode` to 'concat'. + The default value is `None`, which means the `tebd_input_mode` setting will be used instead. + seed: int, Optional + Random seed for parameter initialization. + use_econf_tebd: bool, Optional + Whether to use electronic configuration type embedding. + use_tebd_bias : bool, Optional + Whether to use bias in the type embedding layer. + type_map: list[str], Optional + A list of strings. Give the name to each type of atoms. + spin + (Only support None to keep consistent with other backend references.) + (Not used in this version. Not-none option is not implemented.) + The old implementation of deepspin. + + Limitations + ----------- + The currently implementation will not support the following deprecated features + 1. spin is not None + 2. attn_mask == True + + References + ---------- + .. [1] Duo Zhang, Hangrui Bi, Fu-Zhi Dai, Wanrun Jiang, Linfeng Zhang, and Han Wang. 2022. + DPA-1: Pretraining of Attention-based Deep Potential Model for Molecular Simulation. + arXiv preprint arXiv:2208.08236. 
+ """ + + def __init__( + self, + rcut: float, + rcut_smth: float, + sel: Union[list[int], int], + ntypes: int, + neuron: list = [25, 50, 100], + axis_neuron: int = 16, + tebd_dim: int = 8, + tebd_input_mode: str = "concat", + set_davg_zero: bool = True, + attn: int = 128, + attn_layer: int = 2, + attn_dotr: bool = True, + attn_mask: bool = False, + activation_function: str = "tanh", + precision: str = "float64", + resnet_dt: bool = False, + exclude_types: list[tuple[int, int]] = [], + env_protection: float = 0.0, + scaling_factor: int = 1.0, + normalize=True, + temperature=None, + concat_output_tebd: bool = True, + trainable: bool = True, + trainable_ln: bool = True, + ln_eps: Optional[float] = 1e-5, + smooth_type_embedding: bool = True, + type_one_side: bool = False, + stripped_type_embedding: Optional[bool] = None, + seed: Optional[Union[int, list[int]]] = None, + use_econf_tebd: bool = False, + use_tebd_bias: bool = False, + type_map: Optional[list[str]] = None, + # not implemented + spin=None, + type: Optional[str] = None, + ) -> None: + super().__init__() + # Ensure compatibility with the deprecated stripped_type_embedding option. + if stripped_type_embedding is not None: + # Use the user-set stripped_type_embedding parameter first + tebd_input_mode = "strip" if stripped_type_embedding else "concat" + if spin is not None: + raise NotImplementedError("old implementation of spin is not supported.") + if attn_mask: + raise NotImplementedError( + "old implementation of attn_mask is not supported." + ) + # to keep consistent with default value in this backends + if ln_eps is None: + ln_eps = 1e-5 + + self.tebd_input_mode = tebd_input_mode + + del type, spin, attn_mask + self.se_atten = DescrptBlockSeAtten( + rcut, + rcut_smth, + sel, + ntypes, + neuron=neuron, + axis_neuron=axis_neuron, + tebd_dim=tebd_dim, + tebd_input_mode=tebd_input_mode, + set_davg_zero=set_davg_zero, + attn=attn, + attn_layer=attn_layer, + attn_dotr=attn_dotr, + attn_mask=False, + activation_function=activation_function, + precision=precision, + resnet_dt=resnet_dt, + scaling_factor=scaling_factor, + normalize=normalize, + temperature=temperature, + smooth=smooth_type_embedding, + type_one_side=type_one_side, + exclude_types=exclude_types, + env_protection=env_protection, + trainable_ln=trainable_ln, + ln_eps=ln_eps, + seed=child_seed(seed, 1), + ) + self.use_econf_tebd = use_econf_tebd + self.use_tebd_bias = use_tebd_bias + self.type_map = type_map + self.compress = False + self.type_embedding = TypeEmbedNet( + ntypes, + tebd_dim, + precision=precision, + seed=child_seed(seed, 2), + use_econf_tebd=use_econf_tebd, + use_tebd_bias=use_tebd_bias, + type_map=type_map, + ) + self.prec = PRECISION_DICT[precision] + self.tebd_dim = tebd_dim + self.concat_output_tebd = concat_output_tebd + self.trainable = trainable + # set trainable + for param in self.parameters(): + param.stop_gradient = not trainable + + def get_rcut(self) -> float: + """Returns the cut-off radius.""" + return self.se_atten.get_rcut() + + def get_rcut_smth(self) -> float: + """Returns the radius where the neighbor information starts to smoothly decay to 0.""" + return self.se_atten.get_rcut_smth() + + def get_nsel(self) -> int: + """Returns the number of selected atoms in the cut-off radius.""" + return self.se_atten.get_nsel() + + def get_sel(self) -> list[int]: + """Returns the number of selected atoms for each type.""" + return self.se_atten.get_sel() + + def get_ntypes(self) -> int: + """Returns the number of element types.""" + return 
self.se_atten.get_ntypes() + + def get_type_map(self) -> list[str]: + """Get the name to each type of atoms.""" + return self.type_map + + def get_dim_out(self) -> int: + """Returns the output dimension.""" + ret = self.se_atten.get_dim_out() + if self.concat_output_tebd: + ret += self.tebd_dim + return ret + + def get_dim_emb(self) -> int: + return self.se_atten.dim_emb + + def mixed_types(self) -> bool: + """If true, the descriptor + 1. assumes total number of atoms aligned across frames; + 2. requires a neighbor list that does not distinguish different atomic types. + + If false, the descriptor + 1. assumes total number of atoms of each atom type aligned across frames; + 2. requires a neighbor list that distinguishes different atomic types. + + """ + return self.se_atten.mixed_types() + + def has_message_passing(self) -> bool: + """Returns whether the descriptor has message passing.""" + return self.se_atten.has_message_passing() + + def need_sorted_nlist_for_lower(self) -> bool: + """Returns whether the descriptor needs sorted nlist when using `forward_lower`.""" + return self.se_atten.need_sorted_nlist_for_lower() + + def get_env_protection(self) -> float: + """Returns the protection of building environment matrix.""" + return self.se_atten.get_env_protection() + + def share_params(self, base_class, shared_level, resume=False) -> None: + """ + Share the parameters of self to the base_class with shared_level during multitask training. + If not start from checkpoint (resume is False), + some separated parameters (e.g. mean and stddev) will be re-calculated across different classes. + """ + assert ( + self.__class__ == base_class.__class__ + ), "Only descriptors of the same type can share params!" + # For DPA1 descriptors, the user-defined share-level + # shared_level: 0 + # share all parameters in both type_embedding and se_atten + if shared_level == 0: + self._sub_layers["type_embedding"] = base_class._sub_layers[ + "type_embedding" + ] + self.se_atten.share_params(base_class.se_atten, 0, resume=resume) + # shared_level: 1 + # share all parameters in type_embedding + elif shared_level == 1: + self._sub_layers["type_embedding"] = base_class._sub_layers[ + "type_embedding" + ] + # Other shared levels + else: + raise NotImplementedError + + @property + def dim_out(self): + return self.get_dim_out() + + @property + def dim_emb(self): + return self.get_dim_emb() + + def compute_input_stats( + self, + merged: Union[Callable[[], list[dict]], list[dict]], + path: Optional[DPPath] = None, + ): + """ + Compute the input statistics (e.g. mean and stddev) for the descriptors from packed data. + + Parameters + ---------- + merged : Union[Callable[[], list[dict]], list[dict]] + - list[dict]: A list of data samples from various data systems. + Each element, `merged[i]`, is a data dictionary containing `keys`: `paddle.Tensor` + originating from the `i`-th data system. + - Callable[[], list[dict]]: A lazy function that returns data samples in the above format + only when needed. Since the sampling process can be slow and memory-intensive, + the lazy function helps by only sampling once. + path : Optional[DPPath] + The path to the stat file. 
+ + """ + return self.se_atten.compute_input_stats(merged, path) + + def set_stat_mean_and_stddev( + self, + mean: paddle.Tensor, + stddev: paddle.Tensor, + ) -> None: + """Update mean and stddev for descriptor.""" + self.se_atten.mean = mean + self.se_atten.stddev = stddev + + def get_stat_mean_and_stddev(self) -> tuple[paddle.Tensor, paddle.Tensor]: + """Get mean and stddev for descriptor.""" + return self.se_atten.mean, self.se_atten.stddev + + def change_type_map( + self, type_map: list[str], model_with_new_type_stat=None + ) -> None: + """Change the type related params to new ones, according to `type_map` and the original one in the model. + If there are new types in `type_map`, statistics will be updated accordingly to `model_with_new_type_stat` for these new types. + """ + assert ( + self.type_map is not None + ), "'type_map' must be defined when performing type changing!" + remap_index, has_new_type = get_index_between_two_maps(self.type_map, type_map) + obj = self.se_atten + obj.ntypes = len(type_map) + self.type_map = type_map + self.type_embedding.change_type_map(type_map=type_map) + obj.reinit_exclude(map_pair_exclude_types(obj.exclude_types, remap_index)) + if has_new_type: + # the avg and std of new types need to be updated + extend_descrpt_stat( + obj, + type_map, + des_with_stat=model_with_new_type_stat.se_atten + if model_with_new_type_stat is not None + else None, + ) + obj["davg"] = obj["davg"][remap_index] + obj["dstd"] = obj["dstd"][remap_index] + + def serialize(self) -> dict: + obj = self.se_atten + data = { + "@class": "Descriptor", + "type": "dpa1", + "@version": 2, + "rcut": obj.rcut, + "rcut_smth": obj.rcut_smth, + "sel": obj.sel, + "ntypes": obj.ntypes, + "neuron": obj.neuron, + "axis_neuron": obj.axis_neuron, + "tebd_dim": obj.tebd_dim, + "tebd_input_mode": obj.tebd_input_mode, + "set_davg_zero": obj.set_davg_zero, + "attn": obj.attn_dim, + "attn_layer": obj.attn_layer, + "attn_dotr": obj.attn_dotr, + "attn_mask": False, + "activation_function": obj.activation_function, + "resnet_dt": obj.resnet_dt, + "scaling_factor": obj.scaling_factor, + "normalize": obj.normalize, + "temperature": obj.temperature, + "trainable_ln": obj.trainable_ln, + "ln_eps": obj.ln_eps, + "smooth_type_embedding": obj.smooth, + "type_one_side": obj.type_one_side, + "concat_output_tebd": self.concat_output_tebd, + "use_econf_tebd": self.use_econf_tebd, + "use_tebd_bias": self.use_tebd_bias, + "type_map": self.type_map, + # make deterministic + "precision": RESERVED_PRECISON_DICT[obj.prec], + "embeddings": obj.filter_layers.serialize(), + "attention_layers": obj.dpa1_attention.serialize(), + "env_mat": DPEnvMat(obj.rcut, obj.rcut_smth).serialize(), + "type_embedding": self.type_embedding.embedding.serialize(), + "exclude_types": obj.exclude_types, + "env_protection": obj.env_protection, + "@variables": { + "davg": obj["davg"].numpy(), + "dstd": obj["dstd"].numpy(), + }, + "trainable": self.trainable, + "spin": None, + } + if obj.tebd_input_mode in ["strip"]: + data.update({"embeddings_strip": obj.filter_layers_strip.serialize()}) + return data + + @classmethod + def deserialize(cls, data: dict) -> "DescrptDPA1": + data = data.copy() + check_version_compatibility(data.pop("@version"), 2, 1) + data.pop("@class") + data.pop("type") + variables = data.pop("@variables") + embeddings = data.pop("embeddings") + type_embedding = data.pop("type_embedding") + attention_layers = data.pop("attention_layers") + env_mat = data.pop("env_mat") + tebd_input_mode = data["tebd_input_mode"] + if 
tebd_input_mode in ["strip"]: + embeddings_strip = data.pop("embeddings_strip") + else: + embeddings_strip = None + # compat with version 1 + if "use_tebd_bias" not in data: + data["use_tebd_bias"] = True + obj = cls(**data) + + def t_cvt(xx): + return paddle.to_tensor(xx, dtype=obj.se_atten.prec).to(device=env.DEVICE) + + obj.type_embedding.embedding = TypeEmbedNetConsistent.deserialize( + type_embedding + ) + obj.se_atten["davg"] = t_cvt(variables["davg"]) + obj.se_atten["dstd"] = t_cvt(variables["dstd"]) + obj.se_atten.filter_layers = NetworkCollection.deserialize(embeddings) + if tebd_input_mode in ["strip"]: + obj.se_atten.filter_layers_strip = NetworkCollection.deserialize( + embeddings_strip + ) + obj.se_atten.dpa1_attention = NeighborGatedAttention.deserialize( + attention_layers + ) + return obj + + def enable_compression( + self, + min_nbor_dist: float, + table_extrapolate: float = 5, + table_stride_1: float = 0.01, + table_stride_2: float = 0.1, + check_frequency: int = -1, + ) -> None: + """Receive the statisitcs (distance, max_nbor_size and env_mat_range) of the training data. + + Parameters + ---------- + min_nbor_dist + The nearest distance between atoms + table_extrapolate + The scale of model extrapolation + table_stride_1 + The uniform stride of the first table + table_stride_2 + The uniform stride of the second table + check_frequency + The overflow check frequency + """ + # do some checks before the mocel compression process + raise NotImplementedError("Model compression is not supported in paddle yet.") + + def forward( + self, + extended_coord: paddle.Tensor, + extended_atype: paddle.Tensor, + nlist: paddle.Tensor, + mapping: Optional[paddle.Tensor] = None, + comm_dict: Optional[dict[str, paddle.Tensor]] = None, + ): + """Compute the descriptor. + + Parameters + ---------- + extended_coord + The extended coordinates of atoms. shape: nf x (nallx3) + extended_atype + The extended aotm types. shape: nf x nall + nlist + The neighbor list. shape: nf x nloc x nnei + mapping + The index mapping, not required by this descriptor. + comm_dict + The data needed for communication for parallel inference. + + Returns + ------- + descriptor + The descriptor. shape: nf x nloc x (ng x axis_neuron) + gr + The rotationally equivariant and permutationally invariant single particle + representation. shape: nf x nloc x ng x 3 + g2 + The rotationally invariant pair-partical representation. + shape: nf x nloc x nnei x ng + h2 + The rotationally equivariant pair-partical representation. + shape: nf x nloc x nnei x 3 + sw + The smooth switch function. 
shape: nf x nloc x nnei + + """ + # cast the input to internal precsion + extended_coord = extended_coord.to(dtype=self.prec) + del mapping + nframes, nloc, nnei = nlist.shape + nall = extended_coord.reshape([nframes, -1]).shape[1] // 3 + g1_ext = self.type_embedding(extended_atype) + g1_inp = g1_ext[:, :nloc, :] + if self.tebd_input_mode in ["strip"]: + type_embedding = self.type_embedding.get_full_embedding(g1_ext.place) + else: + type_embedding = None + g1, g2, h2, rot_mat, sw = self.se_atten( + nlist, + extended_coord, + extended_atype, + g1_ext, + mapping=None, + type_embedding=type_embedding, + ) + if self.concat_output_tebd: + g1 = paddle.concat([g1, g1_inp], axis=-1) + + return ( + g1.to(dtype=env.GLOBAL_PD_FLOAT_PRECISION), + rot_mat.to(dtype=env.GLOBAL_PD_FLOAT_PRECISION), + g2.to(dtype=env.GLOBAL_PD_FLOAT_PRECISION) if g2 is not None else None, + h2.to(dtype=env.GLOBAL_PD_FLOAT_PRECISION), + sw.to(dtype=env.GLOBAL_PD_FLOAT_PRECISION), + ) + + @classmethod + def update_sel( + cls, + train_data: DeepmdDataSystem, + type_map: Optional[list[str]], + local_jdata: dict, + ) -> tuple[dict, Optional[float]]: + """Update the selection and perform neighbor statistics. + + Parameters + ---------- + train_data : DeepmdDataSystem + data used to do neighbor statistics + type_map : list[str], optional + The name of each type of atoms + local_jdata : dict + The local data refer to the current class + + Returns + ------- + dict + The updated local data + float + The minimum distance between two atoms + """ + local_jdata_cpy = local_jdata.copy() + min_nbor_dist, sel = UpdateSel().update_one_sel( + train_data, type_map, local_jdata_cpy["rcut"], local_jdata_cpy["sel"], True + ) + local_jdata_cpy["sel"] = sel[0] + return local_jdata_cpy, min_nbor_dist diff --git a/deepmd/pd/model/descriptor/env_mat.py b/deepmd/pd/model/descriptor/env_mat.py index 3a9daec1e8..9b72da0b16 100644 --- a/deepmd/pd/model/descriptor/env_mat.py +++ b/deepmd/pd/model/descriptor/env_mat.py @@ -2,9 +2,6 @@ import paddle -from deepmd.pd.utils import ( - decomp, -) from deepmd.pd.utils.preprocess import ( compute_smooth_weight, ) @@ -27,12 +24,10 @@ def _make_env_mat( nlist = paddle.where(mask, nlist, nall - 1) coord_l = coord[:, :natoms].reshape([bsz, -1, 1, 3]) index = nlist.reshape([bsz, -1]).unsqueeze(-1).expand([-1, -1, 3]) - # coord_r = paddle.take_along_axis(coord, axis=1, indices=index) - coord_r = decomp.take_along_axis(coord, axis=1, indices=index) + coord_r = paddle.take_along_axis(coord, axis=1, indices=index) coord_r = coord_r.reshape([bsz, natoms, nnei, 3]) diff = coord_r - coord_l - # length = paddle.linalg.norm(diff, axis=-1, keepdim=True) - length = decomp.norm(diff, axis=-1, keepdim=True) + length = paddle.linalg.norm(diff, axis=-1, keepdim=True) # for index 0 nloc atom length = length + (~mask.unsqueeze(-1)).astype(length.dtype) t0 = 1 / (length + protection) diff --git a/deepmd/pd/model/descriptor/se_a.py b/deepmd/pd/model/descriptor/se_a.py index 180d6f0a3f..0af6d082b8 100644 --- a/deepmd/pd/model/descriptor/se_a.py +++ b/deepmd/pd/model/descriptor/se_a.py @@ -9,6 +9,7 @@ import numpy as np import paddle +import paddle.nn as nn from deepmd.dpmodel.utils.seed import ( child_seed, @@ -87,13 +88,14 @@ def __init__( type_map: Optional[list[str]] = None, # not implemented spin=None, - ): + ) -> None: del ntypes if spin is not None: raise NotImplementedError("old implementation of spin is not supported.") super().__init__() self.type_map = type_map self.compress = False + self.prec = PRECISION_DICT[precision] 
self.sea = DescrptBlockSeA( rcut, rcut_smth, @@ -161,7 +163,7 @@ def get_env_protection(self) -> float: """Returns the protection of building environment matrix.""" return self.sea.get_env_protection() - def share_params(self, base_class, shared_level, resume=False): + def share_params(self, base_class, shared_level, resume=False) -> None: """ Share the parameters of self to the base_class with shared_level during multitask training. If not start from checkpoint (resume is False), @@ -222,10 +224,35 @@ def compute_input_stats( def reinit_exclude( self, exclude_types: list[tuple[int, int]] = [], - ): + ) -> None: """Update the type exclusions.""" self.sea.reinit_exclude(exclude_types) + def enable_compression( + self, + min_nbor_dist: float, + table_extrapolate: float = 5, + table_stride_1: float = 0.01, + table_stride_2: float = 0.1, + check_frequency: int = -1, + ) -> None: + """Receive the statisitcs (distance, max_nbor_size and env_mat_range) of the training data. + + Parameters + ---------- + min_nbor_dist + The nearest distance between atoms + table_extrapolate + The scale of model extrapolation + table_stride_1 + The uniform stride of the first table + table_stride_2 + The uniform stride of the second table + check_frequency + The overflow check frequency + """ + raise ValueError("Enable compression is not supported.") + def forward( self, coord_ext: paddle.Tensor, @@ -266,7 +293,18 @@ def forward( The smooth switch function. """ - return self.sea.forward(nlist, coord_ext, atype_ext, None, mapping) + # cast the input to internal precsion + coord_ext = coord_ext.to(dtype=self.prec) + g1, rot_mat, g2, h2, sw = self.sea.forward( + nlist, coord_ext, atype_ext, None, mapping + ) + return ( + g1.to(dtype=env.GLOBAL_PD_FLOAT_PRECISION), + rot_mat.to(dtype=env.GLOBAL_PD_FLOAT_PRECISION), + None, + None, + sw.to(dtype=env.GLOBAL_PD_FLOAT_PRECISION), + ) def set_stat_mean_and_stddev( self, @@ -367,10 +405,6 @@ def update_sel( class DescrptBlockSeA(DescriptorBlock): ndescrpt: Final[int] __constants__: ClassVar[list] = ["ndescrpt"] - lower: dict[str, int] - upper: dict[str, int] - table_data: dict[str, paddle.Tensor] - table_config: list[Union[int, float]] def __init__( self, @@ -389,7 +423,7 @@ def __init__( trainable: bool = True, seed: Optional[Union[int, list[int]]] = None, **kwargs, - ): + ) -> None: """Construct an embedding net of type `se_a`. 
Args: @@ -430,13 +464,6 @@ def __init__( self.register_buffer("mean", mean) self.register_buffer("stddev", stddev) - # add for compression - self.compress = False - self.lower = {} - self.upper = {} - self.table_data = {} - self.table_config = [] - ndim = 1 if self.type_one_side else 2 filter_layers = NetworkCollection( ndim=ndim, ntypes=len(sel), network_type="embedding_network" @@ -459,6 +486,21 @@ def __init__( for param in self.parameters(): param.stop_gradient = not trainable + # add for compression + self.compress = False + self.compress_info = nn.ParameterList( + [ + self.create_parameter([], dtype=self.prec).to(device="cpu") + for _ in range(len(self.filter_layers.networks)) + ] + ) + self.compress_data = nn.ParameterList( + [ + self.create_parameter([], dtype=self.prec).to(device=env.DEVICE) + for _ in range(len(self.filter_layers.networks)) + ] + ) + def get_rcut(self) -> float: """Returns the cut-off radius.""" return self.rcut @@ -517,11 +559,11 @@ def dim_out(self): return self.filter_neuron[-1] * self.axis_neuron @property - def dim_in(self): + def dim_in(self) -> int: """Returns the atomic input dimension of this descriptor.""" return 0 - def __setitem__(self, key, value): + def __setitem__(self, key, value) -> None: if key in ("avg", "data_avg", "davg"): self.mean = value elif key in ("std", "data_std", "dstd"): @@ -541,7 +583,7 @@ def compute_input_stats( self, merged: Union[Callable[[], list[dict]], list[dict]], path: Optional[DPPath] = None, - ): + ) -> None: """ Compute the input statistics (e.g. mean and stddev) for the descriptors from packed data. @@ -587,22 +629,45 @@ def get_stats(self) -> dict[str, StatItem]: def reinit_exclude( self, exclude_types: list[tuple[int, int]] = [], - ): + ) -> None: self.exclude_types = exclude_types self.emask = PairExcludeMask(self.ntypes, exclude_types=exclude_types) def enable_compression( self, - table_data, - table_config, - lower, - upper, + table_data: dict[str, paddle.Tensor], + table_config: list[Union[int, float]], + lower: dict[str, int], + upper: dict[str, int], ) -> None: + for embedding_idx, ll in enumerate(self.filter_layers.networks): + if self.type_one_side: + ii = embedding_idx + ti = -1 + else: + # ti: center atom type, ii: neighbor type... + ii = embedding_idx // self.ntypes + ti = embedding_idx % self.ntypes + if self.type_one_side: + net = "filter_-1_net_" + str(ii) + else: + net = "filter_" + str(ti) + "_net_" + str(ii) + info_ii = paddle.to_tensor( + [ + lower[net], + upper[net], + upper[net] * table_config[0], + table_config[1], + table_config[2], + table_config[3], + ], + dtype=self.prec, + place="cpu", + ) + tensor_data_ii = table_data[net].to(device=env.DEVICE, dtype=self.prec) + self.compress_data[embedding_idx] = tensor_data_ii + self.compress_info[embedding_idx] = info_ii self.compress = True - self.table_data = table_data - self.table_config = table_config - self.lower = lower - self.upper = upper def forward( self, @@ -611,6 +676,7 @@ def forward( extended_atype: paddle.Tensor, extended_atype_embd: Optional[paddle.Tensor] = None, mapping: Optional[paddle.Tensor] = None, + type_embedding: Optional[paddle.Tensor] = None, ): """Calculate decoded embedding for each atom. 
@@ -627,7 +693,7 @@ def forward( del extended_atype_embd, mapping nf = nlist.shape[0] nloc = nlist.shape[1] - atype: paddle.Tensor = extended_atype[:, :nloc] + atype = extended_atype[:, :nloc] dmatrix, diff, sw = prod_env_mat( extended_coord, nlist, @@ -640,7 +706,6 @@ def forward( ) dmatrix = dmatrix.reshape([-1, self.nnei, 4]) - dmatrix = dmatrix.astype(self.prec) nfnl = dmatrix.shape[0] # pre-allocate a shape to pass jit xyz_scatter = paddle.zeros( @@ -649,7 +714,9 @@ def forward( ).to(extended_coord.place) # nfnl x nnei exclude_mask = self.emask(nlist, extended_atype).reshape([nfnl, self.nnei]) - for embedding_idx, ll in enumerate(self.filter_layers.networks): + for embedding_idx, (ll, compress_data_ii, compress_info_ii) in enumerate( + zip(self.filter_layers.networks, self.compress_data, self.compress_info) + ): if self.type_one_side: ii = embedding_idx ti = -1 @@ -680,10 +747,16 @@ def forward( if rr.numel() > 0: rr = rr * mm.unsqueeze(2).astype(rr.dtype) ss = rr[:, :, :1] - # nfnl x nt x ng - gg = ll.forward(ss) - # nfnl x 4 x ng - gr = paddle.matmul(rr.transpose([0, 2, 1]), gg) + if self.compress: + raise NotImplementedError( + "Compressed environment is not implemented yet." + ) + else: + # nfnl x nt x ng + gg = ll.forward(ss) + # nfnl x 4 x ng + gr = paddle.matmul(rr.transpose([0, 2, 1]), gg) + if ti_mask is not None: xyz_scatter[ti_mask] += gr else: @@ -699,8 +772,8 @@ def forward( result = result.reshape([nf, nloc, self.filter_neuron[-1] * self.axis_neuron]) rot_mat = rot_mat.reshape([nf, nloc] + list(rot_mat.shape[1:])) # noqa:RUF005 return ( - result.astype(env.GLOBAL_PD_FLOAT_PRECISION), - rot_mat.astype(env.GLOBAL_PD_FLOAT_PRECISION), + result, + rot_mat, None, None, sw, diff --git a/deepmd/pd/model/descriptor/se_atten.py b/deepmd/pd/model/descriptor/se_atten.py new file mode 100644 index 0000000000..1ebf8c6717 --- /dev/null +++ b/deepmd/pd/model/descriptor/se_atten.py @@ -0,0 +1,1073 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from typing import ( + Callable, + Optional, + Union, +) + +import paddle +import paddle.nn as nn +import paddle.nn.functional as paddle_func + +from deepmd.dpmodel.utils.seed import ( + child_seed, +) +from deepmd.pd.model.descriptor.descriptor import ( + DescriptorBlock, +) +from deepmd.pd.model.descriptor.env_mat import ( + prod_env_mat, +) +from deepmd.pd.model.network.layernorm import ( + LayerNorm, +) +from deepmd.pd.model.network.mlp import ( + EmbeddingNet, + MLPLayer, + NetworkCollection, +) +from deepmd.pd.utils import ( + env, +) +from deepmd.pd.utils.env import ( + DEFAULT_PRECISION, + PRECISION_DICT, +) +from deepmd.pd.utils.env_mat_stat import ( + EnvMatStatSe, +) +from deepmd.pd.utils.exclude_mask import ( + PairExcludeMask, +) +from deepmd.utils.env_mat_stat import ( + StatItem, +) +from deepmd.utils.path import ( + DPPath, +) +from deepmd.utils.version import ( + check_version_compatibility, +) + + +@DescriptorBlock.register("se_atten") +class DescrptBlockSeAtten(DescriptorBlock): + def __init__( + self, + rcut: float, + rcut_smth: float, + sel: Union[list[int], int], + ntypes: int, + neuron: list = [25, 50, 100], + axis_neuron: int = 16, + tebd_dim: int = 8, + tebd_input_mode: str = "concat", + set_davg_zero: bool = True, + attn: int = 128, + attn_layer: int = 2, + attn_dotr: bool = True, + attn_mask: bool = False, + activation_function="tanh", + precision: str = "float64", + resnet_dt: bool = False, + scaling_factor=1.0, + normalize=True, + temperature=None, + smooth: bool = True, + type_one_side: bool = False, + 
exclude_types: list[tuple[int, int]] = [], + env_protection: float = 0.0, + trainable_ln: bool = True, + ln_eps: Optional[float] = 1e-5, + seed: Optional[Union[int, list[int]]] = None, + type: Optional[str] = None, + ) -> None: + r"""Construct an embedding net of type `se_atten`. + + Parameters + ---------- + rcut : float + The cut-off radius :math:`r_c` + rcut_smth : float + From where the environment matrix should be smoothed :math:`r_s` + sel : list[int], int + list[int]: sel[i] specifies the maximum number of type i atoms in the cut-off radius + int: the total maximum number of atoms in the cut-off radius + ntypes : int + Number of element types + neuron : list[int] + Number of neurons in each hidden layer of the embedding net :math:`\mathcal{N}` + axis_neuron : int + Number of the axis neuron :math:`M_2` (number of columns of the sub-matrix of the embedding matrix) + tebd_dim : int + Dimension of the type embedding + tebd_input_mode : str + The input mode of the type embedding. Supported modes are ["concat", "strip"]. + - "concat": Concatenate the type embedding with the smoothed radial information as the union input for the embedding network. + - "strip": Use a separated embedding network for the type embedding and combine the output with the radial embedding network output. + resnet_dt : bool + Time-step `dt` in the resnet construction: + y = x + dt * \phi (Wx + b) + trainable_ln : bool + Whether to use trainable shift and scale weights in layer normalization. + ln_eps : float, Optional + The epsilon value for layer normalization. + type_one_side : bool + If 'False', type embeddings of both neighbor and central atoms are considered. + If 'True', only type embeddings of neighbor atoms are considered. + Default is 'False'. + attn : int + Hidden dimension of the attention vectors + attn_layer : int + Number of attention layers + attn_dotr : bool + Whether to dot the angular gate into the attention weights + attn_mask : bool + (Only False is supported, to keep consistent with other backend references.) + (Not used in this version.) + Whether to mask the diagonal of the attention weights + exclude_types : list[list[int]] + The excluded pairs of types which have no interaction with each other. + For example, `[[0, 1]]` means no interaction between type 0 and type 1. + env_protection : float + Protection parameter to prevent division by zero errors during environment matrix calculations. + set_davg_zero : bool + Set the shift of embedding net input to zero. + activation_function : str + The activation function in the embedding net. Supported options are |ACTIVATION_FN| + precision : str + The precision of the embedding net parameters. Supported options are |PRECISION| + scaling_factor : float + The scaling factor of normalization in calculations of attention weights. + If `temperature` is None, the scaling of attention weights is (N_dim * scaling_factor)**0.5 + normalize : bool + Whether to normalize the hidden vectors in attention weights calculation. + temperature : float + If not None, the scaling of attention weights is `temperature` itself. + seed : int, Optional + Random seed for parameter initialization.
+ """ + super().__init__() + del type + self.rcut = float(rcut) + self.rcut_smth = float(rcut_smth) + self.neuron = neuron + self.filter_neuron = self.neuron + self.axis_neuron = axis_neuron + self.tebd_dim = tebd_dim + self.tebd_input_mode = tebd_input_mode + self.set_davg_zero = set_davg_zero + self.attn_dim = attn + self.attn_layer = attn_layer + self.attn_dotr = attn_dotr + self.attn_mask = attn_mask + self.activation_function = activation_function + self.precision = precision + self.prec = PRECISION_DICT[self.precision] + self.resnet_dt = resnet_dt + self.scaling_factor = scaling_factor + self.normalize = normalize + self.temperature = temperature + self.smooth = smooth + self.type_one_side = type_one_side + self.env_protection = env_protection + self.trainable_ln = trainable_ln + self.seed = seed + # to keep consistent with default value in this backends + if ln_eps is None: + ln_eps = 1e-5 + self.ln_eps = ln_eps + + if isinstance(sel, int): + sel = [sel] + + self.ntypes = ntypes + self.sel = sel + self.sec = self.sel + self.split_sel = self.sel + self.nnei = sum(sel) + self.ndescrpt = self.nnei * 4 + # order matters, placed after the assignment of self.ntypes + self.reinit_exclude(exclude_types) + + self.dpa1_attention = NeighborGatedAttention( + self.attn_layer, + self.nnei, + self.filter_neuron[-1], + self.attn_dim, + dotr=self.attn_dotr, + do_mask=self.attn_mask, + scaling_factor=self.scaling_factor, + normalize=self.normalize, + temperature=self.temperature, + trainable_ln=self.trainable_ln, + ln_eps=self.ln_eps, + smooth=self.smooth, + precision=self.precision, + seed=child_seed(self.seed, 0), + ) + + wanted_shape = (self.ntypes, self.nnei, 4) + mean = paddle.zeros(wanted_shape, dtype=self.prec).to(device=env.DEVICE) + stddev = paddle.ones(wanted_shape, dtype=self.prec).to(device=env.DEVICE) + self.register_buffer("mean", mean) + self.register_buffer("stddev", stddev) + self.tebd_dim_input = self.tebd_dim if self.type_one_side else self.tebd_dim * 2 + if self.tebd_input_mode in ["concat"]: + self.embd_input_dim = 1 + self.tebd_dim_input + else: + self.embd_input_dim = 1 + + self.filter_layers_strip = None + filter_layers = NetworkCollection( + ndim=0, ntypes=self.ntypes, network_type="embedding_network" + ) + filter_layers[0] = EmbeddingNet( + self.embd_input_dim, + self.filter_neuron, + activation_function=self.activation_function, + precision=self.precision, + resnet_dt=self.resnet_dt, + seed=child_seed(self.seed, 1), + ) + self.filter_layers = filter_layers + if self.tebd_input_mode in ["strip"]: + filter_layers_strip = NetworkCollection( + ndim=0, ntypes=self.ntypes, network_type="embedding_network" + ) + filter_layers_strip[0] = EmbeddingNet( + self.tebd_dim_input, + self.filter_neuron, + activation_function=self.activation_function, + precision=self.precision, + resnet_dt=self.resnet_dt, + seed=child_seed(self.seed, 2), + ) + self.filter_layers_strip = filter_layers_strip + self.stats = None + + # add for compression + self.compress = False + self.is_sorted = False + self.compress_info = nn.ParameterList( + [ + self.create_parameter( + [], default_initializer=nn.initializer.Constant(0), dtype=self.prec + ).to("cpu") + ] + ) + self.compress_data = nn.ParameterList( + [ + self.create_parameter( + [], default_initializer=nn.initializer.Constant(0), dtype=self.prec + ).to(env.DEVICE) + ] + ) + + def get_rcut(self) -> float: + """Returns the cut-off radius.""" + return self.rcut + + def get_rcut_smth(self) -> float: + """Returns the radius where the neighbor information starts 
to smoothly decay to 0.""" + return self.rcut_smth + + def get_nsel(self) -> int: + """Returns the number of selected atoms in the cut-off radius.""" + return sum(self.sel) + + def get_sel(self) -> list[int]: + """Returns the number of selected atoms for each type.""" + return self.sel + + def get_ntypes(self) -> int: + """Returns the number of element types.""" + return self.ntypes + + def get_dim_in(self) -> int: + """Returns the input dimension.""" + return self.dim_in + + def get_dim_out(self) -> int: + """Returns the output dimension.""" + return self.dim_out + + def get_dim_rot_mat_1(self) -> int: + """Returns the first dimension of the rotation matrix. The rotation is of shape dim_1 x 3.""" + return self.filter_neuron[-1] + + def get_dim_emb(self) -> int: + """Returns the output dimension of embedding.""" + return self.filter_neuron[-1] + + def __setitem__(self, key, value) -> None: + if key in ("avg", "data_avg", "davg"): + self.mean = value + elif key in ("std", "data_std", "dstd"): + self.stddev = value + else: + raise KeyError(key) + + def __getitem__(self, key): + if key in ("avg", "data_avg", "davg"): + return self.mean + elif key in ("std", "data_std", "dstd"): + return self.stddev + else: + raise KeyError(key) + + def mixed_types(self) -> bool: + """If true, the descriptor + 1. assumes total number of atoms aligned across frames; + 2. requires a neighbor list that does not distinguish different atomic types. + + If false, the descriptor + 1. assumes total number of atoms of each atom type aligned across frames; + 2. requires a neighbor list that distinguishes different atomic types. + + """ + return True + + def get_env_protection(self) -> float: + """Returns the protection of building environment matrix.""" + return self.env_protection + + @property + def dim_out(self): + """Returns the output dimension of this descriptor.""" + return self.filter_neuron[-1] * self.axis_neuron + + @property + def dim_in(self): + """Returns the atomic input dimension of this descriptor.""" + return self.tebd_dim + + @property + def dim_emb(self): + """Returns the output dimension of embedding.""" + return self.get_dim_emb() + + def compute_input_stats( + self, + merged: Union[Callable[[], list[dict]], list[dict]], + path: Optional[DPPath] = None, + ) -> None: + """ + Compute the input statistics (e.g. mean and stddev) for the descriptors from packed data. + + Parameters + ---------- + merged : Union[Callable[[], list[dict]], list[dict]] + - list[dict]: A list of data samples from various data systems. + Each element, `merged[i]`, is a data dictionary containing `keys`: `paddle.Tensor` + originating from the `i`-th data system. + - Callable[[], list[dict]]: A lazy function that returns data samples in the above format + only when needed. Since the sampling process can be slow and memory-intensive, + the lazy function helps by only sampling once. + path : Optional[DPPath] + The path to the stat file. 
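The `merged` argument of `compute_input_stats` accepts either concrete samples or a lazy callable. A minimal pure-Python sketch of that contract (no deepmd imports, names are illustrative):

```python
# Hedged sketch of the lazy-sampling contract: the callable is only invoked
# when cached statistics are unavailable, so expensive sampling runs at most once.
from typing import Callable, Union

def resolve_samples(
    merged: Union[Callable[[], list[dict]], list[dict]],
    stats_cached: bool,
) -> list[dict]:
    if stats_cached:
        return []  # stats will be loaded from disk; skip sampling entirely
    return merged() if callable(merged) else merged

print(resolve_samples(lambda: [{"system": 0}], stats_cached=False))  # [{'system': 0}]
print(resolve_samples(lambda: [{"system": 0}], stats_cached=True))   # []
```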
+ + """ + env_mat_stat = EnvMatStatSe(self) + if path is not None: + path = path / env_mat_stat.get_hash() + if path is None or not path.is_dir(): + if callable(merged): + # only get data for once + sampled = merged() + else: + sampled = merged + else: + sampled = [] + env_mat_stat.load_or_compute_stats(sampled, path) + self.stats = env_mat_stat.stats + mean, stddev = env_mat_stat() + if not self.set_davg_zero: + paddle.assign(paddle.to_tensor(mean).to(device=env.DEVICE), self.mean) # pylint: disable=no-explicit-dtype + paddle.assign(paddle.to_tensor(stddev).to(device=env.DEVICE), self.stddev) # pylint: disable=no-explicit-dtype + + def get_stats(self) -> dict[str, StatItem]: + """Get the statistics of the descriptor.""" + if self.stats is None: + raise RuntimeError( + "The statistics of the descriptor has not been computed." + ) + return self.stats + + def reinit_exclude( + self, + exclude_types: list[tuple[int, int]] = [], + ) -> None: + self.exclude_types = exclude_types + self.is_sorted = len(self.exclude_types) == 0 + self.emask = PairExcludeMask(self.ntypes, exclude_types=exclude_types) + + def enable_compression( + self, + table_data, + table_config, + lower, + upper, + ) -> None: + net = "filter_net" + self.compress_info[0] = paddle.to_tensor( + [ + lower[net], + upper[net], + upper[net] * table_config[0], + table_config[1], + table_config[2], + table_config[3], + ], + dtype=self.prec, + place="cpu", + ) + self.compress_data[0] = table_data[net].to(device=env.DEVICE, dtype=self.prec) + self.compress = True + + def forward( + self, + nlist: paddle.Tensor, + extended_coord: paddle.Tensor, + extended_atype: paddle.Tensor, + extended_atype_embd: Optional[paddle.Tensor] = None, + mapping: Optional[paddle.Tensor] = None, + type_embedding: Optional[paddle.Tensor] = None, + ): + """Compute the descriptor. + + Parameters + ---------- + nlist + The neighbor list. shape: nf x nloc x nnei + extended_coord + The extended coordinates of atoms. shape: nf x (nallx3) + extended_atype + The extended aotm types. shape: nf x nall x nt + extended_atype_embd + The extended type embedding of atoms. shape: nf x nall + mapping + The index mapping, not required by this descriptor. + type_embedding + Full type embeddings. shape: (ntypes+1) x nt + Required for stripped type embeddings. + + Returns + ------- + result + The descriptor. shape: nf x nloc x (ng x axis_neuron) + g2 + The rotationally invariant pair-partical representation. + shape: nf x nloc x nnei x ng + h2 + The rotationally equivariant pair-partical representation. + shape: nf x nloc x nnei x 3 + gr + The rotationally equivariant and permutationally invariant single particle + representation. shape: nf x nloc x ng x 3 + sw + The smooth switch function. 
shape: nf x nloc x nnei + + """ + del mapping + assert extended_atype_embd is not None + nframes, nloc, nnei = nlist.shape + atype = extended_atype[:, :nloc] + nb = nframes + nall = extended_coord.reshape([nb, -1, 3]).shape[1] + dmatrix, diff, sw = prod_env_mat( + extended_coord, + nlist, + atype, + self.mean, + self.stddev, + self.rcut, + self.rcut_smth, + protection=self.env_protection, + ) + # nb x nloc x nnei + exclude_mask = self.emask(nlist, extended_atype) + nlist = paddle.where(exclude_mask != 0, nlist, paddle.full_like(nlist, -1)) + nlist_mask = nlist != -1 + nlist = paddle.where(nlist == -1, paddle.zeros_like(nlist), nlist) + sw = paddle.squeeze(sw, -1) + # nf x nall x nt + nt = extended_atype_embd.shape[-1] + # beyond the cutoff sw should be 0.0 + sw = sw.masked_fill(~nlist_mask, 0.0) + # (nb x nloc) x nnei + exclude_mask = exclude_mask.reshape([nb * nloc, nnei]) + # nfnl x nnei x 4 + dmatrix = dmatrix.reshape([-1, self.nnei, 4]) + nfnl = dmatrix.shape[0] + # nfnl x nnei x 4 + rr = dmatrix + rr = rr * exclude_mask[:, :, None].astype(rr.dtype) + ss = rr[:, :, :1] + if self.tebd_input_mode in ["concat"]: + atype_tebd_ext = extended_atype_embd + # nb x (nloc x nnei) x nt + index = nlist.reshape([nb, nloc * nnei]).unsqueeze(-1).expand([-1, -1, nt]) + # nb x (nloc x nnei) x nt + atype_tebd_nlist = paddle.take_along_axis( + atype_tebd_ext, axis=1, indices=index + ) # j + # nb x nloc x nnei x nt + atype_tebd_nlist = atype_tebd_nlist.reshape([nb, nloc, nnei, nt]) + + # nf x nloc x nt -> nf x nloc x nnei x nt + atype_tebd = extended_atype_embd[:, :nloc, :] + atype_tebd_nnei = atype_tebd.unsqueeze(2).expand( + [-1, -1, self.nnei, -1] + ) # i + + nlist_tebd = atype_tebd_nlist.reshape([nfnl, nnei, self.tebd_dim]) + atype_tebd = atype_tebd_nnei.reshape([nfnl, nnei, self.tebd_dim]) + if not self.type_one_side: + # nfnl x nnei x (1 + tebd_dim * 2) + ss = paddle.concat([ss, nlist_tebd, atype_tebd], axis=2) + else: + # nfnl x nnei x (1 + tebd_dim) + ss = paddle.concat([ss, nlist_tebd], axis=2) + # nfnl x nnei x ng + gg = self.filter_layers.networks[0](ss) + input_r = paddle.nn.functional.normalize( + rr.reshape([-1, self.nnei, 4])[:, :, 1:4], axis=-1 + ) + gg = self.dpa1_attention( + gg, nlist_mask, input_r=input_r, sw=sw + ) # shape is [nframes*nloc, self.nnei, out_size] + # nfnl x 4 x ng + xyz_scatter = paddle.matmul(rr.transpose([0, 2, 1]), gg) + elif self.tebd_input_mode in ["strip"]: + assert self.filter_layers_strip is not None + assert type_embedding is not None + ng = self.filter_neuron[-1] + ntypes_with_padding = type_embedding.shape[0] + # nf x (nl x nnei) + nlist_index = nlist.reshape([nb, nloc * nnei]) + # nf x (nl x nnei) + nei_type = paddle.take_along_axis( + extended_atype, indices=nlist_index, axis=1 + ) + # (nf x nl x nnei) x ng + nei_type_index = nei_type.reshape([-1, 1]).expand([-1, ng]).to(paddle.int64) + if self.type_one_side: + tt_full = self.filter_layers_strip.networks[0](type_embedding) + # (nf x nl x nnei) x ng + gg_t = paddle.take_along_axis(tt_full, indices=nei_type_index, axis=0) + else: + idx_i = paddle.tile( + atype.reshape([-1, 1]) * ntypes_with_padding, [1, nnei] + ).reshape([-1]) + idx_j = nei_type.reshape([-1]) + # (nf x nl x nnei) x ng + idx = (idx_i + idx_j).reshape([-1, 1]).expand([-1, ng]).to(paddle.int64) + # (ntypes) * ntypes * nt + type_embedding_nei = paddle.tile( + type_embedding.reshape([1, ntypes_with_padding, nt]), + [ntypes_with_padding, 1, 1], + ) + # ntypes * (ntypes) * nt + type_embedding_center = paddle.tile(
type_embedding.reshape([ntypes_with_padding, 1, nt]), + [1, ntypes_with_padding, 1], + ) + # (ntypes * ntypes) * (nt+nt) + two_side_type_embedding = paddle.concat( + [type_embedding_nei, type_embedding_center], -1 + ).reshape([-1, nt * 2]) + tt_full = self.filter_layers_strip.networks[0](two_side_type_embedding) + # (nf x nl x nnei) x ng + gg_t = paddle.take_along_axis(tt_full, axis=0, indices=idx) + # (nf x nl) x nnei x ng + gg_t = gg_t.reshape([nfnl, nnei, ng]) + if self.smooth: + gg_t = gg_t * sw.reshape([-1, self.nnei, 1]) + if self.compress: + raise NotImplementedError("Compression is not implemented yet.") + else: + # nfnl x nnei x ng + gg_s = self.filter_layers.networks[0](ss) + # nfnl x nnei x ng + gg = gg_s * gg_t + gg_s + input_r = paddle_func.normalize( + rr.reshape([-1, self.nnei, 4])[:, :, 1:4], axis=-1 + ) + gg = self.dpa1_attention( + gg, nlist_mask, input_r=input_r, sw=sw + ) # shape is [nframes*nloc, self.nnei, out_size] + # nfnl x 4 x ng + xyz_scatter = paddle.matmul(rr.transpose([0, 2, 1]), gg) + else: + raise NotImplementedError + + xyz_scatter = xyz_scatter / self.nnei + xyz_scatter_1 = xyz_scatter.transpose([0, 2, 1]) + rot_mat = xyz_scatter_1[:, :, 1:4] + xyz_scatter_2 = xyz_scatter[:, :, 0 : self.axis_neuron] + result = paddle.matmul( + xyz_scatter_1, xyz_scatter_2 + ) # shape is [nframes*nloc, self.filter_neuron[-1], self.axis_neuron] + + return ( + result.reshape([nframes, nloc, self.filter_neuron[-1] * self.axis_neuron]), + gg.reshape([nframes, nloc, self.nnei, self.filter_neuron[-1]]) + if not self.compress + else None, + dmatrix.reshape([nframes, nloc, self.nnei, 4])[..., 1:], + rot_mat.reshape([nframes, nloc, self.filter_neuron[-1], 3]), + sw, + ) + + def has_message_passing(self) -> bool: + """Returns whether the descriptor block has message passing.""" + return False + + def need_sorted_nlist_for_lower(self) -> bool: + """Returns whether the descriptor block needs sorted nlist when using `forward_lower`.""" + return False + + +class NeighborGatedAttention(nn.Layer): + def __init__( + self, + layer_num: int, + nnei: int, + embed_dim: int, + hidden_dim: int, + dotr: bool = False, + do_mask: bool = False, + scaling_factor: float = 1.0, + normalize: bool = True, + temperature: Optional[float] = None, + trainable_ln: bool = True, + ln_eps: float = 1e-5, + smooth: bool = True, + precision: str = DEFAULT_PRECISION, + seed: Optional[Union[int, list[int]]] = None, + ) -> None: + """Construct a neighbor-wise attention net.""" + super().__init__() + self.layer_num = layer_num + self.nnei = nnei + self.embed_dim = embed_dim + self.hidden_dim = hidden_dim + self.dotr = dotr + self.do_mask = do_mask + self.scaling_factor = scaling_factor + self.normalize = normalize + self.temperature = temperature + self.trainable_ln = trainable_ln + self.ln_eps = ln_eps + self.smooth = smooth + self.precision = precision + self.seed = seed + self.network_type = NeighborGatedAttentionLayer + attention_layers = [] + for i in range(self.layer_num): + attention_layers.append( + NeighborGatedAttentionLayer( + nnei, + embed_dim, + hidden_dim, + dotr=dotr, + do_mask=do_mask, + scaling_factor=scaling_factor, + normalize=normalize, + temperature=temperature, + trainable_ln=trainable_ln, + ln_eps=ln_eps, + smooth=smooth, + precision=precision, + seed=child_seed(seed, i), + ) + ) + self.attention_layers = nn.LayerList(attention_layers) + + def forward( + self, + input_G, + nei_mask, + input_r: Optional[paddle.Tensor] = None, + sw: Optional[paddle.Tensor] = None, + ): + """Compute the multi-layer
gated self-attention. + + Parameters + ---------- + input_G + inputs with shape: (nf x nloc) x nnei x embed_dim. + nei_mask + neighbor mask, with paddings being 0. shape: (nf x nloc) x nnei. + input_r + normalized radial. shape: (nf x nloc) x nnei x 3. + sw + The smooth switch function. shape: nf x nloc x nnei + """ + out = input_G + for layer in self.attention_layers: + out = layer(out, nei_mask, input_r=input_r, sw=sw) + return out + + def __getitem__(self, key): + if isinstance(key, int): + return self.attention_layers[key] + else: + raise TypeError(key) + + def __setitem__(self, key, value) -> None: + if not isinstance(key, int): + raise TypeError(key) + if isinstance(value, self.network_type): + pass + elif isinstance(value, dict): + value = self.network_type.deserialize(value) + else: + raise TypeError(value) + self.attention_layers[key] = value + + def serialize(self) -> dict: + """Serialize the networks to a dict. + + Returns + ------- + dict + The serialized networks. + """ + return { + "@class": "NeighborGatedAttention", + "@version": 1, + "layer_num": self.layer_num, + "nnei": self.nnei, + "embed_dim": self.embed_dim, + "hidden_dim": self.hidden_dim, + "dotr": self.dotr, + "do_mask": self.do_mask, + "scaling_factor": self.scaling_factor, + "normalize": self.normalize, + "temperature": self.temperature, + "trainable_ln": self.trainable_ln, + "ln_eps": self.ln_eps, + "precision": self.precision, + "attention_layers": [layer.serialize() for layer in self.attention_layers], + } + + @classmethod + def deserialize(cls, data: dict) -> "NeighborGatedAttention": + """Deserialize the networks from a dict. + + Parameters + ---------- + data : dict + The dict to deserialize from. + """ + data = data.copy() + check_version_compatibility(data.pop("@version"), 1, 1) + data.pop("@class") + attention_layers = data.pop("attention_layers") + obj = cls(**data) + for ii, network in enumerate(attention_layers): + obj[ii] = network + return obj + + +class NeighborGatedAttentionLayer(nn.Layer): + def __init__( + self, + nnei: int, + embed_dim: int, + hidden_dim: int, + dotr: bool = False, + do_mask: bool = False, + scaling_factor: float = 1.0, + normalize: bool = True, + temperature: Optional[float] = None, + smooth: bool = True, + trainable_ln: bool = True, + ln_eps: float = 1e-5, + precision: str = DEFAULT_PRECISION, + seed: Optional[Union[int, list[int]]] = None, + ) -> None: + """Construct a neighbor-wise attention layer.""" + super().__init__() + self.nnei = nnei + self.embed_dim = embed_dim + self.hidden_dim = hidden_dim + self.dotr = dotr + self.do_mask = do_mask + self.scaling_factor = scaling_factor + self.normalize = normalize + self.temperature = temperature + self.precision = precision + self.trainable_ln = trainable_ln + self.ln_eps = ln_eps + self.seed = seed + self.attention_layer = GatedAttentionLayer( + nnei, + embed_dim, + hidden_dim, + dotr=dotr, + do_mask=do_mask, + scaling_factor=scaling_factor, + normalize=normalize, + temperature=temperature, + smooth=smooth, + precision=precision, + seed=child_seed(seed, 0), + ) + self.attn_layer_norm = LayerNorm( + self.embed_dim, + eps=ln_eps, + trainable=trainable_ln, + precision=precision, + seed=child_seed(seed, 1), + ) + + def forward( + self, + x, + nei_mask, + input_r: Optional[paddle.Tensor] = None, + sw: Optional[paddle.Tensor] = None, + ): + residual = x + x, _ = self.attention_layer(x, nei_mask, input_r=input_r, sw=sw) + x = residual + x + x = self.attn_layer_norm(x) + return x + + def serialize(self) -> dict: + """Serialize the 
networks to a dict. + + Returns + ------- + dict + The serialized networks. + """ + return { + "nnei": self.nnei, + "embed_dim": self.embed_dim, + "hidden_dim": self.hidden_dim, + "dotr": self.dotr, + "do_mask": self.do_mask, + "scaling_factor": self.scaling_factor, + "normalize": self.normalize, + "temperature": self.temperature, + "trainable_ln": self.trainable_ln, + "ln_eps": self.ln_eps, + "precision": self.precision, + "attention_layer": self.attention_layer.serialize(), + "attn_layer_norm": self.attn_layer_norm.serialize(), + } + + @classmethod + def deserialize(cls, data: dict) -> "NeighborGatedAttentionLayer": + """Deserialize the networks from a dict. + + Parameters + ---------- + data : dict + The dict to deserialize from. + """ + data = data.copy() + attention_layer = data.pop("attention_layer") + attn_layer_norm = data.pop("attn_layer_norm") + obj = cls(**data) + obj.attention_layer = GatedAttentionLayer.deserialize(attention_layer) + obj.attn_layer_norm = LayerNorm.deserialize(attn_layer_norm) + return obj + + +class GatedAttentionLayer(nn.Layer): + def __init__( + self, + nnei: int, + embed_dim: int, + hidden_dim: int, + num_heads: int = 1, + dotr: bool = False, + do_mask: bool = False, + scaling_factor: float = 1.0, + normalize: bool = True, + temperature: Optional[float] = None, + bias: bool = True, + smooth: bool = True, + precision: str = DEFAULT_PRECISION, + seed: Optional[Union[int, list[int]]] = None, + ) -> None: + """Construct a multi-head neighbor-wise attention net.""" + super().__init__() + assert hidden_dim % num_heads == 0, "hidden_dim must be divisible by num_heads" + self.nnei = nnei + self.embed_dim = embed_dim + self.hidden_dim = hidden_dim + self.num_heads = num_heads + self.head_dim = hidden_dim // num_heads + self.dotr = dotr + self.do_mask = do_mask + self.bias = bias + self.smooth = smooth + self.scaling_factor = scaling_factor + self.temperature = temperature + self.precision = precision + self.seed = seed + self.scaling = ( + (self.head_dim * scaling_factor) ** -0.5 + if temperature is None + else temperature + ) + self.normalize = normalize + self.in_proj = MLPLayer( + embed_dim, + hidden_dim * 3, + bias=bias, + use_timestep=False, + bavg=0.0, + stddev=1.0, + precision=precision, + seed=child_seed(seed, 0), + ) + self.out_proj = MLPLayer( + hidden_dim, + embed_dim, + bias=bias, + use_timestep=False, + bavg=0.0, + stddev=1.0, + precision=precision, + seed=child_seed(seed, 1), + ) + + def forward( + self, + query, + nei_mask, + input_r: Optional[paddle.Tensor] = None, + sw: Optional[paddle.Tensor] = None, + attnw_shift: float = 20.0, + ): + """Compute the multi-head gated self-attention. + + Parameters + ---------- + query + inputs with shape: (nf x nloc) x nnei x embed_dim. + nei_mask + neighbor mask, with paddings being 0. shape: (nf x nloc) x nnei. + input_r + normalized radial. shape: (nf x nloc) x nnei x 3. + sw + The smooth switch function. shape: (nf x nloc) x nnei + attnw_shift : float + The attention weight shift to preserve smoothness when doing padding before softmax. 
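The `attnw_shift` parameter drives the smooth masking used in the body below: logits are shifted up, rescaled by the switch values of both neighbors, then shifted back down. A self-contained toy illustration (shapes and values are made up; only the formula mirrors the implementation):

```python
# Hedged sketch: scaling (logits + shift) by the switch values and subtracting
# the shift sends padded neighbors toward -shift, i.e. ~0 weight after softmax,
# while the map stays smooth as a neighbor crosses the cutoff (sw -> 0).
import paddle
import paddle.nn.functional as F

attnw_shift = 20.0
logits = paddle.zeros([1, 1, 4, 4])                     # (nf*nloc) x nhead x nnei x nnei
sw = paddle.to_tensor([1.0, 1.0, 0.5, 0.0]).reshape([1, 1, 4])

shifted = (logits + attnw_shift) * sw[:, :, :, None] * sw[:, :, None, :] - attnw_shift
weights = F.softmax(shifted, axis=-1)
print(weights[0, 0, 0])  # last column ~0: the fully switched-off neighbor gets no weight
```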
+ """ + q, k, v = self.in_proj(query).chunk(3, axis=-1) + + # Reshape for multi-head attention: (nf x nloc) x num_heads x nnei x head_dim + q = q.reshape([-1, self.nnei, self.num_heads, self.head_dim]).transpose( + [0, 2, 1, 3] + ) + k = k.reshape([-1, self.nnei, self.num_heads, self.head_dim]).transpose( + [0, 2, 1, 3] + ) + v = v.reshape([-1, self.nnei, self.num_heads, self.head_dim]).transpose( + [0, 2, 1, 3] + ) + + if self.normalize: + q = paddle_func.normalize(q, axis=-1) + k = paddle_func.normalize(k, axis=-1) + v = paddle_func.normalize(v, axis=-1) + + q = q * self.scaling + # (nf x nloc) x num_heads x head_dim x nnei + k = k.transpose([0, 1, 3, 2]) + + # Compute attention scores + # (nf x nloc) x num_heads x nnei x nnei + attn_weights = paddle.matmul(q, k) + # (nf x nloc) x nnei + nei_mask = nei_mask.reshape([-1, self.nnei]) + + if self.smooth: + assert sw is not None + # (nf x nloc) x 1 x nnei + sw = sw.reshape([-1, 1, self.nnei]) + attn_weights = (attn_weights + attnw_shift) * sw[:, :, :, None] * sw[ + :, :, None, : + ] - attnw_shift + else: + # (nf x nloc) x 1 x 1 x nnei + attn_weights = attn_weights.masked_fill( + ~nei_mask.unsqueeze(1).unsqueeze(1), float("-inf") + ) + + attn_weights = paddle_func.softmax(attn_weights, axis=-1) + attn_weights = attn_weights.masked_fill( + ~nei_mask.unsqueeze(1).unsqueeze(-1), 0.0 + ) + if self.smooth: + assert sw is not None + attn_weights = attn_weights * sw[:, :, :, None] * sw[:, :, None, :] + + if self.dotr: + # (nf x nloc) x nnei x 3 + assert input_r is not None, "input_r must be provided when dotr is True!" + # (nf x nloc) x 1 x nnei x nnei + angular_weight = paddle.matmul( + input_r, input_r.transpose([0, 2, 1]) + ).reshape([-1, 1, self.nnei, self.nnei]) + attn_weights = attn_weights * angular_weight + + # Apply attention to values + # (nf x nloc) x nnei x (num_heads x head_dim) + o = ( + paddle.matmul(attn_weights, v) + .transpose([0, 2, 1, 3]) + .reshape([-1, self.nnei, self.hidden_dim]) + ) + output = self.out_proj(o) + return output, attn_weights + + def serialize(self) -> dict: + """Serialize the networks to a dict. + + Returns + ------- + dict + The serialized networks. + """ + return { + "nnei": self.nnei, + "embed_dim": self.embed_dim, + "hidden_dim": self.hidden_dim, + "num_heads": self.num_heads, + "dotr": self.dotr, + "do_mask": self.do_mask, + "scaling_factor": self.scaling_factor, + "normalize": self.normalize, + "temperature": self.temperature, + "bias": self.bias, + "smooth": self.smooth, + "precision": self.precision, + "in_proj": self.in_proj.serialize(), + "out_proj": self.out_proj.serialize(), + } + + @classmethod + def deserialize(cls, data: dict) -> "GatedAttentionLayer": + """Deserialize the networks from a dict. + + Parameters + ---------- + data : dict + The dict to deserialize from. 
+ """ + data = data.copy() + in_proj = data.pop("in_proj") + out_proj = data.pop("out_proj") + obj = cls(**data) + obj.in_proj = MLPLayer.deserialize(in_proj) + obj.out_proj = MLPLayer.deserialize(out_proj) + return obj diff --git a/deepmd/pd/model/model/ener_model.py b/deepmd/pd/model/model/ener_model.py index 3f3db4a527..a5b1b9d4b3 100644 --- a/deepmd/pd/model/model/ener_model.py +++ b/deepmd/pd/model/model/ener_model.py @@ -1,7 +1,4 @@ # SPDX-License-Identifier: LGPL-3.0-or-later -from copy import ( - deepcopy, -) from typing import ( Optional, ) @@ -33,26 +30,26 @@ def __init__( self, *args, **kwargs, - ): + ) -> None: DPModelCommon.__init__(self) DPEnergyModel_.__init__(self, *args, **kwargs) def translated_output_def(self): out_def_data = self.model_output_def().get_data() output_def = { - "atom_energy": deepcopy(out_def_data["energy"]), - "energy": deepcopy(out_def_data["energy_redu"]), + "atom_energy": out_def_data["energy"], + "energy": out_def_data["energy_redu"], } if self.do_grad_r("energy"): - output_def["force"] = deepcopy(out_def_data["energy_derv_r"]) + output_def["force"] = out_def_data["energy_derv_r"] output_def["force"].squeeze(-2) if self.do_grad_c("energy"): - output_def["virial"] = deepcopy(out_def_data["energy_derv_c_redu"]) + output_def["virial"] = out_def_data["energy_derv_c_redu"] output_def["virial"].squeeze(-2) - output_def["atom_virial"] = deepcopy(out_def_data["energy_derv_c"]) + output_def["atom_virial"] = out_def_data["energy_derv_c"] output_def["atom_virial"].squeeze(-3) if "mask" in out_def_data: - output_def["mask"] = deepcopy(out_def_data["mask"]) + output_def["mask"] = out_def_data["mask"] return output_def def forward( diff --git a/deepmd/pd/model/model/make_model.py b/deepmd/pd/model/model/make_model.py index d5c5c6bd41..2b9a4b5bec 100644 --- a/deepmd/pd/model/model/make_model.py +++ b/deepmd/pd/model/model/make_model.py @@ -24,9 +24,6 @@ communicate_extended_output, fit_output_to_model_output, ) -from deepmd.pd.utils import ( - decomp, -) from deepmd.pd.utils.env import ( GLOBAL_PD_ENER_FLOAT_PRECISION, GLOBAL_PD_FLOAT_PRECISION, @@ -72,7 +69,7 @@ def __init__( # underscore to prevent conflict with normal inputs atomic_model_: Optional[T_AtomicModel] = None, **kwargs, - ): + ) -> None: super().__init__(*args, **kwargs) if atomic_model_ is not None: self.atomic_model: T_AtomicModel = atomic_model_ @@ -176,7 +173,9 @@ def forward_common( atype, self.get_rcut(), self.get_sel(), - mixed_types=self.mixed_types(), + # types will be distinguished in the lower interface, + # so it doesn't need to be distinguished here + mixed_types=True, box=bb, ) model_predict_lower = self.forward_common_lower( @@ -411,7 +410,7 @@ def format_nlist( Returns ------- - formatted_nlist + formated_nlist the formatted nlist. 
""" @@ -459,18 +458,17 @@ def _format_nlist( coord0 = extended_coord[:, :n_nloc, :] # nf x (nloc x nnei) x 3 index = nlist.reshape([n_nf, n_nloc * n_nnei, 1]).expand([-1, -1, 3]) - coord1 = decomp.take_along_axis(extended_coord, axis=1, indices=index) + coord1 = paddle.take_along_axis(extended_coord, axis=1, indices=index) # nf x nloc x nnei x 3 coord1 = coord1.reshape([n_nf, n_nloc, n_nnei, 3]) # nf x nloc x nnei - # rr = paddle.linalg.norm(coord0[:, :, None, :] - coord1, axis=-1) - rr = decomp.norm(coord0[:, :, None, :] - coord1, axis=-1) + rr = paddle.linalg.norm(coord0[:, :, None, :] - coord1, axis=-1) rr = paddle.where(m_real_nei, rr, float("inf")) rr, nlist_mapping = ( paddle.sort(rr, axis=-1), paddle.argsort(rr, axis=-1), ) - nlist = decomp.take_along_axis(nlist, axis=2, indices=nlist_mapping) + nlist = paddle.take_along_axis(nlist, axis=2, indices=nlist_mapping) nlist = paddle.where(rr > rcut, paddle.full_like(nlist, -1), nlist) nlist = nlist[..., :nnei] else: # not extra_nlist_sort and n_nnei <= nnei: diff --git a/deepmd/pd/model/network/layernorm.py b/deepmd/pd/model/network/layernorm.py new file mode 100644 index 0000000000..4d37b208f9 --- /dev/null +++ b/deepmd/pd/model/network/layernorm.py @@ -0,0 +1,165 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from typing import ( + Optional, + Union, +) + +import numpy as np +import paddle +import paddle.nn as nn + +from deepmd.dpmodel.utils.network import LayerNorm as DPLayerNorm +from deepmd.pd.model.network.init import ( + normal_, + ones_, + zeros_, +) +from deepmd.pd.utils import ( + decomp, + env, +) +from deepmd.pd.utils.env import ( + DEFAULT_PRECISION, + PRECISION_DICT, +) +from deepmd.pd.utils.utils import ( + get_generator, + to_numpy_array, + to_paddle_tensor, +) + +device = env.DEVICE + + +def empty_t(shape, precision): + return paddle.empty(shape, dtype=precision).to(device=device) + + +class LayerNorm(nn.Layer): + def __init__( + self, + num_in, + eps: float = 1e-5, + uni_init: bool = True, + bavg: float = 0.0, + stddev: float = 1.0, + precision: str = DEFAULT_PRECISION, + trainable: bool = True, + seed: Optional[Union[int, list[int]]] = None, + ): + super().__init__() + self.eps = eps + self.uni_init = uni_init + self.num_in = num_in + self.precision = precision + self.prec = PRECISION_DICT[self.precision] + self.matrix = self.create_parameter( + shape=[num_in], + dtype=self.prec, + default_initializer=nn.initializer.Assign( + empty_t((num_in,), self.prec), + ), + ) + self.bias = self.create_parameter( + shape=[num_in], + dtype=self.prec, + default_initializer=nn.initializer.Assign(empty_t([num_in], self.prec)), + ) + random_generator = get_generator(seed) + if self.uni_init: + ones_(self.matrix.data) + zeros_(self.bias.data) + else: + normal_(self.bias.data, mean=bavg, std=stddev, generator=random_generator) + normal_( + self.matrix.data, + std=stddev / np.sqrt(self.num_in), + generator=random_generator, + ) + self.trainable = trainable + if not self.trainable: + self.matrix.stop_gradient = True + self.bias.stop_gradient = True + + def dim_out(self) -> int: + return self.matrix.shape[0] + + def forward( + self, + xx: paddle.Tensor, + ) -> paddle.Tensor: + """One Layer Norm used by DP model. + + Parameters + ---------- + xx : paddle.Tensor + The input of index. + + Returns + ------- + yy: paddle.Tensor + The output. 
+ """ + # if xx.numel() > 0: + if decomp.numel(xx): + variance, mean = ( + paddle.var(xx, axis=-1, unbiased=False, keepdim=True), + paddle.mean(xx, axis=-1, keepdim=True), + ) + yy = (xx - mean) / paddle.sqrt(variance + self.eps) + else: + yy = xx + if self.matrix is not None and self.bias is not None: + yy = yy * self.matrix + self.bias + return yy + + def serialize(self) -> dict: + """Serialize the layer to a dict. + + Returns + ------- + dict + The serialized layer. + """ + nl = DPLayerNorm( + self.matrix.shape[0], + eps=self.eps, + trainable=self.trainable, + precision=self.precision, + ) + nl.w = to_numpy_array(self.matrix) + nl.b = to_numpy_array(self.bias) + data = nl.serialize() + return data + + @classmethod + def deserialize(cls, data: dict) -> "LayerNorm": + """Deserialize the layer from a dict. + + Parameters + ---------- + data : dict + The dict to deserialize from. + """ + nl = DPLayerNorm.deserialize(data) + obj = cls( + nl["matrix"].shape[0], + eps=nl["eps"], + trainable=nl["trainable"], + precision=nl["precision"], + ) + prec = PRECISION_DICT[obj.precision] + + def check_load_param(ss): + if nl[ss] is not None: + tensor = to_paddle_tensor(nl[ss]) + return paddle.create_parameter( + tensor.shape, + dtype=tensor.dtype, + default_initializer=nn.initializer.Assign(tensor), + ) + return None + + obj.matrix = check_load_param("matrix") + obj.bias = check_load_param("bias") + return obj diff --git a/deepmd/pd/model/network/network.py b/deepmd/pd/model/network/network.py index f118c234ab..1974e526a0 100644 --- a/deepmd/pd/model/network/network.py +++ b/deepmd/pd/model/network/network.py @@ -45,7 +45,7 @@ def __init__( use_econf_tebd=False, use_tebd_bias: bool = False, type_map=None, - ): + ) -> None: """Construct a type embedding net.""" super().__init__() self.type_nums = type_nums @@ -80,11 +80,28 @@ def forward(self, atype): """ return self.embedding(atype.place)[atype] - def share_params(self, base_class, shared_level, resume=False): + def get_full_embedding(self, device: str): + """ + Get the type embeddings of all types. + + Parameters + ---------- + device : str + The device on which to perform the computation. + + Returns + ------- + type_embedding : paddle.Tensor + The full type embeddings of all types. The last index corresponds to the zero padding. + Shape: (ntypes + 1) x tebd_dim + """ + return self.embedding(device) + + def share_params(self, base_class, shared_level, resume=False) -> None: """ Share the parameters of self to the base_class with shared_level during multitask training. If not start from checkpoint (resume is False), - some seperated parameters (e.g. mean and stddev) will be re-calculated across different classes. + some separated parameters (e.g. mean and stddev) will be re-calculated across different classes. 
""" assert ( self.__class__ == base_class.__class__ @@ -148,7 +165,7 @@ def __init__( use_econf_tebd: bool = False, use_tebd_bias: bool = False, type_map: Optional[list[str]] = None, - ): + ) -> None: """Construct a type embedding net.""" super().__init__() self.ntypes = ntypes diff --git a/deepmd/pd/model/task/fitting.py b/deepmd/pd/model/task/fitting.py index 375cf834cc..d9db44aff5 100644 --- a/deepmd/pd/model/task/fitting.py +++ b/deepmd/pd/model/task/fitting.py @@ -1,5 +1,4 @@ # SPDX-License-Identifier: LGPL-3.0-or-later -import copy import logging from abc import ( abstractmethod, @@ -55,7 +54,7 @@ def __new__(cls, *args, **kwargs): return BaseFitting.__new__(BaseFitting, *args, **kwargs) return super().__new__(cls) - def share_params(self, base_class, shared_level, resume=False): + def share_params(self, base_class, shared_level, resume=False) -> None: """ Share the parameters of self to the base_class with shared_level during multitask training. If not start from checkpoint (resume is False), @@ -65,14 +64,7 @@ def share_params(self, base_class, shared_level, resume=False): self.__class__ == base_class.__class__ ), "Only fitting nets of the same type can share params!" if shared_level == 0: - # link buffers - if hasattr(self, "bias_atom_e"): - self.bias_atom_e = base_class.bias_atom_e - # the following will successfully link all the params except buffers, which need manually link. - for item in self._sub_layers: - self._sub_layers[item] = base_class._sub_layers[item] - elif shared_level == 1: - # only not share the bias_atom_e + # only not share the bias_atom_e and the case_embd # the following will successfully link all the params except buffers, which need manually link. for item in self._sub_layers: self._sub_layers[item] = base_class._sub_layers[item] @@ -104,7 +96,6 @@ class GeneralFitting(Fitting): numb_aparam : int Number of atomic parameters. dim_case_embd : int - (Not supported yet) Dimension of case specific embedding. activation_function : str Activation function. @@ -155,7 +146,7 @@ def __init__( type_map: Optional[list[str]] = None, use_aparam_as_mask: bool = False, **kwargs, - ): + ) -> None: super().__init__() self.var_name = var_name self.ntypes = ntypes @@ -166,9 +157,6 @@ def __init__( self.numb_fparam = numb_fparam self.numb_aparam = numb_aparam self.dim_case_embd = dim_case_embd - if dim_case_embd > 0: - raise ValueError("dim_case_embd is not supported yet in PaddlePaddle.") - self.case_embd = None self.activation_function = activation_function self.precision = precision self.prec = PRECISION_DICT[self.precision] @@ -189,7 +177,9 @@ def __init__( # init constants if bias_atom_e is None: bias_atom_e = np.zeros([self.ntypes, net_dim_out], dtype=np.float64) - bias_atom_e = paddle.to_tensor(bias_atom_e, dtype=self.prec).to(device=device) + bias_atom_e = paddle.to_tensor( + bias_atom_e, dtype=env.GLOBAL_PD_FLOAT_PRECISION, place=device + ) bias_atom_e = bias_atom_e.reshape([self.ntypes, net_dim_out]) if not self.mixed_types: assert self.ntypes == bias_atom_e.shape[0], "Element count mismatches!" 
@@ -218,10 +208,20 @@ def __init__( else: self.aparam_avg, self.aparam_inv_std = None, None + if self.dim_case_embd > 0: + self.register_buffer( + "case_embd", + paddle.zeros(self.dim_case_embd, dtype=self.prec, place=device), + # paddle.eye(self.dim_case_embd, dtype=self.prec, place=device)[0], + ) + else: + self.case_embd = None + in_dim = ( self.dim_descrpt + self.numb_fparam + (0 if self.use_aparam_as_mask else self.numb_aparam) + + self.dim_case_embd ) self.filter_layers = NetworkCollection( @@ -249,7 +249,7 @@ def __init__( def reinit_exclude( self, exclude_types: list[int] = [], - ): + ) -> None: self.exclude_types = exclude_types self.emask = AtomExcludeMask(self.ntypes, self.exclude_types) @@ -299,7 +299,7 @@ def serialize(self) -> dict: "exclude_types": self.exclude_types, "@variables": { "bias_atom_e": to_numpy_array(self.bias_atom_e), - "case_embd": None, + "case_embd": to_numpy_array(self.case_embd), "fparam_avg": to_numpy_array(self.fparam_avg), "fparam_inv_std": to_numpy_array(self.fparam_inv_std), "aparam_avg": to_numpy_array(self.aparam_avg), @@ -321,7 +321,7 @@ def serialize(self) -> dict: @classmethod def deserialize(cls, data: dict) -> "GeneralFitting": - data = copy.deepcopy(data) + data = data.copy() variables = data.pop("@variables") nets = data.pop("nets") obj = cls(**data) @@ -364,9 +364,11 @@ def set_case_embd(self, case_idx: int): Set the case embedding of this fitting net by the given case_idx, typically concatenated with the output of the descriptor and fed into the fitting net. """ - raise NotImplementedError("set_case_embd is not supported yet in PaddlePaddle.") + self.case_embd = paddle.eye(self.dim_case_embd, dtype=self.prec).to(device)[ + case_idx + ] - def __setitem__(self, key, value): + def __setitem__(self, key, value) -> None: if key in ["bias_atom_e"]: value = value.reshape([self.ntypes, self._net_out_dim()]) self.bias_atom_e = value @@ -424,7 +426,11 @@ def _forward_common( fparam: Optional[paddle.Tensor] = None, aparam: Optional[paddle.Tensor] = None, ): - xx = descriptor + # cast the input to internal precision + xx = descriptor.to(self.prec) + fparam = fparam.to(self.prec) if fparam is not None else None + aparam = aparam.to(self.prec) if aparam is not None else None + if self.remove_vaccum_contribution is not None: # TODO: compute the input for vacuum when remove_vaccum_contribution is set # Ideally, the input for vacuum should be computed; @@ -492,15 +498,30 @@ def _forward_common( axis=-1, ) + if self.dim_case_embd > 0: + assert self.case_embd is not None + case_embd = paddle.tile(self.case_embd.reshape([1, 1, -1]), [nf, nloc, 1]) + xx = paddle.concat( + [xx, case_embd], + axis=-1, + ) + if xx_zeros is not None: + xx_zeros = paddle.concat( + [xx_zeros, case_embd], + axis=-1, + ) + outs = paddle.zeros( (nf, nloc, net_dim_out), dtype=env.GLOBAL_PD_FLOAT_PRECISION, - ).to(device=descriptor.place) # jit assertion + ).to(device=descriptor.place) if self.mixed_types: - atom_property = self.filter_layers.networks[0](xx) + self.bias_atom_e[atype] + atom_property = self.filter_layers.networks[0](xx) if xx_zeros is not None: atom_property -= self.filter_layers.networks[0](xx_zeros) - outs = outs + atom_property # Shape is [nframes, natoms[0], net_dim_out] + outs = ( + outs + atom_property + self.bias_atom_e[atype].to(self.prec) + ) # Shape is [nframes, natoms[0], net_dim_out] else: for type_i, ll in enumerate(self.filter_layers.networks): mask = (atype == type_i).unsqueeze(-1) @@ -516,12 +537,12 @@ def _forward_common( ): atom_property -= ll(xx_zeros) atom_property = atom_property + self.bias_atom_e[type_i]
atom_property = atom_property * mask.astype(atom_property.dtype) + atom_property = paddle.where(mask, atom_property, 0.0) outs = ( outs + atom_property ) # Shape is [nframes, natoms[0], net_dim_out] # nf x nloc - mask = self.emask(atype) + mask = self.emask(atype).to("bool") # nf x nloc x nod - outs = outs * mask[:, :, None].astype(outs.dtype) + outs = paddle.where(mask[:, :, None], outs, 0.0) return {self.var_name: outs.astype(env.GLOBAL_PD_FLOAT_PRECISION)} diff --git a/deepmd/pd/train/training.py b/deepmd/pd/train/training.py index 17d369751f..65e35a1c4b 100644 --- a/deepmd/pd/train/training.py +++ b/deepmd/pd/train/training.py @@ -3,9 +3,6 @@ import functools import logging import time -from contextlib import ( - contextmanager, -) from copy import ( deepcopy, ) @@ -53,7 +50,7 @@ ) from deepmd.pd.utils.dataloader import ( BufferedIterator, - get_weighted_sampler, + get_sampler_from_params, ) from deepmd.pd.utils.env import ( DEVICE, @@ -66,6 +63,7 @@ make_stat_input, ) from deepmd.pd.utils.utils import ( + nvprof_context, to_numpy_array, ) from deepmd.utils.data import ( @@ -87,6 +85,7 @@ def format_training_message( wall_time: float, eta: Optional[int] = None, ): + """Format a training message.""" msg = f"batch {batch:7d}: " f"total wall time = {wall_time:.2f} s" if isinstance(eta, int): msg += f", eta = {datetime.timedelta(seconds=int(eta))!s}" @@ -107,7 +106,7 @@ def __init__( shared_links=None, finetune_links=None, init_frz_model=None, - ): + ) -> None: """Construct a DeePMD trainer. Args: @@ -169,19 +168,7 @@ def get_opt_param(params): def get_data_loader(_training_data, _validation_data, _training_params): def get_dataloader_and_buffer(_data, _params): - if "auto_prob" in _training_params["training_data"]: - _sampler = get_weighted_sampler( - _data, _params["training_data"]["auto_prob"] - ) - elif "sys_probs" in _training_params["training_data"]: - _sampler = get_weighted_sampler( - _data, - _params["training_data"]["sys_probs"], - sys_prob=True, - ) - else: - _sampler = get_weighted_sampler(_data, "prob_sys_size") - + _sampler = get_sampler_from_params(_data, _params) if _sampler is None: log.warning( "Sampler not specified!" 
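For context, the removed branching above distinguishes three sampler configurations that `get_sampler_from_params` now presumably consolidates. Written out as plain dicts (an illustration of the keys used above, not the validated schema):

```python
# Hedged sketch of the training_data sampler settings handled above:
params_auto = {"auto_prob": "prob_sys_size"}        # weight systems by size
params_explicit = {"sys_probs": [0.5, 0.3, 0.2]}    # explicit per-system probabilities
params_default = {}                                 # falls back to "prob_sys_size"
```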
@@ -202,14 +189,16 @@ def get_dataloader_and_buffer(_data, _params): return _dataloader, _data_buffered training_dataloader, training_data_buffered = get_dataloader_and_buffer( - _training_data, _training_params + _training_data, _training_params["training_data"] ) if _validation_data is not None: ( validation_dataloader, validation_data_buffered, - ) = get_dataloader_and_buffer(_validation_data, _training_params) + ) = get_dataloader_and_buffer( + _validation_data, _training_params["validation_data"] + ) valid_numb_batch = _training_params["validation_data"].get( "numb_btch", 1 ) @@ -284,7 +273,7 @@ def get_lr(lr_params): self.opt_type, self.opt_param = get_opt_param(training_params) # Model - self.model = get_model_for_wrapper(model_params) + self.model = get_model_for_wrapper(model_params, resuming=resuming) # Loss if not self.multi_task: @@ -496,7 +485,7 @@ def collect_single_finetune_params( _new_state_dict, _origin_state_dict, _random_state_dict, - ): + ) -> None: _new_fitting = _finetune_rule_single.get_random_fitting() _model_key_from = _finetune_rule_single.get_model_branch() target_keys = [ @@ -669,10 +658,11 @@ def run(self): core.nvprof_start() core.nvprof_enable_record_event() - def step(_step_id, task_key="Default"): + def step(_step_id, task_key="Default") -> None: # Paddle Profiler if enable_profiling: core.nvprof_nvtx_push(f"Training step {_step_id}") + self.wrapper.train() if isinstance(self.lr_exp, dict): _lr = self.lr_exp[task_key] @@ -707,20 +697,17 @@ def step(_step_id, task_key="Default"): if self.gradient_max_norm > 0.0: with nvprof_context(enable_profiling, "Gradient clip"): - grad_norm = paddle.nn.utils.clip_grad_norm_( - self.wrapper.parameters(), self.gradient_max_norm + paddle.nn.utils.clip_grad_norm_( + self.wrapper.parameters(), + self.gradient_max_norm, + error_if_nonfinite=True, ) - if not paddle.isfinite(grad_norm).all(): - # check local gradnorm single GPU case, trigger NanDetector - raise FloatingPointError("gradients are Nan/Inf") with nvprof_context(enable_profiling, "Adam update"): self.optimizer.step() self.scheduler.step() - if enable_profiling: - core.nvprof_nvtx_pop() else: raise ValueError(f"Not supported optimizer type '{self.opt_type}'") @@ -729,7 +716,7 @@ def step(_step_id, task_key="Default"): if self.display_in_training and ( display_step_id % self.disp_freq == 0 or display_step_id == 1 ): - self.wrapper.eval() + self.wrapper.eval() # will be set back to train mode after finishing validation def log_loss_train(_loss, _more_loss, _task_key="Default"): results = {} @@ -835,6 +822,7 @@ def log_loss_valid(_task_key="Default"): learning_rate=None, ) ) + self.wrapper.train() current_time = time.time() train_time = current_time - self.t0 @@ -888,12 +876,16 @@ def log_loss_valid(_task_key="Default"): display_step_id % self.tensorboard_freq == 0 or display_step_id == 1 ): writer.add_scalar(f"{task_key}/lr", cur_lr, display_step_id) - writer.add_scalar(f"{task_key}/loss", loss, display_step_id) + writer.add_scalar(f"{task_key}/loss", loss.item(), display_step_id) for item in more_loss: writer.add_scalar( - f"{task_key}/{item}", more_loss[item].item(), _step_id + f"{task_key}/{item}", more_loss[item].item(), display_step_id ) + if enable_profiling: + core.nvprof_nvtx_pop() + + self.wrapper.train() self.t0 = time.time() self.total_train_time = 0.0 for step_id in range(self.num_steps): @@ -989,7 +981,7 @@ def log_loss_valid(_task_key="Default"): "files, which can be viewed in NVIDIA Nsight Systems software" ) - def save_model(self, save_path, lr=0.0, step=0):
diff --git a/deepmd/pd/train/wrapper.py b/deepmd/pd/train/wrapper.py index c3643f8372..2263a6e9b9 100644 --- a/deepmd/pd/train/wrapper.py +++ b/deepmd/pd/train/wrapper.py
@@ -26,7 +26,7 @@ def __init__( self, loss: paddle.nn.Layer | dict = None, model_params=None, shared_links=None, - ): + ) -> None: """Construct a DeePMD model wrapper. Args:
@@ -64,7 +64,7 @@ def __init__( self.loss[task_key] = loss[task_key] self.inference_only = self.loss is None - def share_params(self, shared_links, resume=False): + def share_params(self, shared_links, resume=False) -> None: """ Share the parameters of classes following rules defined in shared_links during multitask training.
If not start from checkpoint (resume is False), @@ -111,8 +111,10 @@ def share_params(self, shared_links, resume=False): f"Shared params of {model_key_base}.{class_type_base} and {model_key_link}.{class_type_link}!" ) else: - if hasattr(self.model[model_key_base], class_type_base): - base_class = self.model[model_key_base].__getattr__(class_type_base) + if hasattr(self.model[model_key_base].atomic_model, class_type_base): + base_class = self.model[model_key_base].atomic_model.__getattr__( + class_type_base + ) for link_item in shared_links[shared_item]["links"][1:]: class_type_link = link_item["shared_type"] model_key_link = link_item["model_key"] @@ -123,9 +125,9 @@ def share_params(self, shared_links, resume=False): assert ( class_type_base == class_type_link ), f"Class type mismatched: {class_type_base} vs {class_type_link}!" - link_class = self.model[model_key_link].__getattr__( - class_type_link - ) + link_class = self.model[ + model_key_link + ].atomic_model.__getattr__(class_type_link) link_class.share_params( base_class, shared_level_link, resume=resume ) diff --git a/deepmd/pd/utils/dataloader.py b/deepmd/pd/utils/dataloader.py index 7a2bf4fe9c..9d59ea0da7 100644 --- a/deepmd/pd/utils/dataloader.py +++ b/deepmd/pd/utils/dataloader.py @@ -183,6 +183,7 @@ def __next__(self): return next(self.item) self.iters = [] + for item in self.dataloaders: self.iters.append(LazyIter(item)) @@ -196,7 +197,7 @@ def set_noise(self, noise_settings): for system in self.systems: system.set_noise(noise_settings) - def __len__(self): + def __len__(self) -> int: return len(self.dataloaders) def __getitem__(self, idx): @@ -219,19 +220,21 @@ def print_summary( name: str, prob: list[float], ): - print_summary( - name, - len(self.systems), - [ss.system for ss in self.systems], - [ss._natoms for ss in self.systems], - self.batch_sizes, - [ - ss._data_system.get_sys_numb_batch(self.batch_sizes[ii]) - for ii, ss in enumerate(self.systems) - ], - prob, - [ss._data_system.pbc for ss in self.systems], - ) + rank = dist.get_rank() if dist.is_initialized() else 0 + if rank == 0: + print_summary( + name, + len(self.systems), + [ss.system for ss in self.systems], + [ss._natoms for ss in self.systems], + self.batch_sizes, + [ + ss._data_system.get_sys_numb_batch(self.batch_sizes[ii]) + for ii, ss in enumerate(self.systems) + ], + prob, + [ss._data_system.pbc for ss in self.systems], + ) _sentinel = object() @@ -239,13 +242,13 @@ def print_summary( class BackgroundConsumer(Thread): - def __init__(self, queue, source, max_len): + def __init__(self, queue, source, max_len) -> None: Thread.__init__(self) self._queue = queue self._source = source # Main DL iterator self._max_len = max_len # - def run(self): + def run(self) -> None: for item in self._source: self._queue.put(item) # Blocking if the queue is full @@ -254,7 +257,7 @@ def run(self): class BufferedIterator: - def __init__(self, iterable): + def __init__(self, iterable) -> None: self._queue = queue.Queue(QUEUESIZE) self._iterable = iterable self._consumer = None @@ -263,7 +266,7 @@ def __init__(self, iterable): self.warning_time = None self.total = len(iterable) - def _create_consumer(self): + def _create_consumer(self) -> None: self._consumer = BackgroundConsumer(self._queue, self._iterable, self.total) self._consumer.daemon = True self._consumer.start() @@ -271,7 +274,7 @@ def _create_consumer(self): def __iter__(self): return self - def __len__(self): + def __len__(self) -> int: return self.total def __next__(self): @@ -337,3 +340,19 @@ def 
get_weighted_sampler(training_data, prob_style, sys_prob=False): len_sampler = training_data.total_batch * max(env.NUM_WORKERS, 1) sampler = WeightedRandomSampler(probs, len_sampler, replacement=True) return sampler + + +def get_sampler_from_params(_data, _params): + if ( + "sys_probs" in _params and _params["sys_probs"] is not None + ): # use sys_probs first + _sampler = get_weighted_sampler( + _data, + _params["sys_probs"], + sys_prob=True, + ) + elif "auto_prob" in _params: + _sampler = get_weighted_sampler(_data, _params["auto_prob"]) + else: + _sampler = get_weighted_sampler(_data, "prob_sys_size") + return _sampler
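The `get_sampler_from_params` helper added above gives sampler selection a single precedence order: an explicit `sys_probs` list wins, an `auto_prob` style string comes next, and otherwise the sampler falls back to the `"prob_sys_size"` style. A short sketch of the dispatch, with illustrative parameter dicts:

    # Hypothetical "training_data" sections from an input json.
    explicit = {"sys_probs": [0.2, 0.8]}          # per-system probabilities
    automatic = {"sys_probs": None, "auto_prob": "prob_sys_size"}
    default = {}                                  # neither key present

    # get_sampler_from_params(data, explicit)  -> weighted by the given list
    #                                             (sys_prob=True)
    # get_sampler_from_params(data, automatic) -> weighted by the style string
    # get_sampler_from_params(data, default)   -> falls back to "prob_sys_size"

Centralising the choice here lets the training and validation data sections go through the same code path.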
diff --git a/deepmd/pd/utils/decomp.py b/deepmd/pd/utils/decomp.py index 272c2deacb..3b7bddbcd1 100644 --- a/deepmd/pd/utils/decomp.py +++ b/deepmd/pd/utils/decomp.py
@@ -10,100 +10,17 @@ annotations, ) +import numpy as np import paddle __all__ = [ "masked_add_", - "norm", + "numel", "scatter_reduce", "sec", - "softmax", - "take_along_axis", ] -# decomposition for forward function -def softmax_decomp(x: paddle.Tensor, axis: int = -1) -> paddle.Tensor: - """Forward decomposition function of softmax. - - Parameters - ---------- - x : paddle.Tensor - Input. - axis : int, defaults: -1. - A dimension along which softmax will be computed. - - Returns - ------- - paddle.Tensor - Computed output. - """ - x_max = paddle.max(x, axis=axis, keepdim=True) - x = x - x_max - return paddle.exp(x) / paddle.sum(paddle.exp(x), axis=axis, keepdim=True) - - -def norm_decomp( - x: paddle.Tensor, p: float = 2, axis: bool = -1, keepdim: bool = False -) -> paddle.Tensor: - """Forward decomposition function of norm. - - Parameters - ---------- - x : paddle.Tensor - Input - p : float, default: 2 - Order of norm - axis : bool, default: -1 - Dimensions over which to compute the vector or matrix norm - keepdim : bool, default: False - If set to True, the reduced dimensions are retained in the result as dimensions - with size one - - Returns - ------- - paddle.Tensor - A real-valued tensor, even when A is complex. - """ - if p == 2 or p == 2.0: - # clip for negative indexing, or 1/(0^(k-1)) will cause inf in backward - return (x * x).sum(axis=axis, keepdim=keepdim) ** 0.5 - return (x.abs() ** p).sum(axis=axis, keepdim=keepdim) ** (1 / p) - - -def take_along_axis_decomp( - x: paddle.Tensor, indices: paddle.Tensor, axis: int, broadcast: bool = True -) -> paddle.Tensor: - """Forward decomposition function of take_along_axis. - - Parameters - ---------- - x : paddle.Tensor - The input tensor. - indices : paddle.Tensor - Indices to take along each 1d slice of array. - axis : int - The axis to take 1d slices along. - broadcast : bool, default: True - Whether the indices broadcast. - - Returns - ------- - paddle.Tensor - Computed output. - """ - # manually construct indices for gather_nd(ind_gather_nd.ndim == indices.ndim + 1, - # the last 1 represents the number of dimension(s) of indices) - ind_gather_nd = paddle.stack( - paddle.meshgrid(*[paddle.arange(v) for v in indices.shape], indexing="ij"), - axis=-1, - ) - ind_gather_nd[..., axis] = indices - # compute output using constructed indices via gather_nd - out = paddle.gather_nd(x, ind_gather_nd) - return out - - def scatter_reduce_decomp( input: paddle.Tensor, axis: int,
@@ -210,38 +127,13 @@ def masked_add__decomp( return x -def normalize_decomp( - x: paddle.Tensor, - p: float = 2, - axis: int = 1, - epsilon: float = 1e-12, -) -> paddle.Tensor: - """Forward decomposition function of normalize. - - Parameters - ---------- - x : paddle.Tensor - Input tensor. - p : float, optional - Order of the norm, default: 2 - axis : int, optional - Axis on which to perform normalization, default: 1 - epsilon : float, optional - Epsilon value, default: 1e-12 +def numel(x: paddle.Tensor) -> int: + if paddle.in_dynamic_mode(): + return np.prod(x.shape) - Returns - ------- - paddle.Tensor - Computed output. - """ - return paddle.nn.functional.normalize(x, p, axis, epsilon) - # return x / norm(x, p=p, axis=axis, keepdim=True) + return paddle.numel(x) # alias for decomposed functions for convenience -normalize = normalize_decomp masked_add_ = masked_add__decomp scatter_reduce = scatter_reduce_decomp -take_along_axis = take_along_axis_decomp -norm = norm_decomp -softmax = softmax_decomp
diff --git a/deepmd/pd/utils/env.py b/deepmd/pd/utils/env.py index 6dbdc69f30..041c231282 100644 --- a/deepmd/pd/utils/env.py +++ b/deepmd/pd/utils/env.py
@@ -77,13 +77,75 @@ def enable_prim(enable: bool = True): + # NOTE: operators in the list below will not use composite + # operators but kernels instead + EAGER_COMP_OP_BLACK_LIST = [ + "abs_grad", + "cast_grad", + # "concat_grad", + "cos_double_grad", + "cos_grad", + "cumprod_grad", + "cumsum_grad", + "dropout_grad", + "erf_grad", + "exp_grad", + "expand_grad", + "floor_grad", + "gather_grad", + "gather_nd_grad", + "gelu_grad", + "group_norm_grad", + "instance_norm_grad", + "layer_norm_grad", + "leaky_relu_grad", + "log_grad", + "max_grad", + "pad_grad", + "pow_double_grad", + "pow_grad", + "prod_grad", + "relu_grad", + "roll_grad", + "rsqrt_grad", + "scatter_grad", + "scatter_nd_add_grad", + "sigmoid_grad", + "silu_grad", + "sin_double_grad", + "sin_grad", + "slice_grad", + # "split_grad", + "sqrt_grad", + "stack_grad", + "sum_grad", + "tanh_double_grad", + "tanh_grad", + "topk_grad", + "transpose_grad", + "add_double_grad", + "add_grad", + "assign_grad", + "batch_norm_grad", + "divide_grad", + "elementwise_pow_grad", + "maximum_grad", + "min_grad", + "minimum_grad", + "multiply_grad", + "subtract_grad", + "tile_grad", + ] + EAGER_COMP_OP_BLACK_LIST = list(set(EAGER_COMP_OP_BLACK_LIST)) + """Enable running program in primitive C++ API in eager/static mode.""" from paddle.framework import ( core, ) core.set_prim_eager_enabled(enable) - core._set_prim_all_enabled(enable) + if enable: + paddle.framework.core._set_prim_backward_blacklist(*EAGER_COMP_OP_BLACK_LIST) log = logging.getLogger(__name__) log.info(f"{'Enable' if enable else 'Disable'} prim in eager and static mode.")
diff --git a/deepmd/pd/utils/exclude_mask.py b/deepmd/pd/utils/exclude_mask.py index 088ac186a8..29c9cc3501 100644 --- a/deepmd/pd/utils/exclude_mask.py +++ b/deepmd/pd/utils/exclude_mask.py
@@ -3,9 +3,6 @@ import numpy as np import paddle -from deepmd.pd.utils import ( - decomp, -) from deepmd.pd.utils.utils import ( to_paddle_tensor, )
@@ -18,7 +15,7 @@ def __init__( self, ntypes: int, exclude_types: list[int] = [], - ): + ) -> None: super().__init__() self.reinit(ntypes, exclude_types)
@@ -26,7 +23,7 @@ def reinit( self, ntypes: int, exclude_types: list[int] = [], - ): + ) -> None: self.ntypes = ntypes self.exclude_types = exclude_types self.type_mask = np.array(
@@ -71,7 +68,7 @@ def __init__( self, ntypes: int, exclude_types: list[tuple[int, int]] = [], - ): + ) -> None: super().__init__() self.reinit(ntypes, exclude_types)
@@ -79,7 +76,7 @@ def reinit( self, ntypes: int, exclude_types: list[tuple[int, int]] = [], - ): + ) -> None: self.ntypes = ntypes self._exclude_types: set[tuple[int, int]] = set() for tt in exclude_types:
@@ -137,19 +134,14 @@ def forward(
[ atype_ext, self.ntypes - * paddle.ones([nf, 1], dtype=atype_ext.dtype).to( - device=atype_ext.place - ), + * paddle.ones([nf, 1], dtype=atype_ext.dtype).to(atype_ext.place), ], axis=-1, ) type_i = atype_ext[:, :nloc].reshape([nf, nloc]) * (self.ntypes + 1) # nf x nloc x nnei index = paddle.where(nlist == -1, nall, nlist).reshape([nf, nloc * nnei]) - # type_j = paddle.take_along_axis(ae, axis=1, indices=index).reshape( - # [nf, nloc, nnei] - # ) - type_j = decomp.take_along_axis(ae, axis=1, indices=index).reshape( + type_j = paddle.take_along_axis(ae, axis=1, indices=index).reshape( [nf, nloc, nnei] ) type_ij = type_i[:, :, None] + type_j diff --git a/deepmd/pd/utils/nlist.py b/deepmd/pd/utils/nlist.py index 44924ce07d..ae9db628a1 100644 --- a/deepmd/pd/utils/nlist.py +++ b/deepmd/pd/utils/nlist.py @@ -7,7 +7,6 @@ import paddle from deepmd.pd.utils import ( - decomp, env, ) from deepmd.pd.utils.region import ( @@ -118,8 +117,7 @@ def build_neighbor_list( if paddle.in_dynamic_mode(): assert list(diff.shape) == [batch_size, nloc, nall, 3] # nloc x nall - # rr = paddle.linalg.norm(diff, axis=-1) - rr = decomp.norm(diff, axis=-1) + rr = paddle.linalg.norm(diff, axis=-1) # if central atom has two zero distances, sorting sometimes can not exclude itself rr = rr - paddle.eye(nloc, nall, dtype=rr.dtype).to(device=rr.place).unsqueeze(0) rr, nlist = paddle.sort(rr, axis=-1), paddle.argsort(rr, axis=-1) @@ -267,8 +265,7 @@ def build_directional_neighbor_list( if paddle.in_dynamic_mode(): assert list(diff.shape) == [batch_size, nloc_cntl, nall_neig, 3] # nloc x nall - # rr = paddle.linalg.norm(diff, axis=-1) - rr = decomp.norm(diff, axis=-1) + rr = paddle.linalg.norm(diff, axis=-1) rr, nlist = paddle.sort(rr, axis=-1), paddle.argsort(rr, axis=-1) # We assume that the central and neighbor atoms are diffferent, @@ -300,12 +297,7 @@ def nlist_distinguish_types( tmp_atype = paddle.tile(atype.unsqueeze(1), [1, nloc, 1]) mask = nlist == -1 # nloc x s(nsel) - # tnlist = paddle.take_along_axis( - # tmp_atype, - # axis=2, - # indices=nlist.masked_fill(mask, 0), - # ) - tnlist = decomp.take_along_axis( + tnlist = paddle.take_along_axis( tmp_atype, axis=2, indices=nlist.masked_fill(mask, 0), @@ -322,8 +314,7 @@ def nlist_distinguish_types( paddle.argsort(pick_mask, axis=-1, descending=True, stable=True), ) # nloc x s(nsel) - # inlist = paddle.take_along_axis(nlist, axis=2, indices=imap) - inlist = decomp.take_along_axis(nlist, axis=2, indices=imap) + inlist = paddle.take_along_axis(nlist, axis=2, indices=imap) inlist = inlist.masked_fill(~(pick_mask.to(paddle.bool)), -1) # nloc x nsel[ii] ret_nlist.append(paddle.split(inlist, [ss, snsel - ss], axis=-1)[0]) @@ -404,17 +395,13 @@ def build_multiple_neighbor_list( .expand([-1, -1, 3]) ) # nb x nloc x nsel x 3 - # coord2 = paddle.take_along_axis(coord1, axis=1, index=index).reshape( - # [nb, nloc, nsel, 3] - # ) - coord2 = decomp.take_along_axis(coord1, axis=1, indices=index).reshape( + coord2 = paddle.take_along_axis(coord1, axis=1, indices=index).reshape( [nb, nloc, nsel, 3] ) # nb x nloc x nsel x 3 diff = coord2 - coord0[:, :, None, :] # nb x nloc x nsel - # rr = paddle.linalg.norm(diff, axis=-1) - rr = decomp.norm(diff, axis=-1) + rr = paddle.linalg.norm(diff, axis=-1) rr.masked_fill(nlist_mask, float("inf")) nlist0 = nlist ret = {} @@ -516,8 +503,7 @@ def extend_coord_with_ghosts( xyz = xyz.reshape([-1, 3]) # xyz = xyz.to(device=device) # ns x 3 - # shift_idx = xyz[paddle.argsort(paddle.norm(xyz, axis=1))] - shift_idx = xyz[paddle.argsort(decomp.norm(xyz, 
axis=1))] + shift_idx = xyz[paddle.argsort(paddle.norm(xyz, axis=1))] ns, _ = shift_idx.shape nall = ns * nloc # nf x ns x 3 diff --git a/deepmd/pd/utils/region.py b/deepmd/pd/utils/region.py index 21927e3619..f3e3eaa52d 100644 --- a/deepmd/pd/utils/region.py +++ b/deepmd/pd/utils/region.py @@ -1,10 +1,6 @@ # SPDX-License-Identifier: LGPL-3.0-or-later import paddle -from deepmd.pd.utils import ( - decomp, -) - def phys2inter( coord: paddle.Tensor, @@ -82,14 +78,11 @@ def to_face_distance( def b_to_face_distance(cell): volume = paddle.linalg.det(cell) c_yz = paddle.cross(cell[:, 1], cell[:, 2], axis=-1) - # _h2yz = volume / paddle.linalg.norm(c_yz, axis=-1) - _h2yz = volume / decomp.norm(c_yz, axis=-1) + _h2yz = volume / paddle.linalg.norm(c_yz, axis=-1) c_zx = paddle.cross(cell[:, 2], cell[:, 0], axis=-1) - # _h2zx = volume / paddle.linalg.norm(c_zx, axis=-1) - _h2zx = volume / decomp.norm(c_zx, axis=-1) + _h2zx = volume / paddle.linalg.norm(c_zx, axis=-1) c_xy = paddle.cross(cell[:, 0], cell[:, 1], axis=-1) - # _h2xy = volume / paddle.linalg.norm(c_xy, axis=-1) - _h2xy = volume / decomp.norm(c_xy, axis=-1) + _h2xy = volume / paddle.linalg.norm(c_xy, axis=-1) return paddle.stack([_h2yz, _h2zx, _h2xy], axis=1) diff --git a/deepmd/pd/utils/utils.py b/deepmd/pd/utils/utils.py index 48732ff84e..87072eb3cd 100644 --- a/deepmd/pd/utils/utils.py +++ b/deepmd/pd/utils/utils.py @@ -3,6 +3,9 @@ annotations, ) +from contextlib import ( + contextmanager, +) from typing import ( TYPE_CHECKING, overload, @@ -12,6 +15,9 @@ import numpy as np import paddle import paddle.nn.functional as F +from paddle.framework import ( + core, +) from deepmd.dpmodel.common import PRECISION_DICT as NP_PRECISION_DICT @@ -177,3 +183,16 @@ def get_generator( return generator else: return None + + +@contextmanager +def nvprof_context(enable_profiler: bool, name: str): + if enable_profiler: + core.nvprof_nvtx_push(name) + + try: + yield + + finally: + if enable_profiler: + core.nvprof_nvtx_pop() diff --git a/source/tests/consistent/descriptor/test_dpa1.py b/source/tests/consistent/descriptor/test_dpa1.py index 8be219f5ea..92b2c6bd0b 100644 --- a/source/tests/consistent/descriptor/test_dpa1.py +++ b/source/tests/consistent/descriptor/test_dpa1.py @@ -18,6 +18,7 @@ from ..common import ( INSTALLED_ARRAY_API_STRICT, INSTALLED_JAX, + INSTALLED_PD, INSTALLED_PT, INSTALLED_TF, CommonTest, @@ -39,6 +40,10 @@ from deepmd.jax.descriptor.dpa1 import DescrptDPA1 as DescriptorDPA1JAX else: DescriptorDPA1JAX = None +if INSTALLED_PD: + from deepmd.pd.model.descriptor.dpa1 import DescrptDPA1 as DescrptDPA1PD +else: + DescrptDPA1PD = None if INSTALLED_ARRAY_API_STRICT: from ...array_api_strict.descriptor.dpa1 import DescrptDPA1 as DescriptorDPA1Strict else: @@ -187,6 +192,34 @@ def skip_dp(self) -> bool: temperature, ) + @property + def skip_pd(self) -> bool: + ( + tebd_dim, + tebd_input_mode, + resnet_dt, + type_one_side, + attn, + attn_layer, + attn_dotr, + excluded_types, + env_protection, + set_davg_zero, + scaling_factor, + normalize, + temperature, + ln_eps, + smooth_type_embedding, + concat_output_tebd, + precision, + use_econf_tebd, + use_tebd_bias, + ) = self.param + return not INSTALLED_PD or self.is_meaningless_zero_attention_layer_tests( + attn_layer, + temperature, + ) + @property def skip_jax(self) -> bool: ( @@ -287,6 +320,7 @@ def skip_tf(self) -> bool: tf_class = DescrptDPA1TF dp_class = DescrptDPA1DP pt_class = DescrptDPA1PT + pd_class = DescrptDPA1PD jax_class = DescriptorDPA1JAX array_api_strict_class = 
DescriptorDPA1Strict @@ -387,6 +421,16 @@ def eval_jax(self, jax_obj: Any) -> Any: mixed_types=True, ) + def eval_pd(self, pd_obj: Any) -> Any: + return self.eval_pd_descriptor( + pd_obj, + self.natoms, + self.coords, + self.atype, + self.box, + mixed_types=True, + ) + def eval_array_api_strict(self, array_api_strict_obj: Any) -> Any: return self.eval_array_api_strict_descriptor( array_api_strict_obj, diff --git a/source/tests/consistent/model/test_dpa1.py b/source/tests/consistent/model/test_dpa1.py index 774c624ac7..8b8fab7ae1 100644 --- a/source/tests/consistent/model/test_dpa1.py +++ b/source/tests/consistent/model/test_dpa1.py @@ -14,6 +14,7 @@ from ..common import ( INSTALLED_JAX, + INSTALLED_PD, INSTALLED_PT, INSTALLED_TF, SKIP_FLAG, @@ -37,6 +38,11 @@ model_args, ) +if INSTALLED_PD: + from deepmd.pd.model.model import get_model as get_model_pd + from deepmd.pd.model.model.ener_model import EnergyModel as EnergyModelPD +else: + EnergyModelPD = None if INSTALLED_JAX: from deepmd.jax.model.ener_model import EnergyModel as EnergyModelJAX from deepmd.jax.model.model import get_model as get_model_jax @@ -90,6 +96,7 @@ def data(self) -> dict: tf_class = EnergyModelTF dp_class = EnergyModelDP pt_class = EnergyModelPT + pd_class = EnergyModelPD jax_class = EnergyModelJAX args = model_args() @@ -102,6 +109,8 @@ def get_reference_backend(self): return self.RefBackend.PT if not self.skip_tf: return self.RefBackend.TF + if not self.skip_pd: + return self.RefBackend.PD if not self.skip_jax: return self.RefBackend.JAX if not self.skip_dp: @@ -119,6 +128,8 @@ def pass_data_to_cls(self, cls, data) -> Any: return get_model_dp(data) elif cls is EnergyModelPT: return get_model_pt(data) + elif cls is EnergyModelPD: + return get_model_pd(data) elif cls is EnergyModelJAX: return get_model_jax(data) return cls(**data, **self.additional_data) @@ -190,6 +201,15 @@ def eval_pt(self, pt_obj: Any) -> Any: self.box, ) + def eval_pd(self, pd_obj: Any) -> Any: + return self.eval_pd_model( + pd_obj, + self.natoms, + self.coords, + self.atype, + self.box, + ) + def eval_jax(self, jax_obj: Any) -> Any: return self.eval_jax_model( jax_obj, @@ -225,6 +245,14 @@ def extract_ret(self, ret: Any, backend) -> tuple[np.ndarray, ...]: ret[3].ravel(), ret[4].ravel(), ) + elif backend is self.RefBackend.PD: + return ( + ret["energy"].flatten(), + ret["atom_energy"].flatten(), + ret["force"].flatten(), + ret["virial"].flatten(), + ret["atom_virial"].flatten(), + ) elif backend is self.RefBackend.JAX: return ( ret["energy_redu"].ravel(), diff --git a/source/tests/pd/common.py b/source/tests/pd/common.py index 59a9672330..d73544c5f1 100644 --- a/source/tests/pd/common.py +++ b/source/tests/pd/common.py @@ -1,4 +1,5 @@ # SPDX-License-Identifier: LGPL-3.0-or-later +import pathlib from typing import ( Optional, Union, @@ -7,6 +8,7 @@ import numpy as np import paddle +from deepmd.common import j_loader as dp_j_loader from deepmd.main import ( main, ) @@ -15,6 +17,12 @@ GLOBAL_PD_FLOAT_PRECISION, ) +tests_path = pathlib.Path(__file__).parent.absolute() + + +def j_loader(filename): + return dp_j_loader(tests_path / filename) + def run_dp(cmd: str) -> int: """Run DP directly from the entry point instead of the subprocess. 
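The `j_loader` wrapper added to `source/tests/pd/common.py` above resolves JSON fixture paths against the directory containing `common.py` itself, so tests load the same file regardless of the current working directory. A usage sketch; the relative path below is illustrative but matches the `models/dpa1.json` fixture introduced next:

    # tests_path is the absolute path of source/tests/pd, so a test can write:
    jdata = j_loader("model/models/dpa1.json")
    assert jdata["descriptor"]["type"] == "se_atten"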
diff --git a/source/tests/pd/model/models/dpa1.json b/source/tests/pd/model/models/dpa1.json new file mode 100644 index 0000000000..a969c290ae --- /dev/null +++ b/source/tests/pd/model/models/dpa1.json @@ -0,0 +1,36 @@ +{ + "type_map": [ + "O", + "H" + ], + "descriptor": { + "type": "se_atten", + "sel": 30, + "rcut_smth": 2.0, + "rcut": 6.0, + "neuron": [ + 2, + 4, + 8 + ], + "axis_neuron": 4, + "attn": 5, + "attn_layer": 2, + "attn_dotr": true, + "attn_mask": false, + "activation_function": "tanh", + "scaling_factor": 1.0, + "normalize": true, + "temperature": 1.0, + "seed": 1 + }, + "fitting_net": { + "neuron": [ + 240, + 240, + 240 + ], + "resnet_dt": true, + "seed": 1 + } +} diff --git a/source/tests/pd/model/models/dpa1.pd b/source/tests/pd/model/models/dpa1.pd new file mode 100644 index 0000000000000000000000000000000000000000..147312635cda5afb1065ddcb61b66e90bebe5f3f GIT binary patch literal 11329 zcmeHNc{r5&+m~#mMbU;R**X;}OG({YEhDX3juVl|D2$j2Au6OKYOEi3Z)ahbMl_w^}fF|9cTHy=X(F}{PE0OpX+(%x9VqrEk5H)B zslNQ*E~NSj+FFcVa-B-x@uY5^WKQ*_?xrddCJ}fEQUpG4Dv95h--6;~NBreB-lJ=% zJ{QNXnCv@Ai)v_SsQ;C7d^-_%C{%NwP40qYaZQ{jGBA;Wi406+U?Kw(8Te;2K;=wg zUjYY-g$3~xmBdF9{AIe6ru_SCpY=bdbsvnyIPp1=hhH)<(FYTKFwqAS>tI3${;zf5 z+qBlUurpg}YDS?DZ7Eh{Tcv+}GqpBzBHBABsr);nWMyk=XHPz+WMf9Lw{qk%=Y?O~DNIi*=q;sqUQ z(wwikW#5HCyXOz-Q&N%t4r*^MW8N`i58FdHd!fv-I`{wLqBblLQxywHp z#e#%>3c1Zdc5|cftz)@J+OqN%`!y3n4g?E??TLXW3$%K5Q<5R>-G!ug7g(^_cFJ}& z{cN~#cQns2yaqmMCOj705dgaqkM^navS95Z;*+rBHQ+a>yD&^47gCIH!*>B;-f|6`9#QMZ2Qqdr5z~UT`Qr@k$E)@feRGb&Qss>2k zykTo>KBQhG#~#siLwuhSjaE-^yX)+_xNs&kX~sEyG)scp!>UBt=6Wbddq0b&k&LZm zLz01CBHZ<8%Sf@xhA)J=p(GtTK6?93tvQkglSY5kJtM(HSGtLWoNEqv1{HbV`I-UA zorwYIvu;74;n7P@XR46LRRzpfGM*gP||;p+Y> zkn7lVyGuL^6V9jUU7<5z!!@0ID$dmyozu7XidzoUiLRbuW>kiOibpRb6|iB-2}X$J zx**65>z1aK6d^NsqfbAp9*xu5Gu)90eI6{SW{rI4wOqlk@+kv7buc&k8B?U9sN zM-Idf4cl(zPk{E^c{Uq*@-gVz>#ZV?3s2Y@(yyQbdMnQiK3J2A-&A75UsnlpD>DZ! 
z8ZndeU@*&PvtyI~Km_GEKaC}xkd|0d1l#B=k7G(*XmDB2p(6DS1bHoH?T$!=F50Sr zD1}RqU{}4VCHf1bL?_)K1vg-al=NU?Tq7F4x_S1(54E`eibHq!VFnb{7~8H*A4RFi zxkFQpyP!5OkpHGxEkqq&Vs*Uo3w)!s@z2vDpQMbVe*6TXG4<1)#KThbR=L;;{`52YduV)h5+1Q_0cEDIP1xozAJL_B1;LY?NgN~c!pimn*H*qu% zLO-5PP?}eYVl@A@AB?Mj&*^lu(b}gV=tkfNVo0wYxyIdEe!=k%+7B0V{_baQ{sih|As+j*fO5D--;UGJU80gvQ!WySdLx zYm;OFb^yuJjP(-x*B6b5SJorZ-%VC7r~|eexI4`5=)zXDKAP^#55V_cHa{ya78Ybw zYY=BQj;)S0e<(iNv59JGOSZT9o!R-{f&Ja#|E<5(|9oKMo@3?T^c)@^ z(ADN^I=DQK2p-NV#-O+mcY);;D4jz&AiVe%q)ywDvH1Z5FJ&7Sd!&(}-I142oluX5 zPUuXP>nX=NF-cF!f?A+h2Q&{-uc3ba$r%Fb$qYZwELTk&tO+Z@V?FJ$5-%QNhsGlB z_kyeO0;TA(%(Z+NnJ4Mg{4g5^DAn`4K53#@LA;RNjCv3`XM8Q%CkAz|ZzpUwPr>E& zIadqc6yf1R>}+9KI`R`wY|wbsgXbdSAj~HXB<|aWnQr6cCx33@4H*_nZCku2tu+_v z-7l5O2Fk$CL-K<UPIyO&usj$@eLJE^>5;*qnQMUb;KVTz#?Y_4X&QjhMz8WfOrWtAu9WO}T~D zhhCJ)DAd5r&%1@sEiH%eAX`IUNd}Y#zjS*Xk_78?=x+wHGLXuW}UpTy)!HE{l}PDBEDuYzc*qkIThcp^*ru;pbY${?`bmI#KzWyGqfAE}5cM3|Umc|1I+r5s$W8rGJ5sl<8P4EFlXXhjMBw8+@A_t1h$ z-Y&a(5Gtm~cgchc(B?c$9MKSbg;r$gHJ>8;!DIM&zz&O#xMJ4(Q%t$HxK5Dj&%7Q0 z%RoM}O5^3&uo%~K{Fc_A^c?Y?;TK-NG*B|P^LRI@xqgU-=K?0SJRjEL{6 z3Y<|1Goqhl#azn5>np^(Q$DjmNwn`rg$>nEJn0-sIwu+Vb#LlyrPpF%M@EY5;e326 zL>XRDRtd-A?}ys+6(UW=i|r*94symN^*d%ig<{b|S=}Xc*dMd?qP|WgG+aycovKp` zWcw3A6;YY^aAtzheCPnSlgqXZfiXDaQoot8*=+h0{g)1J$1%}-5wq+HigJnb2G4cssny7-tmOAVmTOH$ij&Je(fAk!`4Q!lz z{dKo7cIP+Gp?K8FnnJYybyvfdNI6ZmKmN=1#=rJCzPq0>i@-bP=tVe=zHij8*!XDz zoSY_nlz)9PTxU1<|NKH9@}?i6T;;36`yU4P_3V2FGK)@&OwX!C)v9SDv@ZoC zHO1p%aXo?ZhBCM-`K~m6paLD#UG|1%24kpe=d6(F0TGV)7CBRc}&BB%_bbFdIzl?bw zY~1-KtG3hObb#Q4M#ZPtZ)sq1JCy;0TeIr3-9qr}&(=QY^jY8@YcjlBgFUudqc8L%oGzr{qiLt)w-EaNAj6eP z-(ko%#&FRyYtKm*$?vdDQT{=3SsPfWMqZD-Ukcf4=C?b!^#4J|tG>^e@7D)U9!{HX zk+$7B)R_shl@7S;wp7Am;j%_klT?TzxhKxr*a8B}R3B(9po5f`>tn*sWOz*%DA@X> z8W+Zlmf55?;cEqP0mt=~5PT&_HFQlO=ziQTe`s+VNVvbahT_fObwE$C*ZmF_^%D}0 zd6vNbq(!~@N9*Bslo(VUOMx3-dcp<6+kkd6Z)o`s`Ka!!Fw(fX5%0I{ZWtt&pacIv z!PI@PpvBR4vTuGPbg&y`E2Q%9qW>vJJE4p4>}o_$C(k|HuytwGV%ca=-j~-tO(+)7 z(r`gZPd4tK<8E|5sR3sUQI;=yLnds8c`>OzfzGMjIhnI{>Mzx?d zXP3*G@q8?drT=?%m`;e#e^h)F{y=)avo?QMea0`_KPNxqbNtWf&-k1t1n5r)(D-Hh zXB0?c&$o9lPudeL_L6s+*_+uAIcCU^^P#Ox<-EyBn&dm%l4wh`H=__OOgaArIgss5 zRj8!7T;P>d(n2oqDk@2q3%r_2TE+!drIJ>0fz_y_wOn9zDoL9QtU)E|aDmrQNrqhD zwN#Rkujp@BuH?}Ci5t%0d4wCzA$p7(&S83j8_uEX$PMRkJ_28`dywnoCoW#o1qGCM*z1+l-qRfgZJ)(J~ zxdoMa$@xX8dby=JC7FpuMTwPDdU)cCQj_!Zic5-0lS`&dp3=h#Ryd`HJud~KWbzbm zhSn+0j7d}4rvy#W@MiR8^5$rplEK=;no?3(kP6bolx8udvjZZ}+{0)y#m~>r>pu{H z32%myDM_8q4V^KPhXiXs*l#_y>n3mDqWylN+Lvz4|7d^Naj}Sn^R@jtih*0dEIzdV zUEH=y^#Skqcjbpqb2+}j{<7MAdx5y)``^^^{#d>K`2M=wl`}7gy|aJyz{>d0&Nuee z$teyJybtzoiFIG}koCv@S@WmKOn>oU|LsFl{Uv8_us3r?Uej^vNf|u8Hz@J=gfKl literal 0 HcmV?d00001 diff --git a/source/tests/pd/model/test_atomic_model_atomic_stat.py b/source/tests/pd/model/test_atomic_model_atomic_stat.py new file mode 100644 index 0000000000..93aa7b8905 --- /dev/null +++ b/source/tests/pd/model/test_atomic_model_atomic_stat.py @@ -0,0 +1,431 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import tempfile +import unittest +from pathlib import ( + Path, +) +from typing import ( + Optional, +) + +import h5py +import numpy as np +import paddle + +from deepmd.dpmodel.output_def import ( + FittingOutputDef, + OutputVariableDef, +) +from deepmd.pd.model.atomic_model import ( + BaseAtomicModel, + DPAtomicModel, +) +from deepmd.pd.model.descriptor.dpa1 import ( + DescrptDPA1, +) +from deepmd.pd.model.task.base_fitting import ( + BaseFitting, +) +from deepmd.pd.utils import ( + env, +) +from deepmd.pd.utils.utils import ( + to_numpy_array, + to_paddle_tensor, +) +from deepmd.utils.path import 
( + DPPath, +) + +from .test_env_mat import ( + TestCaseSingleFrameWithNlist, +) + +dtype = env.GLOBAL_PD_FLOAT_PRECISION + + +class FooFitting(paddle.nn.Layer, BaseFitting): + def output_def(self): + return FittingOutputDef( + [ + OutputVariableDef( + "foo", + [1], + reducible=True, + r_differentiable=True, + c_differentiable=True, + ), + OutputVariableDef( + "bar", + [1, 2], + reducible=True, + r_differentiable=True, + c_differentiable=True, + ), + ] + ) + + def serialize(self) -> dict: + raise NotImplementedError + + def change_type_map( + self, type_map: list[str], model_with_new_type_stat=None + ) -> None: + raise NotImplementedError + + def get_type_map(self) -> list[str]: + raise NotImplementedError + + def forward( + self, + descriptor: paddle.Tensor, + atype: paddle.Tensor, + gr: Optional[paddle.Tensor] = None, + g2: Optional[paddle.Tensor] = None, + h2: Optional[paddle.Tensor] = None, + fparam: Optional[paddle.Tensor] = None, + aparam: Optional[paddle.Tensor] = None, + ): + nf, nloc, _ = descriptor.shape + ret = {} + ret["foo"] = ( + paddle.to_tensor( + [ + [1.0, 2.0, 3.0], + [4.0, 5.0, 6.0], + ] + ) + .reshape([nf, nloc, *self.output_def()["foo"].shape]) + .to(env.GLOBAL_PD_FLOAT_PRECISION) + .to(env.DEVICE) + ) + ret["bar"] = ( + paddle.to_tensor( + [ + [1.0, 2.0, 3.0, 7.0, 8.0, 9.0], + [4.0, 5.0, 6.0, 10.0, 11.0, 12.0], + ] + ) + .reshape([nf, nloc, *self.output_def()["bar"].shape]) + .to(env.GLOBAL_PD_FLOAT_PRECISION) + .to(env.DEVICE) + ) + return ret + + +class TestAtomicModelStat(unittest.TestCase, TestCaseSingleFrameWithNlist): + def tearDown(self): + self.tempdir.cleanup() + + def setUp(self): + TestCaseSingleFrameWithNlist.setUp(self) + self.merged_output_stat = [ + { + "coord": to_paddle_tensor(np.zeros([2, 3, 3])), + "atype": to_paddle_tensor( + np.array([[0, 0, 1], [0, 1, 1]], dtype=np.int32) + ), + "atype_ext": to_paddle_tensor( + np.array([[0, 0, 1, 0], [0, 1, 1, 0]], dtype=np.int32) + ), + "box": to_paddle_tensor(np.zeros([2, 3, 3])), + "natoms": to_paddle_tensor( + np.array([[3, 3, 2, 1], [3, 3, 1, 2]], dtype=np.int32) + ), + # bias of foo: 5, 6 + "atom_foo": to_paddle_tensor( + np.array([[5.0, 5.0, 5.0], [5.0, 6.0, 7.0]]).reshape(2, 3, 1) + ), + # bias of bar: [1, 5], [3, 2] + "bar": to_paddle_tensor( + np.array([5.0, 12.0, 7.0, 9.0]).reshape(2, 1, 2) + ), + "find_atom_foo": np.float32(1.0), + "find_bar": np.float32(1.0), + }, + { + "coord": to_paddle_tensor(np.zeros([2, 3, 3])), + "atype": to_paddle_tensor( + np.array([[0, 0, 1], [0, 1, 1]], dtype=np.int32) + ), + "atype_ext": to_paddle_tensor( + np.array([[0, 0, 1, 0], [0, 1, 1, 0]], dtype=np.int32) + ), + "box": to_paddle_tensor(np.zeros([2, 3, 3])), + "natoms": to_paddle_tensor( + np.array([[3, 3, 2, 1], [3, 3, 1, 2]], dtype=np.int32) + ), + # bias of foo: 5, 6 from atomic label. 
+ "foo": to_paddle_tensor(np.array([5.0, 7.0]).reshape(2, 1)), + # bias of bar: [1, 5], [3, 2] + "bar": to_paddle_tensor( + np.array([5.0, 12.0, 7.0, 9.0]).reshape(2, 1, 2) + ), + "find_foo": np.float32(1.0), + "find_bar": np.float32(1.0), + }, + ] + self.tempdir = tempfile.TemporaryDirectory() + h5file = str((Path(self.tempdir.name) / "testcase.h5").resolve()) + with h5py.File(h5file, "w") as f: + pass + self.stat_file_path = DPPath(h5file, "a") + + def test_output_stat(self): + nf, nloc, nnei = self.nlist.shape + ds = DescrptDPA1( + self.rcut, + self.rcut_smth, + sum(self.sel), + self.nt, + ).to(env.DEVICE) + ft = FooFitting().to(env.DEVICE) + type_map = ["foo", "bar"] + md0 = DPAtomicModel( + ds, + ft, + type_map=type_map, + ).to(env.DEVICE) + args = [ + to_paddle_tensor(ii) for ii in [self.coord_ext, self.atype_ext, self.nlist] + ] + # nf x nloc + at = self.atype_ext[:, :nloc] + + def cvt_ret(x): + return {kk: to_numpy_array(vv) for kk, vv in x.items()} + + # 1. test run without bias + # nf x na x odim + ret0 = md0.forward_common_atomic(*args) + ret0 = cvt_ret(ret0) + expected_ret0 = {} + expected_ret0["foo"] = np.array( + [ + [1.0, 2.0, 3.0], + [4.0, 5.0, 6.0], + ] + ).reshape([nf, nloc, *md0.fitting_output_def()["foo"].shape]) + expected_ret0["bar"] = np.array( + [ + [1.0, 2.0, 3.0, 7.0, 8.0, 9.0], + [4.0, 5.0, 6.0, 10.0, 11.0, 12.0], + ] + ).reshape([nf, nloc, *md0.fitting_output_def()["bar"].shape]) + for kk in ["foo", "bar"]: + np.testing.assert_almost_equal(ret0[kk], expected_ret0[kk]) + + # 2. test bias is applied + md0.compute_or_load_out_stat( + self.merged_output_stat, stat_file_path=self.stat_file_path + ) + ret1 = md0.forward_common_atomic(*args) + expected_std = np.ones( + (2, 2, 2), dtype=np.float64 + ) # 2 keys, 2 atypes, 2 max dims. + expected_std[0, :, :1] = np.array([0.0, 0.816496]).reshape( + 2, 1 + ) # updating std for foo based on [5.0, 5.0, 5.0], [5.0, 6.0, 7.0]] + np.testing.assert_almost_equal( + to_numpy_array(md0.out_std), expected_std, decimal=4 + ) + ret1 = cvt_ret(ret1) + # nt x odim + foo_bias = np.array([5.0, 6.0]).reshape(2, 1) + bar_bias = np.array([1.0, 5.0, 3.0, 2.0]).reshape(2, 1, 2) + expected_ret1 = {} + expected_ret1["foo"] = ret0["foo"] + foo_bias[at] + expected_ret1["bar"] = ret0["bar"] + bar_bias[at] + for kk in ["foo", "bar"]: + np.testing.assert_almost_equal(ret1[kk], expected_ret1[kk]) + + # 3. test bias load from file + def raise_error(): + raise RuntimeError + + md0.compute_or_load_out_stat(raise_error, stat_file_path=self.stat_file_path) + ret2 = md0.forward_common_atomic(*args) + ret2 = cvt_ret(ret2) + for kk in ["foo", "bar"]: + np.testing.assert_almost_equal(ret1[kk], ret2[kk]) + np.testing.assert_almost_equal( + to_numpy_array(md0.out_std), expected_std, decimal=4 + ) + + # 4. 
test change bias + BaseAtomicModel.change_out_bias( + md0, self.merged_output_stat, bias_adjust_mode="change-by-statistic" + ) + args = [ + to_paddle_tensor(ii) + for ii in [ + self.coord_ext, + to_numpy_array(self.merged_output_stat[0]["atype_ext"]), + self.nlist, + ] + ] + ret3 = md0.forward_common_atomic(*args) + ret3 = cvt_ret(ret3) + expected_std[0, :, :1] = np.array([1.24722, 0.47140]).reshape( + 2, 1 + ) # updating std for foo based on [4.0, 3.0, 2.0], [1.0, 1.0, 1.0]] + expected_ret3 = {} + # new bias [2.666, 1.333] + expected_ret3["foo"] = np.array( + [[3.6667, 4.6667, 4.3333], [6.6667, 6.3333, 7.3333]] + ).reshape(2, 3, 1) + for kk in ["foo"]: + np.testing.assert_almost_equal(ret3[kk], expected_ret3[kk], decimal=4) + np.testing.assert_almost_equal( + to_numpy_array(md0.out_std), expected_std, decimal=4 + ) + + +class TestAtomicModelStatMergeGlobalAtomic( + unittest.TestCase, TestCaseSingleFrameWithNlist +): + def tearDown(self): + self.tempdir.cleanup() + + def setUp(self): + TestCaseSingleFrameWithNlist.setUp(self) + self.merged_output_stat = [ + { + "coord": to_paddle_tensor(np.zeros([2, 3, 3])), + "atype": to_paddle_tensor( + np.array([[0, 0, 0], [0, 0, 0]], dtype=np.int32) + ), + "atype_ext": to_paddle_tensor( + np.array([[0, 0, 1, 0], [0, 1, 1, 0]], dtype=np.int32) + ), + "box": to_paddle_tensor(np.zeros([2, 3, 3])), + "natoms": to_paddle_tensor( + np.array([[3, 3, 2, 1], [3, 3, 1, 2]], dtype=np.int32) + ), + # bias of foo: 5.5, nan + "atom_foo": to_paddle_tensor( + np.array([[5.0, 5.0, 5.0], [5.0, 6.0, 7.0]]).reshape(2, 3, 1) + ), + # bias of bar: [1, 5], [3, 2] + "bar": to_paddle_tensor( + np.array([5.0, 12.0, 7.0, 9.0]).reshape(2, 1, 2) + ), + "find_atom_foo": np.float32(1.0), + "find_bar": np.float32(1.0), + }, + { + "coord": to_paddle_tensor(np.zeros([2, 3, 3])), + "atype": to_paddle_tensor( + np.array([[0, 0, 1], [0, 1, 1]], dtype=np.int32) + ), + "atype_ext": to_paddle_tensor( + np.array([[0, 0, 1, 0], [0, 1, 1, 0]], dtype=np.int32) + ), + "box": to_paddle_tensor(np.zeros([2, 3, 3])), + "natoms": to_paddle_tensor( + np.array([[3, 3, 2, 1], [3, 3, 1, 2]], dtype=np.int32) + ), + # bias of foo: 5.5, 3 from atomic label. + "foo": to_paddle_tensor(np.array([5.0, 7.0]).reshape(2, 1)), + # bias of bar: [1, 5], [3, 2] + "bar": to_paddle_tensor( + np.array([5.0, 12.0, 7.0, 9.0]).reshape(2, 1, 2) + ), + "find_foo": np.float32(1.0), + "find_bar": np.float32(1.0), + }, + ] + self.tempdir = tempfile.TemporaryDirectory() + h5file = str((Path(self.tempdir.name) / "testcase.h5").resolve()) + with h5py.File(h5file, "w") as f: + pass + self.stat_file_path = DPPath(h5file, "a") + + def test_output_stat(self): + nf, nloc, nnei = self.nlist.shape + ds = DescrptDPA1( + self.rcut, + self.rcut_smth, + sum(self.sel), + self.nt, + ).to(env.DEVICE) + ft = FooFitting().to(env.DEVICE) + type_map = ["foo", "bar"] + md0 = DPAtomicModel( + ds, + ft, + type_map=type_map, + ).to(env.DEVICE) + args = [ + to_paddle_tensor(ii) for ii in [self.coord_ext, self.atype_ext, self.nlist] + ] + # nf x nloc + at = self.atype_ext[:, :nloc] + + def cvt_ret(x): + return {kk: to_numpy_array(vv) for kk, vv in x.items()} + + # 1. 
test run without bias + # nf x na x odim + ret0 = md0.forward_common_atomic(*args) + ret0 = cvt_ret(ret0) + expected_ret0 = {} + expected_ret0["foo"] = np.array( + [ + [1.0, 2.0, 3.0], + [4.0, 5.0, 6.0], + ] + ).reshape([nf, nloc, *md0.fitting_output_def()["foo"].shape]) + expected_ret0["bar"] = np.array( + [ + [1.0, 2.0, 3.0, 7.0, 8.0, 9.0], + [4.0, 5.0, 6.0, 10.0, 11.0, 12.0], + ] + ).reshape([nf, nloc, *md0.fitting_output_def()["bar"].shape]) + for kk in ["foo", "bar"]: + np.testing.assert_almost_equal(ret0[kk], expected_ret0[kk]) + + # 2. test bias is applied + md0.compute_or_load_out_stat( + self.merged_output_stat, stat_file_path=self.stat_file_path + ) + ret1 = md0.forward_common_atomic(*args) + ret1 = cvt_ret(ret1) + # nt x odim + foo_bias = np.array([5.5, 3.0]).reshape(2, 1) + bar_bias = np.array([1.0, 5.0, 3.0, 2.0]).reshape(2, 1, 2) + expected_ret1 = {} + expected_ret1["foo"] = ret0["foo"] + foo_bias[at] + expected_ret1["bar"] = ret0["bar"] + bar_bias[at] + for kk in ["foo", "bar"]: + np.testing.assert_almost_equal(ret1[kk], expected_ret1[kk]) + + # 3. test bias load from file + def raise_error(): + raise RuntimeError + + md0.compute_or_load_out_stat(raise_error, stat_file_path=self.stat_file_path) + ret2 = md0.forward_common_atomic(*args) + ret2 = cvt_ret(ret2) + for kk in ["foo", "bar"]: + np.testing.assert_almost_equal(ret1[kk], ret2[kk]) + + # 4. test change bias + BaseAtomicModel.change_out_bias( + md0, self.merged_output_stat, bias_adjust_mode="change-by-statistic" + ) + args = [ + to_paddle_tensor(ii) + for ii in [ + self.coord_ext, + to_numpy_array(self.merged_output_stat[0]["atype_ext"]), + self.nlist, + ] + ] + ret3 = md0.forward_common_atomic(*args) + ret3 = cvt_ret(ret3) + expected_ret3 = {} + # new bias [2, -5] + expected_ret3["foo"] = np.array([[3, 4, -2], [6, 0, 1]]).reshape(2, 3, 1) + for kk in ["foo"]: + np.testing.assert_almost_equal(ret3[kk], expected_ret3[kk], decimal=4) diff --git a/source/tests/pd/model/test_atomic_model_global_stat.py b/source/tests/pd/model/test_atomic_model_global_stat.py new file mode 100644 index 0000000000..abd7928a0f --- /dev/null +++ b/source/tests/pd/model/test_atomic_model_global_stat.py @@ -0,0 +1,510 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import tempfile +import unittest +from pathlib import ( + Path, +) +from typing import ( + Optional, +) + +import h5py +import numpy as np +import paddle + +from deepmd.dpmodel.atomic_model import DPAtomicModel as DPDPAtomicModel +from deepmd.dpmodel.output_def import ( + FittingOutputDef, + OutputVariableDef, +) +from deepmd.pd.model.atomic_model import ( + BaseAtomicModel, + DPAtomicModel, +) +from deepmd.pd.model.descriptor import ( + DescrptDPA1, + DescrptSeA, +) +from deepmd.pd.model.task.base_fitting import ( + BaseFitting, +) +from deepmd.pd.model.task.ener import ( + InvarFitting, +) +from deepmd.pd.utils import ( + env, +) +from deepmd.pd.utils.utils import ( + to_numpy_array, + to_paddle_tensor, +) +from deepmd.utils.path import ( + DPPath, +) + +from .test_env_mat import ( + TestCaseSingleFrameWithNlist, +) + +dtype = env.GLOBAL_PD_FLOAT_PRECISION + + +class FooFitting(paddle.nn.Layer, BaseFitting): + def output_def(self): + return FittingOutputDef( + [ + OutputVariableDef( + "foo", + [1], + reducible=True, + r_differentiable=True, + c_differentiable=True, + ), + OutputVariableDef( + "pix", + [1], + reducible=True, + r_differentiable=True, + c_differentiable=True, + ), + OutputVariableDef( + "bar", + [1, 2], + reducible=True, + r_differentiable=True, + 
c_differentiable=True, + ), + ] + ) + + def serialize(self) -> dict: + raise NotImplementedError + + def change_type_map( + self, type_map: list[str], model_with_new_type_stat=None + ) -> None: + raise NotImplementedError + + def get_type_map(self) -> list[str]: + raise NotImplementedError + + def forward( + self, + descriptor: paddle.Tensor, + atype: paddle.Tensor, + gr: Optional[paddle.Tensor] = None, + g2: Optional[paddle.Tensor] = None, + h2: Optional[paddle.Tensor] = None, + fparam: Optional[paddle.Tensor] = None, + aparam: Optional[paddle.Tensor] = None, + ): + nf, nloc, _ = descriptor.shape + ret = {} + ret["foo"] = ( + paddle.to_tensor( + [ + [1.0, 2.0, 3.0], + [4.0, 5.0, 6.0], + ] + ) + .reshape([nf, nloc] + self.output_def()["foo"].shape) # noqa: RUF005 + .to(env.GLOBAL_PD_FLOAT_PRECISION) + .to(env.DEVICE) + ) + ret["pix"] = ( + paddle.to_tensor( + [ + [3.0, 2.0, 1.0], + [6.0, 5.0, 4.0], + ] + ) + .reshape([nf, nloc] + self.output_def()["pix"].shape) # noqa: RUF005 + .to(env.GLOBAL_PD_FLOAT_PRECISION) + .to(env.DEVICE) + ) + ret["bar"] = ( + paddle.to_tensor( + [ + [1.0, 2.0, 3.0, 7.0, 8.0, 9.0], + [4.0, 5.0, 6.0, 10.0, 11.0, 12.0], + ] + ) + .reshape([nf, nloc] + self.output_def()["bar"].shape) # noqa: RUF005 + .to(env.GLOBAL_PD_FLOAT_PRECISION) + .to(env.DEVICE) + ) + return ret + + +class TestAtomicModelStat(unittest.TestCase, TestCaseSingleFrameWithNlist): + def tearDown(self): + self.tempdir.cleanup() + + def setUp(self): + TestCaseSingleFrameWithNlist.setUp(self) + nf, nloc, nnei = self.nlist.shape + self.merged_output_stat = [ + { + "coord": to_paddle_tensor(np.zeros([2, 3, 3])), + "atype": to_paddle_tensor( + np.array([[0, 0, 1], [0, 1, 1]], dtype=np.int32) + ), + "atype_ext": to_paddle_tensor( + np.array([[0, 0, 1, 0], [0, 1, 1, 0]], dtype=np.int32) + ), + "box": to_paddle_tensor(np.zeros([2, 3, 3])), + "natoms": to_paddle_tensor( + np.array([[3, 3, 2, 1], [3, 3, 1, 2]], dtype=np.int32) + ), + # bias of foo: 1, 3 + "foo": to_paddle_tensor(np.array([5.0, 7.0]).reshape(2, 1)), + # no bias of pix + # bias of bar: [1, 5], [3, 2] + "bar": to_paddle_tensor( + np.array([5.0, 12.0, 7.0, 9.0]).reshape(2, 1, 2) + ), + "find_foo": np.float32(1.0), + "find_bar": np.float32(1.0), + } + ] + self.tempdir = tempfile.TemporaryDirectory() + h5file = str((Path(self.tempdir.name) / "testcase.h5").resolve()) + with h5py.File(h5file, "w") as f: + pass + self.stat_file_path = DPPath(h5file, "a") + + def test_output_stat(self): + nf, nloc, nnei = self.nlist.shape + ds = DescrptDPA1( + self.rcut, + self.rcut_smth, + sum(self.sel), + self.nt, + ).to(env.DEVICE) + ft = FooFitting().to(env.DEVICE) + type_map = ["foo", "bar"] + md0 = DPAtomicModel( + ds, + ft, + type_map=type_map, + ).to(env.DEVICE) + args = [ + to_paddle_tensor(ii) for ii in [self.coord_ext, self.atype_ext, self.nlist] + ] + # nf x nloc + at = self.atype_ext[:, :nloc] + + def cvt_ret(x): + return {kk: to_numpy_array(vv) for kk, vv in x.items()} + + # 1. 
test run without bias + # nf x na x odim + ret0 = md0.forward_common_atomic(*args) + ret0 = cvt_ret(ret0) + + expected_ret0 = {} + expected_ret0["foo"] = np.array( + [ + [1.0, 2.0, 3.0], + [4.0, 5.0, 6.0], + ] + ).reshape([nf, nloc] + md0.fitting_output_def()["foo"].shape) # noqa: RUF005 + expected_ret0["pix"] = np.array( + [ + [3.0, 2.0, 1.0], + [6.0, 5.0, 4.0], + ] + ).reshape([nf, nloc] + md0.fitting_output_def()["pix"].shape) # noqa: RUF005 + expected_ret0["bar"] = np.array( + [ + [1.0, 2.0, 3.0, 7.0, 8.0, 9.0], + [4.0, 5.0, 6.0, 10.0, 11.0, 12.0], + ] + ).reshape([nf, nloc] + md0.fitting_output_def()["bar"].shape) # noqa: RUF005 + for kk in ["foo", "pix", "bar"]: + np.testing.assert_almost_equal(ret0[kk], expected_ret0[kk]) + + # 2. test bias is applied + md0.compute_or_load_out_stat( + self.merged_output_stat, stat_file_path=self.stat_file_path + ) + ret1 = md0.forward_common_atomic(*args) + ret1 = cvt_ret(ret1) + expected_std = np.ones((3, 2, 2)) # 3 keys, 2 atypes, 2 max dims. + # nt x odim + foo_bias = np.array([1.0, 3.0]).reshape(2, 1) + bar_bias = np.array([1.0, 5.0, 3.0, 2.0]).reshape(2, 1, 2) + expected_ret1 = {} + expected_ret1["foo"] = ret0["foo"] + foo_bias[at] + expected_ret1["pix"] = ret0["pix"] + expected_ret1["bar"] = ret0["bar"] + bar_bias[at] + for kk in ["foo", "pix", "bar"]: + np.testing.assert_almost_equal(ret1[kk], expected_ret1[kk]) + np.testing.assert_almost_equal(to_numpy_array(md0.out_std), expected_std) + + # 3. test bias load from file + def raise_error(): + raise RuntimeError + + md0.compute_or_load_out_stat(raise_error, stat_file_path=self.stat_file_path) + ret2 = md0.forward_common_atomic(*args) + ret2 = cvt_ret(ret2) + for kk in ["foo", "pix", "bar"]: + np.testing.assert_almost_equal(ret1[kk], ret2[kk]) + np.testing.assert_almost_equal(to_numpy_array(md0.out_std), expected_std) + + # 4. test change bias + BaseAtomicModel.change_out_bias( + md0, self.merged_output_stat, bias_adjust_mode="change-by-statistic" + ) + args = [ + to_paddle_tensor(ii) + for ii in [ + self.coord_ext, + to_numpy_array(self.merged_output_stat[0]["atype_ext"]), + self.nlist, + ] + ] + ret3 = md0.forward_common_atomic(*args) + ret3 = cvt_ret(ret3) + ## model output on foo: [[2, 3, 6], [5, 8, 9]] given bias [1, 3] + ## foo sumed: [11, 22] compared with [5, 7], fit target is [-6, -15] + ## fit bias is [1, -8] + ## old bias + fit bias [2, -5] + ## new model output is [[3, 4, -2], [6, 0, 1]], which sumed to [5, 7] + expected_ret3 = {} + expected_ret3["foo"] = np.array([[3, 4, -2], [6, 0, 1]]).reshape(2, 3, 1) + expected_ret3["pix"] = ret0["pix"] + for kk in ["foo", "pix"]: + np.testing.assert_almost_equal(ret3[kk], expected_ret3[kk]) + # bar is too complicated to be manually computed. + np.testing.assert_almost_equal(to_numpy_array(md0.out_std), expected_std) + + def test_preset_bias(self): + nf, nloc, nnei = self.nlist.shape + ds = DescrptDPA1( + self.rcut, + self.rcut_smth, + sum(self.sel), + self.nt, + ).to(env.DEVICE) + ft = FooFitting().to(env.DEVICE) + type_map = ["foo", "bar"] + preset_out_bias = { + # "foo": np.array(3.0, 2.0]).reshape(2, 1), + "foo": [None, 2], + "bar": np.array([7.0, 5.0, 13.0, 11.0]).reshape(2, 1, 2), + } + md0 = DPAtomicModel( + ds, + ft, + type_map=type_map, + preset_out_bias=preset_out_bias, + ).to(env.DEVICE) + args = [ + to_paddle_tensor(ii) for ii in [self.coord_ext, self.atype_ext, self.nlist] + ] + # nf x nloc + at = self.atype_ext[:, :nloc] + + def cvt_ret(x): + return {kk: to_numpy_array(vv) for kk, vv in x.items()} + + # 1. 
test run without bias + # nf x na x odim + ret0 = md0.forward_common_atomic(*args) + ret0 = cvt_ret(ret0) + expected_ret0 = {} + expected_ret0["foo"] = np.array( + [ + [1.0, 2.0, 3.0], + [4.0, 5.0, 6.0], + ] + ).reshape([nf, nloc] + md0.fitting_output_def()["foo"].shape) # noqa: RUF005 + expected_ret0["pix"] = np.array( + [ + [3.0, 2.0, 1.0], + [6.0, 5.0, 4.0], + ] + ).reshape([nf, nloc] + md0.fitting_output_def()["pix"].shape) # noqa: RUF005 + expected_ret0["bar"] = np.array( + [ + [1.0, 2.0, 3.0, 7.0, 8.0, 9.0], + [4.0, 5.0, 6.0, 10.0, 11.0, 12.0], + ] + ).reshape([nf, nloc] + md0.fitting_output_def()["bar"].shape) # noqa: RUF005 + for kk in ["foo", "pix", "bar"]: + np.testing.assert_almost_equal(ret0[kk], expected_ret0[kk]) + + # 2. test bias is applied + md0.compute_or_load_out_stat( + self.merged_output_stat, stat_file_path=self.stat_file_path + ) + ret1 = md0.forward_common_atomic(*args) + ret1 = cvt_ret(ret1) + # foo sums: [5, 7], + # given bias of type 1 being 2, the bias left for type 0 is [5-2*1, 7-2*2] = [3,3] + # the solution of type 0 is 1.8 + foo_bias = np.array([1.8, preset_out_bias["foo"][1]]).reshape(2, 1) + bar_bias = preset_out_bias["bar"] + expected_ret1 = {} + expected_ret1["foo"] = ret0["foo"] + foo_bias[at] + expected_ret1["pix"] = ret0["pix"] + expected_ret1["bar"] = ret0["bar"] + bar_bias[at] + for kk in ["foo", "pix", "bar"]: + np.testing.assert_almost_equal(ret1[kk], expected_ret1[kk]) + + # 3. test bias load from file + def raise_error(): + raise RuntimeError + + md0.compute_or_load_out_stat(raise_error, stat_file_path=self.stat_file_path) + ret2 = md0.forward_common_atomic(*args) + ret2 = cvt_ret(ret2) + for kk in ["foo", "pix", "bar"]: + np.testing.assert_almost_equal(ret1[kk], ret2[kk]) + + # 4. test change bias + BaseAtomicModel.change_out_bias( + md0, self.merged_output_stat, bias_adjust_mode="change-by-statistic" + ) + args = [ + to_paddle_tensor(ii) + for ii in [ + self.coord_ext, + to_numpy_array(self.merged_output_stat[0]["atype_ext"]), + self.nlist, + ] + ] + ret3 = md0.forward_common_atomic(*args) + ret3 = cvt_ret(ret3) + ## model output on foo: [[2.8, 3.8, 5], [5.8, 7., 8.]] given bias [1.8, 2] + ## foo sumed: [11.6, 20.8] compared with [5, 7], fit target is [-6.6, -13.8] + ## fit bias is [-7, 2] (2 is assigned. -7 is fit to [-8.6, -17.8]) + ## old bias[1.8,2] + fit bias[-7, 2] = [-5.2, 4] + ## new model output is [[-4.2, -3.2, 7], [-1.2, 9, 10]] + expected_ret3 = {} + expected_ret3["foo"] = np.array([[-4.2, -3.2, 7.0], [-1.2, 9.0, 10.0]]).reshape( + 2, 3, 1 + ) + expected_ret3["pix"] = ret0["pix"] + for kk in ["foo", "pix"]: + np.testing.assert_almost_equal(ret3[kk], expected_ret3[kk]) + # bar is too complicated to be manually computed. + + def test_preset_bias_all_none(self): + nf, nloc, nnei = self.nlist.shape + ds = DescrptDPA1( + self.rcut, + self.rcut_smth, + sum(self.sel), + self.nt, + ).to(env.DEVICE) + ft = FooFitting().to(env.DEVICE) + type_map = ["foo", "bar"] + preset_out_bias = { + "foo": [None, None], + } + md0 = DPAtomicModel( + ds, + ft, + type_map=type_map, + preset_out_bias=preset_out_bias, + ).to(env.DEVICE) + args = [ + to_paddle_tensor(ii) for ii in [self.coord_ext, self.atype_ext, self.nlist] + ] + # nf x nloc + at = self.atype_ext[:, :nloc] + + def cvt_ret(x): + return {kk: to_numpy_array(vv) for kk, vv in x.items()} + + # 1. 
test run without bias + # nf x na x odim + ret0 = md0.forward_common_atomic(*args) + ret0 = cvt_ret(ret0) + expected_ret0 = {} + expected_ret0["foo"] = np.array( + [ + [1.0, 2.0, 3.0], + [4.0, 5.0, 6.0], + ] + ).reshape([nf, nloc] + md0.fitting_output_def()["foo"].shape) # noqa: RUF005 + expected_ret0["pix"] = np.array( + [ + [3.0, 2.0, 1.0], + [6.0, 5.0, 4.0], + ] + ).reshape([nf, nloc] + md0.fitting_output_def()["pix"].shape) # noqa: RUF005 + expected_ret0["bar"] = np.array( + [ + [1.0, 2.0, 3.0, 7.0, 8.0, 9.0], + [4.0, 5.0, 6.0, 10.0, 11.0, 12.0], + ] + ).reshape([nf, nloc] + md0.fitting_output_def()["bar"].shape) # noqa: RUF005 + for kk in ["foo", "pix", "bar"]: + np.testing.assert_almost_equal(ret0[kk], expected_ret0[kk]) + + # 2. test bias is applied + md0.compute_or_load_out_stat( + self.merged_output_stat, stat_file_path=self.stat_file_path + ) + ret1 = md0.forward_common_atomic(*args) + ret1 = cvt_ret(ret1) + # nt x odim + foo_bias = np.array([1.0, 3.0]).reshape(2, 1) + bar_bias = np.array([1.0, 5.0, 3.0, 2.0]).reshape(2, 1, 2) + expected_ret1 = {} + expected_ret1["foo"] = ret0["foo"] + foo_bias[at] + expected_ret1["pix"] = ret0["pix"] + expected_ret1["bar"] = ret0["bar"] + bar_bias[at] + for kk in ["foo", "pix", "bar"]: + np.testing.assert_almost_equal(ret1[kk], expected_ret1[kk]) + + def test_serialize(self): + nf, nloc, nnei = self.nlist.shape + ds = DescrptSeA( + self.rcut, + self.rcut_smth, + self.sel, + ).to(env.DEVICE) + ft = InvarFitting( + "foo", + self.nt, + ds.get_dim_out(), + 1, + mixed_types=ds.mixed_types(), + ).to(env.DEVICE) + type_map = ["A", "B"] + md0 = DPAtomicModel( + ds, + ft, + type_map=type_map, + ).to(env.DEVICE) + args = [ + to_paddle_tensor(ii) for ii in [self.coord_ext, self.atype_ext, self.nlist] + ] + # nf x nloc + at = self.atype_ext[:, :nloc] + + def cvt_ret(x): + return {kk: to_numpy_array(vv) for kk, vv in x.items()} + + md0.compute_or_load_out_stat( + self.merged_output_stat, stat_file_path=self.stat_file_path + ) + ret0 = md0.forward_common_atomic(*args) + ret0 = cvt_ret(ret0) + md1 = DPAtomicModel.deserialize(md0.serialize()) + ret1 = md1.forward_common_atomic(*args) + ret1 = cvt_ret(ret1) + + for kk in ["foo"]: + np.testing.assert_almost_equal(ret0[kk], ret1[kk]) + + md2 = DPDPAtomicModel.deserialize(md0.serialize()) + args = [self.coord_ext, self.atype_ext, self.nlist] + ret2 = md2.forward_common_atomic(*args) + for kk in ["foo"]: + np.testing.assert_almost_equal(ret0[kk], ret2[kk]) diff --git a/source/tests/pd/model/test_autodiff.py b/source/tests/pd/model/test_autodiff.py index a056491fb3..1bd9dd0d0f 100644 --- a/source/tests/pd/model/test_autodiff.py +++ b/source/tests/pd/model/test_autodiff.py @@ -190,7 +190,6 @@ def setUp(self): self.model = get_model(model_params).to(env.DEVICE) -@unittest.skip("Skip for not implemented yet") class TestEnergyModelDPA1Force(unittest.TestCase, ForceTest): def setUp(self): model_params = copy.deepcopy(model_dpa1) @@ -198,7 +197,6 @@ def setUp(self): self.model = get_model(model_params).to(env.DEVICE) -@unittest.skip("Skip for not implemented yet") class TestEnergyModelDPA1Virial(unittest.TestCase, VirialTest): def setUp(self): model_params = copy.deepcopy(model_dpa1) diff --git a/source/tests/pd/model/test_descriptor.py b/source/tests/pd/model/test_descriptor.py index 10f2fd271b..dc78856851 100644 --- a/source/tests/pd/model/test_descriptor.py +++ b/source/tests/pd/model/test_descriptor.py @@ -17,7 +17,6 @@ prod_env_mat, ) from deepmd.pd.utils import ( - decomp, dp_random, env, ) @@ -179,7 +178,7 @@ def 
test_consistency(self): my_nlist = nlist.reshape([bsz, -1]).cpu() mask = my_nlist == -1 my_nlist = my_nlist * (~mask).astype(my_nlist.dtype) - my_nlist = decomp.take_along_axis(mapping, axis=-1, indices=my_nlist) + my_nlist = paddle.take_along_axis(mapping, axis=-1, indices=my_nlist) my_nlist = my_nlist * (~mask).astype(my_nlist.dtype) - mask.astype( my_nlist.dtype ) diff --git a/source/tests/pd/model/test_descriptor_dpa1.py b/source/tests/pd/model/test_descriptor_dpa1.py new file mode 100644 index 0000000000..bfcf4ba6ee --- /dev/null +++ b/source/tests/pd/model/test_descriptor_dpa1.py @@ -0,0 +1,387 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import json +import os +import unittest +from pathlib import ( + Path, +) + +import numpy as np +import paddle + +from deepmd.pd.model.descriptor import ( + DescrptBlockSeAtten, + DescrptDPA1, +) +from deepmd.pd.model.network.network import ( + TypeEmbedNet, +) +from deepmd.pd.utils import ( + env, +) +from deepmd.pd.utils.nlist import ( + extend_input_and_build_neighbor_list, +) + +CUR_DIR = os.path.dirname(__file__) + + +class TestDPA1(unittest.TestCase): + def setUp(self): + cell = [ + 5.122106549439247480e00, + 4.016537340154059388e-01, + 6.951654033828678081e-01, + 4.016537340154059388e-01, + 6.112136112297989143e00, + 8.178091365465004481e-01, + 6.951654033828678081e-01, + 8.178091365465004481e-01, + 6.159552512682983760e00, + ] + self.cell = ( + paddle.to_tensor( + cell, + dtype=env.GLOBAL_PD_FLOAT_PRECISION, + ) + .to(device=env.DEVICE) + .reshape([1, 3, 3]) + ) + coord = [ + 2.978060152121375648e00, + 3.588469695887098077e00, + 2.792459820604495491e00, + 3.895592322591093115e00, + 2.712091020667753760e00, + 1.366836847133650501e00, + 9.955616170888935690e-01, + 4.121324820711413039e00, + 1.817239061889086571e00, + 3.553661462345699906e00, + 5.313046969500791583e00, + 6.635182659098815883e00, + 6.088601018589653080e00, + 6.575011420004332585e00, + 6.825240650611076099e00, + ] + self.coord = paddle.to_tensor( + coord, dtype=env.GLOBAL_PD_FLOAT_PRECISION, place=env.DEVICE + ).reshape([1, -1, 3]) + self.atype = paddle.to_tensor( + [0, 0, 0, 1, 1], dtype=paddle.int32, place=env.DEVICE + ).reshape([1, -1]) + self.ref_d = paddle.to_tensor( + [ + 8.382518544113587780e-03, + -3.390120566088597812e-03, + 6.145981571114964362e-03, + -4.880300873973819273e-03, + -3.390120566088597812e-03, + 1.372540996564941464e-03, + -2.484163690574096341e-03, + 1.972313058658722688e-03, + 6.145981571114964362e-03, + -2.484163690574096341e-03, + 4.507748738021747671e-03, + -3.579717194906019764e-03, + -4.880300873973819273e-03, + 1.972313058658722688e-03, + -3.579717194906019764e-03, + 2.842794615687799838e-03, + 6.733043802494966066e-04, + -2.721540313345096771e-04, + 4.936158526085561134e-04, + -3.919743287822345223e-04, + -1.311123004527576900e-02, + 5.301179352601203924e-03, + -9.614612349318877454e-03, + 7.634884975521277241e-03, + 8.877088452901006621e-03, + -3.590945566653638409e-03, + 6.508042782015627942e-03, + -5.167671664327699171e-03, + -2.697241463040870365e-03, + 1.091350446825975137e-03, + -1.976895708961905022e-03, + 1.569671412121975348e-03, + 8.645131636261189911e-03, + -3.557395265621639355e-03, + 6.298048561552698106e-03, + -4.999272007935521948e-03, + -3.557395265621639355e-03, + 1.467866637220284964e-03, + -2.587004431651147504e-03, + 2.052752235601402672e-03, + 6.298048561552698106e-03, + -2.587004431651147504e-03, + 4.594085551315935101e-03, + -3.647656549789176847e-03, + -4.999272007935521948e-03, + 2.052752235601402672e-03, + 
-3.647656549789176847e-03, + 2.896359275520481256e-03, + 6.689620176492027878e-04, + -2.753606422414641049e-04, + 4.864958810186969444e-04, + -3.860599754167503119e-04, + -1.349238259226558101e-02, + 5.547478630961994242e-03, + -9.835472300819447095e-03, + 7.808197926069362048e-03, + 9.220744348752592245e-03, + -3.795799103392961601e-03, + 6.716516319358462918e-03, + -5.331265718473574867e-03, + -2.783836698392940304e-03, + 1.147461939123531121e-03, + -2.025013030986024063e-03, + 1.606944814423778541e-03, + 9.280385723343491378e-03, + -3.515852178447095942e-03, + 7.085282215778941628e-03, + -5.675852414643783178e-03, + -3.515852178447095942e-03, + 1.337760635271160884e-03, + -2.679428786337713451e-03, + 2.145400621815936413e-03, + 7.085282215778941628e-03, + -2.679428786337713451e-03, + 5.414439648102228192e-03, + -4.338426468139268931e-03, + -5.675852414643783178e-03, + 2.145400621815936413e-03, + -4.338426468139268931e-03, + 3.476467482674507146e-03, + 7.166961981167455130e-04, + -2.697932188839837972e-04, + 5.474643906631899504e-04, + -4.386556623669893621e-04, + -1.480434821331240956e-02, + 5.604647062899507579e-03, + -1.130745349141585449e-02, + 9.059113563516829268e-03, + 9.758791063112262978e-03, + -3.701477720487638626e-03, + 7.448215522796466058e-03, + -5.966057584545172120e-03, + -2.845102393948158344e-03, + 1.078743584169829543e-03, + -2.170093031447992756e-03, + 1.738010461687942770e-03, + 9.867599071916231118e-03, + -3.811041717688905522e-03, + 7.121877634386481262e-03, + -5.703120290113914553e-03, + -3.811041717688905522e-03, + 1.474046183772771213e-03, + -2.747386907428428938e-03, + 2.199711055637492037e-03, + 7.121877634386481262e-03, + -2.747386907428428938e-03, + 5.145050639440944609e-03, + -4.120642824501622239e-03, + -5.703120290113914553e-03, + 2.199711055637492037e-03, + -4.120642824501622239e-03, + 3.300262321758350853e-03, + 1.370499995344566383e-03, + -5.313041843655797901e-04, + 9.860110343046961986e-04, + -7.892505817954784597e-04, + -1.507686316307561489e-02, + 5.818961290579217904e-03, + -1.088774506142304276e-02, + 8.719460408506790952e-03, + 9.764630842803939323e-03, + -3.770134041110058572e-03, + 7.049438389985595785e-03, + -5.645302934019884485e-03, + -3.533582373572779437e-03, + 1.367148320603491559e-03, + -2.546602904764623705e-03, + 2.038882844528267305e-03, + 7.448297038731285964e-03, + -2.924276815200288742e-03, + 5.355960540523636154e-03, + -4.280386435083473329e-03, + -2.924276815200288742e-03, + 1.150311064893848757e-03, + -2.100635980860638373e-03, + 1.678427895009850001e-03, + 5.355960540523636154e-03, + -2.100635980860638373e-03, + 3.853607053247790071e-03, + -3.080076301871465493e-03, + -4.280386435083473329e-03, + 1.678427895009850001e-03, + -3.080076301871465493e-03, + 2.461876613756722523e-03, + 9.730712866459405395e-04, + -3.821759579990726546e-04, + 6.994242056622360787e-04, + -5.589662297882965055e-04, + -1.138916742131982317e-02, + 4.469391132927387489e-03, + -8.192016282448397885e-03, + 6.547234460517113892e-03, + 7.460070829043288082e-03, + -2.929867802018087421e-03, + 5.363646855497249989e-03, + -4.286347242903034739e-03, + -2.643569023340565718e-03, + 1.038826463247002245e-03, + -1.899910089750410976e-03, + 1.518237240362583541e-03, + ], + dtype=env.GLOBAL_PD_FLOAT_PRECISION, + place=env.DEVICE, + ) + with open(Path(CUR_DIR) / "models" / "dpa1.json") as fp: + self.model_json = json.load(fp) + self.file_model_param = Path(CUR_DIR) / "models" / "dpa1.pd" + self.file_type_embed = Path(CUR_DIR) / "models" / "dpa2_tebd.pd" + + def 
test_descriptor_block(self) -> None: + # paddle.seed(0) + model_dpa1 = self.model_json + dparams = model_dpa1["descriptor"] + ntypes = len(model_dpa1["type_map"]) + assert "se_atten" == dparams["type"] + dparams.pop("type") + dparams["ntypes"] = ntypes + des = DescrptBlockSeAtten( + **dparams, + ).to(env.DEVICE) + state_dict = paddle.load(str(self.file_model_param)) + # this is an old state dict, modify manually + state_dict["compress_info.0"] = des.compress_info[0] + state_dict["compress_data.0"] = des.compress_data[0] + des.set_state_dict(state_dict) + coord = self.coord + atype = self.atype + box = self.cell + # handle type_embedding + type_embedding = TypeEmbedNet(ntypes, 8, use_tebd_bias=True).to(env.DEVICE) + type_embedding.set_state_dict(paddle.load(str(self.file_type_embed))) + + ## to save model parameters + # paddle.save(des.state_dict(), 'model_weights.pd') + # paddle.save(type_embedding.state_dict(), 'model_weights.pd') + ( + extended_coord, + extended_atype, + mapping, + nlist, + ) = extend_input_and_build_neighbor_list( + coord, + atype, + des.get_rcut(), + des.get_sel(), + mixed_types=des.mixed_types(), + box=box, + ) + descriptor, env_mat, diff, rot_mat, sw = des( + nlist, + extended_coord, + extended_atype, + type_embedding(extended_atype), + mapping=None, + ) + # np.savetxt('tmp.out', descriptor.detach().numpy().reshape(1,-1), delimiter=",") + self.assertEqual(descriptor.shape[-1], des.get_dim_out()) + self.assertAlmostEqual(6.0, des.get_rcut()) + self.assertEqual(30, des.get_nsel()) + self.assertEqual(2, des.get_ntypes()) + np.testing.assert_allclose( + descriptor.reshape([-1]).numpy(), self.ref_d.numpy(), atol=1e-10, rtol=1e-10 + ) + + def test_descriptor(self) -> None: + with open(Path(CUR_DIR) / "models" / "dpa1.json") as fp: + self.model_json = json.load(fp) + model_dpa2 = self.model_json + ntypes = len(model_dpa2["type_map"]) + dparams = model_dpa2["descriptor"] + dparams["ntypes"] = ntypes + assert dparams["type"] == "se_atten" + dparams.pop("type") + dparams["concat_output_tebd"] = False + dparams["use_tebd_bias"] = True + des = DescrptDPA1( + **dparams, + ).to(env.DEVICE) + target_dict = des.state_dict() + source_dict = paddle.load(str(self.file_model_param)) + type_embd_dict = paddle.load(str(self.file_type_embed)) + target_dict = translate_se_atten_and_type_embd_dicts_to_dpa1( + target_dict, + source_dict, + type_embd_dict, + ) + des.set_state_dict(target_dict) + + coord = self.coord + atype = self.atype + box = self.cell + ( + extended_coord, + extended_atype, + mapping, + nlist, + ) = extend_input_and_build_neighbor_list( + coord, + atype, + des.get_rcut(), + des.get_sel(), + mixed_types=des.mixed_types(), + box=box, + ) + descriptor, env_mat, diff, rot_mat, sw = des( + extended_coord, + extended_atype, + nlist, + mapping=mapping, + ) + self.assertEqual(descriptor.shape[-1], des.get_dim_out()) + self.assertAlmostEqual(6.0, des.get_rcut()) + self.assertEqual(30, des.get_nsel()) + self.assertEqual(2, des.get_ntypes()) + np.testing.assert_allclose( + descriptor.reshape([-1]).numpy(), self.ref_d.numpy(), atol=1e-10, rtol=1e-10 + ) + + dparams["concat_output_tebd"] = True + des = DescrptDPA1( + **dparams, + ).to(env.DEVICE) + descriptor, env_mat, diff, rot_mat, sw = des( + extended_coord, + extended_atype, + nlist, + mapping=mapping, + ) + self.assertEqual(descriptor.shape[-1], des.get_dim_out()) + + +def translate_se_atten_and_type_embd_dicts_to_dpa1( + target_dict, + source_dict, + type_embd_dict, +): + all_keys = list(target_dict.keys()) + record = [False for 
ii in all_keys] + for kk, vv in source_dict.items(): + tk = "se_atten." + kk + record[all_keys.index(tk)] = True + target_dict[tk] = vv + assert len(type_embd_dict.keys()) == 2 + it = iter(type_embd_dict.keys()) + for _ in range(2): + kk = next(it) + tk = "type_embedding." + kk + record[all_keys.index(tk)] = True + target_dict[tk] = type_embd_dict[kk] + record[all_keys.index("se_atten.compress_data.0")] = True + record[all_keys.index("se_atten.compress_info.0")] = True + assert all(record) + return target_dict diff --git a/source/tests/pd/model/test_dpa1.py b/source/tests/pd/model/test_dpa1.py new file mode 100644 index 0000000000..285dd3d4cd --- /dev/null +++ b/source/tests/pd/model/test_dpa1.py @@ -0,0 +1,164 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import itertools +import unittest + +import numpy as np +import paddle + +from deepmd.dpmodel.descriptor.dpa1 import DescrptDPA1 as DPDescrptDPA1 +from deepmd.pd.model.descriptor.dpa1 import ( + DescrptDPA1, +) +from deepmd.pd.utils import ( + env, +) +from deepmd.pd.utils.env import ( + PRECISION_DICT, +) + +from ...seed import ( + GLOBAL_SEED, +) +from .test_env_mat import ( + TestCaseSingleFrameWithNlist, +) +from .test_mlp import ( + get_tols, +) + +dtype = env.GLOBAL_PD_FLOAT_PRECISION + + +class TestDescrptSeAtten(unittest.TestCase, TestCaseSingleFrameWithNlist): + def setUp(self): + TestCaseSingleFrameWithNlist.setUp(self) + + def test_consistency( + self, + ): + rng = np.random.default_rng(100) + nf, nloc, nnei = self.nlist.shape + davg = rng.normal(size=(self.nt, nnei, 4)) + dstd = rng.normal(size=(self.nt, nnei, 4)) + dstd = 0.1 + np.abs(dstd) + + for idt, sm, to, tm, prec, ect in itertools.product( + [False, True], # resnet_dt + [False, True], # smooth_type_embedding + [False, True], # type_one_side + ["concat", "strip"], # tebd_input_mode + [ + "float64", + ], # precision + [False, True], # use_econf_tebd + ): + dtype = PRECISION_DICT[prec] + rtol, atol = get_tols(prec) + err_msg = f"idt={idt} prec={prec}" + + # dpa1 new impl + dd0 = DescrptDPA1( + self.rcut, + self.rcut_smth, + self.sel_mix, + self.nt, + attn_layer=2, + precision=prec, + resnet_dt=idt, + smooth_type_embedding=sm, + type_one_side=to, + tebd_input_mode=tm, + use_econf_tebd=ect, + type_map=["O", "H"] if ect else None, + seed=GLOBAL_SEED, + ).to(env.DEVICE) + dd0.se_atten.mean = paddle.to_tensor(davg, dtype=dtype).to( + device=env.DEVICE + ) + dd0.se_atten.stddev = paddle.to_tensor(dstd, dtype=dtype).to( + device=env.DEVICE + ) + rd0, _, _, _, _ = dd0( + paddle.to_tensor(self.coord_ext, dtype=dtype).to(device=env.DEVICE), + paddle.to_tensor(self.atype_ext, dtype="int64").to(device=env.DEVICE), + paddle.to_tensor(self.nlist, dtype="int64").to(device=env.DEVICE), + ) + # serialization + dd1 = DescrptDPA1.deserialize(dd0.serialize()) + rd1, _, _, _, _ = dd1( + paddle.to_tensor(self.coord_ext, dtype=dtype).to(device=env.DEVICE), + paddle.to_tensor(self.atype_ext, dtype="int64").to(device=env.DEVICE), + paddle.to_tensor(self.nlist, dtype="int64").to(device=env.DEVICE), + ) + np.testing.assert_allclose( + rd0.detach().cpu().numpy(), + rd1.detach().cpu().numpy(), + rtol=rtol, + atol=atol, + err_msg=err_msg, + ) + # dp impl + dd2 = DPDescrptDPA1.deserialize(dd0.serialize()) + rd2, _, _, _, _ = dd2.call( + self.coord_ext, + self.atype_ext, + self.nlist, + ) + np.testing.assert_allclose( + rd0.detach().cpu().numpy(), + rd2, + rtol=rtol, + atol=atol, + err_msg=err_msg, + ) + + def test_jit( + self, + ): + rng = np.random.default_rng(GLOBAL_SEED) + nf, nloc, nnei = 
self.nlist.shape + davg = rng.normal(size=(self.nt, nnei, 4)) + dstd = rng.normal(size=(self.nt, nnei, 4)) + dstd = 0.1 + np.abs(dstd) + + for idt, prec, sm, to, tm, ect in itertools.product( + [ + False, + ], # resnet_dt + [ + "float64", + ], # precision + [False, True], # smooth_type_embedding + [ + False, + ], # type_one_side + ["concat", "strip"], # tebd_input_mode + [False, True], # use_econf_tebd + ): + dtype = PRECISION_DICT[prec] + rtol, atol = get_tols(prec) + err_msg = f"idt={idt} prec={prec}" + # dpa1 new impl + dd0 = DescrptDPA1( + self.rcut, + self.rcut_smth, + self.sel, + self.nt, + precision=prec, + resnet_dt=idt, + smooth_type_embedding=sm, + type_one_side=to, + tebd_input_mode=tm, + use_econf_tebd=ect, + type_map=["O", "H"] if ect else None, + seed=GLOBAL_SEED, + ) + dd0.se_atten.mean = paddle.to_tensor(davg, dtype=dtype).to( + device=env.DEVICE + ) + dd0.se_atten.dstd = paddle.to_tensor(dstd, dtype=dtype).to( + device=env.DEVICE + ) + # dd1 = DescrptDPA1.deserialize(dd0.serialize()) + model = paddle.jit.to_static(dd0) + # model = paddle.jit.to_static(dd1) diff --git a/source/tests/pd/model/test_env_mat.py b/source/tests/pd/model/test_env_mat.py index 7cbc698264..bbdb7c75a3 100644 --- a/source/tests/pd/model/test_env_mat.py +++ b/source/tests/pd/model/test_env_mat.py @@ -22,7 +22,7 @@ class TestCaseSingleFrameWithNlist: - def setUp(self): + def setUp(self) -> None: # nloc == 3, nall == 4 self.nloc = 3 self.nall = 4 @@ -155,12 +155,12 @@ def setUp(self): # to be merged with the tf test case class TestEnvMat(unittest.TestCase, TestCaseSingleFrameWithNlist): - def setUp(self): + def setUp(self) -> None: TestCaseSingleFrameWithNlist.setUp(self) def test_consistency( self, - ): + ) -> None: rng = np.random.default_rng(GLOBAL_SEED) nf, nloc, nnei = self.nlist.shape davg = rng.normal(size=(self.nt, nnei, 4)) diff --git a/source/tests/pd/model/test_forward_lower.py b/source/tests/pd/model/test_forward_lower.py index ac8d0f54fc..db6497b605 100644 --- a/source/tests/pd/model/test_forward_lower.py +++ b/source/tests/pd/model/test_forward_lower.py @@ -96,7 +96,7 @@ def test( mixed_types=self.model.mixed_types(), box=cell.unsqueeze(0), ) - extended_spin = decomp.take_along_axis( + extended_spin = paddle.take_along_axis( spin.unsqueeze(0), indices=mapping.unsqueeze(-1).tile((1, 1, 3)), axis=1 ) input_dict = { @@ -146,7 +146,6 @@ def setUp(self): self.model = get_model(model_params).to(env.DEVICE) -@unittest.skip("Skip for not implemented yet") class TestEnergyModelDPA1(unittest.TestCase, ForwardLowerTest): def setUp(self): self.prec = 1e-10 diff --git a/source/tests/pd/model/test_null_input.py b/source/tests/pd/model/test_null_input.py index 9bf0860265..5d67491943 100644 --- a/source/tests/pd/model/test_null_input.py +++ b/source/tests/pd/model/test_null_input.py @@ -22,6 +22,7 @@ eval_model, ) from .test_permutation import ( + model_dpa1, model_se_e2_a, ) @@ -92,3 +93,10 @@ def setUp(self): model_params = copy.deepcopy(model_se_e2_a) self.type_split = False self.model = get_model(model_params).to(env.DEVICE) + + +class TestEnergyModelDPA1(unittest.TestCase, NullTest): + def setUp(self): + model_params = copy.deepcopy(model_dpa1) + self.type_split = True + self.model = get_model(model_params).to(env.DEVICE) diff --git a/source/tests/pd/model/test_permutation.py b/source/tests/pd/model/test_permutation.py index 8482ca7ffe..4543348d3b 100644 --- a/source/tests/pd/model/test_permutation.py +++ b/source/tests/pd/model/test_permutation.py @@ -3,6 +3,7 @@ import os import unittest +import 
numpy as np import paddle from deepmd.pd.model.model import ( @@ -22,7 +23,6 @@ CUR_DIR = os.path.dirname(__file__) dtype = paddle.float64 -import numpy as np model_se_e2_a = { "type_map": ["O", "H", "B"], @@ -344,7 +344,7 @@ class PermutationTest: def test( self, - ): + ) -> None: natoms = 5 generator = paddle.seed(GLOBAL_SEED) cell = paddle.rand([3, 3], dtype=dtype) @@ -395,7 +395,7 @@ def test( class TestEnergyModelSeA(unittest.TestCase, PermutationTest): - def setUp(self): + def setUp(self) -> None: model_params = copy.deepcopy(model_se_e2_a) self.type_split = False self.model = get_model(model_params).to(env.DEVICE) @@ -403,15 +403,14 @@ def setUp(self): @unittest.skip("Skip for not implemented yet") class TestDOSModelSeA(unittest.TestCase, PermutationTest): - def setUp(self): + def setUp(self) -> None: model_params = copy.deepcopy(model_dos) self.type_split = False self.model = get_model(model_params).to(env.DEVICE) -@unittest.skip("Skip for not implemented yet") class TestEnergyModelDPA1(unittest.TestCase, PermutationTest): - def setUp(self): + def setUp(self) -> None: model_params = copy.deepcopy(model_dpa1) self.type_split = True self.model = get_model(model_params).to(env.DEVICE) @@ -419,7 +418,7 @@ def setUp(self): @unittest.skip("Skip for not implemented yet") class TestEnergyModelDPA2(unittest.TestCase, PermutationTest): - def setUp(self): + def setUp(self) -> None: model_params = copy.deepcopy(model_dpa2) self.type_split = True self.model = get_model(model_params).to(env.DEVICE) @@ -427,7 +426,7 @@ def setUp(self): @unittest.skip("Skip for not implemented yet") class TestForceModelDPA2(unittest.TestCase, PermutationTest): - def setUp(self): + def setUp(self) -> None: model_params = copy.deepcopy(model_dpa2) model_params["fitting_net"]["type"] = "direct_force_ener" self.type_split = True @@ -437,7 +436,7 @@ def setUp(self): @unittest.skip("Skip for not implemented yet") class TestEnergyModelHybrid(unittest.TestCase, PermutationTest): - def setUp(self): + def setUp(self) -> None: model_params = copy.deepcopy(model_hybrid) self.type_split = True self.model = get_model(model_params).to(env.DEVICE) @@ -445,7 +444,7 @@ def setUp(self): @unittest.skip("Skip for not implemented yet") class TestForceModelHybrid(unittest.TestCase, PermutationTest): - def setUp(self): + def setUp(self) -> None: model_params = copy.deepcopy(model_hybrid) model_params["fitting_net"]["type"] = "direct_force_ener" self.type_split = True @@ -455,7 +454,7 @@ def setUp(self): @unittest.skip("Skip for not implemented yet") class TestEnergyModelZBL(unittest.TestCase, PermutationTest): - def setUp(self): + def setUp(self) -> None: model_params = copy.deepcopy(model_zbl) self.type_split = False self.model = get_model(model_params).to(env.DEVICE) @@ -463,7 +462,7 @@ def setUp(self): @unittest.skip("Skip for not implemented yet") class TestEnergyModelSpinSeA(unittest.TestCase, PermutationTest): - def setUp(self): + def setUp(self) -> None: model_params = copy.deepcopy(model_spin) self.type_split = False self.test_spin = True diff --git a/source/tests/pd/model/test_permutation_denoise.py b/source/tests/pd/model/test_permutation_denoise.py new file mode 100644 index 0000000000..a0de541f0b --- /dev/null +++ b/source/tests/pd/model/test_permutation_denoise.py @@ -0,0 +1,109 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import copy +import unittest + +import numpy as np +import paddle + +from deepmd.pd.model.model import ( + get_model, +) +from deepmd.pd.utils import ( + env, +) +from deepmd.pd.utils.utils import ( + 
get_generator, +) + +from ...seed import ( + GLOBAL_SEED, +) +from ..common import ( + eval_model, +) +from .test_permutation import ( # model_dpau, + model_dpa1, + model_dpa2, + model_hybrid, +) + +dtype = paddle.float64 + +model_dpa1 = copy.deepcopy(model_dpa1) +model_dpa2 = copy.deepcopy(model_dpa2) +model_hybrid = copy.deepcopy(model_hybrid) +model_dpa1["type_map"] = ["O", "H", "B", "MASKED_TOKEN"] +model_dpa1.pop("fitting_net") +model_dpa2["type_map"] = ["O", "H", "B", "MASKED_TOKEN"] +model_dpa2.pop("fitting_net") +model_hybrid["type_map"] = ["O", "H", "B", "MASKED_TOKEN"] +model_hybrid.pop("fitting_net") + + +class PermutationDenoiseTest: + def test( + self, + ) -> None: + generator = get_generator(GLOBAL_SEED) + natoms = 5 + cell = paddle.rand([3, 3], dtype=dtype).to(env.DEVICE) + cell = (cell + cell.T) + 5.0 * paddle.eye(3).to(env.DEVICE) + coord = paddle.rand([natoms, 3], dtype=dtype).to(env.DEVICE) + coord = paddle.matmul(coord, cell) + atype = paddle.to_tensor([0, 0, 0, 1, 1]).to(env.DEVICE) + idx_perm = [1, 0, 4, 3, 2] + updated_c0, logits0 = eval_model( + self.model, coord.unsqueeze(0), cell.unsqueeze(0), atype, denoise=True + ) + ret0 = {"updated_coord": updated_c0.squeeze(0), "logits": logits0.squeeze(0)} + updated_c1, logits1 = eval_model( + self.model, + coord[idx_perm].unsqueeze(0), + cell.unsqueeze(0), + atype[idx_perm], + denoise=True, + ) + ret1 = {"updated_coord": updated_c1.squeeze(0), "logits": logits1.squeeze(0)} + prec = 1e-10 + np.testing.assert_allclose( + ret0["updated_coord"][idx_perm].numpy(), + ret1["updated_coord"].numpy(), + rtol=prec, + atol=prec, + ) + np.testing.assert_allclose( + ret0["logits"][idx_perm].numpy(), + ret1["logits"].numpy(), + rtol=prec, + atol=prec, + ) + + +@unittest.skip("support of the denoise is temporally disabled") +class TestDenoiseModelDPA1(unittest.TestCase, PermutationDenoiseTest): + def setUp(self) -> None: + model_params = copy.deepcopy(model_dpa1) + self.type_split = True + self.model = get_model(model_params).to(env.DEVICE) + + +@unittest.skip("support of the denoise is temporally disabled") +class TestDenoiseModelDPA2(unittest.TestCase, PermutationDenoiseTest): + def setUp(self) -> None: + model_params = copy.deepcopy(model_dpa2) + self.type_split = True + self.model = get_model( + model_params, + ).to(env.DEVICE) + + +# @unittest.skip("hybrid not supported at the moment") +# class TestDenoiseModelHybrid(unittest.TestCase, TestPermutationDenoise): +# def setUp(self): +# model_params = copy.deepcopy(model_hybrid_denoise) +# self.type_split = True +# self.model = get_model(model_params).to(env.DEVICE) + + +if __name__ == "__main__": + unittest.main() diff --git a/source/tests/pd/model/test_rot.py b/source/tests/pd/model/test_rot.py index 4d59117560..85c90dc60f 100644 --- a/source/tests/pd/model/test_rot.py +++ b/source/tests/pd/model/test_rot.py @@ -169,7 +169,6 @@ def setUp(self): self.model = get_model(model_params).to(env.DEVICE) -@unittest.skip("Skip for not implemented yet") class TestEnergyModelDPA1(unittest.TestCase, RotTest): def setUp(self): model_params = copy.deepcopy(model_dpa1) diff --git a/source/tests/pd/model/test_rot_denoise.py b/source/tests/pd/model/test_rot_denoise.py new file mode 100644 index 0000000000..74d5d41791 --- /dev/null +++ b/source/tests/pd/model/test_rot_denoise.py @@ -0,0 +1,124 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import copy +import unittest + +import numpy as np +import paddle + +from deepmd.pd.model.model import ( + get_model, +) +from deepmd.pd.utils import ( + env, +) + 
+from ...seed import ( + GLOBAL_SEED, +) +from ..common import ( + eval_model, +) +from .test_permutation_denoise import ( # model_dpa2, + model_dpa1, +) + +dtype = paddle.float64 + + +class RotDenoiseTest: + def test( + self, + ): + generator = paddle.seed(GLOBAL_SEED) + prec = 1e-10 + natoms = 5 + cell = 10.0 * paddle.eye(3, dtype=dtype).to(env.DEVICE) + coord = 2 * paddle.rand([natoms, 3], dtype=dtype).to(device=env.DEVICE) + shift = paddle.to_tensor([4, 4, 4], dtype=dtype).to(env.DEVICE) + atype = paddle.to_tensor([0, 0, 0, 1, 1]).to(env.DEVICE) + from scipy.stats import ( + special_ortho_group, + ) + + rmat = paddle.to_tensor(special_ortho_group.rvs(3), dtype=dtype).to(env.DEVICE) + + # rotate only coord and shift to the center of cell + coord_rot = paddle.matmul(coord, rmat) + update_c0, logits0 = eval_model( + self.model, + (coord + shift).unsqueeze(0), + cell.unsqueeze(0), + atype, + denoise=True, + ) + update_c0 = update_c0 - (coord + shift).unsqueeze(0) + ret0 = {"updated_coord": update_c0.squeeze(0), "logits": logits0.squeeze(0)} + update_c1, logits1 = eval_model( + self.model, + (coord_rot + shift).unsqueeze(0), + cell.unsqueeze(0), + atype, + denoise=True, + ) + update_c1 = update_c1 - (coord_rot + shift).unsqueeze(0) + ret1 = {"updated_coord": update_c1.squeeze(0), "logits": logits1.squeeze(0)} + np.testing.assert_allclose( + paddle.matmul(ret0["updated_coord"], rmat).numpy(), + ret1["updated_coord"].numpy(), + rtol=prec, + atol=prec, + ) + np.testing.assert_allclose( + ret0["logits"].numpy(), ret1["logits"].numpy(), rtol=prec, atol=prec + ) + + # rotate coord and cell + paddle.seed(0) + cell = paddle.rand([3, 3], dtype=dtype).to(env.DEVICE) + cell = (cell + cell.T) + 5.0 * paddle.eye(3).to(env.DEVICE) + coord = paddle.rand([natoms, 3], dtype=dtype).to(env.DEVICE) + coord = paddle.matmul(coord, cell) + atype = paddle.to_tensor([0, 0, 0, 1, 1]).to(env.DEVICE) + coord_rot = paddle.matmul(coord, rmat) + cell_rot = paddle.matmul(cell, rmat) + update_c0, logits0 = eval_model( + self.model, coord.unsqueeze(0), cell.unsqueeze(0), atype, denoise=True + ) + ret0 = {"updated_coord": update_c0.squeeze(0), "logits": logits0.squeeze(0)} + update_c1, logits1 = eval_model( + self.model, + coord_rot.unsqueeze(0), + cell_rot.unsqueeze(0), + atype, + denoise=True, + ) + ret1 = {"updated_coord": update_c1.squeeze(0), "logits": logits1.squeeze(0)} + np.testing.assert_allclose( + ret0["logits"].numpy(), ret1["logits"].numpy(), rtol=prec, atol=prec + ) + np.testing.assert_allclose( + paddle.matmul(ret0["updated_coord"], rmat).numpy(), + ret1["updated_coord"].numpy(), + rtol=prec, + atol=prec, + ) + + +@unittest.skip("support of the denoise is temporally disabled") +class TestDenoiseModelDPA1(unittest.TestCase, RotDenoiseTest): + def setUp(self): + model_params = copy.deepcopy(model_dpa1) + self.type_split = True + self.model = get_model(model_params).to(env.DEVICE) + + +# @unittest.skip("hybrid not supported at the moment") +# class TestEnergyModelHybrid(unittest.TestCase, TestRotDenoise): +# def setUp(self): +# model_params = copy.deepcopy(model_hybrid_denoise) +# self.type_split = True +# self.model = get_model(model_params).to(env.DEVICE) + + +if __name__ == "__main__": + unittest.main() diff --git a/source/tests/pd/model/test_saveload_dpa1.py b/source/tests/pd/model/test_saveload_dpa1.py new file mode 100644 index 0000000000..54a82e479a --- /dev/null +++ b/source/tests/pd/model/test_saveload_dpa1.py @@ -0,0 +1,144 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import copy +import json 
+import os +import unittest +from pathlib import ( + Path, +) + +import numpy as np +import paddle +from paddle.io import ( + DataLoader, +) + +from deepmd.pd.loss import ( + EnergyStdLoss, +) +from deepmd.pd.model.model import ( + get_model, +) +from deepmd.pd.train.wrapper import ( + ModelWrapper, +) +from deepmd.pd.utils import ( + env, +) +from deepmd.pd.utils.dataloader import ( + BufferedIterator, + DpLoaderSet, +) +from deepmd.pd.utils.stat import ( + make_stat_input, +) +from deepmd.tf.common import ( + expand_sys_str, +) + + +def get_dataset(config): + model_config = config["model"] + rcut = model_config["descriptor"]["rcut"] + sel = model_config["descriptor"]["sel"] + systems = config["training"]["validation_data"]["systems"] + if isinstance(systems, str): + systems = expand_sys_str(systems) + batch_size = config["training"]["training_data"]["batch_size"] + type_map = model_config["type_map"] + + dataset = DpLoaderSet(systems, batch_size, type_map) + data_stat_nbatch = model_config.get("data_stat_nbatch", 10) + sampled = make_stat_input(dataset.systems, dataset.dataloaders, data_stat_nbatch) + return dataset, sampled + + +class TestSaveLoadDPA1(unittest.TestCase): + def setUp(self): + input_json = str(Path(__file__).parent / "water/se_atten.json") + with open(input_json) as fin: + self.config = json.load(fin) + self.config["loss"]["starter_learning_rate"] = self.config["learning_rate"][ + "start_lr" + ] + data_file = [str(Path(__file__).parent / "water/data/data_0")] + self.config["training"]["training_data"]["systems"] = data_file + self.config["training"]["validation_data"]["systems"] = data_file + self.dataset, self.sampled = get_dataset(self.config) + self.training_dataloader = DataLoader( + self.dataset, + batch_sampler=paddle.io.BatchSampler( + sampler=paddle.io.RandomSampler(self.dataset), + drop_last=False, + ), + num_workers=0, # setting to 0 diverges the behavior of its iterator; should be >=1 + collate_fn=lambda x: x[0], + ) + device = paddle.get_device() + paddle.set_device("cpu") + self.training_data = BufferedIterator(iter(self.training_dataloader)) + paddle.set_device(device) + self.loss = EnergyStdLoss(**self.config["loss"]) + self.cur_lr = 1 + self.task_key = "Default" + self.input_dict, self.label_dict = self.get_data() + self.start_lr = self.config["learning_rate"]["start_lr"] + + def get_model_result(self, read=False, model_file="tmp_model.pd"): + wrapper = self.create_wrapper(read) + optimizer = paddle.optimizer.Adam( + learning_rate=self.start_lr, parameters=wrapper.parameters() + ) + optimizer.clear_grad() + if read: + wrapper.set_state_dict(paddle.load(model_file)) + os.remove(model_file) + else: + paddle.save(wrapper.state_dict(), model_file) + result = wrapper( + **self.input_dict, + cur_lr=self.cur_lr, + label=self.label_dict, + task_key=self.task_key, + )[0] + return result + + def create_wrapper(self, read: bool): + model_config = copy.deepcopy(self.config["model"]) + model_config["resuming"] = read + model_config["stat_file_dir"] = "stat_files" + model_config["stat_file"] = "stat.hdf5" + model_config["stat_file_path"] = os.path.join( + model_config["stat_file_dir"], model_config["stat_file"] + ) + model = get_model(model_config).to(env.DEVICE) + return ModelWrapper(model, self.loss) + + def get_data(self): + try: + batch_data = next(iter(self.training_data)) + except StopIteration: + # Refresh the status of the dataloader to start from a new epoch + self.training_data = BufferedIterator(iter(self.training_dataloader)) + batch_data = 
next(iter(self.training_data)) + input_dict = {} + for item in ["coord", "atype", "box"]: + if item in batch_data: + input_dict[item] = batch_data[item].to(env.DEVICE) + else: + input_dict[item] = None + label_dict = {} + for item in ["energy", "force", "virial"]: + if item in batch_data: + label_dict[item] = batch_data[item].to(env.DEVICE) + return input_dict, label_dict + + def test_saveload(self): + result1 = self.get_model_result() + result2 = self.get_model_result(read=True) + for item in result1: + np.testing.assert_allclose(result1[item].numpy(), result2[item].numpy()) + + +if __name__ == "__main__": + unittest.main() diff --git a/source/tests/pd/model/test_smooth.py b/source/tests/pd/model/test_smooth.py index 7f77a6f188..cc50043ad8 100644 --- a/source/tests/pd/model/test_smooth.py +++ b/source/tests/pd/model/test_smooth.py @@ -19,6 +19,7 @@ eval_model, ) from .test_permutation import ( # model_dpau, + model_dpa1, model_se_e2_a, ) @@ -153,6 +154,41 @@ def setUp(self): self.epsilon, self.aprec = None, None +class TestEnergyModelDPA1(unittest.TestCase, SmoothTest): + def setUp(self): + model_params = copy.deepcopy(model_dpa1) + self.type_split = True + self.model = get_model(model_params).to(env.DEVICE) + # less degree of smoothness, + # error can be systematically removed by reducing epsilon + self.epsilon = 1e-5 + self.aprec = 1e-5 + + +class TestEnergyModelDPA1Excl1(unittest.TestCase, SmoothTest): + def setUp(self): + model_params = copy.deepcopy(model_dpa1) + model_params["pair_exclude_types"] = [[0, 1]] + self.type_split = True + self.model = get_model(model_params).to(env.DEVICE) + # less degree of smoothness, + # error can be systematically removed by reducing epsilon + self.epsilon = 1e-5 + self.aprec = 1e-5 + + +class TestEnergyModelDPA1Excl12(unittest.TestCase, SmoothTest): + def setUp(self): + model_params = copy.deepcopy(model_dpa1) + model_params["pair_exclude_types"] = [[0, 1], [0, 2]] + self.type_split = True + self.model = get_model(model_params).to(env.DEVICE) + # less degree of smoothness, + # error can be systematically removed by reducing epsilon + self.epsilon = 1e-5 + self.aprec = 1e-5 + + # class TestEnergyFoo(unittest.TestCase): # def test(self): # model_params = model_dpau diff --git a/source/tests/pd/model/test_trans.py b/source/tests/pd/model/test_trans.py index f69d2f5b83..3fae49d598 100644 --- a/source/tests/pd/model/test_trans.py +++ b/source/tests/pd/model/test_trans.py @@ -103,7 +103,6 @@ def setUp(self): self.model = get_model(model_params).to(env.DEVICE) -@unittest.skip("Skip for not implemented yet") class TestEnergyModelDPA1(unittest.TestCase, TransTest): def setUp(self): model_params = copy.deepcopy(model_dpa1) diff --git a/source/tests/pd/model/test_trans_denoise.py b/source/tests/pd/model/test_trans_denoise.py new file mode 100644 index 0000000000..8317d4d2ae --- /dev/null +++ b/source/tests/pd/model/test_trans_denoise.py @@ -0,0 +1,95 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import copy +import unittest + +import numpy as np +import paddle + +from deepmd.pd.model.model import ( + get_model, +) +from deepmd.pd.utils import ( + env, +) + +from ...seed import ( + GLOBAL_SEED, +) +from ..common import ( + eval_model, +) +from .test_permutation_denoise import ( + model_dpa1, + model_dpa2, + model_hybrid, +) + +dtype = paddle.float64 + + +class TransDenoiseTest: + def test( + self, + ): + natoms = 5 + generator = paddle.seed(GLOBAL_SEED) + cell = paddle.rand([3, 3], dtype=dtype).to(env.DEVICE) + cell = (cell + cell.T) + 5.0 * 
paddle.eye(3).to(env.DEVICE) + coord = paddle.rand([natoms, 3], dtype=dtype).to(env.DEVICE) + coord = paddle.matmul(coord, cell) + atype = paddle.to_tensor([0, 0, 0, 1, 1]).to(env.DEVICE) + shift = (paddle.rand([3], dtype=dtype) - 0.5).to(env.DEVICE) * 2.0 + coord_s = paddle.matmul( + paddle.remainder( + paddle.matmul(coord + shift, paddle.linalg.inv(cell)), 1.0 + ), + cell, + ) + updated_c0, logits0 = eval_model( + self.model, coord.unsqueeze(0), cell.unsqueeze(0), atype, denoise=True + ) + updated_c0 = updated_c0 - coord.unsqueeze(0) + ret0 = {"updated_coord": updated_c0.squeeze(0), "logits": logits0.squeeze(0)} + updated_c1, logits1 = eval_model( + self.model, coord_s.unsqueeze(0), cell.unsqueeze(0), atype, denoise=True + ) + updated_c1 = updated_c1 - coord_s.unsqueeze(0) + ret1 = {"updated_coord": updated_c1.squeeze(0), "logits": logits1.squeeze(0)} + prec = 1e-10 + np.testing.assert_allclose( + ret0["updated_coord"].numpy(), + ret1["updated_coord"].numpy(), + rtol=prec, + atol=prec, + ) + np.testing.assert_allclose( + ret0["logits"].numpy(), ret1["logits"].numpy(), rtol=prec, atol=prec + ) + + +@unittest.skip("support of the denoise is temporally disabled") +class TestDenoiseModelDPA1(unittest.TestCase, TransDenoiseTest): + def setUp(self): + model_params = copy.deepcopy(model_dpa1) + self.type_split = True + self.model = get_model(model_params).to(env.DEVICE) + + +@unittest.skip("support of the denoise is temporally disabled") +class TestDenoiseModelDPA2(unittest.TestCase, TransDenoiseTest): + def setUp(self): + model_params = copy.deepcopy(model_dpa2) + self.type_split = True + self.model = get_model(model_params).to(env.DEVICE) + + +@unittest.skip("hybrid not supported at the moment") +class TestDenoiseModelHybrid(unittest.TestCase, TransDenoiseTest): + def setUp(self): + model_params = copy.deepcopy(model_hybrid) + self.type_split = True + self.model = get_model(model_params).to(env.DEVICE) + + +if __name__ == "__main__": + unittest.main() diff --git a/source/tests/pd/model/water/multitask_sharefit.json b/source/tests/pd/model/water/multitask_sharefit.json new file mode 100644 index 0000000000..246b5992f7 --- /dev/null +++ b/source/tests/pd/model/water/multitask_sharefit.json @@ -0,0 +1,134 @@ +{ + "model": { + "shared_dict": { + "my_type_map": [ + "O", + "H", + "B" + ], + "my_descriptor": { + "type": "se_e2_a", + "sel": [ + 46, + 92, + 4 + ], + "rcut_smth": 0.50, + "rcut": 6.00, + "neuron": [ + 25, + 50, + 100 + ], + "resnet_dt": false, + "axis_neuron": 16, + "seed": 1, + "_comment": " that's all" + }, + "my_fitting": { + "dim_case_embd": 2, + "neuron": [ + 240, + 240, + 240 + ], + "resnet_dt": true, + "seed": 1, + "_comment": " that's all" + }, + "_comment": "that's all" + }, + "model_dict": { + "model_1": { + "type_map": "my_type_map", + "descriptor": "my_descriptor", + "fitting_net": "my_fitting", + "data_stat_nbatch": 1 + }, + "model_2": { + "type_map": "my_type_map", + "descriptor": "my_descriptor", + "fitting_net": "my_fitting", + "data_stat_nbatch": 1 + } + } + }, + "learning_rate": { + "type": "exp", + "decay_steps": 5000, + "start_lr": 0.0002, + "decay_rate": 0.98, + "stop_lr": 3.51e-08, + "_comment": "that's all" + }, + "loss_dict": { + "model_1": { + "type": "ener", + "start_pref_e": 0.02, + "limit_pref_e": 1, + "start_pref_f": 1000, + "limit_pref_f": 1, + "start_pref_v": 0, + "limit_pref_v": 0 + }, + "model_2": { + "type": "ener", + "start_pref_e": 0.02, + "limit_pref_e": 1, + "start_pref_f": 1000, + "limit_pref_f": 1, + "start_pref_v": 0, + "limit_pref_v": 0 + } + }, 
+ "training": { + "model_prob": { + "model_1": 0.5, + "model_2": 0.5 + }, + "data_dict": { + "model_1": { + "stat_file": "./stat_files/model_1.hdf5", + "training_data": { + "systems": [ + "pt/water/data/data_0" + ], + "batch_size": 1, + "_comment": "that's all" + }, + "validation_data": { + "systems": [ + "pt/water/data/data_0" + ], + "batch_size": 1, + "_comment": "that's all" + } + }, + "model_2": { + "stat_file": "./stat_files/model_2.hdf5", + "training_data": { + "systems": [ + "pt/water/data/data_0" + ], + "batch_size": 1, + "_comment": "that's all" + }, + "validation_data": { + "systems": [ + "pt/water/data/data_0" + ], + "batch_size": 1, + "_comment": "that's all" + } + } + }, + "numb_steps": 100000, + "warmup_steps": 0, + "gradient_max_norm": 5.0, + "seed": 10, + "disp_file": "lcurve.out", + "disp_freq": 100, + "save_freq": 100, + "_comment": "that's all" + } +} diff --git a/source/tests/pd/test_decomp.py b/source/tests/pd/test_decomp.py index d8439ad994..c554083bda 100644 --- a/source/tests/pd/test_decomp.py +++ b/source/tests/pd/test_decomp.py @@ -17,50 +17,6 @@ class TestDecomp(unittest.TestCase): def setUp(self): paddle.seed(GLOBAL_SEED) - def test_softmax_decomp(self): - raw_api = paddle.nn.functional.softmax - decomp_api = decomp.softmax - - raw_input = paddle.randn([100, 100], "float32") - raw_output = raw_api(raw_input) - decomp_output = decomp_api(raw_input) - - np.testing.assert_allclose( - raw_output.numpy(), - decomp_output.numpy(), - 1e-6, - 1e-8, - ) - - def test_norm_decomp(self): - raw_api = paddle.linalg.norm - decomp_api = decomp.norm - - raw_input = paddle.randn([100, 100], "float32") - raw_output = raw_api(raw_input, p=2, axis=-1) - decomp_output = decomp_api(raw_input, p=2, axis=-1) - - np.testing.assert_allclose( - raw_output.numpy(), - decomp_output.numpy(), - 1e-5, - 1e-8, - ) - - def test_take_along_axis_decomp(self): - raw_api = paddle.take_along_axis - decomp_api = decomp.take_along_axis - - raw_input = paddle.randn([100, 100], "float32") - raw_indices = paddle.randint(0, 100, [100, 2]) - raw_output = raw_api(raw_input, raw_indices, axis=-1) - decomp_output = decomp_api(raw_input, raw_indices, axis=-1) - - np.testing.assert_equal( - raw_output.numpy(), - decomp_output.numpy(), - ) - def test_scatter_reduce_decomp(self): raw_api = paddle.put_along_axis decomp_api = decomp.scatter_reduce @@ -112,20 +68,3 @@ def test_masked_add_(self): raw_output.numpy(), raw_input.numpy(), # inplace ) - - def test_normalize_decomp(self): - raw_api = paddle.nn.functional.normalize - decomp_api = decomp.normalize_decomp - - raw_input = paddle.randn([100, 100], "float32") - axis = -1 - - raw_output = raw_api(raw_input, p=2, axis=axis) - decomp_output = decomp_api(raw_input, p=2, axis=axis) - - np.testing.assert_allclose( - raw_output.numpy(), - decomp_output.numpy(), # inplace - 1e-5, - 1e-8, - ) diff --git a/source/tests/pd/test_finetune.py b/source/tests/pd/test_finetune.py index 2c6cca83aa..f82f7a8cd0 100644 --- a/source/tests/pd/test_finetune.py +++ b/source/tests/pd/test_finetune.py @@ -341,7 +341,6 @@ def setUp(self): self.testkey = "dos" -@unittest.skip("Skip for not implemented yet") class TestEnergyModelDPA1(FinetuneTest, unittest.TestCase): def setUp(self): input_json = str(Path(__file__).parent / "water/se_atten.json") diff --git a/source/tests/pd/test_multitask.py b/source/tests/pd/test_multitask.py index 65210d07b3..d59990dcca 100644 --- a/source/tests/pd/test_multitask.py +++ b/source/tests/pd/test_multitask.py @@ -29,23 +29,24 @@ ) from .model.test_permutation 
import ( + model_dpa1, model_se_e2_a, ) -def setUpModule(): +def setUpModule() -> None: global multitask_template multitask_template_json = str(Path(__file__).parent / "water/multitask.json") with open(multitask_template_json) as f: multitask_template = json.load(f) -@unittest.skip("Skip until solving cuda error 709 in jit.save") class MultiTaskTrainTest: - def test_multitask_train(self): + def test_multitask_train(self) -> None: # test multitask training self.config = update_deepmd_input(self.config, warning=True) self.config = normalize(self.config, multi_task=True) + self.share_fitting = getattr(self, "share_fitting", False) trainer = get_trainer(deepcopy(self.config), shared_links=self.shared_links) trainer.run() # check model keys @@ -60,7 +61,12 @@ def test_multitask_train(self): self.assertIn(state_key.replace("model_1", "model_2"), multi_state_dict) if "model_2" in state_key: self.assertIn(state_key.replace("model_2", "model_1"), multi_state_dict) - if "model_1.descriptor" in state_key: + if ("model_1.atomic_model.descriptor" in state_key) or ( + self.share_fitting + and "model_1.atomic_model.fitting_net" in state_key + and "fitting_net.bias_atom_e" not in state_key + and "fitting_net.case_embd" not in state_key + ): np.testing.assert_allclose( multi_state_dict[state_key].numpy(), multi_state_dict[state_key.replace("model_1", "model_2")].numpy(), @@ -172,7 +178,7 @@ def test_multitask_train(self): trainer_finetune.run() self.tearDown() - def tearDown(self): + def tearDown(self) -> None: for f in os.listdir("."): if f.startswith("model") and f.endswith(".pd"): os.remove(f) @@ -182,9 +188,8 @@ def tearDown(self): shutil.rmtree(f) -@unittest.skip("Skip until solving cuda error 709 in jit.save") class TestMultiTaskSeA(unittest.TestCase, MultiTaskTrainTest): - def setUp(self): + def setUp(self) -> None: multitask_se_e2_a = deepcopy(multitask_template) multitask_se_e2_a["model"]["shared_dict"]["my_descriptor"] = model_se_e2_a[ "descriptor" @@ -222,5 +227,44 @@ def tearDown(self) -> None: MultiTaskTrainTest.tearDown(self) +class TestMultiTaskDPA1(unittest.TestCase, MultiTaskTrainTest): + def setUp(self) -> None: + multitask_DPA1 = deepcopy(multitask_template) + multitask_DPA1["model"]["shared_dict"]["my_descriptor"] = model_dpa1[ + "descriptor" + ] + data_file = [str(Path(__file__).parent / "water/data/data_0")] + self.stat_files = "DPA1" + os.makedirs(self.stat_files, exist_ok=True) + self.config = multitask_DPA1 + self.config["training"]["data_dict"]["model_1"]["training_data"]["systems"] = ( + data_file + ) + self.config["training"]["data_dict"]["model_1"]["validation_data"][ + "systems" + ] = data_file + self.config["training"]["data_dict"]["model_1"]["stat_file"] = ( + f"{self.stat_files}/model_1" + ) + self.config["training"]["data_dict"]["model_2"]["training_data"]["systems"] = ( + data_file + ) + self.config["training"]["data_dict"]["model_2"]["validation_data"][ + "systems" + ] = data_file + self.config["training"]["data_dict"]["model_2"]["stat_file"] = ( + f"{self.stat_files}/model_2" + ) + self.config["training"]["numb_steps"] = 1 + self.config["training"]["save_freq"] = 1 + self.origin_config = deepcopy(self.config) + self.config["model"], self.shared_links = preprocess_shared_params( + self.config["model"] + ) + + def tearDown(self) -> None: + MultiTaskTrainTest.tearDown(self) + + if __name__ == "__main__": unittest.main() diff --git a/source/tests/pd/test_training.py b/source/tests/pd/test_training.py index d4e7309a65..c3d65c09df 100644 --- a/source/tests/pd/test_training.py 
+++ b/source/tests/pd/test_training.py @@ -15,17 +15,21 @@ from deepmd.pd.entrypoints.main import ( get_trainer, ) +from deepmd.pd.utils.env import ( + enable_prim, +) from deepmd.pd.utils.finetune import ( get_finetune_rules, ) from .model.test_permutation import ( + model_dpa1, model_se_e2_a, ) class DPTrainTest: - def test_dp_train(self): + def test_dp_train(self) -> None: # test training from scratch trainer = get_trainer(deepcopy(self.config)) trainer.run() @@ -95,7 +99,7 @@ def test_dp_train(self): trainer_finetune_empty.run() trainer_finetune_random.run() - def test_trainable(self): + def test_trainable(self) -> None: fix_params = deepcopy(self.config) fix_params["model"]["descriptor"]["trainable"] = False fix_params["model"]["fitting_net"]["trainable"] = False @@ -124,7 +128,7 @@ def test_trainable(self): model_dict_after_training[key].numpy(), ) - def tearDown(self): + def tearDown(self) -> None: for f in os.listdir("."): if f.startswith("model") and f.endswith(".pd"): os.remove(f) @@ -135,7 +139,7 @@ def tearDown(self): class TestEnergyModelSeA(unittest.TestCase, DPTrainTest): - def setUp(self): + def setUp(self) -> None: input_json = str(Path(__file__).parent / "water/se_atten.json") with open(input_json) as f: self.config = json.load(f) @@ -145,6 +149,9 @@ def setUp(self): self.config["model"] = deepcopy(model_se_e2_a) self.config["training"]["numb_steps"] = 1 self.config["training"]["save_freq"] = 1 + # import paddle + enable_prim(True) + # assert paddle.framework.core._is_eager_prim_enabled() def tearDown(self) -> None: DPTrainTest.tearDown(self) @@ -153,7 +160,7 @@ def tearDown(self) -> None: class TestFparam(unittest.TestCase, DPTrainTest): """Test if `fparam` can be loaded correctly.""" - def setUp(self): + def setUp(self) -> None: input_json = str(Path(__file__).parent / "water/se_atten.json") with open(input_json) as f: self.config = json.load(f) @@ -172,5 +179,21 @@ def tearDown(self) -> None: DPTrainTest.tearDown(self) +class TestEnergyModelDPA1(unittest.TestCase, DPTrainTest): + def setUp(self) -> None: + input_json = str(Path(__file__).parent / "water/se_atten.json") + with open(input_json) as f: + self.config = json.load(f) + data_file = [str(Path(__file__).parent / "water/data/data_0")] + self.config["training"]["training_data"]["systems"] = data_file + self.config["training"]["validation_data"]["systems"] = data_file + self.config["model"] = deepcopy(model_dpa1) + self.config["training"]["numb_steps"] = 1 + self.config["training"]["save_freq"] = 1 + + def tearDown(self) -> None: + DPTrainTest.tearDown(self) + + if __name__ == "__main__": unittest.main() From 104fc365ed8d6cef0c0583be755cc4c0e961cbe9 Mon Sep 17 00:00:00 2001 From: Chun Cai Date: Wed, 18 Dec 2024 06:37:03 +0800 Subject: [PATCH 23/43] Perf: use fused Adam optimizer (#4463) This PR sets the Adam optimizer to use the `fused=True` parameter. For the profiling result shown below, this modification brings a 2.75x improvement in the optimizer update (22ms vs. 8ms) and an overall speedup of ~3% (922ms vs. 892ms). The benchmark case is training a DPA-2 Q3 release model. Please note that the absolute times may differ between steps.

Before

![image](https://github.com/user-attachments/assets/d6b05a1d-6e6c-478d-921f-c497718bc551)

After

![image](https://github.com/user-attachments/assets/b216b919-094c-441f-96a7-146e1e3db483)
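
As a rough way to reproduce the optimizer-update comparison outside a full training run, here is a minimal standalone sketch (not part of this patch; the parameter shapes are made up for illustration) that times the update step for both settings on synthetic tensors. It assumes a CUDA device, since the fused implementation targets GPU tensors:

```python
import torch

# Hypothetical stand-in for the parameters of a training wrapper.
params = [torch.randn(1024, 1024, device="cuda", requires_grad=True) for _ in range(16)]
for p in params:
    p.grad = torch.randn_like(p)  # pre-populate gradients; only the update is timed


def ms_per_step(fused: bool, steps: int = 100) -> float:
    opt = torch.optim.Adam(params, lr=1e-3, fused=fused)
    opt.step()  # warm-up: optimizer state is lazily initialized on the first call
    start = torch.cuda.Event(enable_timing=True)
    end = torch.cuda.Event(enable_timing=True)
    start.record()
    for _ in range(steps):
        opt.step()
    end.record()
    torch.cuda.synchronize()
    return start.elapsed_time(end) / steps  # average milliseconds per update


print(f"default (for-each) Adam: {ms_per_step(fused=False):.2f} ms/step")
print(f"fused Adam:              {ms_per_step(fused=True):.2f} ms/step")
```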

[Ref](https://pytorch.org/docs/stable/generated/torch.optim.Adam.html): > The foreach and fused implementations are typically faster than the for-loop, single-tensor implementation, with **fused being theoretically fastest** with both vertical and horizontal fusion. As such, if the user has not specified either flag (i.e., when foreach = fused = None), we will attempt defaulting to the foreach implementation when the tensors are all on CUDA. Why not fused? Since the fused implementation is relatively new, we want to give it sufficient bake-in time. ## Summary by CodeRabbit - **Bug Fixes** - Improved optimizer performance during training by modifying the initialization of the Adam optimizer. - **Documentation** - Updated method signature for clarity in the `Trainer` class. --- deepmd/pt/train/training.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deepmd/pt/train/training.py b/deepmd/pt/train/training.py index 72e84d577a..8ca510492c 100644 --- a/deepmd/pt/train/training.py +++ b/deepmd/pt/train/training.py @@ -579,7 +579,7 @@ def warm_up_linear(step, warmup_steps): # author: iProzd if self.opt_type == "Adam": self.optimizer = torch.optim.Adam( - self.wrapper.parameters(), lr=self.lr_exp.start_lr + self.wrapper.parameters(), lr=self.lr_exp.start_lr, fused=True ) if optimizer_state_dict is not None and self.restart_training: self.optimizer.load_state_dict(optimizer_state_dict) From c0914e1ebc18d3bb7bc76571bd18ca2f8c701586 Mon Sep 17 00:00:00 2001 From: Jia-Xin Zhu <53895049+ChiahsinChu@users.noreply.github.com> Date: Wed, 18 Dec 2024 13:25:33 +0800 Subject: [PATCH 24/43] feat (tf/pt): add atomic weights to tensor loss (#4466) Interfaces are of particular interest in many studies. However, the training-set configurations that represent an interface normally also include large parts of the bulk material. As a result, the final model tends to favor the bulk information, while the interfacial information is learnt less well. It is difficult to simply increase the proportion of interfaces in the configurations, since the electronic structure of an interface may only be reasonable with a certain thickness of bulk material. Therefore, I wonder whether it is possible to define weights for atomic quantities in the loss functions. This would allow us to assign higher weights to the atomic information in the regions of interest and probably make the model "more focused" on those regions. In this PR, I add the keyword `enable_atomic_weight` to the loss function of the tensor model. In principle, it could be generalised to any atomic quantity, e.g., atomic forces. I would like to know the developers' comments/suggestions about this feature. I can add support for other loss functions and finish the unit tests once we agree on this feature. Best. ## Summary by CodeRabbit - **New Features** - Introduced an optional parameter for atomic weights in loss calculations, enhancing flexibility in the `TensorLoss` class. - Added a suite of unit tests for the `TensorLoss` functionality, ensuring consistency between TensorFlow and PyTorch implementations. - **Bug Fixes** - Updated logic for local loss calculations to ensure correct application of atomic weights based on user input. - **Documentation** - Improved clarity of documentation for several function arguments, including the addition of a new argument related to atomic weights.
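
To make the new option concrete, a user-facing configuration could look like the hypothetical input fragment below (not part of this patch), assuming the standard `loss` section of a DeePMD-kit input file for a tensor fitting task; the per-atom weights are read from the training data and, per the new data requirement, default to 1.0 when absent:

```json
{
    "loss": {
        "type": "tensor",
        "pref": 0.0,
        "pref_atomic": 1.0,
        "enable_atomic_weight": true,
        "_comment": "e.g. upweight interfacial atoms relative to bulk atoms"
    }
}
```

With `enable_atomic_weight` left at its default of `false`, the weight factor is the constant 1.0 and the loss reduces to the previous behavior.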
--- deepmd/pt/loss/tensor.py | 22 ++ deepmd/tf/loss/tensor.py | 24 +- deepmd/utils/argcheck.py | 12 +- source/tests/pt/test_loss_tensor.py | 464 ++++++++++++++++++++++++++++ 4 files changed, 519 insertions(+), 3 deletions(-) create mode 100644 source/tests/pt/test_loss_tensor.py diff --git a/deepmd/pt/loss/tensor.py b/deepmd/pt/loss/tensor.py index 8f2f937a07..69b133de58 100644 --- a/deepmd/pt/loss/tensor.py +++ b/deepmd/pt/loss/tensor.py @@ -22,6 +22,7 @@ def __init__( pref_atomic: float = 0.0, pref: float = 0.0, inference=False, + enable_atomic_weight: bool = False, **kwargs, ) -> None: r"""Construct a loss for local and global tensors. @@ -40,6 +41,8 @@ def __init__( The prefactor of the weight of global loss. It should be larger than or equal to 0. inference : bool If true, it will output all losses found in output, ignoring the pre-factors. + enable_atomic_weight : bool + If true, atomic weight will be used in the loss calculation. **kwargs Other keyword arguments. """ @@ -50,6 +53,7 @@ def __init__( self.local_weight = pref_atomic self.global_weight = pref self.inference = inference + self.enable_atomic_weight = enable_atomic_weight assert ( self.local_weight >= 0.0 and self.global_weight >= 0.0 @@ -85,6 +89,12 @@ def forward(self, input_dict, model, label, natoms, learning_rate=0.0, mae=False """ model_pred = model(**input_dict) del learning_rate, mae + + if self.enable_atomic_weight: + atomic_weight = label["atom_weight"].reshape([-1, 1]) + else: + atomic_weight = 1.0 + loss = torch.zeros(1, dtype=env.GLOBAL_PT_FLOAT_PRECISION, device=env.DEVICE)[0] more_loss = {} if ( @@ -103,6 +113,7 @@ def forward(self, input_dict, model, label, natoms, learning_rate=0.0, mae=False diff = (local_tensor_pred - local_tensor_label).reshape( [-1, self.tensor_size] ) + diff = diff * atomic_weight if "mask" in model_pred: diff = diff[model_pred["mask"].reshape([-1]).bool()] l2_local_loss = torch.mean(torch.square(diff)) @@ -171,4 +182,15 @@ def label_requirement(self) -> list[DataRequirementItem]: high_prec=False, ) ) + if self.enable_atomic_weight: + label_requirement.append( + DataRequirementItem( + "atomic_weight", + ndof=1, + atomic=True, + must=False, + high_prec=False, + default=1.0, + ) + ) return label_requirement diff --git a/deepmd/tf/loss/tensor.py b/deepmd/tf/loss/tensor.py index aca9182ff6..d7f879b4b4 100644 --- a/deepmd/tf/loss/tensor.py +++ b/deepmd/tf/loss/tensor.py @@ -40,6 +40,7 @@ def __init__(self, jdata, **kwarg) -> None: # YWolfeee: modify, use pref / pref_atomic, instead of pref_weight / pref_atomic_weight self.local_weight = jdata.get("pref_atomic", None) self.global_weight = jdata.get("pref", None) + self.enable_atomic_weight = jdata.get("enable_atomic_weight", False) assert ( self.local_weight is not None and self.global_weight is not None @@ -66,9 +67,18 @@ def build(self, learning_rate, natoms, model_dict, label_dict, suffix): "global_loss": global_cvt_2_tf_float(0.0), } + if self.enable_atomic_weight: + atomic_weight = tf.reshape(label_dict["atom_weight"], [-1, 1]) + else: + atomic_weight = global_cvt_2_tf_float(1.0) + if self.local_weight > 0.0: + diff = tf.reshape(polar, [-1, self.tensor_size]) - tf.reshape( + atomic_polar_hat, [-1, self.tensor_size] + ) + diff = diff * atomic_weight local_loss = global_cvt_2_tf_float(find_atomic) * tf.reduce_mean( - tf.square(self.scale * (polar - atomic_polar_hat)), name="l2_" + suffix + tf.square(self.scale * diff), name="l2_" + suffix ) more_loss["local_loss"] = self.display_if_exist(local_loss, find_atomic) l2_loss += 
self.local_weight * local_loss @@ -163,4 +173,16 @@ def label_requirement(self) -> list[DataRequirementItem]: type_sel=self.type_sel, ) ) + if self.enable_atomic_weight: + data_requirements.append( + DataRequirementItem( + "atom_weight", + 1, + atomic=True, + must=False, + high_prec=False, + default=1.0, + type_sel=self.type_sel, + ) + ) return data_requirements diff --git a/deepmd/utils/argcheck.py b/deepmd/utils/argcheck.py index 5b57f15979..9eac0e804d 100644 --- a/deepmd/utils/argcheck.py +++ b/deepmd/utils/argcheck.py @@ -2511,8 +2511,9 @@ def loss_property(): def loss_tensor(): # doc_global_weight = "The prefactor of the weight of global loss. It should be larger than or equal to 0. If only `pref` is provided or both are not provided, training will be global mode, i.e. the shape of 'polarizability.npy` or `dipole.npy` should be #frams x [9 or 3]." # doc_local_weight = "The prefactor of the weight of atomic loss. It should be larger than or equal to 0. If only `pref_atomic` is provided, training will be atomic mode, i.e. the shape of `polarizability.npy` or `dipole.npy` should be #frames x ([9 or 3] x #selected atoms). If both `pref` and `pref_atomic` are provided, training will be combined mode, and atomic label should be provided as well." - doc_global_weight = "The prefactor of the weight of global loss. It should be larger than or equal to 0. If controls the weight of loss corresponding to global label, i.e. 'polarizability.npy` or `dipole.npy`, whose shape should be #frames x [9 or 3]. If it's larger than 0.0, this npy should be included." - doc_local_weight = "The prefactor of the weight of atomic loss. It should be larger than or equal to 0. If controls the weight of loss corresponding to atomic label, i.e. `atomic_polarizability.npy` or `atomic_dipole.npy`, whose shape should be #frames x ([9 or 3] x #selected atoms). If it's larger than 0.0, this npy should be included. Both `pref` and `pref_atomic` should be provided, and either can be set to 0.0." + doc_global_weight = "The prefactor of the weight of global loss. It should be larger than or equal to 0. It controls the weight of loss corresponding to global label, i.e. 'polarizability.npy` or `dipole.npy`, whose shape should be #frames x [9 or 3]. If it's larger than 0.0, this npy should be included." + doc_local_weight = "The prefactor of the weight of atomic loss. It should be larger than or equal to 0. It controls the weight of loss corresponding to atomic label, i.e. `atomic_polarizability.npy` or `atomic_dipole.npy`, whose shape should be #frames x ([9 or 3] x #atoms). If it's larger than 0.0, this npy should be included. Both `pref` and `pref_atomic` should be provided, and either can be set to 0.0." + doc_enable_atomic_weight = "If true, the atomic loss will be reweighted." 
return [ Argument( "pref", [float, int], optional=False, default=None, doc=doc_global_weight @@ -2524,6 +2525,13 @@ def loss_tensor(): default=None, doc=doc_local_weight, ), + Argument( + "enable_atomic_weight", + bool, + optional=True, + default=False, + doc=doc_enable_atomic_weight, + ), ] diff --git a/source/tests/pt/test_loss_tensor.py b/source/tests/pt/test_loss_tensor.py new file mode 100644 index 0000000000..5802c0b775 --- /dev/null +++ b/source/tests/pt/test_loss_tensor.py @@ -0,0 +1,464 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import os +import unittest + +import numpy as np +import tensorflow.compat.v1 as tf +import torch + +tf.disable_eager_execution() +from pathlib import ( + Path, +) + +from deepmd.pt.loss import TensorLoss as PTTensorLoss +from deepmd.pt.utils import ( + dp_random, + env, +) +from deepmd.pt.utils.dataset import ( + DeepmdDataSetForLoader, +) +from deepmd.tf.loss.tensor import TensorLoss as TFTensorLoss +from deepmd.utils.data import ( + DataRequirementItem, +) + +from ..seed import ( + GLOBAL_SEED, +) + +CUR_DIR = os.path.dirname(__file__) + + +def get_batch(system, type_map, data_requirement): + dataset = DeepmdDataSetForLoader(system, type_map) + dataset.add_data_requirement(data_requirement) + np_batch, pt_batch = get_single_batch(dataset) + return np_batch, pt_batch + + +def get_single_batch(dataset, index=None): + if index is None: + index = dp_random.choice(np.arange(len(dataset))) + np_batch = dataset[index] + pt_batch = {} + + for key in [ + "coord", + "box", + "atom_dipole", + "dipole", + "atom_polarizability", + "polarizability", + "atype", + "natoms", + ]: + if key in np_batch.keys(): + np_batch[key] = np.expand_dims(np_batch[key], axis=0) + pt_batch[key] = torch.as_tensor(np_batch[key], device=env.DEVICE) + if key in ["coord", "atom_dipole"]: + np_batch[key] = np_batch[key].reshape(1, -1) + np_batch["natoms"] = np_batch["natoms"][0] + return np_batch, pt_batch + + +class LossCommonTest(unittest.TestCase): + def setUp(self) -> None: + self.cur_lr = 1.2 + self.type_map = ["H", "O"] + + # data + tensor_data_requirement = [ + DataRequirementItem( + "atomic_" + self.label_name, + ndof=self.tensor_size, + atomic=True, + must=False, + high_prec=False, + ), + DataRequirementItem( + self.label_name, + ndof=self.tensor_size, + atomic=False, + must=False, + high_prec=False, + ), + DataRequirementItem( + "atomic_weight", + ndof=1, + atomic=True, + must=False, + high_prec=False, + default=1.0, + ), + ] + np_batch, pt_batch = get_batch( + self.system, self.type_map, tensor_data_requirement + ) + natoms = np_batch["natoms"] + self.nloc = natoms[0] + self.nframes = np_batch["atom_" + self.label_name].shape[0] + rng = np.random.default_rng(GLOBAL_SEED) + + l_atomic_tensor, l_global_tensor = ( + np_batch["atom_" + self.label_name], + np_batch[self.label_name], + ) + p_atomic_tensor, p_global_tensor = ( + np.ones_like(l_atomic_tensor), + np.ones_like(l_global_tensor), + ) + + batch_size = pt_batch["coord"].shape[0] + + # atom_pref = rng.random(size=[batch_size, nloc * 3]) + # drdq = rng.random(size=[batch_size, nloc * 2 * 3]) + atom_weight = rng.random(size=[batch_size, self.nloc]) + + # tf + self.g = tf.Graph() + with self.g.as_default(): + t_cur_lr = tf.placeholder(shape=[], dtype=tf.float64) + t_natoms = tf.placeholder(shape=[None], dtype=tf.int32) + t_patomic_tensor = tf.placeholder(shape=[None, None], dtype=tf.float64) + t_pglobal_tensor = tf.placeholder(shape=[None, None], dtype=tf.float64) + t_latomic_tensor = tf.placeholder(shape=[None, None], 
dtype=tf.float64) + t_lglobal_tensor = tf.placeholder(shape=[None, None], dtype=tf.float64) + t_atom_weight = tf.placeholder(shape=[None, None], dtype=tf.float64) + find_atomic = tf.constant(1.0, dtype=tf.float64) + find_global = tf.constant(1.0, dtype=tf.float64) + find_atom_weight = tf.constant(1.0, dtype=tf.float64) + model_dict = { + self.tensor_name: t_patomic_tensor, + } + label_dict = { + "atom_" + self.label_name: t_latomic_tensor, + "find_atom_" + self.label_name: find_atomic, + self.label_name: t_lglobal_tensor, + "find_" + self.label_name: find_global, + "atom_weight": t_atom_weight, + "find_atom_weight": find_atom_weight, + } + self.tf_loss_sess = self.tf_loss.build( + t_cur_lr, t_natoms, model_dict, label_dict, "" + ) + + self.feed_dict = { + t_cur_lr: self.cur_lr, + t_natoms: natoms, + t_patomic_tensor: p_atomic_tensor, + t_pglobal_tensor: p_global_tensor, + t_latomic_tensor: l_atomic_tensor, + t_lglobal_tensor: l_global_tensor, + t_atom_weight: atom_weight, + } + # pt + self.model_pred = { + self.tensor_name: torch.from_numpy(p_atomic_tensor), + "global_" + self.tensor_name: torch.from_numpy(p_global_tensor), + } + self.label = { + "atom_" + self.label_name: torch.from_numpy(l_atomic_tensor), + "find_" + "atom_" + self.label_name: 1.0, + self.label_name: torch.from_numpy(l_global_tensor), + "find_" + self.label_name: 1.0, + "atom_weight": torch.from_numpy(atom_weight), + "find_atom_weight": 1.0, + } + self.label_absent = { + "atom_" + self.label_name: torch.from_numpy(l_atomic_tensor), + self.label_name: torch.from_numpy(l_global_tensor), + "atom_weight": torch.from_numpy(atom_weight), + } + self.natoms = pt_batch["natoms"] + + def tearDown(self) -> None: + tf.reset_default_graph() + return super().tearDown() + + +class TestAtomicDipoleLoss(LossCommonTest): + def setUp(self) -> None: + self.tensor_name = "dipole" + self.tensor_size = 3 + self.label_name = "dipole" + self.system = str(Path(__file__).parent / "water_tensor/dipole/O78H156") + + self.pref_atomic = 1.0 + self.pref = 0.0 + # tf + self.tf_loss = TFTensorLoss( + { + "pref_atomic": self.pref_atomic, + "pref": self.pref, + }, + tensor_name=self.tensor_name, + tensor_size=self.tensor_size, + label_name=self.label_name, + ) + # pt + self.pt_loss = PTTensorLoss( + self.tensor_name, + self.tensor_size, + self.label_name, + self.pref_atomic, + self.pref, + ) + + super().setUp() + + def test_consistency(self) -> None: + with tf.Session(graph=self.g) as sess: + tf_loss, tf_more_loss = sess.run( + self.tf_loss_sess, feed_dict=self.feed_dict + ) + + def fake_model(): + return self.model_pred + + _, pt_loss, pt_more_loss = self.pt_loss( + {}, + fake_model, + self.label, + self.nloc, + self.cur_lr, + ) + _, pt_loss_absent, pt_more_loss_absent = self.pt_loss( + {}, + fake_model, + self.label_absent, + self.nloc, + self.cur_lr, + ) + pt_loss = pt_loss.detach().cpu() + pt_loss_absent = pt_loss_absent.detach().cpu() + self.assertTrue(np.allclose(tf_loss, pt_loss.numpy())) + self.assertTrue(np.allclose(0.0, pt_loss_absent.numpy())) + for key in ["local"]: + self.assertTrue( + np.allclose( + tf_more_loss[f"{key}_loss"], + pt_more_loss[f"l2_{key}_{self.tensor_name}_loss"], + ) + ) + self.assertTrue( + np.isnan(pt_more_loss_absent[f"l2_{key}_{self.tensor_name}_loss"]) + ) + + +class TestAtomicDipoleAWeightLoss(LossCommonTest): + def setUp(self) -> None: + self.tensor_name = "dipole" + self.tensor_size = 3 + self.label_name = "dipole" + self.system = str(Path(__file__).parent / "water_tensor/dipole/O78H156") + + self.pref_atomic = 1.0 + 
self.pref = 0.0 + # tf + self.tf_loss = TFTensorLoss( + { + "pref_atomic": self.pref_atomic, + "pref": self.pref, + "enable_atomic_weight": True, + }, + tensor_name=self.tensor_name, + tensor_size=self.tensor_size, + label_name=self.label_name, + ) + # pt + self.pt_loss = PTTensorLoss( + self.tensor_name, + self.tensor_size, + self.label_name, + self.pref_atomic, + self.pref, + enable_atomic_weight=True, + ) + + super().setUp() + + def test_consistency(self) -> None: + with tf.Session(graph=self.g) as sess: + tf_loss, tf_more_loss = sess.run( + self.tf_loss_sess, feed_dict=self.feed_dict + ) + + def fake_model(): + return self.model_pred + + _, pt_loss, pt_more_loss = self.pt_loss( + {}, + fake_model, + self.label, + self.nloc, + self.cur_lr, + ) + _, pt_loss_absent, pt_more_loss_absent = self.pt_loss( + {}, + fake_model, + self.label_absent, + self.nloc, + self.cur_lr, + ) + pt_loss = pt_loss.detach().cpu() + pt_loss_absent = pt_loss_absent.detach().cpu() + self.assertTrue(np.allclose(tf_loss, pt_loss.numpy())) + self.assertTrue(np.allclose(0.0, pt_loss_absent.numpy())) + for key in ["local"]: + self.assertTrue( + np.allclose( + tf_more_loss[f"{key}_loss"], + pt_more_loss[f"l2_{key}_{self.tensor_name}_loss"], + ) + ) + self.assertTrue( + np.isnan(pt_more_loss_absent[f"l2_{key}_{self.tensor_name}_loss"]) + ) + + +class TestAtomicPolarLoss(LossCommonTest): + def setUp(self) -> None: + self.tensor_name = "polar" + self.tensor_size = 9 + self.label_name = "polarizability" + + self.system = str(Path(__file__).parent / "water_tensor/polar/atomic_system") + + self.pref_atomic = 1.0 + self.pref = 0.0 + # tf + self.tf_loss = TFTensorLoss( + { + "pref_atomic": self.pref_atomic, + "pref": self.pref, + }, + tensor_name=self.tensor_name, + tensor_size=self.tensor_size, + label_name=self.label_name, + ) + # pt + self.pt_loss = PTTensorLoss( + self.tensor_name, + self.tensor_size, + self.label_name, + self.pref_atomic, + self.pref, + ) + + super().setUp() + + def test_consistency(self) -> None: + with tf.Session(graph=self.g) as sess: + tf_loss, tf_more_loss = sess.run( + self.tf_loss_sess, feed_dict=self.feed_dict + ) + + def fake_model(): + return self.model_pred + + _, pt_loss, pt_more_loss = self.pt_loss( + {}, + fake_model, + self.label, + self.nloc, + self.cur_lr, + ) + _, pt_loss_absent, pt_more_loss_absent = self.pt_loss( + {}, + fake_model, + self.label_absent, + self.nloc, + self.cur_lr, + ) + pt_loss = pt_loss.detach().cpu() + pt_loss_absent = pt_loss_absent.detach().cpu() + self.assertTrue(np.allclose(tf_loss, pt_loss.numpy())) + self.assertTrue(np.allclose(0.0, pt_loss_absent.numpy())) + for key in ["local"]: + self.assertTrue( + np.allclose( + tf_more_loss[f"{key}_loss"], + pt_more_loss[f"l2_{key}_{self.tensor_name}_loss"], + ) + ) + self.assertTrue( + np.isnan(pt_more_loss_absent[f"l2_{key}_{self.tensor_name}_loss"]) + ) + + +class TestAtomicPolarAWeightLoss(LossCommonTest): + def setUp(self) -> None: + self.tensor_name = "polar" + self.tensor_size = 9 + self.label_name = "polarizability" + + self.system = str(Path(__file__).parent / "water_tensor/polar/atomic_system") + + self.pref_atomic = 1.0 + self.pref = 0.0 + # tf + self.tf_loss = TFTensorLoss( + { + "pref_atomic": self.pref_atomic, + "pref": self.pref, + "enable_atomic_weight": True, + }, + tensor_name=self.tensor_name, + tensor_size=self.tensor_size, + label_name=self.label_name, + ) + # pt + self.pt_loss = PTTensorLoss( + self.tensor_name, + self.tensor_size, + self.label_name, + self.pref_atomic, + self.pref, + 
enable_atomic_weight=True, + ) + + super().setUp() + + def test_consistency(self) -> None: + with tf.Session(graph=self.g) as sess: + tf_loss, tf_more_loss = sess.run( + self.tf_loss_sess, feed_dict=self.feed_dict + ) + + def fake_model(): + return self.model_pred + + _, pt_loss, pt_more_loss = self.pt_loss( + {}, + fake_model, + self.label, + self.nloc, + self.cur_lr, + ) + _, pt_loss_absent, pt_more_loss_absent = self.pt_loss( + {}, + fake_model, + self.label_absent, + self.nloc, + self.cur_lr, + ) + pt_loss = pt_loss.detach().cpu() + pt_loss_absent = pt_loss_absent.detach().cpu() + self.assertTrue(np.allclose(tf_loss, pt_loss.numpy())) + self.assertTrue(np.allclose(0.0, pt_loss_absent.numpy())) + for key in ["local"]: + self.assertTrue( + np.allclose( + tf_more_loss[f"{key}_loss"], + pt_more_loss[f"l2_{key}_{self.tensor_name}_loss"], + ) + ) + self.assertTrue( + np.isnan(pt_more_loss_absent[f"l2_{key}_{self.tensor_name}_loss"]) + ) + + +if __name__ == "__main__": + unittest.main() From c24498b10900ce165a6242ce6f6fb1261c31a9cb Mon Sep 17 00:00:00 2001 From: HydrogenSulfate <490868991@qq.com> Date: Sat, 21 Dec 2024 02:47:41 +0800 Subject: [PATCH 25/43] pd: fix learning rate setting when resume (#4480) "When resuming training, there is no need to add `self.start_step` to the step count because Paddle uses `lr_sche.last_epoch` as the input for `step`, which already records the `start_step` steps." learning rate are correct after fixing ![22AD6874B74E437E9B133D75ABCC02FE](https://github.com/user-attachments/assets/1ad0ce71-6e1c-4de5-87dc-0daca1f6f038) ## Summary by CodeRabbit - **New Features** - Enhanced training process with improved optimizer configuration and learning rate adjustments. - Refined logging of training and validation results for clarity. - Improved model saving logic to preserve the latest state during interruptions. - Enhanced tensorboard logging for detailed tracking of training metrics. - **Bug Fixes** - Corrected lambda function for learning rate scheduler to reference warmup steps accurately. - **Chores** - Streamlined data loading and handling for efficient training across different tasks. --- deepmd/pd/train/training.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/deepmd/pd/train/training.py b/deepmd/pd/train/training.py index 65e35a1c4b..0f3c7a9732 100644 --- a/deepmd/pd/train/training.py +++ b/deepmd/pd/train/training.py @@ -588,15 +588,14 @@ def warm_up_linear(step, warmup_steps): if self.opt_type == "Adam": self.scheduler = paddle.optimizer.lr.LambdaDecay( learning_rate=self.lr_exp.start_lr, - lr_lambda=lambda step: warm_up_linear( - step + self.start_step, self.warmup_steps - ), + lr_lambda=lambda step: warm_up_linear(step, self.warmup_steps), ) self.optimizer = paddle.optimizer.Adam( learning_rate=self.scheduler, parameters=self.wrapper.parameters() ) if optimizer_state_dict is not None and self.restart_training: self.optimizer.set_state_dict(optimizer_state_dict) + self.scheduler.last_epoch -= 1 else: raise ValueError(f"Not supported optimizer type '{self.opt_type}'") From 250c9076795bedce0cf359d6aaaf049a4d450d4d Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Sun, 22 Dec 2024 01:46:15 -0500 Subject: [PATCH 26/43] docs: update deepmd-gnn URL (#4482) ## Summary by CodeRabbit - **Documentation** - Updated guidelines for creating and integrating new models in the DeePMD-kit framework. - Added new sections on descriptors, fitting networks, and model requirements. - Enhanced unit testing section with instructions for regression tests. 
- Updated URL for the DeePMD-GNN plugin to reflect new repository location. Signed-off-by: Jinzhe Zeng --- doc/development/create-a-model-pt.md | 2 +- doc/third-party/out-of-deepmd-kit.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/development/create-a-model-pt.md b/doc/development/create-a-model-pt.md index 08528cc5f6..7eb75b7026 100644 --- a/doc/development/create-a-model-pt.md +++ b/doc/development/create-a-model-pt.md @@ -180,7 +180,7 @@ The arguments here should be consistent with the class arguments of your new com ## Package new codes You may package new codes into a new Python package if you don't want to contribute it to the main DeePMD-kit repository. -A good example is [DeePMD-GNN](https://github.com/njzjz/deepmd-gnn). +A good example is [DeePMD-GNN](https://gitlab.com/RutgersLBSR/deepmd-gnn). It's crucial to add your new component to `project.entry-points."deepmd.pt"` in `pyproject.toml`: ```toml diff --git a/doc/third-party/out-of-deepmd-kit.md b/doc/third-party/out-of-deepmd-kit.md index 12ae5842c7..a04ba9741b 100644 --- a/doc/third-party/out-of-deepmd-kit.md +++ b/doc/third-party/out-of-deepmd-kit.md @@ -6,7 +6,7 @@ The codes of the following interfaces are not a part of the DeePMD-kit package a ### External GNN models (MACE/NequIP) -[DeePMD-GNN](https://github.com/njzjz/deepmd-gnn) is DeePMD-kit plugin for various graph neural network (GNN) models. +[DeePMD-GNN](https://gitlab.com/RutgersLBSR/deepmd-gnn) is DeePMD-kit plugin for various graph neural network (GNN) models. It has interfaced with [MACE](https://github.com/ACEsuit/mace) (PyTorch version) and [NequIP](https://github.com/mir-group/nequip) (PyTorch version). It is also the first example to the DeePMD-kit [plugin mechanism](../development/create-a-model-pt.md#package-new-codes). From deaeec9c9b1f51c9b306724eb6e8d195755ac8dd Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Sun, 22 Dec 2024 01:46:47 -0500 Subject: [PATCH 27/43] docs: update DPA-2 citation (#4483) ## Summary by CodeRabbit - **New Features** - Updated references in the bibliography for the DPA-2 model to include a new article entry for 2024. - Added a new reference for an attention-based descriptor. - **Bug Fixes** - Corrected reference links in documentation to point to updated DOI links instead of arXiv. - **Documentation** - Revised entries in the credits and model documentation to reflect the latest citations and details. - Enhanced clarity and detail in fine-tuning documentation for TensorFlow and PyTorch implementations. 
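[Editor's note] The `create-a-model-pt.md` change above keeps the instruction that external packages must register their components under `project.entry-points."deepmd.pt"` in `pyproject.toml`. As a rough, hypothetical sketch of how such entry points can be discovered at runtime — plain `importlib.metadata` usage, not DeePMD-kit's actual plugin loader:

```python
# Hypothetical sketch: discovering components registered under the
# "deepmd.pt" entry-point group with the standard library only.
# DeePMD-kit's real plugin machinery may differ.
from importlib.metadata import entry_points


def load_deepmd_pt_plugins() -> dict:
    """Import every object registered under the "deepmd.pt" group."""
    loaded = {}
    for ep in entry_points(group="deepmd.pt"):  # Python >= 3.10 API
        loaded[ep.name] = ep.load()  # imports the module and resolves the attribute
    return loaded
```

This is why a plugin such as DeePMD-GNN works without being merged into the main repository: installing the package makes its entry points visible, and the host framework imports them by name.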
--------- Signed-off-by: Jinzhe Zeng --- CITATIONS.bib | 32 +++++++++++++++--------------- deepmd/dpmodel/descriptor/dpa2.py | 7 ++++++- deepmd/pt/model/descriptor/dpa2.py | 7 ++++++- doc/credits.rst | 2 +- doc/model/dpa2.md | 2 +- doc/train/finetuning.md | 2 +- doc/train/multi-task-training.md | 2 +- 7 files changed, 32 insertions(+), 22 deletions(-) diff --git a/CITATIONS.bib b/CITATIONS.bib index d5524a14f6..52c8045bf3 100644 --- a/CITATIONS.bib +++ b/CITATIONS.bib @@ -128,26 +128,26 @@ @article{Zhang_NpjComputMater_2024_v10_p94 doi = {10.1038/s41524-024-01278-7}, } -@misc{Zhang_2023_DPA2, +@article{Zhang_npjComputMater_2024_v10_p293, annote = {DPA-2}, author = { Duo Zhang and Xinzijian Liu and Xiangyu Zhang and Chengqian Zhang and Chun - Cai and Hangrui Bi and Yiming Du and Xuejian Qin and Jiameng Huang and - Bowen Li and Yifan Shan and Jinzhe Zeng and Yuzhi Zhang and Siyuan Liu and - Yifan Li and Junhan Chang and Xinyan Wang and Shuo Zhou and Jianchuan Liu - and Xiaoshan Luo and Zhenyu Wang and Wanrun Jiang and Jing Wu and Yudi Yang - and Jiyuan Yang and Manyi Yang and Fu-Qiang Gong and Linshuang Zhang and - Mengchao Shi and Fu-Zhi Dai and Darrin M. York and Shi Liu and Tong Zhu and - Zhicheng Zhong and Jian Lv and Jun Cheng and Weile Jia and Mohan Chen and - Guolin Ke and Weinan E and Linfeng Zhang and Han Wang + Cai and Hangrui Bi and Yiming Du and Xuejian Qin and Anyang Peng and + Jiameng Huang and Bowen Li and Yifan Shan and Jinzhe Zeng and Yuzhi Zhang + and Siyuan Liu and Yifan Li and Junhan Chang and Xinyan Wang and Shuo Zhou + and Jianchuan Liu and Xiaoshan Luo and Zhenyu Wang and Wanrun Jiang and + Jing Wu and Yudi Yang and Jiyuan Yang and Manyi Yang and Fu-Qiang Gong and + Linshuang Zhang and Mengchao Shi and Fu-Zhi Dai and Darrin M. York and Shi + Liu and Tong Zhu and Zhicheng Zhong and Jian Lv and Jun Cheng and Weile Jia + and Mohan Chen and Guolin Ke and Weinan E and Linfeng Zhang and Han Wang }, - title = { - {DPA-2: Towards a universal large atomic model for molecular and material - simulation} - }, - publisher = {arXiv}, - year = 2023, - doi = {10.48550/arXiv.2312.15492}, + title = {{DPA-2: a large atomic model as a multi-task learner}}, + journal = {npj Comput. Mater}, + year = 2024, + volume = 10, + number = 1, + pages = 293, + doi = {10.1038/s41524-024-01493-2}, } @article{Zhang_PhysPlasmas_2020_v27_p122704, diff --git a/deepmd/dpmodel/descriptor/dpa2.py b/deepmd/dpmodel/descriptor/dpa2.py index e4cadb7b36..55ae331593 100644 --- a/deepmd/dpmodel/descriptor/dpa2.py +++ b/deepmd/dpmodel/descriptor/dpa2.py @@ -387,7 +387,7 @@ def __init__( use_tebd_bias: bool = False, type_map: Optional[list[str]] = None, ) -> None: - r"""The DPA-2 descriptor. see https://arxiv.org/abs/2312.15492. + r"""The DPA-2 descriptor[1]_. Parameters ---------- @@ -434,6 +434,11 @@ def __init__( sw: torch.Tensor The switch function for decaying inverse distance. + References + ---------- + .. [1] Zhang, D., Liu, X., Zhang, X. et al. DPA-2: a + large atomic model as a multi-task learner. npj + Comput Mater 10, 293 (2024). https://doi.org/10.1038/s41524-024-01493-2 """ def init_subclass_params(sub_data, sub_class): diff --git a/deepmd/pt/model/descriptor/dpa2.py b/deepmd/pt/model/descriptor/dpa2.py index c8e430960b..f086a346b6 100644 --- a/deepmd/pt/model/descriptor/dpa2.py +++ b/deepmd/pt/model/descriptor/dpa2.py @@ -100,7 +100,7 @@ def __init__( use_tebd_bias: bool = False, type_map: Optional[list[str]] = None, ) -> None: - r"""The DPA-2 descriptor. see https://arxiv.org/abs/2312.15492. 
+ r"""The DPA-2 descriptor[1]_. Parameters ---------- @@ -147,6 +147,11 @@ def __init__( sw: torch.Tensor The switch function for decaying inverse distance. + References + ---------- + .. [1] Zhang, D., Liu, X., Zhang, X. et al. DPA-2: a + large atomic model as a multi-task learner. npj + Comput Mater 10, 293 (2024). https://doi.org/10.1038/s41524-024-01493-2 """ super().__init__() diff --git a/doc/credits.rst b/doc/credits.rst index 1b39dc1e0e..059746ee0b 100644 --- a/doc/credits.rst +++ b/doc/credits.rst @@ -54,7 +54,7 @@ Cite DeePMD-kit and methods .. bibliography:: :filter: False - Zhang_2023_DPA2 + Zhang_npjComputMater_2024_v10_p293 - If frame-specific parameters (`fparam`, e.g. electronic temperature) is used, diff --git a/doc/model/dpa2.md b/doc/model/dpa2.md index eb641d6b01..300876bf05 100644 --- a/doc/model/dpa2.md +++ b/doc/model/dpa2.md @@ -4,7 +4,7 @@ **Supported backends**: PyTorch {{ pytorch_icon }}, JAX {{ jax_icon }}, DP {{ dpmodel_icon }} ::: -The DPA-2 model implementation. See https://arxiv.org/abs/2312.15492 for more details. +The DPA-2 model implementation. See https://doi.org/10.1038/s41524-024-01493-2 for more details. Training example: `examples/water/dpa2/input_torch_medium.json`, see [README](../../examples/water/dpa2/README.md) for inputs in different levels. diff --git a/doc/train/finetuning.md b/doc/train/finetuning.md index cf2f5fde4f..04d86cfc98 100644 --- a/doc/train/finetuning.md +++ b/doc/train/finetuning.md @@ -94,7 +94,7 @@ The model section will be overwritten (except the `type_map` subsection) by that #### Fine-tuning from a multi-task pre-trained model -Additionally, within the PyTorch implementation and leveraging the flexibility offered by the framework and the multi-task training process proposed in DPA2 [paper](https://arxiv.org/abs/2312.15492), +Additionally, within the PyTorch implementation and leveraging the flexibility offered by the framework and the multi-task training process proposed in DPA2 [paper](https://doi.org/10.1038/s41524-024-01493-2), we also support more general multitask pre-trained models, which includes multiple datasets for pre-training. These pre-training datasets share a common descriptor while maintaining their individual fitting nets, as detailed in the paper above. diff --git a/doc/train/multi-task-training.md b/doc/train/multi-task-training.md index 51dffcc5f5..16f6c0e05c 100644 --- a/doc/train/multi-task-training.md +++ b/doc/train/multi-task-training.md @@ -26,7 +26,7 @@ and the Adam optimizer is executed to minimize $L^{(t)}$ for one step to update In the case of multi-GPU parallel training, different GPUs will independently select their tasks. In the DPA-2 model, this multi-task training framework is adopted.[^1] -[^1]: Duo Zhang, Xinzijian Liu, Xiangyu Zhang, Chengqian Zhang, Chun Cai, Hangrui Bi, Yiming Du, Xuejian Qin, Jiameng Huang, Bowen Li, Yifan Shan, Jinzhe Zeng, Yuzhi Zhang, Siyuan Liu, Yifan Li, Junhan Chang, Xinyan Wang, Shuo Zhou, Jianchuan Liu, Xiaoshan Luo, Zhenyu Wang, Wanrun Jiang, Jing Wu, Yudi Yang, Jiyuan Yang, Manyi Yang, Fu-Qiang Gong, Linshuang Zhang, Mengchao Shi, Fu-Zhi Dai, Darrin M. York, Shi Liu, Tong Zhu, Zhicheng Zhong, Jian Lv, Jun Cheng, Weile Jia, Mohan Chen, Guolin Ke, Weinan E, Linfeng Zhang, Han Wang, [arXiv preprint arXiv:2312.15492 (2023)](https://arxiv.org/abs/2312.15492) licensed under a [Creative Commons Attribution (CC BY) license](http://creativecommons.org/licenses/by/4.0/). 
+[^1]: Duo Zhang, Xinzijian Liu, Xiangyu Zhang, Chengqian Zhang, Chun Cai, Hangrui Bi, Yiming Du, Xuejian Qin, Anyang Peng, Jiameng Huang, Bowen Li, Yifan Shan, Jinzhe Zeng, Yuzhi Zhang, Siyuan Liu, Yifan Li, Junhan Chang, Xinyan Wang, Shuo Zhou, Jianchuan Liu, Xiaoshan Luo, Zhenyu Wang, Wanrun Jiang, Jing Wu, Yudi Yang, Jiyuan Yang, Manyi Yang, Fu-Qiang Gong, Linshuang Zhang, Mengchao Shi, Fu-Zhi Dai, Darrin M. York, Shi Liu, Tong Zhu, Zhicheng Zhong, Jian Lv, Jun Cheng, Weile Jia, Mohan Chen, Guolin Ke, Weinan E, Linfeng Zhang, Han Wang, DPA-2: a large atomic model as a multi-task learner. npj Comput Mater 10, 293 (2024). [DOI: 10.1038/s41524-024-01493-2](https://doi.org/10.1038/s41524-024-01493-2) licensed under a [Creative Commons Attribution (CC BY) license](http://creativecommons.org/licenses/by/4.0/). Compared with the previous TensorFlow implementation, the new support in PyTorch is more flexible and efficient. In particular, it makes multi-GPU parallel training and even tasks beyond DFT possible, From 2525ab2a4ea0097baec842055f713eceddcb01af Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Sun, 22 Dec 2024 01:47:09 -0500 Subject: [PATCH 28/43] docs: fix a minor typo on the title of `install-from-c-library.md` (#4484) ## Summary by CodeRabbit - **Documentation** - Updated formatting of the installation guide for the pre-compiled C library. - Icons for TensorFlow and JAX are now displayed together in the header. - Retained all installation instructions and compatibility notes. Signed-off-by: Jinzhe Zeng --- doc/install/install-from-c-library.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/install/install-from-c-library.md b/doc/install/install-from-c-library.md index d408fb1b67..806be51ca9 100644 --- a/doc/install/install-from-c-library.md +++ b/doc/install/install-from-c-library.md @@ -1,4 +1,4 @@ -# Install from pre-compiled C library {{ tensorflow_icon }}, JAX {{ jax_icon }} +# Install from pre-compiled C library {{ tensorflow_icon }} {{ jax_icon }} :::{note} **Supported backends**: TensorFlow {{ tensorflow_icon }}, JAX {{ jax_icon }} From cfe17a3e3e2fd198a42d9591d203bd2975c72824 Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Sun, 22 Dec 2024 01:47:38 -0500 Subject: [PATCH 29/43] fix: print dlerror if dlopen fails (#4485) xref: https://github.com/njzjz/deepmd-gnn/issues/44 ## Summary by CodeRabbit - **New Features** - Enhanced error messages for library loading failures on non-Windows platforms. - Updated thread management environment variable checks for improved compatibility. - Added support for mixed types in tensor input handling, allowing for more flexible configurations. - **Bug Fixes** - Improved error reporting for dynamic library loading issues. --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- source/api_cc/src/common.cc | 8 +++++++- source/lib/src/gpu/cudart/cudart_stub.cc | 4 ++++ 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/source/api_cc/src/common.cc b/source/api_cc/src/common.cc index c51ae9a8b4..d3cad083bd 100644 --- a/source/api_cc/src/common.cc +++ b/source/api_cc/src/common.cc @@ -390,7 +390,13 @@ static inline void _load_library_path(std::string dso_path) { if (!dso_handle) { throw deepmd::deepmd_exception( dso_path + - " is not found! You can add the library directory to LD_LIBRARY_PATH"); + " is not found or fails to load! You can add the library directory to " + "LD_LIBRARY_PATH." 
+#ifndef _WIN32 + " Error message: " + + std::string(dlerror()) +#endif + ); } } diff --git a/source/lib/src/gpu/cudart/cudart_stub.cc b/source/lib/src/gpu/cudart/cudart_stub.cc index 8083a0a89d..cfbabd6f5e 100644 --- a/source/lib/src/gpu/cudart/cudart_stub.cc +++ b/source/lib/src/gpu/cudart/cudart_stub.cc @@ -25,6 +25,10 @@ void *DP_cudart_dlopen(char *libname) { #endif if (!dso_handle) { std::cerr << "DeePMD-kit: Cannot find " << libname << std::endl; +#ifndef _WIN32 + std::cerr << "DeePMD-kit: Error message: " << std::string(dlerror()) + << std::endl; +#endif return nullptr; } std::cerr << "DeePMD-kit: Successfully load " << libname << std::endl; From 242408d584f47db6746000672c4241a02f671ae8 Mon Sep 17 00:00:00 2001 From: HydrogenSulfate <490868991@qq.com> Date: Tue, 24 Dec 2024 01:13:15 +0800 Subject: [PATCH 30/43] pd: fix oom error (#4493) Paddle use `MemoryError` rather than `RuntimeError` used in pytorch, now I can test DPA-1 and DPA-2 in 16G V100... ![image](https://github.com/user-attachments/assets/42ead773-bf26-4195-8f67-404b151371de) ## Summary by CodeRabbit - **Bug Fixes** - Improved detection of out-of-memory (OOM) errors to enhance application stability. - Ensured cached memory is cleared upon OOM errors, preventing potential memory leaks. --- deepmd/pd/utils/auto_batch_size.py | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/deepmd/pd/utils/auto_batch_size.py b/deepmd/pd/utils/auto_batch_size.py index 8cdb5ddea2..0eb5e46d5f 100644 --- a/deepmd/pd/utils/auto_batch_size.py +++ b/deepmd/pd/utils/auto_batch_size.py @@ -49,12 +49,8 @@ def is_oom_error(self, e: Exception) -> bool: # several sources think CUSOLVER_STATUS_INTERNAL_ERROR is another out-of-memory error, # such as https://github.com/JuliaGPU/CUDA.jl/issues/1924 # (the meaningless error message should be considered as a bug in cusolver) - if isinstance(e, RuntimeError) and ( - "CUDA out of memory." in e.args[0] - or "CUDA driver error: out of memory" in e.args[0] - or "cusolver error: CUSOLVER_STATUS_INTERNAL_ERROR" in e.args[0] - ): + if isinstance(e, MemoryError) and ("ResourceExhaustedError" in e.args[0]): # Release all unoccupied cached memory - # paddle.device.cuda.empty_cache() + paddle.device.cuda.empty_cache() return True return False From 47412da3de58fe92c7f7c12cbcb67220ed96cde2 Mon Sep 17 00:00:00 2001 From: HydrogenSulfate <490868991@qq.com> Date: Tue, 24 Dec 2024 01:50:09 +0800 Subject: [PATCH 31/43] pd: add missing `dp.eval()` in pd backend (#4488) Switch to eval mode when evaluating model, otherwise `self.training` will be `True`, backward graph will be created and cause OOM ## Summary by CodeRabbit - **New Features** - Enhanced model evaluation state management to ensure correct behavior during evaluation. - **Bug Fixes** - Improved type consistency in the `normalize_coord` function for better computational accuracy. 
--- deepmd/pd/infer/deep_eval.py | 1 + deepmd/pd/utils/region.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/deepmd/pd/infer/deep_eval.py b/deepmd/pd/infer/deep_eval.py index a2f8510f28..c31170ad71 100644 --- a/deepmd/pd/infer/deep_eval.py +++ b/deepmd/pd/infer/deep_eval.py @@ -113,6 +113,7 @@ def __init__( else: # self.dp = paddle.jit.load(self.model_path.split(".json")[0]) raise ValueError(f"Unknown model file format: {self.model_path}!") + self.dp.eval() self.rcut = self.dp.model["Default"].get_rcut() self.type_map = self.dp.model["Default"].get_type_map() if isinstance(auto_batch_size, bool): diff --git a/deepmd/pd/utils/region.py b/deepmd/pd/utils/region.py index f3e3eaa52d..d2600ef16e 100644 --- a/deepmd/pd/utils/region.py +++ b/deepmd/pd/utils/region.py @@ -108,5 +108,5 @@ def normalize_coord( """ icoord = phys2inter(coord, cell) - icoord = paddle.remainder(icoord, paddle.full([], 1.0)) + icoord = paddle.remainder(icoord, paddle.full([], 1.0, dtype=icoord.dtype)) return inter2phys(icoord, cell) From 30b1447aa900d393f8d5ffeceee156a3124cfec8 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 24 Dec 2024 12:20:34 +0800 Subject: [PATCH 32/43] [pre-commit.ci] pre-commit autoupdate (#4497) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.8.3 → v0.8.4](https://github.com/astral-sh/ruff-pre-commit/compare/v0.8.3...v0.8.4) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 9058decc21..bd36fd6e63 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -29,7 +29,7 @@ repos: exclude: ^source/3rdparty - repo: https://github.com/astral-sh/ruff-pre-commit # Ruff version. - rev: v0.8.3 + rev: v0.8.4 hooks: - id: ruff args: ["--fix"] From f8605ee16ee57196c7a5157e60b08ed9ad16245b Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Tue, 24 Dec 2024 20:24:45 -0500 Subject: [PATCH 33/43] fix: fix seed with multiple ranks (#4479) Fix #4440. ## Summary by CodeRabbit - **New Features** - Enhanced seed handling to support both single integers and lists for improved randomness in distributed training. - Added logging for neighbor statistics calculation during training. - **Bug Fixes** - Improved error handling in data loading processes to ensure robustness. - **Documentation** - Updated documentation for methods related to seed and batch size management for clarity. 
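[Editor's note] The seed fix above replaces `seed + rank` with a `[rank, seed % (2**32)]` pair that is hashed by `mix_entropy`, so neighboring ranks get decorrelated random streams instead of shifted copies of the same one (with plain addition, rank 0 / seed 1 and rank 1 / seed 0 would produce identical streams). A sketch of the same idea using NumPy's standard entropy-mixing utility; DeePMD-kit itself uses its own `mix_entropy` helper rather than `SeedSequence`:

```python
import numpy as np

def rank_rng(base_seed: int, rank: int) -> np.random.Generator:
    # Mix the rank into the seed entropy instead of adding it;
    # SeedSequence hashes the whole list, so [0, 1] and [1, 0]
    # yield different, statistically independent streams.
    ss = np.random.SeedSequence([rank, base_seed % (2**32)])
    return np.random.default_rng(ss)

rng0 = rank_rng(12345, rank=0)
rng1 = rank_rng(12345, rank=1)  # independent of rng0, not a shifted copy
```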
--------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- deepmd/pd/entrypoints/main.py | 2 +- deepmd/pd/utils/dataloader.py | 11 ++++++++++- deepmd/pt/entrypoints/main.py | 2 +- deepmd/pt/utils/dataloader.py | 11 +++++++++-- deepmd/tf/entrypoints/train.py | 5 +++-- deepmd/utils/random.py | 2 +- 6 files changed, 25 insertions(+), 8 deletions(-) diff --git a/deepmd/pd/entrypoints/main.py b/deepmd/pd/entrypoints/main.py index 3fa66312e7..bcc93432af 100644 --- a/deepmd/pd/entrypoints/main.py +++ b/deepmd/pd/entrypoints/main.py @@ -123,7 +123,7 @@ def prepare_trainer_input_single( # validation and training data # avoid the same batch sequence among devices - rank_seed = (seed + rank) % (2**32) if seed is not None else None + rank_seed = [rank, seed % (2**32)] if seed is not None else None validation_data_single = ( DpLoaderSet( validation_systems, diff --git a/deepmd/pd/utils/dataloader.py b/deepmd/pd/utils/dataloader.py index 9d59ea0da7..80b3e7cb8b 100644 --- a/deepmd/pd/utils/dataloader.py +++ b/deepmd/pd/utils/dataloader.py @@ -36,6 +36,10 @@ from deepmd.pd.utils.dataset import ( DeepmdDataSetForLoader, ) +from deepmd.pt.utils.utils import ( + mix_entropy, +) +from deepmd.utils import random as dp_random from deepmd.utils.data import ( DataRequirementItem, ) @@ -50,8 +54,13 @@ def setup_seed(seed): - paddle.seed(seed) + if isinstance(seed, (list, tuple)): + mixed_seed = mix_entropy(seed) + else: + mixed_seed = seed + paddle.seed(mixed_seed) os.environ["FLAGS_cudnn_deterministic"] = "True" + dp_random.seed(seed) class DpLoaderSet(Dataset): diff --git a/deepmd/pt/entrypoints/main.py b/deepmd/pt/entrypoints/main.py index 1e5314a821..fd4be73e84 100644 --- a/deepmd/pt/entrypoints/main.py +++ b/deepmd/pt/entrypoints/main.py @@ -138,7 +138,7 @@ def prepare_trainer_input_single( # validation and training data # avoid the same batch sequence among devices - rank_seed = (seed + rank) % (2**32) if seed is not None else None + rank_seed = [rank, seed % (2**32)] if seed is not None else None validation_data_single = ( DpLoaderSet( validation_systems, diff --git a/deepmd/pt/utils/dataloader.py b/deepmd/pt/utils/dataloader.py index 67e5195f6d..12681a304d 100644 --- a/deepmd/pt/utils/dataloader.py +++ b/deepmd/pt/utils/dataloader.py @@ -36,6 +36,9 @@ from deepmd.pt.utils.dataset import ( DeepmdDataSetForLoader, ) +from deepmd.pt.utils.utils import ( + mix_entropy, +) from deepmd.utils.data import ( DataRequirementItem, ) @@ -50,8 +53,12 @@ def setup_seed(seed) -> None: - torch.manual_seed(seed) - torch.cuda.manual_seed_all(seed) + if isinstance(seed, (list, tuple)): + mixed_seed = mix_entropy(seed) + else: + mixed_seed = seed + torch.manual_seed(mixed_seed) + torch.cuda.manual_seed_all(mixed_seed) torch.backends.cudnn.deterministic = True dp_random.seed(seed) diff --git a/deepmd/tf/entrypoints/train.py b/deepmd/tf/entrypoints/train.py index 590fa04635..1762f1049a 100755 --- a/deepmd/tf/entrypoints/train.py +++ b/deepmd/tf/entrypoints/train.py @@ -220,9 +220,10 @@ def _do_work( seed = jdata["training"].get("seed", None) if seed is not None: # avoid the same batch sequence among workers - seed += run_opt.my_rank seed = seed % (2**32) - dp_random.seed(seed) + dp_random.seed([run_opt.my_rank, seed]) + else: + dp_random.seed(seed) # setup data modifier modifier = get_modifier(jdata["model"].get("modifier", None)) diff --git a/deepmd/utils/random.py b/deepmd/utils/random.py index b0c75600fb..10ebdf0790 100644 --- a/deepmd/utils/random.py +++ b/deepmd/utils/random.py 
@@ -56,7 +56,7 @@ def random(size=None):
     return _RANDOM_GENERATOR.random_sample(size)


-def seed(val: Optional[int] = None) -> None:
+def seed(val: Optional[Union[int, list[int]]] = None) -> None:
     """Seed the generator.

     Parameters

From f9f17594d6174bd4448865ed775b5732ea1a9f9c Mon Sep 17 00:00:00 2001
From: Chenqqian Zhang <100290172+Chengqian-Zhang@users.noreply.github.com>
Date: Wed, 25 Dec 2024 11:10:58 +0800
Subject: [PATCH 34/43] Refactor property fitting interface (#4471)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Two main changes:

1. The program can read data files in `npy` format with any prefix (`tc.npy`, `band_gap.npy`, ...). One just needs to write the name of the property and the corresponding dimension under `model/fitting` in `input.json`.
2. Data normalisation has been added to the program. Specifically, the mean and standard deviation of the properties are computed during the statistics pass; the output of `fitting_net` is multiplied by the standard deviation and shifted by the mean to produce the output of the `PropertyModel`, and the loss is evaluated on the normalised values (illustrated in the sketch after the release notes).

## Summary by CodeRabbit

## Release Notes

- **New Features**
  - Introduced new parameters for property loss calculations and model fitting, enhancing flexibility and functionality.
  - Added methods for retrieving property names and checking property intensity across various classes.
- **Bug Fixes**
  - Improved validation and error handling for property-related calculations.
- **Documentation**
  - Enhanced documentation for property fitting, including detailed parameter explanations and structured training examples.
  - Added documentation for new parameters in the JSON configuration related to property fitting.
- **Tests**
  - Added new test cases to validate the functionality of updated methods and properties.
  - Updated existing tests to utilize dynamic property names instead of hardcoded strings.
- **Chores**
  - Updated configuration files and test data to align with new property handling features.
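[Editor's note] A minimal sketch of the normalisation arithmetic described in point 2 above, assuming a single global property file (`band_gap.npy` is one of the prefixes the PR mentions); this shows only the idea, not the DeePMD-kit implementation:

```python
import numpy as np

# Label statistics are collected once, at the same stage where the
# other data statistics are computed.
labels = np.load("band_gap.npy")   # shape (nframes, task_dim)
mean = labels.mean(axis=0)
std = labels.std(axis=0)

def model_output(raw_fitting_output):
    # The fitting net predicts in normalized space; the property
    # model de-normalizes before returning its output.
    return raw_fitting_output * std + mean

def normalized_mse(pred, label):
    # The loss is evaluated on normalized values, so its scale is
    # O(1) regardless of the property's physical units.
    return np.mean(((pred - mean) / std - (label - mean) / std) ** 2)
```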
--------- Signed-off-by: Chenqqian Zhang <100290172+Chengqian-Zhang@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- deepmd/dpmodel/atomic_model/__init__.py | 4 + .../atomic_model/property_atomic_model.py | 24 +++ deepmd/dpmodel/fitting/property_fitting.py | 18 +- deepmd/dpmodel/model/property_model.py | 6 +- deepmd/entrypoints/test.py | 20 +- deepmd/infer/deep_eval.py | 6 +- deepmd/infer/deep_property.py | 44 +++- deepmd/pt/infer/deep_eval.py | 11 +- deepmd/pt/loss/property.py | 91 ++++++-- .../model/atomic_model/base_atomic_model.py | 12 +- .../atomic_model/property_atomic_model.py | 28 +-- deepmd/pt/model/model/property_model.py | 19 +- deepmd/pt/model/task/property.py | 27 +-- deepmd/pt/train/training.py | 4 + deepmd/pt/utils/stat.py | 53 +++-- deepmd/utils/argcheck.py | 7 +- deepmd/utils/out_stat.py | 61 ++++++ doc/model/index.rst | 1 + doc/model/train-fitting-property.md | 194 ++++++++++++++++++ .../{property.npy => band_prop.npy} | Bin .../{property.npy => band_prop.npy} | Bin .../{property.npy => band_prop.npy} | Bin examples/property/train/README.md | 5 + examples/property/train/input_torch.json | 6 + source/tests/common/test_out_stat.py | 53 +++++ .../tests/consistent/fitting/test_property.py | 9 +- .../tests/consistent/model/test_property.py | 10 +- source/tests/pd/model/test_permutation.py | 2 +- source/tests/pt/model/test_permutation.py | 2 +- .../tests/pt/model/test_property_fitting.py | 41 ++-- source/tests/pt/property/double/nopbc | 0 .../double/set.000000/band_property.npy | Bin 0 -> 176 bytes .../pt/property/double/set.000000/coord.npy | Bin 0 -> 1088 bytes .../double/set.000000/real_atom_types.npy | Bin 0 -> 448 bytes source/tests/pt/property/double/type.raw | 20 ++ source/tests/pt/property/double/type_map.raw | 4 + source/tests/pt/property/input.json | 1 + .../{property.npy => band_property.npy} | Bin source/tests/pt/test_dp_test.py | 2 +- source/tests/pt/test_training.py | 2 +- .../universal/common/cases/model/model.py | 2 +- .../universal/dpmodel/fitting/test_fitting.py | 2 + .../tests/universal/dpmodel/loss/test_loss.py | 7 +- 43 files changed, 652 insertions(+), 146 deletions(-) create mode 100644 doc/model/train-fitting-property.md rename examples/property/data/data_0/set.000000/{property.npy => band_prop.npy} (100%) rename examples/property/data/data_1/set.000000/{property.npy => band_prop.npy} (100%) rename examples/property/data/data_2/set.000000/{property.npy => band_prop.npy} (100%) create mode 100644 examples/property/train/README.md create mode 100644 source/tests/pt/property/double/nopbc create mode 100644 source/tests/pt/property/double/set.000000/band_property.npy create mode 100644 source/tests/pt/property/double/set.000000/coord.npy create mode 100644 source/tests/pt/property/double/set.000000/real_atom_types.npy create mode 100644 source/tests/pt/property/double/type.raw create mode 100644 source/tests/pt/property/double/type_map.raw rename source/tests/pt/property/single/set.000000/{property.npy => band_property.npy} (100%) diff --git a/deepmd/dpmodel/atomic_model/__init__.py b/deepmd/dpmodel/atomic_model/__init__.py index 3d90c738ae..4d882d5e4b 100644 --- a/deepmd/dpmodel/atomic_model/__init__.py +++ b/deepmd/dpmodel/atomic_model/__init__.py @@ -42,6 +42,9 @@ from .polar_atomic_model import ( DPPolarAtomicModel, ) +from .property_atomic_model import ( + DPPropertyAtomicModel, +) __all__ = [ "BaseAtomicModel", @@ -50,6 +53,7 @@ "DPDipoleAtomicModel", "DPEnergyAtomicModel", 
"DPPolarAtomicModel", + "DPPropertyAtomicModel", "DPZBLLinearEnergyAtomicModel", "LinearEnergyAtomicModel", "PairTabAtomicModel", diff --git a/deepmd/dpmodel/atomic_model/property_atomic_model.py b/deepmd/dpmodel/atomic_model/property_atomic_model.py index 6f69f8dfb6..e3c038e695 100644 --- a/deepmd/dpmodel/atomic_model/property_atomic_model.py +++ b/deepmd/dpmodel/atomic_model/property_atomic_model.py @@ -1,4 +1,6 @@ # SPDX-License-Identifier: LGPL-3.0-or-later +import numpy as np + from deepmd.dpmodel.fitting.property_fitting import ( PropertyFittingNet, ) @@ -15,3 +17,25 @@ def __init__(self, descriptor, fitting, type_map, **kwargs): "fitting must be an instance of PropertyFittingNet for DPPropertyAtomicModel" ) super().__init__(descriptor, fitting, type_map, **kwargs) + + def apply_out_stat( + self, + ret: dict[str, np.ndarray], + atype: np.ndarray, + ): + """Apply the stat to each atomic output. + + In property fitting, each output will be multiplied by label std and then plus the label average value. + + Parameters + ---------- + ret + The returned dict by the forward_atomic method + atype + The atom types. nf x nloc. It is useless in property fitting. + + """ + out_bias, out_std = self._fetch_out_stat(self.bias_keys) + for kk in self.bias_keys: + ret[kk] = ret[kk] * out_std[kk][0] + out_bias[kk][0] + return ret diff --git a/deepmd/dpmodel/fitting/property_fitting.py b/deepmd/dpmodel/fitting/property_fitting.py index 8b903af00e..6d0aa3546f 100644 --- a/deepmd/dpmodel/fitting/property_fitting.py +++ b/deepmd/dpmodel/fitting/property_fitting.py @@ -41,10 +41,9 @@ class PropertyFittingNet(InvarFitting): this list is of length :math:`N_l + 1`, specifying if the hidden layers and the output layer are trainable. intensive Whether the fitting property is intensive. - bias_method - The method of applying the bias to each atomic output, user can select 'normal' or 'no_bias'. - If 'normal' is used, the computed bias will be added to the atomic output. - If 'no_bias' is used, no bias will be added to the atomic output. + property_name: + The name of fitting property, which should be consistent with the property name in the dataset. + If the data file is named `humo.npy`, this parameter should be "humo". 
resnet_dt Time-step `dt` in the resnet construction: :math:`y = x + dt * \phi (Wx + b)` @@ -74,7 +73,7 @@ def __init__( rcond: Optional[float] = None, trainable: Union[bool, list[bool]] = True, intensive: bool = False, - bias_method: str = "normal", + property_name: str = "property", resnet_dt: bool = True, numb_fparam: int = 0, numb_aparam: int = 0, @@ -89,9 +88,8 @@ def __init__( ) -> None: self.task_dim = task_dim self.intensive = intensive - self.bias_method = bias_method super().__init__( - var_name="property", + var_name=property_name, ntypes=ntypes, dim_descrpt=dim_descrpt, dim_out=task_dim, @@ -113,9 +111,9 @@ def __init__( @classmethod def deserialize(cls, data: dict) -> "PropertyFittingNet": data = data.copy() - check_version_compatibility(data.pop("@version"), 3, 1) + check_version_compatibility(data.pop("@version"), 4, 1) data.pop("dim_out") - data.pop("var_name") + data["property_name"] = data.pop("var_name") data.pop("tot_ener_zero") data.pop("layer_name") data.pop("use_aparam_as_mask", None) @@ -131,6 +129,8 @@ def serialize(self) -> dict: **InvarFitting.serialize(self), "type": "property", "task_dim": self.task_dim, + "intensive": self.intensive, } + dd["@version"] = 4 return dd diff --git a/deepmd/dpmodel/model/property_model.py b/deepmd/dpmodel/model/property_model.py index 16fdedd36e..9bd07bd349 100644 --- a/deepmd/dpmodel/model/property_model.py +++ b/deepmd/dpmodel/model/property_model.py @@ -1,6 +1,6 @@ # SPDX-License-Identifier: LGPL-3.0-or-later -from deepmd.dpmodel.atomic_model.dp_atomic_model import ( - DPAtomicModel, +from deepmd.dpmodel.atomic_model import ( + DPPropertyAtomicModel, ) from deepmd.dpmodel.model.base_model import ( BaseModel, @@ -13,7 +13,7 @@ make_model, ) -DPPropertyModel_ = make_model(DPAtomicModel) +DPPropertyModel_ = make_model(DPPropertyAtomicModel) @BaseModel.register("property") diff --git a/deepmd/entrypoints/test.py b/deepmd/entrypoints/test.py index d9744246d7..5aeb84468d 100644 --- a/deepmd/entrypoints/test.py +++ b/deepmd/entrypoints/test.py @@ -779,9 +779,17 @@ def test_property( tuple[list[np.ndarray], list[int]] arrays with results and their shapes """ - data.add("property", dp.task_dim, atomic=False, must=True, high_prec=True) + var_name = dp.get_var_name() + assert isinstance(var_name, str) + data.add(var_name, dp.task_dim, atomic=False, must=True, high_prec=True) if has_atom_property: - data.add("atom_property", dp.task_dim, atomic=True, must=False, high_prec=True) + data.add( + f"atom_{var_name}", + dp.task_dim, + atomic=True, + must=False, + high_prec=True, + ) if dp.get_dim_fparam() > 0: data.add( @@ -832,12 +840,12 @@ def test_property( aproperty = ret[1] aproperty = aproperty.reshape([numb_test, natoms * dp.task_dim]) - diff_property = property - test_data["property"][:numb_test] + diff_property = property - test_data[var_name][:numb_test] mae_property = mae(diff_property) rmse_property = rmse(diff_property) if has_atom_property: - diff_aproperty = aproperty - test_data["atom_property"][:numb_test] + diff_aproperty = aproperty - test_data[f"atom_{var_name}"][:numb_test] mae_aproperty = mae(diff_aproperty) rmse_aproperty = rmse(diff_aproperty) @@ -854,7 +862,7 @@ def test_property( detail_path = Path(detail_file) for ii in range(numb_test): - test_out = test_data["property"][ii].reshape(-1, 1) + test_out = test_data[var_name][ii].reshape(-1, 1) pred_out = property[ii].reshape(-1, 1) frame_output = np.hstack((test_out, pred_out)) @@ -868,7 +876,7 @@ def test_property( if has_atom_property: for ii in range(numb_test): - 
test_out = test_data["atom_property"][ii].reshape(-1, 1) + test_out = test_data[f"atom_{var_name}"][ii].reshape(-1, 1) pred_out = aproperty[ii].reshape(-1, 1) frame_output = np.hstack((test_out, pred_out)) diff --git a/deepmd/infer/deep_eval.py b/deepmd/infer/deep_eval.py index 159f9bdf60..15e4a56280 100644 --- a/deepmd/infer/deep_eval.py +++ b/deepmd/infer/deep_eval.py @@ -70,8 +70,6 @@ class DeepEvalBackend(ABC): "dipole_derv_c_redu": "virial", "dos": "atom_dos", "dos_redu": "dos", - "property": "atom_property", - "property_redu": "property", "mask_mag": "mask_mag", "mask": "mask", # old models in v1 @@ -276,6 +274,10 @@ def get_has_spin(self) -> bool: """Check if the model has spin atom types.""" return False + def get_var_name(self) -> str: + """Get the name of the fitting property.""" + raise NotImplementedError + @abstractmethod def get_ntypes_spin(self) -> int: """Get the number of spin atom types of this model. Only used in old implement.""" diff --git a/deepmd/infer/deep_property.py b/deepmd/infer/deep_property.py index 389a0e8512..5944491cc0 100644 --- a/deepmd/infer/deep_property.py +++ b/deepmd/infer/deep_property.py @@ -37,25 +37,41 @@ class DeepProperty(DeepEval): Keyword arguments. """ - @property def output_def(self) -> ModelOutputDef: - """Get the output definition of this model.""" - return ModelOutputDef( + """ + Get the output definition of this model. + But in property_fitting, the output definition is not known until the model is loaded. + So we need to rewrite the output definition after the model is loaded. + See detail in change_output_def. + """ + pass + + def change_output_def(self) -> None: + """ + Change the output definition of this model. + In property_fitting, the output definition is known after the model is loaded. + We need to rewrite the output definition and related information. 
+ """ + self.output_def = ModelOutputDef( FittingOutputDef( [ OutputVariableDef( - "property", - shape=[-1], + self.get_var_name(), + shape=[self.get_task_dim()], reducible=True, atomic=True, + intensive=self.get_intensive(), ), ] ) ) - - def change_output_def(self) -> None: - self.output_def["property"].shape = self.task_dim - self.output_def["property"].intensive = self.get_intensive() + self.deep_eval.output_def = self.output_def + self.deep_eval._OUTDEF_DP2BACKEND[self.get_var_name()] = ( + f"atom_{self.get_var_name()}" + ) + self.deep_eval._OUTDEF_DP2BACKEND[f"{self.get_var_name()}_redu"] = ( + self.get_var_name() + ) @property def task_dim(self) -> int: @@ -120,10 +136,12 @@ def eval( aparam=aparam, **kwargs, ) - atomic_property = results["property"].reshape( + atomic_property = results[self.get_var_name()].reshape( nframes, natoms, self.get_task_dim() ) - property = results["property_redu"].reshape(nframes, self.get_task_dim()) + property = results[f"{self.get_var_name()}_redu"].reshape( + nframes, self.get_task_dim() + ) if atomic: return ( @@ -141,5 +159,9 @@ def get_intensive(self) -> bool: """Get whether the property is intensive.""" return self.deep_eval.get_intensive() + def get_var_name(self) -> str: + """Get the name of the fitting property.""" + return self.deep_eval.get_var_name() + __all__ = ["DeepProperty"] diff --git a/deepmd/pt/infer/deep_eval.py b/deepmd/pt/infer/deep_eval.py index 59b833d34c..facead838e 100644 --- a/deepmd/pt/infer/deep_eval.py +++ b/deepmd/pt/infer/deep_eval.py @@ -184,6 +184,15 @@ def get_dim_aparam(self) -> int: def get_intensive(self) -> bool: return self.dp.model["Default"].get_intensive() + def get_var_name(self) -> str: + """Get the name of the property.""" + if hasattr(self.dp.model["Default"], "get_var_name") and callable( + getattr(self.dp.model["Default"], "get_var_name") + ): + return self.dp.model["Default"].get_var_name() + else: + raise NotImplementedError + @property def model_type(self) -> type["DeepEvalWrapper"]: """The the evaluator of the model type.""" @@ -200,7 +209,7 @@ def model_type(self) -> type["DeepEvalWrapper"]: return DeepGlobalPolar elif "wfc" in model_output_type: return DeepWFC - elif "property" in model_output_type: + elif self.get_var_name() in model_output_type: return DeepProperty else: raise RuntimeError("Unknown model type") diff --git a/deepmd/pt/loss/property.py b/deepmd/pt/loss/property.py index 07e394650a..9d42c81b45 100644 --- a/deepmd/pt/loss/property.py +++ b/deepmd/pt/loss/property.py @@ -1,5 +1,8 @@ # SPDX-License-Identifier: LGPL-3.0-or-later import logging +from typing import ( + Union, +) import torch import torch.nn.functional as F @@ -21,9 +24,13 @@ class PropertyLoss(TaskLoss): def __init__( self, task_dim, + var_name: str, loss_func: str = "smooth_mae", metric: list = ["mae"], beta: float = 1.00, + out_bias: Union[list, None] = None, + out_std: Union[list, None] = None, + intensive: bool = False, **kwargs, ) -> None: r"""Construct a layer to compute loss on property. @@ -32,18 +39,32 @@ def __init__( ---------- task_dim : float The output dimension of property fitting net. + var_name : str + The atomic property to fit, 'energy', 'dipole', and 'polar'. loss_func : str The loss function, such as "smooth_mae", "mae", "rmse". metric : list The metric such as mae, rmse which will be printed. - beta: + beta : float The 'beta' parameter in 'smooth_mae' loss. + out_bias : Union[list, None] + It is the average value of the label. The shape is nkeys * ntypes * task_dim. 
+ In property fitting, nkeys = 1, so the shape is 1 * ntypes * task_dim. + out_std : Union[list, None] + It is the standard deviation of the label. The shape is nkeys * ntypes * task_dim. + In property fitting, nkeys = 1, so the shape is 1 * ntypes * task_dim. + intensive : bool + Whether the property is intensive. """ super().__init__() self.task_dim = task_dim self.loss_func = loss_func self.metric = metric self.beta = beta + self.out_bias = out_bias + self.out_std = out_std + self.intensive = intensive + self.var_name = var_name def forward(self, input_dict, model, label, natoms, learning_rate=0.0, mae=False): """Return loss on properties . @@ -69,34 +90,64 @@ def forward(self, input_dict, model, label, natoms, learning_rate=0.0, mae=False Other losses for display. """ model_pred = model(**input_dict) - assert label["property"].shape[-1] == self.task_dim - assert model_pred["property"].shape[-1] == self.task_dim + var_name = self.var_name + nbz = model_pred[var_name].shape[0] + assert model_pred[var_name].shape == (nbz, self.task_dim) + assert label[var_name].shape == (nbz, self.task_dim) + if not self.intensive: + model_pred[var_name] = model_pred[var_name] / natoms + label[var_name] = label[var_name] / natoms + + if self.out_std is None: + out_std = model.atomic_model.out_std[0][0] + else: + out_std = torch.tensor( + self.out_std, dtype=env.GLOBAL_PT_FLOAT_PRECISION, device=env.DEVICE + ) + if out_std.shape != (self.task_dim,): + raise ValueError( + f"Expected out_std to have shape ({self.task_dim},), but got {out_std.shape}" + ) + + if self.out_bias is None: + out_bias = model.atomic_model.out_bias[0][0] + else: + out_bias = torch.tensor( + self.out_bias, dtype=env.GLOBAL_PT_FLOAT_PRECISION, device=env.DEVICE + ) + if out_bias.shape != (self.task_dim,): + raise ValueError( + f"Expected out_bias to have shape ({self.task_dim},), but got {out_bias.shape}" + ) + loss = torch.zeros(1, dtype=env.GLOBAL_PT_FLOAT_PRECISION, device=env.DEVICE)[0] more_loss = {} # loss if self.loss_func == "smooth_mae": loss += F.smooth_l1_loss( - label["property"], - model_pred["property"], + (label[var_name] - out_bias) / out_std, + (model_pred[var_name] - out_bias) / out_std, reduction="sum", beta=self.beta, ) elif self.loss_func == "mae": loss += F.l1_loss( - label["property"], model_pred["property"], reduction="sum" + (label[var_name] - out_bias) / out_std, + (model_pred[var_name] - out_bias) / out_std, + reduction="sum", ) elif self.loss_func == "mse": loss += F.mse_loss( - label["property"], - model_pred["property"], + (label[var_name] - out_bias) / out_std, + (model_pred[var_name] - out_bias) / out_std, reduction="sum", ) elif self.loss_func == "rmse": loss += torch.sqrt( F.mse_loss( - label["property"], - model_pred["property"], + (label[var_name] - out_bias) / out_std, + (model_pred[var_name] - out_bias) / out_std, reduction="mean", ) ) @@ -106,28 +157,28 @@ def forward(self, input_dict, model, label, natoms, learning_rate=0.0, mae=False # more loss if "smooth_mae" in self.metric: more_loss["smooth_mae"] = F.smooth_l1_loss( - label["property"], - model_pred["property"], + label[var_name], + model_pred[var_name], reduction="mean", beta=self.beta, ).detach() if "mae" in self.metric: more_loss["mae"] = F.l1_loss( - label["property"], - model_pred["property"], + label[var_name], + model_pred[var_name], reduction="mean", ).detach() if "mse" in self.metric: more_loss["mse"] = F.mse_loss( - label["property"], - model_pred["property"], + label[var_name], + model_pred[var_name], reduction="mean", ).detach() 
if "rmse" in self.metric: more_loss["rmse"] = torch.sqrt( F.mse_loss( - label["property"], - model_pred["property"], + label[var_name], + model_pred[var_name], reduction="mean", ) ).detach() @@ -140,10 +191,10 @@ def label_requirement(self) -> list[DataRequirementItem]: label_requirement = [] label_requirement.append( DataRequirementItem( - "property", + self.var_name, ndof=self.task_dim, atomic=False, - must=False, + must=True, high_prec=True, ) ) diff --git a/deepmd/pt/model/atomic_model/base_atomic_model.py b/deepmd/pt/model/atomic_model/base_atomic_model.py index a64eca0fe9..c83e35dab3 100644 --- a/deepmd/pt/model/atomic_model/base_atomic_model.py +++ b/deepmd/pt/model/atomic_model/base_atomic_model.py @@ -125,6 +125,14 @@ def get_type_map(self) -> list[str]: """Get the type map.""" return self.type_map + def get_compute_stats_distinguish_types(self) -> bool: + """Get whether the fitting net computes stats which are not distinguished between different types of atoms.""" + return True + + def get_intensive(self) -> bool: + """Whether the fitting property is intensive.""" + return False + def reinit_atom_exclude( self, exclude_types: list[int] = [], @@ -456,7 +464,6 @@ def change_out_bias( model_forward=self._get_forward_wrapper_func(), rcond=self.rcond, preset_bias=self.preset_out_bias, - atomic_output=self.atomic_output_def(), ) self._store_out_stat(delta_bias, out_std, add=True) elif bias_adjust_mode == "set-by-statistic": @@ -467,7 +474,8 @@ def change_out_bias( stat_file_path=stat_file_path, rcond=self.rcond, preset_bias=self.preset_out_bias, - atomic_output=self.atomic_output_def(), + stats_distinguish_types=self.get_compute_stats_distinguish_types(), + intensive=self.get_intensive(), ) self._store_out_stat(bias_out, std_out) else: diff --git a/deepmd/pt/model/atomic_model/property_atomic_model.py b/deepmd/pt/model/atomic_model/property_atomic_model.py index 1fdc72b2b6..3622c9f476 100644 --- a/deepmd/pt/model/atomic_model/property_atomic_model.py +++ b/deepmd/pt/model/atomic_model/property_atomic_model.py @@ -19,31 +19,31 @@ def __init__(self, descriptor, fitting, type_map, **kwargs): ) super().__init__(descriptor, fitting, type_map, **kwargs) + def get_compute_stats_distinguish_types(self) -> bool: + """Get whether the fitting net computes stats which are not distinguished between different types of atoms.""" + return False + + def get_intensive(self) -> bool: + """Whether the fitting property is intensive.""" + return self.fitting_net.get_intensive() + def apply_out_stat( self, ret: dict[str, torch.Tensor], atype: torch.Tensor, ): """Apply the stat to each atomic output. - This function defines how the bias is applied to the atomic output of the model. + In property fitting, each output will be multiplied by label std and then plus the label average value. Parameters ---------- ret The returned dict by the forward_atomic method atype - The atom types. nf x nloc + The atom types. nf x nloc. It is useless in property fitting. """ - if self.fitting_net.get_bias_method() == "normal": - out_bias, out_std = self._fetch_out_stat(self.bias_keys) - for kk in self.bias_keys: - # nf x nloc x odims, out_bias: ntypes x odims - ret[kk] = ret[kk] + out_bias[kk][atype] - return ret - elif self.fitting_net.get_bias_method() == "no_bias": - return ret - else: - raise NotImplementedError( - "Only 'normal' and 'no_bias' is supported for parameter 'bias_method'." 
- ) + out_bias, out_std = self._fetch_out_stat(self.bias_keys) + for kk in self.bias_keys: + ret[kk] = ret[kk] * out_std[kk][0] + out_bias[kk][0] + return ret diff --git a/deepmd/pt/model/model/property_model.py b/deepmd/pt/model/model/property_model.py index 4581a2bc3e..7c50c75ff1 100644 --- a/deepmd/pt/model/model/property_model.py +++ b/deepmd/pt/model/model/property_model.py @@ -37,8 +37,8 @@ def __init__( def translated_output_def(self): out_def_data = self.model_output_def().get_data() output_def = { - "atom_property": out_def_data["property"], - "property": out_def_data["property_redu"], + f"atom_{self.get_var_name()}": out_def_data[self.get_var_name()], + self.get_var_name(): out_def_data[f"{self.get_var_name()}_redu"], } if "mask" in out_def_data: output_def["mask"] = out_def_data["mask"] @@ -62,8 +62,8 @@ def forward( do_atomic_virial=do_atomic_virial, ) model_predict = {} - model_predict["atom_property"] = model_ret["property"] - model_predict["property"] = model_ret["property_redu"] + model_predict[f"atom_{self.get_var_name()}"] = model_ret[self.get_var_name()] + model_predict[self.get_var_name()] = model_ret[f"{self.get_var_name()}_redu"] if "mask" in model_ret: model_predict["mask"] = model_ret["mask"] return model_predict @@ -76,7 +76,12 @@ def get_task_dim(self) -> int: @torch.jit.export def get_intensive(self) -> bool: """Get whether the property is intensive.""" - return self.model_output_def()["property"].intensive + return self.model_output_def()[self.get_var_name()].intensive + + @torch.jit.export + def get_var_name(self) -> str: + """Get the name of the property.""" + return self.get_fitting_net().var_name @torch.jit.export def forward_lower( @@ -102,8 +107,8 @@ def forward_lower( extra_nlist_sort=self.need_sorted_nlist_for_lower(), ) model_predict = {} - model_predict["atom_property"] = model_ret["property"] - model_predict["property"] = model_ret["property_redu"] + model_predict[f"atom_{self.get_var_name()}"] = model_ret[self.get_var_name()] + model_predict[self.get_var_name()] = model_ret[f"{self.get_var_name()}_redu"] if "mask" in model_ret: model_predict["mask"] = model_ret["mask"] return model_predict diff --git a/deepmd/pt/model/task/property.py b/deepmd/pt/model/task/property.py index dec0f1447b..c15e60fe04 100644 --- a/deepmd/pt/model/task/property.py +++ b/deepmd/pt/model/task/property.py @@ -43,17 +43,16 @@ class PropertyFittingNet(InvarFitting): dim_descrpt : int Embedding width per atom. task_dim : int - The dimension of outputs of fitting net. + The dimension of outputs of fitting net. + property_name: + The name of fitting property, which should be consistent with the property name in the dataset. + If the data file is named `humo.npy`, this parameter should be "humo". neuron : list[int] Number of neurons in each hidden layers of the fitting net. bias_atom_p : torch.Tensor, optional Average property per atom for each element. intensive : bool, optional Whether the fitting property is intensive. - bias_method : str, optional - The method of applying the bias to each atomic output, user can select 'normal' or 'no_bias'. - If 'normal' is used, the computed bias will be added to the atomic output. - If 'no_bias' is used, no bias will be added to the atomic output. resnet_dt : bool Using time-step in the ResNet construction. 
numb_fparam : int @@ -77,11 +76,11 @@ def __init__( self, ntypes: int, dim_descrpt: int, + property_name: str, task_dim: int = 1, neuron: list[int] = [128, 128, 128], bias_atom_p: Optional[torch.Tensor] = None, intensive: bool = False, - bias_method: str = "normal", resnet_dt: bool = True, numb_fparam: int = 0, numb_aparam: int = 0, @@ -94,9 +93,8 @@ def __init__( ) -> None: self.task_dim = task_dim self.intensive = intensive - self.bias_method = bias_method super().__init__( - var_name="property", + var_name=property_name, ntypes=ntypes, dim_descrpt=dim_descrpt, dim_out=task_dim, @@ -113,9 +111,6 @@ def __init__( **kwargs, ) - def get_bias_method(self) -> str: - return self.bias_method - def output_def(self) -> FittingOutputDef: return FittingOutputDef( [ @@ -130,12 +125,16 @@ def output_def(self) -> FittingOutputDef: ] ) + def get_intensive(self) -> bool: + """Whether the fitting property is intensive.""" + return self.intensive + @classmethod def deserialize(cls, data: dict) -> "PropertyFittingNet": data = data.copy() - check_version_compatibility(data.pop("@version", 1), 3, 1) + check_version_compatibility(data.pop("@version", 1), 4, 1) data.pop("dim_out") - data.pop("var_name") + data["property_name"] = data.pop("var_name") obj = super().deserialize(data) return obj @@ -146,7 +145,9 @@ def serialize(self) -> dict: **InvarFitting.serialize(self), "type": "property", "task_dim": self.task_dim, + "intensive": self.intensive, } + dd["@version"] = 4 return dd diff --git a/deepmd/pt/train/training.py b/deepmd/pt/train/training.py index 8ca510492c..eca952d7f8 100644 --- a/deepmd/pt/train/training.py +++ b/deepmd/pt/train/training.py @@ -1240,7 +1240,11 @@ def get_loss(loss_params, start_lr, _ntypes, _model): return TensorLoss(**loss_params) elif loss_type == "property": task_dim = _model.get_task_dim() + var_name = _model.get_var_name() + intensive = _model.get_intensive() loss_params["task_dim"] = task_dim + loss_params["var_name"] = var_name + loss_params["intensive"] = intensive return PropertyLoss(**loss_params) else: loss_params["starter_learning_rate"] = start_lr diff --git a/deepmd/pt/utils/stat.py b/deepmd/pt/utils/stat.py index 1c5e3f1c52..710d392ac3 100644 --- a/deepmd/pt/utils/stat.py +++ b/deepmd/pt/utils/stat.py @@ -12,9 +12,6 @@ import numpy as np import torch -from deepmd.dpmodel.output_def import ( - FittingOutputDef, -) from deepmd.pt.utils import ( AtomExcludeMask, ) @@ -27,6 +24,7 @@ to_torch_tensor, ) from deepmd.utils.out_stat import ( + compute_stats_do_not_distinguish_types, compute_stats_from_atomic, compute_stats_from_redu, ) @@ -136,11 +134,16 @@ def _post_process_stat( For global statistics, we do not have the std for each type of atoms, thus fake the output std by ones for all the types. + If the shape of out_std is already the same as out_bias, + we do not need to do anything. """ new_std = {} for kk, vv in out_bias.items(): - new_std[kk] = np.ones_like(vv) + if vv.shape == out_std[kk].shape: + new_std[kk] = out_std[kk] + else: + new_std[kk] = np.ones_like(vv) return out_bias, new_std @@ -242,7 +245,8 @@ def compute_output_stats( rcond: Optional[float] = None, preset_bias: Optional[dict[str, list[Optional[np.ndarray]]]] = None, model_forward: Optional[Callable[..., torch.Tensor]] = None, - atomic_output: Optional[FittingOutputDef] = None, + stats_distinguish_types: bool = True, + intensive: bool = False, ): """ Compute the output statistics (e.g. energy bias) for the fitting net from packed data. 
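For orientation, the element-independent statistics this refactor switches to (implemented as `compute_stats_do_not_distinguish_types` in `deepmd/utils/out_stat.py` later in this patch) can be sketched in a few lines of NumPy. This is an illustration of the math with toy values, not the library code:

```py
import numpy as np

# toy global labels and per-type atom counts
output_redu = np.array([[3.0], [5.0]])  # (nframes, task_dim)
natoms = np.array([[2, 1], [3, 2]])     # (nframes, ntypes)

# extensive property: normalize by the total atom count before taking stats
per_atom = output_redu / natoms.sum(axis=1, keepdims=True)
# every atom type shares the same mean (the "fake" bias) and std
bias = np.repeat(per_atom.mean(axis=0)[np.newaxis, :], natoms.shape[1], axis=0)
std = np.tile(per_atom.std(axis=0), (natoms.shape[1], 1))
```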
@@ -272,8 +276,10 @@ def compute_output_stats( If not None, the model will be utilized to generate the original energy prediction, which will be subtracted from the energy label of the data. The difference will then be used to calculate the delta complement energy bias for each type. - atomic_output : FittingOutputDef, optional - The output of atomic model. + stats_distinguish_types : bool, optional + Whether to distinguish different element types in the statistics. + intensive : bool, optional + Whether the fitting target is intensive. """ # try to restore the bias from stat file bias_atom_e, std_atom_e = _restore_from_file(stat_file_path, keys) @@ -362,7 +368,8 @@ def compute_output_stats( rcond, preset_bias, model_pred_g, - atomic_output, + stats_distinguish_types, + intensive, ) bias_atom_a, std_atom_a = compute_output_stats_atomic( sampled, @@ -405,7 +412,8 @@ def compute_output_stats_global( rcond: Optional[float] = None, preset_bias: Optional[dict[str, list[Optional[np.ndarray]]]] = None, model_pred: Optional[dict[str, np.ndarray]] = None, - atomic_output: Optional[FittingOutputDef] = None, + stats_distinguish_types: bool = True, + intensive: bool = False, ): """This function only handle stat computation from reduced global labels.""" # return directly if model predict is empty for global @@ -476,19 +484,22 @@ def compute_output_stats_global( std_atom_e = {} for kk in keys: if kk in stats_input: - if atomic_output is not None and atomic_output.get_data()[kk].intensive: - task_dim = stats_input[kk].shape[1] - assert merged_natoms[kk].shape == (nf[kk], ntypes) - stats_input[kk] = ( - merged_natoms[kk].sum(axis=1).reshape(-1, 1) * stats_input[kk] + if not stats_distinguish_types: + bias_atom_e[kk], std_atom_e[kk] = ( + compute_stats_do_not_distinguish_types( + stats_input[kk], + merged_natoms[kk], + assigned_bias=assigned_atom_ener[kk], + intensive=intensive, + ) + ) + else: + bias_atom_e[kk], std_atom_e[kk] = compute_stats_from_redu( + stats_input[kk], + merged_natoms[kk], + assigned_bias=assigned_atom_ener[kk], + rcond=rcond, ) - assert stats_input[kk].shape == (nf[kk], task_dim) - bias_atom_e[kk], std_atom_e[kk] = compute_stats_from_redu( - stats_input[kk], - merged_natoms[kk], - assigned_bias=assigned_atom_ener[kk], - rcond=rcond, - ) else: # this key does not have global labels, skip it. continue diff --git a/deepmd/utils/argcheck.py b/deepmd/utils/argcheck.py index 9eac0e804d..50ef07b2af 100644 --- a/deepmd/utils/argcheck.py +++ b/deepmd/utils/argcheck.py @@ -1580,7 +1580,7 @@ def fitting_property(): doc_seed = "Random seed for parameter initialization of the fitting net" doc_task_dim = "The dimension of outputs of fitting net" doc_intensive = "Whether the fitting property is intensive" - doc_bias_method = "The method of applying the bias to each atomic output, user can select 'normal' or 'no_bias'. If 'no_bias' is used, no bias will be added to the atomic output." + doc_property_name = "The names of fitting property, which should be consistent with the property name in the dataset." 
return [ Argument("numb_fparam", int, optional=True, default=0, doc=doc_numb_fparam), Argument("numb_aparam", int, optional=True, default=0, doc=doc_numb_aparam), @@ -1612,7 +1612,10 @@ def fitting_property(): Argument("task_dim", int, optional=True, default=1, doc=doc_task_dim), Argument("intensive", bool, optional=True, default=False, doc=doc_intensive), Argument( - "bias_method", str, optional=True, default="normal", doc=doc_bias_method + "property_name", + str, + optional=False, + doc=doc_property_name, ), ] diff --git a/deepmd/utils/out_stat.py b/deepmd/utils/out_stat.py index 4d0d788f8b..ecbd379e2d 100644 --- a/deepmd/utils/out_stat.py +++ b/deepmd/utils/out_stat.py @@ -130,3 +130,64 @@ def compute_stats_from_atomic( output[mask].std(axis=0) if output[mask].size > 0 else np.nan ) return output_bias, output_std + + +def compute_stats_do_not_distinguish_types( + output_redu: np.ndarray, + natoms: np.ndarray, + assigned_bias: Optional[np.ndarray] = None, + intensive: bool = False, +) -> tuple[np.ndarray, np.ndarray]: + """Compute element-independent statistics for property fitting. + + Computes mean and standard deviation of the output, treating all elements equally. + For extensive properties, the output is normalized by the total number of atoms + before computing statistics. + + Parameters + ---------- + output_redu + The reduced output value, shape is [nframes, *(odim0, odim1, ...)]. + natoms + The number of atoms for each atom, shape is [nframes, ntypes]. + Used for normalization of extensive properties and generating uniform bias. + assigned_bias + The assigned output bias, shape is [ntypes, *(odim0, odim1, ...)]. + Set to a tensor of shape (odim0, odim1, ...) filled with nan if the bias + of the type is not assigned. + intensive + Whether the output is intensive or extensive. + If False, the output will be normalized by the total number of atoms before computing statistics. + + Returns + ------- + np.ndarray + The computed output mean(fake bias), shape is [ntypes, *(odim0, odim1, ...)]. + The same bias is used for all atom types. + np.ndarray + The computed output standard deviation, shape is [ntypes, *(odim0, odim1, ...)]. + The same standard deviation is used for all atom types. 
+    """
+    natoms = np.array(natoms)  # [nf, ntypes]
+    nf, ntypes = natoms.shape
+    output_redu = np.array(output_redu)
+    var_shape = list(output_redu.shape[1:])
+    output_redu = output_redu.reshape(nf, -1)
+    if not intensive:
+        total_atoms = natoms.sum(axis=1)
+        output_redu = output_redu / total_atoms[:, np.newaxis]
+    # check shape
+    assert output_redu.ndim == 2
+    assert natoms.ndim == 2
+    assert output_redu.shape[0] == natoms.shape[0]  # same number of frames
+
+    computed_output_bias = np.repeat(
+        np.mean(output_redu, axis=0)[np.newaxis, :], ntypes, axis=0
+    )
+    output_std = np.std(output_redu, axis=0)
+
+    computed_output_bias = computed_output_bias.reshape([natoms.shape[1]] + var_shape)  # noqa: RUF005
+    output_std = output_std.reshape(var_shape)
+    output_std = np.tile(output_std, (computed_output_bias.shape[0], 1))
+
+    return computed_output_bias, output_std
diff --git a/doc/model/index.rst b/doc/model/index.rst
index c067ea4207..5e7ba32486 100644
--- a/doc/model/index.rst
+++ b/doc/model/index.rst
@@ -16,6 +16,7 @@ Model
    train-energy-spin
    train-fitting-tensor
    train-fitting-dos
+   train-fitting-property
    train-se-e2-a-tebd
    train-se-a-mask
    train-se-e3-tebd
diff --git a/doc/model/train-fitting-property.md b/doc/model/train-fitting-property.md
new file mode 100644
index 0000000000..be1b63bf6f
--- /dev/null
+++ b/doc/model/train-fitting-property.md
@@ -0,0 +1,194 @@
+# Fit other properties {{ pytorch_icon }} {{ jax_icon }} {{ dpmodel_icon }}
+
+:::{note}
+**Supported backends**: PyTorch {{ pytorch_icon }}, JAX {{ jax_icon }}, DP {{ dpmodel_icon }}
+:::
+
+Here we present an API to the DeepProperty model, which can be used to fit other properties such as the band gap, bulk modulus, critical temperature, etc.
+
+In this example, we will show you how to train a model to fit the `humo`, `lumo` and `band gap` properties. A complete training input script for the examples can be found in
+
+```bash
+$deepmd_source_dir/examples/property/train
+```
+
+The training and validation data are also provided in our examples. But note that **the data provided along with the examples are of limited amount, and should not be used to train a production model.**
+
+Similar to the `input.json` used in `ener` mode, the training JSON is also divided into {ref}`model <model/>`, {ref}`learning_rate <learning_rate/>`, {ref}`loss <loss/>` and {ref}`training <training/>`. Most keywords remain the same as in `ener` mode, and their meaning can be found [here](train-se-atten.md). To fit the `property`, one needs to modify {ref}`model[standard]/fitting_net <model[standard]/fitting_net/>` and {ref}`loss <loss/>`.
+
+## The fitting network
+
+The {ref}`fitting_net <model[standard]/fitting_net/>` section tells DP which fitting net to use.
+
+The JSON of the `property` type should be provided like
+
+```json
+"fitting_net" : {
+    "type": "property",
+    "intensive": true,
+    "property_name": "band_prop",
+    "task_dim": 3,
+    "neuron": [240, 240, 240],
+    "resnet_dt": true,
+    "seed": 1
+},
+```
+
+- `type` specifies which type of fitting net should be used. It should be `property`.
+- `intensive` indicates whether the fitting property is intensive. If `intensive` is `true`, the model output is the average of the property contribution of each atom; if `intensive` is `false`, the model output is the sum of the property contribution of each atom (see the sketch after this list).
+- `property_name` is the name of the property to be predicted. It should be consistent with the property name in the dataset. In each system, the code will read the `set.*/{property_name}.npy` file as the prediction label if you use NumPy format data.
+- `fitting_net/task_dim` is the dimension of the model output. It should be consistent with the property dimension in the dataset, which means that if the shape of the data stored in `set.*/{property_name}.npy` is `batch size * 3`, `fitting_net/task_dim` should be set to 3.
+- The rest of the arguments have the same meaning as they do in `ener` mode.
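+
+As a rough illustration of the `intensive` switch (a NumPy sketch of the reduction only, not the actual implementation):
+
+```py
+import numpy as np
+
+# hypothetical per-atom property contributions, shape (natoms, task_dim)
+atom_prop = np.array([[0.1, 0.2, 0.3], [0.3, 0.0, 0.1]])
+
+extensive_out = atom_prop.sum(axis=0)   # "intensive": false -> sum over atoms
+intensive_out = atom_prop.mean(axis=0)  # "intensive": true -> average over atoms
+```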
+
+## Loss
+
+DeepProperty supports training with global labels (one or more global labels are provided for each frame). For example, when fitting `property`, each frame provides a `1 x task_dim` vector as the fitting target.
+
+The loss section should be provided like
+
+```json
+"loss" : {
+    "type": "property",
+    "metric": ["mae"],
+    "loss_func": "smooth_mae"
+},
+```
+
+- {ref}`type <loss/type>` should be written as `property` as a distinction from `ener` mode.
+- `metric`: the metrics to display, which will be printed in `lcurve.out`. This list can include 'smooth_mae', 'mae', 'mse' and 'rmse'.
+- `loss_func`: the loss function to minimize; you can choose from 'mae', 'smooth_mae', 'mse' and 'rmse' (a sketch of `smooth_mae` follows this list).
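+
+The `smooth_mae` option behaves like MAE for large errors but like MSE near zero. A minimal sketch of the idea, assuming `beta` (set by the `beta` key) is the switching threshold; this is an illustration, not the exact code:
+
+```py
+import numpy as np
+
+def smooth_mae(pred, label, beta=1.0):
+    # quadratic within |error| < beta, linear outside (Huber-style)
+    err = np.abs(pred - label)
+    loss = np.where(err < beta, 0.5 * err**2 / beta, err - 0.5 * beta)
+    return loss.mean()
+```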
+
+## Training Data Preparation
+
+The label file should be named `{property_name}.npy` (or `{property_name}.raw`), where `property_name` is defined by `fitting_net/property_name` in `input.json`.
+
+To prepare the data, you can use the `dpdata` tools, for example:
+
+```py
+import dpdata
+import numpy as np
+from dpdata.data_type import (
+    Axis,
+    DataType,
+)
+
+property_name = "band_prop"  # fitting_net/property_name
+task_dim = 3  # fitting_net/task_dim
+
+# register data types
+datatypes = [
+    DataType(
+        property_name,
+        np.ndarray,
+        shape=(Axis.NFRAMES, task_dim),
+        required=False,
+    ),
+]
+datatypes.extend(
+    [
+        DataType(
+            "energies",
+            np.ndarray,
+            shape=(Axis.NFRAMES, 1),
+            required=False,
+        ),
+        DataType(
+            "forces",
+            np.ndarray,
+            shape=(Axis.NFRAMES, Axis.NATOMS, 1),
+            required=False,
+        ),
+    ]
+)
+
+for datatype in datatypes:
+    dpdata.System.register_data_type(datatype)
+    dpdata.LabeledSystem.register_data_type(datatype)
+
+ls = dpdata.MultiSystems()
+frame = dpdata.System("POSCAR", fmt="vasp/poscar")
+labelframe = dpdata.LabeledSystem()
+labelframe.append(frame)
+labelframe.data[property_name] = np.array([[-0.236, 0.056, 0.292]], dtype=np.float32)
+ls.append(labelframe)
+ls.to_deepmd_npy_mixed("deepmd")
+```
+
+## Train the Model
+
+The training command is the same as in `ener` mode, i.e.
+
+::::{tab-set}
+
+:::{tab-item} PyTorch {{ pytorch_icon }}
+
+```bash
+dp --pt train input.json
+```
+
+:::
+
+::::
+
+The detailed loss can be found in `lcurve.out`:
+
+```
+# step       mae_val    mae_trn        lr
+# If there is no available reference data, rmse_*_{val,trn} will print nan
+      1      2.72e-02   2.40e-02   2.0e-04
+    100      1.79e-02   1.34e-02   2.0e-04
+    200      1.45e-02   1.86e-02   2.0e-04
+    300      1.61e-02   4.90e-03   2.0e-04
+    400      2.04e-02   1.05e-02   2.0e-04
+    500      9.09e-03   1.85e-02   2.0e-04
+    600      1.01e-02   5.63e-03   2.0e-04
+    700      1.10e-02   1.76e-02   2.0e-04
+    800      1.14e-02   1.50e-02   2.0e-04
+    900      9.54e-03   2.70e-02   2.0e-04
+   1000      1.00e-02   2.73e-02   2.0e-04
+```
+
+## Test the Model
+
+We can use `dp test` to infer the properties for given frames.
+
+::::{tab-set}
+
+:::{tab-item} PyTorch {{ pytorch_icon }}
+
+```bash
+
+dp --pt freeze -o frozen_model.pth
+
+dp --pt test -m frozen_model.pth -s ../data/data_0/ -d ${output_prefix} -n 100
+```
+
+:::
+
+::::
+
+If `-d ${output_prefix}` is specified for `dp test`, the predicted properties for each frame are written to the working directory:
+
+```
+${output_prefix}.property.out.0 ${output_prefix}.property.out.1 ${output_prefix}.property.out.2 ${output_prefix}.property.out.3
+```
+
+Each `*.property.out.*` file contains a matrix of shape `(task_dim, 2)`, where the first column is the data label and the second column is the prediction:
+
+```
+# ../data/data_0 - 0: data_property pred_property
+-2.449000030755996704e-01 -2.315840660495154801e-01
+6.400000303983688354e-02 5.810663314446311983e-02
+3.088999986648559570e-01 2.917143316092784544e-01
+```
+
+## Data Normalization
+
+When `fitting_net/type` is `ener`, the energy bias layer $e_{bias}$ adds a constant bias to the atomic energy contribution according to the atomic number $Z_i$, i.e.,
+
+$$e_{bias}(Z_i)(\mathrm{MLP}(D_i)) = \mathrm{MLP}(D_i) + e_{bias}(Z_i)$$
+
+But when `fitting_net/type` is `property`, the property bias layer is used to normalize the property output of the model, i.e.,
+
+$$p_{bias}(\mathrm{MLP}(D_i)) = \mathrm{MLP}(D_i) \cdot \mathrm{std} + \mathrm{mean}$$
+
+where:
+
+1. `std`: the standard deviation of the property label,
+2. `mean`: the average value of the property label.
diff --git a/examples/property/data/data_0/set.000000/property.npy b/examples/property/data/data_0/set.000000/band_prop.npy
similarity index 100%
rename from examples/property/data/data_0/set.000000/property.npy
rename to examples/property/data/data_0/set.000000/band_prop.npy
diff --git a/examples/property/data/data_1/set.000000/property.npy b/examples/property/data/data_1/set.000000/band_prop.npy
similarity index 100%
rename from examples/property/data/data_1/set.000000/property.npy
rename to examples/property/data/data_1/set.000000/band_prop.npy
diff --git a/examples/property/data/data_2/set.000000/property.npy b/examples/property/data/data_2/set.000000/band_prop.npy
similarity index 100%
rename from examples/property/data/data_2/set.000000/property.npy
rename to examples/property/data/data_2/set.000000/band_prop.npy
diff --git a/examples/property/train/README.md b/examples/property/train/README.md
new file mode 100644
index 0000000000..e4dc9ed704
--- /dev/null
+++ b/examples/property/train/README.md
@@ -0,0 +1,5 @@
+Some explanations of the parameters in `input.json`:
+
+1. `fitting_net/property_name` is the name of the property to be predicted. It should be consistent with the property name in the dataset. In each system, the code will read the `set.*/{property_name}.npy` file as the prediction label if you use NumPy format data.
+2. `fitting_net/task_dim` is the dimension of the model output. It should be consistent with the property dimension in the dataset, which means that if the shape of the data stored in `set.*/{property_name}.npy` is `batch size * 3`, `fitting_net/task_dim` should be set to 3 (a quick check is sketched below).
+3. `fitting_net/intensive` indicates whether the fitting property is intensive. If `intensive` is `true`, the model output is the average of the property contribution of each atom. If `intensive` is `false`, the model output is the sum of the property contribution of each atom.
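+
+As a quick sanity check of a prepared system (a sketch; the file path is hypothetical and `task_dim` is assumed to be 3):
+
+```py
+import numpy as np
+
+# the label registered under fitting_net/property_name
+label = np.load("data_0/set.000000/band_prop.npy")
+assert label.ndim == 2 and label.shape[1] == 3  # (nframes, task_dim)
+```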
diff --git a/examples/property/train/input_torch.json b/examples/property/train/input_torch.json index 33eaa28a07..1e6ce00048 100644 --- a/examples/property/train/input_torch.json +++ b/examples/property/train/input_torch.json @@ -33,6 +33,7 @@ "type": "property", "intensive": true, "task_dim": 3, + "property_name": "band_prop", "neuron": [ 240, 240, @@ -53,6 +54,11 @@ }, "loss": { "type": "property", + "metric": [ + "mae" + ], + "loss_func": "smooth_mae", + "beta": 1.0, "_comment": " that's all" }, "training": { diff --git a/source/tests/common/test_out_stat.py b/source/tests/common/test_out_stat.py index c175d7c643..0236c39f22 100644 --- a/source/tests/common/test_out_stat.py +++ b/source/tests/common/test_out_stat.py @@ -4,6 +4,7 @@ import numpy as np from deepmd.utils.out_stat import ( + compute_stats_do_not_distinguish_types, compute_stats_from_atomic, compute_stats_from_redu, ) @@ -89,6 +90,58 @@ def test_compute_stats_from_redu_with_assigned_bias(self) -> None: rtol=1e-7, ) + def test_compute_stats_do_not_distinguish_types_intensive(self) -> None: + """Test compute_stats_property function with intensive scenario.""" + bias, std = compute_stats_do_not_distinguish_types( + self.output_redu, self.natoms, intensive=True + ) + # Test shapes + assert bias.shape == (len(self.mean), self.output_redu.shape[1]) + assert std.shape == (len(self.mean), self.output_redu.shape[1]) + + # Test values + for fake_atom_bias in bias: + np.testing.assert_allclose( + fake_atom_bias, np.mean(self.output_redu, axis=0), rtol=1e-7 + ) + for fake_atom_std in std: + np.testing.assert_allclose( + fake_atom_std, np.std(self.output_redu, axis=0), rtol=1e-7 + ) + + def test_compute_stats_do_not_distinguish_types_extensive(self) -> None: + """Test compute_stats_property function with extensive scenario.""" + bias, std = compute_stats_do_not_distinguish_types( + self.output_redu, self.natoms + ) + # Test shapes + assert bias.shape == (len(self.mean), self.output_redu.shape[1]) + assert std.shape == (len(self.mean), self.output_redu.shape[1]) + + # Test values + for fake_atom_bias in bias: + np.testing.assert_allclose( + fake_atom_bias, + np.array( + [ + 6218.91610282, + 7183.82275736, + 4445.23155934, + 5748.23644722, + 5362.8519454, + ] + ), + rtol=1e-7, + ) + for fake_atom_std in std: + np.testing.assert_allclose( + fake_atom_std, + np.array( + [128.78691576, 36.53743668, 105.82372405, 96.43642486, 33.68885327] + ), + rtol=1e-7, + ) + def test_compute_stats_from_atomic(self) -> None: bias, std = compute_stats_from_atomic(self.output, self.atype) np.testing.assert_allclose(bias, self.mean) diff --git a/source/tests/consistent/fitting/test_property.py b/source/tests/consistent/fitting/test_property.py index 3abd672c88..4c359026c7 100644 --- a/source/tests/consistent/fitting/test_property.py +++ b/source/tests/consistent/fitting/test_property.py @@ -86,6 +86,7 @@ def data(self) -> dict: "seed": 20240217, "task_dim": task_dim, "intensive": intensive, + "property_name": "foo", } @property @@ -186,7 +187,7 @@ def eval_pt(self, pt_obj: Any) -> Any: aparam=torch.from_numpy(self.aparam).to(device=PT_DEVICE) if numb_aparam else None, - )["property"] + )[pt_obj.var_name] .detach() .cpu() .numpy() @@ -207,7 +208,7 @@ def eval_dp(self, dp_obj: Any) -> Any: self.atype.reshape(1, -1), fparam=self.fparam if numb_fparam else None, aparam=self.aparam if numb_aparam else None, - )["property"] + )[dp_obj.var_name] def eval_jax(self, jax_obj: Any) -> Any: ( @@ -225,7 +226,7 @@ def eval_jax(self, jax_obj: Any) -> Any: 
jnp.asarray(self.atype.reshape(1, -1)), fparam=jnp.asarray(self.fparam) if numb_fparam else None, aparam=jnp.asarray(self.aparam) if numb_aparam else None, - )["property"] + )[jax_obj.var_name] ) def eval_array_api_strict(self, array_api_strict_obj: Any) -> Any: @@ -244,7 +245,7 @@ def eval_array_api_strict(self, array_api_strict_obj: Any) -> Any: array_api_strict.asarray(self.atype.reshape(1, -1)), fparam=array_api_strict.asarray(self.fparam) if numb_fparam else None, aparam=array_api_strict.asarray(self.aparam) if numb_aparam else None, - )["property"] + )[array_api_strict_obj.var_name] ) def extract_ret(self, ret: Any, backend) -> tuple[np.ndarray, ...]: diff --git a/source/tests/consistent/model/test_property.py b/source/tests/consistent/model/test_property.py index 29786fb247..75aded98fd 100644 --- a/source/tests/consistent/model/test_property.py +++ b/source/tests/consistent/model/test_property.py @@ -56,6 +56,7 @@ def data(self) -> dict: "fitting_net": { "type": "property", "neuron": [4, 4, 4], + "property_name": "foo", "resnet_dt": True, "numb_fparam": 0, "precision": "float64", @@ -182,14 +183,15 @@ def eval_jax(self, jax_obj: Any) -> Any: def extract_ret(self, ret: Any, backend) -> tuple[np.ndarray, ...]: # shape not matched. ravel... + property_name = self.data["fitting_net"]["property_name"] if backend in {self.RefBackend.DP, self.RefBackend.JAX}: return ( - ret["property_redu"].ravel(), - ret["property"].ravel(), + ret[f"{property_name}_redu"].ravel(), + ret[property_name].ravel(), ) elif backend is self.RefBackend.PT: return ( - ret["property"].ravel(), - ret["atom_property"].ravel(), + ret[property_name].ravel(), + ret[f"atom_{property_name}"].ravel(), ) raise ValueError(f"Unknown backend: {backend}") diff --git a/source/tests/pd/model/test_permutation.py b/source/tests/pd/model/test_permutation.py index 4543348d3b..135c5ea819 100644 --- a/source/tests/pd/model/test_permutation.py +++ b/source/tests/pd/model/test_permutation.py @@ -331,10 +331,10 @@ }, "fitting_net": { "type": "property", + "property_name": "band_property", "task_dim": 3, "neuron": [24, 24, 24], "resnet_dt": True, - "bias_method": "normal", "intensive": True, "seed": 1, }, diff --git a/source/tests/pt/model/test_permutation.py b/source/tests/pt/model/test_permutation.py index 5c7b8db9a4..e4eb47a540 100644 --- a/source/tests/pt/model/test_permutation.py +++ b/source/tests/pt/model/test_permutation.py @@ -331,9 +331,9 @@ "fitting_net": { "type": "property", "task_dim": 3, + "property_name": "band_property", "neuron": [24, 24, 24], "resnet_dt": True, - "bias_method": "normal", "intensive": True, "seed": 1, }, diff --git a/source/tests/pt/model/test_property_fitting.py b/source/tests/pt/model/test_property_fitting.py index 305d1be951..6825924bc1 100644 --- a/source/tests/pt/model/test_property_fitting.py +++ b/source/tests/pt/model/test_property_fitting.py @@ -61,7 +61,7 @@ def test_consistency( self.atype_ext[:, : self.nloc], dtype=int, device=env.DEVICE ) - for nfp, nap, bias_atom_p, intensive, bias_method in itertools.product( + for nfp, nap, bias_atom_p, intensive in itertools.product( [0, 3], [0, 4], [ @@ -69,18 +69,17 @@ def test_consistency( np.array([[11, 12, 13, 4, 15], [16, 17, 18, 9, 20]]), ], [True, False], - ["normal", "no_bias"], ): ft0 = PropertyFittingNet( self.nt, self.dd0.dim_out, task_dim=5, + property_name="foo", numb_fparam=nfp, numb_aparam=nap, mixed_types=self.dd0.mixed_types(), bias_atom_p=bias_atom_p, intensive=intensive, - bias_method=bias_method, seed=GLOBAL_SEED, ).to(env.DEVICE) @@ 
-120,36 +119,35 @@ def test_consistency( aparam=to_numpy_array(iap), ) np.testing.assert_allclose( - to_numpy_array(ret0["property"]), - ret1["property"], + to_numpy_array(ret0[ft0.var_name]), + ret1[ft1.var_name], ) np.testing.assert_allclose( - to_numpy_array(ret0["property"]), - to_numpy_array(ret2["property"]), + to_numpy_array(ret0[ft0.var_name]), + to_numpy_array(ret2[ft2.var_name]), ) np.testing.assert_allclose( - to_numpy_array(ret0["property"]), - ret3["property"], + to_numpy_array(ret0[ft0.var_name]), + ret3[ft3.var_name], ) def test_jit( self, ) -> None: - for nfp, nap, intensive, bias_method in itertools.product( + for nfp, nap, intensive in itertools.product( [0, 3], [0, 4], [True, False], - ["normal", "no_bias"], ): ft0 = PropertyFittingNet( self.nt, self.dd0.dim_out, task_dim=5, + property_name="foo", numb_fparam=nfp, numb_aparam=nap, mixed_types=self.dd0.mixed_types(), intensive=intensive, - bias_method=bias_method, seed=GLOBAL_SEED, ).to(env.DEVICE) torch.jit.script(ft0) @@ -201,6 +199,7 @@ def test_trans(self) -> None: self.nt, self.dd0.dim_out, task_dim=11, + property_name="bar", numb_fparam=0, numb_aparam=0, mixed_types=self.dd0.mixed_types(), @@ -229,7 +228,7 @@ def test_trans(self) -> None: ) ret0 = ft0(rd0, atype, gr0, fparam=None, aparam=None) - res.append(ret0["property"]) + res.append(ret0[ft0.var_name]) np.testing.assert_allclose(to_numpy_array(res[0]), to_numpy_array(res[1])) @@ -257,21 +256,20 @@ def test_rot(self) -> None: # use larger cell to rotate only coord and shift to the center of cell cell_rot = 10.0 * torch.eye(3, dtype=dtype, device=env.DEVICE) - for nfp, nap, intensive, bias_method in itertools.product( + for nfp, nap, intensive in itertools.product( [0, 3], [0, 4], [True, False], - ["normal", "no_bias"], ): ft0 = PropertyFittingNet( self.nt, self.dd0.dim_out, # dim_descrpt - task_dim=9, + task_dim=5, + property_name="bar", numb_fparam=nfp, numb_aparam=nap, mixed_types=self.dd0.mixed_types(), intensive=intensive, - bias_method=bias_method, seed=GLOBAL_SEED, ).to(env.DEVICE) if nfp > 0: @@ -312,7 +310,7 @@ def test_rot(self) -> None: ) ret0 = ft0(rd0, atype, gr0, fparam=ifp, aparam=iap) - res.append(ret0["property"]) + res.append(ret0[ft0.var_name]) np.testing.assert_allclose( to_numpy_array(res[1]), to_numpy_array(res[0]), @@ -324,6 +322,7 @@ def test_permu(self) -> None: self.nt, self.dd0.dim_out, task_dim=8, + property_name="abc", numb_fparam=0, numb_aparam=0, mixed_types=self.dd0.mixed_types(), @@ -353,7 +352,7 @@ def test_permu(self) -> None: ) ret0 = ft0(rd0, atype, gr0, fparam=None, aparam=None) - res.append(ret0["property"]) + res.append(ret0[ft0.var_name]) np.testing.assert_allclose( to_numpy_array(res[0][:, idx_perm]), @@ -372,6 +371,7 @@ def test_trans(self) -> None: self.nt, self.dd0.dim_out, task_dim=11, + property_name="foo", numb_fparam=0, numb_aparam=0, mixed_types=self.dd0.mixed_types(), @@ -400,7 +400,7 @@ def test_trans(self) -> None: ) ret0 = ft0(rd0, atype, gr0, fparam=None, aparam=None) - res.append(ret0["property"]) + res.append(ret0[ft0.var_name]) np.testing.assert_allclose(to_numpy_array(res[0]), to_numpy_array(res[1])) @@ -422,6 +422,7 @@ def setUp(self) -> None: self.nt, self.dd0.dim_out, task_dim=3, + property_name="bar", numb_fparam=0, numb_aparam=0, mixed_types=self.dd0.mixed_types(), diff --git a/source/tests/pt/property/double/nopbc b/source/tests/pt/property/double/nopbc new file mode 100644 index 0000000000..e69de29bb2 diff --git a/source/tests/pt/property/double/set.000000/band_property.npy 
b/source/tests/pt/property/double/set.000000/band_property.npy new file mode 100644 index 0000000000000000000000000000000000000000..042c1a8b0d29ada2d0b96a701969717eceae7cc6 GIT binary patch literal 176 zcmbR27wQ`j$;eQ~P_3SlTAW;@Zl$1ZlV+i=qoAIaUsO_*m=~X4l#&V(cT3DEP6dh= zXCxM+0{I$7Its>`ItsN4WCJb+1_p&k5$E><=?A_^8|;B}1Iy9NAUYw%8Z7Rx?ekg? KpFuO`vONIezA7pJ literal 0 HcmV?d00001 diff --git a/source/tests/pt/property/double/set.000000/coord.npy b/source/tests/pt/property/double/set.000000/coord.npy new file mode 100644 index 0000000000000000000000000000000000000000..9c781a81f3b1580efc8326f0a15195d8c19ff068 GIT binary patch literal 1088 zcmbV~?MqZa6vjv0TvIZNpv@PW0^N!&v=6$Lv11>E0xJv&mGDZg1&PLPidohQmKiD0 zX3$1*qtYTOi&Ti!9SZ5i5YfVv%Il_9x|%PmW~xBvtubFP9%-BhpKN_L2U;mv z;|B7>?Su*1>JPnxI5T{}`tc6DRk-Tq-9&A&4TB)5O6!Pf3mR{74n{bX~ zSGa}?pN(j6?qYuU4hm1e`O0T~>U=}&I3F?hF5GX|EkHf?qjw6lVr=tcg7P)94>TDc zT@IQiO-^I{jL8T2(s76SK2e2VL6e;mI6o=j`6tl4?-I^giL4)lef-sK?3a2yS_V3O z<}LUvtsxY&^kR>KbxPuJt|aF@-dk}D2SD7`vs(QxV=Kn7{`BG?P>GKHgRFI89bOfv zo|j1nz?as!v48o|r{AFEit}C<_}xx?9~NJUeapYpKB-(8!bDtYEdPx85gF<^`>W8W z>%Ft+2ZeV4dCU!e$R`)QKS8sWRQ&JKf^Hh)ne-`8x?sExRFVVk1F5d2(*?e^1MexF y>umuo&PUC*E#bVlrxyR6E${mn>|;@deYnpNzlsShpL&h&#ayLcjNeRKH2nkZIZ_k= literal 0 HcmV?d00001 diff --git a/source/tests/pt/property/double/set.000000/real_atom_types.npy b/source/tests/pt/property/double/set.000000/real_atom_types.npy new file mode 100644 index 0000000000000000000000000000000000000000..3bfe0abd94b2b77384ba11c69205ac7360cf3c52 GIT binary patch literal 448 zcmbR27wQ`j$;eQ~P_3SlTAW;@Zl$1ZlWC!@qoAIaUsO_*m=~X4l#&V(cT3DEP6dh= xXCxM+0{I$7ItoSxnmP)#3giMV1{wiIsJX<_Oi=aAXf%zS$3U#RVCrzW4*< None: pred_property = np.loadtxt(self.detail_file + ".property.out.0")[:, 1] np.testing.assert_almost_equal( pred_property, - to_numpy_array(result["property"])[0], + to_numpy_array(result[model.get_var_name()])[0], ) def tearDown(self) -> None: diff --git a/source/tests/pt/test_training.py b/source/tests/pt/test_training.py index 1fbd01c39f..ad52c5db16 100644 --- a/source/tests/pt/test_training.py +++ b/source/tests/pt/test_training.py @@ -464,7 +464,7 @@ def setUp(self) -> None: property_input = str(Path(__file__).parent / "property/input.json") with open(property_input) as f: self.config_property = json.load(f) - prop_data_file = [str(Path(__file__).parent / "property/single")] + prop_data_file = [str(Path(__file__).parent / "property/double")] self.config_property["training"]["training_data"]["systems"] = prop_data_file self.config_property["training"]["validation_data"]["systems"] = prop_data_file self.config_property["model"]["descriptor"] = deepcopy(model_dpa1["descriptor"]) diff --git a/source/tests/universal/common/cases/model/model.py b/source/tests/universal/common/cases/model/model.py index cee69d9d6c..06ddd90970 100644 --- a/source/tests/universal/common/cases/model/model.py +++ b/source/tests/universal/common/cases/model/model.py @@ -165,7 +165,7 @@ def setUpClass(cls) -> None: cls.expected_dim_aparam = 0 cls.expected_sel_type = [0, 1] cls.expected_aparam_nall = False - cls.expected_model_output_type = ["property", "mask"] + cls.expected_model_output_type = ["band_prop", "mask"] cls.model_output_equivariant = [] cls.expected_sel = [46, 92] cls.expected_sel_mix = sum(cls.expected_sel) diff --git a/source/tests/universal/dpmodel/fitting/test_fitting.py b/source/tests/universal/dpmodel/fitting/test_fitting.py index db199c02a3..2fe0060003 100644 --- a/source/tests/universal/dpmodel/fitting/test_fitting.py +++ 
b/source/tests/universal/dpmodel/fitting/test_fitting.py
@@ -208,6 +208,8 @@ def FittingParamProperty(
         "dim_descrpt": dim_descrpt,
         "mixed_types": mixed_types,
         "type_map": type_map,
+        "task_dim": 3,
+        "property_name": "band_prop",
         "exclude_types": exclude_types,
         "seed": GLOBAL_SEED,
         "precision": precision,
diff --git a/source/tests/universal/dpmodel/loss/test_loss.py b/source/tests/universal/dpmodel/loss/test_loss.py
index 6473c159da..79c67cdba4 100644
--- a/source/tests/universal/dpmodel/loss/test_loss.py
+++ b/source/tests/universal/dpmodel/loss/test_loss.py
@@ -189,11 +189,14 @@ def LossParamTensor(

 def LossParamProperty():
     key_to_pref_map = {
-        "property": 1.0,
+        "foo": 1.0,
     }
     input_dict = {
         "key_to_pref_map": key_to_pref_map,
-        "task_dim": 2,
+        "var_name": "foo",
+        "out_bias": [0.1, 0.5, 1.2, -0.1, -10],
+        "out_std": [8, 10, 0.001, -0.2, -10],
+        "task_dim": 5,
     }
     return input_dict

From beeb3d932695c872809f10ba9b35917f877af80d Mon Sep 17 00:00:00 2001
From: HydrogenSulfate <490868991@qq.com>
Date: Wed, 25 Dec 2024 11:58:19 +0800
Subject: [PATCH 35/43] pd: support dpa2 (#4418)

Support DPA-2 in the Paddle backend. This PR will be updated after #4414 is merged.

### Training curve:

![training_curves_comparison_dpa2](https://github.com/user-attachments/assets/29bdeffa-cf2d-4586-afcf-7df0569997c3)

### Accuracy test (left: paddle, right: torch):

![image](https://github.com/user-attachments/assets/5bff55f3-1c39-4b95-93f0-68783e794716)

Related optimizations of the Paddle framework:

- [x] https://github.com/PaddlePaddle/Paddle/pull/69349
- [x] https://github.com/PaddlePaddle/Paddle/pull/69333
- [x] https://github.com/PaddlePaddle/Paddle/pull/69479
- [x] https://github.com/PaddlePaddle/Paddle/pull/69515
- [x] https://github.com/PaddlePaddle/Paddle/pull/69487
- [x] https://github.com/PaddlePaddle/Paddle/pull/69661
- [x] https://github.com/PaddlePaddle/Paddle/pull/69660
- [x] https://github.com/PaddlePaddle/Paddle/pull/69596
- [x] https://github.com/PaddlePaddle/Paddle/pull/69556

## Summary by CodeRabbit

- **New Features**
  - Introduced new classes for molecular descriptors: `DescrptDPA2`, `DescrptBlockRepformers`, `DescrptSeTTebd`, and `DescrptBlockSeTTebd`.
  - Added new functions for tensor operations and descriptor management, enhancing the capabilities of the module.
  - Updated JSON configurations for multitask models to refine selection criteria and data paths.
- **Bug Fixes**
  - Improved error handling and parameter validation across various descriptor classes.
- **Documentation**
  - Enhanced test coverage for new descriptor functionalities and configurations.
- **Tests**
  - Added new test classes to validate the functionality of `DescrptDPA2` and multitask training scenarios.
  - Expanded test capabilities for descriptor classes based on installed dependencies.
  - Updated existing tests to support new configurations and functionalities.
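A minimal construction sketch of the new Paddle descriptor (hypothetical parameter values; per the diff below, `repinit` and `repformer` accept plain dicts that are converted to `RepinitArgs`/`RepformerArgs` internally, with defaults assumed to cover the remaining keys):

```py
from deepmd.pd.model.descriptor import DescrptDPA2

# two-level DPA-2: a wide repinit stage feeding a narrower repformer stage
descrpt = DescrptDPA2(
    ntypes=2,
    repinit={"rcut": 6.0, "rcut_smth": 0.5, "nsel": 120},
    repformer={"rcut": 4.0, "rcut_smth": 3.5, "nsel": 48},
)
print(descrpt.get_rcut(), descrpt.get_nsel())  # outer cutoff and total sel
```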
--------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- deepmd/pd/model/descriptor/__init__.py | 14 + deepmd/pd/model/descriptor/dpa2.py | 902 ++++++++++ deepmd/pd/model/descriptor/repformer_layer.py | 1484 +++++++++++++++++ deepmd/pd/model/descriptor/repformers.py | 576 +++++++ deepmd/pd/model/descriptor/se_t_tebd.py | 931 +++++++++++ deepmd/pd/model/task/fitting.py | 4 +- deepmd/pd/utils/multi_task.py | 4 +- deepmd/pd/utils/spin.py | 30 + .../tests/consistent/descriptor/test_dpa2.py | 51 + .../consistent/descriptor/test_se_t_tebd.py | 17 + source/tests/pd/model/models/dpa2.json | 57 + source/tests/pd/model/models/dpa2.pd | Bin 0 -> 119535 bytes source/tests/pd/model/test_autodiff.py | 43 +- source/tests/pd/model/test_descriptor_dpa2.py | 208 +++ source/tests/pd/model/test_dpa2.py | 333 ++++ source/tests/pd/model/test_forward_lower.py | 15 +- source/tests/pd/model/test_null_input.py | 12 +- source/tests/pd/model/test_permutation.py | 1 - source/tests/pd/model/test_rot.py | 1 - source/tests/pd/model/test_rot_denoise.py | 11 +- source/tests/pd/model/test_smooth.py | 31 + source/tests/pd/model/test_trans.py | 1 - source/tests/pd/model/test_unused_params.py | 92 + source/tests/pd/model/water/multitask.json | 3 +- .../pd/model/water/multitask_sharefit.json | 8 +- source/tests/pd/test_finetune.py | 15 +- source/tests/pd/test_multitask.py | 127 ++ source/tests/pd/test_training.py | 17 + source/tests/pd/test_update_sel.py | 62 +- 29 files changed, 4987 insertions(+), 63 deletions(-) create mode 100644 deepmd/pd/model/descriptor/dpa2.py create mode 100644 deepmd/pd/model/descriptor/repformer_layer.py create mode 100644 deepmd/pd/model/descriptor/repformers.py create mode 100644 deepmd/pd/model/descriptor/se_t_tebd.py create mode 100644 deepmd/pd/utils/spin.py create mode 100644 source/tests/pd/model/models/dpa2.json create mode 100644 source/tests/pd/model/models/dpa2.pd create mode 100644 source/tests/pd/model/test_descriptor_dpa2.py create mode 100644 source/tests/pd/model/test_dpa2.py create mode 100644 source/tests/pd/model/test_unused_params.py diff --git a/deepmd/pd/model/descriptor/__init__.py b/deepmd/pd/model/descriptor/__init__.py index 7eaa0df85b..cee9dbf226 100644 --- a/deepmd/pd/model/descriptor/__init__.py +++ b/deepmd/pd/model/descriptor/__init__.py @@ -9,20 +9,34 @@ DescrptBlockSeAtten, DescrptDPA1, ) +from .dpa2 import ( + DescrptDPA2, +) from .env_mat import ( prod_env_mat, ) +from .repformers import ( + DescrptBlockRepformers, +) from .se_a import ( DescrptBlockSeA, DescrptSeA, ) +from .se_t_tebd import ( + DescrptBlockSeTTebd, + DescrptSeTTebd, +) __all__ = [ "BaseDescriptor", "DescriptorBlock", + "DescrptBlockRepformers", "DescrptBlockSeA", "DescrptBlockSeAtten", + "DescrptBlockSeTTebd", "DescrptDPA1", + "DescrptDPA2", "DescrptSeA", + "DescrptSeTTebd", "prod_env_mat", ] diff --git a/deepmd/pd/model/descriptor/dpa2.py b/deepmd/pd/model/descriptor/dpa2.py new file mode 100644 index 0000000000..8d4e13edae --- /dev/null +++ b/deepmd/pd/model/descriptor/dpa2.py @@ -0,0 +1,902 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from typing import ( + Callable, + Optional, + Union, +) + +import paddle + +from deepmd.dpmodel.descriptor.dpa2 import ( + RepformerArgs, + RepinitArgs, +) +from deepmd.dpmodel.utils import EnvMat as DPEnvMat +from deepmd.dpmodel.utils.seed import ( + child_seed, +) +from deepmd.pd.model.network.mlp import ( + Identity, + MLPLayer, + NetworkCollection, +) +from deepmd.pd.model.network.network import ( + 
TypeEmbedNet, + TypeEmbedNetConsistent, +) +from deepmd.pd.utils import ( + env, +) +from deepmd.pd.utils.env import ( + PRECISION_DICT, +) +from deepmd.pd.utils.nlist import ( + build_multiple_neighbor_list, + get_multiple_nlist_key, +) +from deepmd.pd.utils.update_sel import ( + UpdateSel, +) +from deepmd.pd.utils.utils import ( + to_numpy_array, +) +from deepmd.utils.data_system import ( + DeepmdDataSystem, +) +from deepmd.utils.finetune import ( + get_index_between_two_maps, + map_pair_exclude_types, +) +from deepmd.utils.path import ( + DPPath, +) +from deepmd.utils.version import ( + check_version_compatibility, +) + +from .base_descriptor import ( + BaseDescriptor, +) +from .descriptor import ( + extend_descrpt_stat, +) +from .repformer_layer import ( + RepformerLayer, +) +from .repformers import ( + DescrptBlockRepformers, +) +from .se_atten import ( + DescrptBlockSeAtten, +) +from .se_t_tebd import ( + DescrptBlockSeTTebd, +) + + +@BaseDescriptor.register("dpa2") +class DescrptDPA2(BaseDescriptor, paddle.nn.Layer): + def __init__( + self, + ntypes: int, + # args for repinit + repinit: Union[RepinitArgs, dict], + # args for repformer + repformer: Union[RepformerArgs, dict], + # kwargs for descriptor + concat_output_tebd: bool = True, + precision: str = "float64", + smooth: bool = True, + exclude_types: list[tuple[int, int]] = [], + env_protection: float = 0.0, + trainable: bool = True, + seed: Optional[Union[int, list[int]]] = None, + add_tebd_to_repinit_out: bool = False, + use_econf_tebd: bool = False, + use_tebd_bias: bool = False, + type_map: Optional[list[str]] = None, + ) -> None: + r"""The DPA-2 descriptor[1]_. + + Parameters + ---------- + repinit : Union[RepinitArgs, dict] + The arguments used to initialize the repinit block, see docstr in `RepinitArgs` for details information. + repformer : Union[RepformerArgs, dict] + The arguments used to initialize the repformer block, see docstr in `RepformerArgs` for details information. + concat_output_tebd : bool, optional + Whether to concat type embedding at the output of the descriptor. + precision : str, optional + The precision of the embedding net parameters. + smooth : bool, optional + Whether to use smoothness in processes such as attention weights calculation. + exclude_types : list[list[int]], optional + The excluded pairs of types which have no interaction with each other. + For example, `[[0, 1]]` means no interaction between type 0 and type 1. + env_protection : float, optional + Protection parameter to prevent division by zero errors during environment matrix calculations. + For example, when using paddings, there may be zero distances of neighbors, which may make division by zero error during environment matrix calculations without protection. + trainable : bool, optional + If the parameters are trainable. + seed : int, optional + Random seed for parameter initialization. + add_tebd_to_repinit_out : bool, optional + Whether to add type embedding to the output representation from repinit before inputting it into repformer. + use_econf_tebd : bool, Optional + Whether to use electronic configuration type embedding. + use_tebd_bias : bool, Optional + Whether to use bias in the type embedding layer. + type_map : list[str], Optional + A list of strings. Give the name to each type of atoms. + + Returns + ------- + descriptor: paddle.Tensor + the descriptor of shape nb x nloc x g1_dim. + invariant single-atom representation. + g2: paddle.Tensor + invariant pair-atom representation. 
+ h2: paddle.Tensor + equivariant pair-atom representation. + rot_mat: paddle.Tensor + rotation matrix for equivariant fittings + sw: paddle.Tensor + The switch function for decaying inverse distance. + + References + ---------- + .. [1] Zhang, D., Liu, X., Zhang, X. et al. DPA-2: a + large atomic model as a multi-task learner. npj + Comput Mater 10, 293 (2024). https://doi.org/10.1038/s41524-024-01493-2 + """ + super().__init__() + + def init_subclass_params(sub_data, sub_class): + if isinstance(sub_data, dict): + return sub_class(**sub_data) + elif isinstance(sub_data, sub_class): + return sub_data + else: + raise ValueError( + f"Input args must be a {sub_class.__name__} class or a dict!" + ) + + self.repinit_args = init_subclass_params(repinit, RepinitArgs) + self.repformer_args = init_subclass_params(repformer, RepformerArgs) + self.tebd_input_mode = self.repinit_args.tebd_input_mode + + self.repinit = DescrptBlockSeAtten( + self.repinit_args.rcut, + self.repinit_args.rcut_smth, + self.repinit_args.nsel, + ntypes, + attn_layer=0, + neuron=self.repinit_args.neuron, + axis_neuron=self.repinit_args.axis_neuron, + tebd_dim=self.repinit_args.tebd_dim, + tebd_input_mode=self.repinit_args.tebd_input_mode, + set_davg_zero=self.repinit_args.set_davg_zero, + exclude_types=exclude_types, + env_protection=env_protection, + activation_function=self.repinit_args.activation_function, + precision=precision, + resnet_dt=self.repinit_args.resnet_dt, + smooth=smooth, + type_one_side=self.repinit_args.type_one_side, + seed=child_seed(seed, 0), + ) + self.use_three_body = self.repinit_args.use_three_body + if self.use_three_body: + self.repinit_three_body = DescrptBlockSeTTebd( + self.repinit_args.three_body_rcut, + self.repinit_args.three_body_rcut_smth, + self.repinit_args.three_body_sel, + ntypes, + neuron=self.repinit_args.three_body_neuron, + tebd_dim=self.repinit_args.tebd_dim, + tebd_input_mode=self.repinit_args.tebd_input_mode, + set_davg_zero=self.repinit_args.set_davg_zero, + exclude_types=exclude_types, + env_protection=env_protection, + activation_function=self.repinit_args.activation_function, + precision=precision, + resnet_dt=self.repinit_args.resnet_dt, + smooth=smooth, + seed=child_seed(seed, 5), + ) + else: + self.repinit_three_body = None + self.repformers = DescrptBlockRepformers( + self.repformer_args.rcut, + self.repformer_args.rcut_smth, + self.repformer_args.nsel, + ntypes, + nlayers=self.repformer_args.nlayers, + g1_dim=self.repformer_args.g1_dim, + g2_dim=self.repformer_args.g2_dim, + axis_neuron=self.repformer_args.axis_neuron, + direct_dist=self.repformer_args.direct_dist, + update_g1_has_conv=self.repformer_args.update_g1_has_conv, + update_g1_has_drrd=self.repformer_args.update_g1_has_drrd, + update_g1_has_grrg=self.repformer_args.update_g1_has_grrg, + update_g1_has_attn=self.repformer_args.update_g1_has_attn, + update_g2_has_g1g1=self.repformer_args.update_g2_has_g1g1, + update_g2_has_attn=self.repformer_args.update_g2_has_attn, + update_h2=self.repformer_args.update_h2, + attn1_hidden=self.repformer_args.attn1_hidden, + attn1_nhead=self.repformer_args.attn1_nhead, + attn2_hidden=self.repformer_args.attn2_hidden, + attn2_nhead=self.repformer_args.attn2_nhead, + attn2_has_gate=self.repformer_args.attn2_has_gate, + activation_function=self.repformer_args.activation_function, + update_style=self.repformer_args.update_style, + update_residual=self.repformer_args.update_residual, + update_residual_init=self.repformer_args.update_residual_init, + 
set_davg_zero=self.repformer_args.set_davg_zero, + smooth=smooth, + exclude_types=exclude_types, + env_protection=env_protection, + precision=precision, + trainable_ln=self.repformer_args.trainable_ln, + ln_eps=self.repformer_args.ln_eps, + use_sqrt_nnei=self.repformer_args.use_sqrt_nnei, + g1_out_conv=self.repformer_args.g1_out_conv, + g1_out_mlp=self.repformer_args.g1_out_mlp, + seed=child_seed(seed, 1), + ) + self.rcsl_list = [ + (self.repformers.get_rcut(), self.repformers.get_nsel()), + (self.repinit.get_rcut(), self.repinit.get_nsel()), + ] + if self.use_three_body: + self.rcsl_list.append( + (self.repinit_three_body.get_rcut(), self.repinit_three_body.get_nsel()) + ) + self.rcsl_list.sort() + for ii in range(1, len(self.rcsl_list)): + assert ( + self.rcsl_list[ii - 1][1] <= self.rcsl_list[ii][1] + ), "rcut and sel are not in the same order" + self.rcut_list = [ii[0] for ii in self.rcsl_list] + self.nsel_list = [ii[1] for ii in self.rcsl_list] + self.use_econf_tebd = use_econf_tebd + self.use_tebd_bias = use_tebd_bias + self.type_map = type_map + self.type_embedding = TypeEmbedNet( + ntypes, + self.repinit_args.tebd_dim, + precision=precision, + seed=child_seed(seed, 2), + use_econf_tebd=self.use_econf_tebd, + use_tebd_bias=use_tebd_bias, + type_map=type_map, + ) + self.concat_output_tebd = concat_output_tebd + self.precision = precision + self.prec = PRECISION_DICT[self.precision] + self.smooth = smooth + self.exclude_types = exclude_types + self.env_protection = env_protection + self.trainable = trainable + self.add_tebd_to_repinit_out = add_tebd_to_repinit_out + + self.repinit_out_dim = self.repinit.dim_out + if self.repinit_args.use_three_body: + assert self.repinit_three_body is not None + self.repinit_out_dim += self.repinit_three_body.dim_out + + if self.repinit_out_dim == self.repformers.dim_in: + self.g1_shape_tranform = Identity() + else: + self.g1_shape_tranform = MLPLayer( + self.repinit_out_dim, + self.repformers.dim_in, + bias=False, + precision=precision, + init="glorot", + seed=child_seed(seed, 3), + ) + self.tebd_transform = None + if self.add_tebd_to_repinit_out: + self.tebd_transform = MLPLayer( + self.repinit_args.tebd_dim, + self.repformers.dim_in, + bias=False, + precision=precision, + seed=child_seed(seed, 4), + ) + assert self.repinit.rcut > self.repformers.rcut + assert self.repinit.sel[0] > self.repformers.sel[0] + + self.tebd_dim = self.repinit_args.tebd_dim + self.rcut = self.repinit.get_rcut() + self.rcut_smth = self.repinit.get_rcut_smth() + self.ntypes = ntypes + self.sel = self.repinit.sel + # set trainable + for param in self.parameters(): + param.stop_gradient = not trainable + self.compress = False + + def get_rcut(self) -> float: + """Returns the cut-off radius.""" + return self.rcut + + def get_rcut_smth(self) -> float: + """Returns the radius where the neighbor information starts to smoothly decay to 0.""" + return self.rcut_smth + + def get_nsel(self) -> int: + """Returns the number of selected atoms in the cut-off radius.""" + return sum(self.sel) + + def get_sel(self) -> list[int]: + """Returns the number of selected atoms for each type.""" + return self.sel + + def get_ntypes(self) -> int: + """Returns the number of element types.""" + return self.ntypes + + def get_type_map(self) -> list[str]: + """Get the name to each type of atoms.""" + return self.type_map + + def get_dim_out(self) -> int: + """Returns the output dimension of this descriptor.""" + ret = self.repformers.dim_out + if self.concat_output_tebd: + ret += self.tebd_dim + return 
ret + + def get_dim_emb(self) -> int: + """Returns the embedding dimension of this descriptor.""" + return self.repformers.dim_emb + + def mixed_types(self) -> bool: + """If true, the descriptor + 1. assumes total number of atoms aligned across frames; + 2. requires a neighbor list that does not distinguish different atomic types. + + If false, the descriptor + 1. assumes total number of atoms of each atom type aligned across frames; + 2. requires a neighbor list that distinguishes different atomic types. + + """ + return True + + def has_message_passing(self) -> bool: + """Returns whether the descriptor has message passing.""" + return any( + [self.repinit.has_message_passing(), self.repformers.has_message_passing()] + ) + + def need_sorted_nlist_for_lower(self) -> bool: + """Returns whether the descriptor needs sorted nlist when using `forward_lower`.""" + return True + + def get_env_protection(self) -> float: + """Returns the protection of building environment matrix.""" + # the env_protection of repinit is the same as that of the repformer + return self.repinit.get_env_protection() + + def share_params(self, base_class, shared_level, resume=False) -> None: + """ + Share the parameters of self to the base_class with shared_level during multitask training. + If not start from checkpoint (resume is False), + some separated parameters (e.g. mean and stddev) will be re-calculated across different classes. + """ + assert ( + self.__class__ == base_class.__class__ + ), "Only descriptors of the same type can share params!" + # For DPA2 descriptors, the user-defined share-level + # shared_level: 0 + # share all parameters in type_embedding, repinit and repformers + if shared_level == 0: + self._sub_layers["type_embedding"] = base_class._sub_layers[ + "type_embedding" + ] + self.repinit.share_params(base_class.repinit, 0, resume=resume) + if self.use_three_body: + self.repinit_three_body.share_params( + base_class.repinit_three_body, 0, resume=resume + ) + self._sub_layers["g1_shape_tranform"] = base_class._sub_layers[ + "g1_shape_tranform" + ] + self.repformers.share_params(base_class.repformers, 0, resume=resume) + # shared_level: 1 + # share all parameters in type_embedding + elif shared_level == 1: + self._modules["type_embedding"] = base_class._modules["type_embedding"] + # Other shared levels + else: + raise NotImplementedError + + def change_type_map( + self, type_map: list[str], model_with_new_type_stat=None + ) -> None: + """Change the type related params to new ones, according to `type_map` and the original one in the model. + If there are new types in `type_map`, statistics will be updated accordingly to `model_with_new_type_stat` for these new types. + """ + assert ( + self.type_map is not None + ), "'type_map' must be defined when performing type changing!" 
+ remap_index, has_new_type = get_index_between_two_maps(self.type_map, type_map) + self.type_map = type_map + self.type_embedding.change_type_map(type_map=type_map) + self.exclude_types = map_pair_exclude_types(self.exclude_types, remap_index) + self.ntypes = len(type_map) + repinit = self.repinit + repformers = self.repformers + repinit_three_body = self.repinit_three_body + if has_new_type: + # the avg and std of new types need to be updated + extend_descrpt_stat( + repinit, + type_map, + des_with_stat=model_with_new_type_stat.repinit + if model_with_new_type_stat is not None + else None, + ) + extend_descrpt_stat( + repformers, + type_map, + des_with_stat=model_with_new_type_stat.repformers + if model_with_new_type_stat is not None + else None, + ) + if self.use_three_body: + extend_descrpt_stat( + repinit_three_body, + type_map, + des_with_stat=model_with_new_type_stat.repinit_three_body + if model_with_new_type_stat is not None + else None, + ) + repinit.ntypes = self.ntypes + repformers.ntypes = self.ntypes + repinit.reinit_exclude(self.exclude_types) + repformers.reinit_exclude(self.exclude_types) + repinit["davg"] = repinit["davg"][remap_index] + repinit["dstd"] = repinit["dstd"][remap_index] + repformers["davg"] = repformers["davg"][remap_index] + repformers["dstd"] = repformers["dstd"][remap_index] + if self.use_three_body: + repinit_three_body.ntypes = self.ntypes + repinit_three_body.reinit_exclude(self.exclude_types) + repinit_three_body["davg"] = repinit_three_body["davg"][remap_index] + repinit_three_body["dstd"] = repinit_three_body["dstd"][remap_index] + + @property + def dim_out(self): + return self.get_dim_out() + + @property + def dim_emb(self): + """Returns the embedding dimension g2.""" + return self.get_dim_emb() + + def compute_input_stats( + self, + merged: Union[Callable[[], list[dict]], list[dict]], + path: Optional[DPPath] = None, + ) -> None: + """ + Compute the input statistics (e.g. mean and stddev) for the descriptors from packed data. + + Parameters + ---------- + merged : Union[Callable[[], list[dict]], list[dict]] + - list[dict]: A list of data samples from various data systems. + Each element, `merged[i]`, is a data dictionary containing `keys`: `paddle.Tensor` + originating from the `i`-th data system. + - Callable[[], list[dict]]: A lazy function that returns data samples in the above format + only when needed. Since the sampling process can be slow and memory-intensive, + the lazy function helps by only sampling once. + path : Optional[DPPath] + The path to the stat file. 
+ + """ + descrpt_list = [self.repinit, self.repformers] + if self.use_three_body: + descrpt_list.append(self.repinit_three_body) + for ii, descrpt in enumerate(descrpt_list): + descrpt.compute_input_stats(merged, path) + + def set_stat_mean_and_stddev( + self, + mean: list[paddle.Tensor], + stddev: list[paddle.Tensor], + ) -> None: + """Update mean and stddev for descriptor.""" + descrpt_list = [self.repinit, self.repformers] + if self.use_three_body: + descrpt_list.append(self.repinit_three_body) + for ii, descrpt in enumerate(descrpt_list): + descrpt.mean = mean[ii] + descrpt.stddev = stddev[ii] + + def get_stat_mean_and_stddev( + self, + ) -> tuple[list[paddle.Tensor], list[paddle.Tensor]]: + """Get mean and stddev for descriptor.""" + mean_list = [self.repinit.mean, self.repformers.mean] + stddev_list = [ + self.repinit.stddev, + self.repformers.stddev, + ] + if self.use_three_body: + mean_list.append(self.repinit_three_body.mean) + stddev_list.append(self.repinit_three_body.stddev) + return mean_list, stddev_list + + def serialize(self) -> dict: + repinit = self.repinit + repformers = self.repformers + repinit_three_body = self.repinit_three_body + data = { + "@class": "Descriptor", + "type": "dpa2", + "@version": 3, + "ntypes": self.ntypes, + "repinit_args": self.repinit_args.serialize(), + "repformer_args": self.repformer_args.serialize(), + "concat_output_tebd": self.concat_output_tebd, + "precision": self.precision, + "smooth": self.smooth, + "exclude_types": self.exclude_types, + "env_protection": self.env_protection, + "trainable": self.trainable, + "add_tebd_to_repinit_out": self.add_tebd_to_repinit_out, + "use_econf_tebd": self.use_econf_tebd, + "use_tebd_bias": self.use_tebd_bias, + "type_map": self.type_map, + "type_embedding": self.type_embedding.embedding.serialize(), + "g1_shape_tranform": self.g1_shape_tranform.serialize(), + } + if self.add_tebd_to_repinit_out: + data.update( + { + "tebd_transform": self.tebd_transform.serialize(), + } + ) + repinit_variable = { + "embeddings": repinit.filter_layers.serialize(), + "env_mat": DPEnvMat(repinit.rcut, repinit.rcut_smth).serialize(), + "@variables": { + "davg": to_numpy_array(repinit["davg"]), + "dstd": to_numpy_array(repinit["dstd"]), + }, + } + if repinit.tebd_input_mode in ["strip"]: + repinit_variable.update( + {"embeddings_strip": repinit.filter_layers_strip.serialize()} + ) + repformers_variable = { + "g2_embd": repformers.g2_embd.serialize(), + "repformer_layers": [layer.serialize() for layer in repformers.layers], + "env_mat": DPEnvMat(repformers.rcut, repformers.rcut_smth).serialize(), + "@variables": { + "davg": to_numpy_array(repformers["davg"]), + "dstd": to_numpy_array(repformers["dstd"]), + }, + } + data.update( + { + "repinit_variable": repinit_variable, + "repformers_variable": repformers_variable, + } + ) + if self.use_three_body: + repinit_three_body_variable = { + "embeddings": repinit_three_body.filter_layers.serialize(), + "env_mat": DPEnvMat( + repinit_three_body.rcut, repinit_three_body.rcut_smth + ).serialize(), + "@variables": { + "davg": to_numpy_array(repinit_three_body["davg"]), + "dstd": to_numpy_array(repinit_three_body["dstd"]), + }, + } + if repinit_three_body.tebd_input_mode in ["strip"]: + repinit_three_body_variable.update( + { + "embeddings_strip": repinit_three_body.filter_layers_strip.serialize() + } + ) + data.update( + { + "repinit_three_body_variable": repinit_three_body_variable, + } + ) + return data + + @classmethod + def deserialize(cls, data: dict) -> "DescrptDPA2": + data = 
data.copy() + version = data.pop("@version") + check_version_compatibility(version, 3, 1) + data.pop("@class") + data.pop("type") + repinit_variable = data.pop("repinit_variable").copy() + repformers_variable = data.pop("repformers_variable").copy() + repinit_three_body_variable = ( + data.pop("repinit_three_body_variable").copy() + if "repinit_three_body_variable" in data + else None + ) + type_embedding = data.pop("type_embedding") + g1_shape_tranform = data.pop("g1_shape_tranform") + tebd_transform = data.pop("tebd_transform", None) + add_tebd_to_repinit_out = data["add_tebd_to_repinit_out"] + if version < 3: + # compat with old version + data["repformer_args"]["use_sqrt_nnei"] = False + data["repformer_args"]["g1_out_conv"] = False + data["repformer_args"]["g1_out_mlp"] = False + data["repinit"] = RepinitArgs(**data.pop("repinit_args")) + data["repformer"] = RepformerArgs(**data.pop("repformer_args")) + # compat with version 1 + if "use_tebd_bias" not in data: + data["use_tebd_bias"] = True + obj = cls(**data) + obj.type_embedding.embedding = TypeEmbedNetConsistent.deserialize( + type_embedding + ) + if add_tebd_to_repinit_out: + assert isinstance(tebd_transform, dict) + obj.tebd_transform = MLPLayer.deserialize(tebd_transform) + if obj.repinit.dim_out != obj.repformers.dim_in: + obj.g1_shape_tranform = MLPLayer.deserialize(g1_shape_tranform) + + def t_cvt(xx): + return paddle.to_tensor(xx, dtype=obj.repinit.prec, place=env.DEVICE) + + # deserialize repinit + statistic_repinit = repinit_variable.pop("@variables") + env_mat = repinit_variable.pop("env_mat") + tebd_input_mode = data["repinit"].tebd_input_mode + obj.repinit.filter_layers = NetworkCollection.deserialize( + repinit_variable.pop("embeddings") + ) + if tebd_input_mode in ["strip"]: + obj.repinit.filter_layers_strip = NetworkCollection.deserialize( + repinit_variable.pop("embeddings_strip") + ) + obj.repinit["davg"] = t_cvt(statistic_repinit["davg"]) + obj.repinit["dstd"] = t_cvt(statistic_repinit["dstd"]) + + if data["repinit"].use_three_body: + # deserialize repinit_three_body + statistic_repinit_three_body = repinit_three_body_variable.pop("@variables") + env_mat = repinit_three_body_variable.pop("env_mat") + tebd_input_mode = data["repinit"].tebd_input_mode + obj.repinit_three_body.filter_layers = NetworkCollection.deserialize( + repinit_three_body_variable.pop("embeddings") + ) + if tebd_input_mode in ["strip"]: + obj.repinit_three_body.filter_layers_strip = ( + NetworkCollection.deserialize( + repinit_three_body_variable.pop("embeddings_strip") + ) + ) + obj.repinit_three_body["davg"] = t_cvt(statistic_repinit_three_body["davg"]) + obj.repinit_three_body["dstd"] = t_cvt(statistic_repinit_three_body["dstd"]) + + # deserialize repformers + statistic_repformers = repformers_variable.pop("@variables") + env_mat = repformers_variable.pop("env_mat") + repformer_layers = repformers_variable.pop("repformer_layers") + obj.repformers.g2_embd = MLPLayer.deserialize( + repformers_variable.pop("g2_embd") + ) + obj.repformers["davg"] = t_cvt(statistic_repformers["davg"]) + obj.repformers["dstd"] = t_cvt(statistic_repformers["dstd"]) + obj.repformers.layers = paddle.nn.LayerList( + [RepformerLayer.deserialize(layer) for layer in repformer_layers] + ) + return obj + + def forward( + self, + extended_coord: paddle.Tensor, + extended_atype: paddle.Tensor, + nlist: paddle.Tensor, + mapping: Optional[paddle.Tensor] = None, + comm_dict: Optional[dict[str, paddle.Tensor]] = None, + ): + """Compute the descriptor. 
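+
+        The computation proceeds in three stages: the repinit block (plus the
+        optional three-body repinit block) builds an initial single-atom rep
+        from the type embedding, a linear transform maps it to the repformer
+        input dimension, and the stacked repformer layers iteratively refine
+        the single-atom and pair reps.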
+
+        Parameters
+        ----------
+        extended_coord
+            The extended coordinates of atoms. shape: nf x (nall x 3)
+        extended_atype
+            The extended atom types. shape: nf x nall
+        nlist
+            The neighbor list. shape: nf x nloc x nnei
+        mapping
+            The index mapping, which maps the extended region index to the local region.
+        comm_dict
+            The data needed for communication for parallel inference.
+
+        Returns
+        -------
+        descriptor
+            The descriptor. shape: nf x nloc x (ng x axis_neuron)
+        gr
+            The rotationally equivariant and permutationally invariant single particle
+            representation. shape: nf x nloc x ng x 3
+        g2
+            The rotationally invariant pair-particle representation.
+            shape: nf x nloc x nnei x ng
+        h2
+            The rotationally equivariant pair-particle representation.
+            shape: nf x nloc x nnei x 3
+        sw
+            The smooth switch function. shape: nf x nloc x nnei
+
+        """
+        # cast the input to the internal precision
+        extended_coord = extended_coord.to(dtype=self.prec)
+
+        use_three_body = self.use_three_body
+        nframes, nloc, nnei = nlist.shape
+        nall = extended_coord.reshape([nframes, -1]).shape[1] // 3
+        # nlists
+        nlist_dict = build_multiple_neighbor_list(
+            extended_coord.detach(),
+            nlist,
+            self.rcut_list,
+            self.nsel_list,
+        )
+        # repinit
+        g1_ext = self.type_embedding(extended_atype)
+        g1_inp = g1_ext[:, :nloc, :]
+        if self.tebd_input_mode in ["strip"]:
+            type_embedding = self.type_embedding.get_full_embedding(g1_ext.place)
+        else:
+            type_embedding = None
+        g1, _, _, _, _ = self.repinit(
+            nlist_dict[
+                get_multiple_nlist_key(self.repinit.get_rcut(), self.repinit.get_nsel())
+            ],
+            extended_coord,
+            extended_atype,
+            g1_ext,
+            mapping,
+            type_embedding,
+        )
+        if use_three_body:
+            assert self.repinit_three_body is not None
+            g1_three_body, __, __, __, __ = self.repinit_three_body(
+                nlist_dict[
+                    get_multiple_nlist_key(
+                        self.repinit_three_body.get_rcut(),
+                        self.repinit_three_body.get_nsel(),
+                    )
+                ],
+                extended_coord,
+                extended_atype,
+                g1_ext,
+                mapping,
+                type_embedding,
+            )
+            g1 = paddle.concat([g1, g1_three_body], axis=-1)
+        # linear to change shape
+        g1 = self.g1_shape_tranform(g1)
+        if self.add_tebd_to_repinit_out:
+            assert self.tebd_transform is not None
+            g1 = g1 + self.tebd_transform(g1_inp)
+        # mapping g1
+        if comm_dict is None:
+            assert mapping is not None
+            mapping_ext = (
+                mapping.reshape([nframes, nall])
+                .unsqueeze(-1)
+                .expand([-1, -1, g1.shape[-1]])
+            )
+            g1_ext = paddle.take_along_axis(g1, mapping_ext, 1)
+            g1 = g1_ext
+        # repformer
+        g1, g2, h2, rot_mat, sw = self.repformers(
+            nlist_dict[
+                get_multiple_nlist_key(
+                    self.repformers.get_rcut(), self.repformers.get_nsel()
+                )
+            ],
+            extended_coord,
+            extended_atype,
+            g1,
+            mapping,
+            comm_dict=comm_dict,
+        )
+        if self.concat_output_tebd:
+            g1 = paddle.concat([g1, g1_inp], axis=-1)
+        return (
+            g1.to(dtype=env.GLOBAL_PD_FLOAT_PRECISION),
+            rot_mat.to(dtype=env.GLOBAL_PD_FLOAT_PRECISION),
+            g2.to(dtype=env.GLOBAL_PD_FLOAT_PRECISION),
+            h2.to(dtype=env.GLOBAL_PD_FLOAT_PRECISION),
+            sw.to(dtype=env.GLOBAL_PD_FLOAT_PRECISION),
+        )
+
+    @classmethod
+    def update_sel(
+        cls,
+        train_data: DeepmdDataSystem,
+        type_map: Optional[list[str]],
+        local_jdata: dict,
+    ) -> tuple[dict, Optional[float]]:
+        """Update the selection and perform neighbor statistics.
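+
+        The ``nsel`` values of the repinit, three-body repinit, and repformer
+        blocks are each refreshed from the neighbor statistics of the training
+        data, using the corresponding cut-off radius.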
+
+        Parameters
+        ----------
+        train_data : DeepmdDataSystem
+            data used to do neighbor statistics
+        type_map : list[str], optional
+            The name of each type of atoms
+        local_jdata : dict
+            The local data referring to the current class
+
+        Returns
+        -------
+        dict
+            The updated local data
+        float
+            The minimum distance between two atoms
+        """
+        local_jdata_cpy = local_jdata.copy()
+        update_sel = UpdateSel()
+        min_nbor_dist, repinit_sel = update_sel.update_one_sel(
+            train_data,
+            type_map,
+            local_jdata_cpy["repinit"]["rcut"],
+            local_jdata_cpy["repinit"]["nsel"],
+            True,
+        )
+        local_jdata_cpy["repinit"]["nsel"] = repinit_sel[0]
+        min_nbor_dist, repinit_three_body_sel = update_sel.update_one_sel(
+            train_data,
+            type_map,
+            local_jdata_cpy["repinit"]["three_body_rcut"],
+            local_jdata_cpy["repinit"]["three_body_sel"],
+            True,
+        )
+        local_jdata_cpy["repinit"]["three_body_sel"] = repinit_three_body_sel[0]
+        min_nbor_dist, repformer_sel = update_sel.update_one_sel(
+            train_data,
+            type_map,
+            local_jdata_cpy["repformer"]["rcut"],
+            local_jdata_cpy["repformer"]["nsel"],
+            True,
+        )
+        local_jdata_cpy["repformer"]["nsel"] = repformer_sel[0]
+        return local_jdata_cpy, min_nbor_dist
+
+    def enable_compression(
+        self,
+        min_nbor_dist: float,
+        table_extrapolate: float = 5,
+        table_stride_1: float = 0.01,
+        table_stride_2: float = 0.1,
+        check_frequency: int = -1,
+    ) -> None:
+        """Receive the statistics (distance, max_nbor_size and env_mat_range) of the training data.
+
+        Parameters
+        ----------
+        min_nbor_dist
+            The nearest distance between atoms
+        table_extrapolate
+            The scale of model extrapolation
+        table_stride_1
+            The uniform stride of the first table
+        table_stride_2
+            The uniform stride of the second table
+        check_frequency
+            The overflow check frequency
+        """
+        # do some checks before the model compression process
+        raise NotImplementedError("enable_compression is not implemented yet")
diff --git a/deepmd/pd/model/descriptor/repformer_layer.py b/deepmd/pd/model/descriptor/repformer_layer.py
new file mode 100644
index 0000000000..a09c5cbe17
--- /dev/null
+++ b/deepmd/pd/model/descriptor/repformer_layer.py
@@ -0,0 +1,1484 @@
+# SPDX-License-Identifier: LGPL-3.0-or-later
+from typing import (
+    Optional,
+    Union,
+)
+
+import paddle
+import paddle.nn as nn
+
+from deepmd.dpmodel.utils.seed import (
+    child_seed,
+)
+from deepmd.pd.model.network.init import (
+    constant_,
+    normal_,
+)
+from deepmd.pd.model.network.layernorm import (
+    LayerNorm,
+)
+from deepmd.pd.model.network.mlp import (
+    MLPLayer,
+)
+from deepmd.pd.utils import (
+    decomp,
+    env,
+)
+from deepmd.pd.utils.env import (
+    PRECISION_DICT,
+)
+from deepmd.pd.utils.utils import (
+    ActivationFn,
+    get_generator,
+    to_numpy_array,
+    to_paddle_tensor,
+)
+from deepmd.utils.version import (
+    check_version_compatibility,
+)
+
+
+def get_residual(
+    _dim: int,
+    _scale: float,
+    _mode: str = "norm",
+    trainable: bool = True,
+    precision: str = "float64",
+    seed: Optional[Union[int, list[int]]] = None,
+) -> paddle.Tensor:
+    r"""
+    Get residual tensor for one update vector.
+
+    Parameters
+    ----------
+    _dim : int
+        The dimension of the update vector.
+    _scale
+        The initial scale of the residual tensor. See `_mode` for details.
+    _mode
+        The mode of residual initialization for the residual tensor.
+        - "norm" (default): init residual using normal with `_scale` std.
+        - "const": init residual using element-wise constants of `_scale`.
+    trainable
+        Whether the residual tensor is trainable.
+    precision
+        The precision of the residual tensor.
+ seed : int, optional + Random seed for parameter initialization. + """ + random_generator = get_generator(seed) + residual = paddle.create_parameter( + [_dim], + dtype=PRECISION_DICT[precision], + default_initializer=nn.initializer.Constant(0), + ).to(device=env.DEVICE) + residual.stop_gradient = not trainable + if _mode == "norm": + normal_(residual.data, std=_scale, generator=random_generator) + elif _mode == "const": + constant_(residual.data, val=_scale) + else: + raise RuntimeError(f"Unsupported initialization mode '{_mode}'!") + return residual + + +# common ops +def _make_nei_g1( + g1_ext: paddle.Tensor, + nlist: paddle.Tensor, +) -> paddle.Tensor: + """ + Make neighbor-wise atomic invariant rep. + + Parameters + ---------- + g1_ext + Extended atomic invariant rep, with shape nb x nall x ng1. + nlist + Neighbor list, with shape nb x nloc x nnei. + + Returns + ------- + gg1: paddle.Tensor + Neighbor-wise atomic invariant rep, with shape nb x nloc x nnei x ng1. + + """ + # nlist: nb x nloc x nnei + nb, nloc, nnei = nlist.shape + # g1_ext: nb x nall x ng1 + ng1 = g1_ext.shape[-1] + # index: nb x (nloc x nnei) x ng1 + index = nlist.reshape([nb, nloc * nnei]).unsqueeze(-1).expand([-1, -1, ng1]) + # gg1 : nb x (nloc x nnei) x ng1 + gg1 = paddle.take_along_axis(g1_ext, axis=1, indices=index) + # gg1 : nb x nloc x nnei x ng1 + gg1 = gg1.reshape([nb, nloc, nnei, ng1]) + return gg1 + + +def _apply_nlist_mask( + gg: paddle.Tensor, + nlist_mask: paddle.Tensor, +) -> paddle.Tensor: + """ + Apply nlist mask to neighbor-wise rep tensors. + + Parameters + ---------- + gg + Neighbor-wise rep tensors, with shape nf x nloc x nnei x d. + nlist_mask + Neighbor list mask, where zero means no neighbor, with shape nf x nloc x nnei. + """ + # gg: nf x nloc x nnei x d + # msk: nf x nloc x nnei + return gg.masked_fill(~nlist_mask.unsqueeze(-1), 0.0) + + +def _apply_switch(gg: paddle.Tensor, sw: paddle.Tensor) -> paddle.Tensor: + """ + Apply switch function to neighbor-wise rep tensors. + + Parameters + ---------- + gg + Neighbor-wise rep tensors, with shape nf x nloc x nnei x d. + sw + The switch function, which equals 1 within the rcut_smth range, smoothly decays from 1 to 0 between rcut_smth and rcut, + and remains 0 beyond rcut, with shape nf x nloc x nnei. 
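+
+    Returns
+    -------
+    paddle.Tensor
+        The switch-weighted rep tensors, with shape nf x nloc x nnei x d.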
+ """ + # gg: nf x nloc x nnei x d + # sw: nf x nloc x nnei + return gg * sw.unsqueeze(-1) + + +class Atten2Map(paddle.nn.Layer): + def __init__( + self, + input_dim: int, + hidden_dim: int, + head_num: int, + has_gate: bool = False, # apply gate to attn map + smooth: bool = True, + attnw_shift: float = 20.0, + precision: str = "float64", + seed: Optional[Union[int, list[int]]] = None, + ): + """Return neighbor-wise multi-head self-attention maps, with gate mechanism.""" + super().__init__() + self.input_dim = input_dim + self.hidden_dim = hidden_dim + self.head_num = head_num + self.mapqk = MLPLayer( + input_dim, + hidden_dim * 2 * head_num, + bias=False, + precision=precision, + seed=seed, + ) + self.has_gate = has_gate + self.smooth = smooth + self.attnw_shift = attnw_shift + self.precision = precision + + def forward( + self, + g2: paddle.Tensor, # nb x nloc x nnei x ng2 + h2: paddle.Tensor, # nb x nloc x nnei x 3 + nlist_mask: paddle.Tensor, # nb x nloc x nnei + sw: paddle.Tensor, # nb x nloc x nnei + ) -> paddle.Tensor: + ( + nb, + nloc, + nnei, + _, + ) = g2.shape + nd, nh = self.hidden_dim, self.head_num + # nb x nloc x nnei x nd x (nh x 2) + g2qk = self.mapqk(g2).reshape([nb, nloc, nnei, nd, nh * 2]) + # nb x nloc x (nh x 2) x nnei x nd + g2qk = paddle.transpose(g2qk, (0, 1, 4, 2, 3)) + # nb x nloc x nh x nnei x nd + g2q, g2k = paddle.split(g2qk, decomp.sec(g2qk.shape[2], nh), axis=2) + # g2q = paddle.nn.functional.normalize(g2q, axis=-1) + # g2k = paddle.nn.functional.normalize(g2k, axis=-1) + # nb x nloc x nh x nnei x nnei + attnw = paddle.matmul(g2q, paddle.transpose(g2k, [0, 1, 2, 4, 3])) / nd**0.5 + if self.has_gate: + gate = paddle.matmul(h2, paddle.transpose(h2, [0, 1, 3, 2])).unsqueeze(-3) + attnw = attnw * gate + # mask the attenmap, nb x nloc x 1 x 1 x nnei + attnw_mask = ~nlist_mask.unsqueeze(2).unsqueeze(2) + # mask the attenmap, nb x nloc x 1 x nnei x 1 + attnw_mask_c = ~nlist_mask.unsqueeze(2).unsqueeze(-1) + if self.smooth: + attnw = (attnw + self.attnw_shift) * sw[:, :, None, :, None] * sw[ + :, :, None, None, : + ] - self.attnw_shift + else: + attnw = attnw.masked_fill( + attnw_mask, + float("-inf"), + ) + attnw = paddle.nn.functional.softmax(attnw, axis=-1) + attnw = attnw.masked_fill( + attnw_mask, + 0.0, + ) + # nb x nloc x nh x nnei x nnei + attnw = attnw.masked_fill( + attnw_mask_c, + 0.0, + ) + if self.smooth: + attnw = attnw * sw[:, :, None, :, None] * sw[:, :, None, None, :] + # nb x nloc x nnei x nnei + h2h2t = paddle.matmul(h2, paddle.transpose(h2, [0, 1, 3, 2])) / 3.0**0.5 + # nb x nloc x nh x nnei x nnei + ret = attnw * h2h2t[:, :, None, :, :] + # ret = paddle.nn.functional.softmax(g2qk, axis=-1) + # nb x nloc x nnei x nnei x nh + ret = paddle.transpose(ret, (0, 1, 3, 4, 2)) + return ret + + def serialize(self) -> dict: + """Serialize the networks to a dict. + + Returns + ------- + dict + The serialized networks. + """ + return { + "@class": "Atten2Map", + "@version": 1, + "input_dim": self.input_dim, + "hidden_dim": self.hidden_dim, + "head_num": self.head_num, + "has_gate": self.has_gate, + "smooth": self.smooth, + "attnw_shift": self.attnw_shift, + "precision": self.precision, + "mapqk": self.mapqk.serialize(), + } + + @classmethod + def deserialize(cls, data: dict) -> "Atten2Map": + """Deserialize the networks from a dict. + + Parameters + ---------- + data : dict + The dict to deserialize from. 
+ """ + data = data.copy() + check_version_compatibility(data.pop("@version"), 1, 1) + data.pop("@class") + mapqk = data.pop("mapqk") + obj = cls(**data) + obj.mapqk = MLPLayer.deserialize(mapqk) + return obj + + +class Atten2MultiHeadApply(paddle.nn.Layer): + def __init__( + self, + input_dim: int, + head_num: int, + precision: str = "float64", + seed: Optional[Union[int, list[int]]] = None, + ) -> None: + super().__init__() + self.input_dim = input_dim + self.head_num = head_num + self.mapv = MLPLayer( + input_dim, + input_dim * head_num, + bias=False, + precision=precision, + seed=child_seed(seed, 0), + ) + self.head_map = MLPLayer( + input_dim * head_num, + input_dim, + precision=precision, + seed=child_seed(seed, 1), + ) + self.precision = precision + + def forward( + self, + AA: paddle.Tensor, # nf x nloc x nnei x nnei x nh + g2: paddle.Tensor, # nf x nloc x nnei x ng2 + ) -> paddle.Tensor: + nf, nloc, nnei, ng2 = g2.shape + nh = self.head_num + # nf x nloc x nnei x ng2 x nh + g2v = self.mapv(g2).reshape([nf, nloc, nnei, ng2, nh]) + # nf x nloc x nh x nnei x ng2 + g2v = paddle.transpose(g2v, (0, 1, 4, 2, 3)) + # g2v = paddle.nn.functional.normalize(g2v, axis=-1) + # nf x nloc x nh x nnei x nnei + AA = paddle.transpose(AA, (0, 1, 4, 2, 3)) + # nf x nloc x nh x nnei x ng2 + ret = paddle.matmul(AA, g2v) + # nf x nloc x nnei x ng2 x nh + ret = paddle.transpose(ret, (0, 1, 3, 4, 2)).reshape( + [nf, nloc, nnei, (ng2 * nh)] + ) + # nf x nloc x nnei x ng2 + return self.head_map(ret) + + def serialize(self) -> dict: + """Serialize the networks to a dict. + + Returns + ------- + dict + The serialized networks. + """ + return { + "@class": "Atten2MultiHeadApply", + "@version": 1, + "input_dim": self.input_dim, + "head_num": self.head_num, + "precision": self.precision, + "mapv": self.mapv.serialize(), + "head_map": self.head_map.serialize(), + } + + @classmethod + def deserialize(cls, data: dict) -> "Atten2MultiHeadApply": + """Deserialize the networks from a dict. + + Parameters + ---------- + data : dict + The dict to deserialize from. + """ + data = data.copy() + check_version_compatibility(data.pop("@version"), 1, 1) + data.pop("@class") + mapv = data.pop("mapv") + head_map = data.pop("head_map") + obj = cls(**data) + obj.mapv = MLPLayer.deserialize(mapv) + obj.head_map = MLPLayer.deserialize(head_map) + return obj + + +class Atten2EquiVarApply(paddle.nn.Layer): + def __init__( + self, + input_dim: int, + head_num: int, + precision: str = "float64", + seed: Optional[Union[int, list[int]]] = None, + ) -> None: + super().__init__() + self.input_dim = input_dim + self.head_num = head_num + self.head_map = MLPLayer( + head_num, 1, bias=False, precision=precision, seed=seed + ) + self.precision = precision + + def forward( + self, + AA: paddle.Tensor, # nf x nloc x nnei x nnei x nh + h2: paddle.Tensor, # nf x nloc x nnei x 3 + ) -> paddle.Tensor: + nf, nloc, nnei, _ = h2.shape + nh = self.head_num + # nf x nloc x nh x nnei x nnei + AA = paddle.transpose(AA, (0, 1, 4, 2, 3)) + h2m = paddle.unsqueeze(h2, axis=2) + # nf x nloc x nh x nnei x 3 + h2m = paddle.tile(h2m, [1, 1, nh, 1, 1]) + # nf x nloc x nh x nnei x 3 + ret = paddle.matmul(AA, h2m) + # nf x nloc x nnei x 3 x nh + ret = paddle.transpose(ret, (0, 1, 3, 4, 2)).reshape([nf, nloc, nnei, 3, nh]) + # nf x nloc x nnei x 3 + return paddle.squeeze(self.head_map(ret), axis=-1) + + def serialize(self) -> dict: + """Serialize the networks to a dict. + + Returns + ------- + dict + The serialized networks. 
+ """ + return { + "@class": "Atten2EquiVarApply", + "@version": 1, + "input_dim": self.input_dim, + "head_num": self.head_num, + "precision": self.precision, + "head_map": self.head_map.serialize(), + } + + @classmethod + def deserialize(cls, data: dict) -> "Atten2EquiVarApply": + """Deserialize the networks from a dict. + + Parameters + ---------- + data : dict + The dict to deserialize from. + """ + data = data.copy() + check_version_compatibility(data.pop("@version"), 1, 1) + data.pop("@class") + head_map = data.pop("head_map") + obj = cls(**data) + obj.head_map = MLPLayer.deserialize(head_map) + return obj + + +class LocalAtten(paddle.nn.Layer): + def __init__( + self, + input_dim: int, + hidden_dim: int, + head_num: int, + smooth: bool = True, + attnw_shift: float = 20.0, + precision: str = "float64", + seed: Optional[Union[int, list[int]]] = None, + ) -> None: + super().__init__() + self.input_dim = input_dim + self.hidden_dim = hidden_dim + self.head_num = head_num + self.mapq = MLPLayer( + input_dim, + hidden_dim * 1 * head_num, + bias=False, + precision=precision, + seed=child_seed(seed, 0), + ) + self.mapkv = MLPLayer( + input_dim, + (hidden_dim + input_dim) * head_num, + bias=False, + precision=precision, + seed=child_seed(seed, 1), + ) + self.head_map = MLPLayer( + input_dim * head_num, + input_dim, + precision=precision, + seed=child_seed(seed, 2), + ) + self.smooth = smooth + self.attnw_shift = attnw_shift + self.precision = precision + + def forward( + self, + g1: paddle.Tensor, # nb x nloc x ng1 + gg1: paddle.Tensor, # nb x nloc x nnei x ng1 + nlist_mask: paddle.Tensor, # nb x nloc x nnei + sw: paddle.Tensor, # nb x nloc x nnei + ) -> paddle.Tensor: + nb, nloc, nnei = nlist_mask.shape + ni, nd, nh = self.input_dim, self.hidden_dim, self.head_num + assert ni == g1.shape[-1] + assert ni == gg1.shape[-1] + # nb x nloc x nd x nh + g1q = self.mapq(g1).reshape([nb, nloc, nd, nh]) + # nb x nloc x nh x nd + g1q = paddle.transpose(g1q, (0, 1, 3, 2)) + # nb x nloc x nnei x (nd+ni) x nh + gg1kv = self.mapkv(gg1).reshape([nb, nloc, nnei, nd + ni, nh]) + gg1kv = paddle.transpose(gg1kv, (0, 1, 4, 2, 3)) + # nb x nloc x nh x nnei x nd, nb x nloc x nh x nnei x ng1 + gg1k, gg1v = paddle.split(gg1kv, [nd, ni], axis=-1) + + # nb x nloc x nh x 1 x nnei + attnw = ( + paddle.matmul(g1q.unsqueeze(-2), paddle.transpose(gg1k, [0, 1, 2, 4, 3])) + / nd**0.5 + ) + # nb x nloc x nh x nnei + attnw = attnw.squeeze(-2) + # mask the attenmap, nb x nloc x 1 x nnei + attnw_mask = ~nlist_mask.unsqueeze(-2) + # nb x nloc x nh x nnei + if self.smooth: + attnw = (attnw + self.attnw_shift) * sw.unsqueeze(-2) - self.attnw_shift + else: + attnw = attnw.masked_fill( + attnw_mask, + float("-inf"), + ) + attnw = paddle.nn.functional.softmax(attnw, axis=-1) + attnw = attnw.masked_fill( + attnw_mask, + 0.0, + ) + if self.smooth: + attnw = attnw * sw.unsqueeze(-2) + + # nb x nloc x nh x ng1 + ret = ( + paddle.matmul(attnw.unsqueeze(-2), gg1v) + .squeeze(-2) + .reshape([nb, nloc, nh * ni]) + ) + # nb x nloc x ng1 + ret = self.head_map(ret) + return ret + + def serialize(self) -> dict: + """Serialize the networks to a dict. + + Returns + ------- + dict + The serialized networks. 
+ """ + return { + "@class": "LocalAtten", + "@version": 1, + "input_dim": self.input_dim, + "hidden_dim": self.hidden_dim, + "head_num": self.head_num, + "smooth": self.smooth, + "attnw_shift": self.attnw_shift, + "precision": self.precision, + "mapq": self.mapq.serialize(), + "mapkv": self.mapkv.serialize(), + "head_map": self.head_map.serialize(), + } + + @classmethod + def deserialize(cls, data: dict) -> "LocalAtten": + """Deserialize the networks from a dict. + + Parameters + ---------- + data : dict + The dict to deserialize from. + """ + data = data.copy() + check_version_compatibility(data.pop("@version"), 1, 1) + data.pop("@class") + mapq = data.pop("mapq") + mapkv = data.pop("mapkv") + head_map = data.pop("head_map") + obj = cls(**data) + obj.mapq = MLPLayer.deserialize(mapq) + obj.mapkv = MLPLayer.deserialize(mapkv) + obj.head_map = MLPLayer.deserialize(head_map) + return obj + + +class RepformerLayer(paddle.nn.Layer): + def __init__( + self, + rcut, + rcut_smth, + sel: int, + ntypes: int, + g1_dim=128, + g2_dim=16, + axis_neuron: int = 4, + update_chnnl_2: bool = True, + update_g1_has_conv: bool = True, + update_g1_has_drrd: bool = True, + update_g1_has_grrg: bool = True, + update_g1_has_attn: bool = True, + update_g2_has_g1g1: bool = True, + update_g2_has_attn: bool = True, + update_h2: bool = False, + attn1_hidden: int = 64, + attn1_nhead: int = 4, + attn2_hidden: int = 16, + attn2_nhead: int = 4, + attn2_has_gate: bool = False, + activation_function: str = "tanh", + update_style: str = "res_avg", + update_residual: float = 0.001, + update_residual_init: str = "norm", + smooth: bool = True, + precision: str = "float64", + trainable_ln: bool = True, + ln_eps: Optional[float] = 1e-5, + use_sqrt_nnei: bool = True, + g1_out_conv: bool = True, + g1_out_mlp: bool = True, + seed: Optional[Union[int, list[int]]] = None, + ) -> None: + super().__init__() + self.epsilon = 1e-4 # protection of 1./nnei + self.rcut = float(rcut) + self.rcut_smth = float(rcut_smth) + self.ntypes = ntypes + sel = [sel] if isinstance(sel, int) else sel + self.nnei = sum(sel) + assert len(sel) == 1 + self.sel = sel + self.sec = self.sel + self.axis_neuron = axis_neuron + self.activation_function = activation_function + self.act = ActivationFn(activation_function) + self.update_g1_has_grrg = update_g1_has_grrg + self.update_g1_has_drrd = update_g1_has_drrd + self.update_g1_has_conv = update_g1_has_conv + self.update_g1_has_attn = update_g1_has_attn + self.update_chnnl_2 = update_chnnl_2 + self.update_g2_has_g1g1 = update_g2_has_g1g1 if self.update_chnnl_2 else False + self.update_g2_has_attn = update_g2_has_attn if self.update_chnnl_2 else False + self.update_h2 = update_h2 if self.update_chnnl_2 else False + del update_g2_has_g1g1, update_g2_has_attn, update_h2 + self.attn1_hidden = attn1_hidden + self.attn1_nhead = attn1_nhead + self.attn2_hidden = attn2_hidden + self.attn2_nhead = attn2_nhead + self.attn2_has_gate = attn2_has_gate + self.update_style = update_style + self.update_residual = update_residual + self.update_residual_init = update_residual_init + self.smooth = smooth + self.g1_dim = g1_dim + self.g2_dim = g2_dim + self.trainable_ln = trainable_ln + self.ln_eps = ln_eps + self.precision = precision + self.seed = seed + self.use_sqrt_nnei = use_sqrt_nnei + self.g1_out_conv = g1_out_conv + self.g1_out_mlp = g1_out_mlp + + assert update_residual_init in [ + "norm", + "const", + ], "'update_residual_init' only support 'norm' or 'const'!" 
+ self.update_residual = update_residual + self.update_residual_init = update_residual_init + self.g1_residual = [] + self.g2_residual = [] + self.h2_residual = [] + + if self.update_style == "res_residual": + self.g1_residual.append( + get_residual( + g1_dim, + self.update_residual, + self.update_residual_init, + precision=precision, + seed=child_seed(seed, 0), + ) + ) + + g1_in_dim = self.cal_1_dim(g1_dim, g2_dim, self.axis_neuron) + self.linear1 = MLPLayer( + g1_in_dim, + g1_dim, + precision=precision, + seed=child_seed(seed, 1), + ) + self.linear2 = None + self.proj_g1g2 = None + self.proj_g1g1g2 = None + self.attn2g_map = None + self.attn2_mh_apply = None + self.attn2_lm = None + self.attn2_ev_apply = None + self.loc_attn = None + + if self.update_chnnl_2: + self.linear2 = MLPLayer( + g2_dim, + g2_dim, + precision=precision, + seed=child_seed(seed, 2), + ) + if self.update_style == "res_residual": + self.g2_residual.append( + get_residual( + g2_dim, + self.update_residual, + self.update_residual_init, + precision=precision, + seed=child_seed(seed, 3), + ) + ) + if self.g1_out_mlp: + self.g1_self_mlp = MLPLayer( + g1_dim, + g1_dim, + precision=precision, + seed=child_seed(seed, 15), + ) + if self.update_style == "res_residual": + self.g1_residual.append( + get_residual( + g1_dim, + self.update_residual, + self.update_residual_init, + precision=precision, + seed=child_seed(seed, 16), + ) + ) + else: + self.g1_self_mlp = None + if self.update_g1_has_conv: + if not self.g1_out_conv: + self.proj_g1g2 = MLPLayer( + g1_dim, + g2_dim, + bias=False, + precision=precision, + seed=child_seed(seed, 4), + ) + else: + self.proj_g1g2 = MLPLayer( + g2_dim, + g1_dim, + bias=False, + precision=precision, + seed=child_seed(seed, 4), + ) + if self.update_style == "res_residual": + self.g1_residual.append( + get_residual( + g1_dim, + self.update_residual, + self.update_residual_init, + precision=precision, + seed=child_seed(seed, 17), + ) + ) + if self.update_g2_has_g1g1: + self.proj_g1g1g2 = MLPLayer( + g1_dim, + g2_dim, + bias=False, + precision=precision, + seed=child_seed(seed, 5), + ) + if self.update_style == "res_residual": + self.g2_residual.append( + get_residual( + g2_dim, + self.update_residual, + self.update_residual_init, + precision=precision, + seed=child_seed(seed, 6), + ) + ) + if self.update_g2_has_attn or self.update_h2: + self.attn2g_map = Atten2Map( + g2_dim, + attn2_hidden, + attn2_nhead, + attn2_has_gate, + self.smooth, + precision=precision, + seed=child_seed(seed, 7), + ) + if self.update_g2_has_attn: + self.attn2_mh_apply = Atten2MultiHeadApply( + g2_dim, attn2_nhead, precision=precision, seed=child_seed(seed, 8) + ) + self.attn2_lm = LayerNorm( + g2_dim, + eps=ln_eps, + trainable=trainable_ln, + precision=precision, + seed=child_seed(seed, 9), + ) + if self.update_style == "res_residual": + self.g2_residual.append( + get_residual( + g2_dim, + self.update_residual, + self.update_residual_init, + precision=precision, + seed=child_seed(seed, 10), + ) + ) + + if self.update_h2: + self.attn2_ev_apply = Atten2EquiVarApply( + g2_dim, attn2_nhead, precision=precision, seed=child_seed(seed, 11) + ) + if self.update_style == "res_residual": + self.h2_residual.append( + get_residual( + 1, + self.update_residual, + self.update_residual_init, + precision=precision, + seed=child_seed(seed, 12), + ) + ) + if self.update_g1_has_attn: + self.loc_attn = LocalAtten( + g1_dim, + attn1_hidden, + attn1_nhead, + self.smooth, + precision=precision, + seed=child_seed(seed, 13), + ) + if 
self.update_style == "res_residual": + self.g1_residual.append( + get_residual( + g1_dim, + self.update_residual, + self.update_residual_init, + precision=precision, + seed=child_seed(seed, 14), + ) + ) + + self.g1_residual = nn.ParameterList(self.g1_residual) + self.g2_residual = nn.ParameterList(self.g2_residual) + self.h2_residual = nn.ParameterList(self.h2_residual) + + def cal_1_dim(self, g1d: int, g2d: int, ax: int) -> int: + ret = g1d if not self.g1_out_mlp else 0 + if self.update_g1_has_grrg: + ret += g2d * ax + if self.update_g1_has_drrd: + ret += g1d * ax + if self.update_g1_has_conv and not self.g1_out_conv: + ret += g2d + return ret + + def _update_h2( + self, + h2: paddle.Tensor, + attn: paddle.Tensor, + ) -> paddle.Tensor: + """ + Calculate the attention weights update for pair-wise equivariant rep. + + Parameters + ---------- + h2 + Pair-wise equivariant rep tensors, with shape nf x nloc x nnei x 3. + attn + Attention weights from g2 attention, with shape nf x nloc x nnei x nnei x nh2. + """ + assert self.attn2_ev_apply is not None + # nf x nloc x nnei x nh2 + h2_1 = self.attn2_ev_apply(attn, h2) + return h2_1 + + def _update_g1_conv( + self, + gg1: paddle.Tensor, + g2: paddle.Tensor, + nlist_mask: paddle.Tensor, + sw: paddle.Tensor, + ) -> paddle.Tensor: + """ + Calculate the convolution update for atomic invariant rep. + + Parameters + ---------- + gg1 + Neighbor-wise atomic invariant rep, with shape nb x nloc x nnei x ng1. + g2 + Pair invariant rep, with shape nb x nloc x nnei x ng2. + nlist_mask + Neighbor list mask, where zero means no neighbor, with shape nb x nloc x nnei. + sw + The switch function, which equals 1 within the rcut_smth range, smoothly decays from 1 to 0 between rcut_smth and rcut, + and remains 0 beyond rcut, with shape nb x nloc x nnei. + """ + assert self.proj_g1g2 is not None + nb, nloc, nnei, _ = g2.shape + ng1 = gg1.shape[-1] + ng2 = g2.shape[-1] + if not self.g1_out_conv: + # gg1 : nb x nloc x nnei x ng2 + gg1 = self.proj_g1g2(gg1).reshape([nb, nloc, nnei, ng2]) + else: + gg1 = gg1.reshape([nb, nloc, nnei, ng1]) + # nb x nloc x nnei x ng2/ng1 + gg1 = _apply_nlist_mask(gg1, nlist_mask) + if not self.smooth: + # normalized by number of neighbors, not smooth + # nb x nloc x 1 + # must use astype here to convert bool to float, otherwise there will be numerical difference from numpy + invnnei = 1.0 / ( + self.epsilon + paddle.sum(nlist_mask.astype(gg1.dtype), axis=-1) + ).unsqueeze(-1) + else: + gg1 = _apply_switch(gg1, sw) + invnnei = (1.0 / float(nnei)) * paddle.ones( + (nb, nloc, 1), dtype=gg1.dtype + ).to(device=gg1.place) + if not self.g1_out_conv: + # nb x nloc x ng2 + g1_11 = paddle.sum(g2 * gg1, axis=2) * invnnei + else: + g2 = self.proj_g1g2(g2).reshape([nb, nloc, nnei, ng1]) + # nb x nloc x ng1 + g1_11 = paddle.sum(g2 * gg1, axis=2) * invnnei + return g1_11 + + @staticmethod + def _cal_hg( + g2: paddle.Tensor, + h2: paddle.Tensor, + nlist_mask: paddle.Tensor, + sw: paddle.Tensor, + smooth: bool = True, + epsilon: float = 1e-4, + use_sqrt_nnei: bool = True, + ) -> paddle.Tensor: + """ + Calculate the transposed rotation matrix. + + Parameters + ---------- + g2 + Neighbor-wise/Pair-wise invariant rep tensors, with shape nb x nloc x nnei x ng2. + h2 + Neighbor-wise/Pair-wise equivariant rep tensors, with shape nb x nloc x nnei x 3. + nlist_mask + Neighbor list mask, where zero means no neighbor, with shape nb x nloc x nnei. 
+ sw + The switch function, which equals 1 within the rcut_smth range, smoothly decays from 1 to 0 between rcut_smth and rcut, + and remains 0 beyond rcut, with shape nb x nloc x nnei. + smooth + Whether to use smoothness in processes such as attention weights calculation. + epsilon + Protection of 1./nnei. + + Returns + ------- + hg + The transposed rotation matrix, with shape nb x nloc x 3 x ng2. + """ + # g2: nb x nloc x nnei x ng2 + # h2: nb x nloc x nnei x 3 + # msk: nb x nloc x nnei + nb, nloc, nnei, _ = g2.shape + ng2 = g2.shape[-1] + # nb x nloc x nnei x ng2 + g2 = _apply_nlist_mask(g2, nlist_mask) + if not smooth: + # nb x nloc + # must use astype here to convert bool to float, otherwise there will be numerical difference from numpy + if not use_sqrt_nnei: + invnnei = 1.0 / ( + epsilon + paddle.sum(nlist_mask.astype(g2.dtype), axis=-1) + ) + else: + invnnei = 1.0 / ( + epsilon + + paddle.sqrt(paddle.sum(nlist_mask.astype(g2.dtype), axis=-1)) + ) + # nb x nloc x 1 x 1 + invnnei = invnnei.unsqueeze(-1).unsqueeze(-1) + else: + g2 = _apply_switch(g2, sw) + if not use_sqrt_nnei: + invnnei = (1.0 / float(nnei)) * paddle.ones( + (nb, nloc, 1, 1), dtype=g2.dtype + ).to(device=g2.place) + else: + invnnei = paddle.rsqrt( + float(nnei) + * paddle.ones((nb, nloc, 1, 1), dtype=g2.dtype).to(device=g2.place) + ) + # nb x nloc x 3 x ng2 + h2g2 = paddle.matmul(paddle.transpose(h2, [0, 1, 3, 2]), g2) * invnnei + return h2g2 + + @staticmethod + def _cal_grrg(h2g2: paddle.Tensor, axis_neuron: int) -> paddle.Tensor: + """ + Calculate the atomic invariant rep. + + Parameters + ---------- + h2g2 + The transposed rotation matrix, with shape nb x nloc x 3 x ng2. + axis_neuron + Size of the submatrix. + + Returns + ------- + grrg + Atomic invariant rep, with shape nb x nloc x (axis_neuron x ng2) + """ + # nb x nloc x 3 x ng2 + nb, nloc, _, ng2 = h2g2.shape + # nb x nloc x 3 x axis + # h2g2m = paddle.split(h2g2, decomp.sec(h2g2.shape[-1], axis_neuron), axis=-1)[0] + h2g2m = h2g2[..., :axis_neuron] # use slice instead of split + # nb x nloc x axis x ng2 + g1_13 = paddle.matmul(paddle.transpose(h2g2m, [0, 1, 3, 2]), h2g2) / (3.0**1) + # nb x nloc x (axisxng2) + g1_13 = g1_13.reshape([nb, nloc, axis_neuron * ng2]) + return g1_13 + + def symmetrization_op( + self, + g2: paddle.Tensor, + h2: paddle.Tensor, + nlist_mask: paddle.Tensor, + sw: paddle.Tensor, + axis_neuron: int, + smooth: bool = True, + epsilon: float = 1e-4, + ) -> paddle.Tensor: + """ + Symmetrization operator to obtain atomic invariant rep. + + Parameters + ---------- + g2 + Neighbor-wise/Pair-wise invariant rep tensors, with shape nb x nloc x nnei x ng2. + h2 + Neighbor-wise/Pair-wise equivariant rep tensors, with shape nb x nloc x nnei x 3. + nlist_mask + Neighbor list mask, where zero means no neighbor, with shape nb x nloc x nnei. + sw + The switch function, which equals 1 within the rcut_smth range, smoothly decays from 1 to 0 between rcut_smth and rcut, + and remains 0 beyond rcut, with shape nb x nloc x nnei. + axis_neuron + Size of the submatrix. + smooth + Whether to use smoothness in processes such as attention weights calculation. + epsilon + Protection of 1./nnei. 
+ + Returns + ------- + grrg + Atomic invariant rep, with shape nb x nloc x (axis_neuron x ng2) + """ + # g2: nb x nloc x nnei x ng2 + # h2: nb x nloc x nnei x 3 + # msk: nb x nloc x nnei + nb, nloc, nnei, _ = g2.shape + # nb x nloc x 3 x ng2 + h2g2 = self._cal_hg( + g2, + h2, + nlist_mask, + sw, + smooth=smooth, + epsilon=epsilon, + use_sqrt_nnei=self.use_sqrt_nnei, + ) + # nb x nloc x (axisxng2) + g1_13 = self._cal_grrg(h2g2, axis_neuron) + return g1_13 + + def _update_g2_g1g1( + self, + g1: paddle.Tensor, # nb x nloc x ng1 + gg1: paddle.Tensor, # nb x nloc x nnei x ng1 + nlist_mask: paddle.Tensor, # nb x nloc x nnei + sw: paddle.Tensor, # nb x nloc x nnei + ) -> paddle.Tensor: + """ + Update the g2 using element-wise dot g1_i * g1_j. + + Parameters + ---------- + g1 + Atomic invariant rep, with shape nb x nloc x ng1. + gg1 + Neighbor-wise atomic invariant rep, with shape nb x nloc x nnei x ng1. + nlist_mask + Neighbor list mask, where zero means no neighbor, with shape nb x nloc x nnei. + sw + The switch function, which equals 1 within the rcut_smth range, smoothly decays from 1 to 0 between rcut_smth and rcut, + and remains 0 beyond rcut, with shape nb x nloc x nnei. + """ + ret = g1.unsqueeze(-2) * gg1 + # nb x nloc x nnei x ng1 + ret = _apply_nlist_mask(ret, nlist_mask) + if self.smooth: + ret = _apply_switch(ret, sw) + return ret + + def forward( + self, + g1_ext: paddle.Tensor, # nf x nall x ng1 + g2: paddle.Tensor, # nf x nloc x nnei x ng2 + h2: paddle.Tensor, # nf x nloc x nnei x 3 + nlist: paddle.Tensor, # nf x nloc x nnei + nlist_mask: paddle.Tensor, # nf x nloc x nnei + sw: paddle.Tensor, # switch func, nf x nloc x nnei + ): + """ + Parameters + ---------- + g1_ext : nf x nall x ng1 extended single-atom channel + g2 : nf x nloc x nnei x ng2 pair-atom channel, invariant + h2 : nf x nloc x nnei x 3 pair-atom channel, equivariant + nlist : nf x nloc x nnei neighbor list (padded neis are set to 0) + nlist_mask : nf x nloc x nnei masks of the neighbor list. 
real nei 1 otherwise 0 + sw : nf x nloc x nnei switch function + + Returns + ------- + g1: nf x nloc x ng1 updated single-atom channel + g2: nf x nloc x nnei x ng2 updated pair-atom channel, invariant + h2: nf x nloc x nnei x 3 updated pair-atom channel, equivariant + """ + cal_gg1 = ( + self.update_g1_has_drrd + or self.update_g1_has_conv + or self.update_g1_has_attn + or self.update_g2_has_g1g1 + ) + + nb, nloc, nnei, _ = g2.shape + nall = g1_ext.shape[1] + g1, _ = paddle.split(g1_ext, [nloc, nall - nloc], axis=1) + if paddle.in_dynamic_mode(): + assert [nb, nloc] == g1.shape[:2] + if paddle.in_dynamic_mode(): + assert [nb, nloc, nnei] == h2.shape[:3] + + g2_update: list[paddle.Tensor] = [g2] + h2_update: list[paddle.Tensor] = [h2] + g1_update: list[paddle.Tensor] = [g1] + g1_mlp: list[paddle.Tensor] = [g1] if not self.g1_out_mlp else [] + if self.g1_out_mlp: + if paddle.in_dynamic_mode(): + assert self.g1_self_mlp is not None + g1_self_mlp = self.act(self.g1_self_mlp(g1)) + g1_update.append(g1_self_mlp) + + if cal_gg1: + gg1 = _make_nei_g1(g1_ext, nlist) + else: + gg1 = None + + if self.update_chnnl_2: + # mlp(g2) + if paddle.in_dynamic_mode(): + assert self.linear2 is not None + # nb x nloc x nnei x ng2 + g2_1 = self.act(self.linear2(g2)) + g2_update.append(g2_1) + + if self.update_g2_has_g1g1: + # linear(g1_i * g1_j) + if paddle.in_dynamic_mode(): + assert gg1 is not None + if paddle.in_dynamic_mode(): + assert self.proj_g1g1g2 is not None + g2_update.append( + self.proj_g1g1g2(self._update_g2_g1g1(g1, gg1, nlist_mask, sw)) + ) + + if self.update_g2_has_attn or self.update_h2: + # gated_attention(g2, h2) + if paddle.in_dynamic_mode(): + assert self.attn2g_map is not None + # nb x nloc x nnei x nnei x nh + AAg = self.attn2g_map(g2, h2, nlist_mask, sw) + + if self.update_g2_has_attn: + if paddle.in_dynamic_mode(): + assert self.attn2_mh_apply is not None + if paddle.in_dynamic_mode(): + assert self.attn2_lm is not None + # nb x nloc x nnei x ng2 + g2_2 = self.attn2_mh_apply(AAg, g2) + g2_2 = self.attn2_lm(g2_2) + g2_update.append(g2_2) + + if self.update_h2: + # linear_head(attention_weights * h2) + h2_update.append(self._update_h2(h2, AAg)) + + if self.update_g1_has_conv: + if paddle.in_dynamic_mode(): + assert gg1 is not None + g1_conv = self._update_g1_conv(gg1, g2, nlist_mask, sw) + if not self.g1_out_conv: + g1_mlp.append(g1_conv) + else: + g1_update.append(g1_conv) + + if self.update_g1_has_grrg: + g1_mlp.append( + self.symmetrization_op( + g2, + h2, + nlist_mask, + sw, + self.axis_neuron, + smooth=self.smooth, + epsilon=self.epsilon, + ) + ) + + if self.update_g1_has_drrd: + if paddle.in_dynamic_mode(): + assert gg1 is not None + g1_mlp.append( + self.symmetrization_op( + gg1, + h2, + nlist_mask, + sw, + self.axis_neuron, + smooth=self.smooth, + epsilon=self.epsilon, + ) + ) + + # nb x nloc x [ng1+ng2+(axisxng2)+(axisxng1)] + # conv grrg drrd + g1_1 = self.act(self.linear1(paddle.concat(g1_mlp, axis=-1))) + g1_update.append(g1_1) + + if self.update_g1_has_attn: + assert gg1 is not None + assert self.loc_attn is not None + g1_update.append(self.loc_attn(g1, gg1, nlist_mask, sw)) + + # update + if self.update_chnnl_2: + g2_new = self.list_update(g2_update, "g2") + h2_new = self.list_update(h2_update, "h2") + else: + g2_new, h2_new = g2, h2 + g1_new = self.list_update(g1_update, "g1") + return g1_new, g2_new, h2_new + + def list_update_res_avg( + self, + update_list: list[paddle.Tensor], + ) -> paddle.Tensor: + nitem = len(update_list) + uu = update_list[0] + for ii in range(1, 
nitem): + uu = uu + update_list[ii] + return uu / (float(nitem) ** 0.5) + + def list_update_res_incr(self, update_list: list[paddle.Tensor]) -> paddle.Tensor: + nitem = len(update_list) + uu = update_list[0] + scale = 1.0 / (float(nitem - 1) ** 0.5) if nitem > 1 else 0.0 + for ii in range(1, nitem): + uu = uu + scale * update_list[ii] + return uu + + def list_update_res_residual( + self, update_list: list[paddle.Tensor], update_name: str = "g1" + ) -> paddle.Tensor: + nitem = len(update_list) + uu = update_list[0] + # make jit happy + if update_name == "g1": + for ii, vv in enumerate(self.g1_residual): + uu = uu + vv * update_list[ii + 1] + elif update_name == "g2": + for ii, vv in enumerate(self.g2_residual): + uu = uu + vv * update_list[ii + 1] + elif update_name == "h2": + for ii, vv in enumerate(self.h2_residual): + uu = uu + vv * update_list[ii + 1] + else: + raise NotImplementedError + return uu + + def list_update( + self, update_list: list[paddle.Tensor], update_name: str = "g1" + ) -> paddle.Tensor: + if self.update_style == "res_avg": + return self.list_update_res_avg(update_list) + elif self.update_style == "res_incr": + return self.list_update_res_incr(update_list) + elif self.update_style == "res_residual": + return self.list_update_res_residual(update_list, update_name=update_name) + else: + raise RuntimeError(f"unknown update style {self.update_style}") + + def serialize(self) -> dict: + """Serialize the networks to a dict. + + Returns + ------- + dict + The serialized networks. + """ + data = { + "@class": "RepformerLayer", + "@version": 2, + "rcut": self.rcut, + "rcut_smth": self.rcut_smth, + "sel": self.sel, + "ntypes": self.ntypes, + "g1_dim": self.g1_dim, + "g2_dim": self.g2_dim, + "axis_neuron": self.axis_neuron, + "update_chnnl_2": self.update_chnnl_2, + "update_g1_has_conv": self.update_g1_has_conv, + "update_g1_has_drrd": self.update_g1_has_drrd, + "update_g1_has_grrg": self.update_g1_has_grrg, + "update_g1_has_attn": self.update_g1_has_attn, + "update_g2_has_g1g1": self.update_g2_has_g1g1, + "update_g2_has_attn": self.update_g2_has_attn, + "update_h2": self.update_h2, + "attn1_hidden": self.attn1_hidden, + "attn1_nhead": self.attn1_nhead, + "attn2_hidden": self.attn2_hidden, + "attn2_nhead": self.attn2_nhead, + "attn2_has_gate": self.attn2_has_gate, + "activation_function": self.activation_function, + "update_style": self.update_style, + "smooth": self.smooth, + "precision": self.precision, + "trainable_ln": self.trainable_ln, + "use_sqrt_nnei": self.use_sqrt_nnei, + "g1_out_conv": self.g1_out_conv, + "g1_out_mlp": self.g1_out_mlp, + "ln_eps": self.ln_eps, + "linear1": self.linear1.serialize(), + } + if self.update_chnnl_2: + data.update( + { + "linear2": self.linear2.serialize(), + } + ) + if self.update_g1_has_conv: + data.update( + { + "proj_g1g2": self.proj_g1g2.serialize(), + } + ) + if self.update_g2_has_g1g1: + data.update( + { + "proj_g1g1g2": self.proj_g1g1g2.serialize(), + } + ) + if self.update_g2_has_attn or self.update_h2: + data.update( + { + "attn2g_map": self.attn2g_map.serialize(), + } + ) + if self.update_g2_has_attn: + data.update( + { + "attn2_mh_apply": self.attn2_mh_apply.serialize(), + "attn2_lm": self.attn2_lm.serialize(), + } + ) + + if self.update_h2: + data.update( + { + "attn2_ev_apply": self.attn2_ev_apply.serialize(), + } + ) + if self.update_g1_has_attn: + data.update( + { + "loc_attn": self.loc_attn.serialize(), + } + ) + if self.g1_out_mlp: + data.update( + { + "g1_self_mlp": self.g1_self_mlp.serialize(), + } + ) + if 
self.update_style == "res_residual": + data.update( + { + "@variables": { + "g1_residual": [to_numpy_array(t) for t in self.g1_residual], + "g2_residual": [to_numpy_array(t) for t in self.g2_residual], + "h2_residual": [to_numpy_array(t) for t in self.h2_residual], + } + } + ) + return data + + @classmethod + def deserialize(cls, data: dict) -> "RepformerLayer": + """Deserialize the networks from a dict. + + Parameters + ---------- + data : dict + The dict to deserialize from. + """ + data = data.copy() + check_version_compatibility(data.pop("@version"), 2, 1) + data.pop("@class") + linear1 = data.pop("linear1") + update_chnnl_2 = data["update_chnnl_2"] + update_g1_has_conv = data["update_g1_has_conv"] + update_g2_has_g1g1 = data["update_g2_has_g1g1"] + update_g2_has_attn = data["update_g2_has_attn"] + update_h2 = data["update_h2"] + update_g1_has_attn = data["update_g1_has_attn"] + update_style = data["update_style"] + g1_out_mlp = data["g1_out_mlp"] + + linear2 = data.pop("linear2", None) + proj_g1g2 = data.pop("proj_g1g2", None) + proj_g1g1g2 = data.pop("proj_g1g1g2", None) + attn2g_map = data.pop("attn2g_map", None) + attn2_mh_apply = data.pop("attn2_mh_apply", None) + attn2_lm = data.pop("attn2_lm", None) + attn2_ev_apply = data.pop("attn2_ev_apply", None) + loc_attn = data.pop("loc_attn", None) + g1_self_mlp = data.pop("g1_self_mlp", None) + variables = data.pop("@variables", {}) + g1_residual = variables.get("g1_residual", data.pop("g1_residual", [])) + g2_residual = variables.get("g2_residual", data.pop("g2_residual", [])) + h2_residual = variables.get("h2_residual", data.pop("h2_residual", [])) + + obj = cls(**data) + obj.linear1 = MLPLayer.deserialize(linear1) + if update_chnnl_2: + assert isinstance(linear2, dict) + obj.linear2 = MLPLayer.deserialize(linear2) + if update_g1_has_conv: + assert isinstance(proj_g1g2, dict) + obj.proj_g1g2 = MLPLayer.deserialize(proj_g1g2) + if update_g2_has_g1g1: + assert isinstance(proj_g1g1g2, dict) + obj.proj_g1g1g2 = MLPLayer.deserialize(proj_g1g1g2) + if update_g2_has_attn or update_h2: + assert isinstance(attn2g_map, dict) + obj.attn2g_map = Atten2Map.deserialize(attn2g_map) + if update_g2_has_attn: + assert isinstance(attn2_mh_apply, dict) + assert isinstance(attn2_lm, dict) + obj.attn2_mh_apply = Atten2MultiHeadApply.deserialize(attn2_mh_apply) + obj.attn2_lm = LayerNorm.deserialize(attn2_lm) + if update_h2: + assert isinstance(attn2_ev_apply, dict) + obj.attn2_ev_apply = Atten2EquiVarApply.deserialize(attn2_ev_apply) + if update_g1_has_attn: + assert isinstance(loc_attn, dict) + obj.loc_attn = LocalAtten.deserialize(loc_attn) + if g1_out_mlp: + assert isinstance(g1_self_mlp, dict) + obj.g1_self_mlp = MLPLayer.deserialize(g1_self_mlp) + if update_style == "res_residual": + for ii, t in enumerate(obj.g1_residual): + t.data = to_paddle_tensor(g1_residual[ii]) + for ii, t in enumerate(obj.g2_residual): + t.data = to_paddle_tensor(g2_residual[ii]) + for ii, t in enumerate(obj.h2_residual): + t.data = to_paddle_tensor(h2_residual[ii]) + return obj diff --git a/deepmd/pd/model/descriptor/repformers.py b/deepmd/pd/model/descriptor/repformers.py new file mode 100644 index 0000000000..47d92317df --- /dev/null +++ b/deepmd/pd/model/descriptor/repformers.py @@ -0,0 +1,576 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from typing import ( + Callable, + Optional, + Union, +) + +import paddle + +from deepmd.dpmodel.utils.seed import ( + child_seed, +) +from deepmd.pd.model.descriptor.descriptor import ( + DescriptorBlock, +) +from 
deepmd.pd.model.descriptor.env_mat import ( + prod_env_mat, +) +from deepmd.pd.model.network.mlp import ( + MLPLayer, +) +from deepmd.pd.utils import ( + env, +) +from deepmd.pd.utils.env import ( + PRECISION_DICT, +) +from deepmd.pd.utils.env_mat_stat import ( + EnvMatStatSe, +) +from deepmd.pd.utils.exclude_mask import ( + PairExcludeMask, +) +from deepmd.pd.utils.utils import ( + ActivationFn, +) +from deepmd.utils.env_mat_stat import ( + StatItem, +) +from deepmd.utils.path import ( + DPPath, +) + +from .repformer_layer import ( + RepformerLayer, +) + + +@DescriptorBlock.register("se_repformer") +@DescriptorBlock.register("se_uni") +class DescrptBlockRepformers(DescriptorBlock): + def __init__( + self, + rcut, + rcut_smth, + sel: int, + ntypes: int, + nlayers: int = 3, + g1_dim=128, + g2_dim=16, + axis_neuron: int = 4, + direct_dist: bool = False, + update_g1_has_conv: bool = True, + update_g1_has_drrd: bool = True, + update_g1_has_grrg: bool = True, + update_g1_has_attn: bool = True, + update_g2_has_g1g1: bool = True, + update_g2_has_attn: bool = True, + update_h2: bool = False, + attn1_hidden: int = 64, + attn1_nhead: int = 4, + attn2_hidden: int = 16, + attn2_nhead: int = 4, + attn2_has_gate: bool = False, + activation_function: str = "tanh", + update_style: str = "res_avg", + update_residual: float = 0.001, + update_residual_init: str = "norm", + set_davg_zero: bool = True, + smooth: bool = True, + exclude_types: list[tuple[int, int]] = [], + env_protection: float = 0.0, + precision: str = "float64", + trainable_ln: bool = True, + ln_eps: Optional[float] = 1e-5, + seed: Optional[Union[int, list[int]]] = None, + use_sqrt_nnei: bool = True, + g1_out_conv: bool = True, + g1_out_mlp: bool = True, + ) -> None: + r""" + The repformer descriptor block. + + Parameters + ---------- + rcut : float + The cut-off radius. + rcut_smth : float + Where to start smoothing. For example the 1/r term is smoothed from rcut to rcut_smth. + sel : int + Maximally possible number of selected neighbors. + ntypes : int + Number of element types + nlayers : int, optional + Number of repformer layers. + g1_dim : int, optional + Dimension of the first graph convolution layer. + g2_dim : int, optional + Dimension of the second graph convolution layer. + axis_neuron : int, optional + Size of the submatrix of G (embedding matrix). + direct_dist : bool, optional + Whether to use direct distance information (1/r term) in the repformer block. + update_g1_has_conv : bool, optional + Whether to update the g1 rep with convolution term. + update_g1_has_drrd : bool, optional + Whether to update the g1 rep with the drrd term. + update_g1_has_grrg : bool, optional + Whether to update the g1 rep with the grrg term. + update_g1_has_attn : bool, optional + Whether to update the g1 rep with the localized self-attention. + update_g2_has_g1g1 : bool, optional + Whether to update the g2 rep with the g1xg1 term. + update_g2_has_attn : bool, optional + Whether to update the g2 rep with the gated self-attention. + update_h2 : bool, optional + Whether to update the h2 rep. + attn1_hidden : int, optional + The hidden dimension of localized self-attention to update the g1 rep. + attn1_nhead : int, optional + The number of heads in localized self-attention to update the g1 rep. + attn2_hidden : int, optional + The hidden dimension of gated self-attention to update the g2 rep. + attn2_nhead : int, optional + The number of heads in gated self-attention to update the g2 rep. 
+        attn2_has_gate : bool, optional
+            Whether to use gate in the gated self-attention to update the g2 rep.
+        activation_function : str, optional
+            The activation function in the embedding net.
+        update_style : str, optional
+            Style to update a representation.
+            Supported options are:
+            -'res_avg': Updates a rep `u` with: u = 1/\\sqrt{n+1} (u + u_1 + u_2 + ... + u_n)
+            -'res_incr': Updates a rep `u` with: u = u + 1/\\sqrt{n} (u_1 + u_2 + ... + u_n)
+            -'res_residual': Updates a rep `u` with: u = u + (r_1*u_1 + r_2*u_2 + ... + r_n*u_n)
+            where `r_1`, `r_2`, ..., `r_n` are residual weights defined by `update_residual`
+            and `update_residual_init`.
+        update_residual : float, optional
+            When updating a rep in residual mode, the initial std of the residual vector weights.
+        update_residual_init : str, optional
+            When updating a rep in residual mode, the initialization mode of the residual vector weights.
+        set_davg_zero : bool, optional
+            Set the normalization average to zero.
+        precision : str, optional
+            The precision of the embedding net parameters.
+        smooth : bool, optional
+            Whether to use smoothness in processes such as attention weights calculation.
+        exclude_types : list[list[int]], optional
+            The excluded pairs of types which have no interaction with each other.
+            For example, `[[0, 1]]` means no interaction between type 0 and type 1.
+        env_protection : float, optional
+            Protection parameter to prevent division-by-zero errors during environment matrix calculations.
+            For example, when using paddings, there may be zero distances of neighbors, which may cause
+            division-by-zero errors during environment matrix calculations without protection.
+        trainable_ln : bool, optional
+            Whether to use trainable shift and scale weights in layer normalization.
+        use_sqrt_nnei : bool, optional
+            Whether to use the square root of the number of neighbors for symmetrization_op normalization instead of using the number of neighbors directly.
+        g1_out_conv : bool, optional
+            Whether to put the convolutional update of g1 separately outside the concatenated MLP update.
+        g1_out_mlp : bool, optional
+            Whether to put the self MLP update of g1 separately outside the concatenated MLP update.
+        ln_eps : float, optional
+            The epsilon value for layer normalization.
+        seed : int, optional
+            Random seed for parameter initialization.
+        """
+        super().__init__()
+        self.rcut = float(rcut)
+        self.rcut_smth = float(rcut_smth)
+        self.ntypes = ntypes
+        self.nlayers = nlayers
+        sel = [sel] if isinstance(sel, int) else sel
+        self.nnei = sum(sel)
+        self.ndescrpt = self.nnei * 4  # use full descriptor.
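+        # each neighbor contributes a 4-component environment row
+        # (s(r), s(r)*x/r, s(r)*y/r, s(r)*z/r), hence nnei * 4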
+ assert len(sel) == 1 + self.sel = sel + self.sec = self.sel + self.split_sel = self.sel + self.axis_neuron = axis_neuron + self.set_davg_zero = set_davg_zero + self.g1_dim = g1_dim + self.g2_dim = g2_dim + self.update_g1_has_conv = update_g1_has_conv + self.update_g1_has_drrd = update_g1_has_drrd + self.update_g1_has_grrg = update_g1_has_grrg + self.update_g1_has_attn = update_g1_has_attn + self.update_g2_has_g1g1 = update_g2_has_g1g1 + self.update_g2_has_attn = update_g2_has_attn + self.update_h2 = update_h2 + self.attn1_hidden = attn1_hidden + self.attn1_nhead = attn1_nhead + self.attn2_has_gate = attn2_has_gate + self.attn2_hidden = attn2_hidden + self.attn2_nhead = attn2_nhead + self.activation_function = activation_function + self.update_style = update_style + self.update_residual = update_residual + self.update_residual_init = update_residual_init + self.direct_dist = direct_dist + self.act = ActivationFn(activation_function) + self.smooth = smooth + self.use_sqrt_nnei = use_sqrt_nnei + self.g1_out_conv = g1_out_conv + self.g1_out_mlp = g1_out_mlp + # order matters, placed after the assignment of self.ntypes + self.reinit_exclude(exclude_types) + self.env_protection = env_protection + self.precision = precision + self.prec = PRECISION_DICT[precision] + self.trainable_ln = trainable_ln + self.ln_eps = ln_eps + self.epsilon = 1e-4 + self.seed = seed + + self.g2_embd = MLPLayer( + 1, self.g2_dim, precision=precision, seed=child_seed(seed, 0) + ) + layers = [] + for ii in range(nlayers): + layers.append( + RepformerLayer( + self.rcut, + self.rcut_smth, + self.sel, + self.ntypes, + self.g1_dim, + self.g2_dim, + axis_neuron=self.axis_neuron, + update_chnnl_2=(ii != nlayers - 1), + update_g1_has_conv=self.update_g1_has_conv, + update_g1_has_drrd=self.update_g1_has_drrd, + update_g1_has_grrg=self.update_g1_has_grrg, + update_g1_has_attn=self.update_g1_has_attn, + update_g2_has_g1g1=self.update_g2_has_g1g1, + update_g2_has_attn=self.update_g2_has_attn, + update_h2=self.update_h2, + attn1_hidden=self.attn1_hidden, + attn1_nhead=self.attn1_nhead, + attn2_has_gate=self.attn2_has_gate, + attn2_hidden=self.attn2_hidden, + attn2_nhead=self.attn2_nhead, + activation_function=self.activation_function, + update_style=self.update_style, + update_residual=self.update_residual, + update_residual_init=self.update_residual_init, + smooth=self.smooth, + trainable_ln=self.trainable_ln, + ln_eps=self.ln_eps, + precision=precision, + use_sqrt_nnei=self.use_sqrt_nnei, + g1_out_conv=self.g1_out_conv, + g1_out_mlp=self.g1_out_mlp, + seed=child_seed(child_seed(seed, 1), ii), + ) + ) + self.layers = paddle.nn.LayerList(layers) + + wanted_shape = (self.ntypes, self.nnei, 4) + mean = paddle.zeros(wanted_shape, dtype=env.GLOBAL_PD_FLOAT_PRECISION).to( + device=env.DEVICE + ) + stddev = paddle.ones(wanted_shape, dtype=env.GLOBAL_PD_FLOAT_PRECISION).to( + device=env.DEVICE + ) + self.register_buffer("mean", mean) + self.register_buffer("stddev", stddev) + self.stats = None + + def get_rcut(self) -> float: + """Returns the cut-off radius.""" + return self.rcut + + def get_rcut_smth(self) -> float: + """Returns the radius where the neighbor information starts to smoothly decay to 0.""" + return self.rcut_smth + + def get_nsel(self) -> int: + """Returns the number of selected atoms in the cut-off radius.""" + return sum(self.sel) + + def get_sel(self) -> list[int]: + """Returns the number of selected atoms for each type.""" + return self.sel + + def get_ntypes(self) -> int: + """Returns the number of element types.""" + 
return self.ntypes + + def get_dim_out(self) -> int: + """Returns the output dimension.""" + return self.dim_out + + def get_dim_in(self) -> int: + """Returns the input dimension.""" + return self.dim_in + + def get_dim_emb(self) -> int: + """Returns the embedding dimension g2.""" + return self.g2_dim + + def __setitem__(self, key, value) -> None: + if key in ("avg", "data_avg", "davg"): + self.mean = value + elif key in ("std", "data_std", "dstd"): + self.stddev = value + else: + raise KeyError(key) + + def __getitem__(self, key): + if key in ("avg", "data_avg", "davg"): + return self.mean + elif key in ("std", "data_std", "dstd"): + return self.stddev + else: + raise KeyError(key) + + def mixed_types(self) -> bool: + """If true, the descriptor + 1. assumes total number of atoms aligned across frames; + 2. requires a neighbor list that does not distinguish different atomic types. + + If false, the descriptor + 1. assumes total number of atoms of each atom type aligned across frames; + 2. requires a neighbor list that distinguishes different atomic types. + + """ + return True + + def get_env_protection(self) -> float: + """Returns the protection of building environment matrix.""" + return self.env_protection + + @property + def dim_out(self): + """Returns the output dimension of this descriptor.""" + return self.g1_dim + + @property + def dim_in(self): + """Returns the atomic input dimension of this descriptor.""" + return self.g1_dim + + @property + def dim_emb(self): + """Returns the embedding dimension g2.""" + return self.get_dim_emb() + + def reinit_exclude( + self, + exclude_types: list[tuple[int, int]] = [], + ) -> None: + self.exclude_types = exclude_types + self.emask = PairExcludeMask(self.ntypes, exclude_types=exclude_types) + + def forward( + self, + nlist: paddle.Tensor, + extended_coord: paddle.Tensor, + extended_atype: paddle.Tensor, + extended_atype_embd: Optional[paddle.Tensor] = None, + mapping: Optional[paddle.Tensor] = None, + type_embedding: Optional[paddle.Tensor] = None, + comm_dict: Optional[dict[str, paddle.Tensor]] = None, + ): + if comm_dict is None: + assert mapping is not None + assert extended_atype_embd is not None + nframes, nloc, nnei = nlist.shape + nall = extended_coord.reshape([nframes, -1]).shape[1] // 3 + atype = extended_atype[:, :nloc] + # nb x nloc x nnei + exclude_mask = self.emask(nlist, extended_atype) + nlist = paddle.where(exclude_mask != 0, nlist, paddle.full_like(nlist, -1)) + # nb x nloc x nnei x 4, nb x nloc x nnei x 3, nb x nloc x nnei x 1 + dmatrix, diff, sw = prod_env_mat( + extended_coord, + nlist, + atype, + self.mean, + self.stddev, + self.rcut, + self.rcut_smth, + protection=self.env_protection, + ) + nlist_mask = nlist != -1 + sw = paddle.squeeze(sw, -1) + # beyond the cutoff sw should be 0.0 + sw = sw.masked_fill(~nlist_mask, 0.0) + + # [nframes, nloc, tebd_dim] + if comm_dict is None: + if paddle.in_dynamic_mode(): + assert isinstance(extended_atype_embd, paddle.Tensor) # for jit + atype_embd = extended_atype_embd[:, :nloc, :] + if paddle.in_dynamic_mode(): + assert list(atype_embd.shape) == [nframes, nloc, self.g1_dim] + else: + atype_embd = extended_atype_embd + if paddle.in_dynamic_mode(): + assert isinstance(atype_embd, paddle.Tensor) # for jit + g1 = self.act(atype_embd) + ng1 = g1.shape[-1] + # nb x nloc x nnei x 1, nb x nloc x nnei x 3 + if not self.direct_dist: + g2, h2 = paddle.split(dmatrix, [1, 3], axis=-1) + else: + # g2, h2 = paddle.linalg.norm(diff, axis=-1, keepdim=True), diff + g2, h2 = paddle.linalg.norm(diff, 
axis=-1, keepdim=True), diff + g2 = g2 / self.rcut + h2 = h2 / self.rcut + # nb x nloc x nnei x ng2 + g2 = self.act(self.g2_embd(g2)) + + # set all padding positions to index of 0 + # if the a neighbor is real or not is indicated by nlist_mask + nlist[nlist == -1] = 0 + # nb x nall x ng1 + if comm_dict is None: + assert mapping is not None + mapping = ( + mapping.reshape([nframes, nall]) + .unsqueeze(-1) + .expand([-1, -1, self.g1_dim]) + ) + for idx, ll in enumerate(self.layers): + # g1: nb x nloc x ng1 + # g1_ext: nb x nall x ng1 + if comm_dict is None: + assert mapping is not None + g1_ext = paddle.take_along_axis(g1, axis=1, indices=mapping) + else: + raise NotImplementedError("Not implemented yet") + # has_spin = "has_spin" in comm_dict + # if not has_spin: + # n_padding = nall - nloc + # g1 = paddle.nn.functional.pad( + # g1.squeeze(0), (0, 0, 0, n_padding), value=0.0 + # ) + # real_nloc = nloc + # real_nall = nall + # else: + # # for spin + # real_nloc = nloc // 2 + # real_nall = nall // 2 + # real_n_padding = real_nall - real_nloc + # g1_real, g1_virtual = paddle.split( + # g1, [real_nloc, real_nloc], axis=1 + # ) + # # mix_g1: nb x real_nloc x (ng1 * 2) + # mix_g1 = paddle.concat([g1_real, g1_virtual], axis=2) + # # nb x real_nall x (ng1 * 2) + # g1 = paddle.nn.functional.pad( + # mix_g1.squeeze(0), (0, 0, 0, real_n_padding), value=0.0 + # ) + + # assert "send_list" in comm_dict + # assert "send_proc" in comm_dict + # assert "recv_proc" in comm_dict + # assert "send_num" in comm_dict + # assert "recv_num" in comm_dict + # assert "communicator" in comm_dict + # ret = paddle.ops.deepmd.border_op( + # comm_dict["send_list"], + # comm_dict["send_proc"], + # comm_dict["recv_proc"], + # comm_dict["send_num"], + # comm_dict["recv_num"], + # g1, + # comm_dict["communicator"], + # paddle.to_tensor( + # real_nloc, + # dtype=paddle.int32, + # place=env.DEVICE, + # ), # should be int of c++ + # paddle.to_tensor( + # real_nall - real_nloc, + # dtype=paddle.int32, + # place=env.DEVICE, + # ), # should be int of c++ + # ) + # g1_ext = ret[0].unsqueeze(0) + # if has_spin: + # g1_real_ext, g1_virtual_ext = paddle.split( + # g1_ext, [ng1, ng1], axis=2 + # ) + # g1_ext = concat_switch_virtual( + # g1_real_ext, g1_virtual_ext, real_nloc + # ) + g1, g2, h2 = ll.forward( + g1_ext, + g2, + h2, + nlist, + nlist_mask, + sw, + ) + + # nb x nloc x 3 x ng2 + h2g2 = RepformerLayer._cal_hg( + g2, + h2, + nlist_mask, + sw, + smooth=self.smooth, + epsilon=self.epsilon, + use_sqrt_nnei=self.use_sqrt_nnei, + ) + # (nb x nloc) x ng2 x 3 + rot_mat = paddle.transpose(h2g2, (0, 1, 3, 2)) + + return g1, g2, h2, rot_mat.reshape([nframes, nloc, self.dim_emb, 3]), sw + + def compute_input_stats( + self, + merged: Union[Callable[[], list[dict]], list[dict]], + path: Optional[DPPath] = None, + ) -> None: + """ + Compute the input statistics (e.g. mean and stddev) for the descriptors from packed data. + + Parameters + ---------- + merged : Union[Callable[[], list[dict]], list[dict]] + - list[dict]: A list of data samples from various data systems. + Each element, `merged[i]`, is a data dictionary containing `keys`: `paddle.Tensor` + originating from the `i`-th data system. + - Callable[[], list[dict]]: A lazy function that returns data samples in the above format + only when needed. Since the sampling process can be slow and memory-intensive, + the lazy function helps by only sampling once. + path : Optional[DPPath] + The path to the stat file. 
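+ + A minimal usage sketch (illustrative only; `block` stands for an instance of this descriptor block and `frames` for a list of such data dictionaries):: + + block.compute_input_stats(lambda: frames) # lazy form, sampled only once + davg, dstd = block["davg"], block["dstd"] # buffers updated unless set_davg_zero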
+ + """ + env_mat_stat = EnvMatStatSe(self) + if path is not None: + path = path / env_mat_stat.get_hash() + if path is None or not path.is_dir(): + if callable(merged): + # only get data for once + sampled = merged() + else: + sampled = merged + else: + sampled = [] + env_mat_stat.load_or_compute_stats(sampled, path) + self.stats = env_mat_stat.stats + mean, stddev = env_mat_stat() + if not self.set_davg_zero: + paddle.assign(paddle.to_tensor(mean).to(device=env.DEVICE), self.mean) # pylint: disable=no-explicit-dtype + paddle.assign(paddle.to_tensor(stddev).to(device=env.DEVICE), self.stddev) # pylint: disable=no-explicit-dtype + + def get_stats(self) -> dict[str, StatItem]: + """Get the statistics of the descriptor.""" + if self.stats is None: + raise RuntimeError( + "The statistics of the descriptor has not been computed." + ) + return self.stats + + def has_message_passing(self) -> bool: + """Returns whether the descriptor block has message passing.""" + return True + + def need_sorted_nlist_for_lower(self) -> bool: + """Returns whether the descriptor block needs sorted nlist when using `forward_lower`.""" + return False diff --git a/deepmd/pd/model/descriptor/se_t_tebd.py b/deepmd/pd/model/descriptor/se_t_tebd.py new file mode 100644 index 0000000000..a8b9a6a417 --- /dev/null +++ b/deepmd/pd/model/descriptor/se_t_tebd.py @@ -0,0 +1,931 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from typing import ( + Callable, + Optional, + Union, +) + +import paddle + +from deepmd.dpmodel.utils import EnvMat as DPEnvMat +from deepmd.dpmodel.utils.seed import ( + child_seed, +) +from deepmd.pd.model.descriptor import ( + DescriptorBlock, +) +from deepmd.pd.model.descriptor.env_mat import ( + prod_env_mat, +) +from deepmd.pd.model.network.mlp import ( + EmbeddingNet, + NetworkCollection, +) +from deepmd.pd.model.network.network import ( + TypeEmbedNet, + TypeEmbedNetConsistent, +) +from deepmd.pd.utils import ( + env, +) +from deepmd.pd.utils.env import ( + PRECISION_DICT, + RESERVED_PRECISON_DICT, +) +from deepmd.pd.utils.env_mat_stat import ( + EnvMatStatSe, +) +from deepmd.pd.utils.exclude_mask import ( + PairExcludeMask, +) +from deepmd.pd.utils.update_sel import ( + UpdateSel, +) +from deepmd.utils.data_system import ( + DeepmdDataSystem, +) +from deepmd.utils.env_mat_stat import ( + StatItem, +) +from deepmd.utils.finetune import ( + get_index_between_two_maps, + map_pair_exclude_types, +) +from deepmd.utils.path import ( + DPPath, +) +from deepmd.utils.version import ( + check_version_compatibility, +) + +from .base_descriptor import ( + BaseDescriptor, +) +from .descriptor import ( + extend_descrpt_stat, +) + + +@BaseDescriptor.register("se_e3_tebd") +class DescrptSeTTebd(BaseDescriptor, paddle.nn.Layer): + r"""Construct an embedding net that takes angles between two neighboring atoms and type embeddings as input. + + Parameters + ---------- + rcut + The cut-off radius + rcut_smth + From where the environment matrix should be smoothed + sel : Union[list[int], int] + list[int]: sel[i] specifies the maxmum number of type i atoms in the cut-off radius + int: the total maxmum number of atoms in the cut-off radius + ntypes : int + Number of element types + neuron : list[int] + Number of neurons in each hidden layers of the embedding net + tebd_dim : int + Dimension of the type embedding + tebd_input_mode : str + The input mode of the type embedding. Supported modes are ["concat", "strip"]. 
+ - "concat": Concatenate the type embedding with the smoothed angular information as the union input for the embedding network. + - "strip": Use a separated embedding network for the type embedding and combine the output with the angular embedding network output. + resnet_dt + Time-step `dt` in the resnet construction: + y = x + dt * \phi (Wx + b) + set_davg_zero + Set the shift of embedding net input to zero. + activation_function + The activation function in the embedding net. Supported options are |ACTIVATION_FN| + env_protection: float + Protection parameter to prevent division by zero errors during environment matrix calculations. + exclude_types : list[tuple[int, int]] + The excluded pairs of types which have no interaction with each other. + For example, `[[0, 1]]` means no interaction between type 0 and type 1. + precision + The precision of the embedding net parameters. Supported options are |PRECISION| + trainable + If the weights of embedding net are trainable. + seed + Random seed for initializing the network parameters. + type_map: list[str], Optional + A list of strings. Give the name to each type of atoms. + concat_output_tebd: bool + Whether to concat type embedding at the output of the descriptor. + use_econf_tebd: bool, Optional + Whether to use electronic configuration type embedding. + use_tebd_bias : bool, Optional + Whether to use bias in the type embedding layer. + smooth: bool + Whether to use smooth process in calculation. + + """ + + def __init__( + self, + rcut: float, + rcut_smth: float, + sel: Union[list[int], int], + ntypes: int, + neuron: list = [2, 4, 8], + tebd_dim: int = 8, + tebd_input_mode: str = "concat", + resnet_dt: bool = False, + set_davg_zero: bool = True, + activation_function: str = "tanh", + env_protection: float = 0.0, + exclude_types: list[tuple[int, int]] = [], + precision: str = "float64", + trainable: bool = True, + seed: Optional[Union[int, list[int]]] = None, + type_map: Optional[list[str]] = None, + concat_output_tebd: bool = True, + use_econf_tebd: bool = False, + use_tebd_bias=False, + smooth: bool = True, + ) -> None: + super().__init__() + self.se_ttebd = DescrptBlockSeTTebd( + rcut, + rcut_smth, + sel, + ntypes, + neuron=neuron, + tebd_dim=tebd_dim, + tebd_input_mode=tebd_input_mode, + set_davg_zero=set_davg_zero, + activation_function=activation_function, + precision=precision, + resnet_dt=resnet_dt, + exclude_types=exclude_types, + env_protection=env_protection, + smooth=smooth, + seed=child_seed(seed, 1), + ) + self.prec = PRECISION_DICT[precision] + self.use_econf_tebd = use_econf_tebd + self.type_map = type_map + self.smooth = smooth + self.type_embedding = TypeEmbedNet( + ntypes, + tebd_dim, + precision=precision, + seed=child_seed(seed, 2), + use_econf_tebd=use_econf_tebd, + type_map=type_map, + use_tebd_bias=use_tebd_bias, + ) + self.tebd_dim = tebd_dim + self.tebd_input_mode = tebd_input_mode + self.concat_output_tebd = concat_output_tebd + self.trainable = trainable + # set trainable + for param in self.parameters(): + param.stop_gradient = not trainable + + def get_rcut(self) -> float: + """Returns the cut-off radius.""" + return self.se_ttebd.get_rcut() + + def get_rcut_smth(self) -> float: + """Returns the radius where the neighbor information starts to smoothly decay to 0.""" + return self.se_ttebd.get_rcut_smth() + + def get_nsel(self) -> int: + """Returns the number of selected atoms in the cut-off radius.""" + return self.se_ttebd.get_nsel() + + def get_sel(self) -> list[int]: + """Returns the number of selected 
atoms for each type.""" + return self.se_ttebd.get_sel() + + def get_ntypes(self) -> int: + """Returns the number of element types.""" + return self.se_ttebd.get_ntypes() + + def get_type_map(self) -> list[str]: + """Get the name to each type of atoms.""" + return self.type_map + + def get_dim_out(self) -> int: + """Returns the output dimension.""" + ret = self.se_ttebd.get_dim_out() + if self.concat_output_tebd: + ret += self.tebd_dim + return ret + + def get_dim_emb(self) -> int: + return self.se_ttebd.dim_emb + + def mixed_types(self) -> bool: + """If true, the descriptor + 1. assumes total number of atoms aligned across frames; + 2. requires a neighbor list that does not distinguish different atomic types. + + If false, the descriptor + 1. assumes total number of atoms of each atom type aligned across frames; + 2. requires a neighbor list that distinguishes different atomic types. + + """ + return self.se_ttebd.mixed_types() + + def has_message_passing(self) -> bool: + """Returns whether the descriptor has message passing.""" + return self.se_ttebd.has_message_passing() + + def need_sorted_nlist_for_lower(self) -> bool: + """Returns whether the descriptor needs sorted nlist when using `forward_lower`.""" + return self.se_ttebd.need_sorted_nlist_for_lower() + + def get_env_protection(self) -> float: + """Returns the protection of building environment matrix.""" + return self.se_ttebd.get_env_protection() + + def share_params(self, base_class, shared_level, resume=False) -> None: + """ + Share the parameters of self to the base_class with shared_level during multitask training. + If not start from checkpoint (resume is False), + some separated parameters (e.g. mean and stddev) will be re-calculated across different classes. + """ + assert ( + self.__class__ == base_class.__class__ + ), "Only descriptors of the same type can share params!" + # For DPA1 descriptors, the user-defined share-level + # shared_level: 0 + # share all parameters in both type_embedding and se_ttebd + if shared_level == 0: + self._sub_layers["type_embedding"] = base_class._sub_layers[ + "type_embedding" + ] + self.se_ttebd.share_params(base_class.se_ttebd, 0, resume=resume) + # shared_level: 1 + # share all parameters in type_embedding + elif shared_level == 1: + self._sub_layers["type_embedding"] = base_class._sub_layers[ + "type_embedding" + ] + # Other shared levels + else: + raise NotImplementedError + + @property + def dim_out(self): + return self.get_dim_out() + + @property + def dim_emb(self): + return self.get_dim_emb() + + def compute_input_stats( + self, + merged: Union[Callable[[], list[dict]], list[dict]], + path: Optional[DPPath] = None, + ): + """ + Compute the input statistics (e.g. mean and stddev) for the descriptors from packed data. + + Parameters + ---------- + merged : Union[Callable[[], list[dict]], list[dict]] + - list[dict]: A list of data samples from various data systems. + Each element, `merged[i]`, is a data dictionary containing `keys`: `paddle.Tensor` + originating from the `i`-th data system. + - Callable[[], list[dict]]: A lazy function that returns data samples in the above format + only when needed. Since the sampling process can be slow and memory-intensive, + the lazy function helps by only sampling once. + path : Optional[DPPath] + The path to the stat file. 
+ + """ + return self.se_ttebd.compute_input_stats(merged, path) + + def set_stat_mean_and_stddev( + self, + mean: paddle.Tensor, + stddev: paddle.Tensor, + ) -> None: + """Update mean and stddev for descriptor.""" + self.se_ttebd.mean = mean + self.se_ttebd.stddev = stddev + + def get_stat_mean_and_stddev(self) -> tuple[paddle.Tensor, paddle.Tensor]: + """Get mean and stddev for descriptor.""" + return self.se_ttebd.mean, self.se_ttebd.stddev + + def change_type_map( + self, type_map: list[str], model_with_new_type_stat=None + ) -> None: + """Change the type related params to new ones, according to `type_map` and the original one in the model. + If there are new types in `type_map`, statistics will be updated accordingly to `model_with_new_type_stat` for these new types. + """ + assert ( + self.type_map is not None + ), "'type_map' must be defined when performing type changing!" + remap_index, has_new_type = get_index_between_two_maps(self.type_map, type_map) + obj = self.se_ttebd + obj.ntypes = len(type_map) + self.type_map = type_map + self.type_embedding.change_type_map(type_map=type_map) + obj.reinit_exclude(map_pair_exclude_types(obj.exclude_types, remap_index)) + if has_new_type: + # the avg and std of new types need to be updated + extend_descrpt_stat( + obj, + type_map, + des_with_stat=model_with_new_type_stat.se_ttebd + if model_with_new_type_stat is not None + else None, + ) + obj["davg"] = obj["davg"][remap_index] + obj["dstd"] = obj["dstd"][remap_index] + + def serialize(self) -> dict: + obj = self.se_ttebd + data = { + "@class": "Descriptor", + "type": "se_e3_tebd", + "@version": 1, + "rcut": obj.rcut, + "rcut_smth": obj.rcut_smth, + "sel": obj.sel, + "ntypes": obj.ntypes, + "neuron": obj.neuron, + "tebd_dim": obj.tebd_dim, + "tebd_input_mode": obj.tebd_input_mode, + "set_davg_zero": obj.set_davg_zero, + "activation_function": obj.activation_function, + "resnet_dt": obj.resnet_dt, + "concat_output_tebd": self.concat_output_tebd, + "use_econf_tebd": self.use_econf_tebd, + "type_map": self.type_map, + # make deterministic + "precision": RESERVED_PRECISON_DICT[obj.prec], + "embeddings": obj.filter_layers.serialize(), + "env_mat": DPEnvMat(obj.rcut, obj.rcut_smth).serialize(), + "type_embedding": self.type_embedding.embedding.serialize(), + "exclude_types": obj.exclude_types, + "env_protection": obj.env_protection, + "smooth": self.smooth, + "@variables": { + "davg": obj["davg"].numpy(), + "dstd": obj["dstd"].numpy(), + }, + "trainable": self.trainable, + } + if obj.tebd_input_mode in ["strip"]: + data.update({"embeddings_strip": obj.filter_layers_strip.serialize()}) + return data + + @classmethod + def deserialize(cls, data: dict) -> "DescrptSeTTebd": + data = data.copy() + check_version_compatibility(data.pop("@version"), 1, 1) + data.pop("@class") + data.pop("type") + variables = data.pop("@variables") + embeddings = data.pop("embeddings") + type_embedding = data.pop("type_embedding") + env_mat = data.pop("env_mat") + tebd_input_mode = data["tebd_input_mode"] + if tebd_input_mode in ["strip"]: + embeddings_strip = data.pop("embeddings_strip") + else: + embeddings_strip = None + obj = cls(**data) + + def t_cvt(xx): + return paddle.to_tensor(xx, dtype=obj.se_ttebd.prec).to(device=env.DEVICE) + + obj.type_embedding.embedding = TypeEmbedNetConsistent.deserialize( + type_embedding + ) + obj.se_ttebd["davg"] = t_cvt(variables["davg"]) + obj.se_ttebd["dstd"] = t_cvt(variables["dstd"]) + obj.se_ttebd.filter_layers = NetworkCollection.deserialize(embeddings) + if tebd_input_mode in 
["strip"]: + obj.se_ttebd.filter_layers_strip = NetworkCollection.deserialize( + embeddings_strip + ) + return obj + + def forward( + self, + extended_coord: paddle.Tensor, + extended_atype: paddle.Tensor, + nlist: paddle.Tensor, + mapping: Optional[paddle.Tensor] = None, + comm_dict: Optional[dict[str, paddle.Tensor]] = None, + ): + """Compute the descriptor. + + Parameters + ---------- + extended_coord + The extended coordinates of atoms. shape: nf x (nallx3) + extended_atype + The extended aotm types. shape: nf x nall + nlist + The neighbor list. shape: nf x nloc x nnei + mapping + The index mapping, not required by this descriptor. + comm_dict + The data needed for communication for parallel inference. + + Returns + ------- + descriptor + The descriptor. shape: nf x nloc x (ng x axis_neuron) + gr + The rotationally equivariant and permutationally invariant single particle + representation. shape: nf x nloc x ng x 3 + g2 + The rotationally invariant pair-partical representation. + shape: nf x nloc x nnei x ng + h2 + The rotationally equivariant pair-partical representation. + shape: nf x nloc x nnei x 3 + sw + The smooth switch function. shape: nf x nloc x nnei + + """ + # cast the input to internal precsion + extended_coord = extended_coord.to(dtype=self.prec) + del mapping + nframes, nloc, nnei = nlist.shape + nall = extended_coord.reshape([nframes, -1]).shape[1] // 3 + g1_ext = self.type_embedding(extended_atype) + g1_inp = g1_ext[:, :nloc, :] + if self.tebd_input_mode in ["strip"]: + type_embedding = self.type_embedding.get_full_embedding(g1_ext.place) + else: + type_embedding = None + g1, _, _, _, sw = self.se_ttebd( + nlist, + extended_coord, + extended_atype, + g1_ext, + mapping=None, + type_embedding=type_embedding, + ) + if self.concat_output_tebd: + g1 = paddle.concat([g1, g1_inp], axis=-1) + + return ( + g1.to(dtype=env.GLOBAL_PD_FLOAT_PRECISION), + None, + None, + None, + sw.to(dtype=env.GLOBAL_PD_FLOAT_PRECISION), + ) + + @classmethod + def update_sel( + cls, + train_data: DeepmdDataSystem, + type_map: Optional[list[str]], + local_jdata: dict, + ) -> tuple[dict, Optional[float]]: + """Update the selection and perform neighbor statistics. 
+ + Parameters + ---------- + train_data : DeepmdDataSystem + data used to do neighbor statistics + type_map : list[str], optional + The name of each type of atoms + local_jdata : dict + The local data refer to the current class + + Returns + ------- + dict + The updated local data + float + The minimum distance between two atoms + """ + local_jdata_cpy = local_jdata.copy() + min_nbor_dist, sel = UpdateSel().update_one_sel( + train_data, type_map, local_jdata_cpy["rcut"], local_jdata_cpy["sel"], True + ) + local_jdata_cpy["sel"] = sel[0] + return local_jdata_cpy, min_nbor_dist + + +@DescriptorBlock.register("se_ttebd") +class DescrptBlockSeTTebd(DescriptorBlock): + def __init__( + self, + rcut: float, + rcut_smth: float, + sel: Union[list[int], int], + ntypes: int, + neuron: list = [25, 50, 100], + tebd_dim: int = 8, + tebd_input_mode: str = "concat", + set_davg_zero: bool = True, + activation_function="tanh", + precision: str = "float64", + resnet_dt: bool = False, + exclude_types: list[tuple[int, int]] = [], + env_protection: float = 0.0, + smooth: bool = True, + seed: Optional[Union[int, list[int]]] = None, + ) -> None: + super().__init__() + self.rcut = float(rcut) + self.rcut_smth = float(rcut_smth) + self.neuron = neuron + self.filter_neuron = self.neuron + self.tebd_dim = tebd_dim + self.tebd_input_mode = tebd_input_mode + self.set_davg_zero = set_davg_zero + self.activation_function = activation_function + self.precision = precision + self.prec = PRECISION_DICT[self.precision] + self.resnet_dt = resnet_dt + self.env_protection = env_protection + self.seed = seed + self.smooth = smooth + + if isinstance(sel, int): + sel = [sel] + + self.ntypes = ntypes + self.sel = sel + self.sec = self.sel + self.split_sel = self.sel + self.nnei = sum(sel) + self.ndescrpt = self.nnei * 4 + # order matters, placed after the assignment of self.ntypes + self.reinit_exclude(exclude_types) + + wanted_shape = (self.ntypes, self.nnei, 4) + mean = paddle.zeros(wanted_shape, dtype=env.GLOBAL_PD_FLOAT_PRECISION).to( + device=env.DEVICE + ) + stddev = paddle.ones(wanted_shape, dtype=env.GLOBAL_PD_FLOAT_PRECISION).to( + device=env.DEVICE + ) + self.register_buffer("mean", mean) + self.register_buffer("stddev", stddev) + self.tebd_dim_input = self.tebd_dim * 2 + if self.tebd_input_mode in ["concat"]: + self.embd_input_dim = 1 + self.tebd_dim_input + else: + self.embd_input_dim = 1 + + self.filter_layers = None + self.filter_layers_strip = None + filter_layers = NetworkCollection( + ndim=0, ntypes=self.ntypes, network_type="embedding_network" + ) + filter_layers[0] = EmbeddingNet( + self.embd_input_dim, + self.filter_neuron, + activation_function=self.activation_function, + precision=self.precision, + resnet_dt=self.resnet_dt, + seed=child_seed(self.seed, 1), + ) + self.filter_layers = filter_layers + if self.tebd_input_mode in ["strip"]: + filter_layers_strip = NetworkCollection( + ndim=0, ntypes=self.ntypes, network_type="embedding_network" + ) + filter_layers_strip[0] = EmbeddingNet( + self.tebd_dim_input, + self.filter_neuron, + activation_function=self.activation_function, + precision=self.precision, + resnet_dt=self.resnet_dt, + seed=child_seed(self.seed, 2), + ) + self.filter_layers_strip = filter_layers_strip + self.stats = None + + def get_rcut(self) -> float: + """Returns the cut-off radius.""" + return self.rcut + + def get_rcut_smth(self) -> float: + """Returns the radius where the neighbor information starts to smoothly decay to 0.""" + return self.rcut_smth + + def get_nsel(self) -> int: + 
"""Returns the number of selected atoms in the cut-off radius.""" + return sum(self.sel) + + def get_sel(self) -> list[int]: + """Returns the number of selected atoms for each type.""" + return self.sel + + def get_ntypes(self) -> int: + """Returns the number of element types.""" + return self.ntypes + + def get_dim_in(self) -> int: + """Returns the input dimension.""" + return self.dim_in + + def get_dim_out(self) -> int: + """Returns the output dimension.""" + return self.dim_out + + def get_dim_emb(self) -> int: + """Returns the output dimension of embedding.""" + return self.filter_neuron[-1] + + def __setitem__(self, key, value) -> None: + if key in ("avg", "data_avg", "davg"): + self.mean = value + elif key in ("std", "data_std", "dstd"): + self.stddev = value + else: + raise KeyError(key) + + def __getitem__(self, key): + if key in ("avg", "data_avg", "davg"): + return self.mean + elif key in ("std", "data_std", "dstd"): + return self.stddev + else: + raise KeyError(key) + + def mixed_types(self) -> bool: + """If true, the descriptor + 1. assumes total number of atoms aligned across frames; + 2. requires a neighbor list that does not distinguish different atomic types. + + If false, the descriptor + 1. assumes total number of atoms of each atom type aligned across frames; + 2. requires a neighbor list that distinguishes different atomic types. + + """ + return True + + def get_env_protection(self) -> float: + """Returns the protection of building environment matrix.""" + return self.env_protection + + @property + def dim_out(self): + """Returns the output dimension of this descriptor.""" + return self.filter_neuron[-1] + + @property + def dim_in(self): + """Returns the atomic input dimension of this descriptor.""" + return self.tebd_dim + + @property + def dim_emb(self): + """Returns the output dimension of embedding.""" + return self.get_dim_emb() + + def compute_input_stats( + self, + merged: Union[Callable[[], list[dict]], list[dict]], + path: Optional[DPPath] = None, + ) -> None: + """ + Compute the input statistics (e.g. mean and stddev) for the descriptors from packed data. + + Parameters + ---------- + merged : Union[Callable[[], list[dict]], list[dict]] + - list[dict]: A list of data samples from various data systems. + Each element, `merged[i]`, is a data dictionary containing `keys`: `paddle.Tensor` + originating from the `i`-th data system. + - Callable[[], list[dict]]: A lazy function that returns data samples in the above format + only when needed. Since the sampling process can be slow and memory-intensive, + the lazy function helps by only sampling once. + path : Optional[DPPath] + The path to the stat file. + + """ + env_mat_stat = EnvMatStatSe(self) + if path is not None: + path = path / env_mat_stat.get_hash() + if path is None or not path.is_dir(): + if callable(merged): + # only get data for once + sampled = merged() + else: + sampled = merged + else: + sampled = [] + env_mat_stat.load_or_compute_stats(sampled, path) + self.stats = env_mat_stat.stats + mean, stddev = env_mat_stat() + if not self.set_davg_zero: + paddle.assign(paddle.to_tensor(mean).to(device=env.DEVICE), self.mean) # pylint: disable=no-explicit-dtype + paddle.assign(paddle.to_tensor(stddev).to(device=env.DEVICE), self.stddev) # pylint: disable=no-explicit-dtype + + def get_stats(self) -> dict[str, StatItem]: + """Get the statistics of the descriptor.""" + if self.stats is None: + raise RuntimeError( + "The statistics of the descriptor has not been computed." 
+ ) + return self.stats + + def reinit_exclude( + self, + exclude_types: list[tuple[int, int]] = [], + ) -> None: + self.exclude_types = exclude_types + self.emask = PairExcludeMask(self.ntypes, exclude_types=exclude_types) + + def forward( + self, + nlist: paddle.Tensor, + extended_coord: paddle.Tensor, + extended_atype: paddle.Tensor, + extended_atype_embd: Optional[paddle.Tensor] = None, + mapping: Optional[paddle.Tensor] = None, + type_embedding: Optional[paddle.Tensor] = None, + ): + """Compute the descriptor. + + Parameters + ---------- + nlist + The neighbor list. shape: nf x nloc x nnei + extended_coord + The extended coordinates of atoms. shape: nf x (nall x 3) + extended_atype + The extended atom types. shape: nf x nall + extended_atype_embd + The extended type embedding of atoms. shape: nf x nall x nt + mapping + The index mapping, not required by this descriptor. + type_embedding + Full type embeddings. shape: (ntypes+1) x nt + Required for stripped type embeddings. + + Returns + ------- + result + The descriptor. shape: nf x nloc x ng + g2 + The rotationally invariant pair-particle representation. + shape: nf x nloc x nnei x ng + h2 + The rotationally equivariant pair-particle representation. + shape: nf x nloc x nnei x 3 + gr + The rotationally equivariant and permutationally invariant single particle + representation. shape: nf x nloc x ng x 3 + sw + The smooth switch function. shape: nf x nloc x nnei + + """ + del mapping + assert extended_atype_embd is not None + nframes, nloc, nnei = nlist.shape + atype = extended_atype[:, :nloc] + nb = nframes + nall = extended_coord.reshape([nb, -1, 3]).shape[1] + dmatrix, diff, sw = prod_env_mat( + extended_coord, + nlist, + atype, + self.mean, + self.stddev, + self.rcut, + self.rcut_smth, + protection=self.env_protection, + ) + # nb x nloc x nnei + exclude_mask = self.emask(nlist, extended_atype) + nlist = paddle.where(exclude_mask != 0, nlist, paddle.full_like(nlist, -1)) + nlist_mask = nlist != -1 + nlist = paddle.where(nlist == -1, paddle.zeros_like(nlist), nlist) + sw = paddle.squeeze(sw, -1) + # nf x nall x nt + nt = extended_atype_embd.shape[-1] + # beyond the cutoff sw should be 0.0 + sw = sw.masked_fill(~nlist_mask, 0.0) + # (nb x nloc) x nnei + exclude_mask = exclude_mask.reshape([nb * nloc, nnei]) + assert self.filter_layers is not None + # nfnl x nnei x 4 + dmatrix = dmatrix.reshape([-1, self.nnei, 4]) + nfnl = dmatrix.shape[0] + # nfnl x nnei x 4 + rr = dmatrix + rr = rr * exclude_mask[:, :, None].astype(rr.dtype) + + # nfnl x nt_i x 3 + rr_i = rr[:, :, 1:] + # nfnl x nt_j x 3 + rr_j = rr[:, :, 1:] + # nfnl x nt_i x nt_j + # env_ij = paddle.einsum("ijm,ikm->ijk", rr_i, rr_j) + env_ij = ( + # ij1m x i1km -> ijkm -> ijk + rr_i.unsqueeze(2) * rr_j.unsqueeze(1) + ).sum(-1) + # nfnl x nt_i x nt_j x 1 + ss = env_ij.unsqueeze(-1) + if self.tebd_input_mode in ["concat"]: + atype_tebd_ext = extended_atype_embd + # nb x (nloc x nnei) x nt + index = nlist.reshape([nb, nloc * nnei]).unsqueeze(-1).expand([-1, -1, nt]) + # nb x (nloc x nnei) x nt + # atype_tebd_nlist = paddle.take_along_axis(atype_tebd_ext, axis=1, index=index) + atype_tebd_nlist = paddle.take_along_axis( + atype_tebd_ext, axis=1, indices=index + ) + # nb x nloc x nnei x nt + atype_tebd_nlist = atype_tebd_nlist.reshape([nb, nloc, nnei, nt]) + # nfnl x nnei x tebd_dim + nlist_tebd = atype_tebd_nlist.reshape([nfnl, nnei, self.tebd_dim]) + # nfnl x nt_i x nt_j x tebd_dim + nlist_tebd_i = nlist_tebd.unsqueeze(2).expand([-1, -1, self.nnei, -1]) + nlist_tebd_j =
nlist_tebd.unsqueeze(1).expand([-1, self.nnei, -1, -1]) + # nfnl x nt_i x nt_j x (1 + tebd_dim * 2) + ss = paddle.concat([ss, nlist_tebd_i, nlist_tebd_j], axis=-1) + # nfnl x nt_i x nt_j x ng + gg = self.filter_layers.networks[0](ss) + elif self.tebd_input_mode in ["strip"]: + # nfnl x nt_i x nt_j x ng + gg_s = self.filter_layers.networks[0](ss) + assert self.filter_layers_strip is not None + assert type_embedding is not None + ng = self.filter_neuron[-1] + ntypes_with_padding = type_embedding.shape[0] + # nf x (nl x nnei) + nlist_index = nlist.reshape([nb, nloc * nnei]) + # nf x (nl x nnei) + nei_type = paddle.take_along_axis( + extended_atype, indices=nlist_index, axis=1 + ) + # nfnl x nnei + nei_type = nei_type.reshape([nfnl, nnei]) + # nfnl x nnei x nnei + nei_type_i = nei_type.unsqueeze(2).expand([-1, -1, nnei]) + nei_type_j = nei_type.unsqueeze(1).expand([-1, nnei, -1]) + idx_i = nei_type_i * ntypes_with_padding + idx_j = nei_type_j + # (nf x nl x nt_i x nt_j) x ng + idx = ( + (idx_i + idx_j) + .reshape([-1, 1]) + .expand([-1, ng]) + .astype(paddle.int64) + .to(paddle.int64) + ) + # ntypes * (ntypes) * nt + type_embedding_i = paddle.tile( + type_embedding.reshape([ntypes_with_padding, 1, nt]), + [1, ntypes_with_padding, 1], + ) + # (ntypes) * ntypes * nt + type_embedding_j = paddle.tile( + type_embedding.reshape([1, ntypes_with_padding, nt]), + [ntypes_with_padding, 1, 1], + ) + # (ntypes * ntypes) * (nt+nt) + two_side_type_embedding = paddle.concat( + [type_embedding_i, type_embedding_j], -1 + ).reshape([-1, nt * 2]) + tt_full = self.filter_layers_strip.networks[0](two_side_type_embedding) + # (nfnl x nt_i x nt_j) x ng + gg_t = paddle.take_along_axis(tt_full, indices=idx, axis=0) + # (nfnl x nt_i x nt_j) x ng + gg_t = gg_t.reshape([nfnl, nnei, nnei, ng]) + if self.smooth: + gg_t = ( + gg_t + * sw.reshape([nfnl, self.nnei, 1, 1]) + * sw.reshape([nfnl, 1, self.nnei, 1]) + ) + # nfnl x nt_i x nt_j x ng + gg = gg_s * gg_t + gg_s + else: + raise NotImplementedError + + # nfnl x ng + # res_ij = paddle.einsum("ijk,ijkm->im", env_ij, gg) + res_ij = ( + # ijk1 x ijkm -> ijkm -> im + env_ij.unsqueeze(-1) * gg + ).sum([1, 2]) + res_ij = res_ij * (1.0 / float(self.nnei) / float(self.nnei)) + # nf x nl x ng + result = res_ij.reshape([nframes, nloc, self.filter_neuron[-1]]) + return ( + result, + None, + None, + None, + sw, + ) + + def has_message_passing(self) -> bool: + """Returns whether the descriptor block has message passing.""" + return False + + def need_sorted_nlist_for_lower(self) -> bool: + """Returns whether the descriptor block needs sorted nlist when using `forward_lower`.""" + return False diff --git a/deepmd/pd/model/task/fitting.py b/deepmd/pd/model/task/fitting.py index d9db44aff5..6e96b7b081 100644 --- a/deepmd/pd/model/task/fitting.py +++ b/deepmd/pd/model/task/fitting.py @@ -211,8 +211,8 @@ def __init__( if self.dim_case_embd > 0: self.register_buffer( "case_embd", - paddle.zeros(self.dim_case_embd, dtype=self.prec, place=device), - # paddle.eye(self.dim_case_embd, dtype=self.prec, place=device)[0], + paddle.zeros(self.dim_case_embd, dtype=self.prec).to(device=device), + # paddle.eye(self.dim_case_embd, dtype=self.prec).to(device=device)[0], ) else: self.case_embd = None diff --git a/deepmd/pd/utils/multi_task.py b/deepmd/pd/utils/multi_task.py index 680dc53c79..321883c12e 100644 --- a/deepmd/pd/utils/multi_task.py +++ b/deepmd/pd/utils/multi_task.py @@ -96,7 +96,9 @@ def preprocess_shared_params(model_config): shared_links = {} type_map_keys = [] - def 
replace_one_item(params_dict, key_type, key_in_dict, suffix="", index=None): + def replace_one_item( + params_dict, key_type, key_in_dict, suffix="", index=None + ) -> None: shared_type = key_type shared_key = key_in_dict shared_level = 0 diff --git a/deepmd/pd/utils/spin.py b/deepmd/pd/utils/spin.py new file mode 100644 index 0000000000..934fb3762a --- /dev/null +++ b/deepmd/pd/utils/spin.py @@ -0,0 +1,30 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later + +import paddle + + +def concat_switch_virtual( + extended_tensor, + extended_tensor_virtual, + nloc: int, +): + """ + Concat real and virtual extended tensors, and switch all the local ones to the first nloc * 2 atoms. + - [:, :nloc]: original nloc real atoms. + - [:, nloc: nloc + nloc]: virtual atoms corresponding to nloc real atoms. + - [:, nloc + nloc: nloc + nall]: ghost real atoms. + - [:, nloc + nall: nall + nall]: virtual atoms corresponding to ghost real atoms. + """ + nframes, nall = extended_tensor.shape[:2] + out_shape = list(extended_tensor.shape) + out_shape[1] *= 2 + # paddle.zeros takes no device/place argument; create, then move to the input's place + extended_tensor_updated = paddle.zeros( + out_shape, + dtype=extended_tensor.dtype, + ).to(extended_tensor.place) + extended_tensor_updated[:, :nloc] = extended_tensor[:, :nloc] + extended_tensor_updated[:, nloc : nloc + nloc] = extended_tensor_virtual[:, :nloc] + extended_tensor_updated[:, nloc + nloc : nloc + nall] = extended_tensor[:, nloc:] + extended_tensor_updated[:, nloc + nall :] = extended_tensor_virtual[:, nloc:] + return extended_tensor_updated.reshape(out_shape) diff --git a/source/tests/consistent/descriptor/test_dpa2.py b/source/tests/consistent/descriptor/test_dpa2.py index 72c0967a78..ef840bf9d7 100644 --- a/source/tests/consistent/descriptor/test_dpa2.py +++ b/source/tests/consistent/descriptor/test_dpa2.py @@ -17,6 +17,7 @@ from ..common import ( INSTALLED_ARRAY_API_STRICT, INSTALLED_JAX, + INSTALLED_PD, INSTALLED_PT, CommonTest, parameterized, @@ -34,6 +35,12 @@ from deepmd.jax.descriptor.dpa2 import DescrptDPA2 as DescrptDPA2JAX else: DescrptDPA2JAX = None + +if INSTALLED_PD: + from deepmd.pd.model.descriptor.dpa2 import DescrptDPA2 as DescrptDPA2PD +else: + DescrptDPA2PD = None + if INSTALLED_ARRAY_API_STRICT: from ...array_api_strict.descriptor.dpa2 import DescrptDPA2 as DescrptDPA2Strict else: @@ -214,6 +221,39 @@ def skip_pt(self) -> bool: ) = self.param return CommonTest.skip_pt + @property + def skip_pd(self) -> bool: + ( + repinit_tebd_input_mode, + repinit_set_davg_zero, + repinit_type_one_side, + repinit_use_three_body, + repformer_update_g1_has_conv, + repformer_direct_dist, + repformer_update_g1_has_drrd, + repformer_update_g1_has_grrg, + repformer_update_g1_has_attn, + repformer_update_g2_has_g1g1, + repformer_update_g2_has_attn, + repformer_update_h2, + repformer_attn2_has_gate, + repformer_update_style, + repformer_update_residual_init, + repformer_set_davg_zero, + repformer_trainable_ln, + repformer_ln_eps, + repformer_use_sqrt_nnei, + repformer_g1_out_conv, + repformer_g1_out_mlp, + smooth, + exclude_types, + precision, + add_tebd_to_repinit_out, + use_econf_tebd, + use_tebd_bias, + ) = self.param + return not INSTALLED_PD or precision == "bfloat16" + @property def skip_dp(self) -> bool: ( @@ -286,6 +326,7 @@ def skip_tf(self) -> bool: tf_class = DescrptDPA2TF dp_class = DescrptDPA2DP pt_class = DescrptDPA2PT + pd_class = DescrptDPA2PD jax_class = DescrptDPA2JAX array_api_strict_class = DescrptDPA2Strict args = descrpt_dpa2_args().append(Argument("ntypes", int, optional=False)) @@ -383,6 +424,16 @@ def eval_pt(self,
pt_obj: Any) -> Any: mixed_types=True, ) + def eval_pd(self, pd_obj: Any) -> Any: + return self.eval_pd_descriptor( + pd_obj, + self.natoms, + self.coords, + self.atype, + self.box, + mixed_types=True, + ) + def eval_jax(self, jax_obj: Any) -> Any: return self.eval_jax_descriptor( jax_obj, diff --git a/source/tests/consistent/descriptor/test_se_t_tebd.py b/source/tests/consistent/descriptor/test_se_t_tebd.py index bb4a5db6e7..9cdca9bde3 100644 --- a/source/tests/consistent/descriptor/test_se_t_tebd.py +++ b/source/tests/consistent/descriptor/test_se_t_tebd.py @@ -17,6 +17,7 @@ from ..common import ( INSTALLED_ARRAY_API_STRICT, INSTALLED_JAX, + INSTALLED_PD, INSTALLED_PT, CommonTest, parameterized, @@ -34,6 +35,10 @@ from deepmd.jax.descriptor.se_t_tebd import DescrptSeTTebd as DescrptSeTTebdJAX else: DescrptSeTTebdJAX = None +if INSTALLED_PD: + from deepmd.pd.model.descriptor.se_t_tebd import DescrptSeTTebd as DescrptSeTTebdPD +else: + DescrptSeTTebdPD = None if INSTALLED_ARRAY_API_STRICT: from ...array_api_strict.descriptor.se_t_tebd import ( DescrptSeTTebd as DescrptSeTTebdStrict, @@ -146,12 +151,14 @@ def skip_tf(self) -> bool: ) = self.param return True + skip_pd = not INSTALLED_PD skip_jax = not INSTALLED_JAX skip_array_api_strict = not INSTALLED_ARRAY_API_STRICT tf_class = DescrptSeTTebdTF dp_class = DescrptSeTTebdDP pt_class = DescrptSeTTebdPT + pd_class = DescrptSeTTebdPD jax_class = DescrptSeTTebdJAX array_api_strict_class = DescrptSeTTebdStrict args = descrpt_se_e3_tebd_args().append(Argument("ntypes", int, optional=False)) @@ -243,6 +250,16 @@ def eval_jax(self, jax_obj: Any) -> Any: mixed_types=True, ) + def eval_pd(self, pd_obj: Any) -> Any: + return self.eval_pd_descriptor( + pd_obj, + self.natoms, + self.coords, + self.atype, + self.box, + mixed_types=True, + ) + def eval_array_api_strict(self, array_api_strict_obj: Any) -> Any: return self.eval_array_api_strict_descriptor( array_api_strict_obj, diff --git a/source/tests/pd/model/models/dpa2.json b/source/tests/pd/model/models/dpa2.json new file mode 100644 index 0000000000..f83e319de3 --- /dev/null +++ b/source/tests/pd/model/models/dpa2.json @@ -0,0 +1,57 @@ +{ + "type_map": [ + "O", + "H" + ], + "descriptor": { + "type": "dpa2", + "repinit": { + "rcut": 6.0, + "rcut_smth": 2.0, + "nsel": 30, + "neuron": [ + 2, + 4, + 8 + ], + "axis_neuron": 4, + "activation_function": "tanh" + + }, + "repformer": { + "rcut": 4.0, + "rcut_smth": 0.5, + "nsel": 10, + "nlayers": 12, + "g1_dim": 8, + "g2_dim": 5, + "attn2_hidden": 3, + "attn2_nhead": 1, + "attn1_hidden": 5, + "attn1_nhead": 1, + "axis_neuron": 4, + "update_h2": false, + "update_g1_has_conv": true, + "update_g1_has_grrg": true, + "update_g1_has_drrd": true, + "update_g1_has_attn": true, + "update_g2_has_g1g1": true, + "update_g2_has_attn": true, + "attn2_has_gate": true, + "use_sqrt_nnei": false, + "g1_out_conv": false, + "g1_out_mlp": false + }, + "seed": 1, + "add_tebd_to_repinit_out": false + }, + "fitting_net": { + "neuron": [ + 240, + 240, + 240 + ], + "resnet_dt": true, + "seed": 1 + } +} diff --git a/source/tests/pd/model/models/dpa2.pd b/source/tests/pd/model/models/dpa2.pd new file mode 100644 index 0000000000000000000000000000000000000000..650f0c144e7c2dae9a265e61ed73d4eb29fa6dc7 GIT binary patch literal 119535 zcmeFZXH=Ehwk=8!5ky4=5m8YMLK~WIis&d!a`@C0GyY_wep8Kcxv22a@;rwPFV~)^ApUZlK4;cw5$uB=oy*XWI zj5UmO40KGn^fgosyj|!G%=C?{xb7PoYjEkC>6z-N8XK!xdArak8*AJ*G%ztWHoI@? 
z?d9!4@#}-$E|do9KOgb(mLPHP79&;jcJ!9<=9D0nAd}cHvBA+>o5F=c-PFoRUl91)5B~Op zzy09vbMSW@_&*&7e$CoEzh!Mr9X(SGV`V*6D-B~4E&~ly3q#`vCR{vR|M(3Lm%gg0 zv5w{MG(~g63 z5_T=e8X)j!LaWENG8}ogeZ)SIfF6e*#yd!5z=PiFCpxbrgUyA~)UzKW!PWYp`igN8 zJTl0+CC=gn(`C0>0?5kn6`ywF35g`sXAr;W%v291-6^0_}aR6#|Ky@c-RVl))XptM}cgDb2q`CoP1;QjO*=bC>Qc#^B! zE}agB0U7oU?^K#0hdcXu7-t>)iZa(hHgkC2`Q#U> z#xbA^JksB|cNrzUo<=@8Jo|IGzwE|*e~iEeiA_J<_!q`&)j4R=^-vYwy|jNcL#+c; zPDytO&qv_v2cJyGb<5EplOu4Utr#s*NmYv6BI@YsA;MBQqnU z8_mo^NpnKGQTGV-X*S_jw0!G*%s(R=EoM{XbJgpR_mqPEvQ0GXzuI8o;?RWWB26y^ z3s0lo#T>E^%$>k-jX^cra00S}uFsDLBC_9T4SslX0{5$?kc9B}!wbVYO0CKkpfkQ! zx;4BDE3V1dPLj323q`AFnbqN+e)X5#mhX>@xap@6&-*BNxYRWVp+y?~k?u`X$Ue7g z=jRM3cwpSv>L&6DZ05JwidcuDqWQU~n=4~5VB(%~f$AIDFJ3Xp)0_GEI=sJsij@Bj zR{vXr759Gt^BwQuQik}@1=Y~fXIw$%^kYq-AGh2T#Z+I@5~y`9rnG;m}a?On7T|Y(^>73oD0r(J(pY!nvn`;h#d~Avn)g&fLZV&mUh( zCUq)@vbdN>?pKpRPVKVFa!x)JIJ6XB%`e08{?)9Wfp9*rz`Mtn_Qg8 zayk@$Qxo7$jE5}ma^1M#OicA-@_gA=2Z8)E7bV;MQTUwdX}i+}cw#3jnfi1g1SSur z(dO2HvF&E9^)zBn-&UcL$Xx|Z6}uc=l>;EoQL&}YAQ$cXYfR^9cwzSKs%q}Xa;Uld zkwSMZ7n$YWgv!(v0hjD`<$_%~$SFFv<$G)s78YE1IqKvCr@H5O$^@ctQ22JCeYZY7 zB2{z`oJs{zCZ(#J{bg9eMVZ z1AIzf>~g9=v6k73*GCfZHFr!SSA8CiiCFoCaVFrM7v0h+7b?MO(^NpEybp$SHb0*b ztN^~Do=1I6De(BJP@WpGx!kiT|EBj;7M_r{A1SEI$ABcWVZMF&5TKy`vBj_kGum2* zS>G3d$)RIMRV*`bo5pRHGatgCHuA_o#+ooLZ~abs+O-;L{p%WZCklYIzNmbYdlDX$ zyW3@_9sxh>3&xeEDnYDi*;!gG7nWxBx{;A4!ILUSlUt1Wz{@UoU(U!Jx_ts3*^+0V zIi=Ri;VA;L?Hcb?&rZgtGt$Zq4dq~xKNFmBvKIOpEY^>FEy0UDRU@Rn4`KWE^+H|y zOd!A4EX({g55zORrhGPNMS9sD`#YS`D0SB`pmcjM@JcA2a3SP^&^MFNT!vh1;@w+K zl~@K6-L6H?Srg!sz!N*E6#~T5Q@JxiA$X34u#d}SV1V?bQlCl&YUyrz!#1CUWd6aW z^WPB<*wdFXoGXRkcIl4EebumA$mJZ5K@E^UdN(({FAhv7m7*B$SK%W2RmHRRdC(La z=yjbs3ZMJVX!Culh5J(%4l5ll0C75_eZhwsQO{9>vcW$H=_aSLOOMBb4gb4FM)w+c zbFfu`Mxh1oYFwxB_i!ST0nsZ5Fbgd^s2&|mdBkx1?h7O-{@chu|JIh!D_J>m2q8qcJbAxe! zuQ!ogkkY9N%_1N*W74!!b`GuzH~IXKFGa?}@_Faeg`jwi_f(@#5*FFsuo`|<3JZg? 
z{6>7mu;bA?+PzuHaE;UW(+3Y9tbMSty@RU=BFvODEnD;9(xgvu%EvTZCKwFH4z{8H z)t9p-FJsYIelV8WD*^Ilukd7i4TZdtZ@TNG-k==t-unDh4L(1%a%8!x1#N`Noli)I z;y|c1)5%j|@a~}cUda=PF`x3r>Axj`eUrhB`9>d1WuY)xO}557msf*(HxN*irGoYP zAc9u3Y-antWZXPY@g<8X2+m4AU<)B+<3iuH$P;R@$SKp6yO^JjsScjsC7cQHE%D-{ z((4*rThp0%dpZX)qSmVQq$MHgyYc{?E=)F8Q&ytw z+?@LbxpYKWQJi@+3lkmuIvizU zadGi$( zJ+I*Owmlo%C<@^At?8y1T5B9wGdgL!$reN%N#7lfEI`R@PXYB*C1f_!ka1P5gY-jM z`ywYoP;}vgcSv3YlzJ@9jSiGTP=m`zHU|M3KAajpv^NjmQc~aZVk*XcpQSeyisoT- z%iOTz8-KV{{mD4or~!^>=`hclHN(zzvF3A2$*8L^v$iti2V1`~6~3e>gftQrk_@vl z7%o|7v7$|bcdChvw+3S1%}|`|CgoHJkGDQd$SJ@NQI<@%PnSdC{n7LJyR+fdLS(iN zNhS{7IG&TM(uQuUIa|jBe1Jpt&?)l^`7koknIP{_f(0g;D`#jjaQ89JwQWZn@os|a z;p_wX_$7=SXzeQDb-;%-#n>V|wx^Ejp@%N|XCD#Njnsx?t1_om!z{5ec;8I|)}Wi3 z>|MI(Fz_lSTvB{TK*eN*G^IPC@F*@o{flufUYCrz+dY?wVMD?;R9Boq`RMB9UaCxN zJ!_XTNmhQX$Adg9?#BX>OFTIcuuNhJ)fFBFFpx$_*w-r7S?O2K5f zlJR;`955*9hMl<73Y#=^TFu?V@KL`e`@{x6SkDc-b9*Kjn-88=`aWOm;rR?Bv^* z6XSs-H`c4=0<&Qc8!xgK{MI=*d?AOBbTq9%2TKe5w|-ES|8R(Qr?5VIFP_ zs~21*tAe1(GZhnqjxg3JU`7{PgT}sBbYz%{aBS-0RSKfte%oE9v;T7pb|~!E6}q2+ zz834p*2rFf#hDOvpvXbWZ5zLtJ*~lUz7I*H3$MXr;}MS-xhQlFeEO1twGyW|WogPq z5D&ej^nLI-3TbwU^7Q)_VS~K$CsMu`AV0FZgIy{Qg(SJE-rOq#c5lY?Bqu*?=;B_K-o!t zkBukOFt>C@zTv7nQYq2&&y`D7i;t50bYQk4Bd^!D884J%(MiAewzJmuznWh#oxk!aqG4 zGE&%R<-r}-Vth z>W}_#a8l2`20w4n9dmV-1)=Q&*&X~@5XiUSvMS>a)M7yc23>J*wxu~OW;Gp89^TUL zpHq#BV?*a#nVm5vXg1e3D+^<~H$EYpslgw##&>I`i_wp3;Y;|lGT?Zjol>D)iLwE= zV-8&LfN+A$rV#fE|+w^5qP;caKrrxj6%U)Y=UF-oWG5`=k90XPMt@kgM$6`H0uGjLn*U-OTe)Nr6 zI69Lp?rHTd!VMh(y=Tn}&?JA=N%iJq2-tN&a>LD1oKy5UEFBVy1G!gqIbLSLa;1wc z$DFTDnxD$&cB2ltlXxFRL1Wfq+p0O`J7t`^+c+c5d^r2P1 z(BPE~am_^&tt?n@=o4JQw4EY(BWmTFlqnig1jW4hQQT8qa{o_nT!$qQ7C zK8tmUCnNh4#*%_P-?nf-S(pj z^p6egp$_wfL#5AeODrK|=F;QXqjY$&uTy~2F%Ls7Y!GHX7y%z#jC{gFv#>UcGpP}p z;G^&Ap|m@-kf^!bM(>Z8fqsK*^oF$zpE=c8(<- zjL-Lkpf<-0!scRFIA<_>OE?E-Iv*72KsM69zsVrQR0;_KhEk&8>8NwnBO%p06OMj8 zq<@P$3htMf-O3^qL9NV!kLHmAEWDj0AdymtjI=>gVecD1nI9YRa9Op-^Gg(Pw7W zaD_G!i;4A($;{mmJW-`Ct?sE>00`uf_|N122N=!$7cO)BIRm2vn=AdMKAPV-ze8#|IZ7cR&k=YEc?~ z-m`pXDj^(0Y2Hg8y`7D5-$JNu6Wt-qL55U`B_AI9(|(ZM*@A3K3iHDbSzs@v9^fQX zgjBm9@TEGW!HZUL`fut4+~~=nSf`Z-t@i376+*cvT@bSS#(F8r&JJ)Ylr;gArAZ*5Ag?t$8Y04A(gXVw#3f^L;cYYOfSozi&-qcKWGWtf2{2A zXFD1KvgYIY5oC(g;X zVVD!i_x}1E=%em-$CPI1QtjBmpGyFrc^h@Z#$3Fk#5ZebmI&(1(zbBgq_9rjd_y{H7_m_~qUs3D>U-`U z7-)frJ$}zlmbc->$J=5UIg((mm}G~Has-T=Z-{fC9>MDA_3NTD9UyL%N!m>4Mphd_ zsvl<&PMjUnuE}eK6;+jsNuwiRaqCI>ogW=PS*Jg-#vhI{exsk57uWd?GtQw8$3bc# zo(af&?dG4=v4|Pw_bJV@CV`?)%(m<8yPv4D{hry4j16^_wRpAu1out@Ili8xYs{BOgZ`8yNf}m0<1s&85(= zX5iGfPNU>;M<*|lfZLawkY2CmE#YGXj??T{7@+HdC`Gb056f=QIZS_JSw0Ch1@rSJ z-nE10fNX5f>tW#KUEP>dI1J(sjtXdxwL+4Z@IduIBl^A8DW`UP4U@7H-!{H0M{yFC z!-M-fU`2&*A?s`g-hD6@)9|zjXOI6dw~s4^fM{Khl|!Y_R!5x@o!bo@nw$Kx(z|dG z)9$Lbx53Vo5Z+SVQXJ-d`k>#w76s24zo6%?Lct9_fmzNycKdNI9Hat{q@6aNSSI26=AN!CB=+=w|o2G?Zrk` z=A}&JQ;TiiOKi$dBrI>49zodWx`I-|2rQ!eV**^_5yb>QrQ0EZwcZ}=t#pYlagq5R33i$F;l zObH}Vf=3K^tI&L}S1X0CkEaFAwCdofhF8Lm>xCH2QLjF#m4Ww#rjz$S?)+(sqrYbh zRZ~*~J}qT^RU;zFZ1nKKA5+Z~5*vR~%?pWiH0?eHeVAAz=nzX&f#m9vEDgrB(0g*n z!Gq3K=xKM&>0{>$Sf33Q=RMXB1i6z*U%$n{(c@KHC7io}>)n9=vV|GcafU}y{+NQ& zZYK5m{RG&vI(zWhXfHhLW2d)EaWFRxm^#Aw#^4iXABW-aYK>2_$N-*^lgp~#CeT1M?6r2llL%Tf6iSnj@gTKB9gY>RX5sxKo_UmRMW$_2B3qW6oA zCe^J`MqNl8{t*mNfq-XUDg64k);6J$fnwb{6%B96}I3jeDC+u!6oUS1)+(uIj z@4HjQD7_~^F{4#iw4n#=n!a?C?<#~1t zCe#9qE9LFf*3BSB(_89(Arq6g29LJN*W!Zyh2fi$U8rrBYbSiG0Cro-3s=*%rtS;KYwpgz7I_|TC;Sql_CvNa_VP};CGwe z^_TMm9M9KgcHY~8Q;+4zpgtBDmVzpZy!$ZZn?*k5>2%z!61-cKllXVy!iIWsWtgL2 z+jFfBu0N#-{QpP6=V$lM+dhVAE!{U5eq 
zyc}O+?`$3fn$25zJnsJKHNR-O{+-wS7dU@Vko=YcKONdrG&8V>`fqK|O6g$M4}EZ|%P&_ZjVGMmQ(UTp3ya+edoR_(kJpE;IDYW~lXn^POlA4Vf3zZo zX;&G}eJ>#N&IaS~{Hv~dv0ltFRpHf)D#dGl^bJ>}n{h&3Yu6dq8i*b6QxaWl!;!;* zsu!sTAzhoWZU2Ex*v4<`C}r$}v-z`O!5?#QJ$AFy^EP5N{^hprncF7lDLfV>we z8858dzTFB%qOSQ_t7ULoZ}r-i{3=x8|9G%lydD*wk+ewI5HKKv>w{op3touHYuY@W z1$-YjiH#_?K!RjyYEoVGPaiw{@2v6QPom)_i7h{sis#8mNP2jRf3Ud`@Q`XNl{;w_ zDxHni`aUxX+YP-2-R|}QM>1>NQ(}`#wRLm@zy4eN!oOx5UsVYwzT|yAbhR0*9*5uF z7|{Yu*+TYo`)iSs)HAW<;23HaNnD(fXoJxPQeW1>a8S%+X5k<AdBCG%BDt)VM11~$z~V;Y(C$iw zMs`XoUJ^R3IwFyW^%~9!jw9s|_p;V0biN-NTsHNNTjheU)2!VAn^EBOVMw^Sw-5-k zibwaBy#?oU=36dbY=siKt>SE7ZGh*RZV7W&GD?5+BRM@%1>N!tV@BH|aVc1JIH$oD z#J8whI>fbsjoMZU`^XxkF;T^23xug(SFi+~Dz^nhwaMp)_`( ziUZov;MBtf5BsvoB?%xsWdbA5f4xd|iE8GKjj;w!~V5op6G}XISN6R3ZSw_;vsSW79 zM>_Nz@qpol=dT>(N`c3B2d#!yEDp8lOc{4JBZYFn?%O=c_%iB|)JM+-Wa3M?ee+@+ zDy*E}J7(C1iqDVp?V>J)9~pWOyC)A6Noa@-T@ldgq)EFh=0neVS1cV>5nLL41Z3Z< z@EX&;GoJ`;$ozd$RPT5x-iRzVuYMhhgaPxjWto|H$@Off($O~5RyuqpAifpsjt*M7 zzpMlT$wO~=*NzcRk|gd&!yu-kgkR0OAMWWEBsOeqgX2+oB{TQBL2>gGP4iL)=<>To zO5YEK;m?#D3>3;VqGgEW_?H<9sn}B3xOy*wkF^C9#4PZoC;<}M zUd0~~u0X}tr^3!W@PqTR^o=nWN3lfyp&Lfz0?VLLQWa|hdbcYEU6?3D7P1JSa47tF z9FP6!as00=^M?}nO}~xbNr)P*{%1`0Bi$>2z%6EczcvNR&E&Z@5wgbc)3hyT7fec)XMS2kn}*0S3Ec_KlQ}4B4>c%G5bMxkG>9}BJ|LNYAP)O|Gb!^Dj0SKcV1r$5mh-0-aywF0x z;5UoYvouXeW5rhAI@Jr02HPlhT9v?lI{&j&42YR)Z1Fyu%3vjAA!f^ld^mHnB_exA zHfqwl#d z?!xn&I(FmWrfp~$vta?N!|z?Jav6d3k9A>RcYZ>~9}_R-dU~LXVIy z`icvgw>4=xhkm}w?mJS-!xy zMj0mdJ^_|*WY-J*sKj=syV6D*yfCZ`7)IqvV3f^hpL}^E-ikUolVg&Elb;wF?gV+` zxx>fEGyB4D^NEuFIhh=cW}>*4Pv?NAbtvi{-*f@D^27QYb`zQ4(Zb6^6M3*}g=;|K zy*KpS5prc(jKeJ4Cx!W$m?`+0igw3q*qL`J`bN_TO@nJ0?T)M)`JOPRDQ?JO)?O9z3$fyYFVL{!ozKTgPy3p=6uk%A_dA$@x1I+ z2?YvD>e=Vcp6DjE|FH~TB?{-u4Jb~B<5H$z+K>n&->40TYvcf6)l{5g@BN?EPiRtDcQ zk9zAE!-I8p0s%TT((Cezyq{epQQ zaKodu<8BegZYt1?(l3T@Idx}AYJ;IFE%^!R9ohvj1 z(m*$P_tZAWHW(z_4Dr_~gUNR_32#1D!&0i+0l6I+C}quggvzlRZY2-j_$HbRdRoU{ zMIWt!Ox=;|SMAG?fV8H6=$I};!L78woR64s1w8`Gbwei%W z=P?6G17$?I%D{_r$%XJqyCqxTRweLkAZ%b$ECurvkxBb|4*02$k$tVG3dbFJczcfO8>-2s&uhlq-`$mVX%u}EJ?YKcwU1GUiH;U)08 zKM&T1RL-oQ34?}EQ-NphQ7HOK^}fLeqH=_Di|hhz3}%~llsTp4LZ{jdd{AA6U%Qy; z1DK;hI%(Up&mBcr&cq_iGFORu=kx-T8VK0FI39eZ*Bn1LKE3xag@8?INk_I0=E2#B zTfBlFV?pH+$xZj8h47_MOU#kJ1~#WF2u#HZg8esHu>Doj3_Qb zHir;9j%)eoQJ^Z3O08sAx+hO1Ff=m6uD>U^|#KKVh!IR?>Zg7-3c7|c6AW~vFu zK)QamD^V6gsF(1?qI)bJG=8|*j&CVM!=%M8MR)RG?s&RhGm#rVdQ*^P>1{41PgTYz z6P0)My&ju`18dQBD15uyUM6R5aj6|gIysIKqjfu8zPJjPKfaeZ$NrPLQL1iI~ zo$t(382<27bEdTfqz+oEDHpqe^iGeLMKYPdvG_UM%Q_dGvW2%%D-z@3ZNDE1vDxt9 zC~MUbw?;fw_BrfoaTp$r8@Ieao&sjCH!Ppr8~~~ZA_Xq7CLsq|TrHZGo0WV|(NEa@5$TW;Q|Pj(g--Vd_RQl9}zHd{dAC6eG-5U2Vj;zh?OH zeRvywskp8FVlo%h++N7Lcjdwv%cWf(RmxzkPSZ`y_aP3R?q;6bo(cBe`U<B*bJo5c~2`6m5nQH=!$4z(}s7h~|QIpO3+ zB}5W&61&c}EVKw3mKJbn!Wr-GF9)BOV=w)9qZn%-HnZHm$DEXo%hTkpw_ZiVbou68 zvf&<}T3x>Rx@I|c+>7$xv!xzr##~Z8H{^g<Tc7DDnm+qOWaFuYg%V|}Hv1~f<)50Z&IgG!HHa!bn)@H=8{&zckl zEc-a`+WMtomvhNnnO7$2^YgZQ+UEk-_L0L<-yD%bu(M@VG#>iU*AGXP2?=zW! 
z0kL?Xio4Q&Bmi-LnXNjnVJ2jUO=yR7Rb$h|DFZ)+T(GIO2I{32yk*hqay&i;H1=R> zM2-$RIo@@A&hrKjtjv>E=?{QykF1CXoi39?JaOIrXSZU>@IVmH`2G4kpaiHUMQ&q@3Jh30zXxV>Bs9!0Zcq zXr+CMvEjz-?)t!X*km@5QbAS*cDLW3VfM?xc&>o^;thpZyuS8oVMh|!>?S!%_vH=R z+?x3uQcw#N`=_Y2`3sOxN~JYCnFA%CXLC#p!|>2&x47jOWstLze<>QGA#GkQ;FD=J z&Wp8bC(4#09@40H^Dz%v zVx6&tT~a!x9n(9LgkDy9qOiGl3~Q(#pzXIET~FO1GsEw0ZBYW+oR0E+M4t^ul4WN5 zi)-+uGW)7YQV}Z5&+&g#s{rHL^9Pp}gOFMHxZIVIQZ!V~j3{Bt$3seT4=xkg=*sik z=G@LycMD7F3vOBpbx3dvQ zzQ&A@w`ZcdIt`cqt#tetzb5KhUjYhK;_~%H)(B5(uC#ZB;NF|5=P#`~qgZCq-g}A> z@F{7;s4b%m^*rC#>q-#s_~!59mOqgVV#-<8ynR>E_oN8z&Dadsz-cXIyT1VLYz+?x zpv{191Q9+4!vfsIu2;lvNI<&nqX&o0N}=z3Zz@Y3u`d|*7%#t;3!#^6l)oDVL)rzB zGdqlfF;a9t3;+H!pc37vP&il#u?M90f6pw%rH?}?pRS}qC<`k^JD)oy(N?jO+Q%c6 zELqp3-IYim^!Z?^GVvC-M31PMSQ#wX*RBj%7DASflH8X^fxykxCY5<24zA`+&S~;&BU5q^`4hG)Sz2+w%i{ z=v9I3*k$S$C$jOzXs3v=NF^+%Y;5vAYYSI;66hS{5if%GlzT}&(DSZ!GVM&qP3?C( zPpyQ5TgWN($F6z!k#nplS2PFN-!s~E+vdZw>vG3;ooquUlRhPr?ZFsyaK~Y;#4;?u zwCTGEdmNmcf2XT!Q;vrfF3E2>5CU%=DTjz&bq3*7&T)szVyH4zrumRn0kX4?g`^(T zLdkL9QMD=un~a;5Tf`$^^TC%q)+*V+Wz2a>P_!5YGY))7c8&#TlsJR$h#0AWYD%@EGy{ zjV1lBObQ6zlyi#yhUp-B@kJa{QW?@6RrXY8uE2{2Md;pZ6+_OQ)?@82Qqes3Izd!0 z2lJm#-Qy!khsg(T2t4{3(6()0`_PX#RI7RSDIh!n&uNrg(|DJGYHVGXrymv}PlN62 zMWS+xZ)%Eht*iz0uIu^DIa$NbkNY$%`^@nYmDRp*I--J^-0Q;m4}SQ8hU#8%Rz0M? ze6KUt|;h#Ljzac_6#phzv)Ml}|(gqe!yoKFVcz0Iw9rX2%GL}$%1XgAl zyL0#3fLyR)Zr8&arLP8V7lZ}bzTt-HUGw$&071dBdWl=B97HEf7R#V?Jr!01d}nDc(M)Lgo1m z-6{QU6snIi5-qBMtu6MFgd5F2!T1wv{98py!=~ujNVaO!cXQm4#zj_IJMT)(akmBX z1t)@!NKfH3i-oBi@#4j=X8#>U2`|@wswknXTgcEJs)Vr^jLR{az){vGSu{r)p?1@7 ziqnTQ4A(jp+Ah`%8&3NZUcW8_HO7d7Hk&-;<|T~S6QRoX3LYd_A&{|OnmL=5iJ!~} z=bf~A;HbG0&GU+I)Xq$w|KwJRa?Ii%V~JNcs&+m)v6;v=UkNs*d^J~(eqIrg=EO+R z?nrjLz^wvpoAP?EE7Tx)B;9D0Y(5yD9B}aLZNks5vvSPQ{T9Jo53^qoJm?r z16KO6(G*Gyz>m^nBrk5KA(N&@PJm%P-kDTWSHE0~RiaZnkIhu!u@OF+mcV}antk$l z>Z4L*G1jWre_W4AvdiC1S!2;Y(Bm}E^-Or5VmCLPI{|lUhnACx%9JfnF9#K!9s@hx zY~k|Hh|~wGU(03nf!2HRz3v-Kf13Pv6ea((#9vpG7->&R3;DEw^Q+C5IY%jNo8@o?hq!&}QS?eJ~?ZU);% zKaiN66ML~*2_)(6hOYxE!18NJ>S5+$ES98du{InA3;Ao2^qq}pudcN6N@)ZidnGz; z$}7ZDMwakvgC1zguV#8FH5mjp?|73a)Qwd|s^b~9Be42P^csCaCqA4DekKr@jiF~5 z$=83Bf@htva<_gMrfFAhq|(g*UwY@vZ#m6BPsQ&jN_e^cR}>{FU)0Dogfn2>v~y49 zd>tC8Kj65v*b772H>}H~^N{kAI=Bf}!YSKC_E+}^5W2p=!abUT8~P!!`O{ly?i+hD z6c&b|b#XIl+6X?)j>20cW}uQa<9gkW3D|;p0xx#Vz_#uk(6~Uk4rbhZN|F}u~QwW@Le;*?Q1#6 z3~=A{w10yMWA~FZV$%UnRMm`n455#uhgrq?Ahb7l*w52y}A{JMM4^71s%b- z$I>gqJ`H>=p6ed3>48Z*ZkdiK;zpCIoLUUpahPd;_ST6aY|WH8+mue^?v!Kr1)0l% zz^XBIVJ;W5w%yLp4sV3SoJl>U%oK1N_1yT3w;$h&QFmT;>jAZp{T40vhw--bH7`f0 zA!t$LuI1*iLz`u-k>CM$G&i5#+oH?@B5q@E8Od6aG*aW)9CU+Ii=udRL>!P)y{?|y z_z%&0JW#YVZUIZ9 z^RAA>t4+mA>4j{>Dc{@O@&Uc5ree1q$7_*N~JX8tUSX!+%`Kg9s1*ZkS zw@a@q{B+mfQI!0)@c)FOs95`T#VhQ~zxv*XZPWtwkIcp=Zr z|9x*H+AlE0J9V~UnrPd@7$ks(U@&W3eJQ+IxJTBo(uq4>o|rY6uR+7r;i;te{lt5N zF@6_$2{0c2RY2IO9%jy-v6k%agGXl;-}#KTp;Ymb!LgidT$-xxaVA zmZYBhqc59qlTK<9EmJVQWL!CBP3&4eJ9o*9wRRrtb>GEAI&_P3OGB#V{AEQ+k=ZTl(8)Gf`99*e z`*=OrDcT?5p~-;zF^60|i$k!Xf%J)Peib}loVxmA*a8dRaYSt^&%hXI+jv&idgNDK zxzi+E48xDOV)CQn;2v4p%KTIfq_2OTBq5S2V&rBP#hDFoqQTFI;qqH}B^Oej+ZY9} z1Nmvsi5K9z%j#7s$;3&&!Ie}q=}zp~ChV#<9gB~QWhc)!7UL4Hv$!&KGM3ny>)0ws zfxxb## zZYiXlZ#`I(KyUg(7$~@%zV87y~J^>c=FIj|{-HF%D4R)~)=Yp3J zyWA~e-Qsb6yWp4E9y~;PxizYz1D#si`ZqUL<7Qf2bJsE-OlUzX8^vJUVyd^Zo+A$2 ztFB)kJWzwy%qkrVmxuB4`|Io+dn)l>yr2}LWCmQyDJyw%vm9B|MWPLe-%(~Yqc~>S znuEus!;7_Fwu47TORi&5JFsZ^Ssi8V1{0og#kNiPaEvFO?{f$mIDp7mqbRh9K(xeGIf7F1QWtQ|h*$rrWT3SVucx{r+OS1O~v375gl+@b$ zv>loDhwJMShX%iPJ-?$U`IqDP?^E!9QKA)5(qPdphihAdB$x|TU?)R5Q7jr=)?d#g}ONF3(-PB)ZC>vK(^E&Aaf|1ven|AtH8+HqQ 
zXQ>zK!4T3e?p31v!041G^)bI%IAtIn)PAEHqrzQ;NUPeRX7ed84&zeftT{widMFyH zu6&e_mX0BQ;le22-1ku^$Mu>-Vx|6+JGHrGg+ZL86fCcM_sW^XOu`_rR- zM^W;79`y$q{x?O*>oe)3x7j{J$mqt%o}SN;x71C^M64#ykI8D!<@MvyHLJazx0j&n zOpvwM(*=x={2QMwS_WgX7-%*tC{fCN@;HELK&rCGBB%nV+k2>-iIZo=Y{Qk$y1wwRKFyAGZ#D}3nE5oPSB=FY zJ{gm2b=b|Q=BM{G1A~JPY&4EegFEvA-w(YChqLP=D}!T1W|K?aXzn@zYRpO1R@M48-peE@&J4)(CfuL63mR1v8CY_|yJU zXzhbK3{Ag(G?tKqW4^u%6ZSD!T5v|`d^tkKC9OZKJ%MnCKKU&hSsXqVDq2&2UW)r3 z2g%=W55c2~Dteb}Nsy4k_=%q)9~8pfU$I^^Md^{@((reL6Cx%3Yd%*!gz8u9h`l0? zmz5<`3I)@_ez$&k*e-&2k(2rC+!cg9q%%JRWX&AT@*zZ@L7EQ-|cHlo;L!(%NbahTpFe4RJj1`0+mP3@_&gFL3pHeHia=-yv9 zq3)IeTYBowBt^dhyLavazbtw{h-cE;S1%0DEFOA#<5oQcYc-U-c-e${CKl;q&$IDk zKc)LV?-aGL0 z$H{wJc3`Qj3X7!7Cy^*vUKHn-orS+{P_{g~or8{1DXh9)QQ&eacg5DC08=g6*-xyuA-gY! zasX{Ms`TY|FyGgQb>S!3k|7NckrQ-Ikh21 zP6cEu8*Ma7L?W|6LZH>-B;<8u8Xs2;Ckl<<^d^#ufz`R>g5+cn_%?YSD5G}64EsC2 zM?w*7$V}aNTZm3=W_dE+%2y={P46PFWs5b76ZToFH+h74EMFNu#YyeJ`p}Ln>NPRAN2v61ApT#R*REy?~+3|7b2WK-6Y;M#Vv zT3=O9aG93UTMi(w;DxmSf0=5K+H!fL;X^)3r759BSPh0T#@?m;7L42V1}HsumxB>m zT5FVY0j%j{H5qs(AfNCm**nu*ysg~cbCSOjXrpCwTWynINBLd%nUxB{ePKs?eyIxL zKNPs-v}Qom_O%aM{k9OoJJfHYlz>0a1k$hZx8g#u!0QxuZ#;Q|x#8aXSh%J}P*PNF zaQSl7a@y`jShB02uzys55&F#e0%Pg0{POXUn?{w`r+i2_?F>OmG59;QWIe#__M&DA zfn=n~YHdpldj|}$viv_Aa?owQ{8UzZ5x!hL;xWZn3HlucfgCCo*xh$={h3`BxNl#G zevb)Yf68CO;cN}s2rWF}?T$n03mLbzpQ(nxJi9!*hBDOsKFG>SL^S<&(wKCP6+&z8 z$-w=FO^_Qu=y%T`v)z!Mg9_276v0}VDSqF`!YID0eNYL?oL@?Xb6&kjMGH_i; zgd5i5r@X`yup=Wri?Nt+GVJmntCb_6m_)r#NPjtW@072&nnDy^Zy;O#*%CO%@Ve^e zi2_`jvzL6?OZ7};;UQWMIAoho7M(FxPz+H|_Yigm_=-j${ zid(T3ZnafRMfVX)x4q4&d8Rd}vb7@kkzhNL7D8Qy?+~5P@%*jli^{Ro#$<*}zKqC* z8>y&vN5S049ovl{P3T~ltkfj<$Ly|_r2|B|q(y6%`kp{5n(c{brP}X`-h9%Zp zaLD(g=QlsBHzM7MWHZDA+Kk6HG*aLty`fXLMm`uMzTwN^%|b01`VNuH8Mq<>&mGiF z@#y?c>76dAc&qEOxpH4TuysTj>^YSI*J&M6uP_sF2zLXyitTNvnAB&t&Xo#|Mu$ZC zu^P{~YA(}9kO-3E%|}ZpLayN#FO}Q#Ab@r{f5kl;(rE@0_6TL-5n7W-lUs2p?Be9D zaJdF|3xp_C+oS@moVA4}Lo~!DtIOw12fN_Wv@jShV4>6p-@1OwP{=mUl!`%jq>m1Pg>3BL}NMJNgfa0 zj~`d>r3%L2nG&uI!2+C1b+?$F55)5tw~7^<5}?0WJtbzj36oL+E@)4ZP`beB#+{J{ zxOV!XF8!S>%%412vq?CqF2uc-4(^M@9yjsQTSQgWafLH}`}-X1uD1VWu%i&3))z2s z^UJ{L6C_RR(rhd}bRv6KMgg+)4c%V1_lNjq8jo4)F1j4EH;ac8>0+`?( zGD=B`gH9=9E6B}TEKb1VK07mB6Hl1& zjdRmXff>-e6gDzC+<*?#p(W!x$`PsFxs{F)TN}CkG16C}fM#D`3!iKSq-p%>lx?X8 z&b_yPnqBe$hrZU!p|=ZxF2jn`Ix_+L8C{2JrIRr3!(9yz5f5}$9Z~ln&g;1-O0~BO zNl0&4bT{&BG{{*#2pbM>gJ~hN7Iyh+yc)yA%wIxp~D@a!oNhlG4~=Flm32B!m6`{BAgdq4l!-a*wLXzm!9e%1Jy}XC4VrKKlwu+e zz-i6<2^}x0AnM@7KK>i|sP!(UTvW6IMV9XsD{c}5QWay)gNNBrCnb>~K{z6=eIR=l ze<~I9tuB6*$@E1Ehiblabh(&QdQzk306}^&770;~MBXeFStn%GihMJ?UjIQwocoQ$3DzVEQuP=$*tR7Cs z8$DSC_KTrlK0#u@!baQ^&_-hG$OeX_67G8?85m?9U+Q5Qi&KgVueXJN0JaC#mC`$I zzzK%6HDnp^FR%*AG-MJ zhh;9vgjbf=WCmgVr>%@1S6boG_6U31hE}-PmeC$pntt!T^6Bu(rVj%vu&ApVeiB3G{SZ08*X}?%%}uDm8bZ&93Tii zpPzi!c#`pIRfMY3VhLJw?g)Q$BLmYkBis8fK8CaP=P%HUCId}a?Vcl*gzG75!{7EK z5zFFhBDXk^0GUcbJyxBWK$h`?&gwZ);^{SbuPU01ri$88{9B69C@4wS;bt`E%2gib zuhD~&(?K?F9tr4Z_-z)fsxfc0B%q%t${iVez;S|!gmK3f?=;vGL`~Pw0d;>(d~tR* z=2>_)sBCX?duTzBsp$rb{6B-hZco{b-CtwCPP13La`_R~a^ByaHEV_8)Rb*M2=#-e zJmb)<$E6_lW!uqrjggQedV)upI3E7l-p@8h(U8)}K3><`4*!fO`S&`+zbJ%^(rABE z%h%qw7LdBC=qVG`>+_rx$i9RrJ^)8;B>vv_^x2le9PcHF3KXEBetAe14uuBnj-a4qnl*ufSpcn~MK)J;rYFO8IIr z4E&iiW%5z6NX61zxVLE@iv*8_E5>=Du>+qE$9-$8D|u-A0MR+AlLbT{^4g5KIeiv=BU}ia=W&`Z1v@yA+Qu!~h z#=j&=OcK<#suU3E&!BqpbN9y~e(xKjSu!?z)>nv~oy}INa$7F`Gos}5f14;d zAjcP<>-7;woo7P582v$&%-1~Gt`B#(brzLMR)b)}elw15;W*$Ub(YS(9c<-_Hw+(? 
zV9XDNG}Xae^3Y6y5 zhs}s6#Wlq%OSh>y;j@65diRM2OqsK(do0k0rHZ+a)85SB;oK9Vo#)G;(x$EH$CGT( zmwsP-nGSLNO4s&bLh^WXubX7?<9=)^3)%JX(G-mNX_IdyvX4{;cHI8Z9Scs!q(ms} z+n`PGvtZahqCj)h@M7P^N_ZZezU8vs1O#;Y4PG!C0H?7mFC}*`yvxs|7m0>{UGl#n zO8)&y{MU(+?mw%OC8P?#gqO7sBI`jaTT4bUrVHO+k=wSURR%AFi?-Zby#wJYYu?h= z3V>tm=!4AWDyV)>-BLjazf8Sf*@&f$LF_T!sKXZCxNqOb?@Bez$g;z|Tw9|PbVD0O z3cVYFopVd&{?lb>-9Expmh>6J2T2F}_-mp6q`^k*xm29)Q2b>1HV13_o__LWE635% z3reQtZBYF2*QbTrAha7C^D0>G$F`u(_PnunY~a$FiP+l;pSf!#_FwJ6!RHZd&wp0p zmF;x=_a3w(b^H4d>u%8))Tp`Ntf2}?->cI-)yA-&v*X+g;{Ve+W%8c9)eBFpSVK?M z_28i^K9*9UE%55B+{(FYW$@rQ?PqF)rS_tCQMVhH@cJR1&n+Fe6q z(Phxn-#g|!2r_DFuA$1V_++i^W%$P&@Jkd-T~qu7{q{dY7OR3_tA?yhVn`<>lRwz` z?Qto3kJVlamaN3#h|h=CzLrBP!{wU3M^#vJ`f&X2(kAR7bMDn=Zv>YQN?Sh@n&ubx zs(I_Zh<=h-@_PtL#o$X7+gaDT@C5twV4;W>&^@lNa>oB3TJOIhO8#;7{`*8pv|0U6 zE#)TQ%IPWCvM~UC&o(zxXBzP9V(mFQ(^}Ad9_Jb+Sp_-NU7k^DZQ#%)1Lxg4kX12U zCSAA+pDjC0aPW^~L!yC4JgE$gXc)YjsOqrv%D3T12}B?KkM{V(o83SQ?_D;J6R<^( zei>b92kdzgsP)k#3WZ3`5>Kl}kgy(e9PF%w<6#u?KkE81Y3-;bFI6hIq{wPcn~y;U zXPX(-d?l=2F+QHa+ze3~(_E5TycGkZWC3V*q2WaaF z33$yPa+my8VCI?C&(Ik_jp!>TqK35~0V_Mqwol?4!T8oVzFz3!%@!Q2uR`t^Udi-0 zJB(d^I1*dmhZ`=J0*ePGVJX6Gtk|m)zZ_vYoX+cnZ+#;ezfcfTK6{g+S!YJEQ;ET^ zXW}ys8h;Z2r5@0d{ru$0zV_h?0N%;r|~*$$$8*e{od*uS5y4 zp8FpVC930P9&n}?A9h)H&Po#qVd9ph%Zye)bL(0C=EE^i`@zH<{;nOAxb1!qNk+jT zM?Je_qS{2Ouo}b|F#%Waei+d?TnUs@_MgKhOq6mv(?BL{O?RfDC4fiM3?S9dULgX<{ zb&MHErWQ8&vZWRU>^A7m@>U>I`$0KghFUB;l6lQzTPLcqd~q=1pTM@U6CAnsDzPH% z;z%reKS01|mB-8(Fk8~{ecP{kkfB?ZmzgMl?}x(Ly@M*jthj>l5MjWNNZC3mUs;d; zdztk&M9F`?8vkXYB>3oO>J04$-1E3SHThXL@`p_(URN6@`p-!xQlAo=Po^MUJ>dp8 z_NbqU|I0Y)im)8GeZ3P+&A)OMu!q2>%c+NlTM0c+T24z5^=tIrf9^$AcO-r&U{kwR zRD#Q`^T?{g1&V`)}>pQ(PNoe^>GhvdZ3hJx5MlLgD;N6+r z$uHj7Xm*)>{mL_K6fy`_=HJnb3bzKLxDs1|^SS*Ez0r91eWFu=!6^dM1sC4nJ~Ojw zAagPpJ-FVCFGgcIcWhL{uS-LB$r_7s+K{1)bfpcXhZ84)B-_BX!?1b_Mu5Z^wN7Zd zBg_v<%(FaAM;booxG!}dfbSQ@RjHpXc+@-Ks@9)Oh&__M>&Q(*aGY$Y%6*!Nb@a~{ za%D2d);z8UBf=2#ej!!2m;p;Sg4xUc}+Xu^h`Z+`qGpOeR z&!s>tWTxD=(3b+^*T0PZ*;5U9t$nV6)^G5uRDh|UWH*YLwBIoe>w~w~#;YPkiEU4S z!b4e7E&R$KEjX%K4Q|yWo1_98@Vud)YQM7y-1lBrW1Q&)xhEXPT(cdReX%7yQ>_Q< zy6TVqU?hweDaQ^8J@v+0G|e48X@j88tfOvA43pgJl=mW~umOhIcQjJ6MgyHkVel|1 z9c~*%c3Y}xA6i72TMIGx4% zJQV~+=ynu1_n^HP$H12(t?0Y!xJkEYJO&i*uk}-D1m;~~JcHzeaC@s(^DeV7l(3NA z;1?Xk7&{*Q{3Dh4^vuOpI_@6q%rEg1+3JVaH)&U25>lq17)=BEo+7lg+A?(PP8~!Q zUrV2x9>OmQ>*$0e=m?BNtYtSVGT<;Q!aq34FIl4!qam>D@@j}N8POERF@fFR&g>*j?!L&Z; z%QaNG@u?mC!p~EhZE1jwUW&P0#w46B=xIBhQ3*fUgHJs$&qIdKIhN-X$6$m^rT1K7 z7wj;-*4UqH2gmCvrFjPbA))#kqU7)HsDJW>|2Lv!KWo}OJN=(<{YRYI_EQ76l69Tc zSHBafUZgzOVK;_*Qyy|1IxvlU#X=-gxBS4EylaQ1xW_P%!sEn~$N7JKYyPH?a^}BD zlq}^7`3ww^aFDD!*LAuS9$guKJEdNOKb32eADa|Insdc7>Vxhmq3`>LmAMer9+EX$ z&?mw!c8Y`-HL$L|J($Jpj;>99jQy8?Q zg8R3{z>SZqCR^?oLCvl+>*a4MLH)2o0pGn;NSL&z)={#DI6jL;uGvh~`sSroOAsks z_EW>k;*t2w@mjp>n;NVL(NWpCHycjAuvOAcu7;QC3-j8Hxi}=;G%QP516*~Yasr)# zxH53;=tA9F5ct{f87Rxd1seGZ{NSO=@o3`lE_QWCMf?n5m1p9-8#*gsyr zB?S+Bt9!K*^U(5+qxHm65)^y$zEwY+1BLFP)Eo7MC|_(x=G#;auI``nt=g(UMreKaP_co{S%YfDfCSV} zo~ES^&4=~8HaCCH5*SL3T3x+g2;LRhC${w=+~X{GXh4?>lYw*Jh9--!UrJ{)M1`9s^G%-b(Dt1yoH$g8ogY;ca))%%R7 z=x5}QjD9c3!)|ZBw%fD0IQz*x^O3qKbX?F{*B~4+BJpNdD=ao0o@=MwPl@!uyDCsI(=K-i&iJ{LV{KLw1qcA_nfd!COV(V3pV%4@$;jL(e5 zZk@40%BGif;b+_A^f#Py2O?@@>AQzthak{P;6{4l>bng(N(z6w$r$pwk ziff0m*qsbe3QP$+&k%}(!-JPBqf4Mgvh7a%U2ft3tXIrtO*?C0Z?o;Kw*=AC8d@^jG!cd>a@6(Ew-eus~6RlFg!iRD_CV{)U`49%X`cvFA9o9P%abzmU$ zBkZilF4c2*^=T>0@sDv&_~!xTaN|kist8EEcsNdTtpG`e-RaL=3SdO^OF=|SAx;IJ zJ{L@p0Y*1cZnBkCLTr}AjcWqLV(`MTp;^T|I8yz*rPy0R2HkQ7t`aUJK{U)9Is ziNTekg}aq7nQ-ads!k5Jkl%X57f8a1J$KbzDqY}M)fI;bQUTF9+UHWoTn+aqa;$G} 
z*2A9Kw7CU>w2AxpCu#I*DR^bv9jkd{g-LrFpT&0P;P$k3J#pgGl7IW@K>W85e48`J zLwmCVldk9;$>~pno)htOOtJ6KxT(PSK1~DWEk>|)1|tfVs&rYq8RK<#@952yRA5gY zoIA)<0iPCPMtf;O;jYJCEsD5Q5Oe7|)%++Q=mN#Lt8Ws9Q&h{;+o;93O{2pNFA-&P zb&fc2r2}U>ljZ&1Ht1VulfLXO2r!;gG1&nO)_1;3u0G%pO z(`(LCBC_eUSsj;M*^^MixQ~ZW3&Gh#L$c=T)u6*R_R;-v5z@c^#cgd8jTc|}-JSIe zgNrfaI(kfXSh2o2w^btn((eBl8NcjP%+G~5he1JU% zO7oT8xxhx1P>R~t25d8(8hk`NPX~sMo$2bUgdxj4%yl{qX!LWr@5e*p`n)`5xkr<5 zFqDXWJ;6|j0}OF?tj`J{Y`XEv(b-Hek}F`Gmn?u=+7p7j991yS5-lZ}6N9I|n|(G* zBM2j9Wuy~M!Ur$JoPPZA1f>F%ou}8!p`4{HE=x5JQ(E6hRn-ua$yE|+UzO#-%l`d` z>Fo(v_MK&ZK0y#vgbUyEr7FfvFY3E*UuT0+o%9?xbsShnU6)obYQQK;*N~USzTS*fWCZs{$W!YFpC^gl-bvT{UrS_fs$qT&ijVav&C09T2)2Mem(_n zPBz^x`&|TmO^aIvuQ)=<>eh(~{dP?IvuGE&S&OTZd#jRH5iUm2IgI~^K$@)yr&K3> zaOZvYuhhFrkQBC`_QW4OII;0rnzgAAmxkZ{Q7EoJYx~<9<`)P7V5{t)TA?Kx#m?=hK3TFUUfgP0MgyQ{Lv*%~W*g2)Ls6j);8Ln1yjyw8FLGHQv=p z2dFJCp5)xj#un#{<7{J27_sYw|Aie7;akkFN3E9|;DqQ7y4Y(;c(1@ku1u#CE@r%! zRg=ia6du}Bj?yKd?oy_&QTG7{zB?t|A}@e=oS7@nNx?~pgYQ!0d$3ygP`}7JLh7pK zM$i-qI8QNOh-{C-`^xd!c1O!_=e0_;mYr#^hj^#YEvBQY&xMvnk6bLjQDNBoG#T9V zli42%7K7Vj>3+BEC6MxPftj9gwe&R^KlMsTfiYpWmi5pU6t)Yi{^631BB>vve@r57 z^B*qRG4Brne(jEn!_AOAU%q%XwF+lt{BzmgWN?rM{TrIv;_Gy-FFm7M0k(Djo$*p0%r)|tEkioWP2APGdL|M*1{!)AD5CJujUy}91Z(kk)7tS{`dPqL z_HE>&vJcEERuxtlCBP0V`88g4eN>TR49_Erho9=FxTw?oa4!c7z4Cw>3@=VkB(~Or z>lcbFwct`5o#tR%Vygslk36S&`f|K#FfXxJrWz-m?`&9kehQ~H61ftX-xEk;&AqyQdzQDJ1F-lB=;}bq0d!Y&tpE}K>HwCi+!LPeK%>c z9VH{-*pmZ1B-JRC|8<7kjZ}ya8D%uDzV`*S-O~N9T60k4-JkbUfBbNse7-%0DiL0a z%MYnY5ajL|{*L@V5s*`u)zoVn50QgA_Kh**WBL{FDs#_9lQs$4QwPo(KhnJLZl_9G(bI0LN z#I&shjqR76h+K*5E#Dc!nIg9=*y*|`jiD#t-@)z%C!qY;T&JnIR@cr=W zn>@cLTyPZNb~ZY{ zA5L!Js7C?<+f6}ucYn~N3^$2|!sl1~&X0tPfHee}0*na~BF92}*Jd5G`OFgox4co^ zL5D2)RUNe3>@)MWA*L{1>7u;NWDJ*HzFVYnDn=2GK&|0>o zybQjPSDVz?7D2RYpI}55V%Emn^V>Y~@%0%&v5x`eu=R@3;b)?;=vX9N;AL5bT9w-? 
z%Y7^0TYa>}zV8mG;>h7;b07+Hp0Lv%p3eb$hvB_rM1#+T@i#Y#sRbve`nER`Tj&(} z;|_Gn1bLP2Bm3iB7n0kk%E`u-L!foqRRtqU=-G04JGBJ~^^@{3HZ~u|l^h*7iij`* zbK{`rD|0;Vk<%nZ6OEjO-*QLWyiqYZfcj^36oe1BemTpSk6*2Yk9(G-V}Bgu!jzQ> zkiLIRBN=wW8%%OdBQ&G*Z!hRrs*#ZC=qvhmVm*DdU zrxN%I9BAM22_TojCliPA6buj&lC#`bjiI^;SEL*2!0bstocd4z@QuYVkQcOp@4=ho zUu|vhs9wcew+~qu6K&cfY?gs%N^M<#$N8YC{N>@uFX=cEld8pkA`dRFekW;BG!pxj z%rA3N);J+jFzl070#pHta#Z2v=pg^P;Y+0#sCEsHsyuB)X4f*BzGAIKt9Mj8HBujLMd5TOlnZ`D1;Bz)CFFMeaX8aB4P@2mdRff7|mq6?9PR9QSK zvI9O~{5|lGr*bH4af|DiWk$79)A`yabKDzC{LkMzNEKIpIyisT8 z1^M@7O>mg5McR=i5=0t1b13QkQ0H5f!v&c}P}qF5TirW}HH-}`c?qXHh8we<^r%fW^9v)Y~Vb-?o@cCCycS~%X7 z7#*?5#jgGBI@POH`0CT}1L^Qsv>|O1zCkD)c8}+dQx}xMP6@fQ6Ep3QbmjE^=+Oi) z@|R?)|B;7vyX>D9pNK?$!}UXRP=^YvwEKR&%0#6(&sev4LW{ebbc(mZhnOnbkmaO9 zkblL@yw(JX#mz6-rY%51(V-w$zqWkj7W$O=_-@@_-LAi}V*Gm@;$IX({5H?Jgv(R_ z!TiaWDYTxqRbw~OUC)ic-H%hN}s+E=E7f|_tSOytnFpi$g) z&G^Hv=NYK;RYj%PAOmlt_$WUK2txUbeRckxGHVE}#u~E(sK3W%oyAE`;%kTM@SL0t2 zC7)kUt3ME(fO&;Ty)C?>NEWHYpZYQnosA=6-m3KBGwKTmb#GCX|1+ZG%zv9GF&=je zdTdjOcUf8}b#=$#^q@sh>X$mO5&q$lD$@@OH2T*}9+m>l7yi__GkF+whmXmKtRE=J zww~2*83XH*EY=zACg>ecQAv?V!}RAO18<8OpzaSx=V(+d7}8>!n?ePep4-+te6$NC z=l#3o`l>-xuJ_mXqE4KAp6d9frUtjvS(ZO6ZNoK(hoM5->p`p7xluJe9~6^@+2}nA z(CmS2q_%MbD67Tpm>=$evaKKPtXWpz{Z-~CDXzizXhpy|)}j*(Ofp^-KFUOns#n%Q z3f4fXF?&?y(+-hh&x)V?r~|RFr;A78iT3h!*SlfoTA`0wL@RuIA9Vb9`$*iT500LY z_p@^xfVqZqgLlt&U~bOV?W(s@;T^M`Qr4dom@jwZnAu)|TVu7Kls5h&TJ|?Y$-iHT z|2k2!H;3B9R%!zKvTiBM?(l@+*%Jq()55^zLE&twUpvgaIVCt1+X*gDbK0k}V)0f} z$o{V*<@o+Izv3osCv;?7@5nt8gW0Z$Moq)w2tS$DW%OoS} zvC?j>Im7wTYWlOzg%CvLh}KfQ`%Z`?$rDQDPUS&fsmfRJ7cFp!?d&Ul$qp!Qd-Ler z)*x)>yw0DdF#$iPKcvbn^?(Au+tG~3WT;e@9#?g$fGw}$^m|!Hi6Uv?(s71x@cbNr zwOa^3O~a{}%27i0!1!@XV`v+GbV}b}F4Tor#DZQwH}3&f7omhOqXwu^&YEXa>BM!` zf^>1=cu35pzTEdT6Qr}dBV!yJ!8$s&>RV?&DE{!fntY!GJE?jn^X40{q?vozJF4-o z$M0{5k~9A!qU6Z8Ef$<~gHR*OllF!6BRcQ7VV9tki{^r2&7YNmP^U<|l(lM%7`PmL zQ0rwIJjr_#Q%Ti~S3TbHIz$ItS6{z~j6@q3x)0>5U0ANsI@%>aN(lP{_Y?*V!^-|wQ1JLO z?xyqimS7>mW9B_;He0&k{#SB&p^z%fO1SpxM!gx(b}YAk5hwtK*cj^a@>)z2JWgNS zSc@-Y9|S!r&A|YTwVqMMQ9L6qGPUFSEL!K@WY;RH{_E`h4N>xsv-jU8O75Cm2xgA$ zfU4()d3vN$=*4Kt`z*8gsu^xir$oc<8DEY9B3r7y4*Xb>(N_1u4WgcOIhYGoAY=GiqWFV0EZ=y2vZkyDk55yl&30#Fu`F+$ zSBnoc>0FYmSkDDdna?vc)cwft`1a_FW+JWg^tQ*-^O0C%N+~n6s|rhGC<|@ag7GlB zje6Zo4@SN6(&g6c#;2q{sk`mtP^D4O_UBqV{_mIV-w-AL$BFsZ?W}(%N+hmn%1UUF za4zomwz;3qaK*C1U5>sHv-)S|S4cTHI(w+QgRKb#30Zj>)9%99512L{{$_T62cB2k z)wtKc4Ii%-sMeYjQNg8ahTFty@seldpKFpNP!l^>5~yEIq#F<>QSxs;{QrX}`47MKFOKT} zl_(+BbN>UPM6K_KfZIkM80p;Lx>Z~cV-lr83iMT&;^nI|Xfy)!n}6ohS8@^0I3Bmp zZ^JD`%H`Ue-KYgOEjVtB!D?K=nK79L%%?D(k19+6YtRfldy1$Qx`!4r_DsU|+S2<9 zb{$x#swV8YqaT%RCse*Ap2Hy>WmW^TuX)VrMN-k^n1$|o z_y|;)`I#P;DaK0k`kSvVx8gt%ncHWl9xUhDEhF(}6s6jQYc87?;I`wTyI*7{VH)3v zn9{*EklZd&LiaNr$=Ijg4~})fpJlOS#nw#t=u@Zo@_+}NdVkqyB4P-IHm7A`66%O1 zfoI{J<~*2kV|sg?y##KakCp#rL<0KPrK@wth+^E?{4}-R_z`8!a%nRHHGcW?JACZK0@?v)D(4sIA+hT9X*vQmFKeCU zbuI$_?Wdx4##iDK$vMLv!2^)bQ72wSO%zq@4;8Qw#jPJsXTwILiqKo>_a*s=5WMuq zQ{Y}=792U0Q{ooV4Osnx>tl5_z!OEaiyA#hZjrjYZcoH<^;)m49V~#&otL#X!CAk>blb)c+y&ECx?|lPS^7se!Di%RNhi)W!R1Qp+3V3w#gI$fflGVuiI$_X2e+oP zVK$76dpg~t>%eV6e@fdLyYWGp<|2b;4M<0K-VCl(6+w&0Eg(qJG9#s%;GG zFlRe;y64OQ?ue2aH&^JuV_BAKzJn>a<+|v+0&_b&C*LM3M`SI@HiFEi9&P_?OaC{8 z6v6){QDVyWa6?0osL+N}>|<0b#KW|^9~lb2z~2szbK*vY*bxxUYE;mUEfkF|)LW`B z?9?iU((PP4<+yJP`Bop)q-e6a+Ej^=+A6DaIu0P%$X^{Xm@nNwPK*$agg4KA*>}pPK*_JJ-F8oMp_p2mW4SOLm6*s= zW71O5KiPTgN@X2X>vm8+5o^Im<>{|nwHv_G|8u4#p}72Hd%yVQg(}dTk4a4DPelbC zvv2&m@$gk^AolI;I1En7&!frmgupl=zdDi$>%CD+f|+?hcREDIkA(zmLi=Wp5~Ihc z#C@!ciC08K@!YrM%xn~R`<&?jVPU3#%qcy-dSs_m3`T)0JoYr};jC^YoR5ipc5bi; 
zvJ7T^311*y0XykO1+7A1He{{#b#^;G2zYuk?qVvCw~uAdRuGlv?HAvN=9Iys69!I- zSdTt0Ec>rYwL>500j_9&T`+C5D!8YekK|GBa%jI+V54n{K$>tIt``|gGrVa;IPdS3 zte^;9%lBo{w;|3)ow@wepM>@y*Tsh>lELG$?6MdILao=H#!=8AIz?oUo){M5=)SBM zq2Ds#{&n(f9)eQ-`7Pj?Y-A}UaT;8@SVT-YD3gw!MiZbRBG%v6lfhqh{HaxZEKH<{ zj7G>31HwXUtBZ+<0Oc{?_N&{74&=V`y&K?+Pp;rkcE7ND?DDtCwU_4%}~qf_19}W4;ow??^RtYzc!7V{`g> zc0+7YRx0$qlM7GZp0Fv5Bcb6}Mt?LH3=w`mr9 zw<>9qV=jgxYogXmdCK@$!_q7NUNB^xsJqPdxei2MJZWwXvL~EN0vl4HDNy%NoiFT3 zKDrBRe_5td2mTlL)iJ)QhB4P{*^lOhXcg1+Ufezow(s)cU(6&%{k4azcu&T{#evKG z{A>|;RPst+n`#z{zM$k}AFY9yl#BbT%nRV>kR)T^S;82qCZHrfTY}=DKSd(Wx?_5w z|Idg&iJ<#Y^Qn7ZI{ua|84&I-08uK^G0}TfkZQX4+<_qzQr2U6qCX_z0l8-uTXbGw ziNsrqvgtHn41Os{^NS#1+|`b;8iCY4nOe? zM3{EokPK~3hd!&86PtP2cwSm%oBSxzLAz-2UN<2XvjuLl77G)5tMj%yo+XyR(V)8e z!?d|5o-fxH{W2SU9_*>Bz7q*Qzq$&&-`6_^D~K)L1qf@7<8`|GbzCC+i$6H!A|sR6Q>@@NEO?9DTMh@+}wKXuVZ4 zeM8~JmK3V1x<%kYI`!wfQXy`e8n+SnmWCxOXDjoi?V#)7p2(N<^>{}9z6|}@8oZR= z=y2zR4IX}8pLm#51?)1~Av*|cxn1pn)$=%FV4i$yUeQVpWKX8|1o;}nj}Z3ihCf9> zHFacq$7Ujm-Z}Pl_l^t<4SpwRu~r7BV|a5bvWP-*M66LxXfpJSG;xh+7{ltyN=)+m za2bnt6#ra#iJ}5b7B8no`whUCBXu#opPaH}x>&OGfsY zn3g0XqEmd^J{SCq)!wHh=3pv!b)aQx8Y-=tiC=w6xT}^9YWJT);O5f(W3ur9V>^7e z8YZ;CIg1EhsSzS&&@=4}o>3^(ApeX{C>b{2H=0qt$ibf@YkBsc3b8#W@68Rv8e+b8 zXrSP-Co#2dfhm@lsV5aU&>u=X zZMVjt-autr+khL225ZdF4iJtiCW#x5Q%itF&pAgu>mAe!cbN*8Mc|0Ri$8;};^0}5 z<#H5}nHLtfoDos)0JEp9W>j63$noWBoFf(CSV29BbE>ttnDD=Nd+&Iz|F`cyMU+Zb zg-}RTW+JV_Dn%k0Q5hLoQ6dp#XU~vL_TGCO-uB*mWMqWWKxygsImCANNOXZ5|hqKKFSk!dSr-z0Y`3@IOj|&)fDCKJ+09O_vw^ zOs*6G8D~&#LQp3`n68)?1t)^Aiu-WJ0MRkF~^+wkwo3_r-$7zQBg*uwY#3 zomio{8wa#BcOtmFTG6PEOnOg39@0*5bh*E+f=55%^{t7NgjFA^+q-;AU{Cba#OT)& z^e=z4meDJQrFJP{2Y=>)U4-M1=BXg)T*){@eWMZhMfJC>RuP-JX+gbbjPW?bum3q+ ztqBG+orQ8IYT!~%g63791T3MGxac7k3oQfT*L^P!B2`fR`G%Gt{2J9zD7KV=%yBoR z&upoHv8B3G18&y%Em>7;ogf|fVs-nA`;xGoKBG=sHxX6e&7Uicd=BZBcV~8m`Qo)b zHV14qh|P1yE_}9Nq=j;I}ILHvGYC5s)s-4jxv+4dE)-a8?1Wmj+lD=l=z$94R|B? 
z6chEKI1Cz2zw%2b2(@({Bqa}*<5G*_9=9tUu%NeZp~)x>kMsU=;+V~Y@i#&b<0xAp zX>?JIg}wuv{k0@>^GR@pEP6TVY!~r+u7K95Byd;N8X2xg#06=!tGzR+NPSgzGl$3% zkhqv$kjdbWqXDoRtoY+jWNjWfY<}9p$6;iV~C|zxV9m&kA&%Il6nx z+-o%cHYb08ofs}q+>#eImxgyURknd+9QrU*e zO^;i8Lzwc(k)_ns z94=aZnTX9DQmM_s3799=SbU9em8qQxiZQrMkSN@y&Du-3K((`X2mN$B>^{2R;9P4E zqy#MMD+AVCtNd9 z7r$7N5}CuPbzRDaq#dD_L?{AUSn(&Y_}8Gmg7n9_?b*03`ULk0i+o@#C~W_=l7oUaD@^-$ zHN!8{cHL&vYFM)y+>sKI2YWc?%CmpG0+~Zi^B3Y;;2^mygW}I7$Wf%Vy=R($^lU9f zI$bqzL^#yEuB!^iVq6W31%q(i#`hpU@txhG^Q!$I@g0&A66ZYjCjxV+YVSY)W(C(L zhqJb}zXHC4Z;y# zpFE*F&FfkO&T3&zTX*E3pVmz_hL%$B6i#pWDpUj=wCnD&(?rCw)9$%hqJf$gE-P!( zS%Ve6+fJB@JwTy;A(<00fmnSq+=J~!E)?v$T5P({57-&$n%8f(!Rhk~JA?la`w)A< zjIGy`AgsyxW^QO6@a_V~62l{*1*!>Uk~eojs4c>!9w449pCsz5r;mJdq)i0)6A6i?J-LA5d&)dw#Z zc%M;Ulq%bYO6Db2YmT{a>e108lcj2)II;ii=#o1YKPnG)*oi!J?uhH!O4`v!VMU9F0qi#HUqQEU$QRqmV)`I z!xELoZSbzJfIq~c7B9}O+T3EQ1|g`Rtskz(Gs!Q@G&X9nb(Ea^RB#&l*E>IbC}#&Y z91N;FLx>+QuJ+dud$JSUkF7TcNyJ?AH_Ek)cqkv@TBTJ6^#?`&ynjv^6!8IRMd?0~;P-oGJgW{3_s<`4lTU%g^+kEh7X&#Hd+V0$ zN-42{V-)5ihGSZyj&NGjIJOLlivHUP`Y$I4hN z01++C$A*>!hbeqdm(*4Ys^^5|4#o$g7u~7KQq2hD`{c|utyTpe2iMI*3r3-7!Tt3n z!Ld!rh(*~2Pl4y+8+HlL1YGEkS-f?q6fgVVjeRy*f|icQ?RWf+{FhhbUlJuI^KbLI zJKo||?$PB7#8volAU13uyB&TRtY$j#d<3akop<|hO~F4SN<{w0M9HT0(Pt#5HuMd+ zs8D*Ggcaj#r6z(!$jK&boatE!SM&FmpS;xta>=s$+iuKaXQSNRkA&0Yi!T@b^v}1j zU_s;Y=4UxN*yq=iovuWk$^qAh%7kp}zUYGFNINY3;xOqbB4I*m$lB#wzEFC3+ZEN7 zG_>vvsL&u{i%VYl?^1Il+R6T`K;uBv619I;%J=u;1@f|nYaN8#E9Izef=w0X2xUzK z?XG|~JlpK=FLyv(Os0iUA_=i)SIG(Wm!gT;c;5@E;R-XNw})_8cy3EYR6k_d zeZDy4OZceGHuCo@RuZE!8jq$s60kWnjB(Ry0zbr)e{%QvhZz5Fh?4)f690XoWYJSl zFUO`21BYCnc%`-w*s_Q-@0%`==~$>;muf*vQ>Wl$#u!LDkePhix);fp(;wQljX+U~ z*J{28qGsHYl)JLT+iaFav;0vL6n5UYkYU}7N8U3JHYBz|gwxgcN3_PU$7SYbur3L& z>))|gWosj%d}71D83bdn2>US!wYM<6yJh_&wu8-MJ1&^X((;d3q zXTxz;$i`c?Faf-FJ)TZ=?uOAT58a&qjpq49GVhvcz!45ls)*Y)*k1fjxL70^wtXz5 z;fQat!oQ{0&|04 z?`0<}PT2*z&#%{Radk#>u2bS>9|$W%)2HRjZLd*0Obh`&!3iA>38Jh#g-(2*L6QMA#Cvbc5P&N8I+sT-;uJT(rR?}7HL z%NLlLdqA-J#(C~z!^jiYoAUV70Onp8Ieh!XEbx^d4|FM8#Nt-);da2@6w| z8y>5hSbb!~;X&Xxu#Y?}@Im$;DC^%4CI5cl|38S5|8%T>@vHt{i4tNx_kTf@9DX%3 zBqE%PuCKe^uByF)FSidxtTz^byxu`+suB`tjd$xfUM~jD$Px#JvQA7b928t^>B2eM zFQN?3`#^;4cAjO?mS+YKwA$lc!K(Y7AEciSW7jnM$q zhf{osecXd4iI11`qDmo5JQ;39Rf1re(GgF!ezd&uvi4$8JQOH)(6gyX0fRpZo@({FKqdFhh)5Dep>g^eGUX8*>pcWBT^_*vYgUQoXclG* z(3AUCHG{&=v`Cw?B%nOvd^y0=6=)`ZXI|~CMyt%Z2tU1ijMFWbyu=WNj?}%5zWtr( zdH!`nyhaC%+$ypplkWzWD;17o{tX}-+%K0LTMOUzH8SMwt^-Q13Jc27ArSvzXs(bq zgu8Nj%J)@u!Ma*MZ}Xo@qKIst=W9nach0k1@m`99Fq8KiEOzx!MfM#PSn8l;-fy|f z(hZ)qziHVaR7}vqwS{NSx&wCK-;{FiK-nxy1I@3c;P&AAuLHH!u&6^=Tr65pfcXvU zW2-K7nrofPnl6Gz9=1wbuXbW(_{nh7z1jGq;GnfH(RA^!7qQPA%Ef%jLFj%E13Z^r zt)4nR27kJKR+B9_q4sLcbH2!AR6m{g*@QD6twxQ;!eUFX!i4V;o7f=CmTX>n-;_oO zaW~Ao%b((pC#J8uZ*;@^`Vk4yj$C{&op6ApP=%!JQhQpgY9RGh=pG%mTx2loQo9iy zkC$!=*16G-K}KHh_O`k>Sbb7$t~pl%%E4JeT=|5PKl=@L5vdjO9{Zn4pC#69d#-ca zJ!yn3G}k7>O#9%o7#y=*4@D2^d)fVE4iKyD&HC<2(SF{cuFVd+)iyTL}}YdzPU*@H7VVgfF6;~*Lsf4wfP7OK_KJQAJ}>c^_AKRzcb zz`y^Nqg4x0G!Wf1Nj;qZ*Y@gfh?4*GIR5KT@P8pn1n88O^D@%m_G+$DByS~<8(-#f z*){@u%$~}28w~=>rSNzAD&~m@5Bbujky_MRjWOo7NyRDU$NL7VGtj`1Z{3k-n@;v~ zzh^ff3}Bh8MV1L9l%R2Q+qIsJi+j?nj~Qn|E)V;Jl|?*s-lQHc7a726m7#jogM*O! z_{poseo3gMykMbeP!8{H#Xg*vi-p1aer59b=(7b%YPyUhTQD-$zoiR4DBt$5 zmmmg_DmhLciY+CS(lRmcQ%Nu*y}wGVx)TSKf#-Z~8B+6eZiwVH!ci;k8xHsC;rglD z%#}N9FlYSo;@#VspvNU>pWstYM4IRvE;!hSda3^TJGeR^PAyP=tB&+eh? 
z;TY&TV$yw~10G&@a1o9NqWfndhhLQgxUWs%c=UB*0G8&6k?*hO8)MS`X^ube)UVb1f){`>sn}ac`!gE6IOrX(x~=BG zRJy-u*l+wbO1b!PV-`Nn6J2|aF`$h$;5eb%@Yl`#H-(f_|3jigewJ$c7)2QlaVc^| z3)i9OXO7_ev1PD2TCka0+>Dj#C$?W9ZngW@9|RV~O5*d;53~Wx#G1g{e)|B?;r?hu zxml5busr0^d%zty5MxdrmMPF849R-@8g;(gRS9W zfAnb@Q1SBE`vZNoa0o80KBj#NbqA*h_&$|Ffhl#>LxUFR(i6B6niB^w6G~FrUx>Sm zn|X4-WP(_7O<~LRJg|r3$}RQ{xKD=sNn27Vx^%MG{Isn^$AN31PWy7vv}y3G{@4rP z)RM~_7fQmNfu@e(K6S9GpeX#ywswpT6^fiWPzO@4)6d;s@*`3OQkD--=3rGEJqsNb z37bw|6tq&UM$7EnZ^cQ?kn@Vq?e;rejN_)4>mxcj?gqPDFBcI6&!ZmF70X!IQnOHW zVz34hxCQdJ?al#{Cmx4o2+y2^=AOOK_QRjtVV4G+gnUv=t?a%V zng>O#k2v5B9mASCy2ViQI3xA#ObYx+Z5;a%m5wb3(h~Bv7vs_EpLbqpZ9@9o9Hp}k zM9p)@UFj3`-pE+!b=R$56!R>cvo42b9-)2rSL30oWO$;- zX45BbhsSBfww^A|LY0&5R&I$IsK&dR=e<%xG<&~{o0G~wepJt9M`|-9^)_Adm(4(l z(Q%H0TcgnV$q8GUE3c73e`msbwssT{&e$`Q-;E#t2Ckg$!^r>+~+jA~7%?opc* zVs&`jHr+GjIDI&kU+HWiaFBe3%=cH|aIKDk3SS*OYP47@4HE)py=PB%j@Q8KyYOe% zmzz-P%=7CH*bx1UM@@~WuVB=ll{Z%R^H4`}ck5}fuHPcC8Vz$qrb^?8+w1*tX`8K z?C1UN1t2@tt#vyh0l$fz9ucW{jXP#HtHc!&usGm{o7YEz+|X?j<=>wO@fAKQ z>K_|XFst*GU?B-;63^NUhjyb^u&k7pg)11%TR%DXz8@Om4fgpEqk4>ob7SPO1;j7$ z>g!P^V}z(te6h@xX0N{@kBzicn2duyy=>F3_1CR3H_{VHES7 zn21A7puT+~V!ELag~>MZ26pR$vVEt`Pb`J2364f|#Pe?{5w!1zbtHDW>T6cyIYZt- zM)(l@96zupGsfx#5}h4eM*dhr;yMxHnNU^&E?L_*=IYAvi5<(y&5TH>7wR)Un_hsf zeix+cS^~kB^YG;Ky(}2g|9PifBoQ{ma_dhOmLYq5Q$fd{LO5rs z;N8JXM+$`E(R?1#EKcU(icMFe&3reAb4ll)7bhXh4&;1(m`JhFa`7AttOwm|EEL^R zwfOk#D|!>6lirZpko=~!4M-Gfhm(KALCIRx?VE%rw9hmnYQr%I%4etQL&F-eG`ao2 z_pEf-Yh68d)QcGF^KQF#bFl-&s5u{4mX<)He5Ff3K@O4bA}n(6S3W#96uGd{Pzn@N zlS|tJ8!*Ro`(>51Y77g#6%kB0#`a34uiNb)+*b}Yx{RAyC>-MdVtjWu{!-p`d0wXt zDk_W|0!)j+xq|fQ^I9#OXmz7b_d~e2$+}utl!p?Tl}s1+HbNv9qwmc#$#_+B zJ37xD3?r-0;-3-@Fpe#sE#f)?fl+5e&T_3C{Y#t0&Z>37*I!M{yq1;V=}FEJ@x~So z7#v`izDp3Wj;4k0h!L;U<}dd?4wRru9i>H%X&Vu#%6CaHu^J!#VRp6-iG!0XyezYk z;pp&t$XI>67>1sW9e6{G14IZfGkl70!t)RPLR{~6g2&1E-)okgNb!vBVun*A8V0?x zeh~8#59p;|s%J>UJU$mnd&x@7eo(E}r4b5D*XWNK5vdFmr%mj8ZyyxrF3l}8w?sBK1}K;M}BsrK!-jXkokIk+Z2%s z;^Y_ZapzG6M3`1*?3{~(CsVF>zfb34Xxqku@2Dmus|{_jzSjUWG91R!T#d+#GkTW{ zGa#=gbMX%0ez`zTB`TJdhi2sbmF@k+dy8?86i-Aw#17NYliVM}ZN|J%; zD;tGuX3m=5GUMQg`Cm+Xp*MKmsiug%_82B0*m-%T&I;wT| z?s`OMA^!wDb}cKa#S6F8dA3|?gKhQ~K2^06=^a(uX$RjC0{7pHbPBdngqyBVF1Irp z+2D4|Ps=RCL)={Kc?~d;&;RT3(^@=yp>~Rj>k0C37m?{wltO?U&v{PaZg|PLwM9XZ zAeu;=-}&mi(B-#~rUNMzIDZGMloIZYtld-BpWpKakHMZJ<+cr=N)n>8KVOOGS9?#! 
zvDd)j+jAoU5e48oP9~bSr3p(MHAeGx7J{mF`O~JaB`{DdmUtl|59y*IkH&(S7d42n z-4lcPR+k!9K8Q0xshv1_^)qQP+1Q=fl zX`IY{0apvh6;dhkVc+ye*9bBXAc-o+Xj5CE+#{ZyyJt#~m5FzNR=g5i$s0P1%+m1N zU8b=bEW@Y+p&j3ej=Fe=qwHRvE?lI%VwiWL3MIvz+z)HEgP_9x{>jB);OwQR$jvQ* z&=MDBa)H;dkCvaB??o$kNO|>3iIhX^)r&FYrIiqIF1!43MIv}#xDZ~h7zeU+2P{TX znjlSimFQjNLwlNt`R<(rffqoTJ0n_x&o(@@uMtFX{x`!Hm)DYz$Z(z-JK<-0W4jd~^6WvCTuYr36Ezv+zj`!7#a`Br02G8dH?O%?VfJPb4&48w(YhrX>@G^74r zsspjpW!NbBsS#4+;P~r|Az!&DNKv^-B`Vzllq_N)M0XeCtetqKOp8En!m!Iqt{dF0 z=-z+ZQw+IxQG3xe3$<)ZCuTG3!RwDOeQ{k6G9)JYC+#8u-PRBMd7s;0|MLZl9D?vQ z5;tbpI93QvRKM0;=i<>Y(lau2524yW!qU5=u?iQr`N^G#?SpTveJopz3W<_MZb9tn z9Q>Misu;hRf`yd%SF@ZJSP6+~R(kdVmZRlW_n)eOX-35d(uu{e<^5#K+wy9V$~`5T z`7s%%B%2l=cV@r`=HBlCnt}K$cp;!*fpFXHc=CAMDh1Xo6PQ^()x+b`gckYbG90_% zv2|B+I#Qf{$nI$0v$x*A!rG7OLL?jz|-!)fZ$LXZkT=|K>Usg)FCAJdH z-aj@glMBYOIVL7{j}#0mi;~t`%?7^ntA`FpwPDZ2bK(0Ldhx@jn?0LV#dzqZpb2w+ zDc)LpCv9YiD4(@LH55{ZOQ(a`msrDrmrvkTmQohdDX*vfnz>X`V?iOa#-zURq*0FR-U%%BacT1J4+DMPK|yyw}Xs_T2M`!$2iJ0e$0IOh|6r z2Ie_<&NeXWDUn90-v7sqjwlBHFSOuqh?4)PL;QA1G zE^&VY5{~mRMh5e6O;t_!s^M?^*pUAF?4KFnT#c~sE%t?NCV9h}4=4Ys$^0J*A^$C+ zgwn6;txqD{T~&N zDnO{yXW&G{G{H$djxD$7!y_737z4?dp`SS{&oU(j{I*^`L<-Nt8+#ACSX2)~`5h68 zMvp;E4LRkqEv^?U^H;CN$jm??JwvKzPY;-RzdC=JGX-*P7lpE49tT;Wn?Gs=LxD4l zD4oq!fXFoHjH*IDPXGXn5QPAe(g4`MNO);}Bcx5~OM|Xe(aWdOp>Zf!PvMn74Pt6hBX+4^}#cYW1K?t?X z@)Pr%9-R8S-u8hWo8I->yHzMYFSlQyViHoH{Wi+paKqDI)Fn$^5OL0nh8c%A5l^Lp zA$wtxyLk}E|HMgLUqRq&_BN}y(G0V7gTJ}y0)STHBvZ!mc)YK(HL;4eosctq?rt9* zhq!xv>ROJWc;SWfTl4Zj5GXUIe7k23rL4@SUowbq1cz)akSBm_+lyUJc9GG34hE_x0gaF7(a;nYokPu`PWFV$Z%z-_Ar${_NK(SOa#H{TKXCKft^M z>L*$He$a9EY_5_yAx!i?wZ=^`h9VClIrq#~U^Tm8q@`aeR9V=gR zM({PC@(qQB4pffVrSYq-58n%}?C$3cMs=@Z%6l!ui1-aQCnJ$Dcp1o7cBpv(3>(>p z5`_maA!cOcY~m!wo}Kc^r}o1AO24Li9<}2~-sKUn8ef1 zOS6t!DE{?S{EegJ)c+Mx^0c3!;t0{Fi&EcGctE=ZS4my8odR=k@^?VG2t_S^*t4T+ zPP-5GKT^--sTly#qq-Bj>bvptk#CNDSy`BNGpy^0M-|-rcqWp+{tc8Yw0F=GTcwNg zjW+&J1cx2kcU|1w2Z{cf&uC?OP^s3thnLuLo}98Xc~#N}4YyuqvfQ3P9tULLx!r=i zvu`{&1d||K=b=R-M>R}dU0QQ1htCx3u{)Q;|$JzUz z6D3nk`}MvL#6qdxGj$$vA|W_e=MkxH1gW1c@2Wob4)kbaDzBzjW2&MD&6h{>=s8** zHzV5%4qH^*A2yL-t!4j2jc_-b36yDPhP7hVxjjd&WDlX_?%A)AE`vBbMRvs_tOX0y zWCEt_n()S#!WwQ@LPunjxRIS34H=J@MOcmu!krs*!8?bSP~f;s!SZq&9#r_n>Qq#R zPxxc0ls`4#H2VK~#Q%mU`QJ{=ziwy! 
z2T>An_I-vV6A4C#CWEZSiC&09Ba>|DFml{5-sCbM;-HyWNJ3P@P|BZbyNjw5Tq>mM z1oVo~PhhNHFdiXJXTF2-Bf$@SmuuB3t^hXPlVt&K25`Zw$&&X(0{Y}I#)v)df${Ut z*;eE8@K@@0X}`5WIB9%j*SS`IJoRD5g!cY0Ms8hosMhVothd<)8jT%5nEPX9X~sb7 zCu^jhXElz^=?11pmV#i9u;p|4VyI_dsg5M{6C(#CW9||@h5vQe{0&j^&ri-jS&;lY zQBvQ3;3W(D7730mhbdE*-(nMuvbF2gR9Kah^jIWT@UGgv**nl&BWZFu$oMOuq+&VkA$D!JW;;{^C3wlFDL@!uv&RFw9`qi5~z<*_%Q02rh#QfOqx#fHpqz8v@ zwQWUw_2|(_iw`q6bwf2lBD4uBw*~b*>W{<6gjM9t@jM(He!(?8*h4tKI{LZa^n-DI zn>VFyE<8BN0R@CW@yZ9PEfg@p+yL5VWg2@3V&u)njI35xA24Z*J6`4| ziez%ft>Ov6slWTOh+cmXN?xKEh>jql+*4bQtfY_Pq0aLYM%$_(+v;${Wr<SWO)p-MsZ zU_n_oDBpeal}DllEMFE+#Qr;h2|{UgM@3Pxc{wMCT@+0@_x$RMz}xf*Qua3;+vW|cWtL?KZ9|kb)dD`51 za2^UZ9RE5tUkJ^~3+`efRd9m5wN^RT2U;x7guI{Z$I38a=UDSDtBIO5_ zcgy9YOVJwr4&_uh%=vw2ro0=DmwrCf;4p=g`aKam!L=aD8sgO)(vA$OwXYRkJ%x=+ ztp(2}`*7W0gL$~I9+lI*xvIkJK#>1#HC@**vh1q9af7-AOCt_cza2;i^K!$G3zO}r z^2yz>IH4J5)2}@fYwZN1Bs1ox{$t>4OJnlBv>jbU3xAjG>jkN#n%~1_ZScsOyzbJ` zT>NSLLxMK22SSoQXJ#$cpvU$R=ZN-x5J@K6tMob#jQ()=84~xghuegX&(C^vREoHG z(>D))lx~sbog|8m?^GPiRk{fwZ|*$3aWg76Gg9y7n!q15+a#Yx%C^2A(ejcOkR5}Zgwro^<>rH38h=7kJJ}J zGIgeO^G5`umc~A|nr`%MN~v@RAVEyPcc~{nNq8*qMTc%?37mRzQOGT}5#HEwOjtcH zLe{;szbG0=$eFjWhy@&0bk~V?>}#1dhc*LZaQ*a*R`QQRXkRti zX1&&kM|Aom)GaT=7mYN@zN$fxl;P#{RSJRJn8Mr+2Exj-v(82EHVI4y57KwDv|;b5 z7|97G!oaEFa@%0mI zZ7w?JKC+B?Jx<&}*Ci)}f-r~9{|Px| zH6(-!T(?;Wh7>nb3z=7Gu(d5QYH*7qycN!>k@alEy0$ho?t@j>aQNN4YB1py`(kMA z8(Ir8&wKS^id&#e|N0IY{U-QHU&-E-mIq=tSf2{KehL>14C^WD6QIwisLeqy9)1j# zIY^8|V?rQZ5E)Yi${aoY^Yc~0QoU$5VcJKCymSXQX$YIRbl6%<2&FFORfzEKudBc- z<3&=p&eWqzbnIc#Q?;O5_H8xe2$81nagi~^Gnoij>ff*kF2pwu9U%vMYf!3Ii<+sc z8XA^I-b&BZVy2tAuho_oSmtT=d1hV>hon^PC29D@x}Q+m{T<ZtSvSrGSuh?Rj)kb! z?F>ND-q5l0|JDh)Y%kic?+cuq7RyKd>QU#l)OWepB}l2>OBcI69WL~3>^?S~f$RIy zuY7)z0XqbQoXCz=;B~{DX>t)Y_>K4I$B_rINZWLjGD*H2)yc2c%x*)7-0^91|7rz@ zf5`tByq=F1uWjhXj}cLW=LDyUZ&kz182h(DL%HC6RkZ3kZy_dSOS$aQ4ucONi;I4* z0`R$I{s%L;3~b|fdfGew5>oMY9pBFi=x7W#v{1-_(Xc|Qm4k)Ye=YVksU#beBJQ-k zen5DU9rsPJoUR7;pOcywN5U{kw9_z$xUNrZH<^96HNn<#XO8Ww`Ix}T8=!9=0`l(O z(^u&o@uZ-_k5@ zPum!)C6UlVefO)OkFPLrFGED8QxSRvJrPmeEXQ(*xyKCK%klM>OM8538&J$ptu&1Y z;iW!r9`ezm0*5r-91g!6j3-uF#~Qwuqtq|egb|uz@Ue;)-;jHZheU5J**3oh)#XSx z`ERKh<3fKmydoOhl0@jDtx34@w$_xkgc$K^`^9RWkppL~^e#&C6Alj-F6J0b!V<3G zs7kk1iPe$Uc5#Tc;ZQsI=x6Cy$i>4hr}8TvG~zEu6||F}J0LwBtts16xEEc~%_iASonRn?TMZ8%1+Juh^PgaTSyWnZ4mg-5$$k1nuf zVAIdSG)L_u$ea(oz;Y}Nw0VcF(zRB>lm*kkxq0Hb9vG-T*YV5>Jgniz+L>SnIIhU{ zma7%dRhtxMCe`DoeKK=R#T7UxPHL_4tcK{fp}`M$YEXoUmapt!3@T0K{pnB1!_E31 z;*C6w@a2fKRiFl;51Zz=XYe`=C+{oEUgz{cB@>sVnqUV4w~gGtjfsFgO>?@B>?Ts2 zD2C3B8ztgCclx_lM~D%?xg`->jtVFY471y*S`T?$W54V!ghB(?>91Y~YEhw6>cYom z64v;ac3O9~fsDbF*-BghM7FDNDJqshNY}{bDNh&3%4g;3dK3v%5#&z4?Oy^}!!I|z z`>iOw_smYlBo6`$rt#nUtOzGkVz!c;7KWA&wDZ!8H! 
zgq^0C^L;RC<5S>oB0YqT=gtjZLhoi!{#HlqMhX&<_C*05ahRkYE_+;oI9~|~F*Aff zx#Cjl;Cyof=6!s)F700pedg(oI>XiA;w3zoLz#!ZoXPdic>5vng)zU`?jpPcf$qsM z$*|%~l8-%Hj8ZS?cQX?(?uqa&=bR5^FmI)4#CDZPVY?BV*Pzsd9y8(QoY#8cJ>M>g z;SoomY2e;nN8r`(?Q$pU)Wd=9qwo37#3c9-y6(LvqZs^Tj^7X6MRb(Hnx1dVBGgNn zKfb(4jRHT-WA#rn3PAt#cSi%qCMk26mwA=bQRW)sBT-f@ z=x#nLV>45LK739C{tEd(h&Oz0+^L43@U`VSLJD{-?95L=pF}7-G4}lG)(GHPfR|uh5>C(<6g?%a(I5{`p!@zPpJE$H-1qg3#F>&+VTdovC%48-$A?>#@V=i z%0If{VMpH)SEGFJ4!A7&ep@BxyKQ(zYL)@lNhiMN6v0T7A@X~5ts1%5_6C?;Z^UY##IPe5%=*7C4IZzA6Ps<{{ad@?40lJ!ULrD; zhf=C%2U#-a+_W*IE3AaDZ%U0mC9bgQAr<=gMFJdX+8ey}SORLC*H1Z1T%T{dbzX(9 z6hT9*tY-P-O|Y%G>QbIO0<8nr7h6pbyAD3nlDwq>5!y#})@3@e`lG6CMtBumY+Bt< zX;gtF_j$$Q-Z`M|T;7iEo2@VxtRxqBH4H94kvQ$)*Z@Y}>t3I!Qqb||lV`JDeej@n zP)$|Xn!S!$UyP0=0a>ZwRbh|LMEJi z8c|id<_JBPKF!DPH3MF{F@=D(7Em@(`(D6T1QSV=7d7ka;EAriCdK{+7+n}>7yR6X zMl~EocU}g=nf(o8>KIQ|3t<|uCz3zBhWSPEkkv~Mk!E)L*sY(X=+I7UTbS*M`}$A*ChsCaunj7| zF{;FgwGcBspO?Uw!(w`;^3mKSe!B5qlJ7$g*Lz+an zbSvQ&BXe7>Euk+)JHDW4U4o*{XK!nk5v+m7@QK`em2iL=`oEmhCtP%r+H5uVFG6R7 z^Oe#g!N4tNfRX_|UusnVLHS;~Pc zR$Y8gC>lXx+nj6>yDJW#)czoJ+YTg4S-)>GB_Y$nlnyeR3VeT0r@xI@2WtC}(Yz&G zci!vPZ|Hxt!N$j)!1T}pY}{TDm8eq#_Lp_IXk&BXxlhos`Y$=i@$Ko2+zV z7%$#kl?q>N^ZYm$Jn)^bWkio*71o*RPaZWS#I+0Kr<$G@f~L|ZW}_ojaFZ<~p->|n zw@~jY76)I*SlmlzalHa~(sP!oY4SjIXwyq%vk5YprpfP+h=Q}eQ2f3u1N^bON0t$C zz|u$e;M3E&nDOI7rh9TbZoK?-wueZ6-22oe|B*-z^0Wq=klfRR$2awaWxrRUakoy2 zJT)SN#=)Oy^NkQyYNIsAlz`FN9ZXK=no#7q`KIp9P}HJI(ULFCg9!FZS?QGosXSGF zkL5K9KlJ7$Dl7%yQQMa;BAYF^9%ekPsqmkAAgZxsU-Fnk3)s3i8Rt3d{(hpk5TQmYxbK=A{Er; zj?=@0Bv^{Q8?%SC07!aQ^g67HQAj6y$ML3c5NA48^fku?4znlu%kR!0NX8)D+`4=$ zp=fkeeA0k#Hhj|^o$7JF`Qr)p_r>tCV@gUxFc4=A^|wS?RN>Z(ZmhmbiSR0Un5{_1 z7x!ySK$z0|`>3l^CmBJC{31{EiH)O{uaUsdn$2RH_w}%yB@T54xUkX8V%=8z5nwv-U|<4F01sOeDsb~dtY}Y z4;${^z7R$@llI;EL)}4}0W=eyVQpniFg&mjRZFcJb#S-W^*rQp`9|LESpalh&gn9oDR^%3q&xr<^qjbj z;$tALui(CBdk4;Gp5Er-kpl*55|c)OIavJiV*WJa-_U~J5GDVpL;OV{#F8;zvnPh+ z$`6LJPl1jLsCJ+vGvf6-42#I^-V!$lLEk$HrnJ_9<Wj%0YZ)(xN>Yqa6vEA<$Eq_z57+!QE2T;jGKu7}FZ z2_5C1<(T@FO_zRO7o1yvye__@2+gG=9!9(LLcCJ)*KIqe@YFT=%u8BrK;E*VVahm$ zk>y%0=T`<`K!YtS{OStOIBf=pptgPoj#h<6%{Cze4 zk|+_U8)l+s{Dw})&MAr(w}GDgFO0y5)u{7#df?CvQu;h!+Z1$>lH7C1a#gxx%JHdH7ZG#Mn!jXwWuv zmRGzp0B3233PRn9Tsmgs-R25^D^C6fE&0ci{Of3mor^}@+1u?x`#)JT3T~OL-viY88BoX%_M=Yz6hc5F-ki4tN%p7MS;V5_2RD_NJ;j;sQIV z^emAY5Si$2v)DX{ltUDIuGcpsYxJgh&9iw><)|Ln-kAhyz1!|>@6N;>^cCAFnud^j zzh?1rbrsQJJ0a7vgJ^jTrlFyAG4kcOk>2$#MfIyqTH8!Z&`~XT{a$$o`bIvKPu-M< zt3pyt-zTb(eJ%V(>(eq6(i;1qVfhZXk10vBD7NAjrMrzTLPVihbyME zyjH8ve=z9{hL6LK@=d>k$2+q0nlAJ~yW|k_M0_WxJ-J)V>Cp{Z&4iB z32k1%u2Wn+1RGNm>ciKE2W-C_XmQO(ao#N(6zFYQD81bcmIf5HMVl=Qm7Ypq4iqh z{0jymwdp^Yo8O=%|7~Lay07&Qv_$UCFDtSpOUyd0h_n-JSif@igZ2rca%=0Kp(4@+ z#k=}%OW)kLey9(L>yze4Tp4Z_;QIQuq77%sFYMo?_zq*~s&-l4 zX~png4^K*cCBl>@moMZK%(Ywm=7w8z)p&eACW{doNR>@J@(Ds6K)>tZ=S;d55P2M_ zU-j4v$#-^Lvij@+K%?;?zM&2GWmpWpsvpCv!K}|KGU{+v=)j@-h7&kouFT09+=3?F zMY`Ru$Nrd{-=HObJ~@AKGWk1N(j}N#+15>3Mm-)z?ns!E3{Yz&9(5gtT_wXV+hxAP zmqMpqBu;;8`~LEd-fYZI@EwH|y z!RkhK9Bk(mHn#~LcZwiLta9t_w{h4kclZy&te47EL ziz_CE-3de(?mk|Nwlw%T-rC2RFaTYI^6z~=FjKlJM8jQEXM!`Y$%sNeUHx_IZe&2mF1Ru(U z@dS>y1Bd!yX;HFP*um9faXp|G<0m9uXdL+)RQ4OR2kW0? 
z7l-4UFd#^pt9Gps3cI^J7B}Uhki^p7RHHg%v#@d3dLDz42EhSuCn`Xp&Z54SKOMKX zQRQ>h=HLRQ+ikn3cHq(PE2Q6(gsXv9rhSUKu;nOiNiu<*T@RXjPp{tqUFI*8@0GQ{ z)(g8QA}xOunC3 zj!$Y#iG4TU1NX3e=6#}DlvyqiXG<`Dr})VNzw<^qUxU)&km6b{Q3KXp(jkBBHVPN&||f-+!7x0^WEryny+h+6K4!;7AC zFYQZF?%V0KWA#K8;GUMT6H_xjTRkVidaD&Z>c0tpple5N?OBnO$w7RuvH$kg`_-`T zx~cAlZ7S?}sQ;2|Sr76q@7CoSOM|C*Z5QH2RWqqbk zJjVCFFDKQv;2Mp{N`p!V}OC)J^)f z9PUI)`!}{{uhoN4Q;gdS)p}&PV1GW&yBne(G3>qdx)$!9YEX4;tp`)#18+92mBY&7 zqXR5M{rKyMAFt=NJUBX28(nzd3D`XmrMNhl4>_KD)!#X_p~KzRk@ECSXg2j;V7R1= z(4Q+NE-pr*EVEMUEL}ZJ>oQp`>ck=QsncuZaVeM=NVIIOG+`O@gp{dw2fSW?HP1za zh?IIgtg4{n(YB_q^4R=$09)bh8$jjih(6 z2*1a{1yRAMlbuj;d8~2YbRcdhH|hu9u7o{O$qxf!qJZ=9tfEeph$1bNfTj*US}k2?gzU< z$+3$+>NzX^IHCLoE%~R{@vpbw|ALm-2A%ul;yr-;tvf#C#kHZl9^h#ukCR9;?q7XQ13hW>kttTRB|>tY)d1^ zwr((R+#znl7F|^@my(gIKBTP51|d%-aqvcQ3()OW<;dIW0&{(?e0IP3v17Nf=v~H6 ztkCR8kumCrrlU71wpriBeE*4X`(_=app@>2o2gIcbM_JSkk2wa&OAdf@a*-~z4v3_ zq21>A(}aip9bL`pSQeu1c(p09>e7qzR2yrgI%yaXug&`^djJGEE8?i8 z>_PCd(v?dzAF#M=l7aOi;lV!AcDi)Q8)6Obx?X4W!ReHR;;JV-C?lA#LSH!oMrS^U zXp5v`Zb_IcOIHbotfqW>;ZliQiA#k7&7>TcKE-eC1L%`hGwl(=)54At)#pP#-oJh+J11CNly ztBi&4Q&-m5=|v*EJK9@1x+M?EkN9*^^Jb&f*&o-=Lki)q2g8jkE`gUFh>+ zr(Vu4|IB=-cB*lo4MsdMth8OUI*$Oh&Ef?15Xn&*)G<}zWtePuD{}7W6TE+Ht(Ti{ zbW)k0*&fv2ig{GucnrKYfb7#Ag4YT^1z}Nqi-a3C94vx^wEq#{+2@6pB7|s zq^=fYr;aadBJo0ZBbw8nS<}$3(~O~SI2V8I$a%8kat1IA6#GzzW#hrCx~ne~5|PvN z{p&YjK3KC5a+xVJo(PnFxMkr^CS)2a4|>t0W6rOpYn!^_&_>erv}ar_o-7%%4)lva zDt0znm)m6^At!ErqNo+`aGH?bTS^Dt*}|1!>IQuA<%lY8O%~o0IeOZzybv_MEZjS#f%#_GnnW89xf~T~r zeU{$f)n%yHnqG`iTze>leG0*jqJfh-!wJ%>j3b6stzo6U(a}(;9QVIkxO$l(2P)F& z_~so{adXq%_7A(Vkvd%CF%@+Pd{0&W)aX-$KLdnqN)LsAImy|oiqt6RF6AxV-H-^u z^?jjb6bWEe@*#((!UK{HnkZuefeP8!**nB$LbkuVU+%3;n4xIh(RwQnR&pPI-gev* z;nuk3VCSAa!|N6v>eT?`7^KX$pJo@K_2V7$yj#H=fFT;G)$%r+^9(^gx=KM$}IXs zf|#Mt67#QetogP)F)7!ETf_&S^u2z7%GN)}m85b(>u!SryFm$D9e#1$&D;~#7q^c! 
z7L>tK$K$z+2Ws%9uY|N3TP8d^{`7LR2*yl$_*OduQ`%|_Ltoc>qv$m zqy1q`@3U#(p=y!UHjxBus_)X?R9NHYyP6i_G9}<6l8vH-_8|7?vggwNNFX^8o%ifn zI&6Qb$x`fGg$!!S_vL83F^KmTZ(T$px*lkDW?+s3j)e4XLxpSzh>=t4iq63+RzjaG z%?*&!+<))ID{o~3-SFB&dBD_W`AQj8oAA3WU1XhZaq;cj8gCSp=1=`P#*fiL;1 zfV06w5WK$2`%BFE$h9S#ZHsmxe%CMgW);{BUaioyjXxgDPWG)h60$sl8(+Fq4ioS5 zyUPtv&)VV>j{U;P(=mAW_QZCNN9MTY*bA@kG70GP_{{vv`cyo%O5VUtSqTSLE>!CO z(t%D9r=@hOMobvd*fQ;v1Bnc37i%kWAVPz)V6MFZgW?ZYe%=)a?t_+IwS?Kww>*yl(9#r`EKea%N`w#7h&$C$wCR!Ns_*%$30n@!h09e`7dQzA75^=8i>e{?ip_j|D-f z1V@sJVLN7a?RGx+q#o^6BE5g6TVjV2ji7T!5uT$AEP76D0Ba1gv~u#{!28lNZ2yTo zV2MwzU7X2*bH~k%JP*{v-t%XZr)YyA*Io7(yJRsa3v=zC?9arT`P6*M-Q~E`vz(o# zzX}bvZ{8N6N4PWpjGvCHD~A~(J)rtVI5?6;R2}8W!GsX&`&rJ_IA*fDY4Ug?R_^~I z-ux*QJS%y(bFNf^iuOvTaBBuo|H`e9rHDrfp@LmEjn$#^22Dqq+C>bSJjs|%#Lsxp z6@g>l3)t)xRyayk2dR0MogwCgE&an#@?r`iUdw5<-Q6S#RKCW9Rh*86H}V&AO(tXE zCzav{Lnj|dRKC#PNF9OZZ?1@Vea!_)sTZ|cnOPv0&d{oHFag7BCe#Fo7^lTk*A(+x z3ETKWE~9+|NuYd|F7$0(II2zCU7${Ug0DIQv@{K>K|O%hL7sS@FN~WoGE5g@$6Nct zEq3*Ak0dKK>1{dQifudeZeKRxvQC*8csChL4BzwLTu%dKkiKEsS_#I_(`i5Ne+|=( z$|L#Ob@1WzNbIYjTr^(}v~T&DiLI4#onIfu?TYHRhJx@jEVq!o##wumlO^& zjO+19!Byy5Asi^xk^pH|KLoC=W&_KaP8R#`l}K^$nTC}dfvlK)845a|f*Col?sfWI z#>FkSc9Q!O?|~|5qwmK;RJnZ^8!ZVz7Ip3uPRL-4GO zawAs6)HYbx)xy)yMSe%_SE0zNTh-4WUU)Q&I=Y#<5N59DN*0dgqH0*~_@llEOr5QL zT<4SsWvx$h?K3mM=>=^`EsT}uHj@hVlcUAJEVjb|El-zfFb2x?wtkD{|Vij-|2{W9Dq&&IZ}b!^Y%C zQ=3{nvT@v-PH<*mm;6sBFVyE_o&XlG|2+n z8;X_%JP4m_5BV6bsKfIkThB<|G(#iTmaGXPP5nca%IfOTb~KDg<|!S@fU}N93OhIp zP%gAfj+HnUkgaj}FmpTs=6F9t;j5{**HqR|=UE<%Mv_!84`w6P2wVLg%@VBJy=PO@ zu}XOIVum%TJsT~)I3%FVo#G?b*pLN=y#U&`?Q90psIUepr(VPth z%hDYKC$qs`d}TWBHW8E8sPRp0YbTnel4QmD5@tWT;hI_Z({S+U=TVsxoKvlh{<_xw+nJ z``CyC(v$f+-L&&yO@miVm^>HmEH&ZEd!I} z_l+9gOELB+RZ(V7J`Pvv>Oc2Jq*YD%yin!=sh7*kwqFiG=_(t~UcGqu68fNHRyh$* zWzYqXkB1?tp`ZQjkTxQ3UD5FMS`B`gTUGb4O@{J?(?1!B{gI#Kx(S6)6&C%te4EiW z7L*>+y1E@m1y!5tfja|4__3xPmI(Re$K+SRr|(Z;#VOA8x*(3I~lm7LzXjbd)_W zT)F;}R)8TAuB9zEjuPF3CEMwboT7S| zalh&S>A`eZ%9>#)J>G`ofwf5jPC4*6P5RjVnliZm;nKO_N7g7&yJz;yuR_Q@ugG!d zb^yvqUHG>0AMrpm9fsP3X_Hz&iP_u3%`o%1bCbAjIALlr|6tzN8-}$;O$XLOSvtayYcg1eeD)5*Kuy1^uf!QV^7W@PPO7~{=RJn~O8Whkvv{dHcB6BfW zfc!^2e$nrL@7hnw3f{jHjCv}gb$m8D6^sGdSxr}dCW4l$kzko zH>sUk+q2-|4Qibkz8Hw^BR@W!K@>+n*+d9vRieyP+?BJ*mEbzTZJ|M!#C5B+D3wv` zBZuI+nZHjvmUEbqeA%0c)LXYoP(QJUhN=PI^rmzq=@?|bxkTLi_9bOfy9PtYxO|-i zMFEy%OE-##m_pzy9ojs_GH7`%`DwvD1L-UFw=L-hqrk|%58{DIC_tZfYT!fzPhlmy~+k8m}_Fu|7(lv`9g+=n$!wtfNA+gkJDYO<`&V1$VBjSv{ z*j@h;XxofDQ&DbHg!z$#2usX`=^Qj2Rhcg1$;X4Lf_@v7nkbkPSor=0A#W75>3XkN z3&$Oo4yJrhg9N3HipY!t%vtxR`7vCA{Bwr%!$#T2%4NKI+A9{6&5q@5C*pIgghcN* zzc)cfwv%+%WUFyGcb`?BWjbE{xc+mK^h4Zgdc!){GXt-(2ji9HROqf#l9pY|#Le}M z!6|$}m|#WR<-S&euDRl;_U$?FqYIRrSsdU$yIsFQOa4)Z_=`fwfqS+s90ot|b;DcL zgZswd$~9SHg6FWui;bCsYz&v}zSjMueTTF4zm^M`Yq5vRwM0*^A1j+!9KOH&TQ#(2_x_^OKZ;9R%n$`6$e(4sP9fv+3^RdSqr`V8|H^$Fige9>c?3u;rP72kD;o z_^9BauHry9B;S{MrFfG_0Cz-dYL#A)q*qC2ZWsj>?_aXX)8(*V)kN|bbvGQ$&-LK% z%R=#@f)(qA_fRb@L6Ll~1^La5?rQoCB6sm#`?gjG5kkW=zmy^zeG!9 zk8jiEZW#l=hwG2TmME|2C&vHuvHONy%O+=c5w-fNwmU*^gh&4v=r?91{C|#?{0ULA zp~@#+{?!xT$R^CM)wM$KSsKfxkG^m;qIsY|ssl#bj&I>jEI&KBhcUrXIHg~UW{XFl{(3j(g_MUVINWY4aCIj$Mo^`MMcpV%pE{<1cD}&(O z+u~vb3ZeAb7qR2cM2@ZF6_MPT4A9g0(G$)efWn1B6RgJlz`WbGQ!vyEjkj=|jtucZ zHqLXmi?u3%^tRB&uYEyqA!CxEp}hz70-ef-!v?Tw_CsA#RT;{E)wx;EHUOv28?ud_ z>_@UAcjk}i4T7qyCS<>=z?RY586Sg*+&9s%@cq_3pl+|f{{CDo__x-zt9zUOu@b)_ zO8&7D|2k13E-XA{WK)Ce*Jk4=rke2bkc)yc@joZbC2_=uhvU%+J`MM_TFf8H2q;HJ}?A1~TkDYeW6{>_Np{ftM zgcIzQeSxNjHT$q#Q(|OLv>ba6c^;xDA|#U4)Qn9#`;q1)yXr=ECkC^eg0NFPME11D z9TCzR2(-*`+2R@kYd81o-21*11K%`$-nYL2^;mYPb0#2eriyW(6==lZ54NJ&kxg*T z?ws6^ 
zbpDn;^&6su|9?f4JWJf0|8lYrb*5t-OAf!sbIyWBbm#9N%Wy(tV#GU8F>RI;T5o`y zMs9Ap^>)a-IcVVjwHlAH^YJqAmVm~*;?BvZ4NyYMk|Q%Q1xqK5Z9aQP!i>u4=-9Y+ zEIL?UXY*+o7r(vGia+)ixCJMB?U;v=*X3!q^PNVdwJ}qQ-O`B~l4l<}tql@=!e>mR zS_3Fnaxua}kob66FAbF@62e!3_F1{l0dW49NT0)3!eqa)UO95P3_UoEcl~f^#+qHN z{M@uN*k&t2Re8GxRhBZ}b8~lK^|4*wRF1c!@549u#8%w?*h+pwl>BY>{(Yk4o^{b) zF7|Awy6}!S-%kYAa<|SgK5hg4)|ZmzgG7H@*=-{JMHj@0U)|p<*84V477r%ceWTAo&;smyG*TRj|@CKbuAM6}u^RB%eg?>tV#{2J;;P8o#@?E@b zFk?t-m^(NFXPCz~sGjA(8uP>Ko8HA(z3;GOGyCU1Uh&@$CI4+={<@v@52A$KMRMvP zK}lWNc3wUyV+uMY=+#RF#^I*iz_+WqW$erz#GV4j))@ChnAQ+oI&x3*XXWsn zyn%EuhN$ z`^%m9KrXd#?Pvo8daZ^(2&l&gepx2h`7&|%Jel}qLJ*_&pb5D`-BG;X29s=wz$&#* z;|UVe$iY=mDE6WPWI5F7RS9#F=pB0Z3Ww`(dC#@ew<>yJbc)_K>nRb+dppbfz}srV z8gzeGo3 z+<>>Y^Ay?9ApCy3nAE$Z7cK#%O=epghF>>0qGM8to^LXI`3U!}ec$X)dOz%j4S8AZ z%MCp!boNm7;Rii)|K^m);ro0a_6)%cf*l0nVFgQU?_pzrK#7QHnC zuGE7{t-g4Y%_e zZICCoNhXOs3l#ctIl^-4z_Nq<)|FRr*ufShqI?{ocbBt_%)U|-l~>=ex-p0eF8m*E zh}K|0-I<9)l8-@jNbz7zTm)zwSO{QtAfm2(79y#mgJGuwxB8858E8dcY2v9)6z%64 zG7O0(n1WvU8S9-jm~+97dEK4J(0#7{Y-G6>Jn~C+9_^|invEq;%ijbv6YZ`A*9w3# zq}rd{s|3BHc^qQ$>Y-HRg87kLLg$@#hlZyl2?E5fa?Xm?qv7$i`qE8ZD7EI$Lqp~Z zn{q==Ul#9%#61h0j+vEs#I-&cxAuatvKG@$_I&J$(_|Z8AWYLv*WHpLijLW@p4~Vv znT@&;j2{ervN6)|iEe*ICxo~UzLXni07*aEhD4n%?A32HTs@SHnKb)%j9TQweabP8 z9czT1E2sYm$@4}eQUW5ykM}`lvC}q|8-s9q?ikEZyoL1(Z|NyG+A;a8N_P%VBv@Ce zrJniJjuGAD3|uRH;C|_&zvk&ycw3xv>BP-u5ZkBZCGemU-tdnJn92@-p_gC^^FS7) zNk?R4ZfV9+jp(L%@iNGfc7K^*oQ$9OkNZg^j^MM3ub0F<@=>K<^y;bnG*I-uV0v$o zXj*sn$0owbx7E7cw_4ysP7Kqr z!d$$jv2{zd9HCqNan+^Qa~Kq@>a0cm(t%#NLe}|h5nA2uXtPm{1pQ<^7r~pvYdRvZ z;nvpydE$p>=v2C4&h3WD1LD&CbWWY>aV%l6@xw4DI44md>mK=yM4(@2?t&Rnysvq#;z1wL3O%wBm!2}0!E?%5twpjS z?CAi}+EOPdkZ|C?L6AR|NxLj^d#f=wcl)-bg=qT436L??kdth3BnmZ^A;ThE&QoH1=f z4=IWsOmc)|Ik%pxc@)YhN+FcqyaA8NyMU(VdoV@;QchIhKEZ@D3tO8wNF8qQe zIj9;tM0)Zb4o&}Y%KS|s<=nqXlvJpF|B;p00&>Q8Z(bzC&a6_~uQLm@p(5>_FNOQ# zp>ugd>QHYeJfp4YFA;kKb{CAJOuPwmjX7akHI{tz;!U2YU}?Z@VW&FEjnBX~iZ5^0 z?0q41?GdX`S{Z8Z+0gcn&cZMIquFeAn$h`k(3T_DW00SxgJ#Gg4R~6T;}j^pG2dZh z)N!U6z0&RD0;Y2DWF((X?Nd8cIN6!8|AH#ctO^TK@Wo-^Gwr9R#~N|hQwjYb($-*-;M!>gSF_bf*1K=hk%hGaz*mX$uCwRjs2 zx#cS!fvlDAzUZcY9eFHn5ux4JR+0naKMJCLvgBf+hkrt?Q#tZU_LQebyTYxhA=#aW z3Fk~LA2klfV&JE)+#kr5jV?`Go6cVI1}TA8eA?P(fW$Z`{wn=-xX(00_)aSp} zA`Vod+}XBXwOl!bySWPTOoZ%#>4RLCM=&U!a6F^+xD<6&GaBj0YcS$mOJ$S11M&ok zBp>N$!Gs+bS08-|2Trv|Zmf%WC>3hna_3bs9F6*#vvxQGipu(5ly>IA$GXRYFJgV6 z?MGn#gM0R9*mXFJ;Z+H~AUQ97jl39ihg}mc`DTJr_N%JN;0ok^TWMmdmjyPaa<=Mk z8{sKgR?}C)?w!}&Q{ps#IeHJ1Ggnsz;Mxlj?z*2EMDTqbi`L_Gw7RABGUn?`z?c~4 z$xb52Sn}e<=sjNWP9e80K|UXC=QgMol2SqD-hPMnE%iV(ctNqW#UIL_k*P)!6;4ix z@nPmeWgu8CZpbE@j74UyW-pH9fyUb)i3i6e@ECtCd738TWS-e&22!F!ljV2OwK)ww zgGiBAXCkJ@e-gQKB@y9NET6M#D5$B+P^+A)#A_S=lOtye!Le7*ecUqv>wLs_e_twx ziksq>uWu5Cb7ZkUbdMFnAaAlE2O(Y^jM`ji=n{&Krzp!0-$=%ZP^OFetiHHOguQ5i z5Y1OaiHu&%4#rQ1Ikn%9cp^(hZh!WHKoC4=DOFl!ja7ZKI;x?*(7+U+!dw&v1_`nq zH_|J?>G%MN9ajnjCMNbMduM@(?&ymDG7;O5JiKVo9D`sXAS$*E@we&ua!GpStTse zMD9D8o{uR4BnQsNl!5n&{&o}kO1vbps_ecc5Eg_qSyFCQqFLC9smZG6@WbY!r{UdD zjJrpEC+}SWyqIvgx`!~2VVY~d^KDBRM(M~4DTqX&eEFUh7SB{L(4C%jI2!>T!6t)z zbDl7AyhONUR}V)&EvQr9mTgANQnI%uY-m;)fXz;L`Z6`6OZ z4aMijz^gR%Xj{E>Xk`xAUa}gGM+8$YwjXeTv8qFAf(o86o!EQkYgGY$3bJ6eC#nId z}NYX-i$>cclD*#YA>x@*{;TOrldfzl&y-s087!v{!obCKEE(JCaS z20MkO=NnVYF?J8xY!@_x;-+&2c|s`|WixWwqc0uaQ%OaKD8~_7>3()@uPnlMjIyIz zEC@Fbm@N6!Rzo8nC&RXmOf>2H5^U#~4y&Y5+hRUGgNXu9b@O>AK%+>v=QmRE$uA&f z+#2*Q9u_{w2I=b+!->$h+}6>RaQjtqXnklDe5N3e)9fN__<5&nj#2tSWR;)Euw53+ zIm$c}FbV|LC(1u}t4AV!kYsJ?|_$$OiLYuDR^oggMVUTf?`PYtgLOB)3Y)1Hwl?m3<}Fmx!IRRF(0G 
z=?Di~`ghVyL$yF%)I5BasvNYm+&vqv#-OmOXwArOhkeFZL8WR<$)MieCkL0Ya(Xj+}t?K@2XpqF8eDV*G{t$h6k)<^U2PBXuP z?Iz8a8^;sz#M{;RVW}dZ*u4*L*15n$Y4zd6b`$*W&wmMxvw=yfx8etH4wPCB$#upx zL8iIMmLCZ*AS_$|NcyAz^oI0}ND8Omr_b6aSGEx3&UvNcpu#eEHDSC>#Bz?(l29GdC6!&GI&1smmiQANO%dIEX8TQI;uF8796?M8{f~Fi}B8#mL1WVfQoMK zLms8T z%=(#KCg2UbEn6b$q2A?A><5lI7^^*3?$x0No=V4?6{13Mj!uIyM5hYNU;C)-xSfe+ zTyGh{%?8*+Mtb~DWx;|sX_TKN!hXR@bc7=(f%knCOutCMVO%6U5#^QI6)8v?1NN;mioA*hpKz? z&F3iY*KQfskpK;hL2p!*TcL5gpo8=GdVKX#mhCi|0PafpR-7c;tcGEBK+A#!O)Dt59^gB1I|5@p zpGVMIXW(F7C^_jH1kwj}+!2K7!M@DbPxZyT;Aev_^?9a3IOeNT_rSLd^@S~XgAe6n zMuLCn*gw_^~-*+d>|6~Q;r}W-LOIC%??$%_#WDG+RU9Py$#5X9qXV0O@ zDmOTFJvc>rs1b*+>7SXmu7MpjOC8LFSquT(etvK+6TBGpX->J85*=T??6K8A91vsC zvTDhOzElzkXX+X}$u6k-tuPTbEt>mv`P#yr2X=SmsvB@bFs@bOZY;7sSUfCh;Dkz0 zvRt&k5CthWxDARk(fjGzUMlBY2#G@9fKDT$y7}#Q7qJ$M%hsl_Ofn zrBnMlRfE%`TcKgKjYwlAAkh9e8C`C4x3?5!!6@?Xc}u|JA|X%x>4=!2RoGRgp2Ww^6XzLmseOBI#Al!5Lzv9E6YxgR6TW1wl$n35LfJj)pM&VbVMi7-<9Ve> zn9og`YIF;SxV7H-p`<);dLN`L*Iy1l7tVd9;z#Nf;{2Rg)8V0xXqKrux?T_Zo`&VIsujR$%5i1;4?d^o zK64pxv~S4=#@U}cntq1iL5KFsd9F$DtgOU^Vk{lE#!gm!GJ1oTB)%B?IJCpun&x_~ zjxH<=b}5qIEr;Z|ukkcJH9*$?-GQ)%#hMRNPUF{Nf!#8r?>yyOn5Ej}dTw(D+%XuS z${G!ZZE}8e{#`j>CV2g8>_{f0k84%(RA!;{kVY`cLIRxBnti@Q(F@-zEd_R~t6-U( z|DtSYAxucNnQRauh&v~HWp)W=!Z}%AF^?rjB%jadR3qYDvIS%}fN-p>Z;t3JP zPDRjL#=k!HE*sb-zKiX(j|MSe*7^D2JpA@}PFdKs4E9Vo>-IYOLAD`}@2kzV zp!xjTQ_i_abT{RC)KZd*olGOo?>)~Y2*Iu*ll45z?3*DeDX+#k_t^#ER~3*gCM!0qbmtfhi^IVepgaSb!J3+o61I^i)(zmFGp+)`dC-2>< zaF-|g2;T=s9MeeY(s@=1sc(0*?KCPysg*p%a??~uwY4UZkS>Jfao)%0s(f)LgLfO1 zgey8zez?JNKN~{0u4V=4=7Mlskg!6cH>4e5+J7md9`!Cg6>Z~+heUy&hxC$4&_w+` zd*b)1KT0CMVHo~Vhxm&^2%EZTYD>*Gu#mVgNjejTvzKFQNdlKakBsdLx!PxpX}jt4 zvStn$1A63DbH8Huve3bX0vGUS+PnN};cvDX|3e| zw%PW#>HT;=GP%UwzlksvIn7bXkO>2JG=wy90SGo-u*GqZ-|m}|29!lplmF3h3Fc5F^mb?t=kOSHyw=KMH+~z>{I){ zH#Wet%kCZw_I_9u+Nk8F)&%?xpBv8+?rDi%x6~vSyo1G$o8{}v+fjLaIDy=ja8n$3 z#E~u;jiPkwA?0heK(?<=Iw`Rek7w1Fidk2{Y|}H%Z@y8uWK*qWHTni(RqG;4jdJiZ z-R1TJX&E>Z6vd`;G8G-a_`m$VISOSypEGN_P!G3?$eiULPvaPCdAZG1B2RVG{O!^Y zg~;HU)c;DYhOp|kd?{p95BzZ($F3$cVDb4V$}+7obV^R3)axAv`V{AGouc>PDr|Sf zpk@HX?wuAtHeL-nbzLJz-whzCi_M88+Ah2%dPcgwIv&HGWQrPYWMIMJ_~+Fo-Ej1C zq75TQ8#X_^KeL{c4eFEri(`jV{#c3M5GDUuiGQ6ad2bJ?6$B`??b;8^)PWwPQ#jSJ zn~)ux7xucK`f3KL^F`7#V~Zgz&QE{WF(TXeb?w3F(qzK<(!1YwUpmCcg|SOxDa;Z{ zaq6dffmM#$jJxbDge*NMJ7C-m%Clc6xwa+a?D;9#69rGOtleMB$gc?VS5BVaw6_km zgrXxf2L~}F=KYg*g_S^}#zj{3pp}q?)IDA-%qDPWqeUI24tP)&f06(r;mMr}ulSo; zM0T9Sd#l@7NEx}fMO3g2^z#be^c6>7#HtzSx;p5&u1mn-+x(WVr#tWrc|P@5 z$c6C(wiFlqN3gN*#H6ukHK-gr8Su?>41>Z7O81raz&-PC>ngd+zhoY4+;C6b1U+r-ofvk+M*=FJ~Air0vZu6$YU_ zfz+qxZa0za>b^z&W*t=W413hkmw>u0kev(dN5_lHOz|vvNOygvbcBF3c~nC;9c#-+ zgBu~QYyzv%>Gmy~_Q*)M!^nBHdTtCBj@;KDkPU*&UKy=TN-=OR|3|^QgM+XUAZ5Wx zh?-X11*6{F%Yf#Pm26+LN<6t=t2<=51L8%(4}_4#z?bOoUP|d!+;(Sym9Elb>| zBY>{(YikS+*2iKaYWJ&(N)|Gd&P=^()^lxpv}fuD}*rQBO$JMbxZKJBEDwo~ z^Y6vN&$kZLvnRveId17@y>u*DX0DJQ9|uwoVdb^$WANiq`L#50U)Zk3f0~Z55?i~! zhcn)+g5&d~xmSa2P(;s#hVyO>lH1d6-I_quq*pWibQuQWRm4vq;Zh!TyOf&u=O6r>{E`zFYE0y{4ilyCYEy}AAwQnI6+aDmGD^$%VL za2}Op*~Zrkw|rJVY;|hDz2qC-7o;oj-qO)dlHf7o;yF3?Q>Gi_PdG@+1SUY{kSSOfgd^+!d4@Re_}=8qCUzaWmMtiBkj zu7vUYtgOH*RoVl6*V<6zk$ufO&3Y*LNV|=GS2ZZeZhIVYstbEg-7$|Ph>hJ{uiO3~ zZ|@z?_22h@%Sg&-7bQ|jBGItQ(-uOaB9)z_D58=iL`HV@mc7@<_O$ojduBGQB9X55 zd3?{~cb?~Ub^MOw_+8iS`pbVlyvFDCdOe@_$KxTr)f*SAFK^#`Xb9>_KdI`j6V=I{ z)|D|QfA~QTWt;A_f#89A`tw>H@OfLttMdkF=(L9-IF~>pYYRS$S1pvmJ>O?)re;p~ z?(IE6*0nkm$=W@^x32@jZXG6Hc{+v5i`>1gOzm(y^-9W0;V|UW@Z}EwN(QPUvFp+e zu~<>|K;SOnk+OHwB$r!RD`rQ_7u-Hm2xsCgFtw^4!Wt?RQrE=x_?L+i^_g=u_hqf!DTD1EeN|;F)u^>0VM1Hp4G(zzz6uR@VQi3$N}paaSf`2YINwl*KS$m? 
zNGFDsYw^^T+uKv&!20JBZ_gR9-`3bUzMGKgjelVcC1l{;ArDOLiE~zWI;}E5xEJj$ zUQF306@Uas;?)p_O8lxPE|J92jt3hw>ewH5;z2j^wU+We81V7U<;&~@hiBnA9GAP$ zsg&GdprZg;2DA2!D#xNZx5>(n6EPrnK}cSwry0E;y2kIiQ4cmvAAhBE6~gEbI%6Ve zhX@G`K2fz20qf%PX_th9;4br9p$|3>;BiZDYBps7syR1ZYWr>vDVntW<>Ew|($N~H zv!apkUQ#CTSWg%BD6WY(xNhWyFi?dJbsczPi4w(n|E11sHNh5m!Gn z+!>SY!m|x-hXe|0;ZBp2_Ic^|OE@+1FD~-5zEd#7h z4D$+Ktw%%B>ZNbztDyae1K<5$p;xo`^C6gU?=l#kZZ38 zb)}1TE3eB5@>fzO&W&(-U6YzYX z06`tulN}u8_@X-7p;@jFSbJPf1$H(AZ$rx-wcVBIr)x6#eXR@+ERJ*TAozzxC00w( z6Yijq74~>}knnKX-*!#?MGO|UkIjdQ4Wg^eK;e@U?SGv$e;`W!+jjitNAUk3O74T9mH4*J;IKqsKD}C)q62 z*~vlvnY|IW=GMKre>X-I#sAW`Z$j>uWDAzB zh@F3p$*3aA3q%7kKXK?07#anKN;g)auF`pJX}ymyf3J`E6VyWKBjdp5lyy+;a6cj3 zX#&ZAtf^U*5bcyFq2C+@@?l?jv8-54G@crnEEu=wz&F`iu8)e^;dSoY7oRgnaNo7n zdymuGpoKN1e9WyFCpBiT{TLgqlISYwm+FTM2S&kOq6kgOS;}RP5(2f)Ou&W~we!id z%5G_plxWmFESv)d?`y6m%ax*@>MCp& z=zXCKt59Hg(85}>8$(sB)8!4zK*Y~9M(9C13Nrn2`?k~%xg%`)N0f*pp#N~f`$Hi` z@ZTg#rX?vjt-UjWEWVO|M}R+m{&3m+S6Lcly3?#aP_~6*c`L7GyuDGzu>Gyfcqn9N zjQLZu*+Kg=BZ-~c++aM;#WzPF8>7|RnaGKlfi1f7!Da%fcq22xwIsL*_44QB#C`Ii zP@RYRSZ@w;?NMge=qQD4oBLixp7TecbVaTLmRL}uD>K_)U=N1E^rbj1s6gfBp%q#}Tk5pTp)u5*k4K?3*9>wYcdkJIgfr0cnnj@U z+ge=P>LBR*)eSSZp8aW0kPh2VIaZSDbHMn}-Oz_u5igr`D1O@Z2;bWGJPxl70p>Gl z%p{IHXwqbTbjdyeKV_N?r<;0!r-qs3i5=c};&Peca8nI_@0-z=%gn;ZA02)t5EaG; zl=QCBeMGFv>4EYdfg(JooG~Ro+W@MP=A8piQ-S7Lfa0)G3=|!|yt^o<0xAwZxn3;R zjv1P39xD7zASf=k@rgeWxD};7>Grr{09o9D5<+Wt`GgsDFINSAZ)y4nr_J$No85B* z?nWFtujyR9nui-o!f}$0k1*h)NHq1E1Yn^lUEaZ*11baL3Qz9)z_B?l?(JJL!14C{ zneu{aY#x}bNv~9cJ&Q|K52f=Ve^+GbOmHT0FPOOR!df^dYnD?FX_!>Fe^m39HU9YM zF2o>Lj_huV6cK8TaKmD(r9Z|BjyzeNY|E92Q_bXOwj8O)6gj#8+FiNG(9E8Y8ukR<+jdEY3ZwzI`t!Z7E7MR|X8RY# zM~>(*Dn8H{UJW9&gJP+VYH;_#1fN^d&6vwa#rxcysOfngjg@6g1~IFW`+Wp%>EZN# z@uRODvguwbHVrg`OEg;Kg_m>j=+@Ft=@Z1^)qdbvGi3+%1S+!5KJo*G__-HnBQxQ} zCGDPo>lH8>sALcY;AgUU zyibHuh!^F-RT{tJ%+&~DtXE_*x(aZ}J5Di)$sJqlPY9Iiroz-1Ux_S%^JbBA&Ndwm zgEPCF*b0Ygkl)7E^Ud`zc%P!^|4=CvPdd;GKl|>5O&{#lTUaBAPU5((Pgel+lz)|Y zHT@25i`h3vUTec<*Wb65J3_JE_v)7uUtD1Km!s+uli85tcjvnm-yLM25w@kfR*#a* zA*bcNiTEnMue^$hQTU^nKP7MivFy#n%0aVmP@$|h^W`tVn#(O*nyitKbCM)%MtHx- z&R!4n)Im7DO4iK!4537%=wqdB6pB{&g`N_s0ULApolGAAq?&`??4goyo!@F%1JQ*`Im1xdBgqGSEF<769+4t}(ju6P+mw)(<@L0l@J#eDinVD$M(x ze_4powmQnVz8J433ZJn)K5ra9zylL5dFjw9JgfVGYO_~4tiI>Cp!~BGl;2<8dNlbh zxOlBQIXPrOQ?*S;1T=x_*{oMzCf{Rat?1oIxe8R7%E?p{r1V*a+J5&Iblbe3nr`|4K~H(2S==w5RI&GvULU+BCIoz=6EvP&`l{GT2@{(+nD$63^z3 zmBCHMDUn~HM2tjwa!aBBk&&6T<5c5^N_^<)=wG>&@YYIvU~Yf43>vaFnMhk!qgCv| zdco9s&?n9B*UoD}X3bdIiU3b|!>yyFNnFYcIdWq*Ek@$iQ@rnWIehT0>p`iM-ZI=9 z9O5ywkO@@>C+hdArXz)~1DB#>8Sd{XFW~(}!gu`D{wvm17(!o@WGbJCHzIYy9Gx0K z-ZLo9h&>x4Hy2Wb7Py0Dffq|mFhQa!SL|e?LEIs8ic9lJD-6qWsEAGz@m8aHUplE< zfO~tgL3e@|e&Jz=E-`rr{&tmF-c?zk-X3+)halnRm@cdrm}aACs@JvD`dCcZLhG~E z9EoyIDVjx6e6d(ncwvGd;tCD+9B$>WhOPsPJMIzp2n}YveWoWXaWN{je50%aLS8aU z&+jXMddu@R8Jnu{TCDCEqiQtfIow`l_>c>F`yae=O(8r^46Gu*>?8;+y9WE8NxE9qIbm7v2Q0{}#Pc z1CKrR1@pD5af|G2wMR|m*mHa5Ue2T8kSaI5)yjj&&ZOl?h%Sl(i;hr6-YfY4oxfj8 zUrC3g43<;URQ4cX+0p-Gt{NUC2RsWY&BA2sxlNbGUjb##<8rU=NbFIcjoM;$KAA1zDW6=`gyjoHM)?PKINY!I;(Q!HLQYBFuPH3X}HuE z5AO^NwKp$<`Mq`G4-RF4T?#kHm(C2Rpo~k_|51cNtm)xT{6jJ0T&>ugX*~Gye3c2l z6oZP4#%Iib+yOB+ogW{)nAg8Kr7p*Z9vMwIr_8jg*=j>gn6QH`FlPLkMr)QJCF-^ zW;&TR&SztX5>KJt!D`4VN7#d0wGiTrFXqiCB~lqS|f0+1^SF0Ozd~9 zN7m!UeMb&eVA7Q0vzUG9(9~pH$T=Q~wF3v->^^!!H&)cxZ%Kw;nYXvSsd7-7;o4*n zp$~j%-F|tjAPppQMgxRzk}!c=Z}P;+1b8i^nPE?mjPEtJ*&g`ihza$Fy8>M*(TwgV z)2LqshVaurfA!244sz&kX4SNVFN*8x-_D1k)N+Ls$;1j;t*TxP(>cTQF?YQVwG=3d z-}aq;vIOV_@2g%iOTrq7af3sk0BmjgEYUb!;e+Xj42c%if)9(W_%80rty zqkZvqksS?5z(jsMx;fSyOAk1|dnca>hquufbE}s^8}E+}ZVqQmUgMYID=t 
zSFOmHdRIvF!$aU7CT}GXPpFks@h`X+N}*lx@;bW)5x?j*=Hhvt@G437zTW-o6|%`s zd^r6q8b-vIbF)ISAV1P)?et<2RQ>wels|5OA8pA)6*kOZRxrZJUo{`vVzk8Gm&Jk< zzvKBM77>_W+md~z{0Y2u4>3IPIu+BOW)(e%CFiq z5EFTDi0xPswr9!=T`^3@qWbHvLY=DM!1l|Wa|crKvrM*c*JK2qYVOZ(X-kLGme)7# zmKLIe&as`b90^1`$_`G-hE`FaAI=+EKEFyUAN45cjl3ip;^YWw>@`%lDEPxPK5*gMEHXEWxu(_ps;&4S94xI9IGi9Hn>m$PrAl}`b*2;*Y5mBFkJ;(9pvTur6XWp zqv(y0_6pdV^VQGgMJc!)?2X*a5sx!2rxuR&SAk?Vi&65qa>4>)d9xZHK{P1DvV73; z0-xqKfs=dc;O5}eqUHQ6_#*$-;@h2gJmowf5M5b=6lZ0#=-yT0x^(91r$|ENa+;#| zzHBL0o(Q#jAoLn|>Pl_W%UZzj%C+6>sf`%EoUyI0DHW2}<~iBEAf%YxXtf&&gx3+% zg*|&(aC?_QP3JXd2+?+lGdWxW?Y=dmT+(4EnmD%FI$Dn(?e}H$ixk7A`)?)wRbrG*gg`<9@Bw(J<_(W16nHUj{Qm67~G$gx|-JqrTgm{BfSO zDxEVv4ReE6TE{Y85X{2ezBP$_81wFu%l9D+izf~RMOIcozPHHN>Vq|K|MU1jWugaS zp!Y2y+9tRlVP@rUvKXQnf%nbF95^C9Cn9^W6jXQGR_+gKL|qT+1M@`uSbD^zv@1FB z_`a0yNsG28tZz0lPZDs#ZzESHw-GXb@qO|4<%8o<|6bnXJ+x(zzL6;6NqACzbfFv@ zA@;|uV4Du^C0A6uxp*p?*tenJXqCqu1M`+0e$7|PFlBF$Q2V56tfu_5sy-dcFme9(!zk za%uz)UbH_MJk*Kfo`*$TgTDRs2mEgeA^#FlLMOUh&|KJ$dg5nA=-Vr?+^UJGT&Eh? zjRm>MavG8GyFygO5TRp{NIPJ0u>y{_G<`Dn?g5Xjb%8_$5;s-ImAam(gY-z_&){Oe;`T(|81h=;lj4fmi65bBmFV@oNF#PZx0Y@Tx>(TlPamkMy)7Eaz8;m zR|ye@hJp3cLCCzb`AGf2cHAp*Sy-c|9K&JiC}UF#OrD=QtH~AvDizwgLEecNee~{F zss$m{DSv=d@>ZHyGc2*rG#&Q?Owu;R%q8c$5ESKhLhX&b0{>08q79>B(> zd%Y@*g>W}KrCPVR66eNW?Xe4PgGWzfMfUtiM=dGNcL!H{&|XmPsm-1&!i*~KVBx2J z;6BF`8eJKK(%vrz45T}-!DlE!d?gR|7PKunY$XuSeeRoHZXLzu5tfyiULqIlRl{Pg zWwA4DMi3sDBk^4GBVMAUe)fUv}ApmdsV z3#xVg2HRj z`50L*!)go2@;50;i*^I^1D~ao&>U?39+O^lCm3X8hGItv4--Cy@4}yIN^wOq_vC%H zWc1rHvf4yF4pE^#%Ymk&m}K!Y;jL&9z6n)fW3JA?yV)o12?ln;;TD6&-GPRPmp4@du9*!T%$ofJ14d*S!K+y!Jktsz_Zb0eGCB)DzF%!y5oIruXOan)l8wTFA~WTOpYu^+F?u4`CJp|! zlK()I{O9QX`$S1e*jb6hjehJGyj;K9k_J4Rt$D`Jw!?iVnrAhdQK)UWQ)R3bQCRE9 z-TJ*Pkl_BLi=Vj*Wi1|fYJcoO^V!ukhW-E?y0!NUi+3$i(?%~{(H1QIF_J6A+X)vd zs3>Z6dU0PKr|a|L63Dsx3K%c!;7X9^9L}YX4BEMq&D0!BdOJxG7h5cS5qz$HR};;mD@JMO@9r{ABd9w z$H4sacGiCpC50nDo7O*d!%MRTt2>e|y*-?~%S9_NulV&?XZzL=+=ExY~Yj3g7 z)1epqKi#8KJy?xgiAS5tEeGIjIvLkF&K{)Nc0`6FXdD>_GLH52H$lvY4CiB+?PyEZ zEFgQR7e!yXXR>iuV)QQ|O=i1M5VYrXVI>@DctcaqB$a33Nq5sZj@!HoAO(5E( zBT*gavvSZrPJ8s}lPrkiJ|pAA*M><&x8htP38K#B=w891N(_8sMcPY9Z2q=t{*Zd} zj|b-;TuA;-lzixA6u00SMb-x;we-#+V~{8#^Pi4x)l@qa*+=-1!euhUhJQrYHR)w&`53~^iGRvHd_hx2;+rbea#l=S$HzY*%<>(!|5vfXseNL&u!yeszDsx zTIgfv(hiB?w|F}4H^BZqZYCG<+rThA?nKZ~4DNNw>DWcr2aIwuMgvMcu<5*u+2ZG1 zcra+t)hH8;>65(j(h`Fxd2APBk99XT%gR4-Y#e~lr@2cm>J2#R?k}@U+y`x*L^v6n zRl=)lna29$b+{BNpk>`pIMLf_ZDHk$#cnNyq5k1Kc$TBUJJZku0X5364}}wPh?nn9 zIVHBi-jK`Hh5PGa>RzqE`At3ex5fPjqU3+h#=lII1m0TaqzEBo1!l4DTtsKd(jh0mHEK*pBrqm=>~mv$y{P z*40vR>=z&$2n6CXGd~bbwkydywkW6ISlV;u=dqbs`=YX~Y^o5)cZg6kJ!ylWMti?! 
z5{{Ute2cXD*&1vntAmc^cEc{OuT_3Vt-#JFetZkTMWdiv=u~4VHq%`Gp%&4JmHQ<~ zLKliL_s3Mtg{2`VxuZUFoTCAUHwCbEapeOmc6;2fi6t_tQ>hmvI#Gk}MZg)AQdo>| zQRgvl$2vpjd6HokDrkJ|xX$hbMM5qTk6AN-Kir5c$-NKndP`4P(U#!$EfF_9+xDV^ z*h#u0&pyD>{CuuT{SHXEpnOP4mH4xo?|!CFYC$Fy_s0yM5(&=~McU%qvY zNCwl=e!XeV2mD?=Su2{(!5~$8XMu)hY^r}Fo7vxvLc?SiVy1e~=xKnq*o_{1D>Snm zOWV;?+dQklwHX#@OV_h{Yw={(W$XIyC0H**=*=m+kW!t$)W4?|gp&hiG_O%Ma3J6CN=QH-e0ZR6TLKuS~eLM+)luaYiMnjKG2AK?Te0ohWDCEysJX z3YOfX#pRA9K>S=6#muWzFnmAfmVK=Uv^7HWOX+h!eUv@3ORogU{i+PF5#$mj)w%m( zBO!3=yENa}MnBPH%k0`_83~7^mP*`r5^itG1#0eB8)4f;{YWuhLcV?b{=o~_``32- z15xtdw&OoPg8v6mVwgo9Aj#JP)pv)MLURVNZJ;Uh$-QATzg$(Tu&WYP?WA@eB-kM} zDeh>yOagWluemenF#?{!B2lxM(a^HV`L{cH89Gz1OmCg>$8Av$i`Ix#nhC9Emx3MQ zagt8Se7!RnI=#4VnJvXart(nVuwfD2eO;AS&|D6ETMX^;#9T1vzB|{4i?z^_(c=Ei zrUzw*loCdNWn;{p-2N-MOvIJbWU^WbCnt_r#>RAD@V#%^$D6Wn zE_Zf4Gp7pqE(D(Kp71~}9uxUsrb0X(&?)Y2kO3PPc`BFAcH<3_&rz3!Nr;e;e|f8} z0m^O0VmuPs@OPnzCzsA31g_aJ)UH*-*@sgO{xr29?vPC9P(s49t%i@)q7nN$IoayA z^~2#84-P%qUJf@qOdW*F^Fc@xnebDYdZ<&9%&+wl9H<1#7QJ1`9YlTSrD3t$X3khi#6vYqDmt*Fe+1o>B2scsb zi%ZI91CgHYC(pusIKHmC7#63W4QEK^uG^e*K+Q&OKtL&suo2q4>KR=MOSh&BR+chB zQh&nXm;nhRRrS{-KWE~8`9QI?m)WRZ;7TnpTnzjAt$y0d6hXj#NB0YlQ^7}I+F{pt zHAdDJ9K1pp!m`ONJd3$ljsx9fCrA+qQ1N|l^dx;cW`2~KijNAy*=X4d3^rAe@#yOYs^_E8%$B*Upmw;i5^gvi|3^b`M!6r8n^ zT=x|&fT+QCHxo4j=$@sb4BA5=CmaVgR*Z;u+-e1@gU$!M8#*VTML2dHPNigR4J`pt z->sp$JPXitsPUG(Rt!cDQ*L4Uk_C%b&DZIcGLZA^dr?P&8lX7SuwY+NhsurRH7sj{ zsfMF;x}A3hibp(s=OdpBhtwneIgR77w$5ymFwvrrW9Yst%3B8CsrT@QUoS%_eikn! z8i~#yY^FQPs(}~J9o^Z|h(^EOpW~`4z)BD2U3Ss0u*IU4X5RqfMPGlJg4^x*aPXOR zOjD=QMAJ;WB*}y5_UsYx-xh@5;n2Lua>2(Cb3DaI3=kyiNj~+yMeK=_LHpe)IN= za)LVzIly#5GYdGk>vHESmEgOGrLgl$b=ZB7a>IcTscKPEsi{b(LnQC-#1{$~$nWXT zI206)TvUUMN~C09vpH9}N%0N#o+RDA;g$p9?z_q(HZox#SEZ^vq#jKNV=1M#RpH=; zSC12nBGETxcdYDiA=G(uJ{T}8g@!Jrpsz1eP#O5U8CBEZR;Oj(&gB|7_a+<(p9K$36ggOK$-{I}aL+4^0juzVT1v%0{QX+^GY!!i z&PlWg*mIGH6Z$4dCSV(ke#uD=eT#^@CEv}ZzpKV_v7bW6OVZJ~?X}*%fI?{SZ0<2h zdxxh&ctbdSO0np=nnhgYTjUCRXt+PU08-3nrkM`Lfx-h#$5Qb;DCZiE{2uEC-aTFu zTtw@r_KU68JM&Di7g;!ONZW>s&!2reR}qgdMT9SXB7Psqk4%a7O9nh*HCMezAZHEh z5AIu8`{NhMwKlz_c6c?Xp)5^^TR$viMqSmY#-3Ks&A)rgA#eBalon$p-nHA=PS`o1 z`1bmO&!R-GwQMG%!Gkyp zVc$~mg$$qe{2>x<)^NZ0{c<{J7MM%ZiX>prZL@#|eG=ZuC~6$;O9h!^)fxXR;>_N9 zjb}AU0NhB4UR1)a*!-T#P^+^7BK_sRaTAEvsK1()!_iW3q06f786`oycu0FwRSp*4 zNtfIzRSoTHQtLE7tFXzulA{C2Sj4YhKI zq`g{&F9Pbp>Z1H_Q@a*aFuvrdY?F=Mtqwx!F1cvmHQnhcnglG2dy8+<7XmxQRwrhS z$9RjugzTIak#jpb%S3xN498O=-6tjp8d910hsEVENT$|(&&WlD`Re`J zm*tTG&+IoGc%2GB<6KU-s81&Jzh*h#Otj+9Jr}Iwx>y5Qt_90Ed^n`w%D=H$7>oA(issFVc;R3y5a@zl$7hG>IuXhweq`w8T3@^f?Q}}BV0am(3^u;4+~5=2V#5+;9b2O%QAjSO@2^}ljgik>WJpI)oNegix{B^2+ zCA=ILh1D;rzUsic3%=JV!x4@Z7pr;mSE1|qt+9TLB}Bqup*hpGLbMT(srz*=5Mhsu zTHmfr5M-m-9lpsEhqp{ppVmtNm&qDigF^-QEUAv>-t!FjX!@i^q^b@jCXZ0%sbmr1 z(JMB)dQ(v^W#^>6J7H>6o;77`UINw1HUnQa6@jxwEJ^QEIaKeW>)uO4#E3PE2a$az zK^K>}-A%(Xyz)9%t;O^mGHS@^d&d?)Qs%SSvQ-kyy#KtK^R*qnFpYZ!y~#m|Ey@Rp z0~;6zDo^{xFHgG5(eDtjJO}~aB?Um2n`N4*m#wV>;?_YtT`e($q%QT|)(8*(wggAd>ZxC6m zPZk)nx^=v#ibqNhSMmK{bFp!;^|M26H405+laB`n;5s!$jt7q?;_$BalQiW>y?C*!J?`0AUK}qV{ol zI0IKxw@T9xW^l(Od8H;}9iZdvLf3n?ToeXv|16#YRFS&~!WON-{aQzepFJB-(MV>A z?<^s7pBwit5zd`eNBC-9U&zAW@y*)qk99D2i_i6LA|6a4xx(O5QWdPz&pA9)?Ldjt3--33$;11vA-9_aJw1ZZpOhWG{dEmo z+r~z|UXli}+<_JQ`ke5(-!TnxiF6#hw`KRkhZ$h7cd43hl{mi=Xo4LK3sF-}`MTAO zWIQXe(Z{^!J?M9+YVb)BagM%qzbk`{;Y8;8*UryefX9}uxGFW8m5p3f6ikNmBiJGQg|v-8+&Vp=hH@O1Y2IS{5FzGR`aY%R!} zqwekUq8JaJ*rIR4nFRWeg6^c>uLsNUT~gGU*|3+cpjlNp9qd`FlM~(DP)fR}fL%Wk z_eVwOO25kohUUtBQVluae~E5quc9Y%4MxTv-b%u~xqL^&XzTIJQK_Q*oMl z=XD#To7dnKY;h 
zRq6WtHUUVFzF(go2#2L$bC&*x>FCSYuIph`2CWwq{qC-(;DCUaxyWiJc)dNRGc`k0 zA%1e0kPed|dhk=Ds%SW5Z@r`MR2GkV&cjx-ogbiTM>9vo)gl--u=%9oOT?=+e`Ga% zScX$#TiB~apFsgPv4OQnU@L7T8{-{;I@B5aCUYwB&BRG%sqKl_YAn%2sAO@|>sh)A zu@q3+c0XmvNHsIUF zRviaiJo&ZdQz4iy|Km!1Q!dP|Od9RiEJ6EOT~!YrBBOXv&F*+pAQ&}Nm`&;t4z6)( zQA=!@P)#!DQ@WN31yftR$4{Ce+Y_(Y6MiuuKh)&&RdPltnH| zGqpUp#TJgSZd842Co?drH`Dt);YWJPa`HPXg%3D296cg7oCJo{d0;a+AG5y;2uM1L-+aLi7gll#-aaV;>h7NI!qb@;7p3HJ+PDH}ACXLIs4MVE zhmN{7RT&OmJl#t_nudJSZ>9EHl)?HTw;i=QCAh)cJHnnq!lgH|?^5XUP|wG(uU3eN zF?#2!6!6gtqh`NGE8MijS3GPD=fX;H=xEKQpR(~V!zp>wjoA&ixIaB=-j$ABef-A* z)goc!Rzl0GLnSb%p7ZhQr8h9gnz)JQxh*{Dp*qCE^Bzyo@09;mcnh(h0q*w zY=*fr4_NH9otXq>p@a1xxt2>RUNEwH>p=>G+sbu|-W`cRI!lt|J#`<4sV7!MiwJpS&B1aszTTfDq!f)w(aO71 zzNVqTlahah{#0{KY4BNk z%~!KG_^+DCVN?A_cTG$m>YJLKdZ7LMKYskTLc~8Qh3w)LeqG%A94gLH<()S9hCI(- zG$o%~!V#0vtGkN&v0_n)X70^A{sje&W_T`WsVl>x*jV z-Vkq8JL4m&xwj2w`tDnm(-U42-g37(N(hs#q-z2ex#spYy@f_ zvuX}T=7ZxW!Ndz`kNy%Mja(Mp4si?&m&( zNy_2WddCR-R&=@PW_daAY|>KZbnKB%#LgI> zf8?g z-a37R1MOP1?WRUx-7uQ}88`;&neKEv68$LRW8l3pT>-+MyNbL%mSBLGpH1;XBCyo0 zE(~PHfL+vs;lutJnB;x4D&}G@G;O`KwAr#1di6|a^p@*Uf$O+_{JjROir>6EJK2q2 zu7r&&X5}t9m%40pSw9d}7a0Y< zTnO64`f2*e2=ZyXcGZub!p*Hd#ixn3?}+%rb&9*msF9~@UruCG{jKwK{C_(Y|N3Nb zdsIj_JAB5P(`NN&^}12rEw6$6RS!Wn)D_tF z&{1wOoBS>`f{9>r?Oapd(&)|-PS4uq(KBZaa^P>g`sC$^0%7k9(p*1IjS zE7MrH$>@Fb)p49Hw3^lb+zBor+Zw9}Yamfa@rcn-C>)>eYBm;YK(%Q#u1)J7@#XRA zr~R+S@ayR9+BU~FFlg-dNt*1$Z^6zKzd~N4TqnQRbFo$k84paPicbYEWy&6{;Yuho zTH-ye6png@WqmR-rEv0*;EZlNVWX&17XEFt3(|QQ<-bsO0QDM$>^b%b*bq{UN^DL8 zH?@l_j7~AI+>veNY}bV^ew6=Ey5t46LM@r=RylBGg#S0USQ#wE*eobm|EF!!ANmf& zH{yS!M(`PE61fq7%Z@Pm$KerF@|zue?(J}rn7@B4<;YX&4@eWGZ@s|afyQZfckS~} z1Bq=N6hGJ7P_DnGE+eQ0GV5bXm?)~SQuplS_OcETPu}%t>Qps$^nbX+F4T)piuUIz z3XX$Z)z`;C+diUxg-Wc~NDdl0?XdQpXoMc7!~IT-)hOe`%16;k8dm&bVyMK(mS9nJh_irA}xo+rzkt4I>N&+?b)h_GnndwT{b|{NsO0XBJ z4*!0md8ZXPk`%vE-0g%Xd&R~-rV*})I#i}1zUdg0+WeqeDhQQXlP5218A7SU4Z>#} zs{i@|l|9mIV5&wUxg5-DqucfI*tR1gx6%r>x^D z;JWuwn$K^EOp16nxfIGWWa~K-!FDAKyDT=|$Wry&h?pA&2K)X9gqw^MBg_xC_* zl7Hr%=y9Yb+cOGJ^Kr$md2Yd$$O8HhbkzPtGZZG!`ZEwYqHj;@#Id-wAcK;{XzkZ> z*x)05tKhA~de)aWqz|>g2@lD+E@GZjDO9vy5Jm-|iJG5zh>KeA!rdPV-y6|T<8sl2 zQ5$S?G%zFYtpU4(D;wSZMDoc~e!s_ZWw0STDo67<18yux2A%%d2#$^R++i~R*?s5z zw?^k5TulDXmhf@;jc+h%{E-R)vpa6unH_Q#S@uj3gX*aB?+T8@6t2BZ}dR# zJ~a)`@KH3OJ|Fihs2juCH%1?vAHpEMOY?~gLvU&C>LFwEYPcR763BR@6vGV8U1xG` z!gn;!EIe4!U=O*mGxObtkSIj6<-TGq6tEvX9CoZ$|-(3TF#C>YeU7$x#p5Qam)knUlGxx#PR-VI!^;yVk z<5<@mTY$2+mj<#ed*SX1Z}AvmNcWOmfIe$9@vmj}hwkJ5Z94vCzT~TffVQ;MF!tXw zVjbQn16f{~4W5Al6gmEov-wRMe7O3dM^`c$hmGH_Ut}kI3s`xi&Yt~%AuH}9HxK)Q z!swpu&J_(%bLvJaccdMtSzX$`#l8Y|NP5kB%{0KTEo`DYDeUos?(XYrS}icOc;w42 zm3GK$BEM$P+5wt9Jm(Y?GGP5Q_fIQ{Ot>57J(Th(2X~*E)V!C~h2@fstX4GvVC!@} zIx2<`iQaif<9@UbRBHxg*d~j>=tAUxS7aMj1(Fp;yOiRoOeV(cVMiprel`_D)(LY3 z8oCRn#c-xyINqAC3k8Kvis~3=qn)eES0TYH(DGb;etWSL!kpMu2mMGOXL4PkfFmEP z0-ETn2_iF!rq6*=kYJ6^Itws3R6(*~=^M4eP7qrpPvmDQ#zpg zK5e4wiMiK`thpi9w!~#IM(B1rTWKHY_q6AdD>-1`CVEp^d&1ksf6`1n| z0zozIWZ4$eC&2Q_GWV=U9nxIY5nm*1ba#CYcD9o(LOx%Gu3H}qK&(ZpOStqA4Bi~Q zHx-wSl(gxiK|E~`mZ!GDBb$cS?)(q-a8;oV$5R2yr)7lb*fFWBJ`sf}xA+`-mI6Lm z7U7>NJAgbRpqpc<2+s=~5+77Z!`%-1g2;=WKwD9kze#2c3_UivHo_W({;mlxe`}3^ zhBEVvO%(}7>EvoR?Hk0W>YF!PinQUUz^`?Q{jE5|*dcX<5Tnwyc1-8)Y{U1G6Jn={ zj%)i%PTGB@gkexey=)UZ5!;Z<`TO-sWp?`BK9Aw-}(&0?qpoDIb{&Q^s2^NzSGwX`twW4hq_>D3!N?$rA+Tf7p- zU$;g0x|ibfys36)K_b?`f=9J{R|Om%I&M0kp9|u{M@3e3>!F^&9K0upfMgM!0Fj0) z0t3?#)p}U_*M8*w4W9_k*GV%i|gf*w1qad$ti z$CtGHTeeYV;r5N1Da+IL5XV#|z+ci1ZyF8t?dZGkM65j3TuVHXqBG~7PuJs77Pzfy z`4S}A$$~t$_Cq+Sl(Z(>2QK2DCQXK_fQgKKv$$6^CNGQZlCEn*Et$uPE9KSjGc8up 
zfx8u-zX_ndz}kV&+x89D3$-JO{k@Jli!Vq={mOXsbgj52#!ZXYt^sThynOK=c)#rE zvY*70Lq`MiL{TPm=V#kb2kS8X0os^J^x@@Ij_KxK?T~H8&erg01YIxJ_qP#E zkq;u`xfFZGp$nas!lBXuO;;d zB>lgC#s9A*^$$+@zY``;D-#Vl0!CrxYk5vq$_h#@@V%~~8iCU!EkQ@E0X)QTXi-k3 z8@+>+Lr10;!7us!4B56Xc!}fY#etAE{M(!IPh}O}f0HoTQ>jzYQny8A!2_x#=9vgXNltG#lnZaJa8tw;@)Jy0X7FY?3SSDyQ3t zPlPj)H(UBKFZV)ZC{1O^&M3uAjL_hwoec&Y^%3J0DG*oPc*r6<33{lBK-z0*AoR=T zLd%XuwA9{h?wl6^$0L$1SBSKL#pDTU+aY7rJ7=W##V-Ij+wampXUf8Jhdr6Et|kB# zN9p@lQgPs(C~z~PBomk?q;Dwa+hfUg>$Jss6sjPlYC>kVd4z6d6kz zmPDFSA|(=brcTz4!iA z&wBOrSTAST@4eSvYm=KI+f*5fuhiIRyD<_@dGHF^y(RnifKQbRDJg z)WYbtI|FvcSe}-dpN)-HaUWbwuA=D%&!4BhH==&Be9&p%1oTlH$cgf5#Bz@Nol6(f z!DiPa-KHlP&qN5A@bEEFVEv1-kngFu{0pbf5?zpvZfMJ}ajW!aC}oXv&kIo}83 zI7*5Io!K3#?blKyueP7(9l5#oTYxMTWh$@FG zd@kpl6PftJb}Gw7x)8lb3iAX zLrn$x{9fj~nB4{H4_7JA+K}%{?pEduS34%oq;=M2gu~|EHhcHOX~6ry_U=S)J7o2&#?K*14zuh|6Ux8}TeQ$hYioiwB!|Rv*ZP?IGPq)`k z!RvoUTi^JRN(RxF_xeL)peu8=TYy9qX;5eMtdV?gpDp_3#Xm0^cF4PlFScdEyYbwg zNo1Unnt!mLrbGe8i7R-O3Kha#@zsU(gI?eg9Gq$z9u7&-yirNRbgVodWN+YF2th{D zcbG+0sNU_V5@k(ZcbSfkRvzW>NPpvq#1AjXSU8jE7RbN^0lLd^De}FmuYPIE;+yC* zz4M&mvRF`cUUqi7?=?K~r%rrKr2qnD)A_glAdkoA8IvF9hr^zg*~`0g!$D`g(6q#* zOsp$Vxcyp+frd54pWa(d?0F<}g7-`*6SsegkosD4{ji zd4n|Bui2}(a)(G9)J%4xeOePbY^r|jDppGBkuwDrk@nmc+n(SIdj@Jc9Pkm|TZcww z%Y&x!>u|#TL(HAv1n4)Bv*0A_km&EXue>K?9A;NBWKBu8Ot-4rq+^~8?znod&b>Sv zyuUpfka^66A9H3tnG6@AwqeHHKN^VB!H?38>Xt%cj53zI$wZ~j%6(P`3UH@##ia22 z5-cKD!L-INqDaA3{&{iDa9nG_g=m#D{IgwKRh2Z67}h##LZd$fb%EeB0hdy-&Tl@i zxMw~t+`ekCWz%W+-Njj@IuQVDJ8UNpIg*o(!M7T}Wy*nOvSq{8cVtYAI5%jcdGDmF zGj11vXe`!xJk|sDjB2B8vZSutdXd)>0d)xZI#D}iRfB;B>CrO5ckyP*GD$u1ee?3S zwE!-N!4kVoy7Y9i(=JptEu>q8M}&K)0>&E1PW*SBXJ0B%BcWin` zA^!0_DDvV$G)`UHe@$zCI#%x!C|#*s4VyNfzgvcNC}0}-L7TH2ByPnk+`3eXu|qK) z6YH`tZ;>$jt%-6R3J|)IpO_EjZ9I`~5l=u}=Bv?i$qvvhGAc-tgdkB8;C3H?G*_3)Af~y4w8t&{}sg=Ee__&{PFc_KzM8$8Z$0fi!L5-7bKBpTxa_> zj0i2u!nPENy@3OTaQ5Ti#RTaTNYYhTc(K0}ra96?W0Om9<(0^km6Cx_INM_&dSe3` z`5&$7JLU_|>8&aEB4V&<%Y{WdEbB2gwBl>CUj~-S z68Q0d_C3*%iD4#zTE*u|u;SgTWglFhqP^%*<)=#7c!d#s^^`*>Jd~N)7f+fKh#gtj zX2gCE_iAn3r?tT`O;!>rhQ@sDX=WaX!dt zGBrf$0cdocVMsed#;}wy7fk+mj2sWH9h4xIl;x^p`%mw#2mkBtq49nB@JggXSFz3> zuk~Ht(0`CD(jAO%C}%$fy5*9eTMie2_@mVgAq7tm{w$x;RLMcDMQRV;>pTR{^&tlq zLn}TrUAB?SkW6emlKAzfRym}mM!OA+RO9%mTlJg#=^)oW)0{mb79=|QC~-#&HkOf(k9 z27|YiIA_NL4X#u1@ao8Hc$u?VZTsr5 zaa*rX!FC3S-&dXfbFvx#I9<~`s8)b$SA>u4dJ={PCA)NTBVyn+d*snIq~dSG;MeBR z7f$R%Ne0A)-%u=cVBn88ONG5327;5tRsB11&iFobAy*sOz<#d8rBlXVgpHZg z3c6hlpe(iZ)o<@|Fj@5O*qK$O$k?(iZL41qh$eZhH73 zJ5n8MtXu(#_xCp&Ua7_C?2*L0%|+lpux*RqwWsju$Zz-6pRyrxiCO2m7h&k47%=(9 z=phPxO&3$K&BKMCygP2@C1KpvH)tSY90ibFqFAnwR7Jo0UM4oYo0WxQp$6uh=?z3Lv# z#G`urxxb|lxnu8Gdy#ci`wO2=f722?oUb+aUS=X>oGK2@%xJ*WbzY+D_LsvPPXF{V zc|-{>AF*%om!Y|C)sb3<9N4tP?#6HOKEJ7vy5*}wHtfF`<*NH50_iy+=kP!$a0;yI z?IQaz&CAbv8Ry+m9dU2)QnZN@G5DP*m>&AnC11f$>R_MW5ZktP){ z7M~o7g#^CH*UITx7_()Amht!*^s`I4ydgFdR=$0GXBSTcZsU%~P$f;D;9bd=vlqzc z`{$h_0&Gpd`PR0B{Zb?NSoPTSa+78i+xot*xmpgUXHCULzo){n<06fgmN_u$wc~fq z;sOkt(^`}#oevwAE`BPIngyZDBBVSND`BH%2uPIX!?}6KxpxIrK#o!LfegJmoU!@db>e&?6bK3(T(-6q zHSDui7`-S&4*KS`5C70%LqJ+jynPltb{&O`$!7GD&j<>YCF2sE1V3A!NJYug<(5G^ z6ERRbe88Hc46?s`HZTs*!KY(Cn9r{Jp0KxM=0Is^?t+)QN^}A7T`MUARq= zU6?Jh8P*LPDpkV`Z#80e#3W%E|HU_A#))_!)_IHK3I>d{x_sW%Lx;sHj|LC;M?!*W z&R(hG474~Eu!pCc+(7vDY=^m61#CBqR#WWA$A^j{dm?!XV4a}s0-vTUAa58F|687p z-_Smd zS`pOA+nc5PF|ltjdrqP0BXBA_xbBN%DyqBbmVe!w2{&G3=5El z;W!s&tV3A2-mD=HcG^+@~3QBPV{L)*y;Yjo5CpFG3nukhH5Z6Gh%v zuAt`^;KGD_&GNKXs9KTr(wTJ7k}%WW%05Xx*q>%3zB_am|JXqR>V@<3pBhm2X) z{cZDdhBP->uPQD#lr)It`^^4~RTtx>dmf8#?resG)iF7qq{+fpwpX5+TN+Sbw4L7P 
zQvu&z8f;(6EQMsvx?gM>!#J1Ci6djvJ>VO3(dPF}!*~@ZCztUWyk5XnpvYDSPh;HA z3;%3{&DTtFQ+0~BRf@0W`%ZM5&8%4Xn|!G^ z37^WjSq16NBYPvvTjA^UVyv%!0m{4&p8Lpu#?Wsc72kO&8rddiIZ8##L3iQQ z=CWNGsARtTruN4gn92=0`i49s%>S+`@@HWp7L_m9)@WD@POEMmn!B15rkp4--`V~g zzFcZrkGZX2^z5if>Zxu}I(|YTxiu5Bq(1ibz9l^s>k?3J@Q>1PCtoD~SGc}}W1 z&whE?QWAt(K_dr6yo%76>AttxzZp;R9!#^Be~kT+!KdurWn-y$YwIKKF7(>^gG=&p z7e;be3%~8Z4yPZrH{3~LVw3t2%`I2E|9NMyI+Oa(zmEU*nbdWY+m4<5y}&zEVkV9w z_-eODhK!pZ91>Eo&`d1`bd>(s_s|zVE$NTzu`Iy#6PX94-ouDbA%hwSw|QQ64jQipG&B7+u37vcpSVcz=Qcxb)1EOGi&A6jmY zdIK817|j!~XyfrN2#Kn)VQ99&A;Zc%ZE_|QEbDIb#bp3`v~h!_{3xn*t6#FzWMcd> z!NHV;)ev-8*8Vh47p#iBleTnT73^79x4vJe4UXO!IHAhZjl4^DF%Od>3y~AWa<>B+ zz`K;)UuH!KXdiF57TuW$=Lc5rOFWc|cHfTqtWxiU(H4GQ5Ay7t{Iw}Zi8MZ=8)z6X z9*kmdC7=}g|5R_JcK_mfL*O5y+pqPWv-8m$rjULwewAmPOT>V?W@09U%nc2-hG*{zDue) zow8-hWMkpdW#b}6J#u?XJ!0+Zf_&7~nwnq#S`C~=P zL#{JRB$PB`z*=Va}CXS~*&=SS!(CmWK}ewr;9AX#EI!!>fm< zoEq?(D_nW7I}7$)JblSgv=!N|2}vCrYR6tbiw)oJ6k=xP1k==|8SOKj)rG9`ahdUa zUQ>DwTA1j$KF^HD5aqw#?&;{kM@y zOS`Tkq{x89F^5KZhbwSsX~B4e_*KjcTPthwGyqpb?&6woX~RZ&Bg6gmq%~K7>4(s#kRMEcH%PG2X_NCNVWcNiPMaQ?eJrY7UAvKf+3Bv z$I9EE;CRQI(i6TFSQM$xbGOzLC$ln@Z9kFQXrJC)K$!~obY^{kd|4C7F?%uvPgTKr zCEj`Kvx5J5Ke9TL`v2aK|MvjF%Gln{;s(upo;lZHw!1X*1)j9yG_k|%hyT-=)QjHL zc^ic)AZ^q?UX-l{Z$I%Aa?~$|#(5vZ=AG>Y>!#{`f?Ro~& z>)3VPn?%D3wn4vm<2(!=i+t*7T@IaDE&__1=n&g)W574r3bXECRa3|@y|<8^uuDJ( zmI@W`krTSr;zDqIa&&v}wl|NA622 zt*!ui>+jtWGZY~92csyUHLHwoFbm5F0oH1-6{GxIb91-Ht5;5=^7@K z$-dmY`T+yT=DC21NIO(XnQAXoh=oa5DAQur2i?-L>%YdeB6H}SV9aPIwgk@P2`ATK z$ENJXA=mREr%kf#fm-a6hCNjx2Zo z{B0vSX7-f)<9odVJ_tTq#xesUP;bATZVlPCE0a5NsryXd)Ko%lVbIf`WFkaLm6q&!sXMz*Baz=BXAJy zuG%d>%kdcx7xbTLqL`{2IAJEzg~(*AEu(dYmCEZgiI zZOx3#tj?R6np#+2lxFcaBWpAJ|5J!8LtSJy=kWZ0itOCH3|3VvyZI6FnZ>=_+T<1= zzhrw~)F97o=8`1s$7*fXA=~CvS4kq-ECN zEvuB!4&5nu`%+oHqG%GFO-xl*a=*m9vl_9(95Wae6|^?^%}eaBc&xeVNfs!KB!@Zh zPT{{xR`{PL_AhMzck6tp{@uU-P%AIW8eKIvCdTdUjIIAJ@}~AF{2xA5+2*scStzpE z7+5pW-NR@0Cn?{V6HM<|^SupTo4!&yRZ0iO=Uwd2Oc{RjG?%^4xP3Jq zxn8BShm_pI(j3=!q3>EjWp1)n%JpEpdg=V$R_tfF+sdvI`!a4BanGKzz z*N05|Os}^wp<}IUgY6;pZj>zZ4|u+_8M$sSCTnIp$u!l&0RrZ=*y4SEeUu&pe&p=n zG+keh8`D!%^4_Olz^dkZd(+93(gksoYkA4UGp~crwFAw#==LlB=I>;Qcu3gtEqgQg zDEB{^d0!3|Z<+=sE>z$F&%&_m>n*4#|4`e2*$v8^I}(ONI?$$?O~C1BH@3{~-}yDL z8GAUm9vTkTqswxs9p`2rqmU9qs^@7P94(aPty@|Psb+Wi7JX`maQc+{UIJc?cPQ)sC3@PY2BHPyS~0!GgIz}Q9Wz&;!~MuwS@L(178`UVeg_#+F77Fr-YrM z!$5m^&(E{1q=la1>(Ld*Xm~&=;6CkC0=&yDT6N%KD;|;!;BOac1GliSxnk@M!0!2t zCau*851HCW_{zJmC7Ml7jXW`}5LuMDo2?n9weoyvWDrekX1KoRz) z3%xb{R7|$SCN8ZdkDXi6j~Rat>c-t(*Ix-8tVaPp?J3XJA{1omcP_6X-(L$Hw;t-P z!G5u3zP(pk@nT_<6dwnIU!iSL>iI%!(f(X~`*;r;Itm&2-@OgL7Au@&i+GA(KIrdQ zbef52qfDcvAa_ydn3kY1gPC0Wn=5Uy~|M7D#x3)fxGX?MZ=`*giDB8JoY}$ zZ&}NzK@rns@q#OuV63wF!>7DX=#I12=`$nS{N{CkYNra}rmdKPLsAcUT)y~mD7*(Y z`rW!bA|GOi&zlF6kjuo zM(KLFPdp3|Sw6jSe|RV4co&?Q5i5sn%_dPRo7(aAp*EV7)pJ}vFymXBUJlzeM?6O? 
zQ*iSMyBny{WHF|ZuyyY{^J zK%Q(jWz78Pi><|-Pgb^x%~Zp1sPBdQBjok2X{hZ%{xrC@#4PY+2W-lBF`03%hx3J9 z!N(?bKTkB0P8kxq8AEWzz)4iZP{j5NWHIWTbM@%HZg-IS|jNY82Bme zDIZx!1Y5KucHq<7(t^X=n=vFbVAmFsFQ5CtwSTd9RB@ua|`dWRgn<$Yt!PDt$`^u+II}5x&0~B^94V?)0pHm2{iVFXZ>* zu2KBs?9D>t+j;rto?&-*CpDXNRrV>CE9{nb?aF~>hq&{5$2#!5$8vVs=O(;c?;n5L zu^V3Vz4359#DG;}X<331oiLFz8u+m$7aebz)n32%961w)9rQ_q6UntEGmZAmC^OD@ zINCsm-mNz4%S_YJB_$~F_;V(_vW2jq^UuNQ_o(WAe{zvc$>i{y);5$CeX=e!umlym zcbr^xBcCnP*@Wu3dh>I1a;RnfEjN ztKi!#YR=o}gAp3n_o;T(LWuac-(9LPFlDP<^RT!FqzpODT`rQgT-+tRh9imptku?> zu(!KrVt>uf%v8rl%h=A?%FN!(?il$)MTK^gCT7m=yZoY=wV9oC$n)yZ+VR@RlB#W>D&0LN}SdnJFgGG27&0K{=cstEpgGE@0W`2xC zcn8h=q%ZG($x@c2X}~H@@-$%;Cy83Jijz#Qu!@sZZ?KA!TwPhkNwW7?#Ywh)tl}hH z8mllYGlq#Yw)^tl}i!Mpki>ZwIS5$+wSH zoa8&qA}&wzeZwkF^8LUnPV)W6Do*m9WffPTS0z*f>Rjhz%nO zL~LB3K*WX>YothQ*i#^4!eF7h}a0GK*UBQ1tK;QC=jucPJxJx zT-InCu~AHch>dazL~PVjAY!A10udYC6o}Xuq(H>RD+)wxyrV$G#xw;YHh!>1+ldXf zIV|7H+ldV>3Pfxyp+Lk2KLsK-)>0s1V*>>uHY6wzu^~%=hz%tQL~Hb7` zL~O)RAYvn#0udXT6o}ZMQy^mF83iIXswfb#(eO9oW3g%?I@+li@zG1gh>(|5j2IcG zVnoRl6(df*QZXXs7ZoE`=FX+ak7!v)#fX<>6pSn2%Ni<1 zyogXS;zgW-{k?pZp<=|#b}B}^?4e@Bi#in}UXD;P;^hPtBVJBZG2+FTiV-j7RE&7B zp(V#LcMDn`77QZeEsnu-xGNmPt@$)IAyOFjkrdr4bL z#fXkEs~(5>CbBY1ghv F{~OeTgaiNp literal 0 HcmV?d00001 diff --git a/source/tests/pd/model/test_autodiff.py b/source/tests/pd/model/test_autodiff.py index 1bd9dd0d0f..8442844a24 100644 --- a/source/tests/pd/model/test_autodiff.py +++ b/source/tests/pd/model/test_autodiff.py @@ -60,7 +60,7 @@ def stretch_box(old_coord, old_box, new_box): class ForceTest: def test( self, - ): + ) -> None: env.enable_prim(True) places = 5 delta = 1e-5 @@ -86,10 +86,10 @@ def np_infer_coord( ): result = eval_model( self.model, - paddle.to_tensor(coord).to(device=env.DEVICE).unsqueeze(0), + paddle.to_tensor(coord, place=env.DEVICE).unsqueeze(0), cell.unsqueeze(0), atype, - spins=paddle.to_tensor(spin).to(device=env.DEVICE).unsqueeze(0), + spins=paddle.to_tensor(spin, place=env.DEVICE).unsqueeze(0), ) # detach ret = {key: to_numpy_array(result[key].squeeze(0)) for key in test_keys} @@ -100,10 +100,10 @@ def np_infer_spin( ): result = eval_model( self.model, - paddle.to_tensor(coord).to(device=env.DEVICE).unsqueeze(0), + paddle.to_tensor(coord, place=env.DEVICE).unsqueeze(0), cell.unsqueeze(0), atype, - spins=paddle.to_tensor(spin).to(device=env.DEVICE).unsqueeze(0), + spins=paddle.to_tensor(spin, place=env.DEVICE).unsqueeze(0), ) # detach ret = {key: to_numpy_array(result[key].squeeze(0)) for key in test_keys} @@ -133,7 +133,7 @@ def ff_spin(_spin): class VirialTest: def test( self, - ): + ) -> None: places = 5 delta = 1e-4 natoms = 5 @@ -153,10 +153,10 @@ def np_infer( ): result = eval_model( self.model, - paddle.to_tensor(stretch_box(coord, cell, new_cell)) - .to(device="cpu") - .unsqueeze(0), - paddle.to_tensor(new_cell).to(device="cpu").unsqueeze(0), + paddle.to_tensor( + stretch_box(coord, cell, new_cell), place="cpu" + ).unsqueeze(0), + paddle.to_tensor(new_cell, place="cpu").unsqueeze(0), atype, ) # detach @@ -177,36 +177,35 @@ def ff(bb): class TestEnergyModelSeAForce(unittest.TestCase, ForceTest): - def setUp(self): + def setUp(self) -> None: model_params = copy.deepcopy(model_se_e2_a) self.type_split = False self.model = get_model(model_params).to(env.DEVICE) class TestEnergyModelSeAVirial(unittest.TestCase, VirialTest): - def setUp(self): + def setUp(self) -> None: model_params = copy.deepcopy(model_se_e2_a) self.type_split = False self.model = get_model(model_params).to(env.DEVICE) class 
TestEnergyModelDPA1Force(unittest.TestCase, ForceTest): - def setUp(self): + def setUp(self) -> None: model_params = copy.deepcopy(model_dpa1) self.type_split = True self.model = get_model(model_params).to(env.DEVICE) class TestEnergyModelDPA1Virial(unittest.TestCase, VirialTest): - def setUp(self): + def setUp(self) -> None: model_params = copy.deepcopy(model_dpa1) self.type_split = True self.model = get_model(model_params).to(env.DEVICE) -@unittest.skip("Skip for not implemented yet") class TestEnergyModelDPA2Force(unittest.TestCase, ForceTest): - def setUp(self): + def setUp(self) -> None: model_params = copy.deepcopy(model_dpa2) self.type_split = True self.model = get_model(model_params).to(env.DEVICE) @@ -214,7 +213,7 @@ def setUp(self): @unittest.skip("Skip for not implemented yet") class TestEnergyModelDPAUniVirial(unittest.TestCase, VirialTest): - def setUp(self): + def setUp(self) -> None: model_params = copy.deepcopy(model_dpa2) self.type_split = True self.model = get_model(model_params).to(env.DEVICE) @@ -222,7 +221,7 @@ def setUp(self): @unittest.skip("Skip for not implemented yet") class TestEnergyModelHybridForce(unittest.TestCase, ForceTest): - def setUp(self): + def setUp(self) -> None: model_params = copy.deepcopy(model_hybrid) self.type_split = True self.model = get_model(model_params).to(env.DEVICE) @@ -230,7 +229,7 @@ def setUp(self): @unittest.skip("Skip for not implemented yet") class TestEnergyModelHybridVirial(unittest.TestCase, VirialTest): - def setUp(self): + def setUp(self) -> None: model_params = copy.deepcopy(model_hybrid) self.type_split = True self.model = get_model(model_params).to(env.DEVICE) @@ -238,7 +237,7 @@ def setUp(self): @unittest.skip("Skip for not implemented yet") class TestEnergyModelZBLForce(unittest.TestCase, ForceTest): - def setUp(self): + def setUp(self) -> None: model_params = copy.deepcopy(model_zbl) self.type_split = False self.model = get_model(model_params).to(env.DEVICE) @@ -246,7 +245,7 @@ def setUp(self): @unittest.skip("Skip for not implemented yet") class TestEnergyModelZBLVirial(unittest.TestCase, VirialTest): - def setUp(self): + def setUp(self) -> None: model_params = copy.deepcopy(model_zbl) self.type_split = False self.model = get_model(model_params).to(env.DEVICE) @@ -254,7 +253,7 @@ def setUp(self): @unittest.skip("Skip for not implemented yet") class TestEnergyModelSpinSeAForce(unittest.TestCase, ForceTest): - def setUp(self): + def setUp(self) -> None: model_params = copy.deepcopy(model_spin) self.type_split = False self.test_spin = True diff --git a/source/tests/pd/model/test_descriptor_dpa2.py b/source/tests/pd/model/test_descriptor_dpa2.py new file mode 100644 index 0000000000..12017bb840 --- /dev/null +++ b/source/tests/pd/model/test_descriptor_dpa2.py @@ -0,0 +1,208 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import json +import os +import unittest +from pathlib import ( + Path, +) + +import numpy as np +import paddle + +from deepmd.pd.model.descriptor import ( + DescrptDPA2, +) +from deepmd.pd.utils import ( + env, +) +from deepmd.pd.utils.nlist import ( + extend_input_and_build_neighbor_list, +) + +CUR_DIR = os.path.dirname(__file__) + + +class TestDPA2(unittest.TestCase): + def setUp(self): + cell = [ + 5.122106549439247480e00, + 4.016537340154059388e-01, + 6.951654033828678081e-01, + 4.016537340154059388e-01, + 6.112136112297989143e00, + 8.178091365465004481e-01, + 6.951654033828678081e-01, + 8.178091365465004481e-01, + 6.159552512682983760e00, + ] + self.cell = ( + paddle.to_tensor(cell, 
dtype=env.GLOBAL_PD_FLOAT_PRECISION) + .reshape([1, 3, 3]) + .to(device=env.DEVICE) + ) + coord = [ + 2.978060152121375648e00, + 3.588469695887098077e00, + 2.792459820604495491e00, + 3.895592322591093115e00, + 2.712091020667753760e00, + 1.366836847133650501e00, + 9.955616170888935690e-01, + 4.121324820711413039e00, + 1.817239061889086571e00, + 3.553661462345699906e00, + 5.313046969500791583e00, + 6.635182659098815883e00, + 6.088601018589653080e00, + 6.575011420004332585e00, + 6.825240650611076099e00, + ] + self.coord = ( + paddle.to_tensor(coord, dtype=env.GLOBAL_PD_FLOAT_PRECISION) + .reshape([1, -1, 3]) + .to(device=env.DEVICE) + ) + self.atype = ( + paddle.to_tensor([0, 0, 0, 1, 1], dtype=paddle.int32) + .reshape([1, -1]) + .to(device=env.DEVICE) + ) + self.ref_d = paddle.to_tensor( + [ + 8.435412613327306630e-01, + -4.717109614540972440e-01, + -1.812643456954206256e00, + -2.315248767961955167e-01, + -7.112973006771171613e-01, + -4.162041919507591392e-01, + -1.505159810095323181e00, + -1.191652416985768403e-01, + 8.439214937875325617e-01, + -4.712976890460106594e-01, + -1.812605149396642856e00, + -2.307222236291133766e-01, + -7.115427800870099961e-01, + -4.164729253167227530e-01, + -1.505483119125936797e00, + -1.191288524278367872e-01, + 8.286420823261241297e-01, + -4.535033763979030574e-01, + -1.787877160970498425e00, + -1.961763875645104460e-01, + -7.475459187804838201e-01, + -5.231446874663764346e-01, + -1.488399984491664219e00, + -3.974117581747104583e-02, + 8.283793431613817315e-01, + -4.551551577556525729e-01, + -1.789253136645859943e00, + -1.977673627726055372e-01, + -7.448826048241211639e-01, + -5.161350182531234676e-01, + -1.487589463573479209e00, + -4.377376017839779143e-02, + 8.295404560710329944e-01, + -4.492219258475603216e-01, + -1.784484611185287450e00, + -1.901182059718481143e-01, + -7.537407667483000395e-01, + -5.384371277650709109e-01, + -1.490368056268364549e00, + -3.073744832541754762e-02, + ], + dtype=env.GLOBAL_PD_FLOAT_PRECISION, + place=env.DEVICE, + ) + self.file_model_param = Path(CUR_DIR) / "models" / "dpa2.pd" + self.file_type_embed = Path(CUR_DIR) / "models" / "dpa2_tebd.pd" + + def test_descriptor(self) -> None: + with open(Path(CUR_DIR) / "models" / "dpa2.json") as fp: + self.model_json = json.load(fp) + model_dpa2 = self.model_json + ntypes = len(model_dpa2["type_map"]) + dparams = model_dpa2["descriptor"] + dparams["ntypes"] = ntypes + assert dparams["type"] == "dpa2" + dparams.pop("type") + dparams["concat_output_tebd"] = False + dparams["use_tebd_bias"] = True + des = DescrptDPA2( + **dparams, + ).to(env.DEVICE) + target_dict = des.state_dict() + source_dict = paddle.load(str(self.file_model_param)) + # type_embd of repformer is removed + source_dict.pop("type_embedding.embedding.embedding_net.layers.0.bias") + type_embd_dict = paddle.load(str(self.file_type_embed)) + target_dict = translate_type_embd_dicts_to_dpa2( + target_dict, + source_dict, + type_embd_dict, + ) + des.set_state_dict(target_dict) + + coord = self.coord + atype = self.atype + box = self.cell + ( + extended_coord, + extended_atype, + mapping, + nlist, + ) = extend_input_and_build_neighbor_list( + coord, + atype, + des.get_rcut(), + des.get_sel(), + mixed_types=des.mixed_types(), + box=box, + ) + descriptor, env_mat, diff, rot_mat, sw = des( + extended_coord, + extended_atype, + nlist, + mapping=mapping, + ) + self.assertEqual(descriptor.shape[-1], des.get_dim_out()) + self.assertAlmostEqual(6.0, des.get_rcut()) + self.assertEqual(30, des.get_nsel()) + self.assertEqual(2, 
des.get_ntypes()) + np.testing.assert_allclose( + descriptor.reshape([-1]).numpy(), self.ref_d.numpy(), atol=1e-10, rtol=1e-10 + ) + + dparams["concat_output_tebd"] = True + des = DescrptDPA2( + **dparams, + ).to(env.DEVICE) + descriptor, env_mat, diff, rot_mat, sw = des( + extended_coord, + extended_atype, + nlist, + mapping=mapping, + ) + self.assertEqual(descriptor.shape[-1], des.get_dim_out()) + + +def translate_type_embd_dicts_to_dpa2( + target_dict, + source_dict, + type_embd_dict, +): + all_keys = list(target_dict.keys()) + record = [False for ii in all_keys] + for kk, vv in source_dict.items(): + record[all_keys.index(kk)] = True + target_dict[kk] = vv + assert len(type_embd_dict.keys()) == 2 + it = iter(type_embd_dict.keys()) + for _ in range(2): + kk = next(it) + tk = "type_embedding." + kk + record[all_keys.index(tk)] = True + target_dict[tk] = type_embd_dict[kk] + record[all_keys.index("repinit.compress_data.0")] = True + record[all_keys.index("repinit.compress_info.0")] = True + assert all(record) + return target_dict diff --git a/source/tests/pd/model/test_dpa2.py b/source/tests/pd/model/test_dpa2.py new file mode 100644 index 0000000000..f441007cad --- /dev/null +++ b/source/tests/pd/model/test_dpa2.py @@ -0,0 +1,333 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import itertools +import unittest + +import numpy as np +import paddle + +from deepmd.dpmodel.descriptor.dpa2 import DescrptDPA2 as DPDescrptDPA2 +from deepmd.dpmodel.descriptor.dpa2 import ( + RepformerArgs, + RepinitArgs, +) +from deepmd.pd.model.descriptor.dpa2 import ( + DescrptDPA2, +) +from deepmd.pd.utils import ( + env, +) +from deepmd.pd.utils.env import ( + PRECISION_DICT, +) + +from ...seed import ( + GLOBAL_SEED, +) +from .test_env_mat import ( + TestCaseSingleFrameWithNlist, +) +from .test_mlp import ( + get_tols, +) + +dtype = env.GLOBAL_PD_FLOAT_PRECISION + + +class TestDescrptDPA2(unittest.TestCase, TestCaseSingleFrameWithNlist): + def setUp(self) -> None: + TestCaseSingleFrameWithNlist.setUp(self) + + def test_consistency( + self, + ) -> None: + rng = np.random.default_rng(100) + nf, nloc, nnei = self.nlist.shape + davg = rng.normal(size=(self.nt, nnei, 4)) + dstd = rng.normal(size=(self.nt, nnei, 4)) + davg_2 = rng.normal(size=(self.nt, nnei // 2, 4)) + dstd_2 = rng.normal(size=(self.nt, nnei // 2, 4)) + dstd = 0.1 + np.abs(dstd) + dstd_2 = 0.1 + np.abs(dstd_2) + + for ( + riti, + riz, + rp1c, + rp1d, + rp1g, + rp1a, + rp2g, + rp2a, + rph, + rp2gate, + rus, + rpz, + sm, + prec, + ect, + ns, + ) in itertools.product( + ["concat", "strip"], # repinit_tebd_input_mode + [ + True, + ], # repinit_set_davg_zero + [True, False], # repformer_update_g1_has_conv + [True, False], # repformer_update_g1_has_drrd + [True, False], # repformer_update_g1_has_grrg + [ + False, + ], # repformer_update_g1_has_attn + [ + False, + ], # repformer_update_g2_has_g1g1 + [True, False], # repformer_update_g2_has_attn + [ + False, + ], # repformer_update_h2 + [ + True, + ], # repformer_attn2_has_gate + ["res_avg", "res_residual"], # repformer_update_style + [ + True, + ], # repformer_set_davg_zero + [ + True, + ], # smooth + ["float64"], # precision + [False, True], # use_econf_tebd + [ + False, + True, + ], # new sub-structures (use_sqrt_nnei, g1_out_conv, g1_out_mlp) + ): + if ns and not rp1d and not rp1g: + continue + dtype = PRECISION_DICT[prec] + rtol, atol = get_tols(prec) + if prec == "float64": + atol = 1e-8 # marginal GPU test cases... 
+ + repinit = RepinitArgs( + rcut=self.rcut, + rcut_smth=self.rcut_smth, + nsel=self.sel_mix, + tebd_input_mode=riti, + set_davg_zero=riz, + ) + repformer = RepformerArgs( + rcut=self.rcut / 2, + rcut_smth=self.rcut_smth, + nsel=nnei // 2, + nlayers=3, + g1_dim=20, + g2_dim=10, + axis_neuron=4, + update_g1_has_conv=rp1c, + update_g1_has_drrd=rp1d, + update_g1_has_grrg=rp1g, + update_g1_has_attn=rp1a, + update_g2_has_g1g1=rp2g, + update_g2_has_attn=rp2a, + update_h2=rph, + attn1_hidden=20, + attn1_nhead=2, + attn2_hidden=10, + attn2_nhead=2, + attn2_has_gate=rp2gate, + update_style=rus, + set_davg_zero=rpz, + use_sqrt_nnei=ns, + g1_out_conv=ns, + g1_out_mlp=ns, + ) + + # dpa2 new impl + dd0 = DescrptDPA2( + self.nt, + repinit=repinit, + repformer=repformer, + # kwargs for descriptor + smooth=sm, + exclude_types=[], + add_tebd_to_repinit_out=False, + precision=prec, + use_econf_tebd=ect, + type_map=["O", "H"] if ect else None, + seed=GLOBAL_SEED, + ).to(env.DEVICE) + + dd0.repinit.mean = paddle.to_tensor(davg, dtype=dtype).to(device=env.DEVICE) + dd0.repinit.stddev = paddle.to_tensor(dstd, dtype=dtype).to( + device=env.DEVICE + ) + dd0.repformers.mean = paddle.to_tensor(davg_2, dtype=dtype).to( + device=env.DEVICE + ) + dd0.repformers.stddev = paddle.to_tensor(dstd_2, dtype=dtype).to( + device=env.DEVICE + ) + rd0, _, _, _, _ = dd0( + paddle.to_tensor(self.coord_ext, dtype=dtype).to(device=env.DEVICE), + paddle.to_tensor(self.atype_ext, dtype="int64").to(device=env.DEVICE), + paddle.to_tensor(self.nlist, dtype="int64").to(device=env.DEVICE), + paddle.to_tensor(self.mapping, dtype="int64").to(device=env.DEVICE), + ) + # serialization + dd1 = DescrptDPA2.deserialize(dd0.serialize()) + rd1, _, _, _, _ = dd1( + paddle.to_tensor(self.coord_ext, dtype=dtype).to(device=env.DEVICE), + paddle.to_tensor(self.atype_ext, dtype="int64").to(device=env.DEVICE), + paddle.to_tensor(self.nlist, dtype="int64").to(device=env.DEVICE), + paddle.to_tensor(self.mapping, dtype="int64").to(device=env.DEVICE), + ) + np.testing.assert_allclose( + rd0.detach().cpu().numpy(), + rd1.detach().cpu().numpy(), + rtol=rtol, + atol=atol, + ) + # dp impl + dd2 = DPDescrptDPA2.deserialize(dd0.serialize()) + rd2, _, _, _, _ = dd2.call( + self.coord_ext, self.atype_ext, self.nlist, self.mapping + ) + np.testing.assert_allclose( + rd0.detach().cpu().numpy(), + rd2, + rtol=rtol, + atol=atol, + ) + + @unittest.skip("skip jit in paddle temporally") + def test_jit( + self, + ) -> None: + rng = np.random.default_rng(100) + nf, nloc, nnei = self.nlist.shape + davg = rng.normal(size=(self.nt, nnei, 4)) + dstd = rng.normal(size=(self.nt, nnei, 4)) + davg_2 = rng.normal(size=(self.nt, nnei // 2, 4)) + dstd_2 = rng.normal(size=(self.nt, nnei // 2, 4)) + dstd = 0.1 + np.abs(dstd) + + for ( + riti, + riz, + rp1c, + rp1d, + rp1g, + rp1a, + rp2g, + rp2a, + rph, + rp2gate, + rus, + rpz, + sm, + prec, + ect, + ns, + ) in itertools.product( + ["concat", "strip"], # repinit_tebd_input_mode + [ + True, + ], # repinit_set_davg_zero + [ + True, + ], # repformer_update_g1_has_conv + [ + True, + ], # repformer_update_g1_has_drrd + [ + True, + ], # repformer_update_g1_has_grrg + [ + True, + ], # repformer_update_g1_has_attn + [ + True, + ], # repformer_update_g2_has_g1g1 + [ + True, + ], # repformer_update_g2_has_attn + [ + False, + ], # repformer_update_h2 + [ + True, + ], # repformer_attn2_has_gate + ["res_avg", "res_residual"], # repformer_update_style + [ + True, + ], # repformer_set_davg_zero + [ + True, + ], # smooth + ["float64"], # precision + 
[False, True], # use_econf_tebd + [True], # new sub-structures (use_sqrt_nnei, g1_out_conv, g1_out_mlp) + ): + dtype = PRECISION_DICT[prec] + rtol, atol = get_tols(prec) + + repinit = RepinitArgs( + rcut=self.rcut, + rcut_smth=self.rcut_smth, + nsel=self.sel_mix, + tebd_input_mode=riti, + set_davg_zero=riz, + ) + repformer = RepformerArgs( + rcut=self.rcut / 2, + rcut_smth=self.rcut_smth, + nsel=nnei // 2, + nlayers=3, + g1_dim=20, + g2_dim=10, + axis_neuron=4, + update_g1_has_conv=rp1c, + update_g1_has_drrd=rp1d, + update_g1_has_grrg=rp1g, + update_g1_has_attn=rp1a, + update_g2_has_g1g1=rp2g, + update_g2_has_attn=rp2a, + update_h2=rph, + attn1_hidden=20, + attn1_nhead=2, + attn2_hidden=10, + attn2_nhead=2, + attn2_has_gate=rp2gate, + update_style=rus, + set_davg_zero=rpz, + use_sqrt_nnei=ns, + g1_out_conv=ns, + g1_out_mlp=ns, + ) + + # dpa2 new impl + dd0 = DescrptDPA2( + self.nt, + repinit=repinit, + repformer=repformer, + # kwargs for descriptor + smooth=sm, + exclude_types=[], + add_tebd_to_repinit_out=False, + precision=prec, + use_econf_tebd=ect, + type_map=["O", "H"] if ect else None, + seed=GLOBAL_SEED, + ).to(env.DEVICE) + + dd0.repinit.mean = paddle.to_tensor(davg, dtype=dtype).to(device=env.DEVICE) + dd0.repinit.stddev = paddle.to_tensor(dstd, dtype=dtype).to( + device=env.DEVICE + ) + dd0.repformers.mean = paddle.to_tensor(davg_2, dtype=dtype).to( + device=env.DEVICE + ) + dd0.repformers.stddev = paddle.to_tensor(dstd_2, dtype=dtype).to( + device=env.DEVICE + ) + model = paddle.jit.to_static(dd0) diff --git a/source/tests/pd/model/test_forward_lower.py b/source/tests/pd/model/test_forward_lower.py index db6497b605..1d924e2d3d 100644 --- a/source/tests/pd/model/test_forward_lower.py +++ b/source/tests/pd/model/test_forward_lower.py @@ -140,22 +140,21 @@ def test( class TestEnergyModelSeA(unittest.TestCase, ForwardLowerTest): - def setUp(self): + def setUp(self) -> None: self.prec = 1e-10 model_params = copy.deepcopy(model_se_e2_a) self.model = get_model(model_params).to(env.DEVICE) class TestEnergyModelDPA1(unittest.TestCase, ForwardLowerTest): - def setUp(self): + def setUp(self) -> None: self.prec = 1e-10 model_params = copy.deepcopy(model_dpa1) self.model = get_model(model_params).to(env.DEVICE) -@unittest.skip("Skip for not implemented yet") class TestEnergyModelDPA2(unittest.TestCase, ForwardLowerTest): - def setUp(self): + def setUp(self) -> None: self.prec = 1e-10 model_params = copy.deepcopy(model_dpa2) self.model = get_model(model_params).to(env.DEVICE) @@ -163,7 +162,7 @@ def setUp(self): @unittest.skip("Skip for not implemented yet") class TestEnergyModelZBL(unittest.TestCase, ForwardLowerTest): - def setUp(self): + def setUp(self) -> None: self.prec = 1e-10 model_params = copy.deepcopy(model_zbl) self.model = get_model(model_params).to(env.DEVICE) @@ -171,7 +170,7 @@ def setUp(self): @unittest.skip("Skip for not implemented yet") class TestEnergyModelSpinSeA(unittest.TestCase, ForwardLowerTest): - def setUp(self): + def setUp(self) -> None: self.prec = 1e-10 model_params = copy.deepcopy(model_spin) self.test_spin = True @@ -180,7 +179,7 @@ def setUp(self): @unittest.skip("Skip for not implemented yet") class TestEnergyModelSpinDPA1(unittest.TestCase, ForwardLowerTest): - def setUp(self): + def setUp(self) -> None: self.prec = 1e-10 model_params = copy.deepcopy(model_spin) model_params["descriptor"] = copy.deepcopy(model_dpa1)["descriptor"] @@ -192,7 +191,7 @@ def setUp(self): @unittest.skip("Skip for not implemented yet") class 
TestEnergyModelSpinDPA2(unittest.TestCase, ForwardLowerTest): - def setUp(self): + def setUp(self) -> None: self.prec = 1e-10 model_params = copy.deepcopy(model_spin) model_params["descriptor"] = copy.deepcopy(model_dpa2)["descriptor"] diff --git a/source/tests/pd/model/test_null_input.py b/source/tests/pd/model/test_null_input.py index 5d67491943..29d2f84eea 100644 --- a/source/tests/pd/model/test_null_input.py +++ b/source/tests/pd/model/test_null_input.py @@ -23,6 +23,7 @@ ) from .test_permutation import ( model_dpa1, + model_dpa2, model_se_e2_a, ) @@ -32,7 +33,7 @@ class NullTest: def test_nloc_1( self, - ): + ) -> None: natoms = 1 generator = paddle.seed(GLOBAL_SEED) # paddle.seed(1000) @@ -60,7 +61,7 @@ def test_nloc_1( def test_nloc_2_far( self, - ): + ) -> None: natoms = 2 generator = paddle.seed(GLOBAL_SEED) cell = paddle.rand([3, 3], dtype=dtype).to(device=env.DEVICE) @@ -100,3 +101,10 @@ def setUp(self): model_params = copy.deepcopy(model_dpa1) self.type_split = True self.model = get_model(model_params).to(env.DEVICE) + + +class TestEnergyModelDPA2(unittest.TestCase, NullTest): + def setUp(self) -> None: + model_params = copy.deepcopy(model_dpa2) + self.type_split = True + self.model = get_model(model_params).to(env.DEVICE) diff --git a/source/tests/pd/model/test_permutation.py b/source/tests/pd/model/test_permutation.py index 135c5ea819..88672457a9 100644 --- a/source/tests/pd/model/test_permutation.py +++ b/source/tests/pd/model/test_permutation.py @@ -416,7 +416,6 @@ def setUp(self) -> None: self.model = get_model(model_params).to(env.DEVICE) -@unittest.skip("Skip for not implemented yet") class TestEnergyModelDPA2(unittest.TestCase, PermutationTest): def setUp(self) -> None: model_params = copy.deepcopy(model_dpa2) diff --git a/source/tests/pd/model/test_rot.py b/source/tests/pd/model/test_rot.py index 85c90dc60f..84a0d3d724 100644 --- a/source/tests/pd/model/test_rot.py +++ b/source/tests/pd/model/test_rot.py @@ -176,7 +176,6 @@ def setUp(self): self.model = get_model(model_params).to(env.DEVICE) -@unittest.skip("Skip for not implemented yet") class TestEnergyModelDPA2(unittest.TestCase, RotTest): def setUp(self): model_params = copy.deepcopy(model_dpa2) diff --git a/source/tests/pd/model/test_rot_denoise.py b/source/tests/pd/model/test_rot_denoise.py index 74d5d41791..4a1841d10b 100644 --- a/source/tests/pd/model/test_rot_denoise.py +++ b/source/tests/pd/model/test_rot_denoise.py @@ -18,8 +18,9 @@ from ..common import ( eval_model, ) -from .test_permutation_denoise import ( # model_dpa2, +from .test_permutation_denoise import ( model_dpa1, + model_dpa2, ) dtype = paddle.float64 @@ -112,6 +113,14 @@ def setUp(self): self.model = get_model(model_params).to(env.DEVICE) +@unittest.skip("support of the denoise is temporally disabled") +class TestDenoiseModelDPA2(unittest.TestCase, RotDenoiseTest): + def setUp(self) -> None: + model_params = copy.deepcopy(model_dpa2) + self.type_split = True + self.model = get_model(model_params).to(env.DEVICE) + + # @unittest.skip("hybrid not supported at the moment") # class TestEnergyModelHybrid(unittest.TestCase, TestRotDenoise): # def setUp(self): diff --git a/source/tests/pd/model/test_smooth.py b/source/tests/pd/model/test_smooth.py index cc50043ad8..f907e6f4ee 100644 --- a/source/tests/pd/model/test_smooth.py +++ b/source/tests/pd/model/test_smooth.py @@ -20,6 +20,7 @@ ) from .test_permutation import ( # model_dpau, model_dpa1, + model_dpa2, model_se_e2_a, ) @@ -189,6 +190,36 @@ def setUp(self): self.aprec = 1e-5 +class 
TestEnergyModelDPA2(unittest.TestCase, SmoothTest): + def setUp(self) -> None: + model_params = copy.deepcopy(model_dpa2) + model_params["descriptor"]["repinit"]["rcut"] = 8 + model_params["descriptor"]["repinit"]["rcut_smth"] = 3.5 + self.type_split = True + self.model = get_model(model_params).to(env.DEVICE) + self.epsilon, self.aprec = 1e-5, 1e-4 + + +class TestEnergyModelDPA2_1(unittest.TestCase, SmoothTest): + def setUp(self) -> None: + model_params = copy.deepcopy(model_dpa2) + model_params["fitting_net"]["type"] = "ener" + self.type_split = True + self.test_virial = False + self.model = get_model(model_params).to(env.DEVICE) + self.epsilon, self.aprec = None, None + + +class TestEnergyModelDPA2_2(unittest.TestCase, SmoothTest): + def setUp(self) -> None: + model_params = copy.deepcopy(model_dpa2) + model_params["fitting_net"]["type"] = "ener" + self.type_split = True + self.test_virial = False + self.model = get_model(model_params).to(env.DEVICE) + self.epsilon, self.aprec = None, None + + # class TestEnergyFoo(unittest.TestCase): # def test(self): # model_params = model_dpau diff --git a/source/tests/pd/model/test_trans.py b/source/tests/pd/model/test_trans.py index 3fae49d598..f050596996 100644 --- a/source/tests/pd/model/test_trans.py +++ b/source/tests/pd/model/test_trans.py @@ -110,7 +110,6 @@ def setUp(self): self.model = get_model(model_params).to(env.DEVICE) -@unittest.skip("Skip for not implemented yet") class TestEnergyModelDPA2(unittest.TestCase, TransTest): def setUp(self): model_params = copy.deepcopy(model_dpa2) diff --git a/source/tests/pd/model/test_unused_params.py b/source/tests/pd/model/test_unused_params.py new file mode 100644 index 0000000000..bf92171da1 --- /dev/null +++ b/source/tests/pd/model/test_unused_params.py @@ -0,0 +1,92 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import copy +import unittest + +import paddle + +from deepmd.pd.model.model import ( + get_model, +) +from deepmd.pd.utils import ( + env, +) + +from ...seed import ( + GLOBAL_SEED, +) +from ..common import ( + eval_model, +) +from .test_permutation import ( + model_dpa2, +) + +dtype = paddle.float64 + + +@unittest.skip("paddle do not support unpacking grad_fn.next_functions") +class TestUnusedParamsDPA2(unittest.TestCase): + def test_unused(self): + import itertools + + for conv, drrd, grrg, attn1, g1g1, attn2, h2 in itertools.product( + [True], + [True], + [True], + [True], + [True], + [True], + [True], + ): + if (not drrd) and (not grrg) and h2: + # skip the case h2 is not envolved + continue + if (not grrg) and (not conv): + # skip the case g2 is not envolved + continue + model = copy.deepcopy(model_dpa2) + model["descriptor"]["repformer"]["nlayers"] = 2 + # model["descriptor"]["combine_grrg"] = cmbg2 + model["descriptor"]["repformer"]["update_g1_has_conv"] = conv + model["descriptor"]["repformer"]["update_g1_has_drrd"] = drrd + model["descriptor"]["repformer"]["update_g1_has_grrg"] = grrg + model["descriptor"]["repformer"]["update_g1_has_attn"] = attn1 + model["descriptor"]["repformer"]["update_g2_has_g1g1"] = g1g1 + model["descriptor"]["repformer"]["update_g2_has_attn"] = attn2 + model["descriptor"]["repformer"]["update_h2"] = h2 + model["fitting_net"]["neuron"] = [12, 12, 12] + self._test_unused(model) + + def _test_unused(self, model_params): + self.model = get_model(model_params).to(env.DEVICE) + natoms = 5 + generator = paddle.seed(GLOBAL_SEED) + cell = paddle.rand([3, 3], dtype=dtype).to(device=env.DEVICE) + cell = (cell + cell.T) + 5.0 * paddle.eye(3).to(device=env.DEVICE) 
+ coord = paddle.rand([natoms, 3], dtype=dtype).to(device=env.DEVICE) + coord = paddle.matmul(coord, cell) + atype = paddle.to_tensor([0, 0, 0, 1, 1]).to(env.DEVICE) + idx_perm = [1, 0, 4, 3, 2] + result_0 = eval_model(self.model, coord.unsqueeze(0), cell.unsqueeze(0), atype) + test_keys = ["energy", "force", "virial"] + ret0 = {key: result_0[key].squeeze(0) for key in test_keys} + + # use computation graph to find all contributing tensors + def get_contributing_params(y, top_level=True): + nf = y.grad_fn.next_functions if top_level else y.next_functions + for f, _ in nf: + try: + yield f.variable + except AttributeError: + pass # node has no tensor + if f is not None: + yield from get_contributing_params(f, top_level=False) + + contributing_parameters = set(get_contributing_params(ret0["energy"])) + all_parameters = set(self.model.parameters()) + non_contributing = all_parameters - contributing_parameters + self.assertEqual(len(non_contributing), 0) + + +if __name__ == "__main__": + unittest.main() diff --git a/source/tests/pd/model/water/multitask.json b/source/tests/pd/model/water/multitask.json index 83524a8b77..2786afca59 100644 --- a/source/tests/pd/model/water/multitask.json +++ b/source/tests/pd/model/water/multitask.json @@ -10,7 +10,8 @@ "type": "se_e2_a", "sel": [ 46, - 92 + 92, + 4 ], "rcut_smth": 0.50, "rcut": 6.00, diff --git a/source/tests/pd/model/water/multitask_sharefit.json b/source/tests/pd/model/water/multitask_sharefit.json index 246b5992f7..934ef04998 100644 --- a/source/tests/pd/model/water/multitask_sharefit.json +++ b/source/tests/pd/model/water/multitask_sharefit.json @@ -91,14 +91,14 @@ "stat_file": "./stat_files/model_1.hdf5", "training_data": { "systems": [ - "pt/water/data/data_0" + "pd/water/data/data_0" ], "batch_size": 1, "_comment": "that's all" }, "validation_data": { "systems": [ - "pt/water/data/data_0" + "pd/water/data/data_0" ], "batch_size": 1, "_comment": "that's all" @@ -108,14 +108,14 @@ "stat_file": "./stat_files/model_2.hdf5", "training_data": { "systems": [ - "pt/water/data/data_0" + "pd/water/data/data_0" ], "batch_size": 1, "_comment": "that's all" }, "validation_data": { "systems": [ - "pt/water/data/data_0" + "pd/water/data/data_0" ], "batch_size": 1, "_comment": "that's all" diff --git a/source/tests/pd/test_finetune.py b/source/tests/pd/test_finetune.py index f82f7a8cd0..769ea6f6d3 100644 --- a/source/tests/pd/test_finetune.py +++ b/source/tests/pd/test_finetune.py @@ -197,7 +197,7 @@ def test_finetune_change_out_bias(self): self.tearDown() - def test_finetune_change_type(self): + def test_finetune_change_type(self) -> None: if not self.mixed_types: # skip when not mixed_types return @@ -284,7 +284,7 @@ def test_finetune_change_type(self): self.tearDown() - def tearDown(self): + def tearDown(self) -> None: for f in os.listdir("."): if f.startswith("model") and f.endswith(".pd"): os.remove(f) @@ -295,7 +295,7 @@ def tearDown(self): class TestEnergyModelSeA(FinetuneTest, unittest.TestCase): - def setUp(self): + def setUp(self) -> None: input_json = str(Path(__file__).parent / "water/se_atten.json") with open(input_json) as f: self.config = json.load(f) @@ -311,7 +311,7 @@ def setUp(self): @unittest.skip("Skip for not implemented yet") class TestEnergyZBLModelSeA(FinetuneTest, unittest.TestCase): - def setUp(self): + def setUp(self) -> None: input_json = str(Path(__file__).parent / "water/se_atten.json") with open(input_json) as f: self.config = json.load(f) @@ -327,7 +327,7 @@ def setUp(self): @unittest.skip("Skip for not implemented yet") 
class TestEnergyDOSModelSeA(FinetuneTest, unittest.TestCase): - def setUp(self): + def setUp(self) -> None: input_json = str(Path(__file__).parent / "dos/input.json") with open(input_json) as f: self.config = json.load(f) @@ -342,7 +342,7 @@ def setUp(self): class TestEnergyModelDPA1(FinetuneTest, unittest.TestCase): - def setUp(self): + def setUp(self) -> None: input_json = str(Path(__file__).parent / "water/se_atten.json") with open(input_json) as f: self.config = json.load(f) @@ -356,9 +356,8 @@ def setUp(self): self.testkey = None -@unittest.skip("Skip for not implemented yet") class TestEnergyModelDPA2(FinetuneTest, unittest.TestCase): - def setUp(self): + def setUp(self) -> None: input_json = str(Path(__file__).parent / "water/se_atten.json") with open(input_json) as f: self.config = json.load(f) diff --git a/source/tests/pd/test_multitask.py b/source/tests/pd/test_multitask.py index d59990dcca..72ad251068 100644 --- a/source/tests/pd/test_multitask.py +++ b/source/tests/pd/test_multitask.py @@ -30,6 +30,8 @@ from .model.test_permutation import ( model_dpa1, + model_dpa2, + model_dpa2tebd, model_se_e2_a, ) @@ -40,6 +42,13 @@ def setUpModule() -> None: with open(multitask_template_json) as f: multitask_template = json.load(f) + global multitask_sharefit_template + multitask_sharefit_template_json = str( + Path(__file__).parent / "water/multitask_sharefit.json" + ) + with open(multitask_sharefit_template_json) as f: + multitask_sharefit_template = json.load(f) + class MultiTaskTrainTest: def test_multitask_train(self) -> None: @@ -227,6 +236,46 @@ def tearDown(self) -> None: MultiTaskTrainTest.tearDown(self) +class TestMultiTaskSeASharefit(unittest.TestCase, MultiTaskTrainTest): + def setUp(self) -> None: + multitask_se_e2_a = deepcopy(multitask_sharefit_template) + multitask_se_e2_a["model"]["shared_dict"]["my_descriptor"] = model_se_e2_a[ + "descriptor" + ] + data_file = [str(Path(__file__).parent / "water/data/data_0")] + self.stat_files = "se_e2_a_share_fit" + os.makedirs(self.stat_files, exist_ok=True) + self.config = multitask_se_e2_a + self.config["training"]["data_dict"]["model_1"]["training_data"]["systems"] = ( + data_file + ) + self.config["training"]["data_dict"]["model_1"]["validation_data"][ + "systems" + ] = data_file + self.config["training"]["data_dict"]["model_1"]["stat_file"] = ( + f"{self.stat_files}/model_1" + ) + self.config["training"]["data_dict"]["model_2"]["training_data"]["systems"] = ( + data_file + ) + self.config["training"]["data_dict"]["model_2"]["validation_data"][ + "systems" + ] = data_file + self.config["training"]["data_dict"]["model_2"]["stat_file"] = ( + f"{self.stat_files}/model_2" + ) + self.config["training"]["numb_steps"] = 1 + self.config["training"]["save_freq"] = 1 + self.origin_config = deepcopy(self.config) + self.config["model"], self.shared_links = preprocess_shared_params( + self.config["model"] + ) + self.share_fitting = True + + def tearDown(self) -> None: + MultiTaskTrainTest.tearDown(self) + + class TestMultiTaskDPA1(unittest.TestCase, MultiTaskTrainTest): def setUp(self) -> None: multitask_DPA1 = deepcopy(multitask_template) @@ -266,5 +315,83 @@ def tearDown(self) -> None: MultiTaskTrainTest.tearDown(self) +class TestMultiTaskDPA2(unittest.TestCase, MultiTaskTrainTest): + def setUp(self) -> None: + multitask_DPA2 = deepcopy(multitask_template) + multitask_DPA2["model"]["shared_dict"]["my_descriptor"] = model_dpa2[ + "descriptor" + ] + data_file = [str(Path(__file__).parent / "water/data/data_0")] + self.stat_files = "DPA2" + 
os.makedirs(self.stat_files, exist_ok=True) + self.config = multitask_DPA2 + self.config["training"]["data_dict"]["model_1"]["training_data"]["systems"] = ( + data_file + ) + self.config["training"]["data_dict"]["model_1"]["validation_data"][ + "systems" + ] = data_file + self.config["training"]["data_dict"]["model_1"]["stat_file"] = ( + f"{self.stat_files}/model_1" + ) + self.config["training"]["data_dict"]["model_2"]["training_data"]["systems"] = ( + data_file + ) + self.config["training"]["data_dict"]["model_2"]["validation_data"][ + "systems" + ] = data_file + self.config["training"]["data_dict"]["model_2"]["stat_file"] = ( + f"{self.stat_files}/model_2" + ) + self.config["training"]["numb_steps"] = 1 + self.config["training"]["save_freq"] = 1 + self.origin_config = deepcopy(self.config) + self.config["model"], self.shared_links = preprocess_shared_params( + self.config["model"] + ) + + def tearDown(self) -> None: + MultiTaskTrainTest.tearDown(self) + + +class TestMultiTaskDPA2Tebd(unittest.TestCase, MultiTaskTrainTest): + def setUp(self) -> None: + multitask_DPA2 = deepcopy(multitask_template) + multitask_DPA2["model"]["shared_dict"]["my_descriptor"] = model_dpa2tebd[ + "descriptor" + ] + data_file = [str(Path(__file__).parent / "water/data/data_0")] + self.stat_files = "DPA2Tebd" + os.makedirs(self.stat_files, exist_ok=True) + self.config = multitask_DPA2 + self.config["training"]["data_dict"]["model_1"]["training_data"]["systems"] = ( + data_file + ) + self.config["training"]["data_dict"]["model_1"]["validation_data"][ + "systems" + ] = data_file + self.config["training"]["data_dict"]["model_1"]["stat_file"] = ( + f"{self.stat_files}/model_1" + ) + self.config["training"]["data_dict"]["model_2"]["training_data"]["systems"] = ( + data_file + ) + self.config["training"]["data_dict"]["model_2"]["validation_data"][ + "systems" + ] = data_file + self.config["training"]["data_dict"]["model_2"]["stat_file"] = ( + f"{self.stat_files}/model_2" + ) + self.config["training"]["numb_steps"] = 1 + self.config["training"]["save_freq"] = 1 + self.origin_config = deepcopy(self.config) + self.config["model"], self.shared_links = preprocess_shared_params( + self.config["model"] + ) + + def tearDown(self) -> None: + MultiTaskTrainTest.tearDown(self) + + if __name__ == "__main__": unittest.main() diff --git a/source/tests/pd/test_training.py b/source/tests/pd/test_training.py index c3d65c09df..8958dcb165 100644 --- a/source/tests/pd/test_training.py +++ b/source/tests/pd/test_training.py @@ -24,6 +24,7 @@ from .model.test_permutation import ( model_dpa1, + model_dpa2, model_se_e2_a, ) @@ -195,5 +196,21 @@ def tearDown(self) -> None: DPTrainTest.tearDown(self) +class TestEnergyModelDPA2(unittest.TestCase, DPTrainTest): + def setUp(self) -> None: + input_json = str(Path(__file__).parent / "water/se_atten.json") + with open(input_json) as f: + self.config = json.load(f) + data_file = [str(Path(__file__).parent / "water/data/data_0")] + self.config["training"]["training_data"]["systems"] = data_file + self.config["training"]["validation_data"]["systems"] = data_file + self.config["model"] = deepcopy(model_dpa2) + self.config["training"]["numb_steps"] = 1 + self.config["training"]["save_freq"] = 1 + + def tearDown(self) -> None: + DPTrainTest.tearDown(self) + + if __name__ == "__main__": unittest.main() diff --git a/source/tests/pd/test_update_sel.py b/source/tests/pd/test_update_sel.py index e7b1acf6ff..10342357c6 100644 --- a/source/tests/pd/test_update_sel.py +++ b/source/tests/pd/test_update_sel.py @@ 
-31,7 +31,7 @@ def setUp(self) -> None: return super().setUp() @patch("deepmd.pd.utils.update_sel.UpdateSel.get_nbor_stat") - def test_update_one_sel(self, sel_mock): + def test_update_one_sel(self, sel_mock) -> None: sel_mock.return_value = self.mock_min_nbor_dist, [10, 20] min_nbor_dist, sel = self.update_sel.update_one_sel(None, None, 6, "auto") @@ -45,7 +45,7 @@ def test_update_one_sel(self, sel_mock): @unittest.skip("Skip for not implemented yet") @patch("deepmd.pd.utils.update_sel.UpdateSel.get_nbor_stat") - def test_update_sel_hybrid(self, sel_mock): + def test_update_sel_hybrid(self, sel_mock) -> None: sel_mock.return_value = self.mock_min_nbor_dist, [10, 20] jdata = { @@ -76,7 +76,7 @@ def test_update_sel_hybrid(self, sel_mock): self.assertEqual(jdata, expected_out) @patch("deepmd.pd.utils.update_sel.UpdateSel.get_nbor_stat") - def test_update_sel(self, sel_mock): + def test_update_sel(self, sel_mock) -> None: sel_mock.return_value = self.mock_min_nbor_dist, [10, 20] jdata = { @@ -90,9 +90,8 @@ def test_update_sel(self, sel_mock): jdata = update_sel(jdata) self.assertEqual(jdata, expected_out) - @unittest.skip("Skip for not implemented yet") @patch("deepmd.pd.utils.update_sel.UpdateSel.get_nbor_stat") - def test_update_sel_atten_auto(self, sel_mock): + def test_update_sel_atten_auto(self, sel_mock) -> None: sel_mock.return_value = self.mock_min_nbor_dist, [25] jdata = { @@ -118,9 +117,8 @@ def test_update_sel_atten_auto(self, sel_mock): jdata = update_sel(jdata) self.assertEqual(jdata, expected_out) - @unittest.skip("Skip for not implemented yet") @patch("deepmd.pd.utils.update_sel.UpdateSel.get_nbor_stat") - def test_update_sel_atten_int(self, sel_mock): + def test_update_sel_atten_int(self, sel_mock) -> None: sel_mock.return_value = self.mock_min_nbor_dist, [25] jdata = { @@ -146,9 +144,8 @@ def test_update_sel_atten_int(self, sel_mock): jdata = update_sel(jdata) self.assertEqual(jdata, expected_out) - @unittest.skip("Skip for not implemented yet") @patch("deepmd.pd.utils.update_sel.UpdateSel.get_nbor_stat") - def test_update_sel_atten_list(self, sel_mock): + def test_update_sel_atten_list(self, sel_mock) -> None: sel_mock.return_value = self.mock_min_nbor_dist, [25] jdata = { @@ -174,7 +171,50 @@ def test_update_sel_atten_list(self, sel_mock): jdata = update_sel(jdata) self.assertEqual(jdata, expected_out) - def test_skip_frozen(self): + @patch("deepmd.pd.utils.update_sel.UpdateSel.get_nbor_stat") + def test_update_sel_dpa2_auto(self, sel_mock) -> None: + sel_mock.return_value = self.mock_min_nbor_dist, [25] + + jdata = { + "model": { + "descriptor": { + "type": "dpa2", + "repinit": { + "rcut": 6.0, + "nsel": "auto", + "three_body_rcut": 4.0, + "three_body_sel": "auto", + }, + "repformer": { + "rcut": 4.0, + "nsel": "auto", + }, + } + }, + "training": {"training_data": {}}, + } + expected_out = { + "model": { + "descriptor": { + "type": "dpa2", + "repinit": { + "rcut": 6.0, + "nsel": 28, + "three_body_rcut": 4.0, + "three_body_sel": 28, + }, + "repformer": { + "rcut": 4.0, + "nsel": 28, + }, + } + }, + "training": {"training_data": {}}, + } + jdata = update_sel(jdata) + self.assertEqual(jdata, expected_out) + + def test_skip_frozen(self) -> None: jdata = { "model": { "type": "frozen", @@ -185,7 +225,7 @@ def test_skip_frozen(self): jdata = update_sel(jdata) self.assertEqual(jdata, expected_out) - def test_wrap_up_4(self): + def test_wrap_up_4(self) -> None: self.assertEqual(self.update_sel.wrap_up_4(12), 3 * 4) self.assertEqual(self.update_sel.wrap_up_4(13), 4 * 4) 
self.assertEqual(self.update_sel.wrap_up_4(14), 4 * 4) From 3cecca44a587b3d3160ada40b07d86abd734eeb7 Mon Sep 17 00:00:00 2001 From: Chun Cai Date: Thu, 26 Dec 2024 01:27:32 +0800 Subject: [PATCH 36/43] Perf: replace unnecessary `torch.split` with indexing (#4505) Some operations only use the first segment of the result tensor of `torch.split`. In this case, all the other segments are created and discarded. This slightly adds an overhead to the training process. ## Summary by CodeRabbit - **Bug Fixes** - Simplified tensor slicing operations in the `RepformerLayer` class and the `nlist_distinguish_types` function, enhancing readability and performance. - **Documentation** - Updated comments for clarity regarding tensor shapes in the `RepformerLayer` class. --- deepmd/pt/model/descriptor/repformer_layer.py | 2 +- deepmd/pt/utils/nlist.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/deepmd/pt/model/descriptor/repformer_layer.py b/deepmd/pt/model/descriptor/repformer_layer.py index 86b09e9b40..1e2cba66d6 100644 --- a/deepmd/pt/model/descriptor/repformer_layer.py +++ b/deepmd/pt/model/descriptor/repformer_layer.py @@ -1003,7 +1003,7 @@ def _cal_grrg(h2g2: torch.Tensor, axis_neuron: int) -> torch.Tensor: # nb x nloc x 3 x ng2 nb, nloc, _, ng2 = h2g2.shape # nb x nloc x 3 x axis - h2g2m = torch.split(h2g2, axis_neuron, dim=-1)[0] + h2g2m = h2g2[..., :axis_neuron] # nb x nloc x axis x ng2 g1_13 = torch.matmul(torch.transpose(h2g2m, -1, -2), h2g2) / (3.0**1) # nb x nloc x (axisxng2) diff --git a/deepmd/pt/utils/nlist.py b/deepmd/pt/utils/nlist.py index db1e87785b..ec94e8cd60 100644 --- a/deepmd/pt/utils/nlist.py +++ b/deepmd/pt/utils/nlist.py @@ -310,7 +310,7 @@ def nlist_distinguish_types( inlist = torch.gather(nlist, 2, imap) inlist = inlist.masked_fill(~(pick_mask.to(torch.bool)), -1) # nloc x nsel[ii] - ret_nlist.append(torch.split(inlist, [ss, snsel - ss], dim=-1)[0]) + ret_nlist.append(inlist[..., :ss]) return torch.concat(ret_nlist, dim=-1) From bd2395cf7a40afd90dc0f203583bdb836f06feda Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Thu, 26 Dec 2024 00:22:39 -0500 Subject: [PATCH 37/43] docs: fix the header of the scaling test table (#4507) Fix #4494. ## Summary by CodeRabbit - **Documentation** - Updated the parallel training documentation for TensorFlow and PyTorch to enhance clarity. - Expanded explanations on parallel training processes and data loading utilities. - Introduced a flowchart to illustrate data flow and modified the scaling tests table format for better understanding. Signed-off-by: Jinzhe Zeng --- doc/train/parallel-training.md | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/doc/train/parallel-training.md b/doc/train/parallel-training.md index 9ea92b4751..00df0a63f0 100644 --- a/doc/train/parallel-training.md +++ b/doc/train/parallel-training.md @@ -27,13 +27,14 @@ In some cases, it won't work well when scaling the learning rate by worker count ### Scaling test Testing `examples/water/se_e2_a` on an 8-GPU host, linear acceleration can be observed with the increasing number of cards. - -| Num of GPU cards | Seconds every 100 samples | Samples per second | Speed up | -| ---------------- | ------------------------- | ------------------ | -------- | -| 1 | 1.4515 | 68.89 | 1.00 | -| 2 | 1.5962 | 62.65\*2 | 1.82 | -| 4 | 1.7635 | 56.71\*4 | 3.29 | -| 8 | 1.7267 | 57.91\*8 | 6.72 | +In this example, the number of samples per batch on a single GPU card ({ref}`batch_size `) is set to `1`. 
+
+| Num of GPU cards | Samples per batch | Seconds every 100 batches | Samples per second | Speed up |
+| ---------------- | ----------------- | ------------------------- | ------------------ | -------- |
+| 1 | 1 | 1.4515 | 68.89 | 1.00 |
+| 2 | 2 | 1.5962 | 62.65\*2 | 1.82 |
+| 4 | 4 | 1.7635 | 56.71\*4 | 3.29 |
+| 8 | 8 | 1.7267 | 57.91\*8 | 6.72 |

### How to use

From 5788f7935dd6584c25e022d4de256977856e2000 Mon Sep 17 00:00:00 2001
From: Jinzhe Zeng
Date: Thu, 26 Dec 2024 00:23:19 -0500
Subject: [PATCH 38/43] chore: fix spelling PRECISON -> PRECISION (#4508)

Luckily, this variable has never been used elsewhere.

## Summary by CodeRabbit

- **Bug Fixes**
  - Corrected the spelling of `RESERVED_PRECISON_DICT` to `RESERVED_PRECISION_DICT` across multiple files to ensure consistency and prevent potential runtime errors.
- **Documentation**
  - Updated comments and documentation to reflect the corrected variable name, enhancing clarity without altering functionality.

---
 deepmd/dpmodel/common.py | 10 +++++-----
 deepmd/dpmodel/model/make_model.py | 10 +++++-----
 deepmd/pd/infer/deep_eval.py | 6 +++---
 deepmd/pd/model/descriptor/dpa1.py | 4 ++--
 deepmd/pd/model/descriptor/se_a.py | 4 ++--
 deepmd/pd/model/descriptor/se_t_tebd.py | 4 ++--
 deepmd/pd/model/model/make_model.py | 4 ++--
 deepmd/pd/utils/env.py | 6 +++---
 deepmd/pt/infer/deep_eval.py | 8 ++++----
 deepmd/pt/model/descriptor/dpa1.py | 4 ++--
 deepmd/pt/model/descriptor/se_a.py | 4 ++--
 deepmd/pt/model/descriptor/se_atten_v2.py | 4 ++--
 deepmd/pt/model/descriptor/se_r.py | 4 ++--
 deepmd/pt/model/descriptor/se_t.py | 4 ++--
 deepmd/pt/model/descriptor/se_t_tebd.py | 4 ++--
 deepmd/pt/model/model/make_model.py | 4 ++--
 deepmd/pt/utils/env.py | 6 +++---
 17 files changed, 45 insertions(+), 45 deletions(-)

diff --git a/deepmd/dpmodel/common.py b/deepmd/dpmodel/common.py
index 920364edc0..1f9d4817a2 100644
--- a/deepmd/dpmodel/common.py
+++ b/deepmd/dpmodel/common.py
@@ -43,7 +43,7 @@
 }
 assert VALID_PRECISION.issubset(PRECISION_DICT.keys())
-RESERVED_PRECISON_DICT = {
+RESERVED_PRECISION_DICT = {
     np.float16: "float16",
     np.float32: "float32",
     np.float64: "float64",
@@ -52,7 +52,7 @@
     ml_dtypes.bfloat16: "bfloat16",
     np.bool_: "bool",
 }
-assert set(RESERVED_PRECISON_DICT.keys()) == set(PRECISION_DICT.values())
+assert set(RESERVED_PRECISION_DICT.keys()) == set(PRECISION_DICT.values())
 DEFAULT_PRECISION = "float64"
@@ -74,9 +74,9 @@ def get_xp_precision(
     elif precision == "bool":
         return bool
     elif precision == "default":
-        return get_xp_precision(xp, RESERVED_PRECISON_DICT[PRECISION_DICT[precision]])
+        return get_xp_precision(xp, RESERVED_PRECISION_DICT[PRECISION_DICT[precision]])
     elif precision == "global":
-        return get_xp_precision(xp, RESERVED_PRECISON_DICT[GLOBAL_NP_FLOAT_PRECISION])
+        return get_xp_precision(xp, RESERVED_PRECISION_DICT[GLOBAL_NP_FLOAT_PRECISION])
     elif precision == "bfloat16":
         return ml_dtypes.bfloat16
     else:
@@ -225,6 +225,6 @@ def safe_cast_array(
     "GLOBAL_ENER_FLOAT_PRECISION",
     "GLOBAL_NP_FLOAT_PRECISION",
     "PRECISION_DICT",
-    "RESERVED_PRECISON_DICT",
+    "RESERVED_PRECISION_DICT",
     "NativeOP",
 ]
diff --git a/deepmd/dpmodel/model/make_model.py b/deepmd/dpmodel/model/make_model.py
index ccad72c6a5..ec0b986394 100644
--- a/deepmd/dpmodel/model/make_model.py
+++ b/deepmd/dpmodel/model/make_model.py
@@ -14,7 +14,7 @@
     GLOBAL_ENER_FLOAT_PRECISION,
     GLOBAL_NP_FLOAT_PRECISION,
     PRECISION_DICT,
-    RESERVED_PRECISON_DICT,
+    RESERVED_PRECISION_DICT,
     NativeOP,
 )
 from deepmd.dpmodel.model.base_model import (
@@ -169,7 +169,7 @@
self.atomic_model: T_AtomicModel = T_AtomicModel(*args, **kwargs) self.precision_dict = PRECISION_DICT # not supported by flax - # self.reverse_precision_dict = RESERVED_PRECISON_DICT + # self.reverse_precision_dict = RESERVED_PRECISION_DICT self.global_np_float_precision = GLOBAL_NP_FLOAT_PRECISION self.global_ener_float_precision = GLOBAL_ENER_FLOAT_PRECISION @@ -373,7 +373,7 @@ def input_type_cast( str, ]: """Cast the input data to global float type.""" - input_prec = RESERVED_PRECISON_DICT[self.precision_dict[coord.dtype.name]] + input_prec = RESERVED_PRECISION_DICT[self.precision_dict[coord.dtype.name]] ### ### type checking would not pass jit, convert to coord prec anyway ### @@ -382,7 +382,7 @@ def input_type_cast( for vv in [box, fparam, aparam] ] box, fparam, aparam = _lst - if input_prec == RESERVED_PRECISON_DICT[self.global_np_float_precision]: + if input_prec == RESERVED_PRECISION_DICT[self.global_np_float_precision]: return coord, box, fparam, aparam, input_prec else: pp = self.global_np_float_precision @@ -401,7 +401,7 @@ def output_type_cast( ) -> dict[str, np.ndarray]: """Convert the model output to the input prec.""" do_cast = ( - input_prec != RESERVED_PRECISON_DICT[self.global_np_float_precision] + input_prec != RESERVED_PRECISION_DICT[self.global_np_float_precision] ) pp = self.precision_dict[input_prec] odef = self.model_output_def() diff --git a/deepmd/pd/infer/deep_eval.py b/deepmd/pd/infer/deep_eval.py index c31170ad71..d13e837161 100644 --- a/deepmd/pd/infer/deep_eval.py +++ b/deepmd/pd/infer/deep_eval.py @@ -35,7 +35,7 @@ from deepmd.pd.utils.env import ( DEVICE, GLOBAL_PD_FLOAT_PRECISION, - RESERVED_PRECISON_DICT, + RESERVED_PRECISION_DICT, enable_prim, ) from deepmd.pd.utils.utils import ( @@ -355,7 +355,7 @@ def _eval_model( request_defs: list[OutputVariableDef], ): model = self.dp.to(DEVICE) - prec = NP_PRECISION_DICT[RESERVED_PRECISON_DICT[GLOBAL_PD_FLOAT_PRECISION]] + prec = NP_PRECISION_DICT[RESERVED_PRECISION_DICT[GLOBAL_PD_FLOAT_PRECISION]] nframes = coords.shape[0] if len(atom_types.shape) == 1: @@ -370,7 +370,7 @@ def _eval_model( place=DEVICE, ) type_input = paddle.to_tensor( - atom_types.astype(NP_PRECISION_DICT[RESERVED_PRECISON_DICT[paddle.int64]]), + atom_types.astype(NP_PRECISION_DICT[RESERVED_PRECISION_DICT[paddle.int64]]), dtype=paddle.int64, place=DEVICE, ) diff --git a/deepmd/pd/model/descriptor/dpa1.py b/deepmd/pd/model/descriptor/dpa1.py index f3f1ea26d6..ebaaf96bc3 100644 --- a/deepmd/pd/model/descriptor/dpa1.py +++ b/deepmd/pd/model/descriptor/dpa1.py @@ -23,7 +23,7 @@ ) from deepmd.pd.utils.env import ( PRECISION_DICT, - RESERVED_PRECISON_DICT, + RESERVED_PRECISION_DICT, ) from deepmd.pd.utils.update_sel import ( UpdateSel, @@ -503,7 +503,7 @@ def serialize(self) -> dict: "use_tebd_bias": self.use_tebd_bias, "type_map": self.type_map, # make deterministic - "precision": RESERVED_PRECISON_DICT[obj.prec], + "precision": RESERVED_PRECISION_DICT[obj.prec], "embeddings": obj.filter_layers.serialize(), "attention_layers": obj.dpa1_attention.serialize(), "env_mat": DPEnvMat(obj.rcut, obj.rcut_smth).serialize(), diff --git a/deepmd/pd/model/descriptor/se_a.py b/deepmd/pd/model/descriptor/se_a.py index 0af6d082b8..6e5832670d 100644 --- a/deepmd/pd/model/descriptor/se_a.py +++ b/deepmd/pd/model/descriptor/se_a.py @@ -23,7 +23,7 @@ ) from deepmd.pd.utils.env import ( PRECISION_DICT, - RESERVED_PRECISON_DICT, + RESERVED_PRECISION_DICT, ) from deepmd.pd.utils.env_mat_stat import ( EnvMatStatSe, @@ -334,7 +334,7 @@ def serialize(self) -> dict: 
"set_davg_zero": obj.set_davg_zero, "activation_function": obj.activation_function, # make deterministic - "precision": RESERVED_PRECISON_DICT[obj.prec], + "precision": RESERVED_PRECISION_DICT[obj.prec], "embeddings": obj.filter_layers.serialize(), "env_mat": DPEnvMat(obj.rcut, obj.rcut_smth).serialize(), "exclude_types": obj.exclude_types, diff --git a/deepmd/pd/model/descriptor/se_t_tebd.py b/deepmd/pd/model/descriptor/se_t_tebd.py index a8b9a6a417..b28f79e436 100644 --- a/deepmd/pd/model/descriptor/se_t_tebd.py +++ b/deepmd/pd/model/descriptor/se_t_tebd.py @@ -30,7 +30,7 @@ ) from deepmd.pd.utils.env import ( PRECISION_DICT, - RESERVED_PRECISON_DICT, + RESERVED_PRECISION_DICT, ) from deepmd.pd.utils.env_mat_stat import ( EnvMatStatSe, @@ -358,7 +358,7 @@ def serialize(self) -> dict: "use_econf_tebd": self.use_econf_tebd, "type_map": self.type_map, # make deterministic - "precision": RESERVED_PRECISON_DICT[obj.prec], + "precision": RESERVED_PRECISION_DICT[obj.prec], "embeddings": obj.filter_layers.serialize(), "env_mat": DPEnvMat(obj.rcut, obj.rcut_smth).serialize(), "type_embedding": self.type_embedding.embedding.serialize(), diff --git a/deepmd/pd/model/model/make_model.py b/deepmd/pd/model/model/make_model.py index 2b9a4b5bec..acb237b5ac 100644 --- a/deepmd/pd/model/model/make_model.py +++ b/deepmd/pd/model/model/make_model.py @@ -28,7 +28,7 @@ GLOBAL_PD_ENER_FLOAT_PRECISION, GLOBAL_PD_FLOAT_PRECISION, PRECISION_DICT, - RESERVED_PRECISON_DICT, + RESERVED_PRECISION_DICT, ) from deepmd.pd.utils.nlist import ( extend_input_and_build_neighbor_list, @@ -76,7 +76,7 @@ def __init__( else: self.atomic_model: T_AtomicModel = T_AtomicModel(*args, **kwargs) self.precision_dict = PRECISION_DICT - self.reverse_precision_dict = RESERVED_PRECISON_DICT + self.reverse_precision_dict = RESERVED_PRECISION_DICT self.global_pd_float_precision = GLOBAL_PD_FLOAT_PRECISION self.global_pd_ener_float_precision = GLOBAL_PD_ENER_FLOAT_PRECISION diff --git a/deepmd/pd/utils/env.py b/deepmd/pd/utils/env.py index 041c231282..e2abe9a6e5 100644 --- a/deepmd/pd/utils/env.py +++ b/deepmd/pd/utils/env.py @@ -55,7 +55,7 @@ PRECISION_DICT["default"] = GLOBAL_PD_FLOAT_PRECISION assert VALID_PRECISION.issubset(PRECISION_DICT.keys()) # cannot automatically generated -RESERVED_PRECISON_DICT = { +RESERVED_PRECISION_DICT = { paddle.float16: "float16", paddle.float32: "float32", paddle.float64: "float64", @@ -64,7 +64,7 @@ paddle.bfloat16: "bfloat16", paddle.bool: "bool", } -assert set(PRECISION_DICT.values()) == set(RESERVED_PRECISON_DICT.keys()) +assert set(PRECISION_DICT.values()) == set(RESERVED_PRECISION_DICT.keys()) DEFAULT_PRECISION = "float64" # throw warnings if threads not set @@ -163,7 +163,7 @@ def enable_prim(enable: bool = True): "LOCAL_RANK", "NUM_WORKERS", "PRECISION_DICT", - "RESERVED_PRECISON_DICT", + "RESERVED_PRECISION_DICT", "SAMPLER_RECORD", "enable_prim", ] diff --git a/deepmd/pt/infer/deep_eval.py b/deepmd/pt/infer/deep_eval.py index facead838e..b38dac78da 100644 --- a/deepmd/pt/infer/deep_eval.py +++ b/deepmd/pt/infer/deep_eval.py @@ -58,7 +58,7 @@ from deepmd.pt.utils.env import ( DEVICE, GLOBAL_PT_FLOAT_PRECISION, - RESERVED_PRECISON_DICT, + RESERVED_PRECISION_DICT, ) from deepmd.pt.utils.utils import ( to_numpy_array, @@ -406,7 +406,7 @@ def _eval_model( request_defs: list[OutputVariableDef], ): model = self.dp.to(DEVICE) - prec = NP_PRECISION_DICT[RESERVED_PRECISON_DICT[GLOBAL_PT_FLOAT_PRECISION]] + prec = NP_PRECISION_DICT[RESERVED_PRECISION_DICT[GLOBAL_PT_FLOAT_PRECISION]] nframes = coords.shape[0] 
if len(atom_types.shape) == 1: @@ -421,7 +421,7 @@ def _eval_model( device=DEVICE, ) type_input = torch.tensor( - atom_types.astype(NP_PRECISION_DICT[RESERVED_PRECISON_DICT[torch.long]]), + atom_types.astype(NP_PRECISION_DICT[RESERVED_PRECISION_DICT[torch.long]]), dtype=torch.long, device=DEVICE, ) @@ -553,7 +553,7 @@ def _eval_model_spin( np.abs(shape), np.nan, dtype=NP_PRECISION_DICT[ - RESERVED_PRECISON_DICT[GLOBAL_PT_FLOAT_PRECISION] + RESERVED_PRECISION_DICT[GLOBAL_PT_FLOAT_PRECISION] ], ) ) # this is kinda hacky diff --git a/deepmd/pt/model/descriptor/dpa1.py b/deepmd/pt/model/descriptor/dpa1.py index ba2fd1b6c6..47b2a4d15c 100644 --- a/deepmd/pt/model/descriptor/dpa1.py +++ b/deepmd/pt/model/descriptor/dpa1.py @@ -23,7 +23,7 @@ ) from deepmd.pt.utils.env import ( PRECISION_DICT, - RESERVED_PRECISON_DICT, + RESERVED_PRECISION_DICT, ) from deepmd.pt.utils.tabulate import ( DPTabulate, @@ -505,7 +505,7 @@ def serialize(self) -> dict: "use_tebd_bias": self.use_tebd_bias, "type_map": self.type_map, # make deterministic - "precision": RESERVED_PRECISON_DICT[obj.prec], + "precision": RESERVED_PRECISION_DICT[obj.prec], "embeddings": obj.filter_layers.serialize(), "attention_layers": obj.dpa1_attention.serialize(), "env_mat": DPEnvMat(obj.rcut, obj.rcut_smth).serialize(), diff --git a/deepmd/pt/model/descriptor/se_a.py b/deepmd/pt/model/descriptor/se_a.py index 408cd51d3d..550154300b 100644 --- a/deepmd/pt/model/descriptor/se_a.py +++ b/deepmd/pt/model/descriptor/se_a.py @@ -23,7 +23,7 @@ ) from deepmd.pt.utils.env import ( PRECISION_DICT, - RESERVED_PRECISON_DICT, + RESERVED_PRECISION_DICT, ) from deepmd.pt.utils.env_mat_stat import ( EnvMatStatSe, @@ -379,7 +379,7 @@ def serialize(self) -> dict: "set_davg_zero": obj.set_davg_zero, "activation_function": obj.activation_function, # make deterministic - "precision": RESERVED_PRECISON_DICT[obj.prec], + "precision": RESERVED_PRECISION_DICT[obj.prec], "embeddings": obj.filter_layers.serialize(), "env_mat": DPEnvMat(obj.rcut, obj.rcut_smth).serialize(), "exclude_types": obj.exclude_types, diff --git a/deepmd/pt/model/descriptor/se_atten_v2.py b/deepmd/pt/model/descriptor/se_atten_v2.py index 11d783261e..533d7887e0 100644 --- a/deepmd/pt/model/descriptor/se_atten_v2.py +++ b/deepmd/pt/model/descriptor/se_atten_v2.py @@ -20,7 +20,7 @@ env, ) from deepmd.pt.utils.env import ( - RESERVED_PRECISON_DICT, + RESERVED_PRECISION_DICT, ) from deepmd.utils.version import ( check_version_compatibility, @@ -223,7 +223,7 @@ def serialize(self) -> dict: "use_tebd_bias": self.use_tebd_bias, "type_map": self.type_map, # make deterministic - "precision": RESERVED_PRECISON_DICT[obj.prec], + "precision": RESERVED_PRECISION_DICT[obj.prec], "embeddings": obj.filter_layers.serialize(), "embeddings_strip": obj.filter_layers_strip.serialize(), "attention_layers": obj.dpa1_attention.serialize(), diff --git a/deepmd/pt/model/descriptor/se_r.py b/deepmd/pt/model/descriptor/se_r.py index 2cf597d015..f25fb93fa7 100644 --- a/deepmd/pt/model/descriptor/se_r.py +++ b/deepmd/pt/model/descriptor/se_r.py @@ -25,7 +25,7 @@ ) from deepmd.pt.utils.env import ( PRECISION_DICT, - RESERVED_PRECISON_DICT, + RESERVED_PRECISION_DICT, ) from deepmd.pt.utils.env_mat_stat import ( EnvMatStatSe, @@ -549,7 +549,7 @@ def serialize(self) -> dict: "set_davg_zero": self.set_davg_zero, "activation_function": self.activation_function, # make deterministic - "precision": RESERVED_PRECISON_DICT[self.prec], + "precision": RESERVED_PRECISION_DICT[self.prec], "embeddings": self.filter_layers.serialize(), 
"env_mat": DPEnvMat(self.rcut, self.rcut_smth).serialize(), "exclude_types": self.exclude_types, diff --git a/deepmd/pt/model/descriptor/se_t.py b/deepmd/pt/model/descriptor/se_t.py index 90565300dc..d41a28d3db 100644 --- a/deepmd/pt/model/descriptor/se_t.py +++ b/deepmd/pt/model/descriptor/se_t.py @@ -23,7 +23,7 @@ ) from deepmd.pt.utils.env import ( PRECISION_DICT, - RESERVED_PRECISON_DICT, + RESERVED_PRECISION_DICT, ) from deepmd.pt.utils.env_mat_stat import ( EnvMatStatSe, @@ -413,7 +413,7 @@ def serialize(self) -> dict: "resnet_dt": obj.resnet_dt, "set_davg_zero": obj.set_davg_zero, "activation_function": obj.activation_function, - "precision": RESERVED_PRECISON_DICT[obj.prec], + "precision": RESERVED_PRECISION_DICT[obj.prec], "embeddings": obj.filter_layers.serialize(), "env_mat": DPEnvMat(obj.rcut, obj.rcut_smth).serialize(), "exclude_types": obj.exclude_types, diff --git a/deepmd/pt/model/descriptor/se_t_tebd.py b/deepmd/pt/model/descriptor/se_t_tebd.py index 01380a7fdf..0c7b37bb58 100644 --- a/deepmd/pt/model/descriptor/se_t_tebd.py +++ b/deepmd/pt/model/descriptor/se_t_tebd.py @@ -30,7 +30,7 @@ ) from deepmd.pt.utils.env import ( PRECISION_DICT, - RESERVED_PRECISON_DICT, + RESERVED_PRECISION_DICT, ) from deepmd.pt.utils.env_mat_stat import ( EnvMatStatSe, @@ -354,7 +354,7 @@ def serialize(self) -> dict: "use_econf_tebd": self.use_econf_tebd, "type_map": self.type_map, # make deterministic - "precision": RESERVED_PRECISON_DICT[obj.prec], + "precision": RESERVED_PRECISION_DICT[obj.prec], "embeddings": obj.filter_layers.serialize(), "env_mat": DPEnvMat(obj.rcut, obj.rcut_smth).serialize(), "type_embedding": self.type_embedding.embedding.serialize(), diff --git a/deepmd/pt/model/model/make_model.py b/deepmd/pt/model/model/make_model.py index 472eae5329..c32abaa095 100644 --- a/deepmd/pt/model/model/make_model.py +++ b/deepmd/pt/model/model/make_model.py @@ -28,7 +28,7 @@ GLOBAL_PT_ENER_FLOAT_PRECISION, GLOBAL_PT_FLOAT_PRECISION, PRECISION_DICT, - RESERVED_PRECISON_DICT, + RESERVED_PRECISION_DICT, ) from deepmd.pt.utils.nlist import ( extend_input_and_build_neighbor_list, @@ -76,7 +76,7 @@ def __init__( else: self.atomic_model: T_AtomicModel = T_AtomicModel(*args, **kwargs) self.precision_dict = PRECISION_DICT - self.reverse_precision_dict = RESERVED_PRECISON_DICT + self.reverse_precision_dict = RESERVED_PRECISION_DICT self.global_pt_float_precision = GLOBAL_PT_FLOAT_PRECISION self.global_pt_ener_float_precision = GLOBAL_PT_ENER_FLOAT_PRECISION diff --git a/deepmd/pt/utils/env.py b/deepmd/pt/utils/env.py index 6471fd80a8..9803f8d04d 100644 --- a/deepmd/pt/utils/env.py +++ b/deepmd/pt/utils/env.py @@ -54,7 +54,7 @@ PRECISION_DICT["default"] = GLOBAL_PT_FLOAT_PRECISION assert VALID_PRECISION.issubset(PRECISION_DICT.keys()) # cannot automatically generated -RESERVED_PRECISON_DICT = { +RESERVED_PRECISION_DICT = { torch.float16: "float16", torch.float32: "float32", torch.float64: "float64", @@ -63,7 +63,7 @@ torch.bfloat16: "bfloat16", torch.bool: "bool", } -assert set(PRECISION_DICT.values()) == set(RESERVED_PRECISON_DICT.keys()) +assert set(PRECISION_DICT.values()) == set(RESERVED_PRECISION_DICT.keys()) DEFAULT_PRECISION = "float64" # throw warnings if threads not set @@ -87,6 +87,6 @@ "LOCAL_RANK", "NUM_WORKERS", "PRECISION_DICT", - "RESERVED_PRECISON_DICT", + "RESERVED_PRECISION_DICT", "SAMPLER_RECORD", ] From ff9b75ed57317feb2d30a792b34fe8b35298e97f Mon Sep 17 00:00:00 2001 From: James Misaka Date: Thu, 26 Dec 2024 15:44:30 +0800 Subject: [PATCH 39/43] Fix: Modify docs of DPA models 
(#4510) Modify docs of DPA models, especially for DPA-1 website ## Summary by CodeRabbit - **Documentation** - Updated DPA-2 model documentation for improved clarity and accessibility. - Changed references in the "se_atten" descriptor documentation to link to a formal publication on Nature. - Revised citations in the fine-tuning documentation to point to the DPA-1 paper on Nature, enhancing the credibility of sources. --- doc/model/dpa2.md | 2 +- doc/model/train-se-atten.md | 4 ++-- doc/train/finetuning.md | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/doc/model/dpa2.md b/doc/model/dpa2.md index 300876bf05..a041547a14 100644 --- a/doc/model/dpa2.md +++ b/doc/model/dpa2.md @@ -4,7 +4,7 @@ **Supported backends**: PyTorch {{ pytorch_icon }}, JAX {{ jax_icon }}, DP {{ dpmodel_icon }} ::: -The DPA-2 model implementation. See https://doi.org/10.1038/s41524-024-01493-2 for more details. +The DPA-2 model implementation. See [DPA-2 paper](https://doi.org/10.1038/s41524-024-01493-2) for more details. Training example: `examples/water/dpa2/input_torch_medium.json`, see [README](../../examples/water/dpa2/README.md) for inputs in different levels. diff --git a/doc/model/train-se-atten.md b/doc/model/train-se-atten.md index 92a56395f6..504b214737 100644 --- a/doc/model/train-se-atten.md +++ b/doc/model/train-se-atten.md @@ -8,7 +8,7 @@ Here we propose DPA-1, a Deep Potential model with a novel attention mechanism, which is highly effective for representing the conformation and chemical spaces of atomic systems and learning the PES. -See [this paper](https://arxiv.org/abs/2208.08236) for more information. DPA-1 is implemented as a new descriptor `"se_atten"` for model training, which can be used after simply editing the input.json. +See [this paper](https://www.nature.com/articles/s41524-024-01278-7) for more information. DPA-1 is implemented as a new descriptor `"se_atten"` for model training, which can be used after simply editing the input.json. ## Theory @@ -71,7 +71,7 @@ Then layer normalization is added in a residual way to finally obtain the self-a Next, we will list the detailed settings in input.json and the data format, especially for large systems with dozens of elements. An example of DPA-1 input can be found in `examples/water/se_atten/input.json`. The notation of `se_atten` is short for the smooth edition of Deep Potential with an attention mechanism. -This descriptor was described in detail in [the DPA-1 paper](https://arxiv.org/abs/2208.08236) and the images above. +This descriptor was described in detail in [the DPA-1 paper](https://www.nature.com/articles/s41524-024-01278-7) and the images above. In this example, we will train a DPA-1 model for a water system. A complete training input script of this example can be found in the directory: diff --git a/doc/train/finetuning.md b/doc/train/finetuning.md index 04d86cfc98..beb6012003 100644 --- a/doc/train/finetuning.md +++ b/doc/train/finetuning.md @@ -9,7 +9,7 @@ to vastly reduce the training cost, while it's not trivial in potential models. Compositions and configurations of data samples or even computational parameters in upstream software (such as VASP) may be different between the pre-trained and target datasets, leading to energy shifts or other diversities of training data. 
-Recently the emerging of methods such as [DPA-1](https://arxiv.org/abs/2208.08236) has brought us to a new stage where we can +Recently the emerging of methods such as [DPA-1](https://www.nature.com/articles/s41524-024-01278-7) has brought us to a new stage where we can perform similar pretraining-finetuning approaches. They can hopefully learn the common knowledge in the pre-trained dataset (especially the `force` information) and thus reduce the computational cost in downstream training tasks. @@ -19,7 +19,7 @@ and thus reduce the computational cost in downstream training tasks. If you have a pre-trained model `pretrained.pb` (here we support models using [`se_atten`](../model/train-se-atten.md) descriptor and [`ener`](../model/train-energy.md) fitting net) on a large dataset (for example, [OC2M](https://github.com/Open-Catalyst-Project/ocp/blob/main/DATASET.md) in -DPA-1 [paper](https://arxiv.org/abs/2208.08236)), a finetuning strategy can be performed by simply running: +DPA-1 [paper](https://www.nature.com/articles/s41524-024-01278-7)), a finetuning strategy can be performed by simply running: ```bash $ dp train input.json --finetune pretrained.pb From cc27a604121eb1169625e3f93de463a963dde11e Mon Sep 17 00:00:00 2001 From: Chun Cai Date: Thu, 26 Dec 2024 19:33:44 +0800 Subject: [PATCH 40/43] Perf: load data systems on rank 0 (#4478) The current implementation loads data on each rank. This will stress the file system. In this PR, only rank 0 will load data systems, and it will be broadcasted to each rank. The data sampler initialized later will still use the exclusive seed of each rank. ## Summary by CodeRabbit - **New Features** - Enhanced handling of distributed data loading for improved synchronization across processes. - Added broadcasting of the constructed dataset to ensure consistency in all processes. - **Bug Fixes** - Implemented safeguards to prevent incomplete data distribution by asserting the integrity of the dataset. 
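The load-on-rank-0-then-broadcast pattern adopted in the diff below can be sketched as follows (a minimal illustration, not the PR's exact code; it assumes `torch.distributed` has already been initialized by the launcher, and `construct` is a hypothetical stand-in for the dataset constructor):

```python
import torch.distributed as dist


def build_on_rank0(paths, construct):
    """Build one object per path on rank 0 only, then replicate to all ranks."""
    rank = dist.get_rank() if dist.is_initialized() else 0
    # only rank 0 touches the file system; the other ranks hold placeholders
    objs = [construct(p) for p in paths] if rank == 0 else [None] * len(paths)
    if dist.is_initialized():
        # broadcast_object_list pickles each entry on the source rank (0 by
        # default) and fills the placeholder entries in-place on other ranks
        dist.broadcast_object_list(objs)
    assert objs[-1] is not None  # guard against an incomplete broadcast
    return objs
```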
--------- Signed-off-by: Chun Cai Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Jinzhe Zeng --- deepmd/pt/utils/dataloader.py | 25 +++++++++++-------------- 1 file changed, 11 insertions(+), 14 deletions(-) diff --git a/deepmd/pt/utils/dataloader.py b/deepmd/pt/utils/dataloader.py index 12681a304d..1c7a1884d3 100644 --- a/deepmd/pt/utils/dataloader.py +++ b/deepmd/pt/utils/dataloader.py @@ -94,26 +94,23 @@ def __init__( with h5py.File(systems) as file: systems = [os.path.join(systems, item) for item in file.keys()] - self.systems: list[DeepmdDataSetForLoader] = [] - if len(systems) >= 100: - log.info(f"Constructing DataLoaders from {len(systems)} systems") - def construct_dataset(system): return DeepmdDataSetForLoader( system=system, type_map=type_map, ) - with Pool( - os.cpu_count() - // ( - int(os.environ["LOCAL_WORLD_SIZE"]) - if dist.is_available() and dist.is_initialized() - else 1 - ) - ) as pool: - self.systems = pool.map(construct_dataset, systems) - + self.systems: list[DeepmdDataSetForLoader] = [] + global_rank = dist.get_rank() if dist.is_initialized() else 0 + if global_rank == 0: + log.info(f"Constructing DataLoaders from {len(systems)} systems") + with Pool(max(1, env.NUM_WORKERS)) as pool: + self.systems = pool.map(construct_dataset, systems) + else: + self.systems = [None] * len(systems) # type: ignore + if dist.is_initialized(): + dist.broadcast_object_list(self.systems) + assert self.systems[-1] is not None self.sampler_list: list[DistributedSampler] = [] self.index = [] self.total_batch = 0 From bf79cc6417430f256cc17c90fb915a876ad73b5b Mon Sep 17 00:00:00 2001 From: HydrogenSulfate <490868991@qq.com> Date: Thu, 26 Dec 2024 23:41:56 +0800 Subject: [PATCH 41/43] pd: fix typo in deepmd-kit-tmp/deepmd/pd/utils/dataloader.py (#4512) Fix a typo introduced in #4479, which will cause an error if torch is not installed. ![image](https://github.com/user-attachments/assets/3f9b0955-ec2b-46ac-9efa-1ed4b4e023eb) cc @njzjz ## Summary by CodeRabbit - **Chores** - Updated the import path for the `mix_entropy` function to reflect a reorganization of utility functions. Co-authored-by: Chun Cai --- deepmd/pd/utils/dataloader.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deepmd/pd/utils/dataloader.py b/deepmd/pd/utils/dataloader.py index 80b3e7cb8b..221a5e776d 100644 --- a/deepmd/pd/utils/dataloader.py +++ b/deepmd/pd/utils/dataloader.py @@ -36,7 +36,7 @@ from deepmd.pd.utils.dataset import ( DeepmdDataSetForLoader, ) -from deepmd.pt.utils.utils import ( +from deepmd.pd.utils.utils import ( mix_entropy, ) from deepmd.utils import random as dp_random From c5ad841658364c20e436525da9e8e59399df0edf Mon Sep 17 00:00:00 2001 From: Anchor Yu <91590308+1azyking@users.noreply.github.com> Date: Sat, 28 Dec 2024 04:39:55 +0800 Subject: [PATCH 42/43] feat(pt): train with energy Hessian (#4169) ## Summary by CodeRabbit - **New Features** - Introduced support for Hessian calculations across various components, enhancing the model's capabilities. - Added a new loss function for Hessian, allowing for more comprehensive training scenarios. - New JSON configuration files for multi-task and single-task learning models. - Enhanced output handling to include Hessian data in model evaluations. - Added new methods and properties to support Hessian in several classes and modules. - **Bug Fixes** - Improved handling of output shapes and results related to Hessian data. 
- **Documentation** - Updated documentation to include new Hessian properties and training guidelines. - Added sections detailing Hessian configurations and requirements in the training documentation. - **Tests** - Added unit tests for the new Hessian-related functionalities to ensure consistency and correctness. - Enhanced existing test cases to incorporate Hessian data handling and validation. --------- Signed-off-by: Anchor Yu <91590308+1azyking@users.noreply.github.com> Co-authored-by: coderabbitai[bot] <136622811+coderabbitai[bot]@users.noreply.github.com> Co-authored-by: anyangml Co-authored-by: Han Wang <92130845+wanghan-iapcm@users.noreply.github.com> Co-authored-by: Jinzhe Zeng --- deepmd/calculator.py | 2 +- deepmd/dpmodel/infer/deep_eval.py | 3 + deepmd/driver.py | 2 +- deepmd/entrypoints/test.py | 39 +++- deepmd/infer/deep_eval.py | 10 + deepmd/infer/deep_pot.py | 18 +- deepmd/jax/infer/deep_eval.py | 3 + deepmd/pt/infer/deep_eval.py | 12 +- deepmd/pt/infer/inference.py | 3 + deepmd/pt/loss/__init__.py | 2 + deepmd/pt/loss/ener.py | 72 +++++++ deepmd/pt/model/descriptor/env_mat.py | 5 +- deepmd/pt/model/model/__init__.py | 2 + deepmd/pt/model/model/ener_model.py | 14 ++ deepmd/pt/model/model/make_hessian_model.py | 3 +- deepmd/pt/train/training.py | 37 +++- deepmd/utils/argcheck.py | 16 ++ deepmd/utils/data.py | 21 +- doc/data/system.md | 42 ++-- doc/model/index.rst | 1 + doc/model/overall.md | 7 +- doc/model/train-energy-hessian.md | 166 ++++++++++++++++ .../hessian/data/H10C5N2O/set.000/box.npy | Bin 0 -> 776 bytes .../hessian/data/H10C5N2O/set.000/coord.npy | Bin 0 -> 4016 bytes .../hessian/data/H10C5N2O/set.000/energy.npy | Bin 0 -> 200 bytes .../hessian/data/H10C5N2O/set.000/force.npy | Bin 0 -> 4016 bytes .../hessian/data/H10C5N2O/set.000/hessian.npy | Bin 0 -> 210080 bytes examples/hessian/data/H10C5N2O/type.raw | 18 ++ examples/hessian/data/H10C5N2O/type_map.raw | 4 + examples/hessian/data/H8C4N2O/set.000/box.npy | Bin 0 -> 920 bytes .../hessian/data/H8C4N2O/set.000/coord.npy | Bin 0 -> 4088 bytes .../hessian/data/H8C4N2O/set.000/energy.npy | Bin 0 -> 216 bytes .../hessian/data/H8C4N2O/set.000/force.npy | Bin 0 -> 4088 bytes .../hessian/data/H8C4N2O/set.000/hessian.npy | Bin 0 -> 178328 bytes examples/hessian/data/H8C4N2O/type.raw | 15 ++ examples/hessian/data/H8C4N2O/type_map.raw | 4 + examples/hessian/multi_task/input.json | 129 ++++++++++++ examples/hessian/single_task/input.json | 116 +++++++++++ source/tests/common/test_examples.py | 2 + source/tests/infer/test_models.py | 8 +- .../pt/hessian/data/H8C4N2O/set.000/box.npy | Bin 0 -> 920 bytes .../pt/hessian/data/H8C4N2O/set.000/coord.npy | Bin 0 -> 4088 bytes .../hessian/data/H8C4N2O/set.000/energy.npy | Bin 0 -> 216 bytes .../pt/hessian/data/H8C4N2O/set.000/force.npy | Bin 0 -> 4088 bytes .../hessian/data/H8C4N2O/set.000/hessian.npy | Bin 0 -> 178328 bytes source/tests/pt/hessian/data/H8C4N2O/type.raw | 15 ++ .../pt/hessian/data/H8C4N2O/type_map.raw | 4 + .../tests/pt/model/test_dp_hessian_model.py | 183 ++++++++++++++++++ source/tests/pt/test_change_bias.py | 16 +- source/tests/pt/test_loss.py | 134 +++++++++++++ 50 files changed, 1083 insertions(+), 45 deletions(-) create mode 100644 doc/model/train-energy-hessian.md create mode 100644 examples/hessian/data/H10C5N2O/set.000/box.npy create mode 100644 examples/hessian/data/H10C5N2O/set.000/coord.npy create mode 100644 examples/hessian/data/H10C5N2O/set.000/energy.npy create mode 100644 examples/hessian/data/H10C5N2O/set.000/force.npy create mode 100644 
examples/hessian/data/H10C5N2O/set.000/hessian.npy create mode 100644 examples/hessian/data/H10C5N2O/type.raw create mode 100644 examples/hessian/data/H10C5N2O/type_map.raw create mode 100644 examples/hessian/data/H8C4N2O/set.000/box.npy create mode 100644 examples/hessian/data/H8C4N2O/set.000/coord.npy create mode 100644 examples/hessian/data/H8C4N2O/set.000/energy.npy create mode 100644 examples/hessian/data/H8C4N2O/set.000/force.npy create mode 100644 examples/hessian/data/H8C4N2O/set.000/hessian.npy create mode 100644 examples/hessian/data/H8C4N2O/type.raw create mode 100644 examples/hessian/data/H8C4N2O/type_map.raw create mode 100644 examples/hessian/multi_task/input.json create mode 100644 examples/hessian/single_task/input.json create mode 100644 source/tests/pt/hessian/data/H8C4N2O/set.000/box.npy create mode 100644 source/tests/pt/hessian/data/H8C4N2O/set.000/coord.npy create mode 100644 source/tests/pt/hessian/data/H8C4N2O/set.000/energy.npy create mode 100644 source/tests/pt/hessian/data/H8C4N2O/set.000/force.npy create mode 100644 source/tests/pt/hessian/data/H8C4N2O/set.000/hessian.npy create mode 100644 source/tests/pt/hessian/data/H8C4N2O/type.raw create mode 100644 source/tests/pt/hessian/data/H8C4N2O/type_map.raw create mode 100644 source/tests/pt/model/test_dp_hessian_model.py diff --git a/deepmd/calculator.py b/deepmd/calculator.py index c5f742bbec..6ac676dcf6 100644 --- a/deepmd/calculator.py +++ b/deepmd/calculator.py @@ -130,7 +130,7 @@ def calculate( cell = None symbols = self.atoms.get_chemical_symbols() atype = [self.type_dict[k] for k in symbols] - e, f, v = self.dp.eval(coords=coord, cells=cell, atom_types=atype) + e, f, v = self.dp.eval(coords=coord, cells=cell, atom_types=atype)[:3] self.results["energy"] = e[0][0] # see https://gitlab.com/ase/ase/-/merge_requests/2485 self.results["free_energy"] = e[0][0] diff --git a/deepmd/dpmodel/infer/deep_eval.py b/deepmd/dpmodel/infer/deep_eval.py index ce176f5f45..91fa0ac2ac 100644 --- a/deepmd/dpmodel/infer/deep_eval.py +++ b/deepmd/dpmodel/infer/deep_eval.py @@ -383,6 +383,9 @@ def _get_output_shape(self, odef, nframes, natoms): # Something wrong here? 
# return [nframes, *shape, natoms, 1] return [nframes, natoms, *odef.shape, 1] + elif odef.category == OutputVariableCategory.DERV_R_DERV_R: + # hessian + return [nframes, 3 * natoms, 3 * natoms] else: raise RuntimeError("unknown category") diff --git a/deepmd/driver.py b/deepmd/driver.py index 30916259aa..8d17968376 100644 --- a/deepmd/driver.py +++ b/deepmd/driver.py @@ -67,7 +67,7 @@ def label(self, data: dict) -> dict: cell = data["cells"].reshape((nframes, 9)) else: cell = None - e, f, v = self.dp.eval(coord, cell, atype) + e, f, v = self.dp.eval(coords=coord, cells=cell, atom_types=atype)[:3] data = data.copy() data["energies"] = e.reshape((nframes,)) data["forces"] = f.reshape((nframes, natoms, 3)) diff --git a/deepmd/entrypoints/test.py b/deepmd/entrypoints/test.py index 5aeb84468d..90147ac3af 100644 --- a/deepmd/entrypoints/test.py +++ b/deepmd/entrypoints/test.py @@ -303,6 +303,8 @@ def test_ener( if dp.has_spin: data.add("spin", 3, atomic=True, must=True, high_prec=False) data.add("force_mag", 3, atomic=True, must=False, high_prec=False) + if dp.has_hessian: + data.add("hessian", 1, atomic=True, must=True, high_prec=False) test_data = data.get_test() mixed_type = data.mixed_type @@ -352,6 +354,9 @@ def test_ener( energy = energy.reshape([numb_test, 1]) force = force.reshape([numb_test, -1]) virial = virial.reshape([numb_test, 9]) + if dp.has_hessian: + hessian = ret[3] + hessian = hessian.reshape([numb_test, -1]) if has_atom_ener: ae = ret[3] av = ret[4] @@ -415,6 +420,10 @@ def test_ener( rmse_ea = rmse_e / natoms mae_va = mae_v / natoms rmse_va = rmse_v / natoms + if dp.has_hessian: + diff_h = hessian - test_data["hessian"][:numb_test] + mae_h = mae(diff_h) + rmse_h = rmse(diff_h) if has_atom_ener: diff_ae = test_data["atom_ener"][:numb_test].reshape([-1]) - ae.reshape([-1]) mae_ae = mae(diff_ae) @@ -447,6 +456,9 @@ def test_ener( if has_atom_ener: log.info(f"Atomic ener MAE : {mae_ae:e} eV") log.info(f"Atomic ener RMSE : {rmse_ae:e} eV") + if dp.has_hessian: + log.info(f"Hessian MAE : {mae_h:e} eV/A^2") + log.info(f"Hessian RMSE : {rmse_h:e} eV/A^2") if detail_file is not None: detail_path = Path(detail_file) @@ -530,8 +542,24 @@ def test_ener( "pred_vyy pred_vyz pred_vzx pred_vzy pred_vzz", append=append_detail, ) + if dp.has_hessian: + data_h = test_data["hessian"][:numb_test].reshape(-1, 1) + pred_h = hessian.reshape(-1, 1) + h = np.concatenate( + ( + data_h, + pred_h, + ), + axis=1, + ) + save_txt_file( + detail_path.with_suffix(".h.out"), + h, + header=f"{system}: data_h pred_h (3Na*3Na matrix in row-major order)", + append=append_detail, + ) if not out_put_spin: - return { + dict_to_return = { "mae_e": (mae_e, energy.size), "mae_ea": (mae_ea, energy.size), "mae_f": (mae_f, force.size), @@ -544,7 +572,7 @@ def test_ener( "rmse_va": (rmse_va, virial.size), } else: - return { + dict_to_return = { "mae_e": (mae_e, energy.size), "mae_ea": (mae_ea, energy.size), "mae_fr": (mae_fr, force_r.size), @@ -558,6 +586,10 @@ def test_ener( "rmse_v": (rmse_v, virial.size), "rmse_va": (rmse_va, virial.size), } + if dp.has_hessian: + dict_to_return["mae_h"] = (mae_h, hessian.size) + dict_to_return["rmse_h"] = (rmse_h, hessian.size) + return dict_to_return def print_ener_sys_avg(avg: dict[str, float]) -> None: @@ -584,6 +616,9 @@ def print_ener_sys_avg(avg: dict[str, float]) -> None: log.info(f"Virial RMSE : {avg['rmse_v']:e} eV") log.info(f"Virial MAE/Natoms : {avg['mae_va']:e} eV") log.info(f"Virial RMSE/Natoms : {avg['rmse_va']:e} eV") + if "rmse_h" in avg.keys(): + log.info(f"Hessian 
MAE : {avg['mae_h']:e} eV/A^2") + log.info(f"Hessian RMSE : {avg['rmse_h']:e} eV/A^2") def test_dos( diff --git a/deepmd/infer/deep_eval.py b/deepmd/infer/deep_eval.py index 15e4a56280..f87bfb1177 100644 --- a/deepmd/infer/deep_eval.py +++ b/deepmd/infer/deep_eval.py @@ -75,6 +75,7 @@ class DeepEvalBackend(ABC): # old models in v1 "global_polar": "global_polar", "wfc": "wfc", + "energy_derv_r_derv_r": "hessian", } @abstractmethod @@ -274,6 +275,10 @@ def get_has_spin(self) -> bool: """Check if the model has spin atom types.""" return False + def get_has_hessian(self): + """Check if the model has hessian.""" + return False + def get_var_name(self) -> str: """Get the name of the fitting property.""" raise NotImplementedError @@ -543,6 +548,11 @@ def has_spin(self) -> bool: """Check if the model has spin.""" return self.deep_eval.get_has_spin() + @property + def has_hessian(self) -> bool: + """Check if the model has hessian.""" + return self.deep_eval.get_has_hessian() + def get_ntypes_spin(self) -> int: """Get the number of spin atom types of this model. Only used in old implement.""" return self.deep_eval.get_ntypes_spin() diff --git a/deepmd/infer/deep_pot.py b/deepmd/infer/deep_pot.py index 4755bc276a..6e00a30f91 100644 --- a/deepmd/infer/deep_pot.py +++ b/deepmd/infer/deep_pot.py @@ -64,6 +64,7 @@ def output_def(self) -> ModelOutputDef: r_differentiable=True, c_differentiable=True, atomic=True, + r_hessian=True, ), ] ) @@ -99,7 +100,10 @@ def eval( aparam: Optional[np.ndarray], mixed_type: bool, **kwargs: Any, - ) -> tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]: + ) -> Union[ + tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray], + tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray], + ]: pass @overload @@ -113,7 +117,10 @@ def eval( aparam: Optional[np.ndarray], mixed_type: bool, **kwargs: Any, - ) -> tuple[np.ndarray, np.ndarray, np.ndarray]: + ) -> Union[ + tuple[np.ndarray, np.ndarray, np.ndarray], + tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray], + ]: pass @overload @@ -179,6 +186,8 @@ def eval( atomic_virial The atomic virial of the system, in shape (nframes, natoms, 9). Only returned when atomic is True. + hessian + The Hessian matrix of the system, in shape (nframes, 3 * natoms, 3 * natoms). Returned when available. 
""" # This method has been used by: # documentation python.md @@ -239,6 +248,11 @@ def eval( force_mag = results["energy_derv_r_mag"].reshape(nframes, natoms, 3) mask_mag = results["mask_mag"].reshape(nframes, natoms, 1) result = (*list(result), force_mag, mask_mag) + if self.deep_eval.get_has_hessian(): + hessian = results["energy_derv_r_derv_r"].reshape( + nframes, 3 * natoms, 3 * natoms + ) + result = (*list(result), hessian) return result diff --git a/deepmd/jax/infer/deep_eval.py b/deepmd/jax/infer/deep_eval.py index a83964329e..acfd42b66a 100644 --- a/deepmd/jax/infer/deep_eval.py +++ b/deepmd/jax/infer/deep_eval.py @@ -411,6 +411,9 @@ def _get_output_shape(self, odef, nframes, natoms): elif odef.category == OutputVariableCategory.OUT: # atom_energy, atom_tensor return [nframes, natoms, *odef.shape, 1] + elif odef.category == OutputVariableCategory.DERV_R_DERV_R: + # hessian + return [nframes, 3 * natoms, 3 * natoms] else: raise RuntimeError("unknown category") diff --git a/deepmd/pt/infer/deep_eval.py b/deepmd/pt/infer/deep_eval.py index b38dac78da..06126b5dab 100644 --- a/deepmd/pt/infer/deep_eval.py +++ b/deepmd/pt/infer/deep_eval.py @@ -130,7 +130,8 @@ def __init__( ] = state_dict[item].clone() state_dict = state_dict_head model = get_model(self.input_param).to(DEVICE) - model = torch.jit.script(model) + if not self.input_param.get("hessian_mode"): + model = torch.jit.script(model) self.dp = ModelWrapper(model) self.dp.load_state_dict(state_dict) elif str(self.model_path).endswith(".pth"): @@ -160,6 +161,7 @@ def __init__( self._has_spin = getattr(self.dp.model["Default"], "has_spin", False) if callable(self._has_spin): self._has_spin = self._has_spin() + self._has_hessian = self.model_def_script.get("hessian_mode", False) def get_rcut(self) -> float: """Get the cutoff radius of this model.""" @@ -243,6 +245,10 @@ def get_has_spin(self): """Check if the model has spin atom types.""" return self._has_spin + def get_has_hessian(self): + """Check if the model has hessian.""" + return self._has_hessian + def eval( self, coords: np.ndarray, @@ -348,6 +354,7 @@ def _get_request_defs(self, atomic: bool) -> list[OutputVariableDef]: OutputVariableCategory.REDU, OutputVariableCategory.DERV_R, OutputVariableCategory.DERV_C_REDU, + OutputVariableCategory.DERV_R_DERV_R, ) ] @@ -577,6 +584,9 @@ def _get_output_shape(self, odef, nframes, natoms): # Something wrong here? 
# return [nframes, *shape, natoms, 1] return [nframes, natoms, *odef.shape, 1] + elif odef.category == OutputVariableCategory.DERV_R_DERV_R: + return [nframes, 3 * natoms, 3 * natoms] + # return [nframes, *odef.shape, 3 * natoms, 3 * natoms] else: raise RuntimeError("unknown category") diff --git a/deepmd/pt/infer/inference.py b/deepmd/pt/infer/inference.py index 0e3bc31057..dd0e7eaccb 100644 --- a/deepmd/pt/infer/inference.py +++ b/deepmd/pt/infer/inference.py @@ -55,6 +55,9 @@ def __init__( ] = state_dict[item].clone() state_dict = state_dict_head + model_params.pop( + "hessian_mode", None + ) # wrapper Hessian to Energy model due to JIT limit self.model_params = deepcopy(model_params) self.model = get_model(model_params).to(DEVICE) diff --git a/deepmd/pt/loss/__init__.py b/deepmd/pt/loss/__init__.py index cae561a8a2..1d25c1e52f 100644 --- a/deepmd/pt/loss/__init__.py +++ b/deepmd/pt/loss/__init__.py @@ -6,6 +6,7 @@ DOSLoss, ) from .ener import ( + EnergyHessianStdLoss, EnergyStdLoss, ) from .ener_spin import ( @@ -24,6 +25,7 @@ __all__ = [ "DOSLoss", "DenoiseLoss", + "EnergyHessianStdLoss", "EnergySpinLoss", "EnergyStdLoss", "PropertyLoss", diff --git a/deepmd/pt/loss/ener.py b/deepmd/pt/loss/ener.py index 327d75c2cd..b564aa57ec 100644 --- a/deepmd/pt/loss/ener.py +++ b/deepmd/pt/loss/ener.py @@ -411,3 +411,75 @@ def label_requirement(self) -> list[DataRequirementItem]: ) ) return label_requirement + + +class EnergyHessianStdLoss(EnergyStdLoss): + def __init__( + self, + start_pref_h=0.0, + limit_pref_h=0.0, + **kwargs, + ): + r"""Enable the layer to compute loss on hessian. + + Parameters + ---------- + start_pref_h : float + The prefactor of hessian loss at the start of the training. + limit_pref_h : float + The prefactor of hessian loss at the end of the training. + **kwargs + Other keyword arguments. 
+ """ + super().__init__(**kwargs) + self.has_h = (start_pref_h != 0.0 and limit_pref_h != 0.0) or self.inference + + self.start_pref_h = start_pref_h + self.limit_pref_h = limit_pref_h + + def forward(self, input_dict, model, label, natoms, learning_rate, mae=False): + model_pred, loss, more_loss = super().forward( + input_dict, model, label, natoms, learning_rate, mae=mae + ) + coef = learning_rate / self.starter_learning_rate + pref_h = self.limit_pref_h + (self.start_pref_h - self.limit_pref_h) * coef + + if self.has_h and "hessian" in model_pred and "hessian" in label: + find_hessian = label.get("find_hessian", 0.0) + pref_h = pref_h * find_hessian + diff_h = label["hessian"].reshape( + -1, + ) - model_pred["hessian"].reshape( + -1, + ) + l2_hessian_loss = torch.mean(torch.square(diff_h)) + if not self.inference: + more_loss["l2_hessian_loss"] = self.display_if_exist( + l2_hessian_loss.detach(), find_hessian + ) + loss += pref_h * l2_hessian_loss + rmse_h = l2_hessian_loss.sqrt() + more_loss["rmse_h"] = self.display_if_exist(rmse_h.detach(), find_hessian) + if mae: + mae_h = torch.mean(torch.abs(diff_h)) + more_loss["mae_h"] = self.display_if_exist(mae_h.detach(), find_hessian) + + if not self.inference: + more_loss["rmse"] = torch.sqrt(loss.detach()) + return model_pred, loss, more_loss + + @property + def label_requirement(self) -> list[DataRequirementItem]: + """Add hessian label requirement needed for this loss calculation.""" + label_requirement = super().label_requirement + if self.has_h: + label_requirement.append( + DataRequirementItem( + "hessian", + ndof=1, # 9=3*3 --> 3N*3N=ndof*natoms*natoms + atomic=True, + must=False, + high_prec=False, + ) + ) + return label_requirement diff --git a/deepmd/pt/model/descriptor/env_mat.py b/deepmd/pt/model/descriptor/env_mat.py index e89e7467d3..dc7142249a 100644 --- a/deepmd/pt/model/descriptor/env_mat.py +++ b/deepmd/pt/model/descriptor/env_mat.py @@ -21,10 +21,11 @@ def _make_env_mat( nall = coord.shape[1] mask = nlist >= 0 # nlist = nlist * mask ## this impl will contribute nans in Hessian calculation. 
- nlist = torch.where(mask, nlist, nall - 1) + nlist = torch.where(mask, nlist, nall) coord_l = coord[:, :natoms].view(bsz, -1, 1, 3) index = nlist.view(bsz, -1).unsqueeze(-1).expand(-1, -1, 3) - coord_r = torch.gather(coord, 1, index) + coord_pad = torch.concat([coord, coord[:, -1:, :] + rcut], dim=1) + coord_r = torch.gather(coord_pad, 1, index) coord_r = coord_r.view(bsz, natoms, nnei, 3) diff = coord_r - coord_l length = torch.linalg.norm(diff, dim=-1, keepdim=True) diff --git a/deepmd/pt/model/model/__init__.py b/deepmd/pt/model/model/__init__.py index 491a524da8..37e664e82a 100644 --- a/deepmd/pt/model/model/__init__.py +++ b/deepmd/pt/model/model/__init__.py @@ -276,6 +276,8 @@ def get_standard_model(model_params): pair_exclude_types=pair_exclude_types, preset_out_bias=preset_out_bias, ) + if model_params.get("hessian_mode"): + model.enable_hessian() model.model_def_script = json.dumps(model_params_old) return model diff --git a/deepmd/pt/model/model/ener_model.py b/deepmd/pt/model/model/ener_model.py index 9487bcc5bb..8064d3eac7 100644 --- a/deepmd/pt/model/model/ener_model.py +++ b/deepmd/pt/model/model/ener_model.py @@ -15,6 +15,9 @@ from .dp_model import ( DPModelCommon, ) +from .make_hessian_model import ( + make_hessian_model, +) from .make_model import ( make_model, ) @@ -33,6 +36,13 @@ def __init__( ) -> None: DPModelCommon.__init__(self) DPEnergyModel_.__init__(self, *args, **kwargs) + self._hessian_enabled = False + + def enable_hessian(self): + self.__class__ = make_hessian_model(type(self)) + self.hess_fitting_def = super(type(self), self).atomic_output_def() + self.requires_hessian("energy") + self._hessian_enabled = True def translated_output_def(self): out_def_data = self.model_output_def().get_data() @@ -50,6 +60,8 @@ def translated_output_def(self): output_def["atom_virial"].squeeze(-3) if "mask" in out_def_data: output_def["mask"] = out_def_data["mask"] + if self._hessian_enabled: + output_def["hessian"] = out_def_data["energy_derv_r_derv_r"] return output_def def forward( @@ -85,6 +97,8 @@ def forward( model_predict["force"] = model_ret["dforce"] if "mask" in model_ret: model_predict["mask"] = model_ret["mask"] + if self._hessian_enabled: + model_predict["hessian"] = model_ret["energy_derv_r_derv_r"].squeeze(-2) else: model_predict = model_ret model_predict["updated_coord"] += coord diff --git a/deepmd/pt/model/model/make_hessian_model.py b/deepmd/pt/model/model/make_hessian_model.py index 4104314225..000b9abea4 100644 --- a/deepmd/pt/model/model/make_hessian_model.py +++ b/deepmd/pt/model/model/make_hessian_model.py @@ -172,11 +172,10 @@ def _cal_hessian_one_component( # fparam: Optional[torch.Tensor] = None, # nfp # aparam: Optional[torch.Tensor] = None, # (nloc x nap) wc = wrapper_class_forward_energy(self, ci, atype, box, fparam, aparam) - hess = torch.autograd.functional.hessian( wc, coord, - create_graph=False, + create_graph=self.training, ) return hess diff --git a/deepmd/pt/train/training.py b/deepmd/pt/train/training.py index eca952d7f8..f3b4548b05 100644 --- a/deepmd/pt/train/training.py +++ b/deepmd/pt/train/training.py @@ -25,6 +25,7 @@ from deepmd.pt.loss import ( DenoiseLoss, DOSLoss, + EnergyHessianStdLoss, EnergySpinLoss, EnergyStdLoss, PropertyLoss, @@ -264,8 +265,22 @@ def get_lr(lr_params): else: self.opt_type, self.opt_param = get_opt_param(training_params) + # loss_param_tmp for Hessian activation + loss_param_tmp = None + if not self.multi_task: + loss_param_tmp = config["loss"] + else: + loss_param_tmp = { + model_key: 
config["loss_dict"][model_key] + for model_key in self.model_keys + } + # Model - self.model = get_model_for_wrapper(model_params, resuming=resuming) + self.model = get_model_for_wrapper( + model_params, + resuming=resuming, + _loss_params=loss_param_tmp, + ) # Loss if not self.multi_task: @@ -1210,9 +1225,17 @@ def get_additional_data_requirement(_model): return additional_data_requirement +def whether_hessian(loss_params): + loss_type = loss_params.get("type", "ener") + return loss_type == "ener" and loss_params.get("start_pref_h", 0.0) > 0.0 + + def get_loss(loss_params, start_lr, _ntypes, _model): loss_type = loss_params.get("type", "ener") - if loss_type == "ener": + if whether_hessian(loss_params): + loss_params["starter_learning_rate"] = start_lr + return EnergyHessianStdLoss(**loss_params) + elif loss_type == "ener": loss_params["starter_learning_rate"] = start_lr return EnergyStdLoss(**loss_params) elif loss_type == "dos": @@ -1261,8 +1284,14 @@ def get_single_model( return model -def get_model_for_wrapper(_model_params, resuming=False): +def get_model_for_wrapper( + _model_params, + resuming=False, + _loss_params=None, +): if "model_dict" not in _model_params: + if _loss_params is not None and whether_hessian(_loss_params): + _model_params["hessian_mode"] = True _model = get_single_model( _model_params, ) @@ -1271,6 +1300,8 @@ def get_model_for_wrapper(_model_params, resuming=False): model_keys = list(_model_params["model_dict"]) do_case_embd, case_embd_index = get_case_embd_config(_model_params) for _model_key in model_keys: + if _loss_params is not None and whether_hessian(_loss_params[_model_key]): + _model_params["model_dict"][_model_key]["hessian_mode"] = True _model[_model_key] = get_single_model( _model_params["model_dict"][_model_key], ) diff --git a/deepmd/utils/argcheck.py b/deepmd/utils/argcheck.py index 50ef07b2af..66a6308fc5 100644 --- a/deepmd/utils/argcheck.py +++ b/deepmd/utils/argcheck.py @@ -2179,6 +2179,8 @@ def loss_ener(): doc_limit_pref_f = limit_pref("force") doc_start_pref_v = start_pref("virial", abbr="v") doc_limit_pref_v = limit_pref("virial") + doc_start_pref_h = start_pref("hessian", abbr="h") # prefactor of hessian + doc_limit_pref_h = limit_pref("hessian") doc_start_pref_ae = start_pref("atomic energy", label="atom_ener", abbr="ae") doc_limit_pref_ae = limit_pref("atomic energy") doc_start_pref_pf = start_pref( @@ -2233,6 +2235,20 @@ def loss_ener(): default=0.00, doc=doc_limit_pref_v, ), + Argument( + "start_pref_h", + [float, int], + optional=True, + default=0.00, + doc=doc_start_pref_h, + ), + Argument( + "limit_pref_h", + [float, int], + optional=True, + default=0.00, + doc=doc_limit_pref_h, + ), Argument( "start_pref_ae", [float, int], diff --git a/deepmd/utils/data.py b/deepmd/utils/data.py index 3d74c72bda..d572efd321 100644 --- a/deepmd/utils/data.py +++ b/deepmd/utils/data.py @@ -660,9 +660,24 @@ def _load_data( f"({nframes}, {natoms_sel}, {ndof_}) or" f"({nframes}, {natoms}, {ndof_})" ) - data = data.reshape([nframes, natoms, -1]) - data = data[:, idx_map, :] - data = data.reshape([nframes, -1]) + if key == "hessian": + data = data.reshape(nframes, 3 * natoms, 3 * natoms) + # get idx_map for hessian + num_chunks, chunk_size = len(idx_map), 3 + idx_map_hess = np.arange(num_chunks * chunk_size) # pylint: disable=no-explicit-dtype + idx_map_hess = idx_map_hess.reshape(num_chunks, chunk_size) + idx_map_hess = idx_map_hess[idx_map] + idx_map_hess = idx_map_hess.flatten() + data = data[:, idx_map_hess, :] + data = data[:, :, idx_map_hess] + data 
= data.reshape([nframes, -1]) + ndof = ( + 3 * ndof * 3 * ndof + ) # size of hessian is 3Natoms * 3Natoms + else: + data = data.reshape([nframes, natoms, -1]) + data = data[:, idx_map, :] + data = data.reshape([nframes, -1]) data = np.reshape(data, [nframes, ndof]) except ValueError as err_message: explanation = "This error may occur when your label mismatch it's name, i.e. you might store global tensor in `atomic_tensor.npy` or atomic tensor in `tensor.npy`." diff --git a/doc/data/system.md b/doc/data/system.md index b50c6fa256..f6da7b534b 100644 --- a/doc/data/system.md +++ b/doc/data/system.md @@ -22,29 +22,31 @@ The input frame properties contain the following property, the first axis of whi The labeled frame properties are listed as follows, all of which will be used for training if and only if the loss function contains such property: -| ID | Property | Raw file | Unit | Shape | Description | -| --------------------- | -------------------------------------------------------------------------------- | ------------------------- | ---- | ------------------------------------- | ----------------------------------------- | -| energy | Frame energies | energy.raw | eV | Nframes | -| force | Atomic forces | force.raw | eV/Å | Nframes \* Natoms \* 3 | -| virial | Frame virial | virial.raw | eV | Nframes \* 9 | in the order `XX XY XZ YX YY YZ ZX ZY ZZ` | -| atom_ener | Atomic energies | atom_ener.raw | eV | Nframes \* Natoms | -| atom_pref | Weights of atomic forces | atom_pref.raw | 1 | Nframes \* Natoms | -| dipole | Frame dipole | dipole.raw | Any | Nframes \* 3 | -| atomic_dipole | Atomic dipole | atomic_dipole.raw | Any | Nframes \* Natoms \* 3 | -| polarizability | Frame polarizability | polarizability.raw | Any | Nframes \* 9 | in the order `XX XY XZ YX YY YZ ZX ZY ZZ` | -| atomic_polarizability | Atomic polarizability | atomic_polarizability.raw | Any | Nframes \* Natoms \* 9 | in the order `XX XY XZ YX YY YZ ZX ZY ZZ` | -| drdq | Partial derivative of atomic coordinates with respect to generalized coordinates | drdq.raw | 1 | Nframes \* Natoms \* 3 \* Ngen_coords | +| ID | Property | Raw file | Unit | Shape | Description | +| --------------------- | -------------------------------------------------------------------------------- | ------------------------- | ------ | ------------------------------------- | ----------------------------------------- | +| energy | Frame energies | energy.raw | eV | Nframes | +| force | Atomic forces | force.raw | eV/Å | Nframes \* Natoms \* 3 | +| virial | Frame virial | virial.raw | eV | Nframes \* 9 | in the order `XX XY XZ YX YY YZ ZX ZY ZZ` | +| hessian | Frame energy Hessian matrices | hessian.raw | eV/Å^2 | Nframes \* Natoms \* 3 \* Natoms \* 3 | full Hessian matrices | +| atom_ener | Atomic energies | atom_ener.raw | eV | Nframes \* Natoms | +| atom_pref | Weights of atomic forces | atom_pref.raw | 1 | Nframes \* Natoms | +| dipole | Frame dipole | dipole.raw | Any | Nframes \* 3 | +| atomic_dipole | Atomic dipole | atomic_dipole.raw | Any | Nframes \* Natoms \* 3 | +| polarizability | Frame polarizability | polarizability.raw | Any | Nframes \* 9 | in the order `XX XY XZ YX YY YZ ZX ZY ZZ` | +| atomic_polarizability | Atomic polarizability | atomic_polarizability.raw | Any | Nframes \* Natoms \* 9 | in the order `XX XY XZ YX YY YZ ZX ZY ZZ` | +| drdq | Partial derivative of atomic coordinates with respect to generalized coordinates | drdq.raw | 1 | Nframes \* Natoms \* 3 \* Ngen_coords | In general, we always use the following convention of 
units: -| Property | Unit | -| -------- | ---- | -| Time | ps | -| Length | Å | -| Energy | eV | -| Force | eV/Å | -| Virial | eV | -| Pressure | Bar | +| Property | Unit | +| -------- | ------ | +| Time | ps | +| Length | Å | +| Energy | eV | +| Force | eV/Å | +| Virial | eV | +| Hessian | eV/Å^2 | +| Pressure | Bar | ## Mixed type diff --git a/doc/model/index.rst b/doc/model/index.rst index 5e7ba32486..33dbf571cf 100644 --- a/doc/model/index.rst +++ b/doc/model/index.rst @@ -14,6 +14,7 @@ Model sel train-energy train-energy-spin + train-energy-hessian train-fitting-tensor train-fitting-dos train-fitting-property diff --git a/doc/model/overall.md b/doc/model/overall.md index 7f67c6545d..cc72aa3887 100644 --- a/doc/model/overall.md +++ b/doc/model/overall.md @@ -57,6 +57,11 @@ DeePMD-kit implements the following descriptors: The fitting of the following physical properties is supported -1. [`ener`](train-energy.md): Fit the energy of the system. The force (derivative with atom positions) and the virial (derivative with the box tensor) can also be trained. +1. [`ener`](train-energy.md): Fit the energy of the system. The force (derivative with atom positions), the virial (derivative with the box tensor), and the Hessian (second-order derivative with atom positions) can also be trained. + +:::{warning} +Due to the restrictions of TorchScript (torch jit script), models trained with Hessian are not jitable, so the frozen models cannot output Hessians. +::: + 2. [`dipole`](train-fitting-tensor.md): The dipole moment. 3. [`polar`](train-fitting-tensor.md): The polarizability. diff --git a/doc/model/train-energy-hessian.md b/doc/model/train-energy-hessian.md new file mode 100644 index 0000000000..d77e7f3e88 --- /dev/null +++ b/doc/model/train-energy-hessian.md @@ -0,0 +1,166 @@ +# Fit energy Hessian {{ pytorch_icon }} + +:::{note} +**Supported backends**: PyTorch {{ pytorch_icon }} +::: + +To train a model on Hessian matrices, i.e., the second-order derivatives of energies w.r.t. coordinates, you only need to prepare full Hessian matrices and modify the `loss` section to define the Hessian-specific settings, keeping the other sections the same as in a normal energy model's input script. + +## Energy Hessian Loss + +If you want to train with Hessians, you are expected to add the start and limit prefactors of Hessians, i.e., {ref}`start_pref_h <loss[ener]/start_pref_h>` and {ref}`limit_pref_h <loss[ener]/limit_pref_h>`, to the {ref}`loss <loss>` section in the `input.json`: + +```json + "loss": { + "type": "ener", + "start_pref_e": 0.02, + "limit_pref_e": 1, + "start_pref_f": 1000, + "limit_pref_f": 1, + "start_pref_v": 0, + "limit_pref_v": 0, + "start_pref_h": 10, + "limit_pref_h": 1 + }, +``` + +The options {ref}`start_pref_e <loss[ener]/start_pref_e>`, {ref}`limit_pref_e <loss[ener]/limit_pref_e>`, {ref}`start_pref_f <loss[ener]/start_pref_f>`, {ref}`limit_pref_f <loss[ener]/limit_pref_f>`, {ref}`start_pref_v <loss[ener]/start_pref_v>` and {ref}`limit_pref_v <loss[ener]/limit_pref_v>` determine the start and limit prefactors of energy, force, and virial, respectively. The calculation and definition of the Hessian loss are the same as for the other terms. + +If one does not want to train with virial, then he/she may set the virial prefactors {ref}`start_pref_v <loss[ener]/start_pref_v>` and {ref}`limit_pref_v <loss[ener]/limit_pref_v>` to 0. + +## Hessian Format in PyTorch + +In the PyTorch backend, Hessian matrices are stored in `hessian.npy` files, and a data system in this format may contain the following files: + +``` +type.raw +set.*/box.npy +set.*/coord.npy +set.*/energy.npy +set.*/force.npy +set.*/hessian.npy +``` + +This system contains `Nframes` frames with the same atom number `Natoms`; the total number of element types contained in all frames is `Ntypes`.
Most files are the same as those in [standard formats](../data/system.md); here we only list the distinct ones: + +| ID | Property | Raw file | Unit | Shape | Description | +| ------- | ---------------- | ----------- | ------ | --------------------------------------- | ------------------------------------------------------- | +| hessian | Hessian matrices | hessian.npy | eV/Å^2 | Nframes \* (Natoms \* 3 \* Natoms \* 3) | Second-order derivatives of energies w.r.t. coordinates. | + +Note that `hessian.npy` should contain the **full** Hessian matrices of shape `(3Natoms * 3Natoms)` for each frame, rather than the upper or lower triangular matrices of shape `(3Natoms * (3Natoms + 1) / 2)`. + +## Train the Model + +There are two approaches to training a Hessian model. The first is to train the model from scratch, using the same command as in the `ener` mode of the PyTorch backend: + +::::{tab-set} + +:::{tab-item} PyTorch {{ pytorch_icon }} + +```bash +dp --pt train input.json +``` + +::: + +:::: + +The second is to train a Hessian model from a pretrained energy model, using the same command as the `finetune` strategy of the PyTorch backend: + +::::{tab-set} + +:::{tab-item} PyTorch {{ pytorch_icon }} + +```bash +dp --pt train input.json --finetune pretrained_energy.pt +``` + +::: + +:::: + +The detailed loss can be found in `lcurve.out`: + +``` +# step rmse_val rmse_trn rmse_e_val rmse_e_trn rmse_f_val rmse_f_trn rmse_h_val rmse_h_trn lr + 0 1.05e+02 2.28e+01 2.11e-01 1.59e+00 3.25e+00 3.37e-01 6.00e+00 6.37e+00 1.0e-03 + 200 1.86e+01 3.23e+01 9.24e-03 1.54e-01 2.51e-01 4.70e-01 5.31e+00 9.05e+00 1.0e-03 + 400 2.69e+01 2.98e+01 1.03e-01 1.07e-01 5.67e-01 4.17e-01 6.35e+00 8.47e+00 1.0e-03 + 600 2.00e+01 1.90e+01 7.23e-02 6.90e-03 3.35e-01 2.58e-01 5.37e+00 5.41e+00 1.0e-03 + 800 1.68e+01 1.48e+01 4.06e-02 2.27e-01 2.35e-01 1.98e-01 4.76e+00 4.24e+00 1.0e-03 + 1000 1.70e+01 1.81e+01 3.90e-01 1.66e-01 2.02e-01 1.99e-01 4.98e+00 5.37e+00 1.0e-03 +``` + +## Test the Model + +:::{warning} +A model trained with Hessian cannot be frozen. If freezing is enforced, the model will be treated as a standard energy model, and the frozen one will no longer be able to output Hessian predictions.
+::: + +If one does freeze and test a Hessian model using the commands: + +::::{tab-set} + +:::{tab-item} PyTorch {{ pytorch_icon }} + +```bash + +dp --pt freeze -o frozen_model.pth + +dp --pt test -m frozen_model.pth -s test_system -d ${output_prefix} -a -n 1 +``` + +::: + +:::: + +If `dp --pt test -d ${output_prefix} -a` is specified, the output files will be the same as those in the `ener` mode, i.e., + +``` +${output_prefix}.e.out ${output_prefix}.e_peratom.out ${output_prefix}.f.out +${output_prefix}.v.out ${output_prefix}.v_peratom.out +``` + +If one intends to use the trained model for Hessian predictions, then he/she should test the model directly, without performing a freezing operation: + +::::{tab-set} + +:::{tab-item} PyTorch {{ pytorch_icon }} + +```bash + +dp --pt test -m model.pt -s test_system -d ${output_prefix} -a -n 1 +``` + +::: + +:::: + +If `dp --pt test -d ${output_prefix} -a` is specified, the predicted Hessian for each frame is output in an additional file in the working directory: + +``` +${output_prefix}.h.out +``` + +Each `*.h.out` file contains a matrix of shape `(n_hess, 2)`, with the reference values in the first column and the predictions in the second: + +``` +# frame - 0: data_h pred_h (3Na*3Na matrix in row-major order) +5.897392891323943331e+01 2.909700516268236825e+01 +-7.682282297964052376e+00 2.535680817045881774e+00 +-1.266442953072092514e+01 -2.127310638041492652e+01 +5.442541716174009031e-02 7.202825779190234756e-02 +5.198263170894957939e-05 -8.110080221576332349e-02 +7.443552765043950914e-02 -2.248597801730128215e-02 +1.029910175689553675e+00 1.938646932394622047e-03 +1.213862217511276764e+00 5.344132558814301825e-02 +-1.221943904909605250e+00 1.602557574981743893e-01 +``` + +The full Hessian matrices are stored in flattened form, in row-major order. Here, `n_hess` is the total number of Hessian matrix elements across all frames, calculated as: + +```math +n_\text{hess} = \sum_{i} 3N_{\text{atom}, i} \times 3N_{\text{atom}, i} +``` + +where $N_{\text{atom}, i}$ represents the number of atoms in the $i^{\text{th}}$ frame.
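For readers validating their own data against the layout above, here is a minimal NumPy sketch (the path is illustrative, pointing at the example system added by this PR; it assumes all frames of the system share the same `Natoms`):

```python
import numpy as np

# per-frame payload is the row-major flattening of a (3*Natoms, 3*Natoms) matrix
hess = np.load("examples/hessian/data/H8C4N2O/set.000/hessian.npy")
nframes = hess.shape[0]
natoms = int(round(np.sqrt(hess.shape[1]))) // 3
# recover the full Hessian of frame 0
h0 = hess[0].reshape(3 * natoms, 3 * natoms)
# an analytic Hessian should be symmetric up to numerical noise
assert np.allclose(h0, h0.T, atol=1e-6)
```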
[GIT binary patch payloads omitted: the added data files under examples/hessian/data/H10C5N2O/, examples/hessian/data/H8C4N2O/, and source/tests/pt/hessian/data/H8C4N2O/ (box.npy, coord.npy, energy.npy, force.npy, hessian.npy, type.raw, type_map.raw) are binary; the full list of added files appears in the diffstat above.]
z*t7KOUTPxD+=mAB89jx24Z=C6*nW}86=STCUWH&+6fgXw?<df9xfG0{{ z?cXY9kcQ04?xn1H&?R|#^P{6jNPX~#Ozuva=nsUBtUr-To@{EZ>zx`P`GOsLQYQt^+em?0wEa^Pp~p0}ibmE3&?>3Im+UB%Ad zR!7$41^>}_T1n)XT|e>i4S{h`zR2VBs<|UC?;m=e0__v=7t~8Ht)H-W&cWAyprv~?i01d{MhAo^9^kD1ODK;KYK=CPx^qb z3q4E>X9lg4NCnifQ3gu|2BE=2K{@?M8yr;h<}cjdNCu5pt}&3L$;fbD#$A^Z7{9ik z=j>=Y$>GiGzT+MbhtEruWHNHDYFh-9R!0mH{dA=dj#D{AVUx!Bfm7lPlG!Ci}sD&b#}1ZAa8_x1n#-GKOrthy!RiD_}}}4a}Vzm zp3|h|pF!U=IcnFUs=P~E^GVVv?@m6sD(H@NQCQ{{K^Ay~B}V1`LjOxy=S;(YVDsU2 zL*wt?#JpwqRb56;JEivCxlv~b7ETV|P<@v~9Cxo3J)oHf7MT@a$ffVZt7Pp?6W-Od z&&D|6k*BwS!(erQDO)j_ZaO!~9TG#H3i`9{P%I`(sl^#)%K2my??@!~l}O^Gp(@6Tb;+Mz>kQeOFDT}zXvsbjz^~)S*cZ# zq0qZx2yW`AY?%ssN2XGLn;g3K2YfLP$cx?I%QpQRjwBYRh(*04 z2bQ-cTQT^9V+oCX90FBP+hyAB@p_8*NgsAt;!^{~{eo-LlEz4VY@T<%;1YW1!KA#a3y1n#-GKOrthyoWdf-zS`Vc%Sf` z{_4FQ^Dfv&mC0S}eyhESzOBV`lk?{`>UC?>*>py)d48`>77x=V>X-Nz$+#1%XmepR z-q4y#7{A`RJfu`!+`JarhQv9ff1_Aj`dJ{faxY^9wyt$9B5&5y1r~>X9#mUP`MMj+_15rG`NAi5$=B?n19MKZ z)tLyWk`V54y;bZ#zmHfR zW8L!R=6NvGjS*R@7D6NnH#t`HIg_-gK(j_$b6EN!?4G6XEmAL*6AudO}hd2V?C!BkDpYWV?Rw}({ zd8$Dvap`lk&hMnu1YU>@@hqp@*4%h(Ya~kx=G%$~>{%cwOPiSX7T1x6oViPpY^88* zY4*mXj%=cl+9bSWqzCM-9%f;x>L>fE8~${5zJ-lYS89LZ>d+VZ)>*%M4o183i1!(zm;x(#Kd~|2u zsGj6yW@_D#L2iu_3;j~*M#g7_w?x`*!FGdqIeGr9`l=}Z1;%&twcU4I&8Ch|BVb{TqRcRzl?=ymq_7}xupyt)|aR&ON+Ocut!a_qTB9=u#0IcbBQZ z8;2_wx;B0y-{G+9i${g7BD8?=zE~sEw?u2Uda-XQgNHizXCy|rm5AHfPOZAdLQlx7 z{=!$boDz8DYbS10OC0i#erCNDMV>^jFrDnq0-a3V=)l}mnCaTul5HhSm8?1cF3?|+ z_TEzE7NbDuF7|5&HkiuNm$k25EYT97eWg@?oY^WtN2`9&dack&)ce=J6_xIXEvc_Y z<-&SM^2PnD?sY$e!`yaGnboe)&2=@xtj86q&o=Je<#L%^DA17o!06FX?}mR*r@}!b zFT#B8eJIq9{dw8B!2xcsuMo(~@`LD%iF125#lyv#6+f-cg~FtM!n7{e3n)3@^8O}C zB^4y6R()MORNbg=vD0UKCn>mVO7L4CTv2|=^|34(M5B%h5O;5ax(4#)$ipJPhrAK; z5xD2#{)D(3@gCv`e4lXc;eEn$@?L(Wab188wM$xf=f;mCM6HQesNPYA3j7@V{EPWG z1K(*Mb8&q`G^WlK#rnM@QV#y|ucRdDf&u#{-+FssYS7L%aKALYz5Px|*~MG%C6V>B z&v!G@cF%5yXiy_L&@~s!XE+3+;k(XQoc%$(|8b>L*XgyPXP5N)HRSVM;;dWJ>-p$kH9?__b0^Vi1!dj;QNGg5APG6)6&qC z;brrB)KRt2HJ^9#P=#wY^BU$dQ8!ZUKm8K<2FrFmO-ZrOAtxt4c3A!CB_dDkUw`|$ zo_04WJ+n=D33bT&va*nKD~URz-8S{12%ZX!xXAMqlPhbrix1r!Bl`Y-)86tgrsG|9 zNOt}EOsozzWVGFygLgyQTTkkIAaf-bna+vDL*`vpP7M`$_ z@0ONA$LeP)0R_Qii>h#i;-W`zG9&lnmzLLX(68Z{!tHK&>u;*op;SWn-J3WnIZ}xS zTeq^DU;&I9+RL+j$S2GRtI86lst6Z{@bR&g*)U#KTW9RYq(nPNYS z4=|XS#9?%gj~=tV`H|V`D@1D@$;#X#N1J#r+4!>j6;R<$;Ty)@5N#zcrW=Wmz&Jtp z1bH}5TrAzxQpT1rh@BaJk%kW>#>wYWi*Fa~x6QD~Eci!$*laWVwc;I|dPJS`-pfX( z9G3K4eU*n^X;$;hZyOhVaII{{ir;-8U*Z2<&Ce6kPZ`BOY%hn@O4n&I%MxPPlu_JJ zc!Oj`tiN>hWjw@XKd|c=tRjUUqg6An<-&HJfh}uSGxB`Pm0$Bmg%Yj3R@>+4Eb??u zO@sDxZ&2Sb^lI6~EK>04so|I6a!~a?cY1@;GteJny_Qp*097`^by=SZNsZK@@s%5% z{HN!lPKf#w>Ke$GBM*!G9`Z)WN8p}|`xD}F#CwP%@O{F$hxZB3>B($&@UJsslWT;+D)>ae$9i^v>bthLK3`2F>z2=Rj6Hh_!w%jimoG5B^I0aNw}cVNV^ zCR0wkko-I`;J#I~3<}J8-85nwi0z)ak@M_~?}aV>H`Sr~3LH$UJtjBi5tiV!-dm)? 
z!G9q8Q*2xa#K`rnKDV0(@<&F%iKqE3kV6Y3hsmm?30{2uZ~$VcFwi~AGea>RRxBk+B~xrg@& z&*|n}+tCxr+LRzuR$;x!4q7a_W8QZ52VtrFcy^jC5VH4tu-%^X3yz8JvlmuhMQ>=7 zkxSYkPj`slW!DN&qz)T#23+2fL+t6leLHk+k|V}?7q^(Dfq}5|@pt$0$@%xU?kdJ7 z!MU5a4_%7zBq>feUd1UVk`D{xPTS;@$!|B&$6*}}c0-GUzYYgU^K;m7`Zd^7jg{)!yY~tYM|mTA52E(}ZWt$IOQ~Xl~_`Y;l03fHNrv zxS~i&^mP_{_6%ZnMfnwNln7ICZx*LHhLQN8-%H!g+{vv?X4{JQM1gmmsc!A6G@{4; z`AEEpBaE+L&XXI6gn$tr`P$ZC20pipRv6TSuMYVgV`}B(y)Ty!TZl1GAF_ox2L5dt`R+3%PotXr^)Tcu*s#Er~EW+}lBJS;&MppB#rjP91q~{=R~i`;xv3CzjKD!|zX3{Tm_+t>*8>SEP~wOAW);$QPvWN%k?H zff%T8-*M3DPa=Ua(XWzG4rHylYE6nvB@Dll&Uo-Om(*00f+J*s%vj5W^0NdudFR|0 zkzo%q_?#&2A=$*dKK_D8cL@l^nm@GcdP&I5e|$~r7~f~P8VX&@GKkdY0a>aq5unJ} zR{nm%e|{tMv7kQ!b$8TrQ71(G33UzR%aMmgeh+ygQpo%3%S8%bE2jb=`CGc>!gGrDa@h(&hcC6AFxa^TvM^qiS?MtyDL)pF&!|`PZz#{K-ku zx5<$I1Un0D^DXk7-lJMV?$m_t@C}E>&np-0n!Zb%ot?C$+R9l`=~C$-S?>OQ=7R=>_Akxlx%Gglt`@)(k|#^v;cQz0$Ce2b9hBciSQOY!fKcu?(e zUGa(a41Ay56U1Fe6FvVUQor8^l1AuVYVb6Tuw1d`%?YRBT49U(VAKnOzA*F~p^pXq z5vaSPo{Ksm>QAU^AYYC=Eb@EE8zCQodoJ!zh|3Z0A&$WJ3FjW(Cp@Qo|HF|xrZuQF zH_H_{uGSFOm`J9!MK57D^YvM2=X~;}Z9qxA9^hg}UWxtEH>6l!sxN=AiDYMG2Goof zKvM9GM#Hr#q8qVX%4Mz`9=I|q*a|VebNSnTpKE0uq<-5Xb#2o>kd4{0+lD6(YL0cN zU3riV3lHj-&8o(e0_FCqK&LD+>O6eDpraH<#7?bLzY|A>hr45YcNdb0!I>7zNXB=7 zIoTfEy_Zfnew~UgopS=mr?HoE?eCJ;zx9~9KSo2vhnvkuIMabCVr@pxL^-JwI<-$V ztB}0>xh_Cjgz-J#x}mMToJ(P;=g%nik#vIoJ@kd4-w1sy=#N0%9raw)2~mGST?6@Y zTD(knLr;bgiN~bPe%UR!V=XK5!21q~oHHLxR&#~StKXH@ z*_Oka%$E0is^UqC+D6l`)GV+KJdyl!e+=p5a(o=&Z%_W{8m^UPaRmMI`*dXl^FYg1 zf7=*B8#tixf|%SpRJ zM$Z)bEYZJ*zA*F~p^pXq5vaSPo{Ksm>QAU^AYYC=Eb@EE8zCQodoJ!zh|3Z0A&$WJ z3FjW(Cp@Qd4LcR)Y(?tT$_txUDk;#XulsY2yb2^a(Vu&sJhX))ri5fm`w+Vkg_V0* z?nA5B^M?mmy@`2J-bSr!J`m-X8?thg@%#QL7|iDnB7g5aa~|bkd>_5f^Yf8RV-lov z_4``G+eEd~CN^Eoo0M;oduyGP4Khz!-p$kH9?__b0^Vi1!dj;QNGg5APG6lW2gCkPN+xYA-iV+F{~JHk;bV ze|r-Q-xMDo=dK-3j)XsZSC{ZGdb5icb3PoE4JRVG{fu~V zIJs3VZcazKl8SSuIO~%Fp;y-~!>8^NiI=#d#Nl;;m`9i$EXun@?ytG45zlD@>2tsC zw{p6|t9Pa4?6U{S&(p(#6UpvicEmNoCC?V5O$0w$`}=^EaqQtCmK%V+W%R?M&l3H6 z=nF%?5&BrrAA!0%>ba;BqW*-s2J+>|!y>jgt#2>9^wdmpK$KseZq5k zX|<4FAhe$HX@B&!q??bPNU^m%;3`P-tv)SjB{NCFJrCr{ubhX{C+<#ybxUYbk?bu} zf0ALm@r08FbU%{Z-KVf!x zK#`!o9DU2^hee+y`uETmhJGXTv7kQ!b$8TrQ71(G33UzR%aMmgeh+ygjPIEa<(@5UgTAdzA#b#ch|`u;I`@8r!_k}NCOmb9L}#yI1*CcaH1c%9?Xw_(pAKYNvxw?KT?TUYMnMlc6}{&Ms!qaPN1mgwI@Ul{t0(8q%Q2-MwC&qbXO^(WLdkS|9b7WqBo zjgXJPJs0;U#N~+h5J%wqgmVw?6P^?Ig65z14TSpM!`l3+V2TuFPsQ)4>Vx)jo4AJF zS@^d9tZC2AEEshT+_ayqfn+*yE3oHfgN|Jf`>dTYxyJJMj^`~=c)B}iw%WX>tZ1h4 zMBMjF@UOn~WS6l6vAZ;*VUuD+jHxo^BH}^1WV94ym+OOna$l&|s1q43QMg(@ol7iV zUf@rZc|Zae%by!RlLa@Hdz?ERbCPhGMd@C?v4>cDZCp0-ogxL{!tc8l$HSC6^NOcu zE)$7f@0-;uHv#h+m;*q6Ir^5-4~sra^zWfB4E;vvV?loe>h7rLqE3kV6Y3hsmm?30 z{2uZ~$VcFwi~AGea>RRxBk+B~xrg@&&uRY2i_Q`wb!yG>Ut60$@KGPWT@n-fDML-J z9#ApfL=&&o$LnYL8Q&eRocHAU5JcAby>90eH-(w%$kxV0UHC2W*on(g4bDT`#LLW6 z@opZ9;c8q8~84gme-=vziVEcz_bzlXjs^c$g%1^p4IyQ7|qIw9&$sB0i!jyx># zd&nChAAx%=?oWuz5$_?6!1oE~9^NNBr{lp68J~g#DS5}d!}oFosB;$zVh)I1g94_U z&@kszA`tqlMewr?Y4LC{c^Y&EzLafA>r~%H4ra1ch$JeK-<)kLtCbw#NZBg&yPC=* zMd(EIzn#1gRHA>!%lR~!Gcq~-q(lukuF2mG`>jdZ-1{sa)|>=S>2I7;w-U%RgICu6 zUfJM(cezDPksmoQKAX5o;jj9Zs(-yB?<`5BSokicZBJmT*&}duyaQPokt#pv?DJp# z33D}=*T5VA`peO`jDA@3S)zXrePQS~LLUqIBT#ooJr{LC)Spn-K)xJ#SmgJRH$pxF z_gvhc5SJs~LmYwc6V5%nPk2ryFBTtV-z-gu7WTy-3#G|P{<#$Kk2FZ$KJl#dMgeJ< zW>OWE3nOOoCY(2?caw{(%|qo}r^vH=I|bRKPr@oUQE9D%N6A4R$D1d&=#$8$cUH5e z8^bGA<8{$Hw8$^|o%!c|TVUgpiqliyBA{$+yX?#1WH{wsF1Y)2GPxHfz$UOU41ATh z#0LdmCz^MD2jw$6!(&s4@C2Ku#A!-&tNaQFq9T#r=^-2RUrr74PnfI0yawg~&|i+e zW%R?M&l3H6=nF%?5&BrrAA!0%>ba;BqW*-s2J+>|!y>jgt#2>9^wdm 
zpK$KseZq5U6;q&C=QdGu3i`os@-|TaqK#erY&fZj4_hx3sh2_RQ)>6LSO`2#$)0~H z84ui{2Lzbe55akR{{5^K;Skg%w8X`q5w;m=`RTp>GEp~|4qT*h8gzV3Mw^a#!r+P~ z>%eOn;HE4-mTsE_f-!6KpIZf!w=IVjBqter$2!&E+7F4Kf7OdtkPRh6mOSsph0=(* zwXS>9d^CKh2~+gvNFbOe#GD%DpDj432`~%J;V|CKH=QM`-JDzINr?G9;!^`W?zZ? z)z1HawG5h*5w~Hz&q3Rdi}i`z^yF#bx-ih36I^S{_|DC)5L+&BhX}HLdEMq?!M2b) z6&Bw9?i#VMzSQlhX9?+kN0!+>+(TNs&Gcn1KP5bK5)J1Ve3akSpD}wL_yEVUnV4}e zKXQLiZJkr^4KlqYxgo1spPX36?;&J&2lhSP8#N}UL3CSXfn$-|f4NW06Jkzn@&EEq zn5)6O2Ic_JUyif^xE%2w;s|`7aPHxK!gKN+y!fSb8xK{n&NOEJdk$H4Xj0()cL%u2KXxdn^&Xj6 ze7`(3I|jbERYp8y-~wAt*<;TaIg?|TV)YMmxIoJ7;O&yq_lPcYR=`8o5OAEBIlF$B z8Jsfgb$YV;5V5k!2>A%yb8&w{T#k4TaRk0kIQQ^A;W=5}qc#Bu;P3UYI<_#176XJ5jdx#_O zeZski_X*EQbMXAg85u6hZsRcf{@sfyJ?ocN;{TV6(%7_8+M=HP@-$Dpd3-UovFh~~ zP2X>1yLeXm=1xZ6@*d-;&99i~S#rfnYW4x7m%F^36@LKd&PE>;h)*LW{k(F_y4C;@ zjIViDJ|%Ob7OUp99+GOEnYZGD$H027T+?U!Q#e0re<%OqqyO^mn4`vgF6KTlPl!1+ z%s*kS2J;%213-T{`j*iTi#|*A@1ZXY{YL0xL4O45?x^RYPKf#w>Ke$GBM*!G9`Z)W zN8p}|`xD}F#CwP%@O{F$hxZB3N#|<&&SEd}TQuA_0IgqQV3xkc$K=A+1~L3 z6ar!*?HS%oeRluThZ#d=>6}oJdSBrRK%0yXuHJNi=Brn4k<&e>wV=(GQD0OZ4xdFAV)g=wm^D1nTan=b}!C`V;CJ$d@Az zi~Jt)M#x9to{Rev;&Q}$h$HZQ!nud{3D0Ru|3pq6U0DeyKKedWVm0}ATzO{l89*ae=AOpAs0+*)b4QH zgX?_Twk@jjCx%BVzcPKlNS@A}|GHt}0r~N4RoxHHC(x9|>#b*IM6f>sdkHY_jyY<~ z=VI;?^MsgF!~7HGYA~;XIRNyRqi-4gu;{Zy{~r3n&~Jo37W7A;?v8pc>V&92p{{{^ zIr6Z`?;&r5d<5>fxIZB-N4$qP0^cW`dw8GloSI%IY_OG5p&Trlm${W}rOn=C^6l_? zOKiK%W;B=ng6@`J?n30Gsm%BS$h?oE+IE33U)@>c|4&j zVxAClYM6h*Tn*+mFb9DCa`Y{u9~OO<=-)$M82XLS$AbO{)ZJ0fMV%1!C)72NFGn60 z`90)~kdMGU7xyQ`<%stXN8tN}a}Vzmo>R|bK@#I3MrEBl{AYbb2k~s~8JMh_1r^`p z+pD`K8Q&F_;WLSFCU)f}t;WBtU?AUXQJ_x*ag%$<-0(CWVx%+ID)a}F+_bHA0bHjU zIPs-MzJfC}6{xj&ut&p=z!N4!#SpNM1$#cQKLUFRFz=2zYRu zAudO}hd2V?C!BkDpYWWfc-+=q;1;DU^Y3pdxVn)_^HVum#?D6-7@yoL>B9#mMk*I3 z4@ASr?Ju_hVhF2N%a5F5k!2>A%yb8&w{T#k4TaRk0kIQQ^A z;W^2jIny}3ZYSmH=Fxv^izxMsMUSoVkt-ZMq2skw@CGqc;&WG897^Qmn}WY5`w(?$ zzWqnclNox>yT|t_7m(cF>!JZH7kHB66 z%)4Wb8uPiB`@}pU=F~9%gt;2bYhVrl{pILeMn5e2EYZJ*zA*F~p^pXq5vaSPo{Ksm z>QAU^AYYC=Eb@EE8zCQodoJ!zh|3Z0A&$WJ3FjW(Cp@RNl#(wlH^nIFM`ls#dHy6` z_*eG#%=3gZO>jd$_i12lzqMEWa5#8;kC?u6&Yc`_YIKb%J4n)29e1l(aSw`ZWs5Ic zN0L`&Te)@aq%!g>|G$R{`;D+S2m4sC=L7pAu$KVy?wF&-d@klbF;9p&HOxO@t_JfO zm;*q6Ir^5-4~sra^zWfB4E;vvV?loe>h7rLqE3kV6Y3hsmm?30{2uZ~$VcFwi~AGe za>RRxBk+B~xrg@&&xxB?nSaqLLhbC@C!)1&IlWBv!|z*48>zJ=-3|8-M1t3zSZ|qr zhM&=Qq=?NWn;b0IDO2h@0~hQ+8kNn|60I$qt&X3D0Q-p$kH9?__b0^Vi1!dj;QNGg5APG6 zlU-7h;-h3?YIShHr{hj6bkec6r#2Y&kY$%SLpQA(hdnH36bf4Yk&Fv*hxQ0}!hk~z zPqOhxg8h5g>xO+{*h7W=M%bH!eJt4Xf&CHKOMrQI%u!=L7jvJOC&Zi@=ASTEgLw_i z0ieGeeaq;FMV}@5_s|!Hek1g;pg#h2chqxHCq(@Tbq(apk%vWo4|yZxBXG~f{Rwe7 z;yuI>_&(v>!~2BibVy^n#(nwKRHMFgSpwfTBB=jwN1reY)fPATOTe(0gz29NEYJSR z&@0W~PMQDxuO|`v_psLu`@*n?3j2+)HwXJzu;&B&Be0hM^X{0V#(Xa3J~2;-IW^2b zVXg-A8khq>e>wV=(GQD0OZ4xdFAV)g=wm^D1nTan=b}!C`V;CJ$d@Azi~Jt)M#x9t zo{Rev;&Q}$h$HZQ!nud{3D4;^clD}1Mt`XAVCu13U1HSa-j*LrY|Dwv8pl^J{YQz= zpN`!_D5hZ0p{H?M~(Sh%za{>5OZpn zf5Kc1<~1+}fc|pyEu$Y6eU|9oLthyBjnKz}{s`3FQO`x45cMb2HIOey9v1mMi;yowzm%YT8c-ONPLFUn#o1}zvut! z9mPIN>`BD_J?wSEzA)^e!hR#{&A~nv?D@d{2<#=mygTNoF`tXMPs|fyP7U)f^ zxE%2w;s|`7aPHxK!gKPB+4Wo{cRM9{CbKDXg%BnGugXBoT#UwkSnM6eK1=LL#Qr_( zb;G_e?4iPbBkaw=J{Iix!2SsACBVEp=BP2Bi@8tC6Jkyc^G}$o!Mq0M0MK8KzGd{o zqR$fjd*};8zY+Ra&>w-iJLV&92p{{{^Ir6Z`?;&r5d<5>fxIZB-N4$qP0^cW`dw8GloJCLp{qV+0iI~Rhedyzp(gcL>#>?Fvf|%W>0^>XzmkILH-;<5&*6XgKEj{$+Zd~Q2g;<{Li>cwGbA_E}>Kd2O4MVvzoS%58(tm10 zU#^SL>6=Ha-#yqwm440f-gE6cw0RU6=nRAt4f7=3-FC6$+mE=6Iw^NpC7{|fDiuH! 
zk9^!-pyW>md-q73k&1!z7KIu`t7}9*@ACA*jVMxQtbJl@-F3L+z3JqkY;TfiqnLAZ z>?8mq<5^bj1wV)P9X8Ppg8-3V0cTpHVLCQ-w>?`Tu=Vw%pHX!t|E}9DMBWL7vK`lh z#j?T(o|8hI`?Yrt2dHy-4VAC2{)0nvZN{<-0U*Znw@B?>4zwzVq)?ZVVWVcy^f)7j z5#x90Q}}0PusgR;R`gZ`si}D?*5dyQMzkdF25Sn_EiBj1K?|doAy#o~+`R@6`DbzT zJ4ZE0!nuBr$WEfz5hrWXyqrq4KFxlpHG`blP&=rU{sCCus~-@3#X<)sEA89%={>Z} zN)NADoK3Fn(Owfg`jWg^(U%o`z6L+#E+0S{gR}dbSn24iAX+Sc&-#)zl z0#fzPymztUpjYPV2?WsFXen9MzWxX?+Q;<5XjIiAI?0y#&(Bg3dY_JzqaD*m>d8lw zyY7Y})QR`ig_ieKXn(VP?>$y8qClg7>ks2Q@tt}z7Y}T(X7nv?lBlpZA-3Us(}5N? zV4uCBMmowDmYjGQQn2|v;R+R2*`gH+Mlbab^=k*hg?}*_z0&^V(fRa{N^1`ix!-=w z+u%C+rv8}hpf5xH+6fOYYiBU>`xH7geupfBAD>1-UE$H94fmGsHYHMUvU2&mYcOb~ zp-Y)M{CA)5oZ3EStw@sGPnou7Tav2&Rh&E`l_*>R=MaJxQ*u_`6Co?=m38X ziRX?#={4RBveh>?Or9={|5MAUW1Fgc9&0}%LhB@J z8)XZ?eCAbJgF+9u#S2@eu@926nGd&Xn7awzN)>~Z&gmec;3yUqSVTgZsc9dHIM5ed zN(GCrrl%Iyk9uW_(Y8YG+}|t7(b-!9{+Wlap>E}jKPexWZCs=h9mR8CLu2_Q zm}p)c-Rr%GI#u^>bAsX;`s;~`!DBq;&{!X^I@8M$9=Y*KCw_}2#Y3T8e7vq?(D(aD zq^vWD8$PZoY_bE>1g0;m6`zpnd-sW%KT09u`yBr`3Z=r{rTsS>S#OcOYkfm_vQI)) z=wPf+z-E#&apZ*RUQ6iIl}b)Z3@4%w`gIO`d_sQv>D%Pb1&}fRAC^kRb|Beac{uuT z6v4TN_X*EQ_R7KddqcX^u_qOGReiogSL7p&_vsvzlll0dA@?Ves~pm(6*)lG+vi-& zyZ8xQY>I0~SM?E34H1olvQw~Zu|~P#R4t^o{F)MD^m@A*ezelyUqWBtl}>x0!%F$) zxStn)*9zM_ZQC_`v*1^E&~vlea&mt!=gmcq|DdmJf!W+>9^6_f6!_ND(F3x@W=(8ogj>h$0}l}29pOgd{jMf8hU z=*5-&HM8#gw8zm)G)sUmb!~nym2Le9@eUq4BsLdP&A?eOHWteJ&wvEZ+yT#}A*G zx|{>=B?N{V74N~#klu132Rra(tM5tV*hAJUuUH&ON+OcupeuYK=TAb*ZiKtS<|E zrimIq+sb8rzewL1Uf-(JMYN)90+n3R3$s;wgyY&<7~d^0kJ6fICEEI9rq$=zDC@5H z^%|GfP^Mv@E7#3S)9TZa(=lQdu>S0zjoQ|1GTb#^6>XUcd`E3r!qNuF+07L`6N}@C z{HTq6lJOY1$ZWPIOC2l!|o<8oJqq5Dnjy(1^;1BTXtSO0#RS1(?n z`&M$v0TN~+tbb9^feb+2Z|fV!;h>w|U$OVuWb6L7Li(mzx)(wQ zd~NEP4&46_?;(!B_X+17-X}b#YqnS7^&aR_mvfBvy8W3TU+xt5%&lKb4{cI-FuSM* z9yC)OlLdpMK~- zAwX(=&1^~_Z0%+mXydj8;=`)$^GkumJbisZCWzsX`6)5H#>)X-PW%#>`FxV>-5*d2 zuQ};+W4SMc)xN}Ot=p8!28piwLGZrA_>M=FO;7Tx5zr86kgwJofN!Jn^Lv+y(D6}C zJLa!4=Jh`x*>T2q1NNr}R%ktGfVNx~>3tdJ;8>fclVbg1x_Y}{+^Mg>NFDWN@weY! 
zzav0nr5C-eQ`*#hheuoRLC6>D))aU~-8XiC7*U|Z268j%-d_uNQlJ_i{ z$wX#nhf>-U7j)^ZjN-{pBOJ^YGoKwp;DXb|m+O8rZpk;xqMLo_b;<6=rbDgX^{so$1MD+Igmm z;QoZT9Pu9F2z;M#?%{pHb4q6Wxa_OyUdrc#^umUXE##DuXG}l;3uyj0-C(}570w^v zx?O6|N|#(r5evCrLxS304s3fMLuV{*On9-Z06zTPwAy`X2yDKcP~R|9NnVTA@F#!$ z33dlPSZ+wZgXXxM&L-azL22!tbL_jlh?oE3nwz`QiMsLO300#45dWB?`P}Flxt1U` z!F}{52$zplGswXS)sFw%XVA^aMr%OHQ~WyupReE zah=H~JNOjk-}%=OBgGuirEls$+kN0vvD#+p%jPTUeHtQEyr%A|)@B*Hx<+ahALIYu zR2%PC`{N!7JDwSP_7)e7doJ!zh|3Z0A&$WJ3FjW(Cp;%55f*Q0%YziFpHnqk#(VNN zqDoih5eMC#d#a}SHybT*{xdJz;Bq=s_SumWDJ!TevIzzk6C~)qbA#LNoZ_bZ9{Nvl zJ*|g%`^y2X4q~*w{+>cPZ z1?8>diS^bmq1{a2-$TJ%M!&F4=$e0v=%tHGJ4ZIYg|fjL7k)>-A@>@$*?;;IN`#Vm zt;p^a;N0fLzI}Hi$*wl2Ago!$jqO(J@lwY3dy4+#e4Ks=H(Fk#>AF>cr>1&se^m{v z+djAKg_#LGeJ2j7nM&E}B*WszUX z`A{ORr4u4ikVrn+_q93OWiW8PkKQ%Ed_X<|_gvhc5SJs~LmYwc6V5%nPk2ssqqOKh zA1z8DbtAjC(@y%0bl|>pWhQ#$IrU;hmYF^zy*cXhX;$hLtJ%GTPeX*Y&9j`zP=-!b zkF2=s#!cPvUUw?XiIcY0(ob+{lcW=9?fo;iH&K+epIM>@FSSC7_wX{WE?^!5eXH>f zSY@Qcm+d0~)W zK&0kD^u=C%b5Dkj+u3_AlN<7T)?)Km>f}hw~Fs2iJ3X!E;9Xy zlnnO`-5M`~>py+3A20PLR)NtSKJB5f+mj`1yTDT-W)da&j*5XG;gp!nQ#3fapVjw$ z77NQdLOAYCn8G5%sbg#E6TtT|m;2|<4+!!`$VcFwi~AGea>RRxBk+B~xrg@&&*^(+ zSHcM%HH!Pm%+iMMQ)JK9LX{G)LK18C{V?NG-Jt8ibtLF=KX5mxnJBmlQUapP;ektq zXt8>ZxKxgnl>fwu)fVbv6ca7{+&XJJ%{6MeKDyx@QFD<`nrR+^xMeriFF83&G7{Ji zLclmA7g#KHVtjAzc>j{);rp{8Pa%GiQ-mhYuIa+sX-_ ztK=WPs4Nf}d(O3W$7P5cK6La_dkDB#C(GBW6%i(}m0r)^zkp4&YUP@k8i+h=@psqn zS40rzUd5e$Om3Z8xldf79Hz`B4p%g_{KxMhZ-jgV?zy->AudO}hd2V?C!BkDpYWXK z@7jxa{?eo7=PZBzd7?z^DBb0{QAdz+IG_BsDshlJKB44Ro7qNgzJE1*B;y^h8y4I# zSVGZxv$6@@&9b!ETan2|#&GjwZtVaN`PN@oKnulPh(LR3>~r z?!?w1UP*Xt_6-?6$RY=wqfPS+vY_wFRrSD!7O*BFMSe8>9&9f);F3Mz0EZupi8Yv} zl6wogFIY||fOs(pVyVo8iYF#Y9%lQ=#5E6_PK(E+kG)=dLBAVZXXF-mk2Jy>rmi?m zsuT{sw=cMI@CRIf%QK&D{tsq_st4{5EvEbhS6n`8-A-<8Zhm)o+iTLc@XB>&=Oq5FB{6wjGmTrICk4eh+ygG0!NEvqN>7S1QCcKj;IhjNo?W!~l;^mJ8JS7f&=-OBP+ zMa6Xyt)1q*_4RNK-0$44rdz}4)1T#vu#oa0X#<@*xi35;t_)!wsyy+$gA;Q|Jl0&gl0QN=+ELh)@cXY1NJH1*kNKbF$0vyUH=!bO~O z?TqhGO}c4%c4ag}zop}u5#3*4KcUo{zkME9bG7}t=|zG+;q3R$&xG+|50%a#Mo(FEpiU3(Pa;U2u`cdyB74Ic2iZhv@=iQ)i){b5 z|M+s`VUgcM-U#^!+;ee%LR^k`4{-#(PdNARKH)j3YrIPGuir;WOtDC^X^oR_Eh?|q zL|2o?tBft9J!rDWwEgK$0a033fa|;A@pX*7lKXtW=r($*XIIg-;kR%k;6f&CR{=%0 zo-u7GE`iUr@fUiX3xGvjso{e}8rkGC_$2%e4euwVmq&g`B1r{c;T-dTEKc06TNQi< zdM@MF0ywd6*ddD8>`&=sr?wvis z_kuDZeA@cA`=w^`b^5kt#O4{ID}2t>G3_r=86FZXXZ#O~)gMJRT24Zulj@;P`D*w& z9=GRe-d{$KWn@$&0Lh6XpD{{?4u1wZ3iwKDhK09TfPrmTlI{wJ5CY>HCebu!9*K6H} z`Zo7~Yx%u(-G6g{UKrS;`?(a_^XI`Gp0UOMDsxGf$?}hyZ}WhE@UZ0Hky22K zmb{U7p%~Q5g?Bw?@Sjr2zG>sTt zhrK_nl!cNAgnzgklc!z9`0n3i^SAU$Sdwrv?d$SD>Yl;v z4J&E^^(WLdkS|9b7WqBojgXJPJs0;U#N~+h5J%wqgmVw?6P}aW89}Z9*}c@+n2HL| zobB}NnUhBRaoJGEYIyf>4mpB%WU%~c{%L5Vp zjd0vJT~MyzA(@C&v^k&AM1(e+wrsKP1{wJS+nV2G0q3TFvc=KmWao1Ui;k7KgiKVn zZA!UKESeXKN}P`+4O#g$KQBBcO3!ty*Gb2aJ3rTgy5s{8ZvJz;lJPy;`KKwzZW?Bh zd>hTj?~9+p`xvwEO{1PL#Rs|PXN%$6PCi%B z%xZ9~Ei;kdUrrPz=e8*Flo8K>HvbpR>Hq13s6U~ufqXggu*mNrZ-jgV?zy->AudO} zhd2V?C!BkDpYWUn%g(hcOH$PFybbF-gBzjS^IEiW$tp@za`$Y;YZm%v?3*j)ZU2v_ z^YG{TeZP21iv}%eAS)ydMTzU3N=B(vw2h2}iWG`M_R0t$$_|lP8M)t?k%Y)fDuhZ? 
zL#cj#kH`0X{)YQ;->-9>^E}r}lB1{5#vdVAL9!nj)Qjg!a%KH)6gbYE$I))Ha382$ z%xS6olN)?ZlAG>-_S<*A9Cp92yxVgvm-b6!SuV^eAh}?*;I-TbbUkzVYXx;TI#+fw z(3Blt zkd;?$m_v#}vo9x3O!+U*MNWwP3AqORa`>?D_uw1BkHDOZ`3brldJj4R-zU6#xKB8z zw{rxR_%Gke5&mjW*dVLSQQG5Sruk+zhkNtKX1?lVnml!FZSb6@l%zU!QCiPq)))6G z^Q5Z}n|Os+eP&84ovb=#bNY7-a|`9prtDgJuA6otc#AhZJ2~~PEcXVBoFJA^boVtQ zd3ou*D;_iMpN+->OWjHSrcjq%)Kf|=YsmT6Rlr1+@n+`MjP{Bz*#Ak{QqCr*i+LTr zS4ubfubt=3?_?8nq64m8jwd~GfKUQ}&f5zL;BD%M~ z>-3nycvj|rdD^)bx2UFPtKpc152#4XbxlcjDKnm8Ag-r3ovmuw>gF*$on87~*!-2R zh}>6H{p0>DCC}CAgVW{jv0mQ7Wcg1Ggxno@E^XBOW|#ATOB%gPNoJ zpI**CekW;n6WL@)T-Jh|?V z;N^O{X7PQk7@s%OnCZGzaNc#sD;nYDZ-ql(lW<6;s`jTyMGJBU3Gefqn9Nd^6B zU6rDA{wm%0t|a{3O;JxhH>r6?-M}W=YKPbhcFF?nIS&#VkbU zm$JN8DJ75Fa8Gwq!s!1AC*CbO{E+2(xxUHIdHNq8f!rN=E^i=m>nD@b2M0;hg^7I+1!PojAvT$0g1jSI4HPX633)Y-QhO2fWxZ z`hT>sv86K?uc{zTpKyV*KORu(V#yGH>3)(G`CL3}#zSW7Vy&e*y6@PvckZH7FEUAy zjTIU6C}xv1|6Y%5K26#ezwroa8(YLX*uTu>pa-XCu1WceXFVXZoc}+u?G~om5JQDo=vjT zTRzwh#8cjh*ukMS4NTHux-4_dW>Z9b`Ue-hVc=N6M<91co{O9i`4e&t_~r0n;qSpW zf**l77xNQzIrJWM1invr_i&$ZPHxM(-+j*C$vG)gCs4FbjBCjMknMGm=C;;wnr_UQ z#!Y)HKXL5$N+y*idF7MGFfE(&XYf=_Gt)QWx4E#I%dXCSlz8!M9*ci>i%sXf!B!7B zUFKS*vi^w{^6_&*h-XEXu6#!s`#IF&bJRMSi3!+-UdfK9s{_52k2yJ{`Kw-e=zBIR zRphyK<3biKEPOC6L?@fVPnzlIbZ3x8Q*A=7RyM2h9Gv{4AdY_ga&c$c%HH1ZJoG_~Ga@)UdD^T;?2An6_GZx$wnj*L^4h9lTG+a} zH?F&zX8(9CA5dFKCRq^|zsxyHeLag;&fA+43h zA1L<+h#aJ>bsOCO&UK|ZCoK(h`ZTHO#YNuWD@SP4!Ub<$zYSqAsx1XSNB=L-zGU5| z#Zre^;n;J&q1Ew}oqamJ^FSOa^{Au17lpB@r$@hz@=5>kd*H&r8-Zg1 zAA#KczdZMUIU({Vs0&_0rC+KqMJ?IF0pYZPCKH;1uY98c_50T-V zJ#)a*aFYnfBwI@9#*;(MP1Ia`y3_bEpWfjf0g~}FzH!y(UZL%Hz_u?%#?AWx#Ek2Pg4@s7~6C6WJJ;!Y8z8yl_ zj|&ek+ZoGNkGplyIW?A0Exo z3}e#hKK?dr`!PD+qUvzrP%tYr%~a2NJGv({{P__Z1qEu@JrWr*W-mpiZ=D=tdyeYO zJZ@)vHK!Qf;iYDyb9j!y)DQXM&a~>9wf3F>9mZWY_Q!Ltjm&4(kL7MpA{d_-k9pwu zO>D{Rp?`{lXaD1t!NY>H1iuF^47?FI7Vr_s-I3=aCq({)TmybNd|3E<@QvU{V9v$- z1YHij2OWX$6W%@CC!ABo-iJ>Y8;fuP?wDMwu@K-yx*QI@uxWJPTJP>J&qSMOXtwsk zc{g8?v6)m-R_H5oI&X9K-;++Nlop&B5#2+UyQEHBT>6Qgec2g0PRX5lJ>-{|sN~63 zrcJi#&2S<8Ap5cpV|KF?vk3c|0c&RR_Gaz!ZhLw#c4pEZEp=KMMJf*izZb zdHY%PjYq<(ZfsYFd}N6?eS z8aCTMnUh_AzvG?DmULHJWL$HRDSuGzfdu)_f7PWVk{`AC(mYJmZ z&6}>l9xt$|cYJ0*vvVK$&OEu5n&iG*S-EyKP4KMG+~TQCvS(7-uB~1{)xIYO?sn~> z(0ixcKDTZgeQ)DdPL4UmR5U7DXX|bxb+4cp4>F+T=F5~sj1JRh&(wVX{3FC&R9d4i z>OdL2`we`>4e4X#x}5JD4Cz3y^RYtHGlU)h_;PT|;9KV7`=k5u#sp0$&uJ#1CEF*ou4F>aq-zJ7WS9mFUjSz8cw76S}#;?u1DGQ8s^t#M>i>ZJG7H zh4(ifG^JVn{(Eds=(AnxRC7O#n@NpYn>Gn|YtWCxkX8G)ouWYH>59#U)?~A5jo)Ox zZFF?W%`rcmjhI;RoBa9jEotAh`pzy5Cx*TTdH~?d!7YP_1!oC<4_p{{BXBI>Bapiz z&qYp%{0X@R{BroP@b};w!H>Y4i}?w<9C{Br0^cXRd$>q;R*c8Oo3u2JU=ip;v>x26_PC%fT&!hXrQ|eh*w2cq4Eu;3JT`BhN)n zi2Mn;2K;jPu<-Zb8^MpjoQwGhx*U2BIs)G(ynDD$I48>wTimSHF5xJyR5jdPDaOq- znYCd5pSMhtmUln5f6G)l#@`&LH=a}GbYkVb&3oAKh5E}1tTkBd$m~_^`Dd6aeYDh9 zJjT{F7S8B7yoP3an4WKuelI~&q0Sh&sZrD^9a$!~DEf6+^G_ITDFX21Rb>AwEtlef>D znyqwCC3l}>=s*3}tNCAF13dun<=~dV!-BH}zXvW1yb(AS@Da$}k>?^OME->7RdY;}>m&9U(I#n}9|8rP0 zYb17)@tsjlBzV{93~SQ8U0g14PMvnOA5Gt&x|?3<-2N4PQlD~a4n6*^zKvq#I|j$4 ztR!L8zM{{38(D$%jnxvqyV>5*7vazRudDt|9_aa@ZOj%rs;Exeufuv=z1zCW)ac&H zXH^zcM)&09P2l@_N1Y^h)n&IFI?fDxKc#-Uxs{rSs-@cxtFuXIx_^$0^5y8Mq5p(l z4f-1B0e~+Dw+tQ@oF(`@aADw$z_EajK<$V{lp=EYPAq3l5{DBEzHB3t^II3be0hj{JkvD8Zm~J@i%;9o z-MN~@ZCv`kev08}&ugAc=k6_xyX!#O;v34$;~-CsV%zAs9Cr8~t>s3F>z>alpMI7y zPv&%4oiV1=-%jn-73NgCKkHk0`5A^jA$n@)KcQEHz6N>#;LE`+*Z+@)1!oC<4_p{{ zBXBI>Bapiz&qYp%{0X@R{BroP@b};w!H>Y4i}?w<9C{Br0^cXRd$>=zXG3 zh@KkyPw3U4uYn!__;PT|;9N2y)2I5*pz4*Xeqk_iTk?OpKt2rKpKoUX|HQ4Gq=`cI_1?SE3NmlwfeuWXl=G*dqZqC89%e3hWSUomHa)+&P(Xc;NNIS z=~7yG{R{QU`9rP2mRlD8^>fktM4u2nHT0j*t3h7_Jpl0K;FiI|g0lp_2QCb}5jYm` 
z5y;(<=OQOW{)AiuemQ(t_diXP4p8-Ax&M1G~C)V8jN}qmhUh^`g*_TZC+rGTIpT-)N6-|!5H_SX;UYoZ~ zf5}#;|GOY7-%L&^^L-lzCUP7lVp=s;z4@<4jeahApXd{!r-uF$dNt^4pa%fH9NaQ^ zSa6o$_rQgLHv-23J_5Nr@?7ME$e)mFz%Pdn3x5y35&Q_uxtO1z%c1w6Bk+B~yNCOP zb1JG@`9N?1ajq0@Tqku*khAqhO{4rTJvuXW!{fdYXC@UKc_H5M3R4xIFy-nt6H1-m zo*cuIK!1~Jzg(!f%(nQ-);KLY=-`bBzwEnZN&oDh7ca`vFBQ*q3^d?Ws%^ z>dB2G*#j*qN1x}iR88}}YnHn)hX+N^cUzpMSM8Qs1hkI5O80nim7M?f5}@yn9yR*8=zXG3h@KkyPw3U4 zuYn!__;PT|;9k3in(w6 zHS5JGzn@&lUJ*8%Tp=`m&ow4S#lK$HtfxQIiyQ4>qelB{S=o6x+hW-=VA0=YZ#T;znvpO9<7FNY5ce-FM9{0Pjsn4h4_q4%I8 z@O{F&hx>$cvgt^F`*o85$3%MjuG{mTv27~Ohl^-3XTs9Chg?qIrAo&P)6dJF(o~I8 zr&14mVXroHEKG`zWUA{oW+ird(n}fh&_)YG=2^(MqQif*PhHDQAEsU%Z0_qXL-WS#eZR=2H4Bt8r4sY3;3w7mT32BAwyR2e*5C^RVR zzkMv&^MU;l*h_%E`+q&^|Mhdx`$V4*JvH>7(5pdT13dun<=~dV!-BH}zXvW1yb(AS z@Da$}k>?^OME-3izXoN+y8$S*gvpjW_{VQ&uhv0%>!_D5hZ0s8LfQKO%W z-Y5Em=&7OqgkBB$8t4IlF9){_9u}M>_&so8;ElksfR8}#jyxARA@V2W8t}{E!@}Q# zZv;OAb1vp5=yK>i=m>nD@b2M0;ha*%{bo4`sd6+Y9v;I{7{f73(wCics+s1-j!)58 z{f@0`x!q@QIgS=wPIF&kRDQ-t_5#q_;%i zxXJwM4Ev3+HwXJzu;&B&Be0hMeRuSz(a%Ni6MaJT)X;xIuLgY$^Z>w@gIfj<3(gY! z9=I^@M&MY$M<91co{O9i`4e&t_~r0n;qSpWf**l77xNQzIrJWM1invr_i&$ZPEVI^ z55AixzzJWKa`~I$CvqP-_-bC#1n!o9?)&Oa?xZP}(z7@I^da4v;%z-)=`8U>-`tz~ z9N9QSXA$pTH>g3OW36cJ7Pg|&Pdv~%im-C3C~v(lT$*{jqb|4>|r^Rlqd!T(o0 zP3&(pwR#)DZu!}#p4^#4arKpl{u%^P(5?cRFLNVUcyvuu>f+e{_Jv^&74{opZw~gc zV9y8kM_?}j`tImaqo0f3C;EiwsiFUbUJd#h=mCH)2e%9!7MvycJ#b;*jli*hk3jB@ zJQq13@+agP@XO)D!ry~$1U~|EF6Jlba_Bwi2z;OL?%_V+oLq-%#CP|Ka0ErCX%-KQ zatQ^Z>ZwFI$y?KV-R8J#zqC~nSUM+yI ze-C@zurCaIsIcD%dvmal1$#cQKLUFR(050V8vR`KKG7#cPYwMi^lH%8Ko077d<1fLkh>$#MNWwP3Ax69e)<1=SonMJjo?RM&c*x$T@Jkm9f9u? z-aXtWoYRjcquW#}!TGpxs>O!7CEO=foAmlNbd#R7_&8yMSM+GV?cebuga7Tb#GXX# z-@{%v>>!JZH7kHB66^xe^;Mn4z5PxJ}VQ$zm=y&Cj2&;tNp4sIDd zEI3Q>d*H&r8-Zg1AA#H*c`kB7i2z~_ST+C0<<xO+{*h7W= zM%bH!eJt4Xf&CHKOMt#RderFWqW6hDA$n@)KcQEHz6N>#;LE`+gNFrY34RY;7|(hC%k*OPdKMg=My$nb5uAF z46R;&Z4~3`_5?{7T9z>EhsEAe?6bt4MC{+gUN`Iu!yYQ^H^SZ=>|?>65A2V?UIO&p z(W6E`7rjsP3DHwS{|UVs^fk}}0ACJn89XdFOYnQ(!oVB-$Fcm6k3jB@JQq13@+agP z@XO)D!ry~$1U~|EF6Jlba_Bwi2z;OL?%_V+oD^ISOZG^JaCUtW6d0SBP1xg${jk_O zihY*YlZgF$*z1OUVc0{3{YKcEgMBR6^MU;l*h_%EJ9^aU=c4zCJ|TK)=s%%XgT4lO z0N~5PErW*zX9<1}To`yGa4g^>kh>$#MNWwP3AqORa`>?D_uw1BkHDOZ`3brldJj4R z-zU6#xKB8zx*ezX&zduf)ArMM*41sAoVt^(VlySjaCT0Kkl)2$!j80g`&`PuMph;P zpS*3O8H@hWerRSG3*)&W*kWYBWEf9Q$SH4jZUe~=7l)9TUr^wSi1pl+T}!3jDv#v^ zd)~b$%o)$sT%R9dHNs^t)Guzk>T-vUiLMkcS9GVYn@t(sWB9pd!S5tS#+Fiy{?O4Q zhq_sdQ1bR+r@vG`-a$iXLk}rRu9&k(=N0XY`gvD(;bI|IVSV`=VHhv|XmXLGxg;_`kx8p9d-b?2Li*XVwtH2rhO}}r{Y}K3FZ@C_Woizyj@@IXD;F{TrpF)Z51zPsN3zIgv&zC(_b6&#OEI* zi$}jCMiyAnzV*2$0=O2eZ%&)$l*E(NmL8w3Ccl*e0Plm_2bqS1{)qHMXbv(U! 
z`{(1gC86x|_lh-dM*A5FZc<}K_PMZ!sWv-9rn)eklh@I--tb&)PMxYu+8^g<6f2^A zj%V;9*(&dyc1g;C4KCUdlUr>!`XBBiMe{ZsBiod4^Q)6CFb}VvdUZ;!lw(ypWnHHr zr)@%z&yxcJTvM~#oHW;dG7Gx-adl<_dpTb7!tt(abpC8e{*lI`G`?FZ(YBSJyZQT} zManP6aeIq(oLcSr+4~ve&)IG1A=C2sJwH?1X*ed=a87Fx*|+hco~^EaaVq ze_Q!px)S@OFxy3ugum;0hP_l`C&rzz|7&kZy)|v|2r2FFS&Op?y?0-Yt~HVNjGD-PdKNFxq*9qg|s!sJ{zT$$&MmF@)kETZtFNt1?a0>zlOWxg+;)Gwh z`r?0cYe~zC%=_C|69manP-G zEq9k=T!?O?673$EqwW~1L4s?}-qtr$ru7yfZO=C76Q4|{Zl>xEW*l*N`3%0bOw6*# zFKQ!){MU|CuxwgE&$s{8t1=N}db~j=YwplzAEe+X*f^OmLYirmprP$J}HYviphx>$cvbZvH{*EdQ zj?wiUT&q*x$bL?B0AD&E=gItOH~U3O=!wG7RhOQ>CfiRNm-FU)qV}%VI%Dr#_Ok8N z_Qu1v+2iSVuZ`|eU_EO6B_S8&xca~Jc2?_kP*%%g{^u5RI6rSZ3$5H=OxvXPMCQq7 z(Y#)XW}fsES~fOYI~659Tebh7QH&wX)!+!3US~tqk8&HvTpHa2C(GY4V&I&}@O{F&hx>$cy0KdGN2Gx^ zXV>C?>73WIxW`xf#GL5;z@}C?y#J#5Rf(m)Uv6st8)HVwZBm=`>|V9(QZ!)%QqHR{Uhwl=C!Y#eL6Ok`*^E-=G11+DjbwZQZ%D(HQArS`?g)8GY&#vr|Nc@^W`u`TlI}YVi z(Ttg5Gqd+lypGAXEHN9BdgD_XIAY3TMp6q;jdGn=+QQ~}pFT_@a}V@KztE$JqUS~5 z>xz)RSJ*R?2Pf$8JGby`**%O~xL9~voG$x)cZtaPi&Hok7wZ~&SIBTuO1K|fvsZGD z->rlB zHucXU|2{q&-pv^;5w+0uPc&Ep~=@|fLu zL}>9e#__BgIGO#FotM7z>b`X))#a`Eyv*?VXe&Z8ufv0}#X0G#N|xWH``^;;ys^7a z0ZAv_AC%Tos2KV#q6-xUzy+&Gr5{^^GxH_be1^);RO9Q(O(<+8Cq2)Z144>|(h zC%k*OPdKN3IsS}Wby}Q>X3Ya{J*IH%D+=bkPPj>t{`CgeMKfvKtAat{CvGgaHSTWo z&@f9%++{u?K!Q8x@8g5_r_SWO_?|1gLN%R5y>sXsG|go}T}KqRhnX;Ag(OS=iC$t^9aFB9CtWzE?BNDr{ zM}yB(h3JKJ)3K7Hlr_Ms_3P&zrrMXS&HHj2>-w@|SAtkQ6=yqa>UVTy>VNN>lysLe z6~BHSLDPE1yT!HXK%pDe$(`A_)Z#u}p7hTls3VI$tGkr8KJ%ttE5+X-lX971MgE+D zocY`mkBF@w0~d1(*0wh;XRlOlpd~i=B0BJFK5otE&kbCS8)GCHJ|t(nM?Ym7oBO; z>Kv);&#vH0rq-^wOp8mU%$9{)v-(S@eaE`afyW_^>ggAQ?F8(#N1+yaK1b2VYSJT!O4c~%6suCDx-Y*YhiaOGe0%P zcdgi1H03C(E)}R8uGqznC7(8N($!;Eww^NYP&>zp8upaa3@f@^x4AfjyN^DukFr|i z@|dE3@87XfFN>YhoXfW=DvpUtHTS7*AKh>C_xHPt$42)aEjw^8b4MiYnm%aAWD4oS zGR-B%*RE3@w>9B)JeP@CoX@y?UXokwqI>8-)dH@L(XPG0Q7gG8+y^G!NfF^}da*(H zA#XQ(`F3dW!|(hC%k*O zPdKO8Tm7AzE*#)QN;wOteO2KkXu9)Cn=a+ZO-!6#Hp&%`M0_)wuGzpokKZ<1eu6s{ zp3Kv;+O?CrF1S4Ager%7(l1xV#CieO`o>Q-93#YSR|{kiE{cZ7+D%Mt?6g|?&o_e43SrC7Z9Wy%mGFyK< zcg9cs+Y0V|G|MLMQq`CqcF6Cxbz0i!f2<;2a)WE1($P(uE*wm3VOITL)+E$S=BnS) zJGZ@{j}|599lox3k3@5Wj#$fDv2~Fz{Ec3^u=R^B)qWDRrAZqL_k1i5WLhVmnP{py zl8xG*=TZhvRNvipc{nxDH}&pF98P*X z>WA<~iL5hZdopK)*?E7R;RnKA3xbH;yDl(vc6*%e2>CG#%nZxU%Qslo2SG?fpX zEdG*at-sLd9^8+su5Eo1Lm!X#`!AlH!`!Wxt%=AO#})GLb2ha8LehC}Uc7!%$x6nk zr@p-Yg`9)*cb*=SZj!T<$b>95$?(t-lgAPC zP}MwmtZNv-H-aC5IT!O2bUE}MbOgRnc=vFha88jrYfp#`E#zpM_HR5j<2Ef$Jboua zHlHn-di|^Qm?Zk!zx&?w(H^SINLy^yG6;8P-*&M981b1sdXjkMq1NiASAmi4api26cr{-{24 zvQK7OhB2@2W$|#;``7%CxXPt%{pO4P#mmWF)Mj(o>>jpL++VoyaRo^_>hT1>%%?9} zGX1LE<2V784(31P3)twNPxszTdVz z{RXAvL=GOX^<`b!ud+OMxUtd+8opk?pRq@N1*gLup3*-3pm8tsYAJ6O|NHvLJ~}2` z=$$GM!zL&1-KkX5!wxK;d3W>a-|S3cP5g}I10)`+*yN(}jg|!~Ib~P zQSPJ#xeeF;vA&&qvrFzYu`|wta-~!LGCRMg*K`~wbDqyD-fHl^ij6&f%(teYn;xCy zww#{({y!fU{vLcI_z{?MF+V|K-WYp|^U)s};9BXF_pT@=GrHG2@Kg zb>Fw1rGlYB-Vh}RCLJbNqt)Tf&Ki1udnkwBv5!HFFB%Wao! 
z?a5`O^_EfELUk0oOk&H8;A;9ebgTHuo<=r)+{0Y|K5?#4r0r#IV>yoEgTNetd&{|w zN6s|AQGL#S8wx50%&TH@LU%7nhIEqfc*jp!N6xd$XTItDo_mgFEd23dzJwLK-l00@ zdB|RRAZC4Y{evycn{8=dsAxo*?D*l+zime&dF>T^myfdwqmuTN@`EgPMoz_q=zpYZ zC)+i+y`Me0l=eNR<|iAYFnPU7#22z$#9KR8p@D@yNOvws>1Vp^LvPN-vD}Kjs&&za zUeh!2zWK*vzf;v%jib*UA5ur1oBi2$U+IaC!83J%f2>@6$&Zuv8C0Iwpx*Mlp7!Mx zIu>|;W?=`UJdb;KvdPt3InM<9SWbbyvfk>!|8foZq-|C_S~i*syl;8xT@$~SGs?0m&0M^T`~1R#8LPgnyZg={y38#gkOU8JMjQCUGpCg{` zFW*pLX04UM&!_B1baY9>$yc^1ntL{kl@Uf(}=h8ua z8!nJdWKr|jtEX5@(8uO_olyE@-4`sa;mUOU59}WdcV#P&yBVxh_oRti&P@2m*+Eek zY|9^>wPtH1n;wPoxzdlNWlvkDtFZWh{jTHqZ74SB^iApFJ4`1{;GO=&a>{l)At7@( znMoTsZS!7~Lq>CM_-*?h%_Mp*3&#zGGvtKGpO9<7FNY5ce-FM9{0Pjsn4h4_q4%I8 z@O{F&hx>$cDofA!w8MA>$4O~*@59^`T({>}-sE%VaCL8e`r3R~jQjhN*#_;=|0`X* zi{6u%9 zJa%92m|(`uDynTyZK_EtV+Z(NBv$-RC;$4>Yv#HnQxea@BmKs&sjm2O?zhvY*z)3< zuoZL5C?}`RTf0_{y~~l`adXva`lamrLM!huSv7RKmF)B<(F2>3L|-^DcX_!JM_tac zec8R0i6Y))W5G8Zxc4BldG+P4mX;ke8B*N7JHdf&r6|hV|FUJi)e)Pde%cVvYtwst z(rK*w%=^O%3uA~as=hq^?p3Cje6K6*k}s(jM!c}E_F*=D@AFn&o<_)XkrN_+LaqV7 z96l`kJ@`iOBQWP;eu6HC-h+<7_X+PF?i0@GgWbZ=+@v`@Q|Io!Ly{hxjrt5xf+c&8J?{(YOQdRQWwZ9USPQ`Gm2 z3C8lM^{ptS`(Z=IOV2(avGqI}tA%&czb#oNdq(H2z+I=u9$jrtHCx6N+UX{+FpGqa zLn^_fYWZQCs!1>vRUPT&ACtfiI@l@gIdG1o!=+S27x~b!siubVvB50%DsSUKz=S-!}N zi~BEkN1ltE5cv~w4fy5oVd3w=H-aC5IT!O2bUE}MbOgRnc=vFha85lhGlrMnm*==? z-u0kEE$t$!K1H+jI)&TkX#~=e>_4if{@h?fYjyQB#N$}Y zjfu-FFTY?M`5^m@1rMlwQH5S|Y$RzeX_%cJ>q?HR-rh={YR)d1#oHLy`mjAa_`UY< zI5V{?inHb`*wFCyvSyVTr%CmCr_X5RpY5C_ySi7%oYoiC=Xd;br!QCePRuAsrt?uY z(IJ_;>GahZX7|-SSUGP@R>Gqgnk8nF8tieKJ&o=(Py7_mmM@g(t^FH97Pjj9f_;MM z+R!cA9hZF=_z2|g$a9esB7Z`z0lyqREc`wAM(`sr=VE?>E{EQOj==W`?;h?G&S{?J zgrXe>6gek`W*dG~e@-8LBMYbY6tG#tjzO=hp7{YQ1g@88_(KeSj&;fIGt-Cm@^jBZWl z^QKAf>Y}#z9AiRCH!GyAT$uj7i*}k8zI1zojni}g<1Dl=Vr}ZWgUsY*?>n<0Puk=j zUN*EVh_(Jnce`e7OW#{}tdLw5MY(0tSFcho`{kr{yT1^`<^`W_4@%E z63F@Raz%dBe;f<=2;}a_bCDAwe?qPSzZ^a+{5|+a@FOtiVt#@yhu(vZ!1oF79_|y) zDOEK7cHAE|&TOaa&ADwGxnc8Nt0%iia#)N+W`M$Ls^|ILCahP>rr%evTR*9hO$y?_ zxo$^1vk&Ce3(F3q*Y|tA?yd@9$8!YJ+J>$&uLTQor9(W~p=0f(pNbvXpu5@5b$aP+ z@Y|3euR$0s3#k6Y?{%4uZn$MMWk~~-`3sG^T3p5=Z^}AasXb*&wrvblXnf7|cJ5x< zwXd3Xl$?Kc;!-7To_(9HZtq~?T0SLfm)4O^Fs+M~s3(^L4Fye+_bGiXe}wDUQg%^b zig)GvCuGh9p9(6C@+KSCze!)hsPk~;qLEc~>_v&0=7pR`wB~7K-$+#^+p=Ybdyw)2 z2Hpr93-}1+?#OeI6C!^?t^vOsJ}mq__(t#}Fy~@^f-Z;NgO0%W3GW{66V9nMx-?&E zfjXyW`_zMqb5?P?r{tthxYozkr^f!B`TjY}IQR8c-poegAEVpoH|#@^KWwiD-#f!> z4-bEE^FKk4y&jbO^>L+U`{F6m%#Josx9`=jKSV96ms1Nz0;$_0J5_4jdG=Gr)JQ@3 z6bsVwoF^HZNjpz#sXnO6Avvi#F1=Pxmdl8h0m*9cJSRkrn7YI#+`l7S=>?i8B>3^knGw{@hqMw zQreX3)-=wMcCs9I9#brBPa|Jrsyr=Gxtfj0uj0zLw{JMvuQ zgvg(eYrrpu4-0<}z7hNg%(3an%CutQ%(}#O4~o!ha!RZ?!S=3K9$%2C$KD+r2yi^v$kb;{e`2ee#T;9n zY5a_PLOHR%w^NtqvA#QjU$b6~{>Xddt{X$6eXZz~cYj`q?IOhs&&5})s%MgWzZe($rEmaxaqRUBo6AFz+bJksYSn<)Qs z=$JO2Bsx{F4}75%&2*RPgnnK@-k~# z6tdBseb?l@oBJ!0sqr6>e^Tee?iuJ8&bZ?>`aV67sITxR*Y?26(O2SVwvEx=pj!c~ zB|!h!uf*Wd{X!3AjxNihW6NKi@Yf1pp&lk$u|s-A26i4a_27jv9pk%nAWD}@apFLPXqB{!B&1-WQpAxN~zuS|AQAC2hoE=;Krgw74DnMEU;T;ke)=Ch(U;klO;N=Ed4 z2po1Gt0Kue*DmyjN=}4{z|JmIrFJ8#CNS zeb(KlAC3>ARj(wY&GW4pZ^D@9qS61uwoaIO@RocW3%ZkL?!|kFPI_$Jqo@`_>E0g` zf@-d@+~d9;pN=0Ro@XasXk<82)DpwG@e`a`?cSg!&w}f;Hss>|Oyx*+_to603a1#R z9H1uGVaBD0JzLDmPPs8lxh?t26Vu2jVf+M51rItuy+t|BA)bL-1`i9)68s*xFz`m; zSinahcSoL!oDlgFat-+9@L}Qa!8d{*fjJlR6LdNB9&`l0Pk8rmpKwm;UW+s&>*sSK zzBHFF+cS~7JlZez_gFrT^_{3y`ese^N6lmP$Fw(Wv1GNx>-sM=IC;yd9R>|_wr$2c z?$JK>MLuMOWX>B}D4Dai@RJ5Rzb01gldJ{{`{w)RN}(3XKD0Z~UbK_F0w&y@+keilp%pH z2e%9!7MvycJ#b;*jli*hk3jB@JQq13@+agP@XO)D!ry~$1U~|EF6Jlba_Bwi2z;OL z?%_V+och#xjXE6VIM!wj(}mW_aCcUzZa&)3O5?Xz91mXelkFZhH2(T(Eaz>|ktd7a 
zHL|;T;rrgr|3FD{CWA+lKC?RCqe1$cH`09-VUA7lMtY^RBPqT`mG$gDaVz+!8jX>f zYpywWEpzJotJ|JEmQ0S^-&oodhNsv^b0|0C+WJlu!iDo<#I~Mq88~gHmyseex8v3R+=JM|-eL`OYJpl0K;FiI|g0lp_2QCb}5jYm` z5y;(<=OQOW{)AiuemQ(t_@jS&;RQc?Nogxd0K65PJ!ac`0wla%YrB-H1tSnP|y{BOdcvciroz6|#rO;eO}a3zbf+{-U#;*}N?j z1PJO>qdm;P7@4 z%b%3F+aqrwQ!hMk=c7N3mBgIy>@3?&Z)V)sSo&rgdpDH$F=gTglC{g~zq^8C+*9|z zOH7uNJTG{y-6w5A{|UVs^fk}}0ACJn89XdFOYnQ(!oVAWV*ww5+#Pu?azf-!$Ti@X z!-s{x2j2*O1m;}KPtfJid(aVhpK$NtJmEV98FdvbcM|79Y%go3#dWjczhv_tx&pkx zEqzb@7uT_~D+3H;%q6LQ(Nz85;s2@o9y-OV{#)tKXK&PFmyaWj)DK5;Pl~d^q6vx< zrzuhJmALfq8S_|Ph>3H4j0odDd&EfKuoxTv;pr5oQPM2PPBom|r?D0{4}QjtVmn$> zM(!7$Pt%l>Wd-zCv2P;p4-MYbr+hi7`qOMC6Iy-FVd8mts!npY`gl!~&{ISI3B4Nh zHP8b9Uk+{=JS;d%@O$9Gz#D;M0Uv?f9eFNtLgY`#HQ<-ShlRff-w1vL=3LBA(B;s3 z&=Gi_aPQ$f;X4%`@=xhdnaPD7sh{m$IGH!wEkxkXX=o_7SoZ zS$xecdmno;JHD->YZrZ5>O4Yu!&X9{5Ir^YpU|t3|6gANJpl0K;FiI|g0lp_2QCb} z5jYm`5y;(<=OQOW{)AiuemQ(t_aajJ^lRbN5@EW48=U82&xbZyxTm7gFZ7NX;SfSEag=5*{admQdToy{0;ricIi+ zocoNv#q6tFtAUftN;0DFdI7mo`(w@=J0!;FBX(%mw-S}T~NTg= zGHCZQl6$p>33!>zpSFBI%@3MgQ{!yOPB_cxboi|!GNIgL8aKh*km%wf+1 z{Pf-_%piNKyZ+lxO`#{t?8ce9E@0Q4PsMMZs!QHJwx$iT_e-;SKP5{F&7cb1ai7M| z(xVvrPVptS@|3KT+qJA+fp(8Ces_4r-r?Wtdlc0mzKhgU$E!|KGhuhLMqgbUv6NOS zt^d;1x{P@#K5f&>(q`!AqW6hDA$n@)KcQEHz6N>#;LE`+gNFrY34RY;7|(x6Yf2nCw!*{?U<3=X9+H4gq6|p z_Y*j&fUx!h$&+|*@Adum9$Ue>&nHj$vhW2{zVU3mbXpySKe^;5Ngqhv_UQJ+HGe7Q zeucAeXFvOH72b5{#CfJ2ekWFRhBc|oB=M@B86;=E%xAlWJL!kmST5a{%xa$&H*Onb zlA2-0w9pTTgYq4Ceu6rZu-caq~Y~nkSrec;7|Vs5Mt} zK%|(7oy)Bi+nK>+L!0|nx*TWib^3Aqi}lDTLtnS1_&i;$H}K7#7EF&eL~l6goI+#m zH@w==kjM=4{du$MtXTU+1A+aOmr1{FZIW})DTcl~derFWqW6hDA$n@)KcQEHz6N># z;LE`+gNFrY34RY;7|(x z6Yf2nCw!;3eo-yLgKC_*tw-;5-V5fJu<&ognjg&dVz;Sr{ZDE(AJ=>Tb_?k|+_<$S z;s?zVY}lfA_6I9W&ibbv9L-)|Gx5J2Xvf;eyIeCfiKm=bPA-O5eJSd(=dRSd8T8JQ z|4JdRi0!M}ST9!$5t+`ixy6&#a`Sm;qhv(IEq zIN_Jd#&DJcgAN|FtD@@C9e!~5cVpY5ocA4P=7n8l*(R6S zl8M`o3m#9Qs3)`gCjKq zSg_{<`y;TI0DX7#sL{_w?-PAO^wiLQLazpW4fFuOmxEgd4-3u`{2sV4@J8TRz(*i= zN1ltE5cv~w4fy5oVd3w=H-aC5IT!O2bUE}MbOhcf+3tH}lI*f-bt z>PX(uv+bXl>KslqX6x=}3!<5%g3FH5tzk^EjZbYmS48c`O9aPGx3!`>Y1W5J#e?2o`+0`%R{qeed$y-)NB z(NjbJ3B4NhHP8b9Uk+{=JS;d%@O$9Gz#D;M0Uv?f9eFNtLgY`#HQ<-ShlRff-w1vL z=3LBA(B;s3&=Gi_aPQ$f;XCaMdN;X&Uxd52b^q=0qI|r`O;0T^4gb!%8sFv7VIw1` zamQc9uX-_T+3np2NfWTQI}Kvlymc3)>d(ZnI{Bz& zQlTyk`;D+S2m4sC=L7pAu$KURcl4;y&qePOeM0oq(0@X&27L|m0Kk`nTLupc&Jz3{ zxG?ZW;8?&%Aa_Tei<}Vo6LJmsx26_PC%fT&!hXrQ|eh*w2cq4Eu;3JT`BhN)ni2Mn;2K;jPu<-Zb8^Mpj zoQwGhx*U2BIs)$#?me6*e5cCDxCNW?=W?NTnc6c?NOSs^RRVucR^dkK#2U}l`OPFv z-^5>W_{m}q{VdBkz|YIJSs-%l=o^a3o_4;ftd=D9YFu>~{P5qtFzlhiek1J7!9Eu3 z`M~}N>?J_o9X)FFbJ6=mpAbDY^qwUD5_vi9(U*p8({!3Mq{(VY`z=t6=zNuNjaMgF_w(Y4|Kt4YYd)=@v z411`s-w1niu#W|MKCnLmdkN5YM~@o)T=YKCCqz#T{U`Kl(APi@0DL*PW$>`zEWz)A z3j=Qijs<)Ka(Cpp$O(}@A=iLk4j&f&9(*JC5twr^KS7s6??Ff4eZswm^Mvm-ZIN(M zT(uf!-11T)tGJCeEFGb4I@~in^7)6>?TXJ-Am^~qGO~<`?R7u&b4fNE*twy#T;?gm z{ypqo_`$V4*JvH>7(5pdT13dun<=~dV z!-BH}zXvW1yb(AS@Da$}k>?^OME-oLSewo6ZkVfHd^nk~ zClUMiu-6Uy!mx)5`;D+S2m4sC=L7pAu$KURcl4;y&qePOeM0oq(0@X&27L|m0Kk`n zTLupc&Jz3{xG?ZW;8?&%Aa_Tei<}Vo6LJmsBapiz&qYp%{0X@R{BroP@b};w!H>Y4i}?w<9C{Br0`C*Pe8J#<)bIX6J)Ef#`ldI><@MG!z^!U}g#I);A{@Xi>eU{jhi2Zxm>xO+{*h7W= zM%bH!eJt4Xf&CHKOMt#RderFWqW6hDA$n@)KcQEHz6N>#;LE`+gNFrY34RY;7|(x6Yf2nCw!;u^6I!BC#G-; zcl+HJ`O5GD){lEQEw!6rKP>i+VxJ}UBx3&__PSwT81_(MzY+H4U>^(id|-bB_7b4) zjvh7ox&QS(|JNr(PYwMi^lH%8Ko077d<1fL77d<1fLeb>Q88g1`xk?a4KKIvb-rc-&3);|51C@^X)FZIb2p7$>S&LZzv zQ~p3J<7}@9_x@Dlb?9FFYOd15%s>3Kk?j-b)@S`VX1g<(4c5xm*bn#r@1L4{_eXd; z$-VbZm~QUR9^Kd6zWq`LTk`GO+LqTz6kynP=BVvCc51Ck%jlkK>`t=%Ej;&c4FZ0Fu7 
zJcFXv!`u!jE@1R(8PU+6Gbe#!a#h`vHaX8tFt{2>eMQDW@+wY*@8rXte15LIo@+j!A@i~AI!kP= z`&+OohrFxLrwa|`l1GSs-cL_o=J3pY?K<^e6y2jZxJBm=<%v!WZ+Z8WK92J%mi2qd zG!4w7cYmy-g@IDqdb2OF#=7^JFD9L)*~%qTGopN1!rCW;WmZY7vf_kz;iC}Rm=g4| zkpUz z3nV7DOLFr+%O1*h(dKQ-UN7}oK#W(}DgE@u`eLGu($fQrRd@~~JVzO-D{*JG+&J~} zr5Nwzje&24D`xPPCXJNva+}JtE-dN$l;+2lk0+z+oFi$=J^R@s>q4jYCiAHWC9
&Sba+v#Xt1ZNc06X}iSJz{#H~X>+jGsXF~CN*(#hs_4Oc zmVB?^OT*l+wDRp$H5dH~QriD+1sDC29l5D$^~AP?CHHOVvmD?tr4M~~lw|W+n1a|n z$0@&=-w}s(rk?ebzIecG>+t?qyAb8I7kpc&M_|nM8k6CD)lEld*{`pse+5tDYSP}( ztkHw#cgDA{i0cQ&oO`6kx(t7Ro#7lo4Nnw1@>gGB2h$#O-@0;{xyc(W8;bRy?&|c} zxph~m!IVGzqi#9dJKOn7jCe46yycF^hv8nIk6mxWwZvO=BuRL!_NHJOH=?L@-$gIt zo^E{BGQ3Aj=INuI0g7Tg|A6Ig%7-L*S*IjVA35I4#M>U~j_gq4uGl>5i0D!0#UIMJ z^JBCE&tQpA|E}dzc|pxm>&Goz$SYod-&UjDpLsfr%G;cBfKp}*e*NZY#_S$i-+El{ zO_wvb&Xbl3p~SA+?!ID2n31)F=8op8Y}%-YZEr8SXutCwwQ5m=P1w zob)+|m5mxr$>Q9@sM48Xb|0za`s|*?2i~(e$|<2~b1SK=!Tt3&ky56iEHq!vNR4;u z_0uF@Qz@>w!RD7iVj5k0u`2pgf1P$Tm4?w>CClrb#eESDR=q9>B3b3?CPm|zn--vvTa^JML`pN6>G-JE z@Ae62;Y{<^GHr93?{hhO{qMO{yUPQCN|GAbhl*zC!^LL@IS*Y zEM47qUC@?VGD{Syw7sckt^JandE&gxs!QA&rC!=#?3;PGaWdCGXWUTI5fPs1qR^hm z+J9_&froWl#}9II*H~V0ei2Xf!KKqSlVwK3} zb>{`mG%3+o&{W7OKK1Q2y#AirHlJ(Rt>jN9><{<1TK{0CUu_3YPWVp0S`^(|MrTq} z=Unfio9~%^eLzjor5yHYkG0S5xnZPP960v&`7#jDwp24~`e-#;t z71Gb@ms1ZfxWXp-j%<4rm`*cS?`b+T@*xu&xtsT9c&~HXb!YC!Kil*Qsl7Hy0$oRS)BO9d@kzLynw(bR;!vu;#)(sy&SXR#*9EXcE`dQSOzHon!y zEPJjQz1vbadh(urlK0x;tWzM%bBR~0Xq!BP7p73q6T=%~XQw43SFcp(y+ThH+v{a@0#1AO1+h1Rg_QFX#&^jc|!Zt#y^fiEchTtdyI_J_=9aCLA}MHag; z!Ya=D+e^AEHfQ(at~(^gt5UsJb(EfkhfVutmPa*&+3mJRoV#Q7_D{?885acSR!HfOwP#`ux{w2upOd!j8L zWG$P)d*>x`y<0?^_oM3$XXeyHeubh3*#{M_w{t__fbm@3?{cTFcPb@#+OE;JYA1^D zrnzi-^l7^^4|)$e0`C*|6Kmn!%k(8t=yQ{zQKtq<_*Bn#da|)Hik2JuyyjK}+IDr2#6+Ts9|LQkt_q z{7Xm2V?3|1Ma1=I&V6R}*1)_Xe1Nt}eal^TXB4+e+~aprLpwXKTEEy#On}#&ESPKb!5!5%w)1V@JIgk#h0Wm^l^t>?pgA_*YD*x`36-emad<3y^ma; z8|sIO-($gitNLZuc9Nh<%mk^lbT+18L&z*wFXmfw=<=WLkE~`*Sl0%=DmL#=ui0$r|9cJ#>~57&39cZU{7~kPIM`+BJl>deS8;km{d%d# z1@CUBa|!l;^XqOC^Sr3)vi2d1xNySc-tOW3p)K8af8ETd4ZmU?gj+7N4E{W2!`0>N zhNIZC9y><2Bx;>s?vG+Q+vPQrx8J0JeOj;TgmUTYI=M@~U!NrCa_Bwi2)s|Y_i&!@ zoy7K;L33qIQJP4sfq|`sD<0bv+O>MrrbdWqxeMk(WztmgPzuorc5c~ckiC@g3os~c8KRsj0WS-J2 zLmI1mlP$PwymYfb4b%5|S2HGH6i@K(YM1NdhS<+Po*U&-KC?Z`kB_u$&t)t0-<~lQ z-pkzF&pz60+(5iZ7n)~%t)^A65g`Mm1$1`S()8f+hopAgRWWmVKKrzO{wcd@g^WVN zXY961W=t!q{dm|N>d#)4yDB`I=~@NLBm@|haU(05^|&}hsK1O1 z4<0k$WV(l@ugF|HLG2##KUu;;-`%503*J=v4DSU=R(9ZNre~7jvzGVgC0bZVK&XXr z$8#!QJnOexnImC-f-Z;NgO0%agnJL?3EzqI4|;A(o5i^{DHxTh@^cqD24BQXmE*~a zP0GB@FT&%VYl(#Y8ls05WvQuAQoPv2=~8L#^1QF>U%S{?)-$cy;`e;ZdKnr0i2>BKc}sq&8Zg55q(nd^h{ zQF@EsGq%VmJXV~K+j2~P?fH@VSzi1zYYj z!+4b>_dNkjRy6uW?aX@=bJ64Qz0RA=F+a2Ys9HFi_xo(`s$*BlXGB2v#ebVv6+3@0 z`Ti9)x?cTP&n{Q8iMakjbt7yJq@kW71WD2FYrDJh!%Hjl>qb$_gK!92fY`W%o9B) z^D}&X4XbUhNSdhjkLCHN{$7$K$?N&PH8TFsFR~EqowK;9jrH{_Ybu)X@ygxZ`$b|$ z4eu8X9R2LxKRS1As7yBC1xa7Hvck=h$99g~X!2YA1{*!?onTAzONv{zeNE7v+iY~i zra`GyRdgm~TK0@{A876MYZIJOF4N+v`nMX@(pa{D-=akJn)OUF?aWAOWAUbeUFBCx znTteF&SCEoraO7y=N9D{@|pO4#K@9N`r;(o=r}ru?AM>3F1nS+Ha$8QoHi$e_>b=p zd-%PKX%x0Z6+QEzxwEYfx_idcdEH%81IK=5S~J#hx@^j0R*@qH@$i%TD~ zopnoedb#(+PTMY)xm`e~_(xsLl;z{?T~`$<@vonP#@3jKR_6cbM_|sy`~+PNy$2nE z_X+nN&J(^<^!)O<66zZ{vl$D8R=LV?m40y?Z3@tZUuf!)jxB;{&&SEr}B1qVi^(3;*_$%+ric*V1=3x-V{aRG295 z$Da~FZa2_(muK=bb(1JtXq!~66OX;{>TKNX`I8#VV&2(Hd}9;E0)wL#XEF)n zW77}p6W|2oPi5r}?{eQ^q_}-U#C^7FM?>}7nZ0c8eDnABb-vTsG3_VaeiZ)a8^Mpj zoQwGhx*U2BIs)$#?me6*e5c+AU)J1gSje6GWGc5zu$e-2H#W@K{*$fR;ia&6-fQ;s zZi&YOhi|NP|Haj+L0{O6?5T<`qGWigk^z=Y#ldv#K*5t<-QBDpg&ksK+1y4CiyhIKicTPg-U8(wr7E+Pbo{We=p~`Zungo zvr?mUMG>?1_;*XFq>z^Td%X)7{fe3%i+++gc%SC4>*f0p%%gcWH5H`7$J0*P*<1O# zhs9h~6rAi-KK%Oul1eS>e3|Ux^DB~a9O#bh{%+NAF7)tUkAL6>AIdqhQpw3WoFp&1 zn>Rna#C}X}wA2f`L3iprmhIf<$v&8yX)5R^v0t zXq|@8yGH{@1)d|IZjH2yLRpu-wSI6o_TAY%tExWJMUF`GxL=9tmuf}0 z#x=I<_MMo;jnazwwCrUCvwtC@tgK~5V+J(7H|IIiA$|Swxp@_|yH4az=eBg(Yiy8d z;1kGZ9z0g!YLrN=!I|+BobssjfnMJ8Nl6raae4BU71``)+KV$4GtROUrB~$!!~V(6 
z-KOURRLj_tRVhz7+c4(8CsjY7;xbj$Y@PDF;S?>{zTvF4PZ(2hSR4KH=BeS|#rqgs zn&(0BvuvJ=S01Bw!L52XqrzFvk0|HWLGF}(#4{wOCWWO}lJ*ne&U!J$R_Flij!~(<^X!fP#lFB$s);biP3C zs7CW!#%Hr%;Ggv%QyW(<-JP5**2|$c7Eqh zGE4S8lJSaJw^sP$i0g(6xl=1MzeP`%-~>!|%wM@&p2w9oBs*)(;H8@O z-1+^uj>#2+q(!iD!;|d)`C*CeQ#$XuoTpKH zld1zP)0pYL$&*F=A5pVgsab#Gc_#F!Kb?a7=Y?VTW>$xlbKY z$E?hp96Gx)*hIPJFZ`0t-KG|1Xd39xM1oH@UoS_n& z#Jk z%?TSz_*w~@z;x^@I)8&yJGWWCf7K@x~-ZhiGp486(+viNN zTGI5}_4{n@Hwm}#d%Rih8?CrGrxU=JqqyN*H}`R9Af(Mei0`K1ix9 zqvW2Dsk7{An8-c9g#332+grfY|DbttU&$(n`3?Vm7@}mkUt^UfL{(D7XBW5 zBlr=Rb1^?bmqYJCN8o+Jy@&IJ@8o!9(kby+U9LF)vw`fSn`EVMD?rV3GS6xKiRS*i zJ9Kqv@VQHOn%Li~4@Qa?i}N1D3cY%0xR5t}c7wQ{%53g*{G5MT?a!Fu!8`FDdFAZm zCqAzwhq9@v((2?+`#f52bn}3xQz^5Z5O6TKy@Khh+>h!MYNP_YtSwWtUb7c#ISHTB zd_0?frVVSq{~Z3koTv^b6*-=UME9daznQ$k<~=7?ihW|G!C$I!cO{T>E|=}j-%97B zvqonguV)sgE#GVHxzAjb(*lJn+UW7GOdXefP3-5Q=?1UQoo6rgi;SOfxvbP>p9_;s zA<<&JttLB%Cx=b-Th>2~r=SbMjw2Q{P^fH~jLv-?Q>eM!ru-#?I=9yQCl|e<)5?0< zC#Jk$zJ9KG&%Lh@azf-!$Ti@X!-s{x2j2*O1m;}KPtfJid(aVhpK$NtJmEXFj~rv` zzsQ7>oqYTv|Fe-?%c%V~tgdyjzxSiF0)xePwPCkzJ!qBW`hWLocK=c4_H4Di6Ro(6 z=j~=Qa`C(lDl_vIT~Hjy)&y0}opzPc!9&~I#@My7^JceQuWT(N=5cG@eVcqX&UWGN z9fF@o-1O(1$gAVHDc#nWJ%`WpvewR3KZo}Zo%`q$w{52aZ_d00E^@LQdH+=4B zqXB1+S?}gVQJLiqqaB7dtlsRkqu2N{+SzPU?Xy0P)g&#O7w1^PW<3;4a7(dZil3Gb zhP}B(59`>evww5x^g{Kuf9r2h<@aAzzwck9th7lAM!$ksl#TaNmyidnXYKX4P5UEh zOV7n4uRL8?!P*C}ZNtMTwEyXsKQ2-9p~)kyJg1P5=OQQkFMs-9t^vOsJ}mq__(t#} zFy~@^f-Z;NgO0%agnJL?3E%1V(1IsdczWEKvG2bY_x&a9tfCnI1bN<=32Qbr^LLZ2 zgRtl606xwyU`2U9`@>fMUhXWz6)`!N$Qm7sB+?Bp6Iz*B#->|ZO(@!y%DfCanrd`I zhxaMCF`GpXXvsab)n@s3$bRDcoIT>zY`34^~ z|Fcdd?TXc&Zu0qbqxsL^^tqSV?k7{dp9?=>I^Pc898kMUH!NQ?RvINS!uYx7{Ml6O>t){Oay#E+<7Z&~j}J{m_@-*zR)LDXMg`SLZBg#PKApzuaL$ zmg&u6op(v0ux06c@tb5{uwLIx_A9GTRBlZVtE0P(4MAClOW5&i?gM4V2FU;A#rd}XC9)eeKGPqWjOSV+`!y!a|4ds21p>$Ob+fi`mGLcihM4x2ELF>+^)w;p zcXP93BPr)xl5x7%%i3FRI$Fr0FbfidVdQ zR7wxi9f+w_Il<4#JyU26Sl&umqsrr_Z}Xv$@_Hes%3-~mrz3N~KaWlKzxGA+WDMy= z<(yM>j-ix$12zNxKIA9tR5xVvfF}OrJ)C&7lo`j(?En1Y5xt#dF7b834=TDDs{L5_ zI~#X(;g8O=H+1`QU*z6--zZ7`xAB^?OnNLnJJuk$npqn=O+5KWh%3L~CjG;%j4oJj zHgLM~h22dqJ7|CZ2b;pRI9YypKx?<}kn=k}K-Z0gyKg8LGr3cjrmT6IL5|b6U$oV9 zWD|OlTf$!E(EQxj{|4Pd>8r|uqwmdQ*vbjr=U1s`kl@d=B5ypy*wKKjip9l21dauK z1af!exyT8TKOxtEUk)D@{vLcI_z{?MF+V|e+vk^h2K zW3$NAw@2smxgiOCh(7QIp+MQdoRijbD!DS^{qbjs<)Ka(Cpp z$O(}@A=iLk4j&f&9(*JC5twr^KS7s6??Ff4eZswm^Mvm-ZvQHk)wek=`>#cP()HGihKJV>0s_&)XZjuh>k zJH!M}HYSp$Jg;0+a%5GG60av+^W&|;T1s(GE$hqpNICQRmpgoULdQLXEYHW^BXQjg zk+1cW=*98P=6m~#m|)X^-K`zB=<>UR>vE*d(!M6!?KfsTVnbCgC;s%jPp8a;R^+!{ zp_WF?*L*^;lokG-cW3x_3q0HXueWTvNNcoKwv79-nDrdhONiBa&c<#s9(A@cZP?!m zyshIC&tzsb_S6T2u+Y1)igE!(bY^Av_=wAC|8Zg9jli*hk3jB@JQq13@+agP@XO)D z!ry~$1U~|EF6Jlba_Bwi2)s|Y_i&!@oi<&%xzFmV5!Yt1c+$4sGPck?XNT9P99D3s zWO2cf5ZXH6-;nX-5-mM-FZ#Dq7VC{UC~UH~goO-gxpfGQ=S_UFx@C9V0^ZwILuXxE ziML`){zfyvG$YVV=lIymdC-S^%3@`4|hg~bY9J^_Z_w-SWVAGrEKV9tDyhTfGrHAK$6~z%RW?0gy9UrHR4FAIH zRc@);e(xf|dpcjje_y1ok8<~Qq9fR^t{^u<<9H^gl7BcpynxiMXvbEnrO?H}lJrLv zaqQ}*bxKw?r&;kDF7)H6Lo_L7{PG>LCu!sxxs{p^TxfJ`_%Xj>KjQJW`6KFI9%YaJ zsV7NE+0ZTJmV+Cuc9EW=qLS8un);Yo0o}PqQ8Ob=LR1x zWObc~Ov`-^)5g8B8~f&7B>hF^f9!QHF&i8ICP_gb+HG^qPqjMoKh6^T9=I^@M&MY$ zM<91co{O9i`4e&t_~r0n;qSpWf**l77xNQzIrJWM1l}jydpJ+{P6DcyT}qQQxPccV zX-uga&xyWFJ^C$|HSw>S5$gVvYNC&nUR(2>6%>T^oQu$-hf{uSmiT;#y^YCWdE`vv z^Q2!TJ&$GKZ)U9+SmeX<`$Pt3#+{~)=qX2&2VI%trC5x#7w;n9s5Gm`-WN%E zPTzi^1%B*}&5{qshGDewqqHEm=?q&R@iE3;`!ofNNU9as>_;M}&8uX>tmx5J>;B{t zTQ=7&$i64Xg}}ptvjo2fE)2X8I2P~`$la0WA}2)tgj@rDIeb|7d+?3mM_|sy`~+PN zy$2nE_X+nN&J(^>qs50VE*W7>B6BV#Y99_nysNe3kc@xNV;=5j0J#l0cub)l){KJ_UK2S5ed*&?TuGQVR^86_KnwKb* 
zI^aRM)0R$~(cwe4Y`#Zb{j{G%Z*D67Bk9Xl$NC(ToMOzxavK&&N1bH>^Ha@kyScHm zsWt9eSGQ7dS+mxWY3FF3<_hbnwUKPu>*CestDfmTb84=7^wygR)mud+`0b%@A7*X& zeL$76yT2s`tvo?Jms+$JW~`^ZtNCVqXm$RNTLupc&Jz3{xG?ZW;8?&%Aa_Tei<}Vo z6LJmsaOu;a-hXKJNH{C%lmdBt=cMlb#3qkCYZm%YrAwY?SK5_ z-P^w>Sru=~O`j8S?9 zG$Sln#q)Lc`ws@PHt`1!R;Vsw+?q?rle&(xtJls)#Wo$LnwCm!!PCBMOiR|hE$0t1 zorX0DmcCvTEm(IdB+i~bKbmB8#KN1-yzz6guk~hs?p`9fyWu32`>LaNy%`BEmppDV z;xc{wQ)&3GGKh^h`*G@{YmQ_;*SOC*&5xe#_jna$XTmnPi>W@{@AzL20DL*PW$>`z zEWz)A3j=Qijs<)Ka(Cpp$O(}@A=iLk4j&f&9(*JC5twr^KS7s6??Ff4eZswm^Mvn| z(Y3`ymDA&NPA?srP;iCay>lk2>KwGa_Oxn7I9*CoQ;odo&D1<)=G<<- zKyS+gOUAx8C)qD|=Xy?dX48zsZN_d0VP=0n7N?J}Wd|>Hl|>K#elz+S=mCH)2e%9! z7MvycJ#b;*jli*hk3jB@JQq13@+agP@XO)D{^#%g&o_b}fjJlR6LdNB9&`lWC)|5D zPxwx2t=w}5FaD!{dL!Fp%>UAf;U#%;<^0_ELlKF_KR=LY$*x!4&uZBDNwujj9}M^Q zMMdf?CS}kf*$2GOs%P121;3gf&qJuRW>-;mL>W7DwcdZ)yIfilm9WNqIKR}F&Jap} za)||e`IeS0H z1iuF^47?FI7Vr_s-I3=aCq({)TmybNd|3E<@QvU{V9v$-1YHij2OWX;3HKh(6TZ`y z5y7)2?UCnRjGpx2+&>l0(7f`&t>JlBH~+o8)TR=8xm)Mf?cfsntuni{q$QRW4fvfg zIq63QnXTRP@B7ozq`+lX+8N|)%G)8lJb{h&xUua{{S^|HKfRy7@CYkUYWI*B9n8{3 z9{Vx5>Ii##x^JZI#Ow&_vIBE|JQ#)uLgY$^Z>w@gIfj<3(gY!9=I^@M&MY$ zM<91co{O9i`4e&t_~r0n;qSpWf**l77xNQzIrJWM1l}jydpJ+{PKt+D{u#*B*IES7tiZvBO6pJf@=jD~PdpY)E0XR@;Yv|5EgZT6Kym zJ-T($H81Aae?2wypU|s8Ujsb=@a5o^!NY>H1iuF^47?FI7Vr_s-I3=aCq({)TmybN zd|3E<@QvU{V9v$-1YHij2OWX;3HKh(6TVZ7*{`+pCW&&FG80_CZEPd+g*w6&UGK@^ z_QnTi#Y)+(ef(da@A9L|BPPsWUAc~Q{l>&-MmsXok^>ee*G01UZO)f&Cxp?@Z=F{% z4!E;zk46>gtSFyx`a-E`09&}RBc=as36LjAE;=rx$!z58885wys?f>@)|JPGP{|UVs^fk}} z0ACJn89XdFOYnQ(!oVAWV*ww5+#Pu?azf-!$Ti@X!-s{x2j2*O1m;}KPtfJid(aVh zpK$NtJmEWO%XSE!+b_@QuIy5Jm_3tQZz^)v)4GUuHrncFEHWd$r( zDdfl=5g+|XLhloOLiE(oe?qSYeGT*gz?Xwt1`i9)68s*xFz`m;SinahcSoL!oDlgF zat-+9@L}Qa!8d{*fjJlR6LdNB9&`lWC)|5DPxwyL^1`ZX=Id~qSBy_oNZU62d$Gs< zt##5RGu`)=b{#%!NF>{&jqfbmIo?(}M*SqoH2(;&8}Gz6gqQ96AP`Nrj|F#0&7zih^M2+z072#ZA-Fel={-F>G`Yb23FIp0)-t`)t1!XRFmtTd5pO^RS(1$ zZe$C7P16$UKf@N(btWX)pQhG>+{<)J_y792=zXG3h@KkyPw3U4uYn!__;PT|;9u!tw zuOBEWY~zoc)&uNS!{Y+AiaN$uknYz#!GgNd*H&r8-Zg1AA#H*c`kB7i2z~_ST+C0< z<d*H&r8-Zg1AA#H*c`kB7i z2z~_ST+C0<<&eL;#Be)ptk{Ln~ zpXlJVjYARw@932M9IwnAexBT7*IOwmO)Pk}de`&Y4&-HD9cj_ClWC>uJ(HF;A?0zu zWYl?jOzn-{@~H~8tar)NM28+TYBDKw^piFi{(a~6W&Y_FENG}MCVb=G|Mn7~?~Wcd z`nl+RqECpP8v0M@)u69|9su}qaLeFf!C8Xe0~ZF~2pkLe2;}a_bCDAwe?qPSzZ^a+ z{5|+a@FOtiVt#@yhu(vZ!25)I59bNrX>Hi)X<8R0IdgyKL)SmHlJ^7YZ{`cyS>Bsd zizW`0k?#zp`6mXy(;RJ)aerKfxtv>zKcp?2Otx(yW9AC&W5&fvF8e+hu=BnGE8a>3 zu~vm~6*-+8ORwB#x|0FKKus;HO3D9>(j~e}4 z^ghuiL{AOFl7%^9HWWkow6se;Q>R7Oc+n<>p#{9l=K?yyMd&J{i+lhd+X{8OP{>w@%2WL_ZRm zGJf~>o8~O7R^jcxf=zV#XTZQ0>y@;7%P56u>Sq7#`M~}N>?J_o9X)FFbJ6=mpAbDY z^qE{EQOj==kbdk^Oc->GUtf5)Wn8eG)&?uH2xPgrYG?c_qCUYfmCPh5HbAUpnV z-L$(#yD0I-hl};Ewvhb8iCrOthK1>abM#;Qv9BOyw}f? 
zuCH8fydw1=OPI2L!!Z?8!af%4`M~}N>?J_o9X)FFbJ6=mpAbDY^qc+AVX6=gAK|%8TRI2 z9}D(;V1ESm5}@yn9yR*8=zXG3h@KkyPw3U4uYn!__;PT|;9tx=Q)X~pX;~wS zlV!LaJESAkw%M|i)x{>t&RZzSK+(E>wK3a9k$ZYHjoHbNoI{_o^=ZA$p{L)sttN@c zpZk(unzFM}N?&~yblEM%#}P&AB?MdyvNH&HDU2 zz6bx--w_eztzm8Rg|`*3-maprO1U}oc4#QqzPo}f>Kb2&^u8tgg-_RPiuuT@qz|v% z-^|a$zA)^e!hR#{&A~nv?D@d{2<#<5-yJ<_^mEbsM4u2nHT0j*t3h7_Jpl0K;FiI| zg0lp_2QCb}5jYm`5y;(<=OQOW{)AiuemQ(t_nGPh!NGaF1u&q+S?ouutNhc0#Sb1s##GUH8NlY6q& z+Q~24SZGa#@3b++ZbFARIAu-^!KbFhyEdp@u~0(%M2cSnyJ{ao}u(I-St4gDwd zYS7m}4*+~QxMlFL;4H!KfeQm~1dauK1af!exyT8TKOxtEUk)D@{vLcI_z{?MF+V|< zL+?RH;C;fqhx3H*w0pc(>L_U)PP*lS=RCd_v}|1IlfjLj*$S_*Lh}m$Fp0vnE{hHK z$i@D`1+J%*?G;_~*~q<(VgDZXx?x`!_E2HJ5%%U_9}D(;V1ESm5}@yn9yR*8=zXG3 zh@KkyPw3U4uYn!__;PT|;9xO+{*h7W=M%bH!eJt4Xf&CHKOMt#RderFWqW6hD zA$n@)KcQEHz6N>#;LE`+gNFrY34RY;7|(x6Yf2nCwwQ#QLAkJkEinv=lc8KxY-a{DJ7DmQVI@^hcnV0C{)hsFBY_?h|=JG$ zho1|d5dIT<4d~_2VWICqH-a94Js0~E=5ow?m?Q9g!n=q2gmb$7BJAqR{cBh*vM-PO za%j-kp*OjXe;uNB7Ks;6$MpT{9YvoddJ@sUhh8`Ig`tP)U%%1+dUMdnf}RibN1&Gg zd3WTfk}Bk+B~yNCOPa~jaP=huIBJ*&C!eU4f!YJ zYLM4J4gh>PxMlFL;4H!KfeQm~1dauK1blb+x$p_$Kf%|4UJe}=`W|#6=n>d+u|Huh z$GnF*0^cXRd$>_&so8;Elks zfRBLh4nG$@A^a!!8qmw3!$RMKZUj97doK1T%;lK(Fh}6~gm(}33FlOJtnTkl0SOl0 zk%-O*vYM>ohSDwK(?wb5G|c|w9u#9me&+oXdRC5U@Q+lNiJi_`Gh@Szl0(y3fj^fh zZ~36Y>=#fnuuZ^5mw6VGSL*{3AD0VFsfzQ{=IqNJ51K60bVv$rdLcqCR$EBZu65@@2E`QpxVK#4z&6eSkPzb^Jm=>2+G#A2zgi-9qL(qT76Lc5 zB6%2g+`8$dv^cZHp*}L`_hKd@D^`13*iZV+t2cD5bvxadTske{(i@`wwMw<*OD)ZJ zIMKb}br5~sv_*Tc?*?(-XrE_)Ifl%bohf0uo|`4naDg+4J^xG_?qI!-=3ug}R2&=2 z{!A7((LGnsQF_duXN7OnAACrth|! zB)d9hJT9mnC%5mP5t-wVNw0W1E%S&hAn`p;Ro<<+w!i(_l%<1EX6j` zz5UGga+PVUI&*O$+oONzh`pS?+0t)xNv(-T_1(ABcFI;V#VVGlB`p6^KR!&DkHtZY z1jHE6qr=St!Xm8l{EKx8FSasPX$5(8I_y5IJti;pHjAezq>;lZ#ZSUuO1+e?cQ`6 z$d3?%J8er|%o-p(enKyYyXvWT#zCL8cZ|qN{+!mXo2_(tJcoB*4?9PfAN057(knV6 zL9)gxsgCw21;4hdVCcM+b%L{Z1(8?p);MmI2&B3@kAxk&97k`zEfp@j=SjlVOU=+?@6PZHl6z>8AuH727Xs-)uZa>b4E5SJ3<0X7j+!Tze$Dm zU2b@FJ)WMPFIKXBp*87VIbAAldm_QRhx>$c((L0~noy>}(%5i#PhIN*X2q7JT6=|s zSX(~?EaE*V!Z7K_lEg3mA!}!D{I&A?AL84yXn&AP9!cTX2|8J3L9>{{`qIK@sByQd z{%t)8=EL)CNs28cRCHx2eR6U>GwUhu>9?^0Om@$6f7do%mVos;g;Qta$e-+(xW63b zB+=yO`FqFGs7iO1Ox!3XZ9E!rC&nsht>R_V8B4mzP~Tc{jYCz$VABnWbykmwC5Lcc zu3SD1ySIqb^g|v!*}v}X@=Y@tmru{1$cOjPUKiptML*DxwL5t(2}m$pWqVs+XM7`j zbUhm?+0lHvj5Vf9Y^LaEvRkrUXo9#O3SDR2IgJ^6b!tS~_cIM%H{~V7J)6m5bM6~7 zC+Tl1^1b7EC)HV(BlzF17Gi#Z)9z$eJ`K2+a`;bM2`yQa6|h67nG{=nz4=_ej$WIi z|Ng5{I=Na=tJPnbL6yCqXyz*=l8F_EAD)v-BCM)yLxOgRRNMMZ&V6>@Y+P;$ul0o& zH2zvv{|%Kya&=^8kKCnpg6|XFJ=`aplXjWt3W(z#&=&zatv z%kUoFHF}Mwi>A4jCC3SUAVqI>=t)UzVV2gs;&{&~&FZb4x3}h3CUw`_(=BP4Lc-%N zP58gcrY$`2dY3(>vzGiSnRayZUpki~L~xfQ7xRh51}s(B?~LyBM?XJ?(Bzem%w0NvnSaQ464+Bef18CQGbAO=wSGe<(P^>E z_r1c!Y8hF$I@6?$s4no2S-OskRp1a=I8TgyPP#EZR{7JJ)MXyx+%Y3GbimZid*VI0 z8Ru!eGuD@$tMBzT*^@)BXjYxJKKPbSwSQ+SYbn_j^|_@<=__4ioF$-lCxj&QyuW0! 
z+=F=QMJdl5Eu{AKo8pRxACTl$x6gT7i-^!$&ybbbsU$J_L?SD2=A-XPg@K3nrazsek>6EY zko`S3xvEiNI$TWqhwLr|sXe4TXEz%CI4{Y3@+y2GmexZ>bw5i!JEp-ni%PbK@{AJc z>!&0ew#l%#7Z&K5k1uAPYDpb@l{}3l9bmJE9dpqdQn-2~P zzpNw;3H}i!$*;)0K}lh$4Z-AmScgwsMgWb8pa1Uqgd@%0xRiI3ItObJuU>JAS}XaK z(!2Ye_y=<7scoj3>Il)--smTE;WM4F=j-|%K8u)&gEE78=lGcmN58K%9{oW?`&z>U z4LF(Uj9lPaTMnjSpYe{rViWX`aq*vqn0HkAtNp&Wb#H04VcYZ79~w!>6<5E=m7&ze zT2OU7B9o|gT`>!G>|+0(#l9=|V>*cQjWI5FiCeTj@l)F*mp8F{H~VLCXc{R#WIH^o z@c~`!T-B2H^C7XEVe77`oy_)I9O&Ww%_KChBV_OIEGjn@Bf0iPGsV1zIRf7&ynDD$ zIH$`sP7*tx9cNMhRZ2hUHr7m?fOzfwg!L>*<<+;#hnR=Df|~=m)tGCXnoezG6JvX< zNvk!afmE*JnqBoZgT`t}d-`%jlXpCheov#aNN&#i;5e&PVl9*GWTwr~8uL`Q`76KB ziG6eXZaKfE_Q(4zv*z?uA+GCdHvauc^$)J^o}2KICXK2YO3ZAcht+Q8pSsjdURye4 zUP+E49K6}#i@(Our$0Q_N-TOz9hcDU=67PKwWwfSkkWm+`9t$Z#j=M)?frP?XM;Dy zW=D3mgMTyGd?I_)(ld_e$6Y0_uEf)I;iXFHL+;dPW0B1C>+DG-)< z9YGaeDWt9Oy-Yvf*O?7BkpJYq-sjFueNpclfMqOZNTC*LaXzWOqLjSi^#%Wu~-rOaF7J?{5{ zX~xIdS-dAr$?Rp?7qgZ}5X|M6_b^A``-FE7_X+3ps!ijdOp$U&%fpHC6|N`*Hs2BOC}dx zi>Cr}(n*r?!BwAS>*#B~u9p#towP-%x^Uw3f7JHUfa{!ZiDc}c;z9+{U?RBh1fSut zC~~Is#;>nUwbbp~J^n|rW#s&m5!*#i63IBXyx{ruPBb^wL?bEW4t=JUyIZ^0oD8+M zPyHtsNf)Gf<`;QHl3AMHSlcwBiQqhO$HJ9%#P#%bC8-uuqMG;SgvD8Fy6x>r+kMGy zR8RAA_U=PhiH_U+9fca6q}X#pM`<#IRQzgvbB$A%TCL|h`ul1q#r}l39P=LL2z;OL z?%_V+oZgooI&1etnYGL&?tH+yO$>KXii=A2Vn)3@P#|})9J5q&x)---DG7-*4ohHV zlJ)|#H(PEOl8(D2CXJo(^olb_^{rhAWL)t>DXTStE`O=D)>mu><8!)d?y0@w^pw=d z0@~P0-A43_9FBGoe${D0-n*mep`Dq&>*dQysf7LHaDO6My>8X6OWh^p`nO$SuA5(x zXD&|7^*;;ACHal67uw@V#?+2O$K11s4gab#&J~3;^!3=L)OWAx>fi5;FDp0G$d<2G zXA-Q5-{{}7YmQseld?uOKiGX9`WxJWlWU5o=r#W|!_4jUcD>HLqB{0_R}_Blk#jIv z-m*lZG}(ooy)*WPd16d3r65;1qprCVeye-gT;Y{o8!^wNAvP^y?;30ZWo#rdRppWEgqy!KLWRS|?gQ zDJ@qf7)7z?Vt>M1j(HDr1invr_i&$ZPCQmThn*Equu32Hn-&&rXI;y8zRJH>ofWlL zzkX6UgN|}vS#Q{7LVRc6Xt={2PdR4HI{#PIl^m!uE9iKeO*77h2Ad9KkT-8^->n`i zWA``Zdb)RK(UNH`Yp=3%a~^7R#kyncxoh5V$0go6qIR{1Pa-^%mTo7XTeqq+XO_zF z?O^|X98D?Ryo^JJ;phtexX?R}Dy@v+9h>P)4J7r+(yejy@O0H(#|>(T)I?drx^jEE zaPI1x3W>!eX`m=&$Bk@yw_pK;L%5pJe z^ViR%-|rRDkg2QYKR+0ddr>z$drxE$&oRf3;}hAW@pRg;?MJhT-r}1t{?m#f4q{8U zv@6OnIq%YDAMg`r6fX2`wA;zYJk;alIcw@qZG`GaLm5XZmg(ueu`PhExM_Q6hfNj{ zx#1($EbBneZz-tNNJu9?q@IK)sIdRP{QQ~DJ5veI-S-VDyJP9c5cNZH!uM#-*9ID% zA3_9_odpY~Ig#_hwUd^YEXh`(4`B;Tc2N}`^Oc&(W~6fUxoN+uoapzqv}@%Bw!|sw z`(LjumL!O4Wup^#S(pS*5O*k)wO>e z$#PSc?V`dg(y5~xAHC-RDgU`nzRn|!oWF8-N#n6XdMK9btaU{OX_}!R_C3&_%-)i} za@qK1-PuwC_+@90DF=xJJY z^nfi{vSi7-yiGUANq1YDO{cCCx@pek*A`~9H@96gGv0)pb{kwEF5yhQlH5O*a$h0G z@3^nJH)k%nX1H_h%zy(_;{r>j%GsEz^ps|t+j5cY(CHmbTdGZJCd8FhH=H5jlS1O} z_|@s4>W)SGY^~{%Wiq!6iw_fzO!-Bti>?#sd(e%bM_|vz{)D+4^B(31e4p^{;XdJ< ztRzl@pR?0Q*>o+R)w2dA-TCq%UWQQI}KRtAsSQ}N=l~Rh)rs5B5Hbl zGxklElO1Azk}~_bnE>928+<35X+VOT`?RAkiPbro7n?nA&^o z`6MZtN`Bhxn5ht&PUVKTrAV{;l+PVB>~XfvBA@Q$`EpyNQ&kVXd7LZr$r-my6Hnoz zr2V$MvzzlHvQe-yp!}mZxt@Dz&C1yU|8!XBd(e%bM_|vz{)D+4^B(31e4p^{;XdJ< zbhKR_e7r5nl0WnM>%BFfNT$Ob>(^fxvadRs_gcv~5v`W{qV$A^)p%`LS~smH*SH7V zf)p!hk67tjha!e@4+a#c35QcY*ArKNxx3T7`(=#PN;4?8>b%37j_;z$t7{69lg<#A zifM-qeA!D|8`=-_j)S(T+Sd@Zpy~Ur8=;4^=8@*HO9YzRQ&@ zM(AX@YTwqhE^^dftG}t#h^{*K`%d9_HF+Jj%#`meyLYYiVfygqa(eik;+WdsSUO9p zKxbJ%CgJ|F$JAyhkv@C%C|rAQIH?IWef(rzICb$qvra2Di;nCl_cd=zCL*g9cI*ic zqn%D(7jBAs&<4Ld@7iz1kh@x{wYL-Fi1hMQzu58=`YG}$&xr4Ba!dFC*Rn@9$ihL< z^s(FVw1qz{xJB#UKfN3}Ec89-M$jX$=VE`tT#k7Ua|FImc=vFha86r}q~#8pZD+Mg zBna}puBFrLO$4q5zN6RT^@RL|LOlX)fQ|`hh)!k7(@k;$_&lmpZ-xK?xmBGk^Lzmmv6qddqv_uUjuqM zbXe$n(2bx+V9&+=gt;8^9_9#qpYZPCKH;2Rr(O;-Z4+f_bgG;@y8kWBDmm?&_KB0( zQ5hVhn*D@Kh7S%Ft0&T5XRB7op3I<4%Z!|qv}&nd^_2zRtrCdgr3XsnZUwpNsdxRQ zOaLtqc&70_?+oSWyMHEC&wz&JJh~fww~{WOz9d|GdN^4iP&BMBT26!cFXvS`KOsj1 
[GIT binary patch data omitted]

literal 0
HcmV?d00001

diff --git a/examples/hessian/data/H10C5N2O/type.raw b/examples/hessian/data/H10C5N2O/type.raw
new file mode 100644
index 0000000000..034f24e9c3
--- /dev/null
+++ b/examples/hessian/data/H10C5N2O/type.raw
@@ -0,0 +1,18 @@
+0
+0
+0
+2
+0
+0
+2
+3
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
diff --git a/examples/hessian/data/H10C5N2O/type_map.raw b/examples/hessian/data/H10C5N2O/type_map.raw
new file mode 100644
index 0000000000..5d0a0b4b31
--- /dev/null
+++ b/examples/hessian/data/H10C5N2O/type_map.raw
@@ -0,0 +1,4 @@
+C
+H
+N
+O
diff --git a/examples/hessian/data/H8C4N2O/set.000/box.npy b/examples/hessian/data/H8C4N2O/set.000/box.npy
new file mode 100644
index 0000000000000000000000000000000000000000..9fbff5b5c4837fd77cd99c79ee0ee0203b5e3809
GIT binary patch
literal 920
[binary data omitted]

literal 0
HcmV?d00001

diff --git a/examples/hessian/data/H8C4N2O/set.000/energy.npy b/examples/hessian/data/H8C4N2O/set.000/energy.npy
new file mode 100644
index 0000000000000000000000000000000000000000..4761a45a4183c19d1a7d6c87d6e2777bdd3aad9a
GIT binary patch
literal 216
[binary data omitted]

literal 0
HcmV?d00001

diff --git a/examples/hessian/data/H8C4N2O/set.000/force.npy b/examples/hessian/data/H8C4N2O/set.000/force.npy
new file mode 100644
index 0000000000000000000000000000000000000000..c69ac3552d881cb46f45e099b3cae1cd2727553e
GIT binary patch
literal 4088
[binary data omitted]
zsSkPGzeQ`AzBHd9TXK56>aE3ppVPUYowELV#8aEHv4Z=Xt4Lw$s^FJHZS=VBCy(!u z1!Qqo^WDMEO(gZrzuf}opOLoU2dZOp%V@FTlzYYts%c8lm3qG?J|x)0+D*_XmCgv} z+H-d}^*{Z9_YLzG^8xo4=i8%eH{(v22J>!;pzxAUVyvR(yhN87gES)cN3H7a*(@pD z*(sO8%|=a~^S1BTbEA4 zWrZhSoQSWYY=6@atp}&bMPuR50{7oi&s((|60T!3zcyNYIH8i-Ec!J4uTCG~eI=_| z+^Eb-?~!s-wVK87hMx34;5v&jii(kJI3~c3*=|?aK<6J&xNxiBFJ_A%#@;xCr8- z`StWg&rA~WZC>^7d&Ly`4Eh1@8|E+O1MV-*H?mrFZiba6{oawEg4;u}zNQM6CX!S>3irBsHmS%T}jC5+rjk!~c&zxhFVfCX*6QmR9q* z`F@C^k$-eL|8<0ru29`yfjWgmyeUD!=fX?k-Cv`&E|!B8nGq&7@}`669dne`5$++0 z#yLKfyno2)f6`=r&Ro{CV@-8!NjFv7el<8~u@JM?t1G5(mjv5Wx96yV8W*cIRCDIy zSUOqKB&(8sxs*JwemYD3y8nOr7y1nP0q+~;FXjX8FV5HS_xGa?tTyxE!%m*V_P=Pn z^kR+W7Bw_(Ah$*^burug;F9HS1y0tCZj|C5>)F`30u{R_@4wV#n~H{VP9{}` z)NAT~nEXzgI@73`K<3oAU%KLPliv0WjvqZ9N15Wc9jdJX^!u&Q6;>9Z#IW-C(`}B) zM0CkMF#JZ zexlCLa9*!XOqk3oR*+T6)-z?!G-(vHpMP}Uv`PDxOA_dHWe1hBx?U5k6tPT z=$t7fQw6$(#yuLysFjk)YU4?r;MO@K_WT%)lQ8T5Ue-_T1#Vt)`|^hps~3NQ!%~Rn zIukWDi&tb@{n)%-p$1YU$P@Tr_YLBG(rNvczkS4WxybDK!mo+l1##Y1@rCU3hlfqH zEf%sXQu-TCUK%0a9sRxtrY~g!&#RAIDfmRJ7kyqsY#KpNk=jV}S z{D#X*E?gk+Z}1n;ztCsU4|v}&e=#3$e{sGY?YjjhCim2mp3|f^7Z>U5}iOBAr zs365%WL1r=Naxp9B3qaqe&=N&S@p_4FL7lD$r8`md2ALJyW>*)uZP=z(XqVetM>PD zv0p0=yxu7vPNrFV`584&?&Z$sR@A;2L>@_gGUrnkW!JUobx&V7m34U0xN^;x0dnPF za@s*d0Y)~{bIXh-F*f3L$bQNH*CWBlZ%;j%Pd8O&5B_M*q#-}MI1ko`{O6D1-{3Ex zfB(~G|I-h6-!OkMA8>zhzDXzI+%J9J#UyNrvLAR6K<2dVoasH4B_VGuq{Dw^(c3dV z(w&DZX|S|`;7C{5#yo*-pLf^rX0P-7BeH`YCU% z_V$2$TEfM5c2wmJ$<*kvR7fbGN%Q7QUTM2crB3mD+q?1_ZM5(0$>zLD=Z@^-@ST%I z)E3^lv^+GP@>ox?;o1{Noj# zU}o^-@2o94e`50Q)}^kq<9HlI-#`E6Vd4Lr&i1@#^e6o}k=2mna+{h@veG!jeK-q9 zMBD0Z`Ypw@L-P5>YTaB~FzvR`$J}E2>5oUQj8GtDI81WB51t3+P|yGw27rZOkPj^G@d!1 ziT*U+jDtIeXz=+Tk^GrLEF}Z8o@}WiNn-)=YnGSO6bo7N6_Pb%UuucSoYKjhpxuD8 zAWN0n+lXh}2#%tUG|!(APK=_`M`DehOs}9D4vH$jT$x8(x10UyFgr*s;sQ50?Xn<- z27LiX7Wh!d!H*KHj~s~Oo*PZG4n+{#&wIp@CX(nC%_9xmj{>tfvT5vDX@gz=^6B#ZkIU)1I4bV! 
z>bKr5h=!h!m$s_cq5LI{&t&2Q|I5#i?;wAGKZbvUzkvRQK7)S1`-b_8`GEV2^LG2z3E`h zW2&-X_(|3SSCYSf$86E%SLwdN6=xqV@Fy=)?>j4RaHP4uy;NE6F z?t#-mXdIIPxxc`;Lv$-m$J^axQAZ&*KApG{KUJ93NX zPGae9Aw<{!NPQvXzcx&4CMAaGkzvL>*V5jRS!5MQ;M^Lr>eqo@AL}$)?pgNK ztSXE4**tu6$pU3O30H9QS$3&KfO+N z-?v`A!^EEGx8J%Y>bRCXI{L*`bDKT6pO`7t{`M#tSTg^e#grrDYDda@#&8F%OcI<{ zIQcz}XY*8ITwLyx_xoyD%QI<2RIEHV=(``yD@aiYSI(vJ$JRWTIL6X){hGwb-?FLJ zSN*?w!C9nnXYTkJ%~X;kVCfy|ew#Y3d0tnMpFlJ?yLX7JHKDImi*CwV`u&%$BmYG{ ziTn)t4)TZpy*KB<(HqC{3gBD4!Bd(V_} zBbh0gMIkDRkkPK+`*&U6XaB;v&h_GoNt$q z;uUg2nwc|OEF|6J4)GpTI--8{Hr*8v(!6WWo0YyAWpA#JI+3x@r^Aw^C|M|2TN7Xq zLgvglCZ;@WM;xa17u`B^oV+&bG5c1RLQeUZ74wQYlGpmz<#KFy5W{arJH+D%Q4d^_ z^WgC@;t_Jaikruq3TXwdSF5^7dX-MtO^HpRc7adAdVU7by3bO9S6wgCIXnCt)wRyk zDeYGNjUf@_LKBz0lV2e574V;XNjsO`S(6^C?c+}vGxZs@PtwWx)D0`-(_KkX>zUyD zQ{5@I)QiR>-1( z?`kn7AZ>D;p-LZBeRIL%jBppNl}{Dw@k}EZhBuW-@RX9uzX$9jt>a10U~9_Iv0X%6 zYoC7{6H0cdRmwJh^&xua9xi*eLzm=5=SQtJa3?$tDhE22xROPNSDIeQ+@%l@OhE*W2ueTqCx%Yr!?E5;Ky(@Cx;%Kcz-aijq2*^B~|$Lkm99nyG&-kCv330 z;!0&c#>-jY((kGg>cZC^ZCKt(_&-K%x|vf&GYh&@xHsG*aXSUHPCv}1JyUXv>kA(K z>xa-^AYVuRi+mFK8S)+E5AetEZ}1n;ztCsU4|v}&e=#3$e{sHbzYkWk!ib}F2)lH>rw&tHCJhoeaktXM>Vt|?^g9Og$C+i_m@U%7g5vZAYVuRi+mFK z8S)+E5AetEZ}1n;ztCsU4|v}&e=#3$e{sH>n+5g_onOIdiKr_SS|2A7F|`eyN3YUb zSDVAtG#`-a4dEZ|esv{fvvM9eN(YknT{Sz-iieO3>#9xKD|d19u)S$FyMfNSxIbYM z<4F^?<;*;0?@J6koU2a=x)E*1n*qK5`HmwdD8s~lvMrt zF!?Cwd%ym}HQu%Tuc+lc%fkA7@2GR}rlpc$4@vV!yQLR6-;KRCYyLeg>p1$SRnBnN zF-z)sD5y2n_7b@)boI7iXxhJi6a5?dA@mo>*OC7spG1C!d_GobTIt7y1ovN;7wF3>B`d{7Nb_uPNOM=4LGt&MkcMyPo!&3HTXialSVl za+Uk{g(f<%Gt$3&c{0hE8WtQ8dWWOi9SEHbyzz!fucV~zlr`0 z{Sf*K(qM@j4L@qHiKis-q0jv3cKKP1A%Np?k?|3|l1|JJ(Z z7)wc_>%_V{FX^Io>%vP;EWH$4JbsHOn|u#jvB&$ydoq5aCbV~BFI~`;eoMXc4*4`+ zvejJ6p9pV%RO@ASouHpbe~f+;{Tuor^cTq2k^dr}M1F>R2l)g1G5j0+1@tfU8T13* zH_Ttm2i#wruh;LsYmS0T8QH$eGD3e}Q0Dv8Ew$I4(6`nLVtYCM%vL{UuFB&Knra~W ze!#<*ROL>5cpmIejEWzYXpUtNu{X9wMK@z?L_d(n=A7@;*eobFKj4 z_iglP=)$1ekIoTIkMJ2i7oCZ{?!D6HBCmh^1NwRN$LKfFzo8#Oe}Q}*`7iQGtFYK|kPq!~Dg3!2QMf#*Vs7_g*f+?Dqao$lT%&>B_8FJD@U+<(+(Y ztFh<{dbHuH)=Dx;b-FfMx^9`wY`&#ibx_%lx_dln=P9ZnX|J{ocqzBj_WDNxb~<*X zkadl45xP#;#pl&NGG_>xwAi3i;T*9|-_b2sQ9!lSeajXux=W?J^xi#tT0`c2Y+UoI zlqIvqd!K!iT(}!5c&(` z>&SnRPa;1$23W-5F10NvlcVA89Nj`UjInDub*wtxH%_!96B=;zTNqu)gT zhJFbB1@d*|zsM(%pCR8t{s4ar{|0{n{R@2t{ebrk^B400_ZR1DLVnNQZM2+ul31*5 zt$&y3ZT0%n6!wfLee@I@9(hZb<@#OsB;nNm{zJjP_cH0qA+h9-oPD;ANpb0aICVwrQWTPHIq#qFDWe;3E`#6!;zRCEy>>&!ay^zlr`0 z{Sf*K*;;M`yo*B4#~`UnblvDMW(mT4)&Ldr7>G~pB=qeKr@RI z8gE&hBwIa>&HJh3Lx){1N5!maBzI4?ZTU5Jjz;gkV{=lqk!mytAF|rkLcg0D6s%Wk zp%D+-#@*&y(Y4j`BxB+%-K1OOH9aVZfIkBt1%3y73HS%}^XQM!Z=!!gKZO1Q`8x7n zzi}`^2i}StA6a0SPumrOw>e_>?g5SyX_E)FU z-|{p3o^JKCX*-Qit66!`>?5`6`0%Z=i?37M9!1V+>Cg>JUee&!;MCj&ENwEVYFD}Qln(sW{rlmSJ6%0Bnx}cq zQTl#t^`+M_rwRBz@Mqwo!0&)B0snx09{n-;P4sW*htOXjUq}9nd=mK?@*U(4@W=3P z@E6d(&}YyOc;7I8F&}V$alXs<{`B72Ka3V0msKAr}h2SNuzGcIx~8eD~Oi*{u2W#f%M_dj-f{e zZ)n-kz@bseCb~Vfy*X`iCEZo2Z5A16Lvy5@4C;6I(BB7IC3W)x3HV9yec;c)M}gk~ zUjqIC{XF_(^qc74&<~-%K)#Or7x^UeGvqtSAK;JS-{3Exf1%HyAMn0m{$f7h{^ESU z8>PSgIA5GuE?l(mRIw1d?0Uq-d9kI0N6#VLr{o4*I#5&fc4i8R&EF(Da_0&W_ur(u zY9O0Tm3rvBq4Ej!)ca7Rw)Q-IYGENOxu=Z$4wx&LSIz0A9^{=mQkO+El`JBXC4IAYVuRi+mFK z8S)+E5C8n}|NI;L1@tfU8T13*H_Ttm2i#wr?=R`6J$h0MV>^|rTF-r$tW18-h#rcd zvqR2)DPDPv1XX2atbY|rsxSIyo%$9@;-)<}&*jRXa|BN6AI(2X?&L2yYvqwj4=p;f zV0m;YIdX;bvFE4+;wrv*6aOD?qHJ-%sp|Dfvgp>uND(-j>ex0*Q3`<7&*W51!FP^JeObD&<(R$IeG{dvO|R;q>&I7naZqD(r42 z@tZV!xl72h-0S3-?#t>GKg!8Si}1amr`e=*m2kQJE?;tBvvI7Z$2oFCMdwairZ)v& z4gME=D)>q8ec;c)M}gk~UjqIC{XF_(^qc74&<~-%K)#Or7x^UeGvqtSAK;JS-{3Ex 
zf1%HyAMn0m{$f7h{^ESqhb$YakFRG0b)#+-MQbw=ww)zDhU#qen}PnuD{I+=xt~pV zD>Yby{WA*^29+5NF^|_hH#=y6%i*(wX~L|mahP_nOgkN)eSXjFav?VM)1k?wQ+b)p zn&7Q>g?b41b@0{Tf5E4Mp9J3r{tSE+_#N;i;2+S>qd!K!iT(}!5c&(`>&SnRPa;1< zzJvS${uurZ{sQ_J`V9I3?;GYX<^%38&UgOS-FrG#Zf4ePkap|XJxG3g#r$@7pv!J) z^_KI^(`UyoPuZVZF3w0;<*Rikh_fflt97G(Phs1FaufDuj+5%^1CMlhvx-m!BcuLH+=L4F3jy0sRYo2K|8d4f7ZC0rwZ@Yd6(3&@Xc?b1>N3B|C2}(|@b! zz%+e6rt#?mmFU}wto*jmaY=jH>GqxDi-f-Qk%ex3lM{W5NKty~RK|Fm@Ofk0Dc{OHTYlfso*EU_klkH9|e8~d&yepRe}F%Re}lh({)Ik+e!%;N`HT60`-}6vzuK-yuGXB{JKERlU$4uok%%}k zr(By2)%b6$dCMF&VC;MEz!M&3;hyLdy%#3Px1+25vloxh?>^zL&gP7hZ(^2*mkKly z?6+Y50{a2r*TGkV{{^24eiD2i_%rZP;CH~6fPX+gkNz0_Ci*w@L+CG%uOt6OK8gGc z`3~|2_+$7t_zUP?=riaCyl zwb{QWGwfm4E*B@g$!v>AM_Afx5q8-@CaLN(9~+rB`;yFtiGTY;*l)r91@;5LuY<1! z{|i19{3Q53@Mqwo!0&)B0sru?pZ{NfjD8dS8~P#i7s%I<|0176eujJp`2+kh{2TlQ z^e^-o^aI{E%wNn0++UpU^@(dy8#)Y_Fr8UfqkLP)?@itkE!8vFa7t7Ke@U`a)Kc1W zuDqggp);iSmiN&%rSKVlroH>OpN0J)?6+Y50{a2r*TGkV{{^24eiD2i_%rZP;CH~6 zfPX+gkNz0_Ci*w@L+CG%uOt6OK8gGc`3~|2_+$7t_zUP?=riaCylq8ec;c)M}gk~UjqIC{XF_(^qc74&<~-%K)#Or7x^Ue zGvqtSAK;JS-{3Exf1%HyAMn0m{$f7h{^ER31y6oEW8MZPr7~M#aFHbQz;kb|b$$&s zpI#to9`}-1UmX4-m74!=zYzOx*w4cL5cXTJe}Vk~@ay2K!T*9!1wRSC5BwSUDDXSr zOTa&%pGSX;eiQv0`XTfe$k&noBA-NlhI|M41N<@k8~g?IFZ3Do1Ku~xU(5&GUz~6M z>Mw)+OXo7RORlFnnspQPiuDb)GcrktP=$}hkr9IZP3#w9{|){@H5%|E)n{M(Pk{wDSdvHyns zEbI?qzXkgj*be}|4!#=vFZfjOli>TnpMj48zXQGm`~&)V^vCEo(Z8V|LVtmL9r-Wv zN#tk9caT58AH%=FUqJstpFuz1eZ&04e8Bz1`L<_em#~s+nMoaEkz50k4EB$)AB+7> z>=$DH4f|QxAHseM_Ajs>0Dc{OHTYlfso*EU_klkH9|e8~d&yepRe}F%Re}lh({)Ik+e!%;N`HT60`-}5UIcQscxK5ZkcW91g<@$|G z+4ZpFWVZ-=aQdT?%kNlXzM$OWz~~#gvZ7*(eozy6oV>WyK`n>aTW^?h=9DJuB(+pp z>V_2SoG|vP*6$6WV>83foPJ1|+DxU1jB>iLpf2|H>R5WY$SF`RqmX1}-Pw?|F@c^| z-SC9|@};ItZ1*0ApY-AGm`O5@eC%-2Cd(&Mm1OmO8wsy%T+E|MgU*JA6C^M0`L;C^ zNmOSd?&n|T0e$MZG4!%`Abqzub;vT8C0{n5qB_!V4L2D?5V_wor?g-2pn4KhAFcT4NLSe3i2Ivs zK*yB6_PnXgqYu9{%^hsFp-ZMeit~|^VaKK|ox_tL$Q*v?vQGMf1Z(j$p}$>Af-SGp ztH>Ij#FX#eQfZSY%)A+u_53-$oLv%cdpK;lG^6<@wZ-=AFv0o0i|TpVuzmyM=pUsz zx1f&9V3vjc8hJz9*bO1qC)bmWb#a$wefJ^S6)izM0+Q_S;$6E;3g@wQ8M|GKf3%a& zZqL{4&876>TbW3znn@m;$*KlMWRWG##q#gt%Bb9VyYs*Aveaa+;Fp_+bLdlk^~KZm zd#LK6C4t5lt4OIzr&-~$T=Hboo~_!o1Jr+8^g=5}f_*-@Onb-XH{@CIp`bq*Rpdpb z@q6Ead~$F~;DHrQmxy2VC7JRy7G!-%z><~X*GToQaf4&}SBXvYiGp~WIO4^3RETt* zA=(~Gvx6FmmP>0AjznNP>>)7xvNC|FC}A!Dod zYzphROokU6viZJUkV*S=@kD2cGTZA#Uafwk%mPe0Ul2N>*s1Xw&&Gw}(dnuGxLvep`zD>R>4!$~W%!nIB9Gm5kL*_Po2vLyA zA+2s|p&6{$(#CJKfB}QS*>g9AJpwmCf-5K?(IAai9w>| zk+>Dr^z-26A_284G{^L3HLt*3$}7>{BOmI)*&~=DGuV?!_pfDRrq3!PmD@FSYURdB z^#0iYVn*)~r;r)fF23&~Qn3PhmMgfK57+XmH-9doFGVyOWHv_8)Ri}*>|U3UcP2BB zY5dG47HQjGmm7uA8Eqab&Ye3?#` zPulFMN8Q9&%m>_GobM`$+fIhMTN$2^j0cnImiVdcWVN6wiyiTSuiZsG~&zuU8Cf3oQ9na6&)ox8HzQHpsM zyF7cd{byo&>Cn0b`xmpicO>JZ=PYE6jlPY2I@3qT-}U8*PMILq32T4IAp9!nn zZNl}5JXt;Os>W@W_oJTfO;JLX0}&9T{n&zKh0hsGRV?f%Z~RrxknPrU(5&GU!1R!Tf-gc zUt5{&dmk0}TYe=?jh_Nf1WK^$T5PqaYfWVg_4*V7iXPGllj%a)=~Eb+yUD?$;u>tz zwoMv&uVd+!aC4R9FmYzl?D;}2?^d#fPDNFsg}jWLiR!IsJeut86^c%WClmI+Yf%M( zGbX5r%<8b-s8KSbKgq4-<#g6zsI4KlGltyTu6d}=;13z_P*L#!R3$rHH^F4M7^bleH=TfR6Leuhn>4%rwha#RFqbkla5=I|c@_TxKs@;Y> z@@%j1%lR^X|K2yuU(5&GU!3ombt{yI_UkfcZUGxt=G~wNjJCd*+E`0Y$eh^jHr7bR zYk99*b@4J=)Ga%gB)9h`vLk$@(3GbIod_$dtb8|v|Q7V}%pkTG;A?=hm zuCr(^qo2j6F6!G9O}9!KmhGLC!pRgwL5#B0%fKHVpM^!t}&E7@C|bI|k*@YI{k@K^dT z4n8|Tgl}E{SS-1K&E_3xS=q(KG$?z|e)_SI!>u6Jzi}`^2i}Q`<@q6gS zW5H}|?Y2sr!_VG5EO-6lhCaGT#ZD)EO&`s2%Mp0!l|k-{rmX#&@}3BLEs`4H{O4Rg zC%-iy{1xS2Uze;DEW|E7o7LRKsq|&0yrDh%VdW=e)w}iWbF>Cq0R&SXF zFY{vX3a^pH7*!{eP82TrLf0Es{Rn)QOr+}t!SWt;;^&${H}&dVk&m5 zcac*iO}9|WeAt&j=3Rf0p*6FJrq0@YqqFQLnO#?MaPHw;dP^=@O6p=3wOsA7Ys*o7 
zrulZ{TI2b3^lYY8c}i|1y`S7-@on{6y4_^)DT&N6y7#$x%W0Kd%KJ99#$?HR`fhy4 z;qk_A^z8i}v#hUANx8Ot8LWcC4i%DC$TS@!Hgo#~?j3jGUx2K|8d4f7ZC0rwZ@ zTVP!CQ-XgnvrW#{_~w>sI-*&~G@6!@d8ISWk6C>pBHwS@o^$4WXJkd{^P^3a28*la zcZzh=1N@%?euT7>Ri@Kz7Jbj67LP<~&&bbWZ&8~ie`XF-7e_hgBX@Zi=E||=l=HJ# zH`6e?xWVGZ+nxDC*~7h-}_+$&n{7~^q|wW zj~|eGX}az{i%-%E+h?XI+trhPu@PzCuqOSw$7jT8gOsaSRw{|HyeQkdfF+w=eEc1`x}3saK>tFYK|kPq!~Dg3!2QMf zQWyTC+pA5PAbM^>|KwlF>{9pG;HJeWJ8@fd`A=fU?8tw2GPs$PV_v7lZMwzQm+k-BFe?^n7hy?kkXN-&W+S!?*T=>z?JUvie)*M6e;RjO@L^$Ef%zpZ&|oI(2E zi>Utk6h?TPVRA+tbrn#?u zq0G+O^t~==uNu4EH+og|dwEuN>ez9cI0dG+Xu9u!zYIHg)O}Tz?n9EaN5aAOJuJ9F0S^fg-aKIBEdz8!&ogTH|Og+7CR!25>zi}`^2i}RHV$g<|Q zGG~skpTb0Vr!X3)G%o0Um15$DOf#=?eWQ^HIq#Lux6n1qZM~TODeM&+60o;xlz4iX zAM;vWOI8WU8Uz(zq7jFMcMkS-67}o6-Fr49Qi&ZJ&VSo$>E-w!OAn`hdidJuAM@Rc zsi{eP=JY)kgn3?)x=o>l=t_hp(2 z7h5%(Sz5u&&nQ^MDr9^XIIW<;SWo^Luix~5{xMnH&nr<*l;<^Q+C*hibK8r?Rm)w; zx|gZ*Z}mHq|0HIYd4AnUf46PF-YMWhGL`1@=sLQQgPV7fQH@p9^LeR%c5@ufRkhsn zFz7CA=H8c_xmcUPAH%=FUqJstpFuz1eZ&04e8Bz1`JPXZ=B#SzGE($_fv#;EahVph ze47<#XZ2!V_A*XxsPugL!u{+g(ibbNJo4ckkq=tu-`)9>)NHb9QT+0Pxc9{kE@*0? ztwK4qOE>4!1!BFrT2_Tr;~B5rWsP*Q&3mDupGPZsJ^fItu+0;q@ViCfMfM~5XypQx znLn&z!j?e?6ndTKAlY%#c)N2PU7~ApC3wyF}1i^M#ipQx_*BcKd-O zOCPGv9O`wWURL}K$w@iHd@Mm?QFsS6HS#(%vE@FQtMF~cnIm_IEYEH3k(^!Rr4g|# z6ZWHm|&WJLVSAK;JS-{3Ex zf1%HyAMn0m{$f7h{^ESYd3T7I3~XU$sLY<$y^Dv<%=ym8l-Oem>x=^pzdhTlWP?6AJ@z%+V z`{j%c5BASwrv@uj*=(E7atox0#I06fXJik%oe`SHw1}3xd8+i9G;0jb)=F$3To!%V zGlx0*Q0gudMO=NfYxa%njdLE8<*GW-3SQ5NUaao({f_n2c&g^9W{Xyu?zz+@(V~=! zZV7cU)tY=n6V{ortG_RnJ1L4jdydONRbh z*|Yn=b(JIEj4kKy0oFQ9*+&!8XhzG41iKH&c1d_N7v7|fP3 zWrjPqXEq(7@7T%y7+ zU+Sz)Z=m4@Y`%KRz8m+&0uu1tv1P@XdUAPfdU)v7A|kY*V90VI=R1fulk&D*`9#8R+1I2mi={&cwKjIC z_>lg#BgKA)FVQtzJ0G4s$l?7ZUzs{RJ4ct!Dl#;SJV_O#nu`{`yGYBTb~{`xT}M4j zW)&M4`uxi$k)I*oLH+=L4F3jy0sRYo2K|8d4f7ZC0rwZ@>wCUKGKNPX4#*yWU;72o_TXF zJ-K4-i;~?e)!Znlaz=R;LH>(;68RbO9pn%2$MA3P7tp`ZXV4FL-!OkMA8>zhzET$) zZ$DYJmFY;TR!h(pW-_whJ}`6>VWttw`_9(q{MWbUUL2u->gM=*DlW{#`;&@imtAvJA(I)OYTr$#ua5=Mx?J`#h*lTJDE&k zV5Zfwn^;oXA5!&CCfH5G%^2j<+R<86<2 z=O1dPInTEy9Guccb&Horz2AG4AYVuRi+mFK8S)+E5AetEZ}1n;ztCsU4|v}&e=#3$ ze{sI*BSDp#c8bjFjszYRea?6H(&}x`rbH0K?gPjBdw!D> zTq0rK@+$GaL^@!XGjp~=IvqT+WLA^^0QI&{73<-9NYW0e)^1vIgLHopyBd1vE^Vzo z_4!CeEB$&))xsu;lMBh_Z);4xP5ZyTQtaK=OzV?pPZOGapStbliuy9IgAA>koV=&@ zIZ3nb+?O^IOVJOZzd*i@{1^Eo@-yT+$RFU3;osmdpnsvypdawQVg6!1;Qr!#*R5|I zF^<{9ybyVQ?n8_KJ7#LL;Qo;b!hOvC)n@vR6x=Z~iSNmyJHm#7(@b7*zPp&{{H*N; zt-rAE?9b>1%BPbTZ4{P7=7#ur1Uy2==^t2+6s0doP z^lD_2At70JZnr1I#faO<@OcnXOt(}Ia+b6LP@`-DlOkK{ja&oG6?5}JH=X*mvp&vqj zfqWhLFY-y`XUKPuKfoWuzrkNX|3aTZKj3}C{Kb60{l)nzyqDKM+&hK&PwM70_bEk0 zeOu*?)}l+)aiLkhYF0PBa(Lv*{QYG#?uB$=$>4gzy=-#(yRsmvll61!U2Q-5N#jSy z_7qq0`pbjG%^nH#X!*#1ae@O?*)K46qAiZh=CV}3niEZIe0|MBR$rp8*M@xi(3nm` zM`rO$6m}5TI{!jH@iJ-5t-=k;DTY2@&j!lku;!ikbgux+D3HsM|o z=6L*OIOjiwH`e6JZYGZ1yLTEE#F10Ky#&+y14-xM+Qe0bcj)D+Bps@AoBrV&U2*I1 zK7#%j{U-W1^h4+`kgp^EMLvoA4EYZ72l!+7H~0(aU+6RF2fS~XznBlWzc^nTbM=v# zp|hFPV(Uf5pTASznhS@d&T_Mc%}Y;Ii?x!;zt(A;pdB>7xMsKENL!*yC${mg$rHjNDW`KW*K zyg+9)|98!Qw*`4GD!nyP_Xhb>q4x8Pfe-Col#pU|ej#aEouJQc7)3Po%I`-xpCGb_ zuIWUcK1Y{)5qa|<%bObCZvEq;v5BCcM}Lfd6a5?dA@mo>*OC7spG1C!d_GoNwrzropZgGZ=?!AHI1u@G#Fa;*CnR+v)C08C&1%$s~0z zZzNQRJ|JsGUhMs8oI-{RoSb|Q+^4c$Q(h1Ly+x+~wyiiP>_bD#3sv>1%gD{qK3}zs zK16?M%+-sNVu<&4@k3$z3ut%vv#-S+r>WIr;iZqI!>MxamFgE~42kLUxjgn!XNY~i z#e&u9mx;wM>3R3%k5PkHk&X1N9XUS#g?mqiCcV4%UEX4?z5n&yepRe}F%Re}lh({)Ik+e!%;N`HT60`-}5k+|$KHH!fg)vezB7YUeVu z`7)x!8Z}t&>Q`>x-(*s?DcSN${}q$pK0h2Z?p4r{^?v$cxd9}h!Pmz}Cy_QdF}|zv zQ>a;~#s0|-exyH1^{iF19Z{NLX1m)clN?cS)=~Gfr)obu?y);B(Ra+ZqK6mP5p_Ac 
z=l(Vq$i3suwktI~i1@Bu+j*{^qH;!RUO8c&Bu#jC@7pO(r2NXxU#3nwDEJca59sI7 zAEVzy|Au}D{RQ%MPt=^koZb+dCLLQkT@}4TsfHb&ul>36r2fDs zzJaZe=&vo2af6=<=-Ou|cxHO#QmxBxI%Ac~C|_&Cjb&MWq|2``_gUOQ@@)CbqxS{1 z$%_rA?e>|wlO$`SMv>56muJn7D{ zH1NcriHd0~l^M9@<+Ck^u0MbKz@rs;ME0ZW23;Y4Qn!)!yurFfWXZxc_bj>Blgh+! zIazTZ5}|74tPy&W8WzUn4t?HAqeOm0U7UN898C{tD-gCJoUhV*Umg^p;G@9rfG+|6 zfPNnRG5SsPZ|H~6Um#yc{)>DP`5E#ZWCI>YZ zms%}gZZ3W2_-=|i^B}Qy_n7xwW;FU(im&%OqIB<|!?e`{q~z1xk=LFdNOyLu>9;AD z$#=J_2c#05$?aHU`QR^iD3{lp^DFbt5W}V96pJk+th(BL z*#SEul5~H)vZgip6_Dva-`3_I-v|B-d=&T{@Fn0M(9fekM!$*v4gC=M3*_tn^56gE zlgQ7I?;wAGKZbvUzkvRQK7)S1`-b_8`GEV2^ED5eXg}7Y!uW9ZU%#|}Br9cRlq&On zA$NM)4XR}u$v*xe%dpp{h{vMAXEgNydHP@A%<1+I=;M~{cRga4kj*9MzismLC&2~t zRFlH3sr=4>qBTXq^kVI&kURaJ)VtzfuvVBQ2{5UT&mMK7A>Gve>LNvYw(es6FEy`! z{3Q53@Mqwo!0&)B0snx09{n-;P4sW*htOXjUq}9nd=mK?@*U(4@W=3P@E6d(&}YyO zc;7I8F&}V$alT6pvoiCqC^BKi8jB8jD6wlf_s`!>*o!2LQtiGF*OC7spG1C!eCJ>O@IQYH{|0{n{R@2t{ebrk^B400_ZR1@xclMx`^Br7!L>=E z=_1N(;u+z~%S<9^@?Y(JN{bWdp7N}IL(2z*s~}f9dy5}+@QMzqbG4-}GyYZxudyVo z$lZaejQup`%UD2VzauGH@lMxm)Rj~Fd&&$iIY6T{gKv099iq95mOb}ZU4#jlRA_0?)u(9=l)uFJ%l^a5 z>6N*1{40Yu67bdFf5E4Mp9J3r{tSE+_#N;i;2+S>qd!K!iT(}!5c&(`>&SnRPa;1< zzJvS${uurZ{sQ_J`V9I3?;GYX<^%38&R6qeNsk7lM3BC{f8TcshJK#&eKcJsSe~f+;{Tuor z^cTq2k^dr}M1F>R2l)g1G5j0+1@tfU8T13*H_Ttm2i#wr?_AH*V|BrV8JPEWOh#}L zlQ%2Bvrkl!hJBQ2%{UcEavBl?^zR1Ks@A~0#+^=dNPrW`#$6(%HZs}!rNc|7A}c1JmkH})XGegOD&@YUdd!KZ?s1m6e#415&$oqv4E|M&;=^XQM! zZ=!!gKZO1Q`8x7nzi}`^2i}Q6hd~v|gk&985 z`~EY4^SyGJXJaC+E-{3E;bKew?s&RlU1OYyn=3WdUm%WHMdiUS{1@;5LuY<1!|ND?%W$bt9Yehc<5upa<^9eg$TU+}5mC&BlDKLZ~Heg}LB_y_d!=#SBFqJKj_ zg#H5gI`UuSlgQ7I?;wAGKZbvUzkvRQK7)S1`-b_8`GEV2^Hn5l?d3pKCV4)_xA59sI7AEVzy|Au}D{RQ%MAYVuRi+mFK8S)+E5AetEZ}1n;ztCsU4|v}& ze=#3$e{sIOf!gaw2gDh57sa)I9`mx{x?j|%@y%ehPsTBlstq*yXx|doiXS8;+igg` zpNGNz8}_rXKZN}j>|bC%0Q@@mYVg0{Q^8My?*o4ZJ_`H}_!96B=;zTNqu)gThJFbB z1@d*|zsM(%pCR8t{s4ar{|0{n{R@2t{ebrk^B400_ZR1DBU-KyjUvnGf))1B6;QN|lzYzOx*w4cL5cXTJe}Vk~@ay2K!T*9!1wRSC z5BwSUDDXSrOTa&%pGSX;eiQv0`XTfe$k&noBA-NlhI|M41N<@k8~g?IFZ3Do1Ku~x zU(5&GU!3m+=P<|Q_lp=uZCn01lP=KdRdlM3BC{f8TcshJK#&eKcJsSe~f+;{Tuor^cTq2k^dr}M1F>R z2l)g1G5j0+1@tfU8T13*H_Ttm2i#wrZ`+26IVQ&mBW$GD_h6$m`zqC+`EB`yVm}u9 zo7gYJ{u}nQus?+T7VKYOKLGqX_-gRK;8Vd*g6{)=20jY>4)_xA59sI7AEVzy|Au}D z{RQ%M>=$DH4f|QxAHseM_Ajs>0Dc{OHTYlfso*EU_klkH9|e8~d&yepRe}F%Re}lh({)Ik+e!%;N`HT60`-}69o8rD?dXFfx zY)xUM+nEi_rTXE_C%4AP)B4~T_Af6p)*NwTn*U#_^4BKw)#`U-dyU4CBVS*UoUB8t z7JcP(eJPI&|H3j-u-Z#pve`;%Z%6YO z^|Upzo4@xh(a-rl_E+OA+5E85dBw3d8oGDKGOzljtYvk{lWC?3tn+!dIj4_|($e*H zTQf>TSi9Y-W<|3;QL9I*_Q<>wX4!_77ev-;vE~VD7s-^VGq}GvUyauCQ){C)GOC{w z-zsOmr^^N_4^^G$q(5iPSk2!vNUA=|mb^IQL)*U!%RCbgCsMNxG&;E^&?RlDj$PUH zbohf}rfPF3x%*{S$%T|OYJFkGBPD4m=7oS)tjFpmIOC5>MLM?4fV#$6`MU?5D3epX<7JhMV~_H%Vx-;Rkv-JLIVa`<_mV z_~_MonWd&&!S0Qs3hdR&=nbxi=dke(@n=Ojd($qChnF0rAL--yGmTqa7qUBc?E0t~ z%;8Fg4G&s>S7&&#qUTyU>9Uv)xW71Go*jN$CdqGNwr}Nd3(QiM_iW8=LC$|Wqu(t1 z*<0O14J}$5nS>nr{`0f-7h*bSZYBAf6;?$mJ&QJJeQKgp|2}!8Gp&?1a$>1~{gK&U$i*;UAuo{|B=PE3;TrLK zB>9Z6t5Zn>360$1==8&%Uf;9yY5d}R(#tjVoY!~+iCJI2@A&&sa^uy^q!qVP>16Mi zlT~TeMDS#WS>|{jX;S8I?LXQ>&wOdvccr?A81EPvT-I7hzI~{ZQa+GFZJl292R<#M z_96K_9{y$2;>v{SP(U&HR&!D3`ucLR+y7MXQT|fmJ>gutt{{)N$Sz?Tic9~^U(5&G zUz~6L$R^K^vc}BNwf~m)Owng~&B~=eq(#z8ahnz58%C*Us$m|RIZQ8Qso4KI&`h;A z2W%Id|AmC@SLxP?>?YzC#f5|EI?S;vJgfQM8n7Mno7MX5R2k--kB`9G$&BRD`H8MV zN-UE*kaRkTo7p?~_PE2Ee$trQG+cafKD%^a-#4YdO3X+=O+0Cz#=IdPFB@+krb&}c zl~QWC+3$jBtJ37}Qmd9D4jGg3$)6o@t9s44Xq)Hc_9a>AWY^y)#nSvTsvfcK(OdnS zctkRSE}t3~`%YH- zt9Eu??)xbo2Cgts=U+Gx)T;p4)KR9Q=7{`GsjMo5F4)ngU$S&XIkn%8eC7P2Sq z98~;CCTMAuF}ru&WAei}fv4QNj|?gL+z)!4M|t#%9-e(sNf(_yly&IAbJEQ#>AF|v 
z0Udq!sr}A_r{s3wj7g@M=jpac{X4o(ID3lWTf}7z8tF)_8h1_bJu=>1@7T8O7yZwm zpud-oi?K4(?DrG-L_25j)})oSk|})0ErL6$X=l{^NZr@Z=pWhgv*m)%i642%_n%xQ zy%lt-BJ6!K{qWOq+Uy!i&iOeAy=lqfeAnYHe`{6_g?_;MhWU&6fcuN{ecpXVqVuXK z^I+rD`~)RoW|tL>K5M4IUOi9{ozO1I_MWM(w4V4)3-i^R6VrbX8Ht2 zbX4YQ^~nb6D%L-1c0?YXk-K4dolgv{nX2!z{b)Kl7nULV|KFbxO%qGqbC8eKlV$?6 zUI{WA{lzZqkN-{!Y@Q6?vKS{^tL)n9A1W}9r+Dp|cdM6#p5k3NBdCkIe(${!Fsqc( zpZ5w6cG%DbU%fAH^+_ewk4ERcUUQiqY|(0p_rFCXPc9!e zpA=7+mLxg;E9Yq7+gNw^j7(ZwL^e+0{6{ZbaOid^=Ob*ojLG#>xn>?^cMIWQ;Owdw6uvC3#Ucdg)znzlYUi zt3+u~(fuBx@ZnxlSrUg^)^+%Ahd~87%oTLw;%E!89rs=S>+5U!aS42Izm=D< zD_0r}S+3GyWEM!g$&39(7Qc*p5$mGJhMX>wd)g+?j^Ar|xB6Z)J+eH#df7)F);wla zxPkj1b+TA4CHOg)u5{kpaB|`ixv^YsUB8|W?Hsk!)tg^JZP)q6oarm&aOWCQG^YEB~-yL^tOxS&`dEs&ypoN54z5H{DLR=$+7FwfDP=sPbo$ zyr1mKieuqqCtumt?FCKLbyoP*I{SL^@N35;PsdVn({a7j(qB)g^7p;l{uAf)p!3oT zwvI@$CA-I;s457uTe)YBo>C~IXOsl1s`C7)v0_u;(cWy1ZZ4Ngvz^S`I2_S;^P~>b znDl*Kr}h#yB5vaL*A^varlY&80XQ@?Ww%=jPic)Tdb+D^C z-abr54O`!A{(PU_Qt&cc`S2`p=PLVN8XCp<5!>UkSfvY8zC4P1^}a0fLNG;4yg8A) zH%Po7Di}x)CPZ#uHWo~N&Dl6-)(1DDxlWmkdZ$rU&E*CkTdvSQmPU!b9R3{s0{R#F z4Eh1@8|E+O1MV-*SL*4Y2G6;LjLpUk2c~WiWsZhRS|63+^qIbQJ4W8sQH_)ISd~^A zIjSx=`LJFdHTiVA^-yag6|J9pub;1q&U+M8>L~P_tT+?lNphrFi=W+%ww(V*9n(4c z>q@97qiNvo_v~9bsf-D+&^mFA8cpx+lytsDURl3iHnR2xCC(4R--$F*^G#J>pL71R zVZ1oDT;lRaI^#nB(ZjBvNsM6E=#O|`YLv$p#Jxp=4T{?MP+O6YwS1|4Kx^7$=9I_z zd#Mxm=`(-5wi%~V=pn<<5&yHdX=6LtB^>TR-{vG<*z!Ay^1j|gbeMe7)%(NubpJsz z=VQ2yuJAoVs{5b(A5Z7u&h_`daSdrmN{LVk5h4vLa$bqFREmaZ4-G1-vWkk56|%Bt z_KXtF3mHj8R+6k_L}aB>e(&r0{_f8o&~ z1-obliLZHVp>O|;!oR^^K>tFYK|kPq!~Dg3!2QMfHrw!>H8Id-Ql(Z3>FwZU>LNT1 zZgJk5pbKLX+cbJ;pQ6^c-40*K+anKmU;I~0@0#8_Vv_!Wo*Cs;meCy`(-&`c`yM$! zOx_mdJzgcp-kQg=Yr#c1cFXBI{{l9a5mz3ii2agj^z%cBBRt;ksang=DSsW>sZe2S zkV3>%mX|pv(W6~SA1_x6<#Vf~Nzor{OVwW!=cYl!UxA#hrH8G2n=+=d`UUiI{L(dS zgY4_s#YwWv$-Mk|Pu3?Au9>a3+(c^Vr@tznW6Bcg;2ptN9yjtxTEQ}h1?mMg|AXIo*&d-pqaTE0Mf0xLDj=fQf51O3LSwVkL$>;;tp{WbeqG zm#ginB#K{dF3G-{Om3#-h?N!8Q?YcjyG@E2sbJych#$c2Z8|^b&@6No}ByyVQ ztBx4Xf2ly%b2|!($?ESWS}vlGXl@bVsBeIMJ3tY`1K2Yd;cB z*VMe&vEo=9^*ui2&B-#B4)QBa+a2+mL~T#@bJ}o&fIq+=!@t2_K>tFYK|kPq!~Dg3 z!2QMfE-OiJbq(3Y^u&yZPR&@)ei}DjZgxtT%~M(SC4_# zF(AVEorgO2?kKg#^fMEx_M9h$t`EHGV-?7muY7B)Dtesh_QhY%2}xVgRZbZj`v2t6 z_*ub!6k`I(^_2r2u1)@=wfX(vfR_-<-*cw^`;!SOQanHBLBAk#-{HXriSkBz;?P{L zm(s07&f=JKZ&w{zz%EI&zBYxGJm2x$EVY}s*%+$-O&BHu1uZKCecCwR%N3gQST2Kf z>v-7sDc2M59q##y{f{$>*UcrHHSgR5Jg@J4vA^Y8LL0ld2m>m+w0^^;JVX$u+dgUoDeLAE!jD zGUVt0E~kJg^Jp~n%8`p3%E zFQdq=4`0o~J^oRDlqmFuE$zpE!1>_wH19LR@}}MRI3vvxu3f!A9%u#wZL^X*mN$(sXug_3F6bmpYciK!Rg)5oo$y!cZCkt6=-D6zp9DVx-vNJsKZbvU zzkvRQK7)S1`-b_8`GEV2^WAP*l>1RagGnFUKYsDmI`+_Amx?^j_rP_Gnr2Rv{h(I^ z#8a9*TB(ghtn{1FDU3yQ*ebE_66`qpxu8*6i8(K9PYt2ew|iMSot7_+9fSn}o<8_fqe)q-Sf) z6QnZ}iL#wf`Im`2%IlSC5+z@Lu1`|pMS$A7^m z!Oy^Vz#rg`;osmdpnsvypdawQVg6!1;Qr!#t$wEl8d_;Fk&lW?_U=()D<0%V+8In@ z4>CEt35C7nU_{|fu6JCFY2r8bx`rH+GU7MLExegccFXjh;8tSxnilfq_gWL)pZ0nt zf4!(DZ~Noxhw=%}uA8l^x?_mjLY+|UGGC%K^z(bc2EGIS0Dlbs27dwl3w;LtfcFja7xMx47w5Zr8vn#A5fx?z^M(jBbD83Evfnnj zl#&PazWhcnIK2GR)bvVkK9gtQDoSjm`Ph5}(L6r)w6P3Y<@5 zsn(4@a%%I7sIO~@vv_a?ovZotUOG*rddZ6~@3#!1>P4TOY#Z*8_nBnad)$^Xx*`R6 zV;MyBi_MuI3tc&SWaq~FZxU$tAE#433O7jpa0DNJrz1W4Wx-vg7uWvFFTmHqf59ig z&%k%UAK;JS-{3Exf1%HyAMn0m{$f7h{^ERvn%yTqwwqqsV=_Qb+764Plr3eA ztX*UMD(lIYpTX_(UcRR(vM(*WwaTgd?vQVZ+^^|X4TWoJ>mJcjpK!PR%p+RtGmz`| ztDRI12wa>QTTFY(w{0GIo)o1PfZ5c}v?M0e;~G!#j=5%aV<(S>il z$l32EDOc@c=pS_(dTK`mxe*+_dez2Q(zNu83zdKNUp|EV0(>3(7km=@415Rt0sa{N z4gLc97y1nP0q+~;FXjX8FV1(O_gQLxeR@HDZ5ZKE zD-@f*C7qO81teQL=aFhNY1*t)MPht=elSj9^w}fH>sGf4=)ykpqheFO(1ZRj9%sG} 
zr^f$=`d%9+6B&csIsBs|WJ$o$b-kUVL}}Wctn_7^d@d-It&3?Pd1+gE%Qc#b))tSD zz_Tw%!-%%1*wG-$ZS?g)3g@#`zA=`keq0SFn)kmd3a30I_eAS8x83!iTNpj*jbl&A zE5=g9T_Kk0)>?1M-=Fbc{)T)A`33ko_%HY*_!;;P_yhbg{2TlQ^e^-o^aI{E%wNn0 z++UoplkNL!^~U;4%$mHJ@wXf4)yM8@maoktb5EqTT11tQ3IEf3cueC-uhzZZ2YIDr z;q$8}hM%|5n7SwaWe3Yh)OV9OS0zdAD_p(Xeo}7;^KXW*H zNLS={*OO z*h3qmKAk@?pP{dB`|R5pOhzqJ@&(KsNQB_&lQ|i;sgOW{PT$+J^iR-}j*8g3G_~>+ z^LO(-vQ=EKd-~)fEpP3Mysdc2lrzy0ys z)Q%=BPlBI;?|?tRAH%=FUqJst zpFuz1eZ&04e8Bz1`4(%2ujjeHnRy;2@}~RJN|rz1ZCZ=+9rCABDy8hOIX#`P(r#I# z%GnQz+212^f+n34NpR1ypz5}vBZXzQ)GebiB{D6J7(Ke}6gQ?pbR50krQA797Tg-t zKk|`t`<$N6nKnO}W__LN^rkbF{t#O|`u1f!^*-I_D!AH(u&(ON zIs|SwB5F(5DvJ#cWcbsC3V~eBrp`p~!>^RnhfQelhu;@33M~CEpGSU-d=vQ_@*(6G z;OpSO;FI8I;5*oCqRc5N zYjVV?`__~h9^})ylJ11r-t^OPX1l6YI<-Eq+RM8}cFK7vSsQzu=SLXW%>F5AetEZ}1n;ztCsU4|v}&e=#3$e{sGe z&y&9g%5%Oqe{$6g)o31Oy44l4EkV=S5Sd5kEn=JK_^x}#6Wd#8inFcJCihArVkx(M zv+rkeaq9(#!W)_NVSPFOjMjC;FuQM(>T5?j;|;4{anhH(jhinwrXNHK{L+(``lV52 zmDfUh%(F@V${)Y_8-h6BZ_Rzz_UIwi{ww-Hpgxcoj|xnV+nz!c+$3HPJdCGVCOXeA z>ZMY;8R@=fG#$cK<$fUkr9f=_~8D_EWV(gYt|B&on$D__4#UYGV}4G#O!y{shX zcMgK+)Ja$E((mgM^mouNLH_~yJo01Yo5^xpRiFr_xtp5A=09BkRnYmpn zl4P`;Ffg*JrE8);T+^yIqp_D>=}s5cq>K3TXRp@vB-{mjsa<30q&`>QtmIP^ZS$|W zzFKz|z3hi>&M3_qGxC^Q#4Js=OkGXcxJWIs69DWR`c3o&Q01pbyIxi<44rxs@UMtDi4bO z8TwJ^@1S3T{sZ!Pc8p~;rveS@Z8KZWbwzt_D^K)QGu@|$87Yj(GUDN1KvtUsj}vlPQL9^Nl{tr zD|>Xj@l zCPv})i-*k#^vEW~=e!DT;8R@=fG#$cK<$fUkr9f=_~< zf$xAnz#qfE!CyfCLZ3lD;C;jV#eBg1#rgU^p7}26%}gf4x>D5s(hR0)rm;zB$P9Kc zoM-D^Zy%!b;dDW=NfOm;S6vc2EGIS0Dlbs27dwl3w;LtfcFja7xMx4 z7w7w|!>J*|WG%C9?CC*~Q*x|9L+{NTDRUa6JYcVT{T}g%e0g`~_lwkFmF11WKR4*R zWbN;YI#X_`~>-a|)|%;uA6PWw|`z0VQz2kUJ{-|3Oh>WV#gv(C_6S?a!x zXN>7W>vh*(-7zER|DvCY{v`T+=%1kr@t}w&@MG)#(f$W^VaeDZ%ZzW`qc{{^1}KLg(Ze}F%Re}lh({)Ik+e!%;N`HT60`-}7CePa?Gttrf0 z_jxYbew&M#z2o6q?GuaG&9fbB<-@~>OZlAp3j?CaP($nY{VGl*aAUHzv{fFF^V8Hn zxhsM0n5Xf0g46q274_Hh(xy+8Z{M}nXt82)_|nR285NZr9sa-mI{MY<|DvCY{v`T+ z=%1k z*C^-HG)hn79+8r`?9MtBUn1Yv*^y>jLL8+It=sLinrIciGq-!h`Tey2-w!~49sO$b zf6-4xe-iyZ^v}?bLVpMS67(OC&m%uZzKQ$|`4I98@OAKC@JaA9@E!06_+$7t_zUP? z=riaCyl_ZWp3^Nj{avx!qOuFJU(k(HbP8&R6aL%3z*!ab z|BHSq`jhDQp?`*c6#6^pm!SWEd>;8R@=fG#$cK<$fUkr9f=_~zo^Sex3e0$xo?YGj-~JHxTd;qD{Q&gW(XU4T7yVT9C(-Xi{|x;o^mouN zLH_~yJo01Yo5oS*gWAlUQRDt=vxIZ&WA1D*fy7qom1dO|jX>NsN$xP-++7 z0H^o!zx^!i4`IIr`xn>`Kz|+mYV?26Pep$c{XX>1(2qiY2mKQCACS)@KSsWZ{0;dK z@(b{F@L%vr@H6lo@CW!~_&4|q=wIkF=m)%Sn7^10xW72x7qcuX+W6KoMX@^E5l6oh zl{{%F{Tj~iwz%ZJSUl85M4GmEg^3i9+dc0dYjXCsu>XerEbI?qzXkgj*bhK|9sO$b zf6-4xe-iyZ^v}?bLVpMS67(OC&m%uZzKQ$|`4I98@OAKC@JaA9@E!06_+$7t_zUP? 
z=riaCyl3mz=X@9hTQtWSHzYzOx*w4cL5cXTJe}Vk~^w-g^M*kQ6RP-m&??e9# z{V4Qz&@Vy%0r@=gW8|C2-;fU>zW`qc{{^1}KLg(Ze}F%Re}lh({)Ik+e!%;N`HT60 z`-}5kc)8he{}W;6`(^v}S7P`WzjKrM!h{6=+mFTmCiV-l|Azf6>g$qIr!VUx z_Xp%vx2pf4dp1u}5BBF_Gb@tzeDrAK?3G^6c>TVRwsnZinfUsOGBrv&_dPG|X z4A}mrl5;aYold0m+3rd9mbLaY>aNeVn0cJsV}t+hAI&_hcMy5H_0uGFqD}woob4~j zijsF9PW*@`cydd~y34ai=UHW!2*@zv z&mTD5;_Tt-p47Ol=pMr9A-*myooq&?CR>Z^|GrDK`HnxO)$i$Ixd@RyqfoM>Bvh+) zjz2xscdtBs>@GR?r7?Kuc`Cj3F12dP#YYt9`?o&fm(GzW^{8vb4-aJ|TbMbe&V;xj(<&hMF;#`m46YF7M^ zrH>~$)!}$K*CL&i#60Dl_390wGdE2sDl4FW)GEhxd}cCU{j2=M*9x*ux9|Ns7|6?T z-V67UxnuOA;|1;?x(nEtF>$NKLQ|QFOrhmnN-G$#T(jTeHtX2lmZ+C|W=?0R`^RikLX+0md&Va^oy7w3C>ip2G?`W?)5$6;TubK@1tL=MCQYw~B0T zm~Jnq`;JuJS(Ya5JwP6qDhSu>{iU<{x1{@}{-HvvUI?GK`kVe;E4HfjN(+6+?Nhwl zp_aIZiQb$3u$U@6@}GXR{}b8%rNw013od4%{pLkOa!*M0GdIS!G=)0fT=IE{^058p zp4)iadD!JiA%b}>!t5&fTsHC!OQZHa&~Ojt{Jj((wH~|?K3AIH6a;G~wFG;8r%)y>k4pJ}{9ChTL@vDS_T3FRU(%q68(o#k1R8MX47 zsKsro*_CRg_jDIeVv`^3XS>3Q{Bd8yHwMQk<^%38 z&bO!Z^JZ1f9{t0r7DFKhoaz_)xA$1wCAo zXK{F>fO02271;H8l)Sn_c4u#YMWc$t!;~xs>CpvehcmA5vkTcPj$cfh$vM+)TZ%`_Op@#QX(%Azb`741eWYO6C?JX9De)vR!MIY{XsVA z?3Zq#;SU{tmmYpgZQsc@cqV01$ARj_ZJRU6V%I;LBS)j@q5%H3L(}Yt;N`8KWA=E` z==s~CCstaMTN4%^3sX_PYma~$>;s*BcE@Wlyi0A3inyEtzHarvE%|H>XE#VXbqzi{4N%3>Vy4LoBq;j$igDCXcW9 z{j)y)iex0n6#V#7NY0wSC>mlpebFt>k7t+XQH$H2CIVJPboJ?^U4nAqlq>9Qy_lgt zDPOWL*Qzp@+|)KtH(-=xo#UIJtRD zaf_9X*03}yOX@x+dM;;gT$-}Lc=jMos=2sGbM6A>kIf^`*L|XFmB%fy4n-jr?;GYX z<^%38&i6)Qv&=mEoy>&vYM!j!UucMG)y6i3HnL%#(Vxv?B5a#;Z>CXI2jOY*)l1|1 zMdw}ejj#0eqq!^idkwRmk^mG5Tt(C%#8g}ha>n9V!6 z?VdH{5U1+XMiOUl6P4*@8AE?7>2SG7QQF^b%GG7e`F>0^wdmg&Gju4Ns+)h8n`G-m zLIv$jeDm^2=eZk;+UIb-%QscZJ8)ewNo?uKZ&q<8OIvt?2Ni7S-CyfoUX76>Z@4?l zj7kIOv@iA}btzBiF=yeZr)NxPJ9&8HVVo67`4D2Jp%O@hUg+;Se&-R%2tHntr6s`{ zE+-5sEMi+o>fIVj5t zt@l?`Kedo)D=pgIwPzWtzi#r!XD(A%^%I6K%@PMmS7Y|cT^-!))-!RXMoU}idd;=n zn*&}^3$5hdjhuNktYq!#wcVF+8IM>rm*tS8 zq$N@`?HQ@7zr4t9IFj)GVt+@s#gV?f?HgZ)xe*U}bxF-Lm+9jTmm8mYn~?ZimAk_dD9;2)sjl97-5YmJnS4>kBcYY)sPzQ z==ePcKhb$^D}Uz}aWQ93yEtz>R!O1HpdawQVg6!1;Qr!#e`|i5{yu6KWBnl7L+k1S zHoo+VP#VYK)zu;V^G;>#KS=583URvblF>pO4cCl-+T(I^YkNP7kHo3)a3aE9HEb zq*+nfW%2>~HzhO6()~i*2 z{;81cJu7cIYd_~ZE4lkWht2&-1+{2OfUvM838n;&fT zbM>PY0Y;Br7-myRiPAVpm)E52evsJPv>GBB6&fxlmPt?N9ZS}E&XRuXPgg2ehSS1Z zmqpi#S5cGJQ(@Wr-_wj4FYnEL&%-DNMXl_7_m0~4%I~!O@RX#zUvem;zhzI#;%KfdtZ z%FLYJvG4Bqa%S2LshhpROPQ?ohvZJE&ShhQ_v{msctQDQm6%j%@H01KG{0_35NFiJ zmJMtuUdEoioYCcColP>TzrG$1*I=JrdONyWb2iK8a!BOH=2^_9)sCOa7j)1Aye}>n zmxR)qvR{K2dMUZnJ@EVM0nYdM`n(R0hc05zX{4kE$?&p*MLF`D>Ql&fN$vJUmM^Jo zgP!YColx>`)ea*q`!cfc-^ZOUM*>ODh7>jLxO&dHx%x4j{Y1Z2y_vr&>WJiNMZ3+` zHB|ba-jPejNhHO7#Ink_4%YO3ale9s$bLOZ%72mnO z=uUbj8MIuQDtc@b(;S|qx|SwdY$W6M{H&#*_Fh9_ohGz5%PW% zIX+_%oBsMwcXs`H_LuUH(BK!6>_S6_NqcX-r$Z^5yo`3$P;HS}IY~wlq+>#6UF4Sx z`n{&OK5cFUDU;Qa78`v|4wW?T*M9YsHgAk~{+e7%ycLhd2i3nMqbfEhW!Je;o{PG^ zPh0be;I0*}_C`-h*!@cW3!%wWxhT=Y?AZh2qH;gdK*5c+v^Hd)Kbc7Hq;@UxU>oRM zbzvt*&hM1LAH%=FUqJstpFuz1eZ&04e8Bz1`3_2^2~HDJWxj<^$@uc&C%x(RM?f=h zfLz$psPR63fM{xzCp;?UWn#O{p5^KPqbqfLoOen{vJ<=Ae;Nczv)f;cZoc|hfSqGH zJtS=YRCYx~wr<@}H8~bEy83=Ll=cKGO>->&mku+eR z)M$uYHu3wo>Fl%6=cMEOh4bG7a%tqi+S?({El*NdO?``+CAUx&y2D6gpLlUcKj=%Tr5!=Xme1pER182%0Z0{R#F z4Eh1@8|E+O1MV-*ciT0sTR&H-G1DifUd%qUl$j**&34_n+00(+_L4rOxh$Q&Jnpwc z2T?uVQMJ#kgQ`3(544*QN*#&q!O9M;^n zq3YVu2+4}b3bxIyBJYoB>Bg(((HU|5Mt0j1NM?`UfvUI=5^vVrm|GN2H~v;KJ}>`> zF7u43|IryiyKfY`nHavLW^x^EDR-i2PvS*`$oCJ)t)+pdf)-@ag_YX?;O_Fcvu>%F*jtL__<7B%+YM=ebcfcRukKy0oFQ9*+&!8XhzG41iKH&c1d{=1r-e|q7#W;q@ z72KC!&+@)%nKNhK0CDv>a;Wn`A6aoiF;cv(o2tmQ&EOVDBtGoDy+%H*Bzp4lJ}n=X z%v@01bHt{d-f1a%Ry3l{p3m=4ckEGNY8PfrQ7B2HdDkv+I|x*hYoimnmvm!D9fv_( 
zGk!#uUocGEusxc5Ji4iZSF)GX4YB3gAfz@N&?yz#AFl1ltydfiQA zyXp3c;pul9a>*Zc&G~8v@@e>dX?q9BH{^on$P1OV1{C}ZdYeNo@6Wio94ajZ#Qz`{7&?b_L)Y{3%7E`%jTyf{#Y}$ z(tAV0e>Dnq&S)lO55>)wS$NaAAz79MXWx`Mu? zII8K}bEK9wT+7}zD>#NoMBV0zDaoWQUvgx8TylwBW7hM3&)<+;K`-aqKMW#&E9ST> zcRnRgj+D6xj(U^A*+jANS_JvLf2W(?YJY0(p6xYU;YHFN4}>W{2qab1cwx3eDV4L> zacrR0?LYntJ_&vXz61UMe+>Tye*ygqeFpu2_YLzG^8xo4=Q~??}}DM}9{>aJ{v+z}2a9!zts`EZOugfO_24X#Q$cNF{G? ze}6=`jYw6V?{{)&BllJN^@Du>(8&3l+(WZd$krnX>Wf9)$qPrF_D4mT)Ub(moq3T( zS1sN9yM0y>xe)$(uCjMBm6*^_*kWKuhBj@Fdg4QghOt6vK}J5c+9~W;yR4Mhne-R; zoa(0Q#oC`vZiuF3ZDQ-HNCMqh@V7Ym{ad24v(a8^dlbFyI5&K~!Z}(Ir6XK4yM{3L zK1KCiO#Y9rga3k0f}er!fIq+=!@t2_K>tFYK|kPq!~Dg3!2QMf4(K?yS@v#VMlHB5 zuJN49$knO5DUco~`8LY;rM$;zMb5T;wN9@oUxG#1+R7+muzK(0&yKkyWWoG%Ig3iD znC`_(mnJ@v!ki_+HZw+PMTFws#@PqAmjbgagH-?C?%JwB7Os<-bD4==M0 z&rS!dJ+{P?^bGBr65;=haNBgeDBoR3uimTP(pSsHu1y|06Wq{A zW52&D&IkyjCMv_uO!hx|I`ooq{iMlkbVKY&dPyx!7Em$1G5n9(^;dq~@qL^INh^qJ zOnXfq%urogw_u3OIW+NLOKm4%eVX$2ooFVyL0wvoJvLNE$8!D4{T@`==;4N_!@)$u z$nDqQhr6`uO^bo(+(@#qYdG_$sS#1u8{k%{ktWJP&pKC2N0E-%R@tga_7wRL@(b{F z@L%vr@H6lo@CW!~_&4|q=wIkF=m)%Sn7^10xW72x4;PrUnX!afbR{*kD_W2dRY{%j z$S5au=Vs35TAM@5>^lc78+~ZywGY4Ze(|s;+W1Y`6EBHK#rc1!m&!?=p5fbj{oS-A zKJH9XJWI~rSe@iC`#E(hkufd0HB5gS>PxNN_l>648P2vY&7-%awRt1yIGv%LaNzLe z>8#+kSpMICJLv;YjVq~bkLjXM`_{OdhLT{B6EQ^_yvSR<{e`hDX|!GbLC$xp2-dT`*%{}J(1~46U#2FARFYa&lDLrKup(1 zByapNN{@?_?$pube9y9FkEhs)kJP!=RPbNoFcoeY$tqkkmCcKNoFC0sNQdH*o<&;s zlcWbWet#=}lYhBiuM2+cB^$hlB<6)R(~7K?Aj_gITI=50{zB<1F)4FjFRGbFC(BPU z+$)z)+*fyP)rj|?*G;-iUTcR??wMaK{)t7<6MZ}{I*s&b=Y`KMCUa6r?9Jo5zV^PQ zx6ZXaP!x-!$TyL{As<410lp6Y3qA>c2EGIS0Dlbs27dwl3w;LtfcFja7xMx47w0RI z&9hjA3NZQ~A5INO;%AoD3ir*;AD}VWE#GoprO`uL=Gk^no)BJ%;FCU_--*fYuC-ng zKOxlN?RK3!OE5`=7?dHX2i1ypjH`nj(C$q#8XGw1eCUPd?a?6PYMShHY6ZsqRq5txW|L5!Azu=SL zXW%>F5AetEZ}1n;ztCsU4|v}&e=#3$e{sG6#s;r9SToF+T8njEyE40Go8R2mXA{Va zB`IolCv51lgxlh`cL!3};PbHs-3~PDY0J@?%QuNe#>A(P182!BuYau>rnwQi!>XTPlA^)-j9ub#&38yO#J>l#f zCr{N)aip4qm-Ym2bt4uROMM({DCc)lTHeoib%p*>z9i6n>=tPk+dY2bVSf$T=M=Ms(isfc%ttHky|J;e)qT; z**$ul>TNYAcN_ouzvjC_@~54fZQFc;oUfG;4m$5lxHp^{b9K2xy=RIi4Yw;1McFk! 
zU!FH1mrFB8m+kkZUA!w3tXEhO*=p~BuS>(Jhg#{MgY+5oy_E7Ox;2bQi!`kame@kY zJ}RY$DB2NkiKYW5gRcMAe?UHu{22Kr@;Br|$S=Ux!GFOg!Oy^Vz#rg`;osmdpnsvy zpdawQVg6!1;Qr!#SM3+eev~E7tpBSr7F{jI4BM`0$n@f6mL{ER9fA#Lsg88nO-NtN5l-&HjB2Le~ZPUTc1|6X!aiI|Rg^_T2PHEVDV16_enSE5v zYt}LH`1RKDF>xF6Vg1+vZaZi4x%FAB>_tC%RZod~>R%HYdFA8Hx|hevw=EBcSQ!ht zVXGP6_TFPOxo7Fkn*20!DNCwbK*)!pUxNMv@_FRP$TyL{As<410lp6Y3qA>c2EGIS z0Dlbs27dwl3w;LtfcFja7xMx47w0SPo^`xn>ITL;|3ZpXmNYZ}oq^oFv~pZz6v~K7{-Nd>#B3d=mT& zd2w5n>>tJLbL4OadR7`jqMm-eS);~*}=w0Um5w)Mu^jU`JTA>dvG{!G@!<|=w zDuSsk14_BlTV z20yD0q)K%BY6#XdlchE0E{{i_t@?+$i$ls6;A-@1$2mb}11V01c z0e^r$hJS;kOrCY1lARI%rp!7@cGS!|*fZiyXSFxG8!ZT@CJnMu(#kif_*k-a{Je*BPO{2T z?Jg6Vd2Y(af7S*R{WJ8V(BDD71pNo(^T>~pZz6v~K7{-Nd>#B3d=mT&dIc0bWA6wqQr}QAYh8&!Cz<0^yESfiI zSDr{fE!om>(tncu5K%YHI&qM{lFkfztY_^!L4KxAmf?^PcGiVx#VxoueU35A;vowR1p^%#-H2d$8J_M4D+k zrE+PKe-4s&CUd@ruG)5Y+)Cg&$*cOt9_GKm`S2u4Zzti7IP*oO;7CGS5H0^vD_qs-+(^+}RdIo_aZ)+OX4& z-bj5`v1G!XoSt4I_%Zn^wNK(5`88}r(Emk075z!{`_Ml_KMMUF^h?lxKt7NB82Kjh zH{?UeFTmHqf59ig&%k%UAK;JS-{3Exf1%HyAMn0m{$f7h{^EQK)DD$xG+fKX>knMl z9=%GYbljcWxHX;JOQG++{V3z``Jdlfx1S^*#8wNeak@xm@pCVY+SQ`IbKJjI;)4@$ ze&L#NrTr!m`<7=M;9^9h72@uXEzqG|&7-F?79S(w;}yn1)6@z2)#(4CpNjq@`hDo1 zp&y0*4*DhNKOmn+evEt*`5W>fW))(qtrBo>}&er?yU0)){MmqgGwUo0Lm@{$Y(k<&q!UiX3{qlND+6)4%8t^w-g^ zM*kQ6RP-m&??e9#{V4Qz&@Vy%0r@=gW8|C2-;fU>zW`qc{{^1}KLg+Sk3amsKZbvU zzkvRQK7)S1`-b_8`GEV2^KCsMIF$5Jg|Qqf+$8^F7ISEwwwTm*H}d`Cn9o=F<76mX zF`KKqCbg#ANptLN1?xiehK;y$mfwCBi}^+hI|P51^7DnFZd+*8TbzP z1N<@k8~g?IFZ3Do1Ku~xU(5&GU!1S4r_Ko1Sph~^pYKj~m@eJ*^MiX{f+@Axyd%8p zpd~FYx?=TcZZy@HzTUuw^WAyz5l0QhNrmM4)SbDk@guV9$^p}WRpCTRctPNH!z~2+ z7uXL#e;xg5^ncM$MSl|gKJ?Gfk3xS3{Sx#akk2DOM!t#s4fzoA3-ERDU+_urGw>bo z2l!+7H~0(aU+6RF2fS~XznBlWzc}9yL6a9toLj~uiUiBWiA`qLq)kmc$}Pp*uf94( z_u_ZzSNb4uB>M~TXf1tpikZTOG)~K0Dbq{zPapGb;AF%v)gPR1v-->V9n1giw_yJQ z`vK^$qhF2w?|=Q&|L;$t--rGg`cdfbpkIRi1M+#~$H+I4zabw&egVD?{tG?{eg?h+ z{s4ar{|0{n{R@2t{ebrk^B400_ZR1@W)d%H!X?D)3|VO@m@B}lxUOoRsn9}0qa&!k zP&HYkKhrPv?N7Srl|aR5(-yjLZJU7&>m!hQ?(FR&kg{yO^A=>MXh zivA?}edwQ|ABFx7`X%T;AfHEmjC>RM8}cFK7vSsQzu=SLXW%>F5AetEZ}1n;ztCsU z4|v}&e=#3$e{sGCYo2<|3{hod#yf6sy-y=N@^f4a-C}4Vm;L2CdmmGi+&R3REBa}% z%GTaxai2KzTdY zdF02)H<7;~A3}Zsz7GBiJ_&vXz61UMe+>Tye*ygqeFpu2_YLzG^8xo4=X=Iqe zm1#OQIHDlAf*E+fRm~u55>r}mYd}WgC%qW9ZS2j;UaEdK@#LHD)&K3kVLuD|L)dS@ z{sr~}&|gQt8vS4NQ_-J9zYqO0^rO(Q>BW}z(qd_*Kbo2l!+7H~0(aU+6RF2fS~XznBlWzc^pBSHJh) zc{Z2Pd%O2}xBEY`C3oXjaXLhlj+86>yH-W8zlr@q?7v|@3;RRZZ^8Zr_5;vgN52~V zU-VPapG3b8{WJ8V(BDD71pNo(^T>~pZz6v~K7{-Nd>#B3d=mT&dbo2l!+7H~0(aU+6RF2fS~XznBlWzc}9~4^BT0>tDjO*X_CWea0dN`^VUi#r`Jt z3$g!({VePcVZR0Y7uXL#e;xg5^ncM$MSl|gKJ?Gfk3xS3{Sx#akk2DOM!t#s4fzoA z3-ERDU+_urGw>bo2l!+7H~0(aU+6RF2fS~XznBlWzc}Bg{lmQqjjNgRLrP*h*6(5F z1P`U&X8w{{mD(TfXz;Pie!t%7GTEQ3?axZJ8&W4@A(=!N8qNyN3QnL-Ywx9)UHVK{G;KcY;L$;qx4hNJQfG;Z zVQ2V*>y<=dMfmvpUyE3`R{;lB#SN2`Ed~p&*6=gSIDhaVw+ef7)-kaydhJAMt!F4- zPzqi2wS3`n*(uEH3zE;%O!|pK#Oz5)iyDY~p8I^2znpU|Ng4m;oJM?-u5z6#DWD!x zqWi)f$2t4G8Xw}_-jg#QSGI}|yA$1-gn!nB!Q{ucqrGWkSv2D~F zOWAX(aYj8w z?JrAyzooCsb3`=f&!L}>S7k0V^rAniic@@##M9P1>1Yj`r_^uH^mzs`X|(li(lR6L4LInss3D_Z%d^Rhqv|0woP=_lu; z8`59RUCp{~jr-|bJxttI3w>GZI!toh?+K~83bCsV7uYX#>>x7&>{Vwy94B2qbziHG zP0+L;UBAB1g;e!TZ)3iI7wI^4cw&v$Te^07qQTFecjVB+TW@k#asGf~&i5&`h09ilEn-WyZ94s8%WT$0?9`uy;^M4<+A6&= zZ)rxRP3hi5&@|S+t$k>@pFDfy(L0sv;-$niTJ1gkgVlkVbM5mZNU z-x7V!{$W7TO_g++1RBa~pk>L~qp#M|;?*9GrdFGEzvdi!Lc89rHEHxaPo&BkcC_?9 zBze3hG5lIb2<8LsFV6SkmNh<|k~^6zQJ*&@W%98<`CerRUF@ejUN3e^+$PP69sXoi zHEjXA?p;~QA=gQ44uCv-Kyv&{K)7|;v8lN9&?s?umV8fDeD=v|U^j4B=FLyOB@-Drx zszh8+BZa!uX}F#+kD#{v7A-%#A_(UnMfGs`4XW~Cs@Z0%(?oFU1BDrdp7h2VBYU42 
zQFN@&M|Fy6IcYlbqBm|+7HQ5f;k0=FkEru->-p{9e@lf(Q)!S@2^A#~*DDlh$tIMH zB0iMJmPE8vnwr`g+C$OxYA>XnNJ=D<(jb+79mn@R{r-Ua_}s_S)$=;f$9Z0__o~TW zwocz^We&FLbl0klItomWUBZEk(AjL?Ew3{(T0fF4Swl`nm*%jiMplK^T~}hye)0G% za8RC^)evvFNPmoC|6)Jj{9?Y%<(cOTGxQjtocf{IFMnug+i8=PrJ}5Up4>ZM9#yuc ziKr@y@v)t$m0BdUocwoDKY^ol7m?iZw`^le2HhGYC-p7df%?vfy&V74ix_;d`1wU9 zl5<Pty_x$VoRXQ`0-U;>?A)qBi=P!Dtk_ADp@h35WY4qM%}U;%$f8R+twT$M z*`NO1GNntnm{-OH-=-%jFy9VYCjYTkXOFo4o~gmh!{C0y{>6U4`Ne#NPP;z#SKrE{ z%S^lRE|G)PJ~+q|cW?@;@chUz)=rL6WGS=nY+QYhq8w+&+?=-x=xu@+CPS zyyx#^$~rG_^HWDGl&(LGtev-L7eA|Q6c3(`gb=Yogi*Urkm{g%6xYv=_Sq~ zrWwv;dG^ycp{)*7?D?7~yOU8gLtXc~o8k%LKKI2^e=P+jHmx}43h!^S$Ers%El`2o zm#Le-L`sT1ab;_wSt$n-p}Af_-9d{rSZ8s5|J+gf$00C7<02Px$^B2#F?CTE`T_SF z_AmAW&M)RWr&8>P(xYw6be=NvgIngYryTdx-S{ZPEVSs->!_4ugWoQGP<81Itz9#? zW{+DwmA6~iXSTJSp1Ur6*0{HsjBX0}k@)r@?ef!oac0MQ)|oJVk)HWvQ)rHMD9g#d zxiC0Wc=-x8%#73a(rr~{d556uK64Ici(2Z-`=M>bFIHsT#D!;M##8Q<@!^g1x|83< zhvprm*VjIlwK(beNfm(_hzVG?Kl}i4Mmca#BJ}9%|WKswNX#0Zn@ft_=Ge% zILEHiAvlAqJ3g=KcVF$~`>EQ1na>N!pOv9E7d5^nHw({>-YI=a)(Z`&E&24DT2zH^ z8(zW9s{cN2ks#Mky(^3s*!=lK6_nJc|J>S5Z^TMo=HPxs&0e=mfBL(Ra7c>MFO%=4 z-*9zg9uNNVPoF_Q;C{pY#eTr~#e83-HqI58*v{njT+SS&3z!0HEw@MFCB!=_!v1R! zB}b;3+9}RxpdHV*v`#+});#4o{vh$+)&qf^*W6f8G*FTVLq7s7MYm)k_X}G;~#1h%J zn;|YoB*3TNmG@%C{b?-rD_w{&*rWNAboe(S7#xuEV}I62zN-?7jYL zT6BGE`E<6P9@Jm4$LnJaUG!}Bx2L?6*naz!6|t$3=y5j>U10B#NWR%EIpafgV$6}l z{pMe4?Y7f6)u#KO{)Ik+e!%^P{fqs8^NacNTWV}_zoE%E4Q%`oDl?h$ys*-!Gh<2N zd7IO_Giqog8zCb(`JT~usok}hja78j)$$`NuN2V%uP?DyFXyn|XLqgq`>CCV$qNS0 z$(LnYiWlz;xztDs;-pd>WrbLYJEOZt{pPX1T~?8Zv3y$jc<08%Sy7~$*T>S%JC~^5 z)Hy=aTFF3*ZRYR630l87%RtugH97Kpx|!{&`&6?bEWO+?idN~rymdC9f{Ip=9kK;U z^qlKMo3q~DQ-^Hui$<`pl5*_nDbYf9`pP?on`)VZIY(Z)_Rr2Itk^Zom+!Xe-jqGeE z?iS`VrY_2+@E6d(&}YyOxZkjUu^(`LG2e3v-K}K_CQNPiQ4g7|GnldFDJdp;^Vv;v zpKlQEculyDrUywGcao2Bg@$#DN+`FPD^umNhJE&3qRTB@m|dUvSl~jv3L|m1vqo95 zmeA~HpRbp7QCk0er{}Xy`r_-XjzxDfXo2BxLxJETvgKiivGQ;=74?cvlE}`bjoW(@ z6V^x4{y#g89@y}j-ejGJ}`l$vSZRsMK0^>a7Sf?=|m<%A7Lpfi_82U71f<;+ZV*-?*t<;hPr~RT^BP zT@pe!mi#A|s+>TRVwe7>^(2wDdGOzk%eSR7#1d_HvX_ZanoN4&lLz!`S%-m`+A0G7 z27dwl3w;Ltfcp*m7yAL{7xQ)S@Yh;@L7ll))O2X*%>tIG-c@KE+fVH5w=3laPGv8c z&N6+?H-q`AsczwIDbDsa?kothmSy*GUcK%WF3E7Me-U=+`$yuh^leV_xmv2W%kAl@ zB_*VtCy1*>^)m4j^l4gc8cg1W>BkDbjv@PdtOA1E^U0%^0VkH`<&z`x78}l>QRKyc zmIB^>MO0?#nYc$uA4o8NB2(f%NG+8(HoyB-O%)am)+tptk+MUkPopB+=rPLE7&Bw? 
zzgwZV>jNXgYTxl#;5@69#3$aoV|U$++~f2&+Pc@DT=-4|Gv>OILIJ-Do3l4);NjPC zKMK#&WeYs>>qjU3iBjYezIU;7(~7HO2I+QW#+j&|q7vtS{uurZ{sQ_J`V9I3_Z#*v z_5;o@=KD1~swywskg@rxZF#^tht{1Jlsh^3JL0*P13N<>U!nU>O*~7qP-EtmHCBH< zEXnLVwE3XRV#3bSZ=Dew^p?(_uX<&hY%S?I=DWJM+=cLR8J|tOn@g>h3>ny2xsvGc zinB*{)(~N9yD189t4Z!ap#3SUNA#?a|NLtb=G5o8^`9qSil|#xE$_m~?++Y$x?SR) z)OY%6-ROYajV?O6?bJ*0D{o0wPUf0N^<&iY{e$so6%wql9B;>l@J6yb;7jl3F==+o z2SclFzBGD5{^D5LLT|b-C$hNcZWP@d8(m-_9Y-aH)iw$7`cbnL4Mrh)Pw2&EOZWOL zawGPQ=5l%c$;3b6%g1Tye*ygqeFpu2`wjaS`vK<{^IecCVsT)r4ihk2 zc`UIHw4+ete6;z(VM;IMcgpUb|I?uLPshmB7c-Y0SDr3C`ThO3w->stdO*w`#WneyxJUj< zcZ4|{vm+f9LY#MX?$Exw>Z7~E90>o-{_odH&QLp}JGGXQKGdzXBI5c5RRX>P{s4ar z{|0{n{R@2t{eb%o`xpBG=NI$M%Ke>t@cssdo%K}EmUfb7$K#`eCwo-Km%8)+7U&@6 z%F%sS9ybxLT{3-sb86`0p8~%|iyBEnRs>THS8J5Pz7*jKL|rZuD>)qmyJ z`M&gozC&YI-2>XuP*`Wv?oVv;?Z)P3#Syv9nzu#Q*pW38{>^8{!im(C=>m_YI+L4s zo~L+T3MWjck-7g9mw)^Wdm;Ee`m5dLiNb%2*oB=Zvp>!aps$7kg?E@fAuq}j5~fCHkW7otsjmi0h~rwx zp6NDybbdtG=?bw9lDFw?ex+*xN$YGjjLG9*Yr_R4hWR_l9kuuTMZrOIAvJuGtM{6A zU8lOodZK9L_Z>HXRCtlkulVF65`4&?0L84es{W+fXv#EE`AD*CI5lkRt7wwYxM9PL z<{;v__}Y@sNk^&B7w;>74;j*}fm;Gw@13LIli+9IJKzuS$MA3P7tp`ZXV4G0->`qN zA8>v#-{HI5awgH+7@6#^v%(yC7>6n0#yd}_u#fC|cKExBu^v~BcOAMU#`s;jNCTw$ zX-~v}=w`21)GliC@{n5@RLLh-+Tu|KDKR-MA?f>?N=x6rv0k}|Zq5iUpq~wF>QbHp6{!EOw;TnHLmHT(U-?=?L0D)NevEMHhgIiPZM{| zY|;`-Ap=OifBYAG68sE&2mAs482%0Z z0{R#F4Eh208}={u1I{nzJGfdSAT5766E5c6nt5HC}|IcRJ@W-^%jg2-)y^0r+s^DP+j|$bn|H0H+`ZczO9%^Jk63p zC*!Tx3Map(v1N4pQ+^ixv2UQkRk50gq}a6a%_t}JYh9ayyYuPQ4@Wv&%WbOuLqq|#c+@UTyd+eb6fWk+zwoUkX-t8Go zn9TK6qxpH{+H1?MfnSECZO?~0j$O%gWd7H}!JJZ3XP0{@?fM(y8*bJjBhW{8Oz&E` z-`O?HY2B<8HCTjT|bMr<9nGTTJqehcdJ;exz$R`Jd5k{6wm( z8&{qauBDkPfBZPQtDZ=e2=WW?b?{&CN$@l99q-S=ujT>10Ns5$Os#Cz`bHB8cHza&RsK)pn4q@bn~K(D(S0!? zxgBm9ANDoW=&Eo zJR7Yo1voaUaO-F8zCpimp2pzb`AE+`a0;6KxkvU;7TkI<9(8^@Z$| zZAAW~`ty^{&*Y?}cIxl&JL>|;J}Idv#?+Il4n7L}s**`w%`Cfa9pgg_2IfvzyR(Nl zE8g|d++9Ysn5M$1TUuzl*=xt`cMAUHL&z_{*TH|mC&ACacfcRukKy0oFQ9*+&!8W0 zzhVDkKj8dgzHhJHyYcz*?r*K&q6?mRg-JB(g(4It1BP>e^Tei0_uKBNM1E<7=woR!PDaEf9NJ zokF%GZE1eGBc0d<2y86&X30f4s{`AkimCM;Q}cvVZlw8l9QVlnS{m)KYK0x=2U`1c zzqav~$?rG*FMmTmg!}@09sC!368sE&2mAs482%0Z0{R#F4Eh208}={u1I{nzn-+FU zaAU3_lYCUd|Hgl>Xjba0mV0Z;=#-jI=ME-_+2>Av0I`}X6 zB={Nl4)_E7G5j0+1@tfU8T140H|$^R2b^Eb_sGbWk(W-|OxvOc+XW-C?9kR2#ULkn_3e2ASoGnmv!Szn7* zcc4eql2vE2+Eg^aaI@rv!$d0W_U~gJm#Ns^C5Kj=J3|LOpA)$=j^urP(HcXSlf-4y z%rN1i2gJrah|%&s_b)$2zKQ$|`4IApe|-Ia{1aZoMCQ6&*(Cb9cMMPtP1O*#FSl{df+&ao|>fZ%rg={4uho?C59eBfL3&@@I2NnsZ0WrIrV@ zKaA%~)5Klk(&Qo4;O#<3*1m{+W@<*Q#plb0lwTo%_J5PgMeL}`4vwPL4=z#1CV5^3 z?w!;)JC)&iu!HKH2#fh)zm#^yR4Av0I`}X6 zB={Nl4)_E7G5j0+1@tfU8T140H|$^R2b^Eb_r`y3X3l07G2@H9pPX0ykL;=~rB1pD zq~)snL659PI;3S|RNI(ERtp#y?c~ZPD!Uz({es-aEb~L>ZzZOM>VlH|tZTF@HgS>+6J>nOC<7VEnyL4a9i{YHUI8xZ2&vB*6i|i3g zE+|?M{;ywx{sZ!PKZY0v7WVW-# zK4NoelU=UEZAyDBt~To4B@Z4LC;hm6mTpPiD=M6&sw1YV#2h@4x~ z{(W@%OLH_~yJo01Yo5yx1&=}_3z1BC*L}tlw=XHT#k~7EgfsO2TYMi9M zB>$KXxmjYvieJA;mJeTjwy@Bbe09Cu)F%~8-&gkj;r0rpwFTX!x87&df603{gy_LHhUT* zMkW61N1?xiehK;y$mfwCBi}^+hI|P51^7DnFZd+*8TbzP1N<@k8~g?IFZ3Do1MWBM zU+f2*U(7d9ZQ%EjcN$FF>o*R{-xe}y&&+m@S<5hWn#v72?xXZ*#(7l<%|5!P!SB@Z zlCRXw^yIG!nQT&XXyUxun*jQH)u*^Ckp)yaDv)N}I!M#@SH{U3Z70I@$BY-c`;pJ# z$HtcHh11U!zC4+GqA8EIqf6NB1ade{mv7MWEa_WNRY4wKBZ1pKyUb7tpy;2WABFx7 z`X%T;AfHEmjC>RM8}cFK7vSsQzu=SLXW%>F5AetEZ}1n;ztCsU54hj3f3Y8Melg#7 z<~^&Px5zW8bB}+YwOWwbB_h3i!_CR>+`Vhq<$s-=44JD~$i&bYtKW~jIChFS zTc`!ia+!SBHt@q;clRX{W0SmPUhXy8-XrDR@!%RkzYqO0^rO(<64*%y&=oi|qsU>I`S__qkVC5%$aF#6Dr7$S$D08!LL# zNVe(4k+@+`TAggFvUS-cACYccozroQgpJ#IOdGJJ=37Mfof*4Hyw25>2!4J*46>h! 
zRG+v{%sWF*M#fzsI+q6g6=rCYmGeYbong;YulmX!kA@=z{Z#ZP(eFe54E-qdchE0E z{{i_t@?+$i$ls6;A-@1$2mb}11V8)7cmBs8;E&tFYK|kPr!~Vs7!1={|Rl;KP?l>=Jl=T9pTO3cJI}}aU-c#};jZZSCZfQ>; zB{4Z$PFPxya4Fy1S+n=iy9r3vc29mMVyE8I z9{G9e==br|8Gkg+(0R8nTk?sVr07?p|BHSq`jhDQp?`*c6#6^pm!SWEd>;8R@=fG# z$cK<$fUkr9f=_~zFqt8v9?y z71OVxe#6?bq76rXAi(@2`<5^gQfD>$SJLOkH>C-}?ac*U_&={}=sK^e55pL;no@ zDD-#GFG2qS`8@Jt z8-r#ob0UdS!G|q8?P+h)kK@BxhbWKcn5Wh{BZ~JIcprfNI{MY<|DvCY{v`T+=%1k< zh5iouCFnmOpGSU-d=vQ_@*(6G;OpSO;FI8I;5*tFYK|kPr!~Vs7!1={|^QOF6 z@kUQ{^1DjTk*cqUX=ioT_o$kebj(F%_`2*f~&IDgEEL8N4sV`!~GL!uuh- zZ^8QuybnNs9sO$bf6-4xe-iyZ^v}?bLVpMS67(OC&m%uZzKQ$|`4I98@OAKC@JaA9 z@E!06_+$7t_zUP?=riaC+;7;w*bg|rm~UwV-+t@j^~~r-rB{!CM3BINJ8K2j*^}a* zP99BGdH>#T;(a0Bzu|or-Vfn@3*KMgeE|CF=vSlvi+(Ellj!%Me};Y(`a9^Cp#Ok; z9{DlyP2_LLhmc=@uY>=BPlBI;?|?tRAH%=FUqJstpFuz1e#8F7e!%&~d@ns2v%A5$ zjN!}Q_cN1!3CmUteQybwAb1~(_nUZMi1%-JpN02Bc;ABe7kD3l{yO^A=>MXhivA?} zedwQ|ABFx7`X%T;AfHEmjC>RM8}cFK7vSsQzu=Sq_}TyX4)_E7G5j0+1@tfU8T140 zH|$^R2b^Ebw_sNHwjC!ln82`k&El`QS-d~S`&hi+#QQ?Lf5ZDMydT2*7QDZ}`vCOU z(XU4T7yVT9C(-Xi{|x;o^mouNLH_~yJo01Yo5F~3TW zzM7G}ggxmuvvKp)`Rs_p-Y;@9C&vUs-ja_RTayYod+6HwG1ure zZdRuH^}*76zvzs7QWACh#n{Zf zsYffDm$CAS3%cKn&S8XnXQ@70J)2oA(6DQGbUstnelYmeDH+z-_pF-PTbLW znd}q9$Q|EROoS#ri1@k`kdNJ!d!?Fk>Di;j@8-RGOk_0@M^2Ok5%){sFa1Bo()KZr ztp)j+R79g;Vc^0>I%nvOMqWoF!F)$tB+JFS6&ODq$J3hU=(~2iL+;>KWN1^jqNLAeRXVmMqaWwJnike=Us~e0Oyot2k$MG4pXL!wR*J3>nX4 z&$U-i+>jP#^R-gXSiM}xu34d6JymZWvxo0L>HfIaG@n~bGB==`N_t8fPII2l?mZa2 z=Ze$-T`M(f#r>*pG)JeCoqxNTzROfy?|LSYEQ%YG4xZ&hxVES|%@I9I_m$q9nJQRE z+GM_Y+&WcGD~~2{&esShb$sT0hAl;8`=ju&V|}Ug&+H!y?%AeN*KOj`6O-SG*B_Ad zY}lGZq&M5PJWR-=>WjOLzt7JkURhS#f66{3Cad#4#)z|Y+{~akX*`uG@T}JJ|3hh# zkeZsXVhO?d#e54YHuCDu)@5e>Q1E}v^)nU$;)^=XtyUG-t|miLj1R{iNWJp0I+V9U<3A<-OF2~QNJXs2_wdQzoj32*&h{eL=Lrdqz7CoE-LanYQt4@gO?>yU z~Ot^jE@X-#k7x!(4jJxgYYZ;M+rAdD~|)2i#=%dMD47icSlZa>31C^3qC}9oYy)^x@C9xa^%&~py->VTFH@Ue#l-SHTnIq z`huB>2C|$t&}LnE_G19ZW1|4w0Yb1t&SCB+}yl=h>m?E@72%jd@1{x z=8&q@pXWX!4Zg#wx|tJnP_R{d5kCjB&Y@UDz`c%ImWEB+D%D8^Q)bSq*xE|o3s>we zyqG}8L;llhPW(W;d6vDW+`owAb~0n#jS)Jw+Hl*;$-ghjDN)zNMr-K7Y1wqtwV4<2x4*UCcv&Rfb{Rrp=|da z1AXL-!m1lCru}q0EAi8u$=|1XndE6UN3x9QD19ZoliwYGa9tsJ_&f*WeX3+ydEi3U zgY(zk2PTVH!4<}erv1Ojg%c9CVP6O7hnJPRQiAeGm*3iWKh2-?lF#0ck+nT!UEkeg zk)N+& zv>)o)INVCs#x1uoc-lwaJZ+WP7xIJddQrrk%)X|3ckGD{pWR1`YNY49N$MxK->`qN zA8>v#-jK?DsN<`&Y{}So3#jOHG%I)7_V)M-EjFldB1*rth_Gp!YI< zs};3$(g>wnS_>1uklu;Pldf$Qbam}WVAJHkS7~3N;T*Yfx-v=dWbVTEr2J}V{BHyC!#m%y>r}ek_ww;LWA*l-kz4DBzV zV=p^DQ!^3nrm1?J|MUayH|$^R2b^EbH&xNn{n`61Ot{nW8?t*OnLYdh3-UDoP?rLq ztFF48q;0v;s)nU6=&alE-wglQ6K1XW7*Mp_80C^I37w&E*5O43hAKB4M^QCzIJkYC2wzV@8s{Czn4_cSv7~P`6|g8$oL|hhNmi@J=9(V!IkQ7I-{un; znqRp&<;y79krHeFs)B=^JACG1GG7;&$$nE@J^5}UBq~W=IHi&fBxt9b&#b3~O4IuL z@7B|Rw{~-79qUN-ADdTQlfU;_TfP0DNlGWxe5K6U)KN&hCLG7p63fVmD=ljzZto%f zh6;y(jzqPwnHO9_nMk}cFhdp7bnXs ze|aadvoez0dHVgJ@ugHMroE@<^0r*6=hMH#a{6;(n^Jc*&8&ck%T4$X4%?A%Evu}- z*1L4i`Xrs6{jNmCGR04^a)JnMbGg5E>L|79@|9V8@*^3W;#_>>sVJjh96Xh~PoCi} z9tkL({Ekq}7lY%2qLQrk!c${s{s^!YDS7w%S4y#VH>X!6zn;aMNUe>r$X~?1Tw}XR zRak=!DrSr-3v)@sV1Tm8r3SKMjd+&Df;xKOcLx8)j~TS=^~*!!iUCAw#~yj}smbJI z)@AxeI-Y83u^V!eb0`_QaHVwecc|enpnsvypdWC*VgF)3;QV5~qqSLrzXS~!b-Fxc zbt50Ud31`=x(*&j;#l?bfm6lA%5$KRXLTOY-@NUN>$O|NASANw&3l%1r>5DMn3MA)qiX2T72gM2<&IPP14&3mm?m$nVk=z@c4FZI z<@Ub$Z1Sva_kGlEn_nU)59h$4<_!rh!&o5>@SfmZ_p zKM0@Od|U3P-{=8R1<}m>*F-kKdf|JnYU(t%$33$*g}63IM4Z$wpkbP_qR&Ed$>P>{ zsg!`n^n{@6n!39zO+J#bF0v_|96eJa`=oNRS1Y%2_1m*cn8<)~O&c3kcI(WA_57}X z$%a--gRx8FED*ui?IvG%u#Pys+d<_j;xiXV18`_*STeY6JE;UtnP*O@S9q1WT 
zsq&!*^47l#k$6NH?x@+O`hm2+-JhOuCweD2Y=7Ro^K^k!o5hx0L7)O#qZ0k8by3sD>SPO&TJ5)bNsnk?Gfhblut#j7Ju-9b>!W zFT|~8qf}4VG-XS&?c=wen5ax+&z}tlPZpJ8!5`p{;osmdpnsvypdWC*VgF)3;QV5~ zwc<{GyKieT=1&)S+5XUEHeBSYA^iO$V&1Q?z}f9|k;t6%rJf_ixM51*rm+d~%ECrg z|G(bJchRLge!6n9b3&(!$Zq>W7Ou+eF2pY(zGeWgy2uKFfMLMFtCbou=Fo7xaUx-CechkxL%}@Y&SGiYB z?TZsR<8n6e{mo>0C_i<^%nP@PX2s4u*Y|`{*^A2Vb?FIIZ?}V>ny?KCymTmW??ecF zdx5{U)6tE#fBa8Aaq@RbtrQ1G@}K9>S^6=rhLa!Bd9pj}Lk}d7_ahIx0$wH%HPcaz zT{Vdmd8!_%?1YM}X40c?c=xOFFj;hCi;bo2l!+7H~0(aU+6RF2i$Mizt|5rznJfQ z{>2MIk~T2JLz^!v;w4q_zj6L=%m8^Q6Pq}@WSG96ryiQ}-w4_IgI6kO`*e1d^AnzszLk=~syfS{Bg#Zz6O;-aV(nZ4vLg^$W@U z)siMkkzS-?SpN7?x$ETFLE>Q}5ke;xFkXA_B+w7N{AIFBawmJrQXP^a;pAM{Lggk$!3NPk&$GL7(-r0&;riJ^Xm1_xYZBgvkUZ$1Ibp3o!1ZtJqwWzbz4AD(P| z%M#KvaGFf&%LTSCPh|V{zZnp=oq4K zmrAfc7uKs;lkIHCx~$OpL+hE|HFefZOb7kiwxjdwx=P~pvUB3v(zmok^t-u>NHOIR z-{^c+G?ljA<1O3ty_}8-%$hYZoKAKYe|&Eu{eXV|dt5ZGB8R-td!{1hl};?O*Ktf; zQAST1e5uGk6;1x8t$b$|97=qm_NL19-lJtdOz!+@xlfa0I$w%%hSRoz@0niv3uvRN z$3lZADWvLWYmjhUG~x8Q^1AC@2=Q$BnYkkNG^zGExuB&mjb0N8{m?kslga(1)q8Zq zYXZIw{tG?{eg?h+{s4ar{|0{n{R@2t{eb%o`xpBG=NI$6w&mP^A-=1a?#-e`g>GWZ z$f3;-+A_77cK+_Hy8Wu`*EyeLGX?7DMz_l~Io0u`>{VW^aefh*Ffa<6wJein*vEg~ z(e#Wcsi>4RoO?Hi{hI zH2wE>3peU?`_{CTQCF#@m>0FTGNaXTA2vAhPX6Ap+i#wO5~k$c86nHzXSRf=ypD^> z2_e}wo6|SR+0tq8>sx&$-$fW#$&}BPJVusEY_2T%1nxU2>Av0I`}X6B={Nl z4)_E7G5j0+1@tfU8T140H|$^R2b^Ebci)B&nIFY>GF9D;8>jU!Y{^w=@iR*Y$VZz| z`;e3ERBCbYyF*i75ObRl!?*7%N$IZHUtgarBo#9}drlixll6_C#8fUnqnTBmKIE2 zMzwyPyEBrm+8t`FHLsG0^JyQnk9tm#zabw&egVD?{tG?{eg?h+{s4ar{|0{n{R@2t z{eb%o`xpBG=NI#BY_S>m$fe38&+fb_8uo;|t(qxYzVbHZ+AL5fkXl2_nxii?rlnHx zH$&PQhK{XExH`NjXdMPe+pHQ-jA&7lhN(5i*%s7LUoEhWNcV1>8s_$B=o^ z%4`}_9qH+O-JjUKJEh{d%7@N8xAUCF)(2$bQuW^@Q*5aN$9?8+dNj#Uz82f}Ds}QZ zbw7E+uX|Dch{)T~?{Cv}3wll|EpaEx53gK$s_jOM3=XC}_i&-BmK#f`&3#4-hI3>i zhVuyWP2_LLhmc=@uY>=BPlBI;?|?tRAH%=FUqJstpFuzTyWjq|f3Y8Melg!lC*My; zZPb`wTb-w!U8=zZ6b4!j{{2Jdo|r24ZEH3av5no$R}?{S*wj87$y_fwyO@7afyXM#K$ZlP#m6Tp3AZksu(v6KDTz4VQRhyzeYh9w}l2+)( zpFd8ICZ2M4=nWuta?|{Tm!GHnqn(4y4|iI`^jG=k?jbev&ENcr@g>3LTMku3Ig`hM zW2D;TF3q3&QG@HlC34E@SZ4L09d!#fl+M_%N01*Q-$ed~d<64*%vXr6b5U+s#~j?OuD_6-%Cxp~pITaOMP2Tw&)!uQ zL2N%b4i4|SLu8om+*5^ZXj%JPYwlBriPPEFtB)RXCXXI9Y_ckFB8wh4zN@ff=;Dd1 zy+XdniPrlS_jal7qTVy692b{#A`49xj6`g4q*sfl^obT4(oOneelcCAY3%B7@mt}x zm3w$;b)m!nXq6L{eqHzM7BozkD9~ zG4f61Z^(y`Ux2TJ|AJ3~pMmdyKfoWuzrkNX|3aTZKj41D{>6U4`Ne#*OqL$e%b$~u>+m5%v-I) z@G#+(S*R9QaEp8|3hTWXzk}#+tId-ybEH4Agl44k-ysq!o@=b^JWUp=D2r@aeU19- zRB2THbf*3*ls&~3I!yAEykqAcdee=SNBPd{9ibo2l!+7H~0(aU+6RF2i$Mi zzt|5rznJgxzZFZSD$QZI-!3&wdcn!Qj^FH5>mbDPid~D$$<8KuwGYOa$?tX5aKyB8 zKMJLXJP$O4YQ>XHGo4IIk7m%RD$eBrX)z?=jOMj7qV=@0jJ?ys-9X}EMi?HhXH-4# z?E9Q64+&3?q={vsCk@j%ts$>;LPQp#Ok;9{DlyP2_LLhmc=@uY>=BPlBI;?|?tRAH%=F zUqJstpFuz1e#8F7e!%&~e7hI#*=e~|p82BJ^zpvqVkSwUe&CZpGHp5`c~NzS1^IF? 
z>4`t@HG0}W(!Mk*hJG*J7n;5-k5u|z`a5&dN4BZo-{KTsNB*lmaC@k^f}ZK!zT{~x zOa9pUxcG!d6TkWVMsZg%h~@iX^*Ix9v`A^kOQqf-I`jLsrd{W2=-R%$?pZUMh~ufN z^D^(>C%bDb23JO>lVdIxkH42>Q1o}uFG2qS`8@JtPO>k=XhEshLForB8ngDSo-IUX;$3E0QRwfWUxNMv@_FRP$TyL{As<410lp6Y3qA>c2EGIS0Dlbs z27dwl3w;Ltfcp*m7yAL{7xUe8=Ae?5zZ4_lVLDjDA;RP@P(AI@BgQ_FV*RWoKhp93 zn9BM0f71k`Ro%P`CWxudoi!X^UeVK)18bglHPbC}(++SY)e&bi&x=$fm6(*T?viPK zKKZ?lgt+nWY)b4CxLOw6p{AaT_8-o;MOja&3!;Y?(w9@*Bc>nsCR!8E?`|^*B>G7O zqdF0H|Mkz%k3xS3{Sx#akk2DOM!t#s4fzoA3-ERDU+_urGw_{%{NaE882%0Z0{R#F z4Eh208}={u1I{nzd*+kMZ@=d%%&kljKdF)N948&Wz*fbh(>Op2l@b3Y?Ow zIq#~{6RPX)W+&ewzx#=5Oo}^MrgXmf)j2Oxc`EAL;w@3+GC#fj%QN_2zYqO0^rO(< zLB9n32jug}kCAU8e?vZm`~rL({1H{mpP?Ux{to&j=szHzM}CZa6ZsqRA>)^lOli+9IJKzuS$MA3P7tp`ZXV4G0->`qNA8>v#UxjL$bwbBhGdd%e%VO0RGlqvp zj-PduVt8gHC@(kZB%FV?R-S*>ME;XLpjTB;P6p&drpNOKl0NsUV{bR#AP$><4N?9m zqPBL+{4`}Xa==SFT7B#iG0FDrIwih`v@Yf{(_WEG)I`om>MTkns|-H9s+9=;*H1-% z68%2(&(M!Te+T^%^dFGVBR@vIiTn-u5b_J~b?{&CN$@l99q-S=?;M^7Z)7qSGvZh4hx7VpG8#jEuk^hK=`zndPOsg*P|w1pt&&`Ibh)37 z@KF_W%KcpAqu!K2arv0RP-m&??e9#{V4Qz&@Vy%0r@=gW8|C2-;fU>zW`qc z{{^1}KLg(Ze}F%Re}lh({)Ik+e!%^P{fqs8^NaZ=nqC=re_o1Na$05jg$-ORk3fp~ z^+N-6BwQoF{oN<}sNOtqhD-p_S`iX0TTRG4!7@jU?DOQWjlEM-k^{Z@pRv9Zxk{Uq zo4!c5+tR;Mg-)4$YD6aKQSrBk6hd@oTl8w}rs!9r|BHSq`jhDQp?`*c6#6^pm!SWE zd>;8R@=fG#$cK<$fUkr9f=_~SUDlIcP@6a;5?ce(V^w-g^M*kQ6 zRP-m&??e9#{V4Qz&@Vy%0r@=gW8|C2-;fU>zW`qc{{^1}KLg(Ze}F%Re}lh({)Ik+ ze!%^P{fqs8^NaZ|D6XG1?fL4-Z~KcMR(NSmn%;}von~!E2PMUit-pSk+!R|{8RC0` z?#!@T6tsM@$L93Ze0kU%;^XbRzWUS+a>=X4(E8C1+Tp!0{MD|j|K4BVeE|CF=vSlv zi+(EllmGgC|LdQjABFx7`X%T;AfHEmjC>RM8}cFK7vSsQzu=SLXW%>F5AetEZ}1n; zztCsU54hj3f3Y8MelcH<1$RG>PwGL#Hs17C2bZ$`^Y-**M{qK`rrazoURO&wNaOqJ zH$_Bdnx&dtL^>^gtv27@{SUqRpf1ZmX`JjZ6MVVxWEsW#mj6f8dH-|${$D&1CCP|F zDwT)`MM=7zMMF_aG-xQL5)Gw2(6Evfii+$V84bCfR#wOg$rg#sLPX!!51;e?{tMUb zx;?IQ?&mz7PwZb{KLGu8^sCYTML!k&N%Z^BKSMtX{T=j6(0@QakNg<c2EGIS z0Dlbs27dwl3w;LtfcFja7xMx47w0P{l-}f#F^g$)I##87XA&d+=I^&#*CuH7CJUzY zV?B|WbgAQcSt~8s;QM_aS39W?&5In~l>gs;7WRj*--7)M><6H~j(#=zzv!o;KZ$-H z`e*1zp}&KE3HlGn=aC;H-$ed~d_GoNth1kxjRN7&Eju$-uO3obD*RyX$h-Og7}YlgymqCX$nKc!+1)ASb_ix40&f zo5lVc_Oq}*g#8xmUtm7~{dM%K(f>t175z!{`_Ml_KMMUF^h?lxKt7NB82KjhH{?Ue zFTmHqf59ig&%k%UAK;JS-{3Exf1%HyAMn0m{$f7h{^ES=3(7km=@415Rt0sa{N4gLc97y1nP0q+~; zFXjX8?|=BPlBI; z?|?tRAH%=FUqJstpFuz1eZ&04e8Bz1`Tp>T(^ZksWP04DXEbh*Wt#mSbe42+`0W4o zW3j)9{X*=&VLuD|L)dS@{sr~}&|gQt8vS4NQ_-J9zYqO0^rQak@BFV{g8l>YdF02) zH<7;~A3}Zsz7GBiJ_&vXz61UMe+>Tye*ygqeFpu2_YLzG^8xo4=Nl*8IWx?7F%va% z=6HVvKZE^a?8jn%6Z?hOf5Uzj_J^?Fg8d8Z2cW->el_~P=%=DTiGCmYXXrAv0I`}X6B={Nl4)_E7G5j0+1@tfU8T13*H_YGv=EMK)FU~i- z{^SX{z`0Dqlr|${9W_R4>zU-SJ{4wljF%d5^S%@b!=yxnT;TmBdE-K3_os^u$@ zdHuIsw||hPEHDyXnK(q1T3!T5tY{?xi$&v9#ilW*XU1HbSyDyKzB#W^f1}7$X@56O z378;UO-H|U)`_y!L2kcxn#i+t&s9$FjZ~A@LRR_>C(6m^ImhPQi7BA3E?E{$;ru6Q z&xnCX%m1gM2K%<>6ZgE_|oURQ9*;6!NFSZ;o^YjfYXe zw1>~m>Bf_J`{!7iZRdRNb-lO8h8Z7ditL5G-R5c3HB3KSj4O!r^siAZEKQ@P#+fGr zveO98_x{@#r|mbbXT+>B43;YiFj1#(JTQ1E#u_cpskXCQ#Ga-y>w;USGB2)wTxuO8 z%4Rw3&r8u#W6v$?cb2&>#jX*)E0&`$K%5mO6^WR3k=?2m&*XcXsAyxzo*Jv^>`Rm2 zOkZZ5q|LxF0vSP(9?ud3CR=)L-|J#56sM&{z8|wq+vojg>z^Jq#dP&IiV8Ih! 
[... base85 GIT binary patch data omitted ...]
diff --git a/examples/hessian/data/H8C4N2O/type.raw b/examples/hessian/data/H8C4N2O/type.raw
new file mode 100644
index 0000000000..a6510b1c81
--- /dev/null
+++ b/examples/hessian/data/H8C4N2O/type.raw
@@ -0,0 +1,15 @@
+0
+0
+0
+2
+2
+0
+3
+1
+1
+1
+1
+1
+1
+1
+1
diff --git a/examples/hessian/data/H8C4N2O/type_map.raw b/examples/hessian/data/H8C4N2O/type_map.raw
new file mode 100644
index 0000000000..5d0a0b4b31
--- /dev/null
+++ b/examples/hessian/data/H8C4N2O/type_map.raw
@@ -0,0 +1,4 @@
+C
+H
+N
+O
diff --git a/examples/hessian/multi_task/input.json b/examples/hessian/multi_task/input.json
new file mode 100644
index 0000000000..b9a347581b
--- /dev/null
+++ b/examples/hessian/multi_task/input.json
@@ -0,0 +1,129 @@
+{
+    "_comment": "that's all",
+    "model": {
+        "shared_dict": {
+            "type_map_all": [
+                "C",
+                "H",
+                "N",
+                "O"
+            ],
+            "dpa1_descriptor": {
+                "type": "dpa1",
+                "sel": 120,
+                "rcut_smth": 0.5,
+                "rcut": 6.0,
+                "neuron": [
+                    25,
+                    50,
+                    100
+                ],
+                "tebd_dim": 256,
+                "axis_neuron": 16,
+                "type_one_side": true,
+                "attn": 128,
+                "attn_layer": 0,
+                "attn_dotr": true,
+                "attn_mask": false,
+                "activation_function": "tanh",
+                "scaling_factor": 1.0,
+                "normalize": true,
+                "temperature": 1.0
+            },
+            "_comment": "that's all"
+        },
+        "model_dict": {
+            "H10C5N2O": {
+                "type_map": "type_map_all",
+                "descriptor": "dpa1_descriptor",
+                "fitting_net": {
+                    "neuron": [
+                        240,
+                        240,
+                        240
+                    ],
+                    "resnet_dt": true,
+                    "seed": 1,
+                    "_comment": " that's all"
+                }
+            },
+            "H8C4N2O": {
+                "type_map": "type_map_all",
+                "descriptor": "dpa1_descriptor",
+                "fitting_net": {
+                    "neuron": [
+                        240,
+                        240,
+                        240
+                    ],
+                    "resnet_dt": true,
+                    "seed": 1,
+                    "_comment": " that's all"
+                }
+            }
+        }
+    },
+    "learning_rate": {
+        "type": "exp",
+        "decay_steps": 20000,
+        "start_lr": 0.0002,
+        "stop_lr": 3.51e-08,
+        "_comment": "that's all"
+    },
+    "loss_dict": {
+        "H10C5N2O": {
+            "type": "ener",
+            "start_pref_e": 0.02,
+            "limit_pref_e": 1,
+            "start_pref_f": 1000,
+            "limit_pref_f": 1,
+            "start_pref_v": 0,
+            "limit_pref_v": 0
+        },
+        "H8C4N2O": {
+            "type": "ener",
+            "start_pref_e": 0.02,
+            "limit_pref_e": 1,
+            "start_pref_f": 1000,
+            "limit_pref_f": 1,
+            "start_pref_v": 0,
+            "limit_pref_v": 0,
+            "start_pref_h": 10,
+            "limit_pref_h": 1
+        }
+    },
+    "training": {
+        "model_prob": {
+            "H10C5N2O": 2.0,
+            "H8C4N2O": 3.0
+        },
+        "data_dict": {
+            "H10C5N2O": {
+                "training_data": {
+                    "systems": [
+                        "../data/H10C5N2O/"
+                    ],
+                    "batch_size": 1,
+                    "_comment": "that's all"
+                }
+            },
+            "H8C4N2O": {
+                "training_data": {
+                    "systems": [
+ "../data/H8C4N2O/" + ], + "batch_size": 1, + "_comment": "that's all" + } + } + }, + "numb_steps": 1, + "warmup_steps": 0, + "gradient_max_norm": 5.0, + "seed": 10, + "disp_file": "lcurve.out", + "disp_freq": 100, + "save_freq": 2000, + "_comment": "that's all" + } +} diff --git a/examples/hessian/single_task/input.json b/examples/hessian/single_task/input.json new file mode 100644 index 0000000000..3e61deac52 --- /dev/null +++ b/examples/hessian/single_task/input.json @@ -0,0 +1,116 @@ +{ + "_comment": "that's all", + "model": { + "type_map": [ + "C", + "H", + "N", + "O" + ], + "descriptor": { + "type": "dpa2", + "repinit": { + "tebd_dim": 8, + "rcut": 6.0, + "rcut_smth": 0.5, + "nsel": 120, + "neuron": [ + 25, + 50, + 100 + ], + "axis_neuron": 12, + "activation_function": "tanh", + "three_body_sel": 48, + "three_body_rcut": 4.0, + "three_body_rcut_smth": 3.5, + "use_three_body": true + }, + "repformer": { + "rcut": 4.0, + "rcut_smth": 3.5, + "nsel": 48, + "nlayers": 3, + "g1_dim": 128, + "g2_dim": 32, + "attn2_hidden": 32, + "attn2_nhead": 4, + "attn1_hidden": 128, + "attn1_nhead": 4, + "axis_neuron": 4, + "update_h2": false, + "update_g1_has_conv": true, + "update_g1_has_grrg": true, + "update_g1_has_drrd": true, + "update_g1_has_attn": false, + "update_g2_has_g1g1": false, + "update_g2_has_attn": false, + "update_style": "res_residual", + "update_residual": 0.01, + "update_residual_init": "norm", + "attn2_has_gate": true, + "use_sqrt_nnei": true, + "g1_out_conv": true, + "g1_out_mlp": true + }, + "precision": "float64", + "add_tebd_to_repinit_out": false + }, + "fitting_net": { + "neuron": [ + 240, + 240, + 240 + ], + "resnet_dt": true, + "precision": "float64", + "seed": 1, + "_comment": " that's all" + }, + "_comment": " that's all" + }, + "learning_rate": { + "type": "exp", + "decay_steps": 5000, + "start_lr": 0.001, + "stop_lr": 3.51e-08, + "_comment": "that's all" + }, + "loss": { + "type": "ener", + "start_pref_e": 0.02, + "limit_pref_e": 1, + "start_pref_f": 1000, + "limit_pref_f": 1, + "start_pref_v": 0, + "limit_pref_v": 0, + "start_pref_h": 10, + "limit_pref_h": 1, + "_comment": " that's all" + }, + "training": { + "stat_file": "./hess.hdf5", + "training_data": { + "systems": [ + "../data/H8C4N2O" + ], + "batch_size": 1, + "_comment": "that's all" + }, + "validation_data": { + "systems": [ + "../data/H10C5N2O" + ], + "batch_size": 1, + "_comment": "that's all" + }, + "numb_steps": 1000000, + "warmup_steps": 0, + "gradient_max_norm": 5.0, + "seed": 10, + "disp_file": "lcurve.out", + "disp_freq": 100, + "save_freq": 2000, + "_comment": "that's all" + } +} diff --git a/source/tests/common/test_examples.py b/source/tests/common/test_examples.py index 1ddbb50db9..92ecf3a09f 100644 --- a/source/tests/common/test_examples.py +++ b/source/tests/common/test_examples.py @@ -60,11 +60,13 @@ p_examples / "water" / "dpa2" / "input_torch_compressible.json", p_examples / "property" / "train" / "input_torch.json", p_examples / "water" / "se_e3_tebd" / "input_torch.json", + p_examples / "hessian" / "single_task" / "input.json", ) input_files_multi = ( p_examples / "water_multi_task" / "pytorch_example" / "input_torch.json", p_examples / "water_multi_task" / "pytorch_example" / "input_torch_sharefit.json", + p_examples / "hessian" / "multi_task" / "input.json", ) diff --git a/source/tests/infer/test_models.py b/source/tests/infer/test_models.py index a6cde3206c..a79ce0ab21 100644 --- a/source/tests/infer/test_models.py +++ b/source/tests/infer/test_models.py @@ -70,7 +70,7 @@ def 
test_1frame(self) -> None:
             atomic=False,
             fparam=result.fparam,
             aparam=result.aparam,
-        )
+        )[:3]
         # check shape of the returns
         nframes = 1
         natoms = len(result.atype)
@@ -108,7 +108,7 @@ def test_1frame_atm(self) -> None:
             atomic=True,
             fparam=result.fparam,
             aparam=result.aparam,
-        )
+        )[:5]
         # check shape of the returns
         nframes = 1
         natoms = len(result.atype)
@@ -174,7 +174,7 @@ def test_2frame_atm(self) -> None:
             atomic=True,
             fparam=result.fparam,
             aparam=result.aparam,
-        )
+        )[:5]
         # check shape of the returns
         nframes = 2
         natoms = len(result.atype)
@@ -232,7 +232,7 @@ def test_zero_input(self) -> None:
             aparam=np.zeros([0, self.case.dim_aparam], dtype=np.float64)
             if self.case.dim_aparam
             else None,
-        )
+        )[:3]
         # check shape of the returns
         natoms = 0
         self.assertEqual(ee.shape, (nframes, 1))
diff --git a/source/tests/pt/hessian/data/H8C4N2O/set.000/box.npy b/source/tests/pt/hessian/data/H8C4N2O/set.000/box.npy
new file mode 100644
index 0000000000000000000000000000000000000000..9fbff5b5c4837fd77cd99c79ee0ee0203b5e3809
GIT binary patch
literal 920
[... base85 GIT binary patch data omitted ...]
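The `[:3]` / `[:5]` slices added to test_models.py above exist because a model that produces extra outputs (for example a hessian) returns a longer tuple from `eval`; slicing keeps the existing energy/force/virial assertions unchanged. A minimal sketch of that unpacking pattern, with a stand-in `fake_eval` that is not the real DeepPot API:

```python
# Sketch of the unpacking pattern behind the [:3] / [:5] slices above.
# fake_eval is a hypothetical stand-in: it mimics a model that may append
# extra outputs (here a hessian) after the usual return values.
def fake_eval(atomic: bool, with_hessian: bool):
    e, f, v, ae, av, h = "e", "f", "v", "ae", "av", "h"
    out = (e, f, v) + ((ae, av) if atomic else ())     # base returns
    return out + ((h,) if with_hessian else ())        # optional trailing hessian


# Assertions that only inspect energy/force/virial keep working unchanged:
ee, ff, vv = fake_eval(atomic=False, with_hessian=True)[:3]
ee, ff, vv, ae, av = fake_eval(atomic=True, with_hessian=True)[:5]
```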
zfAx=nw#cFvmYF`-{Y8TNr=1bd7CtOpsZT^HwLcFpoeu|#d8@mUAu%AE_SZW>hqpMY z+_agfQvw`@MQ+y?S|D?^Qm-~G8S}Xw03@{Fh>zh~O;8JPd>TJ?;?@cdbZGnh?f)HK zZGB+gaFT@fIn7>QGS3(E+!MG6Vg>K7tdQsklsT z7oN*CJD|_m30^wlTK>N#(Atr#*Lb5IW@Hkq&)PB|Vo5jins6KZBvn+$jOD^tiEG0R zQnxVtl|=uWgg8tHdjo-Io?)>2pJE|DgYjqD#&fUdNg(_+%2u=SG2Y|u=AR>{VSc;4 z*t<@16dIndV{wM!kv&u7aVHPl`h4DYJv|%@cu$HYcX)xHPgxc7KmbUtJ5JA41!DH= zUw+RbeL>tjbUtje4@f90f0NAcgs;n#KRotF0N2Dp&E@0CC?5KS*ixH_snH9hj367B zTu2fv7O2JF_0qdELg)~k7V?k0sTNQT$4ta&)ev(hrOKVy30&JwY84-@hQ3gh@j<@( zSkwH%#wb4-hV%vs7o#a?x}&gW!LSxZh6Z(Q4pu|)w-o`t(@*fiMRhxVVg>##jdoM| z@gDmvA`f5xMT0!O+;^0tvDlp^@#Eiww|KBh>zl*j73&eBHZ2> zN!8H2i^XO7na2S$SUZf8c^d>YIejBn8_On`qb z?cP0H7!OzF{$)<`nPV|dyx>rYMhV`~McK1YfUrZcx-{7VYQKL|KiwaN`vz4jc2FK+ zwt&(yCy5tM(-jzV-LW{nEr6X7bOv5NdVU#$1Mum5q_9l(a|{{aa5uP}1$5$2lYwJ2 z#MDP}l+;y2efy0=)*)p;_@(Oq2caB=LPGMCL!z;y$WLjW84os!3HDku&yX0gY%k#3 zfP49sV-~XWAiBhy`fgJ#?tpqJ5wSWDGNlQ&up>ks^}YV*tP5T(ccG2%eg%5YUaFln zwaApsvSue#pjvjXWPcJ9{fzwfH!&mOrt{>@JsEwl#4CJrSdE6i`=xZ;n_l4wz2SMf zT@c#Xy*sa4MFPL1zKm0sNqEmQo#%`-4ao-HZ|+D`!mF>x#SKkfqf_g^7M|2nY!P#_ ztf{Pk^eJ7fs7x|0O58TB>a7Lb|1-t7D+&C=726k2v07g)6qPk&&c4&;4F2~)c%5t-&xoi zSikfTwVA|<7+(DYU-JzMk8rK0%_4b3EgneGj;D+Ftl^)Ad(qu7a!ku<{egW6+phhA zoV0ujZjA%4LDiF0f@8p!Q^3h2yqR_kqvN>r1gY!>Y0-+muffl+pJ%<$2$UXQ9BCnp z6#qiKCyV(9%j$^=OG85ILM=JVVl*LP9ZUqDk@oek#pq=l*pi-K=Av|C9HOQR#Hp#o z75>JrBpmHxwhDLabE>Wrv6MMRdq^ZuE{+NY!BL1<)T!LtP-c;n3h>TD+L9g9;_VzPXc`Pk8`T$F--gZbCD;&K#1j!ZFywY61NXkrzN_y%Dk-2oId(F%Q zfnnCbEqQO?;H7PY?>Sqb-Sei?GOk+EnRL@bfH9V2$!_QPYW|F~TWv*;E z7+OR~^T6@T(&Z%R^v7QkKT`kOUz~4Wv_B{N9}Q~FaFm^z`3kx?d8kN0;XzBzbp$`)&Jw3uuP8BUcIC?b+#l5?AO#6jdSgTSq0VpKwyg8r=R{wKVjV6w7$+ThqAC}f-~X>V&H%s-^vI0{?Hq++CwttG&{ z=e}|^ieAvRNAiFmk$zaz6iFnH)eg}@1*A!9vGSu$4ym6KV2pTDLJ%Kte{sIL zt_;G@UaM2)!HoiCSz>f{M%m2t$Wpq8OEj!7b3M(WbTQt_g@b+*rdVk8hLz%Ye{Zd= z#xnYnvZ~LCeO@G{LF}bKWWT!n2vjv&?LNC5R z!I3NX&+Qw9v574`4afQk;xFO@?k~>QNcd!lY0qJ5KX23|+pcaxm_>4~WpL0g0nH)W z>o(D?LPI&C#yr%4&m}IqVPY zmXzAbJX4^n1@;}Q1wJZtCP>C>u>*Fj%VT@K<~y7hi~W0rd?tyTJomqNKMPi8{Z}jc z{e@5==4<<+X2@ZFXW<7y{p9pV9_6GpBjm;9`mbRk1OMGO#9zb*++Uop@>23}ob?FR z_hA202bE=1OSSKc?{~N~w zJ!N7l31mB6tm1CB5b-@KTOYfhAuhjWmmKrWCdO9&r_ZmKCci6>ERoyANY}h~(@qo| z2ceq73ldUu@L+VwQB{qLTcBFkUwF8~!dvMQ;sR7`QA(o3HBe zndRL<-!H+l`X= zuq~H}9e=_4EUx1ar}@d4&KXn7C68PdOQUW&G3q@r@tO8@Ww7x@F;H^g7W z2i#wr?}$=`RGp~?`NQi)a_noPu1h(PD)|}fjHQ<#v9Md{O}-0kO_i>6GJZsw`}bN1Nt`FG@jvR! z6=T7A)$^$xa&92=_)bX2pEl9Ugsudvlk~YgiE`cH#1t2H5}i=7F^}1CUK#C_MBU zgD}p@;;mH!Bv0O@D1&)|@LliSBK~QbBz6i0p58bHuXPh5viue4nX`Rg#~D}CB3X@0 zVaufG(-*tyi}-;1i}UTb-`T31beM9Ub>g4B%}tdn&pv9`iy~19dk^U!c}1!!C&cf! 
zr^f$=`d%9+6B&csIsBs|WJ$o$b-kUVL}}Wctn_7^d@d-It&3?Pd1+gE%Qc#b))tSD zz_Tw%!-%%1*wG-$ZS?g)3g@#`zA=`keq0SFn)kmd3a30I_eAS8x83!iTNpj*jbl&A zE5=g9T_Kk0)>?1M-=Fbc{)T)A`33ko_%HY*_!;;P_yhbg{2TlQ^e^-o^aI{E%wNn0 z++UoplkNL!^~U;4%$mHJ@wXf4)yM8@maoktb5EqTT11tQ3IEf3cueC-uhzZZ2YIDr z;q$8}hM%|5n7SwaWe3Yh)OV9OS0zdAD_p(Xeo}7;^KXW*H zNLS={*OO z*h3qmKAk@?pP{dB`|R5pOhzqJ@&(KsNQB_&lQ|i;sgOW{PT$+J^iR-}j*8g3G_~>+ z^LO(-vQ=EKd-~)fEpP3Mysdc2lrzy0ys z)Q%=BPlBI;?|?tRAH%=FUqJst zpFuz1eZ&04e8Bz1`4(%2ujjeHnRy;2@}~RJN|rz1ZCZ=+9rCABDy8hOIX#`P(r#I# z%GnQz+212^f+n34NpR1ypz5}vBZXzQ)GebiB{D6J7(Ke}6gQ?pbR50krQA797Tg-t zKk|`t`<$N6nKnO}W__LN^rkbF{t#O|`u1f!^*-I_D!AH(u&(ON zIs|SwB5F(5DvJ#cWcbsC3V~eBrp`p~!>^RnhfQelhu;@33M~CEpGSU-d=vQ_@*(6G z;OpSO;FI8I;5*oCqRc5N zYjVV?`__~h9^})ylJ11r-t^OPX1l6YI<-Eq+RM8}cFK7vSsQzu=SLXW%>F5AetEZ}1n;ztCsU4|v}&e=#3$e{sGe z&y&9g%5%Oqe{$6g)o31Oy44l4EkV=S5Sd5kEn=JK_^x}#6Wd#8inFcJCihArVkx(M zv+rkeaq9(#!W)_NVSPFOjMjC;FuQM(>T5?j;|;4{anhH(jhinwrXNHK{L+(``lV52 zmDfUh%(F@V${)Y_8-h6BZ_Rzz_UIwi{ww-Hpgxcoj|xnV+nz!c+$3HPJdCGVCOXeA z>ZMY;8R@=fG#$cK<$fUkr9f=_~8D_EWV(gYt|B&on$D__4#UYGV}4G#O!y{shX zcMgK+)Ja$E((mgM^mouNLH_~yJo01Yo5^xpRiFr_xtp5A=09BkRnYmpn zl4P`;Ffg*JrE8);T+^yIqp_D>=}s5cq>K3TXRp@vB-{mjsa<30q&`>QtmIP^ZS$|W zzFKz|z3hi>&M3_qGxC^Q#4Js=OkGXcxJWIs69DWR`c3o&Q01pbyIxi<44rxs@UMtDi4bO z8TwJ^@1S3T{sZ!Pc8p~;rveS@Z8KZWbwzt_D^K)QGu@|$87Yj(GUDN1KvtUsj}vlPQL9^Nl{tr zD|>Xj@l zCPv})i-*k#^vEW~=e!DT;8R@=fG#$cK<$fUkr9f=_~< zf$xAnz#qfE!CyfCLZ3lD;C;jV#eBg1#rgU^p7}26%}gf4x>D5s(hR0)rm;zB$P9Kc zoM-D^Zy%!b;dDW=NfOm;S6vc2EGIS0Dlbs27dwl3w;LtfcFja7xMx4 z7w7w|!>J*|WG%C9?CC*~Q*x|9L+{NTDRUa6JYcVT{T}g%e0g`~_lwkFmF11WKR4*R zWbN;YI#X_`~>-a|)|%;uA6PWw|`z0VQz2kUJ{-|3Oh>WV#gv(C_6S?a!x zXN>7W>vh*(-7zER|DvCY{v`T+=%1kr@t}w&@MG)#(f$W^VaeDZ%ZzW`qc{{^1}KLg(Ze}F%Re}lh({)Ik+e!%;N`HT60`-}7CePa?Gttrf0 z_jxYbew&M#z2o6q?GuaG&9fbB<-@~>OZlAp3j?CaP($nY{VGl*aAUHzv{fFF^V8Hn zxhsM0n5Xf0g46q274_Hh(xy+8Z{M}nXt82)_|nR285NZr9sa-mI{MY<|DvCY{v`T+ z=%1k z*C^-HG)hn79+8r`?9MtBUn1Yv*^y>jLL8+It=sLinrIciGq-!h`Tey2-w!~49sO$b zf6-4xe-iyZ^v}?bLVpMS67(OC&m%uZzKQ$|`4I98@OAKC@JaA9@E!06_+$7t_zUP? z=riaCyl_ZWp3^Nj{avx!qOuFJU(k(HbP8&R6aL%3z*!ab z|BHSq`jhDQp?`*c6#6^pm!SWEd>;8R@=fG#$cK<$fUkr9f=_~zo^Sex3e0$xo?YGj-~JHxTd;qD{Q&gW(XU4T7yVT9C(-Xi{|x;o^mouN zLH_~yJo01Yo5oS*gWAlUQRDt=vxIZ&WA1D*fy7qom1dO|jX>NsN$xP-++7 z0H^o!zx^!i4`IIr`xn>`Kz|+mYV?26Pep$c{XX>1(2qiY2mKQCACS)@KSsWZ{0;dK z@(b{F@L%vr@H6lo@CW!~_&4|q=wIkF=m)%Sn7^10xW72x7qcuX+W6KoMX@^E5l6oh zl{{%F{Tj~iwz%ZJSUl85M4GmEg^3i9+dc0dYjXCsu>XerEbI?qzXkgj*bhK|9sO$b zf6-4xe-iyZ^v}?bLVpMS67(OC&m%uZzKQ$|`4I98@OAKC@JaA9@E!06_+$7t_zUP? 
z=riaCyl3mz=X@9hTQtWSHzYzOx*w4cL5cXTJe}Vk~^w-g^M*kQ6RP-m&??e9# z{V4Qz&@Vy%0r@=gW8|C2-;fU>zW`qc{{^1}KLg(Ze}F%Re}lh({)Ik+e!%;N`HT60 z`-}5kc)8he{}W;6`(^v}S7P`WzjKrM!h{6=+mFTmCiV-l|Azf6>g$qIr!VUx z_Xp%vx2pf4dp1u}5BBF_Gb@tzeDrAK?3G^6c>TVRwsnZinfUsOGBrv&_dPG|X z4A}mrl5;aYold0m+3rd9mbLaY>aNeVn0cJsV}t+hAI&_hcMy5H_0uGFqD}woob4~j zijsF9PW*@`cydd~y34ai=UHW!2*@zv z&mTD5;_Tt-p47Ol=pMr9A-*myooq&?CR>Z^|GrDK`HnxO)$i$Ixd@RyqfoM>Bvh+) zjz2xscdtBs>@GR?r7?Kuc`Cj3F12dP#YYt9`?o&fm(GzW^{8vb4-aJ|TbMbe&V;xj(<&hMF;#`m46YF7M^ zrH>~$)!}$K*CL&i#60Dl_390wGdE2sDl4FW)GEhxd}cCU{j2=M*9x*ux9|Ns7|6?T z-V67UxnuOA;|1;?x(nEtF>$NKLQ|QFOrhmnN-G$#T(jTeHtX2lmZ+C|W=?0R`^RikLX+0md&Va^oy7w3C>ip2G?`W?)5$6;TubK@1tL=MCQYw~B0T zm~Jnq`;JuJS(Ya5JwP6qDhSu>{iU<{x1{@}{-HvvUI?GK`kVe;E4HfjN(+6+?Nhwl zp_aIZiQb$3u$U@6@}GXR{}b8%rNw013od4%{pLkOa!*M0GdIS!G=)0fT=IE{^058p zp4)iadD!JiA%b}>!t5&fTsHC!OQZHa&~Ojt{Jj((wH~|?K3AIH6a;G~wFG;8r%)y>k4pJ}{9ChTL@vDS_T3FRU(%q68(o#k1R8MX47 zsKsro*_CRg_jDIeVv`^3XS>3Q{Bd8yHwMQk<^%38 z&bO!Z^JZ1f9{t0r7DFKhoaz_)xA$1wCAo zXK{F>fO02271;H8l)Sn_c4u#YMWc$t!;~xs>CpvehcmA5vkTcPj$cfh$vM+)TZ%`_Op@#QX(%Azb`741eWYO6C?JX9De)vR!MIY{XsVA z?3Zq#;SU{tmmYpgZQsc@cqV01$ARj_ZJRU6V%I;LBS)j@q5%H3L(}Yt;N`8KWA=E` z==s~CCstaMTN4%^3sX_PYma~$>;s*BcE@Wlyi0A3inyEtzHarvE%|H>XE#VXbqzi{4N%3>Vy4LoBq;j$igDCXcW9 z{j)y)iex0n6#V#7NY0wSC>mlpebFt>k7t+XQH$H2CIVJPboJ?^U4nAqlq>9Qy_lgt zDPOWL*Qzp@+|)KtH(-=xo#UIJtRD zaf_9X*03}yOX@x+dM;;gT$-}Lc=jMos=2sGbM6A>kIf^`*L|XFmB%fy4n-jr?;GYX z<^%38&i6)Qv&=mEoy>&vYM!j!UucMG)y6i3HnL%#(Vxv?B5a#;Z>CXI2jOY*)l1|1 zMdw}ejj#0eqq!^idkwRmk^mG5Tt(C%#8g}ha>n9V!6 z?VdH{5U1+XMiOUl6P4*@8AE?7>2SG7QQF^b%GG7e`F>0^wdmg&Gju4Ns+)h8n`G-m zLIv$jeDm^2=eZk;+UIb-%QscZJ8)ewNo?uKZ&q<8OIvt?2Ni7S-CyfoUX76>Z@4?l zj7kIOv@iA}btzBiF=yeZr)NxPJ9&8HVVo67`4D2Jp%O@hUg+;Se&-R%2tHntr6s`{ zE+-5sEMi+o>fIVj5t zt@l?`Kedo)D=pgIwPzWtzi#r!XD(A%^%I6K%@PMmS7Y|cT^-!))-!RXMoU}idd;=n zn*&}^3$5hdjhuNktYq!#wcVF+8IM>rm*tS8 zq$N@`?HQ@7zr4t9IFj)GVt+@s#gV?f?HgZ)xe*U}bxF-Lm+9jTmm8mYn~?ZimAk_dD9;2)sjl97-5YmJnS4>kBcYY)sPzQ z==ePcKhb$^D}Uz}aWQ93yEtz>R!O1HpdawQVg6!1;Qr!#e`|i5{yu6KWBnl7L+k1S zHoo+VP#VYK)zu;V^G;>#KS=583URvblF>pO4cCl-+T(I^YkNP7kHo3)a3aE9HEb zq*+nfW%2>~HzhO6()~i*2 z{;81cJu7cIYd_~ZE4lkWht2&-1+{2OfUvM838n;&fT zbM>PY0Y;Br7-myRiPAVpm)E52evsJPv>GBB6&fxlmPt?N9ZS}E&XRuXPgg2ehSS1Z zmqpi#S5cGJQ(@Wr-_wj4FYnEL&%-DNMXl_7_m0~4%I~!O@RX#zUvem;zhzI#;%KfdtZ z%FLYJvG4Bqa%S2LshhpROPQ?ohvZJE&ShhQ_v{msctQDQm6%j%@H01KG{0_35NFiJ zmJMtuUdEoioYCcColP>TzrG$1*I=JrdONyWb2iK8a!BOH=2^_9)sCOa7j)1Aye}>n zmxR)qvR{K2dMUZnJ@EVM0nYdM`n(R0hc05zX{4kE$?&p*MLF`D>Ql&fN$vJUmM^Jo zgP!YColx>`)ea*q`!cfc-^ZOUM*>ODh7>jLxO&dHx%x4j{Y1Z2y_vr&>WJiNMZ3+` zHB|ba-jPejNhHO7#Ink_4%YO3ale9s$bLOZ%72mnO z=uUbj8MIuQDtc@b(;S|qx|SwdY$W6M{H&#*_Fh9_ohGz5%PW% zIX+_%oBsMwcXs`H_LuUH(BK!6>_S6_NqcX-r$Z^5yo`3$P;HS}IY~wlq+>#6UF4Sx z`n{&OK5cFUDU;Qa78`v|4wW?T*M9YsHgAk~{+e7%ycLhd2i3nMqbfEhW!Je;o{PG^ zPh0be;I0*}_C`-h*!@cW3!%wWxhT=Y?AZh2qH;gdK*5c+v^Hd)Kbc7Hq;@UxU>oRM zbzvt*&hM1LAH%=FUqJstpFuz1eZ&04e8Bz1`3_2^2~HDJWxj<^$@uc&C%x(RM?f=h zfLz$psPR63fM{xzCp;?UWn#O{p5^KPqbqfLoOen{vJ<=Ae;Nczv)f;cZoc|hfSqGH zJtS=YRCYx~wr<@}H8~bEy83=Ll=cKGO>->&mku+eR z)M$uYHu3wo>Fl%6=cMEOh4bG7a%tqi+S?({El*NdO?``+CAUx&y2D6gpLlUcKj=%Tr5!=Xme1pER182%0Z0{R#F z4Eh1@8|E+O1MV-*ciT0sTR&H-G1DifUd%qUl$j**&34_n+00(+_L4rOxh$Q&Jnpwc z2T?uVQMJ#kgQ`3(544*QN*#&q!O9M;^n zq3YVu2+4}b3bxIyBJYoB>Bg(((HU|5Mt0j1NM?`UfvUI=5^vVrm|GN2H~v;KJ}>`> zF7u43|IryiyKfY`nHavLW^x^EDR-i2PvS*`$oCJ)t)+pdf)-@ag_YX?;O_Fcvu>%F*jtL__<7B%+YM=ebcfcRukKy0oFQ9*+&!8XhzG41iKH&c1d{=1r-e|q7#W;q@ z72KC!&+@)%nKNhK0CDv>a;Wn`A6aoiF;cv(o2tmQ&EOVDBtGoDy+%H*Bzp4lJ}n=X z%v@01bHt{d-f1a%Ry3l{p3m=4ckEGNY8PfrQ7B2HdDkv+I|x*hYoimnmvm!D9fv_( 
zGk!#uUocGEusxc5Ji4iZSF)GX4YB3gAfz@N&?yz#AFl1ltydfiQA zyXp3c;pul9a>*Zc&G~8v@@e>dX?q9BH{^on$P1OV1{C}ZdYeNo@6Wio94ajZ#Qz`{7&?b_L)Y{3%7E`%jTyf{#Y}$ z(tAV0e>Dnq&S)lO55>)wS$NaAAz79MXWx`Mu? zII8K}bEK9wT+7}zD>#NoMBV0zDaoWQUvgx8TylwBW7hM3&)<+;K`-aqKMW#&E9ST> zcRnRgj+D6xj(U^A*+jANS_JvLf2W(?YJY0(p6xYU;YHFN4}>W{2qab1cwx3eDV4L> zacrR0?LYntJ_&vXz61UMe+>Tye*ygqeFpu2_YLzG^8xo4=Q~??}}DM}9{>aJ{v+z}2a9!zts`EZOugfO_24X#Q$cNF{G? ze}6=`jYw6V?{{)&BllJN^@Du>(8&3l+(WZd$krnX>Wf9)$qPrF_D4mT)Ub(moq3T( zS1sN9yM0y>xe)$(uCjMBm6*^_*kWKuhBj@Fdg4QghOt6vK}J5c+9~W;yR4Mhne-R; zoa(0Q#oC`vZiuF3ZDQ-HNCMqh@V7Ym{ad24v(a8^dlbFyI5&K~!Z}(Ir6XK4yM{3L zK1KCiO#Y9rga3k0f}er!fIq+=!@t2_K>tFYK|kPq!~Dg3!2QMf4(K?yS@v#VMlHB5 zuJN49$knO5DUco~`8LY;rM$;zMb5T;wN9@oUxG#1+R7+muzK(0&yKkyWWoG%Ig3iD znC`_(mnJ@v!ki_+HZw+PMTFws#@PqAmjbgagH-?C?%JwB7Os<-bD4==M0 z&rS!dJ+{P?^bGBr65;=haNBgeDBoR3uimTP(pSsHu1y|06Wq{A zW52&D&IkyjCMv_uO!hx|I`ooq{iMlkbVKY&dPyx!7Em$1G5n9(^;dq~@qL^INh^qJ zOnXfq%urogw_u3OIW+NLOKm4%eVX$2ooFVyL0wvoJvLNE$8!D4{T@`==;4N_!@)$u z$nDqQhr6`uO^bo(+(@#qYdG_$sS#1u8{k%{ktWJP&pKC2N0E-%R@tga_7wRL@(b{F z@L%vr@H6lo@CW!~_&4|q=wIkF=m)%Sn7^10xW72x4;PrUnX!afbR{*kD_W2dRY{%j z$S5au=Vs35TAM@5>^lc78+~ZywGY4Ze(|s;+W1Y`6EBHK#rc1!m&!?=p5fbj{oS-A zKJH9XJWI~rSe@iC`#E(hkufd0HB5gS>PxNN_l>648P2vY&7-%awRt1yIGv%LaNzLe z>8#+kSpMICJLv;YjVq~bkLjXM`_{OdhLT{B6EQ^_yvSR<{e`hDX|!GbLC$xp2-dT`*%{}J(1~46U#2FARFYa&lDLrKup(1 zByapNN{@?_?$pube9y9FkEhs)kJP!=RPbNoFcoeY$tqkkmCcKNoFC0sNQdH*o<&;s zlcWbWet#=}lYhBiuM2+cB^$hlB<6)R(~7K?Aj_gITI=50{zB<1F)4FjFRGbFC(BPU z+$)z)+*fyP)rj|?*G;-iUTcR??wMaK{)t7<6MZ}{I*s&b=Y`KMCUa6r?9Jo5zV^PQ zx6ZXaP!x-!$TyL{As<410lp6Y3qA>c2EGIS0Dlbs27dwl3w;LtfcFja7xMx47w0RI z&9hjA3NZQ~A5INO;%AoD3ir*;AD}VWE#GoprO`uL=Gk^no)BJ%;FCU_--*fYuC-ng zKOxlN?RK3!OE5`=7?dHX2i1ypjH`nj(C$q#8XGw1eCUPd?a?6PYMShHY6ZsqRq5txW|L5!Azu=SL zXW%>F5AetEZ}1n;ztCsU4|v}&e=#3$e{sG6#s;r9SToF+T8njEyE40Go8R2mXA{Va zB`IolCv51lgxlh`cL!3};PbHs-3~PDY0J@?%QuNe#>A(P182!BuYau>rnwQi!>XTPlA^)-j9ub#&38yO#J>l#f zCr{N)aip4qm-Ym2bt4uROMM({DCc)lTHeoib%p*>z9i6n>=tPk+dY2bVSf$T=M=Ms(isfc%ttHky|J;e)qT; z**$ul>TNYAcN_ouzvjC_@~54fZQFc;oUfG;4m$5lxHp^{b9K2xy=RIi4Yw;1McFk! 
zU!FH1mrFB8m+kkZUA!w3tXEhO*=p~BuS>(Jhg#{MgY+5oy_E7Ox;2bQi!`kame@kY zJ}RY$DB2NkiKYW5gRcMAe?UHu{22Kr@;Br|$S=Ux!GFOg!Oy^Vz#rg`;osmdpnsvy zpdawQVg6!1;Qr!#SM3+eev~E7tpBSr7F{jI4BM`0$n@f6mL{ER9fA#Lsg88nO-NtN5l-&HjB2Le~ZPUTc1|6X!aiI|Rg^_T2PHEVDV16_enSE5v zYt}LH`1RKDF>xF6Vg1+vZaZi4x%FAB>_tC%RZod~>R%HYdFA8Hx|hevw=EBcSQ!ht zVXGP6_TFPOxo7Fkn*20!DNCwbK*)!pUxNMv@_FRP$TyL{As<410lp6Y3qA>c2EGIS z0Dlbs27dwl3w;LtfcFja7xMx47w0SPo^`xn>ITL;|3ZpXmNYZ}oq^oFv~pZz6v~K7{-Nd>#B3d=mT& zd2w5n>>tJLbL4OadR7`jqMm-eS);~*}=w0Um5w)Mu^jU`JTA>dvG{!G@!<|=w zDuSsk14_BlTV z20yD0q)K%BY6#XdlchE0E{{i_t@?+$i$ls6;A-@1$2mb}11V01c z0e^r$hJS;kOrCY1lARI%rp!7@cGS!|*fZiyXSFxG8!ZT@CJnMu(#kif_*k-a{Je*BPO{2T z?Jg6Vd2Y(af7S*R{WJ8V(BDD71pNo(^T>~pZz6v~K7{-Nd>#B3d=mT&dIc0bWA6wqQr}QAYh8&!Cz<0^yESfiI zSDr{fE!om>(tncu5K%YHI&qM{lFkfztY_^!L4KxAmf?^PcGiVx#VxoueU35A;vowR1p^%#-H2d$8J_M4D+k zrE+PKe-4s&CUd@ruG)5Y+)Cg&$*cOt9_GKm`S2u4Zzti7IP*oO;7CGS5H0^vD_qs-+(^+}RdIo_aZ)+OX4& z-bj5`v1G!XoSt4I_%Zn^wNK(5`88}r(Emk075z!{`_Ml_KMMUF^h?lxKt7NB82Kjh zH{?UeFTmHqf59ig&%k%UAK;JS-{3Exf1%HyAMn0m{$f7h{^EQK)DD$xG+fKX>knMl z9=%GYbljcWxHX;JOQG++{V3z``Jdlfx1S^*#8wNeak@xm@pCVY+SQ`IbKJjI;)4@$ ze&L#NrTr!m`<7=M;9^9h72@uXEzqG|&7-F?79S(w;}yn1)6@z2)#(4CpNjq@`hDo1 zp&y0*4*DhNKOmn+evEt*`5W>fW))(qtrBo>}&er?yU0)){MmqgGwUo0Lm@{$Y(k<&q!UiX3{qlND+6)4%8t^w-g^ zM*kQ6RP-m&??e9#{V4Qz&@Vy%0r@=gW8|C2-;fU>zW`qc{{^1}KLg+Sk3amsKZbvU zzkvRQK7)S1`-b_8`GEV2^KCsMIF$5Jg|Qqf+$8^F7ISEwwwTm*H}d`Cn9o=F<76mX zF`KKqCbg#ANptLN1?xiehK;y$mfwCBi}^+hI|P51^7DnFZd+*8TbzP z1N<@k8~g?IFZ3Do1Ku~xU(5&GU!1S4r_Ko1Sph~^pYKj~m@eJ*^MiX{f+@Axyd%8p zpd~FYx?=TcZZy@HzTUuw^WAyz5l0QhNrmM4)SbDk@guV9$^p}WRpCTRctPNH!z~2+ z7uXL#e;xg5^ncM$MSl|gKJ?Gfk3xS3{Sx#akk2DOM!t#s4fzoA3-ERDU+_urGw>bo z2l!+7H~0(aU+6RF2fS~XznBlWzc}9yL6a9toLj~uiUiBWiA`qLq)kmc$}Pp*uf94( z_u_ZzSNb4uB>M~TXf1tpikZTOG)~K0Dbq{zPapGb;AF%v)gPR1v-->V9n1giw_yJQ z`vK^$qhF2w?|=Q&|L;$t--rGg`cdfbpkIRi1M+#~$H+I4zabw&egVD?{tG?{eg?h+ z{s4ar{|0{n{R@2t{ebrk^B400_ZR1@W)d%H!X?D)3|VO@m@B}lxUOoRsn9}0qa&!k zP&HYkKhrPv?N7Srl|aR5(-yjLZJU7&>m!hQ?(FR&kg{yO^A=>MXh zivA?}edwQ|ABFx7`X%T;AfHEmjC>RM8}cFK7vSsQzu=SLXW%>F5AetEZ}1n;ztCsU z4|v}&e=#3$e{sGCYo2<|3{hod#yf6sy-y=N@^f4a-C}4Vm;L2CdmmGi+&R3REBa}% z%GTaxai2KzTdY zdF02)H<7;~A3}Zsz7GBiJ_&vXz61UMe+>Tye*ygqeFpu2_YLzG^8xo4=X=Iqe zm1#OQIHDlAf*E+fRm~u55>r}mYd}WgC%qW9ZS2j;UaEdK@#LHD)&K3kVLuD|L)dS@ z{sr~}&|gQt8vS4NQ_-J9zYqO0^rO(Q>BW}z(qd_*Kbo2l!+7H~0(aU+6RF2fS~XznBlWzc^pBSHJh) zc{Z2Pd%O2}xBEY`C3oXjaXLhlj+86>yH-W8zlr@q?7v|@3;RRZZ^8Zr_5;vgN52~V zU-VPapG3b8{WJ8V(BDD71pNo(^T>~pZz6v~K7{-Nd>#B3d=mT&dbo2l!+7H~0(aU+6RF2fS~XznBlWzc}9~4^BT0>tDjO*X_CWea0dN`^VUi#r`Jt z3$g!({VePcVZR0Y7uXL#e;xg5^ncM$MSl|gKJ?Gfk3xS3{Sx#akk2DOM!t#s4fzoA z3-ERDU+_urGw>bo2l!+7H~0(aU+6RF2fS~XznBlWzc}Bg{lmQqjjNgRLrP*h*6(5F z1P`U&X8w{{mD(TfXz;Pie!t%7GTEQ3?axZJ8&W4@A(=!N8qNyN3QnL-Ywx9)UHVK{G;KcY;L$;qx4hNJQfG;Z zVQ2V*>y<=dMfmvpUyE3`R{;lB#SN2`Ed~p&*6=gSIDhaVw+ef7)-kaydhJAMt!F4- zPzqi2wS3`n*(uEH3zE;%O!|pK#Oz5)iyDY~p8I^2znpU|Ng4m;oJM?-u5z6#DWD!x zqWi)f$2t4G8Xw}_-jg#QSGI}|yA$1-gn!nB!Q{ucqrGWkSv2D~F zOWAX(aYj8w z?JrAyzooCsb3`=f&!L}>S7k0V^rAniic@@##M9P1>1Yj`r_^uH^mzs`X|(li(lR6L4LInss3D_Z%d^Rhqv|0woP=_lu; z8`59RUCp{~jr-|bJxttI3w>GZI!toh?+K~83bCsV7uYX#>>x7&>{Vwy94B2qbziHG zP0+L;UBAB1g;e!TZ)3iI7wI^4cw&v$Te^07qQTFecjVB+TW@k#asGf~&i5&`h09ilEn-WyZ94s8%WT$0?9`uy;^M4<+A6&= zZ)rxRP3hi5&@|S+t$k>@pFDfy(L0sv;-$niTJ1gkgVlkVbM5mZNU z-x7V!{$W7TO_g++1RBa~pk>L~qp#M|;?*9GrdFGEzvdi!Lc89rHEHxaPo&BkcC_?9 zBze3hG5lIb2<8LsFV6SkmNh<|k~^6zQJ*&@W%98<`CerRUF@ejUN3e^+$PP69sXoi zHEjXA?p;~QA=gQ44uCv-Kyv&{K)7|;v8lN9&?s?umV8fDeD=v|U^j4B=FLyOB@-Drx zszh8+BZa!uX}F#+kD#{v7A-%#A_(UnMfGs`4XW~Cs@Z0%(?oFU1BDrdp7h2VBYU42 
zQFN@&M|Fy6IcYlbqBm|+7HQ5f;k0=FkEru->-p{9e@lf(Q)!S@2^A#~*DDlh$tIMH zB0iMJmPE8vnwr`g+C$OxYA>XnNJ=D<(jb+79mn@R{r-Ua_}s_S)$=;f$9Z0__o~TW zwocz^We&FLbl0klItomWUBZEk(AjL?Ew3{(T0fF4Swl`nm*%jiMplK^T~}hye)0G% za8RC^)evvFNPmoC|6)Jj{9?Y%<(cOTGxQjtocf{IFMnug+i8=PrJ}5Up4>ZM9#yuc ziKr@y@v)t$m0BdUocwoDKY^ol7m?iZw`^le2HhGYC-p7df%?vfy&V74ix_;d`1wU9 zl5<Pty_x$VoRXQ`0-U;>?A)qBi=P!Dtk_ADp@h35WY4qM%}U;%$f8R+twT$M z*`NO1GNntnm{-OH-=-%jFy9VYCjYTkXOFo4o~gmh!{C0y{>6U4`Ne#NPP;z#SKrE{ z%S^lRE|G)PJ~+q|cW?@;@chUz)=rL6WGS=nY+QYhq8w+&+?=-x=xu@+CPS zyyx#^$~rG_^HWDGl&(LGtev-L7eA|Q6c3(`gb=Yogi*Urkm{g%6xYv=_Sq~ zrWwv;dG^ycp{)*7?D?7~yOU8gLtXc~o8k%LKKI2^e=P+jHmx}43h!^S$Ers%El`2o zm#Le-L`sT1ab;_wSt$n-p}Af_-9d{rSZ8s5|J+gf$00C7<02Px$^B2#F?CTE`T_SF z_AmAW&M)RWr&8>P(xYw6be=NvgIngYryTdx-S{ZPEVSs->!_4ugWoQGP<81Itz9#? zW{+DwmA6~iXSTJSp1Ur6*0{HsjBX0}k@)r@?ef!oac0MQ)|oJVk)HWvQ)rHMD9g#d zxiC0Wc=-x8%#73a(rr~{d556uK64Ici(2Z-`=M>bFIHsT#D!;M##8Q<@!^g1x|83< zhvprm*VjIlwK(beNfm(_hzVG?Kl}i4Mmca#BJ}9%|WKswNX#0Zn@ft_=Ge% zILEHiAvlAqJ3g=KcVF$~`>EQ1na>N!pOv9E7d5^nHw({>-YI=a)(Z`&E&24DT2zH^ z8(zW9s{cN2ks#Mky(^3s*!=lK6_nJc|J>S5Z^TMo=HPxs&0e=mfBL(Ra7c>MFO%=4 z-*9zg9uNNVPoF_Q;C{pY#eTr~#e83-HqI58*v{njT+SS&3z!0HEw@MFCB!=_!v1R! zB}b;3+9}RxpdHV*v`#+});#4o{vh$+)&qf^*W6f8G*FTVLq7s7MYm)k_X}G;~#1h%J zn;|YoB*3TNmG@%C{b?-rD_w{&*rWNAboe(S7#xuEV}I62zN-?7jYL zT6BGE`E<6P9@Jm4$LnJaUG!}Bx2L?6*naz!6|t$3=y5j>U10B#NWR%EIpafgV$6}l z{pMe4?Y7f6)u#KO{)Ik+e!%^P{fqs8^NacNTWV}_zoE%E4Q%`oDl?h$ys*-!Gh<2N zd7IO_Giqog8zCb(`JT~usok}hja78j)$$`NuN2V%uP?DyFXyn|XLqgq`>CCV$qNS0 z$(LnYiWlz;xztDs;-pd>WrbLYJEOZt{pPX1T~?8Zv3y$jc<08%Sy7~$*T>S%JC~^5 z)Hy=aTFF3*ZRYR630l87%RtugH97Kpx|!{&`&6?bEWO+?idN~rymdC9f{Ip=9kK;U z^qlKMo3q~DQ-^Hui$<`pl5*_nDbYf9`pP?on`)VZIY(Z)_Rr2Itk^Zom+!Xe-jqGeE z?iS`VrY_2+@E6d(&}YyOxZkjUu^(`LG2e3v-K}K_CQNPiQ4g7|GnldFDJdp;^Vv;v zpKlQEculyDrUywGcao2Bg@$#DN+`FPD^umNhJE&3qRTB@m|dUvSl~jv3L|m1vqo95 zmeA~HpRbp7QCk0er{}Xy`r_-XjzxDfXo2BxLxJETvgKiivGQ;=74?cvlE}`bjoW(@ z6V^x4{y#g89@y}j-ejGJ}`l$vSZRsMK0^>a7Sf?=|m<%A7Lpfi_82U71f<;+ZV*-?*t<;hPr~RT^BP zT@pe!mi#A|s+>TRVwe7>^(2wDdGOzk%eSR7#1d_HvX_ZanoN4&lLz!`S%-m`+A0G7 z27dwl3w;Ltfcp*m7yAL{7xQ)S@Yh;@L7ll))O2X*%>tIG-c@KE+fVH5w=3laPGv8c z&N6+?H-q`AsczwIDbDsa?kothmSy*GUcK%WF3E7Me-U=+`$yuh^leV_xmv2W%kAl@ zB_*VtCy1*>^)m4j^l4gc8cg1W>BkDbjv@PdtOA1E^U0%^0VkH`<&z`x78}l>QRKyc zmIB^>MO0?#nYc$uA4o8NB2(f%NG+8(HoyB-O%)am)+tptk+MUkPopB+=rPLE7&Bw? 
zzgwZV>jNXgYTxl#;5@69#3$aoV|U$++~f2&+Pc@DT=-4|Gv>OILIJ-Do3l4);NjPC zKMK#&WeYs>>qjU3iBjYezIU;7(~7HO2I+QW#+j&|q7vtS{uurZ{sQ_J`V9I3_Z#*v z_5;o@=KD1~swywskg@rxZF#^tht{1Jlsh^3JL0*P13N<>U!nU>O*~7qP-EtmHCBH< zEXnLVwE3XRV#3bSZ=Dew^p?(_uX<&hY%S?I=DWJM+=cLR8J|tOn@g>h3>ny2xsvGc zinB*{)(~N9yD189t4Z!ap#3SUNA#?a|NLtb=G5o8^`9qSil|#xE$_m~?++Y$x?SR) z)OY%6-ROYajV?O6?bJ*0D{o0wPUf0N^<&iY{e$so6%wql9B;>l@J6yb;7jl3F==+o z2SclFzBGD5{^D5LLT|b-C$hNcZWP@d8(m-_9Y-aH)iw$7`cbnL4Mrh)Pw2&EOZWOL zawGPQ=5l%c$;3b6%g1Tye*ygqeFpu2`wjaS`vK<{^IecCVsT)r4ihk2 zc`UIHw4+ete6;z(VM;IMcgpUb|I?uLPshmB7c-Y0SDr3C`ThO3w->stdO*w`#WneyxJUj< zcZ4|{vm+f9LY#MX?$Exw>Z7~E90>o-{_odH&QLp}JGGXQKGdzXBI5c5RRX>P{s4ar z{|0{n{R@2t{eb%o`xpBG=NI$M%Ke>t@cssdo%K}EmUfb7$K#`eCwo-Km%8)+7U&@6 z%F%sS9ybxLT{3-sb86`0p8~%|iyBEnRs>THS8J5Pz7*jKL|rZuD>)qmyJ z`M&gozC&YI-2>XuP*`Wv?oVv;?Z)P3#Syv9nzu#Q*pW38{>^8{!im(C=>m_YI+L4s zo~L+T3MWjck-7g9mw)^Wdm;Ee`m5dLiNb%2*oB=Zvp>!aps$7kg?E@fAuq}j5~fCHkW7otsjmi0h~rwx zp6NDybbdtG=?bw9lDFw?ex+*xN$YGjjLG9*Yr_R4hWR_l9kuuTMZrOIAvJuGtM{6A zU8lOodZK9L_Z>HXRCtlkulVF65`4&?0L84es{W+fXv#EE`AD*CI5lkRt7wwYxM9PL z<{;v__}Y@sNk^&B7w;>74;j*}fm;Gw@13LIli+9IJKzuS$MA3P7tp`ZXV4G0->`qN zA8>v#-{HI5awgH+7@6#^v%(yC7>6n0#yd}_u#fC|cKExBu^v~BcOAMU#`s;jNCTw$ zX-~v}=w`21)GliC@{n5@RLLh-+Tu|KDKR-MA?f>?N=x6rv0k}|Zq5iUpq~wF>QbHp6{!EOw;TnHLmHT(U-?=?L0D)NevEMHhgIiPZM{| zY|;`-Ap=OifBYAG68sE&2mAs482%0Z z0{R#F4Eh208}={u1I{nzJGfdSAT5766E5c6nt5HC}|IcRJ@W-^%jg2-)y^0r+s^DP+j|$bn|H0H+`ZczO9%^Jk63p zC*!Tx3Map(v1N4pQ+^ixv2UQkRk50gq}a6a%_t}JYh9ayyYuPQ4@Wv&%WbOuLqq|#c+@UTyd+eb6fWk+zwoUkX-t8Go zn9TK6qxpH{+H1?MfnSECZO?~0j$O%gWd7H}!JJZ3XP0{@?fM(y8*bJjBhW{8Oz&E` z-`O?HY2B<8HCTjT|bMr<9nGTTJqehcdJ;exz$R`Jd5k{6wm( z8&{qauBDkPfBZPQtDZ=e2=WW?b?{&CN$@l99q-S=ujT>10Ns5$Os#Cz`bHB8cHza&RsK)pn4q@bn~K(D(S0!? zxgBm9ANDoW=&Eo zJR7Yo1voaUaO-F8zCpimp2pzb`AE+`a0;6KxkvU;7TkI<9(8^@Z$| zZAAW~`ty^{&*Y?}cIxl&JL>|;J}Idv#?+Il4n7L}s**`w%`Cfa9pgg_2IfvzyR(Nl zE8g|d++9Ysn5M$1TUuzl*=xt`cMAUHL&z_{*TH|mC&ACacfcRukKy0oFQ9*+&!8W0 zzhVDkKj8dgzHhJHyYcz*?r*K&q6?mRg-JB(g(4It1BP>e^Tei0_uKBNM1E<7=woR!PDaEf9NJ zokF%GZE1eGBc0d<2y86&X30f4s{`AkimCM;Q}cvVZlw8l9QVlnS{m)KYK0x=2U`1c zzqav~$?rG*FMmTmg!}@09sC!368sE&2mAs482%0Z0{R#F4Eh208}={u1I{nzn-+FU zaAU3_lYCUd|Hgl>Xjba0mV0Z;=#-jI=ME-_+2>Av0I`}X6 zB={Nl4)_E7G5j0+1@tfU8T140H|$^R2b^Eb_sGbWk(W-|OxvOc+XW-C?9kR2#ULkn_3e2ASoGnmv!Szn7* zcc4eql2vE2+Eg^aaI@rv!$d0W_U~gJm#Ns^C5Kj=J3|LOpA)$=j^urP(HcXSlf-4y z%rN1i2gJrah|%&s_b)$2zKQ$|`4IApe|-Ia{1aZoMCQ6&*(Cb9cMMPtP1O*#FSl{df+&ao|>fZ%rg={4uho?C59eBfL3&@@I2NnsZ0WrIrV@ zKaA%~)5Klk(&Qo4;O#<3*1m{+W@<*Q#plb0lwTo%_J5PgMeL}`4vwPL4=z#1CV5^3 z?w!;)JC)&iu!HKH2#fh)zm#^yR4Av0I`}X6 zB={Nl4)_E7G5j0+1@tfU8T140H|$^R2b^Eb_r`y3X3l07G2@H9pPX0ykL;=~rB1pD zq~)snL659PI;3S|RNI(ERtp#y?c~ZPD!Uz({es-aEb~L>ZzZOM>VlH|tZTF@HgS>+6J>nOC<7VEnyL4a9i{YHUI8xZ2&vB*6i|i3g zE+|?M{;ywx{sZ!PKZY0v7WVW-# zK4NoelU=UEZAyDBt~To4B@Z4LC;hm6mTpPiD=M6&sw1YV#2h@4x~ z{(W@%OLH_~yJo01Yo5yx1&=}_3z1BC*L}tlw=XHT#k~7EgfsO2TYMi9M zB>$KXxmjYvieJA;mJeTjwy@Bbe09Cu)F%~8-&gkj;r0rpwFTX!x87&df603{gy_LHhUT* zMkW61N1?xiehK;y$mfwCBi}^+hI|P51^7DnFZd+*8TbzP1N<@k8~g?IFZ3Do1MWBM zU+f2*U(7d9ZQ%EjcN$FF>o*R{-xe}y&&+m@S<5hWn#v72?xXZ*#(7l<%|5!P!SB@Z zlCRXw^yIG!nQT&XXyUxun*jQH)u*^Ckp)yaDv)N}I!M#@SH{U3Z70I@$BY-c`;pJ# z$HtcHh11U!zC4+GqA8EIqf6NB1ade{mv7MWEa_WNRY4wKBZ1pKyUb7tpy;2WABFx7 z`X%T;AfHEmjC>RM8}cFK7vSsQzu=SLXW%>F5AetEZ}1n;ztCsU54hj3f3Y8Melg#7 z<~^&Px5zW8bB}+YwOWwbB_h3i!_CR>+`Vhq<$s-=44JD~$i&bYtKW~jIChFS zTc`!ia+!SBHt@q;clRX{W0SmPUhXy8-XrDR@!%RkzYqO0^rO(<64*%y&=oi|qsU>I`S__qkVC5%$aF#6Dr7$S$D08!LL# zNVe(4k+@+`TAggFvUS-cACYccozroQgpJ#IOdGJJ=37Mfof*4Hyw25>2!4J*46>h! 
zRG+v{%sWF*M#fzsI+q6g6=rCYmGeYbong;YulmX!kA@=z{Z#ZP(eFe54E-qdchE0E z{{i_t@?+$i$ls6;A-@1$2mb}11V8)7cmBs8;E&tFYK|kPr!~Vs7!1={|Rl;KP?l>=Jl=T9pTO3cJI}}aU-c#};jZZSCZfQ>; zB{4Z$PFPxya4Fy1S+n=iy9r3vc29mMVyE8I z9{G9e==br|8Gkg+(0R8nTk?sVr07?p|BHSq`jhDQp?`*c6#6^pm!SWEd>;8R@=fG# z$cK<$fUkr9f=_~zFqt8v9?y z71OVxe#6?bq76rXAi(@2`<5^gQfD>$SJLOkH>C-}?ac*U_&={}=sK^e55pL;no@ zDD-#GFG2qS`8@Jt z8-r#ob0UdS!G|q8?P+h)kK@BxhbWKcn5Wh{BZ~JIcprfNI{MY<|DvCY{v`T+=%1k< zh5iouCFnmOpGSU-d=vQ_@*(6G;OpSO;FI8I;5*tFYK|kPr!~Vs7!1={|^QOF6 z@kUQ{^1DjTk*cqUX=ioT_o$kebj(F%_`2*f~&IDgEEL8N4sV`!~GL!uuh- zZ^8QuybnNs9sO$bf6-4xe-iyZ^v}?bLVpMS67(OC&m%uZzKQ$|`4I98@OAKC@JaA9 z@E!06_+$7t_zUP?=riaC+;7;w*bg|rm~UwV-+t@j^~~r-rB{!CM3BINJ8K2j*^}a* zP99BGdH>#T;(a0Bzu|or-Vfn@3*KMgeE|CF=vSlvi+(Ellj!%Me};Y(`a9^Cp#Ok; z9{DlyP2_LLhmc=@uY>=BPlBI;?|?tRAH%=FUqJstpFuz1e#8F7e!%&~d@ns2v%A5$ zjN!}Q_cN1!3CmUteQybwAb1~(_nUZMi1%-JpN02Bc;ABe7kD3l{yO^A=>MXhivA?} zedwQ|ABFx7`X%T;AfHEmjC>RM8}cFK7vSsQzu=Sq_}TyX4)_E7G5j0+1@tfU8T140 zH|$^R2b^Ebw_sNHwjC!ln82`k&El`QS-d~S`&hi+#QQ?Lf5ZDMydT2*7QDZ}`vCOU z(XU4T7yVT9C(-Xi{|x;o^mouNLH_~yJo01Yo5F~3TW zzM7G}ggxmuvvKp)`Rs_p-Y;@9C&vUs-ja_RTayYod+6HwG1ure zZdRuH^}*76zvzs7QWACh#n{Zf zsYffDm$CAS3%cKn&S8XnXQ@70J)2oA(6DQGbUstnelYmeDH+z-_pF-PTbLW znd}q9$Q|EROoS#ri1@k`kdNJ!d!?Fk>Di;j@8-RGOk_0@M^2Ok5%){sFa1Bo()KZr ztp)j+R79g;Vc^0>I%nvOMqWoF!F)$tB+JFS6&ODq$J3hU=(~2iL+;>KWN1^jqNLAeRXVmMqaWwJnike=Us~e0Oyot2k$MG4pXL!wR*J3>nX4 z&$U-i+>jP#^R-gXSiM}xu34d6JymZWvxo0L>HfIaG@n~bGB==`N_t8fPII2l?mZa2 z=Ze$-T`M(f#r>*pG)JeCoqxNTzROfy?|LSYEQ%YG4xZ&hxVES|%@I9I_m$q9nJQRE z+GM_Y+&WcGD~~2{&esShb$sT0hAl;8`=ju&V|}Ug&+H!y?%AeN*KOj`6O-SG*B_Ad zY}lGZq&M5PJWR-=>WjOLzt7JkURhS#f66{3Cad#4#)z|Y+{~akX*`uG@T}JJ|3hh# zkeZsXVhO?d#e54YHuCDu)@5e>Q1E}v^)nU$;)^=XtyUG-t|miLj1R{iNWJp0I+V9U<3A<-OF2~QNJXs2_wdQzoj32*&h{eL=Lrdqz7CoE-LanYQt4@gO?>yU z~Ot^jE@X-#k7x!(4jJxgYYZ;M+rAdD~|)2i#=%dMD47icSlZa>31C^3qC}9oYy)^x@C9xa^%&~py->VTFH@Ue#l-SHTnIq z`huB>2C|$t&}LnE_G19ZW1|4w0Yb1t&SCB+}yl=h>m?E@72%jd@1{x z=8&q@pXWX!4Zg#wx|tJnP_R{d5kCjB&Y@UDz`c%ImWEB+D%D8^Q)bSq*xE|o3s>we zyqG}8L;llhPW(W;d6vDW+`owAb~0n#jS)Jw+Hl*;$-ghjDN)zNMr-K7Y1wqtwV4<2x4*UCcv&Rfb{Rrp=|da z1AXL-!m1lCru}q0EAi8u$=|1XndE6UN3x9QD19ZoliwYGa9tsJ_&f*WeX3+ydEi3U zgY(zk2PTVH!4<}erv1Ojg%c9CVP6O7hnJPRQiAeGm*3iWKh2-?lF#0ck+nT!UEkeg zk)N+& zv>)o)INVCs#x1uoc-lwaJZ+WP7xIJddQrrk%)X|3ckGD{pWR1`YNY49N$MxK->`qN zA8>v#-jK?DsN<`&Y{}So3#jOHG%I)7_V)M-EjFldB1*rth_Gp!YI< zs};3$(g>wnS_>1uklu;Pldf$Qbam}WVAJHkS7~3N;T*Yfx-v=dWbVTEr2J}V{BHyC!#m%y>r}ek_ww;LWA*l-kz4DBzV zV=p^DQ!^3nrm1?J|MUayH|$^R2b^EbH&xNn{n`61Ot{nW8?t*OnLYdh3-UDoP?rLq ztFF48q;0v;s)nU6=&alE-wglQ6K1XW7*Mp_80C^I37w&E*5O43hAKB4M^QCzIJkYC2wzV@8s{Czn4_cSv7~P`6|g8$oL|hhNmi@J=9(V!IkQ7I-{un; znqRp&<;y79krHeFs)B=^JACG1GG7;&$$nE@J^5}UBq~W=IHi&fBxt9b&#b3~O4IuL z@7B|Rw{~-79qUN-ADdTQlfU;_TfP0DNlGWxe5K6U)KN&hCLG7p63fVmD=ljzZto%f zh6;y(jzqPwnHO9_nMk}cFhdp7bnXs ze|aadvoez0dHVgJ@ugHMroE@<^0r*6=hMH#a{6;(n^Jc*&8&ck%T4$X4%?A%Evu}- z*1L4i`Xrs6{jNmCGR04^a)JnMbGg5E>L|79@|9V8@*^3W;#_>>sVJjh96Xh~PoCi} z9tkL({Ekq}7lY%2qLQrk!c${s{s^!YDS7w%S4y#VH>X!6zn;aMNUe>r$X~?1Tw}XR zRak=!DrSr-3v)@sV1Tm8r3SKMjd+&Df;xKOcLx8)j~TS=^~*!!iUCAw#~yj}smbJI z)@AxeI-Y83u^V!eb0`_QaHVwecc|enpnsvypdWC*VgF)3;QV5~qqSLrzXS~!b-Fxc zbt50Ud31`=x(*&j;#l?bfm6lA%5$KRXLTOY-@NUN>$O|NASANw&3l%1r>5DMn3MA)qiX2T72gM2<&IPP14&3mm?m$nVk=z@c4FZI z<@Ub$Z1Sva_kGlEn_nU)59h$4<_!rh!&o5>@SfmZ_p zKM0@Od|U3P-{=8R1<}m>*F-kKdf|JnYU(t%$33$*g}63IM4Z$wpkbP_qR&Ed$>P>{ zsg!`n^n{@6n!39zO+J#bF0v_|96eJa`=oNRS1Y%2_1m*cn8<)~O&c3kcI(WA_57}X z$%a--gRx8FED*ui?IvG%u#Pys+d<_j;xiXV18`_*STeY6JE;UtnP*O@S9q1WT 
zsq&!*^47l#k$6NH?x@+O`hm2+-JhOuCweD2Y=7Ro^K^k!o5hx0L7)O#qZ0k8by3sD>SPO&TJ5)bNsnk?Gfhblut#j7Ju-9b>!W zFT|~8qf}4VG-XS&?c=wen5ax+&z}tlPZpJ8!5`p{;osmdpnsvypdWC*VgF)3;QV5~ zwc<{GyKieT=1&)S+5XUEHeBSYA^iO$V&1Q?z}f9|k;t6%rJf_ixM51*rm+d~%ECrg z|G(bJchRLge!6n9b3&(!$Zq>W7Ou+eF2pY(zGeWgy2uKFfMLMFtCbou=Fo7xaUx-CechkxL%}@Y&SGiYB z?TZsR<8n6e{mo>0C_i<^%nP@PX2s4u*Y|`{*^A2Vb?FIIZ?}V>ny?KCymTmW??ecF zdx5{U)6tE#fBa8Aaq@RbtrQ1G@}K9>S^6=rhLa!Bd9pj}Lk}d7_ahIx0$wH%HPcaz zT{Vdmd8!_%?1YM}X40c?c=xOFFj;hCi;bo2l!+7H~0(aU+6RF2i$Mizt|5rznJfQ z{>2MIk~T2JLz^!v;w4q_zj6L=%m8^Q6Pq}@WSG96ryiQ}-w4_IgI6kO`*e1d^AnzszLk=~syfS{Bg#Zz6O;-aV(nZ4vLg^$W@U z)siMkkzS-?SpN7?x$ETFLE>Q}5ke;xFkXA_B+w7N{AIFBawmJrQXP^a;pAM{Lggk$!3NPk&$GL7(-r0&;riJ^Xm1_xYZBgvkUZ$1Ibp3o!1ZtJqwWzbz4AD(P| z%M#KvaGFf&%LTSCPh|V{zZnp=oq4K zmrAfc7uKs;lkIHCx~$OpL+hE|HFefZOb7kiwxjdwx=P~pvUB3v(zmok^t-u>NHOIR z-{^c+G?ljA<1O3ty_}8-%$hYZoKAKYe|&Eu{eXV|dt5ZGB8R-td!{1hl};?O*Ktf; zQAST1e5uGk6;1x8t$b$|97=qm_NL19-lJtdOz!+@xlfa0I$w%%hSRoz@0niv3uvRN z$3lZADWvLWYmjhUG~x8Q^1AC@2=Q$BnYkkNG^zGExuB&mjb0N8{m?kslga(1)q8Zq zYXZIw{tG?{eg?h+{s4ar{|0{n{R@2t{eb%o`xpBG=NI$6w&mP^A-=1a?#-e`g>GWZ z$f3;-+A_77cK+_Hy8Wu`*EyeLGX?7DMz_l~Io0u`>{VW^aefh*Ffa<6wJein*vEg~ z(e#Wcsi>4RoO?Hi{hI zH2wE>3peU?`_{CTQCF#@m>0FTGNaXTA2vAhPX6Ap+i#wO5~k$c86nHzXSRf=ypD^> z2_e}wo6|SR+0tq8>sx&$-$fW#$&}BPJVusEY_2T%1nxU2>Av0I`}X6B={Nl z4)_E7G5j0+1@tfU8T140H|$^R2b^Ebci)B&nIFY>GF9D;8>jU!Y{^w=@iR*Y$VZz| z`;e3ERBCbYyF*i75ObRl!?*7%N$IZHUtgarBo#9}drlixll6_C#8fUnqnTBmKIE2 zMzwyPyEBrm+8t`FHLsG0^JyQnk9tm#zabw&egVD?{tG?{eg?h+{s4ar{|0{n{R@2t z{eb%o`xpBG=NI#BY_S>m$fe38&+fb_8uo;|t(qxYzVbHZ+AL5fkXl2_nxii?rlnHx zH$&PQhK{XExH`NjXdMPe+pHQ-jA&7lhN(5i*%s7LUoEhWNcV1>8s_$B=o^ z%4`}_9qH+O-JjUKJEh{d%7@N8xAUCF)(2$bQuW^@Q*5aN$9?8+dNj#Uz82f}Ds}QZ zbw7E+uX|Dch{)T~?{Cv}3wll|EpaEx53gK$s_jOM3=XC}_i&-BmK#f`&3#4-hI3>i zhVuyWP2_LLhmc=@uY>=BPlBI;?|?tRAH%=FUqJstpFuzTyWjq|f3Y8Melg!lC*My; zZPb`wTb-w!U8=zZ6b4!j{{2Jdo|r24ZEH3av5no$R}?{S*wj87$y_fwyO@7afyXM#K$ZlP#m6Tp3AZksu(v6KDTz4VQRhyzeYh9w}l2+)( zpFd8ICZ2M4=nWuta?|{Tm!GHnqn(4y4|iI`^jG=k?jbev&ENcr@g>3LTMku3Ig`hM zW2D;TF3q3&QG@HlC34E@SZ4L09d!#fl+M_%N01*Q-$ed~d<64*%vXr6b5U+s#~j?OuD_6-%Cxp~pITaOMP2Tw&)!uQ zL2N%b4i4|SLu8om+*5^ZXj%JPYwlBriPPEFtB)RXCXXI9Y_ckFB8wh4zN@ff=;Dd1 zy+XdniPrlS_jal7qTVy692b{#A`49xj6`g4q*sfl^obT4(oOneelcCAY3%B7@mt}x zm3w$;b)m!nXq6L{eqHzM7BozkD9~ zG4f61Z^(y`Ux2TJ|AJ3~pMmdyKfoWuzrkNX|3aTZKj41D{>6U4`Ne#*OqL$e%b$~u>+m5%v-I) z@G#+(S*R9QaEp8|3hTWXzk}#+tId-ybEH4Agl44k-ysq!o@=b^JWUp=D2r@aeU19- zRB2THbf*3*ls&~3I!yAEykqAcdee=SNBPd{9ibo2l!+7H~0(aU+6RF2i$Mi zzt|5rznJgxzZFZSD$QZI-!3&wdcn!Qj^FH5>mbDPid~D$$<8KuwGYOa$?tX5aKyB8 zKMJLXJP$O4YQ>XHGo4IIk7m%RD$eBrX)z?=jOMj7qV=@0jJ?ys-9X}EMi?HhXH-4# z?E9Q64+&3?q={vsCk@j%ts$>;LPQp#Ok;9{DlyP2_LLhmc=@uY>=BPlBI;?|?tRAH%=F zUqJstpFuz1e#8F7e!%&~e7hI#*=e~|p82BJ^zpvqVkSwUe&CZpGHp5`c~NzS1^IF? 
z>4`t@HG0}W(!Mk*hJG*J7n;5-k5u|z`a5&dN4BZo-{KTsNB*lmaC@k^f}ZK!zT{~x zOa9pUxcG!d6TkWVMsZg%h~@iX^*Ix9v`A^kOQqf-I`jLsrd{W2=-R%$?pZUMh~ufN z^D^(>C%bDb23JO>lVdIxkH42>Q1o}uFG2qS`8@JtPO>k=XhEshLForB8ngDSo-IUX;$3E0QRwfWUxNMv@_FRP$TyL{As<410lp6Y3qA>c2EGIS0Dlbs z27dwl3w;Ltfcp*m7yAL{7xUe8=Ae?5zZ4_lVLDjDA;RP@P(AI@BgQ_FV*RWoKhp93 zn9BM0f71k`Ro%P`CWxudoi!X^UeVK)18bglHPbC}(++SY)e&bi&x=$fm6(*T?viPK zKKZ?lgt+nWY)b4CxLOw6p{AaT_8-o;MOja&3!;Y?(w9@*Bc>nsCR!8E?`|^*B>G7O zqdF0H|Mkz%k3xS3{Sx#akk2DOM!t#s4fzoA3-ERDU+_urGw_{%{NaE882%0Z0{R#F z4Eh208}={u1I{nzd*+kMZ@=d%%&kljKdF)N948&Wz*fbh(>Op2l@b3Y?Ow zIq#~{6RPX)W+&ewzx#=5Oo}^MrgXmf)j2Oxc`EAL;w@3+GC#fj%QN_2zYqO0^rO(< zLB9n32jug}kCAU8e?vZm`~rL({1H{mpP?Ux{to&j=szHzM}CZa6ZsqRA>)^lOli+9IJKzuS$MA3P7tp`ZXV4G0->`qNA8>v#UxjL$bwbBhGdd%e%VO0RGlqvp zj-PduVt8gHC@(kZB%FV?R-S*>ME;XLpjTB;P6p&drpNOKl0NsUV{bR#AP$><4N?9m zqPBL+{4`}Xa==SFT7B#iG0FDrIwih`v@Yf{(_WEG)I`om>MTkns|-H9s+9=;*H1-% z68%2(&(M!Te+T^%^dFGVBR@vIiTn-u5b_J~b?{&CN$@l99q-S=?;M^7Z)7qSGvZh4hx7VpG8#jEuk^hK=`zndPOsg*P|w1pt&&`Ibh)37 z@KF_W%KcpAqu!K2arv0RP-m&??e9#{V4Qz&@Vy%0r@=gW8|C2-;fU>zW`qc z{{^1}KLg(Ze}F%Re}lh({)Ik+e!%^P{fqs8^NaZ=nqC=re_o1Na$05jg$-ORk3fp~ z^+N-6BwQoF{oN<}sNOtqhD-p_S`iX0TTRG4!7@jU?DOQWjlEM-k^{Z@pRv9Zxk{Uq zo4!c5+tR;Mg-)4$YD6aKQSrBk6hd@oTl8w}rs!9r|BHSq`jhDQp?`*c6#6^pm!SWE zd>;8R@=fG#$cK<$fUkr9f=_~SUDlIcP@6a;5?ce(V^w-g^M*kQ6 zRP-m&??e9#{V4Qz&@Vy%0r@=gW8|C2-;fU>zW`qc{{^1}KLg(Ze}F%Re}lh({)Ik+ ze!%^P{fqs8^NaZ|D6XG1?fL4-Z~KcMR(NSmn%;}von~!E2PMUit-pSk+!R|{8RC0` z?#!@T6tsM@$L93Ze0kU%;^XbRzWUS+a>=X4(E8C1+Tp!0{MD|j|K4BVeE|CF=vSlv zi+(EllmGgC|LdQjABFx7`X%T;AfHEmjC>RM8}cFK7vSsQzu=SLXW%>F5AetEZ}1n; zztCsU54hj3f3Y8MelcH<1$RG>PwGL#Hs17C2bZ$`^Y-**M{qK`rrazoURO&wNaOqJ zH$_Bdnx&dtL^>^gtv27@{SUqRpf1ZmX`JjZ6MVVxWEsW#mj6f8dH-|${$D&1CCP|F zDwT)`MM=7zMMF_aG-xQL5)Gw2(6Evfii+$V84bCfR#wOg$rg#sLPX!!51;e?{tMUb zx;?IQ?&mz7PwZb{KLGu8^sCYTML!k&N%Z^BKSMtX{T=j6(0@QakNg<c2EGIS z0Dlbs27dwl3w;LtfcFja7xMx47w0P{l-}f#F^g$)I##87XA&d+=I^&#*CuH7CJUzY zV?B|WbgAQcSt~8s;QM_aS39W?&5In~l>gs;7WRj*--7)M><6H~j(#=zzv!o;KZ$-H z`e*1zp}&KE3HlGn=aC;H-$ed~d_GoNth1kxjRN7&Eju$-uO3obD*RyX$h-Og7}YlgymqCX$nKc!+1)ASb_ix40&f zo5lVc_Oq}*g#8xmUtm7~{dM%K(f>t175z!{`_Ml_KMMUF^h?lxKt7NB82KjhH{?Ue zFTmHqf59ig&%k%UAK;JS-{3Exf1%HyAMn0m{$f7h{^ES=3(7km=@415Rt0sa{N4gLc97y1nP0q+~; zFXjX8?|=BPlBI; z?|?tRAH%=FUqJstpFuz1eZ&04e8Bz1`Tp>T(^ZksWP04DXEbh*Wt#mSbe42+`0W4o zW3j)9{X*=&VLuD|L)dS@{sr~}&|gQt8vS4NQ_-J9zYqO0^rQak@BFV{g8l>YdF02) zH<7;~A3}Zsz7GBiJ_&vXz61UMe+>Tye*ygqeFpu2_YLzG^8xo4=Nl*8IWx?7F%va% z=6HVvKZE^a?8jn%6Z?hOf5Uzj_J^?Fg8d8Z2cW->el_~P=%=DTiGCmYXXrAv0I`}X6B={Nl4)_E7G5j0+1@tfU8T13*H_YGv=EMK)FU~i- z{^SX{z`0Dqlr|${9W_R4>zU-SJ{4wljF%d5^S%@b!=yxnT;TmBdE-K3_os^u$@ zdHuIsw||hPEHDyXnK(q1T3!T5tY{?xi$&v9#ilW*XU1HbSyDyKzB#W^f1}7$X@56O z378;UO-H|U)`_y!L2kcxn#i+t&s9$FjZ~A@LRR_>C(6m^ImhPQi7BA3E?E{$;ru6Q z&xnCX%m1gM2K%<>6ZgE_|oURQ9*;6!NFSZ;o^YjfYXe zw1>~m>Bf_J`{!7iZRdRNb-lO8h8Z7ditL5G-R5c3HB3KSj4O!r^siAZEKQ@P#+fGr zveO98_x{@#r|mbbXT+>B43;YiFj1#(JTQ1E#u_cpskXCQ#Ga-y>w;USGB2)wTxuO8 z%4Rw3&r8u#W6v$?cb2&>#jX*)E0&`$K%5mO6^WR3k=?2m&*XcXsAyxzo*Jv^>`Rm2 zOkZZ5q|LxF0vSP(9?ud3CR=)L-|J#56sM&{z8|wq+vojg>z^Jq#dP&IiV8Ih! 
z=Kai)2l@-KxbjWVLM>Y5VLxTd?p0-IQk@CR6;6e!RMG{(6m0 z%DmH+3*dc5&b`m`f4ZQTUcC6$_e)D7$$lW|yZ$ogKQ=;gG5YyoRJJpJ;*||c&s^P2 z6rDa3oipFR2d^rlw#}D%`#z_T;JC7e7n>dtySwqR;!i?JRf+k&+kMdl_ZR2;$1WDd__ z59zG=(Hg<&ukBTwJ9eUl`n_8`t#)r2S?IrZINA6M)isa4X%}8gJ2yQ#>LJojF66IY zVrL-EoLr&4SeWyl>gFiPy}n!KGTZWIUfuP(k~C>QTX=g1Hyg_eISp=Gz?Qt+5q0T8 zF70d3<$Yg}NQUB#=80U$Av+Bd>i>C8VPqncjk*kflV0QXr`q2P(7rcWwaYJ6kbbWm z0h8u_Vq-Nf`}}koIahvhht;W0`nm9gPtNx#>^(te-nNo3n(*Gc|6pbsxiP)+*XpNz zWLV~oq4KS8qR8=hQKr#kK)Nh3N6v{{c=|WqPx>BNQ)s@wVdxW04Qh&&?N24+8RgHi z#a~h@rM1QP<8!EL=G!2QMfdVAlRVs56(TsEu{>>8UO85ifu z)wgs0Q`@**|LoNU@_qhn=Z15f|6U#u>zI1s8=1bhCPG9`ksbWUvrsW^5_5C2hEM&K zAzHrutWZ?rY*sr>t#wf|7prHId7Ycn`_wpmT~4lS5*yMJl|OY^GjUkQQ?JQ6Z>s~o z_A4L!rQ0WKjU}GzB89^n1drx$Gh0ocrS4MV^ysrgD(v!_$k@s!6Vc|J|CJhZ89kZG z`A>QGUjMBUmBjq=3f{ffrn3AIHIt4uWzz+V9p8MI{egbHeC66Mu2?#I-NNKI&m8E( zrS7k{n->x9%72Rt+wrrTh%ifC|${n8tUTuPQB|)4rz^T z4qX1NfMWh)KH&c1d}G~i%^1of;)|Ua~Ne!z*{)(6@L=E|)$xn%5@GMxPPVT6B1VL_BJ|F~rLfv4FVo znu%mG#qbzyOh_OzauxTdpN=Cf(Q9{(%*!N#)_3Tvg`s58PmR&h6hAtu>RWnCtAXk+ zt}A$HmP7}1=9Kr?{3X}qGEE%Y2dF?}`Q7|$oO{tZyf={ZKe?Z?25-+yY$4Kh)q`=h zJ$Z;h_Qcx0cL*RTPT+n0CZxNZlv zl(JaoCp18BO>0T{+wzHij-2;T$DE55j94A0!`TB4{Lo+`dwBut@Z`f(ZJAbb_M+&6 zzMes<;4#6!`>gnZi6)R$rP0mO5i)!p{qG6Fk>5uvc3D=*?7N0V|&=m{C2j|F# z(3rE*?!2dZNNVnT<}Lp>@;9uoVqK62wOAw}>uUCh7-ejKyw~YHN%(SN(U+oNvR$|J zc<9ZyL^IJ&Ctz+6S-2u|;K>t3R)6{oo%x{)nYvR6OEz1{u>Gwm(uE&qu|ACKi6;v9ehHYTxF^+WqH&3V%wNFcW2_ieuIr_w-bdv zgMPsKhWU&6fcuN{EmyQ95A8QGxdp{mbb-vSwMF*#u)eVq)1GMXyduO@K=P}OIz<9Unz;U z)2(cGFma@G(Z_0k;Ni(_JQgcd9=90)Y?XyKD z&0(88Qn)K?#aaHJGhC&Yin2qqB(1(Ai?TDOozxtP7$m{xwtNlm2qmO=g;vV$$JFSI z*RIX$kC0p8zhzG+!pgR72gWrRX{Uc_jzq(J}8m9;#ao>SkTvX;(k`ZY|+ zdE56iBJjm}>AdNwWVYf(eQEbxvQg~%k)39PH2RNB#A)4+#K){YMB9#seeuWdj-_uG zEiTR2U&8f|d{t+b`3H^?5n;2Sds#7LW`{`QbN^^E#&q`ZXS$Kq9d}Mfy9JS0kHC== z9dF6K*`~ldQ^%Iyh)re~!`YNkjBtXW{#c&fRpH+hKO( zraEhQCU~k_mn^%i=v=L#r!=!If9Z_Ls-mo}?ZKAqnU%y@?TpEiN&oNP;4h$mq0gWn z@V;UGVm{#h;(V1G{f=$9zLoK@V#GY2j}fyYdbz!uc$ngrC#FH!Gufmb*@EwK1@z5X zo!j>Ot@I{O{+7mP`D9J*QsqfyWi;m4cj?l!3c4zIbJ*|Re6lnCxt+FT4e{Jir50R# zl_nZIjXYWvPgQr?>nqB{k#${>-YQ(*h*->=w*ALS=+XI~_gi zI<-Vam~w5*4`(0a4QXzg#BU4H({RquK-<4mV=8kakj8Zp__wwv}OOD7M zzav;Rg_Ss}INOkpk(r5cM%I^~#5_NgMr1fj(GUmXG%IW7ghdaP_-pKE z>m$qN?Xc)It6j_{y>XkN<1Wo)yxtmA(=NJs;_-$!>tnQsF#n4XzR&NkH=4``LtE#a#At0hDj_cIUhpT4TeW%-n~zo zj``W8ugjv9*M!YnTpm#i7pttlRTVTb%q6E(`wO{v()PRbvJ7hEAH5Y zYcwr0m0zYTdXA1e%ntuu^_sYBD{4M7o6`4Fwti%;g%R)_@CW!~_&4|q=wIkF=m)%S zn7^10xW72xovh2RTq(kw`$q7#0{mY^-Y18H$I)(f4!C&_>$j7nWC*}&J z6Yw+e9qt*_x z#J0oZ_vPkK^v~*?k5ao|61{sdd(v~G$k~j&AG}Pf$hEJF&6loP$b47UnY~|-n|U*M zwrK97&8(bR5YN%zMmp{*@^IJ4Om?p;ueY%JZyI?xOh|c;JN-~S`LU>yKTW*5@SDHj z36k(6?d!1h3(_Fp_HooEgKR2^n0bJ+5hwd-Z#u&%m>_GoUhf* z)lo+NLdzCBedAbndVtEYw~(g2GpzX6>>dg=AigI4|}boI8|qJlP@?-<>?s&Jq; zn+hD5rSXEp1!MW+hv#cOB9ANf&A#{h0y(5Hw0fnzH+?P?VPvo(hdRCTnQhyZO?Ym5 z_Y}B3qwi*QKQJG^MxQ$Rr^MLaAuHTdPxVOpQ1D;yN$@l99qJJS-kW z=iSZBcwMuSBTye*ygq zeFpu2_YLzG^8xo4=j*+6Nl^5eeawrw_x7p9%CTA7r%YJbu3%eI6&D08*v}@bFG{X2 z&7luMj2Gm3e5U7QLR!p^=8&{e-tP&ujih7ScE7}%o4kDOY2&G4+P3*8@lO6t>PPr~q=&YIdBLKXg;J(7CpCAC;Ne8X&3GMUKz zOb?{&CN$@l99qKmb|yr^vQN6=X@+p%AH&7y(*DT@a)Umd$EZ4J&#rwFwZ1wz83o2 zQcNVYn^#YuJC&rLe9t{?X*o5hiaN!+<~^BmzMT2J?*hFjyFGXL{+A@@$q}_mQ_g>3 z8;!OdseMANQ~fOknI~kY0-y9Y0ef=rZQQPX=`ZP&>5JQLTX6P@lzn$+xW|ye*@xpy z-ajVix+aEm<&IH{<8<1LYEDo3fB6vd3-ERDU+_urGw>bo2l!+7H~0(aU+6RF2fS~X zznBlWzc^n#cMpm95eX(OqDD5saRtLS-Tri!-y>3TU1Q!YlT;GXa$?q+z6iSdg6WO+ zyOnfNw$seBhCy`B;TcJXF9nn9%N^PxymF{kLh{GJPhY8_nS4j zkfuBwj`Xj2QQEU`O2+@aJ^gL#TQc+c_NVLkc9Man%tku&hSG8|zZ#j%v`gw#=FT8v 
zy09$O!1;|A+25^|WSkd4F21@ftC<^0TMG-5GeTYH+%xsO<#TO`^S)m(_ZqCIp3~dP zlyPT@{0;dK@(b{F@L%vr@H6lo@CW!~_&4|q=wIkF=m)%Sn7^10xW72xvvvbZc4(S1 zu9M8APArTk>SufH#u97D^ttMvn*?%c_UpS>MqR7u=lJ$2z4z5rFJ#5W2cs>dzP@U% z^}1K&ba`Gn&*BE!cy+zw4vj=A{Z{(H>!K=R+_meyz`A;JL%;iZKw>=+dD`PJO(mMh z_wMTP;;A4ZZd_j^E~nGWJP#kVia()lK^Y}8f83_aFDdVFxm`YFW9Jp+INKC{>}X{`B0koC-mu5fhdA}6ZsqRA>)^lOli+9I zJKzuS$MA3P7tp`ZXV4FL-!OkMA8>zhzS9#PM}!S=F_!6VGi&u)$>Be<##?*lv$>yL zc^2L9ql@pvP{+UuI&;Yx=g;Ojw4roTrL6WLQc&{aBY(CFscskeaeJO7nb)}frB9q2 zIqZ6}$zL>vc*Wm0YN>odT>q|~;H=Qo2gySlXBt_N&!v~XFBf;<{QqvVLx7bRJ?$#n zb2`hAn*Oo;5_vp<)+b+*4i@sGYNPGt>U|GM-C84y^Sgq`d6Cn_vbp}GrbIb4t0tbv zg(uz(UlK@>A0yvH{)T)A`33ko_%HY*_!;;P_yhbg{2TlQ^e^-o^aI{E%wNn0++UpU zt(%UKDhI?E)dm0jH;!{LyZSV}&TNaJmCXs)?RPg*+a7g435`6e5%^kZnw=xvKUys} zKfs!9jpaMVb;Oi799y}lP5dIsaOi2z<@`5yO@h{i{$K7Sd!EXdw1OL?c-1C7lLg0! zY3XE%YO8x>frMvSS+ECr5aPFYY4kmMcic;F^URlI&ieTmI_^5rM}DIH8}yG+8nSP+ zrpK9V5z7ry{uWNO3Z&gr6+@|h&&o;9j9&hi&m%uZzKQ$|`4I98@OAKC@JaA9@E!06 z_+$7t_zUP?=riaCylJi2gC%Lc6y&BU8b)gdXN5}aj3*&N<=U$EkI=vuGY>}P z+$IaN6NZa-+^6S%%r2f6Ye)OyGAzRtIk|%puX9?h8~s!>Hf7V>C;#;ykk2DOM!t#s z4fzoA3-ERDU+_urGw>bo2l!+7H~0(aU+6RF2fS~XznBlWzc}CTo8GSJ_Y`LaoOxdF zsVb&E^A^&`W7C+5{bAZ>=8wqY`Fz*x54w`K3&Pzk_PS9LrZP5Tax^L7uDqymKbi_v z@Uz2yZ|Ib#AKN>^?o!vv-GK#*&XN(Q6KiKY3!~2+YIVms-*GkC?zuur@;>E#eNJd? zOAP0G@GsBB(l_M%yW2;2l;2R^Z7(eB9gmWqvu^)(X{x1?L7L9rX$7e_ulZ^sn*3kC z1pNo(^T>~pZz6v~K7{-Nd>#B3d=mT&dLn74h=I-h=le(L7rrCL&ebKfUE5ES13&m*9$M{8^G!sz^&31TJ;%4o zI~xVjv!_i1Wa`rBqgxidNxSY+ehr<5^+Fy*QT{4ngrle`uV0rFR}B49>u+PC8cYss zO86VCb(9!C9JdZEdPZYrc5G7onn3<)1)u0vN~Dtyc_uG&t01$sl$UxYv-J7o0PA9n zR|NeX^h?lxKt7NB82KjhH{?UeFTmHqf59ig&%k%UAK;JS-{3Exf1%HyAMn0m{$f7h z{^ER_o5LrD?hp_fqB@XdbJf?@^D?2nR;>fQcZJ!!Z zSMuZ7;`-=|k7=2%Wm3rJXLOr`d+MsH$0Un)-JdMGP&%j@<-R9AjGVsXSp9TC#((`N z^mouNLH_~yJo01Yo56oia7PDf3tKu3u7c!rp9>0;$oI$wqX6=;mDxisre`xUt7Llrn z726LRjZ!K5Lcw9RF|znXlk)KU5t?WHeRcCt7`=WtI^Lr^mU?e|9k5g{j2I=pSNQTE zh-&H>X(k`OLo!6`Hf2@%&>O}d95$NCkksWKlkYltP^(j!dIziSQuNQzk3xS3{Sx#a zkk2DOM!t#s4fzoA3-ERDU+_urGw>bo2l!+7H~0(aU+6RF2fS~XznBlWzc^q1&rVrv zr6MDp9vNTbmqnVoCR#Tsy(nu8cFU$)vsiDWUb}{)@b@ULIY5pFwS~5J( zZD)H<9i8Igp~Tnzm@eM!GD+&+6QUj7?WtWEOzec__I{Igrz<=HX3>kGWZJi>l1iQ4 zRM+SCMdnZdow}y#_@Z4t^oZGsxg}4ZQS|%JKSMtX{T=j6(0@QakNg<R^XYzQT&(M4|PF0Vc z8@|&wN{+7WKhje@!r>j37dfnyyvQ~z9 z)D~R&uRn=?ANptLN1?xiehK;y$mfwCBi}^+hI|P51^7DnFZd+*8TbzP1N<@k8~g?I zFZ3Do1Ku~xU(5&GU!3n~=HE0`Zc#>8WNA%d-Y0s;=ghn+_fZ<=sca})5I|qQ7Sfmo8mFlkNZOr=mZJejoa0=trTygMJD656I_{ zA0yvH{)T)A`33ko_%HY*_!;;P_yhbg{2TlQ^e^-o^aI{E%wNn0++Un;!cEQSqQ!HW z{lLzp+&Ei`J_ zG-ydG;|EXw6tkwaw(*Bn^WP*l-d)_Atz=A(EKp9knR}14RwTrftS~1J4~44b{5VI^ z|3yC){Ymuu&_6>z3jH1QOVEEnK9Bqu`6lu=NHY5gJI#C>-fpMLjlI zOlZi3&}|cv)7Crw*RMwZ7yVT9C(-Xi{|x;o^mouNLH_~yJo01Yo5hgx`RGTyD?M(f{5avb3B9aS z60}F`2HD3GfAi$k8zj?`$My5OJp}!A^sCYTML!k&N%Z^BKSMtX{T=j6(0@QakNg<< z=70Iy|MDT^7vSsQzu=SLXW%>F5AetEZ}1n;ztCsU4|v}&e=#3$e{sG7kK1*70%tR2 zLYs3p#f{K<*5yFNfk)Kn)lS`r2R`KK4#mtMLk}{dH8o~2*A;Tm;&#c*wVvd}RRO(M zdv_Co=WPOu_uQu%X_JCyl2ydnbid=6FvtJ=Z$AM2b@Z#z|3yC){Ymuu&_6>z3jH1Q zOVEEnK9Bqu`6lu=c2EGIS0Dlbs27dwl3w;LtfcFja7xMx47w60O zV!B_=Q&A@0Aie0{?>}Uu<2X50M*%^V|?UPu$j|cBx*!r8E2^bzM z>SO68W_ZVtaXI}kNB6#j*~|a-Td;qD{Q&gW(XU4T7yVT9C(-Xi{|x;o^mouNLH_~y zJo01Yo5gE1cePmHrGb=W&i75T5Gwzz7NBI`4uxOaV%NF`=x=qCbg#ANptLN1?xiehK;y z$mfwCBi}^+hI|P5#eaPLfBYAG68sE&2mAs482%0Z0{R#F4Eh1@8|E+O1MV-*S50r$ z=6C1hm_Lr{gHQj8vftclclrD+rB!k@ubsXY(|Z~#JdG~~6YLja{|)H{mpP?Ux{to&j=szHzM}CZa6ZsqRA>)^lOli+9IJKzuS z$MA3P7tp`ZXV4FL-!OkMA8>zhzBcM#HS`|NXWqMNKmIMj!(3K7Df98v2|bC%0R46JtI_{OKNbB+^!w01Lq7`r9rR1ke?UHu{22Kr 
z@;Br|$S=Ux!GFOg!Oy^Vz#rg`;osmdpnsvypdawQVg6!1;Qr!#Z4DY%wIH{mpP?Ux z{to&j=szHzM}CZa6ZsqRA>)^lOli+9IJKzuS$MA3P7tp`ZXV4FL-!OkMA8>zh zzE=-w{OlJNV>a?E_Pa4I#A5##`?1*H#C{?6->{#B{UPkPVE+R90qC!zUyc4R`l;wo zqTh%98TwJ^@1S3T{sZ!PSr7@`$w%ywpm8<$cAN}KJoK8J>JkvIh^PkqaU&kK)CLbPJ zJ4wilQmyk6%fxDn=<^%14c@9svvzwcdVV_BlS?%PKlsRNQqr58oY#{__PU+w_EE?n zQscTOed~j%KwH8q_tVv+X4=i)MfnAEXhd8=QNfSy-dg-K_}VCazIjX|;{qQuciF$k zW%ijQc6-E-6=Fs+H`cQZ#ymUly|y3nocCXbh&r?E@p*_ z68-l`@iSlkc|KbzBg=@K)3|0|xS9={*PI``V+p%&?5Fo@(J@kOEI7XRq#|pxW#@uB z2K>w>pBSFAd*xZ2ug>XU?VZ`1nPiP6!I?VWX#M)Uv!|{FlE7K}v{zV+QgwI7wkb^& zM5}s5aZk@5QeE|NiC};_+jM= z#*_q3FN=GdkN34p%_|JDQ(9~-St zB1zqG+c((0C$~4wOc3SIraPlOuUK-v_wM~@ZAbiQC<@Xcys>pRa*Hu zJ4Ne`3$0xLdFXxeL*msxefoBb2c$dl=$V6=E;Pt4@#FDKf2u5H=d1E9iWH^9tV>+s zKsug#yz;1fPF|RbgdODyBT4dW3lBGB(cK0Ml;rPNQh|!Q2Xs$NWoG;Pd?;6uV6$D{ z59J-1!}8K9mOp6;`&eH__++Il8!xtI@q4#EqH&;iYl-(%#_OeiU}u6jJMYErI>kbf z|L!l&H>~zUk=ORE44=fQZ#+Klh}AJ=>#%d>)NHMC@}~DKnC<|S;a+*21Sl=_S#H!V)L%2kUZP8ffWX)sk_s@u}i+HneIl+o{!Yc4To;Ic68kQl^yfMSa>sJ^U`?lYKhTS!CuH}EsITVy zr}^dlYcxGAAy#W?cI+hwb)#^6g=U1}o}Uo|e@(My_2} zN-9+8BSoqQcg)fhWzD;74mw|!XVpV`r|oVOVFDI1r-XS|vX~FJzc}9%lc9Ipr8Y8s z_6f)8lLf4{tLEuMlWFY9h0{)DC^T}ugD2CZ5wMh99qAIe zOqH8mw0}u+1mR-&3bF+f7mN_ex}Vp-yl5t!M^$*#1tVxuRDn%>g9NM8d~3F!0zYdU zaG>zui6P=NqVc>TM1@Van)_9*zk^6?McJ)9yp$O+c++`>pNq{%8Z9>2SWM6ADtFqH zPh;y8e{oNFH9)3?&fZWR%K3Ntc7atpOt~0I#ToNG_@8k8KfO`wWL_>Y-5weK{_bn? zH!@~3hn#Vpzb6(NU$=d0$^V>-HChw@l>MzOg{dr<;wl7G> zhI-#A`l*D=?UKDuZ8F9D#eBg1#rZy3oYtpNznt0jV%kl2)mco)10jF*vj8K&E1AY6 zEXQ84Ro=`O-ba%nDsP|KTTjguzfVb9IE~#Xvj6Pi#Z7edeNfd+hgs}e^KBOHC#JFa zM)N$qJKBkP-KlcNo#M>?$96J@cB?Rp{#5(OC~~pUM+Fts(`rfD<&NU4>$BN(d9fnB znZq=6sH|8oZh)Sz=22V3xpyDJl3OBw4N;BTlJ`H)>86=Gc6@j1`c5^*8i!<`4p9D? zZeOb{zK|sWzb(sD-;xjJ&RqGgtEf%c1Gk)=ImE8%^wxFSU+KAv*)f zyI~{yktUY?4KJ>UB^@m6d6rXta&lBT z@1!s1oPLs&69?VY%f{(GjmD^kgmMyoJuU0HX$MKner)l~IiDJ5cw0X4$)}CNHx2|> zwbQw#p5oE6pU4_TC!W7@-|4_()4xOSCdgsl4}sN|Q`o1*v*Y!5$C9=A-R;XO-qUx^ zo1V?volFmfx=#Q8Fqm8`y-~aOL=ojZR9H7f=K+yC&F86mGJ$k9WhDF!ZZ@t4dxuvD_d)=~a zmhw$B@l$kql3yZKE?E|TB=9wLTM=ePiQ5FK83FQ!&^q2vwDY~ag`DO2mV!Oxu-JC1|_qPSB;TS!3CY`*S665{Ikj} zzLoUyqwb@RT>p}%hYO27O6SruYOcwAK zGI0HDO-{e{PjTuA`E&w(2K|8d4f7ZC0rwZ@t1V2J>fC0tZ^WgI z{#d5rf(c5_diqib=McNkNCD_ z{tcz>0Ve;r-h82483F;l&vHoYnXlg->BiFhDRbBSnZ(UX9#*mXcTkiq-zqR~PFe@) zx^vp5HmRNLTvR2k@bWh`p25@qt3921owVJ0rK^d|6?pxf<(i;sdu?ZqFHNFRXQLm@ zv*{qYjh6eHL-nEqKR`?J>Ao}TIRV~D;*X76ntx4 z7g5vv=|`?lWmPkN)VU8&))x)+)Yln z(5hKM{KHK4KID9#YT>kqN}IVQq$R)o(4gpR8g3nVMN9EBNpan@_nB28mGu()t5=;v z|B3fqaL=E@?2K4)ezN47|NI5?FZ3Do1Ku~xU(5&GU!1RPr@ljru0GRH;dFEKhAcD3 zWVCMS2PHOJ<9L;G>vuX9JX+!(|A^?^iqH(N&L@7mb>`+}3=-XfOE%$$%E;Gpt5dTV zG}AwRN!Lydb1{#_RycfISxnqgbZ4jQa{hZ9>-$N1VhuZOrnZ2;-wHOOpP!Ha=t8DH zVbZbH)0J4$KcgMCKV_J!23@ZMZ_6=hqfejIct;ZbdCnPiE7QsLr5m@)u6#$c3e~KS zt++v|-o$1w&r|7_`}14_O5c#CQ*|4D7;>|lysg`AW;by5_znIn_xVF(4Cq|_g^wwp zvbAKAoe$l7Z^;2cn_G0pthJ|%IRAO$X+M9te87-=-m|4Gq$rfEHtmWPDt$mLq^$mo zNWP|HnfI*qEIj`6Z}1n;ztCsU4|v}&e=#3$e{sGxRwq`i;$6qYomgURP%F>a$jjNr zD)o>b6MO!&rZtg-^HESSi9a6otLwo5JkFYz3_bRh*zxzxNS5Rc#Hd+6ac`U>{+&%Bny03GYO$p|K z%4YIxVVC&ud&{NlrMz`Kw)$~2dD=|{K82Sgafim`A*m=LDM2T=qN3@~Nz%f>(S9^! 
ziRPEHgE4eQ{Y_sg#mAWM+Ieto)=P3jAv{xBu$P!D?pi7%=|RZJ%bxFU1d}R(_a)vR zkCC#yHC<9Y-n3_EtngD^9QhP>=Pdt(JGFh|u;Yiw4dUH;OtID~j)bQa`zFE6U+BUa5f70?q9&ed!xp zzBcn$EbZWY|6!Y;1I>1jTh?aqmUvEti?f{XC$00V&=RWJM>j8A_$<_D3VUjK=xTqv zZ)9DE%JOY?eH}(U%KmV z?x|6E52CTvwmOsdA(b;}=Nky&doU)i;;gd0 z8vVZt@1K(R<*kUUGM<^IeaOHw#rBXJ90$-aq=DshPq$yQ0-C(%;D zbo;qYYRF3+5{grVZC2P{PvvPbAS z;VwC0#>h0k;Gz2xHucQvjf}?(_Sy8=LT@IukhLA#LR*h~Ai7(X8j6ZI^ILuUuDi3} z(`Aw0%uXUGfIo(RgTH|Og+7CR!25>zi}`^2i}UTjf3HKvOOq)-aq%@d z74R*8^WQ|$Kl6K`*3Lvysv=&ztiX;mEi!J6yckH&*sNcW=<T69p|q;9IfSNz4SLoeLFOT;Z=HQbYk}~eR~!;FgbEJy&8Sazqv1*s)_X8>y~;(72oMS zEzUkj@1!V_+HpKM^Y zPZd=*z4|~7HlJT`u(NyX(HSy&ZRxUwXzSdEa89(O6e7QbE zcD??*y%W^m1kIl)om5o0kQ88)i|Ny~niMp1dT@bILAUpB7BH>ZK%t zth0zs;&AdO&wP3%YoH-rm5;UGdeWL%(?l+cM5>y&Qp+;)cHdvUikVL9J2)n%~|41s=f2+Ee zl0?OKwA>At{DvGjyxH=hR}JO;z_;Gf!ILUVuI;+NB$EWluMS(R_=ap~(%<@}GKzrz zf=_~lH>m@%$ufZ zeYzEs#uhu5HPPX`Y^|b{Pc+vod8$xbB^}%~9IqF2gg71E zGf%hcBe_jb#>t8nbNf=2K5F5)X?f+kZ^Y`6*RukX~HiJ=?gslm2M>^F5AetEZ}1n;ztCsU4|v}&e=#3$e{sIM54)0w8y7Ri)AlQV=xHQM zuMF;c7LVIlhZ^E zmW1?*f7?qx?s^e@_{ImCs~yfAI1o#}Yv!C&s81wqca^D6MhVHhuenriV-)cdoe?_u z!4%fOy!D8K;$L!PLQAGNU@{Zpl4>L{HHF>~tXY>-Q$%k>3iTZ{v?sNE$CCdFKPTA% zCOms}BB`&&^F4F9lWD%ogHhWP;q;Bj!j7rS!f5LCDVG)IME;i#A-@1$2mb}11V01c z0e^r$hJS;J^Rul#T5HH~7CTtk7;+7!sS`VP8yiKG+`8^9v`wFC-_Pssz)US#MmfNM#aH!nZ`G@)7x& zl_w*3*`1iX9Ci^BI!JjvG+Gu-as4lULq3H30(>3(7km=@415Rt0sa{N4gLc97y1nP z0q+~;FXjX8FV1&cFYCBjU^8=X)~S0H1|@{cOz?xVSTf~%eP3c^T^8wf_^17)y_^b& z@0;zw`CjOfl$n!5{E=VZJKygR$J7T8j#aypu%kJ;y&g_v<%=gn zSxlT{=NvJV({gWe)Ut@TvgNa{qSunKVIGLvhgk%jx11{B;iU^ za@D0?j$b3Y9NJhGZ5a<7V*A|FK5vRJKzwEG14Jo01Y zo5Yje;jIDokLz8G|n#A@PhJ=d)d_d@~6kg7M8xf$NBx;qF%5sh(Iw|@TJt)aSQfk?(8NxS}tCSonqL$Je zMoA%y6*k|te%P~r;d)$;_w_nF-}jCFMW3M`c;A@6%m?l-=UZ>4+2?4wr7~BqJh%K} z*Y~D_9A7`zG%tL=*Gt=}DO6TR^dD9g6)x|^Uo4#8xJ8ymmpVqde=E*=EpRL+b*Nexl7W?twP;@l!G{HQTOP(Ljcy!-1hG3 zb06C}{Np9|<-9lDqE{l9oN4m=zVL!@4vA~kz7j9idjA#J(6&I_nO~jdNsmeN93) zzXM+a|3E)ae@wqg|3*JVe?h)Z{!2bden!4S{(wKmzu_;?zvwgc1MeI2m-)c`<$TXM zrlpLjw^hWw@cCi7-^t3%sWA(`Iv^V^n5Wb)Ob~$sw})lu?Gz^sGxRf;?icoUL(6|0 z8Y7*~y>@;cTq=^5)yEzklP^-1{*iE{^1PVvQl*+cZH+wO-}jf{+qR3ud%XvRB?U{P zl&wFeZ(1#idevnbX+(&{*?mpVB`p&pZ`fKKu5gxBw=$m&KD$Sh4A&plq6+Qiqu_Vo zOW+^q=jo5>H|gK#hv+ZJ*U5j$C&|yqcgP>`$M`q=1^O3#hJN6EWBxK9xWAmQYS$dM z+e6Kjr=yA;bvKSJ{B=gs%MLBm!b|B|Lk{OX7lQ{sb2&7*MXpb)tTnv)Lgqc)HYL#F zsuMv;+wPDE>ASJ3HUSkDEJ-t68H!DdHQ4e zP5L+bA^HpQb@E^GN%AxD9r6eKG5!sIf&N9Gp&xkPn7`fg;lKOK`DWYy&}sFlv9f8^ zbKefdQYqgw;DuZGU0JaFXxjEAw?u@##n#FfU0-e+e{p`Pno5rJ?7wVTdy?D}R&IA< zRi+qpz3UH#S^?r%S>n4%*8AnS9dX6B>5;;Br+%B}%V2q|$n&FRqY~vE)A;Ba`CDZE z$s>uqQ{$yql(@52Cq=YhHgC=H3>NTx@MrK*@H_A&@DKF!^vCp@^l$V-^cUpoF3ZcXznCE=~;+Pdk#w#Y~JAmHxV#{rp#oF-dDG)4W#5-(Bu#C13tlG;Og_MLt|1 z;V0qy;LqTr;CJ9l;2-Gc>5u6*>EGyw=r73E$$!Zw$_&59o`WJnMe&Bs$ z{xTo9znt$#&G|XNHh!-&_x#u_s4-7(X`d-c2e0QJDws zZ%LE4j`Vmn^6UyZy3_XMq^Ki8tw8apG}(rc^qlCB2)pOD*u?m;Q zqj%SpPD}3kZeUJJN$7SN{wTp>!WNfqJ{5ivz7PHkJ_>#Zz6Ab(exClAev|%_U!Z@{XXppsH|8(%f&0t(F1!BlSKnSADc`%ewZFUEw@@|n zud*$cMRKk8yFav5$#UgSlPw>2B+I*oK@s6SVq~`d^fSXZM+%pUwb?6b!lkvwDJ}2y z8|01RgvTdlq{#WTKUf#N+$B0XdX8UMv`TEYYgC_o=BR9#w($7gfN%-_3!e%<3Eu~Q z1|J2#178CFKtE4^OutG0Mn6P4I@pPs5eOYwQ~D67opmiQ*?$iRM2PRfM)^M<62N|TOe_IZ9Gkz#^Po$lb$-SSVZ z$ZI;)u0lyt^E*DaIwfGO+!HVVL43nybpa8eP_jyf+s5vSfph>4ED%Tky4j zuZI7HPlca^?}I;skAmNUFM)rcpQk^j-=u$|AELh?Unl=1pCmsc-ywg%ALHNf7wBK~ z8Tx_ujrq%b;Qn&H6$=tOe^BqCMCS#>*d1(_`iCBWY~1uz7$1I}>aj0d4w~+Bx##XO z`DeWI&1Vzi#V=(in{{ISWtHpi#X&bt%Ubo7)2pU<%Ki)b-dO3GF6Y_@Z`C(GD+{XD zjX!az%a{Jguftcv|H7xjPr~=XpTS4L@4%P9KhV$9AJcErztIoTUy!ep|B_FVpONp7 zKj4q?Z};03{@-Kt zuDBt+-dfEum=qv4*34QutY5V7eKk35VnCLN 
zU;N1QLG>oVegOPBd^P+pd@B4Td>{N7d=&f+d*g5YQ4L@ zC&B&&`vLIl@YV3Y@Tu^V@O|)S@KNwP@FnmM^z-z`^qcf=^h5L)NQK7@RUK%@^ z+oXAF%75Tn4*3KA82^U9K>wo8 z&=0(C%wOgM_m}gvZ2rT(_5EN)-G5un&bx-n@2mYbJ1=gK2eoRx3V(i2jGMhJrM{y> ztoO!2RWXRc?NB zzl^a}{z+MNK}WeLFQ{(i^*)v(;zD#XPA%Cd$_9RtvFqk5;iNs_P(eeZ%r$7MGimCS z>}Rn*#C{9=7wiYXuftcv|H7xjPr~=XpTS4L@4%P9KhV$9AJcErztIoTUy!ep|B_FV zpONp7Kj4q?Z}5u6*>EGyw=r73E$$!Zw$_&59o`WJnMe&Bs${xTo9znt%z zgB4b1hM6c@7n0Wb?Rq7>YF71lAJHhRGfp^YwcHV(9z8ZM?~7}a{X+KN*w12ri2WA! zFW3)&Ux%-T|AkM5pM>v&KZB2g-+?cIf1sbIKc?TLf1@9wzaU>H|0SO!KO^5Ef50E( z-|!dcU-TLJf%lF1%Y5Mea=s&)e$m@DT3a#CY9G<4)1$DDt7~HVC++ffXl+4$YEAe4 zCi{i#zjg0t{kK2Fehd2-><7TF!&k%q!l%Md!uP?S!AHUGz?Z;3(9hE!({Iwh(GSsI zkgt>fl24MKk?)W{;E(Zd_zUze`V9TR`^NlbK5&0I-y~h>TJqXLnbElQ!P+C5g~PW` zwH*>~rm!E&{wDi{?7y*}#r_ccE$m;g9{|4&Uk(2Yp9((--v@sN9|gYyUjqL?KTm&5 zze)c_KSX~)zE1v2K1qH?zC-?iKgPe|FVMf}GxP)R8}pa>!2RWXO~0R7TlbxTGATxH z`{|83h3p@*AIttG`-SYkv7g2M5c@6cU$7qlzYbpw{|lcAKMCIle+C}~zXM+a|3E)a ze@wqg|3*JVe?h)Z{!2bden!4S{(wKmzu_;?zvwgc1MeI2m-)c`<$QIfm7g+8n5g^< D3P99R literal 0 HcmV?d00001 diff --git a/source/tests/pt/hessian/data/H8C4N2O/type.raw b/source/tests/pt/hessian/data/H8C4N2O/type.raw new file mode 100644 index 0000000000..a6510b1c81 --- /dev/null +++ b/source/tests/pt/hessian/data/H8C4N2O/type.raw @@ -0,0 +1,15 @@ +0 +0 +0 +2 +2 +0 +3 +1 +1 +1 +1 +1 +1 +1 +1 diff --git a/source/tests/pt/hessian/data/H8C4N2O/type_map.raw b/source/tests/pt/hessian/data/H8C4N2O/type_map.raw new file mode 100644 index 0000000000..5d0a0b4b31 --- /dev/null +++ b/source/tests/pt/hessian/data/H8C4N2O/type_map.raw @@ -0,0 +1,4 @@ +C +H +N +O diff --git a/source/tests/pt/model/test_dp_hessian_model.py b/source/tests/pt/model/test_dp_hessian_model.py new file mode 100644 index 0000000000..55631f67c6 --- /dev/null +++ b/source/tests/pt/model/test_dp_hessian_model.py @@ -0,0 +1,183 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import unittest + +import numpy as np + +from deepmd.pt.model.descriptor.se_a import ( + DescrptSeA, +) +from deepmd.pt.model.model import ( + EnergyModel, +) +from deepmd.pt.model.task.ener import ( + EnergyFittingNet, +) +from deepmd.pt.utils import ( + env, +) +from deepmd.pt.utils.utils import ( + to_numpy_array, + to_torch_tensor, +) + +from .test_env_mat import ( + TestCaseSingleFrameWithoutNlist, +) + +dtype = env.GLOBAL_PT_FLOAT_PRECISION + + +class TestEnergyHessianModel(unittest.TestCase, TestCaseSingleFrameWithoutNlist): + def setUp(self): + TestCaseSingleFrameWithoutNlist.setUp(self) + + def test_self_consistency(self): + ds = DescrptSeA( + self.rcut, + self.rcut_smth, + self.sel, + ).to(env.DEVICE) + ft = EnergyFittingNet( + self.nt, + ds.get_dim_out(), + mixed_types=ds.mixed_types(), + ).to(env.DEVICE) + type_map = ["foo", "bar"] + md0 = EnergyModel(ds, ft, type_map=type_map).to(env.DEVICE) + md1 = EnergyModel.deserialize(md0.serialize()).to(env.DEVICE) + md0.enable_hessian() + md1.enable_hessian() + args = [to_torch_tensor(ii) for ii in [self.coord, self.atype, self.cell]] + ret0 = md0.forward(*args) + ret1 = md1.forward(*args) + np.testing.assert_allclose( + to_numpy_array(ret0["atom_energy"]), + to_numpy_array(ret1["atom_energy"]), + atol=self.atol, + ) + np.testing.assert_allclose( + to_numpy_array(ret0["energy"]), + to_numpy_array(ret1["energy"]), + atol=self.atol, + ) + np.testing.assert_allclose( + to_numpy_array(ret0["force"]), + to_numpy_array(ret1["force"]), + atol=self.atol, + ) + np.testing.assert_allclose( + to_numpy_array(ret0["virial"]), + to_numpy_array(ret1["virial"]), + atol=self.atol, + ) + 
diff --git a/source/tests/pt/model/test_dp_hessian_model.py b/source/tests/pt/model/test_dp_hessian_model.py
new file mode 100644
index 0000000000..55631f67c6
--- /dev/null
+++ b/source/tests/pt/model/test_dp_hessian_model.py
@@ -0,0 +1,183 @@
+# SPDX-License-Identifier: LGPL-3.0-or-later
+import unittest
+
+import numpy as np
+
+from deepmd.pt.model.descriptor.se_a import (
+    DescrptSeA,
+)
+from deepmd.pt.model.model import (
+    EnergyModel,
+)
+from deepmd.pt.model.task.ener import (
+    EnergyFittingNet,
+)
+from deepmd.pt.utils import (
+    env,
+)
+from deepmd.pt.utils.utils import (
+    to_numpy_array,
+    to_torch_tensor,
+)
+
+from .test_env_mat import (
+    TestCaseSingleFrameWithoutNlist,
+)
+
+dtype = env.GLOBAL_PT_FLOAT_PRECISION
+
+
+class TestEnergyHessianModel(unittest.TestCase, TestCaseSingleFrameWithoutNlist):
+    def setUp(self):
+        TestCaseSingleFrameWithoutNlist.setUp(self)
+
+    def test_self_consistency(self):
+        ds = DescrptSeA(
+            self.rcut,
+            self.rcut_smth,
+            self.sel,
+        ).to(env.DEVICE)
+        ft = EnergyFittingNet(
+            self.nt,
+            ds.get_dim_out(),
+            mixed_types=ds.mixed_types(),
+        ).to(env.DEVICE)
+        type_map = ["foo", "bar"]
+        md0 = EnergyModel(ds, ft, type_map=type_map).to(env.DEVICE)
+        md1 = EnergyModel.deserialize(md0.serialize()).to(env.DEVICE)
+        md0.enable_hessian()
+        md1.enable_hessian()
+        args = [to_torch_tensor(ii) for ii in [self.coord, self.atype, self.cell]]
+        ret0 = md0.forward(*args)
+        ret1 = md1.forward(*args)
+        np.testing.assert_allclose(
+            to_numpy_array(ret0["atom_energy"]),
+            to_numpy_array(ret1["atom_energy"]),
+            atol=self.atol,
+        )
+        np.testing.assert_allclose(
+            to_numpy_array(ret0["energy"]),
+            to_numpy_array(ret1["energy"]),
+            atol=self.atol,
+        )
+        np.testing.assert_allclose(
+            to_numpy_array(ret0["force"]),
+            to_numpy_array(ret1["force"]),
+            atol=self.atol,
+        )
+        np.testing.assert_allclose(
+            to_numpy_array(ret0["virial"]),
+            to_numpy_array(ret1["virial"]),
+            atol=self.atol,
+        )
+        np.testing.assert_allclose(
+            to_numpy_array(ret0["hessian"]),
+            to_numpy_array(ret1["hessian"]),
+            atol=self.atol,
+        )
+        ret0 = md0.forward(*args, do_atomic_virial=True)
+        ret1 = md1.forward(*args, do_atomic_virial=True)
+        np.testing.assert_allclose(
+            to_numpy_array(ret0["atom_virial"]),
+            to_numpy_array(ret1["atom_virial"]),
+            atol=self.atol,
+        )
+
+    def test_energy_consistency(self):
+        ds = DescrptSeA(
+            self.rcut,
+            self.rcut_smth,
+            self.sel,
+        ).to(env.DEVICE)
+        ft = EnergyFittingNet(
+            self.nt,
+            ds.get_dim_out(),
+            mixed_types=ds.mixed_types(),
+        ).to(env.DEVICE)
+        type_map = ["foo", "bar"]
+        md0 = EnergyModel(ds, ft, type_map=type_map).to(env.DEVICE)
+        md1 = EnergyModel.deserialize(md0.serialize()).to(env.DEVICE)
+        md1.enable_hessian()
+        args = [to_torch_tensor(ii) for ii in [self.coord, self.atype, self.cell]]
+        ret0 = md0.forward(*args)
+        ret1 = md1.forward(*args)
+        np.testing.assert_allclose(
+            to_numpy_array(ret0["atom_energy"]),
+            to_numpy_array(ret1["atom_energy"]),
+            atol=self.atol,
+        )
+        np.testing.assert_allclose(
+            to_numpy_array(ret0["energy"]),
+            to_numpy_array(ret1["energy"]),
+            atol=self.atol,
+        )
+        np.testing.assert_allclose(
+            to_numpy_array(ret0["force"]),
+            to_numpy_array(ret1["force"]),
+            atol=self.atol,
+        )
+        np.testing.assert_allclose(
+            to_numpy_array(ret0["virial"]),
+            to_numpy_array(ret1["virial"]),
+            atol=self.atol,
+        )
+        ret0 = md0.forward(*args, do_atomic_virial=True)
+        ret1 = md1.forward(*args, do_atomic_virial=True)
+        np.testing.assert_allclose(
+            to_numpy_array(ret0["atom_virial"]),
+            to_numpy_array(ret1["atom_virial"]),
+            atol=self.atol,
+        )
+
+    def test_forward_consistency(self):
+        ds = DescrptSeA(
+            self.rcut,
+            self.rcut_smth,
+            self.sel,
+        ).to(env.DEVICE)
+        ft = EnergyFittingNet(
+            self.nt,
+            ds.get_dim_out(),
+            mixed_types=ds.mixed_types(),
+        ).to(env.DEVICE)
+        type_map = ["foo", "bar"]
+        md0 = EnergyModel(ds, ft, type_map=type_map).to(env.DEVICE)
+        md1 = EnergyModel.deserialize(md0.serialize()).to(env.DEVICE)
+        md0.enable_hessian()
+        md1.enable_hessian()
+        md0.requires_hessian("energy")
+        args = [to_torch_tensor(ii) for ii in [self.coord, self.atype, self.cell]]
+        ret0 = md0.forward_common(*args)
+        ret1 = md1.forward(*args)
+        np.testing.assert_allclose(
+            to_numpy_array(ret0["energy"].squeeze()),
+            to_numpy_array(ret1["atom_energy"].squeeze()),
+            atol=self.atol,
+        )
+        np.testing.assert_allclose(
+            to_numpy_array(ret0["energy_redu"].squeeze()),
+            to_numpy_array(ret1["energy"].squeeze()),
+            atol=self.atol,
+        )
+        np.testing.assert_allclose(
+            to_numpy_array(ret0["energy_derv_r"].squeeze()),
+            to_numpy_array(ret1["force"].squeeze()),
+            atol=self.atol,
+        )
+        np.testing.assert_allclose(
+            to_numpy_array(ret0["energy_derv_c_redu"].squeeze()),
+            to_numpy_array(ret1["virial"].squeeze()),
+            atol=self.atol,
+        )
+        np.testing.assert_allclose(
+            to_numpy_array(ret0["energy_derv_r_derv_r"].squeeze()),
+            to_numpy_array(ret1["hessian"].squeeze()),
+            atol=self.atol,
+        )
+        ret0 = md0.forward_common(*args, do_atomic_virial=True)
+        ret1 = md1.forward(*args, do_atomic_virial=True)
+        np.testing.assert_allclose(
+            to_numpy_array(ret0["energy_derv_c"].squeeze()),
+            to_numpy_array(ret1["atom_virial"].squeeze()),
+            atol=self.atol,
+        )
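Read together, the three tests above pin down the calling pattern of the new Hessian path: enable_hessian() must be called on the model before forward() for a "hessian" entry to appear in the returned dict, and the lower-level forward_common() exposes the same quantity under the key "energy_derv_r_derv_r". A condensed sketch, reusing only calls that occur in the test (ds, ft, coord, atype, and cell are assumed to be built exactly as in setUp above):

from deepmd.pt.model.model import EnergyModel
from deepmd.pt.utils import env
from deepmd.pt.utils.utils import to_numpy_array, to_torch_tensor

# ds (descriptor) and ft (fitting net) as constructed in the test above
model = EnergyModel(ds, ft, type_map=["foo", "bar"]).to(env.DEVICE)
model.enable_hessian()  # without this call, forward() returns no "hessian" entry
ret = model.forward(
    to_torch_tensor(coord),
    to_torch_tensor(atype),
    to_torch_tensor(cell),
)
energy = to_numpy_array(ret["energy"])
hessian = to_numpy_array(ret["hessian"])  # second derivative of energy w.r.t. coordinates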
diff --git a/source/tests/pt/test_change_bias.py b/source/tests/pt/test_change_bias.py
index a3cf3edbbc..58fd953656 100644
--- a/source/tests/pt/test_change_bias.py
+++ b/source/tests/pt/test_change_bias.py
@@ -87,6 +87,7 @@ def setUp(self) -> None:
         self.model_path_user_bias = Path(current_path) / (
             model_name + "user_bias" + ".pt"
         )
+        self.loss_params = self.config["loss"]

     def test_change_bias_with_data(self) -> None:
         run_dp(
@@ -96,7 +97,10 @@ def test_change_bias_with_data(self) -> None:
             str(self.model_path_data_bias), map_location=DEVICE, weights_only=True
         )
         model_params = state_dict["model"]["_extra_state"]["model_params"]
-        model_for_wrapper = get_model_for_wrapper(model_params)
+        model_for_wrapper = get_model_for_wrapper(
+            model_params,
+            _loss_params=self.loss_params,
+        )
         wrapper = ModelWrapper(model_for_wrapper)
         wrapper.load_state_dict(state_dict["model"])
         updated_bias = wrapper.model["Default"].get_out_bias()
@@ -119,7 +123,10 @@ def test_change_bias_with_data_sys_file(self) -> None:
             str(self.model_path_data_file_bias), map_location=DEVICE, weights_only=True
         )
         model_params = state_dict["model"]["_extra_state"]["model_params"]
-        model_for_wrapper = get_model_for_wrapper(model_params)
+        model_for_wrapper = get_model_for_wrapper(
+            model_params,
+            _loss_params=self.loss_params,
+        )
         wrapper = ModelWrapper(model_for_wrapper)
         wrapper.load_state_dict(state_dict["model"])
         updated_bias = wrapper.model["Default"].get_out_bias()
@@ -140,7 +147,10 @@ def test_change_bias_with_user_defined(self) -> None:
             str(self.model_path_user_bias), map_location=DEVICE, weights_only=True
         )
         model_params = state_dict["model"]["_extra_state"]["model_params"]
-        model_for_wrapper = get_model_for_wrapper(model_params)
+        model_for_wrapper = get_model_for_wrapper(
+            model_params,
+            _loss_params=self.loss_params,
+        )
         wrapper = ModelWrapper(model_for_wrapper)
         wrapper.load_state_dict(state_dict["model"])
         updated_bias = wrapper.model["Default"].get_out_bias()
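The only change to test_change_bias.py is plumbing: the loss section of the input script is now threaded into get_model_for_wrapper as _loss_params, presumably so the wrapper can decide from the loss configuration whether the model must be built with Hessian output enabled. A hedged sketch of such a dispatch (only the call signature comes from the diff; the body below is an assumption):

def get_model_for_wrapper(model_params, _loss_params=None):
    model = get_model(model_params)  # get_model: assumed model builder
    if _loss_params is not None:
        # assumption: a hessian-aware loss is recognized by its prefactor keys
        if "start_pref_h" in _loss_params or "limit_pref_h" in _loss_params:
            model.enable_hessian()
    return model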
diff --git a/source/tests/pt/test_loss.py b/source/tests/pt/test_loss.py
index d0746c1368..2519111357 100644
--- a/source/tests/pt/test_loss.py
+++ b/source/tests/pt/test_loss.py
@@ -12,6 +12,7 @@
 )

 from deepmd.pt.loss import (
+    EnergyHessianStdLoss,
     EnergySpinLoss,
     EnergyStdLoss,
 )
@@ -52,6 +53,18 @@ def setUp(self) -> None:
         if not self.spin:
             self.system = str(Path(__file__).parent / "water/data/data_0")
             self.type_map = ["H", "O"]
+            if self.hess:
+                self.system = str(Path(__file__).parent / "hessian/data/H8C4N2O")
+                self.type_map = ["C", "H", "N", "O"]
+                energy_data_requirement.append(
+                    DataRequirementItem(
+                        "hessian",
+                        ndof=1,
+                        atomic=True,
+                        must=False,
+                        high_prec=False,
+                    )
+                )
         else:
             self.system = str(Path(__file__).parent / "NiO/data/data_0")
             self.type_map = ["Ni", "O"]
@@ -238,6 +251,14 @@ def setUp(self) -> None:
                 "drdq": torch.from_numpy(drdq),
                 "atom_ener_coeff": torch.from_numpy(atom_ener_coeff),
             }
+            if self.hess:
+                l_hessian = np_batch["hessian"]
+                p_hessian = np.ones_like(l_hessian)
+                self.model_pred["hessian"] = torch.from_numpy(p_hessian)
+                self.label["hessian"] = torch.from_numpy(l_hessian)
+                self.label["find_hessian"] = 1.0
+                self.label_absent["hessian"] = torch.from_numpy(l_hessian)
+
         else:
             self.model_pred = {
                 "energy": torch.from_numpy(p_energy),
@@ -310,6 +331,7 @@ def setUp(self) -> None:
             self.limit_pref_v,
         )
         self.spin = False
+        self.hess = False
         super().setUp()

     def test_consistency(self) -> None:
@@ -399,6 +421,7 @@ def setUp(self) -> None:
             numb_generalized_coord=self.numb_generalized_coord,
         )
         self.spin = False
+        self.hess = False
         super().setUp()

     def test_consistency(self) -> None:
@@ -469,6 +492,7 @@ def setUp(self) -> None:
             enable_atom_ener_coeff=True,
         )
         self.spin = False
+        self.hess = False
         super().setUp()

     def test_consistency(self) -> None:
@@ -539,6 +563,7 @@ def setUp(self) -> None:
             relative_f=0.1,
         )
         self.spin = False
+        self.hess = False
         super().setUp()

     def test_consistency(self) -> None:
@@ -577,6 +602,112 @@ def fake_model():
         self.assertTrue(np.isnan(pt_more_loss_absent[f"l2_{key}_loss"]))


+class TestEnerHessStdLoss(LossCommonTest):
+    def setUp(self):
+        self.start_lr = 1.1
+        self.start_pref_e = 0.02
+        self.limit_pref_e = 1.0
+        self.start_pref_f = 1000.0
+        self.limit_pref_f = 1.0
+        self.start_pref_v = 0.02
+        self.limit_pref_v = 1.0
+        self.start_pref_h = 10.0
+        self.limit_pref_h = 1.0
+        # tf
+        self.tf_loss = EnerStdLoss(
+            self.start_lr,
+            self.start_pref_e,
+            self.limit_pref_e,
+            self.start_pref_f,
+            self.limit_pref_f,
+            self.start_pref_v,
+            self.limit_pref_v,
+        )
+        # pt
+        self.pt_loss = EnergyStdLoss(
+            self.start_lr,
+            self.start_pref_e,
+            self.limit_pref_e,
+            self.start_pref_f,
+            self.limit_pref_f,
+            self.start_pref_v,
+            self.limit_pref_v,
+        )
+        # pt-hess
+        self.pt_loss_h = EnergyHessianStdLoss(
+            starter_learning_rate=self.start_lr,
+            start_pref_e=self.start_pref_e,
+            limit_pref_e=self.limit_pref_e,
+            start_pref_f=self.start_pref_f,
+            limit_pref_f=self.limit_pref_f,
+            start_pref_v=self.start_pref_v,
+            limit_pref_v=self.limit_pref_v,
+            start_pref_h=self.start_pref_h,
+            limit_pref_h=self.limit_pref_h,
+        )
+        self.spin = False
+        self.hess = True
+        super().setUp()
+
+    def test_consistency(self):
+        with tf.Session(graph=self.g) as sess:
+            tf_loss, tf_more_loss = sess.run(
+                self.tf_loss_sess, feed_dict=self.feed_dict
+            )
+
+        def fake_model():
+            return self.model_pred
+
+        _, pt_loss, pt_more_loss = self.pt_loss(
+            {},
+            fake_model,
+            self.label,
+            self.nloc,
+            self.cur_lr,
+        )
+        _, pt_loss_absent, pt_more_loss_absent = self.pt_loss(
+            {},
+            fake_model,
+            self.label_absent,
+            self.nloc,
+            self.cur_lr,
+        )
+        pt_loss = pt_loss.detach().cpu()
+        pt_loss_absent = pt_loss_absent.detach().cpu()
+        _, pt_loss_h, pt_more_loss_h = self.pt_loss_h(
+            {},
+            fake_model,
+            self.label,
+            self.nloc,
+            self.cur_lr,
+        )
+        _, pt_loss_h_absent, pt_more_loss_h_absent = self.pt_loss_h(
+            {},
+            fake_model,
+            self.label_absent,
+            self.nloc,
+            self.cur_lr,
+        )
+        pt_loss_h_absent = pt_loss_h_absent.detach().cpu()
+        self.assertTrue(np.allclose(tf_loss, pt_loss.numpy()))
+        self.assertTrue(np.allclose(0.0, pt_loss_absent.numpy()))
+        self.assertTrue(np.allclose(0.0, pt_loss_h_absent.numpy()))
+        for key in ["ener", "force", "virial"]:
+            self.assertTrue(
+                np.allclose(
+                    tf_more_loss[f"l2_{key}_loss"], pt_more_loss[f"l2_{key}_loss"]
+                )
+            )
+            self.assertTrue(
+                np.allclose(
+                    pt_more_loss[f"l2_{key}_loss"], pt_more_loss_h[f"l2_{key}_loss"]
+                )
+            )
+            self.assertTrue(np.isnan(pt_more_loss_absent[f"l2_{key}_loss"]))
+        for key in ["ener", "force", "virial", "hessian"]:
+            self.assertTrue(np.isnan(pt_more_loss_h_absent[f"l2_{key}_loss"]))
+
+
 class TestEnerSpinLoss(LossCommonTest):
     def setUp(self) -> None:
         self.start_lr = 1.1
@@ -610,6 +741,7 @@ def setUp(self) -> None:
             self.limit_pref_fm,
         )
         self.spin = True
+        self.hess = False
         super().setUp()

     def test_consistency(self) -> None:
@@ -687,6 +819,7 @@ def setUp(self) -> None:
             limit_pref_ae=self.limit_pref_ae,
         )
         self.spin = True
+        self.hess = False
         super().setUp()

     def test_consistency(self) -> None:
@@ -760,6 +893,7 @@ def setUp(self) -> None:
             enable_atom_ener_coeff=True,
         )
         self.spin = True
+        self.hess = False
         super().setUp()

     def test_consistency(self) -> None:
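The new test pins down two invariants of `EnergyHessianStdLoss`: with the Hessian preferences added, the energy, force, and virial terms must stay identical to `EnergyStdLoss` (and to the TF `EnerStdLoss`), and an absent `hessian` label must surface as NaN in the per-term report rather than silently contributing zero. A rough usage sketch of the new loss follows, mirroring the call convention in the test; the tensor shapes are illustrative stand-ins and may differ from the real data layout.

```python
# Hedged sketch of calling EnergyHessianStdLoss directly. The constructor
# kwargs and the (input, model, label, natoms, lr) call convention come
# from the test above; all tensor shapes below are guesses for illustration.
import torch

from deepmd.pt.loss import EnergyHessianStdLoss

nframes, natoms = 1, 6
loss_fn = EnergyHessianStdLoss(
    starter_learning_rate=1e-3,
    start_pref_e=0.02,
    limit_pref_e=1.0,
    start_pref_f=1000.0,
    limit_pref_f=1.0,
    start_pref_v=0.02,
    limit_pref_v=1.0,
    start_pref_h=10.0,
    limit_pref_h=1.0,
)

model_pred = {
    "energy": torch.zeros(nframes, 1),
    "force": torch.zeros(nframes, natoms, 3),
    "virial": torch.zeros(nframes, 9),
    "hessian": torch.zeros(nframes, 3 * natoms, 3 * natoms),
}
label = {k: v.clone() for k, v in model_pred.items()}
# find_* flags mark which labels are present, as in the test's setUp
label.update(
    {"find_energy": 1.0, "find_force": 1.0, "find_virial": 1.0, "find_hessian": 1.0}
)

def fake_model():
    # the loss invokes the model itself, mirroring fake_model() in the test
    return model_pred

_, total, per_term = loss_fn({}, fake_model, label, natoms, 1e-3)
print(float(total), sorted(per_term))  # expect l2_{ener,force,virial,hessian}_loss
```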
From 8d4c27b59e6f5a4b662f765848ed350317e27171 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Tue, 31 Dec 2024 06:36:36 +0800
Subject: [PATCH 43/43] [pre-commit.ci] pre-commit autoupdate (#4521)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

updates:
- https://github.com/pylint-dev/pylint/: v3.3.2 → v3.3.3

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
---
 .pre-commit-config.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index bd36fd6e63..fea500ab0e 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -154,7 +154,7 @@ repos:
         exclude: .pre-commit-config.yaml|source/lmp
   # customized pylint rules
   - repo: https://github.com/pylint-dev/pylint/
-    rev: v3.3.2
+    rev: v3.3.3
     hooks:
       - id: pylint
        entry: env PYTHONPATH=source/checker pylint
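A side note on the hook being bumped: the `entry: env PYTHONPATH=source/checker pylint` line exists so pylint can import the repository's own checker plugin from `source/checker`. That plugin is not part of this diff; purely as a hypothetical illustration of the mechanism, a minimal custom pylint checker looks roughly like this (class name and message code invented):

```python
# Hypothetical sketch of a custom pylint checker plugin; the repository's
# real rules under source/checker are not shown in this patch series.
from astroid import nodes
from pylint.checkers import BaseChecker


class ForbidPrintChecker(BaseChecker):
    name = "forbid-print"
    msgs = {
        "W9901": (
            "print() used; prefer the project logger",
            "forbid-print",
            "Emitted when a bare print() call is found.",
        ),
    }

    def visit_call(self, node: nodes.Call) -> None:
        # flag plain `print(...)` calls resolved by name
        if isinstance(node.func, nodes.Name) and node.func.name == "print":
            self.add_message("forbid-print", node=node)


def register(linter) -> None:
    # entry point pylint looks for when loading a plugin module
    linter.register_checker(ForbidPrintChecker(linter))
```

Loading such a module is then a matter of putting it on `PYTHONPATH` and passing `--load-plugins`, which is what the `env PYTHONPATH=source/checker` prefix accomplishes for the hook.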