From a810eb0f8715e182d4e1b1dc1636356b97023711 Mon Sep 17 00:00:00 2001 From: Jonathan Karlsen <107626001+jonathan-eq@users.noreply.github.com> Date: Wed, 20 Dec 2023 15:20:16 +0100 Subject: [PATCH] Have integration tests run with both scheduler and job queue (#6787) * Move integration tests to separate directory This commit moves all integration tests (the ones marked with pytest.mark.integration_tests atleast) to a new directory tests/integration_tests. * Have integration tests run with both queue and scheduler This commits adds the pytest.mark.scheduler mark and scheduler fixture to some of the integration tests, so that they will be ran with both the scheduler and job queue. * Move pytest snapshots down one level --- .../analysis}/__init__.py | 0 .../0/update_log | 17 + .../test_update_report/0-False/update_log | 222 +++++++ .../test_update_report/0-True/update_log | 222 +++++++ .../analysis/test_adaptive_localization.py | 15 +- .../analysis/test_es_update.py | 443 +++++++++++++ tests/integration_tests/cli/__init__.py | 0 .../False/es_mda_integration_snapshot | 21 + .../True/es_mda_integration_snapshot | 21 + .../test_es_mda/es_mda_integration_snapshot | 21 + .../cli/test_integration_cli.py | 470 ++++++++++++++ tests/integration_tests/job_queue/__init__.py | 0 .../job_queue/test_lsf_driver.py | 3 + tests/integration_tests/shared/__init__.py | 0 .../shared/share/__init__.py | 0 .../shared/share/test_shell.py | 47 ++ tests/integration_tests/status/__init__.py | 0 .../status/test_tracking_integration.py | 9 +- tests/integration_tests/storage/__init__.py | 0 .../storage/test_field_parameter.py | 300 +++++++++ .../storage/test_parameter_sample_types.py | 607 ++++++++++++++++++ tests/unit_tests/analysis/test_es_update.py | 395 +----------- .../analysis/test_misfit_preprocessor.py | 1 - tests/unit_tests/cli/test_integration_cli.py | 437 +------------ tests/unit_tests/cli/test_model_factory.py | 1 - tests/unit_tests/shared/share/test_shell.py | 41 -- .../storage/test_field_parameter.py | 290 --------- .../storage/test_parameter_sample_types.py | 543 +--------------- 28 files changed, 2418 insertions(+), 1708 deletions(-) rename tests/{unit_tests/status => integration_tests/analysis}/__init__.py (100%) create mode 100644 tests/integration_tests/analysis/snapshots/test_es_update/test_update_only_using_subset_observations/0/update_log create mode 100644 tests/integration_tests/analysis/snapshots/test_es_update/test_update_report/0-False/update_log create mode 100644 tests/integration_tests/analysis/snapshots/test_es_update/test_update_report/0-True/update_log rename tests/{unit_tests => integration_tests}/analysis/test_adaptive_localization.py (95%) create mode 100644 tests/integration_tests/analysis/test_es_update.py create mode 100644 tests/integration_tests/cli/__init__.py create mode 100644 tests/integration_tests/cli/snapshots/test_integration_cli/test_es_mda/False/es_mda_integration_snapshot create mode 100644 tests/integration_tests/cli/snapshots/test_integration_cli/test_es_mda/True/es_mda_integration_snapshot create mode 100644 tests/integration_tests/cli/snapshots/test_integration_cli/test_es_mda/es_mda_integration_snapshot create mode 100644 tests/integration_tests/cli/test_integration_cli.py create mode 100644 tests/integration_tests/job_queue/__init__.py rename tests/{unit_tests => integration_tests}/job_queue/test_lsf_driver.py (98%) create mode 100644 tests/integration_tests/shared/__init__.py create mode 100644 tests/integration_tests/shared/share/__init__.py create mode 100644 
tests/integration_tests/shared/share/test_shell.py create mode 100644 tests/integration_tests/status/__init__.py rename tests/{unit_tests => integration_tests}/status/test_tracking_integration.py (98%) create mode 100644 tests/integration_tests/storage/__init__.py create mode 100644 tests/integration_tests/storage/test_field_parameter.py create mode 100644 tests/integration_tests/storage/test_parameter_sample_types.py diff --git a/tests/unit_tests/status/__init__.py b/tests/integration_tests/analysis/__init__.py similarity index 100% rename from tests/unit_tests/status/__init__.py rename to tests/integration_tests/analysis/__init__.py diff --git a/tests/integration_tests/analysis/snapshots/test_es_update/test_update_only_using_subset_observations/0/update_log b/tests/integration_tests/analysis/snapshots/test_es_update/test_update_only_using_subset_observations/0/update_log new file mode 100644 index 00000000000..c39a3f8e30e --- /dev/null +++ b/tests/integration_tests/analysis/snapshots/test_es_update/test_update_only_using_subset_observations/0/update_log @@ -0,0 +1,17 @@ +====================================================================================================================================================== +Time: +Parent ensemble: default_0 +Target ensemble: new_ensemble +Alpha: 3.0 +Global scaling: 1.0 +Standard cutoff: 1e-06 +Run id: id +Update step: DISABLED_OBSERVATIONS +------------------------------------------------------------------------------------------------------------------------------------------------------ + Observed history | Simulated data | Status +------------------------------------------------------------------------------------------------------------------------------------------------------ + 1 : FOPR 0.008 +/- 0.100 | 0.079 +/- 0.107 | Active + 2 : WPR_DIFF_1 0.000 +/- 0.100 | -0.011 +/- 0.060 | Active + 3 : WPR_DIFF_1 0.100 +/- 0.200 | 0.081 +/- 0.126 | Active + 4 : WPR_DIFF_1 0.200 +/- 0.150 | 0.073 +/- 0.130 | Active + 5 : WPR_DIFF_1 0.000 +/- 0.050 | 0.127 +/- 0.125 | Active diff --git a/tests/integration_tests/analysis/snapshots/test_es_update/test_update_report/0-False/update_log b/tests/integration_tests/analysis/snapshots/test_es_update/test_update_report/0-False/update_log new file mode 100644 index 00000000000..ac823345200 --- /dev/null +++ b/tests/integration_tests/analysis/snapshots/test_es_update/test_update_report/0-False/update_log @@ -0,0 +1,222 @@ +====================================================================================================================================================== +Time: +Parent ensemble: default_0 +Target ensemble: new_ensemble +Alpha: 3.0 +Global scaling: 1.0 +Standard cutoff: 1e-06 +Run id: id +Update step: ALL_ACTIVE +------------------------------------------------------------------------------------------------------------------------------------------------------ + Observed history | Simulated data | Status +------------------------------------------------------------------------------------------------------------------------------------------------------ + 1 : FOPR 0.002 +/- 0.100 | 0.076 +/- 0.105 | Active + 2 : FOPR 0.008 +/- 0.100 | 0.079 +/- 0.107 | Active + 3 : FOPR 0.018 +/- 0.100 | 0.085 +/- 0.110 | Active + 4 : FOPR 0.032 +/- 0.100 | 0.092 +/- 0.114 | Active + 5 : FOPR 0.050 +/- 0.100 | 0.103 +/- 0.118 | Active + 6 : FOPR 0.071 +/- 0.100 | 0.117 +/- 0.122 | Active + 7 : FOPR 0.097 +/- 0.100 | 0.133 +/- 0.128 | Active + 8 : FOPR 0.126 +/- 0.100 | 0.151 +/- 0.134 | Active + 9 : FOPR 
0.159 +/- 0.100 | 0.171 +/- 0.140 | Active + 10 : FOPR 0.194 +/- 0.100 | 0.193 +/- 0.148 | Active + 11 : FOPR 0.233 +/- 0.100 | 0.221 +/- 0.154 | Active + 12 : FOPR 0.274 +/- 0.100 | 0.251 +/- 0.161 | Active + 13 : FOPR 0.318 +/- 0.100 | 0.293 +/- 0.164 | Active + 14 : FOPR 0.363 +/- 0.100 | 0.340 +/- 0.163 | Active + 15 : FOPR 0.411 +/- 0.100 | 0.389 +/- 0.163 | Active + 16 : FOPR 0.460 +/- 0.100 | 0.439 +/- 0.163 | Active + 17 : FOPR 0.510 +/- 0.100 | 0.491 +/- 0.164 | Active + 18 : FOPR 0.561 +/- 0.100 | 0.544 +/- 0.164 | Active + 19 : FOPR 0.613 +/- 0.100 | 0.598 +/- 0.164 | Active + 20 : FOPR 0.666 +/- 0.100 | 0.652 +/- 0.163 | Active + 21 : FOPR 0.718 +/- 0.100 | 0.706 +/- 0.164 | Active + 22 : FOPR 0.770 +/- 0.100 | 0.760 +/- 0.164 | Active + 23 : FOPR 0.823 +/- 0.100 | 0.813 +/- 0.164 | Active + 24 : FOPR 0.875 +/- 0.100 | 0.864 +/- 0.164 | Active + 25 : FOPR 0.926 +/- 0.100 | 0.914 +/- 0.165 | Active + 26 : FOPR 0.977 +/- 0.100 | 0.963 +/- 0.165 | Active + 27 : FOPR 1.027 +/- 0.103 | 1.008 +/- 0.167 | Active + 28 : FOPR 1.075 +/- 0.108 | 1.049 +/- 0.169 | Active + 29 : FOPR 1.122 +/- 0.112 | 1.089 +/- 0.171 | Active + 30 : FOPR 1.166 +/- 0.117 | 1.126 +/- 0.172 | Active + 31 : FOPR 1.208 +/- 0.121 | 1.160 +/- 0.174 | Active + 32 : FOPR 1.247 +/- 0.125 | 1.192 +/- 0.175 | Active + 33 : FOPR 1.284 +/- 0.128 | 1.219 +/- 0.175 | Active + 34 : FOPR 1.317 +/- 0.132 | 1.243 +/- 0.175 | Active + 35 : FOPR 1.346 +/- 0.135 | 1.263 +/- 0.176 | Active + 36 : FOPR 1.371 +/- 0.137 | 1.279 +/- 0.176 | Active + 37 : FOPR 1.392 +/- 0.139 | 1.292 +/- 0.177 | Active + 38 : FOPR 1.407 +/- 0.141 | 1.300 +/- 0.179 | Active + 39 : FOPR 1.418 +/- 0.142 | 1.303 +/- 0.181 | Active + 40 : FOPR 1.422 +/- 0.142 | 1.303 +/- 0.183 | Active + 41 : FOPR 1.424 +/- 0.142 | 1.299 +/- 0.185 | Active + 42 : FOPR 1.425 +/- 0.143 | 1.294 +/- 0.187 | Active + 43 : FOPR 1.427 +/- 0.143 | 1.290 +/- 0.188 | Active + 44 : FOPR 1.430 +/- 0.143 | 1.283 +/- 0.189 | Active + 45 : FOPR 1.433 +/- 0.143 | 1.275 +/- 0.187 | Active + 46 : FOPR 1.438 +/- 0.144 | 1.263 +/- 0.186 | Active + 47 : FOPR 1.443 +/- 0.144 | 1.250 +/- 0.186 | Active + 48 : FOPR 1.449 +/- 0.145 | 1.237 +/- 0.186 | Active + 49 : FOPR 1.455 +/- 0.145 | 1.222 +/- 0.185 | Active + 50 : FOPR 1.460 +/- 0.146 | 1.207 +/- 0.184 | Active + 51 : FOPR 1.466 +/- 0.147 | 1.190 +/- 0.184 | Active + 52 : FOPR 1.470 +/- 0.147 | 1.170 +/- 0.183 | Active + 53 : FOPR 1.474 +/- 0.147 | 1.146 +/- 0.183 | Active + 54 : FOPR 1.475 +/- 0.148 | 1.122 +/- 0.184 | Active + 55 : FOPR 1.474 +/- 0.147 | 1.098 +/- 0.188 | Active + 56 : FOPR 1.469 +/- 0.147 | 1.077 +/- 0.192 | Active + 57 : FOPR 1.461 +/- 0.146 | 1.053 +/- 0.194 | Active + 58 : FOPR 1.449 +/- 0.145 | 1.027 +/- 0.196 | Active + 59 : FOPR 1.436 +/- 0.144 | 1.002 +/- 0.196 | Active + 60 : FOPR 1.421 +/- 0.142 | 0.975 +/- 0.197 | Active + 61 : FOPR 1.403 +/- 0.140 | 0.947 +/- 0.200 | Active + 62 : FOPR 1.379 +/- 0.138 | 0.928 +/- 0.200 | Active + 63 : FOPR 1.353 +/- 0.135 | 0.902 +/- 0.203 | Active + 64 : FOPR 1.324 +/- 0.132 | 0.878 +/- 0.206 | Active + 65 : FOPR 1.297 +/- 0.130 | 0.851 +/- 0.210 | Active + 66 : FOPR 1.270 +/- 0.127 | 0.824 +/- 0.213 | Active + 67 : FOPR 1.243 +/- 0.124 | 0.801 +/- 0.215 | Active + 68 : FOPR 1.216 +/- 0.122 | 0.781 +/- 0.216 | Active + 69 : FOPR 1.189 +/- 0.119 | 0.762 +/- 0.216 | Active + 70 : FOPR 1.161 +/- 0.116 | 0.744 +/- 0.215 | Active + 71 : FOPR 1.134 +/- 0.113 | 0.725 +/- 0.212 | Active + 72 : FOPR 1.112 +/- 0.111 | 0.704 +/- 0.206 | Active + 73 : FOPR 1.091 +/- 0.109 | 0.683 +/- 0.200 
| Active + 74 : FOPR 1.072 +/- 0.107 | 0.661 +/- 0.194 | Active + 75 : FOPR 1.053 +/- 0.105 | 0.640 +/- 0.189 | Active + 76 : FOPR 1.033 +/- 0.103 | 0.619 +/- 0.185 | Active + 77 : FOPR 1.013 +/- 0.101 | 0.597 +/- 0.181 | Active + 78 : FOPR 0.995 +/- 0.100 | 0.576 +/- 0.176 | Active + 79 : FOPR 0.975 +/- 0.100 | 0.555 +/- 0.171 | Active + 80 : FOPR 0.956 +/- 0.100 | 0.533 +/- 0.171 | Active + 81 : FOPR 0.936 +/- 0.100 | 0.513 +/- 0.171 | Active + 82 : FOPR 0.916 +/- 0.100 | 0.494 +/- 0.170 | Active + 83 : FOPR 0.893 +/- 0.100 | 0.477 +/- 0.169 | Active + 84 : FOPR 0.869 +/- 0.100 | 0.462 +/- 0.169 | Active + 85 : FOPR 0.842 +/- 0.100 | 0.447 +/- 0.170 | Active + 86 : FOPR 0.812 +/- 0.100 | 0.432 +/- 0.170 | Active + 87 : FOPR 0.779 +/- 0.100 | 0.417 +/- 0.171 | Active + 88 : FOPR 0.742 +/- 0.100 | 0.403 +/- 0.170 | Active + 89 : FOPR 0.702 +/- 0.100 | 0.389 +/- 0.171 | Active + 90 : FOPR 0.661 +/- 0.100 | 0.379 +/- 0.171 | Active + 91 : FOPR 0.619 +/- 0.100 | 0.370 +/- 0.171 | Active + 92 : FOPR 0.578 +/- 0.100 | 0.361 +/- 0.169 | Active + 93 : FOPR 0.540 +/- 0.100 | 0.354 +/- 0.168 | Active + 94 : FOPR 0.505 +/- 0.100 | 0.349 +/- 0.166 | Active + 95 : FOPR 0.475 +/- 0.100 | 0.344 +/- 0.165 | Active + 96 : FOPR 0.450 +/- 0.100 | 0.340 +/- 0.165 | Active + 97 : FOPR 0.431 +/- 0.100 | 0.344 +/- 0.168 | Active + 98 : FOPR 0.419 +/- 0.100 | 0.350 +/- 0.171 | Active + 99 : FOPR 0.410 +/- 0.100 | 0.349 +/- 0.171 | Active + 100 : FOPR 0.406 +/- 0.100 | 0.350 +/- 0.173 | Active + 101 : FOPR 0.404 +/- 0.100 | 0.347 +/- 0.171 | Active + 102 : FOPR 0.399 +/- 0.100 | 0.344 +/- 0.168 | Active + 103 : FOPR 0.389 +/- 0.100 | 0.346 +/- 0.165 | Active + 104 : FOPR 0.374 +/- 0.100 | 0.348 +/- 0.162 | Active + 105 : FOPR 0.355 +/- 0.100 | 0.350 +/- 0.156 | Active + 106 : FOPR 0.332 +/- 0.100 | 0.350 +/- 0.148 | Active + 107 : FOPR 0.306 +/- 0.100 | 0.349 +/- 0.140 | Active + 108 : FOPR 0.282 +/- 0.100 | 0.348 +/- 0.133 | Active + 109 : FOPR 0.264 +/- 0.100 | 0.344 +/- 0.125 | Active + 110 : FOPR 0.248 +/- 0.100 | 0.340 +/- 0.118 | Active + 111 : FOPR 0.233 +/- 0.100 | 0.337 +/- 0.114 | Active + 112 : FOPR 0.219 +/- 0.100 | 0.335 +/- 0.112 | Active + 113 : FOPR 0.205 +/- 0.100 | 0.334 +/- 0.110 | Active + 114 : FOPR 0.192 +/- 0.100 | 0.333 +/- 0.110 | Active + 115 : FOPR 0.180 +/- 0.100 | 0.332 +/- 0.109 | Active + 116 : FOPR 0.169 +/- 0.100 | 0.330 +/- 0.107 | Active + 117 : FOPR 0.160 +/- 0.100 | 0.327 +/- 0.106 | Active + 118 : FOPR 0.152 +/- 0.100 | 0.323 +/- 0.105 | Active + 119 : FOPR 0.146 +/- 0.100 | 0.317 +/- 0.102 | Active + 120 : FOPR 0.141 +/- 0.100 | 0.310 +/- 0.100 | Active + 121 : FOPR 0.137 +/- 0.100 | 0.303 +/- 0.098 | Active + 122 : FOPR 0.134 +/- 0.100 | 0.296 +/- 0.096 | Active + 123 : FOPR 0.130 +/- 0.100 | 0.290 +/- 0.094 | Active + 124 : FOPR 0.127 +/- 0.100 | 0.284 +/- 0.092 | Active + 125 : FOPR 0.123 +/- 0.100 | 0.279 +/- 0.090 | Active + 126 : FOPR 0.119 +/- 0.100 | 0.275 +/- 0.088 | Active + 127 : FOPR 0.120 +/- 0.100 | 0.270 +/- 0.085 | Active + 128 : FOPR 0.128 +/- 0.100 | 0.266 +/- 0.081 | Active + 129 : FOPR 0.136 +/- 0.100 | 0.263 +/- 0.077 | Active + 130 : FOPR 0.143 +/- 0.100 | 0.261 +/- 0.073 | Active + 131 : FOPR 0.150 +/- 0.100 | 0.258 +/- 0.069 | Active + 132 : FOPR 0.155 +/- 0.100 | 0.256 +/- 0.066 | Active + 133 : FOPR 0.159 +/- 0.100 | 0.254 +/- 0.063 | Active + 134 : FOPR 0.163 +/- 0.100 | 0.251 +/- 0.061 | Active + 135 : FOPR 0.166 +/- 0.100 | 0.248 +/- 0.059 | Active + 136 : FOPR 0.167 +/- 0.100 | 0.247 +/- 0.058 | Active + 137 : FOPR 0.167 +/- 0.100 | 0.245 +/- 
0.058 | Active + 138 : FOPR 0.166 +/- 0.100 | 0.243 +/- 0.058 | Active + 139 : FOPR 0.165 +/- 0.100 | 0.243 +/- 0.058 | Active + 140 : FOPR 0.164 +/- 0.100 | 0.242 +/- 0.059 | Active + 141 : FOPR 0.165 +/- 0.100 | 0.243 +/- 0.059 | Active + 142 : FOPR 0.169 +/- 0.100 | 0.243 +/- 0.059 | Active + 143 : FOPR 0.176 +/- 0.100 | 0.242 +/- 0.058 | Active + 144 : FOPR 0.186 +/- 0.100 | 0.242 +/- 0.057 | Active + 145 : FOPR 0.197 +/- 0.100 | 0.241 +/- 0.057 | Active + 146 : FOPR 0.211 +/- 0.100 | 0.239 +/- 0.058 | Active + 147 : FOPR 0.225 +/- 0.100 | 0.238 +/- 0.059 | Active + 148 : FOPR 0.239 +/- 0.100 | 0.238 +/- 0.061 | Active + 149 : FOPR 0.252 +/- 0.100 | 0.238 +/- 0.061 | Active + 150 : FOPR 0.264 +/- 0.100 | 0.237 +/- 0.061 | Active + 151 : FOPR 0.275 +/- 0.100 | 0.236 +/- 0.062 | Active + 152 : FOPR 0.285 +/- 0.100 | 0.236 +/- 0.064 | Active + 153 : FOPR 0.295 +/- 0.100 | 0.236 +/- 0.066 | Active + 154 : FOPR 0.303 +/- 0.100 | 0.235 +/- 0.069 | Active + 155 : FOPR 0.309 +/- 0.100 | 0.234 +/- 0.072 | Active + 156 : FOPR 0.312 +/- 0.100 | 0.231 +/- 0.074 | Active + 157 : FOPR 0.313 +/- 0.100 | 0.229 +/- 0.076 | Active + 158 : FOPR 0.310 +/- 0.100 | 0.225 +/- 0.077 | Active + 159 : FOPR 0.304 +/- 0.100 | 0.220 +/- 0.078 | Active + 160 : FOPR 0.296 +/- 0.100 | 0.215 +/- 0.078 | Active + 161 : FOPR 0.286 +/- 0.100 | 0.209 +/- 0.078 | Active + 162 : FOPR 0.275 +/- 0.100 | 0.202 +/- 0.078 | Active + 163 : FOPR 0.264 +/- 0.100 | 0.195 +/- 0.079 | Active + 164 : FOPR 0.253 +/- 0.100 | 0.188 +/- 0.079 | Active + 165 : FOPR 0.241 +/- 0.100 | 0.181 +/- 0.080 | Active + 166 : FOPR 0.230 +/- 0.100 | 0.173 +/- 0.082 | Active + 167 : FOPR 0.218 +/- 0.100 | 0.167 +/- 0.084 | Active + 168 : FOPR 0.207 +/- 0.100 | 0.161 +/- 0.086 | Active + 169 : FOPR 0.197 +/- 0.100 | 0.155 +/- 0.088 | Active + 170 : FOPR 0.187 +/- 0.100 | 0.149 +/- 0.090 | Active + 171 : FOPR 0.178 +/- 0.100 | 0.143 +/- 0.092 | Active + 172 : FOPR 0.168 +/- 0.100 | 0.138 +/- 0.094 | Active + 173 : FOPR 0.159 +/- 0.100 | 0.132 +/- 0.095 | Active + 174 : FOPR 0.150 +/- 0.100 | 0.128 +/- 0.096 | Active + 175 : FOPR 0.141 +/- 0.100 | 0.124 +/- 0.096 | Active + 176 : FOPR 0.134 +/- 0.100 | 0.120 +/- 0.096 | Active + 177 : FOPR 0.127 +/- 0.100 | 0.116 +/- 0.097 | Active + 178 : FOPR 0.120 +/- 0.100 | 0.113 +/- 0.097 | Active + 179 : FOPR 0.115 +/- 0.100 | 0.110 +/- 0.096 | Active + 180 : FOPR 0.111 +/- 0.100 | 0.107 +/- 0.096 | Active + 181 : FOPR 0.107 +/- 0.100 | 0.105 +/- 0.095 | Active + 182 : FOPR 0.101 +/- 0.100 | 0.102 +/- 0.095 | Active + 183 : FOPR 0.096 +/- 0.100 | 0.100 +/- 0.095 | Active + 184 : FOPR 0.089 +/- 0.100 | 0.097 +/- 0.096 | Active + 185 : FOPR 0.081 +/- 0.100 | 0.094 +/- 0.096 | Active + 186 : FOPR 0.073 +/- 0.100 | 0.092 +/- 0.098 | Active + 187 : FOPR 0.065 +/- 0.100 | 0.090 +/- 0.099 | Active + 188 : FOPR 0.058 +/- 0.100 | 0.088 +/- 0.101 | Active + 189 : FOPR 0.050 +/- 0.100 | 0.087 +/- 0.103 | Active + 190 : FOPR 0.044 +/- 0.100 | 0.086 +/- 0.104 | Active + 191 : FOPR 0.038 +/- 0.100 | 0.085 +/- 0.106 | Active + 192 : FOPR 0.033 +/- 0.100 | 0.084 +/- 0.107 | Active + 193 : FOPR 0.029 +/- 0.100 | 0.084 +/- 0.108 | Active + 194 : FOPR 0.026 +/- 0.100 | 0.084 +/- 0.108 | Active + 195 : FOPR 0.024 +/- 0.100 | 0.084 +/- 0.109 | Active + 196 : FOPR 0.022 +/- 0.100 | 0.084 +/- 0.109 | Active + 197 : FOPR 0.021 +/- 0.100 | 0.084 +/- 0.109 | Active + 198 : FOPR 0.020 +/- 0.100 | 0.084 +/- 0.110 | Active + 199 : FOPR 0.020 +/- 0.100 | 0.084 +/- 0.110 | Active + 200 : FOPR 0.020 +/- 0.100 | 0.084 +/- 0.110 | Active + 201 : 
WOPR_OP1_108 0.300 +/- 0.075 | 0.257 +/- 0.099 | Active + 202 : WOPR_OP1_144 0.200 +/- 0.035 | 0.183 +/- 0.106 | Active + 203 : WOPR_OP1_190 0.015 +/- 0.010 | 0.042 +/- 0.041 | Active + 204 : WOPR_OP1_36 0.700 +/- 0.070 | 0.650 +/- 0.084 | Active + 205 : WOPR_OP1_72 0.500 +/- 0.050 | 0.405 +/- 0.170 | Active + 206 : WOPR_OP1_9 0.100 +/- 0.050 | 0.096 +/- 0.060 | Active + 207 : WPR_DIFF_1 0.000 +/- 0.100 | -0.011 +/- 0.060 | Active + 208 : WPR_DIFF_1 0.100 +/- 0.200 | 0.081 +/- 0.126 | Active + 209 : WPR_DIFF_1 0.200 +/- 0.150 | 0.073 +/- 0.130 | Active + 210 : WPR_DIFF_1 0.000 +/- 0.050 | 0.127 +/- 0.125 | Active diff --git a/tests/integration_tests/analysis/snapshots/test_es_update/test_update_report/0-True/update_log b/tests/integration_tests/analysis/snapshots/test_es_update/test_update_report/0-True/update_log new file mode 100644 index 00000000000..ce88046d969 --- /dev/null +++ b/tests/integration_tests/analysis/snapshots/test_es_update/test_update_report/0-True/update_log @@ -0,0 +1,222 @@ +====================================================================================================================================================== +Time: +Parent ensemble: default_0 +Target ensemble: new_ensemble +Alpha: 3.0 +Global scaling: 1.0 +Standard cutoff: 1e-06 +Run id: id +Update step: ALL_ACTIVE +------------------------------------------------------------------------------------------------------------------------------------------------------ + Observed history | Simulated data | Status +------------------------------------------------------------------------------------------------------------------------------------------------------ + 1 : FOPR 0.002 +/- 0.566 (0.100 * 5.657) | 0.076 +/- 0.105 | Active + 2 : FOPR 0.008 +/- 0.566 (0.100 * 5.657) | 0.079 +/- 0.107 | Active + 3 : FOPR 0.018 +/- 0.566 (0.100 * 5.657) | 0.085 +/- 0.110 | Active + 4 : FOPR 0.032 +/- 0.566 (0.100 * 5.657) | 0.092 +/- 0.114 | Active + 5 : FOPR 0.050 +/- 0.566 (0.100 * 5.657) | 0.103 +/- 0.118 | Active + 6 : FOPR 0.071 +/- 0.566 (0.100 * 5.657) | 0.117 +/- 0.122 | Active + 7 : FOPR 0.097 +/- 0.566 (0.100 * 5.657) | 0.133 +/- 0.128 | Active + 8 : FOPR 0.126 +/- 0.566 (0.100 * 5.657) | 0.151 +/- 0.134 | Active + 9 : FOPR 0.159 +/- 0.566 (0.100 * 5.657) | 0.171 +/- 0.140 | Active + 10 : FOPR 0.194 +/- 0.566 (0.100 * 5.657) | 0.193 +/- 0.148 | Active + 11 : FOPR 0.233 +/- 0.539 (0.100 * 5.385) | 0.221 +/- 0.154 | Active + 12 : FOPR 0.274 +/- 0.539 (0.100 * 5.385) | 0.251 +/- 0.161 | Active + 13 : FOPR 0.318 +/- 0.539 (0.100 * 5.385) | 0.293 +/- 0.164 | Active + 14 : FOPR 0.363 +/- 0.539 (0.100 * 5.385) | 0.340 +/- 0.163 | Active + 15 : FOPR 0.411 +/- 0.539 (0.100 * 5.385) | 0.389 +/- 0.163 | Active + 16 : FOPR 0.460 +/- 0.539 (0.100 * 5.385) | 0.439 +/- 0.163 | Active + 17 : FOPR 0.510 +/- 0.539 (0.100 * 5.385) | 0.491 +/- 0.164 | Active + 18 : FOPR 0.561 +/- 0.566 (0.100 * 5.657) | 0.544 +/- 0.164 | Active + 19 : FOPR 0.613 +/- 0.566 (0.100 * 5.657) | 0.598 +/- 0.164 | Active + 20 : FOPR 0.666 +/- 0.566 (0.100 * 5.657) | 0.652 +/- 0.163 | Active + 21 : FOPR 0.718 +/- 0.566 (0.100 * 5.657) | 0.706 +/- 0.164 | Active + 22 : FOPR 0.770 +/- 0.332 (0.100 * 3.317) | 0.760 +/- 0.164 | Active + 23 : FOPR 0.823 +/- 0.332 (0.100 * 3.317) | 0.813 +/- 0.164 | Active + 24 : FOPR 0.875 +/- 0.332 (0.100 * 3.317) | 0.864 +/- 0.164 | Active + 25 : FOPR 0.926 +/- 0.332 (0.100 * 3.317) | 0.914 +/- 0.165 | Active + 26 : FOPR 0.977 +/- 0.332 (0.100 * 3.317) | 0.963 +/- 0.165 | Active + 27 : FOPR 1.027 +/- 0.341 (0.103 * 3.317) | 
1.008 +/- 0.167 | Active + 28 : FOPR 1.075 +/- 0.357 (0.108 * 3.317) | 1.049 +/- 0.169 | Active + 29 : FOPR 1.122 +/- 0.372 (0.112 * 3.317) | 1.089 +/- 0.171 | Active + 30 : FOPR 1.166 +/- 0.387 (0.117 * 3.317) | 1.126 +/- 0.172 | Active + 31 : FOPR 1.208 +/- 0.400 (0.121 * 3.317) | 1.160 +/- 0.174 | Active + 32 : FOPR 1.247 +/- 0.414 (0.125 * 3.317) | 1.192 +/- 0.175 | Active + 33 : FOPR 1.284 +/- 0.257 (0.128 * 2.000) | 1.219 +/- 0.175 | Active + 34 : FOPR 1.317 +/- 0.263 (0.132 * 2.000) | 1.243 +/- 0.175 | Active + 35 : FOPR 1.346 +/- 0.269 (0.135 * 2.000) | 1.263 +/- 0.176 | Active + 36 : FOPR 1.371 +/- 0.274 (0.137 * 2.000) | 1.279 +/- 0.176 | Active + 37 : FOPR 1.392 +/- 0.462 (0.139 * 3.317) | 1.292 +/- 0.177 | Active + 38 : FOPR 1.407 +/- 0.467 (0.141 * 3.317) | 1.300 +/- 0.179 | Active + 39 : FOPR 1.418 +/- 0.470 (0.142 * 3.317) | 1.303 +/- 0.181 | Active + 40 : FOPR 1.422 +/- 0.472 (0.142 * 3.317) | 1.303 +/- 0.183 | Active + 41 : FOPR 1.424 +/- 0.472 (0.142 * 3.317) | 1.299 +/- 0.185 | Active + 42 : FOPR 1.425 +/- 0.473 (0.143 * 3.317) | 1.294 +/- 0.187 | Active + 43 : FOPR 1.427 +/- 0.473 (0.143 * 3.317) | 1.290 +/- 0.188 | Active + 44 : FOPR 1.430 +/- 0.474 (0.143 * 3.317) | 1.283 +/- 0.189 | Active + 45 : FOPR 1.433 +/- 0.475 (0.143 * 3.317) | 1.275 +/- 0.187 | Active + 46 : FOPR 1.438 +/- 0.477 (0.144 * 3.317) | 1.263 +/- 0.186 | Active + 47 : FOPR 1.443 +/- 0.479 (0.144 * 3.317) | 1.250 +/- 0.186 | Active + 48 : FOPR 1.449 +/- 0.435 (0.145 * 3.000) | 1.237 +/- 0.186 | Active + 49 : FOPR 1.455 +/- 0.436 (0.145 * 3.000) | 1.222 +/- 0.185 | Active + 50 : FOPR 1.460 +/- 0.438 (0.146 * 3.000) | 1.207 +/- 0.184 | Active + 51 : FOPR 1.466 +/- 0.655 (0.147 * 4.472) | 1.190 +/- 0.184 | Active + 52 : FOPR 1.470 +/- 0.658 (0.147 * 4.472) | 1.170 +/- 0.183 | Active + 53 : FOPR 1.474 +/- 0.659 (0.147 * 4.472) | 1.146 +/- 0.183 | Active + 54 : FOPR 1.475 +/- 0.660 (0.148 * 4.472) | 1.122 +/- 0.184 | Active + 55 : FOPR 1.474 +/- 0.442 (0.147 * 3.000) | 1.098 +/- 0.188 | Active + 56 : FOPR 1.469 +/- 0.657 (0.147 * 4.472) | 1.077 +/- 0.192 | Active + 57 : FOPR 1.461 +/- 0.653 (0.146 * 4.472) | 1.053 +/- 0.194 | Active + 58 : FOPR 1.449 +/- 0.435 (0.145 * 3.000) | 1.027 +/- 0.196 | Active + 59 : FOPR 1.436 +/- 0.642 (0.144 * 4.472) | 1.002 +/- 0.196 | Active + 60 : FOPR 1.421 +/- 0.636 (0.142 * 4.472) | 0.975 +/- 0.197 | Active + 61 : FOPR 1.403 +/- 0.421 (0.140 * 3.000) | 0.947 +/- 0.200 | Active + 62 : FOPR 1.379 +/- 0.617 (0.138 * 4.472) | 0.928 +/- 0.200 | Active + 63 : FOPR 1.353 +/- 0.605 (0.135 * 4.472) | 0.902 +/- 0.203 | Active + 64 : FOPR 1.324 +/- 0.592 (0.132 * 4.472) | 0.878 +/- 0.206 | Active + 65 : FOPR 1.297 +/- 0.580 (0.130 * 4.472) | 0.851 +/- 0.210 | Active + 66 : FOPR 1.270 +/- 0.381 (0.127 * 3.000) | 0.824 +/- 0.213 | Active + 67 : FOPR 1.243 +/- 0.373 (0.124 * 3.000) | 0.801 +/- 0.215 | Active + 68 : FOPR 1.216 +/- 0.365 (0.122 * 3.000) | 0.781 +/- 0.216 | Active + 69 : FOPR 1.189 +/- 0.532 (0.119 * 4.472) | 0.762 +/- 0.216 | Active + 70 : FOPR 1.161 +/- 0.519 (0.116 * 4.472) | 0.744 +/- 0.215 | Active + 71 : FOPR 1.134 +/- 0.507 (0.113 * 4.472) | 0.725 +/- 0.212 | Active + 72 : FOPR 1.112 +/- 0.497 (0.111 * 4.472) | 0.704 +/- 0.206 | Active + 73 : FOPR 1.091 +/- 0.488 (0.109 * 4.472) | 0.683 +/- 0.200 | Active + 74 : FOPR 1.072 +/- 0.479 (0.107 * 4.472) | 0.661 +/- 0.194 | Active + 75 : FOPR 1.053 +/- 0.471 (0.105 * 4.472) | 0.640 +/- 0.189 | Active + 76 : FOPR 1.033 +/- 0.462 (0.103 * 4.472) | 0.619 +/- 0.185 | Active + 77 : FOPR 1.013 +/- 0.545 (0.101 * 5.385) | 
0.597 +/- 0.181 | Active + 78 : FOPR 0.995 +/- 0.539 (0.100 * 5.385) | 0.576 +/- 0.176 | Active + 79 : FOPR 0.975 +/- 0.539 (0.100 * 5.385) | 0.555 +/- 0.171 | Active + 80 : FOPR 0.956 +/- 0.539 (0.100 * 5.385) | 0.533 +/- 0.171 | Active + 81 : FOPR 0.936 +/- 0.539 (0.100 * 5.385) | 0.513 +/- 0.171 | Active + 82 : FOPR 0.916 +/- 0.539 (0.100 * 5.385) | 0.494 +/- 0.170 | Active + 83 : FOPR 0.893 +/- 0.539 (0.100 * 5.385) | 0.477 +/- 0.169 | Active + 84 : FOPR 0.869 +/- 0.539 (0.100 * 5.385) | 0.462 +/- 0.169 | Active + 85 : FOPR 0.842 +/- 0.539 (0.100 * 5.385) | 0.447 +/- 0.170 | Active + 86 : FOPR 0.812 +/- 0.539 (0.100 * 5.385) | 0.432 +/- 0.170 | Active + 87 : FOPR 0.779 +/- 0.539 (0.100 * 5.385) | 0.417 +/- 0.171 | Active + 88 : FOPR 0.742 +/- 0.539 (0.100 * 5.385) | 0.403 +/- 0.170 | Active + 89 : FOPR 0.702 +/- 0.539 (0.100 * 5.385) | 0.389 +/- 0.171 | Active + 90 : FOPR 0.661 +/- 0.539 (0.100 * 5.385) | 0.379 +/- 0.171 | Active + 91 : FOPR 0.619 +/- 0.539 (0.100 * 5.385) | 0.370 +/- 0.171 | Active + 92 : FOPR 0.578 +/- 0.539 (0.100 * 5.385) | 0.361 +/- 0.169 | Active + 93 : FOPR 0.540 +/- 0.539 (0.100 * 5.385) | 0.354 +/- 0.168 | Active + 94 : FOPR 0.505 +/- 0.539 (0.100 * 5.385) | 0.349 +/- 0.166 | Active + 95 : FOPR 0.475 +/- 0.539 (0.100 * 5.385) | 0.344 +/- 0.165 | Active + 96 : FOPR 0.450 +/- 0.539 (0.100 * 5.385) | 0.340 +/- 0.165 | Active + 97 : FOPR 0.431 +/- 0.539 (0.100 * 5.385) | 0.344 +/- 0.168 | Active + 98 : FOPR 0.419 +/- 0.539 (0.100 * 5.385) | 0.350 +/- 0.171 | Active + 99 : FOPR 0.410 +/- 0.539 (0.100 * 5.385) | 0.349 +/- 0.171 | Active + 100 : FOPR 0.406 +/- 0.539 (0.100 * 5.385) | 0.350 +/- 0.173 | Active + 101 : FOPR 0.404 +/- 0.539 (0.100 * 5.385) | 0.347 +/- 0.171 | Active + 102 : FOPR 0.399 +/- 0.539 (0.100 * 5.385) | 0.344 +/- 0.168 | Active + 103 : FOPR 0.389 +/- 0.539 (0.100 * 5.385) | 0.346 +/- 0.165 | Active + 104 : FOPR 0.374 +/- 0.539 (0.100 * 5.385) | 0.348 +/- 0.162 | Active + 105 : FOPR 0.355 +/- 0.539 (0.100 * 5.385) | 0.350 +/- 0.156 | Active + 106 : FOPR 0.332 +/- 0.173 (0.100 * 1.732) | 0.350 +/- 0.148 | Active + 107 : FOPR 0.306 +/- 0.173 (0.100 * 1.732) | 0.349 +/- 0.140 | Active + 108 : FOPR 0.282 +/- 0.173 (0.100 * 1.732) | 0.348 +/- 0.133 | Active + 109 : FOPR 0.264 +/- 0.458 (0.100 * 4.583) | 0.344 +/- 0.125 | Active + 110 : FOPR 0.248 +/- 0.458 (0.100 * 4.583) | 0.340 +/- 0.118 | Active + 111 : FOPR 0.233 +/- 0.458 (0.100 * 4.583) | 0.337 +/- 0.114 | Active + 112 : FOPR 0.219 +/- 0.458 (0.100 * 4.583) | 0.335 +/- 0.112 | Active + 113 : FOPR 0.205 +/- 0.458 (0.100 * 4.583) | 0.334 +/- 0.110 | Active + 114 : FOPR 0.192 +/- 0.458 (0.100 * 4.583) | 0.333 +/- 0.110 | Active + 115 : FOPR 0.180 +/- 0.458 (0.100 * 4.583) | 0.332 +/- 0.109 | Active + 116 : FOPR 0.169 +/- 0.458 (0.100 * 4.583) | 0.330 +/- 0.107 | Active + 117 : FOPR 0.160 +/- 0.458 (0.100 * 4.583) | 0.327 +/- 0.106 | Active + 118 : FOPR 0.152 +/- 0.458 (0.100 * 4.583) | 0.323 +/- 0.105 | Active + 119 : FOPR 0.146 +/- 0.458 (0.100 * 4.583) | 0.317 +/- 0.102 | Active + 120 : FOPR 0.141 +/- 0.458 (0.100 * 4.583) | 0.310 +/- 0.100 | Active + 121 : FOPR 0.137 +/- 0.458 (0.100 * 4.583) | 0.303 +/- 0.098 | Active + 122 : FOPR 0.134 +/- 0.458 (0.100 * 4.583) | 0.296 +/- 0.096 | Active + 123 : FOPR 0.130 +/- 0.458 (0.100 * 4.583) | 0.290 +/- 0.094 | Active + 124 : FOPR 0.127 +/- 0.458 (0.100 * 4.583) | 0.284 +/- 0.092 | Active + 125 : FOPR 0.123 +/- 0.458 (0.100 * 4.583) | 0.279 +/- 0.090 | Active + 126 : FOPR 0.119 +/- 0.458 (0.100 * 4.583) | 0.275 +/- 0.088 | Active + 127 : FOPR 0.120 +/- 
0.458 (0.100 * 4.583) | 0.270 +/- 0.085 | Active + 128 : FOPR 0.128 +/- 0.458 (0.100 * 4.583) | 0.266 +/- 0.081 | Active + 129 : FOPR 0.136 +/- 0.458 (0.100 * 4.583) | 0.263 +/- 0.077 | Active + 130 : FOPR 0.143 +/- 0.100 | 0.261 +/- 0.073 | Active + 131 : FOPR 0.150 +/- 0.200 (0.100 * 2.000) | 0.258 +/- 0.069 | Active + 132 : FOPR 0.155 +/- 0.200 (0.100 * 2.000) | 0.256 +/- 0.066 | Active + 133 : FOPR 0.159 +/- 0.200 (0.100 * 2.000) | 0.254 +/- 0.063 | Active + 134 : FOPR 0.163 +/- 0.200 (0.100 * 2.000) | 0.251 +/- 0.061 | Active + 135 : FOPR 0.166 +/- 0.346 (0.100 * 3.464) | 0.248 +/- 0.059 | Active + 136 : FOPR 0.167 +/- 0.346 (0.100 * 3.464) | 0.247 +/- 0.058 | Active + 137 : FOPR 0.167 +/- 0.346 (0.100 * 3.464) | 0.245 +/- 0.058 | Active + 138 : FOPR 0.166 +/- 0.346 (0.100 * 3.464) | 0.243 +/- 0.058 | Active + 139 : FOPR 0.165 +/- 0.346 (0.100 * 3.464) | 0.243 +/- 0.058 | Active + 140 : FOPR 0.164 +/- 0.346 (0.100 * 3.464) | 0.242 +/- 0.059 | Active + 141 : FOPR 0.165 +/- 0.346 (0.100 * 3.464) | 0.243 +/- 0.059 | Active + 142 : FOPR 0.169 +/- 0.346 (0.100 * 3.464) | 0.243 +/- 0.059 | Active + 143 : FOPR 0.176 +/- 0.346 (0.100 * 3.464) | 0.242 +/- 0.058 | Active + 144 : FOPR 0.186 +/- 0.346 (0.100 * 3.464) | 0.242 +/- 0.057 | Active + 145 : FOPR 0.197 +/- 0.346 (0.100 * 3.464) | 0.241 +/- 0.057 | Active + 146 : FOPR 0.211 +/- 0.346 (0.100 * 3.464) | 0.239 +/- 0.058 | Active + 147 : FOPR 0.225 +/- 0.100 | 0.238 +/- 0.059 | Active + 148 : FOPR 0.239 +/- 0.141 (0.100 * 1.414) | 0.238 +/- 0.061 | Active + 149 : FOPR 0.252 +/- 0.141 (0.100 * 1.414) | 0.238 +/- 0.061 | Active + 150 : FOPR 0.264 +/- 0.224 (0.100 * 2.236) | 0.237 +/- 0.061 | Active + 151 : FOPR 0.275 +/- 0.224 (0.100 * 2.236) | 0.236 +/- 0.062 | Active + 152 : FOPR 0.285 +/- 0.224 (0.100 * 2.236) | 0.236 +/- 0.064 | Active + 153 : FOPR 0.295 +/- 0.224 (0.100 * 2.236) | 0.236 +/- 0.066 | Active + 154 : FOPR 0.303 +/- 0.224 (0.100 * 2.236) | 0.235 +/- 0.069 | Active + 155 : FOPR 0.309 +/- 0.245 (0.100 * 2.449) | 0.234 +/- 0.072 | Active + 156 : FOPR 0.312 +/- 0.245 (0.100 * 2.449) | 0.231 +/- 0.074 | Active + 157 : FOPR 0.313 +/- 0.245 (0.100 * 2.449) | 0.229 +/- 0.076 | Active + 158 : FOPR 0.310 +/- 0.245 (0.100 * 2.449) | 0.225 +/- 0.077 | Active + 159 : FOPR 0.304 +/- 0.245 (0.100 * 2.449) | 0.220 +/- 0.078 | Active + 160 : FOPR 0.296 +/- 0.245 (0.100 * 2.449) | 0.215 +/- 0.078 | Active + 161 : FOPR 0.286 +/- 0.566 (0.100 * 5.657) | 0.209 +/- 0.078 | Active + 162 : FOPR 0.275 +/- 0.566 (0.100 * 5.657) | 0.202 +/- 0.078 | Active + 163 : FOPR 0.264 +/- 0.566 (0.100 * 5.657) | 0.195 +/- 0.079 | Active + 164 : FOPR 0.253 +/- 0.566 (0.100 * 5.657) | 0.188 +/- 0.079 | Active + 165 : FOPR 0.241 +/- 0.566 (0.100 * 5.657) | 0.181 +/- 0.080 | Active + 166 : FOPR 0.230 +/- 0.566 (0.100 * 5.657) | 0.173 +/- 0.082 | Active + 167 : FOPR 0.218 +/- 0.566 (0.100 * 5.657) | 0.167 +/- 0.084 | Active + 168 : FOPR 0.207 +/- 0.566 (0.100 * 5.657) | 0.161 +/- 0.086 | Active + 169 : FOPR 0.197 +/- 0.566 (0.100 * 5.657) | 0.155 +/- 0.088 | Active + 170 : FOPR 0.187 +/- 0.566 (0.100 * 5.657) | 0.149 +/- 0.090 | Active + 171 : FOPR 0.178 +/- 0.566 (0.100 * 5.657) | 0.143 +/- 0.092 | Active + 172 : FOPR 0.168 +/- 0.539 (0.100 * 5.385) | 0.138 +/- 0.094 | Active + 173 : FOPR 0.159 +/- 0.539 (0.100 * 5.385) | 0.132 +/- 0.095 | Active + 174 : FOPR 0.150 +/- 0.539 (0.100 * 5.385) | 0.128 +/- 0.096 | Active + 175 : FOPR 0.141 +/- 0.539 (0.100 * 5.385) | 0.124 +/- 0.096 | Active + 176 : FOPR 0.134 +/- 0.539 (0.100 * 5.385) | 0.120 +/- 0.096 | Active + 177 : 
FOPR 0.127 +/- 0.539 (0.100 * 5.385) | 0.116 +/- 0.097 | Active + 178 : FOPR 0.120 +/- 0.539 (0.100 * 5.385) | 0.113 +/- 0.097 | Active + 179 : FOPR 0.115 +/- 0.539 (0.100 * 5.385) | 0.110 +/- 0.096 | Active + 180 : FOPR 0.111 +/- 0.539 (0.100 * 5.385) | 0.107 +/- 0.096 | Active + 181 : FOPR 0.107 +/- 0.539 (0.100 * 5.385) | 0.105 +/- 0.095 | Active + 182 : FOPR 0.101 +/- 0.539 (0.100 * 5.385) | 0.102 +/- 0.095 | Active + 183 : FOPR 0.096 +/- 0.539 (0.100 * 5.385) | 0.100 +/- 0.095 | Active + 184 : FOPR 0.089 +/- 0.539 (0.100 * 5.385) | 0.097 +/- 0.096 | Active + 185 : FOPR 0.081 +/- 0.539 (0.100 * 5.385) | 0.094 +/- 0.096 | Active + 186 : FOPR 0.073 +/- 0.539 (0.100 * 5.385) | 0.092 +/- 0.098 | Active + 187 : FOPR 0.065 +/- 0.539 (0.100 * 5.385) | 0.090 +/- 0.099 | Active + 188 : FOPR 0.058 +/- 0.539 (0.100 * 5.385) | 0.088 +/- 0.101 | Active + 189 : FOPR 0.050 +/- 0.539 (0.100 * 5.385) | 0.087 +/- 0.103 | Active + 190 : FOPR 0.044 +/- 0.539 (0.100 * 5.385) | 0.086 +/- 0.104 | Active + 191 : FOPR 0.038 +/- 0.539 (0.100 * 5.385) | 0.085 +/- 0.106 | Active + 192 : FOPR 0.033 +/- 0.539 (0.100 * 5.385) | 0.084 +/- 0.107 | Active + 193 : FOPR 0.029 +/- 0.539 (0.100 * 5.385) | 0.084 +/- 0.108 | Active + 194 : FOPR 0.026 +/- 0.566 (0.100 * 5.657) | 0.084 +/- 0.108 | Active + 195 : FOPR 0.024 +/- 0.566 (0.100 * 5.657) | 0.084 +/- 0.109 | Active + 196 : FOPR 0.022 +/- 0.566 (0.100 * 5.657) | 0.084 +/- 0.109 | Active + 197 : FOPR 0.021 +/- 0.566 (0.100 * 5.657) | 0.084 +/- 0.109 | Active + 198 : FOPR 0.020 +/- 0.566 (0.100 * 5.657) | 0.084 +/- 0.110 | Active + 199 : FOPR 0.020 +/- 0.566 (0.100 * 5.657) | 0.084 +/- 0.110 | Active + 200 : FOPR 0.020 +/- 0.566 (0.100 * 5.657) | 0.084 +/- 0.110 | Active + 201 : WOPR_OP1_108 0.300 +/- 0.075 | 0.257 +/- 0.099 | Active + 202 : WOPR_OP1_144 0.200 +/- 0.035 | 0.183 +/- 0.106 | Active + 203 : WOPR_OP1_190 0.015 +/- 0.017 (0.010 * 1.732) | 0.042 +/- 0.041 | Active + 204 : WOPR_OP1_36 0.700 +/- 0.121 (0.070 * 1.732) | 0.650 +/- 0.084 | Active + 205 : WOPR_OP1_72 0.500 +/- 0.050 | 0.405 +/- 0.170 | Active + 206 : WOPR_OP1_9 0.100 +/- 0.087 (0.050 * 1.732) | 0.096 +/- 0.060 | Active + 207 : WPR_DIFF_1 0.000 +/- 0.141 (0.100 * 1.414) | -0.011 +/- 0.060 | Active + 208 : WPR_DIFF_1 0.100 +/- 0.283 (0.200 * 1.414) | 0.081 +/- 0.126 | Active + 209 : WPR_DIFF_1 0.200 +/- 0.150 | 0.073 +/- 0.130 | Active + 210 : WPR_DIFF_1 0.000 +/- 0.050 | 0.127 +/- 0.125 | Active diff --git a/tests/unit_tests/analysis/test_adaptive_localization.py b/tests/integration_tests/analysis/test_adaptive_localization.py similarity index 95% rename from tests/unit_tests/analysis/test_adaptive_localization.py rename to tests/integration_tests/analysis/test_adaptive_localization.py index 726f0875f68..dbf49dc03ba 100644 --- a/tests/unit_tests/analysis/test_adaptive_localization.py +++ b/tests/integration_tests/analysis/test_adaptive_localization.py @@ -43,8 +43,11 @@ def run_cli_ES_with_case(poly_config): return prior_sample, posterior_sample +@pytest.mark.scheduler @pytest.mark.integration_test -def test_that_adaptive_localization_with_cutoff_1_equals_ensemble_prior(copy_poly_case): +def test_that_adaptive_localization_with_cutoff_1_equals_ensemble_prior( + copy_poly_case, try_queue_and_scheduler, monkeypatch +): set_adaptive_localization_1 = dedent( """ ANALYSIS_SET_VAR STD_ENKF LOCALIZATION True @@ -65,8 +68,11 @@ def test_that_adaptive_localization_with_cutoff_1_equals_ensemble_prior(copy_pol assert np.allclose(posterior_sample, prior_sample) +@pytest.mark.scheduler 
@pytest.mark.integration_test -def test_that_adaptive_localization_with_cutoff_0_equals_ESupdate(copy_poly_case): +def test_that_adaptive_localization_with_cutoff_0_equals_ESupdate( + copy_poly_case, try_queue_and_scheduler, monkeypatch +): """ Note that "RANDOM_SEED" in both ert configs needs to be the same to obtain the same sample from the prior. @@ -101,8 +107,11 @@ def test_that_adaptive_localization_with_cutoff_0_equals_ESupdate(copy_poly_case assert np.allclose(posterior_sample_loc0, posterior_sample_noloc) +@pytest.mark.scheduler @pytest.mark.integration_test -def test_that_posterior_generalized_variance_increases_in_cutoff(copy_poly_case): +def test_that_posterior_generalized_variance_increases_in_cutoff( + copy_poly_case, try_queue_and_scheduler, monkeypatch +): rng = np.random.default_rng(42) cutoff1 = rng.uniform(0, 1) cutoff2 = rng.uniform(cutoff1, 1) diff --git a/tests/integration_tests/analysis/test_es_update.py b/tests/integration_tests/analysis/test_es_update.py new file mode 100644 index 00000000000..dd8adf0ac8b --- /dev/null +++ b/tests/integration_tests/analysis/test_es_update.py @@ -0,0 +1,443 @@ +from argparse import ArgumentParser +from pathlib import Path + +import numpy as np +import pytest +import xarray as xr + +from ert import LibresFacade +from ert.__main__ import ert_parser +from ert.analysis import ( + ErtAnalysisError, + UpdateConfiguration, + smoother_update, +) +from ert.analysis._es_update import ( + TempStorage, + UpdateSettings, + _create_temporary_parameter_storage, +) +from ert.analysis.configuration import UpdateStep +from ert.cli import ENSEMBLE_SMOOTHER_MODE +from ert.cli.main import run_cli +from ert.config import AnalysisConfig, ErtConfig, GenDataConfig, GenKwConfig +from ert.config.analysis_module import ESSettings +from ert.storage import open_storage +from ert.storage.realization_storage_state import RealizationStorageState + + +@pytest.fixture +def update_config(): + return UpdateConfiguration( + update_steps=[ + UpdateStep( + name="ALL_ACTIVE", + observations=["OBSERVATION"], + parameters=["PARAMETER"], + ) + ] + ) + + +@pytest.fixture +def uniform_parameter(): + return GenKwConfig( + name="PARAMETER", + forward_init=False, + template_file="", + transfer_function_definitions=[ + "KEY1 UNIFORM 0 1", + ], + output_file="kw.txt", + ) + + +@pytest.fixture +def obs(): + return xr.Dataset( + { + "observations": (["report_step", "index"], [[1.0, 1.0, 1.0]]), + "std": (["report_step", "index"], [[0.1, 1.0, 10.0]]), + }, + coords={"index": [0, 1, 2], "report_step": [0]}, + attrs={"response": "RESPONSE"}, + ) + + +@pytest.mark.scheduler +@pytest.mark.integration_test +def test_that_posterior_has_lower_variance_than_prior( + copy_case, try_queue_and_scheduler, monkeypatch +): + copy_case("poly_example") + + parser = ArgumentParser(prog="test_main") + parsed = ert_parser( + parser, + [ + ENSEMBLE_SMOOTHER_MODE, + "--current-case", + "default", + "--target-case", + "target", + "--realizations", + "1-50", + "poly.ert", + ], + ) + + run_cli(parsed) + facade = LibresFacade.from_config_file("poly.ert") + with open_storage(facade.enspath) as storage: + default_fs = storage.get_ensemble_by_name("default") + df_default = facade.load_all_gen_kw_data(default_fs) + target_fs = storage.get_ensemble_by_name("target") + df_target = facade.load_all_gen_kw_data(target_fs) + + # We expect that ERT's update step lowers the + # generalized variance for the parameters. 
+    assert (
+        0
+        < np.linalg.det(df_target.cov().to_numpy())
+        < np.linalg.det(df_default.cov().to_numpy())
+    )
+
+
+@pytest.mark.scheduler(skip=True)
+@pytest.mark.integration_test
+def test_that_surfaces_retain_their_order_when_loaded_and_saved_by_ert(
+    copy_case, try_queue_and_scheduler, monkeypatch
+):
+    """This is a regression test to make sure ert does not use the wrong order
+    (row-major / column-major) when working with surfaces.
+    """
+    rng = np.random.default_rng()
+    import xtgeo
+    from scipy.ndimage import gaussian_filter
+
+    def sample_prior(nx, ny):
+        return np.exp(
+            5
+            * gaussian_filter(
+                gaussian_filter(rng.random(size=(nx, ny)), sigma=2.0), sigma=1.0
+            )
+        )
+
+    copy_case("snake_oil_field")
+
+    nx = 5
+    ny = 7
+    ensemble_size = 2
+
+    Path("./surface").mkdir()
+    for i in range(ensemble_size):
+        surf = xtgeo.RegularSurface(
+            ncol=nx, nrow=ny, xinc=1.0, yinc=1.0, values=sample_prior(nx, ny)
+        )
+        surf.to_file(f"surface/surf_init_{i}.irap", fformat="irap_ascii")
+
+    # Single observation with a large ERROR to make sure the update is minimal.
+    obs = """
+    SUMMARY_OBSERVATION WOPR_OP1_9
+    {
+        VALUE = 0.1;
+        ERROR = 200.0;
+        DATE = 2010-03-31;
+        KEY = WOPR:OP1;
+    };
+    """
+
+    with open("observations/observations.txt", "w", encoding="utf-8") as file:
+        file.write(obs)
+
+    parser = ArgumentParser(prog="test_main")
+    parsed = ert_parser(
+        parser,
+        [
+            ENSEMBLE_SMOOTHER_MODE,
+            "snake_oil_surface.ert",
+            "--target-case",
+            "es_udpate",
+        ],
+    )
+    run_cli(parsed)
+
+    ert_config = ErtConfig.from_file("snake_oil_surface.ert")
+
+    storage = open_storage(ert_config.ens_path)
+
+    ens_prior = storage.get_ensemble_by_name("default")
+    ens_posterior = storage.get_ensemble_by_name("es_udpate")
+
+    # Check that surfaces defined in INIT_FILES are not changed by ERT
+    surf_prior = ens_prior.load_parameters("TOP", list(range(ensemble_size)))["values"]
+    for i in range(ensemble_size):
+        _prior_init = xtgeo.surface_from_file(
+            f"surface/surf_init_{i}.irap", fformat="irap_ascii", dtype=np.float32
+        )
+        np.testing.assert_array_equal(surf_prior[i], _prior_init.values.data)
+
+    surf_posterior = ens_posterior.load_parameters("TOP", list(range(ensemble_size)))[
+        "values"
+    ]
+
+    assert surf_prior.shape == surf_posterior.shape
+
+    for i in range(ensemble_size):
+        with pytest.raises(AssertionError):
+            np.testing.assert_array_equal(surf_prior[i], surf_posterior[i])
+        np.testing.assert_almost_equal(
+            surf_prior[i].values, surf_posterior[i].values, decimal=3
+        )
+
+
+@pytest.mark.scheduler
+@pytest.mark.integration_test
+def test_update_multiple_param(copy_case, try_queue_and_scheduler, monkeypatch):
+    """
+    Note that this is now a snapshot test, so there is no guarantee that the
+    snapshots are correct, they are just documenting the current behavior.
+ """ + copy_case("snake_oil_field") + parser = ArgumentParser(prog="test_main") + parsed = ert_parser( + parser, + [ + ENSEMBLE_SMOOTHER_MODE, + "snake_oil.ert", + "--target-case", + "posterior", + ], + ) + + run_cli(parsed) + + ert_config = ErtConfig.from_file("snake_oil.ert") + + storage = open_storage(ert_config.ens_path) + sim_fs = storage.get_ensemble_by_name("default") + posterior_fs = storage.get_ensemble_by_name("posterior") + + def _load_parameters(source_ens, iens_active_index, param_groups): + temp_storage = TempStorage() + for param_group in param_groups: + _temp_storage = _create_temporary_parameter_storage( + source_ens, iens_active_index, param_group + ) + temp_storage[param_group] = _temp_storage[param_group] + return temp_storage + + sim_fs.load_parameters("SNAKE_OIL_PARAM_BPR")["values"] + param_groups = list(sim_fs.experiment.parameter_configuration.keys()) + prior = _load_parameters(sim_fs, list(range(10)), param_groups) + posterior = _load_parameters(posterior_fs, list(range(10)), param_groups) + + # We expect that ERT's update step lowers the + # generalized variance for the parameters. + # https://en.wikipedia.org/wiki/Variance#For_vector-valued_random_variables + for prior_name, prior_data in prior.items(): + assert np.trace(np.cov(posterior[prior_name])) < np.trace(np.cov(prior_data)) + + +@pytest.mark.integration_test +def test_gen_data_obs_data_mismatch(storage, uniform_parameter, update_config): + resp = GenDataConfig(name="RESPONSE") + obs = xr.Dataset( + { + "observations": (["report_step", "index"], [[1.0]]), + "std": (["report_step", "index"], [[0.1]]), + }, + coords={"index": [1000], "report_step": [0]}, + attrs={"response": "RESPONSE"}, + ) + experiment = storage.create_experiment( + parameters=[uniform_parameter], + responses=[resp], + observations={"OBSERVATION": obs}, + ) + prior = storage.create_ensemble( + experiment, + ensemble_size=10, + iteration=0, + name="prior", + ) + rng = np.random.default_rng(1234) + for iens in range(prior.ensemble_size): + prior.state_map[iens] = RealizationStorageState.HAS_DATA + data = rng.uniform(0, 1) + prior.save_parameters( + "PARAMETER", + iens, + xr.Dataset( + { + "values": ("names", [data]), + "transformed_values": ("names", [data]), + "names": ["KEY_1"], + } + ), + ) + data = rng.uniform(0.8, 1, 3) + prior.save_response( + "RESPONSE", + xr.Dataset( + {"values": (["report_step", "index"], [data])}, + coords={"index": range(len(data)), "report_step": [0]}, + ), + iens, + ) + posterior_ens = storage.create_ensemble( + prior.experiment_id, + ensemble_size=prior.ensemble_size, + iteration=1, + name="posterior", + prior_ensemble=prior, + ) + AnalysisConfig() + with pytest.raises( + ErtAnalysisError, + match="No active observations", + ): + smoother_update( + prior, posterior_ens, "id", update_config, UpdateSettings(), ESSettings() + ) + + +@pytest.mark.usefixtures("use_tmpdir") +@pytest.mark.integration_test +def test_gen_data_missing(storage, update_config, uniform_parameter, obs): + resp = GenDataConfig(name="RESPONSE") + experiment = storage.create_experiment( + parameters=[uniform_parameter], + responses=[resp], + observations={"OBSERVATION": obs}, + ) + prior = storage.create_ensemble( + experiment, + ensemble_size=10, + iteration=0, + name="prior", + ) + rng = np.random.default_rng(1234) + for iens in range(prior.ensemble_size): + prior.state_map[iens] = RealizationStorageState.HAS_DATA + data = rng.uniform(0, 1) + prior.save_parameters( + "PARAMETER", + iens, + xr.Dataset( + { + "values": ("names", [data]), + 
"transformed_values": ("names", [data]), + "names": ["KEY_1"], + } + ), + ) + data = rng.uniform(0.8, 1, 2) # Importantly, shorter than obs + prior.save_response( + "RESPONSE", + xr.Dataset( + {"values": (["report_step", "index"], [data])}, + coords={"index": range(len(data)), "report_step": [0]}, + ), + iens, + ) + posterior_ens = storage.create_ensemble( + prior.experiment_id, + ensemble_size=prior.ensemble_size, + iteration=1, + name="posterior", + prior_ensemble=prior, + ) + update_snapshot = smoother_update( + prior, posterior_ens, "id", update_config, UpdateSettings(), ESSettings() + ) + assert [ + step.status for step in update_snapshot.update_step_snapshots["ALL_ACTIVE"] + ] == ["Active", "Active", "Deactivated, missing response(es)"] + + +@pytest.mark.usefixtures("use_tmpdir") +@pytest.mark.integration_test +def test_update_subset_parameters(storage, uniform_parameter, obs): + no_update_param = GenKwConfig( + name="EXTRA_PARAMETER", + forward_init=False, + template_file="", + transfer_function_definitions=[ + "KEY1 UNIFORM 0 1", + ], + output_file=None, + ) + resp = GenDataConfig(name="RESPONSE") + experiment = storage.create_experiment( + parameters=[uniform_parameter, no_update_param], + responses=[resp], + observations={"OBSERVATION": obs}, + ) + prior = storage.create_ensemble( + experiment, + ensemble_size=10, + iteration=0, + name="prior", + ) + rng = np.random.default_rng(1234) + for iens in range(prior.ensemble_size): + prior.state_map[iens] = RealizationStorageState.HAS_DATA + data = rng.uniform(0, 1) + prior.save_parameters( + "PARAMETER", + iens, + xr.Dataset( + { + "values": ("names", [data]), + "transformed_values": ("names", [data]), + "names": ["KEY_1"], + } + ), + ) + prior.save_parameters( + "EXTRA_PARAMETER", + iens, + xr.Dataset( + { + "values": ("names", [data]), + "transformed_values": ("names", [data]), + "names": ["KEY_1"], + } + ), + ) + + data = rng.uniform(0.8, 1, 10) + prior.save_response( + "RESPONSE", + xr.Dataset( + {"values": (["report_step", "index"], [data])}, + coords={"index": range(len(data)), "report_step": [0]}, + ), + iens, + ) + posterior_ens = storage.create_ensemble( + prior.experiment_id, + ensemble_size=prior.ensemble_size, + iteration=1, + name="posterior", + prior_ensemble=prior, + ) + update_config = UpdateConfiguration( + update_steps=[ + UpdateStep( + name="NOT_ALL_ACTIVE", + observations=["OBSERVATION"], + parameters=["PARAMETER"], # No EXTRA_PARAMETER here + ) + ] + ) + smoother_update( + prior, posterior_ens, "id", update_config, UpdateSettings(), ESSettings() + ) + assert prior.load_parameters("EXTRA_PARAMETER", 0)["values"].equals( + posterior_ens.load_parameters("EXTRA_PARAMETER", 0)["values"] + ) + assert not prior.load_parameters("PARAMETER", 0)["values"].equals( + posterior_ens.load_parameters("PARAMETER", 0)["values"] + ) diff --git a/tests/integration_tests/cli/__init__.py b/tests/integration_tests/cli/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/integration_tests/cli/snapshots/test_integration_cli/test_es_mda/False/es_mda_integration_snapshot b/tests/integration_tests/cli/snapshots/test_integration_cli/test_es_mda/False/es_mda_integration_snapshot new file mode 100644 index 00000000000..cb38a463014 --- /dev/null +++ b/tests/integration_tests/cli/snapshots/test_integration_cli/test_es_mda/False/es_mda_integration_snapshot @@ -0,0 +1,21 @@ +Iteration,Realization,COEFFS:a,COEFFS:b,COEFFS:c +iter-0,1,0.703986294605,1.83992056754,4.46053898598 
+iter-0,2,0.198965241239,0.881942229016,4.1521713031 +iter-0,4,0.249105100695,1.28419800854,4.90535532003 +iter-0,8,0.558106754935,0.819887823895,2.00307282443 +iter-0,16,0.591623902385,1.7663810255,4.44242760126 +iter-1,1,0.528687318172,1.32403490437,3.69608391642 +iter-1,2,0.446754485528,1.14115565057,3.06814203437 +iter-1,4,0.483635549821,1.57544165709,4.7628689028 +iter-1,8,0.484429573005,0.672512465712,2.05458536861 +iter-1,16,0.389874371197,1.32924399914,4.19237881105 +iter-2,1,0.523645503842,1.19838581664,3.25086460774 +iter-2,2,0.43086689669,0.946942861008,2.36404806355 +iter-2,4,0.463046868611,1.02003340787,3.45834744009 +iter-2,8,0.509481840513,0.618716362006,1.7605474942 +iter-2,16,0.405495589342,0.898454980592,2.70541815914 +iter-3,1,0.533130473238,1.14367178973,2.99509799222 +iter-3,2,0.423337415775,0.964256985466,2.45442264577 +iter-3,4,0.498792171119,1.09932535439,3.54553895903 +iter-3,8,0.542240490526,0.756044439407,2.18736947131 +iter-3,16,0.426694705709,1.13572495324,3.4399530398 diff --git a/tests/integration_tests/cli/snapshots/test_integration_cli/test_es_mda/True/es_mda_integration_snapshot b/tests/integration_tests/cli/snapshots/test_integration_cli/test_es_mda/True/es_mda_integration_snapshot new file mode 100644 index 00000000000..cb38a463014 --- /dev/null +++ b/tests/integration_tests/cli/snapshots/test_integration_cli/test_es_mda/True/es_mda_integration_snapshot @@ -0,0 +1,21 @@ +Iteration,Realization,COEFFS:a,COEFFS:b,COEFFS:c +iter-0,1,0.703986294605,1.83992056754,4.46053898598 +iter-0,2,0.198965241239,0.881942229016,4.1521713031 +iter-0,4,0.249105100695,1.28419800854,4.90535532003 +iter-0,8,0.558106754935,0.819887823895,2.00307282443 +iter-0,16,0.591623902385,1.7663810255,4.44242760126 +iter-1,1,0.528687318172,1.32403490437,3.69608391642 +iter-1,2,0.446754485528,1.14115565057,3.06814203437 +iter-1,4,0.483635549821,1.57544165709,4.7628689028 +iter-1,8,0.484429573005,0.672512465712,2.05458536861 +iter-1,16,0.389874371197,1.32924399914,4.19237881105 +iter-2,1,0.523645503842,1.19838581664,3.25086460774 +iter-2,2,0.43086689669,0.946942861008,2.36404806355 +iter-2,4,0.463046868611,1.02003340787,3.45834744009 +iter-2,8,0.509481840513,0.618716362006,1.7605474942 +iter-2,16,0.405495589342,0.898454980592,2.70541815914 +iter-3,1,0.533130473238,1.14367178973,2.99509799222 +iter-3,2,0.423337415775,0.964256985466,2.45442264577 +iter-3,4,0.498792171119,1.09932535439,3.54553895903 +iter-3,8,0.542240490526,0.756044439407,2.18736947131 +iter-3,16,0.426694705709,1.13572495324,3.4399530398 diff --git a/tests/integration_tests/cli/snapshots/test_integration_cli/test_es_mda/es_mda_integration_snapshot b/tests/integration_tests/cli/snapshots/test_integration_cli/test_es_mda/es_mda_integration_snapshot new file mode 100644 index 00000000000..cb38a463014 --- /dev/null +++ b/tests/integration_tests/cli/snapshots/test_integration_cli/test_es_mda/es_mda_integration_snapshot @@ -0,0 +1,21 @@ +Iteration,Realization,COEFFS:a,COEFFS:b,COEFFS:c +iter-0,1,0.703986294605,1.83992056754,4.46053898598 +iter-0,2,0.198965241239,0.881942229016,4.1521713031 +iter-0,4,0.249105100695,1.28419800854,4.90535532003 +iter-0,8,0.558106754935,0.819887823895,2.00307282443 +iter-0,16,0.591623902385,1.7663810255,4.44242760126 +iter-1,1,0.528687318172,1.32403490437,3.69608391642 +iter-1,2,0.446754485528,1.14115565057,3.06814203437 +iter-1,4,0.483635549821,1.57544165709,4.7628689028 +iter-1,8,0.484429573005,0.672512465712,2.05458536861 +iter-1,16,0.389874371197,1.32924399914,4.19237881105 
+iter-2,1,0.523645503842,1.19838581664,3.25086460774 +iter-2,2,0.43086689669,0.946942861008,2.36404806355 +iter-2,4,0.463046868611,1.02003340787,3.45834744009 +iter-2,8,0.509481840513,0.618716362006,1.7605474942 +iter-2,16,0.405495589342,0.898454980592,2.70541815914 +iter-3,1,0.533130473238,1.14367178973,2.99509799222 +iter-3,2,0.423337415775,0.964256985466,2.45442264577 +iter-3,4,0.498792171119,1.09932535439,3.54553895903 +iter-3,8,0.542240490526,0.756044439407,2.18736947131 +iter-3,16,0.426694705709,1.13572495324,3.4399530398 diff --git a/tests/integration_tests/cli/test_integration_cli.py b/tests/integration_tests/cli/test_integration_cli.py new file mode 100644 index 00000000000..7f3723f5fdb --- /dev/null +++ b/tests/integration_tests/cli/test_integration_cli.py @@ -0,0 +1,470 @@ +# pylint: disable=too-many-lines + +import fileinput +import os +import shutil +import threading +from argparse import ArgumentParser +from pathlib import Path +from textwrap import dedent +from unittest.mock import Mock, call + +import numpy as np +import pandas as pd +import pytest + +import ert.shared +from ert import LibresFacade +from ert.__main__ import ert_parser +from ert.cli import ( + ENSEMBLE_EXPERIMENT_MODE, + ENSEMBLE_SMOOTHER_MODE, + ES_MDA_MODE, + ITERATIVE_ENSEMBLE_SMOOTHER_MODE, + TEST_RUN_MODE, +) +from ert.cli.main import ErtCliError, run_cli +from ert.config import ErtConfig +from ert.enkf_main import sample_prior +from ert.shared.feature_toggling import FeatureToggling +from ert.storage import open_storage + + +@pytest.fixture(name="mock_cli_run") +def fixture_mock_cli_run(monkeypatch): + mocked_monitor = Mock() + mocked_thread_start = Mock() + mocked_thread_join = Mock() + monkeypatch.setattr(threading.Thread, "start", mocked_thread_start) + monkeypatch.setattr(threading.Thread, "join", mocked_thread_join) + monkeypatch.setattr(ert.cli.monitor.Monitor, "monitor", mocked_monitor) + yield mocked_monitor, mocked_thread_join, mocked_thread_start + + +@pytest.mark.scheduler +@pytest.mark.integration_test +def test_runpath_file(tmpdir, source_root, try_queue_and_scheduler, monkeypatch): + shutil.copytree( + os.path.join(source_root, "test-data", "poly_example"), + os.path.join(str(tmpdir), "poly_example"), + ) + + with tmpdir.as_cwd(): + with open("poly_example/poly.ert", "a", encoding="utf-8") as fh: + config_lines = [ + "LOAD_WORKFLOW_JOB ASSERT_RUNPATH_FILE\n" + "LOAD_WORKFLOW TEST_RUNPATH_FILE\n", + "HOOK_WORKFLOW TEST_RUNPATH_FILE PRE_SIMULATION\n", + ] + + fh.writelines(config_lines) + + parser = ArgumentParser(prog="test_main") + parsed = ert_parser( + parser, + [ + ENSEMBLE_SMOOTHER_MODE, + "--target-case", + "poly_runpath_file", + "--realizations", + "1,2,4,8,16,32,64", + "poly_example/poly.ert", + ], + ) + + run_cli(parsed) + + assert os.path.isfile("RUNPATH_WORKFLOW_0.OK") + assert os.path.isfile("RUNPATH_WORKFLOW_1.OK") + + +@pytest.mark.scheduler +@pytest.mark.integration_test +def test_ensemble_evaluator(tmpdir, source_root, try_queue_and_scheduler, monkeypatch): + shutil.copytree( + os.path.join(source_root, "test-data", "poly_example"), + os.path.join(str(tmpdir), "poly_example"), + ) + + with tmpdir.as_cwd(): + parser = ArgumentParser(prog="test_main") + parsed = ert_parser( + parser, + [ + ENSEMBLE_SMOOTHER_MODE, + "--target-case", + "poly_runpath_file", + "--realizations", + "1,2,4,8,16,32,64", + "poly_example/poly.ert", + ], + ) + FeatureToggling.update_from_args(parsed) + + run_cli(parsed) + FeatureToggling.reset() + + +@pytest.mark.scheduler +@pytest.mark.integration_test 
+def test_es_mda(tmpdir, source_root, snapshot, try_queue_and_scheduler, monkeypatch): + shutil.copytree( + os.path.join(source_root, "test-data", "poly_example"), + os.path.join(str(tmpdir), "poly_example"), + ) + + with tmpdir.as_cwd(): + with fileinput.input("poly_example/poly.ert", inplace=True) as fin: + for line_nr, line in enumerate(fin): + if line_nr == 1: + print("RANDOM_SEED 1234", end="") + print(line, end="") + parser = ArgumentParser(prog="test_main") + parsed = ert_parser( + parser, + [ + ES_MDA_MODE, + "--target-case", + "iter-%d", + "--realizations", + "1,2,4,8,16", + "poly_example/poly.ert", + ], + ) + FeatureToggling.update_from_args(parsed) + + run_cli(parsed) + FeatureToggling.reset() + facade = LibresFacade.from_config_file("poly.ert") + with open_storage("storage", "r") as storage: + data = [] + for iter_nr in range(4): + data.append( + facade.load_all_gen_kw_data( + storage.get_ensemble_by_name(f"iter-{iter_nr}") + ) + ) + result = pd.concat( + data, + keys=[f"iter-{iter}" for iter in range(len(data))], + names=("Iteration", "Realization"), + ) + snapshot.assert_match( + result.to_csv(float_format="%.12g"), "es_mda_integration_snapshot" + ) + + +@pytest.mark.parametrize( + "mode, target", + [ + pytest.param(ENSEMBLE_SMOOTHER_MODE, "target", id=f"{ENSEMBLE_SMOOTHER_MODE}"), + pytest.param( + ITERATIVE_ENSEMBLE_SMOOTHER_MODE, + "iter-%d", + id=f"{ITERATIVE_ENSEMBLE_SMOOTHER_MODE}", + ), + pytest.param(ES_MDA_MODE, "iter-%d", id=f"{ES_MDA_MODE}"), + ], +) +@pytest.mark.integration_test +def test_cli_does_not_run_without_observations(tmpdir, source_root, mode, target): + shutil.copytree( + os.path.join(source_root, "test-data", "poly_example"), + os.path.join(str(tmpdir), "poly_example"), + ) + + def remove_linestartswith(file_name: str, startswith: str): + lines = Path(file_name).read_text(encoding="utf-8").split("\n") + lines = [line for line in lines if not line.startswith(startswith)] + Path(file_name).write_text("\n".join(lines), encoding="utf-8") + + with tmpdir.as_cwd(): + # Remove observations from config file + remove_linestartswith("poly_example/poly.ert", "OBS_CONFIG") + + parser = ArgumentParser(prog="test_main") + parsed = ert_parser( + parser, + [ + mode, + "--target-case", + target, + "poly_example/poly.ert", + ], + ) + with pytest.raises( + ErtCliError, match=f"To run {mode}, observations are needed." 
+ ): + run_cli(parsed) + + +@pytest.mark.scheduler +@pytest.mark.integration_test +def test_ensemble_evaluator_disable_monitoring( + tmpdir, source_root, try_queue_and_scheduler, monkeypatch +): + shutil.copytree( + os.path.join(source_root, "test-data", "poly_example"), + os.path.join(str(tmpdir), "poly_example"), + ) + + with tmpdir.as_cwd(): + parser = ArgumentParser(prog="test_main") + parsed = ert_parser( + parser, + [ + ENSEMBLE_SMOOTHER_MODE, + "--disable-monitoring", + "--target-case", + "poly_runpath_file", + "--realizations", + "1,2,4,8,16,32,64", + "poly_example/poly.ert", + ], + ) + FeatureToggling.update_from_args(parsed) + + run_cli(parsed) + FeatureToggling.reset() + + +@pytest.mark.scheduler +@pytest.mark.integration_test +def test_cli_test_run( + tmpdir, source_root, mock_cli_run, try_queue_and_scheduler, monkeypatch +): + shutil.copytree( + os.path.join(source_root, "test-data", "poly_example"), + os.path.join(str(tmpdir), "poly_example"), + ) + + with tmpdir.as_cwd(): + parser = ArgumentParser(prog="test_main") + parsed = ert_parser(parser, [TEST_RUN_MODE, "poly_example/poly.ert"]) + run_cli(parsed) + + monitor_mock, thread_join_mock, thread_start_mock = mock_cli_run + monitor_mock.assert_called_once() + thread_join_mock.assert_called_once() + thread_start_mock.assert_has_calls([[call(), call()]]) + + +@pytest.mark.scheduler +@pytest.mark.integration_test +def test_ies(tmpdir, source_root, try_queue_and_scheduler, monkeypatch): + shutil.copytree( + os.path.join(source_root, "test-data", "poly_example"), + os.path.join(str(tmpdir), "poly_example"), + ) + + with tmpdir.as_cwd(): + parser = ArgumentParser(prog="test_main") + parsed = ert_parser( + parser, + [ + ITERATIVE_ENSEMBLE_SMOOTHER_MODE, + "--target-case", + "iter-%d", + "--realizations", + "1,2,4,8,16", + "poly_example/poly.ert", + ], + ) + FeatureToggling.update_from_args(parsed) + + run_cli(parsed) + FeatureToggling.reset() + + +@pytest.mark.scheduler +@pytest.mark.integration_test +def test_that_running_ies_with_different_steplength_produces_different_result( + tmpdir, source_root +): + """This is a regression test to make sure that different step-lengths + give different results when running SIES. 
+ """ + shutil.copytree( + os.path.join(source_root, "test-data", "poly_example"), + os.path.join(str(tmpdir), "poly_example"), + ) + + def _run(target): + parser = ArgumentParser(prog="test_main") + parsed = ert_parser( + parser, + [ + ITERATIVE_ENSEMBLE_SMOOTHER_MODE, + "--target-case", + f"{target}-%d", + "--realizations", + "1,2,4,8", + "poly_example/poly.ert", + "--num-iterations", + "1", + ], + ) + run_cli(parsed) + facade = LibresFacade.from_config_file("poly.ert") + + with open_storage(facade.enspath) as storage: + iter_0_fs = storage.get_ensemble_by_name(f"{target}-0") + df_iter_0 = facade.load_all_gen_kw_data(iter_0_fs) + iter_1_fs = storage.get_ensemble_by_name(f"{target}-1") + df_iter_1 = facade.load_all_gen_kw_data(iter_1_fs) + + result = pd.concat( + [df_iter_0, df_iter_1], + keys=["iter-0", "iter-1"], + ) + return result + + # Run SIES with step-lengths defined + with tmpdir.as_cwd(): + with open("poly_example/poly.ert", mode="a", encoding="utf-8") as fh: + fh.write( + dedent( + """ + RANDOM_SEED 123456 + ANALYSIS_SET_VAR IES_ENKF IES_MAX_STEPLENGTH 0.5 + ANALYSIS_SET_VAR IES_ENKF IES_MIN_STEPLENGTH 0.2 + ANALYSIS_SET_VAR IES_ENKF IES_DEC_STEPLENGTH 2.5 + """ + ) + ) + + result_1 = _run("target_result_1") + + # Run SIES with different step-lengths defined + with tmpdir.as_cwd(): + with open("poly_example/poly.ert", mode="a", encoding="utf-8") as fh: + fh.write( + dedent( + """ + ANALYSIS_SET_VAR IES_ENKF IES_MAX_STEPLENGTH 0.6 + ANALYSIS_SET_VAR IES_ENKF IES_MIN_STEPLENGTH 0.3 + ANALYSIS_SET_VAR IES_ENKF IES_DEC_STEPLENGTH 2.0 + """ + ) + ) + + result_2 = _run("target_result_2") + + # Prior should be the same + assert result_1.loc["iter-0"].equals(result_2.loc["iter-0"]) + + # Posterior should be different + assert not np.isclose(result_1.loc["iter-1"], result_2.loc["iter-1"]).all() + + +@pytest.mark.scheduler +@pytest.mark.integration_test +@pytest.mark.parametrize( + "prior_mask,reals_rerun_option,should_resample", + [ + pytest.param( + range(5), "0-4", False, id="All realisations first, subset second run" + ), + pytest.param( + [1, 2, 3, 4], + "2-3", + False, + id="Subset of realisation first run, subs-subset second run", + ), + pytest.param( + [0, 1, 2], + "0-5", + True, + id="Subset of realisation first, superset in second run - must resample", + ), + ], +) +def test_that_prior_is_not_overwritten_in_ensemble_experiment( + prior_mask, + reals_rerun_option, + should_resample, + tmpdir, + source_root, + try_queue_and_scheduler, + monkeypatch, +): + shutil.copytree( + os.path.join(source_root, "test-data", "poly_example"), + os.path.join(str(tmpdir), "poly_example"), + ) + + with tmpdir.as_cwd(): + ert_config = ErtConfig.from_file("poly_example/poly.ert") + num_realizations = ert_config.model_config.num_realizations + storage = open_storage(ert_config.ens_path, mode="w") + experiment_id = storage.create_experiment( + ert_config.ensemble_config.parameter_configuration + ) + ensemble = storage.create_ensemble( + experiment_id, name="iter-0", ensemble_size=num_realizations + ) + sample_prior(ensemble, prior_mask) + prior_values = storage.get_ensemble(ensemble.id).load_parameters("COEFFS")[ + "values" + ] + storage.close() + + parser = ArgumentParser(prog="test_main") + parsed = ert_parser( + parser, + [ + ENSEMBLE_EXPERIMENT_MODE, + "poly_example/poly.ert", + "--current-case=iter-0", + "--realizations", + reals_rerun_option, + ], + ) + + FeatureToggling.update_from_args(parsed) + run_cli(parsed) + storage = open_storage(ert_config.ens_path, mode="w") + parameter_values = 
storage.get_ensemble(ensemble.id).load_parameters("COEFFS")[ + "values" + ] + + if should_resample: + with pytest.raises(AssertionError): + np.testing.assert_array_equal(parameter_values, prior_values) + else: + np.testing.assert_array_equal(parameter_values, prior_values) + storage.close() + + +@pytest.mark.scheduler(skip=True) +@pytest.mark.integration_test +@pytest.mark.usefixtures("copy_poly_case", "try_queue_and_scheduler", "monkeypatch") +def test_failing_job_cli_error_message(): + # modify poly_eval.py + with open("poly_eval.py", mode="a", encoding="utf-8") as poly_script: + poly_script.writelines([" raise RuntimeError('Argh')"]) + + args = Mock() + args.config = "poly_high_min_reals.ert" + parser = ArgumentParser(prog="test_main") + + parser = ArgumentParser(prog="test_main") + parsed = ert_parser( + parser, + [TEST_RUN_MODE, "poly.ert"], + ) + expected_substrings = [ + "Realization: 0 failed after reaching max submit (2)", + "job poly_eval failed", + "Process exited with status code 1", + "Traceback", + "raise RuntimeError('Argh')", + "RuntimeError: Argh", + ] + try: + run_cli(parsed) + except ErtCliError as error: + for substring in expected_substrings: + assert substring in f"{error}" + else: + pytest.fail(msg="Expected run cli to raise ErtCliError!") diff --git a/tests/integration_tests/job_queue/__init__.py b/tests/integration_tests/job_queue/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/unit_tests/job_queue/test_lsf_driver.py b/tests/integration_tests/job_queue/test_lsf_driver.py similarity index 98% rename from tests/unit_tests/job_queue/test_lsf_driver.py rename to tests/integration_tests/job_queue/test_lsf_driver.py index 24019ab14e0..34f5a37c21c 100644 --- a/tests/unit_tests/job_queue/test_lsf_driver.py +++ b/tests/integration_tests/job_queue/test_lsf_driver.py @@ -176,7 +176,10 @@ def copy_lsf_poly_case(copy_poly_case, tmp_path): "mock_bsub", "mock_bjobs", "mock_start_server", + "try_queue_and_scheduler", + "monkeypatch", ) +@pytest.mark.scheduler(skip=True) @pytest.mark.integration_test def test_run_mocked_lsf_queue(): run_cli( diff --git a/tests/integration_tests/shared/__init__.py b/tests/integration_tests/shared/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/integration_tests/shared/share/__init__.py b/tests/integration_tests/shared/share/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/integration_tests/shared/share/test_shell.py b/tests/integration_tests/shared/share/test_shell.py new file mode 100644 index 00000000000..09fb41cc2bc --- /dev/null +++ b/tests/integration_tests/shared/share/test_shell.py @@ -0,0 +1,47 @@ +import os +import os.path +import subprocess + +import pytest + + +@pytest.mark.scheduler +@pytest.mark.integration_test +def test_shell_scripts_integration(tmpdir, try_queue_and_scheduler, monkeypatch): + """ + The following test is a regression test that + checks that the scripts under src/ert/shared/share/ert/shell_scripts + are not broken, and correctly installed through site-config. 
+ """ + with tmpdir.as_cwd(): + ert_config_fname = "test.ert" + with open(ert_config_fname, "w", encoding="utf-8") as file_h: + file_h.write( + """ +RUNPATH realization-/iter- +JOBNAME TEST +QUEUE_SYSTEM LOCAL +NUM_REALIZATIONS 1 +FORWARD_MODEL COPY_FILE(=/file.txt, =copied.txt) +FORWARD_MODEL COPY_FILE(=/file.txt, =copied2.txt) +FORWARD_MODEL CAREFUL_COPY_FILE(=/file.txt, =copied3.txt) +FORWARD_MODEL MOVE_FILE(=copied.txt, =moved.txt) +FORWARD_MODEL DELETE_FILE(=copied2.txt) +FORWARD_MODEL MAKE_DIRECTORY(=mydir) +FORWARD_MODEL COPY_DIRECTORY(=mydir, =mydir2) +FORWARD_MODEL DELETE_DIRECTORY(=mydir) +""" + ) + + with open("file.txt", "w", encoding="utf-8") as file_h: + file_h.write("something") + + subprocess.run(["ert", "test_run", ert_config_fname], check=True) + + with open("realization-0/iter-0/moved.txt", encoding="utf-8") as output_file: + assert output_file.read() == "something" + assert not os.path.exists("realization-0/iter-0/copied.txt") + assert not os.path.exists("realization-0/iter-0/copied2.txt") + assert os.path.exists("realization-0/iter-0/copied3.txt") + assert not os.path.exists("realization-0/iter-0/mydir") + assert os.path.exists("realization-0/iter-0/mydir2") diff --git a/tests/integration_tests/status/__init__.py b/tests/integration_tests/status/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/unit_tests/status/test_tracking_integration.py b/tests/integration_tests/status/test_tracking_integration.py similarity index 98% rename from tests/unit_tests/status/test_tracking_integration.py rename to tests/integration_tests/status/test_tracking_integration.py index c6fb71dd886..884cd0bce16 100644 --- a/tests/unit_tests/status/test_tracking_integration.py +++ b/tests/integration_tests/status/test_tracking_integration.py @@ -47,6 +47,7 @@ def check_expression(original, path_expression, expected, msg_start): assert match_found, f"{msg_start} Nothing matched {path_expression}" +@pytest.mark.scheduler(skip=True) @pytest.mark.integration_test @pytest.mark.parametrize( ( @@ -139,7 +140,6 @@ def check_expression(original, path_expression, expected, msg_start): ), ], ) -@pytest.mark.scheduler(skip=True) def test_tracking( extra_config, extra_poly_eval, @@ -152,8 +152,8 @@ def test_tracking( tmpdir, source_root, storage, - monkeypatch, try_queue_and_scheduler, + monkeypatch, ): experiment_folder = "poly_example" shutil.copytree( @@ -261,6 +261,7 @@ def test_tracking( FeatureToggling.reset() +@pytest.mark.scheduler(skip=True) @pytest.mark.integration_test @pytest.mark.parametrize( ("mode, cmd_line_arguments"), @@ -289,6 +290,8 @@ def test_setting_env_context_during_run( tmpdir, source_root, storage, + try_queue_and_scheduler, + monkeypatch, ): experiment_folder = "poly_example" shutil.copytree( @@ -369,7 +372,7 @@ def run_sim(start_date): @pytest.mark.scheduler(skip=True) @pytest.mark.integration_test def test_tracking_missing_ecl( - tmpdir, source_root, caplog, storage, monkeypatch, try_queue_and_scheduler + tmpdir, source_root, caplog, storage, try_queue_and_scheduler, monkeypatch ): with tmpdir.as_cwd(): config = dedent( diff --git a/tests/integration_tests/storage/__init__.py b/tests/integration_tests/storage/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/integration_tests/storage/test_field_parameter.py b/tests/integration_tests/storage/test_field_parameter.py new file mode 100644 index 00000000000..3d4254525b9 --- /dev/null +++ b/tests/integration_tests/storage/test_field_parameter.py @@ -0,0 +1,300 @@ +import 
os +import stat +from argparse import ArgumentParser +from pathlib import Path +from textwrap import dedent + +import numpy as np +import numpy.testing +import pytest +import xtgeo + +from ert.__main__ import ert_parser +from ert.cli import ENSEMBLE_SMOOTHER_MODE +from ert.cli.main import run_cli +from ert.config import ErtConfig +from ert.storage import open_storage + + +@pytest.mark.integration_test +def test_field_param_update(tmpdir): + """ + This replicates the poly example, only it uses FIELD parameter + """ + with tmpdir.as_cwd(): + config = dedent( + """ + NUM_REALIZATIONS 5 + OBS_CONFIG observations + + FIELD MY_PARAM PARAMETER my_param.grdecl INIT_FILES:my_param.grdecl FORWARD_INIT:True + GRID MY_EGRID.EGRID + + GEN_DATA MY_RESPONSE RESULT_FILE:gen_data_%d.out REPORT_STEPS:0 INPUT_FORMAT:ASCII + INSTALL_JOB poly_eval POLY_EVAL + SIMULATION_JOB poly_eval + """ + ) + with open("config.ert", "w", encoding="utf-8") as fh: + fh.writelines(config) + + NCOL = 5 + NROW = 4 + NLAY = 1 + grid = xtgeo.create_box_grid(dimension=(NCOL, NROW, NLAY)) + grid.to_file("MY_EGRID.EGRID", "egrid") + + with open("forward_model", "w", encoding="utf-8") as f: + f.write( + dedent( + """#!/usr/bin/env python +import xtgeo +import numpy as np +import os + +if __name__ == "__main__": + if not os.path.exists("my_param.grdecl"): + values = np.random.standard_normal(5*4) + with open("my_param.grdecl", "w") as fout: + fout.write("MY_PARAM\\n") + fout.write(" ".join([str(val) for val in values]) + " /\\n") + with open("my_param.grdecl", "r") as fin: + for line_nr, line in enumerate(fin): + if line_nr == 1: + a, b, c, *_ = line.split() + + output = [float(a) * x**2 + float(b) * x + float(c) for x in range(10)] + with open("gen_data_0.out", "w", encoding="utf-8") as f: + f.write("\\n".join(map(str, output))) + """ + ) + ) + os.chmod( + "forward_model", + os.stat("forward_model").st_mode + | stat.S_IXUSR + | stat.S_IXGRP + | stat.S_IXOTH, + ) + with open("POLY_EVAL", "w", encoding="utf-8") as fout: + fout.write("EXECUTABLE forward_model") + with open("observations", "w", encoding="utf-8") as fout: + fout.write( + dedent( + """ + GENERAL_OBSERVATION MY_OBS { + DATA = MY_RESPONSE; + INDEX_LIST = 0,2,4,6,8; + RESTART = 0; + OBS_FILE = obs.txt; + };""" + ) + ) + + with open("obs.txt", "w", encoding="utf-8") as fobs: + fobs.write( + dedent( + """ + 2.1457049781272213 0.6 + 8.769219841380755 1.4 + 12.388014786122742 3.0 + 25.600464531354252 5.4 + 42.35204755970952 8.6""" + ) + ) + + parser = ArgumentParser(prog="test_main") + parsed = ert_parser( + parser, + [ + ENSEMBLE_SMOOTHER_MODE, + "--current-case", + "prior", + "--target-case", + "smoother_update", + "config.ert", + ], + ) + + run_cli(parsed) + config = ErtConfig.from_file("config.ert") + with open_storage(config.ens_path, mode="w") as storage: + prior = storage.get_ensemble_by_name("prior") + posterior = storage.get_ensemble_by_name("smoother_update") + + prior_result = prior.load_parameters("MY_PARAM", list(range(5)))["values"] + assert len(prior_result.x) == NCOL + assert len(prior_result.y) == NROW + assert len(prior_result.z) == NLAY + + posterior_result = posterior.load_parameters("MY_PARAM", list(range(5)))[ + "values" + ] + # Only assert on the first three rows, as there are only three parameters, + # a, b and c, the rest have no correlation to the results. 
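The assertion that follows compares determinants of the empirical parameter covariance: det(cov) is the generalized variance of the ensemble, and a successful smoother update is expected to shrink it for the parameters that the observations actually inform. A minimal standalone sketch of that check, using a small synthetic ensemble (the shapes and the 0.5 scaling are illustrative assumptions, not taken from the patch):

    import numpy as np

    rng = np.random.default_rng(0)
    prior = rng.standard_normal((3, 5))   # 3 parameters (a, b, c) x 5 realizations
    posterior = 0.5 * prior               # stand-in for an update that reduces spread

    # np.cov treats rows as variables, so np.cov(prior) is the 3x3 parameter covariance.
    # det(cov) is the generalized variance; the update should make it smaller.
    assert np.linalg.det(np.cov(prior)) > np.linalg.det(np.cov(posterior))
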
+ assert np.linalg.det( + np.cov(prior_result.values.reshape(5, NCOL * NROW * NLAY).T[:3]) + ) > np.linalg.det( + np.cov(posterior_result.values.reshape(5, NCOL * NROW * NLAY).T[:3]) + ) + # This checks that the fields in the runpath are different between iterations + assert Path("simulations/realization-0/iter-0/my_param.grdecl").read_text( + encoding="utf-8" + ) != Path("simulations/realization-0/iter-1/my_param.grdecl").read_text( + encoding="utf-8" + ) + + +@pytest.mark.integration_test +def test_parameter_update_with_inactive_cells_xtgeo_grdecl(tmpdir): + """ + This replicates the poly example, only it uses FIELD parameter + """ + with tmpdir.as_cwd(): + config = dedent( + """ + NUM_REALIZATIONS 5 + OBS_CONFIG observations + FIELD MY_PARAM PARAMETER my_param.grdecl INIT_FILES:my_param.grdecl FORWARD_INIT:True + GRID MY_EGRID.EGRID + GEN_DATA MY_RESPONSE RESULT_FILE:gen_data_%d.out REPORT_STEPS:0 INPUT_FORMAT:ASCII + INSTALL_JOB poly_eval POLY_EVAL + SIMULATION_JOB poly_eval + """ + ) + with open("config.ert", "w", encoding="utf-8") as fh: + fh.writelines(config) + + NCOL = 4 + NROW = 4 + NLAY = 1 + grid = xtgeo.create_box_grid(dimension=(NCOL, NROW, NLAY)) + mask = grid.get_actnum() + mask_list = [True] * 3 + [False] * 12 + [True] + mask.values = mask_list + grid.set_actnum(mask) + grid.to_file("MY_EGRID.EGRID", "egrid") + + with open("forward_model", "w", encoding="utf-8") as f: + f.write( + dedent( + """#!/usr/bin/env python +import xtgeo +import numpy as np +import os +if __name__ == "__main__": + if not os.path.exists("my_param.grdecl"): + values = np.random.standard_normal(4*4) + with open("my_param.grdecl", "w") as fout: + fout.write("MY_PARAM\\n") + fout.write(" ".join([str(val) for val in values]) + " /\\n") + with open("my_param.grdecl", "r") as fin: + for line_nr, line in enumerate(fin): + if line_nr == 1: + a, b, c, *_ = line.split() + output = [float(a) * x**2 + float(b) * x + float(c) for x in range(10)] + with open("gen_data_0.out", "w", encoding="utf-8") as f: + f.write("\\n".join(map(str, output))) + """ + ) + ) + os.chmod( + "forward_model", + os.stat("forward_model").st_mode + | stat.S_IXUSR + | stat.S_IXGRP + | stat.S_IXOTH, + ) + with open("POLY_EVAL", "w", encoding="utf-8") as fout: + fout.write("EXECUTABLE forward_model") + with open("observations", "w", encoding="utf-8") as fout: + fout.write( + dedent( + """ + GENERAL_OBSERVATION MY_OBS { + DATA = MY_RESPONSE; + INDEX_LIST = 0,2,4,6,8; + RESTART = 0; + OBS_FILE = obs.txt; + };""" + ) + ) + + with open("obs.txt", "w", encoding="utf-8") as fobs: + fobs.write( + dedent( + """ + 2.1457049781272213 0.6 + 8.769219841380755 1.4 + 12.388014786122742 3.0 + 25.600464531354252 5.4 + 42.35204755970952 8.6""" + ) + ) + + parser = ArgumentParser(prog="test_main") + parsed = ert_parser( + parser, + [ + ENSEMBLE_SMOOTHER_MODE, + "--current-case", + "prior", + "--target-case", + "smoother_update", + "config.ert", + ], + ) + + run_cli(parsed) + config = ErtConfig.from_file("config.ert") + with open_storage(config.ens_path) as storage: + prior = storage.get_ensemble_by_name("prior") + posterior = storage.get_ensemble_by_name("smoother_update") + + prior_result = prior.load_parameters("MY_PARAM", list(range(5)))["values"] + posterior_result = posterior.load_parameters("MY_PARAM", list(range(5)))[ + "values" + ] + + # check the shape of internal data used in the update + assert prior_result.shape == (5, NCOL, NROW, NLAY) + assert posterior_result.shape == (5, NCOL, NROW, NLAY) + + # Only assert on the first three rows, as there are 
only three parameters, + # a, b and c, the rest have no correlation to the results. + assert np.linalg.det( + np.cov(prior_result.values.reshape(5, NCOL * NROW * NLAY).T[:3]) + ) > np.linalg.det( + np.cov(posterior_result.values.reshape(5, NCOL * NROW * NLAY).T[:3]) + ) + + # This checks that the fields in the runpath + # are different between iterations + assert Path("simulations/realization-0/iter-0/my_param.grdecl").read_text( + encoding="utf-8" + ) != Path("simulations/realization-0/iter-1/my_param.grdecl").read_text( + encoding="utf-8" + ) + + # check shapre of written data + prop0 = xtgeo.grid_property.gridproperty_from_file( + "simulations/realization-0/iter-0/my_param.grdecl", + fformat="grdecl", + grid=grid, + name="MY_PARAM", + ) + assert len(prop0.get_npvalues1d()) == 16 + numpy.testing.assert_array_equal( + np.logical_not(prop0.values1d.mask), mask_list + ) + + prop1 = xtgeo.grid_property.gridproperty_from_file( + "simulations/realization-0/iter-0/my_param.grdecl", + fformat="grdecl", + grid=grid, + name="MY_PARAM", + ) + assert len(prop1.get_npvalues1d()) == 16 + numpy.testing.assert_array_equal( + np.logical_not(prop1.values1d.mask), mask_list + ) diff --git a/tests/integration_tests/storage/test_parameter_sample_types.py b/tests/integration_tests/storage/test_parameter_sample_types.py new file mode 100644 index 00000000000..ba80c244e50 --- /dev/null +++ b/tests/integration_tests/storage/test_parameter_sample_types.py @@ -0,0 +1,607 @@ +import os +import stat +from argparse import ArgumentParser +from contextlib import ExitStack as does_not_raise +from multiprocessing import Process +from pathlib import Path +from textwrap import dedent +from typing import Optional, Tuple + +import numpy as np +import pytest +import xtgeo +from flaky import flaky + +from ert.__main__ import ert_parser +from ert.cli import ENSEMBLE_SMOOTHER_MODE +from ert.cli.main import run_cli +from ert.config import ConfigValidationError, ErtConfig +from ert.enkf_main import create_run_path, ensemble_context, sample_prior +from ert.libres_facade import LibresFacade +from ert.storage import EnsembleAccessor, open_storage + + +def write_file(fname, contents): + with open(fname, mode="w", encoding="utf-8") as fout: + fout.writelines(contents) + + +def create_runpath( + storage, + config, + active_mask=None, + *, + ensemble: Optional[EnsembleAccessor] = None, + iteration=0, + random_seed: Optional[int] = 1234, +) -> Tuple[ErtConfig, EnsembleAccessor]: + active_mask = [True] if active_mask is None else active_mask + ert_config = ErtConfig.from_file(config) + + if ensemble is None: + experiment_id = storage.create_experiment( + ert_config.ensemble_config.parameter_configuration + ) + ensemble = storage.create_ensemble( + experiment_id, + name="default", + ensemble_size=ert_config.model_config.num_realizations, + ) + + prior = ensemble_context( + ensemble, + active_mask, + iteration, + None, + "", + ert_config.model_config.runpath_format_string, + "name", + ) + + sample_prior( + ensemble, + [i for i, active in enumerate(active_mask) if active], + random_seed=random_seed, + ) + create_run_path(prior, ert_config.substitution_list, ert_config) + return ert_config.ensemble_config, ensemble + + +def load_from_forward_model(ert_config, ensemble): + facade = LibresFacade.from_config_file(ert_config) + realizations = [True] * facade.get_ensemble_size() + return facade.load_from_forward_model(ensemble, realizations, 0) + + +@pytest.fixture +def storage(tmp_path): + with open_storage(tmp_path / "storage", mode="w") as 
storage: + yield storage + + +@pytest.mark.integration_test +@pytest.mark.parametrize( + "config_str, expected, extra_files, expectation", + [ + ( + "GEN_KW KW_NAME template.txt kw.txt prior.txt\nRANDOM_SEED 1234", + "MY_KEYWORD -0.881423", + [], + does_not_raise(), + ), + ( + "GEN_KW KW_NAME template.txt kw.txt prior.txt INIT_FILES:custom_param%d", + "MY_KEYWORD 1.31", + [("custom_param0", "MY_KEYWORD 1.31")], + does_not_raise(), + ), + ( + "GEN_KW KW_NAME template.txt kw.txt prior.txt INIT_FILES:custom_param%d", + "MY_KEYWORD 1.31", + [("custom_param0", "1.31")], + does_not_raise(), + ), + ( + "GEN_KW KW_NAME template.txt kw.txt prior.txt INIT_FILES:custom_param0", # noqa + "Not expecting a file", + [], + pytest.raises( + ConfigValidationError, match="Loading GEN_KW from files requires %d" + ), + ), + ], +) +def test_gen_kw(storage, tmpdir, config_str, expected, extra_files, expectation): + with tmpdir.as_cwd(): + config = dedent( + """ + JOBNAME my_name%d + NUM_REALIZATIONS 1 + """ + ) + config += config_str + with open("config.ert", mode="w", encoding="utf-8") as fh: + fh.writelines(config) + with open("template.txt", mode="w", encoding="utf-8") as fh: + fh.writelines("MY_KEYWORD ") + with open("prior.txt", mode="w", encoding="utf-8") as fh: + fh.writelines("MY_KEYWORD NORMAL 0 1") + for fname, contents in extra_files: + write_file(fname, contents) + + with expectation: + create_runpath(storage, "config.ert") + assert ( + Path("simulations/realization-0/iter-0/kw.txt").read_text( + encoding="utf-8" + ) + == expected + ) + + +@pytest.mark.integration_test +@pytest.mark.parametrize( + "config_str, expected, extra_files", + [ + pytest.param( + "GEN_KW KW_NAME template.txt kw.txt prior.txt", + "MY_KEYWORD -0.881423\nNOT KEYWORD ", + [["template.txt", "MY_KEYWORD \nNOT KEYWORD "]], + id="Second magic string that should not be replaced", + ), + pytest.param( + "GEN_KW KW_NAME template.txt kw.txt prior.txt", + "MY_KEYWORD -0.881423\n-- if K<=28 then blah blah", + [["template.txt", "MY_KEYWORD \n-- if K<=28 then blah blah"]], + id="Comment in file with <", + ), + pytest.param( + "GEN_KW KW_NAME template.txt kw.txt prior.txt", + "MY_KEYWORD -0.881423\nNR_TWO 0.654691", + [ + ["template.txt", "MY_KEYWORD \nNR_TWO "], + ["prior.txt", "MY_KEYWORD NORMAL 0 1\nNR_TWO NORMAL 0 1"], + ], + id="Two parameters", + ), + ], +) +def test_gen_kw_templating( + storage, + tmpdir, + config_str, + expected, + extra_files, +): + with tmpdir.as_cwd(): + config = dedent( + """ + JOBNAME my_name%d + NUM_REALIZATIONS 1 + RANDOM_SEED 1234 + """ + ) + config += config_str + with open("config.ert", mode="w", encoding="utf-8") as fh: + fh.writelines(config) + with open("prior.txt", mode="w", encoding="utf-8") as fh: + fh.writelines("MY_KEYWORD NORMAL 0 1") + for fname, contents in extra_files: + write_file(fname, contents) + create_runpath(storage, "config.ert") + assert ( + Path("simulations/realization-0/iter-0/kw.txt").read_text(encoding="utf-8") + == expected + ) + + +@pytest.mark.integration_test +@pytest.mark.parametrize( + "relpath", + [ + "somepath/", + # This test was added to show current behaviour for Ert. + # If absolute paths should be possible to be used like this is up for debate. 
+ "/tmp/somepath/", # ert removes leading '/' + ], +) +def test_gen_kw_outfile_will_use_paths(tmpdir, storage, relpath: str): + with tmpdir.as_cwd(): + config = dedent( + f""" + JOBNAME my_name%d + NUM_REALIZATIONS 1 + GEN_KW KW_NAME template.txt {relpath}kw.txt prior.txt + """ + ) + + with open("config.ert", mode="w", encoding="utf-8") as fh: + fh.writelines(config) + with open("template.txt", mode="w", encoding="utf-8") as fh: + fh.writelines("MY_KEYWORD ") + with open("prior.txt", mode="w", encoding="utf-8") as fh: + fh.writelines("MY_KEYWORD NORMAL 0 1") + if relpath.startswith("/"): + relpath = relpath[1:] + create_runpath(storage, "config.ert") + assert os.path.exists(f"simulations/realization-0/iter-0/{relpath}kw.txt") + + +@pytest.mark.integration_test +@pytest.mark.parametrize( + "config_str, expected, extra_files", + [ + ( + "GEN_KW KW_NAME template.txt kw.txt prior.txt INIT_FILES:custom_param%d", + "MY_KEYWORD 1.31\nMY_SECOND_KEYWORD 1.01", + [("custom_param0", "MY_SECOND_KEYWORD 1.01\nMY_KEYWORD 1.31")], + ), + ], +) +def test_that_order_of_input_in_user_input_is_abritrary_for_gen_kw_init_files( + tmpdir, config_str, expected, extra_files, storage +): + with tmpdir.as_cwd(): + config = dedent( + """ + JOBNAME my_name%d + NUM_REALIZATIONS 1 + """ + ) + config += config_str + with open("config.ert", mode="w", encoding="utf-8") as fh: + fh.writelines(config) + with open("template.txt", mode="w", encoding="utf-8") as fh: + fh.writelines( + "MY_KEYWORD \nMY_SECOND_KEYWORD " + ) + with open("prior.txt", mode="w", encoding="utf-8") as fh: + fh.writelines("MY_KEYWORD NORMAL 0 1\nMY_SECOND_KEYWORD NORMAL 0 1") + for fname, contents in extra_files: + write_file(fname, contents) + + create_runpath(storage, "config.ert") + assert ( + Path("simulations/realization-0/iter-0/kw.txt").read_text("utf-8") + == expected + ) + + +@pytest.mark.integration_test +@pytest.mark.parametrize("load_forward_init", [True, False]) +def test_gen_kw_forward_init(tmpdir, storage, load_forward_init): + with tmpdir.as_cwd(): + config = dedent( + """ + JOBNAME my_name%d + NUM_REALIZATIONS 1 + GEN_KW KW_NAME template.txt kw.txt prior.txt """ + f"""FORWARD_INIT:{str(load_forward_init)} INIT_FILES:custom_param%d + """ + ) + with open("config.ert", mode="w", encoding="utf-8") as fh: + fh.writelines(config) + + with open("template.txt", mode="w", encoding="utf-8") as fh: + fh.writelines("MY_KEYWORD ") + with open("prior.txt", mode="w", encoding="utf-8") as fh: + fh.writelines("MY_KEYWORD NORMAL 0 1") + if not load_forward_init: + write_file("custom_param0", "1.31") + + if load_forward_init: + with pytest.raises( + ConfigValidationError, + match=( + "Loading GEN_KW from files created by " + "the forward model is not supported." + ), + ): + create_runpath(storage, "config.ert") + else: + _, fs = create_runpath(storage, "config.ert") + assert Path("simulations/realization-0/iter-0/kw.txt").exists() + value = ( + fs.load_parameters("KW_NAME", 0) + .sel(names="MY_KEYWORD")["values"] + .values + ) + assert value == 1.31 + + +@pytest.mark.integration_test +def test_surface_param_update(tmpdir): + """Full update with a surface parameter, it mirrors the poly example, + except it uses SURFACE instead of GEN_KW. 
+ """ + ensemble_size = 5 + with tmpdir.as_cwd(): + config = f""" +NUM_REALIZATIONS {ensemble_size} +QUEUE_OPTION LOCAL MAX_RUNNING 5 +OBS_CONFIG observations +SURFACE MY_PARAM OUTPUT_FILE:surf.irap INIT_FILES:surf.irap BASE_SURFACE:surf.irap FORWARD_INIT:True +GEN_DATA MY_RESPONSE RESULT_FILE:gen_data_%d.out REPORT_STEPS:0 INPUT_FORMAT:ASCII +INSTALL_JOB poly_eval POLY_EVAL +SIMULATION_JOB poly_eval +""" + base_surface = xtgeo.RegularSurface( + ncol=2, + nrow=3, + xinc=1, + yinc=1, + xori=1, + yori=1, + yflip=1, + rotation=1, + ) + base_surface.to_file("surf.irap", fformat="irap_ascii") + + with open("forward_model", "w", encoding="utf-8") as f: + f.write( + """#!/usr/bin/env python +import os + +import xtgeo +import numpy as np + +if __name__ == "__main__": + if not os.path.exists("surf.irap"): + nx = 2 + ny = 3 + values = np.random.standard_normal(nx * ny) + surf = xtgeo.RegularSurface(ncol=nx, + nrow=ny, + xinc=1, + yinc=1, + rotation=0, + values=values) + surf.to_file("surf.irap", fformat="irap_ascii") + + surf_fs = xtgeo.surface_from_file("surf.irap", fformat="irap_ascii", + dtype=np.float32) + a, b, c, *_ = surf_fs.values.data.ravel() + + output = [a * x**2 + b * x + c for x in range(10)] + + with open("gen_data_0.out", "w", encoding="utf-8") as f: + f.write("\\n".join(map(str, output))) + """ + ) + os.chmod( + "forward_model", + os.stat("forward_model").st_mode + | stat.S_IXUSR + | stat.S_IXGRP + | stat.S_IXOTH, + ) + with open("POLY_EVAL", "w", encoding="utf-8") as fout: + fout.write("EXECUTABLE forward_model") + with open("observations", "w", encoding="utf-8") as fout: + fout.write( + dedent( + """ + GENERAL_OBSERVATION MY_OBS { + DATA = MY_RESPONSE; + INDEX_LIST = 0,2,4,6,8; + RESTART = 0; + OBS_FILE = obs.txt; + };""" + ) + ) + + with open("obs.txt", "w", encoding="utf-8") as fobs: + fobs.write( + dedent( + """ + 2.1457049781272213 0.6 + 8.769219841380755 1.4 + 12.388014786122742 3.0 + 25.600464531354252 5.4 + 42.35204755970952 8.6""" + ) + ) + + with open("config.ert", "w", encoding="utf-8") as fh: + fh.writelines(config) + + parser = ArgumentParser(prog="test_main") + parsed = ert_parser( + parser, + [ + ENSEMBLE_SMOOTHER_MODE, + "--current-case", + "prior", + "--target-case", + "smoother_update", + "config.ert", + ], + ) + + run_cli(parsed) + with open_storage(tmpdir / "storage") as storage: + prior = storage.get_ensemble_by_name("prior") + posterior = storage.get_ensemble_by_name("smoother_update") + prior_param = ( + prior.load_parameters("MY_PARAM", range(5))["values"] + .values.reshape(5, 2 * 3) + .T + ) + posterior_param = ( + posterior.load_parameters("MY_PARAM", range(5))["values"] + .values.reshape(5, 2 * 3) + .T + ) + + assert prior_param.dtype == np.float32 + assert posterior_param.dtype == np.float32 + + assert np.linalg.det(np.cov(prior_param[:3])) > np.linalg.det( + np.cov(posterior_param[:3]) + ) + + realizations_to_test = np.random.choice( + range(ensemble_size), size=2, replace=False + ) + surf = xtgeo.surface_from_file( + f"simulations/realization-{realizations_to_test[0]}/iter-1/surf.irap", + fformat="irap_ascii", + dtype=np.float32, + ) + + assert base_surface.ncol == surf.ncol + assert base_surface.nrow == surf.nrow + assert base_surface.xinc == surf.xinc + assert base_surface.yinc == surf.yinc + assert base_surface.xori == surf.xori + assert base_surface.yori == surf.yori + assert base_surface.yflip == surf.yflip + assert base_surface.rotation == surf.yflip + + surf2 = xtgeo.surface_from_file( + 
f"simulations/realization-{realizations_to_test[1]}/iter-1/surf.irap", + fformat="irap_ascii", + dtype=np.float32, + ) + + assert not (surf.values == surf2.values).any() + + assert len(prior.load_parameters("MY_PARAM", 0)["values"].x) == 2 + assert len(prior.load_parameters("MY_PARAM", 0)["values"].y) == 3 + + +@pytest.mark.integration_test +@pytest.mark.limit_memory("110 MB") +@flaky(max_runs=5, min_passes=1) +def test_field_param_memory(tmpdir): + with tmpdir.as_cwd(): + # Setup is done in a subprocess so that memray does not pick up the allocations + p = Process(target=create_poly_with_field, args=((2000, 1000, 1), 2)) + p.start() + p.join() # this blocks until the process terminates + + run_poly() + + +def create_poly_with_field(field_dim: Tuple[int, int, int], realisations: int): + """ + This replicates the poly example, only it uses FIELD parameter + """ + grid_size = field_dim[0] * field_dim[1] * field_dim[2] + config = dedent( + f""" + NUM_REALIZATIONS {realisations} + OBS_CONFIG observations + + FIELD MY_PARAM PARAMETER my_param.bgrdecl INIT_FILES:my_param.bgrdecl FORWARD_INIT:True + GRID MY_EGRID.EGRID + + GEN_DATA MY_RESPONSE RESULT_FILE:gen_data_%d.out REPORT_STEPS:0 INPUT_FORMAT:ASCII + INSTALL_JOB poly_eval POLY_EVAL + SIMULATION_JOB poly_eval + """ + ) + with open("config.ert", "w", encoding="utf-8") as fh: + fh.writelines(config) + + grid = xtgeo.create_box_grid(dimension=field_dim) + grid.to_file("MY_EGRID.EGRID", "egrid") + del grid + + with open("forward_model", "w", encoding="utf-8") as f: + f.write( + f"""#!/usr/bin/env python +import numpy as np +import os +import resfo + +if __name__ == "__main__": + if not os.path.exists("my_param.bgrdecl"): + values = np.random.standard_normal({grid_size}) + resfo.write("my_param.bgrdecl", [("MY_PARAM", values)]) + datas = resfo.read("my_param.bgrdecl") + assert datas[0][0] == "MY_PARAM" + a,b,c,*_ = datas[0][1] + + output = [float(a) * x**2 + float(b) * x + float(c) for x in range(10)] + with open("gen_data_0.out", "w", encoding="utf-8") as f: + f.write("\\n".join(map(str, output))) + """ + ) + os.chmod( + "forward_model", + os.stat("forward_model").st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH, + ) + with open("POLY_EVAL", "w", encoding="utf-8") as fout: + fout.write("EXECUTABLE forward_model") + with open("observations", "w", encoding="utf-8") as fout: + fout.write( + dedent( + """ + GENERAL_OBSERVATION MY_OBS { + DATA = MY_RESPONSE; + INDEX_LIST = 0,2,4,6,8; + RESTART = 0; + OBS_FILE = obs.txt; + };""" + ) + ) + + with open("obs.txt", "w", encoding="utf-8") as fobs: + fobs.write( + dedent( + """ + 2.1457049781272213 0.6 + 8.769219841380755 1.4 + 12.388014786122742 3.0 + 25.600464531354252 5.4 + 42.35204755970952 8.6""" + ) + ) + + +def run_poly(): + parser = ArgumentParser(prog="test_main") + parsed = ert_parser( + parser, + [ + ENSEMBLE_SMOOTHER_MODE, + "--current-case", + "prior", + "--target-case", + "smoother_update", + "config.ert", + ], + ) + + run_cli(parsed) + + +@pytest.mark.integration_test +@pytest.mark.parametrize( + "config_str, expected", + [ + ( + "GEN_KW KW_NAME prior.txt\nRANDOM_SEED 1234", + -0.881423, + ), + ], +) +def test_gen_kw_optional_template(storage, tmpdir, config_str, expected): + with tmpdir.as_cwd(): + config = dedent( + """ + JOBNAME my_name%d + NUM_REALIZATIONS 1 + """ + ) + config += config_str + with open("config.ert", mode="w", encoding="utf-8") as fh: + fh.writelines(config) + with open("prior.txt", mode="w", encoding="utf-8") as fh: + fh.writelines("MY_KEYWORD NORMAL 0 1") + + 
create_runpath(storage, "config.ert") + assert list(storage.ensembles)[0].load_parameters("KW_NAME")[ + "values" + ].values.flatten().tolist() == pytest.approx([expected]) diff --git a/tests/unit_tests/analysis/test_es_update.py b/tests/unit_tests/analysis/test_es_update.py index d1e8ec4184f..2af5d43b6b0 100644 --- a/tests/unit_tests/analysis/test_es_update.py +++ b/tests/unit_tests/analysis/test_es_update.py @@ -1,6 +1,5 @@ import functools import re -from argparse import ArgumentParser from functools import partial from pathlib import Path @@ -10,29 +9,20 @@ import scipy as sp import xarray as xr import xtgeo -from iterative_ensemble_smoother import SIES, steplength_exponential +from iterative_ensemble_smoother import steplength_exponential -from ert import LibresFacade -from ert.__main__ import ert_parser from ert.analysis import ( ErtAnalysisError, UpdateConfiguration, iterative_smoother_update, smoother_update, ) -from ert.analysis._es_update import ( - TempStorage, - UpdateSettings, - _create_temporary_parameter_storage, -) +from ert.analysis._es_update import UpdateSettings from ert.analysis.configuration import UpdateStep from ert.analysis.row_scaling import RowScaling -from ert.cli import ENSEMBLE_SMOOTHER_MODE -from ert.cli.main import run_cli -from ert.config import AnalysisConfig, ErtConfig, Field, GenDataConfig, GenKwConfig +from ert.config import Field, GenDataConfig, GenKwConfig from ert.config.analysis_module import ESSettings, IESSettings from ert.field_utils import Shape -from ert.storage import open_storage from ert.storage.realization_storage_state import RealizationStorageState @@ -74,14 +64,6 @@ def obs(): ) -@pytest.fixture() -def minimal_config(use_tmpdir): - with open("config_file.ert", "w", encoding="utf-8") as fout: - fout.write("NUM_REALIZATIONS 1") - ert_config = ErtConfig.from_file("config_file.ert") - yield ert_config - - def remove_timestamp_from_logfile(log_file: Path): with open(log_file, "r", encoding="utf-8") as fin: buf = fin.read() @@ -287,42 +269,6 @@ def test_update_snapshot( assert target_gen_kw == pytest.approx(expected_gen_kw) -@pytest.mark.integration_test -def test_that_posterior_has_lower_variance_than_prior(copy_case): - copy_case("poly_example") - - parser = ArgumentParser(prog="test_main") - parsed = ert_parser( - parser, - [ - ENSEMBLE_SMOOTHER_MODE, - "--current-case", - "default", - "--target-case", - "target", - "--realizations", - "1-50", - "poly.ert", - ], - ) - - run_cli(parsed) - facade = LibresFacade.from_config_file("poly.ert") - with open_storage(facade.enspath) as storage: - default_fs = storage.get_ensemble_by_name("default") - df_default = facade.load_all_gen_kw_data(default_fs) - target_fs = storage.get_ensemble_by_name("target") - df_target = facade.load_all_gen_kw_data(target_fs) - - # We expect that ERT's update step lowers the - # generalized variance for the parameters. - assert ( - 0 - < np.linalg.det(df_target.cov().to_numpy()) - < np.linalg.det(df_default.cov().to_numpy()) - ) - - @pytest.mark.parametrize( "expected_target_gen_kw, update_step", [ @@ -530,137 +476,6 @@ def test_snapshot_alpha( ] == expected -@pytest.mark.integration_test -def test_that_surfaces_retain_their_order_when_loaded_and_saved_by_ert(copy_case): - """This is a regression test to make sure ert does not use the wrong order - (row-major / column-major) when working with surfaces. 
- """ - rng = np.random.default_rng() - from scipy.ndimage import gaussian_filter - - def sample_prior(nx, ny): - return np.exp( - 5 - * gaussian_filter( - gaussian_filter(rng.random(size=(nx, ny)), sigma=2.0), sigma=1.0 - ) - ) - - copy_case("snake_oil_field") - - nx = 5 - ny = 7 - ensemble_size = 2 - - Path("./surface").mkdir() - for i in range(ensemble_size): - surf = xtgeo.RegularSurface( - ncol=nx, nrow=ny, xinc=1.0, yinc=1.0, values=sample_prior(nx, ny) - ) - surf.to_file(f"surface/surf_init_{i}.irap", fformat="irap_ascii") - - # Single observation with a large ERROR to make sure the udpate is minimal. - obs = """ - SUMMARY_OBSERVATION WOPR_OP1_9 - { - VALUE = 0.1; - ERROR = 200.0; - DATE = 2010-03-31; - KEY = WOPR:OP1; - }; - """ - - with open("observations/observations.txt", "w", encoding="utf-8") as file: - file.write(obs) - - parser = ArgumentParser(prog="test_main") - parsed = ert_parser( - parser, - [ - ENSEMBLE_SMOOTHER_MODE, - "snake_oil_surface.ert", - "--target-case", - "es_udpate", - ], - ) - run_cli(parsed) - - ert_config = ErtConfig.from_file("snake_oil_surface.ert") - - storage = open_storage(ert_config.ens_path) - - ens_prior = storage.get_ensemble_by_name("default") - ens_posterior = storage.get_ensemble_by_name("es_udpate") - - # Check that surfaces defined in INIT_FILES are not changed by ERT - surf_prior = ens_prior.load_parameters("TOP", list(range(ensemble_size)))["values"] - for i in range(ensemble_size): - _prior_init = xtgeo.surface_from_file( - f"surface/surf_init_{i}.irap", fformat="irap_ascii", dtype=np.float32 - ) - np.testing.assert_array_equal(surf_prior[i], _prior_init.values.data) - - surf_posterior = ens_posterior.load_parameters("TOP", list(range(ensemble_size)))[ - "values" - ] - - assert surf_prior.shape == surf_posterior.shape - - for i in range(ensemble_size): - with pytest.raises(AssertionError): - np.testing.assert_array_equal(surf_prior[i], surf_posterior[i]) - np.testing.assert_almost_equal( - surf_prior[i].values, surf_posterior[i].values, decimal=3 - ) - - -@pytest.mark.integration_test -def test_update_multiple_param(copy_case): - """ - Note that this is now a snapshot test, so there is no guarantee that the - snapshots are correct, they are just documenting the current behavior. - """ - copy_case("snake_oil_field") - parser = ArgumentParser(prog="test_main") - parsed = ert_parser( - parser, - [ - ENSEMBLE_SMOOTHER_MODE, - "snake_oil.ert", - "--target-case", - "posterior", - ], - ) - - run_cli(parsed) - - ert_config = ErtConfig.from_file("snake_oil.ert") - - storage = open_storage(ert_config.ens_path) - sim_fs = storage.get_ensemble_by_name("default") - posterior_fs = storage.get_ensemble_by_name("posterior") - - def _load_parameters(source_ens, iens_active_index, param_groups): - temp_storage = TempStorage() - for param_group in param_groups: - _temp_storage = _create_temporary_parameter_storage( - source_ens, iens_active_index, param_group - ) - temp_storage[param_group] = _temp_storage[param_group] - return temp_storage - - sim_fs.load_parameters("SNAKE_OIL_PARAM_BPR")["values"] - param_groups = list(sim_fs.experiment.parameter_configuration.keys()) - prior = _load_parameters(sim_fs, list(range(10)), param_groups) - posterior = _load_parameters(posterior_fs, list(range(10)), param_groups) - - # We expect that ERT's update step lowers the - # generalized variance for the parameters. 
- # https://en.wikipedia.org/wiki/Variance#For_vector-valued_random_variables - for prior_name, prior_data in prior.items(): - assert np.trace(np.cov(posterior[prior_name])) < np.trace(np.cov(prior_data)) - - def test_and_benchmark_adaptive_localization_with_fields( storage, tmp_path, monkeypatch, benchmark ): @@ -815,123 +630,6 @@ def g(X): assert np.isclose(prior_da, posterior_da).sum() > 0 -@pytest.mark.integration_test -def test_gen_data_obs_data_mismatch(storage, uniform_parameter, update_config): - resp = GenDataConfig(name="RESPONSE") - obs = xr.Dataset( - { - "observations": (["report_step", "index"], [[1.0]]), - "std": (["report_step", "index"], [[0.1]]), - }, - coords={"index": [1000], "report_step": [0]}, - attrs={"response": "RESPONSE"}, - ) - experiment = storage.create_experiment( - parameters=[uniform_parameter], - responses=[resp], - observations={"OBSERVATION": obs}, - ) - prior = storage.create_ensemble( - experiment, - ensemble_size=10, - iteration=0, - name="prior", - ) - rng = np.random.default_rng(1234) - for iens in range(prior.ensemble_size): - prior.state_map[iens] = RealizationStorageState.HAS_DATA - data = rng.uniform(0, 1) - prior.save_parameters( - "PARAMETER", - iens, - xr.Dataset( - { - "values": ("names", [data]), - "transformed_values": ("names", [data]), - "names": ["KEY_1"], - } - ), - ) - data = rng.uniform(0.8, 1, 3) - prior.save_response( - "RESPONSE", - xr.Dataset( - {"values": (["report_step", "index"], [data])}, - coords={"index": range(len(data)), "report_step": [0]}, - ), - iens, - ) - posterior_ens = storage.create_ensemble( - prior.experiment_id, - ensemble_size=prior.ensemble_size, - iteration=1, - name="posterior", - prior_ensemble=prior, - ) - AnalysisConfig() - with pytest.raises( - ErtAnalysisError, - match="No active observations", - ): - smoother_update( - prior, posterior_ens, "id", update_config, UpdateSettings(), ESSettings() - ) - - -@pytest.mark.usefixtures("use_tmpdir") -@pytest.mark.integration_test -def test_gen_data_missing(storage, update_config, uniform_parameter, obs): - resp = GenDataConfig(name="RESPONSE") - experiment = storage.create_experiment( - parameters=[uniform_parameter], - responses=[resp], - observations={"OBSERVATION": obs}, - ) - prior = storage.create_ensemble( - experiment, - ensemble_size=10, - iteration=0, - name="prior", - ) - rng = np.random.default_rng(1234) - for iens in range(prior.ensemble_size): - prior.state_map[iens] = RealizationStorageState.HAS_DATA - data = rng.uniform(0, 1) - prior.save_parameters( - "PARAMETER", - iens, - xr.Dataset( - { - "values": ("names", [data]), - "transformed_values": ("names", [data]), - "names": ["KEY_1"], - } - ), - ) - data = rng.uniform(0.8, 1, 2) # Importantly, shorter than obs - prior.save_response( - "RESPONSE", - xr.Dataset( - {"values": (["report_step", "index"], [data])}, - coords={"index": range(len(data)), "report_step": [0]}, - ), - iens, - ) - posterior_ens = storage.create_ensemble( - prior.experiment_id, - ensemble_size=prior.ensemble_size, - iteration=1, - name="posterior", - prior_ensemble=prior, - ) - update_snapshot = smoother_update( - prior, posterior_ens, "id", update_config, UpdateSettings(), ESSettings() - ) - assert [ - step.status for step in update_snapshot.update_step_snapshots["ALL_ACTIVE"] - ] == ["Active", "Active", "Deactivated, missing response(es)"] - - def test_update_only_using_subset_observations( snake_oil_case_storage, snake_oil_storage, snapshot ): @@ -973,90 +671,3 @@ def test_update_only_using_subset_observations( log_file = 
Path(ert_config.analysis_config.log_path) / "id.txt" remove_timestamp_from_logfile(log_file) snapshot.assert_match(log_file.read_text("utf-8"), "update_log") - - -@pytest.mark.usefixtures("use_tmpdir") -@pytest.mark.integration_test -def test_update_subset_parameters(storage, uniform_parameter, obs): - no_update_param = GenKwConfig( - name="EXTRA_PARAMETER", - forward_init=False, - template_file="", - transfer_function_definitions=[ - "KEY1 UNIFORM 0 1", - ], - output_file=None, - ) - resp = GenDataConfig(name="RESPONSE") - experiment = storage.create_experiment( - parameters=[uniform_parameter, no_update_param], - responses=[resp], - observations={"OBSERVATION": obs}, - ) - prior = storage.create_ensemble( - experiment, - ensemble_size=10, - iteration=0, - name="prior", - ) - rng = np.random.default_rng(1234) - for iens in range(prior.ensemble_size): - prior.state_map[iens] = RealizationStorageState.HAS_DATA - data = rng.uniform(0, 1) - prior.save_parameters( - "PARAMETER", - iens, - xr.Dataset( - { - "values": ("names", [data]), - "transformed_values": ("names", [data]), - "names": ["KEY_1"], - } - ), - ) - prior.save_parameters( - "EXTRA_PARAMETER", - iens, - xr.Dataset( - { - "values": ("names", [data]), - "transformed_values": ("names", [data]), - "names": ["KEY_1"], - } - ), - ) - - data = rng.uniform(0.8, 1, 10) - prior.save_response( - "RESPONSE", - xr.Dataset( - {"values": (["report_step", "index"], [data])}, - coords={"index": range(len(data)), "report_step": [0]}, - ), - iens, - ) - posterior_ens = storage.create_ensemble( - prior.experiment_id, - ensemble_size=prior.ensemble_size, - iteration=1, - name="posterior", - prior_ensemble=prior, - ) - update_config = UpdateConfiguration( - update_steps=[ - UpdateStep( - name="NOT_ALL_ACTIVE", - observations=["OBSERVATION"], - parameters=["PARAMETER"], # No EXTRA_PARAMETER here - ) - ] - ) - smoother_update( - prior, posterior_ens, "id", update_config, UpdateSettings(), ESSettings() - ) - assert prior.load_parameters("EXTRA_PARAMETER", 0)["values"].equals( - posterior_ens.load_parameters("EXTRA_PARAMETER", 0)["values"] - ) - assert not prior.load_parameters("PARAMETER", 0)["values"].equals( - posterior_ens.load_parameters("PARAMETER", 0)["values"] - ) diff --git a/tests/unit_tests/analysis/test_misfit_preprocessor.py b/tests/unit_tests/analysis/test_misfit_preprocessor.py index e6d28225539..6f26012351b 100644 --- a/tests/unit_tests/analysis/test_misfit_preprocessor.py +++ b/tests/unit_tests/analysis/test_misfit_preprocessor.py @@ -2,7 +2,6 @@ import pytest from sklearn.preprocessing import StandardScaler -from ert.analysis import smoother_update from ert.analysis.misfit_preprocessor import ( get_nr_primary_components, get_scaling_factor, diff --git a/tests/unit_tests/cli/test_integration_cli.py b/tests/unit_tests/cli/test_integration_cli.py index b535bd282ff..59ca8cc8396 100644 --- a/tests/unit_tests/cli/test_integration_cli.py +++ b/tests/unit_tests/cli/test_integration_cli.py @@ -1,27 +1,22 @@ # pylint: disable=too-many-lines -import asyncio -import fileinput import json import logging import os -import shutil import threading from argparse import ArgumentParser from pathlib import Path from textwrap import dedent -from unittest.mock import Mock, call +from unittest.mock import Mock import numpy as np -import pandas as pd import pytest import xtgeo import ert.shared -from ert import LibresFacade, ensemble_evaluator +from ert import ensemble_evaluator from ert.__main__ import ert_parser from ert.cli import ( - 
ENSEMBLE_EXPERIMENT_MODE, ENSEMBLE_SMOOTHER_MODE, ES_MDA_MODE, ITERATIVE_ENSEMBLE_SMOOTHER_MODE, @@ -29,325 +24,6 @@ ) from ert.cli.main import ErtCliError, run_cli from ert.config import ConfigValidationError, ConfigWarning, ErtConfig -from ert.enkf_main import sample_prior -from ert.shared.feature_toggling import FeatureToggling -from ert.storage import open_storage - - -@pytest.fixture(name="mock_cli_run") -def fixture_mock_cli_run(monkeypatch): - mocked_monitor = Mock() - mocked_thread_start = Mock() - mocked_thread_join = Mock() - monkeypatch.setattr(threading.Thread, "start", mocked_thread_start) - monkeypatch.setattr(threading.Thread, "join", mocked_thread_join) - monkeypatch.setattr(ert.cli.monitor.Monitor, "monitor", mocked_monitor) - yield mocked_monitor, mocked_thread_join, mocked_thread_start - - -@pytest.mark.integration_test -def test_runpath_file(tmpdir, source_root): - shutil.copytree( - os.path.join(source_root, "test-data", "poly_example"), - os.path.join(str(tmpdir), "poly_example"), - ) - - with tmpdir.as_cwd(): - with open("poly_example/poly.ert", "a", encoding="utf-8") as fh: - config_lines = [ - "LOAD_WORKFLOW_JOB ASSERT_RUNPATH_FILE\n" - "LOAD_WORKFLOW TEST_RUNPATH_FILE\n", - "HOOK_WORKFLOW TEST_RUNPATH_FILE PRE_SIMULATION\n", - ] - - fh.writelines(config_lines) - - parser = ArgumentParser(prog="test_main") - parsed = ert_parser( - parser, - [ - ENSEMBLE_SMOOTHER_MODE, - "--target-case", - "poly_runpath_file", - "--realizations", - "1,2,4,8,16,32,64", - "poly_example/poly.ert", - ], - ) - - run_cli(parsed) - - assert os.path.isfile("RUNPATH_WORKFLOW_0.OK") - assert os.path.isfile("RUNPATH_WORKFLOW_1.OK") - - -@pytest.mark.integration_test -def test_ensemble_evaluator(tmpdir, source_root): - shutil.copytree( - os.path.join(source_root, "test-data", "poly_example"), - os.path.join(str(tmpdir), "poly_example"), - ) - - with tmpdir.as_cwd(): - parser = ArgumentParser(prog="test_main") - parsed = ert_parser( - parser, - [ - ENSEMBLE_SMOOTHER_MODE, - "--target-case", - "poly_runpath_file", - "--realizations", - "1,2,4,8,16,32,64", - "poly_example/poly.ert", - ], - ) - FeatureToggling.update_from_args(parsed) - - run_cli(parsed) - FeatureToggling.reset() - - -@pytest.mark.integration_test -def test_es_mda(tmpdir, source_root, snapshot): - shutil.copytree( - os.path.join(source_root, "test-data", "poly_example"), - os.path.join(str(tmpdir), "poly_example"), - ) - - with tmpdir.as_cwd(): - with fileinput.input("poly_example/poly.ert", inplace=True) as fin: - for line_nr, line in enumerate(fin): - if line_nr == 1: - print("RANDOM_SEED 1234", end="") - print(line, end="") - parser = ArgumentParser(prog="test_main") - parsed = ert_parser( - parser, - [ - ES_MDA_MODE, - "--target-case", - "iter-%d", - "--realizations", - "1,2,4,8,16", - "poly_example/poly.ert", - ], - ) - FeatureToggling.update_from_args(parsed) - - run_cli(parsed) - FeatureToggling.reset() - facade = LibresFacade.from_config_file("poly.ert") - with open_storage("storage", "r") as storage: - data = [] - for iter_nr in range(4): - data.append( - facade.load_all_gen_kw_data( - storage.get_ensemble_by_name(f"iter-{iter_nr}") - ) - ) - result = pd.concat( - data, - keys=[f"iter-{iter}" for iter in range(len(data))], - names=("Iteration", "Realization"), - ) - snapshot.assert_match( - result.to_csv(float_format="%.12g"), "es_mda_integration_snapshot" - ) - - -@pytest.mark.parametrize( - "mode, target", - [ - pytest.param(ENSEMBLE_SMOOTHER_MODE, "target", id=f"{ENSEMBLE_SMOOTHER_MODE}"), - pytest.param( - 
ITERATIVE_ENSEMBLE_SMOOTHER_MODE, - "iter-%d", - id=f"{ITERATIVE_ENSEMBLE_SMOOTHER_MODE}", - ), - pytest.param(ES_MDA_MODE, "iter-%d", id=f"{ES_MDA_MODE}"), - ], -) -@pytest.mark.integration_test -def test_cli_does_not_run_without_observations(tmpdir, source_root, mode, target): - shutil.copytree( - os.path.join(source_root, "test-data", "poly_example"), - os.path.join(str(tmpdir), "poly_example"), - ) - - def remove_linestartswith(file_name: str, startswith: str): - lines = Path(file_name).read_text(encoding="utf-8").split("\n") - lines = [line for line in lines if not line.startswith(startswith)] - Path(file_name).write_text("\n".join(lines), encoding="utf-8") - - with tmpdir.as_cwd(): - # Remove observations from config file - remove_linestartswith("poly_example/poly.ert", "OBS_CONFIG") - - parser = ArgumentParser(prog="test_main") - parsed = ert_parser( - parser, - [ - mode, - "--target-case", - target, - "poly_example/poly.ert", - ], - ) - with pytest.raises( - ErtCliError, match=f"To run {mode}, observations are needed." - ): - run_cli(parsed) - - -@pytest.mark.integration_test -def test_ensemble_evaluator_disable_monitoring(tmpdir, source_root): - shutil.copytree( - os.path.join(source_root, "test-data", "poly_example"), - os.path.join(str(tmpdir), "poly_example"), - ) - - with tmpdir.as_cwd(): - parser = ArgumentParser(prog="test_main") - parsed = ert_parser( - parser, - [ - ENSEMBLE_SMOOTHER_MODE, - "--disable-monitoring", - "--target-case", - "poly_runpath_file", - "--realizations", - "1,2,4,8,16,32,64", - "poly_example/poly.ert", - ], - ) - FeatureToggling.update_from_args(parsed) - - run_cli(parsed) - FeatureToggling.reset() - - -@pytest.mark.integration_test -def test_cli_test_run(tmpdir, source_root, mock_cli_run): - shutil.copytree( - os.path.join(source_root, "test-data", "poly_example"), - os.path.join(str(tmpdir), "poly_example"), - ) - - with tmpdir.as_cwd(): - parser = ArgumentParser(prog="test_main") - parsed = ert_parser(parser, [TEST_RUN_MODE, "poly_example/poly.ert"]) - run_cli(parsed) - - monitor_mock, thread_join_mock, thread_start_mock = mock_cli_run - monitor_mock.assert_called_once() - thread_join_mock.assert_called_once() - thread_start_mock.assert_has_calls([[call(), call()]]) - - -@pytest.mark.integration_test -def test_ies(tmpdir, source_root): - shutil.copytree( - os.path.join(source_root, "test-data", "poly_example"), - os.path.join(str(tmpdir), "poly_example"), - ) - - with tmpdir.as_cwd(): - parser = ArgumentParser(prog="test_main") - parsed = ert_parser( - parser, - [ - ITERATIVE_ENSEMBLE_SMOOTHER_MODE, - "--target-case", - "iter-%d", - "--realizations", - "1,2,4,8,16", - "poly_example/poly.ert", - ], - ) - FeatureToggling.update_from_args(parsed) - - run_cli(parsed) - FeatureToggling.reset() - - -@pytest.mark.integration_test -def test_that_running_ies_with_different_steplength_produces_different_result( - tmpdir, source_root -): - """This is a regression test to make sure that different step-lengths - give different results when running SIES. 
- """ - shutil.copytree( - os.path.join(source_root, "test-data", "poly_example"), - os.path.join(str(tmpdir), "poly_example"), - ) - - def _run(target): - parser = ArgumentParser(prog="test_main") - parsed = ert_parser( - parser, - [ - ITERATIVE_ENSEMBLE_SMOOTHER_MODE, - "--target-case", - f"{target}-%d", - "--realizations", - "1,2,4,8", - "poly_example/poly.ert", - "--num-iterations", - "1", - ], - ) - run_cli(parsed) - facade = LibresFacade.from_config_file("poly.ert") - - with open_storage(facade.enspath) as storage: - iter_0_fs = storage.get_ensemble_by_name(f"{target}-0") - df_iter_0 = facade.load_all_gen_kw_data(iter_0_fs) - iter_1_fs = storage.get_ensemble_by_name(f"{target}-1") - df_iter_1 = facade.load_all_gen_kw_data(iter_1_fs) - - result = pd.concat( - [df_iter_0, df_iter_1], - keys=["iter-0", "iter-1"], - ) - return result - - # Run SIES with step-lengths defined - with tmpdir.as_cwd(): - with open("poly_example/poly.ert", mode="a", encoding="utf-8") as fh: - fh.write( - dedent( - """ - RANDOM_SEED 123456 - ANALYSIS_SET_VAR IES_ENKF IES_MAX_STEPLENGTH 0.5 - ANALYSIS_SET_VAR IES_ENKF IES_MIN_STEPLENGTH 0.2 - ANALYSIS_SET_VAR IES_ENKF IES_DEC_STEPLENGTH 2.5 - """ - ) - ) - - result_1 = _run("target_result_1") - - # Run SIES with different step-lengths defined - with tmpdir.as_cwd(): - with open("poly_example/poly.ert", mode="a", encoding="utf-8") as fh: - fh.write( - dedent( - """ - ANALYSIS_SET_VAR IES_ENKF IES_MAX_STEPLENGTH 0.6 - ANALYSIS_SET_VAR IES_ENKF IES_MIN_STEPLENGTH 0.3 - ANALYSIS_SET_VAR IES_ENKF IES_DEC_STEPLENGTH 2.0 - """ - ) - ) - - result_2 = _run("target_result_2") - - # Prior should be the same - assert result_1.loc["iter-0"].equals(result_2.loc["iter-0"]) - - # Posterior should be different - assert not np.isclose(result_1.loc["iter-1"], result_2.loc["iter-1"]).all() @pytest.mark.filterwarnings("ignore::ert.config.ConfigWarning") @@ -365,82 +41,6 @@ def test_bad_config_error_message(tmp_path): run_cli(parsed) -@pytest.mark.integration_test -@pytest.mark.parametrize( - "prior_mask,reals_rerun_option,should_resample", - [ - pytest.param( - range(5), "0-4", False, id="All realisations first, subset second run" - ), - pytest.param( - [1, 2, 3, 4], - "2-3", - False, - id="Subset of realisation first run, subs-subset second run", - ), - pytest.param( - [0, 1, 2], - "0-5", - True, - id="Subset of realisation first, superset in second run - must resample", - ), - ], -) -def test_that_prior_is_not_overwritten_in_ensemble_experiment( - prior_mask, - reals_rerun_option, - should_resample, - tmpdir, - source_root, -): - shutil.copytree( - os.path.join(source_root, "test-data", "poly_example"), - os.path.join(str(tmpdir), "poly_example"), - ) - - with tmpdir.as_cwd(): - ert_config = ErtConfig.from_file("poly_example/poly.ert") - num_realizations = ert_config.model_config.num_realizations - storage = open_storage(ert_config.ens_path, mode="w") - experiment_id = storage.create_experiment( - ert_config.ensemble_config.parameter_configuration - ) - ensemble = storage.create_ensemble( - experiment_id, name="iter-0", ensemble_size=num_realizations - ) - sample_prior(ensemble, prior_mask) - prior_values = storage.get_ensemble(ensemble.id).load_parameters("COEFFS")[ - "values" - ] - storage.close() - - parser = ArgumentParser(prog="test_main") - parsed = ert_parser( - parser, - [ - ENSEMBLE_EXPERIMENT_MODE, - "poly_example/poly.ert", - "--current-case=iter-0", - "--realizations", - reals_rerun_option, - ], - ) - - FeatureToggling.update_from_args(parsed) - run_cli(parsed) - 
storage = open_storage(ert_config.ens_path, mode="w") - parameter_values = storage.get_ensemble(ensemble.id).load_parameters("COEFFS")[ - "values" - ] - - if should_resample: - with pytest.raises(AssertionError): - np.testing.assert_array_equal(parameter_values, prior_values) - else: - np.testing.assert_array_equal(parameter_values, prior_values) - storage.close() - - @pytest.mark.parametrize( "mode", [ @@ -693,39 +293,6 @@ def test_that_the_model_warns_when_active_realizations_less_min_realizations( run_cli(parsed) -@pytest.mark.integration_test -@pytest.mark.usefixtures("copy_poly_case") -def test_failing_job_cli_error_message(): - # modify poly_eval.py - with open("poly_eval.py", mode="a", encoding="utf-8") as poly_script: - poly_script.writelines([" raise RuntimeError('Argh')"]) - - args = Mock() - args.config = "poly_high_min_reals.ert" - parser = ArgumentParser(prog="test_main") - - parser = ArgumentParser(prog="test_main") - parsed = ert_parser( - parser, - [TEST_RUN_MODE, "poly.ert"], - ) - expected_substrings = [ - "Realization: 0 failed after reaching max submit (2)", - "job poly_eval failed", - "Process exited with status code 1", - "Traceback", - "raise RuntimeError('Argh')", - "RuntimeError: Argh", - ] - try: - run_cli(parsed) - except ErtCliError as error: - for substring in expected_substrings: - assert substring in f"{error}" - else: - pytest.fail(msg="Expected run cli to raise ErtCliError!") - - @pytest.fixture def setenv_config(tmp_path): config = tmp_path / "test.ert" diff --git a/tests/unit_tests/cli/test_model_factory.py b/tests/unit_tests/cli/test_model_factory.py index 66ddc960534..94503720815 100644 --- a/tests/unit_tests/cli/test_model_factory.py +++ b/tests/unit_tests/cli/test_model_factory.py @@ -6,7 +6,6 @@ import pytest from ert.cli import model_factory -from ert.enkf_main import EnKFMain from ert.libres_facade import LibresFacade from ert.run_models import ( EnsembleExperiment, diff --git a/tests/unit_tests/shared/share/test_shell.py b/tests/unit_tests/shared/share/test_shell.py index 33054f2b0f9..dbc89d138c8 100644 --- a/tests/unit_tests/shared/share/test_shell.py +++ b/tests/unit_tests/shared/share/test_shell.py @@ -373,47 +373,6 @@ def test_careful_copy_file(shell): assert os.path.isfile("file3") -@pytest.mark.integration_test -def test_shell_scripts_integration(tmpdir): - """ - The following test is a regression test that - checks that the scripts under src/ert/shared/share/ert/shell_scripts - are not broken, and correctly installed through site-config. 
- """ - with tmpdir.as_cwd(): - ert_config_fname = "test.ert" - with open(ert_config_fname, "w", encoding="utf-8") as file_h: - file_h.write( - """ -RUNPATH realization-/iter- -JOBNAME TEST -QUEUE_SYSTEM LOCAL -NUM_REALIZATIONS 1 -FORWARD_MODEL COPY_FILE(=/file.txt, =copied.txt) -FORWARD_MODEL COPY_FILE(=/file.txt, =copied2.txt) -FORWARD_MODEL CAREFUL_COPY_FILE(=/file.txt, =copied3.txt) -FORWARD_MODEL MOVE_FILE(=copied.txt, =moved.txt) -FORWARD_MODEL DELETE_FILE(=copied2.txt) -FORWARD_MODEL MAKE_DIRECTORY(=mydir) -FORWARD_MODEL COPY_DIRECTORY(=mydir, =mydir2) -FORWARD_MODEL DELETE_DIRECTORY(=mydir) -""" - ) - - with open("file.txt", "w", encoding="utf-8") as file_h: - file_h.write("something") - - subprocess.run(["ert", "test_run", ert_config_fname], check=True) - - with open("realization-0/iter-0/moved.txt", encoding="utf-8") as output_file: - assert output_file.read() == "something" - assert not os.path.exists("realization-0/iter-0/copied.txt") - assert not os.path.exists("realization-0/iter-0/copied2.txt") - assert os.path.exists("realization-0/iter-0/copied3.txt") - assert not os.path.exists("realization-0/iter-0/mydir") - assert os.path.exists("realization-0/iter-0/mydir2") - - @pytest.fixture def minimal_case(tmpdir): with tmpdir.as_cwd(): diff --git a/tests/unit_tests/storage/test_field_parameter.py b/tests/unit_tests/storage/test_field_parameter.py index 12b10e3ef8a..0a303c5b395 100644 --- a/tests/unit_tests/storage/test_field_parameter.py +++ b/tests/unit_tests/storage/test_field_parameter.py @@ -1,7 +1,4 @@ import math -import os -import stat -from argparse import ArgumentParser from pathlib import Path from textwrap import dedent from typing import Optional @@ -16,9 +13,6 @@ from resdata.grid import GridGenerator from resdata.resfile import ResdataKW -from ert.__main__ import ert_parser -from ert.cli import ENSEMBLE_SMOOTHER_MODE -from ert.cli.main import run_cli from ert.config import ErtConfig, Field, SummaryConfig from ert.enkf_main import create_run_path, ensemble_context, sample_prior from ert.field_utils import Shape @@ -441,290 +435,6 @@ def test_forward_init(storage, tmpdir, config_str, expect_forward_init): assert len(arr.values.ravel()) == 16 -@pytest.mark.integration_test -def test_field_param_update(tmpdir): - """ - This replicates the poly example, only it uses FIELD parameter - """ - with tmpdir.as_cwd(): - config = dedent( - """ - NUM_REALIZATIONS 5 - OBS_CONFIG observations - - FIELD MY_PARAM PARAMETER my_param.grdecl INIT_FILES:my_param.grdecl FORWARD_INIT:True - GRID MY_EGRID.EGRID - - GEN_DATA MY_RESPONSE RESULT_FILE:gen_data_%d.out REPORT_STEPS:0 INPUT_FORMAT:ASCII - INSTALL_JOB poly_eval POLY_EVAL - SIMULATION_JOB poly_eval - """ - ) - with open("config.ert", "w", encoding="utf-8") as fh: - fh.writelines(config) - - NCOL = 5 - NROW = 4 - NLAY = 1 - grid = xtgeo.create_box_grid(dimension=(NCOL, NROW, NLAY)) - grid.to_file("MY_EGRID.EGRID", "egrid") - - with open("forward_model", "w", encoding="utf-8") as f: - f.write( - dedent( - """#!/usr/bin/env python -import xtgeo -import numpy as np -import os - -if __name__ == "__main__": - if not os.path.exists("my_param.grdecl"): - values = np.random.standard_normal(5*4) - with open("my_param.grdecl", "w") as fout: - fout.write("MY_PARAM\\n") - fout.write(" ".join([str(val) for val in values]) + " /\\n") - with open("my_param.grdecl", "r") as fin: - for line_nr, line in enumerate(fin): - if line_nr == 1: - a, b, c, *_ = line.split() - - output = [float(a) * x**2 + float(b) * x + float(c) for x in range(10)] - with 
open("gen_data_0.out", "w", encoding="utf-8") as f: - f.write("\\n".join(map(str, output))) - """ - ) - ) - os.chmod( - "forward_model", - os.stat("forward_model").st_mode - | stat.S_IXUSR - | stat.S_IXGRP - | stat.S_IXOTH, - ) - with open("POLY_EVAL", "w", encoding="utf-8") as fout: - fout.write("EXECUTABLE forward_model") - with open("observations", "w", encoding="utf-8") as fout: - fout.write( - dedent( - """ - GENERAL_OBSERVATION MY_OBS { - DATA = MY_RESPONSE; - INDEX_LIST = 0,2,4,6,8; - RESTART = 0; - OBS_FILE = obs.txt; - };""" - ) - ) - - with open("obs.txt", "w", encoding="utf-8") as fobs: - fobs.write( - dedent( - """ - 2.1457049781272213 0.6 - 8.769219841380755 1.4 - 12.388014786122742 3.0 - 25.600464531354252 5.4 - 42.35204755970952 8.6""" - ) - ) - - parser = ArgumentParser(prog="test_main") - parsed = ert_parser( - parser, - [ - ENSEMBLE_SMOOTHER_MODE, - "--current-case", - "prior", - "--target-case", - "smoother_update", - "config.ert", - ], - ) - - run_cli(parsed) - config = ErtConfig.from_file("config.ert") - with open_storage(config.ens_path, mode="w") as storage: - prior = storage.get_ensemble_by_name("prior") - posterior = storage.get_ensemble_by_name("smoother_update") - - prior_result = prior.load_parameters("MY_PARAM", list(range(5)))["values"] - assert len(prior_result.x) == NCOL - assert len(prior_result.y) == NROW - assert len(prior_result.z) == NLAY - - posterior_result = posterior.load_parameters("MY_PARAM", list(range(5)))[ - "values" - ] - # Only assert on the first three rows, as there are only three parameters, - # a, b and c, the rest have no correlation to the results. - assert np.linalg.det( - np.cov(prior_result.values.reshape(5, NCOL * NROW * NLAY).T[:3]) - ) > np.linalg.det( - np.cov(posterior_result.values.reshape(5, NCOL * NROW * NLAY).T[:3]) - ) - # This checks that the fields in the runpath are different between iterations - assert Path("simulations/realization-0/iter-0/my_param.grdecl").read_text( - encoding="utf-8" - ) != Path("simulations/realization-0/iter-1/my_param.grdecl").read_text( - encoding="utf-8" - ) - - -@pytest.mark.integration_test -def test_parameter_update_with_inactive_cells_xtgeo_grdecl(tmpdir): - """ - This replicates the poly example, only it uses FIELD parameter - """ - with tmpdir.as_cwd(): - config = dedent( - """ - NUM_REALIZATIONS 5 - OBS_CONFIG observations - FIELD MY_PARAM PARAMETER my_param.grdecl INIT_FILES:my_param.grdecl FORWARD_INIT:True - GRID MY_EGRID.EGRID - GEN_DATA MY_RESPONSE RESULT_FILE:gen_data_%d.out REPORT_STEPS:0 INPUT_FORMAT:ASCII - INSTALL_JOB poly_eval POLY_EVAL - SIMULATION_JOB poly_eval - """ - ) - with open("config.ert", "w", encoding="utf-8") as fh: - fh.writelines(config) - - NCOL = 4 - NROW = 4 - NLAY = 1 - grid = xtgeo.create_box_grid(dimension=(NCOL, NROW, NLAY)) - mask = grid.get_actnum() - mask_list = [True] * 3 + [False] * 12 + [True] - mask.values = mask_list - grid.set_actnum(mask) - grid.to_file("MY_EGRID.EGRID", "egrid") - - with open("forward_model", "w", encoding="utf-8") as f: - f.write( - dedent( - """#!/usr/bin/env python -import xtgeo -import numpy as np -import os -if __name__ == "__main__": - if not os.path.exists("my_param.grdecl"): - values = np.random.standard_normal(4*4) - with open("my_param.grdecl", "w") as fout: - fout.write("MY_PARAM\\n") - fout.write(" ".join([str(val) for val in values]) + " /\\n") - with open("my_param.grdecl", "r") as fin: - for line_nr, line in enumerate(fin): - if line_nr == 1: - a, b, c, *_ = line.split() - output = [float(a) * x**2 + float(b) * x 
+ float(c) for x in range(10)] - with open("gen_data_0.out", "w", encoding="utf-8") as f: - f.write("\\n".join(map(str, output))) - """ - ) - ) - os.chmod( - "forward_model", - os.stat("forward_model").st_mode - | stat.S_IXUSR - | stat.S_IXGRP - | stat.S_IXOTH, - ) - with open("POLY_EVAL", "w", encoding="utf-8") as fout: - fout.write("EXECUTABLE forward_model") - with open("observations", "w", encoding="utf-8") as fout: - fout.write( - dedent( - """ - GENERAL_OBSERVATION MY_OBS { - DATA = MY_RESPONSE; - INDEX_LIST = 0,2,4,6,8; - RESTART = 0; - OBS_FILE = obs.txt; - };""" - ) - ) - - with open("obs.txt", "w", encoding="utf-8") as fobs: - fobs.write( - dedent( - """ - 2.1457049781272213 0.6 - 8.769219841380755 1.4 - 12.388014786122742 3.0 - 25.600464531354252 5.4 - 42.35204755970952 8.6""" - ) - ) - - parser = ArgumentParser(prog="test_main") - parsed = ert_parser( - parser, - [ - ENSEMBLE_SMOOTHER_MODE, - "--current-case", - "prior", - "--target-case", - "smoother_update", - "config.ert", - ], - ) - - run_cli(parsed) - config = ErtConfig.from_file("config.ert") - with open_storage(config.ens_path) as storage: - prior = storage.get_ensemble_by_name("prior") - posterior = storage.get_ensemble_by_name("smoother_update") - - prior_result = prior.load_parameters("MY_PARAM", list(range(5)))["values"] - posterior_result = posterior.load_parameters("MY_PARAM", list(range(5)))[ - "values" - ] - - # check the shape of internal data used in the update - assert prior_result.shape == (5, NCOL, NROW, NLAY) - assert posterior_result.shape == (5, NCOL, NROW, NLAY) - - # Only assert on the first three rows, as there are only three parameters, - # a, b and c, the rest have no correlation to the results. - assert np.linalg.det( - np.cov(prior_result.values.reshape(5, NCOL * NROW * NLAY).T[:3]) - ) > np.linalg.det( - np.cov(posterior_result.values.reshape(5, NCOL * NROW * NLAY).T[:3]) - ) - - # This checks that the fields in the runpath - # are different between iterations - assert Path("simulations/realization-0/iter-0/my_param.grdecl").read_text( - encoding="utf-8" - ) != Path("simulations/realization-0/iter-1/my_param.grdecl").read_text( - encoding="utf-8" - ) - - # check shapre of written data - prop0 = xtgeo.grid_property.gridproperty_from_file( - "simulations/realization-0/iter-0/my_param.grdecl", - fformat="grdecl", - grid=grid, - name="MY_PARAM", - ) - assert len(prop0.get_npvalues1d()) == 16 - numpy.testing.assert_array_equal( - np.logical_not(prop0.values1d.mask), mask_list - ) - - prop1 = xtgeo.grid_property.gridproperty_from_file( - "simulations/realization-0/iter-0/my_param.grdecl", - fformat="grdecl", - grid=grid, - name="MY_PARAM", - ) - assert len(prop1.get_npvalues1d()) == 16 - numpy.testing.assert_array_equal( - np.logical_not(prop1.values1d.mask), mask_list - ) - - @pytest.mark.parametrize( "actnum", [ diff --git a/tests/unit_tests/storage/test_parameter_sample_types.py b/tests/unit_tests/storage/test_parameter_sample_types.py index 51a35892712..91ae155bcc0 100644 --- a/tests/unit_tests/storage/test_parameter_sample_types.py +++ b/tests/unit_tests/storage/test_parameter_sample_types.py @@ -1,34 +1,21 @@ import logging import os -import stat -from argparse import ArgumentParser from contextlib import ExitStack as does_not_raise from hashlib import sha256 -from multiprocessing import Process from pathlib import Path from textwrap import dedent from typing import Optional, Tuple import numpy as np import pytest -import xtgeo -from flaky import flaky from resdata.geometry import Surface -from 
ert.__main__ import ert_parser -from ert.cli import ENSEMBLE_SMOOTHER_MODE -from ert.cli.main import run_cli -from ert.config import ConfigValidationError, ErtConfig, GenKwConfig +from ert.config import ErtConfig, GenKwConfig from ert.enkf_main import create_run_path, ensemble_context, sample_prior from ert.libres_facade import LibresFacade from ert.storage import EnsembleAccessor, open_storage -def write_file(fname, contents): - with open(fname, mode="w", encoding="utf-8") as fout: - fout.writelines(contents) - - def create_runpath( storage, config, @@ -82,194 +69,6 @@ def storage(tmp_path): yield storage -@pytest.mark.integration_test -@pytest.mark.parametrize( - "config_str, expected, extra_files, expectation", - [ - ( - "GEN_KW KW_NAME template.txt kw.txt prior.txt\nRANDOM_SEED 1234", - "MY_KEYWORD -0.881423", - [], - does_not_raise(), - ), - ( - "GEN_KW KW_NAME template.txt kw.txt prior.txt INIT_FILES:custom_param%d", - "MY_KEYWORD 1.31", - [("custom_param0", "MY_KEYWORD 1.31")], - does_not_raise(), - ), - ( - "GEN_KW KW_NAME template.txt kw.txt prior.txt INIT_FILES:custom_param%d", - "MY_KEYWORD 1.31", - [("custom_param0", "1.31")], - does_not_raise(), - ), - ( - "GEN_KW KW_NAME template.txt kw.txt prior.txt INIT_FILES:custom_param0", # noqa - "Not expecting a file", - [], - pytest.raises( - ConfigValidationError, match="Loading GEN_KW from files requires %d" - ), - ), - ], -) -def test_gen_kw(storage, tmpdir, config_str, expected, extra_files, expectation): - with tmpdir.as_cwd(): - config = dedent( - """ - JOBNAME my_name%d - NUM_REALIZATIONS 1 - """ - ) - config += config_str - with open("config.ert", mode="w", encoding="utf-8") as fh: - fh.writelines(config) - with open("template.txt", mode="w", encoding="utf-8") as fh: - fh.writelines("MY_KEYWORD ") - with open("prior.txt", mode="w", encoding="utf-8") as fh: - fh.writelines("MY_KEYWORD NORMAL 0 1") - for fname, contents in extra_files: - write_file(fname, contents) - - with expectation: - create_runpath(storage, "config.ert") - assert ( - Path("simulations/realization-0/iter-0/kw.txt").read_text( - encoding="utf-8" - ) - == expected - ) - - -@pytest.mark.integration_test -@pytest.mark.parametrize( - "config_str, expected, extra_files", - [ - pytest.param( - "GEN_KW KW_NAME template.txt kw.txt prior.txt", - "MY_KEYWORD -0.881423\nNOT KEYWORD ", - [["template.txt", "MY_KEYWORD \nNOT KEYWORD "]], - id="Second magic string that should not be replaced", - ), - pytest.param( - "GEN_KW KW_NAME template.txt kw.txt prior.txt", - "MY_KEYWORD -0.881423\n-- if K<=28 then blah blah", - [["template.txt", "MY_KEYWORD \n-- if K<=28 then blah blah"]], - id="Comment in file with <", - ), - pytest.param( - "GEN_KW KW_NAME template.txt kw.txt prior.txt", - "MY_KEYWORD -0.881423\nNR_TWO 0.654691", - [ - ["template.txt", "MY_KEYWORD \nNR_TWO "], - ["prior.txt", "MY_KEYWORD NORMAL 0 1\nNR_TWO NORMAL 0 1"], - ], - id="Two parameters", - ), - ], -) -def test_gen_kw_templating( - storage, - tmpdir, - config_str, - expected, - extra_files, -): - with tmpdir.as_cwd(): - config = dedent( - """ - JOBNAME my_name%d - NUM_REALIZATIONS 1 - RANDOM_SEED 1234 - """ - ) - config += config_str - with open("config.ert", mode="w", encoding="utf-8") as fh: - fh.writelines(config) - with open("prior.txt", mode="w", encoding="utf-8") as fh: - fh.writelines("MY_KEYWORD NORMAL 0 1") - for fname, contents in extra_files: - write_file(fname, contents) - create_runpath(storage, "config.ert") - assert ( - 
Path("simulations/realization-0/iter-0/kw.txt").read_text(encoding="utf-8") - == expected - ) - - -@pytest.mark.integration_test -@pytest.mark.parametrize( - "relpath", - [ - "somepath/", - # This test was added to show current behaviour for Ert. - # If absolute paths should be possible to be used like this is up for debate. - "/tmp/somepath/", # ert removes leading '/' - ], -) -def test_gen_kw_outfile_will_use_paths(tmpdir, storage, relpath: str): - with tmpdir.as_cwd(): - config = dedent( - f""" - JOBNAME my_name%d - NUM_REALIZATIONS 1 - GEN_KW KW_NAME template.txt {relpath}kw.txt prior.txt - """ - ) - - with open("config.ert", mode="w", encoding="utf-8") as fh: - fh.writelines(config) - with open("template.txt", mode="w", encoding="utf-8") as fh: - fh.writelines("MY_KEYWORD ") - with open("prior.txt", mode="w", encoding="utf-8") as fh: - fh.writelines("MY_KEYWORD NORMAL 0 1") - if relpath.startswith("/"): - relpath = relpath[1:] - create_runpath(storage, "config.ert") - assert os.path.exists(f"simulations/realization-0/iter-0/{relpath}kw.txt") - - -@pytest.mark.integration_test -@pytest.mark.parametrize( - "config_str, expected, extra_files", - [ - ( - "GEN_KW KW_NAME template.txt kw.txt prior.txt INIT_FILES:custom_param%d", - "MY_KEYWORD 1.31\nMY_SECOND_KEYWORD 1.01", - [("custom_param0", "MY_SECOND_KEYWORD 1.01\nMY_KEYWORD 1.31")], - ), - ], -) -def test_that_order_of_input_in_user_input_is_abritrary_for_gen_kw_init_files( - tmpdir, config_str, expected, extra_files, storage -): - with tmpdir.as_cwd(): - config = dedent( - """ - JOBNAME my_name%d - NUM_REALIZATIONS 1 - """ - ) - config += config_str - with open("config.ert", mode="w", encoding="utf-8") as fh: - fh.writelines(config) - with open("template.txt", mode="w", encoding="utf-8") as fh: - fh.writelines( - "MY_KEYWORD \nMY_SECOND_KEYWORD " - ) - with open("prior.txt", mode="w", encoding="utf-8") as fh: - fh.writelines("MY_KEYWORD NORMAL 0 1\nMY_SECOND_KEYWORD NORMAL 0 1") - for fname, contents in extra_files: - write_file(fname, contents) - - create_runpath(storage, "config.ert") - assert ( - Path("simulations/realization-0/iter-0/kw.txt").read_text("utf-8") - == expected - ) - - @pytest.mark.parametrize( "config_str, expect_forward_init, expect_num_loaded, error", [ @@ -371,48 +170,6 @@ def test_surface_param( fs.load_parameters("MY_PARAM", 0)["values"] -@pytest.mark.integration_test -@pytest.mark.parametrize("load_forward_init", [True, False]) -def test_gen_kw_forward_init(tmpdir, storage, load_forward_init): - with tmpdir.as_cwd(): - config = dedent( - """ - JOBNAME my_name%d - NUM_REALIZATIONS 1 - GEN_KW KW_NAME template.txt kw.txt prior.txt """ - f"""FORWARD_INIT:{str(load_forward_init)} INIT_FILES:custom_param%d - """ - ) - with open("config.ert", mode="w", encoding="utf-8") as fh: - fh.writelines(config) - - with open("template.txt", mode="w", encoding="utf-8") as fh: - fh.writelines("MY_KEYWORD ") - with open("prior.txt", mode="w", encoding="utf-8") as fh: - fh.writelines("MY_KEYWORD NORMAL 0 1") - if not load_forward_init: - write_file("custom_param0", "1.31") - - if load_forward_init: - with pytest.raises( - ConfigValidationError, - match=( - "Loading GEN_KW from files created by " - "the forward model is not supported." 
- ), - ): - create_runpath(storage, "config.ert") - else: - _, fs = create_runpath(storage, "config.ert") - assert Path("simulations/realization-0/iter-0/kw.txt").exists() - value = ( - fs.load_parameters("KW_NAME", 0) - .sel(names="MY_KEYWORD")["values"] - .values - ) - assert value == 1.31 - - @pytest.mark.parametrize( "check_random_seed, expectation", [ @@ -619,301 +376,3 @@ def test_that_sub_sample_maintains_order(tmpdir, storage, mask, expected): .tolist() == expected ) - - -@pytest.mark.integration_test -def test_surface_param_update(tmpdir): - """Full update with a surface parameter, it mirrors the poly example, - except it uses SURFACE instead of GEN_KW. - """ - ensemble_size = 5 - with tmpdir.as_cwd(): - config = f""" -NUM_REALIZATIONS {ensemble_size} -QUEUE_OPTION LOCAL MAX_RUNNING 5 -OBS_CONFIG observations -SURFACE MY_PARAM OUTPUT_FILE:surf.irap INIT_FILES:surf.irap BASE_SURFACE:surf.irap FORWARD_INIT:True -GEN_DATA MY_RESPONSE RESULT_FILE:gen_data_%d.out REPORT_STEPS:0 INPUT_FORMAT:ASCII -INSTALL_JOB poly_eval POLY_EVAL -SIMULATION_JOB poly_eval -""" - base_surface = xtgeo.RegularSurface( - ncol=2, - nrow=3, - xinc=1, - yinc=1, - xori=1, - yori=1, - yflip=1, - rotation=1, - ) - base_surface.to_file("surf.irap", fformat="irap_ascii") - - with open("forward_model", "w", encoding="utf-8") as f: - f.write( - """#!/usr/bin/env python -import os - -import xtgeo -import numpy as np - -if __name__ == "__main__": - if not os.path.exists("surf.irap"): - nx = 2 - ny = 3 - values = np.random.standard_normal(nx * ny) - surf = xtgeo.RegularSurface(ncol=nx, - nrow=ny, - xinc=1, - yinc=1, - rotation=0, - values=values) - surf.to_file("surf.irap", fformat="irap_ascii") - - surf_fs = xtgeo.surface_from_file("surf.irap", fformat="irap_ascii", - dtype=np.float32) - a, b, c, *_ = surf_fs.values.data.ravel() - - output = [a * x**2 + b * x + c for x in range(10)] - - with open("gen_data_0.out", "w", encoding="utf-8") as f: - f.write("\\n".join(map(str, output))) - """ - ) - os.chmod( - "forward_model", - os.stat("forward_model").st_mode - | stat.S_IXUSR - | stat.S_IXGRP - | stat.S_IXOTH, - ) - with open("POLY_EVAL", "w", encoding="utf-8") as fout: - fout.write("EXECUTABLE forward_model") - with open("observations", "w", encoding="utf-8") as fout: - fout.write( - dedent( - """ - GENERAL_OBSERVATION MY_OBS { - DATA = MY_RESPONSE; - INDEX_LIST = 0,2,4,6,8; - RESTART = 0; - OBS_FILE = obs.txt; - };""" - ) - ) - - with open("obs.txt", "w", encoding="utf-8") as fobs: - fobs.write( - dedent( - """ - 2.1457049781272213 0.6 - 8.769219841380755 1.4 - 12.388014786122742 3.0 - 25.600464531354252 5.4 - 42.35204755970952 8.6""" - ) - ) - - with open("config.ert", "w", encoding="utf-8") as fh: - fh.writelines(config) - - parser = ArgumentParser(prog="test_main") - parsed = ert_parser( - parser, - [ - ENSEMBLE_SMOOTHER_MODE, - "--current-case", - "prior", - "--target-case", - "smoother_update", - "config.ert", - ], - ) - - run_cli(parsed) - with open_storage(tmpdir / "storage") as storage: - prior = storage.get_ensemble_by_name("prior") - posterior = storage.get_ensemble_by_name("smoother_update") - prior_param = ( - prior.load_parameters("MY_PARAM", range(5))["values"] - .values.reshape(5, 2 * 3) - .T - ) - posterior_param = ( - posterior.load_parameters("MY_PARAM", range(5))["values"] - .values.reshape(5, 2 * 3) - .T - ) - - assert prior_param.dtype == np.float32 - assert posterior_param.dtype == np.float32 - - assert np.linalg.det(np.cov(prior_param[:3])) > np.linalg.det( - np.cov(posterior_param[:3]) - ) - - 
realizations_to_test = np.random.choice( - range(ensemble_size), size=2, replace=False - ) - surf = xtgeo.surface_from_file( - f"simulations/realization-{realizations_to_test[0]}/iter-1/surf.irap", - fformat="irap_ascii", - dtype=np.float32, - ) - - assert base_surface.ncol == surf.ncol - assert base_surface.nrow == surf.nrow - assert base_surface.xinc == surf.xinc - assert base_surface.yinc == surf.yinc - assert base_surface.xori == surf.xori - assert base_surface.yori == surf.yori - assert base_surface.yflip == surf.yflip - assert base_surface.rotation == surf.yflip - - surf2 = xtgeo.surface_from_file( - f"simulations/realization-{realizations_to_test[1]}/iter-1/surf.irap", - fformat="irap_ascii", - dtype=np.float32, - ) - - assert not (surf.values == surf2.values).any() - - assert len(prior.load_parameters("MY_PARAM", 0)["values"].x) == 2 - assert len(prior.load_parameters("MY_PARAM", 0)["values"].y) == 3 - - -@pytest.mark.integration_test -@pytest.mark.limit_memory("110 MB") -@flaky(max_runs=5, min_passes=1) -def test_field_param_memory(tmpdir): - with tmpdir.as_cwd(): - # Setup is done in a subprocess so that memray does not pick up the allocations - p = Process(target=create_poly_with_field, args=((2000, 1000, 1), 2)) - p.start() - p.join() # this blocks until the process terminates - - run_poly() - - -def create_poly_with_field(field_dim: Tuple[int, int, int], realisations: int): - """ - This replicates the poly example, only it uses FIELD parameter - """ - grid_size = field_dim[0] * field_dim[1] * field_dim[2] - config = dedent( - f""" - NUM_REALIZATIONS {realisations} - OBS_CONFIG observations - - FIELD MY_PARAM PARAMETER my_param.bgrdecl INIT_FILES:my_param.bgrdecl FORWARD_INIT:True - GRID MY_EGRID.EGRID - - GEN_DATA MY_RESPONSE RESULT_FILE:gen_data_%d.out REPORT_STEPS:0 INPUT_FORMAT:ASCII - INSTALL_JOB poly_eval POLY_EVAL - SIMULATION_JOB poly_eval - """ - ) - with open("config.ert", "w", encoding="utf-8") as fh: - fh.writelines(config) - - grid = xtgeo.create_box_grid(dimension=field_dim) - grid.to_file("MY_EGRID.EGRID", "egrid") - del grid - - with open("forward_model", "w", encoding="utf-8") as f: - f.write( - f"""#!/usr/bin/env python -import numpy as np -import os -import resfo - -if __name__ == "__main__": - if not os.path.exists("my_param.bgrdecl"): - values = np.random.standard_normal({grid_size}) - resfo.write("my_param.bgrdecl", [("MY_PARAM", values)]) - datas = resfo.read("my_param.bgrdecl") - assert datas[0][0] == "MY_PARAM" - a,b,c,*_ = datas[0][1] - - output = [float(a) * x**2 + float(b) * x + float(c) for x in range(10)] - with open("gen_data_0.out", "w", encoding="utf-8") as f: - f.write("\\n".join(map(str, output))) - """ - ) - os.chmod( - "forward_model", - os.stat("forward_model").st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH, - ) - with open("POLY_EVAL", "w", encoding="utf-8") as fout: - fout.write("EXECUTABLE forward_model") - with open("observations", "w", encoding="utf-8") as fout: - fout.write( - dedent( - """ - GENERAL_OBSERVATION MY_OBS { - DATA = MY_RESPONSE; - INDEX_LIST = 0,2,4,6,8; - RESTART = 0; - OBS_FILE = obs.txt; - };""" - ) - ) - - with open("obs.txt", "w", encoding="utf-8") as fobs: - fobs.write( - dedent( - """ - 2.1457049781272213 0.6 - 8.769219841380755 1.4 - 12.388014786122742 3.0 - 25.600464531354252 5.4 - 42.35204755970952 8.6""" - ) - ) - - -def run_poly(): - parser = ArgumentParser(prog="test_main") - parsed = ert_parser( - parser, - [ - ENSEMBLE_SMOOTHER_MODE, - "--current-case", - "prior", - "--target-case", - 
"smoother_update", - "config.ert", - ], - ) - - run_cli(parsed) - - -@pytest.mark.integration_test -@pytest.mark.parametrize( - "config_str, expected", - [ - ( - "GEN_KW KW_NAME prior.txt\nRANDOM_SEED 1234", - -0.881423, - ), - ], -) -def test_gen_kw_optional_template(storage, tmpdir, config_str, expected): - with tmpdir.as_cwd(): - config = dedent( - """ - JOBNAME my_name%d - NUM_REALIZATIONS 1 - """ - ) - config += config_str - with open("config.ert", mode="w", encoding="utf-8") as fh: - fh.writelines(config) - with open("prior.txt", mode="w", encoding="utf-8") as fh: - fh.writelines("MY_KEYWORD NORMAL 0 1") - - create_runpath(storage, "config.ert") - assert list(storage.ensembles)[0].load_parameters("KW_NAME")[ - "values" - ].values.flatten().tolist() == pytest.approx([expected])