diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 675356f48..fa8ca9ef1 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -123,7 +123,7 @@ jobs: markers: -m 'not slow' os: windows-latest # macOS builds - - tox-env: py310-coverage-extras-lmoments + - tox-env: py310-coverage-extras-lmoments-numpy python-version: "3.10" markers: -m 'not slow' os: macos-latest @@ -135,7 +135,7 @@ jobs: - tox-env: py310-coverage-lmoments # No markers -- includes slow tests python-version: "3.10" os: ubuntu-latest - - tox-env: py311-coverage-extras-sbck + - tox-env: py311-coverage-extras-sbck-numpy python-version: "3.11" markers: -m 'not slow' os: ubuntu-latest diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 957b7106f..3f700e367 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -4,7 +4,7 @@ Changelog v0.52.0 (unreleased) -------------------- -Contributors to this version: David Huard (:user:`huard`), Trevor James Smith (:user:`Zeitsperre`), Hui-Min Wang (:user:`Hem-W`), Éric Dupuis (:user:`coxipi`), Sarah Gammon (:user:`SarahG-579462`). +Contributors to this version: David Huard (:user:`huard`), Trevor James Smith (:user:`Zeitsperre`), Hui-Min Wang (:user:`Hem-W`), Éric Dupuis (:user:`coxipi`), Sarah Gammon (:user:`SarahG-579462`), Pascal Bourgault (:user:`aulemahal`). New features and enhancements ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -12,10 +12,15 @@ New features and enhancements * New multivariate bias adjustment class `MBCn`, giving a faster and more accurate implementation of the 'MBCn' algorithm (:issue:`1551`, :pull:`1580`). * `xclim` is now compatible with `pytest` versions `>=8.0.0`. (:pull:`1632`). +Breaking changes +^^^^^^^^^^^^^^^^ +* Following an update in ``cf_xarray>=0.9.3``, dimensionless quantities now use the "1" units attribute specified by the CF conventions; previously, an empty string was returned. (:pull:`1814`). + Bug fixes ^^^^^^^^^ * Fixed the indexer bug in the ``xclim.indices.standardized_index_fit_params`` when multiple or non-array indexers are specified and fitted parameters are reloaded from netCDF. (:issue:`1842`, :pull:`1843`). * Addressed a bug found in ``wet_spell_*`` indicators that was contributing to erroneous results. A new generic spell length statistic function ``xclim.indices.generic.spell_length_statistics`` is now used in wet and dry spells indicators. (:issue:`1834`, :pull:`1838`). +* Usages of ``np.NaN`` and ``np.Inf`` were adapted to ``np.nan`` and ``np.inf`` to support ``numpy>=2.0.0``. (:pull:`1814`, :issue:`1785`).
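The practical effect of the breaking change above is easiest to see through ``xclim.core.units``. Here is a minimal sketch mirroring the updated ``test_units2pint`` test further down in this diff (it assumes only that ``xclim``, ``cf_xarray`` and ``packaging`` are installed):

```python
from cf_xarray import __version__ as __cfxr_version__
from packaging.version import Version

from xclim.core.units import pint2cfunits, units2pint

# Round-trip a dimensionless quantity through pint.
u = units2pint("1")

# cf_xarray >= 0.9.3 formats dimensionless units as "1", per the CF
# conventions; earlier versions rendered them as an empty string.
expected = "1" if Version(__cfxr_version__) >= Version("0.9.3") else ""
assert pint2cfunits(u) == expected
```

The same fork explains the many version-gated ``units`` assertions added throughout the test suite below.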
Internal changes ^^^^^^^^^^^^^^^^ diff --git a/docs/notebooks/sdba-advanced.ipynb b/docs/notebooks/sdba-advanced.ipynb index a14e07402..97456b6bd 100644 --- a/docs/notebooks/sdba-advanced.ipynb +++ b/docs/notebooks/sdba-advanced.ipynb @@ -271,7 +271,9 @@ "metadata": {}, "outputs": [], "source": [ - "QDM.ds.to_netcdf(\"QDM_training.nc\")\n", + "# The engine keyword is only needed if netCDF4 is not available\n", + "# The training dataset contains some attributes with types not supported by scipy/netCDF3\n", + "QDM.ds.to_netcdf(\"QDM_training.nc\", engine=\"h5netcdf\")\n", "ds = xr.open_dataset(\"QDM_training.nc\")\n", "QDM2 = QuantileDeltaMapping.from_dataset(ds)\n", "QDM2" @@ -292,7 +294,7 @@ "metadata": {}, "outputs": [], "source": [ - "QDM.ds.to_netcdf(\"QDM_training2.nc\")\n", + "QDM.ds.to_netcdf(\"QDM_training2.nc\", engine=\"h5netcdf\")\n", "ds = xr.open_dataset(\"QDM_training2.nc\")\n", "ds.attrs[\"title\"] = \"This is the dataset, but read from disk.\"\n", "QDM.set_dataset(ds)\n", @@ -878,7 +880,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.4" + "version": "3.12.4" }, "toc": { "base_numbering": 1, diff --git a/docs/notebooks/sdba.ipynb b/docs/notebooks/sdba.ipynb index d1030788d..6bcc5c4f8 100644 --- a/docs/notebooks/sdba.ipynb +++ b/docs/notebooks/sdba.ipynb @@ -360,7 +360,7 @@ "outputs": [], "source": [ "# We are using xarray's \"air_temperature\" dataset\n", - "ds = xr.tutorial.open_dataset(\"air_temperature\")" + "ds = xr.tutorial.load_dataset(\"air_temperature\")" ] }, { @@ -598,7 +598,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.12.3" + "version": "3.12.4" }, "toc": { "base_numbering": 1, diff --git a/environment.yml b/environment.yml index 07254ac3b..918737e7f 100644 --- a/environment.yml +++ b/environment.yml @@ -1,6 +1,5 @@ name: xclim channels: - - numba # Added to gain access to Python3.12-compatible numba release candidates. - conda-forge - defaults dependencies: @@ -13,9 +12,9 @@ dependencies: - dask >=2.6.0 - jsonpickle - numba - - numpy >=1.20.0,<2.0.0 + - numpy >=1.20.0 - pandas >=2.2.0 - - pint >=0.10,<0.24 + - pint >=0.18.0 - poppler >=0.67 - pyarrow # Strongly encouraged for Pandas v2.2.0+ - pyyaml @@ -53,7 +52,6 @@ dependencies: - nbsphinx - nbval >=0.11.0 - nc-time-axis - - netCDF4 >=1.4,<1.7 - notebook - pandas-stubs - platformdirs diff --git a/pyproject.toml b/pyproject.toml index 79951abf0..93febe5ac 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -41,10 +41,10 @@ dependencies = [ "filelock", "jsonpickle", "numba", - "numpy>=1.20.0,<2.0.0", + "numpy>=1.20.0", "packaging", "pandas>=2.2", - "pint>=0.10,<0.24", + "pint>=0.18", "platformdirs >=3.2", "pyarrow", # Strongly encouraged for pandas v2.2.0+ "pyyaml", @@ -74,7 +74,6 @@ dev = [ "nbconvert <7.14", # Pinned due to directive errors in sphinx. 
See: https://github.com/jupyter/nbconvert/issues/2092 "nbqa >=1.8.2", "nbval >=0.11.0", - "netCDF4 >=1.4,<1.7", "pandas-stubs >=2.2", "platformdirs >=3.2", "pooch", @@ -181,7 +180,7 @@ pep621_dev_dependency_groups = ["all", "dev", "docs"] [tool.deptry.per_rule_ignores] DEP001 = ["SBCK"] -DEP002 = ["bottleneck", "pyarrow"] +DEP002 = ["bottleneck", "h5netcdf", "pyarrow"] DEP004 = ["matplotlib", "pytest", "pytest_socket"] [tool.flit.sdist] diff --git a/tests/test_atmos.py b/tests/test_atmos.py index adbd7b224..d705e7493 100644 --- a/tests/test_atmos.py +++ b/tests/test_atmos.py @@ -4,6 +4,8 @@ import numpy as np import xarray as xr +from cf_xarray import __version__ as __cfxr_version__ +from packaging.version import Version from xclim import atmos, set_options @@ -260,7 +262,11 @@ def test_wind_profile(atmosds): def test_wind_power_potential(atmosds): out = atmos.wind_power_potential(wind_speed=atmosds.sfcWind) - assert out.attrs["units"] == "" + + if Version(__cfxr_version__) < Version("0.9.3"): + assert out.attrs["units"] == "" + else: + assert out.attrs["units"] == "1" assert (out >= 0).all() assert (out <= 1).all() diff --git a/tests/test_calendar.py b/tests/test_calendar.py index 31a3849a0..58b8d5857 100644 --- a/tests/test_calendar.py +++ b/tests/test_calendar.py @@ -281,7 +281,7 @@ def test_convert_calendar_and_doy(): out = convert_doy(doy, target_cal="360_day", align_on="date").convert_calendar( "360_day", align_on="date" ) - np.testing.assert_array_equal(out, [np.NaN, 31, 332, 360.23, np.NaN]) + np.testing.assert_array_equal(out, [np.nan, 31, 332, 360.23, np.nan]) assert out.time.dt.calendar == "360_day" @@ -456,7 +456,7 @@ def test_convert_doy(): ) out = convert_doy(doy, "360_day", align_on="date") - np.testing.assert_array_equal(out, [np.NaN, 31, 332, 360.23, np.NaN]) + np.testing.assert_array_equal(out, [np.nan, 31, 332, 360.23, np.nan]) assert out.time.dt.calendar == "noleap" out = convert_doy(doy, "360_day", align_on="year") np.testing.assert_allclose( @@ -475,7 +475,7 @@ def test_convert_doy(): ).expand_dims(lat=[10, 20, 30]) out = convert_doy(doy, "noleap", align_on="date") - np.testing.assert_array_equal(out.isel(lat=0), [31, 200.48, 190, np.NaN, 299.54]) + np.testing.assert_array_equal(out.isel(lat=0), [31, 200.48, 190, np.nan, 299.54]) out = convert_doy(doy, "noleap", align_on="year") np.testing.assert_allclose( out.isel(lat=0), [31.0, 200.48, 190.0, 59.83607, 299.71885] diff --git a/tests/test_cli.py b/tests/test_cli.py index 966308e74..7c18a04c5 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -89,7 +89,7 @@ def test_normal_computation( input_file = tmp_path / "in.nc" output_file = tmp_path / "out.nc" - ds.to_netcdf(input_file) + ds.to_netcdf(input_file, engine="h5netcdf") args = ["-i", str(input_file), "-o", str(output_file), "-v", indicator] runner = CliRunner() @@ -136,7 +136,7 @@ def test_multi_output(tmp_path, open_dataset): ds = open_dataset("ERA5/daily_surface_cancities_1990-1993.nc") input_file = tmp_path / "ws_in.nc" output_file = tmp_path / "out.nc" - ds.to_netcdf(input_file) + ds.to_netcdf(input_file, engine="h5netcdf") runner = CliRunner() results = runner.invoke( @@ -216,7 +216,7 @@ def test_missing_variable(tas_series, tmp_path): input_file = tmp_path / "tas.nc" output_file = tmp_path / "out.nc" - tas.to_netcdf(input_file) + tas.to_netcdf(input_file, engine="h5netcdf") runner = CliRunner() results = runner.invoke( @@ -243,7 +243,7 @@ def test_global_options(tas_series, tmp_path, options, output): input_file = tmp_path / "tas.nc" output_file = 
tmp_path / "out.nc" - tas.to_netcdf(input_file) + tas.to_netcdf(input_file, engine="h5netcdf") runner = CliRunner() results = runner.invoke( @@ -283,7 +283,7 @@ def test_dataflags_output(tmp_path, tas_series, tasmax_series, tasmin_series): arr = series(vals, start="1971-01-01") ds = xr.merge([ds, arr]) input_file = tmp_path / "ws_in.nc" - ds.to_netcdf(input_file) + ds.to_netcdf(input_file, engine="h5netcdf") runner = CliRunner() results = runner.invoke( @@ -303,7 +303,7 @@ def test_bad_usage(tas_series, tmp_path): input_file = tmp_path / "tas.nc" output_file = tmp_path / "out.nc" - tas.to_netcdf(input_file) + tas.to_netcdf(input_file, engine="h5netcdf") runner = CliRunner() diff --git a/tests/test_ensembles.py b/tests/test_ensembles.py index 4db62df9b..79033b441 100644 --- a/tests/test_ensembles.py +++ b/tests/test_ensembles.py @@ -79,7 +79,7 @@ def test_no_time(self, tmp_path, ensemble_dataset_objects, open_dataset): ds["time"] = xr.decode_cf(ds).time ds_all.append(ds.groupby(ds.time.dt.month).mean("time", keep_attrs=True)) ds.groupby(ds.time.dt.month).mean("time", keep_attrs=True).to_netcdf( - f1.joinpath(Path(n).name) + f1.joinpath(Path(n).name), engine="h5netcdf" ) ens = ensembles.create_ensemble(ds_all) @@ -288,8 +288,14 @@ def test_calc_mean_std_min_max(self, ensemble_dataset_objects, open_dataset): ) out2 = ensembles.ensemble_mean_std_max_min(ens, weights=weights) values = ens["tg_mean"][:, 0, 5, 5] + # Explicit float64 so numpy does the expected datatype promotion (change in numpy 2) np.testing.assert_array_equal( - (values[0] * 1 + values[1] * 0.1 + values[2] * 3.5 + values[3] * 5) + ( + values[0] * np.float64(1) + + values[1] * np.float64(0.1) + + values[2] * np.float64(3.5) + + values[3] * np.float64(5) + ) / np.sum(weights), out2.tg_mean_mean[0, 5, 5], ) @@ -311,7 +317,7 @@ def test_stats_min_members(self, ensemble_dataset_objects, open_dataset, aggfunc ds_all = [open_dataset(n) for n in ensemble_dataset_objects["nc_files_simple"]] ens = ensembles.create_ensemble(ds_all).isel(lat=0, lon=0) ens = ens.where(ens.realization > 0) - ens = xr.where((ens.realization == 1) & (ens.time.dt.year == 1950), np.NaN, ens) + ens = xr.where((ens.realization == 1) & (ens.time.dt.year == 1950), np.nan, ens) def first(ds): return ds[list(ds.data_vars.keys())[0]] diff --git a/tests/test_generic.py b/tests/test_generic.py index c1eab6bf2..ef129121a 100644 --- a/tests/test_generic.py +++ b/tests/test_generic.py @@ -5,6 +5,8 @@ import numpy as np import pytest import xarray as xr +from cf_xarray import __version__ as __cfxr_version__ +from packaging.version import Version from xclim.core.calendar import doy_to_days_since, select_time from xclim.indices import generic @@ -101,7 +103,11 @@ def test_doyminmax(self, q_series): for da in [dmx, dmn]: for attr in ["units", "is_dayofyear", "calendar"]: assert attr in da.attrs.keys() - assert da.attrs["units"] == "" + + if Version(__cfxr_version__) < Version("0.9.3"): + assert da.attrs["units"] == "" + else: + assert da.attrs["units"] == "1" assert da.attrs["is_dayofyear"] == 1 @@ -393,13 +399,13 @@ def test_generic_forbidden_op(self, pr_series): class TestGetDailyEvents: def test_simple(self, tas_series): - arr = xr.DataArray(np.array([-10, 15, 20, np.NaN, 10]), name="Stuff") + arr = xr.DataArray(np.array([-10, 15, 20, np.nan, 10]), name="Stuff") out = generic.get_daily_events(arr, threshold=10, op=">=") assert out.name == "events" assert out.sum() == 3 - np.testing.assert_array_equal(out, [0, 1, 1, np.NaN, 1]) + np.testing.assert_array_equal(out, [0, 1, 1, 
np.nan, 1]) class TestGenericCountingIndices: @@ -470,7 +476,7 @@ def test_count_occurrences(self, tas_series, op, constrain, expected, should_fai @pytest.mark.parametrize( "op, constrain, expected, should_fail", [ - ("<", None, np.NaN, False), + ("<", None, np.nan, False), ("<=", None, 3, False), ("!=", ("!=",), 1, False), ("==", ("==", "!="), 3, False), @@ -497,7 +503,7 @@ def test_first_occurrence(self, tas_series, op, constrain, expected, should_fail @pytest.mark.parametrize( "op, constrain, expected, should_fail", [ - ("<", None, np.NaN, False), + ("<", None, np.nan, False), ("<=", None, 8, False), ("!=", ("!=",), 9, False), ("==", ("==", "!="), 8, False), diff --git a/tests/test_hydrology.py b/tests/test_hydrology.py index e31c64ffc..b2276bda7 100644 --- a/tests/test_hydrology.py +++ b/tests/test_hydrology.py @@ -1,6 +1,8 @@ from __future__ import annotations import numpy as np +from cf_xarray import __version__ as __cfxr_version__ +from packaging.version import Version from xclim import indices as xci @@ -40,7 +42,11 @@ def test_simple(self, snw_series): snw = snw_series(a, start="1999-01-01") out = xci.snw_max_doy(snw, freq="YS") np.testing.assert_array_equal(out, [11, np.nan]) - assert out.units == "" + + if Version(__cfxr_version__) < Version("0.9.3"): + assert out.attrs["units"] == "" + else: + assert out.attrs["units"] == "1" class TestSnowMeltWEMax: diff --git a/tests/test_indicators.py b/tests/test_indicators.py index 1395f214c..4ca95ea2f 100644 --- a/tests/test_indicators.py +++ b/tests/test_indicators.py @@ -848,7 +848,7 @@ def test_resampling_indicator_with_indexing(tas_series): out = xclim.atmos.tx_days_above( tas, thresh="0 degC", freq="YS-JUL", doy_bounds=(1, 50) ) - np.testing.assert_allclose(out, [50, 50, np.NaN]) + np.testing.assert_allclose(out, [50, 50, np.nan]) out = xclim.atmos.tx_days_above( tas, thresh="0 degC", freq="YS", date_bounds=("02-29", "04-01") diff --git a/tests/test_indices.py b/tests/test_indices.py index 90fb45b3e..b635b27db 100644 --- a/tests/test_indices.py +++ b/tests/test_indices.py @@ -19,6 +19,9 @@ import pandas as pd import pytest import xarray as xr +from cf_xarray import __version__ as __cfxr_version__ +from packaging.version import Version +from pint import __version__ as __pint_version__ from xclim import indices as xci from xclim.core.calendar import percentile_doy @@ -137,11 +140,19 @@ def test_simple(self, tas_series): out = xci.cold_spell_frequency(da, thresh="-10. C", freq="ME") np.testing.assert_array_equal(out, [1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]) - assert out.units == "" + + if Version(__cfxr_version__) < Version("0.9.3"): + assert out.units == "" + else: + assert out.units == "1" out = xci.cold_spell_frequency(da, thresh="-10. 
C", freq="YS") np.testing.assert_array_equal(out, 3) - assert out.units == "" + + if Version(__cfxr_version__) < Version("0.9.3"): + assert out.units == "" + else: + assert out.units == "1" class TestColdSpellMaxLength: @@ -231,7 +242,11 @@ def test_no_cdd(self, tas_series): a = tas_series(np.array([10, 15, -5, 18]) + K2C) cdd = xci.cooling_degree_days(a) assert cdd == 0 - assert cdd.units == "K d" + + if Version(__pint_version__) < Version("0.24.1"): + assert cdd.units == "K d" + else: + assert cdd.units == "d K" def test_cdd(self, tas_series): a = tas_series(np.array([20, 25, -15, 19]) + K2C) @@ -454,7 +469,7 @@ def test_effective_growing_degree_days( tasmax=mx, tasmin=mn, method=method, freq="YS" ) - np.testing.assert_array_equal(out, np.array([np.NaN, expected])) + np.testing.assert_array_equal(out, np.array([np.nan, expected])) class TestStandardizedIndices: @@ -530,7 +545,7 @@ class TestStandardizedIndices: 1, "gamma", "ML", - [-0.011698, 1.597031, 0.969714, 0.265561, -0.132654], + [-0.083785, 1.457647, 0.993296, 0.271894, -0.449684], 2e-2, ), ( @@ -538,7 +553,7 @@ class TestStandardizedIndices: 12, "gamma", "ML", - [-0.158116, -0.049222, 0.672544, 1.08332, 0.660903], + [-0.158854, -0.049165, 0.675863, 0.960247, 0.660831], 2e-2, ), ( @@ -546,7 +561,7 @@ class TestStandardizedIndices: 1, "fisk", "ML", - [-0.158949, 1.308225, 0.449846, 0.146699, -0.502737], + [-0.194235, 1.308198, 0.530768, 0.22234, -0.502635], 2e-2, ), ( @@ -596,7 +611,7 @@ def test_standardized_precipitation_index( ): ds = open_dataset("sdba/CanESM2_1950-2100.nc").isel(location=1) if freq == "D": - ds = ds.convert_calendar("366_day", missing=np.NaN) + ds = ds.convert_calendar("366_day", missing=np.nan) # to compare with ``climate_indices`` pr = ds.pr.sel(time=slice("1998", "2000")) pr_cal = ds.pr.sel(time=slice("1950", "1980")) @@ -769,9 +784,9 @@ def test_standardized_index_modularity(self, open_dataset, tmp_path, indexer): # Save the parameters to a file to test against that saving process may modify the netCDF file paramsfile = tmp_path / "params0.nc" - params.to_netcdf(paramsfile) + params.to_netcdf(paramsfile, engine="h5netcdf") params.close() - params = xr.open_dataset(paramsfile).__xarray_dataarray_variable__ + params = open_dataset(paramsfile).__xarray_dataarray_variable__ spei1 = xci.standardized_precipitation_evapotranspiration_index( wb.sel(time=slice("1998", "2000")), params=params @@ -905,7 +920,12 @@ def test_simple(self, tas_series): assert lsf == 180 for attr in ["units", "is_dayofyear", "calendar"]: assert attr in lsf.attrs.keys() - assert lsf.attrs["units"] == "" + + if Version(__cfxr_version__) < Version("0.9.3"): + assert lsf.attrs["units"] == "" + else: + assert lsf.attrs["units"] == "1" + assert lsf.attrs["is_dayofyear"] == 1 assert lsf.attrs["is_dayofyear"].dtype == np.int32 @@ -926,7 +946,12 @@ def test_simple(self, tas_series): assert np.isnan(fdb) for attr in ["units", "is_dayofyear", "calendar"]: assert attr in fdb.attrs.keys() - assert fdb.attrs["units"] == "" + + if Version(__cfxr_version__) < Version("0.9.3"): + assert fdb.attrs["units"] == "" + else: + assert fdb.attrs["units"] == "1" + assert fdb.attrs["is_dayofyear"] == 1 def test_below_forbidden(self, tasmax_series): @@ -957,7 +982,12 @@ def test_simple(self, tas_series): assert np.isnan(fda) for attr in ["units", "is_dayofyear", "calendar"]: assert attr in fda.attrs.keys() - assert fda.attrs["units"] == "" + + if Version(__cfxr_version__) < Version("0.9.3"): + assert fda.attrs["units"] == "" + else: + assert fda.attrs["units"] == 
"1" + assert fda.attrs["is_dayofyear"] == 1 def test_thresholds(self, tas_series): @@ -982,7 +1012,12 @@ def test_thresholds(self, tas_series): assert out[0] == tg.indexes["time"][30].dayofyear for attr in ["units", "is_dayofyear", "calendar"]: assert attr in out.attrs.keys() - assert out.attrs["units"] == "" + + if Version(__cfxr_version__) < Version("0.9.3"): + assert out.attrs["units"] == "" + else: + assert out.attrs["units"] == "1" + assert out.attrs["is_dayofyear"] == 1 def test_above_forbidden(self, tasmax_series): @@ -1069,7 +1104,12 @@ def test_simple(self, tas_series): assert out[0] == tg.indexes["time"][20].dayofyear for attr in ["units", "is_dayofyear", "calendar"]: assert attr in out.attrs.keys() - assert out.attrs["units"] == "" + + if Version(__cfxr_version__) < Version("0.9.3"): + assert out.attrs["units"] == "" + else: + assert out.attrs["units"] == "1" + assert out.attrs["is_dayofyear"] == 1 def test_no_start(self, tas_series): @@ -1102,7 +1142,7 @@ def test_varying_mid_dates(self, tas_series, d1, d2, mid_date, expected): np.testing.assert_array_equal(gs_end, expected) for attr in ["units", "is_dayofyear", "calendar"]: assert attr in gs_end.attrs.keys() - assert gs_end.attrs["units"] == "" + assert gs_end.attrs["units"] == "1" assert gs_end.attrs["is_dayofyear"] == 1 @@ -1182,7 +1222,12 @@ def test_simple(self, tasmin_series): assert out[0] == tn.indexes["time"][20].dayofyear for attr in ["units", "is_dayofyear", "calendar"]: assert attr in out.attrs.keys() - assert out.attrs["units"] == "" + + if Version(__cfxr_version__) < Version("0.9.3"): + assert out.attrs["units"] == "" + else: + assert out.attrs["units"] == "1" + assert out.attrs["is_dayofyear"] == 1 def test_no_start(self, tasmin_series): @@ -1215,7 +1260,12 @@ def test_varying_mid_dates(self, tasmin_series, d1, d2, mid_date, expected): np.testing.assert_array_equal(gs_end, expected) for attr in ["units", "is_dayofyear", "calendar"]: assert attr in gs_end.attrs.keys() - assert gs_end.attrs["units"] == "" + + if Version(__cfxr_version__) < Version("0.9.3"): + assert gs_end.attrs["units"] == "" + else: + assert gs_end.attrs["units"] == "1" + assert gs_end.attrs["is_dayofyear"] == 1 @@ -2724,7 +2774,12 @@ def test_degree_days_exceedance_date(tas_series): for attr in ["units", "is_dayofyear", "calendar"]: assert attr in out.attrs.keys() - assert out.attrs["units"] == "" + + if Version(__cfxr_version__) < Version("0.9.3"): + assert out.attrs["units"] == "" + else: + assert out.attrs["units"] == "1" + assert out.attrs["is_dayofyear"] == 1 @@ -2764,7 +2819,11 @@ def test_first_snowfall(prsn_series, prsnd_series): assert out[0] == 166 for attr in ["units", "is_dayofyear", "calendar"]: assert attr in out.attrs.keys() - assert out.attrs["units"] == "" + + if Version(__cfxr_version__) < Version("0.9.3"): + assert out.attrs["units"] == "" + else: + assert out.attrs["units"] == "1" assert out.attrs["is_dayofyear"] == 1 # test with prsnd [m s-1] @@ -2773,7 +2832,7 @@ def test_first_snowfall(prsn_series, prsnd_series): assert out[0] == 166 for attr in ["units", "is_dayofyear", "calendar"]: assert attr in out.attrs.keys() - assert out.attrs["units"] == "" + assert out.attrs["units"] == "1" assert out.attrs["is_dayofyear"] == 1 # test with prsn [kg m-2 s-1] @@ -2785,7 +2844,11 @@ def test_first_snowfall(prsn_series, prsnd_series): assert out[0] == 166 for attr in ["units", "is_dayofyear", "calendar"]: assert attr in out.attrs.keys() - assert out.attrs["units"] == "" + + if Version(__cfxr_version__) < Version("0.9.3"): + assert 
out.attrs["units"] == "" + else: + assert out.attrs["units"] == "1" assert out.attrs["is_dayofyear"] == 1 @@ -2853,7 +2916,7 @@ def test_simple(self, snd_series, snw_series): np.testing.assert_array_equal(out, [0.3, 0.01]) def test_nan_slices(self, snd_series, snw_series): - a = np.ones(366) * np.NaN + a = np.ones(366) * np.nan snd = snd_series(a) snw = snw_series(a) @@ -2878,7 +2941,7 @@ def test_simple(self, snd_series, snw_series): np.testing.assert_array_equal(out, [193, 182]) def test_nan_slices(self, snd_series, snw_series): - a = np.ones(366) * np.NaN + a = np.ones(366) * np.nan snd = snd_series(a) snw = snw_series(a) @@ -2922,7 +2985,12 @@ def test_continous_snow_season_start(self, snd_series, snw_series): np.testing.assert_array_equal(out, [snd.time.dt.dayofyear[0].data + 2, np.nan]) for attr in ["units", "is_dayofyear", "calendar"]: assert attr in out.attrs.keys() - assert out.attrs["units"] == "" + + if Version(__cfxr_version__) < Version("0.9.3"): + assert out.attrs["units"] == "" + else: + assert out.attrs["units"] == "1" + assert out.attrs["is_dayofyear"] == 1 out = xci.snw_season_start(snw) @@ -2930,7 +2998,12 @@ def test_continous_snow_season_start(self, snd_series, snw_series): np.testing.assert_array_equal(out, [snw.time.dt.dayofyear[0].data + 1, np.nan]) for attr in ["units", "is_dayofyear", "calendar"]: assert attr in out.attrs.keys() - assert out.attrs["units"] == "" + + if Version(__cfxr_version__) < Version("0.9.3"): + assert out.attrs["units"] == "" + else: + assert out.attrs["units"] == "1" + assert out.attrs["is_dayofyear"] == 1 def test_snow_season_end(self, snd_series, snw_series): @@ -2952,7 +3025,12 @@ def test_snow_season_end(self, snd_series, snw_series): np.testing.assert_array_equal(out, [(doy + 219) % 366, np.nan]) for attr in ["units", "is_dayofyear", "calendar"]: assert attr in out.attrs.keys() - assert out.attrs["units"] == "" + + if Version(__cfxr_version__) < Version("0.9.3"): + assert out.attrs["units"] == "" + else: + assert out.attrs["units"] == "1" + assert out.attrs["is_dayofyear"] == 1 out = xci.snw_season_end(snw) @@ -2961,7 +3039,7 @@ def test_snow_season_end(self, snd_series, snw_series): np.testing.assert_array_equal(out, [(doy + 219) % 366, np.nan]) for attr in ["units", "is_dayofyear", "calendar"]: assert attr in out.attrs.keys() - assert out.attrs["units"] == "" + assert out.attrs["units"] == "1" assert out.attrs["is_dayofyear"] == 1 @@ -2974,7 +3052,7 @@ def test_snow_season_end(self, snd_series, snw_series): ["per_day", "total"], ) def test_rain_season(pr_series, result_type, method_dry_start): - pr = pr_series(np.arange(365) * np.NaN, start="2000-01-01", units="mm/d") + pr = pr_series(np.arange(365) * np.nan, start="2000-01-01", units="mm/d") # input values in mm (amount): a correcting factor is used below pr[{"time": slice(0, 0 + 3)}] = 10 # to satisfy cond1_start pr[{"time": slice(3, 3 + 30)}] = 5 # to satisfy cond2_start @@ -2983,13 +3061,13 @@ def test_rain_season(pr_series, result_type, method_dry_start): out_exp = [3, 100, 97] elif result_type == "start_cond1_fails": pr[{"time": 2}] = 0 - out_exp = [np.NaN, np.NaN, np.NaN] + out_exp = [np.nan, np.nan, np.nan] elif result_type == "start_cond2_fails": pr[{"time": slice(10, 10 + 7)}] = 0 - out_exp = [np.NaN, np.NaN, np.NaN] + out_exp = [np.nan, np.nan, np.nan] elif result_type == "end_cond_fails": pr[{"time": 99 + 20 - 1}] = 5 - out_exp = [3, np.NaN, 363] + out_exp = [3, np.nan, 363] else: raise ValueError(f"Unknown result_type: {result_type}") @@ -3119,7 +3197,7 @@ def 
test_wind_chill(tas_series, sfcWind_series): # The calculator was altered to remove the rounding of the output. np.testing.assert_allclose( out, - [-4.509267062481955, -22.619869069856854, -30.478945408950928, np.NaN, -16.443], + [-4.509267062481955, -22.619869069856854, -30.478945408950928, np.nan, -16.443], ) out = xci.wind_chill_index(tas=tas, sfcWind=sfcWind, method="US") @@ -3540,16 +3618,16 @@ def test_simple(self, pr_series, prc_series): [ 2, 0, - np.NaN, + np.nan, 2, - np.NaN, - np.NaN, - np.NaN, - np.NaN, - np.NaN, - np.NaN, - np.NaN, - np.NaN, + np.nan, + np.nan, + np.nan, + np.nan, + np.nan, + np.nan, + np.nan, + np.nan, ], ) @@ -3672,7 +3750,7 @@ def test_dryness_index(self, atmosds): (-47, "usda", 1), (-6, "anbg", 1), (19, "anbg", 6), - (-47, "anbg", np.NaN), + (-47, "anbg", np.nan), ], # There are 26 USDA zones: 1a -> 1, 1b -> 2, ... 13b -> 26 # There are 7 angb zones: 1,2, ..., 7 diff --git a/tests/test_land.py b/tests/test_land.py index 4418aa2fc..07841cd87 100644 --- a/tests/test_land.py +++ b/tests/test_land.py @@ -4,25 +4,41 @@ import numpy as np import xarray as xr +from cf_xarray import __version__ as __cfxr_version__ +from packaging.version import Version from xclim import land def test_base_flow_index(ndq_series): out = land.base_flow_index(ndq_series, freq="YS") - assert out.attrs["units"] == "" + + if Version(__cfxr_version__) < Version("0.9.3"): + assert out.attrs["units"] == "" + else: + assert out.attrs["units"] == "1" + assert isinstance(out, xr.DataArray) def test_rb_flashiness_index(ndq_series): out = land.base_flow_index(ndq_series, freq="YS") - assert out.attrs["units"] == "" + + if Version(__cfxr_version__) < Version("0.9.3"): + assert out.attrs["units"] == "" + else: + assert out.attrs["units"] == "1" + assert isinstance(out, xr.DataArray) def test_qdoy_max(ndq_series, q_series): out = land.doy_qmax(ndq_series, freq="YS", season="JJA") - assert out.attrs["units"] == "" + + if Version(__cfxr_version__) < Version("0.9.3"): + assert out.attrs["units"] == "" + else: + assert out.attrs["units"] == "1" a = np.ones(450) a[100] = 2 diff --git a/tests/test_precip.py b/tests/test_precip.py index fcf091c51..68b3b1bb7 100644 --- a/tests/test_precip.py +++ b/tests/test_precip.py @@ -77,8 +77,8 @@ def test_3d_data_with_nans(self, open_dataset): pr.attrs["units"] = "kg m-2 s-1" out3 = atmos.precip_accumulation(pr, freq="MS") - np.testing.assert_array_almost_equal(out1, out2, 3) - np.testing.assert_array_almost_equal(out1, out3, 5) + np.testing.assert_allclose(out1, out2, rtol=1e-7, atol=1e-4) + np.testing.assert_allclose(out1, out3, rtol=1e-7, atol=1e-4) # check some vector with and without a nan x1 = prMM[:31, 0, 0].values @@ -674,7 +674,7 @@ def test_dry_spell(atmosds): def test_dry_spell_total_length_indexer(pr_series): pr = pr_series( - [np.NaN] + [1] * 4 + [0] * 10 + [1] * 350, start="1900-01-01", units="mm/d" + [np.nan] + [1] * 4 + [0] * 10 + [1] * 350, start="1900-01-01", units="mm/d" ) out = atmos.dry_spell_total_length( pr, @@ -683,7 +683,7 @@ def test_dry_spell_total_length_indexer(pr_series): thresh="3 mm", freq="MS", ) - np.testing.assert_allclose(out, [np.NaN] + [0] * 11) + np.testing.assert_allclose(out, [np.nan] + [0] * 11) out = atmos.dry_spell_total_length( pr, window=7, op="sum", thresh="3 mm", freq="MS", date_bounds=("01-10", "12-31") @@ -693,7 +693,7 @@ def test_dry_spell_total_length_indexer(pr_series): def test_dry_spell_max_length_indexer(pr_series): pr = pr_series( - [np.NaN] + [1] * 4 + [0] * 10 + [1] * 350, start="1900-01-01", units="mm/d" + 
[np.nan] + [1] * 4 + [0] * 10 + [1] * 350, start="1900-01-01", units="mm/d" ) out = atmos.dry_spell_max_length( pr, @@ -702,7 +702,7 @@ def test_dry_spell_max_length_indexer(pr_series): thresh="3 mm", freq="MS", ) - np.testing.assert_allclose(out, [np.NaN] + [0] * 11) + np.testing.assert_allclose(out, [np.nan] + [0] * 11) out = atmos.dry_spell_total_length( pr, window=7, op="sum", thresh="3 mm", freq="MS", date_bounds=("01-10", "12-31") diff --git a/tests/test_run_length.py b/tests/test_run_length.py index 7617c6f87..d0e10ae7e 100644 --- a/tests/test_run_length.py +++ b/tests/test_run_length.py @@ -470,7 +470,7 @@ class TestRunsWithDates: [ ("07-01", 210, 70), ("07-01", 190, 50), - ("04-01", 150, np.NaN), # date falls early + ("04-01", 150, np.nan), # date falls early ("11-01", 150, 165), # date ends late (None, 150, 10), # no date, real length ], @@ -501,7 +501,7 @@ def test_season_length(self, tas_series, date, end, expected, use_dask, ufunc): [ ("dayofyear", "07-01", 210, 211), (False, "07-01", 190, 190), - ("dayofyear", "04-01", 150, np.NaN), # date falls early + ("dayofyear", "04-01", 150, np.nan), # date falls early ("dayofyear", "11-01", 150, 306), # date ends late ], ) @@ -529,7 +529,7 @@ def test_run_end_after_date( [ ("dayofyear", "07-01", 210, 211), (False, "07-01", 190, 190), - ("dayofyear", "04-01", False, np.NaN), # no run + ("dayofyear", "04-01", False, np.nan), # no run ("dayofyear", "11-01", 150, 306), # run already started ], ) @@ -559,7 +559,7 @@ def test_first_run_after_date( [ ("dayofyear", "07-01", 210, 183), (False, "07-01", 190, 182), - ("dayofyear", "04-01", 150, np.NaN), # date falls early + ("dayofyear", "04-01", 150, np.nan), # date falls early ("dayofyear", "11-01", 150, 150), # date ends late ], ) diff --git a/tests/test_sdba/test_adjustment.py b/tests/test_sdba/test_adjustment.py index d4ddfb865..aea505073 100644 --- a/tests/test_sdba/test_adjustment.py +++ b/tests/test_sdba/test_adjustment.py @@ -67,7 +67,7 @@ def test_harmonize_units_multivariate(self, series, random, use_dask): class TestLoci: @pytest.mark.parametrize("group,dec", (["time", 2], ["time.month", 1])) - def test_time_and_from_ds(self, series, group, dec, tmp_path, random): + def test_time_and_from_ds(self, series, group, dec, tmp_path, random, open_dataset): n = 10000 u = random.random(n) @@ -91,9 +91,9 @@ def test_time_and_from_ds(self, series, group, dec, tmp_path, random): assert "Bias-adjusted with LOCI(" in p.attrs["history"] file = tmp_path / "test_loci.nc" - loci.ds.to_netcdf(file) + loci.ds.to_netcdf(file, engine="h5netcdf") - ds = xr.open_dataset(file) + ds = open_dataset(file) loci2 = LOCI.from_dataset(ds) xr.testing.assert_equal(loci.ds, loci2.ds) @@ -283,7 +283,7 @@ def test_mon_U(self, mon_series, series, kind, name, add_dims, random): np.testing.assert_array_almost_equal(mqm, int(kind == MULTIPLICATIVE), 1) np.testing.assert_allclose(p.transpose(..., "time"), ref_t, rtol=0.1, atol=0.5) - def test_cannon_and_from_ds(self, cannon_2015_rvs, tmp_path, random): + def test_cannon_and_from_ds(self, cannon_2015_rvs, tmp_path, open_dataset, random): ref, hist, sim = cannon_2015_rvs(15000, random=random) DQM = DetrendedQuantileMapping.train(ref, hist, kind="*", group="time") @@ -293,9 +293,9 @@ def test_cannon_and_from_ds(self, cannon_2015_rvs, tmp_path, random): np.testing.assert_almost_equal(p.std(), 15.0, 0) file = tmp_path / "test_dqm.nc" - DQM.ds.to_netcdf(file) + DQM.ds.to_netcdf(file, engine="h5netcdf") - ds = xr.open_dataset(file) + ds = open_dataset(file) DQM2 = 
DetrendedQuantileMapping.from_dataset(ds) xr.testing.assert_equal(DQM.ds, DQM2.ds) diff --git a/tests/test_sdba/test_detrending.py b/tests/test_sdba/test_detrending.py index 8a237203a..99a7cca4f 100644 --- a/tests/test_sdba/test_detrending.py +++ b/tests/test_sdba/test_detrending.py @@ -15,7 +15,7 @@ ) -def test_poly_detrend_and_from_ds(series, tmp_path): +def test_poly_detrend_and_from_ds(series, tmp_path, open_dataset): x = series(np.arange(20 * 365.25), "tas") poly = PolyDetrend(degree=1) @@ -30,9 +30,9 @@ def test_poly_detrend_and_from_ds(series, tmp_path): np.testing.assert_array_almost_equal(xt, x) file = tmp_path / "test_polydetrend.nc" - fx.ds.to_netcdf(file) + fx.ds.to_netcdf(file, engine="h5netcdf") - ds = xr.open_dataset(file) + ds = open_dataset(file) fx2 = PolyDetrend.from_dataset(ds) xr.testing.assert_equal(fx.ds, fx2.ds) diff --git a/tests/test_sdba/test_processing.py b/tests/test_sdba/test_processing.py index 7a8c21f80..efc1078e4 100644 --- a/tests/test_sdba/test_processing.py +++ b/tests/test_sdba/test_processing.py @@ -157,7 +157,7 @@ def test_escore(): def test_standardize(random): x = random.standard_normal((2, 10000)) - x[0, 50] = np.NaN + x[0, 50] = np.nan x = xr.DataArray(x, dims=("x", "y"), attrs={"units": "m"}) xp, avg, std = standardize(x, dim="y") @@ -216,7 +216,7 @@ def test_to_additive(pr_series, hurs_series): with units.context("hydro"): prlog = to_additive_space(pr, lower_bound="0 mm/d", trans="log") - np.testing.assert_allclose(prlog, [-np.Inf, -11.512925, 0, 10]) + np.testing.assert_allclose(prlog, [-np.inf, -11.512925, 0, 10]) assert prlog.attrs["sdba_transform"] == "log" assert prlog.attrs["sdba_transform_units"] == "kg m-2 s-1" @@ -224,7 +224,7 @@ def test_to_additive(pr_series, hurs_series): pr1 = pr + 1 with units.context("hydro"): prlog2 = to_additive_space(pr1, trans="log", lower_bound="1.0 kg m-2 s-1") - np.testing.assert_allclose(prlog2, [-np.Inf, -11.512925, 0, 10]) + np.testing.assert_allclose(prlog2, [-np.inf, -11.512925, 0, 10]) assert prlog2.attrs["sdba_transform_lower"] == 1.0 # logit @@ -234,7 +234,7 @@ def test_to_additive(pr_series, hurs_series): hurs, lower_bound="0 %", trans="logit", upper_bound="100 %" ) np.testing.assert_allclose( - hurslogit, [-np.Inf, -11.5129154649, 2.197224577, np.Inf] + hurslogit, [-np.inf, -11.5129154649, 2.197224577, np.inf] ) assert hurslogit.attrs["sdba_transform"] == "logit" assert hurslogit.attrs["sdba_transform_units"] == "%" @@ -245,7 +245,7 @@ def test_to_additive(pr_series, hurs_series): hursscl, trans="logit", lower_bound="2", upper_bound="6" ) np.testing.assert_allclose( - hurslogit2, [-np.Inf, -11.5129154649, 2.197224577, np.Inf] + hurslogit2, [-np.inf, -11.5129154649, 2.197224577, np.inf] ) assert hurslogit2.attrs["sdba_transform_lower"] == 200.0 assert hurslogit2.attrs["sdba_transform_upper"] == 600.0 diff --git a/tests/test_sdba/test_sdba_utils.py b/tests/test_sdba/test_sdba_utils.py index f415cf090..48fe0bdcc 100644 --- a/tests/test_sdba/test_sdba_utils.py +++ b/tests/test_sdba/test_sdba_utils.py @@ -68,7 +68,7 @@ def test_equally_spaced_nodes(): @pytest.mark.parametrize( "interp,expi", [("nearest", 2.9), ("linear", 2.95), ("cubic", 2.95)] ) -@pytest.mark.parametrize("extrap,expe", [("constant", 4.4), ("nan", np.NaN)]) +@pytest.mark.parametrize("extrap,expe", [("constant", 4.4), ("nan", np.nan)]) def test_interp_on_quantiles_constant(interp, expi, extrap, expe): quantiles = np.linspace(0, 1, num=25) xq = xr.DataArray( @@ -163,7 +163,7 @@ def test_interp_on_quantiles_monthly(random): 
@pytest.mark.parametrize( "interp,expi", [("nearest", 2.9), ("linear", 2.95), ("cubic", 2.95)] ) -@pytest.mark.parametrize("extrap,expe", [("constant", 4.4), ("nan", np.NaN)]) +@pytest.mark.parametrize("extrap,expe", [("constant", 4.4), ("nan", np.nan)]) def test_interp_on_quantiles_constant_with_nan(interp, expi, extrap, expe): quantiles = np.linspace(0, 1, num=30) xq = xr.DataArray( diff --git a/tests/test_snow.py b/tests/test_snow.py index 13b0ce5b0..6cbb20c65 100644 --- a/tests/test_snow.py +++ b/tests/test_snow.py @@ -2,6 +2,8 @@ import numpy as np import pytest +from cf_xarray import __version__ as __cfxr_version__ +from packaging.version import Version from xclim import land from xclim.core.utils import ValidationError @@ -26,7 +28,7 @@ def test_simple(self, snd_series): class TestSnowWaterCoverDuration: @pytest.mark.parametrize( - "factor,exp", ([1000, [31, 28, 31, np.NaN]], [0, [0, 0, 0, np.NaN]]) + "factor,exp", ([1000, [31, 28, 31, np.nan]], [0, [0, 0, 0, np.nan]]) ) def test_simple(self, snw_series, factor, exp): snw = snw_series(np.ones(110) * factor, start="2001-01-01") @@ -45,11 +47,21 @@ def test_simple(self, snd_series): snd = snd.expand_dims(lat=[0, 1, 2]) out = land.snd_season_start(snd) - assert out.units == "" + + if Version(__cfxr_version__) < Version("0.9.3"): + assert out.units == "" + else: + assert out.units == "1" + np.testing.assert_array_equal(out.isel(lat=0), snd.time.dt.dayofyear[100]) out = land.snd_season_end(snd) - assert out.units == "" + + if Version(__cfxr_version__) < Version("0.9.3"): + assert out.units == "" + else: + assert out.units == "1" + np.testing.assert_array_equal(out.isel(lat=0), snd.time.dt.dayofyear[200]) out = land.snd_season_length(snd) @@ -67,11 +79,21 @@ def test_simple(self, snw_series): snw = snw.expand_dims(lat=[0, 1, 2]) out = land.snw_season_start(snw) - assert out.units == "" + + if Version(__cfxr_version__) < Version("0.9.3"): + assert out.units == "" + else: + assert out.units == "1" + np.testing.assert_array_equal(out.isel(lat=0), snw.time.dt.dayofyear[100]) out = land.snw_season_end(snw) - assert out.units == "" + + if Version(__cfxr_version__) < Version("0.9.3"): + assert out.units == "" + else: + assert out.units == "1" + np.testing.assert_array_equal(out.isel(lat=0), snw.time.dt.dayofyear[200]) out = land.snw_season_length(snw) @@ -97,7 +119,7 @@ def test_no_snow(self, atmosds): # Put 0 on one row. 
snd = atmosds.snd.where(atmosds.location != "Victoria", 0) out = land.snd_max_doy(snd) - np.testing.assert_array_equal(out.isel(time=1), [16, 13, 91, 29, np.NaN]) + np.testing.assert_array_equal(out.isel(time=1), [16, 13, 91, 29, np.nan]) class TestSnwMax: diff --git a/tests/test_temperature.py b/tests/test_temperature.py index de6d1d389..3331d0d79 100644 --- a/tests/test_temperature.py +++ b/tests/test_temperature.py @@ -1285,7 +1285,7 @@ def test_degree_days_exceedance_date(open_dataset): @pytest.mark.parametrize( - "never_reached,exp", [(None, np.NaN), (300, 300), ("12-01", 335)] + "never_reached,exp", [(None, np.nan), (300, 300), ("12-01", 335)] ) def test_degree_days_exceedance_date_never_reached(open_dataset, never_reached, exp): tas = open_dataset("FWI/GFWED_sample_2017.nc").tas diff --git a/tests/test_testing_utils.py b/tests/test_testing_utils.py index 10a080cbb..3bbc044e3 100644 --- a/tests/test_testing_utils.py +++ b/tests/test_testing_utils.py @@ -78,7 +78,7 @@ def test_open_dataset_with_bad_file(self, tmp_path): @pytest.mark.requires_internet def test_open_testdata(self): ds = utilities.open_dataset( - Path("cmip5/tas_Amon_CanESM2_rcp85_r1i1p1_200701-200712") + Path("cmip5/tas_Amon_CanESM2_rcp85_r1i1p1_200701-200712"), engine="h5netcdf" ) assert ds.lon.size == 128 @@ -146,15 +146,6 @@ def test_unsafe_urls(self): "doesnt_exist.nc", github_url="ftp://domain.does.not.exist/" ) - with pytest.raises( - OSError, - match="OPeNDAP file not read. Verify that the service is available: " - "'https://seemingly.trustworthy.com/doesnt_exist.nc'", - ): - utilities.open_dataset( - "doesnt_exist.nc", dap_url="https://seemingly.trustworthy.com/" - ) - def test_malicious_urls(self): with pytest.raises( URLError, diff --git a/tests/test_units.py b/tests/test_units.py index b59626fde..040d50dd0 100644 --- a/tests/test_units.py +++ b/tests/test_units.py @@ -6,7 +6,9 @@ import pint.errors import pytest import xarray as xr +from cf_xarray import __version__ as __cfxr_version__ from dask import array as dsk +from packaging.version import Version from xclim import indices, set_options from xclim.core.units import ( @@ -140,7 +142,10 @@ def test_units2pint(self, pr_series): assert pint2cfunits(u) == "%" u = units2pint("1") - assert pint2cfunits(u) == "" + if Version(__cfxr_version__) < Version("0.9.3"): + assert pint2cfunits(u) == "" + else: + assert pint2cfunits(u) == "1" def test_pint_multiply(self, pr_series): a = pr_series([1, 2, 3]) @@ -331,8 +338,14 @@ def index( ("", "sum", "count", 365, "d"), ("", "sum", "count", 365, "d"), ("kg m-2", "var", "var", 0, "kg2 m-4"), - ("°C", "argmax", "doymax", 0, ""), - ("°C", "sum", "integral", 365, "K d"), + ("°C", "argmax", "doymax", 0, ("", "1")), # dependent on cf_xarray/pint version + ( + "°C", + "sum", + "integral", + 365, + ("K d", "d K"), + ), # dependent on cf_xarray/pint version ("°F", "sum", "integral", 365, "d °R"), # not sure why the order is different ], ) @@ -346,4 +359,11 @@ def test_to_agg_units(in_u, opfunc, op, exp, exp_u): out = to_agg_units(getattr(da, opfunc)(), da, op) np.testing.assert_allclose(out, exp) - assert out.attrs["units"] == exp_u + + if isinstance(exp_u, tuple): + if Version(__cfxr_version__) < Version("0.9.3"): + assert out.attrs["units"] == exp_u[0] + else: + assert out.attrs["units"] == exp_u[1] + else: + assert out.attrs["units"] == exp_u diff --git a/tests/test_utils.py b/tests/test_utils.py index c5e614185..0d1e766f5 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -71,7 +71,7 @@ def
test_calc_perc_2d(self): assert np.all(res[0][1] == 29) def test_calc_perc_nan(self): - arr = np.asarray([np.NAN]) + arr = np.asarray([np.nan]) res = nan_calc_percentiles(arr, percentiles=[50.0]) assert np.isnan(res) @@ -81,7 +81,7 @@ def test_calc_perc_empty(self): assert np.isnan(res) def test_calc_perc_partial_nan(self): - arr = np.asarray([np.NaN, 41.0, 41.0, 43.0, 43.0]) + arr = np.asarray([np.nan, 41.0, 41.0, 43.0, 43.0]) res = nan_calc_percentiles(arr, percentiles=[50.0], alpha=1 / 3.0, beta=1 / 3.0) # The expected is from R `quantile(arr, 0.5, type=8, na.rm = TRUE)` # Note that scipy mquantiles would give a different result here diff --git a/tox.ini b/tox.ini index 229b62242..f648f166a 100644 --- a/tox.ini +++ b/tox.ini @@ -113,9 +113,10 @@ extras = dev extras: extras deps = - upstream: -r CI/requirements_upstream.txt - sbck: pybind11 lmoments: lmoments3 + numpy: numpy>=1.20,<2.0 + sbck: pybind11 + upstream: -r CI/requirements_upstream.txt install_command = python -m pip install --no-user {opts} {packages} download = True commands_pre = diff --git a/xclim/analog.py b/xclim/analog.py index bdc60f7c8..05d5157fc 100644 --- a/xclim/analog.py +++ b/xclim/analog.py @@ -139,7 +139,7 @@ def metric(func): @wraps(func) def _metric_overhead(x, y, **kwargs): if np.any(np.isnan(x)) or np.any(np.isnan(y)): - return np.NaN + return np.nan x = np.atleast_2d(x) y = np.atleast_2d(y) diff --git a/xclim/cli.py b/xclim/cli.py index fd4ddedbb..ebb35aba1 100644 --- a/xclim/cli.py +++ b/xclim/cli.py @@ -413,6 +413,11 @@ def get_command(self, ctx, name): help="Chunks to use when opening the input dataset(s). " "Given as <dim1>:num1,<dim2>:num2. Ex: time:365,lat:168,lon:150.", ) +@click.option( + "--engine", + help="Engine to use when opening the input dataset(s). " + "If not specified, xarray decides.", +) @click.pass_context def cli(ctx, **kwargs): """Entry point for the command line interface. @@ -463,7 +468,10 @@ def cli(ctx, **kwargs): for dim, num in map(lambda x: x.split(":"), kwargs["chunks"].split(",")) } - kwargs["xr_kwargs"] = {"chunks": kwargs["chunks"] or {}} + kwargs["xr_kwargs"] = { + "chunks": kwargs["chunks"] or {}, + "engine": kwargs["engine"], + } ctx.obj = kwargs @@ -475,7 +482,9 @@ def write_file(ctx, *args, **kwargs): if ctx.obj["verbose"]: click.echo(f"Writing to file {ctx.obj['output']}") with ProgressBar(): - r = ctx.obj["ds_out"].to_netcdf(ctx.obj["output"], compute=False) + r = ctx.obj["ds_out"].to_netcdf( + ctx.obj["output"], engine=kwargs["engine"], compute=False + ) if ctx.obj["dask_nthreads"] is not None: progress(r.data) r.compute() diff --git a/xclim/core/bootstrapping.py b/xclim/core/bootstrapping.py index 280ac316d..fd1dac8c1 100644 --- a/xclim/core/bootstrapping.py +++ b/xclim/core/bootstrapping.py @@ -264,7 +264,7 @@ def build_bootstrap_year_da( out_view.loc[{dim: bloc}] = source.convert_calendar("noleap").data elif len(bloc) == 366: out_view.loc[{dim: bloc}] = source.convert_calendar( - "366_day", missing=np.NAN + "366_day", missing=np.nan ).data elif len(bloc) < 365: # 360 days calendar case or anchored years for both source[dim] and bloc case diff --git a/xclim/core/calendar.py b/xclim/core/calendar.py index 98e01e283..00bafc83a 100644 --- a/xclim/core/calendar.py +++ b/xclim/core/calendar.py @@ -693,8 +693,8 @@ def is_offset_divisor(divisor: str, offset: str): # Simple length comparison is sufficient for submonthly freqs # In case one of bA or bB is > W, we test many to be sure.
tA = pd.date_range("1970-01-01T00:00:00", freq=offAs, periods=13) - return np.all( - (np.diff(tB)[:, np.newaxis] / np.diff(tA)[np.newaxis, :]) % 1 == 0 + return bool( + np.all((np.diff(tB)[:, np.newaxis] / np.diff(tA)[np.newaxis, :]) % 1 == 0) ) # else, we test alignment with some real dates diff --git a/xclim/core/units.py b/xclim/core/units.py index 1c6ec4b62..183c66d4d 100644 --- a/xclim/core/units.py +++ b/xclim/core/units.py @@ -35,6 +35,7 @@ "convert_units_to", "declare_relative_units", "declare_units", + "ensure_absolute_temperature", "ensure_cf_units", "ensure_delta", "flux2rate", @@ -183,11 +184,7 @@ def pint2cfunits(value: units.Quantity | units.Unit) -> str: if isinstance(value, (pint.Quantity, units.Quantity)): value = value.units - # Issue originally introduced in https://github.com/hgrecco/pint/issues/1486 - # Should be resolved in pint v0.24. See: https://github.com/hgrecco/pint/issues/1913 - with warnings.catch_warnings(): - warnings.simplefilter("ignore", category=DeprecationWarning) - return f"{value:cf}".replace("dimensionless", "") + return f"{value:cf}" def ensure_cf_units(ustr: str) -> str: @@ -469,10 +466,14 @@ def infer_sampling_units( } -def ensure_absolute_temperature(units: str): +def ensure_absolute_temperature(units: str) -> str: """Convert temperature units to their absolute counterpart, assuming they represented a difference (delta). Celsius becomes Kelvin, Fahrenheit becomes Rankine. Does nothing for other units. + + See Also + -------- + :py:func:`ensure_delta` """ a = str2pint(units) # ensure a delta pint unit @@ -482,6 +483,33 @@ def ensure_absolute_temperature(units: str): return units +def ensure_delta(unit: str) -> str: + """Return delta units for temperature. + + For dimensions where delta exist in pint (Temperature), it replaces the temperature unit by delta_degC or + delta_degF based on the input unit. For other dimensionality, it just gives back the input units. + + Parameters + ---------- + unit : str + unit to transform in delta (or not) + + See Also + -------- + :py:func:`ensure_absolute_temperature` + """ + u = units2pint(unit) + d = 1 * u + # + delta_unit = pint2cfunits(d - d) + # replace kelvin/rankine by delta_degC/F + if "kelvin" in u._units: + delta_unit = pint2cfunits(u / units2pint("K") * units2pint("delta_degC")) + if "degree_Rankine" in u._units: + delta_unit = pint2cfunits(u / units2pint("°R") * units2pint("delta_degF")) + return delta_unit + + def to_agg_units( out: xr.DataArray, orig: xr.DataArray, op: str, dim: str = "time" ) -> xr.DataArray: @@ -543,7 +571,7 @@ def to_agg_units( >>> degdays = convert_units_to(degdays, "K days") >>> degdays.units - 'K d' + 'd K' """ if op in ["amin", "min", "amax", "max", "mean", "sum"]: out.attrs["units"] = orig.attrs["units"] @@ -558,7 +586,7 @@ def to_agg_units( elif op in ["doymin", "doymax"]: out.attrs.update( - units="", is_dayofyear=np.int32(1), calendar=get_calendar(orig) + units="1", is_dayofyear=np.int32(1), calendar=get_calendar(orig) ) elif op in ["count", "integral"]: @@ -1219,11 +1247,7 @@ def wrapper(*args, **kwargs): context = None for ref, refvar in bound_args.arguments.items(): if f"<{ref}>" in dim: - # Issue originally introduced in https://github.com/hgrecco/pint/issues/1486 - # Should be resolved in pint v0.24. 
See: https://github.com/hgrecco/pint/issues/1913 - with warnings.catch_warnings(): - warnings.simplefilter("ignore", category=DeprecationWarning) - dim = dim.replace(f"<{ref}>", f"({units2pint(refvar)})") + dim = dim.replace(f"<{ref}>", f"({units2pint(refvar)})") # check_units will guess the hydro context if "precipitation" appears in dim, # but here we pass a real unit. It will also check the standard name of the arg, @@ -1328,29 +1352,6 @@ def wrapper(*args, **kwargs): return dec -def ensure_delta(unit: str) -> str: - """Return delta units for temperature. - - For dimensions where delta exist in pint (Temperature), it replaces the temperature unit by delta_degC or - delta_degF based on the input unit. For other dimensionality, it just gives back the input units. - - Parameters - ---------- - unit : str - unit to transform in delta (or not) - """ - u = units2pint(unit) - d = 1 * u - # - delta_unit = pint2cfunits(d - d) - # replace kelvin/rankine by delta_degC/F - if "kelvin" in u._units: - delta_unit = pint2cfunits(u / units2pint("K") * units2pint("delta_degC")) - if "degree_Rankine" in u._units: - delta_unit = pint2cfunits(u / units2pint("°R") * units2pint("delta_degF")) - return delta_unit - - def infer_context( standard_name: str | None = None, dimension: str | None = None ) -> str: diff --git a/xclim/core/utils.py b/xclim/core/utils.py index 25fc7b985..f56714894 100644 --- a/xclim/core/utils.py +++ b/xclim/core/utils.py @@ -438,7 +438,7 @@ def _nan_quantile( # --- Setup data_axis_length = arr.shape[axis] if data_axis_length == 0: - return np.NAN + return np.nan if data_axis_length == 1: result = np.take(arr, 0, axis=axis) return np.broadcast_to(result, (quantiles.size,) + result.shape) @@ -454,7 +454,7 @@ def _nan_quantile( too_few_values = valid_values_count < 2 if too_few_values.any(): # This will result in getting the only available value if it exists - valid_values_count[too_few_values] = np.NaN + valid_values_count[too_few_values] = np.nan # --- Computation of indexes # Add axis for quantiles valid_values_count = valid_values_count[..., np.newaxis] diff --git a/xclim/ensembles/_reduce.py b/xclim/ensembles/_reduce.py index 404188fd2..fdc9821bf 100644 --- a/xclim/ensembles/_reduce.py +++ b/xclim/ensembles/_reduce.py @@ -94,7 +94,7 @@ def _make_crit(da): ( da[crd].values if crd in da.coords - else [np.NaN] * da.criteria.size + else [np.nan] * da.criteria.size ) for da in stacked.values() ], diff --git a/xclim/ensembles/_robustness.py b/xclim/ensembles/_robustness.py index 322d85e53..fcfc81893 100644 --- a/xclim/ensembles/_robustness.py +++ b/xclim/ensembles/_robustness.py @@ -477,7 +477,7 @@ def _ttest(fut, ref, *, p_change=0.05): def _ttest_func(f, r): # scipy>=1.9: popmean.axis[-1] must equal 1 for both fut and ref if np.isnan(f).all() or np.isnan(r).all(): - return np.NaN + return np.nan return spstats.ttest_1samp(f, r[..., np.newaxis], axis=-1, nan_policy="omit")[1] @@ -508,7 +508,7 @@ def _welch_ttest(fut, ref, *, p_change=0.05): # equal_var=False -> Welch's T-test def wtt_wrapper(f, r): # This specific test can't manage an all-NaN slice if np.isnan(f).all() or np.isnan(r).all(): - return np.NaN + return np.nan return spstats.ttest_ind(f, r, axis=-1, equal_var=False, nan_policy="omit")[1] pvals = xr.apply_ufunc( @@ -534,7 +534,7 @@ def _mannwhitney_utest(ref, fut, *, p_change=0.05): def mwu_wrapper(f, r): # This specific test can't manage an all-NaN slice if np.isnan(f).all() or np.isnan(r).all(): - return np.NaN + return np.nan return spstats.mannwhitneyu(f, r, axis=-1, 
nan_policy="omit")[1] pvals = xr.apply_ufunc( diff --git a/xclim/indices/_agro.py b/xclim/indices/_agro.py index 2c3c8855c..6cb6c277e 100644 --- a/xclim/indices/_agro.py +++ b/xclim/indices/_agro.py @@ -234,7 +234,7 @@ def huglin_index( if method.lower() == "smoothed": lat_mask = abs(lat) <= 50 lat_coefficient = ((abs(lat) - 40) / 10).clip(min=0) * 0.06 - k = 1 + xarray.where(lat_mask, lat_coefficient, np.NaN) + k = 1 + xarray.where(lat_mask, lat_coefficient, np.nan) k_aggregated = 1 elif method.lower() == "icclim": k_f = [0, 0.02, 0.03, 0.04, 0.05, 0.06] @@ -255,7 +255,7 @@ def huglin_index( (46 < abs(lat)) & (abs(lat) <= 48), k_f[4], xarray.where( - (48 < abs(lat)) & (abs(lat) <= 50), k_f[5], np.NaN + (48 < abs(lat)) & (abs(lat) <= 50), k_f[5], np.nan ), ), ), @@ -1010,7 +1010,7 @@ def _get_first_run(run_positions, start_date, end_date): run_positions = select_time(run_positions, date_bounds=(start_date, end_date)) first_start = run_positions.argmax("time") return xarray.where( - first_start != run_positions.argmin("time"), first_start, np.NaN + first_start != run_positions.argmin("time"), first_start, np.nan ) # Find the start of the rain season @@ -1065,7 +1065,7 @@ def _get_rain_season(_pram): # `start != NaN` only possible if a condition on next few time steps is respected. # Thus, `start+1` exists if `start != NaN` start_ind = (start + 1).fillna(-1).astype(int) - mask = _pram * np.NaN + mask = _pram * np.nan # Put "True" on the day of run start mask[{"time": start_ind}] = 1 # Mask back points without runs, propagate the True diff --git a/xclim/indices/fire/_cffwis.py b/xclim/indices/fire/_cffwis.py index 1a58b8448..869fa571d 100644 --- a/xclim/indices/fire/_cffwis.py +++ b/xclim/indices/fire/_cffwis.py @@ -430,7 +430,7 @@ def _drought_code( # pragma: no cover if dr > 0.0: dc = dr + pe elif np.isnan(dc0): - dc = np.NaN + dc = np.nan else: dc = pe else: # f p <= 2.8: diff --git a/xclim/indices/run_length.py b/xclim/indices/run_length.py index 0aad6f947..090e52872 100644 --- a/xclim/indices/run_length.py +++ b/xclim/indices/run_length.py @@ -746,8 +746,8 @@ def extract_events( start_runs = _cumsum_reset_on_zero(da_start, dim=dim, index="first") stop_runs = _cumsum_reset_on_zero(da_stop, dim=dim, index="first") - start_positions = xr.where(start_runs >= window_start, 1, np.NaN) - stop_positions = xr.where(stop_runs >= window_stop, 0, np.NaN) + start_positions = xr.where(start_runs >= window_start, 1, np.nan) + stop_positions = xr.where(stop_runs >= window_stop, 0, np.nan) # start positions (1) are f-filled until a stop position (0) is met runs = stop_positions.combine_first(start_positions).ffill(dim=dim).fillna(0) @@ -1145,7 +1145,7 @@ def statistics_run_1d(arr: Sequence[bool], reducer: str, window: int) -> int: if reducer == "count": return (v * rl >= window).sum() func = getattr(np, f"nan{reducer}") - return func(np.where(v * rl >= window, rl, np.NaN)) + return func(np.where(v * rl >= window, rl, np.nan)) def windowed_run_count_1d(arr: Sequence[bool], window: int) -> int: diff --git a/xclim/sdba/_adjustment.py b/xclim/sdba/_adjustment.py index b339d5ba2..f6df322d6 100644 --- a/xclim/sdba/_adjustment.py +++ b/xclim/sdba/_adjustment.py @@ -175,7 +175,7 @@ def _npdft_train(ref, hist, rots, quantiles, method, extrap, n_escore, standardi np.nanstd(hist, axis=-1, keepdims=True) ) af_q = np.zeros((len(rots), ref.shape[0], len(quantiles))) - escores = np.zeros(len(rots)) * np.NaN + escores = np.zeros(len(rots)) * np.nan if n_escore > 0: ref_step, hist_step = ( int(np.ceil(arr.shape[1] / 
n_escore)) for arr in [ref, hist] @@ -708,7 +708,7 @@ def npdf_transform(ds: xr.Dataset, **kwargs) -> xr.Dataset: ------- xr.Dataset Dataset with `scenh`, `scens` and `escores` DataArrays, where `scenh` and `scens` are `hist` and `sim` - respectively after adjustment according to `ref`. If `n_escore` is negative, `escores` will be filled with NaNs. + respectively after adjustment according to `ref`. If `n_escore` is negative, `escores` will be filled with nans. """ ref = ds.ref.rename(time_hist="time") hist = ds.hist.rename(time_hist="time") @@ -752,10 +752,10 @@ def npdf_transform(ds: xr.Dataset, **kwargs) -> xr.Dataset: if kwargs["n_escore"] >= 0: escores = xr.concat(escores, "iterations") else: - # All NaN, but with the proper shape. + # All nan, but with the proper shape. escores = ( ref.isel({dim: 0, "time": 0}) * hist.isel({dim: 0, "time": 0}) - ).expand_dims(iterations=ds.iterations) * np.NaN + ).expand_dims(iterations=ds.iterations) * np.nan return xr.Dataset( data_vars={ @@ -809,8 +809,8 @@ def _extremes_train_1d(ref, hist, ref_params, *, q_thresh, cluster_thresh, dist, af = hist_in_ref / hist[Pcommon] # sort them in Px order, and pad to have N values. order = np.argsort(Px_hist) - px_hist = np.pad(Px_hist[order], ((0, N - af.size),), constant_values=np.NaN) - af = np.pad(af[order], ((0, N - af.size),), constant_values=np.NaN) + px_hist = np.pad(Px_hist[order], ((0, N - af.size),), constant_values=np.nan) + af = np.pad(af[order], ((0, N - af.size),), constant_values=np.nan) return px_hist, af, thresh @@ -853,7 +853,7 @@ def extremes_train( _extremes_train_1d, ds.ref, ds.hist, - ds.ref_params or np.NaN, + ds.ref_params or np.nan, input_core_dims=[("time",), ("time",), ()], output_core_dims=[("quantiles",), ("quantiles",), ()], vectorize=True, diff --git a/xclim/sdba/adjustment.py b/xclim/sdba/adjustment.py index 632397f59..90e95ceb4 100644 --- a/xclim/sdba/adjustment.py +++ b/xclim/sdba/adjustment.py @@ -745,7 +745,7 @@ def _train( { "ref": ref, "hist": hist, - "ref_params": ref_params or np.float32(np.NaN), + "ref_params": ref_params or np.float32(np.nan), } ), q_thresh=q_thresh, @@ -1234,8 +1234,8 @@ def _adjust( template = xr.Dataset( data_vars={ - "scenh": xr.full_like(hist, np.NaN).rename(time="time_hist"), - "scen": xr.full_like(sim, np.NaN), + "scenh": xr.full_like(hist, np.nan).rename(time="time_hist"), + "scen": xr.full_like(sim, np.nan), "escores": escores_tmpl, } ) diff --git a/xclim/sdba/loess.py b/xclim/sdba/loess.py index abb15480e..f20974efd 100644 --- a/xclim/sdba/loess.py +++ b/xclim/sdba/loess.py @@ -92,7 +92,7 @@ def _loess_nb( """ if skipna: nan = np.isnan(y) - out = np.full(x.size, np.NaN) + out = np.full(x.size, np.nan) y = y[~nan] x = x[~nan] if x.size == 0: diff --git a/xclim/sdba/nbutils.py b/xclim/sdba/nbutils.py index fe942b11d..1b653b2d2 100644 --- a/xclim/sdba/nbutils.py +++ b/xclim/sdba/nbutils.py @@ -160,7 +160,7 @@ def _nan_quantile_1d( ) def _vecquantiles(arr, rnk, res): if np.isnan(rnk): - res[0] = np.NaN + res[0] = np.nan else: res[0] = np.nanquantile(arr, rnk) @@ -376,7 +376,7 @@ def _first_and_last_nonnull(arr): if idxs.size > 0: out[i] = arr[i][idxs[np.array([0, -1])]] else: - out[i] = np.array([np.NaN, np.NaN]) + out[i] = np.array([np.nan, np.nan]) return out @@ -403,8 +403,8 @@ def _extrapolate_on_quantiles( interp[toolow] = cnstlow[toolow] interp[toohigh] = cnsthigh[toohigh] else: # 'nan' - interp[toolow] = np.NaN - interp[toohigh] = np.NaN + interp[toolow] = np.nan + interp[toohigh] = np.nan return interp diff --git a/xclim/sdba/utils.py 
diff --git a/xclim/sdba/utils.py b/xclim/sdba/utils.py
index 512e7e8b4..153243ea4 100644
--- a/xclim/sdba/utils.py
+++ b/xclim/sdba/utils.py
@@ -325,7 +325,7 @@ def _interp_on_quantiles_1D_multi(newxs, oldx, oldy, method, extrap):  # noqa: N
             oldy[~np.isnan(oldy)][-1],
         )
     else:  # extrap == 'nan'
-        fill_value = np.NaN
+        fill_value = np.nan
 
     finterp1d = interp1d(
         oldx[~mask_old],
@@ -338,7 +338,7 @@ def _interp_on_quantiles_1D_multi(newxs, oldx, oldy, method, extrap):  # noqa: N
     out = np.zeros_like(newxs)
     for ii in range(newxs.shape[0]):
         mask_new = np.isnan(newxs[ii, :])
-        y1 = newxs[ii, :].copy() * np.NaN
+        y1 = newxs[ii, :].copy() * np.nan
         y1[~mask_new] = finterp1d(newxs[ii, ~mask_new])
         out[ii, :] = y1.flatten()
     return out
@@ -347,10 +347,10 @@ def _interp_on_quantiles_1D_multi(newxs, oldx, oldy, method, extrap):  # noqa: N
 def _interp_on_quantiles_1D(newx, oldx, oldy, method, extrap):  # noqa: N802
     mask_new = np.isnan(newx)
     mask_old = np.isnan(oldy) | np.isnan(oldx)
-    out = np.full_like(newx, np.NaN, dtype=f"float{oldy.dtype.itemsize * 8}")
+    out = np.full_like(newx, np.nan, dtype=f"float{oldy.dtype.itemsize * 8}")
     if np.all(mask_new) or np.all(mask_old):
         warn(
-            "All-NaN slice encountered in interp_on_quantiles",
+            "All-nan slice encountered in interp_on_quantiles",
             category=RuntimeWarning,
         )
         return out
@@ -361,7 +361,8 @@ def _interp_on_quantiles_1D(newx, oldx, oldy, method, extrap):  # noqa: N802
             oldy[~np.isnan(oldy)][-1],
         )
     else:  # extrap == 'nan'
-        fill_value = np.NaN
+        fill_value = np.nan
+
     out[~mask_new] = interp1d(
         oldx[~mask_old],
         oldy[~mask_old],
@@ -369,16 +370,17 @@ def _interp_on_quantiles_1D(newx, oldx, oldy, method, extrap):  # noqa: N802
         bounds_error=False,
         fill_value=fill_value,
     )(newx[~mask_new])
+
     return out
 
 
 def _interp_on_quantiles_2D(newx, newg, oldx, oldy, oldg, method, extrap):  # noqa
     mask_new = np.isnan(newx) | np.isnan(newg)
     mask_old = np.isnan(oldy) | np.isnan(oldx) | np.isnan(oldg)
-    out = np.full_like(newx, np.NaN, dtype=f"float{oldy.dtype.itemsize * 8}")
+    out = np.full_like(newx, np.nan, dtype=f"float{oldy.dtype.itemsize * 8}")
     if np.all(mask_new) or np.all(mask_old):
         warn(
-            "All-NaN slice encountered in interp_on_quantiles",
+            "All-nan slice encountered in interp_on_quantiles",
             category=RuntimeWarning,
         )
         return out
@@ -413,8 +415,8 @@ def interp_on_quantiles(
     Interpolate in 2D with :py:func:`scipy.interpolate.griddata` if grouping is used,
     in 1D otherwise, with :py:class:`scipy.interpolate.interp1d`.
 
-    Any NaNs in `xq` or `yq` are removed from the input map.
-    Similarly, NaNs in newx are left NaNs.
+    Any nans in `xq` or `yq` are removed from the input map.
+    Similarly, nans in newx are left nans.
 
     Parameters
     ----------
@@ -439,7 +441,7 @@ def interp_on_quantiles(
     -----
     Extrapolation methods:
 
-    - 'nan' : Any value of `newx` outside the range of `xq` is set to NaN.
+    - 'nan' : Any value of `newx` outside the range of `xq` is set to 'nan'.
     - 'constant' : Values of `newx` smaller than the minimum of `xq` are set to the first
       value of `yq` and those larger than the maximum, set to the last one (first and
       last non-nan values along the "quantiles" dimension). When the grouping is "time.month",
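Aside: the two extrapolation modes documented above map directly onto the `fill_value` argument of `scipy.interpolate.interp1d`, which is how `_interp_on_quantiles_1D` implements them. A rough sketch with made-up values:

    import numpy as np
    from scipy.interpolate import interp1d

    xq = np.array([0.1, 0.5, 0.9])    # quantiles of the training data
    yq = np.array([1.0, 2.0, 3.0])    # corresponding values
    newx = np.array([0.0, 0.5, 1.0])  # first and last fall outside xq

    # 'nan': out-of-range points become nan
    f_nan = interp1d(xq, yq, bounds_error=False, fill_value=np.nan)
    f_nan(newx)    # -> [nan, 2.0, nan]

    # 'constant': clamp to the first/last values instead
    f_const = interp1d(xq, yq, bounds_error=False, fill_value=(yq[0], yq[-1]))
    f_const(newx)  # -> [1.0, 2.0, 3.0]
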
@@ -536,7 +538,7 @@ def rank(
 
     Notes
     -----
-    The `bottleneck` library is required. NaNs in the input array are returned as NaNs.
+    The `bottleneck` library is required. nans in the input array are returned as nans.
 
     See Also
     --------
@@ -577,8 +579,8 @@ def pc_matrix(arr: np.ndarray | dsk.Array) -> np.ndarray | dsk.Array:
     """Construct a Principal Component matrix.
 
     This matrix can be used to transform points in arr to principal components
-    coordinates. Note that this function does not manage NaNs; if a single observation is null, all elements
-    of the transformation matrix involving that variable will be NaN.
+    coordinates. Note that this function does not manage nans; if a single observation is null, all elements
+    of the transformation matrix involving that variable will be nan.
 
     Parameters
     ----------
@@ -795,7 +797,7 @@ def get_clusters(data: xr.DataArray, u1, u2, dim: str = "time") -> xr.Dataset:
     - `maxpos` : Index of the maximal value within the cluster (`dim` reduced, new `cluster`), int
     - `maximum` : Maximal value within the cluster (`dim` reduced, new `cluster`), same dtype as data.
 
-    For `start`, `end` and `maxpos`, -1 means NaN and should always correspond to a `NaN` in `maximum`.
+    For `start`, `end` and `maxpos`, -1 means nan and should always correspond to a `nan` in `maximum`.
     The length along `cluster` is half the size of "dim", the maximal theoretical number of clusters.
     """
@@ -807,7 +809,7 @@ def _get_clusters(arr, u1, u2, N):
             np.append(st, pad),
             np.append(ed, pad),
             np.append(mp, pad),
-            np.append(mv, [np.NaN] * (N - count)),
+            np.append(mv, [np.nan] * (N - count)),
             count,
         )
 
@@ -917,7 +919,7 @@ def copy_all_attrs(ds: xr.Dataset | xr.DataArray, ref: xr.Dataset | xr.DataArray
 def _pairwise_spearman(da, dims):
     """Area-averaged pairwise temporal correlation.
 
-    With skipna-shortcuts for cases where all times or all points are NaN.
+    With skipna-shortcuts for cases where all times or all points are nan.
     """
     da = da - da.mean(dims)
     da = (
@@ -928,7 +930,7 @@ def _pairwise_spearman(da, dims):
 
     def _skipna_correlation(data):
         nv, _nt = data.shape
-        # Mask of which variable are all NaN
+        # Mask of which variable are all nan
         mask_omit = np.isnan(data).all(axis=1)
         # Remove useless variables
         data_noallnan = data[~mask_omit, :]
@@ -937,7 +939,7 @@ def _pairwise_spearman(da, dims):
         # Remove those times (they'll be omitted anyway)
         data_nonan = data_noallnan[:, ~mask_skip]
 
-        # We still have a possibility that a NaN was unique to a variable and time.
+        # We still have a possibility that a nan was unique to a variable and time.
         # If this is the case, it will be a lot longer, but what can we do.
         coef = spearmanr(data_nonan, axis=1, nan_policy="omit").correlation
diff --git a/xclim/testing/helpers.py b/xclim/testing/helpers.py
index ab2673d79..34e10823d 100644
--- a/xclim/testing/helpers.py
+++ b/xclim/testing/helpers.py
@@ -141,7 +141,7 @@ def generate_atmos(cache_dir: Path) -> dict[str, xr.DataArray]:
 
     # Create a file in session scoped temporary directory
     atmos_file = cache_dir.joinpath("atmosds.nc")
-    ds.to_netcdf(atmos_file)
+    ds.to_netcdf(atmos_file, engine="h5netcdf")
 
     # Give access to dataset variables by name in namespace
     namespace = dict()
diff --git a/xclim/testing/utils.py b/xclim/testing/utils.py
index cc7d124a8..bcc3691f8 100644
--- a/xclim/testing/utils.py
+++ b/xclim/testing/utils.py
@@ -42,6 +42,7 @@
     "scipy",
     "pint",
     "pandas",
+    "numpy",
     "numba",
     "lmoments3",
     "jsonpickle",
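Aside: the `engine="h5netcdf"` argument added to `generate_atmos` mirrors similar changes elsewhere in this patch; with the `netCDF4` pin dropped, requesting an engine explicitly avoids xarray falling back to the scipy/netCDF3 writer, which rejects some attribute types. A minimal sketch on a toy dataset, assuming the `h5netcdf` and `h5py` packages are installed:

    import xarray as xr

    ds = xr.Dataset({"tas": ("time", [280.0, 281.5])})
    # The engine keyword matters only when the netCDF4 package is unavailable.
    ds.to_netcdf("example.nc", engine="h5netcdf")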