diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index df5e4b26eae..ecc69e5783a 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -45,6 +45,7 @@ repos:
             types-PyYAML,
             types-pytz,
             typing-extensions==3.10.0.0,
+            numpy,
           ]
   # run this occasionally, ref discussion https://github.com/pydata/xarray/pull/3194
   # - repo: https://github.com/asottile/pyupgrade
diff --git a/xarray/backends/file_manager.py b/xarray/backends/file_manager.py
index 4b9c95ec792..47a4201539b 100644
--- a/xarray/backends/file_manager.py
+++ b/xarray/backends/file_manager.py
@@ -2,7 +2,7 @@
 import io
 import threading
 import warnings
-from typing import Any, Dict, cast
+from typing import Any, Dict
 
 from ..core import utils
 from ..core.options import OPTIONS
@@ -11,7 +11,7 @@
 
 # Global cache for storing open files.
 FILE_CACHE: LRUCache[str, io.IOBase] = LRUCache(
-    maxsize=cast(int, OPTIONS["file_cache_maxsize"]), on_evict=lambda k, v: v.close()
+    maxsize=OPTIONS["file_cache_maxsize"], on_evict=lambda k, v: v.close()
 )
 assert FILE_CACHE.maxsize, "file cache must be at least size one"
diff --git a/xarray/core/accessor_str.py b/xarray/core/accessor_str.py
index 0f0a256b77a..9c9de76c0ed 100644
--- a/xarray/core/accessor_str.py
+++ b/xarray/core/accessor_str.py
@@ -276,8 +276,8 @@ def func(x):
 
     if isinstance(pat, np.ndarray):
         # apply_ufunc doesn't work for numpy arrays with output object dtypes
-        func = np.vectorize(func)
-        return func(pat)
+        func_ = np.vectorize(func)
+        return func_(pat)
     else:
         return _apply_str_ufunc(func=func, obj=pat, dtype=np.object_)
diff --git a/xarray/core/dataset.py b/xarray/core/dataset.py
index 22f4e32e83e..3d05d56492b 100644
--- a/xarray/core/dataset.py
+++ b/xarray/core/dataset.py
@@ -6918,9 +6918,9 @@ def polyfit(
         if full:
             rank = xr.DataArray(rank, name=xname + "matrix_rank")
             variables[rank.name] = rank
-            sing = np.linalg.svd(lhs, compute_uv=False)
+            _sing = np.linalg.svd(lhs, compute_uv=False)
             sing = xr.DataArray(
-                sing,
+                _sing,
                 dims=(degree_dim,),
                 coords={degree_dim: np.arange(rank - 1, -1, -1)},
                 name=xname + "singular_values",
diff --git a/xarray/core/duck_array_ops.py b/xarray/core/duck_array_ops.py
index ee6f311718d..5b0d9a4fcd4 100644
--- a/xarray/core/duck_array_ops.py
+++ b/xarray/core/duck_array_ops.py
@@ -16,7 +16,7 @@
 from numpy import zeros_like  # noqa
 from numpy import around, broadcast_to  # noqa
 from numpy import concatenate as _concatenate
-from numpy import einsum, isclose, isin, isnan, isnat, pad  # noqa
+from numpy import einsum, isclose, isin, isnan, isnat  # noqa
 from numpy import stack as _stack
 from numpy import take, tensordot, transpose, unravel_index  # noqa
 from numpy import where as _where
@@ -168,7 +168,7 @@ def cumulative_trapezoid(y, x, axis):
 
     # Pad so that 'axis' has same length in result as it did in y
     pads = [(1, 0) if i == axis else (0, 0) for i in range(y.ndim)]
-    integrand = pad(integrand, pads, mode="constant", constant_values=0.0)
+    integrand = np.pad(integrand, pads, mode="constant", constant_values=0.0)
 
     return cumsum(integrand, axis=axis, skipna=False)
diff --git a/xarray/core/formatting.py b/xarray/core/formatting.py
index 05a8d163a41..c0633064231 100644
--- a/xarray/core/formatting.py
+++ b/xarray/core/formatting.py
@@ -200,7 +200,7 @@ def format_array_flat(array, max_width: int):
         (max_possibly_relevant < array.size) or (cum_len > max_width).any()
     ):
         padding = " ... "
-        max_len = max(int(np.argmax(cum_len + len(padding) - 1 > max_width)), 2)  # type: ignore[type-var]
+        max_len = max(int(np.argmax(cum_len + len(padding) - 1 > max_width)), 2)
         count = min(array.size, max_len)
     else:
         count = array.size
diff --git a/xarray/core/indexing.py b/xarray/core/indexing.py
index 9b4c0534204..c93d797266b 100644
--- a/xarray/core/indexing.py
+++ b/xarray/core/indexing.py
@@ -872,7 +872,7 @@ def _decompose_outer_indexer(
     backend_indexer: List[Any] = []
    np_indexer = []
     # make indexer positive
-    pos_indexer = []
+    pos_indexer: list[np.ndarray | int | np.number] = []
     for k, s in zip(indexer.tuple, shape):
         if isinstance(k, np.ndarray):
             pos_indexer.append(np.where(k < 0, k + s, k))
diff --git a/xarray/core/utils.py b/xarray/core/utils.py
index 89e3714ffff..68615eef74f 100644
--- a/xarray/core/utils.py
+++ b/xarray/core/utils.py
@@ -652,7 +652,7 @@ def read_magic_number_from_file(filename_or_obj, count=8) -> bytes:
                 "file-like object read/write pointer not at the start of the file, "
                 "please close and reopen, or use a context manager"
             )
-        magic_number = filename_or_obj.read(count)  # type: ignore
+        magic_number = filename_or_obj.read(count)
         filename_or_obj.seek(0)
     else:
         raise TypeError(f"cannot read the magic number form {type(filename_or_obj)}")
diff --git a/xarray/core/variable.py b/xarray/core/variable.py
index 025a07fa9de..58aeceed3b1 100644
--- a/xarray/core/variable.py
+++ b/xarray/core/variable.py
@@ -1238,7 +1238,7 @@ def _shift_one_dim(self, dim, count, fill_value=dtypes.NA):
         dim_pad = (width, 0) if count >= 0 else (0, width)
         pads = [(0, 0) if d != dim else dim_pad for d in self.dims]
 
-        data = duck_array_ops.pad(
+        data = np.pad(
             trimmed_data.astype(dtype),
             pads,
             mode="constant",
@@ -1378,7 +1378,7 @@ def pad(
         if reflect_type is not None:
             pad_option_kwargs["reflect_type"] = reflect_type  # type: ignore[assignment]
 
-        array = duck_array_ops.pad(
+        array = np.pad(  # type: ignore[call-overload]
             self.data.astype(dtype, copy=False),
             pad_width_by_index,
             mode=mode,
diff --git a/xarray/tests/test_computation.py b/xarray/tests/test_computation.py
index 6af93607e6b..c9a10b7cc43 100644
--- a/xarray/tests/test_computation.py
+++ b/xarray/tests/test_computation.py
@@ -1934,7 +1934,8 @@ def test_polyval(use_dask, use_datetime) -> None:
         )
         x = xr.core.missing.get_clean_interp_index(xcoord, "x")
     else:
-        xcoord = x = np.arange(10)
+        x = np.arange(10)
+        xcoord = xr.DataArray(x, dims=("x",), name="x")
 
     da = xr.DataArray(
         np.stack((1.0 + x + 2.0 * x ** 2, 1.0 + 2.0 * x + 3.0 * x ** 2)),
diff --git a/xarray/tests/test_conventions.py b/xarray/tests/test_conventions.py
index b364b405423..ab290955e6c 100644
--- a/xarray/tests/test_conventions.py
+++ b/xarray/tests/test_conventions.py
@@ -292,7 +292,7 @@ def test_decode_cf_datetime_transition_to_invalid(self) -> None:
             warnings.filterwarnings("ignore", "unable to decode time")
             ds_decoded = conventions.decode_cf(ds)
 
-        expected = [datetime(2000, 1, 1, 0, 0), datetime(2265, 10, 28, 0, 0)]
+        expected = np.array([datetime(2000, 1, 1, 0, 0), datetime(2265, 10, 28, 0, 0)])
 
         assert_array_equal(ds_decoded.time.values, expected)