diff --git a/.github/dependabot.yml b/.github/dependabot.yml
index 90e05c40d0..95179b06c9 100644
--- a/.github/dependabot.yml
+++ b/.github/dependabot.yml
@@ -8,4 +8,4 @@ updates:
- package-ecosystem: "github-actions" # See documentation for possible values
directory: "/" # Location of package manifests
schedule:
- interval: "weekly"
+ interval: "monthly"
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 99e77cb56a..a398bd445f 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -3,7 +3,7 @@ fail_fast: false
repos:
- repo: https://github.com/astral-sh/ruff-pre-commit
# Ruff version.
- rev: 'v0.1.6'
+ rev: 'v0.1.7'
hooks:
- id: ruff
- repo: https://github.com/pre-commit/pre-commit-hooks
@@ -14,7 +14,7 @@ repos:
- id: check-yaml
args: [--unsafe]
- repo: https://github.com/PyCQA/bandit
- rev: '1.7.5' # Update me!
+ rev: '1.7.6' # Update me!
hooks:
- id: bandit
args: [--ini, .bandit]
@@ -29,11 +29,12 @@ repos:
- types-requests
args: ["--python-version", "3.9", "--ignore-missing-imports"]
- repo: https://github.com/pycqa/isort
- rev: 5.12.0
+ rev: 5.13.1
hooks:
- id: isort
language_version: python3
ci:
# To trigger manually, comment on a pull request with "pre-commit.ci autofix"
autofix_prs: false
+ autoupdate_schedule: "monthly"
skip: [bandit]
diff --git a/AUTHORS.md b/AUTHORS.md
index 9078e441b4..796ee9743b 100644
--- a/AUTHORS.md
+++ b/AUTHORS.md
@@ -50,6 +50,7 @@ The following people have made contributions to this project:
- [Lu Liu (yukaribbba)](https://github.com/yukaribbba)
- [Andrea Meraner (ameraner)](https://github.com/ameraner)
- [Aronne Merrelli (aronnem)](https://github.com/aronnem)
+- [Luca Merucci (lmeru)](https://github.com/lmeru)
- [Lucas Meyer (LTMeyer)](https://github.com/LTMeyer)
- [Zifeng Mo (Isotr0py)](https://github.com/Isotr0py)
- [Ondrej Nedelcev (nedelceo)](https://github.com/nedelceo)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index aa85b83f56..8730209f99 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,59 @@
+## Version 0.46.0 (2023/12/18)
+
+### Issues Closed
+
+* [Issue 2668](https://github.com/pytroll/satpy/issues/2668) - FCI HRFI true_color unavailable even after native resampling if upper_right_corner is used ([PR 2690](https://github.com/pytroll/satpy/pull/2690) by [@djhoese](https://github.com/djhoese))
+* [Issue 2664](https://github.com/pytroll/satpy/issues/2664) - Cannot generate day-night composites
+* [Issue 2654](https://github.com/pytroll/satpy/issues/2654) - Unable to read radiance with AVHRR EPS ([PR 2655](https://github.com/pytroll/satpy/pull/2655) by [@mraspaud](https://github.com/mraspaud))
+* [Issue 2647](https://github.com/pytroll/satpy/issues/2647) - Preservation of input data dtype in processing FCI data
+* [Issue 2618](https://github.com/pytroll/satpy/issues/2618) - GCOM-C Support (Continued) ([PR 1094](https://github.com/pytroll/satpy/pull/1094) by [@mraspaud](https://github.com/mraspaud))
+* [Issue 2588](https://github.com/pytroll/satpy/issues/2588) - FCI chunks/segments out of order if pad_data=False ([PR 2692](https://github.com/pytroll/satpy/pull/2692) by [@ameraner](https://github.com/ameraner))
+* [Issue 2263](https://github.com/pytroll/satpy/issues/2263) - VIIRS day composite 'snow_age' does not work with Satpy 0.37.1
+* [Issue 1496](https://github.com/pytroll/satpy/issues/1496) - Improve error reporting of satpy.utils.get_satpos
+* [Issue 1086](https://github.com/pytroll/satpy/issues/1086) - Add a reader for GCOM-C Level 1 data ([PR 1094](https://github.com/pytroll/satpy/pull/1094) by [@mraspaud](https://github.com/mraspaud))
+
+In this release 9 issues were closed.
+
+### Pull Requests Merged
+
+#### Bugs fixed
+
+* [PR 2694](https://github.com/pytroll/satpy/pull/2694) - Match all projectables in `NDVIHybridGreen.__call__` to avoid coordinate mismatch errors ([2668](https://github.com/pytroll/satpy/issues/2668), [2668](https://github.com/pytroll/satpy/issues/2668))
+* [PR 2692](https://github.com/pytroll/satpy/pull/2692) - Anticipate filehandler sorting in `GEOSegmentYAMLReader` to have sorted handlers also with `pad_data=False` ([2588](https://github.com/pytroll/satpy/issues/2588))
+* [PR 2690](https://github.com/pytroll/satpy/pull/2690) - Fix composites failing on non-aligned geolocation coordinates ([2668](https://github.com/pytroll/satpy/issues/2668))
+* [PR 2682](https://github.com/pytroll/satpy/pull/2682) - Update AHI HSD reader to correctly handle singleton arrays.
+* [PR 2674](https://github.com/pytroll/satpy/pull/2674) - Update xarray version in CF writer tests for compression kwarg
+* [PR 2671](https://github.com/pytroll/satpy/pull/2671) - Workaround AWIPS bug not handling integers properly in "awips_tiled" writer
+* [PR 2669](https://github.com/pytroll/satpy/pull/2669) - Fix RealisticColors compositor upcasting data to float64
+* [PR 2655](https://github.com/pytroll/satpy/pull/2655) - Fix missing radiance units in eps l1b ([2654](https://github.com/pytroll/satpy/issues/2654))
+
+#### Features added
+
+* [PR 2683](https://github.com/pytroll/satpy/pull/2683) - Fci/l2/amv/reader
+* [PR 2679](https://github.com/pytroll/satpy/pull/2679) - Update MiRS reader coefficient files to newer version
+* [PR 2677](https://github.com/pytroll/satpy/pull/2677) - Add remaining JPSS satellite platform aliases to "mirs" reader ([665](https://github.com/ssec/polar2grid/issues/665))
+* [PR 2669](https://github.com/pytroll/satpy/pull/2669) - Fix RealisticColors compositor upcasting data to float64
+* [PR 2660](https://github.com/pytroll/satpy/pull/2660) - Update tropomi_l2 reader with "_reduced" file patterns
+* [PR 2557](https://github.com/pytroll/satpy/pull/2557) - Add baseline for GeoColor composite including FCI, AHI and ABI recipes
+* [PR 2106](https://github.com/pytroll/satpy/pull/2106) - Add Scene function to use Hvplot backend visualization
+* [PR 1094](https://github.com/pytroll/satpy/pull/1094) - Add Gcom-C sgli reader ([2618](https://github.com/pytroll/satpy/issues/2618), [1086](https://github.com/pytroll/satpy/issues/1086))
+
+#### Backward incompatible changes
+
+* [PR 2684](https://github.com/pytroll/satpy/pull/2684) - Get rid of warnings in compositor tests
+
+#### Clean ups
+
+* [PR 2691](https://github.com/pytroll/satpy/pull/2691) - Reduce the number of warnings in writer tests
+* [PR 2690](https://github.com/pytroll/satpy/pull/2690) - Fix composites failing on non-aligned geolocation coordinates ([2668](https://github.com/pytroll/satpy/issues/2668))
+* [PR 2684](https://github.com/pytroll/satpy/pull/2684) - Get rid of warnings in compositor tests
+* [PR 2681](https://github.com/pytroll/satpy/pull/2681) - Get rid of warnings in resampler tests
+* [PR 2676](https://github.com/pytroll/satpy/pull/2676) - Convert times in SEVIRI readers to nanosecond precision to silence warnings
+* [PR 2658](https://github.com/pytroll/satpy/pull/2658) - Update unstable version of h5py in CI
+
+In this release 23 pull requests were closed.
+
+
## Version 0.45.0 (2023/11/29)
### Issues Closed
diff --git a/continuous_integration/environment.yaml b/continuous_integration/environment.yaml
index e1b52b384e..ecc0084ea7 100644
--- a/continuous_integration/environment.yaml
+++ b/continuous_integration/environment.yaml
@@ -37,11 +37,12 @@ dependencies:
- mock
- libtiff
- geoviews
+ - holoviews
+ - hvplot
- zarr
- python-eccodes
# 2.19.1 seems to cause library linking issues
- eccodes>=2.20
- - geoviews
- pytest
- pytest-cov
- pytest-lazy-fixture
diff --git a/doc/source/conf.py b/doc/source/conf.py
index 3aa810420e..37c197c6eb 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -70,7 +70,7 @@ def __getattr__(cls, name):
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = Mock() # type: ignore
-autodoc_mock_imports = ["cf", "glymur", "h5netcdf", "imageio", "mipp", "netCDF4",
+autodoc_mock_imports = ["cf", "glymur", "h5netcdf", "holoviews", "imageio", "mipp", "netCDF4",
"pygac", "pygrib", "pyhdf", "pyninjotiff",
"pyorbital", "pyspectral", "rasterio", "trollimage",
"zarr"]
diff --git a/satpy/cf/coords.py b/satpy/cf/coords.py
index 9220632fcb..2449ab79ee 100644
--- a/satpy/cf/coords.py
+++ b/satpy/cf/coords.py
@@ -291,8 +291,8 @@ def add_time_bounds_dimension(ds: xr.Dataset, time: str = "time") -> xr.Dataset:
if start_time is not None)
end_time = min(end_time for end_time in end_times
if end_time is not None)
- ds["time_bnds"] = xr.DataArray([[np.datetime64(start_time),
- np.datetime64(end_time)]],
+ ds["time_bnds"] = xr.DataArray([[np.datetime64(start_time, "ns"),
+ np.datetime64(end_time, "ns")]],
dims=["time", "bnds_1d"])
ds[time].attrs["bounds"] = "time_bnds"
ds[time].attrs["standard_name"] = "time"
diff --git a/satpy/composites/__init__.py b/satpy/composites/__init__.py
index 9295f94dc7..a70bbea86f 100644
--- a/satpy/composites/__init__.py
+++ b/satpy/composites/__init__.py
@@ -157,7 +157,7 @@ def apply_modifier_info(self, origin, destination):
elif o.get(k) is not None:
d[k] = o[k]
- def match_data_arrays(self, data_arrays):
+ def match_data_arrays(self, data_arrays: Sequence[xr.DataArray]) -> list[xr.DataArray]:
"""Match data arrays so that they can be used together in a composite.
For the purpose of this method, "can be used together" means:
@@ -185,32 +185,11 @@ def match_data_arrays(self, data_arrays):
"""
self.check_geolocation(data_arrays)
new_arrays = self.drop_coordinates(data_arrays)
+ new_arrays = self.align_geo_coordinates(new_arrays)
new_arrays = list(unify_chunks(*new_arrays))
return new_arrays
- def drop_coordinates(self, data_arrays):
- """Drop negligible non-dimensional coordinates.
-
- Drops negligible coordinates if they do not correspond to any
- dimension. Negligible coordinates are defined in the
- :attr:`NEGLIGIBLE_COORDS` module attribute.
-
- Args:
- data_arrays (List[arrays]): Arrays to be checked
- """
- new_arrays = []
- for ds in data_arrays:
- drop = [coord for coord in ds.coords
- if coord not in ds.dims and
- any([neglible in coord for neglible in NEGLIGIBLE_COORDS])]
- if drop:
- new_arrays.append(ds.drop(drop))
- else:
- new_arrays.append(ds)
-
- return new_arrays
-
- def check_geolocation(self, data_arrays):
+ def check_geolocation(self, data_arrays: Sequence[xr.DataArray]) -> None:
"""Check that the geolocations of the *data_arrays* are compatible.
For the purpose of this method, "compatible" means:
@@ -220,7 +199,7 @@ def check_geolocation(self, data_arrays):
- If all have an area, the areas should be all the same.
Args:
- data_arrays (List[arrays]): Arrays to be checked
+ data_arrays: Arrays to be checked
Raises:
:class:`IncompatibleAreas`:
@@ -251,6 +230,47 @@ def check_geolocation(self, data_arrays):
"'{}'".format(self.attrs["name"]))
raise IncompatibleAreas("Areas are different")
+ @staticmethod
+ def drop_coordinates(data_arrays: Sequence[xr.DataArray]) -> list[xr.DataArray]:
+ """Drop negligible non-dimensional coordinates.
+
+ Drops negligible coordinates if they do not correspond to any
+ dimension. Negligible coordinates are defined in the
+ :attr:`NEGLIGIBLE_COORDS` module attribute.
+
+ Args:
+ data_arrays (List[arrays]): Arrays to be checked
+ """
+ new_arrays = []
+ for ds in data_arrays:
+ drop = [coord for coord in ds.coords
+ if coord not in ds.dims and
+ any([negligible in coord for negligible in NEGLIGIBLE_COORDS])]
+ if drop:
+ new_arrays.append(ds.drop_vars(drop))
+ else:
+ new_arrays.append(ds)
+
+ return new_arrays
+
+ @staticmethod
+ def align_geo_coordinates(data_arrays: Sequence[xr.DataArray]) -> list[xr.DataArray]:
+ """Align DataArrays along geolocation coordinates.
+
+ See :func:`~xarray.align` for more information. This function uses
+ the "override" join method to essentially ignore differences between
+ coordinates. The :meth:`check_geolocation` method should be called before
+ this to ensure that geolocation coordinates and "area" are compatible.
+ The :meth:`drop_coordinates` method should be called before this to
+ ensure that coordinates that are considered "negligible" when computing
+ composites do not affect alignment.
+
+ """
+ non_geo_coords = tuple(
+ coord_name for data_arr in data_arrays
+ for coord_name in data_arr.coords if coord_name not in ("x", "y"))
+ return list(xr.align(*data_arrays, join="override", exclude=non_geo_coords))
+
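As a minimal standalone sketch of what the "override" join used above does (arrays and coordinate values are illustrative, not part of the patch):

```python
import numpy as np
import xarray as xr

# Two arrays whose "x" coordinates differ only by floating-point noise.
x1 = np.array([0.0, 1.0, 2.0])
x2 = x1 + 1e-12
a = xr.DataArray(np.arange(3.0), dims=("x",), coords={"x": x1})
b = xr.DataArray(np.arange(3.0) * 2, dims=("x",), coords={"x": x2})

# With the default inner join the mismatched coordinates share no common
# points; join="override" keeps the data and reuses the first array's coords.
a2, b2 = xr.align(a, b, join="override")
assert (a2.x.values == b2.x.values).all()
```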
class DifferenceCompositor(CompositeBase):
"""Make the difference of two data arrays."""
@@ -992,17 +1012,17 @@ def __call__(self, projectables, *args, **kwargs):
hrv = projectables[2]
try:
- ch3 = 3 * hrv - vis06 - vis08
+ ch3 = 3.0 * hrv - vis06 - vis08
ch3.attrs = hrv.attrs
except ValueError:
raise IncompatibleAreas
ndvi = (vis08 - vis06) / (vis08 + vis06)
- ndvi = np.where(ndvi < 0, 0, ndvi)
+ ndvi = ndvi.where(ndvi >= 0.0, 0.0)
- ch1 = ndvi * vis06 + (1 - ndvi) * vis08
+ ch1 = ndvi * vis06 + (1.0 - ndvi) * vis08
ch1.attrs = vis06.attrs
- ch2 = ndvi * vis08 + (1 - ndvi) * vis06
+ ch2 = ndvi * vis08 + (1.0 - ndvi) * vis06
ch2.attrs = vis08.attrs
res = super(RealisticColors, self).__call__((ch1, ch2, ch3),
@@ -1014,7 +1034,7 @@ class CloudCompositor(GenericCompositor):
"""Detect clouds based on thresholding and use it as a mask for compositing."""
def __init__(self, name, transition_min=258.15, transition_max=298.15, # noqa: D417
- transition_gamma=3.0, **kwargs):
+ transition_gamma=3.0, invert_alpha=False, **kwargs):
"""Collect custom configuration values.
Args:
@@ -1023,11 +1043,14 @@ def __init__(self, name, transition_min=258.15, transition_max=298.15, # noqa:
transition_max (float): Values above this are
cloud free -> transparent
transition_gamma (float): Gamma correction to apply at the end
+ invert_alpha (bool): Invert the alpha channel to make low data values transparent
+ and high data values opaque.
"""
self.transition_min = transition_min
self.transition_max = transition_max
self.transition_gamma = transition_gamma
+ self.invert_alpha = invert_alpha
super(CloudCompositor, self).__init__(name, **kwargs)
def __call__(self, projectables, **kwargs):
@@ -1049,12 +1072,179 @@ def __call__(self, projectables, **kwargs):
alpha = alpha.where(data <= tr_max, 0.)
alpha = alpha.where((data <= tr_min) | (data > tr_max), slope * data + offset)
+ if self.invert_alpha:
+ alpha.data = 1.0 - alpha.data
+
# gamma adjustment
alpha **= gamma
res = super(CloudCompositor, self).__call__((data, alpha), **kwargs)
return res
+class HighCloudCompositor(CloudCompositor):
+ """Detect high clouds based on latitude-dependent thresholding and use it as a mask for compositing.
+
+ This compositor aims at identifying high clouds and assigning them a transparency based on the brightness
+ temperature (cloud opacity). In contrast to the `CloudCompositor`, the brightness temperature threshold at
+ the lower end, used to identify high opaque clouds, is made a function of the latitude in order to have
+ tropopause level clouds appear opaque at both high and low latitudes. This follows the Geocolor
+ implementation of high clouds in Miller et al. (2020, :doi:`10.1175/JTECH-D-19-0134.1`), but
+ with some adjustments to the thresholds based on recent developments and feedback from CIRA.
+
+ The two brightness temperature thresholds in `transition_min_limits` are used together with the corresponding
+ latitude limits in `latitude_min_limits` to compute a modified version of `transition_min` that is later used
+ when calling `CloudCompositor`. The modified version of `transition_min` will be an array with the same
+ shape as the input projectable dataset, where the actual values of transition_min are a function of the
+ dataset `latitude`:
+
+ - transition_min = transition_min_limits[0] where abs(latitude) < latitude_min_limits[0]
+ - transition_min = transition_min_limits[1] where abs(latitude) > latitude_min_limits[1]
+ - transition_min = linear interpolation between transition_min_limits[0] and transition_min_limits[1]
+ as a function of abs(latitude) between latitude_min_limits[0] and latitude_min_limits[1].
+ """
+
+ def __init__(self, name, transition_min_limits=(210., 230.), latitude_min_limits=(30., 60.), # noqa: D417
+ transition_max=300, transition_gamma=1.0, **kwargs):
+ """Collect custom configuration values.
+
+ Args:
+ transition_min_limits (tuple): Brightness temperature values used to identify opaque white
+ clouds at different latitudes
+ transition_max (float): Brightness temperatures above this value are not considered to
+ be high clouds -> transparent
+ latitude_min_limits (tuple): Latitude values defining the intervals for computing latitude-dependent
+ `transition_min` values from `transition_min_limits`.
+ transition_gamma (float): Gamma correction to apply to the alpha channel within the brightness
+ temperature range (`transition_min` to `transition_max`).
+
+ """
+ if len(transition_min_limits) != 2:
+ raise ValueError(f"Expected 2 `transition_min_limits` values, got {len(transition_min_limits)}")
+ if len(latitude_min_limits) != 2:
+ raise ValueError(f"Expected 2 `latitude_min_limits` values, got {len(latitude_min_limits)}")
+ if type(transition_max) in [list, tuple]:
+ raise ValueError(f"Expected `transition_max` to be of type float, is of type {type(transition_max)}")
+
+ self.transition_min_limits = transition_min_limits
+ self.latitude_min_limits = latitude_min_limits
+ super().__init__(name, transition_min=None, transition_max=transition_max,
+ transition_gamma=transition_gamma, **kwargs)
+
+ def __call__(self, projectables, **kwargs):
+ """Generate the composite.
+
+ `projectables` is expected to be a list or tuple with a single element:
+ - index 0: Brightness temperature of a thermal infrared window channel (e.g. 10.5 microns).
+ """
+ if len(projectables) != 1:
+ raise ValueError(f"Expected 1 dataset, got {len(projectables)}")
+
+ data = projectables[0]
+ _, lats = data.attrs["area"].get_lonlats(chunks=data.chunks, dtype=data.dtype)
+ lats = np.abs(lats)
+
+ slope = (self.transition_min_limits[1] - self.transition_min_limits[0]) / \
+ (self.latitude_min_limits[1] - self.latitude_min_limits[0])
+ offset = self.transition_min_limits[0] - slope * self.latitude_min_limits[0]
+
+ # Compute pixel-level latitude dependent transition_min values and pass to parent CloudCompositor class
+ transition_min = xr.DataArray(name="transition_min", coords=data.coords, dims=data.dims).astype(data.dtype)
+ transition_min = transition_min.where(lats >= self.latitude_min_limits[0], self.transition_min_limits[0])
+ transition_min = transition_min.where(lats <= self.latitude_min_limits[1], self.transition_min_limits[1])
+ transition_min = transition_min.where((lats < self.latitude_min_limits[0]) |
+ (lats > self.latitude_min_limits[1]), slope * lats + offset)
+ self.transition_min = transition_min
+
+ return super().__call__(projectables, **kwargs)
+
+
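A small numeric sketch of the latitude-dependent threshold computed above, using the default `transition_min_limits`/`latitude_min_limits` from the signature (standalone and illustrative only):

```python
import numpy as np

t_limits = (210.0, 230.0)    # transition_min_limits (K)
lat_limits = (30.0, 60.0)    # latitude_min_limits (degrees)

slope = (t_limits[1] - t_limits[0]) / (lat_limits[1] - lat_limits[0])
offset = t_limits[0] - slope * lat_limits[0]

lats = np.array([0.0, 30.0, 45.0, 60.0, 75.0])
transition_min = np.where(lats < lat_limits[0], t_limits[0],
                          np.where(lats > lat_limits[1], t_limits[1],
                                   slope * lats + offset))
# -> [210. 210. 220. 230. 230.]: colder threshold in the tropics, warmer at
#    high latitudes, linear ramp in between.
```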
+class LowCloudCompositor(CloudCompositor):
+ """Detect low-level clouds based on thresholding and use it as a mask for compositing during night-time.
+
+ This compositor computes the brightness temperature difference between a window channel (e.g. 10.5 micron)
+ and a near-infrared channel (e.g. 3.8 micron) and uses this brightness temperature difference, `BTD`, to
+ create a partially transparent mask for compositing.
+
+ Pixels with `BTD` values below a given threshold will be transparent, whereas pixels with `BTD` values
+ above another threshold will be opaque. The transparency of all other `BTD` values will be a linear
+ function of the `BTD` value itself. Two sets of thresholds are used, one set for land surface types
+ (`range_land`) and another one for water surface types (`range_water`), respectively. Hence,
+ this compositor requires a land-water-mask as a prerequisite input. This follows the GeoColor
+ implementation of night-time low-level clouds in Miller et al. (2020, :doi:`10.1175/JTECH-D-19-0134.1`), but
+ with some adjustments to the thresholds based on recent developments and feedback from CIRA.
+
+ Please note that the spectral test and thus the output of the compositor (using the expected input data) is
+ only applicable during night-time.
+ """
+
+ def __init__(self, name, values_land=(1,), values_water=(0,), # noqa: D417
+ range_land=(0.0, 4.0),
+ range_water=(0.0, 4.0),
+ transition_gamma=1.0,
+ invert_alpha=True, **kwargs):
+ """Init info.
+
+ Collect custom configuration values.
+
+ Args:
+ values_land (list): List of values used to identify land surface pixels in the land-water-mask.
+ values_water (list): List of values used to identify water surface pixels in the land-water-mask.
+ range_land (tuple): Threshold values used for masking low-level clouds from the brightness temperature
+ difference over land surface types.
+ range_water (tuple): Threshold values used for masking low-level clouds from the brightness temperature
+ difference over water.
+ transition_gamma (float): Gamma correction to apply to the alpha channel within the brightness
+ temperature difference range.
+ invert_alpha (bool): Invert the alpha channel to make low data values transparent
+ and high data values opaque.
+ """
+ if len(range_land) != 2:
+ raise ValueError(f"Expected 2 `range_land` values, got {len(range_land)}")
+ if len(range_water) != 2:
+ raise ValueError(f"Expected 2 `range_water` values, got {len(range_water)}")
+
+ self.values_land = values_land if type(values_land) in [list, tuple] else [values_land]
+ self.values_water = values_water if type(values_water) in [list, tuple] else [values_water]
+ self.range_land = range_land
+ self.range_water = range_water
+ super().__init__(name, transition_min=None, transition_max=None,
+ transition_gamma=transition_gamma, invert_alpha=invert_alpha, **kwargs)
+
+ def __call__(self, projectables, **kwargs):
+ """Generate the composite.
+
+ `projectables` is expected to be a list or tuple with the following three elements:
+ - index 0: Brightness temperature difference between a window channel (e.g. 10.5 micron) and a
+ near-infrared channel (e.g. 3.8 micron).
+ - index 1: Brightness temperature of the window channel (used to filter out noise-induced false alarms).
+ - index 2: Land-Sea-Mask.
+ """
+ if len(projectables) != 3:
+ raise ValueError(f"Expected 3 datasets, got {len(projectables)}")
+
+ projectables = self.match_data_arrays(projectables)
+ btd, bt_win, lsm = projectables
+ lsm = lsm.squeeze(drop=True)
+ lsm = lsm.round() # Make sure to have whole numbers in case of smearing from resampling
+
+ # Call CloudCompositor for land surface pixels
+ self.transition_min, self.transition_max = self.range_land
+ res = super().__call__([btd.where(lsm.isin(self.values_land))], **kwargs)
+
+ # Call CloudCompositor for water surface pixels
+ self.transition_min, self.transition_max = self.range_water
+ res_water = super().__call__([btd.where(lsm.isin(self.values_water))], **kwargs)
+
+ # Combine results for land and water surface pixels
+ res = res.where(lsm.isin(self.values_land), res_water)
+
+ # Make pixels with cold window channel brightness temperatures transparent to avoid spurious false
+ # alarms caused by noise in the 3.9um channel that can occur for very cold cloud tops
+ res.loc["A"] = res.sel(bands="A").where(bt_win >= 230, 0.0)
+
+ return res
+
+
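The per-pixel alpha that both new compositors inherit from `CloudCompositor` can be sketched numerically as below (illustrative values; `np.clip` stands in for the `where` branches, and with `transition_gamma=1.0` the gamma step is a no-op):

```python
import numpy as np

btd = np.array([0.5, 2.0, 5.0])   # window minus 3.9 um brightness temperature (K)
tr_min, tr_max = 0.0, 4.0         # e.g. the default range_land

slope = 1.0 / (tr_min - tr_max)
offset = 1.0 - slope * tr_min
alpha = np.clip(slope * btd + offset, 0.0, 1.0)

# invert_alpha=True: small BTD -> transparent (no low cloud), large BTD -> opaque
alpha = 1.0 - alpha
# -> [0.125 0.5   1.   ]
```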
class RatioSharpenedRGB(GenericCompositor):
"""Sharpen RGB bands with ratio of a high resolution band to a lower resolution version.
@@ -1180,7 +1370,8 @@ def _combined_sharpened_info(self, info, new_attrs):
def _get_sharpening_ratio(high_res, low_res):
- ratio = high_res / low_res
+ with np.errstate(divide="ignore"):
+ ratio = high_res / low_res
# make ratio a no-op (multiply by 1) where the ratio is NaN, infinity,
# or it is negative.
ratio[~np.isfinite(ratio) | (ratio < 0)] = 1.0
diff --git a/satpy/composites/ahi.py b/satpy/composites/ahi.py
index bb96a94581..4826f84820 100644
--- a/satpy/composites/ahi.py
+++ b/satpy/composites/ahi.py
@@ -14,7 +14,3 @@
# You should have received a copy of the GNU General Public License along with
# satpy. If not, see <http://www.gnu.org/licenses/>.
"""Composite classes for AHI."""
-
-# The green corrector used to be defined here, but was moved to spectral.py
-# in Satpy 0.38 because it also applies to FCI.
-from .spectral import GreenCorrector # noqa: F401
diff --git a/satpy/composites/spectral.py b/satpy/composites/spectral.py
index 448d7cb26a..f7219ec94d 100644
--- a/satpy/composites/spectral.py
+++ b/satpy/composites/spectral.py
@@ -16,7 +16,6 @@
"""Composite classes for spectral adjustments."""
import logging
-import warnings
from satpy.composites import GenericCompositor
from satpy.dataset import combine_metadata
@@ -160,9 +159,9 @@ def __call__(self, projectables, optional_datasets=None, **attrs):
LOG.info(f"Applying NDVI-weighted hybrid-green correction with limits [{self.limits[0]}, "
f"{self.limits[1]}] and strength {self.strength}.")
- ndvi_input = self.match_data_arrays([projectables[1], projectables[2]])
+ projectables = self.match_data_arrays(projectables)
- ndvi = (ndvi_input[1] - ndvi_input[0]) / (ndvi_input[1] + ndvi_input[0])
+ ndvi = (projectables[2] - projectables[1]) / (projectables[2] + projectables[1])
ndvi = ndvi.clip(self.ndvi_min, self.ndvi_max)
@@ -199,23 +198,3 @@ def _compute_blend_fraction(self, ndvi):
+ self.limits[0]
return fraction
-
-
-class GreenCorrector(SpectralBlender):
- """Previous class used to blend channels for green band corrections.
-
- This method has been refactored to make it more generic. The replacement class is 'SpectralBlender' which computes
- a weighted average based on N number of channels and N number of corresponding weights/fractions. A new class
- called 'HybridGreen' has been created, which performs a correction of green bands centered at 0.51 microns
- following Miller et al. (2016, :doi:`10.1175/BAMS-D-15-00154.2`) in order to improve true color imagery.
- """
-
- def __init__(self, *args, fractions=(0.85, 0.15), **kwargs):
- """Set default keyword argument values."""
- warnings.warn(
- "'GreenCorrector' is deprecated, use 'SpectralBlender' instead, or 'HybridGreen' for hybrid green"
- " correction following Miller et al. (2016).",
- UserWarning,
- stacklevel=2
- )
- super().__init__(fractions=fractions, *args, **kwargs)
diff --git a/satpy/etc/composites/abi.yaml b/satpy/etc/composites/abi.yaml
index 1437b91df4..4700aa470b 100644
--- a/satpy/etc/composites/abi.yaml
+++ b/satpy/etc/composites/abi.yaml
@@ -752,3 +752,57 @@ composites:
- name: green_nocorr
- name: C01
standard_name: true_color_reproduction_color_stretch
+
+ # GeoColor
+ geo_color:
+ compositor: !!python/name:satpy.composites.DayNightCompositor
+ description: >
+ GeoColor is a multi-layer blended RGB composite where the day-time part of the image is represented by true
+ color imagery and the nighttime part of the image by a three layer vertically blended stack composed of a
+ high-level cloud layer (single IR window channel), a low-level cloud layer (IR split window) and a static
+ surface terrain layer with city lights (NASA Black Marble).
+ references:
+ Research Article: https://journals.ametsoc.org/view/journals/atot/37/3/JTECH-D-19-0134.1.xml
+ lim_low: 78
+ lim_high: 88
+ standard_name: geo_color_day_night_blend
+ prerequisites:
+ - true_color
+ - geo_color_night
+
+ # GeoColor Night-time
+ geo_color_high_clouds:
+ standard_name: geo_color_high_clouds
+ compositor: !!python/name:satpy.composites.HighCloudCompositor
+ prerequisites:
+ - name: C13
+
+ geo_color_low_clouds:
+ standard_name: geo_color_low_clouds
+ compositor: !!python/name:satpy.composites.LowCloudCompositor
+ values_water: 0
+ values_land: 100
+ prerequisites:
+ - compositor: !!python/name:satpy.composites.DifferenceCompositor
+ prerequisites:
+ - name: C13
+ - name: C07
+ - name: C13
+ - compositor: !!python/name:satpy.composites.StaticImageCompositor
+ standard_name: land_water_mask
+ url: "https://zenodo.org/records/10076199/files/gshhs_land_water_mask_3km_i.tif"
+ known_hash: "sha256:96df83c57416217e191f95dde3d3c1ce0373a8fc220e929228873db246ca3569"
+
+ geo_color_background_with_low_clouds:
+ compositor: !!python/name:satpy.composites.BackgroundCompositor
+ standard_name: night_ir_with_background
+ prerequisites:
+ - geo_color_low_clouds
+ - _night_background_hires
+
+ geo_color_night:
+ compositor: !!python/name:satpy.composites.BackgroundCompositor
+ standard_name: night_ir_with_background
+ prerequisites:
+ - geo_color_high_clouds
+ - geo_color_background_with_low_clouds
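A possible way to use the new recipe from Python (paths are placeholders; the same composite name is added for AHI and FCI further below):

```python
from glob import glob

from satpy import Scene

# Hypothetical ABI L1b full-disk granules covering both day and night
scn = Scene(reader="abi_l1b", filenames=glob("/data/goes16/OR_ABI-L1b-RadF*.nc"))
scn.load(["geo_color"])

# The recipe mixes 0.5-2 km bands, so resample (native is enough) before saving;
# the land-water mask is downloaded on demand from the Zenodo URL above.
resampled = scn.resample(resampler="native")
resampled.save_dataset("geo_color", filename="geo_color.png")
```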
diff --git a/satpy/etc/composites/ahi.yaml b/satpy/etc/composites/ahi.yaml
index cda79a5fac..5d633056be 100644
--- a/satpy/etc/composites/ahi.yaml
+++ b/satpy/etc/composites/ahi.yaml
@@ -15,46 +15,6 @@ modifiers:
- solar_zenith_angle
composites:
- green:
- deprecation_warning: "'green' is a deprecated composite. Use the equivalent 'hybrid_green' instead."
- compositor: !!python/name:satpy.composites.spectral.HybridGreen
- # FUTURE: Set a wavelength...see what happens. Dependency finding
- # probably wouldn't work.
- prerequisites:
- # should we be using the most corrected or least corrected inputs?
- # what happens if something requests more modifiers on top of this?
- - wavelength: 0.51
- modifiers: [sunz_corrected, rayleigh_corrected]
- - wavelength: 0.85
- modifiers: [sunz_corrected]
- standard_name: toa_bidirectional_reflectance
-
- green_true_color_reproduction:
- # JMA True Color Reproduction green band
- # http://www.jma.go.jp/jma/jma-eng/satellite/introduction/TCR.html
- deprecation_warning: "'green_true_color_reproduction' is a deprecated composite. Use the equivalent 'reproduced_green' instead."
- compositor: !!python/name:satpy.composites.spectral.SpectralBlender
- fractions: [0.6321, 0.2928, 0.0751]
- prerequisites:
- - name: B02
- modifiers: [sunz_corrected, rayleigh_corrected]
- - name: B03
- modifiers: [sunz_corrected, rayleigh_corrected]
- - name: B04
- modifiers: [sunz_corrected]
- standard_name: none
-
- green_nocorr:
- deprecation_warning: "'green_nocorr' is a deprecated composite. Use the equivalent 'hybrid_green_nocorr' instead."
- compositor: !!python/name:satpy.composites.spectral.HybridGreen
- # FUTURE: Set a wavelength...see what happens. Dependency finding
- # probably wouldn't work.
- prerequisites:
- # should we be using the most corrected or least corrected inputs?
- # what happens if something requests more modifiers on top of this?
- - wavelength: 0.51
- - wavelength: 0.85
- standard_name: toa_reflectance
hybrid_green:
compositor: !!python/name:satpy.composites.spectral.HybridGreen
@@ -501,3 +461,57 @@ composites:
prerequisites:
- night_ir_alpha
- _night_background_hires
+
+ # GeoColor
+ geo_color:
+ compositor: !!python/name:satpy.composites.DayNightCompositor
+ description: >
+ GeoColor is a multi-layer blended RGB composite where the day-time part of the image is represented by true
+ color imagery and the nighttime part of the image by a three layer vertically blended stack composed of a
+ high-level cloud layer (single IR window channel), a low-level cloud layer (IR split window) and a static
+ surface terrain layer with city lights (NASA Black Marble).
+ references:
+ Research Article: https://journals.ametsoc.org/view/journals/atot/37/3/JTECH-D-19-0134.1.xml
+ lim_low: 78
+ lim_high: 88
+ standard_name: geo_color_day_night_blend
+ prerequisites:
+ - true_color_ndvi_green
+ - geo_color_night
+
+ # GeoColor Night-time
+ geo_color_high_clouds:
+ standard_name: geo_color_high_clouds
+ compositor: !!python/name:satpy.composites.HighCloudCompositor
+ prerequisites:
+ - name: B13
+
+ geo_color_low_clouds:
+ standard_name: geo_color_low_clouds
+ compositor: !!python/name:satpy.composites.LowCloudCompositor
+ values_water: 0
+ values_land: 100
+ prerequisites:
+ - compositor: !!python/name:satpy.composites.DifferenceCompositor
+ prerequisites:
+ - name: B13
+ - name: B07
+ - name: B13
+ - compositor: !!python/name:satpy.composites.StaticImageCompositor
+ standard_name: land_water_mask
+ url: "https://zenodo.org/records/10076199/files/gshhs_land_water_mask_3km_i.tif"
+ known_hash: "sha256:96df83c57416217e191f95dde3d3c1ce0373a8fc220e929228873db246ca3569"
+
+ geo_color_background_with_low_clouds:
+ compositor: !!python/name:satpy.composites.BackgroundCompositor
+ standard_name: night_ir_with_background
+ prerequisites:
+ - geo_color_low_clouds
+ - _night_background_hires
+
+ geo_color_night:
+ compositor: !!python/name:satpy.composites.BackgroundCompositor
+ standard_name: night_ir_with_background
+ prerequisites:
+ - geo_color_high_clouds
+ - geo_color_background_with_low_clouds
diff --git a/satpy/etc/composites/fci.yaml b/satpy/etc/composites/fci.yaml
index 366b8bbc20..0f0e98f4e0 100644
--- a/satpy/etc/composites/fci.yaml
+++ b/satpy/etc/composites/fci.yaml
@@ -124,3 +124,59 @@ composites:
- name: ndvi_hybrid_green_raw
- name: vis_04
standard_name: true_color_reproduction_color_stretch
+
+ # GeoColor
+ geo_color:
+ compositor: !!python/name:satpy.composites.DayNightCompositor
+ description: >
+ GeoColor is a multi-layer blended RGB composite where the day-time part of the image is represented by true
+ color imagery and the nighttime part of the image by a three layer vertically blended stack composed of a
+ high-level cloud layer (single IR window channel), a low-level cloud layer (IR split window) and a static
+ surface terrain layer with city lights (NASA Black Marble).
+ references:
+ Research Article: https://journals.ametsoc.org/view/journals/atot/37/3/JTECH-D-19-0134.1.xml
+ lim_low: 78
+ lim_high: 88
+ standard_name: geo_color_day_night_blend
+ prerequisites:
+ - true_color
+ - geo_color_night
+
+ # GeoColor Night-time
+ geo_color_high_clouds:
+ standard_name: geo_color_high_clouds
+ compositor: !!python/name:satpy.composites.HighCloudCompositor
+ prerequisites:
+ - name: ir_105
+
+ geo_color_low_clouds:
+ standard_name: geo_color_low_clouds
+ compositor: !!python/name:satpy.composites.LowCloudCompositor
+ values_water: 0
+ values_land: 100
+ range_water: [1.35, 5.0]
+ range_land: [4.35, 6.75]
+ prerequisites:
+ - compositor: !!python/name:satpy.composites.DifferenceCompositor
+ prerequisites:
+ - name: ir_105
+ - name: ir_38
+ - name: ir_105
+ - compositor: !!python/name:satpy.composites.StaticImageCompositor
+ standard_name: land_water_mask
+ url: "https://zenodo.org/records/10076199/files/gshhs_land_water_mask_3km_i.tif"
+ known_hash: "sha256:96df83c57416217e191f95dde3d3c1ce0373a8fc220e929228873db246ca3569"
+
+ geo_color_background_with_low_clouds:
+ compositor: !!python/name:satpy.composites.BackgroundCompositor
+ standard_name: night_ir_with_background
+ prerequisites:
+ - geo_color_low_clouds
+ - _night_background_hires
+
+ geo_color_night:
+ compositor: !!python/name:satpy.composites.BackgroundCompositor
+ standard_name: night_ir_with_background
+ prerequisites:
+ - geo_color_high_clouds
+ - geo_color_background_with_low_clouds
diff --git a/satpy/etc/enhancements/generic.yaml b/satpy/etc/enhancements/generic.yaml
index c9e116310b..25680d6db9 100644
--- a/satpy/etc/enhancements/generic.yaml
+++ b/satpy/etc/enhancements/generic.yaml
@@ -954,6 +954,41 @@ enhancements:
kwargs:
weight: 1.0
+ geo_color_high_clouds:
+ standard_name: geo_color_high_clouds
+ operations:
+ - name: inverse
+ method: !!python/name:satpy.enhancements.invert
+ args:
+ - [True, false]
+ - name: stretch
+ method: !!python/name:satpy.enhancements.stretch
+ kwargs:
+ stretch: crude
+
+ geo_color_low_clouds:
+ standard_name: geo_color_low_clouds
+ operations:
+ - name: stretch
+ method: !!python/name:satpy.enhancements.stretch
+ kwargs:
+ stretch: crude
+ - name: colorize
+ method: !!python/name:satpy.enhancements.colorize
+ kwargs:
+ palettes:
+ - {colors: [[140.25, 191.25, 249.9]]}
+
+ geo_color_day_night_blend:
+ standard_name: geo_color_day_night_blend
+ operations:
+ - name: stretch
+ method: !!python/name:satpy.enhancements.stretch
+ kwargs:
+ stretch: crude
+ min_stretch: [0, 0, 0]
+ max_stretch: [1, 1, 1]
+
colorized_ir_clouds:
standard_name: colorized_ir_clouds
operations:
diff --git a/satpy/etc/readers/fci_l2_nc.yaml b/satpy/etc/readers/fci_l2_nc.yaml
index 20d9935682..1ad5d576a0 100644
--- a/satpy/etc/readers/fci_l2_nc.yaml
+++ b/satpy/etc/readers/fci_l2_nc.yaml
@@ -64,6 +64,16 @@ file_types:
file_patterns:
- '{pflag}_{location_indicator},{data_designator},MTI{spacecraft_id:1d}+FCI-2-ASR-{subtype}-{coverage}-{subsetting}-{component1}-{component2}-{component3}-{purpose}-{format}_{oflag}_{originator}_{processing_time:%Y%m%d%H%M%S}_{facility_or_tool}_{environment}_{start_time:%Y%m%d%H%M%S}_{end_time:%Y%m%d%H%M%S}_{processing_mode}_{special_compression}_{disposition_mode}_{repeat_cycle_in_day:>04d}_{count_in_repeat_cycle:>04d}.nc'
+ nc_fci_amvi:
+ file_reader: !!python/name:satpy.readers.fci_l2_nc.FciL2NCAMVFileHandler
+ file_patterns:
+ - '{pflag}_{location_indicator},{data_designator},MTI{spacecraft_id:1d}+FCI-2-AMVI-{channel}-{coverage}-{subsetting}-{component1}-{component2}-{component3}-{purpose}-{format}_{oflag}_{originator}_{processing_time:%Y%m%d%H%M%S}_{facility_or_tool}_{environment}_{start_time:%Y%m%d%H%M%S}_{end_time:%Y%m%d%H%M%S}_{processing_mode}_{special_compression}_{disposition_mode}_{repeat_cycle_in_day:>04d}_{count_in_repeat_cycle:>04d}.nc'
+
+ nc_fci_amv:
+ file_reader: !!python/name:satpy.readers.fci_l2_nc.FciL2NCAMVFileHandler
+ file_patterns:
+ - '{pflag}_{location_indicator},{data_designator},MTI{spacecraft_id:1d}+FCI-2-AMV-{channel}-{coverage}-{subsetting}-{component1}-{component2}-{component3}-{purpose}-{format}_{oflag}_{originator}_{processing_time:%Y%m%d%H%M%S}_{facility_or_tool}_{environment}_{start_time:%Y%m%d%H%M%S}_{end_time:%Y%m%d%H%M%S}_{processing_mode}_{special_compression}_{disposition_mode}_{repeat_cycle_in_day:>04d}_{count_in_repeat_cycle:>04d}.nc'
+
datasets:
# CLM
@@ -2734,3 +2744,260 @@ datasets:
file_type: nc_fci_asr
file_key: product_timeliness
long_name: product_timeliness_index
+
+# AMV Intermediate Product
+ intm_latitude:
+ name: intm_latitude
+ file_type: nc_fci_amvi
+ file_key: intm_latitude
+ standard_name: latitude
+
+ intm_longitude:
+ name: intm_longitude
+ file_type: nc_fci_amvi
+ file_key: intm_longitude
+ standard_name: longitude
+
+ intm_speed:
+ name: intm_speed
+ file_type: nc_fci_amvi
+ file_key: intm_speed
+ standard_name: wind_speed
+ coordinates:
+ - intm_longitude
+ - intm_latitude
+
+ intm_u_component:
+ name: intm_u_component
+ file_type: nc_fci_amvi
+ file_key: intm_u_component
+ standard_name: wind_speed_horizontal_component
+ coordinates:
+ - intm_longitude
+ - intm_latitude
+
+ intm_v_component:
+ name: intm_v_component
+ file_type: nc_fci_amvi
+ file_key: intm_v_component
+ standard_name: wind_speed_vertical_component
+ coordinates:
+ - intm_longitude
+ - intm_latitude
+
+ intm_direction:
+ name: intm_direction
+ file_type: nc_fci_amvi
+ file_key: intm_direction
+ standard_name: wind_to_direction
+ coordinates:
+ - intm_longitude
+ - intm_latitude
+
+ intm_pressure:
+ name: intm_pressure
+ file_type: nc_fci_amvi
+ file_key: intm_pressure
+ standard_name: wind_pressure
+ coordinates:
+ - intm_longitude
+ - intm_latitude
+
+ intm_temperature:
+ name: intm_temperature
+ file_type: nc_fci_amvi
+ file_key: intm_temperature
+ standard_name: wind_temperature
+ coordinates:
+ - intm_longitude
+ - intm_latitude
+
+ intm_target_type:
+ name: intm_target_type
+ file_type: nc_fci_amvi
+ file_key: target_type
+ standard_name: wind_target_type
+ coordinates:
+ - intm_longitude
+ - intm_latitude
+
+ intm_wind_method:
+ name: intm_wind_method
+ file_type: nc_fci_amvi
+ file_key: wind_method
+ standard_name: wind_wind_method
+ coordinates:
+ - intm_longitude
+ - intm_latitude
+
+# AMV Final Product
+ channel_id:
+ name: channel_id
+ file_type: nc_fci_amv
+ file_key: channel_id
+ standard_name: channel_id
+
+ amv_latitude:
+ name: latitude
+ file_type: nc_fci_amv
+ file_key: latitude
+ standard_name: latitude
+
+ amv_longitude:
+ name: longitude
+ file_type: nc_fci_amv
+ file_key: longitude
+ standard_name: longitude
+
+ speed:
+ name: speed
+ file_type: nc_fci_amv
+ file_key: speed
+ standard_name: wind_speed
+ coordinates:
+ - longitude
+ - latitude
+
+ speed_u_component:
+ name: speed_u_component
+ file_type: nc_fci_amv
+ file_key: speed_u_component
+ standard_name: wind_speed_horizontal_component
+ coordinates:
+ - longitude
+ - latitude
+
+ speed_v_component:
+ name: speed_v_component
+ file_type: nc_fci_amv
+ file_key: speed_v_component
+ standard_name: wind_speed_vertical_component
+ coordinates:
+ - longitude
+ - latitude
+
+ direction:
+ name: direction
+ file_type: nc_fci_amv
+ file_key: direction
+ standard_name: wind_to_direction
+ coordinates:
+ - longitude
+ - latitude
+
+ pressure:
+ name: pressure
+ file_type: nc_fci_amv
+ file_key: pressure
+ standard_name: wind_pressure
+ coordinates:
+ - longitude
+ - latitude
+
+ temperature:
+ name: temperature
+ file_type: nc_fci_amv
+ file_key: temperature
+ standard_name: wind_temperature
+ coordinates:
+ - longitude
+ - latitude
+
+ target_type:
+ name: target_type
+ file_type: nc_fci_amv
+ file_key: target_type
+ standard_name: wind_target_type
+ coordinates:
+ - longitude
+ - latitude
+
+ wind_method:
+ name: wind_method
+ file_type: nc_fci_amv
+ file_key: wind_method
+ standard_name: wind_wind_method
+ coordinates:
+ - longitude
+ - latitude
+
+ fcst_u:
+ name: fcst_u
+ file_type: nc_fci_amv
+ file_key: forecast_u_component
+ standard_name: wind_forecast_u_component
+ coordinates:
+ - longitude
+ - latitude
+
+ fcst_v:
+ name: fcst_v
+ file_type: nc_fci_amv
+ file_key: forecast_v_component
+ standard_name: wind_forecast_v_component
+ coordinates:
+ - longitude
+ - latitude
+
+ best_fit_pres:
+ name: best_fit_pres
+ file_type: nc_fci_amv
+ file_key: best_fit_pressure
+ standard_name: wind_best_fit_pressure
+ coordinates:
+ - longitude
+ - latitude
+
+ best_fit_u:
+ name: best_fit_u
+ file_type: nc_fci_amv
+ file_key: best_fit_u_component
+ standard_name: wind_best_fit_u_component
+ coordinates:
+ - longitude
+ - latitude
+
+ best_fit_v:
+ name: best_fit_v
+ file_type: nc_fci_amv
+ file_key: best_fit_v_component
+ standard_name: wind_best_fit_v_component
+ coordinates:
+ - longitude
+ - latitude
+
+ qi:
+ name: qi
+ file_type: nc_fci_amv
+ file_key: overall_reliability
+ standard_name: wind_overall_reliability
+ coordinates:
+ - longitude
+ - latitude
+
+ qi_excl_fcst:
+ name: qi_excl_fcst
+ file_type: nc_fci_amv
+ file_key: overall_reliability_exc_forecast
+ standard_name: wind_overall_reliability_exc_forecast
+ coordinates:
+ - longitude
+ - latitude
+
+ product_quality:
+ name: product_quality
+ file_type: nc_fci_amv
+ file_key: product_quality
+ long_name: product_quality_index
+
+ product_completeness:
+ name: product_completeness
+ file_type: nc_fci_amv
+ file_key: product_completeness
+ long_name: product_completeness_index
+
+ product_timeliness:
+ name: product_timeliness
+ file_type: nc_fci_amv
+ file_key: product_timeliness
+ long_name: product_timeliness_index
diff --git a/satpy/etc/readers/mirs.yaml b/satpy/etc/readers/mirs.yaml
index 4e70fbed2c..5ca15f66b0 100644
--- a/satpy/etc/readers/mirs.yaml
+++ b/satpy/etc/readers/mirs.yaml
@@ -8,13 +8,13 @@ reader:
reader: !!python/name:satpy.readers.yaml_reader.FileYAMLReader
sensors: [amsu, amsu-mhs, atms, ssmis, gmi]
data_files:
- - url: "https://zenodo.org/record/4472664/files/limbcoef_atmsland_noaa20.txt"
- known_hash: "08a3b7c1594a963610dd864b7ecd12f0ab486412d35185c2371d924dd92c5779"
- - url: "https://zenodo.org/record/4472664/files/limbcoef_atmsland_snpp.txt"
+ - url: "https://zenodo.org/record/10357932/files/limbcoef_atmsland_noaa20.txt"
+ known_hash: "08deca15afe8638effac9e6ccb442c2c386f5444926129d30a250d5840264c1d"
+ - url: "https://zenodo.org/record/10357932/files/limbcoef_atmsland_snpp.txt"
known_hash: "4b01543699792306711ef1699244e96186487e8a869e4ae42bf1f0e4d00fd063"
- - url: "https://zenodo.org/record/4472664/files/limbcoef_atmssea_noaa20.txt"
- known_hash: "6853d0536b11c31dc130ab12c61fa322a76d3823a4b8ff9a18a0ecedbf269a88"
- - url: "https://zenodo.org/record/4472664/files/limbcoef_atmssea_snpp.txt"
+ - url: "https://zenodo.org/record/10357932/files/limbcoef_atmssea_noaa20.txt"
+ known_hash: "07cd7874ff3f069cc3d473bdd0d1d19880ef01ac8d75cb0212a3687c059557f4"
+ - url: "https://zenodo.org/record/10357932/files/limbcoef_atmssea_snpp.txt"
known_hash: "d0f806051b80320e046bdae6a9b68616152bbf8c2dbf3667b9834459259c0d72"
file_types:
diff --git a/satpy/readers/ahi_hsd.py b/satpy/readers/ahi_hsd.py
index 8e14d049b9..313e5ccab5 100644
--- a/satpy/readers/ahi_hsd.py
+++ b/satpy/readers/ahi_hsd.py
@@ -419,12 +419,12 @@ def end_time(self):
@property
def observation_start_time(self):
"""Get the observation start time."""
- return datetime(1858, 11, 17) + timedelta(days=float(self.basic_info["observation_start_time"]))
+ return datetime(1858, 11, 17) + timedelta(days=float(self.basic_info["observation_start_time"].item()))
@property
def observation_end_time(self):
"""Get the observation end time."""
- return datetime(1858, 11, 17) + timedelta(days=float(self.basic_info["observation_end_time"]))
+ return datetime(1858, 11, 17) + timedelta(days=float(self.basic_info["observation_end_time"].item()))
@property
def nominal_start_time(self):
@@ -498,8 +498,8 @@ def _get_area_def(self):
pdict["h"] = float(self.proj_info["distance_from_earth_center"] * 1000 - pdict["a"])
pdict["b"] = float(self.proj_info["earth_polar_radius"] * 1000)
pdict["ssp_lon"] = float(self.proj_info["sub_lon"])
- pdict["nlines"] = int(self.data_info["number_of_lines"])
- pdict["ncols"] = int(self.data_info["number_of_columns"])
+ pdict["nlines"] = int(self.data_info["number_of_lines"].item())
+ pdict["ncols"] = int(self.data_info["number_of_columns"].item())
pdict["scandir"] = "N2S"
pdict["loff"] = pdict["loff"] + (self.segment_number * pdict["nlines"])
@@ -528,19 +528,19 @@ def _read_header(self, fp_):
fpos = 0
header["block1"] = np.fromfile(
fp_, dtype=_BASIC_INFO_TYPE, count=1)
- fpos = fpos + int(header["block1"]["blocklength"])
+ fpos = fpos + int(header["block1"]["blocklength"].item())
self._check_fpos(fp_, fpos, 0, "block1")
fp_.seek(fpos, 0)
header["block2"] = np.fromfile(fp_, dtype=_DATA_INFO_TYPE, count=1)
- fpos = fpos + int(header["block2"]["blocklength"])
+ fpos = fpos + int(header["block2"]["blocklength"].item())
self._check_fpos(fp_, fpos, 0, "block2")
fp_.seek(fpos, 0)
header["block3"] = np.fromfile(fp_, dtype=_PROJ_INFO_TYPE, count=1)
- fpos = fpos + int(header["block3"]["blocklength"])
+ fpos = fpos + int(header["block3"]["blocklength"].item())
self._check_fpos(fp_, fpos, 0, "block3")
fp_.seek(fpos, 0)
header["block4"] = np.fromfile(fp_, dtype=_NAV_INFO_TYPE, count=1)
- fpos = fpos + int(header["block4"]["blocklength"])
+ fpos = fpos + int(header["block4"]["blocklength"].item())
self._check_fpos(fp_, fpos, 0, "block4")
fp_.seek(fpos, 0)
header["block5"] = np.fromfile(fp_, dtype=_CAL_INFO_TYPE, count=1)
@@ -553,7 +553,7 @@ def _read_header(self, fp_):
cal = np.fromfile(fp_, dtype=_VISCAL_INFO_TYPE, count=1)
else:
cal = np.fromfile(fp_, dtype=_IRCAL_INFO_TYPE, count=1)
- fpos = fpos + int(header["block5"]["blocklength"])
+ fpos = fpos + int(header["block5"]["blocklength"].item())
self._check_fpos(fp_, fpos, 0, "block5")
fp_.seek(fpos, 0)
@@ -561,12 +561,12 @@ def _read_header(self, fp_):
header["block6"] = np.fromfile(
fp_, dtype=_INTER_CALIBRATION_INFO_TYPE, count=1)
- fpos = fpos + int(header["block6"]["blocklength"])
+ fpos = fpos + int(header["block6"]["blocklength"].item())
self._check_fpos(fp_, fpos, 0, "block6")
fp_.seek(fpos, 0)
header["block7"] = np.fromfile(
fp_, dtype=_SEGMENT_INFO_TYPE, count=1)
- fpos = fpos + int(header["block7"]["blocklength"])
+ fpos = fpos + int(header["block7"]["blocklength"].item())
self._check_fpos(fp_, fpos, 0, "block7")
fp_.seek(fpos, 0)
header["block8"] = np.fromfile(
@@ -576,7 +576,7 @@ def _read_header(self, fp_):
corrections = []
for _i in range(ncorrs):
corrections.append(np.fromfile(fp_, dtype=_NAVIGATION_CORRECTION_SUBINFO_TYPE, count=1))
- fpos = fpos + int(header["block8"]["blocklength"])
+ fpos = fpos + int(header["block8"]["blocklength"].item())
self._check_fpos(fp_, fpos, 40, "block8")
fp_.seek(fpos, 0)
header["navigation_corrections"] = corrections
@@ -591,7 +591,7 @@ def _read_header(self, fp_):
dtype=_OBSERVATION_LINE_TIME_INFO_TYPE,
count=1))
header["observation_time_information"] = lines_and_times
- fpos = fpos + int(header["block9"]["blocklength"])
+ fpos = fpos + int(header["block9"]["blocklength"].item())
self._check_fpos(fp_, fpos, 40, "block9")
fp_.seek(fpos, 0)
@@ -604,12 +604,12 @@ def _read_header(self, fp_):
for _i in range(num_err_info_data):
err_info_data.append(np.fromfile(fp_, dtype=_ERROR_LINE_INFO_TYPE, count=1))
header["error_information_data"] = err_info_data
- fpos = fpos + int(header["block10"]["blocklength"])
+ fpos = fpos + int(header["block10"]["blocklength"].item())
self._check_fpos(fp_, fpos, 40, "block10")
fp_.seek(fpos, 0)
header["block11"] = np.fromfile(fp_, dtype=_SPARE_TYPE, count=1)
- fpos = fpos + int(header["block11"]["blocklength"])
+ fpos = fpos + int(header["block11"]["blocklength"].item())
self._check_fpos(fp_, fpos, 0, "block11")
fp_.seek(fpos, 0)
@@ -617,8 +617,8 @@ def _read_header(self, fp_):
def _read_data(self, fp_, header, resolution):
"""Read data block."""
- nlines = int(header["block2"]["number_of_lines"][0])
- ncols = int(header["block2"]["number_of_columns"][0])
+ nlines = int(header["block2"]["number_of_lines"].item())
+ ncols = int(header["block2"]["number_of_columns"].item())
chunks = normalize_low_res_chunks(
("auto", "auto"),
(nlines, ncols),
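For reference, the behaviour the added `.item()` calls avoid can be reproduced with a one-element array (illustrative; NumPy 1.25+ deprecates implicit conversion of ndim > 0 arrays to scalars):

```python
import numpy as np

blocklength = np.array([282], dtype=">u2")  # size-1 field as returned by np.fromfile

int(blocklength)         # still works, but warns on NumPy >= 1.25
int(blocklength.item())  # explicit scalar extraction, no warning
```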
diff --git a/satpy/readers/fci_l2_nc.py b/satpy/readers/fci_l2_nc.py
index c387326f89..78020cdcf9 100644
--- a/satpy/readers/fci_l2_nc.py
+++ b/satpy/readers/fci_l2_nc.py
@@ -22,6 +22,7 @@
import xarray as xr
from pyresample import geometry
+from satpy._compat import cached_property
from satpy.readers._geos_area import get_geos_area_naming, make_ext
from satpy.readers.eum_base import get_service_mode
from satpy.readers.file_handlers import BaseFileHandler
@@ -153,6 +154,7 @@ def __init__(self, filename, filename_info, filetype_info, with_area_definition=
self._projection = self.nc["mtg_geos_projection"]
self.multi_dims = {"maximum_number_of_layers": "layer", "number_of_vis_channels": "vis_channel_id"}
+
def get_area_def(self, key):
"""Return the area definition."""
try:
@@ -401,3 +403,59 @@ def _modify_area_extent(stand_area_extent):
area_extent = tuple([ll_x, ll_y, ur_x, ur_y])
return area_extent
+
+class FciL2NCAMVFileHandler(FciL2CommonFunctions, BaseFileHandler):
+ """Reader class for FCI L2 AMV products in NetCDF4 format."""
+ def __init__(self, filename, filename_info, filetype_info):
+ """Open the NetCDF file with xarray and prepare for dataset reading."""
+ super().__init__(filename, filename_info, filetype_info)
+
+ @cached_property
+ def nc(self):
+ """Read the file."""
+ return xr.open_dataset(
+ self.filename,
+ decode_cf=True,
+ mask_and_scale=True,
+ chunks={
+ "number_of_images": CHUNK_SIZE,
+ "number_of_winds": CHUNK_SIZE
+ }
+ )
+
+ def _get_global_attributes(self):
+ """Create a dictionary of global attributes to be added to all datasets.
+
+ Returns:
+ dict: A dictionary of global attributes.
+ filename: name of the product file
+ spacecraft_name: name of the spacecraft
+ sensor: name of sensor
+ platform_name: name of the platform
+
+ """
+ attributes = {
+ "filename": self.filename,
+ "spacecraft_name": self.spacecraft_name,
+ "sensor": self.sensor_name,
+ "platform_name": self.spacecraft_name,
+ "channel":self.filename_info["channel"]
+ }
+ return attributes
+
+ def get_dataset(self, dataset_id, dataset_info):
+ """Get dataset using the file_key in dataset_info."""
+ var_key = dataset_info["file_key"]
+ logger.debug("Reading in file to get dataset with key %s.", var_key)
+
+ try:
+ variable = self.nc[var_key]
+ except KeyError:
+ logger.warning("Could not find key %s in NetCDF file, no valid Dataset created", var_key)
+ return None
+
+ # Manage the attributes of the dataset
+ variable.attrs.update(dataset_info)
+ variable.attrs.update(self._get_global_attributes())
+
+ return variable
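A hedged usage sketch for the new AMV handler (the path is a placeholder; files must match one of the FCI-2-AMV patterns registered in fci_l2_nc.yaml above):

```python
from glob import glob

from satpy import Scene

scn = Scene(reader="fci_l2_nc", filenames=glob("/data/mtg/*FCI-2-AMV-*.nc"))
scn.load(["speed", "direction", "pressure"])

# "channel" is parsed from the file name and attached by _get_global_attributes
print(scn["speed"].attrs["channel"])
```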
diff --git a/satpy/readers/mirs.py b/satpy/readers/mirs.py
index 1ee0912b0f..34edd02739 100644
--- a/satpy/readers/mirs.py
+++ b/satpy/readers/mirs.py
@@ -50,6 +50,10 @@
PLATFORMS = {"n18": "NOAA-18",
"n19": "NOAA-19",
"np": "NOAA-19",
+ "n20": "NOAA-20",
+ "n21": "NOAA-21",
+ "n22": "NOAA-22",
+ "n23": "NOAA-23",
"m2": "MetOp-A",
"m1": "MetOp-B",
"m3": "MetOp-C",
@@ -60,11 +64,14 @@
"f17": "DMSP-F17",
"f18": "DMSP-F18",
"gpm": "GPM",
- "n20": "NOAA-20",
}
SENSOR = {"n18": amsu,
"n19": amsu,
"n20": "atms",
+ "n21": "atms",
+ "n22": "atms",
+ "n23": "atms",
+ "n24": "atms",
"np": amsu,
"m1": amsu,
"m2": amsu,
diff --git a/satpy/readers/seviri_base.py b/satpy/readers/seviri_base.py
index 25e6ed1a8b..5b19e56833 100644
--- a/satpy/readers/seviri_base.py
+++ b/satpy/readers/seviri_base.py
@@ -475,8 +475,10 @@ def get_cds_time(days, msecs):
days = np.array([days], dtype="int64")
msecs = np.array([msecs], dtype="int64")
+ # use nanosecond precision to silence warning from XArray
+ nsecs = 1000000 * msecs.astype("timedelta64[ns]")
time = np.datetime64("1958-01-01").astype("datetime64[ms]") + \
- days.astype("timedelta64[D]") + msecs.astype("timedelta64[ms]")
+ days.astype("timedelta64[D]") + nsecs
time[time == np.datetime64("1958-01-01 00:00")] = np.datetime64("NaT")
if len(time) == 1:
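A minimal sketch of the precision change (values are illustrative): building the offset at nanosecond precision up front yields the same instant but avoids xarray later re-casting millisecond times to nanoseconds with a warning.

```python
import numpy as np

days = np.array([23000], dtype="int64")
msecs = np.array([43_200_000], dtype="int64")   # 12:00:00 expressed in milliseconds

epoch = np.datetime64("1958-01-01", "ms")
t_ms = epoch + days.astype("timedelta64[D]") + msecs.astype("timedelta64[ms]")
t_ns = epoch + days.astype("timedelta64[D]") + (1_000_000 * msecs).astype("timedelta64[ns]")

assert t_ms.astype("datetime64[ns]") == t_ns   # same time, nanosecond dtype from the start
```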
diff --git a/satpy/readers/yaml_reader.py b/satpy/readers/yaml_reader.py
index 502aa7f004..e6357afa62 100644
--- a/satpy/readers/yaml_reader.py
+++ b/satpy/readers/yaml_reader.py
@@ -1162,7 +1162,13 @@ class GEOSegmentYAMLReader(GEOFlippableFileYAMLReader):
"""
def create_filehandlers(self, filenames, fh_kwargs=None):
- """Create file handler objects and determine expected segments for each."""
+ """Create file handler objects and determine expected segments for each.
+
+ Additionally, sort the filehandlers by segment number to avoid
+ issues with filenames where start_time or alphabetic sorting does not
+ produce the correct order.
+
+ """
created_fhs = super().create_filehandlers(
filenames, fh_kwargs=fh_kwargs)
@@ -1176,8 +1182,16 @@ def create_filehandlers(self, filenames, fh_kwargs=None):
# add segment key-values for FCI filehandlers
if "segment" not in fh.filename_info:
fh.filename_info["segment"] = fh.filename_info.get("count_in_repeat_cycle", 1)
+
+ self._sort_segment_filehandler_by_segment_number()
return created_fhs
+ def _sort_segment_filehandler_by_segment_number(self):
+ if hasattr(self, "file_handlers"):
+ for file_type in self.file_handlers.keys():
+ self.file_handlers[file_type] = sorted(self.file_handlers[file_type],
+ key=lambda x: x.filename_info.get("segment", 0))
+
def _load_dataset(self, dsid, ds_info, file_handlers, dim="y", pad_data=True):
"""Load only a piece of the dataset."""
if not pad_data:
@@ -1327,11 +1341,9 @@ def _find_missing_segments(file_handlers, ds_info, dsid):
failure = True
counter = 1
expected_segments = 1
- # get list of file handlers in segment order
- # (ex. first segment, second segment, etc)
- handlers = sorted(file_handlers, key=lambda x: x.filename_info.get("segment", 1))
+
projectable = None
- for fh in handlers:
+ for fh in file_handlers:
if fh.filetype_info["file_type"] in ds_info["file_type"]:
expected_segments = fh.filetype_info["expected_segments"]
diff --git a/satpy/resample.py b/satpy/resample.py
index ddab90be82..8b8f67dabf 100644
--- a/satpy/resample.py
+++ b/satpy/resample.py
@@ -42,7 +42,7 @@
"bucket_sum", "Sum Bucket Resampling", :class:`~satpy.resample.BucketSum`
"bucket_count", "Count Bucket Resampling", :class:`~satpy.resample.BucketCount`
"bucket_fraction", "Fraction Bucket Resampling", :class:`~satpy.resample.BucketFraction`
- "gradient_search", "Gradient Search Resampling", :class:`~pyresample.gradient.GradientSearchResampler`
+ "gradient_search", "Gradient Search Resampling", :meth:`~pyresample.gradient.create_gradient_search_resampler`
The resampling algorithm used can be specified with the ``resampler`` keyword
argument and defaults to ``nearest``:
@@ -148,13 +148,11 @@
import dask.array as da
import numpy as np
-import pyresample
import xarray as xr
import zarr
-from packaging import version
from pyresample.ewa import DaskEWAResampler, LegacyDaskEWAResampler
from pyresample.geometry import SwathDefinition
-from pyresample.gradient import GradientSearchResampler
+from pyresample.gradient import create_gradient_search_resampler
from pyresample.resampler import BaseResampler as PRBaseResampler
from satpy._config import config_search_paths, get_config_path
@@ -177,8 +175,6 @@
resamplers_cache: "WeakValueDictionary[tuple, object]" = WeakValueDictionary()
-PR_USE_SKIPNA = version.parse(pyresample.__version__) > version.parse("1.17.0")
-
def hash_dict(the_dict, the_hash=None):
"""Calculate a hash for a dictionary."""
@@ -773,33 +769,6 @@ def _get_replicated_chunk_sizes(d_arr, repeats):
return tuple(repeated_chunks)
-def _get_arg_to_pass_for_skipna_handling(**kwargs):
- """Determine if skipna can be passed to the compute functions for the average and sum bucket resampler."""
- # FIXME this can be removed once Pyresample 1.18.0 is a Satpy requirement
-
- if PR_USE_SKIPNA:
- if "mask_all_nan" in kwargs:
- warnings.warn(
- "Argument mask_all_nan is deprecated. Please use skipna for missing values handling. "
- "Continuing with default skipna=True, if not provided differently.",
- DeprecationWarning,
- stacklevel=3
- )
- kwargs.pop("mask_all_nan")
- else:
- if "mask_all_nan" in kwargs:
- warnings.warn(
- "Argument mask_all_nan is deprecated."
- "Please update Pyresample and use skipna for missing values handling.",
- DeprecationWarning,
- stacklevel=3
- )
- kwargs.setdefault("mask_all_nan", False)
- kwargs.pop("skipna")
-
- return kwargs
-
-
class BucketResamplerBase(PRBaseResampler):
"""Base class for bucket resampling which implements averaging."""
@@ -832,11 +801,6 @@ def resample(self, data, **kwargs): # noqa: D417
Returns (xarray.DataArray): Data resampled to the target area
"""
- if not PR_USE_SKIPNA and "skipna" in kwargs:
- raise ValueError("You are trying to set the skipna argument but you are using an old version of"
- " Pyresample that does not support it."
- "Please update Pyresample to 1.18.0 or higher to be able to use this argument.")
-
self.precompute(**kwargs)
attrs = data.attrs.copy()
data_arr = data.data
@@ -910,17 +874,16 @@ def compute(self, data, fill_value=np.nan, skipna=True, **kwargs): # noqa: D417
Returns:
dask.Array
"""
- kwargs = _get_arg_to_pass_for_skipna_handling(skipna=skipna, **kwargs)
-
results = []
if data.ndim == 3:
for i in range(data.shape[0]):
res = self.resampler.get_average(data[i, :, :],
fill_value=fill_value,
+ skipna=skipna,
**kwargs)
results.append(res)
else:
- res = self.resampler.get_average(data, fill_value=fill_value,
+ res = self.resampler.get_average(data, fill_value=fill_value, skipna=skipna,
**kwargs)
results.append(res)
@@ -948,16 +911,14 @@ class BucketSum(BucketResamplerBase):
def compute(self, data, skipna=True, **kwargs):
"""Call the resampling."""
- kwargs = _get_arg_to_pass_for_skipna_handling(skipna=skipna, **kwargs)
-
results = []
if data.ndim == 3:
for i in range(data.shape[0]):
- res = self.resampler.get_sum(data[i, :, :],
+ res = self.resampler.get_sum(data[i, :, :], skipna=skipna,
**kwargs)
results.append(res)
else:
- res = self.resampler.get_sum(data, **kwargs)
+ res = self.resampler.get_sum(data, skipna=skipna, **kwargs)
results.append(res)
return da.stack(results)
@@ -1009,7 +970,7 @@ def compute(self, data, fill_value=np.nan, categories=None, **kwargs):
"nearest": KDTreeResampler,
"bilinear": BilinearResampler,
"native": NativeResampler,
- "gradient_search": GradientSearchResampler,
+ "gradient_search": create_gradient_search_resampler,
"bucket_avg": BucketAvg,
"bucket_sum": BucketSum,
"bucket_count": BucketCount,
diff --git a/satpy/scene.py b/satpy/scene.py
index 660049e3b6..d1ba795ac8 100644
--- a/satpy/scene.py
+++ b/satpy/scene.py
@@ -28,7 +28,7 @@
from pyresample.geometry import AreaDefinition, BaseDefinition, SwathDefinition
from xarray import DataArray
-from satpy.composites import IncompatibleAreas
+from satpy.composites import IncompatibleAreas, enhance2dataset
from satpy.composites.config_loader import load_compositor_configs_for_sensors
from satpy.dataset import DataID, DataQuery, DatasetDict, combine_metadata, dataset_walker, replace_anc
from satpy.dependency_tree import DependencyTree
@@ -1066,6 +1066,80 @@ def to_geoviews(self, gvtype=None, datasets=None, kdims=None, vdims=None, dynami
return gview
+ def to_hvplot(self, datasets=None, *args, **kwargs):
+ """Convert satpy Scene to Hvplot. The method cannot be used with composites of swath data.
+
+ Args:
+ datasets (list): Limit included products to these datasets.
+ args: Positional arguments passed on to hvplot.
+ kwargs: Keyword arguments (hvplot options) passed on to hvplot.
+
+ Returns: hvplot object containing the plots of the requested datasets.
+ By default it contains plots of all Scene datasets and a plot title is shown.
+
+ Example usage::
+
+ scene_list = ['ash', 'IR_108']
+ scn = Scene()
+ scn.load(scene_list)
+ scn = scn.resample('eurol')
+ plot = scn.to_hvplot(datasets=scene_list)
+ plot.ash + plot.IR_108
+ """
+
+ def _get_crs(xarray_ds):
+ return xarray_ds.area.to_cartopy_crs()
+
+ def _get_timestamp(xarray_ds):
+ time = xarray_ds.attrs["start_time"]
+ return time.strftime("%Y %m %d -- %H:%M UTC")
+
+ def _get_units(xarray_ds, variable):
+ return xarray_ds[variable].attrs["units"]
+
+ def _plot_rgb(xarray_ds, variable, **defaults):
+ img = enhance2dataset(xarray_ds[variable])
+ return img.hvplot.rgb(bands="bands", title=title,
+ clabel="", **defaults)
+
+ def _plot_quadmesh(xarray_ds, variable, **defaults):
+ return xarray_ds[variable].hvplot.quadmesh(
+ clabel=f"[{_get_units(xarray_ds, variable)}]", title=title,
+ **defaults)
+
+ import hvplot.xarray as hvplot_xarray # noqa
+ from holoviews import Overlay
+
+ plot = Overlay()
+ xarray_ds = self.to_xarray_dataset(datasets)
+
+ if hasattr(xarray_ds, "area") and hasattr(xarray_ds.area, "to_cartopy_crs"):
+ ccrs = _get_crs(xarray_ds)
+ defaults = {"x": "x", "y": "y"}
+ else:
+ ccrs = None
+ defaults = {"x": "longitude", "y": "latitude"}
+
+ if datasets is None:
+ datasets = list(xarray_ds.keys())
+
+ defaults.update(data_aspect=1, project=True, geo=True,
+ crs=ccrs, projection=ccrs, rasterize=True, coastline="110m",
+ cmap="Plasma", responsive=True, dynamic=False, framewise=True,
+ colorbar=False, global_extent=False, xlabel="Longitude",
+ ylabel="Latitude")
+
+ defaults.update(kwargs)
+
+ for element in datasets:
+ title = f"{element} @ {_get_timestamp(xarray_ds)}"
+ if xarray_ds[element].shape[0] == 3:
+ plot[element] = _plot_rgb(xarray_ds, element, **defaults)
+ else:
+ plot[element] = _plot_quadmesh(xarray_ds, element, **defaults)
+
+ return plot
+
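
A note on the defaults set inside ``to_hvplot``: user keyword arguments are merged over them via ``defaults.update(kwargs)``, so individual options can be overridden per call. A small hedged sketch (assumes an ``IR_108`` dataset is loaded and resampled as in the docstring example above):

    # Override the built-in colormap and coastline resolution for one plot.
    plot = scn.to_hvplot(datasets=["IR_108"], cmap="viridis", coastline="50m")
    plot.IR_108
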
def to_xarray_dataset(self, datasets=None):
"""Merge all xr.DataArrays of a scene to a xr.DataSet.
diff --git a/satpy/tests/compositor_tests/test_spectral.py b/satpy/tests/compositor_tests/test_spectral.py
index e46cff4d0c..2f7d9fd7cb 100644
--- a/satpy/tests/compositor_tests/test_spectral.py
+++ b/satpy/tests/compositor_tests/test_spectral.py
@@ -21,7 +21,7 @@
import pytest
import xarray as xr
-from satpy.composites.spectral import GreenCorrector, HybridGreen, NDVIHybridGreen, SpectralBlender
+from satpy.composites.spectral import HybridGreen, NDVIHybridGreen, SpectralBlender
from satpy.tests.utils import CustomScheduler
@@ -67,68 +67,56 @@ def test_hybrid_green(self):
data = res.compute()
np.testing.assert_allclose(data, 0.23)
- def test_green_corrector(self):
- """Test the deprecated class for green corrections."""
- comp = GreenCorrector("blended_channel", fractions=(0.85, 0.15), prerequisites=(0.51, 0.85),
- standard_name="toa_bidirectional_reflectance")
- res = comp((self.c01, self.c03))
- assert isinstance(res, xr.DataArray)
- assert isinstance(res.data, da.Array)
- assert res.attrs["name"] == "blended_channel"
- assert res.attrs["standard_name"] == "toa_bidirectional_reflectance"
- data = res.compute()
- np.testing.assert_allclose(data, 0.23)
-
class TestNdviHybridGreenCompositor:
"""Test NDVI-weighted hybrid green correction of green band."""
def setup_method(self):
"""Initialize channels."""
+ coord_val = [1.0, 2.0]
self.c01 = xr.DataArray(
da.from_array(np.array([[0.25, 0.30], [0.20, 0.30]], dtype=np.float32), chunks=25),
- dims=("y", "x"), attrs={"name": "C02"})
+ dims=("y", "x"), coords=[coord_val, coord_val], attrs={"name": "C02"})
self.c02 = xr.DataArray(
da.from_array(np.array([[0.25, 0.30], [0.25, 0.35]], dtype=np.float32), chunks=25),
- dims=("y", "x"), attrs={"name": "C03"})
+ dims=("y", "x"), coords=[coord_val, coord_val], attrs={"name": "C03"})
self.c03 = xr.DataArray(
da.from_array(np.array([[0.35, 0.35], [0.28, 0.65]], dtype=np.float32), chunks=25),
- dims=("y", "x"), attrs={"name": "C04"})
+ dims=("y", "x"), coords=[coord_val, coord_val], attrs={"name": "C04"})
def test_ndvi_hybrid_green(self):
"""Test General functionality with linear scaling from ndvi to blend fraction."""
- with dask.config.set(scheduler=CustomScheduler(max_computes=1)):
+ with dask.config.set(scheduler=CustomScheduler(max_computes=0)):
comp = NDVIHybridGreen("ndvi_hybrid_green", limits=(0.15, 0.05), prerequisites=(0.51, 0.65, 0.85),
standard_name="toa_bidirectional_reflectance")
# Test General functionality with linear strength (=1.0)
res = comp((self.c01, self.c02, self.c03))
- assert isinstance(res, xr.DataArray)
- assert isinstance(res.data, da.Array)
- assert res.attrs["name"] == "ndvi_hybrid_green"
- assert res.attrs["standard_name"] == "toa_bidirectional_reflectance"
- data = res.values
+ assert isinstance(res, xr.DataArray)
+ assert isinstance(res.data, da.Array)
+ assert res.attrs["name"] == "ndvi_hybrid_green"
+ assert res.attrs["standard_name"] == "toa_bidirectional_reflectance"
+ data = res.values
np.testing.assert_array_almost_equal(data, np.array([[0.2633, 0.3071], [0.2115, 0.3420]]), decimal=4)
def test_ndvi_hybrid_green_dtype(self):
"""Test that the datatype is not altered by the compositor."""
- with dask.config.set(scheduler=CustomScheduler(max_computes=1)):
+ with dask.config.set(scheduler=CustomScheduler(max_computes=0)):
comp = NDVIHybridGreen("ndvi_hybrid_green", limits=(0.15, 0.05), prerequisites=(0.51, 0.65, 0.85),
standard_name="toa_bidirectional_reflectance")
- res = comp((self.c01, self.c02, self.c03)).compute()
+ res = comp((self.c01, self.c02, self.c03))
assert res.data.dtype == np.float32
def test_nonlinear_scaling(self):
"""Test non-linear scaling using `strength` term."""
- with dask.config.set(scheduler=CustomScheduler(max_computes=1)):
+ with dask.config.set(scheduler=CustomScheduler(max_computes=0)):
comp = NDVIHybridGreen("ndvi_hybrid_green", limits=(0.15, 0.05), strength=2.0,
prerequisites=(0.51, 0.65, 0.85),
standard_name="toa_bidirectional_reflectance")
-
res = comp((self.c01, self.c02, self.c03))
- res_np = res.data.compute()
- assert res.dtype == res_np.dtype
- assert res.dtype == np.float32
+ res_np = res.data.compute()
+ assert res.dtype == res_np.dtype
+ assert res.dtype == np.float32
np.testing.assert_array_almost_equal(res.data, np.array([[0.2646, 0.3075], [0.2120, 0.3471]]), decimal=4)
def test_invalid_strength(self):
@@ -136,3 +124,17 @@ def test_invalid_strength(self):
with pytest.raises(ValueError, match="Expected strength greater than 0.0, got 0.0."):
_ = NDVIHybridGreen("ndvi_hybrid_green", strength=0.0, prerequisites=(0.51, 0.65, 0.85),
standard_name="toa_bidirectional_reflectance")
+
+ def test_with_slightly_mismatching_coord_input(self):
+ """Test the case where an input (typically the red band) has a slightly different coordinate.
+
+ If match_data_arrays is called correctly, the coords will be aligned and the array will have the expected shape.
+
+ """
+ comp = NDVIHybridGreen("ndvi_hybrid_green", limits=(0.15, 0.05), prerequisites=(0.51, 0.65, 0.85),
+ standard_name="toa_bidirectional_reflectance")
+
+ c02_bad_shape = self.c02.copy()
+ c02_bad_shape.coords["y"] = [1.1, 2.]
+ res = comp((self.c01, c02_bad_shape, self.c03))
+ assert res.shape == (2, 2)
diff --git a/satpy/tests/reader_tests/test_ahi_hsd.py b/satpy/tests/reader_tests/test_ahi_hsd.py
index 9338440246..7bf1562e1c 100644
--- a/satpy/tests/reader_tests/test_ahi_hsd.py
+++ b/satpy/tests/reader_tests/test_ahi_hsd.py
@@ -48,8 +48,8 @@
"compression_flag_for_data": 0,
"hblock_number": 2,
"number_of_bits_per_pixel": 16,
- "number_of_columns": 11000,
- "number_of_lines": 1100,
+ "number_of_columns": np.array([11000]),
+ "number_of_lines": np.array([1100]),
"spare": "",
}
FAKE_PROJ_INFO: InfoDict = {
@@ -135,8 +135,8 @@ def test_region(self, fromfile, np2str):
"compression_flag_for_data": 0,
"hblock_number": 2,
"number_of_bits_per_pixel": 16,
- "number_of_columns": 1000,
- "number_of_lines": 1000,
+ "number_of_columns": np.array([1000]),
+ "number_of_lines": np.array([1000]),
"spare": ""}
area_def = fh.get_area_def(None)
@@ -183,8 +183,8 @@ def test_segment(self, fromfile, np2str):
"compression_flag_for_data": 0,
"hblock_number": 2,
"number_of_bits_per_pixel": 16,
- "number_of_columns": 11000,
- "number_of_lines": 1100,
+ "number_of_columns": np.array([11000]),
+ "number_of_lines": np.array([1100]),
"spare": ""}
area_def = fh.get_area_def(None)
diff --git a/satpy/tests/reader_tests/test_fci_l2_nc.py b/satpy/tests/reader_tests/test_fci_l2_nc.py
index 22611a8469..84681b0f02 100644
--- a/satpy/tests/reader_tests/test_fci_l2_nc.py
+++ b/satpy/tests/reader_tests/test_fci_l2_nc.py
@@ -29,7 +29,7 @@
from netCDF4 import Dataset
from pyresample import geometry
-from satpy.readers.fci_l2_nc import FciL2NCFileHandler, FciL2NCSegmentFileHandler
+from satpy.readers.fci_l2_nc import FciL2NCAMVFileHandler, FciL2NCFileHandler, FciL2NCSegmentFileHandler
from satpy.tests.utils import make_dataid
AREA_DEF = geometry.AreaDefinition(
@@ -507,3 +507,90 @@ def test_byte_extraction(self):
})
assert dataset.values == 0
+
+
+@pytest.fixture(scope="module")
+def amv_file(tmp_path_factory):
+ """Create an AMV file."""
+ test_file = tmp_path_factory.mktemp("data") / "fci_l2_amv.nc"
+
+ with Dataset(test_file, "w") as nc:
+ # Create dimensions
+ nc.createDimension("number_of_winds", 50000)
+
+ # add global attributes
+ nc.data_source = "test_data_source"
+ nc.platform = "test_platform"
+
+ # Add datasets
+ latitude = nc.createVariable("latitude", np.float32, dimensions=("number_of_winds",))
+ latitude[:] = np.arange(50000)
+
+ longitude = nc.createVariable("y", np.float32, dimensions=("number_of_winds",))
+ longitude[:] = np.arange(50000)
+
+ qi = nc.createVariable("product_quality", np.int8)
+ qi[:] = 99.
+
+ test_dataset = nc.createVariable("test_dataset", np.float32,
+ dimensions="number_of_winds")
+ test_dataset[:] = np.ones(50000)
+ test_dataset.test_attr = "attr"
+ test_dataset.units = "test_units"
+
+ mtg_geos_projection = nc.createVariable("mtg_geos_projection", int, dimensions=())
+ mtg_geos_projection.longitude_of_projection_origin = 0.0
+ mtg_geos_projection.semi_major_axis = 6378137.
+ mtg_geos_projection.inverse_flattening = 298.257223563
+ mtg_geos_projection.perspective_point_height = 35786400.
+ return test_file
+
+
+@pytest.fixture(scope="module")
+def amv_filehandler(amv_file):
+ """Create an AMV filehandler."""
+ return FciL2NCAMVFileHandler(filename=amv_file,
+ filename_info={"channel":"test_channel"},
+ filetype_info={}
+ )
+
+
+class TestFciL2NCAMVFileHandler:
+ """Test the FciL2NCAMVFileHandler reader."""
+
+ def test_all_basic(self, amv_filehandler, amv_file):
+ """Test all basic functionalities."""
+ assert amv_filehandler.spacecraft_name == "test_platform"
+ assert amv_filehandler.sensor_name == "test_data_source"
+ assert amv_filehandler.ssp_lon == 0.0
+
+ global_attributes = amv_filehandler._get_global_attributes()
+ expected_global_attributes = {
+ "filename": amv_file,
+ "spacecraft_name": "test_platform",
+ "sensor": "test_data_source",
+ "platform_name": "test_platform",
+ "channel": "test_channel"
+ }
+ assert global_attributes == expected_global_attributes
+
+ def test_dataset(self, amv_filehandler):
+ """Test the correct execution of the get_dataset function with a valid file_key."""
+ dataset = amv_filehandler.get_dataset(make_dataid(name="test_dataset", resolution=2000),
+ {"name": "test_dataset",
+ "file_key": "test_dataset",
+ "fill_value": -999,
+ "file_type": "test_file_type"})
+ np.testing.assert_allclose(dataset.values, np.ones(50000))
+ assert dataset.attrs["test_attr"] == "attr"
+ assert dataset.attrs["units"] == "test_units"
+ assert dataset.attrs["fill_value"] == -999
+
+ def test_dataset_with_invalid_filekey(self, amv_filehandler):
+ """Test the correct execution of the get_dataset function with an invalid file_key."""
+ invalid_dataset = amv_filehandler.get_dataset(make_dataid(name="test_invalid", resolution=2000),
+ {"name": "test_invalid",
+ "file_key": "test_invalid",
+ "fill_value": -999,
+ "file_type": "test_file_type"})
+ assert invalid_dataset is None
diff --git a/satpy/tests/reader_tests/test_seviri_base.py b/satpy/tests/reader_tests/test_seviri_base.py
index c2d190e084..86f684bb5e 100644
--- a/satpy/tests/reader_tests/test_seviri_base.py
+++ b/satpy/tests/reader_tests/test_seviri_base.py
@@ -74,23 +74,28 @@ def test_chebyshev(self):
exp = chebyshev4(coefs, time, domain)
np.testing.assert_allclose(res, exp)
- def test_get_cds_time(self):
- """Test the get_cds_time function."""
- # Scalar
+ def test_get_cds_time_scalar(self):
+ """Test the get_cds_time function for scalar inputs."""
assert get_cds_time(days=21246, msecs=12 * 3600 * 1000) == np.datetime64("2016-03-03 12:00")
- # Array
+ def test_get_cds_time_array(self):
+ """Test the get_cds_time function for array inputs."""
days = np.array([21246, 21247, 21248])
msecs = np.array([12*3600*1000, 13*3600*1000 + 1, 14*3600*1000 + 2])
expected = np.array([np.datetime64("2016-03-03 12:00:00.000"),
np.datetime64("2016-03-04 13:00:00.001"),
np.datetime64("2016-03-05 14:00:00.002")])
- np.testing.assert_equal(get_cds_time(days=days, msecs=msecs), expected)
+ res = get_cds_time(days=days, msecs=msecs)
+ np.testing.assert_equal(res, expected)
+ def test_get_cds_time_nanoseconds(self):
+ """Test that the get_cds_time function returns values with nanosecond precision."""
days = 21246
- msecs = 12*3600*1000
+ msecs = 12 * 3600 * 1000
expected = np.datetime64("2016-03-03 12:00:00.000")
- np.testing.assert_equal(get_cds_time(days=days, msecs=msecs), expected)
+ res = get_cds_time(days=days, msecs=msecs)
+ np.testing.assert_equal(res, expected)
+ assert res.dtype == np.dtype("datetime64[ns]")
def test_pad_data_horizontally_bad_shape(self):
"""Test the error handling for the horizontal hrv padding."""
diff --git a/satpy/tests/scene_tests/test_conversions.py b/satpy/tests/scene_tests/test_conversions.py
index a886c3fa60..9b0dd9098e 100644
--- a/satpy/tests/scene_tests/test_conversions.py
+++ b/satpy/tests/scene_tests/test_conversions.py
@@ -81,6 +81,53 @@ def test_geoviews_basic_with_swath(self):
# we assume that if we got something back, geoviews can use it
assert gv_obj is not None
+ def test_hvplot_basic_with_area(self):
+ """Test converting a Scene to hvplot with an AreaDefinition."""
+ from pyresample.geometry import AreaDefinition
+ scn = Scene()
+ area = AreaDefinition("test", "test", "test",
+ {"proj": "geos", "lon_0": -95.5, "h": 35786023.0},
+ 2, 2, [-200, -200, 200, 200])
+ scn["ds1"] = xr.DataArray(da.zeros((2, 2), chunks=-1), dims=("y", "x"),
+ attrs={"start_time": datetime(2018, 1, 1),
+ "area": area, "units": "m"})
+ hv_obj = scn.to_hvplot()
+ # we assume that if we got something back, hvplot can use it
+ assert hv_obj is not None
+
+ def test_hvplot_rgb_with_area(self):
+ """Test converting a Scene with multiple datasets to hvplot with an AreaDefinition."""
+ from pyresample.geometry import AreaDefinition
+ scn = Scene()
+ area = AreaDefinition("test", "test", "test",
+ {"proj": "geos", "lon_0": -95.5, "h": 35786023.0},
+ 2, 2, [-200, -200, 200, 200])
+ scn["ds1"] = xr.DataArray(da.zeros((2, 2), chunks=-1), dims=("y", "x"),
+ attrs={"start_time": datetime(2018, 1, 1),
+ "area": area, "units": "m"})
+ scn["ds2"] = xr.DataArray(da.zeros((2, 2), chunks=-1), dims=("y", "x"),
+ attrs={"start_time": datetime(2018, 1, 1),
+ "area": area, "units": "m"})
+ scn["ds3"] = xr.DataArray(da.zeros((2, 2), chunks=-1), dims=("y", "x"),
+ attrs={"start_time": datetime(2018, 1, 1),
+ "area": area, "units": "m"})
+ hv_obj = scn.to_hvplot()
+ # we assume that if we got something back, hvplot can use it
+ assert hv_obj is not None
+
+ def test_hvplot_basic_with_swath(self):
+ """Test converting a Scene to hvplot with a SwathDefinition."""
+ from pyresample.geometry import SwathDefinition
+ scn = Scene()
+ longitude = xr.DataArray(da.zeros((2, 2)))
+ latitude = xr.DataArray(da.zeros((2, 2)))
+ area = SwathDefinition(longitude, latitude)
+ scn["ds1"] = xr.DataArray(da.zeros((2, 2), chunks=-1), dims=("y", "x"),
+ attrs={"start_time": datetime(2018, 1, 1),
+ "area": area, "units": "m"})
+ hv_obj = scn.to_hvplot()
+ # we assume that if we got something back, hvplot can use it
+ assert hv_obj is not None
class TestToXarrayConversion:
"""Test Scene.to_xarray() conversion."""
diff --git a/satpy/tests/test_composites.py b/satpy/tests/test_composites.py
index 7fbe177bfb..b5d5a54b96 100644
--- a/satpy/tests/test_composites.py
+++ b/satpy/tests/test_composites.py
@@ -37,7 +37,7 @@
# - tmp_path
-class TestMatchDataArrays(unittest.TestCase):
+class TestMatchDataArrays:
"""Test the utility method 'match_data_arrays'."""
def _get_test_ds(self, shape=(50, 100), dims=("y", "x")):
@@ -132,6 +132,38 @@ def test_nondimensional_coords(self):
ret_datasets = comp.match_data_arrays([ds, ds])
assert "acq_time" not in ret_datasets[0].coords
+ def test_almost_equal_geo_coordinates(self):
+ """Test that coordinates that are almost-equal still match.
+
+ See https://github.com/pytroll/satpy/issues/2668 for discussion.
+
+ Various operations like cropping and resampling can cause
+ geo-coordinates (y, x) to be very slightly unequal due to floating
+ point precision. This test makes sure that even in those cases we
+ can still generate composites from DataArrays with these coordinates.
+
+ """
+ from satpy.composites import CompositeBase
+ from satpy.resample import add_crs_xy_coords
+
+ comp = CompositeBase("test_comp")
+ data_arr1 = self._get_test_ds(shape=(2, 2))
+ data_arr1 = add_crs_xy_coords(data_arr1, data_arr1.attrs["area"])
+ data_arr2 = self._get_test_ds(shape=(2, 2))
+ data_arr2 = data_arr2.assign_coords(
+ x=data_arr1.coords["x"] + 0.000001,
+ y=data_arr1.coords["y"],
+ crs=data_arr1.coords["crs"],
+ )
+ # data_arr2 = add_crs_xy_coords(data_arr2, data_arr2.attrs["area"])
+ # data_arr2.assign_coords(x=data_arr2.coords["x"].copy() + 1.1)
+ # default xarray alignment would fail and collapse one of our dims
+ assert 0 in (data_arr2 - data_arr1).shape
+ new_data_arr1, new_data_arr2 = comp.match_data_arrays([data_arr1, data_arr2])
+ assert 0 not in new_data_arr1.shape
+ assert 0 not in new_data_arr2.shape
+ assert 0 not in (new_data_arr2 - new_data_arr1).shape
+
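
For context on the failure mode this test guards against: xarray's default alignment drops coordinate labels that do not match exactly, so two arrays whose ``x`` values differ by a tiny floating point amount end up with an empty shared dimension. A minimal, self-contained sketch (not satpy code):

    import numpy as np
    import xarray as xr

    a = xr.DataArray(np.zeros((2, 2)), dims=("y", "x"), coords={"x": [0.0, 1.0]})
    b = a.assign_coords(x=a.coords["x"] + 1e-6)
    # Default inner-join alignment finds no common x labels, so the
    # x dimension collapses to length 0 in the result.
    assert 0 in (b - a).shape
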
class TestRatioSharpenedCompositors:
"""Test RatioSharpenedRGB and SelfSharpendRGB compositors."""
@@ -948,6 +980,120 @@ def test_call(self):
np.testing.assert_allclose(res, exp)
+class TestHighCloudCompositor:
+ """Test HighCloudCompositor."""
+
+ def setup_method(self):
+ """Create test data."""
+ from pyresample.geometry import create_area_def
+ area = create_area_def(area_id="test", projection={"proj": "latlong"},
+ center=(0, 45), width=3, height=3, resolution=35)
+ self.dtype = np.float32
+ self.data = xr.DataArray(
+ da.from_array(np.array([[200, 250, 300], [200, 250, 300], [200, 250, 300]], dtype=self.dtype)),
+ dims=("y", "x"), coords={"y": [0, 1, 2], "x": [0, 1, 2]},
+ attrs={"area": area}
+ )
+
+ def test_high_cloud_compositor(self):
+ """Test general default functionality of compositor."""
+ from satpy.composites import HighCloudCompositor
+ with dask.config.set(scheduler=CustomScheduler(max_computes=0)):
+ comp = HighCloudCompositor(name="test")
+ res = comp([self.data])
+ assert isinstance(res, xr.DataArray)
+ assert isinstance(res.data, da.Array)
+ expected_alpha = np.array([[1.0, 0.7142857, 0.0], [1.0, 0.625, 0.0], [1.0, 0.5555555, 0.0]])
+ expected = np.stack([self.data, expected_alpha])
+ np.testing.assert_almost_equal(res.values, expected)
+
+ def test_high_cloud_compositor_multiple_calls(self):
+ """Test that the modified init variables are reset properly when calling the compositor multiple times."""
+ from satpy.composites import HighCloudCompositor
+ comp = HighCloudCompositor(name="test")
+ res = comp([self.data])
+ res2 = comp([self.data])
+ np.testing.assert_equal(res.values, res2.values)
+
+ def test_high_cloud_compositor_dtype(self):
+ """Test that the datatype is not altered by the compositor."""
+ from satpy.composites import HighCloudCompositor
+ comp = HighCloudCompositor(name="test")
+ res = comp([self.data])
+ assert res.data.dtype == self.dtype
+
+ def test_high_cloud_compositor_validity_checks(self):
+ """Test that errors are raised for invalid input data and settings."""
+ from satpy.composites import HighCloudCompositor
+
+ with pytest.raises(ValueError, match="Expected 2 `transition_min_limits` values, got 1"):
+ _ = HighCloudCompositor("test", transition_min_limits=(210., ))
+
+ with pytest.raises(ValueError, match="Expected 2 `latitude_min_limits` values, got 3"):
+ _ = HighCloudCompositor("test", latitude_min_limits=(20., 40., 60.))
+
+ with pytest.raises(ValueError, match="Expected `transition_max` to be of type float, "
+ "is of type "):
+ _ = HighCloudCompositor("test", transition_max=(250., 300.))
+
+ comp = HighCloudCompositor("test")
+ with pytest.raises(ValueError, match="Expected 1 dataset, got 2"):
+ _ = comp([self.data, self.data])
+
+
+class TestLowCloudCompositor:
+ """Test LowCloudCompositor."""
+
+ def setup_method(self):
+ """Create test data."""
+ self.dtype = np.float32
+ self.btd = xr.DataArray(
+ da.from_array(np.array([[0.0, 1.0, 10.0], [0.0, 1.0, 10.0], [0.0, 1.0, 10.0]], dtype=self.dtype)),
+ dims=("y", "x"), coords={"y": [0, 1, 2], "x": [0, 1, 2]}
+ )
+ self.bt_win = xr.DataArray(
+ da.from_array(np.array([[250, 250, 250], [250, 250, 250], [150, 150, 150]], dtype=self.dtype)),
+ dims=("y", "x"), coords={"y": [0, 1, 2], "x": [0, 1, 2]}
+ )
+ self.lsm = xr.DataArray(
+ da.from_array(np.array([[0., 0., 0.], [1., 1., 1.], [0., 1., 0.]], dtype=self.dtype)),
+ dims=("y", "x"), coords={"y": [0, 1, 2], "x": [0, 1, 2]}
+ )
+
+ def test_low_cloud_compositor(self):
+ """Test general default functionality of compositor."""
+ from satpy.composites import LowCloudCompositor
+ with dask.config.set(scheduler=CustomScheduler(max_computes=0)):
+ comp = LowCloudCompositor(name="test")
+ res = comp([self.btd, self.bt_win, self.lsm])
+ assert isinstance(res, xr.DataArray)
+ assert isinstance(res.data, da.Array)
+ expected_alpha = np.array([[0.0, 0.25, 1.0], [0.0, 0.25, 1.0], [0.0, 0.0, 0.0]])
+ expected = np.stack([self.btd, expected_alpha])
+ np.testing.assert_equal(res.values, expected)
+
+ def test_low_cloud_compositor_dtype(self):
+ """Test that the datatype is not altered by the compositor."""
+ from satpy.composites import LowCloudCompositor
+ comp = LowCloudCompositor(name="test")
+ res = comp([self.btd, self.bt_win, self.lsm])
+ assert res.data.dtype == self.dtype
+
+ def test_low_cloud_compositor_validity_checks(self):
+ """Test that errors are raised for invalid input data and settings."""
+ from satpy.composites import LowCloudCompositor
+
+ with pytest.raises(ValueError, match="Expected 2 `range_land` values, got 1"):
+ _ = LowCloudCompositor("test", range_land=(2.0, ))
+
+ with pytest.raises(ValueError, match="Expected 2 `range_water` values, got 1"):
+ _ = LowCloudCompositor("test", range_water=(2.0,))
+
+ comp = LowCloudCompositor("test")
+ with pytest.raises(ValueError, match="Expected 3 datasets, got 2"):
+ _ = comp([self.btd, self.lsm])
+
+
class TestSingleBandCompositor(unittest.TestCase):
"""Test the single-band compositor."""
@@ -1867,3 +2013,37 @@ def _create_fake_composite_config(yaml_filename: str):
},
comp_file,
)
+
+
+class TestRealisticColors:
+ """Test the SEVIRI Realistic Colors compositor."""
+
+ def test_realistic_colors(self):
+ """Test the compositor."""
+ from satpy.composites import RealisticColors
+
+ vis06 = xr.DataArray(da.arange(0, 15, dtype=np.float32).reshape(3, 5), dims=("y", "x"),
+ attrs={"foo": "foo"})
+ vis08 = xr.DataArray(da.arange(15, 0, -1, dtype=np.float32).reshape(3, 5), dims=("y", "x"),
+ attrs={"bar": "bar"})
+ hrv = xr.DataArray(6 * da.ones((3, 5), dtype=np.float32), dims=("y", "x"),
+ attrs={"baz": "baz"})
+
+ expected_red = np.array([[0.0, 2.733333, 4.9333334, 6.6, 7.733333],
+ [8.333333, 8.400001, 7.9333334, 7.0, 6.0],
+ [5.0, 4.0, 3.0, 2.0, 1.0]], dtype=np.float32)
+ expected_green = np.array([
+ [15.0, 12.266666, 10.066668, 8.400001, 7.2666664],
+ [6.6666665, 6.6000004, 7.0666666, 8.0, 9.0],
+ [10.0, 11.0, 12.0, 13.0, 14.0]], dtype=np.float32)
+
+ with dask.config.set(scheduler=CustomScheduler(max_computes=1)):
+ comp = RealisticColors("Ni!")
+ res = comp((vis06, vis08, hrv))
+
+ arr = res.values
+
+ assert res.dtype == np.float32
+ np.testing.assert_allclose(arr[0, :, :], expected_red)
+ np.testing.assert_allclose(arr[1, :, :], expected_green)
+ np.testing.assert_allclose(arr[2, :, :], 3.0)
diff --git a/satpy/tests/test_resample.py b/satpy/tests/test_resample.py
index 7135661578..11a9644eb4 100644
--- a/satpy/tests/test_resample.py
+++ b/satpy/tests/test_resample.py
@@ -48,7 +48,6 @@ def get_test_data(input_shape=(100, 50), output_shape=(200, 100), output_proj=No
"""
import dask.array as da
from pyresample.geometry import AreaDefinition, SwathDefinition
- from pyresample.utils import proj4_str_to_dict
from xarray import DataArray
ds1 = DataArray(da.zeros(input_shape, chunks=85),
dims=input_dims,
@@ -62,16 +61,16 @@ def get_test_data(input_shape=(100, 50), output_shape=(200, 100), output_proj=No
input_proj_str = ("+proj=geos +lon_0=-95.0 +h=35786023.0 +a=6378137.0 "
"+b=6356752.31414 +sweep=x +units=m +no_defs")
+ crs = CRS(input_proj_str)
source = AreaDefinition(
"test_target",
"test_target",
"test_target",
- proj4_str_to_dict(input_proj_str),
+ crs,
input_shape[1], # width
input_shape[0], # height
(-1000., -1500., 1000., 1500.))
ds1.attrs["area"] = source
- crs = CRS.from_string(input_proj_str)
ds1 = ds1.assign_coords(crs=crs)
ds2 = ds1.copy()
@@ -95,7 +94,7 @@ def get_test_data(input_shape=(100, 50), output_shape=(200, 100), output_proj=No
"test_target",
"test_target",
"test_target",
- proj4_str_to_dict(output_proj_str),
+ CRS(output_proj_str),
output_shape[1], # width
output_shape[0], # height
(-1000., -1500., 1000., 1500.),
@@ -248,8 +247,12 @@ def test_expand_reduce_agg_rechunk(self):
into that chunk size.
"""
+ from satpy.utils import PerformanceWarning
+
d_arr = da.zeros((6, 20), chunks=3)
- new_data = NativeResampler._expand_reduce(d_arr, {0: 0.5, 1: 0.5})
+ text = "Array chunk size is not divisible by aggregation factor. Re-chunking to continue native resampling."
+ with pytest.warns(PerformanceWarning, match=text):
+ new_data = NativeResampler._expand_reduce(d_arr, {0: 0.5, 1: 0.5})
assert new_data.shape == (3, 10)
def test_expand_reduce_numpy(self):
@@ -582,17 +585,10 @@ def test_compute(self):
res = self._compute_mocked_bucket_avg(data, return_data=data[0, :, :], fill_value=2)
assert res.shape == (3, 5, 5)
- @mock.patch("satpy.resample.PR_USE_SKIPNA", True)
def test_compute_and_use_skipna_handling(self):
"""Test bucket resampler computation and use skipna handling."""
data = da.ones((5,))
- self._compute_mocked_bucket_avg(data, fill_value=2, mask_all_nan=True)
- self.bucket.resampler.get_average.assert_called_once_with(
- data,
- fill_value=2,
- skipna=True)
-
self._compute_mocked_bucket_avg(data, fill_value=2, skipna=False)
self.bucket.resampler.get_average.assert_called_once_with(
data,
@@ -605,35 +601,6 @@ def test_compute_and_use_skipna_handling(self):
fill_value=2,
skipna=True)
- @mock.patch("satpy.resample.PR_USE_SKIPNA", False)
- def test_compute_and_not_use_skipna_handling(self):
- """Test bucket resampler computation and not use skipna handling."""
- data = da.ones((5,))
-
- self._compute_mocked_bucket_avg(data, fill_value=2, mask_all_nan=True)
- self.bucket.resampler.get_average.assert_called_once_with(
- data,
- fill_value=2,
- mask_all_nan=True)
-
- self._compute_mocked_bucket_avg(data, fill_value=2, mask_all_nan=False)
- self.bucket.resampler.get_average.assert_called_once_with(
- data,
- fill_value=2,
- mask_all_nan=False)
-
- self._compute_mocked_bucket_avg(data, fill_value=2)
- self.bucket.resampler.get_average.assert_called_once_with(
- data,
- fill_value=2,
- mask_all_nan=False)
-
- self._compute_mocked_bucket_avg(data, fill_value=2, skipna=True)
- self.bucket.resampler.get_average.assert_called_once_with(
- data,
- fill_value=2,
- mask_all_nan=False)
-
@mock.patch("pyresample.bucket.BucketResampler")
def test_resample(self, pyresample_bucket):
"""Test bucket resamplers resample method."""
@@ -713,16 +680,10 @@ def test_compute(self):
res = self._compute_mocked_bucket_sum(data, return_data=data[0, :, :])
assert res.shape == (3, 5, 5)
- @mock.patch("satpy.resample.PR_USE_SKIPNA", True)
def test_compute_and_use_skipna_handling(self):
"""Test bucket resampler computation and use skipna handling."""
data = da.ones((5,))
- self._compute_mocked_bucket_sum(data, mask_all_nan=True)
- self.bucket.resampler.get_sum.assert_called_once_with(
- data,
- skipna=True)
-
self._compute_mocked_bucket_sum(data, skipna=False)
self.bucket.resampler.get_sum.assert_called_once_with(
data,
@@ -733,32 +694,6 @@ def test_compute_and_use_skipna_handling(self):
data,
skipna=True)
- @mock.patch("satpy.resample.PR_USE_SKIPNA", False)
- def test_compute_and_not_use_skipna_handling(self):
- """Test bucket resampler computation and not use skipna handling."""
- data = da.ones((5,))
-
- self._compute_mocked_bucket_sum(data, mask_all_nan=True)
- self.bucket.resampler.get_sum.assert_called_once_with(
- data,
- mask_all_nan=True)
-
- self._compute_mocked_bucket_sum(data, mask_all_nan=False)
- self.bucket.resampler.get_sum.assert_called_once_with(
- data,
- mask_all_nan=False)
-
- self._compute_mocked_bucket_sum(data)
- self.bucket.resampler.get_sum.assert_called_once_with(
- data,
- mask_all_nan=False)
-
- self._compute_mocked_bucket_sum(data, fill_value=2, skipna=True)
- self.bucket.resampler.get_sum.assert_called_once_with(
- data,
- fill_value=2,
- mask_all_nan=False)
-
class TestBucketCount(unittest.TestCase):
"""Test the count bucket resampler."""
diff --git a/satpy/tests/test_writers.py b/satpy/tests/test_writers.py
index e2bfd898ab..bc68d767c1 100644
--- a/satpy/tests/test_writers.py
+++ b/satpy/tests/test_writers.py
@@ -548,13 +548,20 @@ def setUp(self):
import tempfile
from datetime import datetime
+ from pyresample.geometry import AreaDefinition
+
from satpy.scene import Scene
+ adef = AreaDefinition(
+ "test", "test", "test", "EPSG:4326",
+ 100, 200, (-180., -90., 180., 90.),
+ )
ds1 = xr.DataArray(
da.zeros((100, 200), chunks=50),
dims=("y", "x"),
attrs={"name": "test",
- "start_time": datetime(2018, 1, 1, 0, 0, 0)}
+ "start_time": datetime(2018, 1, 1, 0, 0, 0),
+ "area": adef}
)
self.scn = Scene()
self.scn["test"] = ds1
@@ -650,8 +657,14 @@ def setup_method(self):
import tempfile
from datetime import datetime
+ from pyresample.geometry import AreaDefinition
+
from satpy.scene import Scene
+ adef = AreaDefinition(
+ "test", "test", "test", "EPSG:4326",
+ 100, 200, (-180., -90., 180., 90.),
+ )
ds1 = xr.DataArray(
da.zeros((100, 200), chunks=50),
dims=("y", "x"),
@@ -659,6 +672,7 @@ def setup_method(self):
"name": "test",
"start_time": datetime(2018, 1, 1, 0, 0, 0),
"sensor": "fake_sensor",
+ "area": adef,
}
)
ds2 = ds1.copy()
diff --git a/satpy/tests/test_yaml_reader.py b/satpy/tests/test_yaml_reader.py
index 41439a1ac6..0b0293e453 100644
--- a/satpy/tests/test_yaml_reader.py
+++ b/satpy/tests/test_yaml_reader.py
@@ -971,10 +971,11 @@ def _create_mocked_fh_and_areadef(aex, ashape, expected_segments, segment, chk_p
get_segment_position_info = MagicMock()
get_segment_position_info.return_value = chk_pos_info
- fh = MagicMock()
filetype_info = {"expected_segments": expected_segments,
"file_type": "filetype1"}
filename_info = {"segment": segment}
+
+ fh = _create_mocked_basic_fh()
fh.filetype_info = filetype_info
fh.filename_info = filename_info
fh.get_area_def = get_area_def
@@ -983,6 +984,12 @@ def _create_mocked_fh_and_areadef(aex, ashape, expected_segments, segment, chk_p
return fh, seg_area
+def _create_mocked_basic_fh():
+ fake_fh = MagicMock()
+ fake_fh.filename_info = {}
+ fake_fh.filetype_info = {}
+ return fake_fh
+
class TestGEOSegmentYAMLReader(unittest.TestCase):
"""Test GEOSegmentYAMLReader."""
@@ -993,9 +1000,7 @@ def test_get_expected_segments(self, cfh):
from satpy.readers.yaml_reader import GEOSegmentYAMLReader
reader = GEOSegmentYAMLReader()
- fake_fh = MagicMock()
- fake_fh.filename_info = {}
- fake_fh.filetype_info = {}
+ fake_fh = _create_mocked_basic_fh()
cfh.return_value = {"ft1": [fake_fh]}
# default (1)
@@ -1030,6 +1035,28 @@ def test_get_expected_segments(self, cfh):
es = created_fhs["ft1"][0].filename_info["segment"]
assert es == 5
+ @patch.object(yr.FileYAMLReader, "__init__", lambda x: None)
+ @patch.object(yr.FileYAMLReader, "create_filehandlers")
+ def test_segments_sorting(self, cfh):
+ """Test that segment filehandlers are sorted by segment number."""
+ from satpy.readers.yaml_reader import GEOSegmentYAMLReader
+ reader = GEOSegmentYAMLReader()
+
+ # create filehandlers with different segment numbers
+ fake_fh_1 = _create_mocked_basic_fh()
+ fake_fh_1.filename_info["segment"] = 1
+ fake_fh_2 = _create_mocked_basic_fh()
+ fake_fh_2.filename_info["segment"] = 2
+ fake_fh_3 = _create_mocked_basic_fh()
+ fake_fh_3.filename_info["segment"] = 3
+
+ # put the filehandlers in an unsorted order
+ reader.file_handlers = {"ft1": [fake_fh_1, fake_fh_3, fake_fh_2]}
+
+ # check that the created filehandlers are sorted by segment number
+ reader.create_filehandlers(["fake.nc"])
+ assert [fh.filename_info["segment"] for fh in reader.file_handlers["ft1"]] == [1, 2, 3]
+
@patch.object(yr.FileYAMLReader, "__init__", lambda x: None)
@patch("satpy.readers.yaml_reader.FileYAMLReader._load_dataset")
@patch("satpy.readers.yaml_reader.xr")
diff --git a/satpy/tests/writer_tests/test_awips_tiled.py b/satpy/tests/writer_tests/test_awips_tiled.py
index 63113a9f94..dbc1bc82d7 100644
--- a/satpy/tests/writer_tests/test_awips_tiled.py
+++ b/satpy/tests/writer_tests/test_awips_tiled.py
@@ -198,7 +198,7 @@ def test_basic_numbered_1_tile(self, extra_attrs, expected_filename, use_save_da
check_required_properties(unmasked_ds, output_ds)
scale_factor = output_ds["data"].encoding["scale_factor"]
np.testing.assert_allclose(input_data_arr.values, output_ds["data"].data,
- atol=scale_factor / 2)
+ atol=scale_factor * 0.75)
def test_units_length_warning(self, tmp_path):
"""Test long 'units' warnings are raised."""
diff --git a/satpy/tests/writer_tests/test_cf.py b/satpy/tests/writer_tests/test_cf.py
index 7fabb04f10..6d1d15527b 100644
--- a/satpy/tests/writer_tests/test_cf.py
+++ b/satpy/tests/writer_tests/test_cf.py
@@ -152,7 +152,8 @@ def test_save_dataset_a_digit_no_prefix_include_attr(self):
scn = Scene()
scn["1"] = xr.DataArray([1, 2, 3])
with TempFile() as filename:
- scn.save_datasets(filename=filename, writer="cf", include_orig_name=True, numeric_name_prefix="")
+ with pytest.warns(UserWarning, match=r"Invalid NetCDF dataset name"):
+ scn.save_datasets(filename=filename, writer="cf", include_orig_name=True, numeric_name_prefix="")
with xr.open_dataset(filename) as f:
np.testing.assert_array_equal(f["1"][:], [1, 2, 3])
assert "original_name" not in f["1"].attrs
@@ -208,8 +209,10 @@ def test_groups(self):
attrs={"name": "HRV", "start_time": tstart, "end_time": tend})
with TempFile() as filename:
- scn.save_datasets(filename=filename, writer="cf", groups={"visir": ["IR_108", "VIS006"], "hrv": ["HRV"]},
- pretty=True)
+ with pytest.warns(UserWarning, match=r"Cannot pretty-format"):
+ scn.save_datasets(filename=filename, writer="cf",
+ groups={"visir": ["IR_108", "VIS006"], "hrv": ["HRV"]},
+ pretty=True)
nc_root = xr.open_dataset(filename)
assert "history" in nc_root.attrs
@@ -240,11 +243,11 @@ def test_single_time_value(self):
test_array = np.array([[1, 2], [3, 4]])
scn["test-array"] = xr.DataArray(test_array,
dims=["x", "y"],
- coords={"time": np.datetime64("2018-05-30T10:05:00")},
+ coords={"time": np.datetime64("2018-05-30T10:05:00", "ns")},
attrs=dict(start_time=start_time,
end_time=end_time))
with TempFile() as filename:
- scn.save_datasets(filename=filename, writer="cf")
+ scn.save_datasets(filename=filename, writer="cf", encoding={"time": {"units": "seconds since 2018-01-01"}})
with xr.open_dataset(filename, decode_cf=True) as f:
np.testing.assert_array_equal(f["time"], scn["test-array"]["time"])
bounds_exp = np.array([[start_time, end_time]], dtype="datetime64[m]")
@@ -255,13 +258,14 @@ def test_time_coordinate_on_a_swath(self):
scn = Scene()
test_array = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
times = np.array(["2018-05-30T10:05:00", "2018-05-30T10:05:01",
- "2018-05-30T10:05:02", "2018-05-30T10:05:03"], dtype=np.datetime64)
+ "2018-05-30T10:05:02", "2018-05-30T10:05:03"], dtype="datetime64[ns]")
scn["test-array"] = xr.DataArray(test_array,
dims=["y", "x"],
coords={"time": ("y", times)},
attrs=dict(start_time=times[0], end_time=times[-1]))
with TempFile() as filename:
- scn.save_datasets(filename=filename, writer="cf", pretty=True)
+ scn.save_datasets(filename=filename, writer="cf", pretty=True,
+ encoding={"time": {"units": "seconds since 2018-01-01"}})
with xr.open_dataset(filename, decode_cf=True) as f:
np.testing.assert_array_equal(f["time"], scn["test-array"]["time"])
@@ -273,11 +277,15 @@ def test_bounds(self):
test_array = np.array([[1, 2], [3, 4]]).reshape(2, 2, 1)
scn["test-array"] = xr.DataArray(test_array,
dims=["x", "y", "time"],
- coords={"time": [np.datetime64("2018-05-30T10:05:00")]},
+ coords={"time": [np.datetime64("2018-05-30T10:05:00", "ns")]},
attrs=dict(start_time=start_time,
end_time=end_time))
with TempFile() as filename:
- scn.save_datasets(filename=filename, writer="cf")
+ with warnings.catch_warnings():
+ # The purpose is to use the default time encoding; silence the warning
+ warnings.filterwarnings("ignore", category=UserWarning,
+ message=r"Times can't be serialized faithfully to int64 with requested units")
+ scn.save_datasets(filename=filename, writer="cf")
# Check decoded time coordinates & bounds
with xr.open_dataset(filename, decode_cf=True) as f:
bounds_exp = np.array([[start_time, end_time]], dtype="datetime64[m]")
@@ -307,16 +315,17 @@ def test_bounds_minimum(self):
test_arrayB = np.array([[1, 2], [3, 5]]).reshape(2, 2, 1)
scn["test-arrayA"] = xr.DataArray(test_arrayA,
dims=["x", "y", "time"],
- coords={"time": [np.datetime64("2018-05-30T10:05:00")]},
+ coords={"time": [np.datetime64("2018-05-30T10:05:00", "ns")]},
attrs=dict(start_time=start_timeA,
end_time=end_timeA))
scn["test-arrayB"] = xr.DataArray(test_arrayB,
dims=["x", "y", "time"],
- coords={"time": [np.datetime64("2018-05-30T10:05:00")]},
+ coords={"time": [np.datetime64("2018-05-30T10:05:00", "ns")]},
attrs=dict(start_time=start_timeB,
end_time=end_timeB))
with TempFile() as filename:
- scn.save_datasets(filename=filename, writer="cf")
+ scn.save_datasets(filename=filename, writer="cf",
+ encoding={"time": {"units": "seconds since 2018-01-01"}})
with xr.open_dataset(filename, decode_cf=True) as f:
bounds_exp = np.array([[start_timeA, end_timeB]], dtype="datetime64[m]")
np.testing.assert_array_equal(f["time_bnds"], bounds_exp)
@@ -330,14 +339,15 @@ def test_bounds_missing_time_info(self):
test_arrayB = np.array([[1, 2], [3, 5]]).reshape(2, 2, 1)
scn["test-arrayA"] = xr.DataArray(test_arrayA,
dims=["x", "y", "time"],
- coords={"time": [np.datetime64("2018-05-30T10:05:00")]},
+ coords={"time": [np.datetime64("2018-05-30T10:05:00", "ns")]},
attrs=dict(start_time=start_timeA,
end_time=end_timeA))
scn["test-arrayB"] = xr.DataArray(test_arrayB,
dims=["x", "y", "time"],
- coords={"time": [np.datetime64("2018-05-30T10:05:00")]})
+ coords={"time": [np.datetime64("2018-05-30T10:05:00", "ns")]})
with TempFile() as filename:
- scn.save_datasets(filename=filename, writer="cf")
+ scn.save_datasets(filename=filename, writer="cf",
+ encoding={"time": {"units": "seconds since 2018-01-01"}})
with xr.open_dataset(filename, decode_cf=True) as f:
bounds_exp = np.array([[start_timeA, end_timeA]], dtype="datetime64[m]")
np.testing.assert_array_equal(f["time_bnds"], bounds_exp)
@@ -350,11 +360,12 @@ def test_unlimited_dims_kwarg(self):
test_array = np.array([[1, 2], [3, 4]])
scn["test-array"] = xr.DataArray(test_array,
dims=["x", "y"],
- coords={"time": np.datetime64("2018-05-30T10:05:00")},
+ coords={"time": np.datetime64("2018-05-30T10:05:00", "ns")},
attrs=dict(start_time=start_time,
end_time=end_time))
with TempFile() as filename:
- scn.save_datasets(filename=filename, writer="cf", unlimited_dims=["time"])
+ scn.save_datasets(filename=filename, writer="cf", unlimited_dims=["time"],
+ encoding={"time": {"units": "seconds since 2018-01-01"}})
with xr.open_dataset(filename) as f:
assert set(f.encoding["unlimited_dims"]) == {"time"}
@@ -570,5 +581,5 @@ def _should_use_compression_keyword():
versions = _get_backend_versions()
return (
versions["libnetcdf"] >= Version("4.9.0") and
- versions["xarray"] >= Version("2023.12")
+ versions["xarray"] >= Version("2024.1")
)
diff --git a/satpy/tests/writer_tests/test_geotiff.py b/satpy/tests/writer_tests/test_geotiff.py
index 74fcd43609..8925857637 100644
--- a/satpy/tests/writer_tests/test_geotiff.py
+++ b/satpy/tests/writer_tests/test_geotiff.py
@@ -32,12 +32,19 @@
def _get_test_datasets_2d():
"""Create a single 2D test dataset."""
+ from pyresample.geometry import AreaDefinition
+
+ adef = AreaDefinition(
+ "test", "test", "test", "EPSG:4326",
+ 100, 200, (-180., -90., 180., 90.),
+ )
ds1 = xr.DataArray(
da.zeros((100, 200), chunks=50),
dims=("y", "x"),
attrs={"name": "test",
"start_time": datetime.utcnow(),
- "units": "K"}
+ "units": "K",
+ "area": adef}
)
return [ds1]
@@ -54,12 +61,19 @@ def _get_test_datasets_2d_nonlinear_enhancement():
def _get_test_datasets_3d():
"""Create a single 3D test dataset."""
+ from pyresample.geometry import AreaDefinition
+
+ adef = AreaDefinition(
+ "test", "test", "test", "EPSG:4326",
+ 100, 200, (-180., -90., 180., 90.),
+ )
ds1 = xr.DataArray(
da.zeros((3, 100, 200), chunks=50),
dims=("bands", "y", "x"),
coords={"bands": ["R", "G", "B"]},
attrs={"name": "test",
- "start_time": datetime.utcnow()}
+ "start_time": datetime.utcnow(),
+ "area": adef}
)
return [ds1]
diff --git a/satpy/tests/writer_tests/test_mitiff.py b/satpy/tests/writer_tests/test_mitiff.py
index b4ff371dab..4e8878687a 100644
--- a/satpy/tests/writer_tests/test_mitiff.py
+++ b/satpy/tests/writer_tests/test_mitiff.py
@@ -52,14 +52,13 @@ def _get_test_datasets(self):
import dask.array as da
import xarray as xr
+ from pyproj import CRS
from pyresample.geometry import AreaDefinition
- from pyresample.utils import proj4_str_to_dict
area_def = AreaDefinition(
"test",
"test",
"test",
- proj4_str_to_dict("+proj=stere +datum=WGS84 +ellps=WGS84 "
- "+lon_0=0. +lat_0=90 +lat_ts=60 +units=km"),
+ CRS("+proj=stere +datum=WGS84 +ellps=WGS84 +lon_0=0. +lat_0=90 +lat_ts=60 +units=km"),
100,
200,
(-1000., -1500., 1000., 1500.),
@@ -119,14 +118,13 @@ def _get_test_datasets_sensor_set(self):
import dask.array as da
import xarray as xr
+ from pyproj import CRS
from pyresample.geometry import AreaDefinition
- from pyresample.utils import proj4_str_to_dict
area_def = AreaDefinition(
"test",
"test",
"test",
- proj4_str_to_dict("+proj=stere +datum=WGS84 +ellps=WGS84 "
- "+lon_0=0. +lat_0=90 +lat_ts=60 +units=km"),
+ CRS("+proj=stere +datum=WGS84 +ellps=WGS84 +lon_0=0. +lat_0=90 +lat_ts=60 +units=km"),
100,
200,
(-1000., -1500., 1000., 1500.),
@@ -186,14 +184,14 @@ def _get_test_dataset(self, bands=3):
import dask.array as da
import xarray as xr
+ from pyproj import CRS
from pyresample.geometry import AreaDefinition
- from pyresample.utils import proj4_str_to_dict
+
area_def = AreaDefinition(
"test",
"test",
"test",
- proj4_str_to_dict("+proj=stere +datum=WGS84 +ellps=WGS84 "
- "+lon_0=0. +lat_0=90 +lat_ts=60 +units=km"),
+ CRS("+proj=stere +datum=WGS84 +ellps=WGS84 +lon_0=0. +lat_0=90 +lat_ts=60 +units=km"),
100,
200,
(-1000., -1500., 1000., 1500.),
@@ -217,14 +215,14 @@ def _get_test_one_dataset(self):
import dask.array as da
import xarray as xr
+ from pyproj import CRS
from pyresample.geometry import AreaDefinition
- from pyresample.utils import proj4_str_to_dict
+
area_def = AreaDefinition(
"test",
"test",
"test",
- proj4_str_to_dict("+proj=geos +datum=WGS84 +ellps=WGS84 "
- "+lon_0=0. h=36000. +units=km"),
+ CRS("+proj=geos +datum=WGS84 +ellps=WGS84 +lon_0=0. h=36000. +units=km"),
100,
200,
(-1000., -1500., 1000., 1500.),
@@ -248,14 +246,14 @@ def _get_test_one_dataset_sensor_set(self):
import dask.array as da
import xarray as xr
+ from pyproj import CRS
from pyresample.geometry import AreaDefinition
- from pyresample.utils import proj4_str_to_dict
+
area_def = AreaDefinition(
"test",
"test",
"test",
- proj4_str_to_dict("+proj=geos +datum=WGS84 +ellps=WGS84 "
- "+lon_0=0. h=36000. +units=km"),
+ CRS("+proj=geos +datum=WGS84 +ellps=WGS84 +lon_0=0. h=36000. +units=km"),
100,
200,
(-1000., -1500., 1000., 1500.),
@@ -278,14 +276,14 @@ def _get_test_dataset_with_bad_values(self, bands=3):
from datetime import datetime
import xarray as xr
+ from pyproj import CRS
from pyresample.geometry import AreaDefinition
- from pyresample.utils import proj4_str_to_dict
+
area_def = AreaDefinition(
"test",
"test",
"test",
- proj4_str_to_dict("+proj=stere +datum=WGS84 +ellps=WGS84 "
- "+lon_0=0. +lat_0=90 +lat_ts=60 +units=km"),
+ CRS("+proj=stere +datum=WGS84 +ellps=WGS84 +lon_0=0. +lat_0=90 +lat_ts=60 +units=km"),
100,
200,
(-1000., -1500., 1000., 1500.),
@@ -313,8 +311,8 @@ def _get_test_dataset_calibration(self, bands=6):
import dask.array as da
import xarray as xr
+ from pyproj import CRS
from pyresample.geometry import AreaDefinition
- from pyresample.utils import proj4_str_to_dict
from satpy.scene import Scene
from satpy.tests.utils import make_dsq
@@ -322,8 +320,7 @@ def _get_test_dataset_calibration(self, bands=6):
"test",
"test",
"test",
- proj4_str_to_dict("+proj=stere +datum=WGS84 +ellps=WGS84 "
- "+lon_0=0. +lat_0=90 +lat_ts=60 +units=km"),
+ CRS("+proj=stere +datum=WGS84 +ellps=WGS84 +lon_0=0. +lat_0=90 +lat_ts=60 +units=km"),
100,
200,
(-1000., -1500., 1000., 1500.),
@@ -418,8 +415,8 @@ def _get_test_dataset_calibration_one_dataset(self, bands=1):
import dask.array as da
import xarray as xr
+ from pyproj import CRS
from pyresample.geometry import AreaDefinition
- from pyresample.utils import proj4_str_to_dict
from satpy.scene import Scene
from satpy.tests.utils import make_dsq
@@ -427,8 +424,7 @@ def _get_test_dataset_calibration_one_dataset(self, bands=1):
"test",
"test",
"test",
- proj4_str_to_dict("+proj=stere +datum=WGS84 +ellps=WGS84 "
- "+lon_0=0. +lat_0=90 +lat_ts=60 +units=km"),
+ CRS("+proj=stere +datum=WGS84 +ellps=WGS84 +lon_0=0. +lat_0=90 +lat_ts=60 +units=km"),
100,
200,
(-1000., -1500., 1000., 1500.),
@@ -473,16 +469,15 @@ def _get_test_dataset_three_bands_two_prereq(self, bands=3):
import dask.array as da
import xarray as xr
+ from pyproj import CRS
from pyresample.geometry import AreaDefinition
- from pyresample.utils import proj4_str_to_dict
from satpy.tests.utils import make_dsq
area_def = AreaDefinition(
"test",
"test",
"test",
- proj4_str_to_dict("+proj=stere +datum=WGS84 +ellps=WGS84 "
- "+lon_0=0. +lat_0=90 +lat_ts=60 +units=km"),
+ CRS("+proj=stere +datum=WGS84 +ellps=WGS84 +lon_0=0. +lat_0=90 +lat_ts=60 +units=km"),
100,
200,
(-1000., -1500., 1000., 1500.),
@@ -508,16 +503,15 @@ def _get_test_dataset_three_bands_prereq(self, bands=3):
import dask.array as da
import xarray as xr
+ from pyproj import CRS
from pyresample.geometry import AreaDefinition
- from pyresample.utils import proj4_str_to_dict
from satpy.tests.utils import make_dsq
area_def = AreaDefinition(
"test",
"test",
"test",
- proj4_str_to_dict("+proj=stere +datum=WGS84 +ellps=WGS84 "
- "+lon_0=0. +lat_0=90 +lat_ts=60 +units=km"),
+ CRS("+proj=stere +datum=WGS84 +ellps=WGS84 +lon_0=0. +lat_0=90 +lat_ts=60 +units=km"),
100,
200,
(-1000., -1500., 1000., 1500.),
@@ -844,23 +838,23 @@ def test_convert_proj4_string(self):
from pyresample.geometry import AreaDefinition
from satpy.writers.mitiff import MITIFFWriter
- checks = [{"epsg": "+init=EPSG:32631",
+ checks = [{"epsg": "EPSG:32631",
"proj4": (" Proj string: +proj=etmerc +lat_0=0 +lon_0=3 +k=0.9996 "
"+ellps=WGS84 +datum=WGS84 +units=km +x_0=501020.000000 "
"+y_0=1515.000000\n")},
- {"epsg": "+init=EPSG:32632",
+ {"epsg": "EPSG:32632",
"proj4": (" Proj string: +proj=etmerc +lat_0=0 +lon_0=9 +k=0.9996 "
"+ellps=WGS84 +datum=WGS84 +units=km +x_0=501020.000000 "
"+y_0=1515.000000\n")},
- {"epsg": "+init=EPSG:32633",
+ {"epsg": "EPSG:32633",
"proj4": (" Proj string: +proj=etmerc +lat_0=0 +lon_0=15 +k=0.9996 "
"+ellps=WGS84 +datum=WGS84 +units=km +x_0=501020.000000 "
"+y_0=1515.000000\n")},
- {"epsg": "+init=EPSG:32634",
+ {"epsg": "EPSG:32634",
"proj4": (" Proj string: +proj=etmerc +lat_0=0 +lon_0=21 +k=0.9996 "
"+ellps=WGS84 +datum=WGS84 +units=km +x_0=501020.000000 "
"+y_0=1515.000000\n")},
- {"epsg": "+init=EPSG:32635",
+ {"epsg": "EPSG:32635",
"proj4": (" Proj string: +proj=etmerc +lat_0=0 +lon_0=27 +k=0.9996 "
"+ellps=WGS84 +datum=WGS84 +units=km +x_0=501020.000000 "
"+y_0=1515.000000\n")}]
diff --git a/satpy/writers/awips_tiled.py b/satpy/writers/awips_tiled.py
index 15680e8091..03ce3e9d68 100644
--- a/satpy/writers/awips_tiled.py
+++ b/satpy/writers/awips_tiled.py
@@ -630,7 +630,13 @@ def _get_factor_offset_fill(input_data_arr, vmin, vmax, encoding):
# max value
fills = [2 ** (file_bit_depth - 1) - 1]
- mx = (vmax - vmin) / (2 ** bit_depth - 1 - num_fills)
+ # NOTE: AWIPS is buggy and does not properly handle both
+ # halves an integers data space. The below code limits
+ # unsigned integers to the positive half and this seems
+ # to work better with current AWIPS.
+ mx = (vmax - vmin) / (2 ** (bit_depth - 1) - 1 - num_fills)
+ # NOTE: This is what the line should look like if AWIPS wasn't buggy:
+ # mx = (vmax - vmin) / (2 ** bit_depth - 1 - num_fills)
bx = vmin
if not is_unsigned and not unsigned_in_signed:
bx += 2 ** (bit_depth - 1) * mx
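
A quick worked example of the scale-factor change explained in the NOTE above (hypothetical numbers: 8-bit data, vmin=0.0, vmax=1.0, one fill value); restricting unsigned data to the positive half roughly doubles the scale factor:

    bit_depth, num_fills, vmin, vmax = 8, 1, 0.0, 1.0
    mx_awips_safe = (vmax - vmin) / (2 ** (bit_depth - 1) - 1 - num_fills)  # 1/126 ~= 0.0079
    mx_full_range = (vmax - vmin) / (2 ** bit_depth - 1 - num_fills)        # 1/254 ~= 0.0039
    assert mx_awips_safe > mx_full_range
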
diff --git a/satpy/writers/mitiff.py b/satpy/writers/mitiff.py
index 950fce8b21..3658ac16b7 100644
--- a/satpy/writers/mitiff.py
+++ b/satpy/writers/mitiff.py
@@ -221,6 +221,8 @@ def _add_sizes(self, datasets, first_dataset):
return _image_description
def _add_proj4_string(self, datasets, first_dataset):
+ import warnings
+
proj4_string = " Proj string: "
if isinstance(datasets, list):
@@ -232,7 +234,11 @@ def _add_proj4_string(self, datasets, first_dataset):
if hasattr(area, "crs") and area.crs.to_epsg() is not None:
proj4_string += "+init=EPSG:{}".format(area.crs.to_epsg())
else:
- proj4_string += area.proj_str
+ # Filter out the PROJ warning of losing projection information
+ with warnings.catch_warnings():
+ warnings.filterwarnings("ignore", category=UserWarning,
+ message=r"You will likely lose important projection information")
+ proj4_string += area.proj_str
x_0 = 0
y_0 = 0
diff --git a/setup.py b/setup.py
index cd1c43422e..3439e8fa89 100644
--- a/setup.py
+++ b/setup.py
@@ -23,7 +23,7 @@
from setuptools import find_packages, setup
requires = ["numpy >=1.21", "pillow", "pyresample >=1.24.0", "trollsift",
- "trollimage >=1.20", "pykdtree", "pyyaml >=5.1", "xarray >=0.10.1, !=0.13.0",
+ "trollimage >=1.20", "pykdtree", "pyyaml >=5.1", "xarray >=0.14.1",
"dask[array] >=0.17.1", "pyproj>=2.2", "zarr", "donfig", "appdirs",
"packaging", "pooch", "pyorbital"]
@@ -76,6 +76,8 @@
"doc": ["sphinx", "sphinx_rtd_theme", "sphinxcontrib-apidoc"],
# Other
"geoviews": ["geoviews"],
+ "holoviews": ["holoviews"],
+ "hvplot": ["hvplot", "geoviews", "cartopy", "holoviews"],
"overlays": ["pycoast", "pydecorate"],
"satpos_from_tle": ["skyfield", "astropy"],
"tests": test_requires,