diff --git a/docs/source/transforms.rst b/docs/source/transforms.rst index bd3feb3497..8bd5bfd99f 100644 --- a/docs/source/transforms.rst +++ b/docs/source/transforms.rst @@ -309,6 +309,12 @@ Intensity :members: :special-members: __call__ +`ClipIntensityPercentiles` +"""""""""""""""""""""""""" +.. autoclass:: ClipIntensityPercentiles + :members: + :special-members: __call__ + `RandScaleIntensity` """""""""""""""""""" .. image:: https://raw.githubusercontent.com/Project-MONAI/DocImages/main/transforms/RandScaleIntensity.png @@ -1405,6 +1411,12 @@ Intensity (Dict) :members: :special-members: __call__ +`ClipIntensityPercentilesd` +""""""""""""""""""""""""""" +.. autoclass:: ClipIntensityPercentilesd + :members: + :special-members: __call__ + `RandScaleIntensityd` """"""""""""""""""""" .. image:: https://raw.githubusercontent.com/Project-MONAI/DocImages/main/transforms/RandScaleIntensityd.png diff --git a/monai/transforms/__init__.py b/monai/transforms/__init__.py index 349533fb3e..ab9adb6a99 100644 --- a/monai/transforms/__init__.py +++ b/monai/transforms/__init__.py @@ -92,6 +92,7 @@ from .croppad.functional import crop_func, crop_or_pad_nd, pad_func, pad_nd from .intensity.array import ( AdjustContrast, + ClipIntensityPercentiles, ComputeHoVerMaps, DetectEnvelope, ForegroundMask, @@ -135,6 +136,9 @@ AdjustContrastd, AdjustContrastD, AdjustContrastDict, + ClipIntensityPercentilesd, + ClipIntensityPercentilesD, + ClipIntensityPercentilesDict, ComputeHoVerMapsd, ComputeHoVerMapsD, ComputeHoVerMapsDict, diff --git a/monai/transforms/intensity/array.py b/monai/transforms/intensity/array.py index 0085050ee3..f656475a36 100644 --- a/monai/transforms/intensity/array.py +++ b/monai/transforms/intensity/array.py @@ -30,7 +30,7 @@ from monai.data.utils import get_random_patch, get_valid_patch_size from monai.networks.layers import GaussianFilter, HilbertTransform, MedianFilter, SavitzkyGolayFilter from monai.transforms.transform import RandomizableTransform, Transform -from monai.transforms.utils import Fourier, equalize_hist, is_positive, rescale_array +from monai.transforms.utils import Fourier, equalize_hist, is_positive, rescale_array, soft_clip from monai.transforms.utils_pytorch_numpy_unification import clip, percentile, where from monai.utils.enums import TransformBackends from monai.utils.misc import ensure_tuple, ensure_tuple_rep, ensure_tuple_size, fall_back_tuple @@ -54,6 +54,7 @@ "NormalizeIntensity", "ThresholdIntensity", "ScaleIntensityRange", + "ClipIntensityPercentiles", "AdjustContrast", "RandAdjustContrast", "ScaleIntensityRangePercentiles", @@ -1007,6 +1008,151 @@ def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor: return ret +class ClipIntensityPercentiles(Transform): + """ + Apply clip based on the intensity distribution of input image. + If `sharpness_factor` is provided, the intensity values will be soft clipped according to + f(x) = x + (1/sharpness_factor)*softplus(- c(x - minv)) - (1/sharpness_factor)*softplus(c(x - maxv)) + From https://medium.com/life-at-hopper/clip-it-clip-it-good-1f1bf711b291 + + Soft clipping preserves the order of the values and maintains the gradient everywhere. + For example: + + .. 
code-block:: python
+        :emphasize-lines: 11, 22
+
+        image = torch.Tensor(
+            [[[1, 2, 3, 4, 5],
+            [1, 2, 3, 4, 5],
+            [1, 2, 3, 4, 5],
+            [1, 2, 3, 4, 5],
+            [1, 2, 3, 4, 5],
+            [1, 2, 3, 4, 5]]])
+
+        # Hard clipping from lower and upper image intensity percentiles
+        hard_clipper = ClipIntensityPercentiles(30, 70)
+        print(hard_clipper(image))
+        metatensor([[[2., 2., 3., 4., 4.],
+            [2., 2., 3., 4., 4.],
+            [2., 2., 3., 4., 4.],
+            [2., 2., 3., 4., 4.],
+            [2., 2., 3., 4., 4.],
+            [2., 2., 3., 4., 4.]]])
+
+
+        # Soft clipping from lower and upper image intensity percentiles
+        soft_clipper = ClipIntensityPercentiles(30, 70, 10.)
+        print(soft_clipper(image))
+        metatensor([[[2.0000, 2.0693, 3.0000, 3.9307, 4.0000],
+            [2.0000, 2.0693, 3.0000, 3.9307, 4.0000],
+            [2.0000, 2.0693, 3.0000, 3.9307, 4.0000],
+            [2.0000, 2.0693, 3.0000, 3.9307, 4.0000],
+            [2.0000, 2.0693, 3.0000, 3.9307, 4.0000],
+            [2.0000, 2.0693, 3.0000, 3.9307, 4.0000]]])
+
+    See Also:
+
+        - :py:class:`monai.transforms.ScaleIntensityRangePercentiles`
+    """
+
+    backend = [TransformBackends.TORCH, TransformBackends.NUMPY]
+
+    def __init__(
+        self,
+        lower: float | None,
+        upper: float | None,
+        sharpness_factor: float | None = None,
+        channel_wise: bool = False,
+        return_clipping_values: bool = False,
+        dtype: DtypeLike = np.float32,
+    ) -> None:
+        """
+        Args:
+            lower: lower intensity percentile. For hard clipping, None has the same effect as 0: the lowest
+                input values are not clipped. For soft clipping, None and 0 differ: None leaves the low values
+                untouched, whereas 0 still transforms them according to the soft clipping equation. See
+                https://medium.com/life-at-hopper/clip-it-clip-it-good-1f1bf711b291 for more details.
+            upper: upper intensity percentile, handled symmetrically to `lower` for the highest values. For
+                soft clipping, None leaves the high values untouched, whereas 100 still transforms them
+                according to the soft clipping equation.
+            sharpness_factor: if not None, the intensity values will be soft clipped according to
+                f(x) = x + (1/sharpness_factor)*softplus(- c(x - minv)) - (1/sharpness_factor)*softplus(c(x - maxv)).
+                Defaults to None.
+            channel_wise: if True, compute the intensity percentiles and clip every channel separately.
+                Defaults to False.
+            return_clipping_values: whether to return the calculated percentiles in the tensor meta information.
+                For soft clipping, if a requested percentile is None, None is returned as the corresponding
+                clipping value in the meta information. If channel_wise is True, the clipping values are stored
+                in a list with one entry per channel. Defaults to False.
+            dtype: output data type, if None, same as input image. Defaults to float32.
+ """ + if lower is None and upper is None: + raise ValueError("lower or upper percentiles must be provided") + if lower is not None and (lower < 0.0 or lower > 100.0): + raise ValueError("Percentiles must be in the range [0, 100]") + if upper is not None and (upper < 0.0 or upper > 100.0): + raise ValueError("Percentiles must be in the range [0, 100]") + if upper is not None and lower is not None and upper < lower: + raise ValueError("upper must be greater than or equal to lower") + if sharpness_factor is not None and sharpness_factor <= 0: + raise ValueError("sharpness_factor must be greater than 0") + + self.lower = lower + self.upper = upper + self.sharpness_factor = sharpness_factor + self.channel_wise = channel_wise + if return_clipping_values: + self.clipping_values: list[tuple[float | None, float | None]] = [] + self.return_clipping_values = return_clipping_values + self.dtype = dtype + + def _clip(self, img: NdarrayOrTensor) -> NdarrayOrTensor: + if self.sharpness_factor is not None: + lower_percentile = percentile(img, self.lower) if self.lower is not None else None + upper_percentile = percentile(img, self.upper) if self.upper is not None else None + img = soft_clip(img, self.sharpness_factor, lower_percentile, upper_percentile, self.dtype) + else: + lower_percentile = percentile(img, self.lower) if self.lower is not None else percentile(img, 0) + upper_percentile = percentile(img, self.upper) if self.upper is not None else percentile(img, 100) + img = clip(img, lower_percentile, upper_percentile) + + if self.return_clipping_values: + self.clipping_values.append( + ( + ( + lower_percentile + if lower_percentile is None + else lower_percentile.item() if hasattr(lower_percentile, "item") else lower_percentile + ), + ( + upper_percentile + if upper_percentile is None + else upper_percentile.item() if hasattr(upper_percentile, "item") else upper_percentile + ), + ) + ) + img = convert_to_tensor(img, track_meta=False) + return img + + def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor: + """ + Apply the transform to `img`. + """ + img = convert_to_tensor(img, track_meta=get_track_meta()) + img_t = convert_to_tensor(img, track_meta=False) + if self.channel_wise: + img_t = torch.stack([self._clip(img=d) for d in img_t]) # type: ignore + else: + img_t = self._clip(img=img_t) + + img = convert_to_dst_type(img_t, dst=img)[0] + if self.return_clipping_values: + img.meta["clipping_values"] = self.clipping_values # type: ignore + + return img + + class AdjustContrast(Transform): """ Changes image intensity with gamma transform. 
Each pixel/voxel intensity is updated as:: diff --git a/monai/transforms/intensity/dictionary.py b/monai/transforms/intensity/dictionary.py index 5b911904b0..5dbac485fe 100644 --- a/monai/transforms/intensity/dictionary.py +++ b/monai/transforms/intensity/dictionary.py @@ -26,6 +26,7 @@ from monai.data.meta_obj import get_track_meta from monai.transforms.intensity.array import ( AdjustContrast, + ClipIntensityPercentiles, ComputeHoVerMaps, ForegroundMask, GaussianSharpen, @@ -77,6 +78,7 @@ "NormalizeIntensityd", "ThresholdIntensityd", "ScaleIntensityRanged", + "ClipIntensityPercentilesd", "AdjustContrastd", "RandAdjustContrastd", "ScaleIntensityRangePercentilesd", @@ -122,6 +124,8 @@ "ThresholdIntensityDict", "ScaleIntensityRangeD", "ScaleIntensityRangeDict", + "ClipIntensityPercentilesD", + "ClipIntensityPercentilesDict", "AdjustContrastD", "AdjustContrastDict", "RandAdjustContrastD", @@ -886,6 +890,36 @@ def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> dict[Hashable, N return d +class ClipIntensityPercentilesd(MapTransform): + """ + Dictionary-based wrapper of :py:class:`monai.transforms.ClipIntensityPercentiles`. + Clip the intensity values of input image to a specific range based on the intensity distribution of the input. + If `sharpness_factor` is provided, the intensity values will be soft clipped according to + f(x) = x + (1/sharpness_factor) * softplus(- c(x - minv)) - (1/sharpness_factor)*softplus(c(x - maxv)) + """ + + def __init__( + self, + keys: KeysCollection, + lower: float | None, + upper: float | None, + sharpness_factor: float | None = None, + channel_wise: bool = False, + dtype: DtypeLike = np.float32, + allow_missing_keys: bool = False, + ) -> None: + super().__init__(keys, allow_missing_keys) + self.scaler = ClipIntensityPercentiles( + lower=lower, upper=upper, sharpness_factor=sharpness_factor, channel_wise=channel_wise, dtype=dtype + ) + + def __call__(self, data: dict) -> dict: + d = dict(data) + for key in self.key_iterator(d): + d[key] = self.scaler(d[key]) + return d + + class AdjustContrastd(MapTransform): """ Dictionary-based wrapper of :py:class:`monai.transforms.AdjustContrast`. @@ -1929,6 +1963,7 @@ def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> dict[Hashable, N NormalizeIntensityD = NormalizeIntensityDict = NormalizeIntensityd ThresholdIntensityD = ThresholdIntensityDict = ThresholdIntensityd ScaleIntensityRangeD = ScaleIntensityRangeDict = ScaleIntensityRanged +ClipIntensityPercentilesD = ClipIntensityPercentilesDict = ClipIntensityPercentilesd AdjustContrastD = AdjustContrastDict = AdjustContrastd RandAdjustContrastD = RandAdjustContrastDict = RandAdjustContrastd ScaleIntensityRangePercentilesD = ScaleIntensityRangePercentilesDict = ScaleIntensityRangePercentilesd diff --git a/monai/transforms/utils.py b/monai/transforms/utils.py index 455b0cfb7d..14f35e1219 100644 --- a/monai/transforms/utils.py +++ b/monai/transforms/utils.py @@ -38,6 +38,7 @@ nonzero, ravel, searchsorted, + softplus, unique, unravel_index, where, @@ -131,9 +132,45 @@ "resolves_modes", "has_status_keys", "distance_transform_edt", + "soft_clip", ] +def soft_clip( + arr: NdarrayOrTensor, + sharpness_factor: float = 1.0, + minv: NdarrayOrTensor | float | int | None = None, + maxv: NdarrayOrTensor | float | int | None = None, + dtype: DtypeLike | torch.dtype = np.float32, +) -> NdarrayOrTensor: + """ + Apply soft clip to the input array or tensor. 
+ The intensity values will be soft clipped according to + f(x) = x + (1/sharpness_factor)*softplus(- c(x - minv)) - (1/sharpness_factor)*softplus(c(x - maxv)) + From https://medium.com/life-at-hopper/clip-it-clip-it-good-1f1bf711b291 + + To perform one-sided clipping, set either minv or maxv to None. + Args: + arr: input array to clip. + sharpness_factor: the sharpness of the soft clip function, default to 1. + minv: minimum value of target clipped array. + maxv: maximum value of target clipped array. + dtype: if not None, convert input array to dtype before computation. + + """ + + if dtype is not None: + arr, *_ = convert_data_type(arr, dtype=dtype) + + v = arr + if minv is not None: + v = v + softplus(-sharpness_factor * (arr - minv)) / sharpness_factor + if maxv is not None: + v = v - softplus(sharpness_factor * (arr - maxv)) / sharpness_factor + + return v + + def rand_choice(prob: float = 0.5) -> bool: """ Returns True if a randomly chosen number is less than or equal to `prob`, by default this is a 50/50 chance. diff --git a/monai/transforms/utils_pytorch_numpy_unification.py b/monai/transforms/utils_pytorch_numpy_unification.py index 0774d50314..020d99af16 100644 --- a/monai/transforms/utils_pytorch_numpy_unification.py +++ b/monai/transforms/utils_pytorch_numpy_unification.py @@ -52,9 +52,24 @@ "median", "mean", "std", + "softplus", ] +def softplus(x: NdarrayOrTensor) -> NdarrayOrTensor: + """stable softplus through `np.logaddexp` with equivalent implementation for torch. + + Args: + x: array/tensor. + + Returns: + Softplus of the input. + """ + if isinstance(x, np.ndarray): + return np.logaddexp(np.zeros_like(x), x) + return torch.logaddexp(torch.zeros_like(x), x) + + def allclose(a: NdarrayTensor, b: NdarrayOrTensor, rtol=1e-5, atol=1e-8, equal_nan=False) -> bool: """`np.allclose` with equivalent implementation for torch.""" b, *_ = convert_to_dst_type(b, a, wrap_sequence=True) diff --git a/tests/test_clip_intensity_percentiles.py b/tests/test_clip_intensity_percentiles.py new file mode 100644 index 0000000000..f5fe07a323 --- /dev/null +++ b/tests/test_clip_intensity_percentiles.py @@ -0,0 +1,185 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
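+
+# The tests below compare ClipIntensityPercentiles against reference results computed with np.percentile
+# plus hard clip / soft_clip, for 2D and 3D inputs across the supported array backends.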
+from __future__ import annotations + +import unittest + +import numpy as np +from parameterized import parameterized + +from monai.transforms import ClipIntensityPercentiles +from monai.transforms.utils import soft_clip +from monai.transforms.utils_pytorch_numpy_unification import clip +from tests.utils import TEST_NDARRAYS, NumpyImageTestCase2D, NumpyImageTestCase3D, assert_allclose + + +class TestClipIntensityPercentiles2D(NumpyImageTestCase2D): + + @parameterized.expand([[p] for p in TEST_NDARRAYS]) + def test_hard_clipping_two_sided(self, p): + hard_clipper = ClipIntensityPercentiles(upper=95, lower=5) + im = p(self.imt) + result = hard_clipper(im) + lower, upper = np.percentile(self.imt, (5, 95)) + expected = clip(self.imt, lower, upper) + assert_allclose(result, p(expected), type_test="tensor", rtol=1e-7, atol=0) + + @parameterized.expand([[p] for p in TEST_NDARRAYS]) + def test_hard_clipping_one_sided_high(self, p): + hard_clipper = ClipIntensityPercentiles(upper=95, lower=None) + im = p(self.imt) + result = hard_clipper(im) + lower, upper = np.percentile(self.imt, (0, 95)) + expected = clip(self.imt, lower, upper) + assert_allclose(result, p(expected), type_test="tensor", rtol=1e-7, atol=0) + + @parameterized.expand([[p] for p in TEST_NDARRAYS]) + def test_hard_clipping_one_sided_low(self, p): + hard_clipper = ClipIntensityPercentiles(upper=None, lower=5) + im = p(self.imt) + result = hard_clipper(im) + lower, upper = np.percentile(self.imt, (5, 100)) + expected = clip(self.imt, lower, upper) + assert_allclose(result, p(expected), type_test="tensor", rtol=1e-7, atol=0) + + @parameterized.expand([[p] for p in TEST_NDARRAYS]) + def test_soft_clipping_two_sided(self, p): + soft_clipper = ClipIntensityPercentiles(upper=95, lower=5, sharpness_factor=1.0) + im = p(self.imt) + result = soft_clipper(im) + lower, upper = np.percentile(self.imt, (5, 95)) + expected = soft_clip(self.imt, sharpness_factor=1.0, minv=lower, maxv=upper) + # the rtol is set to 1e-6 because the logaddexp function used in softplus is not stable accross torch and numpy + assert_allclose(result, p(expected), type_test="tensor", rtol=1e-6, atol=0) + + @parameterized.expand([[p] for p in TEST_NDARRAYS]) + def test_soft_clipping_one_sided_high(self, p): + soft_clipper = ClipIntensityPercentiles(upper=95, lower=None, sharpness_factor=1.0) + im = p(self.imt) + result = soft_clipper(im) + upper = np.percentile(self.imt, 95) + expected = soft_clip(self.imt, sharpness_factor=1.0, minv=None, maxv=upper) + # the rtol is set to 5e-5 because the logaddexp function used in softplus is not stable accross torch and numpy + assert_allclose(result, p(expected), type_test="tensor", rtol=5e-5, atol=0) + + @parameterized.expand([[p] for p in TEST_NDARRAYS]) + def test_soft_clipping_one_sided_low(self, p): + soft_clipper = ClipIntensityPercentiles(upper=None, lower=5, sharpness_factor=1.0) + im = p(self.imt) + result = soft_clipper(im) + lower = np.percentile(self.imt, 5) + expected = soft_clip(self.imt, sharpness_factor=1.0, minv=lower, maxv=None) + # the rtol is set to 1e-6 because the logaddexp function used in softplus is not stable accross torch and numpy + assert_allclose(result, p(expected), type_test="tensor", rtol=1e-6, atol=0) + + @parameterized.expand([[p] for p in TEST_NDARRAYS]) + def test_channel_wise(self, p): + clipper = ClipIntensityPercentiles(upper=95, lower=5, channel_wise=True) + im = p(self.imt) + result = clipper(im) + for i, c in enumerate(self.imt): + lower, upper = np.percentile(c, (5, 95)) + expected = clip(c, 
lower, upper) + assert_allclose(result[i], p(expected), type_test="tensor", rtol=1e-7, atol=0) + + def test_ill_sharpness_factor(self): + with self.assertRaises(ValueError): + ClipIntensityPercentiles(upper=95, lower=5, sharpness_factor=0.0) + + def test_ill_lower_percentile(self): + with self.assertRaises(ValueError): + ClipIntensityPercentiles(upper=None, lower=-1) + + def test_ill_upper_percentile(self): + with self.assertRaises(ValueError): + ClipIntensityPercentiles(upper=101, lower=None) + + def test_ill_percentiles(self): + with self.assertRaises(ValueError): + ClipIntensityPercentiles(upper=95, lower=96) + + def test_ill_both_none(self): + with self.assertRaises(ValueError): + ClipIntensityPercentiles(upper=None, lower=None) + + +class TestClipIntensityPercentiles3D(NumpyImageTestCase3D): + + @parameterized.expand([[p] for p in TEST_NDARRAYS]) + def test_hard_clipping_two_sided(self, p): + hard_clipper = ClipIntensityPercentiles(upper=95, lower=5) + im = p(self.imt) + result = hard_clipper(im) + lower, upper = np.percentile(self.imt, (5, 95)) + expected = clip(self.imt, lower, upper) + assert_allclose(result, p(expected), type_test="tensor", rtol=1e-7, atol=0) + + @parameterized.expand([[p] for p in TEST_NDARRAYS]) + def test_hard_clipping_one_sided_high(self, p): + hard_clipper = ClipIntensityPercentiles(upper=95, lower=None) + im = p(self.imt) + result = hard_clipper(im) + lower, upper = np.percentile(self.imt, (0, 95)) + expected = clip(self.imt, lower, upper) + assert_allclose(result, p(expected), type_test="tensor", rtol=1e-7, atol=0) + + @parameterized.expand([[p] for p in TEST_NDARRAYS]) + def test_hard_clipping_one_sided_low(self, p): + hard_clipper = ClipIntensityPercentiles(upper=None, lower=5) + im = p(self.imt) + result = hard_clipper(im) + lower, upper = np.percentile(self.imt, (5, 100)) + expected = clip(self.imt, lower, upper) + assert_allclose(result, p(expected), type_test="tensor", rtol=1e-7, atol=0) + + @parameterized.expand([[p] for p in TEST_NDARRAYS]) + def test_soft_clipping_two_sided(self, p): + soft_clipper = ClipIntensityPercentiles(upper=95, lower=5, sharpness_factor=1.0) + im = p(self.imt) + result = soft_clipper(im) + lower, upper = np.percentile(self.imt, (5, 95)) + expected = soft_clip(self.imt, sharpness_factor=1.0, minv=lower, maxv=upper) + # the rtol is set to 1e-6 because the logaddexp function used in softplus is not stable accross torch and numpy + assert_allclose(result, p(expected), type_test="tensor", rtol=1e-6, atol=0) + + @parameterized.expand([[p] for p in TEST_NDARRAYS]) + def test_soft_clipping_one_sided_high(self, p): + soft_clipper = ClipIntensityPercentiles(upper=95, lower=None, sharpness_factor=1.0) + im = p(self.imt) + result = soft_clipper(im) + upper = np.percentile(self.imt, 95) + expected = soft_clip(self.imt, sharpness_factor=1.0, minv=None, maxv=upper) + # the rtol is set to 5e-5 because the logaddexp function used in softplus is not stable accross torch and numpy + assert_allclose(result, p(expected), type_test="tensor", rtol=5e-5, atol=0) + + @parameterized.expand([[p] for p in TEST_NDARRAYS]) + def test_soft_clipping_one_sided_low(self, p): + soft_clipper = ClipIntensityPercentiles(upper=None, lower=5, sharpness_factor=1.0) + im = p(self.imt) + result = soft_clipper(im) + lower = np.percentile(self.imt, 5) + expected = soft_clip(self.imt, sharpness_factor=1.0, minv=lower, maxv=None) + # the rtol is set to 1e-6 because the logaddexp function used in softplus is not stable accross torch and numpy + assert_allclose(result, 
p(expected), type_test="tensor", rtol=1e-6, atol=0) + + @parameterized.expand([[p] for p in TEST_NDARRAYS]) + def test_channel_wise(self, p): + clipper = ClipIntensityPercentiles(upper=95, lower=5, channel_wise=True) + im = p(self.imt) + result = clipper(im) + for i, c in enumerate(self.imt): + lower, upper = np.percentile(c, (5, 95)) + expected = clip(c, lower, upper) + assert_allclose(result[i], p(expected), type_test="tensor", rtol=1e-7, atol=0) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/test_clip_intensity_percentilesd.py b/tests/test_clip_intensity_percentilesd.py new file mode 100644 index 0000000000..193fa8b487 --- /dev/null +++ b/tests/test_clip_intensity_percentilesd.py @@ -0,0 +1,203 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +import unittest + +import numpy as np +from parameterized import parameterized + +from monai.transforms import ClipIntensityPercentilesd +from monai.transforms.utils import soft_clip +from monai.transforms.utils_pytorch_numpy_unification import clip +from tests.utils import TEST_NDARRAYS, NumpyImageTestCase2D, NumpyImageTestCase3D, assert_allclose + + +class TestClipIntensityPercentilesd2D(NumpyImageTestCase2D): + @parameterized.expand([[p] for p in TEST_NDARRAYS]) + def test_hard_clipping_two_sided(self, p): + key = "img" + hard_clipper = ClipIntensityPercentilesd(keys=[key], upper=95, lower=5) + im = p(self.imt) + result = hard_clipper({key: im}) + lower, upper = np.percentile(self.imt, (5, 95)) + expected = clip(self.imt, lower, upper) + assert_allclose(result[key], p(expected), type_test="tensor", rtol=1e-7, atol=0) + + @parameterized.expand([[p] for p in TEST_NDARRAYS]) + def test_hard_clipping_one_sided_high(self, p): + key = "img" + hard_clipper = ClipIntensityPercentilesd(keys=[key], upper=95, lower=None) + im = p(self.imt) + result = hard_clipper({key: im}) + lower, upper = np.percentile(self.imt, (0, 95)) + expected = clip(self.imt, lower, upper) + assert_allclose(result[key], p(expected), type_test="tensor", rtol=1e-7, atol=0) + + @parameterized.expand([[p] for p in TEST_NDARRAYS]) + def test_hard_clipping_one_sided_low(self, p): + key = "img" + hard_clipper = ClipIntensityPercentilesd(keys=[key], upper=None, lower=5) + im = p(self.imt) + result = hard_clipper({key: im}) + lower, upper = np.percentile(self.imt, (5, 100)) + expected = clip(self.imt, lower, upper) + assert_allclose(result[key], p(expected), type_test="tensor", rtol=1e-7, atol=0) + + @parameterized.expand([[p] for p in TEST_NDARRAYS]) + def test_soft_clipping_two_sided(self, p): + key = "img" + soft_clipper = ClipIntensityPercentilesd(keys=[key], upper=95, lower=5, sharpness_factor=1.0) + im = p(self.imt) + result = soft_clipper({key: im}) + lower, upper = np.percentile(self.imt, (5, 95)) + expected = soft_clip(self.imt, sharpness_factor=1.0, minv=lower, maxv=upper) + # the rtol is set to 1e-6 because the logaddexp function used in softplus is not stable accross torch and numpy + 
assert_allclose(result[key], p(expected), type_test="tensor", rtol=1e-6, atol=0) + + @parameterized.expand([[p] for p in TEST_NDARRAYS]) + def test_soft_clipping_one_sided_high(self, p): + key = "img" + soft_clipper = ClipIntensityPercentilesd(keys=[key], upper=95, lower=None, sharpness_factor=1.0) + im = p(self.imt) + result = soft_clipper({key: im}) + upper = np.percentile(self.imt, 95) + expected = soft_clip(self.imt, sharpness_factor=1.0, minv=None, maxv=upper) + # the rtol is set to 5e-5 because the logaddexp function used in softplus is not stable accross torch and numpy + assert_allclose(result[key], p(expected), type_test="tensor", rtol=5e-5, atol=0) + + @parameterized.expand([[p] for p in TEST_NDARRAYS]) + def test_soft_clipping_one_sided_low(self, p): + key = "img" + soft_clipper = ClipIntensityPercentilesd(keys=[key], upper=None, lower=5, sharpness_factor=1.0) + im = p(self.imt) + result = soft_clipper({key: im}) + lower = np.percentile(self.imt, 5) + expected = soft_clip(self.imt, sharpness_factor=1.0, minv=lower, maxv=None) + # the rtol is set to 1e-6 because the logaddexp function used in softplus is not stable accross torch and numpy + assert_allclose(result[key], p(expected), type_test="tensor", rtol=1e-6, atol=0) + + @parameterized.expand([[p] for p in TEST_NDARRAYS]) + def test_channel_wise(self, p): + key = "img" + clipper = ClipIntensityPercentilesd(keys=[key], upper=95, lower=5, channel_wise=True) + im = p(self.imt) + result = clipper({key: im}) + for i, c in enumerate(self.imt): + lower, upper = np.percentile(c, (5, 95)) + expected = clip(c, lower, upper) + assert_allclose(result[key][i], p(expected), type_test="tensor", rtol=1e-7, atol=0) + + def test_ill_sharpness_factor(self): + key = "img" + with self.assertRaises(ValueError): + ClipIntensityPercentilesd(keys=[key], upper=95, lower=5, sharpness_factor=0.0) + + def test_ill_lower_percentile(self): + key = "img" + with self.assertRaises(ValueError): + ClipIntensityPercentilesd(keys=[key], upper=None, lower=-1) + + def test_ill_upper_percentile(self): + key = "img" + with self.assertRaises(ValueError): + ClipIntensityPercentilesd(keys=[key], upper=101, lower=None) + + def test_ill_percentiles(self): + key = "img" + with self.assertRaises(ValueError): + ClipIntensityPercentilesd(keys=[key], upper=95, lower=96) + + def test_ill_both_none(self): + key = "img" + with self.assertRaises(ValueError): + ClipIntensityPercentilesd(keys=[key], upper=None, lower=None) + + +class TestClipIntensityPercentilesd3D(NumpyImageTestCase3D): + @parameterized.expand([[p] for p in TEST_NDARRAYS]) + def test_hard_clipping_two_sided(self, p): + key = "img" + hard_clipper = ClipIntensityPercentilesd(keys=[key], upper=95, lower=5) + im = p(self.imt) + result = hard_clipper({key: im}) + lower, upper = np.percentile(self.imt, (5, 95)) + expected = clip(self.imt, lower, upper) + assert_allclose(result[key], p(expected), type_test="tensor", rtol=1e-7, atol=0) + + @parameterized.expand([[p] for p in TEST_NDARRAYS]) + def test_hard_clipping_one_sided_high(self, p): + key = "img" + hard_clipper = ClipIntensityPercentilesd(keys=[key], upper=95, lower=None) + im = p(self.imt) + result = hard_clipper({key: im}) + lower, upper = np.percentile(self.imt, (0, 95)) + expected = clip(self.imt, lower, upper) + assert_allclose(result[key], p(expected), type_test="tensor", rtol=1e-7, atol=0) + + @parameterized.expand([[p] for p in TEST_NDARRAYS]) + def test_hard_clipping_one_sided_low(self, p): + key = "img" + hard_clipper = ClipIntensityPercentilesd(keys=[key], 
upper=None, lower=5) + im = p(self.imt) + result = hard_clipper({key: im}) + lower, upper = np.percentile(self.imt, (5, 100)) + expected = clip(self.imt, lower, upper) + assert_allclose(result[key], p(expected), type_test="tensor", rtol=1e-7, atol=0) + + @parameterized.expand([[p] for p in TEST_NDARRAYS]) + def test_soft_clipping_two_sided(self, p): + key = "img" + soft_clipper = ClipIntensityPercentilesd(keys=[key], upper=95, lower=5, sharpness_factor=1.0) + im = p(self.imt) + result = soft_clipper({key: im}) + lower, upper = np.percentile(self.imt, (5, 95)) + expected = soft_clip(self.imt, sharpness_factor=1.0, minv=lower, maxv=upper) + # the rtol is set to 1e-6 because the logaddexp function used in softplus is not stable accross torch and numpy + assert_allclose(result[key], p(expected), type_test="tensor", rtol=1e-6, atol=0) + + @parameterized.expand([[p] for p in TEST_NDARRAYS]) + def test_soft_clipping_one_sided_high(self, p): + key = "img" + soft_clipper = ClipIntensityPercentilesd(keys=[key], upper=95, lower=None, sharpness_factor=1.0) + im = p(self.imt) + result = soft_clipper({key: im}) + upper = np.percentile(self.imt, 95) + expected = soft_clip(self.imt, sharpness_factor=1.0, minv=None, maxv=upper) + # the rtol is set to 5e-5 because the logaddexp function used in softplus is not stable accross torch and numpy + assert_allclose(result[key], p(expected), type_test="tensor", rtol=5e-5, atol=0) + + @parameterized.expand([[p] for p in TEST_NDARRAYS]) + def test_soft_clipping_one_sided_low(self, p): + key = "img" + soft_clipper = ClipIntensityPercentilesd(keys=[key], upper=None, lower=5, sharpness_factor=1.0) + im = p(self.imt) + result = soft_clipper({key: im}) + lower = np.percentile(self.imt, 5) + expected = soft_clip(self.imt, sharpness_factor=1.0, minv=lower, maxv=None) + # the rtol is set to 1e-6 because the logaddexp function used in softplus is not stable accross torch and numpy + assert_allclose(result[key], p(expected), type_test="tensor", rtol=1e-6, atol=0) + + @parameterized.expand([[p] for p in TEST_NDARRAYS]) + def test_channel_wise(self, p): + key = "img" + clipper = ClipIntensityPercentilesd(keys=[key], upper=95, lower=5, channel_wise=True) + im = p(self.imt) + result = clipper({key: im}) + for i, c in enumerate(self.imt): + lower, upper = np.percentile(c, (5, 95)) + expected = clip(c, lower, upper) + assert_allclose(result[key][i], p(expected), type_test="tensor", rtol=1e-7, atol=0) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/test_soft_clip.py b/tests/test_soft_clip.py new file mode 100644 index 0000000000..de5122e982 --- /dev/null +++ b/tests/test_soft_clip.py @@ -0,0 +1,125 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
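+
+# Tests for the soft_clip utility: each case applies soft_clip to a 0..9 ramp (torch and numpy) with
+# different sharpness factors and one- or two-sided bounds, and compares against precomputed expected values.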
+ +from __future__ import annotations + +import unittest + +import numpy as np +import torch +from parameterized import parameterized + +from monai.transforms.utils import soft_clip + +TEST_CASES = [ + [ + {"minv": 2, "maxv": 8, "sharpness_factor": 10}, + { + "input": torch.arange(10).float(), + "clipped": torch.tensor([2.0000, 2.0000, 2.0693, 3.0000, 4.0000, 5.0000, 6.0000, 7.0000, 7.9307, 8.0000]), + }, + ], + [ + {"minv": 2, "maxv": None, "sharpness_factor": 10}, + { + "input": torch.arange(10).float(), + "clipped": torch.tensor([2.0000, 2.0000, 2.0693, 3.0000, 4.0000, 5.0000, 6.0000, 7.0000, 8.0000, 9.0000]), + }, + ], + [ + {"minv": None, "maxv": 7, "sharpness_factor": 10}, + { + "input": torch.arange(10).float(), + "clipped": torch.tensor([0.0000, 1.0000, 2.0000, 3.0000, 4.0000, 5.0000, 6.0000, 6.9307, 7.0000, 7.0000]), + }, + ], + [ + {"minv": 2, "maxv": 8, "sharpness_factor": 1.0}, + { + "input": torch.arange(10).float(), + "clipped": torch.tensor([2.1266, 2.3124, 2.6907, 3.3065, 4.1088, 5.0000, 5.8912, 6.6935, 7.3093, 7.6877]), + }, + ], + [ + {"minv": 2, "maxv": 8, "sharpness_factor": 3.0}, + { + "input": torch.arange(10).float(), + "clipped": torch.tensor([2.0008, 2.0162, 2.2310, 3.0162, 4.0008, 5.0000, 5.9992, 6.9838, 7.7690, 7.9838]), + }, + ], + [ + {"minv": 2, "maxv": 8, "sharpness_factor": 5.0}, + { + "input": torch.arange(10).float(), + "clipped": torch.tensor([2.0000, 2.0013, 2.1386, 3.0013, 4.0000, 5.0000, 6.0000, 6.9987, 7.8614, 7.9987]), + }, + ], + [ + {"minv": 2, "maxv": 8, "sharpness_factor": 10}, + { + "input": np.arange(10).astype(np.float32), + "clipped": np.array([2.0000, 2.0000, 2.0693, 3.0000, 4.0000, 5.0000, 6.0000, 7.0000, 7.9307, 8.0000]), + }, + ], + [ + {"minv": 2, "maxv": None, "sharpness_factor": 10}, + { + "input": np.arange(10).astype(float), + "clipped": np.array([2.0000, 2.0000, 2.0693, 3.0000, 4.0000, 5.0000, 6.0000, 7.0000, 8.0000, 9.0000]), + }, + ], + [ + {"minv": None, "maxv": 7, "sharpness_factor": 10}, + { + "input": np.arange(10).astype(float), + "clipped": np.array([0.0000, 1.0000, 2.0000, 3.0000, 4.0000, 5.0000, 6.0000, 6.9307, 7.0000, 7.0000]), + }, + ], + [ + {"minv": 2, "maxv": 8, "sharpness_factor": 1.0}, + { + "input": np.arange(10).astype(float), + "clipped": np.array([2.1266, 2.3124, 2.6907, 3.3065, 4.1088, 5.0000, 5.8912, 6.6935, 7.3093, 7.6877]), + }, + ], + [ + {"minv": 2, "maxv": 8, "sharpness_factor": 3.0}, + { + "input": np.arange(10).astype(float), + "clipped": np.array([2.0008, 2.0162, 2.2310, 3.0162, 4.0008, 5.0000, 5.9992, 6.9838, 7.7690, 7.9838]), + }, + ], + [ + {"minv": 2, "maxv": 8, "sharpness_factor": 5.0}, + { + "input": np.arange(10).astype(float), + "clipped": np.array([2.0000, 2.0013, 2.1386, 3.0013, 4.0000, 5.0000, 6.0000, 6.9987, 7.8614, 7.9987]), + }, + ], +] + + +class TestSoftClip(unittest.TestCase): + + @parameterized.expand(TEST_CASES) + def test_result(self, input_param, input_data): + outputs = soft_clip(input_data["input"], **input_param) + expected_val = input_data["clipped"] + if isinstance(outputs, torch.Tensor): + np.testing.assert_allclose( + outputs.detach().cpu().numpy(), expected_val.detach().cpu().numpy(), atol=1e-4, rtol=1e-4 + ) + else: + np.testing.assert_allclose(outputs, expected_val, atol=1e-4, rtol=1e-4) + + +if __name__ == "__main__": + unittest.main()
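
A minimal usage sketch of the transforms added above (the input tensor, its shape, and the percentile values are arbitrary illustrative choices, not part of the patch)::

    import torch

    from monai.transforms import ClipIntensityPercentiles, ClipIntensityPercentilesd

    # arbitrary single-channel ramp image with values 0..24
    img = torch.arange(25, dtype=torch.float32).reshape(1, 5, 5)

    # hard clip to the [5th, 95th] percentile range and keep the computed bounds in the metadata
    hard = ClipIntensityPercentiles(lower=5, upper=95, return_clipping_values=True)
    out = hard(img)
    print(out.min(), out.max())         # intensities are now bounded by the 5th/95th percentiles
    print(out.meta["clipping_values"])  # [(lower_percentile, upper_percentile)]

    # soft clip instead: order-preserving and differentiable everywhere
    soft = ClipIntensityPercentiles(lower=5, upper=95, sharpness_factor=1.0)
    print(soft(img))

    # dictionary-based variant for use in data pipelines
    softd = ClipIntensityPercentilesd(keys="image", lower=5, upper=95, sharpness_factor=1.0)
    print(softd({"image": img})["image"])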