diff --git a/docs/source/transforms.rst b/docs/source/transforms.rst index 7081f2ac95..c0ffbc144c 100644 --- a/docs/source/transforms.rst +++ b/docs/source/transforms.rst @@ -317,6 +317,18 @@ Intensity :members: :special-members: __call__ +`ScaleIntensityFixedMean` +""""""""""""""""""""""""" +.. autoclass:: ScaleIntensityFixedMean + :members: + :special-members: __call__ + +`RandScaleIntensityFixedMean` +""""""""""""""""""""""""""""" +.. autoclass:: RandScaleIntensityFixedMean + :members: + :special-members: __call__ + `NormalizeIntensity` """""""""""""""""""" .. image:: https://github.com/Project-MONAI/DocImages/raw/main/transforms/NormalizeIntensity.png @@ -1386,6 +1398,12 @@ Intensity (Dict) :members: :special-members: __call__ +`RandScaleIntensityFixedMeand` +""""""""""""""""""""""""""""""" +.. autoclass:: RandScaleIntensityFixedMeand + :members: + :special-members: __call__ + `NormalizeIntensityd` """"""""""""""""""""" .. image:: https://github.com/Project-MONAI/DocImages/raw/main/transforms/NormalizeIntensityd.png diff --git a/monai/transforms/__init__.py b/monai/transforms/__init__.py index 84817d17b0..cdad6ec6c3 100644 --- a/monai/transforms/__init__.py +++ b/monai/transforms/__init__.py @@ -118,10 +118,12 @@ RandKSpaceSpikeNoise, RandRicianNoise, RandScaleIntensity, + RandScaleIntensityFixedMean, RandShiftIntensity, RandStdShiftIntensity, SavitzkyGolaySmooth, ScaleIntensity, + ScaleIntensityFixedMean, ScaleIntensityRange, ScaleIntensityRangePercentiles, ShiftIntensity, @@ -198,6 +200,9 @@ RandScaleIntensityd, RandScaleIntensityD, RandScaleIntensityDict, + RandScaleIntensityFixedMeand, + RandScaleIntensityFixedMeanD, + RandScaleIntensityFixedMeanDict, RandShiftIntensityd, RandShiftIntensityD, RandShiftIntensityDict, diff --git a/monai/transforms/intensity/array.py b/monai/transforms/intensity/array.py index f61e022c5c..4467e78f12 100644 --- a/monai/transforms/intensity/array.py +++ b/monai/transforms/intensity/array.py @@ -48,6 +48,8 @@ "RandBiasField", "ScaleIntensity", "RandScaleIntensity", + "ScaleIntensityFixedMean", + "RandScaleIntensityFixedMean", "NormalizeIntensity", "ThresholdIntensity", "ScaleIntensityRange", @@ -466,6 +468,161 @@ def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor: return ret +class ScaleIntensityFixedMean(Transform): + """ + Scale the intensity of input image by ``v = v * (1 + factor)``, then shift the output so that the output image has the + same mean as the input. + """ + + backend = [TransformBackends.TORCH, TransformBackends.NUMPY] + + def __init__( + self, + factor: float = 0, + preserve_range: bool = False, + fixed_mean: bool = True, + channel_wise: bool = False, + dtype: DtypeLike = np.float32, + ) -> None: + """ + Args: + factor: factor scale by ``v = v * (1 + factor)``. + preserve_range: clips the output array/tensor to the range of the input array/tensor + fixed_mean: subtract the mean intensity before scaling with `factor`, then add the same value after scaling + to ensure that the output has the same mean as the input. + channel_wise: if True, scale on each channel separately. `preserve_range` and `fixed_mean` are also applied + on each channel separately if `channel_wise` is True. Please ensure that the first dimension represents the + channel of the image if True. + dtype: output data type, if None, same as input image. defaults to float32. 
+ """ + self.factor = factor + self.preserve_range = preserve_range + self.fixed_mean = fixed_mean + self.channel_wise = channel_wise + self.dtype = dtype + + def __call__(self, img: NdarrayOrTensor, factor=None) -> NdarrayOrTensor: + """ + Apply the transform to `img`. + Args: + img: the input tensor/array + factor: factor scale by ``v = v * (1 + factor)`` + + """ + + factor = factor if factor is not None else self.factor + + img = convert_to_tensor(img, track_meta=get_track_meta()) + img_t = convert_to_tensor(img, track_meta=False) + ret: NdarrayOrTensor + if self.channel_wise: + out = [] + for d in img_t: + if self.preserve_range: + clip_min = d.min() + clip_max = d.max() + + if self.fixed_mean: + mn = d.mean() + d = d - mn + + out_channel = d * (1 + factor) + + if self.fixed_mean: + out_channel = out_channel + mn + + if self.preserve_range: + out_channel = clip(out_channel, clip_min, clip_max) + + out.append(out_channel) + ret = torch.stack(out) # type: ignore + else: + if self.preserve_range: + clip_min = img_t.min() + clip_max = img_t.max() + + if self.fixed_mean: + mn = img_t.mean() + img_t = img_t - mn + + ret = img_t * (1 + factor) + + if self.fixed_mean: + ret = ret + mn + + if self.preserve_range: + ret = clip(ret, clip_min, clip_max) + + ret = convert_to_dst_type(ret, dst=img, dtype=self.dtype or img_t.dtype)[0] + return ret + + +class RandScaleIntensityFixedMean(RandomizableTransform): + """ + Randomly scale the intensity of input image by ``v = v * (1 + factor)`` where the `factor` + is randomly picked. Subtract the mean intensity before scaling with `factor`, then add the same value after scaling + to ensure that the output has the same mean as the input. + """ + + backend = ScaleIntensityFixedMean.backend + + def __init__( + self, + prob: float = 0.1, + factors: Sequence[float] | float = 0, + fixed_mean: bool = True, + preserve_range: bool = False, + dtype: DtypeLike = np.float32, + ) -> None: + """ + Args: + factors: factor range to randomly scale by ``v = v * (1 + factor)``. + if single number, factor value is picked from (-factors, factors). + preserve_range: clips the output array/tensor to the range of the input array/tensor + fixed_mean: subtract the mean intensity before scaling with `factor`, then add the same value after scaling + to ensure that the output has the same mean as the input. + channel_wise: if True, scale on each channel separately. `preserve_range` and `fixed_mean` are also applied + on each channel separately if `channel_wise` is True. Please ensure that the first dimension represents the + channel of the image if True. + dtype: output data type, if None, same as input image. defaults to float32. 
+ + """ + RandomizableTransform.__init__(self, prob) + if isinstance(factors, (int, float)): + self.factors = (min(-factors, factors), max(-factors, factors)) + elif len(factors) != 2: + raise ValueError("factors should be a number or pair of numbers.") + else: + self.factors = (min(factors), max(factors)) + self.factor = self.factors[0] + self.fixed_mean = fixed_mean + self.preserve_range = preserve_range + self.dtype = dtype + + self.scaler = ScaleIntensityFixedMean( + factor=self.factor, fixed_mean=self.fixed_mean, preserve_range=self.preserve_range, dtype=self.dtype + ) + + def randomize(self, data: Any | None = None) -> None: + super().randomize(None) + if not self._do_transform: + return None + self.factor = self.R.uniform(low=self.factors[0], high=self.factors[1]) + + def __call__(self, img: NdarrayOrTensor, randomize: bool = True) -> NdarrayOrTensor: + """ + Apply the transform to `img`. + """ + img = convert_to_tensor(img, track_meta=get_track_meta()) + if randomize: + self.randomize() + + if not self._do_transform: + return convert_data_type(img, dtype=self.dtype)[0] + + return self.scaler(img, self.factor) + + class RandScaleIntensity(RandomizableTransform): """ Randomly scale the intensity of input image by ``v = v * (1 + factor)`` where the `factor` @@ -799,36 +956,70 @@ def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor: class AdjustContrast(Transform): """ - Changes image intensity by gamma. Each pixel/voxel intensity is updated as:: + Changes image intensity with gamma transform. Each pixel/voxel intensity is updated as:: x = ((x - min) / intensity_range) ^ gamma * intensity_range + min Args: gamma: gamma value to adjust the contrast as function. + invert_image: whether to invert the image before applying gamma augmentation. If True, multiply all intensity + values with -1 before the gamma transform and again after the gamma transform. This behaviour is mimicked + from `nnU-Net `_, specifically `this + `_ + function. + retain_stats: if True, applies a scaling factor and an offset to all intensity values after gamma transform to + ensure that the output intensity distribution has the same mean and standard deviation as the intensity + distribution of the input. This behaviour is mimicked from `nnU-Net + `_, specifically `this + `_ + function. """ backend = [TransformBackends.TORCH, TransformBackends.NUMPY] - def __init__(self, gamma: float) -> None: + def __init__(self, gamma: float, invert_image: bool = False, retain_stats: bool = False) -> None: if not isinstance(gamma, (int, float)): raise ValueError(f"gamma must be a float or int number, got {type(gamma)} {gamma}.") self.gamma = gamma + self.invert_image = invert_image + self.retain_stats = retain_stats - def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor: + def __call__(self, img: NdarrayOrTensor, gamma=None) -> NdarrayOrTensor: """ Apply the transform to `img`. + gamma: gamma value to adjust the contrast as function. 
""" img = convert_to_tensor(img, track_meta=get_track_meta()) + gamma = gamma if gamma is not None else self.gamma + + if self.invert_image: + img = -img + + if self.retain_stats: + mn = img.mean() + sd = img.std() + epsilon = 1e-7 img_min = img.min() img_range = img.max() - img_min - ret: NdarrayOrTensor = ((img - img_min) / float(img_range + epsilon)) ** self.gamma * img_range + img_min + ret: NdarrayOrTensor = ((img - img_min) / float(img_range + epsilon)) ** gamma * img_range + img_min + + if self.retain_stats: + # zero mean and normalize + ret = ret - ret.mean() + ret = ret / (ret.std() + 1e-8) + # restore old mean and standard deviation + ret = sd * ret + mn + + if self.invert_image: + ret = -ret + return ret class RandAdjustContrast(RandomizableTransform): """ - Randomly changes image intensity by gamma. Each pixel/voxel intensity is updated as:: + Randomly changes image intensity with gamma transform. Each pixel/voxel intensity is updated as: x = ((x - min) / intensity_range) ^ gamma * intensity_range + min @@ -836,11 +1027,28 @@ class RandAdjustContrast(RandomizableTransform): prob: Probability of adjustment. gamma: Range of gamma values. If single number, value is picked from (0.5, gamma), default is (0.5, 4.5). + invert_image: whether to invert the image before applying gamma augmentation. If True, multiply all intensity + values with -1 before the gamma transform and again after the gamma transform. This behaviour is mimicked + from `nnU-Net `_, specifically `this + `_ + function. + retain_stats: if True, applies a scaling factor and an offset to all intensity values after gamma transform to + ensure that the output intensity distribution has the same mean and standard deviation as the intensity + distribution of the input. This behaviour is mimicked from `nnU-Net + `_, specifically `this + `_ + function. 
""" backend = AdjustContrast.backend - def __init__(self, prob: float = 0.1, gamma: Sequence[float] | float = (0.5, 4.5)) -> None: + def __init__( + self, + prob: float = 0.1, + gamma: Sequence[float] | float = (0.5, 4.5), + invert_image: bool = False, + retain_stats: bool = False, + ) -> None: RandomizableTransform.__init__(self, prob) if isinstance(gamma, (int, float)): @@ -854,7 +1062,13 @@ def __init__(self, prob: float = 0.1, gamma: Sequence[float] | float = (0.5, 4.5 else: self.gamma = (min(gamma), max(gamma)) - self.gamma_value: float | None = None + self.gamma_value: float = 1.0 + self.invert_image: bool = invert_image + self.retain_stats: bool = retain_stats + + self.adjust_contrast = AdjustContrast( + self.gamma_value, invert_image=self.invert_image, retain_stats=self.retain_stats + ) def randomize(self, data: Any | None = None) -> None: super().randomize(None) @@ -875,7 +1089,8 @@ def __call__(self, img: NdarrayOrTensor, randomize: bool = True) -> NdarrayOrTen if self.gamma_value is None: raise RuntimeError("gamma_value is not set, please call `randomize` function first.") - return AdjustContrast(self.gamma_value)(img) + + return self.adjust_contrast(img, self.gamma_value) class ScaleIntensityRangePercentiles(Transform): diff --git a/monai/transforms/intensity/dictionary.py b/monai/transforms/intensity/dictionary.py index 790cb38671..91acff0c3d 100644 --- a/monai/transforms/intensity/dictionary.py +++ b/monai/transforms/intensity/dictionary.py @@ -48,6 +48,7 @@ RandKSpaceSpikeNoise, RandRicianNoise, RandScaleIntensity, + RandScaleIntensityFixedMean, RandShiftIntensity, RandStdShiftIntensity, SavitzkyGolaySmooth, @@ -108,6 +109,9 @@ "StdShiftIntensityDict", "RandScaleIntensityD", "RandScaleIntensityDict", + "RandScaleIntensityFixedMeand", + "RandScaleIntensityFixedMeanDict", + "RandScaleIntensityFixedMeanD", "RandStdShiftIntensityD", "RandStdShiftIntensityDict", "RandBiasFieldD", @@ -623,6 +627,71 @@ def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> dict[Hashable, N return d +class RandScaleIntensityFixedMeand(RandomizableTransform, MapTransform): + """ + Dictionary-based version :py:class:`monai.transforms.RandScaleIntensity`. + Subtract the mean intensity before scaling with `factor`, then add the same value after scaling + to ensure that the output has the same mean as the input. + """ + + backend = RandScaleIntensityFixedMean.backend + + def __init__( + self, + keys: KeysCollection, + factors: Sequence[float] | float, + fixed_mean: bool = True, + preserve_range: bool = False, + prob: float = 0.1, + dtype: DtypeLike = np.float32, + allow_missing_keys: bool = False, + ) -> None: + """ + Args: + keys: keys of the corresponding items to be transformed. + See also: :py:class:`monai.transforms.compose.MapTransform` + factors: factor range to randomly scale by ``v = v * (1 + factor)``. + if single number, factor value is picked from (-factors, factors). + preserve_range: clips the output array/tensor to the range of the input array/tensor + fixed_mean: subtract the mean intensity before scaling with `factor`, then add the same value after scaling + to ensure that the output has the same mean as the input. + channel_wise: if True, scale on each channel separately. `preserve_range` and `fixed_mean` are also applied + on each channel separately if `channel_wise` is True. Please ensure that the first dimension represents the + channel of the image if True. + dtype: output data type, if None, same as input image. defaults to float32. 
+ allow_missing_keys: don't raise exception if key is missing. + + """ + MapTransform.__init__(self, keys, allow_missing_keys) + RandomizableTransform.__init__(self, prob) + self.fixed_mean = fixed_mean + self.preserve_range = preserve_range + self.scaler = RandScaleIntensityFixedMean( + factors=factors, fixed_mean=self.fixed_mean, preserve_range=preserve_range, dtype=dtype, prob=1.0 + ) + + def set_random_state( + self, seed: int | None = None, state: np.random.RandomState | None = None + ) -> RandScaleIntensityFixedMeand: + super().set_random_state(seed, state) + self.scaler.set_random_state(seed, state) + return self + + def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> dict[Hashable, NdarrayOrTensor]: + d = dict(data) + self.randomize(None) + if not self._do_transform: + for key in self.key_iterator(d): + d[key] = convert_to_tensor(d[key], track_meta=get_track_meta()) + return d + + # all the keys share the same random scale factor + self.scaler.randomize(None) + for key in self.key_iterator(d): + d[key] = self.scaler(d[key], randomize=False) + return d + + class RandBiasFieldd(RandomizableTransform, MapTransform): """ Dictionary-based version :py:class:`monai.transforms.RandBiasField`. @@ -798,7 +867,7 @@ def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> dict[Hashable, N class AdjustContrastd(MapTransform): """ Dictionary-based wrapper of :py:class:`monai.transforms.AdjustContrast`. - Changes image intensity by gamma. Each pixel/voxel intensity is updated as: + Changes image intensity with gamma transform. Each pixel/voxel intensity is updated as: `x = ((x - min) / intensity_range) ^ gamma * intensity_range + min` @@ -806,14 +875,32 @@ class AdjustContrastd(MapTransform): keys: keys of the corresponding items to be transformed. See also: monai.transforms.MapTransform gamma: gamma value to adjust the contrast as function. + invert_image: whether to invert the image before applying gamma augmentation. If True, multiply all intensity + values with -1 before the gamma transform and again after the gamma transform. This behaviour is mimicked + from `nnU-Net `_, specifically `this + `_ + function. + retain_stats: if True, applies a scaling factor and an offset to all intensity values after gamma transform to + ensure that the output intensity distribution has the same mean and standard deviation as the intensity + distribution of the input. This behaviour is mimicked from `nnU-Net + `_, specifically `this + `_ + function. allow_missing_keys: don't raise exception if key is missing. """ backend = AdjustContrast.backend - def __init__(self, keys: KeysCollection, gamma: float, allow_missing_keys: bool = False) -> None: + def __init__( + self, + keys: KeysCollection, + gamma: float, + invert_image: bool = False, + retain_stats: bool = False, + allow_missing_keys: bool = False, + ) -> None: super().__init__(keys, allow_missing_keys) - self.adjuster = AdjustContrast(gamma) + self.adjuster = AdjustContrast(gamma, invert_image, retain_stats) def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> dict[Hashable, NdarrayOrTensor]: d = dict(data) @@ -825,7 +912,7 @@ def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> dict[Hashable, N class RandAdjustContrastd(RandomizableTransform, MapTransform): """ Dictionary-based version :py:class:`monai.transforms.RandAdjustContrast`. - Randomly changes image intensity by gamma. Each pixel/voxel intensity is updated as: + Randomly changes image intensity with gamma transform. 
Each pixel/voxel intensity is updated as: `x = ((x - min) / intensity_range) ^ gamma * intensity_range + min` @@ -835,6 +922,17 @@ class RandAdjustContrastd(RandomizableTransform, MapTransform): prob: Probability of adjustment. gamma: Range of gamma values. If single number, value is picked from (0.5, gamma), default is (0.5, 4.5). + invert_image: whether to invert the image before applying gamma augmentation. If True, multiply all intensity + values with -1 before the gamma transform and again after the gamma transform. This behaviour is mimicked + from `nnU-Net `_, specifically `this + `_ + function. + retain_stats: if True, applies a scaling factor and an offset to all intensity values after gamma transform to + ensure that the output intensity distribution has the same mean and standard deviation as the intensity + distribution of the input. This behaviour is mimicked from `nnU-Net + `_, specifically `this + `_ + function. allow_missing_keys: don't raise exception if key is missing. """ @@ -845,11 +943,14 @@ def __init__( keys: KeysCollection, prob: float = 0.1, gamma: tuple[float, float] | float = (0.5, 4.5), + invert_image: bool = False, + retain_stats: bool = False, allow_missing_keys: bool = False, ) -> None: MapTransform.__init__(self, keys, allow_missing_keys) RandomizableTransform.__init__(self, prob) - self.adjuster = RandAdjustContrast(gamma=gamma, prob=1.0) + self.adjuster = RandAdjustContrast(gamma=gamma, prob=1.0, invert_image=invert_image, retain_stats=retain_stats) + self.invert_image = invert_image def set_random_state( self, seed: int | None = None, state: np.random.RandomState | None = None @@ -1801,6 +1902,7 @@ def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> dict[Hashable, N RandBiasFieldD = RandBiasFieldDict = RandBiasFieldd ScaleIntensityD = ScaleIntensityDict = ScaleIntensityd RandScaleIntensityD = RandScaleIntensityDict = RandScaleIntensityd +RandScaleIntensityFixedMeanD = RandScaleIntensityFixedMeanDict = RandScaleIntensityFixedMeand NormalizeIntensityD = NormalizeIntensityDict = NormalizeIntensityd ThresholdIntensityD = ThresholdIntensityDict = ThresholdIntensityd ScaleIntensityRangeD = ScaleIntensityRangeDict = ScaleIntensityRanged diff --git a/tests/test_adjust_contrast.py b/tests/test_adjust_contrast.py index c239f43346..9fa0247115 100644 --- a/tests/test_adjust_contrast.py +++ b/tests/test_adjust_contrast.py @@ -19,29 +19,51 @@ from monai.transforms import AdjustContrast from tests.utils import TEST_NDARRAYS, NumpyImageTestCase2D, assert_allclose -TEST_CASE_1 = [1.0] +TESTS = [] +for invert_image in (True, False): + for retain_stats in (True, False): + TEST_CASE_1 = [1.0, invert_image, retain_stats] + TEST_CASE_2 = [0.5, invert_image, retain_stats] + TEST_CASE_3 = [4.5, invert_image, retain_stats] -TEST_CASE_2 = [0.5] - -TEST_CASE_3 = [4.5] + TESTS.extend([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3]) class TestAdjustContrast(NumpyImageTestCase2D): - @parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3]) - def test_correct_results(self, gamma): - adjuster = AdjustContrast(gamma=gamma) + @parameterized.expand(TESTS) + def test_correct_results(self, gamma, invert_image, retain_stats): + adjuster = AdjustContrast(gamma=gamma, invert_image=invert_image, retain_stats=retain_stats) for p in TEST_NDARRAYS: im = p(self.imt) result = adjuster(im) self.assertTrue(type(im), type(result)) - if gamma == 1.0: + if False: # gamma == 1.0: expected = self.imt else: + if invert_image: + self.imt = -self.imt + + if retain_stats: + mn = self.imt.mean() + sd = 
self.imt.std() + epsilon = 1e-7 img_min = self.imt.min() img_range = self.imt.max() - img_min + expected = np.power(((self.imt - img_min) / float(img_range + epsilon)), gamma) * img_range + img_min - assert_allclose(result, expected, rtol=1e-05, type_test="tensor") + + if retain_stats: + # zero mean and normalize + expected = expected - expected.mean() + expected = expected / (expected.std() + 1e-8) + # restore old mean and standard deviation + expected = sd * expected + mn + + if invert_image: + expected = -expected + + assert_allclose(result, expected, atol=1e-05, type_test="tensor") if __name__ == "__main__": diff --git a/tests/test_adjust_contrastd.py b/tests/test_adjust_contrastd.py index 6de2658a5b..4a671ef7be 100644 --- a/tests/test_adjust_contrastd.py +++ b/tests/test_adjust_contrastd.py @@ -19,27 +19,45 @@ from monai.transforms import AdjustContrastd from tests.utils import TEST_NDARRAYS, NumpyImageTestCase2D, assert_allclose -TEST_CASE_1 = [1.0] +TESTS = [] +for invert_image in (True, False): + for retain_stats in (True, False): + TEST_CASE_1 = [1.0, invert_image, retain_stats] + TEST_CASE_2 = [0.5, invert_image, retain_stats] + TEST_CASE_3 = [4.5, invert_image, retain_stats] -TEST_CASE_2 = [0.5] - -TEST_CASE_3 = [4.5] + TESTS.extend([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3]) class TestAdjustContrastd(NumpyImageTestCase2D): - @parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3]) - def test_correct_results(self, gamma): - adjuster = AdjustContrastd("img", gamma=gamma) + @parameterized.expand(TESTS) + def test_correct_results(self, gamma, invert_image, retain_stats): + adjuster = AdjustContrastd("img", gamma=gamma, invert_image=invert_image, retain_stats=retain_stats) for p in TEST_NDARRAYS: result = adjuster({"img": p(self.imt)}) - if gamma == 1.0: - expected = self.imt - else: - epsilon = 1e-7 - img_min = self.imt.min() - img_range = self.imt.max() - img_min - expected = np.power(((self.imt - img_min) / float(img_range + epsilon)), gamma) * img_range + img_min - assert_allclose(result["img"], expected, rtol=1e-05, type_test="tensor") + if invert_image: + self.imt = -self.imt + + if retain_stats: + mn = self.imt.mean() + sd = self.imt.std() + + epsilon = 1e-7 + img_min = self.imt.min() + img_range = self.imt.max() - img_min + + expected = np.power(((self.imt - img_min) / float(img_range + epsilon)), gamma) * img_range + img_min + + if retain_stats: + # zero mean and normalize + expected = expected - expected.mean() + expected = expected / (expected.std() + 1e-8) + # restore old mean and standard deviation + expected = sd * expected + mn + + if invert_image: + expected = -expected + assert_allclose(result["img"], expected, atol=1e-05, type_test="tensor") if __name__ == "__main__": diff --git a/tests/test_rand_scale_intensity_fixed_mean.py b/tests/test_rand_scale_intensity_fixed_mean.py new file mode 100644 index 0000000000..f43adab32f --- /dev/null +++ b/tests/test_rand_scale_intensity_fixed_mean.py @@ -0,0 +1,41 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +import unittest + +import numpy as np +from parameterized import parameterized + +from monai.transforms import RandScaleIntensityFixedMean +from tests.utils import TEST_NDARRAYS, NumpyImageTestCase2D, assert_allclose + + +class TestRandScaleIntensity(NumpyImageTestCase2D): + @parameterized.expand([[p] for p in TEST_NDARRAYS]) + def test_value(self, p): + scaler = RandScaleIntensityFixedMean(prob=1.0, factors=0.5) + scaler.set_random_state(seed=0) + im = p(self.imt) + result = scaler(im) + np.random.seed(0) + # simulate the randomize() of transform + np.random.random() + mn = im.mean() + im = im - mn + expected = (1 + np.random.uniform(low=-0.5, high=0.5)) * im + expected = expected + mn + assert_allclose(result, expected, type_test="tensor", atol=1e-7) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/test_rand_scale_intensity_fixed_meand.py b/tests/test_rand_scale_intensity_fixed_meand.py new file mode 100644 index 0000000000..c85c764a55 --- /dev/null +++ b/tests/test_rand_scale_intensity_fixed_meand.py @@ -0,0 +1,41 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +import unittest + +import numpy as np + +from monai.transforms import RandScaleIntensityFixedMeand +from tests.utils import TEST_NDARRAYS, NumpyImageTestCase2D, assert_allclose + + +class TestRandScaleIntensityFixedMeand(NumpyImageTestCase2D): + def test_value(self): + key = "img" + for p in TEST_NDARRAYS: + scaler = RandScaleIntensityFixedMeand(keys=[key], factors=0.5, prob=1.0) + scaler.set_random_state(seed=0) + result = scaler({key: p(self.imt)}) + np.random.seed(0) + # simulate the randomize function of transform + np.random.random() + im = self.imt + mn = im.mean() + im = im - mn + expected = (1 + np.random.uniform(low=-0.5, high=0.5)) * im + expected = expected + mn + assert_allclose(result[key], p(expected), type_test="tensor", atol=1e-6) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/test_scale_intensity_fixed_mean.py b/tests/test_scale_intensity_fixed_mean.py new file mode 100644 index 0000000000..afbcd46141 --- /dev/null +++ b/tests/test_scale_intensity_fixed_mean.py @@ -0,0 +1,94 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import annotations + +import unittest + +import numpy as np +from parameterized import parameterized + +from monai.transforms import ScaleIntensityFixedMean +from tests.utils import TEST_NDARRAYS, NumpyImageTestCase2D, assert_allclose + + +class TestScaleIntensityFixedMean(NumpyImageTestCase2D): + def test_factor_scale(self): + for p in TEST_NDARRAYS: + scaler = ScaleIntensityFixedMean(factor=0.1, fixed_mean=False) + result = scaler(p(self.imt)) + expected = p((self.imt * (1 + 0.1)).astype(np.float32)) + assert_allclose(result, p(expected), type_test="tensor", rtol=1e-7, atol=0) + + @parameterized.expand([[p] for p in TEST_NDARRAYS]) + def test_preserve_range(self, p): + for channel_wise in [False, True]: + factor = 0.9 + scaler = ScaleIntensityFixedMean( + factor=factor, preserve_range=True, channel_wise=channel_wise, fixed_mean=False + ) + im = p(self.imt) + result = scaler(im) + + if False: # channel_wise: + out = [] + for d in im: + clip_min = d.min() + clip_max = d.max() + d = (1 + factor) * d + d[d < clip_min] = clip_min + d[d > clip_max] = clip_max + out.append(d) + expected = p(out) + else: + clip_min = im.min() + clip_max = im.max() + im = (1 + factor) * im + im[im < clip_min] = clip_min + im[im > clip_max] = clip_max + expected = im + assert_allclose(result, expected, type_test="tensor", atol=1e-7) + + @parameterized.expand([[p] for p in TEST_NDARRAYS]) + def test_fixed_mean(self, p): + for channel_wise in [False, True]: + factor = 0.9 + scaler = ScaleIntensityFixedMean(factor=factor, fixed_mean=True, channel_wise=channel_wise) + im = p(self.imt) + result = scaler(im) + mn = im.mean() + im = im - mn + expected = (1 + factor) * im + expected = expected + mn + assert_allclose(result, expected, type_test="tensor", atol=1e-7) + + @parameterized.expand([[p] for p in TEST_NDARRAYS]) + def test_fixed_mean_preserve_range(self, p): + for channel_wise in [False, True]: + factor = 0.9 + scaler = ScaleIntensityFixedMean( + factor=factor, preserve_range=True, fixed_mean=True, channel_wise=channel_wise + ) + im = p(self.imt) + clip_min = im.min() + clip_max = im.max() + result = scaler(im) + mn = im.mean() + im = im - mn + expected = (1 + factor) * im + expected = expected + mn + expected[expected < clip_min] = clip_min + expected[expected > clip_max] = clip_max + assert_allclose(result, expected, type_test="tensor", atol=1e-7) + + +if __name__ == "__main__": + unittest.main()
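
A minimal sketch of the behaviour the new fixed-mean scaling implements (assumes this branch of MONAI is installed; the array shape, seed and tolerances below are illustrative, not part of the patch):

import numpy as np

from monai.transforms import RandScaleIntensityFixedMean, ScaleIntensityFixedMean

img = np.random.RandomState(0).uniform(size=(1, 32, 32)).astype(np.float32)

# fixed_mean=True: v' = (v - mean) * (1 + factor) + mean, so the mean is unchanged
scaler = ScaleIntensityFixedMean(factor=0.5, fixed_mean=True)
out = np.asarray(scaler(img))
expected = (img - img.mean()) * 1.5 + img.mean()
np.testing.assert_allclose(out, expected, rtol=1e-4, atol=1e-4)

# preserve_range=True additionally clips the output back into [img.min(), img.max()]
clipped = np.asarray(ScaleIntensityFixedMean(factor=0.5, preserve_range=True)(img))
assert clipped.min() >= img.min() and clipped.max() <= img.max()

# the random variant draws the factor uniformly from (-0.5, 0.5) whenever it fires
rand_scaler = RandScaleIntensityFixedMean(prob=1.0, factors=0.5)
rand_scaler.set_random_state(seed=0)
rand_out = np.asarray(rand_scaler(img))
print(float(img.mean()), float(rand_out.mean()))  # equal up to float32 rounding

With channel_wise=True the mean, min and max above are computed per channel rather than over the whole array.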
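
The dictionary wrapper draws one factor per call and applies it to every listed key. A short illustrative usage (key names and seed are made up for the example):

import numpy as np

from monai.transforms import Compose, RandScaleIntensityFixedMeand

data = {
    "image": np.random.RandomState(0).uniform(size=(1, 16, 16)).astype(np.float32),
    "image2": np.random.RandomState(1).uniform(size=(1, 16, 16)).astype(np.float32),
}

# the same randomly drawn factor is applied to "image" and "image2"
xform = Compose([RandScaleIntensityFixedMeand(keys=["image", "image2"], factors=0.3, prob=1.0)])
xform.set_random_state(seed=0)
out = xform(data)

for key in ("image", "image2"):
    # fixed_mean=True (the default) keeps each output mean equal to its input mean
    print(key, float(np.asarray(out[key]).mean()), float(data[key].mean()))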
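
The gamma-transform changes are easiest to see numerically: invert_image negates intensities before and after the gamma mapping, and retain_stats restores the input mean and standard deviation afterwards. A hedged sketch (again assuming this branch is installed; gamma values and the seed are arbitrary):

import numpy as np

from monai.transforms import AdjustContrast, RandAdjustContrast

img = np.random.RandomState(0).normal(size=(1, 16, 16)).astype(np.float32)

adjust = AdjustContrast(gamma=2.0, invert_image=True, retain_stats=True)
out = np.asarray(adjust(img))

# retain_stats re-applies the input statistics after the gamma mapping,
# and the two negations from invert_image cancel out for the mean as well
print(float(img.mean()), float(out.mean()))  # approximately equal
print(float(img.std()), float(out.std()))    # approximately equal

# the random variant samples gamma from the given range each time it fires
rand_adjust = RandAdjustContrast(prob=1.0, gamma=(0.7, 1.5), retain_stats=True)
rand_adjust.set_random_state(seed=0)
rand_out = rand_adjust(img)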