diff --git a/docs/source/highlights.md b/docs/source/highlights.md
index b339850220..571f20db9f 100644
--- a/docs/source/highlights.md
+++ b/docs/source/highlights.md
@@ -81,7 +81,7 @@ For example:
 ```py
 # define a transform chain for pre-processing
 train_transforms = monai.transforms.Compose([
-    LoadNiftid(keys=['image', 'label']),
+    LoadImaged(keys=['image', 'label']),
     RandRotate90d(keys=['image', 'label'], prob=0.2, spatial_axes=[0, 2]),
     ... ...
 ])
diff --git a/monai/apps/datasets.py b/monai/apps/datasets.py
index 6aceb159ca..99643fd4db 100644
--- a/monai/apps/datasets.py
+++ b/monai/apps/datasets.py
@@ -23,7 +23,7 @@
     partition_dataset,
     select_cross_validation_folds,
 )
-from monai.transforms import LoadNiftid, LoadPNGd, Randomizable
+from monai.transforms import LoadImaged, Randomizable
 from monai.utils import ensure_tuple


@@ -92,7 +92,7 @@ def __init__(
         )
         data = self._generate_data_list(dataset_dir)
         if transform == ():
-            transform = LoadPNGd("image")
+            transform = LoadImaged("image")
         super().__init__(data, transform, cache_num=cache_num, cache_rate=cache_rate, num_workers=num_workers)

     def randomize(self, data: Optional[Any] = None) -> None:
@@ -268,7 +268,7 @@ def __init__(
         ]
         self._properties = load_decathlon_properties(os.path.join(dataset_dir, "dataset.json"), property_keys)
         if transform == ():
-            transform = LoadNiftid(["image", "label"])
+            transform = LoadImaged(["image", "label"])
         super().__init__(data, transform, cache_num=cache_num, cache_rate=cache_rate, num_workers=num_workers)

     def get_indices(self) -> np.ndarray:
diff --git a/monai/data/image_reader.py b/monai/data/image_reader.py
index ab89e9ef49..0e14f5ba65 100644
--- a/monai/data/image_reader.py
+++ b/monai/data/image_reader.py
@@ -14,6 +14,7 @@
 from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Sequence, Tuple, Union

 import numpy as np
+from torch.utils.data._utils.collate import np_str_obj_array_pattern

 from monai.config import KeysCollection
 from monai.data.utils import correct_nifti_header_if_necessary
@@ -80,6 +81,29 @@ def get_data(self, img) -> Tuple[np.ndarray, Dict]:
         raise NotImplementedError(f"Subclass {self.__class__.__name__} must implement this method.")


+def _copy_compatible_dict(from_dict: Dict, to_dict: Dict):
+    if not isinstance(to_dict, dict):
+        raise ValueError(f"to_dict must be a Dict, got {type(to_dict)}.")
+    if not to_dict:
+        for key in from_dict:
+            datum = from_dict[key]
+            if isinstance(datum, np.ndarray) and np_str_obj_array_pattern.search(datum.dtype.str) is not None:
+                continue
+            to_dict[key] = datum
+    else:
+        affine_key, shape_key = "affine", "spatial_shape"
+        if affine_key in from_dict and not np.allclose(from_dict[affine_key], to_dict[affine_key]):
+            raise RuntimeError(
+                "affine matrix of all images should be the same for channel-wise concatenation. "
+                f"Got {from_dict[affine_key]} and {to_dict[affine_key]}."
+            )
+        if shape_key in from_dict and not np.allclose(from_dict[shape_key], to_dict[shape_key]):
+            raise RuntimeError(
+                "spatial_shape of all images should be the same for channel-wise concatenation. "
+                f"Got {from_dict[shape_key]} and {to_dict[shape_key]}."
+            )
+
+
 class ITKReader(ImageReader):
     """
     Load medical images based on ITK library.
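The new `_copy_compatible_dict` helper centralizes the metadata merging that each reader previously duplicated in `get_data`: the first image populates the shared meta dict, and every subsequent image is only verified for a matching `affine` and `spatial_shape`. A minimal standalone sketch of that behavior (an illustrative re-implementation for readers of this patch, not the MONAI helper itself):

```py
import numpy as np

def copy_compatible(from_dict: dict, to_dict: dict) -> None:
    """Illustrative re-implementation: populate once, then only verify geometry."""
    if not to_dict:
        # first image: copy its metadata (the real helper additionally skips string/object
        # ndarrays that the PyTorch default collate function cannot handle)
        to_dict.update(from_dict)
    else:
        for key in ("affine", "spatial_shape"):
            if key in from_dict and not np.allclose(from_dict[key], to_dict[key]):
                raise RuntimeError(f"{key} of all images should be the same, got {from_dict[key]} and {to_dict[key]}.")

meta: dict = {}
copy_compatible({"affine": np.eye(4), "spatial_shape": np.array([128, 128, 128])}, meta)
copy_compatible({"affine": np.eye(4), "spatial_shape": np.array([128, 128, 128])}, meta)  # compatible, no error
# a mismatching affine or spatial_shape would raise RuntimeError here
```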
@@ -159,7 +183,7 @@ def get_data(self, img): """ img_array: List[np.ndarray] = list() - compatible_meta: Dict = None + compatible_meta: Dict = {} for i in ensure_tuple(img): header = self._get_meta_dict(i) @@ -167,14 +191,7 @@ def get_data(self, img): header["affine"] = header["original_affine"].copy() header["spatial_shape"] = self._get_spatial_shape(i) img_array.append(self._get_array_data(i)) - - if compatible_meta is None: - compatible_meta = header - else: - if not np.allclose(header["affine"], compatible_meta["affine"]): - raise RuntimeError("affine matrix of all images should be same.") - if not np.allclose(header["spatial_shape"], compatible_meta["spatial_shape"]): - raise RuntimeError("spatial_shape of all images should be same.") + _copy_compatible_dict(header, compatible_meta) img_array_ = np.stack(img_array, axis=0) if len(img_array) > 1 else img_array[0] return img_array_, compatible_meta @@ -188,7 +205,7 @@ def _get_meta_dict(self, img) -> Dict: """ img_meta_dict = img.GetMetaDataDictionary() - meta_dict = dict() + meta_dict = {} for key in img_meta_dict.GetKeys(): # ignore deprecated, legacy members that cause issues if key.startswith("ITK_original_"): @@ -220,7 +237,7 @@ def _get_affine(self, img) -> np.ndarray: affine[(slice(-1), -1)] = origin return affine - def _get_spatial_shape(self, img) -> Sequence: + def _get_spatial_shape(self, img) -> np.ndarray: """ Get the spatial shape of image data, it doesn't contain the channel dim. @@ -230,7 +247,7 @@ def _get_spatial_shape(self, img) -> Sequence: """ shape = list(itk.size(img)) shape.reverse() - return shape + return np.asarray(shape) def _get_array_data(self, img) -> np.ndarray: """ @@ -247,17 +264,15 @@ def _get_array_data(self, img) -> np.ndarray: channels = img.GetNumberOfComponentsPerPixel() if channels == 1: return itk.array_view_from_image(img, keep_axes=False) - else: - # The memory layout of itk.Image has all pixel's channels adjacent - # in memory, i.e. R1G1B1R2G2B2R3G3B3. For PyTorch/MONAI, we need - # channels to be contiguous, i.e. R1R2R3G1G2G3B1B2B3. - arr = itk.array_view_from_image(img, keep_axes=False) - dest = list(range(img.ndim)) - source = dest.copy() - end = source.pop() - source.insert(0, end) - arr_contiguous_channels = np.moveaxis(arr, source, dest) - return arr_contiguous_channels + # The memory layout of itk.Image has all pixel's channels adjacent + # in memory, i.e. R1G1B1R2G2B2R3G3B3. For PyTorch/MONAI, we need + # channels to be contiguous, i.e. R1R2R3G1G2G3B1B2B3. 
+ arr = itk.array_view_from_image(img, keep_axes=False) + dest = list(range(img.ndim)) + source = dest.copy() + end = source.pop() + source.insert(0, end) + return np.moveaxis(arr, source, dest) class NibabelReader(ImageReader): @@ -271,9 +286,10 @@ class NibabelReader(ImageReader): """ - def __init__(self, as_closest_canonical: bool = False, **kwargs): + def __init__(self, as_closest_canonical: bool = False, dtype: Optional[np.dtype] = np.float32, **kwargs): super().__init__() self.as_closest_canonical = as_closest_canonical + self.dtype = dtype self.kwargs = kwargs def verify_suffix(self, filename: Union[Sequence[str], str]) -> bool: @@ -324,26 +340,19 @@ def get_data(self, img): """ img_array: List[np.ndarray] = list() - compatible_meta: Dict = None + compatible_meta: Dict = {} for i in ensure_tuple(img): header = self._get_meta_dict(i) + header["affine"] = self._get_affine(i) header["original_affine"] = self._get_affine(i) - header["affine"] = header["original_affine"].copy() + header["as_closest_canonical"] = self.as_closest_canonical if self.as_closest_canonical: i = nib.as_closest_canonical(i) header["affine"] = self._get_affine(i) - header["as_closest_canonical"] = self.as_closest_canonical header["spatial_shape"] = self._get_spatial_shape(i) img_array.append(self._get_array_data(i)) - - if compatible_meta is None: - compatible_meta = header - else: - if not np.allclose(header["affine"], compatible_meta["affine"]): - raise RuntimeError("affine matrix of all images should be same.") - if not np.allclose(header["spatial_shape"], compatible_meta["spatial_shape"]): - raise RuntimeError("spatial_shape of all images should be same.") + _copy_compatible_dict(header, compatible_meta) img_array_ = np.stack(img_array, axis=0) if len(img_array) > 1 else img_array[0] return img_array_, compatible_meta @@ -367,9 +376,9 @@ def _get_affine(self, img) -> np.ndarray: img: a Nibabel image object loaded from a image file. """ - return img.affine + return img.affine.copy() - def _get_spatial_shape(self, img) -> Sequence: + def _get_spatial_shape(self, img) -> np.ndarray: """ Get the spatial shape of image data, it doesn't contain the channel dim. @@ -379,7 +388,7 @@ def _get_spatial_shape(self, img) -> Sequence: """ ndim = img.header["dim"][0] spatial_rank = min(ndim, 3) - return list(img.header["dim"][1 : spatial_rank + 1]) + return np.asarray(img.header["dim"][1 : spatial_rank + 1]) def _get_array_data(self, img) -> np.ndarray: """ @@ -389,7 +398,9 @@ def _get_array_data(self, img) -> np.ndarray: img: a Nibabel image object loaded from a image file. 
""" - return np.asarray(img.dataobj) + _array = np.array(img.get_fdata(dtype=self.dtype)) + img.uncache() + return _array class NumpyReader(ImageReader): @@ -466,7 +477,7 @@ def get_data(self, img): """ img_array: List[np.ndarray] = list() - compatible_meta: Dict = None + compatible_meta: Dict = {} if isinstance(img, np.ndarray): img = (img,) @@ -475,12 +486,7 @@ def get_data(self, img): if isinstance(i, np.ndarray): header["spatial_shape"] = i.shape img_array.append(i) - - if compatible_meta is None: - compatible_meta = header - else: - if not np.allclose(header["spatial_shape"], compatible_meta["spatial_shape"]): - raise RuntimeError("spatial_shape of all images should be same.") + _copy_compatible_dict(header, compatible_meta) img_array_ = np.stack(img_array, axis=0) if len(img_array) > 1 else img_array[0] return img_array_, compatible_meta @@ -551,18 +557,13 @@ def get_data(self, img): """ img_array: List[np.ndarray] = list() - compatible_meta: Dict = None + compatible_meta: Dict = {} for i in ensure_tuple(img): header = self._get_meta_dict(i) header["spatial_shape"] = self._get_spatial_shape(i) img_array.append(np.asarray(i)) - - if compatible_meta is None: - compatible_meta = header - else: - if not np.allclose(header["spatial_shape"], compatible_meta["spatial_shape"]): - raise RuntimeError("spatial_shape of all images should be same.") + _copy_compatible_dict(header, compatible_meta) img_array_ = np.stack(img_array, axis=0) if len(img_array) > 1 else img_array[0] return img_array_, compatible_meta @@ -574,17 +575,17 @@ def _get_meta_dict(self, img) -> Dict: img: a PIL Image object loaded from a image file. """ - meta = dict() - meta["format"] = img.format - meta["mode"] = img.mode - meta["width"] = img.width - meta["height"] = img.height - return meta + return { + "format": img.format, + "mode": img.mode, + "width": img.width, + "height": img.height, + } - def _get_spatial_shape(self, img) -> Sequence: + def _get_spatial_shape(self, img) -> np.ndarray: """ Get the spatial shape of image data, it doesn't contain the channel dim. Args: img: a PIL Image object loaded from a image file. """ - return [img.width, img.height] + return np.asarray((img.width, img.height)) diff --git a/monai/data/iterable_dataset.py b/monai/data/iterable_dataset.py index cc572ce276..c8ee006b12 100644 --- a/monai/data/iterable_dataset.py +++ b/monai/data/iterable_dataset.py @@ -42,5 +42,4 @@ def __iter__(self): for data in self.source: if self.transform is not None: data = apply_transform(self.transform, data) - yield data diff --git a/monai/transforms/io/array.py b/monai/transforms/io/array.py index 6bd5229924..22e2b8e3d6 100644 --- a/monai/transforms/io/array.py +++ b/monai/transforms/io/array.py @@ -13,6 +13,7 @@ https://github.com/Project-MONAI/MONAI/wiki/MONAI_Design """ +import warnings from pathlib import Path from typing import Dict, List, Optional, Sequence, Union @@ -20,7 +21,7 @@ from torch.utils.data._utils.collate import np_str_obj_array_pattern from monai.config import KeysCollection -from monai.data.image_reader import ImageReader, ITKReader +from monai.data.image_reader import ImageReader, ITKReader, NibabelReader, NumpyReader, PILReader from monai.data.utils import correct_nifti_header_if_necessary from monai.transforms.compose import Transform from monai.utils import ensure_tuple, optional_import @@ -31,51 +32,70 @@ class LoadImage(Transform): """ - Load image file or files from provided path based on reader, default reader is ITK. 
-    All the supported image formats of ITK:
-    https://github.com/InsightSoftwareConsortium/ITK/tree/master/Modules/IO
+    Load image file or files from provided path based on reader.

     Automatically choose readers based on the supported suffixes and in below order:
     - User specified reader at runtime when call this loader.
-    - Registered readers from the first to the last in list.
-    - Default ITK reader.
+    - Registered readers, from the most recently registered back to the first.
+    - Default readers: (nii, nii.gz -> NibabelReader), (png, jpg, bmp -> PILReader),
+      (npz, npy -> NumpyReader), (others -> ITKReader).

     """

     def __init__(
         self,
-        reader: Optional[ImageReader] = None,
+        reader: Optional[Union[ImageReader, str]] = None,
         image_only: bool = False,
         dtype: np.dtype = np.float32,
+        *args,
+        **kwargs,
     ) -> None:
         """
         Args:
             reader: register reader to load image file and meta data, if None, still can register readers
-                at runtime or use the default ITK reader.
+                at runtime or use the default readers. If a reader name string is provided, a reader object
+                will be constructed with the `*args` and `**kwargs` parameters; supported reader names:
+                "NibabelReader", "PILReader", "ITKReader", "NumpyReader".
             image_only: if True return only the image volume, otherwise return image data array and header dict.
             dtype: if not None convert the loaded image to this data type.
+            args: additional positional parameters for the reader if a reader name is provided.
+            kwargs: additional keyword parameters for the reader if a reader name is provided.

         Note:
             The transform returns image data array if `image_only` is True,
             or a tuple of two elements containing the data array, and the meta data in a dict format otherwise.

         """
-        self.default_reader: ITKReader = ITKReader()
-        self.readers: List[ImageReader] = list()
+        # set predefined readers as default
+        self.readers: List[ImageReader] = [ITKReader(), NumpyReader(), PILReader(), NibabelReader()]
         if reader is not None:
-            self.readers.append(reader)
+            if isinstance(reader, str):
+                supported_readers = {
+                    "NibabelReader": NibabelReader,
+                    "PILReader": PILReader,
+                    "ITKReader": ITKReader,
+                    "NumpyReader": NumpyReader,
+                }
+                if reader not in supported_readers:
+                    raise ValueError(f"unsupported reader type: {reader}.")
+                self.register(supported_readers[reader](*args, **kwargs))
+            else:
+                self.register(reader)
+
         self.image_only = image_only
         self.dtype = dtype

     def register(self, reader: ImageReader) -> List[ImageReader]:
         """
-        Register image reader to load image file and meta data.
+        Register an image reader to load image files and meta data; the most recently registered reader has the highest priority.
         Return all the registered image readers.

         Args:
             reader: registered reader to load image file and meta data based on suffix,
-                if all registered readers can't match suffix at runtime, use the default ITK reader.
+                if all registered readers can't match the suffix at runtime, the default readers are used.

         """
+        if not isinstance(reader, ImageReader):
+            raise ValueError(f"reader must be ImageReader object, but got {type(reader)}.")
         self.readers.append(reader)
         return self.readers
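To make the new constructor options concrete, here is a usage sketch of `LoadImage` as introduced above: a reader can be selected by name (with `*args`/`**kwargs` forwarded to that reader's constructor) or registered explicitly, and the most recently registered reader is tried first. The file path below is a placeholder, not part of this patch:

```py
from monai.data import ITKReader
from monai.transforms import LoadImage

# select a reader by name; keyword arguments are forwarded to NibabelReader(...)
loader = LoadImage(reader="NibabelReader", image_only=False, as_closest_canonical=True)

# or keep the defaults and register an extra reader, which then has the highest priority
loader = LoadImage(image_only=False)
loader.register(ITKReader())

img, meta = loader("test_image.nii.gz")  # placeholder path
print(img.shape, meta["spatial_shape"], meta["affine"])
```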
""" + if not isinstance(reader, ImageReader): + raise ValueError(f"reader must be ImageReader object, but got {type(reader)}.") self.readers.append(reader) return self.readers @@ -93,12 +113,13 @@ def __call__( """ if reader is None or not reader.verify_suffix(filename): - reader = self.default_reader - if len(self.readers) > 0: - for r in self.readers: - if r.verify_suffix(filename): - reader = r - break + for r in reversed(self.readers): + if r.verify_suffix(filename): + reader = r + break + + if reader is None: + raise RuntimeError(f"can not find suitable reader for this file: {filename}.") img = reader.read(filename) img_array, meta_data = reader.get_data(img) @@ -136,6 +157,7 @@ def __init__( - header['affine'] stores the affine of the image. - header['original_affine'] will be additionally created to store the original affine. """ + warnings.warn("LoadNifti will be deprecated in v0.5, please use LoadImage instead.", DeprecationWarning) self.as_closest_canonical = as_closest_canonical self.image_only = image_only self.dtype = dtype @@ -205,6 +227,7 @@ def __init__(self, image_only: bool = False, dtype: Optional[np.dtype] = np.floa image_only: if True return only the image volume, otherwise return image data array and metadata. dtype: if not None convert the loaded image to this data type. """ + warnings.warn("LoadPNG will be deprecated in v0.5, please use LoadImage instead.", DeprecationWarning) self.image_only = image_only self.dtype = dtype @@ -267,6 +290,7 @@ def __init__( stack the loaded items together to construct a new first dimension. """ + warnings.warn("LoadNumpy will be deprecated in v0.5, please use LoadImage instead.", DeprecationWarning) self.data_only = data_only self.dtype = dtype if npz_keys is not None: diff --git a/monai/transforms/io/dictionary.py b/monai/transforms/io/dictionary.py index 5533c4c24a..ff8c439d3b 100644 --- a/monai/transforms/io/dictionary.py +++ b/monai/transforms/io/dictionary.py @@ -15,7 +15,7 @@ Class names are ended with 'd' to denote dictionary-based transforms. """ -from typing import Callable, Optional +from typing import Callable, Optional, Union import numpy as np @@ -33,31 +33,44 @@ class LoadImaged(MapTransform): meta data of the first image to represent the stacked result. Note that the affine transform of all the stacked images should be same. The output metadata field will be created as ``key_{meta_key_postfix}``. + + It can automatically choose readers based on the supported suffixes and in below order: + - User specified reader at runtime when call this loader. + - Registered readers from the latest to the first in list. + - Default readers: (nii, nii.gz -> NibabelReader), (png, jpg, bmp -> PILReader), + (npz, npy -> NumpyReader), (others -> ITKReader). + """ def __init__( self, keys: KeysCollection, - reader: Optional[ImageReader] = None, + reader: Optional[Union[ImageReader, str]] = None, dtype: Optional[np.dtype] = np.float32, meta_key_postfix: str = "meta_dict", overwriting: bool = False, + *args, + **kwargs, ) -> None: """ Args: keys: keys of the corresponding items to be transformed. See also: :py:class:`monai.transforms.compose.MapTransform` reader: register reader to load image file and meta data, if None, still can register readers - at runtime or use the default ITK reader. + at runtime or use the default readers. 
diff --git a/monai/transforms/io/dictionary.py b/monai/transforms/io/dictionary.py
index 5533c4c24a..ff8c439d3b 100644
--- a/monai/transforms/io/dictionary.py
+++ b/monai/transforms/io/dictionary.py
@@ -15,7 +15,7 @@
 Class names are ended with 'd' to denote dictionary-based transforms.
 """

-from typing import Callable, Optional
+from typing import Callable, Optional, Union

 import numpy as np

@@ -33,31 +33,44 @@ class LoadImaged(MapTransform):
     meta data of the first image to represent the stacked result. Note that
     the affine transform of all the stacked images should be same.
     The output metadata field will be created as ``key_{meta_key_postfix}``.
+
+    It can automatically choose readers based on the supported suffixes, in the following order:
+    - The user-specified reader at runtime when calling this loader.
+    - Registered readers, from the most recently registered back to the first.
+    - Default readers: (nii, nii.gz -> NibabelReader), (png, jpg, bmp -> PILReader),
+      (npz, npy -> NumpyReader), (others -> ITKReader).
+
     """

     def __init__(
         self,
         keys: KeysCollection,
-        reader: Optional[ImageReader] = None,
+        reader: Optional[Union[ImageReader, str]] = None,
         dtype: Optional[np.dtype] = np.float32,
         meta_key_postfix: str = "meta_dict",
         overwriting: bool = False,
+        *args,
+        **kwargs,
     ) -> None:
         """
         Args:
             keys: keys of the corresponding items to be transformed.
                 See also: :py:class:`monai.transforms.compose.MapTransform`
             reader: register reader to load image file and meta data, if None, still can register readers
-                at runtime or use the default ITK reader.
+                at runtime or use the default readers. If a reader name string is provided, a reader object
+                will be constructed with the `*args` and `**kwargs` parameters; supported reader names:
+                "NibabelReader", "PILReader", "ITKReader", "NumpyReader".
             dtype: if not None convert the loaded image data to this data type.
             meta_key_postfix: use `key_{postfix}` to store the metadata of the nifti image,
                 default is `meta_dict`. The meta data is a dictionary object.
                 For example, load nifti file for `image`, store the metadata into `image_meta_dict`.
             overwriting: whether allow to overwrite existing meta data of same key.
                 default is False, which will raise exception if encountering existing key.
+            args: additional positional parameters for the reader if a reader name is provided.
+            kwargs: additional keyword parameters for the reader if a reader name is provided.
         """
         super().__init__(keys)
-        self._loader = LoadImage(reader, False, dtype)
+        self._loader = LoadImage(reader, False, dtype, *args, **kwargs)
         if not isinstance(meta_key_postfix, str):
             raise TypeError(f"meta_key_postfix must be a str but is {type(meta_key_postfix).__name__}.")
         self.meta_key_postfix = meta_key_postfix
@@ -75,9 +88,11 @@ def __call__(self, data, reader: Optional[ImageReader] = None):
         d = dict(data)
         for key in self.keys:
             data = self._loader(d[key], reader)
-            assert isinstance(data, (tuple, list)), "loader must return a tuple or list."
+            if not isinstance(data, (tuple, list)):
+                raise ValueError("loader must return a tuple or list.")
             d[key] = data[0]
-            assert isinstance(data[1], dict), "metadata must be a dict."
+            if not isinstance(data[1], dict):
+                raise ValueError("metadata must be a dict.")
             key_to_add = f"{key}_{self.meta_key_postfix}"
             if key_to_add in d and not self.overwriting:
                 raise KeyError(f"Meta data with key {key_to_add} already exists and overwriting=False.")
diff --git a/tests/test_arraydataset.py b/tests/test_arraydataset.py
index 2183d9b1e8..d5112d4200 100644
--- a/tests/test_arraydataset.py
+++ b/tests/test_arraydataset.py
@@ -20,18 +20,18 @@
 from torch.utils.data import DataLoader

 from monai.data import ArrayDataset
-from monai.transforms import AddChannel, Compose, LoadNifti, RandAdjustContrast, RandGaussianNoise, Spacing
+from monai.transforms import AddChannel, Compose, LoadImage, RandAdjustContrast, RandGaussianNoise, Spacing

 TEST_CASE_1 = [
-    Compose([LoadNifti(image_only=True), AddChannel(), RandGaussianNoise(prob=1.0)]),
-    Compose([LoadNifti(image_only=True), AddChannel(), RandGaussianNoise(prob=1.0)]),
+    Compose([LoadImage(image_only=True), AddChannel(), RandGaussianNoise(prob=1.0)]),
+    Compose([LoadImage(image_only=True), AddChannel(), RandGaussianNoise(prob=1.0)]),
     (0, 1),
     (1, 128, 128, 128),
 ]

 TEST_CASE_2 = [
-    Compose([LoadNifti(image_only=True), AddChannel(), RandAdjustContrast(prob=1.0)]),
-    Compose([LoadNifti(image_only=True), AddChannel(), RandAdjustContrast(prob=1.0)]),
+    Compose([LoadImage(image_only=True), AddChannel(), RandAdjustContrast(prob=1.0)]),
+    Compose([LoadImage(image_only=True), AddChannel(), RandAdjustContrast(prob=1.0)]),
     (0, 1),
     (1, 128, 128, 128),
 ]
@@ -46,13 +46,13 @@ def __call__(self, input_):


 TEST_CASE_3 = [
-    TestCompose([LoadNifti(image_only=False), AddChannel(), Spacing(pixdim=(2, 2, 4)), RandAdjustContrast(prob=1.0)]),
-    TestCompose([LoadNifti(image_only=False), AddChannel(), Spacing(pixdim=(2, 2, 4)), RandAdjustContrast(prob=1.0)]),
+    TestCompose([LoadImage(image_only=False), AddChannel(), Spacing(pixdim=(2, 2, 4)), RandAdjustContrast(prob=1.0)]),
+    TestCompose([LoadImage(image_only=False), AddChannel(), Spacing(pixdim=(2, 2, 4)),
RandAdjustContrast(prob=1.0)]), (0, 2), (1, 64, 64, 33), ] -TEST_CASE_4 = [Compose([LoadNifti(image_only=True), AddChannel(), RandGaussianNoise(prob=1.0)]), (1, 128, 128, 128)] +TEST_CASE_4 = [Compose([LoadImage(image_only=True), AddChannel(), RandGaussianNoise(prob=1.0)]), (1, 128, 128, 128)] class TestArrayDataset(unittest.TestCase): diff --git a/tests/test_cachedataset.py b/tests/test_cachedataset.py index 7450d7dfdc..8e9350255c 100644 --- a/tests/test_cachedataset.py +++ b/tests/test_cachedataset.py @@ -18,9 +18,9 @@ from parameterized import parameterized from monai.data import CacheDataset -from monai.transforms import Compose, LoadNiftid +from monai.transforms import Compose, LoadImaged -TEST_CASE_1 = [Compose([LoadNiftid(keys=["image", "label", "extra"])]), (128, 128, 128)] +TEST_CASE_1 = [Compose([LoadImaged(keys=["image", "label", "extra"])]), (128, 128, 128)] TEST_CASE_2 = [None, (128, 128, 128)] diff --git a/tests/test_cachedataset_parallel.py b/tests/test_cachedataset_parallel.py index 0fbb591731..0f8453b041 100644 --- a/tests/test_cachedataset_parallel.py +++ b/tests/test_cachedataset_parallel.py @@ -18,11 +18,11 @@ from parameterized import parameterized from monai.data import CacheDataset -from monai.transforms import Compose, LoadNiftid +from monai.transforms import Compose, LoadImaged -TEST_CASE_1 = [0, 5, Compose([LoadNiftid(keys=["image", "label", "extra"])])] +TEST_CASE_1 = [0, 5, Compose([LoadImaged(keys=["image", "label", "extra"])])] -TEST_CASE_2 = [4, 5, Compose([LoadNiftid(keys=["image", "label", "extra"])])] +TEST_CASE_2 = [4, 5, Compose([LoadImaged(keys=["image", "label", "extra"])])] TEST_CASE_3 = [4, 5, None] diff --git a/tests/test_cachentransdataset.py b/tests/test_cachentransdataset.py index 9242fb8108..c9617d64db 100644 --- a/tests/test_cachentransdataset.py +++ b/tests/test_cachentransdataset.py @@ -18,11 +18,11 @@ from parameterized import parameterized from monai.data import CacheNTransDataset -from monai.transforms import LoadNiftid, ShiftIntensityd +from monai.transforms import LoadImaged, ShiftIntensityd TEST_CASE_1 = [ [ - LoadNiftid(keys="image"), + LoadImaged(keys="image"), ShiftIntensityd(keys="image", offset=1.0), ShiftIntensityd(keys="image", offset=2.0), ShiftIntensityd(keys="image", offset=3.0), diff --git a/tests/test_cross_validation.py b/tests/test_cross_validation.py index bb8b1e7892..21d5b7edf7 100644 --- a/tests/test_cross_validation.py +++ b/tests/test_cross_validation.py @@ -14,7 +14,7 @@ from urllib.error import ContentTooShortError, HTTPError from monai.apps import CrossValidation, DecathlonDataset -from monai.transforms import AddChanneld, Compose, LoadNiftid, ScaleIntensityd, ToTensord +from monai.transforms import AddChanneld, Compose, LoadImaged, ScaleIntensityd, ToTensord from tests.utils import skip_if_quick @@ -24,7 +24,7 @@ def test_values(self): testing_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "testing_data") transform = Compose( [ - LoadNiftid(keys=["image", "label"]), + LoadImaged(keys=["image", "label"]), AddChanneld(keys=["image", "label"]), ScaleIntensityd(keys="image"), ToTensord(keys=["image", "label"]), diff --git a/tests/test_dataset.py b/tests/test_dataset.py index 93b531bb41..b03a9a9552 100644 --- a/tests/test_dataset.py +++ b/tests/test_dataset.py @@ -18,7 +18,7 @@ from parameterized import parameterized from monai.data import Dataset -from monai.transforms import Compose, LoadNiftid, SimulateDelayd +from monai.transforms import Compose, LoadImaged, SimulateDelayd TEST_CASE_1 = [(128, 128, 
128)] @@ -48,7 +48,7 @@ def test_shape(self, expected_shape): ] test_transform = Compose( [ - LoadNiftid(keys=["image", "label", "extra"]), + LoadImaged(keys=["image", "label", "extra"]), SimulateDelayd(keys=["image", "label", "extra"], delay_time=[1e-7, 1e-6, 1e-5]), ] ) @@ -63,7 +63,7 @@ def test_shape(self, expected_shape): self.assertTupleEqual(data2["label"].shape, expected_shape) self.assertTupleEqual(data2["extra"].shape, expected_shape) - dataset = Dataset(data=test_data, transform=LoadNiftid(keys=["image", "label", "extra"])) + dataset = Dataset(data=test_data, transform=LoadImaged(keys=["image", "label", "extra"])) data1_simple = dataset[0] data2_simple = dataset[1] diff --git a/tests/test_decathlondataset.py b/tests/test_decathlondataset.py index c3a9e61f98..e7b9678f4d 100644 --- a/tests/test_decathlondataset.py +++ b/tests/test_decathlondataset.py @@ -15,7 +15,7 @@ from urllib.error import ContentTooShortError, HTTPError from monai.apps import DecathlonDataset -from monai.transforms import AddChanneld, Compose, LoadNiftid, ScaleIntensityd, ToTensord +from monai.transforms import AddChanneld, Compose, LoadImaged, ScaleIntensityd, ToTensord from tests.utils import skip_if_quick @@ -25,7 +25,7 @@ def test_values(self): testing_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "testing_data") transform = Compose( [ - LoadNiftid(keys=["image", "label"]), + LoadImaged(keys=["image", "label"]), AddChanneld(keys=["image", "label"]), ScaleIntensityd(keys="image"), ToTensord(keys=["image", "label"]), diff --git a/tests/test_integration_classification_2d.py b/tests/test_integration_classification_2d.py index b9a458ac86..143a5479c2 100644 --- a/tests/test_integration_classification_2d.py +++ b/tests/test_integration_classification_2d.py @@ -22,7 +22,7 @@ from monai.apps import download_and_extract from monai.metrics import compute_roc_auc from monai.networks.nets import densenet121 -from monai.transforms import AddChannel, Compose, LoadPNG, RandFlip, RandRotate, RandZoom, ScaleIntensity, ToTensor +from monai.transforms import AddChannel, Compose, LoadImage, RandFlip, RandRotate, RandZoom, ScaleIntensity, ToTensor from monai.utils import set_determinism from tests.testing_data.integration_answers import test_integration_value from tests.utils import DistTestCase, TimedCall, skip_if_quick @@ -51,7 +51,7 @@ def run_training_test(root_dir, train_x, train_y, val_x, val_y, device="cuda:0", # define transforms for image and classification train_transforms = Compose( [ - LoadPNG(image_only=True), + LoadImage(image_only=True), AddChannel(), ScaleIntensity(), RandRotate(range_x=np.pi / 12, prob=0.5, keep_size=True), @@ -61,7 +61,7 @@ def run_training_test(root_dir, train_x, train_y, val_x, val_y, device="cuda:0", ] ) train_transforms.set_random_state(1234) - val_transforms = Compose([LoadPNG(image_only=True), AddChannel(), ScaleIntensity(), ToTensor()]) + val_transforms = Compose([LoadImage(image_only=True), AddChannel(), ScaleIntensity(), ToTensor()]) # create train, val data loaders train_ds = MedNISTDataset(train_x, train_y, train_transforms) @@ -129,7 +129,7 @@ def run_training_test(root_dir, train_x, train_y, val_x, val_y, device="cuda:0", def run_inference_test(root_dir, test_x, test_y, device="cuda:0", num_workers=10): # define transforms for image and classification - val_transforms = Compose([LoadPNG(image_only=True), AddChannel(), ScaleIntensity(), ToTensor()]) + val_transforms = Compose([LoadImage(image_only=True), AddChannel(), ScaleIntensity(), ToTensor()]) val_ds = 
MedNISTDataset(test_x, test_y, val_transforms) val_loader = DataLoader(val_ds, batch_size=300, num_workers=num_workers) diff --git a/tests/test_integration_segmentation_3d.py b/tests/test_integration_segmentation_3d.py index cfeefc3f46..123476f560 100644 --- a/tests/test_integration_segmentation_3d.py +++ b/tests/test_integration_segmentation_3d.py @@ -30,7 +30,7 @@ AsChannelFirstd, AsDiscrete, Compose, - LoadNiftid, + LoadImaged, RandCropByPosNegLabeld, RandRotate90d, ScaleIntensityd, @@ -55,7 +55,7 @@ def run_training_test(root_dir, device="cuda:0", cachedataset=0): # define transforms for image and segmentation train_transforms = Compose( [ - LoadNiftid(keys=["img", "seg"]), + LoadImaged(keys=["img", "seg"]), AsChannelFirstd(keys=["img", "seg"], channel_dim=-1), # resampling with align_corners=True or dtype=float64 will generate # slight different results between PyTorch 1.5 an 1.6 @@ -71,7 +71,7 @@ def run_training_test(root_dir, device="cuda:0", cachedataset=0): train_transforms.set_random_state(1234) val_transforms = Compose( [ - LoadNiftid(keys=["img", "seg"]), + LoadImaged(keys=["img", "seg"]), AsChannelFirstd(keys=["img", "seg"], channel_dim=-1), # resampling with align_corners=True or dtype=float64 will generate # slight different results between PyTorch 1.5 an 1.6 @@ -181,7 +181,7 @@ def run_inference_test(root_dir, device="cuda:0"): # define transforms for image and segmentation val_transforms = Compose( [ - LoadNiftid(keys=["img", "seg"]), + LoadImaged(keys=["img", "seg"]), AsChannelFirstd(keys=["img", "seg"], channel_dim=-1), # resampling with align_corners=True or dtype=float64 will generate # slight different results between PyTorch 1.5 an 1.6 diff --git a/tests/test_integration_workflows.py b/tests/test_integration_workflows.py index 8d117ddb5a..148c71c3f9 100644 --- a/tests/test_integration_workflows.py +++ b/tests/test_integration_workflows.py @@ -44,7 +44,7 @@ AsDiscreted, Compose, KeepLargestConnectedComponentd, - LoadNiftid, + LoadImaged, RandCropByPosNegLabeld, RandRotate90d, ScaleIntensityd, @@ -66,7 +66,7 @@ def run_training_test(root_dir, device="cuda:0", amp=False, num_workers=4): # define transforms for image and segmentation train_transforms = Compose( [ - LoadNiftid(keys=["image", "label"]), + LoadImaged(keys=["image", "label"]), AsChannelFirstd(keys=["image", "label"], channel_dim=-1), ScaleIntensityd(keys=["image", "label"]), RandCropByPosNegLabeld( @@ -78,7 +78,7 @@ def run_training_test(root_dir, device="cuda:0", amp=False, num_workers=4): ) val_transforms = Compose( [ - LoadNiftid(keys=["image", "label"]), + LoadImaged(keys=["image", "label"]), AsChannelFirstd(keys=["image", "label"], channel_dim=-1), ScaleIntensityd(keys=["image", "label"]), ToTensord(keys=["image", "label"]), @@ -177,7 +177,7 @@ def run_inference_test(root_dir, model_file, device="cuda:0", amp=False, num_wor # define transforms for image and segmentation val_transforms = Compose( [ - LoadNiftid(keys=["image", "label"]), + LoadImaged(keys=["image", "label"]), AsChannelFirstd(keys=["image", "label"], channel_dim=-1), ScaleIntensityd(keys=["image", "label"]), ToTensord(keys=["image", "label"]), diff --git a/tests/test_integration_workflows_gan.py b/tests/test_integration_workflows_gan.py index 56dd8b93e0..a4133b788f 100644 --- a/tests/test_integration_workflows_gan.py +++ b/tests/test_integration_workflows_gan.py @@ -28,7 +28,7 @@ from monai.handlers import CheckpointSaver, StatsHandler, TensorBoardStatsHandler from monai.networks import normal_init from monai.networks.nets import 
Discriminator, Generator -from monai.transforms import AsChannelFirstd, Compose, LoadNiftid, RandFlipd, ScaleIntensityd, ToTensord +from monai.transforms import AsChannelFirstd, Compose, LoadImaged, RandFlipd, ScaleIntensityd, ToTensord from monai.utils import set_determinism from tests.utils import DistTestCase, TimedCall, skip_if_quick @@ -40,7 +40,7 @@ def run_training_test(root_dir, device="cuda:0"): # prepare real data train_transforms = Compose( [ - LoadNiftid(keys=["reals"]), + LoadImaged(keys=["reals"]), AsChannelFirstd(keys=["reals"]), ScaleIntensityd(keys=["reals"]), RandFlipd(keys=["reals"], prob=0.5), diff --git a/tests/test_iterable_dataset.py b/tests/test_iterable_dataset.py index 5a7345905e..da63374e39 100644 --- a/tests/test_iterable_dataset.py +++ b/tests/test_iterable_dataset.py @@ -19,7 +19,7 @@ import numpy as np from monai.data import DataLoader, IterableDataset -from monai.transforms import Compose, LoadNiftid, SimulateDelayd +from monai.transforms import Compose, LoadImaged, SimulateDelayd lock = Lock() @@ -38,12 +38,10 @@ def __next__(self): data = None # support multi-process access to the database lock.acquire() - count = 0 with open(self.dbpath) as f: count = json.load(f)["count"] - if count > 0: - data = self.data[count - 1] if count > 0: + data = self.data[count - 1] with open(self.dbpath, "w") as f: json.dump({"count": count - 1}, f) lock.release() @@ -69,7 +67,7 @@ def test_shape(self): test_transform = Compose( [ - LoadNiftid(keys="image"), + LoadImaged(keys="image"), SimulateDelayd(keys="image", delay_time=1e-7), ] ) diff --git a/tests/test_lmdbdataset.py b/tests/test_lmdbdataset.py index 90cfe928e4..b867e31e20 100644 --- a/tests/test_lmdbdataset.py +++ b/tests/test_lmdbdataset.py @@ -18,13 +18,13 @@ from parameterized import parameterized from monai.data import LMDBDataset, json_hashing -from monai.transforms import Compose, LoadNiftid, SimulateDelayd, Transform +from monai.transforms import Compose, LoadImaged, SimulateDelayd, Transform from tests.utils import skip_if_windows TEST_CASE_1 = [ Compose( [ - LoadNiftid(keys=["image", "label", "extra"]), + LoadImaged(keys=["image", "label", "extra"]), SimulateDelayd(keys=["image", "label", "extra"], delay_time=[1e-7, 1e-6, 1e-5]), ] ), @@ -33,7 +33,7 @@ TEST_CASE_2 = [ [ - LoadNiftid(keys=["image", "label", "extra"]), + LoadImaged(keys=["image", "label", "extra"]), SimulateDelayd(keys=["image", "label", "extra"], delay_time=[1e-7, 1e-6, 1e-5]), ], (128, 128, 128), @@ -43,7 +43,7 @@ TEST_CASE_4 = [ [ - LoadNiftid(keys=["image", "label", "extra"]), + LoadImaged(keys=["image", "label", "extra"]), SimulateDelayd(keys=["image", "label", "extra"], delay_time=[1e-7, 1e-6, 1e-5]), ], (128, 128, 128), @@ -52,7 +52,7 @@ TEST_CASE_5 = [ [ - LoadNiftid(keys=["image", "label", "extra"]), + LoadImaged(keys=["image", "label", "extra"]), SimulateDelayd(keys=["image", "label", "extra"], delay_time=[1e-7, 1e-6, 1e-5]), ], (128, 128, 128), @@ -61,7 +61,7 @@ TEST_CASE_6 = [ [ - LoadNiftid(keys=["image", "label", "extra"]), + LoadImaged(keys=["image", "label", "extra"]), SimulateDelayd(keys=["image", "label", "extra"], delay_time=[1e-7, 1e-6, 1e-5]), ], (128, 128, 128), @@ -70,7 +70,7 @@ TEST_CASE_7 = [ [ - LoadNiftid(keys=["image", "label", "extra"]), + LoadImaged(keys=["image", "label", "extra"]), SimulateDelayd(keys=["image", "label", "extra"], delay_time=[1e-7, 1e-6, 1e-5]), ], (128, 128, 128), diff --git a/tests/test_load_image.py b/tests/test_load_image.py index ca72fbc684..272a528f81 100644 --- a/tests/test_load_image.py 
+++ b/tests/test_load_image.py @@ -21,28 +21,19 @@ from monai.data import ITKReader, NibabelReader from monai.transforms import LoadImage -from tests.utils import SkipIfNoModule -TEST_CASE_1 = [ - {"reader": NibabelReader(), "image_only": True}, - ["test_image.nii.gz"], - (128, 128, 128), -] +TEST_CASE_1 = [{"image_only": True}, ["test_image.nii.gz"], (128, 128, 128)] -TEST_CASE_2 = [ - {"reader": NibabelReader(), "image_only": False}, - ["test_image.nii.gz"], - (128, 128, 128), -] +TEST_CASE_2 = [{"image_only": False}, ["test_image.nii.gz"], (128, 128, 128)] TEST_CASE_3 = [ - {"reader": NibabelReader(), "image_only": True}, + {"image_only": True}, ["test_image.nii.gz", "test_image2.nii.gz", "test_image3.nii.gz"], (3, 128, 128, 128), ] TEST_CASE_4 = [ - {"reader": NibabelReader(), "image_only": False}, + {"image_only": False}, ["test_image.nii.gz", "test_image2.nii.gz", "test_image3.nii.gz"], (3, 128, 128, 128), ] @@ -53,18 +44,18 @@ (128, 128, 128), ] -TEST_CASE_6 = [{"image_only": True}, ["test_image.nii.gz"], (128, 128, 128)] +TEST_CASE_6 = [{"reader": ITKReader(), "image_only": True}, ["test_image.nii.gz"], (128, 128, 128)] -TEST_CASE_7 = [{"image_only": False}, ["test_image.nii.gz"], (128, 128, 128)] +TEST_CASE_7 = [{"reader": ITKReader(), "image_only": False}, ["test_image.nii.gz"], (128, 128, 128)] TEST_CASE_8 = [ - {"image_only": True}, + {"reader": ITKReader(), "image_only": True}, ["test_image.nii.gz", "test_image2.nii.gz", "test_image3.nii.gz"], (3, 128, 128, 128), ] TEST_CASE_9 = [ - {"image_only": False}, + {"reader": ITKReader(), "image_only": False}, ["test_image.nii.gz", "test_image2.nii.gz", "test_image3.nii.gz"], (3, 128, 128, 128), ] @@ -75,6 +66,12 @@ (4, 16, 16), ] +TEST_CASE_11 = [ + {"image_only": False, "reader": "ITKReader", "pixel_type": itk.UC}, + "tests/testing_data/CT_DICOM", + (4, 16, 16), +] + class TestLoadImage(unittest.TestCase): @parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3, TEST_CASE_4, TEST_CASE_5]) @@ -112,8 +109,7 @@ def test_itk_reader(self, input_param, filenames, expected_shape): np.testing.assert_allclose(header["original_affine"], np.eye(4)) self.assertTupleEqual(result.shape, expected_shape) - @parameterized.expand([TEST_CASE_10]) - @SkipIfNoModule("itk") + @parameterized.expand([TEST_CASE_10, TEST_CASE_11]) def test_itk_dicom_series_reader(self, input_param, filenames, expected_shape): result, header = LoadImage(**input_param)(filenames) self.assertTrue("affine" in header) @@ -138,7 +134,7 @@ def test_itk_reader_multichannel(self): filename = os.path.join(tempdir, "test_image.png") itk_np_view = itk.image_view_from_array(test_image, is_vector=True) itk.imwrite(itk_np_view, filename) - result, header = LoadImage()(filename) + result, header = LoadImage(reader=ITKReader())(filename) self.assertTupleEqual(tuple(header["spatial_shape"]), (256, 256)) np.testing.assert_allclose(result[0, :, :], test_image[:, :, 0]) @@ -154,8 +150,6 @@ def test_load_png(self): result, header = LoadImage(image_only=False)(filename) self.assertTupleEqual(tuple(header["spatial_shape"]), spatial_size) self.assertTupleEqual(result.shape, spatial_size) - np.testing.assert_allclose(header["affine"], np.eye(3)) - np.testing.assert_allclose(header["original_affine"], np.eye(3)) np.testing.assert_allclose(result, test_image) def test_register(self): @@ -189,7 +183,7 @@ def test_kwargs(self): reader = ITKReader() img = reader.read(filename, fallback_only=False) result_raw, header_raw = reader.get_data(img) - self.assertListEqual(header["spatial_shape"], 
header_raw["spatial_shape"]) + np.testing.assert_allclose(header["spatial_shape"], header_raw["spatial_shape"]) self.assertTupleEqual(result.shape, result_raw.shape) diff --git a/tests/test_load_imaged.py b/tests/test_load_imaged.py index 396167046b..ef733cac2f 100644 --- a/tests/test_load_imaged.py +++ b/tests/test_load_imaged.py @@ -25,9 +25,11 @@ TEST_CASE_1 = [{"keys": KEYS}, (128, 128, 128)] +TEST_CASE_2 = [{"keys": KEYS, "reader": "ITKReader", "fallback_only": False}, (128, 128, 128)] + class TestLoadImaged(unittest.TestCase): - @parameterized.expand([TEST_CASE_1]) + @parameterized.expand([TEST_CASE_1, TEST_CASE_2]) def test_shape(self, input_param, expected_shape): test_image = nib.Nifti1Image(np.random.rand(128, 128, 128), np.eye(4)) test_data = dict() diff --git a/tests/test_load_niftid.py b/tests/test_load_niftid.py index d46c8a865c..54d816bead 100644 --- a/tests/test_load_niftid.py +++ b/tests/test_load_niftid.py @@ -17,7 +17,7 @@ import numpy as np from parameterized import parameterized -from monai.transforms import LoadNiftid +from monai.transforms import LoadImaged KEYS = ["image", "label", "extra"] @@ -33,7 +33,7 @@ def test_shape(self, input_param, expected_shape): for key in KEYS: nib.save(test_image, os.path.join(tempdir, key + ".nii.gz")) test_data.update({key: os.path.join(tempdir, key + ".nii.gz")}) - result = LoadNiftid(**input_param)(test_data) + result = LoadImaged(**input_param)(test_data) for key in KEYS: self.assertTupleEqual(result[key].shape, expected_shape) diff --git a/tests/test_load_spacing_orientation.py b/tests/test_load_spacing_orientation.py index 7c004129a2..5ae9cc4326 100644 --- a/tests/test_load_spacing_orientation.py +++ b/tests/test_load_spacing_orientation.py @@ -18,7 +18,7 @@ from nibabel.processing import resample_to_output from parameterized import parameterized -from monai.transforms import AddChanneld, LoadNiftid, Orientationd, Spacingd +from monai.transforms import AddChanneld, LoadImaged, Orientationd, Spacingd FILES = tuple( os.path.join(os.path.dirname(__file__), "testing_data", filename) @@ -30,7 +30,7 @@ class TestLoadSpacingOrientation(unittest.TestCase): @parameterized.expand(FILES) def test_load_spacingd(self, filename): data = {"image": filename} - data_dict = LoadNiftid(keys="image")(data) + data_dict = LoadImaged(keys="image")(data) data_dict = AddChanneld(keys="image")(data_dict) t = time.time() res_dict = Spacingd(keys="image", pixdim=(1, 0.2, 1), diagonal=True, padding_mode="zeros")(data_dict) @@ -48,7 +48,7 @@ def test_load_spacingd(self, filename): @parameterized.expand(FILES) def test_load_spacingd_rotate(self, filename): data = {"image": filename} - data_dict = LoadNiftid(keys="image")(data) + data_dict = LoadImaged(keys="image")(data) data_dict = AddChanneld(keys="image")(data_dict) affine = data_dict["image_meta_dict"]["affine"] data_dict["image_meta_dict"]["original_affine"] = data_dict["image_meta_dict"]["affine"] = ( @@ -74,7 +74,7 @@ def test_load_spacingd_rotate(self, filename): def test_load_spacingd_non_diag(self): data = {"image": FILES[1]} - data_dict = LoadNiftid(keys="image")(data) + data_dict = LoadImaged(keys="image")(data) data_dict = AddChanneld(keys="image")(data_dict) affine = data_dict["image_meta_dict"]["affine"] data_dict["image_meta_dict"]["original_affine"] = data_dict["image_meta_dict"]["affine"] = ( @@ -95,7 +95,7 @@ def test_load_spacingd_non_diag(self): def test_load_spacingd_rotate_non_diag(self): data = {"image": FILES[0]} - data_dict = LoadNiftid(keys="image")(data) + data_dict = 
LoadImaged(keys="image")(data) data_dict = AddChanneld(keys="image")(data_dict) res_dict = Spacingd(keys="image", pixdim=(1, 2, 3), diagonal=False, padding_mode="border")(data_dict) np.testing.assert_allclose( @@ -105,7 +105,7 @@ def test_load_spacingd_rotate_non_diag(self): def test_load_spacingd_rotate_non_diag_ornt(self): data = {"image": FILES[0]} - data_dict = LoadNiftid(keys="image")(data) + data_dict = LoadImaged(keys="image")(data) data_dict = AddChanneld(keys="image")(data_dict) res_dict = Spacingd(keys="image", pixdim=(1, 2, 3), diagonal=False, padding_mode="border")(data_dict) res_dict = Orientationd(keys="image", axcodes="LPI")(res_dict) @@ -116,7 +116,7 @@ def test_load_spacingd_rotate_non_diag_ornt(self): def test_load_spacingd_non_diag_ornt(self): data = {"image": FILES[1]} - data_dict = LoadNiftid(keys="image")(data) + data_dict = LoadImaged(keys="image")(data) data_dict = AddChanneld(keys="image")(data_dict) affine = data_dict["image_meta_dict"]["affine"] data_dict["image_meta_dict"]["original_affine"] = data_dict["image_meta_dict"]["affine"] = ( diff --git a/tests/test_mednistdataset.py b/tests/test_mednistdataset.py index 868116b9d8..9b9f8a75b1 100644 --- a/tests/test_mednistdataset.py +++ b/tests/test_mednistdataset.py @@ -15,7 +15,7 @@ from urllib.error import ContentTooShortError, HTTPError from monai.apps import MedNISTDataset -from monai.transforms import AddChanneld, Compose, LoadPNGd, ScaleIntensityd, ToTensord +from monai.transforms import AddChanneld, Compose, LoadImaged, ScaleIntensityd, ToTensord from tests.utils import skip_if_quick @@ -25,7 +25,7 @@ def test_values(self): testing_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "testing_data") transform = Compose( [ - LoadPNGd(keys="image"), + LoadImaged(keys="image"), AddChanneld(keys="image"), ScaleIntensityd(keys="image"), ToTensord(keys=["image", "label"]), diff --git a/tests/test_persistentdataset.py b/tests/test_persistentdataset.py index fa51eaf295..f13fb31ac5 100644 --- a/tests/test_persistentdataset.py +++ b/tests/test_persistentdataset.py @@ -18,12 +18,12 @@ from parameterized import parameterized from monai.data import PersistentDataset, json_hashing -from monai.transforms import Compose, LoadNiftid, SimulateDelayd, Transform +from monai.transforms import Compose, LoadImaged, SimulateDelayd, Transform TEST_CASE_1 = [ Compose( [ - LoadNiftid(keys=["image", "label", "extra"]), + LoadImaged(keys=["image", "label", "extra"]), SimulateDelayd(keys=["image", "label", "extra"], delay_time=[1e-7, 1e-6, 1e-5]), ] ), @@ -32,7 +32,7 @@ TEST_CASE_2 = [ [ - LoadNiftid(keys=["image", "label", "extra"]), + LoadImaged(keys=["image", "label", "extra"]), SimulateDelayd(keys=["image", "label", "extra"], delay_time=[1e-7, 1e-6, 1e-5]), ], (128, 128, 128), diff --git a/tests/test_smartcachedataset.py b/tests/test_smartcachedataset.py index f4aff0f1f5..cc458b281b 100644 --- a/tests/test_smartcachedataset.py +++ b/tests/test_smartcachedataset.py @@ -18,15 +18,15 @@ from parameterized import parameterized from monai.data import SmartCacheDataset -from monai.transforms import Compose, LoadNiftid +from monai.transforms import Compose, LoadImaged -TEST_CASE_1 = [0.1, 0, Compose([LoadNiftid(keys=["image", "label", "extra"])])] +TEST_CASE_1 = [0.1, 0, Compose([LoadImaged(keys=["image", "label", "extra"])])] -TEST_CASE_2 = [0.1, 4, Compose([LoadNiftid(keys=["image", "label", "extra"])])] +TEST_CASE_2 = [0.1, 4, Compose([LoadImaged(keys=["image", "label", "extra"])])] TEST_CASE_3 = [0.1, 4, None] -TEST_CASE_4 = 
[0.5, 2, Compose([LoadNiftid(keys=["image", "label", "extra"])])] +TEST_CASE_4 = [0.5, 2, Compose([LoadImaged(keys=["image", "label", "extra"])])] class TestSmartCacheDataset(unittest.TestCase): diff --git a/tests/testing_data/integration_answers.py b/tests/testing_data/integration_answers.py index 2b6bcff442..6a52cb4ed3 100644 --- a/tests/testing_data/integration_answers.py +++ b/tests/testing_data/integration_answers.py @@ -324,4 +324,4 @@ def test_integration_value(test_name, key, data, rtol=1e-2): value = expected[test_name][key] if np.allclose(data, value, rtol=rtol): return True - return False + raise ValueError(f"no matched results for {test_name}, {key}. {data}.")