diff --git a/monai/apps/auto3dseg/auto_runner.py b/monai/apps/auto3dseg/auto_runner.py index 8296973f6e..12566c0d34 100644 --- a/monai/apps/auto3dseg/auto_runner.py +++ b/monai/apps/auto3dseg/auto_runner.py @@ -36,7 +36,7 @@ from monai.auto3dseg.utils import algo_to_pickle from monai.bundle import ConfigParser from monai.transforms import SaveImage -from monai.utils.enums import AlgoEnsembleKeys +from monai.utils.enums import AlgoKeys from monai.utils.module import look_up_option, optional_import logger = get_logger(module_name=__name__) @@ -636,11 +636,11 @@ def _train_algo_in_sequence(self, history: list[dict[str, Any]]) -> None: progress.yaml, accuracies in CSV and a pickle file of the Algo object. """ for algo_dict in history: - algo = algo_dict[AlgoEnsembleKeys.ALGO] + algo = algo_dict[AlgoKeys.ALGO] algo.train(self.train_params) acc = algo.get_score() - algo_meta_data = {str(AlgoEnsembleKeys.SCORE): acc} + algo_meta_data = {str(AlgoKeys.SCORE): acc} algo_to_pickle(algo, template_path=algo.template_path, **algo_meta_data) def _train_algo_in_nni(self, history: list[dict[str, Any]]) -> None: @@ -675,8 +675,8 @@ def _train_algo_in_nni(self, history: list[dict[str, Any]]) -> None: last_total_tasks = len(import_bundle_algo_history(self.work_dir, only_trained=True)) mode_dry_run = self.hpo_params.pop("nni_dry_run", False) for algo_dict in history: - name = algo_dict[AlgoEnsembleKeys.ID] - algo = algo_dict[AlgoEnsembleKeys.ALGO] + name = algo_dict[AlgoKeys.ID] + algo = algo_dict[AlgoKeys.ALGO] nni_gen = NNIGen(algo=algo, params=self.hpo_params) obj_filename = nni_gen.get_obj_filename() nni_config = deepcopy(default_nni_config) @@ -772,13 +772,13 @@ def run(self): ) if auto_train_choice: - skip_algos = [h[AlgoEnsembleKeys.ID] for h in history if h["is_trained"]] + skip_algos = [h[AlgoKeys.ID] for h in history if h[AlgoKeys.IS_TRAINED]] if len(skip_algos) > 0: logger.info( f"Skipping already trained algos {skip_algos}." 
"Set option train=True to always retrain all algos." ) - history = [h for h in history if not h["is_trained"]] + history = [h for h in history if not h[AlgoKeys.IS_TRAINED]] if len(history) > 0: if not self.hpo: @@ -794,13 +794,13 @@ def run(self): if self.ensemble: history = import_bundle_algo_history(self.work_dir, only_trained=False) - history_untrained = [h for h in history if not h["is_trained"]] + history_untrained = [h for h in history if not h[AlgoKeys.IS_TRAINED]] if len(history_untrained) > 0: warnings.warn( f"Ensembling step will skip {[h['name'] for h in history_untrained]} untrained algos." "Generally it means these algos did not complete training." ) - history = [h for h in history if h["is_trained"]] + history = [h for h in history if h[AlgoKeys.IS_TRAINED]] if len(history) == 0: raise ValueError( @@ -816,7 +816,7 @@ def run(self): if len(preds) > 0: logger.info("Auto3Dseg picked the following networks to ensemble:") for algo in ensembler.get_algo_ensemble(): - logger.info(algo[AlgoEnsembleKeys.ID]) + logger.info(algo[AlgoKeys.ID]) for pred in preds: self.save_image(pred) diff --git a/monai/apps/auto3dseg/bundle_gen.py b/monai/apps/auto3dseg/bundle_gen.py index 3f34e4453f..51fcf684be 100644 --- a/monai/apps/auto3dseg/bundle_gen.py +++ b/monai/apps/auto3dseg/bundle_gen.py @@ -33,7 +33,7 @@ from monai.auto3dseg.utils import algo_to_pickle from monai.bundle.config_parser import ConfigParser from monai.utils import ensure_tuple -from monai.utils.enums import AlgoEnsembleKeys +from monai.utils.enums import AlgoKeys logger = get_logger(module_name=__name__) ALGO_HASH = os.environ.get("MONAI_ALGO_HASH", "7758ad1") @@ -539,5 +539,5 @@ def generate( algo_to_pickle(gen_algo, template_path=algo.template_path) self.history.append( - {AlgoEnsembleKeys.ID: name, AlgoEnsembleKeys.ALGO: gen_algo} + {AlgoKeys.ID: name, AlgoKeys.ALGO: gen_algo} ) # track the previous, may create a persistent history diff --git a/monai/apps/auto3dseg/ensemble_builder.py 
b/monai/apps/auto3dseg/ensemble_builder.py index dffadb818d..3a77fd9c05 100644 --- a/monai/apps/auto3dseg/ensemble_builder.py +++ b/monai/apps/auto3dseg/ensemble_builder.py @@ -27,7 +27,7 @@ from monai.auto3dseg.utils import datafold_read from monai.bundle import ConfigParser from monai.transforms import MeanEnsemble, VoteEnsemble -from monai.utils.enums import AlgoEnsembleKeys +from monai.utils.enums import AlgoKeys from monai.utils.misc import prob2class from monai.utils.module import look_up_option @@ -59,7 +59,7 @@ def get_algo(self, identifier): identifier: the name of the bundleAlgo """ for algo in self.algos: - if identifier == algo[AlgoEnsembleKeys.ID]: + if identifier == algo[AlgoKeys.ID]: return algo def get_algo_ensemble(self): @@ -160,7 +160,7 @@ def __call__(self, pred_param: dict[str, Any] | None = None) -> list[torch.Tenso print(i) preds = [] for algo in self.algo_ensemble: - infer_instance = algo[AlgoEnsembleKeys.ALGO] + infer_instance = algo[AlgoKeys.ALGO] pred = infer_instance.predict(predict_files=[file], predict_params=param) preds.append(pred[0]) outputs.append(self.ensemble_pred(preds, sigmoid=sigmoid)) @@ -187,7 +187,7 @@ def sort_score(self): """ Sort the best_metrics """ - scores = concat_val_to_np(self.algos, [AlgoEnsembleKeys.SCORE]) + scores = concat_val_to_np(self.algos, [AlgoKeys.SCORE]) return np.argsort(scores).tolist() def collect_algos(self, n_best: int = -1) -> None: @@ -238,14 +238,14 @@ def collect_algos(self) -> None: best_model: BundleAlgo | None = None for algo in self.algos: # algorithm folder: {net}_{fold_index}_{other} - identifier = algo[AlgoEnsembleKeys.ID].split("_")[1] + identifier = algo[AlgoKeys.ID].split("_")[1] try: algo_id = int(identifier) except ValueError as err: raise ValueError(f"model identifier {identifier} is not number.") from err - if algo_id == f_idx and algo[AlgoEnsembleKeys.SCORE] > best_score: + if algo_id == f_idx and algo[AlgoKeys.SCORE] > best_score: best_model = algo - best_score = 
algo[AlgoEnsembleKeys.SCORE] + best_score = algo[AlgoKeys.SCORE] self.algo_ensemble.append(best_model) @@ -268,7 +268,7 @@ class AlgoEnsembleBuilder: """ def __init__(self, history: Sequence[dict[str, Any]], data_src_cfg_filename: str | None = None): - self.infer_algos: list[dict[AlgoEnsembleKeys, Any]] = [] + self.infer_algos: list[dict[AlgoKeys, Any]] = [] self.ensemble: AlgoEnsemble self.data_src_cfg = ConfigParser(globals=False) @@ -278,8 +278,8 @@ def __init__(self, history: Sequence[dict[str, Any]], data_src_cfg_filename: str for algo_dict in history: # load inference_config_paths - name = algo_dict[AlgoEnsembleKeys.ID] - gen_algo = algo_dict[AlgoEnsembleKeys.ALGO] + name = algo_dict[AlgoKeys.ID] + gen_algo = algo_dict[AlgoKeys.ALGO] best_metric = gen_algo.get_score() algo_path = gen_algo.output_path @@ -306,7 +306,7 @@ def add_inferer(self, identifier: str, gen_algo: BundleAlgo, best_metric: float if best_metric is None: raise ValueError("Feature to re-validate is to be implemented") - algo = {AlgoEnsembleKeys.ID: identifier, AlgoEnsembleKeys.ALGO: gen_algo, AlgoEnsembleKeys.SCORE: best_metric} + algo = {AlgoKeys.ID: identifier, AlgoKeys.ALGO: gen_algo, AlgoKeys.SCORE: best_metric} self.infer_algos.append(algo) def set_ensemble_method(self, ensemble: AlgoEnsemble, *args: Any, **kwargs: Any) -> None: diff --git a/monai/apps/auto3dseg/hpo_gen.py b/monai/apps/auto3dseg/hpo_gen.py index 8acf56e7c6..87ba3fa84f 100644 --- a/monai/apps/auto3dseg/hpo_gen.py +++ b/monai/apps/auto3dseg/hpo_gen.py @@ -23,7 +23,7 @@ from monai.bundle.config_parser import ConfigParser from monai.config import PathLike from monai.utils import optional_import -from monai.utils.enums import AlgoEnsembleKeys +from monai.utils.enums import AlgoKeys nni, has_nni = optional_import("nni") optuna, has_optuna = optional_import("optuna") @@ -99,8 +99,8 @@ class NNIGen(HPOGen): # Bundle Algorithms are already generated by BundleGen in work_dir import_bundle_algo_history(work_dir, only_trained=False) 
algo_dict = self.history[0] # pick the first algorithm - algo_name = algo_dict[AlgoEnsembleKeys.ID] - onealgo = algo_dict[AlgoEnsembleKeys.ALGO] + algo_name = algo_dict[AlgoKeys.ID] + onealgo = algo_dict[AlgoKeys.ALGO] nni_gen = NNIGen(algo=onealgo) nni_gen.print_bundle_algo_instruction() @@ -238,7 +238,7 @@ def run_algo(self, obj_filename: str, output_folder: str = ".", template_path: P self.algo.train(self.params) # step 4 report validation acc to controller acc = self.algo.get_score() - algo_meta_data = {str(AlgoEnsembleKeys.SCORE): acc} + algo_meta_data = {str(AlgoKeys.SCORE): acc} if isinstance(self.algo, BundleAlgo): algo_to_pickle(self.algo, template_path=self.algo.template_path, **algo_meta_data) @@ -411,7 +411,7 @@ def run_algo(self, obj_filename: str, output_folder: str = ".", template_path: P self.algo.train(self.params) # step 4 report validation acc to controller acc = self.algo.get_score() - algo_meta_data = {str(AlgoEnsembleKeys.SCORE): acc} + algo_meta_data = {str(AlgoKeys.SCORE): acc} if isinstance(self.algo, BundleAlgo): algo_to_pickle(self.algo, template_path=self.algo.template_path, **algo_meta_data) else: diff --git a/monai/apps/auto3dseg/utils.py b/monai/apps/auto3dseg/utils.py index 9382aff569..90de5e8f75 100644 --- a/monai/apps/auto3dseg/utils.py +++ b/monai/apps/auto3dseg/utils.py @@ -15,7 +15,7 @@ from monai.apps.auto3dseg.bundle_gen import BundleAlgo from monai.auto3dseg import algo_from_pickle, algo_to_pickle -from monai.utils.enums import AlgoEnsembleKeys +from monai.utils.enums import AlgoKeys def import_bundle_algo_history( @@ -49,17 +49,12 @@ def import_bundle_algo_history( if isinstance(algo, BundleAlgo): # algo's template path needs override algo.template_path = algo_meta_data["template_path"] - best_metric = algo_meta_data.get(AlgoEnsembleKeys.SCORE, None) + best_metric = algo_meta_data.get(AlgoKeys.SCORE, None) is_trained = best_metric is not None if (only_trained and is_trained) or not only_trained: history.append( - { - 
AlgoEnsembleKeys.ID: name, - AlgoEnsembleKeys.ALGO: algo, - AlgoEnsembleKeys.SCORE: best_metric, - "is_trained": is_trained, - } + {AlgoKeys.ID: name, AlgoKeys.ALGO: algo, AlgoKeys.SCORE: best_metric, AlgoKeys.IS_TRAINED: is_trained} ) return history @@ -73,5 +68,5 @@ def export_bundle_algo_history(history: list[dict[str, BundleAlgo]]) -> None: history: a List of Bundle. Typically, the history can be obtained from BundleGen get_history method """ for algo_dict in history: - algo = algo_dict[AlgoEnsembleKeys.ALGO] + algo = algo_dict[AlgoKeys.ALGO] algo_to_pickle(algo, template_path=algo.template_path) diff --git a/monai/data/meta_tensor.py b/monai/data/meta_tensor.py index 48b9320f99..e3aacb95ee 100644 --- a/monai/data/meta_tensor.py +++ b/monai/data/meta_tensor.py @@ -510,9 +510,16 @@ def new_empty(self, size, dtype=None, device=None, requires_grad=False): self.as_tensor().new_empty(size=size, dtype=dtype, device=device, requires_grad=requires_grad) ) - def clone(self): - """returns a copy of the MetaTensor instance.""" - new_inst = MetaTensor(self.as_tensor().clone()) + def clone(self, **kwargs): + """ + Returns a copy of the MetaTensor instance. + + Args: + kwargs: additional keyword arguments to `torch.clone`. 
+ + See also: https://pytorch.org/docs/stable/generated/torch.clone.html + """ + new_inst = MetaTensor(self.as_tensor().clone(**kwargs)) new_inst.__dict__ = deepcopy(self.__dict__) return new_inst diff --git a/monai/inferers/utils.py b/monai/inferers/utils.py index 0e1f0f4a42..b171d20ebb 100644 --- a/monai/inferers/utils.py +++ b/monai/inferers/utils.py @@ -41,7 +41,7 @@ def sliding_window_inference( - inputs: torch.Tensor, + inputs: torch.Tensor | MetaTensor, roi_size: Sequence[int] | int, sw_batch_size: int, predictor: Callable[..., torch.Tensor | Sequence[torch.Tensor] | dict[Any, torch.Tensor]], @@ -307,9 +307,11 @@ def sliding_window_inference( output_image_list[ss] = output_i[(slice(None), slice(None), *final_slicing)] final_output = _pack_struct(output_image_list, dict_keys) - final_output = convert_to_dst_type(final_output, inputs, device=device)[0] # type: ignore if temp_meta is not None: - final_output = MetaTensor(final_output).copy_meta_from(temp_meta) + final_output = convert_to_dst_type(final_output, temp_meta, device=device)[0] # type: ignore + else: + final_output = convert_to_dst_type(final_output, inputs, device=device)[0] + return final_output # type: ignore @@ -322,7 +324,7 @@ def _create_buffered_slices(slices, batch_size, sw_batch_size, buffer_dim, buffe _, _, _b_lens = np.unique(slices_np[:, 0], return_counts=True, return_index=True) b_ends = np.cumsum(_b_lens).tolist() # possible buffer flush boundaries - x = [0, *b_ends][:: min(len(b_ends), int(buffer_steps))] # type: ignore + x = [0, *b_ends][:: min(len(b_ends), int(buffer_steps))] if x[-1] < b_ends[-1]: x.append(b_ends[-1]) n_per_batch = len(x) - 1 @@ -385,7 +387,7 @@ def _flatten_struct(seg_out): dict_keys = sorted(seg_out.keys()) # track predictor's output keys seg_probs = tuple(seg_out[k] for k in dict_keys) else: - seg_probs = ensure_tuple(seg_out) # type: ignore + seg_probs = ensure_tuple(seg_out) return dict_keys, seg_probs diff --git a/monai/networks/utils.py 
b/monai/networks/utils.py index 962db956c7..d264c22e28 100644 --- a/monai/networks/utils.py +++ b/monai/networks/utils.py @@ -672,7 +672,7 @@ def convert_to_onnx( set_determinism(seed=None) # compare onnx/ort and PyTorch results for r1, r2 in zip(torch_out, onnx_out): - torch.testing.assert_allclose(r1.cpu(), r2, rtol=rtol, atol=atol) # type: ignore + torch.testing.assert_allclose(r1.cpu(), r2, rtol=rtol, atol=atol) return onnx_model diff --git a/monai/transforms/spatial/array.py b/monai/transforms/spatial/array.py index 2b61b1f61a..f263e89152 100644 --- a/monai/transforms/spatial/array.py +++ b/monai/transforms/spatial/array.py @@ -230,6 +230,7 @@ def inverse(self, data: torch.Tensor) -> torch.Tensor: with self.trace_transform(False): # we can't use `self.__call__` in case a child class calls this inverse. out: torch.Tensor = SpatialResample.__call__(self, data, **kw_args) + kw_args["src_affine"] = kw_args.get("dst_affine") return out diff --git a/monai/utils/enums.py b/monai/utils/enums.py index 6b01e43b47..a7ea9e29a8 100644 --- a/monai/utils/enums.py +++ b/monai/utils/enums.py @@ -14,6 +14,8 @@ import random from enum import Enum +from monai.utils import deprecated + __all__ = [ "StrEnum", "NumpyPadMode", @@ -56,6 +58,7 @@ "LazyAttr", "BundleProperty", "BundlePropertyConfig", + "AlgoKeys", ] @@ -592,6 +595,7 @@ class LabelStatsKeys(StrEnum): LABEL_NCOMP = "ncomponents" +@deprecated(since="1.2", msg_suffix="please use `AlgoKeys` instead.") class AlgoEnsembleKeys(StrEnum): """ Default keys for Mixed Ensemble @@ -664,3 +668,18 @@ class BundlePropertyConfig(StrEnum): ID = "id" REF_ID = "refer_id" + + +class AlgoKeys(StrEnum): + """ + Default keys for templated Auto3DSeg Algo. + `ID` is the identifier of the algorithm. The string has the format of <name>_<fold_index>_<other>. + `ALGO` is the Auto3DSeg Algo instance. + `IS_TRAINED` is the status that shows if the Algo has been trained. + `SCORE` is the score the Algo has achieved after training. 
+ """ + + ID = "identifier" + ALGO = "algo_instance" + IS_TRAINED = "is_trained" + SCORE = "best_metric" diff --git a/monai/utils/type_conversion.py b/monai/utils/type_conversion.py index c5dd3a797c..734d8a2b17 100644 --- a/monai/utils/type_conversion.py +++ b/monai/utils/type_conversion.py @@ -45,7 +45,7 @@ def get_numpy_dtype_from_string(dtype: str) -> np.dtype: """Get a numpy dtype (e.g., `np.float32`) from its string (e.g., `"float32"`).""" - return np.empty([], dtype=dtype).dtype + return np.empty([], dtype=str(dtype).split(".")[-1]).dtype def get_torch_dtype_from_string(dtype: str) -> torch.dtype: diff --git a/tests/test_auto3dseg_ensemble.py b/tests/test_auto3dseg_ensemble.py index f2170f1dfc..2c8d615c58 100644 --- a/tests/test_auto3dseg_ensemble.py +++ b/tests/test_auto3dseg_ensemble.py @@ -23,7 +23,7 @@ from monai.bundle.config_parser import ConfigParser from monai.data import create_test_image_3d from monai.utils import optional_import, set_determinism -from monai.utils.enums import AlgoEnsembleKeys +from monai.utils.enums import AlgoKeys from tests.utils import ( SkipIfBeforePyTorchVersion, get_testing_algo_template_path, @@ -135,8 +135,8 @@ def test_ensemble(self) -> None: history = bundle_generator.get_history() for algo_dict in history: - name = algo_dict[AlgoEnsembleKeys.ID] - algo = algo_dict[AlgoEnsembleKeys.ALGO] + name = algo_dict[AlgoKeys.ID] + algo = algo_dict[AlgoKeys.ALGO] _train_param = train_param.copy() if name.startswith("segresnet"): _train_param["network#init_filters"] = 8 @@ -148,7 +148,7 @@ def test_ensemble(self) -> None: builder = AlgoEnsembleBuilder(history, data_src_cfg) builder.set_ensemble_method(AlgoEnsembleBestN(n_best=1)) ensemble = builder.get_ensemble() - name = ensemble.get_algo_ensemble()[0][AlgoEnsembleKeys.ID] + name = ensemble.get_algo_ensemble()[0][AlgoKeys.ID] if name.startswith("segresnet"): pred_param["network#init_filters"] = 8 elif name.startswith("swinunetr"): @@ -159,7 +159,7 @@ def test_ensemble(self) -> None: 
builder.set_ensemble_method(AlgoEnsembleBestByFold(1)) ensemble = builder.get_ensemble() for algo in ensemble.get_algo_ensemble(): - print(algo[AlgoEnsembleKeys.ID]) + print(algo[AlgoKeys.ID]) def tearDown(self) -> None: set_determinism(None) diff --git a/tests/test_auto3dseg_hpo.py b/tests/test_auto3dseg_hpo.py index 00b13adc35..60298ae254 100644 --- a/tests/test_auto3dseg_hpo.py +++ b/tests/test_auto3dseg_hpo.py @@ -24,7 +24,7 @@ from monai.bundle.config_parser import ConfigParser from monai.data import create_test_image_3d from monai.utils import optional_import -from monai.utils.enums import AlgoEnsembleKeys +from monai.utils.enums import AlgoKeys from tests.utils import ( SkipIfBeforePyTorchVersion, get_testing_algo_template_path, @@ -140,7 +140,7 @@ def setUp(self) -> None: @skip_if_no_cuda def test_run_algo(self) -> None: algo_dict = self.history[0] - algo = algo_dict[AlgoEnsembleKeys.ALGO] + algo = algo_dict[AlgoKeys.ALGO] nni_gen = NNIGen(algo=algo, params=override_param) obj_filename = nni_gen.get_obj_filename() # this function will be used in HPO via Python Fire @@ -150,7 +150,7 @@ def test_run_algo(self) -> None: @skip_if_no_optuna def test_run_optuna(self) -> None: algo_dict = self.history[0] - algo = algo_dict[AlgoEnsembleKeys.ALGO] + algo = algo_dict[AlgoKeys.ALGO] class OptunaGenLearningRate(OptunaGen): def get_hyperparameters(self): @@ -172,7 +172,7 @@ def get_hyperparameters(self): @skip_if_no_cuda def test_get_history(self) -> None: algo_dict = self.history[0] - algo = algo_dict[AlgoEnsembleKeys.ALGO] + algo = algo_dict[AlgoKeys.ALGO] nni_gen = NNIGen(algo=algo, params=override_param) obj_filename = nni_gen.get_obj_filename() diff --git a/tests/test_get_equivalent_dtype.py b/tests/test_get_equivalent_dtype.py index 01f8adca73..299a3963b7 100644 --- a/tests/test_get_equivalent_dtype.py +++ b/tests/test_get_equivalent_dtype.py @@ -42,7 +42,15 @@ def test_native_type(self): out_dtype = get_equivalent_dtype(n, type(im_dtype)) 
self.assertEqual(out_dtype, n) - @parameterized.expand([["float", np.float64], ["float32", np.float32], ["float64", np.float64]]) + @parameterized.expand( + [ + ["float", np.float64], + ["float32", np.float32], + ["np.float32", np.float32], + ["float64", np.float64], + ["torch.float64", np.float64], + ] + ) def test_from_string(self, dtype_str, expected_np): expected_pt = get_equivalent_dtype(expected_np, torch.Tensor) # numpy diff --git a/tests/test_integration_classification_2d.py b/tests/test_integration_classification_2d.py index 9aef9cbe18..91ce331a4a 100644 --- a/tests/test_integration_classification_2d.py +++ b/tests/test_integration_classification_2d.py @@ -266,7 +266,7 @@ def test_training(self): repeated.append(results) np.testing.assert_allclose(repeated[0], repeated[1]) - @TimedCall(seconds=1000, skip_timing=not torch.cuda.is_available(), force_quit=False, daemon=False) + @TimedCall(seconds=2000, skip_timing=not torch.cuda.is_available(), force_quit=False, daemon=False) def test_timing(self): self.train_and_infer() diff --git a/tests/test_integration_gpu_customization.py b/tests/test_integration_gpu_customization.py index 1f871f246e..4a7490e529 100644 --- a/tests/test_integration_gpu_customization.py +++ b/tests/test_integration_gpu_customization.py @@ -23,7 +23,7 @@ from monai.bundle.config_parser import ConfigParser from monai.data import create_test_image_3d from monai.utils import optional_import -from monai.utils.enums import AlgoEnsembleKeys +from monai.utils.enums import AlgoKeys from tests.utils import ( SkipIfBeforePyTorchVersion, get_testing_algo_template_path, @@ -139,7 +139,7 @@ def test_ensemble_gpu_customization(self) -> None: history = bundle_generator.get_history() for algo_dict in history: - algo = algo_dict[AlgoEnsembleKeys.ALGO] + algo = algo_dict[AlgoKeys.ALGO] algo.train(train_param) builder = AlgoEnsembleBuilder(history, data_src_cfg) @@ -151,7 +151,7 @@ def test_ensemble_gpu_customization(self) -> None: 
builder.set_ensemble_method(AlgoEnsembleBestByFold(1)) ensemble = builder.get_ensemble() for algo in ensemble.get_algo_ensemble(): - print(algo[AlgoEnsembleKeys.ID]) + print(algo[AlgoKeys.ID]) def tearDown(self) -> None: self.test_dir.cleanup() diff --git a/tests/test_meta_tensor.py b/tests/test_meta_tensor.py index a6607a3ccd..e547675a0e 100644 --- a/tests/test_meta_tensor.py +++ b/tests/test_meta_tensor.py @@ -177,6 +177,7 @@ def test_copy(self, device, dtype): a = deepcopy(m) self.check(a, m, ids=False) # clone + a = m.clone(memory_format=torch.preserve_format) a = m.clone() self.check(a, m, ids=False) a = MetaTensor([[]], device=device, dtype=dtype) diff --git a/tests/test_sliding_window_hovernet_inference.py b/tests/test_sliding_window_hovernet_inference.py index 8f7f8346cc..b17e8525ec 100644 --- a/tests/test_sliding_window_hovernet_inference.py +++ b/tests/test_sliding_window_hovernet_inference.py @@ -18,6 +18,7 @@ from parameterized import parameterized from monai.apps.pathology.inferers import SlidingWindowHoVerNetInferer +from monai.data import MetaTensor from monai.inferers import sliding_window_inference from monai.utils import optional_import from tests.test_sliding_window_inference import TEST_CASES @@ -31,6 +32,8 @@ ["hover", (1, 3, 16, 8), (4, 4), 7, 0.5, "constant", torch.device("cpu:0"), (1,) * 4], ] +TEST_CASES_MULTIOUTPUT = [[torch.ones((1, 6, 20, 20))], [MetaTensor(torch.ones((1, 6, 20, 20)))]] + class TestSlidingWindowHoVerNetInference(unittest.TestCase): @parameterized.expand(TEST_CASES_PADDING) @@ -245,9 +248,10 @@ def compute(data, test1, test2): )(inputs, compute, t1, test2=t2) np.testing.assert_allclose(result.cpu().numpy(), expected, rtol=1e-4) - def test_multioutput(self): + @parameterized.expand(TEST_CASES_MULTIOUTPUT) + def test_multioutput(self, inputs): device = "cuda" if torch.cuda.is_available() else "cpu:0" - inputs = torch.ones((1, 6, 20, 20)).to(device=device) + inputs = inputs.to(device=device) roi_shape = (8, 8) sw_batch_size 
= 10