Inline metatensor_to_array, rename image_to_metatensor to itk_image_to_metatensor and partly fix dtype warning

Signed-off-by: Felix Schnabel <[email protected]>
Shadow-Devil committed Feb 7, 2023
1 parent a991f2b commit b080bc3
Showing 2 changed files with 24 additions and 40 deletions.
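The dtype-warning part of this commit swaps the bare ITK ctype for an explicit NumPy dtype when calling convert_to_dst_type. A minimal sketch of the idea (not part of the diff; assumes ITK is installed and uses a hypothetical input array):

import numpy as np
from monai.utils import convert_to_dst_type, optional_import

itk, has_itk = optional_import("itk")

if has_itk:
    arr = np.arange(6, dtype=np.float32).reshape(2, 3)
    # Passing the bare itk.D ctype as `dtype` can trigger a NumPy dtype warning;
    # wrapping it in np.dtype() hands convert_to_dst_type a proper float64 dtype.
    out, *_ = convert_to_dst_type(arr, dst=arr, dtype=np.dtype(itk.D))
    print(out.dtype)  # float64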
25 changes: 7 additions & 18 deletions monai/data/itk_torch_affine_matrix_bridge.py
@@ -30,8 +30,7 @@
itk, has_itk = optional_import("itk")

__all__ = [
"metatensor_to_array",
"image_to_metatensor",
"itk_image_to_metatensor",
"itk_to_monai_affine",
"monai_to_itk_affine",
"create_itk_affine_from_parameters",
@@ -40,15 +39,7 @@
]


# TODO remove
def metatensor_to_array(metatensor: MetaTensor):
metatensor = metatensor.squeeze()
metatensor = metatensor.permute(*torch.arange(metatensor.ndim - 1, -1, -1))

return metatensor.get_array()


def image_to_metatensor(image):
def itk_image_to_metatensor(image):
"""
Converts an ITK image to a MetaTensor object.
@@ -60,7 +51,7 @@ def image_to_metatensor(image):
"""
reader = ITKReader(affine_lps_to_ras=False)
image_array, meta_data = reader.get_data(image)
image_array = convert_to_dst_type(image_array, dst=image_array, dtype=itk.D)[0]
image_array = convert_to_dst_type(image_array, dst=image_array, dtype=np.dtype(itk.D))[0]
metatensor = MetaTensor.ensure_torch_and_prune_meta(image_array, meta_data)
metatensor = EnsureChannelFirst()(metatensor)

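As a usage sketch for the renamed itk_image_to_metatensor above (not from the diff; assumes ITK is installed and this bridge module is importable):

import numpy as np
from monai.data.itk_torch_affine_matrix_bridge import itk_image_to_metatensor
from monai.utils import optional_import

itk, has_itk = optional_import("itk")

if has_itk:
    # Synthetic 2D ITK image standing in for one read from disk.
    image = itk.GetImageFromArray(np.random.rand(16, 16).astype(np.float32))
    metatensor = itk_image_to_metatensor(image)
    print(metatensor.shape)  # channel-first, e.g. (1, 16, 16)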
@@ -277,10 +268,8 @@ def itk_affine_resample(image, matrix, translation, center_of_rotation=None):


def monai_affine_resample(metatensor: MetaTensor, affine_matrix: NdarrayOrTensor):
# TODO documentation, change to mode=3
# affine = Affine(affine=affine_matrix, mode=3, padding_mode='mirror', dtype=torch.float64, image_only=True)
# output_tensor = cast(MetaTensor, affine(metatensor))
affine = Affine(affine=affine_matrix, padding_mode="zeros", dtype=torch.float64, image_only=True)
output_tensor = cast(MetaTensor, affine(metatensor, mode="bilinear"))
# TODO documentation
affine = Affine(affine=affine_matrix, padding_mode="zeros", mode="bilinear", dtype=torch.float64, image_only=True)
output_tensor = cast(MetaTensor, affine(metatensor))

return metatensor_to_array(output_tensor)
return output_tensor.squeeze().permute(*torch.arange(output_tensor.ndim - 2, -1, -1)).array
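A standalone sketch of the resampling path above, using a dummy channel-first MetaTensor and an identity affine as hypothetical inputs, and showing the inlined MetaTensor-to-array conversion that replaces metatensor_to_array:

import torch
from monai.data import MetaTensor
from monai.transforms import Affine

# Dummy channel-first 2D MetaTensor and an identity affine (3x3 for a 2D image).
metatensor = MetaTensor(torch.rand(1, 8, 8))
affine_matrix = torch.eye(3, dtype=torch.float64)

# Interpolation mode and padding mode are now fixed at construction time.
affine = Affine(affine=affine_matrix, padding_mode="zeros", mode="bilinear", dtype=torch.float64, image_only=True)
output_tensor = affine(metatensor)

# Inlined conversion: drop the channel dim, reverse the remaining axes into
# ITK-style index order, and return the underlying NumPy array.
array = output_tensor.squeeze().permute(*torch.arange(output_tensor.ndim - 2, -1, -1)).array
print(array.shape)  # (8, 8)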
39 changes: 17 additions & 22 deletions tests/test_itk_torch_affine_matrix_bridge.py
@@ -21,22 +21,17 @@
from monai.data import ITKReader
from monai.data.itk_torch_affine_matrix_bridge import (
create_itk_affine_from_parameters,
image_to_metatensor,
itk_image_to_metatensor,
itk_affine_resample,
itk_to_monai_affine,
metatensor_to_array,
monai_affine_resample,
monai_to_itk_affine,
)
from monai.utils import optional_import

itk, has_itk = optional_import("itk")

TESTS = [
"CT_2D_head_fixed.mha",
"CT_2D_head_moving.mha",
# "copd1_highres_INSP_STD_COPD_img.nii.gz"
]
TESTS = ["CT_2D_head_fixed.mha", "CT_2D_head_moving.mha", "copd1_highres_INSP_STD_COPD_img.nii.gz"]
# Download URL:
# SHA-512: 60193cd6ef0cf055c623046446b74f969a2be838444801bd32ad5bedc8a7eeec
# b343e8a1208769c9c7a711e101c806a3133eccdda7790c551a69a64b9b3701e9
@@ -86,23 +81,19 @@ def test_setting_affine_parameters(self, filepath):
output_array_itk = itk_affine_resample(image, matrix=matrix, translation=translation)

# MONAI
metatensor = image_to_metatensor(image)
metatensor = itk_image_to_metatensor(image)
affine_matrix_for_monai = itk_to_monai_affine(image, matrix, translation)
output_array_monai = monai_affine_resample(metatensor, affine_matrix=affine_matrix_for_monai)

###########################################################################
# Make sure that the array conversion of the inputs is the same
input_array_monai = metatensor_to_array(metatensor)
# output_array_monai = ITKWriter.create_backend_obj(
# metatensor.array,
# channel_dim=None,
# affine=affine_matrix_for_monai,
# affine_lps_to_ras=False, # False if the affine is in itk convention
# )
input_array_monai = metatensor.squeeze().permute(*torch.arange(metatensor.ndim - 2, -1, -1)).array
np.testing.assert_array_equal(input_array_monai, np.asarray(image))

# Compare outputs
percentage = 100 * np.isclose(output_array_monai, output_array_itk).sum() / output_array_itk.size
percentage = (
100 * np.isclose(output_array_monai, output_array_itk).sum(dtype=np.float64) / output_array_itk.size
)
print("MONAI equals result: ", percentage, "%")
self.assertGreaterEqual(percentage, 99.0)

@@ -145,17 +136,19 @@ def test_arbitary_center_of_rotation(self, filepath):
output_array_itk = itk_affine_resample(image, matrix, translation, center_of_rotation)

# MONAI
metatensor = image_to_metatensor(image)
metatensor = itk_image_to_metatensor(image)
affine_matrix_for_monai = itk_to_monai_affine(image, matrix, translation, center_of_rotation)
output_array_monai = monai_affine_resample(metatensor, affine_matrix=affine_matrix_for_monai)

# Make sure that the array conversion of the inputs is the same
input_array_monai = metatensor_to_array(metatensor)
input_array_monai = metatensor.squeeze().permute(*torch.arange(metatensor.ndim - 2, -1, -1)).array
np.testing.assert_array_equal(input_array_monai, np.asarray(image))

###########################################################################
# Compare outputs
percentage = 100 * np.isclose(output_array_monai, output_array_itk).sum() / output_array_itk.size
percentage = (
100 * np.isclose(output_array_monai, output_array_itk).sum(dtype=np.float64) / output_array_itk.size
)
print("MONAI equals result: ", percentage, "%")
self.assertGreaterEqual(percentage, 99.0)

Expand Down Expand Up @@ -199,16 +192,18 @@ def test_monai_to_itk(self, filepath):
output_array_itk = itk_affine_resample(image, matrix, translation, center_of_rotation)

# MONAI
metatensor = image_to_metatensor(image)
metatensor = itk_image_to_metatensor(image)
output_array_monai = monai_affine_resample(metatensor, affine_matrix)

# Make sure that the array conversion of the inputs is the same
input_array_monai = metatensor_to_array(metatensor)
input_array_monai = metatensor.squeeze().permute(*torch.arange(metatensor.ndim - 2, -1, -1)).array
np.testing.assert_array_equal(input_array_monai, np.asarray(image))

###########################################################################
# Compare outputs
percentage = 100 * np.isclose(output_array_monai, output_array_itk).sum() / output_array_itk.size
percentage = (
100 * np.isclose(output_array_monai, output_array_itk).sum(dtype=np.float64) / output_array_itk.size
)
print("MONAI equals result: ", percentage, "%")
self.assertGreaterEqual(percentage, 99.0)

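All three tests share the same agreement check: the percentage of voxels where the MONAI and ITK resampled outputs are numerically close. A self-contained sketch of that metric, with random arrays standing in for the two outputs:

import numpy as np

# Stand-ins for the ITK and MONAI resampling results.
output_array_itk = np.random.rand(64, 64)
output_array_monai = output_array_itk + np.random.normal(0.0, 1e-9, size=(64, 64))

# Fraction of voxels agreeing within np.isclose tolerances, as a percentage;
# summing in float64 keeps the computation in floating point on all platforms.
percentage = 100 * np.isclose(output_array_monai, output_array_itk).sum(dtype=np.float64) / output_array_itk.size
print("MONAI equals result: ", percentage, "%")
assert percentage >= 99.0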