diff --git a/monai/utils/generateddata.py b/monai/utils/generateddata.py
new file mode 100644
index 0000000000..26904943e1
--- /dev/null
+++ b/monai/utils/generateddata.py
@@ -0,0 +1,44 @@
+# Copyright 2020 MONAI Consortium
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import numpy as np
+
+from monai.utils.arrayutils import rescale_array
+
+
+def create_test_image_2d(width, height, num_objs=12, rad_max=30, noise_max=0.0, num_seg_classes=5):
+    """
+    Return a noisy 2D image with `num_objs` circles and a 2D mask image. The maximum radius of the circles is given as
+    `rad_max`. The mask will have `num_seg_classes` classes for segmentations labeled sequentially from 1, plus a
+    background class represented as 0. If `noise_max` is greater than 0 then noise drawn from the uniform distribution
+    on the range [0, noise_max) is added to the image.
+    """
+    image = np.zeros((width, height))
+
+    for i in range(num_objs):
+        x = np.random.randint(rad_max, width - rad_max)
+        y = np.random.randint(rad_max, height - rad_max)
+        rad = np.random.randint(5, rad_max)
+        spy, spx = np.ogrid[-x : width - x, -y : height - y]
+        circle = (spx * spx + spy * spy) <= rad * rad
+
+        if num_seg_classes > 1:
+            image[circle] = np.ceil(np.random.random() * num_seg_classes)
+        else:
+            image[circle] = np.random.random() * 0.5 + 0.5
+
+    labels = np.ceil(image).astype(np.int32)
+
+    norm = np.random.uniform(0, num_seg_classes * noise_max, size=image.shape)
+    noisyimage = rescale_array(np.maximum(image, norm))
+
+    return noisyimage, labels
diff --git a/runtests.sh b/runtests.sh
index 299b47dee6..a6e06f9198 100755
--- a/runtests.sh
+++ b/runtests.sh
@@ -6,7 +6,7 @@ set -e
 homedir="$( cd -P "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
 cd $homedir
 
-#export PYTHONPATH="$homedir:$PYTHONPATH"
+export PYTHONPATH="$homedir:$PYTHONPATH"
 
 # configuration values
 doCoverage=false
@@ -81,7 +81,7 @@ ${cmdprefix}${cmd} -m unittest
 # network training/inference/eval tests
 if [ "$doNetTests" = 'true' ]
 then
-    for i in examples/*.py
+    for i in tests/integration_*.py
     do
         echo $i
         ${cmdprefix}${cmd} $i
diff --git a/tests/integration_unet2d.py b/tests/integration_unet2d.py
new file mode 100644
index 0000000000..dc1b1b863a
--- /dev/null
+++ b/tests/integration_unet2d.py
@@ -0,0 +1,58 @@
+# Copyright 2020 MONAI Consortium
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+import torch
+import numpy as np
+
+from ignite.engine import create_supervised_trainer
+
+from monai import data, networks, utils
+
+
+def run_test(batch_size=64, train_steps=100, device=torch.device("cuda:0")):
+    def generate_test_batch():
+        for _ in range(train_steps):
+            im, seg = utils.generateddata.create_test_image_2d(128, 128, noise_max=1, num_objs=4, num_seg_classes=1)
+            yield im[None], seg[None].astype(np.float32)
+
+    def _prepare_batch(batch, device=None, non_blocking=False):
+        x, y = batch
+        return torch.from_numpy(x).to(device), torch.from_numpy(y).to(device)
+
+    net = networks.nets.UNet(
+        dimensions=2,
+        in_channels=1,
+        num_classes=1,
+        channels=(4, 8, 16, 32),
+        strides=(2, 2, 2),
+        num_res_units=2,
+    )
+
+    loss = networks.losses.DiceLoss()
+    opt = torch.optim.Adam(net.parameters(), 1e-4)
+    src = data.streams.BatchStream(generate_test_batch(), batch_size)
+
+    def loss_fn(pred, grnd):
+        return loss(pred[0], grnd)
+
+    trainer = create_supervised_trainer(net, opt, loss_fn, device, False, _prepare_batch)
+
+    trainer.run(src, 1)
+
+    return trainer.state.output
+
+
+if __name__ == "__main__":
+    result = run_test()
+
+    sys.exit(0 if result < 1 else 1)
diff --git a/tests/utils.py b/tests/utils.py
index f780220b77..2e10a29912 100644
--- a/tests/utils.py
+++ b/tests/utils.py
@@ -15,7 +15,7 @@
 import torch
 import numpy as np
 
-from monai.utils.arrayutils import rescale_array
+from monai.utils.generateddata import create_test_image_2d
 
 quick_test_var = "QUICKTEST"
 
@@ -26,35 +26,6 @@ def skip_if_quick(obj):
     return unittest.skipIf(is_quick, "Skipping slow tests")(obj)
 
 
-def create_test_image(width, height, num_objs=12, rad_max=30, noise_max=0.0, num_seg_classes=5):
-    """
-    Return a noisy 2D image with `numObj' circles and a 2D mask image. The maximum radius of the circles is given as
-    `radMax'. The mask will have `numSegClasses' number of classes for segmentations labeled sequentially from 1, plus a
-    background class represented as 0. If `noiseMax' is greater than 0 then noise will be added to the image taken from
-    the uniform distribution on range [0,noiseMax).
-    """
-    image = np.zeros((width, height))
-
-    for i in range(num_objs):
-        x = np.random.randint(rad_max, width - rad_max)
-        y = np.random.randint(rad_max, height - rad_max)
-        rad = np.random.randint(5, rad_max)
-        spy, spx = np.ogrid[-x : width - x, -y : height - y]
-        circle = (spx * spx + spy * spy) <= rad * rad
-
-        if num_seg_classes > 1:
-            image[circle] = np.ceil(np.random.random() * num_seg_classes)
-        else:
-            image[circle] = np.random.random() * 0.5 + 0.5
-
-    labels = np.ceil(image).astype(np.int32)
-
-    norm = np.random.uniform(0, num_seg_classes * noise_max, size=image.shape)
-    noisyimage = rescale_array(np.maximum(image, norm))
-
-    return noisyimage, labels
-
-
 class ImageTestCase(unittest.TestCase):
     im_shape = (128, 128)
     input_channels = 1
@@ -62,7 +33,7 @@ class ImageTestCase(unittest.TestCase):
     num_classes = 3
 
     def setUp(self):
-        im, msk = create_test_image(self.im_shape[0], self.im_shape[1], 4, 20, 0, self.num_classes)
+        im, msk = create_test_image_2d(self.im_shape[0], self.im_shape[1], 4, 20, 0, self.num_classes)
 
         self.imt = torch.tensor(im[None, None])
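For reference, a minimal usage sketch of the relocated helper (not part of the patch). It assumes the patched package is importable on PYTHONPATH and that `rescale_array` normalises intensities into [0, 1]:

import numpy as np

from monai.utils.generateddata import create_test_image_2d

# Generate a 128x128 test image with 4 circles of radius <= 20 and 3 segmentation classes.
im, seg = create_test_image_2d(128, 128, num_objs=4, rad_max=20, noise_max=0.5, num_seg_classes=3)

assert im.shape == (128, 128) and seg.shape == (128, 128)
assert seg.dtype == np.int32 and set(np.unique(seg)) <= set(range(4))  # labels 0 (background) to 3
assert 0.0 <= im.min() and im.max() <= 1.0  # assumes rescale_array maps intensities into [0, 1]

This mirrors how tests/integration_unet2d.py above builds its synthetic training batches.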