diff --git a/coremltools/converters/mil/frontend/torch/ops.py b/coremltools/converters/mil/frontend/torch/ops.py index 812ffe7df..438b1e28a 100644 --- a/coremltools/converters/mil/frontend/torch/ops.py +++ b/coremltools/converters/mil/frontend/torch/ops.py @@ -409,8 +409,10 @@ def _convolution(context, node): # PyTorch weight ordering [Cin, Cout, H, W] # MIL expects [Cout, Cin, H, W] + perm = _np.arange(len(weight.shape)) + perm[[0, 1]] = perm[[1, 0]] weight_transpose = mb.transpose( - x=weight, perm=[1, 0, 2, 3], name=weight.name + "_transpose" + x=weight, perm=perm, name=weight.name + "_transpose" ) # Handle output_padding using pre-pad or post-crop diff --git a/coremltools/converters/mil/frontend/torch/test/test_torch_ops.py b/coremltools/converters/mil/frontend/torch/test/test_torch_ops.py index 4f25fccb6..e853a6ef7 100644 --- a/coremltools/converters/mil/frontend/torch/test/test_torch_ops.py +++ b/coremltools/converters/mil/frontend/torch/test/test_torch_ops.py @@ -100,6 +100,33 @@ def test_convolution2d( class TestConvTranspose: + @pytest.mark.parametrize( + "width, in_channels, out_channels, kernel_size, stride, padding, dilation, backend", + itertools.product( + [5, 7], [1, 3], [1, 3], [1, 3], [2, 3], [0, 1], [1, 3], backends + ), + ) + def test_convolution_transpose1d( + self, + width, + in_channels, + out_channels, + kernel_size, + stride, + padding, + dilation, + backend, + ): + model = nn.ConvTranspose1d( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + ) + run_compare_torch((1, in_channels, width), model, backend=backend) + @pytest.mark.parametrize( "height, width, in_channels, out_channels, kernel_size, stride, padding, dilation, backend", itertools.product( @@ -129,6 +156,37 @@ def test_convolution_transpose2d( ) run_compare_torch((1, in_channels, height, width), model, backend=backend) + @pytest.mark.parametrize( + "depth, height, width, in_channels, 
out_channels, kernel_size, stride, padding, dilation, backend", + itertools.product( + [3, 4], [5, 6], [5, 7], [1, 3], [1, 3], [1, 3], [2, 3], [0, 1], [1, 3], backends + ), + ) + @pytest.mark.skip(reason="the macOS version on the CI machine does not include the fixes for 3D transposed convolution. " + "See details in https://github.com/apple/coremltools/pull/942") + def test_convolution_transpose3d( + self, + depth, + height, + width, + in_channels, + out_channels, + kernel_size, + stride, + padding, + dilation, + backend, + ): + model = nn.ConvTranspose3d( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + ) + run_compare_torch((1, in_channels, depth, height, width), model, backend=backend) + # TODO: rdar://65588783 ([PyTorch] Define and error out on unsupported configuration for output_padding) + # TODO: rdar://65550420 (Add Image Resizing (crop, upsample, resize_bilinear) layers to the MIL backend) + @pytest.mark.parametrize(