TorchFX: Additional unit tests
cavusmustafa committed Mar 6, 2024
1 parent 4830b78 commit 3f95242
Showing 15 changed files with 178 additions and 3 deletions.
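Most of the diff tags existing PyTorch layer tests with the precommit_fx_backend pytest marker so they also run in the TorchFX backend precommit job; it additionally adds new tests for aten::any, aten::constant_pad_nd, aten::select_scatter, and aten::slice_scatter, and drops the marker from test_flip.py. Assuming the marker is registered in the suite's pytest configuration the way the existing precommit markers are, the tagged subset can presumably be selected with: pytest -m precommit_fx_backend tests/layer_tests/pytorch_tests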
1 change: 1 addition & 0 deletions tests/layer_tests/pytorch_tests/test_addcmul.py
@@ -47,6 +47,7 @@ def forward(self, x, y, z):
@pytest.mark.nightly
@pytest.mark.precommit
@pytest.mark.precommit_torch_export
@pytest.mark.precommit_fx_backend
def test_addcmul(self, input_type, value, ie_device, precision, ir_version):
self.input_type = input_type
self._test(*self.create_model(value), ie_device, precision, ir_version)
2 changes: 2 additions & 0 deletions tests/layer_tests/pytorch_tests/test_all.py
@@ -77,6 +77,7 @@ def _prepare_input(self, out=False):
@pytest.mark.parametrize("out", [True, False])
@pytest.mark.nightly
@pytest.mark.precommit
@pytest.mark.precommit_fx_backend
def test_all_noparams(self, input_shape, d_type, out, ie_device, precision, ir_version):
if type(input_shape) is list:
self.input_tensor = np.random.randint(0, 2, input_shape, dtype=d_type)
@@ -104,6 +105,7 @@ def test_all_noparams(self, input_shape, d_type, out, ie_device, precision, ir_version):
@pytest.mark.parametrize("out", [True, False])
@pytest.mark.nightly
@pytest.mark.precommit
@pytest.mark.precommit_fx_backend
@pytest.mark.xfail(condition=platform.system() in ('Darwin', 'Linux') and platform.machine() in ('arm', 'armv7l',
'aarch64',
'arm64', 'ARM64'),
37 changes: 37 additions & 0 deletions tests/layer_tests/pytorch_tests/test_any.py
@@ -0,0 +1,37 @@
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

import pytest

from pytorch_layer_test_class import PytorchLayerTest


class TestAny(PytorchLayerTest):
def _prepare_input(self):
import numpy as np
return ((np.random.randint(2, size=(3,3,10,10)) > 0),)

def create_model(self, dim, keep_dim):

import torch
class aten_any(torch.nn.Module):
def __init__(self, dim=None, keep_dim=None):
super(aten_any, self).__init__()
self.dim = dim
self.keep_dim = keep_dim

def forward(self, x):
return torch.any(x, dim=self.dim, keepdim=self.keep_dim)


ref_net = None

return aten_any(dim, keep_dim), ref_net, "aten::any"

@pytest.mark.parametrize(("dim", "keep_dim"),
[(0, False), (0, True), (-1, True)])

@pytest.mark.precommit_fx_backend
def test_any(self, dim, keep_dim, ie_device, precision, ir_version):
self._test(*self.create_model(dim, keep_dim),
ie_device, precision, ir_version)
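
For reference, a minimal sketch of the dim/keepdim behavior the parametrization above exercises, in plain PyTorch outside the test harness:

import torch

x = torch.tensor([[True, False], [False, False]])
print(torch.any(x, dim=0, keepdim=False))  # tensor([ True, False])
print(torch.any(x, dim=0, keepdim=True))   # tensor([[ True, False]]) - reduced dim kept with size 1
print(torch.any(x, dim=-1, keepdim=True))  # tensor([[ True], [False]]) - reduces the last dim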
6 changes: 6 additions & 0 deletions tests/layer_tests/pytorch_tests/test_arange.py
@@ -109,6 +109,7 @@ def forward(self, x, y, z, d):

@pytest.mark.nightly
@pytest.mark.precommit_torch_export
@pytest.mark.precommit_fx_backend
@pytest.mark.parametrize("dtype", [None, "float32", "float64", "int32", "int64", "int8", "uin8"])
@pytest.mark.parametrize("end", [1, 2, 3])
@pytest.mark.parametrize("use_out", [skip_if_export(True), False])
@@ -117,6 +118,7 @@ def test_arange_end_only(self, dtype, end, use_out, ie_device, precision, ir_version):
kwargs_to_prepare_input={"end": end})

@pytest.mark.nightly
@pytest.mark.precommit_fx_backend
@pytest.mark.parametrize("dtype", [None, "float32", "float64", "int32", "int64", "int8"])
@pytest.mark.parametrize("start,end", [(0, 1), (-1, 1), (1, 5), (0.5, 2.5)])
def test_arange_start_end(self, dtype, end, start, ie_device, precision, ir_version):
@@ -125,6 +127,7 @@ def test_arange_start_end(self, dtype, end, start, ie_device, precision, ir_version):

@pytest.mark.nightly
@pytest.mark.precommit
@pytest.mark.precommit_fx_backend
@pytest.mark.parametrize("dtype", [None, "float32", "float64", "int32", "int64", "int8"])
@pytest.mark.parametrize("start,end,step", [(0, 1, 1), (-2, 1, 1.25), (1, -5, -1), (1, 10, 2), (-1, -5, -2)])
def test_arange_start_end_step(self, dtype, end, start, step, ie_device, precision, ir_version):
@@ -133,13 +136,15 @@ def test_arange_start_end_step(self, dtype, end, start, step, ie_device, precision, ir_version):

@pytest.mark.nightly
@pytest.mark.precommit_torch_export
@pytest.mark.precommit_fx_backend
@pytest.mark.parametrize("dtype", ["float32", "float64", "int32", "int64", "int8", "uint8"])
@pytest.mark.parametrize("end", [1, 2, 3])
def test_arange_end_only_with_prim_dtype(self, dtype, end, ie_device, precision, ir_version):
self._test(*self.create_model(dtype, 1, False, True), ie_device, precision, ir_version,
kwargs_to_prepare_input={"end": end, "ref_dtype": dtype})

@pytest.mark.nightly
@pytest.mark.precommit_fx_backend
@pytest.mark.parametrize("dtype", ["float32", "float64", "int32", "int64", "int8"])
@pytest.mark.parametrize("start,end", [(0, 1), (-1, 1), (1, 5), (0.5, 2.5)])
def test_arange_start_end_with_prim_dtype(self, dtype, end, start, ie_device, precision, ir_version):
@@ -148,6 +153,7 @@ def test_arange_start_end_with_prim_dtype(self, dtype, end, start, ie_device, precision, ir_version):

@pytest.mark.nightly
@pytest.mark.precommit
@pytest.mark.precommit_fx_backend
@pytest.mark.parametrize("dtype", ["float32", "float64", "int32", "int64", "int8"])
@pytest.mark.parametrize("start,end,step", [(0, 1, 1), (-2, 1, 1.25), (1, -5, -1), (1, 10, 2), (-1, -5, -2)])
def test_arange_start_end_step_with_prim_dtype(self, dtype, end, start, step, ie_device, precision, ir_version):
1 change: 1 addition & 0 deletions tests/layer_tests/pytorch_tests/test_argmax_argmin.py
@@ -74,6 +74,7 @@ def forward(self, x):
@pytest.mark.nightly
@pytest.mark.precommit
@pytest.mark.precommit_torch_export
@pytest.mark.precommit_fx_backend
@pytest.mark.xfail(condition=platform.system() in ('Darwin', 'Linux') and platform.machine() in ('arm', 'armv7l',
'aarch64',
'arm64', 'ARM64'),
3 changes: 3 additions & 0 deletions tests/layer_tests/pytorch_tests/test_as_strided.py
@@ -41,6 +41,7 @@ def forward(self, x):
@pytest.mark.nightly
@pytest.mark.precommit
@pytest.mark.precommit_torch_export
@pytest.mark.precommit_fx_backend
def test_as_strided(self, size, stride, offset, ie_device, precision, ir_version):
self._test(*self.create_model(size, stride, offset), ie_device, precision, ir_version, trace_model=True)

@@ -92,6 +93,7 @@ def forward_size_const(self, x, size_shape_tensor, stride_shape_tensor):
@pytest.mark.nightly
@pytest.mark.precommit
@pytest.mark.precommit_torch_export
@pytest.mark.precommit_fx_backend
def test_as_strided_list_construct(self, size, stride, offset, mode, ie_device, precision, ir_version):
inp_kwargs = {"size_shape_tensor": size, "stride_shape_tensor": stride}
self._test(
@@ -124,5 +126,6 @@ def forward(self, x):
@pytest.mark.nightly
@pytest.mark.precommit
@pytest.mark.precommit_torch_export
@pytest.mark.precommit_fx_backend
def test_as_strided_lf(self, ie_device, precision, ir_version):
self._test(*self.create_model(), ie_device, precision, ir_version, trace_model=True, freeze_model=False)
2 changes: 2 additions & 0 deletions tests/layer_tests/pytorch_tests/test_bitwise_ops.py
@@ -55,6 +55,7 @@ def forward_not_out(self, tensor_a, out):
@pytest.mark.nightly
@pytest.mark.precommit
@pytest.mark.precommit_torch_export
@pytest.mark.precommit_fx_backend
@pytest.mark.parametrize("op_type", ["and", "or", "not", "xor"])
@pytest.mark.parametrize("lhs_dtype", ["bool", "int32", "uint8", "int64"])
@pytest.mark.parametrize("rhs_dtype", ["bool", "int32", "uint8", "int64"])
@@ -107,6 +108,7 @@ def forward(self, lhs, rhs):
@pytest.mark.nightly
@pytest.mark.precommit
@pytest.mark.precommit_torch_export
@pytest.mark.precommit_fx_backend
@pytest.mark.parametrize("lhs_dtype", ["bool", "int32"])
@pytest.mark.parametrize("rhs_dtype", ["bool", "int32"])
@pytest.mark.parametrize(
3 changes: 3 additions & 0 deletions tests/layer_tests/pytorch_tests/test_clamp.py
@@ -48,6 +48,7 @@ def forward_clip_(self, x):
@pytest.mark.nightly
@pytest.mark.precommit
@pytest.mark.precommit_torch_export
@pytest.mark.precommit_fx_backend
def test_clamp(self, minimum, maximum, as_tensors, op_type, ie_device, precision, ir_version):
self._test(*self.create_model(minimum, maximum, as_tensors,
op_type), ie_device, precision, ir_version)
@@ -76,6 +77,7 @@ def forward(self, x):
@pytest.mark.parametrize("minimum", [0., 1., -1., 0.5, 2])
@pytest.mark.parametrize("as_tensor", [True, False])
@pytest.mark.nightly
@pytest.mark.precommit_fx_backend
def test_clamp_min(self, minimum, as_tensor, ie_device, precision, ir_version):
self._test(*self.create_model(minimum, as_tensor), ie_device,
precision, ir_version, use_convert_model=True, trace_model=True)
@@ -106,6 +108,7 @@ def forward(self, x):
@pytest.mark.nightly
@pytest.mark.precommit
@pytest.mark.precommit_torch_export
@pytest.mark.precommit_fx_backend
def test_clamp(self, maximum, as_tensor, ie_device, precision, ir_version):
self._test(*self.create_model(maximum, as_tensor), ie_device,
precision, ir_version, use_convert_model=True, trace_model=True)
37 changes: 37 additions & 0 deletions tests/layer_tests/pytorch_tests/test_constant_pad_nd.py
@@ -0,0 +1,37 @@
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

import pytest

from pytorch_layer_test_class import PytorchLayerTest


class TestConstantPadND(PytorchLayerTest):
def _prepare_input(self):
import numpy as np
return (np.random.randn(2, 5, 3, 4).astype(np.float32),)

def create_model(self, pad, value):

import torch
class aten_constant_pad_nd(torch.nn.Module):
def __init__(self, pad=None, value=None):
super(aten_constant_pad_nd, self).__init__()
self.pad = pad
self.value = value

def forward(self, x):
return torch.constant_pad_nd(x, self.pad, self.value)


ref_net = None

return aten_constant_pad_nd(pad, value), ref_net, "aten::constant_pad_nd"

@pytest.mark.parametrize(("pad", "value"),
[((1, 1, 1, 1), 0), ((0, 2, 0, 2), -1.0), ((3, 1, 5, 2), 0.5), ((0, 0, 0, 0), 0)])

@pytest.mark.precommit_fx_backend
def test_constant_pad_nd(self, pad, value, ie_device, precision, ir_version):
self._test(*self.create_model(pad, value),
ie_device, precision, ir_version)
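
As a reference for the pad layout, constant_pad_nd takes (left, right) pairs starting from the last dimension; a minimal standalone sketch:

import torch

x = torch.zeros(2, 3)
# (1, 1, 1, 1): pad the last dim by 1 on each side, then dim -2 by 1 on each side
y = torch.constant_pad_nd(x, (1, 1, 1, 1), 0.5)
print(y.shape)         # torch.Size([4, 5])
print(y[0, 0].item())  # 0.5, the fill value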
3 changes: 2 additions & 1 deletion tests/layer_tests/pytorch_tests/test_copy.py
@@ -28,6 +28,7 @@ def forward(self, x):

@pytest.mark.nightly
@pytest.mark.precommit
@pytest.mark.precommit_fx_backend
@pytest.mark.parametrize("value", [1, [2.5], range(224)])
def test_copy_(self, value, ie_device, precision, ir_version):
self._test(*self.create_model(value), ie_device, precision, ir_version)
@@ -63,4 +64,4 @@ def forward_out(self, x, y):
@pytest.mark.precommit
@pytest.mark.parametrize("out", [True, False])
def test_copy_(self, out, ie_device, precision, ir_version):
self._test(*self.create_model(out), ie_device, precision, ir_version, kwargs_to_prepare_input={"out": out})
self._test(*self.create_model(out), ie_device, precision, ir_version, kwargs_to_prepare_input={"out": out})
3 changes: 2 additions & 1 deletion tests/layer_tests/pytorch_tests/test_fake_quantize.py
@@ -91,7 +91,7 @@ def forward(self, x):

@pytest.mark.nightly
@pytest.mark.precommit
#@pytest.mark.precommit_fx_backend
@pytest.mark.precommit_fx_backend
@pytest.mark.parametrize(
"scale, zero_point, quant_min, quant_max",
[
@@ -146,6 +147,7 @@ def forward(self, x):

@pytest.mark.nightly
@pytest.mark.precommit
@pytest.mark.precommit_fx_backend
@pytest.mark.parametrize(
"scale, zero_point, axis, quant_min, quant_max",
[
1 change: 0 additions & 1 deletion tests/layer_tests/pytorch_tests/test_flip.py
@@ -37,7 +37,6 @@ def forward_out(self, x, y):
@pytest.mark.nightly
@pytest.mark.precommit
@pytest.mark.precommit_torch_export
@pytest.mark.precommit_fx_backend
@pytest.mark.parametrize("axis", [[0], [1], [-1], [1, 2], [2, 3], [1, 2, 3]])
@pytest.mark.parametrize("out", [skip_if_export(True), False])
@pytest.mark.parametrize("dtype", ["float32", "float64", "int32", "int64", "uint8"])
2 changes: 2 additions & 0 deletions tests/layer_tests/pytorch_tests/test_pooling.py
@@ -157,6 +157,7 @@ def test_avg_pool1d(self, params, ceil_mode, count_include_pad, ie_device, precision, ir_version):
@pytest.mark.nightly
@pytest.mark.precommit
@pytest.mark.precommit_torch_export
@pytest.mark.precommit_fx_backend
@pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64',
reason='Ticket - 122715')
def test_avg_pool2d(self, params, ceil_mode, count_include_pad, ie_device, precision, ir_version):
@@ -169,6 +170,7 @@ def test_avg_pool2d(self, params, ceil_mode, count_include_pad, ie_device, precision, ir_version):
@pytest.mark.nightly
@pytest.mark.precommit
@pytest.mark.precommit_torch_export
@pytest.mark.precommit_fx_backend
@pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64',
reason='Ticket - 122715')
def test_avg_pool3d(self, params, ceil_mode, count_include_pad, ie_device, precision, ir_version):
39 changes: 39 additions & 0 deletions tests/layer_tests/pytorch_tests/test_select_scatter.py
@@ -0,0 +1,39 @@
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

import pytest
import torch

from pytorch_layer_test_class import PytorchLayerTest



class TestSelectScatter(PytorchLayerTest):
def _prepare_input(self):
import numpy as np
return (np.random.randn(2, 5, 3, 4).astype(np.float32),)

def create_model(self, src, dim, index):

import torch
class aten_select_scatter(torch.nn.Module):
def __init__(self, src=None, dim=None, index=None):
super(aten_select_scatter, self).__init__()
self.src = src
self.dim = dim
self.index = index

def forward(self, x):
return torch.select_scatter(x, self.src, self.dim, self.index)


ref_net = None

return aten_select_scatter(src, dim, index), ref_net, "aten::select_scatter"

@pytest.mark.precommit_fx_backend
@pytest.mark.parametrize(("src", "dim", "index"),
[(torch.ones(5, 3, 4), 0, 0),])
def test_select_scatter(self, src, dim, index, ie_device, precision, ir_version):
self._test(*self.create_model(src, dim, index),
ie_device, precision, ir_version)
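
Note that select_scatter requires src to match the shape of the selected slice (the input shape with dim removed), hence the (5, 3, 4) src for the (2, 5, 3, 4) input above; a minimal standalone sketch:

import torch

x = torch.zeros(2, 5, 3, 4)
src = torch.ones(5, 3, 4)               # same shape as x.select(0, 0)
y = torch.select_scatter(x, src, 0, 0)  # returns a fresh tensor; x is unchanged
print(bool(y[0].eq(1).all()), bool(y[1].eq(0).all()))  # True True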
41 changes: 41 additions & 0 deletions tests/layer_tests/pytorch_tests/test_slice_scatter.py
@@ -0,0 +1,41 @@
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

import pytest
import torch

from pytorch_layer_test_class import PytorchLayerTest



class TestSliceScatter(PytorchLayerTest):
def _prepare_input(self):
import numpy as np
return (np.random.randn(2, 5, 3, 4).astype(np.float32),)

def create_model(self, src, dim, start, end, step):

import torch
class aten_slice_scatter(torch.nn.Module):
def __init__(self, src=None, dim=None, start=None, end=None, step=None):
super(aten_slice_scatter, self).__init__()
self.src = src
self.dim = dim
self.start = start
self.end = end
self.step = step

def forward(self, x):
return torch.slice_scatter(x, src=self.src, dim=self.dim, start=self.start, end=self.end, step=self.step)


ref_net = None

return aten_slice_scatter(src, dim, start, end, step), ref_net, "aten::slice_scatter"

@pytest.mark.precommit_fx_backend
@pytest.mark.parametrize(("src", "dim", "start", "end", "step"),
[(torch.ones(2, 1, 3, 4), 1, 1, 2, 1),])
def test_slice_scatter(self, src, dim, start, end, step, ie_device, precision, ir_version):
self._test(*self.create_model(src, dim, start, end, step),
ie_device, precision, ir_version)
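
slice_scatter likewise expects src shaped like the slice being replaced; a minimal standalone sketch matching the parameters above:

import torch

x = torch.zeros(2, 5, 3, 4)
src = torch.ones(2, 1, 3, 4)  # shape of x[:, 1:2], the dim=1, start=1, end=2, step=1 slice
y = torch.slice_scatter(x, src, dim=1, start=1, end=2, step=1)
print(bool(y[:, 1].eq(1).all()))  # True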
