[GHA] Speed up PyTorch Layer unit tests (openvinotoolkit#20613)
* test

* fixed tests

* typo

* fixed tests

* rest of the tests

* fixed rsub test

* tmp fix

* Revert "tmp fix"

This reverts commit b8bf1e9.

* fixed test params

* reset thirdparty/pugixml

* Revert "fixed rsub test"

This reverts commit 9b6be34.

* fixed typo

* fixed test data

* reset test_rsub

* removed unused param

* reverted runner

* simplified call

* fixed random

* changed logical to auto mode

* Revert "fixed random"

This reverts commit 8a4f20b.

* fixed test_all

* replaced random_sample with randn

* fixed rebase issue

* reverted logical splitting

* Update tests/layer_tests/pytorch_tests/test_repeat_interleave.py

Co-authored-by: Maxim Vafin <[email protected]>

* Update tests/layer_tests/pytorch_tests/test_all.py

Co-authored-by: Maxim Vafin <[email protected]>

* Apply suggestions from code review

Co-authored-by: Maxim Vafin <[email protected]>

* fixed merge conflict

---------

Co-authored-by: Maxim Vafin <[email protected]>
2 people authored and allnes committed Nov 23, 2023
1 parent d1b2ece commit 8e6e470
Showing 21 changed files with 414 additions and 266 deletions.
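The change that repeats across the test files below is a single pattern: random input tensors that used to be built inside @pytest.mark.parametrize (i.e., at collection time, re-executed by every pytest-xdist worker) are now parametrized by shape only, and the actual array is drawn inside the test body. A minimal sketch of the before/after pattern; the test name here is illustrative, not from the diff:

import numpy as np
import pytest

# Before: the array was created when pytest collected the module, so every
# xdist worker regenerated it and runs were not reproducible.
# After: parametrize over plain shapes and generate the data at run time.
@pytest.mark.parametrize('input_shape', [[2, 8, 9, 10], [8, 9, 10]])
def test_example(input_shape):
    input_tensor = np.random.randn(*input_shape).astype(np.float32)
    assert input_tensor.shape == tuple(input_shape)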
6 changes: 4 additions & 2 deletions .github/workflows/linux.yml
@@ -931,6 +931,8 @@ jobs:
- name: Install Python API tests dependencies
run: |
+       # To enable pytest parallel features
+       python3 -m pip install pytest-xdist[psutil]
# For torchvision to OpenVINO preprocessing converter
python3 -m pip install -r ${INSTALL_TEST_DIR}/python/preprocess/torchvision/requirements.txt
@@ -1006,7 +1008,7 @@ jobs:
python3 -m pytest ${LAYER_TESTS_INSTALL_DIR}/py_frontend_tests --junitxml=${INSTALL_TEST_DIR}/TEST-test_py_fontend.xml
- name: PyTorch Layer Tests
-       run: python3 -m pytest ${LAYER_TESTS_INSTALL_DIR}/pytorch_tests -m precommit --junitxml=${INSTALL_TEST_DIR}/TEST-pytorch.xml
+       run: python3 -m pytest ${LAYER_TESTS_INSTALL_DIR}/pytorch_tests -n logical -m precommit --junitxml=${INSTALL_TEST_DIR}/TEST-pytorch.xml
env:
TEST_DEVICE: CPU
TEST_PRECISION: FP16
@@ -1327,7 +1329,7 @@ jobs:
- name: PyTorch Models Tests
run: |
export PYTHONPATH=${MODEL_HUB_TESTS_INSTALL_DIR}:$PYTHONPATH
-         python3 -m pytest ${MODEL_HUB_TESTS_INSTALL_DIR}/torch_tests/ -m ${TYPE} --html=${INSTALL_TEST_DIR}/TEST-torch_model_tests.html --self-contained-html -v
+         python3 -m pytest ${MODEL_HUB_TESTS_INSTALL_DIR}/torch_tests -m ${TYPE} --html=${INSTALL_TEST_DIR}/TEST-torch_model_tests.html --self-contained-html -v
env:
TYPE: ${{ github.event_name == 'schedule' && 'nightly' || 'precommit'}}
TEST_DEVICE: CPU
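A note on the new flag: -n logical asks pytest-xdist for one worker per logical CPU and needs the psutil extra installed above; -n auto, by contrast, prefers physical cores when psutil is available (the commit history above shows both modes were tried). The two counts can be inspected directly; a quick sketch:

import psutil

# Logical CPUs include SMT/hyper-threads; physical counts cores only.
print(psutil.cpu_count(logical=True))   # what '-n logical' maps to
print(psutil.cpu_count(logical=False))  # what '-n auto' prefers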
29 changes: 14 additions & 15 deletions tests/layer_tests/pytorch_tests/test_adaptive_avg_pool.py
@@ -8,13 +8,12 @@
from pytorch_layer_test_class import PytorchLayerTest


- @pytest.mark.parametrize('input_tensor', (np.random.randn(1, 2, 8, 9, 10).astype(np.float32),
-                                           np.random.randn(2, 8, 9, 10).astype(np.float32)))
- @pytest.mark.parametrize('output_size', ([5, 7, 9], 7))
+ @pytest.mark.parametrize('input_tensor', [[1, 2, 8, 9, 10], [2, 8, 9, 10]])
+ @pytest.mark.parametrize('output_size', [[5, 7, 9], 7])
class TestAdaptiveAvgPool3D(PytorchLayerTest):

def _prepare_input(self):
-     return (self.input_tensor,)
+     return (self.input_tensor, )

def create_model(self, output_size):
class aten_adaptive_avg_pool3d(torch.nn.Module):
@@ -35,16 +34,16 @@ def forward(self, input_tensor):
@pytest.mark.precommit_ts_backend
@pytest.mark.precommit_fx_backend
def test_adaptive_avg_pool3d(self, ie_device, precision, ir_version, input_tensor, output_size):
-     self.input_tensor = input_tensor
+     self.input_tensor = np.random.randn(*input_tensor).astype(np.float32)
self._test(*self.create_model(output_size), ie_device, precision, ir_version)


- @pytest.mark.parametrize('input_tensor', [np.random.randn(2, 8, 9, 10).astype(np.float32), np.random.randn(8, 9, 10).astype(np.float32)])
- @pytest.mark.parametrize('output_size', ([7, 9], 7))
+ @pytest.mark.parametrize('input_shape', [[2, 8, 9, 10], [8, 9, 10]])
+ @pytest.mark.parametrize('output_size', [[7, 9], 7])
class TestAdaptiveAvgPool2D(PytorchLayerTest):

def _prepare_input(self):
-     return (self.input_tensor,)
+     return (self.input_tensor, )

def create_model(self, output_size):
class aten_adaptive_avg_pool2d(torch.nn.Module):
@@ -64,17 +63,17 @@ def forward(self, input_tensor):
@pytest.mark.precommit
@pytest.mark.precommit_ts_backend
@pytest.mark.precommit_fx_backend
- def test_adaptive_avg_pool2d(self, ie_device, precision, ir_version, input_tensor, output_size):
-     self.input_tensor = input_tensor
+ def test_adaptive_avg_pool2d(self, ie_device, precision, ir_version, input_shape, output_size):
+     self.input_tensor = np.random.randn(*input_shape).astype(np.float32)
self._test(*self.create_model(output_size), ie_device, precision, ir_version)


- @pytest.mark.parametrize('input_tensor', [np.random.randn(8, 9, 10).astype(np.float32), np.random.randn(9, 10).astype(np.float32)])
- @pytest.mark.parametrize('output_size', ( 7, ))
+ @pytest.mark.parametrize('input_shape', [[8, 9, 10], [9, 10]])
+ @pytest.mark.parametrize('output_size', [7, ])
class TestAdaptiveAvgPool1D(PytorchLayerTest):

def _prepare_input(self):
-     return (self.input_tensor,)
+     return (self.input_tensor, )

def create_model(self, output_size):
class aten_adaptive_avg_pool1d(torch.nn.Module):
@@ -94,8 +93,8 @@ def forward(self, input_tensor):
@pytest.mark.precommit
@pytest.mark.precommit_ts_backend
@pytest.mark.precommit_fx_backend
- def test_adaptive_avg_pool1d(self, ie_device, precision, ir_version, input_tensor, output_size):
-     self.input_tensor = input_tensor
+ def test_adaptive_avg_pool1d(self, ie_device, precision, ir_version, input_shape, output_size):
+     self.input_tensor = np.random.randn(*input_shape).astype(np.float32)
self._test(*self.create_model(output_size), ie_device, precision, ir_version)


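For context on the operator under test (a standalone sketch, not part of the suite): adaptive average pooling maps any spatial input size to a fixed output_size, which is why these tests can parametrize input shapes and output sizes independently.

import torch

# The output spatial size is fixed by the module, not by the input.
pool = torch.nn.AdaptiveAvgPool2d(output_size=(7, 9))
x = torch.randn(2, 8, 9, 10)    # one of the shapes parametrized above
print(pool(x).shape)            # torch.Size([2, 8, 7, 9])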
38 changes: 17 additions & 21 deletions tests/layer_tests/pytorch_tests/test_adaptive_max_pool.py
@@ -34,11 +34,9 @@ def forward(self, input_tensor):

return aten_adaptive_max_pool3d(output_size, return_indices), ref_net, "aten::adaptive_max_pool3d"

- @pytest.mark.parametrize('input_tensor', ([
-     np.random.randn(2, 1, 1, 4, 4).astype(np.float32),
-     np.random.randn(4, 1, 3, 32, 32).astype(np.float32),
-     np.random.randn(1, 3, 32, 32).astype(np.float32)
- ]))
+ @pytest.mark.parametrize('input_shape', [[2, 1, 1, 4, 4],
+                                          [4, 1, 3, 32, 32],
+                                          [1, 3, 32, 32]])
@pytest.mark.parametrize('output_size', ([
[2, 2, 2],
[4, 4, 4],
@@ -53,8 +51,8 @@ def forward(self, input_tensor):
@pytest.mark.precommit_fx_backend
@pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64',
reason='Ticket - 122715')
- def test_adaptive_max_pool3d(self, ie_device, precision, ir_version, input_tensor, output_size, return_indices):
-     self.input_tensor = input_tensor
+ def test_adaptive_max_pool3d(self, ie_device, precision, ir_version, input_shape, output_size, return_indices):
+     self.input_tensor = np.random.randn(*input_shape).astype(np.float32)
self._test(*self.create_model(output_size, return_indices), ie_device, precision, ir_version)


@@ -81,11 +79,9 @@ def forward(self, input_tensor):

return aten_adaptive_max_pool2d(output_size, return_indices), ref_net, "aten::adaptive_max_pool2d"

- @pytest.mark.parametrize('input_tensor', ([
-     np.random.randn(2, 1, 4, 4).astype(np.float32),
-     np.random.randn(1, 3, 32, 32).astype(np.float32),
-     np.random.randn(3, 32, 32).astype(np.float32)
- ]))
+ @pytest.mark.parametrize('input_shape', [[2, 1, 4, 4],
+                                          [1, 3, 32, 32],
+                                          [3, 32, 32]])
@pytest.mark.parametrize('output_size', ([
[2, 2],
[4, 4],
@@ -100,8 +96,8 @@ def forward(self, input_tensor):
@pytest.mark.precommit_fx_backend
@pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64',
reason='Ticket - 122715')
- def test_adaptive_max_pool2d(self, ie_device, precision, ir_version, input_tensor, output_size, return_indices):
-     self.input_tensor = input_tensor
+ def test_adaptive_max_pool2d(self, ie_device, precision, ir_version, input_shape, output_size, return_indices):
+     self.input_tensor = np.random.randn(*input_shape).astype(np.float32)
self._test(*self.create_model(output_size, return_indices), ie_device, precision, ir_version)


@@ -128,11 +124,11 @@ def forward(self, input_tensor):

return aten_adaptive_max_pool1d(output_size, return_indices), ref_net, "aten::adaptive_max_pool1d"

- @pytest.mark.parametrize('input_tensor', ([
-     np.random.randn(1, 4, 4).astype(np.float32),
-     np.random.randn(3, 32, 32).astype(np.float32),
-     np.random.randn(16, 8).astype(np.float32),
- ]))
+ @pytest.mark.parametrize('input_shape', [
+     [1, 4, 4],
+     [3, 32, 32],
+     [16, 8]
+ ])
@pytest.mark.parametrize('output_size', ([
2,
4,
@@ -147,6 +143,6 @@ def forward(self, input_tensor):
@pytest.mark.precommit_fx_backend
@pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64',
reason='Ticket - 122715')
- def test_adaptive_max_pool1d(self, ie_device, precision, ir_version, input_tensor, output_size, return_indices):
-     self.input_tensor = input_tensor
+ def test_adaptive_max_pool1d(self, ie_device, precision, ir_version, input_shape, output_size, return_indices):
+     self.input_tensor = np.random.randn(*input_shape).astype(np.float32)
self._test(*self.create_model(output_size, return_indices), ie_device, precision, ir_version)
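These tests also parametrize return_indices; with it, the functional form returns the argmax locations alongside the pooled values. An illustrative sketch:

import torch
import torch.nn.functional as F

x = torch.randn(1, 3, 32, 32)   # one of the shapes parametrized above
# return_indices=True additionally yields flattened argmax positions,
# e.g. for later use with max_unpool2d.
values, indices = F.adaptive_max_pool2d(x, output_size=(4, 4), return_indices=True)
print(values.shape, indices.shape)   # torch.Size([1, 3, 4, 4]) for both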
13 changes: 7 additions & 6 deletions tests/layer_tests/pytorch_tests/test_add.py
@@ -9,10 +9,11 @@


@pytest.mark.parametrize('alpha', (-0.5, 0, 0.5, 1, 2))
- @pytest.mark.parametrize('input_rhs', (np.random.randn(2, 5, 3, 4).astype(np.float32),
-                                        np.random.randn(1, 5, 3, 4).astype(np.float32),
-                                        np.random.randn(1).astype(np.float32)))
+ @pytest.mark.parametrize('input_shape_rhs', [
+     [2, 5, 3, 4],
+     [1, 5, 3, 4],
+     [1]
+ ])
class TestAdd(PytorchLayerTest):

def _prepare_input(self):
@@ -41,8 +42,8 @@ def forward2(self, lhs, rhs):
@pytest.mark.precommit_ts_backend
@pytest.mark.precommit_fx_backend
@pytest.mark.parametrize("op_type", ["add", "add_"])
- def test_add(self, ie_device, precision, ir_version, alpha, input_rhs, op_type):
-     self.input_rhs = input_rhs
+ def test_add(self, ie_device, precision, ir_version, alpha, input_shape_rhs, op_type):
+     self.input_rhs = np.random.randn(*input_shape_rhs).astype(np.float32)
self._test(*self.create_model(alpha, op_type), ie_device, precision, ir_version, use_convert_model=True)


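For reference, the alpha values parametrized above are the scale that aten::add applies to its right-hand operand. A quick sketch:

import torch

lhs = torch.ones(2, 5, 3, 4)
rhs = torch.ones(1)                 # broadcastable, like the [1] shape above
# torch.add computes lhs + alpha * rhs; add_ is the in-place variant.
out = torch.add(lhs, rhs, alpha=0.5)
print(out[0, 0, 0, 0].item())       # 1.5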
62 changes: 34 additions & 28 deletions tests/layer_tests/pytorch_tests/test_all.py
@@ -24,10 +24,10 @@ def __init__(self, dim, keepdim) -> None:

def forward(self, input_tensor):
return torch.all(
input_tensor,
dim = self.dim
) if self.keepdim is None else torch.all(
input_tensor,
dim = self.dim,
keepdim = self.keepdim
)
@@ -36,32 +36,35 @@ class TestAll(PytorchLayerTest):
def _prepare_input(self):
return (self.input_tensor,)

@pytest.mark.parametrize("input_tensor", [
np.eye(5,5),
np.zeros((5, 5)),
np.zeros((9,8)) + 1,
np.random.randint(0, 2, (5, 9, 7)),
np.random.randint(0, 2, (10, 13, 11)),
np.random.randint(0, 2, (8, 7, 6, 5, 4)),
np.random.randint(0, 2, (11, 11), dtype=np.uint8),
np.random.randint(0, 2, (7, 7), dtype=np.uint8),
@pytest.mark.parametrize("input_shape, d_type", [
(np.eye(5,5), np.int64),
(np.zeros((5, 5)), np.int64),
(np.zeros((9,8)) + 1, np.int64),
([5, 9, 7], np.int64),
([10, 13, 11], np.int64),
([8, 7, 6, 5, 4], np.int64),
([11, 11], np.uint8),
([7, 7], np.uint8)
])
@pytest.mark.nightly
@pytest.mark.precommit
- def test_all_noparams(self, input_tensor, ie_device, precision, ir_version):
-     self.input_tensor = input_tensor
-     self._test(aten_all_noparam(), None, "aten::all",
+ def test_all_noparams(self, input_shape, d_type, ie_device, precision, ir_version):
+     if type(input_shape) is list:
+         self.input_tensor = np.random.randint(0, 2, input_shape, dtype=d_type)
+     else:
+         self.input_tensor = input_shape
+     self._test(aten_all_noparam(), None, "aten::all",
ie_device, precision, ir_version, trace_model=True, freeze_model=False)
@pytest.mark.parametrize("input_tensor", [
np.eye(5,5),
np.zeros((5, 5)),
np.zeros((9,8)) + 1,
np.random.randint(0, 2, (5, 9, 7)),
np.random.randint(0, 2, (10, 13, 11)),
np.random.randint(0, 2, (8, 7, 6, 5, 4)),
np.random.randint(0, 2, (11, 11), dtype=np.uint8),
np.random.randint(0, 2, (7, 7), dtype=np.uint8),

@pytest.mark.parametrize("input_shape, d_type", [
(np.eye(5,5), np.int64),
(np.zeros((5, 5)), np.int64),
(np.zeros((9,8)) + 1, np.int64),
([5, 9, 7], np.int64),
([10, 13, 11], np.int64),
([8, 7, 6, 5, 4], np.int64),
([11, 11], np.uint8),
([7, 7], np.uint8)
])
@pytest.mark.parametrize("keepdim", [
True,
@@ -72,8 +75,8 @@ def test_all_noparams(self, input_tensor, ie_device, precision, ir_version):
@pytest.mark.precommit
@pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64',
reason='Ticket - 122715')
- def test_all(self, input_tensor, keepdim, ie_device, precision, ir_version):
-     self.input_tensor = input_tensor
-     for dim in range(len(input_tensor.shape)):
-         self._test(aten_all(dim, keepdim), None, "aten::all",
+ def test_all(self, input_shape, d_type, keepdim, ie_device, precision, ir_version):
+     if type(input_shape) is list:
+         self.input_tensor = np.random.randint(0, 2, input_shape, dtype=d_type)
+     else:
+         self.input_tensor = input_shape
+     for dim in range(len(self.input_tensor.shape)):
+         self._test(aten_all(dim, keepdim), None, "aten::all",
ie_device, precision, ir_version, trace_model=True, freeze_model=False)
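For reference, torch.all with dim/keepdim reduces along a single axis, which is what test_all exercises for every dim of each input. An illustrative sketch:

import torch

x = torch.tensor([[1, 1, 0], [1, 1, 1]], dtype=torch.uint8)
# A row reduces to True only if all of its entries are nonzero.
print(torch.all(x, dim=1))                # tensor([False,  True])
print(torch.all(x, dim=1, keepdim=True))  # same values, shape (2, 1)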
44 changes: 24 additions & 20 deletions tests/layer_tests/pytorch_tests/test_argsort.py
@@ -22,24 +22,24 @@ def __init__(self, dim, descending, stable) -> None:

def forward(self, input_tensor):
if self.stable is not None:
return torch.argsort(input_tensor,
dim = self.dim,
descending = self.descending,
stable = self.stable
)
else:
return torch.argsort(input_tensor,
dim = self.dim,
descending = self.descending
)
ref_net = None

return aten_argsort(dim, descending, stable), ref_net, "aten::argsort"

@pytest.mark.parametrize("tensor_stable_pair", [
-     (np.random.rand(1, 4), False),
-     (np.random.rand(4, 4), False),
-     (np.random.rand(4, 4, 4), False),
+     ([1, 4], False),
+     ([4, 4], False),
+     ([4, 4, 4], False),
(np.array([1, 2, 4, 6, 5, 8, 7]), False),
(np.array([6, 5, 4, 2, 3, 0, 1]), False),
(np.array([1, 1, 1, 2, 1, 3, 1, 4, 2, 5, 1, 2, 4, 4, 0]), True),
@@ -49,20 +49,20 @@ def forward(self, input_tensor):
(np.array([[9, 8, 8], [8, 7, 7], [7, 5, 6],
[8, 8, 9], [7, 7, 8], [6, 5, 7],
[8, 9, 8], [7, 8, 7], [5, 6, 7]]), True),
(np.array([[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
[[5, 2, 4], [4, 9, 0], [7, 7, 9]],
[[5, 2, 4], [4, 9, 0], [7, 7, 9]]]), True),
(np.array([[[3, 2, 2], [1, 2, 1], [3, 2, 2]],
[[1, 2, 1], [4, 3, 4], [3, 2, 2]],
[[3, 2, 2], [1, 2, 1], [7, 9, 9]]]), True),
(np.array([[[2, 1, 3], [3, 2, 1], [1, 2, 3]],
[[2, 0, 2], [1, 2, 1], [3, 2, 8]],
[[3, 2, 2], [3, 2, 1], [1, 2, 3]],
[[2, 1, 3], [3, 2, 1], [1, 2, 3]],
[[2, 0, 2], [1, 2, 1], [3, 2, 8]],
[[3, 2, 2], [3, 2, 1], [1, 2, 3]],
[[2, 1, 3], [3, 2, 1], [1, 2, 3]],
[[2, 0, 2], [1, 2, 1], [3, 2, 8]],
[[3, 2, 2], [3, 2, 1], [1, 2, 3]]]), True)
])
@pytest.mark.parametrize("descending", [
Expand All @@ -72,7 +72,11 @@ def forward(self, input_tensor):
@pytest.mark.nightly
@pytest.mark.precommit
def test_argsort(self, tensor_stable_pair, descending, ie_device, precision, ir_version):
-     self.input_tensor, stable = tensor_stable_pair
+     input_shape, stable = tensor_stable_pair
+     if type(input_shape) is list:
+         self.input_tensor = np.random.randn(*input_shape).astype(np.float32)
+     else:
+         self.input_tensor = input_shape
dims = len(self.input_tensor.shape)
for dim in range(-dims, dims):
stable_values = [True] if stable else [True, False, None]
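For reference, the ties-heavy arrays above pin stable to True because stable=True guarantees that equal elements keep their relative order, making argsort's output deterministic. A sketch (the stable keyword requires a reasonably recent PyTorch):

import torch

x = torch.tensor([1, 1, 1, 2, 1, 3, 1, 4])
# With stable=True, equal values keep their original relative order.
print(torch.argsort(x, dim=0, stable=True))  # tensor([0, 1, 2, 4, 6, 3, 5, 7])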