From df79c8396a7c318107fb51874c31016a684c6d6b Mon Sep 17 00:00:00 2001
From: eaidova
Date: Tue, 29 Nov 2022 06:07:30 -0500
Subject: [PATCH] add tests

---
 tests/layer_tests/pytorch_tests/test_addmm.py | 45 +++++++++++
 tests/layer_tests/pytorch_tests/test_mm.py    | 79 +++++++++++++++++++
 2 files changed, 124 insertions(+)
 create mode 100644 tests/layer_tests/pytorch_tests/test_addmm.py
 create mode 100644 tests/layer_tests/pytorch_tests/test_mm.py

diff --git a/tests/layer_tests/pytorch_tests/test_addmm.py b/tests/layer_tests/pytorch_tests/test_addmm.py
new file mode 100644
index 00000000000000..9fe8012c31307b
--- /dev/null
+++ b/tests/layer_tests/pytorch_tests/test_addmm.py
@@ -0,0 +1,45 @@
+# Copyright (C) 2018-2022 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+import pytest
+from pytorch_layer_test_class import PytorchLayerTest
+
+
+class TestAddMM(PytorchLayerTest):
+    def _prepare_input(self, input_shape=(2,2), matrix1_shape=(2, 2), matrix2_shape=(2, 2)):
+        import numpy as np
+        return (
+            np.random.randn(*input_shape).astype(np.float32),
+            np.random.randn(*matrix1_shape).astype(np.float32),
+            np.random.randn(*matrix2_shape).astype(np.float32)
+        )
+
+    def create_model(self, alpha, beta):
+
+        import torch
+
+        class aten_addmm(torch.nn.Module):
+            def __init__(self, alpha, beta):
+                super(aten_addmm, self).__init__()
+                self.alpha = alpha
+                self.beta = beta
+
+            def forward(self, m0, m1, m2):
+                return torch.addmm(m0, m1, m2, alpha=self.alpha, beta=self.beta)
+
+        ref_net = None
+
+        return aten_addmm(alpha, beta), ref_net, 'aten::addmm'
+
+    @pytest.mark.parametrize("kwargs_to_prepare_input", [
+        {"input_shape": (3, 3), 'matrix1_shape': (3, 3), 'matrix2_shape': (3, 3)},
+        {"input_shape": (2, 2), 'matrix1_shape': (2, 3), 'matrix2_shape': (3, 2)},
+        {"input_shape": (10, 1), 'matrix1_shape': (10, 5), 'matrix2_shape': (5, 1)},
+        {"input_shape": (1, 2), 'matrix1_shape': (1, 10), 'matrix2_shape': (10, 2)},
+        {"input_shape": (1, 1), 'matrix1_shape': (1, 10), 'matrix2_shape': (10, 1)},
+
+    ])
+    @pytest.mark.parametrize("alpha,beta", [(1., 1.), (0., 1.), (1., 0.), (1., 2.), (2., 1.), (-5., -6.), (3., 4.), (0.5, 0.75)])
+    @pytest.mark.nightly
+    def test_addmm(self, kwargs_to_prepare_input, alpha, beta, ie_device, precision, ir_version):
+        self._test(*self.create_model(alpha, beta), ie_device, precision, ir_version, kwargs_to_prepare_input=kwargs_to_prepare_input)
\ No newline at end of file
diff --git a/tests/layer_tests/pytorch_tests/test_mm.py b/tests/layer_tests/pytorch_tests/test_mm.py
new file mode 100644
index 00000000000000..10e62797ae0a5f
--- /dev/null
+++ b/tests/layer_tests/pytorch_tests/test_mm.py
@@ -0,0 +1,79 @@
+# Copyright (C) 2018-2022 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+import pytest
+from pytorch_layer_test_class import PytorchLayerTest
+
+
+class TestMatMul(PytorchLayerTest):
+    def _prepare_input(self, matrix1_shape=(2, 2), matrix2_shape=(2, 2)):
+        import numpy as np
+        return (np.random.randn(*matrix1_shape).astype(np.float32), np.random.randn(*matrix2_shape).astype(np.float32))
+
+    def create_model(self, op_type="aten::mm"):
+
+        import torch
+        ops = {
+            "aten::mm": torch.mm,
+            "aten::bmm": torch.bmm,
+            "aten::matmul": torch.matmul
+        }
+
+        class aten_mm(torch.nn.Module):
+            def __init__(self, op):
+                super(aten_mm, self).__init__()
+                self.op = op
+
+            def forward(self, m1, m2):
+                return self.op(m1, m2)
+        ref_net = None
+
+        return aten_mm(ops[op_type]), ref_net, op_type
+
+    @pytest.mark.parametrize("kwargs_to_prepare_input", [
+        {'matrix1_shape': (3, 3), 'matrix2_shape': (3, 3)},
+        {'matrix1_shape': (2, 3), 'matrix2_shape': (3, 2)},
+        {'matrix1_shape': (10, 5), 'matrix2_shape': (5, 1)},
+        {'matrix1_shape': (1, 10), 'matrix2_shape': (10, 2)},
+        {'matrix1_shape': (1, 10), 'matrix2_shape': (10, 1)},
+
+    ])
+    @pytest.mark.nightly
+    def test_mm(self, kwargs_to_prepare_input, ie_device, precision, ir_version):
+        self._test(*self.create_model('aten::mm'), ie_device, precision, ir_version, kwargs_to_prepare_input=kwargs_to_prepare_input)
+
+    @pytest.mark.parametrize("kwargs_to_prepare_input", [
+        {'matrix1_shape': (10, 3, 3), 'matrix2_shape': (10, 3, 3)},
+        {'matrix1_shape': (1, 2, 3), 'matrix2_shape': (1, 3, 2)},
+        {'matrix1_shape': (2, 10, 5), 'matrix2_shape': (2, 5, 1)},
+        {'matrix1_shape': (3, 1, 10), 'matrix2_shape': (3, 10, 2)},
+        {'matrix1_shape': (4, 1, 10), 'matrix2_shape': (4, 10, 1)},
+
+    ])
+    @pytest.mark.nightly
+    def test_bmm(self, kwargs_to_prepare_input, ie_device, precision, ir_version):
+        self._test(*self.create_model('aten::bmm'), ie_device, precision, ir_version, kwargs_to_prepare_input=kwargs_to_prepare_input)
+
+    @pytest.mark.parametrize("kwargs_to_prepare_input", [
+        {'matrix1_shape': (10, 3, 3), 'matrix2_shape': (10, 3, 3)},
+        {'matrix1_shape': (1, 2, 3), 'matrix2_shape': (1, 3, 2)},
+        {'matrix1_shape': (2, 10, 5), 'matrix2_shape': (2, 5, 1)},
+        {'matrix1_shape': (3, 1, 10), 'matrix2_shape': (3, 10, 2)},
+        {'matrix1_shape': (4, 1, 10), 'matrix2_shape': (4, 10, 1)},
+        {'matrix1_shape': (3, 3), 'matrix2_shape': (3, 3)},
+        {'matrix1_shape': (2, 3), 'matrix2_shape': (3, 2)},
+        {'matrix1_shape': (10, 5), 'matrix2_shape': (5, 1)},
+        {'matrix1_shape': (1, 10), 'matrix2_shape': (10, 2)},
+        {'matrix1_shape': (1, 10), 'matrix2_shape': (10, 1)},
+        {'matrix1_shape': (10, 3, 3), 'matrix2_shape': (3, 3)},
+        {'matrix1_shape': (2, 3), 'matrix2_shape': (10, 3, 2)},
+        {'matrix1_shape': (1, 10, 5), 'matrix2_shape': (5, 1)},
+        {'matrix1_shape': (5, 1, 10), 'matrix2_shape': (10, 2)},
+        {'matrix1_shape': (1, 10), 'matrix2_shape': (4, 10, 2)},
+        {'matrix1_shape': (2, 1, 10), 'matrix2_shape': (10, 1)},
+        {'matrix1_shape': (1, 10), 'matrix2_shape': (2, 10, 1)},
+
+    ])
+    @pytest.mark.nightly
+    def test_matmul(self, kwargs_to_prepare_input, ie_device, precision, ir_version):
+        self._test(*self.create_model('aten::matmul'), ie_device, precision, ir_version, kwargs_to_prepare_input=kwargs_to_prepare_input)