
Enable ngraph python tests in OpenVINO-ONNX CI #1603

56 changes: 56 additions & 0 deletions ngraph/python/tests/__init__.py
@@ -13,6 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
import pytest

# test.BACKEND_NAME is a configuration variable determining which
# nGraph backend tests will use. It's set during pytest configuration time.
@@ -24,3 +25,58 @@
# configuration time. See `pytest_configure` hook in `conftest.py` for more
# details.
ADDITIONAL_MODELS_DIR = None


def xfail_test(reason="Mark the test as expected to fail", strict=True):
return pytest.mark.xfail(reason=reason, strict=strict)


skip_segfault = pytest.mark.skip(reason="Segmentation fault error")
xfail_issue_35893 = xfail_test(reason="ValueError: could not broadcast input array")
xfail_issue_35911 = xfail_test(reason="Assertion error: Pad model mismatch error")
xfail_issue_35912 = xfail_test(reason="RuntimeError: Error of validate layer: B with type: "
"Pad. Cannot parse parameter pads_end from IR for layer B. "
"Value -1,0 cannot be casted to int.")
xfail_issue_35914 = xfail_test(reason="IndexError: too many indices for array: "
"array is 0-dimensional, but 1 were indexed")
xfail_issue_35915 = xfail_test(reason="RuntimeError: Eltwise node with unsupported combination "
"of input and output types")
xfail_issue_35916 = xfail_test(reason="RuntimeError: Unsupported input dims count for layer Z")
xfail_issue_35917 = xfail_test(reason="RuntimeError: Unsupported input dims count for "
"layer MatMul")
xfail_issue_35918 = xfail_test(reason="onnx.onnx_cpp2py_export.checker.ValidationError: "
"Mismatched attribute type in 'test_node : alpha'")
xfail_issue_35921 = xfail_test(reason="ValueError - shapes mismatch in gemm")

xfail_issue_35923 = xfail_test(reason="RuntimeError: PReLU without weights is not supported")
xfail_issue_35924 = xfail_test(reason="Assertion error - elu results mismatch")
xfail_issue_35925 = xfail_test(reason="Assertion error - reduction ops results mismatch")
xfail_issue_35926 = xfail_test(reason="RuntimeError: [NOT_IMPLEMENTED] Input image format I64 is "
"not supported yet...")
xfail_issue_35927 = xfail_test(reason="RuntimeError: B has zero dimension that is not allowable")
xfail_issue_35929 = xfail_test(reason="CRuntimeError: Incorrect precision f64!")
xfail_issue_34323 = xfail_test(reason="RuntimeError: data [value] doesn't exist")
xfail_issue_35930 = xfail_test(reason="onnx.onnx_cpp2py_export.checker.ValidationError: "
"Required attribute 'to' is missing.")
xfail_issue_35932 = xfail_test(reason="Assertion error - logsoftmax results mismatch")
xfail_issue_36437 = xfail_test(reason="RuntimeError: Cannot find blob with name: y")
xfail_issue_36476 = xfail_test(reason="RuntimeError: [NOT_IMPLEMENTED] Input image format U32 is "
"not supported yet...")
xfail_issue_36478 = xfail_test(reason="RuntimeError: [NOT_IMPLEMENTED] Input image format U64 is "
"not supported yet...")
xfail_issue_36479 = xfail_test(reason="Assertion error - basic computation on ndarrays mismatch")
xfail_issue_36480 = xfail_test(reason="RuntimeError: [NOT_FOUND] Unsupported property dummy_option "
"by CPU plugin")
xfail_issue_36481 = xfail_test(reason="TypeError: _get_node_factory() takes from 0 to 1 positional "
"arguments but 2 were given")
xfail_issue_36483 = xfail_test(reason="RuntimeError: Unsupported primitive of type: "
"Ceiling name: Ceiling_22669")
xfail_issue_34327 = xfail_test(reason="RuntimeError: '<value>' layer has different "
"IN and OUT channels number")
xfail_issue_36485 = xfail_test(reason="RuntimeError: Check 'm_group >= 1' failed at "
"/openvino/ngraph/src/ngraph/op/fused/shuffle_channels.cpp:77:")
xfail_issue_36486 = xfail_test(reason="RuntimeError: HardSigmoid operation should be converted "
"to HardSigmoid_IE")
xfail_issue_34314 = xfail_test(reason="RuntimeError: RNNCell operation has a form that is not "
"supported. RNNCell_21204 should be converted to RNNCellIE operation")
xfail_issue_36487 = xfail_test(reason="Assertion error - mvn operator computation mismatch")
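
These markers are applied in the test files below either as plain decorators or, for parametrized tests, through pytest.param(..., marks=...). A minimal sketch of both usages follows (the test names and bodies are illustrative placeholders, not tests from this PR):

import numpy as np
import pytest

from tests import xfail_issue_34323, xfail_issue_35929


@xfail_issue_34323
def test_broadcast_example():
    # The whole test is expected to fail until issue 34323 is resolved;
    # because xfail_test() uses strict=True, an unexpected pass is reported
    # as a failure rather than silently ignored.
    ...


@pytest.mark.parametrize(
    "dtype",
    [
        np.float32,
        # Only this parameter combination is marked as an expected failure.
        pytest.param(np.float64, marks=xfail_issue_35929),
    ],
)
def test_simple_computation_example(dtype):
    ...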
4 changes: 4 additions & 0 deletions ngraph/python/tests/conftest.py
@@ -73,3 +73,7 @@ def pytest_collection_modifyitems(config, items):
skip_this_backend = keywords[backend_name]
if skip_this_backend in item.keywords:
item.add_marker(skip_markers[backend_name])

if item.name.startswith(("test_reduce_operation[None-False",
"test_reduce_operation[axes7-False")):
item.add_marker(tests.xfail_issue_35925)
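
For context, the lines above extend the existing pytest_collection_modifyitems hook in conftest.py. A rough sketch of how that hook and the related configuration might fit together is shown below; the --backend option, the keywords/skip_markers dictionaries, and the pytest_configure body are illustrative assumptions, not code from this PR:

import pytest

import tests


def pytest_addoption(parser):
    # Assumed option name; lets CI select the nGraph backend under test.
    parser.addoption("--backend", default="CPU", help="nGraph backend to run the tests on")


def pytest_configure(config):
    # Assumed: publish the chosen backend on the tests package so test modules
    # can read tests.BACKEND_NAME (see the comment in tests/__init__.py).
    tests.BACKEND_NAME = config.getoption("--backend")


def pytest_collection_modifyitems(config, items):
    backend_name = config.getoption("--backend")

    # Assumed mapping of backend names to skip keywords and markers; the real
    # conftest.py defines one entry per supported backend.
    keywords = {"CPU": "skip_on_cpu", "INTERPRETER": "skip_on_interpreter"}
    skip_markers = {
        "CPU": pytest.mark.skip(reason="Skipping test on the CPU backend."),
        "INTERPRETER": pytest.mark.skip(reason="Skipping test on the INTERPRETER backend."),
    }

    for item in items:
        skip_this_backend = keywords[backend_name]
        if skip_this_backend in item.keywords:
            item.add_marker(skip_markers[backend_name])

        # The lines added by this PR: mark the reduction tests that hit
        # issue 35925 for these specific parameter combinations as xfail.
        if item.name.startswith(("test_reduce_operation[None-False",
                                 "test_reduce_operation[axes7-False")):
            item.add_marker(tests.xfail_issue_35925)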
37 changes: 26 additions & 11 deletions ngraph/python/tests/test_ngraph/test_basic.py
@@ -23,6 +23,13 @@
from ngraph.impl import Function, PartialShape, Shape
from tests.runtime import get_runtime
from tests.test_ngraph.util import run_op_node
from tests import (xfail_issue_34323,
xfail_issue_35929,
xfail_issue_35926,
xfail_issue_36476,
xfail_issue_36478,
xfail_issue_36479,
xfail_issue_36480)


def test_ngraph_function_api():
@@ -53,15 +60,15 @@ def test_ngraph_function_api():
"dtype",
[
np.float32,
np.float64,
np.int8,
pytest.param(np.float64, marks=xfail_issue_35929),
pytest.param(np.int8, marks=xfail_issue_36479),
np.int16,
np.int32,
np.int64,
np.uint8,
np.uint16,
np.uint32,
np.uint64,
pytest.param(np.int64, marks=xfail_issue_35926),
pytest.param(np.uint8, marks=xfail_issue_36479),
pytest.param(np.uint16, marks=xfail_issue_36479),
pytest.param(np.uint32, marks=xfail_issue_36476),
pytest.param(np.uint64, marks=xfail_issue_36478),
],
)
def test_simple_computation_on_ndarrays(dtype):
@@ -107,6 +114,7 @@ def test_serialization():
pass


@xfail_issue_34323
def test_broadcast_1():
input_data = np.array([1, 2, 3])

@@ -116,6 +124,7 @@ def test_broadcast_1():
assert np.allclose(result, expected)


@xfail_issue_34323
def test_broadcast_2():
input_data = np.arange(4)
new_shape = [3, 4, 2, 4]
@@ -124,6 +133,7 @@ def test_broadcast_2():
assert np.allclose(result, expected)


@xfail_issue_34323
def test_broadcast_3():
input_data = np.array([1, 2, 3])
new_shape = [3, 3]
@@ -134,6 +144,7 @@ def test_broadcast_3():
assert np.allclose(result, expected)


@xfail_issue_34323
@pytest.mark.parametrize(
"destination_type, input_data",
[(bool, np.zeros((2, 2), dtype=int)), ("boolean", np.zeros((2, 2), dtype=int))],
@@ -148,10 +159,10 @@ def test_convert_to_bool(destination_type, input_data):
@pytest.mark.parametrize(
"destination_type, rand_range, in_dtype, expected_type",
[
(np.float32, (-8, 8), np.int32, np.float32),
(np.float64, (-16383, 16383), np.int64, np.float64),
("f32", (-8, 8), np.int32, np.float32),
("f64", (-16383, 16383), np.int64, np.float64),
pytest.param(np.float32, (-8, 8), np.int32, np.float32, marks=xfail_issue_34323),
pytest.param(np.float64, (-16383, 16383), np.int64, np.float64, marks=xfail_issue_35929),
pytest.param("f32", (-8, 8), np.int32, np.float32, marks=xfail_issue_34323),
pytest.param("f64", (-16383, 16383), np.int64, np.float64, marks=xfail_issue_35929),
],
)
def test_convert_to_float(destination_type, rand_range, in_dtype, expected_type):
@@ -163,6 +174,7 @@ def test_convert_to_float(destination_type, rand_range, in_dtype, expected_type):
assert np.array(result).dtype == expected_type


@xfail_issue_34323
@pytest.mark.parametrize(
"destination_type, expected_type",
[
@@ -185,6 +197,7 @@ def test_convert_to_int(destination_type, expected_type):
assert np.array(result).dtype == expected_type


@xfail_issue_34323
@pytest.mark.parametrize(
"destination_type, expected_type",
[
@@ -262,13 +275,15 @@ def test_constant_get_data_unsigned_integer(data_type):
assert np.allclose(input_data, retrieved_data)


@xfail_issue_36480
def test_backend_config():
dummy_config = {"dummy_option": "dummy_value"}
# Expect no throw
runtime = get_runtime()
runtime.set_config(dummy_config)


@xfail_issue_34323
def test_result():
node = [[11, 10], [1, 8], [3, 4]]
result = run_op_node([node], ng.result)
3 changes: 3 additions & 0 deletions ngraph/python/tests/test_ngraph/test_convolution.py
@@ -20,8 +20,10 @@
from tests.runtime import get_runtime
from tests.test_ngraph.test_ops import convolution2d
from tests.test_ngraph.util import run_op_node
from tests import xfail_issue_34323


@xfail_issue_34323
def test_convolution_2d():

# input_x should have shape N(batch) x C x H x W
@@ -212,6 +214,7 @@ def test_convolution_backprop_data():
)


@xfail_issue_34323
def test_convolution_v1():
input_tensor = np.arange(-128, 128, 1, dtype=np.float32).reshape(1, 1, 16, 16)
filters = np.ones(9, dtype=np.float32).reshape(1, 1, 3, 3)
3 changes: 3 additions & 0 deletions ngraph/python/tests/test_ngraph/test_data_movement.py
@@ -18,6 +18,7 @@
import ngraph as ng
from tests.runtime import get_runtime
from tests.test_ngraph.util import run_op_node
from tests import xfail_issue_35926, xfail_issue_34323


def test_reverse_sequence():
@@ -165,6 +166,7 @@ def test_pad_edge():
assert np.allclose(result, expected)


@xfail_issue_35926
def test_pad_constant():
input_data = np.arange(1, 13).reshape([3, 4])
pads_begin = np.array([0, 1], dtype=np.int32)
@@ -189,6 +191,7 @@ def test_pad_constant():
assert np.allclose(result, expected)


@xfail_issue_34323
def test_select():
cond = [[False, False], [True, False], [True, True]]
then_node = [[-1, 0], [1, 2], [3, 4]]
4 changes: 4 additions & 0 deletions ngraph/python/tests/test_ngraph/test_normalization.py
@@ -19,8 +19,10 @@
import ngraph as ng
from tests.runtime import get_runtime
from tests.test_ngraph.util import run_op_node
from tests import xfail_issue_34323, xfail_issue_35929


@xfail_issue_34323
def test_lrn():
input_image_shape = (2, 3, 2, 1)
input_image = np.arange(int(np.prod(input_image_shape))).reshape(input_image_shape).astype("f")
@@ -56,6 +58,7 @@ def test_lrn():
)


@xfail_issue_34323
def test_lrn_factory():
alpha = 0.0002
beta = 0.5
@@ -101,6 +104,7 @@ def test_lrn_factory():
assert np.allclose(result, excepted)


@xfail_issue_35929
def test_batch_norm_inference():
data = [[1.0, 2.0, 3.0], [-1.0, -2.0, -3.0]]
gamma = [2.0, 3.0, 4.0]
3 changes: 3 additions & 0 deletions ngraph/python/tests/test_ngraph/test_ops.py
@@ -20,6 +20,7 @@
from ngraph.impl import AxisSet, Function, Shape, Type
from ngraph.impl.op import Constant, Parameter
from tests.runtime import get_runtime
from tests import xfail_issue_36483, xfail_issue_34323


def binary_op(op_str, a, b):
@@ -339,6 +340,7 @@ def test_atan():
unary_op_exec(op_str, input_list)


@xfail_issue_36483
def test_ceiling():
input_list = [0.5, 0, 0.4, 0.5]
op_str = "Ceiling"
@@ -450,6 +452,7 @@ def test_broadcast():
assert np.allclose(result, expected)


@xfail_issue_34323
def test_constant():
element_type = Type.f32
parameter_list = []
3 changes: 3 additions & 0 deletions ngraph/python/tests/test_ngraph/test_ops_binary.py
@@ -21,6 +21,7 @@
import ngraph as ng
from tests.runtime import get_runtime
from tests.test_ngraph.util import run_op_node
from tests import xfail_issue_34323


@pytest.mark.parametrize(
@@ -201,6 +202,7 @@ def test_binary_operators_with_scalar(operator, numpy_function):
assert np.allclose(result, expected)


@xfail_issue_34323
def test_multiply():
A = np.arange(48).reshape((8, 1, 6, 1))
B = np.arange(35).reshape((7, 1, 5))
@@ -211,6 +213,7 @@ def test_multiply():
assert np.allclose(result, expected)


@xfail_issue_34323
def test_power_v1():
A = np.arange(48, dtype=np.float32).reshape((8, 1, 6, 1))
B = np.arange(20, dtype=np.float32).reshape((4, 1, 5))