Update ONNX Python tests (#1514)
rblaczkowski authored Aug 4, 2020
1 parent 19f798b commit d14d09e
Showing 12 changed files with 223 additions and 304 deletions.
3 changes: 2 additions & 1 deletion ngraph/python/tests/test_onnx/test_ops_batchnorm.py
@@ -17,7 +17,7 @@
import numpy as np
import onnx

from tests.test_onnx.utils import run_node
from tests.test_onnx.utils import run_node, xfail_issue_35893


def make_batch_norm_node(**node_attributes):
@@ -26,6 +26,7 @@ def make_batch_norm_node(**node_attributes):
)


@xfail_issue_35893
def test_batch_norm_test_node():
data = np.arange(48).reshape((1, 3, 4, 4)).astype(np.float32)
scale = np.ones((3,)).astype(np.float32) # Gamma
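The xfail_issue_35893 marker applied here is imported from tests.test_onnx.utils, whose diff is not shown above. A minimal sketch of what such a module-level marker presumably looks like (only the name comes from this commit; the reason string is a guess):

import pytest

# Assumed definition in tests/test_onnx/utils.py (not part of the hunks shown).
# The name matches the import above; the reason text is hypothetical.
xfail_issue_35893 = pytest.mark.xfail(reason="issue 35893: BatchNormalization test fails on this backend")

Unlike a plain skip, an xfail mark still runs the test and reports XFAIL or XPASS, which is presumably why this commit replaces skips and inline failure comments with named markers.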
10 changes: 5 additions & 5 deletions ngraph/python/tests/test_onnx/test_ops_binary.py
@@ -18,7 +18,7 @@
import pytest
from onnx.helper import make_graph, make_model, make_tensor_value_info

from tests.test_onnx.utils import run_model
from tests.test_onnx.utils import run_model, skip_segfault


def import_and_compute(op_type, input_data_left, input_data_right, opset=7, **node_attributes):
@@ -37,7 +37,7 @@ def import_and_compute(op_type, input_data_left, input_data_right, opset=7, **node_attributes):
return run_model(model, inputs)[0]


@pytest.mark.skip(reason="Causes segmentation fault")
@skip_segfault
def test_add_opset4():
assert np.array_equal(import_and_compute("Add", 1, 2, opset=4), np.array(3, dtype=np.float32))

@@ -110,7 +110,7 @@ def test_add_opset7(left_shape, right_shape):
assert np.array_equal(import_and_compute("Add", left_input, right_input), left_input + right_input)


@pytest.mark.skip(reason="Causes segmentation fault")
@skip_segfault
def test_sub():
assert np.array_equal(import_and_compute("Sub", 20, 1), np.array(19, dtype=np.float32))

@@ -124,7 +124,7 @@ def test_sub():
)


@pytest.mark.skip(reason="Causes segmentation fault")
@skip_segfault
def test_mul():
assert np.array_equal(import_and_compute("Mul", 2, 3), np.array(6, dtype=np.float32))

@@ -138,7 +138,7 @@ def test_mul():
)


@pytest.mark.skip(reason="Causes segmentation fault")
@skip_segfault
def test_div():
assert np.array_equal(import_and_compute("Div", 6, 3), np.array(2, dtype=np.float32))

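The skip_segfault decorator replaces the four inline @pytest.mark.skip(reason="Causes segmentation fault") markers removed in this file. Its definition lives in tests.test_onnx.utils and is not shown here; a minimal sketch, assuming the reason string is simply reused verbatim:

import pytest

# Presumed definition in tests/test_onnx/utils.py; the reason string is taken
# from the decorators this commit removes.
skip_segfault = pytest.mark.skip(reason="Causes segmentation fault")

Centralising the mark keeps the reason in one place, so all four binary-op tests pick up any later change to it.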
15 changes: 10 additions & 5 deletions ngraph/python/tests/test_onnx/test_ops_convpool.py
@@ -19,7 +19,13 @@
from onnx.helper import make_graph, make_model, make_node, make_tensor_value_info

from tests.runtime import get_runtime
from tests.test_onnx.utils import get_node_model, import_onnx_model, run_model, run_node
from tests.test_onnx.utils import (get_node_model,
import_onnx_model,
run_model,
run_node,
xfail_issue_35911,
xfail_issue_35912
)


@pytest.fixture
@@ -268,6 +274,7 @@ def test_2d_conv_transpose():
)


@xfail_issue_35911
def test_pad_opset_1():
x = np.ones((2, 2), dtype=np.float32)
y = np.pad(x, pad_width=1, mode="constant")
@@ -317,8 +324,7 @@ def test_pad_opset_2():
run_model(model, [x])


# Error of validate layer: B with type: Pad. Cannot parse parameter pads_begin
# from IR for layer B. Value -1,0 cannot be casted to int.
@xfail_issue_35912
def test_pad_negative_values_begin():
x = np.ones((2, 2), dtype=np.float32)

@@ -333,8 +339,7 @@ def test_pad_negative_values_begin():
assert np.array_equal(ng_result, np.array([[1], [1]]))


# Error of validate layer: B with type: Pad. Cannot parse parameter pads_begin
# from IR for layer B. Value -1,0 cannot be casted to int.
@xfail_issue_35912
def test_pad_negative_values_end():
x = np.ones((2, 2), dtype=np.float32)

15 changes: 7 additions & 8 deletions ngraph/python/tests/test_onnx/test_ops_logical.py
@@ -17,19 +17,18 @@
import onnx
import pytest

# [PARAMETER_MISMATCH] Failed to set Blob with precision FP32
from tests.test_onnx.utils import run_node
from tests.test_onnx.utils import run_node, xfail_issue_35914, xfail_issue_35915


@pytest.mark.parametrize(
"onnx_op, numpy_func, data_type",
[
("And", np.logical_and, np.bool),
("Or", np.logical_or, np.bool),
("Xor", np.logical_xor, np.bool),
("Equal", np.equal, np.int32),
("Greater", np.greater, np.int32),
("Less", np.less, np.int32),
pytest.param("And", np.logical_and, np.bool, marks=xfail_issue_35914),
pytest.param("Or", np.logical_or, np.bool, marks=xfail_issue_35914),
pytest.param("Xor", np.logical_xor, np.bool, marks=xfail_issue_35914),
pytest.param("Equal", np.equal, np.int32, marks=xfail_issue_35915),
pytest.param("Greater", np.greater, np.int32, marks=xfail_issue_35915),
pytest.param("Less", np.less, np.int32, marks=xfail_issue_35915),
],
)
def test_logical(onnx_op, numpy_func, data_type):
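The pattern above attaches an xfail mark to individual parameter sets instead of the whole test, so the unmarked cases still run and must pass. A self-contained illustration of that pytest mechanism (the marker name, reason, and test below are placeholders, not taken from this commit):

import pytest

# Placeholder marker; in the real suite these are the xfail_issue_* objects
# imported from tests.test_onnx.utils.
xfail_negative_case = pytest.mark.xfail(reason="placeholder: negative inputs not handled yet")


@pytest.mark.parametrize(
    "value, expected",
    [
        (2, 4),
        # Only this case is expected to fail; (-2) * (-2) == 4, not -4.
        pytest.param(-2, -4, marks=xfail_negative_case),
    ],
)
def test_square(value, expected):
    assert value * value == expected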
116 changes: 54 additions & 62 deletions ngraph/python/tests/test_onnx/test_ops_matmul.py
@@ -18,7 +18,14 @@
from onnx.helper import make_graph, make_model, make_node, make_tensor_value_info

from tests.runtime import get_runtime
from tests.test_onnx.utils import import_onnx_model
from tests.test_onnx.utils import (import_onnx_model,
xfail_issue_35916,
xfail_issue_35917,
xfail_issue_35918,
xfail_issue_35921
)

import pytest


def make_onnx_model_for_matmul_op(input_left, input_right):
@@ -99,33 +106,20 @@ def import_and_compute_gemm(input_a, input_b, input_c, **kwargs):
return computation(input_a, input_b, input_c)[0]


def test_op_matmul():
# vector @ vector
data = ([1, 2], [1, 3])
assert np.array_equal(import_and_compute_matmul(*data), np.matmul(*data))

data = ([1, 2, 3], [[4], [5], [6]])
assert np.array_equal(import_and_compute_matmul(*data), np.matmul(*data))

data = ([[1, 2, 3]], [1, 2, 3])
assert np.array_equal(import_and_compute_matmul(*data), np.matmul(*data))

# vector @ matrix
data = ([1, 2, 3], [[4, 5], [6, 7], [8, 9]])
assert np.array_equal(import_and_compute_matmul(*data), np.matmul(*data))

# matrix @ vector
data = ([[1, 2, 3], [4, 5, 6]], [[7], [8], [9]])
assert np.array_equal(import_and_compute_matmul(*data), np.matmul(*data))

# matrix @ matrix
data = ([[1, 2], [3, 4]], [[5, 6], [7, 8]])
assert np.array_equal(import_and_compute_matmul(*data), np.matmul(*data))

data = ([[1, 2, 3], [4, 5, 6]], [[7, 8], [9, 10], [11, 12]])
assert np.array_equal(import_and_compute_matmul(*data), np.matmul(*data))

data = ([[1, 2], [3, 4], [5, 6]], [[7, 8, 9], [10, 11, 12]])
@pytest.mark.parametrize(
"data, description",
[
pytest.param(([1, 2], [1, 3]), "vector and vector 1", marks=xfail_issue_35916),
(([1, 2, 3], [[4], [5], [6]]), "vector and vector 2"),
(([[1, 2, 3]], [1, 2, 3]), "vector and vector 3"),
(([1, 2, 3], [[4, 5], [6, 7], [8, 9]]), "vector and matrix"),
(([[1, 2, 3], [4, 5, 6]], [[7], [8], [9]]), "matrix and vector"),
(([[1, 2], [3, 4]], [[5, 6], [7, 8]]), "matrix and matrix 1"),
(([[1, 2, 3], [4, 5, 6]], [[7, 8], [9, 10], [11, 12]]), "matrix and matrix 2"),
(([[1, 2], [3, 4], [5, 6]], [[7, 8, 9], [10, 11, 12]]), "matrix and matrix 3")
],
)
def test_op_matmul(data, description):
assert np.array_equal(import_and_compute_matmul(*data), np.matmul(*data))


@@ -138,43 +132,41 @@ def test_op_matmul_3d():
assert np.array_equal(import_and_compute_matmul(*data), np.matmul(*data))


def test_gemm():
data = ([1, 2], [1, 3], [1, 4])
assert np.array_equal(import_and_compute_gemm(*data), numpy_gemm(*data))

data = ([1, 2], [1, 3], 1)
assert np.array_equal(import_and_compute_gemm(*data), numpy_gemm(*data))

data = ([1, 2], [1, 3], [1])
assert np.array_equal(import_and_compute_gemm(*data), numpy_gemm(*data))

data = ([1, 2], [1, 3], [1, 4])
kwargs = {"alpha": 7, "beta": 9}
assert np.array_equal(import_and_compute_gemm(*data, **kwargs), numpy_gemm(*data, **kwargs))

data = ([1, 2, 3, 4], [1, 3, 5, 7], [1, 4])
kwargs = {"alpha": 7, "beta": 9}
assert np.array_equal(import_and_compute_gemm(*data, **kwargs), numpy_gemm(*data, **kwargs))


def test_gemm_transpositions():
data = ([1, 2], [1, 3], [1, 4])
kwargs = {"trans_a": True, "trans_b": True}
assert np.array_equal(import_and_compute_gemm(*data, **kwargs), numpy_gemm(*data, **kwargs))

data = ([[1, 2], [1, 2]], [[1, 3], [1, 3]], [4, 1])
kwargs = {"trans_a": True, "trans_b": True, "alpha": 7, "beta": 9}
assert np.array_equal(import_and_compute_gemm(*data, **kwargs), numpy_gemm(*data, **kwargs))

data = ([[1, 2]], [[1, 3]], 1)
kwargs = {"trans_b": True, "alpha": 7, "beta": 9}
assert np.array_equal(import_and_compute_gemm(*data, **kwargs), numpy_gemm(*data, **kwargs))

data = ([[1], [2]], [[1], [3]], 1)
kwargs = {"trans_a": True, "alpha": 7, "beta": 9}
@pytest.mark.parametrize(
"data, kwargs, description",
[
pytest.param(([1, 2], [1, 3], [1, 4]), {}, "vectors", marks=xfail_issue_35917),
pytest.param(([1, 2], [1, 3], 1), {}, "vectors and scalar", marks=xfail_issue_35917),
pytest.param(([1, 2], [1, 3], [1]), {}, "vectors and identity vector", marks=xfail_issue_35917),
pytest.param(([1, 2], [1, 3], [1, 4]), {"alpha": 7, "beta": 9},
"vectors with alpha and beta", marks=xfail_issue_35918),
pytest.param(([1, 2, 3, 4], [1, 3, 5, 7], [1, 4]), {"alpha": 7, "beta": 9},
"longer vectors with alpha and beta", marks=xfail_issue_35918)
],
)
def test_gemm(data, kwargs, description):
assert np.array_equal(import_and_compute_gemm(*data, **kwargs), numpy_gemm(*data, **kwargs))


@pytest.mark.parametrize(
"data, kwargs, description",
[
pytest.param(([1, 2], [1, 3], [1, 4]), {"trans_a": True, "trans_b": True},
"vectors with trans_a/trans_b", marks=xfail_issue_35917),
pytest.param(([[1, 2], [1, 2]], [[1, 3], [1, 3]], [4, 1]),
{"trans_a": True, "trans_b": True, "alpha": 7, "beta": 9},
"matrices and vector with trans_b and alpha/beta", marks=xfail_issue_35918),
pytest.param(([[1, 2]], [[1, 3]], 1), {"trans_b": True, "alpha": 7, "beta": 9},
"matrices and scalar with trans_b and alpha/beta", marks=xfail_issue_35918),
pytest.param(([[1], [2]], [[1], [3]], 1), {"trans_a": True, "alpha": 7, "beta": 9},
"matrices and scalar with trans_a and alpha/beta", marks=xfail_issue_35918),
],
)
def test_gemm_transpositions(data, kwargs, description):
assert np.array_equal(import_and_compute_gemm(*data, **kwargs), numpy_gemm(*data, **kwargs))


@xfail_issue_35921
def test_gemm_flatten():
# input_a.shape is (4,1,1)
data = ([[[1]], [[2]], [[3]], [[4]]], [1, 3, 5, 7], [1, 4])
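These tests compare against numpy_gemm, whose body is outside the hunks shown here. Judging from the keyword names used above (alpha, beta, trans_a, trans_b), it presumably implements the standard GEMM reference; a minimal sketch under that assumption:

import numpy as np

# Presumed reference implementation (not shown in this diff); the keyword
# names mirror the ones the tests above pass in.
def numpy_gemm(input_a, input_b, input_c, alpha=1, beta=1, trans_a=False, trans_b=False):
    a = np.array(input_a, dtype=np.float32)
    b = np.array(input_b, dtype=np.float32)
    c = np.array(input_c, dtype=np.float32)
    if trans_a:
        a = a.T
    if trans_b:
        b = b.T
    # GEMM: alpha * (A @ B) + beta * C, with NumPy broadcasting for C
    return alpha * np.dot(a, b) + beta * c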
27 changes: 17 additions & 10 deletions ngraph/python/tests/test_onnx/test_ops_nonlinear.py
@@ -17,7 +17,7 @@
import onnx
import pytest

from tests.test_onnx.utils import run_node
from tests.test_onnx.utils import run_node, xfail_issue_35918, xfail_issue_35923, xfail_issue_35924


def import_and_compute(op_type, input_data, **node_attrs):
@@ -70,15 +70,16 @@ def leaky_relu(x, alpha=0.01):
assert_onnx_import_equals_callable("LeakyRelu", leaky_relu, [[-3, -2, -1], [1, 2, 3]])


@xfail_issue_35923
@pytest.mark.parametrize(
"x,slope",
"x, slope",
[
([-2, -1.0, 0.0, 1.0, 2.0], 0.5),
([0.0], 1),
([-0.9, -0.8, -0.7, -0.4, -0.3, -0.2, -0.1], 1),
([[1, 2, 3], [4, 5, 6]], 0.5),
([[-3, -2, -1], [1, 2, 3]], 1),
],
]
)
def test_parametric_relu(x, slope):
def parametic_relu(x, slope):
@@ -103,13 +104,19 @@ def selu(x, alpha=1.67326319217681884765625, gamma=1.05070102214813232421875):
assert_onnx_import_equals_callable("Selu", selu, [-2, -1.0, 0.0, 1.0, 2.0], gamma=0.5, alpha=0.5)


def test_elu():
@pytest.mark.parametrize(
"data, alpha_value",
[
pytest.param([-2, -1.0, 0.0, 1.0, 2.0], 1, marks=xfail_issue_35918),
pytest.param([0.0], 1, marks=xfail_issue_35918),
pytest.param([-0.9, -0.8, -0.7, -0.4, -0.3, -0.2, -0.1], 1, marks=xfail_issue_35918),
pytest.param([[1, 2, 3], [4, 5, 6]], 1, marks=xfail_issue_35918),
pytest.param([-2, -1.0, 0.0, 1.0, 2.0], 0.5, marks=xfail_issue_35924)
]
)
def test_elu(data, alpha_value):
# f(x) = alpha * (exp(x) - 1) for x < 0, f(x) = x for x >= 0
def elu(x, alpha=1):
def elu(x, alpha):
return np.where(x < 0, alpha * (np.exp(x) - 1), x)

assert_onnx_import_equals_callable("Elu", elu, [-2, -1.0, 0.0, 1.0, 2.0])
assert_onnx_import_equals_callable("Elu", elu, [0.0])
assert_onnx_import_equals_callable("Elu", elu, [-0.9, -0.8, -0.7, -0.4, -0.3, -0.2, -0.1])
assert_onnx_import_equals_callable("Elu", elu, [[1, 2, 3], [4, 5, 6]])
assert_onnx_import_equals_callable("Elu", elu, [-2, -1.0, 0.0, 1.0, 2.0], alpha=0.5)
assert_onnx_import_equals_callable("Elu", elu, data, alpha=alpha_value)
