diff --git a/src/frontends/onnx/frontend/src/op/reduce.cpp b/src/frontends/onnx/frontend/src/op/reduce.cpp
index 284b984ab4aa2c..d8f5f9647a07ed 100644
--- a/src/frontends/onnx/frontend/src/op/reduce.cpp
+++ b/src/frontends/onnx/frontend/src/op/reduce.cpp
@@ -185,6 +185,7 @@ ov::OutputVector reduce_sum_square(const ov::frontend::onnx::Node& node) {
     return {make_ov_reduction_op(node, square_node, supported_types_v1)};
 }
 }  // namespace set_1
+
 /*
     Opset 11 is skipped because there are no significant difference between opset1 and opset 11.
     Found difference is:
@@ -198,12 +199,18 @@ namespace set_13 {
 ov::OutputVector reduce_sum(const ov::frontend::onnx::Node& node) {
     return {make_ov_reduction_op(node, node.get_ov_inputs().at(0), supported_types_v2, false)};
 }
+ov::OutputVector reduce_l2(const Node& node) {
+    return {make_ov_reduction_op(node, node.get_ov_inputs().at(0), supported_types_v2)};
+}
 ov::OutputVector reduce_max(const ov::frontend::onnx::Node& node) {
     return {make_ov_reduction_op(node, node.get_ov_inputs().at(0), supported_types_v3)};
 }
 }  // namespace set_13
 
 namespace set_18 {
+ov::OutputVector reduce_l2(const Node& node) {
+    return {make_ov_reduction_op(node, node.get_ov_inputs().at(0), supported_types_v2, false)};
+}
 ov::OutputVector reduce_max(const ov::frontend::onnx::Node& node) {
     return {make_ov_reduction_op(node, node.get_ov_inputs().at(0), supported_types_v3, false)};
 }
diff --git a/src/frontends/onnx/frontend/src/op/reduce.hpp b/src/frontends/onnx/frontend/src/op/reduce.hpp
index a30f1ec86b7d2d..efefc8ad02a6d2 100644
--- a/src/frontends/onnx/frontend/src/op/reduce.hpp
+++ b/src/frontends/onnx/frontend/src/op/reduce.hpp
@@ -28,6 +28,12 @@ ov::OutputVector reduce_l1(const ov::frontend::onnx::Node& node);
 namespace set_1 {
 ov::OutputVector reduce_l2(const ov::frontend::onnx::Node& node);
 }  // namespace set_1
+namespace set_13 {
+ov::OutputVector reduce_l2(const ov::frontend::onnx::Node& node);
+}  // namespace set_13
+namespace set_18 {
+ov::OutputVector reduce_l2(const ov::frontend::onnx::Node& node);
+}  // namespace set_18
 
 namespace set_1 {
 ov::OutputVector reduce_max(const ov::frontend::onnx::Node& node);
@@ -64,6 +70,7 @@ ov::OutputVector reduce_sum(const ov::frontend::onnx::Node& node);
 namespace set_1 {
 ov::OutputVector reduce_sum_square(const ov::frontend::onnx::Node& node);
 }  // namespace set_1
+
 }  // namespace op
 }  // namespace onnx
 }  // namespace frontend
diff --git a/src/frontends/onnx/frontend/src/ops_bridge.cpp b/src/frontends/onnx/frontend/src/ops_bridge.cpp
index 02255b673ca576..df835648454d83 100644
--- a/src/frontends/onnx/frontend/src/ops_bridge.cpp
+++ b/src/frontends/onnx/frontend/src/ops_bridge.cpp
@@ -486,6 +486,8 @@ OperatorsBridge::OperatorsBridge() {
     REGISTER_OPERATOR("ReduceLogSumExp", 1, reduce_log_sum_exp);
     REGISTER_OPERATOR("ReduceL1", 1, reduce_l1);
     REGISTER_OPERATOR("ReduceL2", 1, reduce_l2);
+    REGISTER_OPERATOR("ReduceL2", 13, reduce_l2);
+    REGISTER_OPERATOR("ReduceL2", 18, reduce_l2);
     REGISTER_OPERATOR("ReduceMax", 1, reduce_max);
     REGISTER_OPERATOR("ReduceMax", 13, reduce_max);
     REGISTER_OPERATOR("ReduceMax", 18, reduce_max);
diff --git a/src/frontends/onnx/tests/__init__.py b/src/frontends/onnx/tests/__init__.py
index 2768f08d031804..19ca6fc1f43b7d 100644
--- a/src/frontends/onnx/tests/__init__.py
+++ b/src/frontends/onnx/tests/__init__.py
@@ -61,8 +61,8 @@ def xfail_test(reason="Mark the test as expected to fail", strict=True):
 xfail_issue_99957 = xfail_test(reason="LayerNorm - RuntimeError: While validating node ''")
 xfail_issue_99960 = xfail_test(reason="MVN - Results mismatch")
 xfail_issue_99961 = xfail_test(reason="Optional has/get element operators are not supported)'")
-xfail_issue_99962 = pytest.mark.skip(reason="ReduceL1/L2 - Unrecognized attribute: axes for operator ReduceL1/L2")
-xfail_issue_99968 = xfail_test(reason="ReduceL1/L2 - Results mismatch or unsupported ReduceSum with "
+xfail_issue_99962 = pytest.mark.skip(reason="ReduceL1 - Unrecognized attribute: axes for operator ReduceL1")
+xfail_issue_99968 = xfail_test(reason="ReduceL1 - Results mismatch or unsupported ReduceSum with "
                                       "dynamic rank by CPU plugin")
 xfail_issue_99969 = xfail_test(reason="Resize - Results mismatch / "
                                       "RuntimeError: While validating ONNX node '' / "
diff --git a/src/frontends/onnx/tests/models/reduce_l2_11.prototxt b/src/frontends/onnx/tests/models/reduce_l2_11.prototxt
new file mode 100644
index 00000000000000..2a3f4006bff030
--- /dev/null
+++ b/src/frontends/onnx/tests/models/reduce_l2_11.prototxt
@@ -0,0 +1,48 @@
+ir_version: 4
+producer_name: "nGraph ONNX Importer"
+graph {
+  node {
+    input: "A"
+    output: "B"
+    op_type: "ReduceL2"
+  }
+  name: "compute_graph"
+  input {
+    name: "A"
+    type {
+      tensor_type {
+        elem_type: 1
+        shape {
+          dim {
+            dim_value: 1
+          }
+          dim {
+            dim_value: 1
+          }
+          dim {
+            dim_value: 4
+          }
+          dim {
+            dim_value: 4
+          }
+        }
+      }
+    }
+  }
+  output {
+    name: "B"
+    type {
+      tensor_type {
+        elem_type: 1
+        shape {
+          dim {
+            dim_value: 1
+          }
+        }
+      }
+    }
+  }
+}
+opset_import {
+  version: 11
+}
diff --git a/src/frontends/onnx/tests/models/reduce_l2_13.prototxt b/src/frontends/onnx/tests/models/reduce_l2_13.prototxt
new file mode 100644
index 00000000000000..3159100051e933
--- /dev/null
+++ b/src/frontends/onnx/tests/models/reduce_l2_13.prototxt
@@ -0,0 +1,48 @@
+ir_version: 4
+producer_name: "nGraph ONNX Importer"
+graph {
+  node {
+    input: "A"
+    output: "B"
+    op_type: "ReduceL2"
+  }
+  name: "compute_graph"
+  input {
+    name: "A"
+    type {
+      tensor_type {
+        elem_type: 1
+        shape {
+          dim {
+            dim_value: 1
+          }
+          dim {
+            dim_value: 1
+          }
+          dim {
+            dim_value: 4
+          }
+          dim {
+            dim_value: 4
+          }
+        }
+      }
+    }
+  }
+  output {
+    name: "B"
+    type {
+      tensor_type {
+        elem_type: 1
+        shape {
+          dim {
+            dim_value: 1
+          }
+        }
+      }
+    }
+  }
+}
+opset_import {
+  version: 13
+}
diff --git a/src/frontends/onnx/tests/models/reduce_l2_18.prototxt b/src/frontends/onnx/tests/models/reduce_l2_18.prototxt
new file mode 100644
index 00000000000000..06303553db3996
--- /dev/null
+++ b/src/frontends/onnx/tests/models/reduce_l2_18.prototxt
@@ -0,0 +1,61 @@
+ir_version: 4
+producer_name: "nGraph ONNX Importer"
+graph {
+  node {
+    input: "A"
+    output: "B"
+    op_type: "ReduceL2"
+  }
+  name: "compute_graph"
+  input {
+    name: "A"
+    type {
+      tensor_type {
+        elem_type: 1
+        shape {
+          dim {
+            dim_value: 1
+          }
+          dim {
+            dim_value: 1
+          }
+          dim {
+            dim_value: 4
+          }
+          dim {
+            dim_value: 4
+          }
+        }
+      }
+    }
+  }
+  input {
+    name: "axes"
+    type {
+      tensor_type {
+        elem_type: 6
+        shape {
+          dim {
+            dim_value: 1
+          }
+        }
+      }
+    }
+  }
+  output {
+    name: "B"
+    type {
+      tensor_type {
+        elem_type: 1
+        shape {
+          dim {
+            dim_value: 1
+          }
+        }
+      }
+    }
+  }
+}
+opset_import {
+  version: 18
+}
diff --git a/src/frontends/onnx/tests/onnx_import.in.cpp b/src/frontends/onnx/tests/onnx_import.in.cpp
index 7b3e39e71399aa..d529b19dd4524a 100644
--- a/src/frontends/onnx/tests/onnx_import.in.cpp
+++ b/src/frontends/onnx/tests/onnx_import.in.cpp
@@ -1076,6 +1076,54 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_reduce_l2) {
     test_case.run();
 }
 
+OPENVINO_TEST(${BACKEND_NAME}, onnx_model_reduce_l2_11) {
+    auto model = convert_model("reduce_l2_11.onnx");
+
+    // input data shape (1, 1, 4, 4)
+    Inputs inputs{
+        ov::test::NDArray<float, 4>({{{{3, 3, 3, 3}, {3, 3, 3, 3}, {3, 3, 3, 3}, {3, 3, 3, 3}}}}).get_vector()};
+
+    // output data shape (1,)
+    auto expected_output = ov::test::NDArray<float, 4>({{{{12}}}}).get_vector();
+
+    auto test_case = ov::test::TestCase(model, s_device);
+    test_case.add_multiple_inputs(inputs);
+    test_case.add_expected_output(expected_output);
+    test_case.run();
+}
+
+OPENVINO_TEST(${BACKEND_NAME}, onnx_model_reduce_l2_13) {
+    auto model = convert_model("reduce_l2_13.onnx");
+
+    // input data shape (1, 1, 4, 4)
+    Inputs inputs{
+        ov::test::NDArray<float, 4>({{{{4, 4, 4, 4}, {4, 4, 4, 4}, {4, 4, 4, 4}, {4, 4, 4, 4}}}}).get_vector()};
+
+    // output data shape (1,)
+    auto expected_output = ov::test::NDArray<float, 4>({{{{16}}}}).get_vector();
+
+    auto test_case = ov::test::TestCase(model, s_device);
+    test_case.add_multiple_inputs(inputs);
+    test_case.add_expected_output(expected_output);
+    test_case.run();
+}
+
+OPENVINO_TEST(${BACKEND_NAME}, onnx_model_reduce_l2_18) {
+    auto model = convert_model("reduce_l2_18.onnx");
+
+    // input data shape (1, 1, 4, 4)
+    Inputs inputs{
+        ov::test::NDArray<float, 4>({{{{5, 5, 5, 5}, {5, 5, 5, 5}, {5, 5, 5, 5}, {5, 5, 5, 5}}}}).get_vector()};
+
+    // output data shape (1,)
+    auto expected_output = ov::test::NDArray<float, 4>({{{{20}}}}).get_vector();
+
+    auto test_case = ov::test::TestCase(model, s_device);
+    test_case.add_multiple_inputs(inputs);
+    test_case.add_expected_output(expected_output);
+    test_case.run();
+}
+
 OPENVINO_TEST(${BACKEND_NAME}, onnx_model_reduce_max) {
     auto model = convert_model("reduce_max.onnx");
 
diff --git a/src/frontends/onnx/tests/tests_python/test_backend.py b/src/frontends/onnx/tests/tests_python/test_backend.py
index a3df52ef064779..fe1e87a2652dc7 100644
--- a/src/frontends/onnx/tests/tests_python/test_backend.py
+++ b/src/frontends/onnx/tests/tests_python/test_backend.py
@@ -466,12 +466,6 @@ def expect_fail(test_case_path, xfail):  # type: (str) -> None
         "OnnxBackendNodeModelTest.test_reduce_l1_keep_dims_random_cpu",
         "OnnxBackendNodeModelTest.test_reduce_l1_negative_axes_keep_dims_example_cpu",
         "OnnxBackendNodeModelTest.test_reduce_l1_negative_axes_keep_dims_random_cpu",
-        "OnnxBackendNodeModelTest.test_reduce_l2_do_not_keepdims_example_cpu",
-        "OnnxBackendNodeModelTest.test_reduce_l2_do_not_keepdims_random_cpu",
-        "OnnxBackendNodeModelTest.test_reduce_l2_keep_dims_example_cpu",
-        "OnnxBackendNodeModelTest.test_reduce_l2_keep_dims_random_cpu",
-        "OnnxBackendNodeModelTest.test_reduce_l2_negative_axes_keep_dims_example_cpu",
-        "OnnxBackendNodeModelTest.test_reduce_l2_negative_axes_keep_dims_random_cpu",
         "OnnxBackendNodeModelTest.test_reduce_log_sum_asc_axes_cpu",
         "OnnxBackendNodeModelTest.test_reduce_log_sum_asc_axes_expanded_cpu",
         "OnnxBackendNodeModelTest.test_reduce_log_sum_desc_axes_cpu",
@@ -696,7 +690,6 @@ def expect_fail(test_case_path, xfail):  # type: (str) -> None
     (
         xfail_issue_125493,
         "OnnxBackendNodeModelTest.test_reduce_l1_empty_set_cpu",
-        "OnnxBackendNodeModelTest.test_reduce_l2_empty_set_cpu",
        "OnnxBackendNodeModelTest.test_reduce_log_sum_exp_empty_set_cpu",
         "OnnxBackendNodeModelTest.test_reduce_min_empty_set_cpu",
         "OnnxBackendNodeModelTest.test_reduce_prod_empty_set_cpu",