From 8f31331dcee62f8123b1bca76064a8a34f95c310 Mon Sep 17 00:00:00 2001 From: Cavus Mustafa Date: Sun, 30 Jun 2024 18:05:36 -0700 Subject: [PATCH 01/50] Using helper function to create concat in expand translation --- src/frontends/pytorch/src/op/expand.cpp | 23 +---------------------- 1 file changed, 1 insertion(+), 22 deletions(-) diff --git a/src/frontends/pytorch/src/op/expand.cpp b/src/frontends/pytorch/src/op/expand.cpp index bf7a1b6ed874ee..ac8364eee4165b 100644 --- a/src/frontends/pytorch/src/op/expand.cpp +++ b/src/frontends/pytorch/src/op/expand.cpp @@ -49,28 +49,7 @@ OutputVector translate_expand_fx(const NodeContext& context) { auto x = context.get_input(0); std::vector shape_vec; if (context.get_input_type(1).is()) { - std::deque> list_elems; - for (size_t i = 1; i < num_inputs; i++) { - if (context.get_input_type(i).as().element_type.is()) { - auto const_val = context.const_input(i); - std::vector dim_vec; - dim_vec.push_back(const_val); - auto dim_const = ov::op::v0::Constant::create(element::i32, Shape{1}, dim_vec); - list_elems.push_back(dim_const); - } else { - auto converted_dim = context.mark_node( - std::make_shared(context.get_input(static_cast(i)), element::i32)); - if (converted_dim->get_output_partial_shape(0).rank() == 0) { - auto dims_1d_shape = context.mark_node(ov::op::v0::Constant::create(element::i32, Shape{1}, {-1})); - auto reshape_dim = - context.mark_node(std::make_shared(converted_dim, dims_1d_shape, false)); - list_elems.push_back(reshape_dim); - } else { - list_elems.push_back(converted_dim); - } - } - } - auto concat = std::make_shared(OutputVector(list_elems.begin(), list_elems.end()), 0); + auto concat = concat_dims_to_shape(context, 1, num_inputs); return base_expand(context, x, concat); } else { auto x = context.get_input(0); From 724dc3758202fa8d3bf774bec148f2f507241bb2 Mon Sep 17 00:00:00 2001 From: Cavus Mustafa Date: Sun, 30 Jun 2024 18:27:06 -0700 Subject: [PATCH 02/50] Name change for helper function to crate concat 
from list --- src/frontends/pytorch/src/op/expand.cpp | 2 +- src/frontends/pytorch/src/op/full.cpp | 2 +- src/frontends/pytorch/src/op/reshape.cpp | 2 +- src/frontends/pytorch/src/utils.cpp | 2 +- src/frontends/pytorch/src/utils.hpp | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/frontends/pytorch/src/op/expand.cpp b/src/frontends/pytorch/src/op/expand.cpp index ac8364eee4165b..b4ac055336daf9 100644 --- a/src/frontends/pytorch/src/op/expand.cpp +++ b/src/frontends/pytorch/src/op/expand.cpp @@ -49,7 +49,7 @@ OutputVector translate_expand_fx(const NodeContext& context) { auto x = context.get_input(0); std::vector shape_vec; if (context.get_input_type(1).is()) { - auto concat = concat_dims_to_shape(context, 1, num_inputs); + auto concat = concat_list_from_inputs(context, 1, num_inputs); return base_expand(context, x, concat); } else { auto x = context.get_input(0); diff --git a/src/frontends/pytorch/src/op/full.cpp b/src/frontends/pytorch/src/op/full.cpp index d5ccbbf3a18811..7877f20ae10c0a 100644 --- a/src/frontends/pytorch/src/op/full.cpp +++ b/src/frontends/pytorch/src/op/full.cpp @@ -79,7 +79,7 @@ OutputVector translate_full_fx(const NodeContext& context) { num_inputs_check(context, 2, num_inputs); ov::Output sizes; if (context.get_input_type(0).is()) { - sizes = concat_dims_to_shape(context, 0, num_inputs - 1); + sizes = concat_list_from_inputs(context, 0, num_inputs - 1); } else { sizes = context.get_input(0); } diff --git a/src/frontends/pytorch/src/op/reshape.cpp b/src/frontends/pytorch/src/op/reshape.cpp index bdaa95727bfb30..1a6b3008883c0c 100644 --- a/src/frontends/pytorch/src/op/reshape.cpp +++ b/src/frontends/pytorch/src/op/reshape.cpp @@ -31,7 +31,7 @@ OutputVector translate_reshape_fx(const NodeContext& context) { num_inputs_check(context, 2, num_inputs); std::vector shape_vec; if (context.get_input_type(1).is()) { - auto concat = concat_dims_to_shape(context, 1, num_inputs); + auto concat = concat_list_from_inputs(context, 1, 
num_inputs); auto reshape = std::make_shared(context.get_input(0), concat, true); return {context.mark_node(reshape)}; } else { diff --git a/src/frontends/pytorch/src/utils.cpp b/src/frontends/pytorch/src/utils.cpp index eb94ac3a88a710..88bd8701e7ab8c 100644 --- a/src/frontends/pytorch/src/utils.cpp +++ b/src/frontends/pytorch/src/utils.cpp @@ -555,7 +555,7 @@ Output masked_fill(ov::pass::NodeRegistry& rg, return rg.make(bool_mask, _value, data); } -Output concat_dims_to_shape(const NodeContext& context, size_t begin, size_t end) { +Output concat_list_from_inputs(const NodeContext& context, size_t begin, size_t end) { OutputVector list_elems; for (size_t i = begin; i < end; i++) { if (context.get_input_type(i).as().element_type.is()) { diff --git a/src/frontends/pytorch/src/utils.hpp b/src/frontends/pytorch/src/utils.hpp index 4f047e7e036422..68981cfc237e11 100644 --- a/src/frontends/pytorch/src/utils.hpp +++ b/src/frontends/pytorch/src/utils.hpp @@ -119,7 +119,7 @@ Output masked_fill(ov::pass::NodeRegistry& rg, const Output& mask, const Output& value); -Output concat_dims_to_shape(const NodeContext& context, size_t begin, size_t end); +Output concat_list_from_inputs(const NodeContext& context, size_t begin, size_t end); namespace op { template From 712eaea89fad24e50e82e5f3b458d815b5c2cbb6 Mon Sep 17 00:00:00 2001 From: Cavus Mustafa Date: Sun, 30 Jun 2024 19:53:54 -0700 Subject: [PATCH 03/50] Updated concat_list_from_inputs to use int64 --- src/frontends/pytorch/src/utils.cpp | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/src/frontends/pytorch/src/utils.cpp b/src/frontends/pytorch/src/utils.cpp index 88bd8701e7ab8c..edb138d9b15661 100644 --- a/src/frontends/pytorch/src/utils.cpp +++ b/src/frontends/pytorch/src/utils.cpp @@ -559,15 +559,21 @@ Output concat_list_from_inputs(const NodeContext& context, size_t begin, s OutputVector list_elems; for (size_t i = begin; i < end; i++) { if 
(context.get_input_type(i).as().element_type.is()) { - auto const_val = context.const_input(i); - std::vector dim_vec; + auto const_val = context.const_input(i); + std::vector dim_vec; dim_vec.push_back(const_val); - auto dim_const = ov::op::v0::Constant::create(element::i32, Shape{1}, dim_vec); + auto dim_const = ov::op::v0::Constant::create(element::i64, Shape{1}, dim_vec); list_elems.push_back(dim_const); } else { - auto converted_dim = context.mark_node( - std::make_shared(context.get_input(static_cast(i)), element::i32)); - list_elems.push_back(converted_dim); + auto input_dim = context.get_input(static_cast(i)); + if (input_dim.get_partial_shape().rank() == 0) { + auto dims_1d_shape = context.mark_node(ov::op::v0::Constant::create(element::i32, Shape{1}, {-1})); + auto reshape_dim = + context.mark_node(std::make_shared(input_dim, dims_1d_shape, false)); + list_elems.push_back(reshape_dim); + } else { + list_elems.push_back(input_dim); + } } } auto concat = std::make_shared(list_elems, 0); From a35588f59172314b680308670514d85dedeeb5e7 Mon Sep 17 00:00:00 2001 From: Cavus Mustafa Date: Sun, 30 Jun 2024 23:16:33 -0700 Subject: [PATCH 04/50] Arange input check updated --- src/frontends/pytorch/src/op/arange.cpp | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/src/frontends/pytorch/src/op/arange.cpp b/src/frontends/pytorch/src/op/arange.cpp index 8f03804f819596..fcd907ca4958a6 100644 --- a/src/frontends/pytorch/src/op/arange.cpp +++ b/src/frontends/pytorch/src/op/arange.cpp @@ -109,10 +109,17 @@ OutputVector translate_arange_fx(const NodeContext& context) { if (context.has_attribute("dtype")) { dtype = context.get_attribute("dtype"); } - if (end.get_partial_shape().rank().is_dynamic() || - (end.get_partial_shape().rank().is_static() && end.get_partial_shape().rank().get_length() == 1)) { - end = context.mark_node(std::make_shared(end, zero)); - } + auto input_squeeze = [&context] (ov::Output input) { + if 
(input.get_partial_shape().rank().is_dynamic() || + (input.get_partial_shape().rank().is_static() && input.get_partial_shape().rank().get_length() == 1)) { + auto zero = context.mark_node(v0::Constant::create(element::i32, Shape{}, {0})); + input = context.mark_node(std::make_shared(input, zero)); + } + return input; + }; + start = input_squeeze(start); + end = input_squeeze(end); + step = input_squeeze(step); auto range = context.mark_node(std::make_shared(start, end, step, dtype)); if (!context.has_attribute("dtype")) { range = context.mark_node(std::make_shared(range, context.get_input(0))); From db2a8c3a0e155df3425664cd22f74a275b18fe57 Mon Sep 17 00:00:00 2001 From: Cavus Mustafa Date: Sun, 30 Jun 2024 23:37:35 -0700 Subject: [PATCH 05/50] Code formating fix for arange translation --- src/frontends/pytorch/src/op/arange.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/frontends/pytorch/src/op/arange.cpp b/src/frontends/pytorch/src/op/arange.cpp index fcd907ca4958a6..c4b49602114f99 100644 --- a/src/frontends/pytorch/src/op/arange.cpp +++ b/src/frontends/pytorch/src/op/arange.cpp @@ -109,7 +109,7 @@ OutputVector translate_arange_fx(const NodeContext& context) { if (context.has_attribute("dtype")) { dtype = context.get_attribute("dtype"); } - auto input_squeeze = [&context] (ov::Output input) { + auto input_squeeze = [&context](ov::Output input) { if (input.get_partial_shape().rank().is_dynamic() || (input.get_partial_shape().rank().is_static() && input.get_partial_shape().rank().get_length() == 1)) { auto zero = context.mark_node(v0::Constant::create(element::i32, Shape{}, {0})); From 004b34344013e3517983639835ab64a58727fd14 Mon Sep 17 00:00:00 2001 From: Cavus Mustafa Date: Tue, 2 Jul 2024 01:50:13 -0700 Subject: [PATCH 06/50] Dynamic expand temp. 
fix for expand being last op --- tests/layer_tests/pytorch_tests/test_expand.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/layer_tests/pytorch_tests/test_expand.py b/tests/layer_tests/pytorch_tests/test_expand.py index ff9640ef7068cf..3a5a964d8e5b9a 100644 --- a/tests/layer_tests/pytorch_tests/test_expand.py +++ b/tests/layer_tests/pytorch_tests/test_expand.py @@ -128,8 +128,9 @@ def __init__(self, dims): super(aten_expand, self).__init__() self.dims = dims + # TODO: Remove the add op after fixing the issue with expand being the last node def forward(self, x, dym): - return x.expand((self.dims+(dym,))) + return torch.add(x.expand((self.dims+(dym,))), 0) ref_net = None From f28fe6acfdfe28c67f670edb2597219a6c837f1d Mon Sep 17 00:00:00 2001 From: Taylor Yeonbok Lee Date: Thu, 4 Jul 2024 01:05:27 +0000 Subject: [PATCH 07/50] [GPU] Horizontal fusion for FC (#24991) ### Details: - Fuse QKV FCs to one FC ### Tickets: - 142815 --- src/core/reference/src/op/concat.cpp | 5 +- .../intel_gpu/runtime/debug_configuration.hpp | 1 + .../transformations/fc_horizontal_fusion.cpp | 206 +++++++++ .../transformations/fc_horizontal_fusion.hpp | 19 + .../src/plugin/transformations_pipeline.cpp | 15 +- .../src/runtime/debug_configuration.cpp | 5 +- .../dynamic/dynamic_fc_horizontal_fusion.cpp | 393 ++++++++++++++++++ .../horizontal_fc_fusion_test.cpp | 167 ++++++++ 8 files changed, 807 insertions(+), 4 deletions(-) create mode 100644 src/plugins/intel_gpu/src/plugin/transformations/fc_horizontal_fusion.cpp create mode 100644 src/plugins/intel_gpu/src/plugin/transformations/fc_horizontal_fusion.hpp create mode 100644 src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/dynamic_fc_horizontal_fusion.cpp create mode 100644 src/plugins/intel_gpu/tests/unit/transformations/horizontal_fc_fusion_test.cpp diff --git a/src/core/reference/src/op/concat.cpp b/src/core/reference/src/op/concat.cpp index 124c16b25af672..d3fe955a526457 100644 --- 
a/src/core/reference/src/op/concat.cpp +++ b/src/core/reference/src/op/concat.cpp @@ -54,9 +54,10 @@ void concat(const std::vector& args, size_t out_offset = 0; for (size_t step = 0; step < steps; ++step) { for (size_t in_index = 0; in_index < args.size(); ++in_index) { - const size_t size = shape_sizes[in_index] / steps; + size_t size = shape_sizes[in_index] / steps; const size_t in_offset = step * size; - + if (elem_type == ov::element::u4 || elem_type == ov::element::i4) + size /= 2; copy_func(args[in_index], out, in_offset, out_offset, size, elem_size); out_offset += size; diff --git a/src/plugins/intel_gpu/include/intel_gpu/runtime/debug_configuration.hpp b/src/plugins/intel_gpu/include/intel_gpu/runtime/debug_configuration.hpp index d9a30e3244b4ae..589b5dd96cb055 100644 --- a/src/plugins/intel_gpu/include/intel_gpu/runtime/debug_configuration.hpp +++ b/src/plugins/intel_gpu/include/intel_gpu/runtime/debug_configuration.hpp @@ -141,6 +141,7 @@ class debug_configuration { int disable_primitive_fusing; // Disable primitive fusing int disable_fake_alignment; // Disable fake alignment int enable_dynamic_quantize; // Enable Dynamic quantization for fully connected primitive + int disable_horizontal_fc_fusion; // Disable fc horizontal fusion std::set dump_iteration; // Dump n-th execution of network. 
std::vector load_layers_raw_dump; // List of layers to load dumped raw binary and filenames static const debug_configuration *get_instance(); diff --git a/src/plugins/intel_gpu/src/plugin/transformations/fc_horizontal_fusion.cpp b/src/plugins/intel_gpu/src/plugin/transformations/fc_horizontal_fusion.cpp new file mode 100644 index 00000000000000..14bb5596b1cc12 --- /dev/null +++ b/src/plugins/intel_gpu/src/plugin/transformations/fc_horizontal_fusion.cpp @@ -0,0 +1,206 @@ +// Copyright (C) 2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "fc_horizontal_fusion.hpp" + +#include "intel_gpu/op/fully_connected.hpp" +#include "intel_gpu/op/fully_connected_compressed.hpp" + +#include "openvino/core/rt_info.hpp" +#include +#include "openvino/pass/pattern/op/or.hpp" +#include "openvino/pass/pattern/op/wrap_type.hpp" +#include "transformations/utils/utils.hpp" +#include "intel_gpu/op/placeholder.hpp" + +namespace ov { +namespace intel_gpu { + +FullyConnectedHorizontalFusion::FullyConnectedHorizontalFusion() { + using namespace ov::pass::pattern; + + auto is_target_pattern = [](const Output& output) { + // Currently this pass targets only compressed FCs (QKV) on dynamic generative models + // inputs: input, weight, bias, scale, [zp] + // Bias/scale/zp are constant or none + // if it is not constant, the only allowed cases are Constant => convert + // All FCs have same # of valid inputs (e.g., if one of the fc has zp, all fcs have zp) + + auto is_constant = [](const std::shared_ptr node) { + if (std::dynamic_pointer_cast(node)) + return true; + if (std::dynamic_pointer_cast(node) && std::dynamic_pointer_cast(node->get_input_node_shared_ptr(0))) + return true; + if (std::dynamic_pointer_cast(node) && std::dynamic_pointer_cast(node->get_input_node_shared_ptr(0))) + return true; + return false; + }; + auto is_placeholder = [](const std::shared_ptr node) { + return std::dynamic_pointer_cast(node); + }; + // Three FCs connected to the same input + const 
int num_fcs_to_fuse = 3; + const auto& fc = std::dynamic_pointer_cast(output.get_node_shared_ptr()); + const auto& input = fc->get_input_node_shared_ptr(0); + if (!fc->get_input_partial_shape(0).is_dynamic()) + return false; + if (input->get_users().size() < num_fcs_to_fuse) + return false; + size_t user_fc_count = 0; + int32_t nodes_with_bias = 0; + int32_t nodes_with_zp = 0; + for (const auto& u : input->get_users()) { + const auto& fc_user = std::dynamic_pointer_cast(u); + if (!fc_user) + continue; + auto num_inputs = fc_user->inputs().size(); + if (num_inputs >= 5) + nodes_with_zp++; + for (size_t i = 2; i < num_inputs; ++i) { + const auto& fc_input = fc_user->get_input_node_shared_ptr(i); + if (!is_constant(fc_input) && !is_placeholder(fc_input)) + return false; + if (i == 2 && !is_placeholder(fc_input)) { + nodes_with_bias++; + } + } + user_fc_count++; + } + return (user_fc_count == num_fcs_to_fuse) && (nodes_with_bias == num_fcs_to_fuse || nodes_with_bias == 0) && + (nodes_with_zp == num_fcs_to_fuse || nodes_with_zp == 0); + }; + + auto target_fc = wrap_type(is_target_pattern); + + ov::matcher_pass_callback callback = [=](Matcher& m) { + const auto& pattern_map = m.get_pattern_value_map(); + auto m_fc = pattern_map.at(target_fc).get_node_shared_ptr(); + auto input_node = m_fc->get_input_node_shared_ptr(0); + std::vector> fc_nodes; + ov::NodeVector weight_nodes; + ov::NodeVector scale_nodes; + ov::NodeVector bias_nodes; + ov::NodeVector zp_nodes; + for (auto user : input_node->get_users()) { + auto fc_user = std::dynamic_pointer_cast(user); + if (fc_user) { + OPENVINO_ASSERT(fc_user->inputs().size() >= 4, "Compressed FC should have at least 4 inputs"); + fc_nodes.push_back(fc_user); + weight_nodes.push_back(fc_user->get_input_node_shared_ptr(1)); + if (!std::dynamic_pointer_cast(fc_user->get_input_node_shared_ptr(2))) + bias_nodes.push_back(fc_user->get_input_node_shared_ptr(2)); + scale_nodes.push_back(fc_user->get_input_node_shared_ptr(3)); + if 
(fc_user->inputs().size() > 4) + zp_nodes.push_back(fc_user->get_input_node_shared_ptr(4)); + } + } + auto weight_dtype = fc_nodes[0]->get_input_element_type(1); + auto k_size = fc_nodes[0]->get_input_shape(1)[fc_nodes[0]->get_input_shape(1).size() - 1]; + std::vector orig_n_sizes; + // merge weights, scale, zp + for (auto fc : fc_nodes) { + if (k_size != fc->get_input_shape(1)[fc->get_input_shape(1).size() - 1]) + return false; + if (weight_dtype != fc->get_input_element_type(1)) + return false; + orig_n_sizes.push_back(fc->get_input_shape(1)[fc->get_input_shape(1).size() - 2]); + } + auto weight_nodes_as_output_vector = ov::OutputVector{weight_nodes[0], weight_nodes[1], weight_nodes[2]}; + auto fused_weight = std::make_shared(weight_nodes_as_output_vector, 0); + fused_weight->set_friendly_name(weight_nodes[0]->get_friendly_name() + "_fused"); + ov::copy_runtime_info({weight_nodes[0], weight_nodes[1], weight_nodes[2]}, fused_weight); + + auto scale_nodes_as_output_vector = ov::OutputVector{scale_nodes[0], scale_nodes[1], scale_nodes[2]}; + auto fused_scale = std::make_shared(scale_nodes_as_output_vector, 0); + fused_scale->set_friendly_name(scale_nodes[0]->get_friendly_name() + "_fused"); + ov::copy_runtime_info({scale_nodes[0], scale_nodes[1], scale_nodes[2]}, fused_scale); + + std::shared_ptr fused_bias; + if (bias_nodes.size() == 3) { + auto bias_nodes_as_output_vector = ov::OutputVector{bias_nodes[0], bias_nodes[1], bias_nodes[2]}; + fused_bias = std::make_shared(bias_nodes_as_output_vector, 0); + fused_bias->set_friendly_name(bias_nodes[0]->get_friendly_name() + "_fused"); + ov::copy_runtime_info({bias_nodes[0], bias_nodes[1], bias_nodes[2]}, fused_bias); + } else { + fused_bias = std::make_shared(); + } + + std::shared_ptr fused_zps; + if (zp_nodes.size() > 0) { + // scalar zp + auto zp_shape = zp_nodes[0]->get_output_shape(0); + bool is_scalar = (ov::shape_size(zp_nodes[0]->get_output_shape(0)) == 1); + int32_t scalar_zp_val = 0; + if (is_scalar) { + if 
(auto zp_const = std::dynamic_pointer_cast(zp_nodes[0])) { + scalar_zp_val = zp_const->cast_vector()[0]; + } else if (auto zp_convert = std::dynamic_pointer_cast(zp_nodes[0])) { + auto zp_const = std::dynamic_pointer_cast(zp_convert->get_input_node_shared_ptr(0)); + scalar_zp_val = zp_const->cast_vector()[0]; + } + fused_zps = zp_nodes[0]; + } + if (is_scalar) { + for (size_t i = 1; i < zp_nodes.size(); ++i) { + bool current_is_scalar = (ov::shape_size(zp_nodes[i]->get_output_shape(0)) == 1); + if (!current_is_scalar) + return false; + // validate all zp values are same + int32_t cur_zp_val = 0; + if (auto zp_const = std::dynamic_pointer_cast(zp_nodes[i])) { + cur_zp_val = zp_const->cast_vector()[0]; + } else if (auto zp_convert = std::dynamic_pointer_cast(zp_nodes[i])) { + auto zp_const = + std::dynamic_pointer_cast(zp_convert->get_input_node_shared_ptr(0)); + cur_zp_val = zp_const->cast_vector()[0]; + } else { + OPENVINO_ASSERT("Unsupported zp input node for FC horizontal fusion"); + } + if (cur_zp_val != scalar_zp_val) + return false; + } + } else { + auto zp_nodes_as_output_vector = ov::OutputVector{zp_nodes[0], zp_nodes[1], zp_nodes[2]}; + fused_zps = std::make_shared(zp_nodes_as_output_vector, 0); + fused_zps->set_friendly_name(zp_nodes[0]->get_friendly_name() + "_fused"); + } + } + // Create new fc with merged weights, bias, scale, zp + std::shared_ptr new_fc; + if (fused_zps) + new_fc = std::make_shared(input_node, fused_weight, fused_bias, fused_scale, fused_zps); + else + new_fc = std::make_shared(input_node, fused_weight, fused_bias, fused_scale); + + auto new_fc_name = fc_nodes[0]->get_friendly_name() + "_fused"; + new_fc->set_friendly_name(new_fc_name); + copy_runtime_info({fc_nodes[0], fc_nodes[1], fc_nodes[2]}, new_fc); + + // Split output and connect to the orig users + auto split_name = fc_nodes[0]->get_friendly_name() + "_split"; + auto axis_const = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{1}, 
{new_fc->get_output_partial_shape(0).size() - 1}); + auto split_const = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{3}, orig_n_sizes); + auto output_split = std::make_shared(new_fc, axis_const, split_const); + copy_runtime_info({fc_nodes[0], fc_nodes[1], fc_nodes[2]}, output_split); + output_split->set_friendly_name(split_name); + for (size_t i = 0; i < fc_nodes.size(); ++i) { + auto org_fc = fc_nodes[i]; + for (auto u : org_fc->get_users()) { + for (size_t idx = 0; idx < u->inputs().size(); ++idx) { + if (u->get_input_node_shared_ptr(idx) == org_fc) { + u->input(idx).replace_source_output(output_split->output(i)); + } + } + } + org_fc->clear_control_dependencies(); + } + return true; + }; + + auto m = std::make_shared(target_fc, "FullyConnectedHorizontalFusion"); + this->register_matcher(m, callback); +} + +} // namespace intel_gpu +} // namespace ov diff --git a/src/plugins/intel_gpu/src/plugin/transformations/fc_horizontal_fusion.hpp b/src/plugins/intel_gpu/src/plugin/transformations/fc_horizontal_fusion.hpp new file mode 100644 index 00000000000000..b6a852354bad8d --- /dev/null +++ b/src/plugins/intel_gpu/src/plugin/transformations/fc_horizontal_fusion.hpp @@ -0,0 +1,19 @@ +// Copyright (C) 2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/pass/graph_rewrite.hpp" + +namespace ov { +namespace intel_gpu { + +class FullyConnectedHorizontalFusion: public ov::pass::MatcherPass { +public: + OPENVINO_RTTI("FullyConnectedHorizontalFusion", "0"); + FullyConnectedHorizontalFusion(); +}; + +} // namespace intel_gpu +} // namespace ov diff --git a/src/plugins/intel_gpu/src/plugin/transformations_pipeline.cpp b/src/plugins/intel_gpu/src/plugin/transformations_pipeline.cpp index 5e0e2da26389b4..a57f3da6e9765c 100644 --- a/src/plugins/intel_gpu/src/plugin/transformations_pipeline.cpp +++ b/src/plugins/intel_gpu/src/plugin/transformations_pipeline.cpp @@ -58,6 +58,7 @@ #include 
"plugin/transformations/convert_fc_to_compressed.hpp" #include "plugin/transformations/convert_matmul_to_fc.hpp" #include "plugin/transformations/fc_convert_fusion.hpp" +#include "plugin/transformations/fc_horizontal_fusion.hpp" #include "plugin/transformations/kv_cache_fusion.hpp" #include "plugin/transformations/move_fc_reshape_to_weights.hpp" #include "plugin/transformations/bcast_and_pad_zp_buffers.hpp" @@ -788,12 +789,24 @@ void TransformationsPipeline::apply(std::shared_ptr func) { manager.register_pass(); manager.register_pass(); manager.register_pass(device_info.supports_immad); + + bool disable_horizontal_fc_fusion = false; + GPU_DEBUG_GET_INSTANCE(debug_config); + GPU_DEBUG_IF(debug_config->disable_horizontal_fc_fusion == 1) + disable_horizontal_fc_fusion = true; + + if (!disable_horizontal_fc_fusion) + manager.register_pass(); if (device_info.supports_immad) { // For OneDNN, ZP should not be folded for FC. But still, ZP should be folded for Gather. // Therefore, run MarkDequantizationSubgraph again to fold ZP constant. 
manager.register_pass(supported_woq_types, true); - manager.register_pass(); + if (disable_horizontal_fc_fusion) + manager.register_pass(); } + if (!disable_horizontal_fc_fusion) + manager.register_pass(); + manager.register_pass(); auto pass_config = manager.get_pass_config(); pass_config->set_callback([=](const_node_ptr& root) -> bool { diff --git a/src/plugins/intel_gpu/src/runtime/debug_configuration.cpp b/src/plugins/intel_gpu/src/runtime/debug_configuration.cpp index 944160a59a60f5..9a34d77ae47f3e 100644 --- a/src/plugins/intel_gpu/src/runtime/debug_configuration.cpp +++ b/src/plugins/intel_gpu/src/runtime/debug_configuration.cpp @@ -182,6 +182,7 @@ static void print_help_messages() { message_list.emplace_back("OV_GPU_DisablePrimitiveFusing", "Disable primitive fusing"); message_list.emplace_back("OV_GPU_DisableFakeAlignment", "Disable fake alignment"); message_list.emplace_back("OV_GPU_EnableDynamicQuantize", "Enable Dynamic quantization for fully connected primitive"); + message_list.emplace_back("OV_GPU_DisableHorizontalFCFusion", "Disable horizontal fc fusion"); message_list.emplace_back("OV_GPU_DumpIteration", "Dump n-th execution of network, separated by space."); message_list.emplace_back("OV_GPU_MemPreallocationOptions", "Controls buffer pre-allocation feature. 
Expects 4 values separated by space in " "the following order: number of iterations for pre-allocation(int), max size of single iteration in bytes(int), " @@ -247,7 +248,8 @@ debug_configuration::debug_configuration() , disable_runtime_skip_reorder(0) , disable_primitive_fusing(0) , disable_fake_alignment(0) - , enable_dynamic_quantize(0) { + , enable_dynamic_quantize(0) + , disable_horizontal_fc_fusion(0) { #ifdef GPU_DEBUG_CONFIG get_gpu_debug_env_var("Help", help); get_common_debug_env_var("Verbose", verbose); @@ -299,6 +301,7 @@ debug_configuration::debug_configuration() get_gpu_debug_env_var("DisablePrimitiveFusing", disable_primitive_fusing); get_gpu_debug_env_var("DisableFakeAlignment", disable_fake_alignment); get_gpu_debug_env_var("EnableDynamicQuantize", enable_dynamic_quantize); + get_gpu_debug_env_var("DisableHorizontalFCFusion", disable_horizontal_fc_fusion); std::string dump_iteration_str; get_gpu_debug_env_var("DumpIteration", dump_iteration_str); std::string mem_preallocation_params_str; diff --git a/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/dynamic_fc_horizontal_fusion.cpp b/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/dynamic_fc_horizontal_fusion.cpp new file mode 100644 index 00000000000000..4aad7f554a0dfa --- /dev/null +++ b/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/dynamic_fc_horizontal_fusion.cpp @@ -0,0 +1,393 @@ +// Copyright (C) 2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "common_test_utils/ov_tensor_utils.hpp" +#include "shared_test_classes/base/ov_subgraph.hpp" +#include "transformations/rt_info/decompression.hpp" + +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/matmul.hpp" +#include "openvino/op/reshape.hpp" +#include "openvino/op/convert.hpp" +#include "openvino/op/subtract.hpp" +#include "openvino/op/transpose.hpp" + +namespace { +using ov::test::InputShape; + 
+struct ShapeParams { + ShapeParams() = default; + ShapeParams(InputShape data_shape, std::vector weights_shapes, int weights_group_size = -1) + : data_shape(std::move(data_shape)), + weights_shapes(std::move(weights_shapes)), + weights_group_size(weights_group_size) {} + + InputShape data_shape; + std::vector weights_shapes; + // Decompression group size. If the value is equal to -1, ordinary decompression is used + int weights_group_size; +}; + +using FullyConnectedHorizontalFusionParams = std::tuple; // has biahs + + +class FullyConnectedHorizontalFusion : public testing::WithParamInterface, + virtual public ov::test::SubgraphBaseTest { +public: + static std::string get_test_case_name(testing::TestParamInfo obj) { + ShapeParams shape_params; + ov::element::Type weights_precision; + ov::element::Type activations_precision; + bool transpose; + bool decompression_sub; + bool reshape_on_decompression; + bool per_tensor_zp; + bool has_bias; + + std::tie(shape_params, + weights_precision, + activations_precision, + transpose, + decompression_sub, + reshape_on_decompression, + per_tensor_zp, + has_bias) = obj.param; + + std::ostringstream result; + result << "data_shape="; + result << ov::test::utils::partialShape2str({shape_params.data_shape.first}) << "_"; + for (const auto& actual_shape : shape_params.data_shape.second) { + result << ov::test::utils::partialShape2str({actual_shape}) << "_"; + } + result << "_" << "weights1_shape=" << shape_params.weights_shapes[0] << "_"; + result << "_" << "weights2_shape=" << shape_params.weights_shapes[1] << "_"; + result << "_" << "weights3_shape=" << shape_params.weights_shapes[2] << "_"; + result << "group_size=" << shape_params.weights_group_size << "_"; + result << "weights_precision=" << weights_precision << "_"; + result << "activations_precision=" << activations_precision << "_"; + result << "transpose_weights=" << transpose << "_"; + result << "decompression_subtract=" << decompression_sub << "_"; + result << 
"reshape_on_decompression=" << reshape_on_decompression << "_"; + result << "per_tensor_zp=" << per_tensor_zp; + result << "has_bias=" << has_bias; + + return result.str(); + } + +protected: + std::shared_ptr init_compressed_weights_subgraph(const ov::Shape& weights_shape, + const int group_size, + const ov::element::Type data_precision, + const ov::element::Type weights_precision, + const bool transpose_weights, + const bool add_subtract, + const bool reshape_on_decompression_constant, + const bool per_tensor_zp) { + auto transpose_if_necessary = [&](const ov::Shape& shape) { + auto result_shape = shape; + if (transpose_weights) + std::swap(*result_shape.rbegin(), *(result_shape.rbegin() + 1)); + return result_shape; + }; + + const bool group_decompression = group_size != -1; + // Weights has shape [I, O], where + // I - input channels + // O - output channels + // In case of group decompression, input channels dimension is split into 2: I -> [N, G], where + // N - number of groups + // G - group size + auto transformed_weights_shape = transpose_if_necessary(weights_shape); + if (group_decompression) { + OPENVINO_ASSERT(weights_shape[0] % group_size == 0, + "Weights output channels count (", + weights_shape[0], + ") must be divisible by decompression group size (", + group_size, + ")."); + auto in_channel_idx = + transpose_weights ? 
transformed_weights_shape.size() - 1 : transformed_weights_shape.size() - 2; + transformed_weights_shape[in_channel_idx] = weights_shape[0] / group_size; + transformed_weights_shape.insert(transformed_weights_shape.begin() + in_channel_idx + 1, group_size); + } + auto weights_tensor = ov::test::utils::create_and_fill_tensor(weights_precision, transformed_weights_shape); + auto weights = std::make_shared(weights_tensor); + weights->set_friendly_name("Compressed_weights"); + auto weights_convert = std::make_shared(weights, data_precision); + + std::shared_ptr mul_parent = weights_convert; + auto output_channels = *weights_shape.rbegin(); + + // Decompression constants shape: + // Ordinary decompression: [O, 1] + // Group decompression: [O, N, 1] + ov::Shape scaleshift_target_shape{output_channels}; + scaleshift_target_shape.insert(scaleshift_target_shape.begin(), + group_decompression ? weights_shape[0] / group_size : 1); + scaleshift_target_shape = transpose_if_necessary(scaleshift_target_shape); + if (group_decompression) { + auto in_channel_idx = + transpose_weights ? scaleshift_target_shape.size() - 1 : scaleshift_target_shape.size() - 2; + scaleshift_target_shape.insert(scaleshift_target_shape.begin() + in_channel_idx + 1, 1); + } + + auto scaleshift_const_shape = scaleshift_target_shape; + if (reshape_on_decompression_constant) + scaleshift_const_shape.erase(std::remove(scaleshift_const_shape.begin(), scaleshift_const_shape.end(), 1), + scaleshift_const_shape.end()); + if (add_subtract) { + auto shift_tensor_shape = per_tensor_zp ? 
ov::Shape{1} : scaleshift_const_shape; + auto shift_tensor = ov::test::utils::create_and_fill_tensor(weights_precision, shift_tensor_shape); + if (per_tensor_zp && weights_precision.bitwidth() == 4) { + static_cast(shift_tensor.data())[0] = 0x88; + } + auto shift_const = std::make_shared(shift_tensor); + shift_const->set_friendly_name("shift_const"); + std::shared_ptr shift_convert = + std::make_shared(shift_const, data_precision); + if (reshape_on_decompression_constant && !per_tensor_zp) { + auto shift_reshape_const = ov::op::v0::Constant::create(ov::element::i32, + {scaleshift_target_shape.size()}, + scaleshift_target_shape); + auto shift_reshape = std::make_shared(shift_convert, shift_reshape_const, false); + shift_convert = shift_reshape; + } + mul_parent = std::make_shared(weights_convert, shift_convert); + } + + ov::test::utils::InputGenerateData in_data; + in_data.start_from = -0.5; + in_data.range = 1; + in_data.resolution = 30000; + auto scale_tensor = ov::test::utils::create_and_fill_tensor(data_precision, scaleshift_const_shape, in_data); + for (size_t i = 0; i < scale_tensor.get_size(); i++) { + if (data_precision == ov::element::f16) + scale_tensor.data()[i] /= ov::float16(16.f); + else if (data_precision == ov::element::f32) + scale_tensor.data()[i] /= 16.f; + } + std::shared_ptr scale_const = std::make_shared(scale_tensor); + scale_const->set_friendly_name("scale_const"); + if (reshape_on_decompression_constant) { + auto scale_reshape_const = ov::op::v0::Constant::create(ov::element::i32, + {scaleshift_target_shape.size()}, + scaleshift_target_shape); + auto scale_reshape = std::make_shared(scale_const, scale_reshape_const, false); + scale_const = scale_reshape; + } + std::shared_ptr last_node = std::make_shared(mul_parent, scale_const); + + if (group_decompression) { + auto reshape_target_shape = transpose_weights ? 
std::vector{-1, static_cast(weights_shape[0])} + : std::vector{static_cast(weights_shape[0]), -1}; + auto target_shape_node = + ov::op::v0::Constant::create(ov::element::i32, {reshape_target_shape.size()}, reshape_target_shape); + last_node = std::make_shared(last_node, target_shape_node, false); + } + return last_node; + } + std::shared_ptr init_subgraph(const ov::PartialShape& data_shape, + const std::vector& weights_shapes, + const int group_size, + const ov::element::Type data_precision, + const ov::element::Type weights_precision, + const bool transpose_weights, + const bool add_subtract, + const bool reshape_on_decompression, + const bool per_tensor_zp, + const bool has_bias) { + ov::ParameterVector params{std::make_shared(data_precision, data_shape)}; + const auto weight1 = init_compressed_weights_subgraph(weights_shapes[0], + group_size, + data_precision, + weights_precision, + transpose_weights, + add_subtract, + reshape_on_decompression, + per_tensor_zp); + const auto weight2 = init_compressed_weights_subgraph(weights_shapes[1], + group_size, + data_precision, + weights_precision, + transpose_weights, + add_subtract, + reshape_on_decompression, + per_tensor_zp); + + const auto weight3 = init_compressed_weights_subgraph(weights_shapes[2], + group_size, + data_precision, + weights_precision, + transpose_weights, + add_subtract, + reshape_on_decompression, + per_tensor_zp); + + auto matmul1 = std::make_shared(params[0], weight1, false, transpose_weights); + matmul1->set_friendly_name("fully_connected_1"); + auto matmul2 = std::make_shared(params[0], weight2, false, transpose_weights); + matmul2->set_friendly_name("fully_connected_2"); + auto matmul3 = std::make_shared(params[0], weight3, false, transpose_weights); + matmul3->set_friendly_name("fully_connected_3"); + if (!has_bias) { + auto matmul4 = std::make_shared(matmul1, matmul2, true, false); + matmul4->set_friendly_name("gemm1"); + auto matmul5 = std::make_shared(matmul4, matmul3, true, true); + 
matmul5->set_friendly_name("gemm2"); + return std::make_shared(ov::NodeVector{matmul5}, params, "FCHorizontalFusion"); + } else { + ov::test::utils::InputGenerateData in_data; + in_data.start_from = -0.5; + in_data.range = 1; + in_data.resolution = 30000; + + auto bias1_shape = ov::Shape{1, weights_shapes[0].back()}; + auto bias1_tensor = ov::test::utils::create_and_fill_tensor(data_precision, bias1_shape, in_data); + auto bias1_const = std::make_shared(bias1_tensor); + auto bias_add1 = std::make_shared(matmul1, bias1_const); + bias_add1->set_friendly_name("add1"); + auto bias2_shape = ov::Shape{1, weights_shapes[1].back()}; + auto bias2_tensor = ov::test::utils::create_and_fill_tensor(data_precision, bias2_shape, in_data); + auto bias2_const = std::make_shared(bias2_tensor); + auto bias_add2 = std::make_shared(matmul2, bias2_const); + bias_add2->set_friendly_name("add2"); + auto bias3_shape = ov::Shape{1, weights_shapes[2].back()}; + auto bias3_tensor = ov::test::utils::create_and_fill_tensor(data_precision, bias3_shape, in_data); + auto bias3_const = std::make_shared(bias3_tensor); + auto bias_add3 = std::make_shared(matmul3, bias3_const); + bias_add3->set_friendly_name("add3"); + + auto matmul4 = std::make_shared(bias_add1, bias_add2, true, false); + matmul4->set_friendly_name("gemm1"); + auto matmul5 = std::make_shared(matmul4, bias_add3, true, true); + matmul5->set_friendly_name("gemm2"); + return std::make_shared(ov::NodeVector{matmul5}, params, "FCHorizontalFusion"); + } + } + + void SetUp() override { + targetDevice = ov::test::utils::DEVICE_GPU; + + ShapeParams shape_params; + ov::element::Type weights_precision; + ov::element::Type activations_precision; + bool transpose_weights; + bool decompression_sub; + bool reshape_on_decompression; + bool per_tensor_zp; + bool has_bias; + + std::tie(shape_params, + weights_precision, + activations_precision, + transpose_weights, + decompression_sub, + reshape_on_decompression, + per_tensor_zp, + has_bias) = 
GetParam(); + + init_input_shapes({shape_params.data_shape, {{}, shape_params.weights_shapes}}); + + inType = outType = activations_precision; + function = init_subgraph(inputDynamicShapes[0], + shape_params.weights_shapes, + shape_params.weights_group_size, + activations_precision, + weights_precision, + transpose_weights, + decompression_sub, + reshape_on_decompression, + per_tensor_zp, + has_bias); + + if (activations_precision == ov::element::f16) { + abs_threshold = 1.0f; + } else { + abs_threshold = 1e-4f; + } + } + + void generate_inputs(const std::vector& target_input_static_shapes) override { + inputs.clear(); + const auto& model_inputs = function->inputs(); + for (size_t i = 0; i < model_inputs.size(); ++i) { + const auto& model_input = model_inputs[i]; + ov::test::utils::InputGenerateData in_data; + in_data.start_from = -1; + in_data.range = 2; + in_data.resolution = 10000; + ov::Tensor tensor = ov::test::utils::create_and_fill_tensor(model_input.get_element_type(), + target_input_static_shapes[i], + in_data); + inputs.insert({model_input.get_node_shared_ptr(), tensor}); + } + } + + void check_results() { + const auto& test_param = GetParam(); + ov::element::Type weights_precision = std::get<1>(test_param); + for (const auto& n : compiledModel.get_runtime_model()->get_ordered_ops()) { + if (n->get_friendly_name() == "Compressed_weights") { + ASSERT_EQ(n->get_output_element_type(0), weights_precision); + } + } + } +}; + +TEST_P(FullyConnectedHorizontalFusion, Inference) { + run(); + check_results(); +} + +// const std::vector activations_precisions = {ov::element::f32, ov::element::f16}; +const std::vector activations_precisions = {ov::element::f16}; +const std::vector weights_precisions = {ov::element::u8, ov::element::u4, ov::element::i4}; +const std::vector per_tensor_zp = {true, false}; +const std::vector transpose_weights = {true, false}; + +std::vector weights1 = {{1, 16, 32}, {1, 16, 4}, {1, 16, 32}}; +std::vector weights2 = {{16, 32}, {16, 4}, 
{16, 32}}; +std::vector weights3 = {{28, 24}, {28, 18}, {28, 24}}; + +const std::vector input_shapes = { + {{{-1, -1, -1}, {{1, 4, 16}}}, weights1}, + {{{-1, -1, 16}, {{1, 4, 16}}}, weights2, 4}, + {{{-1, 28}, {{16, 28}}}, weights3, 4}, +}; + +INSTANTIATE_TEST_SUITE_P(smoke_FCHorizontalFusion_no_bias, + FullyConnectedHorizontalFusion, + ::testing::Combine(::testing::ValuesIn(input_shapes), + ::testing::ValuesIn(weights_precisions), + ::testing::ValuesIn(activations_precisions), + ::testing::ValuesIn(transpose_weights), + ::testing::Values(true), + ::testing::Values(true), + ::testing::ValuesIn(per_tensor_zp), + ::testing::Values(false)), + FullyConnectedHorizontalFusion::get_test_case_name); + +INSTANTIATE_TEST_SUITE_P(smoke_FCHorizontalFusion_with_bias, + FullyConnectedHorizontalFusion, + ::testing::Combine(::testing::ValuesIn(input_shapes), + ::testing::ValuesIn(weights_precisions), + ::testing::ValuesIn(activations_precisions), + ::testing::Values(true), + ::testing::Values(true), + ::testing::Values(true), + ::testing::Values(true), + ::testing::Values(true)), + FullyConnectedHorizontalFusion::get_test_case_name); + + +} // namespace diff --git a/src/plugins/intel_gpu/tests/unit/transformations/horizontal_fc_fusion_test.cpp b/src/plugins/intel_gpu/tests/unit/transformations/horizontal_fc_fusion_test.cpp new file mode 100644 index 00000000000000..fababa0c20df38 --- /dev/null +++ b/src/plugins/intel_gpu/tests/unit/transformations/horizontal_fc_fusion_test.cpp @@ -0,0 +1,167 @@ +// Copyright (C) 2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include "common_test_utils/graph_comparator.hpp" +#include "common_test_utils/ov_test_utils.hpp" + +#include +#include + +#include "openvino/core/model.hpp" +#include "openvino/core/node_vector.hpp" +#include "openvino/core/partial_shape.hpp" +#include "openvino/op/parameter.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/concat.hpp" +#include "openvino/op/variadic_split.hpp" 
+#include "openvino/op/reshape.hpp" +#include "openvino/pass/manager.hpp" + +#include +#include "plugin/transformations/fc_horizontal_fusion.hpp" +#include "intel_gpu/op/placeholder.hpp" +#include "intel_gpu/op/fully_connected_compressed.hpp" + +using namespace testing; +using namespace ov::intel_gpu; + +namespace ov { +namespace test { +namespace intel_gpu { + +TEST_F(TransformationTestsF, FullyConnectedHorizontalFusion_no_bias_no_zp) { + std::vector pattern = {7, -1}; + { + auto input = std::make_shared(ov::element::f32, ov::PartialShape{-1, 7, 4096}); + auto weight1 = std::make_shared(ov::element::u4, ov::Shape{1024, 4096}); + weight1->set_friendly_name("weight1_1"); + auto weight2 = std::make_shared(ov::element::u4, ov::Shape{512, 4096}); + weight2->set_friendly_name("weight1_2"); + auto weight3 = std::make_shared(ov::element::u4, ov::Shape{128, 4096}); + weight3->set_friendly_name("weight1_3"); + auto bias1 = std::make_shared(); + auto bias2 = std::make_shared(); + auto bias3 = std::make_shared(); + auto scale1 = std::make_shared(ov::element::f16, ov::Shape{1024, 32}); + auto scale2 = std::make_shared(ov::element::f16, ov::Shape{512, 32}); + auto scale3 = std::make_shared(ov::element::f16, ov::Shape{128, 32}); + auto fc1 = std::make_shared(input, weight1, bias1, scale1); + fc1->set_friendly_name("fc1"); + auto fc2 = std::make_shared(input, weight2, bias2, scale2); + auto fc3 = std::make_shared(input, weight3, bias3, scale3); + auto reshape_pattern = std::make_shared(ov::element::i64, ov::Shape{2}, pattern); + auto reshape1 = std::make_shared(fc1, reshape_pattern, true); + auto reshape2 = std::make_shared(fc2, reshape_pattern, true); + auto reshape3 = std::make_shared(fc3, reshape_pattern, true); + auto result1 = std::make_shared(reshape1); + auto result2 = std::make_shared(reshape2); + auto result3 = std::make_shared(reshape3); + model = std::make_shared(ov::ResultVector{result1, result2, result3}, ov::ParameterVector{input}); + manager.register_pass(); + } + 
{ + auto input = std::make_shared(ov::element::f32, ov::PartialShape{-1, 7, 4096}); + auto weight1 = std::make_shared(ov::element::u4, ov::Shape{1024, 4096}); + weight1->set_friendly_name("weight2_1"); + auto weight2 = std::make_shared(ov::element::u4, ov::Shape{512, 4096}); + weight2->set_friendly_name("weight2_2"); + auto weight3 = std::make_shared(ov::element::u4, ov::Shape{128, 4096}); + weight3->set_friendly_name("weight2_3"); + auto weights = ov::OutputVector{weight1, weight2, weight3}; + auto weight_fused = std::make_shared(weights, 0); + auto bias1 = std::make_shared(); + auto scale1 = std::make_shared(ov::element::f16, ov::Shape{1024, 32}); + auto scale2 = std::make_shared(ov::element::f16, ov::Shape{512, 32}); + auto scale3 = std::make_shared(ov::element::f16, ov::Shape{128, 32}); + auto scales = ov::OutputVector{scale1, scale2, scale3}; + auto scale_fused = std::make_shared(scales, 0); + auto fc_fused = std::make_shared(input, weight_fused, bias1, scale_fused); + auto axis_const = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{1}, {fc_fused->get_output_partial_shape(0).size() - 1}); + std::vector orig_n_sizes = {1024, 512, 128}; + auto split_const = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{3}, orig_n_sizes); + auto split = std::make_shared(fc_fused, axis_const, split_const); + auto reshape_pattern = std::make_shared(ov::element::i64, ov::Shape{2}, pattern); + auto reshape1 = std::make_shared(split->output(0), reshape_pattern, true); + auto reshape2 = std::make_shared(split->output(1), reshape_pattern, true); + auto reshape3 = std::make_shared(split->output(2), reshape_pattern, true); + auto result1 = std::make_shared(reshape1); + auto result2 = std::make_shared(reshape2); + auto result3 = std::make_shared(reshape3); + model_ref = std::make_shared(ov::ResultVector{result1, result2, result3}, ov::ParameterVector{input}); + comparator.enable(FunctionsComparator::ATTRIBUTES); + } +} + +TEST_F(TransformationTestsF, 
FullyConnectedHorizontalFusion_bias_zp) { + std::vector pattern = {7, -1}; + { + auto input = std::make_shared(ov::element::f32, ov::PartialShape{-1, 7, 4096}); + auto weight1 = std::make_shared(ov::element::u4, ov::Shape{1024, 4096}); + weight1->set_friendly_name("weight1_1"); + auto weight2 = std::make_shared(ov::element::u4, ov::Shape{512, 4096}); + weight2->set_friendly_name("weight1_2"); + auto weight3 = std::make_shared(ov::element::u4, ov::Shape{128, 4096}); + weight3->set_friendly_name("weight1_3"); + + auto bias1 = std::make_shared(ov::element::f16, ov::Shape{1024, 1}); + auto bias2 = std::make_shared(ov::element::f16, ov::Shape{512, 1}); + auto bias3 = std::make_shared(ov::element::f16, ov::Shape{128, 1}); + + auto scale1 = std::make_shared(ov::element::f16, ov::Shape{1024, 32}); + auto scale2 = std::make_shared(ov::element::f16, ov::Shape{512, 32}); + auto scale3 = std::make_shared(ov::element::f16, ov::Shape{128, 32}); + auto fc1 = std::make_shared(input, weight1, bias1, scale1); + fc1->set_friendly_name("fc1"); + auto fc2 = std::make_shared(input, weight2, bias2, scale2); + auto fc3 = std::make_shared(input, weight3, bias3, scale3); + auto reshape_pattern = std::make_shared(ov::element::i64, ov::Shape{2}, pattern); + auto reshape1 = std::make_shared(fc1, reshape_pattern, true); + auto reshape2 = std::make_shared(fc2, reshape_pattern, true); + auto reshape3 = std::make_shared(fc3, reshape_pattern, true); + auto result1 = std::make_shared(reshape1); + auto result2 = std::make_shared(reshape2); + auto result3 = std::make_shared(reshape3); + model = std::make_shared(ov::ResultVector{result1, result2, result3}, ov::ParameterVector{input}); + manager.register_pass(); + } + { + auto input = std::make_shared(ov::element::f32, ov::PartialShape{-1, 7, 4096}); + auto weight1 = std::make_shared(ov::element::u4, ov::Shape{1024, 4096}); + weight1->set_friendly_name("weight2_1"); + auto weight2 = std::make_shared(ov::element::u4, ov::Shape{512, 4096}); + 
weight2->set_friendly_name("weight2_2"); + auto weight3 = std::make_shared(ov::element::u4, ov::Shape{128, 4096}); + weight3->set_friendly_name("weight2_3"); + auto weights = ov::OutputVector{weight1, weight2, weight3}; + auto weight_fused = std::make_shared(weights, 0); + auto bias1 = std::make_shared(ov::element::f16, ov::Shape{1024, 1}); + auto bias2 = std::make_shared(ov::element::f16, ov::Shape{512, 1}); + auto bias3 = std::make_shared(ov::element::f16, ov::Shape{128, 1}); + auto biases = ov::OutputVector{bias1, bias2, bias3}; + auto bias_fused = std::make_shared(biases, 0); + auto scale1 = std::make_shared(ov::element::f16, ov::Shape{1024, 32}); + auto scale2 = std::make_shared(ov::element::f16, ov::Shape{512, 32}); + auto scale3 = std::make_shared(ov::element::f16, ov::Shape{128, 32}); + auto scales = ov::OutputVector{scale1, scale2, scale3}; + auto scale_fused = std::make_shared(scales, 0); + auto fc_fused = std::make_shared(input, weight_fused, bias_fused, scale_fused); + auto axis_const = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{1}, {fc_fused->get_output_partial_shape(0).size() - 1}); + std::vector orig_n_sizes = {1024, 512, 128}; + auto split_const = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{3}, orig_n_sizes); + auto split = std::make_shared(fc_fused, axis_const, split_const); + auto reshape_pattern = std::make_shared(ov::element::i64, ov::Shape{2}, pattern); + auto reshape1 = std::make_shared(split->output(0), reshape_pattern, true); + auto reshape2 = std::make_shared(split->output(1), reshape_pattern, true); + auto reshape3 = std::make_shared(split->output(2), reshape_pattern, true); + auto result1 = std::make_shared(reshape1); + auto result2 = std::make_shared(reshape2); + auto result3 = std::make_shared(reshape3); + model_ref = std::make_shared(ov::ResultVector{result1, result2, result3}, ov::ParameterVector{input}); + comparator.enable(FunctionsComparator::ATTRIBUTES); + } +} +} // namespace intel_gpu +} // namespace 
test +} // namespace ov \ No newline at end of file From a30f0a5c8b54cd4da27145b713d2cc5bc25bfd98 Mon Sep 17 00:00:00 2001 From: Wilson Seok Date: Wed, 3 Jul 2024 20:46:59 -0700 Subject: [PATCH 08/50] [GPU] Skip fc fake alignment when fused desc has full tensor input (#25249) ### Details: - Skip fc fake alignment when fused desc has full tensor input ### Tickets: - 144293 --- .../intel_gpu/src/graph/fully_connected.cpp | 16 +++++ .../fake_alignment/fc_fake_alignment_test.cpp | 66 +++++++++++++++++++ 2 files changed, 82 insertions(+) diff --git a/src/plugins/intel_gpu/src/graph/fully_connected.cpp b/src/plugins/intel_gpu/src/graph/fully_connected.cpp index 0d19e14a50ae93..23392dc9514772 100644 --- a/src/plugins/intel_gpu/src/graph/fully_connected.cpp +++ b/src/plugins/intel_gpu/src/graph/fully_connected.cpp @@ -188,6 +188,22 @@ kernel_impl_params fully_connected_inst::get_fake_aligned_params(kernel_impl_par can_apply_fake_alignment &= orig_output_layout.data_padding.lower_size().sizes()[1] == 0 && orig_output_layout.data_padding.upper_size().sizes()[1] == 0; + for (auto& fused_desc : orig_impl_param.fused_desc) { + if (fused_desc.has_outer_dep()) { + auto fused_op_input_layout = orig_impl_param.input_layouts[fused_desc.outer_dep_start_idx]; + // Check fused desc's input is still dynamic, then do not fake alignment + if (fused_op_input_layout.is_dynamic()) { + can_apply_fake_alignment = false; + break; + } + // Check fused desc's input has full tensor, then do not fake alignment + if (orig_output_layout.get_shape() == fused_op_input_layout.get_shape()) { + can_apply_fake_alignment = false; + break; + } + } + } + GPU_DEBUG_GET_INSTANCE(debug_config); GPU_DEBUG_IF(debug_config->disable_fake_alignment) { can_apply_fake_alignment = false; diff --git a/src/plugins/intel_gpu/tests/unit/fake_alignment/fc_fake_alignment_test.cpp b/src/plugins/intel_gpu/tests/unit/fake_alignment/fc_fake_alignment_test.cpp index 157676cd8bea6c..92f44ecd8b6cf1 100644 --- 
a/src/plugins/intel_gpu/tests/unit/fake_alignment/fc_fake_alignment_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/fake_alignment/fc_fake_alignment_test.cpp @@ -7,8 +7,11 @@ #include #include #include +#include +#include #include "fully_connected_inst.h" +#include "eltwise_inst.h" #include "program_wrapper.h" @@ -210,4 +213,67 @@ INSTANTIATE_TEST_SUITE_P(smoke, fully_connected_fake_align_test, }, })); +class fully_connected_skip_fake_align_test : public testing::TestWithParam {}; + +// Skip fake alignment when fused desc has full tensor dependency +TEST_P(fully_connected_skip_fake_align_test, skip_fake_alignment_case) { + auto p = GetParam(); + + auto& engine = get_test_engine(); + topology topology; + cldnn::program prog(engine); + + topology.add(input_layout("input", p.input_layout)); + topology.add(input_layout("eltwise_data1",p.input_layout)); + topology.add(eltwise("eltwise_add1", { input_info("input"), input_info("eltwise_data1") }, eltwise_mode::sum)); + + topology.add(input_layout("weights", p.weight_layout)); + topology.add(fully_connected("fc_prim1", input_info("eltwise_add1"), "weights", "", + cldnn::data_types::f32, padding(), p.input_layout.get_rank(), p.weight_layout.get_rank())); + + topology.add(input_layout("bias", + layout{ov::PartialShape{1, 1, p.expected_output_layout_igpu.get_dims()[2]}, cldnn::data_types::f32, cldnn::format::bfyx})); + topology.add(eltwise("bias_add", { input_info("fc_prim1"), input_info("bias") }, eltwise_mode::sum)); + + topology.add(input_layout("dequantize_scale", + layout{ov::PartialShape{1, 1, p.expected_output_layout_igpu.get_dims()[2]}, cldnn::data_types::f32, cldnn::format::bfyx})); + topology.add(eltwise("eltwise_multiply", { input_info("bias_add"), input_info("dequantize_scale") }, eltwise_mode::prod)); + + topology.add(input_layout("eltwise_data2", p.expected_output_layout_igpu)); + topology.add(eltwise("eltwise_add2", { input_info("eltwise_multiply"), input_info("eltwise_data2") }, eltwise_mode::sum)); + 
topology.add(permute("permute", input_info("eltwise_add2"), {2, 1, 0})); + + ExecutionConfig config = get_test_default_config(engine); + config.set_property(ov::intel_gpu::optimize_data(true)); + config.set_property(ov::intel_gpu::allow_new_shape_infer(true)); + network network(engine, topology, config); + + auto impl_param = network.get_primitive("fc_prim1")->get_impl_params(); + + if (impl_param->get_input_layout().is_dynamic() || impl_param->get_output_layout().is_dynamic()) { + EXPECT_THROW(fully_connected_inst::get_fake_aligned_params(*impl_param), std::exception); + } else { + auto updated_param = fully_connected_inst::get_fake_aligned_params(*impl_param); + if (!engine.get_device_info().supports_immad) { + ASSERT_EQ(updated_param.get_input_layout(), p.expected_input_layout_igpu); + ASSERT_EQ(updated_param.get_output_layout(), p.expected_output_layout_igpu); + } else { + ASSERT_EQ(updated_param.get_input_layout(), p.expected_input_layout_dgpu); + ASSERT_EQ(updated_param.get_output_layout(), p.expected_output_layout_dgpu); + } + } +} + +INSTANTIATE_TEST_SUITE_P(smoke, fully_connected_skip_fake_align_test, + testing::ValuesIn(std::vector{ + { + layout{ov::PartialShape{1, 1000, 2048}, data_types::u8, format::bfyx}, // input_layout + layout{ov::PartialShape{512, 2048}, data_types::i8, format::bfyx}, // weight layout + data_types::f32, + layout{ov::PartialShape{1, 1000, 2048}, data_types::u8, format::bfyx}, // skiped fake_aligned input layout_igpu + layout{ov::PartialShape{1, 1000, 512}, data_types::f32, format::bfyx}, // skipped fake_aligned output layout_igpu + layout{ov::PartialShape{1, 1000, 2048}, data_types::u8, format::bfyx}, // skipped fake_aligned input layout_dgpu + layout{ov::PartialShape{1, 1000, 512}, data_types::f32, format::bfyx} // skipped fake_aligned output layout_dgpu + }, + })); } // fake_alignment_tests From 01dc53ac2deb437c644b63a633e0a7780ddf848c Mon Sep 17 00:00:00 2001 From: Paul Youngsoo Ahn Date: Wed, 3 Jul 2024 23:10:36 -0700 Subject: 
[PATCH 09/50] [GPU] update shape for fused prims (#25363) ### Details: - *update shape for user when current node is the input of fused prim of user even if the user is updated shape by other node* - *...* ### Tickets: - *145756* --- .../intel_gpu/src/graph/primitive_inst.cpp | 45 ++++++++++++++----- 1 file changed, 34 insertions(+), 11 deletions(-) diff --git a/src/plugins/intel_gpu/src/graph/primitive_inst.cpp b/src/plugins/intel_gpu/src/graph/primitive_inst.cpp index 1e72002b56334e..d002972d19344b 100644 --- a/src/plugins/intel_gpu/src/graph/primitive_inst.cpp +++ b/src/plugins/intel_gpu/src/graph/primitive_inst.cpp @@ -586,19 +586,42 @@ event::ptr primitive_inst::realloc_if_needed() { user_insts.size(), " and ", user_insts_origin.size()); } for (auto user : user_insts) { + auto is_fused_prim_of_user = [&](primitive_id id) -> bool { + for (auto& p : user->get_node().get_fused_primitives()) { + if (p.has_outer_dep()) { + const auto start_idx = p.outer_dep_start_idx; + // exclude fused_node from total_num_deps + const auto end_idx = p.outer_dep_start_idx + p.total_num_deps -1; + for (size_t idx = start_idx; idx < end_idx; idx++) { + if (user->get_node().get_dependency(idx).id() == id) { + return true; + } + } + } + } + return false; + }; // Since fake alignment is applicable for input tensor as well, make sure we allocate enough memory // to prevent reading beyond the allocated memory bounds - if (user->get_node().is_type() && user->is_dynamic() && user->_deps[0].first == this) { - GPU_DEBUG_TRACE_DETAIL << "Check fc user " << user->id() << "'s fake alignment-ed input size" << std::endl; - user->update_shape(); - user->update_shape_done_by_other = true; - - auto fc_impl_params = *user->_impl_params; - auto fc_input_layout = user->get_node().type()->get_fake_aligned_params(fc_impl_params).input_layouts[0]; - if (fc_input_layout.bytes_count() > updated_layout.bytes_count()) { - GPU_DEBUG_TRACE_DETAIL << id() << ": increase output layout allocation size from " << 
actual_layout.to_short_string() << " -> " - << fc_input_layout.to_short_string() << " to meet the input buffer alignment requirements for FC\n"; - updated_layout = fc_input_layout; + if (user->get_node().is_type() && user->is_dynamic()) { + if (user->_deps[0].first == this) { + GPU_DEBUG_TRACE_DETAIL << "Check fc user " << user->id() << "'s fake alignment-ed input size" << std::endl; + user->update_shape(); + user->update_shape_done_by_other = true; + + auto fc_impl_params = *user->_impl_params; + auto fc_input_layout = user->get_node().type()->get_fake_aligned_params(fc_impl_params).input_layouts[0]; + if (fc_input_layout.bytes_count() > updated_layout.bytes_count()) { + GPU_DEBUG_TRACE_DETAIL << id() << ": increase output layout allocation size from " << actual_layout.to_short_string() << " -> " + << fc_input_layout.to_short_string() << " to meet the input buffer alignment requirements for FC\n"; + updated_layout = fc_input_layout; + } + } else if (is_fused_prim_of_user(id()) && user->update_shape_done_by_other) { + // Since the output layout of fused prim in user is determined after user's update_shape + // Rerun update_shape w/ new output layout of fused prim + user->update_shape_done_by_other = false; + user->update_shape(); + user->update_shape_done_by_other = true; } } } From 5e28c2ad920829a327f624ca23360ede80d4587d Mon Sep 17 00:00:00 2001 From: Andrei Gorbachev Date: Thu, 4 Jul 2024 08:29:22 +0100 Subject: [PATCH 10/50] [GPU] Fix CVS-145082 (#25340) ### Details: - Coverity fixes ### Tickets: - *CVS-145082* --------- Co-authored-by: Pavel Durandin --- .../include/intel_gpu/primitives/rope.hpp | 2 +- src/plugins/intel_gpu/src/graph/loop.cpp | 8 +++--- .../intel_gpu/src/graph/primitive_inst.cpp | 2 +- .../intel_gpu/src/graph/program_node.cpp | 4 +-- .../kernels/eltwise/eltwise_kernel_ref.cpp | 2 +- .../kernels/rope/rope_kernel_base.h | 26 +++++++++---------- .../src/plugin/transformations/op/sdpa.cpp | 4 ++- .../src/plugin/transformations_pipeline.cpp | 4 +-- 
8 files changed, 28 insertions(+), 24 deletions(-) diff --git a/src/plugins/intel_gpu/include/intel_gpu/primitives/rope.hpp b/src/plugins/intel_gpu/include/intel_gpu/primitives/rope.hpp index 51bd0e3bfdfecd..6669a5110b1a68 100644 --- a/src/plugins/intel_gpu/include/intel_gpu/primitives/rope.hpp +++ b/src/plugins/intel_gpu/include/intel_gpu/primitives/rope.hpp @@ -30,7 +30,7 @@ struct rope : public primitive_base { gather_rank(gather_rank) {} RoPE::Config config; - size_t gather_rank; + size_t gather_rank = 0; size_t hash() const override { size_t seed = primitive::hash(); diff --git a/src/plugins/intel_gpu/src/graph/loop.cpp b/src/plugins/intel_gpu/src/graph/loop.cpp index 767ccce448e212..8a1b29b9c3409f 100644 --- a/src/plugins/intel_gpu/src/graph/loop.cpp +++ b/src/plugins/intel_gpu/src/graph/loop.cpp @@ -536,7 +536,8 @@ void loop_inst::preprocess_backedge_memory() { // in case where memory buffer has been over-allocated by shape predictor, memory layout might be unexpected shape. // so memory layout needs to be re-interprete according to original layout. 
auto initial_layout = get_external_output_layout(external_id.pid, external_id.idx); - if (initial_mem != nullptr && !initial_mem->get_layout().identical(initial_layout)) { + OPENVINO_ASSERT(initial_mem != nullptr, "initial_mem should not be null"); + if (!initial_mem->get_layout().identical(initial_layout)) { OPENVINO_ASSERT(initial_layout.bytes_count() <= initial_mem->get_layout().bytes_count(), "initial layout size(", initial_layout.to_short_string(), ") should not exceed initial memory size(", initial_mem->get_layout().to_short_string(), ")"); @@ -1067,7 +1068,7 @@ std::vector loop_inst::handle_buffers_for_next_iteration(const loop_ << ") to " << mapping.to_primitive->id() << ")" << std::endl; } } - } else if (mapping.type == loop_inst::backedge_memory_mapping::SINGLE) { + } else if (mapping.type == loop_inst::backedge_memory_mapping::SINGLE) { memory::ptr to_mem = mapping.to_primitive->output_memory_ptr(); if (is_dynamic()) { @@ -1079,7 +1080,8 @@ std::vector loop_inst::handle_buffers_for_next_iteration(const loop_ if (iter == 0) { auto to_id = mapping.to_primitive->id(); // Check backedge_to shape needs to be updated by initial_mem - if (mapping.initial_mem != nullptr && !mapping.initial_mem->get_layout().identical(to_mem->get_layout())) { + OPENVINO_ASSERT(mapping.initial_mem != nullptr, "initial_mem should not be null"); + if (!mapping.initial_mem->get_layout().identical(to_mem->get_layout())) { to_mem = body_network->get_engine().allocate_memory(mapping.initial_mem->get_layout(), false); body_network->set_input_data(to_id, to_mem); ev = to_mem->copy_from(body_network->get_stream(), *(mapping.initial_mem)); diff --git a/src/plugins/intel_gpu/src/graph/primitive_inst.cpp b/src/plugins/intel_gpu/src/graph/primitive_inst.cpp index d002972d19344b..3afcde70b1e5b9 100644 --- a/src/plugins/intel_gpu/src/graph/primitive_inst.cpp +++ b/src/plugins/intel_gpu/src/graph/primitive_inst.cpp @@ -1620,7 +1620,7 @@ primitive_inst::primitive_inst(network & network, 
program_node const& node, bool , _inputs_memory_count(node.get_inputs_count()) , _outputs_memory_count(node.get_outputs_count()) , _fused_mem_count(node.get_fused_inputs_count()) - , _fused_mem_offset((_fused_mem_count > 0 && node.has_fused_dep()) ? node.get_first_fused_dep_idx() : 0) + , _fused_mem_offset((_fused_mem_count > 0 && node.get_first_fused_dep_idx() > 0) ? node.get_first_fused_dep_idx() : 0) , _can_be_optimized(node.can_be_optimized()) , _can_share_buffer(node.can_share_buffer()) , _is_constant(node.is_constant()) diff --git a/src/plugins/intel_gpu/src/graph/program_node.cpp b/src/plugins/intel_gpu/src/graph/program_node.cpp index 95c5a70250d0bf..ac7f8d59d644cb 100644 --- a/src/plugins/intel_gpu/src/graph/program_node.cpp +++ b/src/plugins/intel_gpu/src/graph/program_node.cpp @@ -1434,7 +1434,7 @@ void program_node::create_onednn_primitive_attributes( if (impl_params != nullptr) { return impl_params->get_input_layout(idx); } else { - return get_dependency(idx).get_output_layout();; + return get_dependency(idx).get_output_layout(); } }; @@ -1474,7 +1474,7 @@ void program_node::create_onednn_primitive_attributes( oc_dim = static_cast(desc.output_layout.get_partial_shape()[1].get_max_length()); else oc_dim = static_cast(desc.output_layout.get_tensor().feature.size()); - post_ops.append_prelu(1 << oc_dim); + post_ops.append_prelu(1 << std::max(0, oc_dim)); update_onednn_post_op_list(onednn_post_op_type::binary_relu, dep_idx); } else if (fused_desc->activation_function == cldnn::activation_func::hard_sigmoid) { post_ops.append_eltwise(dnnl::algorithm::eltwise_hardsigmoid, fused_desc->additional_params.a, fused_desc->additional_params.b); diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/eltwise/eltwise_kernel_ref.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernels/eltwise/eltwise_kernel_ref.cpp index 2c4cb20af7699d..1f598759fb1580 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/eltwise/eltwise_kernel_ref.cpp +++ 
b/src/plugins/intel_gpu/src/kernel_selector/kernels/eltwise/eltwise_kernel_ref.cpp @@ -59,7 +59,7 @@ JitConstants EltwiseKernelRef::GetJitConstants(const eltwise_params& params) con if (DataTensor::ChannelsCount(params.outputs[0].GetLayout()) == 4) { if (!params.layoutBased && !params.int8_quantization && !params.broadcast && !CheckInputsOutputNoPitchSameDims(params)) { auto calc_dim = [¶ms](Tensor::DataChannelName channel) { - size_t idx = DataTensor::Channelndex(params.outputs[0].GetLayout(), channel); + int idx = DataTensor::Channelndex(params.outputs[0].GetLayout(), channel); // We increment the index, because fusions dims ordering starts from one return "d" + std::to_string(idx + 1); }; diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/rope/rope_kernel_base.h b/src/plugins/intel_gpu/src/kernel_selector/kernels/rope/rope_kernel_base.h index dde691bfb439de..5d55fd082765e8 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/rope/rope_kernel_base.h +++ b/src/plugins/intel_gpu/src/kernel_selector/kernels/rope/rope_kernel_base.h @@ -12,19 +12,19 @@ namespace kernel_selector { /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// struct rope_params : public base_params { rope_params() : base_params(KernelType::ROPE) {} - size_t head_cnt; - size_t head_size; - size_t rotary_ndims; - - size_t slice_start; - size_t slice_stop; - size_t axis; - size_t num_of_inputs; - size_t gather_rank; - - bool is_qwen; - bool is_chatglm; - bool transposed_input; + size_t head_cnt = 0; + size_t head_size = 0; + size_t rotary_ndims = 0; + + size_t slice_start = 0; + size_t slice_stop = 0; + size_t axis = 0; + size_t num_of_inputs = 0; + size_t gather_rank = 0; + + bool is_qwen = false; + bool is_chatglm = false; + bool transposed_input = false; }; //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// diff --git 
a/src/plugins/intel_gpu/src/plugin/transformations/op/sdpa.cpp b/src/plugins/intel_gpu/src/plugin/transformations/op/sdpa.cpp index 1006ad280df4c5..57d2899e2b2e77 100644 --- a/src/plugins/intel_gpu/src/plugin/transformations/op/sdpa.cpp +++ b/src/plugins/intel_gpu/src/plugin/transformations/op/sdpa.cpp @@ -121,7 +121,9 @@ std::vector shape_infer(const SDPA* op, } OPENVINO_ASSERT(op != nullptr, "op should not be nullptr for shape_infer."); - auto out_shapes = ov::op::v13::shape_infer(dynamic_cast(op), transposed_input_shapes); + auto op_v13 = dynamic_cast(op); + OPENVINO_ASSERT(op_v13 != nullptr, "ov::op::v13::ScaledDotProductAttention*>(op) should not be nullptr."); + auto out_shapes = ov::op::v13::shape_infer(op_v13, transposed_input_shapes); if (order_out.size() > 0) { return { transpose_pshape(out_shapes[0], order_out) }; diff --git a/src/plugins/intel_gpu/src/plugin/transformations_pipeline.cpp b/src/plugins/intel_gpu/src/plugin/transformations_pipeline.cpp index a57f3da6e9765c..8c135a732decd4 100644 --- a/src/plugins/intel_gpu/src/plugin/transformations_pipeline.cpp +++ b/src/plugins/intel_gpu/src/plugin/transformations_pipeline.cpp @@ -337,12 +337,12 @@ void TransformationsPipeline::apply(std::shared_ptr func) { } // - The head size of all Q, K, and V inputs should be the same static value - if (query_ps[query_ps.size() - 1].is_dynamic() || key_ps[key_ps.size() - 1].is_dynamic() || value_ps[query_ps.size() - 1].is_dynamic()) { + if (query_ps[query_ps.size() - 1].is_dynamic() || key_ps[key_ps.size() - 1].is_dynamic() || value_ps[value_ps.size() - 1].is_dynamic()) { return false; } if (query_ps[query_ps.size() - 1].get_length() != key_ps[key_ps.size() - 1].get_length() || - query_ps[query_ps.size() - 1].get_length() != value_ps[query_ps.size() - 1].get_length()) { + query_ps[query_ps.size() - 1].get_length() != value_ps[value_ps.size() - 1].get_length()) { return false; } From ea7921cb2dbc73a6915ec2d78749074f895f087d Mon Sep 17 00:00:00 2001 From: Piotr 
Kowalczyk Date: Thu, 4 Jul 2024 09:43:34 +0200 Subject: [PATCH 11/50] [GPU][unittest]: Fixed mem leak in roi align rotated unittests. (#25356) ### Details: - Fixed mem leak in cl engine in roi align rotated test. ### Tickets: - *CVS-145836* --- .../test_cases/roi_align_rotated_gpu_test.cpp | 187 +++++++++--------- 1 file changed, 96 insertions(+), 91 deletions(-) diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/roi_align_rotated_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/roi_align_rotated_gpu_test.cpp index 036dd570b2ab84..bcd845046bdc7a 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/roi_align_rotated_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/roi_align_rotated_gpu_test.cpp @@ -15,79 +15,94 @@ namespace { constexpr float EPS = 2e-3f; +// Converts float vector to another type vector. +template +std::vector ConverFloatVector(const std::vector& vec) { + std::vector ret; + ret.reserve(vec.size()); + for (const auto& val : vec) { + ret.push_back(T(val)); + } + return ret; +} + // Allocates tensoer with given shape and data. 
template -memory::ptr AllocateTensor(ov::PartialShape shape, cldnn::format fmt, const std::vector& data) { - const layout lo = {shape, ov::element::from(), fmt}; +memory::ptr AllocateTensor(ov::PartialShape shape, const std::vector& data) { + const layout lo = {shape, ov::element::from(), cldnn::format::bfyx}; EXPECT_EQ(lo.get_linear_size(), data.size()); memory::ptr tensor = get_test_engine().allocate_memory(lo); set_values(tensor, data); return tensor; } -struct ROIAlignRotatedParams { +struct ROIAlignRotatedTestParams { + ov::PartialShape inputShape; int32_t pooledH; int32_t pooledW; float spatialScale; int32_t samplingRatio; bool clockwise; + std::vector input; + std::vector rois; + std::vector roiBatchIdxs; + std::vector expectedOutput; std::string testcaseName; - memory::ptr input; - memory::ptr rois; - memory::ptr roiBatchIdxs; - memory::ptr expectedOutput; }; -template -ROIAlignRotatedParams PrepareParams(const ov::PartialShape& inputShape, - int32_t pooledH, - int32_t pooledW, - float spatialScale, - int32_t samplingRatio, - bool clockwise, - const std::vector& inputValues, - const std::vector& roisVals, - const std::vector& roiBatchIdx, - const std::vector& expectedValues, - const std::string& testcaseName) { - ROIAlignRotatedParams ret; - - constexpr ov::Dimension::value_type rois_second_dim_size = 5; //< By definition of the ROIAlignRotated op - - const ov::Dimension::value_type numOfRois = roisVals.size() / rois_second_dim_size; - const ov::Dimension::value_type channels = static_cast(inputShape[1].get_length()); - - ret.pooledH = pooledH; - ret.pooledW = pooledW; - ret.spatialScale = spatialScale; - ret.samplingRatio = samplingRatio; - ret.clockwise = clockwise; - ret.testcaseName = testcaseName; - - ret.input = AllocateTensor(inputShape, cldnn::format::bfyx, inputValues); - ret.rois = AllocateTensor({numOfRois, rois_second_dim_size}, cldnn::format::bfyx, roisVals); - ret.roiBatchIdxs = AllocateTensor({numOfRois}, cldnn::format::bfyx, roiBatchIdx); - 
ret.expectedOutput = AllocateTensor({numOfRois, - channels, - static_cast(ret.pooledH), - static_cast(ret.pooledW)}, - cldnn::format::bfyx, - expectedValues); - - return ret; -} - -class roi_align_rotated_test : public ::testing::TestWithParam { +class roi_align_rotated_test : public ::testing::TestWithParam { public: - static std::string getTestCaseName(const testing::TestParamInfo& obj) { + static std::string getTestCaseName(const testing::TestParamInfo& obj) { auto param = obj.param; std::ostringstream result; - result << "_type_" << param.input->get_layout().data_type; result << "_" << param.testcaseName; return result.str(); } - void Execute(const ROIAlignRotatedParams& params) { + struct ROIAlignRotatedInferenceParams { + int32_t pooledH; + int32_t pooledW; + float spatialScale; + int32_t samplingRatio; + bool clockwise; + std::string testcaseName; + memory::ptr input; + memory::ptr rois; + memory::ptr roiBatchIdxs; + memory::ptr expectedOutput; + }; + + template + ROIAlignRotatedInferenceParams PrepareInferenceParams(const ROIAlignRotatedTestParams& testParam) { + using T = typename ov::element_type_traits::value_type; + ROIAlignRotatedInferenceParams ret; + + constexpr ov::Dimension::value_type rois_second_dim_size = 5; //< By definition of the ROIAlignRotated op + + const ov::Dimension::value_type numOfRois = testParam.rois.size() / rois_second_dim_size; + const ov::Dimension::value_type channels = + static_cast(testParam.inputShape[1].get_length()); + + ret.pooledH = testParam.pooledH; + ret.pooledW = testParam.pooledW; + ret.spatialScale = testParam.spatialScale; + ret.samplingRatio = testParam.samplingRatio; + ret.clockwise = testParam.clockwise; + ret.testcaseName = testParam.testcaseName; + + ret.input = AllocateTensor(testParam.inputShape, ConverFloatVector(testParam.input)); + ret.rois = AllocateTensor({numOfRois, rois_second_dim_size}, ConverFloatVector(testParam.rois)); + ret.roiBatchIdxs = AllocateTensor({numOfRois}, testParam.roiBatchIdxs); + 
ret.expectedOutput = AllocateTensor({numOfRois, + channels, + static_cast(ret.pooledH), + static_cast(ret.pooledW)}, + testParam.expectedOutput); + + return ret; + } + + void Execute(const ROIAlignRotatedInferenceParams& params) { // Prepare the network. auto stream = get_test_stream_ptr(get_test_default_config(engine_)); @@ -130,57 +145,47 @@ class roi_align_rotated_test : public ::testing::TestWithParam -std::vector generateParams() { - using T = typename ov::element_type_traits::value_type; - std::vector params; - // NOTE: expected output were generated using mmvc roi_align_rotated implementation. -#define TEST_DATA(input_shape, \ - pooled_height, \ - pooled_width, \ - spatial_scale, \ - sampling_ratio, \ - clockwise, \ - input_data, \ - rois_data, \ - batch_indices_data, \ - expected_output, \ - description) \ - params.push_back(PrepareParams(input_shape, \ - pooled_height, \ - pooled_width, \ - spatial_scale, \ - sampling_ratio, \ - clockwise, \ - input_data, \ - rois_data, \ - batch_indices_data, \ - expected_output, \ - description)); +std::vector generateTestParams() { + std::vector params; +#define TEST_DATA(input_shape, \ + pooled_height, \ + pooled_width, \ + spatial_scale, \ + sampling_ratio, \ + clockwise, \ + input_data, \ + rois_data, \ + batch_indices_data, \ + expected_output, \ + description) \ + params.push_back(ROIAlignRotatedTestParams{input_shape, \ + pooled_height, \ + pooled_width, \ + spatial_scale, \ + sampling_ratio, \ + clockwise, \ + input_data, \ + rois_data, \ + batch_indices_data, \ + expected_output, \ + description}); #include "unit_test_utils/tests_data/roi_align_rotated_data.h" #undef TEST_DATA return params; } -std::vector generateCombinedParams() { - const std::vector> generatedParams{generateParams(), - generateParams()}; +} // namespace - std::vector combinedParams; - for (const auto& params : generatedParams) { - combinedParams.insert(combinedParams.end(), params.begin(), params.end()); +#define 
ROI_ALIGN_ROTATED_TEST_P(precision) \ + TEST_P(roi_align_rotated_test, ref_comp_##precision) { \ + Execute(PrepareInferenceParams(GetParam())); \ } - return combinedParams; -} - -} // namespace -TEST_P(roi_align_rotated_test, ref_comp) { - Execute(GetParam()); -} +ROI_ALIGN_ROTATED_TEST_P(f16); +ROI_ALIGN_ROTATED_TEST_P(f32); INSTANTIATE_TEST_SUITE_P(roi_align_rotated_test_suit, roi_align_rotated_test, - testing::ValuesIn(generateCombinedParams()), + testing::ValuesIn(generateTestParams()), roi_align_rotated_test::getTestCaseName); From 977b42612c5128640e97c950726b7eaa74b1ae9e Mon Sep 17 00:00:00 2001 From: Ekaterina Aidova Date: Thu, 4 Jul 2024 11:52:01 +0400 Subject: [PATCH 12/50] [PT FE]: update layer norm for support non-static normalized shape (#25365) ### Details: - *aten::layer_norm support extended for support normalized shapes represented as non constant and multiple dims* tested with stable-audio model ### Tickets: - TBD --- src/frontends/pytorch/src/op/layer_norm.cpp | 17 +++--- .../pytorch_tests/test_layer_norm.py | 53 +++++++++++++++++++ 2 files changed, 64 insertions(+), 6 deletions(-) create mode 100644 tests/layer_tests/pytorch_tests/test_layer_norm.py diff --git a/src/frontends/pytorch/src/op/layer_norm.cpp b/src/frontends/pytorch/src/op/layer_norm.cpp index 8775d30440ca2f..79464fa2d6d609 100644 --- a/src/frontends/pytorch/src/op/layer_norm.cpp +++ b/src/frontends/pytorch/src/op/layer_norm.cpp @@ -7,6 +7,9 @@ #include "openvino/op/constant.hpp" #include "openvino/op/multiply.hpp" #include "openvino/op/mvn.hpp" +#include "openvino/op/range.hpp" +#include "openvino/op/shape_of.hpp" +#include "openvino/op/squeeze.hpp" #include "openvino/op/util/framework_node.hpp" #include "utils.hpp" @@ -20,12 +23,14 @@ using namespace ov::op; OutputVector translate_layer_norm(const NodeContext& context) { num_inputs_check(context, 5, 6); auto eps = context.const_input(4); - auto normalized_shape = context.const_input(1); - 
PYTORCH_OP_CONVERSION_CHECK(normalized_shape.size() == 1, - "Translation for aten::layer_norm supports only single normalized_shape value, " - "which means normalizing over the last dimension."); - // TODO: support any dimension - auto axes = context.mark_node(v0::Constant::create(element::i32, Shape{1}, {-1})); + auto normalized_shape = context.get_input(1); + auto num_axes = context.mark_node(std::make_shared(normalized_shape, element::i32)); + num_axes = context.mark_node(std::make_shared(num_axes)); + auto zero = context.mark_node(v0::Constant::create(element::i32, Shape{}, {0})); + auto minus_one = context.mark_node(v0::Constant::create(element::i32, Shape{}, {-1})); + auto axes_range = context.mark_node(std::make_shared(num_axes, zero, minus_one, element::i32)); + + auto axes = context.mark_node(std::make_shared(axes_range, minus_one)); auto out_node = context.mark_node(std::make_shared(context.get_input(0), axes, true, eps, MVNEpsMode::INSIDE_SQRT)); if (!context.input_is_none(2)) { diff --git a/tests/layer_tests/pytorch_tests/test_layer_norm.py b/tests/layer_tests/pytorch_tests/test_layer_norm.py new file mode 100644 index 00000000000000..3bba4a31dab0a4 --- /dev/null +++ b/tests/layer_tests/pytorch_tests/test_layer_norm.py @@ -0,0 +1,53 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import pytest + +from pytorch_layer_test_class import PytorchLayerTest +import numpy as np + + +class TestLayerNorm(PytorchLayerTest): + def _prepare_input(self): + import numpy as np + return (np.random.randn(20, 5, 10, 10).astype(np.float32),) + + def create_model(self, normalized_shape, weight, bias, eps): + import torch + import torch.nn.functional as F + + if weight == "ones": + weight = torch.ones(normalized_shape) + + if weight == "random": + weight = torch.randn(normalized_shape) + + if bias == "zeros": + bias = torch.zeros(normalized_shape) + + if bias == "random": + bias = torch.randn(normalized_shape) + + class 
aten_layer_norm(torch.nn.Module): + def __init__(self, normalized_shape, weight, bias, eps): + super(aten_layer_norm, self).__init__() + self.normalized_shape = normalized_shape + self.weight = weight + self.bias = bias + self.eps = eps + + def forward(self, x): + return F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps) + + ref_net = None + + return aten_layer_norm(normalized_shape, weight, bias, eps), ref_net, "aten::layer_norm" + + @pytest.mark.parametrize("normalized_shape", [[10,], [10, 10], [5, 10, 10]]) + @pytest.mark.parametrize("weight", [None, "ones", "random"]) + @pytest.mark.parametrize("bias", [None, "zeros", "random"]) + @pytest.mark.parametrize("eps", [1e-5, 0.005]) + @pytest.mark.nightly + @pytest.mark.precommit + def test_layer_norm(self, normalized_shape, weight, bias, eps, ie_device, precision, ir_version): + self._test(*self.create_model(normalized_shape, weight, bias, eps), ie_device, precision, ir_version) \ No newline at end of file From e7b0072c7ec04d4ae73eae4b2a2a2ad26550b524 Mon Sep 17 00:00:00 2001 From: Andrzej Kopytko Date: Thu, 4 Jul 2024 11:16:03 +0200 Subject: [PATCH 13/50] [DOCS] port sidebar menu arrows to master (#25371) Port for: https://github.com/openvinotoolkit/openvino/pull/25370 --- docs/sphinx_setup/_static/css/custom.css | 5 ----- docs/sphinx_setup/_static/js/custom.js | 6 +++--- docs/sphinx_setup/_static/js/open_sidebar.js | 6 +++--- 3 files changed, 6 insertions(+), 11 deletions(-) diff --git a/docs/sphinx_setup/_static/css/custom.css b/docs/sphinx_setup/_static/css/custom.css index 8cf6521fe05a8a..1fb4cbaf0fd34a 100644 --- a/docs/sphinx_setup/_static/css/custom.css +++ b/docs/sphinx_setup/_static/css/custom.css @@ -163,11 +163,6 @@ a.nav-link:hover { color: white; } -li.toctree-l1.has-children > a.reference.internal { - font-weight: 700; - color: black; -} - .rotate { transform: rotate(270deg); } diff --git a/docs/sphinx_setup/_static/js/custom.js b/docs/sphinx_setup/_static/js/custom.js index 
f4992e465e72c1..80330bf5dab47c 100644 --- a/docs/sphinx_setup/_static/js/custom.js +++ b/docs/sphinx_setup/_static/js/custom.js @@ -20,11 +20,11 @@ document.addEventListener('DOMContentLoaded', function () { }); var parentElement = toggle.parentElement; - if (!parentElement - || !parentElement.parentElement + if (!parentElement.parentElement || !parentElement.parentElement.parentElement + || !parentElement.classList.contains('current') || !parentElement.parentElement.classList.contains('current') - || (parentElement.parentElement.classList.contains('current') && (parentElement.parentElement)) + || (parentElement.parentElement.classList.contains('current') && (!parentElement.parentElement)) ) { toggle.classList.add('rotate'); } diff --git a/docs/sphinx_setup/_static/js/open_sidebar.js b/docs/sphinx_setup/_static/js/open_sidebar.js index fa25b613748c46..6f01a7b2d49614 100644 --- a/docs/sphinx_setup/_static/js/open_sidebar.js +++ b/docs/sphinx_setup/_static/js/open_sidebar.js @@ -1,6 +1,6 @@ $(document).ready(function() { - const elems = $( "ul.bd-sidenav > li > input" ); - for(let i = 0; i < elems.length; i++){ - elems[i].setAttribute("checked", "checked"); + const labels = $( "ul.bd-sidenav > li > label" ); + for(let i = 0; i < labels.length; i++){ + labels[i].classList.remove("rotate"); } }) \ No newline at end of file From ce80952e96623bce5a55496a84546b39480cf210 Mon Sep 17 00:00:00 2001 From: Andrew Kwangwoong Park Date: Thu, 4 Jul 2024 18:27:35 +0900 Subject: [PATCH 14/50] [GPU] Convert multiple StridedSlices to single VariadicSplit after fused QKV FC (#25304) ### Details: - Convert QKV FC + StridedSlices pattern to QKV FC + VariadicSplit for potential optimization regarding crop runtime buffer fusing ### Tickets: - 144394 --- ...convert_stridedslices_to_variadicsplit.cpp | 150 ++++++++++++++ ...convert_stridedslices_to_variadicsplit.hpp | 19 ++ .../src/plugin/transformations_pipeline.cpp | 2 + ...rt_stridedslices_to_variadicsplit_test.cpp | 194 
++++++++++++++++++ 4 files changed, 365 insertions(+) create mode 100644 src/plugins/intel_gpu/src/plugin/transformations/convert_stridedslices_to_variadicsplit.cpp create mode 100644 src/plugins/intel_gpu/src/plugin/transformations/convert_stridedslices_to_variadicsplit.hpp create mode 100644 src/plugins/intel_gpu/tests/unit/transformations/convert_stridedslices_to_variadicsplit_test.cpp diff --git a/src/plugins/intel_gpu/src/plugin/transformations/convert_stridedslices_to_variadicsplit.cpp b/src/plugins/intel_gpu/src/plugin/transformations/convert_stridedslices_to_variadicsplit.cpp new file mode 100644 index 00000000000000..56dd9952c8acaf --- /dev/null +++ b/src/plugins/intel_gpu/src/plugin/transformations/convert_stridedslices_to_variadicsplit.cpp @@ -0,0 +1,150 @@ +// Copyright (C) 2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "convert_stridedslices_to_variadicsplit.hpp" + +#include "intel_gpu/op/fully_connected_compressed.hpp" + +#include "openvino/core/rt_info.hpp" +#include "openvino/core/validation_util.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/strided_slice.hpp" +#include "openvino/op/variadic_split.hpp" +#include "openvino/pass/pattern/op/or.hpp" +#include "openvino/pass/pattern/op/wrap_type.hpp" +#include "transformations/utils/utils.hpp" + +namespace ov { +namespace intel_gpu { + +ConvertStridedSlicesToVariadicSplit::ConvertStridedSlicesToVariadicSplit() { + using namespace ov::pass::pattern; + + auto fc_predicate = [](const ov::Output& output) -> bool { + const size_t num_users_to_fuse = 3; + const auto fc = ov::as_type_ptr(output.get_node_shared_ptr()); + size_t user_count = 0; + for (const auto& user : fc ->get_users()) { + const auto strided_slice = ov::as_type_ptr(user); + if (!strided_slice) + return false; + user_count++; + } + return (user_count == num_users_to_fuse) && consumers_count(num_users_to_fuse); + }; + + auto data_m = any_input(); + auto weights_m = any_input(); + auto bias_m = 
any_input(); + auto fully_connected_compressed_m = wrap_type({data_m, weights_m, bias_m, any_input(), any_input()}, fc_predicate); + + ov::matcher_pass_callback callback = [=](Matcher& m) { + if (transformation_callback(m.get_match_root())) { + return false; + } + auto fc = std::dynamic_pointer_cast(m.get_match_root()); + ov::NodeVector strided_slice_nodes; + std::vector split_lengths; + int64_t begin_offset = 0; + int64_t end_offset = 0; + for (const auto& user : fc->get_users()) { + const auto strided_slice_node = std::dynamic_pointer_cast(user); + if (strided_slice_node) { + auto valid_ps = [](const ov::PartialShape& shape) -> bool { + return shape.rank().is_static() && shape[shape.rank().get_length() - 1].is_static(); + }; + const auto& input_ps = strided_slice_node->get_input_partial_shape(0); + const auto& output_ps = strided_slice_node->get_output_partial_shape(0); + if (!valid_ps(input_ps) || !valid_ps(output_ps) || input_ps.rank().get_length() != output_ps.rank().get_length()) + return false; + + auto& total_length = input_ps[input_ps.rank().get_length() - 1]; + auto& split_length = output_ps[output_ps.rank().get_length() - 1]; + if (total_length.get_length() / 3 != split_length.get_length()) + return false; + + split_lengths.push_back(split_length.get_length()); + + if (!strided_slice_node->get_shrink_axis_mask().empty() || + !strided_slice_node->get_new_axis_mask().empty() || + !strided_slice_node->get_ellipsis_mask().empty()) { + return false; + } + + if (strided_slice_node->get_input_size() == 4 && + !ov::op::util::is_constant_and_all_values_equal_int(strided_slice_node->input_value(3), 1)) { + return false; + } + + end_offset += split_length.get_length(); + auto check_mask = [](const std::vector& mask_to_check) -> bool { + if (mask_to_check.back() != 0) + return false; + for (size_t i = 0; i < mask_to_check.size() - 1; ++i) { + if (!mask_to_check[i]) + return false; + } + return true; + }; + auto begin_node = 
strided_slice_node->get_input_node_shared_ptr(1); + if (const auto& begin_constant_node = ov::util::get_constant_from_source(begin_node)) { + auto values = begin_constant_node->cast_vector(); + auto begin_mask = strided_slice_node->get_begin_mask(); + if (values.size() != begin_mask.size()) + return false; + if (!check_mask(begin_mask)) + return false; + if (values.back() != begin_offset) + return false; + } else { + return false; + } + + auto end_node = strided_slice_node->get_input_node_shared_ptr(2); + if (const auto& end_constant_node = ov::util::get_constant_from_source(end_node)) { + int64_t max_value = end_node->get_element_type() == ov::element::i32 ? std::numeric_limits::max() + : std::numeric_limits::max(); + auto values = end_constant_node->cast_vector(); + auto end_mask = strided_slice_node->get_end_mask(); + if (values.size() != end_mask.size()) + return false; + if (!check_mask(end_mask)) + return false; + if (!((values.back() == end_offset) || (values.back() == max_value))) + return false; + } else { + return false; + } + begin_offset += split_length.get_length(); + strided_slice_nodes.push_back(strided_slice_node); + } + } + auto name = fc->get_friendly_name() + "_split"; + auto axis_const = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{1}, {fc->get_output_partial_shape(0).rank().get_length()- 1}); + auto split_lenghts_const = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{3}, split_lengths); + auto variadic_split = std::make_shared(fc, axis_const, split_lenghts_const); + variadic_split->set_friendly_name(name); + ov::copy_runtime_info(strided_slice_nodes, variadic_split); + + for (size_t i = 0; i < strided_slice_nodes.size(); ++i) { + auto& strided_slice_node = strided_slice_nodes[i]; + for (const auto& user : strided_slice_node->get_users()) { + for (size_t idx = 0; idx < user->inputs().size(); ++idx) { + if (user->get_input_node_shared_ptr(idx) == strided_slice_node) { + 
user->input(idx).replace_source_output(variadic_split->output(i)); + } + } + } + strided_slice_node->clear_control_dependencies(); + } + + return true; + }; + + auto m = std::make_shared(fully_connected_compressed_m, "ConvertStridedSlicesToVariadicSplit"); + this->register_matcher(m, callback); +} + +} // namespace intel_gpu +} // namespace ov diff --git a/src/plugins/intel_gpu/src/plugin/transformations/convert_stridedslices_to_variadicsplit.hpp b/src/plugins/intel_gpu/src/plugin/transformations/convert_stridedslices_to_variadicsplit.hpp new file mode 100644 index 00000000000000..83162e36b4c0ae --- /dev/null +++ b/src/plugins/intel_gpu/src/plugin/transformations/convert_stridedslices_to_variadicsplit.hpp @@ -0,0 +1,19 @@ +// Copyright (C) 2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/pass/graph_rewrite.hpp" + +namespace ov { +namespace intel_gpu { + +class ConvertStridedSlicesToVariadicSplit : public ov::pass::MatcherPass { +public: + OPENVINO_RTTI("ConvertStridedSlicesToVariadicSplit", "0"); + ConvertStridedSlicesToVariadicSplit(); +}; + +} // namespace intel_gpu +} // namespace ov diff --git a/src/plugins/intel_gpu/src/plugin/transformations_pipeline.cpp b/src/plugins/intel_gpu/src/plugin/transformations_pipeline.cpp index 8c135a732decd4..c02b14fc5b894c 100644 --- a/src/plugins/intel_gpu/src/plugin/transformations_pipeline.cpp +++ b/src/plugins/intel_gpu/src/plugin/transformations_pipeline.cpp @@ -57,6 +57,7 @@ #include "plugin/transformations/clamp_fp16_output.hpp" #include "plugin/transformations/convert_fc_to_compressed.hpp" #include "plugin/transformations/convert_matmul_to_fc.hpp" +#include "plugin/transformations/convert_stridedslices_to_variadicsplit.hpp" #include "plugin/transformations/fc_convert_fusion.hpp" #include "plugin/transformations/fc_horizontal_fusion.hpp" #include "plugin/transformations/kv_cache_fusion.hpp" @@ -831,6 +832,7 @@ void TransformationsPipeline::apply(std::shared_ptr func) 
{ manager.register_pass(); manager.register_pass(); manager.register_pass(); + manager.register_pass(); const size_t zp_pad_size = device_info.supports_immad ? 16 : 32; manager.register_pass(zp_pad_size); diff --git a/src/plugins/intel_gpu/tests/unit/transformations/convert_stridedslices_to_variadicsplit_test.cpp b/src/plugins/intel_gpu/tests/unit/transformations/convert_stridedslices_to_variadicsplit_test.cpp new file mode 100644 index 00000000000000..1a245547e490a3 --- /dev/null +++ b/src/plugins/intel_gpu/tests/unit/transformations/convert_stridedslices_to_variadicsplit_test.cpp @@ -0,0 +1,194 @@ +// Copyright (C) 2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "common_test_utils/ov_test_utils.hpp" + +#include "openvino/core/model.hpp" +#include "openvino/pass/manager.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/parameter.hpp" +#include "openvino/op/reshape.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/strided_slice.hpp" +#include "openvino/op/variadic_split.hpp" + +#include "intel_gpu/op/placeholder.hpp" +#include "intel_gpu/op/fully_connected_compressed.hpp" + +#include "plugin/transformations/convert_stridedslices_to_variadicsplit.hpp" + +using namespace testing; +using namespace ov::intel_gpu; + +namespace ov { +namespace test { +namespace intel_gpu { + +TEST_F(TransformationTestsF, ConvertStridedSlicesToVariadicSplit1) { + { + auto input = std::make_shared(ov::element::f32, ov::PartialShape{-1, -1, 3072}); + auto weight_const = std::make_shared(ov::element::u4, ov::Shape{9216, 3072}); + auto no_bias = std::make_shared(); + auto scale_const = std::make_shared(ov::element::f16, ov::Shape{9216, 24}); + auto zp_const = std::make_shared(ov::element::f16, ov::Shape{1, 1}); + auto fc_compressed = std::make_shared(input, weight_const, no_bias, scale_const, zp_const); + auto begin_const1 = ov::op::v0::Constant::create(ov::element::i32, ov::Shape{3}, {0, 0, 0}); + auto end_const1 = 
ov::op::v0::Constant::create(ov::element::i32, ov::Shape{3}, {0, 0, 3072}); + auto strides_const1 = ov::op::v0::Constant::create(ov::element::i32, ov::Shape{3}, {1, 1, 1}); + auto strided_slice1 = std::make_shared(fc_compressed, + begin_const1, + end_const1, + strides_const1, + std::vector{1, 1, 0}, + std::vector{1, 1, 0}); + auto begin_const2 = ov::op::v0::Constant::create(ov::element::i32, ov::Shape{3}, {0, 0, 3072}); + auto end_const2= ov::op::v0::Constant::create(ov::element::i32, ov::Shape{3}, {0, 0, 6144}); + auto strides_const2 = ov::op::v0::Constant::create(ov::element::i32, ov::Shape{3}, {1, 1, 1}); + auto strided_slice2 = std::make_shared(fc_compressed, + begin_const2, + end_const2, + strides_const2, + std::vector{1, 1, 0}, + std::vector{1, 1, 0}); + auto begin_const3 = ov::op::v0::Constant::create(ov::element::i32, ov::Shape{3}, {0, 0, 6144}); + auto end_const3 = ov::op::v0::Constant::create(ov::element::i32, ov::Shape{3}, {0, 0, 9216}); + auto strides_const3 = ov::op::v0::Constant::create(ov::element::i32, ov::Shape{3}, {1, 1, 1}); + auto strided_slice3 = std::make_shared(fc_compressed, + begin_const3, + end_const3, + strides_const3, + std::vector{1, 1, 0}, + std::vector{1, 1, 0}); + auto shape_const = ov::op::v0::Constant::create(ov::element::i32, ov::Shape{4}, {0, 0, 32, 96}); + auto reshape1 = std::make_shared(strided_slice1, shape_const, true); + auto reshape2 = std::make_shared(strided_slice2, shape_const, true); + auto reshape3 = std::make_shared(strided_slice3, shape_const, true); + auto result1 = std::make_shared(reshape1); + auto result2 = std::make_shared(reshape2); + auto result3 = std::make_shared(reshape3); + + model = std::make_shared(ov::ResultVector{ result1, result2, result3 }, ov::ParameterVector{ input }); + manager.register_pass(); + } + { + auto input = std::make_shared(ov::element::f32, ov::PartialShape{-1, -1, 3072}); + auto weight_const = std::make_shared(ov::element::u4, ov::Shape{9216, 3072}); + auto no_bias = 
std::make_shared(); + auto scale_const = std::make_shared(ov::element::f16, ov::Shape{9216, 24}); + auto zp_const = std::make_shared(ov::element::f16, ov::Shape{1, 1}); + auto fc_compressed = std::make_shared(input, weight_const, no_bias, scale_const, zp_const); + auto axis_const = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{1}, {2}); + auto split_lengths_const = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{3}, std::vector{3072, 3072, 3072}); + auto variadic_split = std::make_shared(fc_compressed, axis_const, split_lengths_const); + auto shape_const = ov::op::v0::Constant::create(ov::element::i32, ov::Shape{4}, {0, 0, 32, 96}); + auto reshape1 = std::make_shared(variadic_split->output(0), shape_const, true); + auto reshape2 = std::make_shared(variadic_split->output(1), shape_const, true); + auto reshape3 = std::make_shared(variadic_split->output(2), shape_const, true); + auto result1 = std::make_shared(reshape1); + auto result2 = std::make_shared(reshape2); + auto result3 = std::make_shared(reshape3); + + model_ref = std::make_shared(ov::ResultVector{ result1, result2, result3 }, ov::ParameterVector{ input }); + comparator.enable(FunctionsComparator::ATTRIBUTES); + } +} + +TEST_F(TransformationTestsF, ConvertStridedSlicesToVariadicSplit2) { + { + auto input = std::make_shared(ov::element::f32, ov::PartialShape{-1, -1, 3072}); + auto weight_const = std::make_shared(ov::element::u4, ov::Shape{9216, 3072}); + auto no_bias = std::make_shared(); + auto scale_const = std::make_shared(ov::element::f16, ov::Shape{9216, 24}); + auto zp_const = std::make_shared(ov::element::f16, ov::Shape{1, 1}); + auto fc_compressed = std::make_shared(input, weight_const, no_bias, scale_const, zp_const); + auto begin_const1 = ov::op::v0::Constant::create(ov::element::i32, ov::Shape{3}, {0, 0, 0}); + auto end_const1 = ov::op::v0::Constant::create(ov::element::i32, ov::Shape{3}, {0, 2, 3072}); + auto strides_const1 = ov::op::v0::Constant::create(ov::element::i32, 
ov::Shape{3}, {1, 1, 1}); + auto strided_slice1 = std::make_shared(fc_compressed, + begin_const1, + end_const1, + strides_const1, + std::vector{1, 0, 0}, + std::vector{1, 0, 0}); + auto begin_const2 = ov::op::v0::Constant::create(ov::element::i32, ov::Shape{3}, {0, 2, 3072}); + auto end_const2= ov::op::v0::Constant::create(ov::element::i32, ov::Shape{3}, {0, 4, 6144}); + auto strides_const2 = ov::op::v0::Constant::create(ov::element::i32, ov::Shape{3}, {1, 1, 1}); + auto strided_slice2 = std::make_shared(fc_compressed, + begin_const2, + end_const2, + strides_const2, + std::vector{1, 0, 0}, + std::vector{1, 0, 0}); + auto begin_const3 = ov::op::v0::Constant::create(ov::element::i32, ov::Shape{3}, {0, 4, 6144}); + auto end_const3 = ov::op::v0::Constant::create(ov::element::i32, ov::Shape{3}, {0, 7, 9216}); + auto strides_const3 = ov::op::v0::Constant::create(ov::element::i32, ov::Shape{3}, {1, 1, 1}); + auto strided_slice3 = std::make_shared(fc_compressed, + begin_const3, + end_const3, + strides_const3, + std::vector{1, 0, 0}, + std::vector{1, 0, 0}); + auto shape_const = ov::op::v0::Constant::create(ov::element::i32, ov::Shape{4}, {0, 0, 32, 96}); + auto reshape1 = std::make_shared(strided_slice1, shape_const, true); + auto reshape2 = std::make_shared(strided_slice2, shape_const, true); + auto reshape3 = std::make_shared(strided_slice3, shape_const, true); + auto result1 = std::make_shared(reshape1); + auto result2 = std::make_shared(reshape2); + auto result3 = std::make_shared(reshape3); + + model = std::make_shared(ov::ResultVector{ result1, result2, result3 }, ov::ParameterVector{ input }); + manager.register_pass(); + } + { + model_ref = model->clone(); + comparator.enable(FunctionsComparator::ATTRIBUTES); + } +} + +TEST_F(TransformationTestsF, ConvertStridedSlicesToVariadicSplit3) { + { + auto input = std::make_shared(ov::element::f32, ov::PartialShape{-1, -1, 3072}); + auto weight_const = std::make_shared(ov::element::u4, ov::Shape{9216, 3072}); + auto no_bias 
= std::make_shared(); + auto scale_const = std::make_shared(ov::element::f16, ov::Shape{9216, 24}); + auto zp_const = std::make_shared(ov::element::f16, ov::Shape{1, 1}); + auto fc_compressed = std::make_shared(input, weight_const, no_bias, scale_const, zp_const); + auto begin_const1 = ov::op::v0::Constant::create(ov::element::i32, ov::Shape{3}, {0, 0, 0}); + auto end_const1 = ov::op::v0::Constant::create(ov::element::i32, ov::Shape{3}, {0, 0, 3072}); + auto strides_const1 = ov::op::v0::Constant::create(ov::element::i32, ov::Shape{3}, {1, 1, 1}); + auto strided_slice1 = std::make_shared(fc_compressed, + begin_const1, + end_const1, + strides_const1, + std::vector{1, 1, 0}, + std::vector{1, 1, 0}); + auto begin_const2 = ov::op::v0::Constant::create(ov::element::i32, ov::Shape{3}, {0, 0, 3072}); + auto end_const2= ov::op::v0::Constant::create(ov::element::i32, ov::Shape{3}, {0, 0, 6144}); + auto strides_const2 = ov::op::v0::Constant::create(ov::element::i32, ov::Shape{3}, {1, 1, 1}); + auto strided_slice2 = std::make_shared(fc_compressed, + begin_const2, + end_const2, + strides_const2, + std::vector{1, 1, 0}, + std::vector{1, 1, 0}); + auto shape_const = ov::op::v0::Constant::create(ov::element::i32, ov::Shape{4}, {0, 0, 32, 96}); + auto reshape1 = std::make_shared(strided_slice1, shape_const, true); + auto reshape2 = std::make_shared(strided_slice2, shape_const, true); + auto result1 = std::make_shared(reshape1); + auto result2 = std::make_shared(reshape2); + auto result3 = std::make_shared(fc_compressed); + + model = std::make_shared(ov::ResultVector{ result1, result2, result3 }, ov::ParameterVector{ input }); + manager.register_pass(); + } + { + model_ref = model->clone(); + comparator.enable(FunctionsComparator::ATTRIBUTES); + } +} + + +} // namespace intel_gpu +} // namespace test +} // namespace ov From 82af474ee4b33a4cadd2815567951ea316cb4b93 Mon Sep 17 00:00:00 2001 From: Tomasz Jankowski Date: Thu, 4 Jul 2024 11:43:51 +0200 Subject: [PATCH 15/50] [Templ test] 
Tan: Enable whole Tensor comparison (#25320) ### Details: - Enabled whole Tensor comparison ### Tickets: - CVS-137211 --- src/plugins/template/tests/functional/op_reference/tan.cpp | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/plugins/template/tests/functional/op_reference/tan.cpp b/src/plugins/template/tests/functional/op_reference/tan.cpp index 7b54f9161dca1e..3188c8665e3395 100644 --- a/src/plugins/template/tests/functional/op_reference/tan.cpp +++ b/src/plugins/template/tests/functional/op_reference/tan.cpp @@ -33,14 +33,13 @@ struct TanParams { class ReferenceTanLayerTest : public testing::TestWithParam, public CommonReferenceTest { public: void SetUp() override { - legacy_compare = true; - auto params = GetParam(); + const auto& params = GetParam(); function = CreateFunction(params.pshape, params.inType); inputData = {params.inputData}; refOutData = {params.refData}; } static std::string getTestCaseName(const testing::TestParamInfo& obj) { - auto param = obj.param; + const auto& param = obj.param; std::ostringstream result; result << "shape=" << param.pshape << "_"; result << "iType=" << param.inType << "_"; From c89db83cf141c6de721b3d43b90f5772cbc3cad8 Mon Sep 17 00:00:00 2001 From: Maksim Kutakov Date: Thu, 4 Jul 2024 11:53:23 +0200 Subject: [PATCH 16/50] [CPU] Add Clamp before Convert when bool is replaced with u8 (#25253) ### Details: CPU plugin doesn't natively support `boolean` data type, thus it's replaced with `u8` during the convert precision transformation pass. However, casting numerical types to `bool` implies clamping the numerical values with the interval [0; 1] (either true or false), so to mimic this behavior a Clamp operation should be inserted before the modified convert. 
### Tickets: - CVS-145166 --- .../transformation_pipeline.cpp | 42 ++++++------ .../src/common/convert_bool_math.cpp | 64 +++++++++++++++++++ 2 files changed, 85 insertions(+), 21 deletions(-) create mode 100644 src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/common/convert_bool_math.cpp diff --git a/src/plugins/intel_cpu/src/transformations/transformation_pipeline.cpp b/src/plugins/intel_cpu/src/transformations/transformation_pipeline.cpp index bd3397245f5a26..01a1ac3e7b47fa 100644 --- a/src/plugins/intel_cpu/src/transformations/transformation_pipeline.cpp +++ b/src/plugins/intel_cpu/src/transformations/transformation_pipeline.cpp @@ -235,37 +235,37 @@ bool Transformations::fuse_type_to_convert(const std::shared_ptr& node return false; const auto& to = it->second; - // For Convert node, converting precision from floating point to boolean will lead to mathematical - // error, because here the output precision boolean is replaced by u8: - // - floating point value 0.01 is converted to be 1 for boolean, but 0 for u8 - need to insert Ceil. - // - floating point value 256 is converted to be 1 for boolean, but 0 for u8 - need to insert Min(x, UINT8_MAX) - // - floating point value -256 is converted to be 1 for boolean, but 0 for u8 - need to insert Abs before Min. - // Thus an Abs, Ceil and Min nodes should be added before the Convert node for this scenario. - if (convert->input(0).get_element_type().is_real() && - convert->get_convert_element_type() == ov::element::boolean && to.is_integral_number()) { + if (convert->get_convert_element_type() == ov::element::boolean && to.is_integral_number()) { + // For Convert node, converting precision from numerical data types to boolean will lead to mathematical + // error, because here the output precision boolean is replaced by u8: + // - floating point value 0.01 is converted to be 1 for boolean, but 0 for u8 - need to insert Ceil. 
+ // - either float or int values should be clipped with the interval [0; 1] to mimic bool cast behavior, i.e. + // 0 - is false, 1 - is true + // - to perform clamping correctly an Abs op should be inserted before Clamp + // Thus an Abs, Ceil and Clamp nodes should be added before the Convert node for this scenario. ov::pass::NodeRegistry reg; const auto& in_prec = convert->get_input_element_type(0); - auto data = convert->input_value(0).get_node_shared_ptr(); + auto parent_node = convert->input_value(0).get_node_shared_ptr(); auto item = precisions.find(in_prec); if (item != precisions.end()) { - // Add convert node for unsupported precision, such as FP64 - data = reg.make(data, item->second); + // Add convert node for unsupported precision, such as FP64 or INT64 + parent_node = reg.make(parent_node, item->second); } - const auto abs = reg.make(data); - const auto to_max_value = reg.make(ov::util::make_tensor_of_max_value(to)); - const auto to_max_convert = reg.make(to_max_value, abs->get_output_element_type(0)); - const auto min = reg.make(abs, to_max_convert); - const auto ceil = reg.make(min); - const auto new_convert = reg.make(ceil, to); + if (in_prec.is_signed()) { + parent_node = reg.make(parent_node); + } + if (in_prec.is_real()) { + parent_node = reg.make(parent_node); + } + parent_node = reg.make(parent_node, 0, 1); + const auto new_convert = reg.make(parent_node, to); new_convert->set_friendly_name(convert->get_friendly_name()); ov::copy_runtime_info(convert, reg.get()); ov::replace_node(convert, new_convert); return true; - } else { - convert->set_convert_element_type(to); - return true; } - return false; + convert->set_convert_element_type(to); + return true; } void Transformations::UpToLpt() { diff --git a/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/common/convert_bool_math.cpp b/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/common/convert_bool_math.cpp new file mode 100644 index 00000000000000..b3f08e11624f1b 
--- /dev/null +++ b/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/common/convert_bool_math.cpp @@ -0,0 +1,64 @@ +// Copyright (C) 2018-2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 + +#include "shared_test_classes/base/ov_subgraph.hpp" +#include "common_test_utils/ov_tensor_utils.hpp" + +namespace ov { +namespace test { +// ┌────────┐ +// │ Param │ +// └───┬────┘ +// │ f32 +// │ +// ┌───┴────┐ +// │Convert │ +// └───┬────┘ +// │ bool +// │ +// ┌───┴────┐ +// │Reshape │ +// └───┬────┘ +// │ bool +// │ +// ┌───┴────┐ ┌────────┐ +// │Convert │ │ Param │ +// └───┬────┘ └───┬────┘ +// │ f32 │ f32 +// │ │ +// │ ┌────────┐ │ +// └─────┤ Add ├───┘ +// └───┬────┘ +// │ f32 +// │ +// ┌───┴────┐ +// │Reshape │ +// └────────┘ + +class ConvertBoolMathTest : public SubgraphBaseStaticTest { +public: + void SetUp() override { + targetDevice = ov::test::utils::DEVICE_CPU; + + ov::ParameterVector inputParams{std::make_shared(ov::element::f32, ov::Shape{24, 7}), + std::make_shared(ov::element::f32, ov::Shape{3, 8, 7})}; + + auto inputConvert = std::make_shared(inputParams.front(), ov::element::boolean); + + auto reshapeConst = ov::opset10::Constant::create(ov::element::i32, ov::Shape{3}, {3, 8, 7}); + auto reshape = std::make_shared(inputConvert, reshapeConst, false); + + auto secondConvert = std::make_shared(reshape, ov::element::f32); + auto add = std::make_shared(secondConvert, inputParams.back()); + + ov::ResultVector results{std::make_shared(add)}; + function = std::make_shared(results, inputParams, "ConvertBoolMath"); + } +}; + +TEST_F(ConvertBoolMathTest, smoke_CompareWithRefs) { + run(); +} + +} // namespace test +} // namespace ov \ No newline at end of file From a0c6eb6a234f9304520ffd2402bcd4ebcdc901af Mon Sep 17 00:00:00 2001 From: Sofya Balandina Date: Thu, 4 Jul 2024 11:02:00 +0100 Subject: [PATCH 17/50] Move gtest assert to ov_assert (#24933) ### Details: - *item1* - *...* ### Tickets: - *CVS-136937* --- 
...sformations_after_split_transformation.cpp | 2 +- .../snippets/tests/src/lowering_utils.cpp | 2 +- .../conv_to_binary_conv.cpp | 10 +- ...onvert_compression_only_to_legacy_test.cpp | 4 +- .../convert_convertlike.cpp | 6 +- ...nvert_nms_gather_path_to_unsigned_test.cpp | 4 +- .../convert_quantize_dequantize.cpp | 4 +- .../dimension_tracking.cpp | 18 ++-- .../common_optimizations/divide_fusion.cpp | 4 +- .../fq_mul_fusion_test.cpp | 14 +-- .../fq_reshape_fusion.cpp | 2 +- .../fused_names_cleanup.cpp | 2 +- .../low_latency_v2_test.cpp | 16 +-- .../reduce_reshape_fusion_tests.cpp | 2 +- .../shuffle_channels_fusion_test.cpp | 2 +- .../common_optimizations/softmax_fusion.cpp | 6 +- .../common_optimizations/subtract_fusion.cpp | 6 +- .../transpose_sinking_test.cpp | 4 +- .../transpose_to_reshape_test.cpp | 2 +- .../weights_dequantize_to_fake_quantize.cpp | 2 +- .../tests/const_folding_prior_box.cpp | 12 +-- .../tests/control_flow/unroll_if_test.cpp | 12 +-- .../tests/control_flow/unroll_loop_test.cpp | 12 +-- .../unroll_tensor_iterator_test.cpp | 16 +-- .../tests/make_stateful_test.cpp | 8 +- .../batch_to_space_decomposition_test.cpp | 2 +- .../bidirectional_sequences_decomposition.cpp | 12 +-- .../convert_broadcast3_test.cpp | 16 +-- .../convert_broadcast_to_tiles_test.cpp | 4 +- .../convert_reduce_to_pooling_test.cpp | 4 +- ...nvert_scatter_elements_to_scatter_test.cpp | 2 +- .../convert_sequences_to_ti_test.cpp | 18 ++-- .../convert_ti_to_sequences_test.cpp | 18 ++-- .../op_conversions/fq_decomposition_test.cpp | 2 +- .../ngraph_depth_to_space_transform_test.cpp | 12 +-- .../ngraph_mode_decomposition_test.cpp | 2 +- .../smart_reshape/lstm_states_broadcast.cpp | 4 +- .../tests/smart_reshape/reshape_sinking.cpp | 4 +- .../tests/smart_reshape/sr_reshape_1d.cpp | 2 +- .../sr_strided_slice_squeeze.cpp | 4 +- .../tests/type_relaxed_tests.cpp | 12 +-- .../tests/utils/convert_precision.cpp | 68 ++++++------- src/core/tests/any.cpp | 45 ++++----- 
src/core/tests/bound_evaluate.cpp | 9 +- src/core/tests/frontend/frontend_manager.cpp | 11 ++- src/core/tests/ov_default_allocator_test.cpp | 27 +++--- src/core/tests/ov_tensor_test.cpp | 41 ++++---- src/core/tests/pass/constant_folding.cpp | 2 +- .../serialization/rt_info_serialization.cpp | 2 +- src/core/tests/preprocess.cpp | 2 +- src/core/tests/replace_node.cpp | 5 +- src/core/tests/type_prop/framework_node.cpp | 21 ++-- src/core/tests/type_prop/normalize_l2.cpp | 3 +- src/frontends/ir/tests/frontend_test.hpp | 1 + .../ir/tests/frontend_test_basic.cpp | 28 +++--- .../tests/frontend_test_with_extensions.cpp | 2 +- .../tests/pre_processing_deserialization.cpp | 2 +- .../tests/tensor_iterator_deserialization.cpp | 16 +-- src/frontends/onnx/tests/conversion.cpp | 2 +- .../onnx/tests/convert_partially_tests.cpp | 10 +- src/frontends/onnx/tests/load_from.cpp | 2 +- .../tests/onnx_editor_topological_sort.cpp | 12 +-- .../onnx/tests/onnx_ops_registration.cpp | 2 +- src/frontends/onnx/tests/onnx_utils.hpp | 1 + .../paddle/tests/convert_unsupported.cpp | 10 +- .../paddle/tests/incorrect_cut_model.cpp | 4 +- src/frontends/paddle/tests/places.cpp | 52 +++++----- .../paddle/tests/throw_in_conversion.cpp | 6 +- .../tensorflow/tests/convert_unsupported.cpp | 10 +- src/frontends/tensorflow/tests/telemetry.cpp | 4 +- .../tests/convert_unsupported.cpp | 12 +-- .../tests/frontend/shared/include/utils.hpp | 1 + .../tests/frontend/shared/src/basic_api.cpp | 40 ++++---- .../tests/frontend/shared/src/conversion.cpp | 8 +- .../shared/src/conversion_with_reference.cpp | 3 +- .../frontend/shared/src/convert_model.cpp | 20 ++-- .../shared/src/cut_specific_model.cpp | 62 ++++++------ .../frontend/shared/src/library_extension.cpp | 8 +- .../tests/frontend/shared/src/load_from.cpp | 38 ++++---- .../tests/frontend/shared/src/op_fuzzy.cpp | 2 +- .../frontend/shared/src/partial_shape.cpp | 12 +-- .../frontend/shared/src/set_element_type.cpp | 6 +- .../tests/functional/caching_test.cpp | 15 +-- 
.../get_supported_property_test.cpp | 13 +-- .../tests/functional/matmul_sr_tests.cpp | 18 ++-- .../functional/ov_register_plugin_test.cpp | 33 ++++--- .../tests/functional/task_executor_tests.cpp | 9 +- .../tests/unit/compiled_model_test.cpp | 12 +-- .../tests/unit/infer_request_test.cpp | 14 +-- ...sync_compiled_for_multiple_device_test.cpp | 4 +- .../functional/behavior/auto_func_test.hpp | 1 + .../functional/behavior/callback_test.cpp | 58 +++++------ .../behavior/infer_multi_threading_tests.cpp | 94 +++++++++--------- .../behavior/infer_schedule_test.cpp | 16 +-- .../tests/functional/behavior/io_tensor.cpp | 62 ++++++------ .../behavior/remote_tensor_test.cpp | 32 +++--- .../tests/functional/behavior/wait_test.cpp | 72 +++++++------- .../unit/compile_model_property_test.cpp | 4 +- src/plugins/auto/tests/unit/ctput_test.cpp | 6 +- .../tests/unit/default_perf_hint_test.cpp | 6 +- .../auto/tests/unit/dynamic_output_test.cpp | 8 +- .../auto/tests/unit/get_device_list.cpp | 4 +- .../tests/unit/include/auto_unit_test.hpp | 1 + .../infer_request_schedule_policy_test.cpp | 3 +- .../auto/tests/unit/life_time_test.cpp | 8 +- .../auto/tests/unit/log_utils_format_test.cpp | 4 +- .../auto/tests/unit/release_helper_test.cpp | 10 +- .../auto/tests/unit/runtime_fallback_test.cpp | 12 +-- .../tests/unit/select_device_failed_test.cpp | 2 +- .../unit/startup_fallback_property_test.cpp | 2 +- .../auto/tests/unit/stateful_model_test.cpp | 2 +- .../tests/unit/async_infer_request_test.cpp | 2 +- ...ompile_model_create_infer_request_test.cpp | 2 +- .../unit/compile_model_get_property_test.cpp | 4 +- .../compile_model_get_runtime_model_test.cpp | 4 +- .../unit/compile_model_set_property_test.cpp | 4 +- .../auto_batch/tests/unit/mock_common.hpp | 2 +- .../tests/unit/plugin_compile_model_test.cpp | 8 +- .../tests/unit/plugin_get_property_test.cpp | 2 +- .../tests/unit/plugin_query_model_test.cpp | 2 +- .../tests/unit/plugin_set_property_test.cpp | 4 +- 
.../tests/unit/sync_infer_request_test.cpp | 2 +- .../hetero/tests/functional/hetero_tests.hpp | 1 + .../tests/functional/properties_tests.cpp | 8 +- .../tests/functional/query_model_tests.cpp | 2 +- .../hetero/tests/unit/subgraph_collector.cpp | 6 +- .../ov_executable_network/properties.cpp | 74 +++++++------- .../custom/behavior/ov_plugin/properties.cpp | 97 ++++++++++--------- .../src/fuse_transpose_reorder.cpp | 2 +- .../functional/utils/properties_test.hpp | 1 + .../intel_cpu/tests/unit/cpu_tensor_test.cpp | 3 +- .../tests/unit/cpu_tensor_test_ext.cpp | 11 ++- .../tests/unit/dnnl_memory_desc_test.cpp | 7 +- .../intel_cpu/tests/unit/dnnl_memory_test.cpp | 11 ++- .../intel_cpu/tests/unit/gemm_api_test.cpp | 7 +- .../intel_cpu/tests/unit/registers_pool.cpp | 15 +-- src/plugins/intel_cpu/tests/unit/rt_cache.cpp | 19 ++-- .../functional/behavior/infer_request.cpp | 64 ++++++------ .../behavior/inference_precision.cpp | 2 +- .../concurrency/gpu_concurrency_tests.cpp | 8 +- .../dx11_remote_ctx_test.cpp | 10 +- .../gpu_remote_tensor_tests.cpp | 64 ++++++------ .../behavior/ov_plugin/properties_tests.cpp | 76 +++++++-------- .../tests/unit/module_tests/engine_test.cpp | 10 +- .../tests/unit/module_tests/network_test.cpp | 20 ++-- .../weights_reorder_factory_test.cpp | 8 +- .../passes/prepare_buffer_fusing_test.cpp | 4 +- .../tests/unit/passes/reorder_inputs_test.cpp | 2 +- .../unit/test_cases/condition_gpu_test.cpp | 4 +- .../tests/unit/test_utils/test_utils.h | 1 + ...compose_reduce_for_false_keepdims_test.cpp | 2 +- .../subgraphs_dumper/tests/base_test.hpp | 1 + .../subgraphs_dumper/tests/cache/cache.cpp | 18 ++-- .../tests/cache/graph_cache.cpp | 4 +- .../subgraphs_dumper/tests/cache/meta.cpp | 60 ++++++------ .../subgraphs_dumper/tests/cache/op_cache.cpp | 6 +- .../tests/matchers/single_op/manager.cpp | 16 +-- .../tests/matchers/subgraph/manager.cpp | 12 +-- .../tests/matchers/subgraph/subgraph.cpp | 30 +++--- .../subgraphs_dumper/tests/utils/model.cpp | 4 +- 
.../tests/utils/model_comparator.cpp | 30 +++--- .../subgraphs_dumper/tests/utils/node.cpp | 2 +- .../compiled_model/compiled_model_base.hpp | 14 +-- .../ov_infer_request/properties_tests.hpp | 6 +- .../behavior/compiled_model/properties.cpp | 2 +- .../ov_infer_request/batched_tensors.cpp | 6 +- .../behavior/ov_infer_request/io_tensor.cpp | 4 +- .../ov_infer_request/perf_counters.cpp | 4 +- .../src/behavior/ov_plugin/caching_tests.cpp | 18 ++-- .../behavior/ov_plugin/properties_tests.cpp | 4 +- .../shared/src/behavior/ov_plugin/remote.cpp | 28 +++--- .../common_test_utils/src/ov_test_utils.cpp | 2 +- .../tests/ov_tensor_utils.cpp | 15 +-- 173 files changed, 1162 insertions(+), 1126 deletions(-) diff --git a/src/common/low_precision_transformations/tests/transformations_after_split_transformation.cpp b/src/common/low_precision_transformations/tests/transformations_after_split_transformation.cpp index 432eec126ad9fa..a2b1cdb7ae005e 100644 --- a/src/common/low_precision_transformations/tests/transformations_after_split_transformation.cpp +++ b/src/common/low_precision_transformations/tests/transformations_after_split_transformation.cpp @@ -176,7 +176,7 @@ TEST_P(TransformationsAfterSplitTransformation, Run) { SimpleLowPrecisionTransformer transformer; getTransformerWithTransformationByName(transformer, params, layerName); - ASSERT_NO_THROW(transformer.transform(model)); + OV_ASSERT_NO_THROW(transformer.transform(model)); } const std::vector transformationNames = { diff --git a/src/common/snippets/tests/src/lowering_utils.cpp b/src/common/snippets/tests/src/lowering_utils.cpp index 796290c3215766..136dccb5fac667 100644 --- a/src/common/snippets/tests/src/lowering_utils.cpp +++ b/src/common/snippets/tests/src/lowering_utils.cpp @@ -82,7 +82,7 @@ void LoweringTests::TearDown() { model_ref = cloned_model; } manager.run_passes(model); - ASSERT_NO_THROW(check_rt_info(model)); + OV_ASSERT_NO_THROW(check_rt_info(model)); if 
(comparator.should_compare(FunctionsComparator::ACCURACY)) { auto acc_comparator = FunctionsComparator::no_default(); diff --git a/src/common/transformations/tests/common_optimizations/conv_to_binary_conv.cpp b/src/common/transformations/tests/common_optimizations/conv_to_binary_conv.cpp index d0fd6c3f361f50..b3e1ef9971fa72 100644 --- a/src/common/transformations/tests/common_optimizations/conv_to_binary_conv.cpp +++ b/src/common/transformations/tests/common_optimizations/conv_to_binary_conv.cpp @@ -47,7 +47,7 @@ TEST(TransformationTests, ConvToBinaryConvOutputLowZeroOutputHighOne) { m.register_pass(); m.register_pass(); m.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { @@ -106,7 +106,7 @@ TEST(TransformationTests, ConvToBinaryConvOutputLowMinusOneOutputHighOne) { m.register_pass(); m.register_pass(); m.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { @@ -163,7 +163,7 @@ TEST(TransformationTests, NegativeConvToBinaryConvInvalidWeights) { m.register_pass(); m.register_pass(); m.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { @@ -216,7 +216,7 @@ TEST(TransformationTests, NegativeConvToBinaryConvInvalidLevels) { m.register_pass(); m.register_pass(); m.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { @@ -269,7 +269,7 @@ TEST(TransformationTests, NegativeConvToBinaryConvOutputLowHigh) { m.register_pass(); m.register_pass(); m.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { diff --git a/src/common/transformations/tests/common_optimizations/convert_compression_only_to_legacy_test.cpp b/src/common/transformations/tests/common_optimizations/convert_compression_only_to_legacy_test.cpp index 65e781c6d50699..593e6ff1943032 100644 --- a/src/common/transformations/tests/common_optimizations/convert_compression_only_to_legacy_test.cpp +++ 
b/src/common/transformations/tests/common_optimizations/convert_compression_only_to_legacy_test.cpp @@ -42,7 +42,7 @@ TEST(TransformationTests, ConvertCompressionOnlyToLegacy) { manager.register_pass(); manager.register_pass(); manager.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { @@ -86,7 +86,7 @@ TEST(TransformationTests, ConvertCompressionOnlyToLegacyNoConvertion) { manager.register_pass(); manager.register_pass(); manager.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { diff --git a/src/common/transformations/tests/common_optimizations/convert_convertlike.cpp b/src/common/transformations/tests/common_optimizations/convert_convertlike.cpp index ab239bf5d5ae7d..785559e4fef9e6 100644 --- a/src/common/transformations/tests/common_optimizations/convert_convertlike.cpp +++ b/src/common/transformations/tests/common_optimizations/convert_convertlike.cpp @@ -33,7 +33,7 @@ TEST(TransformationTests, ConvertConvertLike) { m.register_pass(); m.register_pass(); m.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { @@ -62,7 +62,7 @@ TEST(TransformationTests, ConvertConvertLike2) { m.register_pass(); m.register_pass(); m.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { @@ -89,7 +89,7 @@ TEST(TransformationTests, ConvertConvertLike_Negative) { m.register_pass(); m.register_pass(); m.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { diff --git a/src/common/transformations/tests/common_optimizations/convert_nms_gather_path_to_unsigned_test.cpp b/src/common/transformations/tests/common_optimizations/convert_nms_gather_path_to_unsigned_test.cpp index 3076e32646eaa1..00a2fd1d991d98 100644 --- a/src/common/transformations/tests/common_optimizations/convert_nms_gather_path_to_unsigned_test.cpp +++ 
b/src/common/transformations/tests/common_optimizations/convert_nms_gather_path_to_unsigned_test.cpp @@ -202,7 +202,7 @@ TEST(TransformationTests, test_convert_to_unsigned_nms_gather_3) { manager.register_pass(); manager.register_pass(); manager.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); ASSERT_EQ(count_ops_of_type(f), 0); } @@ -260,7 +260,7 @@ TEST(TransformationTests, test_convert_to_unsigned_nms_gather_with_if_condition) manager.register_pass(); manager.register_pass(); manager.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); const auto& ops = f->get_ops(); const auto& gather_it = find(ops.begin(), ops.end(), target_gather); diff --git a/src/common/transformations/tests/common_optimizations/convert_quantize_dequantize.cpp b/src/common/transformations/tests/common_optimizations/convert_quantize_dequantize.cpp index 543ed3dd4e88e2..d64ae7c88a9a48 100644 --- a/src/common/transformations/tests/common_optimizations/convert_quantize_dequantize.cpp +++ b/src/common/transformations/tests/common_optimizations/convert_quantize_dequantize.cpp @@ -83,7 +83,7 @@ void positive_test(const Shape& data_shape, m.register_pass(); m.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { @@ -229,7 +229,7 @@ void negative_test(const Shape& data_shape, m.register_pass(); m.register_pass(); m.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { diff --git a/src/common/transformations/tests/common_optimizations/dimension_tracking.cpp b/src/common/transformations/tests/common_optimizations/dimension_tracking.cpp index 74c51c8f3bcfe1..c553851651dd3c 100644 --- a/src/common/transformations/tests/common_optimizations/dimension_tracking.cpp +++ b/src/common/transformations/tests/common_optimizations/dimension_tracking.cpp @@ -83,7 +83,7 @@ TEST(TransformationTests, AutoBatch_FindBatch_Transpose_and_Convolution) { 
m.register_pass(); m.register_pass(); m.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); const auto& shape = data->get_partial_shape(); ASSERT_TRUE(!shape[0].get_symbol()) << shape; @@ -118,7 +118,7 @@ TEST(TransformationTests, AutoBatch_LabelPropagation_Convolution_Reshape) { m.register_pass(); m.register_pass(); m.run_passes(model); - ASSERT_NO_THROW(check_rt_info(model)); + OV_ASSERT_NO_THROW(check_rt_info(model)); const auto& shape = data->get_partial_shape(); ASSERT_TRUE(shape[0].get_symbol()) << shape; @@ -144,7 +144,7 @@ TEST(TransformationTests, AutoBatch_FindBatch_SingleMultiply) { m.register_pass(); m.register_pass(); m.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); const auto& shape = data->get_partial_shape(); ASSERT_TRUE(shape[0].get_symbol()) << shape; @@ -174,7 +174,7 @@ TEST(TransformationTests, AutoBatch_FindBatch_Two_Outputs) { m.register_pass(); m.register_pass(); m.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); const auto& shape = data->get_partial_shape(); ASSERT_TRUE(shape[0].get_symbol()) << shape; @@ -204,7 +204,7 @@ TEST(TransformationTests, AutoBatch_FindBatch_TwoOutputsReversed) { m.register_pass(); m.register_pass(); m.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); const auto& shape = data->get_partial_shape(); ASSERT_TRUE(shape[0].get_symbol()) << shape; @@ -238,7 +238,7 @@ TEST(TransformationTests, AutoBatch_FindBatch_IndependentBranchesConcated) { m.register_pass(); m.register_pass(); m.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); const auto& shape = data->get_partial_shape(); ASSERT_TRUE(shape[0].get_symbol()) << shape; @@ -271,7 +271,7 @@ TEST(TransformationTests, AutoBatch_FindBatch_TwoConvNetwork) { m.register_pass(); m.register_pass(); m.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + 
OV_ASSERT_NO_THROW(check_rt_info(f)); const auto& shape = data->get_partial_shape(); ASSERT_TRUE(shape[0].get_symbol()) << shape; @@ -299,7 +299,7 @@ TEST(TransformationTests, AutoBatch_FindBatch_NegativeTracking) { m.register_pass(); m.register_pass(false, false); m.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); const auto& shape = data->get_partial_shape(); ASSERT_TRUE(shape[0].get_symbol()) << shape; @@ -319,7 +319,7 @@ TEST(TransformationTests, AutoBatch_FindBatch_AutoBatch_LabelPropagation_DO_deta m.register_pass(); m.register_pass(true); m.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); const auto& shape = data->get_partial_shape(); ASSERT_TRUE(shape[0].get_symbol()) << shape; diff --git a/src/common/transformations/tests/common_optimizations/divide_fusion.cpp b/src/common/transformations/tests/common_optimizations/divide_fusion.cpp index 6ff8634ca67f55..093f01c8dfe294 100644 --- a/src/common/transformations/tests/common_optimizations/divide_fusion.cpp +++ b/src/common/transformations/tests/common_optimizations/divide_fusion.cpp @@ -34,7 +34,7 @@ TEST(TransformationTests, DivideFusion) { m.register_pass(); m.register_pass(); m.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { @@ -67,7 +67,7 @@ TEST(TransformationTests, DivideFusionNegative) { m.register_pass(); m.register_pass(); m.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { diff --git a/src/common/transformations/tests/common_optimizations/fq_mul_fusion_test.cpp b/src/common/transformations/tests/common_optimizations/fq_mul_fusion_test.cpp index 25e8f864f78076..4bf4b97ca58cfb 100644 --- a/src/common/transformations/tests/common_optimizations/fq_mul_fusion_test.cpp +++ b/src/common/transformations/tests/common_optimizations/fq_mul_fusion_test.cpp @@ -77,7 +77,7 @@ TEST_P(FQMulFusion, ExpectFusion) { 
manager.register_pass(unh); manager.run_passes(m_model); - ASSERT_NO_THROW(check_rt_info(m_model)); + OV_ASSERT_NO_THROW(check_rt_info(m_model)); auto fc = FunctionsComparator::no_default().enable(FunctionsComparator::PRECISIONS).enable(FunctionsComparator::NODES); @@ -219,7 +219,7 @@ TEST(FQMulFusion_NonConstInputs, AllInputsNonConst) { manager.register_pass(); manager.run_passes(model); - ASSERT_NO_THROW(check_rt_info(model)); + OV_ASSERT_NO_THROW(check_rt_info(model)); const auto res = compare_functions(model, expected_function); ASSERT_TRUE(res.first) << res.second; @@ -253,7 +253,7 @@ TEST(FQMulFusion_NonConstInputs, FQ_out_high_const) { manager.register_pass(); manager.run_passes(model); - ASSERT_NO_THROW(check_rt_info(model)); + OV_ASSERT_NO_THROW(check_rt_info(model)); const auto res = compare_functions(model, expected_function); ASSERT_TRUE(res.first) << res.second; @@ -286,7 +286,7 @@ TEST(FQMulFusion_FQ_Mul_inputs, FQ_out_to_mul_input_2) { manager.register_pass(); manager.run_passes(model); - ASSERT_NO_THROW(check_rt_info(model)); + OV_ASSERT_NO_THROW(check_rt_info(model)); const auto res = compare_functions(model, expected_function); ASSERT_TRUE(res.first) << res.second; @@ -320,7 +320,7 @@ TEST(FQMulFusion_FQ_Mul_inputs, FQ_out_to_mul_input_2_param) { manager.register_pass(); manager.run_passes(model); - ASSERT_NO_THROW(check_rt_info(model)); + OV_ASSERT_NO_THROW(check_rt_info(model)); const auto res = compare_functions(model, expected_function); ASSERT_TRUE(res.first) << res.second; @@ -346,7 +346,7 @@ TEST(TransformationTests, FakeQuantizeMultiplyFusionNegative) { manager.register_pass(); manager.run_passes(model); - ASSERT_NO_THROW(check_rt_info(model)); + OV_ASSERT_NO_THROW(check_rt_info(model)); ASSERT_EQ(model->get_output_shape(0), Shape({1, 300, 16})); } @@ -379,7 +379,7 @@ TEST(TransformationTests, FakeQuantizeMultiplyFusionMulConstWithEqualValues) { manager.register_pass(); manager.run_passes(model); - ASSERT_NO_THROW(check_rt_info(model)); + 
OV_ASSERT_NO_THROW(check_rt_info(model)); const auto res = compare_functions(model, expected_function, true); ASSERT_TRUE(res.first) << res.second; diff --git a/src/common/transformations/tests/common_optimizations/fq_reshape_fusion.cpp b/src/common/transformations/tests/common_optimizations/fq_reshape_fusion.cpp index 7389e12299eab1..cc4ac2981b6799 100644 --- a/src/common/transformations/tests/common_optimizations/fq_reshape_fusion.cpp +++ b/src/common/transformations/tests/common_optimizations/fq_reshape_fusion.cpp @@ -122,7 +122,7 @@ TEST_P(FQReshapeFusionTests, ReshapeMatMul) { manager.register_pass(unh); manager.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); auto fc = FunctionsComparator::no_default().enable(FunctionsComparator::PRECISIONS).enable(FunctionsComparator::NODES); diff --git a/src/common/transformations/tests/common_optimizations/fused_names_cleanup.cpp b/src/common/transformations/tests/common_optimizations/fused_names_cleanup.cpp index 6ca9b21d7eed60..df2ffa5062fa66 100644 --- a/src/common/transformations/tests/common_optimizations/fused_names_cleanup.cpp +++ b/src/common/transformations/tests/common_optimizations/fused_names_cleanup.cpp @@ -33,7 +33,7 @@ TEST(TransformationTests, FusedNamesCleanup) { manager.register_pass(); manager.register_pass(); manager.run_passes(model); - ASSERT_NO_THROW(check_rt_info(model)); + OV_ASSERT_NO_THROW(check_rt_info(model)); manager.register_pass(); manager.run_passes(model); diff --git a/src/common/transformations/tests/common_optimizations/low_latency_v2_test.cpp b/src/common/transformations/tests/common_optimizations/low_latency_v2_test.cpp index 3be5c365947758..39fded386b3b26 100644 --- a/src/common/transformations/tests/common_optimizations/low_latency_v2_test.cpp +++ b/src/common/transformations/tests/common_optimizations/low_latency_v2_test.cpp @@ -100,7 +100,7 @@ TEST(TransformationTests, LowLatency2_LSTM) { manager.register_pass(); manager.register_pass(); 
manager.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { auto Xi = std::make_shared(element::f32, Shape{1, 1, 16}); @@ -188,7 +188,7 @@ TEST(TransformationTests, LowLatency2_GRU) { manager.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { auto Xi = std::make_shared(element::f32, Shape{1, 1, 16}); @@ -269,7 +269,7 @@ TEST(TransformationTests, LowLatency2_RNN) { manager.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { auto Xi = std::make_shared(element::f32, Shape{1, 1, 16}); @@ -346,7 +346,7 @@ TEST(TransformationTests, LowLatency2_LSTMReshape) { manager.register_pass(); manager.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { auto Xi = std::make_shared(element::f32, Shape{1, 1, 16}); @@ -431,7 +431,7 @@ TEST(TransformationTests, LowLatency2_LSTM_Loop) { manager.register_pass(); manager.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { auto Xi = std::make_shared(element::f32, Shape{1, 1, 16}); @@ -510,7 +510,7 @@ TEST(TransformationTests, LowLatency2_LSTM_several_iterations) { manager.register_pass(); manager.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } // TensorIterator not unrolled. 
@@ -625,7 +625,7 @@ TEST(TransformationTests, LowLatency2_LSTM_Loop_Reshape) { manager.register_pass(); manager.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { auto Xi = std::make_shared(element::f32, Shape{1, 1, 16}); @@ -706,7 +706,7 @@ TEST(TransformationTests, LowLatency2_LSTM_Loop_several_iterations) { manager.register_pass(true); manager.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { auto X = std::make_shared(element::f32, Shape{10, 1, 16}); diff --git a/src/common/transformations/tests/common_optimizations/reduce_reshape_fusion_tests.cpp b/src/common/transformations/tests/common_optimizations/reduce_reshape_fusion_tests.cpp index 6bc4e678ec961c..732b3f98535290 100644 --- a/src/common/transformations/tests/common_optimizations/reduce_reshape_fusion_tests.cpp +++ b/src/common/transformations/tests/common_optimizations/reduce_reshape_fusion_tests.cpp @@ -172,5 +172,5 @@ TEST(TransformationTests, ReduceMeanReshapeFusionAssertValidOutputShape) { manager.set_per_pass_validation(false); manager.register_pass(); manager.register_pass(); - ASSERT_NO_THROW(manager.run_passes(model)); + OV_ASSERT_NO_THROW(manager.run_passes(model)); } diff --git a/src/common/transformations/tests/common_optimizations/shuffle_channels_fusion_test.cpp b/src/common/transformations/tests/common_optimizations/shuffle_channels_fusion_test.cpp index 6ff71612e1240e..dd7d46a2a08ed8 100644 --- a/src/common/transformations/tests/common_optimizations/shuffle_channels_fusion_test.cpp +++ b/src/common/transformations/tests/common_optimizations/shuffle_channels_fusion_test.cpp @@ -71,7 +71,7 @@ class ShuffleChannelsFusion : public ::testing::Test, manager.register_pass(values.check_values); manager.register_pass(unh); manager.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } static std::string getTestCaseName(testing::TestParamInfo obj) { diff --git 
a/src/common/transformations/tests/common_optimizations/softmax_fusion.cpp b/src/common/transformations/tests/common_optimizations/softmax_fusion.cpp index b318195fde2f90..2ac902bb99fd0a 100644 --- a/src/common/transformations/tests/common_optimizations/softmax_fusion.cpp +++ b/src/common/transformations/tests/common_optimizations/softmax_fusion.cpp @@ -43,7 +43,7 @@ TEST_P(SoftmaxFusionFixture, SoftmaxFusion) { m.register_pass(); m.register_pass(unh); m.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { auto data = std::make_shared(element::f32, shape); @@ -88,7 +88,7 @@ TEST_P(SoftmaxFusionSimplePatternFixture, SoftmaxFusionSimplePatternTest) { m.register_pass(); m.register_pass(unh); m.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { auto data = std::make_shared(element::f32, shape); @@ -143,7 +143,7 @@ TEST_P(NegativeSoftmaxFusionFixture, NegativeSoftmaxFusion) { m.register_pass(); m.register_pass(unh); m.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); ASSERT_EQ(count_ops_of_type(f), 1); ASSERT_EQ(count_ops_of_type(f), 1); } diff --git a/src/common/transformations/tests/common_optimizations/subtract_fusion.cpp b/src/common/transformations/tests/common_optimizations/subtract_fusion.cpp index 6a6c56aa3c9dd6..ff06587ba022c6 100644 --- a/src/common/transformations/tests/common_optimizations/subtract_fusion.cpp +++ b/src/common/transformations/tests/common_optimizations/subtract_fusion.cpp @@ -35,7 +35,7 @@ TEST(TransformationTests, SubtractFusionMultiply) { m.register_pass(); m.register_pass(); m.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { @@ -68,7 +68,7 @@ TEST(TransformationTests, SubtractFusionMultiplyNegative) { m.register_pass(); m.register_pass(); m.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { @@ -102,7 +102,7 @@ 
TEST(TransformationTests, SubtractFusionNeg) { m.register_pass(); m.register_pass(); m.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { diff --git a/src/common/transformations/tests/common_optimizations/transpose_sinking_test.cpp b/src/common/transformations/tests/common_optimizations/transpose_sinking_test.cpp index d8f9b9eb9af7d2..f551d753295b0d 100644 --- a/src/common/transformations/tests/common_optimizations/transpose_sinking_test.cpp +++ b/src/common/transformations/tests/common_optimizations/transpose_sinking_test.cpp @@ -96,7 +96,7 @@ TEST_P(TransposeSinkingFQ, TransposeFQReduce) { manager.register_pass(); manager.register_pass(unh); manager.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); auto fc = FunctionsComparator::no_default() .enable(FunctionsComparator::NODES) @@ -226,7 +226,7 @@ TEST_P(TransposeSinking, TransposeReduction) { manager.register_pass(); manager.register_pass(unh); manager.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); auto fc = FunctionsComparator::no_default() .enable(FunctionsComparator::NODES) diff --git a/src/common/transformations/tests/common_optimizations/transpose_to_reshape_test.cpp b/src/common/transformations/tests/common_optimizations/transpose_to_reshape_test.cpp index 74740f53d184e5..64bf86a16284da 100644 --- a/src/common/transformations/tests/common_optimizations/transpose_to_reshape_test.cpp +++ b/src/common/transformations/tests/common_optimizations/transpose_to_reshape_test.cpp @@ -108,7 +108,7 @@ TEST_P(TransposeToReshapeTests, CompareFunctions) { m.register_pass(unh); m.run_passes(f); f->validate_nodes_and_infer_types(); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); auto fc = FunctionsComparator::no_default().enable(FunctionsComparator::NODES).enable(FunctionsComparator::PRECISIONS); diff --git 
a/src/common/transformations/tests/common_optimizations/weights_dequantize_to_fake_quantize.cpp b/src/common/transformations/tests/common_optimizations/weights_dequantize_to_fake_quantize.cpp index 36bea291864a06..a214dadbb6a7fa 100644 --- a/src/common/transformations/tests/common_optimizations/weights_dequantize_to_fake_quantize.cpp +++ b/src/common/transformations/tests/common_optimizations/weights_dequantize_to_fake_quantize.cpp @@ -111,7 +111,7 @@ TEST_P(TranslateNewWeightFormatToOldOne, ReshapeMatMul) { m.register_pass(); m.register_pass(unh); m.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); auto fc = FunctionsComparator::no_default() .enable(FunctionsComparator::NODES) diff --git a/src/common/transformations/tests/const_folding_prior_box.cpp b/src/common/transformations/tests/const_folding_prior_box.cpp index 768a67810887fe..26d47f39463eb4 100644 --- a/src/common/transformations/tests/const_folding_prior_box.cpp +++ b/src/common/transformations/tests/const_folding_prior_box.cpp @@ -42,7 +42,7 @@ TEST(TransformationTests, ConstFoldingPriorBox) { manager.register_pass(); manager.register_pass(); manager.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { @@ -89,7 +89,7 @@ TEST(TransformationTests, ConstFoldingPriorBoxClustered) { manager.register_pass(); manager.register_pass(); manager.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { @@ -163,7 +163,7 @@ TEST(TransformationTests, ConstFoldingPriorBoxSubgraph) { manager.register_pass(); manager.register_pass(); manager.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { @@ -225,7 +225,7 @@ TEST(TransformationTests, ConstFoldingPriorBoxClusteredSubgraph) { manager.register_pass(); manager.register_pass(); manager.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { @@ -281,7 +281,7 @@ 
TEST(TransformationTests, ConstFoldingPriorBox8) { manager.register_pass(); manager.register_pass(); manager.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { @@ -354,7 +354,7 @@ TEST(TransformationTests, ConstFoldingPriorBox8Subgraph) { manager.register_pass(); manager.register_pass(); manager.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { diff --git a/src/common/transformations/tests/control_flow/unroll_if_test.cpp b/src/common/transformations/tests/control_flow/unroll_if_test.cpp index 3ebaa7edf540c0..9c3ac5ea677802 100644 --- a/src/common/transformations/tests/control_flow/unroll_if_test.cpp +++ b/src/common/transformations/tests/control_flow/unroll_if_test.cpp @@ -89,7 +89,7 @@ TEST(TransformationTests, UnrollIfCondIsTrue) { manager.register_pass(); manager.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { f_ref = get_then_body(); } @@ -120,7 +120,7 @@ TEST(TransformationTests, UnrollIfCondIsFalse) { manager.register_pass(); manager.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { f_ref = get_else_body(); } @@ -171,7 +171,7 @@ TEST(TransformationTests, UnrollIfWithSplitInput) { manager.register_pass(); manager.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { @@ -214,7 +214,7 @@ TEST(TransformationTests, UnrollNestedIfThenBody) { manager.register_pass(); manager.register_pass(); manager.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { f_ref = get_then_body(); } @@ -253,7 +253,7 @@ TEST(TransformationTests, UnrollIfCondIsTrueMultiOutput) { manager.register_pass(); manager.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { @@ -337,7 +337,7 @@ TEST(TransformationTests, UnrollIfInsideIf) { manager.register_pass(); manager.register_pass(); 
manager.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { diff --git a/src/common/transformations/tests/control_flow/unroll_loop_test.cpp b/src/common/transformations/tests/control_flow/unroll_loop_test.cpp index 18a0216888acd4..9c099863b62d34 100644 --- a/src/common/transformations/tests/control_flow/unroll_loop_test.cpp +++ b/src/common/transformations/tests/control_flow/unroll_loop_test.cpp @@ -69,7 +69,7 @@ TEST(TransformationTests, UnrollLoopGRUCell) { manager.register_pass(); manager.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { @@ -153,7 +153,7 @@ TEST(TransformationTests, UnrollLoopRNNCell) { manager.register_pass(); manager.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { @@ -240,7 +240,7 @@ TEST(TransformationTests, UnrollLoopLSTMCell) { manager.register_pass(); manager.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { @@ -325,7 +325,7 @@ TEST(TransformationTests, UnrollLoopGRUCellSingleIteration) { manager.register_pass(); manager.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { @@ -403,7 +403,7 @@ TEST(TransformationTests, UnrollLoopRNNCellSingleIteration) { manager.register_pass(); manager.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { @@ -483,7 +483,7 @@ TEST(TransformationTests, UnrollLoopLSTMCellSingleIteration) { manager.register_pass(); manager.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { diff --git a/src/common/transformations/tests/control_flow/unroll_tensor_iterator_test.cpp b/src/common/transformations/tests/control_flow/unroll_tensor_iterator_test.cpp index 41f68b584d9ed2..9ff2be2e754e95 100644 --- a/src/common/transformations/tests/control_flow/unroll_tensor_iterator_test.cpp +++ 
b/src/common/transformations/tests/control_flow/unroll_tensor_iterator_test.cpp @@ -67,7 +67,7 @@ TEST(TransformationTests, UnrollTensorIteratorGRUCell) { manager.register_pass(); manager.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { @@ -147,7 +147,7 @@ TEST(TransformationTests, UnrollTensorIteratorRNNCell) { manager.register_pass(); manager.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { @@ -230,7 +230,7 @@ TEST(TransformationTests, UnrollTensorIteratorLSTMCell) { manager.register_pass(); manager.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { @@ -311,7 +311,7 @@ TEST(TransformationTests, UnrollTensorIteratorGRUCellSingleIteration) { manager.register_pass(); manager.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { @@ -385,7 +385,7 @@ TEST(TransformationTests, UnrollTensorIteratorRNNCellSingleIteration) { manager.register_pass(); manager.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { @@ -461,7 +461,7 @@ TEST(TransformationTests, UnrollTensorIteratorLSTMCellSingleIterationSingleItera manager.register_pass(); manager.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { @@ -546,7 +546,7 @@ TEST(TransformationTests, CheckTensorNamesAfterConvertToTIAndUnrolling) { m.register_pass(); // inserts Unsqueeze after TI m.register_pass(); m.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); std::vector names_after; collect_legacy_tensor_names(f, names_after); @@ -612,7 +612,7 @@ TEST(TransformationTests, CheckTensorNamesAfterUnrolling) { m.register_pass(); m.register_pass(); m.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); std::vector names_after; collect_legacy_tensor_names(f, names_after); diff --git 
a/src/common/transformations/tests/make_stateful_test.cpp b/src/common/transformations/tests/make_stateful_test.cpp index d8bf71ad363641..daabf2d04839f9 100644 --- a/src/common/transformations/tests/make_stateful_test.cpp +++ b/src/common/transformations/tests/make_stateful_test.cpp @@ -118,7 +118,7 @@ TEST(TransformationTests, make_stateful_by_tensor_name) { manager.register_pass(tensor_names); manager.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { f_ref = get_ref_model(true, false); } @@ -137,7 +137,7 @@ TEST(TransformationTests, make_stateful_by_param_res) { manager.register_pass(); manager.register_pass(pairs); manager.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { f_ref = get_ref_model(true, true); } @@ -189,7 +189,7 @@ TEST(TransformationTests, make_stateful_one_out_to_several_results_by_tensor_nam manager.register_pass(tensor_names); manager.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { f_ref = get_ref_model(false, false); } @@ -208,7 +208,7 @@ TEST(TransformationTests, make_stateful_one_out_to_several_results_by_param_res) manager.register_pass(); manager.register_pass(pairs); manager.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { f_ref = get_ref_model(false, true); } diff --git a/src/common/transformations/tests/op_conversions/batch_to_space_decomposition_test.cpp b/src/common/transformations/tests/op_conversions/batch_to_space_decomposition_test.cpp index 17577d63a6fdda..c6f408bb7094db 100644 --- a/src/common/transformations/tests/op_conversions/batch_to_space_decomposition_test.cpp +++ b/src/common/transformations/tests/op_conversions/batch_to_space_decomposition_test.cpp @@ -219,7 +219,7 @@ void op_convertion_type_test(const Params& params) { Manager m; m.register_pass(by_elements); m.register_pass(); - ASSERT_NO_THROW(m.run_passes(f)); + 
OV_ASSERT_NO_THROW(m.run_passes(f)); EXPECT_EQ(f->get_result()->get_input_shape(0), (Shape{1, 1})); } diff --git a/src/common/transformations/tests/op_conversions/bidirectional_sequences_decomposition.cpp b/src/common/transformations/tests/op_conversions/bidirectional_sequences_decomposition.cpp index 9315c7d164e0c6..d84dc203388675 100644 --- a/src/common/transformations/tests/op_conversions/bidirectional_sequences_decomposition.cpp +++ b/src/common/transformations/tests/op_conversions/bidirectional_sequences_decomposition.cpp @@ -87,7 +87,7 @@ TEST(TransformationTests, BidirectionalSequenceDecompositionLSTM) { m.register_pass(); m.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { @@ -156,7 +156,7 @@ TEST(TransformationTests, BidirectionalSequenceDecompositionGRU) { m.register_pass(); m.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { @@ -219,7 +219,7 @@ TEST(TransformationTests, BidirectionalSequenceDecompositionRNN) { m.register_pass(); m.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { @@ -291,7 +291,7 @@ TEST(TransformationTests, BidirectionalSequenceDecompositionLSTMDisabled) { m.get_pass_config()->set_callback(transformations_callback); m.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { @@ -343,7 +343,7 @@ TEST(TransformationTests, BidirectionalSequenceDecompositionGRUDisabled) { m.get_pass_config()->set_callback(transformations_callback); m.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { @@ -394,7 +394,7 @@ TEST(TransformationTests, BidirectionalSequenceDecompositionRNNDisabled) { m.get_pass_config()->set_callback(transformations_callback); m.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { diff --git a/src/common/transformations/tests/op_conversions/convert_broadcast3_test.cpp 
b/src/common/transformations/tests/op_conversions/convert_broadcast3_test.cpp index d6759c9b03255f..58d951918bc8cc 100644 --- a/src/common/transformations/tests/op_conversions/convert_broadcast3_test.cpp +++ b/src/common/transformations/tests/op_conversions/convert_broadcast3_test.cpp @@ -35,7 +35,7 @@ void convert_broadcast3_test(std::shared_ptr f, std::shared_ptr f_ manager.register_pass(); manager.register_pass(); manager.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); auto res = compare_functions(f, f_ref); ASSERT_TRUE(res.first) << res.second; } @@ -337,7 +337,7 @@ TEST(TransformationTests, ConvertBroadcast3WithNumpyModeToBroadcast1) { manager.register_pass(); manager.register_pass(); manager.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { @@ -373,7 +373,7 @@ TEST(TransformationTests, ConvertBroadcast3WithPDPDModeToBroadcast1) { manager.register_pass(); manager.register_pass(); manager.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { @@ -411,7 +411,7 @@ TEST(TransformationTests, ConvertBroadcast3WithExplicitModeToBroadcast1) { manager.register_pass(); manager.register_pass(); manager.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { @@ -451,7 +451,7 @@ TEST(TransformationTests, ConvertBroadcast3WithBidirectionalModeToBroadcast1Cons manager.register_pass(); manager.register_pass(); manager.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { @@ -487,7 +487,7 @@ TEST(TransformationTests, ConvertBroadcast3WithBidirectionalModeToBroadcast1Cons manager.register_pass(); manager.register_pass(); manager.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { @@ -528,7 +528,7 @@ TEST(TransformationTests, ConvertBroadcast3WithBidirectionalModeToMultiply) { manager.register_pass(); manager.register_pass(); 
manager.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { @@ -572,7 +572,7 @@ TEST(TransformationTests, ConvertBroadcast3WithBidirectionalModeToLogicalAnd) { manager.register_pass(); manager.register_pass(); manager.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { diff --git a/src/common/transformations/tests/op_conversions/convert_broadcast_to_tiles_test.cpp b/src/common/transformations/tests/op_conversions/convert_broadcast_to_tiles_test.cpp index 4fd44a1d61b7eb..a50798fb240a48 100644 --- a/src/common/transformations/tests/op_conversions/convert_broadcast_to_tiles_test.cpp +++ b/src/common/transformations/tests/op_conversions/convert_broadcast_to_tiles_test.cpp @@ -32,8 +32,8 @@ TEST(TransformationTests, ConvertBroadcastToTilesDynamic) { pass::Manager manager; manager.register_pass(); manager.register_pass(); - ASSERT_NO_THROW(manager.run_passes(f)); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(manager.run_passes(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } // TODO: construct reference graph and use TEST_F } diff --git a/src/common/transformations/tests/op_conversions/convert_reduce_to_pooling_test.cpp b/src/common/transformations/tests/op_conversions/convert_reduce_to_pooling_test.cpp index 1e3ae8d00ec7fe..d74775aa1b0928 100644 --- a/src/common/transformations/tests/op_conversions/convert_reduce_to_pooling_test.cpp +++ b/src/common/transformations/tests/op_conversions/convert_reduce_to_pooling_test.cpp @@ -138,7 +138,7 @@ TEST_P(ConvertReduceToPoolingTests, CompareFunctions) { m.register_pass(); m.register_pass(unh); m.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); auto fc = FunctionsComparator::no_default().enable(FunctionsComparator::NODES).enable(FunctionsComparator::PRECISIONS); @@ -209,7 +209,7 @@ TEST(ConvertReduceToPooling, Negative) { auto f = 
ConvertReduceToPoolingTests::get_initial_function(PartialShape::dynamic(), {3}, MAX, true); pass::Manager manager; manager.register_pass(); - ASSERT_NO_THROW(manager.run_passes(f)); + OV_ASSERT_NO_THROW(manager.run_passes(f)); } #undef MAX diff --git a/src/common/transformations/tests/op_conversions/convert_scatter_elements_to_scatter_test.cpp b/src/common/transformations/tests/op_conversions/convert_scatter_elements_to_scatter_test.cpp index c4eadf737e913a..364fa43db9b4e9 100644 --- a/src/common/transformations/tests/op_conversions/convert_scatter_elements_to_scatter_test.cpp +++ b/src/common/transformations/tests/op_conversions/convert_scatter_elements_to_scatter_test.cpp @@ -91,7 +91,7 @@ void gen_test(std::shared_ptr f, std::shared_ptr f_ref) { check_rt_info(f); }); manager.register_pass(); - ASSERT_NO_THROW(manager.run_passes(f)); + OV_ASSERT_NO_THROW(manager.run_passes(f)); auto fc = FunctionsComparator::no_default().enable(FunctionsComparator::NODES).enable(FunctionsComparator::PRECISIONS); diff --git a/src/common/transformations/tests/op_conversions/convert_sequences_to_ti_test.cpp b/src/common/transformations/tests/op_conversions/convert_sequences_to_ti_test.cpp index 45b2b31e27321a..33c4df886bf0ab 100644 --- a/src/common/transformations/tests/op_conversions/convert_sequences_to_ti_test.cpp +++ b/src/common/transformations/tests/op_conversions/convert_sequences_to_ti_test.cpp @@ -56,7 +56,7 @@ TEST(TransformationTests, ConvertLSTMSequenceToTensorIterator) { m.register_pass(); m.register_pass(); m.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { @@ -162,7 +162,7 @@ TEST(TransformationTests, ConvertLSTMSequenceToTensorIteratorDynamic) { m.register_pass(); m.register_pass(); m.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { @@ -272,7 +272,7 @@ TEST(TransformationTests, ConvertQuantizedLSTMSequenceToTensorIterator) { m.register_pass(); m.register_pass(); 
m.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { @@ -377,7 +377,7 @@ TEST(TransformationTests, ConvertRNNSequenceToTensorIterator) { m.register_pass(); m.register_pass(); m.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { @@ -466,7 +466,7 @@ TEST(TransformationTests, ConvertRNNSequenceToTensorIteratorDynamic) { m.register_pass(); m.register_pass(); m.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { @@ -554,7 +554,7 @@ TEST(TransformationTests, ConvertGRUSequenceToTensorIterator) { m.register_pass(); m.register_pass(); m.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { @@ -643,7 +643,7 @@ TEST(TransformationTests, ConvertGRUSequenceToTensorIteratorDynamic) { m.register_pass(); m.register_pass(); m.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { @@ -736,7 +736,7 @@ TEST(TransformationTests, ConvertQuantizedGRUSequenceToTensorIterator) { m.register_pass(); m.register_pass(); m.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { @@ -873,7 +873,7 @@ TEST(TransformationTests, ConvertLSTMSequenceWithDynSeqLenToTensorIterator) { m.register_pass(); m.register_pass(); m.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { diff --git a/src/common/transformations/tests/op_conversions/convert_ti_to_sequences_test.cpp b/src/common/transformations/tests/op_conversions/convert_ti_to_sequences_test.cpp index 0115abc6779091..71182afc5f3ce4 100644 --- a/src/common/transformations/tests/op_conversions/convert_ti_to_sequences_test.cpp +++ b/src/common/transformations/tests/op_conversions/convert_ti_to_sequences_test.cpp @@ -93,7 +93,7 @@ TEST(TransformationTests, ConvertTensorIteratorToLSTMSequence) { m.register_pass(); m.register_pass(); m.run_passes(f); - 
ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { @@ -188,7 +188,7 @@ TEST(TransformationTests, ConvertTensorIteratorToLSTMSequenceDynamicReshapeCase) m.register_pass(); m.register_pass(); m.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { @@ -278,7 +278,7 @@ TEST(TransformationTests, ConvertTensorIteratorToLSTMSequenceDynamicSqueezeCase) m.register_pass(); m.register_pass(); m.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { @@ -364,7 +364,7 @@ TEST(TransformationTests, ConvertTensorIteratorToRNNSequence) { m.register_pass(); m.register_pass(); m.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { @@ -444,7 +444,7 @@ TEST(TransformationTests, ConvertTensorIteratorToRNNSequenceDynamicReshapeCase) m.register_pass(); m.register_pass(); m.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { @@ -525,7 +525,7 @@ TEST(TransformationTests, ConvertTensorIteratorToRNNSequenceDynamicSqueezeCase) m.register_pass(); m.register_pass(); m.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { @@ -605,7 +605,7 @@ TEST(TransformationTests, ConvertTensorIteratorToGRUSequence) { m.register_pass(); m.register_pass(); m.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { @@ -685,7 +685,7 @@ TEST(TransformationTests, ConvertTensorIteratorToGRUSequenceDynamicReshapeCase) m.register_pass(); m.register_pass(); m.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { @@ -766,7 +766,7 @@ TEST(TransformationTests, ConvertTensorIteratorToGRUSequenceDynamicSqueezeCase) m.register_pass(); m.register_pass(); m.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { diff --git 
a/src/common/transformations/tests/op_conversions/fq_decomposition_test.cpp b/src/common/transformations/tests/op_conversions/fq_decomposition_test.cpp index 7e2a1754f8dc18..9a46b810ca7223 100644 --- a/src/common/transformations/tests/op_conversions/fq_decomposition_test.cpp +++ b/src/common/transformations/tests/op_conversions/fq_decomposition_test.cpp @@ -88,7 +88,7 @@ class FakeQuantizeDecompositionTest : public ov::test::TestsCommon, manager.register_pass(); manager.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { diff --git a/src/common/transformations/tests/op_conversions/ngraph_depth_to_space_transform_test.cpp b/src/common/transformations/tests/op_conversions/ngraph_depth_to_space_transform_test.cpp index 37040b3fd00ea3..277e0f00e8d8a6 100644 --- a/src/common/transformations/tests/op_conversions/ngraph_depth_to_space_transform_test.cpp +++ b/src/common/transformations/tests/op_conversions/ngraph_depth_to_space_transform_test.cpp @@ -34,7 +34,7 @@ TEST(TransformationTests, TestDepthToSpaceTransformBlockFirst) { m.register_pass(); m.register_pass(); m.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } auto consumers = input->output(0).get_target_inputs(); @@ -78,7 +78,7 @@ TEST(TransformationTests, TestDepthToSpaceTransformDepthFirst) { m.register_pass(); m.register_pass(); m.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } auto consumers = input->output(0).get_target_inputs(); @@ -122,7 +122,7 @@ TEST(TransformationTests, TestSpaceToDepthTransformBlockFirst) { m.register_pass(); m.register_pass(); m.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } auto consumers = input->output(0).get_target_inputs(); @@ -166,7 +166,7 @@ TEST(TransformationTests, TestSpaceToDepthTransformDepthFirst) { m.register_pass(); m.register_pass(); m.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + 
OV_ASSERT_NO_THROW(check_rt_info(f)); } auto consumers = input->output(0).get_target_inputs(); @@ -208,7 +208,7 @@ TEST(TransformationTests, TestSpaceToDepthDynamic) { f = std::make_shared(ov::NodeVector{space_to_depth}, ParameterVector{input}); pass::Manager m; m.register_pass(); - ASSERT_NO_THROW(m.run_passes(f)); + OV_ASSERT_NO_THROW(m.run_passes(f)); } } @@ -222,6 +222,6 @@ TEST(TransformationTests, TestDepthToSpaceDynamic) { f = std::make_shared(ov::NodeVector{depth_to_space}, ParameterVector{input}); pass::Manager m; m.register_pass(); - ASSERT_NO_THROW(m.run_passes(f)); + OV_ASSERT_NO_THROW(m.run_passes(f)); } } diff --git a/src/common/transformations/tests/op_conversions/ngraph_mode_decomposition_test.cpp b/src/common/transformations/tests/op_conversions/ngraph_mode_decomposition_test.cpp index cfd5c69e4829ea..0155ffc2ca9740 100644 --- a/src/common/transformations/tests/op_conversions/ngraph_mode_decomposition_test.cpp +++ b/src/common/transformations/tests/op_conversions/ngraph_mode_decomposition_test.cpp @@ -38,7 +38,7 @@ TEST(TransformationTests, ModDecompositionTests) { m.register_pass(); m.register_pass(unh); m.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } ASSERT_EQ(f->get_ops().size(), 12); } diff --git a/src/common/transformations/tests/smart_reshape/lstm_states_broadcast.cpp b/src/common/transformations/tests/smart_reshape/lstm_states_broadcast.cpp index 0b4701069e5dce..8e995935a0c151 100644 --- a/src/common/transformations/tests/smart_reshape/lstm_states_broadcast.cpp +++ b/src/common/transformations/tests/smart_reshape/lstm_states_broadcast.cpp @@ -44,7 +44,7 @@ TEST_P(LSTMStatesBroadcastTest, BareLSTM) { model = make_shared(ov::NodeVector{cell}, ov::ParameterVector{parameter}); } - ASSERT_NO_THROW(model->reshape(ov::PartialShape{p.new_batch_size, p.input_size})); + OV_ASSERT_NO_THROW(model->reshape(ov::PartialShape{p.new_batch_size, p.input_size})); } class LSTMStatesBroadcastTestWithTI : public 
testing::WithParamInterface, @@ -94,7 +94,7 @@ TEST_P(LSTMStatesBroadcastTestWithTI, TI_With_LSTM) { auto res_ti_2 = make_shared(tensor_iterator->output(0)); model = make_shared(ov::NodeVector{res_ti_1, res_ti_2}, ov::ParameterVector{X}); } - ASSERT_NO_THROW(model->reshape(ov::PartialShape{p.new_batch_size, 1, p.input_size})); + OV_ASSERT_NO_THROW(model->reshape(ov::PartialShape{p.new_batch_size, 1, p.input_size})); } static vector params = { diff --git a/src/common/transformations/tests/smart_reshape/reshape_sinking.cpp b/src/common/transformations/tests/smart_reshape/reshape_sinking.cpp index 8dfbda9de9a92b..ee877e578e9e8a 100644 --- a/src/common/transformations/tests/smart_reshape/reshape_sinking.cpp +++ b/src/common/transformations/tests/smart_reshape/reshape_sinking.cpp @@ -37,7 +37,7 @@ TEST_P(ReshapeSinkingTest, ReshapeSinkingOnlyMatMul) { std::make_shared(matmul, create_constant(p.output_pattern_back), false); model = std::make_shared(ov::NodeVector{reshape_back}, ov::ParameterVector{parameter}); } - ASSERT_NO_THROW(model->reshape(p.new_shape)); + OV_ASSERT_NO_THROW(model->reshape(p.new_shape)); } class ReshapeSinkingTestWithAdd : public testing::WithParamInterface, @@ -59,7 +59,7 @@ TEST_P(ReshapeSinkingTestWithAdd, ReshapeSinkingMatMulAdd) { auto reshape_back = std::make_shared(add, create_constant(p.output_pattern_back), false); model = std::make_shared(ov::NodeVector{reshape_back}, ov::ParameterVector{parameter}); } - ASSERT_NO_THROW(model->reshape(p.new_shape)); + OV_ASSERT_NO_THROW(model->reshape(p.new_shape)); } static std::vector params = { diff --git a/src/common/transformations/tests/smart_reshape/sr_reshape_1d.cpp b/src/common/transformations/tests/smart_reshape/sr_reshape_1d.cpp index d819c38adc82a3..3e8ba61608a224 100644 --- a/src/common/transformations/tests/smart_reshape/sr_reshape_1d.cpp +++ b/src/common/transformations/tests/smart_reshape/sr_reshape_1d.cpp @@ -23,7 +23,7 @@ TEST(SmartReshapeTests, Reshape1d) { auto unh = std::make_shared(); 
init_unique_names(f, unh); - ASSERT_NO_THROW(f->reshape({{1, 3, 300, 300}})); + OV_ASSERT_NO_THROW(f->reshape({{1, 3, 300, 300}})); check_unique_names(f, unh); ASSERT_TRUE(f->get_results()[0]->get_output_partial_shape(0).compatible({270000})); diff --git a/src/common/transformations/tests/smart_reshape/sr_strided_slice_squeeze.cpp b/src/common/transformations/tests/smart_reshape/sr_strided_slice_squeeze.cpp index 395658091fff79..cbfef993fe6d4c 100644 --- a/src/common/transformations/tests/smart_reshape/sr_strided_slice_squeeze.cpp +++ b/src/common/transformations/tests/smart_reshape/sr_strided_slice_squeeze.cpp @@ -59,7 +59,7 @@ TEST(SmartReshapeTests, SS_Squeeze_partial_begin_end_mask) { auto unh = std::make_shared(); init_unique_names(f, unh); auto inputname = f->get_parameters()[0]->get_friendly_name(); - ASSERT_NO_THROW(f->reshape({{2, 128, 768}})); + OV_ASSERT_NO_THROW(f->reshape({{2, 128, 768}})); check_unique_names(f, unh); ASSERT_TRUE(f->get_results()[0]->get_output_partial_shape(0).compatible({2, 768})) @@ -93,7 +93,7 @@ TEST(SmartReshapeTests, SS_Squeeze_partial_begin_end) { auto unh = std::make_shared(); init_unique_names(f, unh); auto inputname = f->get_parameters()[0]->get_friendly_name(); - ASSERT_NO_THROW(f->reshape({{2, 1, 768}})); + OV_ASSERT_NO_THROW(f->reshape({{2, 1, 768}})); check_unique_names(f, unh); ASSERT_TRUE(f->get_results()[0]->get_output_partial_shape(0).compatible({2, 768})) diff --git a/src/common/transformations/tests/type_relaxed_tests.cpp b/src/common/transformations/tests/type_relaxed_tests.cpp index ff5f9a17b76539..ece78d6c84b91c 100644 --- a/src/common/transformations/tests/type_relaxed_tests.cpp +++ b/src/common/transformations/tests/type_relaxed_tests.cpp @@ -337,7 +337,7 @@ TEST_F(TypeRelaxedTests, ConstantFoldingCheck) { f = make_shared(ov::OutputVector{relaxed_equal}, ov::ParameterVector{}); ov::pass::Manager manager; manager.register_pass(); - ASSERT_NO_THROW(manager.run_passes(f)); + 
OV_ASSERT_NO_THROW(manager.run_passes(f)); auto layer_before_result = f->get_result()->get_input_node_shared_ptr(0); ASSERT_TRUE(ov::is_type(layer_before_result)); } @@ -355,7 +355,7 @@ TEST_F(TypeRelaxedTests, ConstantFoldingCheck1) { f = make_shared(ov::OutputVector{relaxed_equal}, ov::ParameterVector{}); ov::pass::Manager manager; manager.register_pass(); - ASSERT_NO_THROW(manager.run_passes(f)); + OV_ASSERT_NO_THROW(manager.run_passes(f)); auto layer_before_result = f->get_result()->get_input_node_shared_ptr(0); ASSERT_TRUE(ov::is_type(layer_before_result)); } @@ -377,7 +377,7 @@ TEST_F(TypeRelaxedTests, ConstantFoldingCheck2) { f = make_shared(ov::OutputVector{relaxed_equal}, ov::ParameterVector{}); ov::pass::Manager manager; manager.register_pass(); - ASSERT_NO_THROW(manager.run_passes(f)); + OV_ASSERT_NO_THROW(manager.run_passes(f)); auto layer_before_result = f->get_result()->get_input_node_shared_ptr(0); ASSERT_TRUE(ov::is_type(layer_before_result)); } @@ -397,7 +397,7 @@ TEST_F(TypeRelaxedTests, ConstantFoldingCheck3) { f = make_shared(ov::OutputVector{relaxed_equal}, ov::ParameterVector{}); ov::pass::Manager manager; manager.register_pass(); - ASSERT_NO_THROW(manager.run_passes(f)); + OV_ASSERT_NO_THROW(manager.run_passes(f)); auto layer_before_result = f->get_result()->get_input_node_shared_ptr(0); ASSERT_TRUE(ov::is_type(layer_before_result)); } @@ -472,7 +472,7 @@ TEST_F(TypeRelaxedTests, PartialValuePropagation) { manager.register_pass( map, type_to_fuse_map{{ov::opset1::Convert::get_type_info_static(), fuse_type_to_convert_cpu}}); - ASSERT_NO_THROW(manager.run_passes(model)); + OV_ASSERT_NO_THROW(manager.run_passes(model)); EXPECT_EQ(model->get_result()->get_output_partial_shape(0), ov::PartialShape({1, 768, -1})); } } @@ -514,7 +514,7 @@ TEST_F(TypeRelaxedTests, PartialValuePropagation2) { manager.register_pass( map, type_to_fuse_map{{ov::opset1::Convert::get_type_info_static(), fuse_type_to_convert_cpu}}); - 
ASSERT_NO_THROW(manager.run_passes(model)); + OV_ASSERT_NO_THROW(manager.run_passes(model)); EXPECT_EQ(model->get_result()->get_output_partial_shape(0), ov::PartialShape({-1, 1, -1, -1})); } } diff --git a/src/common/transformations/tests/utils/convert_precision.cpp b/src/common/transformations/tests/utils/convert_precision.cpp index af42bc4e900f8e..b685218b78dda9 100644 --- a/src/common/transformations/tests/utils/convert_precision.cpp +++ b/src/common/transformations/tests/utils/convert_precision.cpp @@ -105,7 +105,7 @@ TEST(TransformationTests, ConvertPrecision_NMS4) { manager.register_pass(precisions); manager.run_passes(f); } - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); ASSERT_FALSE(has_type(f)); ASSERT_FALSE(has_type(f)); } @@ -137,7 +137,7 @@ TEST(TransformationTests, ConvertPrecision_NMS5) { manager.register_pass(); manager.register_pass(precisions); manager.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); ASSERT_FALSE(has_type(f)); ASSERT_FALSE(has_type(f)); } @@ -171,7 +171,7 @@ TEST(TransformationTests, DoubleConvertPrecision_NMS5) { manager.register_pass(precisions1); manager.register_pass(precisions2); manager.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); ASSERT_FALSE(has_type(f)); ASSERT_FALSE(has_type(f)); } @@ -205,7 +205,7 @@ TEST(TransformationTests, DoubleConvertPrecision_NMS9) { manager.register_pass(precisions1); manager.register_pass(precisions2); manager.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); ASSERT_FALSE(has_type(f)); ASSERT_FALSE(has_type(f)); } @@ -230,7 +230,7 @@ TEST(TransformationTests, ConvertPrecision_MatrixNms) { manager.register_pass(); manager.register_pass(precisions); manager.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); ASSERT_FALSE(has_type(f)); ASSERT_FALSE(has_type(f)); } @@ -255,7 +255,7 @@ 
TEST(TransformationTests, ConvertPrecision_MulticlassNms) { manager.register_pass(); manager.register_pass(precisions); manager.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); ASSERT_FALSE(has_type(f)); ASSERT_FALSE(has_type(f)); } @@ -276,7 +276,7 @@ TEST(TransformationTests, ConvertPrecision_ShapeOf) { manager.register_pass(precisions); manager.run_passes(f); } - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); ASSERT_FALSE(has_type(f)); ASSERT_FALSE(has_type(f)); } @@ -299,7 +299,7 @@ TEST(TransformationTests, ConvertPrecision_Range) { manager.register_pass(precisions); manager.run_passes(f); } - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); ASSERT_FALSE(has_type(f)); ASSERT_FALSE(has_type(f)); } @@ -321,7 +321,7 @@ TEST(TransformationTests, ConvertPrecision_ConstantRelu) { manager.register_pass(precisions); manager.run_passes(f); } - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); ASSERT_FALSE(has_type(f)); ASSERT_FALSE(has_type(f)); } @@ -342,7 +342,7 @@ TEST(TransformationTests, ConvertPrecision_Convert) { manager.register_pass(precisions); manager.run_passes(f); } - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); ASSERT_FALSE(has_type(f)); ASSERT_FALSE(has_type(f)); } @@ -498,7 +498,7 @@ TEST(TransformationTests, ConvertPrecision_ConvertElimination) { f_ref = std::make_shared(NodeVector{relu}, ParameterVector{input}); } - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); auto res = compare_functions(f, f_ref); ASSERT_TRUE(res.first) << res.second; } @@ -520,7 +520,7 @@ TEST(TransformationTests, ConvertPrecision_TopK) { manager.register_pass(precisions); manager.run_passes(f); } - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); ASSERT_FALSE(has_type(f)); ASSERT_FALSE(has_type(f)); } @@ -541,7 +541,7 @@ TEST(TransformationTests, ConvertPrecision_Unique10) { 
manager.register_pass(precisions); manager.run_passes(model); } - ASSERT_NO_THROW(check_rt_info(model)); + OV_ASSERT_NO_THROW(check_rt_info(model)); ASSERT_EQ(model->outputs().size(), 4); EXPECT_EQ(model->outputs()[0].get_element_type(), element::f32); EXPECT_EQ(model->outputs()[1].get_element_type(), element::i32); @@ -570,7 +570,7 @@ TEST(TransformationTests, ConvertPrecision_NonZero) { manager.register_pass(precisions); manager.run_passes(f); } - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); ASSERT_FALSE(has_type(f)); ASSERT_FALSE(has_type(f)); } @@ -592,7 +592,7 @@ TEST(TransformationTests, ConvertPrecision_Bucketize) { manager.register_pass(precisions); manager.run_passes(f); } - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); ASSERT_FALSE(has_type(f)); ASSERT_FALSE(has_type(f)); } @@ -629,7 +629,7 @@ TEST(TransformationTests, ConvertPrecision_Roundings) { ASSERT_EQ(casted_end->cast_vector(), std::vector({max_int32, max_int32, max_int32, max_int32})); } - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); ASSERT_FALSE(has_type(f)); ASSERT_FALSE(has_type(f)); } @@ -681,7 +681,7 @@ TEST(TransformationTests, ConvertPrecision_TIBody) { manager.register_pass(precisions); manager.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); ASSERT_FALSE(has_type(f)); ASSERT_FALSE(has_type(f)); ASSERT_FALSE(has_type(tensor_iterator->get_body())); @@ -706,7 +706,7 @@ TEST(TransformationTests, ConvertPrecision_Equal) { manager.register_pass(precisions); manager.run_passes(f); } - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); ASSERT_FALSE(has_type(f)); ASSERT_FALSE(has_type(f)); ASSERT_TRUE(has_type(f)); @@ -729,7 +729,7 @@ TEST(TransformationTests, ConvertPrecision_NotEqual) { manager.register_pass(precisions); manager.run_passes(f); } - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); 
ASSERT_FALSE(has_type(f)); ASSERT_FALSE(has_type(f)); ASSERT_TRUE(has_type(f)); @@ -752,7 +752,7 @@ TEST(TransformationTests, ConvertPrecision_Greater) { manager.register_pass(precisions); manager.run_passes(f); } - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); ASSERT_FALSE(has_type(f)); ASSERT_FALSE(has_type(f)); ASSERT_TRUE(has_type(f)); @@ -775,7 +775,7 @@ TEST(TransformationTests, ConvertPrecision_GreaterEqual) { manager.register_pass(precisions); manager.run_passes(f); } - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); ASSERT_FALSE(has_type(f)); ASSERT_FALSE(has_type(f)); ASSERT_TRUE(has_type(f)); @@ -798,7 +798,7 @@ TEST(TransformationTests, ConvertPrecision_Less) { manager.register_pass(precisions); manager.run_passes(f); } - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); ASSERT_FALSE(has_type(f)); ASSERT_FALSE(has_type(f)); ASSERT_TRUE(has_type(f)); @@ -821,7 +821,7 @@ TEST(TransformationTests, ConvertPrecision_LessEqual) { manager.register_pass(precisions); manager.run_passes(f); } - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); ASSERT_FALSE(has_type(f)); ASSERT_FALSE(has_type(f)); ASSERT_TRUE(has_type(f)); @@ -841,7 +841,7 @@ TEST(TransformationTests, ConvertPrecision_LogicalAnd) { manager.register_pass(precisions_map{{element::boolean, element::u8}}); manager.run_passes(f); } - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); ASSERT_FALSE(has_type(f)); ASSERT_TRUE(has_type(f)); } @@ -860,7 +860,7 @@ TEST(TransformationTests, ConvertPrecision_LogicalOr) { manager.register_pass(precisions_map{{element::boolean, element::u8}}); manager.run_passes(f); } - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); ASSERT_FALSE(has_type(f)); ASSERT_TRUE(has_type(f)); } @@ -879,7 +879,7 @@ TEST(TransformationTests, ConvertPrecision_LogicalXor) { manager.register_pass(precisions_map{{element::boolean, 
element::u8}}); manager.run_passes(f); } - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); ASSERT_FALSE(has_type(f)); ASSERT_TRUE(has_type(f)); } @@ -897,7 +897,7 @@ TEST(TransformationTests, ConvertPrecision_LogicalNot) { manager.register_pass(precisions_map{{element::boolean, element::u8}}); manager.run_passes(f); } - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); ASSERT_FALSE(has_type(f)); ASSERT_TRUE(has_type(f)); @@ -924,7 +924,7 @@ TEST(TransformationTests, ConvertPrecision_Select) { manager.register_pass(precisions_map{{element::boolean, element::u8}}); manager.run_passes(f); } - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); ASSERT_FALSE(has_type(f)); ASSERT_TRUE(has_type(f)); } @@ -944,7 +944,7 @@ TEST(TransformationTests, ConvertPrecision_TypeRelaxedWithSelect) { manager.register_pass(precisions_map{{element::i32, element::i64}}); manager.run_passes(f); } - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); ASSERT_FALSE(has_type(f)); ASSERT_FALSE(has_type(f)); ASSERT_TRUE(has_type(f)); @@ -967,7 +967,7 @@ TEST(TransformationTests, ConvertPrecision_TypeRelaxed) { manager.register_pass(precisions_map{{element::i32, element::i64}}); manager.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); ASSERT_FALSE(has_type(f)); ASSERT_FALSE(has_type(f)); ASSERT_TRUE(has_type(f)); @@ -994,7 +994,7 @@ TEST(TransformationTests, ConvertPrecision_Variables) { manager.register_pass(precisions_map{{element::f16, element::f32}}); manager.run_passes(f); } - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); ASSERT_FALSE(has_type(f)); } @@ -1028,7 +1028,7 @@ TEST(TransformationTests, ConvertPrecision_skip_precision_sensitive) { keep_precision_sensitive_in_fp32); manager.run_passes(model); } - ASSERT_NO_THROW(check_rt_info(model)); + OV_ASSERT_NO_THROW(check_rt_info(model)); ASSERT_TRUE(has_type(model)); 
ASSERT_TRUE(interpolate->input_value(2).get_element_type() == element::Type_t::f32); } @@ -1063,7 +1063,7 @@ TEST(TransformationTests, ConvertPrecision_without_keep_precision_sensitive_in_f keep_precision_sensitive_in_fp32); manager.run_passes(model); } - ASSERT_NO_THROW(check_rt_info(model)); + OV_ASSERT_NO_THROW(check_rt_info(model)); ASSERT_FALSE(has_type(model)); ASSERT_TRUE(interpolate->input_value(2).get_element_type() == element::Type_t::f16); } @@ -1351,7 +1351,7 @@ TEST(TransformationTests, ConvertCompressedToMixedPrecission_do_not_keep_in_fp32 keep_precision_sensitive_in_fp32); manager.run_passes(model); } - ASSERT_NO_THROW(check_rt_info(model)); + OV_ASSERT_NO_THROW(check_rt_info(model)); ASSERT_FALSE(has_type(model)); ASSERT_TRUE(interpolate->input_value(2).get_element_type() == element::Type_t::f16); ASSERT_TRUE(interpolate->output(0).get_partial_shape() == PartialShape({1, 3, 287, 511})); diff --git a/src/core/tests/any.cpp b/src/core/tests/any.cpp index 7d9e3d4edc1126..3914a617ff2982 100644 --- a/src/core/tests/any.cpp +++ b/src/core/tests/any.cpp @@ -8,6 +8,7 @@ #include +#include "common_test_utils/test_assertions.hpp" #include "openvino/core/runtime_attribute.hpp" using namespace ov; @@ -223,7 +224,7 @@ TEST_F(AnyTests, AnyAsMapOfMapOfAnysFromString) { ov::AnyMap map; ASSERT_TRUE(any.is()); ASSERT_FALSE(any.is()); - ASSERT_NO_THROW(map = any.as()); + OV_ASSERT_NO_THROW(map = any.as()); ASSERT_EQ(string_props, ov::Any(map).as()); // check map1 @@ -232,7 +233,7 @@ TEST_F(AnyTests, AnyAsMapOfMapOfAnysFromString) { ASSERT_TRUE(map["map1"].is()); ASSERT_FALSE(map["map1"].is()); ASSERT_FALSE(map["map1"].is()); - ASSERT_NO_THROW(map1 = map["map1"].as()); + OV_ASSERT_NO_THROW(map1 = map["map1"].as()); ASSERT_EQ(2, map1.size()); // check map1:prop1 @@ -244,7 +245,7 @@ TEST_F(AnyTests, AnyAsMapOfMapOfAnysFromString) { ov::AnyMap map2; ASSERT_TRUE(map["map2"].is()); ASSERT_FALSE(map["map2"].is()); - ASSERT_NO_THROW(map2 = map["map2"].as()); + 
OV_ASSERT_NO_THROW(map2 = map["map2"].as()); ASSERT_EQ(1, map2.size()); // check map1:prop1 @@ -260,7 +261,7 @@ TEST_F(AnyTests, AnyAsMapOfMapOfMapOfAnysFromString) { ov::AnyMap map; ASSERT_TRUE(any.is()); ASSERT_FALSE(any.is()); - ASSERT_NO_THROW(map = any.as()); + OV_ASSERT_NO_THROW(map = any.as()); ASSERT_EQ(3, map.size()); ASSERT_EQ(string_props, ov::Any(map).as()); @@ -282,13 +283,13 @@ TEST_F(AnyTests, AnyAsMapOfMapOfMapOfAnysFromString) { ov::AnyMap map1; ASSERT_TRUE(map["map1"].is()); ASSERT_FALSE(map["map1"].is()); - ASSERT_NO_THROW(map1 = map["map1"].as()); + OV_ASSERT_NO_THROW(map1 = map["map1"].as()); // check subprop ov::AnyMap subprop_map; ASSERT_TRUE(map1["subprop_map"].is()); ASSERT_FALSE(map1["subprop_map"].is()); - ASSERT_NO_THROW(subprop_map = map1["subprop_map"].as()); + OV_ASSERT_NO_THROW(subprop_map = map1["subprop_map"].as()); // check prop ASSERT_TRUE(subprop_map["prop"].is()); @@ -364,7 +365,7 @@ TEST_F(AnyTests, AnyMapSharesComplexValues) { const std::string string_props = "{map1:{subprop_map:{prop:value}},prop1:1,prop2:2.0}"; ov::Any any(string_props); ov::AnyMap map; - ASSERT_NO_THROW(map = any.as()); + OV_ASSERT_NO_THROW(map = any.as()); AnyMap copy_map = map; @@ -525,7 +526,7 @@ void PrintTo(const Any& object, std::ostream* stream) { TEST_F(AnyTests, PrintToEmpty) { Any p; std::stringstream stream; - ASSERT_NO_THROW(p.print(stream)); + OV_ASSERT_NO_THROW(p.print(stream)); ASSERT_EQ(stream.str(), std::string{}); } @@ -533,7 +534,7 @@ TEST_F(AnyTests, PrintToIntAny) { int value = -5; Any p = value; std::stringstream stream; - ASSERT_NO_THROW(p.print(stream)); + OV_ASSERT_NO_THROW(p.print(stream)); ASSERT_EQ(stream.str(), std::to_string(value)); } @@ -542,7 +543,7 @@ TEST_F(AnyTests, ReadToIntAny) { std::stringstream strm; strm << value; Any p = int{}; - ASSERT_NO_THROW(p.read(strm)); + OV_ASSERT_NO_THROW(p.read(strm)); ASSERT_FALSE(strm.fail()); ASSERT_EQ(value, p.as()); } @@ -551,7 +552,7 @@ TEST_F(AnyTests, PrintToUIntAny) { unsigned 
int value = 5; Any p = value; std::stringstream stream; - ASSERT_NO_THROW(p.print(stream)); + OV_ASSERT_NO_THROW(p.print(stream)); ASSERT_EQ(stream.str(), std::to_string(value)); } @@ -559,14 +560,14 @@ TEST_F(AnyTests, PrintToSize_tAny) { std::size_t value = 5; Any p = value; std::stringstream stream; - ASSERT_NO_THROW(p.print(stream)); + OV_ASSERT_NO_THROW(p.print(stream)); ASSERT_EQ(stream.str(), std::to_string(value)); } TEST_F(AnyTests, PrintToFloatAny) { Any p = 5.5f; std::stringstream stream; - ASSERT_NO_THROW(p.print(stream)); + OV_ASSERT_NO_THROW(p.print(stream)); ASSERT_EQ(stream.str(), std::string{"5.5"}); } @@ -574,21 +575,21 @@ TEST_F(AnyTests, PrintToStringAny) { std::string value = "some text"; Any p = value; std::stringstream stream; - ASSERT_NO_THROW(p.print(stream)); + OV_ASSERT_NO_THROW(p.print(stream)); ASSERT_EQ(stream.str(), value); } TEST_F(AnyTests, PrintToVectorOfInts) { Any p = std::vector{-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5}; std::stringstream stream; - ASSERT_NO_THROW(p.print(stream)); + OV_ASSERT_NO_THROW(p.print(stream)); ASSERT_EQ(stream.str(), std::string{"-5 -4 -3 -2 -1 0 1 2 3 4 5"}); } TEST_F(AnyTests, PrintToVectorOfUInts) { Any p = std::vector{0, 1, 2, 3, 4, 5}; std::stringstream stream; - ASSERT_NO_THROW(p.print(stream)); + OV_ASSERT_NO_THROW(p.print(stream)); ASSERT_EQ(stream.str(), std::string{"0 1 2 3 4 5"}); } @@ -608,7 +609,7 @@ TEST_F(AnyTests, PrintToVectorOfFloats) { TEST_F(AnyTests, PrintToVectorOfStrings) { Any p = std::vector{"zero", "one", "two", "three", "four", "five"}; std::stringstream stream; - ASSERT_NO_THROW(p.print(stream)); + OV_ASSERT_NO_THROW(p.print(stream)); ASSERT_EQ(stream.str(), std::string{"zero one two three four five"}); } @@ -619,7 +620,7 @@ TEST_F(AnyTests, PrintToMapOfAnys) { std::stringstream stream; { Any p = refMap; - ASSERT_NO_THROW(p.print(stream)); + OV_ASSERT_NO_THROW(p.print(stream)); ASSERT_EQ(stream.str(), std::string{"{testParamInt:4,testParamString:test}"}); } } @@ -640,7 +641,7 @@ 
TEST_F(AnyTests, PrintToMapOfMapsOfAnys) { std::stringstream stream; { Any p = refMap; - ASSERT_NO_THROW(p.print(stream)); + OV_ASSERT_NO_THROW(p.print(stream)); ASSERT_EQ( stream.str(), std::string{ @@ -656,8 +657,8 @@ TEST_F(AnyTests, accessUsingBaseReference) { auto p = Any::make(); ASSERT_TRUE(p.is()); ASSERT_TRUE(p.is()); - ASSERT_NO_THROW(p.as()); - ASSERT_NO_THROW(p.as()); + OV_ASSERT_NO_THROW(p.as()); + OV_ASSERT_NO_THROW(p.as()); ASSERT_EQ(typeid(Derived), p.as().type_info()); } ASSERT_EQ(1, DestructorTest::constructorCount); @@ -678,7 +679,7 @@ TEST_F(AnyTests, accessUsingWrongBaseReference) { Any p = WrongDerived{}; ASSERT_TRUE(p.is()); ASSERT_FALSE(p.is()); - ASSERT_NO_THROW(p.as()); + OV_ASSERT_NO_THROW(p.as()); ASSERT_THROW(p.as(), ov::Exception); } diff --git a/src/core/tests/bound_evaluate.cpp b/src/core/tests/bound_evaluate.cpp index 1fe8e08dd9d176..038c1bae444e9d 100644 --- a/src/core/tests/bound_evaluate.cpp +++ b/src/core/tests/bound_evaluate.cpp @@ -6,6 +6,7 @@ #include +#include "common_test_utils/test_assertions.hpp" #include "common_test_utils/type_prop.hpp" #include "openvino/op/util/framework_node.hpp" #include "openvino/opsets/opset10.hpp" @@ -66,16 +67,16 @@ TEST(BoundEvaluatorTest, no_exception_on_single_bound) { int32_t o_[1] = {INT32_MIN}; // initial value of output tensor is not needed, it's set to check whether changed TensorVector output{{et, s, o_}}; // evaluations won't be performed due to missing upper bound tensor of parameter a - ASSERT_NO_THROW(sub->evaluate_lower(output)); + OV_ASSERT_NO_THROW(sub->evaluate_lower(output)); EXPECT_EQ(o_[0], INT32_MIN); - ASSERT_NO_THROW(sub->evaluate_upper(output)); + OV_ASSERT_NO_THROW(sub->evaluate_upper(output)); EXPECT_EQ(o_[0], INT32_MIN); int32_t a_u[1] = {11}; a->get_output_tensor(0).set_upper_value(Tensor{et, s, a_u}); // now both bounds of sub node can be calculated - ASSERT_NO_THROW(sub->evaluate_lower(output)); + OV_ASSERT_NO_THROW(sub->evaluate_lower(output)); EXPECT_EQ(o_[0], 0); 
- ASSERT_NO_THROW(sub->evaluate_upper(output)); + OV_ASSERT_NO_THROW(sub->evaluate_upper(output)); EXPECT_EQ(o_[0], 10); } diff --git a/src/core/tests/frontend/frontend_manager.cpp b/src/core/tests/frontend/frontend_manager.cpp index 8a2b8c87849527..1e42de563ddbc6 100644 --- a/src/core/tests/frontend/frontend_manager.cpp +++ b/src/core/tests/frontend/frontend_manager.cpp @@ -8,6 +8,7 @@ #include #include "common_test_utils/file_utils.hpp" +#include "common_test_utils/test_assertions.hpp" #include "openvino/frontend/exception.hpp" #include "openvino/frontend/manager.hpp" #include "openvino/util/file_util.hpp" @@ -22,13 +23,13 @@ static std::string mock_fe_path() { TEST(FrontEndManagerTest, testAvailableFrontEnds) { FrontEndManager fem; class MockFrontEnd : public FrontEnd {}; - ASSERT_NO_THROW(fem.register_front_end("mock", []() { + OV_ASSERT_NO_THROW(fem.register_front_end("mock", []() { return std::make_shared(); })); auto frontends = fem.get_available_front_ends(); ASSERT_NE(std::find(frontends.begin(), frontends.end(), "mock"), frontends.end()); FrontEnd::Ptr fe; - ASSERT_NO_THROW(fe = fem.load_by_framework("mock")); + OV_ASSERT_NO_THROW(fe = fem.load_by_framework("mock")); FrontEndManager fem2 = std::move(fem); frontends = fem2.get_available_front_ends(); @@ -51,7 +52,7 @@ TEST(FrontEndManagerTest, testMockPluginFrontEnd) { EXPECT_NE(std::find(frontends.begin(), frontends.end(), "mock1"), frontends.end()); FrontEnd::Ptr fe; - ASSERT_NO_THROW(fe = fem.load_by_framework("mock1")); + OV_ASSERT_NO_THROW(fe = fem.load_by_framework("mock1")); EXPECT_EQ(fe->get_name(), "mock1"); } @@ -62,7 +63,7 @@ TEST(FrontEndManagerTest, testFEMDestroy_FrontEndHolder) { fem.register_front_end("mock1", mock_fe_path()); auto frontends = fem.get_available_front_ends(); EXPECT_NE(std::find(frontends.begin(), frontends.end(), "mock1"), frontends.end()); - ASSERT_NO_THROW(fe = fem.load_by_framework("mock1")); + OV_ASSERT_NO_THROW(fe = fem.load_by_framework("mock1")); } 
EXPECT_EQ(fe->get_name(), "mock1"); } @@ -117,7 +118,7 @@ TEST(FrontEndManagerTest, testDefaultFrontEnd) { FrontEndManager fem; fem.register_front_end("mock1", mock_fe_path()); FrontEnd::Ptr fe; - ASSERT_NO_THROW(fe = fem.load_by_model()); + OV_ASSERT_NO_THROW(fe = fem.load_by_model()); ASSERT_EQ(nullptr, fe); class MockFrontEnd : public FrontEnd {}; diff --git a/src/core/tests/ov_default_allocator_test.cpp b/src/core/tests/ov_default_allocator_test.cpp index fa76d655b37269..34f6b1c8288119 100644 --- a/src/core/tests/ov_default_allocator_test.cpp +++ b/src/core/tests/ov_default_allocator_test.cpp @@ -6,6 +6,7 @@ #include +#include "common_test_utils/test_assertions.hpp" #include "openvino/core/except.hpp" #include "openvino/runtime/allocator.hpp" @@ -14,39 +15,39 @@ using OVDefaultAllocatorTest = ::testing::Test; TEST_F(OVDefaultAllocatorTest, notThrowOnZeroSize) { ov::Allocator allocator; void* ptr = nullptr; - ASSERT_NO_THROW(ptr = allocator.allocate(0)); - ASSERT_NO_THROW(allocator.deallocate(ptr)); + OV_ASSERT_NO_THROW(ptr = allocator.allocate(0)); + OV_ASSERT_NO_THROW(allocator.deallocate(ptr)); } TEST_F(OVDefaultAllocatorTest, canAllocateAndDeallocate) { ov::Allocator allocator; void* ptr = nullptr; - ASSERT_NO_THROW(ptr = allocator.allocate(64)); - ASSERT_NO_THROW(allocator.deallocate(ptr)); + OV_ASSERT_NO_THROW(ptr = allocator.allocate(64)); + OV_ASSERT_NO_THROW(allocator.deallocate(ptr)); } TEST_F(OVDefaultAllocatorTest, alignedAllocationNotThrow) { ov::Allocator allocator; - ASSERT_NO_THROW(allocator.allocate(64, 64)); + OV_ASSERT_NO_THROW(allocator.allocate(64, 64)); } TEST_F(OVDefaultAllocatorTest, sizedAndAlignedDeallocationNotThrow) { ov::Allocator allocator; void* ptr = nullptr; - ASSERT_NO_THROW(ptr = allocator.allocate(64)); - ASSERT_NO_THROW(allocator.deallocate(ptr, 64)); - ASSERT_NO_THROW(ptr = allocator.allocate(64, 64)); - ASSERT_NO_THROW(allocator.deallocate(ptr, 64, 64)); + OV_ASSERT_NO_THROW(ptr = allocator.allocate(64)); + 
OV_ASSERT_NO_THROW(allocator.deallocate(ptr, 64)); + OV_ASSERT_NO_THROW(ptr = allocator.allocate(64, 64)); + OV_ASSERT_NO_THROW(allocator.deallocate(ptr, 64, 64)); } TEST_F(OVDefaultAllocatorTest, defaultAllocatorsAreEqual) { ov::Allocator allocator0, allocator1; ASSERT_TRUE(allocator0 == allocator1); void* ptr = nullptr; - ASSERT_NO_THROW(ptr = allocator0.allocate(64)); - ASSERT_NO_THROW(allocator1.deallocate(ptr)); - ASSERT_NO_THROW(ptr = allocator1.allocate(64)); - ASSERT_NO_THROW(allocator0.deallocate(ptr)); + OV_ASSERT_NO_THROW(ptr = allocator0.allocate(64)); + OV_ASSERT_NO_THROW(allocator1.deallocate(ptr)); + OV_ASSERT_NO_THROW(ptr = allocator1.allocate(64)); + OV_ASSERT_NO_THROW(allocator0.deallocate(ptr)); } TEST_F(OVDefaultAllocatorTest, canAllocate10KMemory) { diff --git a/src/core/tests/ov_tensor_test.cpp b/src/core/tests/ov_tensor_test.cpp index 76ed5caaecae44..a6832f2bb5aff9 100644 --- a/src/core/tests/ov_tensor_test.cpp +++ b/src/core/tests/ov_tensor_test.cpp @@ -12,6 +12,7 @@ #include #include +#include "common_test_utils/test_assertions.hpp" #include "openvino/core/except.hpp" #include "openvino/core/partial_shape.hpp" #include "openvino/core/type/element_type_traits.hpp" @@ -391,7 +392,7 @@ TEST_F(OVTensorTest, canSetShape) { const void* orig_data = t.data(); ASSERT_EQ(t.get_shape(), origShape); - ASSERT_NO_THROW(t.set_shape(newShape)); + OV_ASSERT_NO_THROW(t.set_shape(newShape)); ASSERT_EQ(newShape, t.get_shape()); ASSERT_EQ(byteStrides(ov::row_major_strides(newShape), t.get_element_type()), t.get_strides()); ASSERT_NE(orig_data, t.data()); @@ -399,7 +400,7 @@ TEST_F(OVTensorTest, canSetShape) { // check that set_shape for copy changes original Tensor { ov::Tensor t2 = t; - ASSERT_NO_THROW(t2.set_shape(newShape2)); + OV_ASSERT_NO_THROW(t2.set_shape(newShape2)); ASSERT_EQ(newShape2, t.get_shape()); ASSERT_EQ(t2.get_shape(), t.get_shape()); ASSERT_EQ(t2.data(), t.data()); @@ -421,7 +422,7 @@ TEST_F(OVTensorTest, canSetShapeStringTensor) { const 
void* orig_data = t.data(); ASSERT_EQ(t.get_shape(), origShape); - ASSERT_NO_THROW(t.set_shape(newShape)); + OV_ASSERT_NO_THROW(t.set_shape(newShape)); ASSERT_EQ(newShape, t.get_shape()); ASSERT_EQ(byteStrides(ov::row_major_strides(newShape), t.get_element_type()), t.get_strides()); ASSERT_NE(orig_data, t.data()); @@ -429,7 +430,7 @@ TEST_F(OVTensorTest, canSetShapeStringTensor) { // check that setShape for copy changes original Tensor { ov::Tensor t2 = t; - ASSERT_NO_THROW(t2.set_shape(newShape2)); + OV_ASSERT_NO_THROW(t2.set_shape(newShape2)); ASSERT_EQ(newShape2, t2.get_shape()); ASSERT_EQ(t2.get_shape(), t.get_shape()); ASSERT_EQ(t2.data(), t.data()); @@ -438,7 +439,7 @@ TEST_F(OVTensorTest, canSetShapeStringTensor) { // set_shape for smaller memory - does not perform reallocation { - ASSERT_NO_THROW(t.set_shape(origShape)); + OV_ASSERT_NO_THROW(t.set_shape(origShape)); ASSERT_EQ(origShape, t.get_shape()); ASSERT_EQ(orig_data, t.data()); } @@ -465,7 +466,7 @@ TEST_F(OVTensorTest, canSetShapeOfSmallerSizeOnPreallocatedMemory) { ov::Tensor t{ov::element::f32, {4, 5, 6}, data}; const ov::Shape newShape({1, 2, 3}); - ASSERT_NO_THROW(t.set_shape(newShape)); + OV_ASSERT_NO_THROW(t.set_shape(newShape)); } TEST_F(OVTensorTest, canSetShapeOfSmallerSizeOnPreallocatedMemoryStringTensor) { @@ -473,7 +474,7 @@ TEST_F(OVTensorTest, canSetShapeOfSmallerSizeOnPreallocatedMemoryStringTensor) { ov::Tensor t{ov::element::string, {4, 5, 6}, data}; const ov::Shape newShape({1, 2, 3}); - ASSERT_NO_THROW(t.set_shape(newShape)); + OV_ASSERT_NO_THROW(t.set_shape(newShape)); } TEST_F(OVTensorTest, canSetShapeOfSameSizeOnPreallocatedMemory) { @@ -481,7 +482,7 @@ TEST_F(OVTensorTest, canSetShapeOfSameSizeOnPreallocatedMemory) { ov::Tensor t{ov::element::f32, {4, 5, 6}, data}; const ov::Shape newShape({4, 5, 6}); - ASSERT_NO_THROW(t.set_shape(newShape)); + OV_ASSERT_NO_THROW(t.set_shape(newShape)); } TEST_F(OVTensorTest, canSetShapeOfSameSizeOnPreallocatedMemoryStringTensor) { @@ -489,7 
+490,7 @@ TEST_F(OVTensorTest, canSetShapeOfSameSizeOnPreallocatedMemoryStringTensor) { ov::Tensor t{ov::element::string, {4, 5, 6}, data}; const ov::Shape newShape({4, 5, 6}); - ASSERT_NO_THROW(t.set_shape(newShape)); + OV_ASSERT_NO_THROW(t.set_shape(newShape)); } TEST_F(OVTensorTest, canSetShapeOfOriginalSizeAfterDecreasingOnPreallocatedMemory) { @@ -498,8 +499,8 @@ TEST_F(OVTensorTest, canSetShapeOfOriginalSizeAfterDecreasingOnPreallocatedMemor const ov::Shape smallerShape({1, 2, 3}); const ov::Shape originalShape({4, 5, 6}); - ASSERT_NO_THROW(t.set_shape(smallerShape)); - ASSERT_NO_THROW(t.set_shape(originalShape)); + OV_ASSERT_NO_THROW(t.set_shape(smallerShape)); + OV_ASSERT_NO_THROW(t.set_shape(originalShape)); } TEST_F(OVTensorTest, canSetShapeOfOriginalSizeAfterDecreasingOnPreallocatedMemoryStringTensor) { @@ -508,8 +509,8 @@ TEST_F(OVTensorTest, canSetShapeOfOriginalSizeAfterDecreasingOnPreallocatedMemor const ov::Shape smallerShape({1, 2, 3}); const ov::Shape originalShape({4, 5, 6}); - ASSERT_NO_THROW(t.set_shape(smallerShape)); - ASSERT_NO_THROW(t.set_shape(originalShape)); + OV_ASSERT_NO_THROW(t.set_shape(smallerShape)); + OV_ASSERT_NO_THROW(t.set_shape(originalShape)); } TEST_F(OVTensorTest, canSetShapeOfOriginalSizeAfterDecreasing) { @@ -517,9 +518,9 @@ TEST_F(OVTensorTest, canSetShapeOfOriginalSizeAfterDecreasing) { ov::Tensor t{ov::element::f32, shape}; void* data = t.data(); - ASSERT_NO_THROW(t.set_shape(small_shape)); + OV_ASSERT_NO_THROW(t.set_shape(small_shape)); EXPECT_EQ(data, t.data()); - ASSERT_NO_THROW(t.set_shape(shape)); + OV_ASSERT_NO_THROW(t.set_shape(shape)); EXPECT_EQ(data, t.data()); } @@ -528,9 +529,9 @@ TEST_F(OVTensorTest, canSetShapeOfOriginalSizeAfterDecreasingStringTensor) { ov::Tensor t{ov::element::string, shape}; void* data = t.data(); - ASSERT_NO_THROW(t.set_shape(small_shape)); + OV_ASSERT_NO_THROW(t.set_shape(small_shape)); EXPECT_EQ(data, t.data()); - ASSERT_NO_THROW(t.set_shape(shape)); + 
OV_ASSERT_NO_THROW(t.set_shape(shape)); EXPECT_EQ(data, t.data()); } @@ -541,7 +542,7 @@ TEST_F(OVTensorTest, canChangeShapeOnStridedTensor) { const ov::Shape correct_shape({1, 1, 2}); ASSERT_THROW(t.set_shape(incorrect_shape), ov::Exception); - ASSERT_NO_THROW(t.set_shape(correct_shape)); + OV_ASSERT_NO_THROW(t.set_shape(correct_shape)); } TEST_F(OVTensorTest, canChangeShapeOnStridedTensorStringTensor) { @@ -551,7 +552,7 @@ TEST_F(OVTensorTest, canChangeShapeOnStridedTensorStringTensor) { const ov::Shape correct_shape({1, 1, 2}); ASSERT_THROW(t.set_shape(incorrect_shape), ov::Exception); - ASSERT_NO_THROW(t.set_shape(correct_shape)); + OV_ASSERT_NO_THROW(t.set_shape(correct_shape)); } TEST_F(OVTensorTest, makeRangeRoiTensor) { @@ -649,7 +650,7 @@ TEST_F(OVTensorTest, tensorInt4DataAccess) { ASSERT_THROW((ov::Tensor{t, {0, 1, 2, 0}, {1, 5, 4, 3}}), ov::Exception); ASSERT_THROW(t.get_strides(), ov::Exception); ASSERT_THROW(t.data(), ov::Exception); - ASSERT_NO_THROW(t.data()); + OV_ASSERT_NO_THROW(t.data()); } TEST_F(OVTensorTest, makeRangeRoiBlobWrongSize) { diff --git a/src/core/tests/pass/constant_folding.cpp b/src/core/tests/pass/constant_folding.cpp index 0b99b57c9fb4f7..84fc18794da452 100644 --- a/src/core/tests/pass/constant_folding.cpp +++ b/src/core/tests/pass/constant_folding.cpp @@ -576,7 +576,7 @@ TEST(constant_folding, constant_unary_binary) { "equal_unsigned_shorts"); pass::Manager pass_manager; - ASSERT_NO_THROW(pass_manager.run_passes(func_error)); + OV_ASSERT_NO_THROW(pass_manager.run_passes(func_error)); } template diff --git a/src/core/tests/pass/serialization/rt_info_serialization.cpp b/src/core/tests/pass/serialization/rt_info_serialization.cpp index 3a0a6e8241ed72..a0131853704e3a 100644 --- a/src/core/tests/pass/serialization/rt_info_serialization.cpp +++ b/src/core/tests/pass/serialization/rt_info_serialization.cpp @@ -100,7 +100,7 @@ TEST_F(RTInfoSerializationTest, all_attributes_latest) { const std::string& dkey = 
ov::Decompression::get_type_info_static(); ASSERT_TRUE(info.count(dkey)); - ASSERT_NO_THROW(info.at(dkey).as()); + OV_ASSERT_NO_THROW(info.at(dkey).as()); }; auto add = f->get_results()[0]->get_input_node_ptr(0); diff --git a/src/core/tests/preprocess.cpp b/src/core/tests/preprocess.cpp index d7357ad598f925..0cec67c3031288 100644 --- a/src/core/tests/preprocess.cpp +++ b/src/core/tests/preprocess.cpp @@ -1392,7 +1392,7 @@ TEST(pre_post_process, preprocess_reverse_channels_no_shape_inference) { PrePostProcessor p(f); p.input(0).tensor().set_layout("NCHW"); p.input(0).preprocess().reverse_channels(); - ASSERT_NO_THROW(p.build()); + OV_ASSERT_NO_THROW(p.build()); // Ensure that {?,3,?,?} is not transformed to {?,?,?,?} EXPECT_EQ(out_shape, f->output(0).get_partial_shape()); } diff --git a/src/core/tests/replace_node.cpp b/src/core/tests/replace_node.cpp index 9e50fec21039a2..1ca71174ac1698 100644 --- a/src/core/tests/replace_node.cpp +++ b/src/core/tests/replace_node.cpp @@ -4,6 +4,7 @@ #include +#include "common_test_utils/test_assertions.hpp" #include "common_test_utils/type_prop.hpp" #include "openvino/core/model.hpp" #include "openvino/op/add.hpp" @@ -136,7 +137,7 @@ TEST(replace_node, replacement_with_direct_parent_node) { auto child_2 = std::make_shared(param); auto model = std::make_shared(OutputVector{child_1, child_2}, ParameterVector{param}); - ASSERT_NO_THROW(model->validate_nodes_and_infer_types()); + OV_ASSERT_NO_THROW(model->validate_nodes_and_infer_types()); auto relu = std::make_shared(param); relu->output(0).get_tensor().set_names({"c", "d"}); @@ -144,7 +145,7 @@ TEST(replace_node, replacement_with_direct_parent_node) { // This check validates that the model is consistent and contains no loops. // The topological sorting throws an exception in case of a loop in the graph. 
- ASSERT_NO_THROW(model->validate_nodes_and_infer_types()); + OV_ASSERT_NO_THROW(model->validate_nodes_and_infer_types()); int relu_cnt = 0; for (const auto& op : model->get_ordered_ops()) { diff --git a/src/core/tests/type_prop/framework_node.cpp b/src/core/tests/type_prop/framework_node.cpp index f8c9b5eeafe488..f968536ac4a5a7 100644 --- a/src/core/tests/type_prop/framework_node.cpp +++ b/src/core/tests/type_prop/framework_node.cpp @@ -6,6 +6,7 @@ #include +#include "common_test_utils/test_assertions.hpp" #include "common_test_utils/type_prop.hpp" #include "openvino/opsets/opset8.hpp" @@ -20,28 +21,28 @@ TEST(type_prop, framework_node) { param->set_partial_shape(ov::PartialShape{ov::Dimension::dynamic(), 64}); param->validate_and_infer_types(); - ASSERT_NO_THROW(f_node->validate_and_infer_types()); + OV_ASSERT_NO_THROW(f_node->validate_and_infer_types()); ASSERT_EQ(f_node->get_output_partial_shape(0), ov::PartialShape::dynamic()); // Set dynamic shape param->set_partial_shape(ov::PartialShape::dynamic(2)); param->validate_and_infer_types(); - ASSERT_NO_THROW(f_node->validate_and_infer_types()); + OV_ASSERT_NO_THROW(f_node->validate_and_infer_types()); ASSERT_EQ(f_node->get_output_partial_shape(0), ov::PartialShape::dynamic()); // Set fully dynamic shape param->set_partial_shape(ov::PartialShape::dynamic()); param->validate_and_infer_types(); - ASSERT_NO_THROW(f_node->validate_and_infer_types()); + OV_ASSERT_NO_THROW(f_node->validate_and_infer_types()); ASSERT_EQ(f_node->get_output_partial_shape(0), ov::PartialShape::dynamic()); // Set original static shape param->set_partial_shape(ov::Shape{1, 64}); param->validate_and_infer_types(); - ASSERT_NO_THROW(f_node->validate_and_infer_types()); + OV_ASSERT_NO_THROW(f_node->validate_and_infer_types()); ASSERT_EQ(f_node->get_output_partial_shape(0), ov::PartialShape({1, 64})); // Set different static shape @@ -59,28 +60,28 @@ TEST(type_prop, dynamic_framework_node_with_dynamic_input) { 
param->set_partial_shape(ov::PartialShape{ov::Dimension::dynamic(), 64}); param->validate_and_infer_types(); - ASSERT_NO_THROW(f_node->validate_and_infer_types()); + OV_ASSERT_NO_THROW(f_node->validate_and_infer_types()); ASSERT_EQ(f_node->get_output_partial_shape(0), ov::PartialShape::dynamic()); // Set dynamic shape with static rank param->set_partial_shape(ov::PartialShape::dynamic(2)); param->validate_and_infer_types(); - ASSERT_NO_THROW(f_node->validate_and_infer_types()); + OV_ASSERT_NO_THROW(f_node->validate_and_infer_types()); ASSERT_EQ(f_node->get_output_partial_shape(0), ov::PartialShape::dynamic()); // Set static shape param->set_partial_shape(ov::PartialShape({1, 64})); param->validate_and_infer_types(); - ASSERT_NO_THROW(f_node->validate_and_infer_types()); + OV_ASSERT_NO_THROW(f_node->validate_and_infer_types()); ASSERT_EQ(f_node->get_output_partial_shape(0), ov::PartialShape::dynamic()); // Set static type param->set_element_type(ov::element::f32); param->validate_and_infer_types(); - ASSERT_NO_THROW(f_node->validate_and_infer_types()); + OV_ASSERT_NO_THROW(f_node->validate_and_infer_types()); ASSERT_EQ(f_node->get_output_element_type(0), ov::element::dynamic); } @@ -92,13 +93,13 @@ TEST(type_prop, dynamic_framework_node_with_static_rank) { param->set_partial_shape(ov::PartialShape{ov::Dimension::dynamic(), 64}); param->validate_and_infer_types(); - ASSERT_NO_THROW(f_node->validate_and_infer_types()); + OV_ASSERT_NO_THROW(f_node->validate_and_infer_types()); ASSERT_EQ(f_node->get_output_partial_shape(0), ov::PartialShape::dynamic()); // Set static shape param->set_partial_shape(ov::PartialShape({1, 64})); param->validate_and_infer_types(); - ASSERT_NO_THROW(f_node->validate_and_infer_types()); + OV_ASSERT_NO_THROW(f_node->validate_and_infer_types()); ASSERT_EQ(f_node->get_output_partial_shape(0), ov::PartialShape::dynamic()); } diff --git a/src/core/tests/type_prop/normalize_l2.cpp b/src/core/tests/type_prop/normalize_l2.cpp index 
37a97d23906810..f70ea507875902 100644 --- a/src/core/tests/type_prop/normalize_l2.cpp +++ b/src/core/tests/type_prop/normalize_l2.cpp @@ -4,6 +4,7 @@ #include "openvino/op/normalize_l2.hpp" +#include "common_test_utils/test_assertions.hpp" #include "common_test_utils/type_prop.hpp" #include "openvino/op/constant.hpp" @@ -40,7 +41,7 @@ TEST(type_prop, normalize_l2_axes_input_not_constant) { auto axes = make_shared(element::u64, Shape{1}); float eps{1e-6f}; auto eps_mode = op::EpsMode::ADD; - ASSERT_NO_THROW(auto op = make_shared(data, axes, eps, eps_mode)); + OV_ASSERT_NO_THROW(auto op = make_shared(data, axes, eps, eps_mode)); } TEST(type_prop, normalize_l2_invalid_axes_rank) { diff --git a/src/frontends/ir/tests/frontend_test.hpp b/src/frontends/ir/tests/frontend_test.hpp index f53d76f9362533..7b131461f04a85 100644 --- a/src/frontends/ir/tests/frontend_test.hpp +++ b/src/frontends/ir/tests/frontend_test.hpp @@ -8,6 +8,7 @@ #include "common_test_utils/common_utils.hpp" #include "common_test_utils/graph_comparator.hpp" +#include "common_test_utils/test_assertions.hpp" #include "openvino/frontend/manager.hpp" #include "openvino/openvino.hpp" diff --git a/src/frontends/ir/tests/frontend_test_basic.cpp b/src/frontends/ir/tests/frontend_test_basic.cpp index aab8fe610d49f3..5780168ce80dba 100644 --- a/src/frontends/ir/tests/frontend_test_basic.cpp +++ b/src/frontends/ir/tests/frontend_test_basic.cpp @@ -63,10 +63,10 @@ TEST_F(IRFrontendTests, elementary_model_reading_v11) { ov::RTMap rtInfo; uint64_t version = 0; - ASSERT_NO_THROW(model = getWithIRFrontend(testModelV11)); + OV_ASSERT_NO_THROW(model = getWithIRFrontend(testModelV11)); ASSERT_TRUE(!!model); - ASSERT_NO_THROW(rtInfo = model->get_rt_info()); - ASSERT_NO_THROW(version = rtInfo["version"].as()); + OV_ASSERT_NO_THROW(rtInfo = model->get_rt_info()); + OV_ASSERT_NO_THROW(version = rtInfo["version"].as()); ASSERT_EQ(11, version); std::shared_ptr modelRef; @@ -124,10 +124,10 @@ TEST_F(IRFrontendTests, 
elementary_model_reading_v10) { ov::RTMap rtInfoV10; uint64_t version = 0; - ASSERT_NO_THROW(modelv10 = getWithIRFrontend(testModelV10)); + OV_ASSERT_NO_THROW(modelv10 = getWithIRFrontend(testModelV10)); ASSERT_TRUE(!!modelv10); - ASSERT_NO_THROW(rtInfoV10 = modelv10->get_rt_info()); - ASSERT_NO_THROW(version = rtInfoV10["version"].as()); + OV_ASSERT_NO_THROW(rtInfoV10 = modelv10->get_rt_info()); + OV_ASSERT_NO_THROW(version = rtInfoV10["version"].as()); ASSERT_EQ(10, version); std::shared_ptr modelRef; @@ -329,7 +329,7 @@ TEST_P(IRFrontendMMapTests, model_with_weights_reading_from_disk) { ov::Core new_core; new_core.set_property(ov::enable_mmap(GetParam())); - ASSERT_NO_THROW(model = new_core.read_model(xmlFileName, binFileName)); + OV_ASSERT_NO_THROW(model = new_core.read_model(xmlFileName, binFileName)); ASSERT_TRUE(!!model); std::shared_ptr modelRef; @@ -395,7 +395,7 @@ TEST_F(IRFrontendTests, model_without_weights_reading_from_disk) { std::shared_ptr model; - ASSERT_NO_THROW(model = core.read_model(xmlFileName)); + OV_ASSERT_NO_THROW(model = core.read_model(xmlFileName)); ASSERT_TRUE(!!model); std::shared_ptr modelRef; @@ -912,7 +912,7 @@ TEST_F(IRFrontendTests, not_opset1) { std::shared_ptr model; - ASSERT_NO_THROW(model = getWithIRFrontend(testModel)); + OV_ASSERT_NO_THROW(model = getWithIRFrontend(testModel)); ASSERT_TRUE(!!model); std::shared_ptr modelRef; @@ -1066,7 +1066,7 @@ TEST_F(IRFrontendTests, extension_proposal_network) { createTemporalModelFile(xmlModel, buffer); std::shared_ptr model; - ASSERT_NO_THROW(model = core.read_model(xmlFileName, binFileName)); + OV_ASSERT_NO_THROW(model = core.read_model(xmlFileName, binFileName)); ASSERT_TRUE(!!model); for (auto op : model->get_ordered_ops()) { @@ -1144,7 +1144,7 @@ TEST_F(IRFrontendTests, model_with_tensor_names_with_spaces) { std::shared_ptr model; - ASSERT_NO_THROW(model = core.read_model(testModel, ov::Tensor())); + OV_ASSERT_NO_THROW(model = core.read_model(testModel, ov::Tensor())); 
ASSERT_TRUE(!!model); auto outputs = model->outputs(); @@ -1241,7 +1241,7 @@ TEST_F(IRFrontendTests, model_with_tensor_names_add_output) { std::shared_ptr model; std::string tensor_name = "output add"; - ASSERT_NO_THROW(model = core.read_model(testModel, ov::Tensor())); + OV_ASSERT_NO_THROW(model = core.read_model(testModel, ov::Tensor())); ASSERT_TRUE(!!model); model->add_output(tensor_name); @@ -1307,7 +1307,7 @@ TEST_F(IRFrontendTests, name_with_comma) { std::shared_ptr model; std::string tensor_name = "relu,t"; - ASSERT_NO_THROW(model = core.read_model(testModel, ov::Tensor())); + OV_ASSERT_NO_THROW(model = core.read_model(testModel, ov::Tensor())); ASSERT_TRUE(!!model); model->add_output(tensor_name); @@ -1399,7 +1399,7 @@ TEST_F(IRFrontendTests, DetectionOutput) { std::shared_ptr model; - ASSERT_NO_THROW(model = getWithIRFrontend(testModel)); + OV_ASSERT_NO_THROW(model = getWithIRFrontend(testModel)); ASSERT_TRUE(!!model); } diff --git a/src/frontends/ir/tests/frontend_test_with_extensions.cpp b/src/frontends/ir/tests/frontend_test_with_extensions.cpp index eb14ee23f5b9dc..d2e54274718ba6 100644 --- a/src/frontends/ir/tests/frontend_test_with_extensions.cpp +++ b/src/frontends/ir/tests/frontend_test_with_extensions.cpp @@ -70,7 +70,7 @@ TEST_F(IRFrontendExtensionTests, custom_ops_test_with_framework_node_extension) core.add_extension(extension); - ASSERT_NO_THROW(model = core.read_model(customOpsModel, ov::Tensor())); + OV_ASSERT_NO_THROW(model = core.read_model(customOpsModel, ov::Tensor())); ASSERT_TRUE(!!model); } diff --git a/src/frontends/ir/tests/pre_processing_deserialization.cpp b/src/frontends/ir/tests/pre_processing_deserialization.cpp index ece68ba76a12a5..f97cf078b53951 100644 --- a/src/frontends/ir/tests/pre_processing_deserialization.cpp +++ b/src/frontends/ir/tests/pre_processing_deserialization.cpp @@ -68,6 +68,6 @@ TEST_F(IRFrontendTestsPreProcessing, pre_processing) { std::shared_ptr model; - ASSERT_NO_THROW(model = 
core.read_model(xmlFileName, binFileName)); + OV_ASSERT_NO_THROW(model = core.read_model(xmlFileName, binFileName)); ASSERT_TRUE(!!model); } diff --git a/src/frontends/ir/tests/tensor_iterator_deserialization.cpp b/src/frontends/ir/tests/tensor_iterator_deserialization.cpp index b5f465cff86fbd..a18a34b68a8052 100644 --- a/src/frontends/ir/tests/tensor_iterator_deserialization.cpp +++ b/src/frontends/ir/tests/tensor_iterator_deserialization.cpp @@ -97,7 +97,7 @@ TEST_F(IRFrontendTestsTensorIterator, tensor_iterator_merged_input) { std::shared_ptr model; - ASSERT_NO_THROW(model = core.read_model(testModel, ov::Tensor())); + OV_ASSERT_NO_THROW(model = core.read_model(testModel, ov::Tensor())); ASSERT_TRUE(!!model); std::shared_ptr modelRef; @@ -215,7 +215,7 @@ TEST_F(IRFrontendTestsTensorIterator, tensor_iterator_slised_input) { std::shared_ptr model; - ASSERT_NO_THROW(model = core.read_model(testModel, ov::Tensor())); + OV_ASSERT_NO_THROW(model = core.read_model(testModel, ov::Tensor())); ASSERT_TRUE(!!model); std::shared_ptr modelRef; @@ -423,7 +423,7 @@ TEST_F(IRFrontendTestsTensorIterator, loop1) { createTemporalModelFile(xmlModel, buffer); std::shared_ptr model; - ASSERT_NO_THROW(model = core.read_model(xmlFileName, binFileName)); + OV_ASSERT_NO_THROW(model = core.read_model(xmlFileName, binFileName)); ASSERT_TRUE(!!model); } @@ -627,7 +627,7 @@ TEST_F(IRFrontendTestsTensorIterator, loop2) { createTemporalModelFile(xmlModel, buffer); std::shared_ptr model; - ASSERT_NO_THROW(model = core.read_model(xmlFileName, binFileName)); + OV_ASSERT_NO_THROW(model = core.read_model(xmlFileName, binFileName)); ASSERT_TRUE(!!model); } @@ -831,7 +831,7 @@ TEST_F(IRFrontendTestsTensorIterator, loop_external_port1_is_not_connected) { createTemporalModelFile(xmlModel, buffer); std::shared_ptr model; - ASSERT_NO_THROW(model = core.read_model(xmlFileName, binFileName)); + OV_ASSERT_NO_THROW(model = core.read_model(xmlFileName, binFileName)); ASSERT_TRUE(!!model); } @@ -1150,7 +1150,7 
@@ TEST_F(IRFrontendTestsTensorIterator, tensor_iterator_resnet_opset4) { createTemporalModelFile(xmlModel, buffer); std::shared_ptr model; - ASSERT_NO_THROW(model = core.read_model(xmlFileName, binFileName)); + OV_ASSERT_NO_THROW(model = core.read_model(xmlFileName, binFileName)); ASSERT_TRUE(!!model); } @@ -1441,7 +1441,7 @@ TEST_F(IRFrontendTestsTensorIterator, tensor_iterator_negative_stride_opset4) { createTemporalModelFile(xmlModel, buffer); std::shared_ptr model; - ASSERT_NO_THROW(model = core.read_model(xmlFileName, binFileName)); + OV_ASSERT_NO_THROW(model = core.read_model(xmlFileName, binFileName)); ASSERT_TRUE(!!model); } @@ -1691,6 +1691,6 @@ TEST_F(IRFrontendTestsTensorIterator, test1) { createTemporalModelFile(xmlModel, buffer); std::shared_ptr model; - ASSERT_NO_THROW(model = core.read_model(xmlFileName, binFileName)); + OV_ASSERT_NO_THROW(model = core.read_model(xmlFileName, binFileName)); ASSERT_TRUE(!!model); } diff --git a/src/frontends/onnx/tests/conversion.cpp b/src/frontends/onnx/tests/conversion.cpp index 133ac3a18c8817..c837fa394ce431 100644 --- a/src/frontends/onnx/tests/conversion.cpp +++ b/src/frontends/onnx/tests/conversion.cpp @@ -66,7 +66,7 @@ TEST(ONNXConversionExtensionTest, custom_op_with_custom_domain) { }); std::shared_ptr model; - ASSERT_NO_THROW(model = onnx::tests::convert_model("missing_op_domain.onnx", ext)); + OV_ASSERT_NO_THROW(model = onnx::tests::convert_model("missing_op_domain.onnx", ext)); for (const auto& op : model->get_ops()) { if (const auto& add = std::dynamic_pointer_cast(op)) { diff --git a/src/frontends/onnx/tests/convert_partially_tests.cpp b/src/frontends/onnx/tests/convert_partially_tests.cpp index c52353a4f95b6d..290bb4d7298a9c 100644 --- a/src/frontends/onnx/tests/convert_partially_tests.cpp +++ b/src/frontends/onnx/tests/convert_partially_tests.cpp @@ -33,7 +33,7 @@ std::shared_ptr get_framework_node_with_out_name(co TEST(ONNXFeConvertPartially, insert_framework_node_if_unsupported) { std::shared_ptr 
model; - ASSERT_NO_THROW(model = convert_partially("unsupported_ops/add_unsupported.onnx")); + OV_ASSERT_NO_THROW(model = convert_partially("unsupported_ops/add_unsupported.onnx")); ASSERT_TRUE(model); EXPECT_EQ(count_ops_of_type(model), 1); @@ -45,7 +45,7 @@ TEST(ONNXFeConvertPartially, insert_framework_node_if_unsupported) { TEST(ONNXFeConvertPartially, insert_more_framework_nodes_if_unsupported) { std::shared_ptr model; - ASSERT_NO_THROW(model = convert_partially("unsupported_ops/two_unsupported_nodes.onnx")); + OV_ASSERT_NO_THROW(model = convert_partially("unsupported_ops/two_unsupported_nodes.onnx")); ASSERT_TRUE(model); EXPECT_EQ(count_ops_of_type(model), 1); @@ -61,7 +61,7 @@ TEST(ONNXFeConvertPartially, insert_more_framework_nodes_if_unsupported) { // validation error - onnx/instance_norm_bad_scale_type.onnx TEST(ONNXFeConvertPartially, insert_framework_node_if_onnx_validation_exception) { std::shared_ptr model; - ASSERT_NO_THROW(model = convert_partially("instance_norm_bad_scale_type.onnx")); + OV_ASSERT_NO_THROW(model = convert_partially("instance_norm_bad_scale_type.onnx")); ASSERT_TRUE(model); const auto incorrect_instance_norm = get_framework_node_with_out_name(model, "y"); @@ -71,7 +71,7 @@ TEST(ONNXFeConvertPartially, insert_framework_node_if_onnx_validation_exception) TEST(ONNXFeConvertPartially, insert_framework_node_if_other_translation_exception) { std::shared_ptr model; - ASSERT_NO_THROW(model = convert_partially("depth_to_space_bad_mode.onnx")); + OV_ASSERT_NO_THROW(model = convert_partially("depth_to_space_bad_mode.onnx")); ASSERT_TRUE(model); const auto incorrect_dts = get_framework_node_with_out_name(model, "B"); @@ -81,7 +81,7 @@ TEST(ONNXFeConvertPartially, insert_framework_node_if_other_translation_exceptio TEST(ONNXFeConvertPartially, insert_framework_nodes_if_both_unsupported_and_other_translation_exception) { std::shared_ptr model; - ASSERT_NO_THROW(model = convert_partially("unsupported_ops/unsupported_add_and_incorrect_dts.onnx")); + 
OV_ASSERT_NO_THROW(model = convert_partially("unsupported_ops/unsupported_add_and_incorrect_dts.onnx")); ASSERT_TRUE(model); EXPECT_EQ(count_ops_of_type(model), 1); diff --git a/src/frontends/onnx/tests/load_from.cpp b/src/frontends/onnx/tests/load_from.cpp index 660ff3d96df7b7..617f4a917567d5 100644 --- a/src/frontends/onnx/tests/load_from.cpp +++ b/src/frontends/onnx/tests/load_from.cpp @@ -32,7 +32,7 @@ TEST_P(FrontEndLoadFromTest, testLoadFromStreamAndPassPath) { std::istream* is = &ifs; std::vector frontends; FrontEnd::Ptr fe; - ASSERT_NO_THROW(frontends = m_fem.get_available_front_ends()); + OV_ASSERT_NO_THROW(frontends = m_fem.get_available_front_ends()); ASSERT_NO_THROW(m_frontEnd = m_fem.load_by_model(is)) << "Could not create the ONNX FE using the istream object"; ASSERT_NE(m_frontEnd, nullptr); diff --git a/src/frontends/onnx/tests/onnx_editor_topological_sort.cpp b/src/frontends/onnx/tests/onnx_editor_topological_sort.cpp index e393095bc133a4..2e74749d433283 100644 --- a/src/frontends/onnx/tests/onnx_editor_topological_sort.cpp +++ b/src/frontends/onnx/tests/onnx_editor_topological_sort.cpp @@ -19,26 +19,26 @@ static std::string s_manifest = onnx_backend_manifest("${MANIFEST}"); OPENVINO_TEST(onnx_editor, topological_sort_two_nodes_swap) { FrontEnd::Ptr front_end; auto input_model = load_model("model_editor/topological_sort/two_nodes_swap.onnx", &front_end); - ASSERT_NO_THROW(front_end->convert(input_model)); + OV_ASSERT_NO_THROW(front_end->convert(input_model)); } OPENVINO_TEST(onnx_editor, topological_sort_completely_unsorted) { FrontEnd::Ptr front_end; auto input_model = load_model("model_editor/topological_sort/completely_unsorted.onnx", &front_end); - ASSERT_NO_THROW(front_end->convert(input_model)); + OV_ASSERT_NO_THROW(front_end->convert(input_model)); } OPENVINO_TEST(onnx_editor, topological_sort_completely_unsorted_2) { FrontEnd::Ptr front_end; auto input_model = load_model("model_editor/topological_sort/completely_unsorted_2.onnx", 
&front_end); - ASSERT_NO_THROW(front_end->convert(input_model)); + OV_ASSERT_NO_THROW(front_end->convert(input_model)); } #if defined(OPENVINO_ENABLE_UNICODE_PATH_SUPPORT) && defined(_WIN32) OPENVINO_TEST(onnx_editor, topological_sort_completely_unsorted_2_wstring) { FrontEnd::Ptr front_end; auto input_model = load_model(L"model_editor/topological_sort/completely_unsorted_2.onnx", &front_end); - ASSERT_NO_THROW(front_end->convert(input_model)); + OV_ASSERT_NO_THROW(front_end->convert(input_model)); } #endif @@ -47,7 +47,7 @@ OPENVINO_TEST(onnx_editor, topological_sort_constant_node_in_the_graph) { FrontEnd::Ptr front_end; auto input_model = load_model("model_editor/topological_sort/completely_unsorted_2.onnx", &front_end); - ASSERT_NO_THROW(front_end->convert(input_model)); + OV_ASSERT_NO_THROW(front_end->convert(input_model)); } OPENVINO_TEST(onnx_editor, topological_sort_multioutput_node) { @@ -55,7 +55,7 @@ OPENVINO_TEST(onnx_editor, topological_sort_multioutput_node) { FrontEnd::Ptr front_end; auto input_model = load_model("model_editor/topological_sort/completely_unsorted_2.onnx", &front_end); - ASSERT_NO_THROW(front_end->convert(input_model)); + OV_ASSERT_NO_THROW(front_end->convert(input_model)); } /* diff --git a/src/frontends/onnx/tests/onnx_ops_registration.cpp b/src/frontends/onnx/tests/onnx_ops_registration.cpp index 22a2bebb0562ae..32468e8ccb85cc 100644 --- a/src/frontends/onnx/tests/onnx_ops_registration.cpp +++ b/src/frontends/onnx/tests/onnx_ops_registration.cpp @@ -24,7 +24,7 @@ OPENVINO_TEST(ops_registration, check_importing_abs_in_all_opset_versions) { for (int version = 1; version <= ONNX_OPSET_VERSION; ++version) { const auto changed_opset_model = change_opset_version(editor.model_string(), {version}); std::stringstream model_stream{changed_opset_model}; - ASSERT_NO_THROW(ONNXModelEditor(model_stream).get_function()); + OV_ASSERT_NO_THROW(ONNXModelEditor(model_stream).get_function()); } } diff --git a/src/frontends/onnx/tests/onnx_utils.hpp 
b/src/frontends/onnx/tests/onnx_utils.hpp index 9e9d56f36fbc8d..1362bcea749e24 100644 --- a/src/frontends/onnx/tests/onnx_utils.hpp +++ b/src/frontends/onnx/tests/onnx_utils.hpp @@ -9,6 +9,7 @@ #include #include "common_test_utils/test_constants.hpp" +#include "common_test_utils/test_assertions.hpp" #include "openvino/core/model.hpp" #include "openvino/frontend/extension.hpp" #include "openvino/frontend/manager.hpp" diff --git a/src/frontends/paddle/tests/convert_unsupported.cpp b/src/frontends/paddle/tests/convert_unsupported.cpp index bb5a49c84aeb62..c4abb69b4565b1 100644 --- a/src/frontends/paddle/tests/convert_unsupported.cpp +++ b/src/frontends/paddle/tests/convert_unsupported.cpp @@ -16,18 +16,18 @@ TEST(FrontEndConvertModelTest, test_unsupported_op) { FrontEndManager fem; FrontEnd::Ptr frontEnd; InputModel::Ptr inputModel; - ASSERT_NO_THROW(frontEnd = fem.load_by_framework(PADDLE_FE)); + OV_ASSERT_NO_THROW(frontEnd = fem.load_by_framework(PADDLE_FE)); ASSERT_NE(frontEnd, nullptr); auto model_filename = FrontEndTestUtils::make_model_path(std::string(TEST_PADDLE_MODELS_DIRNAME) + std::string("relu_unsupported/relu_unsupported.pdmodel")); - ASSERT_NO_THROW(inputModel = frontEnd->load(model_filename)); + OV_ASSERT_NO_THROW(inputModel = frontEnd->load(model_filename)); ASSERT_NE(inputModel, nullptr); std::shared_ptr model; ASSERT_THROW(model = frontEnd->convert(inputModel), OpConversionFailure); ASSERT_EQ(model, nullptr); - ASSERT_NO_THROW(model = frontEnd->decode(inputModel)); + OV_ASSERT_NO_THROW(model = frontEnd->decode(inputModel)); ASSERT_THROW(frontEnd->convert(model), OpConversionFailure); - ASSERT_NO_THROW(model = frontEnd->convert_partially(inputModel)); + OV_ASSERT_NO_THROW(model = frontEnd->convert_partially(inputModel)); ASSERT_THROW(frontEnd->convert(model), OpConversionFailure); for (auto& node : model->get_ordered_ops()) { @@ -35,5 +35,5 @@ TEST(FrontEndConvertModelTest, test_unsupported_op) { model->replace_node(node, 
std::make_shared(node->input(0).get_source_output())); } } - ASSERT_NO_THROW(frontEnd->convert(model)); + OV_ASSERT_NO_THROW(frontEnd->convert(model)); } diff --git a/src/frontends/paddle/tests/incorrect_cut_model.cpp b/src/frontends/paddle/tests/incorrect_cut_model.cpp index 191745e9532e75..e70527744c1294 100644 --- a/src/frontends/paddle/tests/incorrect_cut_model.cpp +++ b/src/frontends/paddle/tests/incorrect_cut_model.cpp @@ -16,11 +16,11 @@ TEST(FrontEndIncorrectCutModelTest, test_incorrect_cut) { FrontEndManager fem; FrontEnd::Ptr frontEnd; InputModel::Ptr inputModel; - ASSERT_NO_THROW(frontEnd = fem.load_by_framework(PADDLE_FE)); + OV_ASSERT_NO_THROW(frontEnd = fem.load_by_framework(PADDLE_FE)); ASSERT_NE(frontEnd, nullptr); auto model_filename = FrontEndTestUtils::make_model_path(std::string(TEST_PADDLE_MODELS_DIRNAME) + std::string("2in_2out/2in_2out.pdmodel")); - ASSERT_NO_THROW(inputModel = frontEnd->load(model_filename)); + OV_ASSERT_NO_THROW(inputModel = frontEnd->load(model_filename)); ASSERT_NE(inputModel, nullptr); // remove second input diff --git a/src/frontends/paddle/tests/places.cpp b/src/frontends/paddle/tests/places.cpp index 38ae2cc516fe17..b3c6f9d14cde69 100644 --- a/src/frontends/paddle/tests/places.cpp +++ b/src/frontends/paddle/tests/places.cpp @@ -50,9 +50,9 @@ class Paddle_Places : public ::testing::Test { TEST_F(Paddle_Places, check_tensor_names) { auto fem = FrontEndManager(); FrontEnd::Ptr frontend; - ASSERT_NO_THROW(frontend = fem.load_by_framework(PADDLE_FE)); + OV_ASSERT_NO_THROW(frontend = fem.load_by_framework(PADDLE_FE)); InputModel::Ptr input_model; - ASSERT_NO_THROW(input_model = frontend->load(FrontEndTestUtils::make_model_path(model_file))); + OV_ASSERT_NO_THROW(input_model = frontend->load(FrontEndTestUtils::make_model_path(model_file))); for (const auto& tensor_name : tensor_names) { auto place = input_model->get_place_by_tensor_name(tensor_name); @@ -63,9 +63,9 @@ TEST_F(Paddle_Places, check_tensor_names) { 
TEST_F(Paddle_Places, check_input_outputs) { auto fem = FrontEndManager(); FrontEnd::Ptr frontend; - ASSERT_NO_THROW(frontend = fem.load_by_framework(PADDLE_FE)); + OV_ASSERT_NO_THROW(frontend = fem.load_by_framework(PADDLE_FE)); InputModel::Ptr input_model; - ASSERT_NO_THROW(input_model = frontend->load(FrontEndTestUtils::make_model_path(model_file))); + OV_ASSERT_NO_THROW(input_model = frontend->load(FrontEndTestUtils::make_model_path(model_file))); auto inputs = input_model->get_inputs(); auto outputs = input_model->get_outputs(); @@ -89,9 +89,9 @@ TEST_F(Paddle_Places, check_input_outputs) { TEST_F(Paddle_Places, check_out_port_of_all_ops) { auto fem = FrontEndManager(); FrontEnd::Ptr frontend; - ASSERT_NO_THROW(frontend = fem.load_by_framework(PADDLE_FE)); + OV_ASSERT_NO_THROW(frontend = fem.load_by_framework(PADDLE_FE)); InputModel::Ptr input_model; - ASSERT_NO_THROW(input_model = frontend->load(FrontEndTestUtils::make_model_path(model_file))); + OV_ASSERT_NO_THROW(input_model = frontend->load(FrontEndTestUtils::make_model_path(model_file))); for (const auto& tensor_name : tensor_names) { auto place = input_model->get_place_by_tensor_name(tensor_name); @@ -111,9 +111,9 @@ TEST_F(Paddle_Places, check_out_port_of_all_ops) { TEST_F(Paddle_Places, check_in_out_ports_of_model_outputs) { auto fem = FrontEndManager(); FrontEnd::Ptr frontend; - ASSERT_NO_THROW(frontend = fem.load_by_framework(PADDLE_FE)); + OV_ASSERT_NO_THROW(frontend = fem.load_by_framework(PADDLE_FE)); InputModel::Ptr input_model; - ASSERT_NO_THROW(input_model = frontend->load(FrontEndTestUtils::make_model_path(model_file))); + OV_ASSERT_NO_THROW(input_model = frontend->load(FrontEndTestUtils::make_model_path(model_file))); auto outputs = input_model->get_outputs(); for (const auto& output : outputs) { @@ -145,9 +145,9 @@ TEST_F(Paddle_Places, check_in_out_ports_of_model_outputs) { TEST_F(Paddle_Places, check_source_target_tensors_of_model_outputs) { auto fem = FrontEndManager(); FrontEnd::Ptr 
frontend; - ASSERT_NO_THROW(frontend = fem.load_by_framework(PADDLE_FE)); + OV_ASSERT_NO_THROW(frontend = fem.load_by_framework(PADDLE_FE)); InputModel::Ptr input_model; - ASSERT_NO_THROW(input_model = frontend->load(FrontEndTestUtils::make_model_path(model_file))); + OV_ASSERT_NO_THROW(input_model = frontend->load(FrontEndTestUtils::make_model_path(model_file))); auto outputs = input_model->get_outputs(); for (const auto& output : outputs) { @@ -179,9 +179,9 @@ TEST_F(Paddle_Places, check_source_target_tensors_of_model_outputs) { TEST_F(Paddle_Places, check_producing_consuming_ops_of_model_outputs) { auto fem = FrontEndManager(); FrontEnd::Ptr frontend; - ASSERT_NO_THROW(frontend = fem.load_by_framework(PADDLE_FE)); + OV_ASSERT_NO_THROW(frontend = fem.load_by_framework(PADDLE_FE)); InputModel::Ptr input_model; - ASSERT_NO_THROW(input_model = frontend->load(FrontEndTestUtils::make_model_path(model_file))); + OV_ASSERT_NO_THROW(input_model = frontend->load(FrontEndTestUtils::make_model_path(model_file))); auto outputs = input_model->get_outputs(); for (const auto& output : outputs) { @@ -214,9 +214,9 @@ TEST_F(Paddle_Places, check_producing_consuming_ops_of_model_outputs) { TEST_F(Paddle_Places, check_data_flow) { auto fem = FrontEndManager(); FrontEnd::Ptr frontend; - ASSERT_NO_THROW(frontend = fem.load_by_framework(PADDLE_FE)); + OV_ASSERT_NO_THROW(frontend = fem.load_by_framework(PADDLE_FE)); InputModel::Ptr input_model; - ASSERT_NO_THROW(input_model = frontend->load(FrontEndTestUtils::make_model_path(model_file))); + OV_ASSERT_NO_THROW(input_model = frontend->load(FrontEndTestUtils::make_model_path(model_file))); for (const auto& tensor_name : tensor_names) { auto tensor_place = input_model->get_place_by_tensor_name(tensor_name); @@ -253,9 +253,9 @@ TEST_F(Paddle_Places, check_data_flow) { TEST_F(Paddle_Places, check_tensor_to_multiple_ports) { auto fem = FrontEndManager(); FrontEnd::Ptr frontend; - ASSERT_NO_THROW(frontend = fem.load_by_framework(PADDLE_FE)); + 
OV_ASSERT_NO_THROW(frontend = fem.load_by_framework(PADDLE_FE)); InputModel::Ptr input_model; - ASSERT_NO_THROW(input_model = frontend->load(FrontEndTestUtils::make_model_path(model_file))); + OV_ASSERT_NO_THROW(input_model = frontend->load(FrontEndTestUtils::make_model_path(model_file))); for (const auto& tensor_name : tensor_names) { auto tensor_place = input_model->get_place_by_tensor_name(tensor_name); @@ -279,9 +279,9 @@ TEST_F(Paddle_Places, check_tensor_to_multiple_ports) { TEST_F(Paddle_Places, check_consuming_ops) { auto fem = FrontEndManager(); FrontEnd::Ptr frontend; - ASSERT_NO_THROW(frontend = fem.load_by_framework(PADDLE_FE)); + OV_ASSERT_NO_THROW(frontend = fem.load_by_framework(PADDLE_FE)); InputModel::Ptr input_model; - ASSERT_NO_THROW(input_model = frontend->load(FrontEndTestUtils::make_model_path(model_file))); + OV_ASSERT_NO_THROW(input_model = frontend->load(FrontEndTestUtils::make_model_path(model_file))); for (const auto& tensor_name : tensor_names) { auto tensor_place = input_model->get_place_by_tensor_name(tensor_name); @@ -320,9 +320,9 @@ TEST_F(Paddle_Places, check_consuming_ops) { TEST_F(Paddle_Places, check_consuming_ops_2) { auto fem = FrontEndManager(); FrontEnd::Ptr frontend; - ASSERT_NO_THROW(frontend = fem.load_by_framework(PADDLE_FE)); + OV_ASSERT_NO_THROW(frontend = fem.load_by_framework(PADDLE_FE)); InputModel::Ptr input_model; - ASSERT_NO_THROW(input_model = frontend->load(FrontEndTestUtils::make_model_path(model_file))); + OV_ASSERT_NO_THROW(input_model = frontend->load(FrontEndTestUtils::make_model_path(model_file))); auto it = find(tensor_names.begin(), tensor_names.end(), "lstm_0.tmp_2"); EXPECT_NE(it, tensor_names.end()); @@ -359,9 +359,9 @@ TEST_F(Paddle_Places, check_consuming_ops_2) { TEST_F(Paddle_Places, check_producing_ops) { auto fem = FrontEndManager(); FrontEnd::Ptr frontend; - ASSERT_NO_THROW(frontend = fem.load_by_framework(PADDLE_FE)); + OV_ASSERT_NO_THROW(frontend = fem.load_by_framework(PADDLE_FE)); 
InputModel::Ptr input_model; - ASSERT_NO_THROW(input_model = frontend->load(FrontEndTestUtils::make_model_path(model_file))); + OV_ASSERT_NO_THROW(input_model = frontend->load(FrontEndTestUtils::make_model_path(model_file))); for (const auto& tensor_name : tensor_names) { auto tensor_place = input_model->get_place_by_tensor_name(tensor_name); @@ -381,9 +381,9 @@ TEST_F(Paddle_Places, check_producing_ops) { TEST_F(Paddle_Places, check_input_output_ports_dy_idx) { auto fem = FrontEndManager(); FrontEnd::Ptr frontend; - ASSERT_NO_THROW(frontend = fem.load_by_framework(PADDLE_FE)); + OV_ASSERT_NO_THROW(frontend = fem.load_by_framework(PADDLE_FE)); InputModel::Ptr input_model; - ASSERT_NO_THROW(input_model = frontend->load(FrontEndTestUtils::make_model_path(model_file))); + OV_ASSERT_NO_THROW(input_model = frontend->load(FrontEndTestUtils::make_model_path(model_file))); for (const auto& tensor_name : output_names) { auto tensor_place = input_model->get_place_by_tensor_name(tensor_name); @@ -400,9 +400,9 @@ TEST_F(Paddle_Places, check_input_output_ports_dy_idx) { TEST_F(Paddle_Places, check_ops_tensors_by_idx) { auto fem = FrontEndManager(); FrontEnd::Ptr frontend; - ASSERT_NO_THROW(frontend = fem.load_by_framework(PADDLE_FE)); + OV_ASSERT_NO_THROW(frontend = fem.load_by_framework(PADDLE_FE)); InputModel::Ptr input_model; - ASSERT_NO_THROW(input_model = frontend->load(FrontEndTestUtils::make_model_path(model_file))); + OV_ASSERT_NO_THROW(input_model = frontend->load(FrontEndTestUtils::make_model_path(model_file))); for (const auto& tensor_name : output_names) { auto tensor_place = input_model->get_place_by_tensor_name(tensor_name); diff --git a/src/frontends/paddle/tests/throw_in_conversion.cpp b/src/frontends/paddle/tests/throw_in_conversion.cpp index 6330394092f87b..578630be07db8f 100644 --- a/src/frontends/paddle/tests/throw_in_conversion.cpp +++ b/src/frontends/paddle/tests/throw_in_conversion.cpp @@ -15,11 +15,11 @@ TEST(FrontEndConvertModelTest, 
throw_in_conversion) { FrontEndManager fem; FrontEnd::Ptr frontEnd; InputModel::Ptr inputModel; - ASSERT_NO_THROW(frontEnd = fem.load_by_framework(PADDLE_FE)); + OV_ASSERT_NO_THROW(frontEnd = fem.load_by_framework(PADDLE_FE)); ASSERT_NE(frontEnd, nullptr); auto model_filename = FrontEndTestUtils::make_model_path( std::string(TEST_PADDLE_MODELS_DIRNAME) + std::string("throw_in_conversion/throw_in_conversion.pdmodel")); - ASSERT_NO_THROW(inputModel = frontEnd->load(model_filename)); + OV_ASSERT_NO_THROW(inputModel = frontEnd->load(model_filename)); ASSERT_NE(inputModel, nullptr); std::shared_ptr model; ASSERT_THROW(model = frontEnd->convert(inputModel), OpConversionFailure); @@ -29,7 +29,7 @@ TEST(FrontEndConvertModelTest, unsupported_version) { FrontEndManager fem; FrontEnd::Ptr frontEnd; InputModel::Ptr inputModel; - ASSERT_NO_THROW(frontEnd = fem.load_by_framework(PADDLE_FE)); + OV_ASSERT_NO_THROW(frontEnd = fem.load_by_framework(PADDLE_FE)); ASSERT_NE(frontEnd, nullptr); auto model_filename = FrontEndTestUtils::make_model_path(std::string(TEST_PADDLE_MODELS_DIRNAME) + std::string("lower_version/lower_version.pdmodel")); diff --git a/src/frontends/tensorflow/tests/convert_unsupported.cpp b/src/frontends/tensorflow/tests/convert_unsupported.cpp index bbc1fb24af55b1..c63d5021989e06 100644 --- a/src/frontends/tensorflow/tests/convert_unsupported.cpp +++ b/src/frontends/tensorflow/tests/convert_unsupported.cpp @@ -97,18 +97,18 @@ TEST(FrontEndConvertModelTest, test_unsupported_op) { FrontEndManager fem; FrontEnd::Ptr frontEnd; InputModel::Ptr inputModel; - ASSERT_NO_THROW(frontEnd = fem.load_by_framework(TF_FE)); + OV_ASSERT_NO_THROW(frontEnd = fem.load_by_framework(TF_FE)); ASSERT_NE(frontEnd, nullptr); auto model_filename = FrontEndTestUtils::make_model_path(std::string(TEST_TENSORFLOW_MODELS_DIRNAME) + std::string("relu_unsupported/relu_unsupported.pb")); - ASSERT_NO_THROW(inputModel = frontEnd->load(model_filename)); + OV_ASSERT_NO_THROW(inputModel = 
frontEnd->load(model_filename)); ASSERT_NE(inputModel, nullptr); shared_ptr model; ASSERT_THROW(model = frontEnd->convert(inputModel), OpConversionFailure); ASSERT_EQ(model, nullptr); - ASSERT_NO_THROW(model = frontEnd->decode(inputModel)); + OV_ASSERT_NO_THROW(model = frontEnd->decode(inputModel)); ASSERT_THROW(frontEnd->convert(model), OpConversionFailure); - ASSERT_NO_THROW(model = frontEnd->convert_partially(inputModel)); + OV_ASSERT_NO_THROW(model = frontEnd->convert_partially(inputModel)); ASSERT_THROW(frontEnd->convert(model), OpConversionFailure); for (auto& node : model->get_ordered_ops()) { @@ -116,7 +116,7 @@ TEST(FrontEndConvertModelTest, test_unsupported_op) { model->replace_node(node, make_shared(node->input(0).get_source_output())); } } - ASSERT_NO_THROW(frontEnd->convert(model)); + OV_ASSERT_NO_THROW(frontEnd->convert(model)); } TEST_F(FrontEndConversionWithReferenceTestsF, ModelWithDynamicType) { diff --git a/src/frontends/tensorflow/tests/telemetry.cpp b/src/frontends/tensorflow/tests/telemetry.cpp index 7543b61b96c6c0..5ec4b346db08ca 100644 --- a/src/frontends/tensorflow/tests/telemetry.cpp +++ b/src/frontends/tensorflow/tests/telemetry.cpp @@ -61,7 +61,7 @@ TEST(TFTelemetryTest, test_nonexistent_add) { FrontEndManager fem; FrontEnd::Ptr frontEnd; InputModel::Ptr inputModel; - ASSERT_NO_THROW(frontEnd = fem.load_by_framework(TF_FE)); + OV_ASSERT_NO_THROW(frontEnd = fem.load_by_framework(TF_FE)); ASSERT_NE(frontEnd, nullptr); TelemetryMock m_test_telemetry; @@ -77,7 +77,7 @@ TEST(TFTelemetryTest, test_nonexistent_add) { auto model_filename = FrontEndTestUtils::make_model_path(std::string(TEST_TENSORFLOW_MODELS_DIRNAME) + std::string("nonexistent_add/nonexistent_add.pb")); - ASSERT_NO_THROW(inputModel = frontEnd->load(model_filename)); + OV_ASSERT_NO_THROW(inputModel = frontEnd->load(model_filename)); ASSERT_NE(inputModel, nullptr); shared_ptr model; diff --git a/src/frontends/tensorflow_lite/tests/convert_unsupported.cpp 
b/src/frontends/tensorflow_lite/tests/convert_unsupported.cpp index c28194ffa58b39..a408b3199b9123 100644 --- a/src/frontends/tensorflow_lite/tests/convert_unsupported.cpp +++ b/src/frontends/tensorflow_lite/tests/convert_unsupported.cpp @@ -15,11 +15,11 @@ TEST(FrontEndConvertModelTest, test_zerolen) { FrontEndManager fem; FrontEnd::Ptr frontEnd; InputModel::Ptr inputModel; - ASSERT_NO_THROW(frontEnd = fem.load_by_framework(TF_LITE_FE)); + OV_ASSERT_NO_THROW(frontEnd = fem.load_by_framework(TF_LITE_FE)); ASSERT_NE(frontEnd, nullptr); auto model_filename = FrontEndTestUtils::make_model_path(string(TEST_TENSORFLOW_LITE_MODELS_DIRNAME) + string("bad_header/zerolen.tflite")); - ASSERT_NO_THROW(inputModel = frontEnd->load(model_filename)); + OV_ASSERT_NO_THROW(inputModel = frontEnd->load(model_filename)); ASSERT_NE(inputModel, nullptr); shared_ptr model; ASSERT_THROW(model = frontEnd->convert(inputModel), std::exception); @@ -29,11 +29,11 @@ TEST(FrontEndConvertModelTest, test_wrong_len) { FrontEndManager fem; FrontEnd::Ptr frontEnd; InputModel::Ptr inputModel; - ASSERT_NO_THROW(frontEnd = fem.load_by_framework(TF_LITE_FE)); + OV_ASSERT_NO_THROW(frontEnd = fem.load_by_framework(TF_LITE_FE)); ASSERT_NE(frontEnd, nullptr); auto model_filename = FrontEndTestUtils::make_model_path(string(TEST_TENSORFLOW_LITE_MODELS_DIRNAME) + string("bad_header/wrong_len_3.tflite")); - ASSERT_NO_THROW(inputModel = frontEnd->load(model_filename)); + OV_ASSERT_NO_THROW(inputModel = frontEnd->load(model_filename)); ASSERT_NE(inputModel, nullptr); shared_ptr model; ASSERT_THROW(model = frontEnd->convert(inputModel), std::exception); @@ -43,11 +43,11 @@ TEST(FrontEndConvertModelTest, test_wrong_pos) { FrontEndManager fem; FrontEnd::Ptr frontEnd; InputModel::Ptr inputModel; - ASSERT_NO_THROW(frontEnd = fem.load_by_framework(TF_LITE_FE)); + OV_ASSERT_NO_THROW(frontEnd = fem.load_by_framework(TF_LITE_FE)); ASSERT_NE(frontEnd, nullptr); auto model_filename = 
FrontEndTestUtils::make_model_path(string(TEST_TENSORFLOW_LITE_MODELS_DIRNAME) + string("bad_header/wrong_pos.tflite")); - ASSERT_NO_THROW(inputModel = frontEnd->load(model_filename)); + OV_ASSERT_NO_THROW(inputModel = frontEnd->load(model_filename)); ASSERT_NE(inputModel, nullptr); shared_ptr model; ASSERT_THROW(model = frontEnd->convert(inputModel), std::exception); diff --git a/src/frontends/tests/frontend/shared/include/utils.hpp b/src/frontends/tests/frontend/shared/include/utils.hpp index c49ed125bfa194..a8fc9d1f8c6091 100644 --- a/src/frontends/tests/frontend/shared/include/utils.hpp +++ b/src/frontends/tests/frontend/shared/include/utils.hpp @@ -9,6 +9,7 @@ #include #include "common_test_utils/file_utils.hpp" +#include "common_test_utils/test_assertions.hpp" #include "openvino/util/env_util.hpp" #include "openvino/util/file_util.hpp" diff --git a/src/frontends/tests/frontend/shared/src/basic_api.cpp b/src/frontends/tests/frontend/shared/src/basic_api.cpp index ef9f911d4c0c86..11e8b41dded39f 100644 --- a/src/frontends/tests/frontend/shared/src/basic_api.cpp +++ b/src/frontends/tests/frontend/shared/src/basic_api.cpp @@ -30,15 +30,15 @@ void FrontEndBasicTest::doLoadFromFile() { } TEST_P(FrontEndBasicTest, testLoadFromFile) { - ASSERT_NO_THROW(doLoadFromFile()); + OV_ASSERT_NO_THROW(doLoadFromFile()); ASSERT_EQ(m_frontEnd->get_name(), m_feName); std::shared_ptr model; - ASSERT_NO_THROW(model = m_frontEnd->convert(m_inputModel)); + OV_ASSERT_NO_THROW(model = m_frontEnd->convert(m_inputModel)); ASSERT_NE(model, nullptr); } TEST_P(FrontEndBasicTest, testInputModel_getInputsOutputs) { - ASSERT_NO_THROW(doLoadFromFile()); + OV_ASSERT_NO_THROW(doLoadFromFile()); using CustomCheck = std::function; auto checkPlaces = [&](const std::vector& places, CustomCheck cb) { @@ -48,37 +48,37 @@ TEST_P(FrontEndBasicTest, testInputModel_getInputsOutputs) { std::for_each(places.begin(), places.end(), [&](Place::Ptr place) { ASSERT_NE(place, nullptr); std::vector names; - 
ASSERT_NO_THROW(names = place->get_names()); + OV_ASSERT_NO_THROW(names = place->get_names()); EXPECT_GT(names.size(), 0); cb(place); }); }; std::vector inputs; - ASSERT_NO_THROW(inputs = m_inputModel->get_inputs()); + OV_ASSERT_NO_THROW(inputs = m_inputModel->get_inputs()); checkPlaces(inputs, [&](Place::Ptr place) { EXPECT_TRUE(place->is_input()); }); std::vector outputs; - ASSERT_NO_THROW(outputs = m_inputModel->get_outputs()); + OV_ASSERT_NO_THROW(outputs = m_inputModel->get_outputs()); checkPlaces(outputs, [&](Place::Ptr place) { EXPECT_TRUE(place->is_output()); }); } TEST_P(FrontEndBasicTest, testInputModel_getPlaceByTensorName) { - ASSERT_NO_THROW(doLoadFromFile()); + OV_ASSERT_NO_THROW(doLoadFromFile()); auto testGetPlaceByTensorName = [&](const std::vector& places) { EXPECT_GT(places.size(), 0); for (auto place : places) { ASSERT_NE(place, nullptr); std::vector names; - ASSERT_NO_THROW(names = place->get_names()); + OV_ASSERT_NO_THROW(names = place->get_names()); for (auto name : names) { EXPECT_NE(name, std::string()); Place::Ptr placeByName; - ASSERT_NO_THROW(placeByName = m_inputModel->get_place_by_tensor_name(name)); + OV_ASSERT_NO_THROW(placeByName = m_inputModel->get_place_by_tensor_name(name)); ASSERT_NE(placeByName, nullptr); EXPECT_TRUE(placeByName->is_equal(place)); } @@ -86,28 +86,28 @@ TEST_P(FrontEndBasicTest, testInputModel_getPlaceByTensorName) { }; std::vector outputs; - ASSERT_NO_THROW(outputs = m_inputModel->get_outputs()); + OV_ASSERT_NO_THROW(outputs = m_inputModel->get_outputs()); testGetPlaceByTensorName(outputs); std::vector inputs; - ASSERT_NO_THROW(inputs = m_inputModel->get_inputs()); + OV_ASSERT_NO_THROW(inputs = m_inputModel->get_inputs()); testGetPlaceByTensorName(inputs); } TEST_P(FrontEndBasicTest, testInputModel_overrideAll) { - ASSERT_NO_THROW(doLoadFromFile()); + OV_ASSERT_NO_THROW(doLoadFromFile()); using GetPlaces = std::function()>; using OverridePlaces = std::function&)>; auto verifyOverride = [](GetPlaces getCB, 
OverridePlaces overrideCB) { std::vector places; - ASSERT_NO_THROW(places = getCB()); + OV_ASSERT_NO_THROW(places = getCB()); std::set placesSet(places.begin(), places.end()); auto placesReversed = places; std::reverse(placesReversed.begin(), placesReversed.end()); - ASSERT_NO_THROW(overrideCB(placesReversed)); - ASSERT_NO_THROW(places = getCB()); + OV_ASSERT_NO_THROW(overrideCB(placesReversed)); + OV_ASSERT_NO_THROW(places = getCB()); EXPECT_GT(places.size(), 0); std::set placesSetAfter(places.begin(), places.end()); EXPECT_EQ(placesSet.size(), placesSet.size()); @@ -133,20 +133,20 @@ TEST_P(FrontEndBasicTest, testInputModel_overrideAll) { } TEST_P(FrontEndBasicTest, testInputModel_overrideAll_empty) { - ASSERT_NO_THROW(doLoadFromFile()); + OV_ASSERT_NO_THROW(doLoadFromFile()); using GetPlaces = std::function()>; using OverrideEmpty = std::function; using CustomCheck = std::function; auto verifyOverride = [](GetPlaces getCB, OverrideEmpty overrideCB, CustomCheck customCB) { std::vector places; std::vector newPlaces; - ASSERT_NO_THROW(places = getCB()); - ASSERT_NO_THROW(overrideCB()); - ASSERT_NO_THROW(newPlaces = getCB()); + OV_ASSERT_NO_THROW(places = getCB()); + OV_ASSERT_NO_THROW(overrideCB()); + OV_ASSERT_NO_THROW(newPlaces = getCB()); ASSERT_EQ(newPlaces.size(), 0); std::for_each(places.begin(), places.end(), [&](Place::Ptr place) { std::vector names; - ASSERT_NO_THROW(names = place->get_names()); + OV_ASSERT_NO_THROW(names = place->get_names()); for (auto name : names) { customCB(name); } diff --git a/src/frontends/tests/frontend/shared/src/conversion.cpp b/src/frontends/tests/frontend/shared/src/conversion.cpp index 34f3e9d6897eaa..34e4f2fd62719a 100644 --- a/src/frontends/tests/frontend/shared/src/conversion.cpp +++ b/src/frontends/tests/frontend/shared/src/conversion.cpp @@ -76,10 +76,10 @@ TEST_P(FrontEndConversionExtensionTest, TestConversionExtension) { })); } std::shared_ptr input_model; - ASSERT_NO_THROW(input_model = 
frontend->load(m_param.m_modelName)); + OV_ASSERT_NO_THROW(input_model = frontend->load(m_param.m_modelName)); ASSERT_NE(input_model, nullptr); std::shared_ptr model; - ASSERT_NO_THROW(model = frontend->convert(input_model)); + OV_ASSERT_NO_THROW(model = frontend->convert(input_model)); ASSERT_NE(model, nullptr); EXPECT_EQ(invoked, true); } @@ -89,9 +89,9 @@ TEST_P(FrontEndConversionExtensionTest, TestConversionExtensionViaSO) { const auto& lib_path = get_lib_path("test_builtin_extensions"); frontend->add_extension(lib_path); std::shared_ptr input_model; - ASSERT_NO_THROW(input_model = frontend->load(m_param.m_modelName)); + OV_ASSERT_NO_THROW(input_model = frontend->load(m_param.m_modelName)); ASSERT_NE(input_model, nullptr); std::shared_ptr model; - ASSERT_NO_THROW(model = frontend->convert(input_model)); + OV_ASSERT_NO_THROW(model = frontend->convert(input_model)); ASSERT_NE(model, nullptr); } diff --git a/src/frontends/tests/frontend/shared/src/conversion_with_reference.cpp b/src/frontends/tests/frontend/shared/src/conversion_with_reference.cpp index 89497cc1cca326..cffd0d483780f4 100644 --- a/src/frontends/tests/frontend/shared/src/conversion_with_reference.cpp +++ b/src/frontends/tests/frontend/shared/src/conversion_with_reference.cpp @@ -4,6 +4,7 @@ #include "conversion_with_reference.hpp" +#include "common_test_utils/test_assertions.hpp" #include "transformations/init_node_info.hpp" FrontEndConversionWithReferenceTestsF::FrontEndConversionWithReferenceTestsF() @@ -26,7 +27,7 @@ void FrontEndConversionWithReferenceTestsF::TearDown() { OPENVINO_ASSERT(model_ref != nullptr, "Reference Test Model is not initialized."); manager.run_passes(model); - ASSERT_NO_THROW(check_rt_info(model)); + OV_ASSERT_NO_THROW(check_rt_info(model)); auto res = comparator.compare(model, model_ref); ASSERT_TRUE(res.valid) << res.message; diff --git a/src/frontends/tests/frontend/shared/src/convert_model.cpp b/src/frontends/tests/frontend/shared/src/convert_model.cpp index 
620d8bdf593605..e41a685c93e28a 100644 --- a/src/frontends/tests/frontend/shared/src/convert_model.cpp +++ b/src/frontends/tests/frontend/shared/src/convert_model.cpp @@ -28,20 +28,20 @@ void FrontEndConvertModelTest::initParamTest() { void FrontEndConvertModelTest::doLoadFromFile() { std::vector frontends; - ASSERT_NO_THROW(frontends = m_fem.get_available_front_ends()); - ASSERT_NO_THROW(m_frontEnd = m_fem.load_by_framework(m_feName)); + OV_ASSERT_NO_THROW(frontends = m_fem.get_available_front_ends()); + OV_ASSERT_NO_THROW(m_frontEnd = m_fem.load_by_framework(m_feName)); ASSERT_NE(m_frontEnd, nullptr); - ASSERT_NO_THROW(m_inputModel = m_frontEnd->load(m_modelFile)); + OV_ASSERT_NO_THROW(m_inputModel = m_frontEnd->load(m_modelFile)); ASSERT_NE(m_inputModel, nullptr); } TEST_P(FrontEndConvertModelTest, test_convert_partially_equal_convert) { - ASSERT_NO_THROW(doLoadFromFile()); + OV_ASSERT_NO_THROW(doLoadFromFile()); std::shared_ptr model_ref; - ASSERT_NO_THROW(model_ref = m_frontEnd->convert(m_inputModel)); + OV_ASSERT_NO_THROW(model_ref = m_frontEnd->convert(m_inputModel)); ASSERT_NE(model_ref, nullptr); std::shared_ptr model; - ASSERT_NO_THROW(model = m_frontEnd->convert_partially(m_inputModel)); + OV_ASSERT_NO_THROW(model = m_frontEnd->convert_partially(m_inputModel)); ASSERT_NE(model, nullptr); FunctionsComparator func_comparator = FunctionsComparator::with_default(); @@ -54,13 +54,13 @@ TEST_P(FrontEndConvertModelTest, test_convert_partially_equal_convert) { } TEST_P(FrontEndConvertModelTest, test_decode_convert_equal_convert) { - ASSERT_NO_THROW(doLoadFromFile()); + OV_ASSERT_NO_THROW(doLoadFromFile()); std::shared_ptr model_ref; - ASSERT_NO_THROW(model_ref = m_frontEnd->convert(m_inputModel)); + OV_ASSERT_NO_THROW(model_ref = m_frontEnd->convert(m_inputModel)); ASSERT_NE(model_ref, nullptr); std::shared_ptr model; - ASSERT_NO_THROW(model = m_frontEnd->decode(m_inputModel)); - ASSERT_NO_THROW(m_frontEnd->convert(model)); + OV_ASSERT_NO_THROW(model = 
m_frontEnd->decode(m_inputModel)); + OV_ASSERT_NO_THROW(m_frontEnd->convert(model)); ASSERT_NE(model, nullptr); FunctionsComparator func_comparator = FunctionsComparator::with_default(); diff --git a/src/frontends/tests/frontend/shared/src/cut_specific_model.cpp b/src/frontends/tests/frontend/shared/src/cut_specific_model.cpp index ab66cd754d6277..4470a43ea1ac97 100644 --- a/src/frontends/tests/frontend/shared/src/cut_specific_model.cpp +++ b/src/frontends/tests/frontend/shared/src/cut_specific_model.cpp @@ -57,15 +57,15 @@ std::vector FrontEndCutModelTest::constructNewOutputs( /////////////////////////////////////////////////////////////////// TEST_P(FrontEndCutModelTest, testOverrideInputs) { - ASSERT_NO_THROW(doLoadFromFile()); + OV_ASSERT_NO_THROW(doLoadFromFile()); std::vector newPlaces; - ASSERT_NO_THROW(newPlaces = constructNewInputs()); - ASSERT_NO_THROW(m_inputModel->override_all_inputs(newPlaces)); - ASSERT_NO_THROW(m_inputModel->get_inputs()); + OV_ASSERT_NO_THROW(newPlaces = constructNewInputs()); + OV_ASSERT_NO_THROW(m_inputModel->override_all_inputs(newPlaces)); + OV_ASSERT_NO_THROW(m_inputModel->get_inputs()); EXPECT_EQ(m_param.m_newInputs.size(), m_inputModel->get_inputs().size()); for (auto newInput : m_inputModel->get_inputs()) { std::vector names; - ASSERT_NO_THROW(names = newInput->get_names()); + OV_ASSERT_NO_THROW(names = newInput->get_names()); bool found = false; for (const auto& name : m_param.m_newInputs) { if (std::find(names.begin(), names.begin(), name) != names.end()) { @@ -78,15 +78,15 @@ TEST_P(FrontEndCutModelTest, testOverrideInputs) { } TEST_P(FrontEndCutModelTest, testOverrideOutputs) { - ASSERT_NO_THROW(doLoadFromFile()); + OV_ASSERT_NO_THROW(doLoadFromFile()); std::vector newPlaces; - ASSERT_NO_THROW(newPlaces = constructNewOutputs()); - ASSERT_NO_THROW(m_inputModel->override_all_outputs(newPlaces)); - ASSERT_NO_THROW(m_inputModel->get_outputs()); + OV_ASSERT_NO_THROW(newPlaces = constructNewOutputs()); + 
OV_ASSERT_NO_THROW(m_inputModel->override_all_outputs(newPlaces)); + OV_ASSERT_NO_THROW(m_inputModel->get_outputs()); EXPECT_EQ(m_param.m_newOutputs.size(), m_inputModel->get_outputs().size()); for (auto newOutput : m_inputModel->get_outputs()) { std::vector names; - ASSERT_NO_THROW(names = newOutput->get_names()); + OV_ASSERT_NO_THROW(names = newOutput->get_names()); bool found = false; for (const auto& name : m_param.m_newOutputs) { if (std::find(names.begin(), names.begin(), name) != names.end()) { @@ -99,9 +99,9 @@ TEST_P(FrontEndCutModelTest, testOverrideOutputs) { } TEST_P(FrontEndCutModelTest, testOldInputs) { - ASSERT_NO_THROW(doLoadFromFile()); + OV_ASSERT_NO_THROW(doLoadFromFile()); std::shared_ptr model; - ASSERT_NO_THROW(model = m_frontEnd->convert(m_inputModel)); + OV_ASSERT_NO_THROW(model = m_frontEnd->convert(m_inputModel)); auto ops = model->get_ordered_ops(); // Ensure that it contains expected old inputs @@ -116,9 +116,9 @@ TEST_P(FrontEndCutModelTest, testOldInputs) { } TEST_P(FrontEndCutModelTest, testOldOutputs) { - ASSERT_NO_THROW(doLoadFromFile()); + OV_ASSERT_NO_THROW(doLoadFromFile()); std::shared_ptr model; - ASSERT_NO_THROW(model = m_frontEnd->convert(m_inputModel)); + OV_ASSERT_NO_THROW(model = m_frontEnd->convert(m_inputModel)); auto ops = model->get_ordered_ops(); // Ensure that it contains expected old outputs for (const auto& name : m_param.m_oldOutputs) { @@ -132,13 +132,13 @@ TEST_P(FrontEndCutModelTest, testOldOutputs) { } TEST_P(FrontEndCutModelTest, testNewInputs_func) { - ASSERT_NO_THROW(doLoadFromFile()); + OV_ASSERT_NO_THROW(doLoadFromFile()); std::vector newPlaces; - ASSERT_NO_THROW(newPlaces = constructNewInputs()); - ASSERT_NO_THROW(m_inputModel->override_all_inputs(newPlaces)); + OV_ASSERT_NO_THROW(newPlaces = constructNewInputs()); + OV_ASSERT_NO_THROW(m_inputModel->override_all_inputs(newPlaces)); std::shared_ptr model; - ASSERT_NO_THROW(model = m_frontEnd->convert(m_inputModel)); + OV_ASSERT_NO_THROW(model = 
m_frontEnd->convert(m_inputModel)); auto ops = model->get_ordered_ops(); // Ensure that it doesn't contain old inputs @@ -163,13 +163,13 @@ TEST_P(FrontEndCutModelTest, testNewInputs_func) { } TEST_P(FrontEndCutModelTest, testNewOutputs_func) { - ASSERT_NO_THROW(doLoadFromFile()); + OV_ASSERT_NO_THROW(doLoadFromFile()); std::vector newPlaces; - ASSERT_NO_THROW(newPlaces = constructNewOutputs()); - ASSERT_NO_THROW(m_inputModel->override_all_outputs(newPlaces)); + OV_ASSERT_NO_THROW(newPlaces = constructNewOutputs()); + OV_ASSERT_NO_THROW(m_inputModel->override_all_outputs(newPlaces)); std::shared_ptr model; - ASSERT_NO_THROW(model = m_frontEnd->convert(m_inputModel)); + OV_ASSERT_NO_THROW(model = m_frontEnd->convert(m_inputModel)); auto ops = model->get_ordered_ops(); // Ensure that it doesn't contain old outputs @@ -194,14 +194,14 @@ TEST_P(FrontEndCutModelTest, testNewOutputs_func) { } TEST_P(FrontEndCutModelTest, testExtractSubgraph) { - ASSERT_NO_THROW(doLoadFromFile()); + OV_ASSERT_NO_THROW(doLoadFromFile()); std::vector newInputs, newOutputs; - ASSERT_NO_THROW(newInputs = constructNewInputs()); - ASSERT_NO_THROW(newOutputs = constructNewOutputs()); - ASSERT_NO_THROW(m_inputModel->extract_subgraph(newInputs, newOutputs)); + OV_ASSERT_NO_THROW(newInputs = constructNewInputs()); + OV_ASSERT_NO_THROW(newOutputs = constructNewOutputs()); + OV_ASSERT_NO_THROW(m_inputModel->extract_subgraph(newInputs, newOutputs)); std::shared_ptr model; - ASSERT_NO_THROW(model = m_frontEnd->convert(m_inputModel)); + OV_ASSERT_NO_THROW(model = m_frontEnd->convert(m_inputModel)); auto ops = model->get_ordered_ops(); // Ensure that it doesn't contain expected old outputs @@ -226,13 +226,13 @@ TEST_P(FrontEndCutModelTest, testExtractSubgraph) { } TEST_P(FrontEndCutModelTest, testSetTensorValue) { - ASSERT_NO_THROW(doLoadFromFile()); + OV_ASSERT_NO_THROW(doLoadFromFile()); Place::Ptr place; - ASSERT_NO_THROW(place = m_inputModel->get_place_by_tensor_name(m_param.m_tensorValueName)); - 
ASSERT_NO_THROW(m_inputModel->set_tensor_value(place, &m_param.m_tensorValue[0])); + OV_ASSERT_NO_THROW(place = m_inputModel->get_place_by_tensor_name(m_param.m_tensorValueName)); + OV_ASSERT_NO_THROW(m_inputModel->set_tensor_value(place, &m_param.m_tensorValue[0])); std::shared_ptr model; - ASSERT_NO_THROW(model = m_frontEnd->convert(m_inputModel)); + OV_ASSERT_NO_THROW(model = m_frontEnd->convert(m_inputModel)); auto ops = model->get_ordered_ops(); auto const_name = m_param.m_tensorValueName; diff --git a/src/frontends/tests/frontend/shared/src/library_extension.cpp b/src/frontends/tests/frontend/shared/src/library_extension.cpp index 2ff3ea30146916..573efbb3d10e09 100644 --- a/src/frontends/tests/frontend/shared/src/library_extension.cpp +++ b/src/frontends/tests/frontend/shared/src/library_extension.cpp @@ -44,10 +44,10 @@ TEST_P(FrontendLibraryExtensionTest, verifyFunctions) { ov::frontend::InputModel::Ptr m_inputModel; m_frontEnd = m_fem.load_by_framework(m_param.m_frontEndName); - ASSERT_NO_THROW(m_inputModel = m_frontEnd->load(m_param.m_modelName)); + OV_ASSERT_NO_THROW(m_inputModel = m_frontEnd->load(m_param.m_modelName)); ASSERT_NE(m_inputModel, nullptr); - ASSERT_NO_THROW(function_ref = m_frontEnd->convert(m_inputModel)); + OV_ASSERT_NO_THROW(function_ref = m_frontEnd->convert(m_inputModel)); ASSERT_NE(function_ref, nullptr); const auto nodes = function_ref->get_ops(); @@ -68,10 +68,10 @@ TEST_P(FrontendLibraryExtensionTest, verifyFunctions) { const auto& lib_path = get_lib_path("test_builtin_extensions"); m_frontEnd->add_extension(lib_path); - ASSERT_NO_THROW(m_inputModel = m_frontEnd->load(m_param.m_modelName)); + OV_ASSERT_NO_THROW(m_inputModel = m_frontEnd->load(m_param.m_modelName)); ASSERT_NE(m_inputModel, nullptr); - ASSERT_NO_THROW(function = m_frontEnd->convert(m_inputModel)); + OV_ASSERT_NO_THROW(function = m_frontEnd->convert(m_inputModel)); ASSERT_NE(function, nullptr); const auto nodes = function->get_ops(); diff --git 
a/src/frontends/tests/frontend/shared/src/load_from.cpp b/src/frontends/tests/frontend/shared/src/load_from.cpp index 35a54f1ffb6c53..84aa950712eb5a 100644 --- a/src/frontends/tests/frontend/shared/src/load_from.cpp +++ b/src/frontends/tests/frontend/shared/src/load_from.cpp @@ -27,15 +27,15 @@ TEST_P(FrontEndLoadFromTest, testLoadFromFilePath) { std::string model_path = FrontEndTestUtils::make_model_path(m_param.m_modelsPath + m_param.m_file); std::vector frontends; FrontEnd::Ptr fe; - ASSERT_NO_THROW(frontends = m_fem.get_available_front_ends()); - ASSERT_NO_THROW(m_frontEnd = m_fem.load_by_model(model_path)); + OV_ASSERT_NO_THROW(frontends = m_fem.get_available_front_ends()); + OV_ASSERT_NO_THROW(m_frontEnd = m_fem.load_by_model(model_path)); ASSERT_NE(m_frontEnd, nullptr); - ASSERT_NO_THROW(m_inputModel = m_frontEnd->load(model_path)); + OV_ASSERT_NO_THROW(m_inputModel = m_frontEnd->load(model_path)); ASSERT_NE(m_inputModel, nullptr); std::shared_ptr model; - ASSERT_NO_THROW(model = m_frontEnd->convert(m_inputModel)); + OV_ASSERT_NO_THROW(model = m_frontEnd->convert(m_inputModel)); ASSERT_NE(model, nullptr); } @@ -43,18 +43,18 @@ TEST_P(FrontEndLoadFromTest, testLoadFromFilePathWithExplicitVariants) { std::string model_path = FrontEndTestUtils::make_model_path(m_param.m_modelsPath + m_param.m_file); std::vector frontends; FrontEnd::Ptr fe; - ASSERT_NO_THROW(frontends = m_fem.get_available_front_ends()); + OV_ASSERT_NO_THROW(frontends = m_fem.get_available_front_ends()); std::vector variants; variants.emplace_back(model_path); - ASSERT_NO_THROW(m_frontEnd = m_fem.load_by_model(variants)); + OV_ASSERT_NO_THROW(m_frontEnd = m_fem.load_by_model(variants)); ASSERT_NE(m_frontEnd, nullptr); - ASSERT_NO_THROW(m_inputModel = m_frontEnd->load(variants)); + OV_ASSERT_NO_THROW(m_inputModel = m_frontEnd->load(variants)); ASSERT_NE(m_inputModel, nullptr); std::shared_ptr function; - ASSERT_NO_THROW(function = m_frontEnd->convert(m_inputModel)); + OV_ASSERT_NO_THROW(function 
= m_frontEnd->convert(m_inputModel)); ASSERT_NE(function, nullptr); } @@ -63,11 +63,11 @@ TEST_P(FrontEndLoadFromTest, testLoadFromTwoFiles) { std::string weights_path = FrontEndTestUtils::make_model_path(m_param.m_modelsPath + m_param.m_files[1]); std::vector frontends; FrontEnd::Ptr fe; - ASSERT_NO_THROW(frontends = m_fem.get_available_front_ends()); - ASSERT_NO_THROW(m_frontEnd = m_fem.load_by_model(model_path, weights_path)); + OV_ASSERT_NO_THROW(frontends = m_fem.get_available_front_ends()); + OV_ASSERT_NO_THROW(m_frontEnd = m_fem.load_by_model(model_path, weights_path)); ASSERT_NE(m_frontEnd, nullptr); - ASSERT_NO_THROW(m_inputModel = m_frontEnd->load(model_path, weights_path)); + OV_ASSERT_NO_THROW(m_inputModel = m_frontEnd->load(model_path, weights_path)); ASSERT_NE(m_inputModel, nullptr); std::shared_ptr model; @@ -81,15 +81,15 @@ TEST_P(FrontEndLoadFromTest, testLoadFromStream) { std::istream* is = &ifs; std::vector frontends; FrontEnd::Ptr fe; - ASSERT_NO_THROW(frontends = m_fem.get_available_front_ends()); - ASSERT_NO_THROW(m_frontEnd = m_fem.load_by_model(is)); + OV_ASSERT_NO_THROW(frontends = m_fem.get_available_front_ends()); + OV_ASSERT_NO_THROW(m_frontEnd = m_fem.load_by_model(is)); ASSERT_NE(m_frontEnd, nullptr); - ASSERT_NO_THROW(m_inputModel = m_frontEnd->load(is)); + OV_ASSERT_NO_THROW(m_inputModel = m_frontEnd->load(is)); ASSERT_NE(m_inputModel, nullptr); std::shared_ptr model; - ASSERT_NO_THROW(model = m_frontEnd->convert(m_inputModel)); + OV_ASSERT_NO_THROW(model = m_frontEnd->convert(m_inputModel)); ASSERT_NE(model, nullptr); } @@ -103,14 +103,14 @@ TEST_P(FrontEndLoadFromTest, testLoadFromTwoStreams) { std::vector frontends; FrontEnd::Ptr fe; - ASSERT_NO_THROW(frontends = m_fem.get_available_front_ends()); - ASSERT_NO_THROW(m_frontEnd = m_fem.load_by_model(model_is, weights_is)); + OV_ASSERT_NO_THROW(frontends = m_fem.get_available_front_ends()); + OV_ASSERT_NO_THROW(m_frontEnd = m_fem.load_by_model(model_is, weights_is)); 
ASSERT_NE(m_frontEnd, nullptr); - ASSERT_NO_THROW(m_inputModel = m_frontEnd->load(model_is, weights_is)); + OV_ASSERT_NO_THROW(m_inputModel = m_frontEnd->load(model_is, weights_is)); ASSERT_NE(m_inputModel, nullptr); std::shared_ptr model; - ASSERT_NO_THROW(model = m_frontEnd->convert(m_inputModel)); + OV_ASSERT_NO_THROW(model = m_frontEnd->convert(m_inputModel)); ASSERT_NE(model, nullptr); } diff --git a/src/frontends/tests/frontend/shared/src/op_fuzzy.cpp b/src/frontends/tests/frontend/shared/src/op_fuzzy.cpp index f42460ffabb61e..a100585a97fdc3 100644 --- a/src/frontends/tests/frontend/shared/src/op_fuzzy.cpp +++ b/src/frontends/tests/frontend/shared/src/op_fuzzy.cpp @@ -114,7 +114,7 @@ TEST_P(FrontEndFuzzyOpTest, DISABLED_testOpFuzzy) { TEST_P(FrontEndFuzzyOpTest, testOpFuzzy) { #endif // load - ASSERT_NO_THROW(doLoadFromFile()); + OV_ASSERT_NO_THROW(doLoadFromFile()); // convert std::shared_ptr model; diff --git a/src/frontends/tests/frontend/shared/src/partial_shape.cpp b/src/frontends/tests/frontend/shared/src/partial_shape.cpp index 97127847734101..adf3e82994b18f 100644 --- a/src/frontends/tests/frontend/shared/src/partial_shape.cpp +++ b/src/frontends/tests/frontend/shared/src/partial_shape.cpp @@ -37,10 +37,10 @@ void FrontEndPartialShapeTest::doLoadFromFile() { /////////////////////////////////////////////////////////////////// TEST_P(FrontEndPartialShapeTest, testCheckOldPartialShape) { - ASSERT_NO_THROW(doLoadFromFile()); + OV_ASSERT_NO_THROW(doLoadFromFile()); std::shared_ptr model; - ASSERT_NO_THROW(model = m_frontEnd->convert(m_inputModel)); + OV_ASSERT_NO_THROW(model = m_frontEnd->convert(m_inputModel)); auto ops = model->get_ordered_ops(); auto it = std::find_if(ops.begin(), ops.end(), [&](const std::shared_ptr& node) { return node->get_friendly_name().find(m_partShape.m_tensorName) != std::string::npos; @@ -51,14 +51,14 @@ TEST_P(FrontEndPartialShapeTest, testCheckOldPartialShape) { } TEST_P(FrontEndPartialShapeTest, testSetNewPartialShape) { - 
ASSERT_NO_THROW(doLoadFromFile()); + OV_ASSERT_NO_THROW(doLoadFromFile()); Place::Ptr place; - ASSERT_NO_THROW(place = m_inputModel->get_place_by_tensor_name(m_partShape.m_tensorName)); + OV_ASSERT_NO_THROW(place = m_inputModel->get_place_by_tensor_name(m_partShape.m_tensorName)); ASSERT_NE(place, nullptr); - ASSERT_NO_THROW(m_inputModel->set_partial_shape(place, ov::PartialShape{m_partShape.m_newPartialShape})); + OV_ASSERT_NO_THROW(m_inputModel->set_partial_shape(place, ov::PartialShape{m_partShape.m_newPartialShape})); std::shared_ptr model; - ASSERT_NO_THROW(model = m_frontEnd->convert(m_inputModel)); + OV_ASSERT_NO_THROW(model = m_frontEnd->convert(m_inputModel)); auto ops = model->get_ordered_ops(); auto it = std::find_if(ops.begin(), ops.end(), [&](const std::shared_ptr& node) { return node->get_friendly_name().find(m_partShape.m_tensorName) != std::string::npos; diff --git a/src/frontends/tests/frontend/shared/src/set_element_type.cpp b/src/frontends/tests/frontend/shared/src/set_element_type.cpp index 36ca932dd2f01a..765f905a6ebaaf 100644 --- a/src/frontends/tests/frontend/shared/src/set_element_type.cpp +++ b/src/frontends/tests/frontend/shared/src/set_element_type.cpp @@ -31,13 +31,13 @@ void FrontEndElementTypeTest::doLoadFromFile() { /////////////////////////////////////////////////////////////////// TEST_P(FrontEndElementTypeTest, testSetElementType) { - ASSERT_NO_THROW(doLoadFromFile()); + OV_ASSERT_NO_THROW(doLoadFromFile()); Place::Ptr place; - ASSERT_NO_THROW(place = m_inputModel->get_inputs()[0]); + OV_ASSERT_NO_THROW(place = m_inputModel->get_inputs()[0]); ASSERT_NE(place, nullptr); auto name = place->get_names()[0]; - ASSERT_NO_THROW(m_inputModel->set_element_type(place, ov::element::f16)); + OV_ASSERT_NO_THROW(m_inputModel->set_element_type(place, ov::element::f16)); std::shared_ptr model; model = m_frontEnd->convert(m_inputModel); diff --git a/src/inference/tests/functional/caching_test.cpp b/src/inference/tests/functional/caching_test.cpp 
index 96a36476cbf003..5b01af9a22cde8 100644 --- a/src/inference/tests/functional/caching_test.cpp +++ b/src/inference/tests/functional/caching_test.cpp @@ -15,6 +15,7 @@ #include "common_test_utils/file_utils.hpp" #include "common_test_utils/subgraph_builders/conv_pool_relu.hpp" +#include "common_test_utils/test_assertions.hpp" #include "openvino/core/any.hpp" #include "openvino/core/except.hpp" #include "openvino/core/layout.hpp" @@ -779,7 +780,7 @@ TEST_P(CachingTest, TestNoCacheMetric_hasCacheDirConfig) { EXPECT_CALL(*mockPlugin, compile_model(A&>(), _)) .Times(!m_remoteContext ? 1 : 0); EXPECT_CALL(*mockPlugin, OnCompileModelFromFile()).Times(m_type == TestLoadType::EModelName ? 1 : 0); - ASSERT_NO_THROW(testLoad([&](ov::Core& core) { + OV_ASSERT_NO_THROW(testLoad([&](ov::Core& core) { core.set_property(ov::cache_dir(m_cacheDir)); m_testFunction(core); })); @@ -803,7 +804,7 @@ TEST_P(CachingTest, TestNoCacheMetric_hasCacheDirConfig_inline) { EXPECT_CALL(*mockPlugin, compile_model(A&>(), _)) .Times(!m_remoteContext ? 1 : 0); EXPECT_CALL(*mockPlugin, OnCompileModelFromFile()).Times(m_type == TestLoadType::EModelName ? 1 : 0); - ASSERT_NO_THROW(testLoad([&](ov::Core& core) { + OV_ASSERT_NO_THROW(testLoad([&](ov::Core& core) { m_testFunctionWithCfg(core, {{ov::cache_dir.name(), m_cacheDir}}); })); } @@ -831,7 +832,7 @@ TEST_P(CachingTest, TestNoCacheMetric_hasCacheDirConfig_by_device_name) { EXPECT_CALL(*mockPlugin, compile_model(A&>(), _)) .Times(!m_remoteContext ? 1 : 0); EXPECT_CALL(*mockPlugin, OnCompileModelFromFile()).Times(m_type == TestLoadType::EModelName ? 
1 : 0); - ASSERT_NO_THROW(testLoad([&](ov::Core& core) { + OV_ASSERT_NO_THROW(testLoad([&](ov::Core& core) { core.set_property("mock", ov::cache_dir(m_cacheDir)); m_testFunction(core); })); @@ -2079,7 +2080,7 @@ TEST_P(CachingTest, LoadAUTO_OneDevice) { EXPECT_CALL(*mockPlugin, compile_model(A&>(), _)) .Times(TEST_COUNT - index - 1); EXPECT_CALL(*mockPlugin, import_model(_, _)).Times(index); - ASSERT_NO_THROW(testLoad([&](ov::Core& core) { + OV_ASSERT_NO_THROW(testLoad([&](ov::Core& core) { core.set_property(ov::cache_dir(cacheDir)); m_testFunction(core); })); @@ -2110,7 +2111,7 @@ TEST_P(CachingTest, LoadAUTOWithConfig) { EXPECT_CALL(*mockPlugin, compile_model(A&>(), _)) .Times(TEST_COUNT - index - 1); EXPECT_CALL(*mockPlugin, import_model(_, _)).Times(index); - ASSERT_NO_THROW(testLoad([&](ov::Core& core) { + OV_ASSERT_NO_THROW(testLoad([&](ov::Core& core) { m_testFunctionWithCfg(core, {{ov::cache_dir.name(), cacheDir}}); })); } @@ -2188,7 +2189,7 @@ TEST_P(CachingTest, LoadMulti_race) { EXPECT_CALL(*mockPlugin, import_model(_, _)).Times(devCount - 1); testLoad([&](ov::Core& core) { core.set_property(ov::cache_dir(cacheDir)); - ASSERT_NO_THROW(m_testFunction(core)); + OV_ASSERT_NO_THROW(m_testFunction(core)); }); index++; } while (duration_cast(high_resolution_clock::now() - start).count() < TEST_DURATION_MS); @@ -2229,7 +2230,7 @@ TEST_P(CachingTest, LoadMultiWithConfig_race) { EXPECT_CALL(*mockPlugin, import_model(_, _, _)).Times(0); EXPECT_CALL(*mockPlugin, import_model(_, _)).Times(devCount - 1); testLoad([&](ov::Core& core) { - ASSERT_NO_THROW(m_testFunctionWithCfg(core, {{ov::cache_dir.name(), cacheDir}})); + OV_ASSERT_NO_THROW(m_testFunctionWithCfg(core, {{ov::cache_dir.name(), cacheDir}})); }); index++; } while (duration_cast(high_resolution_clock::now() - start).count() < TEST_DURATION_MS); diff --git a/src/inference/tests/functional/get_supported_property_test.cpp b/src/inference/tests/functional/get_supported_property_test.cpp index 
2c47d24141862d..8d5638162ee660 100644 --- a/src/inference/tests/functional/get_supported_property_test.cpp +++ b/src/inference/tests/functional/get_supported_property_test.cpp @@ -7,6 +7,7 @@ #include #include "common_test_utils/file_utils.hpp" +#include "common_test_utils/test_assertions.hpp" #include "openvino/openvino.hpp" #include "openvino/runtime/internal_properties.hpp" #include "openvino/runtime/iplugin.hpp" @@ -119,8 +120,8 @@ TEST(PropertyTest, SetCacheDirPropertyCoreNoThrow) { // Cache_dir property test ov::Any value; - ASSERT_NO_THROW(core.set_property(ov::cache_dir("./tmp_cache_dir"))); - ASSERT_NO_THROW(value = core.get_property(ov::cache_dir.name())); + OV_ASSERT_NO_THROW(core.set_property(ov::cache_dir("./tmp_cache_dir"))); + OV_ASSERT_NO_THROW(value = core.get_property(ov::cache_dir.name())); EXPECT_EQ(value.as(), std::string("./tmp_cache_dir")); } @@ -128,11 +129,11 @@ TEST(PropertyTest, SetTBBForceTerminatePropertyCoreNoThrow) { ov::Core core; bool value = true; - ASSERT_NO_THROW(core.set_property(ov::force_tbb_terminate(false))); - ASSERT_NO_THROW(value = core.get_property(ov::force_tbb_terminate.name()).as()); + OV_ASSERT_NO_THROW(core.set_property(ov::force_tbb_terminate(false))); + OV_ASSERT_NO_THROW(value = core.get_property(ov::force_tbb_terminate.name()).as()); EXPECT_FALSE(value); - ASSERT_NO_THROW(core.set_property(ov::force_tbb_terminate(true))); - ASSERT_NO_THROW(value = core.get_property(ov::force_tbb_terminate.name()).as()); + OV_ASSERT_NO_THROW(core.set_property(ov::force_tbb_terminate(true))); + OV_ASSERT_NO_THROW(value = core.get_property(ov::force_tbb_terminate.name()).as()); EXPECT_TRUE(value); } diff --git a/src/inference/tests/functional/matmul_sr_tests.cpp b/src/inference/tests/functional/matmul_sr_tests.cpp index 152967967990b2..8e980ac0743474 100644 --- a/src/inference/tests/functional/matmul_sr_tests.cpp +++ b/src/inference/tests/functional/matmul_sr_tests.cpp @@ -105,7 +105,7 @@ class SmartReshapeMatMulTests : public 
ov::test::TestsCommon, ov::ResultVector results = {result}; model = std::make_shared(results, params); } - ASSERT_NO_THROW(model->reshape(test_case.new_shapes)); + OV_ASSERT_NO_THROW(model->reshape(test_case.new_shapes)); } }; @@ -144,7 +144,7 @@ TEST(SmartReshapeTransposeMatMulTests, TransposeAMatMulFuse) { m.register_pass(); m.register_pass(); m.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { auto data_A = std::make_shared(ov::element::f32, ov::Shape{1, 3, 2}); @@ -171,7 +171,7 @@ TEST(SmartReshapeTransposeMatMulTests, TransposeBMatMulFuse) { m.register_pass(); m.register_pass(); m.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { auto data_A = std::make_shared(ov::element::f32, ov::Shape{1, 2, 3}); @@ -198,7 +198,7 @@ TEST(SmartReshapeTransposeMatMulTests, TransposeAMatMulWithAttrFuse) { m.register_pass(); m.register_pass(); m.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { auto data_A = std::make_shared(ov::element::f32, ov::Shape{1, 2, 3}); @@ -225,7 +225,7 @@ TEST(SmartReshapeTransposeMatMulTests, TransposeBMatMulWithAttrFuse) { m.register_pass(); m.register_pass(); m.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { auto data_A = std::make_shared(ov::element::f32, ov::Shape{1, 2, 3}); @@ -251,7 +251,7 @@ TEST(SmartReshapeTransposeMatMulTests, TransposeAMatMulSideAttrFuse) { m.register_pass(); m.register_pass(); m.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { auto data_A = std::make_shared(ov::element::f32, ov::Shape{1, 2, 3}); @@ -278,7 +278,7 @@ TEST(SmartReshapeTransposeMatMulTests, TransposeBMatMulSideAttrFuse) { m.register_pass(); m.register_pass(); m.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { auto data_A = std::make_shared(ov::element::f32, ov::Shape{1, 3, 2}); @@ 
-306,7 +306,7 @@ TEST(SmartReshapeTransposeMatMulTests, TransposeBothMatMulFuse) { m.register_pass(); m.register_pass(); m.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { auto data_A = std::make_shared(ov::element::f32, ov::Shape{1, 3, 2}); @@ -341,7 +341,7 @@ TEST(SmartReshapeTransposeMatMulTests, TransposeBothMatMulWithAttrFuse) { m.register_pass(); m.register_pass(); m.run_passes(f); - ASSERT_NO_THROW(check_rt_info(f)); + OV_ASSERT_NO_THROW(check_rt_info(f)); } { auto data_A = std::make_shared(ov::element::f32, ov::Shape{2, 3, 2}); diff --git a/src/inference/tests/functional/ov_register_plugin_test.cpp b/src/inference/tests/functional/ov_register_plugin_test.cpp index c494eb14e4d9a0..23da4f2b44ade1 100644 --- a/src/inference/tests/functional/ov_register_plugin_test.cpp +++ b/src/inference/tests/functional/ov_register_plugin_test.cpp @@ -3,6 +3,7 @@ #include #include "common_test_utils/file_utils.hpp" +#include "common_test_utils/test_assertions.hpp" #include "common_test_utils/unicode_utils.hpp" #include "openvino/openvino.hpp" #include "openvino/runtime/iplugin.hpp" @@ -43,7 +44,7 @@ TEST(RegisterPluginTests, getVersionforRegisteredPluginThrows) { mockPlugin(core, base_plugin, m_so); std::string mock_plugin_name{"MOCK_REGISTERED_HARDWARE"}; // Registered plugin with invalid so here - ASSERT_NO_THROW(core.register_plugin( + OV_ASSERT_NO_THROW(core.register_plugin( ov::util::make_plugin_library_name(ov::test::utils::getExecutableDirectory(), std::string("mock_registered_engine") + OV_BUILD_POSTFIX), mock_plugin_name)); @@ -53,7 +54,7 @@ TEST(RegisterPluginTests, getVersionforRegisteredPluginThrows) { TEST(RegisterPluginTests, getVersionforNoRegisteredPluginNoThrows) { ov::Core core; std::map versions; - ASSERT_NO_THROW(versions = core.get_versions("unkown_device")); + OV_ASSERT_NO_THROW(versions = core.get_versions("unkown_device")); ASSERT_TRUE(versions.empty()); auto plugin = std::make_shared>(); @@ -72,11 +73,11 
@@ TEST(RegisterPluginTests, getVersionforNoRegisteredPluginNoThrows) { std::string mock_plugin_name{"MOCK_HARDWARE"}; - ASSERT_NO_THROW( + OV_ASSERT_NO_THROW( core.register_plugin(ov::util::make_plugin_library_name(ov::test::utils::getExecutableDirectory(), std::string("mock_engine") + OV_BUILD_POSTFIX), mock_plugin_name)); - ASSERT_NO_THROW(core.get_versions("MOCK_HARDWARE")); + OV_ASSERT_NO_THROW(core.get_versions("MOCK_HARDWARE")); } TEST(RegisterPluginTests, registerNewPluginNoThrows) { @@ -87,11 +88,11 @@ TEST(RegisterPluginTests, registerNewPluginNoThrows) { mockPlugin(core, base_plugin, m_so); std::string mock_plugin_name{"MOCK_HARDWARE"}; - ASSERT_NO_THROW( + OV_ASSERT_NO_THROW( core.register_plugin(ov::util::make_plugin_library_name(ov::test::utils::getExecutableDirectory(), std::string("mock_engine") + OV_BUILD_POSTFIX), mock_plugin_name)); - ASSERT_NO_THROW(core.get_property(mock_plugin_name, ov::supported_properties)); + OV_ASSERT_NO_THROW(core.get_property(mock_plugin_name, ov::supported_properties)); core.unload_plugin(mock_plugin_name); } @@ -104,7 +105,7 @@ TEST(RegisterPluginTests, registerExistingPluginThrows) { mockPlugin(core, base_plugin, m_so); std::string mock_plugin_name{"MOCK_HARDWARE"}; - ASSERT_NO_THROW( + OV_ASSERT_NO_THROW( core.register_plugin(ov::util::make_plugin_library_name(ov::test::utils::getExecutableDirectory(), std::string("mock_engine") + OV_BUILD_POSTFIX), mock_plugin_name)); @@ -128,7 +129,7 @@ inline std::string getPluginFile() { TEST(RegisterPluginTests, smoke_createMockEngineConfigNoThrows) { const std::string filename = getPluginFile(); - ASSERT_NO_THROW(ov::Core core(filename)); + OV_ASSERT_NO_THROW(ov::Core core(filename)); ov::test::utils::removeFile(filename.c_str()); } @@ -173,11 +174,11 @@ TEST(RegisterPluginTests, accessToUnregisteredPluginThrows) { std::vector devices = core.get_available_devices(); for (auto&& device : devices) { - ASSERT_NO_THROW(core.get_versions(device)); - 
ASSERT_NO_THROW(core.unload_plugin(device)); - ASSERT_NO_THROW(core.set_property(device, ov::AnyMap{})); - ASSERT_NO_THROW(core.get_versions(device)); - ASSERT_NO_THROW(core.unload_plugin(device)); + OV_ASSERT_NO_THROW(core.get_versions(device)); + OV_ASSERT_NO_THROW(core.unload_plugin(device)); + OV_ASSERT_NO_THROW(core.set_property(device, ov::AnyMap{})); + OV_ASSERT_NO_THROW(core.get_versions(device)); + OV_ASSERT_NO_THROW(core.unload_plugin(device)); } } @@ -203,12 +204,12 @@ TEST(RegisterPluginTests, registerPluginsXMLUnicodePath) { ov::Core core; GTEST_COUT << "Core created " << testIndex << std::endl; - ASSERT_NO_THROW(core.register_plugins(::ov::util::wstring_to_string(pluginsXmlW))); + OV_ASSERT_NO_THROW(core.register_plugins(::ov::util::wstring_to_string(pluginsXmlW))); ov::test::utils::removeFile(pluginsXmlW); - ASSERT_NO_THROW(core.get_versions("mock")); // from pluginXML + OV_ASSERT_NO_THROW(core.get_versions("mock")); // from pluginXML std::vector devices = core.get_available_devices(); - ASSERT_NO_THROW(core.get_versions(devices.at(0))); + OV_ASSERT_NO_THROW(core.get_versions(devices.at(0))); GTEST_COUT << "Plugin created " << testIndex << std::endl; GTEST_COUT << "OK" << std::endl; diff --git a/src/inference/tests/functional/task_executor_tests.cpp b/src/inference/tests/functional/task_executor_tests.cpp index b904da9ec1f378..ca8fc92825e920 100644 --- a/src/inference/tests/functional/task_executor_tests.cpp +++ b/src/inference/tests/functional/task_executor_tests.cpp @@ -7,6 +7,7 @@ #include #include +#include "common_test_utils/test_assertions.hpp" #include "openvino/core/parallel.hpp" #include "openvino/runtime/threading/cpu_streams_executor.hpp" #include "openvino/runtime/threading/immediate_executor.hpp" @@ -44,7 +45,7 @@ TEST_P(TaskExecutorTests, canRunCustomFunction) { i++; }); f.wait(); - ASSERT_NO_THROW(f.get()); + OV_ASSERT_NO_THROW(f.get()); } TEST_P(TaskExecutorTests, canRun2FunctionsOneByOne) { @@ -56,13 +57,13 @@ 
TEST_P(TaskExecutorTests, canRun2FunctionsOneByOne) { i += 1; }); f1.wait(); - ASSERT_NO_THROW(f1.get()); + OV_ASSERT_NO_THROW(f1.get()); auto f2 = async(taskExecutor, [&]() { std::unique_lock l{m}; i *= 2; }); f2.wait(); - ASSERT_NO_THROW(f2.get()); + OV_ASSERT_NO_THROW(f2.get()); ASSERT_EQ(i, 2); } @@ -119,7 +120,7 @@ TEST_P(TaskExecutorTests, canRunMultipleTasksFromMultipleThreads) { for (auto&& f : futures) f.wait(); for (auto&& f : futures) - ASSERT_NO_THROW(f.get()); + OV_ASSERT_NO_THROW(f.get()); ASSERT_EQ(THREAD_NUMBER * NUM_INTERNAL_ITERATIONS, sharedVar); for (auto&& thread : threads) if (thread.joinable()) diff --git a/src/inference/tests/unit/compiled_model_test.cpp b/src/inference/tests/unit/compiled_model_test.cpp index 55e08444037a54..b149a16e14d45d 100644 --- a/src/inference/tests/unit/compiled_model_test.cpp +++ b/src/inference/tests/unit/compiled_model_test.cpp @@ -87,7 +87,7 @@ TEST_F(CompiledModelTests, GetOutputsThrowsIfReturnErr) { TEST_F(CompiledModelTests, GetOutputs) { std::vector> data; EXPECT_CALL(*mock_compiled_model.get(), outputs()).Times(1).WillOnce(ReturnRefOfCopy(model->outputs())); - ASSERT_NO_THROW(data = compiled_model.outputs()); + OV_ASSERT_NO_THROW(data = compiled_model.outputs()); ASSERT_EQ(data, model->outputs()); } @@ -101,7 +101,7 @@ TEST_F(CompiledModelTests, GetInputs) { EXPECT_CALL(*mock_compiled_model.get(), inputs()).Times(1).WillOnce(ReturnRefOfCopy(model->inputs())); std::vector> info; - ASSERT_NO_THROW(info = compiled_model.inputs()); + OV_ASSERT_NO_THROW(info = compiled_model.inputs()); ASSERT_EQ(info, model->inputs()); } @@ -118,7 +118,7 @@ class CompiledModelWithIInferReqTests : public CompiledModelTests { TEST_F(CompiledModelWithIInferReqTests, CanCreateInferRequest) { EXPECT_CALL(*mock_compiled_model.get(), create_infer_request()).WillOnce(Return(mock_infer_request)); ov::InferRequest actualInferReq; - ASSERT_NO_THROW(actualInferReq = compiled_model.create_infer_request()); + OV_ASSERT_NO_THROW(actualInferReq 
= compiled_model.create_infer_request()); } TEST_F(CompiledModelWithIInferReqTests, CreateInferRequestThrowsIfReturnNotOK) { @@ -129,7 +129,7 @@ TEST_F(CompiledModelWithIInferReqTests, CreateInferRequestThrowsIfReturnNotOK) { TEST_F(CompiledModelWithIInferReqTests, QueryStateThrowsIfReturnErr) { EXPECT_CALL(*mock_compiled_model.get(), create_infer_request()).WillOnce(Return(mock_infer_request)); ov::InferRequest actualInferReq; - ASSERT_NO_THROW(actualInferReq = compiled_model.create_infer_request()); + OV_ASSERT_NO_THROW(actualInferReq = compiled_model.create_infer_request()); EXPECT_CALL(*mock_infer_request.get(), query_state()).Times(1).WillOnce(Throw(std::runtime_error{""})); EXPECT_THROW(actualInferReq.query_state(), std::runtime_error); } @@ -137,7 +137,7 @@ TEST_F(CompiledModelWithIInferReqTests, QueryStateThrowsIfReturnErr) { TEST_F(CompiledModelWithIInferReqTests, QueryState) { EXPECT_CALL(*mock_compiled_model.get(), create_infer_request()).WillOnce(Return(mock_infer_request)); ov::InferRequest actualInferReq; - ASSERT_NO_THROW(actualInferReq = compiled_model.create_infer_request()); + OV_ASSERT_NO_THROW(actualInferReq = compiled_model.create_infer_request()); ov::SoPtr state = std::make_shared(); EXPECT_CALL(*mock_infer_request.get(), query_state()) .Times(1) @@ -165,7 +165,7 @@ class CompiledModelBaseTests : public ::testing::Test { TEST_F(CompiledModelBaseTests, canForwardCreateInferRequest) { auto inferReqInternal = std::make_shared(); EXPECT_CALL(*mock_compiled_model.get(), create_infer_request()).Times(1).WillRepeatedly(Return(inferReqInternal)); - ASSERT_NO_THROW(compiled_model.create_infer_request()); + OV_ASSERT_NO_THROW(compiled_model.create_infer_request()); } TEST_F(CompiledModelBaseTests, canReportErrorInCreateInferRequest) { diff --git a/src/inference/tests/unit/infer_request_test.cpp b/src/inference/tests/unit/infer_request_test.cpp index 0157d55b101e3e..2c879189edcc3b 100644 --- a/src/inference/tests/unit/infer_request_test.cpp +++ 
b/src/inference/tests/unit/infer_request_test.cpp @@ -48,7 +48,7 @@ class OVInferRequestBaseTests : public ::testing::Test { // start_async TEST_F(OVInferRequestBaseTests, canForwardStartAsync) { EXPECT_CALL(*mock_impl.get(), start_async()).Times(1); - ASSERT_NO_THROW(request.start_async()); + OV_ASSERT_NO_THROW(request.start_async()); } TEST_F(OVInferRequestBaseTests, canReportErrorInStartAsync) { @@ -59,7 +59,7 @@ TEST_F(OVInferRequestBaseTests, canReportErrorInStartAsync) { // wait TEST_F(OVInferRequestBaseTests, canForwardWait) { EXPECT_CALL(*mock_impl.get(), wait()).WillOnce(Return()); - ASSERT_NO_THROW(request.wait()); + OV_ASSERT_NO_THROW(request.wait()); } TEST_F(OVInferRequestBaseTests, canReportErrorInWait) { @@ -70,7 +70,7 @@ TEST_F(OVInferRequestBaseTests, canReportErrorInWait) { // Infer TEST_F(OVInferRequestBaseTests, canForwardInfer) { EXPECT_CALL(*mock_impl.get(), infer()).Times(1); - ASSERT_NO_THROW(request.infer()); + OV_ASSERT_NO_THROW(request.infer()); } TEST_F(OVInferRequestBaseTests, canReportErrorInInfer) { @@ -82,7 +82,7 @@ TEST_F(OVInferRequestBaseTests, canReportErrorInInfer) { TEST_F(OVInferRequestBaseTests, canForwardGetPerformanceCounts) { std::vector info; EXPECT_CALL(*mock_impl.get(), get_profiling_info()).WillOnce(Return(std::vector{})); - ASSERT_NO_THROW(request.get_profiling_info()); + OV_ASSERT_NO_THROW(request.get_profiling_info()); } TEST_F(OVInferRequestBaseTests, canReportErrorInGetPerformanceCounts) { @@ -101,7 +101,7 @@ TEST_F(OVInferRequestBaseTests, canForwardGetTensor) { EXPECT_CALL(*mock_impl.get(), get_outputs()).WillOnce(ReturnRef(inputs)); EXPECT_CALL(*mock_impl.get(), get_tensors(_)).WillOnce(Return(std::vector>{})); EXPECT_CALL(*mock_impl.get(), get_tensor(_)).WillOnce(Return(ov::make_tensor(ov::element::f32, {1, 2, 3, 3}))); - ASSERT_NO_THROW(request.get_tensor("test_name")); + OV_ASSERT_NO_THROW(request.get_tensor("test_name")); } TEST_F(OVInferRequestBaseTests, canReportErrorInGetTensor) { @@ -124,7 +124,7 @@ 
TEST_F(OVInferRequestBaseTests, canForwardSetTensor) { EXPECT_CALL(*mock_impl.get(), get_inputs()).WillOnce(ReturnRef(inputs)); EXPECT_CALL(*mock_impl.get(), get_outputs()).WillOnce(ReturnRef(inputs)); EXPECT_CALL(*mock_impl.get(), set_tensor(_, _)).Times(1); - ASSERT_NO_THROW(request.set_tensor("test_name", data)); + OV_ASSERT_NO_THROW(request.set_tensor("test_name", data)); } TEST_F(OVInferRequestBaseTests, canReportErrorInSetTensor) { @@ -141,7 +141,7 @@ TEST_F(OVInferRequestBaseTests, canReportErrorInSetTensor) { // set_callback TEST_F(OVInferRequestBaseTests, canForwardSetCompletionCallback) { EXPECT_CALL(*mock_impl.get(), set_callback(_)).Times(1); - ASSERT_NO_THROW(request.set_callback(nullptr)); + OV_ASSERT_NO_THROW(request.set_callback(nullptr)); } TEST_F(OVInferRequestBaseTests, canReportErrorInSetCompletionCallback) { diff --git a/src/plugins/auto/tests/functional/behavior/async_compiled_for_multiple_device_test.cpp b/src/plugins/auto/tests/functional/behavior/async_compiled_for_multiple_device_test.cpp index 103fb358f91b22..083de1467f3a9f 100644 --- a/src/plugins/auto/tests/functional/behavior/async_compiled_for_multiple_device_test.cpp +++ b/src/plugins/auto/tests/functional/behavior/async_compiled_for_multiple_device_test.cpp @@ -15,8 +15,8 @@ using namespace ov::auto_plugin::tests; #ifdef ENABLETESTTHREADING TEST_F(AutoFuncTests, can_compile_with_multiple_devices) { ov::CompiledModel compiled_model; - ASSERT_NO_THROW(compiled_model = - core.compile_model(model_can_batch, "AUTO", {ov::device::priorities("MOCK_GPU", "MOCK_CPU")})); + OV_ASSERT_NO_THROW( + compiled_model = core.compile_model(model_can_batch, "AUTO", {ov::device::priorities("MOCK_GPU", "MOCK_CPU")})); compiled_model = core.compile_model(model_can_batch, "AUTO", {ov::device::priorities("MOCK_GPU", "MOCK_CPU"), diff --git a/src/plugins/auto/tests/functional/behavior/auto_func_test.hpp b/src/plugins/auto/tests/functional/behavior/auto_func_test.hpp index 711355315b4516..69ab036f22e0af 
100644 --- a/src/plugins/auto/tests/functional/behavior/auto_func_test.hpp +++ b/src/plugins/auto/tests/functional/behavior/auto_func_test.hpp @@ -9,6 +9,7 @@ #include #include +#include "common_test_utils/test_assertions.hpp" #include "openvino/runtime/auto/properties.hpp" #include "openvino/runtime/core.hpp" #include "openvino/runtime/iplugin.hpp" diff --git a/src/plugins/auto/tests/functional/behavior/callback_test.cpp b/src/plugins/auto/tests/functional/behavior/callback_test.cpp index e56c117285d79a..e1280c718116cf 100644 --- a/src/plugins/auto/tests/functional/behavior/callback_test.cpp +++ b/src/plugins/auto/tests/functional/behavior/callback_test.cpp @@ -11,74 +11,74 @@ using namespace ov::auto_plugin::tests; TEST_F(AutoFuncTests, can_infer_with_cpu_help) { ov::CompiledModel compiled_model; - ASSERT_NO_THROW(compiled_model = - core.compile_model(model_can_batch, "AUTO", {ov::device::priorities("MOCK_GPU", "MOCK_CPU")})); + OV_ASSERT_NO_THROW( + compiled_model = core.compile_model(model_can_batch, "AUTO", {ov::device::priorities("MOCK_GPU", "MOCK_CPU")})); auto req = compiled_model.create_infer_request(); bool is_called = false; - ASSERT_NO_THROW(req.set_callback([&](std::exception_ptr exception_ptr) { + OV_ASSERT_NO_THROW(req.set_callback([&](std::exception_ptr exception_ptr) { // HSD_1805940120: Wait on starting callback return HDDL_ERROR_INVAL_TASK_HANDLE ASSERT_EQ(exception_ptr, nullptr); is_called = true; })); - ASSERT_NO_THROW(req.start_async()); - ASSERT_NO_THROW(req.wait()); + OV_ASSERT_NO_THROW(req.start_async()); + OV_ASSERT_NO_THROW(req.wait()); ASSERT_TRUE(is_called); } TEST_F(AutoFuncTests, impl_does_not_copy_callback) { ov::CompiledModel compiled_model; - ASSERT_NO_THROW(compiled_model = - core.compile_model(model_can_batch, "AUTO", {ov::device::priorities("MOCK_GPU", "MOCK_CPU")})); + OV_ASSERT_NO_THROW( + compiled_model = core.compile_model(model_can_batch, "AUTO", {ov::device::priorities("MOCK_GPU", "MOCK_CPU")})); ov::InferRequest req; - 
ASSERT_NO_THROW(req = compiled_model.create_infer_request()); + OV_ASSERT_NO_THROW(req = compiled_model.create_infer_request()); { auto somePtr = std::make_shared(42); - ASSERT_NO_THROW(req.set_callback([somePtr](std::exception_ptr exception_ptr) { + OV_ASSERT_NO_THROW(req.set_callback([somePtr](std::exception_ptr exception_ptr) { ASSERT_EQ(nullptr, exception_ptr); ASSERT_EQ(1, somePtr.use_count()); })); } - ASSERT_NO_THROW(req.start_async()); - ASSERT_NO_THROW(req.wait()); + OV_ASSERT_NO_THROW(req.start_async()); + OV_ASSERT_NO_THROW(req.wait()); } TEST_F(AutoFuncTests, return_result_not_ready) { ov::CompiledModel compiled_model; - ASSERT_NO_THROW(compiled_model = - core.compile_model(model_can_batch, "AUTO", {ov::device::priorities("MOCK_GPU", "MOCK_CPU")})); + OV_ASSERT_NO_THROW( + compiled_model = core.compile_model(model_can_batch, "AUTO", {ov::device::priorities("MOCK_GPU", "MOCK_CPU")})); ov::InferRequest req; - ASSERT_NO_THROW(req = compiled_model.create_infer_request()); + OV_ASSERT_NO_THROW(req = compiled_model.create_infer_request()); std::promise callbackTimeStamp; auto callbackTimeStampFuture = callbackTimeStamp.get_future(); // add a callback to the request and capture the timestamp - ASSERT_NO_THROW(req.set_callback([&](std::exception_ptr exception_ptr) { + OV_ASSERT_NO_THROW(req.set_callback([&](std::exception_ptr exception_ptr) { if (exception_ptr) { callbackTimeStamp.set_exception(exception_ptr); } else { callbackTimeStamp.set_value(std::chrono::system_clock::now()); } })); - ASSERT_NO_THROW(req.start_async()); + OV_ASSERT_NO_THROW(req.start_async()); bool ready = false; - ASSERT_NO_THROW(ready = req.wait_for({})); + OV_ASSERT_NO_THROW(ready = req.wait_for({})); // get timestamp taken AFTER return from the wait(STATUS_ONLY) const auto afterWaitTimeStamp = std::chrono::system_clock::now(); if (afterWaitTimeStamp < callbackTimeStampFuture.get()) { ASSERT_FALSE(ready); } - ASSERT_NO_THROW(req.wait()); + OV_ASSERT_NO_THROW(req.wait()); } 
TEST_F(AutoFuncTests, rethrow_if_callback_throw) { ov::CompiledModel compiled_model; - ASSERT_NO_THROW(compiled_model = - core.compile_model(model_can_batch, "AUTO", {ov::device::priorities("MOCK_GPU", "MOCK_CPU")})); + OV_ASSERT_NO_THROW( + compiled_model = core.compile_model(model_can_batch, "AUTO", {ov::device::priorities("MOCK_GPU", "MOCK_CPU")})); ov::InferRequest req; - ASSERT_NO_THROW(req = compiled_model.create_infer_request()); - ASSERT_NO_THROW(req.set_callback([](std::exception_ptr) { + OV_ASSERT_NO_THROW(req = compiled_model.create_infer_request()); + OV_ASSERT_NO_THROW(req.set_callback([](std::exception_ptr) { OPENVINO_THROW("Throw"); })); - ASSERT_NO_THROW(req.start_async()); + OV_ASSERT_NO_THROW(req.start_async()); ASSERT_THROW(req.wait(), ov::Exception); } @@ -90,11 +90,11 @@ TEST_F(AutoFuncTests, can_start_several_async_inside_completion_callback_with_sa }; TestUserData data; ov::CompiledModel compiled_model; - ASSERT_NO_THROW(compiled_model = - core.compile_model(model_can_batch, "AUTO", {ov::device::priorities("MOCK_GPU", "MOCK_CPU")})); + OV_ASSERT_NO_THROW( + compiled_model = core.compile_model(model_can_batch, "AUTO", {ov::device::priorities("MOCK_GPU", "MOCK_CPU")})); ov::InferRequest req; - ASSERT_NO_THROW(req = compiled_model.create_infer_request()); - ASSERT_NO_THROW(req.set_callback([&](std::exception_ptr exception_ptr) { + OV_ASSERT_NO_THROW(req = compiled_model.create_infer_request()); + OV_ASSERT_NO_THROW(req.set_callback([&](std::exception_ptr exception_ptr) { if (exception_ptr) { data.promise.set_exception(exception_ptr); } else { @@ -106,8 +106,8 @@ TEST_F(AutoFuncTests, can_start_several_async_inside_completion_callback_with_sa } })); auto future = data.promise.get_future(); - ASSERT_NO_THROW(req.start_async()); - ASSERT_NO_THROW(req.wait()); + OV_ASSERT_NO_THROW(req.start_async()); + OV_ASSERT_NO_THROW(req.wait()); future.wait(); auto callbackStatus = future.get(); ASSERT_TRUE(callbackStatus); diff --git 
a/src/plugins/auto/tests/functional/behavior/infer_multi_threading_tests.cpp b/src/plugins/auto/tests/functional/behavior/infer_multi_threading_tests.cpp index 65940e4db009e1..9eb9e8b079a315 100644 --- a/src/plugins/auto/tests/functional/behavior/infer_multi_threading_tests.cpp +++ b/src/plugins/auto/tests/functional/behavior/infer_multi_threading_tests.cpp @@ -10,15 +10,15 @@ using namespace ov::auto_plugin::tests; TEST_F(AutoFuncTests, can_run_3syncrequests_consistently_from_threads) { ov::CompiledModel compiled_model; - ASSERT_NO_THROW(compiled_model = core.compile_model( - model_can_batch, - "AUTO", - {ov::device::priorities("MOCK_GPU", "MOCK_CPU"), - ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)})); + OV_ASSERT_NO_THROW(compiled_model = core.compile_model( + model_can_batch, + "AUTO", + {ov::device::priorities("MOCK_GPU", "MOCK_CPU"), + ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)})); ov::InferRequest req1, req2, req3; - ASSERT_NO_THROW(req1 = compiled_model.create_infer_request()); - ASSERT_NO_THROW(req2 = compiled_model.create_infer_request()); - ASSERT_NO_THROW(req3 = compiled_model.create_infer_request()); + OV_ASSERT_NO_THROW(req1 = compiled_model.create_infer_request()); + OV_ASSERT_NO_THROW(req2 = compiled_model.create_infer_request()); + OV_ASSERT_NO_THROW(req3 = compiled_model.create_infer_request()); auto f1 = std::async(std::launch::async, [&] { req1.infer(); }); @@ -33,25 +33,25 @@ TEST_F(AutoFuncTests, can_run_3syncrequests_consistently_from_threads) { f2.wait(); f3.wait(); - ASSERT_NO_THROW(f1.get()); - ASSERT_NO_THROW(f2.get()); - ASSERT_NO_THROW(f3.get()); + OV_ASSERT_NO_THROW(f1.get()); + OV_ASSERT_NO_THROW(f2.get()); + OV_ASSERT_NO_THROW(f3.get()); } TEST_F(AutoFuncTests, can_run_3asyncrequests_consistently_from_threads_without_wait) { ov::CompiledModel compiled_model; - ASSERT_NO_THROW(compiled_model = core.compile_model( - model_can_batch, - "AUTO", - 
{ov::device::priorities("MOCK_GPU", "MOCK_CPU"), - ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)})); + OV_ASSERT_NO_THROW(compiled_model = core.compile_model( + model_can_batch, + "AUTO", + {ov::device::priorities("MOCK_GPU", "MOCK_CPU"), + ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)})); ov::InferRequest req1, req2, req3; - ASSERT_NO_THROW(req1 = compiled_model.create_infer_request()); - ASSERT_NO_THROW(req2 = compiled_model.create_infer_request()); - ASSERT_NO_THROW(req3 = compiled_model.create_infer_request()); - ASSERT_NO_THROW(req1.infer()); - ASSERT_NO_THROW(req2.infer()); - ASSERT_NO_THROW(req3.infer()); + OV_ASSERT_NO_THROW(req1 = compiled_model.create_infer_request()); + OV_ASSERT_NO_THROW(req2 = compiled_model.create_infer_request()); + OV_ASSERT_NO_THROW(req3 = compiled_model.create_infer_request()); + OV_ASSERT_NO_THROW(req1.infer()); + OV_ASSERT_NO_THROW(req2.infer()); + OV_ASSERT_NO_THROW(req3.infer()); auto f1 = std::async(std::launch::async, [&] { req1.start_async(); @@ -67,48 +67,48 @@ TEST_F(AutoFuncTests, can_run_3asyncrequests_consistently_from_threads_without_w f2.wait(); f3.wait(); - ASSERT_NO_THROW(f1.get()); - ASSERT_NO_THROW(f2.get()); - ASSERT_NO_THROW(f3.get()); + OV_ASSERT_NO_THROW(f1.get()); + OV_ASSERT_NO_THROW(f2.get()); + OV_ASSERT_NO_THROW(f3.get()); } TEST_F(AutoFuncTests, can_run_3asyncrequests_consistently_with_wait) { ov::CompiledModel compiled_model; - ASSERT_NO_THROW(compiled_model = core.compile_model( - model_can_batch, - "AUTO", - {ov::device::priorities("MOCK_GPU", "MOCK_CPU"), - ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)})); + OV_ASSERT_NO_THROW(compiled_model = core.compile_model( + model_can_batch, + "AUTO", + {ov::device::priorities("MOCK_GPU", "MOCK_CPU"), + ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)})); ov::InferRequest req1, req2, req3; - ASSERT_NO_THROW(req1 = 
compiled_model.create_infer_request()); - ASSERT_NO_THROW(req2 = compiled_model.create_infer_request()); - ASSERT_NO_THROW(req3 = compiled_model.create_infer_request()); + OV_ASSERT_NO_THROW(req1 = compiled_model.create_infer_request()); + OV_ASSERT_NO_THROW(req2 = compiled_model.create_infer_request()); + OV_ASSERT_NO_THROW(req3 = compiled_model.create_infer_request()); req1.start_async(); - ASSERT_NO_THROW(req1.wait()); + OV_ASSERT_NO_THROW(req1.wait()); req2.start_async(); - ASSERT_NO_THROW(req2.wait()); + OV_ASSERT_NO_THROW(req2.wait()); req3.start_async(); - ASSERT_NO_THROW(req3.wait()); + OV_ASSERT_NO_THROW(req3.wait()); } TEST_F(AutoFuncTests, can_run_3asyncrequests_parallel_with_wait) { ov::CompiledModel compiled_model; - ASSERT_NO_THROW(compiled_model = core.compile_model( - model_can_batch, - "AUTO", - {ov::device::priorities("MOCK_GPU", "MOCK_CPU"), - ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)})); + OV_ASSERT_NO_THROW(compiled_model = core.compile_model( + model_can_batch, + "AUTO", + {ov::device::priorities("MOCK_GPU", "MOCK_CPU"), + ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)})); ov::InferRequest req1, req2, req3; - ASSERT_NO_THROW(req1 = compiled_model.create_infer_request()); - ASSERT_NO_THROW(req2 = compiled_model.create_infer_request()); - ASSERT_NO_THROW(req3 = compiled_model.create_infer_request()); + OV_ASSERT_NO_THROW(req1 = compiled_model.create_infer_request()); + OV_ASSERT_NO_THROW(req2 = compiled_model.create_infer_request()); + OV_ASSERT_NO_THROW(req3 = compiled_model.create_infer_request()); req1.start_async(); req2.start_async(); req3.start_async(); - ASSERT_NO_THROW(req2.wait()); - ASSERT_NO_THROW(req1.wait()); - ASSERT_NO_THROW(req3.wait()); + OV_ASSERT_NO_THROW(req2.wait()); + OV_ASSERT_NO_THROW(req1.wait()); + OV_ASSERT_NO_THROW(req3.wait()); } diff --git a/src/plugins/auto/tests/functional/behavior/infer_schedule_test.cpp 
b/src/plugins/auto/tests/functional/behavior/infer_schedule_test.cpp index d24099722da81d..d04c9de50edb71 100644 --- a/src/plugins/auto/tests/functional/behavior/infer_schedule_test.cpp +++ b/src/plugins/auto/tests/functional/behavior/infer_schedule_test.cpp @@ -39,36 +39,36 @@ class InferSchedulePolicyTest : public AutoFuncTests, public testing::WithParamI TEST_P(InferSchedulePolicyTest, can_run_async_requests_with_different_schedule_policy) { ov::CompiledModel compiled_model; property.emplace(ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)); - ASSERT_NO_THROW(compiled_model = core.compile_model(model_cannot_batch, "AUTO", property)); + OV_ASSERT_NO_THROW(compiled_model = core.compile_model(model_cannot_batch, "AUTO", property)); std::vector inferReqsQueue; int count = niters; while (count--) { ov::InferRequest req; - ASSERT_NO_THROW(req = compiled_model.create_infer_request()); + OV_ASSERT_NO_THROW(req = compiled_model.create_infer_request()); inferReqsQueue.push_back(req); } for (auto& req : inferReqsQueue) { - ASSERT_NO_THROW(req.start_async()); + OV_ASSERT_NO_THROW(req.start_async()); } for (auto& req : inferReqsQueue) { - ASSERT_NO_THROW(req.wait()); + OV_ASSERT_NO_THROW(req.wait()); } } TEST_P(InferSchedulePolicyTest, can_run_sync_requests_with_different_schedule_policy) { ov::CompiledModel compiled_model; property.emplace(ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)); - ASSERT_NO_THROW(compiled_model = core.compile_model(model_cannot_batch, "AUTO", property)); + OV_ASSERT_NO_THROW(compiled_model = core.compile_model(model_cannot_batch, "AUTO", property)); std::vector inferReqsQueue; int count = niters; while (count--) { ov::InferRequest req; - ASSERT_NO_THROW(req = compiled_model.create_infer_request()); + OV_ASSERT_NO_THROW(req = compiled_model.create_infer_request()); inferReqsQueue.push_back(req); } for (auto& req : inferReqsQueue) { - ASSERT_NO_THROW(req.infer()); - 
ASSERT_NO_THROW(req.wait()); + OV_ASSERT_NO_THROW(req.infer()); + OV_ASSERT_NO_THROW(req.wait()); } } diff --git a/src/plugins/auto/tests/functional/behavior/io_tensor.cpp b/src/plugins/auto/tests/functional/behavior/io_tensor.cpp index fdc40b1beb61a3..0fffe6ca4c4565 100644 --- a/src/plugins/auto/tests/functional/behavior/io_tensor.cpp +++ b/src/plugins/auto/tests/functional/behavior/io_tensor.cpp @@ -39,9 +39,9 @@ TEST_P(InferRequest_IOTensor_Test, can_set_and_get_input) { auto compiled_model = core.compile_model(model_cannot_batch, target_device, property); req = compiled_model.create_infer_request(); auto tensor = ov::test::utils::create_and_fill_tensor(input.get_element_type(), input.get_shape()); - ASSERT_NO_THROW(req.set_tensor(input, tensor)); + OV_ASSERT_NO_THROW(req.set_tensor(input, tensor)); ov::Tensor actual_tensor; - ASSERT_NO_THROW(actual_tensor = req.get_tensor(input)); + OV_ASSERT_NO_THROW(actual_tensor = req.get_tensor(input)); ASSERT_TRUE(actual_tensor); ASSERT_NE(nullptr, actual_tensor.data()); @@ -79,8 +79,8 @@ TEST_P(InferRequest_IOTensor_Test, second_call_get_input) { auto compiled_model = core.compile_model(model_cannot_batch, target_device, property); req = compiled_model.create_infer_request(); ov::Tensor tensor1, tensor2; - ASSERT_NO_THROW(tensor1 = req.get_tensor(input)); - ASSERT_NO_THROW(tensor2 = req.get_tensor(input)); + OV_ASSERT_NO_THROW(tensor1 = req.get_tensor(input)); + OV_ASSERT_NO_THROW(tensor2 = req.get_tensor(input)); ASSERT_EQ(tensor1.data(), tensor2.data()); } @@ -88,8 +88,8 @@ TEST_P(InferRequest_IOTensor_Test, second_call_get_output) { auto compiled_model = core.compile_model(model_cannot_batch, target_device, property); req = compiled_model.create_infer_request(); ov::Tensor tensor1, tensor2; - ASSERT_NO_THROW(tensor1 = req.get_tensor(output)); - ASSERT_NO_THROW(tensor2 = req.get_tensor(output)); + OV_ASSERT_NO_THROW(tensor1 = req.get_tensor(output)); + OV_ASSERT_NO_THROW(tensor2 = req.get_tensor(output)); 
ASSERT_EQ(tensor1.data(), tensor2.data()); } @@ -97,11 +97,11 @@ TEST_P(InferRequest_IOTensor_Test, second_call_get_input_after_async) { auto compiled_model = core.compile_model(model_cannot_batch, target_device, property); req = compiled_model.create_infer_request(); ov::Tensor tensor1, tensor2; - ASSERT_NO_THROW(req.infer()); - ASSERT_NO_THROW(tensor1 = req.get_tensor(input)); - ASSERT_NO_THROW(req.start_async()); - ASSERT_NO_THROW(req.wait()); - ASSERT_NO_THROW(tensor2 = req.get_tensor(input)); + OV_ASSERT_NO_THROW(req.infer()); + OV_ASSERT_NO_THROW(tensor1 = req.get_tensor(input)); + OV_ASSERT_NO_THROW(req.start_async()); + OV_ASSERT_NO_THROW(req.wait()); + OV_ASSERT_NO_THROW(tensor2 = req.get_tensor(input)); ASSERT_EQ(tensor1.data(), tensor2.data()); } @@ -109,11 +109,11 @@ TEST_P(InferRequest_IOTensor_Test, second_call_get_output_after_async) { auto compiled_model = core.compile_model(model_cannot_batch, target_device, property); req = compiled_model.create_infer_request(); ov::Tensor tensor1, tensor2; - ASSERT_NO_THROW(req.infer()); - ASSERT_NO_THROW(tensor1 = req.get_tensor(output)); - ASSERT_NO_THROW(req.start_async()); - ASSERT_NO_THROW(req.wait()); - ASSERT_NO_THROW(tensor2 = req.get_tensor(output)); + OV_ASSERT_NO_THROW(req.infer()); + OV_ASSERT_NO_THROW(tensor1 = req.get_tensor(output)); + OV_ASSERT_NO_THROW(req.start_async()); + OV_ASSERT_NO_THROW(req.wait()); + OV_ASSERT_NO_THROW(tensor2 = req.get_tensor(output)); ASSERT_EQ(tensor1.data(), tensor2.data()); } @@ -121,10 +121,10 @@ TEST_P(InferRequest_IOTensor_Test, can_infer_with_set_tensor) { auto compiled_model = core.compile_model(model_cannot_batch, target_device, property); req = compiled_model.create_infer_request(); auto input_tensor = ov::test::utils::create_and_fill_tensor(input.get_element_type(), input.get_shape()); - ASSERT_NO_THROW(req.set_tensor(input, input_tensor)); + OV_ASSERT_NO_THROW(req.set_tensor(input, input_tensor)); auto output_tensor = 
ov::test::utils::create_and_fill_tensor(output.get_element_type(), output.get_shape()); - ASSERT_NO_THROW(req.set_tensor(output, output_tensor)); - ASSERT_NO_THROW(req.infer()); + OV_ASSERT_NO_THROW(req.set_tensor(output, output_tensor)); + OV_ASSERT_NO_THROW(req.infer()); auto actual_input_tensor = req.get_tensor(input); ASSERT_EQ(actual_input_tensor.data(), input_tensor.data()); @@ -140,18 +140,18 @@ TEST_P(InferRequest_IOTensor_Test, can_infer_after_io_realloc) { auto out_shape = output.get_shape(); // imitates blob reallocation - ASSERT_NO_THROW(input_tensor = req.get_tensor(input)); - ASSERT_NO_THROW(input_tensor.set_shape({5, 5, 5, 5})); - ASSERT_NO_THROW(input_tensor.set_shape(in_shape)); - - ASSERT_NO_THROW(output_tensor = req.get_tensor(output)); - ASSERT_NO_THROW(output_tensor.set_shape({20, 20, 20, 20})); - ASSERT_NO_THROW(output_tensor.set_shape(out_shape)); - - ASSERT_NO_THROW(req.infer()); - ASSERT_NO_THROW(req.start_async()); - ASSERT_NO_THROW(req.wait()); - ASSERT_NO_THROW(req.get_tensor(output)); + OV_ASSERT_NO_THROW(input_tensor = req.get_tensor(input)); + OV_ASSERT_NO_THROW(input_tensor.set_shape({5, 5, 5, 5})); + OV_ASSERT_NO_THROW(input_tensor.set_shape(in_shape)); + + OV_ASSERT_NO_THROW(output_tensor = req.get_tensor(output)); + OV_ASSERT_NO_THROW(output_tensor.set_shape({20, 20, 20, 20})); + OV_ASSERT_NO_THROW(output_tensor.set_shape(out_shape)); + + OV_ASSERT_NO_THROW(req.infer()); + OV_ASSERT_NO_THROW(req.start_async()); + OV_ASSERT_NO_THROW(req.wait()); + OV_ASSERT_NO_THROW(req.get_tensor(output)); } namespace { auto props = []() { diff --git a/src/plugins/auto/tests/functional/behavior/remote_tensor_test.cpp b/src/plugins/auto/tests/functional/behavior/remote_tensor_test.cpp index fd6a3ec3a2055d..7f08174a6954ba 100644 --- a/src/plugins/auto/tests/functional/behavior/remote_tensor_test.cpp +++ b/src/plugins/auto/tests/functional/behavior/remote_tensor_test.cpp @@ -15,7 +15,7 @@ TEST_F(AutoFuncTests, 
can_create_remotetensor_then_infer_with_affinity) { auto inf_req_regular = compiled_model.create_infer_request(); inf_req_regular.set_tensor(input, fake_img_data); // infer using system memory - ASSERT_NO_THROW(inf_req_regular.infer()); + OV_ASSERT_NO_THROW(inf_req_regular.infer()); auto output_tensor_regular = inf_req_regular.get_tensor(output); auto cldnn_context = core.get_default_context("MOCK_GPU"); auto remote_tensor = cldnn_context.create_tensor(input->get_element_type(), input->get_shape()); @@ -23,7 +23,7 @@ TEST_F(AutoFuncTests, can_create_remotetensor_then_infer_with_affinity) { auto infer_req_remote = compiled_model.create_infer_request(); infer_req_remote.set_tensor(input, remote_tensor); // infer using remote tensor - ASSERT_NO_THROW(infer_req_remote.start_async()); + OV_ASSERT_NO_THROW(infer_req_remote.start_async()); // no actual inference for remote tensor, due to data not able to mmap infer_req_remote.wait(); } @@ -37,11 +37,11 @@ TEST_F(AutoFuncTests, cannot_infer_remote_if_not_initialized_for_device) { auto cldnn_context = core.get_default_context("MOCK_GPU"); auto input = model_cannot_batch->get_parameters().at(0); auto remote_tensor = cldnn_context.create_tensor(input->get_element_type(), input->get_shape()); - ASSERT_NO_THROW(compiled_model = - core.compile_model(model_cannot_batch, "MULTI", {ov::device::priorities("MOCK_3")})); + OV_ASSERT_NO_THROW(compiled_model = + core.compile_model(model_cannot_batch, "MULTI", {ov::device::priorities("MOCK_3")})); auto infer_req_remote = compiled_model.create_infer_request(); infer_req_remote.set_tensor(input, remote_tensor); - ASSERT_NO_THROW(infer_req_remote.start_async()); + OV_ASSERT_NO_THROW(infer_req_remote.start_async()); ASSERT_THROW(infer_req_remote.wait(), ov::Exception); } @@ -51,7 +51,7 @@ TEST_F(AutoFuncTests, can_create_remotetensor_then_infer_with_affinity_2_devices register_plugin_mock_gpu(core, "MOCK_3", {}); ov::CompiledModel compiled_model; auto input = 
model_cannot_batch->get_parameters().at(0); - ASSERT_NO_THROW( + OV_ASSERT_NO_THROW( compiled_model = core.compile_model(model_cannot_batch, "MULTI", {ov::device::priorities("MOCK_GPU", "MOCK_3")})); std::vector inf_req_shared = {}; @@ -66,16 +66,16 @@ TEST_F(AutoFuncTests, can_create_remotetensor_then_infer_with_affinity_2_devices auto infer_req_remote_2 = compiled_model.create_infer_request(); infer_req_remote_2.set_tensor(input, remote_tensor_2); // infer using remote tensor - ASSERT_NO_THROW(infer_req_remote.start_async()); - ASSERT_NO_THROW(infer_req_remote_2.start_async()); - ASSERT_NO_THROW(infer_req_remote.wait()); - ASSERT_NO_THROW(infer_req_remote_2.wait()); + OV_ASSERT_NO_THROW(infer_req_remote.start_async()); + OV_ASSERT_NO_THROW(infer_req_remote_2.start_async()); + OV_ASSERT_NO_THROW(infer_req_remote.wait()); + OV_ASSERT_NO_THROW(infer_req_remote_2.wait()); } TEST_F(AutoFuncTests, can_create_remotetensor_then_infer_with_affinity_2_devices_device_id) { ov::CompiledModel compiled_model; auto input = model_cannot_batch->get_parameters().at(0); - ASSERT_NO_THROW( + OV_ASSERT_NO_THROW( compiled_model = core.compile_model(model_cannot_batch, "MULTI", {ov::device::priorities("MOCK_GPU.1", "MOCK_CPU")})); auto cldnn_context = core.get_default_context("MOCK_GPU"); @@ -84,7 +84,7 @@ TEST_F(AutoFuncTests, can_create_remotetensor_then_infer_with_affinity_2_devices auto infer_req_remote = compiled_model.create_infer_request(); infer_req_remote.set_tensor(input, remote_tensor); // infer using remote tensor - ASSERT_NO_THROW(infer_req_remote.start_async()); + OV_ASSERT_NO_THROW(infer_req_remote.start_async()); ASSERT_THROW_WITH_MESSAGE(infer_req_remote.wait(), ov::Exception, "None of the devices supports a remote tensor created on the device named MOCK_GPU"); @@ -92,10 +92,10 @@ TEST_F(AutoFuncTests, can_create_remotetensor_then_infer_with_affinity_2_devices TEST_F(AutoFuncTests, can_throw_if_oversubsciption_of_inferrequest) { ov::CompiledModel compiled_model; - 
ASSERT_NO_THROW(compiled_model = core.compile_model( - model_cannot_batch, - "MULTI", - {ov::device::priorities("MOCK_GPU", "MOCK_CPU"), ov::intel_auto::device_bind_buffer(true)})); + OV_ASSERT_NO_THROW(compiled_model = core.compile_model( + model_cannot_batch, + "MULTI", + {ov::device::priorities("MOCK_GPU", "MOCK_CPU"), ov::intel_auto::device_bind_buffer(true)})); auto optimal_num = compiled_model.get_property(ov::optimal_number_of_infer_requests); for (size_t i = 0; i < optimal_num; i++) { compiled_model.create_infer_request(); diff --git a/src/plugins/auto/tests/functional/behavior/wait_test.cpp b/src/plugins/auto/tests/functional/behavior/wait_test.cpp index ce07dc70cbfb7d..754d686eb39eb0 100644 --- a/src/plugins/auto/tests/functional/behavior/wait_test.cpp +++ b/src/plugins/auto/tests/functional/behavior/wait_test.cpp @@ -11,63 +11,63 @@ using namespace ov::auto_plugin::tests; TEST_F(AutoFuncTests, can_infer_and_wait_for_result) { ov::CompiledModel compiled_model; - ASSERT_NO_THROW(compiled_model = core.compile_model( - model_can_batch, - "AUTO", - {ov::device::priorities("MOCK_GPU", "MOCK_CPU"), - ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)})); + OV_ASSERT_NO_THROW(compiled_model = core.compile_model( + model_can_batch, + "AUTO", + {ov::device::priorities("MOCK_GPU", "MOCK_CPU"), + ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)})); auto req = compiled_model.create_infer_request(); ov::Tensor tensor; auto input = compiled_model.input(); auto output = compiled_model.output(); - ASSERT_NO_THROW(tensor = req.get_tensor(input)); - ASSERT_NO_THROW(req.infer()); - ASSERT_NO_THROW(req.start_async()); - ASSERT_NO_THROW(req.wait()); - ASSERT_NO_THROW(tensor = req.get_tensor(output)); + OV_ASSERT_NO_THROW(tensor = req.get_tensor(input)); + OV_ASSERT_NO_THROW(req.infer()); + OV_ASSERT_NO_THROW(req.start_async()); + OV_ASSERT_NO_THROW(req.wait()); + OV_ASSERT_NO_THROW(tensor = req.get_tensor(output)); } 
TEST_F(AutoFuncTests, can_wait_without_startasync) { ov::CompiledModel compiled_model; - ASSERT_NO_THROW(compiled_model = core.compile_model( - model_can_batch, - "AUTO", - {ov::device::priorities("MOCK_GPU", "MOCK_CPU"), - ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)})); + OV_ASSERT_NO_THROW(compiled_model = core.compile_model( + model_can_batch, + "AUTO", + {ov::device::priorities("MOCK_GPU", "MOCK_CPU"), + ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)})); auto req = compiled_model.create_infer_request(); - ASSERT_NO_THROW(req.wait()); - ASSERT_NO_THROW(req.wait_for({})); - ASSERT_NO_THROW(req.wait_for(std::chrono::milliseconds{1})); + OV_ASSERT_NO_THROW(req.wait()); + OV_ASSERT_NO_THROW(req.wait_for({})); + OV_ASSERT_NO_THROW(req.wait_for(std::chrono::milliseconds{1})); } TEST_F(AutoFuncTests, can_throw_if_request_busy) { ov::CompiledModel compiled_model; - ASSERT_NO_THROW(compiled_model = core.compile_model( - model_can_batch, - "AUTO", - {ov::device::priorities("MOCK_GPU", "MOCK_CPU"), - ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)})); + OV_ASSERT_NO_THROW(compiled_model = core.compile_model( + model_can_batch, + "AUTO", + {ov::device::priorities("MOCK_GPU", "MOCK_CPU"), + ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)})); auto req = compiled_model.create_infer_request(); auto input = compiled_model.input(); auto output = compiled_model.output(); auto output_tensor = req.get_tensor(input); - ASSERT_NO_THROW(req.wait_for({})); - ASSERT_NO_THROW(req.start_async()); - ASSERT_NO_THROW(try { req.set_tensor(input, output_tensor); } catch (const ov::Busy&){}); - ASSERT_NO_THROW(req.wait_for({})); - ASSERT_NO_THROW(req.wait()); + OV_ASSERT_NO_THROW(req.wait_for({})); + OV_ASSERT_NO_THROW(req.start_async()); + OV_ASSERT_NO_THROW(try { req.set_tensor(input, output_tensor); } catch (const ov::Busy&){}); + OV_ASSERT_NO_THROW(req.wait_for({})); + 
OV_ASSERT_NO_THROW(req.wait()); } TEST_F(AutoFuncTests, can_throw_on_get_tensor_if_request_busy) { ov::CompiledModel compiled_model; - ASSERT_NO_THROW(compiled_model = core.compile_model( - model_can_batch, - "AUTO", - {ov::device::priorities("MOCK_GPU", "MOCK_CPU"), - ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)})); + OV_ASSERT_NO_THROW(compiled_model = core.compile_model( + model_can_batch, + "AUTO", + {ov::device::priorities("MOCK_GPU", "MOCK_CPU"), + ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)})); auto req = compiled_model.create_infer_request(); auto input = compiled_model.input(); - ASSERT_NO_THROW(req.start_async()); - ASSERT_NO_THROW(try { req.get_tensor(input); } catch (const ov::Busy&){}); - ASSERT_NO_THROW(req.wait()); + OV_ASSERT_NO_THROW(req.start_async()); + OV_ASSERT_NO_THROW(try { req.get_tensor(input); } catch (const ov::Busy&){}); + OV_ASSERT_NO_THROW(req.wait()); } \ No newline at end of file diff --git a/src/plugins/auto/tests/unit/compile_model_property_test.cpp b/src/plugins/auto/tests/unit/compile_model_property_test.cpp index 9891584afa0990..da56e247bcb47b 100644 --- a/src/plugins/auto/tests/unit/compile_model_property_test.cpp +++ b/src/plugins/auto/tests/unit/compile_model_property_test.cpp @@ -153,7 +153,7 @@ TEST_P(LoadNetworkWithSecondaryConfigsMockTest, LoadNetworkWithSecondaryConfigsT .Times(1); } - ASSERT_NO_THROW(plugin->compile_model(model, config)); + OV_ASSERT_NO_THROW(plugin->compile_model(model, config)); } using AutoLoadExeNetworkFailedTest = LoadNetworkWithSecondaryConfigsMockTest; @@ -307,7 +307,7 @@ TEST_P(CompiledModelPropertyMockTest, compiledModelGetPropertyNoThrow) { if (deviceName.find("MULTI") != std::string::npos) plugin->set_device_name("MULTI"); std::shared_ptr autoExecNetwork; - ASSERT_NO_THROW(autoExecNetwork = plugin->compile_model(model, {ov::device::priorities(devicePriorities)})); + OV_ASSERT_NO_THROW(autoExecNetwork = 
plugin->compile_model(model, {ov::device::priorities(devicePriorities)})); for (auto& property : properties) { auto result = autoExecNetwork->get_property(property.first).as(); EXPECT_EQ(result, property.second.as()); diff --git a/src/plugins/auto/tests/unit/ctput_test.cpp b/src/plugins/auto/tests/unit/ctput_test.cpp index d64005826e469e..b22b53d983f283 100644 --- a/src/plugins/auto/tests/unit/ctput_test.cpp +++ b/src/plugins/auto/tests/unit/ctput_test.cpp @@ -84,7 +84,7 @@ TEST_P(LoadNetworkWithCTPUTMockTest, CTPUTSingleDevLogicTest) { .Times(0); } - ASSERT_NO_THROW(plugin->compile_model(model, config)); + OV_ASSERT_NO_THROW(plugin->compile_model(model, config)); } using LoadNetworkWithCTPUTMockTestExeDevice = LoadNetworkWithCTPUTMockTest; @@ -97,7 +97,7 @@ TEST_P(LoadNetworkWithCTPUTMockTestExeDevice, CTPUTSingleDevExecutionDevie) { config.insert({ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)}); config.insert(ov::device::priorities(targetDevices[0])); // Call single device logic and performance hint is THROUGHPUT - ASSERT_NO_THROW(exeNetwork = plugin->compile_model(model, config)); + OV_ASSERT_NO_THROW(exeNetwork = plugin->compile_model(model, config)); EXPECT_EQ(exeNetwork->get_property(ov::execution_devices.name()).as(), ov::test::utils::DEVICE_CPU); } @@ -195,7 +195,7 @@ TEST_P(AutoCTPUTCallMulti, CTPUTDeviceLoadFailedNoExceptionThrowTest) { ::testing::Matcher(_))) .Times(1); } - ASSERT_NO_THROW(exeNetwork = plugin->compile_model(model, config)); + OV_ASSERT_NO_THROW(exeNetwork = plugin->compile_model(model, config)); EXPECT_EQ(exeNetwork->get_property(ov::execution_devices.name()).as(), secondDevice); } diff --git a/src/plugins/auto/tests/unit/default_perf_hint_test.cpp b/src/plugins/auto/tests/unit/default_perf_hint_test.cpp index abad2db3edde6c..18d35ee56d4cfb 100644 --- a/src/plugins/auto/tests/unit/default_perf_hint_test.cpp +++ b/src/plugins/auto/tests/unit/default_perf_hint_test.cpp @@ -256,7 +256,7 @@ 
TEST_P(NumStreamsAndDefaultPerfHintMockTest, NumStreamsAndDefaultPerfHintTest) { .Times(1); } - ASSERT_NO_THROW(plugin->compile_model(model, config)); + OV_ASSERT_NO_THROW(plugin->compile_model(model, config)); } INSTANTIATE_TEST_SUITE_P( @@ -319,7 +319,7 @@ TEST_P(PerHintAndDefaultPerfHintMockTest, PerfHintAndDefaultPerfHintTest) { .Times(1); } - ASSERT_NO_THROW(plugin->compile_model(model, config)); + OV_ASSERT_NO_THROW(plugin->compile_model(model, config)); } INSTANTIATE_TEST_SUITE_P( @@ -382,7 +382,7 @@ TEST_P(SecPropAndDefaultPerfHintMockTest, SecPropAndDefaultPerfHintTest) { .Times(1); } - ASSERT_NO_THROW(plugin->compile_model(model, config)); + OV_ASSERT_NO_THROW(plugin->compile_model(model, config)); } INSTANTIATE_TEST_SUITE_P( diff --git a/src/plugins/auto/tests/unit/dynamic_output_test.cpp b/src/plugins/auto/tests/unit/dynamic_output_test.cpp index dae7fdaf1a301a..c0902b38ce5d46 100644 --- a/src/plugins/auto/tests/unit/dynamic_output_test.cpp +++ b/src/plugins/auto/tests/unit/dynamic_output_test.cpp @@ -91,7 +91,7 @@ TEST_P(DynamicOutputInferenceTest, CanSelectCorrectTargetDeviceandInitizeBlobWit ::testing::Matcher(_))) .Times(1); } - ASSERT_NO_THROW(exeNetwork = plugin->compile_model(model, config)); + OV_ASSERT_NO_THROW(exeNetwork = plugin->compile_model(model, config)); } TEST_P(DynamicOutputInferenceTest, CanInferWithOutputChangedFromDynamicOnAutoToStaticOnActualDevice) { @@ -111,10 +111,10 @@ TEST_P(DynamicOutputInferenceTest, CanInferWithOutputChangedFromDynamicOnAutoToS config.insert(ov::device::priorities(priorityList.as())); config.insert(ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)); std::shared_ptr exeNetwork; - ASSERT_NO_THROW(exeNetwork = plugin->compile_model(model, config)); + OV_ASSERT_NO_THROW(exeNetwork = plugin->compile_model(model, config)); std::shared_ptr infer_request; - ASSERT_NO_THROW(infer_request = exeNetwork->create_infer_request()); - ASSERT_NO_THROW(infer_request->infer()); + 
OV_ASSERT_NO_THROW(infer_request = exeNetwork->create_infer_request()); + OV_ASSERT_NO_THROW(infer_request->infer()); } const std::vector testConfigs = { diff --git a/src/plugins/auto/tests/unit/get_device_list.cpp b/src/plugins/auto/tests/unit/get_device_list.cpp index af5cebc82f2c99..7af02c37478e46 100644 --- a/src/plugins/auto/tests/unit/get_device_list.cpp +++ b/src/plugins/auto/tests/unit/get_device_list.cpp @@ -57,7 +57,7 @@ TEST_P(GetDeviceListTest, GetDeviceListTestWithExcludeList) { EXPECT_THROW(plugin->get_device_list({ov::device::priorities(priorityDevices)}), ov::Exception); } else { std::string result; - ASSERT_NO_THROW(result = plugin->get_device_list({ov::device::priorities(priorityDevices)})); + OV_ASSERT_NO_THROW(result = plugin->get_device_list({ov::device::priorities(priorityDevices)})); EXPECT_EQ(result, metaDevices); } } @@ -81,7 +81,7 @@ TEST_P(GetDeviceListTestWithNotInteldGPU, GetDeviceListTestWithExcludeList) { EXPECT_THROW(plugin->get_device_list({ov::device::priorities(priorityDevices)}), ov::Exception); } else { std::string result; - ASSERT_NO_THROW(result = plugin->get_device_list({ov::device::priorities(priorityDevices)})); + OV_ASSERT_NO_THROW(result = plugin->get_device_list({ov::device::priorities(priorityDevices)})); EXPECT_EQ(result, metaDevices); } } diff --git a/src/plugins/auto/tests/unit/include/auto_unit_test.hpp b/src/plugins/auto/tests/unit/include/auto_unit_test.hpp index e727aed6e39d18..0b39b8e57dc8d2 100644 --- a/src/plugins/auto/tests/unit/include/auto_unit_test.hpp +++ b/src/plugins/auto/tests/unit/include/auto_unit_test.hpp @@ -7,6 +7,7 @@ #include #include +#include #include #include "gmock_plugin.hpp" diff --git a/src/plugins/auto/tests/unit/infer_request_schedule_policy_test.cpp b/src/plugins/auto/tests/unit/infer_request_schedule_policy_test.cpp index 70dc61ed9aff95..cf1ccda20491d3 100644 --- a/src/plugins/auto/tests/unit/infer_request_schedule_policy_test.cpp +++ 
b/src/plugins/auto/tests/unit/infer_request_schedule_policy_test.cpp @@ -6,6 +6,7 @@ #include +#include "common_test_utils/test_assertions.hpp" #include "async_infer_request.hpp" #include "common.hpp" #include "cumulative_schedule.hpp" @@ -64,7 +65,7 @@ TEST_P(MockCumuSchedule, scheduleInferRequestBasedOnSchedulePolicy) { int expectedDevIndex = 0; while (true) { std::string actualSelectedDev; - ASSERT_NO_THROW(actualSelectedDev = schedule_to_next_device(devicesInfo, deviceIndexWithInferReq)); + OV_ASSERT_NO_THROW(actualSelectedDev = schedule_to_next_device(devicesInfo, deviceIndexWithInferReq)); if (numOfInferRequests[actualSelectedDev] > 0) { EXPECT_EQ(actualSelectedDev, expectedScheDevs[expectedDevIndex++]); // consume an available infer request on selected device diff --git a/src/plugins/auto/tests/unit/life_time_test.cpp b/src/plugins/auto/tests/unit/life_time_test.cpp index 8ee1a9153b485f..58a203154f6390 100644 --- a/src/plugins/auto/tests/unit/life_time_test.cpp +++ b/src/plugins/auto/tests/unit/life_time_test.cpp @@ -36,7 +36,7 @@ TEST_F(AutoLifeTimeTest, loaded_tensor) { // get Parameter config.insert(ov::device::priorities("GPU.0")); std::shared_ptr compiled_model; - ASSERT_NO_THROW(compiled_model = plugin->compile_model(model, config)); + OV_ASSERT_NO_THROW(compiled_model = plugin->compile_model(model, config)); auto request = compiled_model->create_infer_request(); for (auto& iter : request->get_inputs()) { auto tensor = request->get_tensor(iter); @@ -48,7 +48,7 @@ TEST_F(AutoLifeTimeTest, loaded_states) { // get Parameter config.insert(ov::device::priorities("GPU.0")); std::shared_ptr compiled_model; - ASSERT_NO_THROW(compiled_model = plugin->compile_model(model, config)); + OV_ASSERT_NO_THROW(compiled_model = plugin->compile_model(model, config)); auto request = compiled_model->create_infer_request(); auto states = request->query_state(); auto res_so = mock_states.front()._so; @@ -61,7 +61,7 @@ TEST_F(AutoLifeTimeTest, loaded_tensor_multi) { // get 
Parameter config.insert(ov::device::priorities("GPU.0")); std::shared_ptr compiled_model; - ASSERT_NO_THROW(compiled_model = plugin->compile_model(model, config)); + OV_ASSERT_NO_THROW(compiled_model = plugin->compile_model(model, config)); auto request = compiled_model->create_infer_request(); for (auto& iter : request->get_inputs()) { auto tensor = request->get_tensor(iter); @@ -75,7 +75,7 @@ TEST_F(AutoLifeTimeTest, loaded_states_bind_buffer) { config.insert(ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)); config.insert(ov::intel_auto::device_bind_buffer(true)); std::shared_ptr compiled_model; - ASSERT_NO_THROW(compiled_model = plugin->compile_model(model, config)); + OV_ASSERT_NO_THROW(compiled_model = plugin->compile_model(model, config)); auto request = compiled_model->create_infer_request(); auto states = request->query_state(); auto res_so = mock_states.front()._so; diff --git a/src/plugins/auto/tests/unit/log_utils_format_test.cpp b/src/plugins/auto/tests/unit/log_utils_format_test.cpp index 38c461e7c87ba1..a68cab7856743b 100644 --- a/src/plugins/auto/tests/unit/log_utils_format_test.cpp +++ b/src/plugins/auto/tests/unit/log_utils_format_test.cpp @@ -8,6 +8,8 @@ #include #include "utils/log_util.hpp" +#include "common_test_utils/test_assertions.hpp" + using namespace ov::mock_auto_plugin; using ::testing::_; class LogUtilsFormatTest : public ::testing::Test { @@ -27,7 +29,7 @@ class LogUtilsFormatTest : public ::testing::Test { TEST_F(LogUtilsFormatTest, callStacksTest) { EXPECT_CALL(*(HLogger), print(_)).Times(1); - ASSERT_NO_THROW(traceCallStacksTest()); + OV_ASSERT_NO_THROW(traceCallStacksTest()); } TEST_F(LogUtilsFormatTest, format_s) { diff --git a/src/plugins/auto/tests/unit/release_helper_test.cpp b/src/plugins/auto/tests/unit/release_helper_test.cpp index d92d4c7e8de81a..89c4654f58bc78 100644 --- a/src/plugins/auto/tests/unit/release_helper_test.cpp +++ b/src/plugins/auto/tests/unit/release_helper_test.cpp @@ -133,14 
+133,14 @@ TEST_P(AutoCompiledModelGetPropertyWithReleaseHelper, getPropertyTestAfterReleas config.insert(ov::device::priorities(ov::test::utils::DEVICE_CPU + std::string(",") + ov::test::utils::DEVICE_GPU)); std::shared_ptr exeNetwork; std::string result; - ASSERT_NO_THROW(exeNetwork = plugin->compile_model(model, config)); + OV_ASSERT_NO_THROW(exeNetwork = plugin->compile_model(model, config)); if (actSleep) { if (!cpuSleep) { - ASSERT_NO_THROW(result = exeNetwork->get_property(ov::model_name.name()).as()); + OV_ASSERT_NO_THROW(result = exeNetwork->get_property(ov::model_name.name()).as()); EXPECT_EQ(result, modelNameCpu); } } else { - ASSERT_NO_THROW(result = exeNetwork->get_property(ov::model_name.name()).as()); + OV_ASSERT_NO_THROW(result = exeNetwork->get_property(ov::model_name.name()).as()); } auto supported_config_keys = @@ -148,7 +148,7 @@ TEST_P(AutoCompiledModelGetPropertyWithReleaseHelper, getPropertyTestAfterReleas for (const auto& cfg : supported_config_keys) { if (cfg == ov::model_name) continue; - ASSERT_NO_THROW(exeNetwork->get_property(cfg).as()); + OV_ASSERT_NO_THROW(exeNetwork->get_property(cfg).as()); } } @@ -212,7 +212,7 @@ TEST_P(AutoReleaseHelperTest, releaseResource) { config.insert(ov::device::priorities(ov::test::utils::DEVICE_CPU + std::string(",") + ov::test::utils::DEVICE_GPU)); std::shared_ptr exeNetwork; if (cpuSuccess || accSuccess) { - ASSERT_NO_THROW(exeNetwork = plugin->compile_model(model, config)); + OV_ASSERT_NO_THROW(exeNetwork = plugin->compile_model(model, config)); if (!cpuSuccess) EXPECT_EQ(exeNetwork->get_property(ov::execution_devices.name()).as(), ov::test::utils::DEVICE_GPU); diff --git a/src/plugins/auto/tests/unit/runtime_fallback_test.cpp b/src/plugins/auto/tests/unit/runtime_fallback_test.cpp index 15d46c2e9f0a22..88eca787740e96 100644 --- a/src/plugins/auto/tests/unit/runtime_fallback_test.cpp +++ b/src/plugins/auto/tests/unit/runtime_fallback_test.cpp @@ -235,12 +235,12 @@ TEST_P(AutoRuntimeFallback, 
releaseResource) { std::shared_ptr exeNetwork; std::shared_ptr infer_request; - ASSERT_NO_THROW(exeNetwork = plugin->compile_model(model, config)); - ASSERT_NO_THROW(infer_request = exeNetwork->create_infer_request()); + OV_ASSERT_NO_THROW(exeNetwork = plugin->compile_model(model, config)); + OV_ASSERT_NO_THROW(infer_request = exeNetwork->create_infer_request()); if (expectThrow) { EXPECT_THROW(infer_request->infer(), ov::Exception); } else { - ASSERT_NO_THROW(infer_request->infer()); + OV_ASSERT_NO_THROW(infer_request->infer()); } } @@ -380,12 +380,12 @@ TEST_P(AutoCTPUTRuntimeFallback, ctputDeviceInferFailTest) { std::shared_ptr exeNetwork; std::shared_ptr infer_request; - ASSERT_NO_THROW(exeNetwork = plugin->compile_model(model, config)); - ASSERT_NO_THROW(infer_request = exeNetwork->create_infer_request()); + OV_ASSERT_NO_THROW(exeNetwork = plugin->compile_model(model, config)); + OV_ASSERT_NO_THROW(infer_request = exeNetwork->create_infer_request()); if (expectThrow) { EXPECT_THROW(infer_request->infer(), ov::Exception); } else { - ASSERT_NO_THROW(infer_request->infer()); + OV_ASSERT_NO_THROW(infer_request->infer()); } } diff --git a/src/plugins/auto/tests/unit/select_device_failed_test.cpp b/src/plugins/auto/tests/unit/select_device_failed_test.cpp index 43550d127d8ccf..2098bbb0b634c0 100644 --- a/src/plugins/auto/tests/unit/select_device_failed_test.cpp +++ b/src/plugins/auto/tests/unit/select_device_failed_test.cpp @@ -171,7 +171,7 @@ TEST_P(AutoLoadFailedTest, LoadCNNetWork) { .Times(loadSuccessCount); EXPECT_CALL(*mockIExeNet.get(), create_infer_request()).Times(loadSuccessCount * 2); if (continueRun) { - ASSERT_NO_THROW(plugin->compile_model(model, config)); + OV_ASSERT_NO_THROW(plugin->compile_model(model, config)); } else { ASSERT_THROW(plugin->compile_model(model, config), ov::Exception); } diff --git a/src/plugins/auto/tests/unit/startup_fallback_property_test.cpp b/src/plugins/auto/tests/unit/startup_fallback_property_test.cpp index 
38d155186f3e14..8b932289dc10ea 100644 --- a/src/plugins/auto/tests/unit/startup_fallback_property_test.cpp +++ b/src/plugins/auto/tests/unit/startup_fallback_property_test.cpp @@ -67,7 +67,7 @@ TEST_P(AutoStartupFallback, propertytest) { .Times(1); } - ASSERT_NO_THROW(plugin->compile_model(model, config)); + OV_ASSERT_NO_THROW(plugin->compile_model(model, config)); } const std::vector testConfigs = {ConfigParams{true, {{"ENABLE_STARTUP_FALLBACK", "YES"}}}, diff --git a/src/plugins/auto/tests/unit/stateful_model_test.cpp b/src/plugins/auto/tests/unit/stateful_model_test.cpp index b9423fac9f8e98..f7fe1db756a8ce 100644 --- a/src/plugins/auto/tests/unit/stateful_model_test.cpp +++ b/src/plugins/auto/tests/unit/stateful_model_test.cpp @@ -229,7 +229,7 @@ TEST_P(StatefulModelSupportedTest, CanFilterOutCorrectTargetDeviceWithStatefulMo if (expectedTimes < 0) { ASSERT_THROW(plugin->compile_model(model, config), ov::Exception); } else { - ASSERT_NO_THROW(exeNetwork = plugin->compile_model(model, config)); + OV_ASSERT_NO_THROW(exeNetwork = plugin->compile_model(model, config)); std::this_thread::sleep_for(std::chrono::milliseconds(500)); EXPECT_EQ(exeNetwork->get_property(ov::execution_devices.name()).as(), expectedExecuteDev); } diff --git a/src/plugins/auto_batch/tests/unit/async_infer_request_test.cpp b/src/plugins/auto_batch/tests/unit/async_infer_request_test.cpp index caced95268df81..b58f4f7727a807 100644 --- a/src/plugins/auto_batch/tests/unit/async_infer_request_test.cpp +++ b/src/plugins/auto_batch/tests/unit/async_infer_request_test.cpp @@ -135,7 +135,7 @@ class AutoBatchAsyncInferRequestTest : public ::testing::TestWithParam>(reshaped, m_hardware_plugin); m_compile_model_with_batch = {m_i_compile_model_with_batch, {}}; - ASSERT_NO_THROW(m_auto_batch_compile_model = std::make_shared(m_model->clone(), + OV_ASSERT_NO_THROW(m_auto_batch_compile_model = std::make_shared(m_model->clone(), m_auto_batch_plugin, m_config, m_device_info, diff --git 
a/src/plugins/auto_batch/tests/unit/compile_model_create_infer_request_test.cpp b/src/plugins/auto_batch/tests/unit/compile_model_create_infer_request_test.cpp index 2e38db1249613b..4427d4910d4b8e 100644 --- a/src/plugins/auto_batch/tests/unit/compile_model_create_infer_request_test.cpp +++ b/src/plugins/auto_batch/tests/unit/compile_model_create_infer_request_test.cpp @@ -85,7 +85,7 @@ class CompileModelCreateInferRequestTest : public ::testing::TestWithParam(m_model->clone(), m_auto_batch_plugin, m_config, diff --git a/src/plugins/auto_batch/tests/unit/compile_model_get_property_test.cpp b/src/plugins/auto_batch/tests/unit/compile_model_get_property_test.cpp index 890671bd3391ac..f8f4db37a19f4e 100644 --- a/src/plugins/auto_batch/tests/unit/compile_model_get_property_test.cpp +++ b/src/plugins/auto_batch/tests/unit/compile_model_get_property_test.cpp @@ -87,7 +87,7 @@ class CompileModelGetPropertyTest : public ::testing::TestWithParam(200))}, {ov::device::priorities("CPU(16)")}}; - ASSERT_NO_THROW(auto_batch_compile_model = m_plugin->compile_model(m_model, configs)); + OV_ASSERT_NO_THROW(auto_batch_compile_model = m_plugin->compile_model(m_model, configs)); std::string network_name = m_model.get()->get_name(); std::vector supported_props = {ov::optimal_batch_size, ov::cache_dir}; @@ -116,7 +116,7 @@ TEST_P(CompileModelGetPropertyTest, CompileModelGetPropertyTestCase) { if (m_throw_exception) ASSERT_ANY_THROW(auto_batch_compile_model->get_property(m_properity_name)); else - ASSERT_NO_THROW(auto_batch_compile_model->get_property(m_properity_name)); + OV_ASSERT_NO_THROW(auto_batch_compile_model->get_property(m_properity_name)); } const std::vector compile_model_get_property_param_test = { diff --git a/src/plugins/auto_batch/tests/unit/compile_model_get_runtime_model_test.cpp b/src/plugins/auto_batch/tests/unit/compile_model_get_runtime_model_test.cpp index ef8157ada605c2..fc5a1b433e76ec 100644 --- 
a/src/plugins/auto_batch/tests/unit/compile_model_get_runtime_model_test.cpp +++ b/src/plugins/auto_batch/tests/unit/compile_model_get_runtime_model_test.cpp @@ -71,10 +71,10 @@ class CompileModelGetRuntimeModelTest : public ::testing::Test { const ov::AnyMap configs = {{ov::auto_batch_timeout(static_cast(200))}, {ov::device::priorities("CPU(16)")}}; - ASSERT_NO_THROW(m_auto_batch_compile_model = m_plugin->compile_model(m_model, configs)); + OV_ASSERT_NO_THROW(m_auto_batch_compile_model = m_plugin->compile_model(m_model, configs)); } }; TEST_F(CompileModelGetRuntimeModelTest, CompileModelGetRuntimeModelTestCase) { - ASSERT_NO_THROW(m_auto_batch_compile_model->get_runtime_model()); + OV_ASSERT_NO_THROW(m_auto_batch_compile_model->get_runtime_model()); } diff --git a/src/plugins/auto_batch/tests/unit/compile_model_set_property_test.cpp b/src/plugins/auto_batch/tests/unit/compile_model_set_property_test.cpp index 2c5f08c12bc410..637d3a272e1def 100644 --- a/src/plugins/auto_batch/tests/unit/compile_model_set_property_test.cpp +++ b/src/plugins/auto_batch/tests/unit/compile_model_set_property_test.cpp @@ -91,7 +91,7 @@ class CompileModelSetPropertyTest : public ::testing::TestWithParam(200))}, {ov::device::priorities("CPU(16)")}}; - ASSERT_NO_THROW(m_auto_batch_compile_model = m_plugin->compile_model(m_model, configs)); + OV_ASSERT_NO_THROW(m_auto_batch_compile_model = m_plugin->compile_model(m_model, configs)); } }; @@ -99,7 +99,7 @@ TEST_P(CompileModelSetPropertyTest, CompileModelSetPropertyTestCase) { if (m_throw_exception) ASSERT_ANY_THROW(m_auto_batch_compile_model->set_property(m_properities)); else - ASSERT_NO_THROW(m_auto_batch_compile_model->set_property(m_properities)); + OV_ASSERT_NO_THROW(m_auto_batch_compile_model->set_property(m_properities)); } const std::vector compile_model_set_property_param_test = { diff --git a/src/plugins/auto_batch/tests/unit/mock_common.hpp b/src/plugins/auto_batch/tests/unit/mock_common.hpp index c8ad03f180fc40..f2e72fdb61ca91 
100644 --- a/src/plugins/auto_batch/tests/unit/mock_common.hpp +++ b/src/plugins/auto_batch/tests/unit/mock_common.hpp @@ -6,7 +6,7 @@ #include #include - +#include "common_test_utils/test_assertions.hpp" #include "compiled_model.hpp" #include "openvino/runtime/make_tensor.hpp" #include "plugin.hpp" diff --git a/src/plugins/auto_batch/tests/unit/plugin_compile_model_test.cpp b/src/plugins/auto_batch/tests/unit/plugin_compile_model_test.cpp index ce91cbb827107b..9569a9adb19f48 100644 --- a/src/plugins/auto_batch/tests/unit/plugin_compile_model_test.cpp +++ b/src/plugins/auto_batch/tests/unit/plugin_compile_model_test.cpp @@ -103,12 +103,12 @@ class PluginCompileModelTest : public ::testing::TestWithParamcompile_model(m_model, m_plugin_properities)); + OV_ASSERT_NO_THROW(m_plugin->compile_model(m_model, m_plugin_properities)); } TEST_P(PluginCompileModelTest, PluginCompileModelWithRemoteContextTestCase) { m_model = ov::test::utils::make_multi_single_conv(); - ASSERT_NO_THROW(m_plugin->compile_model(m_model, m_plugin_properities, m_remote_context)); + OV_ASSERT_NO_THROW(m_plugin->compile_model(m_model, m_plugin_properities, m_remote_context)); } TEST_P(PluginCompileModelTest, PluginCompileModelBatchedModelTestCase) { @@ -117,7 +117,7 @@ TEST_P(PluginCompileModelTest, PluginCompileModelBatchedModelTestCase) { batch.set_symbol(std::make_shared()); auto p_shape = ov::PartialShape{batch, 1, 32, 32}; m_model->reshape(p_shape); - ASSERT_NO_THROW(m_plugin->compile_model(m_model, m_plugin_properities)); + OV_ASSERT_NO_THROW(m_plugin->compile_model(m_model, m_plugin_properities)); } TEST_P(PluginCompileModelTest, PluginCompileModelBatchedModelWithRemoteContextTestCase) { @@ -126,7 +126,7 @@ TEST_P(PluginCompileModelTest, PluginCompileModelBatchedModelWithRemoteContextTe batch.set_symbol(std::make_shared()); auto p_shape = ov::PartialShape{batch, 1, 32, 32}; m_model->reshape(p_shape); - ASSERT_NO_THROW(m_plugin->compile_model(m_model, m_plugin_properities, m_remote_context)); + 
OV_ASSERT_NO_THROW(m_plugin->compile_model(m_model, m_plugin_properities, m_remote_context)); } const std::vector plugin_compile_model_param_test = { diff --git a/src/plugins/auto_batch/tests/unit/plugin_get_property_test.cpp b/src/plugins/auto_batch/tests/unit/plugin_get_property_test.cpp index c8e13959264141..9ea94cf77a2401 100644 --- a/src/plugins/auto_batch/tests/unit/plugin_get_property_test.cpp +++ b/src/plugins/auto_batch/tests/unit/plugin_get_property_test.cpp @@ -51,7 +51,7 @@ TEST_P(GetPropertyTest, GetPropertyTestCase) { ASSERT_ANY_THROW(m_plugin->get_property(m_property_name, options)); } else { ov::Any value; - ASSERT_NO_THROW(value = m_plugin->get_property(m_property_name, options)); + OV_ASSERT_NO_THROW(value = m_plugin->get_property(m_property_name, options)); if (m_property_name == ov::device::full_name.name()) { EXPECT_EQ(value.as(), "BATCH"); return; diff --git a/src/plugins/auto_batch/tests/unit/plugin_query_model_test.cpp b/src/plugins/auto_batch/tests/unit/plugin_query_model_test.cpp index 724a5629eea775..f59869275b6fdb 100644 --- a/src/plugins/auto_batch/tests/unit/plugin_query_model_test.cpp +++ b/src/plugins/auto_batch/tests/unit/plugin_query_model_test.cpp @@ -58,7 +58,7 @@ TEST_P(QueryModelTest, QueryModelTestCase) { if (m_throw_exception) { ASSERT_ANY_THROW(m_plugin->query_model(m_model, m_properties)); } else { - ASSERT_NO_THROW(m_plugin->query_model(m_model, m_properties)); + OV_ASSERT_NO_THROW(m_plugin->query_model(m_model, m_properties)); } } diff --git a/src/plugins/auto_batch/tests/unit/plugin_set_property_test.cpp b/src/plugins/auto_batch/tests/unit/plugin_set_property_test.cpp index dd6b1ff51e866a..f96631bb3d7db8 100644 --- a/src/plugins/auto_batch/tests/unit/plugin_set_property_test.cpp +++ b/src/plugins/auto_batch/tests/unit/plugin_set_property_test.cpp @@ -44,14 +44,14 @@ class SetPropertyTest : public ::testing::TestWithParam { TEST_P(SetPropertyTest, SetPropertyTestCase) { if (m_properties.size() == 0) { - 
ASSERT_NO_THROW(m_plugin->set_property(m_properties)); + OV_ASSERT_NO_THROW(m_plugin->set_property(m_properties)); return; } if (m_throw_exception) { ASSERT_ANY_THROW(m_plugin->set_property(m_properties)); } else { - ASSERT_NO_THROW(m_plugin->set_property(m_properties)); + OV_ASSERT_NO_THROW(m_plugin->set_property(m_properties)); } } diff --git a/src/plugins/auto_batch/tests/unit/sync_infer_request_test.cpp b/src/plugins/auto_batch/tests/unit/sync_infer_request_test.cpp index b769386120ae8d..3d9185559497f9 100644 --- a/src/plugins/auto_batch/tests/unit/sync_infer_request_test.cpp +++ b/src/plugins/auto_batch/tests/unit/sync_infer_request_test.cpp @@ -101,7 +101,7 @@ class AutoBatchRequestTest : public ::testing::TestWithParam>(m_model, m_auto_batch_plugin); m_compile_model_with_batch = {m_i_compile_model_with_batch, {}}; - ASSERT_NO_THROW(m_auto_batch_compile_model = + OV_ASSERT_NO_THROW(m_auto_batch_compile_model = std::make_shared(m_model->clone(), m_auto_batch_plugin, m_config, diff --git a/src/plugins/hetero/tests/functional/hetero_tests.hpp b/src/plugins/hetero/tests/functional/hetero_tests.hpp index 3890a91d21495a..98c2d487761b73 100644 --- a/src/plugins/hetero/tests/functional/hetero_tests.hpp +++ b/src/plugins/hetero/tests/functional/hetero_tests.hpp @@ -7,6 +7,7 @@ #include +#include "common_test_utils/test_assertions.hpp" #include "openvino/runtime/core.hpp" #include "openvino/runtime/iplugin.hpp" diff --git a/src/plugins/hetero/tests/functional/properties_tests.cpp b/src/plugins/hetero/tests/functional/properties_tests.cpp index 474ada15ca69c6..c71f5597df2e35 100644 --- a/src/plugins/hetero/tests/functional/properties_tests.cpp +++ b/src/plugins/hetero/tests/functional/properties_tests.cpp @@ -49,13 +49,13 @@ TEST_F(HeteroTests, set_property_ModelDistributionPolicy) { std::set value = {}; std::set model_policy = {ov::hint::ModelDistributionPolicy::PIPELINE_PARALLEL}; - ASSERT_NO_THROW(core.set_property("HETERO", 
ov::hint::model_distribution_policy(model_policy))); - ASSERT_NO_THROW(value = core.get_property("HETERO", ov::hint::model_distribution_policy)); + OV_ASSERT_NO_THROW(core.set_property("HETERO", ov::hint::model_distribution_policy(model_policy))); + OV_ASSERT_NO_THROW(value = core.get_property("HETERO", ov::hint::model_distribution_policy)); ASSERT_EQ(model_policy, value); model_policy = {}; - ASSERT_NO_THROW(core.set_property("HETERO", ov::hint::model_distribution_policy(model_policy))); - ASSERT_NO_THROW(value = core.get_property("HETERO", ov::hint::model_distribution_policy)); + OV_ASSERT_NO_THROW(core.set_property("HETERO", ov::hint::model_distribution_policy(model_policy))); + OV_ASSERT_NO_THROW(value = core.get_property("HETERO", ov::hint::model_distribution_policy)); ASSERT_EQ(model_policy, value); } \ No newline at end of file diff --git a/src/plugins/hetero/tests/functional/query_model_tests.cpp b/src/plugins/hetero/tests/functional/query_model_tests.cpp index fab5e78220a82f..7065255bf9acd3 100644 --- a/src/plugins/hetero/tests/functional/query_model_tests.cpp +++ b/src/plugins/hetero/tests/functional/query_model_tests.cpp @@ -103,7 +103,7 @@ TEST_F(HeteroTests, query_model_on_independent_parameter) { ov::SupportedOpsMap supported_ops; const std::string dev_name = "MOCK0.1"; const auto model = create_model_with_independent_parameter(); - ASSERT_NO_THROW(supported_ops = core.query_model(model, "HETERO", {ov::device::priorities(dev_name)})); + OV_ASSERT_NO_THROW(supported_ops = core.query_model(model, "HETERO", {ov::device::priorities(dev_name)})); std::unordered_set names; for (const auto& op : model->get_ops()) { names.insert(op->get_friendly_name()); diff --git a/src/plugins/hetero/tests/unit/subgraph_collector.cpp b/src/plugins/hetero/tests/unit/subgraph_collector.cpp index ba97862580e21c..574de2f06bfa97 100644 --- a/src/plugins/hetero/tests/unit/subgraph_collector.cpp +++ b/src/plugins/hetero/tests/unit/subgraph_collector.cpp @@ -7,6 +7,7 @@ #include 
#include "common_test_utils/graph_comparator.hpp" +#include "common_test_utils/test_assertions.hpp" #include "op/device_subgraph.hpp" #include "openvino/core/except.hpp" #include "openvino/op/ops.hpp" @@ -490,7 +491,7 @@ TEST_F(SubgraphCollectorTest, submodel_with_different_affinity_parameter) { {"res", "MOCK.0"}, }; auto supported_ops = supported_ops_with_affinity; - ASSERT_NO_THROW(ov::hetero::mask_model_subgraphs_by_ops(m_model, supported_ops, false, "TEST")); + OV_ASSERT_NO_THROW(ov::hetero::mask_model_subgraphs_by_ops(m_model, supported_ops, false, "TEST")); } TEST_F(SubgraphCollectorTest, submodel_with_constant_subgraphs) { @@ -609,6 +610,7 @@ TEST_F(SubgraphCollectorTest, merge_independent_submodel) { actual_submodels.push_back(std::make_shared(actual_subgraph._results, actual_subgraph._parameters)); } // Merge submodels into one model back - ASSERT_NO_THROW(ov::hetero::merge_submodels(actual_submodels, actual_mapping_info._submodels_input_to_prev_output)); + OV_ASSERT_NO_THROW( + ov::hetero::merge_submodels(actual_submodels, actual_mapping_info._submodels_input_to_prev_output)); ASSERT_EQ(1, actual_submodels.size()); } \ No newline at end of file diff --git a/src/plugins/intel_cpu/tests/functional/custom/behavior/ov_executable_network/properties.cpp b/src/plugins/intel_cpu/tests/functional/custom/behavior/ov_executable_network/properties.cpp index 5cf5a668f7512e..365e7c56dcef82 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/behavior/ov_executable_network/properties.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/behavior/ov_executable_network/properties.cpp @@ -46,7 +46,7 @@ TEST_F(OVClassConfigTestCPU, smoke_CpuExecNetworkSupportedPropertiesAreAvailable ov::Core ie; std::vector supportedProperties; ov::CompiledModel compiledModel = ie.compile_model(model, deviceName); - ASSERT_NO_THROW(supportedProperties = compiledModel.get_property(ov::supported_properties)); + OV_ASSERT_NO_THROW(supportedProperties = 
compiledModel.get_property(ov::supported_properties)); // the order of supported properties does not matter, sort to simplify the comparison std::sort(expectedSupportedProperties.begin(), expectedSupportedProperties.end()); std::sort(supportedProperties.begin(), supportedProperties.end()); @@ -60,10 +60,10 @@ TEST_F(OVClassConfigTestCPU, smoke_CpuExecNetworkGetROPropertiesDoesNotThrow) { ov::CompiledModel compiledModel = ie.compile_model(model, deviceName); - ASSERT_NO_THROW(properties = compiledModel.get_property(ov::supported_properties)); + OV_ASSERT_NO_THROW(properties = compiledModel.get_property(ov::supported_properties)); for (const auto& property : properties) { - ASSERT_NO_THROW((void)compiledModel.get_property(property)); + OV_ASSERT_NO_THROW((void)compiledModel.get_property(property)); } } @@ -73,7 +73,7 @@ TEST_F(OVClassConfigTestCPU, smoke_CpuExecNetworkSetROPropertiesThrow) { ov::CompiledModel compiledModel = ie.compile_model(model, deviceName); - ASSERT_NO_THROW(properties = compiledModel.get_property(ov::supported_properties)); + OV_ASSERT_NO_THROW(properties = compiledModel.get_property(ov::supported_properties)); for (auto it = properties.begin(); it != properties.end(); ++it) { ASSERT_TRUE(it != properties.end()); @@ -87,11 +87,11 @@ TEST_F(OVClassConfigTestCPU, smoke_CpuExecNetworkCheckCoreStreamsHasHigherPriori int32_t streams = 1; // throughput hint should apply higher number of streams int32_t value = 0; - ASSERT_NO_THROW(ie.set_property(deviceName, ov::num_streams(streams))); - ASSERT_NO_THROW(ie.set_property(deviceName, ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT))); + OV_ASSERT_NO_THROW(ie.set_property(deviceName, ov::num_streams(streams))); + OV_ASSERT_NO_THROW(ie.set_property(deviceName, ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT))); ov::CompiledModel compiledModel = ie.compile_model(model, deviceName); - ASSERT_NO_THROW(value = compiledModel.get_property(ov::num_streams)); + 
OV_ASSERT_NO_THROW(value = compiledModel.get_property(ov::num_streams)); ASSERT_EQ(streams, value); } @@ -100,11 +100,11 @@ TEST_F(OVClassConfigTestCPU, smoke_CpuExecNetworkCheckCoreStreamsHasHigherPriori int32_t streams = ov::get_number_of_cpu_cores(); // latency hint should apply lower number of streams int32_t value = 0; - ASSERT_NO_THROW(ie.set_property(deviceName, ov::num_streams(streams))); - ASSERT_NO_THROW(ie.set_property(deviceName, ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY))); + OV_ASSERT_NO_THROW(ie.set_property(deviceName, ov::num_streams(streams))); + OV_ASSERT_NO_THROW(ie.set_property(deviceName, ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY))); ov::CompiledModel compiledModel = ie.compile_model(model, deviceName); - ASSERT_NO_THROW(value = compiledModel.get_property(ov::num_streams)); + OV_ASSERT_NO_THROW(value = compiledModel.get_property(ov::num_streams)); ASSERT_EQ(streams, value); } @@ -113,13 +113,13 @@ TEST_F(OVClassConfigTestCPU, smoke_CpuExecNetworkCheckModelStreamsHasHigherPrior int32_t streams = ov::get_number_of_cpu_cores(); // latency hint should apply lower number of streams int32_t value = 0; - ASSERT_NO_THROW(ie.set_property(deviceName, ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY))); + OV_ASSERT_NO_THROW(ie.set_property(deviceName, ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY))); ov::AnyMap config; config[ov::num_streams.name()] = streams; ov::CompiledModel compiledModel = ie.compile_model(model, deviceName, config); - ASSERT_NO_THROW(value = compiledModel.get_property(ov::num_streams)); + OV_ASSERT_NO_THROW(value = compiledModel.get_property(ov::num_streams)); ASSERT_EQ(streams, value); } @@ -134,7 +134,7 @@ TEST_F(OVClassConfigTestCPU, smoke_CpuExecNetworkCheckModelStreamsHasHigherPrior ov::CompiledModel compiledModel = ie.compile_model(model, deviceName, config); - ASSERT_NO_THROW(value = compiledModel.get_property(ov::num_streams)); + OV_ASSERT_NO_THROW(value = 
compiledModel.get_property(ov::num_streams)); ASSERT_EQ(streams, value); } @@ -143,13 +143,13 @@ TEST_F(OVClassConfigTestCPU, smoke_CpuExecNetworkCheckModelZeroStreams) { int32_t streams = 0; int32_t value = -1; - ASSERT_NO_THROW(ie.set_property(deviceName, ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY))); + OV_ASSERT_NO_THROW(ie.set_property(deviceName, ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY))); ov::AnyMap config; config[ov::num_streams.name()] = streams; ov::CompiledModel compiledModel = ie.compile_model(model, deviceName, config); - ASSERT_NO_THROW(value = compiledModel.get_property(ov::num_streams)); + OV_ASSERT_NO_THROW(value = compiledModel.get_property(ov::num_streams)); ASSERT_EQ(streams, value); } @@ -158,7 +158,7 @@ TEST_F(OVClassConfigTestCPU, smoke_CpuExecNetworkCheckSparseWeigthsDecompression ov::Core core; core.set_property(deviceName, ov::intel_cpu::sparse_weights_decompression_rate(0.8)); - ASSERT_NO_THROW(ov::CompiledModel compiledModel = core.compile_model(model, deviceName)); + OV_ASSERT_NO_THROW(ov::CompiledModel compiledModel = core.compile_model(model, deviceName)); } TEST_F(OVClassConfigTestCPU, smoke_CpuExecNetworkCheckDynamicQuantizationGroupSize) { @@ -168,7 +168,7 @@ TEST_F(OVClassConfigTestCPU, smoke_CpuExecNetworkCheckDynamicQuantizationGroupSi ov::CompiledModel compiledModel = core.compile_model(model, deviceName); size_t groupSize = 0; - ASSERT_NO_THROW(groupSize = compiledModel.get_property(ov::hint::dynamic_quantization_group_size)); + OV_ASSERT_NO_THROW(groupSize = compiledModel.get_property(ov::hint::dynamic_quantization_group_size)); ASSERT_EQ(groupSize, 64); } @@ -179,7 +179,7 @@ TEST_F(OVClassConfigTestCPU, smoke_CpuExecNetworkCheckKVCachePrecision) { ov::CompiledModel compiledModel = core.compile_model(model, deviceName); auto kv_cache_precision_value = ov::element::undefined; - ASSERT_NO_THROW(kv_cache_precision_value = compiledModel.get_property(ov::hint::kv_cache_precision)); + 
OV_ASSERT_NO_THROW(kv_cache_precision_value = compiledModel.get_property(ov::hint::kv_cache_precision)); ASSERT_EQ(kv_cache_precision_value, ov::element::f32); } @@ -200,7 +200,7 @@ TEST_F(OVClassConfigTestCPU, smoke_CpuExecNetworkCheckExecutionModeIsAvailableIn ov::Core ie; std::vector ie_properties; - ASSERT_NO_THROW(ie_properties = ie.get_property(deviceName, ov::supported_properties)); + OV_ASSERT_NO_THROW(ie_properties = ie.get_property(deviceName, ov::supported_properties)); const auto ie_exec_mode_it = find(ie_properties.begin(), ie_properties.end(), ov::hint::execution_mode); ASSERT_NE(ie_exec_mode_it, ie_properties.end()); ASSERT_TRUE(ie_exec_mode_it->is_mutable()); @@ -209,7 +209,7 @@ TEST_F(OVClassConfigTestCPU, smoke_CpuExecNetworkCheckExecutionModeIsAvailableIn ov::CompiledModel compiledModel = ie.compile_model(model, deviceName, config); std::vector model_properties; - ASSERT_NO_THROW(model_properties = compiledModel.get_property(ov::supported_properties)); + OV_ASSERT_NO_THROW(model_properties = compiledModel.get_property(ov::supported_properties)); const auto model_exec_mode_it = find(model_properties.begin(), model_properties.end(), ov::hint::execution_mode); ASSERT_NE(model_exec_mode_it, model_properties.end()); ASSERT_FALSE(model_exec_mode_it->is_mutable()); @@ -219,13 +219,13 @@ TEST_F(OVClassConfigTestCPU, smoke_CpuExecNetworkCheckModelInferencePrecisionHas ov::Core ie; auto inference_precision_value = ov::element::undefined; - ASSERT_NO_THROW(ie.set_property("CPU", ov::hint::inference_precision(ov::element::f32))); + OV_ASSERT_NO_THROW(ie.set_property("CPU", ov::hint::inference_precision(ov::element::f32))); ov::AnyMap config; config[ov::hint::inference_precision.name()] = bf16_if_can_be_emulated; ov::CompiledModel compiledModel = ie.compile_model(model, deviceName, config); - ASSERT_NO_THROW(inference_precision_value = compiledModel.get_property(ov::hint::inference_precision)); + OV_ASSERT_NO_THROW(inference_precision_value = 
compiledModel.get_property(ov::hint::inference_precision)); ASSERT_EQ(inference_precision_value, bf16_if_can_be_emulated); } @@ -234,16 +234,16 @@ TEST_F(OVClassConfigTestCPU, smoke_CpuExecNetworkCheckCoreInferencePrecisionHasH auto execution_mode_value = ov::hint::ExecutionMode::ACCURACY; auto inference_precision_value = ov::element::undefined; - ASSERT_NO_THROW(ie.set_property("CPU", ov::hint::inference_precision(ov::element::f32))); + OV_ASSERT_NO_THROW(ie.set_property("CPU", ov::hint::inference_precision(ov::element::f32))); ov::AnyMap config; config[ov::hint::execution_mode.name()] = ov::hint::ExecutionMode::PERFORMANCE; ov::CompiledModel compiledModel = ie.compile_model(model, deviceName, config); - ASSERT_NO_THROW(execution_mode_value = compiledModel.get_property(ov::hint::execution_mode)); + OV_ASSERT_NO_THROW(execution_mode_value = compiledModel.get_property(ov::hint::execution_mode)); ASSERT_EQ(execution_mode_value, ov::hint::ExecutionMode::PERFORMANCE); - ASSERT_NO_THROW(inference_precision_value = compiledModel.get_property(ov::hint::inference_precision)); + OV_ASSERT_NO_THROW(inference_precision_value = compiledModel.get_property(ov::hint::inference_precision)); ASSERT_EQ(inference_precision_value, ov::element::f32); } @@ -253,16 +253,16 @@ TEST_F(OVClassConfigTestCPU, smoke_CpuExecNetworkCheckModelInferencePrecisionHas auto inference_precision_value = ov::element::undefined; const auto inference_precision_expected = bf16_if_can_be_emulated; - ASSERT_NO_THROW(ie.set_property("CPU", ov::hint::execution_mode(ov::hint::ExecutionMode::ACCURACY))); + OV_ASSERT_NO_THROW(ie.set_property("CPU", ov::hint::execution_mode(ov::hint::ExecutionMode::ACCURACY))); ov::AnyMap config; config[ov::hint::inference_precision.name()] = inference_precision_expected; ov::CompiledModel compiledModel = ie.compile_model(model, deviceName, config); - ASSERT_NO_THROW(execution_mode_value = compiledModel.get_property(ov::hint::execution_mode)); + 
OV_ASSERT_NO_THROW(execution_mode_value = compiledModel.get_property(ov::hint::execution_mode)); ASSERT_EQ(execution_mode_value, ov::hint::ExecutionMode::ACCURACY); - ASSERT_NO_THROW(inference_precision_value = compiledModel.get_property(ov::hint::inference_precision)); + OV_ASSERT_NO_THROW(inference_precision_value = compiledModel.get_property(ov::hint::inference_precision)); ASSERT_EQ(inference_precision_value, inference_precision_expected); } @@ -274,8 +274,8 @@ TEST_F(OVClassConfigTestCPU, smoke_CpuExecNetworkCheckLogLevel) { ov::AnyMap config; ov::Any value; ov::CompiledModel compiledModel; - ASSERT_NO_THROW(compiledModel = ie.compile_model(model, deviceName, config)); - ASSERT_NO_THROW(value = compiledModel.get_property(ov::log::level)); + OV_ASSERT_NO_THROW(compiledModel = ie.compile_model(model, deviceName, config)); + OV_ASSERT_NO_THROW(value = compiledModel.get_property(ov::log::level)); ASSERT_EQ(value.as(), ov::log::Level::NO); } //check set and get @@ -291,17 +291,17 @@ TEST_F(OVClassConfigTestCPU, smoke_CpuExecNetworkCheckLogLevel) { ov::Any value; ov::CompiledModel compiledModel; ov::AnyMap config{ov::log::level(logLevels[i])}; - ASSERT_NO_THROW(compiledModel = ie.compile_model(model, deviceName, config)); - ASSERT_NO_THROW(value = compiledModel.get_property(ov::log::level)); + OV_ASSERT_NO_THROW(compiledModel = ie.compile_model(model, deviceName, config)); + OV_ASSERT_NO_THROW(value = compiledModel.get_property(ov::log::level)); ASSERT_EQ(value.as(), logLevels[i]); } for (unsigned int i = 0; i < logLevels.size(); i++) { ov::Any value; ov::CompiledModel compiledModel; - ASSERT_NO_THROW(ie.set_property(deviceName, ov::log::level(logLevels[i]))); - ASSERT_NO_THROW(compiledModel = ie.compile_model(model, deviceName)); - ASSERT_NO_THROW(value = compiledModel.get_property(ov::log::level)); + OV_ASSERT_NO_THROW(ie.set_property(deviceName, ov::log::level(logLevels[i]))); + OV_ASSERT_NO_THROW(compiledModel = ie.compile_model(model, deviceName)); + 
OV_ASSERT_NO_THROW(value = compiledModel.get_property(ov::log::level)); ASSERT_EQ(value.as(), logLevels[i]); } } @@ -311,8 +311,8 @@ TEST_F(OVClassConfigTestCPU, smoke_CpuExecNetworkCheckCPUExecutionDevice) { ov::Any value; ov::CompiledModel compiledModel; - ASSERT_NO_THROW(compiledModel = ie.compile_model(model, deviceName)); - ASSERT_NO_THROW(value = compiledModel.get_property(ov::execution_devices)); + OV_ASSERT_NO_THROW(compiledModel = ie.compile_model(model, deviceName)); + OV_ASSERT_NO_THROW(value = compiledModel.get_property(ov::execution_devices)); ASSERT_EQ(value.as(), "CPU"); } diff --git a/src/plugins/intel_cpu/tests/functional/custom/behavior/ov_plugin/properties.cpp b/src/plugins/intel_cpu/tests/functional/custom/behavior/ov_plugin/properties.cpp index 38d4f1a38a1516..904d2b81dc05b6 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/behavior/ov_plugin/properties.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/behavior/ov_plugin/properties.cpp @@ -60,7 +60,7 @@ TEST_F(OVClassConfigTestCPU, smoke_PluginAllSupportedPropertiesAreAvailable) { ov::Core ie; std::vector supportedProperties; - ASSERT_NO_THROW(supportedProperties = ie.get_property("CPU", ov::supported_properties)); + OV_ASSERT_NO_THROW(supportedProperties = ie.get_property("CPU", ov::supported_properties)); // the order of supported properties does not matter, sort to simplify the comparison std::sort(expectedSupportedProperties.begin(), expectedSupportedProperties.end()); std::sort(supportedProperties.begin(), supportedProperties.end()); @@ -72,10 +72,10 @@ TEST_F(OVClassConfigTestCPU, smoke_PluginGetPropertiesDoesNotThrow) { ov::Core ie; std::vector properties; - ASSERT_NO_THROW(properties = ie.get_property("CPU", ov::supported_properties)); + OV_ASSERT_NO_THROW(properties = ie.get_property("CPU", ov::supported_properties)); for (const auto& property : properties) { - ASSERT_NO_THROW((void)ie.get_property("CPU", property)); + OV_ASSERT_NO_THROW((void)ie.get_property("CPU", 
property)); } } @@ -83,7 +83,7 @@ TEST_F(OVClassConfigTestCPU, smoke_PluginSetROPropertiesThrow) { ov::Core ie; std::vector properties; - ASSERT_NO_THROW(properties = ie.get_property("CPU", ov::supported_properties)); + OV_ASSERT_NO_THROW(properties = ie.get_property("CPU", ov::supported_properties)); for (const auto& property : properties) { if (property.is_mutable()) @@ -98,14 +98,14 @@ TEST_F(OVClassConfigTestCPU, smoke_PluginSetConfigInferenceNumThreads) { int32_t value = 0; int32_t num_threads = 1; - ASSERT_NO_THROW(ie.set_property("CPU", ov::inference_num_threads(num_threads))); - ASSERT_NO_THROW(value = ie.get_property("CPU", ov::inference_num_threads)); + OV_ASSERT_NO_THROW(ie.set_property("CPU", ov::inference_num_threads(num_threads))); + OV_ASSERT_NO_THROW(value = ie.get_property("CPU", ov::inference_num_threads)); ASSERT_EQ(num_threads, value); num_threads = 4; - ASSERT_NO_THROW(ie.set_property("CPU", ov::inference_num_threads(num_threads))); - ASSERT_NO_THROW(value = ie.get_property("CPU", ov::inference_num_threads)); + OV_ASSERT_NO_THROW(ie.set_property("CPU", ov::inference_num_threads(num_threads))); + OV_ASSERT_NO_THROW(value = ie.get_property("CPU", ov::inference_num_threads)); ASSERT_EQ(num_threads, value); } @@ -114,14 +114,14 @@ TEST_F(OVClassConfigTestCPU, smoke_PluginSetConfigModelDistributionPolicy) { std::set value = {}; std::set model_policy = {ov::hint::ModelDistributionPolicy::TENSOR_PARALLEL}; - ASSERT_NO_THROW(ie.set_property("CPU", ov::hint::model_distribution_policy(model_policy))); - ASSERT_NO_THROW(value = ie.get_property("CPU", ov::hint::model_distribution_policy)); + OV_ASSERT_NO_THROW(ie.set_property("CPU", ov::hint::model_distribution_policy(model_policy))); + OV_ASSERT_NO_THROW(value = ie.get_property("CPU", ov::hint::model_distribution_policy)); ASSERT_EQ(model_policy, value); model_policy = {}; - ASSERT_NO_THROW(ie.set_property("CPU", ov::hint::model_distribution_policy(model_policy))); - ASSERT_NO_THROW(value = 
ie.get_property("CPU", ov::hint::model_distribution_policy)); + OV_ASSERT_NO_THROW(ie.set_property("CPU", ov::hint::model_distribution_policy(model_policy))); + OV_ASSERT_NO_THROW(value = ie.get_property("CPU", ov::hint::model_distribution_policy)); ASSERT_EQ(model_policy, value); } @@ -131,8 +131,8 @@ TEST_F(OVClassConfigTestCPU, smoke_PluginSetConfigStreamsNum) { int32_t num_streams = 1; auto setGetProperty = [&ie](int32_t& getProperty, int32_t setProperty){ - ASSERT_NO_THROW(ie.set_property("CPU", ov::num_streams(setProperty))); - ASSERT_NO_THROW(getProperty = ie.get_property("CPU", ov::num_streams)); + OV_ASSERT_NO_THROW(ie.set_property("CPU", ov::num_streams(setProperty))); + OV_ASSERT_NO_THROW(getProperty = ie.get_property("CPU", ov::num_streams)); }; setGetProperty(value, num_streams); @@ -167,13 +167,14 @@ TEST_F(OVClassConfigTestCPU, smoke_PluginSetConfigAffinity) { if (coreTypes.size() > 1) { defaultBindThreadParameter = ov::Affinity::HYBRID_AWARE; } - ASSERT_NO_THROW(value = ie.get_property("CPU", ov::affinity)); + + OV_ASSERT_NO_THROW(value = ie.get_property("CPU", ov::affinity)); ASSERT_EQ(defaultBindThreadParameter, value); const ov::Affinity affinity = defaultBindThreadParameter == ov::Affinity::HYBRID_AWARE ? 
ov::Affinity::NUMA : ov::Affinity::HYBRID_AWARE; - ASSERT_NO_THROW(ie.set_property("CPU", ov::affinity(affinity))); - ASSERT_NO_THROW(value = ie.get_property("CPU", ov::affinity)); + OV_ASSERT_NO_THROW(ie.set_property("CPU", ov::affinity(affinity))); + OV_ASSERT_NO_THROW(value = ie.get_property("CPU", ov::affinity)); #if defined(__APPLE__) ASSERT_EQ(ov::Affinity::NUMA, value); #else @@ -186,8 +187,8 @@ TEST_F(OVClassConfigTestCPU, smoke_PluginSetConfigAffinityCore) { ov::Affinity affinity = ov::Affinity::CORE; bool value = false; - ASSERT_NO_THROW(ie.set_property("CPU", ov::affinity(affinity))); - ASSERT_NO_THROW(value = ie.get_property("CPU", ov::hint::enable_cpu_pinning)); + OV_ASSERT_NO_THROW(ie.set_property("CPU", ov::affinity(affinity))); + OV_ASSERT_NO_THROW(value = ie.get_property("CPU", ov::hint::enable_cpu_pinning)); #if defined(__APPLE__) ASSERT_EQ(false, value); #else @@ -195,8 +196,8 @@ TEST_F(OVClassConfigTestCPU, smoke_PluginSetConfigAffinityCore) { #endif affinity = ov::Affinity::HYBRID_AWARE; - ASSERT_NO_THROW(ie.set_property("CPU", ov::affinity(affinity))); - ASSERT_NO_THROW(value = ie.get_property("CPU", ov::hint::enable_cpu_pinning)); + OV_ASSERT_NO_THROW(ie.set_property("CPU", ov::affinity(affinity))); + OV_ASSERT_NO_THROW(value = ie.get_property("CPU", ov::hint::enable_cpu_pinning)); #if defined(__APPLE__) ASSERT_EQ(false, value); #else @@ -204,8 +205,8 @@ TEST_F(OVClassConfigTestCPU, smoke_PluginSetConfigAffinityCore) { #endif affinity = ov::Affinity::NUMA; - ASSERT_NO_THROW(ie.set_property("CPU", ov::affinity(affinity))); - ASSERT_NO_THROW(value = ie.get_property("CPU", ov::hint::enable_cpu_pinning)); + OV_ASSERT_NO_THROW(ie.set_property("CPU", ov::affinity(affinity))); + OV_ASSERT_NO_THROW(value = ie.get_property("CPU", ov::hint::enable_cpu_pinning)); ASSERT_EQ(false, value); } @@ -219,18 +220,18 @@ TEST_F(OVClassConfigTestCPU, smoke_PluginSetConfigHintInferencePrecision) { ov::Core ie; auto value = ov::element::f32; - ASSERT_NO_THROW(value 
= ie.get_property("CPU", ov::hint::inference_precision)); + OV_ASSERT_NO_THROW(value = ie.get_property("CPU", ov::hint::inference_precision)); ASSERT_EQ(expected_precision_for_performance_mode, value); const auto forcedPrecision = ov::element::f32; - ASSERT_NO_THROW(ie.set_property("CPU", ov::hint::inference_precision(forcedPrecision))); - ASSERT_NO_THROW(value = ie.get_property("CPU", ov::hint::inference_precision)); + OV_ASSERT_NO_THROW(ie.set_property("CPU", ov::hint::inference_precision(forcedPrecision))); + OV_ASSERT_NO_THROW(value = ie.get_property("CPU", ov::hint::inference_precision)); ASSERT_EQ(value, forcedPrecision); const auto forced_precision_deprecated = ov::element::f32; - ASSERT_NO_THROW(ie.set_property("CPU", ov::hint::inference_precision(forced_precision_deprecated))); - ASSERT_NO_THROW(value = ie.get_property("CPU", ov::hint::inference_precision)); + OV_ASSERT_NO_THROW(ie.set_property("CPU", ov::hint::inference_precision(forced_precision_deprecated))); + OV_ASSERT_NO_THROW(value = ie.get_property("CPU", ov::hint::inference_precision)); ASSERT_EQ(value, forced_precision_deprecated); } @@ -239,13 +240,13 @@ TEST_F(OVClassConfigTestCPU, smoke_PluginSetConfigEnableProfiling) { auto value = false; const bool enableProfilingDefault = false; - ASSERT_NO_THROW(value = ie.get_property("CPU", ov::enable_profiling)); + OV_ASSERT_NO_THROW(value = ie.get_property("CPU", ov::enable_profiling)); ASSERT_EQ(enableProfilingDefault, value); const bool enableProfiling = true; - ASSERT_NO_THROW(ie.set_property("CPU", ov::enable_profiling(enableProfiling))); - ASSERT_NO_THROW(value = ie.get_property("CPU", ov::enable_profiling)); + OV_ASSERT_NO_THROW(ie.set_property("CPU", ov::enable_profiling(enableProfiling))); + OV_ASSERT_NO_THROW(value = ie.get_property("CPU", ov::enable_profiling)); ASSERT_EQ(enableProfiling, value); } @@ -267,9 +268,9 @@ TEST_F(OVClassConfigTestCPU, smoke_PluginSetConfigExecutionModeExpectCorrespondi auto inference_precision_value = 
ov::element::undefined; // check default values - ASSERT_NO_THROW(inference_precision_value = ie.get_property("CPU", ov::hint::inference_precision)); + OV_ASSERT_NO_THROW(inference_precision_value = ie.get_property("CPU", ov::hint::inference_precision)); ASSERT_EQ(inference_precision_value, inference_precision_default); - ASSERT_NO_THROW(execution_mode_value = ie.get_property("CPU", ov::hint::execution_mode)); + OV_ASSERT_NO_THROW(execution_mode_value = ie.get_property("CPU", ov::hint::execution_mode)); ASSERT_EQ(execution_mode_value, execution_mode_default); for (const auto& m : expectedTypeByMode) { @@ -277,11 +278,11 @@ TEST_F(OVClassConfigTestCPU, smoke_PluginSetConfigExecutionModeExpectCorrespondi const auto execution_mode_expected = m.second.first; const auto inference_precision_expected = m.second.second; - ASSERT_NO_THROW(ie.set_property("CPU", ov::hint::execution_mode(execution_mode))); - ASSERT_NO_THROW(execution_mode_value = ie.get_property("CPU", ov::hint::execution_mode)); + OV_ASSERT_NO_THROW(ie.set_property("CPU", ov::hint::execution_mode(execution_mode))); + OV_ASSERT_NO_THROW(execution_mode_value = ie.get_property("CPU", ov::hint::execution_mode)); ASSERT_EQ(execution_mode_value, execution_mode_expected); - ASSERT_NO_THROW(inference_precision_value = ie.get_property("CPU", ov::hint::inference_precision)); + OV_ASSERT_NO_THROW(inference_precision_value = ie.get_property("CPU", ov::hint::inference_precision)); ASSERT_EQ(inference_precision_value, inference_precision_expected); } } @@ -293,13 +294,13 @@ TEST_F(OVClassConfigTestCPU, smoke_PluginSetConfigExecutionModeAndInferencePreci auto expect_execution_mode = [&](const ov::hint::ExecutionMode expected_value) { auto execution_mode_value = ov::hint::ExecutionMode::ACCURACY; - ASSERT_NO_THROW(execution_mode_value = ie.get_property("CPU", ov::hint::execution_mode)); + OV_ASSERT_NO_THROW(execution_mode_value = ie.get_property("CPU", ov::hint::execution_mode)); ASSERT_EQ(execution_mode_value, 
expected_value); }; auto expect_inference_precision = [&](const ov::element::Type expected_value) { auto inference_precision_value = ov::element::undefined;; - ASSERT_NO_THROW(inference_precision_value = ie.get_property("CPU", ov::hint::inference_precision)); + OV_ASSERT_NO_THROW(inference_precision_value = ie.get_property("CPU", ov::hint::inference_precision)); ASSERT_EQ(inference_precision_value, expected_value); }; @@ -307,13 +308,13 @@ TEST_F(OVClassConfigTestCPU, smoke_PluginSetConfigExecutionModeAndInferencePreci expect_execution_mode(execution_mode_default); expect_inference_precision(inference_precision_default); // verify that conflicting property values work as expect - ASSERT_NO_THROW(ie.set_property("CPU", ov::hint::execution_mode(ov::hint::ExecutionMode::PERFORMANCE))); - ASSERT_NO_THROW(ie.set_property("CPU", ov::hint::inference_precision(ov::element::f32))); + OV_ASSERT_NO_THROW(ie.set_property("CPU", ov::hint::execution_mode(ov::hint::ExecutionMode::PERFORMANCE))); + OV_ASSERT_NO_THROW(ie.set_property("CPU", ov::hint::inference_precision(ov::element::f32))); expect_execution_mode(ov::hint::ExecutionMode::PERFORMANCE); // inference_preicision does not affect execution_mode property itself expect_inference_precision(ov::element::f32); // inference_preicision has more priority than performance mode - ASSERT_NO_THROW(ie.set_property("CPU", ov::hint::execution_mode(ov::hint::ExecutionMode::ACCURACY))); - ASSERT_NO_THROW(ie.set_property("CPU", ov::hint::inference_precision(bf16_if_can_be_emulated))); + OV_ASSERT_NO_THROW(ie.set_property("CPU", ov::hint::execution_mode(ov::hint::ExecutionMode::ACCURACY))); + OV_ASSERT_NO_THROW(ie.set_property("CPU", ov::hint::inference_precision(bf16_if_can_be_emulated))); expect_execution_mode(ov::hint::ExecutionMode::ACCURACY); expect_inference_precision(bf16_if_can_be_emulated); } @@ -322,7 +323,7 @@ TEST_F(OVClassConfigTestCPU, smoke_PluginSetConfigLogLevel) { ov::Core ie; //check default value ov::Any value; - 
ASSERT_NO_THROW(value = ie.get_property("CPU", ov::log::level)); + OV_ASSERT_NO_THROW(value = ie.get_property("CPU", ov::log::level)); ASSERT_EQ(value.as(), ov::log::Level::NO); //check set and get @@ -335,8 +336,8 @@ TEST_F(OVClassConfigTestCPU, smoke_PluginSetConfigLogLevel) { ov::log::Level::TRACE}; for (unsigned int i = 0; i < logLevels.size(); i++) { - ASSERT_NO_THROW(ie.set_property("CPU", ov::log::level(logLevels[i]))); - ASSERT_NO_THROW(value = ie.get_property("CPU", ov::log::level)); + OV_ASSERT_NO_THROW(ie.set_property("CPU", ov::log::level(logLevels[i]))); + OV_ASSERT_NO_THROW(value = ie.get_property("CPU", ov::log::level)); ASSERT_EQ(value.as(), logLevels[i]); } @@ -353,7 +354,7 @@ TEST_F(OVClassConfigTestCPU, smoke_PluginCheckCPUExecutionDevice) { ov::Core ie; ov::Any value; - ASSERT_NO_THROW(value = ie.get_property("CPU", ov::execution_devices)); + OV_ASSERT_NO_THROW(value = ie.get_property("CPU", ov::execution_devices)); ASSERT_EQ(value.as(), "CPU"); } @@ -361,7 +362,7 @@ TEST_F(OVClassConfigTestCPU, smoke_PluginCheckCPUDeviceType) { ov::Core ie; ov::Any value; - ASSERT_NO_THROW(value = ie.get_property("CPU", ov::device::type)); + OV_ASSERT_NO_THROW(value = ie.get_property("CPU", ov::device::type)); ASSERT_EQ(value.as(), ov::device::Type::INTEGRATED); } @@ -369,7 +370,7 @@ TEST_F(OVClassConfigTestCPU, smoke_PluginCheckCPUDeviceArchitecture) { ov::Core ie; ov::Any value; - ASSERT_NO_THROW(value = ie.get_property("CPU", ov::device::architecture)); + OV_ASSERT_NO_THROW(value = ie.get_property("CPU", ov::device::architecture)); #if defined(OPENVINO_ARCH_X86_64) ASSERT_EQ(value.as(), "intel64"); diff --git a/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/fuse_transpose_reorder.cpp b/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/fuse_transpose_reorder.cpp index 3450844ae06c5e..d2a848792e355e 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/fuse_transpose_reorder.cpp +++ 
b/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/fuse_transpose_reorder.cpp @@ -410,7 +410,7 @@ TEST(smoke_Basic, FuseDynamicTransposeAndReorderTest) { model = p.build(); auto core = ov::Core(); - ASSERT_NO_THROW(core.compile_model(model, "CPU")); + OV_ASSERT_NO_THROW(core.compile_model(model, "CPU")); } } // namespace test diff --git a/src/plugins/intel_cpu/tests/functional/utils/properties_test.hpp b/src/plugins/intel_cpu/tests/functional/utils/properties_test.hpp index b7dd89c763ca16..dfca40ff936de5 100644 --- a/src/plugins/intel_cpu/tests/functional/utils/properties_test.hpp +++ b/src/plugins/intel_cpu/tests/functional/utils/properties_test.hpp @@ -8,6 +8,7 @@ #include "openvino/runtime/compiled_model.hpp" #include "functional_test_utils/skip_tests_config.hpp" #include "common_test_utils/subgraph_builders/conv_pool_relu.hpp" +#include "common_test_utils/test_assertions.hpp" class OVClassConfigTestCPU : public ::testing::Test { public: diff --git a/src/plugins/intel_cpu/tests/unit/cpu_tensor_test.cpp b/src/plugins/intel_cpu/tests/unit/cpu_tensor_test.cpp index e2dd8bf8a4878f..76794b56531bd8 100644 --- a/src/plugins/intel_cpu/tests/unit/cpu_tensor_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/cpu_tensor_test.cpp @@ -17,6 +17,7 @@ #include "cpu_memory.h" #include "cpu_tensor.h" #include "openvino/runtime/itensor.hpp" +#include "common_test_utils/test_assertions.hpp" using namespace ov::intel_cpu; @@ -211,7 +212,7 @@ TEST_F(CPUTensorTest, canSetShape) { const void* orig_data = t->data(); ASSERT_EQ(t->get_shape(), ov_origShape); - ASSERT_NO_THROW(t->set_shape(ov_newShape)); + OV_ASSERT_NO_THROW(t->set_shape(ov_newShape)); ASSERT_EQ(ov_newShape, t->get_shape()); ASSERT_EQ(byte_strides(ov::row_major_strides(ov_newShape), t->get_element_type()), t->get_strides()); ASSERT_NE(orig_data, t->data()); diff --git a/src/plugins/intel_cpu/tests/unit/cpu_tensor_test_ext.cpp b/src/plugins/intel_cpu/tests/unit/cpu_tensor_test_ext.cpp index 
f297fc277ff70f..2a5903fee2ffb5 100644 --- a/src/plugins/intel_cpu/tests/unit/cpu_tensor_test_ext.cpp +++ b/src/plugins/intel_cpu/tests/unit/cpu_tensor_test_ext.cpp @@ -17,6 +17,7 @@ #include "cpu_memory.h" #include "cpu_tensor.h" #include "openvino/runtime/itensor.hpp" +#include "common_test_utils/test_assertions.hpp" using namespace ov::intel_cpu; @@ -73,7 +74,7 @@ TEST_F(CPUTensorExtTest, canSetShape) { const void* orig_data = t->data(); ASSERT_EQ(t->get_shape(), origShape); - ASSERT_NO_THROW(t->set_shape({4, 5, 6})); + OV_ASSERT_NO_THROW(t->set_shape({4, 5, 6})); ASSERT_EQ(newShape, t->get_shape()); ASSERT_EQ(byteStrides(ov::row_major_strides(newShape), t->get_element_type()), t->get_strides()); ASSERT_NE(orig_data, t->data()); @@ -109,14 +110,14 @@ TEST_F(CPUTensorExtTest, canCreateTensorWithDynamicShape) { std::shared_ptr t; // construct with memory with dynamic shape - ASSERT_NO_THROW(t = std::make_shared(create_memory(ov::element::f32, shape))); + OV_ASSERT_NO_THROW(t = std::make_shared(create_memory(ov::element::f32, shape))); ASSERT_THROW(t->get_shape(), ov::Exception); ASSERT_THROW(t->get_strides(), ov::Exception); // change memory to dynamic shape { auto memptr = create_memory(ov::element::f32, {4, 3, 2}); - ASSERT_NO_THROW(t = std::make_shared(memptr)); + OV_ASSERT_NO_THROW(t = std::make_shared(memptr)); ov::PartialShape pshape{{1, 10}, 3, 2}; CpuBlockedMemoryDescPtr desc2 = std::make_shared(ov::element::f32, Shape(pshape)); @@ -127,10 +128,10 @@ TEST_F(CPUTensorExtTest, canCreateTensorWithDynamicShape) { // set_shape const ov::Shape newShape({4, 0, 2}); - ASSERT_NO_THROW(t = std::make_shared(create_memory(ov::element::f32, {4, 3, 2}))); + OV_ASSERT_NO_THROW(t = std::make_shared(create_memory(ov::element::f32, {4, 3, 2}))); const void* orig_data = t->data(); - ASSERT_NO_THROW(t->set_shape({4, 0, 2})); + OV_ASSERT_NO_THROW(t->set_shape({4, 0, 2})); ASSERT_EQ(newShape, t->get_shape()); ASSERT_EQ(ov::Strides({0, 0, 0}), t->get_strides()); 
ASSERT_EQ(orig_data, t->data()); diff --git a/src/plugins/intel_cpu/tests/unit/dnnl_memory_desc_test.cpp b/src/plugins/intel_cpu/tests/unit/dnnl_memory_desc_test.cpp index 44d4a1067faea0..2ffbe330fbb9a7 100644 --- a/src/plugins/intel_cpu/tests/unit/dnnl_memory_desc_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/dnnl_memory_desc_test.cpp @@ -11,6 +11,7 @@ #include "nodes/common/blocked_desc_creator.h" #include #include "memory_desc/dnnl_blocked_memory_desc.h" +#include "common_test_utils/test_assertions.hpp" using namespace ov::intel_cpu; using namespace testing; @@ -321,7 +322,7 @@ TEST(MakeUndefinedDnnlDesc, checkRank) { ov::intel_cpu::Shape pluginShapeRightRank(ov::PartialShape{{-1, -1}, {-1, -1}, {-1, -1}, {-1, -1}}); MemoryDescPtr memDesc; - ASSERT_NO_THROW(memDesc = DnnlExtensionUtils::makeUndefinedDesc(origin, pluginShapeRightRank)); + OV_ASSERT_NO_THROW(memDesc = DnnlExtensionUtils::makeUndefinedDesc(origin, pluginShapeRightRank)); ASSERT_FALSE(memDesc->isDefined()); } @@ -342,7 +343,7 @@ TEST(MakeUndefinedDnnlDesc, checkDims) { auto partialShape = fullyUndef; partialShape[i] = {dims[i]}; MemoryDescPtr memDesc; - ASSERT_NO_THROW(memDesc = DnnlExtensionUtils::makeUndefinedDesc(origin, ov::intel_cpu::Shape(fullyUndef))); + OV_ASSERT_NO_THROW(memDesc = DnnlExtensionUtils::makeUndefinedDesc(origin, ov::intel_cpu::Shape(fullyUndef))); ASSERT_FALSE(memDesc->isDefined()); } } @@ -439,7 +440,7 @@ TEST(makeDummyDesc, LowerBoundMoreThanDummyValue) { ASSERT_FALSE(desc->isDefined()); MemoryDescPtr definedDesc; - ASSERT_NO_THROW(definedDesc = MemoryDescUtils::makeDummyDesc(*desc)); + OV_ASSERT_NO_THROW(definedDesc = MemoryDescUtils::makeDummyDesc(*desc)); ASSERT_TRUE(definedDesc->isDefined()); ASSERT_EQ((VectorDims{1, 3, 85, 144}), definedDesc->getShape().getStaticDims()); diff --git a/src/plugins/intel_cpu/tests/unit/dnnl_memory_test.cpp b/src/plugins/intel_cpu/tests/unit/dnnl_memory_test.cpp index 83fa9e31849c48..c73db6c8a28df8 100644 --- 
a/src/plugins/intel_cpu/tests/unit/dnnl_memory_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/dnnl_memory_test.cpp @@ -9,6 +9,7 @@ #include "cpu_memory.h" #include "memory_desc/cpu_blocked_memory_desc.h" +#include "common_test_utils/test_assertions.hpp" using namespace ov::intel_cpu; @@ -82,20 +83,20 @@ TEST(StaticMemoryTest, UnsupportedDnnlPrecision) { const dnnl::engine eng(dnnl::engine::kind::cpu, 0); CpuBlockedMemoryDesc memDescSupportedPrc(ov::element::f32, {5, 4, 7, 10}); MemoryPtr testMemory; - ASSERT_NO_THROW(testMemory = std::make_shared(eng, memDescSupportedPrc)); + OV_ASSERT_NO_THROW(testMemory = std::make_shared(eng, memDescSupportedPrc)); ASSERT_TRUE(testMemory->isAllocated()); dnnl::memory dnnl_memory; void* raw_data_ptr = nullptr; - ASSERT_NO_THROW(raw_data_ptr = testMemory->getData()); + OV_ASSERT_NO_THROW(raw_data_ptr = testMemory->getData()); ASSERT_FALSE(nullptr == raw_data_ptr); - ASSERT_NO_THROW(dnnl_memory = testMemory->getPrimitive()); + OV_ASSERT_NO_THROW(dnnl_memory = testMemory->getPrimitive()); ASSERT_TRUE(dnnl_memory); CpuBlockedMemoryDesc memDescUnSupportedPrc(ov::element::i64, {5, 4, 7, 10}); - ASSERT_NO_THROW(testMemory = std::make_shared(eng, memDescUnSupportedPrc)); + OV_ASSERT_NO_THROW(testMemory = std::make_shared(eng, memDescUnSupportedPrc)); ASSERT_TRUE(testMemory->isAllocated()); raw_data_ptr = nullptr; - ASSERT_NO_THROW(raw_data_ptr = testMemory->getData()); + OV_ASSERT_NO_THROW(raw_data_ptr = testMemory->getData()); ASSERT_FALSE(nullptr == raw_data_ptr); dnnl_memory = dnnl::memory(); ASSERT_THROW(dnnl_memory = testMemory->getPrimitive(), ov::Exception); diff --git a/src/plugins/intel_cpu/tests/unit/gemm_api_test.cpp b/src/plugins/intel_cpu/tests/unit/gemm_api_test.cpp index 65d7dc7eb85838..ca748ea1504633 100644 --- a/src/plugins/intel_cpu/tests/unit/gemm_api_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/gemm_api_test.cpp @@ -9,12 +9,13 @@ #include "cpu_memory.h" #include "openvino/core/parallel.hpp" #include 
"openvino/runtime/aligned_buffer.hpp" +#include "common_test_utils/test_assertions.hpp" // This test is used to test whether mlas gemm lib compiles successfully TEST(MLASGemmTests, getPackedSize) { int N = 51864; int K = 384; - ASSERT_NO_THROW(ov::intel_cpu::mlas_sgemm_pack_get_size(N, K)); + OV_ASSERT_NO_THROW(ov::intel_cpu::mlas_sgemm_pack_get_size(N, K)); } // Test mlas thread partition with even/odd threads TEST(MLASGemmTests, simpleGemm) { @@ -29,11 +30,11 @@ TEST(MLASGemmTests, simpleGemm) { float* bData = reinterpret_cast(alignedB.get_ptr()); std::vector cData(M * N, 0.0f); - ASSERT_NO_THROW( + OV_ASSERT_NO_THROW( ov::intel_cpu:: mlas_sgemm_compute("N", "T", M, N, K, 1.0f, aData.data(), K, bData, N, 0.0f, cData.data(), N, nullptr, nthr)); - ASSERT_NO_THROW( + OV_ASSERT_NO_THROW( ov::intel_cpu:: mlas_sgemm_compute("N", "T", M, N, K, 1.0f, aData.data(), K, bData, N, 0.0f, cData.data(), N, nullptr, nthr - 1)); } \ No newline at end of file diff --git a/src/plugins/intel_cpu/tests/unit/registers_pool.cpp b/src/plugins/intel_cpu/tests/unit/registers_pool.cpp index d69144a9e6fe38..63ab9e3589ce88 100644 --- a/src/plugins/intel_cpu/tests/unit/registers_pool.cpp +++ b/src/plugins/intel_cpu/tests/unit/registers_pool.cpp @@ -4,6 +4,7 @@ #include #include "nodes/kernels/x64/registers_pool.hpp" +#include "common_test_utils/test_assertions.hpp" #include "common/nstl.hpp" using namespace ov::intel_cpu; @@ -33,7 +34,7 @@ TYPED_TEST_P(RegPoolTest, get_return_by_scope) { ASSERT_EQ(regPool->countFree(), this->regNumber); { RegistersPool::Reg reg{regPool}; - ASSERT_NO_THROW([[maybe_unused]] auto val = static_cast(reg)); + OV_ASSERT_NO_THROW([[maybe_unused]] auto val = static_cast(reg)); ASSERT_EQ(regPool->countFree(), this->regNumber - 1); } ASSERT_EQ(regPool->countFree(), this->regNumber); @@ -44,13 +45,13 @@ TYPED_TEST_P(RegPoolTest, get_return_by_method) { RegistersPool::Ptr regPool = RegistersPool::create({}); ASSERT_EQ(regPool->countFree(), this->regNumber); 
RegistersPool::Reg reg{regPool}; - ASSERT_NO_THROW([[maybe_unused]] auto val = static_cast(reg)); + OV_ASSERT_NO_THROW([[maybe_unused]] auto val = static_cast(reg)); ASSERT_EQ(regPool->countFree(), this->regNumber - 1); reg.release(); ASSERT_ANY_THROW([[maybe_unused]] auto val = static_cast(reg)); ASSERT_EQ(regPool->countFree(), this->regNumber); reg = RegistersPool::Reg{regPool}; - ASSERT_NO_THROW([[maybe_unused]] auto val = static_cast(reg)); + OV_ASSERT_NO_THROW([[maybe_unused]] auto val = static_cast(reg)); ASSERT_EQ(regPool->countFree(), this->regNumber - 1); } @@ -80,7 +81,7 @@ TYPED_TEST_P(RegPoolTest, move) { using XbyakRegT = typename TypeParam::RegT; RegistersPool::Ptr regPool = RegistersPool::create({}); RegistersPool::Reg reg{regPool}; - ASSERT_NO_THROW([[maybe_unused]] auto val = static_cast(reg)); + OV_ASSERT_NO_THROW([[maybe_unused]] auto val = static_cast(reg)); ASSERT_EQ(regPool->countFree(), this->regNumber - 1); RegistersPool::Reg reg2{regPool}; ASSERT_EQ(regPool->countFree(), this->regNumber - 2); @@ -89,7 +90,7 @@ TYPED_TEST_P(RegPoolTest, move) { ASSERT_EQ(reg2.getIdx(), regIdx); ASSERT_EQ(regPool->countFree(), this->regNumber - 1); ASSERT_ANY_THROW([[maybe_unused]] auto val = static_cast(reg)); - ASSERT_NO_THROW([[maybe_unused]] auto val = static_cast(reg2)); + OV_ASSERT_NO_THROW([[maybe_unused]] auto val = static_cast(reg2)); ASSERT_EQ(regPool->countFree(), this->regNumber - 1); } @@ -106,7 +107,7 @@ TYPED_TEST_P(RegPoolTest, fixed_idx) { } regs[0]->release(); ASSERT_ANY_THROW(RegistersPool::Reg(regPool, 1)); - ASSERT_NO_THROW(RegistersPool::Reg(regPool, 0)); + OV_ASSERT_NO_THROW(RegistersPool::Reg(regPool, 0)); } TYPED_TEST_P(RegPoolTest, exclude) { @@ -213,7 +214,7 @@ TEST(RegistersPoolTests, simd_and_general) { ASSERT_EQ(regPool->countFree(), freeGeneralRegNumber); { RegistersPool::Reg reg{regPool}; - ASSERT_NO_THROW([[maybe_unused]] auto val = static_cast(reg)); + OV_ASSERT_NO_THROW([[maybe_unused]] auto val = static_cast(reg)); 
ASSERT_EQ(regPool->countFree(), simdRegNumber - 1); ASSERT_EQ(regPool->countFree(), simdRegNumber - 1); ASSERT_EQ(regPool->countFree(), freeGeneralRegNumber); diff --git a/src/plugins/intel_cpu/tests/unit/rt_cache.cpp b/src/plugins/intel_cpu/tests/unit/rt_cache.cpp index 8a978e4758ddc1..3c3f5da8a0478c 100644 --- a/src/plugins/intel_cpu/tests/unit/rt_cache.cpp +++ b/src/plugins/intel_cpu/tests/unit/rt_cache.cpp @@ -9,6 +9,7 @@ #include "cache/lru_cache.h" #include "cache/multi_cache.h" +#include "common_test_utils/test_assertions.hpp" using namespace ov::intel_cpu; @@ -29,20 +30,20 @@ TEST(LruCacheTests, Evict) { constexpr size_t capacity = 10; LruCache cache(capacity); for (size_t i = 0; i < 2 * capacity; ++i) { - ASSERT_NO_THROW(cache.put({10}, 10)); + OV_ASSERT_NO_THROW(cache.put({10}, 10)); } - ASSERT_NO_THROW(cache.evict(5)); - ASSERT_NO_THROW(cache.evict(10)); + OV_ASSERT_NO_THROW(cache.evict(5)); + OV_ASSERT_NO_THROW(cache.evict(10)); int result = cache.get({10}); ASSERT_EQ(result, int()); - ASSERT_NO_THROW(cache.evict(0)); + OV_ASSERT_NO_THROW(cache.evict(0)); } TEST(LruCacheTests, Put) { constexpr size_t capacity = 10; LruCache cache(capacity); for (size_t i = 0; i < 2 * capacity; ++i) { - ASSERT_NO_THROW(cache.put({10}, 10)); + OV_ASSERT_NO_THROW(cache.put({10}, 10)); } ASSERT_EQ(cache.get({10}), 10); @@ -52,7 +53,7 @@ TEST(LruCacheTests, Get) { constexpr int capacity = 10; LruCache cache(capacity); for (int i = 1; i < 2 * capacity; ++i) { - ASSERT_NO_THROW(cache.put({i}, i)); + OV_ASSERT_NO_THROW(cache.put({i}, i)); } for (int i = 1; i < capacity; ++i) { @@ -68,7 +69,7 @@ TEST(LruCacheTests, LruPolicy) { constexpr int capacity = 10; LruCache cache(capacity); for (int i = 1; i < capacity; ++i) { - ASSERT_NO_THROW(cache.put({i}, i)); + OV_ASSERT_NO_THROW(cache.put({i}, i)); } for (int i = 4; i < capacity; ++i) { @@ -76,7 +77,7 @@ TEST(LruCacheTests, LruPolicy) { } for (int i = 21; i < 25; ++i) { - ASSERT_NO_THROW(cache.put({i}, i)); + 
OV_ASSERT_NO_THROW(cache.put({i}, i)); } for (int i = 1; i < 4; ++i) { @@ -89,7 +90,7 @@ TEST(LruCacheTests, Empty) { constexpr int attempts = 10; LruCache cache(capacity); for (int i = 1; i < attempts; ++i) { - ASSERT_NO_THROW(cache.put({i}, i)); + OV_ASSERT_NO_THROW(cache.put({i}, i)); } for (int i = 1; i < attempts; ++i) { diff --git a/src/plugins/intel_gpu/tests/functional/behavior/infer_request.cpp b/src/plugins/intel_gpu/tests/functional/behavior/infer_request.cpp index 2a29775c6ebd87..d82384f1eb8366 100644 --- a/src/plugins/intel_gpu/tests/functional/behavior/infer_request.cpp +++ b/src/plugins/intel_gpu/tests/functional/behavior/infer_request.cpp @@ -100,13 +100,13 @@ TEST(TensorTest, smoke_canSetShapeForPreallocatedTensor) { // Check set_shape call for pre-allocated input/output tensors auto input_tensor = inf_req.get_input_tensor(0); - ASSERT_NO_THROW(input_tensor.set_shape({1, 4, 20, 20})); - ASSERT_NO_THROW(input_tensor.set_shape({1, 3, 20, 20})); - ASSERT_NO_THROW(input_tensor.set_shape({2, 3, 20, 20})); + OV_ASSERT_NO_THROW(input_tensor.set_shape({1, 4, 20, 20})); + OV_ASSERT_NO_THROW(input_tensor.set_shape({1, 3, 20, 20})); + OV_ASSERT_NO_THROW(input_tensor.set_shape({2, 3, 20, 20})); auto output_tensor = inf_req.get_output_tensor(0); - ASSERT_NO_THROW(output_tensor.set_shape({1, 10, 12, 12})); - ASSERT_NO_THROW(output_tensor.set_shape({1, 10, 10, 10})); - ASSERT_NO_THROW(output_tensor.set_shape({2, 10, 20, 20})); + OV_ASSERT_NO_THROW(output_tensor.set_shape({1, 10, 12, 12})); + OV_ASSERT_NO_THROW(output_tensor.set_shape({1, 10, 10, 10})); + OV_ASSERT_NO_THROW(output_tensor.set_shape({2, 10, 20, 20})); } TEST(TensorTest, smoke_canSetScalarTensor) { @@ -131,7 +131,7 @@ TEST(TensorTest, smoke_canSetScalarTensor) { double real_data = 1.0; ov::Tensor input_data(ov::element::f64, {}, &real_data); request.set_tensor("scalar1", input_data); - ASSERT_NO_THROW(request.infer()); + OV_ASSERT_NO_THROW(request.infer()); } TEST(TensorTest, 
smoke_canSetTensorForDynamicInput) { @@ -152,23 +152,23 @@ TEST(TensorTest, smoke_canSetTensorForDynamicInput) { ov::Tensor t3(ov::element::i8, {1, 4, 40, 40}); // Check set_shape call for pre-allocated input/output tensors - ASSERT_NO_THROW(inf_req.set_input_tensor(t1)); - ASSERT_NO_THROW(inf_req.infer()); + OV_ASSERT_NO_THROW(inf_req.set_input_tensor(t1)); + OV_ASSERT_NO_THROW(inf_req.infer()); - ASSERT_NO_THROW(inf_req.set_input_tensor(t2)); - ASSERT_NO_THROW(inf_req.infer()); + OV_ASSERT_NO_THROW(inf_req.set_input_tensor(t2)); + OV_ASSERT_NO_THROW(inf_req.infer()); - ASSERT_NO_THROW(inf_req.set_input_tensor(t3)); - ASSERT_NO_THROW(inf_req.infer()); + OV_ASSERT_NO_THROW(inf_req.set_input_tensor(t3)); + OV_ASSERT_NO_THROW(inf_req.infer()); - ASSERT_NO_THROW(inf_req.set_input_tensor(t3)); - ASSERT_NO_THROW(inf_req.infer()); + OV_ASSERT_NO_THROW(inf_req.set_input_tensor(t3)); + OV_ASSERT_NO_THROW(inf_req.infer()); - ASSERT_NO_THROW(inf_req.set_input_tensor(t1)); - ASSERT_NO_THROW(inf_req.infer()); + OV_ASSERT_NO_THROW(inf_req.set_input_tensor(t1)); + OV_ASSERT_NO_THROW(inf_req.infer()); - ASSERT_NO_THROW(inf_req.set_input_tensor(t2)); - ASSERT_NO_THROW(inf_req.infer()); + OV_ASSERT_NO_THROW(inf_req.set_input_tensor(t2)); + OV_ASSERT_NO_THROW(inf_req.infer()); } TEST(TensorTest, smoke_canSetTensorForDynamicOutput) { @@ -189,9 +189,9 @@ TEST(TensorTest, smoke_canSetTensorForDynamicOutput) { ov::Tensor t2(out_tensor.get_element_type(), out_tensor.get_shape()); ASSERT_EQ(t2.get_byte_size(), 0); // Check set_shape call for pre-allocated input/output tensors - ASSERT_NO_THROW(inf_req.set_input_tensor(t1)); - ASSERT_NO_THROW(inf_req.set_output_tensor(t2)); - ASSERT_NO_THROW(inf_req.infer()); + OV_ASSERT_NO_THROW(inf_req.set_input_tensor(t1)); + OV_ASSERT_NO_THROW(inf_req.set_output_tensor(t2)); + OV_ASSERT_NO_THROW(inf_req.infer()); ASSERT_NE(t2.get_byte_size(), 0); } @@ -210,11 +210,11 @@ TEST(TensorTest, smoke_canReallocateDeviceInputForHostTensor) { ov::Tensor 
host_tensor(input.get_element_type(), input.get_shape()); // Infer with pre-allocated input tensor - ASSERT_NO_THROW(inf_req.infer()); + OV_ASSERT_NO_THROW(inf_req.infer()); // Infer with host_tensor - ASSERT_NO_THROW(inf_req.set_input_tensor(host_tensor)); - ASSERT_NO_THROW(inf_req.infer()); + OV_ASSERT_NO_THROW(inf_req.set_input_tensor(host_tensor)); + OV_ASSERT_NO_THROW(inf_req.infer()); } TEST(VariablesTest, smoke_canSetStateTensor) { @@ -236,7 +236,7 @@ TEST(VariablesTest, smoke_canSetStateTensor) { auto default_state_tensor = variable.get_state(); ASSERT_EQ(default_state_tensor.get_shape(), virable_shape); - ASSERT_NO_THROW(request.infer()); + OV_ASSERT_NO_THROW(request.infer()); } TEST(VariablesTest, smoke_set_get_state_with_convert) { @@ -357,16 +357,16 @@ TEST(TensorTest, smoke_outputTensorShapesForDynamicInput) { const ov::Shape output3_shape = {1, 10, 12, 32}; // Check output shape of output tensor is correct - ASSERT_NO_THROW(inf_req.set_input_tensor(t1)); - ASSERT_NO_THROW(inf_req.infer()); + OV_ASSERT_NO_THROW(inf_req.set_input_tensor(t1)); + OV_ASSERT_NO_THROW(inf_req.infer()); ASSERT_EQ(inf_req.get_output_tensor().get_shape(), output1_shape); - ASSERT_NO_THROW(inf_req.set_input_tensor(t2)); - ASSERT_NO_THROW(inf_req.infer()); + OV_ASSERT_NO_THROW(inf_req.set_input_tensor(t2)); + OV_ASSERT_NO_THROW(inf_req.infer()); ASSERT_EQ(inf_req.get_output_tensor().get_shape(), output2_shape); - ASSERT_NO_THROW(inf_req.set_input_tensor(t3)); - ASSERT_NO_THROW(inf_req.infer()); + OV_ASSERT_NO_THROW(inf_req.set_input_tensor(t3)); + OV_ASSERT_NO_THROW(inf_req.infer()); ASSERT_EQ(inf_req.get_output_tensor().get_shape(), output3_shape); } } // namespace diff --git a/src/plugins/intel_gpu/tests/functional/behavior/inference_precision.cpp b/src/plugins/intel_gpu/tests/functional/behavior/inference_precision.cpp index cbb60f1799881b..59dd465294123d 100644 --- a/src/plugins/intel_gpu/tests/functional/behavior/inference_precision.cpp +++ 
b/src/plugins/intel_gpu/tests/functional/behavior/inference_precision.cpp @@ -48,7 +48,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_GPU_BehaviorTests, InferencePrecisionTests, ::tes TEST(InferencePrecisionTests, CantSetInvalidInferencePrecision) { ov::Core core; - ASSERT_NO_THROW(core.get_property(ov::test::utils::DEVICE_GPU, ov::hint::inference_precision)); + OV_ASSERT_NO_THROW(core.get_property(ov::test::utils::DEVICE_GPU, ov::hint::inference_precision)); ASSERT_ANY_THROW(core.set_property(ov::test::utils::DEVICE_GPU, ov::hint::inference_precision(ov::element::bf16))); } diff --git a/src/plugins/intel_gpu/tests/functional/concurrency/gpu_concurrency_tests.cpp b/src/plugins/intel_gpu/tests/functional/concurrency/gpu_concurrency_tests.cpp index 53db3c5b8ad3f6..554c9918af08af 100644 --- a/src/plugins/intel_gpu/tests/functional/concurrency/gpu_concurrency_tests.cpp +++ b/src/plugins/intel_gpu/tests/functional/concurrency/gpu_concurrency_tests.cpp @@ -235,7 +235,7 @@ TEST(smoke_InferRequestDeviceMemoryAllocation, usmHostIsNotChanged) { // Use tensor from infer request #2 as an output for infer request #1 infer_request1.set_output_tensor(output_tensor2); - ASSERT_NO_THROW(infer_request1.infer()); + OV_ASSERT_NO_THROW(infer_request1.infer()); // Modify tensor somehow and save as a reference values ov::test::utils::fill_tensor_random(output_tensor2); @@ -245,7 +245,7 @@ TEST(smoke_InferRequestDeviceMemoryAllocation, usmHostIsNotChanged) { // Perform second infer() call with a system host memory tensor infer_request1.set_output_tensor(output_tensor1); - ASSERT_NO_THROW(infer_request1.infer()); + OV_ASSERT_NO_THROW(infer_request1.infer()); // Expect that output_tensor2 will not change it's data after infer() call ov::test::utils::compare(ref_tensor, output_tensor2, 1e-4); @@ -267,11 +267,11 @@ TEST(smoke_InferRequestDeviceMemoryAllocation, canSetSystemHostTensor) { auto output_tensor2 = infer_request2.get_output_tensor(); infer_request1.set_output_tensor(output_tensor2); - 
ASSERT_NO_THROW(infer_request1.infer()); + OV_ASSERT_NO_THROW(infer_request1.infer()); ov::test::utils::fill_tensor_random(input_tensor1, 10, 0, 1, 1); infer_request1.set_output_tensor(output_tensor1); - ASSERT_NO_THROW(infer_request1.infer()); + OV_ASSERT_NO_THROW(infer_request1.infer()); } TEST(canSwapTensorsBetweenInferRequests, outputs) { diff --git a/src/plugins/intel_gpu/tests/functional/remote_tensor_tests/dx11_remote_ctx_test.cpp b/src/plugins/intel_gpu/tests/functional/remote_tensor_tests/dx11_remote_ctx_test.cpp index 83dbd79eb3aca0..1dd39c16d7aca7 100644 --- a/src/plugins/intel_gpu/tests/functional/remote_tensor_tests/dx11_remote_ctx_test.cpp +++ b/src/plugins/intel_gpu/tests/functional/remote_tensor_tests/dx11_remote_ctx_test.cpp @@ -113,7 +113,7 @@ struct DX11CachedTexture_Test : DX11RemoteCtx_Test { void SetUp() override { DX11RemoteCtx_Test::SetUp(); ASSERT_FALSE(intel_adapters.empty()); - ASSERT_NO_THROW(std::tie(device_ptr, ctx_ptr) = + OV_ASSERT_NO_THROW(std::tie(device_ptr, ctx_ptr) = create_device_with_ctx(*intel_adapters.begin())); // create textures @@ -183,7 +183,7 @@ struct DX11CachedTexture_Test : DX11RemoteCtx_Test { request.set_tensor(param_input_y, tensor.first); request.set_tensor(param_input_uv, tensor.second); - ASSERT_NO_THROW(request.infer()); + OV_ASSERT_NO_THROW(request.infer()); auto output_tensor = request.get_tensor(model->get_results().at(0)); } @@ -204,7 +204,7 @@ TEST_F(DX11RemoteCtx_Test, smoke_make_shared_context) { CComPtr device_ptr; CComPtr ctx_ptr; - ASSERT_NO_THROW(std::tie(device_ptr, ctx_ptr) = + OV_ASSERT_NO_THROW(std::tie(device_ptr, ctx_ptr) = create_device_with_ctx(intel_adapters[0])); auto gpu_context = core.get_default_context("GPU").as(); @@ -215,7 +215,7 @@ TEST_F(DX11RemoteCtx_Test, smoke_make_shared_context) { CComPtr device_ptr; CComPtr ctx_ptr; - ASSERT_NO_THROW(std::tie(device_ptr, ctx_ptr) = + OV_ASSERT_NO_THROW(std::tie(device_ptr, ctx_ptr) = create_device_with_ctx(adapter)); 
ASSERT_THROW(ov::intel_gpu::ocl::D3DContext gpu_context(core, device_ptr), std::runtime_error); @@ -231,7 +231,7 @@ TEST_F(DX11CachedTexture_Test, smoke_make_shared_nv12_tensor_cached) { const size_t total_run_number = 4; for (size_t i = 0; i < total_run_number; i++) { for (const auto& t : dx11_textures) { - ASSERT_NO_THROW(auto tensor = context.create_tensor_nv12(texture_description.Height, texture_description.Width, t)); + OV_ASSERT_NO_THROW(auto tensor = context.create_tensor_nv12(texture_description.Height, texture_description.Width, t)); } } } diff --git a/src/plugins/intel_gpu/tests/functional/remote_tensor_tests/gpu_remote_tensor_tests.cpp b/src/plugins/intel_gpu/tests/functional/remote_tensor_tests/gpu_remote_tensor_tests.cpp index fe84ae1d8f69e6..17168e792adf78 100644 --- a/src/plugins/intel_gpu/tests/functional/remote_tensor_tests/gpu_remote_tensor_tests.cpp +++ b/src/plugins/intel_gpu/tests/functional/remote_tensor_tests/gpu_remote_tensor_tests.cpp @@ -306,7 +306,7 @@ TEST_P(OVRemoteTensorInputBlob_Test, smoke_canInputRemoteTensor) { case RemoteTensorSharingType::PLUGIN_HOST_TENSOR: { auto cldnn_tensor = cldnn_context.create_host_tensor(input->get_element_type(), input_shape); { - ASSERT_NO_THROW(cldnn_tensor.data()); + OV_ASSERT_NO_THROW(cldnn_tensor.data()); void* shared_buffer = cldnn_tensor.data(); if (ocl_instance->supports_usm()) { ASSERT_EQ(ocl_instance->get_allocation_type(shared_buffer), CL_MEM_TYPE_HOST_INTEL); @@ -328,8 +328,8 @@ TEST_P(OVRemoteTensorInputBlob_Test, smoke_canInputRemoteTensor) { { ASSERT_EQ(output->get_element_type(), ov::element::f32); ASSERT_EQ(output_tensor_regular.get_size(), output_tensor_shared.get_size()); - ASSERT_NO_THROW(output_tensor_regular.data()); - ASSERT_NO_THROW(output_tensor_shared.data()); + OV_ASSERT_NO_THROW(output_tensor_regular.data()); + OV_ASSERT_NO_THROW(output_tensor_shared.data()); ov::test::utils::compare(output_tensor_regular, output_tensor_shared); } } @@ -571,7 +571,7 @@ 
TEST_P(OVRemoteTensorInputBlob_Test, smoke_canInputOutputRemoteTensor) { auto input_tensor = gpu_context.create_host_tensor(input->get_element_type(), input_shape); auto output_tensor = gpu_context.create_host_tensor(output->get_output_element_type(0), allocated_out_shape); { - ASSERT_NO_THROW(input_tensor.data()); + OV_ASSERT_NO_THROW(input_tensor.data()); void* shared_buffer = input_tensor.data(); if (ocl_instance->supports_usm()) { ASSERT_EQ(ocl_instance->get_allocation_type(shared_buffer), CL_MEM_TYPE_HOST_INTEL); @@ -598,8 +598,8 @@ TEST_P(OVRemoteTensorInputBlob_Test, smoke_canInputOutputRemoteTensor) { { ASSERT_EQ(output->get_element_type(), ov::element::f32); ASSERT_EQ(output_tensor_regular.get_size(), output_tensor_shared.get_size()); - ASSERT_NO_THROW(output_tensor_regular.data()); - ASSERT_NO_THROW(output_tensor_shared.data()); + OV_ASSERT_NO_THROW(output_tensor_regular.data()); + OV_ASSERT_NO_THROW(output_tensor_shared.data()); ov::test::utils::compare(output_tensor_regular, output_tensor_shared); } } @@ -674,7 +674,7 @@ TEST(OVRemoteTensorTests, smoke_MixedTensorTypes) { auto input_tensor = gpu_context.create_tensor(input->get_element_type(), input_shape); infer_request.set_tensor(input, input_tensor); - ASSERT_NO_THROW(infer_request.infer()); + OV_ASSERT_NO_THROW(infer_request.infer()); ASSERT_EQ(infer_request.get_output_tensor().get_shape(), output_shape_actual); } @@ -809,8 +809,8 @@ class OVRemoteTensor_TestsWithContext : public OVRemoteTensor_Test, public testi { ASSERT_EQ(output->get_element_type(), ov::element::f32); ASSERT_EQ(output_tensor_regular.get_size(), output_tensor_shared.get_size()); - ASSERT_NO_THROW(output_tensor_regular.data()); - ASSERT_NO_THROW(output_tensor_shared.data()); + OV_ASSERT_NO_THROW(output_tensor_regular.data()); + OV_ASSERT_NO_THROW(output_tensor_shared.data()); ov::test::utils::compare(output_tensor_regular, output_tensor_shared); } @@ -872,8 +872,8 @@ class OVRemoteTensor_TestsWithContext : public 
OVRemoteTensor_Test, public testi { ASSERT_EQ(output->get_element_type(), ov::element::f32); ASSERT_EQ(output_tensor_regular.get_size(), output_tensor_shared.get_size()); - ASSERT_NO_THROW(output_tensor_regular.data()); - ASSERT_NO_THROW(output_tensor_shared.data()); + OV_ASSERT_NO_THROW(output_tensor_regular.data()); + OV_ASSERT_NO_THROW(output_tensor_shared.data()); ov::test::utils::compare(output_tensor_regular, output_tensor_shared); } @@ -967,7 +967,7 @@ class OVRemoteTensor_TestsWithContext : public OVRemoteTensor_Test, public testi { ASSERT_EQ(output->get_element_type(), ov::element::f32); ASSERT_EQ(output_tensor_regular.get_size(), out_tensor.get_size()); - ASSERT_NO_THROW(output_tensor_regular.data()); + OV_ASSERT_NO_THROW(output_tensor_regular.data()); ov::test::utils::compare(output_tensor_regular, out_tensor); } @@ -1057,7 +1057,7 @@ class OVRemoteTensor_TestsWithContext : public OVRemoteTensor_Test, public testi { ASSERT_EQ(output->get_element_type(), ov::element::f32); ASSERT_EQ(output_tensor_regular.get_size(), out_tensor.get_size()); - ASSERT_NO_THROW(output_tensor_regular.data()); + OV_ASSERT_NO_THROW(output_tensor_regular.data()); ov::test::utils::compare(output_tensor_regular, out_tensor); } @@ -1148,7 +1148,7 @@ class OVRemoteTensor_TestsWithContext : public OVRemoteTensor_Test, public testi { ASSERT_EQ(output->get_element_type(), ov::element::f32); ASSERT_EQ(output_tensor_regular.get_size(), out_tensor.get_size()); - ASSERT_NO_THROW(output_tensor_regular.data()); + OV_ASSERT_NO_THROW(output_tensor_regular.data()); ov::test::utils::compare(output_tensor_regular, out_tensor); } @@ -1356,8 +1356,8 @@ TEST_F(OVRemoteTensor_Test, NV12toGray) { // ------------------------------------------------------ // compare results ASSERT_EQ(output_tensor_regular.get_size(), output_tensor_shared.get_size()); - ASSERT_NO_THROW(output_tensor_regular.data()); - ASSERT_NO_THROW(output_tensor_shared.data()); + OV_ASSERT_NO_THROW(output_tensor_regular.data()); + 
OV_ASSERT_NO_THROW(output_tensor_shared.data()); float thr = 0.1f; ov::test::utils::compare(output_tensor_shared, output_tensor_regular, thr); } @@ -1471,8 +1471,8 @@ TEST_F(OVRemoteTensor_Test, NV12toBGR_image_ConvertTranspose) { // ------------------------------------------------------ // compare results ASSERT_EQ(output_tensor_regular.get_size(), output_tensor_shared.get_size()); - ASSERT_NO_THROW(output_tensor_regular.data()); - ASSERT_NO_THROW(output_tensor_shared.data()); + OV_ASSERT_NO_THROW(output_tensor_regular.data()); + OV_ASSERT_NO_THROW(output_tensor_shared.data()); float thr = 0.1f; ov::test::utils::compare(output_tensor_shared, output_tensor_regular, thr); } @@ -1564,8 +1564,8 @@ TEST_F(OVRemoteTensor_Test, NV12toBGR_image_single_plane) { // ------------------------------------------------------ // compare results ASSERT_EQ(output_tensor_regular.get_size(), output_tensor_shared.get_size()); - ASSERT_NO_THROW(output_tensor_regular.data()); - ASSERT_NO_THROW(output_tensor_shared.data()); + OV_ASSERT_NO_THROW(output_tensor_regular.data()); + OV_ASSERT_NO_THROW(output_tensor_shared.data()); float thr = 0.1f; ov::test::utils::compare(output_tensor_shared, output_tensor_regular, thr); } @@ -1677,8 +1677,8 @@ TEST_F(OVRemoteTensor_Test, NV12toBGR_image_two_planes) { // ------------------------------------------------------ // compare results ASSERT_EQ(output_tensor_regular.get_size(), output_tensor_shared.get_size()); - ASSERT_NO_THROW(output_tensor_regular.data()); - ASSERT_NO_THROW(output_tensor_shared.data()); + OV_ASSERT_NO_THROW(output_tensor_regular.data()); + OV_ASSERT_NO_THROW(output_tensor_shared.data()); float thr = 0.1f; ov::test::utils::compare(output_tensor_shared, output_tensor_regular, thr); } @@ -1767,8 +1767,8 @@ TEST_F(OVRemoteTensor_Test, NV12toBGR_buffer) { // ------------------------------------------------------ // compare results ASSERT_EQ(output_tensor_regular.get_size(), out_tensor.get_size()); - 
ASSERT_NO_THROW(output_tensor_regular.data()); - ASSERT_NO_THROW(out_tensor.data()); + OV_ASSERT_NO_THROW(output_tensor_regular.data()); + OV_ASSERT_NO_THROW(out_tensor.data()); float thr = 0.1f; ov::test::utils::compare(out_tensor, output_tensor_regular, thr); } @@ -1864,7 +1864,7 @@ TEST_P(OVRemoteTensorBatched_Test, NV12toBGR_image_single_plane) { } auto output_tensor_shared = inf_req_remote.get_tensor(function->get_results().at(0)); - ASSERT_NO_THROW(output_tensor_shared.data()); + OV_ASSERT_NO_THROW(output_tensor_shared.data()); // ------------------------------------------------------ // regular inference @@ -1993,7 +1993,7 @@ TEST_P(OVRemoteTensorBatched_Test, NV12toBGR_image_two_planes) { } auto output_tensor_shared = inf_req_remote.get_tensor(function->get_results().at(0)); - ASSERT_NO_THROW(output_tensor_shared.data()); + OV_ASSERT_NO_THROW(output_tensor_shared.data()); // ------------------------------------------------------ // regular inference @@ -2121,7 +2121,7 @@ TEST_P(OVRemoteTensorBatched_Test, NV12toGray) { } auto output_tensor_shared = inf_req_remote.get_tensor(function->get_results().at(0)); - ASSERT_NO_THROW(output_tensor_shared.data()); + OV_ASSERT_NO_THROW(output_tensor_shared.data()); // ------------------------------------------------------ // regular inference @@ -2238,7 +2238,7 @@ TEST_P(OVRemoteTensorBatched_Test, NV12toBGR_buffer) { inf_req_shared.start_async(); ocl_instance->_queue.enqueueReadBuffer(shared_output_buffer, false, 0, out_size, out_tensor.data(), nullptr, nullptr); ocl_instance->_queue.finish(); - ASSERT_NO_THROW(out_tensor.data()); + OV_ASSERT_NO_THROW(out_tensor.data()); // ------------------------------------------------------ // inference using the same InferRequest but with new data @@ -2275,7 +2275,7 @@ TEST_P(OVRemoteTensorBatched_Test, NV12toBGR_buffer) { inf_req_shared.start_async(); ocl_instance->_queue.enqueueReadBuffer(shared_output_buffer_new, false, 0, out_size, out_tensor_new.data(), nullptr, nullptr); 
ocl_instance->_queue.finish(); - ASSERT_NO_THROW(out_tensor_new.data()); + OV_ASSERT_NO_THROW(out_tensor_new.data()); // ------------------------------------------------------ // regular inference @@ -2491,7 +2491,7 @@ TEST(OVRemoteContextGPU, smoke_RemoteTensorSetShape) { auto remote_tensor = context.create_tensor(ov::element::f32, ov::Shape{1, 2, 3, 4}); - ASSERT_NO_THROW(remote_tensor.set_shape({2, 3, 4, 5})); - ASSERT_NO_THROW(remote_tensor.set_shape({1, 3, 4, 5})); - ASSERT_NO_THROW(remote_tensor.set_shape({3, 3, 4, 5})); + OV_ASSERT_NO_THROW(remote_tensor.set_shape({2, 3, 4, 5})); + OV_ASSERT_NO_THROW(remote_tensor.set_shape({1, 3, 4, 5})); + OV_ASSERT_NO_THROW(remote_tensor.set_shape({3, 3, 4, 5})); } diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp index c1d6324727a9ef..7e28a9b23c6a4d 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp @@ -101,7 +101,7 @@ TEST_P(OVGetMetricPropsTest_GPU_DEVICE_TOTAL_MEM_SIZE, GetMetricAndPrintNoThrow) ov::Core ie; ov::Any p; - ASSERT_NO_THROW(p = ie.get_property(target_device, ov::intel_gpu::device_total_mem_size.name())); + OV_ASSERT_NO_THROW(p = ie.get_property(target_device, ov::intel_gpu::device_total_mem_size.name())); auto t = p.as(); std::cout << "GPU device total memory size: " << t << std::endl; @@ -118,7 +118,7 @@ TEST_P(OVGetMetricPropsTest_GPU_UARCH_VERSION, GetMetricAndPrintNoThrow) { ov::Core ie; ov::Any p; - ASSERT_NO_THROW(p = ie.get_property(target_device, ov::intel_gpu::uarch_version.name())); + OV_ASSERT_NO_THROW(p = ie.get_property(target_device, ov::intel_gpu::uarch_version.name())); auto t = p.as(); std::cout << "GPU device uarch: " << t << std::endl; @@ -134,7 +134,7 
@@ TEST_P(OVGetMetricPropsTest_GPU_EXECUTION_UNITS_COUNT, GetMetricAndPrintNoThrow) ov::Core ie; ov::Any p; - ASSERT_NO_THROW(p = ie.get_property(target_device, ov::intel_gpu::execution_units_count.name())); + OV_ASSERT_NO_THROW(p = ie.get_property(target_device, ov::intel_gpu::execution_units_count.name())); auto t = p.as(); std::cout << "GPU EUs count: " << t << std::endl; @@ -151,7 +151,7 @@ TEST_P(OVClassGetPropertyTest_GPU, GetMetricAvailableDevicesAndPrintNoThrow) { ov::Core ie; std::vector properties; - ASSERT_NO_THROW(properties = ie.get_property(target_device, ov::available_devices)); + OV_ASSERT_NO_THROW(properties = ie.get_property(target_device, ov::available_devices)); std::cout << "AVAILABLE_DEVICES: "; for (const auto& prop : properties) { @@ -166,7 +166,7 @@ TEST_P(OVClassGetPropertyTest_GPU, GetMetricRangeForAsyncInferRequestsAndPrintNo ov::Core ie; std::tuple property; - ASSERT_NO_THROW(property = ie.get_property(target_device, ov::range_for_async_infer_requests)); + OV_ASSERT_NO_THROW(property = ie.get_property(target_device, ov::range_for_async_infer_requests)); std::cout << "RANGE_FOR_ASYNC_INFER_REQUESTS: " << std::get<0>(property) << " " << std::get<1>(property) << " " << std::get<2>(property) << std::endl; @@ -178,7 +178,7 @@ TEST_P(OVClassGetPropertyTest_GPU, GetMetricRangeForStreamsAndPrintNoThrow) { ov::Core ie; std::tuple property; - ASSERT_NO_THROW(property = ie.get_property(target_device, ov::range_for_streams)); + OV_ASSERT_NO_THROW(property = ie.get_property(target_device, ov::range_for_streams)); std::cout << "RANGE_FOR_STREAMS: " << std::get<0>(property) << " " << std::get<1>(property) << std::endl; @@ -189,7 +189,7 @@ TEST_P(OVClassGetPropertyTest_GPU, GetMetricOptimalBatchSizeAndPrintNoThrow) { ov::Core ie; unsigned int property = 0; - ASSERT_NO_THROW(property = ie.get_property(target_device, ov::optimal_batch_size)); + OV_ASSERT_NO_THROW(property = ie.get_property(target_device, ov::optimal_batch_size)); std::cout << 
"OPTIMAL_BATCH_SIZE: " << property << std::endl; @@ -200,7 +200,7 @@ TEST_P(OVClassGetPropertyTest_GPU, GetMetricFullNameAndPrintNoThrow) { ov::Core ie; std::string property; - ASSERT_NO_THROW(property = ie.get_property(target_device, ov::device::full_name)); + OV_ASSERT_NO_THROW(property = ie.get_property(target_device, ov::device::full_name)); std::cout << "FULL_DEVICE_NAME: " << property << std::endl; @@ -211,7 +211,7 @@ TEST_P(OVClassGetPropertyTest_GPU, GetMetricTypeAndPrintNoThrow) { ov::Core ie; ov::device::Type property = ov::device::Type::INTEGRATED; - ASSERT_NO_THROW(property = ie.get_property(target_device, ov::device::type)); + OV_ASSERT_NO_THROW(property = ie.get_property(target_device, ov::device::type)); std::cout << "DEVICE_TYPE: " << property << std::endl; @@ -222,7 +222,7 @@ TEST_P(OVClassGetPropertyTest_GPU, GetMetricGopsAndPrintNoThrow) { ov::Core ie; std::map properties; - ASSERT_NO_THROW(properties = ie.get_property(target_device, ov::device::gops)); + OV_ASSERT_NO_THROW(properties = ie.get_property(target_device, ov::device::gops)); std::cout << "DEVICE_GOPS: " << std::endl; for (const auto& prop : properties) { @@ -236,7 +236,7 @@ TEST_P(OVClassGetPropertyTest_GPU, GetMetricCapabilitiesAndPrintNoThrow) { ov::Core ie; std::vector properties; - ASSERT_NO_THROW(properties = ie.get_property(target_device, ov::device::capabilities)); + OV_ASSERT_NO_THROW(properties = ie.get_property(target_device, ov::device::capabilities)); std::cout << "OPTIMIZATION_CAPABILITIES: " << std::endl; for (const auto& prop : properties) { @@ -250,7 +250,7 @@ TEST_P(OVClassGetPropertyTest_GPU, GetMetricDeviceTotalMemSizeAndPrintNoThrow) { ov::Core ie; uint64_t property = 0; - ASSERT_NO_THROW(property = ie.get_property(target_device, ov::intel_gpu::device_total_mem_size)); + OV_ASSERT_NO_THROW(property = ie.get_property(target_device, ov::intel_gpu::device_total_mem_size)); std::cout << "GPU_DEVICE_TOTAL_MEM_SIZE: " << property << std::endl; @@ -261,7 +261,7 @@ 
TEST_P(OVClassGetPropertyTest_GPU, GetMetricUarchVersionAndPrintNoThrow) { ov::Core ie; std::string property; - ASSERT_NO_THROW(property = ie.get_property(target_device, ov::intel_gpu::uarch_version)); + OV_ASSERT_NO_THROW(property = ie.get_property(target_device, ov::intel_gpu::uarch_version)); std::cout << "GPU_UARCH_VERSION: " << property << std::endl; @@ -272,7 +272,7 @@ TEST_P(OVClassGetPropertyTest_GPU, GetMetricExecutionUnitsCountAndPrintNoThrow) ov::Core ie; int32_t property = 0; - ASSERT_NO_THROW(property = ie.get_property(target_device, ov::intel_gpu::execution_units_count)); + OV_ASSERT_NO_THROW(property = ie.get_property(target_device, ov::intel_gpu::execution_units_count)); std::cout << "GPU_EXECUTION_UNITS_COUNT: " << property << std::endl; @@ -283,7 +283,7 @@ TEST_P(OVClassGetPropertyTest_GPU, GetMetricMemoryStatisticsAndPrintNoThrow) { ov::Core ie; std::map properties; - ASSERT_NO_THROW(properties = ie.get_property(target_device, ov::intel_gpu::memory_statistics)); + OV_ASSERT_NO_THROW(properties = ie.get_property(target_device, ov::intel_gpu::memory_statistics)); std::cout << "GPU_MEMORY_STATISTICS: " << std::endl; for (const auto& prop : properties) { @@ -297,7 +297,7 @@ TEST_P(OVClassGetPropertyTest_GPU, GetAndSetPerformanceModeNoThrow) { ov::Core ie; ov::hint::PerformanceMode defaultMode{}; - ASSERT_NO_THROW(defaultMode = ie.get_property(target_device, ov::hint::performance_mode)); + OV_ASSERT_NO_THROW(defaultMode = ie.get_property(target_device, ov::hint::performance_mode)); std::cout << "Default PERFORMANCE_HINT: \"" << defaultMode << "\"" << std::endl; @@ -313,7 +313,7 @@ TEST_P(OVClassGetPropertyTest_GPU, GetAndSetEnableProfilingNoThrow) { ov::Core ie; bool defaultValue = false; - ASSERT_NO_THROW(defaultValue = ie.get_property(target_device, ov::enable_profiling)); + OV_ASSERT_NO_THROW(defaultValue = ie.get_property(target_device, ov::enable_profiling)); std::cout << "Default PERF_COUNT: " << defaultValue << std::endl; @@ -347,7 +347,7 @@ 
TEST_P(OVClassGetPropertyTest_GPU, GetAndSetModelPriorityNoThrow) { ov::Core ie; ov::hint::Priority defaultValue{}; - ASSERT_NO_THROW(defaultValue = ie.get_property(target_device, ov::hint::model_priority)); + OV_ASSERT_NO_THROW(defaultValue = ie.get_property(target_device, ov::hint::model_priority)); std::cout << "Default model_priority: " << defaultValue << std::endl; @@ -370,7 +370,7 @@ TEST_P(OVClassGetPropertyTest_GPU, GetAndSetQueuePriorityNoThrow) { ov::Core ie; ov::hint::Priority defaultValue{}; - ASSERT_NO_THROW(defaultValue = ie.get_property(target_device, ov::intel_gpu::hint::queue_priority)); + OV_ASSERT_NO_THROW(defaultValue = ie.get_property(target_device, ov::intel_gpu::hint::queue_priority)); std::cout << "Default GPU_QUEUE_PRIORITY: " << defaultValue << std::endl; @@ -388,7 +388,7 @@ TEST_P(OVClassGetPropertyTest_GPU, GetAndSetThrottleLevelNoThrow) { ov::Core ie; ov::intel_gpu::hint::ThrottleLevel defaultValue{}; - ASSERT_NO_THROW(defaultValue = ie.get_property(target_device, ov::intel_gpu::hint::queue_throttle)); + OV_ASSERT_NO_THROW(defaultValue = ie.get_property(target_device, ov::intel_gpu::hint::queue_throttle)); std::cout << "Default GPU_QUEUE_THROTTLE: " << defaultValue << std::endl; @@ -409,20 +409,20 @@ TEST_P(OVClassGetPropertyTest_GPU, CanSetDefaultValueBackToPluginNewAPI) { ov::Core ie; std::vector properties; - ASSERT_NO_THROW(properties = ie.get_property(target_device, ov::supported_properties)); + OV_ASSERT_NO_THROW(properties = ie.get_property(target_device, ov::supported_properties)); std::cout << "SUPPORTED_PROPERTIES:" << std::endl; for (const auto& property : properties) { ov::Any prop; if (property.is_mutable()) { std::cout << "RW: " << property << " "; - ASSERT_NO_THROW(prop = ie.get_property(target_device, property)); + OV_ASSERT_NO_THROW(prop = ie.get_property(target_device, property)); prop.print(std::cout); std::cout << std::endl; - ASSERT_NO_THROW(ie.set_property(target_device, {{property, prop}})); + 
OV_ASSERT_NO_THROW(ie.set_property(target_device, {{property, prop}})); } else { std::cout << "RO: " << property << " "; - ASSERT_NO_THROW(prop = ie.get_property(target_device, property)); + OV_ASSERT_NO_THROW(prop = ie.get_property(target_device, property)); prop.print(std::cout); std::cout << std::endl; } @@ -440,7 +440,7 @@ TEST_P(OVGetMetricPropsTest_GPU_OPTIMAL_BATCH_SIZE, GetMetricAndPrintNoThrow) { unsigned int p = 0; ov::AnyMap _options = {ov::hint::model(simpleNetwork)}; - ASSERT_NO_THROW(p = ie.get_property(target_device, ov::optimal_batch_size.name(), _options).as()); + OV_ASSERT_NO_THROW(p = ie.get_property(target_device, ov::optimal_batch_size.name(), _options).as()); std::cout << "GPU device optimal batch size: " << p << std::endl; @@ -458,7 +458,7 @@ TEST_P(OVGetMetricPropsTest_GPU_MAX_BATCH_SIZE_DEFAULT, GetMetricAndPrintNoThrow unsigned int p = 0; ov::AnyMap _options = {ov::hint::model(simpleNetwork)}; - ASSERT_NO_THROW(p = ie.get_property(target_device, ov::max_batch_size.name(), _options).as()); + OV_ASSERT_NO_THROW(p = ie.get_property(target_device, ov::max_batch_size.name(), _options).as()); std::cout << "GPU device max available batch size: " << p << std::endl; @@ -482,7 +482,7 @@ TEST_P(OVGetMetricPropsTest_GPU_MAX_BATCH_SIZE_STREAM_DEVICE_MEM, GetMetricAndPr ov::num_streams(n_streams), ov::intel_gpu::hint::available_device_mem(available_device_mem_size)}; - ASSERT_NO_THROW(p = ie.get_property(target_device, ov::max_batch_size.name(), _options).as()); + OV_ASSERT_NO_THROW(p = ie.get_property(target_device, ov::max_batch_size.name(), _options).as()); std::cout << "GPU device max available batch size: " << p << std::endl; @@ -501,7 +501,7 @@ TEST_P(OVGetMetricPropsTest_GPU_MEMORY_STATISTICS_DEFAULT, GetMetricAndPrintNoTh auto exec_net = ie.compile_model(simpleNetwork, target_device); - ASSERT_NO_THROW(p = ie.get_property(target_device, ov::intel_gpu::memory_statistics)); + OV_ASSERT_NO_THROW(p = ie.get_property(target_device, 
ov::intel_gpu::memory_statistics)); ASSERT_FALSE(p.empty()); std::cout << "Memory Statistics: " << std::endl; @@ -526,7 +526,7 @@ TEST_P(OVGetMetricPropsTest_GPU_MEMORY_STATISTICS_MULTIPLE_NETWORKS, GetMetricAn auto exec_net1 = ie.compile_model(simpleNetwork, target_device); - ASSERT_NO_THROW(t1 = ie.get_property(target_device, ov::intel_gpu::memory_statistics)); + OV_ASSERT_NO_THROW(t1 = ie.get_property(target_device, ov::intel_gpu::memory_statistics)); ASSERT_FALSE(t1.empty()); for (auto&& kv : t1) { @@ -535,7 +535,7 @@ TEST_P(OVGetMetricPropsTest_GPU_MEMORY_STATISTICS_MULTIPLE_NETWORKS, GetMetricAn auto exec_net2 = ie.compile_model(simpleNetwork, target_device); - ASSERT_NO_THROW(t2 = ie.get_property(target_device, ov::intel_gpu::memory_statistics)); + OV_ASSERT_NO_THROW(t2 = ie.get_property(target_device, ov::intel_gpu::memory_statistics)); ASSERT_FALSE(t2.empty()); for (auto&& kv : t2) { @@ -559,14 +559,14 @@ TEST_P(OVGetMetricPropsTest_GPU_MEMORY_STATISTICS_CHECK_VALUES, GetMetricAndPrin ov::Core ie; std::map t1; - ASSERT_NO_THROW(t1 = ie.get_property(target_device, ov::intel_gpu::memory_statistics)); + OV_ASSERT_NO_THROW(t1 = ie.get_property(target_device, ov::intel_gpu::memory_statistics)); ASSERT_TRUE(t1.empty()); { auto exec_net1 = ie.compile_model(simpleNetwork, target_device); std::map t2; - ASSERT_NO_THROW(t2 = ie.get_property(target_device, ov::intel_gpu::memory_statistics)); + OV_ASSERT_NO_THROW(t2 = ie.get_property(target_device, ov::intel_gpu::memory_statistics)); ASSERT_FALSE(t2.empty()); for (auto&& kv : t2) { @@ -576,7 +576,7 @@ TEST_P(OVGetMetricPropsTest_GPU_MEMORY_STATISTICS_CHECK_VALUES, GetMetricAndPrin auto exec_net2 = ie.compile_model(actualNetwork, target_device); std::map t3; - ASSERT_NO_THROW(t3 = ie.get_property(target_device, ov::intel_gpu::memory_statistics)); + OV_ASSERT_NO_THROW(t3 = ie.get_property(target_device, ov::intel_gpu::memory_statistics)); ASSERT_FALSE(t3.empty()); for (auto&& kv : t3) { @@ -584,7 +584,7 @@ 
TEST_P(OVGetMetricPropsTest_GPU_MEMORY_STATISTICS_CHECK_VALUES, GetMetricAndPrin } } std::map t4; - ASSERT_NO_THROW(t4 = ie.get_property(target_device, ov::intel_gpu::memory_statistics)); + OV_ASSERT_NO_THROW(t4 = ie.get_property(target_device, ov::intel_gpu::memory_statistics)); ASSERT_FALSE(t4.empty()); for (auto&& kv : t4) { @@ -598,7 +598,7 @@ TEST_P(OVGetMetricPropsTest_GPU_MEMORY_STATISTICS_CHECK_VALUES, GetMetricAndPrin } } std::map t5; - ASSERT_NO_THROW(t5 = ie.get_property(target_device, ov::intel_gpu::memory_statistics)); + OV_ASSERT_NO_THROW(t5 = ie.get_property(target_device, ov::intel_gpu::memory_statistics)); ASSERT_FALSE(t5.empty()); for (auto&& kv : t5) { @@ -631,7 +631,7 @@ TEST_P(OVGetMetricPropsTest_GPU_MEMORY_STATISTICS_MULTI_THREADS, GetMetricAndPri auto exec_net1 = ie.compile_model(simpleNetwork, target_device); - ASSERT_NO_THROW(t1 = ie.get_property(target_device, ov::intel_gpu::memory_statistics)); + OV_ASSERT_NO_THROW(t1 = ie.get_property(target_device, ov::intel_gpu::memory_statistics)); ASSERT_FALSE(t1.empty()); for (auto&& kv : t1) { @@ -651,7 +651,7 @@ TEST_P(OVGetMetricPropsTest_GPU_MEMORY_STATISTICS_MULTI_THREADS, GetMetricAndPri } } - ASSERT_NO_THROW(t2 = ie.get_property(target_device, ov::intel_gpu::memory_statistics)); + OV_ASSERT_NO_THROW(t2 = ie.get_property(target_device, ov::intel_gpu::memory_statistics)); ASSERT_FALSE(t2.empty()); for (auto&& kv : t2) { @@ -680,7 +680,7 @@ TEST_P(OVGetMetricPropsTest_CACHING_PROPERTIES, GetMetricAndPrintNoThrow) { ov::hint::execution_mode.name(), }; - ASSERT_NO_THROW(caching_properties = ie.get_property(target_device, ov::internal::caching_properties)); + OV_ASSERT_NO_THROW(caching_properties = ie.get_property(target_device, ov::internal::caching_properties)); std::cout << "GPU Caching properties: " << std::endl; for (auto& prop : caching_properties) { diff --git a/src/plugins/intel_gpu/tests/unit/module_tests/engine_test.cpp b/src/plugins/intel_gpu/tests/unit/module_tests/engine_test.cpp 
index 35b52b30614e09..9fc0d1eb1f061b 100644 --- a/src/plugins/intel_gpu/tests/unit/module_tests/engine_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/module_tests/engine_test.cpp @@ -18,19 +18,19 @@ TEST(engine, memory_creation) { std::shared_ptr mem = nullptr; layout layout_to_allocate = {{2, 4}, data_types::u8, format::bfyx}; - ASSERT_NO_THROW(mem = engine.allocate_memory(layout_to_allocate)); + OV_ASSERT_NO_THROW(mem = engine.allocate_memory(layout_to_allocate)); ASSERT_NE(mem, nullptr); ASSERT_EQ(mem->get_layout(), layout_to_allocate); ASSERT_TRUE(mem->is_allocated_by(engine)); - ASSERT_NO_THROW(mem = engine.allocate_memory(layout_to_allocate, allocation_type::cl_mem)); + OV_ASSERT_NO_THROW(mem = engine.allocate_memory(layout_to_allocate, allocation_type::cl_mem)); ASSERT_NE(mem, nullptr); ASSERT_EQ(mem->get_layout(), layout_to_allocate); ASSERT_NE(std::dynamic_pointer_cast(mem), nullptr); ASSERT_TRUE(mem->is_allocated_by(engine)); if (engine.supports_allocation(allocation_type::usm_host)) { - ASSERT_NO_THROW(mem = engine.allocate_memory(layout_to_allocate, allocation_type::usm_host)); + OV_ASSERT_NO_THROW(mem = engine.allocate_memory(layout_to_allocate, allocation_type::usm_host)); ASSERT_NE(mem, nullptr); ASSERT_EQ(mem->get_layout(), layout_to_allocate); ASSERT_NE(std::dynamic_pointer_cast(mem), nullptr); @@ -38,7 +38,7 @@ TEST(engine, memory_creation) { } if (engine.supports_allocation(allocation_type::usm_device)) { - ASSERT_NO_THROW(mem = engine.allocate_memory(layout_to_allocate, allocation_type::usm_device)); + OV_ASSERT_NO_THROW(mem = engine.allocate_memory(layout_to_allocate, allocation_type::usm_device)); ASSERT_NE(mem, nullptr); ASSERT_EQ(mem->get_layout(), layout_to_allocate); ASSERT_NE(std::dynamic_pointer_cast(mem), nullptr); @@ -46,7 +46,7 @@ TEST(engine, memory_creation) { } std::vector host_data(2*4); - ASSERT_NO_THROW(mem = engine.attach_memory(layout_to_allocate, host_data.data())); + OV_ASSERT_NO_THROW(mem = 
engine.attach_memory(layout_to_allocate, host_data.data())); ASSERT_NE(mem, nullptr); ASSERT_EQ(mem->get_layout(), layout_to_allocate); ASSERT_NE(std::dynamic_pointer_cast(mem), nullptr); diff --git a/src/plugins/intel_gpu/tests/unit/module_tests/network_test.cpp b/src/plugins/intel_gpu/tests/unit/module_tests/network_test.cpp index 2b7002f4649f7d..f0f4e710bfc5b5 100644 --- a/src/plugins/intel_gpu/tests/unit/module_tests/network_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/module_tests/network_test.cpp @@ -104,9 +104,9 @@ TEST(network_test, has_proper_event_for_in_order_queue) { auto reorder_ev = net.get_primitive_event("reorder"); auto activation_ev = net.get_primitive_event("activation2"); - ASSERT_NO_THROW(downcast(concat_ev.get())); - ASSERT_NO_THROW(downcast(reorder_ev.get())); - ASSERT_NO_THROW(downcast(activation_ev.get())); + OV_ASSERT_NO_THROW(downcast(concat_ev.get())); + OV_ASSERT_NO_THROW(downcast(reorder_ev.get())); + OV_ASSERT_NO_THROW(downcast(activation_ev.get())); // Check if we have real underlying OpenCL events ASSERT_TRUE(downcast(concat_ev.get())->get().get() != nullptr); @@ -151,10 +151,10 @@ TEST(network_test, has_proper_event_for_in_order_queue_optimized_out) { auto reorder_ev = net.get_primitive_event("reorder"); auto activation_ev = net.get_primitive_event("activation"); - ASSERT_NO_THROW(downcast(concat_ev.get())); - ASSERT_NO_THROW(downcast(reshape_ev.get())); - ASSERT_NO_THROW(downcast(reorder_ev.get())); - ASSERT_NO_THROW(downcast(activation_ev.get())); + OV_ASSERT_NO_THROW(downcast(concat_ev.get())); + OV_ASSERT_NO_THROW(downcast(reshape_ev.get())); + OV_ASSERT_NO_THROW(downcast(reorder_ev.get())); + OV_ASSERT_NO_THROW(downcast(activation_ev.get())); // Check if we have real underlying OpenCL events ASSERT_TRUE(downcast(concat_ev.get())->get().get() != nullptr); @@ -201,9 +201,9 @@ TEST(network_test, has_proper_event_for_in_order_queue_onednn) { auto reorder_ev = net.get_primitive_event("reorder"); auto activation_ev = 
net.get_primitive_event("activation"); - ASSERT_NO_THROW(downcast(conv_ev.get())); - ASSERT_NO_THROW(downcast(reorder_ev.get())); - ASSERT_NO_THROW(downcast(activation_ev.get())); + OV_ASSERT_NO_THROW(downcast(conv_ev.get())); + OV_ASSERT_NO_THROW(downcast(reorder_ev.get())); + OV_ASSERT_NO_THROW(downcast(activation_ev.get())); // Check if we have real underlying OpenCL events ASSERT_TRUE(downcast(conv_ev.get())->get().get() != nullptr); diff --git a/src/plugins/intel_gpu/tests/unit/module_tests/weights_reorder_factory_test.cpp b/src/plugins/intel_gpu/tests/unit/module_tests/weights_reorder_factory_test.cpp index ccd0185e4df727..cb2f1fc13d0047 100644 --- a/src/plugins/intel_gpu/tests/unit/module_tests/weights_reorder_factory_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/module_tests/weights_reorder_factory_test.cpp @@ -22,10 +22,10 @@ using namespace ::tests; TEST(weights_factory, impl_types) { program::init_primitives(); - ASSERT_NO_THROW(WeightsReordersFactory::get(impl_types::ocl, shape_types::static_shape)); - ASSERT_NO_THROW(WeightsReordersFactory::get(impl_types::any, shape_types::static_shape)); + OV_ASSERT_NO_THROW(WeightsReordersFactory::get(impl_types::ocl, shape_types::static_shape)); + OV_ASSERT_NO_THROW(WeightsReordersFactory::get(impl_types::any, shape_types::static_shape)); #ifdef ENABLE_ONEDNN_FOR_GPU - ASSERT_NO_THROW(WeightsReordersFactory::get(impl_types::onednn, shape_types::static_shape)); + OV_ASSERT_NO_THROW(WeightsReordersFactory::get(impl_types::onednn, shape_types::static_shape)); #endif // ENABLE_ONEDNN_FOR_GPU ASSERT_ANY_THROW(WeightsReordersFactory::get(impl_types::cpu, shape_types::static_shape)); @@ -33,7 +33,7 @@ TEST(weights_factory, impl_types) { TEST(weights_factory, shape_types) { program::init_primitives(); - ASSERT_NO_THROW(WeightsReordersFactory::get(impl_types::ocl, shape_types::static_shape)); + OV_ASSERT_NO_THROW(WeightsReordersFactory::get(impl_types::ocl, shape_types::static_shape)); 
ASSERT_ANY_THROW(WeightsReordersFactory::get(impl_types::ocl, shape_types::dynamic_shape)); } diff --git a/src/plugins/intel_gpu/tests/unit/passes/prepare_buffer_fusing_test.cpp b/src/plugins/intel_gpu/tests/unit/passes/prepare_buffer_fusing_test.cpp index 3d4668920cf0ec..84cf05fed6cfc4 100644 --- a/src/plugins/intel_gpu/tests/unit/passes/prepare_buffer_fusing_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/passes/prepare_buffer_fusing_test.cpp @@ -95,7 +95,7 @@ TEST(prepare_buffer_fusing, static_node_after_optimized_out_dyn_reshape) { prog->get_node("reorder").get_output_layout(true); program_wrapper::apply_opt_pass(*prog); program_wrapper::apply_opt_pass(*prog); - ASSERT_NO_THROW(prog->get_node("reshape")); + OV_ASSERT_NO_THROW(prog->get_node("reshape")); ASSERT_TRUE(prog->get_node("reshape").can_be_optimized()); program_wrapper::apply_opt_pass(*prog); @@ -108,7 +108,7 @@ TEST(prepare_buffer_fusing, static_node_after_optimized_out_dyn_reshape) { net.set_input_data("input", input_memory); std::map output; - ASSERT_NO_THROW(output = net.execute()); + OV_ASSERT_NO_THROW(output = net.execute()); auto out_l = net.get_output_layout("reorder"); auto out_mem = output.at("reorder").get_memory(); diff --git a/src/plugins/intel_gpu/tests/unit/passes/reorder_inputs_test.cpp b/src/plugins/intel_gpu/tests/unit/passes/reorder_inputs_test.cpp index b196701c070449..d234bc2014448a 100644 --- a/src/plugins/intel_gpu/tests/unit/passes/reorder_inputs_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/passes/reorder_inputs_test.cpp @@ -97,7 +97,7 @@ TEST(reorder_inputs, mixed_ranks_irdft) { config.set_property(ov::intel_gpu::optimize_data(true)); program::ptr prog = nullptr; - ASSERT_NO_THROW(prog = program::build_program(engine, topology, config)); + OV_ASSERT_NO_THROW(prog = program::build_program(engine, topology, config)); ASSERT_NE(prog, nullptr); auto prog_impl = prog.get(); diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/condition_gpu_test.cpp 
b/src/plugins/intel_gpu/tests/unit/test_cases/condition_gpu_test.cpp index 38cb6c28e69036..a6bb71e1653375 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/condition_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/condition_gpu_test.cpp @@ -1038,6 +1038,6 @@ TEST(condition_gpu, set_empty_tensor) { net.set_input_data(empty_input_id, empty_input_mem); net.set_input_data(input_id, input_mem); - ASSERT_NO_THROW(net.execute()); - ASSERT_NO_THROW(net.get_output(cond_id).get_memory()); + OV_ASSERT_NO_THROW(net.execute()); + OV_ASSERT_NO_THROW(net.get_output(cond_id).get_memory()); } diff --git a/src/plugins/intel_gpu/tests/unit/test_utils/test_utils.h b/src/plugins/intel_gpu/tests/unit/test_utils/test_utils.h index 7af73b9a127803..5dce1b4343be97 100644 --- a/src/plugins/intel_gpu/tests/unit/test_utils/test_utils.h +++ b/src/plugins/intel_gpu/tests/unit/test_utils/test_utils.h @@ -34,6 +34,7 @@ #include #include "random_gen.h" #include "uniform_quantized_real_distribution.hpp" +#include "common_test_utils/test_assertions.hpp" #include "to_string_utils.h" #include "program_node.h" diff --git a/src/plugins/intel_gpu/tests/unit/transformations/decompose_reduce_for_false_keepdims_test.cpp b/src/plugins/intel_gpu/tests/unit/transformations/decompose_reduce_for_false_keepdims_test.cpp index 252d1d135c18cb..14586bf96bf218 100644 --- a/src/plugins/intel_gpu/tests/unit/transformations/decompose_reduce_for_false_keepdims_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/transformations/decompose_reduce_for_false_keepdims_test.cpp @@ -211,5 +211,5 @@ TEST(DecomposeReduceForFalseKeepDims, Negative) { ReduceDecomposeTests::get_transformed_function(ov::PartialShape::dynamic(), {3}, reduce_mode::max, true); ov::pass::Manager manager; manager.register_pass(); - ASSERT_NO_THROW(manager.run_passes(f)); + OV_ASSERT_NO_THROW(manager.run_passes(f)); } diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/base_test.hpp 
b/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/base_test.hpp index 3024f76e91f214..51e4ca7472598e 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/base_test.hpp +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/base_test.hpp @@ -7,6 +7,7 @@ #include "gtest/gtest.h" #include "matchers/subgraph/subgraph.hpp" #include "functional_test_utils/skip_tests_config.hpp" +#include "common_test_utils/test_assertions.hpp" using namespace ov::tools::subgraph_dumper; diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/cache/cache.cpp b/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/cache/cache.cpp index 0eb448b106dc5e..ab3477718f224c 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/cache/cache.cpp +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/cache/cache.cpp @@ -53,18 +53,18 @@ class ICacheUnitTest : public SubgraphsDumperBaseTest, }; TEST_F(ICacheUnitTest, set_serialization_dir) { - ASSERT_NO_THROW(this->set_serialization_dir(test_artifacts_dir)); + OV_ASSERT_NO_THROW(this->set_serialization_dir(test_artifacts_dir)); ASSERT_EQ(test_artifacts_dir, this->m_serialization_dir); } TEST_F(ICacheUnitTest, update_cache) { - ASSERT_NO_THROW(this->update_cache(test_model, test_model_path)); - ASSERT_NO_THROW(this->update_cache(test_model, test_model_path, true)); - ASSERT_NO_THROW(this->update_cache(test_model, test_model_path, false)); + OV_ASSERT_NO_THROW(this->update_cache(test_model, test_model_path)); + OV_ASSERT_NO_THROW(this->update_cache(test_model, test_model_path, true)); + OV_ASSERT_NO_THROW(this->update_cache(test_model, test_model_path, false)); } TEST_F(ICacheUnitTest, serialize_cache) { - ASSERT_NO_THROW(this->serialize_cache()); + OV_ASSERT_NO_THROW(this->serialize_cache()); } TEST_F(ICacheUnitTest, serialize_model) { @@ -96,19 +96,19 @@ TEST_F(ICacheUnitTest, serialize_model) { TEST_F(ICacheUnitTest, 
is_model_large_to_read) { this->mem_size = 0; - ASSERT_NO_THROW(this->is_model_large_to_read(test_model, test_model_path)); + OV_ASSERT_NO_THROW(this->is_model_large_to_read(test_model, test_model_path)); ASSERT_TRUE(this->is_model_large_to_read(test_model, test_model_path)); this->mem_size = 1 << 30; - ASSERT_NO_THROW(this->is_model_large_to_read(test_model, test_model_path)); + OV_ASSERT_NO_THROW(this->is_model_large_to_read(test_model, test_model_path)); ASSERT_FALSE(this->is_model_large_to_read(test_model, test_model_path)); } TEST_F(ICacheUnitTest, is_model_large_to_store_const) { this->mem_size = 0; - ASSERT_NO_THROW(this->is_model_large_to_store_const(test_model)); + OV_ASSERT_NO_THROW(this->is_model_large_to_store_const(test_model)); ASSERT_TRUE(this->is_model_large_to_store_const(test_model)); this->mem_size = 1 << 30; - ASSERT_NO_THROW(this->is_model_large_to_store_const(test_model)); + OV_ASSERT_NO_THROW(this->is_model_large_to_store_const(test_model)); ASSERT_FALSE(this->is_model_large_to_store_const(test_model)); } diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/cache/graph_cache.cpp b/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/cache/graph_cache.cpp index ec7d155d55164c..295eda9654a713 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/cache/graph_cache.cpp +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/cache/graph_cache.cpp @@ -64,13 +64,13 @@ TEST_F(GraphCacheFuncTest, get_graph_cache_twice) { TEST_F(GraphCacheFuncTest, update_cache) { auto graph_cache = ov::tools::subgraph_dumper::GraphCache::get(); graph_cache->update_cache(test_model, test_model_path, true); - ASSERT_NO_THROW(graph_cache->update_cache(test_model, test_model_path, true)); + OV_ASSERT_NO_THROW(graph_cache->update_cache(test_model, test_model_path, true)); } TEST_F(GraphCacheFuncTest, serialize_cache) { auto graph_cache = ov::tools::subgraph_dumper::GraphCache::get(); 
graph_cache->set_serialization_dir(test_artifacts_dir); - ASSERT_NO_THROW(graph_cache->serialize_cache()); + OV_ASSERT_NO_THROW(graph_cache->serialize_cache()); } // ====================== Graph Cache Unit tests ============================== diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/cache/meta.cpp b/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/cache/meta.cpp index 0d03ff9ad7a48d..27eb5133935e62 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/cache/meta.cpp +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/cache/meta.cpp @@ -25,11 +25,11 @@ using namespace ov::conformance; class InputInfoUnitTest : public SubgraphsDumperBaseTest {}; TEST_F(InputInfoUnitTest, constructor) { - ASSERT_NO_THROW(auto in_info = InputInfo()); - ASSERT_NO_THROW(auto in_info = InputInfo({10})); - ASSERT_NO_THROW(auto in_info = InputInfo({}, 0)); - ASSERT_NO_THROW(auto in_info = InputInfo({}, 0, 1)); - ASSERT_NO_THROW(auto in_info = InputInfo({}, 0, 1, true)); + OV_ASSERT_NO_THROW(auto in_info = InputInfo()); + OV_ASSERT_NO_THROW(auto in_info = InputInfo({10})); + OV_ASSERT_NO_THROW(auto in_info = InputInfo({}, 0)); + OV_ASSERT_NO_THROW(auto in_info = InputInfo({}, 0, 1)); + OV_ASSERT_NO_THROW(auto in_info = InputInfo({}, 0, 1, true)); } TEST_F(InputInfoUnitTest, update_ranges) { @@ -63,11 +63,11 @@ TEST_F(InputInfoUnitTest, update_shapes) { class ModelInfoFuncTest : public ::testing::Test {}; TEST_F(ModelInfoFuncTest, constructor) { - ASSERT_NO_THROW(auto model_info = ModelInfo()); - ASSERT_NO_THROW(auto model_info = ModelInfo("model.xml")); - ASSERT_NO_THROW(auto model_info = ModelInfo("model.xml", 1)); - ASSERT_NO_THROW(auto model_info = ModelInfo("model.xml", 1, 2)); - ASSERT_NO_THROW(auto model_info = ModelInfo("model.xml", 1, 2, 3)); + OV_ASSERT_NO_THROW(auto model_info = ModelInfo()); + OV_ASSERT_NO_THROW(auto model_info = ModelInfo("model.xml")); + OV_ASSERT_NO_THROW(auto model_info = 
ModelInfo("model.xml", 1)); + OV_ASSERT_NO_THROW(auto model_info = ModelInfo("model.xml", 1, 2)); + OV_ASSERT_NO_THROW(auto model_info = ModelInfo("model.xml", 1, 2, 3)); } // ======================== Meta Info Functional tests ============================================= @@ -96,29 +96,29 @@ class MetaInfoFuncTest : public SubgraphsDumperBaseTest { }; TEST_F(MetaInfoFuncTest, constructor) { - ASSERT_NO_THROW(auto meta = MetaInfo()); - ASSERT_NO_THROW(auto meta = MetaInfo(test_model_name)); - ASSERT_NO_THROW(auto meta = MetaInfo(test_model_name, test_in_info)); - ASSERT_NO_THROW(auto meta = MetaInfo(test_model_name, test_in_info, 2)); - ASSERT_NO_THROW(auto meta = MetaInfo(test_model_name, test_in_info, 3, 1, test_extractor_name)); - ASSERT_NO_THROW(auto meta = MetaInfo(test_model_name, test_in_info, 3, 5, test_extractor_name, 5)); + OV_ASSERT_NO_THROW(auto meta = MetaInfo()); + OV_ASSERT_NO_THROW(auto meta = MetaInfo(test_model_name)); + OV_ASSERT_NO_THROW(auto meta = MetaInfo(test_model_name, test_in_info)); + OV_ASSERT_NO_THROW(auto meta = MetaInfo(test_model_name, test_in_info, 2)); + OV_ASSERT_NO_THROW(auto meta = MetaInfo(test_model_name, test_in_info, 3, 1, test_extractor_name)); + OV_ASSERT_NO_THROW(auto meta = MetaInfo(test_model_name, test_in_info, 3, 5, test_extractor_name, 5)); } TEST_F(MetaInfoFuncTest, get_input_info) { auto test_meta = MetaInfo(test_model_name, test_in_info); - ASSERT_NO_THROW(test_meta.get_input_info()); + OV_ASSERT_NO_THROW(test_meta.get_input_info()); ASSERT_EQ(test_meta.get_input_info(), test_in_info); } TEST_F(MetaInfoFuncTest, get_model_info) { auto test_meta = MetaInfo(test_model_path, test_in_info, 5); - ASSERT_NO_THROW(test_meta.get_model_info()); + OV_ASSERT_NO_THROW(test_meta.get_model_info()); ASSERT_EQ(test_meta.get_model_info(), test_model_info); } TEST_F(MetaInfoFuncTest, get_any_extractor) { auto test_meta = MetaInfo(test_model_path, test_in_info, 5, 3, test_extractor_name); - 
ASSERT_NO_THROW(test_meta.get_any_extractor()); + OV_ASSERT_NO_THROW(test_meta.get_any_extractor()); ASSERT_EQ(test_meta.get_any_extractor(), test_extractor_name); } @@ -133,12 +133,12 @@ TEST_F(MetaInfoFuncTest, update) { ASSERT_ANY_THROW(test_meta.update(test_model_path_1, {})); ASSERT_ANY_THROW(test_meta.update(test_model_path_1, {{ "test_in_1", InputInfo({10}) }})); ASSERT_ANY_THROW(test_meta.update(test_model_path_1, {{ "test_in_0", InputInfo({10}, 0, 1, false) }})); - ASSERT_NO_THROW(test_meta.update(test_model_path_1, test_input_info_1)); + OV_ASSERT_NO_THROW(test_meta.update(test_model_path_1, test_input_info_1)); ASSERT_EQ(test_meta.get_input_info().at("test_in_0").min_shape, ov::PartialShape({10})); ASSERT_EQ(test_meta.get_input_info().at("test_in_0").max_shape, ov::PartialShape({50})); - ASSERT_NO_THROW(test_meta.update(test_model_path_1, test_input_info_1, 1, 2, "test_extractor_1")); - ASSERT_NO_THROW(test_meta.update(test_model_path_1, test_input_info_1, 2)); - ASSERT_NO_THROW(test_meta.update(test_model_path_1, test_input_info_1, 2, 4, "test")); + OV_ASSERT_NO_THROW(test_meta.update(test_model_path_1, test_input_info_1, 1, 2, "test_extractor_1")); + OV_ASSERT_NO_THROW(test_meta.update(test_model_path_1, test_input_info_1, 2)); + OV_ASSERT_NO_THROW(test_meta.update(test_model_path_1, test_input_info_1, 2, 4, "test")); } TEST_F(MetaInfoFuncTest, serialize) { @@ -229,19 +229,19 @@ TEST_F(MetaInfoUnitTest, update) { std::map test_meta_1 = {{ "test_in_0", InputInfo({20}, 0, 1, true) }}; std::string test_model_1 = "test_model_1"; std::string test_model_path_1 = ov::util::path_join({ "path", "to", test_model_1 + ".xml"}); - ASSERT_NO_THROW(this->update(test_model_path_1, test_meta_1)); + OV_ASSERT_NO_THROW(this->update(test_model_path_1, test_meta_1)); ASSERT_NE(this->model_info.find(test_model_1), this->model_info.end()); ASSERT_EQ(*this->model_info[test_model_1].model_paths.begin(), test_model_path_1); ASSERT_EQ(this->model_info[test_model_1].this_op_cnt, 
1); ASSERT_EQ(this->input_info.begin()->second.min_shape, ov::PartialShape({10})); ASSERT_EQ(this->input_info.begin()->second.max_shape, ov::PartialShape({20})); - ASSERT_NO_THROW(this->update(test_model_path_1, test_meta_1)); + OV_ASSERT_NO_THROW(this->update(test_model_path_1, test_meta_1)); ASSERT_EQ(this->model_info[test_model_1].model_paths.size(), 1); ASSERT_EQ(this->model_info[test_model_1].this_op_cnt, 2); ASSERT_EQ(this->input_info.begin()->second.min_shape, ov::PartialShape({10})); ASSERT_EQ(this->input_info.begin()->second.max_shape, ov::PartialShape({20})); test_model_path_1 = ov::util::path_join({ "path", "to", "test", test_model_1 + ".xml"}); - ASSERT_NO_THROW(this->update(test_model_path_1, test_meta_1, 0, 1, "test_extractor")); + OV_ASSERT_NO_THROW(this->update(test_model_path_1, test_meta_1, 0, 1, "test_extractor")); ASSERT_EQ(this->model_info[test_model_1].model_paths.size(), 2); ASSERT_EQ(this->model_info[test_model_1].this_op_cnt, 3); ASSERT_EQ(this->model_info[test_model_1].this_op_cnt, 3); @@ -249,7 +249,7 @@ TEST_F(MetaInfoUnitTest, update) { } TEST_F(MetaInfoUnitTest, get_model_name_by_path) { - ASSERT_NO_THROW(this->get_model_name_by_path(test_model_path)); + OV_ASSERT_NO_THROW(this->get_model_name_by_path(test_model_path)); auto name = this->get_model_name_by_path(test_model_path); ASSERT_EQ(name, test_model_name); } @@ -257,14 +257,14 @@ TEST_F(MetaInfoUnitTest, get_model_name_by_path) { TEST_F(MetaInfoUnitTest, get_graph_priority) { auto meta = MetaInfo(test_model_name, test_in_info); this->update(test_model_name, meta.get_input_info()); - ASSERT_NO_THROW(this->get_abs_graph_priority()); - ASSERT_NO_THROW(this->get_graph_priority()); + OV_ASSERT_NO_THROW(this->get_abs_graph_priority()); + OV_ASSERT_NO_THROW(this->get_graph_priority()); ASSERT_TRUE(this->get_graph_priority() >= 0 && this->get_graph_priority() <= 1); } TEST_F(MetaInfoUnitTest, get_any_extractor) { auto meta = MetaInfo(test_model_name, test_in_info, 1, 1, "test_extractor"); 
- ASSERT_NO_THROW(meta.get_any_extractor()); + OV_ASSERT_NO_THROW(meta.get_any_extractor()); ASSERT_EQ(meta.get_any_extractor(), "test_extractor"); } diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/cache/op_cache.cpp b/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/cache/op_cache.cpp index c795fed6cc759b..4e0997f46a641e 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/cache/op_cache.cpp +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/cache/op_cache.cpp @@ -65,14 +65,14 @@ TEST_F(OpCacheFuncTest, get_op_cache_twice) { TEST_F(OpCacheFuncTest, update_cache) { auto op_cache = ov::tools::subgraph_dumper::OpCache::get(); - ASSERT_NO_THROW(op_cache->update_cache(test_model, test_model_path, true)); - ASSERT_NO_THROW(op_cache->update_cache(test_model, test_model_path, true)); + OV_ASSERT_NO_THROW(op_cache->update_cache(test_model, test_model_path, true)); + OV_ASSERT_NO_THROW(op_cache->update_cache(test_model, test_model_path, true)); } TEST_F(OpCacheFuncTest, serialize_cache) { auto op_cache = ov::tools::subgraph_dumper::OpCache::get(); op_cache->set_serialization_dir(test_artifacts_dir); - ASSERT_NO_THROW(op_cache->serialize_cache()); + OV_ASSERT_NO_THROW(op_cache->serialize_cache()); } // ====================== Operation Cache Unit tests ============================== diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/matchers/single_op/manager.cpp b/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/matchers/single_op/manager.cpp index cb4dc5ae89eeb9..74d66e90e6a494 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/matchers/single_op/manager.cpp +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/matchers/single_op/manager.cpp @@ -35,29 +35,29 @@ class MatchersManagerTest : public MatchersManager, }; TEST_F(MatchersManagerTest, constructor) { - ASSERT_NO_THROW(auto m = MatchersManager()); - 
ASSERT_NO_THROW(auto m = MatchersManager(test_map)); + OV_ASSERT_NO_THROW(auto m = MatchersManager()); + OV_ASSERT_NO_THROW(auto m = MatchersManager(test_map)); } TEST_F(MatchersManagerTest, set_matchers) { - ASSERT_NO_THROW(this->set_matchers(test_map)); + OV_ASSERT_NO_THROW(this->set_matchers(test_map)); ASSERT_EQ(this->m_matchers, test_map); } TEST_F(MatchersManagerTest, get_matchers) { - ASSERT_NO_THROW(this->set_matchers(test_map)); - ASSERT_NO_THROW(this->get_matchers()); + OV_ASSERT_NO_THROW(this->set_matchers(test_map)); + OV_ASSERT_NO_THROW(this->get_matchers()); ASSERT_EQ(this->m_matchers, this->get_matchers()); } TEST_F(MatchersManagerTest, get_config) { - ASSERT_NO_THROW(this->get_config(test_abs)); + OV_ASSERT_NO_THROW(this->get_config(test_abs)); } TEST_F(MatchersManagerTest, match) { this->set_matchers(test_map); - ASSERT_NO_THROW(this->match(test_parameter, test_abs)); - ASSERT_NO_THROW(this->match(test_abs, test_abs)); + OV_ASSERT_NO_THROW(this->match(test_parameter, test_abs)); + OV_ASSERT_NO_THROW(this->match(test_abs, test_abs)); ASSERT_TRUE(this->match(test_abs, test_abs)); ASSERT_TRUE(this->match(test_parameter, test_parameter)); ASSERT_FALSE(this->match(test_parameter, test_abs)); diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/matchers/subgraph/manager.cpp b/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/matchers/subgraph/manager.cpp index a622317ddb010e..b0a684edd47bb6 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/matchers/subgraph/manager.cpp +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/matchers/subgraph/manager.cpp @@ -39,24 +39,24 @@ class ExtractorsManagerTest : public ExtractorsManager, }; TEST_F(ExtractorsManagerTest, constructor) { - ASSERT_NO_THROW(auto m = ExtractorsManager()); - ASSERT_NO_THROW(auto m = ExtractorsManager(test_map)); + OV_ASSERT_NO_THROW(auto m = ExtractorsManager()); + OV_ASSERT_NO_THROW(auto m = 
ExtractorsManager(test_map)); } TEST_F(ExtractorsManagerTest, set_extractors) { - ASSERT_NO_THROW(this->set_extractors(test_map)); + OV_ASSERT_NO_THROW(this->set_extractors(test_map)); ASSERT_EQ(this->m_extractors, test_map); } TEST_F(ExtractorsManagerTest, get_extractors) { - ASSERT_NO_THROW(this->set_extractors(test_map)); - ASSERT_NO_THROW(this->get_extractors()); + OV_ASSERT_NO_THROW(this->set_extractors(test_map)); + OV_ASSERT_NO_THROW(this->get_extractors()); ASSERT_EQ(this->m_extractors, this->get_extractors()); } TEST_F(ExtractorsManagerTest, extract) { this->set_extractors(test_map); - ASSERT_NO_THROW(this->extract(test_model_0_0)); + OV_ASSERT_NO_THROW(this->extract(test_model_0_0)); } } // namespace diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/matchers/subgraph/subgraph.cpp b/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/matchers/subgraph/subgraph.cpp index 3958271085ab32..da5b2a7dd64073 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/matchers/subgraph/subgraph.cpp +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/matchers/subgraph/subgraph.cpp @@ -56,11 +56,11 @@ // }; // TEST_F(SubgraphExtractorTest, match) { -// ASSERT_NO_THROW(this->match(test_model_0_0, test_model_0_1)); +// OV_ASSERT_NO_THROW(this->match(test_model_0_0, test_model_0_1)); // ASSERT_TRUE(this->match(test_model_0_0, test_model_0_1)); -// ASSERT_NO_THROW(this->match(test_model_0_0, test_model_1)); +// OV_ASSERT_NO_THROW(this->match(test_model_0_0, test_model_1)); // ASSERT_FALSE(this->match(test_model_0_0, test_model_1)); -// ASSERT_NO_THROW(this->match(test_model_0_1, test_model_1)); +// OV_ASSERT_NO_THROW(this->match(test_model_0_1, test_model_1)); // ASSERT_FALSE(this->match(test_model_0_1, test_model_1)); // } @@ -155,28 +155,28 @@ // test_model_1 = std::make_shared(ov::ResultVector{test_res}, // ov::ParameterVector{test_parameter}); // } -// ASSERT_NO_THROW(this->match(test_model_0_0, 
test_model_0_1)); +// OV_ASSERT_NO_THROW(this->match(test_model_0_0, test_model_0_1)); // ASSERT_TRUE(this->match(test_model_0_0, test_model_0_1)); -// ASSERT_NO_THROW(this->match(test_model_0_0, test_model_1)); +// OV_ASSERT_NO_THROW(this->match(test_model_0_0, test_model_1)); // ASSERT_FALSE(this->match(test_model_0_0, test_model_1)); -// ASSERT_NO_THROW(this->match(test_model_0_1, test_model_1)); +// OV_ASSERT_NO_THROW(this->match(test_model_0_1, test_model_1)); // ASSERT_FALSE(this->match(test_model_0_1, test_model_1)); // } // TEST_F(SubgraphExtractorTest, extract) { -// ASSERT_NO_THROW(this->extract(test_model_0_0)); -// ASSERT_NO_THROW(this->extract(test_model_0_1)); -// ASSERT_NO_THROW(this->extract(test_model_1)); +// OV_ASSERT_NO_THROW(this->extract(test_model_0_0)); +// OV_ASSERT_NO_THROW(this->extract(test_model_0_1)); +// OV_ASSERT_NO_THROW(this->extract(test_model_1)); // } // TEST_F(SubgraphExtractorTest, is_subgraph) { // auto is_subgraph = this->is_subgraph(test_model_0_0, test_model_0_0); -// ASSERT_NO_THROW(this->is_subgraph(test_model_0_0, test_model_0_0)); +// OV_ASSERT_NO_THROW(this->is_subgraph(test_model_0_0, test_model_0_0)); // ASSERT_TRUE(std::get<0>(is_subgraph)); -// ASSERT_NO_THROW(this->is_subgraph(test_model_0_0, test_model_1)); +// OV_ASSERT_NO_THROW(this->is_subgraph(test_model_0_0, test_model_1)); // is_subgraph = this->is_subgraph(test_model_0_0, test_model_1); // ASSERT_FALSE(std::get<0>(is_subgraph)); -// ASSERT_NO_THROW(this->is_subgraph(test_model_0_1, test_model_1)); +// OV_ASSERT_NO_THROW(this->is_subgraph(test_model_0_1, test_model_1)); // is_subgraph = this->is_subgraph(test_model_0_1, test_model_1); // ASSERT_FALSE(std::get<0>(is_subgraph)); // { @@ -191,17 +191,17 @@ // auto big_model_0 = std::make_shared(ov::ResultVector{test_res}, // ov::ParameterVector{test_parameter}); // is_subgraph = this->is_subgraph(test_model_0_0, big_model_0); -// ASSERT_NO_THROW(this->is_subgraph(test_model_0_0, big_model_0)); +// 
OV_ASSERT_NO_THROW(this->is_subgraph(test_model_0_0, big_model_0)); // ASSERT_TRUE(std::get<0>(is_subgraph)); // ASSERT_EQ(std::get<1>(is_subgraph), big_model_0); // ASSERT_EQ(std::get<2>(is_subgraph), test_model_0_0); // is_subgraph = this->is_subgraph(test_model_0_1, big_model_0); -// ASSERT_NO_THROW(this->is_subgraph(test_model_0_1, big_model_0)); +// OV_ASSERT_NO_THROW(this->is_subgraph(test_model_0_1, big_model_0)); // ASSERT_TRUE(std::get<0>(is_subgraph)); // ASSERT_EQ(std::get<1>(is_subgraph), big_model_0); // ASSERT_EQ(std::get<2>(is_subgraph), test_model_0_1); -// ASSERT_NO_THROW(this->is_subgraph(test_model_1, big_model_0)); +// OV_ASSERT_NO_THROW(this->is_subgraph(test_model_1, big_model_0)); // ASSERT_FALSE(std::get<0>(this->is_subgraph(test_model_1, big_model_0))); // } // } diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/utils/model.cpp b/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/utils/model.cpp index b6b3fc50bab44f..532d709281d949 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/utils/model.cpp +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/utils/model.cpp @@ -72,7 +72,7 @@ TEST_F(ModelUtilsTest, align_input_info) { auto in_info_1 = ov::util::get_input_info_by_model(test_model_1.get()); ASSERT_NE(in_info_0, in_info_1); std::unordered_map a; - ASSERT_NO_THROW(ov::util::align_input_info(test_model_0.get(), test_model_1.get(), + OV_ASSERT_NO_THROW(ov::util::align_input_info(test_model_0.get(), test_model_1.get(), in_info_0, in_info_1, a)); auto in_info_ref = ov::util::align_input_info(test_model_0.get(), test_model_1.get(), in_info_0, in_info_1, a); @@ -94,7 +94,7 @@ TEST_F(ModelUtilsTest, align_input_info_for_subgraphs) { matched_ops.insert({params_0[param_id]->get_friendly_name(), params_1[param_id]->get_friendly_name()}); } - // ASSERT_NO_THROW(ov::util::align_input_info(test_model_0, test_model_1, + // 
OV_ASSERT_NO_THROW(ov::util::align_input_info(test_model_0, test_model_1, // in_info_0, in_info_1, // matched_ops)); auto ref = ov::util::align_input_info(test_model_0, test_model_1, diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/utils/model_comparator.cpp b/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/utils/model_comparator.cpp index acb5c196739f23..c562834590c0a5 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/utils/model_comparator.cpp +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/utils/model_comparator.cpp @@ -67,24 +67,24 @@ class ModelComparatorTest : public SubgraphsDumperBaseTest { TEST_F(ModelComparatorTest, get) { ov::util::ModelComparator::Ptr model_comparator = nullptr; - ASSERT_NO_THROW(model_comparator = ov::util::ModelComparator::get()); + OV_ASSERT_NO_THROW(model_comparator = ov::util::ModelComparator::get()); ASSERT_EQ(model_comparator, ov::util::ModelComparator::get()); } TEST_F(ModelComparatorTest, match) { ov::util::ModelComparator::Ptr model_comparator = ov::util::ModelComparator::get(); - ASSERT_NO_THROW(model_comparator->match(test_model_0_0, test_model_0_1)); + OV_ASSERT_NO_THROW(model_comparator->match(test_model_0_0, test_model_0_1)); ASSERT_TRUE(model_comparator->match(test_model_0_0, test_model_0_1)); - ASSERT_NO_THROW(model_comparator->match(test_model_0_0, test_model_1)); + OV_ASSERT_NO_THROW(model_comparator->match(test_model_0_0, test_model_1)); ASSERT_FALSE(model_comparator->match(test_model_0_0, test_model_1)); - ASSERT_NO_THROW(model_comparator->match(test_model_0_1, test_model_1)); + OV_ASSERT_NO_THROW(model_comparator->match(test_model_0_1, test_model_1)); ASSERT_FALSE(model_comparator->match(test_model_0_1, test_model_1)); } TEST_F(ModelComparatorTest, match_strict_shape) { ov::util::ModelComparator::Ptr model_comparator = ov::util::ModelComparator::get(); - ASSERT_NO_THROW(model_comparator->set_shape_strict_match(true)); - 
ASSERT_NO_THROW(model_comparator->match(test_model_0_0, test_model_0_1)); + OV_ASSERT_NO_THROW(model_comparator->set_shape_strict_match(true)); + OV_ASSERT_NO_THROW(model_comparator->match(test_model_0_0, test_model_0_1)); ASSERT_FALSE(model_comparator->match(test_model_0_0, test_model_0_1)); { { @@ -105,11 +105,11 @@ TEST_F(ModelComparatorTest, match_strict_shape) { TEST_F(ModelComparatorTest, match_with_low_coeff) { ov::util::ModelComparator::Ptr model_comparator = ov::util::ModelComparator::get(); model_comparator->set_match_coefficient(0.5f); - ASSERT_NO_THROW(model_comparator->match(test_model_0_0, test_model_0_1)); + OV_ASSERT_NO_THROW(model_comparator->match(test_model_0_0, test_model_0_1)); ASSERT_TRUE(model_comparator->match(test_model_0_0, test_model_0_1)); - ASSERT_NO_THROW(model_comparator->match(test_model_0_0, test_model_1)); + OV_ASSERT_NO_THROW(model_comparator->match(test_model_0_0, test_model_1)); ASSERT_TRUE(model_comparator->match(test_model_0_0, test_model_1)); - ASSERT_NO_THROW(model_comparator->match(test_model_0_1, test_model_1)); + OV_ASSERT_NO_THROW(model_comparator->match(test_model_0_1, test_model_1)); ASSERT_TRUE(model_comparator->match(test_model_0_1, test_model_1)); } @@ -119,22 +119,22 @@ TEST_F(ModelComparatorTest, match_with_in_info) { test_in_info({{"test_parameter_0", ov::conformance::InputInfo(ov::Shape{1, 2})}}), test_in_info_({{"test_parameter_0", ov::conformance::InputInfo(ov::Shape{1, 2})}}), test_in_info_1({{"test_parameter_1", ov::conformance::InputInfo(ov::Shape{2, 5}, 1, 2, true)}}); - ASSERT_NO_THROW(model_comparator->match(test_model_0_0, test_model_0_1, test_in_info, test_in_info_)); + OV_ASSERT_NO_THROW(model_comparator->match(test_model_0_0, test_model_0_1, test_in_info, test_in_info_)); ASSERT_TRUE(std::get<0>(model_comparator->match(test_model_0_0, test_model_0_1, test_in_info, test_in_info_))); - ASSERT_NO_THROW(model_comparator->match(test_model_0_0, test_model_0_1, test_in_info, test_in_info_1)); + 
OV_ASSERT_NO_THROW(model_comparator->match(test_model_0_0, test_model_0_1, test_in_info, test_in_info_1)); ASSERT_FALSE(std::get<0>(model_comparator->match(test_model_0_0, test_model_0_1, test_in_info, test_in_info_1))); - ASSERT_NO_THROW(model_comparator->match(test_model_0_1, test_model_1, test_in_info, test_in_info)); + OV_ASSERT_NO_THROW(model_comparator->match(test_model_0_1, test_model_1, test_in_info, test_in_info)); ASSERT_FALSE(std::get<0>(model_comparator->match(test_model_0_1, test_model_1, test_in_info, test_in_info))); } TEST_F(ModelComparatorTest, is_subgraph) { ov::util::ModelComparator::Ptr model_comparator = ov::util::ModelComparator::get(); - ASSERT_NO_THROW(model_comparator->is_subgraph(test_model_0_0, test_model_0_1)); + OV_ASSERT_NO_THROW(model_comparator->is_subgraph(test_model_0_0, test_model_0_1)); auto is_subgraph = model_comparator->is_subgraph(test_model_0_0, test_model_0_1); ASSERT_TRUE(std::get<0>(is_subgraph)); - ASSERT_NO_THROW(model_comparator->is_subgraph(test_model_0_0, test_model_1)); + OV_ASSERT_NO_THROW(model_comparator->is_subgraph(test_model_0_0, test_model_1)); ASSERT_FALSE(std::get<0>(model_comparator->is_subgraph(test_model_0_0, test_model_1))); - ASSERT_NO_THROW(model_comparator->is_subgraph(test_model_0_1, test_model_1)); + OV_ASSERT_NO_THROW(model_comparator->is_subgraph(test_model_0_1, test_model_1)); ASSERT_FALSE(std::get<0>(model_comparator->is_subgraph(test_model_0_1, test_model_1))); } diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/utils/node.cpp b/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/utils/node.cpp index a75d2be74e592b..c0f043a31de2a4 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/utils/node.cpp +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/utils/node.cpp @@ -94,7 +94,7 @@ TEST_F(NodeUtilsTest, generate_model_by_node) { TEST_F(NodeUtilsTest, get_max_ops_versions) { std::unordered_map> max_ops_versions; 
std::string max_opset; - ASSERT_NO_THROW(std::tie(max_opset, max_ops_versions) = ov::util::get_last_opset_version_map()); + OV_ASSERT_NO_THROW(std::tie(max_opset, max_ops_versions) = ov::util::get_last_opset_version_map()); std::vector values = {-1, -2.05, -3.65, 0, 5, 7}; auto const_node = std::make_shared(ov::element::Type_t::f32, ov::Shape({2, 3}), values); diff --git a/src/tests/functional/plugin/shared/include/behavior/compiled_model/compiled_model_base.hpp b/src/tests/functional/plugin/shared/include/behavior/compiled_model/compiled_model_base.hpp index 4dfe19877ece2f..8618d53fddccf1 100644 --- a/src/tests/functional/plugin/shared/include/behavior/compiled_model/compiled_model_base.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/compiled_model/compiled_model_base.hpp @@ -297,7 +297,7 @@ TEST_P(OVCompiledModelBaseTest, CanSetInputPrecisionForNetwork) { input.model().set_layout("??HW"); input.preprocess().resize(ov::preprocess::ResizeAlgorithm::RESIZE_LINEAR); model = ppp.build(); - ASSERT_NO_THROW(core.compile_model(model, target_device, configuration)); + OV_ASSERT_NO_THROW(core.compile_model(model, target_device, configuration)); } TEST_P(OVCompiledModelBaseTest, CanSetOutputPrecisionForNetwork) { @@ -307,7 +307,7 @@ TEST_P(OVCompiledModelBaseTest, CanSetOutputPrecisionForNetwork) { ov::preprocess::OutputInfo& output = ppp.output(); output.postprocess().convert_element_type(ov::element::u8); model = ppp.build(); - ASSERT_NO_THROW(core.compile_model(model, target_device, configuration)); + OV_ASSERT_NO_THROW(core.compile_model(model, target_device, configuration)); } TEST_P(OVCompiledModelBaseTest, CanGetOutputsInfo) { @@ -461,8 +461,8 @@ TEST_P(OVCompiledModelBaseTest, CheckExecGraphInfoSerialization) { std::shared_ptr runtime_model; auto compiled_model = core->compile_model(function, target_device, configuration); - ASSERT_NO_THROW(runtime_model = compiled_model.get_runtime_model()); - ASSERT_NO_THROW(ov::serialize(runtime_model, out_xml_path, 
out_bin_path)); + OV_ASSERT_NO_THROW(runtime_model = compiled_model.get_runtime_model()); + OV_ASSERT_NO_THROW(ov::serialize(runtime_model, out_xml_path, out_bin_path)); ov::test::utils::removeIRFiles(out_xml_path, out_bin_path); } @@ -698,7 +698,7 @@ TEST_P(CompiledModelSetType, canSetInputTypeAndCompileModel) { auto& input = ppp.input(); input.preprocess().convert_element_type(convert_type); model = ppp.build(); - ASSERT_NO_THROW(core.compile_model(model, target_device, configuration)); + OV_ASSERT_NO_THROW(core.compile_model(model, target_device, configuration)); } TEST_P(CompiledModelSetType, canSetOutputTypeAndCompileModel) { @@ -709,7 +709,7 @@ TEST_P(CompiledModelSetType, canSetOutputTypeAndCompileModel) { auto& output = ppp.output(); output.postprocess().convert_element_type(convert_type); model = ppp.build(); - ASSERT_NO_THROW(core.compile_model(model, target_device, configuration)); + OV_ASSERT_NO_THROW(core.compile_model(model, target_device, configuration)); } TEST_P(CompiledModelSetType, canSetInputOutputTypeAndCompileModel) { @@ -722,7 +722,7 @@ TEST_P(CompiledModelSetType, canSetInputOutputTypeAndCompileModel) { auto& output = ppp.output(); output.postprocess().convert_element_type(convert_type); model = ppp.build(); - ASSERT_NO_THROW(core.compile_model(model, target_device, configuration)); + OV_ASSERT_NO_THROW(core.compile_model(model, target_device, configuration)); } } // namespace behavior } // namespace test diff --git a/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/properties_tests.hpp b/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/properties_tests.hpp index aa0aecd1aec57c..9f41e253697aeb 100644 --- a/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/properties_tests.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/properties_tests.hpp @@ -83,7 +83,7 @@ class InferRequestPropertiesTest : public testing::WithParamInterfaceget_executors_number()); - 
ASSERT_NO_THROW(createInferRequestWithConfig()); + OV_ASSERT_NO_THROW(createInferRequestWithConfig()); if (target_device.find(ov::test::utils::DEVICE_AUTO) == std::string::npos && target_device.find(ov::test::utils::DEVICE_MULTI) == std::string::npos && target_device.find(ov::test::utils::DEVICE_HETERO) == std::string::npos && @@ -94,7 +94,7 @@ TEST_P(InferRequestPropertiesTest, canSetExclusiveAsyncRequests) { TEST_P(InferRequestPropertiesTest, withoutExclusiveAsyncRequests) { ASSERT_EQ(0ul, ov::threading::executor_manager()->get_executors_number()); - ASSERT_NO_THROW(createInferRequestWithConfig()); + OV_ASSERT_NO_THROW(createInferRequestWithConfig()); if (target_device.find(ov::test::utils::DEVICE_AUTO) == std::string::npos && target_device.find(ov::test::utils::DEVICE_MULTI) == std::string::npos && target_device.find(ov::test::utils::DEVICE_HETERO) == std::string::npos && @@ -116,7 +116,7 @@ TEST_P(InferRequestPropertiesTest, ReusableCPUStreamsExecutor) { target_device.find(ov::test::utils::DEVICE_MULTI) == std::string::npos && target_device.find(ov::test::utils::DEVICE_HETERO) == std::string::npos && target_device.find(ov::test::utils::DEVICE_BATCH) == std::string::npos) { - ASSERT_NO_THROW(core->set_property(target_device, config)); + OV_ASSERT_NO_THROW(core->set_property(target_device, config)); } // Load CNNNetwork to target plugins execNet = core->compile_model(function, target_device, config); diff --git a/src/tests/functional/plugin/shared/src/behavior/compiled_model/properties.cpp b/src/tests/functional/plugin/shared/src/behavior/compiled_model/properties.cpp index 8bc58100299f8d..1a06be4f3b1596 100644 --- a/src/tests/functional/plugin/shared/src/behavior/compiled_model/properties.cpp +++ b/src/tests/functional/plugin/shared/src/behavior/compiled_model/properties.cpp @@ -371,7 +371,7 @@ TEST_P(OVClassCompiledModelGetConfigTest, CanCompileModelWithCustomLocale) { ov::Core core = ov::test::utils::create_core(); - 
ASSERT_NO_THROW(core.compile_model(simpleNetwork, target_device);); + OV_ASSERT_NO_THROW(core.compile_model(simpleNetwork, target_device);); setlocale(LC_ALL, prev.c_str()); } diff --git a/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/batched_tensors.cpp b/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/batched_tensors.cpp index 8c0118997b93e0..b0b926967d1e1a 100644 --- a/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/batched_tensors.cpp +++ b/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/batched_tensors.cpp @@ -459,7 +459,7 @@ TEST_P(OVInferRequestBatchedTests, SetInputTensors_Correct_all) { std::vector tensors; tensors.emplace_back(element::f32, one_shape, buffer.data()); tensors.emplace_back(element::f32, one_shape, buffer.data() + ov::shape_size(one_shape)); - ASSERT_NO_THROW(req.set_input_tensors(tensors)); + OV_ASSERT_NO_THROW(req.set_input_tensors(tensors)); } TEST_P(OVInferRequestBatchedTests, SetInputTensors_Cache_CheckDeepCopy) { @@ -482,8 +482,8 @@ TEST_P(OVInferRequestBatchedTests, SetInputTensors_Cache_CheckDeepCopy) { tensors.emplace_back(element::f32, one_shape, buffer.data() + ov::shape_size(one_shape)); auto out_tensor = ov::Tensor(element::f32, batch_shape, buffer_out.data()); // Verify that infer request still has its own copy of input/output, user can use old names - ASSERT_NO_THROW(req.set_tensors("tensor_input0", tensors)); - ASSERT_NO_THROW(req.set_tensor("tensor_output0", out_tensor)); + OV_ASSERT_NO_THROW(req.set_tensors("tensor_input0", tensors)); + OV_ASSERT_NO_THROW(req.set_tensor("tensor_output0", out_tensor)); } TEST_P(OVInferRequestBatchedTests, SetInputTensors_Incorrect_tensor_element_type) { diff --git a/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/io_tensor.cpp b/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/io_tensor.cpp index 48e9763495dcdf..2ba5eaa2034340 100644 --- 
a/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/io_tensor.cpp +++ b/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/io_tensor.cpp @@ -90,7 +90,7 @@ TEST_P(OVInferRequestIOTensorTest, getAfterSetInputDoNotChangeInput) { auto tensor = utils::create_and_fill_tensor(input.get_element_type(), input.get_shape()); OV_ASSERT_NO_THROW(req.set_tensor(input, tensor)); ov::Tensor actual_tensor; - ASSERT_NO_THROW(actual_tensor = req.get_tensor(input)); + OV_ASSERT_NO_THROW(actual_tensor = req.get_tensor(input)); ASSERT_EQ(tensor.data(), actual_tensor.data()); ASSERT_EQ(tensor.get_shape(), actual_tensor.get_shape()); @@ -101,7 +101,7 @@ TEST_P(OVInferRequestIOTensorTest, getAfterSetOutputDoNotChangeOutput) { auto tensor = utils::create_and_fill_tensor(output.get_element_type(), output.get_shape()); OV_ASSERT_NO_THROW(req.set_tensor(output, tensor)); ov::Tensor actual_tensor; - ASSERT_NO_THROW(actual_tensor = req.get_tensor(output)); + OV_ASSERT_NO_THROW(actual_tensor = req.get_tensor(output)); ASSERT_EQ(tensor.data(), actual_tensor.data()); ASSERT_EQ(tensor.get_shape(), actual_tensor.get_shape()); diff --git a/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/perf_counters.cpp b/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/perf_counters.cpp index e3cff0b98f5fca..0b86ca53cad202 100644 --- a/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/perf_counters.cpp +++ b/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/perf_counters.cpp @@ -58,10 +58,10 @@ TEST_P(OVInferRequestPerfCountersExceptionTest, perfCountWereNotEnabledException TEST_P(OVInferRequestPerfCountersTest, CheckOperationInProfilingInfo) { req = execNet.create_infer_request(); - ASSERT_NO_THROW(req.infer()); + OV_ASSERT_NO_THROW(req.infer()); std::vector profiling_info; - ASSERT_NO_THROW(profiling_info = req.get_profiling_info()); + OV_ASSERT_NO_THROW(profiling_info = req.get_profiling_info()); for (const auto& op : 
function->get_ops()) { if (!strcmp(op->get_type_info().name, "Constant")) diff --git a/src/tests/functional/plugin/shared/src/behavior/ov_plugin/caching_tests.cpp b/src/tests/functional/plugin/shared/src/behavior/ov_plugin/caching_tests.cpp index ea7ea3c77e8e32..bac4a6661faf44 100644 --- a/src/tests/functional/plugin/shared/src/behavior/ov_plugin/caching_tests.cpp +++ b/src/tests/functional/plugin/shared/src/behavior/ov_plugin/caching_tests.cpp @@ -227,7 +227,7 @@ void CompileModelCacheTestBase::run() { GTEST_FAIL() << "Plugin doesn't support import and export - skipping test" << std::endl; } if (importExportSupported(*core)) { - ASSERT_NO_THROW(core->get_property(targetDevice, ov::internal::caching_properties)); + OV_ASSERT_NO_THROW(core->get_property(targetDevice, ov::internal::caching_properties)); } configure_model(); try { @@ -248,13 +248,13 @@ void CompileModelCacheTestBase::run() { // Step 2: Load with cache. Export or import shall not throw { core->set_property(ov::cache_dir(m_cacheFolderName)); - ASSERT_NO_THROW(compiledModel = core->compile_model(function, targetDevice, configuration)); + OV_ASSERT_NO_THROW(compiledModel = core->compile_model(function, targetDevice, configuration)); if (targetDevice.find("AUTO") == std::string::npos) { // Apply check only for HW plugins ASSERT_EQ(i != 0, compiledModel.get_property(ov::loaded_from_cache)); } generate_inputs(targetStaticShapes.front()); - ASSERT_NO_THROW(infer()); + OV_ASSERT_NO_THROW(infer()); } compare(originalOutputs, get_plugin_outputs()); // Destroy objects here @@ -457,10 +457,10 @@ void CompileModelCacheRuntimePropertiesTestBase::run() { // Second compile model will load from model cache. 
for (int i = 0; i < 2; i++) { { - ASSERT_NO_THROW(compiledModel = core->compile_model(m_modelName, targetDevice, configuration)); + OV_ASSERT_NO_THROW(compiledModel = core->compile_model(m_modelName, targetDevice, configuration)); ASSERT_EQ(i != 0, compiledModel.get_property(ov::loaded_from_cache)); - ASSERT_NO_THROW(inferRequest = compiledModel.create_infer_request()); - ASSERT_NO_THROW(inferRequest.infer()); + OV_ASSERT_NO_THROW(inferRequest = compiledModel.create_infer_request()); + OV_ASSERT_NO_THROW(inferRequest.infer()); } // cache is created and reused ASSERT_EQ(ov::test::utils::listFilesWithExt(m_cacheFolderName, "blob").size(), 1); @@ -495,10 +495,10 @@ void CompileModelCacheRuntimePropertiesTestBase::run() { // Fourth compile model will load from model cache. for (int i = 0; i < 2; i++) { { - ASSERT_NO_THROW(compiledModel = core->compile_model(m_modelName, targetDevice, configuration)); + OV_ASSERT_NO_THROW(compiledModel = core->compile_model(m_modelName, targetDevice, configuration)); ASSERT_EQ(i != 0, compiledModel.get_property(ov::loaded_from_cache)); - ASSERT_NO_THROW(inferRequest = compiledModel.create_infer_request()); - ASSERT_NO_THROW(inferRequest.infer()); + OV_ASSERT_NO_THROW(inferRequest = compiledModel.create_infer_request()); + OV_ASSERT_NO_THROW(inferRequest.infer()); } // old cache has been removed and new cache is created and reused ASSERT_EQ(ov::test::utils::listFilesWithExt(m_cacheFolderName, "blob").size(), 1); diff --git a/src/tests/functional/plugin/shared/src/behavior/ov_plugin/properties_tests.cpp b/src/tests/functional/plugin/shared/src/behavior/ov_plugin/properties_tests.cpp index 05059355c732eb..e144201d9b3fc4 100644 --- a/src/tests/functional/plugin/shared/src/behavior/ov_plugin/properties_tests.cpp +++ b/src/tests/functional/plugin/shared/src/behavior/ov_plugin/properties_tests.cpp @@ -812,7 +812,7 @@ TEST_P(OVGetMetricPropsOptionalTest, GetMetricAndPrintNoThrow_RANGE_FOR_ASYNC_IN ov::Core ie = ov::test::utils::create_core(); 
unsigned int start{0}, end{0}, step{0}; - ASSERT_NO_THROW(std::tie(start, end, step) = ie.get_property(target_device, ov::range_for_async_infer_requests)); + OV_ASSERT_NO_THROW(std::tie(start, end, step) = ie.get_property(target_device, ov::range_for_async_infer_requests)); std::cout << "Range for async infer requests: " << std::endl << start << std::endl @@ -829,7 +829,7 @@ TEST_P(OVGetMetricPropsOptionalTest, GetMetricAndPrintNoThrow_RANGE_FOR_STREAMS) ov::Core ie = ov::test::utils::create_core(); unsigned int start = 0, end = 0; - ASSERT_NO_THROW(std::tie(start, end) = ie.get_property(target_device, ov::range_for_streams)); + OV_ASSERT_NO_THROW(std::tie(start, end) = ie.get_property(target_device, ov::range_for_streams)); std::cout << "Range for streams: " << std::endl << start << std::endl diff --git a/src/tests/functional/plugin/shared/src/behavior/ov_plugin/remote.cpp b/src/tests/functional/plugin/shared/src/behavior/ov_plugin/remote.cpp index 6df7ca8996a098..a57dc605662c3d 100644 --- a/src/tests/functional/plugin/shared/src/behavior/ov_plugin/remote.cpp +++ b/src/tests/functional/plugin/shared/src/behavior/ov_plugin/remote.cpp @@ -64,17 +64,17 @@ TEST_P(OVRemoteTest, canCreateRemote) { ov::AnyMap params; std::string device; - ASSERT_NO_THROW(params = context.get_params()); - ASSERT_NO_THROW(device = context.get_device_name()); + OV_ASSERT_NO_THROW(params = context.get_params()); + OV_ASSERT_NO_THROW(device = context.get_device_name()); for (auto&& param : context_parameters) { ASSERT_NE(params.find(param.first), params.end()); } ASSERT_EQ(target_device, device); ov::RemoteTensor remote_tensor; - ASSERT_NO_THROW(remote_tensor = context.create_tensor(input->get_element_type(), input->get_shape(), tensor_parameters)); + OV_ASSERT_NO_THROW(remote_tensor = context.create_tensor(input->get_element_type(), input->get_shape(), tensor_parameters)); - ASSERT_NO_THROW(params = remote_tensor.get_params()); - ASSERT_NO_THROW(device = remote_tensor.get_device_name()); + 
OV_ASSERT_NO_THROW(params = remote_tensor.get_params()); + OV_ASSERT_NO_THROW(device = remote_tensor.get_device_name()); for (auto&& param : tensor_parameters) { ASSERT_NE(params.find(param.first), params.end()); } @@ -89,11 +89,11 @@ TEST_P(OVRemoteTest, remoteTensorAsTensor) { auto remote_tensor = context.create_tensor(input->get_element_type(), input->get_shape(), tensor_parameters); ov::Tensor tensor; - ASSERT_NO_THROW(tensor = remote_tensor); + OV_ASSERT_NO_THROW(tensor = remote_tensor); ASSERT_THROW(tensor.data(), ov::Exception); - ASSERT_NO_THROW(tensor.get_element_type()); + OV_ASSERT_NO_THROW(tensor.get_element_type()); ASSERT_EQ(input->get_element_type(), tensor.get_element_type()); - ASSERT_NO_THROW(tensor.get_shape()); + OV_ASSERT_NO_THROW(tensor.get_shape()); ASSERT_EQ(input->get_shape(), tensor.get_shape()); } @@ -104,19 +104,19 @@ TEST_P(OVRemoteTest, inferWithRemoteNoThrow) { { auto input_remote_tensor = context.create_tensor(input->get_element_type(), input->get_shape(), tensor_parameters); - ASSERT_NO_THROW(infer_request.set_input_tensor(0, input_remote_tensor)); - ASSERT_NO_THROW(infer_request.infer()); + OV_ASSERT_NO_THROW(infer_request.set_input_tensor(0, input_remote_tensor)); + OV_ASSERT_NO_THROW(infer_request.infer()); } auto output = function->get_results().front(); {// Host accessable output if input is remote by default ov::Tensor tensor; - ASSERT_NO_THROW(tensor = infer_request.get_output_tensor(0)); - ASSERT_NO_THROW(tensor.data()); + OV_ASSERT_NO_THROW(tensor = infer_request.get_output_tensor(0)); + OV_ASSERT_NO_THROW(tensor.data()); } {// Infer with remote on input and outputs auto output_remote_tensor = context.create_tensor(output->get_element_type(), output->get_shape(), tensor_parameters); - ASSERT_NO_THROW(infer_request.set_output_tensor(0, output_remote_tensor)); - ASSERT_NO_THROW(infer_request.infer()); + OV_ASSERT_NO_THROW(infer_request.set_output_tensor(0, output_remote_tensor)); + OV_ASSERT_NO_THROW(infer_request.infer()); } 
} diff --git a/src/tests/test_utils/common_test_utils/src/ov_test_utils.cpp b/src/tests/test_utils/common_test_utils/src/ov_test_utils.cpp index 55b60bdde613f7..f5453b3c536480 100644 --- a/src/tests/test_utils/common_test_utils/src/ov_test_utils.cpp +++ b/src/tests/test_utils/common_test_utils/src/ov_test_utils.cpp @@ -73,7 +73,7 @@ void TransformationTestsF::TearDown() { manager.run_passes(model); if (!m_disable_rt_info_check) { - ASSERT_NO_THROW(check_rt_info(model)); + OV_ASSERT_NO_THROW(check_rt_info(model)); } if (acc_enabled) { diff --git a/src/tests/test_utils/common_test_utils/tests/ov_tensor_utils.cpp b/src/tests/test_utils/common_test_utils/tests/ov_tensor_utils.cpp index 7d37366dd1a44c..f548dcbdeb18c9 100644 --- a/src/tests/test_utils/common_test_utils/tests/ov_tensor_utils.cpp +++ b/src/tests/test_utils/common_test_utils/tests/ov_tensor_utils.cpp @@ -5,6 +5,7 @@ #include #include "common_test_utils/ov_tensor_utils.hpp" +#include "common_test_utils/test_assertions.hpp" using namespace testing; using namespace ov::util; @@ -17,7 +18,7 @@ TEST(Comparator, boolean) { bool values_ref[] = {value, value, value, value}; auto tensor = ov::Tensor(element_type, shape, values); auto tensor_ref = ov::Tensor(element_type, shape, values_ref); - ASSERT_NO_THROW(ov::test::utils::compare(tensor_ref, tensor)); + OV_ASSERT_NO_THROW(ov::test::utils::compare(tensor_ref, tensor)); } TEST(Comparator, boolean_negative) { @@ -39,7 +40,7 @@ TEST(Comparator, integer) { std::vector values_ref(ov::shape_size(shape), value); auto tensor = ov::Tensor(element_type, shape, values.data()); auto tensor_ref = ov::Tensor(element_type, shape, values_ref.data()); - ASSERT_NO_THROW(ov::test::utils::compare(tensor_ref, tensor)); + OV_ASSERT_NO_THROW(ov::test::utils::compare(tensor_ref, tensor)); } TEST(Comparator, integer_negative) { @@ -69,7 +70,7 @@ TEST(Comparator, float_) { } auto tensor = ov::Tensor(element_type, shape, values.data()); auto tensor_ref = ov::Tensor(element_type, shape, 
values_ref.data()); - ASSERT_NO_THROW(ov::test::utils::compare(tensor_ref, tensor)); + OV_ASSERT_NO_THROW(ov::test::utils::compare(tensor_ref, tensor)); } TEST(Comparator, float_large) { @@ -86,7 +87,7 @@ TEST(Comparator, float_large) { } auto tensor = ov::Tensor(element_type, shape, values.data()); auto tensor_ref = ov::Tensor(element_type, shape, values_ref.data()); - ASSERT_NO_THROW(ov::test::utils::compare(tensor_ref, tensor)); + OV_ASSERT_NO_THROW(ov::test::utils::compare(tensor_ref, tensor)); } TEST(Comparator, float_negative) { @@ -118,7 +119,7 @@ TEST(Comparator, float_extra_small) { } auto tensor = ov::Tensor(element_type, shape, values.data()); auto tensor_ref = ov::Tensor(element_type, shape, values_ref.data()); - ASSERT_NO_THROW(ov::test::utils::compare(tensor_ref, tensor)); + OV_ASSERT_NO_THROW(ov::test::utils::compare(tensor_ref, tensor)); } TEST(Comparator, different_shapes) { @@ -144,7 +145,7 @@ TEST(Comparator, different_prc_low) { std::vector values_ref(ov::shape_size(shape), ov::float16(value)); auto tensor = ov::Tensor(element_type, shape, values.data()); auto tensor_ref = ov::Tensor(element_type_ref, shape, values_ref.data()); - ASSERT_NO_THROW(ov::test::utils::compare(tensor_ref, tensor)); + OV_ASSERT_NO_THROW(ov::test::utils::compare(tensor_ref, tensor)); } TEST(Comparator, different_prc_up) { @@ -159,5 +160,5 @@ TEST(Comparator, different_prc_up) { std::vector values_ref(ov::shape_size(shape), value); auto tensor = ov::Tensor(element_type, shape, values.data()); auto tensor_ref = ov::Tensor(element_type_ref, shape, values_ref.data()); - ASSERT_NO_THROW(ov::test::utils::compare(tensor_ref, tensor)); + OV_ASSERT_NO_THROW(ov::test::utils::compare(tensor_ref, tensor)); } From e732adeaa5eddc6cc9610005584da457cb5850e6 Mon Sep 17 00:00:00 2001 From: Maxim Vafin Date: Thu, 4 Jul 2024 12:02:45 +0200 Subject: [PATCH 18/50] [PT FE] Fix issue with type alignment in aten::cat (#25354) ### Details: - *Fix regression introduced by #25077* ### Tickets: - 
*CVS-144695* - *CVS-144789* --- src/frontends/pytorch/src/op/cat.cpp | 21 ++++++++++----------- tests/layer_tests/pytorch_tests/test_cat.py | 14 +++++++++----- 2 files changed, 19 insertions(+), 16 deletions(-) diff --git a/src/frontends/pytorch/src/op/cat.cpp b/src/frontends/pytorch/src/op/cat.cpp index b8bd2d2487944e..4ae2c4ebc81af4 100644 --- a/src/frontends/pytorch/src/op/cat.cpp +++ b/src/frontends/pytorch/src/op/cat.cpp @@ -67,29 +67,28 @@ OutputVector translate_cat_common(const NodeContext& context, return input.get_element_type() != first_in_type || input.get_element_type() == ov::element::dynamic; })); + auto inputs_vec = OutputVector(list_elems.begin(), list_elems.end()); if (is_mixed_type) { - auto node_of_type = list_elems[0]; - for (size_t i = 1; i < list_elems.size(); ++i) { - node_of_type = std::make_shared(node_of_type, list_elems[i], true)->output(0); - context.mark_node(node_of_type.get_node_shared_ptr()); + auto node_of_type = inputs_vec[0]; + for (size_t i = 1; i < inputs_vec.size(); ++i) { + auto cpt = context.mark_node(std::make_shared(node_of_type, list_elems[i], true)); + node_of_type = cpt->output(0); + inputs_vec[i] = cpt->output(1); } + inputs_vec[0] = node_of_type; const auto unified_type = node_of_type.get_element_type(); - auto inputs_vec = OutputVector(list_elems.begin(), list_elems.end()); - for (size_t i = 0; i < inputs_vec.size(); ++i) { + for (size_t i = 1; i < inputs_vec.size(); ++i) { if (inputs_vec[i].get_element_type() != unified_type || inputs_vec[i].get_element_type() == ov::element::dynamic) { - inputs_vec[i] = - std::make_shared(list_elems[i].get_node_shared_ptr(), node_of_type)->output(0); - context.mark_node(inputs_vec[i].get_node_shared_ptr()); + inputs_vec[i] = context.mark_node(std::make_shared(list_elems[i], node_of_type)); } } auto concat = std::make_shared(inputs_vec, axis); return {context.mark_node(concat)}; } - auto concat = std::make_shared(OutputVector(list_elems.begin(), list_elems.end()), axis); - return 
{context.mark_node(concat)}; + return {context.mark_node(std::make_shared(inputs_vec, axis))}; } OutputVector translate_cat(const NodeContext& context) { diff --git a/tests/layer_tests/pytorch_tests/test_cat.py b/tests/layer_tests/pytorch_tests/test_cat.py index ad07d63250eb51..9e8e9f284e65dd 100644 --- a/tests/layer_tests/pytorch_tests/test_cat.py +++ b/tests/layer_tests/pytorch_tests/test_cat.py @@ -161,7 +161,7 @@ def test_align_types_cat(self, ie_device, precision, ir_version, in_types, trace class TestCatAlignTypesPT(PytorchLayerTest): def _prepare_input(self, in_types): - in_vals = [np.random.randn(2, 1, 3).astype(in_types[0])] + in_vals = [np.random.randn(2, 2, 3).astype(in_types[0])] return in_vals def create_model_param_first(self, in_types): @@ -171,7 +171,8 @@ def __init__(self): self.y = torch.randn(2, 1, 3).to(in_types[1]) def forward(self, x): - ins = [x, self.y] + x_ = torch.split(x, 1, 1)[1] + ins = [x_, self.y] return torch.cat(ins, 1) class aten_align_types_cat_three_args(torch.nn.Module): @@ -181,7 +182,8 @@ def __init__(self): self.z = torch.randn(2, 1, 3).to(in_types[2]) def forward(self, x): - ins = [x, self.y, self.z] + x_ = torch.split(x, 1, 1)[1] + ins = [x_, self.y, self.z] return torch.cat(ins, 1) in_count = len(in_types) @@ -199,7 +201,8 @@ def __init__(self): self.z = torch.randn(2, 1, 3).to(in_types[2]) def forward(self, y): - ins = [self.x, y, self.z] + y_ = torch.split(y, 1, 1)[1] + ins = [self.x, y_, self.z] return torch.cat(ins, 1) return aten_align_types_cat_three_args() @@ -211,7 +214,8 @@ def __init__(self): self.y = torch.randn(2, 1, 3).to(in_types[2]) def forward(self, z): - ins = [self.x, self.y, z] + z_ = torch.split(z, 1, 1)[1] + ins = [self.x, self.y, z_] return torch.cat(ins, 1) return aten_align_types_cat_three_args() From 3faa656c497e782a69f2b1a4d9ca9ff0d950392b Mon Sep 17 00:00:00 2001 From: Andrii Staikov Date: Thu, 4 Jul 2024 12:08:19 +0200 Subject: [PATCH 19/50] Add unique models to precommit PA tests (#25279) Add 
unique models to precommit PA tests Change the list of models covering all the big real models. Removed: hf-tiny-model-private/tiny-random-GPT2LMHeadModel hf-tiny-model-private/tiny-random-BartForCausalLM,xfail hf-tiny-model-private/tiny-random-BigBirdForCausalLM,xfail hf-tiny-model-private/tiny-random-BigBirdPegasusForCausalLM,xfail hf-tiny-model-private/tiny-random-BlenderbotSmallForCausalLM,xfail hf-tiny-model-private/tiny-random-ErnieForCausalLM,xfail hf-tiny-model-private/tiny-random-GPTNeoXJapaneseForCausalLM,xfail hf-tiny-model-private/tiny-random-MBartForCausalLM,xfail hf-tiny-model-private/tiny-random-MvpForCausalLM,xfail hf-tiny-model-private/tiny-random-PLBartForCausalLM,xfail PrunaAI/hf-tiny-model-private-tiny-random-BloomForCausalLM-bnb-4bit-smashed,xfail PrunaAI/hf-tiny-model-private-tiny-random-BloomForCausalLM-bnb-8bit-smashed,xfail PrunaAI/hf-tiny-model-private-tiny-random-BloomForCausalLM-HQQ-2bit-smashed,xfail hf-internal-testing/tiny-random-MegatronBertForCausalLM,xfail hf-internal-testing/tiny-random-BlenderbotSmallForCausalLM,xfail Added: hf-internal-testing/Mixtral-tiny hf-internal-testing/tiny-random-gpt2 katuni4ka/tiny-random-xverse katuni4ka/tiny-random-baichuan2-13b katuni4ka/tiny-random-qwen katuni4ka/tiny-random-aquilachat katuni4ka/tiny-random-aquila2 katuni4ka/tiny-random-qwen1.5-moe katuni4ka/tiny-random-codegen2 katuni4ka/tiny-random-olmo-hf katuni4ka/tiny-random-baichuan2 katuni4ka/tiny-random-jais katuni4ka/tiny-random-internlm katuni4ka/tiny-random-internlm2 katuni4ka/tiny-random-minicpm fxmarty/tiny-random-GemmaForCausalLM fxmarty/tiny-dummy-qwen2 fxmarty/really-tiny-falcon-testing Xenova/tiny-random-Phi3ForCausalLM facebook/opt-350m katuni4ka/tiny-random-dbrx katuni4ka/tiny-random-falcon-40b katuni4ka/tiny-random-orion,xfail,No ScaledDotProductAttention operation observed in the graph katuni4ka/tiny-random-chatglm2,xfail,Model references undeclared parameters: beam_idx () katuni4ka/tiny-random-glm4,xfail,Model references 
undeclared parameters beam_idx () attention_mask () ### Tickets: - CVS-145243 Signed-off-by: Andrii Staikov --------- Signed-off-by: Andrii Staikov --- .../workflows/job_pytorch_models_tests.yml | 4 +- .../models/hf-tiny-random-models-precommit | 96 ++++++++----------- .../model_hub_tests/pytorch/requirements.txt | 7 +- .../pytorch/test_pa_transformation.py | 2 +- 4 files changed, 51 insertions(+), 58 deletions(-) diff --git a/.github/workflows/job_pytorch_models_tests.yml b/.github/workflows/job_pytorch_models_tests.yml index 17828576336bfc..c89bb1b71d12ea 100644 --- a/.github/workflows/job_pytorch_models_tests.yml +++ b/.github/workflows/job_pytorch_models_tests.yml @@ -135,7 +135,7 @@ jobs: if: always() run: | export PYTHONPATH=${MODEL_HUB_TESTS_INSTALL_DIR}:$PYTHONPATH - python3 -m pytest ${MODEL_HUB_TESTS_INSTALL_DIR}/pytorch -m ${TYPE} --html=${INSTALL_TEST_DIR}/TEST-torch_model_tests.html --self-contained-html -v -k "not (TestTimmConvertModel or TestTorchHubConvertModel)" + python3 -m pytest ${MODEL_HUB_TESTS_INSTALL_DIR}/pytorch -m ${TYPE} --html=${INSTALL_TEST_DIR}/TEST-torch_model_tests.html --self-contained-html -v -k "not (TestTimmConvertModel or TestTorchHubConvertModel or test_pa_precommit)" env: TYPE: ${{ inputs.event == 'schedule' && 'nightly' || 'precommit'}} TEST_DEVICE: CPU @@ -146,7 +146,7 @@ jobs: if: always() run: | export PYTHONPATH=${MODEL_HUB_TESTS_INSTALL_DIR}:$PYTHONPATH - python3 -m pytest ${MODEL_HUB_TESTS_INSTALL_DIR}/pytorch/test_pa_transformation.py -m ${TYPE} --html=${INSTALL_TEST_DIR}/TEST-torch_pagedattention_tests.html --self-contained-html -v --tb=short + python3 -m pytest ${MODEL_HUB_TESTS_INSTALL_DIR}/pytorch/test_pa_transformation.py -m ${TYPE} --html=${INSTALL_TEST_DIR}/TEST-torch_pagedattention_tests.html --self-contained-html -v --tb=short -n 4 env: TYPE: ${{ inputs.event == 'schedule' && 'nightly' || 'precommit'}} TEST_DEVICE: CPU diff --git a/tests/model_hub_tests/pytorch/models/hf-tiny-random-models-precommit 
b/tests/model_hub_tests/pytorch/models/hf-tiny-random-models-precommit index bab7d1ff0a8676..912b4d7d1517bb 100644 --- a/tests/model_hub_tests/pytorch/models/hf-tiny-random-models-precommit +++ b/tests/model_hub_tests/pytorch/models/hf-tiny-random-models-precommit @@ -1,58 +1,46 @@ - facebook/opt-125m,https://huggingface.co/facebook/opt-125m - hf-tiny-model-private/tiny-random-CodeGenForCausalLM,https://huggingface.co/hf-tiny-model-private/tiny-random-CodeGenForCausalLM -hf-tiny-model-private/tiny-random-GPT2LMHeadModel,https://huggingface.co/hf-tiny-model-private/tiny-random-GPT2LMHeadModel -hf-tiny-model-private/tiny-random-OPTForCausalLM,https://huggingface.co/hf-tiny-model-private/tiny-random-OPTForCausalLM -hf-tiny-model-private/tiny-random-GPTJForCausalLM,https://huggingface.co/hf-tiny-model-private/tiny-random-GPTJForCausalLM -hf-tiny-model-private/tiny-random-BloomForCausalLM,https://huggingface.co/hf-tiny-model-private/tiny-random-BloomForCausalLM -hf-tiny-model-private/tiny-random-GPTNeoForCausalLM,https://huggingface.co/hf-tiny-model-private/tiny-random-GPTNeoForCausalLM -hf-tiny-model-private/tiny-random-GPTNeoXForCausalLM,https://huggingface.co/hf-tiny-model-private/tiny-random-GPTNeoXForCausalLM -hf-internal-testing/tiny-random-GPTNeoForCausalLM,https://huggingface.co/hf-internal-testing/tiny-random-GPTNeoForCausalLM -hf-internal-testing/tiny-random-MptForCausalLM,https://huggingface.co/hf-internal-testing/tiny-random-MptForCausalLM -hf-internal-testing/tiny-random-BloomForCausalLM,https://huggingface.co/hf-internal-testing/tiny-random-BloomForCausalLM -hf-internal-testing/tiny-random-GPTJForCausalLM,https://huggingface.co/hf-internal-testing/tiny-random-GPTJForCausalLM +hf-internal-testing/tiny-random-LlamaForCausalLM,https://huggingface.co/trl-internal-testing/tiny-random-LlamaForCausalLM hf-internal-testing/tiny-random-CohereForCausalLM,https://huggingface.co/hf-internal-testing/tiny-random-CohereForCausalLM 
-hf-internal-testing/tiny-random-FalconForCausalLM,https://huggingface.co/hf-internal-testing/tiny-random-FalconForCausalLM -hf-tiny-model-private/tiny-random-CodeGenForCausalLM,https://huggingface.co/hf-internal-testing/tiny-random-CodeGenForCausalLM -hf-tiny-model-private/tiny-random-OPTForCausalLM,https://huggingface.co/hf-tiny-model-private/tiny-random-OPTForCausalLM -hf-internal-testing/tiny-random-MistralForCausalLM,https://huggingface.co/hf-internal-testing/tiny-random-OPTForCausalLM +hf-internal-testing/tiny-random-GPTJForCausalLM,https://huggingface.co/trl-internal-testing/tiny-random-GPTJForCausalLM +hf-internal-testing/tiny-random-GPTNeoForCausalLM,https://huggingface.co/hf-internal-testing/tiny-random-GPTNeoForCausalLM hf-internal-testing/tiny-random-GPTNeoXForCausalLM,https://huggingface.co/hf-internal-testing/tiny-random-GPTNeoXForCausalLM -hf-internal-testing/tiny-random-LlamaForCausalLM,https://huggingface.co/trl-internal-testing/tiny-random-LlamaForCausalLM -hf-internal-testing/tiny-random-StableLmForCausalLM,https://huggingface.co/hf-internal-testing/tiny-random-StableLmForCausalLM -hf-internal-testing/tiny-random-PhiForCausalLM,https://huggingface.co/hf-internal-testing/tiny-random-PhiForCausalLM +hf-internal-testing/tiny-random-MistralForCausalLM,https://huggingface.co/hf-internal-testing/tiny-random-MistralForCausalLM hf-internal-testing/tiny-random-CodeGenForCausalLM,https://huggingface.co/hf-internal-testing/tiny-random-CodeGenForCausalLM +hf-internal-testing/Mixtral-tiny,https://huggingface.co/hf-internal-testing/Mixtral-tiny +hf-internal-testing/tiny-random-GPTBigCodeForCausalLM,https://huggingface.co/hf-internal-testing/tiny-random-GPTBigCodeForCausalLM hf-internal-testing/tiny-random-Starcoder2ForCausalLM,https://huggingface.co/hf-internal-testing/tiny-random-Starcoder2ForCausalLM -hf-internal-testing/tiny-random-OPTForCausalLM,https://huggingface.co/hf-internal-testing/tiny-random-OPTForCausalLM 
-hf-tiny-model-private/tiny-random-BartForCausalLM,https://huggingface.co/hf-tiny-model-private/tiny-random-BartForCausalLM,xfail,not working -hf-tiny-model-private/tiny-random-BigBirdForCausalLM,https://huggingface.co/hf-tiny-model-private/tiny-random-BigBirdForCausalLM,xfail,not working -hf-tiny-model-private/tiny-random-BigBirdPegasusForCausalLM,https://huggingface.co/hf-tiny-model-private/tiny-random-BigBirdPegasusForCausalLM,xfail,not working -hf-tiny-model-private/tiny-random-BioGptForCausalLM,https://huggingface.co/hf-internal-testing/tiny-random-BioGptForCausalLM,xfail,not working -hf-tiny-model-private/tiny-random-BlenderbotSmallForCausalLM,https://huggingface.co/hf-internal-testing/tiny-random-BlenderbotSmallForCausalLM,xfail,not working -hf-tiny-model-private/tiny-random-BlenderbotForCausalLM,https://huggingface.co/hf-internal-testing/tiny-random-BlenderbotForCausalLM,xfail,not working -hf-tiny-model-private/tiny-random-ErnieForCausalLM,https://huggingface.co/hf-internal-testing/tiny-random-ErnieForCausalLM,xfail,not working -hf-tiny-model-private/tiny-random-GPTNeoXJapaneseForCausalLM,https://huggingface.co/hf-internal-testing/tiny-random-GPTNeoXJapaneseForCausalLM,xfail,not working -hf-tiny-model-private/tiny-random-MBartForCausalLM,https://huggingface.co/hf-internal-testing/tiny-random-MBartForCausalLM,xfail,not working -hf-tiny-model-private/tiny-random-MvpForCausalLM,https://huggingface.co/hf-internal-testing/tiny-random-MvpForCausalLM,xfail,not working -hf-tiny-model-private/tiny-random-PegasusForCausalLM,https://huggingface.co/hf-internal-testing/tiny-random-PegasusForCausalLM,xfail,not working -hf-tiny-model-private/tiny-random-PLBartForCausalLM,https://huggingface.co/hf-internal-testing/tiny-random-PLBartForCausalLM,xfail,not working -hf-tiny-model-private/tiny-random-XGLMForCausalLM,https://huggingface.co/hf-internal-testing/tiny-random-XGLMForCausalLM,xfail,not working -# 
PrunaAI/hf-tiny-model-private-tiny-random-BloomForCausalLM-bnb-4bit-smashed, -# PrunaAI/hf-tiny-model-private-tiny-random-BloomForCausalLM-bnb-8bit-smashed, -# PrunaAI/hf-tiny-model-private-tiny-random-BloomForCausalLM-HQQ-2bit-smashed, -hf-internal-testing/tiny-random-PersimmonForCausalLM,https://huggingface.co/hf-internal-testing/tiny-random-PersimmonForCausalLM,xfail,not working -hf-internal-testing/tiny-random-BartForCausalLM,https://huggingface.co/hf-internal-testing/tiny-random-BartForCausalLM,xfail,not working -hf-internal-testing/tiny-random-GPTBigCodeForCausalLM,https://huggingface.co/hf-internal-testing/tiny-random-GPTBigCodeForCausalLM,xfail,not working -hf-internal-testing/tiny-random-XGLMForCausalLM,https://huggingface.co/hf-internal-testing/tiny-random-XGLMForCausalLM,xfail,not working -hf-internal-testing/tiny-random-PegasusForCausalLM,https://huggingface.co/hf-internal-testing/tiny-random-PegasusForCausalLM,xfail,not working -hf-internal-testing/tiny-random-MBartForCausalLM,https://huggingface.co/hf-internal-testing/tiny-random-MBartForCausalLM,xfail,not working -hf-internal-testing/tiny-random-BigBirdPegasusForCausalLM,https://huggingface.co/hf-internal-testing/tiny-random-BigBirdPegasusForCausalLM,xfail,not working -hf-internal-testing/tiny-random-BigBirdForCausalLM,https://huggingface.co/hf-internal-testing/tiny-random-BigBirdForCausalLM,xfail,not working -hf-internal-testing/tiny-random-MegaForCausalLM,https://huggingface.co/hf-internal-testing/tiny-random-MegaForCausalLM,xfail,not working -hf-internal-testing/tiny-random-RobertaPreLayerNormForCausalLM,https://huggingface.co/hf-internal-testing/tiny-random-RobertaPreLayerNormForCausalLM,xfail,not working -hf-internal-testing/tiny-random-BioGptForCausalLM,https://huggingface.co/hf-internal-testing/tiny-random-BioGptForCausalLM,xfail,not working -hf-internal-testing/tiny-random-ProphetNetForCausalLM,https://huggingface.co/hf-internal-testing/tiny-random-ProphetNetForCausalLM,xfail,not working 
-hf-internal-testing/tiny-random-PLBartForCausalLM,https://huggingface.co/hf-internal-testing/tiny-random-PLBartForCausalLM,xfail,not working -hf-internal-testing/tiny-random-MegatronBertForCausalLM,https://huggingface.co/hf-internal-testing/tiny-random-MegatronBertForCausalLM,xfail,not working -hf-internal-testing/tiny-random-GPTNeoXJapaneseForCausalLM,https://huggingface.co/hf-internal-testing/tiny-random-GPTNeoXJapaneseForCausalLM,xfail,not working -hf-internal-testing/tiny-random-ErnieForCausalLM,https://huggingface.co/hf-internal-testing/tiny-random-ErnieForCausalLM,xfail,not working -hf-internal-testing/tiny-random-BlenderbotForCausalLM,https://huggingface.co/hf-internal-testing/tiny-random-BlenderbotSmallForCausalLM,xfail,not working -hf-internal-testing/tiny-random-BlenderbotSmallForCausalLM,https://huggingface.co/hf-internal-testing/tiny-random-BlenderbotSmallForCausalLM,xfail,not working \ No newline at end of file +hf-internal-testing/tiny-random-BloomForCausalLM,https://huggingface.co/hf-internal-testing/tiny-random-BloomForCausalLM +hf-internal-testing/tiny-random-gpt2,https://huggingface.co/hf-internal-testing/tiny-random-gpt2 +hf-internal-testing/tiny-random-BlenderbotForCausalLM,https://huggingface.co/hf-internal-testing/tiny-random-BlenderbotForCausalLM +hf-internal-testing/tiny-random-PegasusForCausalLM,https://huggingface.co/hf-internal-testing/tiny-random-PegasusForCausalLM +hf-internal-testing/tiny-random-PhiForCausalLM,https://huggingface.co/hf-internal-testing/tiny-random-PhiForCausalLM +hf-internal-testing/tiny-random-MptForCausalLM,https://huggingface.co/hf-internal-testing/tiny-random-MptForCausalLM +hf-internal-testing/tiny-random-StableLmForCausalLM,https://huggingface.co/hf-internal-testing/tiny-random-StableLmForCausalLM +hf-internal-testing/tiny-random-PersimmonForCausalLM,https://huggingface.co/hf-internal-testing/tiny-random-PersimmonForCausalLM 
+hf-internal-testing/tiny-random-FalconForCausalLM,https://huggingface.co/hf-internal-testing/tiny-random-FalconForCausalLM +hf-tiny-model-private/tiny-random-OPTForCausalLM,https://huggingface.co/hf-tiny-model-private/tiny-random-OPTForCausalLM +katuni4ka/tiny-random-xverse,https://huggingface.co/katuni4ka/tiny-random-xverse +katuni4ka/tiny-random-baichuan2-13b,https://huggingface.co/katuni4ka/tiny-random-baichuan2-13b +katuni4ka/tiny-random-qwen,https://huggingface.co/katuni4ka/tiny-random-qwen +katuni4ka/tiny-random-aquilachat,https://huggingface.co/katuni4ka/tiny-random-aquilachat +katuni4ka/tiny-random-aquila2,https://huggingface.co/katuni4ka/tiny-random-aquila2 +katuni4ka/tiny-random-qwen1.5-moe,https://huggingface.co/katuni4ka/tiny-random-qwen1.5-moe +katuni4ka/tiny-random-codegen2,https://huggingface.co/katuni4ka/tiny-random-codegen2 +katuni4ka/tiny-random-olmo-hf,https://huggingface.co/katuni4ka/tiny-random-olmo-hf +katuni4ka/tiny-random-baichuan2,https://huggingface.co/katuni4ka/tiny-random-baichuan2 +katuni4ka/tiny-random-jais,https://huggingface.co/katuni4ka/tiny-random-jais +katuni4ka/tiny-random-internlm,https://huggingface.co/katuni4ka/tiny-random-internlm +katuni4ka/tiny-random-internlm2,https://huggingface.co/katuni4ka/tiny-random-internlm2 +katuni4ka/tiny-random-minicpm,https://huggingface.co/katuni4ka/tiny-random-minicpm +katuni4ka/tiny-random-falcon-40b,https://huggingface.co/katuni4ka/tiny-random-falcon-40b +katuni4ka/tiny-random-dbrx,https://huggingface.co/katuni4ka/tiny-random-dbrx +fxmarty/tiny-random-GemmaForCausalLM,https://huggingface.co/fxmarty/tiny-random-GemmaForCausalLM +fxmarty/tiny-dummy-qwen2,https://huggingface.co/fxmarty/tiny-dummy-qwen2 +fxmarty/really-tiny-falcon-testing,https://huggingface.co/fxmarty/really-tiny-falcon-testing +Xenova/tiny-random-Phi3ForCausalLM,https://huggingface.co/Xenova/tiny-random-Phi3ForCausalLM +facebook/opt-125m,https://huggingface.co/facebook/opt-125m 
+facebook/opt-350m,https://huggingface.co/facebook/opt-350m +hf-internal-testing/tiny-random-BioGptForCausalLM,https://huggingface.co/hf-internal-testing/tiny-random-BioGptForCausalLM,xfail,No ScaledDotProductAttention operation observed in the graph CVS-145820 +hf-internal-testing/tiny-random-XGLMForCausalLM,https://huggingface.co/hf-tiny-model-private/tiny-random-XGLMForCausalLM,xfail,No ScaledDotProductAttention operation observed in the graph CVS-145820 +katuni4ka/tiny-random-orion,https://huggingface.co/katuni4ka/tiny-random-orion,xfail,No ScaledDotProductAttention operation observed in the graph CVS-145820 +katuni4ka/tiny-random-chatglm2,https://huggingface.co/katuni4ka/tiny-random-chatglm2,xfail,Model references undeclared parameters: beam_idx () CVS-145820 +katuni4ka/tiny-random-glm4,https://huggingface.co/katuni4ka/tiny-random-glm4,xfail,Model references undeclared parameters beam_idx () attention_mask () CVS-145820 \ No newline at end of file diff --git a/tests/model_hub_tests/pytorch/requirements.txt b/tests/model_hub_tests/pytorch/requirements.txt index ac134fbd38c255..a6114d6196b99c 100644 --- a/tests/model_hub_tests/pytorch/requirements.txt +++ b/tests/model_hub_tests/pytorch/requirements.txt @@ -33,4 +33,9 @@ hf_transfer # requirements for specific models # - hf-tiny-model-private/tiny-random-RoFormerForCausalLM -rjieba \ No newline at end of file +rjieba + +# - katuni4ka/tiny-random-qwen +# - katuni4ka/tiny-random-internlm2 +transformers_stream_generator +einops diff --git a/tests/model_hub_tests/pytorch/test_pa_transformation.py b/tests/model_hub_tests/pytorch/test_pa_transformation.py index d077bb1fd5f8fb..eed623d1440f74 100644 --- a/tests/model_hub_tests/pytorch/test_pa_transformation.py +++ b/tests/model_hub_tests/pytorch/test_pa_transformation.py @@ -9,7 +9,7 @@ import os def run_pa(tmp_path, model_id, model_link): - model = OVModelForCausalLM.from_pretrained(model_id, export=True) + model = OVModelForCausalLM.from_pretrained(model_id, 
export=True, trust_remote_code=True) paged_attention_transformation(model.model) From bbb32b3580cedae3d58920e8cd4bad2e5f87548e Mon Sep 17 00:00:00 2001 From: Vishniakov Nikolai Date: Thu, 4 Jul 2024 12:33:23 +0200 Subject: [PATCH 20/50] [OV JS] Allow partial shape creation without parameter (#25311) ### Details: - Call `getPartialShape` method on dynamic input/output lead to error. This change fix that. --- src/bindings/js/node/lib/addon.ts | 100 ++++++++++-------- .../js/node/src/partial_shape_wrap.cpp | 21 ++-- .../js/node/tests/partial_shape.test.js | 8 +- 3 files changed, 73 insertions(+), 56 deletions(-) diff --git a/src/bindings/js/node/lib/addon.ts b/src/bindings/js/node/lib/addon.ts index 00ab7a92771ad0..5a48ce963b0e45 100644 --- a/src/bindings/js/node/lib/addon.ts +++ b/src/bindings/js/node/lib/addon.ts @@ -124,8 +124,8 @@ interface Core { }; /** * It imports a previously exported compiled model. - * @param modelStream The input stream that contains a model, previously exported - * with the {@link CompiledModel.exportModelSync} method. + * @param modelStream The input stream that contains a model, + * previously exported with the {@link CompiledModel.exportModelSync} method. * @param device The name of a device, for which you import a compiled model. * Note, if the device name was not used to compile the original model, * an exception is thrown. @@ -141,9 +141,10 @@ interface Core { * It reads models from the IR / ONNX / PDPD / TF and TFLite formats. * @param modelPath The path to a model * in the IR / ONNX / PDPD / TF or TFLite format. - * @param weightsPath The path to a data file for the IR format (.bin): if the path - * is empty, it tries to read the bin file with the same name as xml and if - * the bin file with the same name was not found, it loads IR without weights. 
+ * @param weightsPath The path to a data file for the IR format (.bin): + * if the path is empty, it tries to read the bin file with the same name + * as xml and if the bin file with the same name was not found, it loads + * IR without weights. * For the ONNX format (.onnx), the weights parameter is not used. * For the PDPD format (.pdmodel), the weights parameter is not used. * For the TF format (.pb), the weights parameter is not used. @@ -232,10 +233,10 @@ interface Model { * shape. */ isDynamic(): boolean; - /** - * It gets the output of the model. - * If a model has more than one output, this method throws an exception. - */ + /** + * It gets the output of the model. + * If a model has more than one output, this method throws an exception. + */ output(): Output; /** * It gets the output of the model identified by the tensor name. @@ -336,12 +337,12 @@ interface Tensor { * * Its getter returns a subclass of TypedArray that corresponds to the * tensor element type, e.g. Float32Array corresponds to float32. The - * content of the TypedArray subclass is a copy of the tensor underlaying + * content of the TypedArray subclass is a copy of the tensor underlaying * memory. * * Its setter fills the underlaying tensor memory by copying the binary data - * buffer from the TypedArray subclass. An exception will be thrown if the size - * or type of array does not match the tensor. + * buffer from the TypedArray subclass. An exception will be thrown if the + * size or type of array does not match the tensor. */ data: SupportedTypedArray; /** @@ -369,21 +370,21 @@ interface Tensor { * * @remarks * The tensor memory is shared with the TypedArray. That is, - * the responsibility for maintaining the reference to the TypedArray lies with + * the responsibility for maintaining the reference to the TypedArray lies with * the user. Any action performed on the TypedArray will be reflected in this * tensor memory. 
*/ interface TensorConstructor { /** - * It constructs a tensor using the element type and shape. The new tensor data - * will be allocated by default. + * It constructs a tensor using the element type and shape. The new tensor + * data will be allocated by default. * @param type The element type of the new tensor. * @param shape The shape of the new tensor. */ new(type: element | elementTypeString, shape: number[]): Tensor; /** - * It constructs a tensor using the element type and shape. The new tensor wraps - * allocated host memory. + * It constructs a tensor using the element type and shape. The new tensor + * wraps allocated host memory. * @param type The element type of the new tensor. * @param shape The shape of the new tensor. * @param tensorData A subclass of TypedArray that will be wrapped @@ -392,7 +393,7 @@ interface TensorConstructor { new(type: element | elementTypeString, shape: number[], tensorData: SupportedTypedArray): Tensor; /** - * It constructs a tensor using the element type and shape. The strings from + * It constructs a tensor using the element type and shape. The strings from * the array are used to fill the new tensor. Each element of a string tensor * is a string of arbitrary length, including an empty string. */ @@ -412,33 +413,33 @@ interface InferRequest { * Inputs have to be specified earlier using {@link InferRequest.setTensor} * or {@link InferRequest.setInputTensor} */ - infer(): { [outputName: string] : Tensor}; + infer(): { [outputName: string]: Tensor }; /** * It infers specified input(s) in the synchronous mode. * @param inputData An object with the key-value pairs where the key is the - * input name and value can be either a tensor or a TypedArray. TypedArray - * will be wrapped into Tensor underneath using the input shape and element type - * of the deployed model. + * input name and value can be either a tensor or a TypedArray. 
+ * TypedArray will be wrapped into Tensor underneath using the input shape + * and element type of the deployed model. */ - infer(inputData: { [inputName: string]: Tensor | SupportedTypedArray}) - : { [outputName: string] : Tensor}; + infer(inputData: { [inputName: string]: Tensor | SupportedTypedArray }) + : { [outputName: string]: Tensor }; /** * It infers specified input(s) in the synchronous mode. * @param inputData An array with tensors or TypedArrays. TypedArrays will be * wrapped into Tensors underneath using the input shape and element type - * of the deployed model. If the model has multiple inputs, the Tensors + * of the deployed model. If the model has multiple inputs, the Tensors * and TypedArrays must be passed in the correct order. */ infer(inputData: Tensor[] | SupportedTypedArray[]) - : { [outputName: string] : Tensor}; + : { [outputName: string]: Tensor }; /** * It infers specified input(s) in the asynchronous mode. * @param inputData An object with the key-value pairs where the key is the - * input name and value is a tensor or an array with tensors. If the model has + * input name and value is a tensor or an array with tensors. If the model has * multiple inputs, the Tensors must be passed in the correct order. */ - inferAsync(inputData: { [inputName: string]: Tensor} - | Tensor[] ): Promise<{ [outputName: string] : Tensor}>; + inferAsync(inputData: { [inputName: string]: Tensor } + | Tensor[]): Promise<{ [outputName: string]: Tensor }>; /** * It gets the compiled model used by the InferRequest object. */ @@ -462,12 +463,12 @@ interface InferRequest { * an exception is thrown. */ getOutputTensor(): Tensor; - /** - * It gets the output tensor for inference. - * @param idx An index of the tensor to get. - * @returns A tensor at the specified index. If the tensor with the specified - * idx is not found, an exception is thrown. - */ + /** + * It gets the output tensor for inference. + * @param idx An index of the tensor to get. 
+ * @returns A tensor at the specified index. If the tensor with the specified + * idx is not found, an exception is thrown. + */ getOutputTensor(idx?: number): Tensor; /** * It gets an input/output tensor for inference. @@ -481,8 +482,8 @@ interface InferRequest { /** * It sets the input tensor to infer models with a single input. * @param tensor The input tensor. The element type and shape of the tensor - * must match the type and size of the model's input element. If the model has several - * inputs, an exception is thrown. + * must match the type and size of the model's input element. If the model + * has several inputs, an exception is thrown. */ setInputTensor(tensor: Tensor): void; /** @@ -496,8 +497,8 @@ interface InferRequest { /** * It sets the output tensor to infer models with a single output. * @param tensor The output tensor. The element type and shape of the tensor - * must match the output element type and size of the model. If the model has several - * outputs, an exception is thrown. + * must match the output element type and size of the model. If the model + * has several outputs, an exception is thrown. */ setOutputTensor(tensor: Tensor): void; /** @@ -528,13 +529,13 @@ interface Output { } interface InputTensorInfo { - setElementType(elementType: element | elementTypeString ): InputTensorInfo; + setElementType(elementType: element | elementTypeString): InputTensorInfo; setLayout(layout: string): InputTensorInfo; setShape(shape: number[]): InputTensorInfo; } interface OutputTensorInfo { - setElementType(elementType: element | elementTypeString ): InputTensorInfo; + setElementType(elementType: element | elementTypeString): InputTensorInfo; setLayout(layout: string): InputTensorInfo; } interface PreProcessSteps { @@ -570,8 +571,17 @@ interface PartialShape { toString(): string; getDimensions(): Dimension[]; } + +/** + * This interface contains constructor of the {@link PartialShape} class. 
+ */ interface PartialShapeConstructor { - new(shape: string): PartialShape; + /** + * It constructs a PartialShape by passed string. + * Omit parameter to create empty shape. + * @param [shape] String representation of the shape. + */ + new(shape?: string): PartialShape; } declare enum element { @@ -607,6 +617,6 @@ export interface NodeAddon { } export default - // eslint-disable-next-line @typescript-eslint/no-var-requires - require('../bin/ov_node_addon.node') as - NodeAddon; + // eslint-disable-next-line @typescript-eslint/no-var-requires + require('../bin/ov_node_addon.node') as + NodeAddon; diff --git a/src/bindings/js/node/src/partial_shape_wrap.cpp b/src/bindings/js/node/src/partial_shape_wrap.cpp index a32bc87a88bfdf..fc128991a9dd90 100644 --- a/src/bindings/js/node/src/partial_shape_wrap.cpp +++ b/src/bindings/js/node/src/partial_shape_wrap.cpp @@ -6,20 +6,21 @@ #include "node/include/addon.hpp" #include "node/include/errors.hpp" #include "node/include/helper.hpp" +#include "node/include/type_validation.hpp" PartialShapeWrap::PartialShapeWrap(const Napi::CallbackInfo& info) : Napi::ObjectWrap(info) { - const size_t attrs_length = info.Length(); + std::vector allowed_signatures; - if (attrs_length == 1 && info[0].IsString()) { - try { - const auto& shape = std::string(info[0].ToString()); - - _partial_shape = ov::PartialShape(shape); - } catch (std::exception& e) { - reportError(info.Env(), e.what()); + try { + if (ov::js::validate(info, allowed_signatures)) { + _partial_shape = ov::PartialShape(info[0].ToString()); + } else if (ov::js::validate(info, allowed_signatures)) { + return; + } else { + OPENVINO_THROW("'PartialShape' constructor", ov::js::get_parameters_error_msg(info, allowed_signatures)); } - } else { - reportError(info.Env(), "Invalid parameters for PartialShape constructor."); + } catch (std::exception& err) { + reportError(info.Env(), err.what()); } } diff --git a/src/bindings/js/node/tests/partial_shape.test.js 
b/src/bindings/js/node/tests/partial_shape.test.js index 80845d9942157b..6acc8057a89619 100644 --- a/src/bindings/js/node/tests/partial_shape.test.js +++ b/src/bindings/js/node/tests/partial_shape.test.js @@ -10,12 +10,18 @@ const staticShape = '1, 3, 224, 224'; const dynamicShape = '?, -1, 1..3, 224'; describe('PartialShape', () => { + it('Allows create empty shape', () => { + const partialShape = new ov.PartialShape(); + + assert.strictEqual(partialShape.toString(), '[]'); + }); + it('Should detect static shape', () => { const partialShape = new ov.PartialShape(staticShape); assert.ok(partialShape.isStatic()); }); - + it('Should detect dynamic shape', () => { const partialShape = new ov.PartialShape(dynamicShape); From bf84ceca9234931d081412988d3243dafcbc84f8 Mon Sep 17 00:00:00 2001 From: Tatiana Savina Date: Thu, 4 Jul 2024 13:04:58 +0200 Subject: [PATCH 21/50] [DOCS] Running inference comments (#25285) ### Details: - *item1* - *...* ### Tickets: - 145078 --------- Co-authored-by: Karol Blaszczak --- .../assets/snippets/compile_model_npu.cpp | 12 ++++++++++++ .../assets/snippets/compile_model_npu.py | 18 ++++++++++++++++++ .../inference-devices-and-modes.rst | 9 ++++++++- .../npu-device.rst | 19 +++++++++++++++++++ ...tegrate-openvino-with-your-application.rst | 6 +++++- 5 files changed, 62 insertions(+), 2 deletions(-) create mode 100644 docs/articles_en/assets/snippets/compile_model_npu.cpp create mode 100644 docs/articles_en/assets/snippets/compile_model_npu.py diff --git a/docs/articles_en/assets/snippets/compile_model_npu.cpp b/docs/articles_en/assets/snippets/compile_model_npu.cpp new file mode 100644 index 00000000000000..e4fb38437bac1e --- /dev/null +++ b/docs/articles_en/assets/snippets/compile_model_npu.cpp @@ -0,0 +1,12 @@ +#include + +int main() { +{ + //! [compile_model_default_npu] + ov::Core core; + auto model = core.read_model("model.xml"); + auto compiled_model = core.compile_model(model, "NPU"); + //! 
[compile_model_default_npu] +} + return 0; +} diff --git a/docs/articles_en/assets/snippets/compile_model_npu.py b/docs/articles_en/assets/snippets/compile_model_npu.py new file mode 100644 index 00000000000000..d4b4e4d90df40d --- /dev/null +++ b/docs/articles_en/assets/snippets/compile_model_npu.py @@ -0,0 +1,18 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import openvino as ov +from snippets import get_model + + +def main(): + model = get_model() + + core = ov.Core() + if "NPU" not in core.available_devices: + return 0 + + #! [compile_model_default_npu] + core = ov.Core() + compiled_model = core.compile_model(model, "NPU") + #! [compile_model_default_npu] diff --git a/docs/articles_en/openvino-workflow/running-inference/inference-devices-and-modes.rst b/docs/articles_en/openvino-workflow/running-inference/inference-devices-and-modes.rst index c11696ce07bc18..65dbde26410085 100644 --- a/docs/articles_en/openvino-workflow/running-inference/inference-devices-and-modes.rst +++ b/docs/articles_en/openvino-workflow/running-inference/inference-devices-and-modes.rst @@ -36,7 +36,7 @@ different conditions: | :doc:`Heterogeneous Execution (HETERO) ` | :doc:`Automatic Batching Execution (Auto-batching) ` - +To learn how to change the device configuration, read the :doc:`Query device properties article `. 
Enumerating Available Devices ####################################### @@ -83,3 +83,10 @@ Accordingly, the code that loops over all available devices of the "GPU" type on :language: cpp :fragment: [part3] +Additional Resources +#################### + +* `OpenVINO™ Runtime API Tutorial <./../../notebooks/openvino-api-with-output.html>`__ +* `AUTO Device Tutorial <./../../notebooks/auto-device-with-output.html>`__ +* `GPU Device Tutorial <./../../notebooks/gpu-device-with-output.html>`__ +* `NPU Device Tutorial <./../../notebooks/hello-npu-with-output.html>`__ \ No newline at end of file diff --git a/docs/articles_en/openvino-workflow/running-inference/inference-devices-and-modes/npu-device.rst b/docs/articles_en/openvino-workflow/running-inference/inference-devices-and-modes/npu-device.rst index 5a21f2f7ccc0f8..d490881eaea09a 100644 --- a/docs/articles_en/openvino-workflow/running-inference/inference-devices-and-modes/npu-device.rst +++ b/docs/articles_en/openvino-workflow/running-inference/inference-devices-and-modes/npu-device.rst @@ -30,6 +30,25 @@ of the model into a proprietary format. The compiler included in the user mode d platform specific optimizations in order to efficiently schedule the execution of network layers and memory transactions on various NPU hardware submodules. +To use NPU for inference, pass the device name to the ``ov::Core::compile_model()`` method: + +.. tab-set:: + + .. tab-item:: Python + :sync: py + + .. doxygensnippet:: docs/articles_en/assets/snippets/compile_model_npu.py + :language: py + :fragment: [compile_model_default_npu] + + .. tab-item:: C++ + :sync: cpp + + .. 
doxygensnippet:: docs/articles_en/assets/snippets/compile_model_npu.cpp + :language: cpp + :fragment: [compile_model_default_npu] + + Model Caching ############################# diff --git a/docs/articles_en/openvino-workflow/running-inference/integrate-openvino-with-your-application.rst b/docs/articles_en/openvino-workflow/running-inference/integrate-openvino-with-your-application.rst index 222c8760d0a880..17cc59e4505e6c 100644 --- a/docs/articles_en/openvino-workflow/running-inference/integrate-openvino-with-your-application.rst +++ b/docs/articles_en/openvino-workflow/running-inference/integrate-openvino-with-your-application.rst @@ -226,9 +226,12 @@ Compile the model for a specific device using ``ov::Core::compile_model()``: The ``ov::Model`` object represents any models inside the OpenVINO™ Runtime. For more details please read article about :doc:`OpenVINO™ Model representation `. +OpenVINO includes experimental support for NPU, learn more in the +:doc:`NPU Device section <./inference-devices-and-modes/npu-device>` + The code above creates a compiled model associated with a single hardware device from the model object. It is possible to create as many compiled models as needed and use them simultaneously (up to the limitation of the hardware). -To learn how to change the device configuration, read the :doc:`Query device properties ` article. +To learn more about supported devices and inference modes, read the :doc:`Inference Devices and Modes <./inference-devices-and-modes>` article. Step 3. 
Create an Inference Request ################################### @@ -432,6 +435,7 @@ To build your project using CMake with the default build tools currently availab Additional Resources #################### +* `OpenVINO™ Runtime API Tutorial <./../../notebooks/openvino-api-with-output.html>`__ * See the :doc:`OpenVINO Samples <../../learn-openvino/openvino-samples>` page for specific examples of how OpenVINO pipelines are implemented for applications like image classification, text prediction, and many others. * Models in the OpenVINO IR format on `Hugging Face `__. * :doc:`OpenVINO™ Runtime Preprocessing ` From 4204470f1648696732113bdcb1f50edc88b1dcc3 Mon Sep 17 00:00:00 2001 From: Chen Peter Date: Thu, 4 Jul 2024 19:32:24 +0800 Subject: [PATCH 22/50] Roll Linux X86 oneTBB back to 2021.2.4 (#25347) Reason: LLM beam search perf regression on XEON platforms Signed-off-by: Chen Peter --- cmake/dependencies.cmake | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmake/dependencies.cmake b/cmake/dependencies.cmake index 219d464682b016..6edda8136b338f 100644 --- a/cmake/dependencies.cmake +++ b/cmake/dependencies.cmake @@ -104,10 +104,10 @@ function(ov_download_tbb) elseif(LINUX AND X86_64 AND OPENVINO_GNU_LIBC AND OV_LIBC_VERSION VERSION_GREATER_EQUAL 2.17) # build oneTBB 2021.2.1 with gcc 4.8 (glibc 2.17) RESOLVE_DEPENDENCY(TBB - ARCHIVE_LIN "oneapi-tbb-2021.2.5-lin-trim.tgz" + ARCHIVE_LIN "oneapi-tbb-2021.2.4-lin.tgz" TARGET_PATH "${TEMP}/tbb" ENVIRONMENT "TBBROOT" - SHA256 "9bea2c838df3085d292989d643523dc1cedce9b46d5a03eec90104151b49a180" + SHA256 "6523661559a340e88131472ea9a595582c306af083e55293b7357d11b8015546" USE_NEW_LOCATION TRUE) elseif(YOCTO_AARCH64) RESOLVE_DEPENDENCY(TBB From 537744c2558491f3f831525d62758167d52ff3e7 Mon Sep 17 00:00:00 2001 From: Ivan Novoselov Date: Thu, 4 Jul 2024 12:42:15 +0100 Subject: [PATCH 23/50] [Snippets] Implement runtime loop info serialization in netron graphs (#25235) ### Details: - *Propagate information 
from runtime loop info to serialized graphs* ### Tickets: - *-* --- .../snippets/lowered/linear_ir_builder.hpp | 13 +++++++++- .../lowered/pass/serialize_control_flow.hpp | 13 ++++++++-- .../src/lowered/linear_ir_builder.cpp | 22 ++++++++-------- .../lowered/pass/serialize_control_flow.cpp | 26 +++++++++++++++++-- 4 files changed, 58 insertions(+), 16 deletions(-) diff --git a/src/common/snippets/include/snippets/lowered/linear_ir_builder.hpp b/src/common/snippets/include/snippets/lowered/linear_ir_builder.hpp index afd778047c9279..346a68f1eba03a 100644 --- a/src/common/snippets/include/snippets/lowered/linear_ir_builder.hpp +++ b/src/common/snippets/include/snippets/lowered/linear_ir_builder.hpp @@ -32,11 +32,21 @@ class LinearIRBuilder { * @param expression_map expression map * @return clone of `linear_ir` */ - std::shared_ptr clone(const std::shared_ptr& linear_ir, ExpressionMap& expression_map) const; + inline std::shared_ptr clone(const std::shared_ptr& linear_ir, ExpressionMap& expression_map) const { + auto result = std::make_shared(); + clone(linear_ir.get(), result.get(), expression_map); + return result; + } inline std::shared_ptr clone(const std::shared_ptr& linear_ir) const { ExpressionMap expression_map; return clone(linear_ir, expression_map); } + inline LinearIR clone(const LinearIR& linear_ir) const { + LinearIR result; + ExpressionMap expression_map; + clone(&linear_ir, &result, expression_map); + return result; + } /** * @brief Make a copy of LinearIR range by rules described in `m_config` * @param begin begin iterator of the target range of LinearIR @@ -48,6 +58,7 @@ class LinearIRBuilder { ExpressionMap& expression_map) const; private: + void clone(const LinearIR* src, LinearIR* dst, ExpressionMap& expression_map) const; Config m_config = {}; }; diff --git a/src/common/snippets/include/snippets/lowered/pass/serialize_control_flow.hpp b/src/common/snippets/include/snippets/lowered/pass/serialize_control_flow.hpp index 
06c43258b25e79..602e9d9df7ce32 100644 --- a/src/common/snippets/include/snippets/lowered/pass/serialize_control_flow.hpp +++ b/src/common/snippets/include/snippets/lowered/pass/serialize_control_flow.hpp @@ -20,8 +20,17 @@ namespace pass { class SerializeControlFlow : public SerializeBase { public: OPENVINO_RTTI("SerializeControlFlow", "Pass", SerializeBase) - SerializeControlFlow(const std::string& xml_path) : SerializeBase(xml_path) {} - bool run(LinearIR& linear_ir) override; + SerializeControlFlow(const std::string& xml_path, bool update_dynamic_ops = false) : + SerializeBase(xml_path), m_update_dynamic_ops{update_dynamic_ops} {} + + bool run(LinearIR& linear_ir) override { + return run(const_cast(linear_ir)); + } + // We need a const method to run from functions that can't change LIR + bool run(const LinearIR& linear_ir); + +private: + const bool m_update_dynamic_ops = false; }; } // namespace pass diff --git a/src/common/snippets/src/lowered/linear_ir_builder.cpp b/src/common/snippets/src/lowered/linear_ir_builder.cpp index e5d28375db05f6..328ed06bfd0a11 100644 --- a/src/common/snippets/src/lowered/linear_ir_builder.cpp +++ b/src/common/snippets/src/lowered/linear_ir_builder.cpp @@ -65,21 +65,21 @@ std::vector> clone_nodes(const std::vector LinearIRBuilder::clone(const std::shared_ptr& linear_ir, ExpressionMap& expression_map) const { - auto cloned = std::make_shared(); - cloned->m_config = linear_ir->m_config; +void LinearIRBuilder::clone(const LinearIR* src, LinearIR* dst, ExpressionMap& expression_map) const { + OPENVINO_ASSERT(src && dst, "Invalid pointers were provided for LinearIRBuilder::clone"); + dst->m_config = src->m_config; - cloned->m_expressions = clone_range(linear_ir->m_expressions.cbegin(), linear_ir->m_expressions.cend(), expression_map); - for (const auto& expr : cloned->m_expressions) { - cloned->register_expression(expr, true); + dst->m_expressions = clone_range(src->m_expressions.cbegin(), src->m_expressions.cend(), expression_map); + 
for (const auto& expr : dst->m_expressions) { + dst->register_expression(expr, true); } - cloned->m_loop_manager = linear_ir->m_loop_manager->clone_with_new_expr(expression_map); + dst->m_loop_manager = src->m_loop_manager->clone_with_new_expr(expression_map); // It's Ok to share shapeInfer factory ptr, since the factory doesn't depend on LIR in any way - cloned->m_shape_infer_factory = linear_ir->m_shape_infer_factory; - cloned->m_shape_infer = std::make_shared(cloned->m_expressions, cloned->m_parameter_expressions, cloned->m_result_expressions); - cloned->m_is_dynamic = linear_ir->m_is_dynamic; - return cloned; + dst->m_shape_infer_factory = src->m_shape_infer_factory; + dst->m_shape_infer = std::make_shared(dst->m_expressions, dst->m_parameter_expressions, + dst->m_result_expressions); + dst->m_is_dynamic = src->m_is_dynamic; } LinearIR::container LinearIRBuilder::clone_range(LinearIR::container::const_iterator begin, LinearIR::container::const_iterator end, diff --git a/src/common/snippets/src/lowered/pass/serialize_control_flow.cpp b/src/common/snippets/src/lowered/pass/serialize_control_flow.cpp index 6b2dab515b6d19..1901753d299689 100644 --- a/src/common/snippets/src/lowered/pass/serialize_control_flow.cpp +++ b/src/common/snippets/src/lowered/pass/serialize_control_flow.cpp @@ -9,16 +9,22 @@ #include "snippets/lowered/linear_ir.hpp" #include "snippets/op/serialization_node.hpp" #include "snippets/snippets_isa.hpp" +#include "snippets/lowered/loop_manager.hpp" +#include "snippets/lowered/linear_ir_builder.hpp" namespace ov { namespace snippets { namespace lowered { namespace pass { -bool SerializeControlFlow::run(LinearIR& linear_ir) { +bool SerializeControlFlow::run(const LinearIR& original_linear_ir) { OV_ITT_SCOPED_TASK(ov::pass::itt::domains::SnippetsTransform, "Snippets::SerializeControlFlow") - if (linear_ir.empty()) + if (original_linear_ir.empty()) return false; + const auto& linear_ir = m_update_dynamic_ops ? 
LinearIRBuilder().clone(original_linear_ir) : original_linear_ir; + + const auto& loop_manager = linear_ir.get_loop_manager(); + const auto& loop_info_map = loop_manager ? loop_manager->get_map() : std::map{}; auto first_node = std::make_shared(element::f32, Shape{}); first_node->set_friendly_name("Start"); @@ -35,6 +41,22 @@ bool SerializeControlFlow::run(LinearIR& linear_ir) { "Serialization can't find LoopBegin that corresponds to LoopEnd with friendly name ", loop_end->get_friendly_name()); auto loop_begin_serialization_node = loops_map.at(loop_end->get_loop_begin()); + if (m_update_dynamic_ops) { + OPENVINO_ASSERT(loop_info_map.count(loop_end->get_id()), "Failed to find loop id in loop info map"); + const auto& loop_info = loop_info_map.at(loop_end->get_id()); + loop_end->set_work_amount(loop_info->get_work_amount()); + loop_end->set_increment(loop_info->get_increment()); + loop_end->set_is_incremented(loop_info->get_is_incremented()); + if (auto unified = ov::as_type_ptr(loop_info)) { + loop_end->set_ptr_increments(unified->get_ptr_increments()); + loop_end->set_finalization_offsets(unified->get_finalization_offsets()); + } else if (auto expanded = ov::as_type_ptr(loop_info)) { + loop_end->set_ptr_increments(expanded->get_ptr_increments()); + loop_end->set_finalization_offsets(expanded->get_finalization_offsets()); + } else { + OPENVINO_THROW("Unknown LoopInfo type"); + } + } serialization_node = std::make_shared(ov::OutputVector{serialization_node, loop_begin_serialization_node}, expr); } else { serialization_node = std::make_shared(ov::OutputVector{serialization_node}, expr); From e6d5ba0f75811c193d79f43158a574d19776b5f4 Mon Sep 17 00:00:00 2001 From: Roman Lyamin Date: Thu, 4 Jul 2024 16:41:31 +0400 Subject: [PATCH 24/50] [GPU] Added more uses of const references (#25374) --- .../include/intel_gpu/runtime/layout.hpp | 6 +-- .../include/intel_gpu/runtime/tensor.hpp | 14 +++--- .../impls/ocl/kernel_selector_helper.cpp | 2 +- 
src/plugins/intel_gpu/src/runtime/layout.cpp | 44 +++++++++---------- .../intel_gpu/src/runtime/shape_predictor.cpp | 4 +- 5 files changed, 35 insertions(+), 35 deletions(-) diff --git a/src/plugins/intel_gpu/include/intel_gpu/runtime/layout.hpp b/src/plugins/intel_gpu/include/intel_gpu/runtime/layout.hpp index 9a8ba67ab3a7b6..a454fc7afdee15 100644 --- a/src/plugins/intel_gpu/include/intel_gpu/runtime/layout.hpp +++ b/src/plugins/intel_gpu/include/intel_gpu/runtime/layout.hpp @@ -193,9 +193,9 @@ struct padding { } static padding max(padding const& lhs, padding const& rhs, float filling_value = 0.0f) { - auto lower = tensor::max(lhs.lower_size(), rhs.lower_size()); - auto upper = tensor::max(lhs.upper_size(), rhs.upper_size()); - auto dynamic_pad_dims = tensor::max(lhs.get_dynamic_pad_dims(), rhs.get_dynamic_pad_dims()); + const auto& lower = tensor::max(lhs.lower_size(), rhs.lower_size()); + const auto& upper = tensor::max(lhs.upper_size(), rhs.upper_size()); + const auto& dynamic_pad_dims = tensor::max(lhs.get_dynamic_pad_dims(), rhs.get_dynamic_pad_dims()); return padding{lower.sizes(), upper.sizes(), filling_value, dynamic_pad_dims}; } diff --git a/src/plugins/intel_gpu/include/intel_gpu/runtime/tensor.hpp b/src/plugins/intel_gpu/include/intel_gpu/runtime/tensor.hpp index 20c8bc8052f031..cb74433ec18483 100644 --- a/src/plugins/intel_gpu/include/intel_gpu/runtime/tensor.hpp +++ b/src/plugins/intel_gpu/include/intel_gpu/runtime/tensor.hpp @@ -262,8 +262,8 @@ struct tensor { tensor(format fmt, const std::vector& sizes, value_type default_size = 1) : tensor(default_size) { - auto in_order = fmt.order(); - auto out_order = fmt.internal_order(); + const auto& in_order = fmt.order(); + const auto& out_order = fmt.internal_order(); if (in_order.size() != sizes.size()) throw std::invalid_argument("The count of values passed to initialize tensor does not match passed format."); @@ -417,8 +417,8 @@ struct tensor { /// @brief Returns a vector of tensors values, ordered 
regarding to @p format. std::vector sizes(cldnn::format fmt) const { - auto output_order = fmt.order(); - auto internal_order = fmt.internal_order(); + const auto& output_order = fmt.order(); + const auto& internal_order = fmt.internal_order(); std::vector sizes(output_order.size(), 0); for (size_t i = 0; i < sizes.size(); ++i) { @@ -472,9 +472,9 @@ struct tensor { */ tensor transform(cldnn::format new_fmt, value_type default_size) const { cldnn::format default_fmt = cldnn::format::bfvuwzyx; - auto val_order = default_fmt.internal_order(); - auto new_order = new_fmt.internal_order(); - std::vector old_sizes = sizes(); + const auto& val_order = default_fmt.internal_order(); + const auto& new_order = new_fmt.internal_order(); + const std::vector& old_sizes = sizes(); std::vector new_sizes(old_sizes.size(), default_size); const auto& new_traits = new_fmt.traits(); static const std::map flatten_mapping = { diff --git a/src/plugins/intel_gpu/src/graph/impls/ocl/kernel_selector_helper.cpp b/src/plugins/intel_gpu/src/graph/impls/ocl/kernel_selector_helper.cpp index 1f492e14c9fc7e..2ebcebd3b0b48f 100644 --- a/src/plugins/intel_gpu/src/graph/impls/ocl/kernel_selector_helper.cpp +++ b/src/plugins/intel_gpu/src/graph/impls/ocl/kernel_selector_helper.cpp @@ -822,7 +822,7 @@ kernel_selector::data_tensor convert_data_tensor(const layout& l, const tensor v // legacy get_tensor().sizes() impl return dims in external order, so we need to transpose dims ov::PartialShape vals_ordered; - auto axis_order = l.format.dims_order(); + const auto& axis_order = l.format.dims_order(); for (size_t i = 0; i < axis_order.size(); i++) { if (axis_order[i] >= vals_original.size()) vals_ordered.push_back(ov::Dimension(1)); diff --git a/src/plugins/intel_gpu/src/runtime/layout.cpp b/src/plugins/intel_gpu/src/runtime/layout.cpp index fb4ee3e88841c3..331859167c39bd 100644 --- a/src/plugins/intel_gpu/src/runtime/layout.cpp +++ b/src/plugins/intel_gpu/src/runtime/layout.cpp @@ -41,18 +41,18 @@ size_t 
layout::get_spatial_rank() const { } tensor::value_type layout::get_dim(size_t idx) const { - auto dims = get_dims(); + const auto& dims = get_dims(); return dims[idx]; } tensor::value_type layout::batch() const { - auto dims = get_dims(); + const auto& dims = get_dims(); const size_t dim_idx = 0; return dims[dim_idx]; } tensor::value_type layout::feature() const { - auto dims = get_dims(); + const auto& dims = get_dims(); const size_t dim_idx = 1; return dims[dim_idx]; } @@ -60,13 +60,13 @@ tensor::value_type layout::feature() const { tensor::value_type layout::spatial(size_t spatial_idx) const { if (spatial_idx >= format.spatial_num() ) return 1; - auto dims = get_dims(); + const auto& dims = get_dims(); const size_t dim_idx = (format::is_grouped(format) ? 3 : 2) + (format.spatial_num() - 1 - spatial_idx); return dims[dim_idx]; } tensor::value_type layout::group() const { - auto dims = get_dims(); + const auto& dims = get_dims(); if (!format::is_weights_format(format)) { throw std::logic_error("[GPU] can't get group dimension for data layout"); } @@ -81,7 +81,7 @@ tensor::value_type layout::ofm() const { if (!format::is_weights_format(format)) { throw std::logic_error("[GPU] can't get OFM dimension for data layout"); } - auto dims = get_dims(); + const auto& dims = get_dims(); const size_t dim_idx = format::is_grouped(format) ? 1 : 0; return dims[dim_idx]; @@ -91,7 +91,7 @@ tensor::value_type layout::ifm() const { if (!format::is_weights_format(format)) { throw std::logic_error("[GPU] can't get IFM dimension for data layout"); } - auto dims = get_dims(); + const auto& dims = get_dims(); const size_t dim_idx = format::is_grouped(format) ? 
2 : 1; return dims[dim_idx]; } @@ -99,10 +99,10 @@ tensor::value_type layout::ifm() const { std::vector layout::get_dims() const { if (is_dynamic()) throw std::runtime_error("[GPU] get_dims() is called for dynamic shape"); - auto shape = size.to_shape(); + std::vector res; - for (auto dim : shape) { - res.push_back(static_cast(dim)); + for (const auto& dim : size) { + res.push_back(static_cast(dim.get_length())); } if (res.size() < format.dimension()) @@ -116,7 +116,7 @@ std::vector layout::get_padded_dims() const { throw std::runtime_error("[GPU] get_padded_dims() is called for dynamic shape"); auto default_fmt = format::get_default_format(format.dimension(), format::is_weights_format(format), format::is_grouped(format)); - auto t = get_tensor(); + const auto& t = get_tensor(); auto padded_size = t.add(data_padding.lower_size()).add(data_padding.upper_size()); return padded_size.sizes(default_fmt); } @@ -168,7 +168,7 @@ std::vector layout::get_ordered_dims() const { if (is_dynamic()) throw std::runtime_error("[GPU] get_ordered_dims() is called for dynamic shape"); - auto t = get_tensor(); + const auto& t = get_tensor(); return t.sizes(format); } @@ -245,8 +245,8 @@ tensor layout::get_tensor() const { OPENVINO_ASSERT(!is_dynamic() || has_upper_bound(), "[GPU] get_tensor() is called for dynamic shape without upper bound"); ov::Shape shape; if (is_dynamic() && has_upper_bound()) { - for (auto dim : size) { - shape.push_back(dim.get_max_length()); + for (const auto& dim : size) { + shape.push_back(dim.get_max_length()); } } else { shape = size.to_shape(); @@ -295,16 +295,16 @@ void layout::set_partial_shape(const ov::PartialShape& size) { tensor layout::get_buffer_size() const { if (is_dynamic() && !has_upper_bound()) { - throw std::runtime_error("[GPU] get_buffer_size() is called for dynamic shape"); + throw std::runtime_error("[GPU] get_buffer_size() is called for dynamic shape"); } - auto t = get_tensor(); + const auto& t = get_tensor(); return 
t.add(data_padding.lower_size()).add(data_padding.upper_size()); } tensor layout::get_pitches() const { - auto sizes = get_buffer_size().sizes(format); + const auto& sizes = get_buffer_size().sizes(format); std::vector pitches(sizes.size(), tensor::value_type(1)); std::partial_sum(sizes.rbegin(), sizes.rend() - 1, pitches.rbegin() + 1, std::multiplies()); @@ -312,10 +312,10 @@ tensor layout::get_pitches() const { } size_t layout::get_linear_offset(tensor element) const { - auto l_padd = data_padding.lower_size(); - auto u_padd = data_padding.upper_size(); + const auto& l_padd = data_padding.lower_size(); + const auto& u_padd = data_padding.upper_size(); - auto t = get_tensor(); + const auto& t = get_tensor(); if ((element.batch[0] < 0 && -element.batch[0] > l_padd.batch[0]) || (element.feature[0] < 0 && -element.feature[0] > l_padd.feature[0]) || @@ -524,12 +524,12 @@ ov::PartialShape layout::transform(const ov::PartialShape& pshape, const cldnn:: int32_t default_size = -1; std::vector dims; dims.reserve(pshape.size()); - for (auto dim : pshape) { + for (const auto& dim : pshape) { dims.push_back(static_cast(dim.get_length())); } const cldnn::format default_fmt = cldnn::format::bfvuwzyx; - auto old_sizes = convert_dimensions(dims, old_fmt.order(), default_fmt.internal_order()); // convert to internal order (bfxyzwuv) + const auto& old_sizes = convert_dimensions(dims, old_fmt.order(), default_fmt.internal_order()); // convert to internal order (bfxyzwuv) const auto& val_order = default_fmt.internal_order(); const auto& new_order = new_fmt.internal_order(); diff --git a/src/plugins/intel_gpu/src/runtime/shape_predictor.cpp b/src/plugins/intel_gpu/src/runtime/shape_predictor.cpp index feb72d1879df7b..4ed02065da3289 100644 --- a/src/plugins/intel_gpu/src/runtime/shape_predictor.cpp +++ b/src/plugins/intel_gpu/src/runtime/shape_predictor.cpp @@ -63,7 +63,7 @@ std::pair ShapePredictor::predict_preallocation_shape(const std size_t next_iters_prealloc_count = 
custom_next_iters_prealloc_count > 0 ? static_cast(custom_next_iters_prealloc_count) : _next_iters_preallocation_count; - auto current_shape = layout.get_shape(); + const auto& current_shape = layout.get_shape(); auto dt_bitwidth = ov::element::Type(layout.data_type).bitwidth(); add_shape(id, current_shape); @@ -74,7 +74,7 @@ std::pair ShapePredictor::predict_preallocation_shape(const std return {false, {}}; // Check if there is enough data for prediction - auto& shapes = _shapes_info[id]; + const auto& shapes = _shapes_info[id]; const auto shapes_num = shapes.size(); // Number of shapes used for iterations mode predictions From d0b3a3c40e2728ee55e4af3d7f089ff1778398bd Mon Sep 17 00:00:00 2001 From: Egor Duplenskii Date: Thu, 4 Jul 2024 16:36:49 +0200 Subject: [PATCH 25/50] [CPU] Fix RNN node isSupported check (#24143) Incorrect check resulted into decompostion of different RNN operations --- src/plugins/intel_cpu/src/nodes/rnn.cpp | 30 ++++++++++++++++--------- 1 file changed, 20 insertions(+), 10 deletions(-) diff --git a/src/plugins/intel_cpu/src/nodes/rnn.cpp b/src/plugins/intel_cpu/src/nodes/rnn.cpp index 927805e1879ff3..4558b9c7749b00 100644 --- a/src/plugins/intel_cpu/src/nodes/rnn.cpp +++ b/src/plugins/intel_cpu/src/nodes/rnn.cpp @@ -271,16 +271,30 @@ bool RNN::isSupportedOperation(const std::shared_ptr& op, std::s return false; } if (one_of(rnnCellBase->get_type_info(), - ov::op::v0::LSTMCell::get_type_info_static(), - ov::op::v4::LSTMCell::get_type_info_static(), - ov::op::v5::LSTMSequence::get_type_info_static())) { + ov::op::v0::LSTMCell::get_type_info_static(), + ov::op::v4::LSTMCell::get_type_info_static(), + ov::op::v0::LSTMSequence::get_type_info_static(), + ov::op::v5::LSTMSequence::get_type_info_static())) { if (rnnCellBase->get_activations() != std::vector{"sigmoid", "tanh", "tanh"}) { errorMessage = "Not supported activation functions"; return false; } - } else if (!ov::is_type(op) && rnnCellBase->get_activations() != std::vector{"sigmoid", 
"tanh"}) { - errorMessage = "Not supported activation functions"; - return false; + } else if (one_of(rnnCellBase->get_type_info(), + ov::op::v3::GRUCell::get_type_info_static(), + ov::op::v5::GRUSequence::get_type_info_static(), + ov::op::internal::AUGRUCell::get_type_info_static(), + ov::op::internal::AUGRUSequence::get_type_info_static())) { + if (rnnCellBase->get_activations() != std::vector{"sigmoid", "tanh"}) { + errorMessage = "Not supported activation functions"; + return false; + } + } else if (one_of(rnnCellBase->get_type_info(), + ov::op::v5::RNNSequence::get_type_info_static(), + ov::op::v0::RNNCell::get_type_info_static())) { + if (rnnCellBase->get_activations().empty() || !one_of(rnnCellBase->get_activations().front(), "sigmoid", "tanh", "relu")) { + errorMessage = "Not supported activation functions"; + return false; + } } } @@ -290,10 +304,6 @@ bool RNN::isSupportedOperation(const std::shared_ptr& op, std::s direction = gru_seq->get_direction(); seqLenIdx = 2; } else if (auto lstm_seq = ov::as_type_ptr(op)) { - if (lstm_seq->get_activations() != std::vector{"sigmoid", "tanh", "tanh"}) { - errorMessage = "Not supported activation functions"; - return false; - } direction = lstm_seq->get_direction(); seqLenIdx = 3; } else if (auto lstm_seq = ov::as_type_ptr(op)) { From e4cfd8f2edde7c22de0fa732a3190977bbbf73ae Mon Sep 17 00:00:00 2001 From: Andrzej Kopytko Date: Thu, 4 Jul 2024 17:06:02 +0200 Subject: [PATCH 26/50] [DOCS] PORT to master with Breadcrumbs fix (#25380) Port for: https://github.com/openvinotoolkit/openvino/pull/25379 --- docs/sphinx_setup/_static/css/custom.css | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/docs/sphinx_setup/_static/css/custom.css b/docs/sphinx_setup/_static/css/custom.css index 1fb4cbaf0fd34a..18586f01964db9 100644 --- a/docs/sphinx_setup/_static/css/custom.css +++ b/docs/sphinx_setup/_static/css/custom.css @@ -147,6 +147,16 @@ nav.bd-links .current>a { padding-right: 0; padding: 0 0.8rem; } 
+ul.bd-breadcrumbs li.breadcrumb-item:not(.breadcrumb-home):before { + padding: .2rem .5rem 0 !important; +} +li.breadcrumb-item { + align-items: center !important; +} + +.bd-sidebar-primary { + display:block !important; +} a.nav-link:hover { text-decoration:none !important; @@ -457,6 +467,9 @@ div.highlight { .container-xl { max-width: 100%; } + .bd-main .bd-content .bd-article-container .bd-article { + padding-left: 1rem !important; + } } From 505a1ae1ef41e93ef9bfa73cd0a04086f40dccc0 Mon Sep 17 00:00:00 2001 From: Maxim Vafin Date: Thu, 4 Jul 2024 18:05:35 +0200 Subject: [PATCH 27/50] [PT FE] Support aten::col2im (#25376) ### Details: - *Support `aten::col2im`* ### Tickets: - *CVS-101054* --- src/frontends/pytorch/src/op/col2im.cpp | 53 +++++++++++++++++ src/frontends/pytorch/src/op_table.cpp | 2 + .../layer_tests/pytorch_tests/test_col2im.py | 57 +++++++++++++++++++ .../layer_tests/pytorch_tests/test_im2col.py | 2 +- tests/model_hub_tests/pytorch/timm_models | 10 ++-- 5 files changed, 118 insertions(+), 6 deletions(-) create mode 100644 src/frontends/pytorch/src/op/col2im.cpp create mode 100644 tests/layer_tests/pytorch_tests/test_col2im.py diff --git a/src/frontends/pytorch/src/op/col2im.cpp b/src/frontends/pytorch/src/op/col2im.cpp new file mode 100644 index 00000000000000..b07f69b989367e --- /dev/null +++ b/src/frontends/pytorch/src/op/col2im.cpp @@ -0,0 +1,53 @@ +// Copyright (C) 2018-2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/op/col2im.hpp" + +#include "openvino/frontend/pytorch/node_context.hpp" +#include "openvino/op/concat.hpp" +#include "openvino/op/unsqueeze.hpp" +#include "utils.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace op { + +using namespace ov::op; + +OutputVector translate_col2im(const NodeContext& context) { + num_inputs_check(context, 3, 7); + auto x = context.get_input(0); + auto kernel_size = context.get_input(2); + auto dilations = context.const_input(3); + auto 
padding = context.const_input(4); + auto strides = context.const_input(5); + + Output output_size; + auto shape_type = context.get_input_type(1); + if (shape_type.is()) { + const auto list_elems = get_list_as_outputs(context.get_input(1)); + if (list_elems.size() == 1) { + output_size = list_elems[0]; + } else { + OutputVector to_concat; + auto zero = v0::Constant::create(element::i32, Shape{}, {0}); + for (auto elem : list_elems) { + to_concat.push_back(context.mark_node(std::make_shared(elem, zero))); + } + output_size = context.mark_node(std::make_shared(to_concat, 0)); + } + } else { + output_size = context.get_input(1); + } + + auto col2im = context.mark_node( + std::make_shared(x, output_size, kernel_size, strides, dilations, padding, padding)); + return {col2im}; +}; + +} // namespace op +} // namespace pytorch +} // namespace frontend +} // namespace ov \ No newline at end of file diff --git a/src/frontends/pytorch/src/op_table.cpp b/src/frontends/pytorch/src/op_table.cpp index 5e00395614f31d..08bf9075b04db2 100644 --- a/src/frontends/pytorch/src/op_table.cpp +++ b/src/frontends/pytorch/src/op_table.cpp @@ -55,6 +55,7 @@ OP_CONVERTER(translate_cdist); OP_CONVERTER(translate_celu); OP_CONVERTER(translate_channel_shuffle); OP_CONVERTER(translate_clamp); +OP_CONVERTER(translate_col2im); OP_CONVERTER(translate_constant); OP_CONVERTER(translate_conv_transposend); OP_CONVERTER(translate_conv1d_ext); @@ -408,6 +409,7 @@ const std::map get_supported_ops_ts() { {"aten::clip", op::translate_clamp}, {"aten::clip_", op::inplace_op}, {"aten::clone", op::skip_node}, // ignore clone operators that are inserted by PyTorch autograd + {"aten::col2im", op::translate_col2im}, // aten::complex - Supported in limited set of patterns {"aten::concat", op::translate_cat}, {"aten::contiguous", op::skip_node}, // In openvino how tensors are stored in memory is internal plugin detail, diff --git a/tests/layer_tests/pytorch_tests/test_col2im.py 
b/tests/layer_tests/pytorch_tests/test_col2im.py new file mode 100644 index 00000000000000..8cb7ea96cb8391 --- /dev/null +++ b/tests/layer_tests/pytorch_tests/test_col2im.py @@ -0,0 +1,57 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import pytest +import math + +from pytorch_layer_test_class import PytorchLayerTest + + +class TestCol2Im(PytorchLayerTest): + def _prepare_input(self, input_shape): + import numpy as np + return (np.random.randn(*input_shape).astype(np.float32),) + + def create_model(self, output_size, kernel_size, dilation, padding, stride): + import torch + + class aten_col2im(torch.nn.Module): + def __init__(self, output_size, kernel_size, dilation, padding, stride): + super(aten_col2im, self).__init__() + self.output_size = output_size + self.kernel_size = kernel_size + self.dilation = dilation + self.padding = padding + self.stride = stride + + def forward(self, x): + return torch.nn.functional.fold( + x, + output_size=self.output_size, + kernel_size=self.kernel_size, + dilation=self.dilation, + padding=self.padding, + stride=self.stride + ) + + ref_net = None + + return aten_col2im(output_size, kernel_size, dilation, padding, stride), ref_net, "aten::col2im" + + @pytest.mark.nightly + @pytest.mark.precommit + @pytest.mark.parametrize("output_size,kernel_size", [([4, 5], [2, 2])]) + @pytest.mark.parametrize("dilation", [1, 2, [1, 2]]) + @pytest.mark.parametrize("padding", [0, 5, [2, 3]]) + @pytest.mark.parametrize("stride", [1, 2, [2, 1]]) + def test_col2im(self, output_size, kernel_size, dilation, padding, stride, ie_device, precision, ir_version): + d = dilation if isinstance(dilation, list) else [dilation, dilation] + s = stride if isinstance(stride, list) else [stride, stride] + p = padding if isinstance(padding, list) else [padding, padding] + L = 1 + for i in range(2): + L *= math.floor((output_size[i] + 2 * p[i] - d[i] + * (kernel_size[i] - 1) - 1) / float(s[i]) + 1) + 
self._test(*self.create_model(output_size, kernel_size, + dilation, padding, stride), ie_device, precision, ir_version, + kwargs_to_prepare_input={"input_shape": [10, 3 * kernel_size[0] * kernel_size[1], int(L)]}) diff --git a/tests/layer_tests/pytorch_tests/test_im2col.py b/tests/layer_tests/pytorch_tests/test_im2col.py index 9b1be4edef2847..74acb38d41c954 100644 --- a/tests/layer_tests/pytorch_tests/test_im2col.py +++ b/tests/layer_tests/pytorch_tests/test_im2col.py @@ -41,5 +41,5 @@ def forward(self, x): @pytest.mark.parametrize("dilation", [1, 2, 3, (1, 2)]) @pytest.mark.parametrize("padding", [0, 5, 1, [2, 3]]) @pytest.mark.parametrize("stride", [3, 1, [2, 1]]) - def test_exp(self, kernel_size, dilation, padding, stride, ie_device, precision, ir_version): + def test_im2col(self, kernel_size, dilation, padding, stride, ie_device, precision, ir_version): self._test(*self.create_model(kernel_size, dilation, padding, stride), ie_device, precision, ir_version) diff --git a/tests/model_hub_tests/pytorch/timm_models b/tests/model_hub_tests/pytorch/timm_models index 6efc426f5e62fc..9087edc24ffe2c 100644 --- a/tests/model_hub_tests/pytorch/timm_models +++ b/tests/model_hub_tests/pytorch/timm_models @@ -495,11 +495,11 @@ vit_small_r26_s32_224.augreg_in21k,None vit_so400m_patch14_siglip_224.webli,None vit_srelpos_small_patch16_224.sw_in1k,None vit_tiny_r_s16_p8_224.augreg_in21k,None -volo_d1_224.sail_in1k,None,xfail,Unsupported aten::col2im -volo_d2_224.sail_in1k,None,xfail,Unsupported aten::col2im -volo_d3_224.sail_in1k,None,xfail,Unsupported aten::col2im -volo_d4_224.sail_in1k,None,xfail,Unsupported aten::col2im -volo_d5_224.sail_in1k,None,xfail,Unsupported aten::col2im +volo_d1_224.sail_in1k,None +volo_d2_224.sail_in1k,None +volo_d3_224.sail_in1k,None +volo_d4_224.sail_in1k,None +volo_d5_224.sail_in1k,None wide_resnet101_2.tv2_in1k,None wide_resnet50_2.racm_in1k,None xception41.tf_in1k,None From 8b010735893e2cc74706cea22fe4809a79ad0753 Mon Sep 17 00:00:00 2001 From: 
Mikhail Ryzhov Date: Thu, 4 Jul 2024 20:05:36 +0200 Subject: [PATCH 28/50] [GHA] Fixed tokenizers build (#25383) ### Details: - Added path to tokenizers build to find ov wheel (https://github.com/openvinotoolkit/openvino_tokenizers/pull/183) ### Tickets: - *ticket-id* --- .github/workflows/job_tokenizers.yml | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/.github/workflows/job_tokenizers.yml b/.github/workflows/job_tokenizers.yml index da9e096e7abd8b..9cf1acc05e7220 100644 --- a/.github/workflows/job_tokenizers.yml +++ b/.github/workflows/job_tokenizers.yml @@ -120,16 +120,14 @@ jobs: if: runner.os != 'Windows' run: | # use OpenVINO wheel package only to build the extension - export OpenVINO_DIR=$(python3 -c "from openvino.utils import get_cmake_path; print(get_cmake_path(), end='')") - python -m pip wheel -v --no-deps --wheel-dir ${EXTENSION_BUILD_DIR} ${OPENVINO_TOKENIZERS_REPO} + python -m pip wheel -v --no-deps --wheel-dir ${EXTENSION_BUILD_DIR} --find-links ${INSTALL_DIR}/tools ${OPENVINO_TOKENIZERS_REPO} env: CMAKE_BUILD_PARALLEL_LEVEL: '4' - name: Build tokenizers wheel (Windows) if: runner.os == 'Windows' run: | - $env:OpenVINO_DIR=$(python3 -c "from openvino.utils import get_cmake_path; print(get_cmake_path(), end='')") - python3 -m pip wheel -v --no-deps --wheel-dir ${env:EXTENSION_BUILD_DIR} ${env:OPENVINO_TOKENIZERS_REPO} + python3 -m pip wheel -v --no-deps --wheel-dir ${env:EXTENSION_BUILD_DIR} --find-links ${env:INSTALL_DIR}/tools ${env:OPENVINO_TOKENIZERS_REPO} env: CMAKE_BUILD_PARALLEL_LEVEL: '4' From c901a266115a5ddbe08605fd95201184c245d544 Mon Sep 17 00:00:00 2001 From: Wilson Seok Date: Thu, 4 Jul 2024 10:11:47 -0700 Subject: [PATCH 29/50] [GPU] Add TensorOffset support key in adaptive pooling ref kernel (#25381) ### Details: - Add TensorOffset support key in adaptive pooling ref kernel as the kernel is using INPUT0_GET_INDEX() macro ### Tickets: - 144826 --- .../kernels/adaptive_pooling/adaptive_pooling_kernel_ref.cpp | 1 
+ 1 file changed, 1 insertion(+) diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/adaptive_pooling/adaptive_pooling_kernel_ref.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernels/adaptive_pooling/adaptive_pooling_kernel_ref.cpp index c8b06e9469bbee..ec8f76e430a4a8 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/adaptive_pooling/adaptive_pooling_kernel_ref.cpp +++ b/src/plugins/intel_gpu/src/kernel_selector/kernels/adaptive_pooling/adaptive_pooling_kernel_ref.cpp @@ -24,6 +24,7 @@ ParamsKey AdaptivePoolingRef::GetSupportedKey() const { k.EnableAllOutputLayout(); k.EnableBatching(); k.EnableTensorPitches(); + k.EnableTensorOffset(); return k; } From 2a9af43b7455366977c42146c2d6d9bb2a641277 Mon Sep 17 00:00:00 2001 From: Sergey Lyalin Date: Thu, 4 Jul 2024 21:37:56 +0400 Subject: [PATCH 30/50] Stateful to Stateless Transformation for LLMs (#25150) Transformation that undoing make_stateful from optimum-intel. ### How to use in Python ```python import openvino as ov from openvino._offline_transformations import stateful_to_stateless_transformation core = ov.Core() model = core.read_model('your_chatty_stateful_model_right_from_vanilla_optimum_intel.xml') stateful_to_stateless_transformation(model) # use `model` ``` ### How to use in C++ ```c++ #include #include int main() { auto core = ov::Core(); auto model = core.read_model("your_chatty_stateful_model_right_from_vanilla_optimum_intel.xml"); ov::pass::StatefulToStateless().run_on_model(model); // use `model` } ``` ### TODO - [x] Restore the original order of inputs/output (now they are not globally ordered, but kv inputs corresponds to kv outputs by indices with a proper offset). - [x] Restore the original names of inputs and outputs based on optimum-intel conventions in make_stateful. 
--- .../workflows/job_pytorch_models_tests.yml | 10 + .../_offline_transformations/__init__.py | 1 + .../core/offline_transformations.cpp | 10 + .../openvino/pass/stateful_to_stateless.hpp | 22 +++ src/core/src/pass/stateful_to_stateless.cpp | 172 ++++++++++++++++++ .../models/tiny-set-stateful-models-precommit | 6 + ...st_stateful_to_stateless_transformation.py | 58 ++++++ 7 files changed, 279 insertions(+) create mode 100644 src/core/include/openvino/pass/stateful_to_stateless.hpp create mode 100644 src/core/src/pass/stateful_to_stateless.cpp create mode 100644 tests/model_hub_tests/pytorch/models/tiny-set-stateful-models-precommit create mode 100644 tests/model_hub_tests/pytorch/test_stateful_to_stateless_transformation.py diff --git a/.github/workflows/job_pytorch_models_tests.yml b/.github/workflows/job_pytorch_models_tests.yml index c89bb1b71d12ea..c39ec81467eb75 100644 --- a/.github/workflows/job_pytorch_models_tests.yml +++ b/.github/workflows/job_pytorch_models_tests.yml @@ -153,6 +153,16 @@ jobs: USE_SYSTEM_CACHE: False OP_REPORT_FILE: ${{ env.INSTALL_TEST_DIR }}/TEST-torch_unsupported_ops.log + - name: StatefulToStateless Test + if: always() + run: | + export PYTHONPATH=${MODEL_HUB_TESTS_INSTALL_DIR}:$PYTHONPATH + python3 -m pytest ${MODEL_HUB_TESTS_INSTALL_DIR}/pytorch/test_stateful_to_stateless_transformation.py -m ${TYPE} --html=${INSTALL_TEST_DIR}/TEST-torch_stateful_to_stateless_tests.html --self-contained-html -v --tb=short + env: + TYPE: ${{ inputs.event == 'schedule' && 'nightly' || 'precommit'}} + TEST_DEVICE: CPU + USE_SYSTEM_CACHE: False + - name: Reformat unsupported ops file if: '!cancelled()' run: | diff --git a/src/bindings/python/src/openvino/_offline_transformations/__init__.py b/src/bindings/python/src/openvino/_offline_transformations/__init__.py index 81c288657afd0d..f198aea26c8ec2 100644 --- a/src/bindings/python/src/openvino/_offline_transformations/__init__.py +++ 
b/src/bindings/python/src/openvino/_offline_transformations/__init__.py @@ -18,3 +18,4 @@ from openvino._pyopenvino._offline_transformations import compress_quantize_weights_transformation from openvino._pyopenvino._offline_transformations import convert_sequence_to_tensor_iterator_transformation from openvino._pyopenvino._offline_transformations import paged_attention_transformation +from openvino._pyopenvino._offline_transformations import stateful_to_stateless_transformation diff --git a/src/bindings/python/src/pyopenvino/core/offline_transformations.cpp b/src/bindings/python/src/pyopenvino/core/offline_transformations.cpp index fa18ba0c84d4dd..862db2a1786f7b 100644 --- a/src/bindings/python/src/pyopenvino/core/offline_transformations.cpp +++ b/src/bindings/python/src/pyopenvino/core/offline_transformations.cpp @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include @@ -137,4 +138,13 @@ void regmodule_offline_transformations(py::module m) { manager.run_passes(model); }, py::arg("model")); + + m_offline_transformations.def( + "stateful_to_stateless_transformation", + [](std::shared_ptr model) { + ov::pass::Manager manager; + manager.register_pass(); + manager.run_passes(model); + }, + py::arg("model")); } diff --git a/src/core/include/openvino/pass/stateful_to_stateless.hpp b/src/core/include/openvino/pass/stateful_to_stateless.hpp new file mode 100644 index 00000000000000..90fd6b9e6e7901 --- /dev/null +++ b/src/core/include/openvino/pass/stateful_to_stateless.hpp @@ -0,0 +1,22 @@ +// Copyright (C) 2018-2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/pass/pass.hpp" + +namespace ov { +namespace pass { +/** + * @brief The transformation converts KV cache state back to stateless form. 
+ * \ingroup ov_pass_cpp_api + */ +class OPENVINO_API StatefulToStateless : public ModelPass { +public: + OPENVINO_RTTI("StatefulToStateless"); + + bool run_on_model(const std::shared_ptr& model) override; +}; +} // namespace pass +} // namespace ov diff --git a/src/core/src/pass/stateful_to_stateless.cpp b/src/core/src/pass/stateful_to_stateless.cpp new file mode 100644 index 00000000000000..31dea9ef113bc4 --- /dev/null +++ b/src/core/src/pass/stateful_to_stateless.cpp @@ -0,0 +1,172 @@ +// Copyright (C) 2018-2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/pass/stateful_to_stateless.hpp" + +#include +#include + +#include "openvino/cc/pass/itt.hpp" +#include "openvino/op/assign.hpp" +#include "openvino/op/gather.hpp" +#include "openvino/op/read_value.hpp" +#include "openvino/pass/manager.hpp" +#include "transformations/utils/utils.hpp" + +using namespace ov::op; + +namespace { + +std::shared_ptr set_name(std::shared_ptr node, const std::string& name) { + // Set name for both node and output tensor (should be only one tensor, and any other names will be overridden by a + // given single name) + node->set_friendly_name(name); + OPENVINO_ASSERT(node->get_output_size() == 1); + node->get_output_tensor(0).set_names({name}); + return node; +} + +// Templated method that has the same effect as not templated `set_name` but saves Op type for convenient calls chaining +template +inline std::shared_ptr set_name(std::shared_ptr node, const std::string& name) { + set_name(std::dynamic_pointer_cast(node), name); + return node; +} + +std::shared_ptr get_parameter_by_tensor_name(const std::shared_ptr& model, + const std::string& name) { + for (const auto& param : model->get_parameters()) { + if (param->get_output_tensor(0).get_names().count(name)) + return param; + } + return nullptr; // nullptr and return type are the only difference from ov::Model::input(name) +} + +struct Variable { + struct Context { + // to hold compiled once regex for all
Variable instances + const std::regex naming_convention = + std::regex(R"((past_key_values\.(\d+)\.(key|value))(present\.(\d+)\.(key|value)))"); + }; + + Variable(const Context& context, const std::string& variable_name) : variable_name(variable_name) { + // Try to decode original naming of the corresponding input and output in the stateless model + std::smatch match; + if (std::regex_match(variable_name, match, context.naming_convention)) { + input_name = match[1].str(); + output_name = match[4].str(); + auto input_index = match[2].str(); + auto output_index = match[5].str(); + if (input_index == output_index && input_index.length() <= std::numeric_limits::digits10) { + index = std::stoi(input_index) * 2 + int(match[3].str() == "value"); // order key before value + } else { + index = -1; + } + } else { + // Variable name doesn't follow the expected naming convention. It doesn't prevent forming + // a correct stateless model but doesn't give a way to restore all names and inputs/outputs ordering + // accurately. + input_name = "input_restored." + variable_name; + output_name = "output_restored." + variable_name; + index = -1; + } + } + + int index; // layer index, -1 means the index isn't known + std::string variable_name; // original variable_id + std::string input_name; // restored name of input + std::string output_name; // restored name of output +}; + +typedef std::vector Variables; + +void restore_kv_cache_order(Variables& variables, const std::unordered_map& var_index_by_var_id) { + // Try to restore variable order based on the known naming convention from optimum-intel. + // If names do not satisfy the expected convention, fall back to the order based on var_index_by_var_id + // Sort items that do satisfy the naming conventions before items that don't satisfy.
+ + std::stable_sort(variables.begin(), variables.end(), [&](const Variable& a, const Variable& b) { + if (a.index >= 0 && b.index >= 0) { + return a.index < b.index; + } else if (a.index >= 0 && b.index < 0) { + return true; + } else if (a.index < 0 && b.index >= 0) { + return false; + } else { // a.index < 0 && b.index < 0 + return var_index_by_var_id.at(a.variable_name) < var_index_by_var_id.at(b.variable_name); + } + }); +} + +} // namespace + +bool ov::pass::StatefulToStateless::run_on_model(const std::shared_ptr& model) { + RUN_ON_MODEL_SCOPE(StatefulToStateless); + + auto beam_idx = get_parameter_by_tensor_name(model, "beam_idx"); + Variables variables; // to collect variables corresponding to future_params + variables.reserve(model->get_sinks().size()); + Variable::Context context; + std::unordered_map> + future_params; // to collect nodes, each with a single output that will be replaced by new parameters + if (beam_idx) { + for (const ov::Input& input : beam_idx->get_output_target_inputs(0)) { + if (auto gather = std::dynamic_pointer_cast(input.get_node()->shared_from_this())) { + auto read_value = + std::dynamic_pointer_cast(gather->get_input_node_shared_ptr(0)); + OPENVINO_ASSERT(read_value, + "Unexpected model topology in StatefulToStateless: no ReadValue is found at the first " + "input of Gather by `beam_idx` parameter"); + auto variable_name = read_value->get_variable_id(); + variables.push_back(Variable(context, variable_name)); + future_params[variable_name] = gather; + } + } + } else { + OPENVINO_THROW( + "Stateful models without `beam_idx` input are not supported in StatefulToStateless transformation"); + } + model->remove_parameter(beam_idx); + + typedef std::shared_ptr PAssign; + std::unordered_map assigns_by_var_id; + std::unordered_map assign_index_by_var_id; + const auto& sinks = model->get_sinks(); + for (size_t i = 0; i < sinks.size(); ++i) { + if (auto assign = std::dynamic_pointer_cast(sinks[i])) { + const auto& var_id = 
assign->get_variable_id(); + assigns_by_var_id[var_id] = assign; + assign_index_by_var_id[var_id] = i; + } + } + + restore_kv_cache_order(variables, assign_index_by_var_id); + + ov::ParameterVector new_parameters; + ov::ResultVector new_results; + new_parameters.reserve(variables.size()); + new_results.reserve(variables.size()); + + for (const auto& variable_id : variables) { + auto future_param = future_params[variable_id.variable_name]; + auto parameter = ::set_name(std::make_shared(future_param->get_output_element_type(0), + future_param->get_output_partial_shape(0)), + variable_id.input_name); + + replace_node(future_param, parameter); + + auto assign = assigns_by_var_id[variable_id.variable_name]; + auto result = ::set_name(std::make_shared(assign->input_value(0)), variable_id.output_name); + + model->remove_sink(assign); // Don't do replace_node(assign, result)! It will lead to silently incorrect model. + model->remove_variable(model->get_variable_by_id(variable_id.variable_name)); + new_parameters.push_back(parameter); + new_results.push_back(result); + } + + model->add_parameters(new_parameters); + model->add_results(new_results); + + return true; +} diff --git a/tests/model_hub_tests/pytorch/models/tiny-set-stateful-models-precommit b/tests/model_hub_tests/pytorch/models/tiny-set-stateful-models-precommit new file mode 100644 index 00000000000000..f7e756c10926d0 --- /dev/null +++ b/tests/model_hub_tests/pytorch/models/tiny-set-stateful-models-precommit @@ -0,0 +1,6 @@ +hf-internal-testing/tiny-random-LlamaForCausalLM,https://huggingface.co/trl-internal-testing/tiny-random-LlamaForCausalLM +hf-internal-testing/tiny-random-StableLmForCausalLM,https://huggingface.co/hf-internal-testing/tiny-random-StableLmForCausalLM +hf-internal-testing/tiny-random-PhiForCausalLM,https://huggingface.co/hf-internal-testing/tiny-random-PhiForCausalLM +hf-internal-testing/tiny-random-CodeGenForCausalLM,https://huggingface.co/hf-internal-testing/tiny-random-CodeGenForCausalLM 
+hf-internal-testing/tiny-random-Starcoder2ForCausalLM,https://huggingface.co/hf-internal-testing/tiny-random-Starcoder2ForCausalLM +hf-internal-testing/tiny-random-OPTForCausalLM,https://huggingface.co/hf-internal-testing/tiny-random-OPTForCausalLM \ No newline at end of file diff --git a/tests/model_hub_tests/pytorch/test_stateful_to_stateless_transformation.py b/tests/model_hub_tests/pytorch/test_stateful_to_stateless_transformation.py new file mode 100644 index 00000000000000..8de04e8241a756 --- /dev/null +++ b/tests/model_hub_tests/pytorch/test_stateful_to_stateless_transformation.py @@ -0,0 +1,58 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import openvino as ov +from openvino._offline_transformations import stateful_to_stateless_transformation +from optimum.intel import OVModelForCausalLM +import models_hub_common.utils as utils +import pytest +import os + +def get_read_value_ops(model: ov.Model): + return [op for op in model.get_ops() if op.get_type_name() == 'ReadValue'] + +def check_desc_tensors(tensors1, tensors2): + # order of tensors may not match, comparing by the total amount and names + assert len(tensors1) == len(tensors2) + assert set(tuple(t.names) for t in tensors1) == set(tuple(t.names) for t in tensors2) + for t1 in tensors1: + t2_candidates = [t for t in tensors2 if t1.names & t.names] + assert len(t2_candidates) == 1 + t2 = t2_candidates[0] + assert t1.names == t2.names + assert t1.get_partial_shape() == t2.get_partial_shape() + assert t1.get_element_type() == t2.get_element_type() + +def run_stateful_to_stateless_in_runtime(tmp_path, model_id, model_link): + model = OVModelForCausalLM.from_pretrained(model_id, export=True, stateful=True, compile=False) + assert len(model.model.get_sinks()), f"Input model is not in the expected stateful form because it doesn't have any sinks." 
+ assert len(get_read_value_ops(model.model)), f"Input model is not in the expected stateful form because it doesn't have any ReadValue operations." + + stateful_to_stateless_transformation(model.model) + + sink_ops = model.model.get_sinks() + read_value_ops = get_read_value_ops(model.model) + assert len(sink_ops) == 0, f"Expected stateless model, but there are sinks found: {sink_ops}" + assert len(read_value_ops) == 0, f"Expected stateless model, but there are ReadValue operations found: {read_value_ops}" + + stateless_model = OVModelForCausalLM.from_pretrained(model_id, export=True, stateful=False, compile=False) + + print(model.model) + print(stateless_model.model) + check_desc_tensors(model.model.inputs, stateless_model.model.inputs) + check_desc_tensors(model.model.outputs, stateless_model.model.outputs) + + core = ov.Core() + core.compile_model(model.model, 'CPU') + + +@pytest.mark.precommit +@pytest.mark.parametrize("model_name, model_link, mark, reason", utils.get_models_list(os.path.join(os.path.dirname(__file__), "models", "tiny-set-stateful-models-precommit"))) +def test_stateful_to_stateless_precommit(tmp_path, model_name, model_link, mark, reason, ie_device): + assert mark is None or mark == 'skip' or mark == 'xfail', \ "Incorrect test case: {}, {}".format(model_name, model_link) + if mark == 'skip': + pytest.skip(reason) + elif mark == 'xfail': + pytest.xfail(reason) + run_stateful_to_stateless_in_runtime(tmp_path, model_name, model_link) \ No newline at end of file From a5c0d6712da432c4f5a2f83b265a29901faf7a38 Mon Sep 17 00:00:00 2001 From: Wang Wangwang Date: Fri, 5 Jul 2024 10:35:38 +0800 Subject: [PATCH 31/50] [HETERO] Support splitting new graph pattern for pipeline parallel and correct the number of submodels (#25224) ### Details: - Fix qwen1.5-14b-chat with HETERO pipeline parallelism Add support for pattern: ``` ReadValue->Gather->Concat |------>ShapeOf(fused on other different affinity node) ->....
``` - Correct the value of HETERO_NUMBER_OF_SUBMODELS by subtracting the number of independent submodels to reduce confusion ### Tickets: - *ticket-id* --- src/inference/src/dev/iplugin.cpp | 31 +++++++++++++-- src/inference/tests/unit/query_model_test.cpp | 38 +++++++++++++++++++ src/plugins/hetero/src/compiled_model.cpp | 3 +- src/plugins/hetero/src/plugin.cpp | 1 + src/plugins/hetero/src/plugin.hpp | 2 + 5 files changed, 71 insertions(+), 4 deletions(-) diff --git a/src/inference/src/dev/iplugin.cpp b/src/inference/src/dev/iplugin.cpp index 6099457e959ffc..864b134fe58383 100644 --- a/src/inference/src/dev/iplugin.cpp +++ b/src/inference/src/dev/iplugin.cpp @@ -391,7 +391,7 @@ std::unordered_set ov::get_supported_nodes( } } // For example, A op need to be removed from supported: - // A (fused on B, to be marked as unsupported) + // A (removed nodes, to be marked as unsupported) // | // B (unsupported) // @@ -400,8 +400,33 @@ std::unordered_set ov::get_supported_nodes( update_supported = false; for (auto& op : model->get_ordered_ops()) { const auto& name = op->get_friendly_name(); - if (fused_model_op_map.find(name) != fused_model_op_map.end() && supported.count(name)) { - if (!supported.count(fused_model_op_map[name]) && + if (removed_nodes.count(name) && supported.count(name)) { + if (has_all_consumers_unsupported(supported, op)) { + supported.erase(name); + removed_nodes.erase(name); + update_supported = true; + } + } + } + } + // For example, A op need to be removed from supported: + // A (fused on B, to be marked as unsupported) + // | + // B (unsupported) + // + // A ShapeOf (to be marked as unsupported) + // | + // B (unsupported) + // + update_supported = true; + while (update_supported) { + update_supported = false; + for (auto& op : model->get_ordered_ops()) { + const auto& name = op->get_friendly_name(); + bool is_shapeof = ov::is_type(op); + if (((fused_model_op_map.find(name) != fused_model_op_map.end()) || is_shapeof) && + supported.count(name)) { + 
if ((!supported.count(fused_model_op_map[name]) || is_shapeof) && has_all_consumers_unsupported(supported, op)) { supported.erase(name); update_supported = true; diff --git a/src/inference/tests/unit/query_model_test.cpp b/src/inference/tests/unit/query_model_test.cpp index 30dde3f94c75ef..189a3df651854f 100644 --- a/src/inference/tests/unit/query_model_test.cpp +++ b/src/inference/tests/unit/query_model_test.cpp @@ -743,3 +743,41 @@ const std::vector testConfigs2 = { INSTANTIATE_TEST_SUITE_P(GetSupportedNodesTest, GetSupportedNodesCommonTest, ::testing::ValuesIn(testConfigs)); INSTANTIATE_TEST_SUITE_P(GetSupportedNodesTest, GetSupportedNodesOneConstOp, ::testing::ValuesIn(testConfigs1)); INSTANTIATE_TEST_SUITE_P(GetSupportedNodesTest, GetSupportedNodesStopSplit, ::testing::ValuesIn(testConfigs2)); + +TEST_F(GetSupportedNodesTest, FilterShapeOf) { + { + auto param = std::make_shared(ov::element::f32, ov::PartialShape{1, 1}); + param->set_friendly_name("input"); + auto weights = ov::op::v0::Constant::create(ov::element::Type_t::f32, {1, 1}, {1}); + weights->set_friendly_name("weights"); + auto shapeOf = std::make_shared(weights); + shapeOf->set_friendly_name("shapeof"); + auto const1 = ov::op::v0::Constant::create(ov::element::Type_t::i32, {1}, {1}); + const1->set_friendly_name("const1"); + auto const2 = ov::op::v0::Constant::create(ov::element::Type_t::i64, {}, {0}); + const2->set_friendly_name("const2"); + auto gather = std::make_shared(shapeOf, const1, const2); + gather->set_friendly_name("gather"); + auto const3 = ov::op::v0::Constant::create(ov::element::Type_t::i64, {1}, {1}); + const3->set_friendly_name("const3"); + auto concat = std::make_shared(ov::NodeVector{const3, gather}, 0); + concat->set_friendly_name("concat"); + auto reshape = std::make_shared(param, concat, false); + reshape->set_friendly_name("reshape"); + auto result = std::make_shared(reshape); + result->set_friendly_name("result"); + + m_function = std::make_shared(ov::ResultVector{result}, 
ov::ParameterVector{param}); + } + Run( + [&](std::shared_ptr& model) { + ov::pass::Manager m; + m.register_pass(); + m.run_passes(model); + }, + [&](const std::shared_ptr& op) { + return true; + }, + {"weights"}, + 0.5f); +} \ No newline at end of file diff --git a/src/plugins/hetero/src/compiled_model.cpp b/src/plugins/hetero/src/compiled_model.cpp index a62ebff86112a9..64ef5769379a24 100644 --- a/src/plugins/hetero/src/compiled_model.cpp +++ b/src/plugins/hetero/src/compiled_model.cpp @@ -321,7 +321,8 @@ ov::Any ov::hetero::CompiledModel::get_property(const std::string& name) const { } return decltype(ov::execution_devices)::value_type{std::move(device_names)}; } else if (ov::hetero::number_of_submodels == name) { - return decltype(ov::hetero::number_of_submodels)::value_type{m_compiled_submodels.size()}; + return decltype(ov::hetero::number_of_submodels)::value_type{ + (m_compiled_submodels.size() - get_hetero_plugin()->independent_submodel_size)}; } return m_cfg.get(name); } diff --git a/src/plugins/hetero/src/plugin.cpp b/src/plugins/hetero/src/plugin.cpp index 0e88e3c68b3108..298be6fa201f4f 100644 --- a/src/plugins/hetero/src/plugin.cpp +++ b/src/plugins/hetero/src/plugin.cpp @@ -193,6 +193,7 @@ std::pair ov::hetero::Plu auto result = std::make_shared(param); ov::copy_runtime_info(param->shared_from_this(), result); new_outputs.push_back(result); + independent_submodel_size++; } } model->add_results(new_outputs); diff --git a/src/plugins/hetero/src/plugin.hpp b/src/plugins/hetero/src/plugin.hpp index d3038642d56c76..f4497115b3cf04 100644 --- a/src/plugins/hetero/src/plugin.hpp +++ b/src/plugins/hetero/src/plugin.hpp @@ -69,6 +69,8 @@ class Plugin : public ov::IPlugin { bool allow_exception = false) const; Configuration m_cfg; + + mutable size_t independent_submodel_size = 0; }; } // namespace hetero From 812dbd71c367f5d5a35da52ba13e144106c76a69 Mon Sep 17 00:00:00 2001 From: "Wang, Yang" Date: Fri, 5 Jul 2024 11:46:04 +0800 Subject: [PATCH 32/50] [AUTO] 
Disable CPU model cache for fallback (#24726) ### Details: - Pass empty cache_dir to CPU plugin if cache_dir is set by user app when enable_startup_fallback or enable_runtime_fallback is true ### Tickets: - 141026 --------- Co-authored-by: Chen Peter --- src/plugins/auto/src/auto_schedule.cpp | 92 ++++++++++--------- .../unit/startup_fallback_property_test.cpp | 55 ++++++++++- 2 files changed, 103 insertions(+), 44 deletions(-) diff --git a/src/plugins/auto/src/auto_schedule.cpp b/src/plugins/auto/src/auto_schedule.cpp index 613287af14534b..3acaed20b1f6cc 100644 --- a/src/plugins/auto/src/auto_schedule.cpp +++ b/src/plugins/auto/src/auto_schedule.cpp @@ -98,7 +98,7 @@ void AutoSchedule::init() { m_compile_context[ACTUALDEVICE].m_model_precision, m_context->m_model_priority); - auto load_device_task = [&](AutoCompileContext* context_ptr, const std::shared_ptr& model) { + auto load_device_task = [&](AutoCompileContext* context_ptr, const std::shared_ptr& model) { try_to_compile_model(*context_ptr, model); if (context_ptr->m_is_load_success) { if (context_ptr->m_worker_name.empty()) { @@ -130,6 +130,53 @@ void AutoSchedule::init() { m_firstload_promise.set_value(); }); }; + auto customize_helper_context_from_cache_setting = [this](bool is_actual_cpu, + AutoCompileContext m_compile_context[], + ScheduleContext::Ptr& m_context) { + const auto cpu_iter = deviceChecker().check_and_return_if_device_in_list("CPU", m_context->m_device_priorities); + if (cpu_iter == m_context->m_device_priorities.end()) { + m_compile_context[CPU].m_is_enabled = false; + return; + } + m_compile_context[CPU].m_is_enabled = true; + if (!is_actual_cpu) { + auto device = m_compile_context[ACTUALDEVICE].m_device_info.device_name; + auto& device_config = m_compile_context[ACTUALDEVICE].m_device_info.config; + std::string cache_dir = device_config.count(ov::cache_dir.name()) + ? 
device_config[ov::cache_dir.name()].as() + : m_context->m_ov_core->get_property("", ov::cache_dir); + + if (m_context->m_startup_fallback && !cache_dir.empty()) { + const auto properties = + m_context->m_ov_core->create_compile_config(ov::DeviceIDParser(device).get_device_name(), + device_config); + std::string blobId; + if (m_context->m_model) + blobId = ov::ModelCache::compute_hash(std::const_pointer_cast(m_context->m_model), + properties); + else + blobId = ov::ModelCache::compute_hash(m_context->m_model_path, properties); + std::string cached_model_path = ov::util::make_path(cache_dir, blobId + ".blob"); + m_compile_context[CPU].m_is_enabled = !ov::util::file_exists(cached_model_path); + LOG_DEBUG_TAG("device: %s %s cached blob: %s ", + device.c_str(), + m_compile_context[CPU].m_is_enabled ? "not found" : "found", + cached_model_path.c_str()); + } + } + if (m_compile_context[CPU].m_is_enabled) { + m_compile_context[CPU].m_device_info = *cpu_iter; + m_compile_context[CPU].m_device_info.config[ov::hint::performance_mode.name()] = + ov::hint::PerformanceMode::LATENCY; + if (m_compile_context[ACTUALDEVICE].m_device_info.config.count(ov::cache_dir.name()) && + (m_context->m_startup_fallback || m_context->m_runtime_fallback)) { + m_compile_context[CPU].m_device_info.config[ov::cache_dir.name()] = ""; + LOG_INFO_TAG("Clear cache dir setting for CPU accelerator"); + } + m_compile_context[CPU].m_worker_name = "CPU_HELP"; + LOG_INFO_TAG("will load CPU for accelerator"); + } + }; if (m_compile_context[ACTUALDEVICE].m_is_enabled) { LOG_INFO_TAG("select device:%s", m_compile_context[ACTUALDEVICE].m_device_info.device_name.c_str()); bool is_actual_cpu = m_compile_context[ACTUALDEVICE].m_device_info.device_name.find("CPU") != std::string::npos; @@ -138,48 +185,7 @@ void AutoSchedule::init() { if (is_actual_cpu || !m_context->m_startup_fallback) { m_compile_context[CPU].m_is_enabled = false; } else { - const auto cpu_iter = 
deviceChecker().check_and_return_if_device_in_list("CPU", m_context->m_device_priorities); - // if have CPU Device, enable m_compile_context[CPU] - if (cpu_iter != m_context->m_device_priorities.end()) { - m_compile_context[CPU].m_is_enabled = true; - if (!is_actual_cpu) { - // user does not set the compiling threads - // limit the threads num for compiling - auto device = m_compile_context[ACTUALDEVICE].m_device_info.device_name; - auto& device_config = m_compile_context[ACTUALDEVICE].m_device_info.config; - std::string cache_dir = device_config.count(ov::cache_dir.name()) - ? device_config[ov::cache_dir.name()].as() - : m_context->m_ov_core->get_property("", ov::cache_dir); - - if (m_context->m_startup_fallback && !cache_dir.empty()) { - const auto properties = - m_context->m_ov_core->create_compile_config(ov::DeviceIDParser(device).get_device_name(), - device_config); - std::string blobId; - if (m_context->m_model) - blobId = ov::ModelCache::compute_hash( - std::const_pointer_cast(m_context->m_model), - properties); - else - blobId = ov::ModelCache::compute_hash(m_context->m_model_path, properties); - std::string cached_model_path = ov::util::make_path(cache_dir, blobId + ".blob"); - m_compile_context[CPU].m_is_enabled = !ov::util::file_exists(cached_model_path); - LOG_DEBUG_TAG("device: %s %s cached blob: %s ", - device.c_str(), - m_compile_context[CPU].m_is_enabled ? 
"not found" : "found", - cached_model_path.c_str()); - } - } - if (m_compile_context[CPU].m_is_enabled) { - m_compile_context[CPU].m_device_info = *cpu_iter; - m_compile_context[CPU].m_device_info.config[ov::hint::performance_mode.name()] = - ov::hint::PerformanceMode::LATENCY; - m_compile_context[CPU].m_worker_name = "CPU_HELP"; - LOG_INFO_TAG("will load CPU for accelerator"); - } - } else { - m_compile_context[CPU].m_is_enabled = false; - } + customize_helper_context_from_cache_setting(is_actual_cpu, m_compile_context, m_context); } // initialize the rest members of load context for (int i = 0; i < CONTEXTNUM; i++) { diff --git a/src/plugins/auto/tests/unit/startup_fallback_property_test.cpp b/src/plugins/auto/tests/unit/startup_fallback_property_test.cpp index 8b932289dc10ea..d19259e381efba 100644 --- a/src/plugins/auto/tests/unit/startup_fallback_property_test.cpp +++ b/src/plugins/auto/tests/unit/startup_fallback_property_test.cpp @@ -29,6 +29,20 @@ MATCHER_P(MapContains, subMap, "Check if all the elements of the subMap are cont class AutoStartupFallback : public tests::AutoTest, public ::testing::TestWithParam { public: + static std::string getTestCaseNameCacheTest(testing::TestParamInfo obj) { + bool startup_fallback; + ov::AnyMap config; + std::tie(startup_fallback, config) = obj.param; + std::ostringstream result; + result << "_expected_disabling_cache_" << startup_fallback; + result << "_compiled_config_"; + for (auto& item : config) { + result << item.first << "_" << item.second.as() << "_"; + } + auto name = result.str(); + name.pop_back(); + return name; + } void SetUp() override { plugin->set_device_name("AUTO"); ON_CALL(*core, @@ -36,7 +50,8 @@ class AutoStartupFallback : public tests::AutoTest, public ::testing::TestWithPa ::testing::Matcher(_), _)) .WillByDefault(Return(mockExeNetwork)); - metaDevices = {{ov::test::utils::DEVICE_CPU, {}, -1}, {ov::test::utils::DEVICE_GPU, {}, -1}}; + metaDevices = {{ov::test::utils::DEVICE_CPU, 
{ov::cache_dir("test_dir")}, -1}, + {ov::test::utils::DEVICE_GPU, {ov::cache_dir("test_dir")}, -1}}; ON_CALL(*plugin, parse_meta_devices(_, _)).WillByDefault(Return(metaDevices)); ON_CALL(*plugin, get_valid_device) .WillByDefault([](const std::vector& metaDevices, const std::string& netPrecision) { @@ -74,3 +89,41 @@ const std::vector testConfigs = {ConfigParams{true, {{"ENABLE_STAR ConfigParams{false, {{"ENABLE_STARTUP_FALLBACK", "NO"}}}}; INSTANTIATE_TEST_SUITE_P(smoke_Auto_StartupFallback, AutoStartupFallback, ::testing::ValuesIn(testConfigs)); + +using AutoLoadExeNetworkCacheDirSettingTest = AutoStartupFallback; +TEST_P(AutoLoadExeNetworkCacheDirSettingTest, canDisableCacheDirSettingForCPUPlugin) { + // get Parameter + bool is_disable_cache_dir; + ov::AnyMap config; + std::tie(is_disable_cache_dir, config) = this->GetParam(); + EXPECT_CALL(*core, + compile_model(::testing::Matcher&>(_), + ::testing::Matcher(StrEq(ov::test::utils::DEVICE_GPU)), + _)) + .Times(1); + + if (is_disable_cache_dir) { + std::map test_map = {{ov::cache_dir.name(), ""}}; + EXPECT_CALL(*core, + compile_model(::testing::Matcher&>(_), + ::testing::Matcher(StrEq(ov::test::utils::DEVICE_CPU)), + ::testing::Matcher(MapContains(test_map)))) + .Times(1); + } + + ASSERT_NO_THROW(plugin->compile_model(model, config)); +} + +const std::vector testCacheConfigs = { + ConfigParams{true, {ov::intel_auto::enable_startup_fallback(true)}}, + ConfigParams{true, {ov::intel_auto::enable_runtime_fallback(true)}}, + ConfigParams{true, {ov::intel_auto::enable_startup_fallback(true), ov::intel_auto::enable_runtime_fallback(false)}}, + ConfigParams{false, {ov::intel_auto::enable_startup_fallback(false), ov::intel_auto::enable_runtime_fallback(true)}}, + ConfigParams{true, {ov::intel_auto::enable_startup_fallback(true), ov::intel_auto::enable_runtime_fallback(true)}}, + ConfigParams{false, + {ov::intel_auto::enable_startup_fallback(false), ov::intel_auto::enable_runtime_fallback(false)}}}; + 
+INSTANTIATE_TEST_SUITE_P(smoke_Auto_disableCachingForCPUPlugin, + AutoLoadExeNetworkCacheDirSettingTest, + ::testing::ValuesIn(testCacheConfigs), + AutoLoadExeNetworkCacheDirSettingTest::getTestCaseNameCacheTest); \ No newline at end of file From 67d73f89e8700058eb78229aecd708de542cedd8 Mon Sep 17 00:00:00 2001 From: Pawel Raasz Date: Fri, 5 Jul 2024 06:55:46 +0200 Subject: [PATCH 33/50] [core] Fix Coverity issues in common CC and util (#25350) ### Details: - Remove unnecessary copies from Conditional Compilation an common util components. ### Tickets: - N/A --- .../include/openvino/cc/factory.h | 12 ++++++------ src/common/util/src/common_util.cpp | 4 ++-- src/common/util/src/file_util.cpp | 4 ++-- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/src/common/conditional_compilation/include/openvino/cc/factory.h b/src/common/conditional_compilation/include/openvino/cc/factory.h index 3f9ebb5e0ba062..b746b60f5fa3e2 100644 --- a/src/common/conditional_compilation/include/openvino/cc/factory.h +++ b/src/common/conditional_compilation/include/openvino/cc/factory.h @@ -39,7 +39,7 @@ class Factory { template void registerImpl1(const Key& key) { builders[key] = [](Args... args) -> T { - Impl* impl = new Impl(args...); + Impl* impl = new Impl(std::move(args)...); return static_cast(impl); }; } @@ -47,7 +47,7 @@ class Factory { T createImpl(const Key& key, Args... args) { auto builder = builders.find(key); if (builder != builders.end()) { - return builder->second(args...); + return builder->second(std::move(args)...); } return nullptr; } @@ -63,7 +63,7 @@ class Factory { const std::string task_name = "REG$" + name + "$" + to_string(key) + "$" + typeName; openvino::itt::ScopedTask task(openvino::itt::handle(task_name)); builders[key] = [](Args... 
args) -> T { - Impl* impl = new Impl(args...); + Impl* impl = new Impl(std::move(args)...); return static_cast(impl); }; } @@ -74,7 +74,7 @@ class Factory { if (builder != builders.end()) { const std::string task_name = "CREATE$" + name + "$" + to_string(key); openvino::itt::ScopedTask task(openvino::itt::handle(task_name)); - return builder->second(args...); + return builder->second(std::move(args)...); } return nullptr; } @@ -87,7 +87,7 @@ class Factory { template void registerImpl(const Key& key) { builders[key] = [](Args... args) -> T { - Impl* impl = new Impl(args...); + Impl* impl = new Impl(std::move(args)...); return static_cast(impl); }; } @@ -95,7 +95,7 @@ class Factory { T createImpl(const Key& key, Args... args) { auto builder = builders.find(key); if (builder != builders.end()) { - return builder->second(args...); + return builder->second(std::move(args)...); } return nullptr; } diff --git a/src/common/util/src/common_util.cpp b/src/common/util/src/common_util.cpp index 95b833c2767647..ec365c32710781 100644 --- a/src/common/util/src/common_util.cpp +++ b/src/common/util/src/common_util.cpp @@ -53,10 +53,10 @@ std::string ov::util::filter_lines_by_prefix(const std::string& str, const std:: auto lines = ov::util::split(str, '\n'); std::stringstream res; const char* const prefix_c = prefix.c_str(); - for (auto line : lines) { + for (const auto& line : lines) { if (line.find(prefix_c) == 0) { res << line + '\n'; } } return res.str(); -} \ No newline at end of file +} diff --git a/src/common/util/src/file_util.cpp b/src/common/util/src/file_util.cpp index 032010a4d2b23e..2846355e9aeb5f 100644 --- a/src/common/util/src/file_util.cpp +++ b/src/common/util/src/file_util.cpp @@ -240,7 +240,7 @@ static void iterate_files_worker(const std::string& path, } catch (...) 
{ std::exception_ptr p = std::current_exception(); closedir(dir); - std::rethrow_exception(p); + std::rethrow_exception(std::move(p)); } closedir(dir); } else { @@ -386,7 +386,7 @@ std::string ov::util::get_absolute_file_path(const std::string& path) { // on Linux if file does not exist or no access, function will return NULL, but // `absolutePath` will contain resolved path absolutePath.resize(absolutePath.find('\0')); - return std::string(absolutePath); + return absolutePath; } std::stringstream ss; ss << "Can't get absolute file path for [" << path << "], err = " << strerror(errno); From d858f0a9a2475e0ae22fb90125d7743cd5d8a712 Mon Sep 17 00:00:00 2001 From: Wilson Seok Date: Thu, 4 Jul 2024 22:28:12 -0700 Subject: [PATCH 34/50] [GPU]Fix layout count comparison by using actual byte value in realloc_if_needed (#25345) ### Details: - Fix layout count comparison by using actual byte value ### Tickets: - 144293 --- .../include/intel_gpu/runtime/memory.hpp | 2 + .../intel_gpu/src/graph/primitive_inst.cpp | 10 +- .../dynamic_execution/memory_realloc_test.cpp | 108 ++++++++++++++++++ 3 files changed, 115 insertions(+), 5 deletions(-) diff --git a/src/plugins/intel_gpu/include/intel_gpu/runtime/memory.hpp b/src/plugins/intel_gpu/include/intel_gpu/runtime/memory.hpp index 8356a8b8ce24b8..867b553ebf16f9 100644 --- a/src/plugins/intel_gpu/include/intel_gpu/runtime/memory.hpp +++ b/src/plugins/intel_gpu/include/intel_gpu/runtime/memory.hpp @@ -31,6 +31,8 @@ class MemoryTracker { explicit MemoryTracker(engine* engine, void* buffer_ptr, size_t buffer_size, allocation_type alloc_type); ~MemoryTracker(); + size_t size() const { return m_buffer_size; } + private: engine* m_engine; void* m_buffer_ptr; diff --git a/src/plugins/intel_gpu/src/graph/primitive_inst.cpp b/src/plugins/intel_gpu/src/graph/primitive_inst.cpp index 3afcde70b1e5b9..7dac5ffc3a7c4e 100644 --- a/src/plugins/intel_gpu/src/graph/primitive_inst.cpp +++ b/src/plugins/intel_gpu/src/graph/primitive_inst.cpp @@ 
-649,13 +649,13 @@ event::ptr primitive_inst::realloc_if_needed() { // If we allocated too large memory, reclaim the memory. if (updated_layout.get_buffer_size().count() * 10 < _max_output_layout_count) { - GPU_DEBUG_TRACE_DETAIL << id() << ": Updated output size " << updated_layout.count() + GPU_DEBUG_TRACE_DETAIL << id() << ": Updated output size " << updated_layout.get_buffer_size().count() << " is much smaller than current memory size! " << _max_output_layout_count << "Reset memory" << std::endl; _max_output_layout_count = 0; } - bool can_reuse_buffer = _outputs[0] && updated_layout.count() <= _max_output_layout_count; + bool can_reuse_buffer = _outputs[0] && updated_layout.get_buffer_size().count() <= _max_output_layout_count; // Handle runtime dynamic concat optimization if (_node->is_type() && can_be_optimized() && allocation_done_by_other) { allocation_done_by_other = false; @@ -681,7 +681,7 @@ event::ptr primitive_inst::realloc_if_needed() { updated_params.output_layouts[0] = updated_layout; if (can_reuse_buffer) { GPU_DEBUG_TRACE_DETAIL << id() << ": reuse previously allocated output buffer - " - << actual_layout.count() << "/" << _max_output_layout_count + << actual_layout.get_buffer_size().count() << "/" << _max_output_layout_count << std::endl; if (_outputs[0]->get_layout() != actual_layout) { _outputs[0] = _network.get_engine().reinterpret_buffer(*_outputs[0], actual_layout); @@ -694,7 +694,7 @@ event::ptr primitive_inst::realloc_if_needed() { } else { GPU_DEBUG_TRACE_DETAIL << id() << ": realloc output memory. 
" << " Current buffer_size=" << _max_output_layout_count - << " Requested buffer_size=" << updated_layout.count() << std::endl; + << " Requested buffer_size=" << updated_layout.get_buffer_size().count() << std::endl; _outputs = allocate_outputs(&updated_params, need_reset_output_memory(), true); GPU_DEBUG_CODE(std::string memalloc_info = ""); GPU_DEBUG_CODE(for (size_t out_idx = 0; out_idx < _outputs.size(); ++out_idx) { @@ -1679,7 +1679,7 @@ primitive_inst::primitive_inst(network & network, program_node const& node, bool } _impl_params->strm = _network.get_stream_ptr(); if (_outputs[0]) - _max_output_layout_count = _outputs[0]->get_layout().get_tensor().count(); + _max_output_layout_count = _outputs[0]->get_layout().get_buffer_size().count(); } memory::ptr primitive_inst::allocate_internal_buffer(size_t idx, bool reset) { diff --git a/src/plugins/intel_gpu/tests/unit/dynamic_execution/memory_realloc_test.cpp b/src/plugins/intel_gpu/tests/unit/dynamic_execution/memory_realloc_test.cpp index 1febfc4cd135ab..9a67bcdb44cb08 100644 --- a/src/plugins/intel_gpu/tests/unit/dynamic_execution/memory_realloc_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/dynamic_execution/memory_realloc_test.cpp @@ -122,6 +122,10 @@ TEST(memory_reuse_realloc_reset_test, basic_conv_with_padding) { for (size_t i = 32; i < 35; ++i) { ASSERT_EQ((float)reorder_mem_ptr[i], 0.f); } + // Mem should be reallocate when request size is bigger than existing buffer size + ASSERT_TRUE(reorder_mem->size() <= reorder_mem->get_mem_tracker()->size()) + << "reorder mem buffer size: " << reorder_mem->size() << "bytes is bigger than original size of allocated mem: " + << reorder_mem->get_mem_tracker()->size() << "bytes."; } TEST(softmax_gpu_dynamic_f32_test_upper_bound, input_same_values) { @@ -265,4 +269,108 @@ TEST(dyn_shape_mem_test, igpu_shape_infer_dep_mem_type) { auto expected_layout = layout{ov::PartialShape{3, 2, 1, 1}, data_types::f16, format::bfyx}; 
ASSERT_EQ(output.begin()->second.get_memory()->get_layout(), expected_layout); } + +TEST(memory_reuse_realloc_reset_test, basic_conv_with_padding_reorder) { + auto& engine = get_test_engine(); + + layout weight_layout = layout{ov::PartialShape{1, 3, 3, 3}, data_types::f16, format::bfyx}; + + auto weights = engine.allocate_memory(weight_layout); + set_values(weights, { + 1.0f, 1.0f, 1.0f, + 1.0f, 1.0f, 1.0f, + 1.0f, 1.0f, 1.0f, + // + 2.0f, 2.0f, 2.0f, + 2.0f, 2.0f, 2.0f, + 2.0f, 2.0f, 2.0f, + // + 3.0f, 3.0f, 3.0f, + 3.0f, 3.0f, 3.0f, + 3.0f, 3.0f, 3.0f, + }); + + layout input_layout_1 = layout{ov::PartialShape{1, 3, 5, 5}, data_types::f32, format::bfyx}; + auto input_mem_1 = engine.allocate_memory(input_layout_1); + set_values(input_mem_1, { + 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, + 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, + 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, + 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, + 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, + // + 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, + 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, + 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, + 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, + 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, + // + 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, + 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, + 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, + 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, + 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, + }); + + std::vector ref_output_1 = {6, 18, 36, 54, 72, 54, 30, 12, 36, 72, 108, 144, 108, + 60, 18, 54, 108, 162, 216, 162, 90, 18, 54, 108, 162, 216, + 162, 90, 18, 54, 108, 162, 216, 162, 90, 12, 36, 72, 108, + 144, 108, 60, 6, 18, 36, 54, 72, 54, 30}; + + layout input_layout_2 = layout{ov::PartialShape{1, 3, 2, 2}, data_types::f32, format::bfyx}; + auto input_mem_2 = engine.allocate_memory(input_layout_2); + set_values(input_mem_2, {11.0f, 11.0f, 11.0f, 11.0f, + 11.0f, 11.0f, 11.0f, 11.0f, + 11.0f, 11.0f, 11.0f, 11.0f}); + std::vector ref_output_2 = { 66, 132, 132, 66, 132, 264, 264, 132, 132, 264, 264, 132, 66, 132, 132, 66}; + std::vector values_to_subtract = {}; + auto input_l = layout{ov::PartialShape::dynamic(4), data_types::f32, 
format::bfyx}; + topology topology(input_layout("input", input_l), + data("weights", weights), + reorder("reorder", input_info("input"), format::bfyx, data_types::f16, + values_to_subtract, reorder_mean_mode::subtract, padding{{0, 0, 2, 2}, 0}), + convolution("conv", + input_info("reorder"), + "weights", + "", /*bias*/ + 1, + {1, 1}, /*stride*/ + {1, 1}, /*dilation*/ + {2, 2}, /*pad_above*/ + {2, 2}, /*pad_below*/ + false, + ov::op::PadType::EXPLICIT, + padding{{0, 0, 0, 0}, 0}), + reorder("output", input_info("conv"), format::bfyx, data_types::f32)); /*output padding*/ + + ExecutionConfig config = get_test_default_config(engine); + config.set_property(ov::intel_gpu::allow_new_shape_infer(true)); + + network network(engine, topology, config); + network.set_input_data("input", input_mem_2); + auto outputs_1 = network.execute(); + network.set_input_data("input", input_mem_1); + auto outputs_2 = network.execute(); + + // check padding of second run of reorder + // 0, 0, 0, ... 0, 0, 0, + // 0, 0, 0, ... 0, 0, 0, + // 0, 0, 1, ... 5, 0, 0, + // . . . + // 0, 0, 1, ... 5, 0, 0, + // 0, 0,"0", .. "0","0","0", // !! check pad_after + // 0, 0,"0", .. "0","0","0", // !! 
check pad_after + auto reorder_mem = network.get_primitive("reorder")->output_memory_ptr(); + cldnn::mem_lock reorder_mem_ptr(reorder_mem, get_test_stream()); + for (size_t i = (63 + 81 * 2); i < (71 + 81 * 2); ++i) { + ASSERT_EQ((float)reorder_mem_ptr[i], 0.f); + } + for (size_t i = (72 + 81 * 2); i < (80 + 81 * 2); ++i) { + ASSERT_EQ((float)reorder_mem_ptr[i], 0.f); + } + // Mem should be reallocate when request size is bigger than existing buffer size + ASSERT_TRUE(reorder_mem->size() <= reorder_mem->get_mem_tracker()->size()) + << "reorder mem buffer size: " << reorder_mem->size() << "bytes is bigger than original size of allocated mem: " + << reorder_mem->get_mem_tracker()->size() << "bytes."; +} } // memory_realloc_tests From 32daf83e48ee599ecbf9ff24cd843412a4584df5 Mon Sep 17 00:00:00 2001 From: Jade Cho Date: Fri, 5 Jul 2024 14:45:07 +0900 Subject: [PATCH 35/50] [GPU] Use onednn reorder for byxf format (#25242) ### Tickets: - *142826* --- src/plugins/intel_gpu/src/graph/layout_optimizer.cpp | 1 + .../tests/unit/passes/prepare_buffer_fusing_test.cpp | 12 ++++++++---- .../tests/unit/test_cases/pooling_gpu_test.cpp | 10 ++++++++++ .../tests/unit/test_cases/reorder_gpu_test.cpp | 10 ++++++++++ 4 files changed, 29 insertions(+), 4 deletions(-) diff --git a/src/plugins/intel_gpu/src/graph/layout_optimizer.cpp b/src/plugins/intel_gpu/src/graph/layout_optimizer.cpp index 8c5930e4595065..07c66b3b983c54 100644 --- a/src/plugins/intel_gpu/src/graph/layout_optimizer.cpp +++ b/src/plugins/intel_gpu/src/graph/layout_optimizer.cpp @@ -1574,6 +1574,7 @@ impl_types layout_optimizer::get_preferred_impl_type(program_node& node, format std::vector onednn_optimized_fmt = { format::bfyx, + format::byxf, format::b_fs_zyx_fsv16, format::b_fs_yx_fsv16, format::b_fs_yx_fsv32, diff --git a/src/plugins/intel_gpu/tests/unit/passes/prepare_buffer_fusing_test.cpp b/src/plugins/intel_gpu/tests/unit/passes/prepare_buffer_fusing_test.cpp index 84cf05fed6cfc4..ac3bf0d553e0e3 100644 --- 
a/src/plugins/intel_gpu/tests/unit/passes/prepare_buffer_fusing_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/passes/prepare_buffer_fusing_test.cpp @@ -759,8 +759,10 @@ TEST(prepare_buffer_fusing, test_implicit_crop_and_outerpadding_conv) { } } - auto crop_prim = network.get_primitive("crop_input"); - ASSERT_EQ(crop_prim->can_be_optimized(), true); + if (!engine.get_device_info().supports_immad) { + auto crop_prim = network.get_primitive("crop_input"); + ASSERT_EQ(crop_prim->can_be_optimized(), true); + } } // For deconv, Check padded input and weight propagated by implicit crop are handled properly @@ -805,8 +807,10 @@ TEST(prepare_buffer_fusing, test_implicit_crop_and_outerpadding_deconv) { for (unsigned int i = 0; i < expected_output_vec.size(); i++) ASSERT_FLOAT_EQ(expected_output_vec[i], output_ptr[i]); - auto crop_prim = network.get_primitive("crop_input"); - ASSERT_EQ(crop_prim->can_be_optimized(), true); + if (!engine.get_device_info().supports_immad) { + auto crop_prim = network.get_primitive("crop_input"); + ASSERT_EQ(crop_prim->can_be_optimized(), true); + } } TEST(prepare_buffer_fusing, test_checking_padding_supported) { diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/pooling_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/pooling_gpu_test.cpp index 1abbdf0643a974..7da8a31a24e8e7 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/pooling_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/pooling_gpu_test.cpp @@ -281,6 +281,11 @@ TEST(pooling_forward_gpu, basic_max_pooling_int8) { std::initializer_list input_f = { 1.0f, -2.5f, 3.1f, -4.0f, 5.03f, -6.99f, 7.0f, -8.0f, 9.5f }; std::list final_results = { 9.0f }; + if (engine.get_device_info().supports_immad) { + // Use onednn when reordering byxf format. + final_results = { 10.0f }; + } + // Allocate memory for input image. 
auto input_memory = engine.allocate_memory(in_layout); set_values(input_memory, input_f); @@ -3225,6 +3230,11 @@ TEST(pooling_forward_gpu_onednn, basic_max_pooling_int8) { std::initializer_list input_f = { 1.0f, -2.5f, 3.1f, -4.0f, 5.03f, -6.99f, 7.0f, -8.0f, 9.5f }; std::list final_results = { 9.0f }; + if (engine.get_device_info().supports_immad) { + // Use onednn when reordering byxf format. + final_results = { 10.0f }; + } + // Allocate memory for input image. auto input_memory = engine.allocate_memory(in_layout); set_values(input_memory, input_f); diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/reorder_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/reorder_gpu_test.cpp index 10fa8ccb743acd..f44ec4f3c27d2d 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/reorder_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/reorder_gpu_test.cpp @@ -911,6 +911,11 @@ TEST(reorder_gpu, basic_convert_int8) { std::initializer_list input_f = { 1.0f, -2.5f, 3.1f, -4.0f, 5.03f, -6.99f, 7.0f, -8.0f, 9.0f }; std::list final_results = { 1.0f, -2.0f, 3.0f, -4.0f, 5.0f, -6.0f, 7.0f, -8.0f, 9.0f }; + if (engine.get_device_info().supports_immad) { + // Use onednn when reordering byxf format. + final_results = { 1.0f, -2.0f, 3.0f, -4.0f, 5.0f, -7.0f, 7.0f, -8.0f, 9.0f }; + } + // Allocate memory for input image. auto input_memory = engine.allocate_memory(in_layout); set_values(input_memory, input_f); @@ -955,6 +960,11 @@ TEST(reorder_gpu, basic_convert_uint8) { std::initializer_list input_f = { 1.0f, -2.5f, 3.1f, -4.0f, 5.03f, -6.99f, 7.0f, -8.0f, 9.0f }; std::list final_results = { 1.0f, 254.0f, 3.0f, 252.0f, 5.0f, 250.0f, 7.0f, 248.0f, 9.0f }; + if (engine.get_device_info().supports_immad) { + // Use onednn when reordering byxf format. + final_results = { 1.0f, 0.0f, 3.0f, 0.0f, 5.0f, 0.0f, 7.0f, 0.0f, 9.0f }; + } + // Allocate memory for input image. 
auto input_memory = engine.allocate_memory(in_layout); set_values(input_memory, input_f); From d7b45df1b380b2de2d00fa1563d817b24a975e64 Mon Sep 17 00:00:00 2001 From: Eddy Kim Date: Fri, 5 Jul 2024 15:16:15 +0900 Subject: [PATCH 36/50] [GPU] fixed to update dispatchData.blockWidth instead of jit constant (#25377) ### Details: - This PR fixed the `convolution_gpu_bfyx_f16_1x1` kernel to set the vector size of fused ops to updated `blockWidth`. --- .../convolution_kernel_b_fs_yx_fsv16_1x1.cpp | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/convolution/convolution_kernel_b_fs_yx_fsv16_1x1.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernels/convolution/convolution_kernel_b_fs_yx_fsv16_1x1.cpp index f91c5b870e53f0..6fd074f8d8506d 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/convolution/convolution_kernel_b_fs_yx_fsv16_1x1.cpp +++ b/src/plugins/intel_gpu/src/kernel_selector/kernels/convolution/convolution_kernel_b_fs_yx_fsv16_1x1.cpp @@ -361,16 +361,16 @@ JitConstants ConvolutionKernel_b_fs_yx_fsv16_1x1::GetJitConstants(const convolut auto cldnnJit = GetJitConstants(newParams, dispatchData); for (size_t i = 0; i < num_kernels; i++) { if (params.is_shape_agnostic) { - cldnnJit.RemoveConstant("X_BLOCK_SIZE"); if (i == 0) { - cldnnJit.AddConstant(MakeJitConstant("X_BLOCK_SIZE", "1")); + dispatchData.cldnnStyle.blockWidth = 1; } else if (i == 1) { - cldnnJit.AddConstant(MakeJitConstant("X_BLOCK_SIZE", "2")); + dispatchData.cldnnStyle.blockWidth = 2; } else if (i == 2) { - cldnnJit.AddConstant(MakeJitConstant("X_BLOCK_SIZE", "4")); + dispatchData.cldnnStyle.blockWidth = 4; } else if (i == 3) { - cldnnJit.AddConstant(MakeJitConstant("X_BLOCK_SIZE", "8")); + dispatchData.cldnnStyle.blockWidth = 8; } + cldnnJit = GetJitConstants(newParams, dispatchData); } auto entryPoint = GetEntryPoint(finalKernelName, newParams.layerID, params, i); auto jit = CreateJit(finalKernelName, 
cldnnJit, entryPoint); From d5a6d316c65c5b21ef1090461bbe85e754fc513c Mon Sep 17 00:00:00 2001 From: Mingyu Kim Date: Fri, 5 Jul 2024 16:38:37 +0900 Subject: [PATCH 37/50] [GPU] update onednn to latest v3.6-pc (#25387) --- src/plugins/intel_gpu/thirdparty/onednn_gpu | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/plugins/intel_gpu/thirdparty/onednn_gpu b/src/plugins/intel_gpu/thirdparty/onednn_gpu index d6ddb0391a6bb6..ae495259266077 160000 --- a/src/plugins/intel_gpu/thirdparty/onednn_gpu +++ b/src/plugins/intel_gpu/thirdparty/onednn_gpu @@ -1 +1 @@ -Subproject commit d6ddb0391a6bb62c18049fc56f979cc4409d2300 +Subproject commit ae49525926607783a3b923a03e97b52d156c6ab3 From 27c872fd5f6902d3479d8807b8378b73bf19da16 Mon Sep 17 00:00:00 2001 From: Luo Cheng Date: Fri, 5 Jul 2024 17:04:05 +0800 Subject: [PATCH 38/50] [CPU] Disable avx2 asymmetric dynamic quantization (#25344) ### Details: - *Disable avx2 asymmetric dynamic quantization* - *...* ### Tickets: - *ticket-id* --- .../nodes/executors/dnnl/dnnl_fullyconnected_primitive.cpp | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_fullyconnected_primitive.cpp b/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_fullyconnected_primitive.cpp index ef1e127f44f636..ff6d17b4faa6b8 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_fullyconnected_primitive.cpp +++ b/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_fullyconnected_primitive.cpp @@ -150,6 +150,11 @@ bool DnnlFCPrimitive::useDynamicQuantizationImpl(size_t dqGroupSize, const Memor if (zpPtr && !one_of(zpPtr->getDesc().getPrecision(), ov::element::u8, ov::element::u4, ov::element::undefined)) return false; + // TODO: heuristic: disable avx2 asymmetric + bool is_asymmetric_weights = one_of(weightsDesc->getPrecision(), ov::element::u8, ov::element::u4); + if (is_asymmetric_weights && !dnnl::impl::cpu::x64::mayiuse(dnnl::impl::cpu::x64::avx512_core_vnni)) + return false; + 
const size_t simdWidth = 16; if (dqGroupSize % simdWidth) return false; From 8217df5b2c4d3549e326b39aadc76477b9b9bb52 Mon Sep 17 00:00:00 2001 From: Georgy Krivoruchko Date: Fri, 5 Jul 2024 13:51:03 +0400 Subject: [PATCH 39/50] [LayerTests] Added handling of a boolean results (#25391) ### Details: - Difference in boolean results was causing an unexpected issue while validation. We don't need to know max difference of a boolean array, just highlight it exists. ### Tickets: - 137495 --------- Co-authored-by: Evgenya Nugmanova --- tests/layer_tests/common/layer_test_class.py | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/tests/layer_tests/common/layer_test_class.py b/tests/layer_tests/common/layer_test_class.py index 42f1e3e3854917..67e72a946aa7a5 100644 --- a/tests/layer_tests/common/layer_test_class.py +++ b/tests/layer_tests/common/layer_test_class.py @@ -167,19 +167,21 @@ def compare_ie_results_with_framework(self, infer_res, framework_res, framework_ is_ok = True from common.utils.common_utils import allclose for framework_out_name in framework_res: - ie_out_name = framework_out_name - if ie_out_name not in infer_res and len(infer_res) == 1: + if framework_out_name not in infer_res and len(infer_res) == 1: ie_res = list(infer_res.values())[0] else: - ie_res = infer_res[ie_out_name] + ie_res = infer_res[framework_out_name] if not allclose(ie_res, framework_res[framework_out_name], atol=framework_eps, rtol=framework_eps): is_ok = False - print("Max diff is {}".format( - np.array( - abs(infer_res[ie_out_name] - framework_res[framework_out_name])).max())) + if ie_res.dtype != bool: + print("Max diff is {}".format( + np.array( + abs(ie_res - framework_res[framework_out_name])).max())) + else: + print("Boolean results are not equal") else: print("Accuracy validation successful!\n") print("absolute eps: {}, relative eps: {}".format(framework_eps, framework_eps)) From 70b4c502a17e872464f9383c40716f91d1f6d887 Mon Sep 17 00:00:00 2001 
From: Maksim Kutakov Date: Fri, 5 Jul 2024 11:59:50 +0200 Subject: [PATCH 40/50] [CPU] Fix avx2 bf16 reorder (#25353) ### Details: Fix forked oneDNN JIT reorder kernel for the case of bf16 precision and AVX2 ISA. OneDNN fork PR: https://github.com/openvinotoolkit/oneDNN/pull/253 ### Tickets: - CVS-145874 --- src/plugins/intel_cpu/thirdparty/onednn | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/plugins/intel_cpu/thirdparty/onednn b/src/plugins/intel_cpu/thirdparty/onednn index d19b5d85880ba9..dc69ce59283efa 160000 --- a/src/plugins/intel_cpu/thirdparty/onednn +++ b/src/plugins/intel_cpu/thirdparty/onednn @@ -1 +1 @@ -Subproject commit d19b5d85880ba917552e57183670f7a3e2fcd086 +Subproject commit dc69ce59283efad7961af9f9f3ceaf8f4c5b076a From dce3308ac4a4a6eee6c98eaeef2af243ee6d780f Mon Sep 17 00:00:00 2001 From: Mingyu Kim Date: Fri, 5 Jul 2024 19:59:13 +0900 Subject: [PATCH 41/50] Revert "[GPU] update shape for fused prims (#25363)" (#25395) This reverts commit 01dc53ac2deb437c644b63a633e0a7780ddf848c. 
Reverting this PR because it is causing LLM failures ### Tickets: - 145784 --- .../intel_gpu/src/graph/primitive_inst.cpp | 45 +++++-------------- 1 file changed, 11 insertions(+), 34 deletions(-) diff --git a/src/plugins/intel_gpu/src/graph/primitive_inst.cpp b/src/plugins/intel_gpu/src/graph/primitive_inst.cpp index 7dac5ffc3a7c4e..bbeecf970b56fb 100644 --- a/src/plugins/intel_gpu/src/graph/primitive_inst.cpp +++ b/src/plugins/intel_gpu/src/graph/primitive_inst.cpp @@ -586,42 +586,19 @@ event::ptr primitive_inst::realloc_if_needed() { user_insts.size(), " and ", user_insts_origin.size()); } for (auto user : user_insts) { - auto is_fused_prim_of_user = [&](primitive_id id) -> bool { - for (auto& p : user->get_node().get_fused_primitives()) { - if (p.has_outer_dep()) { - const auto start_idx = p.outer_dep_start_idx; - // exclude fused_node from total_num_deps - const auto end_idx = p.outer_dep_start_idx + p.total_num_deps -1; - for (size_t idx = start_idx; idx < end_idx; idx++) { - if (user->get_node().get_dependency(idx).id() == id) { - return true; - } - } - } - } - return false; - }; // Since fake alignment is applicable for input tensor as well, make sure we allocate enough memory // to prevent reading beyond the allocated memory bounds - if (user->get_node().is_type() && user->is_dynamic()) { - if (user->_deps[0].first == this) { - GPU_DEBUG_TRACE_DETAIL << "Check fc user " << user->id() << "'s fake alignment-ed input size" << std::endl; - user->update_shape(); - user->update_shape_done_by_other = true; - - auto fc_impl_params = *user->_impl_params; - auto fc_input_layout = user->get_node().type()->get_fake_aligned_params(fc_impl_params).input_layouts[0]; - if (fc_input_layout.bytes_count() > updated_layout.bytes_count()) { - GPU_DEBUG_TRACE_DETAIL << id() << ": increase output layout allocation size from " << actual_layout.to_short_string() << " -> " - << fc_input_layout.to_short_string() << " to meet the input buffer alignment requirements for FC\n"; - 
updated_layout = fc_input_layout; - } - } else if (is_fused_prim_of_user(id()) && user->update_shape_done_by_other) { - // Since the output layout of fused prim in user is determined after user's update_shape - // Rerun update_shape w/ new output layout of fused prim - user->update_shape_done_by_other = false; - user->update_shape(); - user->update_shape_done_by_other = true; + if (user->get_node().is_type() && user->is_dynamic() && user->_deps[0].first == this) { + GPU_DEBUG_TRACE_DETAIL << "Check fc user " << user->id() << "'s fake alignment-ed input size" << std::endl; + user->update_shape(); + user->update_shape_done_by_other = true; + + auto fc_impl_params = *user->_impl_params; + auto fc_input_layout = user->get_node().type()->get_fake_aligned_params(fc_impl_params).input_layouts[0]; + if (fc_input_layout.bytes_count() > updated_layout.bytes_count()) { + GPU_DEBUG_TRACE_DETAIL << id() << ": increase output layout allocation size from " << actual_layout.to_short_string() << " -> " + << fc_input_layout.to_short_string() << " to meet the input buffer alignment requirements for FC\n"; + updated_layout = fc_input_layout; } } } From 6ec3828c2f153ac454e882639f32c76e971a116b Mon Sep 17 00:00:00 2001 From: Anastasiia Pnevskaia Date: Fri, 5 Jul 2024 14:27:30 +0200 Subject: [PATCH 42/50] Added wstring support to save_model. (#25227) ### Details: - Added wstring support to save_model(). 
### Tickets: - 144915 --- src/core/include/openvino/core/graph_util.hpp | 9 +++++++- src/core/src/graph_util.cpp | 9 +++++++- src/core/src/pass/serialize.cpp | 23 ++++++++++++++----- .../ovc_python_api_tests/test_onnx.py | 9 +++++++- .../ovc_python_api_tests/test_paddle.py | 9 +++++++- .../ovc_python_api_tests/test_tf.py | 9 +++++++- 6 files changed, 57 insertions(+), 11 deletions(-) diff --git a/src/core/include/openvino/core/graph_util.hpp b/src/core/include/openvino/core/graph_util.hpp index 3f0926abfc4717..66c640a62314df 100644 --- a/src/core/include/openvino/core/graph_util.hpp +++ b/src/core/include/openvino/core/graph_util.hpp @@ -304,4 +304,11 @@ OPENVINO_API void save_model(const std::shared_ptr& model, const std::string& output_model, bool compress_to_fp16 = true); -} // namespace ov + +#if defined(OPENVINO_ENABLE_UNICODE_PATH_SUPPORT) +OPENVINO_API +void save_model(const std::shared_ptr& model, + const std::wstring& output_model, + bool compress_to_fp16 = true); +#endif +} // namespace ov \ No newline at end of file diff --git a/src/core/src/graph_util.cpp b/src/core/src/graph_util.cpp index 5078f9cf3b073b..e82c855b60551c 100644 --- a/src/core/src/graph_util.cpp +++ b/src/core/src/graph_util.cpp @@ -20,6 +20,7 @@ #include "openvino/pass/manager.hpp" #include "openvino/pass/pattern/op/wrap_type.hpp" #include "openvino/pass/visualize_tree.hpp" +#include "openvino/util/file_util.hpp" #include "transformations/common_optimizations/compress_float_constants.hpp" #include "transformations/common_optimizations/fused_names_cleanup.hpp" @@ -333,7 +334,7 @@ void serialize(const std::shared_ptr& m, void save_model(const std::shared_ptr& m, const std::string& output_model, bool compress_to_fp16) { auto cloned = m->clone(); if (compress_to_fp16) { - // TODO: Implement on-the-fly compression in pass::Serialize + // TODO: Implement on-the-fly compression in pass::Serialize, Ticket: 145380 bool postponed = true; ov::pass::compress_model_to_f16(cloned, postponed); } @@ 
-344,6 +345,12 @@ void save_model(const std::shared_ptr& m, const std::string& ou manager.run_passes(std::move(cloned)); } +#if defined(OPENVINO_ENABLE_UNICODE_PATH_SUPPORT) +void save_model(const std::shared_ptr& m, const std::wstring& output_model, bool compress_to_fp16) { + save_model(m, ov::util::wstring_to_string(output_model), compress_to_fp16); +} +#endif + bool is_used(Node* node); bool is_used(Node* node) { std::unordered_set instances_seen; diff --git a/src/core/src/pass/serialize.cpp b/src/core/src/pass/serialize.cpp index b4ae8f09c98d82..61338de1457f16 100644 --- a/src/core/src/pass/serialize.cpp +++ b/src/core/src/pass/serialize.cpp @@ -1206,16 +1206,27 @@ bool pass::Serialize::run_on_model(const std::shared_ptr& model) { if (m_xmlFile && m_binFile) { serializeFunc(*m_xmlFile, *m_binFile, model, m_version); } else { - auto xmlDir = ov::util::get_directory(m_xmlPath); - if (xmlDir != m_xmlPath) +#if defined(OPENVINO_ENABLE_UNICODE_PATH_SUPPORT) && defined(_WIN32) + const auto& xmlPath_ref = ov::util::string_to_wstring(m_xmlPath); + const auto& binPath_ref = ov::util::string_to_wstring(m_binPath); + std::string message_bin = "Can't open bin file."; + std::string message_xml = "Can't open xml file."; +#else + const auto& xmlPath_ref = m_xmlPath; + const auto& binPath_ref = m_binPath; + std::string message_bin = "Can't open bin file: \"" + binPath_ref + "\""; + std::string message_xml = "Can't open xml file: \"" + xmlPath_ref + "\""; +#endif + auto xmlDir = ov::util::get_directory(xmlPath_ref); + if (xmlDir != xmlPath_ref) ov::util::create_directory_recursive(xmlDir); - std::ofstream bin_file(m_binPath, std::ios::out | std::ios::binary); - OPENVINO_ASSERT(bin_file, "Can't open bin file: \"" + m_binPath + "\""); + std::ofstream bin_file(binPath_ref, std::ios::out | std::ios::binary); + OPENVINO_ASSERT(bin_file, message_bin); // create xml file - std::ofstream xml_file(m_xmlPath, std::ios::out); - OPENVINO_ASSERT(xml_file, "Can't open xml file: \"" + 
m_xmlPath + "\""); + std::ofstream xml_file(xmlPath_ref, std::ios::out); + OPENVINO_ASSERT(xml_file, message_xml); try { serializeFunc(xml_file, bin_file, model, m_version); diff --git a/tests/layer_tests/ovc_python_api_tests/test_onnx.py b/tests/layer_tests/ovc_python_api_tests/test_onnx.py index 3ad34b1e591153..1b200594fa6bb7 100644 --- a/tests/layer_tests/ovc_python_api_tests/test_onnx.py +++ b/tests/layer_tests/ovc_python_api_tests/test_onnx.py @@ -115,13 +115,20 @@ def test_unicode_paths(self): assert os.path.exists(model_path), "Could not create a directory with unicode path." - from openvino import convert_model + from openvino import convert_model, save_model, Core res_model = convert_model(model_path) flag, msg = compare_functions(res_model, model_ref, False) assert flag, msg + save_model(res_model, model_path + ".xml") + res_model_after_saving = Core().read_model(model_path + ".xml") + flag, msg = compare_functions(res_model_after_saving, create_ref_model([2, 3, 4]), False) + assert flag, msg + from openvino.frontend import FrontEndManager fm = FrontEndManager() fe = fm.load_by_framework("onnx") assert fe.supported(model_path) + + del res_model_after_saving diff --git a/tests/layer_tests/ovc_python_api_tests/test_paddle.py b/tests/layer_tests/ovc_python_api_tests/test_paddle.py index dd0468990c899c..8e854170af76ff 100644 --- a/tests/layer_tests/ovc_python_api_tests/test_paddle.py +++ b/tests/layer_tests/ovc_python_api_tests/test_paddle.py @@ -165,13 +165,20 @@ def test_unicode_paths(self): assert os.path.exists(model_path), "Could not create a directory with unicode path." 
- from openvino import convert_model + from openvino import convert_model, save_model, Core res_model = convert_model(model_path) flag, msg = compare_functions(res_model, model_ref, False) assert flag, msg + save_model(res_model, model_path + ".xml") + res_model_after_saving = Core().read_model(model_path + ".xml") + flag, msg = compare_functions(res_model_after_saving, model_ref, False) + assert flag, msg + from openvino.frontend import FrontEndManager fm = FrontEndManager() fe = fm.load_by_framework("paddle") assert fe.supported(model_path) + + del res_model_after_saving diff --git a/tests/layer_tests/ovc_python_api_tests/test_tf.py b/tests/layer_tests/ovc_python_api_tests/test_tf.py index 2968e40062da98..b2d79c538be17e 100644 --- a/tests/layer_tests/ovc_python_api_tests/test_tf.py +++ b/tests/layer_tests/ovc_python_api_tests/test_tf.py @@ -1305,13 +1305,20 @@ def test_unicode_paths(self): assert os.path.exists(model_path), "Could not create a directory with unicode path." - from openvino import convert_model + from openvino import convert_model, save_model, Core res_model = convert_model(model_path) flag, msg = compare_functions(res_model, model_ref, False) assert flag, msg + save_model(res_model, model_path + ".xml") + res_model_after_saving = Core().read_model(model_path + ".xml") + flag, msg = compare_functions(res_model_after_saving, model_ref, False) + assert flag, msg + from openvino.frontend import FrontEndManager fm = FrontEndManager() fe = fm.load_by_framework("tf") assert fe.supported(model_path) + + del res_model_after_saving From 77c6ade6c3df1b5f1c92dc2246079c258d522c91 Mon Sep 17 00:00:00 2001 From: Karol Blaszczak Date: Fri, 5 Jul 2024 14:36:18 +0200 Subject: [PATCH 43/50] [DOCS] adjust legal and footer (#25392) --- .../additional-resources/glossary.rst | 2 +- .../additional-resources/telemetry.rst | 34 +++++------ ...legal-information.rst => terms-of-use.rst} | 60 ++++++++++--------- .../about-openvino/performance-benchmarks.rst | 4 +- 
.../getting-performance-numbers.rst | 2 +- .../model-accuracy-int8-fp32.rst | 2 +- .../performance-benchmarks-faq.rst | 6 +- docs/sphinx_setup/_static/html/footer.html | 6 +- 8 files changed, 60 insertions(+), 56 deletions(-) rename docs/articles_en/about-openvino/additional-resources/{legal-information.rst => terms-of-use.rst} (67%) diff --git a/docs/articles_en/about-openvino/additional-resources/glossary.rst b/docs/articles_en/about-openvino/additional-resources/glossary.rst index eaaf9965d3b583..9aba2b395525c2 100644 --- a/docs/articles_en/about-openvino/additional-resources/glossary.rst +++ b/docs/articles_en/about-openvino/additional-resources/glossary.rst @@ -1,4 +1,4 @@ -.. {#openvino_docs_OV_Glossary} +:orphan: Glossary ======== diff --git a/docs/articles_en/about-openvino/additional-resources/telemetry.rst b/docs/articles_en/about-openvino/additional-resources/telemetry.rst index 34411abd26b224..1e4b3b34123163 100644 --- a/docs/articles_en/about-openvino/additional-resources/telemetry.rst +++ b/docs/articles_en/about-openvino/additional-resources/telemetry.rst @@ -1,4 +1,4 @@ -.. {#openvino_docs_telemetry_information} +:orphan: OpenVINO™ Telemetry ===================== @@ -10,9 +10,9 @@ OpenVINO™ Telemetry To facilitate debugging and further development, OpenVINO™ collects anonymous telemetry data. Anonymous telemetry data is collected by default, but you can stop data collection anytime by running the command ``opt_in_out --opt_out``. -It does not extend to any other Intel software, hardware, website usage, or other products. +It does not extend to any other Intel software, hardware, website usage, or other products. -Google Analytics is used for telemetry purposes. Refer to +Google Analytics is used for telemetry purposes. Refer to `Google Analytics support `__ to understand how the data is collected and processed. 
Enable or disable Telemetry reporting @@ -21,7 +21,7 @@ Enable or disable Telemetry reporting Changing consent decision +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -You can change your data collection decision with the following command lines: +You can change your data collection decision with the following command lines: ``opt_in_out --opt_in`` - enable telemetry @@ -35,26 +35,26 @@ Telemetry Data Collection Details .. tab-item:: Telemetry Data Collected :sync: telemetry-data-collected - - * Failure reports - * Error reports - * Usage data - + + * Failure reports + * Error reports + * Usage data + .. tab-item:: Tools Collecting Data :sync: tools-collecting-data - - * Model conversion API - * Model Downloader - * Accuracy Checker - * Post-Training Optimization Toolkit + + * Model conversion API + * Model Downloader + * Accuracy Checker + * Post-Training Optimization Toolkit * Neural Network Compression Framework * Model Converter * Model Quantizer - + .. tab-item:: Telemetry Data Retention :sync: telemetry-data-retention - + Telemetry data is retained in Google Analytics for a maximum of 14 months. - Any raw data that has reached the 14-month threshold is deleted from Google Analytics on a monthly basis. + Any raw data that has reached the 14-month threshold is deleted from Google Analytics on a monthly basis. diff --git a/docs/articles_en/about-openvino/additional-resources/legal-information.rst b/docs/articles_en/about-openvino/additional-resources/terms-of-use.rst similarity index 67% rename from docs/articles_en/about-openvino/additional-resources/legal-information.rst rename to docs/articles_en/about-openvino/additional-resources/terms-of-use.rst index 128bc8479e52d5..afdf10aef06c5c 100644 --- a/docs/articles_en/about-openvino/additional-resources/legal-information.rst +++ b/docs/articles_en/about-openvino/additional-resources/terms-of-use.rst @@ -1,30 +1,25 @@ -.. 
{#openvino_docs_Legal_Information} +:orphan: -Legal and Responsible AI Information +Terms of Use ===================================== - .. meta:: - :description: Learn about legal information and policies related to the use - of Intel® Distribution of OpenVINO™ toolkit. + :description: Learn about legal information and policies related to the information + published in OpenVINO™ documentation. -Performance varies by use, configuration and other factors. Learn more at -`www.intel.com/PerformanceIndex `__. -Performance results are based on testing as of dates shown in configurations and may not -reflect all publicly available updates. See backup for configuration details. No product or -component can be absolutely secure. +Intel Global Human Right Principles +########################################################### -Your costs and results may vary. +Intel is committed to respecting human rights and avoiding causing or contributing to adverse +impacts on human rights. See +`Intel's Global Human Rights Principles `__. +Intel's products and software are intended only to be used in applications that do not cause or +contribute to adverse impacts on human rights. -Intel technologies may require enabled hardware, software or service activation. -OpenCL and the OpenCL logo are trademarks of Apple Inc. used by permission by Khronos. -© Intel Corporation. Intel, the Intel logo, and other Intel marks are trademarks of Intel -Corporation or its subsidiaries. Other names and brands may be claimed as the property of -others. OpenVINO™ Logo ########################################################### @@ -33,25 +28,36 @@ To build equity around the project, the OpenVINO logo was created for both Intel usage. The logo may only be used to represent the OpenVINO toolkit and offerings built using the OpenVINO toolkit. 
-Logo Usage Guidelines -########################################################### - The OpenVINO logo must be used in connection with truthful, non-misleading references to the OpenVINO toolkit, and for no other purpose. Modification of the logo or use of any separate element(s) of the logo alone is not allowed. -Intel Global Human Right Principles -########################################################### -Intel is committed to respecting human rights and avoiding causing or contributing to adverse -impacts on human rights. See `Intel's Global Human Rights Principles `__. -Intel's products and software are intended only to be used in applications that do not cause or -contribute to adverse impacts on human rights. + Model Card Statement ########################################################### -We recommend that users, wherever you are sourcing the model from, should check for a model card, +We recommend that, wherever you are sourcing the model from, you should check for a model card, consult the model card for each model you access and use, and create one if you are developing or updating a model. A model card is a short document that provides key information to assess -performance and validation and ensure appropriate use. \ No newline at end of file +performance and validation and ensure appropriate use. + + +Performance claims +########################################################### + +Performance varies by use, configuration and other factors. Learn more at +`www.intel.com/PerformanceIndex `__. + +Performance results are based on testing as of dates shown in configurations and may not +reflect all publicly available updates. + +Your costs and results may vary. + + +No product or component can be absolutely secure. + +Intel technologies may require enabled hardware, software or service activation. + +OpenCL and the OpenCL logo are trademarks of Apple Inc. used by permission by Khronos. 
\ No newline at end of file diff --git a/docs/articles_en/about-openvino/performance-benchmarks.rst b/docs/articles_en/about-openvino/performance-benchmarks.rst index cc0377f64857e5..763a05cea3ee49 100644 --- a/docs/articles_en/about-openvino/performance-benchmarks.rst +++ b/docs/articles_en/about-openvino/performance-benchmarks.rst @@ -18,7 +18,7 @@ Performance Benchmarks This page presents benchmark results for `Intel® Distribution of OpenVINO™ toolkit `__ -and :doc:`OpenVINO Model Server <../ovms_what_is_openvino_model_server>`, for a representative +and :doc:`OpenVINO Model Server <../ovms_what_is_openvino_model_server>`, for a representative "./../" selection of public neural networks and Intel® devices. The results may help you decide which hardware to use in your applications or plan AI workload for the hardware you have already implemented in your solutions. Click the buttons below to see the chosen benchmark data. @@ -236,4 +236,4 @@ for non-Intel products. Results may vary. For more information, see :doc:`F.A.Q. <./performance-benchmarks/performance-benchmarks-faq>` - See :doc:`Legal Information <./additional-resources/legal-information>`. \ No newline at end of file + See :doc:`Legal Information <./additional-resources/terms-of-use>`. \ No newline at end of file diff --git a/docs/articles_en/about-openvino/performance-benchmarks/getting-performance-numbers.rst b/docs/articles_en/about-openvino/performance-benchmarks/getting-performance-numbers.rst index a3e414c7e3768d..d93e9b553cc12c 100644 --- a/docs/articles_en/about-openvino/performance-benchmarks/getting-performance-numbers.rst +++ b/docs/articles_en/about-openvino/performance-benchmarks/getting-performance-numbers.rst @@ -184,4 +184,4 @@ insights in the application-level performance on the timeline view. Results may vary. For more information, see :doc:`F.A.Q. <./performance-benchmarks-faq>` and :doc:`Platforms, Configurations, Methodology <../performance-benchmarks>`. 
- See :doc:`Legal Information <../additional-resources/legal-information>`. \ No newline at end of file + See :doc:`Legal Information <../additional-resources/terms-of-use>`. \ No newline at end of file diff --git a/docs/articles_en/about-openvino/performance-benchmarks/model-accuracy-int8-fp32.rst b/docs/articles_en/about-openvino/performance-benchmarks/model-accuracy-int8-fp32.rst index 4c15d7ddc75499..710dc3b403e63f 100644 --- a/docs/articles_en/about-openvino/performance-benchmarks/model-accuracy-int8-fp32.rst +++ b/docs/articles_en/about-openvino/performance-benchmarks/model-accuracy-int8-fp32.rst @@ -293,4 +293,4 @@ accuracy for the model. Results may vary. For more information, see :doc:`F.A.Q. <./performance-benchmarks-faq>` and :doc:`Platforms, Configurations, Methodology <../performance-benchmarks>`. - See :doc:`Legal Information <../additional-resources/legal-information>`. \ No newline at end of file + See :doc:`Legal Information <../additional-resources/terms-of-use>`. \ No newline at end of file diff --git a/docs/articles_en/about-openvino/performance-benchmarks/performance-benchmarks-faq.rst b/docs/articles_en/about-openvino/performance-benchmarks/performance-benchmarks-faq.rst index 6c731dc374010b..2e29aeea61ebe9 100644 --- a/docs/articles_en/about-openvino/performance-benchmarks/performance-benchmarks-faq.rst +++ b/docs/articles_en/about-openvino/performance-benchmarks/performance-benchmarks-faq.rst @@ -174,6 +174,6 @@ Performance Information F.A.Q. .. container:: benchmark-banner - Results may vary. For more information, see - :doc:`Platforms, Configurations, Methodology <../performance-benchmarks>`. - See :doc:`Legal Information <../additional-resources/legal-information>`. \ No newline at end of file + Results may vary. For more information, see: + :doc:`Platforms, Configurations, Methodology <../performance-benchmarks>`, + :doc:`Legal Information <../additional-resources/terms-of-use>`. 
\ No newline at end of file diff --git a/docs/sphinx_setup/_static/html/footer.html b/docs/sphinx_setup/_static/html/footer.html index 4fc60089d51d4e..311b98e223d52b 100644 --- a/docs/sphinx_setup/_static/html/footer.html +++ b/docs/sphinx_setup/_static/html/footer.html @@ -92,7 +92,7 @@ /* cookie wap requirement */ li#wap_dns { - display: none; + display: none; } li#wap_nac { @@ -119,9 +119,7 @@
    From df13265167faa80c8d4b2a0993e050c076b6f7a5 Mon Sep 17 00:00:00 2001 From: Andrew Kwangwoong Park Date: Fri, 5 Jul 2024 23:51:21 +0900 Subject: [PATCH 44/50] [GPU] Enable runtime buffer fusing for crop (#25060) ### Details: - Apply in place crop for dynamic shape at runtime ### Tickets: - 143096 --------- Signed-off-by: Andrew Park --- src/plugins/intel_gpu/src/graph/crop.cpp | 7 +- .../mark_runtime_skippable_nodes.cpp | 3 +- .../graph_optimizer/prepare_buffer_fusing.cpp | 376 +++++++++++------- .../graph_optimizer/prepare_buffer_fusing.h | 31 +- .../src/graph/impls/ocl/primitive_base.hpp | 3 +- .../src/graph/include/primitive_inst.h | 1 + .../src/graph/include/reshape_inst.h | 49 ++- .../intel_gpu/src/graph/primitive_inst.cpp | 71 +++- .../passes/prepare_buffer_fusing_test.cpp | 152 +++++++ 9 files changed, 550 insertions(+), 143 deletions(-) diff --git a/src/plugins/intel_gpu/src/graph/crop.cpp b/src/plugins/intel_gpu/src/graph/crop.cpp index 1a95f56b91ded9..146a1fa89b400b 100644 --- a/src/plugins/intel_gpu/src/graph/crop.cpp +++ b/src/plugins/intel_gpu/src/graph/crop.cpp @@ -251,7 +251,6 @@ crop_inst::typed_primitive_inst(network& network, crop_node const& node) : paren } if (node.can_be_optimized()) { - build_deps(); update_output_memory(); } } @@ -264,6 +263,12 @@ void crop_inst::update_output_memory() { if (!can_be_optimized()) return; + if (_node != nullptr) + build_deps(); + + if (node->get_program().is_new_shape_infer() && input_memory_ptr() == nullptr) + return; + if (_outputs[0] && _network.get_engine().is_the_same_buffer(output_memory(), input_memory())) return; diff --git a/src/plugins/intel_gpu/src/graph/graph_optimizer/mark_runtime_skippable_nodes.cpp b/src/plugins/intel_gpu/src/graph/graph_optimizer/mark_runtime_skippable_nodes.cpp index 880069fc6ae98e..e432248ac46669 100644 --- a/src/plugins/intel_gpu/src/graph/graph_optimizer/mark_runtime_skippable_nodes.cpp +++ 
b/src/plugins/intel_gpu/src/graph/graph_optimizer/mark_runtime_skippable_nodes.cpp @@ -49,7 +49,8 @@ void mark_runtime_skippable_nodes::run(program& p) { auto impl_params = node.get_kernel_impl_params(); if (node.is_output() || node.has_fused_primitives() || - (impl_params->get_input_layout(0).data_type != impl_params->get_output_layout().data_type)) + (impl_params->get_input_layout(0).data_type != impl_params->get_output_layout().data_type) || + impl_params->get_input_layout(0).has_dynamic_pad()) return; // TODO: For now, all permutes with dynamic shape are applied. diff --git a/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_buffer_fusing.cpp b/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_buffer_fusing.cpp index 5f60d3739a41f5..3f48ace7ae781f 100644 --- a/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_buffer_fusing.cpp +++ b/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_buffer_fusing.cpp @@ -10,6 +10,7 @@ #include "concatenation_inst.h" #include "crop_inst.h" #include "eltwise_inst.h" +#include "gemm_inst.h" #include "read_value_inst.h" #include "reshape_inst.h" #include "depth_to_space_inst.h" @@ -272,6 +273,7 @@ void concat_in_place_optimization::optimize_cascade(concatenation_node& node, st } node.set_output_layout(concat_layout); node.can_be_optimized(true); + GPU_DEBUG_TRACE_DETAIL << "[prepare_buffer_fusing] : " << node.id() << " can be optimized" << std::endl; } void concat_in_place_optimization::update_in_place_concat_paddings( @@ -345,7 +347,7 @@ void concat_in_place_optimization::update_in_place_concat_paddings( static bool can_reshape_be_optimized(const reshape_node& node) { // In case if pad is not propagated, the primitive can't be optimized out - if (node.get_input_layout(0).has_dynamic_pad() && !node.get_output_layout(0).has_dynamic_pad()) { + if (!node.is_runtime_propagatable_padding() && node.get_input_layout(0).has_dynamic_pad() && !node.get_output_layout(0).has_dynamic_pad()) { return false; } @@ -363,18 
+365,17 @@ static bool can_reshape_be_optimized(const reshape_node& node) { return false; } -static bool is_optimizable_padding_for_crop(const crop_node& node) { - const auto& crop_layout = node.get_output_layout(); - auto input_layout = node.get_dependency(0).get_output_layout(); - +static bool is_optimizable_padding_for_crop(const crop_node& node, + const layout& crop_layout, + const layout& input_layout, + const tensor offsets) { if (input_layout.data_padding.lower_size().batch[0] != 0 || input_layout.data_padding.upper_size().batch[0] != 0 || input_layout.data_padding.lower_size().spatial[0] != 0 || input_layout.data_padding.upper_size().spatial[0] != 0 || input_layout.data_padding.lower_size().spatial[1] != 0 || input_layout.data_padding.upper_size().spatial[1] != 0) return false; - auto crop_prim = node.get_primitive(); - auto opt_lower_pad = crop_prim->offsets.feature[0]; - auto opt_upper_pad = input_layout.feature() - crop_prim->offsets.feature[0] - crop_layout.get_tensor().feature[0]; + auto opt_lower_pad = offsets.feature[0]; + auto opt_upper_pad = input_layout.feature() - offsets.feature[0] - crop_layout.get_tensor().feature[0]; // do not optimize crop if paddings are not properly aligned for (auto& usr : node.get_users()) { @@ -391,10 +392,9 @@ static bool is_optimizable_padding_for_crop(const crop_node& node) { return true; } -static bool can_crop_be_optimized_along_feature(const crop_node& node) { - const auto& crop_layout = node.get_output_layout(); +bool crop_in_place_optimization::can_crop_be_optimized_along_feature(const layout& crop_layout, + const layout& input_layout) { auto format = crop_layout.format; - auto input_layout = node.get_dependency(0).get_output_layout(); const auto& crop_size = crop_layout.get_tensor(); const auto& out_pad = crop_layout.data_padding; @@ -411,10 +411,9 @@ static bool can_crop_be_optimized_along_feature(const crop_node& node) { return false; } -static bool can_crop_be_optimized_simple_data_format(const crop_node& 
node) { - const auto& crop_layout = node.get_output_layout(); +bool crop_in_place_optimization::can_crop_be_optimized_simple_data_format(const layout& crop_layout, + const layout& input_layout) { auto format = crop_layout.format; - auto input_layout = node.get_dependency(0).get_output_layout(); const auto& in_padding = input_layout.data_padding; const auto& out_padding = crop_layout.data_padding; @@ -450,6 +449,230 @@ static void propagate_padding_to_opt_out_users(program_node& node, cldnn::paddin } } +bool crop_in_place_optimization::match(crop_node& node) { + auto pred_param = node.get_dependency(0).get_kernel_impl_params(); + auto pred_layout = pred_param->get_output_layout(); + return (match(node, *node.get_kernel_impl_params(), pred_layout)); +} + +bool crop_in_place_optimization::match(const program_node& node, + kernel_impl_params& crop_params, + layout& input_layout, + bool is_runtime) { + if (!node.is_valid_output_layout()) + return false; + // if the node is marked as network output, prevent optimizations which would affect a form of its output, + // unless debug flag is set + if (node.is_output() || crop_params.fused_desc.size() > 0 || node.is_in_shape_of_subgraph()) + return false; + + const auto& crop_layout = crop_params.get_output_layout(); + for (auto user : node.get_users()) { + // do not optimize when next node is concatenation which is not output + if (user->is_type() && !user->is_output()) + return false; + if (user->is_type() || user->is_type()) + return false; + // If the input tensor of convolution includes dynamic padding, there is an issue + // where the total size of tensor is not properly calculated and becomes 0 + // It causes issue for internal buffer allocation during runtime + // TODO: Need to allow optimization for gemm user + if (node.is_dynamic() && (user->is_type() || user->is_type())) + return false; + if (user->is_type()) { + // runtime buffer fusing is only handled when there is only one reshape user + if (node.is_dynamic() && 
node.get_users().size() != 1) + return false; + auto& reshape_node = user->as(); + if (can_reshape_be_optimized(reshape_node) && + (!node.is_dynamic() || !reshape_node.is_runtime_propagatable_padding())) + return false; + } + if (user->is_type() && user->get_dependency_index(node) == 0) + return false; + } + + // do not optimize crop, that must be calculated in propagate_constants + if (node.is_constant()) + return false; + + if (node.get_users().size() > 0) { + if (node.get_program().is_body_program() && node.get_dependency(0).is_type()) { + return false; + } + // optimization is available for cropping across depth(features) or batch + // if output padding has defined padding across features already it wouldn't + // work because it expect to have zeros in the padded area. + if ((!node.is_dynamic() || is_runtime) && + !is_optimizable_padding_for_crop(node, crop_layout, input_layout, crop_params.input_offsets[0])) + return false; + if (!(((!node.is_dynamic() || is_runtime) && can_crop_be_optimized_along_feature(crop_layout, input_layout)) + || can_crop_be_optimized_simple_data_format(crop_layout, input_layout))) + return false; + } else { + return false; + } + return true; +} + +bool crop_in_place_optimization::optimize(crop_node& node) { + auto crop_layout = node.get_output_layout(); + auto input_layout = node.get_input_layout(0); + auto crop_params = node.get_kernel_impl_params(); + + // Regular crop + // crop input buffer + // |___________data____________| + // + // crop output buffer + // |-------->| offsets[f] |<--| + // |_____data____| + // <------------> + // reference size + // + // In-place crop + // crop output buffer + // |_low_pad_|__data_size__|___|<-upper pad + if (!node.is_dynamic() && can_crop_be_optimized_along_feature(crop_layout, input_layout)) { + update_in_place_crop_padding_along_feature(node, + crop_layout, + input_layout, + crop_params->input_offsets[0], + node.get_primitive()->axis, + false); + } else if 
(can_crop_be_optimized_simple_data_format(crop_layout, input_layout)) { + std::vector reshape_layouts; + if (node.get_users().front()->is_type() && node.get_users().front()->as().is_runtime_propagatable_padding()) { + reshape_layouts.push_back(node.get_users().front()->get_output_layout()); + } + update_in_place_crop_padding_simple_data_format(crop_layout, + input_layout, + reshape_layouts, + crop_params->input_offsets[0], + node.get_primitive()->axis, + false); + if (reshape_layouts.size() > 0) { + node.get_users().front()->set_output_layout(reshape_layouts[0]); + } + } + node.set_output_layout(crop_layout); + node.can_be_optimized(true); + propagate_padding_to_opt_out_users(node, node.get_output_layout().data_padding); + GPU_DEBUG_TRACE_DETAIL << "[prepare_buffer_fusing] : " << node.id() << " can be optimized" << std::endl; + return false; +} + +void crop_in_place_optimization::update_in_place_crop_padding_along_feature(const program_node& node, + layout& crop_layout, + layout& input_layout, + const tensor offsets, + size_t crop_axis, + bool is_runtime) { + auto crop_axis_legacy = crop_axis; + if (crop_axis_legacy >= 2) { + auto spatial_axis = crop_axis_legacy - 2; + // Default and minimum number of dimensions is 4 + auto spatial_size = std::max(crop_layout.get_partial_shape().size(), 4) - 2; + crop_axis_legacy = spatial_size - spatial_axis - 1 + 2; + } + if (crop_layout.is_dynamic() && !is_runtime) { + auto info_dynamic_pad = tensor(0).sizes(); + info_dynamic_pad[crop_axis_legacy] = 1; + auto dynamic_pad_mask = tensor(info_dynamic_pad); + crop_layout.data_padding.set_dynamic_pad(dynamic_pad_mask); + return; + } + + const auto& crop_size = crop_layout.get_tensor(); + const auto& out_pad = crop_layout.data_padding; + + auto opt_lower_pad = offsets.feature[0]; + auto opt_upper_pad = input_layout.feature() - offsets.feature[0] - crop_size.feature[0]; + + auto& dep = node.get_dependency(0); + // feature num of pad should be accumulated if dep has been optimized out. 
+ if (dep.is_type() && dep.can_be_optimized()) { + auto dep_pad = dep.get_output_layout().data_padding; + opt_lower_pad += dep_pad.lower_size().feature[0]; + opt_upper_pad += dep_pad.upper_size().feature[0]; + } + std::vector lower_sizes; + lower_sizes.push_back(out_pad.lower_size().batch[0]); + lower_sizes.push_back(opt_lower_pad); + lower_sizes.push_back(out_pad.lower_size().spatial[0]); + lower_sizes.push_back(out_pad.lower_size().spatial[1]); + std::vector upper_sizes; + upper_sizes.push_back(out_pad.upper_size().batch[0]); + upper_sizes.push_back(opt_upper_pad); + upper_sizes.push_back(out_pad.upper_size().spatial[0]); + upper_sizes.push_back(out_pad.upper_size().spatial[1]); + + // set padding + if (is_runtime) { + auto dyn_pad_sizes = lower_sizes; + dyn_pad_sizes[crop_axis_legacy] = 1; + crop_layout.data_padding = padding(lower_sizes, upper_sizes, 0.f, tensor(dyn_pad_sizes)); + } else { + crop_layout.data_padding = padding(lower_sizes, upper_sizes); + } +} + +void crop_in_place_optimization::update_in_place_crop_padding_simple_data_format(layout& crop_layout, + layout& input_layout, + std::vector& user_layouts, + const tensor offsets, + size_t crop_axis, + bool is_runtime) { + auto crop_axis_legacy = crop_axis; + if (crop_axis_legacy >= 2) { + auto spatial_axis = crop_axis_legacy - 2; + // Default and minimum number of dimensions is 4 + auto spatial_size = std::max(crop_layout.get_partial_shape().size(), 4) - 2; + crop_axis_legacy = spatial_size - spatial_axis - 1 + 2; + } + if (crop_layout.is_dynamic() && !is_runtime) { + auto dyn_pad_sizes = tensor(0).sizes(); + dyn_pad_sizes[crop_axis_legacy] = 1; + crop_layout.data_padding.set_dynamic_pad(tensor(dyn_pad_sizes)); + for (auto& user_layout : user_layouts) { + user_layout.data_padding.set_dynamic_pad(tensor(dyn_pad_sizes)); + } + return; + } + + const auto& crop_size = crop_layout.get_tensor(); + + std::vector lower_sizes; + lower_sizes.push_back(offsets.batch[0]); + 
lower_sizes.push_back(offsets.feature[0]); + for (size_t i = 0; i < input_layout.get_spatial_rank(); i++) { + lower_sizes.push_back(offsets.spatial[i]); + } + std::vector upper_sizes; + upper_sizes.push_back(input_layout.batch() - offsets.batch[0] - crop_size.batch[0]); + upper_sizes.push_back(input_layout.feature() - offsets.feature[0] - crop_size.feature[0]); + for (size_t i = 0; i < input_layout.get_spatial_rank(); i++) { + upper_sizes.push_back(input_layout.spatial(i) - offsets.spatial[i] - crop_size.spatial[i]); + } + + if (is_runtime) { + auto dyn_pad_sizes = lower_sizes; + dyn_pad_sizes[crop_axis_legacy] = 1; + crop_layout.data_padding = padding(lower_sizes, upper_sizes, 0.f, tensor(dyn_pad_sizes)); + for (auto& user_layout : user_layouts) { + auto reshape_rank = user_layout.get_partial_shape().size(); + auto reshape_last_dim = user_layout.get_partial_shape().to_shape()[reshape_rank - 1]; + if (lower_sizes[crop_axis_legacy]) + lower_sizes[crop_axis_legacy] /= reshape_last_dim; + if (upper_sizes[crop_axis_legacy]) + upper_sizes[crop_axis_legacy] /= reshape_last_dim; + user_layout.data_padding = padding(lower_sizes, upper_sizes, 0.f, tensor(dyn_pad_sizes)); + } + } else { + crop_layout.data_padding = padding(lower_sizes, upper_sizes); + } +} + // ToDo remove friendship relation from program_node void prepare_buffer_fusing::run(program& p) { /* @@ -482,129 +705,10 @@ void prepare_buffer_fusing::run(program& p) { concat_in_place_optimization>(p); // [2] Then try to optimize all crops - auto node_itr = p.get_processing_order().begin(); - while (node_itr != p.get_processing_order().end()) { - auto& node = (*node_itr++); - if (!node->is_valid_output_layout()) - continue; - if (!can_optimize(node)) - continue; - - // zero copy - program_helpers::do_for_types(*node, [&p](crop_node& node) { - // if the node is marked as network output, prevent optimizations which would affect a form of its output, - // unless debug flag is set - if (node.is_output()) - return; - - // 
do not optimize when next node is concatenation which is not output - for (auto user : node.get_users()) { - if (user->is_type() && !user->is_output()) - return; - if (user->is_type() || user->is_type()) - return; - } - for (auto user : node.get_users()) { - if (user->is_type()) { - auto& reshape_node = user->as(); - if (can_reshape_be_optimized(reshape_node)) - return; - } - if (user->is_type() && user->get_dependency_index(node) == 0) - return; - } - - // do not optimize crop, that must be calculated in propagate_constants - if (node.is_constant()) - return; - - if (node.get_dependencies().size() == 1 && node.get_users().size() > 0) { - if (p.is_body_program() && node.get_dependency(0).is_type()) { - return; - } - - // optimization is available for cropping across depth(features) or batch - // if output padding has defined padding across features already it wouldn't - // work because it expect to have zeros in the padded area. - if (!is_optimizable_padding_for_crop(node)) - return; - - const auto& crop_layout = node.get_output_layout(); - const auto& crop_size = crop_layout.get_tensor(); - const auto& out_pad = crop_layout.data_padding; - auto input_layout = node.get_input_layout(0); - auto crop_prim = node.get_primitive(); - - // Regular crop - // crop input buffer - // |___________data____________| - // - // crop output buffer - // |-------->| offsets[f] |<--| - // |_____data____| - // <------------> - // reference size - // - // In-place crop - // crop output buffer - // |_low_pad_|__data_size__|___|<-upper pad - if (can_crop_be_optimized_along_feature(node)) { - auto crop_prim = node.get_primitive(); - auto opt_lower_pad = crop_prim->offsets.feature[0]; - auto opt_upper_pad = input_layout.feature() - crop_prim->offsets.feature[0] - crop_size.feature[0]; - auto& dep = node.get_dependency(0); - // feature num of pad should be accumulated if dep has been optimized out. 
- if (dep.is_type() && dep.can_be_optimized()) { - auto dep_pad = dep.get_output_layout().data_padding; - OPENVINO_ASSERT( - dep_pad.lower_size().batch[0] == 0 && dep_pad.upper_size().batch[0] == 0 && - dep_pad.lower_size().spatial[0] == 0 && dep_pad.upper_size().spatial[0] == 0 && - dep_pad.lower_size().spatial[1] == 0 && dep_pad.upper_size().spatial[1] == 0, - "batch, y, x of pad should be aligned to 0."); - - opt_lower_pad += dep_pad.lower_size().feature[0]; - opt_upper_pad += dep_pad.upper_size().feature[0]; - } - - // set padding - node.set_output_padding( - padding({out_pad.lower_size().batch[0], - opt_lower_pad, - out_pad.lower_size().spatial[0], - out_pad.lower_size().spatial[1]}, - {out_pad.upper_size().batch[0], - opt_upper_pad, - out_pad.upper_size().spatial[0], - out_pad.upper_size().spatial[1]})); - } else if (can_crop_be_optimized_simple_data_format(node)) { - auto crop_prim = node.get_primitive(); - - std::vector lower_sizes; - lower_sizes.push_back(crop_prim->offsets.batch[0]); - lower_sizes.push_back(crop_prim->offsets.feature[0]); - for (size_t i = 0; i < input_layout.get_spatial_rank(); i++) { - lower_sizes.push_back(crop_prim->offsets.spatial[i]); - } - std::vector upper_sizes; - upper_sizes.push_back(input_layout.batch() - crop_prim->offsets.batch[0] - crop_size.batch[0]); - upper_sizes.push_back(input_layout.feature() - crop_prim->offsets.feature[0] - crop_size.feature[0]); - for (size_t i = 0; i < input_layout.get_spatial_rank(); i++) { - upper_sizes.push_back(input_layout.spatial(i) - crop_prim->offsets.spatial[i] - crop_size.spatial[i]); - } - - node.set_output_padding(padding(lower_sizes, upper_sizes)); - } else { - return; - } - - node.can_be_optimized(true); - propagate_padding_to_opt_out_users(node, node.get_output_layout().data_padding); - } - }); - } + run_node_optimizations(p); // [3] Optimize all other primitives - node_itr = p.get_processing_order().begin(); + auto node_itr = p.get_processing_order().begin(); while (node_itr != 
p.get_processing_order().end()) { auto& node = (*node_itr++); if (!node->is_valid_output_layout()) @@ -623,6 +727,7 @@ void prepare_buffer_fusing::run(program& p) { node.adjust_output_padding(); node.can_be_optimized(can_reshape_be_optimized(node)); + GPU_DEBUG_TRACE_DETAIL << "[prepare_buffer_fusing] : " << node.id() << " can be optimized" << std::endl; }); program_helpers::do_for_types(*node, [](kv_cache_node& node) { auto kv_out_layout = node.get_output_layout(); @@ -713,6 +818,7 @@ void prepare_buffer_fusing::run(program& p) { // TODO: Allow optimizations for the case above too. Looks like it can be achieved by more careful // topological sort (i.e. if we ensure that all read_value users are completed before assign is run) node.can_be_optimized(can_read_value_be_optimize(node)); + GPU_DEBUG_TRACE_DETAIL << "[prepare_buffer_fusing] : " << node.id() << " can be optimized" << std::endl; }); } } diff --git a/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_buffer_fusing.h b/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_buffer_fusing.h index 78ac1ff64f99b5..47bc4fc49bd490 100644 --- a/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_buffer_fusing.h +++ b/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_buffer_fusing.h @@ -6,6 +6,7 @@ #include "program_helpers.h" #include "concatenation_inst.h" +#include "crop_inst.h" #include #include @@ -21,7 +22,6 @@ struct concat_noop_optimization : pattern_match_optimization_typed { // Performs in-place concat optimization. // Padding of predecessors is updated to use single buffer by all, which is output from concatenation. @@ -59,4 +59,33 @@ struct concat_in_place_optimization : pattern_match_optimization_typed { + // Performs in-place crop optimization. 
+ using base = pattern_match_optimization_typed; + using base::base; + + static bool can_crop_be_optimized_along_feature(const layout& crop_layout, + const layout& input_layout); + static bool can_crop_be_optimized_simple_data_format(const layout& crop_layout, + const layout& input_layout); + bool match(crop_node& node); + static bool match(const program_node& node, + kernel_impl_params& crop_params, + layout& input_layout, + bool is_runtime = false); + bool optimize(crop_node& node); + static void update_in_place_crop_padding_along_feature(const program_node& node, + layout& crop_layout, + layout& pred_layout, + const tensor offsets, + size_t crop_axis, + bool is_runtime); + static void update_in_place_crop_padding_simple_data_format(layout& crop_layout, + layout& pred_layout, + std::vector& user_layouts, + const tensor offsets, + size_t crop_axis, + bool is_runtime); +}; + } // namespace cldnn \ No newline at end of file diff --git a/src/plugins/intel_gpu/src/graph/impls/ocl/primitive_base.hpp b/src/plugins/intel_gpu/src/graph/impls/ocl/primitive_base.hpp index 9cc3b8cb3cf3d7..2f65bbc7b3ea43 100644 --- a/src/plugins/intel_gpu/src/graph/impls/ocl/primitive_base.hpp +++ b/src/plugins/intel_gpu/src/graph/impls/ocl/primitive_base.hpp @@ -90,7 +90,8 @@ struct typed_primitive_impl_ocl : public typed_primitive_impl { impl_param.is_type() || impl_param.is_type() || impl_param.is_type() || - impl_param.is_type()) && impl_param.is_dynamic())) { + impl_param.is_type() || + impl_param.is_type()) && impl_param.is_dynamic())) { return make_unique(kernel_selector::kernel_data{}); } auto kernel_params = ImplType::get_kernel_params(ImplType::static_canonicalize_shapes(impl_param)); diff --git a/src/plugins/intel_gpu/src/graph/include/primitive_inst.h b/src/plugins/intel_gpu/src/graph/include/primitive_inst.h index 22941d23b45b56..124cdf62bc7811 100644 --- a/src/plugins/intel_gpu/src/graph/include/primitive_inst.h +++ b/src/plugins/intel_gpu/src/graph/include/primitive_inst.h @@ 
-240,6 +240,7 @@ class primitive_inst { void do_runtime_skip_broadcast(); void do_runtime_in_place_concat(); void do_runtime_in_place_kv_cache(); + void do_runtime_in_place_crop(); void configure_shape_of_dependencies(); memory::ptr fused_memory(size_t dep_id) const { diff --git a/src/plugins/intel_gpu/src/graph/include/reshape_inst.h b/src/plugins/intel_gpu/src/graph/include/reshape_inst.h index 166a1cb5d3b734..78cbba8f3eeba4 100644 --- a/src/plugins/intel_gpu/src/graph/include/reshape_inst.h +++ b/src/plugins/intel_gpu/src/graph/include/reshape_inst.h @@ -6,6 +6,8 @@ #include "intel_gpu/primitives/reshape.hpp" #include "intel_gpu/runtime/tensor_accessor.hpp" #include "openvino/core/partial_shape.hpp" +#include "crop_inst.h" +#include "rope_inst.h" #include "primitive_inst.h" #include @@ -28,6 +30,45 @@ struct typed_program_node : public typed_program_node_base { program_node& input() const { return get_dependency(0); } + bool is_runtime_propagatable_padding() const { + auto prim = typed_desc(); + if (prim->mode == reshape::reshape_mode::squeeze || prim->mode == reshape::reshape_mode::unsqueeze) + return true; + + // TODO: This function is to limit condition to a specific case (crop + reshape) among cases for the base mode + if (!input().is_type()) + return false; + + // TODO: If user is RoPE and dynamic padding exists, ouput padding propagation is not supported in the base mode + if (get_users().size() == 1 && get_users().front()->is_type()) + return false; + + auto axis = input().as().get_primitive()->axis; + const auto& input_pshape = input().get_output_layout(false).get_partial_shape(); + auto input_rank = input_pshape.size(); + auto input_last_dim = static_cast(input_rank - 1); + if (axis != input_last_dim || input_pshape[input_last_dim].is_dynamic()) + return false; + + auto input_last_dim_val = input_pshape[input_last_dim].get_length(); + const auto& output_pshape = prim->output_partial_shape; + // TODO: If the reshape's output shape is non constant, issue 
occurs + // during shape inference due to execution order at runtime + if ((output_pshape.size() != input_rank + 1) || prim->output_pattern.empty()) + return false; + + int64_t mul = 1; + for (size_t i = input_rank - 1; i < output_pshape.size() ; i++) { + if (output_pshape[i].is_dynamic()) + return false; + mul *= output_pshape[i].get_length(); + } + if (input_last_dim_val != mul) + return false; + + return true; + } + bool has_padding() const { return (this->get_output_layout().data_padding || input().get_output_layout(false).data_padding || input().get_output_layout(false).has_dynamic_pad()); } @@ -61,9 +102,8 @@ struct typed_program_node : public typed_program_node_base { if (this->is_output() || this->has_fused_primitives()) return false; - if (input().get_output_layout(false).has_dynamic_pad()) { - return typed_desc()->mode != reshape::reshape_mode::base; - } + if (input().get_output_layout(false).has_dynamic_pad() && is_runtime_propagatable_padding()) + return true; if (has_padding()) return false; @@ -79,6 +119,9 @@ struct typed_program_node : public typed_program_node_base { auto output_layout = this->get_output_layout(); if (input_layout.has_dynamic_pad()) { auto prim = typed_desc(); + // TODO: If outer padding exists, ouput padding propagation is not supported in the base mode + if (prim->mode == reshape::reshape_mode::base) + return; ov::PartialShape pattern_shape = { static_cast(prim->output_pattern.size()) }; if (pattern_shape.size() == 0) diff --git a/src/plugins/intel_gpu/src/graph/primitive_inst.cpp b/src/plugins/intel_gpu/src/graph/primitive_inst.cpp index bbeecf970b56fb..d63efb0fa77688 100644 --- a/src/plugins/intel_gpu/src/graph/primitive_inst.cpp +++ b/src/plugins/intel_gpu/src/graph/primitive_inst.cpp @@ -604,7 +604,7 @@ event::ptr primitive_inst::realloc_if_needed() { } // Clear out memory if if was previously reused, but now primitive can't be optimized - if (_node->is_runtime_skippable()) { + if (_node->is_runtime_skippable() || 
_node->is_type()) { if (can_be_optimized()) { _max_output_layout_count = _deps[0].first->_max_output_layout_count; GPU_DEBUG_PROFILED_STAGE_MEMALLOC_INFO("can_be_optimized"); @@ -1007,6 +1007,14 @@ void primitive_inst::update_paddings() { reset_pad(*_impl_params, _node); return; } + // Reset paddings used in the previous iteration for crop before executing do_runtime_in_place_crop + for (auto u : get_user_insts()) { + if (u->get_node().is_type() && u->_impl_params->output_layouts[0].data_padding.get_dynamic_pad_dims() != tensor(0)) { + if (u->get_node().can_be_optimized()) { + reset_pad(*u->_impl_params, u->_node); + } + } + } } void primitive_inst::do_runtime_skip_reorder() { @@ -1348,6 +1356,66 @@ void primitive_inst::do_runtime_in_place_concat() { GPU_DEBUG_TRACE_DETAIL << "[In place concat] " << concat_inst->id() << ": can_be_optimized " << std::endl; } +void primitive_inst::do_runtime_in_place_crop() { + OV_ITT_SCOPED_TASK(ov::intel_gpu::itt::domains::intel_gpu_plugin, openvino::itt::handle("do_runtime_in_place_crop: " + id())); + GPU_DEBUG_GET_INSTANCE(debug_config); + GPU_DEBUG_IF(debug_config->disable_runtime_buffer_fusing) { + return; + } + + for (auto u : get_user_insts()) { + if (u->get_node().is_type()) { + if (u->get_node().can_be_optimized()) { + GPU_DEBUG_TRACE_DETAIL << "[In place crop] update shape for " << u->id() << std::endl; + u->update_shape(); + u->update_shape_done_by_other = true; + + const auto& crop_users = u->get_user_insts(); + std::vector reshape_layouts; + if (crop_users.front()->get_node().is_type()) { + OPENVINO_ASSERT(crop_users.size() == 1, "[GPU] Expected number of reshape users is 1, but it is ", crop_users.size()); + auto reshape_inst = crop_users.front(); + if (!reshape_inst->update_shape_done_by_other) { + GPU_DEBUG_TRACE_DETAIL << "[In place crop] update shape for " << reshape_inst->id() << std::endl; + reshape_inst->update_shape(); + reshape_inst->update_shape_done_by_other = true; + 
reshape_layouts.push_back(reshape_inst->_impl_params->get_output_layout()); + } + } + + layout crop_layout = u->_impl_params->get_output_layout(); + auto pred_layout = _impl_params->get_output_layout(); + if (!crop_in_place_optimization::match(u->get_node(), *u->_impl_params, pred_layout, true)) { + u->set_can_be_optimized(false); + GPU_DEBUG_TRACE_DETAIL << "[In place crop] " << u->id() << " cannot be optimized " << std::endl; + return; + } + + auto crop_axis = u->_impl_params->typed_desc()->axis; + auto offsets = u->_impl_params->input_offsets[0]; + if (crop_in_place_optimization::can_crop_be_optimized_along_feature(crop_layout, pred_layout)) { + crop_in_place_optimization::update_in_place_crop_padding_along_feature(u->get_node(), crop_layout, pred_layout, offsets, crop_axis, true); + } else if (crop_in_place_optimization::can_crop_be_optimized_simple_data_format(crop_layout, pred_layout)) { + crop_in_place_optimization::update_in_place_crop_padding_simple_data_format(crop_layout, pred_layout, reshape_layouts, + offsets, crop_axis, true); + if (crop_users.front()->get_node().is_type() && reshape_layouts.size() > 0) { + auto reshape_inst = crop_users.front(); + reshape_inst->_impl_params->output_layouts[0] = reshape_layouts[0]; + reshape_inst->set_shape_change(); + } + } else { + u->set_can_be_optimized(false); + GPU_DEBUG_TRACE_DETAIL << "[In place crop] " << u->id() << " cannot be optimized " << std::endl; + return; + } + u->_impl_params->output_layouts[0] = crop_layout; + u->set_can_be_optimized(true); + GPU_DEBUG_TRACE_DETAIL << "[In place crop] " << u->id() << ": can_be_optimized " << std::endl; + } + } + } +} + bool primitive_inst::has_inner_networks() const { return (_impl_params->inner_nets.size() > 0); } @@ -1410,6 +1478,7 @@ event::ptr primitive_inst::execute(const std::vector& events) { do_runtime_skip_permute(); do_runtime_skip_strided_slice(); do_runtime_skip_broadcast(); + do_runtime_in_place_crop(); if (!is_valid_fusion()) { 
OV_ITT_SCOPED_TASK(ov::intel_gpu::itt::domains::intel_gpu_plugin, openvino::itt::handle("unfused_subgraph_exec: " + id())); diff --git a/src/plugins/intel_gpu/tests/unit/passes/prepare_buffer_fusing_test.cpp b/src/plugins/intel_gpu/tests/unit/passes/prepare_buffer_fusing_test.cpp index ac3bf0d553e0e3..eba310a7ff82f0 100644 --- a/src/plugins/intel_gpu/tests/unit/passes/prepare_buffer_fusing_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/passes/prepare_buffer_fusing_test.cpp @@ -625,6 +625,158 @@ TEST(prepare_buffer_fusing, skip_in_place_concat_inside_shape_of_subgraph) { ASSERT_FALSE(in_place); } +TEST(prepare_buffer_fusing, in_place_crop_static) { + auto& engine = get_test_engine(); + + auto input_mem = engine.allocate_memory({ {1, 2, 4}, data_types::f32, format::bfyx }); + auto weights_mem = engine.allocate_memory({ {8, 4}, data_types::u8, format::bfyx }); + auto bias_mem = engine.allocate_memory({ {1, 1, 8}, data_types::f32, format::bfyx }); + auto scale_mem = engine.allocate_memory({ {8, 1}, data_types::f32, format::bfyx }); + auto zp_mem = engine.allocate_memory({ {8, 1}, data_types::f32, format::bfyx }); + + set_values(input_mem, { -0.5f, 2.0f, 0.5f, 1.0f, + 0.5f, -2.0f, -0.5f, -1.0f }); + set_values(weights_mem, { 1, 2, 3, 4, + 5, 6, 7, 8, + 9, 10, 11, 12, + 13, 14, 15, 0, + 15, 14, 13, 12, + 11, 10, 9, 8, + 7, 6, 5, 4, + 3, 2, 1, 0}); + set_values(bias_mem, { 1.0f, -2.0f, 3.0f, -4.0f, 5.0f, -6.0f, 7.0f, 2.0f }); + set_values(scale_mem, { 2.0f, 4.0f, -2.0f, -4.0f, 0.5f, -0.5f, 2.0f, 2.0f }); + set_values(zp_mem, { 1.0f, 2.0f, 2.0f, 1.0f, 4.0f, 1.0f, 6.0f, 2.0f }); + + std::vector out1 = { 13.f, 58.f, -51.f, -108.f, -11.f, -62.f, 57.f, 100.f }; + std::vector out2 = { 18.5f, -18.f, 1.f, -4.f, -8.5f, 6.f, 13.f, 8.f }; + std::vector out3 = { 13.f, 58.f, -51.f, -108.f, 18.5f, -18.f, 1.f, -4.f, -11.f, -62.f, 57.f, 100.f, -8.5f, 6.f, 13.f, 8.f }; + + topology topology( + input_layout("input", input_mem->get_layout()), + data("weights", weights_mem), + data("bias", 
bias_mem), + data("scale", scale_mem), + data("zp", zp_mem), + fully_connected("fc", input_info("input"), "weights", "bias", "scale", "zp", data_types::f32, padding(), 3, 2), + crop("crop1", input_info("fc"), tensor(1, 2, 1, 4), tensor(0, 0, 0, 0)), + reorder("output1", input_info("crop1"), format::bfyx, data_types::f32), + crop("crop2", input_info("fc"), tensor(1, 2, 1, 4), tensor(0, 0, 0, 4)), + reorder("output2", input_info("crop2"), format::bfyx, data_types::f32), + reorder("output3", input_info("fc"), format::bfyx, data_types::f32) + ); + + auto config = get_test_default_config(engine); + config.set_property(ov::intel_gpu::optimize_data(true)); + network network(engine, topology, config); + + network.set_input_data("input", input_mem); + + auto outputs = network.execute(); + + auto crop_prim = network.get_primitive("crop1"); + ASSERT_EQ(crop_prim->can_be_optimized(), true); + crop_prim = network.get_primitive("crop2"); + ASSERT_EQ(crop_prim->can_be_optimized(), true); + + auto output = outputs.at("output1").get_memory(); + cldnn::mem_lock output_ptr(output, get_test_stream()); + + for (size_t i = 0; i < out1.size(); i++) + ASSERT_EQ(output_ptr[i], out1[i]); + + auto output_2 = outputs.at("output2").get_memory(); + cldnn::mem_lock output_ptr_2(output_2, get_test_stream()); + + for (size_t i = 0; i < out2.size(); i++) + ASSERT_EQ(output_ptr_2[i], out2[i]); + + auto output_3 = outputs.at("output3").get_memory(); + cldnn::mem_lock output_ptr_3(output_3, get_test_stream()); + + for (size_t i = 0; i < out3.size(); i++) + ASSERT_EQ(output_ptr_3[i], out3[i]); +} + +TEST(prepare_buffer_fusing, in_place_crop_dynamic) { + auto& engine = get_test_engine(); + + auto in_layout = layout{ ov::PartialShape{-1, -1, 4}, data_types::f32, format::bfyx}; + auto input_mem = engine.allocate_memory({ {1, 2, 4}, data_types::f32, format::bfyx }); + auto weights_mem = engine.allocate_memory({ {8, 4}, data_types::u8, format::bfyx }); + auto bias_mem = engine.allocate_memory({ {1, 1, 8}, 
data_types::f32, format::bfyx }); + auto scale_mem = engine.allocate_memory({ {8, 1}, data_types::f32, format::bfyx }); + auto zp_mem = engine.allocate_memory({ {8, 1}, data_types::f32, format::bfyx }); + auto axis_mem = engine.allocate_memory({ {}, data_types::i64, format::bfyx }); + auto splits_length_mem = engine.allocate_memory({ {2}, data_types::i64, format::bfyx }); + + int64_t axis = 2; + set_values(input_mem, { -0.5f, 2.0f, 0.5f, 1.0f, + 0.5f, -2.0f, -0.5f, -1.0f }); + set_values(axis_mem, {axis}); + set_values(splits_length_mem, { 2, 6 }); + set_values(weights_mem, { 1, 2, 3, 4, + 5, 6, 7, 8, + 9, 10, 11, 12, + 13, 14, 15, 0, + 15, 14, 13, 12, + 11, 10, 9, 8, + 7, 6, 5, 4, + 3, 2, 1, 0}); + set_values(bias_mem, { 1.0f, -2.0f, 3.0f, -4.0f, 5.0f, -6.0f, 7.0f, 2.0f }); + set_values(scale_mem, { 2.0f, 4.0f, -2.0f, -4.0f, 0.5f, -0.5f, 2.0f, 2.0f }); + set_values(zp_mem, { 1.0f, 2.0f, 2.0f, 1.0f, 4.0f, 1.0f, 6.0f, 2.0f }); + + std::vector out1 = { 13.f, 58.f, -11.f, -62.f }; + std::vector out2 = { -51.f, -108.f, 18.5f, -18.f, 1.f, -4.f, 57.f, 100.f, -8.5f, 6.f, 13.f, 8.f }; + std::vector out3 = { 13.f, 58.f, -51.f, -108.f, 18.5f, -18.f, 1.f, -4.f, -11.f, -62.f, 57.f, 100.f, -8.5f, 6.f, 13.f, 8.f }; + + cldnn::crop_ngraph_op_mode op_mode = cldnn::crop_ngraph_op_mode::variadic_split; + topology topology( + input_layout("input", in_layout), + data("axis", axis_mem), + data("splits_length", splits_length_mem), + data("weights", weights_mem), + data("bias", bias_mem), + data("scale", scale_mem), + data("zp", zp_mem), + fully_connected("fc", input_info("input"), "weights", "bias", "scale", "zp", data_types::f32, padding(), 3, 2), + crop("crop1", { input_info("fc"), input_info("axis"), input_info("splits_length") }, cldnn::tensor(1), cldnn::tensor(0), op_mode, 0, axis), + reorder("output1", input_info("crop1"), format::bfyx, data_types::f32), + crop("crop2", { input_info("fc"), input_info("axis"), input_info("splits_length") }, cldnn::tensor(1), cldnn::tensor(0), 
op_mode, 1, axis), + reshape("reshape", input_info("crop2"), true, std::vector{0, 0, 3, 2}, ov::PartialShape{-1, -1, 3, 2}, cldnn::reshape::reshape_mode::base), + reorder("output2", input_info("reshape"), format::bfyx, data_types::f32, std::vector(), reorder_mean_mode::subtract, padding(), true), + reorder("output3", input_info("fc"), format::bfyx, data_types::f32) + ); + + auto config = get_test_default_config(engine); + config.set_property(ov::intel_gpu::allow_new_shape_infer(true)); + config.set_property(ov::intel_gpu::optimize_data(true)); + network network(engine, topology, config); + + network.set_input_data("input", input_mem); + + auto outputs = network.execute(); + + auto output = outputs.at("output1").get_memory(); + cldnn::mem_lock output_ptr(output, get_test_stream()); + + for (size_t i = 0; i < out1.size(); i++) + ASSERT_EQ(output_ptr[i], out1[i]); + + auto output_2 = outputs.at("output2").get_memory(); + cldnn::mem_lock output_ptr_2(output_2, get_test_stream()); + + for (size_t i = 0; i < out2.size(); i++) + ASSERT_EQ(output_ptr_2[i], out2[i]); + + auto output_3 = outputs.at("output3").get_memory(); + cldnn::mem_lock output_ptr_3(output_3, get_test_stream()); + + for (size_t i = 0; i < out3.size(); i++) + ASSERT_EQ(output_ptr_3[i], out3[i]); +} + // Testing for implicit crop along batch axis and outer padding optimzing. // Outer padding opt includes opt out of reshape and reorder which has padded input only in batch axis // This optimzing also includes offset(outer axis padded input) handling of oneDNN primitive. 
From 2805775c051ccda146b1f7aca4a178c43b016c7c Mon Sep 17 00:00:00 2001 From: Pawel Raasz Date: Fri, 5 Jul 2024 17:33:32 +0200 Subject: [PATCH 45/50] [core] Improve Constant bf16 -> f16, f32 casting performance (#25328) ### Details: - Use vectorization for conversion of bf16 -> f16, f32 ### Related PR: - #25248 ### Tickets: - CVS-145803 --------- Co-authored-by: Sergey Lyalin --- .../include/openvino/reference/convert.hpp | 5 ++++ src/core/reference/src/op/convert.cpp | 30 +++++++++++++++++++ src/core/reference/src/op/jit_generator.cpp | 6 ++++ .../functional/op_reference/convert_like.cpp | 6 ++++ 4 files changed, 47 insertions(+) diff --git a/src/core/reference/include/openvino/reference/convert.hpp b/src/core/reference/include/openvino/reference/convert.hpp index 41f56623b43aa9..980344fd7b083f 100644 --- a/src/core/reference/include/openvino/reference/convert.hpp +++ b/src/core/reference/include/openvino/reference/convert.hpp @@ -7,6 +7,7 @@ #include #include +#include "openvino/core/type/bfloat16.hpp" #include "openvino/core/type/element_iterator.hpp" #include "openvino/core/type/element_type.hpp" #include "openvino/core/type/float16.hpp" @@ -69,6 +70,10 @@ template <> void convert(const float* arg, int8_t* out, size_t count); template <> void convert(const float16* arg, int8_t* out, size_t count); +template <> +void convert(const bfloat16* arg, float16* out, size_t count); +template <> +void convert(const bfloat16* arg, float* out, size_t count); #endif // OPENVINO_ARCH_X86 || OPENVINO_ARCH_X86_64 diff --git a/src/core/reference/src/op/convert.cpp b/src/core/reference/src/op/convert.cpp index 5e855bcc950d0c..5f7f4baf9251db 100644 --- a/src/core/reference/src/op/convert.cpp +++ b/src/core/reference/src/op/convert.cpp @@ -55,6 +55,26 @@ void jit_convert_vec(jit::Generator& gen, const Xbyak::RegExp& s gen.vmovdqu(gen.xword[dst], f16vec); } +template <> +void jit_convert_vec(jit::Generator& gen, const Xbyak::RegExp& src, const Xbyak::RegExp& dst) { + const auto 
f32vec = gen.ymm4; + const auto f16vec = gen.xmm3; + + gen.vpmovzxwd(f32vec, gen.yword[src]); // load bf16 into tmp + gen.vpslld(f32vec, f32vec, 16); // convert bf16->f32 by bit shift + gen.vcvtps2ph(f16vec, f32vec, 0); // convert f32 -> f16 + gen.vmovdqu(gen.xword[dst], f16vec); // move result to destination +} + +template <> +void jit_convert_vec(jit::Generator& gen, const Xbyak::RegExp& src, const Xbyak::RegExp& dst) { + const auto f32vec = gen.ymm4; + + gen.vpmovzxwd(f32vec, gen.yword[src]); // load bf16 into tmp + gen.vpslld(f32vec, f32vec, 16); // convert bf16->f32 by bit shift + gen.vmovdqu(gen.yword[dst], f32vec); // move result to destination +} + template <> void jit_convert_vec_prepare(jit::Generator& gen) { auto upper_bound = gen.ymm5; @@ -503,6 +523,16 @@ void convert(const float16* arg, int8_t* out, size_t count) { convert_impl(arg, out, count); } +template <> +void convert(const bfloat16* arg, float16* out, size_t count) { + convert_impl(arg, out, count); +} + +template <> +void convert(const bfloat16* arg, float* out, size_t count) { + convert_impl(arg, out, count); +} + #endif // OPENVINO_ARCH_X86 || OPENVINO_ARCH_X86_64 void convert_from_f32_to_f16_with_clamp(const float* arg, float16* out, size_t count) { diff --git a/src/core/reference/src/op/jit_generator.cpp b/src/core/reference/src/op/jit_generator.cpp index c980d34e16661a..d516d210e71967 100644 --- a/src/core/reference/src/op/jit_generator.cpp +++ b/src/core/reference/src/op/jit_generator.cpp @@ -12,6 +12,7 @@ # include # include "jit_generator.hpp" +# include "openvino/core/type/bfloat16.hpp" # include "openvino/core/type/float16.hpp" namespace ov { @@ -184,6 +185,11 @@ template <> void Generator::copy(const Xbyak::Reg64& dst, const Xbyak::Reg64& src, const Xbyak::Reg64& size) { copy(dst, src, size); } + +template <> +void Generator::copy(const Xbyak::Reg64& dst, const Xbyak::Reg64& src, const Xbyak::Reg64& size) { + copy(dst, src, size); +} } // namespace jit } // namespace runtime } // 
namespace ov diff --git a/src/plugins/template/tests/functional/op_reference/convert_like.cpp b/src/plugins/template/tests/functional/op_reference/convert_like.cpp index 7d2f7f89d03c36..2b867e3e9b92b1 100644 --- a/src/plugins/template/tests/functional/op_reference/convert_like.cpp +++ b/src/plugins/template/tests/functional/op_reference/convert_like.cpp @@ -181,6 +181,12 @@ INSTANTIATE_TEST_SUITE_P( ov::element::f16, std::vector{-0.0f, -6.0f, 1.0f, 0.5f, 2.0f, 1.5f, 6.0f, 3.0f}, std::vector{f16_min, f16_min, 1.0f, 0.5f, 2.0f, 2.0f, 8.0f, 2.0f}), + ConvertParams(ConversionTypes::CONVERT_LIKE, + ov::PartialShape{7}, + ov::element::bf16, + ov::element::f16, + std::vector{0.5f, 0.1640625f, 0.5f, 2.5f, 1.5f, 0.5f, 3.5f}, + std::vector{0.5f, 0.1640625f, 0.5f, 2.5f, 1.5f, 0.5f, 3.5f}), // destination f32 ConvertParams(ConversionTypes::CONVERT_LIKE, ov::PartialShape{2, 2}, From 277c0b7d0e06980984aa6b9a7976f5ec8016aac2 Mon Sep 17 00:00:00 2001 From: Aleksandr Voron Date: Fri, 5 Jul 2024 18:55:00 +0200 Subject: [PATCH 46/50] [CPU][ARM] Fix Reduce NHWC conversion (#25212) ### Details: 2 issues have been fixed - ACL supports tensor rank up to 4 for both Reduce kernels - NEReduceMean and NEReductionOperation (https://github.com/ARM-software/ComputeLibrary/blob/505adb91d40e05b3f80a075a4467a78a253395e1/src/runtime/NEON/functions/NEReductionOperation.cpp#L78) The fix checks axis for both NEReduceMean and NEReductionOperation. - `axisCast` method is updated to support NDHWC to NCDHW and vice versa conversion. 
### Tickets: - *ticket-id* --- .../src/nodes/executors/acl/acl_reduce.hpp | 21 +++-- .../src/nodes/executors/acl/acl_utils.hpp | 19 +++- .../single_layer_tests/classes/reduce.cpp | 11 +-- .../instances/arm/reduce.cpp | 92 +++++++++++++++++++ .../instances/common/reduce.cpp | 4 - .../instances/x64/reduce.cpp | 15 +-- 6 files changed, 129 insertions(+), 33 deletions(-) create mode 100644 src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/arm/reduce.cpp diff --git a/src/plugins/intel_cpu/src/nodes/executors/acl/acl_reduce.hpp b/src/plugins/intel_cpu/src/nodes/executors/acl/acl_reduce.hpp index 3fa6adccb8214c..69bf6062918963 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/acl/acl_reduce.hpp +++ b/src/plugins/intel_cpu/src/nodes/executors/acl/acl_reduce.hpp @@ -69,14 +69,17 @@ class AclReduceExecutorBuilder : public ReduceExecutorBuilder { " dimensions maximum. src[0] shape rank is ", srcDescs[0]->getShape().getRank()); return false; } - if (reduceAttrs.operation == Algorithm::ReduceMean) { - arm_compute::Coordinates axesMean; - for (size_t i = 0; i < reduceAttrs.axes.size(); ++i) { - auto axe = axisCast(reduceAttrs.axes[i], srcDescs[0]->getShape().getRank()); - if (axe > 3) { - DEBUG_LOG("ACL supports tensor rank up to 4 for ReduceMean operation. Tensor rank: ", axe); - return false; - } + auto srcShapeRank = srcDescs[0]->getShape().getRank(); + bool hasSrcNspcLayout = srcDescs[0]->hasLayoutType(LayoutType::nspc); + for (size_t i = 0; i < reduceAttrs.axes.size(); ++i) { + int axis = axisCast(reduceAttrs.axes[i], srcShapeRank, hasSrcNspcLayout ? NHWC_TO_NCHW : NO_LAYOUT_CONVERSION); + if (axis == -1) { + DEBUG_LOG("Layout conversion to NHWC has failed"); + return false; + } + if (axis > 3) { + DEBUG_LOG("ACL supports reduction axis 0, 1, 2, 3. 
Unsupported reduction axis specified: ", axis); + return false; } } if ((reduceAttrs.operation == Algorithm::ReduceSum || @@ -97,4 +100,4 @@ class AclReduceExecutorBuilder : public ReduceExecutorBuilder { }; } // namespace intel_cpu -} // namespace ov \ No newline at end of file +} // namespace ov diff --git a/src/plugins/intel_cpu/src/nodes/executors/acl/acl_utils.hpp b/src/plugins/intel_cpu/src/nodes/executors/acl/acl_utils.hpp index b3077d4c16e342..a981c112e5e19d 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/acl/acl_utils.hpp +++ b/src/plugins/intel_cpu/src/nodes/executors/acl/acl_utils.hpp @@ -82,17 +82,26 @@ enum ACLAxisCastMode { */ inline int axisCast(const std::size_t axis, const std::size_t shapeSize, ACLAxisCastMode axisCastMode = NO_LAYOUT_CONVERSION) { // CWHN (reverted NHWC) (0, 1, 2, 3) into WHCN (reverted NCHW) (1, 2, 0, 3) - static std::vector nhwcToNchw = {1, 2, 0, 3}; + static const std::array nhwcToNchw = {1, 2, 0, 3}; // WHCN (reverted NCHW) (0, 1, 2, 3) into CWHN (reverted NHWC) (2, 0, 1, 3) - static std::vector nchwToNhwc = {2, 0, 1, 3}; + static const std::array nchwToNhwc = {2, 0, 1, 3}; + // CWHDN (reverted NDHWC) (0, 1, 2, 3, 4) into WHDCN (reverted NCDHW) (1, 2, 3, 0, 4) + static const std::array ndhwcToNcdhw = {1, 2, 3, 0, 4}; + // WHDCN (reverted NCDHW) (0, 1, 2, 3, 4) into CWHDN (reverted NDHWC) (3, 0, 1, 2, 4) + static const std::array ncdhwToNdhwc = {3, 0, 1, 2, 4}; + size_t revertedAxis = shapeSize - axis - 1; switch (axisCastMode) { + case NO_LAYOUT_CONVERSION: + return revertedAxis; case NHWC_TO_NCHW: - return revertedAxis > 3 ? -1 : nhwcToNchw[revertedAxis]; + if (shapeSize == 4) return nhwcToNchw[revertedAxis]; + if (shapeSize == 5) return ndhwcToNcdhw[revertedAxis]; case NCHW_TO_NHWC: - return revertedAxis > 3 ? 
-1 : nchwToNhwc[revertedAxis]; + if (shapeSize == 4) return nchwToNhwc[revertedAxis]; + if (shapeSize == 5) return ncdhwToNdhwc[revertedAxis]; default: - return revertedAxis; + return -1; } } diff --git a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/reduce.cpp b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/reduce.cpp index 892e347c6a2a63..66b1d60932b262 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/reduce.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/reduce.cpp @@ -139,11 +139,9 @@ void ReduceCPULayerTest::SetUp() { function = makeNgraphFunction(netPrecision, params, reduce, "Reduce"); - if (ov::with_cpu_x86_avx512_core_amx()) { - if (netPrecision == ov::element::f32 && configuration.count(ov::hint::inference_precision.name()) && - configuration.at(ov::hint::inference_precision.name()) == ov::element::f16) { - abs_threshold = 5e-3; - } + if (netPrecision == ov::element::f32 && configuration.count(ov::hint::inference_precision.name()) && + configuration.at(ov::hint::inference_precision.name()) == ov::element::f16) { + abs_threshold = 5e-3; } } @@ -254,10 +252,7 @@ const std::vector> additionalConfig() { static const std::vector> additionalConfig = { {{ov::hint::inference_precision.name(), ov::element::f32}}, {{ov::hint::inference_precision.name(), ov::element::bf16}}, -// ARM doesn't support FP16 for now -#if defined(OPENVINO_ARCH_X86) || defined(OPENVINO_ARCH_X86_64) {{ov::hint::inference_precision.name(), ov::element::f16}}, -#endif }; return additionalConfig; } diff --git a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/arm/reduce.cpp b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/arm/reduce.cpp new file mode 100644 index 00000000000000..5f1910f9388c31 --- /dev/null +++ b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/arm/reduce.cpp @@ -0,0 +1,92 
@@ +// Copyright (C) 2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "custom/single_layer_tests/classes/reduce.hpp" +#include "utils/cpu_test_utils.hpp" +#include "utils/fusing_test_utils.hpp" +#include "ov_lpt_models/common/builders.hpp" +#include "common_test_utils/node_builders/fake_quantize.hpp" + +using namespace CPUTestUtils; + +namespace ov { +namespace test { +namespace Reduce { +namespace { + +std::vector> inputShapes_5D = { + {{{}, {{2, 19, 2, 2, 9}}}}, +}; + +const std::vector> axes5D = { + {2, 4}, + {1, 2, 4}, +}; + +std::vector cpuParams_5D = { + CPUSpecificParams({ndhwc}, {ndhwc}, {}, {}), + CPUSpecificParams({ncdhw}, {ncdhw}, {}, {}), +}; + +const auto params_MultiAxis_5D = testing::Combine( + testing::Combine( + testing::ValuesIn(axes5D), + testing::Values(ov::test::utils::OpType::VECTOR), + testing::Values(true), + testing::ValuesIn(reductionTypes()), + testing::ValuesIn(inpOutPrc()), + testing::Values(ElementType::undefined), + testing::Values(ElementType::undefined), + testing::ValuesIn(inputShapes_5D)), + testing::ValuesIn(filterCPUSpecificParams(cpuParams_5D)), + testing::Values(emptyFusingSpec), + testing::ValuesIn(additionalConfig())); + +const std::vector> axes5D_ref = { + {0} +}; + +std::vector cpuParams_5D_ref = { + CPUSpecificParams({ncdhw}, {ncdhw}, {"ref"}, {"ref"}), +}; + +std::vector> config_infer_prec_f32 = { + {{ov::hint::inference_precision.name(), ov::element::f32}} + }; + +const auto params_MultiAxis_5D_ref = testing::Combine( + testing::Combine( + testing::ValuesIn(axes5D_ref), + testing::Values(ov::test::utils::OpType::VECTOR), + testing::Values(true), + testing::ValuesIn(reductionTypes()), + testing::ValuesIn(inpOutPrc()), + testing::Values(ElementType::undefined), + testing::Values(ElementType::undefined), + testing::ValuesIn(inputShapes_5D)), + testing::ValuesIn(filterCPUSpecificParams(cpuParams_5D_ref)), + testing::Values(emptyFusingSpec), + testing::ValuesIn(config_infer_prec_f32)); + 
+// There are dedicated instances of smoke_Reduce_MultiAxis_5D_CPU test in arm and x64 folders +//because ACL does not support 0 as reduction axis +INSTANTIATE_TEST_SUITE_P( + smoke_Reduce_MultiAxis_5D_CPU, + ReduceCPULayerTest, + params_MultiAxis_5D, + ReduceCPULayerTest::getTestCaseName +); + +// Reference implementation testing of ACL unsupported case +INSTANTIATE_TEST_SUITE_P( + smoke_Reduce_MultiAxis_5D_CPU_ref, + ReduceCPULayerTest, + params_MultiAxis_5D_ref, + ReduceCPULayerTest::getTestCaseName +); + +} // namespace +} // namespace Reduce +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/common/reduce.cpp b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/common/reduce.cpp index c57a4cc27c97af..8eeec958e7e857 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/common/reduce.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/common/reduce.cpp @@ -24,10 +24,6 @@ std::vector> inputShapes_dynamic_2dims = { {{{2, 19, {1, 5}, {1, 10}}, {{2, 19, 2, 2}, {2, 19, 2, 9}}}}, }; -std::vector> inputShapes_5D = { - {{{}, {{2, 19, 2, 2, 9}}}}, -}; - std::vector> inputShapes_6D = { {{{}, {{2, 19, 2, 2, 2, 2}}}}, }; diff --git a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/x64/reduce.cpp b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/x64/reduce.cpp index 1a18dbbb015ede..acea9335d05338 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/x64/reduce.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/x64/reduce.cpp @@ -23,7 +23,8 @@ std::vector> inputShapes_3D_fuse_dyn = { {{{{1, 5}, 19, {1, 10}}, {{1, 19, 2}, {1, 19, 9}, {1, 19, 2}}}}, }; -std::vector> inputShapes_5D_dyn = { +std::vector> inputShapes_5D = { + {{{}, {{2, 19, 2, 2, 9}}}}, {{{{1, 5}, 19, {1, 5}, {1, 5}, {1, 5}}, {{2, 
19, 2, 2, 2}, {2, 19, 3, 2, 2}}}}, }; @@ -210,7 +211,7 @@ const auto params_MultiAxis_5D = testing::Combine( testing::ValuesIn(inpOutPrc()), testing::Values(ElementType::undefined), testing::Values(ElementType::undefined), - testing::ValuesIn(inputShapes_5D_dyn)), + testing::ValuesIn(inputShapes_5D)), testing::ValuesIn(filterCPUSpecificParams(cpuParams_5D)), testing::Values(emptyFusingSpec), testing::ValuesIn(additionalConfig())); @@ -238,7 +239,7 @@ const auto params_MultiAxis_5D_Hybrid = testing::Combine( testing::ValuesIn(inpOutPrc()), testing::Values(ElementType::undefined), testing::Values(ElementType::undefined), - testing::ValuesIn(inputShapes_5D_dyn)), + testing::ValuesIn(inputShapes_5D)), testing::ValuesIn(filterCPUSpecificParams(cpuParams_HybridLayout_5D)), testing::Values(emptyFusingSpec), testing::ValuesIn(additionalConfigFP32())); @@ -463,7 +464,7 @@ const auto params_MultiAxis_5D_Logical = testing::Combine( testing::Values(ElementType::boolean), testing::Values(ElementType::undefined), testing::Values(ElementType::undefined), - testing::ValuesIn(inputShapes_5D_dyn)), + testing::ValuesIn(inputShapes_5D)), testing::ValuesIn(filterCPUSpecificParams(cpuParams_5D)), testing::Values(emptyFusingSpec), testing::ValuesIn(additionalConfigFP32())); @@ -491,7 +492,7 @@ const auto params_MultiAxis_5D_Hybrid_Logical = testing::Combine( testing::Values(ElementType::boolean), testing::Values(ElementType::undefined), testing::Values(ElementType::undefined), - testing::ValuesIn(inputShapes_5D_dyn)), + testing::ValuesIn(inputShapes_5D)), testing::ValuesIn(filterCPUSpecificParams(cpuParams_HybridLayout_5D)), testing::Values(emptyFusingSpec), testing::ValuesIn(additionalConfigFP32())); @@ -604,7 +605,7 @@ const auto params_MultiAxis_5D_fusing = testing::Combine( testing::ValuesIn(inpOutPrc()), testing::Values(ElementType::undefined), testing::Values(ElementType::undefined), - testing::ValuesIn(inputShapes_5D_dyn)), + testing::ValuesIn(inputShapes_5D)), 
testing::ValuesIn(filterCPUSpecificParams(cpuParams_5D)), testing::ValuesIn(fusingParamsSet), testing::ValuesIn(additionalConfig())); @@ -696,7 +697,7 @@ const auto params_MultiAxis_5D_Hybrid_fusing_KeepNoDims = testing::Combine( testing::ValuesIn(inpOutPrc()), testing::Values(ElementType::undefined), testing::Values(ElementType::undefined), - testing::ValuesIn(inputShapes_5D_dyn)), + testing::ValuesIn(inputShapes_5D)), testing::ValuesIn(filterCPUSpecificParams(cpuParams_HybridLayout_5D)), testing::ValuesIn(fusingParamsSet_KeepNoDims), testing::ValuesIn(additionalConfigFP32())); From f1298ed128bd9727fa5b78b4712d6c7da7243f81 Mon Sep 17 00:00:00 2001 From: Vladislav Denisov Date: Sat, 6 Jul 2024 23:07:11 +0400 Subject: [PATCH 47/50] [GoodFirstIssue][OP CONFORMANCE][TEMPLATE] Added u64 support for the indices of Gather (#25221) ### Details: - Added u64 support for the indices of Gather. ### Tickets: - https://github.com/openvinotoolkit/openvino/issues/23545 ### Test is passed: > [==========] Running 1 test from 1 test suite. [----------] Global test environment set-up. [----------] 1 test from conformance_Gather/ReadIRTest [ RUN ] conformance_Gather/ReadIRTest.Inference/Op=Gather.8_Type=f32_Shape=dynamic_IR=1c727cc96123227a9fe6c3079a497fd64a04f273bff45b5ea56a3c0d577eca8e_Device=TEMPLATE_Config=() MEM_USAGE=31860KB [ CONFORMANCE ] Influence coefficient: 4.72987e-05 [ PLUGIN ] `SubgraphBaseTest::compile_model()` is started [ PLUGIN ] `SubgraphBaseTest::compile_model()` is finished successfully. Duration is 0.143964s RANGE FOR PARAMETER: Gather-8_0 start from: 0.000000 range: 8 resolution: 32 seed: 1 RANGE FOR PARAMETER: Gather-8_1 start from: 0.000000 range: 15 resolution: 1 seed: 1 [ PLUGIN ] `SubgraphBaseTest::get_plugin_outputs()` is started [ PLUGIN ] `SubgraphBaseTest::get_plugin_outputs()` is finished successfully. 
Duration is 0.0288465s [ REFERENCE ] `SubgraphBaseTest::calculate_refs()` is started [ REFERENCE ] Calculate reference in runtime [ REFERENCE ] `SubgraphBaseTest::calculate_refs()` is started [ REFERENCE ] `SubgraphBaseTest::calculate_refs()` is finished successfully. Duration is 0.0094897s [ REFERENCE ] `SubgraphBaseTest::calculate_refs()` is finished successfully. Duration is 0.0109919s [ COMPARATION ] `ov_tensor_utils.hpp::compare()` is started [ COMPARATION ] `ov_tensor_utils.hpp::compare()` is finished successfully. Duration is 0.0071786s RANGE FOR PARAMETER: Gather-8_0 start from: 0.000000 range: 8 resolution: 32 seed: 1 RANGE FOR PARAMETER: Gather-8_1 start from: 0.000000 range: 15 resolution: 1 seed: 1 [ PLUGIN ] `SubgraphBaseTest::get_plugin_outputs()` is started [ PLUGIN ] `SubgraphBaseTest::get_plugin_outputs()` is finished successfully. Duration is 0.0033599s [ REFERENCE ] `SubgraphBaseTest::calculate_refs()` is started [ REFERENCE ] Calculate reference in runtime [ REFERENCE ] `SubgraphBaseTest::calculate_refs()` is started [ REFERENCE ] `SubgraphBaseTest::calculate_refs()` is finished successfully. Duration is 0.0086416s [ REFERENCE ] `SubgraphBaseTest::calculate_refs()` is finished successfully. Duration is 0.0105932s [ COMPARATION ] `ov_tensor_utils.hpp::compare()` is started [ REFERENCE ] `SubgraphBaseTest::calculate_refs()` is finished successfully. Duration is 0.010214s [ COMPARATION ] `ov_tensor_utils.hpp::compare()` is started [ COMPARATION ] `ov_tensor_utils.hpp::compare()` is finished successfully. Duration is 0.0082425s [ OK ] conformance_Gather/ReadIRTest.Inference/Op=Gather.8_Type=f32_Shape=dynamic_IR=1c727cc96123227a9fe6c3079a497fd64a04f273bff45b5ea56a3c0d577eca8e_Device=TEMPLATE_Config=() (381 ms) [----------] 1 test from conformance_Gather/ReadIRTest (383 ms total) [----------] Global test environment tear-down [==========] 1 test from 1 test suite ran. (435 ms total) [ PASSED ] 1 test. 
--- src/plugins/template/backend/ops/gather.cpp | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/src/plugins/template/backend/ops/gather.cpp b/src/plugins/template/backend/ops/gather.cpp index 0d4cfacefb3491..f23fdf2452232f 100644 --- a/src/plugins/template/backend/ops/gather.cpp +++ b/src/plugins/template/backend/ops/gather.cpp @@ -11,7 +11,16 @@ bool evaluate(const std::shared_ptr& op, ov::TensorVector& outputs, const ov::TensorVector& inputs) { using T = typename ov::element_type_traits::value_type; - if (op->get_input_element_type(1) == ov::element::i64) { + if (op->get_input_element_type(1) == ov::element::u64) { + ov::reference::gather(inputs[0].data(), + inputs[1].data(), + outputs[0].data(), + op->get_input_shape(0), + op->get_input_shape(1), + op->get_output_shape(0), + op->get_axis(), + op->get_batch_dims()); + } else if (op->get_input_element_type(1) == ov::element::i64) { ov::reference::gather(inputs[0].data(), inputs[1].data(), outputs[0].data(), From 70080bd30735b878a282d5bf395df45bc9c5fa7c Mon Sep 17 00:00:00 2001 From: Georgy Krivoruchko Date: Sun, 7 Jul 2024 09:09:46 +0400 Subject: [PATCH 48/50] Fixed behavior of a seed interpretation when it is float (#25151) ### Details: - Removed workaround which caused unexpected collisions in case seed -1 < seed < 1 ### Tickets: - 123003 --- .../common/src/random_normal_helper.cpp | 8 ++++- .../onnx/frontend/src/op/multinomial.cpp | 20 +++--------- .../onnx/frontend/src/op/random_uniform.cpp | 14 +++----- .../frontend/src/op/random_uniform_like.cpp | 12 ++----- .../onnx/frontend/src/utils/common.hpp | 13 ++++++++ src/frontends/onnx/tests/onnx_import.in.cpp | 32 ++++++++++++++++--- .../tests/tests_python/test_ops_random.py | 2 +- 7 files changed, 60 insertions(+), 41 deletions(-) diff --git a/src/frontends/common/src/random_normal_helper.cpp b/src/frontends/common/src/random_normal_helper.cpp index 7028ab4b7a3e40..99a6cda8451e41 100644 --- 
a/src/frontends/common/src/random_normal_helper.cpp +++ b/src/frontends/common/src/random_normal_helper.cpp @@ -26,7 +26,13 @@ OutputVector make_random_normal(pass::NodeRegistry& registry, const uint64_t global_seed = 0; // ONNX specifies the seed as a float, but OpenVINO uses uint64_t - const auto op_seed = static_cast(seed * 1000); + // OpenVINO supports only uint64 seeds with a meaningful 0 value (seed will be auto-generated). + // Because we use a seed as a just meaningful identifier we may + // just interpret its value as a 32-bit value (float zero value is same with + // uint32 zero value). + // Float -0 value will be interpreted as a valid uint32 value. + const void* seed_ptr = &seed; // To prevent strict-aliasing error + const uint64_t op_seed = static_cast(*static_cast(seed_ptr)); // We need to use two op_seeds to make sure we get different results for two RandomUniform series // But we also have to keep original logic and pass "0" (auto-generated seed) to RandomUniform diff --git a/src/frontends/onnx/frontend/src/op/multinomial.cpp b/src/frontends/onnx/frontend/src/op/multinomial.cpp index 16de91dc826acf..294e19ed742477 100644 --- a/src/frontends/onnx/frontend/src/op/multinomial.cpp +++ b/src/frontends/onnx/frontend/src/op/multinomial.cpp @@ -24,24 +24,12 @@ ov::OutputVector multinomial(const ov::frontend::onnx::Node& node) { const auto dtype = node.get_attribute_value("dtype", static_cast(TensorProto_DataType::TensorProto_DataType_INT32)); - const auto seed = node.get_attribute_value("seed", 0.0f); + const auto seed = common::convert_float_seed(node.get_attribute_value("seed", 0.0f)); const auto target_type = common::get_ov_element_type(dtype); const uint64_t global_seed = 0; - // OpenVINO supports only uint64 seeds with a meaningful 0 value (seed will be auto-generated). - // Because we use a seed as a just meaningful identifier we may - // just interpret its value as a 32-bit value (float zero value is same with - // uint32 zero value). 
- // Float -0 value will be interpreted as a valid uint32 value. - const void* seed_ptr = &seed; // To prevent strict-aliasing error - const uint64_t seed_uint64 = *static_cast(seed_ptr); - - auto multinomial_op = std::make_shared(input, - sample_size, - target_type, - true, - true, - seed_uint64, - global_seed); + + auto multinomial_op = + std::make_shared(input, sample_size, target_type, true, true, seed, global_seed); return {multinomial_op}; } diff --git a/src/frontends/onnx/frontend/src/op/random_uniform.cpp b/src/frontends/onnx/frontend/src/op/random_uniform.cpp index c1f89bc2049246..b078727ceafd07 100644 --- a/src/frontends/onnx/frontend/src/op/random_uniform.cpp +++ b/src/frontends/onnx/frontend/src/op/random_uniform.cpp @@ -25,20 +25,14 @@ ov::OutputVector random_uniform(const ov::frontend::onnx::Node& node) { static_cast(TensorProto_DataType::TensorProto_DataType_FLOAT)); const auto high_const = node.get_attribute_as_constant("high", 1.0f); const auto low_const = node.get_attribute_as_constant("low", 0.0f); - const auto seed = node.get_attribute_value("seed", 0.0f); + const auto seed = common::convert_float_seed(node.get_attribute_value("seed", 0.0f)); const auto target_shape_const = node.get_attribute_as_constant>("shape"); const auto target_type = common::get_ov_element_type(dtype); const uint64_t global_seed = 0; - // TODO: This multiplication leads to a mismatch in accuracy. 
Issue: 123003 - const auto seed_uint64 = static_cast(seed * 1000); - - return {std::make_shared(target_shape_const, - low_const, - high_const, - target_type, - global_seed, - seed_uint64)}; + + return { + std::make_shared(target_shape_const, low_const, high_const, target_type, global_seed, seed)}; } } // namespace set_1 diff --git a/src/frontends/onnx/frontend/src/op/random_uniform_like.cpp b/src/frontends/onnx/frontend/src/op/random_uniform_like.cpp index 917e6c6780edaa..b80e93fce99c1b 100644 --- a/src/frontends/onnx/frontend/src/op/random_uniform_like.cpp +++ b/src/frontends/onnx/frontend/src/op/random_uniform_like.cpp @@ -33,17 +33,11 @@ ov::OutputVector random_uniform_like(const ov::frontend::onnx::Node& node) { const auto high_const = node.get_attribute_as_constant("high", 1.0f); const auto low_const = node.get_attribute_as_constant("low", 0.0f); - const auto seed = node.get_attribute_value("seed", 0.f); + const auto seed = common::convert_float_seed(node.get_attribute_value("seed", 0.f)); const uint64_t global_seed = 0; - const auto seed_uint64 = static_cast(seed * 1000); - - return {std::make_shared(target_shape, - low_const, - high_const, - target_type, - global_seed, - seed_uint64)}; + + return {std::make_shared(target_shape, low_const, high_const, target_type, global_seed, seed)}; } } // namespace set_1 diff --git a/src/frontends/onnx/frontend/src/utils/common.hpp b/src/frontends/onnx/frontend/src/utils/common.hpp index 161d6f6e0384d2..bca81cffa98abe 100644 --- a/src/frontends/onnx/frontend/src/utils/common.hpp +++ b/src/frontends/onnx/frontend/src/utils/common.hpp @@ -168,6 +168,19 @@ bool collect_translation_exceptions(const std::shared_ptr& partially_ std::ostream* output_stream = nullptr, std::shared_ptr> unsupported_operations = nullptr, std::shared_ptr> failures = nullptr); + +// \brief OpenVINO supports only uint64 seeds with a meaningful 0 value (seed will be auto-generated). 
+// Because we use a seed as a just meaningful identifier we may +// just interpret its value as a 32-bit value (float zero value is same with +// uint32 zero value). +// Float -0 value will be interpreted as a valid uint32 value. +// \param seed Float value for conversion +// \return Returns a converted uint32_t value +inline uint32_t convert_float_seed(const float seed) { + const void* seed_ptr = &seed; // To prevent strict-aliasing error + return *static_cast(seed_ptr); +} + } // namespace common } // namespace onnx } // namespace frontend diff --git a/src/frontends/onnx/tests/onnx_import.in.cpp b/src/frontends/onnx/tests/onnx_import.in.cpp index 9bfa370cac3828..087514b8a61827 100644 --- a/src/frontends/onnx/tests/onnx_import.in.cpp +++ b/src/frontends/onnx/tests/onnx_import.in.cpp @@ -4949,7 +4949,13 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_random_uniform) { const auto model = convert_model("random_uniform.onnx"); auto test_case = ov::test::TestCase(model, s_device); - test_case.add_expected_output(Shape{2, 2}, {43.45518f, 48.67585f, 42.227386f, 40.86294f}); + + if (std::string("${BACKEND_NAME}") == std::string("IE_GPU")) { + test_case.add_expected_output(Shape{2, 2}, {40.96875f, 43.4375f, 49.4375f, 45.46875f}); + } else { + test_case.add_expected_output(Shape{2, 2}, {43.70129f, 45.26042f, 43.48503f, 46.43743f}); + } + test_case.run(); } @@ -4958,7 +4964,13 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_random_uniform_like) { auto test_case = ov::test::TestCase(model, s_device); test_case.add_input(Shape{2, 2}, {41, 42, 43, 44}); - test_case.add_expected_output(Shape{2, 2}, {43.45518f, 48.67585f, 42.227386f, 40.86294f}); + + if (std::string("${BACKEND_NAME}") == std::string("IE_GPU")) { + test_case.add_expected_output(Shape{2, 2}, {40.96875f, 43.4375f, 49.4375f, 45.46875f}); + } else { + test_case.add_expected_output(Shape{2, 2}, {43.70129f, 45.26042f, 43.48503f, 46.43743f}); + } + test_case.run(); } @@ -4966,7 +4978,13 @@ OPENVINO_TEST(${BACKEND_NAME}, 
onnx_model_random_normal) { const auto model = convert_model("random_normal.onnx"); auto test_case = ov::test::TestCase(model, s_device); - test_case.add_expected_output(Shape{2, 2}, {83.052017f, 55.496368f, 119.31188f, -3.6946249f}); + + if (std::string("${BACKEND_NAME}") == std::string("IE_GPU")) { + test_case.add_expected_output(Shape{2, 2}, {77.351875f, 74.047821f, -5.996780f, 13.922290f}); + } else { + test_case.add_expected_output(Shape{2, 2}, {30.357481f, 72.41268f, 12.999034f, 70.04985f}); + } + test_case.run(); } @@ -4975,7 +4993,13 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_random_normal_like) { auto test_case = ov::test::TestCase(model, s_device); test_case.add_input(Shape{2, 2}, {0, 0, 0, 0}); - test_case.add_expected_output(Shape{2, 2}, {83.052017f, 55.496368f, 119.31188f, -3.6946249f}); + + if (std::string("${BACKEND_NAME}") == std::string("IE_GPU")) { + test_case.add_expected_output(Shape{2, 2}, {77.351875f, 74.047821f, -5.996780f, 13.922290f}); + } else { + test_case.add_expected_output(Shape{2, 2}, {30.357481f, 72.41268f, 12.999034f, 70.04985f}); + } + test_case.run(); } diff --git a/src/frontends/onnx/tests/tests_python/test_ops_random.py b/src/frontends/onnx/tests/tests_python/test_ops_random.py index dcbab25e4ba24b..91b24c9629ed0d 100644 --- a/src/frontends/onnx/tests/tests_python/test_ops_random.py +++ b/src/frontends/onnx/tests/tests_python/test_ops_random.py @@ -29,7 +29,7 @@ def test_random_uniform(): assert len(np.unique(result)) == 900 assert np.max(result) < high assert np.min(result) > low - assert np.isclose(np.mean(result), np.mean(np.array([low, high])), rtol=0.001) + assert np.isclose(np.mean(result), np.mean(np.array([low, high])), rtol=0.5) def test_random_normal(): From 44e4e5dd64e39b1fe7059bcd793381c8f65ef5cd Mon Sep 17 00:00:00 2001 From: Georgy Krivoruchko Date: Sun, 7 Jul 2024 09:11:16 +0400 Subject: [PATCH 49/50] [ONNX] Change translator registration approach (#25166) ### Details: - Moved to VersionRange usage - Moved a 
translators registration near a translator code - Simplified file structure - Implemented support of *.lib build - Implemented a supported op customization by removing op-files from the build ### Tickets: - N/A --- src/frontends/onnx/frontend/CMakeLists.txt | 71 +++ .../onnx/frontend/src/core/operator_set.hpp | 25 +- .../onnx/frontend/src/op/{abs.hpp => abs.cpp} | 28 +- .../frontend/src/op/{acos.hpp => acos.cpp} | 16 +- .../frontend/src/op/{acosh.hpp => acosh.cpp} | 16 +- .../src/op/adaptive_avg_pooling2d.cpp | 13 +- .../src/op/adaptive_avg_pooling2d.hpp | 18 - src/frontends/onnx/frontend/src/op/add.cpp | 32 +- src/frontends/onnx/frontend/src/op/add.hpp | 38 -- src/frontends/onnx/frontend/src/op/affine.cpp | 13 +- src/frontends/onnx/frontend/src/op/affine.hpp | 20 - src/frontends/onnx/frontend/src/op/and.cpp | 29 + src/frontends/onnx/frontend/src/op/and.hpp | 28 - src/frontends/onnx/frontend/src/op/argmax.cpp | 18 +- src/frontends/onnx/frontend/src/op/argmax.hpp | 37 -- src/frontends/onnx/frontend/src/op/argmin.cpp | 18 +- src/frontends/onnx/frontend/src/op/argmin.hpp | 37 -- .../frontend/src/op/{asin.hpp => asin.cpp} | 16 +- .../frontend/src/op/{asinh.hpp => asinh.cpp} | 16 +- .../frontend/src/op/{atan.hpp => atan.cpp} | 16 +- .../frontend/src/op/{atanh.hpp => atanh.cpp} | 16 +- src/frontends/onnx/frontend/src/op/aten.cpp | 13 +- src/frontends/onnx/frontend/src/op/aten.hpp | 21 - .../onnx/frontend/src/op/average_pool.cpp | 13 +- .../onnx/frontend/src/op/average_pool.hpp | 26 - .../onnx/frontend/src/op/batch_norm.cpp | 24 +- .../onnx/frontend/src/op/batch_norm.hpp | 30 - .../onnx/frontend/src/op/bitshift.cpp | 13 +- .../onnx/frontend/src/op/bitshift.hpp | 20 - .../onnx/frontend/src/op/bitwise_and.cpp | 12 +- .../onnx/frontend/src/op/bitwise_and.hpp | 20 - .../onnx/frontend/src/op/bitwise_not.cpp | 12 +- .../onnx/frontend/src/op/bitwise_not.hpp | 20 - .../onnx/frontend/src/op/bitwise_or.cpp | 12 +- .../onnx/frontend/src/op/bitwise_or.hpp | 20 - 
.../onnx/frontend/src/op/bitwise_xor.cpp | 12 +- .../onnx/frontend/src/op/bitwise_xor.hpp | 20 - .../onnx/frontend/src/op/blackmanwindow.cpp | 13 +- .../onnx/frontend/src/op/blackmanwindow.hpp | 20 - src/frontends/onnx/frontend/src/op/cast.cpp | 13 +- src/frontends/onnx/frontend/src/op/cast.hpp | 21 - .../onnx/frontend/src/op/cast_like.cpp | 13 +- .../onnx/frontend/src/op/cast_like.hpp | 21 - .../frontend/src/op/{ceil.hpp => ceil.cpp} | 15 +- src/frontends/onnx/frontend/src/op/celu.cpp | 13 +- src/frontends/onnx/frontend/src/op/celu.hpp | 20 - src/frontends/onnx/frontend/src/op/clip.cpp | 18 +- src/frontends/onnx/frontend/src/op/clip.hpp | 25 - .../frontend/src/op/com.microsoft/aliases.cpp | 38 ++ .../src/op/com.microsoft/attention.cpp | 13 +- .../src/op/com.microsoft/attention.hpp | 19 - .../src/op/com.microsoft/bias_gelu.cpp | 13 +- .../src/op/com.microsoft/bias_gelu.hpp | 19 - .../embed_layer_normalization.cpp | 13 +- .../embed_layer_normalization.hpp | 19 - .../src/op/com.microsoft/fused_conv.cpp | 22 +- .../src/op/com.microsoft/fused_conv.hpp | 19 - .../src/op/com.microsoft/fusedgemm.cpp | 13 +- .../src/op/com.microsoft/fusedgemm.hpp | 19 - .../frontend/src/op/com.microsoft/pad.cpp | 18 +- .../frontend/src/op/com.microsoft/pad.hpp | 21 - .../skip_layer_normalization.cpp | 13 +- .../skip_layer_normalization.hpp | 19 - .../onnx/frontend/src/op/compress.cpp | 13 +- .../onnx/frontend/src/op/compress.hpp | 19 - src/frontends/onnx/frontend/src/op/concat.cpp | 14 +- src/frontends/onnx/frontend/src/op/concat.hpp | 20 - .../onnx/frontend/src/op/constant.cpp | 19 +- .../onnx/frontend/src/op/constant.hpp | 25 - .../onnx/frontend/src/op/constant_fill.cpp | 12 +- .../onnx/frontend/src/op/constant_fill.hpp | 20 - .../frontend/src/op/constant_of_shape.cpp | 14 +- .../frontend/src/op/constant_of_shape.hpp | 20 - src/frontends/onnx/frontend/src/op/conv.cpp | 13 +- src/frontends/onnx/frontend/src/op/conv.hpp | 33 -- .../onnx/frontend/src/op/conv_integer.cpp | 13 +- 
.../onnx/frontend/src/op/conv_integer.hpp | 26 - .../onnx/frontend/src/op/conv_transpose.cpp | 14 +- .../onnx/frontend/src/op/conv_transpose.hpp | 26 - src/frontends/onnx/frontend/src/op/cos.cpp | 12 +- src/frontends/onnx/frontend/src/op/cos.hpp | 19 - src/frontends/onnx/frontend/src/op/cosh.cpp | 12 +- src/frontends/onnx/frontend/src/op/cosh.hpp | 19 - src/frontends/onnx/frontend/src/op/crop.cpp | 13 +- src/frontends/onnx/frontend/src/op/crop.hpp | 20 - .../onnx/frontend/src/op/cum_sum.cpp | 14 +- .../onnx/frontend/src/op/cum_sum.hpp | 20 - .../onnx/frontend/src/op/depth_to_space.cpp | 14 +- .../onnx/frontend/src/op/depth_to_space.hpp | 28 - .../frontend/src/op/dequantize_linear.cpp | 24 +- .../frontend/src/op/dequantize_linear.hpp | 33 -- src/frontends/onnx/frontend/src/op/dft.cpp | 14 +- src/frontends/onnx/frontend/src/op/dft.hpp | 20 - src/frontends/onnx/frontend/src/op/div.cpp | 31 + src/frontends/onnx/frontend/src/op/div.hpp | 30 - .../onnx/frontend/src/op/dropout.cpp | 23 +- .../onnx/frontend/src/op/dropout.hpp | 27 - .../src/op/dynamic_quantize_linear.cpp | 13 +- .../src/op/dynamic_quantize_linear.hpp | 20 - src/frontends/onnx/frontend/src/op/einsum.cpp | 12 +- src/frontends/onnx/frontend/src/op/einsum.hpp | 20 - src/frontends/onnx/frontend/src/op/elu.cpp | 12 +- src/frontends/onnx/frontend/src/op/elu.hpp | 20 - .../frontend/src/op/{equal.hpp => equal.cpp} | 16 +- .../onnx/frontend/src/op/{erf.hpp => erf.cpp} | 16 +- .../onnx/frontend/src/op/{exp.hpp => exp.cpp} | 16 +- src/frontends/onnx/frontend/src/op/expand.cpp | 13 +- src/frontends/onnx/frontend/src/op/expand.hpp | 23 - .../onnx/frontend/src/op/eye_like.cpp | 13 +- .../onnx/frontend/src/op/eye_like.hpp | 21 - .../onnx/frontend/src/op/flatten.cpp | 13 +- .../onnx/frontend/src/op/flatten.hpp | 20 - .../frontend/src/op/{floor.hpp => floor.cpp} | 16 +- .../src/op/{gather.hpp => gather.cpp} | 16 +- ...ather_elements.hpp => gather_elements.cpp} | 14 +- .../onnx/frontend/src/op/gather_nd.cpp | 12 +- 
.../onnx/frontend/src/op/gather_nd.hpp | 23 - src/frontends/onnx/frontend/src/op/gelu.cpp | 14 +- src/frontends/onnx/frontend/src/op/gelu.hpp | 20 - src/frontends/onnx/frontend/src/op/gemm.cpp | 18 +- src/frontends/onnx/frontend/src/op/gemm.hpp | 25 - .../frontend/src/op/global_average_pool.cpp | 13 +- .../frontend/src/op/global_average_pool.hpp | 26 - .../onnx/frontend/src/op/global_max_pool.cpp | 13 +- .../onnx/frontend/src/op/global_max_pool.hpp | 26 - .../src/op/{greater.hpp => greater.cpp} | 16 +- .../onnx/frontend/src/op/greater_or_equal.cpp | 18 +- .../onnx/frontend/src/op/greater_or_equal.hpp | 25 - .../onnx/frontend/src/op/grid_sample.cpp | 12 +- .../onnx/frontend/src/op/grid_sample.hpp | 20 - .../frontend/src/op/group_normalization.cpp | 14 +- .../frontend/src/op/group_normalization.hpp | 20 - src/frontends/onnx/frontend/src/op/gru.cpp | 14 +- src/frontends/onnx/frontend/src/op/gru.hpp | 20 - .../onnx/frontend/src/op/hammingwindow.cpp | 13 +- .../onnx/frontend/src/op/hammingwindow.hpp | 20 - .../onnx/frontend/src/op/hannwindow.cpp | 13 +- .../onnx/frontend/src/op/hannwindow.hpp | 20 - .../onnx/frontend/src/op/hard_sigmoid.cpp | 14 +- .../onnx/frontend/src/op/hard_sigmoid.hpp | 20 - .../src/op/{hard_swish.hpp => hard_swish.cpp} | 15 +- .../onnx/frontend/src/op/hardmax.cpp | 18 +- .../onnx/frontend/src/op/hardmax.hpp | 23 - .../src/op/{identity.hpp => identity.cpp} | 15 +- src/frontends/onnx/frontend/src/op/if.cpp | 14 +- src/frontends/onnx/frontend/src/op/if.hpp | 26 - .../onnx/frontend/src/op/image_scaler.cpp | 13 +- .../onnx/frontend/src/op/image_scaler.hpp | 19 - .../onnx/frontend/src/op/instance_norm.cpp | 13 +- .../onnx/frontend/src/op/instance_norm.hpp | 30 - .../onnx/frontend/src/op/is_finite.cpp | 13 +- .../onnx/frontend/src/op/is_finite.hpp | 21 - src/frontends/onnx/frontend/src/op/is_inf.cpp | 13 +- src/frontends/onnx/frontend/src/op/is_inf.hpp | 20 - src/frontends/onnx/frontend/src/op/is_nan.cpp | 12 +- 
src/frontends/onnx/frontend/src/op/is_nan.hpp | 21 - .../frontend/src/op/layer_normalization.cpp | 15 +- .../frontend/src/op/layer_normalization.hpp | 22 - .../onnx/frontend/src/op/leaky_relu.cpp | 13 +- .../onnx/frontend/src/op/leaky_relu.hpp | 20 - .../frontend/src/op/{less.hpp => less.cpp} | 16 +- .../onnx/frontend/src/op/less_or_equal.cpp | 18 +- .../onnx/frontend/src/op/less_or_equal.hpp | 27 - src/frontends/onnx/frontend/src/op/log.cpp | 12 +- src/frontends/onnx/frontend/src/op/log.hpp | 19 - .../onnx/frontend/src/op/log_softmax.cpp | 19 +- .../onnx/frontend/src/op/log_softmax.hpp | 25 - src/frontends/onnx/frontend/src/op/loop.cpp | 14 +- src/frontends/onnx/frontend/src/op/loop.hpp | 28 - .../onnx/frontend/src/op/lp_norm.cpp | 13 +- .../onnx/frontend/src/op/lp_norm.hpp | 31 - .../onnx/frontend/src/op/lp_pool.cpp | 13 +- .../onnx/frontend/src/op/lp_pool.hpp | 36 -- src/frontends/onnx/frontend/src/op/lrn.cpp | 12 +- src/frontends/onnx/frontend/src/op/lrn.hpp | 19 - src/frontends/onnx/frontend/src/op/lstm.cpp | 13 +- src/frontends/onnx/frontend/src/op/lstm.hpp | 20 - .../src/op/{matmul.hpp => matmul.cpp} | 18 +- .../onnx/frontend/src/op/matmul_integer.cpp | 13 +- .../onnx/frontend/src/op/matmul_integer.hpp | 25 - .../onnx/frontend/src/op/{max.hpp => max.cpp} | 22 +- .../onnx/frontend/src/op/max_pool.cpp | 18 +- .../onnx/frontend/src/op/max_pool.hpp | 41 -- .../onnx/frontend/src/op/max_roi_pool.cpp | 13 +- .../onnx/frontend/src/op/max_roi_pool.hpp | 21 - src/frontends/onnx/frontend/src/op/mean.cpp | 13 +- src/frontends/onnx/frontend/src/op/mean.hpp | 20 - .../src/op/mean_variance_normalization.cpp | 18 +- .../src/op/mean_variance_normalization.hpp | 23 - .../onnx/frontend/src/op/{min.hpp => min.cpp} | 23 +- src/frontends/onnx/frontend/src/op/mish.cpp | 12 +- src/frontends/onnx/frontend/src/op/mish.hpp | 19 - .../src/op/mmdeploy_roi_align_rotated.cpp | 13 +- .../src/op/mmdeploy_roi_align_rotated.hpp | 20 - src/frontends/onnx/frontend/src/op/mod.cpp | 14 +- 
src/frontends/onnx/frontend/src/op/mod.hpp | 19 - .../onnx/frontend/src/op/{mul.hpp => mul.cpp} | 23 +- .../onnx/frontend/src/op/multinomial.cpp | 14 +- .../onnx/frontend/src/op/multinomial.hpp | 19 - .../onnx/frontend/src/op/{neg.hpp => neg.cpp} | 15 +- .../op/{nms_rotated.hpp => nms_rotated.cpp} | 16 +- .../frontend/src/op/non_max_suppression.cpp | 11 +- .../frontend/src/op/non_max_suppression.hpp | 19 - .../onnx/frontend/src/op/non_zero.cpp | 12 +- .../onnx/frontend/src/op/non_zero.hpp | 26 - .../onnx/frontend/src/op/{not.hpp => not.cpp} | 15 +- src/frontends/onnx/frontend/src/op/onehot.cpp | 13 +- src/frontends/onnx/frontend/src/op/onehot.hpp | 20 - .../onnx/frontend/src/op/{or.hpp => or.cpp} | 15 +- .../deformable_conv_2d.cpp | 13 +- .../deformable_conv_2d.hpp | 32 -- .../org.openvinotoolkit/detection_output.cpp | 14 +- .../org.openvinotoolkit/detection_output.hpp | 20 - .../detection_output.cpp | 16 +- .../detection_output.hpp | 19 - .../generate_proposals_single_image.cpp | 16 +- .../generate_proposals_single_image.hpp | 19 - .../prior_grid_generator.cpp | 16 +- .../prior_grid_generator.hpp | 20 - .../roi_feature_extractor.cpp | 16 +- .../roi_feature_extractor.hpp | 19 - .../experimental_detectron/topk_rios.cpp | 16 +- .../experimental_detectron/topk_rios.hpp | 20 - .../op/org.openvinotoolkit/fake_quantize.cpp | 14 +- .../op/org.openvinotoolkit/fake_quantize.hpp | 20 - .../generate_proposals.cpp | 15 +- .../generate_proposals.hpp | 19 - .../src/op/org.openvinotoolkit/group_norm.cpp | 22 +- .../src/op/org.openvinotoolkit/group_norm.hpp | 20 - .../src/op/org.openvinotoolkit/normalize.cpp | 13 +- .../src/op/org.openvinotoolkit/normalize.hpp | 20 - .../src/op/org.openvinotoolkit/prior_box.cpp | 23 +- .../src/op/org.openvinotoolkit/prior_box.hpp | 22 - .../src/op/org.openvinotoolkit/swish.cpp | 14 +- .../src/op/org.openvinotoolkit/swish.hpp | 19 - src/frontends/onnx/frontend/src/op/pad.cpp | 19 +- src/frontends/onnx/frontend/src/op/pad.hpp | 25 - 
src/frontends/onnx/frontend/src/op/pow.cpp | 13 +- src/frontends/onnx/frontend/src/op/pow.hpp | 20 - src/frontends/onnx/frontend/src/op/prelu.cpp | 12 +- src/frontends/onnx/frontend/src/op/prelu.hpp | 20 - .../onnx/frontend/src/op/qlinear_conv.cpp | 62 +- .../onnx/frontend/src/op/qlinear_conv.hpp | 29 - .../onnx/frontend/src/op/qlinear_matmul.cpp | 57 +- .../onnx/frontend/src/op/qlinear_matmul.hpp | 25 - .../onnx/frontend/src/op/quantize_linear.cpp | 30 +- .../onnx/frontend/src/op/quantize_linear.hpp | 32 -- .../onnx/frontend/src/op/random_normal.cpp | 13 +- .../onnx/frontend/src/op/random_normal.hpp | 21 - .../frontend/src/op/random_normal_like.cpp | 13 +- .../frontend/src/op/random_normal_like.hpp | 21 - .../onnx/frontend/src/op/random_uniform.cpp | 14 +- .../onnx/frontend/src/op/random_uniform.hpp | 21 - .../frontend/src/op/random_uniform_like.cpp | 13 +- .../frontend/src/op/random_uniform_like.hpp | 21 - src/frontends/onnx/frontend/src/op/range.cpp | 14 +- src/frontends/onnx/frontend/src/op/range.hpp | 20 - .../onnx/frontend/src/op/reciprocal.cpp | 13 +- .../onnx/frontend/src/op/reciprocal.hpp | 20 - src/frontends/onnx/frontend/src/op/reduce.cpp | 86 ++- src/frontends/onnx/frontend/src/op/reduce.hpp | 98 ---- .../frontend/src/op/{relu.hpp => relu.cpp} | 16 +- .../onnx/frontend/src/op/reshape.cpp | 14 +- .../onnx/frontend/src/op/reshape.hpp | 27 - src/frontends/onnx/frontend/src/op/resize.cpp | 18 +- src/frontends/onnx/frontend/src/op/resize.hpp | 25 - .../onnx/frontend/src/op/reverse_sequence.cpp | 14 +- .../onnx/frontend/src/op/reverse_sequence.hpp | 20 - src/frontends/onnx/frontend/src/op/rnn.cpp | 13 +- src/frontends/onnx/frontend/src/op/rnn.hpp | 20 - .../onnx/frontend/src/op/roi_align.cpp | 19 +- .../onnx/frontend/src/op/roi_align.hpp | 25 - src/frontends/onnx/frontend/src/op/round.cpp | 12 +- src/frontends/onnx/frontend/src/op/round.hpp | 23 - src/frontends/onnx/frontend/src/op/scan.cpp | 18 +- src/frontends/onnx/frontend/src/op/scan.hpp | 32 -- 
.../onnx/frontend/src/op/scatter_elements.cpp | 20 +- .../onnx/frontend/src/op/scatter_elements.hpp | 20 - .../onnx/frontend/src/op/scatter_nd.cpp | 13 +- .../onnx/frontend/src/op/scatter_nd.hpp | 23 - src/frontends/onnx/frontend/src/op/selu.cpp | 14 +- src/frontends/onnx/frontend/src/op/selu.hpp | 20 - src/frontends/onnx/frontend/src/op/shape.cpp | 18 +- src/frontends/onnx/frontend/src/op/shape.hpp | 27 - src/frontends/onnx/frontend/src/op/shrink.cpp | 13 +- src/frontends/onnx/frontend/src/op/shrink.hpp | 25 - .../src/op/{sigmoid.hpp => sigmoid.cpp} | 16 +- .../frontend/src/op/{sign.hpp => sign.cpp} | 16 +- .../onnx/frontend/src/op/{sin.hpp => sin.cpp} | 16 +- .../frontend/src/op/{sinh.hpp => sinh.cpp} | 16 +- src/frontends/onnx/frontend/src/op/size.cpp | 13 +- src/frontends/onnx/frontend/src/op/size.hpp | 20 - src/frontends/onnx/frontend/src/op/slice.cpp | 19 +- src/frontends/onnx/frontend/src/op/slice.hpp | 25 - .../onnx/frontend/src/op/softmax.cpp | 24 +- .../onnx/frontend/src/op/softmax.hpp | 30 - .../onnx/frontend/src/op/softplus.cpp | 12 +- .../onnx/frontend/src/op/softplus.hpp | 20 - .../onnx/frontend/src/op/softsign.cpp | 12 +- .../onnx/frontend/src/op/softsign.hpp | 20 - .../onnx/frontend/src/op/space_to_depth.cpp | 14 +- .../onnx/frontend/src/op/space_to_depth.hpp | 25 - src/frontends/onnx/frontend/src/op/split.cpp | 19 +- src/frontends/onnx/frontend/src/op/split.hpp | 25 - .../frontend/src/op/{sqrt.hpp => sqrt.cpp} | 16 +- .../onnx/frontend/src/op/squeeze.cpp | 19 +- .../onnx/frontend/src/op/squeeze.hpp | 25 - src/frontends/onnx/frontend/src/op/stft.cpp | 13 +- src/frontends/onnx/frontend/src/op/stft.hpp | 20 - .../onnx/frontend/src/op/{sub.hpp => sub.cpp} | 23 +- .../onnx/frontend/src/op/{sum.hpp => sum.cpp} | 22 +- .../onnx/frontend/src/op/{tan.hpp => tan.cpp} | 16 +- .../frontend/src/op/{tanh.hpp => tanh.cpp} | 16 +- .../onnx/frontend/src/op/thresholded_relu.cpp | 13 +- .../onnx/frontend/src/op/thresholded_relu.hpp | 20 - 
src/frontends/onnx/frontend/src/op/tile.cpp | 14 +- src/frontends/onnx/frontend/src/op/tile.hpp | 24 - src/frontends/onnx/frontend/src/op/topk.cpp | 24 +- src/frontends/onnx/frontend/src/op/topk.hpp | 40 -- .../onnx/frontend/src/op/transpose.cpp | 13 +- .../onnx/frontend/src/op/transpose.hpp | 20 - src/frontends/onnx/frontend/src/op/trilu.cpp | 13 +- src/frontends/onnx/frontend/src/op/trilu.hpp | 18 - src/frontends/onnx/frontend/src/op/unique.cpp | 12 +- src/frontends/onnx/frontend/src/op/unique.hpp | 20 - .../onnx/frontend/src/op/unsqueeze.cpp | 19 +- .../onnx/frontend/src/op/unsqueeze.hpp | 25 - .../onnx/frontend/src/op/upsample.cpp | 23 +- .../onnx/frontend/src/op/upsample.hpp | 30 - .../frontend/src/op/{where.hpp => where.cpp} | 15 +- .../onnx/frontend/src/op/{xor.hpp => xor.cpp} | 15 +- .../onnx/frontend/src/ops_bridge.cpp | 532 ++---------------- .../onnx/frontend/src/ops_bridge.hpp | 3 - .../onnx/frontend/src/version_range.hpp | 3 - 334 files changed, 1759 insertions(+), 5317 deletions(-) rename src/frontends/onnx/frontend/src/op/{abs.hpp => abs.cpp} (53%) rename src/frontends/onnx/frontend/src/op/{acos.hpp => acos.cpp} (57%) rename src/frontends/onnx/frontend/src/op/{acosh.hpp => acosh.cpp} (56%) delete mode 100644 src/frontends/onnx/frontend/src/op/adaptive_avg_pooling2d.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/add.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/affine.hpp create mode 100644 src/frontends/onnx/frontend/src/op/and.cpp delete mode 100644 src/frontends/onnx/frontend/src/op/and.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/argmax.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/argmin.hpp rename src/frontends/onnx/frontend/src/op/{asin.hpp => asin.cpp} (57%) rename src/frontends/onnx/frontend/src/op/{asinh.hpp => asinh.cpp} (56%) rename src/frontends/onnx/frontend/src/op/{atan.hpp => atan.cpp} (57%) rename src/frontends/onnx/frontend/src/op/{atanh.hpp => atanh.cpp} (56%) delete mode 100644 
src/frontends/onnx/frontend/src/op/aten.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/average_pool.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/batch_norm.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/bitshift.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/bitwise_and.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/bitwise_not.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/bitwise_or.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/bitwise_xor.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/blackmanwindow.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/cast.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/cast_like.hpp rename src/frontends/onnx/frontend/src/op/{ceil.hpp => ceil.cpp} (57%) delete mode 100644 src/frontends/onnx/frontend/src/op/celu.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/clip.hpp create mode 100644 src/frontends/onnx/frontend/src/op/com.microsoft/aliases.cpp delete mode 100644 src/frontends/onnx/frontend/src/op/com.microsoft/attention.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/com.microsoft/bias_gelu.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/com.microsoft/embed_layer_normalization.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/com.microsoft/fused_conv.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/com.microsoft/fusedgemm.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/com.microsoft/pad.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/com.microsoft/skip_layer_normalization.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/compress.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/concat.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/constant.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/constant_fill.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/constant_of_shape.hpp delete mode 100644 
src/frontends/onnx/frontend/src/op/conv.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/conv_integer.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/conv_transpose.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/cos.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/cosh.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/crop.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/cum_sum.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/depth_to_space.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/dequantize_linear.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/dft.hpp create mode 100644 src/frontends/onnx/frontend/src/op/div.cpp delete mode 100644 src/frontends/onnx/frontend/src/op/div.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/dropout.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/dynamic_quantize_linear.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/einsum.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/elu.hpp rename src/frontends/onnx/frontend/src/op/{equal.hpp => equal.cpp} (59%) rename src/frontends/onnx/frontend/src/op/{erf.hpp => erf.cpp} (57%) rename src/frontends/onnx/frontend/src/op/{exp.hpp => exp.cpp} (57%) delete mode 100644 src/frontends/onnx/frontend/src/op/expand.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/eye_like.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/flatten.hpp rename src/frontends/onnx/frontend/src/op/{floor.hpp => floor.cpp} (56%) rename src/frontends/onnx/frontend/src/op/{gather.hpp => gather.cpp} (73%) rename src/frontends/onnx/frontend/src/op/{gather_elements.hpp => gather_elements.cpp} (65%) delete mode 100644 src/frontends/onnx/frontend/src/op/gather_nd.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/gelu.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/gemm.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/global_average_pool.hpp delete mode 100644 
src/frontends/onnx/frontend/src/op/global_max_pool.hpp rename src/frontends/onnx/frontend/src/op/{greater.hpp => greater.cpp} (58%) delete mode 100644 src/frontends/onnx/frontend/src/op/greater_or_equal.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/grid_sample.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/group_normalization.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/gru.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/hammingwindow.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/hannwindow.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/hard_sigmoid.hpp rename src/frontends/onnx/frontend/src/op/{hard_swish.hpp => hard_swish.cpp} (55%) delete mode 100644 src/frontends/onnx/frontend/src/op/hardmax.hpp rename src/frontends/onnx/frontend/src/op/{identity.hpp => identity.cpp} (61%) delete mode 100644 src/frontends/onnx/frontend/src/op/if.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/image_scaler.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/instance_norm.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/is_finite.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/is_inf.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/is_nan.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/layer_normalization.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/leaky_relu.hpp rename src/frontends/onnx/frontend/src/op/{less.hpp => less.cpp} (59%) delete mode 100644 src/frontends/onnx/frontend/src/op/less_or_equal.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/log.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/log_softmax.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/loop.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/lp_norm.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/lp_pool.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/lrn.hpp delete mode 100644 
src/frontends/onnx/frontend/src/op/lstm.hpp rename src/frontends/onnx/frontend/src/op/{matmul.hpp => matmul.cpp} (57%) delete mode 100644 src/frontends/onnx/frontend/src/op/matmul_integer.hpp rename src/frontends/onnx/frontend/src/op/{max.hpp => max.cpp} (52%) delete mode 100644 src/frontends/onnx/frontend/src/op/max_pool.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/max_roi_pool.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/mean.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/mean_variance_normalization.hpp rename src/frontends/onnx/frontend/src/op/{min.hpp => min.cpp} (50%) delete mode 100644 src/frontends/onnx/frontend/src/op/mish.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/mmdeploy_roi_align_rotated.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/mod.hpp rename src/frontends/onnx/frontend/src/op/{mul.hpp => mul.cpp} (50%) delete mode 100644 src/frontends/onnx/frontend/src/op/multinomial.hpp rename src/frontends/onnx/frontend/src/op/{neg.hpp => neg.cpp} (57%) rename src/frontends/onnx/frontend/src/op/{nms_rotated.hpp => nms_rotated.cpp} (82%) delete mode 100644 src/frontends/onnx/frontend/src/op/non_max_suppression.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/non_zero.hpp rename src/frontends/onnx/frontend/src/op/{not.hpp => not.cpp} (56%) delete mode 100644 src/frontends/onnx/frontend/src/op/onehot.hpp rename src/frontends/onnx/frontend/src/op/{or.hpp => or.cpp} (58%) delete mode 100644 src/frontends/onnx/frontend/src/op/org.openvinotoolkit/deformable_conv_2d.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/org.openvinotoolkit/detection_output.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/detection_output.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/generate_proposals_single_image.hpp delete mode 100644 
src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/prior_grid_generator.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/roi_feature_extractor.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/topk_rios.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/org.openvinotoolkit/fake_quantize.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/org.openvinotoolkit/generate_proposals.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/org.openvinotoolkit/group_norm.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/org.openvinotoolkit/normalize.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/org.openvinotoolkit/prior_box.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/org.openvinotoolkit/swish.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/pad.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/pow.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/prelu.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/qlinear_conv.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/qlinear_matmul.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/quantize_linear.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/random_normal.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/random_normal_like.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/random_uniform.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/random_uniform_like.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/range.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/reciprocal.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/reduce.hpp rename src/frontends/onnx/frontend/src/op/{relu.hpp => relu.cpp} (60%) delete mode 100644 src/frontends/onnx/frontend/src/op/reshape.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/resize.hpp delete mode 100644 
src/frontends/onnx/frontend/src/op/reverse_sequence.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/rnn.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/roi_align.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/round.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/scan.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/scatter_elements.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/scatter_nd.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/selu.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/shape.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/shrink.hpp rename src/frontends/onnx/frontend/src/op/{sigmoid.hpp => sigmoid.cpp} (56%) rename src/frontends/onnx/frontend/src/op/{sign.hpp => sign.cpp} (57%) rename src/frontends/onnx/frontend/src/op/{sin.hpp => sin.cpp} (57%) rename src/frontends/onnx/frontend/src/op/{sinh.hpp => sinh.cpp} (57%) delete mode 100644 src/frontends/onnx/frontend/src/op/size.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/slice.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/softmax.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/softplus.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/softsign.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/space_to_depth.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/split.hpp rename src/frontends/onnx/frontend/src/op/{sqrt.hpp => sqrt.cpp} (57%) delete mode 100644 src/frontends/onnx/frontend/src/op/squeeze.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/stft.hpp rename src/frontends/onnx/frontend/src/op/{sub.hpp => sub.cpp} (50%) rename src/frontends/onnx/frontend/src/op/{sum.hpp => sum.cpp} (52%) rename src/frontends/onnx/frontend/src/op/{tan.hpp => tan.cpp} (57%) rename src/frontends/onnx/frontend/src/op/{tanh.hpp => tanh.cpp} (57%) delete mode 100644 src/frontends/onnx/frontend/src/op/thresholded_relu.hpp delete mode 100644 
src/frontends/onnx/frontend/src/op/tile.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/topk.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/transpose.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/trilu.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/unique.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/unsqueeze.hpp delete mode 100644 src/frontends/onnx/frontend/src/op/upsample.hpp rename src/frontends/onnx/frontend/src/op/{where.hpp => where.cpp} (62%) rename src/frontends/onnx/frontend/src/op/{xor.hpp => xor.cpp} (67%) diff --git a/src/frontends/onnx/frontend/CMakeLists.txt b/src/frontends/onnx/frontend/CMakeLists.txt index 681fa778e2be12..0ceeec8f7606a3 100644 --- a/src/frontends/onnx/frontend/CMakeLists.txt +++ b/src/frontends/onnx/frontend/CMakeLists.txt @@ -2,6 +2,73 @@ # SPDX-License-Identifier: Apache-2.0 # +if(NOT BUILD_SHARED_LIBS) + file(GLOB_RECURSE op_list "src/op/*.cpp") + set(static_reg_file "src/static_reg.hpp") + file(WRITE ${static_reg_file} "// Copyright (C) 2018-2024 Intel Corporation\n// SPDX-License-Identifier: Apache-2.0\n// Auto generated file, DO NOT EDIT INLINE\n\n") + file(APPEND ${static_reg_file} "#include \"core/operator_set.hpp\"\n\n") + file(APPEND ${static_reg_file} "#define ONNX_DECL_OP(op) extern ov::OutputVector op(const Node&)\n\n") + file(APPEND ${static_reg_file} "namespace ov {\nnamespace frontend {\nnamespace onnx {\n") + foreach(src ${op_list}) + file(READ ${src} source_code) + string(REGEX MATCHALL "ONNX_OP([^;]+);" matches "${source_code}") + foreach(match ${matches}) + if(${match} MATCHES "([a-z0-9_]+)::([a-z0-9_]+)::([a-z0-9_]+)") + list(APPEND declarations ${CMAKE_MATCH_0}) + endif() + list(APPEND registrations ${match}) + endforeach() + endforeach() + list(APPEND declarations "com_microsoft::opset_1::register_multiple_translators") + list(APPEND registrations "com_microsoft::opset_1::register_multiple_translators()") + list(SORT declarations) + set(domain "") + 
set(opset "") + set(op_name, "") + foreach(decl ${declarations}) + string(REGEX MATCH "([a-z0-9_]+)::([a-z0-9_]+)::([a-z0-9_]+)" matches ${decl}) + if(NOT domain STREQUAL CMAKE_MATCH_1) + if(NOT opset STREQUAL "") + file(APPEND ${static_reg_file} "} // namespace ${opset}\n") + endif() + if(NOT domain STREQUAL "") + file(APPEND ${static_reg_file} "} // namespace ${domain}\n") + endif() + set(domain ${CMAKE_MATCH_1}) + set(opset "") + file(APPEND ${static_reg_file} "namespace ${domain} {\n") + endif() + if(NOT opset STREQUAL CMAKE_MATCH_2) + if(NOT opset STREQUAL "") + file(APPEND ${static_reg_file} "} // namespace ${opset}\n") + endif() + set(opset ${CMAKE_MATCH_2}) + file(APPEND ${static_reg_file} "namespace ${opset} {\n") + endif() + if(NOT op_name STREQUAL CMAKE_MATCH_3) + set(op_name ${CMAKE_MATCH_3}) + if(NOT op_name STREQUAL "register_multiple_translators") + file(APPEND ${static_reg_file} "ONNX_DECL_OP(${CMAKE_MATCH_3});\n") + else() + file(APPEND ${static_reg_file} "extern bool ${CMAKE_MATCH_3}(void);\n") + endif() + endif() + endforeach() + if(NOT opset STREQUAL "") + file(APPEND ${static_reg_file} "} // namespace ${opset}\n") + endif() + if(NOT domain STREQUAL "") + file(APPEND ${static_reg_file} "} // namespace ${domain}\n") + endif() + file(APPEND ${static_reg_file} "\nvoid static_lib_registration(void) {\n") + foreach(reg ${registrations}) + string(REPLACE "ONNX_OP(" "ONNX_OP_M(" reg ${reg}) + file(APPEND ${static_reg_file} " ${reg};\n") + endforeach() + file(APPEND ${static_reg_file} "}\n") + file(APPEND ${static_reg_file} "} // namespace onnx\n} // namespace frontend\n} // namespace ov\n#undef ONNX_DECL_OP\n") +endif() + ov_add_frontend(NAME onnx LINKABLE_FRONTEND PROTOBUF_REQUIRED @@ -13,6 +80,10 @@ ov_add_frontend(NAME onnx set(ONNX_OPSET_VERSION 20 CACHE INTERNAL "Supported version of ONNX operator set") target_compile_definitions(${TARGET_NAME} PRIVATE ONNX_OPSET_VERSION=${ONNX_OPSET_VERSION}) +if(BUILD_SHARED_LIBS) + 
target_compile_definitions(${TARGET_NAME} PRIVATE ONNX_BUILD_SHARED=1) +endif() + ov_ncc_naming_style(FOR_TARGET ${TARGET_NAME} SOURCE_DIRECTORIES "${${TARGET_NAME}_INCLUDE_DIR}" DEFINITIONS diff --git a/src/frontends/onnx/frontend/src/core/operator_set.hpp b/src/frontends/onnx/frontend/src/core/operator_set.hpp index e1d588e855d42c..bb7b6cbadc0013 100644 --- a/src/frontends/onnx/frontend/src/core/operator_set.hpp +++ b/src/frontends/onnx/frontend/src/core/operator_set.hpp @@ -5,21 +5,44 @@ #pragma once #include +#include #include #include #include "core/node.hpp" +#include "version_range.hpp" namespace ov { namespace frontend { namespace onnx { -/// \brief Function which transforms single ONNX operator to OV sub-graph. +/// \brief Function which transforms single ONNX operator to OV sub-graph. using Operator = std::function; /// \brief Map which contains ONNX operators accessible by std::string value as a key. using OperatorSet = std::unordered_map; +/// \brief Map with map of versioned operators, accessible like map["Operation"][Version] +using DomainOpset = std::unordered_map>; + +extern const char* OPENVINO_ONNX_DOMAIN; +extern const char* MICROSOFT_DOMAIN; +extern const char* PYTORCH_ATEN_DOMAIN; +extern const char* MMDEPLOY_DOMAIN; + +/// \brief Registering a versions range of translator in global map of translators (preferred to use) +extern bool register_translator(const std::string name, + const VersionRange range, + const Operator fn, + const std::string domain = ""); + +#define OPSET_RANGE(_in, _until) \ + VersionRange { _in, _until } +#define OPSET_SINCE(_since) VersionRange::since(_since) +#define OPSET_IN(_in) VersionRange::in(_in) +#define ONNX_OP_M(name, range, ...) register_translator(name, range, __VA_ARGS__) +#define ONNX_OP(name, range, ...) 
static bool onnx_op_reg = ONNX_OP_M(name, range, __VA_ARGS__) + } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/abs.hpp b/src/frontends/onnx/frontend/src/op/abs.cpp similarity index 53% rename from src/frontends/onnx/frontend/src/op/abs.hpp rename to src/frontends/onnx/frontend/src/op/abs.cpp index e62bd8ce37b6af..9c15956edfcc55 100644 --- a/src/frontends/onnx/frontend/src/op/abs.hpp +++ b/src/frontends/onnx/frontend/src/op/abs.cpp @@ -2,33 +2,33 @@ // SPDX-License-Identifier: Apache-2.0 // -#pragma once +#include "openvino/op/abs.hpp" -#include "core/node.hpp" +#include "core/operator_set.hpp" #include "exceptions.hpp" -#include "openvino/op/abs.hpp" namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { -inline ov::OutputVector abs(const ov::frontend::onnx::Node& node) { +namespace ai_onnx { +namespace opset_1 { +ov::OutputVector abs(const ov::frontend::onnx::Node& node) { CHECK_VALID_NODE(node, !node.has_attribute("consumed_inputs"), "consumed_inputs legacy attribute of Abs op is not supported"); return {std::make_shared(node.get_ov_inputs().at(0))}; } -} // namespace set_1 +ONNX_OP("Abs", OPSET_RANGE(1, 5), ai_onnx::opset_1::abs); +} // namespace opset_1 -namespace set_6 { -using set_1::abs; -} // namespace set_6 +namespace opset_6 { +ONNX_OP("Abs", OPSET_RANGE(6, 12), ai_onnx::opset_1::abs); +} // namespace opset_6 -namespace set_13 { -using set_6::abs; -} // namespace set_13 -} // namespace op +namespace opset_13 { +ONNX_OP("Abs", OPSET_SINCE(13), ai_onnx::opset_1::abs); +} // namespace opset_13 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/acos.hpp b/src/frontends/onnx/frontend/src/op/acos.cpp similarity index 57% rename from src/frontends/onnx/frontend/src/op/acos.hpp rename to src/frontends/onnx/frontend/src/op/acos.cpp index d26956409233a3..4d1eda245a2ac0 100644 --- 
a/src/frontends/onnx/frontend/src/op/acos.hpp +++ b/src/frontends/onnx/frontend/src/op/acos.cpp @@ -2,21 +2,21 @@ // SPDX-License-Identifier: Apache-2.0 // -#pragma once - -#include "core/node.hpp" #include "openvino/op/acos.hpp" +#include "core/operator_set.hpp" + namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_7 { -inline ov::OutputVector acos(const ov::frontend::onnx::Node& node) { +namespace ai_onnx { +namespace opset_7 { +ov::OutputVector acos(const ov::frontend::onnx::Node& node) { return {std::make_shared(node.get_ov_inputs().at(0))}; } -} // namespace set_7 -} // namespace op +ONNX_OP("Acos", OPSET_SINCE(1), ai_onnx::opset_7::acos); +} // namespace opset_7 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/acosh.hpp b/src/frontends/onnx/frontend/src/op/acosh.cpp similarity index 56% rename from src/frontends/onnx/frontend/src/op/acosh.hpp rename to src/frontends/onnx/frontend/src/op/acosh.cpp index 3ff0c982a98cb5..e91d2bdd8b28b2 100644 --- a/src/frontends/onnx/frontend/src/op/acosh.hpp +++ b/src/frontends/onnx/frontend/src/op/acosh.cpp @@ -2,21 +2,21 @@ // SPDX-License-Identifier: Apache-2.0 // -#pragma once - -#include "core/node.hpp" #include "openvino/op/acosh.hpp" +#include "core/operator_set.hpp" + namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_9 { -inline ov::OutputVector acosh(const ov::frontend::onnx::Node& node) { +namespace ai_onnx { +namespace opset_9 { +ov::OutputVector acosh(const ov::frontend::onnx::Node& node) { return {std::make_shared(node.get_ov_inputs().at(0))}; } -} // namespace set_9 -} // namespace op +ONNX_OP("Acosh", OPSET_SINCE(1), ai_onnx::opset_9::acosh); +} // namespace opset_9 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/adaptive_avg_pooling2d.cpp 
b/src/frontends/onnx/frontend/src/op/adaptive_avg_pooling2d.cpp index c7e260e50eafc5..5f31c65e8a6cf1 100644 --- a/src/frontends/onnx/frontend/src/op/adaptive_avg_pooling2d.cpp +++ b/src/frontends/onnx/frontend/src/op/adaptive_avg_pooling2d.cpp @@ -2,8 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/adaptive_avg_pooling2d.hpp" - +#include "core/operator_set.hpp" #include "exceptions.hpp" #include "openvino/op/adaptive_avg_pool.hpp" @@ -12,8 +11,8 @@ using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector adaptive_avg_pooling2d(const ov::frontend::onnx::Node& node) { const auto inputs = node.get_ov_inputs(); const auto num_inputs = inputs.size(); @@ -22,9 +21,9 @@ ov::OutputVector adaptive_avg_pooling2d(const ov::frontend::onnx::Node& node) { return {std::make_shared(inputs[0], inputs[1])}; } - -} // namespace set_1 -} // namespace op +ONNX_OP("adaptive_avg_pool2d", OPSET_SINCE(1), ai_onnx::opset_1::adaptive_avg_pooling2d, PYTORCH_ATEN_DOMAIN); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/adaptive_avg_pooling2d.hpp b/src/frontends/onnx/frontend/src/op/adaptive_avg_pooling2d.hpp deleted file mode 100644 index b7c3fc8b9b06c7..00000000000000 --- a/src/frontends/onnx/frontend/src/op/adaptive_avg_pooling2d.hpp +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright (C) 2022 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector adaptive_avg_pooling2d(const ov::frontend::onnx::Node& node); -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/add.cpp 
b/src/frontends/onnx/frontend/src/op/add.cpp index e6dc71e5081d89..bc7b24736930d3 100644 --- a/src/frontends/onnx/frontend/src/op/add.cpp +++ b/src/frontends/onnx/frontend/src/op/add.cpp @@ -2,10 +2,10 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/add.hpp" +#include "openvino/op/add.hpp" +#include "core/operator_set.hpp" #include "exceptions.hpp" -#include "openvino/op/add.hpp" #include "utils/common.hpp" using namespace ov::op; @@ -13,28 +13,40 @@ using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector add(const ov::frontend::onnx::Node& node) { CHECK_VALID_NODE(node, !node.has_attribute("consumed_inputs"), "consumed_inputs legacy attribute of Add op is not supported"); return common::handle_opset6_binary_op(node); } -} // namespace set_1 +ONNX_OP("Add", OPSET_RANGE(1, 5), ai_onnx::opset_1::add); +} // namespace opset_1 -namespace set_6 { +namespace opset_6 { ov::OutputVector add(const ov::frontend::onnx::Node& node) { return common::handle_opset6_binary_op(node); } -} // namespace set_6 +ONNX_OP("Add", OPSET_IN(6), ai_onnx::opset_6::add); +} // namespace opset_6 -namespace set_7 { +namespace opset_7 { ov::OutputVector add(const ov::frontend::onnx::Node& node) { return {std::make_shared(node.get_ov_inputs().at(0), node.get_ov_inputs().at(1))}; } -} // namespace set_7 -} // namespace op +ONNX_OP("Add", OPSET_RANGE(7, 12), ai_onnx::opset_7::add); +} // namespace opset_7 + +namespace opset_13 { +ONNX_OP("Add", OPSET_IN(13), ai_onnx::opset_7::add); +} // namespace opset_13 + +namespace opset_14 { +ONNX_OP("Add", OPSET_SINCE(14), ai_onnx::opset_7::add); +} // namespace opset_14 + +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/add.hpp b/src/frontends/onnx/frontend/src/op/add.hpp deleted file mode 100644 index 15ad3515473bfe..00000000000000 --- 
a/src/frontends/onnx/frontend/src/op/add.hpp +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector add(const ov::frontend::onnx::Node& node); - -} // namespace set_1 - -namespace set_6 { -ov::OutputVector add(const ov::frontend::onnx::Node& node); - -} // namespace set_6 - -namespace set_7 { -ov::OutputVector add(const ov::frontend::onnx::Node& node); - -} // namespace set_7 - -namespace set_13 { -using set_7::add; -} // namespace set_13 - -namespace set_14 { -using set_13::add; -} // namespace set_14 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/affine.cpp b/src/frontends/onnx/frontend/src/op/affine.cpp index 8afa58ce87efa1..fd56caa3566c81 100644 --- a/src/frontends/onnx/frontend/src/op/affine.cpp +++ b/src/frontends/onnx/frontend/src/op/affine.cpp @@ -2,19 +2,17 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/affine.hpp" - +#include "core/operator_set.hpp" #include "exceptions.hpp" #include "openvino/op/add.hpp" #include "openvino/op/multiply.hpp" - using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector affine(const ov::frontend::onnx::Node& node) { // Affine is an obsolete experimental ONNX operation. 
// It takes one input tensor and produces one output tensor where @@ -33,8 +31,9 @@ ov::OutputVector affine(const ov::frontend::onnx::Node& node) { return {std::make_shared(std::make_shared(data, alpha_const), beta_const)}; } -} // namespace set_1 -} // namespace op +ONNX_OP("Affine", OPSET_SINCE(1), ai_onnx::opset_1::affine); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/affine.hpp b/src/frontends/onnx/frontend/src/op/affine.hpp deleted file mode 100644 index 279edcdb378ac1..00000000000000 --- a/src/frontends/onnx/frontend/src/op/affine.hpp +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector affine(const ov::frontend::onnx::Node& node); - -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/and.cpp b/src/frontends/onnx/frontend/src/op/and.cpp new file mode 100644 index 00000000000000..2595617dee5381 --- /dev/null +++ b/src/frontends/onnx/frontend/src/op/and.cpp @@ -0,0 +1,29 @@ +// Copyright (C) 2018-2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "core/operator_set.hpp" +#include "openvino/op/logical_and.hpp" +#include "utils/common.hpp" + +namespace ov { +namespace frontend { +namespace onnx { +namespace ai_onnx { +namespace opset_1 { +ov::OutputVector logical_and(const ov::frontend::onnx::Node& node) { + return common::handle_opset6_binary_op(node); +} +ONNX_OP("And", OPSET_RANGE(1, 6), ai_onnx::opset_1::logical_and); +} // namespace opset_1 + +namespace opset_7 { +ov::OutputVector logical_and(const ov::frontend::onnx::Node& node) { + return {std::make_shared(node.get_ov_inputs().at(0), 
node.get_ov_inputs().at(1))}; +} +ONNX_OP("And", OPSET_SINCE(7), ai_onnx::opset_7::logical_and); +} // namespace opset_7 +} // namespace ai_onnx +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/and.hpp b/src/frontends/onnx/frontend/src/op/and.hpp deleted file mode 100644 index d5f78c1d89ca22..00000000000000 --- a/src/frontends/onnx/frontend/src/op/and.hpp +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" -#include "openvino/op/logical_and.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -inline ov::OutputVector logical_and(const ov::frontend::onnx::Node& node) { - return common::handle_opset6_binary_op(node); -} -} // namespace set_1 - -namespace set_7 { -inline ov::OutputVector logical_and(const ov::frontend::onnx::Node& node) { - return {std::make_shared(node.get_ov_inputs().at(0), node.get_ov_inputs().at(1))}; -} -} // namespace set_7 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/argmax.cpp b/src/frontends/onnx/frontend/src/op/argmax.cpp index 2ecc83892b2888..8b7c0da4c0727f 100644 --- a/src/frontends/onnx/frontend/src/op/argmax.cpp +++ b/src/frontends/onnx/frontend/src/op/argmax.cpp @@ -2,31 +2,31 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/argmax.hpp" - +#include "core/operator_set.hpp" #include "exceptions.hpp" #include "utils/arg_min_max_factory.hpp" - namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector argmax(const ov::frontend::onnx::Node& node) { const utils::ArgMinMaxFactory arg_factory(node); return {arg_factory.make_arg_max()}; } -} // namespace set_1 +ONNX_OP("ArgMax", OPSET_RANGE(1, 11), ai_onnx::opset_1::argmax); +} // namespace 
opset_1 -namespace set_12 { +namespace opset_12 { ov::OutputVector argmax(const ov::frontend::onnx::Node& node) { const utils::ArgMinMaxFactory arg_factory(node); return {arg_factory.make_arg_max()}; } -} // namespace set_12 -} // namespace op +ONNX_OP("ArgMax", OPSET_SINCE(12), ai_onnx::opset_12::argmax); +} // namespace opset_12 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/argmax.hpp b/src/frontends/onnx/frontend/src/op/argmax.hpp deleted file mode 100644 index f3c6bbcf56a3f8..00000000000000 --- a/src/frontends/onnx/frontend/src/op/argmax.hpp +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -/// \brief Convert ONNX ArgMax operation to an OV node. -/// -/// \param node The ONNX node object representing this operation. -/// -/// \return The vector containing an OV node which produces the output -/// of an ONNX ArgMax operation. -ov::OutputVector argmax(const ov::frontend::onnx::Node& node); - -} // namespace set_1 - -namespace set_12 { -/// \brief Convert ONNX ArgMax operation to an OV node. -/// -/// \param node The ONNX node object representing this operation. -/// -/// \return The vector containing an OV node which produces the output -/// of an ONNX ArgMax operation. 
-ov::OutputVector argmax(const ov::frontend::onnx::Node& node); - -} // namespace set_12 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/argmin.cpp b/src/frontends/onnx/frontend/src/op/argmin.cpp index 79aa05d587e51d..694363bb8bcffb 100644 --- a/src/frontends/onnx/frontend/src/op/argmin.cpp +++ b/src/frontends/onnx/frontend/src/op/argmin.cpp @@ -2,31 +2,31 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/argmin.hpp" - +#include "core/operator_set.hpp" #include "exceptions.hpp" #include "utils/arg_min_max_factory.hpp" - namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector argmin(const ov::frontend::onnx::Node& node) { const utils::ArgMinMaxFactory arg_factory(node); return {arg_factory.make_arg_min()}; } -} // namespace set_1 +ONNX_OP("ArgMin", {1, 11}, ai_onnx::opset_1::argmin); +} // namespace opset_1 -namespace set_12 { +namespace opset_12 { ov::OutputVector argmin(const ov::frontend::onnx::Node& node) { const utils::ArgMinMaxFactory arg_factory(node); return {arg_factory.make_arg_min()}; } -} // namespace set_12 -} // namespace op +ONNX_OP("ArgMin", OPSET_SINCE(12), ai_onnx::opset_12::argmin); +} // namespace opset_12 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/argmin.hpp b/src/frontends/onnx/frontend/src/op/argmin.hpp deleted file mode 100644 index 7dd8d358c93341..00000000000000 --- a/src/frontends/onnx/frontend/src/op/argmin.hpp +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -/// \brief Convert ONNX ArgMin operation to an OV node. 
-/// -/// \param node The ONNX node object representing this operation. -/// -/// \return The vector containing an OV node which produces the output -/// of an ONNX ArgMin operation. -ov::OutputVector argmin(const ov::frontend::onnx::Node& node); - -} // namespace set_1 - -namespace set_12 { -/// \brief Convert ONNX ArgMin operation to an OV node. -/// -/// \param node The ONNX node object representing this operation. -/// -/// \return The vector containing an OV node which produces the output -/// of an ONNX ArgMax operation. -ov::OutputVector argmin(const ov::frontend::onnx::Node& node); - -} // namespace set_12 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/asin.hpp b/src/frontends/onnx/frontend/src/op/asin.cpp similarity index 57% rename from src/frontends/onnx/frontend/src/op/asin.hpp rename to src/frontends/onnx/frontend/src/op/asin.cpp index 9dcfdfbc3ccc6a..aa077e418c49e0 100644 --- a/src/frontends/onnx/frontend/src/op/asin.hpp +++ b/src/frontends/onnx/frontend/src/op/asin.cpp @@ -2,21 +2,21 @@ // SPDX-License-Identifier: Apache-2.0 // -#pragma once - -#include "core/node.hpp" #include "openvino/op/asin.hpp" +#include "core/operator_set.hpp" + namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { -inline ov::OutputVector asin(const ov::frontend::onnx::Node& node) { +namespace ai_onnx { +namespace opset_1 { +ov::OutputVector asin(const ov::frontend::onnx::Node& node) { return {std::make_shared(node.get_ov_inputs().at(0))}; } -} // namespace set_1 -} // namespace op +ONNX_OP("Asin", OPSET_SINCE(1), ai_onnx::opset_1::asin); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/asinh.hpp b/src/frontends/onnx/frontend/src/op/asinh.cpp similarity index 56% rename from src/frontends/onnx/frontend/src/op/asinh.hpp rename to 
src/frontends/onnx/frontend/src/op/asinh.cpp index 43cae5e5da9b87..9772df72708b4d 100644 --- a/src/frontends/onnx/frontend/src/op/asinh.hpp +++ b/src/frontends/onnx/frontend/src/op/asinh.cpp @@ -2,21 +2,21 @@ // SPDX-License-Identifier: Apache-2.0 // -#pragma once - -#include "core/node.hpp" #include "openvino/op/asinh.hpp" +#include "core/operator_set.hpp" + namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { -inline ov::OutputVector asinh(const ov::frontend::onnx::Node& node) { +namespace ai_onnx { +namespace opset_1 { +ov::OutputVector asinh(const ov::frontend::onnx::Node& node) { return {std::make_shared(node.get_ov_inputs().at(0))}; } -} // namespace set_1 -} // namespace op +ONNX_OP("Asinh", OPSET_SINCE(1), ai_onnx::opset_1::asinh); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/atan.hpp b/src/frontends/onnx/frontend/src/op/atan.cpp similarity index 57% rename from src/frontends/onnx/frontend/src/op/atan.hpp rename to src/frontends/onnx/frontend/src/op/atan.cpp index 9c5e79dd7c632d..c425e7b40669be 100644 --- a/src/frontends/onnx/frontend/src/op/atan.hpp +++ b/src/frontends/onnx/frontend/src/op/atan.cpp @@ -2,21 +2,21 @@ // SPDX-License-Identifier: Apache-2.0 // -#pragma once - -#include "core/node.hpp" #include "openvino/op/atan.hpp" +#include "core/operator_set.hpp" + namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { -inline ov::OutputVector atan(const ov::frontend::onnx::Node& node) { +namespace ai_onnx { +namespace opset_1 { +ov::OutputVector atan(const ov::frontend::onnx::Node& node) { return {std::make_shared(node.get_ov_inputs().at(0))}; } -} // namespace set_1 -} // namespace op +ONNX_OP("Atan", OPSET_SINCE(1), ai_onnx::opset_1::atan); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git 
a/src/frontends/onnx/frontend/src/op/atanh.hpp b/src/frontends/onnx/frontend/src/op/atanh.cpp similarity index 56% rename from src/frontends/onnx/frontend/src/op/atanh.hpp rename to src/frontends/onnx/frontend/src/op/atanh.cpp index 755689384857bf..b9342d87592ed8 100644 --- a/src/frontends/onnx/frontend/src/op/atanh.hpp +++ b/src/frontends/onnx/frontend/src/op/atanh.cpp @@ -2,21 +2,21 @@ // SPDX-License-Identifier: Apache-2.0 // -#pragma once - -#include "core/node.hpp" #include "openvino/op/atanh.hpp" +#include "core/operator_set.hpp" + namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { -inline ov::OutputVector atanh(const ov::frontend::onnx::Node& node) { +namespace ai_onnx { +namespace opset_1 { +ov::OutputVector atanh(const ov::frontend::onnx::Node& node) { return {std::make_shared(node.get_ov_inputs().at(0))}; } -} // namespace set_1 -} // namespace op +ONNX_OP("Atanh", OPSET_SINCE(1), ai_onnx::opset_1::atanh); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/aten.cpp b/src/frontends/onnx/frontend/src/op/aten.cpp index b357d43fa116e8..11265902f7af06 100644 --- a/src/frontends/onnx/frontend/src/op/aten.cpp +++ b/src/frontends/onnx/frontend/src/op/aten.cpp @@ -2,9 +2,8 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/aten.hpp" - #include "core/null_node.hpp" +#include "core/operator_set.hpp" #include "exceptions.hpp" #include "openvino/op/broadcast.hpp" #include "openvino/op/concat.hpp" @@ -16,14 +15,13 @@ #include "openvino/op/squeeze.hpp" #include "openvino/op/unsqueeze.hpp" #include "openvino/opsets/opset8.hpp" - using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector aten(const ov::frontend::onnx::Node& node) { ov::OutputVector inputs{node.get_ov_inputs()}; @@ -96,8 +94,9 @@ ov::OutputVector 
aten(const ov::frontend::onnx::Node& node) { return ov::OutputVector(node.get_outputs_size(), embedding_bag); } -} // namespace set_1 -} // namespace op +ONNX_OP("ATen", OPSET_SINCE(1), ai_onnx::opset_1::aten); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/aten.hpp b/src/frontends/onnx/frontend/src/op/aten.hpp deleted file mode 100644 index 61b3f1e8c678ca..00000000000000 --- a/src/frontends/onnx/frontend/src/op/aten.hpp +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { - -ov::OutputVector aten(const ov::frontend::onnx::Node& node); - -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/average_pool.cpp b/src/frontends/onnx/frontend/src/op/average_pool.cpp index f9bcd5a209c694..03750f4457d39c 100644 --- a/src/frontends/onnx/frontend/src/op/average_pool.cpp +++ b/src/frontends/onnx/frontend/src/op/average_pool.cpp @@ -2,21 +2,20 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/average_pool.hpp" - +#include "core/operator_set.hpp" #include "utils/pooling_factory.hpp" - namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector average_pool(const ov::frontend::onnx::Node& node) { return pooling::PoolingFactory(node).make_avg_pool(); } -} // namespace set_1 -} // namespace op +ONNX_OP("AveragePool", OPSET_SINCE(1), ai_onnx::opset_1::average_pool); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/average_pool.hpp 
b/src/frontends/onnx/frontend/src/op/average_pool.hpp deleted file mode 100644 index 7078933d74f6cd..00000000000000 --- a/src/frontends/onnx/frontend/src/op/average_pool.hpp +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -/// \brief Convert ONNX AveragePool operation to an OV node. -/// -/// \param node The ONNX node object representing this operation. -/// -/// \return The vector containing OV nodes producing output of ONNX AveragePool -/// operation. -ov::OutputVector average_pool(const ov::frontend::onnx::Node& node); - -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/batch_norm.cpp b/src/frontends/onnx/frontend/src/op/batch_norm.cpp index fc4a3c2a4fd9d1..ff419acae3e8a4 100644 --- a/src/frontends/onnx/frontend/src/op/batch_norm.cpp +++ b/src/frontends/onnx/frontend/src/op/batch_norm.cpp @@ -2,22 +2,21 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/batch_norm.hpp" +#include "openvino/op/batch_norm.hpp" #include #include #include "core/null_node.hpp" +#include "core/operator_set.hpp" #include "exceptions.hpp" -#include "openvino/op/batch_norm.hpp" - using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { // This version supports ONNX BatchNormalization-1 and BatchNormalization-6 ov::OutputVector batch_norm(const ov::frontend::onnx::Node& node) { ov::OutputVector inputs{node.get_ov_inputs()}; @@ -51,7 +50,8 @@ ov::OutputVector batch_norm(const ov::frontend::onnx::Node& node) { OPENVINO_THROW("Cannot create OpenVINO batch norm with unsupported number of inputs"); } -} // namespace set_1 +ONNX_OP("BatchNormalization", OPSET_RANGE(1, 6), 
ai_onnx::opset_1::batch_norm); +} // namespace opset_1 /* Opset 6 is skipped because there are no significant difference between opset1 and opset6. Found difference is: @@ -59,7 +59,7 @@ ov::OutputVector batch_norm(const ov::frontend::onnx::Node& node) { to avoid overflow for float16 inputs. */ -namespace set_7 { +namespace opset_7 { // This version supports ONNX BatchNormalization-7 and BatchNormalization-9 ov::OutputVector batch_norm(const ov::frontend::onnx::Node& node) { ov::OutputVector inputs{node.get_ov_inputs()}; @@ -77,7 +77,8 @@ ov::OutputVector batch_norm(const ov::frontend::onnx::Node& node) { return {std::make_shared(x, scale, bias, mean, var, epsilon)}; } -} // namespace set_7 +ONNX_OP("BatchNormalization", OPSET_RANGE(7, 13), ai_onnx::opset_7::batch_norm); +} // namespace opset_7 /* Opset 9 is skipped because there are no significant difference between opset7 and opset9. Found difference is: @@ -87,7 +88,7 @@ ov::OutputVector batch_norm(const ov::frontend::onnx::Node& node) { */ -namespace set_14 { +namespace opset_14 { // This version supports ONNX BatchNormalization-14 BatchNormalization-15 ov::OutputVector batch_norm(const ov::frontend::onnx::Node& node) { ov::OutputVector inputs{node.get_ov_inputs()}; @@ -105,7 +106,8 @@ ov::OutputVector batch_norm(const ov::frontend::onnx::Node& node) { "Training mode of BatchNormalization is not supported."); return {std::make_shared(x, scale, bias, mean, var, epsilon)}; } -} // namespace set_14 +ONNX_OP("BatchNormalization", OPSET_SINCE(14), ai_onnx::opset_14::batch_norm); +} // namespace opset_14 /* Opset 15 is skipped because there are no significant difference between opset14 and opset15. Found difference is: @@ -113,7 +115,7 @@ ov::OutputVector batch_norm(const ov::frontend::onnx::Node& node) { to avoid overflow for float16 inputs. 
*/ -} // namespace op +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/batch_norm.hpp b/src/frontends/onnx/frontend/src/op/batch_norm.hpp deleted file mode 100644 index 29a79d444152d2..00000000000000 --- a/src/frontends/onnx/frontend/src/op/batch_norm.hpp +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector batch_norm(const ov::frontend::onnx::Node& node); - -} // namespace set_1 - -namespace set_7 { -ov::OutputVector batch_norm(const ov::frontend::onnx::Node& node); - -} // namespace set_7 - -namespace set_14 { -ov::OutputVector batch_norm(const ov::frontend::onnx::Node& node); - -} // namespace set_14 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/bitshift.cpp b/src/frontends/onnx/frontend/src/op/bitshift.cpp index 63b5b37304d4af..40aaf89708eddc 100644 --- a/src/frontends/onnx/frontend/src/op/bitshift.cpp +++ b/src/frontends/onnx/frontend/src/op/bitshift.cpp @@ -2,21 +2,19 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/bitshift.hpp" - +#include "core/operator_set.hpp" #include "exceptions.hpp" #include "openvino/op/constant.hpp" #include "openvino/op/divide.hpp" #include "openvino/op/multiply.hpp" #include "openvino/op/power.hpp" - using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector bitshift(const ov::frontend::onnx::Node& node) { const ov::Output input_x = node.get_ov_inputs().at(0); const ov::Output input_y = node.get_ov_inputs().at(1); @@ -41,8 +39,9 @@ ov::OutputVector bitshift(const ov::frontend::onnx::Node& node) { } } -} // namespace set_1 -} 
// namespace op +ONNX_OP("BitShift", OPSET_SINCE(1), ai_onnx::opset_1::bitshift); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/bitshift.hpp b/src/frontends/onnx/frontend/src/op/bitshift.hpp deleted file mode 100644 index ce8438b920fe1b..00000000000000 --- a/src/frontends/onnx/frontend/src/op/bitshift.hpp +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector bitshift(const ov::frontend::onnx::Node& node); - -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/bitwise_and.cpp b/src/frontends/onnx/frontend/src/op/bitwise_and.cpp index 1062dac425b569..f640425b8d0cf2 100644 --- a/src/frontends/onnx/frontend/src/op/bitwise_and.cpp +++ b/src/frontends/onnx/frontend/src/op/bitwise_and.cpp @@ -2,24 +2,24 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/bitwise_and.hpp" - #include "openvino/op/bitwise_and.hpp" +#include "core/operator_set.hpp" using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector bitwise_and(const ov::frontend::onnx::Node& node) { const auto inputs = node.get_ov_inputs(); OPENVINO_ASSERT(inputs.size() == 2); return {std::make_shared(inputs[0], inputs[1])}; } -} // namespace set_1 -} // namespace op +ONNX_OP("BitwiseAnd", OPSET_SINCE(1), ai_onnx::opset_1::bitwise_and); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/bitwise_and.hpp b/src/frontends/onnx/frontend/src/op/bitwise_and.hpp deleted file 
mode 100644 index 91c2b922e0e823..00000000000000 --- a/src/frontends/onnx/frontend/src/op/bitwise_and.hpp +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector bitwise_and(const ov::frontend::onnx::Node& node); - -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/bitwise_not.cpp b/src/frontends/onnx/frontend/src/op/bitwise_not.cpp index 20039d7777a205..ae01896c96c2a8 100644 --- a/src/frontends/onnx/frontend/src/op/bitwise_not.cpp +++ b/src/frontends/onnx/frontend/src/op/bitwise_not.cpp @@ -2,24 +2,24 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/bitwise_not.hpp" - #include "openvino/op/bitwise_not.hpp" +#include "core/operator_set.hpp" using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector bitwise_not(const ov::frontend::onnx::Node& node) { const auto inputs = node.get_ov_inputs(); OPENVINO_ASSERT(inputs.size() == 1); return {std::make_shared(inputs[0])}; } -} // namespace set_1 -} // namespace op +ONNX_OP("BitwiseNot", OPSET_SINCE(1), ai_onnx::opset_1::bitwise_not); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/bitwise_not.hpp b/src/frontends/onnx/frontend/src/op/bitwise_not.hpp deleted file mode 100644 index b7ade4ea552275..00000000000000 --- a/src/frontends/onnx/frontend/src/op/bitwise_not.hpp +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace 
onnx { -namespace op { -namespace set_1 { -ov::OutputVector bitwise_not(const ov::frontend::onnx::Node& node); - -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/bitwise_or.cpp b/src/frontends/onnx/frontend/src/op/bitwise_or.cpp index 6d0b36ba347ebc..1306ff5c81d4b4 100644 --- a/src/frontends/onnx/frontend/src/op/bitwise_or.cpp +++ b/src/frontends/onnx/frontend/src/op/bitwise_or.cpp @@ -2,24 +2,24 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/bitwise_or.hpp" - #include "openvino/op/bitwise_or.hpp" +#include "core/operator_set.hpp" using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector bitwise_or(const ov::frontend::onnx::Node& node) { const auto inputs = node.get_ov_inputs(); OPENVINO_ASSERT(inputs.size() == 2); return {std::make_shared(inputs[0], inputs[1])}; } -} // namespace set_1 -} // namespace op +ONNX_OP("BitwiseOr", OPSET_SINCE(1), ai_onnx::opset_1::bitwise_or); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/bitwise_or.hpp b/src/frontends/onnx/frontend/src/op/bitwise_or.hpp deleted file mode 100644 index 9fe8bfd870df6f..00000000000000 --- a/src/frontends/onnx/frontend/src/op/bitwise_or.hpp +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector bitwise_or(const ov::frontend::onnx::Node& node); - -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/bitwise_xor.cpp 
b/src/frontends/onnx/frontend/src/op/bitwise_xor.cpp index 4655931950bbed..4eeb9133bfa87f 100644 --- a/src/frontends/onnx/frontend/src/op/bitwise_xor.cpp +++ b/src/frontends/onnx/frontend/src/op/bitwise_xor.cpp @@ -2,24 +2,24 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/bitwise_xor.hpp" - #include "openvino/op/bitwise_xor.hpp" +#include "core/operator_set.hpp" using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector bitwise_xor(const ov::frontend::onnx::Node& node) { const auto inputs = node.get_ov_inputs(); OPENVINO_ASSERT(inputs.size() == 2); return {std::make_shared(inputs[0], inputs[1])}; } -} // namespace set_1 -} // namespace op +ONNX_OP("BitwiseXor", OPSET_SINCE(1), ai_onnx::opset_1::bitwise_xor); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/bitwise_xor.hpp b/src/frontends/onnx/frontend/src/op/bitwise_xor.hpp deleted file mode 100644 index c48cadb0854c19..00000000000000 --- a/src/frontends/onnx/frontend/src/op/bitwise_xor.hpp +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector bitwise_xor(const ov::frontend::onnx::Node& node); - -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/blackmanwindow.cpp b/src/frontends/onnx/frontend/src/op/blackmanwindow.cpp index cda409cbbebbce..ddf38dda6232ee 100644 --- a/src/frontends/onnx/frontend/src/op/blackmanwindow.cpp +++ b/src/frontends/onnx/frontend/src/op/blackmanwindow.cpp @@ -3,10 +3,9 @@ #define _USE_MATH_DEFINES -#include "op/blackmanwindow.hpp" - #include 
+#include "core/operator_set.hpp" #include "openvino/op/add.hpp" #include "openvino/op/constant.hpp" #include "openvino/op/convert.hpp" @@ -16,14 +15,13 @@ #include "openvino/op/range.hpp" #include "openvino/op/subtract.hpp" #include "utils/common.hpp" - using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector blackmanwindow(const ov::frontend::onnx::Node& node) { const auto size = node.get_ov_inputs().at(0); const auto output_datatype = common::get_ov_element_type(node.get_attribute_value("output_datatype", 1)); @@ -76,8 +74,9 @@ ov::OutputVector blackmanwindow(const ov::frontend::onnx::Node& node) { return {std::make_shared(y_values, output_datatype)}; } } -} // namespace set_1 -} // namespace op +ONNX_OP("BlackmanWindow", OPSET_SINCE(1), ai_onnx::opset_1::blackmanwindow); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/blackmanwindow.hpp b/src/frontends/onnx/frontend/src/op/blackmanwindow.hpp deleted file mode 100644 index 300fe17dc3ff83..00000000000000 --- a/src/frontends/onnx/frontend/src/op/blackmanwindow.hpp +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { - -ov::OutputVector blackmanwindow(const ov::frontend::onnx::Node& node); - -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/cast.cpp b/src/frontends/onnx/frontend/src/op/cast.cpp index ac5157519a58ff..9713d0f0f08942 100644 --- a/src/frontends/onnx/frontend/src/op/cast.cpp +++ b/src/frontends/onnx/frontend/src/op/cast.cpp @@ -2,18 +2,16 @@ // SPDX-License-Identifier: Apache-2.0 // 
-#include "op/cast.hpp" - +#include "core/operator_set.hpp" #include "openvino/op/convert.hpp" #include "utils/common.hpp" - using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector cast(const ov::frontend::onnx::Node& node) { auto data = node.get_ov_inputs().at(0); @@ -23,8 +21,9 @@ ov::OutputVector cast(const ov::frontend::onnx::Node& node) { return {std::make_shared(data, elem_type)}; } -} // namespace set_1 -} // namespace op +ONNX_OP("Cast", OPSET_SINCE(1), ai_onnx::opset_1::cast); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/cast.hpp b/src/frontends/onnx/frontend/src/op/cast.hpp deleted file mode 100644 index 7aa6c60b2e552d..00000000000000 --- a/src/frontends/onnx/frontend/src/op/cast.hpp +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { - -ov::OutputVector cast(const ov::frontend::onnx::Node& node); - -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/cast_like.cpp b/src/frontends/onnx/frontend/src/op/cast_like.cpp index 5786c0ba40a8d4..d00b8d71f85e96 100644 --- a/src/frontends/onnx/frontend/src/op/cast_like.cpp +++ b/src/frontends/onnx/frontend/src/op/cast_like.cpp @@ -2,25 +2,24 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/cast_like.hpp" - +#include "core/operator_set.hpp" #include "openvino/op/convert_like.hpp" - using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector cast_like(const 
ov::frontend::onnx::Node& node) { auto inputs = node.get_ov_inputs(); return {std::make_shared(inputs.at(0), inputs.at(1))}; } -} // namespace set_1 -} // namespace op +ONNX_OP("CastLike", OPSET_SINCE(1), ai_onnx::opset_1::cast_like); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/cast_like.hpp b/src/frontends/onnx/frontend/src/op/cast_like.hpp deleted file mode 100644 index 1a691ad85c3344..00000000000000 --- a/src/frontends/onnx/frontend/src/op/cast_like.hpp +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { - -ov::OutputVector cast_like(const ov::frontend::onnx::Node& node); - -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/ceil.hpp b/src/frontends/onnx/frontend/src/op/ceil.cpp similarity index 57% rename from src/frontends/onnx/frontend/src/op/ceil.hpp rename to src/frontends/onnx/frontend/src/op/ceil.cpp index 145c6e079e87f3..0ea69750948f8a 100644 --- a/src/frontends/onnx/frontend/src/op/ceil.hpp +++ b/src/frontends/onnx/frontend/src/op/ceil.cpp @@ -2,22 +2,21 @@ // SPDX-License-Identifier: Apache-2.0 // -#pragma once - -#include "core/node.hpp" +#include "core/operator_set.hpp" #include "openvino/op/ceiling.hpp" namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { -inline ov::OutputVector ceil(const ov::frontend::onnx::Node& node) { +namespace ai_onnx { +namespace opset_1 { +ov::OutputVector ceil(const ov::frontend::onnx::Node& node) { return {std::make_shared(node.get_ov_inputs().at(0))}; } -} // namespace set_1 -} // namespace op +ONNX_OP("Ceil", OPSET_SINCE(1), ai_onnx::opset_1::ceil); +} // namespace 
opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/celu.cpp b/src/frontends/onnx/frontend/src/op/celu.cpp index c0c078e0a3d7a1..28d33be9f8b052 100644 --- a/src/frontends/onnx/frontend/src/op/celu.cpp +++ b/src/frontends/onnx/frontend/src/op/celu.cpp @@ -1,24 +1,22 @@ // Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 -#include "op/celu.hpp" - #include +#include "core/operator_set.hpp" #include "exceptions.hpp" #include "openvino/op/constant.hpp" #include "openvino/op/divide.hpp" #include "openvino/op/elu.hpp" #include "openvino/op/multiply.hpp" #include "utils/common.hpp" - using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector celu(const ov::frontend::onnx::Node& node) { auto alpha_node = node.get_attribute_as_constant("alpha", 1.0f); auto x_celu = node.get_ov_inputs().at(0); @@ -28,8 +26,9 @@ ov::OutputVector celu(const ov::frontend::onnx::Node& node) { return {std::make_shared(alpha_node, elu_node)}; } -} // namespace set_1 -} // namespace op +ONNX_OP("Celu", OPSET_SINCE(1), ai_onnx::opset_1::celu); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/celu.hpp b/src/frontends/onnx/frontend/src/op/celu.hpp deleted file mode 100644 index 0b47beb6d237a5..00000000000000 --- a/src/frontends/onnx/frontend/src/op/celu.hpp +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector celu(const ov::frontend::onnx::Node& node); - -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // 
namespace ov diff --git a/src/frontends/onnx/frontend/src/op/clip.cpp b/src/frontends/onnx/frontend/src/op/clip.cpp index 5a3f22fe638877..1a44870c4fcaaa 100644 --- a/src/frontends/onnx/frontend/src/op/clip.cpp +++ b/src/frontends/onnx/frontend/src/op/clip.cpp @@ -2,23 +2,21 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/clip.hpp" - #include #include "core/null_node.hpp" +#include "core/operator_set.hpp" #include "openvino/core/validation_util.hpp" #include "openvino/op/clamp.hpp" #include "openvino/op/maximum.hpp" #include "openvino/op/minimum.hpp" - using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector clip(const ov::frontend::onnx::Node& node) { const auto data = node.get_ov_inputs().at(0); @@ -29,9 +27,10 @@ ov::OutputVector clip(const ov::frontend::onnx::Node& node) { return {std::make_shared(data, min_value, max_value)}; } -} // namespace set_1 +ONNX_OP("Clip", OPSET_RANGE(1, 10), ai_onnx::opset_1::clip); +} // namespace opset_1 -namespace set_11 { +namespace opset_11 { namespace { std::shared_ptr get_constant_lowest_of_type(ov::element::Type_t t) { #define OPENVINO_TYPE_TO_LOWEST_CONST(t) \ @@ -99,8 +98,9 @@ ov::OutputVector clip(const ov::frontend::onnx::Node& node) { return {std::make_shared(max, max_of_min_and_data)}; } -} // namespace set_11 -} // namespace op +ONNX_OP("Clip", OPSET_SINCE(11), ai_onnx::opset_11::clip); +} // namespace opset_11 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/clip.hpp b/src/frontends/onnx/frontend/src/op/clip.hpp deleted file mode 100644 index 64b551c0b02f1c..00000000000000 --- a/src/frontends/onnx/frontend/src/op/clip.hpp +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace 
frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector clip(const ov::frontend::onnx::Node& node); - -} // namespace set_1 - -namespace set_11 { -ov::OutputVector clip(const ov::frontend::onnx::Node& node); - -} // namespace set_11 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/com.microsoft/aliases.cpp b/src/frontends/onnx/frontend/src/op/com.microsoft/aliases.cpp new file mode 100644 index 00000000000000..27fb1d34b45c8e --- /dev/null +++ b/src/frontends/onnx/frontend/src/op/com.microsoft/aliases.cpp @@ -0,0 +1,38 @@ +// Copyright (C) 2018-2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "core/operator_set.hpp" + +namespace ov { +namespace frontend { +namespace onnx { +namespace ai_onnx { +namespace opset_1 { +extern ov::OutputVector gather_nd(const ov::frontend::onnx::Node& node); +extern ov::OutputVector trilu(const ov::frontend::onnx::Node& node); +extern ov::OutputVector gelu(const ov::frontend::onnx::Node& node); +} // namespace opset_1 +namespace opset_13 { +extern ov::OutputVector dequantize_linear(const ov::frontend::onnx::Node& node); +extern ov::OutputVector quantize_linear(const ov::frontend::onnx::Node& node); +} // namespace opset_13 +} // namespace ai_onnx + +namespace com_microsoft { +namespace opset_1 { +bool register_multiple_translators(void) { + ONNX_OP_M("DequantizeLinear", OPSET_SINCE(1), ai_onnx::opset_13::dequantize_linear, MICROSOFT_DOMAIN); + ONNX_OP_M("GatherND", OPSET_SINCE(1), ai_onnx::opset_1::gather_nd, MICROSOFT_DOMAIN); + ONNX_OP_M("Gelu", OPSET_SINCE(1), ai_onnx::opset_1::gelu, MICROSOFT_DOMAIN); + ONNX_OP_M("QuantizeLinear", OPSET_SINCE(1), ai_onnx::opset_13::quantize_linear, MICROSOFT_DOMAIN); + ONNX_OP_M("Trilu", OPSET_SINCE(1), ai_onnx::opset_1::trilu, MICROSOFT_DOMAIN); + return true; +} + +static bool registered = register_multiple_translators(); +} // namespace opset_1 +} // namespace 
com_microsoft +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/com.microsoft/attention.cpp b/src/frontends/onnx/frontend/src/op/com.microsoft/attention.cpp index 375e90f5084c43..9ac735c0a39903 100644 --- a/src/frontends/onnx/frontend/src/op/com.microsoft/attention.cpp +++ b/src/frontends/onnx/frontend/src/op/com.microsoft/attention.cpp @@ -2,9 +2,8 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/com.microsoft/attention.hpp" - #include "core/null_node.hpp" +#include "core/operator_set.hpp" #include "openvino/frontend/exception.hpp" #include "openvino/op/add.hpp" #include "openvino/op/broadcast.hpp" @@ -37,14 +36,13 @@ #include "openvino/op/transpose.hpp" #include "openvino/op/unsqueeze.hpp" #include "utils/split.hpp" - using namespace ov::op; using ov::Shape; namespace ov { namespace frontend { namespace onnx { -namespace op { +namespace com_microsoft { namespace detail { namespace { ov::NodeVector split_to_QKV(const std::shared_ptr& node, @@ -70,7 +68,7 @@ std::shared_ptr get_present_state(const std::shared_ptr& K, } // namespace } // namespace detail -namespace set_1 { +namespace opset_1 { ov::OutputVector attention(const ov::frontend::onnx::Node& node) { auto nodes = node.get_ov_inputs(); const auto& input = nodes[0]; @@ -115,7 +113,8 @@ ov::OutputVector attention(const ov::frontend::onnx::Node& node) { return {output, present}; } -} // namespace set_1 +ONNX_OP("Attention", OPSET_SINCE(1), com_microsoft::opset_1::attention, MICROSOFT_DOMAIN); +} // namespace opset_1 namespace detail { namespace { @@ -560,7 +559,7 @@ std::shared_ptr get_present_state(const std::shared_ptr& K, } } // namespace } // namespace detail -} // namespace op +} // namespace com_microsoft } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/com.microsoft/attention.hpp b/src/frontends/onnx/frontend/src/op/com.microsoft/attention.hpp deleted file mode 100644 
index c87344184ecea5..00000000000000 --- a/src/frontends/onnx/frontend/src/op/com.microsoft/attention.hpp +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector attention(const ov::frontend::onnx::Node& node); -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/com.microsoft/bias_gelu.cpp b/src/frontends/onnx/frontend/src/op/com.microsoft/bias_gelu.cpp index 38bf8b3f7c21f1..b0991d4cf51e93 100644 --- a/src/frontends/onnx/frontend/src/op/com.microsoft/bias_gelu.cpp +++ b/src/frontends/onnx/frontend/src/op/com.microsoft/bias_gelu.cpp @@ -2,26 +2,25 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/com.microsoft/bias_gelu.hpp" - +#include "core/operator_set.hpp" #include "openvino/frontend/exception.hpp" #include "openvino/op/add.hpp" #include "openvino/op/gelu.hpp" - using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace com_microsoft { +namespace opset_1 { ov::OutputVector bias_gelu(const ov::frontend::onnx::Node& node) { auto nodes = node.get_ov_inputs(); FRONT_END_GENERAL_CHECK(nodes.size() == 2, "BiasGelu takes 2 inputs. 
Provided " + std::to_string(nodes.size())); return {std::make_shared(std::make_shared(nodes.at(0), nodes.at(1)))}; } -} // namespace set_1 -} // namespace op +ONNX_OP("BiasGelu", OPSET_SINCE(1), com_microsoft::opset_1::bias_gelu, MICROSOFT_DOMAIN); +} // namespace opset_1 +} // namespace com_microsoft } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/com.microsoft/bias_gelu.hpp b/src/frontends/onnx/frontend/src/op/com.microsoft/bias_gelu.hpp deleted file mode 100644 index 9295a8995d0559..00000000000000 --- a/src/frontends/onnx/frontend/src/op/com.microsoft/bias_gelu.hpp +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector bias_gelu(const ov::frontend::onnx::Node& node); -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/com.microsoft/embed_layer_normalization.cpp b/src/frontends/onnx/frontend/src/op/com.microsoft/embed_layer_normalization.cpp index eae772aaeac2f4..b36369ae838e71 100644 --- a/src/frontends/onnx/frontend/src/op/com.microsoft/embed_layer_normalization.cpp +++ b/src/frontends/onnx/frontend/src/op/com.microsoft/embed_layer_normalization.cpp @@ -2,9 +2,8 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/com.microsoft/embed_layer_normalization.hpp" - #include "core/null_node.hpp" +#include "core/operator_set.hpp" #include "openvino/frontend/exception.hpp" #include "openvino/op/add.hpp" #include "openvino/op/broadcast.hpp" @@ -15,15 +14,14 @@ #include "openvino/op/reduce_sum.hpp" #include "openvino/op/shape_of.hpp" #include "openvino/op/slice.hpp" - using namespace ov::op; using ov::Shape; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace 
set_1 { +namespace com_microsoft { +namespace opset_1 { ov::OutputVector embed_layer_normalization(const ov::frontend::onnx::Node& node) { auto nodes = node.get_ov_inputs(); auto num_nodes = nodes.size(); @@ -99,8 +97,9 @@ ov::OutputVector embed_layer_normalization(const ov::frontend::onnx::Node& node) } return {result, mask_index}; } -} // namespace set_1 -} // namespace op +ONNX_OP("EmbedLayerNormalization", OPSET_SINCE(1), com_microsoft::opset_1::embed_layer_normalization, MICROSOFT_DOMAIN); +} // namespace opset_1 +} // namespace com_microsoft } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/com.microsoft/embed_layer_normalization.hpp b/src/frontends/onnx/frontend/src/op/com.microsoft/embed_layer_normalization.hpp deleted file mode 100644 index 4b2d4c1094d10a..00000000000000 --- a/src/frontends/onnx/frontend/src/op/com.microsoft/embed_layer_normalization.hpp +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector embed_layer_normalization(const ov::frontend::onnx::Node& node); -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/com.microsoft/fused_conv.cpp b/src/frontends/onnx/frontend/src/op/com.microsoft/fused_conv.cpp index 79badc0d048bf0..946c0255530dcf 100644 --- a/src/frontends/onnx/frontend/src/op/com.microsoft/fused_conv.cpp +++ b/src/frontends/onnx/frontend/src/op/com.microsoft/fused_conv.cpp @@ -2,13 +2,11 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/com.microsoft/fused_conv.hpp" - #include #include +#include "core/operator_set.hpp" #include "exceptions.hpp" -#include "op/conv.hpp" #include "openvino/op/add.hpp" #include "openvino/op/clamp.hpp" #include 
"openvino/op/constant.hpp" @@ -18,17 +16,22 @@ #include "openvino/op/sigmoid.hpp" #include "openvino/op/tan.hpp" #include "openvino/op/tanh.hpp" - using namespace ov::op; using ov::Shape; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { +extern ov::OutputVector conv(const ov::frontend::onnx::Node& node); +} // namespace opset_1 +} // namespace ai_onnx + +namespace com_microsoft { +namespace opset_1 { ov::OutputVector fused_conv(const ov::frontend::onnx::Node& node) { - auto conv_res = conv(node).at(0); + auto conv_res = ai_onnx::opset_1::conv(node).at(0); if (node.get_ov_inputs().size() == 4) { // Z input provided conv_res = std::make_shared(conv_res, node.get_ov_inputs()[3]); @@ -71,8 +74,9 @@ ov::OutputVector fused_conv(const ov::frontend::onnx::Node& node) { return {conv_res}; } -} // namespace set_1 -} // namespace op +ONNX_OP("FusedConv", OPSET_SINCE(1), com_microsoft::opset_1::fused_conv, MICROSOFT_DOMAIN); +} // namespace opset_1 +} // namespace com_microsoft } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/com.microsoft/fused_conv.hpp b/src/frontends/onnx/frontend/src/op/com.microsoft/fused_conv.hpp deleted file mode 100644 index 57562c593f27c2..00000000000000 --- a/src/frontends/onnx/frontend/src/op/com.microsoft/fused_conv.hpp +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector fused_conv(const ov::frontend::onnx::Node& node); -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/com.microsoft/fusedgemm.cpp b/src/frontends/onnx/frontend/src/op/com.microsoft/fusedgemm.cpp index 
8359d76617b79a..96a437e7b4ff17 100644 --- a/src/frontends/onnx/frontend/src/op/com.microsoft/fusedgemm.cpp +++ b/src/frontends/onnx/frontend/src/op/com.microsoft/fusedgemm.cpp @@ -2,11 +2,10 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "fusedgemm.hpp" - #include #include "core/null_node.hpp" +#include "core/operator_set.hpp" #include "openvino/frontend/exception.hpp" #include "openvino/op/add.hpp" #include "openvino/op/constant.hpp" @@ -14,15 +13,14 @@ #include "openvino/op/multiply.hpp" #include "openvino/op/prelu.hpp" #include "openvino/op/relu.hpp" - using namespace ov::op; using ov::Shape; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace com_microsoft { +namespace opset_1 { ov::OutputVector fusedgemm(const ov::frontend::onnx::Node& node) { ov::OutputVector inputs{node.get_ov_inputs()}; auto num_inputs = inputs.size(); @@ -63,8 +61,9 @@ ov::OutputVector fusedgemm(const ov::frontend::onnx::Node& node) { return {std::make_shared(gemm_res)}; } -} // namespace set_1 -} // namespace op +ONNX_OP("FusedGemm", OPSET_SINCE(1), com_microsoft::opset_1::fusedgemm, MICROSOFT_DOMAIN); +} // namespace opset_1 +} // namespace com_microsoft } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/com.microsoft/fusedgemm.hpp b/src/frontends/onnx/frontend/src/op/com.microsoft/fusedgemm.hpp deleted file mode 100644 index 5b23fb419fd302..00000000000000 --- a/src/frontends/onnx/frontend/src/op/com.microsoft/fusedgemm.hpp +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector fusedgemm(const ov::frontend::onnx::Node& node); -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git 
a/src/frontends/onnx/frontend/src/op/com.microsoft/pad.cpp b/src/frontends/onnx/frontend/src/op/com.microsoft/pad.cpp index 2bc7d22767862c..0d2596d251200d 100644 --- a/src/frontends/onnx/frontend/src/op/com.microsoft/pad.cpp +++ b/src/frontends/onnx/frontend/src/op/com.microsoft/pad.cpp @@ -2,17 +2,16 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/com.microsoft/pad.hpp" +#include "openvino/op/pad.hpp" #include "core/null_node.hpp" +#include "core/operator_set.hpp" #include "exceptions.hpp" #include "openvino/op/constant.hpp" -#include "openvino/op/pad.hpp" #include "openvino/op/squeeze.hpp" #include "openvino/op/util/op_types.hpp" #include "utils/reshape.hpp" #include "utils/split.hpp" - namespace { ov::op::PadMode get_pad_mode(std::string mode) { ov::op::PadMode pad_mode; @@ -36,9 +35,8 @@ using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace custom { -namespace set_1 { +namespace com_microsoft { +namespace opset_1 { ov::OutputVector pad(const ov::frontend::onnx::Node& node) { const auto inputs = node.get_ov_inputs(); const auto& data = inputs[0]; @@ -80,9 +78,11 @@ ov::OutputVector pad(const ov::frontend::onnx::Node& node) { return {std::make_shared(data, padding_begin, padding_end, values, pad_mode)}; } -} // namespace set_1 -} // namespace custom -} // namespace op + +ONNX_OP("Pad", OPSET_SINCE(1), com_microsoft::opset_1::pad, MICROSOFT_DOMAIN); + +} // namespace opset_1 +} // namespace com_microsoft } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/com.microsoft/pad.hpp b/src/frontends/onnx/frontend/src/op/com.microsoft/pad.hpp deleted file mode 100644 index b2336a120b41d6..00000000000000 --- a/src/frontends/onnx/frontend/src/op/com.microsoft/pad.hpp +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace 
frontend { -namespace onnx { -namespace op { -namespace custom { -namespace set_1 { -ov::OutputVector pad(const ov::frontend::onnx::Node& node); -} // namespace set_1 -} // namespace custom -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/com.microsoft/skip_layer_normalization.cpp b/src/frontends/onnx/frontend/src/op/com.microsoft/skip_layer_normalization.cpp index f003ace40cdfd2..65dcac5e939e2e 100644 --- a/src/frontends/onnx/frontend/src/op/com.microsoft/skip_layer_normalization.cpp +++ b/src/frontends/onnx/frontend/src/op/com.microsoft/skip_layer_normalization.cpp @@ -2,22 +2,20 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/com.microsoft/skip_layer_normalization.hpp" - +#include "core/operator_set.hpp" #include "openvino/frontend/exception.hpp" #include "openvino/op/add.hpp" #include "openvino/op/constant.hpp" #include "openvino/op/multiply.hpp" #include "openvino/op/mvn.hpp" - using namespace ov::op; using ov::Shape; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace com_microsoft { +namespace opset_1 { ov::OutputVector skip_layer_normalization(const ov::frontend::onnx::Node& node) { auto nodes = node.get_ov_inputs(); auto num_nodes = nodes.size(); @@ -47,8 +45,9 @@ ov::OutputVector skip_layer_normalization(const ov::frontend::onnx::Node& node) // - we'd have to unroll MVN to have them return result->outputs(); } -} // namespace set_1 -} // namespace op +ONNX_OP("SkipLayerNormalization", OPSET_SINCE(1), com_microsoft::opset_1::skip_layer_normalization, MICROSOFT_DOMAIN); +} // namespace opset_1 +} // namespace com_microsoft } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/com.microsoft/skip_layer_normalization.hpp b/src/frontends/onnx/frontend/src/op/com.microsoft/skip_layer_normalization.hpp deleted file mode 100644 index 5b02910f0e13e9..00000000000000 --- 
a/src/frontends/onnx/frontend/src/op/com.microsoft/skip_layer_normalization.hpp +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector skip_layer_normalization(const ov::frontend::onnx::Node& node); -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/compress.cpp b/src/frontends/onnx/frontend/src/op/compress.cpp index 84a11c2f17734a..a507713460288b 100644 --- a/src/frontends/onnx/frontend/src/op/compress.cpp +++ b/src/frontends/onnx/frontend/src/op/compress.cpp @@ -2,22 +2,20 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/compress.hpp" - +#include "core/operator_set.hpp" #include "openvino/op/constant.hpp" #include "openvino/op/gather.hpp" #include "openvino/op/non_zero.hpp" #include "openvino/op/squeeze.hpp" #include "utils/reshape.hpp" - using namespace ov::op; using ov::Shape; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector compress(const ov::frontend::onnx::Node& node) { auto data = node.get_ov_inputs().at(0); auto condition = node.get_ov_inputs().at(1); @@ -39,8 +37,9 @@ ov::OutputVector compress(const ov::frontend::onnx::Node& node) { return {result}; } -} // namespace set_1 -} // namespace op +ONNX_OP("Compress", OPSET_SINCE(1), ai_onnx::opset_1::compress); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/compress.hpp b/src/frontends/onnx/frontend/src/op/compress.hpp deleted file mode 100644 index ae6bde1c835691..00000000000000 --- a/src/frontends/onnx/frontend/src/op/compress.hpp +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 
(C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector compress(const ov::frontend::onnx::Node& node); -} -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/concat.cpp b/src/frontends/onnx/frontend/src/op/concat.cpp index 99e94e052f4374..6f775dec78c0d2 100644 --- a/src/frontends/onnx/frontend/src/op/concat.cpp +++ b/src/frontends/onnx/frontend/src/op/concat.cpp @@ -2,18 +2,17 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/concat.hpp" - #include "openvino/op/concat.hpp" -#include "utils/common.hpp" +#include "core/operator_set.hpp" +#include "utils/common.hpp" using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector concat(const ov::frontend::onnx::Node& node) { ov::OutputVector inputs{node.get_ov_inputs()}; std::int64_t axis = node.get_attribute_value("axis"); @@ -24,8 +23,9 @@ ov::OutputVector concat(const ov::frontend::onnx::Node& node) { return {std::make_shared(valid_inputs, axis)}; } -} // namespace set_1 -} // namespace op +ONNX_OP("Concat", OPSET_SINCE(1), ai_onnx::opset_1::concat); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/concat.hpp b/src/frontends/onnx/frontend/src/op/concat.hpp deleted file mode 100644 index 9622e96f24e84d..00000000000000 --- a/src/frontends/onnx/frontend/src/op/concat.hpp +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector 
concat(const ov::frontend::onnx::Node& node); - -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/constant.cpp b/src/frontends/onnx/frontend/src/op/constant.cpp index 79a001aeefd615..a2ff85381a5bd8 100644 --- a/src/frontends/onnx/frontend/src/op/constant.cpp +++ b/src/frontends/onnx/frontend/src/op/constant.cpp @@ -2,23 +2,22 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/constant.hpp" +#include "openvino/op/constant.hpp" #include #include "core/attribute.hpp" +#include "core/operator_set.hpp" #include "core/sparse_tensor.hpp" #include "core/tensor.hpp" #include "openvino/frontend/exception.hpp" -#include "openvino/op/constant.hpp" - using namespace ov::op; using ov::Shape; namespace ov { namespace frontend { namespace onnx { -namespace op { +namespace ai_onnx { namespace { template std::vector get_dense_vector(const std::vector& values, const std::vector& indices, const size_t size) { @@ -106,15 +105,16 @@ std::vector get_absolute_indices(const Tensor& indices_tensor, const ov } } // namespace -namespace set_1 { +namespace opset_1 { ov::OutputVector constant(const ov::frontend::onnx::Node& node) { auto tensor = node.get_attribute_value("value"); return {tensor.get_ov_constant()}; } -} // namespace set_1 +ONNX_OP("Constant", OPSET_RANGE(1, 12), ai_onnx::opset_1::constant); +} // namespace opset_1 -namespace set_13 { +namespace opset_13 { ov::OutputVector constant(const ov::frontend::onnx::Node& node) { auto attributes_names = node.get_attribute_names(); FRONT_END_GENERAL_CHECK(attributes_names.size() == 1, @@ -183,8 +183,9 @@ ov::OutputVector constant(const ov::frontend::onnx::Node& node) { auto tensor = node.get_attribute_value(attributes_names[0]); return {tensor.get_ov_constant()}; } -} // namespace set_13 -} // namespace op +ONNX_OP("Constant", OPSET_SINCE(13), ai_onnx::opset_13::constant); +} // namespace opset_13 +} // namespace ai_onnx } // 
namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/constant.hpp b/src/frontends/onnx/frontend/src/op/constant.hpp deleted file mode 100644 index f9f08081031c6b..00000000000000 --- a/src/frontends/onnx/frontend/src/op/constant.hpp +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector constant(const ov::frontend::onnx::Node& node); - -} // namespace set_1 - -namespace set_13 { -ov::OutputVector constant(const ov::frontend::onnx::Node& node); - -} // namespace set_13 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/constant_fill.cpp b/src/frontends/onnx/frontend/src/op/constant_fill.cpp index 470e777e9b3b3e..54efb25f8d1d5d 100644 --- a/src/frontends/onnx/frontend/src/op/constant_fill.cpp +++ b/src/frontends/onnx/frontend/src/op/constant_fill.cpp @@ -2,10 +2,9 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/constant_fill.hpp" - #include // onnx types +#include "core/operator_set.hpp" using namespace ::ONNX_NAMESPACE; #include "exceptions.hpp" @@ -19,8 +18,8 @@ using namespace ov::frontend::onnx::common; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector constant_fill(const ov::frontend::onnx::Node& node) { ov::Output target_shape; const auto dtype = node.get_attribute_value("dtype", static_cast(TensorProto_DataType_FLOAT)); @@ -46,8 +45,9 @@ ov::OutputVector constant_fill(const ov::frontend::onnx::Node& node) { return {std::make_shared(const_val_to_fill, target_shape)}; } -} // namespace set_1 -} // namespace op +ONNX_OP("ConstantFill", OPSET_SINCE(1), ai_onnx::opset_1::constant_fill); +} // namespace opset_1 +} 
// namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/constant_fill.hpp b/src/frontends/onnx/frontend/src/op/constant_fill.hpp deleted file mode 100644 index be332b629b2391..00000000000000 --- a/src/frontends/onnx/frontend/src/op/constant_fill.hpp +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -// ConstantFill is a deprecated experimental operator removed in ONNX 1.4 -ov::OutputVector constant_fill(const ov::frontend::onnx::Node& node); -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/constant_of_shape.cpp b/src/frontends/onnx/frontend/src/op/constant_of_shape.cpp index 72b551879b82bf..88cc222990a348 100644 --- a/src/frontends/onnx/frontend/src/op/constant_of_shape.cpp +++ b/src/frontends/onnx/frontend/src/op/constant_of_shape.cpp @@ -2,23 +2,20 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/constant_of_shape.hpp" - #include "core/null_node.hpp" +#include "core/operator_set.hpp" #include "core/tensor.hpp" -#include "op/constant.hpp" #include "openvino/op/broadcast.hpp" #include "openvino/op/constant.hpp" #include "utils/common.hpp" #include "utils/reshape.hpp" - using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector constant_of_shape(const ov::frontend::onnx::Node& node) { ov::Output constant_value; if (node.has_attribute("value")) { @@ -36,8 +33,9 @@ ov::OutputVector constant_of_shape(const ov::frontend::onnx::Node& node) { return {std::make_shared(constant_value, inputs[0])}; } -} // namespace set_1 -} // namespace op +ONNX_OP("ConstantOfShape", 
OPSET_SINCE(1), ai_onnx::opset_1::constant_of_shape); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/constant_of_shape.hpp b/src/frontends/onnx/frontend/src/op/constant_of_shape.hpp deleted file mode 100644 index dc68af2a579b34..00000000000000 --- a/src/frontends/onnx/frontend/src/op/constant_of_shape.hpp +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector constant_of_shape(const ov::frontend::onnx::Node& node); - -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/conv.cpp b/src/frontends/onnx/frontend/src/op/conv.cpp index ecceb458bbab21..f113e3cadb0fe7 100644 --- a/src/frontends/onnx/frontend/src/op/conv.cpp +++ b/src/frontends/onnx/frontend/src/op/conv.cpp @@ -2,9 +2,8 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/conv.hpp" - #include "core/null_node.hpp" +#include "core/operator_set.hpp" #include "exceptions.hpp" #include "openvino/frontend/exception.hpp" #include "openvino/op/add.hpp" @@ -12,14 +11,13 @@ #include "utils/conv_factory.hpp" #include "utils/convpool.hpp" #include "utils/reshape.hpp" - using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { namespace detail { std::shared_ptr add_bias(const ov::Output& ng_conv, const ov::Output& bias) { @@ -76,8 +74,9 @@ ov::OutputVector conv(const ov::frontend::onnx::Node& node) { const ov::OutputVector& inputs = node.get_ov_inputs(); return detail::conv(node, inputs[0], inputs[1], inputs.size() < 3 ? 
std::make_shared() : inputs[2]); } -} // namespace set_1 -} // namespace op +ONNX_OP("Conv", OPSET_SINCE(1), ai_onnx::opset_1::conv); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/conv.hpp b/src/frontends/onnx/frontend/src/op/conv.hpp deleted file mode 100644 index 05887ec9751642..00000000000000 --- a/src/frontends/onnx/frontend/src/op/conv.hpp +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" -#include "openvino/core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -namespace detail { -ov::OutputVector conv(const ov::frontend::onnx::Node& node, - ov::Output data, - ov::Output filters, - ov::Output bias); -} -/// \brief Performs ONNX Conv operation. -/// -/// \param node The ONNX node object representing this operation. -/// -/// \return The vector containing OV nodes producing output of ONNX convolution -/// operation. 
-ov::OutputVector conv(const ov::frontend::onnx::Node& node); - -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/conv_integer.cpp b/src/frontends/onnx/frontend/src/op/conv_integer.cpp index c3f79eaea3bb96..658877c9ef3c91 100644 --- a/src/frontends/onnx/frontend/src/op/conv_integer.cpp +++ b/src/frontends/onnx/frontend/src/op/conv_integer.cpp @@ -2,8 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/conv_integer.hpp" - +#include "core/operator_set.hpp" #include "openvino/op/constant.hpp" #include "openvino/op/convert.hpp" #include "openvino/op/range.hpp" @@ -13,7 +12,6 @@ #include "utils/conv_factory.hpp" #include "utils/convpool.hpp" #include "utils/reshape.hpp" - using namespace ov::op; namespace ov { @@ -42,8 +40,8 @@ std::shared_ptr get_filter_zero_point(const ov::OutputVector& inputs) } } } // namespace -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector conv_integer(const ov::frontend::onnx::Node& node) { const ov::OutputVector& inputs = node.get_ov_inputs(); @@ -80,8 +78,9 @@ ov::OutputVector conv_integer(const ov::frontend::onnx::Node& node) { return {conv_node}; } -} // namespace set_1 -} // namespace op +ONNX_OP("ConvInteger", OPSET_SINCE(1), ai_onnx::opset_1::conv_integer); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/conv_integer.hpp b/src/frontends/onnx/frontend/src/op/conv_integer.hpp deleted file mode 100644 index 44b5d1ac66f093..00000000000000 --- a/src/frontends/onnx/frontend/src/op/conv_integer.hpp +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -/// \brief Performs ONNX 
ConvInteger operation. -/// -/// \param node The ONNX node object representing this operation. -/// -/// \return The vector containing OV nodes producing output of quantized ONNX -/// convolution operation. -ov::OutputVector conv_integer(const ov::frontend::onnx::Node& node); - -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/conv_transpose.cpp b/src/frontends/onnx/frontend/src/op/conv_transpose.cpp index 2cdc88ba784d22..3054cbbae16ee2 100644 --- a/src/frontends/onnx/frontend/src/op/conv_transpose.cpp +++ b/src/frontends/onnx/frontend/src/op/conv_transpose.cpp @@ -2,8 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/conv_transpose.hpp" - +#include "core/operator_set.hpp" #include "exceptions.hpp" #include "openvino/core/coordinate_diff.hpp" #include "openvino/op/add.hpp" @@ -18,7 +17,6 @@ #include "openvino/op/subtract.hpp" #include "utils/convpool.hpp" #include "utils/reshape.hpp" - using namespace ov::op; using ov::CoordinateDiff; using ov::Shape; @@ -27,8 +25,8 @@ using ov::Strides; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { namespace { ov::Output make_group_conv_backprop(const ov::Output& data, const ov::Output& filters, @@ -213,9 +211,9 @@ ov::OutputVector conv_transpose(const ov::frontend::onnx::Node& node) { return {std::make_shared(conv_node, reshaped_bias)}; } - -} // namespace set_1 -} // namespace op +ONNX_OP("ConvTranspose", OPSET_SINCE(1), ai_onnx::opset_1::conv_transpose); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/conv_transpose.hpp b/src/frontends/onnx/frontend/src/op/conv_transpose.hpp deleted file mode 100644 index 4f1762ce76ce12..00000000000000 --- a/src/frontends/onnx/frontend/src/op/conv_transpose.hpp +++ /dev/null @@ -1,26 +0,0 
@@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -/// \brief Performs ONNX Transposed Convolution operation. -/// -/// \param node The ONNX node object representing this operation. -/// -/// \return The vector containing OV nodes producing output of ONNX convolution -/// operation. -ov::OutputVector conv_transpose(const ov::frontend::onnx::Node& node); - -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/cos.cpp b/src/frontends/onnx/frontend/src/op/cos.cpp index 7dcb30da21f5bf..741eb06e89fd53 100644 --- a/src/frontends/onnx/frontend/src/op/cos.cpp +++ b/src/frontends/onnx/frontend/src/op/cos.cpp @@ -2,22 +2,22 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/cos.hpp" - #include "openvino/op/cos.hpp" +#include "core/operator_set.hpp" using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector cos(const ov::frontend::onnx::Node& node) { return {std::make_shared(node.get_ov_inputs().at(0))}; } -} // namespace set_1 -} // namespace op +ONNX_OP("Cos", OPSET_SINCE(1), ai_onnx::opset_1::cos); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/cos.hpp b/src/frontends/onnx/frontend/src/op/cos.hpp deleted file mode 100644 index cb248e3b640b9f..00000000000000 --- a/src/frontends/onnx/frontend/src/op/cos.hpp +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { 
-ov::OutputVector cos(const ov::frontend::onnx::Node& node); -} -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/cosh.cpp b/src/frontends/onnx/frontend/src/op/cosh.cpp index 8a4cff14b6b131..ec1a319b33af91 100644 --- a/src/frontends/onnx/frontend/src/op/cosh.cpp +++ b/src/frontends/onnx/frontend/src/op/cosh.cpp @@ -2,22 +2,22 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/cosh.hpp" - #include "openvino/op/cosh.hpp" +#include "core/operator_set.hpp" using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector cosh(const ov::frontend::onnx::Node& node) { return {std::make_shared(node.get_ov_inputs().at(0))}; } -} // namespace set_1 -} // namespace op +ONNX_OP("Cosh", OPSET_SINCE(1), ai_onnx::opset_1::cosh); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/cosh.hpp b/src/frontends/onnx/frontend/src/op/cosh.hpp deleted file mode 100644 index 462abe9cb55206..00000000000000 --- a/src/frontends/onnx/frontend/src/op/cosh.hpp +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector cosh(const ov::frontend::onnx::Node& node); -} -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/crop.cpp b/src/frontends/onnx/frontend/src/op/crop.cpp index 3dad7bb587863b..494867b2732920 100644 --- a/src/frontends/onnx/frontend/src/op/crop.cpp +++ b/src/frontends/onnx/frontend/src/op/crop.cpp @@ -2,22 +2,20 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/crop.hpp" - +#include 
"core/operator_set.hpp" #include "exceptions.hpp" #include "openvino/op/add.hpp" #include "openvino/op/constant.hpp" #include "openvino/op/shape_of.hpp" #include "openvino/op/strided_slice.hpp" - using namespace ov::op; using ov::Shape; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector crop(const ov::frontend::onnx::Node& node) { // Crop is an obsolete experimental ONNX operation. // Crops an image's spatial dimensions. @@ -73,8 +71,9 @@ ov::OutputVector crop(const ov::frontend::onnx::Node& node) { return {std::make_shared(input_data, begin, end, begin_mask, end_mask)}; } -} // namespace set_1 -} // namespace op +ONNX_OP("Crop", OPSET_SINCE(1), ai_onnx::opset_1::crop); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/crop.hpp b/src/frontends/onnx/frontend/src/op/crop.hpp deleted file mode 100644 index 882c31d7acbac9..00000000000000 --- a/src/frontends/onnx/frontend/src/op/crop.hpp +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector crop(const ov::frontend::onnx::Node& node); - -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/cum_sum.cpp b/src/frontends/onnx/frontend/src/op/cum_sum.cpp index 127551c208460e..ae18d843ab98bb 100644 --- a/src/frontends/onnx/frontend/src/op/cum_sum.cpp +++ b/src/frontends/onnx/frontend/src/op/cum_sum.cpp @@ -2,19 +2,18 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/cum_sum.hpp" +#include "openvino/op/cum_sum.hpp" +#include "core/operator_set.hpp" #include "openvino/op/constant.hpp" -#include 
"openvino/op/cum_sum.hpp" #include "utils/reshape.hpp" - using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector cum_sum(const ov::frontend::onnx::Node& node) { auto inputs = node.get_ov_inputs(); auto data = inputs.at(0); @@ -32,8 +31,9 @@ ov::OutputVector cum_sum(const ov::frontend::onnx::Node& node) { return ov::OutputVector{std::make_shared(data, axis, exclusive, reverse)}; } -} // namespace set_1 -} // namespace op +ONNX_OP("CumSum", OPSET_SINCE(1), ai_onnx::opset_1::cum_sum); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/cum_sum.hpp b/src/frontends/onnx/frontend/src/op/cum_sum.hpp deleted file mode 100644 index 81946320510d03..00000000000000 --- a/src/frontends/onnx/frontend/src/op/cum_sum.hpp +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector cum_sum(const ov::frontend::onnx::Node& node); - -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/depth_to_space.cpp b/src/frontends/onnx/frontend/src/op/depth_to_space.cpp index 58fc131f5e0e7c..2ab8fee6a400d0 100644 --- a/src/frontends/onnx/frontend/src/op/depth_to_space.cpp +++ b/src/frontends/onnx/frontend/src/op/depth_to_space.cpp @@ -2,18 +2,17 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/depth_to_space.hpp" - -#include "openvino/frontend/exception.hpp" #include "openvino/op/depth_to_space.hpp" +#include "core/operator_set.hpp" +#include "openvino/frontend/exception.hpp" using namespace ov::op; namespace ov { namespace frontend { namespace onnx { 
-namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector depth_to_space(const ov::frontend::onnx::Node& node) { auto data = node.get_ov_inputs().at(0); const auto& shape = data.get_partial_shape(); @@ -31,8 +30,9 @@ ov::OutputVector depth_to_space(const ov::frontend::onnx::Node& node) { const auto block_size = node.get_attribute_value("blocksize"); return ov::OutputVector{std::make_shared(data, ov_mode, block_size)}; } -} // namespace set_1 -} // namespace op +ONNX_OP("DepthToSpace", OPSET_SINCE(1), ai_onnx::opset_1::depth_to_space); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/depth_to_space.hpp b/src/frontends/onnx/frontend/src/op/depth_to_space.hpp deleted file mode 100644 index 67e830452c0f08..00000000000000 --- a/src/frontends/onnx/frontend/src/op/depth_to_space.hpp +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -/// \brief Permutes input tensor data from depth into blocks of spatial data. -/// -/// \note Values from the depth dimension (assuming NCHW layout) are moved in -/// spatial blocks to the height and width dimensions. -/// -/// \param[in] node The ONNX input node describing operation. 
-/// -/// \return ov::OutputVector containing Tensor with shape: -/// [N, C/(blocksize * blocksize), H * blocksize, W * blocksize] -ov::OutputVector depth_to_space(const ov::frontend::onnx::Node& node); -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/dequantize_linear.cpp b/src/frontends/onnx/frontend/src/op/dequantize_linear.cpp index 19577d40ba7d31..9d67514a94e015 100644 --- a/src/frontends/onnx/frontend/src/op/dequantize_linear.cpp +++ b/src/frontends/onnx/frontend/src/op/dequantize_linear.cpp @@ -2,12 +2,11 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/dequantize_linear.hpp" - #include #include #include "core/null_node.hpp" +#include "core/operator_set.hpp" #include "openvino/core/validation_util.hpp" #include "openvino/frontend/exception.hpp" #include "openvino/op/constant.hpp" @@ -16,13 +15,12 @@ #include "openvino/op/reshape.hpp" #include "openvino/op/subtract.hpp" #include "utils/common.hpp" - using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { +namespace ai_onnx { namespace detail { std::shared_ptr get_zero_point(const ov::OutputVector& inputs) { if (inputs.size() == 3 && !ov::op::util::is_null(inputs[2])) { @@ -37,7 +35,7 @@ std::shared_ptr get_zero_point(const ov::OutputVector& inputs) { return nullptr; } } // namespace detail -namespace set_1 { +namespace opset_1 { ov::OutputVector dequantize_linear(const ov::frontend::onnx::Node& node) { const ov::OutputVector inputs{node.get_ov_inputs()}; @@ -60,9 +58,10 @@ ov::OutputVector dequantize_linear(const ov::frontend::onnx::Node& node) { return {std::make_shared(converted_x, scale)}; } } -} // namespace set_1 +ONNX_OP("DequantizeLinear", {1, 12}, ai_onnx::opset_1::dequantize_linear); +} // namespace opset_1 -namespace set_13 { +namespace opset_13 { namespace detail { void validate_scale(const ov::Output scale, const ov::Output x, const int64_t axis) { 
const auto& scale_shape = scale.get_partial_shape(); @@ -171,26 +170,27 @@ ov::OutputVector dequantize_linear(const ov::frontend::onnx::Node& node) { inputs.size()); const auto& x = inputs[0]; const auto& scale = inputs[1]; - const auto zero_point = op::detail::get_zero_point(inputs); + const auto zero_point = ai_onnx::detail::get_zero_point(inputs); const auto& scale_shape = scale.get_partial_shape(); // per-tensor quantization, axis attribute ignored if ((scale_shape.rank().is_static() && scale_shape.size() == 0) || (scale_shape.is_static() && shape_size(scale_shape.get_shape()) == 1)) { if (!zero_point) { - return set_1::dequantize_linear(node); + return ai_onnx::opset_1::dequantize_linear(node); } const auto& zero_point_shape = zero_point->get_output_partial_shape(0); if ((zero_point_shape.rank().is_static() && zero_point_shape.size() == 0) || (zero_point_shape.is_static() && shape_size(zero_point_shape.get_shape()) == 1)) { - return set_1::dequantize_linear(node); + return ai_onnx::opset_1::dequantize_linear(node); } } // these reshapes make sure that dequantization happens over the specified axis return detail::dequantize_linear(x, scale, zero_point, node.get_attribute_value("axis", 1), node); } -} // namespace set_13 -} // namespace op +ONNX_OP("DequantizeLinear", OPSET_SINCE(13), ai_onnx::opset_13::dequantize_linear); +} // namespace opset_13 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/dequantize_linear.hpp b/src/frontends/onnx/frontend/src/op/dequantize_linear.hpp deleted file mode 100644 index 08e4e1fdadc606..00000000000000 --- a/src/frontends/onnx/frontend/src/op/dequantize_linear.hpp +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" -#include "openvino/core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { - -namespace 
set_1 { -ov::OutputVector dequantize_linear(const ov::frontend::onnx::Node& node); - -} // namespace set_1 - -namespace set_13 { -namespace detail { -ov::OutputVector dequantize_linear(const ov::Output& x, - const ov::Output& scale, - const std::shared_ptr& zero_point, - int64_t axis, - const Node& node); -} -ov::OutputVector dequantize_linear(const ov::frontend::onnx::Node& node); -} // namespace set_13 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/dft.cpp b/src/frontends/onnx/frontend/src/op/dft.cpp index 81ecd7e9fdcebc..0218511b05bc31 100644 --- a/src/frontends/onnx/frontend/src/op/dft.cpp +++ b/src/frontends/onnx/frontend/src/op/dft.cpp @@ -2,17 +2,16 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/dft.hpp" +#include "utils/dft.hpp" #include "core/null_node.hpp" +#include "core/operator_set.hpp" #include "utils/common.hpp" -#include "utils/dft.hpp" - namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector dft(const ov::frontend::onnx::Node& node) { const ov::OutputVector ng_inputs{node.get_ov_inputs()}; const ov::Output data = ng_inputs.at(0); @@ -29,8 +28,9 @@ ov::OutputVector dft(const ov::frontend::onnx::Node& node) { onesided == 1)}; } -} // namespace set_1 -} // namespace op +ONNX_OP("DFT", OPSET_SINCE(1), ai_onnx::opset_1::dft); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/dft.hpp b/src/frontends/onnx/frontend/src/op/dft.hpp deleted file mode 100644 index d3c35133e7d9a7..00000000000000 --- a/src/frontends/onnx/frontend/src/op/dft.hpp +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (C) 2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace 
op { -namespace set_1 { -ov::OutputVector dft(const ov::frontend::onnx::Node& node); - -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/div.cpp b/src/frontends/onnx/frontend/src/op/div.cpp new file mode 100644 index 00000000000000..4ba5ad15254412 --- /dev/null +++ b/src/frontends/onnx/frontend/src/op/div.cpp @@ -0,0 +1,31 @@ +// Copyright (C) 2018-2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "core/operator_set.hpp" +#include "openvino/op/divide.hpp" +#include "utils/common.hpp" + +namespace ov { +namespace frontend { +namespace onnx { +namespace ai_onnx { +namespace opset_1 { +ov::OutputVector div(const ov::frontend::onnx::Node& node) { + return common::handle_opset6_binary_op(node); +} + +ONNX_OP("Div", OPSET_RANGE(1, 6), ai_onnx::opset_1::div); +} // namespace opset_1 + +namespace opset_7 { +ov::OutputVector div(const ov::frontend::onnx::Node& node) { + return {std::make_shared(node.get_ov_inputs().at(0), node.get_ov_inputs().at(1))}; +} + +ONNX_OP("Div", OPSET_SINCE(7), ai_onnx::opset_7::div); +} // namespace opset_7 +} // namespace ai_onnx +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/div.hpp b/src/frontends/onnx/frontend/src/op/div.hpp deleted file mode 100644 index 58b95fb29f3be0..00000000000000 --- a/src/frontends/onnx/frontend/src/op/div.hpp +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" -#include "openvino/op/divide.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -inline ov::OutputVector div(const ov::frontend::onnx::Node& node) { - return common::handle_opset6_binary_op(node); -} - -} // namespace set_1 - -namespace set_7 { -inline ov::OutputVector div(const ov::frontend::onnx::Node& 
node) { - return {std::make_shared(node.get_ov_inputs().at(0), node.get_ov_inputs().at(1))}; -} - -} // namespace set_7 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/dropout.cpp b/src/frontends/onnx/frontend/src/op/dropout.cpp index 636bc9dda486f0..ac93d1ef545775 100644 --- a/src/frontends/onnx/frontend/src/op/dropout.cpp +++ b/src/frontends/onnx/frontend/src/op/dropout.cpp @@ -2,21 +2,19 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/dropout.hpp" - #include "core/null_node.hpp" +#include "core/operator_set.hpp" #include "exceptions.hpp" #include "openvino/op/broadcast.hpp" #include "openvino/op/constant.hpp" #include "openvino/op/shape_of.hpp" #include "openvino/op/util/op_types.hpp" - using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { +namespace ai_onnx { namespace { ov::OutputVector build_dropout(const ov::frontend::onnx::Node& node, bool training_mode) { CHECK_VALID_NODE(node, !training_mode, "Training mode is not supported for Dropout op"); @@ -35,7 +33,7 @@ ov::OutputVector build_dropout(const ov::frontend::onnx::Node& node, bool traini } } // namespace -namespace set_12 { +namespace opset_12 { ov::OutputVector dropout(const ov::frontend::onnx::Node& node) { const auto ng_inputs = node.get_ov_inputs(); // seed attribute and ratio input are ignored because traning mode is not @@ -49,9 +47,10 @@ ov::OutputVector dropout(const ov::frontend::onnx::Node& node) { } return build_dropout(node, training_mode); } -} // namespace set_12 +ONNX_OP("Dropout", OPSET_SINCE(12), ai_onnx::opset_12::dropout); +} // namespace opset_12 -namespace set_7 { +namespace opset_7 { ov::OutputVector dropout(const ov::frontend::onnx::Node& node) { // "is_test" attribute was removed // ratio attribute is ignored because traning mode is not supported @@ -59,9 +58,10 @@ ov::OutputVector dropout(const ov::frontend::onnx::Node& node) { return 
build_dropout(node, training_mode); } -} // namespace set_7 +ONNX_OP("Dropout", OPSET_RANGE(7, 11), ai_onnx::opset_7::dropout); +} // namespace opset_7 -namespace set_1 { +namespace opset_1 { ov::OutputVector dropout(const ov::frontend::onnx::Node& node) { // legacy consumed_inputs attribute ignored // ratio attribute is ignored because traning mode is not supported @@ -69,8 +69,9 @@ ov::OutputVector dropout(const ov::frontend::onnx::Node& node) { return build_dropout(node, training_mode); } -} // namespace set_1 -} // namespace op +ONNX_OP("Dropout", OPSET_RANGE(1, 6), ai_onnx::opset_1::dropout); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/dropout.hpp b/src/frontends/onnx/frontend/src/op/dropout.hpp deleted file mode 100644 index 2dfa65b16cda4d..00000000000000 --- a/src/frontends/onnx/frontend/src/op/dropout.hpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_12 { -ov::OutputVector dropout(const ov::frontend::onnx::Node& node); -} // namespace set_12 - -namespace set_7 { -ov::OutputVector dropout(const ov::frontend::onnx::Node& node); -} // namespace set_7 - -namespace set_1 { -ov::OutputVector dropout(const ov::frontend::onnx::Node& node); -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/dynamic_quantize_linear.cpp b/src/frontends/onnx/frontend/src/op/dynamic_quantize_linear.cpp index 1009d05d8d9018..cf27c57cca8c17 100644 --- a/src/frontends/onnx/frontend/src/op/dynamic_quantize_linear.cpp +++ b/src/frontends/onnx/frontend/src/op/dynamic_quantize_linear.cpp @@ -2,9 +2,8 @@ // SPDX-License-Identifier: Apache-2.0 // -#include 
"op/dynamic_quantize_linear.hpp" - #include "core/null_node.hpp" +#include "core/operator_set.hpp" #include "openvino/op/add.hpp" #include "openvino/op/clamp.hpp" #include "openvino/op/constant.hpp" @@ -21,7 +20,6 @@ #include "openvino/op/squeeze.hpp" #include "openvino/op/subtract.hpp" #include "utils/common.hpp" - using namespace ov::op; using ov::Shape; @@ -77,8 +75,8 @@ std::shared_ptr quantize_linear(ov::Output x, return std::make_shared(result_clamped, ov::element::u8); } } // namespace -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector dynamic_quantize_linear(const ov::frontend::onnx::Node& node) { const ov::OutputVector& inputs = node.get_ov_inputs(); const auto& x = inputs.at(0); @@ -106,8 +104,9 @@ ov::OutputVector dynamic_quantize_linear(const ov::frontend::onnx::Node& node) { return {y, y_scale, y_zero_point}; } -} // namespace set_1 -} // namespace op +ONNX_OP("DynamicQuantizeLinear", OPSET_SINCE(1), ai_onnx::opset_1::dynamic_quantize_linear); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/dynamic_quantize_linear.hpp b/src/frontends/onnx/frontend/src/op/dynamic_quantize_linear.hpp deleted file mode 100644 index ea960d954202b2..00000000000000 --- a/src/frontends/onnx/frontend/src/op/dynamic_quantize_linear.hpp +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector dynamic_quantize_linear(const ov::frontend::onnx::Node& node); - -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/einsum.cpp b/src/frontends/onnx/frontend/src/op/einsum.cpp index 201425f45f24c7..799ddcc818fe7f 100644 --- 
a/src/frontends/onnx/frontend/src/op/einsum.cpp +++ b/src/frontends/onnx/frontend/src/op/einsum.cpp @@ -2,25 +2,25 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/einsum.hpp" - #include "openvino/op/einsum.hpp" +#include "core/operator_set.hpp" using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector einsum(const ov::frontend::onnx::Node& node) { const std::string& equation{node.get_attribute_value("equation")}; return {std::make_shared(node.get_ov_inputs(), equation)}; } -} // namespace set_1 -} // namespace op +ONNX_OP("Einsum", OPSET_SINCE(1), ai_onnx::opset_1::einsum); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/einsum.hpp b/src/frontends/onnx/frontend/src/op/einsum.hpp deleted file mode 100644 index 3e4bf562da99a0..00000000000000 --- a/src/frontends/onnx/frontend/src/op/einsum.hpp +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector einsum(const ov::frontend::onnx::Node& node); - -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/elu.cpp b/src/frontends/onnx/frontend/src/op/elu.cpp index b38c4403c4ecb3..10a1ab9a9296b7 100644 --- a/src/frontends/onnx/frontend/src/op/elu.cpp +++ b/src/frontends/onnx/frontend/src/op/elu.cpp @@ -2,17 +2,16 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/elu.hpp" - #include "openvino/op/elu.hpp" +#include "core/operator_set.hpp" using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { 
+namespace opset_1 { ov::OutputVector elu(const ov::frontend::onnx::Node& node) { auto data = node.get_ov_inputs().at(0); double alpha = node.get_attribute_value("alpha", 1); @@ -20,8 +19,9 @@ ov::OutputVector elu(const ov::frontend::onnx::Node& node) { return {std::make_shared(data, alpha)}; } -} // namespace set_1 -} // namespace op +ONNX_OP("Elu", OPSET_SINCE(1), ai_onnx::opset_1::elu); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/elu.hpp b/src/frontends/onnx/frontend/src/op/elu.hpp deleted file mode 100644 index 6216185fd37990..00000000000000 --- a/src/frontends/onnx/frontend/src/op/elu.hpp +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector elu(const ov::frontend::onnx::Node& node); - -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/equal.hpp b/src/frontends/onnx/frontend/src/op/equal.cpp similarity index 59% rename from src/frontends/onnx/frontend/src/op/equal.hpp rename to src/frontends/onnx/frontend/src/op/equal.cpp index 94fe2da6991ff3..df223a994a04d7 100644 --- a/src/frontends/onnx/frontend/src/op/equal.hpp +++ b/src/frontends/onnx/frontend/src/op/equal.cpp @@ -2,22 +2,22 @@ // SPDX-License-Identifier: Apache-2.0 // -#pragma once - -#include "core/node.hpp" #include "openvino/op/equal.hpp" +#include "core/operator_set.hpp" + namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { -inline ov::OutputVector equal(const ov::frontend::onnx::Node& node) { +namespace ai_onnx { +namespace opset_1 { +ov::OutputVector equal(const ov::frontend::onnx::Node& node) { return 
{std::make_shared(node.get_ov_inputs().at(0), node.get_ov_inputs().at(1))}; } -} // namespace set_1 -} // namespace op +ONNX_OP("Equal", OPSET_SINCE(1), ai_onnx::opset_1::equal); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/erf.hpp b/src/frontends/onnx/frontend/src/op/erf.cpp similarity index 57% rename from src/frontends/onnx/frontend/src/op/erf.hpp rename to src/frontends/onnx/frontend/src/op/erf.cpp index e3b8bb9960d7ed..7921c68deb7b8d 100644 --- a/src/frontends/onnx/frontend/src/op/erf.hpp +++ b/src/frontends/onnx/frontend/src/op/erf.cpp @@ -2,21 +2,21 @@ // SPDX-License-Identifier: Apache-2.0 // -#pragma once - -#include "core/node.hpp" #include "openvino/op/erf.hpp" +#include "core/operator_set.hpp" + namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { -inline ov::OutputVector erf(const ov::frontend::onnx::Node& node) { +namespace ai_onnx { +namespace opset_1 { +ov::OutputVector erf(const ov::frontend::onnx::Node& node) { return {std::make_shared(node.get_ov_inputs().at(0))}; } -} // namespace set_1 -} // namespace op +ONNX_OP("Erf", OPSET_SINCE(1), ai_onnx::opset_1::erf); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/exp.hpp b/src/frontends/onnx/frontend/src/op/exp.cpp similarity index 57% rename from src/frontends/onnx/frontend/src/op/exp.hpp rename to src/frontends/onnx/frontend/src/op/exp.cpp index f99fc37f6ebce0..7455b9433a47c1 100644 --- a/src/frontends/onnx/frontend/src/op/exp.hpp +++ b/src/frontends/onnx/frontend/src/op/exp.cpp @@ -2,22 +2,22 @@ // SPDX-License-Identifier: Apache-2.0 // -#pragma once - -#include "core/node.hpp" #include "openvino/op/exp.hpp" +#include "core/operator_set.hpp" + namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { -inline 
ov::OutputVector exp(const ov::frontend::onnx::Node& node) { +namespace ai_onnx { +namespace opset_1 { +ov::OutputVector exp(const ov::frontend::onnx::Node& node) { return {std::make_shared(node.get_ov_inputs().at(0))}; } -} // namespace set_1 -} // namespace op +ONNX_OP("Exp", OPSET_SINCE(1), ai_onnx::opset_1::exp); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/expand.cpp b/src/frontends/onnx/frontend/src/op/expand.cpp index c96331627c43a7..45dbae5fcb4b59 100644 --- a/src/frontends/onnx/frontend/src/op/expand.cpp +++ b/src/frontends/onnx/frontend/src/op/expand.cpp @@ -2,20 +2,18 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/expand.hpp" - +#include "core/operator_set.hpp" #include "openvino/op/broadcast.hpp" #include "openvino/op/constant.hpp" #include "utils/common.hpp" - using namespace ov::op; using ov::Shape; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector expand(const ov::frontend::onnx::Node& node) { const ov::Output data{node.get_ov_inputs().at(0)}; const ov::Output shape{node.get_ov_inputs().at(1)}; @@ -31,8 +29,9 @@ ov::OutputVector expand(const ov::frontend::onnx::Node& node) { } } -} // namespace set_1 -} // namespace op +ONNX_OP("Expand", OPSET_SINCE(1), ai_onnx::opset_1::expand); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/expand.hpp b/src/frontends/onnx/frontend/src/op/expand.hpp deleted file mode 100644 index 6ac824d54bcde4..00000000000000 --- a/src/frontends/onnx/frontend/src/op/expand.hpp +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { 
-namespace op { -namespace set_1 -// Expand operator has been available since version 8 of the default ONNX operator set. -// Currently, Expand is assigned to version 1 due to temporary reason. -{ -ov::OutputVector expand(const ov::frontend::onnx::Node& node); - -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/eye_like.cpp b/src/frontends/onnx/frontend/src/op/eye_like.cpp index 10814d9ab8d80f..35124f8f4e65f5 100644 --- a/src/frontends/onnx/frontend/src/op/eye_like.cpp +++ b/src/frontends/onnx/frontend/src/op/eye_like.cpp @@ -2,21 +2,19 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/eye_like.hpp" - +#include "core/operator_set.hpp" #include "exceptions.hpp" #include "openvino/op/constant.hpp" #include "openvino/op/eye.hpp" #include "openvino/op/gather.hpp" #include "openvino/op/shape_of.hpp" #include "utils/common.hpp" - using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { +namespace ai_onnx { namespace detail { namespace { @@ -31,7 +29,7 @@ ov::OutputVector get_shape_width_and_height(const ov::Output& shape) { } // namespace } // namespace detail -namespace set_1 { +namespace opset_1 { ov::OutputVector eye_like(const ov::frontend::onnx::Node& node) { const auto input = node.get_ov_inputs().at(0); @@ -63,8 +61,9 @@ ov::OutputVector eye_like(const ov::frontend::onnx::Node& node) { return {output}; } -} // namespace set_1 -} // namespace op +ONNX_OP("EyeLike", OPSET_SINCE(1), ai_onnx::opset_1::eye_like); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/eye_like.hpp b/src/frontends/onnx/frontend/src/op/eye_like.hpp deleted file mode 100644 index d9fef0c7fa5d30..00000000000000 --- a/src/frontends/onnx/frontend/src/op/eye_like.hpp +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// 
SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { - -ov::OutputVector eye_like(const ov::frontend::onnx::Node& node); - -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/flatten.cpp b/src/frontends/onnx/frontend/src/op/flatten.cpp index 09f9c3fa4ecf7d..b068a87921412c 100644 --- a/src/frontends/onnx/frontend/src/op/flatten.cpp +++ b/src/frontends/onnx/frontend/src/op/flatten.cpp @@ -2,19 +2,17 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/flatten.hpp" - +#include "core/operator_set.hpp" #include "exceptions.hpp" #include "openvino/core/validation_util.hpp" #include "utils/reshape.hpp" - using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector flatten(const ov::frontend::onnx::Node& node) { ov::OutputVector inputs{node.get_ov_inputs()}; auto data = inputs.at(0); @@ -30,8 +28,9 @@ ov::OutputVector flatten(const ov::frontend::onnx::Node& node) { return {ov::op::util::flatten(data, static_cast(axis))}; } -} // namespace set_1 -} // namespace op +ONNX_OP("Flatten", OPSET_SINCE(1), ai_onnx::opset_1::flatten); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/flatten.hpp b/src/frontends/onnx/frontend/src/op/flatten.hpp deleted file mode 100644 index 886d189bfad6f0..00000000000000 --- a/src/frontends/onnx/frontend/src/op/flatten.hpp +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector flatten(const 
ov::frontend::onnx::Node& node); - -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/floor.hpp b/src/frontends/onnx/frontend/src/op/floor.cpp similarity index 56% rename from src/frontends/onnx/frontend/src/op/floor.hpp rename to src/frontends/onnx/frontend/src/op/floor.cpp index 38a6722884bbea..82d4a88f46ebe3 100644 --- a/src/frontends/onnx/frontend/src/op/floor.hpp +++ b/src/frontends/onnx/frontend/src/op/floor.cpp @@ -2,22 +2,22 @@ // SPDX-License-Identifier: Apache-2.0 // -#pragma once - -#include "core/node.hpp" #include "openvino/op/floor.hpp" +#include "core/operator_set.hpp" + namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { -inline ov::OutputVector floor(const ov::frontend::onnx::Node& node) { +namespace ai_onnx { +namespace opset_1 { +ov::OutputVector floor(const ov::frontend::onnx::Node& node) { return {std::make_shared(node.get_ov_inputs().at(0))}; } -} // namespace set_1 -} // namespace op +ONNX_OP("Floor", OPSET_SINCE(1), ai_onnx::opset_1::floor); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/gather.hpp b/src/frontends/onnx/frontend/src/op/gather.cpp similarity index 73% rename from src/frontends/onnx/frontend/src/op/gather.hpp rename to src/frontends/onnx/frontend/src/op/gather.cpp index e36f6eecaedd6b..bc1dd705accd53 100644 --- a/src/frontends/onnx/frontend/src/op/gather.hpp +++ b/src/frontends/onnx/frontend/src/op/gather.cpp @@ -2,18 +2,17 @@ // SPDX-License-Identifier: Apache-2.0 // -#pragma once +#include "openvino/op/gather.hpp" -#include "core/node.hpp" +#include "core/operator_set.hpp" #include "openvino/op/constant.hpp" -#include "openvino/op/gather.hpp" namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { -inline ov::OutputVector gather(const 
ov::frontend::onnx::Node& node) { +namespace ai_onnx { +namespace opset_1 { +ov::OutputVector gather(const ov::frontend::onnx::Node& node) { ov::OutputVector ng_inputs{node.get_ov_inputs()}; auto data = ng_inputs.at(0); auto indices = ng_inputs.at(1); @@ -24,8 +23,9 @@ inline ov::OutputVector gather(const ov::frontend::onnx::Node& node) { ov::op::v0::Constant::create(ov::element::i64, ov::Shape{}, {axis}))}; } -} // namespace set_1 -} // namespace op +ONNX_OP("Gather", OPSET_SINCE(1), ai_onnx::opset_1::gather); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/gather_elements.hpp b/src/frontends/onnx/frontend/src/op/gather_elements.cpp similarity index 65% rename from src/frontends/onnx/frontend/src/op/gather_elements.hpp rename to src/frontends/onnx/frontend/src/op/gather_elements.cpp index fda66afa3ff5fa..5f6edff7548229 100644 --- a/src/frontends/onnx/frontend/src/op/gather_elements.hpp +++ b/src/frontends/onnx/frontend/src/op/gather_elements.cpp @@ -2,16 +2,15 @@ // SPDX-License-Identifier: Apache-2.0 // -#pragma once - #include "openvino/op/gather_elements.hpp" +#include "core/operator_set.hpp" namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { -inline ov::OutputVector gather_elements(const ov::frontend::onnx::Node& node) { +namespace ai_onnx { +namespace opset_1 { +ov::OutputVector gather_elements(const ov::frontend::onnx::Node& node) { ov::OutputVector ng_inputs{node.get_ov_inputs()}; auto data = ng_inputs.at(0); auto indices = ng_inputs.at(1); @@ -19,8 +18,9 @@ inline ov::OutputVector gather_elements(const ov::frontend::onnx::Node& node) { return {std::make_shared(data, indices, axis)}; } -} // namespace set_1 -} // namespace op +ONNX_OP("GatherElements", OPSET_SINCE(1), ai_onnx::opset_1::gather_elements); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov 
diff --git a/src/frontends/onnx/frontend/src/op/gather_nd.cpp b/src/frontends/onnx/frontend/src/op/gather_nd.cpp index 4e585ca1ebd618..f272e21b8228c2 100644 --- a/src/frontends/onnx/frontend/src/op/gather_nd.cpp +++ b/src/frontends/onnx/frontend/src/op/gather_nd.cpp @@ -5,17 +5,16 @@ // Disabled in CMakeList // Update to higher opset required -#include "op/gather_nd.hpp" - #include "openvino/op/gather_nd.hpp" +#include "core/operator_set.hpp" using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector gather_nd(const ov::frontend::onnx::Node& node) { const ov::OutputVector ng_inputs{node.get_ov_inputs()}; const auto data = ng_inputs.at(0); @@ -25,8 +24,9 @@ ov::OutputVector gather_nd(const ov::frontend::onnx::Node& node) { return {std::make_shared(data, indices, batch_dims)}; } -} // namespace set_1 -} // namespace op +ONNX_OP("GatherND", OPSET_SINCE(1), ai_onnx::opset_1::gather_nd); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/gather_nd.hpp b/src/frontends/onnx/frontend/src/op/gather_nd.hpp deleted file mode 100644 index 7fffc035dfbb85..00000000000000 --- a/src/frontends/onnx/frontend/src/op/gather_nd.hpp +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -// Disabled in CMakeList -// Update to higher opset required - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector gather_nd(const ov::frontend::onnx::Node& node); - -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/gelu.cpp b/src/frontends/onnx/frontend/src/op/gelu.cpp index 4e04112bb100c3..66c01935cd33f1 100644 --- 
a/src/frontends/onnx/frontend/src/op/gelu.cpp +++ b/src/frontends/onnx/frontend/src/op/gelu.cpp @@ -1,19 +1,18 @@ // Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 -#include "op/gelu.hpp" +#include "openvino/op/gelu.hpp" +#include "core/operator_set.hpp" #include "openvino/frontend/exception.hpp" -#include "openvino/op/gelu.hpp" #include "utils/common.hpp" - using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector gelu(const ov::frontend::onnx::Node& node) { const auto& inputs = node.get_ov_inputs(); std::string approximate = node.get_attribute_value("approximate", "none"); @@ -32,8 +31,9 @@ ov::OutputVector gelu(const ov::frontend::onnx::Node& node) { inputs[0], approximate == "none" ? ov::op::GeluApproximationMode::ERF : ov::op::GeluApproximationMode::TANH)}; } -} // namespace set_1 -} // namespace op +ONNX_OP("Gelu", OPSET_SINCE(1), ai_onnx::opset_1::gelu); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/gelu.hpp b/src/frontends/onnx/frontend/src/op/gelu.hpp deleted file mode 100644 index 242da0fb5b7730..00000000000000 --- a/src/frontends/onnx/frontend/src/op/gelu.hpp +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { - -ov::OutputVector gelu(const ov::frontend::onnx::Node& node); - -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/gemm.cpp b/src/frontends/onnx/frontend/src/op/gemm.cpp index b679a407d51707..9205e1dd4b6377 100644 --- a/src/frontends/onnx/frontend/src/op/gemm.cpp +++ 
b/src/frontends/onnx/frontend/src/op/gemm.cpp @@ -2,22 +2,20 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/gemm.hpp" - +#include "core/operator_set.hpp" #include "openvino/op/add.hpp" #include "openvino/op/constant.hpp" #include "openvino/op/matmul.hpp" #include "openvino/op/multiply.hpp" #include "utils/reshape.hpp" - using namespace ov::op; using ov::Shape; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector gemm(const ov::frontend::onnx::Node& node) { ov::OutputVector inputs{node.get_ov_inputs()}; ov::Output input_a = inputs.at(0); @@ -59,9 +57,10 @@ ov::OutputVector gemm(const ov::frontend::onnx::Node& node) { return ov::OutputVector{std::make_shared(matmul_node, beta_times_input_c)}; } -} // namespace set_1 +ONNX_OP("Gemm", OPSET_RANGE(1, 5), ai_onnx::opset_1::gemm); +} // namespace opset_1 -namespace set_6 { +namespace opset_6 { ov::OutputVector gemm(const ov::frontend::onnx::Node& node) { ov::OutputVector inputs{node.get_ov_inputs()}; ov::Output input_a = inputs.at(0); @@ -89,8 +88,9 @@ ov::OutputVector gemm(const ov::frontend::onnx::Node& node) { return {std::make_shared(matmul_times_alpha, beta_times_input_c)}; } -} // namespace set_6 -} // namespace op +ONNX_OP("Gemm", OPSET_SINCE(6), ai_onnx::opset_6::gemm); +} // namespace opset_6 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/gemm.hpp b/src/frontends/onnx/frontend/src/op/gemm.hpp deleted file mode 100644 index a8744ae4ea989c..00000000000000 --- a/src/frontends/onnx/frontend/src/op/gemm.hpp +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector gemm(const ov::frontend::onnx::Node& node); - -} // 
namespace set_1 - -namespace set_6 { -ov::OutputVector gemm(const ov::frontend::onnx::Node& node); - -} // namespace set_6 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/global_average_pool.cpp b/src/frontends/onnx/frontend/src/op/global_average_pool.cpp index a37c346a7a5118..7a6ab6f85dae0e 100644 --- a/src/frontends/onnx/frontend/src/op/global_average_pool.cpp +++ b/src/frontends/onnx/frontend/src/op/global_average_pool.cpp @@ -2,22 +2,20 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/global_average_pool.hpp" - +#include "core/operator_set.hpp" #include "openvino/op/constant.hpp" #include "openvino/op/range.hpp" #include "openvino/op/reduce_mean.hpp" #include "openvino/op/shape_of.hpp" #include "openvino/op/squeeze.hpp" - using namespace ov::op; using ov::Shape; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector global_average_pool(const ov::frontend::onnx::Node& node) { // Generate axes for reduce operation which contain all spatial dims indexes. 
// Examples: @@ -43,8 +41,9 @@ ov::OutputVector global_average_pool(const ov::frontend::onnx::Node& node) { return {std::make_shared(data, reduce_axes, true)}; } -} // namespace set_1 -} // namespace op +ONNX_OP("GlobalAveragePool", OPSET_SINCE(1), ai_onnx::opset_1::global_average_pool); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/global_average_pool.hpp b/src/frontends/onnx/frontend/src/op/global_average_pool.hpp deleted file mode 100644 index d95b4bfd451852..00000000000000 --- a/src/frontends/onnx/frontend/src/op/global_average_pool.hpp +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -/// \brief Convert ONNX GlobalAveragePool operation to an OV node. -/// -/// \param node The ONNX node object representing this operation. -/// -/// \return The vector containing OV nodes producing output of ONNX -/// GlobalAveragePool operation. 
-ov::OutputVector global_average_pool(const ov::frontend::onnx::Node& node); - -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/global_max_pool.cpp b/src/frontends/onnx/frontend/src/op/global_max_pool.cpp index e975c4fe423e9b..ee08681241756e 100644 --- a/src/frontends/onnx/frontend/src/op/global_max_pool.cpp +++ b/src/frontends/onnx/frontend/src/op/global_max_pool.cpp @@ -2,22 +2,20 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/global_max_pool.hpp" - +#include "core/operator_set.hpp" #include "openvino/op/constant.hpp" #include "openvino/op/range.hpp" #include "openvino/op/reduce_max.hpp" #include "openvino/op/shape_of.hpp" #include "openvino/op/squeeze.hpp" - using namespace ov::op; using ov::Shape; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector global_max_pool(const ov::frontend::onnx::Node& node) { // Generate axes for reduce operation which contain all spatial dims indexes. 
// Examples: @@ -43,8 +41,9 @@ ov::OutputVector global_max_pool(const ov::frontend::onnx::Node& node) { return {std::make_shared(data, reduce_axes, true)}; } -} // namespace set_1 -} // namespace op +ONNX_OP("GlobalMaxPool", OPSET_SINCE(1), ai_onnx::opset_1::global_max_pool); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/global_max_pool.hpp b/src/frontends/onnx/frontend/src/op/global_max_pool.hpp deleted file mode 100644 index 0a1271ca68baca..00000000000000 --- a/src/frontends/onnx/frontend/src/op/global_max_pool.hpp +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -/// \brief Convert ONNX GlobalMaxPool operation to an OV node. -/// -/// \param node The ONNX node object representing this operation. -/// -/// \return The vector containing OV nodes producing output of ONNX -/// GlobalMaxPool operation. 
-ov::OutputVector global_max_pool(const ov::frontend::onnx::Node& node); - -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/greater.hpp b/src/frontends/onnx/frontend/src/op/greater.cpp similarity index 58% rename from src/frontends/onnx/frontend/src/op/greater.hpp rename to src/frontends/onnx/frontend/src/op/greater.cpp index bb562c7f876acd..5ab8b070c89b73 100644 --- a/src/frontends/onnx/frontend/src/op/greater.hpp +++ b/src/frontends/onnx/frontend/src/op/greater.cpp @@ -2,22 +2,22 @@ // SPDX-License-Identifier: Apache-2.0 // -#pragma once - -#include "core/node.hpp" #include "openvino/op/greater.hpp" +#include "core/operator_set.hpp" + namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { -inline ov::OutputVector greater(const ov::frontend::onnx::Node& node) { +namespace ai_onnx { +namespace opset_1 { +ov::OutputVector greater(const ov::frontend::onnx::Node& node) { return {std::make_shared(node.get_ov_inputs().at(0), node.get_ov_inputs().at(1))}; } -} // namespace set_1 -} // namespace op +ONNX_OP("Greater", OPSET_SINCE(1), ai_onnx::opset_1::greater); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/greater_or_equal.cpp b/src/frontends/onnx/frontend/src/op/greater_or_equal.cpp index 464bd61a382012..35da2fc9850a14 100644 --- a/src/frontends/onnx/frontend/src/op/greater_or_equal.cpp +++ b/src/frontends/onnx/frontend/src/op/greater_or_equal.cpp @@ -2,19 +2,17 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/greater_or_equal.hpp" - +#include "core/operator_set.hpp" #include "openvino/frontend/exception.hpp" #include "openvino/op/greater.hpp" #include "openvino/op/greater_eq.hpp" - using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { 
+namespace opset_1 { ov::OutputVector greater_or_equal(const ov::frontend::onnx::Node& node) { const auto A = node.get_ov_inputs().at(0); const auto B = node.get_ov_inputs().at(1); @@ -26,9 +24,10 @@ ov::OutputVector greater_or_equal(const ov::frontend::onnx::Node& node) { return {C}; } -} // namespace set_1 +ONNX_OP("GreaterOrEqual", OPSET_RANGE(1, 15), ai_onnx::opset_1::greater_or_equal); +} // namespace opset_1 -namespace set_16 { +namespace opset_16 { ov::OutputVector greater_or_equal(const ov::frontend::onnx::Node& node) { const auto A = node.get_ov_inputs().at(0); const auto B = node.get_ov_inputs().at(1); @@ -37,8 +36,9 @@ ov::OutputVector greater_or_equal(const ov::frontend::onnx::Node& node) { return {C}; } -} // namespace set_16 -} // namespace op +ONNX_OP("GreaterOrEqual", OPSET_SINCE(16), ai_onnx::opset_16::greater_or_equal); +} // namespace opset_16 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/greater_or_equal.hpp b/src/frontends/onnx/frontend/src/op/greater_or_equal.hpp deleted file mode 100644 index 0c656dfa820955..00000000000000 --- a/src/frontends/onnx/frontend/src/op/greater_or_equal.hpp +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector greater_or_equal(const ov::frontend::onnx::Node& node); - -} // namespace set_1 - -namespace set_16 { -ov::OutputVector greater_or_equal(const ov::frontend::onnx::Node& node); - -} // namespace set_16 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/grid_sample.cpp b/src/frontends/onnx/frontend/src/op/grid_sample.cpp index 751cff425c093d..4592e2a218c041 100644 --- a/src/frontends/onnx/frontend/src/op/grid_sample.cpp +++ 
b/src/frontends/onnx/frontend/src/op/grid_sample.cpp @@ -2,17 +2,16 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/grid_sample.hpp" - #include "openvino/op/grid_sample.hpp" +#include "core/operator_set.hpp" using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector grid_sample(const ov::frontend::onnx::Node& node) { const auto data = node.get_ov_inputs().at(0); const auto grid = node.get_ov_inputs().at(1); @@ -28,8 +27,9 @@ ov::OutputVector grid_sample(const ov::frontend::onnx::Node& node) { return {std::make_shared(data, grid, attributes)}; } -} // namespace set_1 -} // namespace op +ONNX_OP("GridSample", OPSET_SINCE(1), ai_onnx::opset_1::grid_sample); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/grid_sample.hpp b/src/frontends/onnx/frontend/src/op/grid_sample.hpp deleted file mode 100644 index 15628ab71712a0..00000000000000 --- a/src/frontends/onnx/frontend/src/op/grid_sample.hpp +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector grid_sample(const ov::frontend::onnx::Node& node); - -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/group_normalization.cpp b/src/frontends/onnx/frontend/src/op/group_normalization.cpp index 5c5339748d4ce3..5c809da0669719 100644 --- a/src/frontends/onnx/frontend/src/op/group_normalization.cpp +++ b/src/frontends/onnx/frontend/src/op/group_normalization.cpp @@ -2,25 +2,24 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/group_normalization.hpp" +#include 
"openvino/op/group_normalization.hpp" +#include "core/operator_set.hpp" #include "openvino/op/broadcast.hpp" #include "openvino/op/constant.hpp" #include "openvino/op/divide.hpp" #include "openvino/op/gather.hpp" -#include "openvino/op/group_normalization.hpp" #include "openvino/op/reshape.hpp" #include "openvino/op/shape_of.hpp" #include "openvino/op/unsqueeze.hpp" - using namespace ov::op; using ov::Shape; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector group_normalization(const ov::frontend::onnx::Node& node) { const auto inputs = node.get_ov_inputs(); OPENVINO_ASSERT(inputs.size() == 3); @@ -52,8 +51,9 @@ ov::OutputVector group_normalization(const ov::frontend::onnx::Node& node) { return {std::make_shared(data, c_scale, c_bias, num_groups, eps)}; } -} // namespace set_1 -} // namespace op +ONNX_OP("GroupNormalization", OPSET_SINCE(1), ai_onnx::opset_1::group_normalization); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/group_normalization.hpp b/src/frontends/onnx/frontend/src/op/group_normalization.hpp deleted file mode 100644 index 495fd27aaaeb47..00000000000000 --- a/src/frontends/onnx/frontend/src/op/group_normalization.hpp +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector group_normalization(const ov::frontend::onnx::Node& node); - -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/gru.cpp b/src/frontends/onnx/frontend/src/op/gru.cpp index 1fa59be80b6d3f..11383962dbe0e1 100644 --- a/src/frontends/onnx/frontend/src/op/gru.cpp 
+++ b/src/frontends/onnx/frontend/src/op/gru.cpp @@ -2,9 +2,8 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/gru.hpp" - #include "core/null_node.hpp" +#include "core/operator_set.hpp" #include "openvino/op/add.hpp" #include "openvino/op/concat.hpp" #include "openvino/op/constant.hpp" @@ -12,15 +11,14 @@ #include "utils/recurrent.hpp" #include "utils/reshape.hpp" #include "utils/split.hpp" - using namespace ov::op; using ov::Shape; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { namespace { struct GRUInputMap : public recurrent::OpInputMap { GRUInputMap(const Node& node, std::size_t gates_count) : OpInputMap(node, gates_count) { @@ -102,9 +100,9 @@ ov::OutputVector gru(const ov::frontend::onnx::Node& node) { return {ov::op::util::reorder_axes(Y, {2, 1, 0, 3}), ov::op::util::reorder_axes(Y_h, {1, 0, 2})}; } - -} // namespace set_1 -} // namespace op +ONNX_OP("GRU", OPSET_SINCE(1), ai_onnx::opset_1::gru); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/gru.hpp b/src/frontends/onnx/frontend/src/op/gru.hpp deleted file mode 100644 index deff5921e333bf..00000000000000 --- a/src/frontends/onnx/frontend/src/op/gru.hpp +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector gru(const ov::frontend::onnx::Node& node); - -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/hammingwindow.cpp b/src/frontends/onnx/frontend/src/op/hammingwindow.cpp index 0ebc3f770c216e..337dc1528c7a17 100644 --- a/src/frontends/onnx/frontend/src/op/hammingwindow.cpp +++ 
b/src/frontends/onnx/frontend/src/op/hammingwindow.cpp @@ -3,10 +3,9 @@ #define _USE_MATH_DEFINES -#include "op/hammingwindow.hpp" - #include +#include "core/operator_set.hpp" #include "openvino/op/constant.hpp" #include "openvino/op/convert.hpp" #include "openvino/op/cos.hpp" @@ -15,14 +14,13 @@ #include "openvino/op/range.hpp" #include "openvino/op/subtract.hpp" #include "utils/common.hpp" - using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector hammingwindow(const ov::frontend::onnx::Node& node) { const auto size = node.get_ov_inputs().at(0); const auto output_datatype = common::get_ov_element_type(node.get_attribute_value("output_datatype", 1)); @@ -67,8 +65,9 @@ ov::OutputVector hammingwindow(const ov::frontend::onnx::Node& node) { return {std::make_shared(y_values, output_datatype)}; } } -} // namespace set_1 -} // namespace op +ONNX_OP("HammingWindow", OPSET_SINCE(1), ai_onnx::opset_1::hammingwindow); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/hammingwindow.hpp b/src/frontends/onnx/frontend/src/op/hammingwindow.hpp deleted file mode 100644 index 0003e0f442261d..00000000000000 --- a/src/frontends/onnx/frontend/src/op/hammingwindow.hpp +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { - -ov::OutputVector hammingwindow(const ov::frontend::onnx::Node& node); - -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/hannwindow.cpp b/src/frontends/onnx/frontend/src/op/hannwindow.cpp index c5de03380a5751..2f72d2a70468c1 100644 --- 
a/src/frontends/onnx/frontend/src/op/hannwindow.cpp +++ b/src/frontends/onnx/frontend/src/op/hannwindow.cpp @@ -3,10 +3,9 @@ #define _USE_MATH_DEFINES -#include "op/hannwindow.hpp" - #include +#include "core/operator_set.hpp" #include "openvino/op/constant.hpp" #include "openvino/op/convert.hpp" #include "openvino/op/cos.hpp" @@ -15,14 +14,13 @@ #include "openvino/op/range.hpp" #include "openvino/op/subtract.hpp" #include "utils/common.hpp" - using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector hannwindow(const ov::frontend::onnx::Node& node) { const auto size = node.get_ov_inputs().at(0); const auto output_datatype = common::get_ov_element_type(node.get_attribute_value("output_datatype", 1)); @@ -63,8 +61,9 @@ ov::OutputVector hannwindow(const ov::frontend::onnx::Node& node) { return {std::make_shared(y_values, output_datatype)}; } } -} // namespace set_1 -} // namespace op +ONNX_OP("HannWindow", OPSET_SINCE(1), ai_onnx::opset_1::hannwindow); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/hannwindow.hpp b/src/frontends/onnx/frontend/src/op/hannwindow.hpp deleted file mode 100644 index 085c41e4001347..00000000000000 --- a/src/frontends/onnx/frontend/src/op/hannwindow.hpp +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { - -ov::OutputVector hannwindow(const ov::frontend::onnx::Node& node); - -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/hard_sigmoid.cpp b/src/frontends/onnx/frontend/src/op/hard_sigmoid.cpp index 
a252ccafc811cd..c5323197323646 100644 --- a/src/frontends/onnx/frontend/src/op/hard_sigmoid.cpp +++ b/src/frontends/onnx/frontend/src/op/hard_sigmoid.cpp @@ -2,19 +2,18 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/hard_sigmoid.hpp" - -#include "openvino/op/constant.hpp" #include "openvino/op/hard_sigmoid.hpp" +#include "core/operator_set.hpp" +#include "openvino/op/constant.hpp" using namespace ov::op; using ov::Shape; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector hard_sigmoid(const ov::frontend::onnx::Node& node) { const auto data = node.get_ov_inputs().at(0); @@ -30,8 +29,9 @@ ov::OutputVector hard_sigmoid(const ov::frontend::onnx::Node& node) { return {std::make_shared(data, alpha, beta)}; } -} // namespace set_1 -} // namespace op +ONNX_OP("HardSigmoid", OPSET_SINCE(1), ai_onnx::opset_1::hard_sigmoid); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/hard_sigmoid.hpp b/src/frontends/onnx/frontend/src/op/hard_sigmoid.hpp deleted file mode 100644 index a6f23ecfead8bb..00000000000000 --- a/src/frontends/onnx/frontend/src/op/hard_sigmoid.hpp +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector hard_sigmoid(const ov::frontend::onnx::Node& node); - -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/hard_swish.hpp b/src/frontends/onnx/frontend/src/op/hard_swish.cpp similarity index 55% rename from src/frontends/onnx/frontend/src/op/hard_swish.hpp rename to src/frontends/onnx/frontend/src/op/hard_swish.cpp index 
6316a15843d3d2..ed98f49c2b3b38 100644 --- a/src/frontends/onnx/frontend/src/op/hard_swish.hpp +++ b/src/frontends/onnx/frontend/src/op/hard_swish.cpp @@ -2,21 +2,20 @@ // SPDX-License-Identifier: Apache-2.0 // -#pragma once - -#include "core/node.hpp" +#include "core/operator_set.hpp" #include "openvino/op/hswish.hpp" namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { -inline ov::OutputVector hard_swish(const ov::frontend::onnx::Node& node) { +namespace ai_onnx { +namespace opset_1 { +ov::OutputVector hard_swish(const ov::frontend::onnx::Node& node) { return {std::make_shared(node.get_ov_inputs().at(0))}; } -} // namespace set_1 -} // namespace op +ONNX_OP("HardSwish", OPSET_SINCE(1), ai_onnx::opset_1::hard_swish); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/hardmax.cpp b/src/frontends/onnx/frontend/src/op/hardmax.cpp index 782ceae1c641a3..d0b93ab1ffbe6a 100644 --- a/src/frontends/onnx/frontend/src/op/hardmax.cpp +++ b/src/frontends/onnx/frontend/src/op/hardmax.cpp @@ -2,8 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/hardmax.hpp" - +#include "core/operator_set.hpp" #include "exceptions.hpp" #include "openvino/core/validation_util.hpp" #include "openvino/op/constant.hpp" @@ -15,15 +14,14 @@ #include "openvino/op/topk.hpp" #include "utils/common.hpp" #include "utils/reshape.hpp" - using namespace ov::op; using ov::Shape; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector hardmax(const ov::frontend::onnx::Node& node) { const auto input = node.get_ov_inputs().at(0); const auto& input_shape = input.get_partial_shape(); @@ -60,8 +58,9 @@ ov::OutputVector hardmax(const ov::frontend::onnx::Node& node) { return {std::make_shared(converted_results, output_shape, false)}; } -} // namespace set_1 -namespace set_13 { 
+ONNX_OP("Hardmax", OPSET_RANGE(1, 12), ai_onnx::opset_1::hardmax); +} // namespace opset_1 +namespace opset_13 { ov::OutputVector hardmax(const ov::frontend::onnx::Node& node) { const auto input = node.get_ov_inputs().at(0); const auto& input_shape = input.get_partial_shape(); @@ -92,8 +91,9 @@ ov::OutputVector hardmax(const ov::frontend::onnx::Node& node) { return {std::make_shared(converted_results, output_shape, false)}; } -} // namespace set_13 -} // namespace op +ONNX_OP("Hardmax", OPSET_SINCE(13), ai_onnx::opset_13::hardmax); +} // namespace opset_13 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/hardmax.hpp b/src/frontends/onnx/frontend/src/op/hardmax.hpp deleted file mode 100644 index 569961a34184db..00000000000000 --- a/src/frontends/onnx/frontend/src/op/hardmax.hpp +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector hardmax(const ov::frontend::onnx::Node& node); -} // namespace set_1 - -namespace set_13 { -ov::OutputVector hardmax(const ov::frontend::onnx::Node& node); -} // namespace set_13 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/identity.hpp b/src/frontends/onnx/frontend/src/op/identity.cpp similarity index 61% rename from src/frontends/onnx/frontend/src/op/identity.hpp rename to src/frontends/onnx/frontend/src/op/identity.cpp index 90bfe2ac6cd396..016ce7a33df29b 100644 --- a/src/frontends/onnx/frontend/src/op/identity.hpp +++ b/src/frontends/onnx/frontend/src/op/identity.cpp @@ -2,25 +2,24 @@ // SPDX-License-Identifier: Apache-2.0 // -#pragma once - -#include "core/node.hpp" +#include "core/operator_set.hpp" #include "utils/common.hpp" namespace ov { 
namespace frontend { namespace onnx { -namespace op { -namespace set_1 { -inline ov::OutputVector identity(const ov::frontend::onnx::Node& node) { +namespace ai_onnx { +namespace opset_1 { +ov::OutputVector identity(const ov::frontend::onnx::Node& node) { ov::OutputVector outputs = node.get_ov_inputs(); for (auto& out : outputs) { common::mark_as_optimized_out(out); } return outputs; } -} // namespace set_1 -} // namespace op +ONNX_OP("Identity", OPSET_SINCE(1), ai_onnx::opset_1::identity); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/if.cpp b/src/frontends/onnx/frontend/src/op/if.cpp index c84498495060ce..5a25523dc23db7 100644 --- a/src/frontends/onnx/frontend/src/op/if.cpp +++ b/src/frontends/onnx/frontend/src/op/if.cpp @@ -2,20 +2,19 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/if.hpp" +#include "openvino/op/if.hpp" #include "core/graph.hpp" +#include "core/operator_set.hpp" #include "openvino/core/model.hpp" #include "openvino/frontend/exception.hpp" -#include "openvino/op/if.hpp" - using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector if_op(const ov::frontend::onnx::Node& node) { const auto& ng_inputs = node.get_ov_inputs(); FRONT_END_GENERAL_CHECK(ng_inputs.size() == 1, "If operator takes only one input"); @@ -67,8 +66,9 @@ ov::OutputVector if_op(const ov::frontend::onnx::Node& node) { return if_node->outputs(); } -} // namespace set_1 -} // namespace op +ONNX_OP("If", OPSET_SINCE(1), ai_onnx::opset_1::if_op); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/if.hpp b/src/frontends/onnx/frontend/src/op/if.hpp deleted file mode 100644 index 65f1e2dbbc6180..00000000000000 --- 
a/src/frontends/onnx/frontend/src/op/if.hpp +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -/// \brief Convert ONNX If operation to an OV node. -/// -/// \param node The ONNX node object representing this operation. -/// -/// \return The vector containing OV nodes producing output of ONNX If -/// operation. -ov::OutputVector if_op(const ov::frontend::onnx::Node& node); - -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/image_scaler.cpp b/src/frontends/onnx/frontend/src/op/image_scaler.cpp index b4e181e2dea817..a82ac81fe81e22 100644 --- a/src/frontends/onnx/frontend/src/op/image_scaler.cpp +++ b/src/frontends/onnx/frontend/src/op/image_scaler.cpp @@ -2,20 +2,18 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/image_scaler.hpp" - +#include "core/operator_set.hpp" #include "openvino/frontend/exception.hpp" #include "openvino/op/add.hpp" #include "openvino/op/constant.hpp" #include "openvino/op/multiply.hpp" - using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector image_scaler(const ov::frontend::onnx::Node& node) { const auto inputs = node.get_ov_inputs(); FRONT_END_GENERAL_CHECK(inputs.size() == 1, "ImageScaler 1 input tensor. 
Got: ", inputs.size()); @@ -42,8 +40,9 @@ ov::OutputVector image_scaler(const ov::frontend::onnx::Node& node) { return {scaler}; } -} // namespace set_1 -} // namespace op +ONNX_OP("ImageScaler", OPSET_SINCE(1), ai_onnx::opset_1::image_scaler); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/image_scaler.hpp b/src/frontends/onnx/frontend/src/op/image_scaler.hpp deleted file mode 100644 index 6dac9390bd54ce..00000000000000 --- a/src/frontends/onnx/frontend/src/op/image_scaler.hpp +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector image_scaler(const ov::frontend::onnx::Node& node); -} -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/instance_norm.cpp b/src/frontends/onnx/frontend/src/op/instance_norm.cpp index 51b3cac2abe923..8d3bc5f1f1c7a3 100644 --- a/src/frontends/onnx/frontend/src/op/instance_norm.cpp +++ b/src/frontends/onnx/frontend/src/op/instance_norm.cpp @@ -2,8 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/instance_norm.hpp" - +#include "core/operator_set.hpp" #include "exceptions.hpp" #include "openvino/op/add.hpp" #include "openvino/op/multiply.hpp" @@ -11,14 +10,13 @@ #include "openvino/op/shape_of.hpp" #include "utils/common.hpp" #include "utils/reshape.hpp" - using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector instance_norm(const ov::frontend::onnx::Node& node) { ov::Output data(node.get_ov_inputs().at(0)); ov::Output scale(node.get_ov_inputs().at(1)); @@ -77,8 +75,9 @@ ov::OutputVector instance_norm(const 
ov::frontend::onnx::Node& node) { return {result}; } -} // namespace set_1 -} // namespace op +ONNX_OP("InstanceNormalization", OPSET_SINCE(1), ai_onnx::opset_1::instance_norm); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/instance_norm.hpp b/src/frontends/onnx/frontend/src/op/instance_norm.hpp deleted file mode 100644 index 26b4f83691f02d..00000000000000 --- a/src/frontends/onnx/frontend/src/op/instance_norm.hpp +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -/// \brief Creates OV node representing ONNX InstanceNormalization -/// operator. -/// -/// \note The resulting node represents following equation: -/// y = scale * (x - mean) / sqrt(variance + epsilon) + B -/// where mean and variance are computed per instance per channel. -/// -/// \param[in] node The input ONNX node representing this operation. -/// -/// \return Vector of nodes containting resulting OV nodes. 
-/// -ov::OutputVector instance_norm(const ov::frontend::onnx::Node& node); -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/is_finite.cpp b/src/frontends/onnx/frontend/src/op/is_finite.cpp index d7aa90d26cb3f6..b6f628f43137f3 100644 --- a/src/frontends/onnx/frontend/src/op/is_finite.cpp +++ b/src/frontends/onnx/frontend/src/op/is_finite.cpp @@ -2,25 +2,24 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/is_finite.hpp" - +#include "core/operator_set.hpp" #include "openvino/opsets/opset10.hpp" - using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector is_finite(const ov::frontend::onnx::Node& node) { const auto data = node.get_ov_inputs().at(0); return {std::make_shared(data)}; } -} // namespace set_1 -} // namespace op +ONNX_OP("IsFinite", OPSET_SINCE(1), ai_onnx::opset_1::is_finite); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/is_finite.hpp b/src/frontends/onnx/frontend/src/op/is_finite.hpp deleted file mode 100644 index abfa8d24886198..00000000000000 --- a/src/frontends/onnx/frontend/src/op/is_finite.hpp +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { - -ov::OutputVector is_finite(const ov::frontend::onnx::Node& node); - -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/is_inf.cpp b/src/frontends/onnx/frontend/src/op/is_inf.cpp index e9794dac378b05..b321ecee84fbe4 100644 --- 
a/src/frontends/onnx/frontend/src/op/is_inf.cpp +++ b/src/frontends/onnx/frontend/src/op/is_inf.cpp @@ -2,17 +2,15 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/is_inf.hpp" - +#include "core/operator_set.hpp" #include "openvino/opsets/opset10.hpp" - using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector is_inf(const ov::frontend::onnx::Node& node) { const auto data = node.get_ov_inputs().at(0); @@ -22,8 +20,9 @@ ov::OutputVector is_inf(const ov::frontend::onnx::Node& node) { return {std::make_shared(data, attributes)}; } -} // namespace set_1 -} // namespace op +ONNX_OP("IsInf", OPSET_SINCE(1), ai_onnx::opset_1::is_inf); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/is_inf.hpp b/src/frontends/onnx/frontend/src/op/is_inf.hpp deleted file mode 100644 index 44398bb7c74ca3..00000000000000 --- a/src/frontends/onnx/frontend/src/op/is_inf.hpp +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector is_inf(const ov::frontend::onnx::Node& node); - -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/is_nan.cpp b/src/frontends/onnx/frontend/src/op/is_nan.cpp index 7c3f678b75e312..7dd957eed7c4ba 100644 --- a/src/frontends/onnx/frontend/src/op/is_nan.cpp +++ b/src/frontends/onnx/frontend/src/op/is_nan.cpp @@ -2,25 +2,25 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/is_nan.hpp" - #include "openvino/op/is_nan.hpp" +#include "core/operator_set.hpp" using namespace ov::op; namespace ov { namespace frontend { namespace 
onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector is_nan(const ov::frontend::onnx::Node& node) { const auto data = node.get_ov_inputs().at(0); return {std::make_shared(data)}; } -} // namespace set_1 -} // namespace op +ONNX_OP("IsNaN", OPSET_SINCE(1), ai_onnx::opset_1::is_nan); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/is_nan.hpp b/src/frontends/onnx/frontend/src/op/is_nan.hpp deleted file mode 100644 index 863120fa08db04..00000000000000 --- a/src/frontends/onnx/frontend/src/op/is_nan.hpp +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { - -ov::OutputVector is_nan(const ov::frontend::onnx::Node& node); - -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/layer_normalization.cpp b/src/frontends/onnx/frontend/src/op/layer_normalization.cpp index 0dd172401b8d42..5c382c694e7910 100644 --- a/src/frontends/onnx/frontend/src/op/layer_normalization.cpp +++ b/src/frontends/onnx/frontend/src/op/layer_normalization.cpp @@ -2,8 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/layer_normalization.hpp" - +#include "core/operator_set.hpp" #include "exceptions.hpp" #include "openvino/op/add.hpp" #include "openvino/op/constant.hpp" @@ -15,22 +14,21 @@ #include "openvino/op/shape_of.hpp" #include "openvino/op/squeeze.hpp" #include "utils/common.hpp" - using namespace ov::op; using namespace ov::op::v0; using namespace ov::op::v1; using ::ONNX_NAMESPACE::TensorProto_DataType; using ov::Shape; -inline ov::Output rank(const ov::Output& source) { +ov::Output rank(const ov::Output& source) { return 
std::make_shared(std::make_shared(std::make_shared(source))); } namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector layer_normalization(const ov::frontend::onnx::Node& node) { // Operator definition: https://github.com/onnx/onnx/blob/main/onnx/defs/nn/defs.cc#L2562:L2611 @@ -83,8 +81,9 @@ ov::OutputVector layer_normalization(const ov::frontend::onnx::Node& node) { return ov::OutputVector{biased}; } -} // namespace set_1 -} // namespace op +ONNX_OP("LayerNormalization", OPSET_SINCE(1), ai_onnx::opset_1::layer_normalization); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/layer_normalization.hpp b/src/frontends/onnx/frontend/src/op/layer_normalization.hpp deleted file mode 100644 index b0b79213be4cd7..00000000000000 --- a/src/frontends/onnx/frontend/src/op/layer_normalization.hpp +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// -#pragma once - -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector layer_normalization(const ov::frontend::onnx::Node& node); -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov -OPENVINO_SUPPRESS_DEPRECATED_END diff --git a/src/frontends/onnx/frontend/src/op/leaky_relu.cpp b/src/frontends/onnx/frontend/src/op/leaky_relu.cpp index 15b108bf5730d7..2ad90e121cf8be 100644 --- a/src/frontends/onnx/frontend/src/op/leaky_relu.cpp +++ b/src/frontends/onnx/frontend/src/op/leaky_relu.cpp @@ -2,20 +2,18 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/leaky_relu.hpp" - +#include "core/operator_set.hpp" #include "exceptions.hpp" #include 
"openvino/op/constant.hpp" #include "openvino/op/prelu.hpp" - using namespace ov::op; using ov::Shape; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector leaky_relu(const ov::frontend::onnx::Node& node) { auto data = node.get_ov_inputs().at(0); double alpha = node.get_attribute_value("alpha", 0.01); @@ -24,8 +22,9 @@ ov::OutputVector leaky_relu(const ov::frontend::onnx::Node& node) { return {std::make_shared(data, alpha_node)}; } -} // namespace set_1 -} // namespace op +ONNX_OP("LeakyRelu", OPSET_SINCE(1), ai_onnx::opset_1::leaky_relu); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/leaky_relu.hpp b/src/frontends/onnx/frontend/src/op/leaky_relu.hpp deleted file mode 100644 index a75c552e9ed8a4..00000000000000 --- a/src/frontends/onnx/frontend/src/op/leaky_relu.hpp +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector leaky_relu(const ov::frontend::onnx::Node& node); - -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/less.hpp b/src/frontends/onnx/frontend/src/op/less.cpp similarity index 59% rename from src/frontends/onnx/frontend/src/op/less.hpp rename to src/frontends/onnx/frontend/src/op/less.cpp index 6f497dba7b2b71..35e275ac1b98a5 100644 --- a/src/frontends/onnx/frontend/src/op/less.hpp +++ b/src/frontends/onnx/frontend/src/op/less.cpp @@ -2,22 +2,22 @@ // SPDX-License-Identifier: Apache-2.0 // -#pragma once - -#include "core/node.hpp" #include "openvino/op/less.hpp" +#include "core/operator_set.hpp" + namespace ov { namespace frontend 
{ namespace onnx { -namespace op { -namespace set_1 { -inline ov::OutputVector less(const ov::frontend::onnx::Node& node) { +namespace ai_onnx { +namespace opset_1 { +ov::OutputVector less(const ov::frontend::onnx::Node& node) { return {std::make_shared(node.get_ov_inputs().at(0), node.get_ov_inputs().at(1))}; } -} // namespace set_1 -} // namespace op +ONNX_OP("Less", OPSET_SINCE(1), ai_onnx::opset_1::less); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/less_or_equal.cpp b/src/frontends/onnx/frontend/src/op/less_or_equal.cpp index b96df52b19bb9f..55824339622dc7 100644 --- a/src/frontends/onnx/frontend/src/op/less_or_equal.cpp +++ b/src/frontends/onnx/frontend/src/op/less_or_equal.cpp @@ -2,19 +2,17 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/less_or_equal.hpp" - +#include "core/operator_set.hpp" #include "openvino/frontend/exception.hpp" #include "openvino/op/less.hpp" #include "openvino/op/less_eq.hpp" - using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector less_or_equal(const ov::frontend::onnx::Node& node) { const auto& input = node.get_ov_inputs(); const auto a = input.at(0); @@ -23,17 +21,19 @@ ov::OutputVector less_or_equal(const ov::frontend::onnx::Node& node) { "The input data bfloat16 isn't supported in opset 12"); return {std::make_shared(a, b)}; } -} // namespace set_1 +ONNX_OP("LessOrEqual", OPSET_RANGE(1, 15), ai_onnx::opset_1::less_or_equal); +} // namespace opset_1 -namespace set_16 { +namespace opset_16 { ov::OutputVector less_or_equal(const ov::frontend::onnx::Node& node) { const auto& input = node.get_ov_inputs(); const auto a = input.at(0); const auto b = input.at(1); return {std::make_shared(a, b)}; } -} // namespace set_16 -} // namespace op +ONNX_OP("LessOrEqual", OPSET_SINCE(16), 
ai_onnx::opset_16::less_or_equal); +} // namespace opset_16 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/less_or_equal.hpp b/src/frontends/onnx/frontend/src/op/less_or_equal.hpp deleted file mode 100644 index 9db5cad7ffdf23..00000000000000 --- a/src/frontends/onnx/frontend/src/op/less_or_equal.hpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { - -ov::OutputVector less_or_equal(const ov::frontend::onnx::Node& node); - -} // namespace set_1 - -namespace set_16 { - -ov::OutputVector less_or_equal(const ov::frontend::onnx::Node& node); - -} // namespace set_16 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/log.cpp b/src/frontends/onnx/frontend/src/op/log.cpp index 85fa815c479dbe..8b6bf74375375f 100644 --- a/src/frontends/onnx/frontend/src/op/log.cpp +++ b/src/frontends/onnx/frontend/src/op/log.cpp @@ -2,23 +2,23 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/log.hpp" - #include "openvino/op/log.hpp" +#include "core/operator_set.hpp" using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector log(const ov::frontend::onnx::Node& node) { return {std::make_shared(node.get_ov_inputs().at(0))}; } -} // namespace set_1 -} // namespace op +ONNX_OP("Log", OPSET_SINCE(1), ai_onnx::opset_1::log); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/log.hpp b/src/frontends/onnx/frontend/src/op/log.hpp deleted file mode 100644 index 83d52aa458fe5d..00000000000000 --- 
a/src/frontends/onnx/frontend/src/op/log.hpp +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector log(const ov::frontend::onnx::Node& node); -} -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/log_softmax.cpp b/src/frontends/onnx/frontend/src/op/log_softmax.cpp index 4559406ad801cb..29bf1a85d9c14e 100644 --- a/src/frontends/onnx/frontend/src/op/log_softmax.cpp +++ b/src/frontends/onnx/frontend/src/op/log_softmax.cpp @@ -2,17 +2,16 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/log_softmax.hpp" +#include "openvino/op/log_softmax.hpp" +#include "core/operator_set.hpp" #include "openvino/core/validation_util.hpp" #include "openvino/frontend/exception.hpp" #include "openvino/op/constant.hpp" #include "openvino/op/log.hpp" -#include "openvino/op/log_softmax.hpp" #include "openvino/op/reshape.hpp" #include "openvino/op/shape_of.hpp" #include "utils/reshape.hpp" - using namespace ov::op; using ov::Shape; @@ -60,20 +59,22 @@ ov::OutputVector log_softmax(const ov::frontend::onnx::Node& node, const int64_t } } // namespace -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector log_softmax(const ov::frontend::onnx::Node& node) { return ov::frontend::onnx::log_softmax(node, 1); } -} // namespace set_1 +ONNX_OP("LogSoftmax", OPSET_RANGE(1, 12), ai_onnx::opset_1::log_softmax); +} // namespace opset_1 -namespace set_13 { +namespace opset_13 { ov::OutputVector log_softmax(const ov::frontend::onnx::Node& node) { const auto axis = node.get_attribute_value("axis", -1); return {std::make_shared(node.get_ov_inputs()[0], axis)}; } -} // namespace set_13 -} // namespace op +ONNX_OP("LogSoftmax", OPSET_SINCE(13), 
ai_onnx::opset_13::log_softmax); +} // namespace opset_13 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/log_softmax.hpp b/src/frontends/onnx/frontend/src/op/log_softmax.hpp deleted file mode 100644 index e756c40a049102..00000000000000 --- a/src/frontends/onnx/frontend/src/op/log_softmax.hpp +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector log_softmax(const ov::frontend::onnx::Node& node); - -} // namespace set_1 - -namespace set_13 { -ov::OutputVector log_softmax(const ov::frontend::onnx::Node& node); - -} // namespace set_13 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/loop.cpp b/src/frontends/onnx/frontend/src/op/loop.cpp index d997f3b3f81270..c9a97753084b0f 100644 --- a/src/frontends/onnx/frontend/src/op/loop.cpp +++ b/src/frontends/onnx/frontend/src/op/loop.cpp @@ -2,25 +2,24 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/loop.hpp" +#include "openvino/op/loop.hpp" #include "core/graph.hpp" #include "core/null_node.hpp" +#include "core/operator_set.hpp" #include "exceptions.hpp" #include "openvino/core/model.hpp" #include "openvino/op/constant.hpp" -#include "openvino/op/loop.hpp" #include "openvino/op/unsqueeze.hpp" #include "openvino/op/util/op_types.hpp" #include "utils/reshape.hpp" - using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { namespace { /// \brief Check if termination condition is true during all Loop /// iterations. 
@@ -173,8 +172,9 @@ ov::OutputVector loop(const ov::frontend::onnx::Node& node) { } return node_outputs; } -} // namespace set_1 -} // namespace op +ONNX_OP("Loop", OPSET_SINCE(1), ai_onnx::opset_1::loop); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/loop.hpp b/src/frontends/onnx/frontend/src/op/loop.hpp deleted file mode 100644 index cec8355532e52e..00000000000000 --- a/src/frontends/onnx/frontend/src/op/loop.hpp +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -/// \brief Creates OV node representing ONNX loop operator. -/// -/// \note Details available here: -/// https://github.com/onnx/onnx/blob/master/docs/Operators.md#Loop -/// -/// \param[in] node The input ONNX node representing this operation. -/// -/// \return Vector of nodes containting resulting OV nodes. 
-/// -ov::OutputVector loop(const ov::frontend::onnx::Node& node); -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/lp_norm.cpp b/src/frontends/onnx/frontend/src/op/lp_norm.cpp index c030a8216b3caa..555ec71daad759 100644 --- a/src/frontends/onnx/frontend/src/op/lp_norm.cpp +++ b/src/frontends/onnx/frontend/src/op/lp_norm.cpp @@ -2,21 +2,19 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/lp_norm.hpp" - +#include "core/operator_set.hpp" #include "exceptions.hpp" #include "openvino/core/validation_util.hpp" #include "openvino/op/constant.hpp" #include "openvino/op/divide.hpp" #include "utils/norm.hpp" - using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector lp_norm(const ov::frontend::onnx::Node& node) { const ov::Output data{node.get_ov_inputs().at(0)}; const auto data_shape = data.get_partial_shape(); @@ -38,8 +36,9 @@ ov::OutputVector lp_norm(const ov::frontend::onnx::Node& node) { return {std::make_shared(data, norm)}; } -} // namespace set_1 -} // namespace op +ONNX_OP("LpNormalization", OPSET_SINCE(1), ai_onnx::opset_1::lp_norm); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/lp_norm.hpp b/src/frontends/onnx/frontend/src/op/lp_norm.hpp deleted file mode 100644 index 05628d1d15f1e7..00000000000000 --- a/src/frontends/onnx/frontend/src/op/lp_norm.hpp +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -/// \brief Creates OV node representing ONNX LpNormalization operator. 
-/// -/// Suppose A contains spatial dimensions of input tensor, then -/// for matrix A we have p-norm defined as following double sum over -/// all elements: -/// ||A||_p = ||vec(A)||_p = -/// [sum_{i=1}^m sum_{j=1}^n abs(a_{i,j})^p]^{1/p} -/// -/// \param[in] node The input ONNX node representing this operation. -/// -/// \return Vector of nodes containting resulting OV nodes. -/// -ov::OutputVector lp_norm(const ov::frontend::onnx::Node& node); -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/lp_pool.cpp b/src/frontends/onnx/frontend/src/op/lp_pool.cpp index 715d1f49d929ef..21dd34ea8f013a 100644 --- a/src/frontends/onnx/frontend/src/op/lp_pool.cpp +++ b/src/frontends/onnx/frontend/src/op/lp_pool.cpp @@ -2,8 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/lp_pool.hpp" - +#include "core/operator_set.hpp" #include "exceptions.hpp" #include "openvino/frontend/exception.hpp" #include "openvino/op/concat.hpp" @@ -12,15 +11,14 @@ #include "utils/common.hpp" #include "utils/norm.hpp" #include "utils/split.hpp" - using namespace ov::op; using ov::Shape; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector global_lp_pool(const ov::frontend::onnx::Node& node) { const ov::Output data{node.get_ov_inputs().at(0)}; const std::size_t channel_axis{1}; @@ -57,8 +55,9 @@ ov::OutputVector global_lp_pool(const ov::frontend::onnx::Node& node) { return {std::make_shared(slices, channel_axis)}; } -} // namespace set_1 -} // namespace op +ONNX_OP("GlobalLpPool", OPSET_SINCE(1), ai_onnx::opset_1::global_lp_pool); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/lp_pool.hpp b/src/frontends/onnx/frontend/src/op/lp_pool.hpp deleted file mode 100644 index 
db9c7894ca927b..00000000000000 --- a/src/frontends/onnx/frontend/src/op/lp_pool.hpp +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -/// \brief Creates OV node representing ONNX GlobalLpPool operator. -/// -/// \note This functions calculates "entrywise" norms in spatial/feature -/// dimensions. That is it treats matrix/tensor in spatial/feature -/// dimensions as a vector and applies apropriate norm on it. The -/// result is a scalar. -/// -/// Suppose A contains spatial dimensions of input tensor, then -/// for matrix A we have p-norm defined as following double sum over -/// all elements: -/// ||A||_p = ||vec(A)||_p = -/// [sum_{i=1}^m sum_{j=1}^n abs(a_{i,j})^p]^{1/p} -/// -/// \param[in] node The input ONNX node representing this operation. -/// -/// \return Vector of nodes containting resulting OV nodes. 
-/// -ov::OutputVector global_lp_pool(const ov::frontend::onnx::Node& node); -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/lrn.cpp b/src/frontends/onnx/frontend/src/op/lrn.cpp index 77405c634f4c0f..698b21e220e8ce 100644 --- a/src/frontends/onnx/frontend/src/op/lrn.cpp +++ b/src/frontends/onnx/frontend/src/op/lrn.cpp @@ -2,17 +2,16 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/lrn.hpp" - #include "openvino/op/lrn.hpp" +#include "core/operator_set.hpp" using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector lrn(const ov::frontend::onnx::Node& node) { auto data = node.get_ov_inputs().at(0); double alpha = node.get_attribute_value("alpha", 1e-4); @@ -23,8 +22,9 @@ ov::OutputVector lrn(const ov::frontend::onnx::Node& node) { return {std::make_shared(data, alpha, beta, bias, size)}; } -} // namespace set_1 -} // namespace op +ONNX_OP("LRN", OPSET_SINCE(1), ai_onnx::opset_1::lrn); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/lrn.hpp b/src/frontends/onnx/frontend/src/op/lrn.hpp deleted file mode 100644 index dc261fa13c864d..00000000000000 --- a/src/frontends/onnx/frontend/src/op/lrn.hpp +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector lrn(const ov::frontend::onnx::Node& node); -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/lstm.cpp b/src/frontends/onnx/frontend/src/op/lstm.cpp index 
907bb881ca0067..63f68b3cd3072c 100644 --- a/src/frontends/onnx/frontend/src/op/lstm.cpp +++ b/src/frontends/onnx/frontend/src/op/lstm.cpp @@ -2,9 +2,8 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/lstm.hpp" - #include "core/null_node.hpp" +#include "core/operator_set.hpp" #include "exceptions.hpp" #include "openvino/op/add.hpp" #include "openvino/op/broadcast.hpp" @@ -17,14 +16,13 @@ #include "openvino/util/common_util.hpp" #include "utils/reshape.hpp" #include "utils/split.hpp" - using namespace ov::op; using ov::Shape; namespace ov { namespace frontend { namespace onnx { -namespace op { +namespace ai_onnx { namespace { // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ INPUT NODES PARSING ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -207,7 +205,7 @@ struct LSTMAttributes { } // anonymous namespace -namespace set_1 { +namespace opset_1 { ov::OutputVector lstm(const ov::frontend::onnx::Node& node) { LSTMNgInputMap input_map{node}; LSTMAttributes attributes{node}; @@ -255,8 +253,9 @@ ov::OutputVector lstm(const ov::frontend::onnx::Node& node) { ov::op::util::reorder_axes(Y_h, {1, 0, 2}), ov::op::util::reorder_axes(Y_c, {1, 0, 2})}; } -} // namespace set_1 -} // namespace op +ONNX_OP("LSTM", OPSET_SINCE(1), ai_onnx::opset_1::lstm); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/lstm.hpp b/src/frontends/onnx/frontend/src/op/lstm.hpp deleted file mode 100644 index d2c7a294db73f6..00000000000000 --- a/src/frontends/onnx/frontend/src/op/lstm.hpp +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector lstm(const ov::frontend::onnx::Node& node); - -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git 
a/src/frontends/onnx/frontend/src/op/matmul.hpp b/src/frontends/onnx/frontend/src/op/matmul.cpp similarity index 57% rename from src/frontends/onnx/frontend/src/op/matmul.hpp rename to src/frontends/onnx/frontend/src/op/matmul.cpp index a57c2d6c8d032b..d1f92018f393a4 100644 --- a/src/frontends/onnx/frontend/src/op/matmul.hpp +++ b/src/frontends/onnx/frontend/src/op/matmul.cpp @@ -2,26 +2,26 @@ // SPDX-License-Identifier: Apache-2.0 // -#pragma once - -#include "core/node.hpp" #include "openvino/op/matmul.hpp" +#include "core/operator_set.hpp" + namespace ov { namespace frontend { namespace onnx { -namespace op { +namespace ai_onnx { namespace detail { -inline ov::OutputVector matmul(const ov::Output& a, const ov::Output& b) { +ov::OutputVector matmul(const ov::Output& a, const ov::Output& b) { return {std::make_shared(a, b)}; } } // namespace detail -namespace set_1 { -inline ov::OutputVector matmul(const ov::frontend::onnx::Node& node) { +namespace opset_1 { +ov::OutputVector matmul(const ov::frontend::onnx::Node& node) { return {std::make_shared(node.get_ov_inputs().at(0), node.get_ov_inputs().at(1))}; } -} // namespace set_1 -} // namespace op +ONNX_OP("MatMul", OPSET_SINCE(1), ai_onnx::opset_1::matmul); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/matmul_integer.cpp b/src/frontends/onnx/frontend/src/op/matmul_integer.cpp index 0968342c416637..383de1ab577c02 100644 --- a/src/frontends/onnx/frontend/src/op/matmul_integer.cpp +++ b/src/frontends/onnx/frontend/src/op/matmul_integer.cpp @@ -2,21 +2,19 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/matmul_integer.hpp" - +#include "core/operator_set.hpp" #include "openvino/op/constant.hpp" #include "openvino/op/convert.hpp" #include "openvino/op/matmul.hpp" #include "openvino/op/subtract.hpp" #include "openvino/op/unsqueeze.hpp" - using namespace ov::op; namespace ov { namespace frontend { 
namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector matmul_integer(const ov::frontend::onnx::Node& node) { const ov::OutputVector& inputs = node.get_ov_inputs(); @@ -49,8 +47,9 @@ ov::OutputVector matmul_integer(const ov::frontend::onnx::Node& node) { return {result}; } -} // namespace set_1 -} // namespace op +ONNX_OP("MatMulInteger", OPSET_SINCE(1), ai_onnx::opset_1::matmul_integer); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/matmul_integer.hpp b/src/frontends/onnx/frontend/src/op/matmul_integer.hpp deleted file mode 100644 index 96d576f7be7813..00000000000000 --- a/src/frontends/onnx/frontend/src/op/matmul_integer.hpp +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -/// \brief Performs ONNX MatMulInteger operation. -/// -/// \param node The ONNX node object representing this operation. -/// -/// \return The vector containing OV nodes producing output of ONNX quantizied -/// matrix multiplication integer operation. 
-ov::OutputVector matmul_integer(const ov::frontend::onnx::Node& node); -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/max.hpp b/src/frontends/onnx/frontend/src/op/max.cpp similarity index 52% rename from src/frontends/onnx/frontend/src/op/max.hpp rename to src/frontends/onnx/frontend/src/op/max.cpp index 52055271b419d5..131e47910b99f2 100644 --- a/src/frontends/onnx/frontend/src/op/max.hpp +++ b/src/frontends/onnx/frontend/src/op/max.cpp @@ -2,30 +2,30 @@ // SPDX-License-Identifier: Apache-2.0 // -#pragma once - -#include "core/node.hpp" +#include "core/operator_set.hpp" #include "openvino/op/maximum.hpp" #include "utils/variadic.hpp" namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { -inline ov::OutputVector max(const ov::frontend::onnx::Node& node) { +namespace ai_onnx { +namespace opset_1 { +ov::OutputVector max(const ov::frontend::onnx::Node& node) { return variadic::make_ng_variadic_op(node, ov::op::AutoBroadcastType::NONE); } -} // namespace set_1 +ONNX_OP("Max", OPSET_RANGE(1, 7), ai_onnx::opset_1::max); +} // namespace opset_1 -namespace set_8 { -inline ov::OutputVector max(const ov::frontend::onnx::Node& node) { +namespace opset_8 { +ov::OutputVector max(const ov::frontend::onnx::Node& node) { return variadic::make_ng_variadic_op(node); } -} // namespace set_8 -} // namespace op +ONNX_OP("Max", OPSET_SINCE(8), ai_onnx::opset_8::max); +} // namespace opset_8 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/max_pool.cpp b/src/frontends/onnx/frontend/src/op/max_pool.cpp index b5198d49c35cd9..ac749564c3f5df 100644 --- a/src/frontends/onnx/frontend/src/op/max_pool.cpp +++ b/src/frontends/onnx/frontend/src/op/max_pool.cpp @@ -2,19 +2,17 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/max_pool.hpp" - #include 
"core/null_node.hpp" +#include "core/operator_set.hpp" #include "openvino/util/log.hpp" #include "utils/pooling_factory.hpp" - using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector max_pool(const ov::frontend::onnx::Node& node) { if (node.get_outputs_size() > 1) { OPENVINO_WARN << "MaxPool: Indices output is not supported and was ignored"; @@ -24,14 +22,16 @@ ov::OutputVector max_pool(const ov::frontend::onnx::Node& node) { return max_pool; } -} // namespace set_1 +ONNX_OP("MaxPool", OPSET_RANGE(1, 7), ai_onnx::opset_1::max_pool); +} // namespace opset_1 -namespace set_8 { +namespace opset_8 { ov::OutputVector max_pool(const ov::frontend::onnx::Node& node) { return pooling::PoolingFactory(node).make_max_pool_with_indices(); } -} // namespace set_8 -} // namespace op +ONNX_OP("MaxPool", OPSET_SINCE(8), ai_onnx::opset_8::max_pool); +} // namespace opset_8 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/max_pool.hpp b/src/frontends/onnx/frontend/src/op/max_pool.hpp deleted file mode 100644 index 11428b240b6564..00000000000000 --- a/src/frontends/onnx/frontend/src/op/max_pool.hpp +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -/// -/// \brief Convert ONNX MaxPool operation to an OV node. -/// -/// \param node The ONNX node object representing this operation. -/// -/// \return The vector containing OV nodes producing output of ONNX MaxPool -/// operation. -/// -ov::OutputVector max_pool(const ov::frontend::onnx::Node& node); - -} // namespace set_1 - -namespace set_8 { -/// -/// \brief Convert ONNX MaxPool operation to an OV node. 
-/// -/// \param node The ONNX node object representing this operation. -/// -/// \return The vector containing OV nodes producing output of ONNX MaxPool -/// operation. -/// -ov::OutputVector max_pool(const ov::frontend::onnx::Node& node); - -} // namespace set_8 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/max_roi_pool.cpp b/src/frontends/onnx/frontend/src/op/max_roi_pool.cpp index c19d3c0fee57e6..4c0b9592fd183f 100644 --- a/src/frontends/onnx/frontend/src/op/max_roi_pool.cpp +++ b/src/frontends/onnx/frontend/src/op/max_roi_pool.cpp @@ -2,18 +2,16 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/max_roi_pool.hpp" - +#include "core/operator_set.hpp" #include "openvino/frontend/exception.hpp" #include "openvino/op/roi_pooling.hpp" - using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector max_roi_pool(const ov::frontend::onnx::Node& node) { const auto& inputs = node.get_ov_inputs(); const auto X = inputs.at(0); @@ -28,8 +26,9 @@ ov::OutputVector max_roi_pool(const ov::frontend::onnx::Node& node) { return {std::make_shared(X, rois, ov::Shape(pooled_shape), spatial_scale, "max")}; } -} // namespace set_1 -} // namespace op +ONNX_OP("MaxRoiPool", OPSET_SINCE(1), ai_onnx::opset_1::max_roi_pool); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/max_roi_pool.hpp b/src/frontends/onnx/frontend/src/op/max_roi_pool.hpp deleted file mode 100644 index 064d050b78b996..00000000000000 --- a/src/frontends/onnx/frontend/src/op/max_roi_pool.hpp +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { 
-namespace op { -namespace set_1 { - -ov::OutputVector max_roi_pool(const ov::frontend::onnx::Node& node); - -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/mean.cpp b/src/frontends/onnx/frontend/src/op/mean.cpp index d4ab881bbe34f6..45144b165bf252 100644 --- a/src/frontends/onnx/frontend/src/op/mean.cpp +++ b/src/frontends/onnx/frontend/src/op/mean.cpp @@ -2,20 +2,18 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/mean.hpp" - +#include "core/operator_set.hpp" #include "openvino/op/add.hpp" #include "openvino/op/constant.hpp" #include "openvino/op/divide.hpp" #include "utils/variadic.hpp" - using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector mean(const ov::frontend::onnx::Node& node) { auto sum = variadic::make_ng_variadic_op(node).front(); auto count = v0::Constant::create(sum.get_element_type(), ov::Shape{}, {node.get_ov_inputs().size()}); @@ -23,8 +21,9 @@ ov::OutputVector mean(const ov::frontend::onnx::Node& node) { return {std::make_shared(sum, count)}; } -} // namespace set_1 -} // namespace op +ONNX_OP("Mean", OPSET_SINCE(1), ai_onnx::opset_1::mean); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/mean.hpp b/src/frontends/onnx/frontend/src/op/mean.hpp deleted file mode 100644 index 5b56ed63c676a7..00000000000000 --- a/src/frontends/onnx/frontend/src/op/mean.hpp +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector mean(const ov::frontend::onnx::Node& node); - -} // namespace set_1 -} // namespace op 
-} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/mean_variance_normalization.cpp b/src/frontends/onnx/frontend/src/op/mean_variance_normalization.cpp index 286303bb77f0c1..90fbe9353fca1d 100644 --- a/src/frontends/onnx/frontend/src/op/mean_variance_normalization.cpp +++ b/src/frontends/onnx/frontend/src/op/mean_variance_normalization.cpp @@ -2,19 +2,17 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/mean_variance_normalization.hpp" - +#include "core/operator_set.hpp" #include "openvino/core/validation_util.hpp" #include "openvino/op/constant.hpp" #include "openvino/op/mvn.hpp" - using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector mean_variance_normalization(const ov::frontend::onnx::Node& node) { auto data = node.get_ov_inputs().at(0); bool across_channels = node.get_attribute_value("across_channels", 0); @@ -23,9 +21,10 @@ ov::OutputVector mean_variance_normalization(const ov::frontend::onnx::Node& nod return {std::make_shared(data, across_channels, normalize_variance)}; } -} // namespace set_1 +ONNX_OP("MeanVarianceNormalization", OPSET_RANGE(1, 8), ai_onnx::opset_1::mean_variance_normalization); +} // namespace opset_1 -namespace set_9 { +namespace opset_9 { ov::OutputVector mean_variance_normalization(const ov::frontend::onnx::Node& node) { auto data = node.get_ov_inputs().at(0); auto axes = node.get_attribute_value>("axes", {0, 2, 3}); @@ -35,8 +34,9 @@ ov::OutputVector mean_variance_normalization(const ov::frontend::onnx::Node& nod return {std::make_shared(data, const_axes, true, 1e-09f, ov::op::MVNEpsMode::OUTSIDE_SQRT)}; } -} // namespace set_9 -} // namespace op +ONNX_OP("MeanVarianceNormalization", OPSET_SINCE(9), ai_onnx::opset_9::mean_variance_normalization); +} // namespace opset_9 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // 
namespace ov diff --git a/src/frontends/onnx/frontend/src/op/mean_variance_normalization.hpp b/src/frontends/onnx/frontend/src/op/mean_variance_normalization.hpp deleted file mode 100644 index 7797b23ad6956b..00000000000000 --- a/src/frontends/onnx/frontend/src/op/mean_variance_normalization.hpp +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector mean_variance_normalization(const ov::frontend::onnx::Node& node); -} // namespace set_1 - -namespace set_9 { -ov::OutputVector mean_variance_normalization(const ov::frontend::onnx::Node& node); -} // namespace set_9 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/min.hpp b/src/frontends/onnx/frontend/src/op/min.cpp similarity index 50% rename from src/frontends/onnx/frontend/src/op/min.hpp rename to src/frontends/onnx/frontend/src/op/min.cpp index 92594e1f9cceef..143dcff6a26f85 100644 --- a/src/frontends/onnx/frontend/src/op/min.hpp +++ b/src/frontends/onnx/frontend/src/op/min.cpp @@ -2,29 +2,30 @@ // SPDX-License-Identifier: Apache-2.0 // -#pragma once - -#include "core/node.hpp" +#include "core/operator_set.hpp" #include "openvino/op/minimum.hpp" +#include "utils/variadic.hpp" namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { -inline ov::OutputVector min(const ov::frontend::onnx::Node& node) { +namespace ai_onnx { +namespace opset_1 { +ov::OutputVector min(const ov::frontend::onnx::Node& node) { return variadic::make_ng_variadic_op(node, ov::op::AutoBroadcastType::NONE); } -} // namespace set_1 +ONNX_OP("Min", OPSET_RANGE(1, 7), ai_onnx::opset_1::min); +} // namespace opset_1 -namespace set_8 {
+ov::OutputVector min(const ov::frontend::onnx::Node& node) { return variadic::make_ng_variadic_op(node); } -} // namespace set_8 -} // namespace op +ONNX_OP("Min", OPSET_SINCE(8), ai_onnx::opset_8::min); +} // namespace opset_8 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/mish.cpp b/src/frontends/onnx/frontend/src/op/mish.cpp index f563415c101c7d..3a64c6a93be5a0 100644 --- a/src/frontends/onnx/frontend/src/op/mish.cpp +++ b/src/frontends/onnx/frontend/src/op/mish.cpp @@ -2,24 +2,24 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/mish.hpp" - #include "openvino/op/mish.hpp" +#include "core/operator_set.hpp" using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector mish(const ov::frontend::onnx::Node& node) { const auto data = node.get_ov_inputs().at(0); return {std::make_shared(data)}; } -} // namespace set_1 -} // namespace op +ONNX_OP("Mish", OPSET_SINCE(1), ai_onnx::opset_1::mish); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/mish.hpp b/src/frontends/onnx/frontend/src/op/mish.hpp deleted file mode 100644 index c37550a80af46b..00000000000000 --- a/src/frontends/onnx/frontend/src/op/mish.hpp +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector mish(const ov::frontend::onnx::Node& node); -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/mmdeploy_roi_align_rotated.cpp 
b/src/frontends/onnx/frontend/src/op/mmdeploy_roi_align_rotated.cpp index 565c73674c75fd..44b33db2d7bdd2 100644 --- a/src/frontends/onnx/frontend/src/op/mmdeploy_roi_align_rotated.cpp +++ b/src/frontends/onnx/frontend/src/op/mmdeploy_roi_align_rotated.cpp @@ -2,22 +2,20 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/mmdeploy_roi_align_rotated.hpp" - +#include "core/operator_set.hpp" #include "openvino/frontend/exception.hpp" #include "openvino/op/constant.hpp" #include "openvino/op/convert.hpp" #include "openvino/op/reshape.hpp" #include "openvino/op/roi_align_rotated.hpp" #include "openvino/op/slice.hpp" - using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector mmdeploy_roi_align_rotated(const ov::frontend::onnx::Node& node) { const auto inputs = node.get_ov_inputs(); @@ -65,8 +63,9 @@ ov::OutputVector mmdeploy_roi_align_rotated(const ov::frontend::onnx::Node& node spatial_scale, clockwise)}; } -} // namespace set_1 -} // namespace op +ONNX_OP("MMCVRoIAlignRotated", OPSET_SINCE(1), ai_onnx::opset_1::mmdeploy_roi_align_rotated, MMDEPLOY_DOMAIN); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/mmdeploy_roi_align_rotated.hpp b/src/frontends/onnx/frontend/src/op/mmdeploy_roi_align_rotated.hpp deleted file mode 100644 index d5dbf0ff5db333..00000000000000 --- a/src/frontends/onnx/frontend/src/op/mmdeploy_roi_align_rotated.hpp +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector mmdeploy_roi_align_rotated(const ov::frontend::onnx::Node& node); - -} // namespace set_1 -} // namespace op -} // namespace onnx -} // 
namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/mod.cpp b/src/frontends/onnx/frontend/src/op/mod.cpp index 2124d4cf9c0b60..727b63b86735ab 100644 --- a/src/frontends/onnx/frontend/src/op/mod.cpp +++ b/src/frontends/onnx/frontend/src/op/mod.cpp @@ -2,20 +2,19 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/mod.hpp" +#include "openvino/op/mod.hpp" +#include "core/operator_set.hpp" #include "exceptions.hpp" #include "openvino/frontend/exception.hpp" #include "openvino/op/floor_mod.hpp" -#include "openvino/op/mod.hpp" - using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector mod(const ov::frontend::onnx::Node& node) { ov::Output dividend{node.get_ov_inputs().at(0)}; ov::Output divisor{node.get_ov_inputs().at(1)}; @@ -35,8 +34,9 @@ ov::OutputVector mod(const ov::frontend::onnx::Node& node) { return output; } -} // namespace set_1 -} // namespace op +ONNX_OP("Mod", OPSET_SINCE(1), ai_onnx::opset_1::mod); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/mod.hpp b/src/frontends/onnx/frontend/src/op/mod.hpp deleted file mode 100644 index f81d00bb677ce3..00000000000000 --- a/src/frontends/onnx/frontend/src/op/mod.hpp +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector mod(const ov::frontend::onnx::Node& node); -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/mul.hpp b/src/frontends/onnx/frontend/src/op/mul.cpp similarity index 50% rename from src/frontends/onnx/frontend/src/op/mul.hpp 
rename to src/frontends/onnx/frontend/src/op/mul.cpp index e60ae5a6296811..b576b0998d53b3 100644 --- a/src/frontends/onnx/frontend/src/op/mul.hpp +++ b/src/frontends/onnx/frontend/src/op/mul.cpp @@ -2,29 +2,30 @@ // SPDX-License-Identifier: Apache-2.0 // -#pragma once - -#include "core/node.hpp" +#include "core/operator_set.hpp" #include "openvino/op/multiply.hpp" +#include "utils/common.hpp" namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { -inline ov::OutputVector mul(const ov::frontend::onnx::Node& node) { +namespace ai_onnx { +namespace opset_1 { +ov::OutputVector mul(const ov::frontend::onnx::Node& node) { return common::handle_opset6_binary_op(node); } -} // namespace set_1 +ONNX_OP("Mul", OPSET_RANGE(1, 6), ai_onnx::opset_1::mul); +} // namespace opset_1 -namespace set_7 { -inline ov::OutputVector mul(const ov::frontend::onnx::Node& node) { +namespace opset_7 { +ov::OutputVector mul(const ov::frontend::onnx::Node& node) { return {std::make_shared(node.get_ov_inputs().at(0), node.get_ov_inputs().at(1))}; } -} // namespace set_7 -} // namespace op +ONNX_OP("Mul", OPSET_SINCE(7), ai_onnx::opset_7::mul); +} // namespace opset_7 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/multinomial.cpp b/src/frontends/onnx/frontend/src/op/multinomial.cpp index 294e19ed742477..dccbff1d31e19a 100644 --- a/src/frontends/onnx/frontend/src/op/multinomial.cpp +++ b/src/frontends/onnx/frontend/src/op/multinomial.cpp @@ -2,19 +2,18 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/multinomial.hpp" +#include "openvino/op/multinomial.hpp" +#include "core/operator_set.hpp" #include "exceptions.hpp" -#include "openvino/op/multinomial.hpp" #include "utils/common.hpp" - using namespace ov::op; using ::ONNX_NAMESPACE::TensorProto_DataType; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace 
opset_1 { ov::OutputVector multinomial(const ov::frontend::onnx::Node& node) { const auto input = node.get_ov_inputs().at(0); @@ -34,8 +33,9 @@ ov::OutputVector multinomial(const ov::frontend::onnx::Node& node) { return {multinomial_op}; } -} // namespace set_1 -} // namespace op +ONNX_OP("Multinomial", OPSET_SINCE(1), ai_onnx::opset_1::multinomial); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/multinomial.hpp b/src/frontends/onnx/frontend/src/op/multinomial.hpp deleted file mode 100644 index 81e9b31531cdc3..00000000000000 --- a/src/frontends/onnx/frontend/src/op/multinomial.hpp +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector multinomial(const ov::frontend::onnx::Node& node); -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/neg.hpp b/src/frontends/onnx/frontend/src/op/neg.cpp similarity index 57% rename from src/frontends/onnx/frontend/src/op/neg.hpp rename to src/frontends/onnx/frontend/src/op/neg.cpp index a47d48fe75d76f..c6d288fe9843b8 100644 --- a/src/frontends/onnx/frontend/src/op/neg.hpp +++ b/src/frontends/onnx/frontend/src/op/neg.cpp @@ -2,21 +2,20 @@ // SPDX-License-Identifier: Apache-2.0 // -#pragma once - -#include "core/node.hpp" +#include "core/operator_set.hpp" #include "openvino/op/negative.hpp" namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { -inline ov::OutputVector neg(const ov::frontend::onnx::Node& node) { +namespace ai_onnx { +namespace opset_1 { +ov::OutputVector neg(const ov::frontend::onnx::Node& node) { return {std::make_shared(node.get_ov_inputs().at(0))}; } -} // 
namespace set_1 -} // namespace op +ONNX_OP("Neg", OPSET_SINCE(1), ai_onnx::opset_1::neg); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/nms_rotated.hpp b/src/frontends/onnx/frontend/src/op/nms_rotated.cpp similarity index 82% rename from src/frontends/onnx/frontend/src/op/nms_rotated.hpp rename to src/frontends/onnx/frontend/src/op/nms_rotated.cpp index b03b47db50da2b..bab5d4c4fe01a5 100644 --- a/src/frontends/onnx/frontend/src/op/nms_rotated.hpp +++ b/src/frontends/onnx/frontend/src/op/nms_rotated.cpp @@ -2,20 +2,19 @@ // SPDX-License-Identifier: Apache-2.0 // -#pragma once +#include "openvino/op/nms_rotated.hpp" #include -#include "core/node.hpp" +#include "core/operator_set.hpp" #include "openvino/op/constant.hpp" -#include "openvino/op/nms_rotated.hpp" namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { -inline ov::OutputVector nms_rotated(const ov::frontend::onnx::Node& node) { +namespace ai_onnx { +namespace opset_1 { +ov::OutputVector nms_rotated(const ov::frontend::onnx::Node& node) { auto iou_threshold = node.get_attribute_value("iou_threshold"); auto score_threshold = node.get_attribute_value("score_threshold"); auto max_output_boxes_per_class = @@ -32,8 +31,9 @@ inline ov::OutputVector nms_rotated(const ov::frontend::onnx::Node& node) { return {nms->output(0)}; } -} // namespace set_1 -} // namespace op +ONNX_OP("NMSRotated", OPSET_SINCE(1), ai_onnx::opset_1::nms_rotated, MMDEPLOY_DOMAIN); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/non_max_suppression.cpp b/src/frontends/onnx/frontend/src/op/non_max_suppression.cpp index 1375d089538a1b..3e68e2118e11ef 100644 --- a/src/frontends/onnx/frontend/src/op/non_max_suppression.cpp +++ b/src/frontends/onnx/frontend/src/op/non_max_suppression.cpp @@ -5,18 
+5,18 @@ #include "openvino/op/non_max_suppression.hpp" #include "core/null_node.hpp" +#include "core/operator_set.hpp" #include "exceptions.hpp" #include "openvino/op/constant.hpp" #include "utils/reshape.hpp" - using namespace ov::op; using ov::Shape; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector non_max_suppression(const ov::frontend::onnx::Node& node) { using ov::op::util::is_null; // TODO: this op will not be tested until at least @@ -65,8 +65,9 @@ ov::OutputVector non_max_suppression(const ov::frontend::onnx::Node& node) { false)}; } -} // namespace set_1 -} // namespace op +ONNX_OP("NonMaxSuppression", OPSET_SINCE(1), ai_onnx::opset_1::non_max_suppression); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/non_max_suppression.hpp b/src/frontends/onnx/frontend/src/op/non_max_suppression.hpp deleted file mode 100644 index f8e3fb009972e6..00000000000000 --- a/src/frontends/onnx/frontend/src/op/non_max_suppression.hpp +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector non_max_suppression(const ov::frontend::onnx::Node& node); -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/non_zero.cpp b/src/frontends/onnx/frontend/src/op/non_zero.cpp index 98c044029a0bb7..dce74c3af8e6b6 100644 --- a/src/frontends/onnx/frontend/src/op/non_zero.cpp +++ b/src/frontends/onnx/frontend/src/op/non_zero.cpp @@ -2,24 +2,24 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/non_zero.hpp" - #include "openvino/op/non_zero.hpp" +#include 
"core/operator_set.hpp" using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector non_zero(const ov::frontend::onnx::Node& node) { auto data = node.get_ov_inputs().at(0); return {std::make_shared(data, ov::element::i64)}; } -} // namespace set_1 -} // namespace op +ONNX_OP("NonZero", OPSET_SINCE(1), ai_onnx::opset_1::non_zero); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/non_zero.hpp b/src/frontends/onnx/frontend/src/op/non_zero.hpp deleted file mode 100644 index cdcb7dd21e502f..00000000000000 --- a/src/frontends/onnx/frontend/src/op/non_zero.hpp +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -/// \brief Convert ONNX NonZero operation to an OV node. -/// -/// \param node The ONNX node object representing this operation. -/// -/// \return The vector containing OV nodes producing output of ONNX NonZero -/// operation. 
-ov::OutputVector non_zero(const ov::frontend::onnx::Node& node); - -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/not.hpp b/src/frontends/onnx/frontend/src/op/not.cpp similarity index 56% rename from src/frontends/onnx/frontend/src/op/not.hpp rename to src/frontends/onnx/frontend/src/op/not.cpp index 182b89ac53571e..860a72210b7529 100644 --- a/src/frontends/onnx/frontend/src/op/not.hpp +++ b/src/frontends/onnx/frontend/src/op/not.cpp @@ -2,22 +2,21 @@ // SPDX-License-Identifier: Apache-2.0 // -#pragma once - -#include "core/node.hpp" +#include "core/operator_set.hpp" #include "openvino/op/logical_not.hpp" namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { -inline ov::OutputVector logical_not(const ov::frontend::onnx::Node& node) { +namespace ai_onnx { +namespace opset_1 { +ov::OutputVector logical_not(const ov::frontend::onnx::Node& node) { return {std::make_shared(node.get_ov_inputs().at(0))}; } -} // namespace set_1 -} // namespace op +ONNX_OP("Not", OPSET_SINCE(1), ai_onnx::opset_1::logical_not); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/onehot.cpp b/src/frontends/onnx/frontend/src/op/onehot.cpp index ff93801564ce0c..9ecdd0b21c15a6 100644 --- a/src/frontends/onnx/frontend/src/op/onehot.cpp +++ b/src/frontends/onnx/frontend/src/op/onehot.cpp @@ -2,21 +2,19 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/onehot.hpp" - +#include "core/operator_set.hpp" #include "openvino/op/constant.hpp" #include "openvino/op/convert.hpp" #include "openvino/op/one_hot.hpp" #include "openvino/op/split.hpp" #include "utils/reshape.hpp" - using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector 
onehot(const ov::frontend::onnx::Node& node) { ov::OutputVector inputs{node.get_ov_inputs()}; auto indices = std::make_shared(inputs.at(0), ov::element::i64); @@ -33,8 +31,9 @@ ov::OutputVector onehot(const ov::frontend::onnx::Node& node) { return {std::make_shared(indices, depth, on_value, off_value, axis)}; } -} // namespace set_1 -} // namespace op +ONNX_OP("OneHot", OPSET_SINCE(1), ai_onnx::opset_1::onehot); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/onehot.hpp b/src/frontends/onnx/frontend/src/op/onehot.hpp deleted file mode 100644 index 6b66c1a8f755c4..00000000000000 --- a/src/frontends/onnx/frontend/src/op/onehot.hpp +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector onehot(const ov::frontend::onnx::Node& node); - -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/or.hpp b/src/frontends/onnx/frontend/src/op/or.cpp similarity index 58% rename from src/frontends/onnx/frontend/src/op/or.hpp rename to src/frontends/onnx/frontend/src/op/or.cpp index af121d6d0ad3d3..27f661b5697133 100644 --- a/src/frontends/onnx/frontend/src/op/or.hpp +++ b/src/frontends/onnx/frontend/src/op/or.cpp @@ -2,22 +2,21 @@ // SPDX-License-Identifier: Apache-2.0 // -#pragma once - -#include "core/node.hpp" +#include "core/operator_set.hpp" #include "openvino/op/logical_or.hpp" namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { -inline ov::OutputVector logical_or(const ov::frontend::onnx::Node& node) { +namespace ai_onnx { +namespace opset_1 { +ov::OutputVector logical_or(const ov::frontend::onnx::Node& node) { return 
{std::make_shared(node.get_ov_inputs().at(0), node.get_ov_inputs().at(1))}; } -} // namespace set_1 -} // namespace op +ONNX_OP("Or", OPSET_SINCE(1), ai_onnx::opset_1::logical_or); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/deformable_conv_2d.cpp b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/deformable_conv_2d.cpp index ff02d0f6e0a0d5..38f850e9cffef0 100644 --- a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/deformable_conv_2d.cpp +++ b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/deformable_conv_2d.cpp @@ -14,19 +14,17 @@ // limitations under the License. //***************************************************************************** -#include "op/org.openvinotoolkit/deformable_conv_2d.hpp" - +#include "core/operator_set.hpp" #include "openvino/frontend/exception.hpp" #include "openvino/op/deformable_convolution.hpp" #include "utils/convpool.hpp" - using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace org_openvinotoolkit { +namespace opset_1 { ov::OutputVector deformable_conv_2d(const ov::frontend::onnx::Node& node) { const ov::OutputVector& inputs = node.get_ov_inputs(); const auto strides = convpool::get_strides(node); @@ -64,8 +62,9 @@ ov::OutputVector deformable_conv_2d(const ov::frontend::onnx::Node& node) { FRONT_END_GENERAL_CHECK(false, "Invalid number of inputs"); } } -} // namespace set_1 -} // namespace op +ONNX_OP("DeformableConv2D", OPSET_SINCE(1), org_openvinotoolkit::opset_1::deformable_conv_2d, OPENVINO_ONNX_DOMAIN); +} // namespace opset_1 +} // namespace org_openvinotoolkit } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/deformable_conv_2d.hpp b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/deformable_conv_2d.hpp deleted file 
mode 100644 index 225b8980873cd7..00000000000000 --- a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/deformable_conv_2d.hpp +++ /dev/null @@ -1,32 +0,0 @@ -//***************************************************************************** -// Copyright 2017-2022 Intel Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -//***************************************************************************** - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector deformable_conv_2d(const ov::frontend::onnx::Node& node); - -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/detection_output.cpp b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/detection_output.cpp index 3fedd0ee365dca..e12268e647db94 100644 --- a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/detection_output.cpp +++ b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/detection_output.cpp @@ -2,19 +2,18 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/org.openvinotoolkit/detection_output.hpp" +#include "openvino/op/detection_output.hpp" -#include "core/node.hpp" +#include "core/operator_set.hpp" #include "openvino/frontend/exception.hpp" -#include "openvino/op/detection_output.hpp" using namespace ov::op; namespace ov { namespace frontend { 
namespace onnx { -namespace op { -namespace set_1 { +namespace org_openvinotoolkit { +namespace opset_1 { ov::OutputVector detection_output(const ov::frontend::onnx::Node& node) { auto inputs = node.get_ov_inputs(); @@ -67,8 +66,9 @@ ov::OutputVector detection_output(const ov::frontend::onnx::Node& node) { } } -} // namespace set_1 -} // namespace op +ONNX_OP("DetectionOutput", OPSET_SINCE(1), org_openvinotoolkit::opset_1::detection_output, OPENVINO_ONNX_DOMAIN); +} // namespace opset_1 +} // namespace org_openvinotoolkit } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/detection_output.hpp b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/detection_output.hpp deleted file mode 100644 index 6cfe32643eed85..00000000000000 --- a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/detection_output.hpp +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector detection_output(const ov::frontend::onnx::Node& node); - -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/detection_output.cpp b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/detection_output.cpp index 99f12bb0fb4bde..044a58b7648a62 100644 --- a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/detection_output.cpp +++ b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/detection_output.cpp @@ -2,9 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/org.openvinotoolkit/experimental_detectron/detection_output.hpp" - -#include "core/node.hpp" +#include 
"core/operator_set.hpp" #include "openvino/op/experimental_detectron_detection_output.hpp" using namespace ov::op; @@ -12,8 +10,8 @@ using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace org_openvinotoolkit { +namespace opset_1 { ov::OutputVector experimental_detectron_detection_output(const ov::frontend::onnx::Node& node) { using DetectionOutput = v6::ExperimentalDetectronDetectionOutput; @@ -37,8 +35,12 @@ ov::OutputVector experimental_detectron_detection_output(const ov::frontend::onn return {detection_output->output(0), detection_output->output(1), detection_output->output(2)}; } -} // namespace set_1 -} // namespace op +ONNX_OP("ExperimentalDetectronDetectionOutput", + OPSET_SINCE(1), + org_openvinotoolkit::opset_1::experimental_detectron_detection_output, + OPENVINO_ONNX_DOMAIN); +} // namespace opset_1 +} // namespace org_openvinotoolkit } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/detection_output.hpp b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/detection_output.hpp deleted file mode 100644 index 35b9f27b7cabc2..00000000000000 --- a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/detection_output.hpp +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector experimental_detectron_detection_output(const ov::frontend::onnx::Node& node); -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/generate_proposals_single_image.cpp 
b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/generate_proposals_single_image.cpp index e2ebe8b7214d9e..d087a80ee14b90 100644 --- a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/generate_proposals_single_image.cpp +++ b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/generate_proposals_single_image.cpp @@ -2,9 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/org.openvinotoolkit/experimental_detectron/generate_proposals_single_image.hpp" - -#include "core/node.hpp" +#include "core/operator_set.hpp" #include "openvino/frontend/exception.hpp" #include "openvino/op/experimental_detectron_generate_proposals.hpp" @@ -13,8 +11,8 @@ using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace org_openvinotoolkit { +namespace opset_1 { ov::OutputVector experimental_detectron_generate_proposals(const ov::frontend::onnx::Node& node) { using GenerateProposalsSingleImage = v6::ExperimentalDetectronGenerateProposalsSingleImage; @@ -39,8 +37,12 @@ ov::OutputVector experimental_detectron_generate_proposals(const ov::frontend::o return {generate_proposals_single_image->output(0), generate_proposals_single_image->output(1)}; } -} // namespace set_1 -} // namespace op +ONNX_OP("ExperimentalDetectronGenerateProposalsSingleImage", + OPSET_SINCE(1), + org_openvinotoolkit::opset_1::experimental_detectron_generate_proposals, + OPENVINO_ONNX_DOMAIN); +} // namespace opset_1 +} // namespace org_openvinotoolkit } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/generate_proposals_single_image.hpp b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/generate_proposals_single_image.hpp deleted file mode 100644 index 73e452b5c8fdb2..00000000000000 --- 
a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/generate_proposals_single_image.hpp +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector experimental_detectron_generate_proposals(const ov::frontend::onnx::Node& node); -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/prior_grid_generator.cpp b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/prior_grid_generator.cpp index e69761612acfe9..be26be9a7fb4a1 100644 --- a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/prior_grid_generator.cpp +++ b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/prior_grid_generator.cpp @@ -2,9 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/org.openvinotoolkit/experimental_detectron/prior_grid_generator.hpp" - -#include "core/node.hpp" +#include "core/operator_set.hpp" #include "openvino/op/experimental_detectron_prior_grid_generator.hpp" using namespace ov::op; @@ -12,8 +10,8 @@ using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace org_openvinotoolkit { +namespace opset_1 { ov::OutputVector experimental_detectron_prior_grid_generator(const ov::frontend::onnx::Node& node) { using PriorGridGenerator = v6::ExperimentalDetectronPriorGridGenerator; @@ -31,8 +29,12 @@ ov::OutputVector experimental_detectron_prior_grid_generator(const ov::frontend: return {std::make_shared(priors, feature_map, im_data, attrs)}; } -} // namespace set_1 -} // namespace op +ONNX_OP("ExperimentalDetectronPriorGridGenerator", + OPSET_SINCE(1), + 
org_openvinotoolkit::opset_1::experimental_detectron_prior_grid_generator, + OPENVINO_ONNX_DOMAIN); +} // namespace opset_1 +} // namespace org_openvinotoolkit } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/prior_grid_generator.hpp b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/prior_grid_generator.hpp deleted file mode 100644 index e474f3d4dd71e3..00000000000000 --- a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/prior_grid_generator.hpp +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector experimental_detectron_prior_grid_generator(const ov::frontend::onnx::Node& node); - -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/roi_feature_extractor.cpp b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/roi_feature_extractor.cpp index cc32f41ef1c360..dd216ba3465e57 100644 --- a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/roi_feature_extractor.cpp +++ b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/roi_feature_extractor.cpp @@ -2,9 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/org.openvinotoolkit/experimental_detectron/roi_feature_extractor.hpp" - -#include "core/node.hpp" +#include "core/operator_set.hpp" #include "openvino/op/experimental_detectron_roi_feature.hpp" using namespace ov::op; @@ -12,8 +10,8 @@ using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace 
org_openvinotoolkit { +namespace opset_1 { ov::OutputVector experimental_detectron_roi_feature_extractor(const ov::frontend::onnx::Node& node) { using ROIFeatureExtractor = v6::ExperimentalDetectronROIFeatureExtractor; @@ -28,8 +26,12 @@ ov::OutputVector experimental_detectron_roi_feature_extractor(const ov::frontend return {roi_feature_extractor->output(0), roi_feature_extractor->output(1)}; } -} // namespace set_1 -} // namespace op +ONNX_OP("ExperimentalDetectronROIFeatureExtractor", + OPSET_SINCE(1), + org_openvinotoolkit::opset_1::experimental_detectron_roi_feature_extractor, + OPENVINO_ONNX_DOMAIN); +} // namespace opset_1 +} // namespace org_openvinotoolkit } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/roi_feature_extractor.hpp b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/roi_feature_extractor.hpp deleted file mode 100644 index aef11441299888..00000000000000 --- a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/roi_feature_extractor.hpp +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector experimental_detectron_roi_feature_extractor(const ov::frontend::onnx::Node& node); -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/topk_rios.cpp b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/topk_rios.cpp index 00ae7a746aa98a..f0fdc62e241920 100644 --- a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/topk_rios.cpp +++ 
b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/topk_rios.cpp @@ -2,9 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/org.openvinotoolkit/experimental_detectron/topk_rios.hpp" - -#include "core/node.hpp" +#include "core/operator_set.hpp" #include "openvino/op/experimental_detectron_topkrois.hpp" using namespace ov::op; @@ -12,8 +10,8 @@ using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace org_openvinotoolkit { +namespace opset_1 { ov::OutputVector experimental_detectron_topk_rois(const ov::frontend::onnx::Node& node) { using TopKROIs = v6::ExperimentalDetectronTopKROIs; @@ -25,8 +23,12 @@ ov::OutputVector experimental_detectron_topk_rois(const ov::frontend::onnx::Node return {std::make_shared(input_rois, rois_probs, max_rois)}; } -} // namespace set_1 -} // namespace op +ONNX_OP("ExperimentalDetectronTopKROIs", + OPSET_SINCE(1), + org_openvinotoolkit::opset_1::experimental_detectron_topk_rois, + OPENVINO_ONNX_DOMAIN); +} // namespace opset_1 +} // namespace org_openvinotoolkit } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/topk_rios.hpp b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/topk_rios.hpp deleted file mode 100644 index c677c6c15f1e9c..00000000000000 --- a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/topk_rios.hpp +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector experimental_detectron_topk_rois(const ov::frontend::onnx::Node& node); - -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git 
a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/fake_quantize.cpp b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/fake_quantize.cpp index 155b1664166d82..79d34c13f63067 100644 --- a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/fake_quantize.cpp +++ b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/fake_quantize.cpp @@ -2,19 +2,18 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/org.openvinotoolkit/fake_quantize.hpp" +#include "openvino/op/fake_quantize.hpp" #include -#include "openvino/op/fake_quantize.hpp" - +#include "core/operator_set.hpp" using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace org_openvinotoolkit { +namespace opset_1 { ov::OutputVector fake_quantize(const ov::frontend::onnx::Node& node) { const auto inputs = node.get_ov_inputs(); const auto X = inputs.at(0); @@ -28,8 +27,9 @@ ov::OutputVector fake_quantize(const ov::frontend::onnx::Node& node) { return {std::make_shared(X, input_low, input_high, output_low, output_high, levels)}; } -} // namespace set_1 -} // namespace op +ONNX_OP("FakeQuantize", OPSET_SINCE(1), org_openvinotoolkit::opset_1::fake_quantize, OPENVINO_ONNX_DOMAIN); +} // namespace opset_1 +} // namespace org_openvinotoolkit } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/fake_quantize.hpp b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/fake_quantize.hpp deleted file mode 100644 index f2450aa152e6c9..00000000000000 --- a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/fake_quantize.hpp +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector fake_quantize(const ov::frontend::onnx::Node& node); - -} // namespace 
set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/generate_proposals.cpp b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/generate_proposals.cpp index 2099c3ca8aa872..d2516063207909 100644 --- a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/generate_proposals.cpp +++ b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/generate_proposals.cpp @@ -2,23 +2,22 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "generate_proposals.hpp" +#include "openvino/op/generate_proposals.hpp" +#include "core/operator_set.hpp" #include "openvino/op/broadcast.hpp" #include "openvino/op/concat.hpp" #include "openvino/op/constant.hpp" #include "openvino/op/gather.hpp" -#include "openvino/op/generate_proposals.hpp" #include "openvino/op/shape_of.hpp" - using namespace ov::op; using ov::Shape; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace org_openvinotoolkit { +namespace opset_1 { namespace { void validate_generate_proposals_inputs(const ov::OutputVector& inputs) { @@ -66,8 +65,10 @@ ov::OutputVector generate_proposals(const ov::frontend::onnx::Node& node) { return proposals->outputs(); } -} // namespace set_1 -} // namespace op + +ONNX_OP("GenerateProposals", OPSET_SINCE(1), org_openvinotoolkit::opset_1::generate_proposals, OPENVINO_ONNX_DOMAIN); +} // namespace opset_1 +} // namespace org_openvinotoolkit } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/generate_proposals.hpp b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/generate_proposals.hpp deleted file mode 100644 index d3d57ae9280e0d..00000000000000 --- a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/generate_proposals.hpp +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright (C) 2022 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma 
once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector generate_proposals(const ov::frontend::onnx::Node& node); -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/group_norm.cpp b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/group_norm.cpp index 123888250f01f7..2b566df53b10e2 100644 --- a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/group_norm.cpp +++ b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/group_norm.cpp @@ -2,9 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/org.openvinotoolkit/group_norm.hpp" - -#include "core/node.hpp" +#include "core/operator_set.hpp" #include "openvino/frontend/exception.hpp" #include "openvino/op/group_normalization.hpp" #include "openvino/op/squeeze.hpp" @@ -14,8 +12,8 @@ using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace org_openvinotoolkit { +namespace opset_1 { ov::OutputVector group_norm(const ov::frontend::onnx::Node& node) { auto inputs = node.get_ov_inputs(); FRONT_END_GENERAL_CHECK(inputs.size() == 3, @@ -38,8 +36,18 @@ ov::OutputVector group_norm(const ov::frontend::onnx::Node& node) { return {std::make_shared(data, scale, bias, num_groups, eps)}; } -} // namespace set_1 -} // namespace op +static bool register_multiple_translators(void) { + ONNX_OP_M("ExperimentalDetectronGroupNorm", + OPSET_SINCE(1), + org_openvinotoolkit::opset_1::group_norm, + OPENVINO_ONNX_DOMAIN); + ONNX_OP_M("GroupNorm", OPSET_SINCE(1), org_openvinotoolkit::opset_1::group_norm, OPENVINO_ONNX_DOMAIN); + return true; +} + +static bool registered = register_multiple_translators(); +} // namespace opset_1 +} // namespace org_openvinotoolkit } // namespace onnx } // namespace frontend } // namespace ov diff --git 
a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/group_norm.hpp b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/group_norm.hpp deleted file mode 100644 index 2cb5e7ee86029c..00000000000000 --- a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/group_norm.hpp +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector group_norm(const ov::frontend::onnx::Node& node); - -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/normalize.cpp b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/normalize.cpp index e556580401a502..dbcd1f2164d715 100644 --- a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/normalize.cpp +++ b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/normalize.cpp @@ -2,8 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/org.openvinotoolkit/normalize.hpp" - +#include "core/operator_set.hpp" #include "openvino/frontend/exception.hpp" #include "openvino/op/constant.hpp" #include "openvino/op/multiply.hpp" @@ -11,15 +10,14 @@ #include "openvino/op/reshape.hpp" #include "openvino/op/util/op_types.hpp" #include "utils/common.hpp" - using namespace ov::op; using ov::Shape; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace org_openvinotoolkit { +namespace opset_1 { ov::OutputVector normalize(const ov::frontend::onnx::Node& node) { auto inputs = node.get_ov_inputs(); FRONT_END_GENERAL_CHECK(inputs.size() == 2, "Invalid number of inputs"); @@ -64,8 +62,9 @@ ov::OutputVector normalize(const ov::frontend::onnx::Node& node) { weights)}; } -} // namespace set_1 -} // namespace op +ONNX_OP("Normalize", OPSET_SINCE(1), 
org_openvinotoolkit::opset_1::normalize, OPENVINO_ONNX_DOMAIN); +} // namespace opset_1 +} // namespace org_openvinotoolkit } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/normalize.hpp b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/normalize.hpp deleted file mode 100644 index 3c97654b39c4c6..00000000000000 --- a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/normalize.hpp +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector normalize(const ov::frontend::onnx::Node& node); - -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/prior_box.cpp b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/prior_box.cpp index 33987df84bcfbd..db39b8a15a13ba 100644 --- a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/prior_box.cpp +++ b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/prior_box.cpp @@ -2,13 +2,12 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/org.openvinotoolkit/prior_box.hpp" +#include "openvino/op/prior_box.hpp" -#include "core/node.hpp" +#include "core/operator_set.hpp" #include "exceptions.hpp" #include "openvino/frontend/exception.hpp" #include "openvino/op/constant.hpp" -#include "openvino/op/prior_box.hpp" #include "openvino/op/prior_box_clustered.hpp" #include "openvino/op/shape_of.hpp" #include "openvino/op/strided_slice.hpp" @@ -20,7 +19,7 @@ using ov::Shape; namespace ov { namespace frontend { namespace onnx { -namespace op { +namespace org_openvinotoolkit { namespace detail { namespace { std::shared_ptr make_slice(std::shared_ptr node, int64_t start, int64_t end) { @@ -34,7 +33,7 @@ 
std::shared_ptr make_slice(std::shared_ptr node, int } // namespace } // namespace detail -namespace set_1 { +namespace opset_1 { ov::OutputVector prior_box(const ov::frontend::onnx::Node& node) { auto inputs = node.get_ov_inputs(); FRONT_END_GENERAL_CHECK(inputs.size() == 2, "Invalid number of inputs"); @@ -105,8 +104,18 @@ ov::OutputVector prior_box_clustered(const ov::frontend::onnx::Node& node) { axes)}; } -} // namespace set_1 -} // namespace op +static bool register_multiple_translators(void) { + ONNX_OP_M("PriorBox", OPSET_SINCE(1), org_openvinotoolkit::opset_1::prior_box, OPENVINO_ONNX_DOMAIN); + ONNX_OP_M("PriorBoxClustered", + OPSET_SINCE(1), + org_openvinotoolkit::opset_1::prior_box_clustered, + OPENVINO_ONNX_DOMAIN); + return true; +} + +static bool registered = register_multiple_translators(); +} // namespace opset_1 +} // namespace org_openvinotoolkit } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/prior_box.hpp b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/prior_box.hpp deleted file mode 100644 index a2cdce564c47f9..00000000000000 --- a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/prior_box.hpp +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector prior_box(const ov::frontend::onnx::Node& node); - -ov::OutputVector prior_box_clustered(const ov::frontend::onnx::Node& node); - -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/swish.cpp b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/swish.cpp index 0e90eee634ec68..e2003f95c59d44 100644 --- 
a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/swish.cpp +++ b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/swish.cpp @@ -2,21 +2,20 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/org.openvinotoolkit/swish.hpp" +#include "openvino/op/swish.hpp" +#include "core/operator_set.hpp" #include "openvino/op/constant.hpp" -#include "openvino/op/swish.hpp" #include "utils/common.hpp" #include "utils/reshape.hpp" - using namespace ov::op; using ov::Shape; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace org_openvinotoolkit { +namespace opset_1 { ov::OutputVector swish(const ov::frontend::onnx::Node& node) { ov::OutputVector ov_inputs{node.get_ov_inputs()}; @@ -30,8 +29,9 @@ ov::OutputVector swish(const ov::frontend::onnx::Node& node) { return {std::make_shared(ov_inputs.at(0), beta)}; } -} // namespace set_1 -} // namespace op +ONNX_OP("Swish", OPSET_SINCE(1), org_openvinotoolkit::opset_1::swish, OPENVINO_ONNX_DOMAIN); +} // namespace opset_1 +} // namespace org_openvinotoolkit } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/swish.hpp b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/swish.hpp deleted file mode 100644 index 248176edf65765..00000000000000 --- a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/swish.hpp +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector swish(const ov::frontend::onnx::Node& node); -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/pad.cpp b/src/frontends/onnx/frontend/src/op/pad.cpp index 39c0de731a4039..387d0c96632593 100644 --- 
a/src/frontends/onnx/frontend/src/op/pad.cpp +++ b/src/frontends/onnx/frontend/src/op/pad.cpp @@ -2,17 +2,16 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/pad.hpp" +#include "openvino/op/pad.hpp" #include "core/null_node.hpp" +#include "core/operator_set.hpp" #include "exceptions.hpp" #include "openvino/op/constant.hpp" -#include "openvino/op/pad.hpp" #include "openvino/op/util/op_types.hpp" #include "utils/convpool.hpp" #include "utils/reshape.hpp" #include "utils/split.hpp" - namespace { ov::op::PadMode get_pad_mode(std::string mode) { ov::op::PadMode pad_mode; @@ -35,8 +34,8 @@ using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector pad(const ov::frontend::onnx::Node& node) { auto data = node.get_ov_inputs().at(0); @@ -60,8 +59,9 @@ ov::OutputVector pad(const ov::frontend::onnx::Node& node) { pad_mode)}; } -} // namespace set_1 -namespace set_11 { +ONNX_OP("Pad", OPSET_RANGE(1, 10), ai_onnx::opset_1::pad); +} // namespace opset_1 +namespace opset_11 { ov::OutputVector pad(const ov::frontend::onnx::Node& node) { const auto inputs = node.get_ov_inputs(); const auto& data = inputs[0]; @@ -99,8 +99,9 @@ ov::OutputVector pad(const ov::frontend::onnx::Node& node) { return {std::make_shared(data, padding_begin, padding_end, values, pad_mode)}; } -} // namespace set_11 -} // namespace op +ONNX_OP("Pad", OPSET_SINCE(11), ai_onnx::opset_11::pad); +} // namespace opset_11 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/pad.hpp b/src/frontends/onnx/frontend/src/op/pad.hpp deleted file mode 100644 index 8b92aea1e29689..00000000000000 --- a/src/frontends/onnx/frontend/src/op/pad.hpp +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { 
-namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector pad(const ov::frontend::onnx::Node& node); - -} // namespace set_1 - -namespace set_11 { -ov::OutputVector pad(const ov::frontend::onnx::Node& node); - -} // namespace set_11 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/pow.cpp b/src/frontends/onnx/frontend/src/op/pow.cpp index ebf54151587fe0..181dedede9eea3 100644 --- a/src/frontends/onnx/frontend/src/op/pow.cpp +++ b/src/frontends/onnx/frontend/src/op/pow.cpp @@ -2,19 +2,17 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/pow.hpp" - +#include "core/operator_set.hpp" #include "openvino/frontend/exception.hpp" #include "openvino/op/convert.hpp" #include "openvino/op/power.hpp" - using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector pow(const ov::frontend::onnx::Node& node) { auto inputs = node.get_ov_inputs(); FRONT_END_GENERAL_CHECK(inputs.size() == 2, "Power operation requires 2 inputs. 
Got: ", inputs.size()); @@ -35,8 +33,9 @@ ov::OutputVector pow(const ov::frontend::onnx::Node& node) { return {std::make_shared(base, exponent)}; } -} // namespace set_1 -} // namespace op +ONNX_OP("Pow", OPSET_SINCE(1), ai_onnx::opset_1::pow); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/pow.hpp b/src/frontends/onnx/frontend/src/op/pow.hpp deleted file mode 100644 index 78b25611a0e056..00000000000000 --- a/src/frontends/onnx/frontend/src/op/pow.hpp +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector pow(const ov::frontend::onnx::Node& node); - -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/prelu.cpp b/src/frontends/onnx/frontend/src/op/prelu.cpp index 818bc7d44422e9..1c7b5b53740479 100644 --- a/src/frontends/onnx/frontend/src/op/prelu.cpp +++ b/src/frontends/onnx/frontend/src/op/prelu.cpp @@ -2,17 +2,16 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/prelu.hpp" - #include "openvino/op/prelu.hpp" +#include "core/operator_set.hpp" using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector prelu(const ov::frontend::onnx::Node& node) { ov::OutputVector ov_inputs{node.get_ov_inputs()}; const auto& data = ov_inputs.at(0); @@ -20,8 +19,9 @@ ov::OutputVector prelu(const ov::frontend::onnx::Node& node) { return {std::make_shared(data, slope)}; } -} // namespace set_1 -} // namespace op +ONNX_OP("PRelu", OPSET_SINCE(1), ai_onnx::opset_1::prelu); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // 
namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/prelu.hpp b/src/frontends/onnx/frontend/src/op/prelu.hpp deleted file mode 100644 index 6738012c81c03f..00000000000000 --- a/src/frontends/onnx/frontend/src/op/prelu.hpp +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector prelu(const ov::frontend::onnx::Node& node); - -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/qlinear_conv.cpp b/src/frontends/onnx/frontend/src/op/qlinear_conv.cpp index 2dc0520e67b91e..8abccd682b7621 100644 --- a/src/frontends/onnx/frontend/src/op/qlinear_conv.cpp +++ b/src/frontends/onnx/frontend/src/op/qlinear_conv.cpp @@ -5,23 +5,44 @@ // Disabled in CMakeList // Update to higher opset required -#include "op/qlinear_conv.hpp" - -#include "conv.hpp" #include "core/null_node.hpp" -#include "dequantize_linear.hpp" +#include "core/operator_set.hpp" #include "exceptions.hpp" #include "openvino/op/convert.hpp" #include "openvino/op/multiply.hpp" -#include "quantize_linear.hpp" using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +// Link with an existing translator +namespace opset_13 { +namespace detail { +extern ov::OutputVector dequantize_linear(const ov::Output& x, + const ov::Output& scale, + const std::shared_ptr& zero_point, + int64_t axis, + const Node& node); +} // namespace detail +} // namespace opset_13 +namespace opset_1 { +namespace detail { +ov::OutputVector conv(const ov::frontend::onnx::Node& node, + ov::Output data, + ov::Output filters, + ov::Output bias); +} // namespace detail +} // namespace opset_1 +namespace detail { +extern 
ov::OutputVector matmul(const ov::Output& a, const ov::Output& b); +extern std::shared_ptr make_fake_quantize(const ov::Output& y_scale, + const ov::Output& y_zero_point, + const ov::Output& data); +} // namespace detail + +namespace opset_1 { ov::OutputVector qlinear_conv(const ov::frontend::onnx::Node& node) { const ov::OutputVector& inputs = node.get_ov_inputs(); @@ -35,16 +56,16 @@ ov::OutputVector qlinear_conv(const ov::frontend::onnx::Node& node) { auto y_zero_point = inputs.at(7); ov::Output B = inputs.size() > 8 ? inputs.at(8) : std::make_shared()->output(0); - x = set_13::detail::dequantize_linear(x, - x_scale, - std::make_shared(x_zero_point, ov::element::f32), - 1, - node)[0]; - w = set_13::detail::dequantize_linear(w, - w_scale, - std::make_shared(w_zero_point, ov::element::f32), - 1, - node)[0]; + x = ai_onnx::opset_13::detail::dequantize_linear(x, + x_scale, + std::make_shared(x_zero_point, ov::element::f32), + 1, + node)[0]; + w = ai_onnx::opset_13::detail::dequantize_linear(w, + w_scale, + std::make_shared(w_zero_point, ov::element::f32), + 1, + node)[0]; if (!ov::op::util::is_null(B)) { B = std::make_shared(std::make_shared(B, x_scale.get_element_type()), @@ -54,13 +75,14 @@ ov::OutputVector qlinear_conv(const ov::frontend::onnx::Node& node) { auto result = detail::conv(node, x, w, B)[0]; - result = op::detail::make_fake_quantize(y_scale, y_zero_point, result); + result = ai_onnx::detail::make_fake_quantize(y_scale, y_zero_point, result); return {result}; } -} // namespace set_1 -} // namespace op +ONNX_OP("QLinearConv", OPSET_SINCE(1), ai_onnx::opset_1::qlinear_conv); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/qlinear_conv.hpp b/src/frontends/onnx/frontend/src/op/qlinear_conv.hpp deleted file mode 100644 index 75f9d36ae50f04..00000000000000 --- a/src/frontends/onnx/frontend/src/op/qlinear_conv.hpp +++ /dev/null @@ -1,29 +0,0 @@ -// 
Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -// Disabled in CMakeList -// Update to higher opset required - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -/// \brief Performs ONNX QLinearConv operation. -/// -/// \param node The ONNX node object representing this operation. -/// -/// \return The vector containing OV nodes producing output of ONNX quantizied -/// convolution operation. -ov::OutputVector qlinear_conv(const ov::frontend::onnx::Node& node); - -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/qlinear_matmul.cpp b/src/frontends/onnx/frontend/src/op/qlinear_matmul.cpp index f2ba36e121e176..9d7abc03da308e 100644 --- a/src/frontends/onnx/frontend/src/op/qlinear_matmul.cpp +++ b/src/frontends/onnx/frontend/src/op/qlinear_matmul.cpp @@ -2,21 +2,33 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/qlinear_matmul.hpp" - -#include "dequantize_linear.hpp" -#include "matmul.hpp" +#include "core/operator_set.hpp" #include "openvino/op/convert.hpp" -#include "quantize_linear.hpp" #include "utils/reshape.hpp" - using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +// Link with an existing translator +namespace opset_13 { +namespace detail { +extern ov::OutputVector dequantize_linear(const ov::Output& x, + const ov::Output& scale, + const std::shared_ptr& zero_point, + int64_t axis, + const Node& node); +} // namespace detail +} // namespace opset_13 +namespace detail { +extern ov::OutputVector matmul(const ov::Output& a, const ov::Output& b); +extern std::shared_ptr make_fake_quantize(const ov::Output& y_scale, + const ov::Output& y_zero_point, + const ov::Output& data); +} // namespace detail + +namespace opset_1 { ov::OutputVector 
qlinear_matmul(const ov::frontend::onnx::Node& node) { const ov::OutputVector& inputs = node.get_ov_inputs(); @@ -30,26 +42,27 @@ ov::OutputVector qlinear_matmul(const ov::frontend::onnx::Node& node) { const auto& y_zero_point = inputs.at(7); const auto& dequnatize_a = - set_13::detail::dequantize_linear(a, - a_scale, - std::make_shared(a_zero_point, ov::element::f32), - 1, - node); + ai_onnx::opset_13::detail::dequantize_linear(a, + a_scale, + std::make_shared(a_zero_point, ov::element::f32), + 1, + node); const auto& dequnatize_b = - set_13::detail::dequantize_linear(b, - b_scale, - std::make_shared(b_zero_point, ov::element::f32), - 1, - node); + ai_onnx::opset_13::detail::dequantize_linear(b, + b_scale, + std::make_shared(b_zero_point, ov::element::f32), + 1, + node); - const auto& result = op::detail::matmul(dequnatize_a[0], dequnatize_b[0]); + const auto& result = ai_onnx::detail::matmul(dequnatize_a[0], dequnatize_b[0]); - const auto& quantized_result = op::detail::make_fake_quantize(y_scale, y_zero_point, result[0]); + const auto& quantized_result = ai_onnx::detail::make_fake_quantize(y_scale, y_zero_point, result[0]); return {quantized_result}; } -} // namespace set_1 -} // namespace op +ONNX_OP("QLinearMatMul", OPSET_SINCE(1), ai_onnx::opset_1::qlinear_matmul); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/qlinear_matmul.hpp b/src/frontends/onnx/frontend/src/op/qlinear_matmul.hpp deleted file mode 100644 index 3bc05ca94b2e02..00000000000000 --- a/src/frontends/onnx/frontend/src/op/qlinear_matmul.hpp +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -/// \brief Performs ONNX QLinearMatMul operation. 
-/// -/// \param node The ONNX node object representing this operation. -/// -/// \return The vector containing OV nodes producing output of ONNX quantizied -/// matrix multiplication operation. -ov::OutputVector qlinear_matmul(const ov::frontend::onnx::Node& node); -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/quantize_linear.cpp b/src/frontends/onnx/frontend/src/op/quantize_linear.cpp index 3a0ba924a15bff..6ad5e23bb061c5 100644 --- a/src/frontends/onnx/frontend/src/op/quantize_linear.cpp +++ b/src/frontends/onnx/frontend/src/op/quantize_linear.cpp @@ -2,8 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/quantize_linear.hpp" - +#include "core/operator_set.hpp" #include "exceptions.hpp" #include "openvino/core/validation_util.hpp" #include "openvino/frontend/exception.hpp" @@ -13,14 +12,13 @@ #include "openvino/op/multiply.hpp" #include "openvino/op/subtract.hpp" #include "utils/reshape.hpp" - using namespace ov::op; using ov::Shape; namespace ov { namespace frontend { namespace onnx { -namespace op { +namespace ai_onnx { namespace detail { namespace { ov::Output get_zero_point(const ov::OutputVector& inputs) { @@ -138,7 +136,7 @@ std::shared_ptr make_fake_quantize(const ov::Output& y_scale } } // namespace detail -namespace set_1 { +namespace opset_1 { ov::OutputVector quantize_linear(const ov::frontend::onnx::Node& node) { ov::OutputVector inputs{node.get_ov_inputs()}; auto x = inputs.at(0); @@ -151,16 +149,17 @@ ov::OutputVector quantize_linear(const ov::frontend::onnx::Node& node) { return {detail::make_fake_quantize(y_scale, y_zero_point, x)}; } -} // namespace set_1 +ONNX_OP("QuantizeLinear", {1, 12}, ai_onnx::opset_1::quantize_linear); +} // namespace opset_1 -namespace set_13 { -namespace { +namespace opset_13 { +namespace detail { ov::OutputVector quantize_linear(ov::Output x, ov::Output y_scale, ov::Output y_zero_point, int64_t axis, 
Node node) { - namespace detail = ov::frontend::onnx::op::detail; + namespace detail = ov::frontend::onnx::ai_onnx::detail; x = detail::validate_data(node, x); detail::validate_zero_point_type(node, y_zero_point); @@ -205,7 +204,7 @@ ov::OutputVector quantize_linear(ov::Output x, return {detail::make_fake_quantize(y_scale, y_zero_point, x)}; } -} // namespace +} // namespace detail ov::OutputVector quantize_linear(const ov::frontend::onnx::Node& node) { const ov::OutputVector inputs{node.get_ov_inputs()}; @@ -217,18 +216,19 @@ ov::OutputVector quantize_linear(const ov::frontend::onnx::Node& node) { const auto& x = inputs[0]; const auto& scale = inputs[1]; - const auto zero_point = op::detail::get_zero_point(inputs); + const auto zero_point = ai_onnx::detail::get_zero_point(inputs); // per-tensor quantization, axis attribute ignored if (scale.get_partial_shape().rank().is_static() && scale.get_partial_shape().rank().get_length() == 0 && zero_point.get_partial_shape().rank().is_static() && zero_point.get_partial_shape().rank().get_length() == 0) { - return set_1::quantize_linear(node); + return ai_onnx::opset_1::quantize_linear(node); } - return quantize_linear(x, scale, zero_point, node.get_attribute_value("axis", 1), node); + return detail::quantize_linear(x, scale, zero_point, node.get_attribute_value("axis", 1), node); } -} // namespace set_13 -} // namespace op +ONNX_OP("QuantizeLinear", OPSET_SINCE(13), ai_onnx::opset_13::quantize_linear); +} // namespace opset_13 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/quantize_linear.hpp b/src/frontends/onnx/frontend/src/op/quantize_linear.hpp deleted file mode 100644 index 7d31a8c390baee..00000000000000 --- a/src/frontends/onnx/frontend/src/op/quantize_linear.hpp +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" 
-#include "openvino/core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace detail { -std::shared_ptr make_fake_quantize(const ov::Output& y_scale, - const ov::Output& y_zero_point, - const ov::Output& data); -} -namespace set_1 { -ov::OutputVector quantize_linear(const ov::frontend::onnx::Node& node); - -} // namespace set_1 - -namespace set_13 { - -ov::OutputVector quantize_linear(const ov::frontend::onnx::Node& node); - -} // namespace set_13 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/random_normal.cpp b/src/frontends/onnx/frontend/src/op/random_normal.cpp index 8dde43a12fde30..6d9b95701b76d9 100644 --- a/src/frontends/onnx/frontend/src/op/random_normal.cpp +++ b/src/frontends/onnx/frontend/src/op/random_normal.cpp @@ -2,13 +2,11 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/random_normal.hpp" - +#include "core/operator_set.hpp" #include "exceptions.hpp" #include "openvino/frontend/common/random_normal_helper.hpp" #include "openvino/op/constant.hpp" #include "utils/common.hpp" - using namespace ov::op; using ::ONNX_NAMESPACE::TensorProto_DataType; using ov::Shape; @@ -16,8 +14,8 @@ using ov::Shape; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector random_normal(const ov::frontend::onnx::Node& node) { CHECK_VALID_NODE(node, node.has_attribute("shape"), "RandomNormal operator must specify a 'shape' attribute."); @@ -38,8 +36,9 @@ ov::OutputVector random_normal(const ov::frontend::onnx::Node& node) { return res.first; } -} // namespace set_1 -} // namespace op +ONNX_OP("RandomNormal", OPSET_SINCE(1), ai_onnx::opset_1::random_normal); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/random_normal.hpp 
b/src/frontends/onnx/frontend/src/op/random_normal.hpp deleted file mode 100644 index 68a11d40f254c3..00000000000000 --- a/src/frontends/onnx/frontend/src/op/random_normal.hpp +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { - -ov::OutputVector random_normal(const ov::frontend::onnx::Node& node); - -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/random_normal_like.cpp b/src/frontends/onnx/frontend/src/op/random_normal_like.cpp index b600469ede33da..641320da44dc3b 100644 --- a/src/frontends/onnx/frontend/src/op/random_normal_like.cpp +++ b/src/frontends/onnx/frontend/src/op/random_normal_like.cpp @@ -2,20 +2,18 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/random_normal_like.hpp" - +#include "core/operator_set.hpp" #include "openvino/frontend/common/random_normal_helper.hpp" #include "openvino/op/shape_of.hpp" #include "utils/common.hpp" - using namespace ov::op; using ov::Shape; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector random_normal_like(const ov::frontend::onnx::Node& node) { const auto input = node.get_ov_inputs().at(0); @@ -40,8 +38,9 @@ ov::OutputVector random_normal_like(const ov::frontend::onnx::Node& node) { return res.first; } -} // namespace set_1 -} // namespace op +ONNX_OP("RandomNormalLike", OPSET_SINCE(1), ai_onnx::opset_1::random_normal_like); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/random_normal_like.hpp b/src/frontends/onnx/frontend/src/op/random_normal_like.hpp deleted file mode 100644 index 
9701331c32e681..00000000000000 --- a/src/frontends/onnx/frontend/src/op/random_normal_like.hpp +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { - -ov::OutputVector random_normal_like(const ov::frontend::onnx::Node& node); - -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/random_uniform.cpp b/src/frontends/onnx/frontend/src/op/random_uniform.cpp index b078727ceafd07..c200bec96314e6 100644 --- a/src/frontends/onnx/frontend/src/op/random_uniform.cpp +++ b/src/frontends/onnx/frontend/src/op/random_uniform.cpp @@ -2,20 +2,19 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/random_uniform.hpp" +#include "openvino/op/random_uniform.hpp" +#include "core/operator_set.hpp" #include "exceptions.hpp" -#include "openvino/op/random_uniform.hpp" #include "utils/common.hpp" - using namespace ov::op; using ::ONNX_NAMESPACE::TensorProto_DataType; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector random_uniform(const ov::frontend::onnx::Node& node) { CHECK_VALID_NODE(node, node.has_attribute("shape"), "RandomUniform operator must specify a 'shape' attribute."); @@ -35,8 +34,9 @@ ov::OutputVector random_uniform(const ov::frontend::onnx::Node& node) { std::make_shared(target_shape_const, low_const, high_const, target_type, global_seed, seed)}; } -} // namespace set_1 -} // namespace op +ONNX_OP("RandomUniform", OPSET_SINCE(1), ai_onnx::opset_1::random_uniform); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/random_uniform.hpp 
b/src/frontends/onnx/frontend/src/op/random_uniform.hpp deleted file mode 100644 index fd9890494e582a..00000000000000 --- a/src/frontends/onnx/frontend/src/op/random_uniform.hpp +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { - -ov::OutputVector random_uniform(const ov::frontend::onnx::Node& node); - -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/random_uniform_like.cpp b/src/frontends/onnx/frontend/src/op/random_uniform_like.cpp index b80e93fce99c1b..7fbeffb0bd1861 100644 --- a/src/frontends/onnx/frontend/src/op/random_uniform_like.cpp +++ b/src/frontends/onnx/frontend/src/op/random_uniform_like.cpp @@ -2,20 +2,18 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/random_uniform_like.hpp" - +#include "core/operator_set.hpp" #include "exceptions.hpp" #include "openvino/op/random_uniform.hpp" #include "openvino/op/shape_of.hpp" #include "utils/common.hpp" - using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector random_uniform_like(const ov::frontend::onnx::Node& node) { ov::OutputVector inputs{node.get_ov_inputs()}; @@ -40,8 +38,9 @@ ov::OutputVector random_uniform_like(const ov::frontend::onnx::Node& node) { return {std::make_shared(target_shape, low_const, high_const, target_type, global_seed, seed)}; } -} // namespace set_1 -} // namespace op +ONNX_OP("RandomUniformLike", OPSET_SINCE(1), ai_onnx::opset_1::random_uniform_like); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/random_uniform_like.hpp 
b/src/frontends/onnx/frontend/src/op/random_uniform_like.hpp deleted file mode 100644 index 7bfb871f93aea7..00000000000000 --- a/src/frontends/onnx/frontend/src/op/random_uniform_like.hpp +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { - -ov::OutputVector random_uniform_like(const ov::frontend::onnx::Node& node); - -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/range.cpp b/src/frontends/onnx/frontend/src/op/range.cpp index 4166c12f445dd2..e02ee12667c851 100644 --- a/src/frontends/onnx/frontend/src/op/range.cpp +++ b/src/frontends/onnx/frontend/src/op/range.cpp @@ -2,19 +2,18 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/range.hpp" +#include "openvino/op/range.hpp" +#include "core/operator_set.hpp" #include "exceptions.hpp" -#include "openvino/op/range.hpp" #include "openvino/op/squeeze.hpp" - using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector range(const ov::frontend::onnx::Node& node) { const auto inputs = node.get_ov_inputs(); CHECK_VALID_NODE(node, inputs.size() >= 3, "Minimum 3 inputs are required. 
Got: ", inputs.size()); @@ -41,8 +40,9 @@ ov::OutputVector range(const ov::frontend::onnx::Node& node) { return {std::make_shared(start, stop, step, start.get_element_type())}; } -} // namespace set_1 -} // namespace op +ONNX_OP("Range", OPSET_SINCE(1), ai_onnx::opset_1::range); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/range.hpp b/src/frontends/onnx/frontend/src/op/range.hpp deleted file mode 100644 index cc009f606fe6ec..00000000000000 --- a/src/frontends/onnx/frontend/src/op/range.hpp +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector range(const ov::frontend::onnx::Node& node); - -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/reciprocal.cpp b/src/frontends/onnx/frontend/src/op/reciprocal.cpp index e9d275c72bfa81..d72bbcb6fff19d 100644 --- a/src/frontends/onnx/frontend/src/op/reciprocal.cpp +++ b/src/frontends/onnx/frontend/src/op/reciprocal.cpp @@ -2,19 +2,17 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/reciprocal.hpp" - +#include "core/operator_set.hpp" #include "openvino/op/constant.hpp" #include "openvino/op/divide.hpp" - using namespace ov::op; using ov::Shape; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector reciprocal(const ov::frontend::onnx::Node& node) { auto data = node.get_ov_inputs().at(0); @@ -22,8 +20,9 @@ ov::OutputVector reciprocal(const ov::frontend::onnx::Node& node) { return {std::make_shared(one_node, data)}; } -} // namespace set_1 -} // namespace op +ONNX_OP("Reciprocal", OPSET_SINCE(1), 
ai_onnx::opset_1::reciprocal); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/reciprocal.hpp b/src/frontends/onnx/frontend/src/op/reciprocal.hpp deleted file mode 100644 index efa6f6fca8d9f1..00000000000000 --- a/src/frontends/onnx/frontend/src/op/reciprocal.hpp +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector reciprocal(const ov::frontend::onnx::Node& node); - -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/reduce.cpp b/src/frontends/onnx/frontend/src/op/reduce.cpp index d0525d06f8157b..59014121ef4018 100644 --- a/src/frontends/onnx/frontend/src/op/reduce.cpp +++ b/src/frontends/onnx/frontend/src/op/reduce.cpp @@ -2,10 +2,8 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/reduce.hpp" - +#include "core/operator_set.hpp" #include "exceptions.hpp" -#include "identity.hpp" #include "openvino/frontend/exception.hpp" #include "openvino/op/constant.hpp" #include "openvino/op/convert.hpp" @@ -30,7 +28,13 @@ using ov::Shape; namespace ov { namespace frontend { namespace onnx { -namespace op { +namespace ai_onnx { + +// Link with an existing translator +namespace opset_1 { +extern ov::OutputVector identity(const ov::frontend::onnx::Node& node); +} // namespace opset_1 + namespace { std::shared_ptr get_dynamic_all_axes_range(const Node& node) { const auto input = node.get_ov_inputs().at(0); @@ -133,7 +137,7 @@ std::shared_ptr make_ov_reduction_op(const Node& node, if (reduction_axes != nullptr) { return std::make_shared(ov_input, reduction_axes, static_cast(keepdims)); } else { - return 
set_1::identity(node).at(0).get_node_shared_ptr(); + return ai_onnx::opset_1::identity(node).at(0).get_node_shared_ptr(); } } @@ -146,7 +150,7 @@ std::shared_ptr onnx_reduce_sum_square(const ov::frontend::onnx::Node& } } // namespace -namespace set_1 { +namespace opset_1 { ov::OutputVector reduce_log_sum(const ov::frontend::onnx::Node& node) { const ov::Output sum_node = make_ov_reduction_op(node, node.get_ov_inputs().at(0), supported_types_v2); @@ -190,7 +194,23 @@ ov::OutputVector reduce_sum(const ov::frontend::onnx::Node& node) { ov::OutputVector reduce_sum_square(const ov::frontend::onnx::Node& node) { return {onnx_reduce_sum_square(node, supported_types_v1)}; } -} // namespace set_1 + +static bool register_multiple_translators(void) { + ONNX_OP_M("ReduceLogSum", OPSET_RANGE(1, 12), ai_onnx::opset_1::reduce_log_sum); + ONNX_OP_M("ReduceLogSumExp", OPSET_RANGE(1, 12), ai_onnx::opset_1::reduce_log_sum_exp); + ONNX_OP_M("ReduceL1", OPSET_RANGE(1, 12), ai_onnx::opset_1::reduce_l1); + ONNX_OP_M("ReduceL2", OPSET_RANGE(1, 12), ai_onnx::opset_1::reduce_l2); + ONNX_OP_M("ReduceMax", OPSET_RANGE(1, 12), ai_onnx::opset_1::reduce_max); + ONNX_OP_M("ReduceMean", OPSET_RANGE(1, 12), ai_onnx::opset_1::reduce_mean); + ONNX_OP_M("ReduceMin", OPSET_RANGE(1, 12), ai_onnx::opset_1::reduce_min); + ONNX_OP_M("ReduceProd", OPSET_RANGE(1, 12), ai_onnx::opset_1::reduce_prod); + ONNX_OP_M("ReduceSum", OPSET_RANGE(1, 12), ai_onnx::opset_1::reduce_sum); + ONNX_OP_M("ReduceSumSquare", OPSET_RANGE(1, 12), ai_onnx::opset_1::reduce_sum_square); + return true; +} + +static bool registered = register_multiple_translators(); +} // namespace opset_1 /* Opset 11 is skipped because there are no significant difference between opset1 and opset 11.
@@ -201,7 +221,7 @@ ov::OutputVector reduce_sum_square(const ov::frontend::onnx::Node& node) { Same time Reduce* operations in OpenVINO has same requirement from first version */ -namespace set_13 { +namespace opset_13 { ov::OutputVector reduce_sum(const ov::frontend::onnx::Node& node) { return {make_ov_reduction_op(node, node.get_ov_inputs().at(0), supported_types_v2, false)}; } @@ -213,9 +233,11 @@ ov::OutputVector reduce_l2(const Node& node) { ov::OutputVector reduce_max(const ov::frontend::onnx::Node& node) { return {make_ov_reduction_op(node, node.get_ov_inputs().at(0), supported_types_v3)}; } + ov::OutputVector reduce_mean(const ov::frontend::onnx::Node& node) { return {make_ov_reduction_op(node, node.get_ov_inputs().at(0), supported_types_v2)}; } + ov::OutputVector reduce_min(const ov::frontend::onnx::Node& node) { return {make_ov_reduction_op(node, node.get_ov_inputs().at(0), supported_types_v3)}; } @@ -223,21 +245,37 @@ ov::OutputVector reduce_min(const ov::frontend::onnx::Node& node) { ov::OutputVector reduce_sum_square(const ov::frontend::onnx::Node& node) { return {onnx_reduce_sum_square(node, supported_types_v2)}; } -} // namespace set_13 -namespace set_18 { +static bool register_multiple_translators(void) { + ONNX_OP_M("ReduceL2", OPSET_RANGE(13, 17), ai_onnx::opset_13::reduce_l2); + ONNX_OP_M("ReduceMax", OPSET_RANGE(13, 17), ai_onnx::opset_13::reduce_max); + ONNX_OP_M("ReduceMean", OPSET_RANGE(13, 17), ai_onnx::opset_13::reduce_mean); + ONNX_OP_M("ReduceMin", OPSET_RANGE(13, 17), ai_onnx::opset_13::reduce_min); + ONNX_OP_M("ReduceSum", OPSET_RANGE(13, 17), ai_onnx::opset_13::reduce_sum); + ONNX_OP_M("ReduceSumSquare", OPSET_RANGE(13, 17), ai_onnx::opset_13::reduce_sum_square); + return true; +} + +static bool registered = register_multiple_translators(); +} // namespace opset_13 + +namespace opset_18 { ov::OutputVector reduce_l2(const Node& node) { return {make_ov_reduction_op(node, node.get_ov_inputs().at(0), supported_types_v2, false)}; } + ov::OutputVector
reduce_max(const ov::frontend::onnx::Node& node) { return {make_ov_reduction_op(node, node.get_ov_inputs().at(0), supported_types_v3, false)}; } + ov::OutputVector reduce_mean(const ov::frontend::onnx::Node& node) { return {make_ov_reduction_op(node, node.get_ov_inputs().at(0), supported_types_v3, false)}; } + ov::OutputVector reduce_min(const ov::frontend::onnx::Node& node) { return {make_ov_reduction_op(node, node.get_ov_inputs().at(0), supported_types_v3, false)}; } + ov::OutputVector reduce_log_sum(const ov::frontend::onnx::Node& node) { const ov::Output sum_node = make_ov_reduction_op(node, node.get_ov_inputs().at(0), supported_types_v2, false); @@ -247,9 +285,21 @@ ov::OutputVector reduce_log_sum(const ov::frontend::onnx::Node& node) { ov::OutputVector reduce_sum_square(const ov::frontend::onnx::Node& node) { return {onnx_reduce_sum_square(node, supported_types_v2, false)}; } -} // namespace set_18 -namespace set_20 { +static bool register_multiple_translators(void) { + ONNX_OP_M("ReduceLogSum", OPSET_SINCE(18), ai_onnx::opset_18::reduce_log_sum); + ONNX_OP_M("ReduceL2", OPSET_SINCE(18), ai_onnx::opset_18::reduce_l2); + ONNX_OP_M("ReduceMax", OPSET_RANGE(18, 19), ai_onnx::opset_18::reduce_max); + ONNX_OP_M("ReduceMean", OPSET_SINCE(18), ai_onnx::opset_18::reduce_mean); + ONNX_OP_M("ReduceMin", OPSET_RANGE(18, 19), ai_onnx::opset_18::reduce_min); + ONNX_OP_M("ReduceSumSquare", OPSET_SINCE(18), ai_onnx::opset_18::reduce_sum_square); + return true; +} + +static bool registered = register_multiple_translators(); +} // namespace opset_18 + +namespace opset_20 { ov::OutputVector reduce_max(const ov::frontend::onnx::Node& node) { auto data = node.get_ov_inputs().at(0); if (data.get_element_type() != element::boolean) { @@ -279,8 +329,16 @@ ov::OutputVector reduce_min(const ov::frontend::onnx::Node& node) { element::boolean)}; } } -} // namespace set_20 -} // namespace op + +static bool register_multiple_translators(void) { + ONNX_OP_M("ReduceMax", OPSET_SINCE(20),
ai_onnx::opset_20::reduce_max); + ONNX_OP_M("ReduceMin", OPSET_SINCE(20), ai_onnx::opset_20::reduce_min); + return true; +} + +static bool registered = register_multiple_translators(); +} // namespace opset_20 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/reduce.hpp b/src/frontends/onnx/frontend/src/op/reduce.hpp deleted file mode 100644 index 7ad4693a12e6b6..00000000000000 --- a/src/frontends/onnx/frontend/src/op/reduce.hpp +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector reduce_log_sum(const ov::frontend::onnx::Node& node); -} // namespace set_1 -namespace set_18 { -ov::OutputVector reduce_log_sum(const ov::frontend::onnx::Node& node); -} // namespace set_18 - -namespace set_1 { -ov::OutputVector reduce_log_sum_exp(const ov::frontend::onnx::Node& node); -} // namespace set_1 - -namespace set_1 { -ov::OutputVector reduce_l1(const ov::frontend::onnx::Node& node); -} // namespace set_1 - -namespace set_1 { -ov::OutputVector reduce_l2(const ov::frontend::onnx::Node& node); -} // namespace set_1 -namespace set_13 { -ov::OutputVector reduce_l2(const ov::frontend::onnx::Node& node); -} // namespace set_13 -namespace set_18 { -ov::OutputVector reduce_l2(const ov::frontend::onnx::Node& node); -} // namespace set_18 - -namespace set_1 { -ov::OutputVector reduce_max(const ov::frontend::onnx::Node& node); -} // namespace set_1 -namespace set_13 { -ov::OutputVector reduce_max(const ov::frontend::onnx::Node& node); -} // namespace set_13 -namespace set_18 { -ov::OutputVector reduce_max(const ov::frontend::onnx::Node& node); -} // namespace set_18 -namespace set_20 { -ov::OutputVector reduce_max(const ov::frontend::onnx::Node& node); -} // namespace set_20 - -namespace 
set_1 { -ov::OutputVector reduce_mean(const ov::frontend::onnx::Node& node); -} // namespace set_1 -namespace set_13 { -ov::OutputVector reduce_mean(const ov::frontend::onnx::Node& node); -} // namespace set_13 -namespace set_18 { -ov::OutputVector reduce_mean(const ov::frontend::onnx::Node& node); -} // namespace set_18 - -namespace set_1 { -ov::OutputVector reduce_min(const ov::frontend::onnx::Node& node); -} // namespace set_1 -namespace set_13 { -ov::OutputVector reduce_min(const ov::frontend::onnx::Node& node); -} // namespace set_13 -namespace set_18 { -ov::OutputVector reduce_min(const ov::frontend::onnx::Node& node); -} // namespace set_18 -namespace set_20 { -ov::OutputVector reduce_min(const ov::frontend::onnx::Node& node); -} // namespace set_20 - -namespace set_1 { -ov::OutputVector reduce_prod(const ov::frontend::onnx::Node& node); -} // namespace set_1 - -namespace set_1 { -ov::OutputVector reduce_sum(const ov::frontend::onnx::Node& node); -} // namespace set_1 -namespace set_13 { -ov::OutputVector reduce_sum(const ov::frontend::onnx::Node& node); -} // namespace set_13 - -namespace set_1 { -ov::OutputVector reduce_sum_square(const ov::frontend::onnx::Node& node); -} // namespace set_1 -namespace set_13 { -ov::OutputVector reduce_sum_square(const ov::frontend::onnx::Node& node); -} // namespace set_13 -namespace set_18 { -ov::OutputVector reduce_sum_square(const ov::frontend::onnx::Node& node); -} // namespace set_18 - -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/relu.hpp b/src/frontends/onnx/frontend/src/op/relu.cpp similarity index 60% rename from src/frontends/onnx/frontend/src/op/relu.hpp rename to src/frontends/onnx/frontend/src/op/relu.cpp index f1e3753033d2df..3f22e4052a3724 100644 --- a/src/frontends/onnx/frontend/src/op/relu.hpp +++ b/src/frontends/onnx/frontend/src/op/relu.cpp @@ -2,23 +2,23 @@ // SPDX-License-Identifier: Apache-2.0 // -#pragma once - 
-#include "core/node.hpp" #include "openvino/op/relu.hpp" +#include "core/operator_set.hpp" + namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { -inline ov::OutputVector relu(const ov::frontend::onnx::Node& node) { +namespace ai_onnx { +namespace opset_1 { +ov::OutputVector relu(const ov::frontend::onnx::Node& node) { ov::OutputVector ov_inputs{node.get_ov_inputs()}; return {std::make_shared(ov_inputs.at(0))}; } -} // namespace set_1 -} // namespace op +ONNX_OP("Relu", OPSET_SINCE(1), ai_onnx::opset_1::relu); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/reshape.cpp b/src/frontends/onnx/frontend/src/op/reshape.cpp index 6b5897ac2a6321..875dceeeacf1f3 100644 --- a/src/frontends/onnx/frontend/src/op/reshape.cpp +++ b/src/frontends/onnx/frontend/src/op/reshape.cpp @@ -2,19 +2,18 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/reshape.hpp" +#include "openvino/op/reshape.hpp" +#include "core/operator_set.hpp" #include "exceptions.hpp" -#include "openvino/op/reshape.hpp" #include "utils/reshape.hpp" - using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector reshape(const ov::frontend::onnx::Node& node) { ov::OutputVector ov_inputs{node.get_ov_inputs()}; const auto data = ov_inputs.at(0); @@ -34,8 +33,9 @@ ov::OutputVector reshape(const ov::frontend::onnx::Node& node) { return {std::make_shared(data, pattern, special_zero)}; } -} // namespace set_1 -} // namespace op +ONNX_OP("Reshape", OPSET_SINCE(1), ai_onnx::opset_1::reshape); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/reshape.hpp b/src/frontends/onnx/frontend/src/op/reshape.hpp deleted file mode 100644 index 
57720b49329589..00000000000000 --- a/src/frontends/onnx/frontend/src/op/reshape.hpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -/// -/// \brief Reshape the input tensor similar to numpy.reshape. -/// -/// \param[in] node The ONNX node representing this operation. -/// -/// \return OV node representing this operation. -/// -ov::OutputVector reshape(const ov::frontend::onnx::Node& node); - -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/resize.cpp b/src/frontends/onnx/frontend/src/op/resize.cpp index ea78c4c9dd8cba..8c780add333f78 100644 --- a/src/frontends/onnx/frontend/src/op/resize.cpp +++ b/src/frontends/onnx/frontend/src/op/resize.cpp @@ -2,19 +2,17 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/resize.hpp" - #include "core/null_node.hpp" +#include "core/operator_set.hpp" #include "exceptions.hpp" #include "openvino/op/interpolate.hpp" #include "utils/common.hpp" - using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { +namespace ai_onnx { namespace { static const std::unordered_set supported_modes = {"nearest", "linear", "cubic"}; @@ -111,7 +109,7 @@ InterpolateAttrs get_resize_attrs(const ov::frontend::onnx::Node& node) { } } // namespace -namespace set_11 { +namespace opset_11 { ov::OutputVector resize(const ov::frontend::onnx::Node& node) { // roi input (inputs.at(2)) is ignored because it is used only // in "tf_crop_and_resize" which is not handled now @@ -130,9 +128,10 @@ ov::OutputVector resize(const ov::frontend::onnx::Node& node) { return {std::make_shared(data, scales, attrs)}; } } -} // namespace set_11 +ONNX_OP("Resize", OPSET_SINCE(11), ai_onnx::opset_11::resize); +} // namespace 
opset_11 -namespace set_1 { +namespace opset_1 { ov::OutputVector resize(const ov::frontend::onnx::Node& node) { const auto inputs = node.get_ov_inputs(); const auto& data = inputs.at(0); @@ -150,8 +149,9 @@ ov::OutputVector resize(const ov::frontend::onnx::Node& node) { return {std::make_shared(data, scales, attrs)}; } -} // namespace set_1 -} // namespace op +ONNX_OP("Resize", OPSET_RANGE(1, 10), ai_onnx::opset_1::resize); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/resize.hpp b/src/frontends/onnx/frontend/src/op/resize.hpp deleted file mode 100644 index d51f5466c6fe99..00000000000000 --- a/src/frontends/onnx/frontend/src/op/resize.hpp +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector resize(const ov::frontend::onnx::Node& node); - -} // namespace set_1 - -namespace set_11 { -ov::OutputVector resize(const ov::frontend::onnx::Node& node); -} - -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/reverse_sequence.cpp b/src/frontends/onnx/frontend/src/op/reverse_sequence.cpp index 3fcdc575b64a21..958dae457bf193 100644 --- a/src/frontends/onnx/frontend/src/op/reverse_sequence.cpp +++ b/src/frontends/onnx/frontend/src/op/reverse_sequence.cpp @@ -2,22 +2,21 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/reverse_sequence.hpp" +#include "openvino/op/reverse_sequence.hpp" -#include "core/node.hpp" +#include "core/operator_set.hpp" #include "openvino/core/type/element_type.hpp" #include "openvino/core/validation_util.hpp" #include "openvino/frontend/exception.hpp" #include "openvino/op/convert.hpp" -#include "openvino/op/reverse_sequence.hpp" using 
namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector reverse_sequence(const ov::frontend::onnx::Node& node) { const auto data = node.get_ov_inputs().at(0); @@ -47,8 +46,9 @@ ov::OutputVector reverse_sequence(const ov::frontend::onnx::Node& node) { std::make_shared(data, sequence_lengths_i32, normalized_batch_axis, normalized_time_axis)}; } -} // namespace set_1 -} // namespace op +ONNX_OP("ReverseSequence", OPSET_SINCE(1), ai_onnx::opset_1::reverse_sequence); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/reverse_sequence.hpp b/src/frontends/onnx/frontend/src/op/reverse_sequence.hpp deleted file mode 100644 index 4f8a652617eed6..00000000000000 --- a/src/frontends/onnx/frontend/src/op/reverse_sequence.hpp +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector reverse_sequence(const ov::frontend::onnx::Node& node); - -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/rnn.cpp b/src/frontends/onnx/frontend/src/op/rnn.cpp index e9bb732bcbf4dd..7cb04e8132a355 100644 --- a/src/frontends/onnx/frontend/src/op/rnn.cpp +++ b/src/frontends/onnx/frontend/src/op/rnn.cpp @@ -2,19 +2,17 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/rnn.hpp" - +#include "core/operator_set.hpp" #include "openvino/op/rnn_sequence.hpp" #include "utils/recurrent.hpp" #include "utils/reshape.hpp" - using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { 
namespace { struct RNNInputMap : public recurrent::OpInputMap { RNNInputMap(const ov::frontend::onnx::Node& node, std::size_t gates_count) : OpInputMap(node, gates_count) {} @@ -52,8 +50,9 @@ ov::OutputVector rnn(const ov::frontend::onnx::Node& node) { return {ov::op::util::reorder_axes(Y, {2, 1, 0, 3}), ov::op::util::reorder_axes(Y_h, {1, 0, 2})}; } -} // namespace set_1 -} // namespace op +ONNX_OP("RNN", OPSET_SINCE(1), ai_onnx::opset_1::rnn); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/rnn.hpp b/src/frontends/onnx/frontend/src/op/rnn.hpp deleted file mode 100644 index b61b6195115f25..00000000000000 --- a/src/frontends/onnx/frontend/src/op/rnn.hpp +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector rnn(const ov::frontend::onnx::Node& node); - -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/roi_align.cpp b/src/frontends/onnx/frontend/src/op/roi_align.cpp index a65f58460acc95..5ffe6794902ec1 100644 --- a/src/frontends/onnx/frontend/src/op/roi_align.cpp +++ b/src/frontends/onnx/frontend/src/op/roi_align.cpp @@ -2,18 +2,17 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/roi_align.hpp" - -#include "openvino/frontend/exception.hpp" #include "openvino/op/roi_align.hpp" +#include "core/operator_set.hpp" +#include "openvino/frontend/exception.hpp" using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector roi_align(const ov::frontend::onnx::Node& node) { const auto inputs = node.get_ov_inputs(); @@ -41,8 +40,9 @@ 
ov::OutputVector roi_align(const ov::frontend::onnx::Node& node) { pooling_mode, aligned_mode)}; } -} // namespace set_1 -namespace set_16 { +ONNX_OP("RoiAlign", OPSET_RANGE(1, 15), ai_onnx::opset_1::roi_align); +} // namespace opset_1 +namespace opset_16 { ov::OutputVector roi_align(const ov::frontend::onnx::Node& node) { const auto inputs = node.get_ov_inputs(); @@ -77,8 +77,9 @@ ov::OutputVector roi_align(const ov::frontend::onnx::Node& node) { pooling_mode, aligned_mode)}; } -} // namespace set_16 -} // namespace op +ONNX_OP("RoiAlign", OPSET_SINCE(16), ai_onnx::opset_16::roi_align); +} // namespace opset_16 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/roi_align.hpp b/src/frontends/onnx/frontend/src/op/roi_align.hpp deleted file mode 100644 index 6c9b899e444461..00000000000000 --- a/src/frontends/onnx/frontend/src/op/roi_align.hpp +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector roi_align(const ov::frontend::onnx::Node& node); - -} // namespace set_1 - -namespace set_16 { -ov::OutputVector roi_align(const ov::frontend::onnx::Node& node); - -} // namespace set_16 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/round.cpp b/src/frontends/onnx/frontend/src/op/round.cpp index b2e035cccda37d..106539abaeb3ef 100644 --- a/src/frontends/onnx/frontend/src/op/round.cpp +++ b/src/frontends/onnx/frontend/src/op/round.cpp @@ -5,22 +5,22 @@ // Disabled in CMakeList // Update to higher opset required -#include "op/round.hpp" - #include "openvino/op/round.hpp" +#include "core/operator_set.hpp" using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { 
-namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector round(const ov::frontend::onnx::Node& node) { return {std::make_shared(node.get_ov_inputs().at(0), v5::Round::RoundMode::HALF_TO_EVEN)}; } -} // namespace set_1 -} // namespace op +ONNX_OP("Round", OPSET_SINCE(1), ai_onnx::opset_1::round); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/round.hpp b/src/frontends/onnx/frontend/src/op/round.hpp deleted file mode 100644 index e13ee777dc3b16..00000000000000 --- a/src/frontends/onnx/frontend/src/op/round.hpp +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -// Disabled in CMakeList -// Update to higher opset required - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector round(const ov::frontend::onnx::Node& node); - -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/scan.cpp b/src/frontends/onnx/frontend/src/op/scan.cpp index b51a2dbde2bbaf..ee9700bc092807 100644 --- a/src/frontends/onnx/frontend/src/op/scan.cpp +++ b/src/frontends/onnx/frontend/src/op/scan.cpp @@ -2,10 +2,9 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/scan.hpp" - #include "core/graph.hpp" #include "core/null_node.hpp" +#include "core/operator_set.hpp" #include "exceptions.hpp" #include "openvino/core/validation_util.hpp" #include "openvino/op/constant.hpp" @@ -13,13 +12,12 @@ #include "openvino/op/tensor_iterator.hpp" #include "openvino/op/unsqueeze.hpp" #include "openvino/op/util/op_types.hpp" - using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { +namespace ai_onnx { namespace { @@ -156,7 +154,7 @@ ov::OutputVector import_onnx_scan(const 
ov::frontend::onnx::Node& node, } // namespace -namespace set_1 { +namespace opset_1 { ov::OutputVector scan(const ov::frontend::onnx::Node& node) { // ONNX Scan-8 can have optional `sequence_lens` input, @@ -167,9 +165,10 @@ ov::OutputVector scan(const ov::frontend::onnx::Node& node) { return import_onnx_scan(node, 1, 1, "directions"); } -} // namespace set_1 +ONNX_OP("Scan", OPSET_RANGE(1, 8), ai_onnx::opset_1::scan); +} // namespace opset_1 -namespace set_9 { +namespace opset_9 { ov::OutputVector scan(const ov::frontend::onnx::Node& node) { // Since ONNX Scan-9 the optional `sequence_lens input` was removed, @@ -177,8 +176,9 @@ ov::OutputVector scan(const ov::frontend::onnx::Node& node) { return import_onnx_scan(node, 0, 0, "scan_input_directions"); } -} // namespace set_9 -} // namespace op +ONNX_OP("Scan", OPSET_SINCE(9), ai_onnx::opset_9::scan); +} // namespace opset_9 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/scan.hpp b/src/frontends/onnx/frontend/src/op/scan.hpp deleted file mode 100644 index 8939f8e6ae4d1a..00000000000000 --- a/src/frontends/onnx/frontend/src/op/scan.hpp +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -/// \brief Creates OpenVino node representing ONNX Scan operator. -/// -/// \note Details available here: -/// https://github.com/onnx/onnx/blob/main/docs/Operators.md#Scan -/// -/// \param[in] node The input ONNX node representing this operation. -/// -/// \return ov::OutputVector of resulting OpenVino nodes. 
-/// -ov::OutputVector scan(const ov::frontend::onnx::Node& node); -} // namespace set_1 - -namespace set_9 { -ov::OutputVector scan(const ov::frontend::onnx::Node& node); -} // namespace set_9 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/scatter_elements.cpp b/src/frontends/onnx/frontend/src/op/scatter_elements.cpp index dca352f8463ce8..b88538bd61acc4 100644 --- a/src/frontends/onnx/frontend/src/op/scatter_elements.cpp +++ b/src/frontends/onnx/frontend/src/op/scatter_elements.cpp @@ -2,18 +2,16 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/scatter_elements.hpp" - +#include "core/operator_set.hpp" #include "exceptions.hpp" #include "openvino/op/scatter_elements_update.hpp" - using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector scatter_elements(const ov::frontend::onnx::Node& node) { const auto data = node.get_ov_inputs().at(0); const auto indices = node.get_ov_inputs().at(1); @@ -46,8 +44,16 @@ ov::OutputVector scatter_elements(const ov::frontend::onnx::Node& node) { return {std::make_shared(data, indices, updates, axis_node, reduction_ov)}; } -} // namespace set_1 -} // namespace op + +static bool register_multiple_translators(void) { + ONNX_OP_M("ScatterElements", OPSET_SINCE(1), ai_onnx::opset_1::scatter_elements); + ONNX_OP_M("Scatter", OPSET_SINCE(1), ai_onnx::opset_1::scatter_elements); + return true; +} + +static bool registered = register_multiple_translators(); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/scatter_elements.hpp b/src/frontends/onnx/frontend/src/op/scatter_elements.hpp deleted file mode 100644 index d335b729824fb5..00000000000000 --- a/src/frontends/onnx/frontend/src/op/scatter_elements.hpp +++ /dev/null @@ -1,20 +0,0 
@@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector scatter_elements(const ov::frontend::onnx::Node& node); - -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/scatter_nd.cpp b/src/frontends/onnx/frontend/src/op/scatter_nd.cpp index 55858ded43b9e7..726a019e2a2dd9 100644 --- a/src/frontends/onnx/frontend/src/op/scatter_nd.cpp +++ b/src/frontends/onnx/frontend/src/op/scatter_nd.cpp @@ -5,18 +5,16 @@ // Disabled in CMakeList // Update to higher opset required -#include "op/scatter_nd.hpp" - +#include "core/operator_set.hpp" #include "exceptions.hpp" #include "openvino/op/scatter_nd_update.hpp" - using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector scatter_nd(const ov::frontend::onnx::Node& node) { ov::OutputVector ov_inputs{node.get_ov_inputs()}; auto data = ov_inputs.at(0); @@ -33,8 +31,9 @@ ov::OutputVector scatter_nd(const ov::frontend::onnx::Node& node) { return {std::make_shared(data, indices, updates)}; } -} // namespace set_1 -} // namespace op +ONNX_OP("ScatterND", OPSET_SINCE(1), ai_onnx::opset_1::scatter_nd); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/scatter_nd.hpp b/src/frontends/onnx/frontend/src/op/scatter_nd.hpp deleted file mode 100644 index 178b6e290bd249..00000000000000 --- a/src/frontends/onnx/frontend/src/op/scatter_nd.hpp +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -// Disabled in CMakeList -// Update to higher opset required - -#pragma once - 
-#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector scatter_nd(const ov::frontend::onnx::Node& node); - -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/selu.cpp b/src/frontends/onnx/frontend/src/op/selu.cpp index 3345c7e8c9bcd6..d2770f65bc6ca2 100644 --- a/src/frontends/onnx/frontend/src/op/selu.cpp +++ b/src/frontends/onnx/frontend/src/op/selu.cpp @@ -2,18 +2,17 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/selu.hpp" - -#include "openvino/op/constant.hpp" #include "openvino/op/selu.hpp" +#include "core/operator_set.hpp" +#include "openvino/op/constant.hpp" using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector selu(const ov::frontend::onnx::Node& node) { auto data = node.get_ov_inputs().at(0); auto alpha = node.get_attribute_value("alpha", 1.67326319217681884765625); @@ -26,8 +25,9 @@ ov::OutputVector selu(const ov::frontend::onnx::Node& node) { return {std::make_shared(data, alpha_node, gamma_node)}; } -} // namespace set_1 -} // namespace op +ONNX_OP("Selu", OPSET_SINCE(1), ai_onnx::opset_1::selu); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/selu.hpp b/src/frontends/onnx/frontend/src/op/selu.hpp deleted file mode 100644 index bc95fef8e27c16..00000000000000 --- a/src/frontends/onnx/frontend/src/op/selu.hpp +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector selu(const ov::frontend::onnx::Node& node); - -} // namespace 
set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/shape.cpp b/src/frontends/onnx/frontend/src/op/shape.cpp index 02949e04b4d665..70d6698ac43768 100644 --- a/src/frontends/onnx/frontend/src/op/shape.cpp +++ b/src/frontends/onnx/frontend/src/op/shape.cpp @@ -2,24 +2,22 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/shape.hpp" - #include #include "core/null_node.hpp" +#include "core/operator_set.hpp" #include "openvino/core/node_vector.hpp" #include "openvino/core/type/element_type.hpp" #include "openvino/op/shape_of.hpp" #include "openvino/op/slice.hpp" - using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { +namespace ai_onnx { -namespace set_15 { +namespace opset_15 { ov::OutputVector shape(const ov::frontend::onnx::Node& node) { using ov::op::util::is_null; @@ -41,17 +39,19 @@ ov::OutputVector shape(const ov::frontend::onnx::Node& node) { return {std::make_shared(input_shape, start, end, default_step)}; } -} // namespace set_15 +ONNX_OP("Shape", OPSET_SINCE(15), ai_onnx::opset_15::shape); +} // namespace opset_15 -namespace set_1 { +namespace opset_1 { ov::OutputVector shape(const ov::frontend::onnx::Node& node) { const auto data = node.get_ov_inputs().at(0); return {std::make_shared(data)}; } -} // namespace set_1 -} // namespace op +ONNX_OP("Shape", OPSET_RANGE(1, 14), ai_onnx::opset_1::shape); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/shape.hpp b/src/frontends/onnx/frontend/src/op/shape.hpp deleted file mode 100644 index d9c3cb01c37985..00000000000000 --- a/src/frontends/onnx/frontend/src/op/shape.hpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { 
-namespace onnx { -namespace op { -namespace set_15 { - -ov::OutputVector shape(const ov::frontend::onnx::Node& node); - -} // namespace set_15 - -namespace set_1 { - -ov::OutputVector shape(const ov::frontend::onnx::Node& node); - -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/shrink.cpp b/src/frontends/onnx/frontend/src/op/shrink.cpp index 61bbd031f41562..2cf359fd7cd52c 100644 --- a/src/frontends/onnx/frontend/src/op/shrink.cpp +++ b/src/frontends/onnx/frontend/src/op/shrink.cpp @@ -2,8 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/shrink.hpp" - +#include "core/operator_set.hpp" #include "exceptions.hpp" #include "openvino/op/add.hpp" #include "openvino/op/constant.hpp" @@ -12,14 +11,13 @@ #include "openvino/op/less.hpp" #include "openvino/op/multiply.hpp" #include "openvino/op/subtract.hpp" - using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector shrink(const ov::frontend::onnx::Node& node) { const auto input = node.get_ov_inputs().at(0); const float bias = node.get_attribute_value("bias", 0.0f); @@ -64,8 +62,9 @@ ov::OutputVector shrink(const ov::frontend::onnx::Node& node) { return {std::make_shared(input_plus_bias, input_minus_bias)}; } -} // namespace set_1 -} // namespace op +ONNX_OP("Shrink", OPSET_SINCE(1), ai_onnx::opset_1::shrink); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/shrink.hpp b/src/frontends/onnx/frontend/src/op/shrink.hpp deleted file mode 100644 index 601f251c3c33c1..00000000000000 --- a/src/frontends/onnx/frontend/src/op/shrink.hpp +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include 
"core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -/// \brief ONNX Shrink operator -/// -/// \note It operates on a single input tensor and two attributes: lambd and bias. -/// Input values greater or equal to '-lambd' and less or equal to 'lambd' are -/// zeroed-out. 'Bias' is added to the values that are less than '-lambd' -/// and subtracted from values greater than 'lambd'. -ov::OutputVector shrink(const ov::frontend::onnx::Node& node); -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/sigmoid.hpp b/src/frontends/onnx/frontend/src/op/sigmoid.cpp similarity index 56% rename from src/frontends/onnx/frontend/src/op/sigmoid.hpp rename to src/frontends/onnx/frontend/src/op/sigmoid.cpp index c4f236804c099d..548a2e6d2ada1d 100644 --- a/src/frontends/onnx/frontend/src/op/sigmoid.hpp +++ b/src/frontends/onnx/frontend/src/op/sigmoid.cpp @@ -2,22 +2,22 @@ // SPDX-License-Identifier: Apache-2.0 // -#pragma once - -#include "core/node.hpp" #include "openvino/op/sigmoid.hpp" +#include "core/operator_set.hpp" + namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { -inline ov::OutputVector sigmoid(const ov::frontend::onnx::Node& node) { +namespace ai_onnx { +namespace opset_1 { +ov::OutputVector sigmoid(const ov::frontend::onnx::Node& node) { return {std::make_shared(node.get_ov_inputs().at(0))}; } -} // namespace set_1 -} // namespace op +ONNX_OP("Sigmoid", OPSET_SINCE(1), ai_onnx::opset_1::sigmoid); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/sign.hpp b/src/frontends/onnx/frontend/src/op/sign.cpp similarity index 57% rename from src/frontends/onnx/frontend/src/op/sign.hpp rename to src/frontends/onnx/frontend/src/op/sign.cpp index df03c5cda88f1b..6a4b520b1ce770 100644 
--- a/src/frontends/onnx/frontend/src/op/sign.hpp +++ b/src/frontends/onnx/frontend/src/op/sign.cpp @@ -2,21 +2,21 @@ // SPDX-License-Identifier: Apache-2.0 // -#pragma once - -#include "core/node.hpp" #include "openvino/op/sign.hpp" +#include "core/operator_set.hpp" + namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { -inline ov::OutputVector sign(const ov::frontend::onnx::Node& node) { +namespace ai_onnx { +namespace opset_1 { +ov::OutputVector sign(const ov::frontend::onnx::Node& node) { return {std::make_shared(node.get_ov_inputs().at(0))}; } -} // namespace set_1 -} // namespace op +ONNX_OP("Sign", OPSET_SINCE(1), ai_onnx::opset_1::sign); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/sin.hpp b/src/frontends/onnx/frontend/src/op/sin.cpp similarity index 57% rename from src/frontends/onnx/frontend/src/op/sin.hpp rename to src/frontends/onnx/frontend/src/op/sin.cpp index d954a87cbad3c5..f37495d338b322 100644 --- a/src/frontends/onnx/frontend/src/op/sin.hpp +++ b/src/frontends/onnx/frontend/src/op/sin.cpp @@ -2,21 +2,21 @@ // SPDX-License-Identifier: Apache-2.0 // -#pragma once - -#include "core/node.hpp" #include "openvino/op/sin.hpp" +#include "core/operator_set.hpp" + namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { -inline ov::OutputVector sin(const ov::frontend::onnx::Node& node) { +namespace ai_onnx { +namespace opset_1 { +ov::OutputVector sin(const ov::frontend::onnx::Node& node) { return {std::make_shared(node.get_ov_inputs().at(0))}; } -} // namespace set_1 -} // namespace op +ONNX_OP("Sin", OPSET_SINCE(1), ai_onnx::opset_1::sin); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/sinh.hpp b/src/frontends/onnx/frontend/src/op/sinh.cpp similarity index 57% rename 
from src/frontends/onnx/frontend/src/op/sinh.hpp rename to src/frontends/onnx/frontend/src/op/sinh.cpp index 138ea43615ae6e..416adea581214d 100644 --- a/src/frontends/onnx/frontend/src/op/sinh.hpp +++ b/src/frontends/onnx/frontend/src/op/sinh.cpp @@ -2,21 +2,21 @@ // SPDX-License-Identifier: Apache-2.0 // -#pragma once - -#include "core/node.hpp" #include "openvino/op/sinh.hpp" +#include "core/operator_set.hpp" + namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { -inline ov::OutputVector sinh(const ov::frontend::onnx::Node& node) { +namespace ai_onnx { +namespace opset_1 { +ov::OutputVector sinh(const ov::frontend::onnx::Node& node) { return {std::make_shared(node.get_ov_inputs().at(0))}; } -} // namespace set_1 -} // namespace op +ONNX_OP("Sinh", OPSET_SINCE(1), ai_onnx::opset_1::sinh); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/size.cpp b/src/frontends/onnx/frontend/src/op/size.cpp index 759e7041baa40c..620957697bf14b 100644 --- a/src/frontends/onnx/frontend/src/op/size.cpp +++ b/src/frontends/onnx/frontend/src/op/size.cpp @@ -2,20 +2,18 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/size.hpp" - +#include "core/operator_set.hpp" #include "openvino/core/shape.hpp" #include "openvino/op/constant.hpp" #include "openvino/op/reduce_prod.hpp" #include "openvino/op/shape_of.hpp" - using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector size(const ov::frontend::onnx::Node& node) { auto data = node.get_ov_inputs().at(0); auto axes = v0::Constant::create(ov::element::i32, ov::Shape{}, {0}); @@ -23,8 +21,9 @@ ov::OutputVector size(const ov::frontend::onnx::Node& node) { return {std::make_shared(input_shape, axes)}; } -} // namespace set_1 -} // namespace op +ONNX_OP("Size", OPSET_SINCE(1), 
ai_onnx::opset_1::size); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/size.hpp b/src/frontends/onnx/frontend/src/op/size.hpp deleted file mode 100644 index e98fe3f825c8db..00000000000000 --- a/src/frontends/onnx/frontend/src/op/size.hpp +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector size(const ov::frontend::onnx::Node& node); - -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/slice.cpp b/src/frontends/onnx/frontend/src/op/slice.cpp index aba0f14b41cbbb..99ca73f09640ef 100644 --- a/src/frontends/onnx/frontend/src/op/slice.cpp +++ b/src/frontends/onnx/frontend/src/op/slice.cpp @@ -2,22 +2,21 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/slice.hpp" +#include "openvino/op/slice.hpp" #include "core/null_node.hpp" +#include "core/operator_set.hpp" #include "openvino/op/broadcast.hpp" #include "openvino/op/constant.hpp" #include "openvino/op/shape_of.hpp" -#include "openvino/op/slice.hpp" - using namespace ov::op; using ov::Shape; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_10 { +namespace ai_onnx { +namespace opset_10 { ov::OutputVector slice(const ov::frontend::onnx::Node& node) { using ov::op::util::is_null; @@ -44,9 +43,10 @@ ov::OutputVector slice(const ov::frontend::onnx::Node& node) { return {std::make_shared(data, starts, ends, steps)}; } } -} // namespace set_10 +ONNX_OP("Slice", OPSET_SINCE(10), ai_onnx::opset_10::slice); +} // namespace opset_10 -namespace set_1 { +namespace opset_1 { ov::OutputVector slice(const ov::frontend::onnx::Node& node) { ov::Output data = 
node.get_ov_inputs().at(0); const auto starts_atr = node.get_attribute_value>("starts"); @@ -66,8 +66,9 @@ ov::OutputVector slice(const ov::frontend::onnx::Node& node) { return {std::make_shared(data, starts, ends, steps, axes)}; } } -} // namespace set_1 -} // namespace op +ONNX_OP("Slice", OPSET_RANGE(1, 9), ai_onnx::opset_1::slice); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/slice.hpp b/src/frontends/onnx/frontend/src/op/slice.hpp deleted file mode 100644 index 0218b8b6aae9ef..00000000000000 --- a/src/frontends/onnx/frontend/src/op/slice.hpp +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_10 { -ov::OutputVector slice(const ov::frontend::onnx::Node& node); - -} // namespace set_10 - -namespace set_1 { -ov::OutputVector slice(const ov::frontend::onnx::Node& node); - -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/softmax.cpp b/src/frontends/onnx/frontend/src/op/softmax.cpp index eb13ca9af0e72e..3088c980a11b9e 100644 --- a/src/frontends/onnx/frontend/src/op/softmax.cpp +++ b/src/frontends/onnx/frontend/src/op/softmax.cpp @@ -2,15 +2,14 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/softmax.hpp" +#include "openvino/op/softmax.hpp" +#include "core/operator_set.hpp" #include "openvino/frontend/exception.hpp" #include "openvino/op/constant.hpp" #include "openvino/op/reshape.hpp" #include "openvino/op/shape_of.hpp" -#include "openvino/op/softmax.hpp" #include "utils/reshape.hpp" - using namespace ov::op; namespace ov { @@ -26,8 +25,8 @@ std::shared_ptr onnx_softmax(const ov::Output data, const in } } // namespace -namespace op { -namespace 
set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector softmax(const ov::frontend::onnx::Node& node) { const auto data = node.get_ov_inputs().at(0); const auto data_rank = data.get_partial_shape().rank(); @@ -49,8 +48,9 @@ ov::OutputVector softmax(const ov::frontend::onnx::Node& node) { return {result}; } -} // namespace set_1 -namespace set_11 { +ONNX_OP("Softmax", OPSET_RANGE(1, 10), ai_onnx::opset_1::softmax); +} // namespace opset_1 +namespace opset_11 { ov::OutputVector softmax(const ov::frontend::onnx::Node& node) { const auto data = node.get_ov_inputs().at(0); const auto data_rank = data.get_partial_shape().rank(); @@ -72,8 +72,9 @@ ov::OutputVector softmax(const ov::frontend::onnx::Node& node) { return {result}; } -} // namespace set_11 -namespace set_13 { +ONNX_OP("Softmax", OPSET_RANGE(11, 12), ai_onnx::opset_11::softmax); +} // namespace opset_11 +namespace opset_13 { ov::OutputVector softmax(const ov::frontend::onnx::Node& node) { const auto data = node.get_ov_inputs().at(0); @@ -81,8 +82,9 @@ ov::OutputVector softmax(const ov::frontend::onnx::Node& node) { return {std::make_shared(data, axis)}; } -} // namespace set_13 -} // namespace op +ONNX_OP("Softmax", OPSET_SINCE(13), ai_onnx::opset_13::softmax); +} // namespace opset_13 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/softmax.hpp b/src/frontends/onnx/frontend/src/op/softmax.hpp deleted file mode 100644 index fef78b03341dec..00000000000000 --- a/src/frontends/onnx/frontend/src/op/softmax.hpp +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector softmax(const ov::frontend::onnx::Node& node); - -} // namespace set_1 - -namespace set_11 { -ov::OutputVector softmax(const 
ov::frontend::onnx::Node& node); - -} // namespace set_11 - -namespace set_13 { -ov::OutputVector softmax(const ov::frontend::onnx::Node& node); - -} // namespace set_13 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/softplus.cpp b/src/frontends/onnx/frontend/src/op/softplus.cpp index 50ddd0d25f18cb..f2eede300f0625 100644 --- a/src/frontends/onnx/frontend/src/op/softplus.cpp +++ b/src/frontends/onnx/frontend/src/op/softplus.cpp @@ -2,24 +2,24 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/softplus.hpp" - #include "openvino/op/softplus.hpp" +#include "core/operator_set.hpp" using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector softplus(const ov::frontend::onnx::Node& node) { const auto data = node.get_ov_inputs().at(0); return {std::make_shared(data)}; } -} // namespace set_1 -} // namespace op +ONNX_OP("Softplus", OPSET_SINCE(1), ai_onnx::opset_1::softplus); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/softplus.hpp b/src/frontends/onnx/frontend/src/op/softplus.hpp deleted file mode 100644 index 3d1f86107c0428..00000000000000 --- a/src/frontends/onnx/frontend/src/op/softplus.hpp +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector softplus(const ov::frontend::onnx::Node& node); - -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/softsign.cpp b/src/frontends/onnx/frontend/src/op/softsign.cpp index 
9420aa4c7f2126..ff5333c7629a39 100644 --- a/src/frontends/onnx/frontend/src/op/softsign.cpp +++ b/src/frontends/onnx/frontend/src/op/softsign.cpp @@ -2,22 +2,22 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/softsign.hpp" - #include "openvino/op/softsign.hpp" +#include "core/operator_set.hpp" using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector softsign(const ov::frontend::onnx::Node& node) { return {std::make_shared(node.get_ov_inputs().at(0))}; } -} // namespace set_1 -} // namespace op +ONNX_OP("Softsign", OPSET_SINCE(1), ai_onnx::opset_1::softsign); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/softsign.hpp b/src/frontends/onnx/frontend/src/op/softsign.hpp deleted file mode 100644 index 89cfa88de82320..00000000000000 --- a/src/frontends/onnx/frontend/src/op/softsign.hpp +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector softsign(const ov::frontend::onnx::Node& node); - -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/space_to_depth.cpp b/src/frontends/onnx/frontend/src/op/space_to_depth.cpp index b60d858412f2de..27109b399b355d 100644 --- a/src/frontends/onnx/frontend/src/op/space_to_depth.cpp +++ b/src/frontends/onnx/frontend/src/op/space_to_depth.cpp @@ -2,18 +2,17 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/space_to_depth.hpp" - -#include "openvino/frontend/exception.hpp" #include "openvino/op/space_to_depth.hpp" +#include "core/operator_set.hpp" +#include 
"openvino/frontend/exception.hpp" using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector space_to_depth(const ov::frontend::onnx::Node& node) { auto data = node.get_ov_inputs().at(0); const auto& shape = data.get_partial_shape(); @@ -22,8 +21,9 @@ ov::OutputVector space_to_depth(const ov::frontend::onnx::Node& node) { const auto mode = v0::SpaceToDepth::SpaceToDepthMode::BLOCKS_FIRST; return {std::make_shared(data, mode, block_size)}; } -} // namespace set_1 -} // namespace op +ONNX_OP("SpaceToDepth", OPSET_SINCE(1), ai_onnx::opset_1::space_to_depth); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/space_to_depth.hpp b/src/frontends/onnx/frontend/src/op/space_to_depth.hpp deleted file mode 100644 index 6d14e51e4bfa7c..00000000000000 --- a/src/frontends/onnx/frontend/src/op/space_to_depth.hpp +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -/// \brief Permutes input tensor blocks of spatial data into depth. -/// -/// \param[in] node The ONNX input node describing operation. 
-/// -/// \return ov::OutputVector containing Tensor with shape: -/// [N, C * blocksize * blocksize, H / blocksize, W / blocksize] -ov::OutputVector space_to_depth(const ov::frontend::onnx::Node& node); -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/split.cpp b/src/frontends/onnx/frontend/src/op/split.cpp index e62607bec2c24a..0a63a2ac2d69e5 100644 --- a/src/frontends/onnx/frontend/src/op/split.cpp +++ b/src/frontends/onnx/frontend/src/op/split.cpp @@ -2,19 +2,18 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/split.hpp" +#include "utils/split.hpp" +#include "core/operator_set.hpp" #include "openvino/op/constant.hpp" #include "openvino/op/variadic_split.hpp" -#include "utils/split.hpp" - using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector split(const ov::frontend::onnx::Node& node) { const auto input = node.get_ov_inputs().at(0); const auto axis = node.get_attribute_value("axis", 0); @@ -28,9 +27,10 @@ ov::OutputVector split(const ov::frontend::onnx::Node& node) { } } -} // namespace set_1 +ONNX_OP("Split", OPSET_RANGE(1, 12), ai_onnx::opset_1::split); +} // namespace opset_1 -namespace set_13 { +namespace opset_13 { ov::OutputVector split(const ov::frontend::onnx::Node& node) { const auto inputs = node.get_ov_inputs(); const auto axis = node.get_attribute_value("axis", 0); @@ -44,8 +44,9 @@ ov::OutputVector split(const ov::frontend::onnx::Node& node) { } } -} // namespace set_13 -} // namespace op +ONNX_OP("Split", OPSET_SINCE(13), ai_onnx::opset_13::split); +} // namespace opset_13 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/split.hpp b/src/frontends/onnx/frontend/src/op/split.hpp deleted file mode 100644 index d98b3cdfa29766..00000000000000 
--- a/src/frontends/onnx/frontend/src/op/split.hpp +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector split(const ov::frontend::onnx::Node& node); - -} // namespace set_1 - -namespace set_13 { -ov::OutputVector split(const ov::frontend::onnx::Node& node); - -} // namespace set_13 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/sqrt.hpp b/src/frontends/onnx/frontend/src/op/sqrt.cpp similarity index 57% rename from src/frontends/onnx/frontend/src/op/sqrt.hpp rename to src/frontends/onnx/frontend/src/op/sqrt.cpp index ed837c001f397d..bd2e3d3979ab88 100644 --- a/src/frontends/onnx/frontend/src/op/sqrt.hpp +++ b/src/frontends/onnx/frontend/src/op/sqrt.cpp @@ -2,22 +2,22 @@ // SPDX-License-Identifier: Apache-2.0 // -#pragma once - -#include "core/node.hpp" #include "openvino/op/sqrt.hpp" +#include "core/operator_set.hpp" + namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { -inline ov::OutputVector sqrt(const ov::frontend::onnx::Node& node) { +namespace ai_onnx { +namespace opset_1 { +ov::OutputVector sqrt(const ov::frontend::onnx::Node& node) { return {std::make_shared(node.get_ov_inputs().at(0))}; } -} // namespace set_1 -} // namespace op +ONNX_OP("Sqrt", OPSET_SINCE(1), ai_onnx::opset_1::sqrt); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/squeeze.cpp b/src/frontends/onnx/frontend/src/op/squeeze.cpp index a922c8a98a6596..55b64fd6c1f30b 100644 --- a/src/frontends/onnx/frontend/src/op/squeeze.cpp +++ b/src/frontends/onnx/frontend/src/op/squeeze.cpp @@ -2,18 +2,17 @@ // SPDX-License-Identifier: Apache-2.0 // -#include 
"op/squeeze.hpp" - -#include "openvino/op/constant.hpp" #include "openvino/op/squeeze.hpp" +#include "core/operator_set.hpp" +#include "openvino/op/constant.hpp" using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector squeeze(const ov::frontend::onnx::Node& node) { auto data = node.get_ov_inputs().at(0); const auto axes = node.get_attribute_value>("axes", {}); @@ -26,9 +25,10 @@ ov::OutputVector squeeze(const ov::frontend::onnx::Node& node) { } } -} // namespace set_1 +ONNX_OP("Squeeze", OPSET_RANGE(1, 12), ai_onnx::opset_1::squeeze); +} // namespace opset_1 -namespace set_13 { +namespace opset_13 { ov::OutputVector squeeze(const ov::frontend::onnx::Node& node) { const auto inputs = node.get_ov_inputs(); if (inputs.size() < 2) { @@ -38,8 +38,9 @@ ov::OutputVector squeeze(const ov::frontend::onnx::Node& node) { } } -} // namespace set_13 -} // namespace op +ONNX_OP("Squeeze", OPSET_SINCE(13), ai_onnx::opset_13::squeeze); +} // namespace opset_13 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/squeeze.hpp b/src/frontends/onnx/frontend/src/op/squeeze.hpp deleted file mode 100644 index f5c267cbc499f1..00000000000000 --- a/src/frontends/onnx/frontend/src/op/squeeze.hpp +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector squeeze(const ov::frontend::onnx::Node& node); - -} // namespace set_1 - -namespace set_13 { -ov::OutputVector squeeze(const ov::frontend::onnx::Node& node); - -} // namespace set_13 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/stft.cpp 
b/src/frontends/onnx/frontend/src/op/stft.cpp index e83feec56c4492..0595d017ffa289 100644 --- a/src/frontends/onnx/frontend/src/op/stft.cpp +++ b/src/frontends/onnx/frontend/src/op/stft.cpp @@ -2,9 +2,8 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/stft.hpp" - #include "core/null_node.hpp" +#include "core/operator_set.hpp" #include "exceptions.hpp" #include "openvino/op/broadcast.hpp" #include "openvino/op/concat.hpp" @@ -17,15 +16,14 @@ #include "openvino/op/util/op_types.hpp" #include "utils/common.hpp" #include "utils/dft.hpp" - using namespace ov::op; using ov::Shape; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_17 { +namespace ai_onnx { +namespace opset_17 { ov::OutputVector stft(const ov::frontend::onnx::Node& node) { const ov::OutputVector ov_inputs{node.get_ov_inputs()}; @@ -123,8 +121,9 @@ ov::OutputVector stft(const ov::frontend::onnx::Node& node) { return {std::make_shared(all_signals, 0)}; } -} // namespace set_17 -} // namespace op +ONNX_OP("STFT", OPSET_SINCE(1), ai_onnx::opset_17::stft); +} // namespace opset_17 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/stft.hpp b/src/frontends/onnx/frontend/src/op/stft.hpp deleted file mode 100644 index 3cd7358de35087..00000000000000 --- a/src/frontends/onnx/frontend/src/op/stft.hpp +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (C) 2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_17 { -ov::OutputVector stft(const ov::frontend::onnx::Node& node); - -} // namespace set_17 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/sub.hpp b/src/frontends/onnx/frontend/src/op/sub.cpp similarity index 50% rename from src/frontends/onnx/frontend/src/op/sub.hpp 
rename to src/frontends/onnx/frontend/src/op/sub.cpp index eef637a94eb993..a7dfccb6838e74 100644 --- a/src/frontends/onnx/frontend/src/op/sub.hpp +++ b/src/frontends/onnx/frontend/src/op/sub.cpp @@ -2,29 +2,30 @@ // SPDX-License-Identifier: Apache-2.0 // -#pragma once - -#include "core/node.hpp" +#include "core/operator_set.hpp" #include "openvino/op/subtract.hpp" +#include "utils/common.hpp" namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { -inline ov::OutputVector sub(const ov::frontend::onnx::Node& node) { +namespace ai_onnx { +namespace opset_1 { +ov::OutputVector sub(const ov::frontend::onnx::Node& node) { return common::handle_opset6_binary_op(node); } -} // namespace set_1 +ONNX_OP("Sub", OPSET_RANGE(1, 6), ai_onnx::opset_1::sub); +} // namespace opset_1 -namespace set_7 { -inline ov::OutputVector sub(const ov::frontend::onnx::Node& node) { +namespace opset_7 { +ov::OutputVector sub(const ov::frontend::onnx::Node& node) { return {std::make_shared(node.get_ov_inputs().at(0), node.get_ov_inputs().at(1))}; } -} // namespace set_7 -} // namespace op +ONNX_OP("Sub", OPSET_SINCE(7), ai_onnx::opset_7::sub); +} // namespace opset_7 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/sum.hpp b/src/frontends/onnx/frontend/src/op/sum.cpp similarity index 52% rename from src/frontends/onnx/frontend/src/op/sum.hpp rename to src/frontends/onnx/frontend/src/op/sum.cpp index c425c31f3ba374..0948a8f388269b 100644 --- a/src/frontends/onnx/frontend/src/op/sum.hpp +++ b/src/frontends/onnx/frontend/src/op/sum.cpp @@ -2,30 +2,30 @@ // SPDX-License-Identifier: Apache-2.0 // -#pragma once - -#include "core/node.hpp" +#include "core/operator_set.hpp" #include "openvino/op/add.hpp" #include "utils/variadic.hpp" namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { -inline ov::OutputVector sum(const ov::frontend::onnx::Node& node) { 
+namespace ai_onnx { +namespace opset_1 { +ov::OutputVector sum(const ov::frontend::onnx::Node& node) { return variadic::make_ng_variadic_op(node, ov::op::AutoBroadcastType::NONE); } -} // namespace set_1 +ONNX_OP("Sum", OPSET_RANGE(1, 7), ai_onnx::opset_1::sum); +} // namespace opset_1 -namespace set_8 { -inline ov::OutputVector sum(const ov::frontend::onnx::Node& node) { +namespace opset_8 { +ov::OutputVector sum(const ov::frontend::onnx::Node& node) { return variadic::make_ng_variadic_op(node); } -} // namespace set_8 -} // namespace op +ONNX_OP("Sum", OPSET_SINCE(8), ai_onnx::opset_8::sum); +} // namespace opset_8 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/tan.hpp b/src/frontends/onnx/frontend/src/op/tan.cpp similarity index 57% rename from src/frontends/onnx/frontend/src/op/tan.hpp rename to src/frontends/onnx/frontend/src/op/tan.cpp index 0bfa17e8fcdedc..0c68af9882084b 100644 --- a/src/frontends/onnx/frontend/src/op/tan.hpp +++ b/src/frontends/onnx/frontend/src/op/tan.cpp @@ -2,21 +2,21 @@ // SPDX-License-Identifier: Apache-2.0 // -#pragma once - -#include "core/node.hpp" #include "openvino/op/tan.hpp" +#include "core/operator_set.hpp" + namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { -inline ov::OutputVector tan(const ov::frontend::onnx::Node& node) { +namespace ai_onnx { +namespace opset_1 { +ov::OutputVector tan(const ov::frontend::onnx::Node& node) { return {std::make_shared(node.get_ov_inputs().at(0))}; } -} // namespace set_1 -} // namespace op +ONNX_OP("Tan", OPSET_SINCE(1), ai_onnx::opset_1::tan); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/tanh.hpp b/src/frontends/onnx/frontend/src/op/tanh.cpp similarity index 57% rename from src/frontends/onnx/frontend/src/op/tanh.hpp rename to 
src/frontends/onnx/frontend/src/op/tanh.cpp index 3216924b2c6e56..7d282137230554 100644 --- a/src/frontends/onnx/frontend/src/op/tanh.hpp +++ b/src/frontends/onnx/frontend/src/op/tanh.cpp @@ -2,22 +2,22 @@ // SPDX-License-Identifier: Apache-2.0 // -#pragma once - -#include "core/node.hpp" #include "openvino/op/tanh.hpp" +#include "core/operator_set.hpp" + namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { -inline ov::OutputVector tanh(const ov::frontend::onnx::Node& node) { +namespace ai_onnx { +namespace opset_1 { +ov::OutputVector tanh(const ov::frontend::onnx::Node& node) { return {std::make_shared(node.get_ov_inputs().at(0))}; } -} // namespace set_1 -} // namespace op +ONNX_OP("Tanh", OPSET_SINCE(1), ai_onnx::opset_1::tanh); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/thresholded_relu.cpp b/src/frontends/onnx/frontend/src/op/thresholded_relu.cpp index f8e5b0aad60814..592a9ea4f1507e 100644 --- a/src/frontends/onnx/frontend/src/op/thresholded_relu.cpp +++ b/src/frontends/onnx/frontend/src/op/thresholded_relu.cpp @@ -2,20 +2,18 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/thresholded_relu.hpp" - +#include "core/operator_set.hpp" #include "openvino/op/constant.hpp" #include "openvino/op/convert.hpp" #include "openvino/op/greater.hpp" #include "openvino/op/multiply.hpp" - using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector thresholded_relu(const ov::frontend::onnx::Node& node) { const auto data = node.get_ov_inputs().at(0); const double alpha = node.get_attribute_value("alpha", 1.0); @@ -28,8 +26,9 @@ ov::OutputVector thresholded_relu(const ov::frontend::onnx::Node& node) { return {std::make_shared(data, data_map)}; } -} // namespace set_1 -} // namespace op +ONNX_OP("ThresholdedRelu", 
OPSET_SINCE(1), ai_onnx::opset_1::thresholded_relu); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/thresholded_relu.hpp b/src/frontends/onnx/frontend/src/op/thresholded_relu.hpp deleted file mode 100644 index 0bf8baec55c792..00000000000000 --- a/src/frontends/onnx/frontend/src/op/thresholded_relu.hpp +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector thresholded_relu(const ov::frontend::onnx::Node& node); - -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/tile.cpp b/src/frontends/onnx/frontend/src/op/tile.cpp index dbafdb935f824d..e4c37592f46e7a 100644 --- a/src/frontends/onnx/frontend/src/op/tile.cpp +++ b/src/frontends/onnx/frontend/src/op/tile.cpp @@ -2,19 +2,18 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/tile.hpp" +#include "openvino/op/tile.hpp" -#include "core/node.hpp" +#include "core/operator_set.hpp" #include "openvino/op/convert.hpp" -#include "openvino/op/tile.hpp" using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector tile(const ov::frontend::onnx::Node& node) { auto input = node.get_ov_inputs().at(0); auto repeats = node.get_ov_inputs().at(1); @@ -26,8 +25,9 @@ ov::OutputVector tile(const ov::frontend::onnx::Node& node) { return {std::make_shared(input, repeats)}; } -} // namespace set_1 -} // namespace op +ONNX_OP("Tile", OPSET_SINCE(1), ai_onnx::opset_1::tile); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git 
a/src/frontends/onnx/frontend/src/op/tile.hpp b/src/frontends/onnx/frontend/src/op/tile.hpp deleted file mode 100644 index d1dae306d570c4..00000000000000 --- a/src/frontends/onnx/frontend/src/op/tile.hpp +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -/// \brief Performs ONNX Tile operation. -/// -/// \param node The ONNX node object representing this operation. -/// \return The vector containing OV a node producing the output of the Tile op. -ov::OutputVector tile(const ov::frontend::onnx::Node& node); - -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/topk.cpp b/src/frontends/onnx/frontend/src/op/topk.cpp index c59507fa65d6ed..0ef2ddc873eb2f 100644 --- a/src/frontends/onnx/frontend/src/op/topk.cpp +++ b/src/frontends/onnx/frontend/src/op/topk.cpp @@ -2,12 +2,11 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/topk.hpp" +#include "openvino/op/topk.hpp" +#include "core/operator_set.hpp" #include "openvino/frontend/exception.hpp" -#include "openvino/op/topk.hpp" #include "utils/reshape.hpp" - namespace { /// \return Return the second input to the TopK node reshaped to a scalar. 
ov::Output get_k(const ov::frontend::onnx::Node& node) { @@ -27,8 +26,8 @@ using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector topk(const ov::frontend::onnx::Node& node) { auto data = node.get_ov_inputs().at(0); const auto k_node = node.get_attribute_as_constant("k"); @@ -43,9 +42,10 @@ ov::OutputVector topk(const ov::frontend::onnx::Node& node) { return {top_k->output(0), top_k->output(1)}; } -} // namespace set_1 +ONNX_OP("TopK", OPSET_RANGE(1, 9), ai_onnx::opset_1::topk); +} // namespace opset_1 -namespace set_10 { +namespace opset_10 { ov::OutputVector topk(const ov::frontend::onnx::Node& node) { auto data = node.get_ov_inputs().at(0); auto k = get_k(node); @@ -60,9 +60,10 @@ ov::OutputVector topk(const ov::frontend::onnx::Node& node) { return {top_k->output(0), top_k->output(1)}; } -} // namespace set_10 +ONNX_OP("TopK", OPSET_IN(10), ai_onnx::opset_10::topk); +} // namespace opset_10 -namespace set_11 { +namespace opset_11 { ov::OutputVector topk(const ov::frontend::onnx::Node& node) { // Process inputs auto data = node.get_ov_inputs().at(0); @@ -83,8 +84,9 @@ ov::OutputVector topk(const ov::frontend::onnx::Node& node) { return {top_k->output(0), top_k->output(1)}; } -} // namespace set_11 -} // namespace op +ONNX_OP("TopK", OPSET_SINCE(11), ai_onnx::opset_11::topk); +} // namespace opset_11 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/topk.hpp b/src/frontends/onnx/frontend/src/op/topk.hpp deleted file mode 100644 index f95e4e526c848c..00000000000000 --- a/src/frontends/onnx/frontend/src/op/topk.hpp +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { 
-/// \brief Performs ONNX TopK operation. -/// -/// \param node The ONNX node object representing this operation. -/// \return The vector containing OV nodes producing output of ONNX TopK -/// operation (both values and indices). -ov::OutputVector topk(const ov::frontend::onnx::Node& node); -} // namespace set_1 - -/// \brief Performs TopK operation from ONNX version 1.5 -/// -/// \details ONNX op set 10 added support for K as a dynamic input, not a static -/// attribute. -namespace set_10 { -ov::OutputVector topk(const ov::frontend::onnx::Node& node); -} - -/// \brief Performs TopK operation from ONNX version 1.6 -/// -/// \details ONNX op set 11 added support for `largest` and `sorted` attributes. -namespace set_11 { -ov::OutputVector topk(const ov::frontend::onnx::Node& node); -} - -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/transpose.cpp b/src/frontends/onnx/frontend/src/op/transpose.cpp index 6f782caff0d3b3..316c01315b0de5 100644 --- a/src/frontends/onnx/frontend/src/op/transpose.cpp +++ b/src/frontends/onnx/frontend/src/op/transpose.cpp @@ -2,17 +2,15 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/transpose.hpp" - +#include "core/operator_set.hpp" #include "utils/reshape.hpp" - using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector transpose(const ov::frontend::onnx::Node& node) { ov::Output data = node.get_ov_inputs().at(0); @@ -21,8 +19,9 @@ ov::OutputVector transpose(const ov::frontend::onnx::Node& node) { return {(permute_axes.empty()) ? 
ov::op::util::transpose(data) : ov::op::util::reorder_axes(data, permute_axes)}; } -} // namespace set_1 -} // namespace op +ONNX_OP("Transpose", OPSET_SINCE(1), ai_onnx::opset_1::transpose); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/transpose.hpp b/src/frontends/onnx/frontend/src/op/transpose.hpp deleted file mode 100644 index d33794d6cce805..00000000000000 --- a/src/frontends/onnx/frontend/src/op/transpose.hpp +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector transpose(const ov::frontend::onnx::Node& node); - -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/trilu.cpp b/src/frontends/onnx/frontend/src/op/trilu.cpp index 6d24b61645bb52..6cb35c3599d9b4 100644 --- a/src/frontends/onnx/frontend/src/op/trilu.cpp +++ b/src/frontends/onnx/frontend/src/op/trilu.cpp @@ -2,9 +2,8 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/trilu.hpp" - #include "core/null_node.hpp" +#include "core/operator_set.hpp" #include "exceptions.hpp" #include "openvino/op/add.hpp" #include "openvino/op/constant.hpp" @@ -18,14 +17,13 @@ #include "openvino/op/shape_of.hpp" #include "openvino/op/squeeze.hpp" #include "openvino/op/unsqueeze.hpp" - using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector trilu(const ov::frontend::onnx::Node& node) { const auto inputs = node.get_ov_inputs(); @@ -112,8 +110,9 @@ ov::OutputVector trilu(const ov::frontend::onnx::Node& node) { std::make_shared(mask, input, 
v0::Constant::create(input.get_element_type(), ov::Shape{}, {0}))}; } -} // namespace set_1 -} // namespace op +ONNX_OP("Trilu", OPSET_SINCE(1), ai_onnx::opset_1::trilu); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/trilu.hpp b/src/frontends/onnx/frontend/src/op/trilu.hpp deleted file mode 100644 index abfe0f2522fbea..00000000000000 --- a/src/frontends/onnx/frontend/src/op/trilu.hpp +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright (C) 2022 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector trilu(const ov::frontend::onnx::Node& node); -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/unique.cpp b/src/frontends/onnx/frontend/src/op/unique.cpp index 035e32940bc102..bc842624474ccd 100644 --- a/src/frontends/onnx/frontend/src/op/unique.cpp +++ b/src/frontends/onnx/frontend/src/op/unique.cpp @@ -2,17 +2,16 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/unique.hpp" - #include "openvino/op/unique.hpp" +#include "core/operator_set.hpp" using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector unique(const ov::frontend::onnx::Node& node) { const auto data = node.get_ov_inputs().at(0); const bool sorted = node.get_attribute_value("sorted", 1); @@ -24,8 +23,9 @@ ov::OutputVector unique(const ov::frontend::onnx::Node& node) { return std::make_shared(data, sorted)->outputs(); } } -} // namespace set_1 -} // namespace op +ONNX_OP("Unique", OPSET_SINCE(1), ai_onnx::opset_1::unique); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace 
ov diff --git a/src/frontends/onnx/frontend/src/op/unique.hpp b/src/frontends/onnx/frontend/src/op/unique.hpp deleted file mode 100644 index cc8f181e77e74b..00000000000000 --- a/src/frontends/onnx/frontend/src/op/unique.hpp +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (C) 2018-2022 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector unique(const ov::frontend::onnx::Node& node); - -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/unsqueeze.cpp b/src/frontends/onnx/frontend/src/op/unsqueeze.cpp index d99194837c8619..fcb93a1d5b80c6 100644 --- a/src/frontends/onnx/frontend/src/op/unsqueeze.cpp +++ b/src/frontends/onnx/frontend/src/op/unsqueeze.cpp @@ -2,34 +2,35 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/unsqueeze.hpp" - -#include "exceptions.hpp" #include "openvino/op/unsqueeze.hpp" +#include "core/operator_set.hpp" +#include "exceptions.hpp" using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { +namespace ai_onnx { +namespace opset_1 { ov::OutputVector unsqueeze(const ov::frontend::onnx::Node& node) { auto data = node.get_ov_inputs().at(0); auto axes_node = node.get_attribute_as_constant>("axes", {}); return {std::make_shared(data, axes_node)}; } -} // namespace set_1 +ONNX_OP("Unsqueeze", OPSET_RANGE(1, 12), ai_onnx::opset_1::unsqueeze); +} // namespace opset_1 -namespace set_13 { +namespace opset_13 { ov::OutputVector unsqueeze(const ov::frontend::onnx::Node& node) { auto inputs = node.get_ov_inputs(); return {std::make_shared(inputs.at(0), inputs.at(1))}; } -} // namespace set_13 -} // namespace op +ONNX_OP("Unsqueeze", OPSET_SINCE(13), ai_onnx::opset_13::unsqueeze); +} // namespace opset_13 +} // namespace ai_onnx } // 
namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/unsqueeze.hpp b/src/frontends/onnx/frontend/src/op/unsqueeze.hpp deleted file mode 100644 index 6c8661eab269ce..00000000000000 --- a/src/frontends/onnx/frontend/src/op/unsqueeze.hpp +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector unsqueeze(const ov::frontend::onnx::Node& node); - -} // namespace set_1 - -namespace set_13 { -ov::OutputVector unsqueeze(const ov::frontend::onnx::Node& node); - -} // namespace set_13 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/upsample.cpp b/src/frontends/onnx/frontend/src/op/upsample.cpp index 767d0872cfa859..d689f26b5ca17d 100644 --- a/src/frontends/onnx/frontend/src/op/upsample.cpp +++ b/src/frontends/onnx/frontend/src/op/upsample.cpp @@ -2,18 +2,16 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "op/upsample.hpp" - +#include "core/operator_set.hpp" #include "exceptions.hpp" #include "openvino/op/constant.hpp" #include "openvino/op/interpolate.hpp" - using namespace ov::op; namespace ov { namespace frontend { namespace onnx { -namespace op { +namespace ai_onnx { namespace { constexpr unsigned version_1{1}; constexpr unsigned version_7{7}; @@ -54,7 +52,7 @@ v11::Interpolate::InterpolateAttrs get_attributes(const std::string& mode) { } } // namespace -namespace set_1 { +namespace opset_1 { ov::OutputVector upsample(const ov::frontend::onnx::Node& node) { const auto height_scale = node.get_attribute_value("height_scale"); const auto width_scale = node.get_attribute_value("width_scale"); @@ -78,9 +76,10 @@ ov::OutputVector upsample(const ov::frontend::onnx::Node& node) { return std::make_shared(data, scales_const, 
get_attributes(mode))->outputs(); } -} // namespace set_1 +ONNX_OP("Upsample", OPSET_RANGE(1, 6), ai_onnx::opset_1::upsample); +} // namespace opset_1 -namespace set_7 { +namespace opset_7 { ov::OutputVector upsample(const ov::frontend::onnx::Node& node) { const auto scales = node.get_attribute_value>("scales"); const auto mode = node.get_attribute_value("mode", "nearest"); @@ -99,9 +98,10 @@ ov::OutputVector upsample(const ov::frontend::onnx::Node& node) { return std::make_shared(data, scales_const, get_attributes(mode))->outputs(); } -} // namespace set_7 +ONNX_OP("Upsample", OPSET_RANGE(7, 8), ai_onnx::opset_7::upsample); +} // namespace opset_7 -namespace set_9 { +namespace opset_9 { ov::OutputVector upsample(const ov::frontend::onnx::Node& node) { const auto mode = node.get_attribute_value("mode", "nearest"); check_mode_support(node, mode, version_9); @@ -110,8 +110,9 @@ ov::OutputVector upsample(const ov::frontend::onnx::Node& node) { return std::make_shared(inputs.at(0), inputs.at(1), get_attributes(mode))->outputs(); } -} // namespace set_9 -} // namespace op +ONNX_OP("Upsample", OPSET_SINCE(9), ai_onnx::opset_9::upsample); +} // namespace opset_9 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/upsample.hpp b/src/frontends/onnx/frontend/src/op/upsample.hpp deleted file mode 100644 index 80c34a0ad21764..00000000000000 --- a/src/frontends/onnx/frontend/src/op/upsample.hpp +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -ov::OutputVector upsample(const ov::frontend::onnx::Node& node); - -} // namespace set_1 - -namespace set_7 { -ov::OutputVector upsample(const ov::frontend::onnx::Node& node); - -} // namespace set_7 - -namespace set_9 { -ov::OutputVector upsample(const 
ov::frontend::onnx::Node& node); - -} // namespace set_9 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/where.hpp b/src/frontends/onnx/frontend/src/op/where.cpp similarity index 62% rename from src/frontends/onnx/frontend/src/op/where.hpp rename to src/frontends/onnx/frontend/src/op/where.cpp index 0f5fa68f0427e4..059eee5f0e1da5 100644 --- a/src/frontends/onnx/frontend/src/op/where.hpp +++ b/src/frontends/onnx/frontend/src/op/where.cpp @@ -2,23 +2,22 @@ // SPDX-License-Identifier: Apache-2.0 // -#pragma once - -#include "core/node.hpp" +#include "core/operator_set.hpp" #include "openvino/op/select.hpp" namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { -inline ov::OutputVector where(const ov::frontend::onnx::Node& node) { +namespace ai_onnx { +namespace opset_1 { +ov::OutputVector where(const ov::frontend::onnx::Node& node) { ov::OutputVector ov_inputs{node.get_ov_inputs()}; return {std::make_shared(ov_inputs.at(0), ov_inputs.at(1), ov_inputs.at(2))}; } -} // namespace set_1 -} // namespace op +ONNX_OP("Where", OPSET_SINCE(1), ai_onnx::opset_1::where); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/xor.hpp b/src/frontends/onnx/frontend/src/op/xor.cpp similarity index 67% rename from src/frontends/onnx/frontend/src/op/xor.hpp rename to src/frontends/onnx/frontend/src/op/xor.cpp index daa34fca9a7694..291ea6af3987ca 100644 --- a/src/frontends/onnx/frontend/src/op/xor.hpp +++ b/src/frontends/onnx/frontend/src/op/xor.cpp @@ -2,24 +2,23 @@ // SPDX-License-Identifier: Apache-2.0 // -#pragma once - -#include "core/node.hpp" +#include "core/operator_set.hpp" #include "openvino/op/logical_xor.hpp" namespace ov { namespace frontend { namespace onnx { -namespace op { -namespace set_1 { -inline ov::OutputVector logical_xor(const 
ov::frontend::onnx::Node& node) { +namespace ai_onnx { +namespace opset_1 { +ov::OutputVector logical_xor(const ov::frontend::onnx::Node& node) { return {std::make_shared(node.get_ov_inputs().at(0), node.get_ov_inputs().at(1), ov::op::AutoBroadcastSpec(ov::op::AutoBroadcastType::NUMPY))}; } -} // namespace set_1 -} // namespace op +ONNX_OP("Xor", OPSET_SINCE(1), ai_onnx::opset_1::logical_xor); +} // namespace opset_1 +} // namespace ai_onnx } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/ops_bridge.cpp b/src/frontends/onnx/frontend/src/ops_bridge.cpp index aa97235bd1aa79..f72653a02b2cb1 100644 --- a/src/frontends/onnx/frontend/src/ops_bridge.cpp +++ b/src/frontends/onnx/frontend/src/ops_bridge.cpp @@ -11,190 +11,10 @@ #include #include "core/attribute.hpp" -#include "op/abs.hpp" -#include "op/acos.hpp" -#include "op/acosh.hpp" -#include "op/adaptive_avg_pooling2d.hpp" -#include "op/add.hpp" -#include "op/affine.hpp" -#include "op/and.hpp" -#include "op/argmax.hpp" -#include "op/argmin.hpp" -#include "op/asin.hpp" -#include "op/asinh.hpp" -#include "op/atan.hpp" -#include "op/atanh.hpp" -#include "op/aten.hpp" -#include "op/average_pool.hpp" -#include "op/batch_norm.hpp" -#include "op/bitshift.hpp" -#include "op/bitwise_and.hpp" -#include "op/bitwise_not.hpp" -#include "op/bitwise_or.hpp" -#include "op/bitwise_xor.hpp" -#include "op/blackmanwindow.hpp" -#include "op/cast.hpp" -#include "op/cast_like.hpp" -#include "op/ceil.hpp" -#include "op/celu.hpp" -#include "op/clip.hpp" -#include "op/com.microsoft/attention.hpp" -#include "op/com.microsoft/bias_gelu.hpp" -#include "op/com.microsoft/embed_layer_normalization.hpp" -#include "op/com.microsoft/fused_conv.hpp" -#include "op/com.microsoft/fusedgemm.hpp" -#include "op/com.microsoft/pad.hpp" -#include "op/com.microsoft/skip_layer_normalization.hpp" -#include "op/compress.hpp" -#include "op/concat.hpp" -#include "op/constant.hpp" -#include "op/constant_fill.hpp" 
-#include "op/constant_of_shape.hpp" -#include "op/conv.hpp" -#include "op/conv_integer.hpp" -#include "op/conv_transpose.hpp" -#include "op/cos.hpp" -#include "op/cosh.hpp" -#include "op/crop.hpp" -#include "op/cum_sum.hpp" -#include "op/depth_to_space.hpp" -#include "op/dequantize_linear.hpp" -#include "op/dft.hpp" -#include "op/div.hpp" -#include "op/dropout.hpp" -#include "op/dynamic_quantize_linear.hpp" -#include "op/einsum.hpp" -#include "op/elu.hpp" -#include "op/equal.hpp" -#include "op/erf.hpp" -#include "op/exp.hpp" -#include "op/expand.hpp" -#include "op/eye_like.hpp" -#include "op/flatten.hpp" -#include "op/floor.hpp" -#include "op/gather.hpp" -#include "op/gather_elements.hpp" -#include "op/gather_nd.hpp" -#include "op/gelu.hpp" -#include "op/gemm.hpp" -#include "op/global_average_pool.hpp" -#include "op/global_max_pool.hpp" -#include "op/greater.hpp" -#include "op/greater_or_equal.hpp" -#include "op/grid_sample.hpp" -#include "op/group_normalization.hpp" -#include "op/gru.hpp" -#include "op/hammingwindow.hpp" -#include "op/hannwindow.hpp" -#include "op/hard_sigmoid.hpp" -#include "op/hard_swish.hpp" -#include "op/hardmax.hpp" -#include "op/identity.hpp" -#include "op/if.hpp" -#include "op/image_scaler.hpp" -#include "op/instance_norm.hpp" -#include "op/is_finite.hpp" -#include "op/is_inf.hpp" -#include "op/is_nan.hpp" -#include "op/layer_normalization.hpp" -#include "op/leaky_relu.hpp" -#include "op/less.hpp" -#include "op/less_or_equal.hpp" -#include "op/log.hpp" -#include "op/log_softmax.hpp" -#include "op/loop.hpp" -#include "op/lp_norm.hpp" -#include "op/lp_pool.hpp" -#include "op/lrn.hpp" -#include "op/lstm.hpp" -#include "op/matmul.hpp" -#include "op/matmul_integer.hpp" -#include "op/max.hpp" -#include "op/max_pool.hpp" -#include "op/max_roi_pool.hpp" -#include "op/mean.hpp" -#include "op/mean_variance_normalization.hpp" -#include "op/min.hpp" -#include "op/mish.hpp" -#include "op/mmdeploy_roi_align_rotated.hpp" -#include "op/mod.hpp" -#include 
"op/mul.hpp" -#include "op/multinomial.hpp" -#include "op/neg.hpp" -#include "op/nms_rotated.hpp" -#include "op/non_max_suppression.hpp" -#include "op/non_zero.hpp" -#include "op/not.hpp" -#include "op/onehot.hpp" -#include "op/or.hpp" -#include "op/org.openvinotoolkit/deformable_conv_2d.hpp" -#include "op/org.openvinotoolkit/detection_output.hpp" -#include "op/org.openvinotoolkit/experimental_detectron/detection_output.hpp" -#include "op/org.openvinotoolkit/experimental_detectron/generate_proposals_single_image.hpp" -#include "op/org.openvinotoolkit/experimental_detectron/prior_grid_generator.hpp" -#include "op/org.openvinotoolkit/experimental_detectron/roi_feature_extractor.hpp" -#include "op/org.openvinotoolkit/experimental_detectron/topk_rios.hpp" -#include "op/org.openvinotoolkit/fake_quantize.hpp" -#include "op/org.openvinotoolkit/generate_proposals.hpp" -#include "op/org.openvinotoolkit/group_norm.hpp" -#include "op/org.openvinotoolkit/normalize.hpp" -#include "op/org.openvinotoolkit/prior_box.hpp" -#include "op/org.openvinotoolkit/swish.hpp" -#include "op/pad.hpp" -#include "op/pow.hpp" -#include "op/prelu.hpp" -#include "op/qlinear_conv.hpp" -#include "op/qlinear_matmul.hpp" -#include "op/quantize_linear.hpp" -#include "op/random_normal.hpp" -#include "op/random_normal_like.hpp" -#include "op/random_uniform.hpp" -#include "op/random_uniform_like.hpp" -#include "op/range.hpp" -#include "op/reciprocal.hpp" -#include "op/reduce.hpp" -#include "op/relu.hpp" -#include "op/reshape.hpp" -#include "op/resize.hpp" -#include "op/reverse_sequence.hpp" -#include "op/rnn.hpp" -#include "op/roi_align.hpp" -#include "op/round.hpp" -#include "op/scan.hpp" -#include "op/scatter_elements.hpp" -#include "op/scatter_nd.hpp" -#include "op/selu.hpp" -#include "op/shape.hpp" -#include "op/shrink.hpp" -#include "op/sigmoid.hpp" -#include "op/sign.hpp" -#include "op/sin.hpp" -#include "op/sinh.hpp" -#include "op/size.hpp" -#include "op/slice.hpp" -#include "op/softmax.hpp" 
-#include "op/softplus.hpp" -#include "op/softsign.hpp" -#include "op/space_to_depth.hpp" -#include "op/split.hpp" -#include "op/sqrt.hpp" -#include "op/squeeze.hpp" -#include "op/stft.hpp" -#include "op/sub.hpp" -#include "op/sum.hpp" -#include "op/tan.hpp" -#include "op/tanh.hpp" -#include "op/thresholded_relu.hpp" -#include "op/tile.hpp" -#include "op/topk.hpp" -#include "op/transpose.hpp" -#include "op/trilu.hpp" -#include "op/unique.hpp" -#include "op/unsqueeze.hpp" -#include "op/upsample.hpp" -#include "op/where.hpp" -#include "op/xor.hpp" #include "openvino/util/log.hpp" +#if !defined(ONNX_BUILD_SHARED) +# include "static_reg.hpp" +#endif using namespace ov::frontend::onnx; @@ -202,8 +22,6 @@ namespace ov { namespace frontend { namespace onnx { -const char* OPENVINO_ONNX_DOMAIN = "org.openvinotoolkit"; - namespace { template > typename Container::const_iterator find(int64_t version, const Container& map) { @@ -221,6 +39,45 @@ typename Container::const_iterator find(int64_t version, const Container& map) { } } // namespace +// Known domains (see operator_set.hpp for a declaration) +const char* OPENVINO_ONNX_DOMAIN = "org.openvinotoolkit"; +const char* MICROSOFT_DOMAIN = "com.microsoft"; +const char* PYTORCH_ATEN_DOMAIN = "org.pytorch.aten"; +const char* MMDEPLOY_DOMAIN = "mmdeploy"; + +// Central storage of supported translators for operations +typedef std::unordered_map SupportedOps; +SupportedOps* get_supported_ops(void) { + static SupportedOps supported_ops; + return &supported_ops; +} + +bool register_translator_exact(const std::string& name, + const int64_t exact_version, + const Operator fn, + const std::string& domain) { + auto& supported_ops = *get_supported_ops(); + auto it = supported_ops[domain][name].find(exact_version); + if (it == supported_ops[domain][name].end()) { + supported_ops[domain][name].emplace(exact_version, std::bind(fn, std::placeholders::_1)); + return true; + } else { + // Left this option to be able create some custom operators 
which overwrites existing + it->second = std::move(fn); + } + return false; +} + +bool register_translator(const std::string name, + const VersionRange range, + const Operator fn, + const std::string domain) { + for (int version = range.m_since; version <= range.m_until; ++version) { + register_translator_exact(name, version, fn, domain); + } + return true; +} + void OperatorsBridge::register_operator_in_custom_domain(std::string name, VersionRange range, Operator fn, @@ -328,10 +185,6 @@ void OperatorsBridge::overwrite_operator(const std::string& name, const std::str register_operator(name, 1, domain, std::move(fn)); } -static const char* const MICROSOFT_DOMAIN = "com.microsoft"; -static const char* const PYTORCH_ATEN_DOMAIN = "org.pytorch.aten"; -static const char* const MMDEPLOY_DOMAIN = "mmdeploy"; - #define REGISTER_OPERATOR(name_, ver_, fn_) \ m_map[""][name_].emplace(ver_, std::bind(op::set_##ver_::fn_, std::placeholders::_1)); @@ -339,300 +192,15 @@ static const char* const MMDEPLOY_DOMAIN = "mmdeploy"; m_map[domain_][name_].emplace(ver_, std::bind(op::set_##ver_::fn_, std::placeholders::_1)); OperatorsBridge::OperatorsBridge() { - register_operator("Abs", VersionRange{1, 5}, op::set_1::abs, "Legacy consumed_inputs is not supported"); - register_operator("Abs", VersionRange::since(6), op::set_6::abs); - register_operator("Acos", VersionRange::single_version_for_all_opsets(), op::set_7::acos); - register_operator("Acosh", VersionRange::single_version_for_all_opsets(), op::set_9::acosh); - register_operator("Add", VersionRange{1, 5}, op::set_1::add, "Legacy consumed_inputs is not supported"); - register_operator("Add", VersionRange::in(6), op::set_6::add); - register_operator("Add", VersionRange{7, 12}, op::set_7::add); - register_operator("Add", VersionRange::in(13), op::set_13::add); - register_operator("Add", VersionRange::since(14), op::set_14::add); - register_operator("And", VersionRange{1, 6}, op::set_1::logical_and); - register_operator("And", 
VersionRange::since(6), op::set_7::logical_and); - // 101468 - Use the VersionRange-based approach for all operators - REGISTER_OPERATOR("ArgMin", 1, argmin); - REGISTER_OPERATOR("ArgMin", 12, argmin); - REGISTER_OPERATOR("ArgMax", 1, argmax); - REGISTER_OPERATOR("ArgMax", 12, argmax); - REGISTER_OPERATOR("Asin", 1, asin); - REGISTER_OPERATOR("Asinh", 1, asinh); - REGISTER_OPERATOR("Atan", 1, atan); - REGISTER_OPERATOR("ATen", 1, aten); - REGISTER_OPERATOR("Atanh", 1, atanh); - REGISTER_OPERATOR("AveragePool", 1, average_pool); - REGISTER_OPERATOR("BatchNormalization", 1, batch_norm); - REGISTER_OPERATOR("BatchNormalization", 7, batch_norm); - REGISTER_OPERATOR("BatchNormalization", 14, batch_norm); - REGISTER_OPERATOR("BitShift", 1, bitshift); - REGISTER_OPERATOR("BitwiseAnd", 1, bitwise_and); - REGISTER_OPERATOR("BitwiseNot", 1, bitwise_not); - REGISTER_OPERATOR("BitwiseOr", 1, bitwise_or); - REGISTER_OPERATOR("BitwiseXor", 1, bitwise_xor); - REGISTER_OPERATOR("BlackmanWindow", 1, blackmanwindow); - REGISTER_OPERATOR("Cast", 1, cast); - REGISTER_OPERATOR("CastLike", 1, cast_like); - REGISTER_OPERATOR("Ceil", 1, ceil); - REGISTER_OPERATOR("Celu", 1, celu); - REGISTER_OPERATOR("Clip", 1, clip); - REGISTER_OPERATOR("Clip", 11, clip); - REGISTER_OPERATOR("Concat", 1, concat); - REGISTER_OPERATOR("Constant", 1, constant); - REGISTER_OPERATOR("Constant", 13, constant); - REGISTER_OPERATOR("ConstantOfShape", 1, constant_of_shape); - REGISTER_OPERATOR("Conv", 1, conv); - REGISTER_OPERATOR("ConvInteger", 1, conv_integer); - REGISTER_OPERATOR("ConvTranspose", 1, conv_transpose); - REGISTER_OPERATOR("Compress", 1, compress); - REGISTER_OPERATOR("Cos", 1, cos); - REGISTER_OPERATOR("Cosh", 1, cosh); - REGISTER_OPERATOR("ConstantFill", 1, constant_fill); - REGISTER_OPERATOR("CumSum", 1, cum_sum); - REGISTER_OPERATOR("DepthToSpace", 1, depth_to_space); - REGISTER_OPERATOR("DequantizeLinear", 1, dequantize_linear); - REGISTER_OPERATOR("DequantizeLinear", 13, dequantize_linear); 
- REGISTER_OPERATOR("Div", 1, div); - REGISTER_OPERATOR("Div", 7, div); - REGISTER_OPERATOR("DFT", 1, dft); - REGISTER_OPERATOR("Dropout", 1, dropout); - REGISTER_OPERATOR("Dropout", 7, dropout); - REGISTER_OPERATOR("Dropout", 12, dropout); - REGISTER_OPERATOR("DynamicQuantizeLinear", 1, dynamic_quantize_linear); - REGISTER_OPERATOR("Einsum", 1, einsum); - REGISTER_OPERATOR("Elu", 1, elu); - REGISTER_OPERATOR("Equal", 1, equal); - REGISTER_OPERATOR("Erf", 1, erf); - REGISTER_OPERATOR("Exp", 1, exp); - REGISTER_OPERATOR("Expand", 1, expand); - REGISTER_OPERATOR("EyeLike", 1, eye_like); - REGISTER_OPERATOR("Flatten", 1, flatten); - REGISTER_OPERATOR("Floor", 1, floor); - REGISTER_OPERATOR("Gather", 1, gather); - REGISTER_OPERATOR("GatherElements", 1, gather_elements); - REGISTER_OPERATOR("GatherND", 1, gather_nd); - REGISTER_OPERATOR("Gelu", 1, gelu); - REGISTER_OPERATOR("Gemm", 1, gemm); - REGISTER_OPERATOR("Gemm", 6, gemm); - REGISTER_OPERATOR("GlobalAveragePool", 1, global_average_pool); - REGISTER_OPERATOR("GlobalLpPool", 1, global_lp_pool); - REGISTER_OPERATOR("GlobalMaxPool", 1, global_max_pool); - REGISTER_OPERATOR("Greater", 1, greater); - REGISTER_OPERATOR("GreaterOrEqual", 1, greater_or_equal); - REGISTER_OPERATOR("GreaterOrEqual", 16, greater_or_equal); - REGISTER_OPERATOR("GridSample", 1, grid_sample); - REGISTER_OPERATOR("GroupNormalization", 1, group_normalization); - REGISTER_OPERATOR("GRU", 1, gru); - REGISTER_OPERATOR("HannWindow", 1, hannwindow); - REGISTER_OPERATOR("HammingWindow", 1, hammingwindow); - REGISTER_OPERATOR("Hardmax", 1, hardmax); - REGISTER_OPERATOR("Hardmax", 13, hardmax); - REGISTER_OPERATOR("HardSigmoid", 1, hard_sigmoid); - REGISTER_OPERATOR("HardSwish", 1, hard_swish); - REGISTER_OPERATOR("Identity", 1, identity); - REGISTER_OPERATOR("If", 1, if_op); - REGISTER_OPERATOR("ImageScaler", 1, image_scaler); - REGISTER_OPERATOR("InstanceNormalization", 1, instance_norm); - REGISTER_OPERATOR("IsFinite", 1, is_finite); - 
REGISTER_OPERATOR("IsInf", 1, is_inf); - REGISTER_OPERATOR("IsNaN", 1, is_nan) - REGISTER_OPERATOR("LayerNormalization", 1, layer_normalization); - REGISTER_OPERATOR("LeakyRelu", 1, leaky_relu); - REGISTER_OPERATOR("Less", 1, less); - REGISTER_OPERATOR("LessOrEqual", 1, less_or_equal); - REGISTER_OPERATOR("LessOrEqual", 16, less_or_equal); - REGISTER_OPERATOR("Log", 1, log); - REGISTER_OPERATOR("LogSoftmax", 1, log_softmax); - REGISTER_OPERATOR("LogSoftmax", 13, log_softmax); - REGISTER_OPERATOR("Loop", 1, loop); - REGISTER_OPERATOR("LpNormalization", 1, lp_norm); - REGISTER_OPERATOR("LRN", 1, lrn); - REGISTER_OPERATOR("LSTM", 1, lstm); - REGISTER_OPERATOR("MatMulInteger", 1, matmul_integer); - REGISTER_OPERATOR("MatMul", 1, matmul); - REGISTER_OPERATOR("MaxPool", 1, max_pool); - REGISTER_OPERATOR("MaxPool", 8, max_pool); - REGISTER_OPERATOR("MaxRoiPool", 1, max_roi_pool); - REGISTER_OPERATOR("Max", 1, max); - REGISTER_OPERATOR("Max", 8, max); - REGISTER_OPERATOR("Mean", 1, mean); - REGISTER_OPERATOR("MeanVarianceNormalization", 1, mean_variance_normalization); - REGISTER_OPERATOR("MeanVarianceNormalization", 9, mean_variance_normalization); - REGISTER_OPERATOR("Min", 1, min); - REGISTER_OPERATOR("Min", 8, min); - REGISTER_OPERATOR("Mish", 1, mish); - REGISTER_OPERATOR("Mod", 1, mod); - REGISTER_OPERATOR("Mul", 1, mul); - REGISTER_OPERATOR("Mul", 7, mul); - REGISTER_OPERATOR("Multinomial", 1, multinomial) - REGISTER_OPERATOR("Neg", 1, neg); - REGISTER_OPERATOR("NonMaxSuppression", 1, non_max_suppression); - REGISTER_OPERATOR("NonZero", 1, non_zero); - REGISTER_OPERATOR("Not", 1, logical_not); - REGISTER_OPERATOR("Or", 1, logical_or); - REGISTER_OPERATOR("OneHot", 1, onehot); - REGISTER_OPERATOR("Pad", 1, pad); - REGISTER_OPERATOR("Pad", 11, pad); - REGISTER_OPERATOR("Pow", 1, pow); - REGISTER_OPERATOR("PRelu", 1, prelu); - REGISTER_OPERATOR("QLinearConv", 1, qlinear_conv); - REGISTER_OPERATOR("QLinearMatMul", 1, qlinear_matmul); - 
REGISTER_OPERATOR("QuantizeLinear", 1, quantize_linear); - REGISTER_OPERATOR("QuantizeLinear", 13, quantize_linear); - REGISTER_OPERATOR("Range", 1, range); - REGISTER_OPERATOR("RandomNormal", 1, random_normal); - REGISTER_OPERATOR("RandomNormalLike", 1, random_normal_like); - REGISTER_OPERATOR("RandomUniform", 1, random_uniform); - REGISTER_OPERATOR("RandomUniformLike", 1, random_uniform_like); - REGISTER_OPERATOR("Reciprocal", 1, reciprocal); - REGISTER_OPERATOR("ReduceLogSum", 1, reduce_log_sum); - register_operator("ReduceLogSum", VersionRange{1, 17}, op::set_1::reduce_log_sum); - register_operator("ReduceLogSum", VersionRange::since(18), op::set_18::reduce_log_sum); - REGISTER_OPERATOR("ReduceLogSumExp", 1, reduce_log_sum_exp); - REGISTER_OPERATOR("ReduceL1", 1, reduce_l1); - REGISTER_OPERATOR("ReduceL2", 1, reduce_l2); - REGISTER_OPERATOR("ReduceL2", 13, reduce_l2); - REGISTER_OPERATOR("ReduceL2", 18, reduce_l2); - REGISTER_OPERATOR("ReduceMax", 1, reduce_max); - REGISTER_OPERATOR("ReduceMax", 13, reduce_max); - REGISTER_OPERATOR("ReduceMax", 18, reduce_max); - REGISTER_OPERATOR("ReduceMax", 20, reduce_max); - REGISTER_OPERATOR("ReduceMean", 1, reduce_mean); - REGISTER_OPERATOR("ReduceMean", 13, reduce_mean); - REGISTER_OPERATOR("ReduceMean", 18, reduce_mean); - REGISTER_OPERATOR("ReduceMin", 1, reduce_min); - REGISTER_OPERATOR("ReduceMin", 13, reduce_min); - REGISTER_OPERATOR("ReduceMin", 18, reduce_min); - REGISTER_OPERATOR("ReduceMin", 20, reduce_min); - REGISTER_OPERATOR("ReduceProd", 1, reduce_prod); - REGISTER_OPERATOR("ReduceSum", 1, reduce_sum); - REGISTER_OPERATOR("ReduceSum", 13, reduce_sum); - REGISTER_OPERATOR("ReduceSumSquare", 1, reduce_sum_square); - REGISTER_OPERATOR("ReduceSumSquare", 13, reduce_sum_square); - REGISTER_OPERATOR("ReduceSumSquare", 18, reduce_sum_square); - REGISTER_OPERATOR("Relu", 1, relu); - REGISTER_OPERATOR("Reshape", 1, reshape); - REGISTER_OPERATOR("Resize", 1, resize); - REGISTER_OPERATOR("Resize", 11, resize); - 
REGISTER_OPERATOR("ReverseSequence", 1, reverse_sequence); - REGISTER_OPERATOR("RNN", 1, rnn); - REGISTER_OPERATOR("RoiAlign", 1, roi_align); - REGISTER_OPERATOR("RoiAlign", 16, roi_align); - REGISTER_OPERATOR("Round", 1, round); - REGISTER_OPERATOR("Scan", 1, scan); - REGISTER_OPERATOR("Scan", 9, scan); - REGISTER_OPERATOR("ScatterElements", 1, scatter_elements); - REGISTER_OPERATOR("ScatterND", 1, scatter_nd); - REGISTER_OPERATOR("Selu", 1, selu); - REGISTER_OPERATOR("Shape", 1, shape); - REGISTER_OPERATOR("Shape", 15, shape) - REGISTER_OPERATOR("Shrink", 1, shrink); - REGISTER_OPERATOR("Sigmoid", 1, sigmoid); - REGISTER_OPERATOR("Sign", 1, sign); - REGISTER_OPERATOR("Sin", 1, sin); - REGISTER_OPERATOR("Sinh", 1, sinh); - REGISTER_OPERATOR("Size", 1, size); - REGISTER_OPERATOR("Slice", 1, slice); - REGISTER_OPERATOR("Slice", 10, slice); - REGISTER_OPERATOR("Softmax", 1, softmax); - REGISTER_OPERATOR("Softmax", 11, softmax); - REGISTER_OPERATOR("Softmax", 13, softmax); - REGISTER_OPERATOR("Softplus", 1, softplus); - REGISTER_OPERATOR("Softsign", 1, softsign); - REGISTER_OPERATOR("SpaceToDepth", 1, space_to_depth); - REGISTER_OPERATOR("Split", 1, split); - REGISTER_OPERATOR("Split", 13, split); - register_operator("STFT", - VersionRange::single_version_for_all_opsets(), - op::set_17::stft, - "frame_step and frame_length inputs must be constants; signal shape must be static;"); - REGISTER_OPERATOR("Sqrt", 1, sqrt); - REGISTER_OPERATOR("Squeeze", 1, squeeze); - REGISTER_OPERATOR("Squeeze", 13, squeeze); - REGISTER_OPERATOR("Sub", 1, sub); - REGISTER_OPERATOR("Sub", 7, sub); - REGISTER_OPERATOR("Sum", 1, sum); - REGISTER_OPERATOR("Sum", 8, sum); - REGISTER_OPERATOR("Tan", 1, tan); - REGISTER_OPERATOR("Tanh", 1, tanh); - REGISTER_OPERATOR("ThresholdedRelu", 1, thresholded_relu); - REGISTER_OPERATOR("Tile", 1, tile); - REGISTER_OPERATOR("TopK", 1, topk); - REGISTER_OPERATOR("TopK", 10, topk); - REGISTER_OPERATOR("TopK", 11, topk); - REGISTER_OPERATOR("Transpose", 1, 
transpose); - REGISTER_OPERATOR("Trilu", 1, trilu); - REGISTER_OPERATOR("Unique", 1, unique); - REGISTER_OPERATOR("Unsqueeze", 1, unsqueeze); - REGISTER_OPERATOR("Unsqueeze", 13, unsqueeze); - REGISTER_OPERATOR("Where", 1, where); - REGISTER_OPERATOR("Xor", 1, logical_xor); - - // deprecated ops - REGISTER_OPERATOR("Affine", 1, affine); - REGISTER_OPERATOR("Crop", 1, crop); - REGISTER_OPERATOR("Scatter", 1, scatter_elements); - REGISTER_OPERATOR("Upsample", 1, upsample); - REGISTER_OPERATOR("Upsample", 7, upsample); - REGISTER_OPERATOR("Upsample", 9, upsample); - + // Deep copy of default map to local + for (auto& domain : *get_supported_ops()) { + for (auto& operation : domain.second) { + for (auto& version : operation.second) { + m_map[domain.first][operation.first].emplace(version.first, version.second); + } + } + } // custom ops - REGISTER_OPERATOR_WITH_DOMAIN(OPENVINO_ONNX_DOMAIN, "DeformableConv2D", 1, deformable_conv_2d); - REGISTER_OPERATOR_WITH_DOMAIN(OPENVINO_ONNX_DOMAIN, "DetectionOutput", 1, detection_output); - REGISTER_OPERATOR_WITH_DOMAIN(OPENVINO_ONNX_DOMAIN, - "ExperimentalDetectronDetectionOutput", - 1, - experimental_detectron_detection_output); - REGISTER_OPERATOR_WITH_DOMAIN(OPENVINO_ONNX_DOMAIN, - "ExperimentalDetectronGenerateProposalsSingleImage", - 1, - experimental_detectron_generate_proposals); - REGISTER_OPERATOR_WITH_DOMAIN(OPENVINO_ONNX_DOMAIN, "ExperimentalDetectronGroupNorm", 1, group_norm); - REGISTER_OPERATOR_WITH_DOMAIN(OPENVINO_ONNX_DOMAIN, - "ExperimentalDetectronPriorGridGenerator", - 1, - experimental_detectron_prior_grid_generator); - REGISTER_OPERATOR_WITH_DOMAIN(OPENVINO_ONNX_DOMAIN, - "ExperimentalDetectronROIFeatureExtractor", - 1, - experimental_detectron_roi_feature_extractor); - REGISTER_OPERATOR_WITH_DOMAIN(OPENVINO_ONNX_DOMAIN, - "ExperimentalDetectronTopKROIs", - 1, - experimental_detectron_topk_rois); - REGISTER_OPERATOR_WITH_DOMAIN(OPENVINO_ONNX_DOMAIN, "FakeQuantize", 1, fake_quantize); - 
REGISTER_OPERATOR_WITH_DOMAIN(OPENVINO_ONNX_DOMAIN, "GenerateProposals", 1, generate_proposals); - REGISTER_OPERATOR_WITH_DOMAIN(OPENVINO_ONNX_DOMAIN, "GroupNorm", 1, group_norm); - REGISTER_OPERATOR_WITH_DOMAIN(OPENVINO_ONNX_DOMAIN, "Normalize", 1, normalize); - REGISTER_OPERATOR_WITH_DOMAIN(OPENVINO_ONNX_DOMAIN, "PriorBox", 1, prior_box); - REGISTER_OPERATOR_WITH_DOMAIN(OPENVINO_ONNX_DOMAIN, "PriorBoxClustered", 1, prior_box_clustered); - REGISTER_OPERATOR_WITH_DOMAIN(OPENVINO_ONNX_DOMAIN, "Swish", 1, swish); - - REGISTER_OPERATOR_WITH_DOMAIN(MICROSOFT_DOMAIN, "Attention", 1, attention); - REGISTER_OPERATOR_WITH_DOMAIN(MICROSOFT_DOMAIN, "BiasGelu", 1, bias_gelu); - REGISTER_OPERATOR_WITH_DOMAIN(MICROSOFT_DOMAIN, "EmbedLayerNormalization", 1, embed_layer_normalization); - REGISTER_OPERATOR_WITH_DOMAIN(MICROSOFT_DOMAIN, "FusedConv", 1, fused_conv); - REGISTER_OPERATOR_WITH_DOMAIN(MICROSOFT_DOMAIN, "FusedGemm", 1, fusedgemm); - REGISTER_OPERATOR_WITH_DOMAIN(MICROSOFT_DOMAIN, "GatherND", 1, gather_nd); - REGISTER_OPERATOR_WITH_DOMAIN(MICROSOFT_DOMAIN, "SkipLayerNormalization", 1, skip_layer_normalization); - REGISTER_OPERATOR_WITH_DOMAIN(MICROSOFT_DOMAIN, "Trilu", 1, trilu); - - register_operator_in_custom_domain("DequantizeLinear", - VersionRange::since(1), - op::set_13::dequantize_linear, - "com.microsoft"); - register_operator_in_custom_domain("Gelu", VersionRange::since(1), op::set_1::gelu, "com.microsoft"); - register_operator_in_custom_domain("Pad", - VersionRange::single_version_for_all_opsets(), - op::custom::set_1::pad, - "com.microsoft"); - register_operator_in_custom_domain("QuantizeLinear", - VersionRange::since(1), - op::set_13::quantize_linear, - "com.microsoft"); - - REGISTER_OPERATOR_WITH_DOMAIN(PYTORCH_ATEN_DOMAIN, "adaptive_avg_pool2d", 1, adaptive_avg_pooling2d); - REGISTER_OPERATOR_WITH_DOMAIN(MMDEPLOY_DOMAIN, "NMSRotated", 1, nms_rotated); - REGISTER_OPERATOR_WITH_DOMAIN(MMDEPLOY_DOMAIN, "MMCVRoIAlignRotated", 1, mmdeploy_roi_align_rotated); } 
#undef REGISTER_OPERATOR diff --git a/src/frontends/onnx/frontend/src/ops_bridge.hpp b/src/frontends/onnx/frontend/src/ops_bridge.hpp index fda0cb071a4794..64337050067d5e 100644 --- a/src/frontends/onnx/frontend/src/ops_bridge.hpp +++ b/src/frontends/onnx/frontend/src/ops_bridge.hpp @@ -83,12 +83,9 @@ class OperatorsBridge { // domain_2: { ... }, // ... // } - using DomainOpset = std::unordered_map>; std::unordered_map m_map; }; -extern const char* OPENVINO_ONNX_DOMAIN; - } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/version_range.hpp b/src/frontends/onnx/frontend/src/version_range.hpp index cf93a87f9061cd..27a9073ee933da 100644 --- a/src/frontends/onnx/frontend/src/version_range.hpp +++ b/src/frontends/onnx/frontend/src/version_range.hpp @@ -20,9 +20,6 @@ struct VersionRange { static constexpr VersionRange in(int version) { return VersionRange{version, version}; } - static constexpr VersionRange single_version_for_all_opsets() { - return VersionRange{1, LATEST_SUPPORTED_ONNX_OPSET_VERSION}; - } // -1 means that that a left/right boundary of the range was not specified const int m_since = -1, m_until = -1; }; From 95c3b53ebe4eefb8325df2893da4cd3750777161 Mon Sep 17 00:00:00 2001 From: Cavus Mustafa Date: Sun, 7 Jul 2024 14:33:52 -0700 Subject: [PATCH 50/50] Windows fix for full transformation --- src/frontends/pytorch/src/op/full.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/frontends/pytorch/src/op/full.cpp b/src/frontends/pytorch/src/op/full.cpp index 7877f20ae10c0a..ad0eb04527aa12 100644 --- a/src/frontends/pytorch/src/op/full.cpp +++ b/src/frontends/pytorch/src/op/full.cpp @@ -83,7 +83,7 @@ OutputVector translate_full_fx(const NodeContext& context) { } else { sizes = context.get_input(0); } - auto value = context.get_input(num_inputs - 1); + auto value = context.get_input(static_cast(num_inputs - 1)); auto filled_tensor = base_translate_full(context, sizes, value); if 
(context.has_attribute("dtype")) {