From f1cba31319c3a2b150a801ea969bfe463041d5fc Mon Sep 17 00:00:00 2001 From: Pawel Raasz Date: Fri, 13 Dec 2024 09:26:26 +0100 Subject: [PATCH] [core] Preserve friendly name and tensor names in PPP (#23713) ### Details: - For models with `version > 10` the node's friendly name and tensor's names will not be moved from original node. If conversion node added by PPP or convert precision then new friendly name will be created based on previous node. The new node name will have format `[previous_node_name].[port_number]`. - Align `ConvertPrecision` transformation with PPP - The issue with lost tensor names when setting them on model's inputs/outputs has been solved in #25954 - For model version 10 the old behavior is preserved as legacy compatibility mode ### Tickets: - [CVS-127482](https://jira.devtools.intel.com/browse/CVS-127482) ### Depends on: - #25954 - implements tensor names handling for model outputs (should be used here) --------- Signed-off-by: Raasz, Pawel Co-authored-by: Michal Lukaszewski Co-authored-by: Anastasia Kuporosova --- .../graph/preprocess/pre_post_process.cpp | 13 ++- .../tests/test_runtime/test_input_node.py | 3 +- .../src/transformations/convert_precision.cpp | 32 ++++--- .../tests/utils/convert_precision.cpp | 21 ++-- src/core/src/preprocess/pre_post_process.cpp | 5 + src/core/src/preprocess/preprocess_impls.cpp | 50 ++++++---- src/core/src/preprocess/preprocess_impls.hpp | 9 ++ src/core/tests/preprocess.cpp | 95 ++++++++++++++++--- 8 files changed, 173 insertions(+), 55 deletions(-) diff --git a/src/bindings/python/src/pyopenvino/graph/preprocess/pre_post_process.cpp b/src/bindings/python/src/pyopenvino/graph/preprocess/pre_post_process.cpp index dee95c6a832d2c..a19f2b2f482337 100644 --- a/src/bindings/python/src/pyopenvino/graph/preprocess/pre_post_process.cpp +++ b/src/bindings/python/src/pyopenvino/graph/preprocess/pre_post_process.cpp @@ -191,7 +191,7 @@ static void regclass_graph_PreProcessSteps(py::module m) { :param pads_end: Number 
of elements matches the number of indices in data attribute. Specifies the number of padding elements at the ending of each axis. :type pads_end: 1D tensor of type T_INT. :param value: All new elements are populated with this value or with 0 if input not provided. Shouldn’t be set for other pad_mode values. - :type value: scalar tensor of type T. + :type value: scalar tensor of type T. :param mode: pad_mode specifies the method used to generate new element values. :type mode: string :return: Reference to itself, allows chaining of calls in client's code in a builder-like manner. @@ -219,7 +219,7 @@ static void regclass_graph_PreProcessSteps(py::module m) { :param pads_end: Number of elements matches the number of indices in data attribute. Specifies the number of padding elements at the ending of each axis. :type pads_end: 1D tensor of type T_INT. :param value: All new elements are populated with this value or with 0 if input not provided. Shouldn’t be set for other pad_mode values. - :type value: scalar tensor of type T. + :type value: scalar tensor of type T. :param mode: pad_mode specifies the method used to generate new element values. :type mode: string :return: Reference to itself, allows chaining of calls in client's code in a builder-like manner. 
@@ -308,7 +308,8 @@ static void regclass_graph_InputTensorInfo(py::module m) { }, py::arg("layout"), R"( - Set layout for input tensor info + Set layout for input tensor info + :param layout: layout to be set :type layout: Union[str, openvino.runtime.Layout] )"); @@ -422,7 +423,8 @@ static void regclass_graph_OutputTensorInfo(py::module m) { }, py::arg("layout"), R"( - Set layout for output tensor info + Set layout for output tensor info + :param layout: layout to be set :type layout: Union[str, openvino.runtime.Layout] )"); @@ -475,7 +477,8 @@ static void regclass_graph_OutputModelInfo(py::module m) { }, py::arg("layout"), R"( - Set layout for output model info + Set layout for output model info + :param layout: layout to be set :type layout: Union[str, openvino.runtime.Layout] )"); diff --git a/src/bindings/python/tests/test_runtime/test_input_node.py b/src/bindings/python/tests/test_runtime/test_input_node.py index 5e083051934afb..c12eb085317afc 100644 --- a/src/bindings/python/tests/test_runtime/test_input_node.py +++ b/src/bindings/python/tests/test_runtime/test_input_node.py @@ -75,7 +75,8 @@ def test_input_get_source_output(device): net_input = compiled_model.output(0) input_node = net_input.get_node().inputs()[0] name = input_node.get_source_output().get_node().get_friendly_name() - assert name == "relu" + # Expected ReLu node name can be changed if conversion precision applied (new Convert node added) + assert name in ("relu", "relu.0") def test_input_get_tensor(device): diff --git a/src/common/transformations/src/transformations/convert_precision.cpp b/src/common/transformations/src/transformations/convert_precision.cpp index aa067da4f360fd..d5e96ddafc252f 100644 --- a/src/common/transformations/src/transformations/convert_precision.cpp +++ b/src/common/transformations/src/transformations/convert_precision.cpp @@ -208,7 +208,8 @@ bool convert_function_precision(const std::shared_ptr& f, bool is_changed, bool is_subgraph, bool 
convert_input_output_precision, - bool store_original_precision_as_rt_attribute) { + bool store_original_precision_as_rt_attribute, + bool names_compatibility_mode) { bool is_output_precision_changed = false; ov::element::TypeVector orig_result_types; @@ -277,7 +278,8 @@ bool convert_function_precision(const std::shared_ptr& f, is_changed || is_output_precision_changed, true, true, - store_original_precision_as_rt_attribute) || + store_original_precision_as_rt_attribute, + names_compatibility_mode) || is_changed; } } @@ -325,18 +327,21 @@ bool convert_function_precision(const std::shared_ptr& f, if (result->get_input_element_type(0) != orig_result_types[i]) { auto result_input = result->input_value(0); const auto convert = std::make_shared(result_input, orig_result_types[i]); - if (result_input.get_node()->get_output_size() > 1) { - convert->set_friendly_name(result_input.get_node()->get_friendly_name() + "." + - std::to_string(result_input.get_index())); + + auto convert_f_name = result_input.get_node()->get_friendly_name(); + if (names_compatibility_mode) { + if (result_input.get_node()->get_output_size() > 1) { + convert_f_name += '.' + std::to_string(result_input.get_index()); + } else { + result_input.get_node()->set_friendly_name(""); + } + + convert->get_output_tensor(0).set_names(result_input.get_names()); } else { - convert->set_friendly_name(result_input.get_node()->get_friendly_name()); - result_input.get_node()->set_friendly_name(""); + convert_f_name += '.' 
+ std::to_string(result_input.get_index()); } + convert->set_friendly_name(convert_f_name); - auto& convert_output_tensor = convert->get_output_tensor(0); - convert_output_tensor.set_names(result_input.get_names()); - - result_input.set_names({}); result->input(0).replace_source_output(convert->output(0)); result->revalidate_and_infer_types(); } @@ -359,6 +364,8 @@ bool convert_precision(ov::pass::PassBase& pass, // changing precision we need to understand which Constant consumers belongs // to the current ov::Model std::unordered_map>> const_to_internal_output; + + const auto names_compatibility_mode = f->has_rt_info("version") && f->get_rt_info("version") < 11; return convert_function_precision(f, type_to_fuse, type_to_extend, @@ -369,7 +376,8 @@ bool convert_precision(ov::pass::PassBase& pass, false, false, convert_input_output_precision, - store_original_precision_as_rt_attribute); + store_original_precision_as_rt_attribute, + names_compatibility_mode); } using precisions_set_t = std::unordered_set; diff --git a/src/common/transformations/tests/utils/convert_precision.cpp b/src/common/transformations/tests/utils/convert_precision.cpp index c2b7133506aebe..f4bdedf4764604 100644 --- a/src/common/transformations/tests/utils/convert_precision.cpp +++ b/src/common/transformations/tests/utils/convert_precision.cpp @@ -2197,8 +2197,9 @@ TEST(TransformationTests, ConvertPrecisionExplicitConvertsForParameterAndResult) auto param_1 = make_shared(element::f64, Shape{3}); auto converted_param = make_shared(param_1, element::f32); auto sin = make_shared(converted_param); + sin->get_output_tensor(0).add_names({"sine:0"}); auto converted_sin = make_shared(sin, element::f64); - converted_sin->get_output_tensor(0).add_names({"sine:0"}); + converted_sin->set_friendly_name("sine.0"); auto result_sin = make_shared(converted_sin); model_ref = make_shared(result_sin, ParameterVector{param_1}); } @@ -2208,7 +2209,7 @@ TEST(TransformationTests, 
ConvertPrecisionExplicitConvertsForParameterAndResult) ASSERT_TRUE(result.valid) << result.message; const auto& results = model->get_results(); - ASSERT_EQ("sine", results[0]->get_input_node_ptr(0)->get_friendly_name()); + ASSERT_EQ("sine.0", results[0]->get_input_node_ptr(0)->get_friendly_name()); } TEST(TransformationTests, ConvertPrecisionExplicitConvertsMultiParam) { @@ -2272,8 +2273,8 @@ TEST(TransformationTests, ConvertPrecisionExplicitConvertsMultiParam) { auto converted_mul = make_shared(mul, element::f64); auto sin = make_shared(convert_1); - converted_add->get_output_tensor(0).add_names({"add:0"}); - converted_mul->get_output_tensor(0).add_names({"mul:0"}); + add->get_output_tensor(0).add_names({"add:0"}); + mul->get_output_tensor(0).add_names({"mul:0"}); sin->get_output_tensor(0).add_names({"sine:0"}); auto result_add = make_shared(converted_add); @@ -2289,8 +2290,8 @@ TEST(TransformationTests, ConvertPrecisionExplicitConvertsMultiParam) { ASSERT_TRUE(result.valid) << result.message; const auto& results = model->get_results(); - ASSERT_EQ("add", results[0]->get_input_node_ptr(0)->get_friendly_name()); - ASSERT_EQ("mul", results[1]->get_input_node_ptr(0)->get_friendly_name()); + ASSERT_EQ("add.0", results[0]->get_input_node_ptr(0)->get_friendly_name()); + ASSERT_EQ("mul.0", results[1]->get_input_node_ptr(0)->get_friendly_name()); ASSERT_EQ("sine", results[2]->get_input_node_ptr(0)->get_friendly_name()); } @@ -2306,6 +2307,8 @@ TEST(TransformationTests, ConvertPrecisionExplicitConvertsSingleNodeMultipleOutp split->get_output_tensor(1).add_names({"split:1"}); split->get_output_tensor(2).add_names({"split:2"}); model = make_shared(split->outputs(), ParameterVector{param_1}); + // set version 10 to use names compatibility mode + model->get_rt_info()["version"] = static_cast(10); type_to_fuse_map empty_type_to_fuse_map = {}; bool keep_precision_sensitive_in_fp32 = false; @@ -2322,6 +2325,9 @@ TEST(TransformationTests, 
ConvertPrecisionExplicitConvertsSingleNodeMultipleOutp auto convert_1 = make_shared(param_1, element::f32); auto axis = opset10::Constant::create(element::i32, Shape{}, {0}); auto split = make_shared(convert_1, axis, 3); + split->get_output_tensor(0).add_names({"split:0"}); + split->get_output_tensor(1).add_names({"split:1"}); + split->get_output_tensor(2).add_names({"split:2"}); auto convert_split_0 = make_shared(split->output(0), element::f64); auto convert_split_1 = make_shared(split->output(1), element::f64); @@ -2390,6 +2396,8 @@ TEST(TransformationTests, ConvertPrecisionExplicitConvertsMultiSubgraphs) { result.get_node()->set_friendly_name("if_result"); result.add_names({"if_result:0"}); model = make_shared(OutputVector{result}, ParameterVector{cond, param_1, param_2}); + // set version 10 to use names compatibility mode + model->get_rt_info()["version"] = static_cast(10); type_to_fuse_map empty_type_to_fuse_map = {}; bool keep_precision_sensitive_in_fp32 = false; @@ -2443,6 +2451,7 @@ TEST(TransformationTests, ConvertPrecisionExplicitConvertsMultiSubgraphs) { if_op->set_input(convert_1, param_1_then, param_1_else); if_op->set_input(convert_2, param_2_then, param_2_else); auto result = if_op->set_output(result_then, result_else); + result.add_names({"if_result:0"}); auto converted_result = make_shared(result, element::f64); converted_result->get_output_tensor(0).add_names({"if_result:0"}); diff --git a/src/core/src/preprocess/pre_post_process.cpp b/src/core/src/preprocess/pre_post_process.cpp index d81d48082cde04..b408755a7d85a8 100644 --- a/src/core/src/preprocess/pre_post_process.cpp +++ b/src/core/src/preprocess/pre_post_process.cpp @@ -56,6 +56,10 @@ struct PrePostProcessor::PrePostProcessorImpl { PrePostProcessorImpl() = default; explicit PrePostProcessorImpl(const std::shared_ptr& f) : m_function(f) { OPENVINO_ASSERT(f, "Model can't be nullptr for PrePostProcessor"); + + // if IR version < 11, set compatibility mode + const auto names_mode = 
m_function->has_rt_info("version") && m_function->get_rt_info("version") < 11; + for (size_t i = 0; i < m_function->inputs().size(); ++i) { auto info = InputInfo(); info.m_impl->m_resolved_param = m_function->get_parameters()[i]; @@ -64,6 +68,7 @@ struct PrePostProcessor::PrePostProcessorImpl { for (size_t i = 0; i < m_function->outputs().size(); ++i) { auto info = OutputInfo(); info.m_impl->m_output_node = m_function->output(i); + info.m_impl->get_tensor_data()->set_names_compatibility_mode(names_mode); m_outputs.push_back(std::move(info)); } } diff --git a/src/core/src/preprocess/preprocess_impls.cpp b/src/core/src/preprocess/preprocess_impls.cpp index c2523beed66620..e0cdee2e76a140 100644 --- a/src/core/src/preprocess/preprocess_impls.cpp +++ b/src/core/src/preprocess/preprocess_impls.cpp @@ -370,30 +370,40 @@ void OutputInfo::OutputInfoImpl::build(ov::ResultVector& results) { } auto orig_parent = result->get_input_source_output(0).get_node_shared_ptr(); - // Move result tensor names from previous input to new - const auto result_input_names = result->get_input_tensor(0).get_names(); - result->get_input_tensor(0).set_names({}); - node.get_tensor().set_names(result_input_names); - - if (!post_processing_applied) { - return; - } - - if (orig_parent->get_output_size() == 1) { - node.get_node_shared_ptr()->set_friendly_name(orig_parent->get_friendly_name()); + if (get_tensor_data()->get_names_compatibility_mode()) { + // Move result tensor names from previous input to new + const auto result_input_names = result->get_input_tensor(0).get_names(); + result->get_input_tensor(0).set_names({}); + node.get_tensor().set_names(result_input_names); + + if (!post_processing_applied) { + return; + } - // Reset friendly name of input node to avoid names collision - // when there is at a new node inserted by post-processing steps - // If no new nodes are inserted by post-processing, then we need to preserve friendly name of input - // as it's required for old API correct work - 
result->get_input_source_output(0).get_node_shared_ptr()->set_friendly_name(""); + if (orig_parent->get_output_size() == 1) { + node.get_node_shared_ptr()->set_friendly_name(orig_parent->get_friendly_name()); + + // Reset friendly name of input node to avoid names collision + // when there is at a new node inserted by post-processing steps + // If no new nodes are inserted by post-processing, then we need to preserve friendly name of input + // as it's required for old API correct work + result->get_input_source_output(0).get_node_shared_ptr()->set_friendly_name(""); + } else if (node.get_node_shared_ptr() != orig_parent) { + // Result node is changed - add "." suffix + node.get_node_shared_ptr()->set_friendly_name( + orig_parent->get_friendly_name() + "." + + std::to_string(result->get_input_source_output(0).get_index())); + } + result->input(0).replace_source_output(node); + result->revalidate_and_infer_types(); } else if (node.get_node_shared_ptr() != orig_parent) { // Result node is changed - add "." suffix - node.get_node_shared_ptr()->set_friendly_name(orig_parent->get_friendly_name() + "." 
+ - std::to_string(result->get_input_source_output(0).get_index())); + const auto suffix = std::string(".") + std::to_string(result->get_input_source_output(0).get_index()); + node.get_node_shared_ptr()->set_friendly_name(orig_parent->get_friendly_name() + suffix); + + result->input(0).replace_source_output(node); + result->revalidate_and_infer_types(); } - result->input(0).replace_source_output(node); - result->revalidate_and_infer_types(); // Update layout if (!context.layout().empty()) { diff --git a/src/core/src/preprocess/preprocess_impls.hpp b/src/core/src/preprocess/preprocess_impls.hpp index 87d6b5456badc3..ee74c534c361fb 100644 --- a/src/core/src/preprocess/preprocess_impls.hpp +++ b/src/core/src/preprocess/preprocess_impls.hpp @@ -122,12 +122,21 @@ class TensorInfoImplBase { return m_layout; } + void set_names_compatibility_mode(const bool compatiblity_mode) { + m_names_compatiblity_mode = compatiblity_mode; + } + + const bool get_names_compatibility_mode() const { + return m_names_compatiblity_mode; + } + protected: element::Type m_type = element::dynamic; bool m_type_set = false; Layout m_layout = Layout(); bool m_layout_set = false; + bool m_names_compatiblity_mode = false; }; class OutputTensorInfo::OutputTensorInfoImpl : public TensorInfoImplBase {}; diff --git a/src/core/tests/preprocess.cpp b/src/core/tests/preprocess.cpp index 0cec67c3031288..99f2789b217b6d 100644 --- a/src/core/tests/preprocess.cpp +++ b/src/core/tests/preprocess.cpp @@ -57,6 +57,12 @@ static std::shared_ptr create_n_inputs(element::Type type, const PartialS return std::make_shared(res, params); } +namespace { +void set_model_as_v10(ov::Model& model) { + model.get_rt_info()["version"] = static_cast(10); +} +} // namespace + TEST(pre_post_process, simple_mean_scale) { auto f = create_simple_function(element::f32, Shape{1, 3, 2, 2}); auto p = PrePostProcessor(f); @@ -1531,7 +1537,7 @@ TEST(pre_post_process, postprocess_convert_element_type_explicit) { auto f = 
create_simple_function(element::f32, Shape{1, 3, 2, 2}); auto name = f->output().get_node_shared_ptr()->get_friendly_name(); auto name_last_op = f->get_results().front()->get_input_source_output(0).get_node_shared_ptr()->get_friendly_name(); - auto old_names = f->output().get_tensor().get_names(); + auto old_names = std::unordered_set{"tensor_output1"}; auto p = PrePostProcessor(f); p.output().postprocess().convert_element_type(element::u8); @@ -1539,7 +1545,6 @@ TEST(pre_post_process, postprocess_convert_element_type_explicit) { EXPECT_EQ(f->get_results().size(), 1); EXPECT_EQ(f->get_results()[0]->get_element_type(), element::u8); EXPECT_EQ(f->output().get_tensor().get_names(), old_names); - EXPECT_EQ(old_names.count("tensor_output1"), 1); auto ops = f->get_ordered_ops(); auto res_count = std::count_if(ops.begin(), ops.end(), [](const std::shared_ptr& n) { return std::dynamic_pointer_cast(n) != nullptr; @@ -1548,9 +1553,37 @@ TEST(pre_post_process, postprocess_convert_element_type_explicit) { auto names_count = std::count_if(ops.begin(), ops.end(), [](std::shared_ptr n) { return n->output(0).get_tensor().get_names().count("tensor_output1") > 0; }); - EXPECT_EQ(names_count, 2); // last node + result referencing to it + EXPECT_EQ(names_count, 2); // result + node connected to it has same name referencing to it EXPECT_EQ(name, f->output().get_node_shared_ptr()->get_friendly_name()); - EXPECT_EQ(name_last_op, + EXPECT_EQ(name_last_op + ".0", + f->get_results().front()->get_input_source_output(0).get_node_shared_ptr()->get_friendly_name()); +} + +TEST(pre_post_process, trivial_model_convert_element_type_explicit) { + const auto f = create_trivial(element::f32, Shape{1, 3, 2, 2}); + const auto name = f->output().get_node_shared_ptr()->get_friendly_name(); + const auto name_last_op = + f->get_results().front()->get_input_source_output(0).get_node_shared_ptr()->get_friendly_name(); + const auto old_names = std::unordered_set{"tensor_output1"}; + const auto n = 
f->output().get_tensor().get_names(); + auto p = PrePostProcessor(f); + + p.output().postprocess().convert_element_type(element::u8); + p.build(); + EXPECT_EQ(f->get_results().size(), 1); + EXPECT_EQ(f->get_results()[0]->get_element_type(), element::u8); + EXPECT_THAT(f->output().get_tensor().get_names(), old_names); + const auto ops = f->get_ordered_ops(); + const auto res_count = std::count_if(ops.begin(), ops.end(), [](const std::shared_ptr& n) { + return std::dynamic_pointer_cast(n) != nullptr; + }); + EXPECT_EQ(res_count, 1); + const auto names_count = std::count_if(ops.begin(), ops.end(), [](std::shared_ptr n) { + return n->output(0).get_tensor().get_names().count("tensor_output1") > 0; + }); + EXPECT_EQ(names_count, 2); // result + node connected to it has same name referencing to it + EXPECT_EQ(name, f->output().get_node_shared_ptr()->get_friendly_name()); + EXPECT_EQ(name_last_op + ".0", f->get_results().front()->get_input_source_output(0).get_node_shared_ptr()->get_friendly_name()); } @@ -1776,25 +1809,43 @@ TEST(pre_post_process, postprocess_convert_layout_invalid_dims_dyn_shape) { TEST(pre_post_process, postprocess_keep_friendly_names_compatibility) { auto f = create_simple_function(element::f32, Shape{1, 3, 10, 10}); - auto result_fr_name = f->get_results()[0]->get_friendly_name(); - auto node_before_result_old = f->get_results()[0]->get_input_source_output(0).get_node_shared_ptr(); - auto node_name = node_before_result_old->get_friendly_name(); + const auto result_fr_name = f->get_results()[0]->get_friendly_name(); + const auto node_before_result_old = f->get_results()[0]->get_input_source_output(0).get_node_shared_ptr(); + const auto node_name = node_before_result_old->get_friendly_name(); + set_model_as_v10(*f); auto p = PrePostProcessor(f); p.output().postprocess().convert_element_type(element::u8); f = p.build(); EXPECT_EQ(f->get_results()[0]->get_friendly_name(), result_fr_name); - auto node_before_result_new = 
f->get_results()[0]->get_input_source_output(0).get_node_shared_ptr(); + const auto node_before_result_new = f->get_results()[0]->get_input_source_output(0).get_node_shared_ptr(); // Compatibility check: verify that old name is assigned to new 'output' node EXPECT_EQ(node_before_result_new->get_friendly_name(), node_name); // Compatibility check: Verify that old name is not set for old 'output' node anymore EXPECT_NE(node_before_result_old->get_friendly_name(), node_name); } +TEST(pre_post_process, postprocess_keep_friendly_names) { + auto f = create_simple_function(element::f32, Shape{1, 3, 10, 10}); + auto result_fr_name = f->get_results()[0]->get_friendly_name(); + auto node_before_result_old = f->get_results()[0]->get_input_source_output(0).get_node_shared_ptr(); + auto node_name = node_before_result_old->get_friendly_name(); + auto p = PrePostProcessor(f); + p.output().postprocess().convert_element_type(element::u8); + f = p.build(); + EXPECT_EQ(f->get_results()[0]->get_friendly_name(), result_fr_name); + auto node_before_result_new = f->get_results()[0]->get_input_source_output(0).get_node_shared_ptr(); + // Compatibility check: verify that old name + index is assigned to new 'output' node + EXPECT_EQ(node_before_result_new->get_friendly_name(), node_name + ".0"); + // Compatibility check: Verify that old name is not changed + EXPECT_EQ(node_before_result_old->get_friendly_name(), node_name); +} + TEST(pre_post_process, postprocess_keep_friendly_names_compatibility_implicit) { auto f = create_simple_function(element::f32, Shape{1, 3, 10, 10}); auto result_fr_name = f->get_results()[0]->get_friendly_name(); auto node_before_result_old = f->get_results()[0]->get_input_source_output(0).get_node_shared_ptr(); auto node_name = node_before_result_old->get_friendly_name(); + set_model_as_v10(*f); auto p = PrePostProcessor(f); p.output().model().set_layout("NCHW"); p.output().tensor().set_layout("NHWC"); @@ -1807,6 +1858,21 @@ TEST(pre_post_process, 
postprocess_keep_friendly_names_compatibility_implicit) { EXPECT_NE(node_before_result_old->get_friendly_name(), node_name); } +TEST(pre_post_process, postprocess_keep_friendly_names_implicit) { + auto f = create_simple_function(element::f32, Shape{1, 3, 10, 10}); + const auto result_fr_name = f->get_results()[0]->get_friendly_name(); + const auto node_before_result_old = f->get_results()[0]->get_input_source_output(0).get_node_shared_ptr(); + const auto node_name = node_before_result_old->get_friendly_name(); + auto p = PrePostProcessor(f); + p.output().model().set_layout("NCHW"); + p.output().postprocess().convert_layout("NHWC"); + f = p.build(); + EXPECT_EQ(f->get_results()[0]->get_friendly_name(), result_fr_name); + const auto node_before_result_new = f->get_results()[0]->get_input_source_output(0).get_node_shared_ptr(); + EXPECT_EQ(node_before_result_new->get_friendly_name(), node_name + ".0"); + EXPECT_EQ(node_before_result_old->get_friendly_name(), node_name); +} + // --- PostProcess - convert color format --- TEST(pre_post_process, postprocess_convert_color_format_BGR_RGB) { auto f = create_simple_function(element::f32, Shape{5, 30, 20, 3}); @@ -2017,7 +2083,11 @@ TEST(pre_post_process, postprocess_one_node_many_outputs) { results.emplace_back(res); } auto model = std::make_shared(ResultVector{results}, ParameterVector{data1}); - EXPECT_EQ(model->output(0).get_tensor().get_names().count("tensor_Split0"), 1); + // Set tensor name to model output 0 + model->output(0).set_names({"output_split0"}); + EXPECT_EQ(model->output(0).get_tensor().get_names().count("output_split0"), 1); + // Result input has still tensor_split0 names from split op + EXPECT_EQ(model->output(0).get_node()->get_input_tensor(0).get_names().count("tensor_Split0"), 1); EXPECT_EQ(model->output(1).get_tensor().get_names().count("tensor_Split1"), 1); EXPECT_EQ(model->output(2).get_tensor().get_names().count("tensor_Split2"), 1); @@ -2026,9 +2096,12 @@ TEST(pre_post_process, 
postprocess_one_node_many_outputs) { p.output(2).tensor().set_element_type(element::f32); model = p.build(); EXPECT_EQ(model->get_results().size(), 3); - EXPECT_EQ(model->output(0).get_tensor().get_names().count("tensor_Split0"), 1); + // Tensor names on output is lost as origin named tensor is before convert op + // New result has different precision means different tensor. + EXPECT_EQ(model->output(0).get_tensor().get_names().count("tensor_Split0"), 0); + EXPECT_EQ(model->output(0).get_tensor().get_names().count("output_split0"), 1); EXPECT_EQ(model->output(1).get_tensor().get_names().count("tensor_Split1"), 1); - EXPECT_EQ(model->output(2).get_tensor().get_names().count("tensor_Split2"), 1); + EXPECT_EQ(model->output(2).get_tensor().get_names().count("tensor_Split2"), 0); EXPECT_EQ(model->get_results()[0]->input(0).get_source_output().get_node()->get_friendly_name(), "Split.0"); EXPECT_EQ(model->get_results()[1]->input(0).get_source_output().get_node()->get_friendly_name(), "Split"); EXPECT_EQ(model->get_results()[2]->input(0).get_source_output().get_node()->get_friendly_name(), "Split.2");