Skip to content

Commit

Permalink
[FE] Set OV model's output's name after parsing model by frontend (op…
Browse files Browse the repository at this point in the history
…envinotoolkit#28105)

### Details:
- After a model is read by the IR frontend, the model's output tensor names
appear only on the nodes connected to the Results. By default these names are
not dedicated Result names, so during pre/post-processing they can stay on the
producing node and disappear as model output names. To fix this, set the names
as the Results' names so that they remain the model's output names across
transformations.
- The ONNX frontend now sets the OV model's output names when converting the
model to the OV representation.
- Fix NPU test which reports `Attempt to get a name for a Tensor without
names`

### Related PRs:
- openvinotoolkit#28102

### Tickets:
 - CVS-159401

---------

Signed-off-by: Raasz, Pawel <[email protected]>
  • Loading branch information
praasz authored Dec 19, 2024
1 parent 6acc929 commit b982e19
Show file tree
Hide file tree
Showing 4 changed files with 148 additions and 0 deletions.
13 changes: 13 additions & 0 deletions src/frontends/ir/src/ir_deserializer.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@
#include <pugixml.hpp>
#include <regex>

#include "openvino/core/descriptor_tensor.hpp"
#include "openvino/core/except.hpp"
#include "openvino/core/meta_data.hpp"
#include "openvino/core/rt_info/weightless_caching_attributes.hpp"
Expand All @@ -18,6 +19,7 @@
#include "openvino/op/result.hpp"
#include "openvino/op/util/assign_base.hpp"
#include "openvino/op/util/framework_node.hpp"
#include "openvino/op/util/op_types.hpp"
#include "openvino/op/util/read_value_base.hpp"
#include "openvino/op/util/sub_graph_base.hpp"
#include "openvino/op/util/variable.hpp"
Expand Down Expand Up @@ -1023,6 +1025,17 @@ std::shared_ptr<ov::Node> ov::XmlDeserializer::create_node(const std::vector<ov:
++index;
}
}

// The IR does not store information about dedicated output names for Result node (model output),
// assume all names from parent node are Result's (model's) tensor names.
// Consider adding dedicated RT info with information about Result's output names.
if (auto result = ov::as_type<ov::op::v0::Result>(ovNode.get())) {
if (!ov::op::util::is_parameter(result->get_input_source_output(0).get_node())) {
// Copy names if parent node is not parameter, model's input names should not be dedicated
// output names as they could be removed from Parameter's tensor during model transformations.
ov::descriptor::copy_tensor_names(result->get_output_tensor(0), result->get_input_tensor(0));
}
}
}

return ovNode;
Expand Down
99 changes: 99 additions & 0 deletions src/frontends/ir/tests/pre_processing_deserialization.cpp
Original file line number Diff line number Diff line change
@@ -1,8 +1,10 @@
// Copyright (C) 2018-2024 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gmock/gmock.h>

#include "frontend_test.hpp"
#include "openvino/core/preprocess/pre_post_process.hpp"

class IRFrontendTestsPreProcessing : public ::testing::Test, public IRFrontendTestsImpl {
protected:
Expand Down Expand Up @@ -71,3 +73,100 @@ TEST_F(IRFrontendTestsPreProcessing, pre_processing) {
OV_ASSERT_NO_THROW(model = core.read_model(xmlFileName, binFileName));
ASSERT_TRUE(!!model);
}

namespace ov {
namespace test {

using testing::ElementsAre;
using testing::Property;
using testing::UnorderedElementsAre;

// Verifies that tensor names deserialized from IR become dedicated Result (model output) names
// and survive PrePostProcessor transformations. An output connected directly to a Parameter
// must NOT adopt the input's names as dedicated output names: after PPP inserts a Convert node,
// those names stay on the Parameter's tensor, leaving that output with no names.
TEST_F(IRFrontendTestsPreProcessing, check_tensor_names_after_read_and_pre_post_processing) {
    // Model: Add(A, const) -> Result "output_a"; Parameter B wired straight to Result "output_b".
    // NOTE: the raw-string terminator must be exactly `)V0G0N";` — any extra characters before it
    // leak into the XML payload after </net> and break parsing.
    std::string xml_model = R"V0G0N(
<?xml version="1.0" ?>
<net name="Model" version="11">
<layers>
<layer id="0" name="A" type="Parameter" version="opset1">
<data shape="" element_type="f32" />
<output>
<port id="0" precision="f32" names="input_a" />
</output>
</layer>
<layer id="1" name="B" type="Parameter" version="opset1">
<data shape="" element_type="f32" />
<output>
<port id="0" precision="f32" names="input_b" />
</output>
</layer>
<layer id="2" name="my_const" type="Const" version="opset1">
<data element_type="f32" shape="" offset="0" size="4" />
<output>
<port id="0" precision="f32" />
</output>
</layer>
<layer id="3" name="Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="f32" />
<port id="1" precision="f32" />
</input>
<output>
<port id="0" precision="f32" names="add_result" />
</output>
</layer>
<layer id="4" name="output_a" type="Result" version="opset1">
<input>
<port id="0" precision="f32" />
</input>
</layer>
<layer id="5" name="output_b" type="Result" version="opset1">
<input>
<port id="0" precision="f32" />
</input>
</layer>
</layers>
<edges>
<edge from-layer="0" from-port="0" to-layer="3" to-port="0" />
<edge from-layer="1" from-port="0" to-layer="5" to-port="0" />
<edge from-layer="2" from-port="0" to-layer="3" to-port="1" />
<edge from-layer="3" from-port="0" to-layer="4" to-port="0" />
</edges>
<rt_info />
</net>
)V0G0N";

    // Weights buffer holding the single f32 scalar constant referenced by the IR (offset 0, size 4).
    constexpr auto DATA_COUNT = 1;
    std::vector<unsigned char> buffer(DATA_COUNT * sizeof(float), 0);
    std::fill_n(reinterpret_cast<float*>(buffer.data()), DATA_COUNT, 1.f);

    createTemporalModelFile(xml_model, buffer);

    std::shared_ptr<Model> model;
    OV_ASSERT_NO_THROW(model = core.read_model(xmlFileName, binFileName));
    ASSERT_NE(model, nullptr);

    // Names as read from IR, before any pre/post-processing.
    EXPECT_THAT(model->inputs(),
                ElementsAre(Property("Input 0", &Output<Node>::get_names, UnorderedElementsAre("input_a")),
                            Property("Input 1", &Output<Node>::get_names, UnorderedElementsAre("input_b"))));

    EXPECT_THAT(model->outputs(),
                ElementsAre(Property("Output 0", &Output<Node>::get_names, UnorderedElementsAre("add_result")),
                            // Directly connected to model input shows input's names.
                            Property("Output 1", &Output<Node>::get_names, UnorderedElementsAre("input_b"))));

    // Apply output type changes; PPP inserts Convert nodes in front of the Results.
    auto p = preprocess::PrePostProcessor(model);
    p.output(0).tensor().set_element_type(element::f16);
    p.output(1).tensor().set_element_type(element::i32);
    model = p.build();

    // Input names are untouched by the output-side PPP.
    EXPECT_THAT(model->inputs(),
                ElementsAre(Property("Input 0", &Output<Node>::get_names, UnorderedElementsAre("input_a")),
                            Property("Input 1", &Output<Node>::get_names, UnorderedElementsAre("input_b"))));

    EXPECT_THAT(model->outputs(),
                ElementsAre(Property("Output 0", &Output<Node>::get_names, UnorderedElementsAre("add_result")),
                            // After PPP (inserts convert node) the tensor names stay on model's input.
                            Property("Output 1", &Output<Node>::get_names, testing::IsEmpty())));
}
} // namespace test
} // namespace ov
7 changes: 7 additions & 0 deletions src/frontends/onnx/frontend/src/input_model.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -533,6 +533,13 @@ void InputModel::add_tensor_names(std::shared_ptr<Model>& model) {
it->add_names(tensor_names.second);
}
}

// Set model output names
for (auto&& result : model->get_results()) {
if (!is_type<op::v0::Parameter>(result->get_input_source_output(0).get_node())) {
result->get_output_tensor(0).add_names(result->get_input_tensor(0).get_names());
}
}
}

void InputModel::reshape_model_inputs(std::shared_ptr<Model>& model) {
Expand Down
29 changes: 29 additions & 0 deletions src/frontends/onnx/tests/load_from.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -10,11 +10,15 @@

#include "common_test_utils/test_assertions.hpp"
#include "onnx_utils.hpp"
#include "openvino/core/preprocess/pre_post_process.hpp"
#include "utils.hpp"

using namespace ov::frontend;

using ONNXLoadTest = FrontEndLoadFromTest;
using testing::ElementsAre;
using testing::Property;
using testing::UnorderedElementsAre;

static LoadFromFEParam getTestData() {
LoadFromFEParam res;
Expand Down Expand Up @@ -58,6 +62,31 @@ TEST_P(FrontEndLoadFromTest, load_model_not_exists_at_path) {
OV_EXPECT_THROW(fe->load(model_file_path), ov::Exception, testing::HasSubstr(error_msg));
}

// Verifies that an ONNX model converted to an OV model exposes its input/output tensor names,
// and that the output name ("Y") survives a PrePostProcessor pass that changes the output
// element type (which inserts a Convert node before the Result).
TEST_P(FrontEndLoadFromTest, load_model_and_apply_ppp) {
// Path to the parameterized test model under the test-data directory.
auto model_file_path =
ov::util::path_join({ov::test::utils::getExecutableDirectory(), TEST_ONNX_MODELS_DIRNAME, m_param.m_stream});

m_frontEnd = m_fem.load_by_model(model_file_path);
const auto fe_model = m_frontEnd->load(model_file_path);
auto model = m_frontEnd->convert(fe_model);

// Names as produced by conversion, before pre/post-processing.
// NOTE(review): assumes the parameterized model has inputs A, B, C and output Y — holds for
// the model supplied by getTestData().
EXPECT_THAT(model->inputs(),
ElementsAre(Property("Input 0", &ov::Output<ov::Node>::get_names, UnorderedElementsAre("A")),
Property("Input 1", &ov::Output<ov::Node>::get_names, UnorderedElementsAre("B")),
Property("Input 2", &ov::Output<ov::Node>::get_names, UnorderedElementsAre("C"))));
EXPECT_THAT(model->output(0).get_names(), UnorderedElementsAre("Y"));

// Change the output element type; PPP rebuilds the model with a Convert before the Result.
auto p = ov::preprocess::PrePostProcessor(model);
p.output(0).tensor().set_element_type(ov::element::f16);
model = p.build();

// All tensor names must be preserved after the PPP transformation.
EXPECT_THAT(model->inputs(),
ElementsAre(Property("Input 0", &ov::Output<ov::Node>::get_names, UnorderedElementsAre("A")),
Property("Input 1", &ov::Output<ov::Node>::get_names, UnorderedElementsAre("B")),
Property("Input 2", &ov::Output<ov::Node>::get_names, UnorderedElementsAre("C"))));
EXPECT_THAT(model->output(0).get_names(), UnorderedElementsAre("Y"));
}

INSTANTIATE_TEST_SUITE_P(ONNXLoadTest,
FrontEndLoadFromTest,
::testing::Values(getTestData()),
Expand Down

0 comments on commit b982e19

Please sign in to comment.