From 7a37909539282fb935777eb79a198aaf652a8409 Mon Sep 17 00:00:00 2001 From: Ilya Churaev Date: Mon, 11 Jan 2021 15:50:26 +0300 Subject: [PATCH 01/23] Added tests --- inference-engine/include/cpp/ie_cnn_network.h | 26 ++++++++++++ inference-engine/include/ie_icnn_network.hpp | 32 +++++++++++++++ .../subgraph_tests/tensor_names.cpp | 16 ++++++++ .../include/subgraph_tests/tensor_names.hpp | 40 +++++++++++++++++++ .../subgraph/tensor_names.hpp | 28 +++++++++++++ .../src/subgraph/tensor_names.cpp | 40 +++++++++++++++++++ .../core/include/ngraph/descriptor/tensor.hpp | 9 ++++- ngraph/core/src/descriptor/tensor.cpp | 33 +++++++++++++-- 8 files changed, 219 insertions(+), 5 deletions(-) create mode 100644 inference-engine/tests/functional/plugin/cpu/shared_tests_instances/subgraph_tests/tensor_names.cpp create mode 100644 inference-engine/tests/functional/plugin/shared/include/subgraph_tests/tensor_names.hpp create mode 100644 inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/tensor_names.hpp create mode 100644 inference-engine/tests/functional/shared_test_classes/src/subgraph/tensor_names.cpp diff --git a/inference-engine/include/cpp/ie_cnn_network.h b/inference-engine/include/cpp/ie_cnn_network.h index 6f021ad42fd744..69b7891410ffde 100644 --- a/inference-engine/include/cpp/ie_cnn_network.h +++ b/inference-engine/include/cpp/ie_cnn_network.h @@ -244,6 +244,32 @@ class INFERENCE_ENGINE_API_CLASS(CNNNetwork) { CALL_STATUS_FNC(serialize, xmlPath, binPath); } + /** + * @brief Methods maps framework tensor name to OpenVINO name + * + * @param orig_name Framework tensor name + * + * @return OpenVINO name + */ + std::string getOVNameForTensor(const std::string& orig_name) const { + std::string ov_name; + CALL_STATUS_FNC(getOVNameForTensor, ov_name, orig_name); + return ov_name; + } + + /** + * @brief Methods maps framework operator name to OpenVINO name + * + * @param orig_name Framework operation name + * + * @return OpenVINO name + 
*/ + std::string getOVNameForOperation(const std::string& orig_name) const { + std::string ov_name; + CALL_STATUS_FNC(getOVNameForOperation, ov_name, orig_name); + return ov_name; + } + protected: /** * @brief Network extra interface, might be nullptr diff --git a/inference-engine/include/ie_icnn_network.hpp b/inference-engine/include/ie_icnn_network.hpp index 1d803b3a6162df..ba629e74e684c4 100644 --- a/inference-engine/include/ie_icnn_network.hpp +++ b/inference-engine/include/ie_icnn_network.hpp @@ -170,6 +170,38 @@ class INFERENCE_ENGINE_API_CLASS(ICNNNetwork): public details::IRelease { virtual StatusCode serialize(const std::string& xmlPath, const std::string& binPath, ResponseDesc* resp) const noexcept = 0; + /** + * @brief Methods maps framework tensor name to OpenVINO name + * + * @param ov_name OpenVINO name + * @param orig_name Framework tensor name + * @param resp Pointer to the response message that holds a description of an error if any occurred + * + * @return Status code of the operation + */ + virtual StatusCode getOVNameForTensor(std::string& ov_name, const std::string& orig_name, ResponseDesc* resp) const noexcept { + (void) ov_name; + (void) orig_name; + (void) resp; + return NOT_IMPLEMENTED; + } + + /** + * @brief Methods maps framework operation name to OpenVINO name + * + * @param ov_name OpenVINO name + * @param orig_name Framework operation name + * @param resp Pointer to the response message that holds a description of an error if any occurred + * + * @return Status code of the operation + */ + virtual StatusCode getOVNameForOperation(std::string& ov_name, const std::string& orig_name, ResponseDesc* resp) const noexcept { + (void) ov_name; + (void) orig_name; + (void) resp; + return NOT_IMPLEMENTED; + } + /** * @brief A virtual destructor. 
*/ diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/subgraph_tests/tensor_names.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/subgraph_tests/tensor_names.cpp new file mode 100644 index 00000000000000..72e5481bd0e6b4 --- /dev/null +++ b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/subgraph_tests/tensor_names.cpp @@ -0,0 +1,16 @@ +// Copyright (C) 2019 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include "subgraph_tests/tensor_names.hpp" +#include "common_test_utils/test_constants.hpp" + +using namespace SubgraphTestsDefinitions; + +namespace { + INSTANTIATE_TEST_CASE_P(smoke_Check, TensorNamesTest, + ::testing::Values(CommonTestUtils::DEVICE_CPU), + TensorNamesTest::getTestCaseName); +} // namespace diff --git a/inference-engine/tests/functional/plugin/shared/include/subgraph_tests/tensor_names.hpp b/inference-engine/tests/functional/plugin/shared/include/subgraph_tests/tensor_names.hpp new file mode 100644 index 00000000000000..2f5071d1fd370e --- /dev/null +++ b/inference-engine/tests/functional/plugin/shared/include/subgraph_tests/tensor_names.hpp @@ -0,0 +1,40 @@ +// Copyright (C) 2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "shared_test_classes/subgraph/tensor_names.hpp" +#include + +namespace SubgraphTestsDefinitions { + +TEST_P(TensorNamesTest, CheckTensorNames) { + cnnNetwork = InferenceEngine::CNNNetwork{function}; + ConfigureNetwork(); + executableNetwork = core->LoadNetwork(cnnNetwork, targetDevice, configuration); + + auto inputs = cnnNetwork.getInputsInfo(); + auto outputs = cnnNetwork.getOutputsInfo(); + std::unordered_set inNames; + for (const auto& in : inputs) + inNames.emplace(in.first); + std::unordered_set outNames; + for (const auto& out : outputs) + outNames.emplace(out.first); + + for (const auto& param : function->get_parameters()) { + 
ASSERT_TRUE(inNames.count(cnnNetwork.getOVNameForOperation(param->get_friendly_name()))); + for (const auto& name : param->get_output_tensor(0).get_names()) + ASSERT_TRUE(inNames.count(cnnNetwork.getOVNameForTensor(name))); + } + + for (const auto& result : function->get_results()) { + ASSERT_TRUE(outNames.count(cnnNetwork.getOVNameForOperation(result->get_friendly_name()))); + for (const auto& name : result->get_input_tensor(0).get_names()) + ASSERT_TRUE(outNames.count(cnnNetwork.getOVNameForTensor(name))); + } +} + +} // namespace SubgraphTestsDefinitions + diff --git a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/tensor_names.hpp b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/tensor_names.hpp new file mode 100644 index 00000000000000..3d3afb2ff72936 --- /dev/null +++ b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/tensor_names.hpp @@ -0,0 +1,28 @@ +// Copyright (C) 2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include +#include +#include + +#include "shared_test_classes/base/layer_test_utils.hpp" +#include "ngraph_functions/builders.hpp" + +namespace SubgraphTestsDefinitions { + +typedef std::tuple< + std::string // Device name +> constResultParams; + +class TensorNamesTest : public testing::WithParamInterface, + virtual public LayerTestsUtils::LayerTestsCommon { +public: + static std::string getTestCaseName(testing::TestParamInfo obj); +protected: + void SetUp() override; +}; +} // namespace SubgraphTestsDefinitions diff --git a/inference-engine/tests/functional/shared_test_classes/src/subgraph/tensor_names.cpp b/inference-engine/tests/functional/shared_test_classes/src/subgraph/tensor_names.cpp new file mode 100644 index 00000000000000..e9214d84ac07da --- /dev/null +++ b/inference-engine/tests/functional/shared_test_classes/src/subgraph/tensor_names.cpp @@ -0,0 +1,40 @@ +// 
Copyright (C) 2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "shared_test_classes/subgraph/tensor_names.hpp" + +namespace SubgraphTestsDefinitions { + +std::string TensorNamesTest::getTestCaseName(testing::TestParamInfo obj) { + std::string targetDevice; + std::tie(targetDevice) = obj.param; + std::ostringstream result; + result << "TargetDevice=" << targetDevice; + return result.str(); +} + +void TensorNamesTest::SetUp() { + InferenceEngine::SizeVector inputShapes; + InferenceEngine::Precision inputPrecision; + std::tie(targetDevice) = this->GetParam(); + std::vector data(300); + for (size_t i = 0; i < 300; i++) + data[i] = i; + + auto parameter = std::make_shared(ngraph::element::Type_t::f32, ngraph::Shape{1, 3, 10, 10}); + parameter->set_friendly_name("parameter"); + parameter->get_output_tensor(0).set_name("input"); + auto relu = std::make_shared(parameter); + relu->set_friendly_name("relu"); + relu->get_output_tensor(0).set_names({"relu_t", "identity"}); + const ngraph::ResultVector results{std::make_shared(relu)}; + results[0]->set_friendly_name("out"); + results[0]->get_output_tensor(0).set_name("out_t"); + ngraph::ParameterVector params{parameter}; + function = std::make_shared(results, params, "TensorNames"); +} + +} // namespace SubgraphTestsDefinitions + + diff --git a/ngraph/core/include/ngraph/descriptor/tensor.hpp b/ngraph/core/include/ngraph/descriptor/tensor.hpp index 123b2ec507b66f..1ca13a31e5142f 100644 --- a/ngraph/core/include/ngraph/descriptor/tensor.hpp +++ b/ngraph/core/include/ngraph/descriptor/tensor.hpp @@ -18,6 +18,7 @@ #include #include +#include #include "ngraph/partial_shape.hpp" #include "ngraph/shape.hpp" @@ -36,6 +37,9 @@ namespace ngraph Tensor& operator=(const Tensor&) = delete; public: + Tensor(const element::Type& element_type, + const PartialShape& pshape, + std::vector name); Tensor(const element::Type& element_type, const PartialShape& pshape, const std::string& name); @@ -45,7 +49,10 @@ 
namespace ngraph size_t node_output_number); const std::string& get_name() const; + const std::vector& get_names() const; + void set_names(const std::vector& name); void set_name(const std::string& name); + void add_name(const std::string& name); void set_tensor_type(const element::Type& element_type, const PartialShape& pshape); void set_element_type(const element::Type& elemenet_type); void set_partial_shape(const PartialShape& partial_shape); @@ -67,7 +74,7 @@ namespace ngraph Node* m_node{nullptr}; size_t m_node_output_number{0}; - std::string m_name; + std::vector m_names; }; NGRAPH_API diff --git a/ngraph/core/src/descriptor/tensor.cpp b/ngraph/core/src/descriptor/tensor.cpp index 9669e9e3b8d6f9..03de51754c1ec6 100644 --- a/ngraph/core/src/descriptor/tensor.cpp +++ b/ngraph/core/src/descriptor/tensor.cpp @@ -22,11 +22,18 @@ using namespace std; descriptor::Tensor::Tensor(const element::Type& element_type, const PartialShape& pshape, - const std::string& name) + std::vector names) : m_element_type(element_type) , m_shape(pshape.is_static() ? 
pshape.to_shape() : Shape{}) , m_partial_shape(pshape) - , m_name(name) + , m_names(std::move(names)) +{ +} + +descriptor::Tensor::Tensor(const element::Type& element_type, + const PartialShape& pshape, + const std::string& name) + : Tensor(element_type, pshape, std::vector{name}) { } @@ -42,9 +49,20 @@ descriptor::Tensor::Tensor(const element::Type& element_type, { } +void descriptor::Tensor::add_name(const string& name) +{ + m_names.emplace_back(name); +} + void descriptor::Tensor::set_name(const string& name) { - m_name = name; + m_names.clear(); + m_names.emplace_back(name); +} + +void descriptor::Tensor::set_names(const std::vector& names) +{ + m_names = names; } void descriptor::Tensor::set_tensor_type(const element::Type& element_type, @@ -92,7 +110,14 @@ size_t descriptor::Tensor::size() const const std::string& descriptor::Tensor::get_name() const { - return m_name; + if (m_names.size() != 1) + throw ngraph_error("Tensor contains several names! Please use get_names() instead."); + return m_names[0]; +} + +const std::vector& descriptor::Tensor::get_names() const +{ + return m_names; } ostream& operator<<(ostream& out, const descriptor::Tensor& tensor) From 79554e3d245b0d7799672288470c4d456ce991b1 Mon Sep 17 00:00:00 2001 From: Ilya Churaev Date: Tue, 12 Jan 2021 14:28:03 +0300 Subject: [PATCH 02/23] Fixed tests --- .../cnn_network_ngraph_impl.cpp | 39 +++++++++++- .../cnn_network_ngraph_impl.hpp | 7 +++ .../include/subgraph_tests/tensor_names.hpp | 60 ++++++++++++++++++- .../src/subgraph/tensor_names.cpp | 4 +- .../core/include/ngraph/descriptor/output.hpp | 6 ++ .../core/include/ngraph/descriptor/tensor.hpp | 9 +-- ngraph/core/include/ngraph/node_output.hpp | 6 ++ ngraph/core/src/descriptor/output.cpp | 10 ++++ ngraph/core/src/descriptor/tensor.cpp | 33 ++-------- ngraph/core/src/node.cpp | 4 ++ ngraph/core/src/node_output.cpp | 14 +++++ 11 files changed, 149 insertions(+), 43 deletions(-) diff --git 
a/inference-engine/src/inference_engine/cnn_network_ngraph_impl.cpp b/inference-engine/src/inference_engine/cnn_network_ngraph_impl.cpp index dd3160cd53ddf1..5737c4b5724530 100644 --- a/inference-engine/src/inference_engine/cnn_network_ngraph_impl.cpp +++ b/inference-engine/src/inference_engine/cnn_network_ngraph_impl.cpp @@ -140,6 +140,12 @@ CNNNetworkNGraphImpl::CNNNetworkNGraphImpl( std::string outName = layer->get_friendly_name(); IE_ASSERT(layer->get_output_size() == 1); // Parameter as only singly output port + // map original names to OpenVINO name + _opNames[outName] = outName; + for (const auto& name : layer->output(0).get_names()) { + _tensorNames[name] = outName; + } + DataPtr& ptr = _data[outName]; IE_ASSERT(ptr); // Data must be allocated after the reshape method @@ -157,7 +163,8 @@ CNNNetworkNGraphImpl::CNNNetworkNGraphImpl( } CNNNetworkNGraphImpl::CNNNetworkNGraphImpl(const ICNNNetwork& network) { - if (network.getFunction() == nullptr) { + const auto* net = dynamic_cast(&network); + if (network.getFunction() == nullptr || !net) { THROW_IE_EXCEPTION << "Cannot create CNNNetwork with nGraph from legacy network format!"; } @@ -167,6 +174,9 @@ CNNNetworkNGraphImpl::CNNNetworkNGraphImpl(const ICNNNetwork& network) { network.getInputsInfo(inputs); network.getOutputsInfo(outputs); + _opNames = net->_opNames; + _tensorNames = net->_tensorNames; + for (const auto& outputInfo : outputs) { const auto& name = outputInfo.second->getName(); DataPtr output = std::make_shared(name, outputInfo.second->getTensorDesc()); @@ -252,6 +262,17 @@ void CNNNetworkNGraphImpl::addOutput(const ::ngraph::Output<::ngraph::Node> & ou createDataForResult(output, dataName, data); _data[dataName] = data; _outputData[dataName] = data; + + // Save original framework names + for (const auto& name : output.get_names()) { + _tensorNames[name] = dataName; + } + for (const auto consumerInput : output.get_target_inputs()) { + const auto &consumerLayer = 
consumerInput.get_node()->shared_from_this(); + if (std::dynamic_pointer_cast(consumerLayer)) { + _opNames[consumerLayer->get_friendly_name()] = dataName; + } + } } size_t CNNNetworkNGraphImpl::getBatchSize() const noexcept { @@ -408,7 +429,7 @@ StatusCode CNNNetworkNGraphImpl::serialize(const std::string& xmlPath, ResponseDesc* resp) const noexcept { try { std::map custom_opsets; - for (auto extension : _ie_extensions) { + for (const auto& extension : _ie_extensions) { auto opset = extension->getOpSets(); custom_opsets.insert(begin(opset), end(opset)); } @@ -427,6 +448,20 @@ StatusCode CNNNetworkNGraphImpl::serialize(const std::string& xmlPath, return OK; } +StatusCode CNNNetworkNGraphImpl::getOVNameForTensor(std::string& ov_name, const std::string& orig_name, ResponseDesc* resp) const noexcept { + if (_tensorNames.find(orig_name) == _tensorNames.end()) + return DescriptionBuffer(NOT_FOUND, resp) << "Framework tensor with name \"" << orig_name << "\" was not mapped to OpenVINO data!"; + ov_name = _tensorNames.at(orig_name); + return OK; +} + +StatusCode CNNNetworkNGraphImpl::getOVNameForOperation(std::string& ov_name, const std::string& orig_name, ResponseDesc* resp) const noexcept { + if (_opNames.find(orig_name) == _opNames.end()) + return DescriptionBuffer(NOT_FOUND, resp) << "Framework operation with name \"" << orig_name << "\" was not mapped to OpenVINO data!"; + ov_name = _opNames.at(orig_name); + return OK; +} + StatusCode CNNNetworkNGraphImpl::setBatchSize(size_t size, ResponseDesc* responseDesc) noexcept { try { if (getBatchSize() == size) return OK; diff --git a/inference-engine/src/inference_engine/cnn_network_ngraph_impl.hpp b/inference-engine/src/inference_engine/cnn_network_ngraph_impl.hpp index ed6c3c75f7e990..0fb80a5fca36b2 100644 --- a/inference-engine/src/inference_engine/cnn_network_ngraph_impl.hpp +++ b/inference-engine/src/inference_engine/cnn_network_ngraph_impl.hpp @@ -11,6 +11,7 @@ #include #include +#include #include #include #include @@ 
-81,6 +82,10 @@ class INFERENCE_ENGINE_API_CLASS(CNNNetworkNGraphImpl): public ICNNNetwork { StatusCode serialize(const std::string& xmlPath, const std::string& binPath, ResponseDesc* resp) const noexcept override; + StatusCode getOVNameForTensor(std::string& ov_name, const std::string& orig_name, ResponseDesc* resp) const noexcept override; + + StatusCode getOVNameForOperation(std::string& ov_name, const std::string& orig_name, ResponseDesc* resp) const noexcept override; + // used by convertFunctionToICNNNetwork from legacy library std::map _data; protected: @@ -91,6 +96,8 @@ class INFERENCE_ENGINE_API_CLASS(CNNNetworkNGraphImpl): public ICNNNetwork { InferenceEngine::InputsDataMap _inputData; std::map _outputData; const std::vector _ie_extensions; + std::unordered_map _opNames; + std::unordered_map _tensorNames; /** * @brief Create DataPtr for nGraph operation diff --git a/inference-engine/tests/functional/plugin/shared/include/subgraph_tests/tensor_names.hpp b/inference-engine/tests/functional/plugin/shared/include/subgraph_tests/tensor_names.hpp index 2f5071d1fd370e..c9fa01a7b0f00f 100644 --- a/inference-engine/tests/functional/plugin/shared/include/subgraph_tests/tensor_names.hpp +++ b/inference-engine/tests/functional/plugin/shared/include/subgraph_tests/tensor_names.hpp @@ -25,15 +25,71 @@ TEST_P(TensorNamesTest, CheckTensorNames) { for (const auto& param : function->get_parameters()) { ASSERT_TRUE(inNames.count(cnnNetwork.getOVNameForOperation(param->get_friendly_name()))); - for (const auto& name : param->get_output_tensor(0).get_names()) + for (const auto& name : param->output(0).get_names()) ASSERT_TRUE(inNames.count(cnnNetwork.getOVNameForTensor(name))); } for (const auto& result : function->get_results()) { ASSERT_TRUE(outNames.count(cnnNetwork.getOVNameForOperation(result->get_friendly_name()))); - for (const auto& name : result->get_input_tensor(0).get_names()) + for (const auto& name : result->input_value(0).get_names()) 
ASSERT_TRUE(outNames.count(cnnNetwork.getOVNameForTensor(name))); } + + inferRequest = executableNetwork.CreateInferRequest(); + + for (const auto& param : function->get_parameters()) { + ASSERT_NO_THROW(inferRequest.GetBlob(cnnNetwork.getOVNameForOperation(param->get_friendly_name()))); + for (const auto& name : param->output(0).get_names()) + ASSERT_NO_THROW(inferRequest.GetBlob(cnnNetwork.getOVNameForTensor(name))); + } + + for (const auto& result : function->get_results()) { + ASSERT_NO_THROW(inferRequest.GetBlob(cnnNetwork.getOVNameForOperation(result->get_friendly_name()))); + for (const auto& name : result->output(0).get_names()) + ASSERT_NO_THROW(inferRequest.GetBlob(cnnNetwork.getOVNameForTensor(name))); + } +} + +TEST_P(TensorNamesTest, CheckTensorNamesAfterClone) { + cnnNetwork = InferenceEngine::CNNNetwork{function}; + InferenceEngine::CNNNetwork clonedNet(static_cast(cnnNetwork)); + ConfigureNetwork(); + executableNetwork = core->LoadNetwork(clonedNet, targetDevice, configuration); + + auto inputs = clonedNet.getInputsInfo(); + auto outputs = clonedNet.getOutputsInfo(); + std::unordered_set inNames; + for (const auto& in : inputs) + inNames.emplace(in.first); + std::unordered_set outNames; + for (const auto& out : outputs) + outNames.emplace(out.first); + + for (const auto& param : function->get_parameters()) { + ASSERT_TRUE(inNames.count(clonedNet.getOVNameForOperation(param->get_friendly_name()))); + for (const auto& name : param->output(0).get_names()) + ASSERT_TRUE(inNames.count(clonedNet.getOVNameForTensor(name))); + } + + for (const auto& result : function->get_results()) { + ASSERT_TRUE(outNames.count(clonedNet.getOVNameForOperation(result->get_friendly_name()))); + for (const auto& name : result->input_value(0).get_names()) + ASSERT_TRUE(outNames.count(clonedNet.getOVNameForTensor(name))); + } + + inferRequest = executableNetwork.CreateInferRequest(); + + for (const auto& param : function->get_parameters()) { + 
ASSERT_NO_THROW(inferRequest.GetBlob(clonedNet.getOVNameForOperation(param->get_friendly_name()))); + for (const auto& name : param->output(0).get_names()) + ASSERT_NO_THROW(inferRequest.GetBlob(clonedNet.getOVNameForTensor(name))); + } + + for (const auto& result : function->get_results()) { + ASSERT_NO_THROW(inferRequest.GetBlob(clonedNet.getOVNameForOperation(result->get_friendly_name()))); + for (const auto& name : result->output(0).get_names()) + ASSERT_NO_THROW(inferRequest.GetBlob(clonedNet.getOVNameForTensor(name))); + } } } // namespace SubgraphTestsDefinitions diff --git a/inference-engine/tests/functional/shared_test_classes/src/subgraph/tensor_names.cpp b/inference-engine/tests/functional/shared_test_classes/src/subgraph/tensor_names.cpp index e9214d84ac07da..03c219957fb4af 100644 --- a/inference-engine/tests/functional/shared_test_classes/src/subgraph/tensor_names.cpp +++ b/inference-engine/tests/functional/shared_test_classes/src/subgraph/tensor_names.cpp @@ -24,10 +24,10 @@ void TensorNamesTest::SetUp() { auto parameter = std::make_shared(ngraph::element::Type_t::f32, ngraph::Shape{1, 3, 10, 10}); parameter->set_friendly_name("parameter"); - parameter->get_output_tensor(0).set_name("input"); + parameter->output(0).set_names({"input"}); auto relu = std::make_shared(parameter); relu->set_friendly_name("relu"); - relu->get_output_tensor(0).set_names({"relu_t", "identity"}); + relu->output(0).set_names({"relu_t", "identity"}); const ngraph::ResultVector results{std::make_shared(relu)}; results[0]->set_friendly_name("out"); results[0]->get_output_tensor(0).set_name("out_t"); diff --git a/ngraph/core/include/ngraph/descriptor/output.hpp b/ngraph/core/include/ngraph/descriptor/output.hpp index c7c2fc875a6ba3..18ee4838a7e355 100644 --- a/ngraph/core/include/ngraph/descriptor/output.hpp +++ b/ngraph/core/include/ngraph/descriptor/output.hpp @@ -17,6 +17,8 @@ #pragma once #include +#include +#include #include #include "ngraph/descriptor/input.hpp" @@ -70,6 
+72,9 @@ namespace ngraph /// \return the element type of the output const element::Type& get_element_type() const; + const std::unordered_set& get_names() const; + void set_names(const std::unordered_set& names); + Output(const Output&) = default; Output(Output&&) = default; Output& operator=(const Output&) = default; @@ -79,6 +84,7 @@ namespace ngraph size_t m_index; std::shared_ptr m_tensor; std::vector m_inputs; + std::unordered_set names; }; } } diff --git a/ngraph/core/include/ngraph/descriptor/tensor.hpp b/ngraph/core/include/ngraph/descriptor/tensor.hpp index 1ca13a31e5142f..123b2ec507b66f 100644 --- a/ngraph/core/include/ngraph/descriptor/tensor.hpp +++ b/ngraph/core/include/ngraph/descriptor/tensor.hpp @@ -18,7 +18,6 @@ #include #include -#include #include "ngraph/partial_shape.hpp" #include "ngraph/shape.hpp" @@ -37,9 +36,6 @@ namespace ngraph Tensor& operator=(const Tensor&) = delete; public: - Tensor(const element::Type& element_type, - const PartialShape& pshape, - std::vector name); Tensor(const element::Type& element_type, const PartialShape& pshape, const std::string& name); @@ -49,10 +45,7 @@ namespace ngraph size_t node_output_number); const std::string& get_name() const; - const std::vector& get_names() const; - void set_names(const std::vector& name); void set_name(const std::string& name); - void add_name(const std::string& name); void set_tensor_type(const element::Type& element_type, const PartialShape& pshape); void set_element_type(const element::Type& elemenet_type); void set_partial_shape(const PartialShape& partial_shape); @@ -74,7 +67,7 @@ namespace ngraph Node* m_node{nullptr}; size_t m_node_output_number{0}; - std::vector m_names; + std::string m_name; }; NGRAPH_API diff --git a/ngraph/core/include/ngraph/node_output.hpp b/ngraph/core/include/ngraph/node_output.hpp index 359a1441cfc4b2..d645aa8c46c28f 100644 --- a/ngraph/core/include/ngraph/node_output.hpp +++ b/ngraph/core/include/ngraph/node_output.hpp @@ -17,6 +17,7 @@ #pragma 
once #include +#include #include "ngraph/descriptor/tensor.hpp" #include "ngraph/partial_shape.hpp" @@ -100,6 +101,9 @@ namespace ngraph /// \brief Replace all users of this value with replacement void replace(const Output& replacement); + const std::unordered_set& get_names() const; + void set_names(const std::unordered_set& names); + bool operator==(const Output& other) const; bool operator!=(const Output& other) const; bool operator<(const Output& other) const; @@ -174,6 +178,8 @@ namespace ngraph bool operator<=(const Output& other) const; bool operator>=(const Output& other) const; + const std::unordered_set& get_names() const; + private: std::shared_ptr m_node; size_t m_index{0}; diff --git a/ngraph/core/src/descriptor/output.cpp b/ngraph/core/src/descriptor/output.cpp index 33aba702ccabd4..535c4f47943ce1 100644 --- a/ngraph/core/src/descriptor/output.cpp +++ b/ngraph/core/src/descriptor/output.cpp @@ -78,3 +78,13 @@ const element::Type& descriptor::Output::get_element_type() const { return m_tensor->get_element_type(); } + +const std::unordered_set& descriptor::Output::get_names() const +{ + return names; +} + +void descriptor::Output::set_names(const std::unordered_set& names) +{ + this->names = names; +} diff --git a/ngraph/core/src/descriptor/tensor.cpp b/ngraph/core/src/descriptor/tensor.cpp index 03de51754c1ec6..9669e9e3b8d6f9 100644 --- a/ngraph/core/src/descriptor/tensor.cpp +++ b/ngraph/core/src/descriptor/tensor.cpp @@ -22,18 +22,11 @@ using namespace std; descriptor::Tensor::Tensor(const element::Type& element_type, const PartialShape& pshape, - std::vector names) + const std::string& name) : m_element_type(element_type) , m_shape(pshape.is_static() ? 
pshape.to_shape() : Shape{}) , m_partial_shape(pshape) - , m_names(std::move(names)) -{ -} - -descriptor::Tensor::Tensor(const element::Type& element_type, - const PartialShape& pshape, - const std::string& name) - : Tensor(element_type, pshape, std::vector{name}) + , m_name(name) { } @@ -49,20 +42,9 @@ descriptor::Tensor::Tensor(const element::Type& element_type, { } -void descriptor::Tensor::add_name(const string& name) -{ - m_names.emplace_back(name); -} - void descriptor::Tensor::set_name(const string& name) { - m_names.clear(); - m_names.emplace_back(name); -} - -void descriptor::Tensor::set_names(const std::vector& names) -{ - m_names = names; + m_name = name; } void descriptor::Tensor::set_tensor_type(const element::Type& element_type, @@ -110,14 +92,7 @@ size_t descriptor::Tensor::size() const const std::string& descriptor::Tensor::get_name() const { - if (m_names.size() != 1) - throw ngraph_error("Tensor contains several names! Please use get_names() instead."); - return m_names[0]; -} - -const std::vector& descriptor::Tensor::get_names() const -{ - return m_names; + return m_name; } ostream& operator<<(ostream& out, const descriptor::Tensor& tensor) diff --git a/ngraph/core/src/node.cpp b/ngraph/core/src/node.cpp index b74ee51111e254..cadc8c1c3f8516 100644 --- a/ngraph/core/src/node.cpp +++ b/ngraph/core/src/node.cpp @@ -143,6 +143,10 @@ std::shared_ptr { clone->add_control_dependency(cdep); } + for (size_t i = 0; i < get_output_size(); i++) + { + clone->output(i).set_names(output(i).get_names()); + } return clone; } diff --git a/ngraph/core/src/node_output.cpp b/ngraph/core/src/node_output.cpp index c59987c0068e06..75b43e98bfc889 100644 --- a/ngraph/core/src/node_output.cpp +++ b/ngraph/core/src/node_output.cpp @@ -89,6 +89,15 @@ namespace ngraph } } + const std::unordered_set& Output::get_names() const + { + return m_node->m_outputs.at(m_index).get_names(); + } + void Output::set_names(const std::unordered_set& names) + { + 
m_node->m_outputs.at(m_index).set_names(names); + } + bool Output::operator==(const Output& other) const { return m_node == other.m_node && m_index == other.m_index; @@ -182,6 +191,11 @@ namespace ngraph << output.get_partial_shape(); } + const std::unordered_set& Output::get_names() const + { + return m_node->m_outputs.at(m_index).get_names(); + } + std::ostream& operator<<(std::ostream& out, const Output& output) { return output.get_node()->write_description(out, 0) << "[" << output.get_index() From c08276609a6c104e875d0db0852725724a4dbe5c Mon Sep 17 00:00:00 2001 From: Ilya Churaev Date: Wed, 13 Jan 2021 09:21:44 +0300 Subject: [PATCH 03/23] Added tests to check addOutput method --- .../cnn_network_ngraph_impl.cpp | 8 ++- .../include/subgraph_tests/tensor_names.hpp | 71 ++++++++++++++++++- .../src/subgraph/tensor_names.cpp | 5 +- 3 files changed, 78 insertions(+), 6 deletions(-) diff --git a/inference-engine/src/inference_engine/cnn_network_ngraph_impl.cpp b/inference-engine/src/inference_engine/cnn_network_ngraph_impl.cpp index 5737c4b5724530..434b37e3c14d94 100644 --- a/inference-engine/src/inference_engine/cnn_network_ngraph_impl.cpp +++ b/inference-engine/src/inference_engine/cnn_network_ngraph_impl.cpp @@ -235,13 +235,15 @@ StatusCode CNNNetworkNGraphImpl::addOutput(const std::string& layerName, size_t try { for (const auto & layer : _ngraph_function->get_ops()) { if (layer->get_friendly_name() == layerName) { - auto result = make_shared<::ngraph::op::Result>(layer->output(outputIndex)); - _ngraph_function->add_results({result}); - std::string outputName = layerName; if (layer->outputs().size() != 1) { outputName += "." 
+ std::to_string(outputIndex); } + + auto result = make_shared<::ngraph::op::Result>(layer->output(outputIndex)); + result->set_friendly_name(outputName); + _ngraph_function->add_results({result}); + if (_outputData.count(outputName) == 0) { reshape(); } diff --git a/inference-engine/tests/functional/plugin/shared/include/subgraph_tests/tensor_names.hpp b/inference-engine/tests/functional/plugin/shared/include/subgraph_tests/tensor_names.hpp index c9fa01a7b0f00f..77cb203617a899 100644 --- a/inference-engine/tests/functional/plugin/shared/include/subgraph_tests/tensor_names.hpp +++ b/inference-engine/tests/functional/plugin/shared/include/subgraph_tests/tensor_names.hpp @@ -12,7 +12,6 @@ namespace SubgraphTestsDefinitions { TEST_P(TensorNamesTest, CheckTensorNames) { cnnNetwork = InferenceEngine::CNNNetwork{function}; ConfigureNetwork(); - executableNetwork = core->LoadNetwork(cnnNetwork, targetDevice, configuration); auto inputs = cnnNetwork.getInputsInfo(); auto outputs = cnnNetwork.getOutputsInfo(); @@ -35,6 +34,7 @@ TEST_P(TensorNamesTest, CheckTensorNames) { ASSERT_TRUE(outNames.count(cnnNetwork.getOVNameForTensor(name))); } + executableNetwork = core->LoadNetwork(cnnNetwork, targetDevice, configuration); inferRequest = executableNetwork.CreateInferRequest(); for (const auto& param : function->get_parameters()) { @@ -54,7 +54,6 @@ TEST_P(TensorNamesTest, CheckTensorNamesAfterClone) { cnnNetwork = InferenceEngine::CNNNetwork{function}; InferenceEngine::CNNNetwork clonedNet(static_cast(cnnNetwork)); ConfigureNetwork(); - executableNetwork = core->LoadNetwork(clonedNet, targetDevice, configuration); auto inputs = clonedNet.getInputsInfo(); auto outputs = clonedNet.getOutputsInfo(); @@ -77,6 +76,7 @@ TEST_P(TensorNamesTest, CheckTensorNamesAfterClone) { ASSERT_TRUE(outNames.count(clonedNet.getOVNameForTensor(name))); } + executableNetwork = core->LoadNetwork(clonedNet, targetDevice, configuration); inferRequest = executableNetwork.CreateInferRequest(); for (const 
auto& param : function->get_parameters()) { @@ -92,5 +92,72 @@ TEST_P(TensorNamesTest, CheckTensorNamesAfterClone) { } } +TEST_P(TensorNamesTest, CheckAddOutput) { + cnnNetwork = InferenceEngine::CNNNetwork{function}; + ConfigureNetwork(); + + auto inputs = cnnNetwork.getInputsInfo(); + auto outputs = cnnNetwork.getOutputsInfo(); + std::unordered_set inNames; + for (const auto& in : inputs) + inNames.emplace(in.first); + std::unordered_set outNames; + for (const auto& out : outputs) + outNames.emplace(out.first); + + ASSERT_EQ(1, inputs.size()); + ASSERT_EQ(1, outputs.size()); + ASSERT_EQ(1, function->get_results().size()); + + // Check that relu_prev doesn't exist in output and input maps + for (const auto& names : {inNames, outNames}) { + ASSERT_THROW(cnnNetwork.getOVNameForOperation("relu_prev"), InferenceEngine::NotFound); + for (const std::string& tensor_name : {"relu_prev_t", "identity_prev_t"}) { + ASSERT_THROW(cnnNetwork.getOVNameForOperation(tensor_name), InferenceEngine::NotFound); + } + } + + // Add relu_prev as output + cnnNetwork.addOutput("relu_prev"); + + inputs = cnnNetwork.getInputsInfo(); + outputs = cnnNetwork.getOutputsInfo(); + inNames.clear(); + for (const auto& in : inputs) + inNames.emplace(in.first); + outNames.clear(); + for (const auto& out : outputs) + outNames.emplace(out.first); + + ASSERT_EQ(1, inputs.size()); + ASSERT_EQ(2, outputs.size()); + ASSERT_EQ(2, function->get_results().size()); + + // Check that relu_prev exists in output map + ASSERT_FALSE(inNames.count(cnnNetwork.getOVNameForOperation("relu_prev"))); + for (const std::string& tensor_name : {"relu_prev_t", "identity_prev_t"}) { + ASSERT_FALSE(inNames.count(cnnNetwork.getOVNameForTensor(tensor_name))); + } + ASSERT_TRUE(outNames.count(cnnNetwork.getOVNameForOperation("relu_prev"))); + for (const std::string& tensor_name : {"relu_prev_t", "identity_prev_t"}) { + ASSERT_TRUE(outNames.count(cnnNetwork.getOVNameForTensor(tensor_name))); + } + + executableNetwork = 
core->LoadNetwork(cnnNetwork, targetDevice, configuration); + inferRequest = executableNetwork.CreateInferRequest(); + + for (const auto& param : cnnNetwork.getFunction()->get_parameters()) { + ASSERT_NO_THROW(inferRequest.GetBlob(cnnNetwork.getOVNameForOperation(param->get_friendly_name()))); + for (const auto& name : param->output(0).get_names()) + ASSERT_NO_THROW(inferRequest.GetBlob(cnnNetwork.getOVNameForTensor(name))); + } + + for (const auto& result : cnnNetwork.getFunction()->get_results()) { + ASSERT_NO_THROW(inferRequest.GetBlob(cnnNetwork.getOVNameForOperation(result->get_friendly_name()))); + for (const auto& name : result->output(0).get_names()) + ASSERT_NO_THROW(inferRequest.GetBlob(cnnNetwork.getOVNameForTensor(name))); + } +} + } // namespace SubgraphTestsDefinitions diff --git a/inference-engine/tests/functional/shared_test_classes/src/subgraph/tensor_names.cpp b/inference-engine/tests/functional/shared_test_classes/src/subgraph/tensor_names.cpp index 03c219957fb4af..21ce35fd671f42 100644 --- a/inference-engine/tests/functional/shared_test_classes/src/subgraph/tensor_names.cpp +++ b/inference-engine/tests/functional/shared_test_classes/src/subgraph/tensor_names.cpp @@ -25,7 +25,10 @@ void TensorNamesTest::SetUp() { auto parameter = std::make_shared(ngraph::element::Type_t::f32, ngraph::Shape{1, 3, 10, 10}); parameter->set_friendly_name("parameter"); parameter->output(0).set_names({"input"}); - auto relu = std::make_shared(parameter); + auto relu_prev = std::make_shared(parameter); + relu_prev->set_friendly_name("relu_prev"); + relu_prev->output(0).set_names({"relu_prev_t", "identity_prev_t"}); + auto relu = std::make_shared(relu_prev); relu->set_friendly_name("relu"); relu->output(0).set_names({"relu_t", "identity"}); const ngraph::ResultVector results{std::make_shared(relu)}; From d553a24939d23f29cca61ff588cfde13d6b5883e Mon Sep 17 00:00:00 2001 From: Ilya Churaev Date: Wed, 13 Jan 2021 10:27:30 +0300 Subject: [PATCH 04/23] Added support of port 
names in the IR --- .../src/readers/ir_reader/ie_ir_parser.cpp | 14 ++- .../src/readers/ir_reader/ie_ir_parser.hpp | 3 +- .../ngraph_reader/tensor_names.cpp | 89 +++++++++++++++++++ .../include/subgraph_tests/tensor_names.hpp | 2 +- .../subgraph/tensor_names.hpp | 2 +- .../src/subgraph/tensor_names.cpp | 2 +- 6 files changed, 106 insertions(+), 6 deletions(-) create mode 100644 inference-engine/tests/functional/inference_engine/ngraph_reader/tensor_names.cpp diff --git a/inference-engine/src/readers/ir_reader/ie_ir_parser.cpp b/inference-engine/src/readers/ir_reader/ie_ir_parser.cpp index 38b0b1b8e7389d..7e1a727c982e28 100644 --- a/inference-engine/src/readers/ir_reader/ie_ir_parser.cpp +++ b/inference-engine/src/readers/ir_reader/ie_ir_parser.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2020 Intel Corporation +// Copyright (C) 2018-2021 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // @@ -559,7 +559,7 @@ void V10Parser::parsePreProcess(CNNNetwork& network, const pugi::xml_node& root, } V10Parser::GenericLayerParams V10Parser::XmlDeserializer::parseGenericParams(const pugi::xml_node& node) { - const auto parsePort = [](const pugi::xml_node& parentNode, + const auto parsePort = [this](const pugi::xml_node& parentNode, const GenericLayerParams& params, bool input) -> GenericLayerParams::LayerPortData { GenericLayerParams::LayerPortData port; @@ -584,6 +584,12 @@ V10Parser::GenericLayerParams V10Parser::XmlDeserializer::parseGenericParams(con type = InferenceEngine::details::convertPrecision(preStr); } port.precision = type; + std::vector names; + if (getParameters(parentNode, "names", names)) { + for (const auto& name : names) { + port.names.emplace(name); + } + } return port; }; GenericLayerParams params; @@ -772,6 +778,10 @@ std::shared_ptr V10Parser::XmlDeserializer::createNode( } ngraphNode->set_friendly_name(params.name); + for (size_t i = 0; i < params.outputPorts.size() && i < ngraphNode->get_output_size(); ++i) { + if 
(!params.outputPorts[i].names.empty()) + ngraphNode->output(i).set_names(params.outputPorts[i].names); + } return ngraphNode; } diff --git a/inference-engine/src/readers/ir_reader/ie_ir_parser.hpp b/inference-engine/src/readers/ir_reader/ie_ir_parser.hpp index 8c720336070128..800daca925aae1 100644 --- a/inference-engine/src/readers/ir_reader/ie_ir_parser.hpp +++ b/inference-engine/src/readers/ir_reader/ie_ir_parser.hpp @@ -69,6 +69,7 @@ class V10Parser : public IParser { // Precision and dimensions are needed only for GenericIE op ngraph::element::Type_t precision; SizeVector dims; + std::unordered_set names; }; size_t layerId; std::string version; @@ -351,4 +352,4 @@ class V10Parser : public IParser { #endif // IR_READER_V10 -} // namespace InferenceEngine \ No newline at end of file +} // namespace InferenceEngine diff --git a/inference-engine/tests/functional/inference_engine/ngraph_reader/tensor_names.cpp b/inference-engine/tests/functional/inference_engine/ngraph_reader/tensor_names.cpp new file mode 100644 index 00000000000000..e5feb809dac2f9 --- /dev/null +++ b/inference-engine/tests/functional/inference_engine/ngraph_reader/tensor_names.cpp @@ -0,0 +1,89 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include "ngraph_reader_tests.hpp" + +TEST_F(NGraphReaderTests, ReadNetworkWithTensorNames) { + std::string model = R"V0G0N( + + + + + + + 1 + 3 + 22 + 22 + + + + + + + 1 + 3 + 22 + 22 + + + + + 1 + 3 + 22 + 22 + + + + + + + 1 + 3 + 22 + 22 + + + + + + + + + +)V0G0N"; + Core ie; + Blob::Ptr weights; + + auto network = ie.ReadNetwork(model, weights); + auto function = network.getFunction(); + auto inputs = network.getInputsInfo(); + auto outputs = network.getOutputsInfo(); + std::unordered_set inNames; + for (const auto& in : inputs) + inNames.emplace(in.first); + std::unordered_set outNames; + for (const auto& out : outputs) + outNames.emplace(out.first); + + ASSERT_EQ(1, inputs.size()); + ASSERT_EQ(1, 
outputs.size()); + ASSERT_EQ(1, function->get_results().size()); + + for (const auto& param : function->get_parameters()) { + ASSERT_TRUE(inNames.count(network.getOVNameForOperation(param->get_friendly_name()))); + ASSERT_TRUE(!param->output(0).get_names().empty()); + for (const auto& name : param->output(0).get_names()) + ASSERT_TRUE(inNames.count(network.getOVNameForTensor(name))); + } + + for (const auto& result : function->get_results()) { + ASSERT_TRUE(outNames.count(network.getOVNameForOperation(result->get_friendly_name()))); + ASSERT_TRUE(!result->input_value(0).get_names().empty()); + for (const auto& name : result->input_value(0).get_names()) + ASSERT_TRUE(outNames.count(network.getOVNameForTensor(name))); + } +} diff --git a/inference-engine/tests/functional/plugin/shared/include/subgraph_tests/tensor_names.hpp b/inference-engine/tests/functional/plugin/shared/include/subgraph_tests/tensor_names.hpp index 77cb203617a899..93d5a123f8832a 100644 --- a/inference-engine/tests/functional/plugin/shared/include/subgraph_tests/tensor_names.hpp +++ b/inference-engine/tests/functional/plugin/shared/include/subgraph_tests/tensor_names.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2020 Intel Corporation +// Copyright (C) 2021 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/tensor_names.hpp b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/tensor_names.hpp index 3d3afb2ff72936..dfa2cbeaa259d7 100644 --- a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/tensor_names.hpp +++ b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/tensor_names.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2020 Intel Corporation +// Copyright (C) 2021 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git 
a/inference-engine/tests/functional/shared_test_classes/src/subgraph/tensor_names.cpp b/inference-engine/tests/functional/shared_test_classes/src/subgraph/tensor_names.cpp index 21ce35fd671f42..2840c6e8d996f5 100644 --- a/inference-engine/tests/functional/shared_test_classes/src/subgraph/tensor_names.cpp +++ b/inference-engine/tests/functional/shared_test_classes/src/subgraph/tensor_names.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2020 Intel Corporation +// Copyright (C) 2021 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // From af70e1083114481262e6ebd86a9f9f02810b0a6b Mon Sep 17 00:00:00 2001 From: Ilya Churaev Date: Wed, 13 Jan 2021 10:33:02 +0300 Subject: [PATCH 05/23] Update copyrights --- inference-engine/include/cpp/ie_cnn_network.h | 2 +- inference-engine/include/ie_icnn_network.hpp | 2 +- .../src/inference_engine/cnn_network_ngraph_impl.cpp | 2 +- .../src/inference_engine/cnn_network_ngraph_impl.hpp | 2 +- .../cpu/shared_tests_instances/subgraph_tests/tensor_names.cpp | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/inference-engine/include/cpp/ie_cnn_network.h b/inference-engine/include/cpp/ie_cnn_network.h index 69b7891410ffde..ed5fc910dcda1a 100644 --- a/inference-engine/include/cpp/ie_cnn_network.h +++ b/inference-engine/include/cpp/ie_cnn_network.h @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2020 Intel Corporation +// Copyright (C) 2018-2021 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/inference-engine/include/ie_icnn_network.hpp b/inference-engine/include/ie_icnn_network.hpp index ba629e74e684c4..3b24934b9d4d94 100644 --- a/inference-engine/include/ie_icnn_network.hpp +++ b/inference-engine/include/ie_icnn_network.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2020 Intel Corporation +// Copyright (C) 2018-2021 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/inference-engine/src/inference_engine/cnn_network_ngraph_impl.cpp 
b/inference-engine/src/inference_engine/cnn_network_ngraph_impl.cpp index 434b37e3c14d94..46b5505962770e 100644 --- a/inference-engine/src/inference_engine/cnn_network_ngraph_impl.cpp +++ b/inference-engine/src/inference_engine/cnn_network_ngraph_impl.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2020 Intel Corporation +// Copyright (C) 2018-2021 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/inference-engine/src/inference_engine/cnn_network_ngraph_impl.hpp b/inference-engine/src/inference_engine/cnn_network_ngraph_impl.hpp index 0fb80a5fca36b2..14979eaaae23aa 100644 --- a/inference-engine/src/inference_engine/cnn_network_ngraph_impl.hpp +++ b/inference-engine/src/inference_engine/cnn_network_ngraph_impl.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2020 Intel Corporation +// Copyright (C) 2018-2021 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/subgraph_tests/tensor_names.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/subgraph_tests/tensor_names.cpp index 72e5481bd0e6b4..99ceae1156ac85 100644 --- a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/subgraph_tests/tensor_names.cpp +++ b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/subgraph_tests/tensor_names.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2019 Intel Corporation +// Copyright (C) 2021 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // From f8833ba6f4b3a7b185494a656c5215bb7f1ae4f6 Mon Sep 17 00:00:00 2001 From: Ilya Churaev Date: Wed, 13 Jan 2021 11:22:34 +0300 Subject: [PATCH 06/23] Deprecate tensor name --- .../src/cldnn_engine/ops/result.cpp | 2 ++ .../src/cldnn_engine/ops/split.cpp | 2 ++ .../src/convert_function_to_cnn_network.cpp | 20 +++++++++++-------- .../control_flow/unroll_tensor_iterator.cpp | 8 +++++++- .../convert_ti_to_sequences.cpp | 10 ++++++++-- .../src/subgraph/tensor_names.cpp | 3 --- 
.../core/include/ngraph/descriptor/tensor.hpp | 2 ++ ngraph/core/include/ngraph/node.hpp | 2 ++ ngraph/core/include/ngraph/runtime/tensor.hpp | 1 + ngraph/core/src/descriptor/tensor.cpp | 14 ++++++++----- ngraph/core/src/graph_util.cpp | 2 ++ ngraph/core/src/node.cpp | 16 ++++++++------- ngraph/core/src/runtime/host_tensor.cpp | 4 ++++ ngraph/core/src/runtime/tensor.cpp | 2 ++ 14 files changed, 62 insertions(+), 26 deletions(-) diff --git a/inference-engine/src/cldnn_engine/ops/result.cpp b/inference-engine/src/cldnn_engine/ops/result.cpp index 56ad5e9f5c017a..536caf22eb7555 100644 --- a/inference-engine/src/cldnn_engine/ops/result.cpp +++ b/inference-engine/src/cldnn_engine/ops/result.cpp @@ -18,7 +18,9 @@ void CreateResultOp(Program& p, const std::shared_ptr& o p.ValidateInputs(op, {1}); auto prev = op->get_input_node_shared_ptr(0); + NGRAPH_SUPPRESS_DEPRECATED_START auto inputID = op->get_input_source_output(0).get_tensor().get_name(); + NGRAPH_SUPPRESS_DEPRECATED_END if (inputID.empty()) { inputID = prev->get_friendly_name(); if (prev->get_output_size() > 1) { diff --git a/inference-engine/src/cldnn_engine/ops/split.cpp b/inference-engine/src/cldnn_engine/ops/split.cpp index 65cbf59873b831..3639a3c583a2e5 100644 --- a/inference-engine/src/cldnn_engine/ops/split.cpp +++ b/inference-engine/src/cldnn_engine/ops/split.cpp @@ -24,6 +24,7 @@ void CreateCommonSplitOp(Program& p, const std::shared_ptr& op) { for (size_t i = 0; i < op->get_output_size(); i++) { std::string outLayerName = layerName + (is_single_out_split ? "" : "." 
+ std::to_string(i)); const auto outLayerDims = op->get_output_shape(i); + NGRAPH_SUPPRESS_DEPRECATED_START if (outLayerDims.size() != startOffset.size()) { THROW_IE_EXCEPTION << "Invalid dimesions in split layer: " << op->get_friendly_name() << " output: " << op->get_output_tensor_name(i); @@ -34,6 +35,7 @@ void CreateCommonSplitOp(Program& p, const std::shared_ptr& op) { << " output: " << op->get_output_tensor_name(i); } } + NGRAPH_SUPPRESS_DEPRECATED_END auto outTensor = CldnnTensorFromIEDims(outLayerDims, 1); auto offsetTensor = CldnnTensorFromIEDims(startOffset, 0); diff --git a/inference-engine/src/legacy_api/src/convert_function_to_cnn_network.cpp b/inference-engine/src/legacy_api/src/convert_function_to_cnn_network.cpp index 424d39d3c423ee..1c235939c996a0 100644 --- a/inference-engine/src/legacy_api/src/convert_function_to_cnn_network.cpp +++ b/inference-engine/src/legacy_api/src/convert_function_to_cnn_network.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2020 Intel Corporation +// Copyright (C) 2018-2021 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // @@ -349,7 +349,7 @@ class CNNLayerCreator : public ::ngraph::AttributeVisitor { void on_adapter(const std::string& name, ::ngraph::ValueAccessor& adapter) override { if (std::string(node->get_type_name()) != "Constant") { const auto data_beg = static_cast(adapter.get_ptr()); - params[name] = std::string(data_beg, adapter.size()); + params[name] = std::string(data_beg, adapter.size()); } } @@ -663,7 +663,7 @@ InferenceEngine::details::CNNLayerCreator::CNNLayerCreator(const std::shared_ptr } res->params["kernel"] = kernel_value; - const auto weightsNode = node->input_value(1).get_node_shared_ptr(); + const auto weightsNode = node->input_value(1).get_node_shared_ptr(); if (InferenceEngine::details::addBlob(weightsNode, res, InferenceEngine::details::weights)) { if (node->inputs().size() == 3) { const auto biasNode = node->input_value(2).get_node_shared_ptr(); @@ -752,7 +752,7 @@ 
InferenceEngine::details::CNNLayerCreator::CNNLayerCreator(const std::shared_ptr const auto biasNode = node->input_value(3).get_node_shared_ptr(); InferenceEngine::details::addBlob(biasNode, res, InferenceEngine::details::biases); - + return res; }); @@ -1317,12 +1317,12 @@ InferenceEngine::details::CNNLayerCreator::CNNLayerCreator(const std::shared_ptr return res; }); - addSpecificCreator({"NormalizeIE"}, [](const std::shared_ptr<::ngraph::Node> &node, + addSpecificCreator({"NormalizeIE"}, [](const std::shared_ptr<::ngraph::Node> &node, const std::map ¶ms) -> CNNLayerPtr { LayerParams attrs = {node->get_friendly_name(), "Normalize", details::convertPrecision(node->get_output_element_type(0))}; auto res = std::make_shared(attrs); - + res->params = params; res->params["channel_shared"] = res->getBoolStrParamAsIntStr("channel_shared"); res->params["across_spatial"] = res->getBoolStrParamAsIntStr("across_spatial"); @@ -1432,7 +1432,7 @@ InferenceEngine::details::CNNLayerCreator::CNNLayerCreator(const std::shared_ptr THROW_IE_EXCEPTION << "Interp do not support mode '" << interp_attrs.mode << "'"; } - bool align_corners; + bool align_corners; auto res = std::make_shared(attrs); res->params = params; @@ -1498,7 +1498,7 @@ InferenceEngine::details::CNNLayerCreator::CNNLayerCreator(const std::shared_ptr res->params.erase("auto_pad"); } - const auto weightsNode = node->input_value(1).get_node_shared_ptr(); + const auto weightsNode = node->input_value(1).get_node_shared_ptr(); if (!keep_constants && InferenceEngine::details::addBlob(weightsNode, res, InferenceEngine::details::weights)) { if (node->inputs().size() == 3) { const auto biasNode = node->input_value(2).get_node_shared_ptr(); @@ -1849,7 +1849,9 @@ void convertFunctionToICNNNetwork(const std::shared_ptroutData.clear(); continue; } + NGRAPH_SUPPRESS_DEPRECATED_START auto outName = layer->output(i).get_tensor().get_name(); + NGRAPH_SUPPRESS_DEPRECATED_END if (outName.empty()) { outName = 
ngraph::op::util::create_ie_output_name(layer->output(i)); } @@ -1903,7 +1905,9 @@ void convertFunctionToICNNNetwork(const std::shared_ptr(layer)) { IE_ASSERT(layer->get_input_size() == 1); const auto &input = layer->input_value(0); + NGRAPH_SUPPRESS_DEPRECATED_START auto name = input.get_tensor().get_name(); + NGRAPH_SUPPRESS_DEPRECATED_END if (!name.empty()) cnnNetworkImpl->addOutput(name); else diff --git a/inference-engine/src/transformations/src/transformations/control_flow/unroll_tensor_iterator.cpp b/inference-engine/src/transformations/src/transformations/control_flow/unroll_tensor_iterator.cpp index 40b062981b55d3..7c1f9f9a151c88 100644 --- a/inference-engine/src/transformations/src/transformations/control_flow/unroll_tensor_iterator.cpp +++ b/inference-engine/src/transformations/src/transformations/control_flow/unroll_tensor_iterator.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2020 Intel Corporation +// Copyright (C) 2021 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // @@ -127,8 +127,10 @@ bool ngraph::pass::UnrollTensorIterator::run_on_function(std::shared_ptroutput(0).get_tensor().set_name( op::util::create_ie_output_name(ti->output(concat_desc->m_output_index))); + NGRAPH_SUPPRESS_DEPRECATED_END // connect the Concat layer to the corresponding TI outputs for (auto &input : ti->output(concat_desc->m_output_index).get_target_inputs()) { input.replace_source_output(concat); @@ -138,7 +140,9 @@ bool ngraph::pass::UnrollTensorIterator::run_on_function(std::shared_ptr result = body_functions[0]->get_results().at(concat_desc->m_body_value_index); const auto& input_to_res = result->get_input_source_output(0); // set output name to Tensor to store it for ngraph to cnn conversion + NGRAPH_SUPPRESS_DEPRECATED_START input_to_res.get_tensor().set_name(op::util::create_ie_output_name(ti->output(concat_desc->m_output_index))); + NGRAPH_SUPPRESS_DEPRECATED_END for (auto &input : ti->output(concat_desc->m_output_index).get_target_inputs()) { 
input.replace_source_output(input_to_res); } @@ -151,7 +155,9 @@ bool ngraph::pass::UnrollTensorIterator::run_on_function(std::shared_ptrinput_value(0); // set output name to Tensor to store it for ngraph to cnn conversion + NGRAPH_SUPPRESS_DEPRECATED_START in_value.get_tensor().set_name(op::util::create_ie_output_name(ti->output(output_desc->m_output_index))); + NGRAPH_SUPPRESS_DEPRECATED_END for (const auto &input : ti->output(output_desc->m_output_index).get_target_inputs()) { input.replace_source_output(result->get_input_source_output(0)); } diff --git a/inference-engine/src/transformations/src/transformations/op_conversions/convert_ti_to_sequences.cpp b/inference-engine/src/transformations/src/transformations/op_conversions/convert_ti_to_sequences.cpp index 8168f18360d444..9b27b4fccbdaf0 100644 --- a/inference-engine/src/transformations/src/transformations/op_conversions/convert_ti_to_sequences.cpp +++ b/inference-engine/src/transformations/src/transformations/op_conversions/convert_ti_to_sequences.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2020 Intel Corporation +// Copyright (C) 2021 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // @@ -178,7 +178,9 @@ ngraph::pass::ConvertTensorIteratorToLSTMSequence::ConvertTensorIteratorToLSTMSe for (const auto &input : ti->output(ordered_out_descs[i]->m_output_index).get_target_inputs()) { input.replace_source_output(outputs[i]->output(0)); } + NGRAPH_SUPPRESS_DEPRECATED_START outputs[i]->get_output_tensor(0).set_name(op::util::create_ie_output_name(ti->output(ordered_out_descs[i]->m_output_index))); + NGRAPH_SUPPRESS_DEPRECATED_END } } @@ -331,7 +333,9 @@ ngraph::pass::ConvertTensorIteratorToRNNSequence::ConvertTensorIteratorToRNNSequ for (const auto &input : ti->output(ordered_out_descs[i]->m_output_index).get_target_inputs()) { input.replace_source_output(outputs[i]->output(0)); } + NGRAPH_SUPPRESS_DEPRECATED_START 
outputs[i]->get_output_tensor(0).set_name(op::util::create_ie_output_name(ti->output(ordered_out_descs[i]->m_output_index))); + NGRAPH_SUPPRESS_DEPRECATED_END } } @@ -485,7 +489,9 @@ ngraph::pass::ConvertTensorIteratorToGRUSequence::ConvertTensorIteratorToGRUSequ for (const auto &input : ti->output(ordered_out_descs[i]->m_output_index).get_target_inputs()) { input.replace_source_output(outputs[i]->output(0)); } + NGRAPH_SUPPRESS_DEPRECATED_START outputs[i]->get_output_tensor(0).set_name(op::util::create_ie_output_name(ti->output(ordered_out_descs[i]->m_output_index))); + NGRAPH_SUPPRESS_DEPRECATED_END } } @@ -500,4 +506,4 @@ ngraph::pass::ConvertTensorIteratorToGRUSequence::ConvertTensorIteratorToGRUSequ auto m = std::make_shared(tensor_iterator, "ConvertTensorIteratorToGRUSequence"); register_matcher(m, callback); -} \ No newline at end of file +} diff --git a/inference-engine/tests/functional/shared_test_classes/src/subgraph/tensor_names.cpp b/inference-engine/tests/functional/shared_test_classes/src/subgraph/tensor_names.cpp index 2840c6e8d996f5..a348e7d1128126 100644 --- a/inference-engine/tests/functional/shared_test_classes/src/subgraph/tensor_names.cpp +++ b/inference-engine/tests/functional/shared_test_classes/src/subgraph/tensor_names.cpp @@ -33,11 +33,8 @@ void TensorNamesTest::SetUp() { relu->output(0).set_names({"relu_t", "identity"}); const ngraph::ResultVector results{std::make_shared(relu)}; results[0]->set_friendly_name("out"); - results[0]->get_output_tensor(0).set_name("out_t"); ngraph::ParameterVector params{parameter}; function = std::make_shared(results, params, "TensorNames"); } } // namespace SubgraphTestsDefinitions - - diff --git a/ngraph/core/include/ngraph/descriptor/tensor.hpp b/ngraph/core/include/ngraph/descriptor/tensor.hpp index 123b2ec507b66f..b0e9aca6d66659 100644 --- a/ngraph/core/include/ngraph/descriptor/tensor.hpp +++ b/ngraph/core/include/ngraph/descriptor/tensor.hpp @@ -44,7 +44,9 @@ namespace ngraph Node* node, size_t 
node_output_number); + NGRAPH_DEPRECATED("Only output ports have names") const std::string& get_name() const; + NGRAPH_DEPRECATED("Only output ports have names") void set_name(const std::string& name); void set_tensor_type(const element::Type& element_type, const PartialShape& pshape); void set_element_type(const element::Type& elemenet_type); diff --git a/ngraph/core/include/ngraph/node.hpp b/ngraph/core/include/ngraph/node.hpp index 628a9c26866bea..75ab10e67f492e 100644 --- a/ngraph/core/include/ngraph/node.hpp +++ b/ngraph/core/include/ngraph/node.hpp @@ -327,6 +327,7 @@ namespace ngraph descriptor::Tensor& get_input_tensor(size_t i) const; /// Returns the tensor name for output i + NGRAPH_DEPRECATED("Tensor names were deprecated. Please use output names instead.") const std::string& get_output_tensor_name(size_t i) const; std::set> get_output_target_inputs(size_t i) const; @@ -347,6 +348,7 @@ namespace ngraph const PartialShape& get_input_partial_shape(size_t i) const; /// Returns the tensor name for input i + NGRAPH_DEPRECATED("Tensor names were deprecated.") const std::string& get_input_tensor_name(size_t i) const; std::unordered_set liveness_new_list; diff --git a/ngraph/core/include/ngraph/runtime/tensor.hpp b/ngraph/core/include/ngraph/runtime/tensor.hpp index 8985957faab24d..9e83c3a3f61072 100644 --- a/ngraph/core/include/ngraph/runtime/tensor.hpp +++ b/ngraph/core/include/ngraph/runtime/tensor.hpp @@ -63,6 +63,7 @@ namespace ngraph /// \brief Get tensor's unique name /// \return tensor's name + NGRAPH_DEPRECATED("Only output ports have names") const std::string& get_name() const; /// \brief Get the stale value of the tensor. 
A tensor is stale if its data is diff --git a/ngraph/core/src/descriptor/tensor.cpp b/ngraph/core/src/descriptor/tensor.cpp index 9669e9e3b8d6f9..f84bc79bdecab0 100644 --- a/ngraph/core/src/descriptor/tensor.cpp +++ b/ngraph/core/src/descriptor/tensor.cpp @@ -42,11 +42,6 @@ descriptor::Tensor::Tensor(const element::Type& element_type, { } -void descriptor::Tensor::set_name(const string& name) -{ - m_name = name; -} - void descriptor::Tensor::set_tensor_type(const element::Type& element_type, const PartialShape& pshape) { @@ -90,6 +85,13 @@ size_t descriptor::Tensor::size() const return shape_size(get_shape()) * m_element_type.size(); } +NGRAPH_SUPPRESS_DEPRECATED_START + +void descriptor::Tensor::set_name(const string& name) +{ + m_name = name; +} + const std::string& descriptor::Tensor::get_name() const { return m_name; @@ -100,3 +102,5 @@ ostream& operator<<(ostream& out, const descriptor::Tensor& tensor) out << "Tensor(" << tensor.get_name() << ")"; return out; } + +NGRAPH_SUPPRESS_DEPRECATED_END diff --git a/ngraph/core/src/graph_util.cpp b/ngraph/core/src/graph_util.cpp index bee2c68c0e3b48..fc011de40a54ac 100644 --- a/ngraph/core/src/graph_util.cpp +++ b/ngraph/core/src/graph_util.cpp @@ -924,7 +924,9 @@ bool ngraph::replace_output_update_name(Output output, const Output& { replacement.get_node()->set_friendly_name(output.get_node()->get_friendly_name()); // Update output tensor name + NGRAPH_SUPPRESS_DEPRECATED_START replacement.get_tensor().set_name(output.get_node()->get_friendly_name()); + NGRAPH_SUPPRESS_DEPRECATED_END } output.replace(replacement); copy_runtime_info({replacement.get_node_shared_ptr(), output.get_node_shared_ptr()}, diff --git a/ngraph/core/src/node.cpp b/ngraph/core/src/node.cpp index cadc8c1c3f8516..a7312333256e70 100644 --- a/ngraph/core/src/node.cpp +++ b/ngraph/core/src/node.cpp @@ -662,13 +662,6 @@ descriptor::Tensor& Node::get_input_tensor(size_t i) const return input.get_tensor(); } -const string& 
Node::get_output_tensor_name(size_t i) const -{ - NGRAPH_CHECK( - i < m_outputs.size(), "index '", i, "' out of range in get_output_tensor_name(size_t i)"); - return m_outputs[i].get_tensor().get_name(); -} - size_t Node::get_input_size() const { return m_inputs.size(); @@ -694,6 +687,7 @@ const PartialShape& Node::get_input_partial_shape(size_t i) const return m_inputs[i].get_partial_shape(); } +NGRAPH_SUPPRESS_DEPRECATED_START const string& Node::get_input_tensor_name(size_t i) const { NGRAPH_CHECK( @@ -701,6 +695,14 @@ const string& Node::get_input_tensor_name(size_t i) const return m_inputs[i].get_tensor().get_name(); } +const string& Node::get_output_tensor_name(size_t i) const +{ + NGRAPH_CHECK( + i < m_outputs.size(), "index '", i, "' out of range in get_output_tensor_name(size_t i)"); + return m_outputs[i].get_tensor().get_name(); +} +NGRAPH_SUPPRESS_DEPRECATED_END + bool Node::has_same_type(std::shared_ptr node) const { if (get_output_size() != node->get_output_size()) diff --git a/ngraph/core/src/runtime/host_tensor.cpp b/ngraph/core/src/runtime/host_tensor.cpp index 4a92e33e307f49..c1e432f321a397 100644 --- a/ngraph/core/src/runtime/host_tensor.cpp +++ b/ngraph/core/src/runtime/host_tensor.cpp @@ -66,10 +66,12 @@ runtime::HostTensor::HostTensor(const std::string& name) { } +NGRAPH_SUPPRESS_DEPRECATED_START runtime::HostTensor::HostTensor(const Output& value) : HostTensor(value.get_element_type(), value.get_partial_shape(), value.get_tensor().get_name()) { } +NGRAPH_SUPPRESS_DEPRECATED_END void runtime::HostTensor::allocate_buffer() { @@ -102,11 +104,13 @@ void runtime::HostTensor::allocate_buffer() } } +NGRAPH_SUPPRESS_DEPRECATED_START runtime::HostTensor::HostTensor(const std::shared_ptr& constant) : HostTensor(constant->output(0).get_tensor().get_name()) { initialize(constant); } +NGRAPH_SUPPRESS_DEPRECATED_END void runtime::HostTensor::initialize(const std::shared_ptr& constant) { diff --git a/ngraph/core/src/runtime/tensor.cpp 
b/ngraph/core/src/runtime/tensor.cpp index e5da131c7f3781..21e9c328a24d16 100644 --- a/ngraph/core/src/runtime/tensor.cpp +++ b/ngraph/core/src/runtime/tensor.cpp @@ -49,7 +49,9 @@ size_t runtime::Tensor::get_size_in_bytes() const const std::string& runtime::Tensor::get_name() const { + NGRAPH_SUPPRESS_DEPRECATED_START return m_descriptor->get_name(); + NGRAPH_SUPPRESS_DEPRECATED_END } bool runtime::Tensor::get_stale() const From 3bcf085cc47fbea41614967d9dfd2e5d8c5b7135 Mon Sep 17 00:00:00 2001 From: Ilya Churaev Date: Wed, 13 Jan 2021 23:33:08 +0300 Subject: [PATCH 07/23] Fixed comments --- ngraph/core/include/ngraph/descriptor/output.hpp | 2 +- ngraph/core/src/descriptor/output.cpp | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/ngraph/core/include/ngraph/descriptor/output.hpp b/ngraph/core/include/ngraph/descriptor/output.hpp index 18ee4838a7e355..889dbc1e701d3f 100644 --- a/ngraph/core/include/ngraph/descriptor/output.hpp +++ b/ngraph/core/include/ngraph/descriptor/output.hpp @@ -84,7 +84,7 @@ namespace ngraph size_t m_index; std::shared_ptr m_tensor; std::vector m_inputs; - std::unordered_set names; + std::unordered_set m_names; }; } } diff --git a/ngraph/core/src/descriptor/output.cpp b/ngraph/core/src/descriptor/output.cpp index 535c4f47943ce1..25f4fb606f8ac9 100644 --- a/ngraph/core/src/descriptor/output.cpp +++ b/ngraph/core/src/descriptor/output.cpp @@ -81,10 +81,10 @@ const element::Type& descriptor::Output::get_element_type() const const std::unordered_set& descriptor::Output::get_names() const { - return names; + return m_names; } void descriptor::Output::set_names(const std::unordered_set& names) { - this->names = names; + m_names = names; } From c425dd29cbbe94b6fe43ec13c68586bbee9bbb62 Mon Sep 17 00:00:00 2001 From: Ilya Churaev Date: Wed, 13 Jan 2021 23:39:03 +0300 Subject: [PATCH 08/23] Enabled functional tests for GPU, GNA and Myriad --- .../subgraph_tests/tensor_names.cpp | 17 +++++++++++++++++ 
.../subgraph_tests/tensor_names.cpp | 18 ++++++++++++++++++ .../subgraph_tests/tensor_names.cpp | 19 +++++++++++++++++++ 3 files changed, 54 insertions(+) create mode 100644 inference-engine/tests/functional/plugin/gna/shared_tests_instances/subgraph_tests/tensor_names.cpp create mode 100644 inference-engine/tests/functional/plugin/gpu/shared_tests_instances/subgraph_tests/tensor_names.cpp create mode 100644 inference-engine/tests/functional/plugin/myriad/shared_tests_instances/subgraph_tests/tensor_names.cpp diff --git a/inference-engine/tests/functional/plugin/gna/shared_tests_instances/subgraph_tests/tensor_names.cpp b/inference-engine/tests/functional/plugin/gna/shared_tests_instances/subgraph_tests/tensor_names.cpp new file mode 100644 index 00000000000000..0729a36e9a4dc6 --- /dev/null +++ b/inference-engine/tests/functional/plugin/gna/shared_tests_instances/subgraph_tests/tensor_names.cpp @@ -0,0 +1,17 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include "subgraph_tests/tensor_names.hpp" +#include "common_test_utils/test_constants.hpp" + +using namespace SubgraphTestsDefinitions; + +namespace { + INSTANTIATE_TEST_CASE_P(smoke_Check, TensorNamesTest, + ::testing::Values(CommonTestUtils::DEVICE_GNA), + TensorNamesTest::getTestCaseName); +} // namespace + diff --git a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/subgraph_tests/tensor_names.cpp b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/subgraph_tests/tensor_names.cpp new file mode 100644 index 00000000000000..b5258c33fd5e89 --- /dev/null +++ b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/subgraph_tests/tensor_names.cpp @@ -0,0 +1,18 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include "subgraph_tests/tensor_names.hpp" +#include "common_test_utils/test_constants.hpp" + +using namespace SubgraphTestsDefinitions; + +namespace { 
+ INSTANTIATE_TEST_CASE_P(smoke_Check, TensorNamesTest, + ::testing::Values(CommonTestUtils::DEVICE_GPU), + TensorNamesTest::getTestCaseName); +} // namespace + + diff --git a/inference-engine/tests/functional/plugin/myriad/shared_tests_instances/subgraph_tests/tensor_names.cpp b/inference-engine/tests/functional/plugin/myriad/shared_tests_instances/subgraph_tests/tensor_names.cpp new file mode 100644 index 00000000000000..93e978ab427b07 --- /dev/null +++ b/inference-engine/tests/functional/plugin/myriad/shared_tests_instances/subgraph_tests/tensor_names.cpp @@ -0,0 +1,19 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include "subgraph_tests/tensor_names.hpp" +#include "common_test_utils/test_constants.hpp" + +using namespace SubgraphTestsDefinitions; + +namespace { + INSTANTIATE_TEST_CASE_P(smoke_Check, TensorNamesTest, + ::testing::Values(CommonTestUtils::DEVICE_MYRIAD), + TensorNamesTest::getTestCaseName); +} // namespace + + + From fa08d64b2101d4be205bb8e882028b3c7d212987 Mon Sep 17 00:00:00 2001 From: Ilya Churaev Date: Mon, 18 Jan 2021 10:15:44 +0300 Subject: [PATCH 09/23] Fixed get_tensor().get_names() --- .../cnn_network_ngraph_impl.cpp | 4 +-- .../src/readers/ir_reader/ie_ir_parser.cpp | 2 +- .../include/subgraph_tests/tensor_names.hpp | 29 ++++++++++++------- .../src/subgraph/tensor_names.cpp | 6 ++-- .../core/include/ngraph/descriptor/output.hpp | 4 --- .../core/include/ngraph/descriptor/tensor.hpp | 9 ++++-- ngraph/core/include/ngraph/node.hpp | 4 +-- ngraph/core/include/ngraph/node_output.hpp | 5 ---- ngraph/core/src/descriptor/output.cpp | 10 ------- ngraph/core/src/descriptor/tensor.cpp | 25 +++++++++++++--- ngraph/core/src/node.cpp | 2 +- ngraph/core/src/node_output.cpp | 14 --------- 12 files changed, 56 insertions(+), 58 deletions(-) diff --git a/inference-engine/src/inference_engine/cnn_network_ngraph_impl.cpp b/inference-engine/src/inference_engine/cnn_network_ngraph_impl.cpp index 
46b5505962770e..9fed3251f5ac68 100644 --- a/inference-engine/src/inference_engine/cnn_network_ngraph_impl.cpp +++ b/inference-engine/src/inference_engine/cnn_network_ngraph_impl.cpp @@ -142,7 +142,7 @@ CNNNetworkNGraphImpl::CNNNetworkNGraphImpl( // map original names to OpenVINO name _opNames[outName] = outName; - for (const auto& name : layer->output(0).get_names()) { + for (const auto& name : layer->get_output_tensor(0).get_names()) { _tensorNames[name] = outName; } @@ -266,7 +266,7 @@ void CNNNetworkNGraphImpl::addOutput(const ::ngraph::Output<::ngraph::Node> & ou _outputData[dataName] = data; // Save original framework names - for (const auto& name : output.get_names()) { + for (const auto& name : output.get_tensor().get_names()) { _tensorNames[name] = dataName; } for (const auto consumerInput : output.get_target_inputs()) { diff --git a/inference-engine/src/readers/ir_reader/ie_ir_parser.cpp b/inference-engine/src/readers/ir_reader/ie_ir_parser.cpp index 7e1a727c982e28..5ed47508a326bb 100644 --- a/inference-engine/src/readers/ir_reader/ie_ir_parser.cpp +++ b/inference-engine/src/readers/ir_reader/ie_ir_parser.cpp @@ -780,7 +780,7 @@ std::shared_ptr V10Parser::XmlDeserializer::createNode( ngraphNode->set_friendly_name(params.name); for (size_t i = 0; i < params.outputPorts.size() && i < ngraphNode->get_output_size(); ++i) { if (!params.outputPorts[i].names.empty()) - ngraphNode->output(i).set_names(params.outputPorts[i].names); + ngraphNode->get_output_tensor(i).set_names(params.outputPorts[i].names); } return ngraphNode; diff --git a/inference-engine/tests/functional/plugin/shared/include/subgraph_tests/tensor_names.hpp b/inference-engine/tests/functional/plugin/shared/include/subgraph_tests/tensor_names.hpp index 93d5a123f8832a..f8f52ef199ed65 100644 --- a/inference-engine/tests/functional/plugin/shared/include/subgraph_tests/tensor_names.hpp +++ b/inference-engine/tests/functional/plugin/shared/include/subgraph_tests/tensor_names.hpp @@ -24,13 +24,13 @@ 
TEST_P(TensorNamesTest, CheckTensorNames) { for (const auto& param : function->get_parameters()) { ASSERT_TRUE(inNames.count(cnnNetwork.getOVNameForOperation(param->get_friendly_name()))); - for (const auto& name : param->output(0).get_names()) + for (const auto& name : param->get_output_tensor(0).get_names()) ASSERT_TRUE(inNames.count(cnnNetwork.getOVNameForTensor(name))); } for (const auto& result : function->get_results()) { ASSERT_TRUE(outNames.count(cnnNetwork.getOVNameForOperation(result->get_friendly_name()))); - for (const auto& name : result->input_value(0).get_names()) + for (const auto& name : result->input_value(0).get_tensor().get_names()) ASSERT_TRUE(outNames.count(cnnNetwork.getOVNameForTensor(name))); } @@ -39,14 +39,17 @@ TEST_P(TensorNamesTest, CheckTensorNames) { for (const auto& param : function->get_parameters()) { ASSERT_NO_THROW(inferRequest.GetBlob(cnnNetwork.getOVNameForOperation(param->get_friendly_name()))); - for (const auto& name : param->output(0).get_names()) + for (const auto& name : param->get_output_tensor(0).get_names()) ASSERT_NO_THROW(inferRequest.GetBlob(cnnNetwork.getOVNameForTensor(name))); } for (const auto& result : function->get_results()) { ASSERT_NO_THROW(inferRequest.GetBlob(cnnNetwork.getOVNameForOperation(result->get_friendly_name()))); - for (const auto& name : result->output(0).get_names()) + std::cout << "AAAA AAAAA AAAAA" << std::endl; + for (const auto& name : result->get_input_tensor(0).get_names()) { + std::cout << "BBB BBB " << name << std::endl; ASSERT_NO_THROW(inferRequest.GetBlob(cnnNetwork.getOVNameForTensor(name))); + } } } @@ -66,14 +69,18 @@ TEST_P(TensorNamesTest, CheckTensorNamesAfterClone) { for (const auto& param : function->get_parameters()) { ASSERT_TRUE(inNames.count(clonedNet.getOVNameForOperation(param->get_friendly_name()))); - for (const auto& name : param->output(0).get_names()) + for (const auto& name : param->get_output_tensor(0).get_names()) 
ASSERT_TRUE(inNames.count(clonedNet.getOVNameForTensor(name))); } for (const auto& result : function->get_results()) { ASSERT_TRUE(outNames.count(clonedNet.getOVNameForOperation(result->get_friendly_name()))); - for (const auto& name : result->input_value(0).get_names()) + + std::cout << "AAAA22 AAAAA AAAAA" << std::endl; + for (const auto& name : result->get_input_tensor(0).get_names()) { + std::cout << "BBB BBB 222 " << name << std::endl; ASSERT_TRUE(outNames.count(clonedNet.getOVNameForTensor(name))); + } } executableNetwork = core->LoadNetwork(clonedNet, targetDevice, configuration); @@ -81,13 +88,13 @@ TEST_P(TensorNamesTest, CheckTensorNamesAfterClone) { for (const auto& param : function->get_parameters()) { ASSERT_NO_THROW(inferRequest.GetBlob(clonedNet.getOVNameForOperation(param->get_friendly_name()))); - for (const auto& name : param->output(0).get_names()) + for (const auto& name : param->get_output_tensor(0).get_names()) ASSERT_NO_THROW(inferRequest.GetBlob(clonedNet.getOVNameForTensor(name))); } for (const auto& result : function->get_results()) { ASSERT_NO_THROW(inferRequest.GetBlob(clonedNet.getOVNameForOperation(result->get_friendly_name()))); - for (const auto& name : result->output(0).get_names()) + for (const auto& name : result->input_value(0).get_tensor().get_names()) ASSERT_NO_THROW(inferRequest.GetBlob(clonedNet.getOVNameForTensor(name))); } } @@ -148,14 +155,16 @@ TEST_P(TensorNamesTest, CheckAddOutput) { for (const auto& param : cnnNetwork.getFunction()->get_parameters()) { ASSERT_NO_THROW(inferRequest.GetBlob(cnnNetwork.getOVNameForOperation(param->get_friendly_name()))); - for (const auto& name : param->output(0).get_names()) + for (const auto& name : param->get_output_tensor(0).get_names()) ASSERT_NO_THROW(inferRequest.GetBlob(cnnNetwork.getOVNameForTensor(name))); } for (const auto& result : cnnNetwork.getFunction()->get_results()) { ASSERT_NO_THROW(inferRequest.GetBlob(cnnNetwork.getOVNameForOperation(result->get_friendly_name()))); - 
for (const auto& name : result->output(0).get_names()) + for (const auto& name : result->get_input_tensor(0).get_names()) { + std::cout << "3333333333 " << name << std::endl; ASSERT_NO_THROW(inferRequest.GetBlob(cnnNetwork.getOVNameForTensor(name))); + } } } diff --git a/inference-engine/tests/functional/shared_test_classes/src/subgraph/tensor_names.cpp b/inference-engine/tests/functional/shared_test_classes/src/subgraph/tensor_names.cpp index a348e7d1128126..acde2d43d3060d 100644 --- a/inference-engine/tests/functional/shared_test_classes/src/subgraph/tensor_names.cpp +++ b/inference-engine/tests/functional/shared_test_classes/src/subgraph/tensor_names.cpp @@ -24,13 +24,13 @@ void TensorNamesTest::SetUp() { auto parameter = std::make_shared(ngraph::element::Type_t::f32, ngraph::Shape{1, 3, 10, 10}); parameter->set_friendly_name("parameter"); - parameter->output(0).set_names({"input"}); + parameter->get_output_tensor(0).set_names({"input"}); auto relu_prev = std::make_shared(parameter); relu_prev->set_friendly_name("relu_prev"); - relu_prev->output(0).set_names({"relu_prev_t", "identity_prev_t"}); + relu_prev->get_output_tensor(0).set_names({"relu_prev_t", "identity_prev_t"}); auto relu = std::make_shared(relu_prev); relu->set_friendly_name("relu"); - relu->output(0).set_names({"relu_t", "identity"}); + relu->get_output_tensor(0).set_names({"relu_t", "identity"}); const ngraph::ResultVector results{std::make_shared(relu)}; results[0]->set_friendly_name("out"); ngraph::ParameterVector params{parameter}; diff --git a/ngraph/core/include/ngraph/descriptor/output.hpp b/ngraph/core/include/ngraph/descriptor/output.hpp index 889dbc1e701d3f..611961c5e3e3bc 100644 --- a/ngraph/core/include/ngraph/descriptor/output.hpp +++ b/ngraph/core/include/ngraph/descriptor/output.hpp @@ -72,9 +72,6 @@ namespace ngraph /// \return the element type of the output const element::Type& get_element_type() const; - const std::unordered_set& get_names() const; - void set_names(const 
std::unordered_set& names); - Output(const Output&) = default; Output(Output&&) = default; Output& operator=(const Output&) = default; @@ -84,7 +81,6 @@ namespace ngraph size_t m_index; std::shared_ptr m_tensor; std::vector m_inputs; - std::unordered_set m_names; }; } } diff --git a/ngraph/core/include/ngraph/descriptor/tensor.hpp b/ngraph/core/include/ngraph/descriptor/tensor.hpp index b0e9aca6d66659..fcb527a4381edf 100644 --- a/ngraph/core/include/ngraph/descriptor/tensor.hpp +++ b/ngraph/core/include/ngraph/descriptor/tensor.hpp @@ -18,6 +18,7 @@ #include #include +#include #include "ngraph/partial_shape.hpp" #include "ngraph/shape.hpp" @@ -44,10 +45,13 @@ namespace ngraph Node* node, size_t node_output_number); - NGRAPH_DEPRECATED("Only output ports have names") + NGRAPH_DEPRECATED("get_name() is deprecated! Please use get_names() instead.") const std::string& get_name() const; - NGRAPH_DEPRECATED("Only output ports have names") + NGRAPH_DEPRECATED("set_name() is deprecated! Please use set_names() instead.") void set_name(const std::string& name); + + const std::unordered_set& get_names() const; + void set_names(const std::unordered_set& names); void set_tensor_type(const element::Type& element_type, const PartialShape& pshape); void set_element_type(const element::Type& elemenet_type); void set_partial_shape(const PartialShape& partial_shape); @@ -70,6 +74,7 @@ namespace ngraph size_t m_node_output_number{0}; std::string m_name; + std::unordered_set m_names; }; NGRAPH_API diff --git a/ngraph/core/include/ngraph/node.hpp b/ngraph/core/include/ngraph/node.hpp index 75ab10e67f492e..a2e4bc93b48626 100644 --- a/ngraph/core/include/ngraph/node.hpp +++ b/ngraph/core/include/ngraph/node.hpp @@ -327,7 +327,7 @@ namespace ngraph descriptor::Tensor& get_input_tensor(size_t i) const; /// Returns the tensor name for output i - NGRAPH_DEPRECATED("Tensor names were deprecated. Please use output names instead.") + NGRAPH_DEPRECATED("The tensor name was deprecated. 
Use get_output_tensor(i).get_names() instead.") const std::string& get_output_tensor_name(size_t i) const; std::set> get_output_target_inputs(size_t i) const; @@ -348,7 +348,7 @@ namespace ngraph const PartialShape& get_input_partial_shape(size_t i) const; /// Returns the tensor name for input i - NGRAPH_DEPRECATED("Tensor names were deprecated.") + NGRAPH_DEPRECATED("The tensor name was deprecated. Use get_input_tensor(i).get_names() instead.") const std::string& get_input_tensor_name(size_t i) const; std::unordered_set liveness_new_list; diff --git a/ngraph/core/include/ngraph/node_output.hpp b/ngraph/core/include/ngraph/node_output.hpp index d645aa8c46c28f..bcaed7812d3b2e 100644 --- a/ngraph/core/include/ngraph/node_output.hpp +++ b/ngraph/core/include/ngraph/node_output.hpp @@ -101,9 +101,6 @@ namespace ngraph /// \brief Replace all users of this value with replacement void replace(const Output& replacement); - const std::unordered_set& get_names() const; - void set_names(const std::unordered_set& names); - bool operator==(const Output& other) const; bool operator!=(const Output& other) const; bool operator<(const Output& other) const; @@ -178,8 +175,6 @@ namespace ngraph bool operator<=(const Output& other) const; bool operator>=(const Output& other) const; - const std::unordered_set& get_names() const; - private: std::shared_ptr m_node; size_t m_index{0}; diff --git a/ngraph/core/src/descriptor/output.cpp b/ngraph/core/src/descriptor/output.cpp index 25f4fb606f8ac9..33aba702ccabd4 100644 --- a/ngraph/core/src/descriptor/output.cpp +++ b/ngraph/core/src/descriptor/output.cpp @@ -78,13 +78,3 @@ const element::Type& descriptor::Output::get_element_type() const { return m_tensor->get_element_type(); } - -const std::unordered_set& descriptor::Output::get_names() const -{ - return m_names; -} - -void descriptor::Output::set_names(const std::unordered_set& names) -{ - m_names = names; -} diff --git a/ngraph/core/src/descriptor/tensor.cpp 
b/ngraph/core/src/descriptor/tensor.cpp index f84bc79bdecab0..8a0a845ec0b3ca 100644 --- a/ngraph/core/src/descriptor/tensor.cpp +++ b/ngraph/core/src/descriptor/tensor.cpp @@ -86,7 +86,6 @@ size_t descriptor::Tensor::size() const } NGRAPH_SUPPRESS_DEPRECATED_START - void descriptor::Tensor::set_name(const string& name) { m_name = name; @@ -96,11 +95,29 @@ const std::string& descriptor::Tensor::get_name() const { return m_name; } +NGRAPH_SUPPRESS_DEPRECATED_END + +const std::unordered_set& descriptor::Tensor::get_names() const +{ + return m_names; +} + +void descriptor::Tensor::set_names(const std::unordered_set& names) +{ + m_names = names; +} ostream& operator<<(ostream& out, const descriptor::Tensor& tensor) { - out << "Tensor(" << tensor.get_name() << ")"; + std::string names; + for (const auto& name : tensor.get_names()) { + if (!names.empty()) + names += ", "; + names += name; + } + NGRAPH_SUPPRESS_DEPRECATED_START + if (names.empty()) names = tensor.get_name(); + NGRAPH_SUPPRESS_DEPRECATED_END + out << "Tensor(" << names << ")"; return out; } - -NGRAPH_SUPPRESS_DEPRECATED_END diff --git a/ngraph/core/src/node.cpp b/ngraph/core/src/node.cpp index a7312333256e70..6aad6d0d501a7a 100644 --- a/ngraph/core/src/node.cpp +++ b/ngraph/core/src/node.cpp @@ -145,7 +145,7 @@ std::shared_ptr } for (size_t i = 0; i < get_output_size(); i++) { - clone->output(i).set_names(output(i).get_names()); + clone->get_output_tensor(i).set_names(get_output_tensor(i).get_names()); } return clone; } diff --git a/ngraph/core/src/node_output.cpp b/ngraph/core/src/node_output.cpp index 75b43e98bfc889..c59987c0068e06 100644 --- a/ngraph/core/src/node_output.cpp +++ b/ngraph/core/src/node_output.cpp @@ -89,15 +89,6 @@ namespace ngraph } } - const std::unordered_set& Output::get_names() const - { - return m_node->m_outputs.at(m_index).get_names(); - } - void Output::set_names(const std::unordered_set& names) - { - m_node->m_outputs.at(m_index).set_names(names); - } - bool 
Output::operator==(const Output& other) const { return m_node == other.m_node && m_index == other.m_index; @@ -191,11 +182,6 @@ namespace ngraph << output.get_partial_shape(); } - const std::unordered_set& Output::get_names() const - { - return m_node->m_outputs.at(m_index).get_names(); - } - std::ostream& operator<<(std::ostream& out, const Output& output) { return output.get_node()->write_description(out, 0) << "[" << output.get_index() From 35858cf730acfe1ec9656214c5fd366ef3d0e7ef Mon Sep 17 00:00:00 2001 From: Ilya Churaev Date: Mon, 18 Jan 2021 10:27:05 +0300 Subject: [PATCH 10/23] Added unit test to check tensor names --- ngraph/test/tensor.cpp | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/ngraph/test/tensor.cpp b/ngraph/test/tensor.cpp index 216831dc0de8ce..c48fa2760623e9 100644 --- a/ngraph/test/tensor.cpp +++ b/ngraph/test/tensor.cpp @@ -26,6 +26,7 @@ #include "ngraph/pass/manager.hpp" #include "pass/liveness.hpp" #include "util/test_tools.hpp" +#include "ngraph/opsets/opset6.hpp" NGRAPH_SUPPRESS_DEPRECATED_START @@ -91,3 +92,20 @@ TEST(tensor, output_flag) EXPECT_TRUE(op::is_output(f0->get_output_op(i))); } } + +TEST(tensor, tensor_names) +{ + auto arg0 = make_shared(element::f32, Shape{1}); + arg0->set_friendly_name("data"); + arg0->get_output_tensor(0).set_names({"input"}); + + auto relu = make_shared(arg0); + relu->set_friendly_name("relu"); + relu->get_output_tensor(0).set_names({"relu_t", "identity"}); + auto f0 = make_shared(relu, ParameterVector{arg0}); + + ASSERT_EQ(arg0->get_output_tensor(0).get_names(), relu->get_input_tensor(0).get_names()); + ASSERT_EQ(arg0->get_output_tensor(0).get_names(), relu->input_value(0).get_tensor().get_names()); + ASSERT_EQ(f0->get_result()->get_input_tensor(0).get_names(), relu->get_output_tensor(0).get_names()); + ASSERT_EQ(f0->get_result()->input_value(0).get_tensor().get_names(), relu->get_output_tensor(0).get_names()); +} From 364dd34e173d727481485fd235208cebec5a4613 Mon Sep 17 00:00:00 
2001 From: Ilya Churaev Date: Mon, 18 Jan 2021 10:30:42 +0300 Subject: [PATCH 11/23] Fixed code style --- .../inference_engine/ngraph_reader/tensor_names.cpp | 8 ++++---- .../shared/include/subgraph_tests/tensor_names.hpp | 5 ----- ngraph/core/include/ngraph/node.hpp | 6 ++++-- ngraph/core/src/descriptor/tensor.cpp | 10 ++++++---- ngraph/test/tensor.cpp | 11 +++++++---- 5 files changed, 21 insertions(+), 19 deletions(-) diff --git a/inference-engine/tests/functional/inference_engine/ngraph_reader/tensor_names.cpp b/inference-engine/tests/functional/inference_engine/ngraph_reader/tensor_names.cpp index e5feb809dac2f9..2e3af86518d07b 100644 --- a/inference-engine/tests/functional/inference_engine/ngraph_reader/tensor_names.cpp +++ b/inference-engine/tests/functional/inference_engine/ngraph_reader/tensor_names.cpp @@ -75,15 +75,15 @@ TEST_F(NGraphReaderTests, ReadNetworkWithTensorNames) { for (const auto& param : function->get_parameters()) { ASSERT_TRUE(inNames.count(network.getOVNameForOperation(param->get_friendly_name()))); - ASSERT_TRUE(!param->output(0).get_names().empty()); - for (const auto& name : param->output(0).get_names()) + ASSERT_TRUE(!param->get_output_tensor(0).get_names().empty()); + for (const auto& name : param->get_output_tensor(0).get_names()) ASSERT_TRUE(inNames.count(network.getOVNameForTensor(name))); } for (const auto& result : function->get_results()) { ASSERT_TRUE(outNames.count(network.getOVNameForOperation(result->get_friendly_name()))); - ASSERT_TRUE(!result->input_value(0).get_names().empty()); - for (const auto& name : result->input_value(0).get_names()) + ASSERT_TRUE(!result->get_input_tensor(0).get_names().empty()); + for (const auto& name : result->get_input_tensor(0).get_names()) ASSERT_TRUE(outNames.count(network.getOVNameForTensor(name))); } } diff --git a/inference-engine/tests/functional/plugin/shared/include/subgraph_tests/tensor_names.hpp 
b/inference-engine/tests/functional/plugin/shared/include/subgraph_tests/tensor_names.hpp index f8f52ef199ed65..b6d793420f4fd1 100644 --- a/inference-engine/tests/functional/plugin/shared/include/subgraph_tests/tensor_names.hpp +++ b/inference-engine/tests/functional/plugin/shared/include/subgraph_tests/tensor_names.hpp @@ -45,9 +45,7 @@ TEST_P(TensorNamesTest, CheckTensorNames) { for (const auto& result : function->get_results()) { ASSERT_NO_THROW(inferRequest.GetBlob(cnnNetwork.getOVNameForOperation(result->get_friendly_name()))); - std::cout << "AAAA AAAAA AAAAA" << std::endl; for (const auto& name : result->get_input_tensor(0).get_names()) { - std::cout << "BBB BBB " << name << std::endl; ASSERT_NO_THROW(inferRequest.GetBlob(cnnNetwork.getOVNameForTensor(name))); } } @@ -76,9 +74,7 @@ TEST_P(TensorNamesTest, CheckTensorNamesAfterClone) { for (const auto& result : function->get_results()) { ASSERT_TRUE(outNames.count(clonedNet.getOVNameForOperation(result->get_friendly_name()))); - std::cout << "AAAA22 AAAAA AAAAA" << std::endl; for (const auto& name : result->get_input_tensor(0).get_names()) { - std::cout << "BBB BBB 222 " << name << std::endl; ASSERT_TRUE(outNames.count(clonedNet.getOVNameForTensor(name))); } } @@ -162,7 +158,6 @@ TEST_P(TensorNamesTest, CheckAddOutput) { for (const auto& result : cnnNetwork.getFunction()->get_results()) { ASSERT_NO_THROW(inferRequest.GetBlob(cnnNetwork.getOVNameForOperation(result->get_friendly_name()))); for (const auto& name : result->get_input_tensor(0).get_names()) { - std::cout << "3333333333 " << name << std::endl; ASSERT_NO_THROW(inferRequest.GetBlob(cnnNetwork.getOVNameForTensor(name))); } } diff --git a/ngraph/core/include/ngraph/node.hpp b/ngraph/core/include/ngraph/node.hpp index a2e4bc93b48626..f195dfbdbacaa7 100644 --- a/ngraph/core/include/ngraph/node.hpp +++ b/ngraph/core/include/ngraph/node.hpp @@ -327,7 +327,8 @@ namespace ngraph descriptor::Tensor& get_input_tensor(size_t i) const; /// Returns the tensor 
name for output i - NGRAPH_DEPRECATED("The tensor name was deprecated. Use get_output_tensor(i).get_names() instead.") + NGRAPH_DEPRECATED( + "The tensor name was deprecated. Use get_output_tensor(i).get_names() instead.") const std::string& get_output_tensor_name(size_t i) const; std::set> get_output_target_inputs(size_t i) const; @@ -348,7 +349,8 @@ namespace ngraph const PartialShape& get_input_partial_shape(size_t i) const; /// Returns the tensor name for input i - NGRAPH_DEPRECATED("The tensor name was deprecated. Use get_input_tensor(i).get_names() instead.") + NGRAPH_DEPRECATED( + "The tensor name was deprecated. Use get_input_tensor(i).get_names() instead.") const std::string& get_input_tensor_name(size_t i) const; std::unordered_set liveness_new_list; diff --git a/ngraph/core/src/descriptor/tensor.cpp b/ngraph/core/src/descriptor/tensor.cpp index 8a0a845ec0b3ca..dc9cc59b4b2f57 100644 --- a/ngraph/core/src/descriptor/tensor.cpp +++ b/ngraph/core/src/descriptor/tensor.cpp @@ -97,12 +97,12 @@ const std::string& descriptor::Tensor::get_name() const } NGRAPH_SUPPRESS_DEPRECATED_END -const std::unordered_set& descriptor::Tensor::get_names() const +const std::unordered_set& descriptor::Tensor::get_names() const { return m_names; } -void descriptor::Tensor::set_names(const std::unordered_set& names) +void descriptor::Tensor::set_names(const std::unordered_set& names) { m_names = names; } @@ -110,13 +110,15 @@ void descriptor::Tensor::set_names(const std::unordered_set& names) ostream& operator<<(ostream& out, const descriptor::Tensor& tensor) { std::string names; - for (const auto& name : tensor.get_names()) { + for (const auto& name : tensor.get_names()) + { if (!names.empty()) names += ", "; names += name; } NGRAPH_SUPPRESS_DEPRECATED_START - if (names.empty()) names = tensor.get_name(); + if (names.empty()) + names = tensor.get_name(); NGRAPH_SUPPRESS_DEPRECATED_END out << "Tensor(" << names << ")"; return out; diff --git a/ngraph/test/tensor.cpp 
b/ngraph/test/tensor.cpp index c48fa2760623e9..be9cc26ab1f180 100644 --- a/ngraph/test/tensor.cpp +++ b/ngraph/test/tensor.cpp @@ -23,10 +23,10 @@ #include "gtest/gtest.h" #include "ngraph/function.hpp" #include "ngraph/ngraph.hpp" +#include "ngraph/opsets/opset6.hpp" #include "ngraph/pass/manager.hpp" #include "pass/liveness.hpp" #include "util/test_tools.hpp" -#include "ngraph/opsets/opset6.hpp" NGRAPH_SUPPRESS_DEPRECATED_START @@ -105,7 +105,10 @@ TEST(tensor, tensor_names) auto f0 = make_shared(relu, ParameterVector{arg0}); ASSERT_EQ(arg0->get_output_tensor(0).get_names(), relu->get_input_tensor(0).get_names()); - ASSERT_EQ(arg0->get_output_tensor(0).get_names(), relu->input_value(0).get_tensor().get_names()); - ASSERT_EQ(f0->get_result()->get_input_tensor(0).get_names(), relu->get_output_tensor(0).get_names()); - ASSERT_EQ(f0->get_result()->input_value(0).get_tensor().get_names(), relu->get_output_tensor(0).get_names()); + ASSERT_EQ(arg0->get_output_tensor(0).get_names(), + relu->input_value(0).get_tensor().get_names()); + ASSERT_EQ(f0->get_result()->get_input_tensor(0).get_names(), + relu->get_output_tensor(0).get_names()); + ASSERT_EQ(f0->get_result()->input_value(0).get_tensor().get_names(), + relu->get_output_tensor(0).get_names()); } From cdfb8f1207a4c10e4f29928239316772d6c70d86 Mon Sep 17 00:00:00 2001 From: Ilya Churaev Date: Thu, 21 Jan 2021 08:45:11 +0300 Subject: [PATCH 12/23] Skip add output test for GNA --- .../plugin/gna/shared_tests_instances/skip_tests_config.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/inference-engine/tests/functional/plugin/gna/shared_tests_instances/skip_tests_config.cpp b/inference-engine/tests/functional/plugin/gna/shared_tests_instances/skip_tests_config.cpp index de921dee8be667..213aae0bdbcb5d 100644 --- a/inference-engine/tests/functional/plugin/gna/shared_tests_instances/skip_tests_config.cpp +++ b/inference-engine/tests/functional/plugin/gna/shared_tests_instances/skip_tests_config.cpp @@ -54,5 +54,6 @@ 
std::vector disabledTestPatterns() { // TODO: Issue 24839 R"(.*ConvolutionLayerTest.CompareWithRefs.*D=\(1.3\).*)", - R"(.*ConvolutionLayerTest.CompareWithRefs.*D=\(3.1\).*)" + R"(.*ConvolutionLayerTest.CompareWithRefs.*D=\(3.1\).*)", + R"(.*TensorNamesTest.CheckAddOutput.*)", }; } From f2cace9038e5617ebeb18f1c4c2a0561a6b68b28 Mon Sep 17 00:00:00 2001 From: Ilya Churaev Date: Thu, 21 Jan 2021 09:40:07 +0300 Subject: [PATCH 13/23] Added serialization support --- .../src/transformations/serialize.cpp | 9 +++ .../ir_serialization/tensor_names.cpp | 59 +++++++++++++++++++ .../src/subgraph/tensor_names.cpp | 5 -- .../common_test_utils/ngraph_test_utils.cpp | 7 +++ 4 files changed, 75 insertions(+), 5 deletions(-) create mode 100644 inference-engine/tests/functional/inference_engine/ir_serialization/tensor_names.cpp diff --git a/inference-engine/src/transformations/src/transformations/serialize.cpp b/inference-engine/src/transformations/src/transformations/serialize.cpp index 91f46db0ba2069..828e3076ff4db5 100644 --- a/inference-engine/src/transformations/src/transformations/serialize.cpp +++ b/inference-engine/src/transformations/src/transformations/serialize.cpp @@ -602,6 +602,15 @@ void ngfunction_2_irv10(pugi::xml_node& netXml, port.append_attribute("id").set_value(port_id++); port.append_attribute("precision") .set_value(get_output_precision_name(o).c_str()); + std::string names; + for (const auto& name : o.get_tensor().get_names()) { + if (!names.empty()) + names += ", "; + names += name; + } + if (!names.empty()) { + port.append_attribute("names").set_value(names.c_str()); + } for (auto d : o.get_shape()) { pugi::xml_node dim = port.append_child("dim"); dim.append_child(pugi::xml_node_type::node_pcdata) diff --git a/inference-engine/tests/functional/inference_engine/ir_serialization/tensor_names.cpp b/inference-engine/tests/functional/inference_engine/ir_serialization/tensor_names.cpp new file mode 100644 index 00000000000000..38f0d30b80cf1b --- /dev/null +++ 
b/inference-engine/tests/functional/inference_engine/ir_serialization/tensor_names.cpp @@ -0,0 +1,59 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include +#include +#include +#include "common_test_utils/ngraph_test_utils.hpp" +#include "ie_core.hpp" +#include "ngraph/ngraph.hpp" +#include "transformations/serialize.hpp" +#include + +class TensorNameSerializationTest : public ::testing::Test { +protected: + std::string test_name = + ::testing::UnitTest::GetInstance()->current_test_info()->name(); + std::string m_out_xml_path = test_name + ".xml"; + std::string m_out_bin_path = test_name + ".bin"; + + void TearDown() override { + std::remove(m_out_xml_path.c_str()); + std::remove(m_out_bin_path.c_str()); + } +}; + +TEST_F(TensorNameSerializationTest, SerializeFunctionWithTensorNames) { + InferenceEngine::Core ie; + + std::shared_ptr function; + { + auto parameter = std::make_shared(ngraph::element::Type_t::f32, ngraph::Shape{1, 3, 10, 10}); + parameter->set_friendly_name("parameter"); + parameter->get_output_tensor(0).set_names({"input"}); + auto relu_prev = std::make_shared(parameter); + relu_prev->set_friendly_name("relu_prev"); + relu_prev->get_output_tensor(0).set_names({"relu_prev_t", "identity_prev_t"}); + auto relu = std::make_shared(relu_prev); + relu->set_friendly_name("relu"); + relu->get_output_tensor(0).set_names({"relu_t", "identity"}); + const ngraph::ResultVector results{std::make_shared(relu)}; + results[0]->set_friendly_name("out"); + ngraph::ParameterVector params{parameter}; + function = std::make_shared(results, params, "TensorNames"); + } + + InferenceEngine::CNNNetwork expected(function); + expected.serialize(m_out_xml_path, m_out_bin_path); + auto result = ie.ReadNetwork(m_out_xml_path, m_out_bin_path); + + bool success; + std::string message; + std::tie(success, message) = + compare_functions(result.getFunction(), expected.getFunction(), true, true, true, true); + + 
ASSERT_TRUE(success) << message; +} diff --git a/inference-engine/tests/functional/shared_test_classes/src/subgraph/tensor_names.cpp b/inference-engine/tests/functional/shared_test_classes/src/subgraph/tensor_names.cpp index acde2d43d3060d..f31eec544a0daf 100644 --- a/inference-engine/tests/functional/shared_test_classes/src/subgraph/tensor_names.cpp +++ b/inference-engine/tests/functional/shared_test_classes/src/subgraph/tensor_names.cpp @@ -15,12 +15,7 @@ std::string TensorNamesTest::getTestCaseName(testing::TestParamInfoGetParam(); - std::vector data(300); - for (size_t i = 0; i < 300; i++) - data[i] = i; auto parameter = std::make_shared(ngraph::element::Type_t::f32, ngraph::Shape{1, 3, 10, 10}); parameter->set_friendly_name("parameter"); diff --git a/inference-engine/tests/ie_test_utils/common_test_utils/ngraph_test_utils.cpp b/inference-engine/tests/ie_test_utils/common_test_utils/ngraph_test_utils.cpp index 697be047be16a4..b0ce1cf1b6ae92 100644 --- a/inference-engine/tests/ie_test_utils/common_test_utils/ngraph_test_utils.cpp +++ b/inference-engine/tests/ie_test_utils/common_test_utils/ngraph_test_utils.cpp @@ -186,6 +186,13 @@ std::pair compare_functions( } for (int i = 0; i < node1->outputs().size(); ++i) { + const auto& tensor1 = node1->output(i).get_tensor(); + const auto& tensor2 = node2->output(i).get_tensor(); + + if (tensor1.get_names() != tensor2.get_names()) { + err_log << "Output tensors names are different for nodes: " + << node1->get_friendly_name() << " and " << node2->get_friendly_name() << std::endl; + } if (!node1->output(i).get_partial_shape().same_scheme(node2->output(i).get_partial_shape())) { err_log << "Different shape detected" << std::endl << node1->get_friendly_name() << " Output(" << i << ") " << node1->output(i).get_partial_shape() << " and " From 1830a6d62e44c661a0e9e24a64f94d8bf07e4d8f Mon Sep 17 00:00:00 2001 From: Ilya Churaev Date: Thu, 21 Jan 2021 10:17:23 +0300 Subject: [PATCH 14/23] Added PythonAPI --- 
.../src/openvino/inference_engine/ie_api.pyx | 6 ++ .../openvino/inference_engine/ie_api_impl.cpp | 8 +++ .../openvino/inference_engine/ie_api_impl.hpp | 3 + .../inference_engine/ie_api_impl_defs.pxd | 2 + .../ie_bridges/python/tests/test_IENetwork.py | 57 +++++++++++++++++++ 5 files changed, 76 insertions(+) diff --git a/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api.pyx b/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api.pyx index e92dcaaa58e8ba..8197397bec00ec 100644 --- a/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api.pyx +++ b/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api.pyx @@ -1439,6 +1439,12 @@ cdef class IENetwork: def _get_function_capsule(self): return self.impl.getFunction() + def get_ov_name_for_tensor(self, orig_name: str): + return self.impl.getOVNameForTensor(orig_name) + + def get_ov_name_for_operation(self, orig_name: str): + return self.impl.getOVNameForOperation(orig_name) + cdef class BlobBuffer: """Copy-less accessor for Inference Engine Blob""" diff --git a/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl.cpp b/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl.cpp index 226cc73bc2ee42..7a2bd205a0837d 100644 --- a/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl.cpp +++ b/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl.cpp @@ -260,6 +260,14 @@ const std::map InferenceEnginePython::IE return outputs; } +std::string InferenceEnginePython::IENetwork::getOVNameForTensor(const std::string& orig_name) { + return actual->getOVNameForTensor(orig_name); +} + +std::string InferenceEnginePython::IENetwork::getOVNameForOperation(const std::string& orig_name) { + return actual->getOVNameForOperation(orig_name); +} + void InferenceEnginePython::IENetwork::addOutput(const std::string &out_layer, size_t port_id) { actual->addOutput(out_layer, port_id); 
diff --git a/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl.hpp b/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl.hpp index 5534d1ddb53215..eff8c8cec3f504 100644 --- a/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl.hpp +++ b/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl.hpp @@ -71,6 +71,9 @@ struct IENetwork { IENetwork() = default; void convertToOldRepresentation(); + + std::string getOVNameForTensor(const std::string& orig_name); + std::string getOVNameForOperation(const std::string& orig_name); }; diff --git a/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl_defs.pxd b/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl_defs.pxd index d11d8b526a8743..91b3e9af849e90 100644 --- a/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl_defs.pxd +++ b/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl_defs.pxd @@ -175,6 +175,8 @@ cdef extern from "ie_api_impl.hpp" namespace "InferenceEnginePython": void load_from_buffer(const char*xml, size_t xml_size, uint8_t*bin, size_t bin_size) except + object getFunction() except + void convertToOldRepresentation() except + + string getOVNameForTensor(const string &) except + + string getOVNameForOperation(const string &) except + cdef cppclass InferRequestWrap: double exec_time; diff --git a/inference-engine/ie_bridges/python/tests/test_IENetwork.py b/inference-engine/ie_bridges/python/tests/test_IENetwork.py index e3c52497814e1f..4e6b5d42c6a607 100644 --- a/inference-engine/ie_bridges/python/tests/test_IENetwork.py +++ b/inference-engine/ie_bridges/python/tests/test_IENetwork.py @@ -247,3 +247,60 @@ def test_multi_out_data(): assert net.outputs["28/Reshape"].name == "28/Reshape" and net.outputs["28/Reshape"].shape == [1, 5184] assert net.outputs["fc_out"].name == "fc_out" and net.outputs["fc_out"].shape == [1, 
10] pass + +def test_tensor_names(): + model = """ + + + + + + + 1 + 3 + 22 + 22 + + + + + + + 1 + 3 + 22 + 22 + + + + + 1 + 3 + 22 + 22 + + + + + + + 1 + 3 + 22 + 22 + + + + + + + + + + """ + ie = IECore() + net = ie.read_network(model=model, init_from_buffer=True) + assert net.get_ov_name_for_tensor("retu_t") == "activation" + assert net.get_ov_name_for_tensor("identity_t") == "activation" + assert net.get_ov_name_for_tensor("input") == "in1" + assert net.get_ov_name_for_operation("output") == "activation" From 635c17a0600e5d016b2071f04b6023a5994815b8 Mon Sep 17 00:00:00 2001 From: Ilya Churaev Date: Thu, 21 Jan 2021 18:41:47 +0300 Subject: [PATCH 15/23] Fixed tests --- .../src/inference_engine/cnn_network_ngraph_impl.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/inference-engine/src/inference_engine/cnn_network_ngraph_impl.cpp b/inference-engine/src/inference_engine/cnn_network_ngraph_impl.cpp index b230fc6b29559f..c05b911895e85a 100644 --- a/inference-engine/src/inference_engine/cnn_network_ngraph_impl.cpp +++ b/inference-engine/src/inference_engine/cnn_network_ngraph_impl.cpp @@ -246,6 +246,7 @@ StatusCode CNNNetworkNGraphImpl::addOutput(const std::string& layerName, size_t return OK; } auto result = make_shared<::ngraph::op::Result>(layer->output(outputIndex)); + result->set_friendly_name(outputName); _ngraph_function->add_results({result}); if (_outputData.count(outputName) == 0) { From 0dff1bf7aca126b3a94093b5893355640d5efa94 Mon Sep 17 00:00:00 2001 From: Ilya Churaev Date: Fri, 22 Jan 2021 07:14:37 +0300 Subject: [PATCH 16/23] Fixed tests --- .../src/inference_engine/cnn_network_ngraph_impl.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/inference-engine/src/inference_engine/cnn_network_ngraph_impl.cpp b/inference-engine/src/inference_engine/cnn_network_ngraph_impl.cpp index c05b911895e85a..72247ea846b137 100644 --- a/inference-engine/src/inference_engine/cnn_network_ngraph_impl.cpp +++ 
b/inference-engine/src/inference_engine/cnn_network_ngraph_impl.cpp @@ -234,7 +234,8 @@ StatusCode CNNNetworkNGraphImpl::addOutput(const std::string& layerName, size_t try { for (const auto & layer : _ngraph_function->get_ops()) { - if (layer->get_friendly_name() == layerName) { + // Result can have the same name as previous operation + if (layer->get_friendly_name() == layerName && !std::dynamic_pointer_cast(layer)) { std::string outputName = layerName; if (layer->outputs().size() != 1) { outputName += "." + std::to_string(outputIndex); From 8214aa1717cb3e227af3f6279cf6bf42a861c911 Mon Sep 17 00:00:00 2001 From: Ilya Churaev Date: Fri, 22 Jan 2021 11:26:59 +0300 Subject: [PATCH 17/23] Fixed typo --- .../plugin/gna/shared_tests_instances/skip_tests_config.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/inference-engine/tests/functional/plugin/gna/shared_tests_instances/skip_tests_config.cpp b/inference-engine/tests/functional/plugin/gna/shared_tests_instances/skip_tests_config.cpp index 35743d695a228a..1e2a1ab52e8d2a 100644 --- a/inference-engine/tests/functional/plugin/gna/shared_tests_instances/skip_tests_config.cpp +++ b/inference-engine/tests/functional/plugin/gna/shared_tests_instances/skip_tests_config.cpp @@ -55,7 +55,7 @@ std::vector disabledTestPatterns() { R"(.*VariableStateTest.inferreq_smoke_VariableState_2infers*.*)", // TODO: Issue 24839 R"(.*ConvolutionLayerTest.CompareWithRefs.*D=\(1.3\).*)", - R"(.*ConvolutionLayerTest.CompareWithRefs.*D=\(3.1\).*)" - R"(.*TensorNamesTest.CheckAddOutput.*)", + R"(.*ConvolutionLayerTest.CompareWithRefs.*D=\(3.1\).*)", + R"(.*TensorNamesTest.CheckAddOutput.*)" }; } From f521fdec3761ff7dedc0bdf80403ff642b78c019 Mon Sep 17 00:00:00 2001 From: Ilya Churaev Date: Fri, 22 Jan 2021 14:01:34 +0300 Subject: [PATCH 18/23] Try to disable GNA test --- .../plugin/gna/shared_tests_instances/skip_tests_config.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git 
a/inference-engine/tests/functional/plugin/gna/shared_tests_instances/skip_tests_config.cpp b/inference-engine/tests/functional/plugin/gna/shared_tests_instances/skip_tests_config.cpp index 1e2a1ab52e8d2a..ec2853f0eb298e 100644 --- a/inference-engine/tests/functional/plugin/gna/shared_tests_instances/skip_tests_config.cpp +++ b/inference-engine/tests/functional/plugin/gna/shared_tests_instances/skip_tests_config.cpp @@ -9,6 +9,7 @@ std::vector disabledTestPatterns() { return { + ".*TensorNamesTest\\.CheckAddOutput.*", // TODO: FIX BUG 31661 // TODO: support InferRequest in GNAPlugin ".*InferRequestTests\\.canRun3AsyncRequestsConsistentlyFromThreadsWithoutWait.*", @@ -55,7 +56,6 @@ std::vector disabledTestPatterns() { R"(.*VariableStateTest.inferreq_smoke_VariableState_2infers*.*)", // TODO: Issue 24839 R"(.*ConvolutionLayerTest.CompareWithRefs.*D=\(1.3\).*)", - R"(.*ConvolutionLayerTest.CompareWithRefs.*D=\(3.1\).*)", - R"(.*TensorNamesTest.CheckAddOutput.*)" + R"(.*ConvolutionLayerTest.CompareWithRefs.*D=\(3.1\).*)" }; } From a0dd43562e5de2e83f0c14f1ce4d3c419e59c6d9 Mon Sep 17 00:00:00 2001 From: Ilya Churaev Date: Fri, 22 Jan 2021 17:20:25 +0300 Subject: [PATCH 19/23] Fixed tests --- .../plugin/shared/include/subgraph_tests/tensor_names.hpp | 1 + 1 file changed, 1 insertion(+) diff --git a/inference-engine/tests/functional/plugin/shared/include/subgraph_tests/tensor_names.hpp b/inference-engine/tests/functional/plugin/shared/include/subgraph_tests/tensor_names.hpp index b6d793420f4fd1..15a93c64d669a0 100644 --- a/inference-engine/tests/functional/plugin/shared/include/subgraph_tests/tensor_names.hpp +++ b/inference-engine/tests/functional/plugin/shared/include/subgraph_tests/tensor_names.hpp @@ -96,6 +96,7 @@ TEST_P(TensorNamesTest, CheckTensorNamesAfterClone) { } TEST_P(TensorNamesTest, CheckAddOutput) { + SKIP_IF_CURRENT_TEST_IS_DISABLED(); cnnNetwork = InferenceEngine::CNNNetwork{function}; ConfigureNetwork(); From 3f30bea82074ae3776a74ef5897360ea02a79b2f Mon 
Sep 17 00:00:00 2001 From: Ilya Churaev Date: Mon, 25 Jan 2021 21:16:21 +0300 Subject: [PATCH 20/23] Removed unused variables --- .../plugin/shared/include/subgraph_tests/tensor_names.hpp | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/inference-engine/tests/functional/plugin/shared/include/subgraph_tests/tensor_names.hpp b/inference-engine/tests/functional/plugin/shared/include/subgraph_tests/tensor_names.hpp index 15a93c64d669a0..69b109d670cd1e 100644 --- a/inference-engine/tests/functional/plugin/shared/include/subgraph_tests/tensor_names.hpp +++ b/inference-engine/tests/functional/plugin/shared/include/subgraph_tests/tensor_names.hpp @@ -114,11 +114,9 @@ TEST_P(TensorNamesTest, CheckAddOutput) { ASSERT_EQ(1, function->get_results().size()); // Check that relu_prev doesn't exist in output and input maps - for (const auto& names : {inNames, outNames}) { - ASSERT_THROW(cnnNetwork.getOVNameForOperation("relu_prev"), InferenceEngine::NotFound); - for (const std::string& tensor_name : {"relu_prev_t", "identity_prev_t"}) { - ASSERT_THROW(cnnNetwork.getOVNameForOperation(tensor_name), InferenceEngine::NotFound); - } + ASSERT_THROW(cnnNetwork.getOVNameForOperation("relu_prev"), InferenceEngine::NotFound); + for (const std::string& tensor_name : {"relu_prev_t", "identity_prev_t"}) { + ASSERT_THROW(cnnNetwork.getOVNameForOperation(tensor_name), InferenceEngine::NotFound); } // Add relu_prev as output From 1050816e06b1360249563a1cea1cb4f0c328f1ed Mon Sep 17 00:00:00 2001 From: Ilya Churaev Date: Tue, 26 Jan 2021 12:30:03 +0300 Subject: [PATCH 21/23] Fixed tests --- .../python/src/openvino/inference_engine/ie_api.pyx | 6 ++++-- inference-engine/ie_bridges/python/tests/test_IENetwork.py | 5 +++-- inference-engine/include/cpp/ie_cnn_network.h | 4 ++-- 3 files changed, 9 insertions(+), 6 deletions(-) diff --git a/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api.pyx 
b/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api.pyx index 8197397bec00ec..3a84f61cfa9dcc 100644 --- a/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api.pyx +++ b/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api.pyx @@ -1440,10 +1440,12 @@ cdef class IENetwork: return self.impl.getFunction() def get_ov_name_for_tensor(self, orig_name: str): - return self.impl.getOVNameForTensor(orig_name) + name = bytes(orig_name, 'utf-8') + return self.impl.getOVNameForTensor(name).decode('utf-8') def get_ov_name_for_operation(self, orig_name: str): - return self.impl.getOVNameForOperation(orig_name) + name = bytes(orig_name, 'utf-8') + return self.impl.getOVNameForOperation(name).decode('utf-8') cdef class BlobBuffer: """Copy-less accessor for Inference Engine Blob""" diff --git a/inference-engine/ie_bridges/python/tests/test_IENetwork.py b/inference-engine/ie_bridges/python/tests/test_IENetwork.py index 4e6b5d42c6a607..a1192fe64e9ccf 100644 --- a/inference-engine/ie_bridges/python/tests/test_IENetwork.py +++ b/inference-engine/ie_bridges/python/tests/test_IENetwork.py @@ -299,8 +299,9 @@ def test_tensor_names(): """ ie = IECore() - net = ie.read_network(model=model, init_from_buffer=True) - assert net.get_ov_name_for_tensor("retu_t") == "activation" + weights = b'' + net = ie.read_network(model=model.encode('utf-8'), weights=weights, init_from_buffer=True) + assert net.get_ov_name_for_tensor("relu_t") == "activation" assert net.get_ov_name_for_tensor("identity_t") == "activation" assert net.get_ov_name_for_tensor("input") == "in1" assert net.get_ov_name_for_operation("output") == "activation" diff --git a/inference-engine/include/cpp/ie_cnn_network.h b/inference-engine/include/cpp/ie_cnn_network.h index 40ecc63b2ae496..8fc28ec41351d0 100644 --- a/inference-engine/include/cpp/ie_cnn_network.h +++ b/inference-engine/include/cpp/ie_cnn_network.h @@ -190,7 +190,7 @@ class INFERENCE_ENGINE_API_CLASS(CNNNetwork) { 
void serialize(const std::string& xmlPath, const std::string& binPath = {}) const; /** - * @brief Methods maps framework tensor name to OpenVINO name + * @brief Method maps framework tensor name to OpenVINO name * * @param orig_name Framework tensor name * @@ -203,7 +203,7 @@ class INFERENCE_ENGINE_API_CLASS(CNNNetwork) { } /** - * @brief Methods maps framework operator name to OpenVINO name + * @brief Method maps framework operator name to OpenVINO name * * @param orig_name Framework operation name * From 3ed3f57d50a48800e0f9c7dd711a15c7adfdc3b5 Mon Sep 17 00:00:00 2001 From: Ilya Churaev Date: Tue, 26 Jan 2021 15:27:52 +0300 Subject: [PATCH 22/23] Update documentation --- inference-engine/include/ie_icnn_network.hpp | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/inference-engine/include/ie_icnn_network.hpp b/inference-engine/include/ie_icnn_network.hpp index a863b94c68232c..2c6b5bea3ff2f0 100644 --- a/inference-engine/include/ie_icnn_network.hpp +++ b/inference-engine/include/ie_icnn_network.hpp @@ -69,9 +69,11 @@ class INFERENCE_ENGINE_ICNNNETWORK_CLASS(ICNNNetwork) : public details::IRelease * * For single and multiple outputs networks. * - * This method need to be called to find output names for using them later + * This method needs to be called to find out OpenVINO output names for using them later * when calling InferenceEngine::InferRequest::GetBlob or InferenceEngine::InferRequest::SetBlob * + * If you want to use framework names, you can use InferenceEngine::ICNNNetwork::getOVNameForTensor or + * InferenceEngine::ICNNNetwork::getOVNameForOperation methods to map framework names to OpenVINO names * * @param out Reference to the OutputsDataMap object */ @@ -82,9 +84,12 @@ class INFERENCE_ENGINE_ICNNNETWORK_CLASS(ICNNNetwork) : public details::IRelease * object. * * For single and multiple inputs networks.
- * This method need to be called to find out input names for using them later + * This method needs to be called to find out OpenVINO input names for using them later * when calling InferenceEngine::InferRequest::SetBlob * + * If you want to use framework names, you can use InferenceEngine::ICNNNetwork::getOVNameForTensor or + * InferenceEngine::ICNNNetwork::getOVNameForOperation methods to map framework names to OpenVINO names + * * @param inputs Reference to InputsDataMap object. */ virtual void getInputsInfo(InputsDataMap& inputs) const noexcept = 0; From 61876ebd66ffa483d6cdfe7ed50edd438f62ad7b Mon Sep 17 00:00:00 2001 From: Ilya Churaev Date: Thu, 28 Jan 2021 16:14:14 +0300 Subject: [PATCH 23/23] Fixed comment --- .../inference_engine/ir_serialization/tensor_names.cpp | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/inference-engine/tests/functional/inference_engine/ir_serialization/tensor_names.cpp b/inference-engine/tests/functional/inference_engine/ir_serialization/tensor_names.cpp index 38f0d30b80cf1b..9cf118e2f98f80 100644 --- a/inference-engine/tests/functional/inference_engine/ir_serialization/tensor_names.cpp +++ b/inference-engine/tests/functional/inference_engine/ir_serialization/tensor_names.cpp @@ -13,10 +13,9 @@ #include "transformations/serialize.hpp" #include -class TensorNameSerializationTest : public ::testing::Test { +class TensorNameSerializationTest : public CommonTestUtils::TestsCommon { protected: - std::string test_name = - ::testing::UnitTest::GetInstance()->current_test_info()->name(); + std::string test_name = GetTestName() + "_" + GetTimestamp(); std::string m_out_xml_path = test_name + ".xml"; std::string m_out_bin_path = test_name + ".bin";