diff --git a/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api.pyx b/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api.pyx
index e92dcaaa58e8ba..3a84f61cfa9dcc 100644
--- a/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api.pyx
+++ b/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api.pyx
@@ -1439,6 +1439,14 @@ cdef class IENetwork:
     def _get_function_capsule(self):
         return self.impl.getFunction()
 
+    def get_ov_name_for_tensor(self, orig_name: str):
+        name = bytes(orig_name, 'utf-8')
+        return self.impl.getOVNameForTensor(name).decode('utf-8')
+
+    def get_ov_name_for_operation(self, orig_name: str):
+        name = bytes(orig_name, 'utf-8')
+        return self.impl.getOVNameForOperation(name).decode('utf-8')
+
 
 cdef class BlobBuffer:
     """Copy-less accessor for Inference Engine Blob"""
diff --git a/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl.cpp b/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl.cpp
index 226cc73bc2ee42..7a2bd205a0837d 100644
--- a/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl.cpp
+++ b/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl.cpp
@@ -260,6 +260,14 @@ const std::map<std::string, InferenceEngine::DataPtr> InferenceEnginePython::IENetwork::getOutputs() {
     return outputs;
 }
 
+std::string InferenceEnginePython::IENetwork::getOVNameForTensor(const std::string& orig_name) {
+    return actual->getOVNameForTensor(orig_name);
+}
+
+std::string InferenceEnginePython::IENetwork::getOVNameForOperation(const std::string& orig_name) {
+    return actual->getOVNameForOperation(orig_name);
+}
+
 void InferenceEnginePython::IENetwork::addOutput(const std::string &out_layer, size_t port_id) {
     actual->addOutput(out_layer, port_id);
diff --git a/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl.hpp b/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl.hpp
index 5534d1ddb53215..eff8c8cec3f504 100644
--- a/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl.hpp
+++ b/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl.hpp
@@ -71,6 +71,9 @@ struct IENetwork {
     IENetwork() = default;
 
     void convertToOldRepresentation();
+
+    std::string getOVNameForTensor(const std::string& orig_name);
+    std::string getOVNameForOperation(const std::string& orig_name);
 };
diff --git a/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl_defs.pxd b/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl_defs.pxd
index d11d8b526a8743..91b3e9af849e90 100644
--- a/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl_defs.pxd
+++ b/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl_defs.pxd
@@ -175,6 +175,8 @@ cdef extern from "ie_api_impl.hpp" namespace "InferenceEnginePython":
         void load_from_buffer(const char*xml, size_t xml_size, uint8_t*bin, size_t bin_size) except +
         object getFunction() except +
         void convertToOldRepresentation() except +
+        string getOVNameForTensor(const string &) except +
+        string getOVNameForOperation(const string &) except +
 
     cdef cppclass InferRequestWrap:
         double exec_time;
diff --git a/inference-engine/ie_bridges/python/tests/test_IENetwork.py b/inference-engine/ie_bridges/python/tests/test_IENetwork.py
index e3c52497814e1f..a1192fe64e9ccf 100644
--- a/inference-engine/ie_bridges/python/tests/test_IENetwork.py
+++ b/inference-engine/ie_bridges/python/tests/test_IENetwork.py
@@ -247,3 +247,61 @@ def test_multi_out_data():
     assert net.outputs["28/Reshape"].name == "28/Reshape" and net.outputs["28/Reshape"].shape == [1, 5184]
     assert net.outputs["fc_out"].name == "fc_out" and net.outputs["fc_out"].shape == [1, 10]
     pass
+
+def test_tensor_names():
+    model = """
+            <net name="Network" version="10">
+                <layers>
+                    <layer name="in1" type="Parameter" id="0" version="opset1">
+                        <data element_type="f32" shape="1,3,22,22"/>
+                        <output>
+                            <port id="0" precision="FP32" names="input">
+                                <dim>1</dim>
+                                <dim>3</dim>
+                                <dim>22</dim>
+                                <dim>22</dim>
+                            </port>
+                        </output>
+                    </layer>
+                    <layer name="activation" id="1" type="ReLU" version="opset1">
+                        <input>
+                            <port id="1" precision="FP32">
+                                <dim>1</dim>
+                                <dim>3</dim>
+                                <dim>22</dim>
+                                <dim>22</dim>
+                            </port>
+                        </input>
+                        <output>
+                            <port id="2" precision="FP32" names="relu_t, identity_t">
+                                <dim>1</dim>
+                                <dim>3</dim>
+                                <dim>22</dim>
+                                <dim>22</dim>
+                            </port>
+                        </output>
+                    </layer>
+                    <layer name="output" type="Result" id="2" version="opset1">
+                        <input>
+                            <port id="0" precision="FP32">
+                                <dim>1</dim>
+                                <dim>3</dim>
+                                <dim>22</dim>
+                                <dim>22</dim>
+                            </port>
+                        </input>
+                    </layer>
+                </layers>
+                <edges>
+                    <edge from-layer="0" from-port="0" to-layer="1" to-port="1"/>
+                    <edge from-layer="1" from-port="2" to-layer="2" to-port="0"/>
+                </edges>
+            </net>
+            """
+    ie = IECore()
+    weights = b''
+    net = ie.read_network(model=model.encode('utf-8'), weights=weights, init_from_buffer=True)
+    assert net.get_ov_name_for_tensor("relu_t") == "activation"
+    assert net.get_ov_name_for_tensor("identity_t") == "activation"
+    assert net.get_ov_name_for_tensor("input") == "in1"
+    assert net.get_ov_name_for_operation("output") == "activation"
diff --git a/inference-engine/include/cpp/ie_cnn_network.h b/inference-engine/include/cpp/ie_cnn_network.h
index 9544646a6d089d..8fc28ec41351d0 100644
--- a/inference-engine/include/cpp/ie_cnn_network.h
+++ b/inference-engine/include/cpp/ie_cnn_network.h
@@ -1,4 +1,4 @@
-// Copyright (C) 2018-2020 Intel Corporation
+// Copyright (C) 2018-2021 Intel Corporation
 // SPDX-License-Identifier: Apache-2.0
 //
 
@@ -189,6 +189,32 @@ class INFERENCE_ENGINE_API_CLASS(CNNNetwork) {
      */
     void serialize(const std::string& xmlPath, const std::string& binPath = {}) const;
 
+    /**
+     * @brief Maps a framework tensor name to an OpenVINO name
+     *
+     * @param orig_name Framework tensor name
+     *
+     * @return OpenVINO name
+     */
+    std::string getOVNameForTensor(const std::string& orig_name) const {
+        std::string ov_name;
+        CALL_STATUS_FNC(getOVNameForTensor, ov_name, orig_name);
+        return ov_name;
+    }
+
+    /**
+     * @brief Maps a framework operation name to an OpenVINO name
+     *
+     * @param orig_name Framework operation name
+     *
+     * @return OpenVINO name
+     */
+    std::string getOVNameForOperation(const std::string& orig_name) const {
+        std::string ov_name;
+        CALL_STATUS_FNC(getOVNameForOperation, ov_name, orig_name);
+        return ov_name;
+    }
+
 protected:
     IE_SUPPRESS_DEPRECATED_START
     /**
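A minimal usage sketch of the CNNNetwork helpers added above, assuming an IR whose framework names match the Python test ("input" as a tensor name, "output" as a Result operation name); the model files and device are hypothetical:

    #include <ie_core.hpp>
    #include <string>

    int main() {
        InferenceEngine::Core ie;
        auto network = ie.ReadNetwork("model.xml", "model.bin");  // hypothetical files

        // Translate names the original framework used into OpenVINO names.
        std::string in_name  = network.getOVNameForTensor("input");     // e.g. "in1"
        std::string out_name = network.getOVNameForOperation("output"); // e.g. "activation"

        auto executable = ie.LoadNetwork(network, "CPU");
        auto request = executable.CreateInferRequest();
        // GetBlob/SetBlob expect the mapped OpenVINO names.
        auto in_blob = request.GetBlob(in_name);
        auto out_blob = request.GetBlob(out_name);
        return 0;
    }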
diff --git a/inference-engine/include/ie_icnn_network.hpp b/inference-engine/include/ie_icnn_network.hpp
index 946e3044a30daf..2c6b5bea3ff2f0 100644
--- a/inference-engine/include/ie_icnn_network.hpp
+++ b/inference-engine/include/ie_icnn_network.hpp
@@ -1,4 +1,4 @@
-// Copyright (C) 2018-2020 Intel Corporation
+// Copyright (C) 2018-2021 Intel Corporation
 // SPDX-License-Identifier: Apache-2.0
 //
 
@@ -69,9 +69,11 @@ class INFERENCE_ENGINE_ICNNNETWORK_CLASS(ICNNNetwork) : public details::IRelease
      *
      * For single and multiple outputs networks.
      *
-     * This method need to be called to find output names for using them later
+     * This method needs to be called to find out OpenVINO output names for using them later
      * when calling InferenceEngine::InferRequest::GetBlob or InferenceEngine::InferRequest::SetBlob
      *
+     * If you want to use framework names, use InferenceEngine::ICNNNetwork::getOVNameForTensor or
+     * InferenceEngine::ICNNNetwork::getOVNameForOperation to map framework names to OpenVINO names
      *
      * @param out Reference to the OutputsDataMap object
      */
@@ -82,9 +84,12 @@ class INFERENCE_ENGINE_ICNNNETWORK_CLASS(ICNNNetwork) : public details::IRelease
      * object.
      *
      * For single and multiple inputs networks.
-     * This method need to be called to find out input names for using them later
+     * This method needs to be called to find out OpenVINO input names for using them later
      * when calling InferenceEngine::InferRequest::SetBlob
      *
+     * If you want to use framework names, use InferenceEngine::ICNNNetwork::getOVNameForTensor or
+     * InferenceEngine::ICNNNetwork::getOVNameForOperation to map framework names to OpenVINO names
+     *
      * @param inputs Reference to InputsDataMap object.
      */
     virtual void getInputsInfo(InputsDataMap& inputs) const noexcept = 0;
@@ -179,6 +184,38 @@ class INFERENCE_ENGINE_ICNNNETWORK_CLASS(ICNNNetwork) : public details::IRelease
     virtual StatusCode serialize(const std::string& xmlPath, const std::string& binPath, ResponseDesc* resp) const noexcept = 0;
 
+    /**
+     * @brief Maps a framework tensor name to an OpenVINO name
+     *
+     * @param ov_name OpenVINO name
+     * @param orig_name Framework tensor name
+     * @param resp Pointer to the response message that holds a description of an error if any occurred
+     *
+     * @return Status code of the operation
+     */
+    virtual StatusCode getOVNameForTensor(std::string& ov_name, const std::string& orig_name, ResponseDesc* resp) const noexcept {
+        (void) ov_name;
+        (void) orig_name;
+        (void) resp;
+        return NOT_IMPLEMENTED;
+    }
+
+    /**
+     * @brief Maps a framework operation name to an OpenVINO name
+     *
+     * @param ov_name OpenVINO name
+     * @param orig_name Framework operation name
+     * @param resp Pointer to the response message that holds a description of an error if any occurred
+     *
+     * @return Status code of the operation
+     */
+    virtual StatusCode getOVNameForOperation(std::string& ov_name, const std::string& orig_name, ResponseDesc* resp) const noexcept {
+        (void) ov_name;
+        (void) orig_name;
+        (void) resp;
+        return NOT_IMPLEMENTED;
+    }
+
     /**
      * @brief A virtual destructor.
      */
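The NOT_IMPLEMENTED defaults above keep older ICNNNetwork implementations source-compatible; only the nGraph-backed implementation overrides them. A hedged sketch of driving the StatusCode/ResponseDesc protocol by hand (the CNNNetwork wrapper normally hides this behind CALL_STATUS_FNC; map_tensor_name is a hypothetical helper):

    #include <iostream>
    #include <string>
    #include <ie_icnn_network.hpp>

    std::string map_tensor_name(const InferenceEngine::ICNNNetwork& network,
                                const std::string& orig_name) {
        std::string ov_name;
        InferenceEngine::ResponseDesc resp;
        InferenceEngine::StatusCode rc = network.getOVNameForTensor(ov_name, orig_name, &resp);
        if (rc == InferenceEngine::NOT_IMPLEMENTED)
            return orig_name;  // legacy implementation: no mapping is available
        if (rc != InferenceEngine::OK)
            std::cerr << resp.msg << std::endl;  // e.g. "... was not mapped to OpenVINO data!"
        return ov_name;
    }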
diff --git a/inference-engine/src/cldnn_engine/ops/result.cpp b/inference-engine/src/cldnn_engine/ops/result.cpp
index 56ad5e9f5c017a..536caf22eb7555 100644
--- a/inference-engine/src/cldnn_engine/ops/result.cpp
+++ b/inference-engine/src/cldnn_engine/ops/result.cpp
@@ -18,7 +18,9 @@ void CreateResultOp(Program& p, const std::shared_ptr<ngraph::op::v0::Result>& op) {
     p.ValidateInputs(op, {1});
 
     auto prev = op->get_input_node_shared_ptr(0);
+    NGRAPH_SUPPRESS_DEPRECATED_START
     auto inputID = op->get_input_source_output(0).get_tensor().get_name();
+    NGRAPH_SUPPRESS_DEPRECATED_END
     if (inputID.empty()) {
         inputID = prev->get_friendly_name();
         if (prev->get_output_size() > 1) {
diff --git a/inference-engine/src/cldnn_engine/ops/split.cpp b/inference-engine/src/cldnn_engine/ops/split.cpp
index 65cbf59873b831..3639a3c583a2e5 100644
--- a/inference-engine/src/cldnn_engine/ops/split.cpp
+++ b/inference-engine/src/cldnn_engine/ops/split.cpp
@@ -24,6 +24,7 @@ void CreateCommonSplitOp(Program& p, const std::shared_ptr<ngraph::Node>& op) {
     for (size_t i = 0; i < op->get_output_size(); i++) {
         std::string outLayerName = layerName + (is_single_out_split ? "" : "." + std::to_string(i));
         const auto outLayerDims = op->get_output_shape(i);
+        NGRAPH_SUPPRESS_DEPRECATED_START
         if (outLayerDims.size() != startOffset.size()) {
             THROW_IE_EXCEPTION << "Invalid dimesions in split layer: " << op->get_friendly_name()
                                << " output: " << op->get_output_tensor_name(i);
@@ -34,6 +35,7 @@ void CreateCommonSplitOp(Program& p, const std::shared_ptr<ngraph::Node>& op) {
                                << " output: " << op->get_output_tensor_name(i);
             }
         }
+        NGRAPH_SUPPRESS_DEPRECATED_END
 
         auto outTensor = CldnnTensorFromIEDims(outLayerDims, 1);
         auto offsetTensor = CldnnTensorFromIEDims(startOffset, 0);
diff --git a/inference-engine/src/inference_engine/cnn_network_ngraph_impl.cpp b/inference-engine/src/inference_engine/cnn_network_ngraph_impl.cpp
index 2355a5674ab20c..715e362ea47b92 100644
--- a/inference-engine/src/inference_engine/cnn_network_ngraph_impl.cpp
+++ b/inference-engine/src/inference_engine/cnn_network_ngraph_impl.cpp
@@ -1,4 +1,4 @@
-// Copyright (C) 2018-2020 Intel Corporation
+// Copyright (C) 2018-2021 Intel Corporation
 // SPDX-License-Identifier: Apache-2.0
 //
 
@@ -122,6 +122,12 @@ CNNNetworkNGraphImpl::CNNNetworkNGraphImpl(
         std::string outName = layer->get_friendly_name();
         IE_ASSERT(layer->get_output_size() == 1);  // Parameter as only singly output port
 
+        // map original names to OpenVINO name
+        _opNames[outName] = outName;
+        for (const auto& name : layer->get_output_tensor(0).get_names()) {
+            _tensorNames[name] = outName;
+        }
+
         DataPtr& ptr = _data[outName];
         IE_ASSERT(ptr);  // Data must be allocated after the reshape method
 
@@ -139,7 +145,10 @@ CNNNetworkNGraphImpl::CNNNetworkNGraphImpl(
 }
 
 CNNNetworkNGraphImpl::CNNNetworkNGraphImpl(const CNNNetwork& network) {
-    if (network.getFunction() == nullptr) {
+    IE_SUPPRESS_DEPRECATED_START
+    const ICNNNetwork& iNetwork = network;
+    const auto net = dynamic_cast<const CNNNetworkNGraphImpl*>(&iNetwork);
+    if (network.getFunction() == nullptr || !net) {
         THROW_IE_EXCEPTION << "Cannot create CNNNetwork with nGraph from legacy network format!";
     }
 
@@ -147,6 +156,9 @@ CNNNetworkNGraphImpl::CNNNetworkNGraphImpl(const CNNNetwork& network) {
     InputsDataMap inputs = network.getInputsInfo();
     OutputsDataMap outputs = network.getOutputsInfo();
 
+    _opNames = net->_opNames;
+    _tensorNames = net->_tensorNames;
+
     for (const auto& outputInfo : outputs) {
         const auto& name = outputInfo.second->getName();
         DataPtr output = std::make_shared<Data>(name, outputInfo.second->getTensorDesc());
@@ -164,6 +176,7 @@ CNNNetworkNGraphImpl::CNNNetworkNGraphImpl(const CNNNetwork& network) {
         info->setLayout(inputInfo.second->getLayout());
         _inputData[name] = info;
     }
+    IE_SUPPRESS_DEPRECATED_END
 }
 
 void CNNNetworkNGraphImpl::setInputInfo(InputInfo::Ptr data) {
+ std::to_string(outputIndex); + } + // Check that we don't have a result for the output port for (const auto& port : layer->output(outputIndex).get_target_inputs()) { if (dynamic_cast(port.get_node())) return OK; } auto result = make_shared<::ngraph::op::Result>(layer->output(outputIndex)); + result->set_friendly_name(outputName); _ngraph_function->add_results({result}); - std::string outputName = layerName; - if (layer->outputs().size() != 1) { - outputName += "." + std::to_string(outputIndex); - } if (_outputData.count(outputName) == 0) { reshape(); } @@ -237,6 +253,17 @@ void CNNNetworkNGraphImpl::addOutput(const ::ngraph::Output<::ngraph::Node> & ou createDataForResult(output, dataName, data); _data[dataName] = data; _outputData[dataName] = data; + + // Save original framework names + for (const auto& name : output.get_tensor().get_names()) { + _tensorNames[name] = dataName; + } + for (const auto consumerInput : output.get_target_inputs()) { + const auto &consumerLayer = consumerInput.get_node()->shared_from_this(); + if (std::dynamic_pointer_cast(consumerLayer)) { + _opNames[consumerLayer->get_friendly_name()] = dataName; + } + } } size_t CNNNetworkNGraphImpl::getBatchSize() const noexcept { @@ -391,7 +418,7 @@ StatusCode CNNNetworkNGraphImpl::serialize(const std::string& xmlPath, ResponseDesc* resp) const noexcept { try { std::map custom_opsets; - for (auto extension : _ie_extensions) { + for (const auto& extension : _ie_extensions) { auto opset = extension->getOpSets(); custom_opsets.insert(begin(opset), end(opset)); } @@ -410,6 +437,20 @@ StatusCode CNNNetworkNGraphImpl::serialize(const std::string& xmlPath, return OK; } +StatusCode CNNNetworkNGraphImpl::getOVNameForTensor(std::string& ov_name, const std::string& orig_name, ResponseDesc* resp) const noexcept { + if (_tensorNames.find(orig_name) == _tensorNames.end()) + return DescriptionBuffer(NOT_FOUND, resp) << "Framework tensor with name \"" << orig_name << "\" was not mapped to OpenVINO data!"; + ov_name = _tensorNames.at(orig_name); + return OK; +} + +StatusCode CNNNetworkNGraphImpl::getOVNameForOperation(std::string& ov_name, const std::string& orig_name, ResponseDesc* resp) const noexcept { + if (_opNames.find(orig_name) == _opNames.end()) + return DescriptionBuffer(NOT_FOUND, resp) << "Framework operation with name \"" << orig_name << "\" was not mapped to OpenVINO data!"; + ov_name = _opNames.at(orig_name); + return OK; +} + StatusCode CNNNetworkNGraphImpl::setBatchSize(size_t size, ResponseDesc* responseDesc) noexcept { try { if (getBatchSize() == size) return OK; diff --git a/inference-engine/src/inference_engine/cnn_network_ngraph_impl.hpp b/inference-engine/src/inference_engine/cnn_network_ngraph_impl.hpp index 7d3070afaec472..7778ec8ae82424 100644 --- a/inference-engine/src/inference_engine/cnn_network_ngraph_impl.hpp +++ b/inference-engine/src/inference_engine/cnn_network_ngraph_impl.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2020 Intel Corporation +// Copyright (C) 2018-2021 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // @@ -11,6 +11,7 @@ #include #include +#include #include #include #include @@ -81,6 +82,10 @@ class INFERENCE_ENGINE_API_CLASS(CNNNetworkNGraphImpl): public ICNNNetwork { StatusCode serialize(const std::string& xmlPath, const std::string& binPath, ResponseDesc* resp) const noexcept override; + StatusCode getOVNameForTensor(std::string& ov_name, const std::string& orig_name, ResponseDesc* resp) const noexcept override; + + StatusCode getOVNameForOperation(std::string& ov_name, const 
diff --git a/inference-engine/src/inference_engine/cnn_network_ngraph_impl.hpp b/inference-engine/src/inference_engine/cnn_network_ngraph_impl.hpp
index 7d3070afaec472..7778ec8ae82424 100644
--- a/inference-engine/src/inference_engine/cnn_network_ngraph_impl.hpp
+++ b/inference-engine/src/inference_engine/cnn_network_ngraph_impl.hpp
@@ -1,4 +1,4 @@
-// Copyright (C) 2018-2020 Intel Corporation
+// Copyright (C) 2018-2021 Intel Corporation
 // SPDX-License-Identifier: Apache-2.0
 //
 
@@ -11,6 +11,7 @@
 #include <map>
 #include <memory>
+#include <unordered_map>
 #include <string>
 #include <vector>
 #include <ngraph/ngraph.hpp>
@@ -81,6 +82,10 @@ class INFERENCE_ENGINE_API_CLASS(CNNNetworkNGraphImpl): public ICNNNetwork {
     StatusCode serialize(const std::string& xmlPath, const std::string& binPath, ResponseDesc* resp) const noexcept override;
 
+    StatusCode getOVNameForTensor(std::string& ov_name, const std::string& orig_name, ResponseDesc* resp) const noexcept override;
+
+    StatusCode getOVNameForOperation(std::string& ov_name, const std::string& orig_name, ResponseDesc* resp) const noexcept override;
+
     // used by convertFunctionToICNNNetwork from legacy library
     std::map<std::string, DataPtr> _data;
 protected:
@@ -91,6 +96,8 @@ class INFERENCE_ENGINE_API_CLASS(CNNNetworkNGraphImpl): public ICNNNetwork {
     InferenceEngine::InputsDataMap _inputData;
     std::map<std::string, DataPtr> _outputData;
     const std::vector<IExtensionPtr> _ie_extensions;
+    std::unordered_map<std::string, std::string> _opNames;
+    std::unordered_map<std::string, std::string> _tensorNames;
 
     /**
      * @brief Create DataPtr for nGraph operation
diff --git a/inference-engine/src/legacy_api/src/convert_function_to_cnn_network.cpp b/inference-engine/src/legacy_api/src/convert_function_to_cnn_network.cpp
index df634bf08e1491..423da8dde53d28 100644
--- a/inference-engine/src/legacy_api/src/convert_function_to_cnn_network.cpp
+++ b/inference-engine/src/legacy_api/src/convert_function_to_cnn_network.cpp
@@ -1,4 +1,4 @@
-// Copyright (C) 2018-2020 Intel Corporation
+// Copyright (C) 2018-2021 Intel Corporation
 // SPDX-License-Identifier: Apache-2.0
 //
 
@@ -1876,7 +1876,9 @@ void convertFunctionToICNNNetwork(const std::shared_ptr
             cnnLayer->outData.clear();
             continue;
         }
+        NGRAPH_SUPPRESS_DEPRECATED_START
         auto outName = layer->output(i).get_tensor().get_name();
+        NGRAPH_SUPPRESS_DEPRECATED_END
         if (outName.empty()) {
             outName = ngraph::op::util::create_ie_output_name(layer->output(i));
         }
@@ -1930,7 +1932,9 @@ void convertFunctionToICNNNetwork(const std::shared_ptr
         if (std::dynamic_pointer_cast<::ngraph::op::Result>(layer)) {
             IE_ASSERT(layer->get_input_size() == 1);
             const auto &input = layer->input_value(0);
+            NGRAPH_SUPPRESS_DEPRECATED_START
             auto name = input.get_tensor().get_name();
+            NGRAPH_SUPPRESS_DEPRECATED_END
             if (!name.empty())
                 cnnNetworkImpl->addOutput(name);
             else
diff --git a/inference-engine/src/readers/ir_reader/ie_ir_parser.cpp b/inference-engine/src/readers/ir_reader/ie_ir_parser.cpp
index 05a48664f523fc..cb40347a4135af 100644
--- a/inference-engine/src/readers/ir_reader/ie_ir_parser.cpp
+++ b/inference-engine/src/readers/ir_reader/ie_ir_parser.cpp
@@ -601,7 +601,7 @@ void V10Parser::parsePreProcess(CNNNetwork& network, const pugi::xml_node& root,
 }
 
 V10Parser::GenericLayerParams V10Parser::XmlDeserializer::parseGenericParams(const pugi::xml_node& node) {
-    const auto parsePort = [](const pugi::xml_node& parentNode,
+    const auto parsePort = [this](const pugi::xml_node& parentNode,
                               const GenericLayerParams& params,
                               bool input) -> GenericLayerParams::LayerPortData {
         GenericLayerParams::LayerPortData port;
@@ -626,6 +626,12 @@ V10Parser::GenericLayerParams V10Parser::XmlDeserializer::parseGenericParams(con
             type = InferenceEngine::details::convertPrecision(preStr);
         }
         port.precision = type;
+        std::vector<std::string> names;
+        if (getParameters(parentNode, "names", names)) {
+            for (const auto& name : names) {
+                port.names.emplace(name);
+            }
+        }
         return port;
     };
     GenericLayerParams params;
@@ -823,6 +829,10 @@ std::shared_ptr<ngraph::Node> V10Parser::XmlDeserializer::createNode(
     }
 
     ngraphNode->set_friendly_name(params.name);
+    for (size_t i = 0; i < params.outputPorts.size() && i < ngraphNode->get_output_size(); ++i) {
+        if (!params.outputPorts[i].names.empty())
+            ngraphNode->get_output_tensor(i).set_names(params.outputPorts[i].names);
+    }
 
     return ngraphNode;
 }
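The "names" attribute is stored as a single comma-separated string, so reading it back means splitting and trimming (the parser delegates this to its getParameters utility; the helper below is a hypothetical standalone equivalent):

    #include <sstream>
    #include <string>
    #include <unordered_set>

    std::unordered_set<std::string> split_tensor_names(const std::string& attr) {
        std::unordered_set<std::string> names;
        std::istringstream stream(attr);
        std::string name;
        while (std::getline(stream, name, ',')) {
            // Trim the space the serializer writes after each comma.
            const auto begin = name.find_first_not_of(' ');
            if (begin != std::string::npos)
                names.insert(name.substr(begin));
        }
        return names;
    }
    // split_tensor_names("relu_t, identity_t") yields {"relu_t", "identity_t"}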
diff --git a/inference-engine/src/readers/ir_reader/ie_ir_parser.hpp b/inference-engine/src/readers/ir_reader/ie_ir_parser.hpp
index a97872faabeee0..43d1c4000038a4 100644
--- a/inference-engine/src/readers/ir_reader/ie_ir_parser.hpp
+++ b/inference-engine/src/readers/ir_reader/ie_ir_parser.hpp
@@ -69,6 +69,7 @@ class V10Parser : public IParser {
         // Precision and dimensions are needed only for GenericIE op
         ngraph::element::Type_t precision;
         SizeVector dims;
+        std::unordered_set<std::string> names;
     };
     size_t layerId;
     std::string version;
@@ -355,4 +356,4 @@ class V10Parser : public IParser {
 
 #endif  // IR_READER_V10
 
-}  // namespace InferenceEngine
\ No newline at end of file
+}  // namespace InferenceEngine
diff --git a/inference-engine/src/transformations/src/transformations/control_flow/unroll_tensor_iterator.cpp b/inference-engine/src/transformations/src/transformations/control_flow/unroll_tensor_iterator.cpp
index f7a54157e5f047..c2db73ea2ceef6 100644
--- a/inference-engine/src/transformations/src/transformations/control_flow/unroll_tensor_iterator.cpp
+++ b/inference-engine/src/transformations/src/transformations/control_flow/unroll_tensor_iterator.cpp
@@ -1,4 +1,4 @@
-// Copyright (C) 2020 Intel Corporation
+// Copyright (C) 2021 Intel Corporation
 // SPDX-License-Identifier: Apache-2.0
 //
 
@@ -129,8 +129,10 @@ bool ngraph::pass::UnrollTensorIterator::run_on_function(std::shared_ptr
                 // set output name to Tensor to store it for ngraph to cnn conversion
+                NGRAPH_SUPPRESS_DEPRECATED_START
                 concat->output(0).get_tensor().set_name(
                         op::util::create_ie_output_name(ti->output(concat_desc->m_output_index)));
+                NGRAPH_SUPPRESS_DEPRECATED_END
                 // connect the Concat layer to the corresponding TI outputs
                 for (auto &input : ti->output(concat_desc->m_output_index).get_target_inputs()) {
                     input.replace_source_output(concat);
@@ -140,7 +142,9 @@ bool ngraph::pass::UnrollTensorIterator::run_on_function(std::shared_ptr
                 std::shared_ptr<opset4::Result> result = body_functions[0]->get_results().at(concat_desc->m_body_value_index);
                 const auto& input_to_res = result->get_input_source_output(0);
                 // set output name to Tensor to store it for ngraph to cnn conversion
+                NGRAPH_SUPPRESS_DEPRECATED_START
                 input_to_res.get_tensor().set_name(op::util::create_ie_output_name(ti->output(concat_desc->m_output_index)));
+                NGRAPH_SUPPRESS_DEPRECATED_END
                 for (auto &input : ti->output(concat_desc->m_output_index).get_target_inputs()) {
                     input.replace_source_output(input_to_res);
                 }
@@ -153,7 +157,9 @@ bool ngraph::pass::UnrollTensorIterator::run_on_function(std::shared_ptr
                 const auto &in_value = result->input_value(0);
                 // set output name to Tensor to store it for ngraph to cnn conversion
+                NGRAPH_SUPPRESS_DEPRECATED_START
                 in_value.get_tensor().set_name(op::util::create_ie_output_name(ti->output(output_desc->m_output_index)));
+                NGRAPH_SUPPRESS_DEPRECATED_END
                 for (const auto &input : ti->output(output_desc->m_output_index).get_target_inputs()) {
                     input.replace_source_output(result->get_input_source_output(0));
                 }
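create_ie_output_name, used throughout these passes, encodes the same convention as addOutput and the Split conversion earlier in this patch: a single-output node is addressed by its friendly name alone, and additional outputs get a "." plus the output index. A sketch of the rule (the helper name is hypothetical; the real logic lives in ngraph::op::util::create_ie_output_name):

    #include <string>

    std::string ie_output_name(const std::string& friendly_name,
                               size_t output_index, size_t output_count) {
        return output_count == 1 ? friendly_name
                                 : friendly_name + "." + std::to_string(output_index);
    }
    // ie_output_name("split", 1, 3) == "split.1"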
diff --git a/inference-engine/src/transformations/src/transformations/op_conversions/convert_ti_to_sequences.cpp b/inference-engine/src/transformations/src/transformations/op_conversions/convert_ti_to_sequences.cpp
index c30123803b8197..4a1d8b8b28e4bb 100644
--- a/inference-engine/src/transformations/src/transformations/op_conversions/convert_ti_to_sequences.cpp
+++ b/inference-engine/src/transformations/src/transformations/op_conversions/convert_ti_to_sequences.cpp
@@ -1,4 +1,4 @@
-// Copyright (C) 2020 Intel Corporation
+// Copyright (C) 2021 Intel Corporation
 // SPDX-License-Identifier: Apache-2.0
 //
 
@@ -180,7 +180,9 @@ ngraph::pass::ConvertTensorIteratorToLSTMSequence::ConvertTensorIteratorToLSTMSe
             for (const auto &input : ti->output(ordered_out_descs[i]->m_output_index).get_target_inputs()) {
                 input.replace_source_output(outputs[i]->output(0));
             }
+            NGRAPH_SUPPRESS_DEPRECATED_START
             outputs[i]->get_output_tensor(0).set_name(op::util::create_ie_output_name(ti->output(ordered_out_descs[i]->m_output_index)));
+            NGRAPH_SUPPRESS_DEPRECATED_END
         }
     }
 
@@ -334,7 +336,9 @@ ngraph::pass::ConvertTensorIteratorToRNNSequence::ConvertTensorIteratorToRNNSequ
             for (const auto &input : ti->output(ordered_out_descs[i]->m_output_index).get_target_inputs()) {
                 input.replace_source_output(outputs[i]->output(0));
             }
+            NGRAPH_SUPPRESS_DEPRECATED_START
             outputs[i]->get_output_tensor(0).set_name(op::util::create_ie_output_name(ti->output(ordered_out_descs[i]->m_output_index)));
+            NGRAPH_SUPPRESS_DEPRECATED_END
         }
     }
 
@@ -489,7 +493,9 @@ ngraph::pass::ConvertTensorIteratorToGRUSequence::ConvertTensorIteratorToGRUSequ
             for (const auto &input : ti->output(ordered_out_descs[i]->m_output_index).get_target_inputs()) {
                 input.replace_source_output(outputs[i]->output(0));
             }
+            NGRAPH_SUPPRESS_DEPRECATED_START
             outputs[i]->get_output_tensor(0).set_name(op::util::create_ie_output_name(ti->output(ordered_out_descs[i]->m_output_index)));
+            NGRAPH_SUPPRESS_DEPRECATED_END
         }
     }
 
diff --git a/inference-engine/src/transformations/src/transformations/serialize.cpp b/inference-engine/src/transformations/src/transformations/serialize.cpp
index 06a994469fc7bb..9662722340db58 100644
--- a/inference-engine/src/transformations/src/transformations/serialize.cpp
+++ b/inference-engine/src/transformations/src/transformations/serialize.cpp
@@ -662,6 +662,15 @@ void ngfunction_2_irv10(pugi::xml_node& netXml,
             port.append_attribute("id").set_value(port_id++);
             port.append_attribute("precision")
                 .set_value(get_output_precision_name(o).c_str());
+            std::string names;
+            for (const auto& name : o.get_tensor().get_names()) {
+                if (!names.empty())
+                    names += ", ";
+                names += name;
+            }
+            if (!names.empty()) {
+                port.append_attribute("names").set_value(names.c_str());
+            }
             for (auto d : o.get_shape()) {
                 pugi::xml_node dim = port.append_child("dim");
                 dim.append_child(pugi::xml_node_type::node_pcdata)
results[0]->set_friendly_name("out"); + ngraph::ParameterVector params{parameter}; + function = std::make_shared(results, params, "TensorNames"); + } + + InferenceEngine::CNNNetwork expected(function); + expected.serialize(m_out_xml_path, m_out_bin_path); + auto result = ie.ReadNetwork(m_out_xml_path, m_out_bin_path); + + bool success; + std::string message; + std::tie(success, message) = + compare_functions(result.getFunction(), expected.getFunction(), true, true, true, true); + + ASSERT_TRUE(success) << message; +} diff --git a/inference-engine/tests/functional/inference_engine/ngraph_reader/tensor_names.cpp b/inference-engine/tests/functional/inference_engine/ngraph_reader/tensor_names.cpp new file mode 100644 index 00000000000000..2e3af86518d07b --- /dev/null +++ b/inference-engine/tests/functional/inference_engine/ngraph_reader/tensor_names.cpp @@ -0,0 +1,89 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include "ngraph_reader_tests.hpp" + +TEST_F(NGraphReaderTests, ReadNetworkWithTensorNames) { + std::string model = R"V0G0N( + + + + + + + 1 + 3 + 22 + 22 + + + + + + + 1 + 3 + 22 + 22 + + + + + 1 + 3 + 22 + 22 + + + + + + + 1 + 3 + 22 + 22 + + + + + + + + + +)V0G0N"; + Core ie; + Blob::Ptr weights; + + auto network = ie.ReadNetwork(model, weights); + auto function = network.getFunction(); + auto inputs = network.getInputsInfo(); + auto outputs = network.getOutputsInfo(); + std::unordered_set inNames; + for (const auto& in : inputs) + inNames.emplace(in.first); + std::unordered_set outNames; + for (const auto& out : outputs) + outNames.emplace(out.first); + + ASSERT_EQ(1, inputs.size()); + ASSERT_EQ(1, outputs.size()); + ASSERT_EQ(1, function->get_results().size()); + + for (const auto& param : function->get_parameters()) { + ASSERT_TRUE(inNames.count(network.getOVNameForOperation(param->get_friendly_name()))); + ASSERT_TRUE(!param->get_output_tensor(0).get_names().empty()); + for (const auto& name : param->get_output_tensor(0).get_names()) + ASSERT_TRUE(inNames.count(network.getOVNameForTensor(name))); + } + + for (const auto& result : function->get_results()) { + ASSERT_TRUE(outNames.count(network.getOVNameForOperation(result->get_friendly_name()))); + ASSERT_TRUE(!result->get_input_tensor(0).get_names().empty()); + for (const auto& name : result->get_input_tensor(0).get_names()) + ASSERT_TRUE(outNames.count(network.getOVNameForTensor(name))); + } +} diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/subgraph_tests/tensor_names.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/subgraph_tests/tensor_names.cpp new file mode 100644 index 00000000000000..99ceae1156ac85 --- /dev/null +++ b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/subgraph_tests/tensor_names.cpp @@ -0,0 +1,16 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include "subgraph_tests/tensor_names.hpp" +#include "common_test_utils/test_constants.hpp" + +using namespace SubgraphTestsDefinitions; + +namespace { + INSTANTIATE_TEST_CASE_P(smoke_Check, TensorNamesTest, + ::testing::Values(CommonTestUtils::DEVICE_CPU), + TensorNamesTest::getTestCaseName); +} // namespace diff --git a/inference-engine/tests/functional/plugin/gna/shared_tests_instances/skip_tests_config.cpp b/inference-engine/tests/functional/plugin/gna/shared_tests_instances/skip_tests_config.cpp index 99ea78b165f7ad..ec2853f0eb298e 100644 --- 
diff --git a/inference-engine/tests/functional/plugin/gna/shared_tests_instances/skip_tests_config.cpp b/inference-engine/tests/functional/plugin/gna/shared_tests_instances/skip_tests_config.cpp
index 99ea78b165f7ad..ec2853f0eb298e 100644
--- a/inference-engine/tests/functional/plugin/gna/shared_tests_instances/skip_tests_config.cpp
+++ b/inference-engine/tests/functional/plugin/gna/shared_tests_instances/skip_tests_config.cpp
@@ -9,6 +9,7 @@ std::vector<std::string> disabledTestPatterns() {
     return {
+        ".*TensorNamesTest\\.CheckAddOutput.*",
         // TODO: FIX BUG 31661
         // TODO: support InferRequest in GNAPlugin
         ".*InferRequestTests\\.canRun3AsyncRequestsConsistentlyFromThreadsWithoutWait.*",
diff --git a/inference-engine/tests/functional/plugin/gna/shared_tests_instances/subgraph_tests/tensor_names.cpp b/inference-engine/tests/functional/plugin/gna/shared_tests_instances/subgraph_tests/tensor_names.cpp
new file mode 100644
index 00000000000000..0729a36e9a4dc6
--- /dev/null
+++ b/inference-engine/tests/functional/plugin/gna/shared_tests_instances/subgraph_tests/tensor_names.cpp
@@ -0,0 +1,17 @@
+// Copyright (C) 2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <vector>
+
+#include "subgraph_tests/tensor_names.hpp"
+#include "common_test_utils/test_constants.hpp"
+
+using namespace SubgraphTestsDefinitions;
+
+namespace {
+    INSTANTIATE_TEST_CASE_P(smoke_Check, TensorNamesTest,
+                            ::testing::Values(CommonTestUtils::DEVICE_GNA),
+                            TensorNamesTest::getTestCaseName);
+}  // namespace
+
diff --git a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/subgraph_tests/tensor_names.cpp b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/subgraph_tests/tensor_names.cpp
new file mode 100644
index 00000000000000..b5258c33fd5e89
--- /dev/null
+++ b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/subgraph_tests/tensor_names.cpp
@@ -0,0 +1,18 @@
+// Copyright (C) 2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <vector>
+
+#include "subgraph_tests/tensor_names.hpp"
+#include "common_test_utils/test_constants.hpp"
+
+using namespace SubgraphTestsDefinitions;
+
+namespace {
+    INSTANTIATE_TEST_CASE_P(smoke_Check, TensorNamesTest,
+                            ::testing::Values(CommonTestUtils::DEVICE_GPU),
+                            TensorNamesTest::getTestCaseName);
+}  // namespace
+
+
diff --git a/inference-engine/tests/functional/plugin/myriad/shared_tests_instances/subgraph_tests/tensor_names.cpp b/inference-engine/tests/functional/plugin/myriad/shared_tests_instances/subgraph_tests/tensor_names.cpp
new file mode 100644
index 00000000000000..93e978ab427b07
--- /dev/null
+++ b/inference-engine/tests/functional/plugin/myriad/shared_tests_instances/subgraph_tests/tensor_names.cpp
@@ -0,0 +1,19 @@
+// Copyright (C) 2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <vector>
+
+#include "subgraph_tests/tensor_names.hpp"
+#include "common_test_utils/test_constants.hpp"
+
+using namespace SubgraphTestsDefinitions;
+
+namespace {
+    INSTANTIATE_TEST_CASE_P(smoke_Check, TensorNamesTest,
+                            ::testing::Values(CommonTestUtils::DEVICE_MYRIAD),
+                            TensorNamesTest::getTestCaseName);
+}  // namespace
+
+
+
diff --git a/inference-engine/tests/functional/plugin/shared/include/subgraph_tests/tensor_names.hpp b/inference-engine/tests/functional/plugin/shared/include/subgraph_tests/tensor_names.hpp
new file mode 100644
index 00000000000000..69b109d670cd1e
--- /dev/null
+++ b/inference-engine/tests/functional/plugin/shared/include/subgraph_tests/tensor_names.hpp
@@ -0,0 +1,166 @@
+// Copyright (C) 2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include "shared_test_classes/subgraph/tensor_names.hpp"
+#include <unordered_set>
+
+namespace SubgraphTestsDefinitions {
+
+TEST_P(TensorNamesTest, CheckTensorNames) {
+    cnnNetwork = InferenceEngine::CNNNetwork{function};
+    ConfigureNetwork();
+
+    auto inputs = cnnNetwork.getInputsInfo();
+    auto outputs = cnnNetwork.getOutputsInfo();
+    std::unordered_set<std::string> inNames;
+    for (const auto& in : inputs)
+        inNames.emplace(in.first);
+    std::unordered_set<std::string> outNames;
+    for (const auto& out : outputs)
+        outNames.emplace(out.first);
+
+    for (const auto& param : function->get_parameters()) {
+        ASSERT_TRUE(inNames.count(cnnNetwork.getOVNameForOperation(param->get_friendly_name())));
+        for (const auto& name : param->get_output_tensor(0).get_names())
+            ASSERT_TRUE(inNames.count(cnnNetwork.getOVNameForTensor(name)));
+    }
+
+    for (const auto& result : function->get_results()) {
+        ASSERT_TRUE(outNames.count(cnnNetwork.getOVNameForOperation(result->get_friendly_name())));
+        for (const auto& name : result->input_value(0).get_tensor().get_names())
+            ASSERT_TRUE(outNames.count(cnnNetwork.getOVNameForTensor(name)));
+    }
+
+    executableNetwork = core->LoadNetwork(cnnNetwork, targetDevice, configuration);
+    inferRequest = executableNetwork.CreateInferRequest();
+
+    for (const auto& param : function->get_parameters()) {
+        ASSERT_NO_THROW(inferRequest.GetBlob(cnnNetwork.getOVNameForOperation(param->get_friendly_name())));
+        for (const auto& name : param->get_output_tensor(0).get_names())
+            ASSERT_NO_THROW(inferRequest.GetBlob(cnnNetwork.getOVNameForTensor(name)));
+    }
+
+    for (const auto& result : function->get_results()) {
+        ASSERT_NO_THROW(inferRequest.GetBlob(cnnNetwork.getOVNameForOperation(result->get_friendly_name())));
+        for (const auto& name : result->get_input_tensor(0).get_names()) {
+            ASSERT_NO_THROW(inferRequest.GetBlob(cnnNetwork.getOVNameForTensor(name)));
+        }
+    }
+}
+
+TEST_P(TensorNamesTest, CheckTensorNamesAfterClone) {
+    cnnNetwork = InferenceEngine::CNNNetwork{function};
+    InferenceEngine::CNNNetwork clonedNet(static_cast<InferenceEngine::ICNNNetwork::Ptr>(cnnNetwork));
+    ConfigureNetwork();
+
+    auto inputs = clonedNet.getInputsInfo();
+    auto outputs = clonedNet.getOutputsInfo();
+    std::unordered_set<std::string> inNames;
+    for (const auto& in : inputs)
+        inNames.emplace(in.first);
+    std::unordered_set<std::string> outNames;
+    for (const auto& out : outputs)
+        outNames.emplace(out.first);
+
+    for (const auto& param : function->get_parameters()) {
+        ASSERT_TRUE(inNames.count(clonedNet.getOVNameForOperation(param->get_friendly_name())));
+        for (const auto& name : param->get_output_tensor(0).get_names())
+            ASSERT_TRUE(inNames.count(clonedNet.getOVNameForTensor(name)));
+    }
+
+    for (const auto& result : function->get_results()) {
+        ASSERT_TRUE(outNames.count(clonedNet.getOVNameForOperation(result->get_friendly_name())));
+
+        for (const auto& name : result->get_input_tensor(0).get_names()) {
+            ASSERT_TRUE(outNames.count(clonedNet.getOVNameForTensor(name)));
+        }
+    }
+
+    executableNetwork = core->LoadNetwork(clonedNet, targetDevice, configuration);
+    inferRequest = executableNetwork.CreateInferRequest();
+
+    for (const auto& param : function->get_parameters()) {
+        ASSERT_NO_THROW(inferRequest.GetBlob(clonedNet.getOVNameForOperation(param->get_friendly_name())));
+        for (const auto& name : param->get_output_tensor(0).get_names())
+            ASSERT_NO_THROW(inferRequest.GetBlob(clonedNet.getOVNameForTensor(name)));
+    }
+
+    for (const auto& result : function->get_results()) {
+        ASSERT_NO_THROW(inferRequest.GetBlob(clonedNet.getOVNameForOperation(result->get_friendly_name())));
+        for (const auto& name : result->input_value(0).get_tensor().get_names())
+            ASSERT_NO_THROW(inferRequest.GetBlob(clonedNet.getOVNameForTensor(name)));
+    }
+}
} +} + +TEST_P(TensorNamesTest, CheckAddOutput) { + SKIP_IF_CURRENT_TEST_IS_DISABLED(); + cnnNetwork = InferenceEngine::CNNNetwork{function}; + ConfigureNetwork(); + + auto inputs = cnnNetwork.getInputsInfo(); + auto outputs = cnnNetwork.getOutputsInfo(); + std::unordered_set inNames; + for (const auto& in : inputs) + inNames.emplace(in.first); + std::unordered_set outNames; + for (const auto& out : outputs) + outNames.emplace(out.first); + + ASSERT_EQ(1, inputs.size()); + ASSERT_EQ(1, outputs.size()); + ASSERT_EQ(1, function->get_results().size()); + + // Check that relu_prev doesn't exist in output and input maps + ASSERT_THROW(cnnNetwork.getOVNameForOperation("relu_prev"), InferenceEngine::NotFound); + for (const std::string& tensor_name : {"relu_prev_t", "identity_prev_t"}) { + ASSERT_THROW(cnnNetwork.getOVNameForOperation(tensor_name), InferenceEngine::NotFound); + } + + // Add relu_prev as output + cnnNetwork.addOutput("relu_prev"); + + inputs = cnnNetwork.getInputsInfo(); + outputs = cnnNetwork.getOutputsInfo(); + inNames.clear(); + for (const auto& in : inputs) + inNames.emplace(in.first); + outNames.clear(); + for (const auto& out : outputs) + outNames.emplace(out.first); + + ASSERT_EQ(1, inputs.size()); + ASSERT_EQ(2, outputs.size()); + ASSERT_EQ(2, function->get_results().size()); + + // Check that relu_prev exists in output map + ASSERT_FALSE(inNames.count(cnnNetwork.getOVNameForOperation("relu_prev"))); + for (const std::string& tensor_name : {"relu_prev_t", "identity_prev_t"}) { + ASSERT_FALSE(inNames.count(cnnNetwork.getOVNameForTensor(tensor_name))); + } + ASSERT_TRUE(outNames.count(cnnNetwork.getOVNameForOperation("relu_prev"))); + for (const std::string& tensor_name : {"relu_prev_t", "identity_prev_t"}) { + ASSERT_TRUE(outNames.count(cnnNetwork.getOVNameForTensor(tensor_name))); + } + + executableNetwork = core->LoadNetwork(cnnNetwork, targetDevice, configuration); + inferRequest = executableNetwork.CreateInferRequest(); + + for (const auto& param : cnnNetwork.getFunction()->get_parameters()) { + ASSERT_NO_THROW(inferRequest.GetBlob(cnnNetwork.getOVNameForOperation(param->get_friendly_name()))); + for (const auto& name : param->get_output_tensor(0).get_names()) + ASSERT_NO_THROW(inferRequest.GetBlob(cnnNetwork.getOVNameForTensor(name))); + } + + for (const auto& result : cnnNetwork.getFunction()->get_results()) { + ASSERT_NO_THROW(inferRequest.GetBlob(cnnNetwork.getOVNameForOperation(result->get_friendly_name()))); + for (const auto& name : result->get_input_tensor(0).get_names()) { + ASSERT_NO_THROW(inferRequest.GetBlob(cnnNetwork.getOVNameForTensor(name))); + } + } +} + +} // namespace SubgraphTestsDefinitions + diff --git a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/tensor_names.hpp b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/tensor_names.hpp new file mode 100644 index 00000000000000..dfa2cbeaa259d7 --- /dev/null +++ b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/tensor_names.hpp @@ -0,0 +1,28 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include +#include +#include + +#include "shared_test_classes/base/layer_test_utils.hpp" +#include "ngraph_functions/builders.hpp" + +namespace SubgraphTestsDefinitions { + +typedef std::tuple< + std::string // Device name +> constResultParams; + +class TensorNamesTest : public testing::WithParamInterface, + virtual public 
diff --git a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/tensor_names.hpp b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/tensor_names.hpp
new file mode 100644
index 00000000000000..dfa2cbeaa259d7
--- /dev/null
+++ b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/tensor_names.hpp
@@ -0,0 +1,28 @@
+// Copyright (C) 2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <tuple>
+#include <string>
+#include <vector>
+#include <memory>
+
+#include "shared_test_classes/base/layer_test_utils.hpp"
+#include "ngraph_functions/builders.hpp"
+
+namespace SubgraphTestsDefinitions {
+
+typedef std::tuple<
+        std::string  // Device name
+> constResultParams;
+
+class TensorNamesTest : public testing::WithParamInterface<constResultParams>,
+                        virtual public LayerTestsUtils::LayerTestsCommon {
+public:
+    static std::string getTestCaseName(testing::TestParamInfo<constResultParams> obj);
+protected:
+    void SetUp() override;
+};
+}  // namespace SubgraphTestsDefinitions
diff --git a/inference-engine/tests/functional/shared_test_classes/src/subgraph/tensor_names.cpp b/inference-engine/tests/functional/shared_test_classes/src/subgraph/tensor_names.cpp
new file mode 100644
index 00000000000000..f31eec544a0daf
--- /dev/null
+++ b/inference-engine/tests/functional/shared_test_classes/src/subgraph/tensor_names.cpp
@@ -0,0 +1,35 @@
+// Copyright (C) 2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "shared_test_classes/subgraph/tensor_names.hpp"
+
+namespace SubgraphTestsDefinitions {
+
+std::string TensorNamesTest::getTestCaseName(testing::TestParamInfo<constResultParams> obj) {
+    std::string targetDevice;
+    std::tie(targetDevice) = obj.param;
+    std::ostringstream result;
+    result << "TargetDevice=" << targetDevice;
+    return result.str();
+}
+
+void TensorNamesTest::SetUp() {
+    std::tie(targetDevice) = this->GetParam();
+
+    auto parameter = std::make_shared<ngraph::opset6::Parameter>(ngraph::element::Type_t::f32, ngraph::Shape{1, 3, 10, 10});
+    parameter->set_friendly_name("parameter");
+    parameter->get_output_tensor(0).set_names({"input"});
+    auto relu_prev = std::make_shared<ngraph::opset6::Relu>(parameter);
+    relu_prev->set_friendly_name("relu_prev");
+    relu_prev->get_output_tensor(0).set_names({"relu_prev_t", "identity_prev_t"});
+    auto relu = std::make_shared<ngraph::opset6::Relu>(relu_prev);
+    relu->set_friendly_name("relu");
+    relu->get_output_tensor(0).set_names({"relu_t", "identity"});
+    const ngraph::ResultVector results{std::make_shared<ngraph::opset6::Result>(relu)};
+    results[0]->set_friendly_name("out");
+    ngraph::ParameterVector params{parameter};
+    function = std::make_shared<ngraph::Function>(results, params, "TensorNames");
+}
+
+}  // namespace SubgraphTestsDefinitions
diff --git a/inference-engine/tests/ie_test_utils/common_test_utils/ngraph_test_utils.cpp b/inference-engine/tests/ie_test_utils/common_test_utils/ngraph_test_utils.cpp
index fa05b365e7add5..b3f8957f1b636c 100644
--- a/inference-engine/tests/ie_test_utils/common_test_utils/ngraph_test_utils.cpp
+++ b/inference-engine/tests/ie_test_utils/common_test_utils/ngraph_test_utils.cpp
@@ -248,6 +248,13 @@ std::pair<bool, std::string> compare_functions(
         }
 
         for (int i = 0; i < node1->outputs().size(); ++i) {
+            const auto& tensor1 = node1->output(i).get_tensor();
+            const auto& tensor2 = node2->output(i).get_tensor();
+
+            if (tensor1.get_names() != tensor2.get_names()) {
+                err_log << "Output tensors names are different for nodes: "
+                        << node1->get_friendly_name() << " and " << node2->get_friendly_name() << std::endl;
+            }
+
             if (!node1->output(i).get_partial_shape().same_scheme(
                     node2->output(i).get_partial_shape())) {
                 err_log << "Different shape detected\n"
diff --git a/ngraph/core/include/ngraph/descriptor/output.hpp b/ngraph/core/include/ngraph/descriptor/output.hpp
index c7c2fc875a6ba3..611961c5e3e3bc 100644
--- a/ngraph/core/include/ngraph/descriptor/output.hpp
+++ b/ngraph/core/include/ngraph/descriptor/output.hpp
@@ -17,6 +17,8 @@
 #pragma once
 
 #include <memory>
+#include <string>
+#include <unordered_set>
 #include <vector>
 
 #include "ngraph/descriptor/input.hpp"
diff --git a/ngraph/core/include/ngraph/descriptor/tensor.hpp b/ngraph/core/include/ngraph/descriptor/tensor.hpp
index 123b2ec507b66f..fcb527a4381edf 100644
--- a/ngraph/core/include/ngraph/descriptor/tensor.hpp
+++ b/ngraph/core/include/ngraph/descriptor/tensor.hpp
@@ -18,6 +18,7 @@
 
 #include <memory>
 #include <string>
+#include <unordered_set>
 
 #include "ngraph/partial_shape.hpp"
 #include "ngraph/shape.hpp"
@@ -44,8 +45,13 @@ namespace ngraph
                    Node* node,
                    size_t node_output_number);
 
+            NGRAPH_DEPRECATED("get_name() is deprecated! Please use get_names() instead.")
             const std::string& get_name() const;
+            NGRAPH_DEPRECATED("set_name() is deprecated! Please use set_names() instead.")
             void set_name(const std::string& name);
+
+            const std::unordered_set<std::string>& get_names() const;
+            void set_names(const std::unordered_set<std::string>& names);
             void set_tensor_type(const element::Type& element_type, const PartialShape& pshape);
             void set_element_type(const element::Type& elemenet_type);
             void set_partial_shape(const PartialShape& partial_shape);
@@ -68,6 +74,7 @@ namespace ngraph
             size_t m_node_output_number{0};
 
             std::string m_name;
+            std::unordered_set<std::string> m_names;
         };
 
         NGRAPH_API
diff --git a/ngraph/core/include/ngraph/node.hpp b/ngraph/core/include/ngraph/node.hpp
index 628a9c26866bea..f195dfbdbacaa7 100644
--- a/ngraph/core/include/ngraph/node.hpp
+++ b/ngraph/core/include/ngraph/node.hpp
@@ -327,6 +327,8 @@ namespace ngraph
         descriptor::Tensor& get_input_tensor(size_t i) const;
 
         /// Returns the tensor name for output i
+        NGRAPH_DEPRECATED(
+            "The tensor name was deprecated. Use get_output_tensor(i).get_names() instead.")
         const std::string& get_output_tensor_name(size_t i) const;
 
         std::set<Input<Node>> get_output_target_inputs(size_t i) const;
@@ -347,6 +349,8 @@ namespace ngraph
         const PartialShape& get_input_partial_shape(size_t i) const;
 
         /// Returns the tensor name for input i
+        NGRAPH_DEPRECATED(
+            "The tensor name was deprecated. Use get_input_tensor(i).get_names() instead.")
         const std::string& get_input_tensor_name(size_t i) const;
 
         std::unordered_set<descriptor::Tensor*> liveness_new_list;
diff --git a/ngraph/core/include/ngraph/node_output.hpp b/ngraph/core/include/ngraph/node_output.hpp
index 359a1441cfc4b2..bcaed7812d3b2e 100644
--- a/ngraph/core/include/ngraph/node_output.hpp
+++ b/ngraph/core/include/ngraph/node_output.hpp
@@ -17,6 +17,7 @@
 #pragma once
 
 #include <cstddef>
+#include <unordered_set>
 
 #include "ngraph/descriptor/tensor.hpp"
 #include "ngraph/partial_shape.hpp"
diff --git a/ngraph/core/include/ngraph/runtime/tensor.hpp b/ngraph/core/include/ngraph/runtime/tensor.hpp
index 8985957faab24d..9e83c3a3f61072 100644
--- a/ngraph/core/include/ngraph/runtime/tensor.hpp
+++ b/ngraph/core/include/ngraph/runtime/tensor.hpp
@@ -63,6 +63,7 @@ namespace ngraph
 
             /// \brief Get tensor's unique name
             /// \return tensor's name
+            NGRAPH_DEPRECATED("Only output ports have names")
             const std::string& get_name() const;
 
             /// \brief Get the stale value of the tensor. A tensor is stale if its data is
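A minimal sketch of the replacement API declared above: each output tensor now carries a set of names rather than one string (opset6 is assumed here, matching the tests in this patch):

    #include <iostream>
    #include <ngraph/ngraph.hpp>
    #include <ngraph/opsets/opset6.hpp>

    int main() {
        using namespace ngraph;
        auto param = std::make_shared<opset6::Parameter>(element::f32, Shape{1, 3, 22, 22});
        // One output tensor may carry several framework aliases at once.
        param->get_output_tensor(0).set_names({"input", "data"});
        for (const auto& name : param->get_output_tensor(0).get_names())
            std::cout << name << std::endl;
        return 0;
    }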
diff --git a/ngraph/core/src/descriptor/tensor.cpp b/ngraph/core/src/descriptor/tensor.cpp
index 9669e9e3b8d6f9..dc9cc59b4b2f57 100644
--- a/ngraph/core/src/descriptor/tensor.cpp
+++ b/ngraph/core/src/descriptor/tensor.cpp
@@ -42,11 +42,6 @@ descriptor::Tensor::Tensor(const element::Type& element_type,
 {
 }
 
-void descriptor::Tensor::set_name(const string& name)
-{
-    m_name = name;
-}
-
 void descriptor::Tensor::set_tensor_type(const element::Type& element_type,
                                          const PartialShape& pshape)
 {
@@ -90,13 +85,41 @@ size_t descriptor::Tensor::size() const
     return shape_size(get_shape()) * m_element_type.size();
 }
 
+NGRAPH_SUPPRESS_DEPRECATED_START
+void descriptor::Tensor::set_name(const string& name)
+{
+    m_name = name;
+}
+
 const std::string& descriptor::Tensor::get_name() const
 {
     return m_name;
 }
+NGRAPH_SUPPRESS_DEPRECATED_END
+
+const std::unordered_set<std::string>& descriptor::Tensor::get_names() const
+{
+    return m_names;
+}
+
+void descriptor::Tensor::set_names(const std::unordered_set<std::string>& names)
+{
+    m_names = names;
+}
 
 ostream& operator<<(ostream& out, const descriptor::Tensor& tensor)
 {
-    out << "Tensor(" << tensor.get_name() << ")";
+    std::string names;
+    for (const auto& name : tensor.get_names())
+    {
+        if (!names.empty())
+            names += ", ";
+        names += name;
+    }
+    NGRAPH_SUPPRESS_DEPRECATED_START
+    if (names.empty())
+        names = tensor.get_name();
+    NGRAPH_SUPPRESS_DEPRECATED_END
+    out << "Tensor(" << names << ")";
     return out;
 }
diff --git a/ngraph/core/src/graph_util.cpp b/ngraph/core/src/graph_util.cpp
index bee2c68c0e3b48..fc011de40a54ac 100644
--- a/ngraph/core/src/graph_util.cpp
+++ b/ngraph/core/src/graph_util.cpp
@@ -924,7 +924,9 @@ bool ngraph::replace_output_update_name(Output<Node> output, const Output<Node>&
     {
         replacement.get_node()->set_friendly_name(output.get_node()->get_friendly_name());
         // Update output tensor name
+        NGRAPH_SUPPRESS_DEPRECATED_START
         replacement.get_tensor().set_name(output.get_node()->get_friendly_name());
+        NGRAPH_SUPPRESS_DEPRECATED_END
     }
 
     output.replace(replacement);
     copy_runtime_info({replacement.get_node_shared_ptr(), output.get_node_shared_ptr()},
diff --git a/ngraph/core/src/node.cpp b/ngraph/core/src/node.cpp
index b74ee51111e254..6aad6d0d501a7a 100644
--- a/ngraph/core/src/node.cpp
+++ b/ngraph/core/src/node.cpp
@@ -143,6 +143,10 @@ std::shared_ptr<Node> Node::copy_with_new_inputs(
     {
         clone->add_control_dependency(cdep);
     }
+    for (size_t i = 0; i < get_output_size(); i++)
+    {
+        clone->get_output_tensor(i).set_names(get_output_tensor(i).get_names());
+    }
     return clone;
 }
 
@@ -658,13 +662,6 @@ descriptor::Tensor& Node::get_input_tensor(size_t i) const
     return input.get_tensor();
 }
 
-const string& Node::get_output_tensor_name(size_t i) const
-{
-    NGRAPH_CHECK(
-        i < m_outputs.size(), "index '", i, "' out of range in get_output_tensor_name(size_t i)");
-    return m_outputs[i].get_tensor().get_name();
-}
-
 size_t Node::get_input_size() const
 {
     return m_inputs.size();
 }
@@ -690,6 +687,7 @@ const PartialShape& Node::get_input_partial_shape(size_t i) const
     return m_inputs[i].get_partial_shape();
 }
 
+NGRAPH_SUPPRESS_DEPRECATED_START
 const string& Node::get_input_tensor_name(size_t i) const
 {
     NGRAPH_CHECK(
@@ -697,6 +695,14 @@ const string& Node::get_input_tensor_name(size_t i) const
     return m_inputs[i].get_tensor().get_name();
 }
 
+const string& Node::get_output_tensor_name(size_t i) const
+{
+    NGRAPH_CHECK(
+        i < m_outputs.size(), "index '", i, "' out of range in get_output_tensor_name(size_t i)");
+    return m_outputs[i].get_tensor().get_name();
+}
+NGRAPH_SUPPRESS_DEPRECATED_END
+
 bool Node::has_same_type(std::shared_ptr<const Node> node) const
 {
     if (get_output_size() != node->get_output_size())
diff --git a/ngraph/core/src/runtime/host_tensor.cpp b/ngraph/core/src/runtime/host_tensor.cpp
index 2c5de03136e8ed..da996869442eb9 100644
--- a/ngraph/core/src/runtime/host_tensor.cpp
+++ b/ngraph/core/src/runtime/host_tensor.cpp
@@ -65,10 +65,12 @@ runtime::HostTensor::HostTensor(const std::string& name)
 {
 }
 
+NGRAPH_SUPPRESS_DEPRECATED_START
 runtime::HostTensor::HostTensor(const Output<Node>& value)
     : HostTensor(value.get_element_type(), value.get_partial_shape(), value.get_tensor().get_name())
 {
 }
+NGRAPH_SUPPRESS_DEPRECATED_END
 
 void runtime::HostTensor::allocate_buffer()
 {
@@ -101,11 +103,13 @@ void runtime::HostTensor::allocate_buffer()
     }
 }
 
+NGRAPH_SUPPRESS_DEPRECATED_START
 runtime::HostTensor::HostTensor(const std::shared_ptr<op::v0::Constant>& constant)
     : HostTensor(constant->output(0).get_tensor().get_name())
 {
     initialize(constant);
 }
+NGRAPH_SUPPRESS_DEPRECATED_END
 
 void runtime::HostTensor::initialize(const std::shared_ptr<op::v0::Constant>& constant)
 {
diff --git a/ngraph/core/src/runtime/tensor.cpp b/ngraph/core/src/runtime/tensor.cpp
index e5da131c7f3781..21e9c328a24d16 100644
--- a/ngraph/core/src/runtime/tensor.cpp
+++ b/ngraph/core/src/runtime/tensor.cpp
@@ -49,7 +49,9 @@ size_t runtime::Tensor::get_size_in_bytes() const
 
 const std::string& runtime::Tensor::get_name() const
 {
+    NGRAPH_SUPPRESS_DEPRECATED_START
     return m_descriptor->get_name();
+    NGRAPH_SUPPRESS_DEPRECATED_END
 }
 
 bool runtime::Tensor::get_stale() const
diff --git a/ngraph/test/tensor.cpp b/ngraph/test/tensor.cpp
index 216831dc0de8ce..be9cc26ab1f180 100644
--- a/ngraph/test/tensor.cpp
+++ b/ngraph/test/tensor.cpp
@@ -23,6 +23,7 @@
 #include "gtest/gtest.h"
 #include "ngraph/function.hpp"
 #include "ngraph/ngraph.hpp"
+#include "ngraph/opsets/opset6.hpp"
 #include "ngraph/pass/manager.hpp"
 #include "pass/liveness.hpp"
 #include "util/test_tools.hpp"
@@ -91,3 +92,23 @@ TEST(tensor, output_flag)
         EXPECT_TRUE(op::is_output(f0->get_output_op(i)));
     }
 }
+
+TEST(tensor, tensor_names)
+{
+    auto arg0 = make_shared<opset6::Parameter>(element::f32, Shape{1});
+    arg0->set_friendly_name("data");
+    arg0->get_output_tensor(0).set_names({"input"});
+
+    auto relu = make_shared<opset6::Relu>(arg0);
+    relu->set_friendly_name("relu");
+    relu->get_output_tensor(0).set_names({"relu_t", "identity"});
+    auto f0 = make_shared<Function>(relu, ParameterVector{arg0});
+
+    ASSERT_EQ(arg0->get_output_tensor(0).get_names(), relu->get_input_tensor(0).get_names());
+    ASSERT_EQ(arg0->get_output_tensor(0).get_names(),
+              relu->input_value(0).get_tensor().get_names());
+    ASSERT_EQ(f0->get_result()->get_input_tensor(0).get_names(),
+              relu->get_output_tensor(0).get_names());
+    ASSERT_EQ(f0->get_result()->input_value(0).get_tensor().get_names(),
+              relu->get_output_tensor(0).get_names());
+}
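The Node::copy_with_new_inputs change earlier in this patch means the new tensor names survive graph cloning; a short sketch of that guarantee (opset6 assumed):

    #include <cassert>
    #include <ngraph/ngraph.hpp>
    #include <ngraph/opsets/opset6.hpp>

    int main() {
        using namespace ngraph;
        auto arg = std::make_shared<opset6::Parameter>(element::f32, Shape{1});
        auto relu = std::make_shared<opset6::Relu>(arg);
        relu->get_output_tensor(0).set_names({"relu_t", "identity"});

        // copy_with_new_inputs copies output tensor names onto the clone.
        auto clone = relu->copy_with_new_inputs({arg});
        assert(clone->get_output_tensor(0).get_names() == relu->get_output_tensor(0).get_names());
        return 0;
    }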