Introduce the Broker API to map original framework names to OV (openvinotoolkit#3800)

* Added tests

* Fixed tests

* Added tests to check addOutput method

* Added support of port names in the IR

* Update copyrights

* Deprecate tensor name

* Fixed comments

* Enabled functional tests for GPU, GNA and Myriad

* Fixed get_tensor().get_names()

* Added unit test to check tensor names

* Fixed code style

* Skip add output test for GNA

* Added serialization support

* Added PythonAPI

* Fixed tests

* Fixed tests

* Fixed typo

* Try to disable GNA test

* Fixed tests

* Removed unused variables

* Fixed tests

* Update documentation

* Fixed comment
ilyachur authored and Egor Shulman committed Feb 1, 2021
1 parent 0b66b25 commit c67851b
Showing 39 changed files with 788 additions and 31 deletions.
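A quick usage sketch of the new Broker API from the Python side. The tensor and operation names below mirror the test IR added in this commit; the model paths are placeholders, and any IR v10 model whose ports carry the new "names" attribute should work the same way:

    import numpy as np
    from openvino.inference_engine import IECore

    ie = IECore()
    # "model.xml"/"model.bin" are hypothetical paths for an IR v10 model.
    net = ie.read_network(model="model.xml", weights="model.bin")

    # Map original framework names to the OpenVINO names tracked by this commit.
    ov_input = net.get_ov_name_for_tensor("input")        # e.g. -> "in1"
    ov_output = net.get_ov_name_for_operation("output")   # e.g. -> "activation"

    # The resolved names are the keys the blob-based APIs understand.
    exec_net = ie.load_network(network=net, device_name="CPU")
    result = exec_net.infer({ov_input: np.zeros((1, 3, 22, 22), dtype=np.float32)})
    print(result[ov_output].shape)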
@@ -1439,6 +1439,14 @@ cdef class IENetwork:
def _get_function_capsule(self):
return self.impl.getFunction()

def get_ov_name_for_tensor(self, orig_name: str):
name = bytes(orig_name, 'utf-8')
return self.impl.getOVNameForTensor(name).decode('utf-8')

def get_ov_name_for_operation(self, orig_name: str):
name = bytes(orig_name, 'utf-8')
return self.impl.getOVNameForOperation(name).decode('utf-8')

cdef class BlobBuffer:
"""Copy-less accessor for Inference Engine Blob"""

@@ -260,6 +260,14 @@ const std::map <std::string, InferenceEngine::DataPtr> InferenceEnginePython::IE
return outputs;
}

std::string InferenceEnginePython::IENetwork::getOVNameForTensor(const std::string& orig_name) {
return actual->getOVNameForTensor(orig_name);
}

std::string InferenceEnginePython::IENetwork::getOVNameForOperation(const std::string& orig_name) {
return actual->getOVNameForOperation(orig_name);
}

void
InferenceEnginePython::IENetwork::addOutput(const std::string &out_layer, size_t port_id) {
actual->addOutput(out_layer, port_id);
@@ -71,6 +71,9 @@ struct IENetwork {
IENetwork() = default;

void convertToOldRepresentation();

std::string getOVNameForTensor(const std::string& orig_name);
std::string getOVNameForOperation(const std::string& orig_name);
};


@@ -175,6 +175,8 @@ cdef extern from "ie_api_impl.hpp" namespace "InferenceEnginePython":
void load_from_buffer(const char*xml, size_t xml_size, uint8_t*bin, size_t bin_size) except +
object getFunction() except +
void convertToOldRepresentation() except +
string getOVNameForTensor(const string &) except +
string getOVNameForOperation(const string &) except +

cdef cppclass InferRequestWrap:
double exec_time;
58 changes: 58 additions & 0 deletions inference-engine/ie_bridges/python/tests/test_IENetwork.py
@@ -247,3 +247,61 @@ def test_multi_out_data():
assert net.outputs["28/Reshape"].name == "28/Reshape" and net.outputs["28/Reshape"].shape == [1, 5184]
assert net.outputs["fc_out"].name == "fc_out" and net.outputs["fc_out"].shape == [1, 10]
pass

def test_tensor_names():
model = """
<net name="Network" version="10">
<layers>
<layer name="in1" type="Parameter" id="0" version="opset1">
<data element_type="f32" shape="1,3,22,22"/>
<output>
<port id="0" precision="FP32" names="input">
<dim>1</dim>
<dim>3</dim>
<dim>22</dim>
<dim>22</dim>
</port>
</output>
</layer>
<layer name="activation" id="1" type="ReLU" version="opset1">
<input>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>3</dim>
<dim>22</dim>
<dim>22</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="relu_t, identity_t">
<dim>1</dim>
<dim>3</dim>
<dim>22</dim>
<dim>22</dim>
</port>
</output>
</layer>
<layer name="output" type="Result" id="2" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>3</dim>
<dim>22</dim>
<dim>22</dim>
</port>
</input>
</layer>
</layers>
<edges>
<edge from-layer="0" from-port="0" to-layer="1" to-port="1"/>
<edge from-layer="1" from-port="2" to-layer="2" to-port="0"/>
</edges>
</net>
"""
ie = IECore()
weights = b''
net = ie.read_network(model=model.encode('utf-8'), weights=weights, init_from_buffer=True)
assert net.get_ov_name_for_tensor("relu_t") == "activation"
assert net.get_ov_name_for_tensor("identity_t") == "activation"
assert net.get_ov_name_for_tensor("input") == "in1"
assert net.get_ov_name_for_operation("output") == "activation"
28 changes: 27 additions & 1 deletion inference-engine/include/cpp/ie_cnn_network.h
@@ -1,4 +1,4 @@
// Copyright (C) 2018-2020 Intel Corporation
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

@@ -189,6 +189,32 @@ class INFERENCE_ENGINE_API_CLASS(CNNNetwork) {
*/
void serialize(const std::string& xmlPath, const std::string& binPath = {}) const;

/**
* @brief Method maps framework tensor name to OpenVINO name
*
* @param orig_name Framework tensor name
*
* @return OpenVINO name
*/
std::string getOVNameForTensor(const std::string& orig_name) const {
std::string ov_name;
CALL_STATUS_FNC(getOVNameForTensor, ov_name, orig_name);
return ov_name;
}

/**
* @brief Method maps framework operation name to OpenVINO name
*
* @param orig_name Framework operation name
*
* @return OpenVINO name
*/
std::string getOVNameForOperation(const std::string& orig_name) const {
std::string ov_name;
CALL_STATUS_FNC(getOVNameForOperation, ov_name, orig_name);
return ov_name;
}

protected:
IE_SUPPRESS_DEPRECATED_START
/**
43 changes: 40 additions & 3 deletions inference-engine/include/ie_icnn_network.hpp
@@ -1,4 +1,4 @@
// Copyright (C) 2018-2020 Intel Corporation
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

@@ -69,9 +69,11 @@ class INFERENCE_ENGINE_ICNNNETWORK_CLASS(ICNNNetwork) : public details::IRelease
*
* For single and multiple outputs networks.
*
* This method need to be called to find output names for using them later
* This method needs to be called to find out OpenVINO output names for using them later
* when calling InferenceEngine::InferRequest::GetBlob or InferenceEngine::InferRequest::SetBlob
*
* If you want to use framework names, you can use InferenceEngine::ICNNNetwork::getOVNameForTensor or
* InferenceEngine::ICNNNetwork::getOVNameForOperation methods to map framework names to OpenVINO names
*
* @param out Reference to the OutputsDataMap object
*/
@@ -82,9 +84,12 @@ class INFERENCE_ENGINE_ICNNNETWORK_CLASS(ICNNNetwork) : public details::IRelease
* object.
*
* For single and multiple inputs networks.
* This method need to be called to find out input names for using them later
* This method needs to be called to find out OpenVINO input names for using them later
* when calling InferenceEngine::InferRequest::SetBlob
*
* If you want to use framework names, you can use InferenceEngine::ICNNNetwork::getOVNameForTensor or
* InferenceEngine::ICNNNetwork::getOVNameForOperation methods to map framework names to OpenVINO names
*
* @param inputs Reference to InputsDataMap object.
*/
virtual void getInputsInfo(InputsDataMap& inputs) const noexcept = 0;
@@ -179,6 +184,38 @@ class INFERENCE_ENGINE_ICNNNETWORK_CLASS(ICNNNetwork) : public details::IRelease
virtual StatusCode serialize(const std::string& xmlPath, const std::string& binPath, ResponseDesc* resp) const
noexcept = 0;

/**
* @brief Method maps framework tensor name to OpenVINO name
*
* @param ov_name OpenVINO name
* @param orig_name Framework tensor name
* @param resp Pointer to the response message that holds a description of an error if any occurred
*
* @return Status code of the operation
*/
virtual StatusCode getOVNameForTensor(std::string& ov_name, const std::string& orig_name, ResponseDesc* resp) const noexcept {
(void) ov_name;
(void) orig_name;
(void) resp;
return NOT_IMPLEMENTED;
}

/**
* @brief Method maps framework operation name to OpenVINO name
*
* @param ov_name OpenVINO name
* @param orig_name Framework operation name
* @param resp Pointer to the response message that holds a description of an error if any occurred
*
* @return Status code of the operation
*/
virtual StatusCode getOVNameForOperation(std::string& ov_name, const std::string& orig_name, ResponseDesc* resp) const noexcept {
(void) ov_name;
(void) orig_name;
(void) resp;
return NOT_IMPLEMENTED;
}

/**
* @brief A virtual destructor.
*/
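Because the base-class defaults above return NOT_IMPLEMENTED, and the nGraph implementation further below returns NOT_FOUND for unmapped names, a lookup can fail at runtime. A minimal defensive wrapper on the Python side, assuming the `except +` declarations in the .pxd surface a non-OK status as a RuntimeError (an assumption, not stated in this diff):

    def resolve_ov_tensor_name(net, framework_name):
        """Map a framework tensor name to its OpenVINO name, or return None.

        Assumes the Cython binding (declared `except +`) converts the
        NOT_FOUND/NOT_IMPLEMENTED status codes into a Python RuntimeError.
        """
        try:
            return net.get_ov_name_for_tensor(framework_name)
        except RuntimeError:
            # No mapping was registered for this name (or the backing
            # network does not implement the Broker API).
            return None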
2 changes: 2 additions & 0 deletions inference-engine/src/cldnn_engine/ops/result.cpp
@@ -18,7 +18,9 @@ void CreateResultOp(Program& p, const std::shared_ptr<ngraph::op::v0::Result>& o
p.ValidateInputs(op, {1});

auto prev = op->get_input_node_shared_ptr(0);
NGRAPH_SUPPRESS_DEPRECATED_START
auto inputID = op->get_input_source_output(0).get_tensor().get_name();
NGRAPH_SUPPRESS_DEPRECATED_END
if (inputID.empty()) {
inputID = prev->get_friendly_name();
if (prev->get_output_size() > 1) {
2 changes: 2 additions & 0 deletions inference-engine/src/cldnn_engine/ops/split.cpp
@@ -24,6 +24,7 @@ void CreateCommonSplitOp(Program& p, const std::shared_ptr<ngraph::Node>& op) {
for (size_t i = 0; i < op->get_output_size(); i++) {
std::string outLayerName = layerName + (is_single_out_split ? "" : "." + std::to_string(i));
const auto outLayerDims = op->get_output_shape(i);
NGRAPH_SUPPRESS_DEPRECATED_START
if (outLayerDims.size() != startOffset.size()) {
THROW_IE_EXCEPTION << "Invalid dimesions in split layer: " << op->get_friendly_name()
<< " output: " << op->get_output_tensor_name(i);
@@ -34,6 +35,7 @@ << " output: " << op->get_output_tensor_name(i);
<< " output: " << op->get_output_tensor_name(i);
}
}
NGRAPH_SUPPRESS_DEPRECATED_END

auto outTensor = CldnnTensorFromIEDims(outLayerDims, 1);
auto offsetTensor = CldnnTensorFromIEDims(startOffset, 0);
57 changes: 49 additions & 8 deletions inference-engine/src/inference_engine/cnn_network_ngraph_impl.cpp
@@ -1,4 +1,4 @@
// Copyright (C) 2018-2020 Intel Corporation
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

@@ -122,6 +122,12 @@ CNNNetworkNGraphImpl::CNNNetworkNGraphImpl(
std::string outName = layer->get_friendly_name();
IE_ASSERT(layer->get_output_size() == 1); // Parameter has only a single output port

// map original names to OpenVINO name
_opNames[outName] = outName;
for (const auto& name : layer->get_output_tensor(0).get_names()) {
_tensorNames[name] = outName;
}

DataPtr& ptr = _data[outName];
IE_ASSERT(ptr); // Data must be allocated after the reshape method

@@ -139,14 +145,20 @@
}

CNNNetworkNGraphImpl::CNNNetworkNGraphImpl(const CNNNetwork& network) {
if (network.getFunction() == nullptr) {
IE_SUPPRESS_DEPRECATED_START
const ICNNNetwork& iNetwork = network;
const auto net = dynamic_cast<const CNNNetworkNGraphImpl*>(&iNetwork);
if (network.getFunction() == nullptr || !net) {
THROW_IE_EXCEPTION << "Cannot create CNNNetwork with nGraph from legacy network format!";
}

_ngraph_function = copyFunction(network.getFunction(), false);
InputsDataMap inputs = network.getInputsInfo();
OutputsDataMap outputs = network.getOutputsInfo();

_opNames = net->_opNames;
_tensorNames = net->_tensorNames;

for (const auto& outputInfo : outputs) {
const auto& name = outputInfo.second->getName();
DataPtr output = std::make_shared<Data>(name, outputInfo.second->getTensorDesc());
@@ -164,6 +176,7 @@ CNNNetworkNGraphImpl::CNNNetworkNGraphImpl(const CNNNetwork& network) {
info->setLayout(inputInfo.second->getLayout());
_inputData[name] = info;
}
IE_SUPPRESS_DEPRECATED_END
}

void CNNNetworkNGraphImpl::setInputInfo(InputInfo::Ptr data) {
@@ -204,19 +217,22 @@ StatusCode CNNNetworkNGraphImpl::addOutput(const std::string& layerName, size_t

try {
for (const auto & layer : _ngraph_function->get_ops()) {
if (layer->get_friendly_name() == layerName) {
// Result can have the same name as previous operation
if (layer->get_friendly_name() == layerName && !std::dynamic_pointer_cast<ngraph::op::Result>(layer)) {
std::string outputName = layerName;
if (layer->outputs().size() != 1) {
outputName += "." + std::to_string(outputIndex);
}

// Check that we don't have a result for the output port
for (const auto& port : layer->output(outputIndex).get_target_inputs()) {
if (dynamic_cast<ngraph::op::Result*>(port.get_node()))
return OK;
}
auto result = make_shared<::ngraph::op::Result>(layer->output(outputIndex));
result->set_friendly_name(outputName);
_ngraph_function->add_results({result});

std::string outputName = layerName;
if (layer->outputs().size() != 1) {
outputName += "." + std::to_string(outputIndex);
}
if (_outputData.count(outputName) == 0) {
reshape();
}
@@ -237,6 +253,17 @@ void CNNNetworkNGraphImpl::addOutput(const ::ngraph::Output<::ngraph::Node> & ou
createDataForResult(output, dataName, data);
_data[dataName] = data;
_outputData[dataName] = data;

// Save original framework names
for (const auto& name : output.get_tensor().get_names()) {
_tensorNames[name] = dataName;
}
for (const auto consumerInput : output.get_target_inputs()) {
const auto &consumerLayer = consumerInput.get_node()->shared_from_this();
if (std::dynamic_pointer_cast<ngraph::op::Result>(consumerLayer)) {
_opNames[consumerLayer->get_friendly_name()] = dataName;
}
}
}

size_t CNNNetworkNGraphImpl::getBatchSize() const noexcept {
@@ -391,7 +418,7 @@ StatusCode CNNNetworkNGraphImpl::serialize(const std::string& xmlPath,
ResponseDesc* resp) const noexcept {
try {
std::map<std::string, ngraph::OpSet> custom_opsets;
for (auto extension : _ie_extensions) {
for (const auto& extension : _ie_extensions) {
auto opset = extension->getOpSets();
custom_opsets.insert(begin(opset), end(opset));
}
@@ -410,6 +437,20 @@ StatusCode CNNNetworkNGraphImpl::serialize(const std::string& xmlPath,
return OK;
}

StatusCode CNNNetworkNGraphImpl::getOVNameForTensor(std::string& ov_name, const std::string& orig_name, ResponseDesc* resp) const noexcept {
if (_tensorNames.find(orig_name) == _tensorNames.end())
return DescriptionBuffer(NOT_FOUND, resp) << "Framework tensor with name \"" << orig_name << "\" was not mapped to OpenVINO data!";
ov_name = _tensorNames.at(orig_name);
return OK;
}

StatusCode CNNNetworkNGraphImpl::getOVNameForOperation(std::string& ov_name, const std::string& orig_name, ResponseDesc* resp) const noexcept {
if (_opNames.find(orig_name) == _opNames.end())
return DescriptionBuffer(NOT_FOUND, resp) << "Framework operation with name \"" << orig_name << "\" was not mapped to OpenVINO data!";
ov_name = _opNames.at(orig_name);
return OK;
}

StatusCode CNNNetworkNGraphImpl::setBatchSize(size_t size, ResponseDesc* responseDesc) noexcept {
try {
if (getBatchSize() == size) return OK;
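The implementation above reduces to two string-to-string tables, _tensorNames and _opNames, populated in the constructor and in addOutput. A toy Python model of the lookup, with entries matching the test IR earlier in this diff:

    # Toy stand-ins for CNNNetworkNGraphImpl::_tensorNames and ::_opNames;
    # the entries mirror the IR used by test_tensor_names above.
    _tensor_names = {"input": "in1", "relu_t": "activation", "identity_t": "activation"}
    _op_names = {"in1": "in1", "output": "activation"}

    def get_ov_name_for_tensor(orig_name: str) -> str:
        # Mirrors the NOT_FOUND path: an unmapped name raises instead of
        # returning a status code.
        if orig_name not in _tensor_names:
            raise KeyError('Framework tensor with name "%s" was not mapped to OpenVINO data!' % orig_name)
        return _tensor_names[orig_name]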