Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Introduce the Broker API to map original framework names to OV #3800

Merged
merged 37 commits into from
Jan 29, 2021
Merged
Show file tree
Hide file tree
Changes from 30 commits
Commits
Show all changes
37 commits
Select commit Hold shift + click to select a range
7a37909
Added tests
ilyachur Jan 11, 2021
79554e3
Fixed tests
ilyachur Jan 12, 2021
c082766
Added tests to check addOutput method
ilyachur Jan 13, 2021
d553a24
Added support of port names in the IR
ilyachur Jan 13, 2021
af70e10
Update copyrights
ilyachur Jan 13, 2021
f8833ba
Deprecate tensor name
ilyachur Jan 13, 2021
3bcf085
Fixed comments
ilyachur Jan 13, 2021
c425dd2
Enabled functional tests for GPU, GNA and Myriad
ilyachur Jan 13, 2021
fa08d64
Fixed get_tensor().get_names()
ilyachur Jan 18, 2021
35858cf
Added unit test to check tensor names
ilyachur Jan 18, 2021
364dd34
Fixed code style
ilyachur Jan 18, 2021
71b41df
Merge remote-tracking branch 'upstream/master' into brocker_names
ilyachur Jan 19, 2021
ed0866c
Merge remote-tracking branch 'upstream/master' into brocker_names
ilyachur Jan 19, 2021
d417109
Merge remote-tracking branch 'upstream/master' into brocker_names
ilyachur Jan 21, 2021
cdfb8f1
Skip add output test for GNA
ilyachur Jan 21, 2021
f2cace9
Added serialization support
ilyachur Jan 21, 2021
1830a6d
Added PythonAPI
ilyachur Jan 21, 2021
77a120e
Merge remote-tracking branch 'upstream/master' into brocker_names
ilyachur Jan 21, 2021
635c17a
Fixed tests
ilyachur Jan 21, 2021
0dff1bf
Fixed tests
ilyachur Jan 22, 2021
4683b0f
Merge remote-tracking branch 'upstream/master' into brocker_names
ilyachur Jan 22, 2021
8214aa1
Fixed typo
ilyachur Jan 22, 2021
0e1af02
Merge remote-tracking branch 'upstream/master' into brocker_names
ilyachur Jan 22, 2021
9a46250
Merge remote-tracking branch 'upstream/master' into brocker_names
ilyachur Jan 22, 2021
f521fde
Try to disable GNA test
ilyachur Jan 22, 2021
a0dd435
Fixed tests
ilyachur Jan 22, 2021
e4b1c9e
Merge remote-tracking branch 'upstream/master' into brocker_names
ilyachur Jan 22, 2021
1f96ba0
Merge remote-tracking branch 'upstream/master' into brocker_names
ilyachur Jan 25, 2021
0360347
Merge remote-tracking branch 'upstream/master' into brocker_names
ilyachur Jan 25, 2021
3f30bea
Removed unused variables
ilyachur Jan 25, 2021
1050816
Fixed tests
ilyachur Jan 26, 2021
d337c50
Merge remote-tracking branch 'upstream/master' into brocker_names
ilyachur Jan 26, 2021
8e9a9aa
Merge remote-tracking branch 'upstream/master' into brocker_names
ilyachur Jan 26, 2021
3ed3f57
Update documentation
ilyachur Jan 26, 2021
5f041ca
Merge remote-tracking branch 'upstream/master' into brocker_names
ilyachur Jan 28, 2021
0282c87
Merge remote-tracking branch 'upstream/master' into brocker_names
ilyachur Jan 28, 2021
61876eb
Fixed comment
ilyachur Jan 28, 2021
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -1439,6 +1439,12 @@ cdef class IENetwork:
def _get_function_capsule(self):
return self.impl.getFunction()

def get_ov_name_for_tensor(self, orig_name: str):
    """Map an original framework tensor name to the OpenVINO name.

    :param orig_name: Tensor name from the original framework model.
    :return: The OpenVINO name the tensor was mapped to.
    """
    return self.impl.getOVNameForTensor(orig_name)

def get_ov_name_for_operation(self, orig_name: str):
    """Map an original framework operation name to the OpenVINO name.

    :param orig_name: Operation name from the original framework model.
    :return: The OpenVINO name the operation was mapped to.
    """
    return self.impl.getOVNameForOperation(orig_name)

cdef class BlobBuffer:
"""Copy-less accessor for Inference Engine Blob"""

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -260,6 +260,14 @@ const std::map <std::string, InferenceEngine::DataPtr> InferenceEnginePython::IE
return outputs;
}

// Maps an original framework tensor name to the OpenVINO name by delegating
// to the wrapped InferenceEngine::CNNNetwork.
std::string InferenceEnginePython::IENetwork::getOVNameForTensor(const std::string& orig_name) {
    return actual->getOVNameForTensor(orig_name);
}

// Maps an original framework operation name to the OpenVINO name by delegating
// to the wrapped InferenceEngine::CNNNetwork.
std::string InferenceEnginePython::IENetwork::getOVNameForOperation(const std::string& orig_name) {
    return actual->getOVNameForOperation(orig_name);
}

void
InferenceEnginePython::IENetwork::addOutput(const std::string &out_layer, size_t port_id) {
actual->addOutput(out_layer, port_id);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -71,6 +71,9 @@ struct IENetwork {
IENetwork() = default;

void convertToOldRepresentation();

std::string getOVNameForTensor(const std::string& orig_name);
std::string getOVNameForOperation(const std::string& orig_name);
};


Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -175,6 +175,8 @@ cdef extern from "ie_api_impl.hpp" namespace "InferenceEnginePython":
void load_from_buffer(const char*xml, size_t xml_size, uint8_t*bin, size_t bin_size) except +
object getFunction() except +
void convertToOldRepresentation() except +
string getOVNameForTensor(const string &) except +
string getOVNameForOperation(const string &) except +

cdef cppclass InferRequestWrap:
double exec_time;
Expand Down
57 changes: 57 additions & 0 deletions inference-engine/ie_bridges/python/tests/test_IENetwork.py
Original file line number Diff line number Diff line change
Expand Up @@ -247,3 +247,60 @@ def test_multi_out_data():
assert net.outputs["28/Reshape"].name == "28/Reshape" and net.outputs["28/Reshape"].shape == [1, 5184]
assert net.outputs["fc_out"].name == "fc_out" and net.outputs["fc_out"].shape == [1, 10]
pass

def test_tensor_names():
    # IR with explicit tensor names: the Parameter output is named "input",
    # the ReLU output carries two names, "relu_t" and "identity_t".
    model = """
<net name="Network" version="10">
<layers>
<layer name="in1" type="Parameter" id="0" version="opset1">
<data element_type="f32" shape="1,3,22,22"/>
<output>
<port id="0" precision="FP32" names="input">
<dim>1</dim>
<dim>3</dim>
<dim>22</dim>
<dim>22</dim>
</port>
</output>
</layer>
<layer name="activation" id="1" type="ReLU" version="opset1">
<input>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>3</dim>
<dim>22</dim>
<dim>22</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="relu_t, identity_t">
<dim>1</dim>
<dim>3</dim>
<dim>22</dim>
<dim>22</dim>
</port>
</output>
</layer>
<layer name="output" type="Result" id="2" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>3</dim>
<dim>22</dim>
<dim>22</dim>
</port>
</input>
</layer>
</layers>
<edges>
<edge from-layer="0" from-port="0" to-layer="1" to-port="1"/>
<edge from-layer="1" from-port="2" to-layer="2" to-port="0"/>
</edges>
</net>
"""
    ie = IECore()
    net = ie.read_network(model=model, init_from_buffer=True)
    # Fix: the IR declares "relu_t" (not "retu_t"); the original assert would
    # always fail with a not-found error for the misspelled name.
    assert net.get_ov_name_for_tensor("relu_t") == "activation"
    assert net.get_ov_name_for_tensor("identity_t") == "activation"
    assert net.get_ov_name_for_tensor("input") == "in1"
    # The Result operation "output" maps to the OpenVINO data it consumes.
    assert net.get_ov_name_for_operation("output") == "activation"
28 changes: 27 additions & 1 deletion inference-engine/include/cpp/ie_cnn_network.h
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
// Copyright (C) 2018-2020 Intel Corporation
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

Expand Down Expand Up @@ -189,6 +189,32 @@ class INFERENCE_ENGINE_API_CLASS(CNNNetwork) {
*/
void serialize(const std::string& xmlPath, const std::string& binPath = {}) const;

/**
 * @brief Maps an original framework tensor name to the OpenVINO name
 *
 * @param orig_name Framework tensor name
 *
 * @return OpenVINO name
 */
std::string getOVNameForTensor(const std::string& orig_name) const {
    std::string ov_name;
    // NOTE(review): CALL_STATUS_FNC forwards to ICNNNetwork::getOVNameForTensor;
    // it presumably converts a non-OK status into an exception — confirm macro semantics.
    CALL_STATUS_FNC(getOVNameForTensor, ov_name, orig_name);
    return ov_name;
}

/**
 * @brief Maps an original framework operation name to the OpenVINO name
 *
 * @param orig_name Framework operation name
 *
 * @return OpenVINO name
 */
std::string getOVNameForOperation(const std::string& orig_name) const {
    std::string ov_name;
    // NOTE(review): CALL_STATUS_FNC forwards to ICNNNetwork::getOVNameForOperation;
    // it presumably converts a non-OK status into an exception — confirm macro semantics.
    CALL_STATUS_FNC(getOVNameForOperation, ov_name, orig_name);
    return ov_name;
}

protected:
IE_SUPPRESS_DEPRECATED_START
/**
Expand Down
34 changes: 33 additions & 1 deletion inference-engine/include/ie_icnn_network.hpp
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
// Copyright (C) 2018-2020 Intel Corporation
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

Expand Down Expand Up @@ -179,6 +179,38 @@ class INFERENCE_ENGINE_ICNNNETWORK_CLASS(ICNNNetwork) : public details::IRelease
virtual StatusCode serialize(const std::string& xmlPath, const std::string& binPath, ResponseDesc* resp) const
noexcept = 0;

/**
 * @brief Maps a framework tensor name to the OpenVINO name
 *
 * @param ov_name OpenVINO name (output parameter)
 * @param orig_name Framework tensor name
 * @param resp Pointer to the response message that holds a description of an error if any occurred
 *
 * @return Status code of the operation
 */
virtual StatusCode getOVNameForTensor(std::string& ov_name, const std::string& orig_name, ResponseDesc* resp) const noexcept {
    // Default implementation for networks that do not track original framework
    // names; the casts silence unused-parameter warnings.
    (void) ov_name;
    (void) orig_name;
    (void) resp;
    return NOT_IMPLEMENTED;
}

/**
 * @brief Maps a framework operation name to the OpenVINO name
 *
 * @param ov_name OpenVINO name (output parameter)
 * @param orig_name Framework operation name
 * @param resp Pointer to the response message that holds a description of an error if any occurred
 *
 * @return Status code of the operation
 */
virtual StatusCode getOVNameForOperation(std::string& ov_name, const std::string& orig_name, ResponseDesc* resp) const noexcept {
    // Default implementation for networks that do not track original framework
    // names; the casts silence unused-parameter warnings.
    (void) ov_name;
    (void) orig_name;
    (void) resp;
    return NOT_IMPLEMENTED;
}

/**
* @brief A virtual destructor.
*/
Expand Down
2 changes: 2 additions & 0 deletions inference-engine/src/cldnn_engine/ops/result.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,9 @@ void CreateResultOp(Program& p, const std::shared_ptr<ngraph::op::v0::Result>& o
p.ValidateInputs(op, {1});

auto prev = op->get_input_node_shared_ptr(0);
NGRAPH_SUPPRESS_DEPRECATED_START
auto inputID = op->get_input_source_output(0).get_tensor().get_name();
NGRAPH_SUPPRESS_DEPRECATED_END
if (inputID.empty()) {
inputID = prev->get_friendly_name();
if (prev->get_output_size() > 1) {
Expand Down
2 changes: 2 additions & 0 deletions inference-engine/src/cldnn_engine/ops/split.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@ void CreateCommonSplitOp(Program& p, const std::shared_ptr<ngraph::Node>& op) {
for (size_t i = 0; i < op->get_output_size(); i++) {
std::string outLayerName = layerName + (is_single_out_split ? "" : "." + std::to_string(i));
const auto outLayerDims = op->get_output_shape(i);
NGRAPH_SUPPRESS_DEPRECATED_START
if (outLayerDims.size() != startOffset.size()) {
THROW_IE_EXCEPTION << "Invalid dimesions in split layer: " << op->get_friendly_name()
<< " output: " << op->get_output_tensor_name(i);
Expand All @@ -34,6 +35,7 @@ void CreateCommonSplitOp(Program& p, const std::shared_ptr<ngraph::Node>& op) {
<< " output: " << op->get_output_tensor_name(i);
}
}
NGRAPH_SUPPRESS_DEPRECATED_END

auto outTensor = CldnnTensorFromIEDims(outLayerDims, 1);
auto offsetTensor = CldnnTensorFromIEDims(startOffset, 0);
Expand Down
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
// Copyright (C) 2018-2020 Intel Corporation
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

Expand Down Expand Up @@ -122,6 +122,12 @@ CNNNetworkNGraphImpl::CNNNetworkNGraphImpl(
std::string outName = layer->get_friendly_name();
IE_ASSERT(layer->get_output_size() == 1); // Parameter as only singly output port

// map original names to OpenVINO name
_opNames[outName] = outName;
for (const auto& name : layer->get_output_tensor(0).get_names()) {
_tensorNames[name] = outName;
}

DataPtr& ptr = _data[outName];
IE_ASSERT(ptr); // Data must be allocated after the reshape method

Expand All @@ -139,14 +145,20 @@ CNNNetworkNGraphImpl::CNNNetworkNGraphImpl(
}

CNNNetworkNGraphImpl::CNNNetworkNGraphImpl(const CNNNetwork& network) {
if (network.getFunction() == nullptr) {
IE_SUPPRESS_DEPRECATED_START
const ICNNNetwork& iNetwork = network;
const auto net = dynamic_cast<const CNNNetworkNGraphImpl*>(&iNetwork);
if (network.getFunction() == nullptr || !net) {
THROW_IE_EXCEPTION << "Cannot create CNNNetwork with nGraph from legacy network format!";
}

_ngraph_function = copyFunction(network.getFunction(), false);
InputsDataMap inputs = network.getInputsInfo();
OutputsDataMap outputs = network.getOutputsInfo();

_opNames = net->_opNames;
_tensorNames = net->_tensorNames;

for (const auto& outputInfo : outputs) {
const auto& name = outputInfo.second->getName();
DataPtr output = std::make_shared<Data>(name, outputInfo.second->getTensorDesc());
Expand All @@ -164,6 +176,7 @@ CNNNetworkNGraphImpl::CNNNetworkNGraphImpl(const CNNNetwork& network) {
info->setLayout(inputInfo.second->getLayout());
_inputData[name] = info;
}
IE_SUPPRESS_DEPRECATED_END
}

void CNNNetworkNGraphImpl::setInputInfo(InputInfo::Ptr data) {
Expand Down Expand Up @@ -204,19 +217,22 @@ StatusCode CNNNetworkNGraphImpl::addOutput(const std::string& layerName, size_t

try {
for (const auto & layer : _ngraph_function->get_ops()) {
if (layer->get_friendly_name() == layerName) {
// Result can have the same name as previous operation
if (layer->get_friendly_name() == layerName && !std::dynamic_pointer_cast<ngraph::op::Result>(layer)) {
std::string outputName = layerName;
if (layer->outputs().size() != 1) {
outputName += "." + std::to_string(outputIndex);
}

// Check that we don't have a result for the output port
for (const auto& port : layer->output(outputIndex).get_target_inputs()) {
if (dynamic_cast<ngraph::op::Result*>(port.get_node()))
return OK;
}
auto result = make_shared<::ngraph::op::Result>(layer->output(outputIndex));
result->set_friendly_name(outputName);
_ngraph_function->add_results({result});

std::string outputName = layerName;
if (layer->outputs().size() != 1) {
outputName += "." + std::to_string(outputIndex);
}
if (_outputData.count(outputName) == 0) {
reshape();
}
Expand All @@ -237,6 +253,17 @@ void CNNNetworkNGraphImpl::addOutput(const ::ngraph::Output<::ngraph::Node> & ou
createDataForResult(output, dataName, data);
_data[dataName] = data;
_outputData[dataName] = data;

// Save original framework names
for (const auto& name : output.get_tensor().get_names()) {
_tensorNames[name] = dataName;
}
for (const auto consumerInput : output.get_target_inputs()) {
const auto &consumerLayer = consumerInput.get_node()->shared_from_this();
if (std::dynamic_pointer_cast<ngraph::op::Result>(consumerLayer)) {
_opNames[consumerLayer->get_friendly_name()] = dataName;
}
}
}

size_t CNNNetworkNGraphImpl::getBatchSize() const noexcept {
Expand Down Expand Up @@ -391,7 +418,7 @@ StatusCode CNNNetworkNGraphImpl::serialize(const std::string& xmlPath,
ResponseDesc* resp) const noexcept {
try {
std::map<std::string, ngraph::OpSet> custom_opsets;
for (auto extension : _ie_extensions) {
for (const auto& extension : _ie_extensions) {
auto opset = extension->getOpSets();
custom_opsets.insert(begin(opset), end(opset));
}
Expand All @@ -410,6 +437,20 @@ StatusCode CNNNetworkNGraphImpl::serialize(const std::string& xmlPath,
return OK;
}

// Maps a framework tensor name to the OpenVINO data name recorded in _tensorNames.
// Returns NOT_FOUND (with a description in resp) when the name was never mapped.
StatusCode CNNNetworkNGraphImpl::getOVNameForTensor(std::string& ov_name, const std::string& orig_name, ResponseDesc* resp) const noexcept {
    // Single hash lookup instead of the original find() followed by at().
    const auto it = _tensorNames.find(orig_name);
    if (it == _tensorNames.end())
        return DescriptionBuffer(NOT_FOUND, resp) << "Framework tensor with name \"" << orig_name << "\" was not mapped to OpenVINO data!";
    ov_name = it->second;
    return OK;
}

// Maps a framework operation name to the OpenVINO data name recorded in _opNames.
// Returns NOT_FOUND (with a description in resp) when the name was never mapped.
StatusCode CNNNetworkNGraphImpl::getOVNameForOperation(std::string& ov_name, const std::string& orig_name, ResponseDesc* resp) const noexcept {
    // Single hash lookup instead of the original find() followed by at().
    const auto it = _opNames.find(orig_name);
    if (it == _opNames.end())
        return DescriptionBuffer(NOT_FOUND, resp) << "Framework operation with name \"" << orig_name << "\" was not mapped to OpenVINO data!";
    ov_name = it->second;
    return OK;
}

StatusCode CNNNetworkNGraphImpl::setBatchSize(size_t size, ResponseDesc* responseDesc) noexcept {
try {
if (getBatchSize() == size) return OK;
Expand Down
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
// Copyright (C) 2018-2020 Intel Corporation
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

Expand All @@ -11,6 +11,7 @@

#include <algorithm>
#include <functional>
#include <unordered_map>
#include <map>
#include <memory>
#include <string>
Expand Down Expand Up @@ -81,6 +82,10 @@ class INFERENCE_ENGINE_API_CLASS(CNNNetworkNGraphImpl): public ICNNNetwork {
StatusCode serialize(const std::string& xmlPath, const std::string& binPath, ResponseDesc* resp) const
noexcept override;

StatusCode getOVNameForTensor(std::string& ov_name, const std::string& orig_name, ResponseDesc* resp) const noexcept override;

StatusCode getOVNameForOperation(std::string& ov_name, const std::string& orig_name, ResponseDesc* resp) const noexcept override;

// used by convertFunctionToICNNNetwork from legacy library
std::map<std::string, DataPtr> _data;
protected:
Expand All @@ -91,6 +96,8 @@ class INFERENCE_ENGINE_API_CLASS(CNNNetworkNGraphImpl): public ICNNNetwork {
InferenceEngine::InputsDataMap _inputData;
std::map<std::string, DataPtr> _outputData;
const std::vector<IExtensionPtr> _ie_extensions;
std::unordered_map<std::string, std::string> _opNames;
std::unordered_map<std::string, std::string> _tensorNames;
ilya-lavrenov marked this conversation as resolved.
Show resolved Hide resolved

/**
* @brief Create DataPtr for nGraph operation
Expand Down
Loading