
Commit

Deprecate tensor name
ilyachur committed Jan 13, 2021
1 parent 9fcdd37 commit 7157714
Showing 14 changed files with 62 additions and 26 deletions.
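
Every file in this commit follows the same pattern: the tensor-level name accessors (descriptor::Tensor::get_name/set_name and the Node and runtime::Tensor wrappers around them) are marked NGRAPH_DEPRECATED, and the internal call sites that still rely on them are wrapped in NGRAPH_SUPPRESS_DEPRECATED_START/END so the build stays free of deprecation warnings. A minimal sketch of that call-site pattern, modeled on the result.cpp change below (the helper name and the fallback logic are illustrative, not code from the commit):

#include <memory>
#include <string>
#include <ngraph/deprecated.hpp>
#include <ngraph/node.hpp>

// Reads the (now deprecated) tensor name of a node's first input, silencing
// the deprecation warning, and falls back to the producer's friendly name.
std::string legacy_input_name(const std::shared_ptr<ngraph::Node>& op) {
    NGRAPH_SUPPRESS_DEPRECATED_START
    std::string name = op->get_input_source_output(0).get_tensor().get_name();
    NGRAPH_SUPPRESS_DEPRECATED_END
    if (name.empty()) {
        name = op->get_input_node_shared_ptr(0)->get_friendly_name();
    }
    return name;
}
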
2 changes: 2 additions & 0 deletions inference-engine/src/cldnn_engine/ops/result.cpp
@@ -18,7 +18,9 @@ void CreateResultOp(Program& p, const std::shared_ptr<ngraph::op::v0::Result>& o
p.ValidateInputs(op, {1});

auto prev = op->get_input_node_shared_ptr(0);
NGRAPH_SUPPRESS_DEPRECATED_START
auto inputID = op->get_input_source_output(0).get_tensor().get_name();
NGRAPH_SUPPRESS_DEPRECATED_END
if (inputID.empty()) {
inputID = prev->get_friendly_name();
if (prev->get_output_size() > 1) {
2 changes: 2 additions & 0 deletions inference-engine/src/cldnn_engine/ops/split.cpp
@@ -24,6 +24,7 @@ void CreateCommonSplitOp(Program& p, const std::shared_ptr<ngraph::Node>& op) {
for (size_t i = 0; i < op->get_output_size(); i++) {
std::string outLayerName = layerName + (is_single_out_split ? "" : "." + std::to_string(i));
const auto outLayerDims = op->get_output_shape(i);
NGRAPH_SUPPRESS_DEPRECATED_START
if (outLayerDims.size() != startOffset.size()) {
THROW_IE_EXCEPTION << "Invalid dimesions in split layer: " << op->get_friendly_name()
<< " output: " << op->get_output_tensor_name(i);
@@ -34,6 +35,7 @@ void CreateCommonSplitOp(Program& p, const std::shared_ptr<ngraph::Node>& op) {
<< " output: " << op->get_output_tensor_name(i);
}
}
NGRAPH_SUPPRESS_DEPRECATED_END

auto outTensor = CldnnTensorFromIEDims(outLayerDims, 1);
auto offsetTensor = CldnnTensorFromIEDims(startOffset, 0);
@@ -1,4 +1,4 @@
// Copyright (C) 2018-2020 Intel Corporation
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

@@ -349,7 +349,7 @@ class CNNLayerCreator : public ::ngraph::AttributeVisitor {
void on_adapter(const std::string& name, ::ngraph::ValueAccessor<void*>& adapter) override {
if (std::string(node->get_type_name()) != "Constant") {
const auto data_beg = static_cast<char*>(adapter.get_ptr());
params[name] = std::string(data_beg, adapter.size());
params[name] = std::string(data_beg, adapter.size());
}
}

@@ -661,7 +661,7 @@ InferenceEngine::details::CNNLayerCreator::CNNLayerCreator(const std::shared_ptr
}
res->params["kernel"] = kernel_value;

const auto weightsNode = node->input_value(1).get_node_shared_ptr();
const auto weightsNode = node->input_value(1).get_node_shared_ptr();
if (InferenceEngine::details::addBlob(weightsNode, res, InferenceEngine::details::weights)) {
if (node->inputs().size() == 3) {
const auto biasNode = node->input_value(2).get_node_shared_ptr();
@@ -750,7 +750,7 @@ InferenceEngine::details::CNNLayerCreator::CNNLayerCreator(const std::shared_ptr

const auto biasNode = node->input_value(3).get_node_shared_ptr();
InferenceEngine::details::addBlob(biasNode, res, InferenceEngine::details::biases);

return res;
});

@@ -1315,12 +1315,12 @@ InferenceEngine::details::CNNLayerCreator::CNNLayerCreator(const std::shared_ptr
return res;
});

addSpecificCreator({"NormalizeIE"}, [](const std::shared_ptr<::ngraph::Node> &node,
addSpecificCreator({"NormalizeIE"}, [](const std::shared_ptr<::ngraph::Node> &node,
const std::map<std::string, std::string> &params) -> CNNLayerPtr {
LayerParams attrs = {node->get_friendly_name(), "Normalize",
details::convertPrecision(node->get_output_element_type(0))};
auto res = std::make_shared<InferenceEngine::NormLayer>(attrs);

res->params = params;
res->params["channel_shared"] = res->getBoolStrParamAsIntStr("channel_shared");
res->params["across_spatial"] = res->getBoolStrParamAsIntStr("across_spatial");
@@ -1430,7 +1430,7 @@ InferenceEngine::details::CNNLayerCreator::CNNLayerCreator(const std::shared_ptr
THROW_IE_EXCEPTION << "Interp do not support mode '" << interp_attrs.mode << "'";
}

bool align_corners;
bool align_corners;
auto res = std::make_shared<InferenceEngine::CNNLayer>(attrs);
res->params = params;

@@ -1496,7 +1496,7 @@ InferenceEngine::details::CNNLayerCreator::CNNLayerCreator(const std::shared_ptr
res->params.erase("auto_pad");
}

const auto weightsNode = node->input_value(1).get_node_shared_ptr();
const auto weightsNode = node->input_value(1).get_node_shared_ptr();
if (!keep_constants && InferenceEngine::details::addBlob(weightsNode, res, InferenceEngine::details::weights)) {
if (node->inputs().size() == 3) {
const auto biasNode = node->input_value(2).get_node_shared_ptr();
@@ -1847,7 +1847,9 @@ void convertFunctionToICNNNetwork(const std::shared_ptr<const ::ngraph::Function
cnnLayer->outData.clear();
continue;
}
NGRAPH_SUPPRESS_DEPRECATED_START
auto outName = layer->output(i).get_tensor().get_name();
NGRAPH_SUPPRESS_DEPRECATED_END
if (outName.empty()) {
outName = ngraph::op::util::create_ie_output_name(layer->output(i));
}
@@ -1901,7 +1903,9 @@ void convertFunctionToICNNNetwork(const std::shared_ptr<const ::ngraph::Function
if (std::dynamic_pointer_cast<::ngraph::op::Result>(layer)) {
IE_ASSERT(layer->get_input_size() == 1);
const auto &input = layer->input_value(0);
NGRAPH_SUPPRESS_DEPRECATED_START
auto name = input.get_tensor().get_name();
NGRAPH_SUPPRESS_DEPRECATED_END
if (!name.empty())
cnnNetworkImpl->addOutput(name);
else
@@ -1,4 +1,4 @@
// Copyright (C) 2020 Intel Corporation
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

@@ -129,8 +129,10 @@ bool ngraph::pass::UnrollTensorIterator::run_on_function(std::shared_ptr<ngraph:
copy_runtime_info(ti, concat);

// set output name to Tensor to store it for ngraph to cnn conversion
NGRAPH_SUPPRESS_DEPRECATED_START
concat->output(0).get_tensor().set_name(
op::util::create_ie_output_name(ti->output(concat_desc->m_output_index)));
NGRAPH_SUPPRESS_DEPRECATED_END
// connect the Concat layer to the corresponding TI outputs
for (auto &input : ti->output(concat_desc->m_output_index).get_target_inputs()) {
input.replace_source_output(concat);
@@ -140,7 +142,9 @@ bool ngraph::pass::UnrollTensorIterator::run_on_function(std::shared_ptr<ngraph:
std::shared_ptr<opset4::Result> result = body_functions[0]->get_results().at(concat_desc->m_body_value_index);
const auto& input_to_res = result->get_input_source_output(0);
// set output name to Tensor to store it for ngraph to cnn conversion
NGRAPH_SUPPRESS_DEPRECATED_START
input_to_res.get_tensor().set_name(op::util::create_ie_output_name(ti->output(concat_desc->m_output_index)));
NGRAPH_SUPPRESS_DEPRECATED_END
for (auto &input : ti->output(concat_desc->m_output_index).get_target_inputs()) {
input.replace_source_output(input_to_res);
}
@@ -153,7 +157,9 @@ bool ngraph::pass::UnrollTensorIterator::run_on_function(std::shared_ptr<ngraph:
const auto& in_value = result->input_value(0);

// set output name to Tensor to store it for ngraph to cnn conversion
NGRAPH_SUPPRESS_DEPRECATED_START
in_value.get_tensor().set_name(op::util::create_ie_output_name(ti->output(output_desc->m_output_index)));
NGRAPH_SUPPRESS_DEPRECATED_END
for (const auto &input : ti->output(output_desc->m_output_index).get_target_inputs()) {
input.replace_source_output(result->get_input_source_output(0));
}
@@ -1,4 +1,4 @@
// Copyright (C) 2020 Intel Corporation
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

@@ -178,7 +178,9 @@ ngraph::pass::ConvertTensorIteratorToLSTMSequence::ConvertTensorIteratorToLSTMSe
for (const auto &input : ti->output(ordered_out_descs[i]->m_output_index).get_target_inputs()) {
input.replace_source_output(outputs[i]->output(0));
}
NGRAPH_SUPPRESS_DEPRECATED_START
outputs[i]->get_output_tensor(0).set_name(op::util::create_ie_output_name(ti->output(ordered_out_descs[i]->m_output_index)));
NGRAPH_SUPPRESS_DEPRECATED_END
}
}

@@ -331,7 +333,9 @@ ngraph::pass::ConvertTensorIteratorToRNNSequence::ConvertTensorIteratorToRNNSequ
for (const auto &input : ti->output(ordered_out_descs[i]->m_output_index).get_target_inputs()) {
input.replace_source_output(outputs[i]->output(0));
}
NGRAPH_SUPPRESS_DEPRECATED_START
outputs[i]->get_output_tensor(0).set_name(op::util::create_ie_output_name(ti->output(ordered_out_descs[i]->m_output_index)));
NGRAPH_SUPPRESS_DEPRECATED_END
}
}

@@ -485,7 +489,9 @@ ngraph::pass::ConvertTensorIteratorToGRUSequence::ConvertTensorIteratorToGRUSequ
for (const auto &input : ti->output(ordered_out_descs[i]->m_output_index).get_target_inputs()) {
input.replace_source_output(outputs[i]->output(0));
}
NGRAPH_SUPPRESS_DEPRECATED_START
outputs[i]->get_output_tensor(0).set_name(op::util::create_ie_output_name(ti->output(ordered_out_descs[i]->m_output_index)));
NGRAPH_SUPPRESS_DEPRECATED_END
}
}

@@ -500,4 +506,4 @@ ngraph::pass::ConvertTensorIteratorToGRUSequence::ConvertTensorIteratorToGRUSequ

auto m = std::make_shared<ngraph::pattern::Matcher>(tensor_iterator, "ConvertTensorIteratorToGRUSequence");
register_matcher(m, callback);
}
}
@@ -33,11 +33,8 @@ void TensorNamesTest::SetUp() {
relu->output(0).set_names({"relu_t", "identity"});
const ngraph::ResultVector results{std::make_shared<ngraph::opset3::Result>(relu)};
results[0]->set_friendly_name("out");
results[0]->get_output_tensor(0).set_name("out_t");
ngraph::ParameterVector params{parameter};
function = std::make_shared<ngraph::Function>(results, params, "TensorNames");
}

} // namespace SubgraphTestsDefinitions


2 changes: 2 additions & 0 deletions ngraph/core/include/ngraph/descriptor/tensor.hpp
@@ -44,7 +44,9 @@ namespace ngraph
Node* node,
size_t node_output_number);

NGRAPH_DEPRECATED("Only output ports have names")
const std::string& get_name() const;
NGRAPH_DEPRECATED("Only output ports have names")
void set_name(const std::string& name);
void set_tensor_type(const element::Type& element_type, const PartialShape& pshape);
void set_element_type(const element::Type& elemenet_type);
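
Outside the library, the replacement for the deprecated descriptor::Tensor name is the port-level name attached to Output<Node>, which the updated TensorNamesTest above already uses via set_names(). A hedged migration sketch (the relu node and the chosen names are assumptions for illustration, and get_names() is assumed to be the matching getter):

#include <memory>
#include <ngraph/ngraph.hpp>

void tag_result_tensor(const std::shared_ptr<ngraph::Node>& relu) {
    // Old, now deprecated: relu->get_output_tensor(0).set_name("relu_t");
    // New: name the output port; several aliases may be attached at once.
    relu->output(0).set_names({"relu_t", "identity"});
    // Reading the names back also goes through the port.
    const auto& names = relu->output(0).get_names();
    (void)names;
}
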
2 changes: 2 additions & 0 deletions ngraph/core/include/ngraph/node.hpp
@@ -327,6 +327,7 @@ namespace ngraph
descriptor::Tensor& get_input_tensor(size_t i) const;

/// Returns the tensor name for output i
NGRAPH_DEPRECATED("Tensor names were deprecated. Please use output names instead.")
const std::string& get_output_tensor_name(size_t i) const;

std::set<Input<Node>> get_output_target_inputs(size_t i) const;
@@ -347,6 +348,7 @@ namespace ngraph
const PartialShape& get_input_partial_shape(size_t i) const;

/// Returns the tensor name for input i
NGRAPH_DEPRECATED("Tensor names were deprecated.")
const std::string& get_input_tensor_name(size_t i) const;

std::unordered_set<descriptor::Tensor*> liveness_new_list;
1 change: 1 addition & 0 deletions ngraph/core/include/ngraph/runtime/tensor.hpp
@@ -63,6 +63,7 @@ namespace ngraph

/// \brief Get tensor's unique name
/// \return tensor's name
NGRAPH_DEPRECATED("Only output ports have names")
const std::string& get_name() const;

/// \brief Get the stale value of the tensor. A tensor is stale if its data is
14 changes: 9 additions & 5 deletions ngraph/core/src/descriptor/tensor.cpp
@@ -42,11 +42,6 @@ descriptor::Tensor::Tensor(const element::Type& element_type,
{
}

void descriptor::Tensor::set_name(const string& name)
{
m_name = name;
}

void descriptor::Tensor::set_tensor_type(const element::Type& element_type,
const PartialShape& pshape)
{
@@ -90,6 +85,13 @@ size_t descriptor::Tensor::size() const
return shape_size(get_shape()) * m_element_type.size();
}

NGRAPH_SUPPRESS_DEPRECATED_START

void descriptor::Tensor::set_name(const string& name)
{
m_name = name;
}

const std::string& descriptor::Tensor::get_name() const
{
return m_name;
@@ -100,3 +102,5 @@ ostream& operator<<(ostream& out, const descriptor::Tensor& tensor)
out << "Tensor(" << tensor.get_name() << ")";
return out;
}

NGRAPH_SUPPRESS_DEPRECATED_END
2 changes: 2 additions & 0 deletions ngraph/core/src/graph_util.cpp
@@ -924,7 +924,9 @@ bool ngraph::replace_output_update_name(Output<Node> output, const Output<Node>&
{
replacement.get_node()->set_friendly_name(output.get_node()->get_friendly_name());
// Update output tensor name
NGRAPH_SUPPRESS_DEPRECATED_START
replacement.get_tensor().set_name(output.get_node()->get_friendly_name());
NGRAPH_SUPPRESS_DEPRECATED_END
}
output.replace(replacement);
copy_runtime_info({replacement.get_node_shared_ptr(), output.get_node_shared_ptr()},
16 changes: 9 additions & 7 deletions ngraph/core/src/node.cpp
@@ -662,13 +662,6 @@ descriptor::Tensor& Node::get_input_tensor(size_t i) const
return input.get_tensor();
}

const string& Node::get_output_tensor_name(size_t i) const
{
NGRAPH_CHECK(
i < m_outputs.size(), "index '", i, "' out of range in get_output_tensor_name(size_t i)");
return m_outputs[i].get_tensor().get_name();
}

size_t Node::get_input_size() const
{
return m_inputs.size();
@@ -694,13 +687,22 @@ const PartialShape& Node::get_input_partial_shape(size_t i) const
return m_inputs[i].get_partial_shape();
}

NGRAPH_SUPPRESS_DEPRECATED_START
const string& Node::get_input_tensor_name(size_t i) const
{
NGRAPH_CHECK(
i < m_inputs.size(), "index '", i, "' out of range in get_input_tensor_name(size_t i)");
return m_inputs[i].get_tensor().get_name();
}

const string& Node::get_output_tensor_name(size_t i) const
{
NGRAPH_CHECK(
i < m_outputs.size(), "index '", i, "' out of range in get_output_tensor_name(size_t i)");
return m_outputs[i].get_tensor().get_name();
}
NGRAPH_SUPPRESS_DEPRECATED_END

bool Node::has_same_type(std::shared_ptr<const Node> node) const
{
if (get_output_size() != node->get_output_size())
4 changes: 4 additions & 0 deletions ngraph/core/src/runtime/host_tensor.cpp
@@ -66,10 +66,12 @@ runtime::HostTensor::HostTensor(const std::string& name)
{
}

NGRAPH_SUPPRESS_DEPRECATED_START
runtime::HostTensor::HostTensor(const Output<Node>& value)
: HostTensor(value.get_element_type(), value.get_partial_shape(), value.get_tensor().get_name())
{
}
NGRAPH_SUPPRESS_DEPRECATED_END

void runtime::HostTensor::allocate_buffer()
{
@@ -102,11 +104,13 @@ void runtime::HostTensor::allocate_buffer()
}
}

NGRAPH_SUPPRESS_DEPRECATED_START
runtime::HostTensor::HostTensor(const std::shared_ptr<op::v0::Constant>& constant)
: HostTensor(constant->output(0).get_tensor().get_name())
{
initialize(constant);
}
NGRAPH_SUPPRESS_DEPRECATED_END

void runtime::HostTensor::initialize(const std::shared_ptr<op::v0::Constant>& constant)
{
2 changes: 2 additions & 0 deletions ngraph/core/src/runtime/tensor.cpp
@@ -49,7 +49,9 @@ size_t runtime::Tensor::get_size_in_bytes() const

const std::string& runtime::Tensor::get_name() const
{
NGRAPH_SUPPRESS_DEPRECATED_START
return m_descriptor->get_name();
NGRAPH_SUPPRESS_DEPRECATED_END
}

bool runtime::Tensor::get_stale() const

