Moved ngraph::Node to ov namespace #7240

Merged
merged 14 commits, Aug 30, 2021
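The pattern repeated throughout this diff is: calls such as copy_runtime_info, replace_node, get_constant_from_source, and replace_output_update_name gain an explicit ngraph:: qualifier, and local forward declarations of ngraph::Node give way to using-declarations that alias the class now defined in the ov namespace. A minimal sketch of what the move looks like, with simplified signatures assumed for illustration (the real declarations live in the openvino core headers):

#include <memory>

namespace ov {
class Node { /* full definition now lives in ov */ };
void copy_runtime_info(const std::shared_ptr<Node>& from,
                       const std::shared_ptr<Node>& to);
}  // namespace ov

namespace ngraph {
using ov::Node;               // replaces the old `class Node;` forward declaration
using ov::copy_runtime_info;  // assumed re-export, shown for illustration
}  // namespace ngraph

With Node defined in ov rather than ngraph, unqualified calls like copy_runtime_info(conv, slice) can no longer count on being resolved inside namespace ngraph, which is presumably why every call site below spells the qualifier out.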
@@ -52,7 +52,7 @@ static void InsertPadding(ngraph::OutputVector& input_rows_to_concat, size_t siz
input_rows_to_concat.push_back(padding_const);
} else {
auto slice = FlatCrop(padding_const, 0, size);
-copy_runtime_info(conv, slice);
+ngraph::copy_runtime_info(conv, slice);
input_rows_to_concat.push_back(slice);
}
}
@@ -77,7 +77,7 @@ static std::shared_ptr<ngraph::Node> CreatePaddedNet(std::shared_ptr<ngraph::ops
// Constant with zero padding
auto const_holding_padding = std::make_shared<ngraph::opset7::Constant>(conv_data.element_type, ngraph::Shape{1, biggest_padding}, 0);

-copy_runtime_info(conv, const_holding_padding);
+ngraph::copy_runtime_info(conv, const_holding_padding);
std::shared_ptr<ngraph::Node> original_row = flat_input;
ngraph::OutputVector input_rows_to_concat;

@@ -98,7 +98,7 @@ static std::shared_ptr<ngraph::Node> CreatePaddedNet(std::shared_ptr<ngraph::ops
if (conv_data.input_height > 1)
original_row = FlatCrop(flat_input, h * conv_data.input_width * conv_data.input_channel_count,
conv_data.input_width * conv_data.input_channel_count);
-copy_runtime_info(conv, original_row);
+ngraph::copy_runtime_info(conv, original_row);

ngraph::OutputVector single_row_concat_inputs;
if (flat_left_padding) {
@@ -109,11 +109,11 @@ static std::shared_ptr<ngraph::Node> CreatePaddedNet(std::shared_ptr<ngraph::ops
InsertPadding(single_row_concat_inputs, flat_right_padding, conv, const_holding_padding, biggest_padding);
}
auto padded_row_concat = std::make_shared<ngraph::opset7::Concat>(single_row_concat_inputs, 1);
-copy_runtime_info(conv, padded_row_concat);
+ngraph::copy_runtime_info(conv, padded_row_concat);
input_rows_to_concat.push_back(padded_row_concat);
}
} else {
-copy_runtime_info(conv, original_row);
+ngraph::copy_runtime_info(conv, original_row);
input_rows_to_concat.push_back(original_row);
}

@@ -123,7 +123,7 @@ static std::shared_ptr<ngraph::Node> CreatePaddedNet(std::shared_ptr<ngraph::ops
}

auto padded_input_plane = std::make_shared<ngraph::opset7::Concat>(input_rows_to_concat, 1);
-copy_runtime_info(conv, padded_input_plane);
+ngraph::copy_runtime_info(conv, padded_input_plane);
return padded_input_plane;
}

@@ -160,7 +160,7 @@ static void GeneratePadding(std::shared_ptr<ngraph::opset7::Transpose> leading_t
conv->get_dilations(),
ngraph::op::PadType::EXPLICIT);

-replace_node(conv, conv_copy);
+ngraph::replace_node(conv, conv_copy);
}

static bool Convert(std::shared_ptr<ngraph::Node> leading_transpose,
@@ -182,7 +182,7 @@ static ngraph::OutputVector SplitInput(const GraphData& graph_data, ConvData& co
auto padded_input_plane = std::make_shared<ngraph::opset7::Reshape>(graph_data.leading_transpose->input_value(0),
ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{2},
ngraph::Shape{1, shape_size(graph_data.leading_transpose->input_value(0).get_shape())}), false);
-copy_runtime_info(graph_data.conv, padded_input_plane);
+ngraph::copy_runtime_info(graph_data.conv, padded_input_plane);

if (graph_data.conv_count > 1) {
// If we have split input plane and convolutions due to GNA limitation - we must sum their results at the end
@@ -219,7 +219,7 @@ static std::vector<std::shared_ptr<ngraph::Node>> SplitFilters(const GraphData&
h_1_filters = Split2DConvFilters(filter_values, vertical_permute, horizontal_permute, graph_data.conv_count);

for (auto filter : h_1_filters)
-copy_runtime_info(graph_data.conv, filter);
+ngraph::copy_runtime_info(graph_data.conv, filter);

return h_1_filters;
}
@@ -247,13 +247,13 @@ static void TransformInput(const GraphData& graph_data, const ConvData& conv_dat
offset = (filter_height * conv_data.filter_dilation_height + output_height * conv_data.filter_stride_height) *
conv_data.input_width * conv_data.input_channel_count;
auto slice = FlatCrop(split_input_plane, offset, conv_data.input_width * conv_data.input_channel_count);
-copy_runtime_info(graph_data.conv, slice);
+ngraph::copy_runtime_info(graph_data.conv, slice);
dilated_input_planes.push_back(slice);
}
} else {
offset = filter_height * conv_data.filter_dilation_height * conv_data.input_width * conv_data.input_channel_count;
auto slice = FlatCrop(split_input_plane, offset, conv_data.input_width * conv_data.input_channel_count * conv_data.output_height);
-copy_runtime_info(graph_data.conv, slice);
+ngraph::copy_runtime_info(graph_data.conv, slice);
dilated_input_planes.push_back(slice);
}
}
@@ -276,7 +276,7 @@ static void TransformInput(const GraphData& graph_data, const ConvData& conv_dat
ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{2},
{(size_t)1, conv_data.input_width * conv_data.input_channel_count * conv_data.output_height * conv_data.filter_height}), false);

-copy_runtime_info(graph_data.conv, {dilated_chunks_concat, flattened_dilated_transposed_input, transposed_dilated_chunks });
+ngraph::copy_runtime_info(graph_data.conv, {dilated_chunks_concat, flattened_dilated_transposed_input, transposed_dilated_chunks });
split_input_plane = flattened_dilated_transposed_input;
}

@@ -311,7 +311,7 @@ static std::shared_ptr<ngraph::Node> Create1DConv(const GraphData& graph_data, c
std::shared_ptr<ngraph::Node> last_conv_block_op = conv;
if (graph_data.bias_const && conv_index == 0) {
last_conv_block_op = std::make_shared<ngraph::opset7::Add>(conv, graph_data.bias_const);
-copy_runtime_info(graph_data.conv, last_conv_block_op);
+ngraph::copy_runtime_info(graph_data.conv, last_conv_block_op);
InsertFQLayer(graph_data.fq_bias, last_conv_block_op);
}

@@ -325,14 +325,14 @@ static std::shared_ptr<ngraph::Node> Create1DConv(const GraphData& graph_data, c
// Activation function & fake quantize
if (graph_data.af && graph_data.conv_count == 1) {
last_conv_block_op = graph_data.af->copy_with_new_inputs({last_conv_block_op});
-copy_runtime_info(conv, last_conv_block_op);
+ngraph::copy_runtime_info(conv, last_conv_block_op);
InsertFQLayer(graph_data.fq_af, last_conv_block_op);
}

// Transpose NCHW => NHWC
auto nhwc_output = std::make_shared<ngraph::opset7::Transpose>(last_conv_block_op,
ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {0, 2, 3, 1})->output(0));
-copy_runtime_info(graph_data.conv, {nchw_input, conv, nhwc_output});
+ngraph::copy_runtime_info(graph_data.conv, {nchw_input, conv, nhwc_output});
return nhwc_output;
}

@@ -380,7 +380,7 @@ static std::shared_ptr<ngraph::Node> CreateDecomposedConv(const GraphData& graph
for (size_t filter_width = 0; filter_width < conv_data.filter_width; filter_width++) {
size_t offset = filter_width * conv_data.filter_dilation_width * h_1_filter_channel_count;
auto slice = FlatCrop(row, offset, h_1_filter_channel_count * output_width);
-copy_runtime_info(graph_data.conv, slice);
+ngraph::copy_runtime_info(graph_data.conv, slice);
dilated_chunks.push_back(slice);
}

@@ -394,7 +394,7 @@ static std::shared_ptr<ngraph::Node> CreateDecomposedConv(const GraphData& graph
ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{4},
ngraph::Shape{1, 1, output_width, h_1_filter_channel_count * conv_data.filter_width}), false);

-copy_runtime_info(graph_data.conv, ngraph::NodeVector{flattened_dilated_conv_input, transposed_dilated_chunks, dilated_chunks_concat});
+ngraph::copy_runtime_info(graph_data.conv, ngraph::NodeVector{flattened_dilated_conv_input, transposed_dilated_chunks, dilated_chunks_concat});

nhwc_conv_y_input = flattened_dilated_conv_input;
} else {
@@ -417,7 +417,7 @@ static std::shared_ptr<ngraph::Node> CreateDecomposedConv(const GraphData& graph
// Concat in horizontal dimension
// In NHWC index of H is 1
auto concatenated_sub_results = std::make_shared<ngraph::opset7::Concat>(result_chunks, 1);
-copy_runtime_info(graph_data.conv, concatenated_sub_results);
+ngraph::copy_runtime_info(graph_data.conv, concatenated_sub_results);
last_op = concatenated_sub_results;
}
return last_op;
@@ -447,7 +447,7 @@ static void Decompose(const GraphData& graph_data, ConvData& conv_data) {
std::shared_ptr<ngraph::Node> conv_result = partial_conv_results.front();
for (size_t i = 1; i < partial_conv_results.size(); i++) {
auto add_result = std::make_shared<ngraph::opset7::Add>(partial_conv_results[i], conv_result);
-copy_runtime_info(graph_data.conv, add_result);
+ngraph::copy_runtime_info(graph_data.conv, add_result);
conv_result = add_result;
}

@@ -458,12 +458,12 @@
// Activation function after trailing Transpose NCHW->NHWC
if (graph_data.af && graph_data.conv_count > 1) {
auto af_result = graph_data.af->copy_with_new_inputs({conv_result});
-copy_runtime_info(graph_data.conv, af_result);
+ngraph::copy_runtime_info(graph_data.conv, af_result);
conv_result = af_result;
}
// We need to put the same name as before for the Convolution layer, so its output can be used as network result
std::string conv_result_name = graph_data.last_op_in_sequence_for_replacement->get_friendly_name();
-replace_node(graph_data.last_op_in_sequence_for_replacement, conv_result);
+ngraph::replace_node(graph_data.last_op_in_sequence_for_replacement, conv_result);
conv_result->set_friendly_name(conv_result_name);
}
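The save-name/replace/restore sequence above recurs in several transformations in this diff: replace_node rewires all consumers of the old node to the new one, but the friendly name (which network results are looked up by) must be moved by hand. As a sketch, the idiom could be factored into a helper like the following — a hypothetical function shown for illustration, not one that exists in the codebase:

#include <memory>
#include <string>
#include <ngraph/graph_util.hpp>  // assumed location of ngraph::replace_node
#include <ngraph/node.hpp>

// Hypothetical helper illustrating the replace-and-keep-name idiom.
static void ReplaceKeepingName(const std::shared_ptr<ngraph::Node>& old_node,
                               const std::shared_ptr<ngraph::Node>& new_node) {
    // Capture the name first, since replace_node does not transfer it.
    const std::string name = old_node->get_friendly_name();
    ngraph::replace_node(old_node, new_node);
    new_node->set_friendly_name(name);
}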

@@ -22,6 +22,7 @@
#include "ie_common.h"
#include "ie_data.h"
#include <legacy/ie_layers_property.hpp>
+#include <ngraph/node.hpp>

#if defined IMPLEMENT_INFERENCE_ENGINE_API || defined IMPLEMENT_INFERENCE_ENGINE_PLUGIN
# define INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(...) INFERENCE_ENGINE_API_CLASS(__VA_ARGS__)
@@ -31,12 +32,6 @@
INFERENCE_ENGINE_API_CLASS(__VA_ARGS__)
#endif

-namespace ngraph {
-
-class Node;
-
-} // namespace ngraph

namespace InferenceEngine {

/**
@@ -91,7 +91,7 @@ ngraph::matcher_pass_callback ConvertReduceBase::convert_reduce_to_pooling() {

// If axes are empty we just remove Reduction operation
if (axes_vector.empty()) {
-return replace_output_update_name(reduce->output(0), input);
+return ngraph::replace_output_update_name(reduce->output(0), input);
}

auto input_shape = input.get_shape();
@@ -104,8 +104,8 @@ ngraph::matcher_pass_callback ConvertReduceBase::convert_reduce_to_pooling() {
ngraph::opset1::Constant::create(ngraph::element::i64, ngraph::Shape{reshape_shape.size()}, reshape_shape), true);

reshape->set_friendly_name(reduce->get_friendly_name());
-copy_runtime_info(reduce, reshape);
-replace_node(reduce, reshape);
+ngraph::copy_runtime_info(reduce, reshape);
+ngraph::replace_node(reduce, reshape);
return true;
}

@@ -260,7 +260,7 @@ ngraph::matcher_pass_callback ConvertReduceBase::convert_reduce_to_pooling() {
new_ops.push_back(input.get_node_shared_ptr());
}
input.get_node_shared_ptr()->set_friendly_name(reduce->get_friendly_name());
-copy_runtime_info(reduce, new_ops);
+ngraph::copy_runtime_info(reduce, new_ops);
reduce->output(0).replace(input);
return true;
};
@@ -22,9 +22,9 @@ std::pair<ngraph::Output<ngraph::Node>, ngraph::Output<ngraph::Node>>
ngraph::Output<ngraph::Node> multiplier) {
const auto mul_out_low = std::make_shared<ngraph::opset4::Multiply>(out_low, multiplier);
const auto mul_out_high = std::make_shared<ngraph::opset4::Multiply>(out_high, multiplier);
-copy_runtime_info({out_low.get_node_shared_ptr(), multiplier.get_node_shared_ptr()},
+ngraph::copy_runtime_info({out_low.get_node_shared_ptr(), multiplier.get_node_shared_ptr()},
mul_out_low);
-copy_runtime_info({out_high.get_node_shared_ptr(), multiplier.get_node_shared_ptr()},
+ngraph::copy_runtime_info({out_high.get_node_shared_ptr(), multiplier.get_node_shared_ptr()},
mul_out_high);

ngraph::OutputVector new_out_low(1), new_out_high(1);
@@ -49,7 +49,7 @@ static void insert_pooling(const ngraph::Output<ngraph::Node>& first, ngraph::In
auto ones = ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{diff}, std::vector<int64_t>(diff, 1));
auto current_shape = std::make_shared<ngraph::opset7::ShapeOf>(first);
std::shared_ptr<ngraph::Node> new_shape = std::make_shared<ngraph::opset7::Concat>(ngraph::OutputVector{ones, current_shape}, 0);
-std::shared_ptr<ngraph::Node> constant_new_shape = get_constant_from_source(new_shape);
+std::shared_ptr<ngraph::Node> constant_new_shape = ngraph::get_constant_from_source(new_shape);
if (constant_new_shape)
new_shape = constant_new_shape;
first_node = std::make_shared<ngraph::opset7::Reshape>(first_node, new_shape, false);
@@ -64,7 +64,7 @@ static void insert_pooling(const ngraph::Output<ngraph::Node>& first, ngraph::In
new_node = std::make_shared<ngraph::opset7::Squeeze>(new_node,
ngraph::opset7::Constant::create(ngraph::element::u64, ngraph::Shape{diff}, axes));
}
-std::shared_ptr<ngraph::Node> constant_new_node = get_constant_from_source(new_node);
+std::shared_ptr<ngraph::Node> constant_new_node = ngraph::get_constant_from_source(new_node);
if (constant_new_node)
new_node = constant_new_node;
second.replace_source_output(new_node);
@@ -42,8 +42,8 @@ bool relax_hc_reshape_followed_by_matmul(const ngraph::pattern::PatternValueMap

auto reshape_pattern = pattern_to_output.at(reshape_pattern_label).get_node_shared_ptr();
new_reshape_pattern->set_friendly_name(reshape_pattern->get_friendly_name());
-copy_runtime_info(reshape_pattern, new_reshape_pattern);
-replace_node(reshape_pattern, new_reshape_pattern);
+ngraph::copy_runtime_info(reshape_pattern, new_reshape_pattern);
+ngraph::replace_node(reshape_pattern, new_reshape_pattern);
return true;
}

@@ -24,7 +24,7 @@ MergeSubsequentDSROperations::MergeSubsequentDSROperations() {
auto newDsr = dsr->copy_with_new_inputs({predecessor->input_value(0), dsr->input_value(1)});
newDsr->set_friendly_name(dsr->get_friendly_name());
// replace DSR2 with new so DSR2 will lose all consumers so it will die after pass execution
-replace_node(dsr, newDsr);
+ngraph::replace_node(dsr, newDsr);
// reconnect all DSR1 consumers even with DSR2 which will be destructed so this is no more an issue
for (auto &consumer : predecessor->get_output_target_inputs(0)) {
consumer.replace_source_output(newDsr);
Expand Down
5 changes: 1 addition & 4 deletions ngraph/core/include/ngraph/attribute_adapter.hpp
@@ -21,10 +21,7 @@ using ov::DirectValueAccessor;

using ov::IndirectScalarValueAccessor;

-template <typename A, typename B>
-A copy_from(B& b) {
-    return ov::copy_from<A>(b);
-}
+using ov::copy_from;

using ov::IndirectVectorValueAccessor;

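Replacing the forwarding wrapper with `using ov::copy_from;` is not just shorter: a using-declaration re-exports the function template itself (and every overload of it), so call sites that pass explicit template arguments keep compiling unchanged. A self-contained toy demonstration of the mechanism (names here are hypothetical, not OpenVINO API):

#include <set>
#include <vector>

namespace lib_v2 {
// Stand-in for ov::copy_from: builds an A from the range of a B.
template <typename A, typename B>
A copy_from(B& b) {
    return A(b.begin(), b.end());
}
}  // namespace lib_v2

namespace lib_v1 {
using lib_v2::copy_from;  // re-exports the whole template into lib_v1
}  // namespace lib_v1

int main() {
    std::set<int> s{1, 2, 3};
    // Explicit template arguments resolve through the using-declaration
    // just as they did through the old hand-written forwarding wrapper.
    std::vector<int> v = lib_v1::copy_from<std::vector<int>>(s);
    return v.size() == 3 ? 0 : 1;
}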
3 changes: 1 addition & 2 deletions ngraph/core/include/ngraph/descriptor/input.hpp
@@ -12,8 +12,7 @@
#include "openvino/core/descriptor/input.hpp"

namespace ngraph {
-class Node;
-
+using ov::Node;
namespace descriptor {

// Describes a tensor that is an input to an op, directly or indirectly via a tuple
8 changes: 1 addition & 7 deletions ngraph/core/include/ngraph/descriptor/output.hpp
@@ -17,13 +17,7 @@
#include "openvino/core/descriptor/output.hpp"

namespace ngraph {
-// The forward declaration of Node is needed here because Node has a deque of
-// Outputs, and Output is an incomplete type at this point. STL containers of
-// incomplete type have undefined behavior according to the C++11 standard, and
-// in practice including node.hpp here was causing compilation errors on some
-// systems (namely macOS).
-class Node;
-
+using ov::Node;
namespace descriptor {
// Describes an output tensor of an op
using ov::descriptor::Output;
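The comment deleted above documented a real constraint: in C++11, instantiating an STL container over an incomplete element type is undefined behavior, which is why this header previously made do with a bare forward declaration instead of including node.hpp. A toy illustration of the hazard (types here are hypothetical):

#include <deque>

class Widget;  // forward declaration only; Widget is incomplete here

struct Holder {
    // Undefined behavior in C++11: std::deque requires a complete element
    // type when this member forces instantiation, and some standard library
    // implementations reject it outright (the macOS failures the old
    // comment mentioned were of this kind).
    std::deque<Widget> items_;
};

class Widget {};  // defined too late for Holder above

With `using ov::Node;`, the alias refers to the declaration the included openvino descriptor headers already provide, so the local forward declaration and its caveat are no longer needed.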