Resolved problems with ssd_resnet34_mlperf_opset10 (#3487)
* Resolved problems with ssd_resnet34_1200

* removed debug code

* Added correct handling of ONNX nodes from the parent graph scope

* removed unnecessary include

* fixed calculation of the index to replace

* fixed LoopParentParametersUsedInBody test

* added set_friendly_name

* apply Unsqueeze to each concatenated Loop output

* added handling of trip count with value max_int (see the sketch below)

* added check whether trip_count is constant
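
For context on the last two bullets: ONNX models commonly encode a "while-style" Loop by passing a trip count equal to the maximum int64 value together with a constant-true condition. A minimal standalone sketch of the kind of check this implies — the function name is illustrative, not the actual PR code:

#include <cstdint>
#include <limits>

// Hypothetical helper: a trip count equal to int64 max combined with a
// constant-true execution condition conventionally means "iterate until
// the body condition says stop", so the iteration count is unknown
// statically and must not be used for shape inference.
bool trip_count_means_unbounded(int64_t trip_count, bool condition_is_const_true)
{
    return trip_count == std::numeric_limits<int64_t>::max() && condition_is_const_true;
}

This also explains the "checking whether trip_count is constant" bullet: the value can only be inspected at import time when it is a Constant node.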
Mateusz Bencer authored Dec 21, 2020
1 parent c6bfac6 commit 0b05653
Showing 18 changed files with 1,152 additions and 84 deletions.
@@ -256,4 +256,134 @@ TEST(SmartReshapeTests, LoopDynamicParameters) {
// concat output
ASSERT_TRUE(network.getFunction()->get_results()[2]->get_output_partial_shape(0).compatible({32, 10, 10}));
ASSERT_TRUE(network.getFunction()->get_results()[3]->get_output_partial_shape(0).compatible({32, 1, 1}));
}
}

TEST(SmartReshapeTests, LoopParentParametersUsedInBody) {
std::shared_ptr<ngraph::Function> f(nullptr);
{
// That which we iterate over
auto X = std::make_shared<opset5::Parameter>(element::f32, PartialShape::dynamic());
auto Y = std::make_shared<opset5::Parameter>(element::f32, PartialShape::dynamic());
auto add_Y = std::make_shared<opset5::Add>(Y,
std::make_shared<ngraph::opset5::Constant>(ngraph::element::f32, ngraph::Shape{}, std::vector<float>{0.f}));
auto M = std::make_shared<opset5::Parameter>(element::f32, PartialShape::dynamic());
X->set_friendly_name("X");
Y->set_friendly_name("Y");
M->set_friendly_name("M");

// Set up the cell body, a function from (Xi, add_Y) -> (Zo)
// Body parameters
auto current_iteration = std::make_shared<opset5::Parameter>(element::i64, Shape{});
auto Xi = std::make_shared<opset5::Parameter>(element::f32, PartialShape::dynamic());
auto Yi = std::make_shared<opset5::Parameter>(element::f32, PartialShape::dynamic());
auto M_body = std::make_shared<opset5::Parameter>(element::f32, PartialShape::dynamic());
auto body_condition =
std::make_shared<ngraph::opset5::Constant>(ngraph::element::boolean, ngraph::Shape{}, true);

auto trip_count =
std::make_shared<ngraph::opset5::Constant>(ngraph::element::i64, ngraph::Shape{}, 10);
auto exec_condition =
std::make_shared<ngraph::opset5::Constant>(ngraph::element::boolean, ngraph::Shape{}, true);
// Body
auto sum = std::make_shared<ngraph::opset5::Add>(Xi, Yi);
auto Zo = std::make_shared<ngraph::opset5::Multiply>(sum, M_body);
auto body = std::make_shared<ngraph::Function>(OutputVector{Zo, body_condition, sum},
ParameterVector{Xi, current_iteration, Yi, M_body});

auto loop = std::make_shared<opset5::Loop>(trip_count, exec_condition);
loop->set_function(body);
loop->set_special_body_ports(ngraph::opset5::Loop::SpecialBodyPorts{1, 1});

loop->set_sliced_input(Xi, X, 0, 1, 1, -1, 2);
loop->set_merged_input(M_body, M, Zo);
// Set invariant input which uses parameter from parent graph
loop->set_invariant_input(Yi, add_Y);

// Output 0 is the last body condition value, output 1 is the last Zo
auto out0 = loop->get_iter_value(body_condition, -1);
auto out1 = loop->get_iter_value(Zo, -1);
// Output 2 is the concatenation of Zo slices
// start=0, stride=1, part_size=1, end=-1, axis=1
auto out2 = loop->get_concatenated_slices(Zo, 0, 1, 1, -1, 1);
auto out3 = loop->get_iter_value(sum, -1);

f = std::make_shared<Function>(OutputVector{out0, out1, out2, out3}, ParameterVector{X, Y, M});
}

InferenceEngine::CNNNetwork network(f);
ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({}));
ASSERT_TRUE(network.getFunction()->get_results()[1]->get_output_partial_shape(0).compatible(PartialShape::dynamic()));
// concat output
ASSERT_TRUE(network.getFunction()->get_results()[2]->get_output_partial_shape(0).compatible(PartialShape::dynamic()));
ASSERT_TRUE(network.getFunction()->get_results()[3]->get_output_partial_shape(0).compatible(PartialShape::dynamic()));

ASSERT_NO_THROW(network.reshape({{"X", {4, 3, 2}}, {"Y", {4, 3, 2}}, {"M", {4, 3, 2}}}));

ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({}));
ASSERT_TRUE(network.getFunction()->get_results()[1]->get_output_partial_shape(0).compatible({4, 3, 2}));
// concat output: concat axis = 1 and trip_count = 10, so the axis-1 dimension grows to 3 * 10 = 30
ASSERT_TRUE(network.getFunction()->get_results()[2]->get_output_partial_shape(0).compatible({4, 30, 2}));
ASSERT_TRUE(network.getFunction()->get_results()[3]->get_output_partial_shape(0).compatible({4, 3, 2}));
}

TEST(SmartReshapeTests, TensorIteratorParentParameterUsedInBody) {
std::shared_ptr<ngraph::Function> f(nullptr);
{
// That which we iterate over
auto X = std::make_shared<opset5::Parameter>(element::f32, Shape{1, 1, 1});
auto Y = std::make_shared<opset5::Parameter>(element::f32, Shape{1, 1, 1});
auto add_Y = std::make_shared<opset5::Add>(Y,
std::make_shared<ngraph::opset5::Constant>(ngraph::element::f32, ngraph::Shape{}, std::vector<float>{0.f}));
auto M = std::make_shared<opset5::Parameter>(element::f32, Shape{1, 1, 1});
X->set_friendly_name("X");
Y->set_friendly_name("Y");
M->set_friendly_name("M");

// Set up the cell body, a function from (Xi, add_Y) -> (Zo)
// Body parameters
auto Xi = std::make_shared<opset5::Parameter>(element::f32, PartialShape::dynamic());
auto Yi = std::make_shared<opset5::Parameter>(element::f32, PartialShape::dynamic());
auto M_body = std::make_shared<opset5::Parameter>(element::f32, PartialShape::dynamic());
auto body_condition =
std::make_shared<ngraph::opset5::Constant>(ngraph::element::boolean, ngraph::Shape{}, true);

// Body
auto sum = std::make_shared<ngraph::opset5::Add>(Xi, Yi);
auto Zo = std::make_shared<ngraph::opset5::Multiply>(sum, M_body);
auto body = std::make_shared<ngraph::Function>(OutputVector{Zo, body_condition, sum},
ParameterVector{Xi, Yi, M_body});

auto tensor_iterator = std::make_shared<opset5::TensorIterator>();
tensor_iterator->set_function(body);

tensor_iterator->set_sliced_input(Xi, X, 0, 1, 1, -1, 2);
tensor_iterator->set_merged_input(M_body, M, Zo);
// Set invariant input which uses parameter from parent graph
tensor_iterator->set_invariant_input(Yi, add_Y);

// Output 0 is the last body condition value, output 1 is the last Zo
auto out0 = tensor_iterator->get_iter_value(body_condition, -1);
auto out1 = tensor_iterator->get_iter_value(Zo, -1);
// Output 2 is the concatenation of Zo slices
// start=0, stride=1, part_size=1, end=-1, axis=1
auto out2 = tensor_iterator->get_concatenated_slices(Zo, 0, 1, 1, -1, 1);
auto out3 = tensor_iterator->get_iter_value(sum, -1);

f = std::make_shared<Function>(OutputVector{out0, out1, out2, out3}, ParameterVector{X, Y, M});
}

InferenceEngine::CNNNetwork network(f);
ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({}));
ASSERT_TRUE(network.getFunction()->get_results()[1]->get_output_partial_shape(0).compatible({1, 1, 1}));
// concat output (seq len = 1, so num_iter = 1)
ASSERT_TRUE(network.getFunction()->get_results()[2]->get_output_partial_shape(0).compatible({1, 1, 1}));
ASSERT_TRUE(network.getFunction()->get_results()[3]->get_output_partial_shape(0).compatible({1, 1, 1}));

ASSERT_NO_THROW(network.reshape({{"X", {32, 1, 10}}, {"Y", {1, 1, 1}}, {"M", {32, 1, 10}}}));

ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({}));
ASSERT_TRUE(network.getFunction()->get_results()[1]->get_output_partial_shape(0).compatible({32, 1, 10}));
// concat output: 10 iterations (X is sliced along axis 2), concatenated along axis 1
ASSERT_TRUE(network.getFunction()->get_results()[2]->get_output_partial_shape(0).compatible({32, 10, 10}));
ASSERT_TRUE(network.getFunction()->get_results()[3]->get_output_partial_shape(0).compatible({32, 1, 1}));
}
37 changes: 22 additions & 15 deletions ngraph/core/src/op/loop.cpp
@@ -247,15 +247,22 @@ void op::v5::Loop::validate_and_infer_types()
              as_type_ptr<TensorIterator::ConcatOutputDescription>(output_description))
         {
             const auto& body_value_partial_shape = body_value.get_partial_shape();
-            set_output_type(index, body_value.get_element_type(), PartialShape::dynamic());
-            if (body_value_partial_shape.is_static())
+            if (body_value_partial_shape.rank().is_dynamic())
+            {
+                set_output_type(index, body_value.get_element_type(), PartialShape::dynamic());
+            }
+            else
             {
-                auto body_value_shape = body_value_partial_shape.to_shape();
                 auto axis = concat_output_description->m_axis;
 
-                Shape out_shape{body_value_shape};
+                NODE_VALIDATION_CHECK(this,
+                                      axis < body_value_partial_shape.rank().get_length(),
+                                      "Concatenation axis must be less than sliced output rank");
+
+                PartialShape out_shape{body_value_partial_shape};
 
-                if (body_value_shape.empty())
+                if (body_value_partial_shape.is_static() &&
+                    ngraph::is_scalar(body_value_partial_shape.to_shape()))
                 {
                     NODE_VALIDATION_CHECK(
                         this,
@@ -266,23 +273,23 @@ void op::v5::Loop::validate_and_infer_types()
                     out_shape = Shape(1);
                 }
 
-                if (m_num_iterations != -1)
+                if (m_num_iterations != -1 && body_value_partial_shape[axis].is_static())
                 {
-                    out_shape[axis] = m_num_iterations * body_value_shape[axis];
+                    out_shape[axis] =
+                        m_num_iterations * body_value_partial_shape[axis].get_length();
                     if (zero_number_of_iter)
                     {
-                        out_shape.at(0) = 0;
+                        out_shape[0] = 0;
                     }
-                    set_output_type(index, body_value.get_element_type(), out_shape);
                 }
-            }
-            else
-            {
-                set_output_type(index,
-                                body_value.get_element_type(),
-                                PartialShape::dynamic(body_value.get_partial_shape().rank()));
+                else
+                {
+                    out_shape[axis] = Dimension::dynamic();
+                }
+                set_output_type(index, body_value.get_element_type(), out_shape);
             }
         }
 
         else if (auto body_output_description =
                  as_type_ptr<TensorIterator::BodyOutputDescription>(output_description))
         {
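
The net effect of the loop.cpp change: a concatenated Loop output no longer collapses to a fully dynamic shape whenever the body shape is not fully static; only the dimensions that are genuinely unknown stay dynamic. A standalone sketch of the resulting rule, with a plain vector standing in for PartialShape, -1 denoting a dynamic dimension, and the scalar and zero-iteration special cases omitted (names are illustrative):

#include <cassert>
#include <cstdint>
#include <vector>

// Simplified model of the new shape rule for a concatenated Loop output.
// dims: per-iteration body output shape, -1 for a dynamic dimension.
// num_iterations: -1 when the trip count is not known statically.
std::vector<int64_t> concat_output_shape(std::vector<int64_t> dims,
                                         size_t axis,
                                         int64_t num_iterations)
{
    assert(axis < dims.size()); // mirrors the new NODE_VALIDATION_CHECK
    if (num_iterations != -1 && dims[axis] != -1)
        dims[axis] *= num_iterations; // e.g. {4, 3, 2}, axis 1, 10 iterations -> {4, 30, 2}
    else
        dims[axis] = -1; // only the concatenation axis becomes dynamic
    return dims;
}

This is exactly what the LoopParentParametersUsedInBody test above asserts: after reshaping X to {4, 3, 2}, the concatenated output becomes {4, 30, 2}.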
ngraph/frontend/onnx_import/src/core/graph.hpp
@@ -67,10 +67,10 @@ namespace ngraph
 
         protected:
             ParameterVector m_parameters;
+            std::unique_ptr<GraphCache> m_cache;
 
         private:
             const ONNX_NAMESPACE::GraphProto* m_graph_proto;
-            std::unique_ptr<GraphCache> m_cache;
             std::vector<Node> m_nodes;
             std::vector<ValueInfo> m_inputs;
             std::vector<ValueInfo> m_outputs;
@@ -91,6 +91,13 @@ namespace ngraph
             Subgraph(const ONNX_NAMESPACE::GraphProto& proto,
                      Model& model,
                      const Graph& parent_graph);
+
+            /// \brief Return outputs which are on the edge between the subgraph and the parent graph.
+            /// \return Vector of edge nodes from the parent scope.
+            const std::vector<Output<ngraph::Node>> get_outputs_from_parent() const;
+
+        private:
+            std::vector<Output<ngraph::Node>> m_outputs_from_parent;
         };
 
         inline std::ostream& operator<<(std::ostream& outs, const Graph& graph)
ngraph/frontend/onnx_import/src/core/graph_cache.hpp
@@ -25,6 +25,17 @@ namespace ngraph
{
namespace onnx_import
{
/// \brief Enum which determines scope (visibility) of nodes in GraphCache.
enum class NodeScope
{
// in parent graph scope
ParentGraph = 1,
// in subgraph scope
SubGraph,
// not available at all
Lack
};

/// \brief GraphCache stores and provides access to ONNX graph initializers.
class GraphCache
{
@@ -53,6 +64,16 @@ namespace ngraph
/// \return true if the node named `name` exists in the cache, false otherwise.
virtual bool contains(const std::string& name) const;

/// \brief Return a NodeScope enum which determines the scope of the node.
/// \note  If the method is called on a GraphCache, the ParentGraph enum
///        value is always returned.
///
/// \param[in]  name       The name of the node.
///
/// \return SubGraph if the node belongs to a SubgraphCache, ParentGraph if
///         it is available in the parent graph cache, Lack otherwise.
virtual NodeScope node_scope(const std::string& name) const;

private:
std::map<std::string, Output<ngraph::Node>> m_graph_cache_map;
};
@@ -82,6 +103,14 @@ namespace ngraph
/// (subgraph or parent graph), false otherwise.
bool contains(const std::string& name) const override;

/// \brief Return a NodeScope enum which determines the scope of the node.
///
/// \param[in]  name       The name of the node.
///
/// \return SubGraph if the node belongs to the SubgraphCache, ParentGraph if
///         it is available in the parent graph cache, Lack otherwise.
NodeScope node_scope(const std::string& name) const override;

private:
const GraphCache* m_parent_graph_cache;
};
55 changes: 32 additions & 23 deletions ngraph/frontend/onnx_import/src/core/graph.cpp
@@ -315,39 +315,48 @@ namespace ngraph
               model,
               std::unique_ptr<SubgraphCache>(new SubgraphCache(parent_graph.get_graph_cache())))
         {
-            std::vector<std::shared_ptr<ngraph::Node>> subgraph_root_nodes;
-            const auto& outputs = as_result_vector(get_ng_outputs());
-            for (auto& out : outputs)
+            // find all nodes on the edge between the parent graph and the subgraph
+            // (i.e. a node input produced in the parent graph and a node output that
+            // belongs to the subgraph)
+            for (const auto& node_proto : proto.node())
             {
-                subgraph_root_nodes.push_back(out);
-            }
-            const auto& params = get_ng_parameters();
-            for (auto& param : params)
-            {
-                subgraph_root_nodes.push_back(param);
-            }
-            const auto subgraph_nodes = topological_sort(subgraph_root_nodes);
-
-            const auto& parent_graph_parameters = parent_graph.get_ng_parameters();
-            for (const auto& node : subgraph_nodes)
-            {
-                if (op::is_parameter(node))
+                int input_index = 0;
+                for (const auto& in_name : node_proto.input())
                 {
-                    const auto sub_it = std::find(m_parameters.begin(), m_parameters.end(), node);
-                    // not present as subgraph parameter
-                    if (sub_it == m_parameters.end())
+                    if (m_cache->node_scope(in_name) == NodeScope::ParentGraph)
                     {
-                        const auto parent_it = std::find(
-                            parent_graph_parameters.begin(), parent_graph_parameters.end(), node);
-                        if (parent_it != m_parameters.end())
+                        const auto& from_parent_node = m_cache->get_node(in_name);
+                        // constants are skipped
+                        if (!ngraph::is_type<ngraph::op::Constant>(
+                                from_parent_node.get_node_shared_ptr()))
                         {
-                            m_parameters.push_back(*parent_it);
+                            for (const auto& out_name : node_proto.output())
+                            {
+                                if (m_cache->node_scope(out_name) == NodeScope::SubGraph)
+                                {
+                                    auto out_node_to_replace_input = m_cache->get_node(out_name);
+                                    auto new_param = std::make_shared<ngraph::op::Parameter>(
+                                        from_parent_node.get_element_type(),
+                                        from_parent_node.get_partial_shape());
+                                    // replace the input from the parent scope with a parameter
+                                    out_node_to_replace_input.get_node()
+                                        ->input(input_index)
+                                        .replace_source_output(new_param);
+                                    m_parameters.push_back(new_param);
+                                    m_outputs_from_parent.push_back(from_parent_node);
+                                }
+                            }
                         }
                     }
+                    ++input_index;
                }
             }
         }
 
+        const std::vector<Output<ngraph::Node>> Subgraph::get_outputs_from_parent() const
+        {
+            return m_outputs_from_parent;
+        }
+
     } // namespace onnx_import
 
 } // namespace ngraph
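
On the consumer side, an operator importer that builds the nGraph Loop presumably pairs each Parameter appended by the Subgraph constructor with the parent-scope Output it replaced, and wires the value back in as an invariant input so it is visible inside the body. A hedged sketch of that pattern, using only calls that appear elsewhere in this PR; the helper name and argument layout are illustrative, not the actual PR code:

#include <memory>
#include <vector>

#include <ngraph/ngraph.hpp>
#include <ngraph/opsets/opset5.hpp>

// Illustrative pattern only: appended_body_params[i] is the Parameter the
// Subgraph constructor created for outputs_from_parent[i] (the value from
// the parent scope it replaced).
void wire_parent_edges(const std::shared_ptr<ngraph::opset5::Loop>& loop,
                       const ngraph::ParameterVector& appended_body_params,
                       const std::vector<ngraph::Output<ngraph::Node>>& outputs_from_parent)
{
    for (std::size_t i = 0; i < outputs_from_parent.size(); ++i)
    {
        // same call the LoopParentParametersUsedInBody test uses for add_Y
        loop->set_invariant_input(appended_body_params[i], outputs_from_parent[i]);
    }
}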
21 changes: 21 additions & 0 deletions ngraph/frontend/onnx_import/src/core/graph_cache.cpp
@@ -43,6 +43,11 @@ namespace ngraph
return (m_graph_cache_map.count(name) > 0);
}

NodeScope GraphCache::node_scope(const std::string& name) const
{
return contains(name) ? NodeScope::ParentGraph : NodeScope::Lack;
}

SubgraphCache::SubgraphCache(const GraphCache& parent_graph_cache)
: m_parent_graph_cache{&parent_graph_cache}
{
@@ -71,5 +76,21 @@ namespace ngraph
return GraphCache::contains(name) || m_parent_graph_cache->contains(name);
}

NodeScope SubgraphCache::node_scope(const std::string& name) const
{
if (GraphCache::contains(name))
{
return NodeScope::SubGraph;
}
else if (m_parent_graph_cache->contains(name))
{
return NodeScope::ParentGraph;
}
else
{
return NodeScope::Lack;
}
}

} // namespace onnx_import
} // namespace ngraph
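
To make the lookup order concrete, here is a minimal standalone mock of the two-level cache; a std::map of names stands in for the real node storage, and all names are hypothetical:

#include <iostream>
#include <map>
#include <string>

enum class NodeScope { ParentGraph = 1, SubGraph, Lack };

struct Cache
{
    std::map<std::string, int> nodes; // stand-in for Output<ngraph::Node> entries
    const Cache* parent = nullptr;

    // Mirrors node_scope: a cache without a parent reports ParentGraph for
    // its own entries; a subgraph cache's own entries shadow the parent's.
    NodeScope scope(const std::string& name) const
    {
        if (nodes.count(name) > 0)
            return parent ? NodeScope::SubGraph : NodeScope::ParentGraph;
        if (parent && parent->nodes.count(name) > 0)
            return NodeScope::ParentGraph;
        return NodeScope::Lack;
    }
};

int main()
{
    const Cache parent_cache{{{"add_Y", 1}}, nullptr};
    const Cache subgraph_cache{{{"Zo", 2}}, &parent_cache};

    std::cout << (subgraph_cache.scope("add_Y") == NodeScope::ParentGraph) << '\n'; // 1
    std::cout << (subgraph_cache.scope("Zo") == NodeScope::SubGraph) << '\n';       // 1
    std::cout << (subgraph_cache.scope("missing") == NodeScope::Lack) << '\n';      // 1
}

The key property is shadowing: a name defined in the subgraph wins over the same name in the parent, which is what lets the Subgraph constructor distinguish true parent-edge inputs from locally produced values.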