Skip to content

Commit

Permalink
[opset5] ngraph implementation of Loop op (#2583)
Browse files Browse the repository at this point in the history
* Loop op ngraph implementation, update IE IR Reader and ngraph to cnn converter

* refactoring SubGraphOp class

* type prop unit tests

* ngraph code style

* update comment

* single layer tests for Loop operation

* fix file name

* Add SpecialBodyPorts attribute in Loop op, update single layer tests

* add several new test cases, strict checks in Loop impl, temporarily disable single layer tests

* ngraph codestyle, refactoring, clone_new_args test

* resolve review remarks

* fix build

* fix tests

* add a new constructor of Loop op, resolve review remarks
  • Loading branch information
itikhono authored Oct 19, 2020
1 parent cc569d2 commit 84b5fc5
Show file tree
Hide file tree
Showing 22 changed files with 2,488 additions and 728 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -43,6 +43,7 @@
#include "caseless.hpp"
#include <debug.h>
#include <ngraph/opsets/opset1.hpp>
#include <ngraph/opsets/opset5.hpp>
#include "transformations/utils/utils.hpp"
#include "transformations/rt_info/fused_names_attribute.hpp"
#include "transformations/rt_info/primitives_priority_attribute.hpp"
Expand Down Expand Up @@ -809,6 +810,7 @@ void convertFunctionToICNNNetwork(const std::shared_ptr<const ::ngraph::Function
std::make_shared<Builder::NodeConverter<::ngraph::op::TopKIE>>(),
std::make_shared<Builder::NodeConverter<::ngraph::op::Unsqueeze>>(),
std::make_shared<Builder::NodeConverter<::ngraph::op::TensorIterator>>(),
std::make_shared<Builder::NodeConverter<::ngraph::opset5::Loop>>(),
std::make_shared<Builder::NodeConverter<::ngraph::op::HardSigmoid_IE>>(),
std::make_shared<Builder::NodeConverter<::ngraph::op::v1::LogicalNot>>(),
std::make_shared<Builder::NodeConverter<::ngraph::op::ShuffleChannels>>(),
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -43,6 +43,7 @@
#include <cpp/ie_cnn_network.h>
#include <ngraph/ngraph.hpp>
#include <ngraph/variant.hpp>
#include <ngraph/opsets/opset5.hpp>

#include <legacy/convert_function_to_cnn_network.hpp>
#include "legacy/graph_transformer.h"
Expand Down Expand Up @@ -114,8 +115,7 @@ CNNLayer::Ptr NodeConverter<ngraph::op::GenericIE>::createLayer(const std::share
return res;
}

template <>
CNNLayer::Ptr NodeConverter<ngraph::op::TensorIterator>::createLayer(const std::shared_ptr<ngraph::Node>& layer) const {
CNNLayer::Ptr createSubGraphLayer(const std::shared_ptr<ngraph::Node>& layer) {
auto find_input_idx = [](const CNNLayerPtr& where, const DataPtr& what) {
auto it = std::find_if(where->insData.begin(), where->insData.end(), [&](const DataWeakPtr& wk_ptr) {
auto layer_data = wk_ptr.lock();
Expand All @@ -129,7 +129,7 @@ CNNLayer::Ptr NodeConverter<ngraph::op::TensorIterator>::createLayer(const std::
return it - where->insData.begin();
};

auto tensor_iterator = ngraph::as_type_ptr<ngraph::op::TensorIterator>(layer);
auto tensor_iterator = std::dynamic_pointer_cast<ngraph::op::util::SubGraphOp>(layer);
if (!tensor_iterator) {
THROW_IE_EXCEPTION << "Cannot cast layer to TensorIterator.";
}
Expand All @@ -142,8 +142,8 @@ CNNLayer::Ptr NodeConverter<ngraph::op::TensorIterator>::createLayer(const std::
std::map<std::string, DataPtr> out_info_map;

// inputs/outputs of TensorIterator (ngraph representation)
auto parameters = tensor_iterator->get_body()->get_parameters();
auto results = tensor_iterator->get_body()->get_results();
auto parameters = tensor_iterator->get_function()->get_parameters();
auto results = tensor_iterator->get_function()->get_results();

// Convert body (ngraph representation) to CNNNetwork.
// This network will contain nodes of type = "Input" and data nodes with wrong names.
Expand All @@ -155,7 +155,7 @@ CNNLayer::Ptr NodeConverter<ngraph::op::TensorIterator>::createLayer(const std::
// This map will save information about data nodes
std::map<std::string, std::vector<TensorDesc>> layer_name_to_tensor_desc;
{
CNNNetwork body_net(tensor_iterator->get_body());
CNNNetwork body_net(tensor_iterator->get_function());
CNNNetwork net(InferenceEngine::details::convertFunctionToICNNNetwork(body_net.getFunction(), body_net));
// Paranoid check for cycles
bool res = CNNNetForestDFS(
Expand Down Expand Up @@ -356,6 +356,20 @@ CNNLayer::Ptr NodeConverter<ngraph::op::TensorIterator>::createLayer(const std::
return res;
}

template <>
CNNLayer::Ptr NodeConverter<ngraph::op::TensorIterator>::createLayer(const std::shared_ptr<ngraph::Node>& layer) const {
    // Reuse the shared SubGraphOp conversion and mark the produced layer as a TensorIterator.
    CNNLayer::Ptr sub_graph_layer = createSubGraphLayer(layer);
    sub_graph_layer->type = "TensorIterator";
    return sub_graph_layer;
}

template <>
CNNLayer::Ptr NodeConverter<ngraph::opset5::Loop>::createLayer(const std::shared_ptr<ngraph::Node>& layer) const {
    // Reuse the shared SubGraphOp conversion and mark the produced layer as a Loop.
    CNNLayer::Ptr sub_graph_layer = createSubGraphLayer(layer);
    sub_graph_layer->type = "Loop";
    return sub_graph_layer;
}

template <>
CNNLayer::Ptr NodeConverter<ngraph::op::Constant>::createLayer(const std::shared_ptr<ngraph::Node>& layer) const {
LayerParams params = {layer->get_friendly_name(), "Const",
Expand Down
38 changes: 30 additions & 8 deletions inference-engine/src/readers/ir_reader/ie_ir_parser.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@
#include <ngraph/opsets/opset.hpp>
#include <ngraph/opsets/opset2.hpp>
#include <ngraph/opsets/opset3.hpp>
#include <ngraph/opsets/opset5.hpp>
#include <ngraph/variant.hpp>

#include <cpp/ie_cnn_network.h>
Expand Down Expand Up @@ -477,6 +478,7 @@ std::shared_ptr<ngraph::Node> V10Parser::createNode(const std::vector<ngraph::Ou
std::make_shared<LayerCreator<ngraph::op::v0::Tile>>("Tile"),
std::make_shared<LayerCreator<ngraph::op::v1::TopK>>("TopK"),
std::make_shared<LayerCreator<ngraph::op::TensorIterator>>("TensorIterator"),
std::make_shared<LayerCreator<ngraph::opset5::Loop>>("Loop"),
std::make_shared<LayerCreator<ngraph::op::Transpose>>("Transpose"),
std::make_shared<LayerCreator<ngraph::op::Unsqueeze>>("Unsqueeze"),
std::make_shared<LayerCreator<ngraph::op::v1::LogicalAnd>>("LogicalAnd"),
Expand Down Expand Up @@ -662,12 +664,12 @@ std::shared_ptr<ngraph::Node> V10Parser::LayerCreator<ngraph::op::DetectionOutpu
}
}

// TensorIterator layer
template <>
std::shared_ptr<ngraph::Node> V10Parser::LayerCreator<ngraph::op::TensorIterator>::createLayer(
const ngraph::OutputVector& inputs, const pugi::xml_node& node, std::istream& binStream,
const GenericLayerParams& layerParsePrms) {
auto tensor_iterator = std::make_shared<ngraph::op::TensorIterator>();
// SubGraph layer
std::shared_ptr<ngraph::Node>
V10Parser::LayerBaseCreator::fillSubGraphLayer(const ngraph::OutputVector &inputs, const pugi::xml_node &node,
std::istream &binStream,
const V10Parser::GenericLayerParams &layerParsePrms,
std::shared_ptr<ngraph::op::util::SubGraphOp> tensor_iterator) {
tensor_iterator->set_friendly_name(GetStrAttr(node, "name"));
auto body_node = node.child("body");

Expand Down Expand Up @@ -695,7 +697,7 @@ std::shared_ptr<ngraph::Node> V10Parser::LayerCreator<ngraph::op::TensorIterator
// Disabled reshape for generic operations in the TI body
::ngraph::op::GenericIE::DisableReshape noReshape(ngraph_function);
auto body = std::make_shared<ngraph::Function>(result_nodes, parameter_nodes);
tensor_iterator->set_body(body);
tensor_iterator->set_function(body);

// Parse PortMap: inputs
std::map<uint64_t, pugi::xml_node> input_map;
Expand Down Expand Up @@ -795,7 +797,8 @@ std::shared_ptr<ngraph::Node> V10Parser::LayerCreator<ngraph::op::TensorIterator
tensor_iterator->get_concatenated_slices(*body_result, start, stride, part_size, end, axis);

if (!is_sliced_input_exists) {
tensor_iterator->set_num_iterations((std::abs(end - start)) / part_size);
if (auto ti = std::dynamic_pointer_cast<ngraph::op::TensorIterator>(tensor_iterator))
ti->set_num_iterations((std::abs(end - start)) / part_size);
}
} else {
// otherwise create ngraph::TensorIterator::BodyOutput. -1 means last iteration.
Expand All @@ -807,6 +810,25 @@ std::shared_ptr<ngraph::Node> V10Parser::LayerCreator<ngraph::op::TensorIterator
return tensor_iterator;
}


// TensorIterator layer: create an empty TensorIterator node and populate it
// (body, port maps) from the XML via the shared SubGraph parsing routine.
template <>
std::shared_ptr<ngraph::Node> V10Parser::LayerCreator<ngraph::op::TensorIterator>::createLayer(
    const ngraph::OutputVector& inputs, const pugi::xml_node& node, std::istream& binStream,
    const GenericLayerParams& layerParsePrms) {
    return fillSubGraphLayer(inputs, node, binStream, layerParsePrms,
                             std::make_shared<ngraph::op::TensorIterator>());
}

// Loop layer: create an empty opset5::Loop node and populate it
// (body, port maps) from the XML via the shared SubGraph parsing routine.
template <>
std::shared_ptr<ngraph::Node> V10Parser::LayerCreator<ngraph::opset5::Loop>::createLayer(
    const ngraph::OutputVector& inputs, const pugi::xml_node& node, std::istream& binStream,
    const GenericLayerParams& layerParsePrms) {
    return fillSubGraphLayer(inputs, node, binStream, layerParsePrms,
                             std::make_shared<ngraph::opset5::Loop>());
}

// PriorBoxClustered layer
template <>
std::shared_ptr<ngraph::Node> V10Parser::LayerCreator<ngraph::op::PriorBoxClustered>::createLayer(
Expand Down
5 changes: 5 additions & 0 deletions inference-engine/src/readers/ir_reader/ie_ir_parser.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@

#ifdef IR_READER_V10
# include <ngraph/node.hpp>
# include <ngraph/op/util/sub_graph_base.hpp>
# include <legacy/ie_ngraph_utils.hpp>
# include <cpp/ie_cnn_network.h>
#endif // IR_READER_V10
Expand Down Expand Up @@ -102,6 +103,10 @@ class V10Parser : public IParser {
std::string type;

protected:
static std::shared_ptr<ngraph::Node> fillSubGraphLayer(const ngraph::OutputVector& inputs, const pugi::xml_node& node,
std::istream& binStream,
const GenericLayerParams& layerParsePrms,
std::shared_ptr<ngraph::op::util::SubGraphOp> sub_graph_node);
explicit LayerBaseCreator(const std::string& type): type(type) {}
std::string getType() {
return type;
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,34 @@
// Copyright (C) 2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <vector>
#include <ngraph/op/util/attr_types.hpp>
#include "single_layer_tests/loop.hpp"
#include "common_test_utils/test_constants.hpp"

using namespace LayerTestsDefinitions;

namespace {
// Parameter space for the Loop single-layer smoke tests.
// NOTE(review): the original comment here ("without clip values increase rapidly, so use only
// seq_lenghts = 2") appears copied from recurrent-cell tests — Loop has no clip/seq_lengths
// attribute; confirm intent with the author.
std::vector<bool> execute_first_iteration{true};
std::vector<bool> is_body_condition_const{true, false};
std::vector<bool> body_condition{true, false}; // works only if is_body_condition_const == true
std::vector<int64_t> trip_count{1, 10, -1}; // -1 means infinity
// Each inner pair: input shape + how the input enters the body
// (INVARIANT = same value every iteration, MERGED = carried via a back-edge).
std::vector<std::vector<std::pair<std::vector<size_t>, LOOP_IN_TYPE>>> inputs = {
{{{32, 1, 10}, LOOP_IN_TYPE::INVARIANT}, {{32, 1, 10}, LOOP_IN_TYPE::INVARIANT}, {{32, 1, 10}, LOOP_IN_TYPE::MERGED}},
};
std::vector<InferenceEngine::Precision> netPrecisions = {InferenceEngine::Precision::FP32,
InferenceEngine::Precision::FP16};

// Full cross-product of the parameters above, run on the CPU plugin.
INSTANTIATE_TEST_CASE_P(smoke_LoopCommonZeroClip, LoopTest,
::testing::Combine(
::testing::ValuesIn(execute_first_iteration),
::testing::ValuesIn(is_body_condition_const),
::testing::ValuesIn(body_condition),
::testing::ValuesIn(trip_count),
::testing::ValuesIn(inputs),
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_CPU)),
LoopTest::getTestCaseName);
} // namespace
Original file line number Diff line number Diff line change
Expand Up @@ -53,6 +53,9 @@ std::vector<std::string> disabledTestPatterns() {
R"(.*ReverseSequenceLayerTest.*netPRC=(I8|U8).*)",
// TODO: Issue: 38841
R"(.*TopKLayerTest.*k=10.*mode=min.*sort=index.*)",
R"(.*TopKLayerTest.*k=5.*sort=(none|index).*)"
R"(.*TopKLayerTest.*k=5.*sort=(none|index).*)",

// TODO: not supported yet, ticket 37690
R"(.*Loop.*)"
};
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,40 @@
// Copyright (C) 2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <tuple>
#include <string>
#include <vector>
#include <memory>
#include <ngraph/op/util/attr_types.hpp>
#include "functional_test_utils/layer_test_utils.hpp"
#include "ngraph_functions/builders.hpp"
#include "ngraph_functions/utils/ngraph_helpers.hpp"

namespace LayerTestsDefinitions {
// How an outer Loop input is wired into the body:
// INVARIANT - the same value is fed to every iteration;
// MERGED    - the value is carried between iterations via a back-edge.
enum LOOP_IN_TYPE {
INVARIANT,
MERGED
};

// Tuple of parameters that defines a single Loop test case (see the
// per-field comments below for the meaning of each element).
using LoopParams = typename std::tuple<
bool, // ExecuteFirstIteration
bool, // BodyCondition is a constant?
bool, // BodyCondition value, if it is a Const
int64_t, // TripCount, -1 means infinity
std::vector<std::pair<std::vector<size_t>, LOOP_IN_TYPE>>, // inputs
InferenceEngine::Precision, // Network precision
std::string>; // Device name

// Parameterized single-layer test for the opset5 Loop operation.
// The actual test body lives in LayerTestsCommon; SetUp() builds the
// ngraph function for the given parameter combination.
class LoopTest : public testing::WithParamInterface<LoopParams>,
virtual public LayerTestsUtils::LayerTestsCommon {
public:
// Builds a human-readable test name from the parameter tuple.
static std::string getTestCaseName(const testing::TestParamInfo<LoopParams> &obj);

protected:
void SetUp() override;
};

} // namespace LayerTestsDefinitions
Loading

0 comments on commit 84b5fc5

Please sign in to comment.