diff --git a/docs/img/OV-diagram-step1.png b/docs/img/OV-diagram-step1.png
index d1ff39f1aaa1ea..da3212d0713bdb 100644
Binary files a/docs/img/OV-diagram-step1.png and b/docs/img/OV-diagram-step1.png differ
diff --git a/docs/img/OV-diagram-step4.png b/docs/img/OV-diagram-step4.png
index 75fe645a313e32..7df9835e8e86e9 100644
Binary files a/docs/img/OV-diagram-step4.png and b/docs/img/OV-diagram-step4.png differ
diff --git a/inference-engine/src/gna_plugin/backend/make_pwl.cpp b/inference-engine/src/gna_plugin/backend/make_pwl.cpp
index cd9ff0852e30f4..e0f71bc7fc7ec8 100644
--- a/inference-engine/src/gna_plugin/backend/make_pwl.cpp
+++ b/inference-engine/src/gna_plugin/backend/make_pwl.cpp
@@ -279,19 +279,20 @@ void make_gna_pwl(const DnnActivation fun,
             gnalog() << "=========================== LeakyReLU Segments ======================\n";
             int32_t x_lower = INT32_MIN;
             int32_t x_upper = INT32_MAX;
-            int16_t y_lower = y_min;
+            int32_t y_lower = y_min;
             int16_t y_upper = y_max;
             if (fun.fqParams.set) {
                 x_lower = std::max(FLOAT_TO_INT64(*fun.fqParams.input_low * 1.25 * in_scale), static_cast<int64_t>(x_lower));
                 x_upper = std::min(FLOAT_TO_INT64(*fun.fqParams.input_high * 1.25 * in_scale), static_cast<int64_t>(x_upper));
-                y_lower = std::max(FLOAT_TO_INT32(*fun.fqParams.input_low * 1.25 * out_scale), static_cast<int32_t>(y_lower));
+                // y_lower can be reduced with negative slope
+                y_lower = *fun.fqParams.input_low * 1.25 * out_scale;
                 y_upper = std::min(FLOAT_TO_INT32(*fun.fqParams.input_high * 1.25 * out_scale), static_cast<int32_t>(y_upper));
             } else {
                 if (x_lower < y_lower * in_scale / out_scale) x_lower = FLOAT_TO_INT32(y_lower * in_scale / out_scale);
                 if (y_lower < x_lower * out_scale / in_scale) y_lower = FLOAT_TO_INT16(x_lower * out_scale / in_scale);
             }
 
-            gna_pwl[0].yBase = y_lower * fun.args.lrelu.negative_slope;
+            gna_pwl[0].yBase = std::max(FLOAT_TO_INT32(y_lower * fun.args.lrelu.negative_slope), static_cast<int32_t>(y_min));
             s = gna_slope(fun.args.lrelu.negative_slope, in_scale, out_scale);
             gna_pwl[0].xBase = (x_lower & XBASEMASK) | s.slope_scale_index;  // zero out the 2 lsb
             gna_pwl[0].slope = FLOAT_TO_INT16(s.slope * s.slope_scale);
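Note: in the LeakyReLU hunk above, y_lower is widened to int32_t because a fake-quantize bound scaled by out_scale can legitimately fall below INT16_MIN before the negative slope shrinks it back into range; only the final yBase is clamped to y_min. A minimal standalone sketch of that arithmetic (hypothetical scale and range values; float_to_int32 stands in for the plugin's FLOAT_TO_INT32 macro):

    #include <algorithm>
    #include <cmath>
    #include <cstdint>
    #include <iostream>

    // Rounding float->int conversion, standing in for FLOAT_TO_INT32.
    static int32_t float_to_int32(double v) { return static_cast<int32_t>(std::lround(v)); }

    int main() {
        const double out_scale = 2048.0;     // hypothetical output scale factor
        const double input_low = -20.0;      // hypothetical FQ input_low
        const float negative_slope = 0.01f;  // LeakyReLU slope
        const int32_t y_min = INT16_MIN;     // int16 output floor

        // In int16_t this value would already have been clamped/truncated;
        // int32_t keeps the true range until the slope is applied.
        int32_t y_lower = float_to_int32(input_low * 1.25 * out_scale);  // -51200, below INT16_MIN
        int32_t y_base = std::max(float_to_int32(y_lower * negative_slope),
                                  static_cast<int32_t>(y_min));          // -512, fits int16
        std::cout << "y_lower=" << y_lower << " yBase=" << y_base << "\n";
    }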
diff --git a/inference-engine/src/gna_plugin/frontend/scale_factor_calc.hpp b/inference-engine/src/gna_plugin/frontend/scale_factor_calc.hpp
index 11f13a7a9acad7..f63784810369bb 100644
--- a/inference-engine/src/gna_plugin/frontend/scale_factor_calc.hpp
+++ b/inference-engine/src/gna_plugin/frontend/scale_factor_calc.hpp
@@ -978,7 +978,7 @@ class ScaleFactorPerLayer<InferenceEngine::ConcatLayer*> {
                 gnalog() << "[UFS] from : " << concatLayer->name << " reached: " << layer->name;
                 // found that direct input to concat is a indirect parent of align filter - so no link required
                 auto info = LayerInfo(layer);
-                if (!info.isWeightable() && !info.isActivation() && !info.isConst()) {
+                if (!info.isWeightable() && !info.isActivation() && !info.isConst() && !info.isMemory()) {
                     gnalog() << "... skipped\n";
                     return;
                 }
@@ -1030,8 +1030,8 @@ class ScaleFactorPerLayer<InferenceEngine::ConcatLayer*> {
                 }
 
                 quantDataForConCatInput->_dst_quant.SetScale(newScaleFactor);
-            } else if (restarLayerInfo.isConst()) {
-                gnalog() << "... warning const layer will be requantized\n";
+            } else if (restarLayerInfo.isConst() || restarLayerInfo.isMemory()) {
+                gnalog() << "... warning " << restartedLayer->type << " layer will be requantized\n";
                 quantDataForConCatInput->_src_quant.SetScale(sourceQuantParams->_dst_quant.GetScale());
                 quantDataForConCatInput->_dst_quant.SetScale(sourceQuantParams->_dst_quant.GetScale());
             } else {
diff --git a/inference-engine/src/gna_plugin/gna_graph_compiler.cpp b/inference-engine/src/gna_plugin/gna_graph_compiler.cpp
index e48595d6c272f3..9bc866dad5fcb4 100644
--- a/inference-engine/src/gna_plugin/gna_graph_compiler.cpp
+++ b/inference-engine/src/gna_plugin/gna_graph_compiler.cpp
@@ -145,12 +145,15 @@ void GNAGraphCompiler::fillSplitConnections(InferenceEngine::CNNLayerPtr layer)
         size_t output_layer_size = 0;
 
         for (int j = 0; j != getInputTo(layer->outData[i]).size(); j++) {
-            auto outFunctionalLayer = CNNNetGetNextLayerSkipCertain(layer, i, j, [](CNNLayerPtr l) {
+            auto outFunctionalLayer = CNNNetCheckNextLayerSkipCertain(layer, i, j, true, [](CNNLayerPtr l) {
                 return LayerInfo(l).isNonFunctional();
             });
 
             if (!outFunctionalLayer.first) {
-                THROW_GNA_LAYER_EXCEPTION(layer) << " outData["<< i << "]" << " connected by " << j <<" connection doesnt connect to functional layer";
+                output_layer_size =
+                    InferenceEngine::details::product(begin(layer->outData[i]->getDims()),
+                        end(layer->outData[i]->getDims())) * layer->outData[i]->getPrecision().size();
+                continue;
             }
 
             for (int idx : outFunctionalLayer.second) {
diff --git a/inference-engine/src/gna_plugin/gna_plugin.cpp b/inference-engine/src/gna_plugin/gna_plugin.cpp
index d6944f0c621fd4..ba33b92dfeed2f 100644
--- a/inference-engine/src/gna_plugin/gna_plugin.cpp
+++ b/inference-engine/src/gna_plugin/gna_plugin.cpp
@@ -58,11 +58,11 @@
 #include "transformations/remove_extra_reshapes.hpp"
 #include "transformations/insert_transpose_after_convolution_or_pooling.hpp"
-#include "transformations/insert_transpose_before_matmul.hpp"
 #include "transformations/reorder_activation_and_pooling.hpp"
 #include "transformations/swap_input_matmul_gna.hpp"
 #include "transformations/convert_matmul_to_pointwise_convolution.hpp"
 #include "transformations/split_convolution_with_large_buffer_size.hpp"
+#include "transformations/handle_transposes_around_matmul.hpp"
 
 #include
@@ -687,7 +687,7 @@ void GNAPlugin::LoadNetwork(CNNNetwork & _network) {
         manager.register_pass();
         manager.register_pass();
         manager.register_pass();
-        manager.register_pass<InsertTransposeBeforeMatmul>();
+        manager.register_pass<HandleTransposesAroundMatMul>();
         manager.register_pass();
         manager.register_pass();
         manager.register_pass();
diff --git a/inference-engine/src/gna_plugin/optimizer/gna_pass_manager.cpp b/inference-engine/src/gna_plugin/optimizer/gna_pass_manager.cpp
index f16645ae6cb2ad..de9a7a12c27f1d 100644
--- a/inference-engine/src/gna_plugin/optimizer/gna_pass_manager.cpp
+++ b/inference-engine/src/gna_plugin/optimizer/gna_pass_manager.cpp
@@ -2118,8 +2118,11 @@ void MoveFakeQuantizeLayerIntoQuantParamsPass :: run() {
             THROW_GNA_LAYER_EXCEPTION(fqLayer) << "Zero levels";
         }
 
-        // Before FQ layer is removed, the previous layer has to be updated with its quantization data
-        auto quantParamsPrevLayer = InferenceEngine::getInjectedData<QuantizedLayerParams>(prevLayer);
+        // Before FQ layer is removed, the previous functional layer has to be updated with its quantization data
+        auto prevFuncLayer = CNNNetPrevLayerSkipCertain(*fqLayer, 0, [](CNNLayerPtr layer) {
+            return LayerInfo(layer).isNonFunctional();
+        });
+        auto quantParamsPrevLayer = InferenceEngine::getInjectedData<QuantizedLayerParams>(prevFuncLayer);
         quantParamsPrevLayer->_dst_quant.SetLevels(fqLevels);
         quantParamsPrevLayer->_dst_quant.SetMinValues({ inputRange.first[0] }, true);
         quantParamsPrevLayer->_dst_quant.SetMaxValues({ inputRange.second[0] }, true);
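Note: both the fillSplitConnections change and the FakeQuantize hunk rely on the same idiom: walk the graph while skipping layers that are non-functional for GNA (reshapes, squeezes, and similar). A minimal sketch of that idiom with hypothetical stand-in types (the real CNNNetPrevLayerSkipCertain/CNNNetCheckNextLayerSkipCertain operate on Inference Engine CNNLayer graphs):

    #include <functional>
    #include <memory>
    #include <string>

    // Hypothetical stand-in for a layer in a single-input chain.
    struct Layer {
        std::string type;
        std::shared_ptr<Layer> prev;  // nullptr at the network input
    };

    // Walk upward past layers the predicate marks as skippable, mirroring
    // the intent of CNNNetPrevLayerSkipCertain in the pass above.
    std::shared_ptr<Layer> PrevLayerSkipCertain(const std::shared_ptr<Layer>& start,
                                                const std::function<bool(const Layer&)>& skip) {
        auto layer = start->prev;
        while (layer && skip(*layer)) {
            layer = layer->prev;
        }
        return layer;  // first functional ancestor, or nullptr
    }

This is why the FQ parameters now land on the first functional ancestor instead of, say, an intervening Reshape.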
diff --git a/inference-engine/src/gna_plugin/transformations/handle_transposes_around_matmul.cpp b/inference-engine/src/gna_plugin/transformations/handle_transposes_around_matmul.cpp
new file mode 100644
index 00000000000000..6b0cad24ec2e31
--- /dev/null
+++ b/inference-engine/src/gna_plugin/transformations/handle_transposes_around_matmul.cpp
@@ -0,0 +1,125 @@
+// Copyright (C) 2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "transformations/handle_transposes_around_matmul.hpp"
+
+#include <numeric>
+
+#include <ngraph/opsets/opset7.hpp>
+#include <ngraph/pattern/op/wrap_type.hpp>
+#include <ngraph/pattern/op/or.hpp>
+#include <ngraph/rt_info.hpp>
+
+#include "gna_plugin_log.hpp"
+
+using namespace GNAPluginNS;
+
+NGRAPH_RTTI_DEFINITION(HandleTransposesAroundMatMul, "HandleTransposesAroundMatMul", 0);
+NGRAPH_RTTI_DEFINITION(HandleTransposeBeforeMatMul, "HandleTransposeBeforeMatMul", 0);
+NGRAPH_RTTI_DEFINITION(HandleTransposeAfterMatMul, "HandleTransposeAfterMatMul", 0);
+
+static void ReplaceTransposeWithReshape(std::shared_ptr<ngraph::Node> transpose_node) {
+    auto shape = transpose_node->get_output_shape(0);
+    auto reshape_const = std::make_shared<ngraph::opset7::Constant>(ngraph::element::Type_t::i64,
+        ngraph::Shape{shape.size()}, shape);
+    auto reshape_node = std::make_shared<ngraph::opset7::Reshape>(transpose_node->input_value(0), reshape_const, false);
+    reshape_node->set_friendly_name(transpose_node->get_friendly_name() + "/reshape");
+    ngraph::copy_runtime_info(transpose_node, reshape_node);
+    transpose_node->output(0).replace(reshape_node->output(0));
+}
+
+static void InsertTranspose(std::shared_ptr<ngraph::Node> prev_node, const std::string& base_name) {
+    auto consumers = prev_node->output(0).get_target_inputs();
+    const auto orig_shape = prev_node->get_output_shape(0);
+    std::vector<size_t> transpose_ids;
+    for (size_t i = 0; i < orig_shape.size(); ++i) {
+        if (orig_shape[i] > 1) {
+            transpose_ids.push_back(i);
+        }
+    }
+    IE_ASSERT(transpose_ids.size() == 2);
+    std::vector<size_t> permute_order(orig_shape.size());
+    std::iota(std::begin(permute_order), std::end(permute_order), 0);
+    std::swap(permute_order[transpose_ids[0]], permute_order[transpose_ids[1]]);
+
+    auto transpose_order = ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{permute_order.size()}, permute_order);
+    auto transpose = std::make_shared<ngraph::opset7::Transpose>(prev_node, transpose_order);
+    transpose->set_friendly_name(base_name + "/in_transpose");
+
+    auto reshapeConstAfter = std::make_shared<ngraph::opset7::Constant>(ngraph::element::Type_t::i64,
+        ngraph::Shape{orig_shape.size()}, orig_shape);
+    auto reshapeAfter = std::make_shared<ngraph::opset7::Reshape>(transpose, reshapeConstAfter, false);
+    reshapeAfter->set_friendly_name(base_name + "/reshape_after_transpose");
+    ngraph::copy_runtime_info(prev_node, ngraph::NodeVector{transpose, reshapeAfter});
+
+    for (auto input : consumers) {
+        input.replace_source_output(reshapeAfter);
+    }
+}
+
+static bool IsTransposeSupported(const ngraph::Shape& shape) {
+    auto shape_no_1 = shape;
+    shape_no_1.erase(std::remove(shape_no_1.begin(), shape_no_1.end(), 1), shape_no_1.end());
+    if (shape_no_1.size() != 2) return false;
+    size_t min, max;
+    std::tie(min, max) = std::minmax(shape_no_1[0], shape_no_1[1]);
+    return min <= 8 && max % 8 == 0;
+}
+
+HandleTransposeBeforeMatMul::HandleTransposeBeforeMatMul() {
+    auto reshape = ngraph::pattern::wrap_type<ngraph::opset7::Reshape>({ngraph::pattern::any_input(),
+        ngraph::pattern::any_input()}, VerifyReshape());
+    auto transpose = ngraph::pattern::wrap_type<ngraph::opset7::Transpose>({reshape,
+        ngraph::pattern::any_input()});
+    auto matmul_input = std::make_shared<ngraph::pattern::op::Or>(ngraph::OutputVector{reshape, transpose});
+    auto matmul1 = ngraph::pattern::wrap_type<ngraph::opset7::MatMul>({matmul_input, ngraph::pattern::any_input()});
+    auto matmul2 = ngraph::pattern::wrap_type<ngraph::opset7::MatMul>({ngraph::pattern::any_input(), matmul_input});
+    auto matmul = std::make_shared<ngraph::pattern::op::Or>(ngraph::OutputVector{matmul1, matmul2});
+
+    ngraph::matcher_pass_callback callback = [=](ngraph::pattern::Matcher &m) {
+        const auto& pattern_map = m.get_pattern_value_map();
+        auto transpose_it = pattern_map.find(transpose);
+        if (transpose_it != std::end(pattern_map)) {
+            ReplaceTransposeWithReshape(transpose_it->second.get_node_shared_ptr());
+        } else {
+            auto reshape_node = pattern_map.at(reshape).get_node_shared_ptr();
+            if (!IsTransposeSupported(reshape_node->get_output_shape(0))) return false;
+            auto matmul_it = pattern_map.find(matmul1);
+            auto matmul_out = matmul_it != std::end(pattern_map) ? matmul_it->second : pattern_map.at(matmul2);
+            InsertTranspose(reshape_node, matmul_out.get_node_shared_ptr()->get_friendly_name());
+        }
+        return true;
+    };
+
+    auto m = std::make_shared<ngraph::pattern::Matcher>(matmul, "HandleTransposeBeforeMatMul");
+    this->register_matcher(m, callback);
+}
+
+HandleTransposeAfterMatMul::HandleTransposeAfterMatMul() {
+    auto matmul = ngraph::pattern::wrap_type<ngraph::opset7::MatMul>();
+    auto fq = ngraph::pattern::wrap_type<ngraph::opset7::FakeQuantize>({matmul, ngraph::pattern::any_input(),
+        ngraph::pattern::any_input(), ngraph::pattern::any_input(), ngraph::pattern::any_input()});
+    auto transpose_input = std::make_shared<ngraph::pattern::op::Or>(ngraph::OutputVector{matmul, fq});
+    auto transpose = ngraph::pattern::wrap_type<ngraph::opset7::Transpose>({transpose_input, ngraph::pattern::any_input()});
+    auto reshape_input = std::make_shared<ngraph::pattern::op::Or>(ngraph::OutputVector{transpose_input, transpose});
+    auto reshape = ngraph::pattern::wrap_type<ngraph::opset7::Reshape>({reshape_input,
+        ngraph::pattern::any_input()}, VerifyReshape());
+
+    ngraph::matcher_pass_callback callback = [=](ngraph::pattern::Matcher &m) {
+        const auto& pattern_map = m.get_pattern_value_map();
+        auto transpose_it = pattern_map.find(transpose);
+        if (transpose_it != std::end(pattern_map)) {
+            ReplaceTransposeWithReshape(transpose_it->second.get_node_shared_ptr());
+        } else {
+            auto reshape_node = pattern_map.at(reshape).get_node_shared_ptr();
+            if (!IsTransposeSupported(reshape_node->get_input_shape(0))) return false;
+            auto matmul_node = pattern_map.at(matmul).get_node_shared_ptr();
+            InsertTranspose(matmul_node, matmul_node->get_friendly_name());
+        }
+        return true;
+    };
+
+    auto m = std::make_shared<ngraph::pattern::Matcher>(reshape, "HandleTransposeAfterMatMul");
+    this->register_matcher(m, callback);
+}
\ No newline at end of file
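Note: HandleTransposesAroundMatMul is a GraphRewrite bundling the two matchers above, so a single registration replaces the old InsertTransposeBeforeMatmul pass. A minimal usage sketch with the real ngraph pass-manager API (run_gna_transposes is a hypothetical helper; function construction elided):

    #include <memory>
    #include <ngraph/pass/manager.hpp>
    #include "transformations/handle_transposes_around_matmul.hpp"

    void run_gna_transposes(const std::shared_ptr<ngraph::Function>& function) {
        ngraph::pass::Manager manager;
        // One registration covers both HandleTransposeBeforeMatMul and
        // HandleTransposeAfterMatMul via the GraphRewrite wrapper.
        manager.register_pass<GNAPluginNS::HandleTransposesAroundMatMul>();
        manager.run_passes(function);
    }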
diff --git a/inference-engine/src/gna_plugin/transformations/handle_transposes_around_matmul.hpp b/inference-engine/src/gna_plugin/transformations/handle_transposes_around_matmul.hpp
new file mode 100644
index 00000000000000..005b1dce14adec
--- /dev/null
+++ b/inference-engine/src/gna_plugin/transformations/handle_transposes_around_matmul.hpp
@@ -0,0 +1,74 @@
+// Copyright (C) 2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <ngraph/pass/graph_rewrite.hpp>
+
+namespace GNAPluginNS {
+
+struct VerifyReshape {
+    bool operator()(const ngraph::Output<ngraph::Node>& reshape_out) const {
+        auto in_shape = reshape_out.get_node_shared_ptr()->get_input_shape(0);
+        auto out_shape = reshape_out.get_node_shared_ptr()->get_output_shape(0);
+
+        // Check if Reshape changes the final 2d shape of Affine primitive
+        in_shape.erase(std::remove(in_shape.begin(), in_shape.end(), 1), in_shape.end());
+        out_shape.erase(std::remove(out_shape.begin(), out_shape.end(), 1), out_shape.end());
+        return in_shape != out_shape;
+    }
+};
+
+/**
+ * @brief Inserts Transpose before MatMul or removes it (if it exists) if there is Reshape
+ * before MatMul which changes the batch size:
+ *    [1, A*B]             [1, A*B]
+ *       |                    |
+ *    Reshape              Reshape
+ *       |                    |
+ *  [1, A, 1, B]        [1, A, 1, B]
+ *       |                    |
+ *       |                Transpose
+ *       |      ->            |
+ *       |      <-      [1, B, 1, A]
+ *       |                    |
+ *     MatMul              MatMul
+ */
+class HandleTransposeBeforeMatMul : public ngraph::pass::MatcherPass {
+public:
+    NGRAPH_RTTI_DECLARATION;
+    HandleTransposeBeforeMatMul();
+};
+
+/**
+ * @brief Inserts Transpose after MatMul or removes it (if it exists) if there is Reshape
+ * after MatMul which changes the batch size:
+ *     MatMul              MatMul
+ *       |                    |
+ *  [1, A, 1, B]        [1, A, 1, B]
+ *       |                    |
+ *       |                Transpose
+ *       |      ->            |
+ *       |      <-      [1, B, 1, A]
+ *       |                    |
+ *    Reshape              Reshape
+ *       |                    |
+ *   [1, A*B]             [1, A*B]
+ */
+class HandleTransposeAfterMatMul : public ngraph::pass::MatcherPass {
+public:
+    NGRAPH_RTTI_DECLARATION;
+    HandleTransposeAfterMatMul();
+};
+
+class HandleTransposesAroundMatMul: public ngraph::pass::GraphRewrite {
+public:
+    NGRAPH_RTTI_DECLARATION;
+    HandleTransposesAroundMatMul() {
+        add_matcher<HandleTransposeBeforeMatMul>();
+        add_matcher<HandleTransposeAfterMatMul>();
+    }
+};
+
+} // namespace GNAPluginNS
\ No newline at end of file
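Note: the eligibility rules above decide when an interleave-friendly Transpose is inserted. A standalone re-statement with worked shapes (a sketch of the same checks, not the plugin code itself):

    #include <algorithm>
    #include <cstddef>
    #include <tuple>
    #include <vector>

    // After dropping 1-dims the shape must be 2D, with the smaller extent
    // at most 8 and the larger one a multiple of 8 (GNA interleave limits).
    bool is_transpose_supported(std::vector<size_t> shape) {
        shape.erase(std::remove(shape.begin(), shape.end(), 1), shape.end());
        if (shape.size() != 2) return false;
        size_t lo, hi;
        std::tie(lo, hi) = std::minmax(shape[0], shape[1]);
        return lo <= 8 && hi % 8 == 0;
    }

    // is_transpose_supported({1, 8, 1, 64})  -> true  (8 <= 8, 64 % 8 == 0)
    // is_transpose_supported({1, 10, 1, 64}) -> false (10 > 8)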
diff --git a/inference-engine/src/gna_plugin/transformations/insert_transpose_before_matmul.cpp b/inference-engine/src/gna_plugin/transformations/insert_transpose_before_matmul.cpp
deleted file mode 100644
index 3e5c579af8f14a..00000000000000
--- a/inference-engine/src/gna_plugin/transformations/insert_transpose_before_matmul.cpp
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright (C) 2021 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <numeric>
-
-#include "transformations/insert_transpose_before_matmul.hpp"
-
-#include <ngraph/opsets/opset7.hpp>
-#include <ngraph/rt_info.hpp>
-#include <ngraph/pattern/op/wrap_type.hpp>
-
-using namespace GNAPluginNS;
-
-NGRAPH_RTTI_DEFINITION(InsertTransposeBeforeMatmul, "InsertTransposeBeforeMatmul", 0);
-
-InsertTransposeBeforeMatmul::InsertTransposeBeforeMatmul() {
-    MATCHER_SCOPE(InsertTransposeBeforeMatmul);
-    auto reshape = ngraph::pattern::wrap_type<ngraph::opset7::Reshape>({ngraph::pattern::any_input(),
-                                                                        ngraph::pattern::any_input()},
-                                                                        ngraph::pattern::rank_equals(2));
-    auto matmul1 = ngraph::pattern::wrap_type<ngraph::opset7::MatMul>({ngraph::pattern::any_input(), reshape});
-    auto matmul2 = ngraph::pattern::wrap_type<ngraph::opset7::MatMul>({reshape, ngraph::pattern::any_input()});
-    auto root = std::make_shared<ngraph::pattern::op::Or>(ngraph::OutputVector{matmul1, matmul2});
-
-    ngraph::matcher_pass_callback callback = [=](ngraph::pattern::Matcher &m) {
-        auto& pattern_map = m.get_pattern_value_map();
-        auto reshape_node = pattern_map.at(reshape).get_node_shared_ptr();
-        auto reshape_in_shape = reshape_node->get_input_shape(0);
-        auto reshape_out_shape = reshape_node->get_output_shape(0);
-        if (reshape_in_shape.front() == reshape_out_shape.front()) {
-            return false;
-        }
-
-        if (reshape_out_shape[0] == 1 || reshape_out_shape[1] == 1) {
-            return false;
-        }
-
-        size_t min, max;
-        std::tie(min, max) = std::minmax(reshape_out_shape[0], reshape_out_shape[1]);
-        if (min > 8 || max % 8 != 0) return false;
-
-        auto consumers = reshape_node->output(0).get_target_inputs();
-        auto matmul_node = consumers.begin()->get_node()->shared_from_this();
-
-        auto transpose_order = ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{2}, std::vector<size_t>{1, 0});
-        auto transpose = register_new_node<ngraph::opset7::Transpose>(reshape_node, transpose_order);
-        transpose->set_friendly_name(matmul_node->get_friendly_name() + "/in_transpose");
-
-        auto transpose_out_shape = transpose->output(0).get_shape();
-        std::swap(transpose_out_shape[0], transpose_out_shape[1]);
-        auto reshapeConstAfter = std::make_shared<ngraph::opset7::Constant>(ngraph::element::Type_t::i64,
-                                                                            ngraph::Shape{2},
-                                                                            transpose_out_shape);
-        auto reshapeAfter = std::make_shared<ngraph::opset7::Reshape>(transpose, reshapeConstAfter, false);
-        reshapeAfter->set_friendly_name(matmul_node->get_friendly_name() + "/reshape_after_transpose");
-
-        for (auto input : consumers) {
-            input.replace_source_output(reshapeAfter);
-        }
-
-        return true;
-    };
-
-    auto m = std::make_shared<ngraph::pattern::Matcher>(root, matcher_name);
-    this->register_matcher(m, callback);
-}
diff --git a/inference-engine/src/gna_plugin/transformations/insert_transpose_before_matmul.hpp b/inference-engine/src/gna_plugin/transformations/insert_transpose_before_matmul.hpp
deleted file mode 100644
index 943bb905f04169..00000000000000
--- a/inference-engine/src/gna_plugin/transformations/insert_transpose_before_matmul.hpp
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright (C) 2021 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <ngraph/pass/graph_rewrite.hpp>
-
-namespace GNAPluginNS {
-
-/**
- * @brief Inserts Transpose before MatMul in the following topology:
- *     [1, A]
- *        |
- *     Reshape
- *        |
- *     [B, C],
- *     1 < B <= 8, C % 8 == 0 or
- *     B % 8 == 0, 1 < C <= 8
- *        |    Const
- *         \  /
- *        Matmul
- */
-class InsertTransposeBeforeMatmul : public ngraph::pass::MatcherPass {
-public:
-  NGRAPH_RTTI_DECLARATION;
-  InsertTransposeBeforeMatmul();
-};
-
-} // namespace GNAPluginNS
\ No newline at end of file
diff --git a/inference-engine/tests/functional/plugin/gna/CMakeLists.txt b/inference-engine/tests/functional/plugin/gna/CMakeLists.txt
index 168780ca8346e4..2d86efcc770b09 100644
--- a/inference-engine/tests/functional/plugin/gna/CMakeLists.txt
+++ b/inference-engine/tests/functional/plugin/gna/CMakeLists.txt
@@ -7,10 +7,13 @@ set(TARGET_NAME gnaFuncTests)
 addIeTargetTest(
         NAME ${TARGET_NAME}
         ROOT ${CMAKE_CURRENT_SOURCE_DIR}
+        INCLUDES
+            ${IE_MAIN_SOURCE_DIR}/src/gna_plugin/transformations
        DEPENDENCIES
            GNAPlugin
        LINK_LIBRARIES
            funcSharedTests
+           GNAPlugin_test_static
        ADD_CPPLINT
        LABELS
            GNA
diff --git a/inference-engine/tests/functional/plugin/gna/shared_tests_instances/subgraph_tests/relu_split_reshape.cpp b/inference-engine/tests/functional/plugin/gna/shared_tests_instances/subgraph_tests/relu_split_reshape.cpp
new file mode 100644
index 00000000000000..73d36467ccdfda
--- /dev/null
+++ b/inference-engine/tests/functional/plugin/gna/shared_tests_instances/subgraph_tests/relu_split_reshape.cpp
@@ -0,0 +1,45 @@
+// Copyright (C) 2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <vector>
+
+#include "subgraph_tests/relu_split_reshape.hpp"
+#include "common_test_utils/test_constants.hpp"
+#include "gna/gna_config.hpp"
+
+using namespace SubgraphTestsDefinitions;
+
+namespace {
+std::vector<std::vector<size_t>> inputShape = {
+    {1, 1, 64},
+    {1, 1, 128}
+};
+
+std::vector<size_t> splitAxis = {2};
+
+std::vector<size_t> splitNum = {2};
+
+std::vector<InferenceEngine::Precision> netPrecisions = {
+    InferenceEngine::Precision::FP32,
+    InferenceEngine::Precision::FP16,
+};
+
+std::vector<std::map<std::string, std::string>> additional_config = {
+    {
+        {"GNA_DEVICE_MODE", "GNA_SW_FP32"}
+    },
+    {
+        {"GNA_DEVICE_MODE", "GNA_SW_EXACT"}
+    }
+};
+
+INSTANTIATE_TEST_CASE_P(smoke_relu_split_reshape, ReluSplitReshape,
+    ::testing::Combine(
+        ::testing::ValuesIn(inputShape),
+        ::testing::ValuesIn(splitAxis),
+        ::testing::ValuesIn(splitNum),
+        ::testing::ValuesIn(netPrecisions),
+        ::testing::Values(CommonTestUtils::DEVICE_GNA),
+        ::testing::ValuesIn(additional_config)),
+    ReluSplitReshape::getTestCaseName);
+}  // namespace
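Note: the two configurations exercise both GNA execution paths: GNA_SW_FP32 runs the network in software with single-precision floats, while GNA_SW_EXACT emulates the device bit-exactly, including quantization, so the new Split/Reshape handling is validated on both the float and the quantized paths.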
diff --git a/inference-engine/tests/functional/plugin/shared/include/subgraph_tests/relu_split_reshape.hpp b/inference-engine/tests/functional/plugin/shared/include/subgraph_tests/relu_split_reshape.hpp
new file mode 100644
index 00000000000000..73e50306c22155
--- /dev/null
+++ b/inference-engine/tests/functional/plugin/shared/include/subgraph_tests/relu_split_reshape.hpp
@@ -0,0 +1,15 @@
+// Copyright (C) 2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include "shared_test_classes/subgraph/relu_split_reshape.hpp"
+
+namespace SubgraphTestsDefinitions {
+
+TEST_P(ReluSplitReshape, CompareWithRefs) {
+    Run();
+};
+
+}  // namespace SubgraphTestsDefinitions
diff --git a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/relu_split_reshape.hpp b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/relu_split_reshape.hpp
new file mode 100644
index 00000000000000..b6ca2e38b56b16
--- /dev/null
+++ b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/relu_split_reshape.hpp
@@ -0,0 +1,34 @@
+// Copyright (C) 2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <tuple>
+#include <string>
+#include <vector>
+#include <map>
+
+#include "shared_test_classes/base/layer_test_utils.hpp"
+#include "ngraph_functions/builders.hpp"
+#include "common_test_utils/test_constants.hpp"
+
+namespace SubgraphTestsDefinitions {
+
+typedef std::tuple<
+    std::vector<size_t>,                // Input shape
+    size_t,                             // Split axis
+    size_t,                             // Split number
+    InferenceEngine::Precision,         // Network precision
+    std::string,                        // Device name
+    std::map<std::string, std::string>  // Configuration
+> ReluSplitReshapeTuple;
+
+class ReluSplitReshape:
+        public testing::WithParamInterface<ReluSplitReshapeTuple>,
+        public LayerTestsUtils::LayerTestsCommon {
+public:
+    static std::string getTestCaseName(const testing::TestParamInfo<ReluSplitReshapeTuple> &obj);
+protected:
+    void SetUp() override;
+};
+}  // namespace SubgraphTestsDefinitions
diff --git a/inference-engine/tests/functional/shared_test_classes/src/subgraph/relu_split_reshape.cpp b/inference-engine/tests/functional/shared_test_classes/src/subgraph/relu_split_reshape.cpp
new file mode 100644
index 00000000000000..38261ac6c90f73
--- /dev/null
+++ b/inference-engine/tests/functional/shared_test_classes/src/subgraph/relu_split_reshape.cpp
@@ -0,0 +1,50 @@
+// Copyright (C) 2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "shared_test_classes/subgraph/relu_split_reshape.hpp"
+
+namespace SubgraphTestsDefinitions {
+std::string ReluSplitReshape::getTestCaseName(const testing::TestParamInfo<ReluSplitReshapeTuple> &obj) {
+    std::vector<size_t> inputShape;
+    size_t splitAxis, splitNum;
+    InferenceEngine::Precision netPrecision;
+    std::string targetName;
+    std::map<std::string, std::string> config;
+    std::tie(inputShape, splitAxis, splitNum, netPrecision, targetName, config) = obj.param;
+    std::ostringstream results;
+
+    results << "IS=" << CommonTestUtils::vec2str(inputShape) << "_";
+    results << "axis=" << splitAxis << "_";
+    results << "num=" << splitNum << "_";
+    results << "netPRC=" << netPrecision.name() << "_";
+    results << "targetDevice=" << targetName << "_";
+    for (auto const& configItem : config) {
+        results << "_configItem=" << configItem.first << "_" << configItem.second;
+    }
+    return results.str();
+}
+
+void ReluSplitReshape::SetUp() {
+    std::vector<size_t> inputShape;
+    size_t splitAxis, splitNum;
+    InferenceEngine::Precision netPrecision;
+    std::map<std::string, std::string> additional_config;
+    std::tie(inputShape, splitAxis, splitNum, netPrecision, targetDevice, additional_config) = this->GetParam();
+    configuration.insert(additional_config.begin(), additional_config.end());
+    auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
+
+    auto params = ngraph::builder::makeParams(ngPrc, {inputShape});
+    auto relu = std::make_shared<ngraph::opset7::Relu>(params[0]);
+    auto split = ngraph::builder::makeSplit(relu, ngPrc, splitNum, splitAxis);
+
+    auto shape = split->get_output_shape(0);
+    shape[shape.size() - 2] *= 2;
+    shape[shape.size() - 1] /= 2;
+    auto reshape_const = std::make_shared<ngraph::opset7::Constant>(ngraph::element::Type_t::i64,
+        ngraph::Shape{shape.size()}, shape);
+    auto reshape = std::make_shared<ngraph::opset7::Reshape>(split->output(0), reshape_const, false);
+
+    function = std::make_shared<ngraph::Function>(reshape, params, "ReluSplitReshape");
+}
+}  // namespace SubgraphTestsDefinitions
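Note: the fixture builds Relu -> Split -> Reshape; for input {1, 1, 64} with a 2-way split on axis 2, each split output {1, 1, 32} is reshaped to {1, 2, 16}, which trips the VerifyReshape "batch changed" condition exercised by the new pass. Once gnaFuncTests is built, the new cases can be selected with a standard gtest filter, e.g.:

    ./gnaFuncTests --gtest_filter=smoke_relu_split_reshape*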