diff --git a/inference-engine/src/gna_plugin/gna_plugin.cpp b/inference-engine/src/gna_plugin/gna_plugin.cpp index 48815f964da2dc..0b9a53a7d2f1b3 100644 --- a/inference-engine/src/gna_plugin/gna_plugin.cpp +++ b/inference-engine/src/gna_plugin/gna_plugin.cpp @@ -64,8 +64,9 @@ #include "transformations/convert_matmul_to_pointwise_convolution.hpp" #include "transformations/split_convolution_with_large_buffer_size.hpp" #include "transformations/handle_transposes_around_matmul.hpp" -#include "transformations/decompose_2d_conv.hpp" -#include "transformations/convert_padded2valid_conv.hpp" +#include "transformations/decompose_2d_convolution.hpp" +#include "transformations/convert_padded_to_valid_convolution.hpp" +#include "transformations/convert_dwsc_to_scaleshifts.hpp" #include "transformations/op_conversions/lstm_cell_decomposition.hpp" #include @@ -694,7 +695,9 @@ void GNAPlugin::LoadNetwork(CNNNetwork & _network) { manager.register_pass(); manager.register_pass(); manager.register_pass(); - manager.register_pass(); + manager.register_pass(); + manager.register_pass(); + manager.register_pass(); if (config.gnaCompileTarget == InferenceEngine::GNAConfigParams::GNA_TARGET_2_0) { manager.register_pass(); manager.register_pass(); @@ -721,7 +724,6 @@ void GNAPlugin::LoadNetwork(CNNNetwork & _network) { manager.register_pass(); // UnrollTI should be the last transformation in the transformation pipeline manager.register_pass(); - const auto& pass_config = manager.get_pass_config(); pass_config->set_callback( [](const std::shared_ptr &node) -> bool { diff --git a/inference-engine/src/gna_plugin/transformations/convert_dwsc_to_scaleshifts.cpp b/inference-engine/src/gna_plugin/transformations/convert_dwsc_to_scaleshifts.cpp new file mode 100644 index 00000000000000..9990157b32506c --- /dev/null +++ b/inference-engine/src/gna_plugin/transformations/convert_dwsc_to_scaleshifts.cpp @@ -0,0 +1,209 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 
+// + +#include + +#include "transformations/convert_dwsc_to_scaleshifts.hpp" + +#include +#include +#include +#include +#include +#include "utils/transformation_helper.hpp" +//#include "backend/gna_limitations.hpp" + + +using namespace GNAPluginNS; + +NGRAPH_RTTI_DEFINITION(ConvertDWSCToScaleShifts, "ConvertDWSCToScaleShifts", 0); +NGRAPH_RTTI_DEFINITION(ConvertDWSCBiasToScaleShifts, "ConvertDWSCBiasToScaleShifts", 0); + +static bool VerifyDWSC(std::shared_ptr dwsc) { + // Verify it's a 1D convolution + // Verify that filter group count == input channel count + // Verify that per group filter output channel count == 1 + if (dwsc->get_input_shape(1)[3] != 1 || dwsc->get_input_shape(0)[2] != 1 || dwsc->get_output_shape(0)[2] != 1 || + dwsc->get_input_shape(1)[0] != dwsc->get_input_shape(0)[1] || + dwsc->get_input_shape(1)[1] != 1) + return false; + + return true; +} + +static std::shared_ptr DecomposeDWSC(std::shared_ptr dwsc, std::shared_ptr bias_const, + std::shared_ptr flat_input_plane, std::shared_ptr flat_filters_plane) { + std::shared_ptr const_zero_padding; + std::shared_ptr reshaped_bias; + ngraph::OutputVector output_chunks; + auto input_channel_count = dwsc->get_input_shape(0)[1]; + auto input_width = dwsc->get_input_shape(0)[3]; + auto output_width = dwsc->get_output_shape(0)[3]; + auto filter_width = dwsc->get_input_shape(1)[4]; + auto pads_begin = dwsc->get_pads_begin()[1]; + auto stride_width = dwsc->get_strides()[1]; + auto dilation_width = dwsc->get_dilations()[1]; + + // Constant with zero padding + if (pads_begin) { + const_zero_padding = std::make_shared(dwsc->get_element_type(), ngraph::Shape{1, input_channel_count}, 0); + copy_runtime_info(dwsc, const_zero_padding); + } + + // Reshape bias const + if (bias_const) { + auto bias_size = shape_size(bias_const->get_shape()); + reshaped_bias = ngraph::op::util::make_try_fold(bias_const, + ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{2}, ngraph::Shape{1, bias_size}), false); + } 
+ + // Move filter over input performing multiplication and addition (scaleshift), take padding, stride, dilation and bias into account + for (int32_t input_position = -pads_begin, o = 0; o < output_width; input_position += stride_width, o++) { + std::shared_ptr previous_layer_output, last_layer_output; + int32_t filter_end = input_position + filter_width * dilation_width; + bool first = true; + + filter_end = filter_end < input_width ? filter_end : input_width; + + for (int32_t filter_pos = input_position, filter_idx = 0; filter_pos < filter_end; filter_pos += dilation_width, filter_idx++) { + if (filter_pos >= 0) { + auto conv_input_slice = FlatCrop(flat_input_plane, filter_pos * input_channel_count, input_channel_count); + auto conv_filter_slice = FlatCrop(flat_filters_plane, filter_idx * input_channel_count, input_channel_count); + + if (first) { + first = false; + previous_layer_output = std::make_shared(conv_input_slice, conv_filter_slice); + copy_runtime_info(dwsc, previous_layer_output); + if (bias_const) { + previous_layer_output = std::make_shared(previous_layer_output, reshaped_bias); + copy_runtime_info(dwsc, previous_layer_output); + } + last_layer_output = previous_layer_output; + } else { + last_layer_output = std::make_shared(conv_input_slice, conv_filter_slice); + copy_runtime_info(dwsc, last_layer_output); + last_layer_output = std::make_shared(last_layer_output, previous_layer_output); + copy_runtime_info(dwsc, last_layer_output); + previous_layer_output = last_layer_output; + } + } + } + + if (!last_layer_output) { + IE_ASSERT(const_zero_padding); + last_layer_output = const_zero_padding; + } + + output_chunks.push_back(last_layer_output); + } + + // Concat and transpose is only needed when output width > 1 + if (output_chunks.size() > 1) { + auto concat_output_plane = std::make_shared(output_chunks, 0); + auto transposed_concat_output_plane = std::make_shared(concat_output_plane, + ngraph::opset7::Constant::create(ngraph::element::i64, 
ngraph::Shape{2}, ngraph::Shape{1, 0})); + copy_runtime_info(dwsc, {concat_output_plane, transposed_concat_output_plane}); + return transposed_concat_output_plane; + } + + return output_chunks[0].get_node_shared_ptr(); +} + +static bool Convert(std::shared_ptr dwsc_node, + std::shared_ptr bias_node, + std::shared_ptr bias_const_node) { + auto dwsc = std::dynamic_pointer_cast(dwsc_node); + auto bias = std::dynamic_pointer_cast(bias_node); + auto bias_const = std::dynamic_pointer_cast(bias_const_node); + + if (!VerifyDWSC(dwsc)) + return false; + + auto input_channel_count = dwsc->get_input_shape(0)[1]; + auto input_width = dwsc->get_input_shape(0)[3]; + auto output_channel_count = dwsc->get_output_shape(0)[1]; + auto output_width = dwsc->get_output_shape(0)[3]; + auto original_last_node = (bias_const ? bias_node : dwsc_node); + + // Prepare flat input data + auto reshaped_input_plane = std::make_shared(dwsc->input_value(0), + ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{2}, + ngraph::Shape{input_channel_count, input_width}), false); + + auto transposed_input_plane = std::make_shared(reshaped_input_plane, + ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{2}, ngraph::Shape{1, 0})); + + auto flat_input_plane = std::make_shared(transposed_input_plane, + ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{2}, + ngraph::Shape{1, shape_size(dwsc->input_value(0).get_shape())}), false); + + // Prepare flat filter data + auto filters_const = std::dynamic_pointer_cast(dwsc->input_value(1).get_node_shared_ptr()); + auto filters_size = shape_size(filters_const->get_shape()); + + auto transposed_filters_const = ngraph::op::util::make_try_fold(filters_const, + ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{5}, ngraph::Shape{4, 1, 2, 3, 0})); + + auto flat_filters_plane = ngraph::op::util::make_try_fold(transposed_filters_const, + ngraph::opset7::Constant::create(ngraph::element::i64, 
ngraph::Shape{2}, ngraph::Shape{1, filters_size}), false); + + copy_runtime_info(dwsc, {reshaped_input_plane, transposed_input_plane, flat_input_plane, transposed_filters_const, flat_filters_plane}); + + // Convert DWSC to a set of diagonal layers + auto output_plane = DecomposeDWSC(dwsc, bias_const, flat_input_plane, flat_filters_plane); + + // Restore the original output shape + auto result = std::make_shared(output_plane, + ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{4}, + ngraph::Shape{1, output_channel_count, 1, output_width}), false); + copy_runtime_info(dwsc, result); + + // We need to put here the original Group Convolution layer name, so the new layer output can be used as a network result + std::string result_name = original_last_node->get_friendly_name(); + replace_node(original_last_node, result); + result->set_friendly_name(result_name); + + return true; +} + +ConvertDWSCToScaleShifts::ConvertDWSCToScaleShifts() { + MATCHER_SCOPE(ConvertDWSCToScaleShifts); + + auto dwsc = ngraph::pattern::wrap_type( + {ngraph::pattern::any_input(), ngraph::pattern::wrap_type(ngraph::pattern::rank_equals(5))}, + ngraph::pattern::rank_equals(4)); + + ngraph::matcher_pass_callback callback = [=](ngraph::pattern::Matcher& m) { + const auto& pattern_map = m.get_pattern_value_map(); + return Convert(pattern_map.at(dwsc).get_node_shared_ptr(), nullptr, nullptr); + }; + + auto m = std::make_shared(dwsc, matcher_name); + this->register_matcher(m, callback); +} + +ConvertDWSCBiasToScaleShifts::ConvertDWSCBiasToScaleShifts() { + MATCHER_SCOPE(ConvertDWSCBiasToScaleShifts); + + auto dwsc = ngraph::pattern::wrap_type( + {ngraph::pattern::any_input(), ngraph::pattern::wrap_type(ngraph::pattern::rank_equals(5))}, + consumers_and_rank(1, 4)); + auto const_input = ngraph::pattern::wrap_type(); + auto bias = ngraph::pattern::wrap_type({dwsc, const_input}); + + ngraph::matcher_pass_callback callback = [=](ngraph::pattern::Matcher& m) { + const auto& pattern_map = 
m.get_pattern_value_map(); + auto bias_it = pattern_map.find(bias); + auto bias_node = (bias_it == std::end(pattern_map) ? nullptr : bias_it->second.get_node_shared_ptr()); + std::shared_ptr bias_const = nullptr; + + if (bias_node && (bias_const = VerifyBiasGetConst(pattern_map.at(dwsc).get_node_shared_ptr(), bias_node)) == nullptr) + return false; + + return Convert(pattern_map.at(dwsc).get_node_shared_ptr(), bias_node, bias_const); + }; + + auto m = std::make_shared(bias, matcher_name); + this->register_matcher(m, callback); +} diff --git a/inference-engine/src/gna_plugin/transformations/convert_dwsc_to_scaleshifts.hpp b/inference-engine/src/gna_plugin/transformations/convert_dwsc_to_scaleshifts.hpp new file mode 100644 index 00000000000000..423784200b43b5 --- /dev/null +++ b/inference-engine/src/gna_plugin/transformations/convert_dwsc_to_scaleshifts.hpp @@ -0,0 +1,29 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +namespace GNAPluginNS { + +/** + * @brief Convert a depthwise separable convolution (represented by a GroupConvolution) to a set of ScaleShift layers (MatMul + Add) + */ +class ConvertDWSCToScaleShifts : public ngraph::pass::MatcherPass { +public: + NGRAPH_RTTI_DECLARATION; + ConvertDWSCToScaleShifts(); +}; + +/** + * @brief Convert a depthwise separable convolution with bias (represented by a GroupConvolution + Add) to a set of ScaleShift layers (MatMul + Add) + */ +class ConvertDWSCBiasToScaleShifts : public ngraph::pass::MatcherPass { +public: + NGRAPH_RTTI_DECLARATION; + ConvertDWSCBiasToScaleShifts(); +}; + +} // namespace GNAPluginNS diff --git a/inference-engine/src/gna_plugin/transformations/convert_padded2valid_conv.cpp b/inference-engine/src/gna_plugin/transformations/convert_padded_to_valid_convolution.cpp similarity index 92% rename from inference-engine/src/gna_plugin/transformations/convert_padded2valid_conv.cpp rename to 
inference-engine/src/gna_plugin/transformations/convert_padded_to_valid_convolution.cpp index 82f8ccc5eadcaa..f32b9b2da0ed5a 100644 --- a/inference-engine/src/gna_plugin/transformations/convert_padded2valid_conv.cpp +++ b/inference-engine/src/gna_plugin/transformations/convert_padded_to_valid_convolution.cpp @@ -4,7 +4,7 @@ #include -#include "transformations/convert_padded2valid_conv.hpp" +#include "transformations/convert_padded_to_valid_convolution.hpp" #include @@ -19,7 +19,7 @@ using namespace GNAPluginNS; -NGRAPH_RTTI_DEFINITION(ConvertPadded2ValidConv, "ConvertPadded2ValidConv", 0); +NGRAPH_RTTI_DEFINITION(ConvertPaddedToValidConv, "ConvertPaddedToValidConv", 0); static bool VerifyAndGetConvData(std::shared_ptr conv, ConvData& conv_data) { const auto& input = conv->input_value(0); @@ -34,17 +34,6 @@ static bool VerifyAndGetConvData(std::shared_ptr co return conv_data.pads_begin_height || conv_data.pads_end_height || conv_data.pads_begin_width || conv_data.pads_end_width; } -static bool VerifyBias(std::shared_ptr bias, const size_t& filter_count) { - auto add_const = std::dynamic_pointer_cast(bias->input_value(0).get_node_shared_ptr()); - - // We need to check both inputs of Add when looking for constant - if (!add_const) - add_const = std::dynamic_pointer_cast(bias->input_value(1).get_node_shared_ptr()); - - // The add may be a normal add not convolution bias, then we just go further - return (add_const && shape_size(add_const->get_shape()) == filter_count); -} - static void InsertPadding(ngraph::OutputVector& input_rows_to_concat, size_t size, const std::shared_ptr& conv, const std::shared_ptr padding_const, size_t biggest_padding) { @@ -181,9 +170,6 @@ static bool Convert(std::shared_ptr leading_transpose, if (!TransposeOrderMatches(std::dynamic_pointer_cast(trailing_transpose), {0, 2, 3, 1})) return false; - if (bias && !VerifyBias(std::dynamic_pointer_cast(bias), conv_data.filter_count)) - return false; - 
GeneratePadding(std::dynamic_pointer_cast(leading_transpose), std::dynamic_pointer_cast(conv), conv_data); @@ -196,8 +182,8 @@ static std::function)> consumers_and_rank(cons }; } -ConvertPadded2ValidConv::ConvertPadded2ValidConv() { - MATCHER_SCOPE(ConvertPadded2ValidConv); +ConvertPaddedToValidConv::ConvertPaddedToValidConv() { + MATCHER_SCOPE(ConvertPaddedToValidConv); auto const_input = ngraph::pattern::wrap_type(); auto leading_transpose = ngraph::pattern::wrap_type({ngraph::pattern::any_input(), const_input}, @@ -237,6 +223,9 @@ ConvertPadded2ValidConv::ConvertPadded2ValidConv() { auto bias_it = pattern_map.find(bias); auto bias_node = (bias_it == std::end(pattern_map) ? nullptr : bias_it->second.get_node_shared_ptr()); + if (bias_node && !VerifyBiasGetConst(pattern_map.at(conv).get_node_shared_ptr(), bias_node)) + return false; + return Convert(pattern_map.at(leading_transpose).get_node_shared_ptr(), pattern_map.at(conv).get_node_shared_ptr(), pattern_map.at(trailing_transpose).get_node_shared_ptr(), bias_node); }; diff --git a/inference-engine/src/gna_plugin/transformations/convert_padded2valid_conv.hpp b/inference-engine/src/gna_plugin/transformations/convert_padded_to_valid_convolution.hpp similarity index 93% rename from inference-engine/src/gna_plugin/transformations/convert_padded2valid_conv.hpp rename to inference-engine/src/gna_plugin/transformations/convert_padded_to_valid_convolution.hpp index 55bef912b9c184..61f285e2549758 100644 --- a/inference-engine/src/gna_plugin/transformations/convert_padded2valid_conv.hpp +++ b/inference-engine/src/gna_plugin/transformations/convert_padded_to_valid_convolution.hpp @@ -28,10 +28,10 @@ namespace GNAPluginNS { * Transpose (NCHW -> NHWC) Transpose (NCHW -> NHWC) * */ -class ConvertPadded2ValidConv : public ngraph::pass::MatcherPass { +class ConvertPaddedToValidConv : public ngraph::pass::MatcherPass { public: NGRAPH_RTTI_DECLARATION; - ConvertPadded2ValidConv(); + ConvertPaddedToValidConv(); }; } // namespace 
GNAPluginNS diff --git a/inference-engine/src/gna_plugin/transformations/decompose_2d_conv.cpp b/inference-engine/src/gna_plugin/transformations/decompose_2d_convolution.cpp similarity index 94% rename from inference-engine/src/gna_plugin/transformations/decompose_2d_conv.cpp rename to inference-engine/src/gna_plugin/transformations/decompose_2d_convolution.cpp index 4b313ce8bb0650..f3ed07c27127b5 100644 --- a/inference-engine/src/gna_plugin/transformations/decompose_2d_conv.cpp +++ b/inference-engine/src/gna_plugin/transformations/decompose_2d_convolution.cpp @@ -4,9 +4,7 @@ #include -#include "transformations/decompose_2d_conv.hpp" - -#include +#include "transformations/decompose_2d_convolution.hpp" #include #include @@ -68,22 +66,6 @@ static bool VerifyAndGetConvData(std::shared_ptr co return true; } -static std::shared_ptr VerifyBiasAndReshapeConst(std::shared_ptr conv_bias, const ConvData& conv_data) { - auto add_const = std::dynamic_pointer_cast(conv_bias->input_value(1).get_node_shared_ptr()); - - if (add_const) { - auto bias_size = shape_size(add_const->get_shape()); - - // The add may be a normal add not conv bias, then we just go further - if (bias_size == conv_data.filter_count) { - return ngraph::op::util::make_try_fold(add_const, - ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{4}, ngraph::Shape{1, bias_size, 1, 1}), false); - } - } - // Bias size does not match (or dynamic bias), can't decompose such convolution - return nullptr; -} - static bool VerifyMaxPool(GraphData& graph_data, std::shared_ptr max_pool) { auto pool_filter = max_pool->get_kernel(); auto pool_strides = max_pool->get_strides(); @@ -236,7 +218,7 @@ static void TransformInput(const GraphData& graph_data, const ConvData& conv_dat */ // First we need to prepare flat (height = 1) slices of input data proper for flattened (height = 1) filters created later on; - // the input datat is overlapping (duplicated) + // the input data is overlapping (duplicated) 
ngraph::OutputVector dilated_input_planes; for (size_t filter_height = 0; filter_height < conv_data.filter_height; filter_height++) { size_t offset; @@ -306,11 +288,14 @@ static std::shared_ptr Create1DConv(const GraphData& graph_data, c ngraph::Strides{1, 1}, ngraph::op::PadType::VALID); std::string conv_name = graph_data.conv->get_friendly_name() + "_H_" + std::to_string(h_index) + "_CH_" + std::to_string(0); conv->set_friendly_name(conv_name); + std::shared_ptr last_conv_block_op = conv; // Bias & fake quantize - std::shared_ptr last_conv_block_op = conv; if (graph_data.bias_const && conv_index == 0) { - last_conv_block_op = std::make_shared(conv, graph_data.bias_const); + auto bias_size = shape_size(graph_data.bias_const->get_shape()); + auto reshaped_bias_const = ngraph::op::util::make_try_fold(graph_data.bias_const, + ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{4}, ngraph::Shape{1, bias_size, 1, 1}), false); + last_conv_block_op = std::make_shared(conv, reshaped_bias_const); copy_runtime_info(graph_data.conv, last_conv_block_op); InsertFQLayer(graph_data.fq_bias, last_conv_block_op); } @@ -472,6 +457,7 @@ static bool Convert(std::shared_ptr leading_transpose, std::shared_ptr conv, std::shared_ptr trailing_transpose, std::shared_ptr bias, + std::shared_ptr bias_const, std::shared_ptr fq_bias, std::shared_ptr max_pool, std::shared_ptr af, @@ -486,7 +472,7 @@ static bool Convert(std::shared_ptr leading_transpose, std::dynamic_pointer_cast(max_pool), std::dynamic_pointer_cast(af), std::dynamic_pointer_cast(fq_af), - last_op_for_replacement, nullptr, 1, 1, 1}; + last_op_for_replacement, bias_const, 1, 1, 1}; ConvData conv_data; if (!VerifyAndGetConvData(std::dynamic_pointer_cast(conv), conv_data)) @@ -500,9 +486,6 @@ static bool Convert(std::shared_ptr leading_transpose, if (!TransposeOrderMatches(std::dynamic_pointer_cast(trailing_transpose), {0, 2, 3, 1})) return false; - if (bias && !(graph_data.bias_const = 
VerifyBiasAndReshapeConst(std::dynamic_pointer_cast(bias), conv_data))) - return false; - if (max_pool && !VerifyMaxPool(graph_data, std::dynamic_pointer_cast(max_pool))) return false; @@ -515,22 +498,6 @@ static bool Convert(std::shared_ptr leading_transpose, return true; } -static bool VerifyBias(std::shared_ptr conv, std::shared_ptr bias) { - auto add_const = std::dynamic_pointer_cast(bias->input_value(1).get_node_shared_ptr()); - - if (!add_const) { - add_const = std::dynamic_pointer_cast(bias->input_value(0).get_node_shared_ptr()); - } - - if (!add_const) { - auto bias_size = shape_size(add_const->get_shape()); - auto conv_filter_count = conv->input_value(1).get_shape()[0]; - if (bias_size == conv_filter_count) - return true; - } - return false; -} - Decompose2DConv::Decompose2DConv() { MATCHER_SCOPE(Decompose2DConv); @@ -576,6 +543,11 @@ Decompose2DConv::Decompose2DConv() { auto fq_conv_node = (fq_conv_it == std::end(pattern_map) ? nullptr : fq_conv_it->second.get_node_shared_ptr()); auto bias_it = pattern_map.find(bias); auto bias_node = (bias_it == std::end(pattern_map) ? nullptr : bias_it->second.get_node_shared_ptr()); + std::shared_ptr bias_const_node = nullptr; + + if (bias_node && !(bias_const_node = VerifyBiasGetConst(pattern_map.at(conv).get_node_shared_ptr(), bias_node))) + return false; + auto fq_bias_it = pattern_map.find(fq_bias); auto fq_bias_node = (fq_bias_it == std::end(pattern_map) ? 
nullptr : fq_bias_it->second.get_node_shared_ptr()); auto fq_af_it = pattern_map.find(fq_af); @@ -596,7 +568,7 @@ Decompose2DConv::Decompose2DConv() { } return Convert(pattern_map.at(leading_transpose).get_node_shared_ptr(), fq_conv_node, pattern_map.at(conv).get_node_shared_ptr(), - pattern_map.at(trailing_transpose).get_node_shared_ptr(), bias_node, fq_bias_node, max_pool_node, af_node, fq_af_node, + pattern_map.at(trailing_transpose).get_node_shared_ptr(), bias_node, bias_const_node, fq_bias_node, max_pool_node, af_node, fq_af_node, pattern_map.at(trailing_transpose).get_node_shared_ptr()); }; @@ -621,11 +593,13 @@ Decompose2DConvTransposedWithBias::Decompose2DConvTransposedWithBias() { ngraph::matcher_pass_callback callback = [=](ngraph::pattern::Matcher& m) { const auto& pattern_map = m.get_pattern_value_map(); - if (!VerifyBias(pattern_map.at(conv).get_node_shared_ptr(), pattern_map.at(bias).get_node_shared_ptr())) + std::shared_ptr bias_const_node = nullptr; + + if (!(bias_const_node = VerifyBiasGetConst(pattern_map.at(conv).get_node_shared_ptr(), pattern_map.at(bias).get_node_shared_ptr()))) return false; return Convert(pattern_map.at(leading_transpose).get_node_shared_ptr(), nullptr, pattern_map.at(conv).get_node_shared_ptr(), - pattern_map.at(trailing_transpose).get_node_shared_ptr(), pattern_map.at(bias).get_node_shared_ptr(), nullptr, nullptr, + pattern_map.at(trailing_transpose).get_node_shared_ptr(), pattern_map.at(bias).get_node_shared_ptr(), bias_const_node, nullptr, nullptr, nullptr, nullptr, pattern_map.at(bias).get_node_shared_ptr()); }; @@ -654,11 +628,13 @@ Decompose2DConvTransposedWithBiasAF::Decompose2DConvTransposedWithBiasAF() { ngraph::matcher_pass_callback callback = [=](ngraph::pattern::Matcher& m) { const auto& pattern_map = m.get_pattern_value_map(); - if (!VerifyBias(pattern_map.at(conv).get_node_shared_ptr(), pattern_map.at(bias).get_node_shared_ptr())) + std::shared_ptr bias_const_node = nullptr; + + if (!(bias_const_node = 
VerifyBiasGetConst(pattern_map.at(conv).get_node_shared_ptr(), pattern_map.at(bias).get_node_shared_ptr()))) return false; return Convert(pattern_map.at(leading_transpose).get_node_shared_ptr(), nullptr, pattern_map.at(conv).get_node_shared_ptr(), - pattern_map.at(trailing_transpose).get_node_shared_ptr(), pattern_map.at(bias).get_node_shared_ptr(), nullptr, + pattern_map.at(trailing_transpose).get_node_shared_ptr(), pattern_map.at(bias).get_node_shared_ptr(), bias_const_node, nullptr, nullptr, pattern_map.at(af).get_node_shared_ptr(), nullptr, pattern_map.at(af).get_node_shared_ptr()); }; diff --git a/inference-engine/src/gna_plugin/transformations/decompose_2d_conv.hpp b/inference-engine/src/gna_plugin/transformations/decompose_2d_convolution.hpp similarity index 100% rename from inference-engine/src/gna_plugin/transformations/decompose_2d_conv.hpp rename to inference-engine/src/gna_plugin/transformations/decompose_2d_convolution.hpp diff --git a/inference-engine/src/gna_plugin/transformations/utils/transformation_helper.cpp b/inference-engine/src/gna_plugin/transformations/utils/transformation_helper.cpp index 79fe863a18fbb2..040ef6794cfe17 100644 --- a/inference-engine/src/gna_plugin/transformations/utils/transformation_helper.cpp +++ b/inference-engine/src/gna_plugin/transformations/utils/transformation_helper.cpp @@ -72,4 +72,22 @@ std::shared_ptr FlatCrop(ngraph::Output{1, 0}); // end mask } +std::shared_ptr VerifyBiasGetConst(std::shared_ptr conv, std::shared_ptr bias) { + auto add_const = std::dynamic_pointer_cast(bias->input_value(1).get_node_shared_ptr()); + + // We need to check both inputs of Add when looking for constant + if (!add_const) { + add_const = std::dynamic_pointer_cast(bias->input_value(0).get_node_shared_ptr()); + } + + // Check if it's really a bias and not just addition + if (add_const) { + auto bias_size = shape_size(add_const->get_shape()); + auto conv_filter_count = conv->get_output_shape(0)[1]; + if (bias_size == conv_filter_count) + 
return add_const; + } + return nullptr; +} + } // namespace GNAPluginNS diff --git a/inference-engine/src/gna_plugin/transformations/utils/transformation_helper.hpp b/inference-engine/src/gna_plugin/transformations/utils/transformation_helper.hpp index 14fca200f7b196..f8a0ba41bd3955 100644 --- a/inference-engine/src/gna_plugin/transformations/utils/transformation_helper.hpp +++ b/inference-engine/src/gna_plugin/transformations/utils/transformation_helper.hpp @@ -61,4 +61,13 @@ bool TransposeOrderMatches(std::shared_ptr transpose, * @return pointer to the newly created slice */ std::shared_ptr FlatCrop(ngraph::Output input, size_t offset, size_t size); + +/** + * @brief checks whether an add present after convolution is a bias and gets its const input + * @param conv convolution layer preceding potential bias + * @param bias potential bias layer passed from ngraph matcher + * @return bias const if the add layer present after convolution is a bias, nullptr otherwise + */ +std::shared_ptr VerifyBiasGetConst(std::shared_ptr conv, std::shared_ptr bias); + } // namespace GNAPluginNS diff --git a/inference-engine/tests/functional/plugin/gna/pass_tests/convert_dwsc_to_scaleshifts.cpp b/inference-engine/tests/functional/plugin/gna/pass_tests/convert_dwsc_to_scaleshifts.cpp new file mode 100644 index 00000000000000..b7cb0d510a8a1d --- /dev/null +++ b/inference-engine/tests/functional/plugin/gna/pass_tests/convert_dwsc_to_scaleshifts.cpp @@ -0,0 +1,210 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include "common_test_utils/test_common.hpp" +#include +#include +#include +#include +#include +#include + +#include "transformations/init_node_info.hpp" +#include "ngraph_functions/builders.hpp" +#include "shared_test_classes/base/layer_test_utils.hpp" + +using namespace ngraph; +using namespace ngraph::opset7; + +namespace LayerTestsDefinitions { + +enum class modelType { + DWSC = 0, /* Depth-Wise Separable Convolution 
(represented by Group Convolution in ngraph) */ + DWSCBias, /* DWSC => Broadcasted Add (Bias) */ +}; + +typedef std::tuple< + InferenceEngine::SizeVector, // Kernel size + InferenceEngine::SizeVector, // Strides + std::vector, // Pad begin + std::vector, // Pad end + InferenceEngine::SizeVector, // Dilation + op::PadType, // Padding type + size_t, // Num out channels + size_t, // Num groups + InferenceEngine::SizeVector // Bias +> DWSCParams; + +typedef std::tuple< + DWSCParams, // DWSC and bias parameters + InferenceEngine::Precision, // Network Precision + std::string, // Target Device + std::map, // Configuration + InferenceEngine::SizeVector, // Input shapes + modelType // Test model +> DWSCToScaleShiftsParams; + +class DWSCToScaleShiftsTest : public testing::WithParamInterface, + virtual public LayerTestsUtils::LayerTestsCommon { +public: + static std::string getTestCaseName(testing::TestParamInfo obj) { + DWSCParams params; + InferenceEngine::Precision netPrecision; + std::string targetDevice; + std::map configuration; + InferenceEngine::SizeVector inputShape; + modelType model; + std::tie(params, netPrecision, targetDevice, configuration, inputShape, model) = obj.param; + op::PadType padType; + InferenceEngine::SizeVector filter, stride, dilation, bias; + std::vector padBegin, padEnd; + size_t numOutChannels, numGroups; + std::tie(filter, stride, padBegin, padEnd, dilation, padType, numOutChannels, numGroups, bias) = params; + + std::ostringstream result; + result << "M=" << static_cast(model) << "_"; + result << "IS=" << CommonTestUtils::vec2str(inputShape) << "_"; + result << "K" << CommonTestUtils::vec2str(filter) << "_"; + result << "S" << CommonTestUtils::vec2str(stride) << "_"; + result << "PB" << CommonTestUtils::vec2str(padBegin) << "_"; + result << "PE" << CommonTestUtils::vec2str(padEnd) << "_"; + result << "D=" << CommonTestUtils::vec2str(dilation) << "_"; + result << "O=" << numOutChannels << "_"; + result << "AP=" << padType << "_"; + result << 
"B=" << CommonTestUtils::vec2str(bias) << "_"; + result << "netPRC=" << netPrecision.name() << "_"; + result << "targetDevice=" << targetDevice << "_"; + for (auto const& configItem : configuration) { + result << "_configItem=" << configItem.first << "_" << configItem.second; + } + return result.str(); + } + +protected: + void SetUp() override { + threshold = 0.05f; + DWSCParams params; + InferenceEngine::Precision netPrecision; + std::vector inputShape; + modelType model; + std::tie(params, netPrecision, targetDevice, configuration, inputShape, model) = this->GetParam(); + op::PadType padType; + InferenceEngine::SizeVector filter, stride, dilation, bias; + std::vector padBegin, padEnd; + size_t numOutChannels, numGroups; + std::tie(filter, stride, padBegin, padEnd, dilation, padType, numOutChannels, numGroups, bias) = params; + auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); + + auto input = builder::makeParams(ngPrc, {inputShape}); + auto filterSize = std::accumulate(std::begin(filter), std::end(filter), 1ull, std::multiplies()); + auto filterWeights = CommonTestUtils::generate_float_numbers(numOutChannels * (inputShape[1] / numGroups) * filterSize, -1.0f, 1.0f); + auto lastOp = builder::makeGroupConvolution(input[0], ngPrc, filter, stride, padBegin, + padEnd, dilation, padType, numOutChannels, numGroups, false, filterWeights); + + if (model == modelType::DWSCBias) { + Shape biasShape{bias}; + auto biasWeights = CommonTestUtils::generate_float_numbers(shape_size(biasShape), -1.0f, 1.0f); + auto biasConst = std::make_shared(ngPrc, biasShape, biasWeights); + lastOp = std::make_shared(lastOp, biasConst); + } + + auto result = std::make_shared(lastOp); + function = std::make_shared(ResultVector{result}, ParameterVector{input}); + } +}; + +TEST_P(DWSCToScaleShiftsTest, CompareWithRefs) { + Run(); +} + +const std::vector netPrecisions = { + InferenceEngine::Precision::FP32, + InferenceEngine::Precision::FP16 +}; + +const std::vector> 
configs = { + { + {"GNA_DEVICE_MODE", "GNA_SW_EXACT"}, + {"GNA_SCALE_FACTOR_0", "1"}, + } +}; + +const std::vector padTypes = { + op::PadType::VALID, + op::PadType::EXPLICIT, + op::PadType::SAME_LOWER, + op::PadType::SAME_UPPER +}; + +const std::vector models = { + modelType::DWSC, + modelType::DWSCBias +}; + +const std::vector> inputNCHW = {{1, 32, 1, 5}}; +const std::vector> filters = {{1, 3}}; +const std::vector> strides = {{1, 1}, {1, 2}}; +const std::vector> padBegins = {{0, 1}, {0, 2}}; +const std::vector> padEnds = {{0, 1}}; +const std::vector> dilations = {{1, 1}}; +const std::vector numOutChannels = {32}; +const std::vector numGroups = {32}; +const std::vector> biases = {{1, 32, 1, 1}}; + +const auto convParams = ::testing::Combine( + ::testing::ValuesIn(filters), + ::testing::ValuesIn(strides), + ::testing::ValuesIn(padBegins), + ::testing::ValuesIn(padEnds), + ::testing::ValuesIn(dilations), + ::testing::ValuesIn(padTypes), + ::testing::ValuesIn(numOutChannels), + ::testing::ValuesIn(numGroups), + ::testing::ValuesIn(biases) +); + +INSTANTIATE_TEST_CASE_P(smoke_DWSCToScaleShifts, DWSCToScaleShiftsTest, + ::testing::Combine( + convParams, + ::testing::ValuesIn(netPrecisions), + ::testing::Values(CommonTestUtils::DEVICE_GNA), + ::testing::ValuesIn(configs), + ::testing::ValuesIn(inputNCHW), + ::testing::ValuesIn(models)), + DWSCToScaleShiftsTest::getTestCaseName); + +/* ============= Strides & Dilations Combination ============= */ + +const std::vector padTypesSD = { + op::PadType::VALID, +}; + +const std::vector> inputNCHWSD = {{1, 32, 1, 8}}; +const std::vector> dilationsSD = {{1, 1}, {1, 2}}; + +const auto convParamsSD = ::testing::Combine( + ::testing::ValuesIn(filters), + ::testing::ValuesIn(strides), + ::testing::ValuesIn(padBegins), + ::testing::ValuesIn(padEnds), + ::testing::ValuesIn(dilationsSD), + ::testing::ValuesIn(padTypesSD), + ::testing::ValuesIn(numOutChannels), + ::testing::ValuesIn(numGroups), + ::testing::ValuesIn(biases) +); + 
+INSTANTIATE_TEST_CASE_P(smoke_DWSCToScaleShiftsStridesDilations, DWSCToScaleShiftsTest, + ::testing::Combine( + convParamsSD, + ::testing::ValuesIn(netPrecisions), + ::testing::Values(CommonTestUtils::DEVICE_GNA), + ::testing::ValuesIn(configs), + ::testing::ValuesIn(inputNCHWSD), + ::testing::ValuesIn(models)), + DWSCToScaleShiftsTest::getTestCaseName); + +} // namespace LayerTestsDefinitions diff --git a/inference-engine/tests/functional/plugin/gna/pass_tests/convert_padded2valid_conv.cpp b/inference-engine/tests/functional/plugin/gna/pass_tests/convert_padded_to_valid_conv.cpp similarity index 94% rename from inference-engine/tests/functional/plugin/gna/pass_tests/convert_padded2valid_conv.cpp rename to inference-engine/tests/functional/plugin/gna/pass_tests/convert_padded_to_valid_conv.cpp index 45faab02e841f3..0c3416b1969eb7 100644 --- a/inference-engine/tests/functional/plugin/gna/pass_tests/convert_padded2valid_conv.cpp +++ b/inference-engine/tests/functional/plugin/gna/pass_tests/convert_padded_to_valid_conv.cpp @@ -57,12 +57,12 @@ typedef std::tuple< std::map, // Configuration InferenceEngine::SizeVector, // Input shapes modelType // Test model -> padded2ValidParams; +> paddedToValidParams; -class Padded2ValidConvTest : public testing::WithParamInterface, +class PaddedToValidConvTest : public testing::WithParamInterface, virtual public LayerTestsUtils::LayerTestsCommon { public: - static std::string getTestCaseName(testing::TestParamInfo obj) { + static std::string getTestCaseName(testing::TestParamInfo obj) { convSpecificParams convParams; miscSpecificParams miscParams; InferenceEngine::Precision netPrecision; @@ -195,26 +195,26 @@ class Padded2ValidConvTest : public testing::WithParamInterface + +#include + +#include "transformations/convert_dwsc_to_scaleshifts.hpp" +#include "common_test_utils/ngraph_test_utils.hpp" +#include +#include +#include +#include +#include + +namespace testing { + +namespace { + +enum class modelType { + DWSC = 0, /* 
Depth-Wise Separable Convolution (represented by Group Convolution in ngraph) */ + DWSCBias, /* DWSC => Broadcasted Add (Bias) */ +}; + +typedef std::tuple< + modelType, // Test model + ngraph::Shape, // Input shape + ngraph::Shape, // Convolution filter shape + ngraph::Strides, // Convolution stride + ngraph::CoordinateDiff, // Convolution pads begin + ngraph::CoordinateDiff, // Convolution pads end + ngraph::Strides, // Convolution dilation + ngraph::Shape, // Bias shape + ngraph::op::PadType // Padding type +> DWSCToScaleShiftsParams; + +typedef std::tuple< + bool, // With / without Fake Quantize layers + DWSCToScaleShiftsParams // Test parameters +> fqDWSCToScaleShiftsParams; + +std::shared_ptr createFunction(const bool& fq, + const modelType& model, + const ngraph::Output& input_node, + const ngraph::Shape& filters_shape, + const ngraph::Strides& conv_stride, + const ngraph::CoordinateDiff& pads_begin, + const ngraph::CoordinateDiff& pads_end, + const ngraph::Strides& conv_dilation, + const ngraph::Shape& bias_shape, + const ngraph::op::PadType& pad_type, + std::shared_ptr& dwsc, + std::shared_ptr& bias_const) { + auto filters = std::make_shared(ngraph::element::i64, + ngraph::Shape{input_node.get_shape()[1], 1, 1, filters_shape[0], filters_shape[1]}); + dwsc = std::make_shared(input_node, filters, conv_stride, pads_begin, pads_end, conv_dilation, pad_type); + std::shared_ptr last_op = dwsc; + + if (model == modelType::DWSCBias) { + bias_const = std::make_shared(ngraph::element::i64, bias_shape); + last_op = std::make_shared(dwsc, bias_const); + } + + return std::make_shared(last_op); +} + +std::shared_ptr get_initial_function(const bool& fq, + const modelType& model, + const ngraph::Shape& input_shape, + const ngraph::Shape& filters_shape, + const ngraph::Strides& conv_stride, + const ngraph::CoordinateDiff& pads_begin, + const ngraph::CoordinateDiff& pads_end, + const ngraph::Strides& conv_dilation, + const ngraph::Shape& bias_shape, + const 
ngraph::op::PadType& pad_type, + std::shared_ptr& dwsc, + std::shared_ptr& bias_const) { + auto input_params = std::make_shared(ngraph::element::i64, input_shape); + auto result = createFunction(fq, model, input_params, filters_shape, conv_stride, pads_begin, pads_end, conv_dilation, bias_shape, pad_type, dwsc, bias_const); + return std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{input_params}); +} + +// --------------------------------------------------------------------------------------------------------------------- + +class ConvertDWSCToScaleShiftsTestInvalidFixture : public CommonTestUtils::TestsCommon, + public ::testing::WithParamInterface { +public: + void SetUp() override; +public: + std::shared_ptr function, reference_function; + modelType model; +}; + +void ConvertDWSCToScaleShiftsTestInvalidFixture::SetUp() { + bool fq; + DWSCToScaleShiftsParams params; + ngraph::Shape input_shape; + ngraph::Shape filters_shape, bias_shape; + ngraph::Strides conv_stride, conv_dilation; + ngraph::CoordinateDiff pads_begin, pads_end; + ngraph::op::PadType pad_type; + std::shared_ptr dwsc; + std::shared_ptr bias_const; + std::tie(fq, params) = this->GetParam(); + std::tie(model, input_shape, filters_shape, conv_stride, pads_begin, pads_end, conv_dilation, + bias_shape, pad_type) = params; + + function = get_initial_function(fq, model, input_shape, filters_shape, conv_stride, pads_begin, pads_end, conv_dilation, + bias_shape, pad_type, dwsc, bias_const); + reference_function = get_initial_function(fq, model, input_shape, filters_shape, conv_stride, pads_begin, pads_end, conv_dilation, + bias_shape, pad_type, dwsc, bias_const); +} + +// --------------------------------------------------------------------------------------------------------------------- + +class ConvertDWSCToScaleShiftsTestFixture: public CommonTestUtils::TestsCommon, + public ::testing::WithParamInterface { +public: + void SetUp() override; + std::shared_ptr get_reference(const bool& 
fq, + const modelType& model, + const ngraph::Shape& input_shape, + const ngraph::Shape& filters_shape, + const ngraph::Strides& conv_stride, + const ngraph::CoordinateDiff& pads_begin, + const ngraph::CoordinateDiff& pads_end, + const ngraph::Strides& conv_dilation, + const ngraph::Shape& bias_shape, + const ngraph::op::PadType& pad_type, + const std::shared_ptr& dwsc, + const std::shared_ptr& bias_const); +public: + std::shared_ptr function, reference_function; + modelType model; +}; + +void ConvertDWSCToScaleShiftsTestFixture::SetUp() { + bool fq; + DWSCToScaleShiftsParams params; + ngraph::Shape input_shape; + ngraph::Shape filters_shape, bias_shape; + ngraph::Strides conv_stride, conv_dilation; + ngraph::CoordinateDiff pads_begin, pads_end; + ngraph::op::PadType pad_type; + std::shared_ptr dwsc; + std::shared_ptr bias_const; + std::tie(fq, params) = this->GetParam(); + std::tie(model, input_shape, filters_shape, conv_stride, pads_begin, pads_end, conv_dilation, + bias_shape, pad_type) = params; + + function = get_initial_function(fq, model, input_shape, filters_shape, conv_stride, pads_begin, pads_end, conv_dilation, + bias_shape, pad_type, dwsc, bias_const); + reference_function = get_reference(fq, model, input_shape, filters_shape, conv_stride, pads_begin, pads_end, conv_dilation, + bias_shape, pad_type, dwsc, bias_const); +} + +std::shared_ptr FlatCrop(ngraph::Output input, size_t offset, size_t size) { + return std::make_shared( + input, // data + ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{2}, {(size_t)0, offset}), // begin slice index + ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{2}, {(size_t)0, offset + size}), // end slice index + ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{2}, {(size_t)1, (size_t)1}), // strides + std::vector{1, 0}, // begin mask + std::vector{1, 0}); // end mask +} + +std::shared_ptr DecomposeDWSC(std::shared_ptr dwsc, std::shared_ptr bias_const, + 
std::shared_ptr flat_input_plane, std::shared_ptr flat_filters_plane) { + std::shared_ptr const_zero_padding; + std::shared_ptr reshaped_bias; + ngraph::OutputVector output_chunks; + auto input_channel_count = dwsc->get_input_shape(0)[1]; + auto input_width = dwsc->get_input_shape(0)[3]; + auto output_width = dwsc->get_output_shape(0)[3]; + auto filter_width = dwsc->get_input_shape(1)[4]; + auto pads_begin = dwsc->get_pads_begin()[1]; + auto stride_width = dwsc->get_strides()[1]; + auto dilation_width = dwsc->get_dilations()[1]; + + // Constant with zero padding + if (pads_begin) { + const_zero_padding = std::make_shared(dwsc->get_element_type(), ngraph::Shape{1, input_channel_count}, 0); + } + + // Reshape bias const + if (bias_const) { + auto bias_size = shape_size(bias_const->get_shape()); + reshaped_bias = ngraph::op::util::make_try_fold(bias_const, + ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{2}, ngraph::Shape{1, bias_size}), false); + } + + // Move filter over input performing multiplication and addition (scaleshift), take padding, stride, dilation and bias into account + for (int32_t input_position = -pads_begin, o = 0; o < output_width; input_position += stride_width, o++) { + std::shared_ptr previous_layer_output, last_layer_output; + int32_t filter_end = input_position + filter_width * dilation_width; + bool first = true; + + filter_end = filter_end < input_width ? 
filter_end : input_width; + + for (int32_t filter_pos = input_position, filter_idx = 0; filter_pos < filter_end; filter_pos += dilation_width, filter_idx++) { + if (filter_pos >= 0) { + auto conv_input_slice = FlatCrop(flat_input_plane, filter_pos * input_channel_count, input_channel_count); + auto conv_filter_slice = FlatCrop(flat_filters_plane, filter_idx * input_channel_count, input_channel_count); + + if (first) { + first = false; + previous_layer_output = std::make_shared(conv_input_slice, conv_filter_slice); + if (bias_const) { + previous_layer_output = std::make_shared(previous_layer_output, reshaped_bias); + } + last_layer_output = previous_layer_output; + } else { + last_layer_output = std::make_shared(conv_input_slice, conv_filter_slice); + last_layer_output = std::make_shared(last_layer_output, previous_layer_output); + previous_layer_output = last_layer_output; + } + } + } + + if (!last_layer_output) { + IE_ASSERT(const_zero_padding); + last_layer_output = const_zero_padding; + } + + output_chunks.push_back(last_layer_output); + } + + // Concat and transpose is only needed when output width > 1 + if (output_chunks.size() > 1) { + auto concat_output_plane = std::make_shared(output_chunks, 0); + return std::make_shared(concat_output_plane, + ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{2}, ngraph::Shape{1, 0})); + } + + return output_chunks[0].get_node_shared_ptr(); +} + +std::shared_ptr ConvertDWSCToScaleShiftsTestFixture::get_reference(const bool& fq, + const modelType& model, + const ngraph::Shape& input_shape, + const ngraph::Shape& filters_shape, + const ngraph::Strides& conv_stride, + const ngraph::CoordinateDiff& pads_begin, + const ngraph::CoordinateDiff& pads_end, + const ngraph::Strides& conv_dilation, + const ngraph::Shape& bias_shape, + const ngraph::op::PadType& pad_type, + const std::shared_ptr& dwsc, + const std::shared_ptr& bias_const) { + auto input_params = std::make_shared(ngraph::element::i64, input_shape); + 
auto input_channel_count = input_shape[1]; + auto input_width = input_shape[3]; + auto output_channel_count = dwsc->get_output_shape(0)[1]; + auto output_width = dwsc->get_output_shape(0)[3]; + + // Prepare flat input data + auto reshaped_input_plane = std::make_shared(input_params, + ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{2}, + ngraph::Shape{input_channel_count, input_width}), false); + + auto transposed_input_plane = std::make_shared(reshaped_input_plane, + ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{2}, ngraph::Shape{1, 0})); + + auto flat_input_plane = std::make_shared(transposed_input_plane, + ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{2}, + ngraph::Shape{1, ngraph::shape_size(input_shape)}), false); + + // Prepare flat filter data + auto filters_const = std::dynamic_pointer_cast(dwsc->input_value(1).get_node_shared_ptr()); + auto filters_size = ngraph::shape_size(filters_const->get_shape()); + + auto transposed_filters_const = ngraph::op::util::make_try_fold(filters_const, + ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{5}, ngraph::Shape{4, 1, 2, 3, 0})); + + auto flat_filters_plane = ngraph::op::util::make_try_fold(transposed_filters_const, + ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{2}, ngraph::Shape{1, filters_size}), false); + + // Convert DWSC to a set of diagonal layers + auto output_plane = DecomposeDWSC(dwsc, bias_const, flat_input_plane, flat_filters_plane); + + // Restore the original output shape + auto result = std::make_shared(output_plane, + ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{4}, + ngraph::Shape{1, output_channel_count, 1, output_width}), false); + + return std::make_shared(ngraph::ResultVector{std::make_shared(result)}, ngraph::ParameterVector{input_params}); +} + +// 
--------------------------------------------------------------------------------------------------------------------- + +void execute_test(modelType model, std::shared_ptr function, std::shared_ptr reference_function) { + ngraph::pass::Manager manager; + manager.register_pass(); + + switch (model) { + default: + case modelType::DWSC: + manager.register_pass(); + break; + case modelType::DWSCBias: + manager.register_pass(); + break; + } + + manager.run_passes(function); + const FunctionsComparator func_comparator = FunctionsComparator::with_default().enable(FunctionsComparator::ATTRIBUTES); + const FunctionsComparator::Result result = func_comparator(function, reference_function); + ASSERT_TRUE(result.valid); +} + +TEST_P(ConvertDWSCToScaleShiftsTestFixture, CompareFunctions) { + execute_test(model, function, reference_function); +} + +INSTANTIATE_TEST_SUITE_P(ConvertDWSCToScaleShiftsTestSuite, ConvertDWSCToScaleShiftsTestFixture, + ::testing::Combine( + // With / without Fake Quantize layers + ::testing::Values(false), + ::testing::Values( + std::make_tuple(modelType::DWSC, ngraph::Shape{1, 32, 1, 5}, ngraph::Shape{1, 3}, ngraph::Strides{1, 1}, + ngraph::CoordinateDiff{0, 1}, ngraph::CoordinateDiff{0, 1}, ngraph::Strides{1, 1}, + ngraph::Shape{1, 32, 1, 1}, ngraph::op::PadType::VALID), + std::make_tuple(modelType::DWSCBias, ngraph::Shape{1, 32, 1, 5}, ngraph::Shape{1, 3}, ngraph::Strides{1, 1}, + ngraph::CoordinateDiff{0, 2}, ngraph::CoordinateDiff{0, 2}, ngraph::Strides{1, 1}, + ngraph::Shape{1, 32, 1, 1}, ngraph::op::PadType::VALID)))); + +TEST_P(ConvertDWSCToScaleShiftsTestInvalidFixture, CompareFunctions) { + execute_test(model, function, reference_function); +} + +INSTANTIATE_TEST_SUITE_P(ConvertDWSCToScaleShiftsInvalidTestSuite, ConvertDWSCToScaleShiftsTestInvalidFixture, + ::testing::Combine( + // With / without Fake Quantize layers + ::testing::Values(false), + ::testing::Values( + std::make_tuple(modelType::DWSC, ngraph::Shape{2, 1, 16, 8}, 
ngraph::Shape{1, 2}, ngraph::Strides{1, 1}, + ngraph::CoordinateDiff{0, 2}, ngraph::CoordinateDiff{0, 3}, ngraph::Strides{1, 1}, + ngraph::Shape{1, 4, 1, 1}, ngraph::op::PadType::SAME_UPPER), + std::make_tuple(modelType::DWSCBias, ngraph::Shape{2, 1, 16, 8}, ngraph::Shape{1, 2}, ngraph::Strides{1, 1}, + ngraph::CoordinateDiff{0, 2}, ngraph::CoordinateDiff{0, 3}, ngraph::Strides{1, 1}, + ngraph::Shape{1, 4, 1, 1}, ngraph::op::PadType::EXPLICIT)))); + +} // namespace + +} // namespace testing diff --git a/inference-engine/tests/unit/gna/ngraph/transformations/gna_convert_padded2valid_conv.cpp b/inference-engine/tests/unit/gna/ngraph/transformations/gna_convert_padded_to_valid_convolution.cpp similarity index 95% rename from inference-engine/tests/unit/gna/ngraph/transformations/gna_convert_padded2valid_conv.cpp rename to inference-engine/tests/unit/gna/ngraph/transformations/gna_convert_padded_to_valid_convolution.cpp index 381847b7a1c31d..dc3501a980b983 100644 --- a/inference-engine/tests/unit/gna/ngraph/transformations/gna_convert_padded2valid_conv.cpp +++ b/inference-engine/tests/unit/gna/ngraph/transformations/gna_convert_padded_to_valid_convolution.cpp @@ -6,7 +6,7 @@ #include -#include "transformations/convert_padded2valid_conv.hpp" +#include "transformations/convert_padded_to_valid_convolution.hpp" #include "common_test_utils/ngraph_test_utils.hpp" #include #include @@ -39,12 +39,12 @@ typedef std::tuple< ngraph::Strides, // Max Pool stride ngraph::Shape, // Max Pool shape ngraph::op::PadType // Padding type -> padded2ValidConvParams; +> paddedToValidConvParams; typedef std::tuple< bool, // With / without Fake Quantize layers - padded2ValidConvParams // Test parameters -> fqPadded2ValidConvParams; + paddedToValidConvParams // Test parameters +> fqPaddedToValidConvParams; struct ConvData { size_t input_height; @@ -193,17 +193,17 @@ std::shared_ptr get_initial_function(const bool& fq, // 
--------------------------------------------------------------------------------------------------------------------- -class ConvertPadded2ValidConvTestInvalidFixture : public CommonTestUtils::TestsCommon, - public ::testing::WithParamInterface { +class ConvertPaddedToValidConvTestInvalidFixture : public CommonTestUtils::TestsCommon, + public ::testing::WithParamInterface { public: void SetUp() override; public: std::shared_ptr function, reference_function; }; -void ConvertPadded2ValidConvTestInvalidFixture::SetUp() { +void ConvertPaddedToValidConvTestInvalidFixture::SetUp() { bool fq; - padded2ValidConvParams params; + paddedToValidConvParams params; modelType model; ngraph::PartialShape input_shape; ngraph::Shape filters_shape, bias_shape, maxpool_shape; @@ -223,8 +223,8 @@ void ConvertPadded2ValidConvTestInvalidFixture::SetUp() { // --------------------------------------------------------------------------------------------------------------------- -class ConvertPadded2ValidConvTestFixture: public CommonTestUtils::TestsCommon, - public ::testing::WithParamInterface { +class ConvertPaddedToValidConvTestFixture: public CommonTestUtils::TestsCommon, + public ::testing::WithParamInterface { public: void SetUp() override; std::shared_ptr get_reference(const bool& fq, @@ -244,9 +244,9 @@ class ConvertPadded2ValidConvTestFixture: public CommonTestUtils::TestsCommon, std::shared_ptr function, reference_function; }; -void ConvertPadded2ValidConvTestFixture::SetUp() { +void ConvertPaddedToValidConvTestFixture::SetUp() { bool fq; - padded2ValidConvParams params; + paddedToValidConvParams params; modelType model; ngraph::PartialShape input_shape; ngraph::Shape filters_shape, bias_shape, maxpool_shape; @@ -354,7 +354,7 @@ std::shared_ptr CreatePaddedNet(const ngraph::Output return padded_input_plane; } -std::shared_ptr ConvertPadded2ValidConvTestFixture::get_reference(const bool& fq, +std::shared_ptr ConvertPaddedToValidConvTestFixture::get_reference(const bool& fq, const 
modelType& model, const ngraph::PartialShape& input_shape, const ngraph::Shape& filters_shape, @@ -406,18 +406,18 @@ std::shared_ptr ConvertPadded2ValidConvTestFixture::get_refere void execute_test(std::shared_ptr function, std::shared_ptr reference_function) { ngraph::pass::Manager manager; manager.register_pass(); - manager.register_pass(); + manager.register_pass(); manager.run_passes(function); const FunctionsComparator func_comparator = FunctionsComparator::with_default().enable(FunctionsComparator::ATTRIBUTES); const FunctionsComparator::Result result = func_comparator(function, reference_function); ASSERT_TRUE(result.valid); } -TEST_P(ConvertPadded2ValidConvTestFixture, CompareFunctions) { +TEST_P(ConvertPaddedToValidConvTestFixture, CompareFunctions) { execute_test(function, reference_function); } -INSTANTIATE_TEST_SUITE_P(ConvertPadded2ValidConvTestSuite, ConvertPadded2ValidConvTestFixture, +INSTANTIATE_TEST_SUITE_P(ConvertPaddedToValidConvTestSuite, ConvertPaddedToValidConvTestFixture, ::testing::Combine( // With / without Fake Quantize layers ::testing::Values(true, false), @@ -444,11 +444,11 @@ INSTANTIATE_TEST_SUITE_P(ConvertPadded2ValidConvTestSuite, ConvertPadded2ValidCo ngraph::CoordinateDiff{0, 2}, ngraph::CoordinateDiff{0, 3}, ngraph::Strides{1, 1}, ngraph::Shape{1, 1, 1, 4}, ngraph::Strides{1, 1}, ngraph::Shape{1, 2}, ngraph::op::PadType::EXPLICIT)))); -TEST_P(ConvertPadded2ValidConvTestInvalidFixture, CompareFunctions) { +TEST_P(ConvertPaddedToValidConvTestInvalidFixture, CompareFunctions) { execute_test(function, reference_function); } -INSTANTIATE_TEST_SUITE_P(ConvertPadded2ValidConvInvalidTestSuite, ConvertPadded2ValidConvTestInvalidFixture, +INSTANTIATE_TEST_SUITE_P(ConvertPaddedToValidConvInvalidTestSuite, ConvertPaddedToValidConvTestInvalidFixture, ::testing::Combine( // With / without Fake Quantize layers ::testing::Values(true, false), diff --git a/inference-engine/tests/unit/gna/ngraph/transformations/gna_decompose_2d_conv.cpp 
b/inference-engine/tests/unit/gna/ngraph/transformations/gna_decompose_2d_convolution.cpp similarity index 99% rename from inference-engine/tests/unit/gna/ngraph/transformations/gna_decompose_2d_conv.cpp rename to inference-engine/tests/unit/gna/ngraph/transformations/gna_decompose_2d_convolution.cpp index c7e9323638ef98..dd4b50ba7faa8c 100644 --- a/inference-engine/tests/unit/gna/ngraph/transformations/gna_decompose_2d_conv.cpp +++ b/inference-engine/tests/unit/gna/ngraph/transformations/gna_decompose_2d_convolution.cpp @@ -6,7 +6,7 @@ #include -#include "transformations/decompose_2d_conv.hpp" +#include "transformations/decompose_2d_convolution.hpp" #include "common_test_utils/ngraph_test_utils.hpp" #include #include @@ -426,7 +426,7 @@ void TransformInput(const GraphData& graph_data, const ConvParams& conv_params, */ // First we need to prepare flat (height = 1) slices of input data proper for flattened (height = 1) filters created later on; - // the input datat is overlapping (duplicated) + // the input data is overlapping (duplicated) ngraph::OutputVector dilated_input_planes; for (size_t filter_height = 0; filter_height < conv_params.filter_height; filter_height++) { size_t offset; @@ -704,10 +704,13 @@ void execute_test(modelType model, std::shared_ptr function, s case modelType::TranspConvBcastAddActTransp: case modelType::TranspConvBcastAddMaxPoolActTransp: manager.register_pass(); + break; case modelType::TranspConvTranspBcastAdd: manager.register_pass(); + break; case modelType::TranspConvTranspBcastAddAct: manager.register_pass(); + break; } manager.run_passes(function);