diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_group_conv.cpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_group_conv.cpp index d9270a324e4765..b4c8738b6e9f10 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_group_conv.cpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_group_conv.cpp @@ -19,11 +19,20 @@ ov::intel_cpu::ConvertGroupConvolution::ConvertGroupConvolution() { if (!gconv) { return false; } - - auto data_shape = gconv->get_input_shape(Inputs::Data); + const unsigned int channel_axis = 1; + const auto& input0 = gconv->input_value(0); + const auto& output_shape = gconv->get_output_partial_shape(0); + const auto& data_shape = input0.get_partial_shape(); // Weights layout GOIYX - size_t groups = gconv->get_input_shape(Inputs::Weights)[0]; - if (groups == data_shape.at(1) && groups == gconv->get_output_shape(0)[1]) { // depthwise case + int64_t groups = gconv->get_input_shape(Inputs::Weights)[0]; + + if (data_shape[channel_axis].is_dynamic() || + output_shape[channel_axis].is_dynamic()) { + return false; + } + + if (groups == data_shape[channel_axis].get_length() && + groups == output_shape[channel_axis].get_length()) { // depthwise case return false; } @@ -33,12 +42,12 @@ ov::intel_cpu::ConvertGroupConvolution::ConvertGroupConvolution() { groups); replace_nodes.push_back(split_weights); - auto axis = ov::opset8::Constant::create(ngraph::element::i64, ngraph::Shape{}, {1}); + auto axis = ov::opset8::Constant::create(ngraph::element::i64, ngraph::Shape{}, {channel_axis}); auto split = std::make_shared(gconv->input_value(Inputs::Data), axis, groups); replace_nodes.push_back(split); ngraph::NodeVector concat_inputs; - for (size_t g = 0; g < groups; g++) { + for (int64_t g = 0; g < groups; g++) { auto out = split->output(g); auto filter = std::make_shared(split_weights->output(g), ov::opset8::Constant::create(ngraph::element::i64, 
ngraph::Shape{}, {0})); diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_group_conv.hpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_group_conv.hpp index c26b4651af22f3..fd4cb79a49bf6c 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_group_conv.hpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_group_conv.hpp @@ -6,6 +6,56 @@ #include #include +/* + * Description: + * ConvertGroupConvolution detects GroupConvolution and replaces it + * with a set of Convolution operations. Number of Convolution operations + * equals to number of groups. + * + * Before: + * + * +--------------+ +---------------+ + * | Input tensor | | Kernel tensor | + * +-----------+--+ +-+-------------+ + * | | + * +----v---------v----+ + * | Group Convolution | + * +---------+---------+ + * | + * +------v------+ + * | Result | + * +-------------+ + * + * After: + * + * +--------------+ +--------------+ +---------------+ +--------------+ + * | Input tensor | | Constant (1) | | Kernel tensor | | Constant (0) | + * +-----------+--+ +-+------------+ +-----------+---+ +-+------------+ + * | | | | + * +-v---------v-+ +-v---------v-+ + * | Split | | Split | + * +-+-----------+--------+ +-+---------+-+ + * | | | | + * | | +-----------v--+ +-v------------+ + * | | | Squeeze | | Squeeze | + * | +----------+---+--------------+ +-+------------+ + * | | | | + * | | +---------------+ | + * | | | | + * +-----------v---------v------------+ +-----------v---------v------------+ + * | Convolution | | Convolution | + * +-----------------------------+----+ +---+------------------------------+ + * | | + * +-----v----------v------+ + * | Concat | + * +----------+------------+ + * | + * +----------v------------+ + * | Result | + * +-----------------------+ + * + */ + namespace ov { namespace intel_cpu { diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_group_conv1d.cpp 
b/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_group_conv1d.cpp index 8b1632354d8aa7..bf89620604c326 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_group_conv1d.cpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_group_conv1d.cpp @@ -19,7 +19,8 @@ ngraph::matcher_pass_callback ov::intel_cpu::ConvertConv1DBase::convert_conv1d_t return false; } - auto input_shape = conv->get_input_shape(0); + const auto& input0 = conv->input_value(0); + const auto& input_shape = input0.get_partial_shape(); // is Conv1D if (input_shape.size() != 3) { return false; @@ -27,16 +28,19 @@ ngraph::matcher_pass_callback ov::intel_cpu::ConvertConv1DBase::convert_conv1d_t auto input = conv->input_value(0); auto weights = conv->input_value(1); - auto input2d_shape = input_shape; - input2d_shape.push_back(1); - auto in2d_shape = std::make_shared(ngraph::element::i64, ngraph::Shape{4}, input2d_shape); auto weights2d_shape = weights.get_shape(); weights2d_shape.push_back(1); auto w_shape = std::make_shared(ngraph::element::i64, ngraph::Shape{weights2d_shape.size()}, weights2d_shape); - auto input2d = std::make_shared(input, in2d_shape, true); - auto weights2d = std::make_shared(weights, w_shape, true); + auto getUnsqueeze = [&](const ngraph::Output& node) { + auto rank = node.get_partial_shape().rank().get_length(); + return std::make_shared(node, + ov::opset1::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {rank})); + }; + + auto input2d = getUnsqueeze(input); + auto weights2d = getUnsqueeze(weights); auto conv2d = std::make_shared(input2d, weights2d, @@ -46,8 +50,9 @@ ngraph::matcher_pass_callback ov::intel_cpu::ConvertConv1DBase::convert_conv1d_t ngraph::Strides{conv->get_dilations()[0], 1}, conv->get_auto_pad()); - auto in_shape = std::make_shared(ngraph::element::i64, ngraph::Shape{3}, conv->get_output_shape(0)); - auto reshape = std::make_shared(conv2d, in_shape, true); + auto reshape = 
std::make_shared( + conv2d, + ov::opset1::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {input_shape.rank().get_length()})); reshape->set_friendly_name(conv->get_friendly_name()); ngraph::copy_runtime_info(conv, {input2d, weights2d, conv2d, reshape}); @@ -58,16 +63,16 @@ ngraph::matcher_pass_callback ov::intel_cpu::ConvertConv1DBase::convert_conv1d_t ov::intel_cpu::ConvertConv1D::ConvertConv1D() { auto m = std::make_shared( - ngraph::pattern::wrap_type({ngraph::pattern::any_input(ngraph::pattern::has_static_shape()), - ngraph::pattern::any_input(ngraph::pattern::has_static_shape())}, - ngraph::pattern::has_static_shape()), "ConvertConvolutionToArm"); + ngraph::pattern::wrap_type({ngraph::pattern::any_input(), + ngraph::pattern::any_input()}), + "ConvertConvolutionToArm"); register_matcher(m, convert_conv1d_to_conv2d()); } ov::intel_cpu::ConvertGroupConv1D::ConvertGroupConv1D() { auto m = std::make_shared( - ngraph::pattern::wrap_type({ngraph::pattern::any_input(ngraph::pattern::has_static_shape()), - ngraph::pattern::any_input(ngraph::pattern::has_static_shape())}, - ngraph::pattern::has_static_shape()), "ConvertGroupConvolutionToArm"); + ngraph::pattern::wrap_type({ngraph::pattern::any_input(), + ngraph::pattern::any_input()}), + "ConvertGroupConvolutionToArm"); register_matcher(m, convert_conv1d_to_conv2d()); } \ No newline at end of file diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_group_conv1d.hpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_group_conv1d.hpp index c5f197e64f68a7..7790f68df6b819 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_group_conv1d.hpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_group_conv1d.hpp @@ -5,6 +5,51 @@ #include +/* + * Description: + * ConvertConv1DBase detects 1D Convolution / GroupConvolution and replaces + * it with the sequence Unsqueeze - 2D Convolution / GroupConvolution - Squeeze. 
+ * Unsqueeze adds the additional dimension to Convolution inputs and Squeeze + * removes the additional dimention from the Convolution output. + * + * Before: + * + * +--------------+ +---------------+ + * | Input tensor | | Kernel tensor | + * +-----------+--+ +-+-------------+ + * | | + * +-v---------v-+ + * | Convolution | + * +------+------+ + * | + * +------v------+ + * | Result | + * +-------------+ + * + * After: + * + * +--------------+ +--------------+ +---------------+ +--------------+ + * | Input tensor | | Constant (1) | | Kernel tensor | | Constant (1) | + * +-----------+--+ +-+------------+ +-----------+---+ +-+------------+ + * | | | | + * +-v---------v-+ +-v---------v-+ + * | Unsqueeze | | Unsqueeze | + * +------+------+ +------+------+ + * | | + * +------v------------------------------------v------+ +--------------+ + * | Convolution | | Constant (1) | + * +---------------------------------------------+----+ +-+------------+ + * | | + * +-v---------v-+ + * | Squeeze | + * +------+------+ + * | + * +------v------+ + * | Result | + * +-------------+ + * + */ + namespace ov { namespace intel_cpu { class ConvertConv1DBase: public ngraph::pass::MatcherPass { diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_reduce_multi_axis.cpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_reduce_multi_axis.cpp index e5bdf01d472e6b..27104457ec097b 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_reduce_multi_axis.cpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_reduce_multi_axis.cpp @@ -16,16 +16,26 @@ ngraph::matcher_pass_callback ov::intel_cpu::ConvertReduceMultiAxisBase::convert if (!std::dynamic_pointer_cast(reduce)) { return false; } - if (ngraph::shape_size(reduce->input_value(1).get_shape()) <= 1) { + + const auto& input0 = reduce->input_value(0); + const auto& input1 = reduce->input_value(1); + const auto& data_shape0 = 
input0.get_partial_shape(); + const auto& data_shape1 = input1.get_partial_shape(); + if (data_shape0.is_dynamic() || + data_shape1.is_dynamic()) { + return false; + } + + if (ngraph::shape_size(input1.get_shape()) <= 1) { return false; } - auto reduction_axes = std::dynamic_pointer_cast(reduce->input_value(1).get_node_shared_ptr()); + auto reduction_axes = std::dynamic_pointer_cast(input1.get_node_shared_ptr()); if (!reduction_axes) { return false; } auto axes = reduction_axes->cast_vector(); ngraph::NodeVector new_ops; - std::shared_ptr node = reduce->input_value(0).get_node_shared_ptr(); + std::shared_ptr node = input0.get_node_shared_ptr(); for (auto axis : axes) { auto reduction_axis = ov::opset8::Constant::create(ngraph::element::i64, ngraph::Shape{}, {axis}); node = std::make_shared(node, reduction_axis, true); @@ -46,32 +56,28 @@ ngraph::matcher_pass_callback ov::intel_cpu::ConvertReduceMultiAxisBase::convert ov::intel_cpu::ConvertReduceProd::ConvertReduceProd() { auto m = std::make_shared( - ngraph::pattern::wrap_type({ngraph::pattern::any_input(ngraph::pattern::has_static_shape()), - ngraph::pattern::wrap_type()}, - ngraph::pattern::has_static_shape()), "ConvertReduceProd"); + ngraph::pattern::wrap_type({ngraph::pattern::any_input(), + ngraph::pattern::wrap_type()}), "ConvertReduceProd"); register_matcher(m, convert_reduce()); } ov::intel_cpu::ConvertReduceMin::ConvertReduceMin() { auto m = std::make_shared( - ngraph::pattern::wrap_type({ngraph::pattern::any_input(ngraph::pattern::has_static_shape()), - ngraph::pattern::wrap_type()}, - ngraph::pattern::has_static_shape()), "ConvertReduceMin"); + ngraph::pattern::wrap_type({ngraph::pattern::any_input(), + ngraph::pattern::wrap_type()}), "ConvertReduceMin"); register_matcher(m, convert_reduce()); } ov::intel_cpu::ConvertReduceMax::ConvertReduceMax() { auto m = std::make_shared( - ngraph::pattern::wrap_type({ngraph::pattern::any_input(ngraph::pattern::has_static_shape()), - ngraph::pattern::wrap_type()}, - 
ngraph::pattern::has_static_shape()), "ConvertReduceMax"); + ngraph::pattern::wrap_type({ngraph::pattern::any_input(), + ngraph::pattern::wrap_type()}), "ConvertReduceMax"); register_matcher(m, convert_reduce()); } ov::intel_cpu::ConvertReduceSum::ConvertReduceSum() { auto m = std::make_shared( - ngraph::pattern::wrap_type({ngraph::pattern::any_input(ngraph::pattern::has_static_shape()), - ngraph::pattern::wrap_type()}, - ngraph::pattern::has_static_shape()), "ConvertReduceSum"); + ngraph::pattern::wrap_type({ngraph::pattern::any_input(), + ngraph::pattern::wrap_type()}), "ConvertReduceSum"); register_matcher(m, convert_reduce()); } diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_reduce_multi_axis.hpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_reduce_multi_axis.hpp index 1b2087e85945cf..486c85eb62c351 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_reduce_multi_axis.hpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_reduce_multi_axis.hpp @@ -6,6 +6,50 @@ #include #include +/* + * Description: + * ConvertReduceMultiAxisBase detects Reduce operations that do not support + * multi-axis input by ACL: Min, Max, Sum, Prod. Multi-axis Reduce operation + * is replaced with a sequence of single-axe Reduce operations. 
+ * + * Before: + * + * +--------------+ +-------------------+ + * | Data | | Axes tensor [A,B] | + * +-----------+--+ +-+-----------------+ + * | | + * +----v---------v----+ + * | Reduce | + * +---------+---------+ + * | + * +------v------+ + * | Result | + * +-------------+ + * + * After: + * + * +-------------+ +---------------+ + * | Data | | Axes scalar A | + * +---------+---+ +----+----------+ + * | | + * +-v------------v--+ +-----------------+ + * | Reduce | | Axes scalar B | + * +--------------+--+ +---+-------------+ + * | | + * +-v-----------v---+ + * | Reduce | + * +-------+---------+ + * | + * +-------v---------+ + * | Reshape | + * +-------+---------+ + * | + * +-------v---------+ + * | Result | + * +-----------------+ + * + */ + namespace ov { namespace intel_cpu { diff --git a/src/plugins/intel_cpu/tests/functional/CMakeLists.txt b/src/plugins/intel_cpu/tests/functional/CMakeLists.txt index cf785c5d691edb..205c8f930036e1 100644 --- a/src/plugins/intel_cpu/tests/functional/CMakeLists.txt +++ b/src/plugins/intel_cpu/tests/functional/CMakeLists.txt @@ -22,11 +22,11 @@ endif() if(X86_64) list(APPEND EXCLUDED_SOURCE_PATHS ${CMAKE_CURRENT_SOURCE_DIR}/single_layer_tests/instances/arm - ${CMAKE_CURRENT_SOURCE_DIR}/subgraph_tests/arm) + ${CMAKE_CURRENT_SOURCE_DIR}/subgraph_tests/src/arm) else() list(APPEND EXCLUDED_SOURCE_PATHS ${CMAKE_CURRENT_SOURCE_DIR}/single_layer_tests/instances/x64 - ${CMAKE_CURRENT_SOURCE_DIR}/subgraph_tests/x64) + ${CMAKE_CURRENT_SOURCE_DIR}/subgraph_tests/src/x64) # temporary disable all custom tests for ARM list(APPEND EXCLUDED_SOURCE_PATHS ${CMAKE_CURRENT_SOURCE_DIR}/single_layer_tests @@ -35,6 +35,7 @@ else() file(GLOB_RECURSE TMP_LIST_OF_TEST_CLASSES ${CMAKE_CURRENT_SOURCE_DIR}/single_layer_tests/classes/*.cpp) file(GLOB_RECURSE TMP_LIST_OF_COMMON_TEST_INSTANCES ${CMAKE_CURRENT_SOURCE_DIR}/single_layer_tests/instances/common/*.cpp) file(GLOB_RECURSE TMP_LIST_OF_ARM_TEST_INSTANCES 
${CMAKE_CURRENT_SOURCE_DIR}/single_layer_tests/instances/arm/*.cpp) + file(GLOB_RECURSE TMP_LIST_OF_ARM_TEST_INSTANCES ${CMAKE_CURRENT_SOURCE_DIR}/subgraph_tests/arm/*.cpp) list(APPEND TMP_LIST_OF_EXPLICITLY_ENABLED_TESTS ${TMP_LIST_OF_TEST_CLASSES} ${TMP_LIST_OF_COMMON_TEST_INSTANCES} ${TMP_LIST_OF_ARM_TEST_INSTANCES}) set(TMP_EXPLICITLY_ENABLED_TESTS "${TMP_LIST_OF_EXPLICITLY_ENABLED_TESTS}") diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/arm/convert_group_conv.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/arm/convert_group_conv.cpp new file mode 100644 index 00000000000000..5475027df05275 --- /dev/null +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/arm/convert_group_conv.cpp @@ -0,0 +1,98 @@ +// Copyright (C) 2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include +#include +#include +#include +#include +#include +#include "common_test_utils/common_utils.hpp" +#include +#include "functional_test_utils/skip_tests_config.hpp" +#include "test_utils/cpu_test_utils.hpp" +#include "cpp_interfaces/interface/ie_internal_plugin_config.hpp" + +#include "test_utils/cpu_test_utils.hpp" +#include "test_utils/convolution_params.hpp" + +using namespace CPUTestUtils; +using namespace ov::test; +using namespace ngraph; +using namespace ngraph::helpers; + +namespace CPUSubgraphTestsDefinitions { + +typedef std::tuple groupConvLayerCPUTestParamsSet; + +class GroupConvToConvTransformationCPUTest: public testing::WithParamInterface, + virtual public SubgraphBaseTest, public CPUTestsBase { +public: + static std::string getTestCaseName(testing::TestParamInfo obj) { + InputShape inputShapes; + std::tie(inputShapes) = obj.param; + + std::ostringstream result; + result << "IS=" << inputShapes; + + return result.str(); + } + +protected: + static const size_t numOfGroups = 2; + void SetUp() override { + targetDevice = CommonTestUtils::DEVICE_CPU; + InputShape inputShapes; + std::tie(inputShapes) = 
this->GetParam(); + + init_input_shapes({inputShapes}); + + std::shared_ptr conv; + const std::vector kernelSize = {1}; + const std::vector strides = {1}; + const std::vector padBegin = {0}; + const std::vector padEnd = {0}; + const std::vector dilation = {1}; + const size_t numOutChannels = 30; + const op::PadType paddingType = op::PadType::EXPLICIT; + + auto inputParams = ngraph::builder::makeDynamicParams(ngraph::element::f32, inputDynamicShapes); + auto paramOuts = ngraph::helpers::convert2OutputVector(ngraph::helpers::castOps2Nodes(inputParams)); + conv = builder::makeGroupConvolution(paramOuts.front(), element::f32, kernelSize, strides, padBegin, padEnd, dilation, + paddingType, numOutChannels, numOfGroups); + + ResultVector results; + results.push_back(std::make_shared(conv)); + + function = std::make_shared(results, inputParams, "groupConvolution"); + } +}; + +TEST_P(GroupConvToConvTransformationCPUTest, CompareWithRefs) { + run(); + CheckNumberOfNodesWithType(compiledModel, "Split", 1); + CheckNumberOfNodesWithType(compiledModel, "Convolution", numOfGroups); + CheckNumberOfNodesWithType(compiledModel, "Concatenation", 1); +} + +namespace { +std::vector inShapes = { + {{}, {{ 2, 12, 7 }}}, + { + //dynamic shape + {-1, 12, {1, 20}}, + { //target static shapes + { 2, 12, 7 }, + { 1, 12, 5 } + } + } +}; +const auto groupConvTransformationParams = ::testing::Combine(::testing::ValuesIn(inShapes)); + +INSTANTIATE_TEST_SUITE_P(smoke_GroupConvToConvTransformationTest, GroupConvToConvTransformationCPUTest, + groupConvTransformationParams, GroupConvToConvTransformationCPUTest::getTestCaseName); + +} // namespace +} // namespace CPUSubgraphTestsDefinitions diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/arm/convert_group_conv1d.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/arm/convert_group_conv1d.cpp new file mode 100644 index 00000000000000..d8e529acef16bb --- /dev/null +++ 
b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/arm/convert_group_conv1d.cpp @@ -0,0 +1,134 @@ +// Copyright (C) 2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include +#include +#include +#include +#include +#include +#include "common_test_utils/common_utils.hpp" +#include +#include "functional_test_utils/skip_tests_config.hpp" +#include "test_utils/cpu_test_utils.hpp" +#include "cpp_interfaces/interface/ie_internal_plugin_config.hpp" + +#include "test_utils/cpu_test_utils.hpp" +#include "test_utils/convolution_params.hpp" + +using namespace InferenceEngine; +using namespace CPUTestUtils; +using namespace ov::test; +using namespace ngraph; +using namespace ngraph::helpers; + +namespace CPUSubgraphTestsDefinitions { + +typedef std::tuple conv1dConvertCPUTestParamsSet; + +class Conv1dConvertTransformationCPUTest: public testing::WithParamInterface, + virtual public SubgraphBaseTest, public CPUTestsBase { +public: + static std::string getTestCaseName(testing::TestParamInfo obj) { + InputShape inputShapes; + nodeType convType; + std::tie(convType, inputShapes) = obj.param; + + std::ostringstream result; + result << nodeType2str(convType) << "_"; + result << "IS=" << inputShapes; + + return result.str(); + } + +protected: + void SetUp() override { + targetDevice = CommonTestUtils::DEVICE_CPU; + InputShape inputShapes; + nodeType convType; + std::tie(convType, inputShapes) = this->GetParam(); + + init_input_shapes({inputShapes}); + + std::shared_ptr conv; + const std::vector kernelSize = {1}; + const std::vector strides = {1}; + const std::vector padBegin = {0}; + const std::vector padEnd = {0}; + const std::vector dilation = {1}; + const size_t numOutChannels = 30; + const size_t numOfGroups = 2; + const op::PadType paddingType = op::PadType::EXPLICIT; + + auto inputParams = ngraph::builder::makeDynamicParams(ngraph::element::f32, inputDynamicShapes); + auto paramOuts = 
ngraph::helpers::convert2OutputVector(ngraph::helpers::castOps2Nodes(inputParams)); + switch (convType) { + case nodeType::convolution : { + conv = builder::makeConvolution(paramOuts.front(), element::f32, kernelSize, strides, padBegin, padEnd, dilation, + paddingType, numOutChannels); + break; + } + case nodeType::groupConvolution : { + conv = builder::makeGroupConvolution(paramOuts.front(), element::f32, kernelSize, strides, padBegin, padEnd, dilation, + paddingType, numOutChannels, numOfGroups); + break; + } + default: { + throw std::runtime_error("Conv1dConvertTransformationCPUTest doesn't support this type of operation"); + } + } + + ResultVector results; + results.push_back(std::make_shared(conv)); + + function = std::make_shared(results, inputParams, "convolution"); + } +}; + +TEST_P(Conv1dConvertTransformationCPUTest, CompareWithRefs) { + run(); + CheckNumberOfNodesWithType(compiledModel, "Reshape", 2); +} + +namespace { +const std::vector convType = { nodeType::convolution, nodeType::groupConvolution }; +std::vector inputShapes1d = { + {{}, {{ 2, 64, 7 }}}, + {{}, {{ 1, 32, 7 }}}, + { + //dynamic shape + { -1, 64, {1, 20} }, + { //target static shapes + { 2, 64, 7 }, + { 1, 64, 9 } + } + }, + { + //dynamic shape + { -1, 32, {1, 20} }, + { //target static shapes + { 2, 32, 7 }, + { 1, 32, 9 } + } + }, + { + //dynamic shape + { {1, 20}, 64, -1 }, + { //target static shapes + { 2, 64, 7 }, + { 1, 64, 5 } + } + } +}; + +const auto groupConvTransformationParams = ::testing::Combine(::testing::ValuesIn(convType), + ::testing::ValuesIn(inputShapes1d)); + +INSTANTIATE_TEST_SUITE_P(smoke_GroupConvToConvTransformationTest, Conv1dConvertTransformationCPUTest, + groupConvTransformationParams, Conv1dConvertTransformationCPUTest::getTestCaseName); + +} // namespace +} // namespace CPUSubgraphTestsDefinitions diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/arm/convert_reduce_multi_axis.cpp 
b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/arm/convert_reduce_multi_axis.cpp new file mode 100644 index 00000000000000..00179599cd8cad --- /dev/null +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/arm/convert_reduce_multi_axis.cpp @@ -0,0 +1,116 @@ +// Copyright (C) 2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include +#include +#include +#include +#include +#include +#include "common_test_utils/common_utils.hpp" +#include +#include "functional_test_utils/skip_tests_config.hpp" +#include "test_utils/cpu_test_utils.hpp" +#include "cpp_interfaces/interface/ie_internal_plugin_config.hpp" + +#include "test_utils/cpu_test_utils.hpp" +#include "test_utils/convolution_params.hpp" + +using namespace InferenceEngine; +using namespace CPUTestUtils; +using namespace ov::test; +using namespace ngraph; +using namespace ngraph::helpers; + +namespace CPUSubgraphTestsDefinitions { + +typedef std::tuple< + std::vector, // Axis to reduce order + ngraph::helpers::ReductionType, // Reduce operation type + std::vector // Input shapes +> reduceConvertCPUTestParamsSet; + +class reduceTransformationCPUTest: public testing::WithParamInterface, + virtual public SubgraphBaseTest, public CPUTestsBase { +public: + static std::string getTestCaseName(testing::TestParamInfo obj) { + std::vector inputShapes; + std::vector axes; + ReductionType reductionType; + std::tie(axes, reductionType, inputShapes) = obj.param; + + std::ostringstream result; + result << "type=" << reductionType << "_"; + result << "IS=("; + for (const auto& shape : inputShapes) { + result << CommonTestUtils::partialShape2str({shape.first}) << "_"; + } + result << ")_axes=" << CommonTestUtils::vec2str(axes) << "_"; + return result.str(); + } + +protected: + int numberOfExpectedReduce; + void SetUp() override { + targetDevice = CommonTestUtils::DEVICE_CPU; + std::vector axes; + bool keepDims = true; + std::vector inputShapes; + std::tie(axes, reductionType, 
inputShapes) = this->GetParam(); + numberOfExpectedReduce = axes.size(); + + init_input_shapes(inputShapes); + + auto params = ngraph::builder::makeDynamicParams(ngraph::element::f32, inputDynamicShapes); + auto paramOuts = ngraph::helpers::convert2OutputVector(ngraph::helpers::castOps2Nodes(params)); + std::vector shapeAxes; + shapeAxes.push_back(axes.size()); + auto reductionAxesNode = std::dynamic_pointer_cast( + std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape(shapeAxes), axes)); + + const auto reduce = ngraph::builder::makeReduce(paramOuts[0], reductionAxesNode, keepDims, reductionType); + function = makeNgraphFunction(ElementType::f32, params, reduce, "Reduce"); + } +private: + ngraph::helpers::ReductionType reductionType; +}; + +TEST_P(reduceTransformationCPUTest, CompareWithRefs) { + run(); + CheckNumberOfNodesWithType(compiledModel, "Reduce", numberOfExpectedReduce); +} + +namespace { +std::vector> inputShapes = { + {{{}, {{2, 19, 2, 9}}}} +}; +const std::vector reductionTypes = { + ReductionType::Min, + ReductionType::Max, + ReductionType::Sum, + ReductionType::Prod +}; +const std::vector> axes = { + {0, 1}, + {0, 2}, + {0, 3}, + {1, 2}, + {1, 3}, + {2, 3}, + {0, 1, 3}, + {0, 2, 3}, + {1, 2, 3}, + {0, 1, 2, 3} +}; + +const auto reduceTransformationParams = ::testing::Combine(::testing::ValuesIn(axes), + ::testing::ValuesIn(reductionTypes), + ::testing::ValuesIn(inputShapes)); + +INSTANTIATE_TEST_SUITE_P(smoke_GroupConvToConvTransformationTest, reduceTransformationCPUTest, + reduceTransformationParams, reduceTransformationCPUTest::getTestCaseName); + +} // namespace +} // namespace CPUSubgraphTestsDefinitions diff --git a/src/plugins/intel_cpu/tests/functional/target_per_test.cmake b/src/plugins/intel_cpu/tests/functional/target_per_test.cmake index 80aa16c6fb4107..ca4995b9ede239 100644 --- a/src/plugins/intel_cpu/tests/functional/target_per_test.cmake +++ b/src/plugins/intel_cpu/tests/functional/target_per_test.cmake @@ -88,7 +88,7 @@ 
function(create_target_per_test_for_directory TEST_DIR TARGET_PREFIX) endfunction() if(ENABLE_CPU_SPECIFIC_TARGET_PER_TEST) - create_target_per_test_for_directory(${CMAKE_CURRENT_SOURCE_DIR}/subgraph_tests/src ov_cpu_func_subgraph) + create_target_per_test_for_directory(${CMAKE_CURRENT_SOURCE_DIR}/subgraph_tests/src/arm ov_cpu_func_subgraph) create_target_per_test_for_directory(${CMAKE_CURRENT_SOURCE_DIR}/single_layer_tests ov_cpu_func_slt) endif() diff --git a/src/plugins/intel_cpu/tests/unit/CMakeLists.txt b/src/plugins/intel_cpu/tests/unit/CMakeLists.txt index eb1a8fddd51354..2a06bf4235a19c 100644 --- a/src/plugins/intel_cpu/tests/unit/CMakeLists.txt +++ b/src/plugins/intel_cpu/tests/unit/CMakeLists.txt @@ -12,14 +12,16 @@ if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") ie_add_compiler_flags(/wd5051) endif() -if (NOT OPENVINO_ARCH_X86_64) - set(EXCLUDED_SOURCE_PATHS_FOR_NON_X64 ${CMAKE_CURRENT_SOURCE_DIR}/jit_kernel_test.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/registers_pool.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/ngraph_transformations/convert_to_interaction.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/ngraph_transformations/snipptes_mark_skipped.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/ngraph_transformations/mul_add_to_fma.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/snippets_transformations - ${CMAKE_CURRENT_SOURCE_DIR}/nodes/eltwise_node_test.cpp) +if (X86_64) + set(EXCLUDED_SOURCE_PATHS_FOR_UNIT_TEST + ${CMAKE_CURRENT_SOURCE_DIR}/ngraph_transformations/arm) +else() + set(EXCLUDED_SOURCE_PATHS_FOR_UNIT_TEST + ${CMAKE_CURRENT_SOURCE_DIR}/jit_kernel_test.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/registers_pool.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/ngraph_transformations/x64 + ${CMAKE_CURRENT_SOURCE_DIR}/snippets_transformations + ${CMAKE_CURRENT_SOURCE_DIR}/nodes/eltwise_node_test.cpp) endif() addIeTargetTest( @@ -35,7 +37,7 @@ addIeTargetTest( PRIVATE $/include EXCLUDED_SOURCE_PATHS - ${EXCLUDED_SOURCE_PATHS_FOR_NON_X64} + ${EXCLUDED_SOURCE_PATHS_FOR_UNIT_TEST} OBJECT_FILES ${OBJ_LIB} LINK_LIBRARIES diff --git 
a/src/plugins/intel_cpu/tests/unit/ngraph_transformations/arm/convert_group_conv.cpp b/src/plugins/intel_cpu/tests/unit/ngraph_transformations/arm/convert_group_conv.cpp new file mode 100644 index 00000000000000..63a4335fadead2 --- /dev/null +++ b/src/plugins/intel_cpu/tests/unit/ngraph_transformations/arm/convert_group_conv.cpp @@ -0,0 +1,109 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "common_test_utils/ngraph_test_utils.hpp" + +using namespace testing; +using namespace ov::intel_cpu; + +template +static std::shared_ptr createInitGraph(std::shared_ptr param, ngraph::Shape weights_shape) { + auto weights = ngraph::opset1::Constant::create(ngraph::element::f32, weights_shape, { 1 }); + auto conv = std::make_shared(param, + weights, + ngraph::Strides{1}, + ngraph::CoordinateDiff{0}, + ngraph::CoordinateDiff{0}, + ngraph::Strides{1}); + + return std::make_shared(ngraph::NodeVector{ conv }, ngraph::ParameterVector{ param }); +} + +TEST(TransformationTests, CheckConvertGroupConvIsApplied) { + std::shared_ptr function(nullptr), function_ref(nullptr); + { + auto param = std::make_shared(ngraph::element::f32, ngraph::Shape{1, 6, 224}); + function = createInitGraph(param, ngraph::Shape{2, 1, 3, 5}); + ov::pass::Manager manager; + manager.register_pass(); + manager.run_passes(function); + } + { + const unsigned int groups = 2; + const unsigned int channel_axis = 1; + auto param = std::make_shared(ngraph::element::f32, ngraph::Shape{1, 6, 224}); + auto weights = ngraph::opset1::Constant::create(ngraph::element::f32, ngraph::Shape{groups, 1, 3, 5}, { 1 }); + auto split_weights = std::make_shared(weights, + ngraph::opset1::Constant::create(ngraph::element::i64, ngraph::Shape{}, {0}), + groups); + auto axis = ngraph::opset1::Constant::create(ngraph::element::i64, ngraph::Shape{}, 
{channel_axis}); + auto split = std::make_shared(param, axis, groups); + ngraph::NodeVector concat_inputs; + for (size_t g = 0; g < groups; g++) { + auto out = split->output(g); + auto filter = std::make_shared(split_weights->output(g), + ngraph::opset1::Constant::create(ngraph::element::i64, ngraph::Shape{}, {0})); + auto conv = std::make_shared(out, + filter, + ngraph::Strides{1}, + ngraph::CoordinateDiff{0}, + ngraph::CoordinateDiff{0}, + ngraph::Strides{1}); + concat_inputs.push_back(conv); + } + auto concat = std::make_shared(concat_inputs, 1); + function_ref = std::make_shared(ngraph::NodeVector{ concat }, ngraph::ParameterVector{ param }); + } + auto res = compare_functions(function, function_ref); + ASSERT_TRUE(res.first) << res.second; +} + +TEST(TransformationTests, CheckConvertGroupConvIsNotAppliedForDepthwiseCase) { + std::shared_ptr function(nullptr), function_ref(nullptr); + { + auto param = std::make_shared(ngraph::element::f32, ngraph::Shape{1, 2, 224}); + function = createInitGraph(param, ngraph::Shape{2, 1, 1, 5}); + ov::pass::Manager manager; + manager.register_pass(); + manager.run_passes(function); + } + { + auto param = std::make_shared(ngraph::element::f32, ngraph::Shape{1, 2, 224}); + function_ref = createInitGraph(param, ngraph::Shape{2, 1, 1, 5}); + } + auto res = compare_functions(function, function_ref); + ASSERT_TRUE(res.first) << res.second; +} + +TEST(TransformationTests, CheckConvertGroupConvIsNotAppliedForDynamicShapes) { + std::shared_ptr function(nullptr), function_ref(nullptr); + { + auto param = std::make_shared(ngraph::element::f32, ngraph::PartialShape{1, -1, 224}); + function = createInitGraph(param, ngraph::Shape{2, 1, 1, 5}); + ov::pass::Manager manager; + manager.register_pass(); + manager.run_passes(function); + } + { + auto param = std::make_shared(ngraph::element::f32, ngraph::PartialShape{1, -1, 224}); + function_ref = createInitGraph(param, ngraph::Shape{2, 1, 1, 5}); + } + auto res = compare_functions(function, 
function_ref); + ASSERT_TRUE(res.first) << res.second; +} \ No newline at end of file diff --git a/src/plugins/intel_cpu/tests/unit/ngraph_transformations/arm/convert_group_conv1d.cpp b/src/plugins/intel_cpu/tests/unit/ngraph_transformations/arm/convert_group_conv1d.cpp new file mode 100644 index 00000000000000..f013d3c1a67b97 --- /dev/null +++ b/src/plugins/intel_cpu/tests/unit/ngraph_transformations/arm/convert_group_conv1d.cpp @@ -0,0 +1,123 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "common_test_utils/ngraph_test_utils.hpp" + +using namespace testing; +using namespace ov::intel_cpu; + +template +static std::shared_ptr createInitGraph(ngraph::Shape param_shape, ngraph::Shape weights_shape) { + auto type = ngraph::element::f32; + auto param = std::make_shared(type, param_shape); + auto weights = ngraph::opset1::Constant::create(type, weights_shape, { 1 }); + bool is1Dinput = param_shape.size() == 3; + auto conv = std::make_shared(param, + weights, + is1Dinput ? ngraph::Strides{1} : ngraph::Strides{1, 1}, + is1Dinput ? ngraph::CoordinateDiff{0} : ngraph::CoordinateDiff{0, 0}, + is1Dinput ? ngraph::CoordinateDiff{0} : ngraph::CoordinateDiff{0, 0}, + is1Dinput ? 
ngraph::Strides{1} : ngraph::Strides{1, 1}); + + return std::make_shared(ngraph::NodeVector{ conv }, ngraph::ParameterVector{ param }); +} + +template +static std::shared_ptr createTransformedGraph(ngraph::Shape param_shape, ngraph::Shape weights_shape) { + auto getUnsqueeze = [&](const ngraph::Output& node) { + auto rank = node.get_partial_shape().rank().get_length(); + return std::make_shared(node, + ngraph::opset1::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {rank})); + }; + auto type = ngraph::element::f32; + auto param = std::make_shared(type, param_shape); + auto weights = ngraph::opset1::Constant::create(type, weights_shape, { 1 }); + auto input2d = getUnsqueeze(param); + auto weights2d = getUnsqueeze(weights); + auto conv2d = std::make_shared(input2d, + weights2d, + ngraph::Strides{1, 1}, + ngraph::CoordinateDiff{0, 0}, + ngraph::CoordinateDiff{0, 0}, + ngraph::Strides{1, 1}); + + auto reshape = std::make_shared(conv2d, + ngraph::opset1::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {3})); + return std::make_shared(ngraph::NodeVector{ reshape }, ngraph::ParameterVector{ param }); +} + +TEST(TransformationTests, CheckConvertConv1DIsAppliedFor1DShapes) { + std::shared_ptr function(nullptr), function_ref(nullptr); + { + function = createInitGraph(ngraph::Shape{2, 64, 7}, ngraph::Shape{ 30, 64, 1 }); + ov::pass::Manager manager; + manager.register_pass(); + manager.run_passes(function); + } + { + function_ref = createTransformedGraph(ngraph::Shape{2, 64, 7}, ngraph::Shape{30, 64, 1}); + } + auto res = compare_functions(function, function_ref); + ASSERT_TRUE(res.first) << res.second; +} + +TEST(TransformationTests, CheckConvertConv1DIsNotAppliedFor2DShapes) { + std::shared_ptr function(nullptr), function_ref(nullptr); + { + function = createInitGraph(ngraph::Shape{2, 64, 7, 1}, ngraph::Shape{30, 64, 1, 1}); + ov::pass::Manager manager; + manager.register_pass(); + manager.run_passes(function); + } + { + function_ref = 
createInitGraph(ngraph::Shape{2, 64, 7, 1}, ngraph::Shape{30, 64, 1, 1}); + } + auto res = compare_functions(function, function_ref); + ASSERT_TRUE(res.first) << res.second; +} + +TEST(TransformationTests, CheckConvertGroupConv1DIsAppliedFor1dShapes) { + std::shared_ptr function(nullptr), function_ref(nullptr); + { + function = createInitGraph(ngraph::Shape{1, 12, 64}, ngraph::Shape{4, 1, 3, 5}); + ov::pass::Manager manager; + manager.register_pass(); + manager.run_passes(function); + } + { + function_ref = createTransformedGraph(ngraph::Shape{1, 12, 64}, ngraph::Shape{4, 1, 3, 5}); + } + auto res = compare_functions(function, function_ref); + ASSERT_TRUE(res.first) << res.second; +} + +TEST(TransformationTests, CheckConvertGroupConv1DIsNotAppliedFor2DShapes) { + std::shared_ptr function(nullptr), function_ref(nullptr); + { + function = createInitGraph(ngraph::Shape{1, 12, 64, 1}, ngraph::Shape{4, 1, 3, 5, 1}); + ov::pass::Manager manager; + manager.register_pass(); + manager.run_passes(function); + } + { + function_ref = createInitGraph(ngraph::Shape{1, 12, 64, 1}, ngraph::Shape{4, 1, 3, 5, 1}); + } + auto res = compare_functions(function, function_ref); + ASSERT_TRUE(res.first) << res.second; +} diff --git a/src/plugins/intel_cpu/tests/unit/ngraph_transformations/arm/convert_reduce_multi_axis.cpp b/src/plugins/intel_cpu/tests/unit/ngraph_transformations/arm/convert_reduce_multi_axis.cpp new file mode 100644 index 00000000000000..c887a4172ccefc --- /dev/null +++ b/src/plugins/intel_cpu/tests/unit/ngraph_transformations/arm/convert_reduce_multi_axis.cpp @@ -0,0 +1,109 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "common_test_utils/ngraph_test_utils.hpp" + +using namespace testing; +using namespace ov::intel_cpu; + +template +class ConvertReduceMultiAxisTest : public testing::Test 
{}; + +template +static std::shared_ptr createInitGraph(std::shared_ptr param) { + auto axes = ngraph::opset1::Constant::create(ngraph::element::i64, ngraph::Shape{2}, {0, 1}); + auto reduce = std::make_shared(param, axes, true); + return std::make_shared(ngraph::NodeVector{ reduce }, ngraph::ParameterVector{ param }); +} + +template +static std::shared_ptr createRefGraph(ov::Shape param_shape) { + auto param = std::make_shared(ngraph::element::f32, param_shape); + std::vector axes = {0, 1}; + ngraph::NodeVector new_ops; + std::shared_ptr node = param; + for (auto axis : axes) { + auto reduction_axis = ngraph::opset1::Constant::create(ngraph::element::i64, ngraph::Shape{}, {axis}); + node = std::make_shared(node, reduction_axis, true); + new_ops.push_back(node); + } + auto reshape_shape = ngraph::opset1::Constant::create(ngraph::element::i64, ngraph::Shape{param_shape.size()}, {1, 1, 2, 9}); + auto reshape = std::make_shared(node, reshape_shape, true); + + return std::make_shared(ngraph::NodeVector{ reshape }, ngraph::ParameterVector{ param }); +} + +template +static bool registerAndRunReducePass(std::shared_ptr function) { + ov::pass::Manager manager; + if (std::is_same::value) { + manager.register_pass(); + } else if (std::is_same::value) { + manager.register_pass(); + } else if (std::is_same::value) { + manager.register_pass(); + } else if (std::is_same::value) { + manager.register_pass(); + } else { + return false; + } + manager.run_passes(function); + return true; +} + +static ngraph::Shape static_param_shape = ngraph::Shape{2, 19, 2, 9}; +static ngraph::PartialShape dynamic_param_shape = ngraph::PartialShape{2, -1, 2, 9}; + +TYPED_TEST_SUITE_P(ConvertReduceMultiAxisTest); + +TYPED_TEST_P(ConvertReduceMultiAxisTest, CheckConvertReduceTransformationIsApplied) { + auto param = std::make_shared(ngraph::element::f32, static_param_shape); + auto function = createInitGraph(param); + auto function_ref = createRefGraph(static_param_shape); + + if 
(!registerAndRunReducePass(function)) { + FAIL() << "Reduce pass is not registered."; + } + + auto res = compare_functions(function, function_ref); + ASSERT_TRUE(res.first) << res.second; +} + +TYPED_TEST_P(ConvertReduceMultiAxisTest, CheckConvertReduceTransformationIsNotAppliedForDynamicShapes) { + auto param = std::make_shared(ngraph::element::f32, dynamic_param_shape); + auto function = createInitGraph(param); + auto function_ref = createInitGraph(param); + + if (!registerAndRunReducePass(function)) { + FAIL() << "Reduce pass is not registered."; + } + + auto res = compare_functions(function, function_ref); + ASSERT_TRUE(res.first) << res.second; +} + +REGISTER_TYPED_TEST_SUITE_P(ConvertReduceMultiAxisTest, + CheckConvertReduceTransformationIsApplied, + CheckConvertReduceTransformationIsNotAppliedForDynamicShapes); + +using reduceTypes = ::testing::Types; +INSTANTIATE_TYPED_TEST_SUITE_P(ConvertReduce, ConvertReduceMultiAxisTest, reduceTypes); diff --git a/src/plugins/intel_cpu/tests/unit/ngraph_transformations/convert_to_interaction.cpp b/src/plugins/intel_cpu/tests/unit/ngraph_transformations/x64/convert_to_interaction.cpp similarity index 100% rename from src/plugins/intel_cpu/tests/unit/ngraph_transformations/convert_to_interaction.cpp rename to src/plugins/intel_cpu/tests/unit/ngraph_transformations/x64/convert_to_interaction.cpp diff --git a/src/plugins/intel_cpu/tests/unit/ngraph_transformations/snipptes_mark_skipped.cpp b/src/plugins/intel_cpu/tests/unit/ngraph_transformations/x64/snipptes_mark_skipped.cpp similarity index 90% rename from src/plugins/intel_cpu/tests/unit/ngraph_transformations/snipptes_mark_skipped.cpp rename to src/plugins/intel_cpu/tests/unit/ngraph_transformations/x64/snipptes_mark_skipped.cpp index 5d038fd9b6de7e..12adfaf76d1bf8 100644 --- a/src/plugins/intel_cpu/tests/unit/ngraph_transformations/snipptes_mark_skipped.cpp +++ b/src/plugins/intel_cpu/tests/unit/ngraph_transformations/x64/snipptes_mark_skipped.cpp @@ -7,6 +7,7 @@
#include #include #include "snippets/pass/tokenization.hpp" +#include "snippets/pass/collapse_subgraph.hpp" namespace ov { namespace test { @@ -17,11 +18,11 @@ class SnippetsMarkSkippedTests : public TransformationTestsF { void run() { ASSERT_TRUE(function); manager.register_pass(); - manager.register_pass(); - manager.register_pass(); + manager.register_pass(); + manager.register_pass(); // // todo: This is a temporary work-around. remove when MatMul tokenization is supported through general pipeline - manager.get_pass_config()->set_callback( + manager.get_pass_config()->set_callback( [](const std::shared_ptr& n) -> bool { return ov::is_type(n); }); diff --git a/src/plugins/intel_cpu/tests/unit/snippets_transformations/fake_quantize_tokenization_test.cpp b/src/plugins/intel_cpu/tests/unit/snippets_transformations/fake_quantize_tokenization_test.cpp index 5705486b4f4b05..d61ea9cf0d1832 100644 --- a/src/plugins/intel_cpu/tests/unit/snippets_transformations/fake_quantize_tokenization_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/snippets_transformations/fake_quantize_tokenization_test.cpp @@ -7,6 +7,7 @@ #include "common_test_utils/ngraph_test_utils.hpp" #include "snippets/pass/fq_decomposition.hpp" #include "snippets/pass/tokenization.hpp" +#include "snippets/pass/collapse_subgraph.hpp" #include "fake_quantize_function.hpp" #include "snippets/op/subgraph.hpp" #include "transformations/snippets/x64/pass/snippets_mark_skipped.hpp" @@ -20,9 +21,9 @@ class FakeQuantizeTokenizationTest : public TransformationTestsF { public: void register_passes() { manager.register_pass(); - manager.register_pass(); - manager.register_pass(); - manager.get_pass_config()->set_callback([](const std::shared_ptr& n) -> bool { + manager.register_pass(); + manager.register_pass(); + manager.get_pass_config()->set_callback([](const std::shared_ptr& n) -> bool { return false; }); } @@ -31,10 +32,10 @@ class FakeQuantizeTokenizationTest : public TransformationTestsF { 
TransformationTestsF::TearDown(); auto subgraph = FunctionHelper::getSubgraph(function); - auto body = subgraph == nullptr ? nullptr : std::dynamic_pointer_cast(subgraph)->body_ptr(); + auto body = subgraph == nullptr ? nullptr : std::dynamic_pointer_cast(subgraph)->body_ptr(); auto subgraph_ref = FunctionHelper::getSubgraph(function_ref); - auto body_ref = subgraph_ref == nullptr ? nullptr : std::dynamic_pointer_cast(subgraph_ref)->body_ptr(); + auto body_ref = subgraph_ref == nullptr ? nullptr : std::dynamic_pointer_cast(subgraph_ref)->body_ptr(); if ((body != nullptr) && (body_ref != nullptr)) { auto res = comparator.compare(body, body_ref); diff --git a/src/plugins/intel_cpu/tests/unit/snippets_transformations/mul_add_to_fma.cpp b/src/plugins/intel_cpu/tests/unit/snippets_transformations/mul_add_to_fma.cpp index 5184d9211f8105..9751e1785a8876 100644 --- a/src/plugins/intel_cpu/tests/unit/snippets_transformations/mul_add_to_fma.cpp +++ b/src/plugins/intel_cpu/tests/unit/snippets_transformations/mul_add_to_fma.cpp @@ -6,7 +6,7 @@ #include #include #include - +#include "snippets/op/scalar.hpp" #include "lowering_utils.hpp" namespace ov { @@ -60,7 +60,7 @@ class EltwiseWithMulAddFunction : public SnippetsFunctionBase { ParameterVector parameters{data0, data1}; std::shared_ptr data2; if (scalar_input) { - data2 = std::make_shared(precision, Shape{}, 2.f); + data2 = std::make_shared(precision, Shape{}, 2.f); } else { auto parameter = std::make_shared(precision, input_shapes[2]); parameters.push_back(parameter); @@ -132,12 +132,12 @@ class MulAddToFMATests : public LoweringTests, public testing::WithParamInterfac } std::shared_ptr snippets_function; - std::shared_ptr generator; + std::shared_ptr generator; ov::pass::Manager cpu_manager; }; TEST_P(MulAddToFMATests, MulAddToFMATests) { - auto subgraph = getLoweredSubgraph(snippets_function->getOriginal(), master_shape, {}, {}, cpu_manager, generator); + auto subgraph = 
getLoweredSubgraph(snippets_function->getOriginal(), master_shape, {}, {}, cpu_manager, {}, generator); model = subgraph->body_ptr(); model_ref = snippets_function->getLowered(); }