From 86a36411027aa7aba97784c261ffcb64dd2ff92f Mon Sep 17 00:00:00 2001
From: Mikhail Ryzhov
Date: Mon, 13 Feb 2023 18:14:21 +0100
Subject: [PATCH] clang formatting

---
 src/plugins/intel_gna/src/gna_plugin.cpp      |  32 ++---
 src/plugins/intel_gna/src/gna_plugin.hpp      |  14 +--
 src/plugins/intel_gna/src/preprocessing.cpp   |  18 ++-
 src/plugins/intel_gna/src/preprocessing.hpp   |   6 +-
 .../intel_gna/src/serial/gna_model_serial.cpp |  25 ++--
 .../intel_gna/src/serial/gna_model_serial.hpp |   2 +-
 .../serial/headers/2dot9/gna_model_header.hpp |   5 +-
 .../src/transformations/gather_remove.cpp     |  60 +++++-----
 .../src/transformations/gather_remove.hpp     |  16 +--
 .../functional/pass_tests/gather_remove.cpp   | 112 ++++++++----------
 10 files changed, 139 insertions(+), 151 deletions(-)

diff --git a/src/plugins/intel_gna/src/gna_plugin.cpp b/src/plugins/intel_gna/src/gna_plugin.cpp
index bc3ef0662c78cf..04d03e5b90bb99 100644
--- a/src/plugins/intel_gna/src/gna_plugin.cpp
+++ b/src/plugins/intel_gna/src/gna_plugin.cpp
@@ -39,7 +39,6 @@
 #include "gna_fused_iterator.hpp"
 #include "gna_graph_patterns.hpp"
 #include "gna_itt.hpp"
-#include "serial/gna_model_serial.hpp"
 #include "gna_plugin_config.hpp"
 #include "gna_tensor_tools.hpp"
 #include "gna_transformations_pipeline.hpp"
@@ -341,7 +340,9 @@ void GNAPlugin::ImportFrames(void* ptr_dst,
     }
 }

-void GNAPlugin::pre_post_process(InferenceEngine::Blob::Ptr input_blob, InferenceEngine::Blob::Ptr output_blob, std::shared_ptr<ov::Model> model) {
+void GNAPlugin::pre_post_process(InferenceEngine::Blob::Ptr input_blob,
+                                 InferenceEngine::Blob::Ptr output_blob,
+                                 std::shared_ptr<ov::Model> model) {
     const ov::element::Type input_prc = details::convertPrecision(input_blob->getTensorDesc().getPrecision());
     const ov::element::Type output_prc = details::convertPrecision(output_blob->getTensorDesc().getPrecision());
     const ov::Shape& input_shape = input_blob->getTensorDesc().getDims();
@@ -985,12 +986,12 @@ void GNAPlugin::LoadNetwork(const CNNNetwork& _network) {
         }
     }

-    //TODO: Need to remove this conversation when ngraph NCHW<->NHWC transformation is enabled
+    // TODO: Need to remove this conversion when ngraph NCHW<->NHWC transformation is enabled
     if (!transpose_inputs_info.empty()) {
-        convert_transpose_map_to_model(transpose_inputs_info, inputs_ptr_->Get());
+        ConvertTransposeMapToModel(transpose_inputs_info, inputs_ptr_->Get());
     }
     if (!transpose_outputs_info.empty()) {
-        convert_transpose_map_to_model(transpose_outputs_info, outputs_.Get());
+        ConvertTransposeMapToModel(transpose_outputs_info, outputs_.Get());
     }

     DumpXNNToFile();
@@ -1115,14 +1116,15 @@ uint32_t GNAPlugin::QueueInference(const InferenceEngine::BlobMap& inputs, Infer
     int inputNum = 0;
     for (auto& input : inputs) {
         std::string input_name = input.first;
-        Blob::Ptr gna_input_blob = input.second; // this copy is needed to split user input and plugin's
+        Blob::Ptr gna_input_blob = input.second;  // this copy keeps the user's input blob separate from the plugin's
         InferenceEngine::Layout input_layout = gna_input_blob->getTensorDesc().getLayout();

         if (input_layout != InferenceEngine::Layout::C && input_layout != InferenceEngine::Layout::NC &&
             input_layout != InferenceEngine::Layout::CN && input_layout != InferenceEngine::Layout::CHW &&
             input_layout != InferenceEngine::Layout::NCHW) {
             THROW_GNA_EXCEPTION << "Expected input blob to have Layout::C, Layout::NC, Layout::CN, Layout::NCHW or "
-                                   "Layout::CHW. But was: " << input_layout;
+                                   "Layout::CHW. But was: "
+                                << input_layout;
         }

         if (input_layout == InferenceEngine::Layout::NCHW || input_layout == InferenceEngine::Layout::CHW) {
@@ -1274,7 +1276,8 @@ RequestStatus GNAPlugin::WaitFor(uint32_t request_idx, int64_t millisTimeout) {
             output_layout != InferenceEngine::Layout::CN && output_layout != InferenceEngine::Layout::NCHW &&
             output_layout != InferenceEngine::Layout::CHW && output_layout != InferenceEngine::Layout::SCALAR) {
             THROW_GNA_EXCEPTION << "Expected output blob to have Layout::C, Layout::NC, Layout::CN, Layout::NCHW or "
-                                   "Layout::CHW. But was " << output_layout;
+                                   "Layout::CHW. But was "
+                                << output_layout;
         }

         auto dims = output_blob->getTensorDesc().getDims();
@@ -1282,7 +1285,8 @@ RequestStatus GNAPlugin::WaitFor(uint32_t request_idx, int64_t millisTimeout) {
         auto isScalar = output_layout == InferenceEngine::Layout::SCALAR;
         auto is3D = output_layout == InferenceEngine::Layout::CHW;
         auto batchSize = (is1D || isScalar || is3D) ? 1 : dims[0];
-        auto elementsPerBatch = isScalar ? 1 : (is1D ? dims.front() : details::product(++std::begin(dims), std::end(dims)));\
+        auto elementsPerBatch =
+            isScalar ? 1 : (is1D ? dims.front() : details::product(++std::begin(dims), std::end(dims)));

         OutputDesc& gna_output_desc = outputs_.at(output_name);
         TensorDesc tensor_desc(gna_output_desc.tensor_precision, gna_output_desc.dims, gna_output_desc.model_layout);
@@ -1534,12 +1538,12 @@ InferenceEngine::IExecutableNetworkInternal::Ptr GNAPlugin::ImportNetwork(std::i
         }
     }

-    //TODO: Need to remove this conversation when ngraph NCHW<->NHWC transformation is enabled
-    if(!transpose_inputs_info.empty()) {
-        convert_transpose_map_to_model(transpose_inputs_info, inputs_ptr_->Get());
+    // TODO: Need to remove this conversion when ngraph NCHW<->NHWC transformation is enabled
+    if (!transpose_inputs_info.empty()) {
+        ConvertTransposeMapToModel(transpose_inputs_info, inputs_ptr_->Get());
     }
-    if(!transpose_outputs_info.empty()) {
-        convert_transpose_map_to_model(transpose_outputs_info, outputs_.Get());
+    if (!transpose_outputs_info.empty()) {
+        ConvertTransposeMapToModel(transpose_outputs_info, outputs_.Get());
     }

     for (auto&& memory : mt) {
diff --git a/src/plugins/intel_gna/src/gna_plugin.hpp b/src/plugins/intel_gna/src/gna_plugin.hpp
index f4e3ff4f3f7d5c..35c0c8e073a246 100644
--- a/src/plugins/intel_gna/src/gna_plugin.hpp
+++ b/src/plugins/intel_gna/src/gna_plugin.hpp
@@ -25,9 +25,9 @@
 #include "gna_data_types.hpp"
 #include "gna_graph_compiler.hpp"
 #include "gna_plugin_config.hpp"
-#include "preprocessing.hpp"
 #include "log/debug.hpp"
 #include "log/log.hpp"
+#include "preprocessing.hpp"

 namespace ov {
 namespace intel_gna {
@@ -193,8 +193,8 @@ class GNAPlugin : public InferenceEngine::IInferencePlugin {
     void DumpXNNToFile() const;

     void pre_post_process(InferenceEngine::Blob::Ptr input_blob,
-                        InferenceEngine::Blob::Ptr output_blob,
-                        std::shared_ptr<ov::Model> model);
+                          InferenceEngine::Blob::Ptr output_blob,
+                          std::shared_ptr<ov::Model> model);

     void ImportFrames(void* ptr_dst,
                       const void* ptr_src,
@@ -227,13 +227,13 @@ class GNAPlugin : public InferenceEngine::IInferencePlugin {
                      intel_dnn_orientation_t orientation,
                      float scaleFactor);

+    // TODO: Need to remove this conversion when ngraph NCHW<->NHWC transformation is enabled
     template <typename T1, typename T2>
-    inline void convert_transpose_map_to_model(T1& transposes, T2& nodes) {
-        for (auto && node : nodes)
-        {
+    inline void ConvertTransposeMapToModel(T1& transposes, T2& nodes) {
+        for (auto&& node : nodes) {
             auto t_it = transposes.find(node.name);
             if (t_it != transposes.end() && !t_it->second.empty()) {
-                node.pre_post_process_model = to_pre_post_process_model(t_it->second);
+                node.pre_post_process_model = ToProcessModel(t_it->second);
             }
         }
     };
diff --git a/src/plugins/intel_gna/src/preprocessing.cpp b/src/plugins/intel_gna/src/preprocessing.cpp
index 73eb660c7dfaca..c2a52651fb5328 100644
--- a/src/plugins/intel_gna/src/preprocessing.cpp
+++ b/src/plugins/intel_gna/src/preprocessing.cpp
@@ -4,12 +4,10 @@

 #include "preprocessing.hpp"

-
+#include "gna_data_types.hpp"
+#include "ngraph/opsets/opset9.hpp"
 #include "openvino/core/model.hpp"
 #include "openvino/core/shape.hpp"
-#include "ngraph/opsets/opset9.hpp"
-#include "gna_data_types.hpp"
-

 using namespace ngraph::opset9;

@@ -54,7 +52,7 @@ void ConvertToInt16(int16_t* ptr_dst,

 /*
 Convert legacy transposition info to preprocessing model
 */
-std::shared_ptr<ov::Model> to_pre_post_process_model(const TranspositionInfo& t_info) {
+std::shared_ptr<ov::Model> ToProcessModel(const TranspositionInfo& t_info) {
     size_t c_size = t_info.num_transpose_rows;
     size_t hw_size = t_info.num_transpose_columns;
@@ -81,20 +79,20 @@

 /*
 Convert legacy transposition info to preprocessing model
 */
-std::shared_ptr<ov::Model> to_pre_post_process_model(const std::vector<TranspositionInfo>& transposes) {
+std::shared_ptr<ov::Model> ToProcessModel(const std::vector<TranspositionInfo>& transposes) {
     // case when the input should be transposed entirely
     if (transposes.size() == 1) {
-        return to_pre_post_process_model(transposes.front());
+        return ToProcessModel(transposes.front());
     }

     std::vector<size_t> indexes = {};
-    for (auto & transpose : transposes) {
+    for (auto& transpose : transposes) {
         size_t c_size = transpose.num_transpose_rows;
         size_t hw_size = transpose.num_transpose_columns;
         size_t chw_size = c_size * hw_size;
         size_t id = indexes.size();
-        for(size_t i{0}; i < chw_size; ++i) {
-            size_t idx = (transpose.transpose) ? hw_size * (i % c_size) + i / c_size : i;
+        for (size_t i{0}; i < chw_size; ++i) {
+            size_t idx = (transpose.transpose) ? hw_size * (i % c_size) + i / c_size : i;
             indexes.push_back(id + idx);
         }
     }
diff --git a/src/plugins/intel_gna/src/preprocessing.hpp b/src/plugins/intel_gna/src/preprocessing.hpp
index aac68f5864f522..736c9fce6917f9 100644
--- a/src/plugins/intel_gna/src/preprocessing.hpp
+++ b/src/plugins/intel_gna/src/preprocessing.hpp
@@ -6,14 +6,14 @@

 #include <cstdint>

-#include "openvino/core/model.hpp"
 #include "gna_data_types.hpp"
+#include "openvino/core/model.hpp"

 namespace ov {
 namespace intel_gna {

-std::shared_ptr<ov::Model> to_pre_post_process_model(const TranspositionInfo& t_info);
-std::shared_ptr<ov::Model> to_pre_post_process_model(const std::vector<TranspositionInfo>& transposes);
+std::shared_ptr<ov::Model> ToProcessModel(const TranspositionInfo& t_info);
+std::shared_ptr<ov::Model> ToProcessModel(const std::vector<TranspositionInfo>& transposes);

 void ConvertToInt16(int16_t* ptr_dst,
                     const float* ptr_src,
diff --git a/src/plugins/intel_gna/src/serial/gna_model_serial.cpp b/src/plugins/intel_gna/src/serial/gna_model_serial.cpp
index 55b5b7927b0f6c..2bd8a6f491e3e3 100644
--- a/src/plugins/intel_gna/src/serial/gna_model_serial.cpp
+++ b/src/plugins/intel_gna/src/serial/gna_model_serial.cpp
@@ -18,13 +18,12 @@
 #    include <mm_malloc.h>
 #endif

-#include "openvino/pass/serialize.hpp"
-#include "openvino/runtime/core.hpp"
-
 #include "common/versioning.hpp"
 #include "gna2_model_helper.hpp"
 #include "gna_model_serial.hpp"
 #include "gna_plugin.hpp"
+#include "openvino/pass/serialize.hpp"
+#include "openvino/runtime/core.hpp"
 #include "serial/headers/2dot2/gna_model_header.hpp"
 #include "serial/headers/2dot5/gna_model_header.hpp"
 #include "serial/headers/2dot7/gna_model_header.hpp"
@@ -478,7 +477,7 @@ void GNAModelSerial::Export(const GnaAllocations& allocations, std::ostream& os)
             writeString(tname, os);
         }
         // write pre_processing model
-        if(input.pre_post_process_model) {
+        if (input.pre_post_process_model) {
             // allocate buffer for ir.xml
             std::ostringstream xml_buf;
             // allocate buffer for ir.bin
@@ -515,7 +514,7 @@ void GNAModelSerial::Export(const GnaAllocations& allocations, std::ostream& os)
         }

         // write pre_processing model
-        if(output.pre_post_process_model) {
+        if (output.pre_post_process_model) {
             // allocate buffer for ir.xml
             std::ostringstream xml_buf;
             // allocate buffer for ir.bin
@@ -611,15 +610,16 @@ void GNAModelSerial::Export(const GnaAllocations& allocations, std::ostream& os)
 }

 template <class T>
-void GNAModelSerial::ImportNodes(std::istream &is, void* base_ptr, T &nodes) {
-    for (auto &node : nodes.Get()) {
+void GNAModelSerial::ImportNodes(std::istream& is, void* base_ptr, T& nodes) {
+    for (auto& node : nodes.Get()) {
         header_latest::RuntimeEndPoint ep = ReadEndPoint(is);

-        node.ptrs.push_back(reinterpret_cast<float*>(reinterpret_cast<uint8_t*> (base_ptr) + ep.descriptor_offset));
+        node.ptrs.push_back(reinterpret_cast<float*>(reinterpret_cast<uint8_t*>(base_ptr) + ep.descriptor_offset));
         node.orientation = ep.orientation;
         node.num_elements = ep.elements_count;
         node.scale_factor = ep.scaleFactor;
-        node.model_precision = InferenceEngine::Precision(static_cast<InferenceEngine::Precision::ePrecision>(ep.precision));
+        node.model_precision =
+            InferenceEngine::Precision(static_cast<InferenceEngine::Precision::ePrecision>(ep.precision));
         node.set_precision(ep.element_size);
         node.model_layout = static_cast<InferenceEngine::Layout>(ep.layout);
         node.allocated_size = node.get_required_size();
@@ -637,11 +637,10 @@ void GNAModelSerial::ImportNodes(std::istream &is, void* base_ptr, T &nodes) {
         AppendTensorNameIfNeeded(node);

         // read preprocessing model
-        if (model_header_.version.major == 2 && model_header_.version.minor >= 9)
-        {
+        if (model_header_.version.major == 2 && model_header_.version.minor >= 9) {
             std::string ir_xml_str = readString(is);
-            if(!ir_xml_str.empty()) {
-                //read IR bin
+            if (!ir_xml_str.empty()) {
+                // read IR bin
                 size_t ir_bin_size = 0;
                 readBits(ir_bin_size, is);

diff --git a/src/plugins/intel_gna/src/serial/gna_model_serial.hpp b/src/plugins/intel_gna/src/serial/gna_model_serial.hpp
index 6f4b40ba96b7b1..dfc1ba300509cb 100644
--- a/src/plugins/intel_gna/src/serial/gna_model_serial.hpp
+++ b/src/plugins/intel_gna/src/serial/gna_model_serial.hpp
@@ -41,7 +41,7 @@ class GNAModelSerial {
     GNAVersionSerializer version_;

     template <class T>
-    void ImportNodes(std::istream &is, void* basePtr, T &inputs); //inputs or outputs
+    void ImportNodes(std::istream& is, void* basePtr, T& inputs);  // inputs or outputs

     void ImportTranspositionInfo(std::istream& is,
                                  std::string& name,
diff --git a/src/plugins/intel_gna/src/serial/headers/2dot9/gna_model_header.hpp b/src/plugins/intel_gna/src/serial/headers/2dot9/gna_model_header.hpp
index cd30b5ca838eee..9c184f9825b485 100644
--- a/src/plugins/intel_gna/src/serial/headers/2dot9/gna_model_header.hpp
+++ b/src/plugins/intel_gna/src/serial/headers/2dot9/gna_model_header.hpp
@@ -6,9 +6,10 @@

 #include <cstdint>
 #include <map>
+
 #include "backend/dnn_types.hpp"
-#include "serial/headers/2dot8/gna_model_header.hpp"
 #include "gna_data_types.hpp"
+#include "serial/headers/2dot8/gna_model_header.hpp"

 #pragma pack(push, 1)

@@ -217,6 +218,6 @@ struct RuntimeEndPoint {
           orientation(orientation) {}
 };

-}  // namespace header_2_dot_8
+}  // namespace header_2_dot_9
 }  // namespace intel_gna
 }  // namespace ov
diff --git a/src/plugins/intel_gna/src/transformations/gather_remove.cpp b/src/plugins/intel_gna/src/transformations/gather_remove.cpp
index 7c942752c29e67..be7bfb3b8458cb 100644
--- a/src/plugins/intel_gna/src/transformations/gather_remove.cpp
+++ b/src/plugins/intel_gna/src/transformations/gather_remove.cpp
@@ -2,19 +2,18 @@
 // SPDX-License-Identifier: Apache-2.0
 //

-#include
-
 #include "transformations/gather_remove.hpp"
-#include "transformations/utils/transformation_helper.hpp"
-
 #include
-#include "ngraph/validation_util.hpp"
 #include
-#include
-#include
 #include
 #include
+#include
+#include
+#include
+
+#include "ngraph/validation_util.hpp"
+#include "transformations/utils/transformation_helper.hpp"

 using namespace ov;
 using namespace ov::intel_gna::pass;
@@ -46,20 +45,22 @@ void SwapNames(NodePtr1 node1, NodePtr2 node2) {
 class GatherResultRemove : public ngraph::pass::MatcherPass {
 public:
     NGRAPH_RTTI_DECLARATION;
-    GatherResultRemove(ov::intel_gna::SubgraphCPUMap * subgraph_cpu_map = nullptr);
+    GatherResultRemove(ov::intel_gna::SubgraphCPUMap* subgraph_cpu_map = nullptr);
+
 private:
-    ov::intel_gna::SubgraphCPUMap * m_subgraph_cpu_map;
+    ov::intel_gna::SubgraphCPUMap* m_subgraph_cpu_map;
 };

 class GatherParamsRemove : public ngraph::pass::MatcherPass {
 public:
     NGRAPH_RTTI_DECLARATION;
-    GatherParamsRemove(ov::intel_gna::SubgraphCPUMap * subgraph_cpu_map = nullptr);
+    GatherParamsRemove(ov::intel_gna::SubgraphCPUMap* subgraph_cpu_map = nullptr);
+
 private:
-    ov::intel_gna::SubgraphCPUMap * m_subgraph_cpu_map;
+    ov::intel_gna::SubgraphCPUMap* m_subgraph_cpu_map;
 };
-} // namespace
+}  // namespace

 NGRAPH_RTTI_DEFINITION(GatherResultRemove, "GatherResultRemove", 0);
 NGRAPH_RTTI_DEFINITION(GatherParamsRemove, "GatherParamsRemove", 0);
@@ -89,7 +90,7 @@ void RemoveSingleInputNodeFromFunction(std::shared_ptr<ngraph::Node> node) {
 Support only one data node as 0 input
 */
 Function CopySingleInputNodeFromFunction(NodePtr node) {
-    const ngraph::Shape & input_shape = node->get_input_shape(0);
+    const ngraph::Shape& input_shape = node->get_input_shape(0);
     const ngraph::element::Type& input_elem_type = ngraph::element::Type_t::f32;

     auto input_params = std::make_shared<ngraph::opset9::Parameter>(input_elem_type, input_shape);
@@ -98,22 +99,20 @@ Function CopySingleInputNodeFromFunction(NodePtr node) {
     auto node_copy = node->clone_with_new_inputs(input_nodes);
     auto result = std::make_shared<ngraph::opset9::Result>(node_copy);

-    return std::make_shared<ngraph::Function>(ngraph::ResultVector{result},
-                                              ngraph::ParameterVector{input_params});
+    return std::make_shared<ngraph::Function>(ngraph::ResultVector{result}, ngraph::ParameterVector{input_params});
 }

-} // namespace
+}  // namespace

 // ----------------------------------------------------------------------------

-GatherResultRemove::GatherResultRemove(SubgraphCPUMap * subgraph_cpu_map)
-    : m_subgraph_cpu_map(subgraph_cpu_map) {
-
+GatherResultRemove::GatherResultRemove(SubgraphCPUMap* subgraph_cpu_map) : m_subgraph_cpu_map(subgraph_cpu_map) {
     MATCHER_SCOPE(GatherResultRemove);

-    auto gather = ngraph::pattern::wrap_type<ngraph::opset9::Gather>({ngraph::pattern::any_input(),
-                                                                      ngraph::pattern::any_input(),
-                                                                      ngraph::pattern::any_input()}); // FIXME: add consumers(1) constraint
+    auto gather = ngraph::pattern::wrap_type<ngraph::opset9::Gather>(
+        {ngraph::pattern::any_input(),
+         ngraph::pattern::any_input(),
+         ngraph::pattern::any_input()});  // FIXME: add consumers(1) constraint
     auto result = ngraph::pattern::wrap_type<ngraph::opset9::Result>({gather});

     ngraph::matcher_pass_callback callback = [=](ngraph::pattern::Matcher& m) {
@@ -124,7 +123,7 @@ GatherResultRemove::GatherResultRemove(SubgraphCPUMap * subgraph_cpu_map)
         NodePtr parent_node = gather_node->get_input_node_shared_ptr(0);

         if (m_subgraph_cpu_map) {
-            const std::string & gather_name = gather_node->get_friendly_name();
+            const std::string& gather_name = gather_node->get_friendly_name();
             m_subgraph_cpu_map->emplace(gather_name, CopySingleInputNodeFromFunction(gather_node));
         }
         RemoveSingleInputNodeFromFunction(gather_node);
@@ -138,21 +137,18 @@ GatherResultRemove::GatherResultRemove(SubgraphCPUMap * subgraph_cpu_map)
     this->register_matcher(m, callback);
 }

-GatherParamsRemove::GatherParamsRemove(SubgraphCPUMap * subgraph_cpu_map)
-    : m_subgraph_cpu_map(subgraph_cpu_map) {
-
+GatherParamsRemove::GatherParamsRemove(SubgraphCPUMap* subgraph_cpu_map) : m_subgraph_cpu_map(subgraph_cpu_map) {
     MATCHER_SCOPE(GatherParamsRemove);

     auto param = ngraph::pattern::wrap_type<ngraph::opset9::Parameter>();
-    auto gather = ngraph::pattern::wrap_type<ngraph::opset9::Gather>({param,
-                                                                      ngraph::pattern::any_input(),
-                                                                      ngraph::pattern::any_input()}); // FIXME: add consumers(1) constraint
+    auto gather = ngraph::pattern::wrap_type<ngraph::opset9::Gather>(
+        {param, ngraph::pattern::any_input(), ngraph::pattern::any_input()});  // FIXME: add consumers(1) constraint

     ngraph::matcher_pass_callback callback = [=](ngraph::pattern::Matcher& m) {
         const auto& pattern_map = m.get_pattern_value_map();
         const auto param_node = pattern_map.at(param).get_node_shared_ptr();
         const auto gather_node = pattern_map.at(gather).get_node_shared_ptr();
-        Node * child_node = gather_node->output(0).get_target_inputs().begin()->get_node();
+        Node* child_node = gather_node->output(0).get_target_inputs().begin()->get_node();

         if (m_subgraph_cpu_map)
             m_subgraph_cpu_map->emplace(param_node->get_friendly_name(), CopySingleInputNodeFromFunction(gather_node));
@@ -167,7 +163,7 @@ GatherParamsRemove::GatherParamsRemove(SubgraphCPUMap * subgraph_cpu_map)
     this->register_matcher(m, callback);
 }

-bool GatherRemove::run_on_model(const std::shared_ptr<ngraph::Function> & function) {
+bool GatherRemove::run_on_model(const std::shared_ptr<ngraph::Function>& function) {
     RUN_ON_FUNCTION_SCOPE(GatherRemove);

     ngraph::pass::Manager manager(get_pass_config());
@@ -175,5 +171,5 @@ bool GatherRemove::run_on_model(const std::shared_ptr<ngraph::Function> & functi
     manager.register_pass(m_subgraph_cpu_map);
     manager.run_passes(function);

-    return false; // FIXME: should we return true here?
+    return false;  // FIXME: should we return true here?
 }
diff --git a/src/plugins/intel_gna/src/transformations/gather_remove.hpp b/src/plugins/intel_gna/src/transformations/gather_remove.hpp
index 5b0e988b233e6d..9d88f2978c116a 100644
--- a/src/plugins/intel_gna/src/transformations/gather_remove.hpp
+++ b/src/plugins/intel_gna/src/transformations/gather_remove.hpp
@@ -4,10 +4,10 @@

 #pragma once

-#include "gna_data_types.hpp"
-
 #include

+#include "gna_data_types.hpp"
+
 namespace ov {
 namespace intel_gna {
 namespace pass {
@@ -15,13 +15,13 @@ namespace pass {
 class GatherRemove : public ngraph::pass::FunctionPass {
 public:
     NGRAPH_RTTI_DECLARATION;
-    GatherRemove(ov::intel_gna::SubgraphCPUMap * subgraph_cpu_map = nullptr) : m_subgraph_cpu_map(subgraph_cpu_map) {}
+    GatherRemove(ov::intel_gna::SubgraphCPUMap* subgraph_cpu_map = nullptr) : m_subgraph_cpu_map(subgraph_cpu_map) {}

     bool run_on_model(const std::shared_ptr<ngraph::Function>& f) override;
+
 private:
-    ov::intel_gna::SubgraphCPUMap * m_subgraph_cpu_map;
+    ov::intel_gna::SubgraphCPUMap* m_subgraph_cpu_map;
 };

-} // namespace pass
-} // namespace intel_gna
-} // namespace ov
-
+}  // namespace pass
+}  // namespace intel_gna
+}  // namespace ov
diff --git a/src/plugins/intel_gna/tests/functional/pass_tests/gather_remove.cpp b/src/plugins/intel_gna/tests/functional/pass_tests/gather_remove.cpp
index 3345e6a468b59e..005b35321f6523 100644
--- a/src/plugins/intel_gna/tests/functional/pass_tests/gather_remove.cpp
+++ b/src/plugins/intel_gna/tests/functional/pass_tests/gather_remove.cpp
@@ -7,19 +7,18 @@
 #include

 #include "common_test_utils/common_utils.hpp"
-#include "functional_test_utils/plugin_cache.hpp"
-#include "shared_test_classes/base/layer_test_utils.hpp"
 #include "functional_test_utils/blob_utils.hpp"
-#include "ngraph_functions/utils/ngraph_helpers.hpp"
+#include "functional_test_utils/plugin_cache.hpp"
 #include "ngraph_functions/builders.hpp"
-
 #include "ngraph_functions/pass/convert_prc.hpp"
+#include "ngraph_functions/utils/ngraph_helpers.hpp"
+#include "shared_test_classes/base/layer_test_utils.hpp"

-typedef std::tuple<
-    InferenceEngine::Precision,         // Network Precision
-    std::string,                        // Target Device
-    std::map<std::string, std::string>  // Configuration
-> GatherRemoveConvsParams;
+typedef std::tuple<InferenceEngine::Precision,          // Network Precision
+                   std::string,                         // Target Device
+                   std::map<std::string, std::string>   // Configuration
+                   >
+    GatherRemoveConvsParams;

 namespace {

@@ -35,12 +34,12 @@ std::vector<size_t> MakeGatherIndexes(size_t size) {
     return indexes;
 }

-} // namespace
+}  // namespace

 namespace LayerTestsDefinitions {

 class RemoveInputGather : public testing::WithParamInterface<GatherRemoveConvsParams>,
-    public LayerTestsUtils::LayerTestsCommon {
+                          public LayerTestsUtils::LayerTestsCommon {
 public:
     static std::string getTestCaseName(testing::TestParamInfo<GatherRemoveConvsParams> obj) {
         InferenceEngine::Precision netPrecision;
@@ -76,36 +75,37 @@ class RemoveInputGather : public testing::WithParamInterface<GatherRemoveConvsP
         const std::vector<size_t> input_shape = {1, 128};
-        const size_t input_shape_product = std::accumulate(input_shape.begin(), input_shape.end(), 1, std::multiplies<size_t>());
+        const size_t input_shape_product =
+            std::accumulate(input_shape.begin(), input_shape.end(), 1, std::multiplies<size_t>());

-        auto input_params = ngraph::builder::makeParams(ngPrc, { input_shape });
+        auto input_params = ngraph::builder::makeParams(ngPrc, {input_shape});

         const std::vector<size_t> indexes = MakeGatherIndexes(input_shape_product);
-        auto gather_indexes_node = ngraph::opset9::Constant::create(ngraph::element::i64, ov::Shape{indexes.size()}, indexes);
+        auto gather_indexes_node =
+            ngraph::opset9::Constant::create(ngraph::element::i64, ov::Shape{indexes.size()}, indexes);

         const size_t gather_axis = 1;
         auto gather_axis_node = ngraph::opset9::Constant::create(ngraph::element::i64, ngraph::Shape{}, {gather_axis});

-        auto gather_node = std::make_shared<ngraph::opset9::Gather>(input_params[0],
-                                                                    gather_indexes_node,
-                                                                    gather_axis_node);
+        auto gather_node =
+            std::make_shared<ngraph::opset9::Gather>(input_params[0], gather_indexes_node, gather_axis_node);

-        auto multiply_input_const_node = ngraph::opset9::Constant::create(ngPrc, input_shape, GenerateVector(input_shape_product, 1));
+        auto multiply_input_const_node =
+            ngraph::opset9::Constant::create(ngPrc, input_shape, GenerateVector(input_shape_product, 1));

-        auto matmul_node = std::make_shared<ngraph::opset9::MatMul>(gather_node,
-                                                                    multiply_input_const_node);
+        auto matmul_node = std::make_shared<ngraph::opset9::MatMul>(gather_node, multiply_input_const_node);

-        auto add_input_const_node = ngraph::opset9::Constant::create(ngPrc, input_shape, GenerateVector(input_shape_product, 1));
+        auto add_input_const_node =
+            ngraph::opset9::Constant::create(ngPrc, input_shape, GenerateVector(input_shape_product, 1));

-        auto add_node = std::make_shared<ngraph::opset9::Add>(matmul_node,
-                                                              add_input_const_node);
+        auto add_node = std::make_shared<ngraph::opset9::Add>(matmul_node, add_input_const_node);

         auto result = std::make_shared<ngraph::opset9::Result>(add_node);
-        function = std::make_shared<ngraph::Function>(ngraph::ResultVector{result},
-                                                      ngraph::ParameterVector{input_params});
+        function =
+            std::make_shared<ngraph::Function>(ngraph::ResultVector{result}, ngraph::ParameterVector{input_params});
     }
 };

 class RemoveOutputGather : public testing::WithParamInterface<GatherRemoveConvsParams>,
-    public LayerTestsUtils::LayerTestsCommon {
+                           public LayerTestsUtils::LayerTestsCommon {
 public:
     static std::string getTestCaseName(testing::TestParamInfo<GatherRemoveConvsParams> obj) {
         InferenceEngine::Precision netPrecision;
@@ -141,27 +141,26 @@ class RemoveOutputGather : public testing::WithParamInterface<GatherRemoveConvs
         const std::vector<size_t> input_shape = {1, 128};
-        const size_t input_shape_product = std::accumulate(input_shape.begin(), input_shape.end(), 1, std::multiplies<size_t>());
+        const size_t input_shape_product =
+            std::accumulate(input_shape.begin(), input_shape.end(), 1, std::multiplies<size_t>());

-        auto input_params = ngraph::builder::makeParams(ngPrc, { input_shape });
+        auto input_params = ngraph::builder::makeParams(ngPrc, {input_shape});
         auto abs = std::make_shared<ngraph::opset9::Abs>(input_params[0]);

         const std::vector<size_t> indexes = MakeGatherIndexes(input_shape_product);
-        auto gather_indexes_node = ngraph::opset9::Constant::create(ngraph::element::i64, ov::Shape{indexes.size()}, indexes);
+        auto gather_indexes_node =
+            ngraph::opset9::Constant::create(ngraph::element::i64, ov::Shape{indexes.size()}, indexes);

         const size_t gather_axis = 1;
         auto gather_axis_node = ngraph::opset9::Constant::create(ngraph::element::i64, ngraph::Shape{}, {gather_axis});

-        auto gather_node = std::make_shared<ngraph::opset9::Gather>(abs,
-                                            gather_indexes_node,
-                                            gather_axis_node);
+        auto gather_node = std::make_shared<ngraph::opset9::Gather>(abs, gather_indexes_node, gather_axis_node);

         auto result = std::make_shared<ngraph::opset9::Result>(gather_node);
-        function = std::make_shared<ngraph::Function>(ngraph::ResultVector{result},
-                                    ngraph::ParameterVector{input_params});
+        function =
+            std::make_shared<ngraph::Function>(ngraph::ResultVector{result}, ngraph::ParameterVector{input_params});
     }
 };

-
 TEST_P(RemoveInputGather, CompareWithRefs) {
     Run();
 }
@@ -170,33 +169,24 @@ TEST_P(RemoveOutputGather, CompareWithRefs) {
     Run();
 }

-const std::vector<InferenceEngine::Precision> netPrecisions = {
-    InferenceEngine::Precision::FP32,
-    InferenceEngine::Precision::FP16
-};
-
-const std::vector<std::map<std::string, std::string>> configs = {
-    {
-        {"GNA_DEVICE_MODE", "GNA_SW_EXACT"}
-    },
-    {
-        {"GNA_DEVICE_MODE", "GNA_SW_FP32"}
-    }
-};
+const std::vector<InferenceEngine::Precision> netPrecisions = {InferenceEngine::Precision::FP32,
+                                                               InferenceEngine::Precision::FP16};

+const std::vector<std::map<std::string, std::string>> configs = {{{"GNA_DEVICE_MODE", "GNA_SW_EXACT"}},
+                                                                 {{"GNA_DEVICE_MODE", "GNA_SW_FP32"}}};

-INSTANTIATE_TEST_SUITE_P(smoke_gather_on_cpu, RemoveInputGather,
-                        ::testing::Combine(
-                            ::testing::ValuesIn(netPrecisions),
-                            ::testing::Values(CommonTestUtils::DEVICE_GNA),
-                            ::testing::ValuesIn(configs)),
-                        RemoveInputGather::getTestCaseName);
+INSTANTIATE_TEST_SUITE_P(smoke_gather_on_cpu,
+                         RemoveInputGather,
+                         ::testing::Combine(::testing::ValuesIn(netPrecisions),
+                                            ::testing::Values(CommonTestUtils::DEVICE_GNA),
+                                            ::testing::ValuesIn(configs)),
+                         RemoveInputGather::getTestCaseName);

-INSTANTIATE_TEST_SUITE_P(smoke_gather_on_cpu, RemoveOutputGather,
-                        ::testing::Combine(
-                            ::testing::ValuesIn(netPrecisions),
-                            ::testing::Values(CommonTestUtils::DEVICE_GNA),
-                            ::testing::ValuesIn(configs)),
-                        RemoveOutputGather::getTestCaseName);
+INSTANTIATE_TEST_SUITE_P(smoke_gather_on_cpu,
+                         RemoveOutputGather,
+                         ::testing::Combine(::testing::ValuesIn(netPrecisions),
+                                            ::testing::Values(CommonTestUtils::DEVICE_GNA),
+                                            ::testing::ValuesIn(configs)),
+                         RemoveOutputGather::getTestCaseName);

-} // namespace LayerTestsDefinitions
+}  // namespace LayerTestsDefinitions
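---

Note for reviewers: the rename above (to_pre_post_process_model -> ToProcessModel) does not change behavior; the helper still turns legacy TranspositionInfo entries into a flat Gather permutation. The index formula kept in the preprocessing.cpp hunk (hw_size * (i % c_size) + i / c_size) can be sanity-checked in isolation with the sketch below. TranspositionInfoStub is an illustrative stand-in that mirrors only the three fields used in the patch; it is not the plugin's real TranspositionInfo definition.

// Standalone sanity check for the permutation built by ToProcessModel().
#include <cstddef>
#include <iostream>
#include <vector>

struct TranspositionInfoStub {
    bool transpose;                // is this fragment actually permuted?
    size_t num_transpose_rows;     // c_size in the hunk above
    size_t num_transpose_columns;  // hw_size in the hunk above
};

// Same loop as in ToProcessModel(const std::vector<TranspositionInfo>&):
// every fragment contributes either an identity range or a C<->HW swap.
std::vector<size_t> MakePermutation(const std::vector<TranspositionInfoStub>& transposes) {
    std::vector<size_t> indexes;
    for (const auto& t : transposes) {
        const size_t c_size = t.num_transpose_rows;
        const size_t hw_size = t.num_transpose_columns;
        const size_t chw_size = c_size * hw_size;
        const size_t id = indexes.size();
        for (size_t i = 0; i < chw_size; ++i) {
            const size_t idx = t.transpose ? hw_size * (i % c_size) + i / c_size : i;
            indexes.push_back(id + idx);
        }
    }
    return indexes;
}

int main() {
    // One transposed fragment with C = 2 and H*W = 3 prints 0 3 1 4 2 5:
    // the flat index pattern that re-reads a 2x3 block in transposed order.
    for (size_t idx : MakePermutation({{true, 2, 3}})) {
        std::cout << idx << ' ';
    }
    std::cout << '\n';
    return 0;
}

Indexes like these are the payload of the Gather constant inside the generated preprocessing model; whether a given input or output blob gets such a model at all is decided by the transposes map checked in ConvertTransposeMapToModel above.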