From c16e8b7bbfa9c787c25cb45a7453994ebf5b78d6 Mon Sep 17 00:00:00 2001
From: Aleksandr Voron
Date: Thu, 2 Mar 2023 18:08:41 +0100
Subject: [PATCH] cleanup #2

---
 .../src/nodes/executors/dnnl/dnnl_pooling.cpp | 130 ------------------
 .../src/nodes/executors/dnnl/dnnl_pooling.hpp | 116 ----------------
 .../intel_cpu/src/nodes/executors/pooling.hpp |   1 -
 .../src/nodes/executors/pooling_list.cpp      |   1 -
 .../src/nodes/executors/pooling_list.hpp      |   2 -
 src/plugins/intel_cpu/src/nodes/pooling.cpp   |  24 ++--
 src/plugins/intel_cpu/src/nodes/pooling.h     |  25 ----
 .../functional/single_layer_tests/pooling.cpp |  20 ---
 8 files changed, 11 insertions(+), 308 deletions(-)
 delete mode 100644 src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_pooling.cpp
 delete mode 100644 src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_pooling.hpp

diff --git a/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_pooling.cpp b/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_pooling.cpp
deleted file mode 100644
index 54d514ca1ef877..00000000000000
--- a/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_pooling.cpp
+++ /dev/null
@@ -1,130 +0,0 @@
-// Copyright (C) 2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include "dnnl_pooling.hpp"
-#include "ie_parallel.hpp"
-#include
-#include "onednn/dnnl.h"
-#include "common/primitive_cache.hpp"
-
-namespace ov {
-namespace intel_cpu {
-
-DnnlPoolingExecutor::DnnlPoolingExecutor(const ExecutorContext::CPtr context) : PoolingExecutor(context) {}
-
-bool DnnlPoolingExecutor::init(const PoolingAttrs& poolingAttrs,
-                               const std::vector& srcDescs,
-                               const std::vector& dstDescs,
-                               const dnnl::primitive_attr &attr) {
-    /*this->stream = dnnl::engine(context->getEngine());
-    this->poolingAttrs = poolingAttrs;
-    auto localAttrs = dnnl::primitive_attr(attr.get()->clone());
-    localAttrs.set_scratchpad_mode(dnnl::scratchpad_mode::user);
-
-    auto desc = createDescriptor(poolingAttrs, srcDescs, dstDescs);
-    dnnl::pooling::primitive_desc prim_desc;
-    if (!context->getImplPriorities().empty()) {
-        for (auto preferredImplType : context->getImplPriorities()) {
-            dnnl::primitive_desc_iterator itpd = desc.createPrimitiveDescriptorIterator(context->getEngine(), localAttrs);
-            while (static_cast(itpd)) {
-                auto currentImplType = parse_impl_name(itpd.impl_info_str());
-                if (currentImplType == preferredImplType) {
-                    prim_desc = itpd.get();
-                    implType = currentImplType;
-                    break;
-                }
-
-                if (!itpd.next_impl())
-                    break;
-            }
-
-            dnnl::pooling::primitive_desc prim_desc = itpd.get();
-        }
-    } else {
-        dnnl::primitive_desc_iterator itpd = desc.createPrimitiveDescriptorIterator(context->getEngine(), localAttrs);
-        implType = parse_impl_name(itpd.impl_info_str());
-        prim_desc = itpd.get();
-    }
-
-    if (!prim_desc)
-        return false;
-
-    auto scratchpadMemoryDesc = DnnlExtensionUtils::makeDescriptor(prim_desc.query_md(dnnl::query::scratchpad_md));
-    scratchpadMemory = context->getScratchPad()->createScratchPadMem(scratchpadMemoryDesc);
-
-    prim = std::make_shared(prim_desc);*/
-
-    return true;
-}
-
-void DnnlPoolingExecutor::exec(const std::vector& src, const std::vector& dst, std::unordered_map postOpsArgs) {
-    /*std::unordered_map primArgs;
-
-
-    primArgs[DNNL_ARG_SCRATCHPAD] = scratchpadMemory->GetPrimitive();
-    primArgs[DNNL_ARG_SRC_0] = src[0]->GetPrimitive();
-    primArgs[DNNL_ARG_WEIGHTS_0] = src[1]->GetPrimitive();
-    primArgs[DNNL_ARG_DST] = dst[0]->GetPrimitive();
-    if (poolingAttrs.withBias)
-        primArgs[DNNL_ARG_BIAS] = src[2]->GetPrimitive();
-
-    for (auto & entry : postOpsArgs) {
-        primArgs[entry.first] = entry.second->GetPrimitive();
-    }
-
-    (*prim).execute(stream, primArgs);*/
-}
-
-DnnlPoolingExecutor::Key::Key(const PoolingAttrs& poolingAttrs,
-                              const std::vector& srcDescs,
-                              const std::vector& dstDescs,
-                              const dnnl::primitive_attr &attr) {
-    /*this->poolingAttrs = poolingAttrs;
-    this->inp0 = MemoryDescUtils::convertToDnnlMemoryDesc(srcDescs[0]);
-    this->inp1 = MemoryDescUtils::convertToDnnlMemoryDesc(srcDescs[1]);
-    this->bias = poolingAttrs.withBias ? MemoryDescUtils::convertToDnnlMemoryDesc(srcDescs[2]) : nullptr;
-    this->out = MemoryDescUtils::convertToDnnlMemoryDesc(dstDescs[0]);
-    this->attr = attr;*/
-}
-
-size_t DnnlPoolingExecutor::Key::hash() const {
-    using namespace dnnl::impl;
-    using namespace dnnl::impl::primitive_hashing;
-
-    size_t seed = 0;
-    /*seed = hash_combine(seed, poolingAttrs.transposeA);
-    seed = hash_combine(seed, poolingAttrs.transposeB);
-    for (const auto& ptr : {inp0, inp1, bias, out}) {
-        if (ptr) {
-            seed = hash_combine(seed, get_md_hash(ptr->getDnnlDesc().data));
-        }
-    }
-
-    seed = hash_combine(seed, get_attr_hash(*attr.get()));*/
-    return seed;
-}
-
-bool DnnlPoolingExecutor::Key::operator==(const Key& rhs) const {
-    bool retVal = true;
-    /*retVal = retVal && poolingAttrs.transposeA == rhs.poolingAttrs.transposeA;
-    retVal = retVal && poolingAttrs.transposeB == rhs.poolingAttrs.transposeB;
-
-    if (inp0 != rhs.inp0) {
-        retVal = retVal && inp0 && rhs.inp0 && inp0->getDnnlDesc() == rhs.inp0->getDnnlDesc();
-    }
-    if (inp1 != rhs.inp1) {
-        retVal = retVal && inp1 && rhs.inp1 && inp1->getDnnlDesc() == rhs.inp1->getDnnlDesc();
-    }
-    if (bias != rhs.bias) {
-        retVal = retVal && bias && rhs.bias && bias->getDnnlDesc() == rhs.bias->getDnnlDesc();
-    }
-    if (out != rhs.out) {
-        retVal = retVal && out && rhs.out && out->getDnnlDesc() == rhs.out->getDnnlDesc();
-    }
-    retVal = retVal && *attr.get() == *rhs.attr.get();*/
-    return retVal;
-}
-
-} // namespace intel_cpu
-} // namespace ov
diff --git a/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_pooling.hpp b/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_pooling.hpp
deleted file mode 100644
index 69e42358a3f6fa..00000000000000
--- a/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_pooling.hpp
+++ /dev/null
@@ -1,116 +0,0 @@
-// Copyright (C) 2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-// TODO: remove relative path
-#include "../pooling.hpp"
-#include "dnnl.hpp"
-#include "dnnl_descriptor.h"
-#include "memory_desc/dnnl_blocked_memory_desc.h"
-
-namespace ov {
-namespace intel_cpu {
-
-
-
-class DnnlPoolingExecutor : public PoolingExecutor {
-public:
-    DnnlPoolingExecutor(const ExecutorContext::CPtr context);
-
-    bool init(const PoolingAttrs& poolingAttrs,
-              const std::vector& srcDescs,
-              const std::vector& dstDescs,
-              const dnnl::primitive_attr &attr) override;
-    void exec(const std::vector& src,
-              const std::vector& dst,
-              std::unordered_map postOpsArgs) override;
-
-    impl_desc_type getImplType() const override {
-        return implType;
-    }
-
-    static DnnlDesriptor createDescriptor(const PoolingAttrs& poolingAttrs,
-                                          const std::vector& srcDescs,
-                                          const std::vector& dstDescs) {
-        /*auto inputShape0 = srcDescs[0]->getShape();
-        const VectorDims inStrides0 = getStridesAndModifyShape(inputShape0, matmulAttrs.transposeA);
-        auto inDataDesc0 = std::make_shared(srcDescs[0]->getPrecision(), inputShape0, inStrides0);
-
-        auto inputShape1 = srcDescs[1]->getShape();
-        const VectorDims inStrides1 = getStridesAndModifyShape(inputShape1, matmulAttrs.transposeB);
-        auto inDataDesc1 = std::make_shared(srcDescs[1]->getPrecision(), inputShape1, inStrides1);
-
-        auto outputShape = dstDescs[0]->getShape();
-        auto outDataDesc = std::make_shared(dstDescs[0]->getPrecision(), outputShape);
-
-        std::shared_ptr matmul_desc;
-        if (matmulAttrs.withBias) {
-            // oneDNN matmul requires shape for bias desc to be the same rank
-            VectorDims biasDims(outputShape.getRank(), 1);
-            const auto outDims = outputShape.getStaticDims();
-            const auto chIdx = outputShape.getRank() - 1;
-            biasDims[chIdx] = outDims[chIdx];
-            const auto bdt = DnnlExtensionUtils::IEPrecisionToDataType(srcDescs[2]->getPrecision());
-            auto biasDesc = dnnl::memory::desc(DnnlExtensionUtils::convertToDnnlDims(biasDims), bdt, dnnl::memory::format_tag::any);
-
-            matmul_desc.reset(new dnnl::matmul::desc(inDataDesc0->getDnnlDesc(),
-                                                     inDataDesc1->getDnnlDesc(),
-                                                     biasDesc,
-                                                     outDataDesc->getDnnlDesc()));
-        } else {
-            matmul_desc.reset(new dnnl::matmul::desc(inDataDesc0->getDnnlDesc(),
-                                                     inDataDesc1->getDnnlDesc(),
-                                                     outDataDesc->getDnnlDesc()));
-        }*/
-
-        std::shared_ptr pooling_desc;
-
-        return DnnlDesriptor(pooling_desc);
-    }
-
-
-    struct Key {
-        PoolingAttrs poolingAttrs;
-        DnnlMemoryDescPtr inp0;
-        DnnlMemoryDescPtr inp1;
-        DnnlMemoryDescPtr bias;
-        DnnlMemoryDescPtr out;
-        dnnl::primitive_attr attr;
-
-        Key(const PoolingAttrs& poolingAttrs,
-            const std::vector& srcDescs,
-            const std::vector& dstDescs,
-            const dnnl::primitive_attr &attr);
-        size_t hash() const;
-        bool operator==(const Key& rhs) const;
-    };
-
-private:
-    //static std::pair makeDummyInputShapes(const MatMulAttrs& matmulAttrs, const Shape& in0, const Shape& in1);
-
-    dnnl::stream stream;
-
-    PoolingAttrs poolingAttrs;
-    std::shared_ptr prim;
-    MemoryPtr scratchpadMemory;
-    impl_desc_type implType = impl_desc_type::undef;
-};
-
-class DnnlPoolingExecutorBuilder : public PoolingExecutorBuilder {
-public:
-    bool isSupported(const PoolingAttrs& poolingAttrs,
-                     const std::vector& srcDescs,
-                     const std::vector& dstDescs) const override {
-        // TODO: add correct conditions
-        return true;
-    }
-
-    PoolingExecutorPtr makeExecutor(const ExecutorContext::CPtr context) const override {
-        return std::make_shared(context);
-    }
-};
-
-} // namespace intel_cpu
-} // namespace ov
\ No newline at end of file
diff --git a/src/plugins/intel_cpu/src/nodes/executors/pooling.hpp b/src/plugins/intel_cpu/src/nodes/executors/pooling.hpp
index dd7d5506b1a0b1..ff8bc662227101 100644
--- a/src/plugins/intel_cpu/src/nodes/executors/pooling.hpp
+++ b/src/plugins/intel_cpu/src/nodes/executors/pooling.hpp
@@ -24,7 +24,6 @@ struct PoolingAttrs {
     std::vector stride;
     std::vector kernel;
     std::vector dilation;
-    //ov::Strides dilation;
     std::vector data_pad_begin;
     std::vector data_pad_end;
 
diff --git a/src/plugins/intel_cpu/src/nodes/executors/pooling_list.cpp b/src/plugins/intel_cpu/src/nodes/executors/pooling_list.cpp
index 043f2ded91f44d..4b130f37bfff57 100644
--- a/src/plugins/intel_cpu/src/nodes/executors/pooling_list.cpp
+++ b/src/plugins/intel_cpu/src/nodes/executors/pooling_list.cpp
@@ -10,7 +10,6 @@ namespace intel_cpu {
 const std::vector& getPoolingExecutorsList() {
     static std::vector descs = {
         OV_CPU_INSTANCE_ACL(ExecutorType::Acl, std::make_shared())
-        //OV_CPU_INSTANCE_DNNL(ExecutorType::Dnnl, std::make_shared())
     };
 
     return descs;
diff --git a/src/plugins/intel_cpu/src/nodes/executors/pooling_list.hpp b/src/plugins/intel_cpu/src/nodes/executors/pooling_list.hpp
index b18ad2a9ce8d78..7d181eeb5b55be 100644
--- a/src/plugins/intel_cpu/src/nodes/executors/pooling_list.hpp
+++ b/src/plugins/intel_cpu/src/nodes/executors/pooling_list.hpp
@@ -11,8 +11,6 @@
 #include "acl/acl_pooling.hpp"
 #endif
 
-//#include "dnnl/dnnl_matmul.hpp"
-
 namespace ov {
 namespace intel_cpu {
 
diff --git a/src/plugins/intel_cpu/src/nodes/pooling.cpp b/src/plugins/intel_cpu/src/nodes/pooling.cpp
index 77470733cbd376..ad1a9a81124b52 100644
--- a/src/plugins/intel_cpu/src/nodes/pooling.cpp
+++ b/src/plugins/intel_cpu/src/nodes/pooling.cpp
@@ -4,19 +4,17 @@
 
 #include "pooling.h"
 
-#include
-#include
-#include
-#include
-
-#include
+#include "fake_quantize.h"
+#include "conv.h"
+#include "concat.h"
 #include
 #include
-
-#include "concat.h"
-#include "conv.h"
-#include "fake_quantize.h"
+#include
+#include
+#include
+#include
 #include "memory_desc/dnnl_blocked_memory_desc.h"
+#include
 
 #if defined(OV_CPU_WITH_ACL)
 #    include "executors/acl/acl_utils.hpp"
@@ -553,8 +551,8 @@ dnnl::algorithm Pooling::getPoolingAlgorithm() const {
 
 std::shared_ptr Pooling::createDescriptorInternal(
     const dnnl::memory::desc& in_candidate,
-        const dnnl::memory::desc& out_candidate,
-        const dnnl::algorithm alg) const {
+    const dnnl::memory::desc& out_candidate,
+    const dnnl::algorithm alg) const {
     return createDescriptorHelper(in_candidate,
                                   out_candidate,
                                   alg,
@@ -669,7 +667,7 @@ void Pooling::initSupportedPrimitiveDescriptors() {
             }
         }
     }
-    }
+}
 
 void Pooling::initDescriptor(const NodeConfig& config) {
     if (useACL)
diff --git a/src/plugins/intel_cpu/src/nodes/pooling.h b/src/plugins/intel_cpu/src/nodes/pooling.h
index 9689fd41d7f3a6..3b1f1b4a28d5dd 100644
--- a/src/plugins/intel_cpu/src/nodes/pooling.h
+++ b/src/plugins/intel_cpu/src/nodes/pooling.h
@@ -54,33 +54,8 @@ class Pooling : public Node {
                                  const dnnl::algorithm alg) const;
 
     AttrPtr pAttr;
-
     Shape inShape;
-
     bool useACL = false;
-    //bool isMaxPool8 = false;
-    //bool auto_pad = false;
-    //bool exclude_pad = false;
-    //std::vector dilation;
-    //std::vector stride;
-    //std::vector kernel;
-
-    /// Effective padding. Used to define correct output shape by oneDNN
-    /// reshape formula: (iw - kernel + pad_l + pad_r) / strides[i - 2] + 1
-    /// should be passed into pooling desc constructor.
-    //std::vector effective_pad_begin;
-    //std::vector effective_pad_end;
-
-    /// Effective dilation. Used to define correct dilation for OneDNN.
-    /// For OneDNN default dilation is vector of zero
-    //std::vector effective_dilation;
-
-    /// Effective pad value. Describe how much zero element added to input
-    /// data tensor. May be less than "Effective padding" values.
-    /// If pooling window is out of this padding, the region of averaging
-    /// is decreased.
-    //std::vector data_pad_begin;
-    //std::vector data_pad_end;
 };
 
 } // namespace node
diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/pooling.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/pooling.cpp
index 911ad7943e13a3..f0163c354b8da2 100644
--- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/pooling.cpp
+++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/pooling.cpp
@@ -116,15 +116,6 @@ class PoolingLayerCPUTest : public testing::WithParamInterface
         std::shared_ptr pooling = ngraph::builder::makePooling(poolInput,
                                                                stride,
                                                                padBegin,
@@ -203,17 +194,6 @@ class MaxPoolingV8LayerCPUTest : public testing::WithParamInterface
         std::shared_ptr pooling = ngraph::builder::makeMaxPoolingV8(params[0],
                                                                     stride,
                                                                     dilation,
                                                                     padBegin,
                                                                     padEnd,
                                                                     kernel,
                                                                     roundingType,
                                                                     padType,