Skip to content

Commit

Permalink
final changes
Browse files Browse the repository at this point in the history
  • Loading branch information
allnes committed Nov 23, 2023
1 parent fb61c1a commit 2615bcd
Show file tree
Hide file tree
Showing 8 changed files with 697 additions and 507 deletions.
17 changes: 10 additions & 7 deletions src/plugins/intel_cpu/src/nodes/deconv.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -468,8 +468,8 @@ void Deconvolution::getSupportedDescriptors() {
}
VectorDims inDims, outDims;
std::tie(inDims, outDims) = makeDummyInOutShape();
inShape = Shape(inDims);
Shape outShape(outDims);
inShape = Shape(inDims);
outShape = Shape(outDims);
initPaddingR(inShape, outShape);

#if defined(OV_CPU_WITH_ACL)
Expand All @@ -486,10 +486,10 @@ void Deconvolution::getSupportedDescriptors() {

for (size_t i = 0; i < getParentEdges().size(); ++i) {
config.inConfs[i].setMemDesc(
creatorsMap.at(format)->createSharedDesc(getOriginalInputPrecisionAtPort(i), getInputShapeAtPort(i)));
creatorsMap.at(format)->createSharedDesc(getOriginalInputPrecisionAtPort(0), getInputShapeAtPort(i)));
}
config.outConfs[0].setMemDesc(
creatorsMap.at(format)->createSharedDesc(getOriginalOutputPrecisionAtPort(0), getOutputShapeAtPort(0)));
creatorsMap.at(format)->createSharedDesc(getOriginalOutputPrecisionAtPort(0), outShape));

std::vector<MemoryDescPtr> srcMemoryDescs;
for (size_t i = 0; i < config.inConfs.size(); i++) {
Expand All @@ -502,7 +502,7 @@ void Deconvolution::getSupportedDescriptors() {

return AclDeconvExecutorBuilder::customIsSupported(deconvAttrs, srcMemoryDescs, dstMemoryDescs);
};
useACL = checkDesc(LayoutType::nspc) || checkDesc(LayoutType::ncsp);
useACL = checkDesc(LayoutType::ncsp);
}
if (useACL) return;
#endif
Expand Down Expand Up @@ -1221,14 +1221,17 @@ void Deconvolution::initSupportedPrimitiveDescriptors() {
config.inConfs.resize(getParentEdges().size());
config.outConfs.resize(getOriginalOutputsNumber());

for (size_t i = 0; i < getParentEdges().size(); ++i) {
config.inConfs[0].setMemDesc(
// ACL expected equal precision
creatorsMap.at(format)->createSharedDesc(getOriginalInputPrecisionAtPort(0), inShape));
for (size_t i = 1; i < getParentEdges().size(); ++i) {
config.inConfs[i].setMemDesc(
// ACL expected equal precision
creatorsMap.at(format)->createSharedDesc(getOriginalInputPrecisionAtPort(0), getInputShapeAtPort(i)));
}
config.outConfs[0].setMemDesc(
// ACL expected equal precision
creatorsMap.at(format)->createSharedDesc(getOriginalInputPrecisionAtPort(0), getOutputShapeAtPort(0)));
creatorsMap.at(format)->createSharedDesc(getOriginalInputPrecisionAtPort(0), outShape));

std::vector<MemoryDescPtr> srcMemoryDescs;
for (size_t i = 0; i < config.inConfs.size(); i++) {
Expand Down
2 changes: 1 addition & 1 deletion src/plugins/intel_cpu/src/nodes/deconv.h
Original file line number Diff line number Diff line change
Expand Up @@ -100,7 +100,7 @@ class Deconvolution : public Node {
bool useACL = false;
DeconvAttrs deconvAttrs;

Shape inShape;
Shape inShape, outShape;;

AttrPtr pAttr;

Expand Down
2 changes: 2 additions & 0 deletions src/plugins/intel_cpu/src/nodes/executors/acl/acl_deconv.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -204,6 +204,8 @@ bool AclDeconvExecutorBuilder::customIsSupported(const DeconvAttrs &deconvAttrs,
if (!one_of(dilation_x, static_cast<unsigned int >(0), static_cast<unsigned int >(1)) ||
!one_of(dilation_y, static_cast<unsigned int >(0), static_cast<unsigned int >(1))) return false;

auto abc = srcDescs[0]->getShape().getDims();

size_t in_h = srcDescs[0]->hasLayoutType(LayoutType::ncsp) ? srcDescs[0]->getShape().getDims()[2] : srcDescs[0]->getShape().getDims()[1];
size_t in_w = srcDescs[0]->hasLayoutType(LayoutType::ncsp) ? srcDescs[0]->getShape().getDims()[3] : srcDescs[0]->getShape().getDims()[2];

Expand Down
8 changes: 4 additions & 4 deletions src/plugins/intel_cpu/src/plugin.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -160,10 +160,10 @@ std::shared_ptr<Engine::SchedulerGuard> Engine::SchedulerGuard::instance() {
}

Engine::SchedulerGuard::~SchedulerGuard() {
// To save the state of scheduler after ACLScheduler has been executed
// TODO: find out the cause of the state
std::lock_guard<std::mutex> lock{this->dest_mutex};
arm_compute::Scheduler::set(arm_compute::Scheduler::Type::ST);
// NOTE(review): the commented-out block below duplicates the active lines
// above — this looks like diff-rendering residue (removed vs. added hunk
// sides shown together). Confirm against the repository whether the
// Scheduler reset to single-threaded (ST) mode is meant to remain enabled
// or to be disabled by this change.
// // To save the state of scheduler after ACLScheduler has been executed
// // TODO: find out the cause of the state
// std::lock_guard<std::mutex> lock{this->dest_mutex};
// arm_compute::Scheduler::set(arm_compute::Scheduler::Type::ST);
}
#endif

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -55,4 +55,29 @@ class DeconvolutionLayerCPUTest : public testing::WithParamInterface<DeconvLayer
size_t inferRequestNum = 0;
};

/* COMMON PARAMS */
// Fusing variants applied to each deconvolution test case below.
const std::vector<fusingSpecificParams> fusingParamsSet{
emptyFusingSpec,
// Scale-shift fusing is compiled out on ARM/ARM64 builds (see guard).
#if !defined(OPENVINO_ARCH_ARM64) && !defined(OPENVINO_ARCH_ARM)
fusingScaleShift
#endif
};

// Single empty entry meaning "no explicit output padding" for the generators.
const std::vector<std::vector<ptrdiff_t>> emptyOutputPadding = { {} };

/* ============= Deconvolution params (planar layout) ============= */
const InferenceEngine::SizeVector numOutChannels_Planar = { 6 };

// Default (empty) plugin configuration passed to the CPU plugin.
const std::map<std::string, std::string> cpuEmptyPluginConfig;

/* ============= Deconvolution params (blocked layout) ============= */
const InferenceEngine::SizeVector numOutChannels_Blocked = { 64 };

/* ============= Deconvolution params (2D) ============= */
const std::vector<InferenceEngine::SizeVector> kernels2d = { {3, 3}, {1, 1} };
const std::vector<InferenceEngine::SizeVector> strides2d = { {1, 1}, {2, 2} };
const std::vector<std::vector<ptrdiff_t>> padBegins2d = { {0, 0} };
const std::vector<std::vector<ptrdiff_t>> padEnds2d = { {0, 0} };
const std::vector<InferenceEngine::SizeVector> dilations2d = { {1, 1} };
} // namespace CPULayerTestsDefinitions
Original file line number Diff line number Diff line change
@@ -0,0 +1,118 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "single_layer_tests/classes/convolution_backprop_data.hpp"
#include "shared_test_classes/single_layer/convolution_backprop_data.hpp"
#include "test_utils/cpu_test_utils.hpp"
#include "test_utils/filter_cpu_info.hpp"
#include "test_utils/fusing_test_utils.hpp"

using namespace CPUTestUtils;
using namespace ov::test;

namespace CPULayerTestsDefinitions {

namespace {

/* INSTANCES */
/* ============= Deconvolution (Planar 2D) ============= */
// Cartesian product of 2D deconvolution attributes (kernel, stride, pads,
// dilation, out-channels) with EXPLICIT padding and no output padding.
const auto convParams_ExplicitPadding_Planar_2D = ::testing::Combine(
::testing::ValuesIn(kernels2d),
::testing::ValuesIn(strides2d),
::testing::ValuesIn(padBegins2d),
::testing::ValuesIn(padEnds2d),
::testing::ValuesIn(dilations2d),
::testing::ValuesIn(numOutChannels_Planar),
::testing::Values(ngraph::op::PadType::EXPLICIT),
::testing::ValuesIn(emptyOutputPadding)
);

// Smoke inputs: a single static shape, constant weights, no output shape spec.
const std::vector<DeconvInputData> Planar_2D_inputs_smoke = {
DeconvInputData{
InputShape{{}, {{ 2, 12, 7, 7 }}},
ngraph::helpers::InputLayerType::CONSTANT,
{}
},
};

// Nightly inputs: dynamic batch {1,10} with three concrete shapes and an
// explicit {15, 15} output spatial shape.
const std::vector<DeconvInputData> Planar_2D_inputs_nightly = {
DeconvInputData{
InputShape{{{1, 10}, 12, 7, 7}, {{ 1, 12, 7, 7}, { 2, 12, 7, 7}, { 3, 12, 7, 7}}},
ngraph::helpers::InputLayerType::CONSTANT,
{{15, 15}}
},
};

// Planar 2D FP32 smoke suite over the explicit-padding parameter grid,
// restricted to the gemm-2D ACL CPU specialization.
INSTANTIATE_TEST_SUITE_P(smoke_Deconv_2D_Planar_FP32, DeconvolutionLayerCPUTest,
::testing::Combine(
convParams_ExplicitPadding_Planar_2D,
::testing::ValuesIn(Planar_2D_inputs_smoke),
::testing::Values(ElementType::f32),
::testing::ValuesIn(fusingParamsSet),
::testing::ValuesIn(filterCPUInfo({conv_gemm_2D_acl})),
::testing::Values(cpuEmptyPluginConfig)),
DeconvolutionLayerCPUTest::getTestCaseName);

// Same grid as the smoke suite but over the dynamic-shape nightly inputs.
INSTANTIATE_TEST_SUITE_P(nightly_Deconv_2D_Planar_FP32, DeconvolutionLayerCPUTest,
::testing::Combine(
convParams_ExplicitPadding_Planar_2D,
::testing::ValuesIn(Planar_2D_inputs_nightly),
::testing::Values(ElementType::f32),
::testing::ValuesIn(fusingParamsSet),
::testing::ValuesIn(filterCPUInfo({conv_gemm_2D_acl})),
::testing::Values(cpuEmptyPluginConfig)),
DeconvolutionLayerCPUTest::getTestCaseName);

// NOTE(review): this suite is named BF16 yet passes ElementType::f32; the
// bf16 behavior presumably comes from cpuBF16PluginConfig — confirm that is
// the intended mechanism rather than a leftover f32 element type.
INSTANTIATE_TEST_SUITE_P(nightly_Deconv_2D_Planar_BF16, DeconvolutionLayerCPUTest,
::testing::Combine(
convParams_ExplicitPadding_Planar_2D,
::testing::ValuesIn(Planar_2D_inputs_nightly),
::testing::Values(ElementType::f32),
::testing::ValuesIn(fusingParamsSet),
::testing::ValuesIn(filterCPUInfo({conv_gemm_2D_acl})),
::testing::Values(cpuBF16PluginConfig)),
DeconvolutionLayerCPUTest::getTestCaseName);

/* ============= Deconvolution (Blocked 2D) ============= */
// NOTE(review): Blocked_2D_inputs_smoke is not referenced by any
// INSTANTIATE_TEST_SUITE_P in this file and lives in an anonymous
// namespace — confirm whether a blocked-layout suite was meant to use it
// or whether it is dead data left over from the refactor.
const std::vector<DeconvInputData> Blocked_2D_inputs_smoke = {
DeconvInputData{
InputShape{{}, {{ 2, 67, 7, 7 }}},
ngraph::helpers::InputLayerType::CONSTANT,
{}
},
};

/* ============= Deconvolution auto padding tests ============= */
// Static-shape input used by the SAME_UPPER/SAME_LOWER auto-padding suite.
const std::vector<DeconvInputData> inputs_2D_AutoPadding = {
DeconvInputData{
InputShape{{}, {{ 2, 67, 7, 7 }}},
ngraph::helpers::InputLayerType::CONSTANT,
{}
},
};

// Auto-padding parameter grid: same 2D attribute ranges as the explicit
// suite, but with SAME_UPPER/SAME_LOWER pad types and blocked-layout
// out-channel count (the explicit pads above are ignored for SAME_* modes).
const auto deconvParams_AutoPadding_2D = ::testing::Combine(
::testing::ValuesIn(kernels2d),
::testing::ValuesIn(strides2d),
::testing::ValuesIn(padBegins2d),
::testing::ValuesIn(padEnds2d),
::testing::ValuesIn(dilations2d),
::testing::ValuesIn(numOutChannels_Blocked),
::testing::Values(ngraph::op::PadType::SAME_UPPER, ngraph::op::PadType::SAME_LOWER),
::testing::ValuesIn(emptyOutputPadding)
);

// FP32 auto-padding smoke suite on the gemm-2D ACL specialization; no fusing.
INSTANTIATE_TEST_SUITE_P(smoke_Deconv_2D_AutoPadding_FP32, DeconvolutionLayerCPUTest,
::testing::Combine(
deconvParams_AutoPadding_2D,
::testing::ValuesIn(inputs_2D_AutoPadding),
::testing::Values(ElementType::f32),
::testing::Values(emptyFusingSpec),
::testing::ValuesIn(filterCPUInfo({conv_gemm_2D_acl})),
::testing::Values(cpuEmptyPluginConfig)),
DeconvolutionLayerCPUTest::getTestCaseName);

} // namespace

} // namespace CPULayerTestsDefinitions
Loading

0 comments on commit 2615bcd

Please sign in to comment.