forked from openvinotoolkit/openvino
[CPU] [ARM] FullyConnected: int8 support
Showing 106 changed files with 955 additions and 51 deletions.
src/plugins/intel_cpu/src/nodes/executors/acl/acl_gemm.cpp (92 additions, 0 deletions)
@@ -0,0 +1,92 @@
// Copyright (C) 2024 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "acl_gemm.hpp"

#include "arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h"

#include "nodes/executors/executor.hpp"
#include "nodes/executors/memory_arguments.hpp"
#include "utils/debug_capabilities.h"
#include "nodes/executors/debug_messages.hpp"
#include "nodes/executors/implementation_utils.hpp"

namespace ov {
namespace intel_cpu {

ACLGEMMExecutor::ACLGEMMExecutor(const GEMMAttrs &attrs,
                                 const PostOps &postOps,
                                 const MemoryArgs &memory,
                                 const ExecutorContext::CPtr context) {
    aclTensorAttrs.hasLayoutTypeNHWC = memory.at(ARG_SRC)->getDescPtr()->hasLayoutType(LayoutType::nspc);
}

bool ACLGEMMExecutor::supports(const GEMMConfig &config) {
    // TODO: check weights layout
    const auto attrs = static_cast<GEMMAttrs>(config.attrs);
    if (std::any_of(
            attrs.dequantizationScales.begin(),
            attrs.dequantizationScales.end(),
            [](float value) { return value != 1.f; })) {
        return false;
    }

    const auto src1_dims = std::dynamic_pointer_cast<BlockedMemoryDesc>(config.descs.at(ARG_SRC))->getBlockDims();
    const auto src2_dims = std::dynamic_pointer_cast<BlockedMemoryDesc>(config.descs.at(ARG_WEI))->getBlockDims();

    VERIFY(one_of(srcType(config), ov::element::f16, ov::element::f32, ov::element::i8, ov::element::u8), UNSUPPORTED_SRC_PRECISIONS);
    VERIFY(postOpsNumbers(config) < 2, UNSUPPORTED_NUMBER_OF_POSTOPS);
    VERIFY(one_of(srcRank(config), 2U, 3U, 4U), UNSUPPORTED_SRC_RANK);
    VERIFY(one_of(weiRank(config), 2U, 3U, 4U), UNSUPPORTED_WEI_RANK);
    VERIFY(static_cast<GEMMAttrs>(config.attrs).dequantizationScales.size() <= 1, UNSUPPORTED_PER_CHANNEL_QUANTIZATION);
    return true;
}

void ACLGEMMExecutor::updateTensorsShapes(ACLMemoryShapes& aclMemoryShapes) {}

arm_compute::Status ACLGEMMExecutor::validateTensorsInfo(const ACLMemoryInfo& aclMemoryInfos) {
    const auto matMulValid = arm_compute::NEGEMMLowpMatrixMultiplyCore::validate(
        aclMemoryInfos[ACLArgs::ACL_SRC_0].get(),
        aclMemoryInfos[ACLArgs::ACL_WEI].get(),
        aclMemoryInfos[ACLArgs::ACL_BIAS].get(),
        aclMemoryInfos[ACLArgs::ACL_DST].get(),
        gemmInfo);
    return matMulValid;
}

ACLFunction ACLGEMMExecutor::configureFunction(const ACLMemoryTensors& aclMemoryTensors) {
    auto matMull = std::make_unique<arm_compute::NEGEMMLowpMatrixMultiplyCore>();
    matMull->configure(
        aclMemoryTensors[ACLArgs::ACL_SRC_0].get(),
        aclMemoryTensors[ACLArgs::ACL_WEI].get(),
        // TODO: fix me
        nullptr,  // aclMemoryTensors[ACLArgs::ACL_BIAS].get(),
        aclMemoryTensors.at(ACLArgs::ACL_DST).get());
    return matMull;
}

ACLInfo ACLGEMMExecutor::initTensorInfo(const arm_compute::TensorShape& tensorShape,
                                        const arm_compute::DataType& dataType,
                                        const arm_compute::DataLayout& dataLayout) {
    arm_compute::DataType fcDataType;
    switch (dataType) {
    case arm_compute::DataType::S8: {
        fcDataType = arm_compute::DataType::QASYMM8_SIGNED;
        break;
    }
    case arm_compute::DataType::U8: {
        fcDataType = arm_compute::DataType::QASYMM8;
        break;
    }
    default: {
        fcDataType = dataType;
        break;
    }
    }

    return ACLCommonExecutor::initTensorInfo(tensorShape, fcDataType, dataLayout);
}

}  // namespace intel_cpu
}  // namespace ov
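The int8-specific piece here is the data-type remapping in initTensorInfo: plain S8/U8 tensor infos are rewritten as ACL's asymmetric-quantized types before NEGEMMLowpMatrixMultiplyCore validates or runs. Below is a minimal standalone sketch of that mapping; the local enum only mirrors the handful of arm_compute::DataType values involved, so the snippet compiles without the ACL headers.

// Minimal standalone sketch of the S8/U8 -> quantized-type remapping done in
// ACLGEMMExecutor::initTensorInfo. The enum below only mirrors the relevant
// arm_compute::DataType values; it is not the real ACL type.
#include <iostream>

enum class DataType { S8, U8, F16, F32, QASYMM8, QASYMM8_SIGNED };

// Map plain 8-bit integer types to their asymmetric-quantized counterparts,
// leaving every other type untouched (the "default" branch above).
DataType toGemmLowpType(DataType dataType) {
    switch (dataType) {
    case DataType::S8:
        return DataType::QASYMM8_SIGNED;  // signed 8-bit -> QASYMM8_SIGNED
    case DataType::U8:
        return DataType::QASYMM8;         // unsigned 8-bit -> QASYMM8
    default:
        return dataType;                  // f16/f32 etc. pass through
    }
}

int main() {
    std::cout << (toGemmLowpType(DataType::S8) == DataType::QASYMM8_SIGNED) << "\n";  // prints 1
    std::cout << (toGemmLowpType(DataType::F32) == DataType::F32) << "\n";            // prints 1
}

Keeping the default branch a pass-through is what lets the same executor path still serve the f16/f32 inputs that supports() also accepts.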
src/plugins/intel_cpu/src/nodes/executors/acl/acl_gemm.hpp (46 additions, 0 deletions)
@@ -0,0 +1,46 @@
// Copyright (C) 2018-2024 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include "acl_common_executor.hpp"
#include "nodes/executors/gemm_config.hpp"

namespace ov {
namespace intel_cpu {

class ACLGEMMExecutor : public ACLCommonExecutor {
public:
    ACLGEMMExecutor(const GEMMAttrs& attrs,
                    const PostOps& postOps,
                    const MemoryArgs& memory,
                    const ExecutorContext::CPtr context);

    static bool supports(const GEMMConfig& config);

    void updateTensorsShapes(ACLMemoryShapes& aclMemoryShapes) override;

    arm_compute::Status validateTensorsInfo(const ACLMemoryInfo& aclMemoryInfos) override;

    ACLFunction configureFunction(const ACLMemoryTensors& aclMemoryTensors) override;

    impl_desc_type implType() const override {
        return impl_desc_type::gemm_acl;
    }

protected:
    ACLInfo initTensorInfo(const arm_compute::TensorShape& tensorShape,
                           const arm_compute::DataType& dataType,
                           const arm_compute::DataLayout& dataLayout) override;

private:
    arm_compute::FullyConnectedLayerInfo fullyConnectedLayerInfo;
    arm_compute::GEMMInfo gemmInfo;
    arm_compute::WeightsInfo weightsInfo;
};

using ACLGEMMExecutorPtr = std::shared_ptr<ACLGEMMExecutor>;

}  // namespace intel_cpu
}  // namespace ov
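Making supports() static allows a capability check before any executor object exists: a caller can reject int8 configurations that ACL cannot handle and fall back to another implementation without constructing any ACL state. The helper below is a hypothetical illustration of that pattern, not the actual OpenVINO executor factory; only Executor::supports and the constructor shape are taken from the header above.

// Hypothetical "check capabilities, then construct" helper; illustrative only.
#include <memory>
#include <utility>

template <typename Executor, typename Config, typename... CtorArgs>
std::shared_ptr<Executor> makeIfSupported(const Config& config, CtorArgs&&... args) {
    if (!Executor::supports(config)) {
        return nullptr;  // caller falls back to another implementation
    }
    return std::make_shared<Executor>(std::forward<CtorArgs>(args)...);
}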
src/plugins/intel_cpu/src/nodes/executors/gemm_attrs.hpp (34 additions, 0 deletions)
@@ -0,0 +1,34 @@
// Copyright (C) 2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <vector>

#include "cpu_memory.h"
#include "executor_config.hpp"

namespace ov {
namespace intel_cpu {

// @todo require explicit initialization of all the attributes?
struct GEMMAttrs {
    // @todo probably we don't need the withBias flag, since this information is already
    // part of the src memory descs
    bool withBias = false;
    // TODO: why is the default false?
    bool weightsNonTransposed = true;
    bool sparseWeights = false;
    // @todo only memory descriptors should be part of the attributes;
    // actual memory should be passed into "execute" or "prepareMemory" calls
    std::vector<float> dequantizationScales;
    // @todo should be passed as an additional memory input?
    MemoryCPtr decompressionSubtractPtr;
    MemoryCPtr decompressionMultiplyPtr;
    uint64_t dynamicQuantizationGroupSize;
    ov::intel_cpu::Config::ModelType modelType = ov::intel_cpu::Config::ModelType::Unknown;
};

}  // namespace intel_cpu
}  // namespace ov
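The first @todo is worth making concrete: dynamicQuantizationGroupSize is the one member without an in-class initializer, so a default-constructed GEMMAttrs leaves it indeterminate. A small self-contained illustration using a simplified stand-in struct (not the real GEMMAttrs, which also carries MemoryCPtr and Config::ModelType members):

// Simplified stand-in for GEMMAttrs: only the scalar/vector members are kept.
#include <cstdint>
#include <vector>

struct GemmAttrsSketch {
    bool withBias = false;
    bool weightsNonTransposed = true;
    bool sparseWeights = false;
    std::vector<float> dequantizationScales;
    uint64_t dynamicQuantizationGroupSize;  // no default: indeterminate unless explicitly set
};

int main() {
    GemmAttrsSketch a;    // default-init: dynamicQuantizationGroupSize is indeterminate
    (void)a;              // (reading it here would be undefined behaviour)
    GemmAttrsSketch b{};  // value-init of the aggregate: the field is zeroed
    // Designated initializers (C++20) are one way to enforce explicit initialization:
    GemmAttrsSketch c{.withBias = false,
                      .weightsNonTransposed = true,
                      .sparseWeights = false,
                      .dequantizationScales = {1.f},
                      .dynamicQuantizationGroupSize = 0};
    return static_cast<int>(b.dynamicQuantizationGroupSize + c.dynamicQuantizationGroupSize);
}

Either a default member initializer or value-/designated initialization at every construction site would close that gap.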
src/plugins/intel_cpu/src/nodes/executors/gemm_config.hpp (14 additions, 0 deletions)
@@ -0,0 +1,14 @@
// Copyright (C) 2024 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include "executor_config.hpp"
#include "gemm_attrs.hpp"

namespace ov {
namespace intel_cpu {
using GEMMConfig = ov::intel_cpu::executor::Config<GEMMAttrs>;
}  // namespace intel_cpu
}  // namespace ov