[CPU] [ARM] FullyConnected: int8 support
eshoguli committed Aug 6, 2024
1 parent 62bf061 commit 2e3b1d2
Showing 100 changed files with 756 additions and 53 deletions.
32 changes: 32 additions & 0 deletions src/common/low_precision_transformations/include/low_precision/markup_dequantization_fuse.hpp
@@ -0,0 +1,32 @@
// Copyright (C) 2024 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include "low_precision/lpt_visibility.hpp"
#include <memory>
#include "openvino/pass/graph_rewrite.hpp"
#include "openvino/pass/pattern/matcher.hpp"

namespace ov {
namespace pass {
namespace low_precision {

/**
* @ingroup ov_transformation_common_api
* @brief MarkupDequantizationFuse transformation marks dequantization operations that must not be updated, so they can later be fused.
*
* For more details about the transformation, refer to the
* [MarkupBias](@ref openvino_docs_OV_UG_lpt_MarkupBias) page
* in the OpenVINO Developer Guide.
*/
class LP_TRANSFORMATIONS_API MarkupDequantizationFuse : public ov::pass::MatcherPass {
public:
OPENVINO_RTTI("MarkupBias", "0");
MarkupDequantizationFuse();
};

} // namespace low_precision
} // namespace pass
} // namespace ov
src/common/low_precision_transformations/src/low_precision.cpp
@@ -20,6 +20,7 @@
#include "low_precision/align_quantization_intervals.hpp"
#include "low_precision/fake_quantize_decomposition.hpp"
#include "low_precision/markup_bias.hpp"
#include "low_precision/markup_dequantization_fuse.hpp"
#include "low_precision/markup_precisions.hpp"
#include "low_precision/markup_can_be_quantized.hpp"
#include "low_precision/markup_avg_pool_precision_preserved.hpp"
@@ -208,6 +209,8 @@ bool ov::pass::low_precision::MarkupOptimizations::run_on_model(const std::share
markup.register_pass<low_precision::AlignQuantizationParameters>(params.defaultPrecisions);
}
markup.register_pass<low_precision::MarkupBias>();
// TODO: debug only
markup.register_pass<low_precision::MarkupDequantizationFuse>();
markup.run_passes(f);
return false;
}
38 changes: 38 additions & 0 deletions src/common/low_precision_transformations/src/markup_dequantization_fuse.cpp
@@ -0,0 +1,38 @@
// Copyright (C) 2024 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "low_precision/markup_dequantization_fuse.hpp"

#include <memory>
#include "openvino/opsets/opset1.hpp"
#include "openvino/pass/pattern/op/wrap_type.hpp"

#include "itt.hpp"
#include "low_precision/rt_info/bias_attribute.hpp"

using namespace ov::pass::low_precision;

MarkupDequantizationFuse::MarkupDequantizationFuse() {
MATCHER_SCOPE(MarkupDequantizationFuse);
auto layer_m = ov::pass::pattern::wrap_type<ov::opset1::MatMul>(ov::pass::pattern::has_static_rank());
// TODO: getDequantization?
auto bias_const_m = ov::pass::pattern::wrap_type<ov::opset1::Constant>();
auto bias_m = ov::pass::pattern::wrap_type<ov::opset1::Multiply>({layer_m, bias_const_m});

ov::matcher_pass_callback callback = [=](ov::pass::pattern::Matcher& m) {
const auto& pattern_map = m.get_pattern_value_map();
const auto& const_shape = pattern_map.at(bias_const_m).get_shape();

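        // Only scalar or per-channel scales qualify: per-channel means exactly one
        // dimension of the constant is greater than 1 (e.g. {1, C, 1, 1}).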
const bool per_channel = std::count_if(const_shape.begin(), const_shape.end(), [](size_t x) { return x > 1; }) == 1;
if (ov::shape_size(const_shape) == 1 || per_channel) {
const auto bias = pattern_map.at(bias_m).get_node_shared_ptr();
ov::mark_as_bias(bias);
}

return false;
};

auto m = std::make_shared<ov::pass::pattern::Matcher>(bias_m, matcher_name);
register_matcher(m, callback);
}
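A minimal sketch (not part of this commit; shapes, values, and variable names are illustrative) of the subgraph the matcher targets and the effect of the callback:

#include <memory>
#include "openvino/opsets/opset1.hpp"
#include "low_precision/rt_info/bias_attribute.hpp"

// MatMul followed by Multiply with a scalar (or per-channel) constant — the
// typical output dequantization that should stay fused with the MatMul.
auto input = std::make_shared<ov::opset1::Parameter>(ov::element::f32, ov::Shape{1, 16});
auto weights = ov::opset1::Constant::create(ov::element::f32, ov::Shape{16, 8}, {0.5f});
auto matmul = std::make_shared<ov::opset1::MatMul>(input, weights);
auto scale = ov::opset1::Constant::create(ov::element::f32, ov::Shape{1, 1}, {0.25f});
auto dq_multiply = std::make_shared<ov::opset1::Multiply>(matmul, scale);
// After the pass runs over a model containing this subgraph, the Multiply carries
// the bias attribute: ov::marked_as_bias(dq_multiply) == true, and
// NetworkHelper::getDequantization() will no longer report it (see below).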
4 changes: 4 additions & 0 deletions src/common/low_precision_transformations/src/mat_mul.cpp
@@ -12,6 +12,7 @@
#include "openvino/pass/pattern/op/or.hpp"
#include "openvino/pass/pattern/op/wrap_type.hpp"

#include "low_precision/rt_info/bias_attribute.hpp"
#include "low_precision/network_helper.hpp"
#include "openvino/util/log.hpp"
#include "itt.hpp"
@@ -176,6 +177,9 @@ bool MatMulTransformation::transform(TransformationContext &context, ov::pass::p

updateOutput(context, newMultiply, newMatMul);

// TODO: debug only
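    // Marking the rescaling Multiply keeps it on the MatMul output: getDequantization()
    // (see the network_helper.cpp changes below) will then skip it instead of reporting it.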
ov::mark_as_bias(newMultiply);

OPENVINO_DEBUG("LPT: done: ", newMatMul);
return true;
}
src/common/low_precision_transformations/src/network_helper.cpp
@@ -17,6 +17,7 @@
#include "low_precision/common/ie_lpt_exception.hpp"
#include "low_precision/layer_transformation.hpp"
#include "low_precision/network_helper.hpp"
#include "low_precision/rt_info/bias_attribute.hpp"
#include "low_precision/rt_info/intervals_alignment_attribute.hpp"
#include "low_precision/rt_info/precision_preserved_attribute.hpp"
#include "low_precision/rt_info/quantization_alignment_attribute.hpp"
@@ -1183,7 +1184,7 @@ FakeQuantizeDequantization NetworkHelper::getDequantization(const std::shared_pt
const std::shared_ptr<ov::opset1::Multiply> multiply = ov::as_type_ptr<ov::opset1::Multiply>(dataNode.get_node_shared_ptr());
std::shared_ptr<ov::opset1::Constant> multiplyConstant;
if (multiply != nullptr) {
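        // Nodes carrying the bias attribute (ov::mark_as_bias) are intentionally not reported
        // as part of a dequantization, so later transformations leave them in place for fusing.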
if (!FakeQuantizeDequantization::checkShape(multiply)) {
if (!FakeQuantizeDequantization::checkShape(multiply) || ov::marked_as_bias(multiply)) {
return FakeQuantizeDequantization();
}

@@ -1198,6 +1199,9 @@ FakeQuantizeDequantization NetworkHelper::getDequantization(const std::shared_pt
std::shared_ptr<ov::opset1::Convert> subtractConvert;
std::shared_ptr<ov::opset1::Constant> subtractConstant;
if (subtract != nullptr) {
if (ov::marked_as_bias(subtract)) {
return FakeQuantizeDequantization();
}
if (!FakeQuantizeDequantization::checkShape(subtract)) {
return FakeQuantizeDequantization(dataNode, nullptr, nullptr, nullptr, nullptr, multiply, multiplyConstant);
}
@@ -1211,6 +1215,9 @@ FakeQuantizeDequantization NetworkHelper::getDequantization(const std::shared_pt

const std::shared_ptr<ov::opset1::Convert> convert = ov::as_type_ptr<ov::opset1::Convert>(dataNode.get_node_shared_ptr());
if (convert != nullptr) {
if (ov::marked_as_bias(convert)) {
return FakeQuantizeDequantization();
}
auto el_type = convert->input(0).get_element_type();
auto foundIt = std::find(defaultPrecisions.begin(), defaultPrecisions.end(), el_type);
if (foundIt == defaultPrecisions.end() &&
5 changes: 3 additions & 2 deletions src/plugins/intel_cpu/src/config.h
@@ -44,7 +44,8 @@ struct Config {
Unknown
};

bool collectPerfCounters = false;
// TODO: temporary debug workaround: collect performance counters by default
bool collectPerfCounters = true;
bool exclusiveAsyncRequests = false;
SnippetsMode snippetsMode = SnippetsMode::Enable;
std::string dumpToDot = {};
@@ -75,7 +76,7 @@ struct Config {
std::set<ov::hint::ModelDistributionPolicy> modelDistributionPolicy = {};
bool enableHyperThreading = true;
bool changedHyperThreading = false;
#if defined(OPENVINO_ARCH_X86) || defined(OPENVINO_ARCH_X86_64)
#if defined(OPENVINO_ARCH_X86) || defined(OPENVINO_ARCH_X86_64) || defined(OPENVINO_ARCH_ARM64)
LPTransformsMode lpTransformsMode = LPTransformsMode::On;
#else
// Currently INT8 mode is not optimized on 32-bit ARM, RISC-V, or other non-x86 platforms, so fall back to FP32 mode.
3 changes: 2 additions & 1 deletion src/plugins/intel_cpu/src/cpu_memory.cpp
@@ -471,6 +471,7 @@ void DnnlMemoryMngr::notifyUpdate() {

StaticMemory::StaticMemory(const dnnl::engine& eng, MemoryDescPtr desc, const void* data, bool pads_zeroing) :
m_eng(eng), m_pMemDesc(desc) {
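    // An empty descriptor (zero-size memory, e.g. an absent optional input) must not come with external data.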
    OPENVINO_ASSERT(!desc->empty() || data == nullptr);
if (desc->getPrecision() == element::string) {
OPENVINO_THROW("[CPU] StaticMemory object cannot be created for string data.");
}
@@ -480,7 +481,7 @@ StaticMemory::StaticMemory(const dnnl::engine& eng, MemoryDescPtr desc, const vo

m_size = m_pMemDesc->getCurrentMemSize();

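    // For an empty descriptor data is null (asserted above) and m_size is expected to be zero,
    // so the wrapping manager is safe to use and no allocation happens.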
if (data) {
if (data || desc->empty()) {
m_pMemMngr = std::make_shared<StaticMemoryMngr>(const_cast<void*>(data), m_size);
} else {
m_pMemMngr = std::make_shared<StaticMemoryMngr>(m_size);
src/plugins/intel_cpu/src/nodes/executors/acl/acl_common_executor.cpp
@@ -3,6 +3,9 @@
//

#include "acl_common_executor.hpp"

#include <ostream>

#include "acl_utils.hpp"
#include "nodes/executors/memory_arguments.hpp"
#include "utils/debug_capabilities.h"
@@ -38,9 +41,9 @@ static void initACLTensorParams(const MemoryPtr& memoryPtr,
}
}

static std::shared_ptr<arm_compute::TensorInfo> initTensorInfo(const arm_compute::TensorShape& tensorShape,
const arm_compute::DataType& dataType,
const arm_compute::DataLayout& dataLayout) {
std::shared_ptr<arm_compute::TensorInfo> ACLCommonExecutor::initTensorInfo(const arm_compute::TensorShape& tensorShape,
const arm_compute::DataType& dataType,
const arm_compute::DataLayout& dataLayout) {
std::shared_ptr<arm_compute::TensorInfo> aclMemoryInfo = nullptr;
if (dataType != arm_compute::DataType::UNKNOWN) {
aclMemoryInfo = std::make_shared<arm_compute::TensorInfo>(
@@ -72,6 +75,9 @@ bool ACLCommonExecutor::update(const MemoryArgs &memory) {
ACLTypes aclDataType{};
ACLLayouts aclDataLayout{};
for (auto& cpu_mem_ptr : memory) {
if (cpu_mem_ptr.second->getSize() == 0) {
continue;
}
const ACLArgs index = argConvert.at(cpu_mem_ptr.first);
initACLTensorParams(cpu_mem_ptr.second, aclTensorAttrs,
aclMemoryShapes[index],
@@ -108,18 +114,79 @@ bool ACLCommonExecutor::update(const MemoryArgs &memory) {
configureThreadSafe([&] {
iFunction = configureFunction(aclMemoryTensors);
});

// for (auto& cpu_mem_ptr : memory) {
// const ACLArgs index = argConvert.at(cpu_mem_ptr.first);
// if (aclTensorAttrs.memoryUsageIndicator[index]) {
// aclMemoryTensors[index]->allocator()->import_memory(memory.at(cpu_mem_ptr.first)->getData());
// }
// }
return true;
}

//namespace {
//std::ostream& operator<<(std::ostream& os, const arm_compute::ITensorInfo* tensor_info) {
// const auto data_type = tensor_info->data_type();
// switch (data_type) {
// case arm_compute::DataType::S8: {
// return os << "S8";
// }
// case arm_compute::DataType::QSYMM8: {
// return os << "QSYMM8";
// }
// case arm_compute::DataType::QASYMM8: {
// return os << "QASYMM8";
// }
// case arm_compute::DataType::QASYMM8_SIGNED: {
// return os << "QASYMM8_SIGNED";
// }
// case arm_compute::DataType::S32: {
// return os << "S32";
// }
// case arm_compute::DataType::F32: {
// return os << "F32";
// }
// default: {
// return os << "[UNKNOWN]";
// }
// }
//}
//} // namespace

void ACLCommonExecutor::execute(const MemoryArgs &memory) {
// TODO: Move import_memory() to update() function - CVS-145871
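    // import_memory() wraps the plugin-owned buffer without copying, so ACL operates
    // directly on OpenVINO's memory.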
for (auto& cpu_mem_ptr : memory) {
const ACLArgs index = argConvert.at(cpu_mem_ptr.first);
if (aclTensorAttrs.memoryUsageIndicator[index]) {
if (aclMemoryTensors[index]) {
aclMemoryTensors[index]->allocator()->import_memory(memory.at(cpu_mem_ptr.first)->getData());
}
}

// for (auto index = 0; index < aclMemoryTensors.size(); ++index) {
// const auto& tensor = aclMemoryTensors[index];
// if ((tensor == nullptr) || (index == ACLArgs::ACL_DST)) {
// continue;
// }
//
// if (index == ACLArgs::ACL_SRC_0) {
// std::cout << "src0 ";
// } else if (index == ACLArgs::ACL_WEI) {
// std::cout << "src1 ";
// } else if (index == ACLArgs::ACL_BIAS) {
// std::cout << "biases ";
// } else {
// std::cout << "[UNKNOWN] ";
// }
// std::cout << tensor->info() << ":" << std::endl;
// tensor->print(std::cout);
// }

iFunction->run();

// {
// std::shared_ptr<arm_compute::Tensor> tensor = aclMemoryTensors[ACLArgs::ACL_DST];
// std::cout << "dst " << tensor->info() << ":" << std::endl;
// tensor->print(std::cout);
// }
}

ACLCommonExecutor::~ACLCommonExecutor() {
src/plugins/intel_cpu/src/nodes/executors/acl/acl_common_executor.hpp
@@ -47,6 +47,11 @@ class ACLCommonExecutor : public Executor {

protected:
ACLTensorAttrs aclTensorAttrs;

virtual std::shared_ptr<arm_compute::TensorInfo> initTensorInfo(const arm_compute::TensorShape& tensorShape,
const arm_compute::DataType& dataType,
const arm_compute::DataLayout& dataLayout);

private:
ACLTensors aclMemoryTensors;
ACLFunction iFunction = nullptr;
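
Making initTensorInfo() a virtual member (previously a file-local static in acl_common_executor.cpp) lets a derived executor customize how ACL tensor descriptors are built. A minimal sketch of the kind of override this enables — the class name and quantization parameters below are illustrative, not from this commit:

// Hypothetical int8 executor: attaches quantization metadata that the generic
// implementation does not set.
std::shared_ptr<arm_compute::TensorInfo> ACLLowpFullyConnectedExecutor::initTensorInfo(
        const arm_compute::TensorShape& tensorShape,
        const arm_compute::DataType& dataType,
        const arm_compute::DataLayout& dataLayout) {
    auto info = ACLCommonExecutor::initTensorInfo(tensorShape, dataType, dataLayout);
    if (info && dataType == arm_compute::DataType::QASYMM8_SIGNED) {
        // Placeholder scale/zero-point: real values would come from the dequantization subgraph.
        info->set_quantization_info(arm_compute::QuantizationInfo(1.f, 0));
    }
    return info;
}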