hasHardwareSupport replacements, Conv DefaultImplPriority erase condition for all brgconv

liubo-intel committed Jan 4, 2024
1 parent 6a94fe2 commit 104b6e2
Showing 4 changed files with 7 additions and 8 deletions.
7 changes: 4 additions & 3 deletions src/plugins/intel_cpu/src/config.cpp
@@ -12,6 +12,7 @@
#include "openvino/runtime/internal_properties.hpp"
#include "openvino/runtime/properties.hpp"
#include "utils/debug_capabilities.h"
#include "utils/precision_support.h"

#include <algorithm>
#include <map>
@@ -219,7 +220,7 @@ void Config::readProperties(const ov::AnyMap& prop, const ModelType modelType) {
". Expected only true/false");
}
if (enable) {
-if (mayiuse(avx512_core) || mayiuse(avx2_vnni_2)) {
+if (hasHardwareSupport(ov::element::bf16)) {
inferencePrecision = ov::element::bf16;
} else {
OPENVINO_THROW("Platform doesn't support BF16 format");
@@ -234,12 +235,12 @@ void Config::readProperties(const ov::AnyMap& prop, const ModelType modelType) {
auto const prec = val.as<ov::element::Type>();
inferencePrecisionSetExplicitly = true;
if (prec == ov::element::bf16) {
-if (mayiuse(avx512_core) || mayiuse(avx2_vnni_2)) {
+if (hasHardwareSupport(ov::element::bf16)) {
inferencePrecision = ov::element::bf16;
}
} else if (prec == ov::element::f16) {
#if defined(OPENVINO_ARCH_X86_64)
-if (mayiuse(avx512_core_fp16) || mayiuse(avx512_core_amx_fp16) || mayiuse(avx2_vnni_2)) {
+if (hasHardwareSupport(ov::element::f16)) {
inferencePrecision = ov::element::f16;
}
#elif defined(OV_CPU_ARM_ENABLE_FP16)
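For context, the helper that replaces these scattered ISA checks is declared in utils/precision_support.h. Below is a minimal sketch of what it plausibly centralizes, reconstructed only from the x86-64 conditions this commit removes; the real implementation in the plugin also covers ARM builds and other cases not visible in this diff.

#include "cpu/x64/cpu_isa_traits.hpp"           // dnnl::impl::cpu::x64::mayiuse (assumed include path)
#include "openvino/core/type/element_type.hpp"  // ov::element::Type

using namespace dnnl::impl::cpu::x64;

// Sketch only: mirrors the checks removed in this commit, not the actual
// contents of utils/precision_support.cpp.
bool hasHardwareSupport(const ov::element::Type& precision) {
    if (precision == ov::element::bf16)
        // bf16 kernels need AVX-512 (avx512_core) or AVX2-VNNI-2 capable cores
        return mayiuse(avx512_core) || mayiuse(avx2_vnni_2);
    if (precision == ov::element::f16)
        // f16 kernels need one of the fp16-capable ISAs
        return mayiuse(avx512_core_fp16) || mayiuse(avx512_core_amx_fp16) ||
               mayiuse(avx2_vnni_2);
    return true;  // other precisions are not gated by an ISA check in this sketch
}

Folding the checks into one helper means config.cpp, eltwise.cpp, and the transformation pipeline below all stay in sync when precision support rules change.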
3 changes: 1 addition & 2 deletions src/plugins/intel_cpu/src/nodes/conv.cpp
@@ -374,8 +374,7 @@ const std::vector<impl_desc_type>& Convolution::getDefaultImplPriority() {
priorities.erase(std::remove_if(priorities.begin(),
priorities.end(),
[](impl_desc_type type) {
-return !isBrgConvAvailable() && (type == impl_desc_type::brgconv_avx2_1x1 ||
-type == impl_desc_type::brgconv_avx2);
+return !isBrgConvAvailable() && (type & impl_desc_type::brgconv);
}),
priorities.end());

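The new erase condition relies on impl_desc_type acting as a bitmask in which every brgconv flavour (avx2, avx2_1x1, avx512-based variants, and so on) carries the common brgconv bit, so a single mask test covers implementations the old two-value comparison missed. A small self-contained illustration of that filtering pattern, using hypothetical bit values rather than the plugin's real layout:

#include <algorithm>
#include <cstdint>
#include <vector>

// Hypothetical stand-in for the plugin's impl_desc_type bit layout:
// every brgconv variant shares the generic `brgconv` bit.
enum impl_desc_type : uint64_t {
    brgconv  = 1u << 4,
    avx2     = 1u << 8,
    avx512   = 1u << 9,
    conv1x1  = 1u << 12,
    brgconv_avx2     = brgconv | avx2,
    brgconv_avx2_1x1 = brgconv | avx2 | conv1x1,
    brgconv_avx512   = brgconv | avx512,
};

void erasePriorities(std::vector<impl_desc_type>& priorities, bool brgConvAvailable) {
    priorities.erase(std::remove_if(priorities.begin(),
                                    priorities.end(),
                                    [&](impl_desc_type type) {
                                        // one mask test drops every brgconv flavour at once
                                        return !brgConvAvailable && (type & brgconv);
                                    }),
                     priorities.end());
}

With the mask test, any brgconv implementation added later is excluded automatically whenever isBrgConvAvailable() is false, instead of requiring the enumeration in the lambda to be extended.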
2 changes: 1 addition & 1 deletion src/plugins/intel_cpu/src/nodes/eltwise.cpp
@@ -2199,7 +2199,7 @@ void Eltwise::initSupportedPrimitiveDescriptors() {
if (!fusedWith.empty()) {
outputPrecision = fusedWith[fusedWith.size() - 1]->getOriginalOutputPrecisionAtPort(0);
}
-if (!mayiuse(avx512_core) && !mayiuse(avx2_vnni_2)) {
+if (!hasHardwareSupport(ov::element::bf16)) {
bool hasBF16 = false;
for (auto &inPrc : inputPrecisions)
if (inPrc == ov::element::bf16)
3 changes: 1 addition & 2 deletions src/plugins/intel_cpu/src/transformations/transformation_pipeline.cpp
@@ -283,8 +283,7 @@ void Transformations::PreLpt(const std::vector<ov::element::Type>& defaultPrecis
};

// @todo should we always convert to f32 regardless of hardware support, as it is done for f16?
-if (!dnnl::impl::cpu::x64::mayiuse(dnnl::impl::cpu::x64::avx512_core) &&
-!dnnl::impl::cpu::x64::mayiuse(dnnl::impl::cpu::x64::avx2_vnni_2))
+if (!hasHardwareSupport(ov::element::bf16))
map.insert({ov::element::bf16, ov::element::f32});
#if defined(OV_CPU_ARM_ENABLE_FP16)
if (inferencePrecision != ov::element::f16)
