diff --git a/src/plugins/intel_cpu/src/config.cpp b/src/plugins/intel_cpu/src/config.cpp
index 4882744939163c..ba5c54d6bfe219 100644
--- a/src/plugins/intel_cpu/src/config.cpp
+++ b/src/plugins/intel_cpu/src/config.cpp
@@ -12,6 +12,7 @@
 #include "openvino/runtime/internal_properties.hpp"
 #include "openvino/runtime/properties.hpp"
 #include "utils/debug_capabilities.h"
+#include "utils/precision_support.h"
 
 #include
 #include
@@ -219,7 +220,7 @@ void Config::readProperties(const ov::AnyMap& prop, const ModelType modelType) {
                                ". Expected only true/false");
             }
             if (enable) {
-                if (mayiuse(avx512_core) || mayiuse(avx2_vnni_2)) {
+                if (hasHardwareSupport(ov::element::bf16)) {
                     inferencePrecision = ov::element::bf16;
                 } else {
                     OPENVINO_THROW("Platform doesn't support BF16 format");
@@ -234,12 +235,12 @@ void Config::readProperties(const ov::AnyMap& prop, const ModelType modelType) {
                 auto const prec = val.as<ov::element::Type>();
                 inferencePrecisionSetExplicitly = true;
                 if (prec == ov::element::bf16) {
-                    if (mayiuse(avx512_core) || mayiuse(avx2_vnni_2)) {
+                    if (hasHardwareSupport(ov::element::bf16)) {
                         inferencePrecision = ov::element::bf16;
                     }
                 } else if (prec == ov::element::f16) {
 #if defined(OPENVINO_ARCH_X86_64)
-                    if (mayiuse(avx512_core_fp16) || mayiuse(avx512_core_amx_fp16) || mayiuse(avx2_vnni_2)) {
+                    if (hasHardwareSupport(ov::element::f16)) {
                         inferencePrecision = ov::element::f16;
                     }
 #elif defined(OV_CPU_ARM_ENABLE_FP16)
diff --git a/src/plugins/intel_cpu/src/nodes/conv.cpp b/src/plugins/intel_cpu/src/nodes/conv.cpp
index 6fe0b9175c27d8..a1eb6f49e9900f 100644
--- a/src/plugins/intel_cpu/src/nodes/conv.cpp
+++ b/src/plugins/intel_cpu/src/nodes/conv.cpp
@@ -374,8 +374,7 @@ const std::vector<impl_desc_type>& Convolution::getDefaultImplPriority() {
     priorities.erase(std::remove_if(priorities.begin(),
                                     priorities.end(),
                                     [](impl_desc_type type) {
-                                        return !isBrgConvAvailable() && (type == impl_desc_type::brgconv_avx2_1x1 ||
-                                                                         type == impl_desc_type::brgconv_avx2);
+                                        return !isBrgConvAvailable() && (type & impl_desc_type::brgconv);
                                     }),
                      priorities.end());
 
diff --git a/src/plugins/intel_cpu/src/nodes/eltwise.cpp b/src/plugins/intel_cpu/src/nodes/eltwise.cpp
index ebff119982cc5f..4ed4174b750aad 100644
--- a/src/plugins/intel_cpu/src/nodes/eltwise.cpp
+++ b/src/plugins/intel_cpu/src/nodes/eltwise.cpp
@@ -2199,7 +2199,7 @@ void Eltwise::initSupportedPrimitiveDescriptors() {
     if (!fusedWith.empty()) {
         outputPrecision = fusedWith[fusedWith.size() - 1]->getOriginalOutputPrecisionAtPort(0);
     }
-    if (!mayiuse(avx512_core) && !mayiuse(avx2_vnni_2)) {
+    if (!hasHardwareSupport(ov::element::bf16)) {
         bool hasBF16 = false;
         for (auto &inPrc : inputPrecisions)
             if (inPrc == ov::element::bf16)
diff --git a/src/plugins/intel_cpu/src/transformations/transformation_pipeline.cpp b/src/plugins/intel_cpu/src/transformations/transformation_pipeline.cpp
index 909453b8be3b72..b98d7a8979701d 100644
--- a/src/plugins/intel_cpu/src/transformations/transformation_pipeline.cpp
+++ b/src/plugins/intel_cpu/src/transformations/transformation_pipeline.cpp
@@ -283,8 +283,7 @@ void Transformations::PreLpt(const std::vector<ov::element::Type>& defaultPrecis
         };
 
         // @todo should we always convert to f32 regardless of hardware support, as it is done for f16?
-        if (!dnnl::impl::cpu::x64::mayiuse(dnnl::impl::cpu::x64::avx512_core) &&
-            !dnnl::impl::cpu::x64::mayiuse(dnnl::impl::cpu::x64::avx2_vnni_2))
+        if (!hasHardwareSupport(ov::element::bf16))
             map.insert({ov::element::bf16, ov::element::f32});
 #if defined(OV_CPU_ARM_ENABLE_FP16)
         if (inferencePrecision != ov::element::f16)
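
For context, every call site above now funnels its ISA query through hasHardwareSupport() from utils/precision_support.h. The sketch below is only an assumption about what the x86-64 branch of that helper amounts to, reconstructed from the conditions this diff deletes (avx512_core / avx2_vnni_2 for bf16, the fp16-capable ISAs for f16); the actual implementation in the repository may differ, for example on ARM or for precisions other than bf16/f16.

// Hypothetical sketch, NOT the actual utils/precision_support.h implementation:
// the x86-64 predicates below simply mirror the conditions removed by this diff.
#include "cpu/x64/cpu_isa_traits.hpp"
#include "openvino/core/type/element_type.hpp"

namespace ov {
namespace intel_cpu {

inline bool hasHardwareSupport(const ov::element::Type& precision) {
    using namespace dnnl::impl::cpu::x64;
    if (precision == ov::element::bf16) {
        // Same check previously inlined in config.cpp, eltwise.cpp and the transformation pipeline.
        return mayiuse(avx512_core) || mayiuse(avx2_vnni_2);
    }
    if (precision == ov::element::f16) {
        // Same check previously inlined in the f16 branch of Config::readProperties.
        return mayiuse(avx512_core_fp16) || mayiuse(avx512_core_amx_fp16) || mayiuse(avx2_vnni_2);
    }
    return true;  // assumption: other precisions are treated as always supported
}

}  // namespace intel_cpu
}  // namespace ov

Under that assumption, hasHardwareSupport(ov::element::bf16) reproduces exactly the mayiuse(avx512_core) || mayiuse(avx2_vnni_2) condition the bf16 call sites used to repeat, which is what makes the per-site replacements above behavior-preserving on x86-64.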