Skip to content

Commit

Permalink
Fix for fp16
Browse files · Browse the repository at this point in the history
  • Loading branch information
allnes committed Oct 14, 2023
1 parent e8861fb commit 8be3970
Show file tree
Hide file tree
Showing 2 changed files with 8 additions and 5 deletions.
2 changes: 1 addition & 1 deletion src/plugins/intel_cpu/src/nodes/non_max_suppression.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -626,7 +626,7 @@ void NonMaxSuppression::initSupportedPrimitiveDescriptors() {
if (!supportedPrimitiveDescriptors.empty())
return;

const std::vector<Precision> supportedFloatPrecision = {Precision::FP32, Precision::BF16};
const std::vector<Precision> supportedFloatPrecision = {Precision::FP32, Precision::BF16, Precision::FP16};
const std::vector<Precision> supportedIntOutputPrecision = {Precision::I32, Precision::I64};

checkPrecision(getOriginalInputPrecisionAtPort(NMS_BOXES), supportedFloatPrecision, "boxes", inType);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -173,13 +173,18 @@ TEST_F(OVClassConfigTestCPU, smoke_PluginSetConfigAffinityCore) {
ASSERT_EQ(false, value);
}

// Precision the CPU plugin is expected to select for PERFORMANCE hint mode:
// f16 on ARM builds with fp16 support enabled, otherwise bf16 when the host
// x86 CPU supports it, falling back to f32.
#ifdef OV_CPU_ARM_ENABLE_FP16
const auto expected_precision_for_performance_mode = ov::element::f16;
#else
const auto expected_precision_for_performance_mode =
        InferenceEngine::with_cpu_x86_bfloat16() ? ov::element::bf16 : ov::element::f32;
#endif

TEST_F(OVClassConfigTestCPU, smoke_PluginSetConfigHintInferencePrecision) {
ov::Core ie;
auto value = ov::element::f32;
const auto precision = InferenceEngine::with_cpu_x86_bfloat16() ? ov::element::bf16 : ov::element::f32;

ASSERT_NO_THROW(value = ie.get_property("CPU", ov::hint::inference_precision));
ASSERT_EQ(precision, value);
ASSERT_EQ(expected_precision_for_performance_mode, value);

const auto forcedPrecision = ov::element::f32;

Expand Down Expand Up @@ -210,8 +215,6 @@ TEST_F(OVClassConfigTestCPU, smoke_PluginSetConfigEnableProfiling) {
ASSERT_EQ(enableProfiling, value);
}

const auto expected_precision_for_performance_mode = InferenceEngine::with_cpu_x86_bfloat16() ? ov::element::bf16 : ov::element::f32;

// bf16 when the host CPU has AVX512-Core (bf16 can be executed/emulated there), else f32.
const auto bf16_if_can_be_emulated = InferenceEngine::with_cpu_x86_avx512_core() ? ov::element::bf16 : ov::element::f32;
// Pairs an execution-mode hint with the element type the plugin is expected to pick for it.
using ExpectedModeAndType = std::pair<ov::hint::ExecutionMode, ov::element::Type>;

Expand Down

0 comments on commit 8be3970

Please sign in to comment.