Skip to content

Commit

Permalink
Disabled i8 tests for non-VNNI
Browse files — Browse the repository at this point in the history
  • Loading branch information
a-sidorova committed Jan 26, 2023
1 parent 45fcfe7 commit 40b569d
Show file tree
Hide file tree
Showing 3 changed files with 16 additions and 4 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -220,6 +220,10 @@ std::vector<std::string> disabledTestPatterns() {
retVector.emplace_back(R"(.*Snippets.*MHA.*)");
retVector.emplace_back(R"(.*Snippets.*(MatMul|Matmul).*)");
}
if (!InferenceEngine::with_cpu_x86_avx512_core_vnni() && !InferenceEngine::with_cpu_x86_avx512_core_amx_int8()) {
// MatMul in Snippets uses BRGEMM that supports i8 only on platforms with VNNI or AMX instructions
retVector.emplace_back(R"(.*Snippets.*MatMulFQ.*)");
}
if (!InferenceEngine::with_cpu_x86_avx512_core_amx_int8())
//TODO: Issue 92895
// on platforms which do not support AMX, we are disabling I8 input tests
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -24,8 +24,12 @@ static inline std::vector<std::vector<element::Type>> precisions(bool only_fp32
{element::f32, element::f32},
};
if (!only_fp32) {
prc.emplace_back(std::vector<element::Type>{element::i8, element::i8});
prc.emplace_back(std::vector<element::Type>{element::u8, element::i8});
// In Snippets MatMul INT8 is supported only on VNNI/AMX platforms
if (InferenceEngine::with_cpu_x86_avx512_core_vnni() || InferenceEngine::with_cpu_x86_avx512_core_amx_int8()) {
prc.emplace_back(std::vector<element::Type>{element::i8, element::i8});
prc.emplace_back(std::vector<element::Type>{element::u8, element::i8});
}
// In Snippets MatMul BF16 is supported only on bf16/AMX platforms
if (InferenceEngine::with_cpu_x86_bfloat16() || InferenceEngine::with_cpu_x86_avx512_core_amx_bf16()) {
prc.emplace_back(std::vector<element::Type>{element::bf16, element::bf16});
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -17,8 +17,12 @@ static inline std::vector<std::vector<element::Type>> precisions(bool only_fp32
{element::f32, element::f32},
};
if (!only_fp32) {
prc.emplace_back(std::vector<element::Type>{element::i8, element::i8});
prc.emplace_back(std::vector<element::Type>{element::u8, element::i8});
// In Snippets MatMul INT8 is supported only on VNNI/AMX platforms
if (InferenceEngine::with_cpu_x86_avx512_core_vnni() || InferenceEngine::with_cpu_x86_avx512_core_amx_int8()) {
prc.emplace_back(std::vector<element::Type>{element::i8, element::i8});
prc.emplace_back(std::vector<element::Type>{element::u8, element::i8});
}
// In Snippets MatMul BF16 is supported only on bf16/AMX platforms
if (InferenceEngine::with_cpu_x86_bfloat16() || InferenceEngine::with_cpu_x86_avx512_core_amx_bf16()) {
prc.emplace_back(std::vector<element::Type>{element::bf16, element::bf16});
}
Expand Down

0 comments on commit 40b569d

Please sign in to comment.