From 9cb05517f660c7b508e18a8fe55f43cc26de5b6b Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Fri, 26 Jan 2024 03:41:55 +0800 Subject: [PATCH 1/9] Try to enable both fp16 and fp32 kernels for aarch64 --- src/plugins/intel_cpu/CMakeLists.txt | 19 ++++++++++++++++++- .../intel_cpu/thirdparty/ACLConfig.cmake | 10 ++++++---- .../intel_cpu/thirdparty/ComputeLibrary | 2 +- 3 files changed, 25 insertions(+), 6 deletions(-) diff --git a/src/plugins/intel_cpu/CMakeLists.txt b/src/plugins/intel_cpu/CMakeLists.txt index df9bddd23bc22e..e16f3483d3f568 100644 --- a/src/plugins/intel_cpu/CMakeLists.txt +++ b/src/plugins/intel_cpu/CMakeLists.txt @@ -32,7 +32,24 @@ if(ARM) # requires estate=32 ${OV_CPU_ARM_TARGET_GENERIC_ARCHS}) elseif(AARCH64) - set(OV_CPU_ARM_TARGET_ARCH_DEFAULT arm64-v8.2-a) + if(APPLE) + set(OV_CPU_ARM_TARGET_ARCH_DEFAULT arm64-v8.2-a) + else() + if(CMAKE_COMPILER_IS_GNUCXX AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 10.2) + # according to https://github.com/ARM-software/ComputeLibrary/issues/1053#issuecomment-1846903707 comment + # the 'multi_isa=1' below enables FP32, FP16 and SVE / SVE2 kernels + # But: arm_sve.h header is not available on gcc older than 10.2 (let's test it), so we have to check it + set(OV_CPU_AARCH64_USE_MULTI_ISA ON) + endif() + if(OV_CPU_AARCH64_USE_MULTI_ISA) + # set v8a even if we want fp16 kernels, because + # we use multi_isa=1 in ACLConfig.cmake to enable both fp16 and fp32 kernels + # actual kernel is selected at runtime based on runtime capabilities + set(OV_CPU_ARM_TARGET_ARCH_DEFAULT arm64-v8a) + else() + set(OV_CPU_ARM_TARGET_ARCH_DEFAULT arm64-v8.2-a) + endif() + endif() set(OV_CPU_ARM_TARGET_ARCHS arm64-v8a arm64-v8.2-a arm64-v8.2-a-sve arm64-v8.2-a-sve2 # used with estate=64 diff --git a/src/plugins/intel_cpu/thirdparty/ACLConfig.cmake b/src/plugins/intel_cpu/thirdparty/ACLConfig.cmake index ef0427aa9c2168..f9fc65f7aefd76 100644 --- a/src/plugins/intel_cpu/thirdparty/ACLConfig.cmake +++ 
b/src/plugins/intel_cpu/thirdparty/ACLConfig.cmake @@ -149,10 +149,12 @@ elseif(NOT TARGET arm_compute::arm_compute) list(APPEND ARM_COMPUTE_OPTIONS estate=32) else() list(APPEND ARM_COMPUTE_OPTIONS estate=64) - if(NOT APPLE AND CMAKE_COMPILER_IS_GNUCXX AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 10.2) - # arm_sve.h header is not available on gcc older 10.2 - # TODO: validate it on machines with FP16 / SVE support and enabled back - # list(APPEND ARM_COMPUTE_OPTIONS multi_isa=1) + if(NOT APPLE) + if(OV_CPU_AARCH64_USE_MULTI_ISA) + list(APPEND ARM_COMPUTE_OPTIONS multi_isa=1) + # let's additionally enable SME as well + set(extra_cxx_flags "${extra_cxx_flags} -DENABLE_SME -DARM_COMPUTE_ENABLE_SME -DARM_COMPUTE_ENABLE_SME2") + endif() endif() endif() diff --git a/src/plugins/intel_cpu/thirdparty/ComputeLibrary b/src/plugins/intel_cpu/thirdparty/ComputeLibrary index 874e0c7b3fe93a..afb5a987d084e1 160000 --- a/src/plugins/intel_cpu/thirdparty/ComputeLibrary +++ b/src/plugins/intel_cpu/thirdparty/ComputeLibrary @@ -1 +1 @@ -Subproject commit 874e0c7b3fe93a6764ecb2d8cfad924af19a9d25 +Subproject commit afb5a987d084e14a161b63eff916f5c0519d366f From c923f05d76b1cc7b3608e26c2cc36d4ea71814f3 Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Fri, 26 Jan 2024 04:09:56 +0800 Subject: [PATCH 2/9] Try ubuntu 22.04 --- .github/workflows/linux_arm64.yml | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/.github/workflows/linux_arm64.yml b/.github/workflows/linux_arm64.yml index 15d1c5b7fe0b1f..6810ec3750e00f 100644 --- a/.github/workflows/linux_arm64.yml +++ b/.github/workflows/linux_arm64.yml @@ -1,4 +1,4 @@ -name: Linux ARM64 (Ubuntu 20.04, Python 3.11) +name: Linux ARM64 (Ubuntu 22.04, Python 3.11) on: workflow_dispatch: pull_request: @@ -55,7 +55,7 @@ jobs: shell: bash runs-on: 'aks-linux-16-cores-arm' container: - image: openvinogithubactions.azurecr.io/dockerhub/ubuntu:20.04 + image: 
openvinogithubactions.azurecr.io/dockerhub/ubuntu:22.04 volumes: - /mount:/mount options: -e SCCACHE_AZURE_BLOB_CONTAINER -e SCCACHE_AZURE_CONNECTION_STRING @@ -183,7 +183,6 @@ jobs: - name: Pack Artifacts run: | - # Add the ONNX Runtime version and skip tests list to the archive to use in the ONNX Runtime Job # w/o the need to checkout repository @@ -289,7 +288,7 @@ jobs: uses: ./.github/workflows/job_debian_packages.yml with: runner: 'aks-linux-16-cores-arm' - image: 'openvinogithubactions.azurecr.io/dockerhub/ubuntu:20.04' + image: 'openvinogithubactions.azurecr.io/dockerhub/ubuntu:22.04' Samples: needs: [ Build, Smart_CI ] @@ -297,7 +296,7 @@ jobs: uses: ./.github/workflows/job_samples_tests.yml with: runner: 'aks-linux-16-cores-arm' - image: 'openvinogithubactions.azurecr.io/dockerhub/ubuntu:20.04' + image: 'openvinogithubactions.azurecr.io/dockerhub/ubuntu:22.04' affected-components: ${{ needs.smart_ci.outputs.affected_components }} JS_API: @@ -307,7 +306,7 @@ jobs: uses: ./.github/workflows/job_openvino_js.yml with: runner: 'aks-linux-16-cores-arm' - container: '{"image": "openvinogithubactions.azurecr.io/dockerhub/ubuntu:20.04"}' + container: '{"image": "openvinogithubactions.azurecr.io/dockerhub/ubuntu:22.04"}' ONNX_Runtime: name: ONNX Runtime Integration @@ -328,7 +327,7 @@ jobs: uses: ./.github/workflows/job_cxx_unit_tests.yml with: runner: 'aks-linux-16-cores-arm' - image: 'openvinogithubactions.azurecr.io/dockerhub/ubuntu:20.04' + image: 'openvinogithubactions.azurecr.io/dockerhub/ubuntu:22.04' affected-components: ${{ needs.smart_ci.outputs.affected_components }} Python_Unit_Tests: @@ -337,7 +336,7 @@ jobs: uses: ./.github/workflows/job_python_unit_tests.yml with: runner: 'aks-linux-16-cores-arm' - container: '{"image": "openvinogithubactions.azurecr.io/dockerhub/ubuntu:20.04", "volumes": ["/mount:/mount"]}' + container: '{"image": "openvinogithubactions.azurecr.io/dockerhub/ubuntu:22.04", "volumes": ["/mount:/mount"]}' affected-components: ${{ 
needs.smart_ci.outputs.affected_components }} CPU_Functional_Tests: @@ -347,7 +346,7 @@ jobs: uses: ./.github/workflows/job_cpu_functional_tests.yml with: runner: 'aks-linux-16-cores-arm' - image: 'openvinogithubactions.azurecr.io/dockerhub/ubuntu:20.04' + image: 'openvinogithubactions.azurecr.io/dockerhub/ubuntu:22.04' TensorFlow_Hub_Models_Tests: name: TensorFlow Hub Models tests @@ -358,7 +357,7 @@ jobs: uses: ./.github/workflows/job_tensorflow_hub_models_tests.yml with: runner: 'aks-linux-16-cores-arm' - container: '{"image": "openvinogithubactions.azurecr.io/dockerhub/ubuntu:20.04"}' + container: '{"image": "openvinogithubactions.azurecr.io/dockerhub/ubuntu:22.04"}' event: ${{ github.event_name }} PyTorch_Models_Tests: @@ -369,7 +368,7 @@ jobs: uses: ./.github/workflows/job_pytorch_models_tests.yml with: runner: 'aks-linux-16-cores-arm' - container: '{"image": "openvinogithubactions.azurecr.io/dockerhub/ubuntu:20.04"}' + container: '{"image": "openvinogithubactions.azurecr.io/dockerhub/ubuntu:22.04"}' event: ${{ github.event_name }} Overall_Status: From 1503264d7cc554a9c38fbebd3e96c445e205fb1c Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Thu, 1 Feb 2024 18:12:20 +0800 Subject: [PATCH 3/9] Added extra condition for OV_CPU_ARM_ENABLE_FP16 --- src/plugins/intel_cpu/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/plugins/intel_cpu/CMakeLists.txt b/src/plugins/intel_cpu/CMakeLists.txt index e16f3483d3f568..a0635e45b6d35d 100644 --- a/src/plugins/intel_cpu/CMakeLists.txt +++ b/src/plugins/intel_cpu/CMakeLists.txt @@ -57,7 +57,7 @@ elseif(AARCH64) endif() set(OV_CPU_ARM_TARGET_ARCH ${OV_CPU_ARM_TARGET_ARCH_DEFAULT} CACHE STRING "Architecture for ARM ComputeLibrary") set_property(CACHE OV_CPU_ARM_TARGET_ARCH PROPERTY STRINGS ${OV_CPU_ARM_TARGET_ARCHS}) -if(OV_CPU_ARM_TARGET_ARCH MATCHES "(armv|arm64-v)[8-9]\\.") +if(OV_CPU_ARM_TARGET_ARCH MATCHES "(armv|arm64-v)[8-9]\\." 
OR OV_CPU_AARCH64_USE_MULTI_ISA) add_definitions(-DOV_CPU_ARM_ENABLE_FP16) endif() From 6c314ea8db84b59242d35333536bb0bf708066f8 Mon Sep 17 00:00:00 2001 From: Aleksandr Voron Date: Thu, 1 Feb 2024 16:30:40 +0000 Subject: [PATCH 4/9] fixed ACL API usage and removed OV_CPU_ARM_ENABLE_FP16 --- src/plugins/intel_cpu/CMakeLists.txt | 3 --- src/plugins/intel_cpu/src/config.cpp | 20 ++++++++----------- src/plugins/intel_cpu/src/graph.cpp | 13 ++++++------ src/plugins/intel_cpu/src/nodes/normalize.cpp | 2 +- src/plugins/intel_cpu/src/nodes/reorder.cpp | 15 ++++++-------- src/plugins/intel_cpu/src/nodes/reorder.h | 5 +---- .../intel_cpu/src/nodes/roi_pooling.cpp | 2 +- .../transformation_pipeline.cpp | 17 +++++++++------- .../intel_cpu/src/utils/precision_support.cpp | 8 ++++++-- .../custom/behavior/ov_plugin/properties.cpp | 4 ++-- .../shared_tests_instances/core_config.cpp | 14 ++++++------- .../skip_tests_config.cpp | 19 +++++++++--------- src/plugins/intel_cpu/thirdparty/onednn | 2 +- 13 files changed, 58 insertions(+), 66 deletions(-) diff --git a/src/plugins/intel_cpu/CMakeLists.txt b/src/plugins/intel_cpu/CMakeLists.txt index a0635e45b6d35d..37b6ac33fd5064 100644 --- a/src/plugins/intel_cpu/CMakeLists.txt +++ b/src/plugins/intel_cpu/CMakeLists.txt @@ -57,9 +57,6 @@ elseif(AARCH64) endif() set(OV_CPU_ARM_TARGET_ARCH ${OV_CPU_ARM_TARGET_ARCH_DEFAULT} CACHE STRING "Architecture for ARM ComputeLibrary") set_property(CACHE OV_CPU_ARM_TARGET_ARCH PROPERTY STRINGS ${OV_CPU_ARM_TARGET_ARCHS}) -if(OV_CPU_ARM_TARGET_ARCH MATCHES "(armv|arm64-v)[8-9]\\." 
OR OV_CPU_AARCH64_USE_MULTI_ISA) - add_definitions(-DOV_CPU_ARM_ENABLE_FP16) -endif() if(X86 OR X86_64 OR AARCH64) # disable mlas with webassembly diff --git a/src/plugins/intel_cpu/src/config.cpp b/src/plugins/intel_cpu/src/config.cpp index a5ab2a3385b8a3..c9628039f7c103 100644 --- a/src/plugins/intel_cpu/src/config.cpp +++ b/src/plugins/intel_cpu/src/config.cpp @@ -222,14 +222,9 @@ void Config::readProperties(const ov::AnyMap& prop, const ModelType modelType) { inferencePrecision = ov::element::bf16; } } else if (prec == ov::element::f16) { -#if defined(OPENVINO_ARCH_X86_64) if (hasHardwareSupport(ov::element::f16)) { inferencePrecision = ov::element::f16; } -#elif defined(OV_CPU_ARM_ENABLE_FP16) - // TODO: add runtime FP16 feature support check for ARM - inferencePrecision = ov::element::f16; -#endif } else if (prec == ov::element::f32) { inferencePrecision = ov::element::f32; } else { @@ -305,15 +300,16 @@ void Config::readProperties(const ov::AnyMap& prop, const ModelType modelType) { if (!inferencePrecisionSetExplicitly) { if (executionMode == ov::hint::ExecutionMode::PERFORMANCE) { inferencePrecision = ov::element::f32; -#if defined(OV_CPU_ARM_ENABLE_FP16) - //fp16 precision is used as default precision on ARM for non-convolution networks - //fp16 ACL convolution is slower than fp32 - if (modelType != ModelType::CNN) - inferencePrecision = ov::element::f16; -#else +#if defined(OPENVINO_ARCH_ARM) || defined(OPENVINO_ARCH_ARM64) + if (hasHardwareSupport(ov::element::f16)) { + //fp16 precision is used as default precision on ARM for non-convolution networks + //fp16 ACL convolution is slower than fp32 + if (modelType != ModelType::CNN) + inferencePrecision = ov::element::f16; + } +#endif if (mayiuse(avx512_core_bf16)) inferencePrecision = ov::element::bf16; -#endif } else { inferencePrecision = ov::element::f32; } diff --git a/src/plugins/intel_cpu/src/graph.cpp b/src/plugins/intel_cpu/src/graph.cpp index 2dec1a49bb73d9..feecc45d8e2e95 100644 --- 
a/src/plugins/intel_cpu/src/graph.cpp +++ b/src/plugins/intel_cpu/src/graph.cpp @@ -37,11 +37,10 @@ #include "utils/ngraph_utils.hpp" #include "utils/node_dumper.h" #include "utils/verbose.h" +#include "utils/precision_support.h" #include -#if defined(OV_CPU_ARM_ENABLE_FP16) #include "common/primitive_desc_iface.hpp" -#endif #include "openvino/runtime/memory_solver.hpp" @@ -405,10 +404,12 @@ static bool isReorderAvailable(const MemoryDescPtr& parentDesc, const MemoryDesc dnnl_primitive_desc_t result = nullptr; auto status = dnnl_reorder_primitive_desc_create(&result, srcMemDesc.get(), eng.get(), dstMemDesc.get(), eng.get(), attr.get()); -#if defined(OV_CPU_ARM_ENABLE_FP16) +#if defined(OPENVINO_ARCH_ARM) || defined(OPENVINO_ARCH_ARM64) // temporary WA for slow FP32->FP16 conversion reorder in oneDNN on ARM // pretend the reorder is not available to use Convert node instead - if (result && parse_impl_name(result->impl()->name()) == ref_any) { + if (hasHardwareSupport(ov::element::f16) && + result && + parse_impl_name(result->impl()->name()) == ref_any) { dnnl_primitive_desc_destroy(result); return false; } @@ -1563,8 +1564,8 @@ void Graph::EnforceInferencePrecision() { if (inferPrec == ov::element::f32) return; // nothing to do, only precision reduction is currently allowed -#if defined(OV_CPU_ARM_ENABLE_FP16) - if (inferPrec == ov::element::f16) +#if defined(OPENVINO_ARCH_ARM) || defined(OPENVINO_ARCH_ARM64) + if (hasHardwareSupport(ov::element::f16) && inferPrec == ov::element::f16) return; // precision of configured by ov::pass::ConvertPrecision #endif std::function& skipNodes)> searchForNodesToSkip; diff --git a/src/plugins/intel_cpu/src/nodes/normalize.cpp b/src/plugins/intel_cpu/src/nodes/normalize.cpp index 8b8b7b10bbc8b0..3b14c4ccaa5a47 100644 --- a/src/plugins/intel_cpu/src/nodes/normalize.cpp +++ b/src/plugins/intel_cpu/src/nodes/normalize.cpp @@ -1492,7 +1492,7 @@ std::shared_ptr NormalizeL2::NormalizeL2Execut OV_CASE2(ov::element::i8, ov::element::f32, 
int8_t, float), OV_CASE2(ov::element::f32, ov::element::f32, float, float), OV_CASE2(ov::element::bf16, ov::element::bf16, bfloat16_t, bfloat16_t), - OV_CASE2(ov::element::f16, ov::element::f16, float16_t, float16_t)); + OV_CASE2(ov::element::f16, ov::element::f16, ::float16_t, ::float16_t)); return ctx.executor; } diff --git a/src/plugins/intel_cpu/src/nodes/reorder.cpp b/src/plugins/intel_cpu/src/nodes/reorder.cpp index c674c06a96a00d..895f1436ca7b00 100644 --- a/src/plugins/intel_cpu/src/nodes/reorder.cpp +++ b/src/plugins/intel_cpu/src/nodes/reorder.cpp @@ -11,11 +11,9 @@ #include "nodes/common/reorder_prim.h" #include "openvino/core/parallel.hpp" #include "shape_inference/shape_inference_pass_through.hpp" - -#if defined(OV_CPU_ARM_ENABLE_FP16) +#include "utils/precision_support.h" #include "nodes/executors/executor.hpp" #include "nodes/executors/transpose_list.hpp" -#endif namespace ov { namespace intel_cpu { @@ -115,7 +113,6 @@ void Reorder::executeDynamicImpl(dnnl::stream strm) { execute(strm); } -#if defined(OV_CPU_ARM_ENABLE_FP16) void Reorder::prepareReorderAsTranspose(MemoryDescPtr parentDesc, MemoryDescPtr childDesc) { auto getOrderAndBlockedDims = [](const MemoryDesc& lhs, const MemoryDesc& rhs) -> std::pair, std::vector> { const auto& in = lhs.as()->getBlockDims(); @@ -167,7 +164,6 @@ void Reorder::prepareReorderAsTranspose(MemoryDescPtr parentDesc, MemoryDescPtr getSelectedPrimitiveDescriptor()->setImplementationType(transposeExecutor->getImplType()); return; } -#endif // OV_CPU_ARM_ENABLE_FP16 void Reorder::prepareParams() { if (isOptimized) @@ -198,10 +194,11 @@ void Reorder::prepareParams() { const auto& parentDesc = srcMemPtr->getDescPtr(); const auto& childDesc = dstMemPtr->getDescPtr(); -#if defined(OV_CPU_ARM_ENABLE_FP16) +#if defined(OPENVINO_ARCH_ARM) || defined(OPENVINO_ARCH_ARM64) // @todo current oneDNN v3.2 lacks optimized jit implementation for fp16 reorders. // Use transpose executor as a temporary WA. 
- if (everyone_is(ov::element::f16, parentDesc->getPrecision(), childDesc->getPrecision()) && + if (hasHardwareSupport(ov::element::f16) && + everyone_is(ov::element::f16, parentDesc->getPrecision(), childDesc->getPrecision()) && ((parentDesc->hasLayoutType(LayoutType::ncsp) && childDesc->hasLayoutType(LayoutType::nspc)) || (parentDesc->hasLayoutType(LayoutType::nspc) && childDesc->hasLayoutType(LayoutType::ncsp))) && one_of(parentDesc->getShape().getRank(), 3u, 4u)) { @@ -391,8 +388,8 @@ void Reorder::optimizedNspc2Ncsp() { } void Reorder::execute(dnnl::stream strm) { -#if defined(OV_CPU_ARM_ENABLE_FP16) - if (transposeExecutor) { +#if defined(OPENVINO_ARCH_ARM) || defined(OPENVINO_ARCH_ARM64) + if (hasHardwareSupport(ov::element::f16) && transposeExecutor) { auto dstMemPtr = getDstMemoryAtPort(0); auto srcMemPtr = getSrcMemoryAtPort(0); int MB = srcMemPtr->getStaticDims()[0]; diff --git a/src/plugins/intel_cpu/src/nodes/reorder.h b/src/plugins/intel_cpu/src/nodes/reorder.h index baf73b52342c68..5755afc2f7c860 100644 --- a/src/plugins/intel_cpu/src/nodes/reorder.h +++ b/src/plugins/intel_cpu/src/nodes/reorder.h @@ -6,9 +6,7 @@ #include -#if defined(OV_CPU_ARM_ENABLE_FP16) #include "nodes/executors/transpose.hpp" -#endif namespace ov { namespace intel_cpu { @@ -76,10 +74,9 @@ class Reorder : public Node { void optimizedNspc2Ncsp(); void optimizedNcsp2Nspc(); void createReorderPrimitive(const dnnl::memory::desc &srcDesc, void* srcPtr, const dnnl::memory::desc &dstDesc, void* dstPtr); -#if defined(OV_CPU_ARM_ENABLE_FP16) + void prepareReorderAsTranspose(MemoryDescPtr parentDesc, MemoryDescPtr childDesc); TransposeExecutorPtr transposeExecutor; -#endif }; } // namespace node diff --git a/src/plugins/intel_cpu/src/nodes/roi_pooling.cpp b/src/plugins/intel_cpu/src/nodes/roi_pooling.cpp index ae398dd9166cfc..1e3b5c73e40c5c 100644 --- a/src/plugins/intel_cpu/src/nodes/roi_pooling.cpp +++ b/src/plugins/intel_cpu/src/nodes/roi_pooling.cpp @@ -830,7 +830,7 @@ std::shared_ptr 
ROIPooling::ROIPoolingExecutor:: OV_SWITCH(intel_cpu, ROIPoolingExecutorCreation, ctx, jpp.src_prc, OV_CASE(ov::element::f32, float), OV_CASE(ov::element::bf16, bfloat16_t), - OV_CASE(ov::element::f16, float16_t)) + OV_CASE(ov::element::f16, ::float16_t)) return ctx.executor; } diff --git a/src/plugins/intel_cpu/src/transformations/transformation_pipeline.cpp b/src/plugins/intel_cpu/src/transformations/transformation_pipeline.cpp index e08bec765f968e..9a9673cfdbc04c 100644 --- a/src/plugins/intel_cpu/src/transformations/transformation_pipeline.cpp +++ b/src/plugins/intel_cpu/src/transformations/transformation_pipeline.cpp @@ -281,21 +281,24 @@ void Transformations::PreLpt(const std::vector& defaultPrecis // @todo should we always convert to f32 regardless of hardware support, as it is done for f16? if (!hasHardwareSupport(ov::element::bf16)) map.insert({ov::element::bf16, ov::element::f32}); -#if defined(OV_CPU_ARM_ENABLE_FP16) - if (inferencePrecision != ov::element::f16) - map.insert({ov::element::f16, ov::element::f32}); -#else - map.insert({ov::element::f16, ov::element::f32}); +#if defined(OPENVINO_ARCH_ARM) || defined(OPENVINO_ARCH_ARM64) + if (hasHardwareSupport(ov::element::f16)) { + if (inferencePrecision != ov::element::f16) { + map.insert({ov::element::f16, ov::element::f32}); + } + return map; + } #endif + map.insert({ov::element::f16, ov::element::f32}); return map; }; type_to_fuse_map type_to_fuse = {{ov::opset10::Convert::get_type_info_static(), fuse_type_to_convert}}; -#if defined(OV_CPU_ARM_ENABLE_FP16) +#if defined(OPENVINO_ARCH_ARM) || defined(OPENVINO_ARCH_ARM64) // It cannot be static data, because it may be different for different inferencePrecision const auto precisions = get_convert_precisions(); - if (inferencePrecision == ov::element::f16) { + if (hasHardwareSupport(ov::element::f16) && inferencePrecision == ov::element::f16) { precisions_map fp_convert_precision_map = {{ov::element::f32, ov::element::f16}}; type_to_fuse_map 
empty_fuse_map = {}; const bool keep_precision_sensitive_in_fp32 = true; diff --git a/src/plugins/intel_cpu/src/utils/precision_support.cpp b/src/plugins/intel_cpu/src/utils/precision_support.cpp index ebd71290624cfb..0c450fa2e47e2a 100644 --- a/src/plugins/intel_cpu/src/utils/precision_support.cpp +++ b/src/plugins/intel_cpu/src/utils/precision_support.cpp @@ -7,6 +7,10 @@ #include "cpu/x64/cpu_isa_traits.hpp" #include "openvino/core/visibility.hpp" +#if defined(OV_CPU_WITH_ACL) +#include "arm_compute/core/CPP/CPPTypes.h" +#endif + namespace ov { namespace intel_cpu { @@ -18,8 +22,8 @@ bool hasHardwareSupport(const ov::element::Type& precision) { dnnl::impl::cpu::x64::mayiuse(dnnl::impl::cpu::x64::avx2_vnni_2)) return true; return false; -#elif defined(OV_CPU_ARM_ENABLE_FP16) - return true; // @todo add runtime check for arm as well +#elif defined(OV_CPU_WITH_ACL) + return arm_compute::CPUInfo::get().has_fp16(); #else return false; #endif diff --git a/src/plugins/intel_cpu/tests/functional/custom/behavior/ov_plugin/properties.cpp b/src/plugins/intel_cpu/tests/functional/custom/behavior/ov_plugin/properties.cpp index 11de096fc0f26f..a180abd5cdfe93 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/behavior/ov_plugin/properties.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/behavior/ov_plugin/properties.cpp @@ -178,8 +178,8 @@ TEST_F(OVClassConfigTestCPU, smoke_PluginSetConfigAffinityCore) { ASSERT_EQ(false, value); } -#if defined(OV_CPU_ARM_ENABLE_FP16) - const auto expected_precision_for_performance_mode = ov::element::f16; +#if defined(OPENVINO_ARCH_ARM) || defined(OPENVINO_ARCH_ARM64) + const auto expected_precision_for_performance_mode = hasHardwareSupport(ov::element::f16) ? ov::element::f16 : ov::element::f32; #else const auto expected_precision_for_performance_mode = ov::with_cpu_x86_bfloat16() ? 
ov::element::bf16 : ov::element::f32; #endif diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/core_config.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/core_config.cpp index 0d574c8e407506..bf01c4cf4debb0 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/core_config.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/core_config.cpp @@ -11,24 +11,22 @@ void CoreConfiguration(LayerTestsUtils::LayerTestsCommon* test) { if (!configuration.count(InferenceEngine::PluginConfigParams::KEY_ENFORCE_BF16)) { configuration.insert({InferenceEngine::PluginConfigParams::KEY_ENFORCE_BF16, InferenceEngine::PluginConfigParams::NO}); } - #if defined(OV_CPU_ARM_ENABLE_FP16) - //force fp32 inference precision if it is not configured specially - if (!configuration.count(ov::hint::inference_precision.name())) { - configuration.insert({ov::hint::inference_precision.name(), ov::element::f32.to_string()}); - } - #endif + + //force fp32 inference precision if it is not configured specially + if (!configuration.count(ov::hint::inference_precision.name())) { + configuration.insert({ov::hint::inference_precision.name(), ov::element::f32.to_string()}); + } } namespace ov { namespace test { void core_configuration(ov::test::SubgraphBaseTest* test) { - #if defined(OV_CPU_ARM_ENABLE_FP16) || defined(OPENVINO_ARCH_X86) || defined(OPENVINO_ARCH_X86_64) //force fp32 inference precision if it is not configured specially if (!test->configuration.count(ov::hint::inference_precision.name())) { test->configuration.insert({ov::hint::inference_precision.name(), ov::element::f32.to_string()}); } - #endif + // todo: issue: 123320 test->convert_precisions.insert({ov::element::bf16, ov::element::f32}); test->convert_precisions.insert({ov::element::f16, ov::element::f32}); diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp 
b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp index 08805d4e586c51..e7209bdd825922 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp @@ -5,6 +5,7 @@ #include "openvino/core/visibility.hpp" #include "functional_test_utils/skip_tests_config.hpp" #include "openvino/runtime/system_conf.hpp" +#include "utils/precision_support.h" #include #include @@ -273,7 +274,6 @@ std::vector disabledTestPatterns() { // int8 specific retVector.emplace_back(R"(smoke_Quantized.*)"); -# if defined(OV_CPU_ARM_ENABLE_FP16) // Issue: 123019 retVector.emplace_back(R"(smoke_CompareWithRefs_Mvn.*INFERENCE_PRECISION_HINT=f16.*)"); retVector.emplace_back(R"(smoke_staticShapes4D.*INFERENCE_PRECISION_HINT=f16.*)"); @@ -288,7 +288,6 @@ std::vector disabledTestPatterns() { // Issue: 124395 retVector.emplace_back(R"(smoke_VariableStateBasic/InferRequestVariableStateTest.*)"); retVector.emplace_back(R"(smoke_VariableState/OVInferRequestVariableStateTest.*)"); -# endif #endif @@ -334,14 +333,14 @@ std::vector disabledTestPatterns() { retVector.emplace_back(R"(.*INFERENCE_PRECISION_HINT=(F|f)16.*)"); } #elif defined(OPENVINO_ARCH_ARM64) || defined(OPENVINO_ARCH_ARM) -# if !defined(OV_CPU_ARM_ENABLE_FP16) - // Skip fp16 tests for paltforms that don't support fp16 precision - retVector.emplace_back(R"(.*INFERENCE_PRECISION_HINT=(F|f)16.*)"); -# else - // Issue 117407 - retVector.emplace_back( - R"(.*EltwiseLayerCPUTest.*IS=\(\[1\.\.10\.2\.5\.6\]_\).*eltwiseOpType=SqDiff.*_configItem=INFERENCE_PRECISION_HINT=f16.*)"); -# endif // OV_CPU_ARM_ENABLE_FP16 + if (!hasHardwareSupport(ov::element::f16)) { + // Skip fp16 tests for paltforms that don't support fp16 precision + retVector.emplace_back(R"(.*INFERENCE_PRECISION_HINT=(F|f)16.*)"); + } else { + // Issue 117407 + retVector.emplace_back( + 
R"(.*EltwiseLayerCPUTest.*IS=\(\[1\.\.10\.2\.5\.6\]_\).*eltwiseOpType=SqDiff.*_configItem=INFERENCE_PRECISION_HINT=f16.*)"); + } #endif if (!ov::with_cpu_x86_avx512_core_vnni() && !ov::with_cpu_x86_avx512_core_amx_int8()) { // MatMul in Snippets uses BRGEMM that supports i8 only on platforms with VNNI or AMX instructions diff --git a/src/plugins/intel_cpu/thirdparty/onednn b/src/plugins/intel_cpu/thirdparty/onednn index c28d7021d63e93..e6180ce0124456 160000 --- a/src/plugins/intel_cpu/thirdparty/onednn +++ b/src/plugins/intel_cpu/thirdparty/onednn @@ -1 +1 @@ -Subproject commit c28d7021d63e93c470efc7b1a2c85b14da349ba4 +Subproject commit e6180ce0124456f4d18cf3390f1766ce62ef2e7d From 59ad63f5c5b72d8805fe9417dde8c78cd44638c1 Mon Sep 17 00:00:00 2001 From: Aleksandr Voron Date: Thu, 1 Feb 2024 18:00:10 +0000 Subject: [PATCH 5/9] cmakelists fix and comment hasHardwareSupport in tests --- .../intel_cpu/tests/functional/CMakeLists.txt | 2 +- .../custom/behavior/ov_plugin/properties.cpp | 4 +++- .../shared_tests_instances/skip_tests_config.cpp | 13 +++++++------ 3 files changed, 11 insertions(+), 8 deletions(-) diff --git a/src/plugins/intel_cpu/tests/functional/CMakeLists.txt b/src/plugins/intel_cpu/tests/functional/CMakeLists.txt index db5ae8d01c2d93..6941cb528d499e 100644 --- a/src/plugins/intel_cpu/tests/functional/CMakeLists.txt +++ b/src/plugins/intel_cpu/tests/functional/CMakeLists.txt @@ -16,7 +16,7 @@ set(LINK_LIBRARIES funcSharedTests cpuSpecificRtInfo openvino::snippets ov_snipp if(ENABLE_OV_ONNX_FRONTEND) list(APPEND DEFINES TEST_MODELS="${TEST_MODEL_ZOO}") else() - set(EXCLUDED_SOURCE_PATHS ${CMAKE_CURRENT_SOURCE_DIR}/extension ${CMAKE_CURRENT_SOURCE_DIR}/shared_tests_instances/onnx) + set(EXCLUDED_SOURCE_PATHS ${CMAKE_CURRENT_SOURCE_DIR}/custom/extension ${CMAKE_CURRENT_SOURCE_DIR}/shared_tests_instances/onnx) endif() if(NOT (ARM OR AARCH64)) diff --git a/src/plugins/intel_cpu/tests/functional/custom/behavior/ov_plugin/properties.cpp 
b/src/plugins/intel_cpu/tests/functional/custom/behavior/ov_plugin/properties.cpp index a180abd5cdfe93..6681180287f181 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/behavior/ov_plugin/properties.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/behavior/ov_plugin/properties.cpp @@ -5,6 +5,7 @@ #include #include +#include "utils/precision_support.h" #include "utils/properties_test.hpp" #include "common_test_utils/test_assertions.hpp" #include "openvino/runtime/properties.hpp" @@ -179,7 +180,8 @@ TEST_F(OVClassConfigTestCPU, smoke_PluginSetConfigAffinityCore) { } #if defined(OPENVINO_ARCH_ARM) || defined(OPENVINO_ARCH_ARM64) - const auto expected_precision_for_performance_mode = hasHardwareSupport(ov::element::f16) ? ov::element::f16 : ov::element::f32; + //TODO: fix undefined reference to hasHardwareSupport + const auto expected_precision_for_performance_mode = /*ov::intel_cpu::hasHardwareSupport(ov::element::f16) ? ov::element::f16 :*/ ov::element::f32; #else const auto expected_precision_for_performance_mode = ov::with_cpu_x86_bfloat16() ? 
ov::element::bf16 : ov::element::f32; #endif diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp index e7209bdd825922..876df37146f8f7 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp @@ -333,14 +333,15 @@ std::vector disabledTestPatterns() { retVector.emplace_back(R"(.*INFERENCE_PRECISION_HINT=(F|f)16.*)"); } #elif defined(OPENVINO_ARCH_ARM64) || defined(OPENVINO_ARCH_ARM) - if (!hasHardwareSupport(ov::element::f16)) { + //TODO: fix undefined reference to ov::intel_cpu::hasHardwareSupport + //if (!ov::intel_cpu::hasHardwareSupport(ov::element::f16)) { // Skip fp16 tests for paltforms that don't support fp16 precision - retVector.emplace_back(R"(.*INFERENCE_PRECISION_HINT=(F|f)16.*)"); - } else { + //retVector.emplace_back(R"(.*INFERENCE_PRECISION_HINT=(F|f)16.*)"); + //} else { // Issue 117407 - retVector.emplace_back( - R"(.*EltwiseLayerCPUTest.*IS=\(\[1\.\.10\.2\.5\.6\]_\).*eltwiseOpType=SqDiff.*_configItem=INFERENCE_PRECISION_HINT=f16.*)"); - } + // retVector.emplace_back( + // R"(.*EltwiseLayerCPUTest.*IS=\(\[1\.\.10\.2\.5\.6\]_\).*eltwiseOpType=SqDiff.*_configItem=INFERENCE_PRECISION_HINT=f16.*)"); + //} #endif if (!ov::with_cpu_x86_avx512_core_vnni() && !ov::with_cpu_x86_avx512_core_amx_int8()) { // MatMul in Snippets uses BRGEMM that supports i8 only on platforms with VNNI or AMX instructions From ef8a260e6c0a90721ab4452c55f07095e59978a2 Mon Sep 17 00:00:00 2001 From: Aleksandr Voron Date: Fri, 2 Feb 2024 14:02:11 +0000 Subject: [PATCH 6/9] fix undefined reference issue --- .../intel_cpu/src/utils/precision_support.cpp | 2 ++ .../intel_cpu/tests/functional/CMakeLists.txt | 14 +++++++++++++- .../custom/behavior/ov_plugin/properties.cpp | 3 +-- .../shared_tests_instances/skip_tests_config.cpp | 
13 ++++++------- 4 files changed, 22 insertions(+), 10 deletions(-) diff --git a/src/plugins/intel_cpu/src/utils/precision_support.cpp b/src/plugins/intel_cpu/src/utils/precision_support.cpp index fcb307a8924e02..ecb6d8eb82570b 100644 --- a/src/plugins/intel_cpu/src/utils/precision_support.cpp +++ b/src/plugins/intel_cpu/src/utils/precision_support.cpp @@ -4,7 +4,9 @@ #include "precision_support.h" +#if defined(OPENVINO_ARCH_X86_64) #include "cpu/x64/cpu_isa_traits.hpp" +#endif #include "openvino/core/type/element_type.hpp" #include "openvino/core/visibility.hpp" diff --git a/src/plugins/intel_cpu/tests/functional/CMakeLists.txt b/src/plugins/intel_cpu/tests/functional/CMakeLists.txt index 6941cb528d499e..a6fe5f03ce2ae0 100644 --- a/src/plugins/intel_cpu/tests/functional/CMakeLists.txt +++ b/src/plugins/intel_cpu/tests/functional/CMakeLists.txt @@ -9,9 +9,21 @@ add_library(cpuSpecificRtInfo STATIC $/src/utils/rt_info/memory_formats_attribute.cpp) target_link_libraries(cpuSpecificRtInfo PRIVATE openvino::runtime) +add_library(cpuUtils STATIC + $/src/utils/precision_support.h + $/src/utils/precision_support.cpp) +set(CPU_UTILS_LINK_LIBRARIES openvino::runtime) +if(OV_CPU_WITH_ACL) + list(APPEND CPU_UTILS_LINK_LIBRARIES arm_compute::arm_compute) +endif() +if(OV_CPU_WITH_DNNL) + list(APPEND CPU_UTILS_LINK_LIBRARIES dnnl) +endif() +target_link_libraries(cpuUtils PRIVATE ${CPU_UTILS_LINK_LIBRARIES}) + set(INCLUDES ${CMAKE_CURRENT_SOURCE_DIR} $/src) set(DEPENDENCIES openvino_intel_cpu_plugin openvino_template_extension) -set(LINK_LIBRARIES funcSharedTests cpuSpecificRtInfo openvino::snippets ov_snippets_models) +set(LINK_LIBRARIES funcSharedTests cpuSpecificRtInfo cpuUtils openvino::snippets ov_snippets_models) if(ENABLE_OV_ONNX_FRONTEND) list(APPEND DEFINES TEST_MODELS="${TEST_MODEL_ZOO}") diff --git a/src/plugins/intel_cpu/tests/functional/custom/behavior/ov_plugin/properties.cpp b/src/plugins/intel_cpu/tests/functional/custom/behavior/ov_plugin/properties.cpp index 
6681180287f181..b4ff52cb1038d4 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/behavior/ov_plugin/properties.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/behavior/ov_plugin/properties.cpp @@ -180,8 +180,7 @@ TEST_F(OVClassConfigTestCPU, smoke_PluginSetConfigAffinityCore) { } #if defined(OPENVINO_ARCH_ARM) || defined(OPENVINO_ARCH_ARM64) - //TODO: fix undefined reference to hasHardwareSupport - const auto expected_precision_for_performance_mode = /*ov::intel_cpu::hasHardwareSupport(ov::element::f16) ? ov::element::f16 :*/ ov::element::f32; + const auto expected_precision_for_performance_mode = ov::intel_cpu::hasHardwareSupport(ov::element::f16) ? ov::element::f16 : ov::element::f32; #else const auto expected_precision_for_performance_mode = ov::with_cpu_x86_bfloat16() ? ov::element::bf16 : ov::element::f32; #endif diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp index 876df37146f8f7..5394ac0c84c714 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp @@ -333,15 +333,14 @@ std::vector disabledTestPatterns() { retVector.emplace_back(R"(.*INFERENCE_PRECISION_HINT=(F|f)16.*)"); } #elif defined(OPENVINO_ARCH_ARM64) || defined(OPENVINO_ARCH_ARM) - //TODO: fix undefined reference to ov::intel_cpu::hasHardwareSupport - //if (!ov::intel_cpu::hasHardwareSupport(ov::element::f16)) { + if (!ov::intel_cpu::hasHardwareSupport(ov::element::f16)) { // Skip fp16 tests for paltforms that don't support fp16 precision - //retVector.emplace_back(R"(.*INFERENCE_PRECISION_HINT=(F|f)16.*)"); - //} else { + retVector.emplace_back(R"(.*INFERENCE_PRECISION_HINT=(F|f)16.*)"); + } else { // Issue 117407 - // retVector.emplace_back( - // 
R"(.*EltwiseLayerCPUTest.*IS=\(\[1\.\.10\.2\.5\.6\]_\).*eltwiseOpType=SqDiff.*_configItem=INFERENCE_PRECISION_HINT=f16.*)"); - //} + retVector.emplace_back( + R"(.*EltwiseLayerCPUTest.*IS=\(\[1\.\.10\.2\.5\.6\]_\).*eltwiseOpType=SqDiff.*_configItem=INFERENCE_PRECISION_HINT=f16.*)"); + } #endif if (!ov::with_cpu_x86_avx512_core_vnni() && !ov::with_cpu_x86_avx512_core_amx_int8()) { // MatMul in Snippets uses BRGEMM that supports i8 only on platforms with VNNI or AMX instructions From d88dbb5e661cf12e69b6cc00b8d2721e473e205e Mon Sep 17 00:00:00 2001 From: Aleksandr Voron Date: Fri, 2 Feb 2024 14:32:25 +0000 Subject: [PATCH 7/9] merge 2 static libs --- src/plugins/intel_cpu/tests/functional/CMakeLists.txt | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/src/plugins/intel_cpu/tests/functional/CMakeLists.txt b/src/plugins/intel_cpu/tests/functional/CMakeLists.txt index a6fe5f03ce2ae0..6fa8fc42e387e4 100644 --- a/src/plugins/intel_cpu/tests/functional/CMakeLists.txt +++ b/src/plugins/intel_cpu/tests/functional/CMakeLists.txt @@ -4,12 +4,9 @@ set(TARGET_NAME ov_cpu_func_tests) -add_library(cpuSpecificRtInfo STATIC - $/src/utils/rt_info/memory_formats_attribute.hpp - $/src/utils/rt_info/memory_formats_attribute.cpp) -target_link_libraries(cpuSpecificRtInfo PRIVATE openvino::runtime) - add_library(cpuUtils STATIC + $/src/utils/rt_info/memory_formats_attribute.hpp + $/src/utils/rt_info/memory_formats_attribute.cpp $/src/utils/precision_support.h $/src/utils/precision_support.cpp) set(CPU_UTILS_LINK_LIBRARIES openvino::runtime) From 32cb08da25eae912b58e1033c052eadaae39166c Mon Sep 17 00:00:00 2001 From: Aleksandr Voron Date: Fri, 2 Feb 2024 14:33:40 +0000 Subject: [PATCH 8/9] remove cpuSpecificRtInfo --- src/plugins/intel_cpu/tests/functional/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/plugins/intel_cpu/tests/functional/CMakeLists.txt b/src/plugins/intel_cpu/tests/functional/CMakeLists.txt index 
6fa8fc42e387e4..0b031cb9f2e161 100644 --- a/src/plugins/intel_cpu/tests/functional/CMakeLists.txt +++ b/src/plugins/intel_cpu/tests/functional/CMakeLists.txt @@ -20,7 +20,7 @@ target_link_libraries(cpuUtils PRIVATE ${CPU_UTILS_LINK_LIBRARIES}) set(INCLUDES ${CMAKE_CURRENT_SOURCE_DIR} $/src) set(DEPENDENCIES openvino_intel_cpu_plugin openvino_template_extension) -set(LINK_LIBRARIES funcSharedTests cpuSpecificRtInfo cpuUtils openvino::snippets ov_snippets_models) +set(LINK_LIBRARIES funcSharedTests cpuUtils openvino::snippets ov_snippets_models) if(ENABLE_OV_ONNX_FRONTEND) list(APPEND DEFINES TEST_MODELS="${TEST_MODEL_ZOO}") From aad4556c2860de8b24baf0733529568b37cd8a8f Mon Sep 17 00:00:00 2001 From: Aleksandr Voron Date: Fri, 2 Feb 2024 15:14:21 +0000 Subject: [PATCH 9/9] add include paths --- src/plugins/intel_cpu/tests/functional/CMakeLists.txt | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/plugins/intel_cpu/tests/functional/CMakeLists.txt b/src/plugins/intel_cpu/tests/functional/CMakeLists.txt index 0b031cb9f2e161..c1a6ecec5c4858 100644 --- a/src/plugins/intel_cpu/tests/functional/CMakeLists.txt +++ b/src/plugins/intel_cpu/tests/functional/CMakeLists.txt @@ -10,15 +10,18 @@ add_library(cpuUtils STATIC $/src/utils/precision_support.h $/src/utils/precision_support.cpp) set(CPU_UTILS_LINK_LIBRARIES openvino::runtime) +set(INCLUDE_PATHS $/src) if(OV_CPU_WITH_ACL) list(APPEND CPU_UTILS_LINK_LIBRARIES arm_compute::arm_compute) + list(APPEND INCLUDE_PATHS $) endif() if(OV_CPU_WITH_DNNL) list(APPEND CPU_UTILS_LINK_LIBRARIES dnnl) + list(APPEND INCLUDE_PATHS $/src) endif() target_link_libraries(cpuUtils PRIVATE ${CPU_UTILS_LINK_LIBRARIES}) -set(INCLUDES ${CMAKE_CURRENT_SOURCE_DIR} $/src) +set(INCLUDES ${CMAKE_CURRENT_SOURCE_DIR} ${INCLUDE_PATHS}) set(DEPENDENCIES openvino_intel_cpu_plugin openvino_template_extension) set(LINK_LIBRARIES funcSharedTests cpuUtils openvino::snippets ov_snippets_models)