diff --git a/.ci/azure/android_arm64.yml b/.ci/azure/android_arm64.yml index 325100e45505a4..29a2e2bf6ec5f7 100644 --- a/.ci/azure/android_arm64.yml +++ b/.ci/azure/android_arm64.yml @@ -153,6 +153,7 @@ jobs: -G Ninja -DCMAKE_VERBOSE_MAKEFILE=ON -DCMAKE_BUILD_TYPE=$(BUILD_TYPE) + -DVCPKG_BUILD_TYPE=$(BUILD_TYPE) -DVCPKG_TARGET_TRIPLET=arm64-android -DCMAKE_TOOLCHAIN_FILE=$(VCPKG_ROOT)/scripts/buildsystems/vcpkg.cmake -DVCPKG_CHAINLOAD_TOOLCHAIN_FILE=$(ANDROID_NDK_HOME)/build/cmake/android.toolchain.cmake diff --git a/.ci/azure/linux_arm64.yml b/.ci/azure/linux_arm64.yml index d1523ff8c0a5ae..f562b6b7764787 100644 --- a/.ci/azure/linux_arm64.yml +++ b/.ci/azure/linux_arm64.yml @@ -178,10 +178,11 @@ jobs: - script: | source $(BUILD_OPENVINO)/conanbuild.sh + # TODO: return tests building once GPU plugin migrates to Plugin API 2.0 cmake \ -G Ninja \ -DCMAKE_VERBOSE_MAKEFILE=ON \ - -DBUILD_SHARED_LIBS=ON \ + -DBUILD_SHARED_LIBS=OFF \ -DCMAKE_COMPILE_WARNING_AS_ERROR=ON \ -DENABLE_CPPLINT=ON \ -DENABLE_INTEL_GPU=ON \ @@ -191,7 +192,6 @@ jobs: -DPYTHON_MODULE_EXTENSION=$(aarch64-linux-gnu-python3-config --extension-suffix) \ -DPYTHON_LIBRARY=/usr/lib/aarch64-linux-gnu/libc-2.31.so \ -DPYTHON_INCLUDE_DIR=$(Agent.ToolsDirectory)/Python/$(OV_PYTHON_VERSION)/x64/include/python$(OV_PYTHON_VERSION_MAJOR_MINOR) \ - -DENABLE_TESTS=ON \ -DENABLE_DATA=OFF \ -DENABLE_SYSTEM_TBB=ON \ -DENABLE_SYSTEM_PROTOBUF=ON \ @@ -226,9 +226,3 @@ jobs: env: CMAKE_TOOLCHAIN_FILE: $(BUILD_OPENVINO)/conan_toolchain.cmake displayName: 'Build OpenVINO C++ samples' - - - task: PublishBuildArtifacts@1 - inputs: - PathtoPublish: $(INSTALL_OPENVINO) - ArtifactName: 'openvino_aarch64_linux' - displayName: 'Publish OpenVINO Runtime for ARM' diff --git a/cmake/features.cmake b/cmake/features.cmake index 2946c38eb72417..fc7a07252e6982 100644 --- a/cmake/features.cmake +++ b/cmake/features.cmake @@ -152,6 +152,15 @@ else() set(ENABLE_SYSTEM_PUGIXML_DEFAULT OFF) endif() +if(ANDROID) + # when flatbuffers from 
/usr/include is used, then Android toolchain ignores include paths + # but if we build for Android using vcpkg / conan / etc where flatbuffers is not located in + # the /usr/include folders, we can still use 'system' flatbuffers + set(ENABLE_SYSTEM_FLATBUFFERS_DEFAULT OFF) +else() + set(ENABLE_SYSTEM_FLATBUFFERS_DEFAULT ON) +endif() + # users wants to use his own TBB version, specific either via env vars or cmake options if(DEFINED ENV{TBBROOT} OR DEFINED ENV{TBB_DIR} OR DEFINED TBB_DIR OR DEFINED TBBROOT) set(ENABLE_SYSTEM_TBB_DEFAULT OFF) @@ -163,7 +172,7 @@ ie_dependent_option (ENABLE_SYSTEM_TBB "Enables use of system TBB" ${ENABLE_SYS # available out of box on all systems (like RHEL, UBI) ie_option (ENABLE_SYSTEM_PUGIXML "Enables use of system PugiXML" ${ENABLE_SYSTEM_PUGIXML_DEFAULT}) # the option is on by default, because we use only flatc compiler and don't use any libraries -ie_dependent_option(ENABLE_SYSTEM_FLATBUFFERS "Enables use of system flatbuffers" ON +ie_dependent_option(ENABLE_SYSTEM_FLATBUFFERS "Enables use of system flatbuffers" ${ENABLE_SYSTEM_FLATBUFFERS_DEFAULT} "ENABLE_OV_TF_LITE_FRONTEND" OFF) ie_dependent_option (ENABLE_SYSTEM_OPENCL "Enables use of system OpenCL" ${ENABLE_SYSTEM_LIBS_DEFAULT} "ENABLE_INTEL_GPU" OFF) diff --git a/cmake/templates/OpenVINOConfig.cmake.in b/cmake/templates/OpenVINOConfig.cmake.in index 3c0c0a8c536349..29af7dd50e5b15 100644 --- a/cmake/templates/OpenVINOConfig.cmake.in +++ b/cmake/templates/OpenVINOConfig.cmake.in @@ -345,20 +345,15 @@ macro(_ov_find_intel_cpu_dependencies) if(_OV_ENABLE_CPU_ACL) if(_ov_as_external_package) set_and_check(ARM_COMPUTE_LIB_DIR "@PACKAGE_ARM_COMPUTE_LIB_DIR@") - set(_ov_find_acl_options NO_DEFAULT_PATH) - set(_ov_find_acl_path "${CMAKE_CURRENT_LIST_DIR}") + set(ACL_DIR "${CMAKE_CURRENT_LIST_DIR}") else() - set_and_check(_ov_find_acl_path "@PACKAGE_FIND_ACL_PATH@") + set_and_check(ACL_DIR "@PACKAGE_FIND_ACL_PATH@") endif() - _ov_find_dependency(ACL - NO_MODULE - PATHS 
"${_ov_find_acl_path}" - ${_ov_find_acl_options}) + _ov_find_dependency(ACL) unset(ARM_COMPUTE_LIB_DIR) - unset(_ov_find_acl_path) + unset(ACL_DIR) - unset(_ov_find_acl_options) endif() unset(_OV_ENABLE_CPU_ACL) endmacro() diff --git a/src/frontends/common/include/openvino/frontend/extension/conversion.hpp b/src/frontends/common/include/openvino/frontend/extension/conversion.hpp index 92d66e485c6c36..8619012d8f3d6d 100644 --- a/src/frontends/common/include/openvino/frontend/extension/conversion.hpp +++ b/src/frontends/common/include/openvino/frontend/extension/conversion.hpp @@ -20,7 +20,7 @@ class FRONTEND_API ConversionExtensionBase : public ov::Extension { return m_op_type; } - ~ConversionExtensionBase() override = 0; + ~ConversionExtensionBase() override; private: std::string m_op_type; diff --git a/src/inference/dev_api/threading/ie_cpu_streams_executor.hpp b/src/inference/dev_api/threading/ie_cpu_streams_executor.hpp index 12c2232a572e5d..312963fd45a8bd 100644 --- a/src/inference/dev_api/threading/ie_cpu_streams_executor.hpp +++ b/src/inference/dev_api/threading/ie_cpu_streams_executor.hpp @@ -33,7 +33,7 @@ class INFERENCE_ENGINE_API_CLASS(CPUStreamsExecutor) : public IStreamsExecutor { * @brief Constructor * @param config Stream executor parameters */ - explicit CPUStreamsExecutor(const InferenceEngine::IStreamsExecutor::Config& config = {}); + explicit CPUStreamsExecutor(const IStreamsExecutor::Config& config = {}); /** * @brief A class destructor diff --git a/src/inference/src/dev/threading/cpu_streams_executor.cpp b/src/inference/src/dev/threading/cpu_streams_executor.cpp index 89676cd500b269..1f689be5623468 100644 --- a/src/inference/src/dev/threading/cpu_streams_executor.cpp +++ b/src/inference/src/dev/threading/cpu_streams_executor.cpp @@ -428,7 +428,7 @@ struct CPUStreamsExecutor::Impl { std::queue _taskQueue; bool _isStopped = false; std::vector _usedNumaNodes; - ov::threading::ThreadLocal> _streams; + ThreadLocal> _streams; #if (OV_THREAD == OV_THREAD_TBB || OV_THREAD 
== OV_THREAD_TBB_AUTO) // stream id mapping to the core type // stored in the reversed order (so the big cores, with the highest core_type_id value, are populated first) @@ -451,8 +451,7 @@ int CPUStreamsExecutor::get_numa_node_id() { return stream->_numaNodeId; } -CPUStreamsExecutor::CPUStreamsExecutor(const ov::threading::IStreamsExecutor::Config& config) - : _impl{new Impl{config}} {} +CPUStreamsExecutor::CPUStreamsExecutor(const IStreamsExecutor::Config& config) : _impl{new Impl{config}} {} CPUStreamsExecutor::~CPUStreamsExecutor() { { diff --git a/src/inference/src/threading/ie_cpu_streams_executor.cpp b/src/inference/src/threading/ie_cpu_streams_executor.cpp index 676a9c3759db5c..a4a06529393f01 100644 --- a/src/inference/src/threading/ie_cpu_streams_executor.cpp +++ b/src/inference/src/threading/ie_cpu_streams_executor.cpp @@ -26,8 +26,6 @@ #include "threading/ie_thread_affinity.hpp" #include "threading/ie_thread_local.hpp" -using namespace openvino; - namespace InferenceEngine { struct CPUStreamsExecutor::Impl : public ov::threading::CPUStreamsExecutor { Impl(const InferenceEngine::IStreamsExecutor::Config& config) : ov::threading::CPUStreamsExecutor(config) {} @@ -41,7 +39,7 @@ int CPUStreamsExecutor::GetNumaNodeId() { return _impl->get_numa_node_id(); } -CPUStreamsExecutor::CPUStreamsExecutor(const Config& config) : _impl{new Impl(config)} {} +CPUStreamsExecutor::CPUStreamsExecutor(const IStreamsExecutor::Config& config) : _impl{new Impl(config)} {} CPUStreamsExecutor::~CPUStreamsExecutor() {}