From 641c00c434f502a5d27f826de017fb45697be4d8 Mon Sep 17 00:00:00 2001
From: "Liu, Dan1"
Date: Tue, 28 May 2024 15:10:43 +0800
Subject: [PATCH] NPU plugin: demote backend-loading errors to warnings to
 enable dry-run compilation without a device

Backend loading failures are now logged as warnings instead of errors so
that model compilation can proceed on hosts without an NPU device. The
backend is checked again in CompiledModel::create_infer_request(), which
throws if no device is available, so the failure is deferred from
compilation to inference.
---
 src/plugins/intel_npu/src/plugin/include/backends.hpp       | 3 +++
 src/plugins/intel_npu/src/plugin/include/compiled_model.hpp | 1 +
 src/plugins/intel_npu/src/plugin/include/plugin.hpp         | 4 ++++
 src/plugins/intel_npu/src/plugin/src/backends.cpp           | 8 +++++---
 src/plugins/intel_npu/src/plugin/src/compiled_model.cpp     | 8 ++++++++
 5 files changed, 21 insertions(+), 3 deletions(-)

diff --git a/src/plugins/intel_npu/src/plugin/include/backends.hpp b/src/plugins/intel_npu/src/plugin/include/backends.hpp
index 195c5a1199227b..6c612b1accdedb 100644
--- a/src/plugins/intel_npu/src/plugin/include/backends.hpp
+++ b/src/plugins/intel_npu/src/plugin/include/backends.hpp
@@ -36,6 +36,9 @@ class NPUBackends final {
     std::string getCompilationPlatform(const std::string_view platform, const std::string& deviceId) const;
 
     void setup(const Config& config);
+    bool is_empty() const {
+        return _backend == nullptr;
+    }
 
 private:
     Logger _logger;
diff --git a/src/plugins/intel_npu/src/plugin/include/compiled_model.hpp b/src/plugins/intel_npu/src/plugin/include/compiled_model.hpp
index 083e41072ac32f..4731fe9e4cbaf5 100644
--- a/src/plugins/intel_npu/src/plugin/include/compiled_model.hpp
+++ b/src/plugins/intel_npu/src/plugin/include/compiled_model.hpp
@@ -10,6 +10,7 @@
 #include "intel_npu/utils/logger/logger.hpp"
 #include "npu.hpp"
 #include "openvino/runtime/so_ptr.hpp"
+#include "plugin.hpp"
 
 namespace intel_npu {
 
diff --git a/src/plugins/intel_npu/src/plugin/include/plugin.hpp b/src/plugins/intel_npu/src/plugin/include/plugin.hpp
index 10f48d90ab4367..962ab8a21b3a7b 100644
--- a/src/plugins/intel_npu/src/plugin/include/plugin.hpp
+++ b/src/plugins/intel_npu/src/plugin/include/plugin.hpp
@@ -53,6 +53,10 @@ class Plugin : public ov::IPlugin {
     ov::SupportedOpsMap query_model(const std::shared_ptr<const ov::Model>& model,
                                     const ov::AnyMap& properties) const override;
 
+    bool is_backend_empty() const {
+        return _backends->is_empty();
+    }
+
 private:
     ov::SoPtr<ICompiler> getCompiler(const Config& config) const;
 
diff --git a/src/plugins/intel_npu/src/plugin/src/backends.cpp b/src/plugins/intel_npu/src/plugin/src/backends.cpp
index 29a22e6c423027..89e7eabe7c87c1 100644
--- a/src/plugins/intel_npu/src/plugin/src/backends.cpp
+++ b/src/plugins/intel_npu/src/plugin/src/backends.cpp
@@ -109,9 +109,10 @@ NPUBackends::NPUBackends(const std::vector<AvailableBackends>& backendRegistry,
             }
 #endif
         } catch (const std::exception& ex) {
-            _logger.error("Got an error during backend '%s' loading : %s", backendName.c_str(), ex.what());
+            _logger.warning("Got an issue during backend '%s' loading : %s", backendName.c_str(), ex.what());
+            // TODO: can we extract the concrete exception type and rethrow it? Needs a check against OPENVINO_THROW.
         } catch (...) {
-            _logger.error("Got an unknown error during backend '%s' loading", backendName.c_str());
+            _logger.warning("Got an unknown issue during backend '%s' loading", backendName.c_str());
         }
     }
 
@@ -127,7 +128,8 @@
     if (_backend != nullptr) {
         _logger.info("Use '%s' backend for inference", _backend->getName().c_str());
     } else {
-        _logger.error("Cannot find backend for inference. Make sure the device is available.");
+        _logger.warning("Cannot find backend. Make sure the device is available. "
+                        "Compilation can still proceed, but inference will fail!");
     }
 }
 
diff --git a/src/plugins/intel_npu/src/plugin/src/compiled_model.cpp b/src/plugins/intel_npu/src/plugin/src/compiled_model.cpp
index e2d3fc84f94764..c80108e8d28396 100644
--- a/src/plugins/intel_npu/src/plugin/src/compiled_model.cpp
+++ b/src/plugins/intel_npu/src/plugin/src/compiled_model.cpp
@@ -104,6 +104,14 @@ CompiledModel::CompiledModel(const std::shared_ptr<const ov::Model>& model,
 std::shared_ptr<ov::ISyncInferRequest> CompiledModel::create_infer_request() const {
     OV_ITT_SCOPED_TASK(itt::domains::NPUPlugin, "CompiledModel::create_infer_request");
 
+    // The backend may be missing when the model was compiled on a host without an NPU device (dry run).
+    if (std::dynamic_pointer_cast<const Plugin>(get_plugin())->is_backend_empty()) {
+        _logger.error("Cannot find backend for inference. Make sure the device is available!");
+        OPENVINO_THROW("Cannot find backend for inference. Make sure the device is available!");
+    } else {
+        _logger.info("Backend is ready for inference.");
+    }
+
     if (_executorPtr == nullptr && _device != nullptr) {
         _executorPtr = _device->createExecutor(_networkPtr, _config);
     }
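
Note for reviewers: below is a minimal, self-contained sketch (not part of
the patch) of the behavior this change enables, assuming an OpenVINO build
with the patched NPU plugin. "model.xml" is a placeholder path, and the
exact exception text comes from the new check in create_infer_request().

    #include <iostream>
    #include <openvino/openvino.hpp>

    int main() {
        ov::Core core;
        auto model = core.read_model("model.xml");  // placeholder model path

        // With this patch, compile_model() succeeds even when no NPU backend
        // could be loaded; the missing backend is only a warning here.
        auto compiled = core.compile_model(model, "NPU");

        try {
            // create_infer_request() is where the patched plugin checks the
            // backend and throws if no device is available.
            auto request = compiled.create_infer_request();
            std::cout << "NPU backend available, inference can proceed\n";
        } catch (const std::exception& ex) {
            std::cerr << "Deferred failure, as intended: " << ex.what() << "\n";
        }
        return 0;
    }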