From a6a52b35d807e7097e5411a24eecdcdab70e6ddb Mon Sep 17 00:00:00 2001
From: "Hu, Yuan2"
Date: Mon, 6 Sep 2021 14:31:04 +0800
Subject: [PATCH] Enable CPU to accelerate FIL (first-inference latency) in
 MULTI

Signed-off-by: Hu, Yuan2
---
 .../multi_device_exec_network.cpp             | 337 ++++++++++++++----
 .../multi_device_exec_network.hpp             |  39 +-
 .../src/multi_device/multi_device_plugin.cpp  | 152 ++++++--
 .../src/multi_device/multi_device_plugin.hpp  |   8 +-
 .../behavior/infer_request/perf_counters.cpp  |  11 +
 .../behavior/infer_request/config.cpp         |  13 -
 .../behavior/infer_request/perf_counters.cpp  |  11 +
 .../behavior/test_plugin.cpp                  |   8 +-
 .../behavior/config.cpp                       |   9 +-
 .../behavior/infer_request/callback.cpp       |  13 +-
 .../behavior/infer_request/perf_counters.cpp  |  12 +
 .../behavior/infer_request/wait.cpp           |  10 +-
 .../behavior/preprocessing/set_preprocess.cpp |  10 +-
 .../behavior/test_plugin.cpp                  |  12 +-
 .../behavior/version.cpp                      |   8 +-
 15 files changed, 539 insertions(+), 114 deletions(-)

diff --git a/inference-engine/src/multi_device/multi_device_exec_network.cpp b/inference-engine/src/multi_device/multi_device_exec_network.cpp
index b569a0cf40d863..3a2a3673e14aa8 100644
--- a/inference-engine/src/multi_device/multi_device_exec_network.cpp
+++ b/inference-engine/src/multi_device/multi_device_exec_network.cpp
@@ -11,16 +11,46 @@
 #include <...>
 #include <...>
-
+#include "ie_icore.hpp"
 #include "ie_metric_helpers.hpp"
 #include <...>
 #include "multi_device_exec_network.hpp"
 #include "multi_device_async_infer_request.hpp"
 #include "multi_device_plugin.hpp"
+#include "ngraph/opsets/opset1.hpp"
+#include "ngraph_ops/convolution_ie.hpp"
+#include "ngraph_ops/deconvolution_ie.hpp"
+#include "transformations/utils/utils.hpp"
+
 // ------------------------------MultiDeviceExecutableNetwork----------------------------
 namespace MultiDevicePlugin {
-    using namespace InferenceEngine;
+using namespace InferenceEngine;
+
+namespace {
+std::string GetNetworkPrecision(const InferenceEngine::CNNNetwork &network) {
+    auto nGraphFunc = network.getFunction();
+    bool isINTModel = ngraph::op::util::has_op_with_type<ngraph::op::FakeQuantize>(nGraphFunc);
+    if (isINTModel) {
+        return METRIC_VALUE(INT8);
+    }
+    for (auto & node : nGraphFunc->get_ordered_ops()) {
+        if (std::dynamic_pointer_cast<ngraph::opset1::Convolution>(node) ||
+            std::dynamic_pointer_cast<ngraph::opset1::GroupConvolution>(node) ||
+            std::dynamic_pointer_cast<ngraph::opset1::GroupConvolutionBackpropData>(node) ||
+            std::dynamic_pointer_cast<ngraph::opset1::ConvolutionBackpropData>(node) ||
+            std::dynamic_pointer_cast<ngraph::op::ConvolutionIE>(node) ||
+            std::dynamic_pointer_cast<ngraph::op::DeconvolutionIE>(node)) {
+            auto layerType = node->input(1).get_element_type().get_type_name();
+            if (layerType == "f32")
+                return METRIC_VALUE(FP32);
+            if (layerType == "f16")
+                return METRIC_VALUE(FP16);
+        }
+    }
+    return METRIC_VALUE(FP32);
+}
+}  // namespace

 thread_local MultiDeviceExecutableNetwork::WorkerInferRequest* MultiDeviceExecutableNetwork::_thisWorkerInferRequest = nullptr;
 // TODO: revert to the plain variable (see header file), when we moved to the next CentOS 8.x in our support matrix
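
Note (illustration, not part of the patch): the precision probe above feeds the device selection that AUTO performs. A minimal sketch of the application-side flow this patch enables, using the 2021-era InferenceEngine C++ API; the model path is a placeholder.

    #include <inference_engine.hpp>

    int main() {
        InferenceEngine::Core core;
        auto network = core.ReadNetwork("model.xml");  // placeholder path
        // With "AUTO", first inferences can be served by the quickly loaded CPU
        // network while the accelerator network keeps loading in the background.
        auto execNet = core.LoadNetwork(network, "AUTO");
        auto request = execNet.CreateInferRequest();
        request.Infer();
        return 0;
    }
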
@@ -60,74 +90,215 @@ MultiDeviceExecutableNetwork::MultiDeviceExecutableNetwork(const DeviceMap<InferenceEngine::SoExecutableNetworkInternal>&
+void MultiDeviceExecutableNetwork::GenerateWorkers(const std::string& device, const SoExecutableNetworkInternal& executableNetwork) {
+    auto itNumRequests = std::find_if(_devicePriorities.cbegin(), _devicePriorities.cend(),
+                                      [&device](const DeviceInformation& d){ return d.deviceName == device;});
+    unsigned int optimalNum = 0;
+    try {
+        optimalNum = executableNetwork->GetMetric(METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS)).as<unsigned int>();
+    } catch (const InferenceEngine::Exception &iie) {
+        IE_THROW()
+            << "Every device used with the Multi-Device should "
+            << "support OPTIMAL_NUMBER_OF_INFER_REQUESTS ExecutableNetwork metric. "
+            << "Failed to query the metric for the " << device << " with error:" << iie.what();
+    }
+    const auto numRequests = (_devicePriorities.end() == itNumRequests ||
+                              itNumRequests->numRequestsPerDevices == -1) ? optimalNum : itNumRequests->numRequestsPerDevices;
+    auto& workerRequests = _workerRequests[device];
+    auto& idleWorkerRequests = _idleWorkerRequests[device];
+    workerRequests.resize(numRequests);
+    _inferPipelineTasksDeviceSpecific[device] = std::unique_ptr<ThreadSafeQueue<Task>>(new ThreadSafeQueue<Task>);
+    auto* idleWorkerRequestsPtr = &(idleWorkerRequests);
+    idleWorkerRequests.set_capacity(numRequests);
+    for (auto&& workerRequest : workerRequests) {
+        workerRequest._inferRequest = { executableNetwork, executableNetwork->CreateInferRequest() };
+        auto* workerRequestPtr = &workerRequest;
+        IE_ASSERT(idleWorkerRequests.try_push(workerRequestPtr) == true);
+        workerRequest._inferRequest->SetCallback(
+            [workerRequestPtr, this, device, idleWorkerRequestsPtr] (std::exception_ptr exceptionPtr) mutable {
+                IdleGuard idleGuard{workerRequestPtr, *idleWorkerRequestsPtr};
+                workerRequestPtr->_exceptionPtr = exceptionPtr;
+                {
+                    auto capturedTask = std::move(workerRequestPtr->_task);
+                    capturedTask();
+                }
+                // try to return the request to the idle list (fails if the overall object destruction has begun)
+                if (idleGuard.Release()->try_push(workerRequestPtr)) {
+                    // let's try to pop a task, as we know there is at least one idle request, schedule if succeeded
+                    // if no device-agnostic tasks, let's try pop the device specific task, schedule if succeeded
+                    Task t;
+                    if (_inferPipelineTasks.try_pop(t))
+                        ScheduleToWorkerInferRequest(std::move(t));
+                    else if (_inferPipelineTasksDeviceSpecific[device]->try_pop(t))
+                        ScheduleToWorkerInferRequest(std::move(t), device);
+                }
+            });
+    }
+}
+
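Note (illustration, not part of the patch): GenerateWorkers sizes each per-device pool from the OPTIMAL_NUMBER_OF_INFER_REQUESTS metric unless a request count was pinned via the device priorities. An application can query the same metric on a loaded network:

    // assumes execNet from a prior LoadNetwork call, as in the earlier sketch
    unsigned int nireq =
        execNet.GetMetric(METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS)).as<unsigned int>();
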
+MultiDeviceExecutableNetwork::MultiDeviceExecutableNetwork(const std::string& modelPath,
+                                                           const InferenceEngine::CNNNetwork& network,
+                                                           const std::vector<DeviceInformation>& metaDevices,
+                                                           const std::string& strDevices,
+                                                           MultiDeviceInferencePlugin* plugin,
+                                                           const bool needPerfCounters)
+    : _devicePriorities{metaDevices}
+    , _devicePrioritiesInitial{metaDevices}
+    , _needPerfCounters(needPerfCounters)
+    , _multiPlugin(plugin)
+    , _workModeIsAUTO(true) {
+    if (_multiPlugin->GetCore() == nullptr) {
+        IE_THROW() << "Please, work with MULTI device via InferenceEngine::Core object";
+    }
+
+    if (modelPath.empty() && network.getFunction() == nullptr) {
+        IE_THROW() << "MULTI device supports only ngraph network representation";
+    }
+
+    _core = _multiPlugin->GetCore();  // shared_ptr that holds the Core
+    _config[MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES] = strDevices;
+
+    std::vector<DeviceInformation> needLoadDevices;
+
+    // check whether a CPU device is present
+    const auto CPUIter = std::find_if(metaDevices.begin(), metaDevices.end(),
+                                      [=](const DeviceInformation& d)->bool{return d.deviceName.find("CPU") != std::string::npos;});
+    if (CPUIter != metaDevices.end()) {
+        _cpuDevice = *CPUIter;
+        _config.insert(_cpuDevice.config.begin(), _cpuDevice.config.end());
+        needLoadDevices.push_back(_cpuDevice);
+        _cpuFuture = _cpuPromise.get_future();
+    }
+
+    // get the accelerator device, e.g. GPU
+    auto networkPrecision = GetNetworkPrecision(network);
+    _acceleratorDevice = _multiPlugin->SelectDevice(metaDevices, networkPrecision);
+    bool isAccelerator =
+        _acceleratorDevice.deviceName.find("CPU") == std::string::npos;
+    if (isAccelerator) {
+        _config.insert(_acceleratorDevice.config.begin(), _acceleratorDevice.config.end());
+        needLoadDevices.push_back(_acceleratorDevice);
+        _acceleratorFuture = _acceleratorPromise.get_future();
+    }
+
+    if (needLoadDevices.size() == 0) {
+        IE_THROW() << "No device set";
+    }
-        auto itNumRequests = std::find_if(_devicePriorities.cbegin(), _devicePriorities.cend(),
-                                          [&device](const DeviceInformation& d){ return d.deviceName == device;});
-        unsigned int optimalNum = 0;
+
+    // loading of the accelerator network is not awaited, so the executor must not be
+    // destroyed before that task finishes; keep the executor as a member of
+    // MultiDeviceExecutableNetwork.
+    _executor = InferenceEngine::ExecutorManager::getInstance()->getIdleCPUStreamsExecutor(
+        IStreamsExecutor::Config{"AutoDeviceAsyncLoad",
+                                 static_cast<int>(std::thread::hardware_concurrency()) /* max possible #streams */,
+                                 1 /* single thread per stream */,
+                                 IStreamsExecutor::ThreadBindingType::NONE});
+
+    for (auto& p : needLoadDevices) {
+        // initialize these containers up front, to avoid insertion from the worker threads
+        _idleWorkerRequests[p.deviceName];
+        _workerRequests[p.deviceName];
+        _inferPipelineTasksDeviceSpecific[p.deviceName] = nullptr;
+        const auto device = p.deviceName;
+        const auto deviceConfig = p.config;
+        // loading of the accelerator network is not awaited, so these parameters
+        // need to be captured by value.
+        _executor->run([&, modelPath, network, device, deviceConfig]() {
+            SoExecutableNetworkInternal executableNetwork;
+            if (!modelPath.empty()) {
+                executableNetwork = _core->LoadNetwork(modelPath, device, deviceConfig);
+            } else {
+                executableNetwork = _core->LoadNetwork(network, device, deviceConfig);
+            }
+
+            GenerateWorkers(device, executableNetwork);
+
+            if (device.find("CPU") == std::string::npos) {
+                _alreadyActualNetwork = true;
+                _acceleratorPromise.set_value(executableNetwork);
+            } else {
+                _cpuPromise.set_value(executableNetwork);
+            }
+        });
+    }
+
+    WaitFirstNetworkReady();
+}
+
+void MultiDeviceExecutableNetwork::WaitFirstNetworkReady() {
+    if (_alreadyActualNetwork) {
+        return;
+    }
+    if (_cpuFuture.valid() && _acceleratorFuture.valid()) {
         try {
-            optimalNum = network->GetMetric(METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS)).as<unsigned int>();
-        } catch (const InferenceEngine::Exception &iie) {
-            IE_THROW()
-                << "Every device used with the Multi-Device should "
-                << "support OPTIMAL_NUMBER_OF_INFER_REQUESTS ExecutableNetwork metric. "
-                << "Failed to query the metric for the " << device << " with error:" << iie.what();
-        }
-        const auto numRequests = (_devicePriorities.end() == itNumRequests ||
-                                  itNumRequests->numRequestsPerDevices == -1) ? optimalNum : itNumRequests->numRequestsPerDevices;
-        auto& workerRequests = _workerRequests[device];
-        auto& idleWorkerRequests = _idleWorkerRequests[device];
-        workerRequests.resize(numRequests);
-        _inferPipelineTasksDeviceSpecific[device] = std::unique_ptr<ThreadSafeQueue<Task>>(new ThreadSafeQueue<Task>);
-        auto* idleWorkerRequestsPtr = &(idleWorkerRequests);
-        idleWorkerRequests.set_capacity(numRequests);
-        for (auto&& workerRequest : workerRequests) {
-            workerRequest._inferRequest = { network, network->CreateInferRequest() };
-            auto* workerRequestPtr = &workerRequest;
-            IE_ASSERT(idleWorkerRequests.try_push(workerRequestPtr) == true);
-            workerRequest._inferRequest->SetCallback(
-                [workerRequestPtr, this, device, idleWorkerRequestsPtr] (std::exception_ptr exceptionPtr) mutable {
-                    IdleGuard idleGuard{workerRequestPtr, *idleWorkerRequestsPtr};
-                    workerRequestPtr->_exceptionPtr = exceptionPtr;
-                    {
-                        auto capturedTask = std::move(workerRequestPtr->_task);
-                        capturedTask();
-                    }
-                    // try to return the request to the idle list (fails if the overall object destruction has began)
-                    if (idleGuard.Release()->try_push(workerRequestPtr)) {
-                        // let's try to pop a task, as we know there is at least one idle request, schedule if succeeded
-                        // if no device-agnostic tasks, let's try pop the device specific task, schedule if succeeded
-                        Task t;
-                        if (_inferPipelineTasks.try_pop(t))
-                            ScheduleToWorkerInferRequest(std::move(t));
-                        else if (_inferPipelineTasksDeviceSpecific[device]->try_pop(t))
-                            ScheduleToWorkerInferRequest(std::move(t), device);
-                    }
-                });
+            _networkFirstReady = _cpuFuture.get();
+        } catch (const std::exception& e) {
+            printf("Warning: load network to CPU failed: %s\n", e.what());
+            _networkActualNeeded = _acceleratorFuture.get();
         }
+    } else if (_acceleratorFuture.valid()) {  // only the accelerator is valid, e.g. AUTO:GPU
+        _networkActualNeeded = _acceleratorFuture.get();
+    } else if (_cpuFuture.valid()) {  // only the CPU is valid, e.g. AUTO:CPU
+        _networkActualNeeded = _cpuFuture.get();
+    } else {
+        IE_THROW() << "No device task available";
     }
+
+    // if there is only one device, or loading on the CPU device failed,
+    // the actual network is already the one in use
+    if (!_acceleratorFuture.valid()) {
+        _alreadyActualNetwork = true;
+    }
+}
+
+void MultiDeviceExecutableNetwork::WaitActualNetworkReady() const {
+    // several APIs may call this function, so call_once is used to run the wait
+    // only once per MultiDeviceExecutableNetwork instance
+    std::call_once(_oc, [&] () {
+        if (_acceleratorFuture.valid()) {
+            _networkActualNeeded = _acceleratorFuture.get();
+        }
+    });
+}
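
Note (illustration, not part of the patch): WaitFirstNetworkReady/WaitActualNetworkReady implement a two-future handoff. A self-contained sketch of the same pattern with plain std::future, with strings standing in for networks and the load times simulated:

    #include <chrono>
    #include <cstdio>
    #include <future>
    #include <string>
    #include <thread>

    int main() {
        std::promise<std::string> cpuPromise, accPromise;
        auto cpuFuture = cpuPromise.get_future();
        auto accFuture = accPromise.get_future();
        // CPU loads fast; the accelerator loads slowly.
        std::thread cpu([&] { cpuPromise.set_value("cpu-network"); });
        std::thread acc([&] {
            std::this_thread::sleep_for(std::chrono::milliseconds(100));
            accPromise.set_value("accelerator-network");
        });
        std::printf("serving on %s\n", cpuFuture.get().c_str());   // WaitFirstNetworkReady analogue
        std::printf("switch to %s\n", accFuture.get().c_str());    // WaitActualNetworkReady analogue
        cpu.join();
        acc.join();
        return 0;
    }
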
 
 void MultiDeviceExecutableNetwork::ScheduleToWorkerInferRequest(Task inferPipelineTask, DeviceName preferred_device) {
-    auto devices = [&] {
-        std::lock_guard<std::mutex> lock(_mutex);
-        return _devicePriorities;
-    }();
+    std::vector<DeviceInformation> devices;
+    // AUTO work mode
+    if (_workModeIsAUTO) {
+        if (!preferred_device.empty()) {
+            // in AUTO work mode, the preferred device must be the selected device
+            if (preferred_device != _acceleratorDevice.deviceName) {
+                IE_THROW(NotFound) << "The preferred_device should be the selected device";
+            }
+            // if the device the caller needs is not ready yet, wait for it
+            WaitActualNetworkReady();
+            devices.push_back(_acceleratorDevice);
+        } else {
+            // _acceleratorDevice could be the same as _cpuDevice, e.g. AUTO:CPU
+            if (_alreadyActualNetwork) {
+                devices.push_back(_acceleratorDevice);
+            } else {
+                devices.push_back(_cpuDevice);
+            }
+        }
+    } else {
+        devices = [&] {
+            std::lock_guard<std::mutex> lock(_mutex);
+            return _devicePriorities;
+        }();
+    }
     for (auto&& device : devices) {
         if (!preferred_device.empty() && (device.deviceName != preferred_device))
             continue;
-        WorkerInferRequest* workerRequestPtr = nullptr;
-        NotBusyWorkerRequests& idleWorkerRequests = _idleWorkerRequests[device.deviceName];
-        if (idleWorkerRequests.try_pop(workerRequestPtr)) {
-            IdleGuard idleGuard{workerRequestPtr, idleWorkerRequests};
-            _thisWorkerInferRequest = workerRequestPtr;
-            {
-                auto capturedTask = std::move(inferPipelineTask);
-                capturedTask();
-            }
-            idleGuard.Release();
+        if (RunPipelineTask(inferPipelineTask, _idleWorkerRequests[device.deviceName], preferred_device)) {
             return;
         }
     }
+
     // no vacant requests this time, storing the task to the respective queue
     if (!preferred_device.empty())
         _inferPipelineTasksDeviceSpecific[preferred_device]->push(std::move(inferPipelineTask));
@@ -135,11 +306,35 @@ void MultiDeviceExecutableNetwork::ScheduleToWorkerInferRequest(Task inferPipeli
     _inferPipelineTasks.push(std::move(inferPipelineTask));
 }
 
+bool MultiDeviceExecutableNetwork::RunPipelineTask(Task& inferPipelineTask,
+                                                   NotBusyWorkerRequests& idleWorkerRequests,
+                                                   const DeviceName& preferred_device) {
+    WorkerInferRequest *workerRequestPtr = nullptr;
+    if (idleWorkerRequests.try_pop(workerRequestPtr)) {
+        IdleGuard idleGuard{workerRequestPtr, idleWorkerRequests};
+        _thisWorkerInferRequest = workerRequestPtr;
+        {
+            auto capturedTask = std::move(inferPipelineTask);
+            capturedTask();
+        }
+        idleGuard.Release();
+        return true;
+    }
+    return false;
+}
+
 void MultiDeviceExecutableNetwork::run(Task inferPipelineTask) {
     ScheduleToWorkerInferRequest(std::move(inferPipelineTask), _thisPreferredDeviceName);
 }
 
 MultiDeviceExecutableNetwork::~MultiDeviceExecutableNetwork() {
+    // necessary so that members are destroyed only after the loading futures have been retrieved
+    if (_workModeIsAUTO) {
+        WaitActualNetworkReady();
+        // the network-loading threads must finish before destruction proceeds
+        InferenceEngine::ExecutorManager::getInstance()->clear("AutoDeviceAsyncLoad");
+        _executor.reset();
+    }
     {
         std::lock_guard<std::mutex> lock(_mutex);
         _devicePriorities.clear();
@@ -147,14 +342,19 @@ MultiDeviceExecutableNetwork::~MultiDeviceExecutableNetwork() {
     /* NOTE: The only threads that use `MultiDeviceExecutableNetwork` worker infer requests' threads.
      * But AsyncInferRequest destructor should wait for all asynchronous tasks by the request
      */
-    for (auto&& networkValue : _networksPerDevice) {
+    for (auto&& idleWorker : _idleWorkerRequests) {
         // stop accepting any idle requests back (for re-scheduling)
-        _idleWorkerRequests.at(networkValue.first).set_capacity(0);
+        idleWorker.second.set_capacity(0);
     }
     _workerRequests.clear();
 }
 
 std::shared_ptr<InferenceEngine::RemoteContext> MultiDeviceExecutableNetwork::GetContext() const {
+    if (_workModeIsAUTO) {
+        WaitActualNetworkReady();
+        return _networkActualNeeded->GetContext();
+    }
+
     auto devices = [&] {
         std::lock_guard<std::mutex> lock(_mutex);
         return _devicePriorities;
@@ -177,6 +377,11 @@ InferenceEngine::IInferRequestInternal::Ptr MultiDeviceExecutableNetwork::Create
     auto num = _numRequestsCreated++;
     size_t sum = 0;
     InferenceEngine::SoIInferRequestInternal request_to_share_blobs_with;
+
+    if (_workModeIsAUTO) {
+        return std::make_shared<MultiDeviceInferRequest>(networkInputs, networkOutputs, request_to_share_blobs_with);
+    }
+
     // borrowing device-specific blobs from the underlying requests for the device-agnostic, user-facing requests
     // this allows to potentially save on the data-copy later (if the requests are scheduled in the same order)
     for (const auto& device : _devicePrioritiesInitial) {
@@ -200,6 +405,10 @@ IInferRequestInternal::Ptr MultiDeviceExecutableNetwork::CreateInferRequest() {
 }
 
 void MultiDeviceExecutableNetwork::SetConfig(const std::map<std::string, InferenceEngine::Parameter> &config) {
+    if (_workModeIsAUTO) {
+        IE_THROW(NotImplemented);
+    }
+
     auto priorities = config.find(MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES);
     if (priorities == config.end() || config.size() > 1) {
         IE_THROW() << "The only config supported for the Network's SetConfig is MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES";
@@ -252,6 +461,15 @@ InferenceEngine::Parameter MultiDeviceExecutableNetwork::GetConfig(const std::st
 }
 
 InferenceEngine::Parameter MultiDeviceExecutableNetwork::GetMetric(const std::string &name) const {
+    if (_workModeIsAUTO) {
+        // FIXME: should we wait for the actual device here? That would block inference meanwhile; how to fix?
+        if (_alreadyActualNetwork) {
+            WaitActualNetworkReady();
+            return _networkActualNeeded->GetMetric(name);
+        }
+        return _networkFirstReady->GetMetric(name);
+    }
+
     if (name == METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS)) {
         unsigned int res = 0u;
         for (auto n : _networksPerDevice) {
@@ -284,5 +502,4 @@ InferenceEngine::Parameter MultiDeviceExecutableNetwork::GetMetric(const std::st
         IE_THROW() << "Unsupported Network metric: " << name;
     }
 }
-
 }  // namespace MultiDevicePlugin
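
Note (illustration, not part of the patch): the destructor quiesces workers through the bounded idle queue; setting its capacity to 0 makes every subsequent try_push fail, so a worker that finishes a task can no longer re-register as idle while the pools are torn down. A sketch using only the queue operations visible in this patch:

    // idleWorkerRequests is a NotBusyWorkerRequests, i.e. ThreadSafeBoundedQueue<WorkerInferRequest*>
    idleWorkerRequests.set_capacity(0);          // from now on, try_push() returns false
    WorkerInferRequest* w = nullptr;
    while (idleWorkerRequests.try_pop(w)) { }    // drain whatever is still queued
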
diff --git a/inference-engine/src/multi_device/multi_device_exec_network.hpp b/inference-engine/src/multi_device/multi_device_exec_network.hpp
index 2fb6e9462a7dc2..2fd86c63170fad 100644
--- a/inference-engine/src/multi_device/multi_device_exec_network.hpp
+++ b/inference-engine/src/multi_device/multi_device_exec_network.hpp
@@ -16,14 +16,21 @@
 #include <...>
 #include <...>
 #include <...>
+#include <future>
+#include "ie_icore.hpp"
 
 #if (IE_THREAD == IE_THREAD_TBB || IE_THREAD == IE_THREAD_TBB_AUTO)
 # include <...>
 #endif
+
 namespace MultiDevicePlugin {
 
+class MultiDeviceInferencePlugin;
+
 using DeviceName = std::string;
+using NetworkFuture = std::future<InferenceEngine::SoExecutableNetworkInternal>;
+using NetworkPromise = std::promise<InferenceEngine::SoExecutableNetworkInternal>;
 
 struct DeviceInformation {
     DeviceName deviceName;
@@ -105,10 +112,16 @@ class MultiDeviceExecutableNetwork : public InferenceEngine::ExecutableNetworkTh
     };
     using NotBusyWorkerRequests = ThreadSafeBoundedQueue<WorkerInferRequest*>;
 
-    explicit MultiDeviceExecutableNetwork(const DeviceMap<InferenceEngine::SoExecutableNetworkInternal>& networksPerDevice,
+    explicit MultiDeviceExecutableNetwork(const DeviceMap<InferenceEngine::SoExecutableNetworkInternal>& networksPerDevice,
                                           const std::vector<DeviceInformation>& networkDevices,
                                           const std::unordered_map<std::string, InferenceEngine::Parameter>& config,
                                           const bool needPerfCounters = false);
+    MultiDeviceExecutableNetwork(const std::string& modelPath,
+                                 const InferenceEngine::CNNNetwork& network,
+                                 const std::vector<DeviceInformation>& metaDevices,
+                                 const std::string& strDevices,
+                                 MultiDeviceInferencePlugin* plugin,
+                                 const bool needPerfCounters = false);
 
     void SetConfig(const std::map<std::string, InferenceEngine::Parameter> &config) override;
     InferenceEngine::Parameter GetConfig(const std::string &name) const override;
@@ -138,6 +151,30 @@ class MultiDeviceExecutableNetwork : public InferenceEngine::ExecutableNetworkTh
     std::unordered_map<std::string, InferenceEngine::Parameter>  _config;
     bool                                                         _needPerfCounters = false;
     std::atomic_size_t                                           _numRequestsCreated = {0};
+
+private:
+    void GenerateWorkers(const std::string& device, const InferenceEngine::SoExecutableNetworkInternal& executableNetwork);
+    void WaitActualNetworkReady() const;
+    void WaitFirstNetworkReady();
+    static bool RunPipelineTask(InferenceEngine::Task& inferPipelineTask,
+                                NotBusyWorkerRequests& idleWorkerRequests,
+                                const DeviceName& preferred_device);
+
+private:
+    std::shared_ptr<InferenceEngine::ICore>               _core;
+    InferenceEngine::IStreamsExecutor::Ptr                _executor;
+    MultiDeviceInferencePlugin*                           _multiPlugin;
+    InferenceEngine::SoExecutableNetworkInternal          _networkFirstReady;
+    mutable InferenceEngine::SoExecutableNetworkInternal  _networkActualNeeded;
+    NetworkFuture                                         _cpuFuture;
+    NetworkPromise                                        _cpuPromise;
+    mutable NetworkFuture                                 _acceleratorFuture;
+    mutable NetworkPromise                                _acceleratorPromise;
+    mutable bool                                          _alreadyActualNetwork = {false};
+    bool                                                  _workModeIsAUTO = {false};
+    DeviceInformation                                     _cpuDevice;
+    DeviceInformation                                     _acceleratorDevice;
+    mutable std::once_flag                                _oc;
 };
 
 }  // namespace MultiDevicePlugin
diff --git a/inference-engine/src/multi_device/multi_device_plugin.cpp b/inference-engine/src/multi_device/multi_device_plugin.cpp
index f63bcde8c4d70a..fa70f33d4085a3 100644
--- a/inference-engine/src/multi_device/multi_device_plugin.cpp
+++ b/inference-engine/src/multi_device/multi_device_plugin.cpp
@@ -219,34 +219,49 @@ IExecutableNetworkInternal::Ptr MultiDeviceInferencePlugin::LoadNetworkImpl(cons
     bool workModeAuto = workMode != fullConfig.end() && workMode->second == InferenceEngine::PluginConfigParams::YES;
     auto priorities = fullConfig.find(MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES);
-    // not found device priorities for -d AUTO use case
-    if (priorities == fullConfig.end()) {
-        if (workModeAuto) {
-            std::string allDevices;
-            auto availableDevices = GetCore()->GetAvailableDevices();
-            if (availableDevices.empty()) {
-                IE_THROW(NotFound) << "No available device found";
-            }
-            for (auto&& device : availableDevices) {
-                allDevices += device;
-                allDevices += ((device == availableDevices[availableDevices.size()-1]) ? "" : ",");
-            }
-            metaDevices = ParseMetaDevices(allDevices, fullConfig);
-            multiNetworkConfig.insert({MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, allDevices});
-        } else {
-            IE_THROW() << "KEY_MULTI_DEVICE_PRIORITIES key is not set for " << GetName() << " device";
+    // if workMode is AUTO
+    if (workModeAuto) {
+        // validate the config; decide whether the PerfCounters setting must be passed
+        // down to the devices, and build the filter config
+        bool needPerfCounters = false;
+        std::map<std::string, std::string> filterConfig;
+        CheckConfig(fullConfig, needPerfCounters, filterConfig);
+        // keep only the devices that support the filter config
+        auto strDevices = GetDeviceList(fullConfig);
+        auto metaDevices = ParseMetaDevices(strDevices, fullConfig);
+        auto supportDevices = FilterDevice(metaDevices, filterConfig);
+        if (supportDevices.size() == 0) {
+            IE_THROW() << "There is no device that supports the configuration";
+        }
+        // replace each device config with the config AUTO wants to pass down,
+        // and rebuild strDevices from the supported devices
+        std::vector<std::string> validConfigKey;
+        validConfigKey.push_back(PluginConfigParams::KEY_PERF_COUNT);
+        validConfigKey.push_back(PluginConfigParams::KEY_EXCLUSIVE_ASYNC_REQUESTS);
+        validConfigKey.push_back(PluginConfigParams::KEY_PERFORMANCE_HINT);
+        strDevices = "";
+        for (auto iter = supportDevices.begin(); iter != supportDevices.end(); iter++) {
+            std::map<std::string, std::string> deviceConfig;
+            auto& configs = iter->config;
+            for (auto& config : configs) {
+                if (std::find(validConfigKey.begin(), validConfigKey.end(), config.first) != validConfigKey.end()) {
+                    deviceConfig.insert({config.first, config.second});
+                }
+            }
+            iter->config = deviceConfig;
+            strDevices += iter->deviceName;
+            strDevices += ((iter + 1) == supportDevices.end()) ? "" : ",";
         }
+
+        return std::make_shared<MultiDeviceExecutableNetwork>(modelPath, network, supportDevices, strDevices, this, needPerfCounters);
+    }
+
+    if (priorities == fullConfig.end()) {
+        IE_THROW() << "KEY_MULTI_DEVICE_PRIORITIES key is not set for " << GetName() << " device";
     } else {  // for use case -d MULTI:xPU or -d AUTO:xPU
         metaDevices = ParseMetaDevices(priorities->second, fullConfig);
         multiNetworkConfig.insert(*priorities);
     }
-    // check if it is -d AUTO or -d AUTO:xPU use case
-    if (workModeAuto) {
-        // select the device
-        auto device = SelectDevice(metaDevices, networkPrecision).deviceName;
-        // parse the config for the device
-        metaDevices = ParseMetaDevices(SelectDevice(metaDevices, networkPrecision).deviceName, fullConfig);
-    }
 
     DeviceMap<InferenceEngine::SoExecutableNetworkInternal> executableNetworkPerDevice;
     std::mutex load_mutex;
@@ -345,7 +360,6 @@ QueryNetworkResult MultiDeviceInferencePlugin::QueryNetwork(const CNNNetwork&
     return queryResult;
 }
 
-
 DeviceInformation MultiDeviceInferencePlugin::SelectDevice(const std::vector<DeviceInformation>& metaDevices, const std::string& networkPrecision) {
     if (metaDevices.empty()) {
         IE_THROW(NotFound) << "No available device to select in " << GetName() << " plugin";
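
Note (illustration, not part of the patch): CheckConfig, defined further below, accepts AUTO_-prefixed keys, PERF_COUNT, EXCLUSIVE_ASYNC_REQUESTS, and the performance-hint keys; anything else throws. An example of a config map that would pass, with illustrative values:

    std::map<std::string, std::string> autoCfg = {
        {InferenceEngine::PluginConfigParams::KEY_PERF_COUNT,
         InferenceEngine::PluginConfigParams::YES},    // also switches needPerfCounters on
        {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, "THROUGHPUT"},
    };
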
"" : ","; } + + return std::make_shared(modelPath, network, supportDevices, strDevices, this, needPerfCounters); + } + + if (priorities == fullConfig.end()) { + IE_THROW() << "KEY_MULTI_DEVICE_PRIORITIES key is not set for " << GetName() << " device"; } else { // for use case -d MULTI:xPU or -d AUTO:xPU metaDevices = ParseMetaDevices(priorities->second, fullConfig); multiNetworkConfig.insert(*priorities); } - // check if it is -d AUTO or -d AUTO:xPU use case - if (workModeAuto) { - // select the device - auto device = SelectDevice(metaDevices, networkPrecision).deviceName; - // parse the config for the device - metaDevices = ParseMetaDevices(SelectDevice(metaDevices, networkPrecision).deviceName, fullConfig); - } DeviceMap executableNetworkPerDevice; std::mutex load_mutex; @@ -345,7 +360,6 @@ QueryNetworkResult MultiDeviceInferencePlugin::QueryNetwork(const CNNNetwork& return queryResult; } - DeviceInformation MultiDeviceInferencePlugin::SelectDevice(const std::vector& metaDevices, const std::string& networkPrecision) { if (metaDevices.empty()) { IE_THROW(NotFound) << "No available device to select in " << GetName() << " plugin"; @@ -466,4 +480,94 @@ DeviceInformation MultiDeviceInferencePlugin::SelectDevice(const std::vector& config) const { + std::string allDevices; + + auto deviceListConfig = config.find(MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES); + if (deviceListConfig == config.end()) { + auto deviceList = GetCore()->GetAvailableDevices(); + for (auto&& device : deviceList) { + allDevices += device; + allDevices += ((device == deviceList[deviceList.size()-1]) ? "" : ","); + } + } else { + allDevices = deviceListConfig->second; + } + + if (allDevices.empty()) { + IE_THROW() << "Please, check environment due to no supported devices can be used"; + } + + return allDevices; +} + +void MultiDeviceInferencePlugin::CheckConfig(const std::map& config, + bool& needPerfCounters, std::map& filterConfig) { + // TODO need to optimize this code, too much duplicated code + const auto perf_hints_configs = PerfHintsConfig::SupportedKeys(); + for (auto&& kvp : config) { + if (kvp.first.find("AUTO_") == 0) { + continue; + } else if (kvp.first == PluginConfigParams::KEY_PERF_COUNT) { + if (kvp.second == PluginConfigParams::YES) { + needPerfCounters = true; + filterConfig.insert({kvp.first, kvp.second}); + } else if (kvp.second == PluginConfigParams::NO) { + needPerfCounters = false; + } else { + IE_THROW() << "Unsupported config value: " << kvp.second + << " for key: " << kvp.first; + } + } else if (kvp.first == PluginConfigParams::KEY_EXCLUSIVE_ASYNC_REQUESTS) { + if (kvp.second == PluginConfigParams::YES || + kvp.second == PluginConfigParams::NO) { + continue; + } else { + IE_THROW() << "Unsupported config value: " << kvp.second + << " for key: " << kvp.first; + } + } else if (std::find(perf_hints_configs.begin(), perf_hints_configs.end(), kvp.first) != perf_hints_configs.end()) { + PerfHintsConfig::CheckConfigAndValue(kvp); + } else if (supported_configKeys.end() == std::find(supported_configKeys.begin(), supported_configKeys.end(), kvp.first)) { + IE_THROW() << "Unsupported config key: " << kvp.first; + } + } +} + +std::vector MultiDeviceInferencePlugin::FilterDevice(const std::vector& metaDevices, + const std::map& config) { + if (metaDevices.empty()) { + IE_THROW(NotFound) << "No available device to filter " << GetName() << " plugin"; + } + + if (config.size() == 0) { + return metaDevices; + } + + std::vector filterDevice; + for (auto&& item : metaDevices) { + bool support = true; + 
diff --git a/inference-engine/src/multi_device/multi_device_plugin.hpp b/inference-engine/src/multi_device/multi_device_plugin.hpp
index 4021c5ec9e1aea..f6f0ed39809bb2 100644
--- a/inference-engine/src/multi_device/multi_device_plugin.hpp
+++ b/inference-engine/src/multi_device/multi_device_plugin.hpp
@@ -36,6 +36,9 @@ class MultiDeviceInferencePlugin : public InferenceEngine::IInferencePlugin {
     std::vector<MultiDevicePlugin::DeviceInformation> ParseMetaDevices(const std::string & devicesRequestsCfg,
                                                                        const std::map<std::string, std::string> & config) const;
 
+    std::string GetDeviceList(const std::map<std::string, std::string>& config) const;
+    DeviceInformation SelectDevice(const std::vector<DeviceInformation>& metaDevices, const std::string& networkPrecision = METRIC_VALUE(FP32));
+
 protected:
     std::map<std::string, std::string> GetSupportedConfig(const std::map<std::string, std::string>& config,
                                                           const MultiDevicePlugin::DeviceName & deviceName) const;
@@ -45,7 +48,10 @@ class MultiDeviceInferencePlugin : public InferenceEngine::IInferencePlugin {
                                                                InferenceEngine::CNNNetwork network,
                                                                const std::map<std::string, std::string>& config,
                                                                const std::string &networkPrecision = METRIC_VALUE(FP32));
-    DeviceInformation SelectDevice(const std::vector<DeviceInformation>& metaDevices, const std::string& networkPrecision = METRIC_VALUE(FP32));
+    static void CheckConfig(const std::map<std::string, std::string>& config, bool& needPerfCounters,
+                            std::map<std::string, std::string>& filterConfig);
+    std::vector<DeviceInformation> FilterDevice(const std::vector<DeviceInformation>& metaDevices,
+                                                const std::map<std::string, std::string>& config);
 };
 
 }  // namespace MultiDevicePlugin
diff --git a/inference-engine/tests/functional/plugin/conformance/test_runner/src/behavior/infer_request/perf_counters.cpp b/inference-engine/tests/functional/plugin/conformance/test_runner/src/behavior/infer_request/perf_counters.cpp
index b7007dd16f308a..a7bfe68707c6e7 100644
--- a/inference-engine/tests/functional/plugin/conformance/test_runner/src/behavior/infer_request/perf_counters.cpp
+++ b/inference-engine/tests/functional/plugin/conformance/test_runner/src/behavior/infer_request/perf_counters.cpp
@@ -18,6 +18,10 @@ const std::vector<std::map<std::string, std::string>> MulticonfigsPerfCounters =
         {{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES), targetDevice }}
 };
 
+const std::vector<std::map<std::string, std::string>> AutoconfigsPerfCounters = {
+        {{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES), targetDevice }}
+};
+
 INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, InferRequestPerfCountersTest,
                          ::testing::Combine(
                                  ::testing::Values(targetDevice),
@@ -30,4 +34,11 @@ INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, InferRequestPerfCountersTest
                                  ::testing::ValuesIn(MulticonfigsPerfCounters)),
                          InferRequestPerfCountersTest::getTestCaseName);
 
+INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, InferRequestPerfCountersTest,
+                         ::testing::Combine(
+                                 ::testing::Values(CommonTestUtils::DEVICE_AUTO),
+                                 ::testing::ValuesIn(AutoconfigsPerfCounters)),
+                         InferRequestPerfCountersTest::getTestCaseName);
+
+
 }  // namespace
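
Note (illustration, not part of the patch): GetDeviceList above falls back to all available devices when MULTI_DEVICE_PRIORITIES is absent; an application can restrict AUTO to a subset explicitly:

    // assumes core and network as in the earlier sketch
    auto execNet = core.LoadNetwork(network, "AUTO",
        {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, "GPU,CPU"}});
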
diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/infer_request/config.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/infer_request/config.cpp
index 7013c3096ddd75..c289a5831c005e 100644
--- a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/infer_request/config.cpp
+++ b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/infer_request/config.cpp
@@ -62,18 +62,5 @@ namespace {
                             ::testing::ValuesIn(MultiInConfigs)),
                      InferRequestConfigTest::getTestCaseName);
 
-    INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, InferRequestConfigTest,
-                             ::testing::Combine(
-                                     ::testing::Values(1u),
-                                     ::testing::Values(CommonTestUtils::DEVICE_AUTO),
-                                     ::testing::ValuesIn(multiConfigs)),
-                             InferRequestConfigTest::getTestCaseName);
-
-    INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests_, InferRequestConfigTest,
-                             ::testing::Combine(
-                                     ::testing::Values(1u),
-                                     ::testing::Values(CommonTestUtils::DEVICE_AUTO),
-                                     ::testing::ValuesIn(MultiInConfigs)),
-                             InferRequestConfigTest::getTestCaseName);
-
 }  // namespace
diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/infer_request/perf_counters.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/infer_request/perf_counters.cpp
index 684f1938b37f12..b75a7e8c789558 100644
--- a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/infer_request/perf_counters.cpp
+++ b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/infer_request/perf_counters.cpp
@@ -37,6 +37,10 @@ const std::vector<std::map<std::string, std::string>> Multiconfigs = {
         {{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , CommonTestUtils::DEVICE_CPU}}
 };
 
+const std::vector<std::map<std::string, std::string>> Autoconfigs = {
+        {{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , CommonTestUtils::DEVICE_CPU}}
+};
+
 INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, InferRequestPerfCountersTest,
                          ::testing::Combine(
                                  ::testing::Values(CommonTestUtils::DEVICE_CPU),
@@ -48,4 +52,11 @@ INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, InferRequestPerfCountersTest
                                  ::testing::Values(CommonTestUtils::DEVICE_MULTI),
                                  ::testing::ValuesIn(Multiconfigs)),
                          InferRequestPerfCountersTest::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, InferRequestPerfCountersTest,
+                         ::testing::Combine(
+                                 ::testing::Values(CommonTestUtils::DEVICE_AUTO),
+                                 ::testing::ValuesIn(Autoconfigs)),
+                         InferRequestPerfCountersTest::getTestCaseName);
+
 }  // namespace
diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/test_plugin.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/test_plugin.cpp
index c03c1a4f121cc2..f7656b81c760a7 100644
--- a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/test_plugin.cpp
+++ b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/test_plugin.cpp
@@ -32,6 +32,10 @@ namespace {
             {InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}}
     };
 
+    const std::vector<std::map<std::string, std::string>> AutoConfigsInputOutput = {
+            {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , CommonTestUtils::DEVICE_CPU}}
+    };
+
     const std::vector<std::map<std::string, std::string>> configsOutput = {
             {},
             {{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}}
@@ -56,7 +60,7 @@ namespace {
             ::testing::Combine(
                     ::testing::ValuesIn(netPrecisions),
                     ::testing::Values(CommonTestUtils::DEVICE_AUTO),
-                    ::testing::ValuesIn(MultiConfigsInputOutput)),
+                    ::testing::ValuesIn(AutoConfigsInputOutput)),
             BehaviorTestOutput::getTestCaseName);
 
     INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, BehaviorTests,
@@ -98,7 +102,7 @@ namespace {
             ::testing::Combine(
                     ::testing::ValuesIn(netPrecisions),
                     ::testing::Values(CommonTestUtils::DEVICE_AUTO),
-                    ::testing::ValuesIn(MultiConfigsInputOutput)),
+                    ::testing::ValuesIn(AutoConfigsInputOutput)),
             BehaviorTestInput::getTestCaseName);
 
 }  // namespace
diff --git a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/behavior/config.cpp b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/behavior/config.cpp
index b972a0e4f7c43d..c75aa903a2136a 100644
--- a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/behavior/config.cpp
+++ b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/behavior/config.cpp
@@ -111,6 +111,13 @@ namespace {
             {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS, "1"}}
     };
 
+    const std::vector<std::map<std::string, std::string>> autoConfigs = {
+            {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , CommonTestUtils::DEVICE_GPU}},
+            {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES,
+              CommonTestUtils::DEVICE_GPU + std::string(",") + CommonTestUtils::DEVICE_CPU}}
+    };
+
+
     INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, CorrectConfigAPITests,
             ::testing::Combine(
                     ::testing::ValuesIn(netPrecisions),
@@ -136,7 +143,7 @@ namespace {
             ::testing::Combine(
                     ::testing::ValuesIn(netPrecisions),
                     ::testing::Values(CommonTestUtils::DEVICE_AUTO),
-                    ::testing::ValuesIn(multiconf)),
+                    ::testing::ValuesIn(autoConfigs)),
             CorrectConfigAPITests::getTestCaseName);
 
     INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, IncorrectConfigAPITests,
diff --git a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/behavior/infer_request/callback.cpp b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/behavior/infer_request/callback.cpp
index 90a22c2435cb86..dfaa591dd96376 100644
--- a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/behavior/infer_request/callback.cpp
+++ b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/behavior/infer_request/callback.cpp
@@ -14,6 +14,11 @@ const std::vector<std::map<std::string, std::string>> multiConfigs = {
         {{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , CommonTestUtils::DEVICE_GPU}}
 };
 
+const std::vector<std::map<std::string, std::string>> autoConfigs = {
+        {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , CommonTestUtils::DEVICE_GPU}},
+        {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , CommonTestUtils::DEVICE_GPU + std::string(",") + CommonTestUtils::DEVICE_CPU}}
+};
+
 INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, InferRequestCallbackTests,
         ::testing::Combine(
             ::testing::Values(CommonTestUtils::DEVICE_GPU),
@@ -27,8 +32,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, InferRequestCallbackTests,
         InferRequestCallbackTests::getTestCaseName);
 
 INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, InferRequestCallbackTests,
-        ::testing::Combine(
-            ::testing::Values(CommonTestUtils::DEVICE_AUTO),
-            ::testing::ValuesIn(multiConfigs)),
-        InferRequestCallbackTests::getTestCaseName);
+                         ::testing::Combine(
+                                 ::testing::Values(CommonTestUtils::DEVICE_AUTO),
+                                 ::testing::ValuesIn(autoConfigs)),
+                         InferRequestCallbackTests::getTestCaseName);
 }  // namespace
diff --git a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/behavior/infer_request/perf_counters.cpp b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/behavior/infer_request/perf_counters.cpp
index 5a4a5852c5a3a2..bdb9cf905182f7 100644
--- a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/behavior/infer_request/perf_counters.cpp
+++ b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/behavior/infer_request/perf_counters.cpp
@@ -14,6 +14,12 @@ namespace {
         {{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , CommonTestUtils::DEVICE_GPU}}
 };
 
+    const std::vector<std::map<std::string, std::string>> AutoConfigs = {
+            {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , CommonTestUtils::DEVICE_GPU}},
+            {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES,
+              CommonTestUtils::DEVICE_GPU + std::string(",") + CommonTestUtils::DEVICE_CPU}}
+    };
+
     INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, InferRequestPerfCountersTest,
                             ::testing::Combine(
                                     ::testing::Values(CommonTestUtils::DEVICE_GPU),
@@ -26,4 +32,10 @@ namespace {
                                     ::testing::ValuesIn(Multiconfigs)),
                             InferRequestPerfCountersTest::getTestCaseName);
 
+    INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, InferRequestPerfCountersTest,
+                            ::testing::Combine(
+                                    ::testing::Values(CommonTestUtils::DEVICE_AUTO),
+                                    ::testing::ValuesIn(AutoConfigs)),
+                            InferRequestPerfCountersTest::getTestCaseName);
+
 }  // namespace
diff --git a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/behavior/infer_request/wait.cpp b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/behavior/infer_request/wait.cpp
index 07fe3ddd855a3a..41da3069a871ea 100644
--- a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/behavior/infer_request/wait.cpp
+++ b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/behavior/infer_request/wait.cpp
@@ -13,6 +13,12 @@ namespace {
         {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, CommonTestUtils::DEVICE_GPU}}
 };
 
+    const std::vector<std::map<std::string, std::string>> autoConfigs = {
+            {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , CommonTestUtils::DEVICE_GPU}},
+            {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES ,
+              CommonTestUtils::DEVICE_GPU + std::string(",") + CommonTestUtils::DEVICE_CPU}}
+    };
+
     INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, InferRequestWaitTests,
                             ::testing::Combine(
                                     ::testing::Values(CommonTestUtils::DEVICE_GPU),
@@ -28,7 +34,7 @@ namespace {
     INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, InferRequestWaitTests,
                             ::testing::Combine(
                                     ::testing::Values(CommonTestUtils::DEVICE_AUTO),
-                                    ::testing::ValuesIn(configs)),
+                                    ::testing::ValuesIn(autoConfigs)),
                             InferRequestWaitTests::getTestCaseName);
 
-}  // namespace
\ No newline at end of file
+}  // namespace
diff --git a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/behavior/preprocessing/set_preprocess.cpp b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/behavior/preprocessing/set_preprocess.cpp
index c6a17bb4bf7761..950425675bc2d4 100644
--- a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/behavior/preprocessing/set_preprocess.cpp
+++ b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/behavior/preprocessing/set_preprocess.cpp
@@ -22,6 +22,12 @@ namespace {
             {{ InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , CommonTestUtils::DEVICE_GPU}}
     };
 
+    const std::vector<std::map<std::string, std::string>> autoConfigs = {
+            {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , CommonTestUtils::DEVICE_GPU}},
+            {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES ,
+              CommonTestUtils::DEVICE_GPU + std::string(",") + CommonTestUtils::DEVICE_CPU}}
+    };
+
     INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, InferRequestPreprocessTest,
             ::testing::Combine(
                     ::testing::ValuesIn(netPrecisions),
@@ -40,7 +46,7 @@ namespace {
             ::testing::Combine(
                     ::testing::ValuesIn(netPrecisions),
                     ::testing::Values(CommonTestUtils::DEVICE_AUTO),
-                    ::testing::ValuesIn(multiConfigs)),
+                    ::testing::ValuesIn(autoConfigs)),
             InferRequestPreprocessTest::getTestCaseName);
 
     const std::vector<InferenceEngine::Precision> ioPrecisions = {
@@ -85,4 +91,4 @@ namespace {
                     ::testing::ValuesIn(configs)),
             InferRequestPreprocessDynamicallyInSetBlobTest::getTestCaseName);
 
-}  // namespace
\ No newline at end of file
+}  // namespace
diff --git a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/behavior/test_plugin.cpp b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/behavior/test_plugin.cpp
index 98069d07303168..bfe1d09c36be7d 100644
--- a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/behavior/test_plugin.cpp
+++ b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/behavior/test_plugin.cpp
@@ -31,6 +31,12 @@ namespace {
         {{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , CommonTestUtils::DEVICE_GPU}}
 };
 
+    const std::vector<std::map<std::string, std::string>> autoConfigs = {
+            {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , CommonTestUtils::DEVICE_GPU}},
+            {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES ,
+              CommonTestUtils::DEVICE_GPU + std::string(",") + CommonTestUtils::DEVICE_CPU}}
+    };
+
     const std::vector<std::map<std::string, std::string>> configsInput = {
             {},
             {{InferenceEngine::PluginConfigParams::KEY_GPU_THROUGHPUT_STREAMS, InferenceEngine::PluginConfigParams::GPU_THROUGHPUT_AUTO}}
@@ -65,7 +71,7 @@ namespace {
             ::testing::Combine(
                     ::testing::ValuesIn(netPrecisions),
                     ::testing::Values(CommonTestUtils::DEVICE_AUTO),
-                    ::testing::ValuesIn(MultiConfigsInputOutput)),
+                    ::testing::ValuesIn(autoConfigs)),
             BehaviorTestOutput::getTestCaseName);
 
     INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, BehaviorTests,
@@ -86,7 +92,7 @@ namespace {
             ::testing::Combine(
                     ::testing::Values(InferenceEngine::Precision::FP32),
                     ::testing::Values(CommonTestUtils::DEVICE_AUTO),
-                    ::testing::ValuesIn(MultiConfigs)),
+                    ::testing::ValuesIn(autoConfigs)),
             BehaviorTests::getTestCaseName);
 
     INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, BehaviorTestInput,
@@ -107,7 +113,7 @@ namespace {
             ::testing::Combine(
                     ::testing::ValuesIn(netPrecisions),
                     ::testing::Values(CommonTestUtils::DEVICE_AUTO),
-                    ::testing::ValuesIn(MultiConfigsInputOutput)),
+                    ::testing::ValuesIn(autoConfigs)),
             BehaviorTestInput::getTestCaseName);
 
 }  // namespace
diff --git a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/behavior/version.cpp b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/behavior/version.cpp
index c02a209e9d59a9..fe7bbfa5c099f1 100644
--- a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/behavior/version.cpp
+++ b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/behavior/version.cpp
@@ -14,6 +14,12 @@ namespace {
         {{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , CommonTestUtils::DEVICE_GPU}}
 };
 
+    const std::vector<std::map<std::string, std::string>> autoConfigs = {
+            {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , CommonTestUtils::DEVICE_GPU}},
+            {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES ,
+              CommonTestUtils::DEVICE_GPU + std::string(",") + CommonTestUtils::DEVICE_CPU}}
+    };
+
     const std::vector<std::map<std::string, std::string>> Heteroconfigs = {
             {{ HETERO_CONFIG_KEY(DUMP_GRAPH_DOT) , CommonTestUtils::DEVICE_GPU}}
     };
@@ -36,7 +42,7 @@ namespace {
             ::testing::Combine(
                     ::testing::Values(InferenceEngine::Precision::FP32),
                     ::testing::Values(CommonTestUtils::DEVICE_AUTO),
-                    ::testing::ValuesIn(Multiconfigs)),
+                    ::testing::ValuesIn(autoConfigs)),
             VersionTest::getTestCaseName);
 
     INSTANTIATE_TEST_SUITE_P(smoke_Hetero_BehaviorTests, VersionTest,