From f1bd8c116205e68debc878c8a74a496c7836344e Mon Sep 17 00:00:00 2001 From: "River.Li" Date: Mon, 11 Mar 2024 10:00:50 +0800 Subject: [PATCH] [BATCH] use ov::device::properties to replace batch size of BATCH:CPU(4) --- src/inference/src/dev/core_impl.cpp | 10 +--- src/inference/src/dev/device_id_parser.cpp | 8 +-- src/inference/tests/unit/core.cpp | 18 ------- src/plugins/auto/src/plugin.cpp | 3 ++ .../tests/unit/parse_meta_device_test.cpp | 4 +- src/plugins/auto_batch/src/plugin.cpp | 50 +++++++++++++++++-- src/plugins/auto_batch/src/plugin.hpp | 4 +- .../ov_executable_network/exec_net_base.cpp | 10 ++-- .../ov_executable_network/properties.cpp | 11 ++-- .../behavior/ov_infer_request/callback.cpp | 5 +- .../ov_infer_request/cancellation.cpp | 3 +- .../behavior/ov_infer_request/io_tensor.cpp | 5 +- .../ov_infer_request/multithreading.cpp | 5 +- .../ov_infer_request/perf_counters.cpp | 5 +- .../behavior/ov_infer_request/wait.cpp | 5 +- .../functional/behavior/ov_plugin/remote.cpp | 3 +- .../tests/unit/parse_batch_device_test.cpp | 41 +++++++++------ .../tests/unit/parse_meta_device_test.cpp | 37 ++++++++++---- .../tests/unit/plugin_compile_model_test.cpp | 17 +++++-- .../tests/unit/plugin_query_model_test.cpp | 13 +++-- .../behavior/compiled_model/properties.cpp | 13 +++-- .../ov_executable_network/exec_net_base.cpp | 12 ++--- .../behavior/ov_infer_request/callback.cpp | 3 +- .../ov_infer_request/cancellation.cpp | 3 +- .../behavior/ov_infer_request/io_tensor.cpp | 10 ++-- .../ov_infer_request/multithreading.cpp | 3 +- .../ov_infer_request/perf_counters.cpp | 3 +- .../behavior/ov_infer_request/wait.cpp | 3 +- .../behavior/ov_plugin/remote.cpp | 10 ++-- .../ov_executable_network/properties.cpp | 9 ++-- .../ov_plugin/auto_batching_tests.hpp | 7 +-- 31 files changed, 210 insertions(+), 123 deletions(-) diff --git a/src/inference/src/dev/core_impl.cpp b/src/inference/src/dev/core_impl.cpp index 1787c576408892..68f8e9a750adeb 100644 --- a/src/inference/src/dev/core_impl.cpp +++ b/src/inference/src/dev/core_impl.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // @@ -57,13 +57,6 @@ void allowNotImplemented(F&& f) { } } -void stripDeviceName(std::string& device, const std::string& substr) { - auto pos = device.find(substr); - if (pos == 0) { - device.erase(pos, substr.length()); - } -} - bool is_virtual_device(const std::string& device_name) { return (device_name.find("AUTO") != std::string::npos || device_name.find("MULTI") != std::string::npos || device_name.find("HETERO") != std::string::npos || device_name.find("BATCH") != std::string::npos); @@ -539,7 +532,6 @@ ov::Plugin ov::CoreImpl::get_plugin(const std::string& pluginName) const { auto deviceName = pluginName; if (deviceName == ov::DEFAULT_DEVICE_NAME) deviceName = "AUTO"; - stripDeviceName(deviceName, "-"); std::map::const_iterator it; { // Global lock to find plugin. 
diff --git a/src/inference/src/dev/device_id_parser.cpp b/src/inference/src/dev/device_id_parser.cpp index 3909b5e7bf1c4e..8b6d90145c5c7c 100644 --- a/src/inference/src/dev/device_id_parser.cpp +++ b/src/inference/src/dev/device_id_parser.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // @@ -84,12 +84,6 @@ std::vector DeviceIDParser::get_multi_devices(const std::string& de } std::string DeviceIDParser::get_batch_device(const std::string& device) { - if (device.find(",") != std::string::npos) { - OPENVINO_THROW("BATCH accepts only one device in list but got '", device, "'"); - } - if (device.find("-") != std::string::npos) { - OPENVINO_THROW("Invalid device name '", device, "' for BATCH"); - } auto trim_request_info = [](const std::string& device_with_requests) { auto opening_bracket = device_with_requests.find_first_of('('); return device_with_requests.substr(0, opening_bracket); diff --git a/src/inference/tests/unit/core.cpp b/src/inference/tests/unit/core.cpp index 8dece743ab454a..f635d88bab1bf1 100644 --- a/src/inference/tests/unit/core.cpp +++ b/src/inference/tests/unit/core.cpp @@ -384,24 +384,6 @@ TEST(CoreTests_parse_device_config, get_device_config) { ov::device::properties(ov::AnyMap{{"MULTI", ov::AnyMap{ov::device::priorities("DEVICE")}}})}); } -TEST(CoreTests_parse_device_config, get_batch_device_name) { - EXPECT_STREQ(ov::DeviceIDParser::get_batch_device("CPU").c_str(), "CPU"); - EXPECT_STREQ(ov::DeviceIDParser::get_batch_device("GPU(4)").c_str(), "GPU"); - - OV_EXPECT_THROW(ov::DeviceIDParser::get_batch_device("-CPU"), - ov::Exception, - ::testing::HasSubstr("Invalid device name '-CPU' for BATCH")); - OV_EXPECT_THROW(ov::DeviceIDParser::get_batch_device("CPU(0)-"), - ov::Exception, - ::testing::HasSubstr("Invalid device name 'CPU(0)-' for BATCH")); - OV_EXPECT_THROW(ov::DeviceIDParser::get_batch_device("GPU(4),CPU"), - ov::Exception, - ::testing::HasSubstr("BATCH accepts only one device in list but got 'GPU(4),CPU'")); - OV_EXPECT_THROW(ov::DeviceIDParser::get_batch_device("CPU,GPU"), - ov::Exception, - ::testing::HasSubstr("BATCH accepts only one device in list but got 'CPU,GPU'")); -} - class ApplyAutoBatchThreading : public testing::Test { public: static void runParallel(std::function func, diff --git a/src/plugins/auto/src/plugin.cpp b/src/plugins/auto/src/plugin.cpp index 81d78e1a55982b..9ce7677e0fdc49 100644 --- a/src/plugins/auto/src/plugin.cpp +++ b/src/plugins/auto/src/plugin.cpp @@ -189,6 +189,9 @@ std::vector Plugin::parse_meta_devices(const std::string& pri auto opening_bracket = d.find_first_of('('); auto closing_bracket = d.find_first_of(')', opening_bracket); auto device_name = d.substr(0, opening_bracket); + if (closing_bracket != std::string::npos && closing_bracket < d.length() - 1) { + OPENVINO_THROW("Device list with \"", d, "\" name is illegal in the AUTO plugin."); + } int num_requests = -1; if (closing_bracket != std::string::npos && opening_bracket < closing_bracket) { diff --git a/src/plugins/auto/tests/unit/parse_meta_device_test.cpp b/src/plugins/auto/tests/unit/parse_meta_device_test.cpp index e34791cf70f7a0..0482b9ad14f585 100644 --- a/src/plugins/auto/tests/unit/parse_meta_device_test.cpp +++ b/src/plugins/auto/tests/unit/parse_meta_device_test.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // @@ -159,6 +159,8 @@ const std::vector testConfigs 
= { ConfigParams{"CPU(-1),GPU,OTHER", {}, true, 0}, ConfigParams{"CPU(NA),GPU,OTHER", {}, true, 0}, + ConfigParams{"CPU(4)a", {}, true, 0}, + ConfigParams{"CPU(4)a,GPU,OTHER", {}, true, 0}, ConfigParams{"INVALID_DEVICE", {}, false, 0}, ConfigParams{"INVALID_DEVICE,CPU", {{"CPU", {}, -1, "", "CPU_", 1}}, false, 2}, diff --git a/src/plugins/auto_batch/src/plugin.cpp b/src/plugins/auto_batch/src/plugin.cpp index 32bb6e7230de95..b50c9afa9c1649 100644 --- a/src/plugins/auto_batch/src/plugin.cpp +++ b/src/plugins/auto_batch/src/plugin.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // @@ -19,7 +19,9 @@ namespace ov { namespace autobatch_plugin { -std::vector<std::string> supported_configKeys = {ov::device::priorities.name(), ov::auto_batch_timeout.name()}; +std::vector<std::string> supported_configKeys = {ov::device::priorities.name(), + ov::auto_batch_timeout.name(), + ov::device::properties.name()}; inline ov::AnyMap merge_properties(ov::AnyMap config, const ov::AnyMap& user_config) { for (auto&& kvp : user_config) { @@ -29,6 +31,9 @@ inline ov::AnyMap merge_properties(ov::AnyMap config, const ov::AnyMap& user_con } DeviceInformation Plugin::parse_batch_device(const std::string& device_with_batch) { + if (device_with_batch.find("-") != std::string::npos) { + OPENVINO_THROW("Invalid BATCH device name ", device_with_batch); + } auto openingBracket = device_with_batch.find_first_of('('); auto closingBracket = device_with_batch.find_first_of(')', openingBracket); auto deviceName = device_with_batch.substr(0, openingBracket); @@ -44,9 +49,48 @@ DeviceInformation Plugin::parse_batch_device(const std::string& device_with_batc return {std::move(deviceName), {{}}, static_cast<uint32_t>(batch)}; } +uint32_t Plugin::parse_batch_size(const std::string& device_name, const ov::AnyMap& properties) { + uint32_t num_requests = 0; + // Parse batch_size from ov::device::properties + auto item = properties.find(ov::device::properties.name()); + if (item != properties.end()) { + ov::AnyMap devices_properties = item->second.as<ov::AnyMap>(); + auto it = devices_properties.find(device_name); + + if (it != devices_properties.end()) { + auto props = it->second.as<ov::AnyMap>(); + if (props.find(ov::hint::num_requests.name()) != props.end()) { + try { + num_requests = props.at(ov::hint::num_requests.name()).as<uint32_t>(); + if ((num_requests == 0) || num_requests == (uint32_t)-1) { + OPENVINO_THROW("BATCH got an invalid ov::hint::num_requests value: ", + props.at(ov::hint::num_requests.name()).as<std::string>()); + } + } catch (...) { + OPENVINO_THROW("BATCH got an invalid ov::hint::num_requests value: ", + props.at(ov::hint::num_requests.name()).as<std::string>()); + } + } + } + } + return num_requests; +} + DeviceInformation Plugin::parse_meta_device(const std::string& devices_batch_config, const ov::AnyMap& user_config) const { + if (devices_batch_config.find(",") != std::string::npos) { + OPENVINO_THROW("BATCH accepts only one device in list but got '", devices_batch_config, "'"); + } + // Batch_size will be obtained from ov::device::properties, while devices_name_with_batch_config will be deprecated + // after 25.0. + // For example: + // DeviceName = "BATCH:GPU", ov::device::properties("GPU",ov::hint::num_requests(8)), + // while similar DeviceName = "BATCH:GPU(8)" will be deprecated.
auto meta_device = parse_batch_device(devices_batch_config); + auto batch_size = parse_batch_size(meta_device.device_name, user_config); + if (batch_size > 0) { + meta_device.device_batch_size = batch_size; + } meta_device.device_config = get_core()->get_supported_property(meta_device.device_name, user_config); // check that no irrelevant config-keys left for (const auto& k : user_config) { @@ -130,7 +174,7 @@ std::shared_ptr Plugin::compile_model(const std::shared_ptr< auto full_properties = merge_properties(m_plugin_config, properties); auto device_batch = full_properties.find(ov::device::priorities.name()); if (device_batch == full_properties.end()) { - OPENVINO_THROW("ov::device::priorities key for AUTO NATCH is not set for BATCH device"); + OPENVINO_THROW("ov::device::priorities key for AUTO BATCH is not set for BATCH device"); } auto meta_device = parse_meta_device(device_batch->second.as(), properties); diff --git a/src/plugins/auto_batch/src/plugin.hpp b/src/plugins/auto_batch/src/plugin.hpp index 9ca950bcc624af..8af69cf1b7d586 100644 --- a/src/plugins/auto_batch/src/plugin.hpp +++ b/src/plugins/auto_batch/src/plugin.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // @@ -64,6 +64,8 @@ class Plugin : public ov::IPlugin { static DeviceInformation parse_batch_device(const std::string& device_with_batch); + static uint32_t parse_batch_size(const std::string& device_name, const ov::AnyMap& properties); + private: mutable ov::AnyMap m_plugin_config; }; diff --git a/src/plugins/auto_batch/tests/functional/behavior/ov_executable_network/exec_net_base.cpp b/src/plugins/auto_batch/tests/functional/behavior/ov_executable_network/exec_net_base.cpp index 529c6dfcfb19f3..eeedf91f132b86 100644 --- a/src/plugins/auto_batch/tests/functional/behavior/ov_executable_network/exec_net_base.cpp +++ b/src/plugins/auto_batch/tests/functional/behavior/ov_executable_network/exec_net_base.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // @@ -9,9 +9,11 @@ namespace { auto autoBatchConfigs = []() { return std::vector{ // explicit batch size 4 to avoid fallback to no auto-batching - {{ov::device::priorities.name(), std::string(ov::test::utils::DEVICE_TEMPLATE) + "(4)"}, - // no timeout to avoid increasing the test time - {ov::auto_batch_timeout.name(), "0"}}}; + {ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE)}, + {ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE), + ov::device::properties(ov::test::utils::DEVICE_TEMPLATE, ov::hint::num_requests(4))}, + // no timeout to avoid increasing the test time + {ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE), ov::auto_batch_timeout(0)}}; }; INSTANTIATE_TEST_SUITE_P(smoke_AutoBatch_BehaviorTests, OVCompiledModelBaseTest, diff --git a/src/plugins/auto_batch/tests/functional/behavior/ov_executable_network/properties.cpp b/src/plugins/auto_batch/tests/functional/behavior/ov_executable_network/properties.cpp index 68191c22097633..d8b63cd9cb5c9e 100644 --- a/src/plugins/auto_batch/tests/functional/behavior/ov_executable_network/properties.cpp +++ b/src/plugins/auto_batch/tests/functional/behavior/ov_executable_network/properties.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // @@ -20,10 +20,13 @@ 
INSTANTIATE_TEST_SUITE_P(smoke_AutoBatch_BehaviorTests, OVClassCompiledModelPropertiesIncorrectTests::getTestCaseName); const std::vector auto_batch_properties = { - {{ov::device::priorities.name(), std::string(ov::test::utils::DEVICE_TEMPLATE) + "(4)"}}, - {{ov::device::priorities.name(), std::string(ov::test::utils::DEVICE_TEMPLATE) + "(4)"}, + {{ov::device::priorities.name(), std::string(ov::test::utils::DEVICE_TEMPLATE)}, + {ov::device::properties(ov::test::utils::DEVICE_TEMPLATE, ov::hint::num_requests(4))}}, + {{ov::device::priorities.name(), std::string(ov::test::utils::DEVICE_TEMPLATE)}, + {ov::device::properties(ov::test::utils::DEVICE_TEMPLATE, ov::hint::num_requests(4))}, {ov::auto_batch_timeout(1)}}, - {{ov::device::priorities.name(), std::string(ov::test::utils::DEVICE_TEMPLATE) + "(4)"}, + {{ov::device::priorities.name(), std::string(ov::test::utils::DEVICE_TEMPLATE)}, + {ov::device::properties(ov::test::utils::DEVICE_TEMPLATE, ov::hint::num_requests(4))}, {ov::auto_batch_timeout(10)}}, }; diff --git a/src/plugins/auto_batch/tests/functional/behavior/ov_infer_request/callback.cpp b/src/plugins/auto_batch/tests/functional/behavior/ov_infer_request/callback.cpp index a3f1b5bb988465..80cd1fac4b4654 100644 --- a/src/plugins/auto_batch/tests/functional/behavior/ov_infer_request/callback.cpp +++ b/src/plugins/auto_batch/tests/functional/behavior/ov_infer_request/callback.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // @@ -10,7 +10,8 @@ namespace { auto autoBatchConfigs = []() { return std::vector{ // explicit batch size 4 to avoid fallback to no auto-batching - {{ov::device::priorities.name(), std::string(ov::test::utils::DEVICE_TEMPLATE) + "(4)"}, + {{ov::device::priorities.name(), std::string(ov::test::utils::DEVICE_TEMPLATE)}, + {ov::device::properties(ov::test::utils::DEVICE_TEMPLATE, ov::hint::num_requests(4))}, // no timeout to avoid increasing the test time {ov::auto_batch_timeout(0)}}}; }; diff --git a/src/plugins/auto_batch/tests/functional/behavior/ov_infer_request/cancellation.cpp b/src/plugins/auto_batch/tests/functional/behavior/ov_infer_request/cancellation.cpp index c9db7f7984721a..293cdf3b20d1c1 100644 --- a/src/plugins/auto_batch/tests/functional/behavior/ov_infer_request/cancellation.cpp +++ b/src/plugins/auto_batch/tests/functional/behavior/ov_infer_request/cancellation.cpp @@ -10,7 +10,8 @@ namespace { auto autoBatchConfigs = []() { return std::vector{ // explicit batch size 4 to avoid fallback to no auto-batching - {{ov::device::priorities.name(), std::string(ov::test::utils::DEVICE_TEMPLATE) + "(4)"}, + {{ov::device::priorities.name(), std::string(ov::test::utils::DEVICE_TEMPLATE)}, + {ov::device::properties(ov::test::utils::DEVICE_TEMPLATE, ov::hint::num_requests(4))}, // no timeout to avoid increasing the test time {ov::auto_batch_timeout(0)}}}; }; diff --git a/src/plugins/auto_batch/tests/functional/behavior/ov_infer_request/io_tensor.cpp b/src/plugins/auto_batch/tests/functional/behavior/ov_infer_request/io_tensor.cpp index 112de6230ea29c..eff11011a957e3 100644 --- a/src/plugins/auto_batch/tests/functional/behavior/ov_infer_request/io_tensor.cpp +++ b/src/plugins/auto_batch/tests/functional/behavior/ov_infer_request/io_tensor.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // @@ -10,7 +10,8 @@ namespace { auto AutoBatchConfigs = []() { return 
std::vector{ // explicit batch size 4 to avoid fallback to no auto-batching - {{ov::device::priorities.name(), std::string(ov::test::utils::DEVICE_TEMPLATE) + "(4)"}, + {{ov::device::priorities.name(), std::string(ov::test::utils::DEVICE_TEMPLATE)}, + {ov::device::properties(ov::test::utils::DEVICE_TEMPLATE, ov::hint::num_requests(4))}, // no timeout to avoid increasing the test time {ov::auto_batch_timeout(0)}}}; }; diff --git a/src/plugins/auto_batch/tests/functional/behavior/ov_infer_request/multithreading.cpp b/src/plugins/auto_batch/tests/functional/behavior/ov_infer_request/multithreading.cpp index 8798cfddeab1ad..3ca3921d645795 100644 --- a/src/plugins/auto_batch/tests/functional/behavior/ov_infer_request/multithreading.cpp +++ b/src/plugins/auto_batch/tests/functional/behavior/ov_infer_request/multithreading.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // @@ -10,7 +10,8 @@ namespace { auto AutoBatchConfigs = []() { return std::vector{ // explicit batch size 4 to avoid fallback to no auto-batching - {{ov::device::priorities.name(), std::string(ov::test::utils::DEVICE_TEMPLATE) + "(4)"}, + {{ov::device::priorities.name(), std::string(ov::test::utils::DEVICE_TEMPLATE)}, + {ov::device::properties(ov::test::utils::DEVICE_TEMPLATE, ov::hint::num_requests(4))}, // no timeout to avoid increasing the test time {ov::auto_batch_timeout(0)}}}; }; diff --git a/src/plugins/auto_batch/tests/functional/behavior/ov_infer_request/perf_counters.cpp b/src/plugins/auto_batch/tests/functional/behavior/ov_infer_request/perf_counters.cpp index aeb5d51d512a77..53b4eca3a074dd 100644 --- a/src/plugins/auto_batch/tests/functional/behavior/ov_infer_request/perf_counters.cpp +++ b/src/plugins/auto_batch/tests/functional/behavior/ov_infer_request/perf_counters.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // @@ -10,7 +10,8 @@ namespace { auto AutoBatchConfigs = []() { return std::vector{ // explicit batch size 4 to avoid fallback to no auto-batching - {{ov::device::priorities.name(), std::string(ov::test::utils::DEVICE_TEMPLATE) + "(4)"}, + {{ov::device::priorities.name(), std::string(ov::test::utils::DEVICE_TEMPLATE)}, + {ov::device::properties(ov::test::utils::DEVICE_TEMPLATE, ov::hint::num_requests(4))}, // no timeout to avoid increasing the test time {ov::auto_batch_timeout(0)}}}; }; diff --git a/src/plugins/auto_batch/tests/functional/behavior/ov_infer_request/wait.cpp b/src/plugins/auto_batch/tests/functional/behavior/ov_infer_request/wait.cpp index 9d10993ba30a4f..321debc2f21e65 100644 --- a/src/plugins/auto_batch/tests/functional/behavior/ov_infer_request/wait.cpp +++ b/src/plugins/auto_batch/tests/functional/behavior/ov_infer_request/wait.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // @@ -10,7 +10,8 @@ namespace { auto AutoBatchConfigs = []() { return std::vector{ // explicit batch size 4 to avoid fallback to no auto-batching - {{ov::device::priorities.name(), std::string(ov::test::utils::DEVICE_TEMPLATE) + "(4)"}, + {{ov::device::priorities.name(), std::string(ov::test::utils::DEVICE_TEMPLATE)}, + {ov::device::properties(ov::test::utils::DEVICE_TEMPLATE, ov::hint::num_requests(4))}, // no timeout to avoid increasing the test time {ov::auto_batch_timeout(0)}}}; }; diff --git 
a/src/plugins/auto_batch/tests/functional/behavior/ov_plugin/remote.cpp b/src/plugins/auto_batch/tests/functional/behavior/ov_plugin/remote.cpp index e7bd0a5383262f..6512c974d277c3 100644 --- a/src/plugins/auto_batch/tests/functional/behavior/ov_plugin/remote.cpp +++ b/src/plugins/auto_batch/tests/functional/behavior/ov_plugin/remote.cpp @@ -13,7 +13,8 @@ std::vector> generate_remote_params() { auto AutoBatchConfigs = []() { return std::vector{ // explicit batch size 4 to avoid fallback to no auto-batching - {{ov::device::priorities.name(), std::string(ov::test::utils::DEVICE_TEMPLATE) + "(4)"}, + {{ov::device::priorities.name(), std::string(ov::test::utils::DEVICE_TEMPLATE)}, + {ov::device::properties(ov::test::utils::DEVICE_TEMPLATE, ov::hint::num_requests(4))}, // no timeout to avoid increasing the test time ov::auto_batch_timeout(0)}}; }; diff --git a/src/plugins/auto_batch/tests/unit/parse_batch_device_test.cpp b/src/plugins/auto_batch/tests/unit/parse_batch_device_test.cpp index b6696f8d08c4f8..85f25b668cbf41 100644 --- a/src/plugins/auto_batch/tests/unit/parse_batch_device_test.cpp +++ b/src/plugins/auto_batch/tests/unit/parse_batch_device_test.cpp @@ -1,31 +1,35 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // #include "mock_common.hpp" using batch_device_config_params = std::tuple; class ParseBatchDeviceTest : public ::testing::TestWithParam { public: - std::string m_batch_device_config; std::string m_device_name; + ov::AnyMap properties; int m_batch_size; bool m_throw_exception; std::shared_ptr> m_plugin; public: static std::string getTestCaseName(testing::TestParamInfo obj) { - std::string batch_device_config; + ov::AnyMap properties; std::string device_name; int batch_size; bool throw_exception; - std::tie(batch_device_config, device_name, batch_size, throw_exception) = obj.param; - std::string res = batch_device_config; + std::tie(device_name, properties, batch_size, throw_exception) = obj.param; + std::string res = device_name; + for (const auto& it : properties) { + res += "_" + it.first + "_" + it.second.as(); + } + res += "_" + std::to_string(batch_size); if (throw_exception) res += "_throw"; return res; @@ -36,7 +40,7 @@ class ParseBatchDeviceTest : public ::testing::TestWithParamGetParam(); + std::tie(m_device_name, properties, m_batch_size, m_throw_exception) = this->GetParam(); m_plugin = std::shared_ptr>(new NiceMock()); } @@ -44,19 +48,24 @@ class ParseBatchDeviceTest : public ::testing::TestWithParamparse_batch_device(m_batch_device_config)); + ASSERT_ANY_THROW(m_plugin->parse_batch_size(m_device_name, properties)); } else { - auto result = m_plugin->parse_batch_device(m_batch_device_config); - EXPECT_EQ(result.device_name, m_device_name); - EXPECT_EQ(result.device_batch_size, m_batch_size); + auto result = m_plugin->parse_batch_size(m_device_name, properties); + EXPECT_EQ(result, m_batch_size); } } -const std::vector batch_device_test_configs = { - batch_device_config_params{"CPU(4)", "CPU", 4, false}, - batch_device_config_params{"GPU(8)", "GPU", 8, false}, - batch_device_config_params{"CPU(0)", "CPU", 0, true}, - batch_device_config_params{"GPU(-1)", "GPU", 0, true}, +auto DeviceProperties = [](const std::string& device_name, const uint32_t batch_size) { + auto prop = ov::AnyMap({{device_name, ov::AnyMap({ov::hint::num_requests(batch_size)})}}); + return ov::AnyMap({{ov::device::properties.name(), prop}}); +}; + +const std::vector + batch_device_test_configs = { + 
batch_device_config_params{"CPU", DeviceProperties("CPU", 4), 4, false}, + batch_device_config_params{"CPU", DeviceProperties("CPU", -1), -1, true}, + batch_device_config_params{"CPU", DeviceProperties("CPU", 0), 0, true}, + batch_device_config_params{"CPU", {}, 0, false}, }; INSTANTIATE_TEST_SUITE_P(smoke_AutoBatch_BehaviorTests, diff --git a/src/plugins/auto_batch/tests/unit/parse_meta_device_test.cpp b/src/plugins/auto_batch/tests/unit/parse_meta_device_test.cpp index 0ac4a51d5a4efe..939310cfa49c7b 100644 --- a/src/plugins/auto_batch/tests/unit/parse_meta_device_test.cpp +++ b/src/plugins/auto_batch/tests/unit/parse_meta_device_test.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // @@ -6,7 +6,7 @@ #include "unit_test_utils/mocks/openvino/runtime/mock_icore.hpp" using meta_device_params = std::tuple; // Throw exception @@ -105,20 +105,37 @@ TEST_P(ParseMetaDeviceTest, ParseMetaDeviceTestCase) { } } +auto DeviceProperties = [](const std::string& device_name, const uint32_t batch_size) { + return ov::AnyMap({{device_name, ov::AnyMap({ov::hint::num_requests(batch_size)})}}); +}; + const std::vector meta_device_test_configs = { - meta_device_params{"CPU(4)", {}, DeviceInformation{"CPU", {}, 4}, false}, - meta_device_params{"CPU(4)", {{}}, DeviceInformation{"CPU", {{}}, 4}, true}, - meta_device_params{"CPU(4)", {{ov::cache_dir("./")}}, DeviceInformation{"CPU", {{ov::cache_dir("./")}}, 4}, false}, - meta_device_params{"GPU(4)", {{ov::cache_dir("./")}}, DeviceInformation{"GPU", {{ov::cache_dir("./")}}, 4}, false}, - meta_device_params{"GPU(8)", - {{ov::cache_dir("./")}, {ov::optimal_batch_size.name(), "16"}}, + meta_device_params{"CPU", {}, DeviceInformation{"CPU", {}, 0}, false}, + meta_device_params{"CPU", + {{ov::cache_dir("./")}, {ov::device::properties.name(), DeviceProperties("CPU", 4)}}, + DeviceInformation{"CPU", {{ov::cache_dir("./")}}, 4}, + false}, + meta_device_params{"GPU", + {{ov::cache_dir("./")}, {ov::device::properties.name(), DeviceProperties("GPU", 4)}}, + DeviceInformation{"GPU", {{ov::cache_dir("./")}}, 4}, + false}, + meta_device_params{"GPU", + {{ov::cache_dir("./")}, + {ov::optimal_batch_size.name(), "16"}, + {ov::device::properties.name(), DeviceProperties("GPU", 8)}}, DeviceInformation{"GPU", {{ov::cache_dir("./")}, {ov::optimal_batch_size.name(), "16"}}, 8}, false}, - meta_device_params{"CPU(4)", {{ov::optimal_batch_size.name(), "16"}}, DeviceInformation{"CPU", {{}}, 4}, true}, - meta_device_params{"CPU(4)", + meta_device_params{ + "CPU", + {{ov::optimal_batch_size.name(), "16"}, {ov::device::properties.name(), DeviceProperties("CPU", 4)}}, + DeviceInformation{"CPU", {{}}, 4}, + true}, + meta_device_params{"CPU", {{ov::cache_dir("./")}, {ov::optimal_batch_size.name(), "16"}}, DeviceInformation{"CPU", {{ov::cache_dir("./")}}, 4}, true}, + meta_device_params{"-CPU", {}, DeviceInformation{"CPU", {}, 0}, true}, + meta_device_params{"CPU,GPU", {}, DeviceInformation{"CPU", {}, 0}, true}, }; INSTANTIATE_TEST_SUITE_P(smoke_AutoBatch_BehaviorTests, diff --git a/src/plugins/auto_batch/tests/unit/plugin_compile_model_test.cpp b/src/plugins/auto_batch/tests/unit/plugin_compile_model_test.cpp index 9235bd62f73114..133cdfe02d4b99 100644 --- a/src/plugins/auto_batch/tests/unit/plugin_compile_model_test.cpp +++ b/src/plugins/auto_batch/tests/unit/plugin_compile_model_test.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation 
// SPDX-License-Identifier: Apache-2.0 // @@ -129,6 +129,9 @@ TEST_P(PluginCompileModelTest, PluginCompileModelBatchedModelWithRemoteContextTe ASSERT_NO_THROW(m_plugin->compile_model(m_model, m_plugin_properities, m_remote_context)); } +auto DeviceProperties = [](const std::string& device_name, const uint32_t batch_size) { + return ov::AnyMap({{device_name, ov::AnyMap({ov::hint::num_requests(batch_size)})}}); +}; const std::vector plugin_compile_model_param_test = { // Case 1: explict apply batch size by config of AUTO_BATCH_DEVICE_CONFIG plugin_compile_model_param{{{ov::hint::performance_mode.name(), ov::hint::PerformanceMode::THROUGHPUT}, @@ -136,14 +139,18 @@ const std::vector plugin_compile_model_param_test = {ov::hint::num_requests(12)}, {ov::intel_gpu::memory_statistics.name(), static_cast(1024000)}, {ov::intel_gpu::device_total_mem_size.name(), static_cast(4096000000)}}, - {{ov::auto_batch_timeout(static_cast(200))}, {ov::device::priorities("CPU(32)")}}, + {{ov::auto_batch_timeout(static_cast(200))}, + {ov::device::priorities("CPU")}, + {ov::device::properties.name(), DeviceProperties("CPU", 32)}}, 32}, plugin_compile_model_param{{{ov::hint::performance_mode.name(), ov::hint::PerformanceMode::THROUGHPUT}, {ov::optimal_batch_size.name(), static_cast(16)}, {ov::hint::num_requests(12)}, {ov::intel_gpu::memory_statistics.name(), static_cast(1024000)}, {ov::intel_gpu::device_total_mem_size.name(), static_cast(4096000000)}}, - {{ov::auto_batch_timeout(static_cast(200))}, {ov::device::priorities("GPU(32)")}}, + {{ov::auto_batch_timeout(static_cast(200))}, + {ov::device::priorities("GPU")}, + {ov::device::properties.name(), DeviceProperties("GPU", 32)}}, 32}, // Case 2: CPU batch size is figured out by min of opt_batch_size and infReq_num // If config contains "PERFORMANCE_HINT_NUM_REQUESTS" @@ -206,7 +213,9 @@ const std::vector plugin_compile_model_param_test = {ov::hint::num_requests(12)}, {ov::intel_gpu::memory_statistics.name(), static_cast(1024000)}, {ov::intel_gpu::device_total_mem_size.name(), static_cast(4096000000)}}, - {{ov::auto_batch_timeout(static_cast(200))}, {ov::device::priorities("CPU(32)")}}, + {{ov::auto_batch_timeout(static_cast(200))}, + {ov::device::priorities("CPU")}, + {ov::device::properties.name(), DeviceProperties("CPU", 32)}}, 32}, }; diff --git a/src/plugins/auto_batch/tests/unit/plugin_query_model_test.cpp b/src/plugins/auto_batch/tests/unit/plugin_query_model_test.cpp index 33a006f51a024e..9b22bf553ade60 100644 --- a/src/plugins/auto_batch/tests/unit/plugin_query_model_test.cpp +++ b/src/plugins/auto_batch/tests/unit/plugin_query_model_test.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // @@ -62,11 +62,18 @@ TEST_P(QueryModelTest, QueryModelTestCase) { } } +auto DeviceProperties = [](const std::string& device_name, const uint32_t batch_size) { + return ov::AnyMap({{device_name, ov::AnyMap({ov::hint::num_requests(batch_size)})}}); +}; const std::vector query_model_params_test = { query_model_params{{{}}, true}, query_model_params{{{ov::auto_batch_timeout(static_cast(200))}}, true}, - query_model_params{{{ov::device::priorities("CPU(4)")}}, false}, - query_model_params{{{ov::auto_batch_timeout(static_cast(200))}, {ov::device::priorities("CPU(4)")}}, false}, + query_model_params{{{ov::device::priorities("CPU")}, {ov::device::properties.name(), DeviceProperties("CPU", 4)}}, + false}, + query_model_params{{{ov::auto_batch_timeout(static_cast(200))}, + 
{ov::device::priorities("CPU")}, + {ov::device::properties.name(), DeviceProperties("CPU", 4)}}, + false}, }; INSTANTIATE_TEST_SUITE_P(smoke_AutoBatch_BehaviorTests, diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/compiled_model/properties.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/compiled_model/properties.cpp index a2f9559cf1a958..fb0f2ac20abb7d 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/compiled_model/properties.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/compiled_model/properties.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // @@ -81,9 +81,14 @@ const std::vector hetero_properties = { }; const std::vector auto_batch_properties = { - {ov::device::priorities(std::string(ov::test::utils::DEVICE_CPU) + "(4)")}, - {ov::device::priorities(std::string(ov::test::utils::DEVICE_CPU) + "(4)"), ov::auto_batch_timeout(1)}, - {ov::device::priorities(std::string(ov::test::utils::DEVICE_CPU) + "(4)"), ov::auto_batch_timeout(10)}, + {ov::device::priorities(ov::test::utils::DEVICE_CPU), + ov::device::properties(ov::test::utils::DEVICE_CPU, ov::hint::num_requests(4))}, + {ov::device::priorities(ov::test::utils::DEVICE_CPU), + ov::device::properties(ov::test::utils::DEVICE_CPU, ov::hint::num_requests(4)), + ov::auto_batch_timeout(1)}, + {ov::device::priorities(ov::test::utils::DEVICE_CPU), + ov::device::properties(ov::test::utils::DEVICE_CPU, ov::hint::num_requests(4)), + ov::auto_batch_timeout(10)}, }; INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/exec_net_base.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/exec_net_base.cpp index 3750255faedc06..dcf79b3762699c 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/exec_net_base.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/exec_net_base.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // @@ -13,11 +13,11 @@ auto configs = []() { }; auto autoBatchConfigs = []() { - return std::vector{ - // explicit batch size 4 to avoid fallback to no auto-batching (i.e. plain GPU) - {{ov::device::priorities.name(), std::string(ov::test::utils::DEVICE_GPU) + "(4)"}, - // no timeout to avoid increasing the test time - ov::auto_batch_timeout(0)}}; + return std::vector{// explicit batch size 4 to avoid fallback to no auto-batching (i.e. 
plain GPU) + {{ov::device::priorities.name(), ov::test::utils::DEVICE_GPU}, + ov::device::properties(ov::test::utils::DEVICE_GPU, ov::hint::num_requests(4)), + // no timeout to avoid increasing the test time + ov::auto_batch_timeout(0)}}; }; INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVCompiledModelBaseTest, diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/callback.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/callback.cpp index 08a9803679eb17..a77b552115564c 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/callback.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/callback.cpp @@ -19,7 +19,8 @@ auto configs = []() { auto autoBatchConfigs = []() { return std::vector{// explicit batch size 4 to avoid fallback to no auto-batching (i.e. plain GPU) - {ov::device::priorities(std::string(ov::test::utils::DEVICE_GPU) + "(4)"), + {ov::device::priorities(ov::test::utils::DEVICE_GPU), + ov::device::properties(ov::test::utils::DEVICE_GPU, ov::hint::num_requests(4)), // no timeout to avoid increasing the test time ov::auto_batch_timeout(0)}}; }; diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/cancellation.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/cancellation.cpp index bf10bfdd3075c0..346684d43d1117 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/cancellation.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/cancellation.cpp @@ -15,7 +15,8 @@ auto configs = []() { auto autoBatchConfigs = []() { return std::vector{// explicit batch size 4 to avoid fallback to no auto-batching (i.e. plain GPU) - {ov::device::priorities(std::string(ov::test::utils::DEVICE_GPU) + "(4)"), + {ov::device::priorities(ov::test::utils::DEVICE_GPU), + ov::device::properties(ov::test::utils::DEVICE_GPU, ov::hint::num_requests(4)), // no timeout to avoid increasing the test time ov::auto_batch_timeout(0)}}; }; diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/io_tensor.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/io_tensor.cpp index 90040991dd366b..3ea61792dcae95 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/io_tensor.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/io_tensor.cpp @@ -23,11 +23,11 @@ auto configs = []() { }; auto AutoBatchConfigs = []() { - return std::vector{ - // explicit batch size 4 to avoid fallback to no auto-batching (i.e. plain GPU) - {{ov::device::priorities.name(), std::string(ov::test::utils::DEVICE_GPU) + "(4)"}, - // no timeout to avoid increasing the test time - ov::auto_batch_timeout(0)}}; + return std::vector{// explicit batch size 4 to avoid fallback to no auto-batching (i.e. 
plain GPU) + {{ov::device::priorities.name(), ov::test::utils::DEVICE_GPU}, + ov::device::properties(ov::test::utils::DEVICE_GPU, ov::hint::num_requests(4)), + // no timeout to avoid increasing the test time + ov::auto_batch_timeout(0)}}; }; INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferRequestIOTensorTest, diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/multithreading.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/multithreading.cpp index 906f04bd89fe85..70e50daa9862a8 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/multithreading.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/multithreading.cpp @@ -19,7 +19,8 @@ auto configs = []() { auto AutoBatchConfigs = []() { return std::vector{// explicit batch size 4 to avoid fallback to no auto-batching (i.e. plain GPU) - {ov::device::priorities(std::string(ov::test::utils::DEVICE_GPU) + "(4)"), + {ov::device::priorities(ov::test::utils::DEVICE_GPU), + ov::device::properties(ov::test::utils::DEVICE_GPU, ov::hint::num_requests(4)), // no timeout to avoid increasing the test time ov::auto_batch_timeout(0)}}; }; diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/perf_counters.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/perf_counters.cpp index d0af060b80ec94..3edc0c04861b94 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/perf_counters.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/perf_counters.cpp @@ -13,7 +13,8 @@ auto configs = []() { auto AutoBatchConfigs = []() { return std::vector{// explicit batch size 4 to avoid fallback to no auto-batching (i.e. plain GPU) - {ov::device::priorities(std::string(ov::test::utils::DEVICE_GPU) + "(4)"), + {ov::device::priorities(ov::test::utils::DEVICE_GPU), + ov::device::properties(ov::test::utils::DEVICE_GPU, ov::hint::num_requests(4)), // no timeout to avoid increasing the test time ov::auto_batch_timeout(0)}}; }; diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/wait.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/wait.cpp index 93bbb7804c389e..c26694d152e1f3 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/wait.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/wait.cpp @@ -19,7 +19,8 @@ auto configs = []() { auto AutoBatchConfigs = []() { return std::vector{// explicit batch size 4 to avoid fallback to no auto-batching (i.e. 
plain GPU) - {ov::device::priorities(std::string(ov::test::utils::DEVICE_GPU) + "(4)"), + {ov::device::priorities(ov::test::utils::DEVICE_GPU), + ov::device::properties(ov::test::utils::DEVICE_GPU, ov::hint::num_requests(4)), // no timeout to avoid increasing the test time ov::auto_batch_timeout(0)}}; }; diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_plugin/remote.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_plugin/remote.cpp index 01f02827cab7e3..da954d2223f081 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_plugin/remote.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_plugin/remote.cpp @@ -16,11 +16,11 @@ std::vector> generate_remote_params() { } auto AutoBatchConfigs = []() { - return std::vector{ - // explicit batch size 4 to avoid fallback to no auto-batching (i.e. plain GPU) - {{ov::device::priorities.name(), std::string(ov::test::utils::DEVICE_GPU) + "(4)"}, - // no timeout to avoid increasing the test time - {ov::auto_batch_timeout.name(), "0 "}}}; + return std::vector{// explicit batch size 4 to avoid fallback to no auto-batching (i.e. plain GPU) + {{ov::device::priorities.name(), ov::test::utils::DEVICE_GPU}, + ov::device::properties(ov::test::utils::DEVICE_GPU, ov::hint::num_requests(4)), + // no timeout to avoid increasing the test time + {ov::auto_batch_timeout.name(), "0 "}}}; }; INSTANTIATE_TEST_SUITE_P(DISABLED_smoke_BehaviorTests, OVRemoteTest, diff --git a/src/plugins/template/tests/functional/shared_tests_instances/behavior/ov_executable_network/properties.cpp b/src/plugins/template/tests/functional/shared_tests_instances/behavior/ov_executable_network/properties.cpp index 4f5ba4f1baf4a2..cd57bf15f983fe 100644 --- a/src/plugins/template/tests/functional/shared_tests_instances/behavior/ov_executable_network/properties.cpp +++ b/src/plugins/template/tests/functional/shared_tests_instances/behavior/ov_executable_network/properties.cpp @@ -66,10 +66,13 @@ const std::vector multi_properties = { }; const std::vector auto_batch_properties = { - {{ov::device::priorities.name(), std::string(ov::test::utils::DEVICE_TEMPLATE) + "(4)"}}, - {{ov::device::priorities.name(), std::string(ov::test::utils::DEVICE_TEMPLATE) + "(4)"}, + {{ov::device::priorities.name(), ov::test::utils::DEVICE_TEMPLATE}, + ov::device::properties(ov::test::utils::DEVICE_TEMPLATE, ov::hint::num_requests(4))}, + {{ov::device::priorities.name(), ov::test::utils::DEVICE_TEMPLATE}, + ov::device::properties(ov::test::utils::DEVICE_TEMPLATE, ov::hint::num_requests(4)), {ov::auto_batch_timeout(1)}}, - {{ov::device::priorities.name(), std::string(ov::test::utils::DEVICE_TEMPLATE) + "(4)"}, + {{ov::device::priorities.name(), ov::test::utils::DEVICE_TEMPLATE}, + ov::device::properties(ov::test::utils::DEVICE_TEMPLATE, ov::hint::num_requests(4)), {ov::auto_batch_timeout(10)}}, }; diff --git a/src/tests/functional/plugin/shared/include/behavior/ov_plugin/auto_batching_tests.hpp b/src/tests/functional/plugin/shared/include/behavior/ov_plugin/auto_batching_tests.hpp index 6d1288adbebeb6..93bcce96fff7cc 100644 --- a/src/tests/functional/plugin/shared/include/behavior/ov_plugin/auto_batching_tests.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/ov_plugin/auto_batching_tests.hpp @@ -75,9 +75,10 @@ class AutoBatching_Test : public OVPluginTestBase, // minimize timeout to reduce test time config.insert(ov::auto_batch_timeout(1)); - auto compiled_model = 
core->compile_model(model, std::string(ov::test::utils::DEVICE_BATCH) + ":" + - target_device + "(" + std::to_string(num_batch) + ")", - config); + // set batch size + config.insert(ov::device::properties(target_device, ov::hint::num_requests(num_batch))); + auto compiled_model = + core->compile_model(model, std::string(ov::test::utils::DEVICE_BATCH) + ":" + target_device, config); auto network_outputs = model->outputs(); ASSERT_EQ(network_outputs.size(), 1) << " Auto-Batching tests use networks with single output";
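For reference, a minimal usage sketch of the configuration style this patch expects; the "GPU" target, the "model.xml" path, and the batch size of 4 are placeholder assumptions, not values taken from the patch:

#include <openvino/openvino.hpp>

int main() {
    ov::Core core;
    auto model = core.read_model("model.xml");  // placeholder model

    // Deprecated style (still parsed by parse_batch_device, to be deprecated after 25.0):
    //   core.compile_model(model, "BATCH:GPU(4)");
    // New style: the batch size comes from ov::device::properties via ov::hint::num_requests,
    // which is what parse_batch_size() reads in the plugin changes above.
    auto compiled_model = core.compile_model(model,
                                             "BATCH:GPU",
                                             ov::device::properties("GPU", ov::hint::num_requests(4)),
                                             ov::auto_batch_timeout(0));  // no timeout, as in the tests
    return 0;
}

The same configuration can also be built as an ov::AnyMap, matching the updated AutoBatching_Test above:

    ov::AnyMap config;
    config.insert(ov::auto_batch_timeout(1));                                  // minimize timeout
    config.insert(ov::device::properties("GPU", ov::hint::num_requests(4)));   // batch size via properties
    auto compiled_model = core.compile_model(model, "BATCH:GPU", config);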