From 38877db640dab6cbed62eb92da56b70d63543cbc Mon Sep 17 00:00:00 2001 From: Yuan Hu Date: Wed, 10 Jan 2024 13:51:50 +0800 Subject: [PATCH 01/28] [CPU][apiConformance] Support LOG_LEVEL property (#22007) --- src/plugins/intel_cpu/src/compiled_model.cpp | 3 ++ src/plugins/intel_cpu/src/config.cpp | 10 +++++ src/plugins/intel_cpu/src/config.h | 1 + src/plugins/intel_cpu/src/plugin.cpp | 3 ++ .../ov_executable_network/properties.cpp | 42 +++++++++++++++++++ .../behavior/ov_plugin/properties.cpp | 34 +++++++++++++++ 6 files changed, 93 insertions(+) diff --git a/src/plugins/intel_cpu/src/compiled_model.cpp b/src/plugins/intel_cpu/src/compiled_model.cpp index 4da654fc0504b2..f45f1e2a2b70b3 100644 --- a/src/plugins/intel_cpu/src/compiled_model.cpp +++ b/src/plugins/intel_cpu/src/compiled_model.cpp @@ -236,6 +236,7 @@ ov::Any CompiledModel::get_property(const std::string& name) const { RO_property(ov::hint::enable_hyper_threading.name()), RO_property(ov::execution_devices.name()), RO_property(ov::intel_cpu::denormals_optimization.name()), + RO_property(ov::log::level.name()), RO_property(ov::intel_cpu::sparse_weights_decompression_rate.name()), }; } @@ -275,6 +276,8 @@ ov::Any CompiledModel::get_property(const std::string& name) const { return decltype(ov::hint::inference_precision)::value_type(config.inferencePrecision); } else if (name == ov::hint::performance_mode) { return decltype(ov::hint::performance_mode)::value_type(config.hintPerfMode); + } else if (name == ov::log::level) { + return decltype(ov::log::level)::value_type(config.logLevel); } else if (name == ov::hint::enable_cpu_pinning.name()) { const bool use_pin = config.enableCpuPinning; return decltype(ov::hint::enable_cpu_pinning)::value_type(use_pin); diff --git a/src/plugins/intel_cpu/src/config.cpp b/src/plugins/intel_cpu/src/config.cpp index ba5c54d6bfe219..8064339682160c 100644 --- a/src/plugins/intel_cpu/src/config.cpp +++ b/src/plugins/intel_cpu/src/config.cpp @@ -100,6 +100,16 @@ void 
Config::readProperties(const ov::AnyMap& prop, const ModelType modelType) { key, ". Expected only ov::hint::PerformanceMode::LATENCY/THROUGHPUT/CUMULATIVE_THROUGHPUT."); } + } else if (key == ov::log::level.name()) { + try { + logLevel = val.as(); + } catch (const ov::Exception&) { + OPENVINO_THROW("Wrong value ", + val.as(), + " for property key ", + key, + ". Expected only ov::log::Level::NO/ERR/WARNING/INFO/DEBUG/TRACE."); + } } else if (key == ov::hint::num_requests.name()) { try { ov::Any value = val.as(); diff --git a/src/plugins/intel_cpu/src/config.h b/src/plugins/intel_cpu/src/config.h index 0f88271f2984fd..c757805e272d26 100644 --- a/src/plugins/intel_cpu/src/config.h +++ b/src/plugins/intel_cpu/src/config.h @@ -64,6 +64,7 @@ struct Config { #endif ov::threading::IStreamsExecutor::Config streamExecutorConfig; ov::hint::PerformanceMode hintPerfMode = ov::hint::PerformanceMode::LATENCY; + ov::log::Level logLevel = ov::log::Level::NO; uint32_t hintNumRequests = 0; bool enableCpuPinning = true; bool changedCpuPinning = false; diff --git a/src/plugins/intel_cpu/src/plugin.cpp b/src/plugins/intel_cpu/src/plugin.cpp index 12501b78561e19..e9a64a821431a6 100644 --- a/src/plugins/intel_cpu/src/plugin.cpp +++ b/src/plugins/intel_cpu/src/plugin.cpp @@ -696,6 +696,8 @@ ov::Any Engine::get_property(const std::string& name, const ov::AnyMap& options) auto model_runtime_properties = ov::Any(m_compiled_model_runtime_properties); return decltype(ov::internal::compiled_model_runtime_properties)::value_type( std::move(model_runtime_properties.as())); + } else if (name == ov::log::level) { + return engConfig.logLevel; } else if (name == ov::internal::compiled_model_runtime_properties_supported.name()) { ov::Any res = true; auto it = options.find(ov::internal::compiled_model_runtime_properties.name()); @@ -810,6 +812,7 @@ ov::Any Engine::get_ro_property(const std::string& name, const ov::AnyMap& optio RW_property(ov::hint::enable_hyper_threading.name()), 
RW_property(ov::device::id.name()), RW_property(ov::intel_cpu::denormals_optimization.name()), + RW_property(ov::log::level.name()), RW_property(ov::intel_cpu::sparse_weights_decompression_rate.name()), }; diff --git a/src/plugins/intel_cpu/tests/functional/behavior/ov_executable_network/properties.cpp b/src/plugins/intel_cpu/tests/functional/behavior/ov_executable_network/properties.cpp index df70d78c11a20a..abf855b3700010 100644 --- a/src/plugins/intel_cpu/tests/functional/behavior/ov_executable_network/properties.cpp +++ b/src/plugins/intel_cpu/tests/functional/behavior/ov_executable_network/properties.cpp @@ -36,6 +36,7 @@ TEST_F(OVClassConfigTestCPU, smoke_CpuExecNetworkSupportedPropertiesAreAvailable RO_property(ov::hint::enable_hyper_threading.name()), RO_property(ov::execution_devices.name()), RO_property(ov::intel_cpu::denormals_optimization.name()), + RO_property(ov::log::level.name()), RO_property(ov::intel_cpu::sparse_weights_decompression_rate.name()), }; @@ -229,4 +230,45 @@ TEST_F(OVClassConfigTestCPU, smoke_CpuExecNetworkCheckModelInferencePrecisionHas ASSERT_EQ(inference_precision_value, inference_precision_expected); } +TEST_F(OVClassConfigTestCPU, smoke_CpuExecNetworkCheckLogLevel) { + ov::Core ie; + + // check default value + { + ov::AnyMap config; + ov::Any value; + ov::CompiledModel compiledModel; + ASSERT_NO_THROW(compiledModel = ie.compile_model(model, deviceName, config)); + ASSERT_NO_THROW(value = compiledModel.get_property(ov::log::level)); + ASSERT_EQ(value.as(), ov::log::Level::NO); + } + //check set and get + const std::vector logLevels = { + ov::log::Level::ERR, + ov::log::Level::NO, + ov::log::Level::WARNING, + ov::log::Level::INFO, + ov::log::Level::DEBUG, + ov::log::Level::TRACE}; + + for (unsigned int i = 0; i < logLevels.size(); i++) { + ov::Any value; + ov::CompiledModel compiledModel; + ov::AnyMap config{ov::log::level(logLevels[i])}; + ASSERT_NO_THROW(compiledModel = ie.compile_model(model, deviceName, config)); + 
ASSERT_NO_THROW(value = compiledModel.get_property(ov::log::level)); + ASSERT_EQ(value.as(), logLevels[i]); + } + + for (unsigned int i = 0; i < logLevels.size(); i++) { + ov::Any value; + ov::CompiledModel compiledModel; + ASSERT_NO_THROW(ie.set_property(deviceName, ov::log::level(logLevels[i]))); + ASSERT_NO_THROW(compiledModel = ie.compile_model(model, deviceName)); + ASSERT_NO_THROW(value = compiledModel.get_property(ov::log::level)); + ASSERT_EQ(value.as(), logLevels[i]); + } +} + + } // namespace diff --git a/src/plugins/intel_cpu/tests/functional/behavior/ov_plugin/properties.cpp b/src/plugins/intel_cpu/tests/functional/behavior/ov_plugin/properties.cpp index 37d55063a21a3e..263158d43faa32 100644 --- a/src/plugins/intel_cpu/tests/functional/behavior/ov_plugin/properties.cpp +++ b/src/plugins/intel_cpu/tests/functional/behavior/ov_plugin/properties.cpp @@ -2,9 +2,11 @@ // SPDX-License-Identifier: Apache-2.0 // +#include #include #include "test_utils/properties_test.hpp" +#include "common_test_utils/test_assertions.hpp" #include "openvino/runtime/properties.hpp" #include "openvino/runtime/core.hpp" #include "openvino/core/type/element_type.hpp" @@ -45,6 +47,7 @@ TEST_F(OVClassConfigTestCPU, smoke_PluginAllSupportedPropertiesAreAvailable) { RW_property(ov::hint::enable_hyper_threading.name()), RW_property(ov::device::id.name()), RW_property(ov::intel_cpu::denormals_optimization.name()), + RW_property(ov::log::level.name()), RW_property(ov::intel_cpu::sparse_weights_decompression_rate.name()), }; @@ -283,4 +286,35 @@ TEST_F(OVClassConfigTestCPU, smoke_PluginSetConfigExecutionModeAndInferencePreci expect_inference_precision(bf16_if_can_be_emulated); } +TEST_F(OVClassConfigTestCPU, smoke_PluginSetConfigLogLevel) { + ov::Core ie; + //check default value + ov::Any value; + ASSERT_NO_THROW(value = ie.get_property("CPU", ov::log::level)); + ASSERT_EQ(value.as(), ov::log::Level::NO); + + //check set and get + const std::vector logLevels = { + ov::log::Level::ERR, + 
ov::log::Level::NO, + ov::log::Level::WARNING, + ov::log::Level::INFO, + ov::log::Level::DEBUG, + ov::log::Level::TRACE}; + + for (unsigned int i = 0; i < logLevels.size(); i++) { + ASSERT_NO_THROW(ie.set_property("CPU", ov::log::level(logLevels[i]))); + ASSERT_NO_THROW(value = ie.get_property("CPU", ov::log::level)); + ASSERT_EQ(value.as(), logLevels[i]); + } + + // check throwing message + auto property = ov::PropertyName(ov::log::level.name(), ov::PropertyMutability::RW); + const std::string expect_message = std::string("Wrong value DUMMY VALUE for property key ") + + ov::log::level.name() + ". Expected only ov::log::Level::NO/ERR/WARNING/INFO/DEBUG/TRACE."; + OV_EXPECT_THROW(ie.set_property("CPU", {{property, "DUMMY VALUE"}}), + ov::Exception, + testing::HasSubstr(expect_message)); +} + } // namespace From 21043734e80253b3db6f4ed5e804a0e3fc0d2d54 Mon Sep 17 00:00:00 2001 From: David Nam Date: Wed, 10 Jan 2024 15:04:58 +0900 Subject: [PATCH 02/28] [GPU] fix Crop (Gather) in case of negative indices (#22039) * [GPU] fix Crop (Gather) in case of negative indices - Negative values of indices should be indexing from data.shape[axis] --- .../intel_gpu/src/plugin/ops/gather.cpp | 4 ++- .../single_layer_tests/gather.cpp | 25 ++++++++++++++++--- 2 files changed, 25 insertions(+), 4 deletions(-) diff --git a/src/plugins/intel_gpu/src/plugin/ops/gather.cpp b/src/plugins/intel_gpu/src/plugin/ops/gather.cpp index 0d2426450940b4..d7c06133e04279 100644 --- a/src/plugins/intel_gpu/src/plugin/ops/gather.cpp +++ b/src/plugins/intel_gpu/src/plugin/ops/gather.cpp @@ -119,7 +119,9 @@ void CreateGatherOpBase(ProgramBuilder& p, const std::shared_ptr& op, const i ov::Shape start_offset(input_shape.size()); int32_t new_offset0 = static_cast(result); if (support_neg_ind && new_offset0 < 0) { - new_offset0 += static_cast(start_offset.size()); + // According to Gather-8, + // Negative values of indices indicate reverse indexing from data.shape[axis] + new_offset0 += 
static_cast(input_shape.get_shape()[axis]); } start_offset[0] = static_cast(new_offset0); diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/gather.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/gather.cpp index d299d303815cb3..544c39f912e3c9 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/gather.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/gather.cpp @@ -548,14 +548,33 @@ gather7ParamsTuple dummyParams2 = { ov::test::utils::DEVICE_GPU, }; -const auto gatherWithNagativeIndicesParams = testing::Combine( +const auto gatherWithNagativeIndicesParams1 = testing::Combine( testing::Values(dummyParams2), testing::ValuesIn(nagativeSingleindicesData) ); -INSTANTIATE_TEST_CASE_P(smoke_Gather8NagativeIndice, +INSTANTIATE_TEST_CASE_P(smoke_Gather8NagativeIndice1, Gather8withIndicesDataLayerTest, - gatherWithNagativeIndicesParams, + gatherWithNagativeIndicesParams1, + Gather8withIndicesDataLayerTest::getTestCaseName +); + +gather7ParamsTuple dummyParams3 = { + ov::test::static_shapes_to_test_representation(std::vector({{6, 8, 2, 2}})), + ov::Shape({}), + std::tuple{0, 0}, + ov::element::f32, + ov::test::utils::DEVICE_GPU, +}; + +const auto gatherWithNagativeIndicesParams2 = testing::Combine( + testing::Values(dummyParams3), + testing::ValuesIn(nagativeSingleindicesData) +); + +INSTANTIATE_TEST_CASE_P(smoke_Gather8NagativeIndice2, + Gather8withIndicesDataLayerTest, + gatherWithNagativeIndicesParams2, Gather8withIndicesDataLayerTest::getTestCaseName ); From 3d627263aa2981c9ab3d27b51c146c2922c2b3c8 Mon Sep 17 00:00:00 2001 From: Vitaliy Urusovskij Date: Wed, 10 Jan 2024 10:38:44 +0400 Subject: [PATCH 03/28] Add support of `ov::Busy` and `ov::Cancelled` in C API (#22034) --- src/bindings/c/src/common.h | 3 +++ src/bindings/c/tests/ov_infer_request_test.cpp | 13 ++++++++++++- src/inference/src/infer_request.cpp 
| 2 ++ 3 files changed, 17 insertions(+), 1 deletion(-) diff --git a/src/bindings/c/src/common.h b/src/bindings/c/src/common.h index 2a338d0f0d6da0..030f3c72e6a1b8 100644 --- a/src/bindings/c/src/common.h +++ b/src/bindings/c/src/common.h @@ -12,6 +12,7 @@ #include "openvino/core/except.hpp" #include "openvino/openvino.hpp" +#include "openvino/runtime/exception.hpp" #define CATCH_OV_EXCEPTION(StatusCode, ExceptionType) \ catch (const ov::ExceptionType& ex) { \ @@ -20,6 +21,8 @@ } #define CATCH_OV_EXCEPTIONS \ + CATCH_OV_EXCEPTION(REQUEST_BUSY, Busy) \ + CATCH_OV_EXCEPTION(INFER_CANCELLED, Cancelled) \ CATCH_OV_EXCEPTION(NOT_IMPLEMENTED, NotImplemented) \ CATCH_OV_EXCEPTION(GENERAL_ERROR, Exception) \ catch (...) { \ diff --git a/src/bindings/c/tests/ov_infer_request_test.cpp b/src/bindings/c/tests/ov_infer_request_test.cpp index 09a8bccf7643d4..620a7735868787 100644 --- a/src/bindings/c/tests/ov_infer_request_test.cpp +++ b/src/bindings/c/tests/ov_infer_request_test.cpp @@ -298,8 +298,9 @@ TEST_P(ov_infer_request_test, infer) { TEST_P(ov_infer_request_test, cancel) { OV_EXPECT_OK(ov_infer_request_set_tensor(infer_request, in_tensor_name, input_tensor)); - + OV_ASSERT_OK(ov_infer_request_start_async(infer_request)); OV_EXPECT_OK(ov_infer_request_cancel(infer_request)); + EXPECT_EQ(ov_status_e::INFER_CANCELLED, ov_infer_request_start_async(infer_request)); } TEST_P(ov_infer_request_ppp, infer_ppp) { @@ -341,6 +342,16 @@ TEST_P(ov_infer_request_test, infer_async_wait_for) { } } +TEST_P(ov_infer_request_test, infer_async_wait_for_return_busy) { + OV_EXPECT_OK(ov_infer_request_set_input_tensor_by_index(infer_request, 0, input_tensor)); + + OV_ASSERT_OK(ov_infer_request_start_async(infer_request)); + + if (!HasFatalFailure()) { + EXPECT_EQ(ov_status_e::REQUEST_BUSY, ov_infer_request_get_tensor(infer_request, in_tensor_name, &input_tensor)); + } +} + TEST_P(ov_infer_request_test, infer_async_wait_for_return_fail) { OV_EXPECT_NOT_OK(ov_infer_request_wait_for(infer_request, 
10)); } diff --git a/src/inference/src/infer_request.cpp b/src/inference/src/infer_request.cpp index d9e9c388947f3c..19954729400f68 100644 --- a/src/inference/src/infer_request.cpp +++ b/src/inference/src/infer_request.cpp @@ -32,6 +32,8 @@ OPENVINO_SUPPRESS_DEPRECATED_START ov::Busy::create(ex.what()); \ } catch (const ov::Busy&) { \ throw; \ + } catch (const ov::Cancelled&) { \ + throw; \ } catch (const std::exception& ex) { \ OPENVINO_THROW(ex.what()); \ } catch (...) { \ From 09f5b0ff86fe14d776c81a854061142c4ba9bbf4 Mon Sep 17 00:00:00 2001 From: Xiping Yan Date: Wed, 10 Jan 2024 15:14:52 +0800 Subject: [PATCH 04/28] [CPU][DEBUG]Fix "getData" throw exception issue in print of debug log. (#21980) --- .../intel_cpu/src/utils/debug_capabilities.cpp | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/src/plugins/intel_cpu/src/utils/debug_capabilities.cpp b/src/plugins/intel_cpu/src/utils/debug_capabilities.cpp index 9a086987435a17..50c6cdb8fb6cac 100644 --- a/src/plugins/intel_cpu/src/utils/debug_capabilities.cpp +++ b/src/plugins/intel_cpu/src/utils/debug_capabilities.cpp @@ -184,6 +184,18 @@ std::ostream & operator<<(std::ostream & os, const Node &c_node) { num_output_port = edge->getInputNum() + 1; } + auto getData = [](const MemoryPtr& ptr) { + std::string ret; + try { + std::stringstream ss; + ss << ptr->getData(); + ret = ss.str(); + } catch (const std::exception& e) { + ret = "?"; + } + return ret; + }; + if (num_output_port) { if (num_output_port > 1) leftside << "("; comma = ""; @@ -199,7 +211,7 @@ std::ostream & operator<<(std::ostream & os, const Node &c_node) { leftside << comma << desc->getPrecision().get_type_name() << "_" << desc->serializeFormat() << "_" << shape_str - << "_" << ptr->getData(); + << "_" << getData(ptr); b_ouputed = true; } else { leftside << "(empty)"; @@ -284,7 +296,7 @@ std::ostream & operator<<(std::ostream & os, const Node &c_node) { os << node_id(*edge->getParent()); auto ptr = 
edge->getMemoryPtr(); if (ptr) { - os << "_" << ptr->getData(); + os << "_" << getData(ptr); } if (!is_single_output_port(*n)) os << "[" << edge->getInputNum() << "]"; From 9bf703bd43d286f8cb3effc03c96359ac350f3b6 Mon Sep 17 00:00:00 2001 From: Wanglei Shen Date: Wed, 10 Jan 2024 15:55:39 +0800 Subject: [PATCH 05/28] [CPU] Update model prefer threads for ARM Linux (#21285) --- .../intel_cpu/src/cpu_streams_calculation.cpp | 50 +++++++++++-------- 1 file changed, 30 insertions(+), 20 deletions(-) diff --git a/src/plugins/intel_cpu/src/cpu_streams_calculation.cpp b/src/plugins/intel_cpu/src/cpu_streams_calculation.cpp index 5a718a2f3ce135..23550e19548637 100644 --- a/src/plugins/intel_cpu/src/cpu_streams_calculation.cpp +++ b/src/plugins/intel_cpu/src/cpu_streams_calculation.cpp @@ -425,7 +425,19 @@ int get_model_prefer_threads(const int num_streams, ov::MemBandwidthPressure networkToleranceForLowCache = ov::MemBandwidthPressureTolerance(model, L2_cache_size, memThresholdAssumeLimitedForISA); -#if (defined(OPENVINO_ARCH_ARM) || defined(OPENVINO_ARCH_ARM64)) && defined(__APPLE__) +#if ((defined(OPENVINO_ARCH_ARM) || defined(OPENVINO_ARCH_ARM64)) && defined(__linux__)) + config.modelPreferThreads = 4; + if (networkToleranceForLowCache.max_mem_tolerance == ov::MemBandwidthPressure::UNKNOWN) { + if (networkToleranceForLowCache.ratio_compute_convs == ov::MemBandwidthPressure::ALL) { + config.modelPreferThreads = 16; + } + } else if ((networkToleranceForLowCache.ratio_mem_limited_deconvs != ov::MemBandwidthPressure::ALL) && + ((networkToleranceForLowCache.max_mem_tolerance > ov::MemBandwidthPressure::LIMITED) || + (networkToleranceForLowCache.ratio_compute_convs > ov::MemBandwidthPressure::LIMITED) || + (networkToleranceForLowCache.ratio_mem_limited_gemms > ov::MemBandwidthPressure::NONE))) { + config.modelPreferThreads = 8; + } +#elif((defined(OPENVINO_ARCH_ARM) || defined(OPENVINO_ARCH_ARM64)) && defined(__APPLE__)) config.modelPreferThreads = 1; if 
(networkToleranceForLowCache.max_mem_tolerance == ov::MemBandwidthPressure::UNKNOWN) { if ((networkToleranceForLowCache.ratio_compute_convs == ov::MemBandwidthPressure::ALL) || @@ -447,27 +459,25 @@ int get_model_prefer_threads(const int num_streams, networkToleranceForLowCache.ratio_compute_convs > ov::MemBandwidthPressure::LIMITED) { config.modelPreferThreads = 2; } -#endif - - if (-1 == config.modelPreferThreads) { - config.modelPreferThreads = ov::threading::IStreamsExecutor::Config::StreamMode::DEFAULT; - if (networkToleranceForLowCache.max_mem_tolerance == ov::MemBandwidthPressure::UNKNOWN) { - if ((networkToleranceForLowCache.ratio_compute_convs == ov::MemBandwidthPressure::ALL) || - (networkToleranceForLowCache.ratio_compute_deconvs == ov::MemBandwidthPressure::ALL)) { - // all relevant layers (convs, etc) are compute-limited, the most aggressive val for #streams - config.modelPreferThreads = 1; - } // otherwise (no recognized layers) falling back to the default value - } else if (networkToleranceForLowCache.max_mem_tolerance > memThresholdAssumeLimitedForISA) { - // network is below the ISA-specific threshold +#else + config.modelPreferThreads = 0; + if (networkToleranceForLowCache.max_mem_tolerance == ov::MemBandwidthPressure::UNKNOWN) { + if ((networkToleranceForLowCache.ratio_compute_convs == ov::MemBandwidthPressure::ALL) || + (networkToleranceForLowCache.ratio_compute_deconvs == ov::MemBandwidthPressure::ALL)) { + // all relevant layers (convs, etc) are compute-limited, the most aggressive val for #streams config.modelPreferThreads = 1; - } else if (networkToleranceForLowCache.max_mem_tolerance > ov::MemBandwidthPressure::LIMITED) { - // network is below general threshold - config.modelPreferThreads = 2; - } - if (config.modelPreferThreads == 1 && proc_type_table[0][EFFICIENT_CORE_PROC] == 0 && sockets == 1) { - config.modelPreferThreads = 2; - } + } // otherwise (no recognized layers) falling back to the default value + } else if 
(networkToleranceForLowCache.max_mem_tolerance > memThresholdAssumeLimitedForISA) { + // network is below the ISA-specific threshold + config.modelPreferThreads = 1; + } else if (networkToleranceForLowCache.max_mem_tolerance > ov::MemBandwidthPressure::LIMITED) { + // network is below general threshold + config.modelPreferThreads = 2; } + if (config.modelPreferThreads == 1 && proc_type_table[0][EFFICIENT_CORE_PROC] == 0 && sockets == 1) { + config.modelPreferThreads = 2; + } +#endif } // latency From 916fcab0e3776c9ae89ca3aafd489c5913d96e69 Mon Sep 17 00:00:00 2001 From: Georgy Krivoruchko Date: Tue, 9 Jan 2024 23:56:29 -0800 Subject: [PATCH 06/28] [ONNX] Frontend refactoring (#22021) * Updated tensor.hpp/cpp * Updated common.hpp/cpp * More changes in tensor.hpp/cpp and corresponding files * Update tensor.hpp: returned meaningful exception --- .../onnx/frontend/src/core/graph.cpp | 6 +- .../onnx/frontend/src/core/sparse_tensor.hpp | 4 +- .../onnx/frontend/src/core/tensor.cpp | 42 +++--- .../onnx/frontend/src/core/tensor.hpp | 128 ++++++++---------- .../onnx/frontend/src/core/value_info.hpp | 16 +-- src/frontends/onnx/frontend/src/editor.cpp | 2 +- .../onnx/frontend/src/node_context.cpp | 4 +- .../onnx/frontend/src/op/blackmanwindow.cpp | 3 +- src/frontends/onnx/frontend/src/op/cast.cpp | 2 +- .../onnx/frontend/src/op/constant.cpp | 10 +- .../frontend/src/op/constant_of_shape.cpp | 2 +- .../onnx/frontend/src/op/eye_like.cpp | 17 ++- .../onnx/frontend/src/op/hammingwindow.cpp | 3 +- .../onnx/frontend/src/op/hannwindow.cpp | 3 +- .../onnx/frontend/src/op/hardmax.cpp | 69 +++++----- .../onnx/frontend/src/op/random_normal.cpp | 2 +- .../frontend/src/op/random_normal_like.cpp | 5 +- .../onnx/frontend/src/op/random_uniform.cpp | 2 +- .../frontend/src/op/random_uniform_like.cpp | 2 +- .../onnx/frontend/src/utils/common.cpp | 68 ++++++---- .../onnx/frontend/src/utils/common.hpp | 30 ++-- 21 files changed, 205 insertions(+), 215 deletions(-) diff --git 
a/src/frontends/onnx/frontend/src/core/graph.cpp b/src/frontends/onnx/frontend/src/core/graph.cpp index 697cb6d2911e6f..ac94cc4af26746 100644 --- a/src/frontends/onnx/frontend/src/core/graph.cpp +++ b/src/frontends/onnx/frontend/src/core/graph.cpp @@ -151,12 +151,12 @@ Graph::Graph(const std::string& model_dir, std::shared_ptr ov_constant; // For each initializer create a Constant node and store it in cache try { - ov_constant = tensor.get_ng_constant(); + ov_constant = tensor.get_ov_constant(); } catch (const error::invalid_external_data&) { // invalid external data makes initializers creation impossible throw; } catch (const ov::Exception&) { - ov_constant = ngraph::onnx_import::common::make_failsafe_constant(tensor.get_ng_type()); + ov_constant = ngraph::onnx_import::common::make_failsafe_constant(tensor.get_ov_type()); } initializers.emplace(initializer_tensor.name(), tensor); @@ -173,7 +173,7 @@ Graph::Graph(const std::string& model_dir, } ValueInfo value_info{input}; - auto ov_node = value_info.get_ng_node(m_parameters, initializers); + auto ov_node = value_info.get_ov_node(m_parameters, initializers); m_cache->emplace_node(input.name(), std::move(ov_node)); } } diff --git a/src/frontends/onnx/frontend/src/core/sparse_tensor.hpp b/src/frontends/onnx/frontend/src/core/sparse_tensor.hpp index 86892c22259a97..c8818e98114580 100644 --- a/src/frontends/onnx/frontend/src/core/sparse_tensor.hpp +++ b/src/frontends/onnx/frontend/src/core/sparse_tensor.hpp @@ -53,8 +53,8 @@ class SparseTensor { return m_indices; } - const element::Type& get_ng_type() const { - return m_values.get_ng_type(); + const element::Type& get_ov_type() const { + return m_values.get_ov_type(); } private: diff --git a/src/frontends/onnx/frontend/src/core/tensor.cpp b/src/frontends/onnx/frontend/src/core/tensor.cpp index c3320cb0603533..b44515e9c0da54 100644 --- a/src/frontends/onnx/frontend/src/core/tensor.cpp +++ b/src/frontends/onnx/frontend/src/core/tensor.cpp @@ -18,7 +18,7 @@ std::vector 
Tensor::get_data() const { if (m_tensor_proto->data_type() == ONNX_NAMESPACE::TensorProto_DataType_DOUBLE) { return detail::__get_data(m_tensor_proto->double_data()); } - throw error::tensor::invalid_data_type{m_tensor_proto->data_type()}; + ONNX_INVALID_DATA_TYPE(m_tensor_proto->data_type(), "DOUBLE, raw data"); } template <> @@ -32,45 +32,45 @@ std::vector Tensor::get_data() const { if (m_tensor_proto->data_type() == ONNX_NAMESPACE::TensorProto_DataType_FLOAT) { return detail::__get_data(m_tensor_proto->float_data()); } - throw error::tensor::invalid_data_type{m_tensor_proto->data_type()}; + ONNX_INVALID_DATA_TYPE(m_tensor_proto->data_type(), "FLOAT, raw data"); } template <> -std::vector Tensor::get_data() const { +std::vector Tensor::get_data() const { if (has_external_data()) { return get_external_data(); } if (m_tensor_proto->has_raw_data()) { - return detail::__get_raw_data(m_tensor_proto->raw_data(), m_tensor_proto->data_type()); + return detail::__get_raw_data(m_tensor_proto->raw_data(), m_tensor_proto->data_type()); } if (m_tensor_proto->data_type() == ONNX_NAMESPACE::TensorProto_DataType_FLOAT16) { using std::begin; using std::end; const auto& int32_data = m_tensor_proto->int32_data(); - std::vector float16_data; + std::vector float16_data; float16_data.reserve(int32_data.size()); std::transform(begin(int32_data), end(int32_data), std::back_inserter(float16_data), [](int32_t elem) { - return ngraph::float16::from_bits(static_cast(elem)); + return ov::float16::from_bits(static_cast(elem)); }); - return detail::__get_data(float16_data); + return detail::__get_data(float16_data); } - throw error::tensor::invalid_data_type{m_tensor_proto->data_type()}; + ONNX_INVALID_DATA_TYPE(m_tensor_proto->data_type(), "FLOAT16, raw data"); } template <> -std::vector Tensor::get_data() const { +std::vector Tensor::get_data() const { if (has_external_data()) { return get_external_data(); } if (m_tensor_proto->has_raw_data()) { - return 
detail::__get_raw_data(m_tensor_proto->raw_data(), m_tensor_proto->data_type()); + return detail::__get_raw_data(m_tensor_proto->raw_data(), m_tensor_proto->data_type()); } if (m_tensor_proto->data_type() == ONNX_NAMESPACE::TensorProto_DataType_BFLOAT16) { - return detail::__get_data(m_tensor_proto->int32_data()); + return detail::__get_data(m_tensor_proto->int32_data()); } - throw error::tensor::invalid_data_type{m_tensor_proto->data_type()}; + ONNX_INVALID_DATA_TYPE(m_tensor_proto->data_type(), "INT32, raw data"); } template <> @@ -84,7 +84,7 @@ std::vector Tensor::get_data() const { if (m_tensor_proto->data_type() == ONNX_NAMESPACE::TensorProto_DataType_INT8) { return detail::__get_data(m_tensor_proto->int32_data()); } - throw error::tensor::invalid_data_type{m_tensor_proto->data_type()}; + ONNX_INVALID_DATA_TYPE(m_tensor_proto->data_type(), "INT8, raw data"); } template <> @@ -98,7 +98,7 @@ std::vector Tensor::get_data() const { if (m_tensor_proto->data_type() == ONNX_NAMESPACE::TensorProto_DataType_INT16) { return detail::__get_data(m_tensor_proto->int32_data()); } - throw error::tensor::invalid_data_type{m_tensor_proto->data_type()}; + ONNX_INVALID_DATA_TYPE(m_tensor_proto->data_type(), "INT16, raw data"); } template <> @@ -112,7 +112,7 @@ std::vector Tensor::get_data() const { if (m_tensor_proto->data_type() == ONNX_NAMESPACE::TensorProto_DataType_INT32) { return detail::__get_data(m_tensor_proto->int32_data()); } - throw error::tensor::invalid_data_type{m_tensor_proto->data_type()}; + ONNX_INVALID_DATA_TYPE(m_tensor_proto->data_type(), "INT32, raw data"); } template <> @@ -126,7 +126,7 @@ std::vector Tensor::get_data() const { if (m_tensor_proto->data_type() == ONNX_NAMESPACE::TensorProto_DataType_INT64) { return detail::__get_data(m_tensor_proto->int64_data()); } - throw error::tensor::invalid_data_type{m_tensor_proto->data_type()}; + ONNX_INVALID_DATA_TYPE(m_tensor_proto->data_type(), "INT64, raw data"); } template <> @@ -140,7 +140,7 @@ std::vector 
Tensor::get_data() const { if (m_tensor_proto->data_type() == ONNX_NAMESPACE::TensorProto_DataType_UINT8) { return detail::__get_data(m_tensor_proto->int32_data()); } - throw error::tensor::invalid_data_type{m_tensor_proto->data_type()}; + ONNX_INVALID_DATA_TYPE(m_tensor_proto->data_type(), "UINT8, raw data"); } template <> @@ -154,7 +154,7 @@ std::vector Tensor::get_data() const { if (m_tensor_proto->data_type() == ONNX_NAMESPACE::TensorProto_DataType_UINT16) { return detail::__get_data(m_tensor_proto->int32_data()); } - throw error::tensor::invalid_data_type{m_tensor_proto->data_type()}; + ONNX_INVALID_DATA_TYPE(m_tensor_proto->data_type(), "UINT16, raw data"); } template <> @@ -168,7 +168,7 @@ std::vector Tensor::get_data() const { if (m_tensor_proto->data_type() == ONNX_NAMESPACE::TensorProto_DataType_UINT32) { return detail::__get_data(m_tensor_proto->uint64_data()); } - throw error::tensor::invalid_data_type{m_tensor_proto->data_type()}; + ONNX_INVALID_DATA_TYPE(m_tensor_proto->data_type(), "UINT32, raw data"); } template <> @@ -182,7 +182,7 @@ std::vector Tensor::get_data() const { if (m_tensor_proto->data_type() == ONNX_NAMESPACE::TensorProto_DataType_UINT64) { return detail::__get_data(m_tensor_proto->uint64_data()); } - throw error::tensor::invalid_data_type{m_tensor_proto->data_type()}; + ONNX_INVALID_DATA_TYPE(m_tensor_proto->data_type(), "UINT63, raw data"); } template <> @@ -198,7 +198,7 @@ std::vector Tensor::get_data() const { if (m_tensor_proto->data_type() == ONNX_NAMESPACE::TensorProto_DataType_BOOL) { return detail::__get_data(m_tensor_proto->int32_data()); } - throw error::tensor::invalid_data_type{m_tensor_proto->data_type()}; + ONNX_INVALID_DATA_TYPE(m_tensor_proto->data_type(), "BOOL, raw data"); } } // namespace onnx_import diff --git a/src/frontends/onnx/frontend/src/core/tensor.hpp b/src/frontends/onnx/frontend/src/core/tensor.hpp index c82b1aa7a97165..00b20c4010c436 100644 --- a/src/frontends/onnx/frontend/src/core/tensor.hpp +++ 
b/src/frontends/onnx/frontend/src/core/tensor.hpp @@ -11,10 +11,10 @@ #include #include "exceptions.hpp" -#include "ngraph/op/constant.hpp" -#include "ngraph/shape.hpp" -#include "ngraph/type/element_type.hpp" #include "onnx_common/utils.hpp" +#include "openvino/core/shape.hpp" +#include "openvino/core/type/element_type.hpp" +#include "openvino/frontend/exception.hpp" #include "openvino/runtime/aligned_buffer.hpp" #include "utils/common.hpp" #include "utils/tensor_external_data.hpp" @@ -30,39 +30,13 @@ namespace onnx_import { // generated wrappers. using TensorProto_DataType = decltype(ONNX_NAMESPACE::TensorProto{}.data_type()); -namespace error { -namespace tensor { -OPENVINO_SUPPRESS_DEPRECATED_START -struct invalid_data_type : ngraph_error { - explicit invalid_data_type(TensorProto_DataType type) : ngraph_error{"invalid data type"} {} -}; - -struct unsupported_data_type : ngraph_error { - explicit unsupported_data_type(TensorProto_DataType type) : ngraph_error{"unsupported data type"} {} -}; - -struct unspecified_name : ngraph_error { - unspecified_name() : ngraph_error{"tensor has no name specified"} {} -}; - -struct unspecified_data_type : ngraph_error { - unspecified_data_type() : ngraph_error{"tensor has no data type specified"} {} -}; - -struct data_type_undefined : ngraph_error { - data_type_undefined() : ngraph_error{"data type is not defined"} {} -}; - -struct segments_unsupported : ngraph_error { - segments_unsupported() : ngraph_error{"loading segments not supported"} {} -}; - -struct shape_doesnt_match_data_size : ngraph_error { - shape_doesnt_match_data_size() : ngraph_error{"tensor shape doesn't match data size"} {} -}; -OPENVINO_SUPPRESS_DEPRECATED_END -} // namespace tensor -} // namespace error +#define ONNX_INVALID_DATA_TYPE(data_type, expected) \ + OPENVINO_THROW("Invalid data type ", ONNX_NAMESPACE::TensorProto_DataType_Name(data_type), " expected: ", expected) +#define ONNX_UNSUPPORTED_DATA_TYPE(data_type, expected) \ + 
OPENVINO_THROW("Unsupported data type ", \ + ONNX_NAMESPACE::TensorProto_DataType_Name(data_type), \ + " expected: ", \ + expected) namespace detail { namespace { @@ -121,7 +95,7 @@ class Tensor { if (m_shape == Shape{0}) { // It's possible to construct a tensor in ONNX with "dims: 0" property // Such tensor contains a scalar. This results in a Shape{0} stored in m_shape. - // In nGraph a scalar is represented with Shape{} and thus this replacement. + // In OpenVINO a scalar is represented with Shape{} and thus this replacement. m_shape = Shape{}; } } @@ -138,28 +112,28 @@ class Tensor { template std::vector get_data() const { if (m_tensor_proto->has_segment()) { - throw error::tensor::segments_unsupported{}; + FRONT_END_THROW("Loading segments isn't supported"); } - throw ngraph::onnx_import::error::tensor::unsupported_data_type{m_tensor_proto->data_type()}; + ONNX_UNSUPPORTED_DATA_TYPE(m_tensor_proto->data_type(), "[nothing expected]"); } const std::string& get_name() const { if (!m_tensor_proto->has_name()) { - throw error::tensor::unspecified_name{}; + FRONT_END_THROW("Tensor has no specified name"); } return m_tensor_proto->name(); } Type get_type() const { if (!m_tensor_proto->has_data_type()) { - throw error::tensor::unspecified_data_type{}; + FRONT_END_THROW("Tensor has no specified data type"); } return static_cast(m_tensor_proto->data_type()); } - const element::Type& get_ng_type() const { + const element::Type& get_ov_type() const { if (!m_tensor_proto->has_data_type()) { - throw error::tensor::unspecified_data_type{}; + FRONT_END_THROW("Tensor has no specified data type"); } switch (m_tensor_proto->data_type()) { case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_BOOL: @@ -189,9 +163,11 @@ class Tensor { case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_BFLOAT16: return element::bf16; case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_UNDEFINED: - throw error::tensor::data_type_undefined{}; + FRONT_END_THROW("Data 
type is Undefined"); default: - throw error::tensor::unsupported_data_type{m_tensor_proto->data_type()}; + ONNX_UNSUPPORTED_DATA_TYPE( + m_tensor_proto->data_type(), + "BOOL, BFLOAT16, FLOAT, FLOAT16, DOUBLE, INT8, INT16, INT32, INT64, UINT8, UINT16, UINT32, UINT64"); } } @@ -199,39 +175,41 @@ class Tensor { return m_tensor_proto->data_type(); } - std::shared_ptr get_ng_constant() const { + std::shared_ptr get_ov_constant() const { if (m_tensor_proto->has_segment()) { - throw error::tensor::segments_unsupported{}; + FRONT_END_THROW("Loading segments isn't supported"); } switch (m_tensor_proto->data_type()) { case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_BOOL: - return make_ng_constant(element::boolean); + return make_ov_constant(element::boolean); case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_FLOAT: - return make_ng_constant(element::f32); + return make_ov_constant(element::f32); case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_FLOAT16: - return make_ng_constant(element::f16); + return make_ov_constant(element::f16); case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_DOUBLE: - return make_ng_constant(element::f64); + return make_ov_constant(element::f64); case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_INT8: - return make_ng_constant(element::i8); + return make_ov_constant(element::i8); case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_INT16: - return make_ng_constant(element::i16); + return make_ov_constant(element::i16); case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_INT32: - return make_ng_constant(element::i32); + return make_ov_constant(element::i32); case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_INT64: - return make_ng_constant(element::i64); + return make_ov_constant(element::i64); case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_UINT8: - return make_ng_constant(element::u8); + return make_ov_constant(element::u8); 
case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_UINT16: - return make_ng_constant(element::u16); + return make_ov_constant(element::u16); case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_UINT32: - return make_ng_constant(element::u32); + return make_ov_constant(element::u32); case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_UINT64: - return make_ng_constant(element::u64); + return make_ov_constant(element::u64); case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_BFLOAT16: - return make_ng_constant(element::bf16); + return make_ov_constant(element::bf16); default: - throw error::tensor::unsupported_data_type{m_tensor_proto->data_type()}; + ONNX_UNSUPPORTED_DATA_TYPE( + m_tensor_proto->data_type(), + "BOOL, BFLOAT16, FLOAT, FLOAT16, DOUBLE, INT8, INT16, INT32, INT64, UINT8, UINT16, UINT32, UINT64"); } } @@ -241,19 +219,19 @@ class Tensor { std::is_same::value || std::is_same::value || std::is_same::value, bool>::type = true> - std::shared_ptr make_ng_constant(const element::Type& type) const { - std::shared_ptr constant{nullptr}; + std::shared_ptr make_ov_constant(const element::Type& type) const { + std::shared_ptr constant{nullptr}; size_t data_size = get_data_size(); if (has_external_data()) { const auto ext_data = detail::TensorExternalData(*m_tensor_proto); if (m_mmap_cache) { constant = - std::make_shared(type, + std::make_shared(type, m_shape, ext_data.load_external_mmap_data(m_model_dir, m_mmap_cache)); } else { constant = - std::make_shared(type, m_shape, ext_data.load_external_data(m_model_dir)); + std::make_shared(type, m_shape, ext_data.load_external_data(m_model_dir)); } if (constant->get_byte_size() != ov::shape_size(m_shape) * type.size()) { throw error::invalid_external_data( @@ -261,11 +239,11 @@ class Tensor { "' in the model"); } } else if (data_size == shape_size(m_shape)) { - constant = std::make_shared(type, m_shape, get_data_ptr()); + constant = std::make_shared(type, m_shape, 
get_data_ptr()); } else if (data_size == 0 && m_shape.size() == 0) { constant = common::make_failsafe_constant(type); } else { - throw error::tensor::shape_doesnt_match_data_size{}; + FRONT_END_THROW("Tensor shape doesn't match data size"); } if (m_tensor_proto->has_name()) { @@ -279,16 +257,16 @@ class Tensor { !std::is_same::value && !std::is_same::value && !std::is_same::value, bool>::type = true> - std::shared_ptr make_ng_constant(const element::Type& type) const { - std::shared_ptr constant{nullptr}; + std::shared_ptr make_ov_constant(const element::Type& type) const { + std::shared_ptr constant{nullptr}; auto data = get_data(); auto data_size = data.size(); if (data_size == shape_size(m_shape)) { - constant = std::make_shared(type, m_shape, data); + constant = std::make_shared(type, m_shape, data); } else if (data_size == 0 && m_shape.size() == 0) { constant = common::make_failsafe_constant(type); } else { - throw error::tensor::shape_doesnt_match_data_size{}; + FRONT_END_THROW("Tensor shape doesn't match data size"); } if (m_tensor_proto->has_name()) { constant->set_friendly_name(get_name()); @@ -330,7 +308,7 @@ class Tensor { case ONNX_NAMESPACE::TensorProto_DataType_DOUBLE: return m_tensor_proto->double_data().data(); } - throw error::tensor::invalid_data_type{m_tensor_proto->data_type()}; + ONNX_INVALID_DATA_TYPE(m_tensor_proto->data_type(), "FLOAT, INT32, INT64, UINT64, DOUBLE"); } size_t get_data_size() const { @@ -349,7 +327,7 @@ class Tensor { case ONNX_NAMESPACE::TensorProto_DataType_DOUBLE: return m_tensor_proto->double_data_size(); } - throw error::tensor::invalid_data_type{m_tensor_proto->data_type()}; + ONNX_INVALID_DATA_TYPE(m_tensor_proto->data_type(), "FLOAT, INT32, INT64, UINT64, DOUBLE"); } const ONNX_NAMESPACE::TensorProto* m_tensor_proto; @@ -369,10 +347,10 @@ template <> std::vector Tensor::get_data() const; template <> -std::vector Tensor::get_data() const; +std::vector Tensor::get_data() const; template <> -std::vector 
Tensor::get_data() const; +std::vector Tensor::get_data() const; template <> std::vector Tensor::get_data() const; diff --git a/src/frontends/onnx/frontend/src/core/value_info.hpp b/src/frontends/onnx/frontend/src/core/value_info.hpp index f69a83961dc925..99ac5b3bca3dba 100644 --- a/src/frontends/onnx/frontend/src/core/value_info.hpp +++ b/src/frontends/onnx/frontend/src/core/value_info.hpp @@ -47,31 +47,31 @@ class ValueInfo { } const element::Type& get_element_type() const { if (m_value_info_proto->type().tensor_type().has_elem_type()) { - return common::get_ngraph_element_type(m_value_info_proto->type().tensor_type().elem_type()); + return common::get_ov_element_type(m_value_info_proto->type().tensor_type().elem_type()); } return ngraph::element::dynamic; } - std::shared_ptr get_ng_node(ParameterVector& parameters, - const std::map& initializers) const { + std::shared_ptr get_ov_node(ParameterVector& parameters, + const std::map& initializers) const { const auto it = initializers.find(get_name()); if (it != std::end(initializers)) { - return get_ng_constant(it->second); + return get_ov_constant(it->second); } - parameters.push_back(get_ng_parameter()); + parameters.push_back(get_ov_parameter()); return parameters.back(); } protected: - std::shared_ptr get_ng_parameter() const { + std::shared_ptr get_ov_parameter() const { auto parameter = std::make_shared(get_element_type(), get_shape()); parameter->set_friendly_name(get_name()); parameter->get_output_tensor(0).set_names({get_name()}); return parameter; } - std::shared_ptr get_ng_constant(const Tensor& tensor) const { - return tensor.get_ng_constant(); + std::shared_ptr get_ov_constant(const Tensor& tensor) const { + return tensor.get_ov_constant(); } private: diff --git a/src/frontends/onnx/frontend/src/editor.cpp b/src/frontends/onnx/frontend/src/editor.cpp index f16f3ea00648eb..0ea9b1455f0ded 100644 --- a/src/frontends/onnx/frontend/src/editor.cpp +++ b/src/frontends/onnx/frontend/src/editor.cpp @@ -389,7 
+389,7 @@ element::Type_t onnx_editor::ONNXModelEditor::get_input_type(const std::string& onnx_input->name()); auto& tensor_type = type_proto.tensor_type(); auto type = tensor_type.elem_type(); - return ngraph::onnx_import::common::get_ngraph_element_type(type); + return ngraph::onnx_import::common::get_ov_element_type(type); } void onnx_editor::ONNXModelEditor::set_input_shapes(const std::map& input_shapes) { diff --git a/src/frontends/onnx/frontend/src/node_context.cpp b/src/frontends/onnx/frontend/src/node_context.cpp index ef6381d8b1d69c..821f89c19e506e 100644 --- a/src/frontends/onnx/frontend/src/node_context.cpp +++ b/src/frontends/onnx/frontend/src/node_context.cpp @@ -31,12 +31,12 @@ size_t ov::frontend::onnx::NodeContext::get_input_size() const { ov::Any ov::frontend::onnx::NodeContext::apply_additional_conversion_rules(const ov::Any& data, const std::type_info& type_info) const { if (data.is() && type_info == typeid(ov::element::Type)) { - return ngraph::onnx_import::common::get_ngraph_element_type(data.as()); + return ngraph::onnx_import::common::get_ov_element_type(data.as()); } else if (data.is>() && type_info == typeid(std::vector)) { const auto& casted = data.as>(); std::vector types(casted.size()); for (size_t i = 0; i < casted.size(); ++i) { - types[i] = ngraph::onnx_import::common::get_ngraph_element_type(casted[i]); + types[i] = ngraph::onnx_import::common::get_ov_element_type(casted[i]); } return types; } diff --git a/src/frontends/onnx/frontend/src/op/blackmanwindow.cpp b/src/frontends/onnx/frontend/src/op/blackmanwindow.cpp index 8ebca88b32f4cf..76df0691a45149 100644 --- a/src/frontends/onnx/frontend/src/op/blackmanwindow.cpp +++ b/src/frontends/onnx/frontend/src/op/blackmanwindow.cpp @@ -17,8 +17,7 @@ namespace op { namespace set_1 { OutputVector blackmanwindow(const Node& node) { const auto size = node.get_ng_inputs().at(0); - const auto output_datatype = - common::get_ngraph_element_type(node.get_attribute_value("output_datatype", 1)); + 
const auto output_datatype = common::get_ov_element_type(node.get_attribute_value("output_datatype", 1)); const bool periodic = node.get_attribute_value("periodic", 1) == 1; const ov::PartialShape shape = size.get_partial_shape(); diff --git a/src/frontends/onnx/frontend/src/op/cast.cpp b/src/frontends/onnx/frontend/src/op/cast.cpp index 58b18da8bedc2a..ba53f7aa9e3d13 100644 --- a/src/frontends/onnx/frontend/src/op/cast.cpp +++ b/src/frontends/onnx/frontend/src/op/cast.cpp @@ -19,7 +19,7 @@ namespace set_1 { OutputVector cast(const Node& node) { auto data = node.get_ng_inputs().at(0); int64_t target_type = node.get_attribute_value("to"); - element::Type elem_type = common::get_ngraph_element_type(target_type); + element::Type elem_type = common::get_ov_element_type(target_type); return {std::make_shared(data, elem_type)}; } diff --git a/src/frontends/onnx/frontend/src/op/constant.cpp b/src/frontends/onnx/frontend/src/op/constant.cpp index bcf2282414024c..1893cfa63c4d39 100644 --- a/src/frontends/onnx/frontend/src/op/constant.cpp +++ b/src/frontends/onnx/frontend/src/op/constant.cpp @@ -41,13 +41,13 @@ std::shared_ptr make_dense_tensor_as_constant(const std const Shape& shape) { auto values = values_tensor.get_data(); auto dense_vector = get_dense_vector(values, indices, shape_size(shape)); - return default_opset::Constant::create(values_tensor.get_ng_type(), shape, dense_vector); + return default_opset::Constant::create(values_tensor.get_ov_type(), shape, dense_vector); } std::shared_ptr get_dense_tensor_as_constant(const std::vector& absolute_indices, const Tensor& values_tensor, const Shape& shape) { - switch (values_tensor.get_ng_type()) { + switch (values_tensor.get_ov_type()) { case element::boolean: return make_dense_tensor_as_constant(absolute_indices, values_tensor, shape); case element::f32: @@ -75,7 +75,7 @@ std::shared_ptr get_dense_tensor_as_constant(const std: case element::bf16: return make_dense_tensor_as_constant(absolute_indices, values_tensor, 
shape); default: - throw error::tensor::invalid_data_type{values_tensor}; + FRONT_END_THROW("Tensor has an unsupported data type"); } } @@ -108,7 +108,7 @@ std::vector get_absolute_indices(const Tensor& indices_tensor, const Sh namespace set_1 { OutputVector constant(const onnx_import::Node& node) { auto tensor = node.get_attribute_value("value"); - return {tensor.get_ng_constant()}; + return {tensor.get_ov_constant()}; } } // namespace set_1 @@ -180,7 +180,7 @@ OutputVector constant(const onnx_import::Node& node) { return {get_dense_tensor_as_constant(absolute_indices, values_tensor, shape)}; } auto tensor = node.get_attribute_value(attributes_names[0]); - return {tensor.get_ng_constant()}; + return {tensor.get_ov_constant()}; } } // namespace set_13 } // namespace op diff --git a/src/frontends/onnx/frontend/src/op/constant_of_shape.cpp b/src/frontends/onnx/frontend/src/op/constant_of_shape.cpp index b7dfe3cace5868..559b497ac80e6f 100644 --- a/src/frontends/onnx/frontend/src/op/constant_of_shape.cpp +++ b/src/frontends/onnx/frontend/src/op/constant_of_shape.cpp @@ -21,7 +21,7 @@ OutputVector constant_of_shape(const onnx_import::Node& node) { Output constant_value; if (node.has_attribute("value")) { auto value_tensor = node.get_attribute_value("value"); - constant_value = value_tensor.get_ng_constant(); + constant_value = value_tensor.get_ov_constant(); constant_value = reshape::interpret_as_scalar(constant_value); } else { constant_value = default_opset::Constant::create(element::f32, {}, {0}); diff --git a/src/frontends/onnx/frontend/src/op/eye_like.cpp b/src/frontends/onnx/frontend/src/op/eye_like.cpp index 29c03e04e86ae6..30bb9369d288ba 100644 --- a/src/frontends/onnx/frontend/src/op/eye_like.cpp +++ b/src/frontends/onnx/frontend/src/op/eye_like.cpp @@ -8,7 +8,10 @@ #include "exceptions.hpp" #include "ngraph/output_vector.hpp" +#include "openvino/op/constant.hpp" #include "openvino/op/eye.hpp" +#include "openvino/op/gather.hpp" +#include 
"openvino/op/shape_of.hpp" #include "utils/common.hpp" OPENVINO_SUPPRESS_DEPRECATED_START @@ -22,13 +25,9 @@ namespace { OutputVector get_shape_width_and_height(const Output& shape) { const auto axis = ngraph::op::Constant::create(ngraph::element::i64, {1}, {0}); const auto height = - std::make_shared(shape, - ngraph::op::Constant::create(ngraph::element::i64, {1}, {0}), - axis); + std::make_shared(shape, ngraph::op::Constant::create(ngraph::element::i64, {1}, {0}), axis); const auto width = - std::make_shared(shape, - ngraph::op::Constant::create(ngraph::element::i64, {1}, {1}), - axis); + std::make_shared(shape, ngraph::op::Constant::create(ngraph::element::i64, {1}, {1}), axis); return {width, height}; } @@ -50,17 +49,17 @@ OutputVector eye_like(const Node& node) { element::Type target_type; if (node.has_attribute("dtype")) { std::int64_t dtype = node.get_attribute_value("dtype"); - target_type = common::get_ngraph_element_type(dtype); + target_type = common::get_ov_element_type(dtype); } else { target_type = input.get_element_type(); } - const auto input_shape = std::make_shared(input); + const auto input_shape = std::make_shared(input); const auto dims = detail::get_shape_width_and_height(input_shape); const auto width = dims.at(0); const auto height = dims.at(1); const auto k = - default_opset::Constant::create(ngraph::element::i64, {1}, {node.get_attribute_value("k", 0)}); + ov::op::v0::Constant::create(ngraph::element::i64, {1}, {node.get_attribute_value("k", 0)}); const auto output = std::make_shared(height, width, k, target_type); diff --git a/src/frontends/onnx/frontend/src/op/hammingwindow.cpp b/src/frontends/onnx/frontend/src/op/hammingwindow.cpp index 25d557f7de6bdc..c8e1709d467853 100644 --- a/src/frontends/onnx/frontend/src/op/hammingwindow.cpp +++ b/src/frontends/onnx/frontend/src/op/hammingwindow.cpp @@ -17,8 +17,7 @@ namespace op { namespace set_1 { OutputVector hammingwindow(const Node& node) { const auto size = node.get_ng_inputs().at(0); - 
const auto output_datatype = - common::get_ngraph_element_type(node.get_attribute_value("output_datatype", 1)); + const auto output_datatype = common::get_ov_element_type(node.get_attribute_value("output_datatype", 1)); const bool periodic = node.get_attribute_value("periodic", 1) == 1; const ov::PartialShape shape = size.get_partial_shape(); diff --git a/src/frontends/onnx/frontend/src/op/hannwindow.cpp b/src/frontends/onnx/frontend/src/op/hannwindow.cpp index b0e28afd2e5570..4b97458ad207ce 100644 --- a/src/frontends/onnx/frontend/src/op/hannwindow.cpp +++ b/src/frontends/onnx/frontend/src/op/hannwindow.cpp @@ -17,8 +17,7 @@ namespace op { namespace set_1 { OutputVector hannwindow(const Node& node) { const auto size = node.get_ng_inputs().at(0); - const auto output_datatype = - common::get_ngraph_element_type(node.get_attribute_value("output_datatype", 1)); + const auto output_datatype = common::get_ov_element_type(node.get_attribute_value("output_datatype", 1)); const bool periodic = node.get_attribute_value("periodic", 1) == 1; const ov::PartialShape shape = size.get_partial_shape(); diff --git a/src/frontends/onnx/frontend/src/op/hardmax.cpp b/src/frontends/onnx/frontend/src/op/hardmax.cpp index 8079e8cf1fa659..cb799fb66d8e6f 100644 --- a/src/frontends/onnx/frontend/src/op/hardmax.cpp +++ b/src/frontends/onnx/frontend/src/op/hardmax.cpp @@ -5,9 +5,14 @@ #include "op/hardmax.hpp" #include "exceptions.hpp" -#include "ngraph/op/one_hot.hpp" -#include "ngraph/op/topk.hpp" #include "ngraph/validation_util.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/convert.hpp" +#include "openvino/op/gather.hpp" +#include "openvino/op/one_hot.hpp" +#include "openvino/op/reshape.hpp" +#include "openvino/op/shape_of.hpp" +#include "openvino/op/topk.hpp" #include "ov_models/ov_builders/reshape.hpp" #include "utils/common.hpp" #include "utils/reshape.hpp" @@ -31,30 +36,30 @@ OutputVector hardmax(const Node& node) { // reshape to 2D - "batch size" x "input feature 
dimensions" (NxD) const auto coerced_tensor = ov::op::util::flatten(input, static_cast(axis)); - const auto coerced_tensor_shape = std::make_shared(coerced_tensor); + const auto coerced_tensor_shape = std::make_shared(coerced_tensor); Output row_size = - std::make_shared(coerced_tensor_shape, - default_opset::Constant::create(element::i64, {1}, {1}), - default_opset::Constant::create(element::i64, {}, {0})); + std::make_shared(coerced_tensor_shape, + ov::op::v0::Constant::create(element::i64, {1}, {1}), + ov::op::v0::Constant::create(element::i64, {}, {0})); row_size = ngraph::onnx_import::reshape::interpret_as_scalar(row_size); const auto indices_axis = 1; const auto topk = - std::make_shared(coerced_tensor, - default_opset::Constant::create(ngraph::element::i64, Shape{}, {1}), - indices_axis, - default_opset::TopK::Mode::MAX, - default_opset::TopK::SortType::NONE); + std::make_shared(coerced_tensor, + ov::op::v0::Constant::create(ngraph::element::i64, Shape{}, {1}), + indices_axis, + ov::op::v11::TopK::Mode::MAX, + ov::op::v11::TopK::SortType::NONE); - const auto on_value = default_opset::Constant::create(ngraph::element::i64, Shape{}, {1}); - const auto off_value = default_opset::Constant::create(ngraph::element::i64, Shape{}, {0}); + const auto on_value = ov::op::v0::Constant::create(ngraph::element::i64, Shape{}, {1}); + const auto off_value = ov::op::v0::Constant::create(ngraph::element::i64, Shape{}, {0}); const auto results = - std::make_shared(topk->output(1), row_size, on_value, off_value, indices_axis); - const auto converted_results = std::make_shared(results, input.get_element_type()); + std::make_shared(topk->output(1), row_size, on_value, off_value, indices_axis); + const auto converted_results = std::make_shared(results, input.get_element_type()); - const auto output_shape = std::make_shared(input); - return {std::make_shared(converted_results, output_shape, false)}; + const auto output_shape = std::make_shared(input); + return 
{std::make_shared(converted_results, output_shape, false)}; } } // namespace set_1 @@ -68,28 +73,28 @@ OutputVector hardmax(const Node& node) { axis = ngraph::normalize_axis(node.get_description(), axis, input_shape.rank()); OPENVINO_SUPPRESS_DEPRECATED_END - const auto input_runtime_shape = std::make_shared(input); + const auto input_runtime_shape = std::make_shared(input); Output row_size = - std::make_shared(input_runtime_shape, - default_opset::Constant::create(element::i64, {1}, {axis}), - default_opset::Constant::create(element::i64, {}, {0})); + std::make_shared(input_runtime_shape, + ov::op::v0::Constant::create(element::i64, {1}, {axis}), + ov::op::v0::Constant::create(element::i64, {}, {0})); row_size = ngraph::onnx_import::reshape::interpret_as_scalar(row_size); const auto topk = - std::make_shared(input, - default_opset::Constant::create(ngraph::element::i64, Shape{}, {1}), - axis, - default_opset::TopK::Mode::MAX, - default_opset::TopK::SortType::NONE); + std::make_shared(input, + ov::op::v0::Constant::create(ngraph::element::i64, Shape{}, {1}), + axis, + ov::op::v11::TopK::Mode::MAX, + ov::op::v11::TopK::SortType::NONE); - const auto on_value = default_opset::Constant::create(ngraph::element::i64, Shape{}, {1}); - const auto off_value = default_opset::Constant::create(ngraph::element::i64, Shape{}, {0}); + const auto on_value = ov::op::v0::Constant::create(ngraph::element::i64, Shape{}, {1}); + const auto off_value = ov::op::v0::Constant::create(ngraph::element::i64, Shape{}, {0}); - const auto results = std::make_shared(topk->output(1), row_size, on_value, off_value, axis); - const auto converted_results = std::make_shared(results, input.get_element_type()); + const auto results = std::make_shared(topk->output(1), row_size, on_value, off_value, axis); + const auto converted_results = std::make_shared(results, input.get_element_type()); - const auto output_shape = std::make_shared(input); - return {std::make_shared(converted_results, output_shape, 
false)}; + const auto output_shape = std::make_shared(input); + return {std::make_shared(converted_results, output_shape, false)}; } } // namespace set_13 diff --git a/src/frontends/onnx/frontend/src/op/random_normal.cpp b/src/frontends/onnx/frontend/src/op/random_normal.cpp index 5b8ccb80e8e380..426311f0253d99 100644 --- a/src/frontends/onnx/frontend/src/op/random_normal.cpp +++ b/src/frontends/onnx/frontend/src/op/random_normal.cpp @@ -19,7 +19,7 @@ OutputVector random_normal(const Node& node) { const auto dtype = node.get_attribute_value("dtype", static_cast(ONNX_NAMESPACE::TensorProto_DataType_FLOAT)); - const auto target_type = common::get_ngraph_element_type(dtype); + const auto target_type = common::get_ov_element_type(dtype); const auto mean = node.get_attribute_value("mean", 0.0f); const auto scale = node.get_attribute_value("scale", 1.0f); diff --git a/src/frontends/onnx/frontend/src/op/random_normal_like.cpp b/src/frontends/onnx/frontend/src/op/random_normal_like.cpp index 0df669b4ada2aa..a9ecaf39a9c50a 100644 --- a/src/frontends/onnx/frontend/src/op/random_normal_like.cpp +++ b/src/frontends/onnx/frontend/src/op/random_normal_like.cpp @@ -5,6 +5,7 @@ #include "ngraph/shape.hpp" #include "op/random_uniform_like.hpp" #include "openvino/frontend/common/random_normal_helper.hpp" +#include "openvino/op/shape_of.hpp" #include "utils/common.hpp" OPENVINO_SUPPRESS_DEPRECATED_START @@ -19,12 +20,12 @@ OutputVector random_normal_like(const Node& node) { ngraph::element::Type target_type; if (node.has_attribute("dtype")) { const auto dtype = node.get_attribute_value("dtype"); - target_type = common::get_ngraph_element_type(dtype); + target_type = common::get_ov_element_type(dtype); } else { target_type = input.get_element_type(); } - const auto shape = std::make_shared(input); + const auto shape = std::make_shared(input); const auto seed = node.get_attribute_value("seed", 0.0f); const auto mean = node.get_attribute_value("mean", 0.0f); diff --git 
a/src/frontends/onnx/frontend/src/op/random_uniform.cpp b/src/frontends/onnx/frontend/src/op/random_uniform.cpp index a26ed672a0cc15..e07ddcc1c08585 100644 --- a/src/frontends/onnx/frontend/src/op/random_uniform.cpp +++ b/src/frontends/onnx/frontend/src/op/random_uniform.cpp @@ -27,7 +27,7 @@ OutputVector random_uniform(const Node& node) { const auto seed = node.get_attribute_value("seed", 0.0f); const auto target_shape_const = node.get_attribute_as_constant>("shape"); - const auto target_type = common::get_ngraph_element_type(dtype); + const auto target_type = common::get_ov_element_type(dtype); const uint64_t global_seed = 0; // TODO: This multiplication leads to a mismatch in accuracy. Issue: 123003 const auto seed_uint64 = static_cast(seed * 1000); diff --git a/src/frontends/onnx/frontend/src/op/random_uniform_like.cpp b/src/frontends/onnx/frontend/src/op/random_uniform_like.cpp index 7549b06e0355bb..d3768f3127e5ae 100644 --- a/src/frontends/onnx/frontend/src/op/random_uniform_like.cpp +++ b/src/frontends/onnx/frontend/src/op/random_uniform_like.cpp @@ -24,7 +24,7 @@ OutputVector random_uniform_like(const Node& node) { ngraph::element::Type target_type; if (node.has_attribute("dtype")) { const auto dtype = node.get_attribute_value("dtype"); - target_type = common::get_ngraph_element_type(dtype); + target_type = common::get_ov_element_type(dtype); } else { target_type = input.get_element_type(); } diff --git a/src/frontends/onnx/frontend/src/utils/common.cpp b/src/frontends/onnx/frontend/src/utils/common.cpp index d30b2a5edc2a6b..aeda69b6063f58 100644 --- a/src/frontends/onnx/frontend/src/utils/common.cpp +++ b/src/frontends/onnx/frontend/src/utils/common.cpp @@ -6,17 +6,28 @@ #include // onnx types -#include "default_opset.hpp" #include "ngraph/graph_util.hpp" #include "onnx_framework_node.hpp" #include "openvino/core/deprecated.hpp" #include "openvino/frontend/exception.hpp" +#include "openvino/op/add.hpp" +#include "openvino/op/broadcast.hpp" +#include 
"openvino/op/concat.hpp" +#include "openvino/op/divide.hpp" +#include "openvino/op/logical_and.hpp" +#include "openvino/op/multiply.hpp" +#include "openvino/op/range.hpp" +#include "openvino/op/reshape.hpp" +#include "openvino/op/shape_of.hpp" +#include "openvino/op/subtract.hpp" + +using namespace ov::op; OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { namespace onnx_import { namespace common { -const ngraph::element::Type& get_ngraph_element_type(int64_t onnx_type) { +const ov::element::Type& get_ov_element_type(int64_t onnx_type) { switch (onnx_type) { case ONNX_NAMESPACE::TensorProto_DataType_BOOL: return element::boolean; @@ -50,24 +61,24 @@ const ngraph::element::Type& get_ngraph_element_type(int64_t onnx_type) { OPENVINO_THROW("unsupported element type"); } -std::shared_ptr get_monotonic_range_along_node_rank(const Output& value, - int64_t start_value, - int64_t step) { +std::shared_ptr get_monotonic_range_along_node_rank(const Output& value, + int64_t start_value, + int64_t step) { if (value.get_partial_shape().rank().is_static()) { const auto range_value = get_monotonic_range(value.get_partial_shape().rank().get_length(), start_value, step); - return default_opset::Constant::create(element::i64, {range_value.size()}, range_value); + return v0::Constant::create(element::i64, {range_value.size()}, range_value); } - const auto value_shape = std::make_shared(value); - return std::make_shared(default_opset::Constant::create(element::i64, {}, {start_value}), - std::make_shared(value_shape), - default_opset::Constant::create(element::i64, {}, {step}), - element::i64); + const auto value_shape = std::make_shared(value); + return std::make_shared(v0::Constant::create(element::i64, {}, {start_value}), + std::make_shared(value_shape), + v0::Constant::create(element::i64, {}, {step}), + element::i64); } void validate_scalar_input(const char* input_name, - const std::shared_ptr input, + const std::shared_ptr input, const std::set allowed_types) { const auto 
validated_input_shape = input->get_output_partial_shape(0); const auto validated_input_rank = validated_input_shape.rank(); @@ -89,8 +100,8 @@ void validate_scalar_input(const char* input_name, template OutputVector handle_opset6_binary_op(const Node& node) { - const Output lhs_node = node.get_ng_inputs().at(0); - Output rhs_node = node.get_ng_inputs().at(1); + const Output lhs_node = node.get_ng_inputs().at(0); + Output rhs_node = node.get_ng_inputs().at(1); const bool broadcast = node.get_attribute_value("broadcast", 0); if (broadcast) { if (node.has_attribute("axis")) { @@ -103,31 +114,30 @@ OutputVector handle_opset6_binary_op(const Node& node) { if (axis < 0) axis += lhs_rank; if (lhs_rank > axis + rhs_rank) { - auto ones = default_opset::Constant::create(element::i64, - Shape{static_cast(lhs_rank - axis - rhs_rank)}, - std::vector(lhs_rank - axis - rhs_rank, 1)); - auto rhs_shape = std::make_shared(rhs_node); - auto new_shape = std::make_shared(OutputVector{rhs_shape, ones}, 0); - rhs_node = std::make_shared(rhs_node, new_shape, false); + auto ones = v0::Constant::create(element::i64, + Shape{static_cast(lhs_rank - axis - rhs_rank)}, + std::vector(lhs_rank - axis - rhs_rank, 1)); + auto rhs_shape = std::make_shared(rhs_node); + auto new_shape = std::make_shared(OutputVector{rhs_shape, ones}, 0); + rhs_node = std::make_shared(rhs_node, new_shape, false); } } else { - rhs_node = std::make_shared(rhs_node, - std::make_shared(lhs_node)); + rhs_node = std::make_shared(rhs_node, std::make_shared(lhs_node)); } } return {std::make_shared(lhs_node, rhs_node)}; } -template OutputVector handle_opset6_binary_op(const Node& node); -template OutputVector handle_opset6_binary_op(const Node& node); -template OutputVector handle_opset6_binary_op(const Node& node); -template OutputVector handle_opset6_binary_op(const Node& node); -template OutputVector handle_opset6_binary_op(const Node& node); +template OutputVector handle_opset6_binary_op(const Node& node); +template 
OutputVector handle_opset6_binary_op(const Node& node); +template OutputVector handle_opset6_binary_op(const Node& node); +template OutputVector handle_opset6_binary_op(const Node& node); +template OutputVector handle_opset6_binary_op(const Node& node); const std::string FAILSAFE_NODE = "ONNX_FAILSAFE_NODE"; -std::shared_ptr make_failsafe_constant(const ngraph::element::Type& dtype) { - const auto failsafe_constant = default_opset::Constant::create(dtype, Shape{}, {0}); +std::shared_ptr make_failsafe_constant(const ov::element::Type& dtype) { + const auto failsafe_constant = v0::Constant::create(dtype, Shape{}, {0}); auto& rt_info = failsafe_constant->get_rt_info(); rt_info[FAILSAFE_NODE] = true; return failsafe_constant; diff --git a/src/frontends/onnx/frontend/src/utils/common.hpp b/src/frontends/onnx/frontend/src/utils/common.hpp index 0c56a0bffaf090..1209937a748b52 100644 --- a/src/frontends/onnx/frontend/src/utils/common.hpp +++ b/src/frontends/onnx/frontend/src/utils/common.hpp @@ -13,17 +13,17 @@ #include // std::enable_if #include -#include "default_opset.hpp" -#include "ngraph/node.hpp" -#include "ngraph/shape.hpp" -#include "ngraph/type/element_type.hpp" #include "onnx_import/core/node.hpp" #include "openvino/core/deprecated.hpp" +#include "openvino/core/node.hpp" +#include "openvino/core/shape.hpp" +#include "openvino/core/type/element_type.hpp" +#include "openvino/op/constant.hpp" namespace ngraph { namespace onnx_import { namespace common { -const ngraph::element::Type& get_ngraph_element_type(std::int64_t onnx_type); +const ov::element::Type& get_ov_element_type(std::int64_t onnx_type); /// \brief Return a monotonic sequence. /// @@ -60,9 +60,9 @@ std::vector get_monotonic_range(T end_value, T start_value = T{0}, T step = T /// \param[in] step The step value for the sequence. /// /// \return The node which represents monotonic sequence. 
-std::shared_ptr get_monotonic_range_along_node_rank(const Output& value, - int64_t start_value = 0, - int64_t step = 1); +std::shared_ptr get_monotonic_range_along_node_rank(const Output& value, + int64_t start_value = 0, + int64_t step = 1); /// \brief Creates a shifted square identity matrix. /// \note Shifting in the context of this operator means that @@ -75,9 +75,9 @@ std::shared_ptr get_monotonic_range_along_node_rank(const Output -std::shared_ptr shifted_square_identity(const Shape output_shape, - const element::Type& output_type, - const std::int64_t shift) { +std::shared_ptr shifted_square_identity(const Shape output_shape, + const element::Type& output_type, + const std::int64_t shift) { std::vector identity_matrix(shape_size(output_shape), T{0}); std::int64_t rows = output_shape[0]; std::int64_t cols = output_shape[1]; @@ -91,7 +91,7 @@ std::shared_ptr shifted_square_identity(const Shape out identity_matrix.at(diagonal_element_idx) = T{1}; } - return std::make_shared(output_type, output_shape, identity_matrix); + return std::make_shared(output_type, output_shape, identity_matrix); } /// \brief Creates a square identity matrix. @@ -100,7 +100,7 @@ std::shared_ptr shifted_square_identity(const Shape out /// /// \return A Constant node representing identity matrix with shape (n, n). template -std::shared_ptr square_identity(const size_t n, const element::Type& type) { +std::shared_ptr square_identity(const size_t n, const element::Type& type) { return shifted_square_identity(Shape{n, n}, type, 0); } @@ -111,7 +111,7 @@ std::shared_ptr square_identity(const size_t n, const e /// \param[in] input An input node to be validated /// \param[in] allowed_types An optional set of allowed element types for this input void validate_scalar_input(const char* input_name, - const std::shared_ptr input, + const std::shared_ptr input, const std::set allowed_types = {}); /// \brief Temporary replacement for C++14 std::make_unique. 
@@ -140,7 +140,7 @@ OPENVINO_SUPPRESS_DEPRECATED_END /// encountered in the original model. /// \return A scalar constant containing a single value of zero /// marked as "failsafe" in the runtime info object -std::shared_ptr make_failsafe_constant(const ngraph::element::Type& dtype); +std::shared_ptr make_failsafe_constant(const ov::element::Type& dtype); /// \brief Checks the node's runtime info object and returns true if this node represents /// a dummy failsafe node created instead of an incorrect node found in the original model From 0cf87a1a571a421338ea51e82bacb202d9adfb84 Mon Sep 17 00:00:00 2001 From: Georgy Krivoruchko Date: Tue, 9 Jan 2024 23:57:16 -0800 Subject: [PATCH 07/28] [ONNX] Frontend refactoring: operations (#22044) * Updated com.microsoft/attention.cpp * Updated com.microsoft/bias_gelu.cpp * Updated com.microsoft/embed_layer_normalization.cpp * Updated com.microsoft/fused_conv.cpp * Updated com.microsoft/fusedgemm.cpp * Updated com.microsoft/skip_layer_normalization.cpp --- .../src/op/com.microsoft/attention.cpp | 360 +++++++++--------- .../src/op/com.microsoft/bias_gelu.cpp | 7 +- .../embed_layer_normalization.cpp | 59 +-- .../src/op/com.microsoft/fused_conv.cpp | 32 +- .../src/op/com.microsoft/fusedgemm.cpp | 37 +- .../skip_layer_normalization.cpp | 21 +- 6 files changed, 276 insertions(+), 240 deletions(-) diff --git a/src/frontends/onnx/frontend/src/op/com.microsoft/attention.cpp b/src/frontends/onnx/frontend/src/op/com.microsoft/attention.cpp index 6a410ebf559aa9..c5ad926f30de80 100644 --- a/src/frontends/onnx/frontend/src/op/com.microsoft/attention.cpp +++ b/src/frontends/onnx/frontend/src/op/com.microsoft/attention.cpp @@ -7,33 +7,65 @@ #include "default_opset.hpp" #include "onnx_import/core/null_node.hpp" #include "openvino/frontend/exception.hpp" +#include "openvino/op/add.hpp" +#include "openvino/op/broadcast.hpp" +#include "openvino/op/concat.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/convert.hpp" +#include 
"openvino/op/divide.hpp" +#include "openvino/op/equal.hpp" +#include "openvino/op/floor.hpp" +#include "openvino/op/floor_mod.hpp" +#include "openvino/op/gather.hpp" +#include "openvino/op/greater.hpp" +#include "openvino/op/greater_eq.hpp" +#include "openvino/op/less.hpp" +#include "openvino/op/log.hpp" +#include "openvino/op/logical_not.hpp" +#include "openvino/op/logical_or.hpp" +#include "openvino/op/matmul.hpp" +#include "openvino/op/maximum.hpp" +#include "openvino/op/multiply.hpp" +#include "openvino/op/pad.hpp" +#include "openvino/op/range.hpp" +#include "openvino/op/reshape.hpp" +#include "openvino/op/select.hpp" +#include "openvino/op/shape_of.hpp" +#include "openvino/op/softmax.hpp" +#include "openvino/op/sqrt.hpp" +#include "openvino/op/squeeze.hpp" +#include "openvino/op/subtract.hpp" +#include "openvino/op/transpose.hpp" +#include "openvino/op/unsqueeze.hpp" #include "ov_models/ov_builders/split.hpp" +using namespace ov::op; + namespace ngraph { namespace onnx_import { namespace op { namespace detail { namespace { -NodeVector split_to_QKV(const std::shared_ptr& node, +NodeVector split_to_QKV(const std::shared_ptr& node, int64_t num_heads, const std::vector& qkv_hidden_sizes); -using NodeTuple = std::tuple, std::shared_ptr>; +using NodeTuple = std::tuple, std::shared_ptr>; NodeTuple get_attention_mask(const OutputVector& op_inputs, bool unidirectional); -std::shared_ptr attention_softmax(const OutputVector& op_inputs, - const std::shared_ptr& Q, - std::shared_ptr K, - std::shared_ptr V, - const std::shared_ptr& attention_mask, - const std::shared_ptr& bin_mask, - const std::shared_ptr& head_size, - bool unidirectional); - -std::shared_ptr get_present_state(const std::shared_ptr& K, - const std::shared_ptr& V, - const OutputVector& op_inputs); +std::shared_ptr attention_softmax(const OutputVector& op_inputs, + const std::shared_ptr& Q, + std::shared_ptr K, + std::shared_ptr V, + const std::shared_ptr& attention_mask, + const std::shared_ptr& bin_mask, + 
const std::shared_ptr& head_size, + bool unidirectional); + +std::shared_ptr get_present_state(const std::shared_ptr& K, + const std::shared_ptr& V, + const OutputVector& op_inputs); } // namespace } // namespace detail @@ -52,8 +84,8 @@ OutputVector attention(const Node& node) { // So the approach here is to do a single big matrix multiply // and then split the result into Q, K, V matrices - auto matmul = std::make_shared(input, weights); - auto add = std::make_shared(matmul, bias); + auto matmul = std::make_shared(input, weights); + auto add = std::make_shared(matmul, bias); const auto num_heads = node.get_attribute_value("num_heads"); const auto qkv_hidden_sizes = node.get_attribute_value>("qkv_hidden_sizes", {}); @@ -64,7 +96,7 @@ OutputVector attention(const Node& node) { // broadcastable to (batch_size, num_heads, sequence_length, past_sequence_length + sequence_length) // so it can be added to Q x K' later // past_sequence_length can be 0 if 'past' input is not available - std::shared_ptr attention_mask = nullptr, bin_mask = nullptr; + std::shared_ptr attention_mask = nullptr, bin_mask = nullptr; std::tie(attention_mask, bin_mask) = detail::get_attention_mask(nodes, unidirectional); const auto& Q = split_result[0]; @@ -87,50 +119,48 @@ OutputVector attention(const Node& node) { namespace detail { namespace { -std::shared_ptr get_dimensions(const std::shared_ptr& shape, - const std::vector& dims) { - static const auto zero = default_opset::Constant::create(element::i32, Shape{}, {0}); - const auto dims_const = default_opset::Constant::create(element::i32, Shape{dims.size()}, dims); - return std::make_shared(shape, dims_const, zero); +std::shared_ptr get_dimensions(const std::shared_ptr& shape, const std::vector& dims) { + static const auto zero = v0::Constant::create(element::i32, Shape{}, {0}); + const auto dims_const = v0::Constant::create(element::i32, Shape{dims.size()}, dims); + return std::make_shared(shape, dims_const, zero); } -std::shared_ptr 
get_dimensions(const std::shared_ptr& node, const std::vector& dims) { - return get_dimensions(std::make_shared(node), dims); +std::shared_ptr get_dimensions(const std::shared_ptr& node, const std::vector& dims) { + return get_dimensions(std::make_shared(node), dims); } -std::shared_ptr get_hidden_size(const std::shared_ptr& node_shape) { +std::shared_ptr get_hidden_size(const std::shared_ptr& node_shape) { // node has shape (batch_size, sequence_length, 3 * hidden_size) - const auto zero = default_opset::Constant::create(element::i32, Shape{}, {0}); + const auto zero = v0::Constant::create(element::i32, Shape{}, {0}); const auto hidden_size_x3 = get_dimensions(node_shape, {2}); - const auto three = default_opset::Constant::create(element::i64, Shape{}, {3}); - const auto hidden_size = std::make_shared(hidden_size_x3, three); + const auto three = v0::Constant::create(element::i64, Shape{}, {3}); + const auto hidden_size = std::make_shared(hidden_size_x3, three); return hidden_size; } -NodeVector split_to_QKV(const std::shared_ptr& node, +NodeVector split_to_QKV(const std::shared_ptr& node, int64_t num_heads, const std::vector& qkv_hidden_sizes) { OutputVector split; - std::shared_ptr head_size = nullptr; + std::shared_ptr head_size = nullptr; const auto& node_type = node->get_element_type(); - const auto node_shape = std::make_shared(node); + const auto node_shape = std::make_shared(node); // node has shape (batch_size, sequence_length, 3 * hidden_size) // fetch the first two dimensions const auto batch_size_seq_len = get_dimensions(node_shape, {0, 1}); - const auto num_heads_node = default_opset::Constant::create(element::i64, Shape{1}, {num_heads}); + const auto num_heads_node = v0::Constant::create(element::i64, Shape{1}, {num_heads}); if (qkv_hidden_sizes.size() == 0) { const auto hidden_size = get_hidden_size(node_shape); // head_size = hidden_size / num_heads - head_size = std::make_shared(hidden_size, num_heads_node); + head_size = 
std::make_shared(hidden_size, num_heads_node); // split the node into 3 even parts Q, K, V with shape (batch_size, sequence_len, hidden_size) split = ov::op::util::split(node, 3, 2); // and reshape each part to new shape (batch_size, sequence_len, num_heads, head_size) - auto new_shape = - std::make_shared(NodeVector{batch_size_seq_len, num_heads_node, head_size}, 0); + auto new_shape = std::make_shared(NodeVector{batch_size_seq_len, num_heads_node, head_size}, 0); for (size_t i = 0; i < split.size(); i++) { - split[i] = std::make_shared(split[i], new_shape, false); + split[i] = std::make_shared(split[i], new_shape, false); } - head_size = std::make_shared(head_size, node_type); + head_size = std::make_shared(head_size, node_type); } else { // in this case, weights have shape // (input_hidden_size, qkv_hidden_sizes[0] + qkv_hidden_sizes[1] + qkv_hidden_sizes[2]) @@ -145,23 +175,23 @@ NodeVector split_to_QKV(const std::shared_ptr& node, split = ov::op::util::split(node, qkv_hidden_sizes, 2); // and reshape each part to new shape (batch_size, sequence_len, num_heads, head_size) for (size_t i = 0; i < split.size(); i++) { - auto new_shape = std::make_shared( + auto new_shape = std::make_shared( NodeVector{batch_size_seq_len, num_heads_node, - default_opset::Constant::create(element::i64, Shape{1}, {qkv_hidden_sizes[i] / num_heads})}, + v0::Constant::create(element::i64, Shape{1}, {qkv_hidden_sizes[i] / num_heads})}, 0); - split[i] = std::make_shared(split[i], new_shape, false); + split[i] = std::make_shared(split[i], new_shape, false); } float head_size_val = qkv_hidden_sizes[0] > 0 ? 
static_cast(qkv_hidden_sizes[0]) / num_heads : static_cast(qkv_hidden_sizes[2]) / num_heads; - head_size = default_opset::Constant::create(node_type, Shape{1}, {head_size_val}); + head_size = v0::Constant::create(node_type, Shape{1}, {head_size_val}); } // transpose Q, K and V to (batch_size, num_heads, sequence_len, head_size) - auto perm = default_opset::Constant::create(element::i64, Shape{4}, {0, 2, 1, 3}); - auto Q = std::make_shared(split[0], perm); - auto K = std::make_shared(split[1], perm); - auto V = std::make_shared(split[2], perm); + auto perm = v0::Constant::create(element::i64, Shape{4}, {0, 2, 1, 3}); + auto Q = std::make_shared(split[0], perm); + auto K = std::make_shared(split[1], perm); + auto V = std::make_shared(split[2], perm); return {Q, K, V, head_size}; } @@ -189,7 +219,7 @@ NodeVector split_to_QKV(const std::shared_ptr& node, // e.g., for batch = 1, -10000 values appear within two ranges [0, mask_index[4]] and [mask_index[1]:5] (or [0:2],[4:5]) // // -// This is how it's done with nGraph operations: +// This is how it's done with OpenVINO operations: // // First the 'base' is generated by range + broadcast: // base = range(0, all_seq_len) @@ -236,62 +266,51 @@ NodeVector split_to_QKV(const std::shared_ptr& node, // Handling both mask_index variants (so (batch_size) and (2 * batch_size)) is tricky since we don't // know its dimensions upfront. So we compute both variants and use Select operator to select // the right one in the runtime (unless it gets constantfolded before). 
-std::shared_ptr attention_mask_from_indices(const Output& mask_index, - const element::Type_t& type, - const std::shared_ptr& batch_size, - const std::shared_ptr& all_seq_len) { - const auto zero = default_opset::Constant::create(element::i64, Shape{}, {0}); - const auto one = default_opset::Constant::create(element::i64, Shape{}, {1}); - const auto stop = std::make_shared(all_seq_len, zero); - std::shared_ptr base = - std::make_shared(zero, stop, one, mask_index.get_element_type()); - const auto target_shape = std::make_shared(NodeVector{batch_size, all_seq_len}, 0); +std::shared_ptr attention_mask_from_indices(const Output& mask_index, + const element::Type_t& type, + const std::shared_ptr& batch_size, + const std::shared_ptr& all_seq_len) { + const auto zero = v0::Constant::create(element::i64, Shape{}, {0}); + const auto one = v0::Constant::create(element::i64, Shape{}, {1}); + const auto stop = std::make_shared(all_seq_len, zero); + std::shared_ptr base = std::make_shared(zero, stop, one, mask_index.get_element_type()); + const auto target_shape = std::make_shared(NodeVector{batch_size, all_seq_len}, 0); // broadcast 'base' to (batch_size, all_seq_len) - base = std::make_shared(base, target_shape); - const auto indices_shape = std::make_shared( - NodeVector{default_opset::Constant::create(element::i64, Shape{1}, {-1}), batch_size}, - 0); - std::shared_ptr indices = std::make_shared(mask_index, indices_shape, false); + base = std::make_shared(base, target_shape); + const auto indices_shape = + std::make_shared(NodeVector{v0::Constant::create(element::i64, Shape{1}, {-1}), batch_size}, 0); + std::shared_ptr indices = std::make_shared(mask_index, indices_shape, false); // fetch first row from indices - std::shared_ptr tail_range_indices = std::make_shared(indices, zero, zero); + std::shared_ptr tail_range_indices = std::make_shared(indices, zero, zero); tail_range_indices = - std::make_shared(tail_range_indices, - default_opset::Constant::create(element::i32, 
Shape{2}, {-1, 1}), - false); - const auto greater_eq = std::make_shared(base, tail_range_indices); - std::shared_ptr tail_range_mask = - std::make_shared(std::make_shared(greater_eq, type), - default_opset::Constant::create(type, Shape{}, {-10000})); + std::make_shared(tail_range_indices, v0::Constant::create(element::i32, Shape{2}, {-1, 1}), false); + const auto greater_eq = std::make_shared(base, tail_range_indices); + std::shared_ptr tail_range_mask = + std::make_shared(std::make_shared(greater_eq, type), + v0::Constant::create(type, Shape{}, {-10000})); tail_range_mask = - std::make_shared(tail_range_mask, - default_opset::Constant::create(element::i64, Shape{2}, {1, 2})); + std::make_shared(tail_range_mask, v0::Constant::create(element::i64, Shape{2}, {1, 2})); const auto gather_index = - std::make_shared(default_opset::Constant::create(element::i64, Shape{}, {1}), - get_dimensions(indices, {0})); + std::make_shared(v0::Constant::create(element::i64, Shape{}, {1}), get_dimensions(indices, {0})); // fetch indices from the second row (or first if not available) - std::shared_ptr head_range_indices = - std::make_shared(indices, gather_index, zero); + std::shared_ptr head_range_indices = std::make_shared(indices, gather_index, zero); head_range_indices = - std::make_shared(head_range_indices, - default_opset::Constant::create(element::i32, Shape{2}, {-1, 1}), - false); - const auto less = std::make_shared(base, head_range_indices); - std::shared_ptr mask = std::make_shared(less, greater_eq); - mask = std::make_shared(std::make_shared(mask, type), - default_opset::Constant::create(type, Shape{}, {-10000})); + std::make_shared(head_range_indices, v0::Constant::create(element::i32, Shape{2}, {-1, 1}), false); + const auto less = std::make_shared(base, head_range_indices); + std::shared_ptr mask = std::make_shared(less, greater_eq); + mask = std::make_shared(std::make_shared(mask, type), + v0::Constant::create(type, Shape{}, {-10000})); // reshape from (batch_size, 
all_seq_len) to (batch_size, 1, 1, all_seq_len) - mask = std::make_shared(mask, - default_opset::Constant::create(element::i64, Shape{2}, {1, 2})); + mask = std::make_shared(mask, v0::Constant::create(element::i64, Shape{2}, {1, 2})); const auto mask_index_first_dim = get_dimensions(mask_index.get_node_shared_ptr(), {0}); // compare mask_index.shape[0] with batch_size value // if they're equal - select tail_range_mask // else select full mask - mask = std::make_shared( - std::make_shared(batch_size, mask_index_first_dim), - tail_range_mask, - mask); + mask = std::make_shared(std::make_shared(batch_size, mask_index_first_dim), + tail_range_mask, + mask); return mask; } @@ -321,26 +340,24 @@ std::shared_ptr attention_mask_from_indices(const Output& seq_len, - const std::shared_ptr& all_seq_len, - const std::shared_ptr& past_seq_len) { - const auto zero = default_opset::Constant::create(element::i64, Shape{}, {0}); - const auto one = default_opset::Constant::create(element::i64, Shape{}, {1}); - const auto stop = std::make_shared(all_seq_len, zero); - std::shared_ptr bin_mask = std::make_shared(zero, stop, one, element::i32); - auto target_shape = std::make_shared(NodeVector{seq_len, all_seq_len}, 0); - bin_mask = std::make_shared(bin_mask, target_shape); - auto start = - std::make_shared(std::make_shared(past_seq_len, one), zero); - auto end = std::make_shared(std::make_shared(all_seq_len, one), zero); - auto indices = std::make_shared( - std::make_shared(start, end, one, element::i32), - default_opset::Constant::create(element::i32, Shape{1}, {1})); - bin_mask = std::make_shared(bin_mask, indices); - std::shared_ptr attention_mask = - std::make_shared(std::make_shared(bin_mask, type), - default_opset::Constant::create(type, Shape{}, {-10000})); - bin_mask = std::make_shared(std::make_shared(bin_mask), type); + const std::shared_ptr& seq_len, + const std::shared_ptr& all_seq_len, + const std::shared_ptr& past_seq_len) { + const auto zero = 
v0::Constant::create(element::i64, Shape{}, {0}); + const auto one = v0::Constant::create(element::i64, Shape{}, {1}); + const auto stop = std::make_shared(all_seq_len, zero); + std::shared_ptr bin_mask = std::make_shared(zero, stop, one, element::i32); + auto target_shape = std::make_shared(NodeVector{seq_len, all_seq_len}, 0); + bin_mask = std::make_shared(bin_mask, target_shape); + auto start = std::make_shared(std::make_shared(past_seq_len, one), zero); + auto end = std::make_shared(std::make_shared(all_seq_len, one), zero); + auto indices = std::make_shared(std::make_shared(start, end, one, element::i32), + v0::Constant::create(element::i32, Shape{1}, {1})); + bin_mask = std::make_shared(bin_mask, indices); + std::shared_ptr attention_mask = + std::make_shared(std::make_shared(bin_mask, type), + v0::Constant::create(type, Shape{}, {-10000})); + bin_mask = std::make_shared(std::make_shared(bin_mask), type); return NodeTuple{attention_mask, bin_mask}; } @@ -355,29 +372,23 @@ NodeTuple unidirectional_mask(const element::Type_t& type, // // Shape (batch_size, 1, max_sequence_length, max_sequence_length) is not supported in onnxruntime: // https://github.com/microsoft/onnxruntime/blob/851554536ca8185b3413ee57449ea5ac93370193/onnxruntime/contrib_ops/cpu/bert/attention_helper.h#L78 -std::shared_ptr raw_mask(const Output& mask_index, - ov::Dimension::value_type mask_rank, - const element::Type_t& type) { - std::shared_ptr mask = std::make_shared(mask_index, type); - mask = std::make_shared(mask, type); - mask = std::make_shared(default_opset::Constant::create(type, Shape{}, {1}), mask); - mask = std::make_shared(mask, default_opset::Constant::create(type, Shape{}, {-10000})); +std::shared_ptr raw_mask(const Output& mask_index, + ov::Dimension::value_type mask_rank, + const element::Type_t& type) { + std::shared_ptr mask = std::make_shared(mask_index, type); + mask = std::make_shared(mask, type); + mask = std::make_shared(v0::Constant::create(type, Shape{}, {1}), 
mask); + mask = std::make_shared(mask, v0::Constant::create(type, Shape{}, {-10000})); switch (mask_rank) { // Handle mask_index with (batch_size, past_sequence_length + sequence_length) shape // Reshape it to (batch_size, 1, 1, past_sequence_length + sequence_length) case 2: - mask = std::make_shared( - mask, - default_opset::Constant::create(element::i64, Shape{4}, {0, 1, 1, -1}), - true); + mask = std::make_shared(mask, v0::Constant::create(element::i64, Shape{4}, {0, 1, 1, -1}), true); break; // Handle mask_index with (batch_size, sequence_length, past_sequence_length + sequence_length) shape // Reshape it to (batch_size, 1, sequence_length, past_sequence_length + sequence_length) case 3: - mask = std::make_shared( - mask, - default_opset::Constant::create(element::i64, Shape{4}, {0, 1, 0, -1}), - true); + mask = std::make_shared(mask, v0::Constant::create(element::i64, Shape{4}, {0, 1, 0, -1}), true); break; } return mask; @@ -388,10 +399,10 @@ bool is_past_input_available(const OutputVector& op_inputs) { } NodeTuple get_attention_mask(const OutputVector& op_inputs, bool unidirectional) { - const auto zero = default_opset::Constant::create(element::i64, Shape{1}, {0}); - const auto one = default_opset::Constant::create(element::i64, Shape{1}, {1}); + const auto zero = v0::Constant::create(element::i64, Shape{1}, {0}); + const auto one = v0::Constant::create(element::i64, Shape{1}, {1}); - std::shared_ptr past_seq_len; + std::shared_ptr past_seq_len; // get the value of past_sequence_length if (is_past_input_available(op_inputs)) { const auto& past = op_inputs[4]; @@ -402,12 +413,12 @@ NodeTuple get_attention_mask(const OutputVector& op_inputs, bool unidirectional) } // 'input' node has shape (batch_size, sequence_length, input_hidden_size) - auto input_shape = std::make_shared(op_inputs[0]); + auto input_shape = std::make_shared(op_inputs[0]); auto seq_len = get_dimensions(input_shape, {1}); - auto all_seq_len = std::make_shared(seq_len, past_seq_len); + auto 
all_seq_len = std::make_shared(seq_len, past_seq_len); const auto& type = op_inputs[0].get_element_type(); - std::shared_ptr attention_mask = nullptr; - std::shared_ptr bin_mask = nullptr; + std::shared_ptr attention_mask = nullptr; + std::shared_ptr bin_mask = nullptr; if (unidirectional) { std::tie(attention_mask, bin_mask) = unidirectional_mask(type, seq_len, all_seq_len, past_seq_len); } @@ -418,7 +429,7 @@ NodeTuple get_attention_mask(const OutputVector& op_inputs, bool unidirectional) const auto mask_rank = mask_index.get_partial_shape().rank(); FRONT_END_GENERAL_CHECK(mask_rank.is_static(), "'mask_index' rank must be static"); auto mask_rank_val = mask_rank.get_length(); - std::shared_ptr mask; + std::shared_ptr mask; if (mask_rank_val == 1) { // case when mask_index has shape (batch_size) or (2 * batch_size) // so it contains positions that specify how mask should be generated @@ -431,7 +442,7 @@ NodeTuple get_attention_mask(const OutputVector& op_inputs, bool unidirectional) } // add the mask with unidirectional mask if available if (attention_mask) { - attention_mask = std::make_shared(attention_mask, mask); + attention_mask = std::make_shared(attention_mask, mask); } else { attention_mask = mask; } @@ -440,15 +451,15 @@ NodeTuple get_attention_mask(const OutputVector& op_inputs, bool unidirectional) } // Compute softmax(Q x K' / sqrt(head_size)) x V -std::shared_ptr attention_softmax(const OutputVector& op_inputs, - const std::shared_ptr& Q, - std::shared_ptr K, - std::shared_ptr V, - const std::shared_ptr& attention_mask, - const std::shared_ptr& bin_mask, - const std::shared_ptr& head_size, - bool unidirectional) { - auto zero = default_opset::Constant::create(element::i64, Shape{}, {0}); +std::shared_ptr attention_softmax(const OutputVector& op_inputs, + const std::shared_ptr& Q, + std::shared_ptr K, + std::shared_ptr V, + const std::shared_ptr& attention_mask, + const std::shared_ptr& bin_mask, + const std::shared_ptr& head_size, + bool 
unidirectional) { + auto zero = v0::Constant::create(element::i64, Shape{}, {0}); if (is_past_input_available(op_inputs)) { // concat past K and V with present ones const auto& past = op_inputs[4]; @@ -458,46 +469,46 @@ std::shared_ptr attention_softmax(const OutputVector& op_inputs, // so we need to split it into two parts, remove first dimension from each part and concatenate first part // with current K and second part with current V const auto split = ov::op::util::split(past, 2, 0); - const auto past_K = std::make_shared(split[0], zero); - K = std::make_shared(NodeVector{past_K, K}, 2); - const auto past_V = std::make_shared(split[1], zero); - V = std::make_shared(NodeVector{past_V, V}, 2); + const auto past_K = std::make_shared(split[0], zero); + K = std::make_shared(NodeVector{past_K, K}, 2); + const auto past_V = std::make_shared(split[1], zero); + V = std::make_shared(NodeVector{past_V, V}, 2); } // perform Q x K' - std::shared_ptr softmax_input = std::make_shared(Q, K, false, true); + std::shared_ptr softmax_input = std::make_shared(Q, K, false, true); // Q x K' + mask if (attention_mask) { if (unidirectional) { // Perform the equivalent of // https://github.com/microsoft/onnxruntime/blob/851554536ca8185b3413ee57449ea5ac93370193/onnxruntime/contrib_ops/cpu/bert/attention_cpu_base.h#L158-L166 // For positions where unidirectional_mask has -10000 values - attention_mask is moved to softmax input - softmax_input = std::make_shared(softmax_input, bin_mask); + softmax_input = std::make_shared(softmax_input, bin_mask); } - softmax_input = std::make_shared(softmax_input, attention_mask); + softmax_input = std::make_shared(softmax_input, attention_mask); } - const auto sqrt = std::make_shared(head_size); + const auto sqrt = std::make_shared(head_size); // (Q x K' + mask) / sqrt(head_size) - softmax_input = std::make_shared(softmax_input, sqrt); + softmax_input = std::make_shared(softmax_input, sqrt); // handle 'extra_add' input if (op_inputs.size() > 5 && 
!ov::op::util::is_null(op_inputs[5])) { FRONT_END_GENERAL_CHECK(!is_past_input_available(op_inputs), "Cannot use both 'past' and 'extra_add' inputs in the same node"); const auto& extra_add = op_inputs[5]; - softmax_input = std::make_shared(softmax_input, extra_add); + softmax_input = std::make_shared(softmax_input, extra_add); } // softmax((Q x K' + mask) / sqrt(head_size)) - const auto softmax = std::make_shared(softmax_input, 3); + const auto softmax = std::make_shared(softmax_input, 3); // softmax((Q x K' + mask) / sqrt(head_size)) x V - std::shared_ptr output = std::make_shared(softmax, V); + std::shared_ptr output = std::make_shared(softmax, V); // transpose the result from (batch_size, num_heads, sequence_length, head_size) // to (batch_size, sequence_length, num_heads, head_size) - const auto perm = default_opset::Constant::create(element::i64, Shape{4}, {0, 2, 1, 3}); - output = std::make_shared(output, perm); - auto new_shape = default_opset::Constant::create(element::i32, Shape{3}, {0, 0, -1}); + const auto perm = v0::Constant::create(element::i64, Shape{4}, {0, 2, 1, 3}); + output = std::make_shared(output, perm); + auto new_shape = v0::Constant::create(element::i32, Shape{3}, {0, 0, -1}); // reshape the result from (batch_size, sequence_length, num_heads, head_size) to (batch_size, sequence_length, // num_heads * head_size) - output = std::make_shared(output, new_shape, true); + output = std::make_shared(output, new_shape, true); return output; } @@ -506,40 +517,35 @@ std::shared_ptr attention_softmax(const OutputVector& op_inputs, // (batch_size, num_heads, sequence_length, head_size) to (1, batch_size, num_heads, sequence_length, head_size) // and concatenating them along first axis to make 'present' output. // If fifth input ('past') is available, it gets concatenated with 'present' output along fourth axis. 
-std::shared_ptr get_present_state(const std::shared_ptr& K, - const std::shared_ptr& V, - const OutputVector& op_inputs) { - auto zero = default_opset::Constant::create(element::i64, Shape{1}, {0}); +std::shared_ptr get_present_state(const std::shared_ptr& K, + const std::shared_ptr& V, + const OutputVector& op_inputs) { + auto zero = v0::Constant::create(element::i64, Shape{1}, {0}); // expand K shape (batch_size, num_heads, sequence_length, head_size) to // (1, batch_size, num_heads, sequence_length, head_size) - auto K_unsqueezed = std::make_shared(K, zero); + auto K_unsqueezed = std::make_shared(K, zero); // similarly expand V shape - auto V_unsqueezed = std::make_shared(V, zero); + auto V_unsqueezed = std::make_shared(V, zero); // add padding in case K and V have different shapes (it happens when used provided uneven qkv_hidden_sizes) // if the shapes are equal (so padding will be zero), Pad gets eliminated in NopElimination pass - const auto K_shape = std::make_shared(K_unsqueezed); - const auto V_shape = std::make_shared(V_unsqueezed); - const auto K_pads_end = - std::make_shared(std::make_shared(V_shape, K_shape), zero); - const auto V_pads_end = - std::make_shared(std::make_shared(K_shape, V_shape), zero); - const auto pads_begin = - std::make_shared(zero, std::make_shared(K_shape)); - const auto K_padded = - std::make_shared(K_unsqueezed, pads_begin, K_pads_end, ngraph::op::PadMode::CONSTANT); - const auto V_padded = - std::make_shared(V_unsqueezed, pads_begin, V_pads_end, ngraph::op::PadMode::CONSTANT); + const auto K_shape = std::make_shared(K_unsqueezed); + const auto V_shape = std::make_shared(V_unsqueezed); + const auto K_pads_end = std::make_shared(std::make_shared(V_shape, K_shape), zero); + const auto V_pads_end = std::make_shared(std::make_shared(K_shape, V_shape), zero); + const auto pads_begin = std::make_shared(zero, std::make_shared(K_shape)); + const auto K_padded = std::make_shared(K_unsqueezed, pads_begin, K_pads_end, 
ov::op::PadMode::CONSTANT); + const auto V_padded = std::make_shared(V_unsqueezed, pads_begin, V_pads_end, ov::op::PadMode::CONSTANT); // concat key and value tensors along first axis to make 'present' state // after that operation, 'present' has shape (2, batch_size, num_heads, sequence_length, head_size) - std::shared_ptr present = std::make_shared(NodeVector{K_padded, V_padded}, 0); + std::shared_ptr present = std::make_shared(NodeVector{K_padded, V_padded}, 0); if (is_past_input_available(op_inputs)) { const auto& past = op_inputs[4]; // concat 'past' to 'present' output along fourth axis // after that operation, 'present' has shape: // (2, batch_size, num_heads, past_sequence_length + sequence_length, head_size) - present = std::make_shared(OutputVector{past, present}, 3); + present = std::make_shared(OutputVector{past, present}, 3); } return present; } diff --git a/src/frontends/onnx/frontend/src/op/com.microsoft/bias_gelu.cpp b/src/frontends/onnx/frontend/src/op/com.microsoft/bias_gelu.cpp index b5a6e58e78dc79..6b929766272bef 100644 --- a/src/frontends/onnx/frontend/src/op/com.microsoft/bias_gelu.cpp +++ b/src/frontends/onnx/frontend/src/op/com.microsoft/bias_gelu.cpp @@ -4,8 +4,11 @@ #include "op/com.microsoft/bias_gelu.hpp" -#include "default_opset.hpp" #include "openvino/frontend/exception.hpp" +#include "openvino/op/add.hpp" +#include "openvino/op/gelu.hpp" + +using namespace ov::op; namespace ngraph { namespace onnx_import { @@ -14,7 +17,7 @@ namespace set_1 { OutputVector bias_gelu(const Node& node) { auto nodes = node.get_ng_inputs(); FRONT_END_GENERAL_CHECK(nodes.size() == 2, "BiasGelu takes 2 inputs. 
Provided " + std::to_string(nodes.size())); - return {std::make_shared(std::make_shared(nodes.at(0), nodes.at(1)))}; + return {std::make_shared(std::make_shared(nodes.at(0), nodes.at(1)))}; } } // namespace set_1 } // namespace op diff --git a/src/frontends/onnx/frontend/src/op/com.microsoft/embed_layer_normalization.cpp b/src/frontends/onnx/frontend/src/op/com.microsoft/embed_layer_normalization.cpp index 05d1a8e47bba50..13e63051a2dc53 100644 --- a/src/frontends/onnx/frontend/src/op/com.microsoft/embed_layer_normalization.cpp +++ b/src/frontends/onnx/frontend/src/op/com.microsoft/embed_layer_normalization.cpp @@ -4,9 +4,19 @@ #include "op/com.microsoft/embed_layer_normalization.hpp" -#include "default_opset.hpp" #include "onnx_import/core/null_node.hpp" #include "openvino/frontend/exception.hpp" +#include "openvino/op/add.hpp" +#include "openvino/op/broadcast.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/gather.hpp" +#include "openvino/op/multiply.hpp" +#include "openvino/op/mvn.hpp" +#include "openvino/op/reduce_sum.hpp" +#include "openvino/op/shape_of.hpp" +#include "openvino/op/slice.hpp" + +using namespace ov::op; namespace ngraph { namespace onnx_import { @@ -28,15 +38,15 @@ OutputVector embed_layer_normalization(const Node& node) { const auto& gamma = nodes[5]; const auto& beta = nodes[6]; - const auto zero = default_opset::Constant::create(element::i32, Shape{1}, {0}); - std::shared_ptr input = std::make_shared(word_embeddings, input_ids, zero, 0); + const auto zero = v0::Constant::create(element::i32, Shape{1}, {0}); + std::shared_ptr input = std::make_shared(word_embeddings, input_ids, zero, 0); // add position embeddings if (num_nodes > 8 && !ov::op::util::is_null(nodes[8])) { // if we have position_ids const auto& position_ids = nodes[8]; const auto gathered_position_embeddings = - std::make_shared(position_embeddings, position_ids, zero, 0); - input = std::make_shared(input, gathered_position_embeddings); + 
std::make_shared(position_embeddings, position_ids, zero, 0); + input = std::make_shared(input, gathered_position_embeddings); } else { // input_ids' shape is [batchsize, sequence_length] // input's shape is [batchsize, sequence_length, hidden_size] @@ -44,21 +54,20 @@ OutputVector embed_layer_normalization(const Node& node) { // therefore input and position_embeddings cannot be added together // so we need slice the position_embeddings to [sequence_length, hidden_size] first // then add it with input. - const auto one = default_opset::Constant::create(element::i32, Shape{1}, {1}); - const auto input_ids_shape = std::make_shared(input_ids, element::i32); - const auto seqlen = std::make_shared(input_ids_shape, one, zero, 0); + const auto one = v0::Constant::create(element::i32, Shape{1}, {1}); + const auto input_ids_shape = std::make_shared(input_ids, element::i32); + const auto seqlen = std::make_shared(input_ids_shape, one, zero, 0); const auto gathered_position_embeddings = - std::make_shared(position_embeddings, zero, seqlen, one, zero); - input = std::make_shared(input, gathered_position_embeddings); + std::make_shared(position_embeddings, zero, seqlen, one, zero); + input = std::make_shared(input, gathered_position_embeddings); } // add segment embeddings if available if (!ov::op::util::is_null(segment_ids)) { FRONT_END_GENERAL_CHECK(!ov::op::util::is_null(segment_embeddings), "segment_ids provided, but segment_embedding input is missing"); FRONT_END_GENERAL_CHECK(nodes[1].get_element_type() == element::i32, "segment_ids must have int32 type"); - auto gathered_segment_embeddings = - std::make_shared(segment_embeddings, segment_ids, zero, 0); - input = std::make_shared(input, gathered_segment_embeddings); + auto gathered_segment_embeddings = std::make_shared(segment_embeddings, segment_ids, zero, 0); + input = std::make_shared(input, gathered_segment_embeddings); } float eps = node.get_attribute_value("epsilon"); @@ -66,25 +75,25 @@ OutputVector 
embed_layer_normalization(const Node& node) { // hidden_size dimension is 2 here, because the shape after Gather(word_embedding, input_ids) // is (batch_size, seq_len, hidden_size) int hidden_size_dim = 2; - const auto reduction_axes = default_opset::Constant::create(element::i32, Shape{1}, {hidden_size_dim}); - std::shared_ptr result = - std::make_shared(input, reduction_axes, true, eps, ov::op::MVNEpsMode::INSIDE_SQRT); + const auto reduction_axes = v0::Constant::create(element::i32, Shape{1}, {hidden_size_dim}); + std::shared_ptr result = + std::make_shared(input, reduction_axes, true, eps, ov::op::MVNEpsMode::INSIDE_SQRT); // result = gamma * result + beta - result = std::make_shared(result, gamma); - result = std::make_shared(result, beta); + result = std::make_shared(result, gamma); + result = std::make_shared(result, beta); // compute mask_index output - std::shared_ptr mask_index; + std::shared_ptr mask_index; if (num_nodes > 7 && !ov::op::util::is_null(nodes[7])) { FRONT_END_GENERAL_CHECK(nodes[7].get_element_type() == element::i32, "mask must have int32 type"); - auto axis = default_opset::Constant::create(element::i32, Shape{}, {1}); - mask_index = std::make_shared(nodes[7], axis, false); + auto axis = v0::Constant::create(element::i32, Shape{}, {1}); + mask_index = std::make_shared(nodes[7], axis, false); } else { - auto batch_size = std::make_shared(std::make_shared(nodes[0]), - zero, // indices - zero); // axis - mask_index = std::make_shared(zero, batch_size); + auto batch_size = std::make_shared(std::make_shared(nodes[0]), + zero, // indices + zero); // axis + mask_index = std::make_shared(zero, batch_size); } return {result, mask_index}; } diff --git a/src/frontends/onnx/frontend/src/op/com.microsoft/fused_conv.cpp b/src/frontends/onnx/frontend/src/op/com.microsoft/fused_conv.cpp index 1feafa08e4a1bb..38c120b332621d 100644 --- a/src/frontends/onnx/frontend/src/op/com.microsoft/fused_conv.cpp +++ 
b/src/frontends/onnx/frontend/src/op/com.microsoft/fused_conv.cpp @@ -7,9 +7,19 @@ #include #include -#include "default_opset.hpp" #include "exceptions.hpp" #include "op/conv.hpp" +#include "openvino/op/add.hpp" +#include "openvino/op/clamp.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/hard_sigmoid.hpp" +#include "openvino/op/prelu.hpp" +#include "openvino/op/relu.hpp" +#include "openvino/op/sigmoid.hpp" +#include "openvino/op/tan.hpp" +#include "openvino/op/tanh.hpp" + +using namespace ov::op; namespace ngraph { namespace onnx_import { @@ -19,36 +29,36 @@ OutputVector fused_conv(const Node& node) { auto conv_res = conv(node).at(0); if (node.get_ng_inputs().size() == 4) { // Z input provided - conv_res = std::make_shared(conv_res, node.get_ng_inputs()[3]); + conv_res = std::make_shared(conv_res, node.get_ng_inputs()[3]); } const auto activation_type = node.get_attribute_value("activation"); const auto activation_params = node.get_attribute_value>("activation_params", {}); if (activation_type == "Relu") { - return {std::make_shared(conv_res)}; + return {std::make_shared(conv_res)}; } else if (activation_type == "Tanh") { - return {std::make_shared(conv_res)}; + return {std::make_shared(conv_res)}; } else if (activation_type == "Sigmoid") { - return {std::make_shared(conv_res)}; + return {std::make_shared(conv_res)}; } else if (activation_type == "Clip") { CHECK_VALID_NODE(node, activation_params.size() == 2, "min and max attributes of Clip activation function were not provided"); - return {std::make_shared(conv_res, activation_params[0], activation_params[1])}; + return {std::make_shared(conv_res, activation_params[0], activation_params[1])}; } else if (activation_type == "LeakyRelu") { CHECK_VALID_NODE(node, activation_params.size() == 1, "activation_alpha attribute of LeakyRelu activation function was not provided"); - const auto activation_alpha_node = default_opset::Constant::create(element::f32, Shape{}, activation_params); - return 
{std::make_shared(conv_res, activation_alpha_node)}; + const auto activation_alpha_node = v0::Constant::create(element::f32, Shape{}, activation_params); + return {std::make_shared(conv_res, activation_alpha_node)}; } else if (activation_type == "HardSigmoid") { CHECK_VALID_NODE(node, activation_params.size() == 2, "alpha and beta attributes of HardSigmoid activation function were not provided"); - const auto alpha = default_opset::Constant::create(element::f32, Shape{}, {activation_params[0]}); - const auto beta = default_opset::Constant::create(element::f32, Shape{}, {activation_params[1]}); - return {std::make_shared(conv_res, alpha, beta)}; + const auto alpha = v0::Constant::create(element::f32, Shape{}, {activation_params[0]}); + const auto beta = v0::Constant::create(element::f32, Shape{}, {activation_params[1]}); + return {std::make_shared(conv_res, alpha, beta)}; } CHECK_VALID_NODE(node, !activation_type.empty(), diff --git a/src/frontends/onnx/frontend/src/op/com.microsoft/fusedgemm.cpp b/src/frontends/onnx/frontend/src/op/com.microsoft/fusedgemm.cpp index 4af42e8263bcb5..6f6039e5496f4c 100644 --- a/src/frontends/onnx/frontend/src/op/com.microsoft/fusedgemm.cpp +++ b/src/frontends/onnx/frontend/src/op/com.microsoft/fusedgemm.cpp @@ -6,13 +6,16 @@ #include -#include "default_opset.hpp" -#include "ngraph/op/add.hpp" -#include "ngraph/op/constant.hpp" -#include "ngraph/op/matmul.hpp" -#include "ngraph/op/multiply.hpp" #include "onnx_import/core/null_node.hpp" #include "openvino/frontend/exception.hpp" +#include "openvino/op/add.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/matmul.hpp" +#include "openvino/op/multiply.hpp" +#include "openvino/op/prelu.hpp" +#include "openvino/op/relu.hpp" + +using namespace ov::op; namespace ngraph { namespace onnx_import { @@ -24,14 +27,14 @@ OutputVector fusedgemm(const Node& node) { FRONT_END_GENERAL_CHECK(num_inputs == 2 || num_inputs == 3, "FusedGemm takes 2/3 inputs. 
Provided " + std::to_string(num_inputs)); - Output input_a = inputs.at(0); - Output input_b = inputs.at(1); - Output input_c; + Output input_a = inputs.at(0); + Output input_b = inputs.at(1); + Output input_c; if (num_inputs == 3 && !ov::op::util::is_null(inputs[2])) { input_c = inputs.at(2); } else { - input_c = default_opset::Constant::create(input_b.get_element_type(), ngraph::Shape{}, {0}); + input_c = v0::Constant::create(input_b.get_element_type(), ov::Shape{}, {0}); } const auto alpha_node = node.get_attribute_as_constant("alpha", 1, input_b.get_element_type()); @@ -40,22 +43,22 @@ OutputVector fusedgemm(const Node& node) { const bool trans_a = node.get_attribute_value("transA", 0); const bool trans_b = node.get_attribute_value("transB", 0); - const auto matmul_node = std::make_shared(input_a, input_b, trans_a, trans_b); - const auto matmul_times_alpha = std::make_shared(matmul_node, alpha_node); + const auto matmul_node = std::make_shared(input_a, input_b, trans_a, trans_b); + const auto matmul_times_alpha = std::make_shared(matmul_node, alpha_node); - const auto beta_times_input_c = std::make_shared(beta_node, input_c); + const auto beta_times_input_c = std::make_shared(beta_node, input_c); const std::string onnx_name = !node.get_name().empty() ? 
node.get_name() : node.output(0); matmul_node->set_friendly_name(onnx_name + "/WithoutBiases"); - const auto gemm_res = std::make_shared(matmul_times_alpha, beta_times_input_c); + const auto gemm_res = std::make_shared(matmul_times_alpha, beta_times_input_c); const auto activation_type = node.get_attribute_value("activation", "Relu"); if (activation_type == "LeakyRelu") { double activation_alpha = node.get_attribute_value("activation_alpha", 0.01); - std::shared_ptr activation_alpha_node = - default_opset::Constant::create(input_c.get_element_type(), Shape{1}, {activation_alpha}); - return {std::make_shared(gemm_res, activation_alpha_node)}; + std::shared_ptr activation_alpha_node = + v0::Constant::create(input_c.get_element_type(), Shape{1}, {activation_alpha}); + return {std::make_shared(gemm_res, activation_alpha_node)}; } - return {std::make_shared(gemm_res)}; + return {std::make_shared(gemm_res)}; } } // namespace set_1 diff --git a/src/frontends/onnx/frontend/src/op/com.microsoft/skip_layer_normalization.cpp b/src/frontends/onnx/frontend/src/op/com.microsoft/skip_layer_normalization.cpp index aed16be77b9c6b..72d8dc57fb5d36 100644 --- a/src/frontends/onnx/frontend/src/op/com.microsoft/skip_layer_normalization.cpp +++ b/src/frontends/onnx/frontend/src/op/com.microsoft/skip_layer_normalization.cpp @@ -4,8 +4,13 @@ #include "op/com.microsoft/skip_layer_normalization.hpp" -#include "default_opset.hpp" #include "openvino/frontend/exception.hpp" +#include "openvino/op/add.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/multiply.hpp" +#include "openvino/op/mvn.hpp" + +using namespace ov::op; namespace ngraph { namespace onnx_import { @@ -18,22 +23,22 @@ OutputVector skip_layer_normalization(const Node& node) { "SkipLayerNormalization takes 3, 4 or 5 inputs. 
Provided " + std::to_string(num_nodes)); // input + skip - std::shared_ptr input = std::make_shared(nodes[0], nodes[1]); + std::shared_ptr input = std::make_shared(nodes[0], nodes[1]); // add bias if available if (num_nodes == 5) { - input = std::make_shared(input, nodes[4]); + input = std::make_shared(input, nodes[4]); } float eps = node.get_attribute_value("epsilon"); // reduce over hidden_size int hidden_size_dim = 2; - const auto reduction_axes = default_opset::Constant::create(element::i32, Shape{1}, {hidden_size_dim}); - std::shared_ptr result = - std::make_shared(input, reduction_axes, true, eps, ov::op::MVNEpsMode::INSIDE_SQRT); + const auto reduction_axes = v0::Constant::create(element::i32, Shape{1}, {hidden_size_dim}); + std::shared_ptr result = + std::make_shared(input, reduction_axes, true, eps, ov::op::MVNEpsMode::INSIDE_SQRT); // multiply by gamma - result = std::make_shared(result, nodes[2]); + result = std::make_shared(result, nodes[2]); // add beta if available if (num_nodes > 3) { - result = std::make_shared(result, nodes[3]); + result = std::make_shared(result, nodes[3]); } // spec mentions three outputs (output, mean, inv_std_var) while we support only first one, but: // - onnxruntime also doesn't support the last two From 58af2091206d73f1366ccdb3071a6775731ce6af Mon Sep 17 00:00:00 2001 From: HyungKi Jeong <68764729+testdrive-profiling-master@users.noreply.github.com> Date: Wed, 10 Jan 2024 18:23:57 +0900 Subject: [PATCH 08/28] M_PI(math.h) using fix & dirent.h is existed in MinGW (#22054) * M_PI(math.h) using fix & dirent.h is existed in MinGW * fix for clang codestyle --- samples/cpp/common/utils/src/args_helper.cpp | 2 +- src/frontends/onnx/frontend/src/op/blackmanwindow.cpp | 6 ++++-- src/frontends/onnx/frontend/src/op/hammingwindow.cpp | 6 ++++-- src/frontends/onnx/frontend/src/op/hannwindow.cpp | 6 ++++-- 4 files changed, 13 insertions(+), 7 deletions(-) diff --git a/samples/cpp/common/utils/src/args_helper.cpp 
b/samples/cpp/common/utils/src/args_helper.cpp index c1e8d773d7d4f6..a67440c2903be3 100644 --- a/samples/cpp/common/utils/src/args_helper.cpp +++ b/samples/cpp/common/utils/src/args_helper.cpp @@ -7,7 +7,7 @@ #include -#ifdef _WIN32 +#if defined(_WIN32) && !defined(__MINGW32__) && !defined(__MINGW64__) # include "samples/os/windows/w_dirent.h" #else # include diff --git a/src/frontends/onnx/frontend/src/op/blackmanwindow.cpp b/src/frontends/onnx/frontend/src/op/blackmanwindow.cpp index 76df0691a45149..d4bb144725dbfe 100644 --- a/src/frontends/onnx/frontend/src/op/blackmanwindow.cpp +++ b/src/frontends/onnx/frontend/src/op/blackmanwindow.cpp @@ -1,14 +1,16 @@ // Copyright (C) 2018-2023 Intel Corporation // SPDX-License-Identifier: Apache-2.0 +#define _USE_MATH_DEFINES + #include "op/blackmanwindow.hpp" +#include + #include #include "default_opset.hpp" #include "utils/common.hpp" -#define _USE_MATH_DEFINES -#include OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { diff --git a/src/frontends/onnx/frontend/src/op/hammingwindow.cpp b/src/frontends/onnx/frontend/src/op/hammingwindow.cpp index c8e1709d467853..702e0695422037 100644 --- a/src/frontends/onnx/frontend/src/op/hammingwindow.cpp +++ b/src/frontends/onnx/frontend/src/op/hammingwindow.cpp @@ -1,14 +1,16 @@ // Copyright (C) 2018-2023 Intel Corporation // SPDX-License-Identifier: Apache-2.0 +#define _USE_MATH_DEFINES + #include "op/hammingwindow.hpp" +#include + #include #include "default_opset.hpp" #include "utils/common.hpp" -#define _USE_MATH_DEFINES -#include OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { diff --git a/src/frontends/onnx/frontend/src/op/hannwindow.cpp b/src/frontends/onnx/frontend/src/op/hannwindow.cpp index 4b97458ad207ce..47911bf1771c36 100644 --- a/src/frontends/onnx/frontend/src/op/hannwindow.cpp +++ b/src/frontends/onnx/frontend/src/op/hannwindow.cpp @@ -1,14 +1,16 @@ // Copyright (C) 2018-2023 Intel Corporation // SPDX-License-Identifier: Apache-2.0 +#define _USE_MATH_DEFINES + 
#include "op/hannwindow.hpp" +#include + #include #include "default_opset.hpp" #include "utils/common.hpp" -#define _USE_MATH_DEFINES -#include OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { From 6bbc7904f2702c63338941ffcd0ef7ac94fe6b1c Mon Sep 17 00:00:00 2001 From: Vishniakov Nikolai Date: Wed, 10 Jan 2024 10:35:30 +0100 Subject: [PATCH 09/28] [OV JS] Activate validation for mac x86 (#22035) * Extend validation for mac x86 * Remove extra params --- .github/workflows/mac.yml | 17 ++++++++--------- .github/workflows/mac_arm64.yml | 2 -- 2 files changed, 8 insertions(+), 11 deletions(-) diff --git a/.github/workflows/mac.yml b/.github/workflows/mac.yml index cad5b95407952a..c61fe6a4a9cc83 100644 --- a/.github/workflows/mac.yml +++ b/.github/workflows/mac.yml @@ -192,15 +192,16 @@ jobs: cmake --build ${{ env.BUILD_DIR }} --parallel --config ${{ env.CMAKE_BUILD_TYPE }} - name: CMake configure, build and install - OpenVINO JS API - if: ${{ 'false' }} # 128446 - # if: fromJSON(needs.smart_ci.outputs.affected_components).JS_API - run: - cmake -DCPACK_GENERATOR=NPM -DENABLE_SYSTEM_TBB=OFF -UTBB* -S ${{ env.OPENVINO_REPO }} -B ${{ env.BUILD_DIR }} + if: fromJSON(needs.smart_ci.outputs.affected_components).JS_API + run: | + cmake \ + -DCPACK_GENERATOR=NPM \ + -S ${{ env.OPENVINO_REPO }} \ + -B ${{ env.BUILD_DIR }} cmake --build ${{ env.BUILD_DIR }} --parallel cmake -DCMAKE_INSTALL_PREFIX=${{ env.INSTALL_DIR_JS }} -P ${{ env.BUILD_DIR }}/cmake_install.cmake - # # Upload build artifacts # @@ -222,8 +223,7 @@ jobs: if-no-files-found: 'error' - name: Upload openvino js package - if: ${{ 'false' }} # 128446 - # if: fromJSON(needs.smart_ci.outputs.affected_components).JS_API + if: fromJSON(needs.smart_ci.outputs.affected_components).JS_API uses: actions/upload-artifact@v3 with: name: openvino_js_package @@ -241,8 +241,7 @@ jobs: JS_API: name: OpenVINO JS API needs: [ Build, Smart_CI ] - if: ${{ 'false' }} # 128446 - # if: 
fromJSON(needs.smart_ci.outputs.affected_components).JS_API + if: fromJSON(needs.smart_ci.outputs.affected_components).JS_API uses: ./.github/workflows/job_openvino_js.yml with: runner: 'macos-13' diff --git a/.github/workflows/mac_arm64.yml b/.github/workflows/mac_arm64.yml index 08692b6ccb4fca..02d494c7c16522 100644 --- a/.github/workflows/mac_arm64.yml +++ b/.github/workflows/mac_arm64.yml @@ -195,8 +195,6 @@ jobs: run: | cmake \ -DCPACK_GENERATOR=NPM \ - -DENABLE_SYSTEM_TBB=OFF -UTBB* \ - -DENABLE_INTEL_GPU=OFF \ -S ${{ env.OPENVINO_REPO }} \ -B ${{ env.BUILD_DIR }} From 014e591116262509ac46eb741c77c0a218c66844 Mon Sep 17 00:00:00 2001 From: Andrei Kashchikhin Date: Wed, 10 Jan 2024 11:06:06 +0000 Subject: [PATCH 10/28] [GHA] [DOCS] Runners document (#21967) * slightly adapt wording in overview, start with runners doc * populate table; add section about runners choosing * wording * use runners --- docs/dev/ci/github_actions/overview.md | 23 +++--- .../ci/github_actions/reusable_workflows.md | 1 + docs/dev/ci/github_actions/runners.md | 78 ++++++++++++++++++- 3 files changed, 88 insertions(+), 14 deletions(-) create mode 100644 docs/dev/ci/github_actions/reusable_workflows.md diff --git a/docs/dev/ci/github_actions/overview.md b/docs/dev/ci/github_actions/overview.md index 293cf078eb13c3..1e130e668ac674 100644 --- a/docs/dev/ci/github_actions/overview.md +++ b/docs/dev/ci/github_actions/overview.md @@ -42,7 +42,10 @@ Additionally, several supporting workflows build and test OpenVINO for other ope ### Reusing GitHub Actions -The listed workflows make use of the rich GitHub Actions official and community actions such as `actions/checkout`, `actions/upload-artifact` and others. +The OpenVINO workflows make use of the rich official and community actions such as `actions/checkout`, `actions/upload-artifact` and others. + +Additionally, common jobs, i.e., those featured in several workflows, are extracted into _reusable workflows_. 
Read more about the used reusable workflows and how to write one [here](./reusable_workflows.md). + You can find more information about reusing actions and workflows [here](https://github.com/marketplace?type=actions) and [here](https://docs.github.com/en/actions/using-workflows/reusing-workflows). ### Workflows' Triggers and Schedule @@ -95,18 +98,18 @@ This workflow runs: ### Required Workflows -The listed above workflows are not required at the moment, but it is strongly encouraged to pay attention to their [results](#finding-results-artefacts-and-logs) while working within the OpenVINO repository. +The listed above workflows are **required**, i.e., a PR could not be merged if any of their stages fail. It is strongly encouraged to pay attention to their [results](#finding-results-artefacts-and-logs) while working within the OpenVINO repository. ### Structure of the Workflows This section provides the structural overview of the Linux, Windows and macOS workflows. -The structure for all of them is the same: +The structure for all of them is mostly the same: 1. Clone OpenVINO repository and required resources 2. Install build dependencies 3. Build OpenVINO from source 4. Pack and upload the artefacts (built OpenVINO and tests) -5. Download and use the artefacts in the parallel jobs with different kinds of tests +5. Download and use the artefacts in the parallel jobs with different tests 6. Collect the test results and upload them as artefacts **NOTE**: some workflows may use the same structure or lack the last 3 steps and have tests present right after the `Build` step. @@ -133,12 +136,12 @@ The `Build` job executes the first 4 steps: * builds from source with `cmake` * packs and uploads the artefacts using `actions/upload-artifact` -The other jobs are responsible for running different kinds of tests using the built artefacts. They: +The other jobs are responsible for running different tests using the built artefacts. 
They: * download and unpack the artefacts using `actions/download-artifact` * install the needed dependencies * run tests * collect test results -* upload test results as [artefacts](#artefacts) +* upload test results as [pipeline artefacts](#artefacts) #### Single Job Overview @@ -218,17 +221,17 @@ To find logs for a pipeline: ## Custom Actions -There are several actions written specifically for the needs of the OpenVINO workflows. +Several actions are written specifically for the needs of the OpenVINO workflows. -Read more about the available actions and what they do [here](./custom_actions.md). +Read more about the available custom actions and what they do [here](./custom_actions.md). You can find more information about reusing actions and workflows [here](https://github.com/marketplace?type=actions) and [here](https://docs.github.com/en/actions/using-workflows/reusing-workflows). ## Machines -The machines that execute the commands from the workflows are referred to as _runners_ in GitHub Actions. +The machines that execute the commands from the workflows are called _runners_ in GitHub Actions. 
-There are two types of runners available for the OpenVINO organization: +Two types of runners are available for the OpenVINO organization: * [GitHub Actions Runners](https://docs.github.com/en/actions/using-github-hosted-runners/about-github-hosted-runners/about-github-hosted-runners) - runners provided and managed by GitHub * [Self-hosted Runners](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners/about-self-hosted-runners) - runners created and managed by the OpenVINO CI team and linked to the OpenVINO repositories diff --git a/docs/dev/ci/github_actions/reusable_workflows.md b/docs/dev/ci/github_actions/reusable_workflows.md new file mode 100644 index 00000000000000..2f3e5734f7fc85 --- /dev/null +++ b/docs/dev/ci/github_actions/reusable_workflows.md @@ -0,0 +1 @@ +# Overview of the Reusable Workflows used in the OpenVINO GitHub Actions CI diff --git a/docs/dev/ci/github_actions/runners.md b/docs/dev/ci/github_actions/runners.md index c438b85388032e..f73864234053f1 100644 --- a/docs/dev/ci/github_actions/runners.md +++ b/docs/dev/ci/github_actions/runners.md @@ -2,22 +2,92 @@ The machines that execute the commands from the workflows are referred to as _runners_ in GitHub Actions. -There are two types of runners available in this repository: - +Two types of runners are available in this repository: + * [GitHub Actions Runners](https://docs.github.com/en/actions/using-github-hosted-runners/about-github-hosted-runners/about-github-hosted-runners) - runners provided and managed by GitHub * [Self-hosted Runners](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners/about-self-hosted-runners) - runners created and managed by the OpenVINO CI team and linked to the OpenVINO repositories +The runners are specified for each job using the `runs-on` key. + +An example `Build` job from the `linux.yml` workflow: +```yaml +Build: + ... + runs-on: aks-linux-16-cores-32gb + ... 
+``` + +The `aks-linux-16-cores-32gb` runners group is used for this job. + ## Available GitHub Actions Runners GitHub provides runners with different combinations of available resources and software. The OpenVINO repositories make use of the following runners: -* [The default runners](https://docs.github.com/en/actions/using-github-hosted-runners/about-github-hosted-runners/about-github-hosted-runners#supported-runners-and-hardware-resources): `ubuntu-22/20.04`, `windows-2019/2022`, `macos-12/13` +* [The default runners](https://docs.github.com/en/actions/using-github-hosted-runners/about-github-hosted-runners/about-github-hosted-runners#supported-runners-and-hardware-resources): `ubuntu-22/20.04`, `windows-2019/2022`, `macos-12/13`, etc. * Used for not-so-intensive memory and CPU tasks -* [The larger runners](https://docs.github.com/en/actions/using-github-hosted-runners/about-larger-runners/about-larger-runners#machine-sizes-for-larger-runners): you can find the list of available larger runners [here](https://github.com/openvinotoolkit/openvino/actions/runners) +* [The larger runners](https://docs.github.com/en/actions/using-github-hosted-runners/about-larger-runners/about-larger-runners#machine-sizes-for-larger-runners): you can find the list of the available larger runners [here](https://github.com/openvinotoolkit/openvino/actions/runners) * Used for memory and CPU heavy tasks ## Available Self-hosted Runners +The self-hosted runners are dynamically spawned for each requested pipeline. +Several configurations of the self-hosted runners are available, they are identified by different group names. + +The group names generally follow the pattern: `aks-{OS}-{CORES_N}-cores-|{RAM_SIZE}gb|-|{ARCH}|`, where: +* `{OS}` - the operating system: `win`/`linux` + * **Note**: Currently, only Windows and Linux self-hosted runners are available. +* `{CORES_N}` - the number of cores available to the runners in the group: `4`/`8`/etc. 
+* `|{RAM_SIZE}gb|` - **_optional_**, the RAM size in GB available to the runners in the group: `8`/`16`/etc. + * **Note**: The groups with unspecified `{RAM_SIZE}` consist of the runners with 32 GB of RAM +* `|{ARCH}|` - **_optional_**, the architecture of the runners in the group: `arm` + * **Note**: The groups with unspecified `{ARCH}` consist of the `x86_64` runners + +Examples: +* `aks-win-16-cores-32gb` - the Windows x86_64 runners with 16 cores and 32 GB of RAM available +* `aks-linux-16-cores-arm` - the Linux ARM64 runners with 16 cores and 32 GB of RAM available + +The available configurations are: + +| | Group Name | CPU Cores | RAM in GB | Architecture | Examples | +|-------------|-------------------|--------------|------------------|----------------------|----------------------------------------------------| +| Windows | `aks-win-*` | `4`/`8`/`16` | `8`/`16`/`32` | `x86_64`* | `aks-win-4-cores-8gb`/`aks-win-16-cores-32gb` | +| Linux | `aks-linux-*` | `4`/`8`/`16` | `16`/`32` | `x86_64`* | `aks-linux-4-cores-16gb`/`aks-linux-16-cores-32gb` | +| Linux ARM64 | `aks-linux-*-arm` | `16` | `32`* | `arm` | `aks-linux-16-cores-arm` | + +* `*` - Not specified in the group name + ## How to choose a Runner + +The configuration of a runner required for a job (building, testing, etc.) stems from the nature of the job: the more memory and/or CPU-intensive it is, +the more robust configuration is required. + +The `Build` job in the `linux.yml` workflow uses the `aks-linux-16-cores-32gb` group as specified in the `runs-on` key: +```yaml +Build: + ... + runs-on: aks-linux-16-cores-32gb + ... +``` + +This group has machines with 16 core CPU and 32 GB of RAM, which could be utilized in parallel by the build tools used in the `Build` job. + +The `C++ unit tests` job in the `linux.yml` workflow uses the `aks-linux-4-cores-16gb` group: +```yaml +CXX_Unit_Tests: + name: C++ unit tests + ... + with: + runner: aks-linux-4-cores-16gb + ... 
+``` + +As the C++ tests could not utilize the large number of cores for parallel execution as the build tools in the `Build` job could, +it would be pointless to use the `aks-linux-16-cores-32gb` group for them. + +The advice is to use runners with more cores/RAM size for the tasks that could load them. + +It is possible to experiment with different configurations before deciding, i.e., +run a job on runners from different groups and observe the gains; if they are significant, e.g., 60 minutes on a 4-core runner vs. 15 minutes on a 16-core runner, +it is better to use those with more cores. From b7ea17e18d19ec5d7a40027368366c5a741b304f Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Wed, 10 Jan 2024 15:50:32 +0400 Subject: [PATCH 11/28] Fixed Tensor::set_shape to rely on capacity (#21417) --- src/core/tests/ov_tensor_test.cpp | 49 ++++++++++++++++++++------- src/inference/src/dev/make_tensor.cpp | 41 ++++++++++++---------- 2 files changed, 60 insertions(+), 30 deletions(-) diff --git a/src/core/tests/ov_tensor_test.cpp b/src/core/tests/ov_tensor_test.cpp index 891d26bba8f57d..9dfbe2853bbdaf 100644 --- a/src/core/tests/ov_tensor_test.cpp +++ b/src/core/tests/ov_tensor_test.cpp @@ -369,25 +369,26 @@ TEST_F(OVTensorTest, saveDimsAndSizeAfterMoveStringTensor) { ASSERT_THROW(t.data(), ov::Exception); } -// SetShape +// set_shape TEST_F(OVTensorTest, canSetShape) { const ov::Shape origShape({1, 2, 3}); - ov::Tensor t{ov::element::f32, {1, 2, 3}}; - const ov::Shape newShape({4, 5, 6}); + ov::Tensor t{ov::element::f32, origShape}; + const ov::Shape newShape({4, 5, 6}), newShape2({4, 5, 6, 7}); const void* orig_data = t.data(); ASSERT_EQ(t.get_shape(), origShape); - ASSERT_NO_THROW(t.set_shape({4, 5, 6})); + ASSERT_NO_THROW(t.set_shape(newShape)); ASSERT_EQ(newShape, t.get_shape()); ASSERT_EQ(byteStrides(ov::row_major_strides(newShape), t.get_element_type()), t.get_strides()); ASSERT_NE(orig_data, t.data()); - // check that setShape for copy changes original Tensor + // 
check that set_shape for copy changes original Tensor { ov::Tensor t2 = t; - ASSERT_NO_THROW(t2.set_shape(newShape)); - ASSERT_EQ(newShape, t.get_shape()); + ASSERT_NO_THROW(t2.set_shape(newShape2)); + ASSERT_EQ(newShape2, t.get_shape()); ASSERT_EQ(t2.get_shape(), t.get_shape()); + ASSERT_EQ(t2.data(), t.data()); orig_data = t.data(); } @@ -402,7 +403,7 @@ TEST_F(OVTensorTest, canSetShape) { TEST_F(OVTensorTest, canSetShapeStringTensor) { const ov::Shape origShape({1, 2, 3}); ov::Tensor t{ov::element::string, {1, 2, 3}}; - const ov::Shape newShape({4, 5, 6}); + const ov::Shape newShape({4, 5, 6}), newShape2({4, 5, 6, 7}); const void* orig_data = t.data(); ASSERT_EQ(t.get_shape(), origShape); @@ -410,22 +411,22 @@ TEST_F(OVTensorTest, canSetShapeStringTensor) { ASSERT_EQ(newShape, t.get_shape()); ASSERT_EQ(byteStrides(ov::row_major_strides(newShape), t.get_element_type()), t.get_strides()); ASSERT_NE(orig_data, t.data()); - const void* new_data = t.data(); // check that setShape for copy changes original Tensor { ov::Tensor t2 = t; - ASSERT_NO_THROW(t2.set_shape(origShape)); - ASSERT_EQ(origShape, t2.get_shape()); - ASSERT_EQ(origShape, t.get_shape()); + ASSERT_NO_THROW(t2.set_shape(newShape2)); + ASSERT_EQ(newShape2, t2.get_shape()); + ASSERT_EQ(t2.get_shape(), t.get_shape()); ASSERT_EQ(t2.data(), t.data()); + orig_data = t.data(); } // set_shape for smaller memory - does not perform reallocation { ASSERT_NO_THROW(t.set_shape(origShape)); ASSERT_EQ(origShape, t.get_shape()); - ASSERT_EQ(new_data, t.data()); + ASSERT_EQ(orig_data, t.data()); } } @@ -497,6 +498,28 @@ TEST_F(OVTensorTest, canSetShapeOfOriginalSizeAfterDecreasingOnPreallocatedMemor ASSERT_NO_THROW(t.set_shape(originalShape)); } +TEST_F(OVTensorTest, canSetShapeOfOriginalSizeAfterDecreasing) { + const ov::Shape shape({4, 5, 6}), small_shape({1, 2, 3}); + ov::Tensor t{ov::element::f32, shape}; + void* data = t.data(); + + ASSERT_NO_THROW(t.set_shape(small_shape)); + EXPECT_EQ(data, t.data()); + 
ASSERT_NO_THROW(t.set_shape(shape)); + EXPECT_EQ(data, t.data()); +} + +TEST_F(OVTensorTest, canSetShapeOfOriginalSizeAfterDecreasingStringTensor) { + const ov::Shape shape({4, 5, 6}), small_shape({1, 2, 3}); + ov::Tensor t{ov::element::string, shape}; + void* data = t.data(); + + ASSERT_NO_THROW(t.set_shape(small_shape)); + EXPECT_EQ(data, t.data()); + ASSERT_NO_THROW(t.set_shape(shape)); + EXPECT_EQ(data, t.data()); +} + TEST_F(OVTensorTest, canChangeShapeOnStridedTensor) { float data[64 * 4]; ov::Tensor t{ov::element::f32, {4, 2, 2}, data, {64, 16, 4}}; diff --git a/src/inference/src/dev/make_tensor.cpp b/src/inference/src/dev/make_tensor.cpp index 2c5340b046247b..4f536c6cd24a7e 100644 --- a/src/inference/src/dev/make_tensor.cpp +++ b/src/inference/src/dev/make_tensor.cpp @@ -182,35 +182,28 @@ class AllocatedTensor : public ViewTensor { OPENVINO_ASSERT(allocator, "Allocator was not initialized"); auto num_elements = shape_size(shape); auto data = const_cast(allocator).allocate(element_type.size() * num_elements); - init(data, element_type, shape); + initialize_elements(data, element_type, shape); return data; }()}, m_allocator{allocator} {} ~AllocatedTensor() { - auto num_elements = get_size(); - destroy(0, num_elements); - m_allocator.deallocate(m_ptr, get_byte_size()); + destroy_memory(); } void set_shape(ov::Shape new_shape) override { if (m_shape == new_shape) return; - auto old_num_elements = get_size(); - auto old_byte_size = get_byte_size(); m_shape = std::move(new_shape); - auto new_num_elements = get_size(); - if (get_byte_size() > old_byte_size) { + if (get_size() > get_capacity()) { + destroy_memory(); + // allocate buffer and initialize objects from scratch - destroy(0, old_num_elements); - m_allocator.deallocate(m_ptr, old_byte_size); - m_ptr = m_allocator.allocate(get_byte_size()); - init(m_ptr, m_element_type, m_shape); - } else { - // destroy only not needed objects - destroy(new_num_elements, old_num_elements); + m_capacity = m_shape; + m_ptr = 
m_allocator.allocate(get_bytes_capacity()); + initialize_elements(m_ptr, m_element_type, m_shape); } m_strides.clear(); @@ -218,7 +211,7 @@ class AllocatedTensor : public ViewTensor { } private: - void destroy(size_t begin_ind, size_t end_ind) { + void destroy_elements(size_t begin_ind, size_t end_ind) { // it removes elements from tail if (get_element_type() == element::Type_t::string) { auto strings = static_cast(m_ptr); @@ -229,7 +222,13 @@ class AllocatedTensor : public ViewTensor { } } - void init(void* data, const element::Type& element_type, const Shape& shape) { + void destroy_memory() { + destroy_elements(0, get_capacity()); + m_allocator.deallocate(m_ptr, get_bytes_capacity()); + m_ptr = nullptr; + } + + static void initialize_elements(void* data, const element::Type& element_type, const Shape& shape) { if (element_type == element::Type_t::string) { auto num_elements = shape_size(shape); auto string_ptr = static_cast(data); @@ -237,6 +236,14 @@ class AllocatedTensor : public ViewTensor { } } + size_t get_capacity() const { + return shape_size(m_capacity); + } + + size_t get_bytes_capacity() const { + return (get_capacity() * get_element_type().bitwidth() + 8 - 1) / 8; + } + Allocator m_allocator; }; From 8798d7bae38de5685865814585972107e6133e88 Mon Sep 17 00:00:00 2001 From: Pawel Raasz Date: Wed, 10 Jan 2024 13:15:14 +0100 Subject: [PATCH 12/28] [core] Shape and PartialShape supports negative idx (#22049) * Core Shape and PartialShape supports negative idx * Remove redundant `&` in shape impl * Remove not required include --- src/core/dev_api/openvino/core/shape_util.hpp | 11 +++++ .../include/openvino/core/partial_shape.hpp | 13 +++--- src/core/include/openvino/core/shape.hpp | 36 +++++++++++++++ src/core/src/partial_shape.cpp | 20 +++----- src/core/src/shape.cpp | 27 +++++++++-- src/core/src/shape_util.cpp | 10 ++++ src/core/tests/partial_shape.cpp | 46 +++++++++++++++++++ src/core/tests/shape.cpp | 37 +++++++++++++++ 8 files changed, 176 
insertions(+), 24 deletions(-) diff --git a/src/core/dev_api/openvino/core/shape_util.hpp b/src/core/dev_api/openvino/core/shape_util.hpp index 89688526d4f286..dda99a8ddc883c 100644 --- a/src/core/dev_api/openvino/core/shape_util.hpp +++ b/src/core/dev_api/openvino/core/shape_util.hpp @@ -60,5 +60,16 @@ OPENVINO_API Shape reduce_keep_dims(const Shape& input, const AxisSet& axes); * @return Result shape from inputs with applied broadcast specification. */ Shape get_broadcast_shape(const Shape& first, const Shape& second, const ov::op::AutoBroadcastSpec& broadcast_spec); + +/** + * @brief Normalize shape index to the rank + * + * If input index is out of range [-rank, rank) throws exception. + * + * @param idx Shape dimension index. + * @param rank Shape rank. + * @return Normalized shape dimension index. + */ +OPENVINO_API std::ptrdiff_t normalize_shape_index(std::ptrdiff_t idx, size_t rank); } // namespace util } // namespace ov diff --git a/src/core/include/openvino/core/partial_shape.hpp b/src/core/include/openvino/core/partial_shape.hpp index 4f88efcd1bf088..041cb7f6789343 100644 --- a/src/core/include/openvino/core/partial_shape.hpp +++ b/src/core/include/openvino/core/partial_shape.hpp @@ -168,14 +168,15 @@ class OPENVINO_API PartialShape { /// `false`. bool all_non_negative() const; - /// \brief Index operator for PartialShape. - /// \param i The index of the dimension being selected. + /// \brief Index operator for PartialShape, with bound checking. + /// \param i The index of the dimension being selected in range [-rank, rank). /// \return A reference to the `i`th Dimension of this shape. - const Dimension& operator[](size_t i) const; - /// \brief Index operator for PartialShape. - /// \param i The index of the dimension being selected. + Dimension& operator[](std::ptrdiff_t i); + /// \brief Index operator for PartialShape, with bound checking. + /// \param i The index of the dimension being selected in range [-rank, rank). 
/// \return A reference to the `i`th Dimension of this shape. - Dimension& operator[](size_t i); + const Dimension& operator[](std::ptrdiff_t i) const; + /// \brief Returns a vector of the dimensions. This has no meaning if dynamic. explicit operator std::vector() const { return m_dimensions; diff --git a/src/core/include/openvino/core/shape.hpp b/src/core/include/openvino/core/shape.hpp index a04a864a8394fb..4283e3a4a54ee1 100644 --- a/src/core/include/openvino/core/shape.hpp +++ b/src/core/include/openvino/core/shape.hpp @@ -40,6 +40,42 @@ class Shape : public std::vector { OPENVINO_API Shape& operator=(const Shape& v); OPENVINO_API Shape& operator=(Shape&& v) noexcept; OPENVINO_API std::string to_string() const; + + /** + * @brief Gets dimension at index. + * + * @param i Index to shape dimension [-rank, rank). + * + * @return A reference to i-th dimension of this shape. + */ + OPENVINO_API typename Shape::reference operator[](std::ptrdiff_t i); + + /** + * @brief Gets dimension at index. + * + * @param i Index to shape dimension [-rank, rank). + * + * @return A const reference to i-th dimension of this shape. + */ + OPENVINO_API typename Shape::const_reference operator[](std::ptrdiff_t i) const; + + /** + * @brief Gets dimension at index, with bounds checking. + * + * @param i Index to shape dimension [-rank, rank). + * + * @return A reference to i-th dimension of this shape. + */ + OPENVINO_API typename Shape::reference at(std::ptrdiff_t i); + + /** + * @brief Gets dimension at index, with bounds checking. + * + * @param i Index to shape dimension [-rank, rank). + * + * @return A const reference to i-th dimension of this shape. 
+ */ + OPENVINO_API typename Shape::const_reference at(std::ptrdiff_t i) const; }; /** diff --git a/src/core/src/partial_shape.cpp b/src/core/src/partial_shape.cpp index 38fc53cb88a846..993a957e447fec 100644 --- a/src/core/src/partial_shape.cpp +++ b/src/core/src/partial_shape.cpp @@ -9,11 +9,9 @@ #include #include "openvino/core/dimension_tracker.hpp" +#include "openvino/core/shape_util.hpp" #include "openvino/util/common_util.hpp" - -namespace { -static constexpr char dim_out_range_access_txt[] = "Accessing out-of-range dimension in Dimension[]"; -} +#include "validation_util.hpp" ov::PartialShape::PartialShape() : PartialShape(std::initializer_list{}) {} @@ -374,17 +372,11 @@ bool ov::PartialShape::all_non_negative() const { return true; } -const ov::Dimension& ov::PartialShape::operator[](size_t i) const { - if (i >= m_dimensions.size()) { - OPENVINO_THROW(dim_out_range_access_txt); - } - return m_dimensions[i]; +const ov::Dimension& ov::PartialShape::operator[](std::ptrdiff_t i) const { + return m_dimensions[util::normalize_shape_index(i, m_dimensions.size())]; } -ov::Dimension& ov::PartialShape::operator[](size_t i) { - if (i >= m_dimensions.size()) { - OPENVINO_THROW(dim_out_range_access_txt); - } +ov::Dimension& ov::PartialShape::operator[](std::ptrdiff_t i) { m_shape_type = ShapeType::SHAPE_IS_UPDATED; // We can't guarantee that the shape remains static or dynamic. 
- return m_dimensions[i]; + return m_dimensions[util::normalize_shape_index(i, m_dimensions.size())]; } diff --git a/src/core/src/shape.cpp b/src/core/src/shape.cpp index 7a91ff20c9c6de..dbc57212952710 100644 --- a/src/core/src/shape.cpp +++ b/src/core/src/shape.cpp @@ -4,9 +4,9 @@ #include "openvino/core/shape.hpp" +#include "openvino/core/shape_util.hpp" #include "openvino/util/common_util.hpp" - -using namespace std; +#include "validation_util.hpp" std::ostream& ov::operator<<(std::ostream& s, const Shape& shape) { s << "["; @@ -16,9 +16,9 @@ std::ostream& ov::operator<<(std::ostream& s, const Shape& shape) { } namespace { -size_t stringToSizeT(const string& valStr) { +size_t stringToSizeT(const std::string& valStr) { size_t ret{0}; - istringstream ss(valStr); + std::istringstream ss(valStr); if (!ss.eof()) { ss >> ret; } @@ -68,3 +68,22 @@ std::string ov::Shape::to_string() const { shape_str_stream << *this; return shape_str_stream.str(); } + +namespace ov { + +typename Shape::reference Shape::operator[](std::ptrdiff_t i) { + return std::vector::operator[](util::normalize(i, size())); +} + +typename Shape::const_reference Shape::operator[](std::ptrdiff_t i) const { + return std::vector::operator[](util::normalize(i, size())); +} + +typename Shape::reference Shape::at(std::ptrdiff_t i) { + return std::vector::operator[](util::normalize_shape_index(i, size())); +} + +typename Shape::const_reference Shape::at(std::ptrdiff_t i) const { + return std::vector::operator[](util::normalize_shape_index(i, size())); +} +} // namespace ov diff --git a/src/core/src/shape_util.cpp b/src/core/src/shape_util.cpp index 810686c9c7f88c..d84d8153a92059 100644 --- a/src/core/src/shape_util.cpp +++ b/src/core/src/shape_util.cpp @@ -8,6 +8,7 @@ #include "openvino/core/partial_shape.hpp" #include "openvino/core/shape_util.hpp" +#include "validation_util.hpp" namespace ngraph { template <> @@ -126,5 +127,14 @@ Shape get_broadcast_shape(const Shape& first, const Shape& second, const 
op::Aut "Argument shapes are inconsistent"); return out_shape.to_shape(); } + +std::ptrdiff_t normalize_shape_index(std::ptrdiff_t idx, size_t rank) { + idx = normalize(idx, static_cast(rank)); + if (static_cast(idx) >= rank) { + OPENVINO_THROW("Accessing out-of-range dimension"); + } else { + return idx; + } +} } // namespace util } // namespace ov diff --git a/src/core/tests/partial_shape.cpp b/src/core/tests/partial_shape.cpp index 0d23114e219cb7..d3817a7d5c935d 100644 --- a/src/core/tests/partial_shape.cpp +++ b/src/core/tests/partial_shape.cpp @@ -1315,3 +1315,49 @@ TEST(partial_shape, infer_windowed_reduction_rank_static_dynamic_rank_static_dyn NodeValidationFailure); OPENVINO_SUPPRESS_DEPRECATED_END } + +TEST(partial_shape, const_subscribe_operator) { + const auto shape = ov::PartialShape{-1, {2, 10}, 5, 6, 7}; + + EXPECT_EQ(shape[2], ov::Dimension(5)); + EXPECT_EQ(shape[0], ov::Dimension::dynamic()); + EXPECT_EQ(shape[1], ov::Dimension(2, 10)); + EXPECT_EQ(shape[4], ov::Dimension(7)); + + EXPECT_EQ(shape[-3], ov::Dimension(5)); + EXPECT_EQ(shape[-5], ov::Dimension::dynamic()); + EXPECT_EQ(shape[-4], ov::Dimension(2, 10)); + EXPECT_EQ(shape[-1], ov::Dimension(7)); +} + +TEST(partial_shape, subscribe_operator) { + auto shape = ov::PartialShape{-1, {2, 10}, 5, 6, 7}; + + EXPECT_EQ(shape[2], ov::Dimension(5)); + EXPECT_EQ(shape[0], ov::Dimension::dynamic()); + EXPECT_EQ(shape[1], ov::Dimension(2, 10)); + EXPECT_EQ(shape[4], ov::Dimension(7)); + + EXPECT_EQ(shape[-3], ov::Dimension(5)); + EXPECT_EQ(shape[-5], ov::Dimension::dynamic()); + EXPECT_EQ(shape[-4], ov::Dimension(2, 10)); + EXPECT_EQ(shape[-1], ov::Dimension(7)); +} + +TEST(partial_shape, const_subscribe_operator_throw_out_of_range) { + const auto shape = ov::PartialShape::dynamic(7); + + EXPECT_THROW(shape[7], ov::Exception); + EXPECT_THROW(shape[1000], ov::Exception); + EXPECT_THROW(shape[-8], ov::Exception); + EXPECT_THROW(shape[-80000], ov::Exception); +} + +TEST(partial_shape, 
subscribe_operator_throw_out_of_range) { + auto shape = ov::PartialShape::dynamic(7); + + EXPECT_THROW(shape[7], ov::Exception); + EXPECT_THROW(shape[1000], ov::Exception); + EXPECT_THROW(shape[-8], ov::Exception); + EXPECT_THROW(shape[-80000], ov::Exception); +} diff --git a/src/core/tests/shape.cpp b/src/core/tests/shape.cpp index fc80a7ab30c490..0f543a927d62b2 100644 --- a/src/core/tests/shape.cpp +++ b/src/core/tests/shape.cpp @@ -22,3 +22,40 @@ TEST(shape, test_shape_strides) { ASSERT_EQ((Strides{7, 1}), row_major_strides(Shape{2, 7})); ASSERT_EQ((Strides{84, 12, 1}), row_major_strides(Shape{5, 7, 12})); } + +TEST(shape, at) { + const auto shape = ov::Shape{100, 200, 5, 6, 7}; + + EXPECT_EQ(shape.at(2), 5); + EXPECT_EQ(shape.at(0), 100); + EXPECT_EQ(shape.at(1), 200); + EXPECT_EQ(shape.at(4), 7); + + EXPECT_EQ(shape.at(-3), 5); + EXPECT_EQ(shape.at(-5), 100); + EXPECT_EQ(shape.at(-4), 200); + EXPECT_EQ(shape.at(-1), 7); +} + +TEST(shape, subscribe_operator) { + const auto shape = ov::Shape{100, 200, 5, 6, 7}; + + EXPECT_EQ(shape[2], 5); + EXPECT_EQ(shape[0], 100); + EXPECT_EQ(shape[1], 200); + EXPECT_EQ(shape[4], 7); + + EXPECT_EQ(shape[-3], 5); + EXPECT_EQ(shape[-5], 100); + EXPECT_EQ(shape[-4], 200); + EXPECT_EQ(shape[-1], 7); +} + +TEST(shape, at_throw_exception) { + auto shape = ov::Shape{1, 2, 3, 4, 5, 6, 7}; + + EXPECT_THROW(shape.at(7), ov::Exception); + EXPECT_THROW(shape.at(1000), ov::Exception); + EXPECT_THROW(shape.at(-8), ov::Exception); + EXPECT_THROW(shape.at(-80000), ov::Exception); +} From 78e38ddaef385f487b1a67c3038c8c7a7d5a4a62 Mon Sep 17 00:00:00 2001 From: guy-tamir <40924075+guy-tamir@users.noreply.github.com> Date: Wed, 10 Jan 2024 16:24:47 +0200 Subject: [PATCH 13/28] Removed singledispatchmethod from requirements (#22058) --- src/bindings/python/requirements.txt | 1 - src/bindings/python/requirements_test.txt | 1 - 2 files changed, 2 deletions(-) diff --git a/src/bindings/python/requirements.txt b/src/bindings/python/requirements.txt 
index c4d3c3e35568aa..4316682284053e 100644 --- a/src/bindings/python/requirements.txt +++ b/src/bindings/python/requirements.txt @@ -1,3 +1,2 @@ numpy>=1.16.6 -singledispatchmethod; python_version<'3.8' openvino-telemetry>=2023.2.1 diff --git a/src/bindings/python/requirements_test.txt b/src/bindings/python/requirements_test.txt index 62320649b6acb9..7bc1157d4edceb 100644 --- a/src/bindings/python/requirements_test.txt +++ b/src/bindings/python/requirements_test.txt @@ -39,7 +39,6 @@ retrying tox types-pkg_resources wheel -singledispatchmethod torch torchvision; platform_machine == 'arm64' and python_version >= '3.8' torchvision; platform_machine != 'arm64' From 3722df74e8d62679945a39afe2e218024d61b783 Mon Sep 17 00:00:00 2001 From: Georgy Krivoruchko Date: Wed, 10 Jan 2024 07:25:29 -0800 Subject: [PATCH 14/28] [Py] Removing an old API from tests (#22047) * Removed usage of old api * Removed use_old_api * Removed use_old_api in common files * Removed use_old_api from tests * MO/OVC related changes * Renamed InferAPI20 * Removed use_old_api from tf_Placeholder test * Removed transpose step * Removed forced use_old_api=False for TF Lite --- tests/layer_tests/common/layer_test_class.py | 18 ++-- tests/layer_tests/common/layer_utils.py | 40 +------- .../common/tf2_layer_test_class.py | 2 +- .../layer_tests/common/tf_layer_test_class.py | 9 +- .../common/tflite_layer_test_class.py | 4 +- tests/layer_tests/common/utils/tf_utils.py | 24 ----- .../layer_tests/common/utils/tflite_utils.py | 7 +- tests/layer_tests/conftest.py | 17 ---- .../test_mo_convert_complex_params.py | 8 +- .../test_mo_convert_extensions.py | 2 +- .../test_mo_convert_pytorch.py | 4 +- tests/layer_tests/onnx_tests/test_abs.py | 8 +- .../onnx_tests/test_add_sub_mul_div.py | 96 +++++++++---------- tests/layer_tests/onnx_tests/test_and.py | 12 +-- tests/layer_tests/onnx_tests/test_argmax.py | 4 +- tests/layer_tests/onnx_tests/test_bn.py | 12 +-- tests/layer_tests/onnx_tests/test_ceil.py | 16 ++-- 
tests/layer_tests/onnx_tests/test_clip.py | 8 +- tests/layer_tests/onnx_tests/test_concat.py | 26 +++-- tests/layer_tests/onnx_tests/test_conv.py | 32 +++---- .../onnx_tests/test_conv_transpose.py | 16 ++-- tests/layer_tests/onnx_tests/test_cumsum.py | 9 +- .../onnx_tests/test_dequantize_linear.py | 19 ++-- tests/layer_tests/onnx_tests/test_dropout.py | 16 ++-- tests/layer_tests/onnx_tests/test_elu.py | 8 +- .../onnx_tests/test_embedding_bag.py | 5 +- tests/layer_tests/onnx_tests/test_flatten.py | 45 ++++----- tests/layer_tests/onnx_tests/test_floor.py | 8 +- .../layer_tests/onnx_tests/test_fusedgemm.py | 4 +- tests/layer_tests/onnx_tests/test_gather.py | 16 ++-- tests/layer_tests/onnx_tests/test_gemm.py | 20 ++-- .../onnx_tests/test_hard_sigmoid.py | 13 ++- tests/layer_tests/onnx_tests/test_identity.py | 8 +- .../onnx_tests/test_image_scaler.py | 18 ++-- .../onnx_tests/test_instance_normalization.py | 10 +- .../layer_tests/onnx_tests/test_leaky_relu.py | 17 ++-- tests/layer_tests/onnx_tests/test_log.py | 16 ++-- .../layer_tests/onnx_tests/test_logsoftmax.py | 4 +- tests/layer_tests/onnx_tests/test_loop.py | 8 +- tests/layer_tests/onnx_tests/test_lrn.py | 12 +-- tests/layer_tests/onnx_tests/test_lstm.py | 13 ++- tests/layer_tests/onnx_tests/test_matmul.py | 16 ++-- .../test_mean_variance_normalization.py | 4 +- tests/layer_tests/onnx_tests/test_neg.py | 8 +- tests/layer_tests/onnx_tests/test_non_zero.py | 8 +- tests/layer_tests/onnx_tests/test_not.py | 12 +-- tests/layer_tests/onnx_tests/test_or.py | 16 ++-- tests/layer_tests/onnx_tests/test_pad.py | 14 ++- tests/layer_tests/onnx_tests/test_pooling.py | 53 +++++----- tests/layer_tests/onnx_tests/test_prelu.py | 26 +++-- .../layer_tests/onnx_tests/test_reciprocal.py | 17 ++-- tests/layer_tests/onnx_tests/test_reduce.py | 34 ++++--- .../layer_tests/onnx_tests/test_reduce_lp.py | 16 ++-- tests/layer_tests/onnx_tests/test_relu.py | 8 +- tests/layer_tests/onnx_tests/test_reshape.py | 36 +++---- 
tests/layer_tests/onnx_tests/test_resize.py | 42 ++++---- .../layer_tests/onnx_tests/test_roi_align.py | 4 +- tests/layer_tests/onnx_tests/test_scale.py | 8 +- tests/layer_tests/onnx_tests/test_scatter.py | 8 +- tests/layer_tests/onnx_tests/test_sigmoid.py | 12 +-- tests/layer_tests/onnx_tests/test_sign.py | 8 +- tests/layer_tests/onnx_tests/test_slice.py | 24 ++--- tests/layer_tests/onnx_tests/test_softmax.py | 4 +- tests/layer_tests/onnx_tests/test_softplus.py | 8 +- tests/layer_tests/onnx_tests/test_softsign.py | 8 +- .../onnx_tests/test_split_concat.py | 48 +++++----- tests/layer_tests/onnx_tests/test_sqrt.py | 8 +- tests/layer_tests/onnx_tests/test_squeeze.py | 24 ++--- tests/layer_tests/onnx_tests/test_sum.py | 33 ++++--- tests/layer_tests/onnx_tests/test_topk.py | 13 ++- .../layer_tests/onnx_tests/test_transpose.py | 17 ++-- .../onnx_tests/test_trigonometry.py | 80 ++++++++-------- .../layer_tests/onnx_tests/test_unsqueeze.py | 24 ++--- tests/layer_tests/onnx_tests/test_upsample.py | 19 ++-- tests/layer_tests/onnx_tests/test_where.py | 4 +- tests/layer_tests/onnx_tests/test_xor.py | 12 +-- .../test_complex_params.py | 8 +- .../ovc_python_api_tests/test_extensions.py | 2 +- .../ovc_python_api_tests/test_paddle.py | 2 +- .../ovc_python_api_tests/test_pytorch.py | 4 +- .../ovc_python_api_tests/test_tf.py | 2 +- .../test_tf2_keras_activation.py | 4 +- .../test_tf2_keras_activity_regularization.py | 8 +- .../test_tf2_keras_add.py | 16 ++-- .../test_tf2_keras_additive_attention.py | 8 +- .../test_tf2_keras_alpha_dropout.py | 8 +- .../test_tf2_keras_attention.py | 8 +- .../test_tf2_keras_average.py | 12 +-- .../test_tf2_keras_avg_pool_1D.py | 8 +- .../test_tf2_keras_avg_pool_2D.py | 8 +- .../test_tf2_keras_avg_pool_3D.py | 8 +- .../test_tf2_keras_batch_normalization.py | 8 +- .../test_tf2_keras_bidirectional.py | 4 +- .../test_tf2_keras_concatenate.py | 8 +- .../test_tf2_keras_conv_1d.py | 4 +- .../test_tf2_keras_conv_1d_transpose.py | 4 +- 
.../test_tf2_keras_conv_2d.py | 4 +- .../test_tf2_keras_conv_2d_transpose.py | 4 +- .../test_tf2_keras_conv_3d.py | 4 +- .../test_tf2_keras_conv_3d_transpose.py | 4 +- .../test_tf2_keras_conv_lstm_2d.py | 4 +- .../test_tf2_keras_cropping_1d.py | 4 +- .../test_tf2_keras_cropping_2d.py | 4 +- .../test_tf2_keras_cropping_3d.py | 4 +- .../test_tf2_keras_dense.py | 8 +- .../test_tf2_keras_depthwiseconv2D.py | 12 +-- .../test_tf2_keras_dot.py | 12 +-- .../test_tf2_keras_dropout.py | 8 +- .../test_tf2_keras_elu.py | 12 +-- .../test_tf2_keras_embedding.py | 8 +- .../test_tf2_keras_flatten.py | 4 +- ...test_tf2_keras_global_average_pooling1D.py | 4 +- ...test_tf2_keras_global_average_pooling2D.py | 4 +- ...test_tf2_keras_global_average_pooling3D.py | 4 +- .../test_tf2_keras_global_maxpool1D.py | 4 +- .../test_tf2_keras_global_maxpool2D.py | 4 +- .../test_tf2_keras_global_maxpool3D.py | 4 +- .../test_tf2_keras_gru.py | 16 ++-- .../test_tf2_keras_gru_cell.py | 4 +- .../test_tf2_keras_lambda.py | 4 +- .../test_tf2_keras_layer_normalization.py | 8 +- .../test_tf2_keras_leakyrelu.py | 4 +- .../test_tf2_keras_locally_connected1D.py | 8 +- .../test_tf2_keras_locally_connected2D.py | 8 +- .../test_tf2_keras_lstm.py | 12 +-- .../test_tf2_keras_lstm_cell.py | 4 +- .../test_tf2_keras_masking.py | 4 +- .../test_tf2_keras_maximum.py | 16 ++-- .../test_tf2_keras_maxpool1D.py | 8 +- .../test_tf2_keras_maxpool2D.py | 4 +- .../test_tf2_keras_maxpool3D.py | 8 +- .../test_tf2_keras_minimum.py | 16 ++-- .../test_tf2_keras_multiheadattention.py | 8 +- .../test_tf2_keras_multiply.py | 16 ++-- .../test_tf2_keras_permute.py | 4 +- .../test_tf2_keras_prelu.py | 12 +-- .../test_tf2_keras_relu.py | 8 +- .../test_tf2_keras_repeatvector.py | 4 +- .../test_tf2_keras_reshape.py | 4 +- .../test_tf2_keras_rnn.py | 12 +-- .../test_tf2_keras_roll.py | 4 +- .../test_tf2_keras_separableconv1d.py | 16 ++-- .../test_tf2_keras_separableconv2d.py | 16 ++-- .../test_tf2_keras_simplernn.py | 16 ++-- 
.../test_tf2_keras_softmax.py | 8 +- .../test_tf2_keras_softplus.py | 8 +- .../test_tf2_keras_spatialdropout1d.py | 4 +- .../test_tf2_keras_spatialdropout2d.py | 8 +- .../test_tf2_keras_spatialdropout3d.py | 8 +- .../test_tf2_keras_stackedrnncells.py | 4 +- .../test_tf2_keras_subtract.py | 8 +- .../test_tf2_keras_swish.py | 8 +- .../test_tf2_keras_thresholdedrelu.py | 8 +- .../test_tf2_keras_timedistributed.py | 4 +- .../test_tf2_keras_upsampling1d.py | 4 +- .../test_tf2_keras_upsampling2d.py | 4 +- .../test_tf2_keras_upsampling3d.py | 8 +- .../test_tf2_keras_zeropadding1d.py | 4 +- .../test_tf2_keras_zeropadding2d.py | 8 +- .../test_tf2_keras_zeropadding3d.py | 8 +- .../test_tf2_map_fn.py | 16 ++-- .../tensorflow_tests/test_tf_Add.py | 41 ++++---- .../tensorflow_tests/test_tf_AddN.py | 4 +- .../tensorflow_tests/test_tf_AddTypes.py | 4 +- .../test_tf_AdjustContrastv2.py | 4 +- .../tensorflow_tests/test_tf_ArgMinMax.py | 4 +- .../tensorflow_tests/test_tf_Atan2.py | 4 +- .../tensorflow_tests/test_tf_BatchToSpace.py | 12 +-- .../test_tf_BatchToSpaceND.py | 4 +- .../tensorflow_tests/test_tf_BiasAdd.py | 32 +++---- .../tensorflow_tests/test_tf_BinaryOps.py | 4 +- .../tensorflow_tests/test_tf_BroadcastArgs.py | 5 +- .../tensorflow_tests/test_tf_BroadcastTo.py | 4 +- .../tensorflow_tests/test_tf_Bucketize.py | 4 +- .../test_tf_CTCGreedyDecoder.py | 4 +- .../tensorflow_tests/test_tf_CTCLoss.py | 4 +- .../tensorflow_tests/test_tf_Cast.py | 4 +- .../tensorflow_tests/test_tf_CheckNumerics.py | 4 +- .../tensorflow_tests/test_tf_ClipByValue.py | 4 +- .../tensorflow_tests/test_tf_ComplexFFT.py | 16 ++-- .../tensorflow_tests/test_tf_Concat.py | 30 +++--- .../test_tf_ConjugateTranspose.py | 8 +- .../tensorflow_tests/test_tf_Conv2D.py | 4 +- .../test_tf_Conv2DBackprop.py | 4 +- .../tensorflow_tests/test_tf_Conv3D.py | 4 +- .../test_tf_Conv3DBackprop.py | 4 +- .../tensorflow_tests/test_tf_CropAndResize.py | 4 +- .../tensorflow_tests/test_tf_Cumsum.py | 4 +- 
.../tensorflow_tests/test_tf_DepthToSpace.py | 4 +- .../tensorflow_tests/test_tf_Div.py | 4 +- .../tensorflow_tests/test_tf_DivNoNan.py | 4 +- .../test_tf_DynamicPartition.py | 8 +- .../tensorflow_tests/test_tf_Eltwise.py | 9 +- .../tensorflow_tests/test_tf_EnsureShape.py | 4 +- .../tensorflow_tests/test_tf_Equal.py | 25 ++--- .../tensorflow_tests/test_tf_ExpandDims.py | 5 +- .../test_tf_ExtractImagePatches.py | 5 +- .../tensorflow_tests/test_tf_Eye.py | 5 +- .../test_tf_FakeQuantWithMinMaxVars.py | 10 +- .../tensorflow_tests/test_tf_FakeQuantize.py | 4 +- .../tensorflow_tests/test_tf_Fill.py | 4 +- .../tensorflow_tests/test_tf_FloorDiv.py | 4 +- .../test_tf_FusedBatchNorm.py | 4 +- .../tensorflow_tests/test_tf_GRUBlockCell.py | 4 +- .../tensorflow_tests/test_tf_Gather.py | 5 +- .../tensorflow_tests/test_tf_GatherNd.py | 5 +- .../tensorflow_tests/test_tf_Identity.py | 4 +- .../tensorflow_tests/test_tf_IdentityN.py | 4 +- .../tensorflow_tests/test_tf_If.py | 16 ++-- .../tensorflow_tests/test_tf_Inv.py | 4 +- .../test_tf_InvertPermutation.py | 4 +- .../tensorflow_tests/test_tf_IsFinite.py | 4 +- .../tensorflow_tests/test_tf_IsInf.py | 4 +- .../tensorflow_tests/test_tf_IsNan.py | 4 +- .../tensorflow_tests/test_tf_L2Loss.py | 4 +- .../tensorflow_tests/test_tf_LRN.py | 4 +- .../tensorflow_tests/test_tf_LeakyRelu.py | 4 +- .../tensorflow_tests/test_tf_LinSpace.py | 4 +- .../tensorflow_tests/test_tf_ListDiff.py | 4 +- .../tensorflow_tests/test_tf_Log1p.py | 4 +- .../tensorflow_tests/test_tf_LogSoftmax.py | 4 +- .../tensorflow_tests/test_tf_MatMul.py | 8 +- .../tensorflow_tests/test_tf_MatrixDiag.py | 4 +- .../test_tf_MaxPoolWithArgmax.py | 4 +- .../tensorflow_tests/test_tf_MinMax.py | 4 +- .../tensorflow_tests/test_tf_Mul.py | 44 ++++----- .../tensorflow_tests/test_tf_MulNoNan.py | 4 +- .../tensorflow_tests/test_tf_Multinomial.py | 2 - .../tensorflow_tests/test_tf_NestedWhile.py | 10 +- .../test_tf_NonMaxSupression.py | 14 +-- .../tensorflow_tests/test_tf_NormalizeL2.py | 
8 +- .../tensorflow_tests/test_tf_OneHot.py | 10 +- .../tensorflow_tests/test_tf_OnesLike.py | 4 +- .../tensorflow_tests/test_tf_Pack.py | 4 +- .../tensorflow_tests/test_tf_Pad.py | 10 +- .../test_tf_ParallelDynamicStitch.py | 8 +- .../tensorflow_tests/test_tf_Placeholder.py | 4 +- .../tensorflow_tests/test_tf_Pooling.py | 10 +- .../tensorflow_tests/test_tf_RandomUniform.py | 8 +- .../tensorflow_tests/test_tf_Range.py | 4 +- .../tensorflow_tests/test_tf_Rank.py | 4 +- .../tensorflow_tests/test_tf_ReLU6.py | 9 +- .../tensorflow_tests/test_tf_Reciprocal.py | 4 +- .../test_tf_ReduceArithmeticOps.py | 4 +- .../test_tf_ReduceLogicalOps.py | 4 +- .../test_tf_Resample_pattern_new.py | 5 +- .../tensorflow_tests/test_tf_Reshape.py | 8 +- .../tensorflow_tests/test_tf_Resize.py | 4 +- .../tensorflow_tests/test_tf_Reverse.py | 4 +- .../test_tf_ReverseSequence.py | 4 +- .../tensorflow_tests/test_tf_ReverseV2.py | 4 +- .../tensorflow_tests/test_tf_Roll.py | 5 +- .../tensorflow_tests/test_tf_Rsqrt.py | 9 +- .../tensorflow_tests/test_tf_ScatterND.py | 4 +- .../tensorflow_tests/test_tf_SegmentSum.py | 8 +- .../tensorflow_tests/test_tf_Select.py | 4 +- .../tensorflow_tests/test_tf_SelectV2.py | 4 +- .../tensorflow_tests/test_tf_Shape.py | 8 +- .../tensorflow_tests/test_tf_ShapeN.py | 4 +- .../tensorflow_tests/test_tf_Size.py | 4 +- .../tensorflow_tests/test_tf_Slice.py | 5 +- .../tensorflow_tests/test_tf_Softmax.py | 4 +- .../tensorflow_tests/test_tf_Softsign.py | 4 +- .../tensorflow_tests/test_tf_SpaceToBatch.py | 12 +-- .../test_tf_SpaceToBatchND.py | 4 +- .../tensorflow_tests/test_tf_SpaceToDepth.py | 4 +- .../tensorflow_tests/test_tf_Split.py | 4 +- .../tensorflow_tests/test_tf_SplitV.py | 4 +- .../tensorflow_tests/test_tf_Squeeze.py | 33 +++---- .../tensorflow_tests/test_tf_StridedSlice.py | 16 ++-- .../tensorflow_tests/test_tf_Sub.py | 40 ++++---- .../tensorflow_tests/test_tf_Swish.py | 9 +- .../tensorflow_tests/test_tf_SwitchMerge.py | 4 +- .../test_tf_TensorArrayOps.py | 16 
++-- .../test_tf_TensorListConcatV2.py | 4 +- .../test_tf_TensorListLength.py | 8 +- .../test_tf_TensorListResize.py | 4 +- .../tensorflow_tests/test_tf_Tile.py | 4 +- .../tensorflow_tests/test_tf_ToBool.py | 4 +- .../tensorflow_tests/test_tf_TopK.py | 25 ++--- .../tensorflow_tests/test_tf_TopKV2.py | 5 +- .../tensorflow_tests/test_tf_Transpose.py | 8 +- .../tensorflow_tests/test_tf_TruncateDiv.py | 4 +- .../tensorflow_tests/test_tf_TruncateMod.py | 4 +- .../tensorflow_tests/test_tf_UnaryOps.py | 16 ++-- .../tensorflow_tests/test_tf_Unique.py | 8 +- .../test_tf_UniqueWithCounts.py | 4 +- .../tensorflow_tests/test_tf_Unpack.py | 4 +- .../tensorflow_tests/test_tf_UnravelIndex.py | 4 +- .../test_tf_UnsortedSegmentSum.py | 4 +- .../tensorflow_tests/test_tf_Where.py | 4 +- .../tensorflow_tests/test_tf_While.py | 12 +-- .../tensorflow_tests/test_tf_Xlog1py.py | 4 +- .../tensorflow_tests/test_tf_Xlogy.py | 4 +- .../tensorflow_tests/test_tf_ZerosLike.py | 4 +- 295 files changed, 1356 insertions(+), 1546 deletions(-) diff --git a/tests/layer_tests/common/layer_test_class.py b/tests/layer_tests/common/layer_test_class.py index 6faa5d6db6a11b..7c3944ff451975 100644 --- a/tests/layer_tests/common/layer_test_class.py +++ b/tests/layer_tests/common/layer_test_class.py @@ -10,7 +10,7 @@ import numpy as np from common.constants import test_device, test_precision -from common.layer_utils import IEInfer, InferAPI20 +from common.layer_utils import InferAPI from common.utils.common_utils import generate_ir_python_api @@ -25,7 +25,7 @@ def get_framework_results(self, inputs_dict, model_path): raise RuntimeError("This is base class, please implement get_framework_results function for" " the specific framework") - def _test(self, framework_model, ref_net, ie_device, precision, ir_version, temp_dir, use_old_api, + def _test(self, framework_model, ref_net, ie_device, precision, ir_version, temp_dir, use_new_frontend=True, infer_timeout=60, enabled_transforms='', disabled_transforms='', 
**kwargs): """ @@ -34,7 +34,6 @@ def _test(self, framework_model, ref_net, ie_device, precision, ir_version, temp """ model_path = self.produce_model_path(framework_model=framework_model, save_path=temp_dir) self.use_new_frontend = use_new_frontend - self.use_old_api = use_old_api # TODO Pass environment variables via subprocess environment os.environ['MO_ENABLED_TRANSFORMS'] = enabled_transforms os.environ['MO_DISABLED_TRANSFORMS'] = disabled_transforms @@ -81,15 +80,10 @@ def _test(self, framework_model, ref_net, ie_device, precision, ir_version, temp if ie_device == 'GPU' and precision == 'FP32': config = {'INFERENCE_PRECISION_HINT': 'f32'} - if self.use_old_api: - ie_engine = IEInfer(model=path_to_xml, - weights=path_to_bin, - device=ie_device) - else: - ie_engine = InferAPI20(model=path_to_xml, - weights=path_to_bin, - device=ie_device, - use_new_frontend=use_new_frontend) + ie_engine = InferAPI(model=path_to_xml, + weights=path_to_bin, + device=ie_device, + use_new_frontend=use_new_frontend) # Prepare feed dict if 'kwargs_to_prepare_input' in kwargs and kwargs['kwargs_to_prepare_input']: inputs_dict = self._prepare_input(ie_engine.get_inputs_info(precision), diff --git a/tests/layer_tests/common/layer_utils.py b/tests/layer_tests/common/layer_utils.py index 95fa18d196aab1..b3701af3a1dc26 100644 --- a/tests/layer_tests/common/layer_utils.py +++ b/tests/layer_tests/common/layer_utils.py @@ -4,7 +4,6 @@ import sys from common.utils.multiprocessing_utils import multiprocessing_run -from openvino.inference_engine import IECore, get_version as ie_get_version from openvino.runtime import Core, get_version as ie2_get_version @@ -33,44 +32,7 @@ def infer(self, input_data, config=None, infer_timeout=10): self.res = multiprocessing_run(self.fw_infer, [input_data, config], self.name, infer_timeout) return self.res - -class IEInfer(BaseInfer): - def __init__(self, model, weights, device): - super().__init__('Inference Engine') - self.device = device - self.model = model - 
self.weights = weights - - def fw_infer(self, input_data, config=None): - - print("Inference Engine version: {}".format(ie_get_version())) - print("Creating IE Core Engine...") - ie = IECore() - print("Reading network files") - net = ie.read_network(self.model, self.weights) - print("Loading network") - exec_net = ie.load_network(net, self.device, config) - print("Starting inference") - result = exec_net.infer(input_data) - - if "exec_net" in locals(): - del exec_net - if "ie" in locals(): - del ie - - return result - - def get_inputs_info(self, precision) -> dict: - core = IECore() - net = core.read_network(self.model, self.weights) - inputs_info = {} - for item in net.input_info.items(): - inputs_info[item[0]] = item[1].tensor_desc.dims - - return inputs_info - - -class InferAPI20(BaseInfer): +class InferAPI(BaseInfer): def __init__(self, model, weights, device, use_new_frontend): super().__init__('Inference Engine') self.device = device diff --git a/tests/layer_tests/common/tf2_layer_test_class.py b/tests/layer_tests/common/tf2_layer_test_class.py index 6c6876f6f93055..4fb62cb76e4cec 100644 --- a/tests/layer_tests/common/tf2_layer_test_class.py +++ b/tests/layer_tests/common/tf2_layer_test_class.py @@ -32,7 +32,7 @@ def get_framework_results(self, inputs_dict, model_path): return self.get_tf2_keras_results(inputs_dict, model_path) else: # get results from tflite - return get_tflite_results(self.use_new_frontend, self.use_old_api, inputs_dict, model_path) + return get_tflite_results(self.use_new_frontend, inputs_dict, model_path) def get_tf2_keras_results(self, inputs_dict, model_path): import tensorflow as tf diff --git a/tests/layer_tests/common/tf_layer_test_class.py b/tests/layer_tests/common/tf_layer_test_class.py index e25e7ceaf1d03f..ab3289d8152b3a 100644 --- a/tests/layer_tests/common/tf_layer_test_class.py +++ b/tests/layer_tests/common/tf_layer_test_class.py @@ -5,7 +5,7 @@ from common.utils.tf_utils import summarize_graph from common.utils.tflite_utils 
import get_tflite_results, save_pb_to_tflite -from common.utils.tf_utils import save_to_pb, transpose_nhwc_to_nchw, transpose_nchw_to_nhwc +from common.utils.tf_utils import save_to_pb class CommonTFLayerTest(CommonLayerTest): @@ -15,7 +15,7 @@ def prepare_tf_inputs(self, inputs_dict): data = inputs_dict.get(key) if not ':' in key: key += ':0' - input[key] = transpose_nchw_to_nhwc(data, self.use_new_frontend, self.use_old_api) + input[key] = data return input @@ -47,8 +47,7 @@ def get_tf_results(self, inputs_dict, model_path): result = dict() for i, output in enumerate(outputs_list): _tf_res = tf_res[i] - result[output] = transpose_nhwc_to_nchw(_tf_res, self.use_new_frontend, - self.use_old_api) + result[output] = _tf_res return result def get_framework_results(self, inputs_dict, model_path): @@ -59,4 +58,4 @@ def get_framework_results(self, inputs_dict, model_path): return self.get_tf_results(inputs_dict, model_path) else: # get results from tflite - return get_tflite_results(self.use_new_frontend, self.use_old_api, inputs_dict, model_path) + return get_tflite_results(self.use_new_frontend, inputs_dict, model_path) diff --git a/tests/layer_tests/common/tflite_layer_test_class.py b/tests/layer_tests/common/tflite_layer_test_class.py index 8ff5122d8d43bc..1561aca1174810 100644 --- a/tests/layer_tests/common/tflite_layer_test_class.py +++ b/tests/layer_tests/common/tflite_layer_test_class.py @@ -44,7 +44,7 @@ def produce_model_path(self, framework_model, save_path): return self.model_path def get_framework_results(self, inputs_dict, model_path): - return get_tflite_results(self.use_new_frontend, self.use_old_api, inputs_dict, model_path) + return get_tflite_results(self.use_new_frontend, inputs_dict, model_path) def check_tflite_model_has_only_allowed_ops(self): if self.allowed_ops is None: @@ -77,4 +77,4 @@ def _test(self, ie_device, precision, temp_dir, params): model = self.make_model(params) self.model_path = self.produce_tflite_model(model, temp_dir) 
self.check_tflite_model_has_only_allowed_ops() - super()._test(model, None, ie_device, precision, None, temp_dir, False, True, **params) + super()._test(model, None, ie_device, precision, None, temp_dir, True, **params) diff --git a/tests/layer_tests/common/utils/tf_utils.py b/tests/layer_tests/common/utils/tf_utils.py index 913048acf2e762..188a60bffd9e98 100644 --- a/tests/layer_tests/common/utils/tf_utils.py +++ b/tests/layer_tests/common/utils/tf_utils.py @@ -150,30 +150,6 @@ def permute_axis(axis, permutation_inv): return permutation_inv[axis] -def transpose_nchw_to_nhwc(data, use_new_frontend, use_old_api): - if use_new_frontend or not use_old_api: - return data - - if len(data.shape) == 4: # reshaping for 4D tensors - return data.transpose(0, 2, 3, 1) - elif len(data.shape) == 5: # reshaping for 5D tensors - return data.transpose(0, 2, 3, 4, 1) - else: - return data - - -def transpose_nhwc_to_nchw(data, use_new_frontend, use_old_api): - if use_new_frontend or not use_old_api: - return data - - if len(data.shape) == 4: # reshaping for 4D tensors - return data.transpose(0, 3, 1, 2) # 2, 0, 1 - elif len(data.shape) == 5: # reshaping for 5D tensors - return data.transpose(0, 4, 1, 2, 3) # 3, 0, 1, 2 - else: - return data - - def save_to_pb(tf_model, path_to_saved_tf_model, model_name = 'model.pb'): tf.io.write_graph(tf_model, path_to_saved_tf_model, model_name, False) assert os.path.isfile(os.path.join(path_to_saved_tf_model, model_name)), "model.pb haven't been saved " \ diff --git a/tests/layer_tests/common/utils/tflite_utils.py b/tests/layer_tests/common/utils/tflite_utils.py index 887cded30e5f96..7dc4dc6d001304 100644 --- a/tests/layer_tests/common/utils/tflite_utils.py +++ b/tests/layer_tests/common/utils/tflite_utils.py @@ -5,7 +5,7 @@ import numpy as np import tensorflow as tf -from common.utils.tf_utils import summarize_graph, transpose_nhwc_to_nchw +from common.utils.tf_utils import summarize_graph def make_positive_array(inputs_dict): @@ -80,7 +80,7 @@ 
def save_pb_to_tflite(pb_model): return tflite_model_path -def get_tflite_results(use_new_frontend, use_old_api, inputs_dict, model_path): +def get_tflite_results(use_new_frontend, inputs_dict, model_path): interpreter = tf.compat.v1.lite.Interpreter(model_path=model_path) interpreter.allocate_tensors() input_details = interpreter.get_input_details() @@ -108,8 +108,7 @@ def get_tflite_results(use_new_frontend, use_old_api, inputs_dict, model_path): result = dict() for out in tf_lite_result.keys(): _tf_res = tf_lite_result[out] - result[out] = transpose_nhwc_to_nchw(_tf_res, use_new_frontend, - use_old_api) + result[out] = _tf_res return tf_lite_result diff --git a/tests/layer_tests/conftest.py b/tests/layer_tests/conftest.py index 5c9e4ea9cc71a9..206d5a8573065e 100644 --- a/tests/layer_tests/conftest.py +++ b/tests/layer_tests/conftest.py @@ -65,11 +65,6 @@ def pytest_addoption(parser): required=False, action="store_true", help="Use Model Optimizer with new FrontEnd") - parser.addoption( - "--use_old_api", - action="store_true", - help="Use old API for model processing in Inference Engine", - ) parser.addoption( "--tflite", required=False, @@ -89,24 +84,12 @@ def use_new_frontend(request): return request.config.getoption('use_new_frontend') -@pytest.fixture(scope="session") -def use_old_api(request): - """Fixture function for command-line option.""" - return request.config.getoption('use_old_api') - - @pytest.fixture(scope="session") def tflite(request): """Fixture function for command-line option.""" return request.config.getoption('tflite') -@pytest.fixture(scope="session", autouse=True) -def checks_for_keys_usage(request): - if request.config.getoption('use_old_api') and request.config.getoption('use_new_frontend'): - pytest.fail("Old API and new FrontEnd usage detected. 
Old API doesn't support new FrontEnd") - - @pytest.fixture(scope="function") def temp_dir(request): """Create directory for test purposes.""" diff --git a/tests/layer_tests/mo_python_api_tests/test_mo_convert_complex_params.py b/tests/layer_tests/mo_python_api_tests/test_mo_convert_complex_params.py index a9f04a702249cc..8a7ebc01f958a7 100644 --- a/tests/layer_tests/mo_python_api_tests/test_mo_convert_complex_params.py +++ b/tests/layer_tests/mo_python_api_tests/test_mo_convert_complex_params.py @@ -187,7 +187,7 @@ def create_tf_param_res_model(self, tmp_dir): @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly def test_mo_convert_tf_model(self, params, ie_device, precision, ir_version, - temp_dir, use_new_frontend, use_old_api): + temp_dir, use_new_frontend): tf_net_path = self.create_tf_model(temp_dir) test_params = params['params_test'] @@ -221,7 +221,7 @@ def test_mo_convert_tf_model(self, params, ie_device, precision, ir_version, @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly def test_mo_convert_tf_model_no_concat(self, params, ie_device, precision, ir_version, - temp_dir, use_new_frontend, use_old_api): + temp_dir, use_new_frontend): tf_net_path = self.create_tf_model_no_concat(temp_dir) test_params = params['params_test'] @@ -310,7 +310,7 @@ def test_mo_convert_tf_model_no_concat(self, params, ie_device, precision, ir_ve @pytest.mark.nightly @pytest.mark.precommit def test_mo_convert_tf_model_single_input_output(self, params, ie_device, precision, ir_version, - temp_dir, use_new_frontend, use_old_api): + temp_dir, use_new_frontend): tf_net_path = self.create_tf_model_single_input_output(temp_dir) test_params = params['params_test'] @@ -323,7 +323,7 @@ def test_mo_convert_tf_model_single_input_output(self, params, ie_device, precis @pytest.mark.nightly @pytest.mark.precommit def test_mo_convert_clearing_transformation_registry(self, ie_device, precision, ir_version, - temp_dir, use_new_frontend, use_old_api): + temp_dir, 
use_new_frontend): tf_net_path = self.create_tf_model_single_input_output(temp_dir) from openvino.tools.mo import convert_model diff --git a/tests/layer_tests/mo_python_api_tests/test_mo_convert_extensions.py b/tests/layer_tests/mo_python_api_tests/test_mo_convert_extensions.py index b510104e9e2235..d379e816038c2b 100644 --- a/tests/layer_tests/mo_python_api_tests/test_mo_convert_extensions.py +++ b/tests/layer_tests/mo_python_api_tests/test_mo_convert_extensions.py @@ -113,7 +113,7 @@ def create_ref_graph2(): @pytest.mark.nightly @pytest.mark.precommit def test_mo_convert_extensions(self, params, ie_device, precision, ir_version, - temp_dir, use_new_frontend, use_old_api): + temp_dir, use_new_frontend): onnx_net_path = self.create_onnx_model(temp_dir) test_params = params['params_test'] diff --git a/tests/layer_tests/mo_python_api_tests/test_mo_convert_pytorch.py b/tests/layer_tests/mo_python_api_tests/test_mo_convert_pytorch.py index 47bf252eed295b..9090ead08586ec 100644 --- a/tests/layer_tests/mo_python_api_tests/test_mo_convert_pytorch.py +++ b/tests/layer_tests/mo_python_api_tests/test_mo_convert_pytorch.py @@ -1011,7 +1011,7 @@ class TestMoConvertPyTorch(CommonMOConvertTest): @pytest.mark.nightly @pytest.mark.precommit def test_mo_import_from_memory(self, create_model, ie_device, precision, ir_version, - temp_dir, use_new_frontend, use_old_api): + temp_dir, use_new_frontend): fw_model, graph_ref, mo_params = create_model(temp_dir) test_params = {'input_model': fw_model} @@ -1240,7 +1240,7 @@ class TestPrecisionSensitive(): 'aarch64', 'arm64', 'ARM64'), reason='Ticket - 122714, 122710') - def test_precision_sensitive(self, create_model, ie_device, precision, ir_version, temp_dir, use_new_frontend, use_old_api): + def test_precision_sensitive(self, create_model, ie_device, precision, ir_version, temp_dir, use_new_frontend): import numpy.testing as npt from pathlib import Path diff --git a/tests/layer_tests/onnx_tests/test_abs.py 
b/tests/layer_tests/onnx_tests/test_abs.py index 77a538f16993b0..17b53d974d3de1 100644 --- a/tests/layer_tests/onnx_tests/test_abs.py +++ b/tests/layer_tests/onnx_tests/test_abs.py @@ -167,12 +167,12 @@ def create_net_const(self, shape, precision, ir_version): @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_abs(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_abs(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, - ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_abs_const(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_abs_const(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net_const(**params, precision=precision, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) diff --git a/tests/layer_tests/onnx_tests/test_add_sub_mul_div.py b/tests/layer_tests/onnx_tests/test_add_sub_mul_div.py index 59eb731a7557c7..9c71f08b55d50d 100644 --- a/tests/layer_tests/onnx_tests/test_add_sub_mul_div.py +++ b/tests/layer_tests/onnx_tests/test_add_sub_mul_div.py @@ -194,168 +194,168 @@ def create_net_const(self, shape1, shape2, op, precision, ir_version, opset=None @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_add(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_add(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net(**params, op='Add', precision=precision, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) 
@pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_add_const(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_add_const(self, params, ie_device, precision, ir_version, temp_dir): self._test( *self.create_net_const(**params, op='Add', precision=precision, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_sub(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_sub(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net(**params, op='Sub', precision=precision, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_sub_const(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_sub_const(self, params, ie_device, precision, ir_version, temp_dir): self._test( *self.create_net_const(**params, op='Sub', precision=precision, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_mul(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_mul(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net(**params, op='Mul', precision=precision, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_mul_const(self, params, ie_device, precision, ir_version, 
temp_dir, use_old_api): + def test_mul_const(self, params, ie_device, precision, ir_version, temp_dir): self._test( *self.create_net_const(**params, op='Mul', precision=precision, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_div(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_div(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net(**params, op='Div', precision=precision, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_div_const(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_div_const(self, params, ie_device, precision, ir_version, temp_dir): self._test( *self.create_net_const(**params, op='Div', precision=precision, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data_precommit) @pytest.mark.precommit - def test_add_precommit(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_add_precommit(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net(**params, op='Add', precision=precision, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data_precommit) @pytest.mark.precommit - def test_add_const_precommit(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_add_const_precommit(self, params, ie_device, 
precision, ir_version, temp_dir): self._test( *self.create_net_const(**params, op='Add', precision=precision, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data_precommit) @pytest.mark.precommit - def test_sub_precommit(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_sub_precommit(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net(**params, op='Sub', precision=precision, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data_precommit) @pytest.mark.precommit - def test_sub_const_precommit(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_sub_const_precommit(self, params, ie_device, precision, ir_version, temp_dir): self._test( *self.create_net_const(**params, op='Sub', precision=precision, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data_precommit) @pytest.mark.precommit - def test_mul_precommit(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_mul_precommit(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net(**params, op='Mul', precision=precision, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data_precommit) @pytest.mark.precommit - def test_mul_const_precommit(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_mul_const_precommit(self, params, ie_device, 
precision, ir_version, temp_dir): self._test( *self.create_net_const(**params, op='Mul', precision=precision, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data_precommit) @pytest.mark.precommit - def test_div_precommit(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_div_precommit(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net(**params, op='Div', precision=precision, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data_precommit) @pytest.mark.precommit - def test_div_const_precommit(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_div_const_precommit(self, params, ie_device, precision, ir_version, temp_dir): self._test( *self.create_net_const(**params, op='Div', precision=precision, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly @pytest.mark.skip(reason='GREEN_SUITE') - def test_add_opset6(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_add_opset6(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net(**params, op='Add', precision=precision, opset=6, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly @pytest.mark.skip(reason='GREEN_SUITE') - def test_add_const_opset6(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def 
test_add_const_opset6(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net_const(**params, op='Add', precision=precision, opset=6, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly @pytest.mark.skip(reason='GREEN_SUITE') - def test_sub_opset6(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_sub_opset6(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net(**params, op='Sub', precision=precision, opset=6, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly @pytest.mark.skip(reason='GREEN_SUITE') - def test_sub_const_opset6(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_sub_const_opset6(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net_const(**params, op='Sub', precision=precision, opset=6, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly @pytest.mark.skip(reason='GREEN_SUITE') - def test_mul_opset6(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_mul_opset6(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net(**params, op='Mul', precision=precision, opset=6, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly 
@pytest.mark.skip(reason='GREEN_SUITE') - def test_mul_const_opset6(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_mul_const_opset6(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net_const(**params, op='Mul', precision=precision, opset=6, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly @pytest.mark.skip(reason='GREEN_SUITE') - def test_div_opset6(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_div_opset6(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net(**params, op='Div', precision=precision, opset=6, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly @pytest.mark.skip(reason='GREEN_SUITE') - def test_div_const_opset6(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_div_const_opset6(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net_const(**params, op='Div', precision=precision, opset=6, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) diff --git a/tests/layer_tests/onnx_tests/test_and.py b/tests/layer_tests/onnx_tests/test_and.py index 63ebc162d5b006..0d8f0f06f97029 100644 --- a/tests/layer_tests/onnx_tests/test_and.py +++ b/tests/layer_tests/onnx_tests/test_and.py @@ -251,21 +251,21 @@ def create_net_const(self, shape1, shape2, ir_version): @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_and(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def 
test_and(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_and_one_const(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_and_one_const(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net_one_const(**params, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_and_const(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_and_const(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net_const(**params, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) diff --git a/tests/layer_tests/onnx_tests/test_argmax.py b/tests/layer_tests/onnx_tests/test_argmax.py index 799ffd84429fbf..1afb65a71aae77 100644 --- a/tests/layer_tests/onnx_tests/test_argmax.py +++ b/tests/layer_tests/onnx_tests/test_argmax.py @@ -150,8 +150,8 @@ def create_net(self, shape, axis, keepdims, ir_version): @pytest.mark.parametrize("params", test_data) @pytest.mark.parametrize("keepdims", [None, 0]) @pytest.mark.nightly - def test_argmax(self, params, keepdims, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_argmax(self, params, keepdims, ie_device, precision, ir_version, temp_dir): if ie_device == 'CPU': pytest.skip('GREEN_SUITE') self._test(*self.create_net(**params, ir_version=ir_version, keepdims=keepdims), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) diff --git 
a/tests/layer_tests/onnx_tests/test_bn.py b/tests/layer_tests/onnx_tests/test_bn.py index 0bbd0c82c8c794..3fcf8a8f86b029 100644 --- a/tests/layer_tests/onnx_tests/test_bn.py +++ b/tests/layer_tests/onnx_tests/test_bn.py @@ -116,19 +116,19 @@ def create_net(self, shape, epsilon, precision, ir_version, opset=None): @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_bn(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_bn(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net(**params, precision=precision, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly @pytest.mark.skip(reason='GREEN_SUITE') - def test_bn_opset6(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_bn_opset6(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net(**params, precision=precision, opset=6, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_bn_opset7(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_bn_opset7(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net(**params, precision=precision, opset=7, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) diff --git a/tests/layer_tests/onnx_tests/test_ceil.py b/tests/layer_tests/onnx_tests/test_ceil.py index 48c69e817ed856..2892c5e5b25a9e 100644 --- a/tests/layer_tests/onnx_tests/test_ceil.py +++ b/tests/layer_tests/onnx_tests/test_ceil.py @@ -171,26 +171,26 @@ def 
create_net_const(self, shape, precision, ir_version): @pytest.mark.parametrize("params", test_data_precommit) @pytest.mark.precommit - def test_ceil_precommit(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_ceil_precommit(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data_precommit) @pytest.mark.precommit - def test_ceil_const_precommit(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_ceil_const_precommit(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net_const(**params, precision=precision, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_ceil(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_ceil(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_ceil_const(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_ceil_const(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net_const(**params, precision=precision, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) diff --git a/tests/layer_tests/onnx_tests/test_clip.py b/tests/layer_tests/onnx_tests/test_clip.py index 3db63c33941b0b..70e697f51b1915 100644 --- 
a/tests/layer_tests/onnx_tests/test_clip.py +++ b/tests/layer_tests/onnx_tests/test_clip.py @@ -160,14 +160,14 @@ def create_net(self, shape, ir_version, opset, min=None, max=None): @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_clip_opset6(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_clip_opset6(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net(**params, ir_version=ir_version, opset=6), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_clip_opset11(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_clip_opset11(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net(**params, ir_version=ir_version, opset=11), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) diff --git a/tests/layer_tests/onnx_tests/test_concat.py b/tests/layer_tests/onnx_tests/test_concat.py index a8e988cf29c53b..4f539235aaeb4b 100644 --- a/tests/layer_tests/onnx_tests/test_concat.py +++ b/tests/layer_tests/onnx_tests/test_concat.py @@ -252,39 +252,37 @@ def create_concat_net(self, input_shape, output_shape, axis, input_names, ir_ver @pytest.mark.parametrize("params", test_data_3D) @pytest.mark.nightly - def test_concat_3D_const(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_concat_3D_const(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_concat_net_const(**params, ir_version=ir_version), ie_device, - precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data_4D_precommit) @pytest.mark.precommit - def test_concat_4D_const_precommit(self, params, ie_device, precision, ir_version, temp_dir, - 
use_old_api): + def test_concat_4D_const_precommit(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_concat_net_const(**params, ir_version=ir_version), ie_device, - precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data_4D) @pytest.mark.nightly - def test_concat_4D_const(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_concat_4D_const(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_concat_net_const(**params, ir_version=ir_version), ie_device, - precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data_5D_precommit) @pytest.mark.nightly - def test_concat_5D_const_precommit(self, params, ie_device, precision, ir_version, temp_dir, - use_old_api): + def test_concat_5D_const_precommit(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_concat_net_const(**params, ir_version=ir_version), ie_device, - precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data_5D) @pytest.mark.nightly - def test_concat_5D_const(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_concat_5D_const(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_concat_net_const(**params, ir_version=ir_version), ie_device, - precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_concat_inputs_order_params) @pytest.mark.nightly - def test_concat_inputs_order(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_concat_inputs_order(self, params, ie_device, precision, ir_version, temp_dir): 
self._test(*self.create_concat_net(**params, ir_version=ir_version), ie_device=ie_device, precision=precision, ir_version=ir_version, temp_dir=temp_dir, - input_names=params['input_names'], use_old_api=use_old_api) + input_names=params['input_names']) diff --git a/tests/layer_tests/onnx_tests/test_conv.py b/tests/layer_tests/onnx_tests/test_conv.py index cd1a28238eb045..99d8d833d1d9a1 100644 --- a/tests/layer_tests/onnx_tests/test_conv.py +++ b/tests/layer_tests/onnx_tests/test_conv.py @@ -414,11 +414,11 @@ def create_net(self, shape, weights_shape, dilations, group, pads, strides, bias @pytest.mark.parametrize("bias", [False, True]) @pytest.mark.nightly def test_conv_3D(self, params, dilations, pads, strides, bias, ie_device, precision, ir_version, - temp_dir, use_old_api): + temp_dir): self._test(*self.create_net(**params, shape=[2, 3, 25], dilations=dilations, pads=pads, strides=strides, bias=bias, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data_3D_autopad[:-1]) @pytest.mark.parametrize("auto_pad", ['SAME_UPPER', 'SAME_LOWER']) @@ -426,10 +426,10 @@ def test_conv_3D(self, params, dilations, pads, strides, bias, ie_device, precis @pytest.mark.nightly @pytest.mark.skip(reason='GREEN_SUITE') def test_conv_3D_autopad(self, params, auto_pad, bias, ie_device, precision, ir_version, - temp_dir, use_old_api): + temp_dir): self._test(*self.create_net(**params, shape=[2, 3, 25], bias=bias, auto_pad=auto_pad, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data_4D_precommit) @pytest.mark.parametrize("dilations", [[3, 5]]) @@ -438,11 +438,11 @@ def test_conv_3D_autopad(self, params, auto_pad, bias, ie_device, precision, ir_ @pytest.mark.parametrize("bias", [False, True]) 
@pytest.mark.precommit def test_conv_4D_precommit(self, params, dilations, pads, strides, bias, ie_device, precision, - ir_version, temp_dir, use_old_api): + ir_version, temp_dir): self._test(*self.create_net(**params, shape=[2, 3, 25, 25], dilations=dilations, pads=pads, strides=strides, bias=bias, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data_4D) @pytest.mark.parametrize("dilations", [[1, 1], [2, 2], [3, 5]]) @@ -451,12 +451,12 @@ def test_conv_4D_precommit(self, params, dilations, pads, strides, bias, ie_devi @pytest.mark.parametrize("bias", [False, True]) @pytest.mark.nightly def test_conv_4D(self, params, dilations, pads, strides, bias, ie_device, precision, ir_version, - temp_dir, use_old_api): + temp_dir): self._test( *self.create_net(**params, shape=[2, 3, 25, 25], dilations=dilations, pads=pads, strides=strides, bias=bias, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data_4D_autopad[:-1]) @pytest.mark.parametrize("auto_pad", ['SAME_UPPER', 'SAME_LOWER']) @@ -464,10 +464,10 @@ def test_conv_4D(self, params, dilations, pads, strides, bias, ie_device, precis @pytest.mark.nightly @pytest.mark.skip(reason='GREEN_SUITE') def test_conv_4D_autopad(self, params, auto_pad, bias, ie_device, precision, ir_version, - temp_dir, use_old_api): + temp_dir): self._test(*self.create_net(**params, shape=[2, 3, 25, 25], bias=bias, auto_pad=auto_pad, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data_5D_precommit) @pytest.mark.parametrize("dilations", [[3, 4, 5]]) @@ -476,13 +476,13 @@ def test_conv_4D_autopad(self, params, auto_pad, bias, ie_device, precision, ir_ 
@pytest.mark.parametrize("bias", [False, True]) @pytest.mark.precommit def test_conv_5D_precommit(self, params, dilations, pads, strides, bias, ie_device, precision, - ir_version, temp_dir, use_old_api): + ir_version, temp_dir): custom_eps_value = 1e-1 if ie_device == 'GPU' and precision == 'FP16' else None self._test( *self.create_net(**params, shape=[2, 3, 25, 25, 25], dilations=dilations, pads=pads, strides=strides, bias=bias, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api, custom_eps=custom_eps_value) + ie_device, precision, ir_version, temp_dir=temp_dir, custom_eps=custom_eps_value) @pytest.mark.parametrize("params", test_data_5D) @pytest.mark.parametrize("dilations", [[1, 1, 1], [2, 2, 2], [3, 4, 5]]) @@ -492,12 +492,12 @@ def test_conv_5D_precommit(self, params, dilations, pads, strides, bias, ie_devi @pytest.mark.nightly @pytest.mark.skip(reason='GREEN_SUITE') def test_conv_5D(self, params, dilations, pads, strides, bias, ie_device, precision, ir_version, - temp_dir, use_old_api): + temp_dir): self._test( *self.create_net(**params, shape=[2, 3, 25, 25, 25], dilations=dilations, pads=pads, strides=strides, bias=bias, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data_5D_autopad[:-1]) @pytest.mark.parametrize("auto_pad", ['SAME_UPPER', 'SAME_LOWER']) @@ -505,8 +505,8 @@ def test_conv_5D(self, params, dilations, pads, strides, bias, ie_device, precis @pytest.mark.nightly @pytest.mark.skip(reason='GREEN_SUITE') def test_conv_5D_autopad(self, params, auto_pad, bias, ie_device, precision, ir_version, - temp_dir, use_old_api): + temp_dir): self._test( *self.create_net(**params, shape=[2, 3, 25, 25, 25], bias=bias, auto_pad=auto_pad, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) 
diff --git a/tests/layer_tests/onnx_tests/test_conv_transpose.py b/tests/layer_tests/onnx_tests/test_conv_transpose.py index 5fcc3715256a83..de3ae2932977fa 100644 --- a/tests/layer_tests/onnx_tests/test_conv_transpose.py +++ b/tests/layer_tests/onnx_tests/test_conv_transpose.py @@ -184,12 +184,12 @@ def create_conv_transpose(self, ir_version, input_shape, output_shape, kernel_sh @pytest.mark.parametrize("auto_pad", ["NOTSET"]) @pytest.mark.precommit def test_conv_transpose_4D_precommit(self, params, bias, ie_device, precision, ir_version, - auto_pad, temp_dir, use_old_api): + auto_pad, temp_dir): if ie_device == 'GPU' and 'dilations' in params: pytest.xfail('dilations are not supported on GPU') self._test(*self.create_conv_transpose(**params, ir_version=ir_version, bias=bias, auto_pad=auto_pad), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", explicit_pads_tests_4D) @pytest.mark.parametrize("bias", [False, True]) @@ -197,12 +197,12 @@ def test_conv_transpose_4D_precommit(self, params, bias, ie_device, precision, i @pytest.mark.nightly @pytest.mark.skip(reason='GREEN_SUITE') def test_conv_transpose_4D(self, params, bias, ie_device, precision, ir_version, auto_pad, - temp_dir, use_old_api): + temp_dir): if ie_device == 'GPU' and 'dilations' in params: pytest.xfail('dilations are not supported on GPU') self._test(*self.create_conv_transpose(**params, ir_version=ir_version, bias=bias, auto_pad=auto_pad), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", valid_auto_pad_tests_4D) @pytest.mark.parametrize("bias", [False, True]) @@ -210,12 +210,12 @@ def test_conv_transpose_4D(self, params, bias, ie_device, precision, ir_version, @pytest.mark.nightly @pytest.mark.skip(reason='GREEN_SUITE') def 
test_conv_transpose_valid_auto_pad_4D(self, params, bias, ie_device, precision, ir_version, - auto_pad, temp_dir, use_old_api): + auto_pad, temp_dir): if ie_device == 'GPU' and 'dilations' in params: pytest.xfail('dilations are not supported on GPU') self._test(*self.create_conv_transpose(**params, ir_version=ir_version, bias=bias, auto_pad=auto_pad), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", same_auto_pad_tests_4D) @pytest.mark.parametrize("bias", [False, True]) @@ -223,9 +223,9 @@ def test_conv_transpose_valid_auto_pad_4D(self, params, bias, ie_device, precisi @pytest.mark.nightly @pytest.mark.skip(reason='GREEN_SUITE') def test_conv_transpose_same_auto_pad_4D(self, params, bias, ie_device, precision, ir_version, - auto_pad, temp_dir, use_old_api): + auto_pad, temp_dir): if ie_device == 'GPU' and 'dilations' in params: pytest.xfail('dilations are not supported on GPU') self._test(*self.create_conv_transpose(**params, ir_version=ir_version, bias=bias, auto_pad=auto_pad), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) diff --git a/tests/layer_tests/onnx_tests/test_cumsum.py b/tests/layer_tests/onnx_tests/test_cumsum.py index 7b37599286cf3f..f052c10102f5f1 100644 --- a/tests/layer_tests/onnx_tests/test_cumsum.py +++ b/tests/layer_tests/onnx_tests/test_cumsum.py @@ -252,22 +252,21 @@ def create_net_const(self, shape, precision, ir_version, axis=None, reverse=None @pytest.mark.parametrize("reverse", [0, 1]) @pytest.mark.parametrize("exclusive", [0, 1]) @pytest.mark.nightly - def test_cumsum(self, params, reverse, exclusive, ie_device, precision, ir_version, temp_dir, - use_old_api): + def test_cumsum(self, params, reverse, exclusive, ie_device, precision, ir_version, temp_dir): if 'axis' not in params: pytest.skip('No axis cases fail in ONNX') elif 'axis' 
in params and params['axis'] == -2 and exclusive == 1: pytest.skip('Disabled due to an exception thrown by ONNXRuntime for this use case') self._test( *self.create_net(**params, exclusive=exclusive, reverse=reverse, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.parametrize("reverse", [0, 1]) @pytest.mark.parametrize("exclusive", [0, 1]) @pytest.mark.nightly def test_cumsum_const(self, params, reverse, exclusive, ie_device, precision, ir_version, - temp_dir, use_old_api): + temp_dir): if 'axis' not in params: pytest.skip('No axis cases fail in ONNX') elif 'axis' in params and params['axis'] == -2 and exclusive == 1: @@ -275,4 +274,4 @@ def test_cumsum_const(self, params, reverse, exclusive, ie_device, precision, ir self._test(*self.create_net_const(**params, precision=precision, exclusive=exclusive, reverse=reverse, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) diff --git a/tests/layer_tests/onnx_tests/test_dequantize_linear.py b/tests/layer_tests/onnx_tests/test_dequantize_linear.py index bd8b0dc4bf0b18..68152bdf75323f 100644 --- a/tests/layer_tests/onnx_tests/test_dequantize_linear.py +++ b/tests/layer_tests/onnx_tests/test_dequantize_linear.py @@ -194,31 +194,28 @@ def create_dequanize_linear(self, shape, y_scale: np.array, y_zero_point=None, a @pytest.mark.parametrize("params", test_data_def_zerop) @pytest.mark.nightly def test_quantize_linear_def_zerop_opset10(self, params, ie_device, precision, ir_version, - temp_dir, use_old_api): + temp_dir): self._test(*self.create_dequanize_linear(**params, ir_version=ir_version), ie_device, - precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def 
test_quantize_linear_opset10(self, params, ie_device, precision, ir_version, temp_dir, - use_old_api): + def test_quantize_linear_opset10(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_dequanize_linear(**params, ir_version=ir_version), ie_device, - precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data + test_data_def_zerop) @pytest.mark.nightly @pytest.mark.skip(reason='DequantizeLinear-13 is unsupported in MO') - def test_quantize_linear_opset13(self, params, ie_device, precision, ir_version, temp_dir, - use_old_api): + def test_quantize_linear_opset13(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_dequanize_linear(**params, opset=13, ir_version=ir_version), ie_device, precision, - ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data_axis) @pytest.mark.nightly @pytest.mark.skip(reason='DequantizeLinear-13 is unsupported in MO') - def test_quantize_linear_axis_opset13(self, params, ie_device, precision, ir_version, temp_dir, - use_old_api): + def test_quantize_linear_axis_opset13(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_dequanize_linear(**params, opset=13, ir_version=ir_version), ie_device, precision, - ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ir_version, temp_dir=temp_dir) diff --git a/tests/layer_tests/onnx_tests/test_dropout.py b/tests/layer_tests/onnx_tests/test_dropout.py index 68494b6212a730..5fd54ea1f54fa4 100644 --- a/tests/layer_tests/onnx_tests/test_dropout.py +++ b/tests/layer_tests/onnx_tests/test_dropout.py @@ -147,30 +147,30 @@ def create_net_const(self, shape, ratio, ir_version, opset=None): @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly @pytest.mark.skip(reason='GREEN_SUITE') - def test_dropout_opset6(self, params, 
ie_device, precision, ir_version, temp_dir, use_old_api): + def test_dropout_opset6(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net(**params, opset=6, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly @pytest.mark.skip(reason='GREEN_SUITE') - def test_dropout(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_dropout(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_dropout_const_opset6(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_dropout_const_opset6(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net_const(**params, opset=6, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly @pytest.mark.skip(reason='GREEN_SUITE') - def test_dropout_const(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_dropout_const(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net_const(**params, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) diff --git a/tests/layer_tests/onnx_tests/test_elu.py b/tests/layer_tests/onnx_tests/test_elu.py index be239de0a0746d..d1f67f79e32b2f 100644 --- a/tests/layer_tests/onnx_tests/test_elu.py +++ b/tests/layer_tests/onnx_tests/test_elu.py @@ -175,13 +175,13 @@ def create_net_const(self, shape, alpha, precision, ir_version): 
@pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_elu(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_elu(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_elu_const(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_elu_const(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net_const(**params, precision=precision, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) diff --git a/tests/layer_tests/onnx_tests/test_embedding_bag.py b/tests/layer_tests/onnx_tests/test_embedding_bag.py index 579db898ebe465..77fd4caa63de22 100644 --- a/tests/layer_tests/onnx_tests/test_embedding_bag.py +++ b/tests/layer_tests/onnx_tests/test_embedding_bag.py @@ -132,6 +132,5 @@ def create_net(self, n, m, emb_batch_size, ir_version, per_sample_weights=False, @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly @pytest.mark.skip(reason='GREEN_SUITE') - def test_pytorch_embedding_bag(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): - self._test(*self.create_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_old_api=use_old_api) + def test_pytorch_embedding_bag(self, params, ie_device, precision, ir_version, temp_dir): + self._test(*self.create_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir) diff --git a/tests/layer_tests/onnx_tests/test_flatten.py b/tests/layer_tests/onnx_tests/test_flatten.py index bc00664545a295..5fcc868409a4dd 100644 --- a/tests/layer_tests/onnx_tests/test_flatten.py +++ 
b/tests/layer_tests/onnx_tests/test_flatten.py @@ -184,126 +184,121 @@ def create_flatten_net_const(self, axis, input_shape, dim, ir_version, opset=Non @pytest.mark.parametrize("params", test_data_3D) @pytest.mark.parametrize("opset", [6, 9]) @pytest.mark.nightly - def test_flatten_3D(self, params, opset, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_flatten_3D(self, params, opset, ie_device, precision, ir_version, temp_dir): # negative axis not allowed by onnx spec for flatten-1 and flatten-9 if params['axis'] < 0: self.skip_framework = True else: self.skip_framework = False self._test(*self.create_flatten_net(**params, ir_version=ir_version, opset=opset), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data_3D) @pytest.mark.parametrize("opset", [6, 9]) @pytest.mark.nightly - def test_flatten_3D_const(self, params, opset, ie_device, precision, ir_version, temp_dir, - use_old_api): + def test_flatten_3D_const(self, params, opset, ie_device, precision, ir_version, temp_dir): # negative axis not allowed by onnx spec for flatten-1 and flatten-9 if params['axis'] < 0: self.skip_framework = True else: self.skip_framework = False self._test(*self.create_flatten_net_const(**params, ir_version=ir_version, opset=opset), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data_4D) @pytest.mark.parametrize("opset", [6, 9]) @pytest.mark.nightly - def test_flatten_4D(self, params, opset, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_flatten_4D(self, params, opset, ie_device, precision, ir_version, temp_dir): # negative axis not allowed by onnx spec for flatten-1 and flatten-9 if params['axis'] < 0: self.skip_framework = True else: self.skip_framework = False 
self._test(*self.create_flatten_net(**params, ir_version=ir_version, opset=opset), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data_4D_precommit) @pytest.mark.parametrize("opset", [6, 9]) @pytest.mark.precommit - def test_flatten_4D_precommit(self, params, opset, ie_device, precision, ir_version, temp_dir, - use_old_api): + def test_flatten_4D_precommit(self, params, opset, ie_device, precision, ir_version, temp_dir): # negative axis not allowed by onnx spec for flatten-1 and flatten-9 if params['axis'] < 0: self.skip_framework = True else: self.skip_framework = False self._test(*self.create_flatten_net(**params, ir_version=ir_version, opset=opset), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data_4D_precommit) @pytest.mark.parametrize("opset", [6, 9]) @pytest.mark.nightly def test_flatten_4D_const_precommit(self, params, opset, ie_device, precision, ir_version, - temp_dir, use_old_api): + temp_dir): # negative axis not allowed by onnx spec for flatten-1 and flatten-9 if params['axis'] < 0: self.skip_framework = True else: self.skip_framework = False self._test(*self.create_flatten_net_const(**params, ir_version=ir_version, opset=opset), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data_4D) @pytest.mark.parametrize("opset", [6, 9]) @pytest.mark.nightly - def test_flatten_4D_const(self, params, opset, ie_device, precision, ir_version, temp_dir, - use_old_api): + def test_flatten_4D_const(self, params, opset, ie_device, precision, ir_version, temp_dir): # negative axis not allowed by onnx spec for flatten-1 and flatten-9 if params['axis'] < 0: self.skip_framework = True else: 
self.skip_framework = False self._test(*self.create_flatten_net_const(**params, ir_version=ir_version, opset=opset), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data_5D_precommit) @pytest.mark.parametrize("opset", [6, 9]) @pytest.mark.nightly - def test_flatten_5D_precommit(self, params, opset, ie_device, precision, ir_version, temp_dir, - use_old_api): + def test_flatten_5D_precommit(self, params, opset, ie_device, precision, ir_version, temp_dir): # negative axis not allowed by onnx spec for flatten-1 and flatten-9 if params['axis'] < 0: self.skip_framework = True else: self.skip_framework = False self._test(*self.create_flatten_net(**params, ir_version=ir_version, opset=opset), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data_5D) @pytest.mark.parametrize("opset", [6, 9]) @pytest.mark.nightly - def test_flatten_5D(self, params, opset, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_flatten_5D(self, params, opset, ie_device, precision, ir_version, temp_dir): # negative axis not allowed by onnx spec for flatten-1 and flatten-9 if params['axis'] < 0: self.skip_framework = True else: self.skip_framework = False self._test(*self.create_flatten_net(**params, ir_version=ir_version, opset=opset), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data_5D_precommit) @pytest.mark.parametrize("opset", [6, 9]) @pytest.mark.nightly def test_flatten_5D_const_precommit(self, params, opset, ie_device, precision, ir_version, - temp_dir, use_old_api): + temp_dir): # negative axis not allowed by onnx spec for flatten-1 and flatten-9 if params['axis'] < 0: self.skip_framework = True 
else: self.skip_framework = False self._test(*self.create_flatten_net_const(**params, ir_version=ir_version, opset=opset), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data_5D) @pytest.mark.parametrize("opset", [6, 9]) @pytest.mark.nightly - def test_flatten_5D_const(self, params, opset, ie_device, precision, ir_version, temp_dir, - use_old_api): + def test_flatten_5D_const(self, params, opset, ie_device, precision, ir_version, temp_dir): # negative axis not allowed by onnx spec for flatten-1 and flatten-9 if params['axis'] < 0: self.skip_framework = True else: self.skip_framework = False self._test(*self.create_flatten_net_const(**params, ir_version=ir_version, opset=opset), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) diff --git a/tests/layer_tests/onnx_tests/test_floor.py b/tests/layer_tests/onnx_tests/test_floor.py index da9214828ee762..ebeda4bb8c04e2 100644 --- a/tests/layer_tests/onnx_tests/test_floor.py +++ b/tests/layer_tests/onnx_tests/test_floor.py @@ -168,13 +168,13 @@ def create_net_const(self, shape, precision, ir_version): @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_floor(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_floor(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_floor_const(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_floor_const(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net_const(**params, precision=precision, 
ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) diff --git a/tests/layer_tests/onnx_tests/test_fusedgemm.py b/tests/layer_tests/onnx_tests/test_fusedgemm.py index ae9ffbecbb6581..cc62a5476bba15 100644 --- a/tests/layer_tests/onnx_tests/test_fusedgemm.py +++ b/tests/layer_tests/onnx_tests/test_fusedgemm.py @@ -138,7 +138,7 @@ def create_net(self, shapeA, shapeB, shapeC, alpha, beta, trans_a, trans_b, @pytest.mark.nightly @pytest.mark.precommit def test_fusedgemm(self, params, alpha, beta, trans_a, trans_b, - ie_device, precision, opset, ir_version, temp_dir, use_old_api): + ie_device, precision, opset, ir_version, temp_dir): self._test( *self.create_net(params['shapeA'], params['shapeB'], params['shapeC'], alpha, beta, trans_a, trans_b, @@ -146,4 +146,4 @@ def test_fusedgemm(self, params, alpha, beta, trans_a, trans_b, params['activation_beta'], params['activation_gamma'], opset, ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api, custom_eps=1e-2) + temp_dir=temp_dir, custom_eps=1e-2) diff --git a/tests/layer_tests/onnx_tests/test_gather.py b/tests/layer_tests/onnx_tests/test_gather.py index 503508288c65c4..418c8a80855ecd 100644 --- a/tests/layer_tests/onnx_tests/test_gather.py +++ b/tests/layer_tests/onnx_tests/test_gather.py @@ -247,24 +247,24 @@ def create_net_const(self, shape, axis, indices, output_shape, ir_version): @pytest.mark.parametrize("params", test_data_precommit) @pytest.mark.precommit - def test_gather(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_gather(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_gather(self, 
params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_gather(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_gather_const(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_gather_const(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net_const(**params, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) test_data_negative_indices = [ dict(shape=[10, 12], axis=0, indices=[3, -1, -4], output_shape=[3, 12]), @@ -278,8 +278,8 @@ def test_gather_const(self, params, ie_device, precision, ir_version, temp_dir, @pytest.mark.parametrize("params", test_data_negative_indices) @pytest.mark.nightly def test_gather_nightly_negative_indices(self, params, ie_device, precision, ir_version, - temp_dir, use_old_api): + temp_dir): if ie_device == 'GPU': pytest.skip('GREEN_SUITE') self._test(*self.create_net(**params, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) diff --git a/tests/layer_tests/onnx_tests/test_gemm.py b/tests/layer_tests/onnx_tests/test_gemm.py index 58077ed98a2753..82bca56f3dee28 100644 --- a/tests/layer_tests/onnx_tests/test_gemm.py +++ b/tests/layer_tests/onnx_tests/test_gemm.py @@ -226,13 +226,13 @@ def create_net_double(self, shapeA, shapeB, shapeC, alpha, beta, trans_a, trans_ @pytest.mark.nightly @pytest.mark.precommit def test_gemm(self, params, alpha, beta, trans_a, trans_b, ie_device, precision, opset, - ir_version, temp_dir, use_old_api): + ir_version, temp_dir): self._test( *self.create_net(params['shapeA'], params['shapeB'], 
params['shapeC'], alpha, beta, trans_a, trans_b, precision, opset, ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data_bc) @pytest.mark.parametrize("alpha", [None, 0.1, 2.0]) @@ -243,13 +243,13 @@ def test_gemm(self, params, alpha, beta, trans_a, trans_b, ie_device, precision, @pytest.mark.nightly @pytest.mark.precommit def test_gemm_bc(self, params, alpha, beta, trans_a, trans_b, ie_device, precision, opset, - ir_version, temp_dir, use_old_api): + ir_version, temp_dir): self._test( *self.create_net(params['shapeA'], params['shapeB'], params['shapeC'], alpha, beta, trans_a, trans_b, precision, opset, ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.parametrize("alpha", [None, 0.1, 2.0]) @@ -259,13 +259,13 @@ def test_gemm_bc(self, params, alpha, beta, trans_a, trans_b, ie_device, precisi @pytest.mark.nightly @pytest.mark.precommit def test_gemm_double(self, params, alpha, beta, trans_a, trans_b, ie_device, precision, - ir_version, temp_dir, use_old_api): + ir_version, temp_dir): self._test( *self.create_net_double(params['shapeA'], params['shapeB'], params['shapeC'], alpha, beta, trans_a, trans_b, precision, ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data_bc) @pytest.mark.parametrize("alpha", [None, 0.1, 2.0]) @@ -275,13 +275,13 @@ def test_gemm_double(self, params, alpha, beta, trans_a, trans_b, ie_device, pre @pytest.mark.nightly @pytest.mark.precommit def test_gemm_double_bc(self, params, alpha, beta, trans_a, trans_b, ie_device, precision, - ir_version, temp_dir, use_old_api): + ir_version, temp_dir): self._test( *self.create_net_double(params['shapeA'], params['shapeB'], params['shapeC'], alpha, beta, trans_a, trans_b, 
precision, ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) class PytorchLayerTest(CommonLayerTest): @@ -335,7 +335,7 @@ def create_net(self, precision, shape, w_shape, output_shape, ir_version): @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly @pytest.mark.skip(reason='GREEN_SUITE') - def test_pytorch_mm(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_pytorch_mm(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net(precision, **params, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) diff --git a/tests/layer_tests/onnx_tests/test_hard_sigmoid.py b/tests/layer_tests/onnx_tests/test_hard_sigmoid.py index 268ed6c2b76075..c68528d052befd 100644 --- a/tests/layer_tests/onnx_tests/test_hard_sigmoid.py +++ b/tests/layer_tests/onnx_tests/test_hard_sigmoid.py @@ -221,20 +221,19 @@ def create_net_const(self, shape, alpha, beta, precision, ir_version): @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_hard_sigmoid(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_hard_sigmoid(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data_precommit) @pytest.mark.nightly - def test_hard_sigmoid_const_precommit(self, params, ie_device, precision, ir_version, temp_dir, - use_old_api): + def test_hard_sigmoid_const_precommit(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net_const(**params, precision=precision, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, 
temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_hard_sigmoid_const(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_hard_sigmoid_const(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net_const(**params, precision=precision, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) diff --git a/tests/layer_tests/onnx_tests/test_identity.py b/tests/layer_tests/onnx_tests/test_identity.py index 99e7e2a1112a4b..b76764baac8f4a 100644 --- a/tests/layer_tests/onnx_tests/test_identity.py +++ b/tests/layer_tests/onnx_tests/test_identity.py @@ -172,13 +172,13 @@ def create_net_const(self, shape, precision, ir_version): @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly @pytest.mark.skip(reason='GREEN_SUITE') - def test_identity(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_identity(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_identity_const(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_identity_const(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net_const(**params, precision=precision, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) diff --git a/tests/layer_tests/onnx_tests/test_image_scaler.py b/tests/layer_tests/onnx_tests/test_image_scaler.py index 21c294c3964f49..cf678ca2e64c12 100644 --- a/tests/layer_tests/onnx_tests/test_image_scaler.py +++ 
b/tests/layer_tests/onnx_tests/test_image_scaler.py @@ -139,29 +139,27 @@ def create_net_const(self, shape, scale, precision, ir_version): dict(shape=[6, 8, 10, 12], scale=4.5)] @pytest.mark.parametrize("params", test_data_precommit) - def test_image_scaler_precommit(self, params, ie_device, precision, ir_version, temp_dir, - use_old_api): + def test_image_scaler_precommit(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly @pytest.mark.skip(reason='GREEN_SUITE') - def test_image_scaler(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_image_scaler(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data_precommit) - def test_image_scaler_const_precommit(self, params, ie_device, precision, ir_version, temp_dir, - use_old_api): + def test_image_scaler_const_precommit(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net_const(**params, precision=precision, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly @pytest.mark.skip(reason='GREEN_SUITE') - def test_image_scaler_const(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_image_scaler_const(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net_const(**params, precision=precision, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, 
use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) diff --git a/tests/layer_tests/onnx_tests/test_instance_normalization.py b/tests/layer_tests/onnx_tests/test_instance_normalization.py index 8ab2bd1dfb8414..936cfbb8d5c633 100644 --- a/tests/layer_tests/onnx_tests/test_instance_normalization.py +++ b/tests/layer_tests/onnx_tests/test_instance_normalization.py @@ -100,14 +100,12 @@ def create_net(self, shape, epsilon, precision, ir_version): @pytest.mark.parametrize("params", test_data_precommit) @pytest.mark.precommit - def test_instance_normalization(self, params, ie_device, precision, ir_version, temp_dir, - use_old_api): + def test_instance_normalization(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net(**params, precision=precision, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_instance_normalization(self, params, ie_device, precision, ir_version, temp_dir, - use_old_api): + def test_instance_normalization(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net(**params, precision=precision, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) diff --git a/tests/layer_tests/onnx_tests/test_leaky_relu.py b/tests/layer_tests/onnx_tests/test_leaky_relu.py index fc531ab5f0b138..10a7895afb8d92 100644 --- a/tests/layer_tests/onnx_tests/test_leaky_relu.py +++ b/tests/layer_tests/onnx_tests/test_leaky_relu.py @@ -181,27 +181,26 @@ def create_net_const(self, shape, alpha, precision, ir_version): @pytest.mark.parametrize("params", test_data_precommit) @pytest.mark.precommit - def test_leaky_relu_precommit(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def 
test_leaky_relu_precommit(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_leaky_relu(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_leaky_relu(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data_precommit) @pytest.mark.precommit - def test_leaky_relu_const_precommit(self, params, ie_device, precision, ir_version, temp_dir, - use_old_api): + def test_leaky_relu_const_precommit(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net_const(**params, precision=precision, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_leaky_relu_const(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_leaky_relu_const(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net_const(**params, precision=precision, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) diff --git a/tests/layer_tests/onnx_tests/test_log.py b/tests/layer_tests/onnx_tests/test_log.py index c13ef33eb31078..f1255640da75f3 100644 --- a/tests/layer_tests/onnx_tests/test_log.py +++ b/tests/layer_tests/onnx_tests/test_log.py @@ -171,26 +171,26 @@ def create_net_const(self, shape, precision, ir_version): @pytest.mark.parametrize("params", 
test_data_precommit) @pytest.mark.precommit - def test_log_precommit(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_log_precommit(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_log(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_log(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data_precommit) @pytest.mark.nightly - def test_log_const_precommit(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_log_const_precommit(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net_const(**params, precision=precision, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_log_const(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_log_const(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net_const(**params, precision=precision, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) diff --git a/tests/layer_tests/onnx_tests/test_logsoftmax.py b/tests/layer_tests/onnx_tests/test_logsoftmax.py index 4f8efd40180d64..99e727d8837ab2 100644 --- a/tests/layer_tests/onnx_tests/test_logsoftmax.py +++ b/tests/layer_tests/onnx_tests/test_logsoftmax.py 
@@ -248,9 +248,9 @@ def create_net(self, shape, logsoftmax_axis, ir_version): @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_log(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_log(self, params, ie_device, precision, ir_version, temp_dir): if ie_device == 'GPU': pytest.skip('GREEN_SUITE') self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) diff --git a/tests/layer_tests/onnx_tests/test_loop.py b/tests/layer_tests/onnx_tests/test_loop.py index 8771537f87e9f9..293fdd906e1918 100644 --- a/tests/layer_tests/onnx_tests/test_loop.py +++ b/tests/layer_tests/onnx_tests/test_loop.py @@ -273,14 +273,14 @@ def create_loop_in_loop(self): @pytest.mark.precommit @pytest.mark.timeout(250) - def test_loop_simple_precommit(self, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_loop_simple_precommit(self, ie_device, precision, ir_version, temp_dir): self._test(*self.create_loop(), ie_device, precision, ir_version, temp_dir=temp_dir, - infer_timeout=150, use_old_api=use_old_api) + infer_timeout=150) @pytest.mark.precommit @pytest.mark.timeout(250) - def test_loop_in_loop_simple_precommit(self, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_loop_in_loop_simple_precommit(self, ie_device, precision, ir_version, temp_dir): if ie_device == 'GPU': pytest.xfail("Program doesn't contain primitive: constant:res/10/M_2 that is input to: loop") self._test(*self.create_loop_in_loop(), ie_device, precision, ir_version, temp_dir=temp_dir, - infer_timeout=150, use_old_api=use_old_api) + infer_timeout=150) diff --git a/tests/layer_tests/onnx_tests/test_lrn.py b/tests/layer_tests/onnx_tests/test_lrn.py index 69186d4fa1cf0e..80ded7657c5e7d 100644 --- a/tests/layer_tests/onnx_tests/test_lrn.py +++ b/tests/layer_tests/onnx_tests/test_lrn.py @@ -111,29 +111,29 @@ def create_net(self, 
shape, alpha, beta, bias, size, ir_version): @pytest.mark.parametrize("params", test_data_precommit) @pytest.mark.precommit - def test_lrn_precommit(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_lrn_precommit(self, params, ie_device, precision, ir_version, temp_dir): # onnxruntime only supports 4D tensors for LRN self.skip_framework = True self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_lrn(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_lrn(self, params, ie_device, precision, ir_version, temp_dir): if ie_device == 'GPU': pytest.skip('GREEN_SUITE') # onnxruntime only supports 4D tensors for LRN self.skip_framework = True self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data_4D) @pytest.mark.nightly @pytest.mark.precommit - def test_lrn_4D(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_lrn_4D(self, params, ie_device, precision, ir_version, temp_dir): self.skip_framework = False self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) diff --git a/tests/layer_tests/onnx_tests/test_lstm.py b/tests/layer_tests/onnx_tests/test_lstm.py index 487baf5f0ff294..eb059326710045 100644 --- a/tests/layer_tests/onnx_tests/test_lstm.py +++ b/tests/layer_tests/onnx_tests/test_lstm.py @@ -144,24 +144,23 @@ def create_lstm(self, direction: str, cell_type: str, hidden_size=128): @pytest.mark.parametrize('direction', ["forward", "bidirectional", "reverse"]) @pytest.mark.parametrize('cell_type', ["LSTM", "GRU", "RNN"]) def 
test_lstm_simple_precommit(self, direction, cell_type, ie_device, precision, ir_version, - temp_dir, use_old_api): + temp_dir): self._test(*self.create_lstm(direction, cell_type), ie_device, precision, ir_version, - temp_dir=temp_dir, infer_timeout=150, use_old_api=use_old_api) + temp_dir=temp_dir, infer_timeout=150) # LSTM/RNN/GRU Sequence Generation @pytest.mark.parametrize('direction', ["forward", "bidirectional", "reverse"]) @pytest.mark.parametrize('cell_type', ["LSTM", "GRU", "RNN"]) def test_lstm_sequence_generate(self, direction, cell_type, ie_device, precision, ir_version, - temp_dir, use_old_api): + temp_dir): self._test(*self.create_lstm(direction, cell_type), ie_device, precision, ir_version, disabled_transforms='lstm_to_tensor_iterator,gru_and_rnn_to_tensor_iterator', - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) # TODO: add more params for nightly @pytest.mark.nightly @pytest.mark.parametrize('direction', ["forward", "bidirectional", "reverse"]) @pytest.mark.parametrize('cell_type', ["LSTM", "GRU", "RNN"]) - def test_lstm_nightly(self, direction, cell_type, ie_device, precision, ir_version, temp_dir, - use_old_api): + def test_lstm_nightly(self, direction, cell_type, ie_device, precision, ir_version, temp_dir): self._test(*self.create_lstm(direction, cell_type), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) diff --git a/tests/layer_tests/onnx_tests/test_matmul.py b/tests/layer_tests/onnx_tests/test_matmul.py index 86283657d72dbc..733898a78efc15 100644 --- a/tests/layer_tests/onnx_tests/test_matmul.py +++ b/tests/layer_tests/onnx_tests/test_matmul.py @@ -167,30 +167,30 @@ def create_dual_net(self, shape1, shape2, ir_version): @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_matmul(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_matmul(self, params, ie_device, precision, ir_version, temp_dir): 
self._test(*self.create_net(**params, precision=precision, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data_broadcasting) @pytest.mark.nightly - def test_matmul_bc(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_matmul_bc(self, params, ie_device, precision, ir_version, temp_dir): if ie_device == 'GPU': pytest.skip('GREEN_SUITE') self._test(*self.create_net(**params, precision=precision, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_dual_matmul(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_dual_matmul(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_dual_net(**params, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data_broadcasting) @pytest.mark.nightly - def test_dual_matmul_bc(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_dual_matmul_bc(self, params, ie_device, precision, ir_version, temp_dir): if ie_device == 'GPU': pytest.skip('GREEN_SUITE') self._test(*self.create_dual_net(**params, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) diff --git a/tests/layer_tests/onnx_tests/test_mean_variance_normalization.py b/tests/layer_tests/onnx_tests/test_mean_variance_normalization.py index 2a1e1aa7bea485..41b138391806b8 100644 --- a/tests/layer_tests/onnx_tests/test_mean_variance_normalization.py +++ b/tests/layer_tests/onnx_tests/test_mean_variance_normalization.py @@ -69,6 +69,6 @@ def 
create_net(self, shape, axes, ir_version): @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly @pytest.mark.skip(reason='GREEN_SUITE') - def test_mvn(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_mvn(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net(**params, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) diff --git a/tests/layer_tests/onnx_tests/test_neg.py b/tests/layer_tests/onnx_tests/test_neg.py index 4fb926ff69a437..82fad054c69925 100644 --- a/tests/layer_tests/onnx_tests/test_neg.py +++ b/tests/layer_tests/onnx_tests/test_neg.py @@ -80,14 +80,14 @@ def create_neg(self, shape, ir_version): @pytest.mark.parametrize('params', test_data_precommit) @pytest.mark.precommit - def test_neg_precommit(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_neg_precommit(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_neg(**params, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize('params', test_data) @pytest.mark.nightly - def test_neg(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_neg(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_neg(**params, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) diff --git a/tests/layer_tests/onnx_tests/test_non_zero.py b/tests/layer_tests/onnx_tests/test_non_zero.py index 7a29ade321cac2..448b0f10f0ccca 100644 --- a/tests/layer_tests/onnx_tests/test_non_zero.py +++ b/tests/layer_tests/onnx_tests/test_non_zero.py @@ -187,14 +187,14 @@ def create_net_const(self, input_value, output_value, precision, ir_version): 
@pytest.mark.parametrize("params", test_data) @pytest.mark.nightly @pytest.mark.skip(reason='GREEN_SUITE') - def test_non_zero(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_non_zero(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", test_const_data) @pytest.mark.nightly @pytest.mark.skip(reason='GREEN_SUITE') - def test_non_zero_const(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_non_zero_const(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net_const(**params, precision=precision, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) diff --git a/tests/layer_tests/onnx_tests/test_not.py b/tests/layer_tests/onnx_tests/test_not.py index 2a47e85e08c5cd..544e2e5ffae939 100644 --- a/tests/layer_tests/onnx_tests/test_not.py +++ b/tests/layer_tests/onnx_tests/test_not.py @@ -173,21 +173,21 @@ def create_net_const(self, shape, ir_version): @pytest.mark.parametrize("params", test_data_precommit) @pytest.mark.precommit - def test_not_precommit(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_not_precommit(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_not(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_not(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, 
ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_not_const(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_not_const(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net_const(**params, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) diff --git a/tests/layer_tests/onnx_tests/test_or.py b/tests/layer_tests/onnx_tests/test_or.py index 881c9a702ffb4d..6e8474c352740a 100644 --- a/tests/layer_tests/onnx_tests/test_or.py +++ b/tests/layer_tests/onnx_tests/test_or.py @@ -254,28 +254,28 @@ def create_net_const(self, shape1, shape2, ir_version): @pytest.mark.parametrize("params", test_data_precommit) @pytest.mark.precommit - def test_or_precommit(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_or_precommit(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_or(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_or(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_or_one_const(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_or_one_const(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net_one_const(**params, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, 
use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_or_const(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_or_const(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net_const(**params, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) diff --git a/tests/layer_tests/onnx_tests/test_pad.py b/tests/layer_tests/onnx_tests/test_pad.py index 969ce7b698d1a1..3eee267ef910ff 100644 --- a/tests/layer_tests/onnx_tests/test_pad.py +++ b/tests/layer_tests/onnx_tests/test_pad.py @@ -181,12 +181,11 @@ def create_net(self, shape, mode, pads, value, ir_version, opset=None): ('reflect', None), ('edge', None)]) @pytest.mark.nightly - def test_pad_opset_9(self, params, mode_value, ie_device, precision, ir_version, temp_dir, - use_old_api): + def test_pad_opset_9(self, params, mode_value, ie_device, precision, ir_version, temp_dir): mode, value = mode_value self._test( *self.create_net(**params, mode=mode, value=value, ir_version=ir_version, opset=9), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data_precommit) @pytest.mark.parametrize("mode_value", [(None, None), @@ -197,10 +196,10 @@ def test_pad_opset_9(self, params, mode_value, ie_device, precision, ir_version, ('edge', None)]) @pytest.mark.precommit def test_pad_opset_latest_precommit(self, params, mode_value, ie_device, precision, ir_version, - temp_dir, use_old_api): + temp_dir): mode, value = mode_value self._test(*self.create_net(**params, mode=mode, value=value, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) 
@pytest.mark.parametrize("mode_value", [(None, None), @@ -210,8 +209,7 @@ def test_pad_opset_latest_precommit(self, params, mode_value, ie_device, precisi ('reflect', None), ('edge', None)]) @pytest.mark.nightly - def test_pad_opset_latest(self, params, mode_value, ie_device, precision, ir_version, temp_dir, - use_old_api): + def test_pad_opset_latest(self, params, mode_value, ie_device, precision, ir_version, temp_dir): mode, value = mode_value self._test(*self.create_net(**params, mode=mode, value=value, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) diff --git a/tests/layer_tests/onnx_tests/test_pooling.py b/tests/layer_tests/onnx_tests/test_pooling.py index 588fa2ac63cdfe..6a8a88949a797b 100644 --- a/tests/layer_tests/onnx_tests/test_pooling.py +++ b/tests/layer_tests/onnx_tests/test_pooling.py @@ -389,23 +389,21 @@ def create_global_net(self, shape, op, ir_version): @pytest.mark.parametrize("params", test_data) @pytest.mark.parametrize("incl_pad", [None, 1]) @pytest.mark.nightly - def test_avgpool_opset7(self, params, incl_pad, ie_device, precision, ir_version, temp_dir, - use_old_api): + def test_avgpool_opset7(self, params, incl_pad, ie_device, precision, ir_version, temp_dir): if not len(params['shape']) in [4, 5]: pytest.skip("Pooling layer support only 4D and 5D input tensors") self._test( *self.create_net(**params, op='AveragePool', count_include_pad=incl_pad, ir_version=ir_version, opset=7), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data_autopad) @pytest.mark.nightly - def test_avgpool_opset7_autopad(self, params, ie_device, precision, ir_version, temp_dir, - use_old_api): + def test_avgpool_opset7_autopad(self, params, ie_device, precision, ir_version, temp_dir): if not len(params['shape']) in [4, 5]: 
pytest.skip("Pooling layer support only 4D and 5D input tensors") self._test(*self.create_net(**params, op='AveragePool', ir_version=ir_version, opset=7), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.parametrize("incl_pad", [None, 1]) @@ -413,88 +411,81 @@ def test_avgpool_opset7_autopad(self, params, ie_device, precision, ir_version, @pytest.mark.nightly @pytest.mark.skip(reason='GREEN_SUITE') def test_avgpool_opset10(self, params, incl_pad, ceil, ie_device, precision, ir_version, - temp_dir, use_old_api): + temp_dir): if not len(params['shape']) in [4, 5]: pytest.skip("Pooling layer support only 4D and 5D input tensors") self._test( *self.create_net(**params, op='AveragePool', count_include_pad=incl_pad, ceil=ceil, ir_version=ir_version, - opset=10), ie_device, precision, ir_version, temp_dir=temp_dir, - use_old_api=use_old_api) + opset=10), ie_device, precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data_autopad) @pytest.mark.nightly - def test_avgpool_opset10_autopad(self, params, ie_device, precision, ir_version, temp_dir, - use_old_api): + def test_avgpool_opset10_autopad(self, params, ie_device, precision, ir_version, temp_dir): if not len(params['shape']) in [4, 5]: pytest.skip("Pooling layer support only 4D and 5D input tensors") self._test(*self.create_net(**params, op='AveragePool', ir_version=ir_version, opset=10), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.parametrize("st_order", [None, 1]) @pytest.mark.nightly - def test_maxpool_opset8(self, params, st_order, ie_device, precision, ir_version, temp_dir, - use_old_api): + def test_maxpool_opset8(self, params, st_order, ie_device, precision, ir_version, temp_dir): if not 
len(params['shape']) in [4, 5]: pytest.skip("Pooling layer support only 4D and 5D input tensors") self._test( *self.create_net(**params, op='MaxPool', storage_order=st_order, ir_version=ir_version, opset=8), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data_autopad) @pytest.mark.nightly - def test_maxpool_opset8_autopad(self, params, ie_device, precision, ir_version, temp_dir, - use_old_api): + def test_maxpool_opset8_autopad(self, params, ie_device, precision, ir_version, temp_dir): if not len(params['shape']) in [4, 5]: pytest.skip("Pooling layer support only 4D and 5D input tensors") self._test(*self.create_net(**params, op='MaxPool', ir_version=ir_version, opset=8), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.parametrize("st_order", [None, 1]) @pytest.mark.parametrize("ceil", [True, False]) @pytest.mark.nightly def test_maxpool_opset10(self, params, st_order, ceil, ie_device, precision, ir_version, - temp_dir, use_old_api): + temp_dir): if not len(params['shape']) in [4, 5]: pytest.skip("Pooling layer support only 4D and 5D input tensors") self._test(*self.create_net(**params, op='MaxPool', storage_order=st_order, ceil=ceil, ir_version=ir_version, - opset=10), ie_device, precision, ir_version, temp_dir=temp_dir, - use_old_api=use_old_api) + opset=10), ie_device, precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data_autopad_precommit) @pytest.mark.precommit - def test_maxpool_opset10_autopad(self, params, ie_device, precision, ir_version, temp_dir, - use_old_api): + def test_maxpool_opset10_autopad(self, params, ie_device, precision, ir_version, temp_dir): if not len(params['shape']) in [4, 5]: pytest.skip("Pooling layer support only 4D and 5D input 
tensors") self._test(*self.create_net(**params, op='MaxPool', ir_version=ir_version, opset=10), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data_autopad) @pytest.mark.nightly - def test_maxpool_opset10_autopad(self, params, ie_device, precision, ir_version, temp_dir, - use_old_api): + def test_maxpool_opset10_autopad(self, params, ie_device, precision, ir_version, temp_dir): if not len(params['shape']) in [4, 5]: pytest.skip("Pooling layer support only 4D and 5D input tensors") self._test(*self.create_net(**params, op='MaxPool', ir_version=ir_version, opset=10), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", global_test_data) @pytest.mark.nightly - def test_global_avgpool(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_global_avgpool(self, params, ie_device, precision, ir_version, temp_dir): if not len(params['shape']) in [4, 5]: pytest.skip("Pooling layer support only 4D and 5D input tensors") self._test(*self.create_global_net(**params, op='GlobalAveragePool', ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", global_test_data) @pytest.mark.nightly - def test_global_maxpool(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_global_maxpool(self, params, ie_device, precision, ir_version, temp_dir): if not len(params['shape']) in [4, 5]: pytest.skip("Pooling layer support only 4D and 5D input tensors") self._test(*self.create_global_net(**params, op='GlobalMaxPool', ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, 
temp_dir=temp_dir) diff --git a/tests/layer_tests/onnx_tests/test_prelu.py b/tests/layer_tests/onnx_tests/test_prelu.py index 0e5e85d496b087..6e93d3fd14c59e 100644 --- a/tests/layer_tests/onnx_tests/test_prelu.py +++ b/tests/layer_tests/onnx_tests/test_prelu.py @@ -113,39 +113,37 @@ def create_net(self, shape, slope_shape, precision, ir_version, opset=None): @pytest.mark.parametrize("params", test_data_scalar) @pytest.mark.nightly - def test_prelu_opset7_scalar(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_prelu_opset7_scalar(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net(**params, precision=precision, opset=7, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data_shared_channels) @pytest.mark.nightly - def test_prelu_opset7_shared_channels(self, params, ie_device, precision, ir_version, temp_dir, - use_old_api): + def test_prelu_opset7_shared_channels(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net(**params, precision=precision, opset=7, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data_precommit) @pytest.mark.precommit def test_prelu_shared_channels_precommit(self, params, ie_device, precision, ir_version, - temp_dir, use_old_api): + temp_dir): self._test(*self.create_net(**params, precision=precision, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data_scalar_precommit) @pytest.mark.precommit - def test_prelu_scalar_precommit(self, params, ie_device, precision, ir_version, temp_dir, - use_old_api): + 
def test_prelu_scalar_precommit(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net(**params, precision=precision, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data_scalar) @pytest.mark.nightly - def test_prelu_scalar(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_prelu_scalar(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net(**params, precision=precision, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data_shared_channels) @pytest.mark.nightly - def test_prelu_shared_channels(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_prelu_shared_channels(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net(**params, precision=precision, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) diff --git a/tests/layer_tests/onnx_tests/test_reciprocal.py b/tests/layer_tests/onnx_tests/test_reciprocal.py index 27cac5f8fb3ef3..99b5f329374c1e 100644 --- a/tests/layer_tests/onnx_tests/test_reciprocal.py +++ b/tests/layer_tests/onnx_tests/test_reciprocal.py @@ -148,27 +148,26 @@ def create_net_const(self, shape, precision, ir_version): @pytest.mark.parametrize("params", test_data_precommit) @pytest.mark.precommit - def test_reciprocal_precommit(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_reciprocal_precommit(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version, - 
temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_reciprocal(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_reciprocal(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data_precommit) @pytest.mark.precommit - def test_reciprocal_const_precommit(self, params, ie_device, precision, ir_version, temp_dir, - use_old_api): + def test_reciprocal_const_precommit(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net_const(**params, precision=precision, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_reciprocal_const(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_reciprocal_const(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net_const(**params, precision=precision, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) diff --git a/tests/layer_tests/onnx_tests/test_reduce.py b/tests/layer_tests/onnx_tests/test_reduce.py index 7edfa00011d56c..367c1839d0539a 100644 --- a/tests/layer_tests/onnx_tests/test_reduce.py +++ b/tests/layer_tests/onnx_tests/test_reduce.py @@ -131,70 +131,68 @@ def create_reduce(self, shape, reshapped_shape, op, axes, keep_dims, ir_version) @pytest.mark.parametrize("keep_dims", [True, False]) @pytest.mark.precommit def test_reduce_max_precommit(self, params, keep_dims, ie_device, precision, ir_version, - temp_dir, 
use_old_api): + temp_dir): self._test(*self.create_reduce(**params, op='ReduceMax', keep_dims=keep_dims, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.parametrize("keep_dims", [True, False]) @pytest.mark.nightly - def test_reduce_max(self, params, keep_dims, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_reduce_max(self, params, keep_dims, ie_device, precision, ir_version, temp_dir): self._test(*self.create_reduce(**params, op='ReduceMax', keep_dims=keep_dims, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.parametrize("keep_dims", [True, False]) @pytest.mark.nightly @pytest.mark.skip(reason='GREEN_SUITE') - def test_reduce_sum(self, params, keep_dims, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_reduce_sum(self, params, keep_dims, ie_device, precision, ir_version, temp_dir): self._test(*self.create_reduce(**params, op='ReduceSum', keep_dims=keep_dims, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.parametrize("keep_dims", [True, False]) @pytest.mark.nightly @pytest.mark.skip(reason='GREEN_SUITE') - def test_reduce_prod(self, params, keep_dims, ie_device, precision, ir_version, temp_dir, - use_old_api): + def test_reduce_prod(self, params, keep_dims, ie_device, precision, ir_version, temp_dir): self._test(*self.create_reduce(**params, op='ReduceProd', keep_dims=keep_dims, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, 
temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data_precommit) @pytest.mark.parametrize("keep_dims", [True, False]) @pytest.mark.precommit def test_reduce_mean_precommit(self, params, keep_dims, ie_device, precision, ir_version, - temp_dir, use_old_api): + temp_dir): self._test(*self.create_reduce(**params, op='ReduceMean', keep_dims=keep_dims, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.parametrize("keep_dims", [True, False]) @pytest.mark.nightly @pytest.mark.precommit - def test_reduce_mean(self, params, keep_dims, ie_device, precision, ir_version, temp_dir, - use_old_api): + def test_reduce_mean(self, params, keep_dims, ie_device, precision, ir_version, temp_dir): self._test(*self.create_reduce(**params, op='ReduceMean', keep_dims=keep_dims, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data_precommit) @pytest.mark.parametrize("keep_dims", [True, False]) @pytest.mark.precommit def test_reduce_min_precommit(self, params, keep_dims, ie_device, precision, ir_version, - temp_dir, use_old_api): + temp_dir): self._test(*self.create_reduce(**params, op='ReduceMin', keep_dims=keep_dims, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.parametrize("keep_dims", [True, False]) @pytest.mark.nightly - def test_reduce_min(self, params, keep_dims, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_reduce_min(self, params, keep_dims, ie_device, precision, ir_version, temp_dir): self._test(*self.create_reduce(**params, op='ReduceMin', keep_dims=keep_dims, 
ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) diff --git a/tests/layer_tests/onnx_tests/test_reduce_lp.py b/tests/layer_tests/onnx_tests/test_reduce_lp.py index 0c3adab6c4e0dc..1c84a7053884cd 100644 --- a/tests/layer_tests/onnx_tests/test_reduce_lp.py +++ b/tests/layer_tests/onnx_tests/test_reduce_lp.py @@ -239,33 +239,33 @@ def create_reduce_lp_const(self, shape, axes, keep_dims, reduce_p, ir_version): 'arm64', 'ARM64'), reason='Ticket - 122846, 122783, 126312') def test_reduce_lp_precommit(self, params, keep_dims, reduce_p, ie_device, precision, - ir_version, temp_dir, use_old_api): + ir_version, temp_dir): self._test(*self.create_reduce_lp(**params, keep_dims=keep_dims, reduce_p=reduce_p, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.parametrize("keep_dims", [True, False]) @pytest.mark.parametrize("reduce_p", [1, 2]) @pytest.mark.nightly def test_reduce_lp(self, params, keep_dims, reduce_p, ie_device, precision, ir_version, - temp_dir, use_old_api): + temp_dir): if ie_device == 'GPU': pytest.skip('GREEN_SUITE') self._test(*self.create_reduce_lp(**params, keep_dims=keep_dims, reduce_p=reduce_p, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data_precommit) @pytest.mark.parametrize("keep_dims", [True, False]) @pytest.mark.parametrize("reduce_p", [1, 2]) @pytest.mark.precommit def test_reduce_lp_const_precommit(self, params, keep_dims, reduce_p, ie_device, precision, - ir_version, temp_dir, use_old_api): + ir_version, temp_dir): self._test( *self.create_reduce_lp_const(**params, keep_dims=keep_dims, reduce_p=reduce_p, 
ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.parametrize("keep_dims", [True, False]) @@ -273,7 +273,7 @@ def test_reduce_lp_const_precommit(self, params, keep_dims, reduce_p, ie_device, @pytest.mark.nightly @pytest.mark.skip(reason='GREEN_SUITE') def test_reduce_lp_const(self, params, keep_dims, reduce_p, ie_device, precision, ir_version, - temp_dir, use_old_api): + temp_dir): self._test(*self.create_reduce_lp_const(**params, keep_dims=keep_dims, reduce_p=reduce_p, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) diff --git a/tests/layer_tests/onnx_tests/test_relu.py b/tests/layer_tests/onnx_tests/test_relu.py index 7807cb8b5092c7..e30193687cc604 100644 --- a/tests/layer_tests/onnx_tests/test_relu.py +++ b/tests/layer_tests/onnx_tests/test_relu.py @@ -172,13 +172,13 @@ def create_net_const(self, shape, precision, ir_version): @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_relu(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_relu(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_relu_const(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_relu_const(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net_const(**params, precision=precision, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) diff --git 
a/tests/layer_tests/onnx_tests/test_reshape.py b/tests/layer_tests/onnx_tests/test_reshape.py index 5b4128898f186d..6179d736be3562 100644 --- a/tests/layer_tests/onnx_tests/test_reshape.py +++ b/tests/layer_tests/onnx_tests/test_reshape.py @@ -231,64 +231,64 @@ def create_reshape_net_const(self, input_shape, output_shape, ir_version): @pytest.mark.parametrize("params", test_data_5D_precommit) @pytest.mark.precommit - def test_reshape_5D_precommit(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_reshape_5D_precommit(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_reshape_net(**params, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data_4D_precommit) @pytest.mark.precommit - def test_reshape_4D_precommit(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_reshape_4D_precommit(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_reshape_net(**params, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data_3D_precommit) @pytest.mark.precommit - def test_reshape_3D_precommit(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_reshape_3D_precommit(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_reshape_net(**params, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data_5D) @pytest.mark.nightly @pytest.mark.precommit - def test_reshape_5D(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_reshape_5D(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_reshape_net(**params, 
ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data_4D) @pytest.mark.nightly - def test_reshape_4D(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_reshape_4D(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_reshape_net(**params, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data_3D) @pytest.mark.nightly - def test_reshape_3D(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_reshape_3D(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_reshape_net(**params, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data_5D) @pytest.mark.nightly - def test_reshape_const_5D(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_reshape_const_5D(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_reshape_net_const(**params, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data_4D) @pytest.mark.nightly - def test_reshape_const_4D(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_reshape_const_4D(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_reshape_net_const(**params, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data_3D) @pytest.mark.nightly - def test_reshape_const_3D(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + 
def test_reshape_const_3D(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_reshape_net_const(**params, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) diff --git a/tests/layer_tests/onnx_tests/test_resize.py b/tests/layer_tests/onnx_tests/test_resize.py index 9742528decf0f7..4024f2aa6ed149 100644 --- a/tests/layer_tests/onnx_tests/test_resize.py +++ b/tests/layer_tests/onnx_tests/test_resize.py @@ -203,10 +203,9 @@ def create_resize_net(self, input_shape, output_shape, scales, sizes, ] @pytest.mark.parametrize("params", test_data) - def test_resize(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_resize(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_resize_net(**params, precision=precision, ir_version=ir_version), - ie_device, precision, ir_version, custom_eps=2.0e-4, temp_dir=temp_dir, - use_old_api=use_old_api) + ie_device, precision, ir_version, custom_eps=2.0e-4, temp_dir=temp_dir) test_data_cubic = [ dict(input_shape=[1, 3, 100, 200], output_shape=[1, 3, 350, 150], @@ -236,14 +235,13 @@ def test_resize(self, params, ie_device, precision, ir_version, temp_dir, use_ol @pytest.mark.parametrize("nearest_mode", ['round_prefer_floor']) def test_resize_combined_cubic(self, params, coordinate_transformation_mode, cubic_coeff_a, mode, - nearest_mode, ie_device, precision, ir_version, temp_dir, use_old_api): + nearest_mode, ie_device, precision, ir_version, temp_dir): self._test(*self.create_resize_net(**params, coordinate_transformation_mode=coordinate_transformation_mode, cubic_coeff_a=cubic_coeff_a, mode=mode, nearest_mode=nearest_mode, precision=precision, ir_version=ir_version), - ie_device, precision, ir_version, custom_eps=2.6e-2, temp_dir=temp_dir, - use_old_api=use_old_api) + ie_device, precision, ir_version, custom_eps=2.6e-2, temp_dir=temp_dir) test_data_nearest = [ 
dict(input_shape=[1, 3, 100, 200], output_shape=[1, 3, 350, 150], @@ -266,14 +264,13 @@ def test_resize_combined_cubic(self, params, coordinate_transformation_mode, cub 'floor', 'ceil']) def test_resize_combined_nearest(self, params, coordinate_transformation_mode, cubic_coeff_a, mode, - nearest_mode, ie_device, precision, ir_version, temp_dir, - use_old_api): + nearest_mode, ie_device, precision, ir_version, temp_dir): self._test(*self.create_resize_net(**params, coordinate_transformation_mode=coordinate_transformation_mode, cubic_coeff_a=cubic_coeff_a, mode=mode, nearest_mode=nearest_mode, precision=precision, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) test_data_linear = [ dict(input_shape=[1, 3, 100, 200], output_shape=[1, 3, 350, 150], @@ -303,15 +300,13 @@ def test_resize_combined_nearest(self, params, coordinate_transformation_mode, c @pytest.mark.parametrize("nearest_mode", ['round_prefer_floor']) def test_resize_combined_linear(self, params, coordinate_transformation_mode, cubic_coeff_a, mode, - nearest_mode, ie_device, precision, ir_version, temp_dir, - use_old_api): + nearest_mode, ie_device, precision, ir_version, temp_dir): self._test(*self.create_resize_net(**params, coordinate_transformation_mode=coordinate_transformation_mode, cubic_coeff_a=cubic_coeff_a, mode=mode, nearest_mode=nearest_mode, precision=precision, ir_version=ir_version), - ie_device, precision, ir_version, custom_eps=2.0e-2, temp_dir=temp_dir, - use_old_api=use_old_api) + ie_device, precision, ir_version, custom_eps=2.0e-2, temp_dir=temp_dir) test_data_sizes = [ dict(input_shape=[1, 1, 4, 4], output_shape=[1, 1, 3, 3], @@ -357,9 +352,9 @@ def test_resize_combined_linear(self, params, coordinate_transformation_mode, cu ] @pytest.mark.parametrize("params", test_data_sizes) - def test_resize_sizes(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def 
test_resize_sizes(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_resize_net(**params, precision=precision, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) test_data_sizes_cubic = [ dict(input_shape=[1, 3, 100, 200], output_shape=[1, 3, 350, 150], @@ -389,15 +384,13 @@ def test_resize_sizes(self, params, ie_device, precision, ir_version, temp_dir, @pytest.mark.parametrize("nearest_mode", ['round_prefer_floor']) def test_resize_combined_sizes_cubic(self, params, coordinate_transformation_mode, cubic_coeff_a, mode, - nearest_mode, ie_device, precision, ir_version, temp_dir, - use_old_api): + nearest_mode, ie_device, precision, ir_version, temp_dir): self._test(*self.create_resize_net(**params, coordinate_transformation_mode=coordinate_transformation_mode, cubic_coeff_a=cubic_coeff_a, mode=mode, nearest_mode=nearest_mode, precision=precision, ir_version=ir_version), - ie_device, precision, ir_version, custom_eps=2.6e-2, temp_dir=temp_dir, - use_old_api=use_old_api) + ie_device, precision, ir_version, custom_eps=2.6e-2, temp_dir=temp_dir) test_data_sizes_nearest = [ dict(input_shape=[1, 3, 100, 200], output_shape=[1, 3, 350, 150], @@ -444,14 +437,13 @@ def test_resize_combined_sizes_cubic(self, params, coordinate_transformation_mod 'floor', 'ceil']) def test_resize_combined_sizes_nearest(self, params, coordinate_transformation_mode, cubic_coeff_a, mode, - nearest_mode, ie_device, precision, ir_version, temp_dir, - use_old_api): + nearest_mode, ie_device, precision, ir_version, temp_dir): self._test(*self.create_resize_net(**params, coordinate_transformation_mode=coordinate_transformation_mode, cubic_coeff_a=cubic_coeff_a, mode=mode, nearest_mode=nearest_mode, precision=precision, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, 
temp_dir=temp_dir) test_data_sizes_linear = [ dict(input_shape=[1, 3, 100, 200], output_shape=[1, 3, 350, 150], @@ -481,15 +473,13 @@ def test_resize_combined_sizes_nearest(self, params, coordinate_transformation_m @pytest.mark.parametrize("nearest_mode", ['round_prefer_floor']) def test_resize_combined_sizes_linear(self, params, coordinate_transformation_mode, cubic_coeff_a, mode, - nearest_mode, ie_device, precision, ir_version, temp_dir, - use_old_api): + nearest_mode, ie_device, precision, ir_version, temp_dir): self._test(*self.create_resize_net(**params, coordinate_transformation_mode=coordinate_transformation_mode, cubic_coeff_a=cubic_coeff_a, mode=mode, nearest_mode=nearest_mode, precision=precision, ir_version=ir_version), - ie_device, precision, ir_version, custom_eps=2.0e-2, temp_dir=temp_dir, - use_old_api=use_old_api) + ie_device, precision, ir_version, custom_eps=2.0e-2, temp_dir=temp_dir) def create_ref_net_in_sizes_mode(precision, input_shape, output_shape, sizes_value, scales_value, diff --git a/tests/layer_tests/onnx_tests/test_roi_align.py b/tests/layer_tests/onnx_tests/test_roi_align.py index 682c6c33078e86..56f7f3fc28ec3e 100644 --- a/tests/layer_tests/onnx_tests/test_roi_align.py +++ b/tests/layer_tests/onnx_tests/test_roi_align.py @@ -140,10 +140,10 @@ def create_net(self, input_shape, rois_shape, indices_shape, output_shape, 'aarch64', 'arm64', 'ARM64'), reason='Ticket - 122846, 122783, 126312') - def test_roi_alignv10(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_roi_alignv10(self, params, ie_device, precision, ir_version, temp_dir): # TODO: ticket for investigating GPU failures: CVS-86300 if ie_device != "GPU": self._test(*self.create_net(**params, ir_version=ir_version, onnx_version=10), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api, + temp_dir=temp_dir, use_legacy_frontend=True) diff --git a/tests/layer_tests/onnx_tests/test_scale.py 
b/tests/layer_tests/onnx_tests/test_scale.py index dd8c5d2efc0449..f90328867b9259 100644 --- a/tests/layer_tests/onnx_tests/test_scale.py +++ b/tests/layer_tests/onnx_tests/test_scale.py @@ -134,14 +134,14 @@ def create_net_const(self, shape, scale, precision, ir_version): @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly @pytest.mark.skip(reason='GREEN_SUITE') - def test_scale(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_scale(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly @pytest.mark.skip(reason='GREEN_SUITE') - def test_scale_const(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_scale_const(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net_const(**params, precision=precision, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) diff --git a/tests/layer_tests/onnx_tests/test_scatter.py b/tests/layer_tests/onnx_tests/test_scatter.py index 1e7460efb84482..abeb132cb112f1 100644 --- a/tests/layer_tests/onnx_tests/test_scatter.py +++ b/tests/layer_tests/onnx_tests/test_scatter.py @@ -114,10 +114,10 @@ class TestScatter(TestScatters): @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly @pytest.mark.skip(reason='GREEN_SUITE') - def test_scatter(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_scatter(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) class 
TestScatterElements(TestScatters): @@ -126,7 +126,7 @@ class TestScatterElements(TestScatters): @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly @pytest.mark.skip(reason='GREEN_SUITE') - def test_scatter_elements(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_scatter_elements(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) diff --git a/tests/layer_tests/onnx_tests/test_sigmoid.py b/tests/layer_tests/onnx_tests/test_sigmoid.py index 6536c083869b01..eb4940f28a25de 100644 --- a/tests/layer_tests/onnx_tests/test_sigmoid.py +++ b/tests/layer_tests/onnx_tests/test_sigmoid.py @@ -179,20 +179,20 @@ def create_net_const(self, shape, precision, ir_version): @pytest.mark.parametrize("params", test_data_precommit) @pytest.mark.precommit - def test_sigmoid_precommit(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_sigmoid_precommit(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_sigmoid(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_sigmoid(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_sigmoid_const(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_sigmoid_const(self, params, ie_device, precision, ir_version, temp_dir): 
self._test(*self.create_net_const(**params, precision=precision, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) diff --git a/tests/layer_tests/onnx_tests/test_sign.py b/tests/layer_tests/onnx_tests/test_sign.py index f66e8b4cffc7b2..0f275e5bb7c796 100644 --- a/tests/layer_tests/onnx_tests/test_sign.py +++ b/tests/layer_tests/onnx_tests/test_sign.py @@ -166,13 +166,13 @@ def create_net_const(self, shape, precision, ir_version): @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_sign(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_sign(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_sign_const(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_sign_const(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net_const(**params, precision=precision, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) diff --git a/tests/layer_tests/onnx_tests/test_slice.py b/tests/layer_tests/onnx_tests/test_slice.py index 2d27e8b8b34937..cac51b58841933 100644 --- a/tests/layer_tests/onnx_tests/test_slice.py +++ b/tests/layer_tests/onnx_tests/test_slice.py @@ -371,48 +371,48 @@ def create_net_const(self, shape, axes, ends, starts, ir_version, opset=6, steps @pytest.mark.parametrize("params", test_data_no_steps) @pytest.mark.nightly - def test_slice_opset6(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_slice_opset6(self, params, ie_device, precision, ir_version, 
temp_dir): self._test(*self.create_net(**params, opset=6, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data_no_steps) @pytest.mark.nightly - def test_slice_const_opset6(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_slice_const_opset6(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net_const(**params, opset=6, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data_no_steps + test_data_with_steps) @pytest.mark.nightly - def test_slice_opset10(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_slice_opset10(self, params, ie_device, precision, ir_version, temp_dir): if ie_device == 'GPU': pytest.skip('GREEN_SUITE') self._test( *self.create_net(**params, opset=10, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data_no_steps + test_data_with_steps) @pytest.mark.nightly - def test_slice_const_opset10(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_slice_const_opset10(self, params, ie_device, precision, ir_version, temp_dir): if ie_device == 'GPU': pytest.skip('GREEN_SUITE') self._test(*self.create_net_const(**params, opset=10, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data_no_steps + test_data_with_steps) @pytest.mark.nightly - def test_slice_opset11(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_slice_opset11(self, params, ie_device, precision, ir_version, temp_dir): if ie_device == 'GPU': 
pytest.skip('GREEN_SUITE') self._test( *self.create_net(**params, opset=11, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data_no_steps + test_data_with_steps) @pytest.mark.nightly - def test_slice_const_opset11(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_slice_const_opset11(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net_const(**params, opset=11, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) diff --git a/tests/layer_tests/onnx_tests/test_softmax.py b/tests/layer_tests/onnx_tests/test_softmax.py index 3f1f4a084e4316..84b692593e85f7 100644 --- a/tests/layer_tests/onnx_tests/test_softmax.py +++ b/tests/layer_tests/onnx_tests/test_softmax.py @@ -166,7 +166,7 @@ def create_net(self, shape, softmax_axis, ir_version): @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_softmax(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_softmax(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) diff --git a/tests/layer_tests/onnx_tests/test_softplus.py b/tests/layer_tests/onnx_tests/test_softplus.py index 6c8090c9eae7bd..169f1a079160b8 100644 --- a/tests/layer_tests/onnx_tests/test_softplus.py +++ b/tests/layer_tests/onnx_tests/test_softplus.py @@ -171,15 +171,15 @@ def create_net_const(self, shape, precision, ir_version): @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_softplus(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_softplus(self, params, ie_device, precision, ir_version, 
temp_dir): if ie_device == 'GPU': pytest.skip('GREEN_SUITE') self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_softplus_const(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_softplus_const(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net_const(**params, precision=precision, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) diff --git a/tests/layer_tests/onnx_tests/test_softsign.py b/tests/layer_tests/onnx_tests/test_softsign.py index 96edd1cfd016eb..d9db7e176864c8 100644 --- a/tests/layer_tests/onnx_tests/test_softsign.py +++ b/tests/layer_tests/onnx_tests/test_softsign.py @@ -172,14 +172,14 @@ def create_net_const(self, shape, precision, ir_version): @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly @pytest.mark.precommit - def test_softsign(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_softsign(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly @pytest.mark.precommit - def test_softsign_const(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_softsign_const(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net_const(**params, precision=precision, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) diff --git 
a/tests/layer_tests/onnx_tests/test_split_concat.py b/tests/layer_tests/onnx_tests/test_split_concat.py index 5c8970d5a80508..37e93026e8d774 100644 --- a/tests/layer_tests/onnx_tests/test_split_concat.py +++ b/tests/layer_tests/onnx_tests/test_split_concat.py @@ -262,47 +262,47 @@ def create_split_concat_net_const(self, input_shape, output_shapes, axis, ir_ver @pytest.mark.parametrize("params", test_data_3D) @pytest.mark.nightly @pytest.mark.skip(reason='GREEN_SUITE') - def test_split_3D(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_split_3D(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_split_concat_net(**params, ir_version=ir_version), ie_device, - precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data_4D) @pytest.mark.nightly @pytest.mark.skip(reason='GREEN_SUITE') - def test_split_4D(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_split_4D(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_split_concat_net(**params, ir_version=ir_version), ie_device, - precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data_5D) @pytest.mark.nightly @pytest.mark.skip(reason='GREEN_SUITE') - def test_split_5D(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_split_5D(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_split_concat_net(**params, ir_version=ir_version), ie_device, - precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data_3D) @pytest.mark.nightly @pytest.mark.skip(reason='GREEN_SUITE') - def test_split_3D_const(self, params, ie_device, precision, ir_version, temp_dir, 
use_old_api): + def test_split_3D_const(self, params, ie_device, precision, ir_version, temp_dir): self._test( *self.create_split_concat_net_const(**params, ir_version=ir_version), ie_device, - precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data_4D) @pytest.mark.nightly @pytest.mark.skip(reason='GREEN_SUITE') - def test_split_4D_const(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_split_4D_const(self, params, ie_device, precision, ir_version, temp_dir): self._test( *self.create_split_concat_net_const(**params, ir_version=ir_version), ie_device, - precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data_5D) @pytest.mark.nightly @pytest.mark.skip(reason='GREEN_SUITE') - def test_split_5D_const(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_split_5D_const(self, params, ie_device, precision, ir_version, temp_dir): self._test( *self.create_split_concat_net_const(**params, ir_version=ir_version), ie_device, - precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + precision, ir_version, temp_dir=temp_dir) class TestSplit(OnnxRuntimeLayerTest): @@ -550,37 +550,37 @@ def create_split_net_ordered_outputs_multiple_tensor_names(self, input_shape, ou @pytest.mark.parametrize("params", test_data_3D) @pytest.mark.nightly @pytest.mark.skip(reason='GREEN_SUITE') - def test_split_3D(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_split_3D(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_split_net(**params, ir_version=ir_version), ie_device, precision, - ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data_4D) @pytest.mark.nightly 
@pytest.mark.skip(reason='GREEN_SUITE') - def test_split_4D(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_split_4D(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_split_net(**params, ir_version=ir_version), ie_device, precision, - ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data_5D) @pytest.mark.nightly @pytest.mark.skip(reason='GREEN_SUITE') - def test_split_5D(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_split_5D(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_split_net(**params, ir_version=ir_version), ie_device, precision, - ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_multiple_out) - def test_split_outputs_order(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_split_outputs_order(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_split_net_ordered_outputs(**params, ir_version=ir_version), ie_device, precision, ir_version, temp_dir=temp_dir, - output_names=params['output_names'], use_old_api=use_old_api) + output_names=params['output_names']) @pytest.mark.parametrize("params", test_multiple_out_with_add) def test_split_outputs_order_multiple_connection_before_result_case(self, params, ie_device, precision, ir_version, - temp_dir, use_old_api): + temp_dir): self._test(*self.create_split_net_ordered_outputs_with_add(**params, ir_version=ir_version), ie_device, precision, ir_version, temp_dir=temp_dir, - output_names=params['output_names'], use_old_api=use_old_api) + output_names=params['output_names']) @pytest.mark.parametrize("params", test_multiple_out_with_identity) def test_split_outputs_order_multiple_tensors_before_result_case(self, @@ -588,8 +588,8 @@ def 
test_split_outputs_order_multiple_tensors_before_result_case(self, ie_device, precision, ir_version, - temp_dir, use_old_api): + temp_dir): self._test(*self.create_split_net_ordered_outputs_multiple_tensor_names(**params, ir_version=ir_version), ie_device, precision, ir_version, temp_dir=temp_dir, - output_names=params['output_names'], use_old_api=use_old_api) + output_names=params['output_names']) diff --git a/tests/layer_tests/onnx_tests/test_sqrt.py b/tests/layer_tests/onnx_tests/test_sqrt.py index f1c1ea0277a7fb..ab91005b762420 100644 --- a/tests/layer_tests/onnx_tests/test_sqrt.py +++ b/tests/layer_tests/onnx_tests/test_sqrt.py @@ -176,13 +176,13 @@ def create_net_const(self, shape, precision, ir_version): @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_sqrt(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_sqrt(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_sqrt_const(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_sqrt_const(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net_const(**params, precision=precision, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) diff --git a/tests/layer_tests/onnx_tests/test_squeeze.py b/tests/layer_tests/onnx_tests/test_squeeze.py index 67b20321d24fe9..e27c3242eed8ae 100644 --- a/tests/layer_tests/onnx_tests/test_squeeze.py +++ b/tests/layer_tests/onnx_tests/test_squeeze.py @@ -176,47 +176,47 @@ def create_squeeze_net_const(self, axes, input_shape, output_shape, ir_version): @pytest.mark.parametrize("params", test_data_5D) 
@pytest.mark.nightly @pytest.mark.skip(reason='GREEN_SUITE') - def test_squeeze_5D(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_squeeze_5D(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_squeeze_net(**params, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data_4D) @pytest.mark.nightly @pytest.mark.skip(reason='GREEN_SUITE') - def test_squeeze_4D(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_squeeze_4D(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_squeeze_net(**params, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data_3D) @pytest.mark.nightly @pytest.mark.skip(reason='GREEN_SUITE') - def test_squeeze_3D(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_squeeze_3D(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_squeeze_net(**params, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data_5D) @pytest.mark.nightly @pytest.mark.skip(reason='GREEN_SUITE') - def test_squeeze_const_5D(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_squeeze_const_5D(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_squeeze_net_const(**params, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data_4D) @pytest.mark.nightly @pytest.mark.skip(reason='GREEN_SUITE') - def test_squeeze_const_4D(self, params, ie_device, precision, ir_version, temp_dir, 
use_old_api): + def test_squeeze_const_4D(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_squeeze_net_const(**params, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data_3D) @pytest.mark.nightly @pytest.mark.skip(reason='GREEN_SUITE') - def test_squeeze_const_3D(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_squeeze_const_3D(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_squeeze_net_const(**params, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) diff --git a/tests/layer_tests/onnx_tests/test_sum.py b/tests/layer_tests/onnx_tests/test_sum.py index 0d6f318e1269e7..8d31a32d915dc9 100644 --- a/tests/layer_tests/onnx_tests/test_sum.py +++ b/tests/layer_tests/onnx_tests/test_sum.py @@ -285,57 +285,56 @@ def create_const_net(self, const_shapes, ir_version, opset=None): @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_sum_opset6(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_sum_opset6(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net(**params, precision=precision, opset=6, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data_precommit) @pytest.mark.precommit - def test_sum_precommit(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_sum_precommit(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net(**params, precision=precision, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, 
ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_sum(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_sum(self, params, ie_device, precision, ir_version, temp_dir): self._test( *self.create_net(**params, precision=precision, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", const_test_data) @pytest.mark.nightly - def test_sum_const_opset6(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_sum_const_opset6(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_const_net(**params, opset=6, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", const_test_data_precommit) @pytest.mark.precommit - def test_sum_const_precommit(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_sum_const_precommit(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_const_net(**params, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", const_test_data) @pytest.mark.nightly - def test_sum_const(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_sum_const(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_const_net(**params, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", const_test_data_broadcasting_precommit) @pytest.mark.precommit def test_sum_const_broadcasting_precommit(self, params, ie_device, precision, ir_version, - temp_dir, use_old_api): + temp_dir): 
self._test(*self.create_const_net(**params, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", const_test_data_broadcasting) @pytest.mark.nightly - def test_sum_const_broadcasting(self, params, ie_device, precision, ir_version, temp_dir, - use_old_api): + def test_sum_const_broadcasting(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_const_net(**params, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) diff --git a/tests/layer_tests/onnx_tests/test_topk.py b/tests/layer_tests/onnx_tests/test_topk.py index a30c973f8892b3..92b58db92d4480 100644 --- a/tests/layer_tests/onnx_tests/test_topk.py +++ b/tests/layer_tests/onnx_tests/test_topk.py @@ -148,28 +148,27 @@ def create_net(self, shape, k, axis, ir_version, largest=None, sorted=None, opse @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly @pytest.mark.skip(reason='GREEN_SUITE') - def test_topk_opset6(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_topk_opset6(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net(**params, opset=6, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_topk_opset10(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_topk_opset10(self, params, ie_device, precision, ir_version, temp_dir): if ie_device == 'CPU': pytest.skip('GREEN_SUITE') self._test(*self.create_net(**params, opset=10, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.parametrize("largest", [1, 0, None]) 
@pytest.mark.parametrize("sorted", [1, 0, None]) @pytest.mark.nightly @pytest.mark.skip(reason='GREEN_SUITE') - def test_topk_opset11(self, params, ie_device, precision, ir_version, largest, sorted, temp_dir, - use_old_api): + def test_topk_opset11(self, params, ie_device, precision, ir_version, largest, sorted, temp_dir): self._test(*self.create_net(**params, largest=largest, sorted=sorted, opset=11, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) diff --git a/tests/layer_tests/onnx_tests/test_transpose.py b/tests/layer_tests/onnx_tests/test_transpose.py index 7d4cd99130096d..47fefc9015f1d3 100644 --- a/tests/layer_tests/onnx_tests/test_transpose.py +++ b/tests/layer_tests/onnx_tests/test_transpose.py @@ -155,29 +155,28 @@ def create_net_const(self, shape, perm, ir_version): @pytest.mark.parametrize("params", test_data_precommit) @pytest.mark.precommit - def test_transpose_precommit(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_transpose_precommit(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_transpose(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_transpose(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data_precommit) @pytest.mark.nightly - def test_transpose_const_precommit(self, params, ie_device, precision, ir_version, temp_dir, - use_old_api): + def test_transpose_const_precommit(self, params, ie_device, precision, ir_version, temp_dir): 
self._test(*self.create_net_const(**params, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_transpose_const(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_transpose_const(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net_const(**params, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) diff --git a/tests/layer_tests/onnx_tests/test_trigonometry.py b/tests/layer_tests/onnx_tests/test_trigonometry.py index d1f1c02810cc6b..946a4e3a214e7d 100644 --- a/tests/layer_tests/onnx_tests/test_trigonometry.py +++ b/tests/layer_tests/onnx_tests/test_trigonometry.py @@ -179,140 +179,140 @@ def create_net_const(self, shape, op, precision, ir_version): @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_sin(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_sin(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net(**params, ir_version=ir_version, op='Sin'), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_sinh(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_sinh(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net(**params, ir_version=ir_version, op='Sinh'), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_asin(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_asin(self, params, ie_device, precision, ir_version, temp_dir): 
self._test(*self.create_net(**params, ir_version=ir_version, op='Asin'), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data_precommit) @pytest.mark.precommit - def test_cos_precommit(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_cos_precommit(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net(**params, ir_version=ir_version, op='Cos'), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_cos(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_cos(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net(**params, ir_version=ir_version, op='Cos'), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_cosh(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_cosh(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net(**params, ir_version=ir_version, op='Cosh'), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_acos(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_acos(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net(**params, ir_version=ir_version, op='Acos'), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_tan(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_tan(self, params, 
ie_device, precision, ir_version, temp_dir): self._test(*self.create_net(**params, ir_version=ir_version, op='Tan'), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_tanh(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_tanh(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net(**params, ir_version=ir_version, op='Tanh'), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_atan(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_atan(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net(**params, ir_version=ir_version, op='Atan'), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_sin_const(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_sin_const(self, params, ie_device, precision, ir_version, temp_dir): self._test( *self.create_net_const(**params, ir_version=ir_version, precision=precision, op='Sin'), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_sinh_const(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_sinh_const(self, params, ie_device, precision, ir_version, temp_dir): self._test( *self.create_net_const(**params, ir_version=ir_version, precision=precision, op='Sinh'), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) 
@pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_asin_const(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_asin_const(self, params, ie_device, precision, ir_version, temp_dir): self._test( *self.create_net_const(**params, ir_version=ir_version, precision=precision, op='Asin'), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data_precommit) @pytest.mark.precommit - def test_cos_const_precommit(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_cos_const_precommit(self, params, ie_device, precision, ir_version, temp_dir): self._test( *self.create_net_const(**params, ir_version=ir_version, precision=precision, op='Cos'), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_cos_const(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_cos_const(self, params, ie_device, precision, ir_version, temp_dir): self._test( *self.create_net_const(**params, ir_version=ir_version, precision=precision, op='Cos'), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_cosh_const(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_cosh_const(self, params, ie_device, precision, ir_version, temp_dir): self._test( *self.create_net_const(**params, ir_version=ir_version, precision=precision, op='Cosh'), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) 
@pytest.mark.nightly - def test_acos_const(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_acos_const(self, params, ie_device, precision, ir_version, temp_dir): self._test( *self.create_net_const(**params, ir_version=ir_version, precision=precision, op='Acos'), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_tan_const(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_tan_const(self, params, ie_device, precision, ir_version, temp_dir): self._test( *self.create_net_const(**params, ir_version=ir_version, precision=precision, op='Tan'), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_tanh_const(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_tanh_const(self, params, ie_device, precision, ir_version, temp_dir): self._test( *self.create_net_const(**params, ir_version=ir_version, precision=precision, op='Tanh'), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_atan_const(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_atan_const(self, params, ie_device, precision, ir_version, temp_dir): self._test( *self.create_net_const(**params, ir_version=ir_version, precision=precision, op='Atan'), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) diff --git a/tests/layer_tests/onnx_tests/test_unsqueeze.py b/tests/layer_tests/onnx_tests/test_unsqueeze.py index 
9f2dca7aa096bd..d759d68f088a8e 100644 --- a/tests/layer_tests/onnx_tests/test_unsqueeze.py +++ b/tests/layer_tests/onnx_tests/test_unsqueeze.py @@ -176,47 +176,47 @@ def create_unsqueeze_net_const(self, axes, input_shape, output_shape, ir_version @pytest.mark.parametrize("params", test_data_5D) @pytest.mark.nightly @pytest.mark.skip(reason='GREEN_SUITE') - def test_unsqueeze_5D(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_unsqueeze_5D(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_unsqueeze_net(**params, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data_4D) @pytest.mark.nightly @pytest.mark.skip(reason='GREEN_SUITE') - def test_unsqueeze_4D(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_unsqueeze_4D(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_unsqueeze_net(**params, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data_3D) @pytest.mark.nightly @pytest.mark.skip(reason='GREEN_SUITE') - def test_unsqueeze_3D(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_unsqueeze_3D(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_unsqueeze_net(**params, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data_5D) @pytest.mark.nightly @pytest.mark.skip(reason='GREEN_SUITE') - def test_unsqueeze_const_5D(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_unsqueeze_const_5D(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_unsqueeze_net_const(**params, 
ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data_4D) @pytest.mark.nightly @pytest.mark.skip(reason='GREEN_SUITE') - def test_unsqueeze_const_4D(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_unsqueeze_const_4D(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_unsqueeze_net_const(**params, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data_3D) @pytest.mark.nightly @pytest.mark.skip(reason='GREEN_SUITE') - def test_unsqueeze_const_3D(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_unsqueeze_const_3D(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_unsqueeze_net_const(**params, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) diff --git a/tests/layer_tests/onnx_tests/test_upsample.py b/tests/layer_tests/onnx_tests/test_upsample.py index 73bac72f71a101..bb85505ac4e270 100644 --- a/tests/layer_tests/onnx_tests/test_upsample.py +++ b/tests/layer_tests/onnx_tests/test_upsample.py @@ -94,20 +94,18 @@ def create_net(self, shape, mode, scales, opset, ir_version): @pytest.mark.parametrize("mode", [None, 'nearest']) @pytest.mark.parametrize("opset", [7, 9]) @pytest.mark.nightly - def test_upsample_nearest(self, params, mode, opset, ie_device, precision, ir_version, temp_dir, - use_old_api): + def test_upsample_nearest(self, params, mode, opset, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net(**params, mode=mode, opset=opset, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) 
@pytest.mark.parametrize("params", test_data) @pytest.mark.parametrize("opset", [7, 9]) @pytest.mark.nightly - def test_upsample_linear(self, params, opset, ie_device, precision, ir_version, temp_dir, - use_old_api): + def test_upsample_linear(self, params, opset, ie_device, precision, ir_version, temp_dir): if ie_device == 'GPU': pytest.skip('GREEN_SUITE') self._test(*self.create_net(**params, mode='linear', opset=opset, ir_version=ir_version), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) class PytorchLayerTest(CommonLayerTest): @@ -176,21 +174,20 @@ def create_net(self, shape, mode, size, scale_factor, ir_version): @pytest.mark.parametrize("params", test_data_precommit) @pytest.mark.parametrize("mode", [None, 'nearest']) def test_pytorch_upsample_precommit(self, params, mode, ie_device, precision, ir_version, - temp_dir, use_old_api): + temp_dir): if ie_device == 'GPU': pytest.skip('Linear upsampling not supported on GPU') self._test(*self.create_net(**params, mode=mode, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.parametrize("mode", [None, 'nearest', 'bilinear']) @pytest.mark.nightly @pytest.mark.skip(reason='GREEN_SUITE') - def test_pytorch_upsample(self, params, mode, ie_device, precision, ir_version, temp_dir, - use_old_api): + def test_pytorch_upsample(self, params, mode, ie_device, precision, ir_version, temp_dir): if ie_device == 'GPU' and mode == 'bilinear': pytest.skip('Linear upsampling not supported on GPU') self._test(*self.create_net(**params, mode=mode, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) diff --git a/tests/layer_tests/onnx_tests/test_where.py b/tests/layer_tests/onnx_tests/test_where.py index 4cec7361543321..e09cfd24fdf806 100644 --- 
a/tests/layer_tests/onnx_tests/test_where.py +++ b/tests/layer_tests/onnx_tests/test_where.py @@ -95,7 +95,7 @@ def create_net(self, condition_shape, shape_than, else_shape, ir_version): @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly @pytest.mark.skip(reason='GREEN_SUITE') - def test_where(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_where(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) diff --git a/tests/layer_tests/onnx_tests/test_xor.py b/tests/layer_tests/onnx_tests/test_xor.py index 59b901e2f0796f..c769ee8658909d 100644 --- a/tests/layer_tests/onnx_tests/test_xor.py +++ b/tests/layer_tests/onnx_tests/test_xor.py @@ -251,21 +251,21 @@ def create_net_const(self, shape1, shape2, ir_version): @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_xor(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_xor(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_xor_one_const(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_xor_one_const(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_net_one_const(**params, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_xor_const(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_xor_const(self, params, ie_device, precision, ir_version, temp_dir): 
self._test(*self.create_net_const(**params, ir_version=ir_version), ie_device, precision, ir_version, - temp_dir=temp_dir, use_old_api=use_old_api) + temp_dir=temp_dir) diff --git a/tests/layer_tests/ovc_python_api_tests/test_complex_params.py b/tests/layer_tests/ovc_python_api_tests/test_complex_params.py index 00f6d9f8bbbd24..fab06da4d3ac5c 100644 --- a/tests/layer_tests/ovc_python_api_tests/test_complex_params.py +++ b/tests/layer_tests/ovc_python_api_tests/test_complex_params.py @@ -110,7 +110,7 @@ def create_tf_model_single_input_output(tmp_dir): @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly def test_mo_convert_tf_model(self, params, ie_device, precision, ir_version, - temp_dir, use_new_frontend, use_old_api): + temp_dir, use_new_frontend): tf_net_path = self.create_tf_model(temp_dir) test_params = params['params_test'] @@ -150,7 +150,7 @@ def test_mo_convert_tf_model(self, params, ie_device, precision, ir_version, @pytest.mark.nightly @pytest.mark.precommit def test_mo_convert_tf_model_single_input_output(self, params, ie_device, precision, ir_version, - temp_dir, use_new_frontend, use_old_api): + temp_dir, use_new_frontend): tf_net_path = self.create_tf_model_single_input_output(temp_dir) test_params = params['params_test'] @@ -259,7 +259,7 @@ def create_onnx_model_with_several_outputs(temp_dir): @pytest.mark.nightly @pytest.mark.precommit def test_ovc_convert_model_with_comma_in_names(self, ie_device, precision, ir_version, - temp_dir, use_new_frontend, use_old_api): + temp_dir, use_new_frontend): onnx_net_path = self.create_onnx_model_with_comma_in_names(temp_dir) ref_model = self.create_ref_graph_with_comma_in_names() test_params = {'input_model': onnx_net_path, 'output': 'relu_1,relu_2'} @@ -269,7 +269,7 @@ def test_ovc_convert_model_with_comma_in_names(self, ie_device, precision, ir_ve @pytest.mark.nightly @pytest.mark.precommit def test_ovc_convert_model_with_several_output(self, ie_device, precision, ir_version, - temp_dir, 
use_new_frontend, use_old_api): + temp_dir, use_new_frontend): onnx_net_path = self.create_onnx_model_with_several_outputs(temp_dir) convert_model_params = {'input_model': onnx_net_path, 'output': ['Relu_1_data', 'concat']} cli_tool_params = {'input_model': onnx_net_path, 'output': 'Relu_1_data,concat'} diff --git a/tests/layer_tests/ovc_python_api_tests/test_extensions.py b/tests/layer_tests/ovc_python_api_tests/test_extensions.py index 5cc02f76f54416..62037ebeba24d9 100644 --- a/tests/layer_tests/ovc_python_api_tests/test_extensions.py +++ b/tests/layer_tests/ovc_python_api_tests/test_extensions.py @@ -113,7 +113,7 @@ def create_ref_graph2(): @pytest.mark.nightly @pytest.mark.precommit def test_mo_convert_extensions(self, params, ie_device, precision, ir_version, - temp_dir, use_new_frontend, use_old_api): + temp_dir, use_new_frontend): onnx_net_path = self.create_onnx_model(temp_dir) test_params = params['params_test'] diff --git a/tests/layer_tests/ovc_python_api_tests/test_paddle.py b/tests/layer_tests/ovc_python_api_tests/test_paddle.py index 80b8efefea3cf9..1091a30b4b86ef 100644 --- a/tests/layer_tests/ovc_python_api_tests/test_paddle.py +++ b/tests/layer_tests/ovc_python_api_tests/test_paddle.py @@ -126,7 +126,7 @@ class TestPaddleConversionParams(CommonMOConvertTest): @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly def test_conversion_params(self, params, ie_device, precision, ir_version, - temp_dir, use_new_frontend, use_old_api): + temp_dir, use_new_frontend): fw_model = params['fw_model'] test_params = params['params_test'] ref_model = params['ref_model'] diff --git a/tests/layer_tests/ovc_python_api_tests/test_pytorch.py b/tests/layer_tests/ovc_python_api_tests/test_pytorch.py index 13119af29c5ed6..77a088fea8f411 100644 --- a/tests/layer_tests/ovc_python_api_tests/test_pytorch.py +++ b/tests/layer_tests/ovc_python_api_tests/test_pytorch.py @@ -1055,7 +1055,7 @@ class TestMoConvertPyTorch(CommonMOConvertTest): @pytest.mark.nightly 
@pytest.mark.precommit def test_mo_import_from_memory(self, create_model, ie_device, precision, ir_version, - temp_dir, use_new_frontend, use_old_api): + temp_dir, use_new_frontend): fw_model, graph_ref, mo_params = create_model(temp_dir) test_params = {'input_model': fw_model} @@ -1209,7 +1209,7 @@ class TestPytorchConversionParams(CommonMOConvertTest): @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly def test_conversion_params(self, params, ie_device, precision, ir_version, - temp_dir, use_new_frontend, use_old_api): + temp_dir, use_new_frontend): fw_model = params['fw_model'] test_params = params['params_test'] ref_model = params['ref_model'] diff --git a/tests/layer_tests/ovc_python_api_tests/test_tf.py b/tests/layer_tests/ovc_python_api_tests/test_tf.py index 59abc46105bc76..5f0b8fa5a37253 100644 --- a/tests/layer_tests/ovc_python_api_tests/test_tf.py +++ b/tests/layer_tests/ovc_python_api_tests/test_tf.py @@ -1039,7 +1039,7 @@ class TestTFConversionParams(CommonMOConvertTest): @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly def test_mo_convert_tf_model(self, params, ie_device, precision, ir_version, - temp_dir, use_new_frontend, use_old_api): + temp_dir, use_new_frontend): fw_model = params['fw_model'] test_params = params['params_test'] ref_model = params['ref_model'] diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_activation.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_activation.py index 5bf1a97bfe0d44..d28a4b4bb7ec57 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_activation.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_activation.py @@ -70,7 +70,7 @@ def create_keras_activation_net(self, activation_func, input_names, input_shapes @pytest.mark.nightly @pytest.mark.precommit def test_keras_activation_float32(self, params, ie_device, precision, ir_version, temp_dir, - use_old_api, use_new_frontend): + use_new_frontend): 
self._test(*self.create_keras_activation_net(**params, ir_version=ir_version), ie_device, - precision, temp_dir=temp_dir, ir_version=ir_version, use_old_api=use_old_api, + precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_activity_regularization.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_activity_regularization.py index eefbdcdffbe117..65db84fa182120 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_activity_regularization.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_activity_regularization.py @@ -32,10 +32,10 @@ def create_keras_activity_regularization_net(self, l1_param, l2_param, input_nam @pytest.mark.nightly @pytest.mark.precommit def test_keras_activity_regularization_case1_float32(self, params, ie_device, precision, - ir_version, temp_dir, use_old_api, + ir_version, temp_dir, use_new_frontend): self._test(*self.create_keras_activity_regularization_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_old_api=use_old_api, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) test_data_extended_float32 = [ @@ -56,7 +56,7 @@ def test_keras_activity_regularization_case1_float32(self, params, ie_device, pr @pytest.mark.parametrize("params", test_data_extended_float32) @pytest.mark.nightly def test_keras_activity_regularization_case_2_float32(self, params, ie_device, precision, - ir_version, temp_dir, use_old_api): + ir_version, temp_dir): self._test(*self.create_keras_activity_regularization_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_old_api=use_old_api, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, **params) diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_add.py 
b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_add.py index 35c363832b9823..d83ff60396922c 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_add.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_add.py @@ -92,9 +92,9 @@ def create_keras_add_net(self, input_names, input_shapes, input_type, ir_version @pytest.mark.parametrize("params", test_data_float32_precommit) @pytest.mark.precommit def test_keras_add_float32_precommit(self, params, ie_device, precision, ir_version, temp_dir, - use_old_api, use_new_frontend): + use_new_frontend): self._test(*self.create_keras_add_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) test_data_float32 = [dict(input_names=["x1", "x2"], input_shapes=[[5, 4], [5, 4]], @@ -109,10 +109,10 @@ def test_keras_add_float32_precommit(self, params, ie_device, precision, ir_vers @pytest.mark.parametrize("params", test_data_float32) @pytest.mark.nightly - def test_keras_add_float32(self, params, ie_device, precision, ir_version, temp_dir, use_old_api, + def test_keras_add_float32(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend): self._test(*self.create_keras_add_net(**params, ir_version=ir_version), - ie_device, precision, ir_version=ir_version, temp_dir=temp_dir, use_old_api=use_old_api, + ie_device, precision, ir_version=ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend, **params) test_data_float32_several_inputs_precommit = [ @@ -123,10 +123,10 @@ def test_keras_add_float32(self, params, ie_device, precision, ir_version, temp_ @pytest.mark.parametrize("params", test_data_float32_several_inputs_precommit) @pytest.mark.precommit def test_keras_add_float32_several_inputs_precommit(self, params, ie_device, precision, - ir_version, temp_dir, use_old_api, + ir_version, 
temp_dir, use_new_frontend): self._test(*self.create_keras_add_net(**params, ir_version=ir_version), - ie_device, precision, ir_version=ir_version, temp_dir=temp_dir, use_old_api=use_old_api, + ie_device, precision, ir_version=ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend, **params) test_data_float32_several_inputs = [dict(input_names=["x1", "x2", "x3"], @@ -147,7 +147,7 @@ def test_keras_add_float32_several_inputs_precommit(self, params, ie_device, pre @pytest.mark.parametrize("params", test_data_float32_several_inputs) @pytest.mark.nightly def test_keras_add_float32_several_inputs(self, params, ie_device, precision, ir_version, - temp_dir, use_old_api, use_new_frontend): + temp_dir, use_new_frontend): self._test(*self.create_keras_add_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_additive_attention.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_additive_attention.py index 29360ea89f20d7..76fd741874ed41 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_additive_attention.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_additive_attention.py @@ -34,9 +34,9 @@ def create_keras_additive_attention_net(self, causal, dropout, use_scale, input_ @pytest.mark.nightly @pytest.mark.precommit def test_keras_additive_attention_float32_case1(self, params, ie_device, precision, ir_version, - temp_dir, use_old_api, use_new_frontend): + temp_dir, use_new_frontend): self._test(*self.create_keras_additive_attention_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) 
test_data_extended_float32 = [ @@ -69,7 +69,7 @@ def test_keras_additive_attention_float32_case1(self, params, ie_device, precisi @pytest.mark.parametrize("params", test_data_extended_float32) @pytest.mark.nightly def test_keras_additive_attention_float32_case2(self, params, ie_device, precision, ir_version, - temp_dir, use_old_api, use_new_frontend): + temp_dir, use_new_frontend): self._test(*self.create_keras_additive_attention_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_alpha_dropout.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_alpha_dropout.py index 7563f95c4e3953..b7f8e2b6005aed 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_alpha_dropout.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_alpha_dropout.py @@ -29,9 +29,9 @@ def create_keras_alpha_dropout_net(self, rate, input_names, input_shapes, input_ @pytest.mark.nightly @pytest.mark.precommit def test_keras_keras_alpha_dropout_case1_float32(self, params, ie_device, precision, ir_version, - temp_dir, use_old_api, use_new_frontend): + temp_dir, use_new_frontend): self._test(*self.create_keras_alpha_dropout_net(**params, ir_version=ir_version), ie_device, - precision, temp_dir=temp_dir, ir_version=ir_version, use_old_api=use_old_api, + precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) test_data_extended_float32 = [dict(rate=0.5, input_names=["x1"], input_shapes=[[1]], @@ -48,7 +48,7 @@ def test_keras_keras_alpha_dropout_case1_float32(self, params, ie_device, precis @pytest.mark.parametrize("params", test_data_extended_float32) @pytest.mark.nightly def test_keras_keras_alpha_dropout_case2_float32(self, params, ie_device, precision, ir_version, - 
temp_dir, use_old_api, use_new_frontend): + temp_dir, use_new_frontend): self._test(*self.create_keras_alpha_dropout_net(**params, ir_version=ir_version), ie_device, - precision, temp_dir=temp_dir, ir_version=ir_version, use_old_api=use_old_api, + precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_attention.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_attention.py index e0c81721ac62ec..2143b20ae78a41 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_attention.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_attention.py @@ -32,9 +32,9 @@ def create_keras_attention_net(self, causal, dropout, use_scale, input_names, in @pytest.mark.nightly @pytest.mark.precommit def test_keras_attention_float32_case1(self, params, ie_device, precision, ir_version, temp_dir, - use_old_api, use_new_frontend): + use_new_frontend): self._test(*self.create_keras_attention_net(**params, ir_version=ir_version), ie_device, - precision, temp_dir=temp_dir, ir_version=ir_version, use_old_api=use_old_api, + precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) test_data_extended_float32 = [ @@ -68,7 +68,7 @@ def test_keras_attention_float32_case1(self, params, ie_device, precision, ir_ve @pytest.mark.parametrize("params", test_data_extended_float32) @pytest.mark.nightly def test_keras_attention_float32_case2(self, params, ie_device, precision, ir_version, temp_dir, - use_old_api, use_new_frontend): + use_new_frontend): self._test(*self.create_keras_attention_net(**params, ir_version=ir_version), ie_device, - precision, temp_dir=temp_dir, ir_version=ir_version, use_old_api=use_old_api, + precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_average.py 
b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_average.py index 37229eebcdc456..bfd5015a3718db 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_average.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_average.py @@ -40,9 +40,9 @@ def create_keras_average_net(self, input_names, input_shapes, input_type, ir_ver @pytest.mark.nightly @pytest.mark.precommit def test_keras_average_case1_float32(self, params, ie_device, precision, ir_version, temp_dir, - use_old_api, use_new_frontend): + use_new_frontend): self._test(*self.create_keras_average_net(**params, ir_version=ir_version), ie_device, - precision, temp_dir=temp_dir, ir_version=ir_version, use_old_api=use_old_api, + precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) test_data_extended_float32 = [dict(input_names=["x1", "x2"], input_shapes=[[1], [1]], @@ -59,9 +59,9 @@ def test_keras_average_case1_float32(self, params, ie_device, precision, ir_vers @pytest.mark.parametrize("params", test_data_extended_float32) @pytest.mark.nightly def test_keras_average_case2_float32(self, params, ie_device, precision, ir_version, temp_dir, - use_old_api, use_new_frontend): + use_new_frontend): self._test(*self.create_keras_average_net(**params, ir_version=ir_version), ie_device, - precision, temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) test_data_float32_several_inputs = [ @@ -76,7 +76,7 @@ def test_keras_average_case2_float32(self, params, ie_device, precision, ir_vers @pytest.mark.parametrize("params", test_data_float32_several_inputs) @pytest.mark.nightly def test_keras_average_float32_several_inputs(self, params, ie_device, precision, ir_version, - temp_dir, use_old_api, use_new_frontend): + temp_dir, use_new_frontend): self._test(*self.create_keras_average_net(**params, ir_version=ir_version), ie_device, - 
precision, temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_avg_pool_1D.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_avg_pool_1D.py index 55d5f0567c5d78..51f041f30cd82d 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_avg_pool_1D.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_avg_pool_1D.py @@ -34,9 +34,9 @@ def create_keras_avg_pool_1D_net(self, pool_size, strides, padding, data_format, @pytest.mark.nightly @pytest.mark.precommit def test_keras_avg_pool_1D_case1_float32(self, params, ie_device, precision, ir_version, - temp_dir, use_old_api, use_new_frontend): + temp_dir, use_new_frontend): self._test(*self.create_keras_avg_pool_1D_net(**params, ir_version=ir_version), ie_device, - precision, temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) test_data_extended_float32 = [ @@ -50,7 +50,7 @@ def test_keras_avg_pool_1D_case1_float32(self, params, ie_device, precision, ir_ @pytest.mark.parametrize("params", test_data_extended_float32) @pytest.mark.nightly def test_keras_avg_pool_1D_case2_float32(self, params, ie_device, precision, ir_version, - temp_dir, use_old_api, use_new_frontend): + temp_dir, use_new_frontend): self._test(*self.create_keras_avg_pool_1D_net(**params, ir_version=ir_version), ie_device, - precision, temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_avg_pool_2D.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_avg_pool_2D.py index 7edefba40f75fc..84f2f8a4809cda 100644 --- 
a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_avg_pool_2D.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_avg_pool_2D.py @@ -34,9 +34,9 @@ def create_keras_avg_pool_2D_net(self, pool_size, strides, padding, data_format, @pytest.mark.nightly @pytest.mark.precommit def test_keras_avg_pool_2D_float32(self, params, ie_device, precision, ir_version, temp_dir, - use_old_api, use_new_frontend): + use_new_frontend): self._test(*self.create_keras_avg_pool_2D_net(**params, ir_version=ir_version), ie_device, - precision, temp_dir=temp_dir, ir_version=ir_version, use_old_api=use_old_api, + precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) test_data_extended_float32 = [ @@ -53,7 +53,7 @@ def test_keras_avg_pool_2D_float32(self, params, ie_device, precision, ir_versio @pytest.mark.parametrize("params", test_data_extended_float32) @pytest.mark.nightly def test_keras_avg_pool_2D_extended_float32(self, params, ie_device, precision, ir_version, - temp_dir, use_old_api, use_new_frontend): + temp_dir, use_new_frontend): self._test(*self.create_keras_avg_pool_2D_net(**params, ir_version=ir_version), ie_device, - precision, temp_dir=temp_dir, ir_version=ir_version, use_old_api=use_old_api, + precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_avg_pool_3D.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_avg_pool_3D.py index 9a2954ef0e6ef2..2a3a0d2b32f3ba 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_avg_pool_3D.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_avg_pool_3D.py @@ -33,9 +33,9 @@ def create_keras_avg_pool_3D_net(self, pool_size, strides, padding, data_format, @pytest.mark.nightly @pytest.mark.precommit def test_keras_avg_pool_3D_float32(self, params, ie_device, precision, ir_version, temp_dir, - use_old_api, use_new_frontend): + 
use_new_frontend): self._test(*self.create_keras_avg_pool_3D_net(**params, ir_version=ir_version), ie_device, - precision, temp_dir=temp_dir, ir_version=ir_version, use_old_api=use_old_api, + precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) test_data_extended_float32 = [ @@ -52,7 +52,7 @@ def test_keras_avg_pool_3D_float32(self, params, ie_device, precision, ir_versio @pytest.mark.parametrize("params", test_data_extended_float32) @pytest.mark.nightly def test_keras_avg_pool_3D_extended_float32(self, params, ie_device, precision, ir_version, - temp_dir, use_old_api, use_new_frontend): + temp_dir, use_new_frontend): self._test(*self.create_keras_avg_pool_3D_net(**params, ir_version=ir_version), ie_device, - precision, temp_dir=temp_dir, ir_version=ir_version, use_old_api=use_old_api, + precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_batch_normalization.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_batch_normalization.py index a87762000053c0..941a27f35ca738 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_batch_normalization.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_batch_normalization.py @@ -34,10 +34,10 @@ def create_keras_batch_normalization_net(self, axis, momentum, epsilon, center, @pytest.mark.precommit @pytest.mark.precommit_tf_fe def test_keras_batch_normalization_float32(self, params, ie_device, precision, ir_version, - temp_dir, use_old_api, use_new_frontend): + temp_dir, use_new_frontend): self._test(*self.create_keras_batch_normalization_net(**params, ir_version=ir_version), ie_device, precision, - temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) test_data_extended_float32 = [dict(axis=1, momentum=0.5, epsilon=1e-4, 
center=True, scale=False, @@ -58,7 +58,7 @@ def test_keras_batch_normalization_float32(self, params, ie_device, precision, i @pytest.mark.parametrize("params", test_data_extended_float32) @pytest.mark.nightly def test_keras_batch_normalization_extended_float32(self, params, ie_device, precision, - ir_version, temp_dir, use_old_api, use_new_frontend): + ir_version, temp_dir, use_new_frontend): self._test(*self.create_keras_batch_normalization_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_bidirectional.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_bidirectional.py index 019a3477822e5a..55e842d00def5a 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_bidirectional.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_bidirectional.py @@ -46,7 +46,7 @@ def create_keras_bidirectional_net(self, n_units, RNN_layer, input_names, input_ @pytest.mark.parametrize("params", test_data_float32) @pytest.mark.nightly def test_keras_bidirectional_float32(self, params, ie_device, precision, ir_version, temp_dir, - use_old_api, use_new_frontend): + use_new_frontend): self._test(*self.create_keras_bidirectional_net(**params, ir_version=ir_version), ie_device, - precision, temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_concatenate.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_concatenate.py index c51ee978bb5560..64640c0e7bec01 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_concatenate.py +++ 
b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_concatenate.py @@ -33,10 +33,10 @@ def create_keras_concatenate_net(self, axis, input_names, input_shapes, input_ty @pytest.mark.nightly @pytest.mark.precommit def test_keras_concatenate_float32(self, params, ie_device, precision, ir_version, temp_dir, - use_old_api, use_new_frontend): + use_new_frontend): self._test(*self.create_keras_concatenate_net(**params, ir_version=ir_version), ie_device, precision, - temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) test_data_extended_float32 = [ @@ -58,8 +58,8 @@ def test_keras_concatenate_float32(self, params, ie_device, precision, ir_versio @pytest.mark.parametrize("params", test_data_extended_float32) @pytest.mark.nightly def test_keras_concatenate_extended_float32(self, params, ie_device, precision, ir_version, - temp_dir, use_old_api, use_new_frontend): + temp_dir, use_new_frontend): self._test(*self.create_keras_concatenate_net(**params, ir_version=ir_version), ie_device, precision, - temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_conv_1d.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_conv_1d.py index 2364f429349ce7..6f7a7e1e91b2da 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_conv_1d.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_conv_1d.py @@ -60,9 +60,9 @@ def create_keras_conv1d_net(self, conv_params, input_names, input_shapes, input_ @pytest.mark.parametrize("params", test_data_float32) @pytest.mark.nightly - def test_keras_conv_1d_float32(self, params, ie_device, precision, ir_version, temp_dir, use_old_api, + def test_keras_conv_1d_float32(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend): 
self._test(*self.create_keras_conv1d_net(**params, ir_version=ir_version), ie_device, precision, - temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_conv_1d_transpose.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_conv_1d_transpose.py index 98f77c9d82404d..f6882cb4a727ab 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_conv_1d_transpose.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_conv_1d_transpose.py @@ -67,8 +67,8 @@ def create_keras_conv1d_transpose_net(self, params, input_names, input_shapes, i @pytest.mark.nightly @pytest.mark.xfail(reason="Needs tensorflow 2.3.0.") def test_keras_conv_1d_case1_transpose_float32(self, params, ie_device, precision, ir_version, - temp_dir, use_old_api, use_new_frontend): + temp_dir, use_new_frontend): self._test(*self.create_keras_conv1d_transpose_net(**params, ir_version=ir_version), ie_device, precision, - temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_conv_2d.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_conv_2d.py index b4847f15fd901a..9388042d4fb406 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_conv_2d.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_conv_2d.py @@ -59,9 +59,9 @@ def create_keras_conv2d_net(self, conv_params, input_names, input_shapes, input_ @pytest.mark.parametrize("params", test_data_float32) @pytest.mark.nightly - def test_keras_conv_2d_float32(self, params, ie_device, precision, ir_version, temp_dir, use_old_api, + def test_keras_conv_2d_float32(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend): 
self._test(*self.create_keras_conv2d_net(**params, ir_version=ir_version), ie_device, precision, - temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_conv_2d_transpose.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_conv_2d_transpose.py index 8d785c0d7b56c6..618e2122f15215 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_conv_2d_transpose.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_conv_2d_transpose.py @@ -65,8 +65,8 @@ def create_keras_conv_2d_transpose_net(self, conv_params, input_names, input_sha @pytest.mark.parametrize("params", test_data_float32) @pytest.mark.nightly def test_keras_conv_2d_transpose_float32(self, params, ie_device, precision, ir_version, - temp_dir, use_old_api, use_new_frontend): + temp_dir, use_new_frontend): self._test(*self.create_keras_conv_2d_transpose_net(**params, ir_version=ir_version), ie_device, precision, - temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_conv_3d.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_conv_3d.py index 1ce35a8b5d5305..ffb7fa2d320e7f 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_conv_3d.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_conv_3d.py @@ -62,8 +62,8 @@ def create_keras_conv3d_net(self, conv_params, input_names, input_shapes, input_ @pytest.mark.parametrize("params", test_data_float32) @pytest.mark.nightly - def test_keras_conv_3d_float32(self, params, ie_device, precision, ir_version, temp_dir, use_old_api, + def test_keras_conv_3d_float32(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend): 
self._test(*self.create_keras_conv3d_net(**params, ir_version=ir_version), ie_device, - precision, temp_dir=temp_dir, ir_version=ir_version, use_old_api=use_old_api, + precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_conv_3d_transpose.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_conv_3d_transpose.py index b6179c26093894..26191710b612ea 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_conv_3d_transpose.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_conv_3d_transpose.py @@ -65,8 +65,8 @@ def create_keras_conv_3d_transpose_net(self, conv_params, input_names, input_sha @pytest.mark.parametrize("params", test_data_float32) @pytest.mark.nightly def test_keras_conv_3D_transpose_float32(self, params, ie_device, precision, ir_version, - temp_dir, use_old_api, use_new_frontend): + temp_dir, use_new_frontend): self._test(*self.create_keras_conv_3d_transpose_net(**params, ir_version=ir_version), ie_device, precision, - temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_conv_lstm_2d.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_conv_lstm_2d.py index 35f440c38234d8..a2ed7c8c4ab7ec 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_conv_lstm_2d.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_conv_lstm_2d.py @@ -54,8 +54,8 @@ def create_keras_conv_lstm_2d_net(self, params, input_shapes): @pytest.mark.precommit_tf_fe @pytest.mark.nightly def test_keras_conv_lstm_2d_basic(self, params, ie_device, precision, ir_version, temp_dir, - use_old_api, use_new_frontend): + use_new_frontend): self._test(*self.create_keras_conv_lstm_2d_net(**params), ie_device, precision, - temp_dir=temp_dir, 
use_old_api=use_old_api, ir_version=ir_version, + temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_cropping_1d.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_cropping_1d.py index 49bfaec17baeb3..9b7778bdd0a8f0 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_cropping_1d.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_cropping_1d.py @@ -34,7 +34,7 @@ def create_keras_cropping_1d_net(self, cropping, input_names, input_shapes, inpu @pytest.mark.parametrize("params", test_data_float32) @pytest.mark.nightly def test_keras_cropping_1d_float32(self, params, ie_device, precision, ir_version, temp_dir, - use_old_api, use_new_frontend): + use_new_frontend): self._test(*self.create_keras_cropping_1d_net(**params, ir_version=ir_version), ie_device, - precision, temp_dir=temp_dir, ir_version=ir_version, use_old_api=use_old_api, + precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_cropping_2d.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_cropping_2d.py index 1080d596e0ff85..c19dae3c395123 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_cropping_2d.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_cropping_2d.py @@ -36,8 +36,8 @@ def create_keras_cropping_2d_net(self, cropping, input_names, input_shapes, inpu @pytest.mark.parametrize("params", test_data_float32) @pytest.mark.nightly def test_keras_cropping_2d_float32(self, params, ie_device, precision, ir_version, temp_dir, - use_old_api, use_new_frontend): + use_new_frontend): self._test(*self.create_keras_cropping_2d_net(**params, ir_version=ir_version), ie_device, precision, - temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + temp_dir=temp_dir, ir_version=ir_version, 
use_new_frontend=use_new_frontend, **params) diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_cropping_3d.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_cropping_3d.py index 349d2d89187ec1..364b46b0dea804 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_cropping_3d.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_cropping_3d.py @@ -36,8 +36,8 @@ def create_keras_cropping_3d_net(self, cropping, input_names, input_shapes, inpu @pytest.mark.parametrize("params", test_data_float32) @pytest.mark.nightly def test_keras_cropping_3d_float32(self, params, ie_device, precision, ir_version, temp_dir, - use_old_api, use_new_frontend): + use_new_frontend): self._test(*self.create_keras_cropping_3d_net(**params, ir_version=ir_version), ie_device, precision, - temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_dense.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_dense.py index adb6dbc24207d2..ee2461ba7f4804 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_dense.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_dense.py @@ -42,10 +42,10 @@ def create_keras_dense_net(self, input_names, input_shapes, input_type, units, a @pytest.mark.parametrize("params", test_data_float32_simple) @pytest.mark.nightly @pytest.mark.precommit - def test_keras_dense_float32(self, params, ie_device, precision, ir_version, temp_dir, use_old_api, + def test_keras_dense_float32(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend): self._test(*self.create_keras_dense_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_old_api=use_old_api, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, 
use_new_frontend=use_new_frontend, **params) test_data_float32_activation = [ @@ -67,7 +67,7 @@ def test_keras_dense_float32(self, params, ie_device, precision, ir_version, tem @pytest.mark.nightly @pytest.mark.precommit def test_keras_activation_float32(self, params, ie_device, precision, ir_version, temp_dir, - use_old_api, use_new_frontend): + use_new_frontend): self._test(*self.create_keras_dense_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_old_api=use_old_api, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_depthwiseconv2D.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_depthwiseconv2D.py index 8e39c7cecf7952..1b3a21317c61bf 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_depthwiseconv2D.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_depthwiseconv2D.py @@ -51,10 +51,10 @@ def create_keras_dconv2D_net(self, input_names, input_shapes, input_type, kernel @pytest.mark.parametrize("params", test_data_format_padding) @pytest.mark.nightly @pytest.mark.precommit - def test_keras_dconv2D_float32(self, params, ie_device, precision, ir_version, temp_dir, use_old_api, + def test_keras_dconv2D_float32(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend): self._test(*self.create_keras_dconv2D_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_old_api=use_old_api, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) test_data_use_bias_true = [ @@ -78,9 +78,9 @@ def test_keras_dconv2D_float32(self, params, ie_device, precision, ir_version, t @pytest.mark.nightly @pytest.mark.precommit def test_keras_use_bias_float32(self, params, ie_device, precision, ir_version, temp_dir, - use_old_api, 
use_new_frontend): + use_new_frontend): self._test(*self.create_keras_dconv2D_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_old_api=use_old_api, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) test_data_activations = [ @@ -104,7 +104,7 @@ def test_keras_use_bias_float32(self, params, ie_device, precision, ir_version, @pytest.mark.nightly @pytest.mark.precommit def test_keras_activations_float32(self, params, ie_device, precision, ir_version, temp_dir, - use_old_api, use_new_frontend): + use_new_frontend): self._test(*self.create_keras_dconv2D_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_old_api=use_old_api, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_dot.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_dot.py index a3c62ea26c8bf5..925e21e66ddf92 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_dot.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_dot.py @@ -44,10 +44,10 @@ def create_keras_dot_net(self, input_names, input_shapes, input_type, axes, norm @pytest.mark.nightly @pytest.mark.precommit def test_keras_dot_normalize_float32(self, params, ie_device, precision, temp_dir, ir_version, - use_old_api, use_new_frontend): + use_new_frontend): self._test(*self.create_keras_dot_net(**params, ir_version=ir_version), ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, - use_old_api=use_old_api, use_new_frontend=use_new_frontend, **params) + use_new_frontend=use_new_frontend, **params) test_data_difficult_axes_float32 = [ dict(input_names=["x", "y"], input_shapes=[[5, 4, 4], [5, 4, 4]], input_type=tf.float32, @@ -70,10 +70,10 @@ def test_keras_dot_normalize_float32(self, params, ie_device, 
precision, temp_di @pytest.mark.nightly @pytest.mark.precommit def test_keras_dot_difficult_axes_float32(self, params, ie_device, precision, temp_dir, - ir_version, use_old_api, use_new_frontend): + ir_version, use_new_frontend): self._test(*self.create_keras_dot_net(**params, ir_version=ir_version), ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, - use_old_api=use_old_api, use_new_frontend=use_new_frontend, **params) + use_new_frontend=use_new_frontend, **params) test_data_normalize_higher_rank = [ dict(input_names=["x", "y"], input_shapes=[[5, 1, 4], [5, 1, 4]], input_type=tf.float32, @@ -99,7 +99,7 @@ def test_keras_dot_difficult_axes_float32(self, params, ie_device, precision, te @pytest.mark.nightly @pytest.mark.precommit def test_keras_dot_normalize_higher_rank(self, params, ie_device, precision, temp_dir, - ir_version, use_old_api, use_new_frontend): + ir_version, use_new_frontend): self._test(*self.create_keras_dot_net(**params, ir_version=ir_version), ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, - use_old_api=use_old_api, use_new_frontend=use_new_frontend, **params) + use_new_frontend=use_new_frontend, **params) diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_dropout.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_dropout.py index 9d26a308199602..bcecbf20e31fe0 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_dropout.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_dropout.py @@ -32,10 +32,10 @@ def create_keras_dropout_net(self, input_names, input_shapes, input_type, rate, @pytest.mark.parametrize("params", test_data_float32_precommit) @pytest.mark.precommit - def test_keras_dropout_float32(self, params, ie_device, precision, ir_version, temp_dir, use_old_api, + def test_keras_dropout_float32(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend): self._test(*self.create_keras_dropout_net(**params, ir_version=ir_version), - 
ie_device, precision, temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) test_data_float32 = [ @@ -55,8 +55,8 @@ def test_keras_dropout_float32(self, params, ie_device, precision, ir_version, t @pytest.mark.parametrize("params", test_data_float32) @pytest.mark.nightly - def test_keras_dropout_float32(self, params, ie_device, precision, ir_version, temp_dir, use_old_api, + def test_keras_dropout_float32(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend): self._test(*self.create_keras_dropout_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_elu.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_elu.py index c5a9ecee9f6c69..c041c5a5f09adb 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_elu.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_elu.py @@ -106,10 +106,10 @@ def create_keras_elu_net(self, input_names, input_shapes, input_type, alpha, ir_ @pytest.mark.parametrize("params", test_data_float32_precommit) @pytest.mark.precommit - def test_keras_elu_float32(self, params, ie_device, precision, ir_version, temp_dir, use_old_api, + def test_keras_elu_float32(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend): self._test(*self.create_keras_elu_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) test_data_float32 = [ @@ -120,10 +120,10 @@ def test_keras_elu_float32(self, params, ie_device, precision, 
ir_version, temp_ @pytest.mark.parametrize("params", test_data_float32) @pytest.mark.nightly - def test_keras_elu_float32(self, params, ie_device, precision, ir_version, temp_dir, use_old_api, + def test_keras_elu_float32(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend): self._test(*self.create_keras_elu_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) test_data_float32_alpha2 = [dict(input_names=["x1"], input_shapes=[[5, 4]], @@ -140,7 +140,7 @@ def test_keras_elu_float32(self, params, ie_device, precision, ir_version, temp_ @pytest.mark.precommit @pytest.mark.xfail(reason="51109") def test_keras_elu_float32_alpha2(self, params, ie_device, precision, ir_version, temp_dir, - use_old_api, use_new_frontend): + use_new_frontend): self._test(*self.create_keras_elu_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_embedding.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_embedding.py index 35105a2033b7d0..4f8d56ab58a54c 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_embedding.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_embedding.py @@ -40,10 +40,10 @@ def create_keras_emb_net(self, input_names, input_shapes, input_type, input_dim, @pytest.mark.parametrize("params", test_data_float32) @pytest.mark.nightly @pytest.mark.precommit - def test_keras_emb_float32(self, params, ie_device, precision, ir_version, temp_dir, use_old_api, + def test_keras_emb_float32(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend): 
self._test(*self.create_keras_emb_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_old_api=use_old_api, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) test_data_mask_zero_false = [ @@ -61,7 +61,7 @@ def test_keras_emb_float32(self, params, ie_device, precision, ir_version, temp_ @pytest.mark.nightly @pytest.mark.precommit def test_keras_emb_without_zero_mask_float32(self, params, ie_device, precision, ir_version, - temp_dir, use_old_api, use_new_frontend): + temp_dir, use_new_frontend): self._test(*self.create_keras_emb_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_old_api=use_old_api, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_flatten.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_flatten.py index 413f4a6eaf1bff..62e5539d584e4f 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_flatten.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_flatten.py @@ -34,8 +34,8 @@ def create_keras_flatten_net(self, input_names, input_shapes, input_type, data_f @pytest.mark.parametrize("params", test_data_float32) @pytest.mark.nightly @pytest.mark.precommit - def test_keras_flatten_float32(self, params, ie_device, precision, ir_version, temp_dir, use_old_api, + def test_keras_flatten_float32(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend): self._test(*self.create_keras_flatten_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) diff --git 
a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_global_average_pooling1D.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_global_average_pooling1D.py index e4b412bb96da8f..2041c37065df16 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_global_average_pooling1D.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_global_average_pooling1D.py @@ -35,7 +35,7 @@ def create_keras_global_avg_pooling1D_net(self, input_names, input_shapes, input @pytest.mark.nightly @pytest.mark.precommit def test_keras_global_avg_pooling1D_float32(self, params, ie_device, precision, ir_version, - temp_dir, use_old_api, use_new_frontend): + temp_dir, use_new_frontend): self._test(*self.create_keras_global_avg_pooling1D_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_old_api=use_old_api, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_global_average_pooling2D.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_global_average_pooling2D.py index 16d281c724751a..da02c36dfaf21f 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_global_average_pooling2D.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_global_average_pooling2D.py @@ -34,7 +34,7 @@ def create_keras_global_avg_pooling2D_net(self, input_names, input_shapes, input @pytest.mark.nightly @pytest.mark.precommit def test_keras_global_avg_pooling2D_float32(self, params, ie_device, precision, temp_dir, - ir_version, use_old_api, use_new_frontend): + ir_version, use_new_frontend): self._test(*self.create_keras_global_avg_pooling2D_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, 
use_new_frontend=use_new_frontend, **params) diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_global_average_pooling3D.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_global_average_pooling3D.py index 8b104cb67cb767..f51a9650f3189f 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_global_average_pooling3D.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_global_average_pooling3D.py @@ -34,7 +34,7 @@ def create_keras_global_avg_pooling3D_net(self, input_names, input_shapes, input @pytest.mark.nightly @pytest.mark.precommit def test_keras_global_avg_pooling3D_float32(self, params, ie_device, precision, temp_dir, - ir_version, use_old_api, use_new_frontend): + ir_version, use_new_frontend): self._test(*self.create_keras_global_avg_pooling3D_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_global_maxpool1D.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_global_maxpool1D.py index f2d7d2a6af268f..41c5110014b327 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_global_maxpool1D.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_global_maxpool1D.py @@ -35,7 +35,7 @@ def create_keras_global_max_pooling1D_net(self, input_names, input_shapes, input @pytest.mark.nightly @pytest.mark.precommit def test_keras_global_max_pooling1D_float32(self, params, ie_device, precision, temp_dir, - ir_version, use_old_api, use_new_frontend): + ir_version, use_new_frontend): self._test(*self.create_keras_global_max_pooling1D_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, 
use_new_frontend=use_new_frontend, **params) diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_global_maxpool2D.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_global_maxpool2D.py index 2bbb1ac79b8b0d..77713c444f0c0f 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_global_maxpool2D.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_global_maxpool2D.py @@ -35,7 +35,7 @@ def create_keras_global_maxpool2D_net(self, input_names, input_shapes, input_typ @pytest.mark.nightly @pytest.mark.precommit def test_keras_global_max_pooling2D_float32(self, params, ie_device, precision, temp_dir, - ir_version, use_old_api, use_new_frontend): + ir_version, use_new_frontend): self._test(*self.create_keras_global_maxpool2D_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_global_maxpool3D.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_global_maxpool3D.py index cdbe86e148ac2e..b300717689942c 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_global_maxpool3D.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_global_maxpool3D.py @@ -34,7 +34,7 @@ def create_keras_global_maxpool3D_net(self, input_names, input_shapes, input_typ @pytest.mark.nightly @pytest.mark.precommit def test_keras_global_maxpool3D_float32(self, params, ie_device, precision, temp_dir, - ir_version, use_old_api, use_new_frontend): + ir_version, use_new_frontend): self._test(*self.create_keras_global_maxpool3D_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_old_api=use_old_api, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) 
diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_gru.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_gru.py index 9873e0774ff8ac..a6f933d90442f6 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_gru.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_gru.py @@ -56,9 +56,9 @@ def create_keras_gru_net(self, input_names, input_shapes, input_type, units, act @pytest.mark.precommit @pytest.mark.precommit_tf_fe def test_keras_gru_with_bias_float32(self, params, ie_device, precision, temp_dir, ir_version, - use_old_api, use_new_frontend): + use_new_frontend): self._test(*self.create_keras_gru_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) test_data_without_bias = [ @@ -77,9 +77,9 @@ def test_keras_gru_with_bias_float32(self, params, ie_device, precision, temp_di @pytest.mark.nightly @pytest.mark.precommit def test_keras_gru_without_bias_float32(self, params, ie_device, precision, temp_dir, - ir_version, use_old_api, use_new_frontend): + ir_version, use_new_frontend): self._test(*self.create_keras_gru_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) test_data_different_flags = [ @@ -104,9 +104,9 @@ def test_keras_gru_without_bias_float32(self, params, ie_device, precision, temp @pytest.mark.nightly @pytest.mark.precommit def test_keras_gru_flags_float32(self, params, ie_device, precision, temp_dir, ir_version, - use_old_api, use_new_frontend): + use_new_frontend): self._test(*self.create_keras_gru_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + 
ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) test_data_zero_recurrent_dropout = [ @@ -129,8 +129,8 @@ def test_keras_gru_flags_float32(self, params, ie_device, precision, temp_dir, i @pytest.mark.precommit @pytest.mark.xfail(reason="50176") def test_keras_gru_flags_zero_recurrent_dropout_float32(self, params, ie_device, precision, - temp_dir, ir_version, use_old_api, + temp_dir, ir_version, use_new_frontend): self._test(*self.create_keras_gru_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_gru_cell.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_gru_cell.py index 926ec56b7018a7..882b8c3177c579 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_gru_cell.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_gru_cell.py @@ -49,8 +49,8 @@ def create_keras_grucell_net(self, input_names, input_shapes, input_type, units, @pytest.mark.nightly @pytest.mark.precommit @pytest.mark.xfail(reason="49537") - def test_keras_grucell_float32(self, params, ie_device, precision, temp_dir, ir_version, use_old_api, + def test_keras_grucell_float32(self, params, ie_device, precision, temp_dir, ir_version, use_new_frontend): self._test(*self.create_keras_grucell_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_lambda.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_lambda.py index c7003da26bf58c..3dd12f1bac93ce 100644 --- 
a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_lambda.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_lambda.py @@ -79,8 +79,8 @@ def create_keras_lambda_net(self, input_names, input_shapes, input_type, lmbd, e @pytest.mark.parametrize("params", test_data_float32) @pytest.mark.nightly @pytest.mark.precommit - def test_keras_lambda_float32(self, params, ie_device, precision, temp_dir, ir_version, use_old_api, + def test_keras_lambda_float32(self, params, ie_device, precision, temp_dir, ir_version, use_new_frontend): self._test(*self.create_keras_lambda_net(**params, ir_version=ir_version), ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, - use_old_api=use_old_api, use_new_frontend=use_new_frontend, **params) + use_new_frontend=use_new_frontend, **params) diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_layer_normalization.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_layer_normalization.py index 29422fac686231..41b17b7cae5266 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_layer_normalization.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_layer_normalization.py @@ -32,10 +32,10 @@ def create_keras_lnorm_net(self, input_names, input_shapes, input_type, axis, ep @pytest.mark.parametrize("params", test_data_float32_precommit) @pytest.mark.precommit - def test_keras_dense_float32(self, params, ie_device, precision, temp_dir, ir_version, use_old_api, + def test_keras_dense_float32(self, params, ie_device, precision, temp_dir, ir_version, use_new_frontend): self._test(*self.create_keras_lnorm_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) test_data_float32 = [ @@ -53,7 +53,7 @@ def test_keras_dense_float32(self, params, ie_device, precision, temp_dir, ir_ve 
@pytest.mark.parametrize("params", test_data_float32) @pytest.mark.nightly - def test_keras_dense_float32(self, params, ie_device, precision, temp_dir, ir_version, use_old_api): + def test_keras_dense_float32(self, params, ie_device, precision, temp_dir, ir_version): self._test(*self.create_keras_lnorm_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, **params) diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_leakyrelu.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_leakyrelu.py index 882e60007ba08f..d1ae4af22db139 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_leakyrelu.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_leakyrelu.py @@ -33,7 +33,7 @@ def create_keras_leaky_relu_net(self, input_names, input_shapes, input_type, alp @pytest.mark.nightly @pytest.mark.precommit def test_keras_leaky_relu_float32(self, params, ie_device, precision, temp_dir, ir_version, - use_old_api, use_new_frontend): + use_new_frontend): self._test(*self.create_keras_leaky_relu_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_locally_connected1D.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_locally_connected1D.py index b7f020977c9aec..46dad065a9fae9 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_locally_connected1D.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_locally_connected1D.py @@ -49,9 +49,9 @@ def create_keras_locally_connected1D_net(self, input_names, input_shapes, input_ @pytest.mark.nightly @pytest.mark.precommit def 
test_keras_locally_connected1D_float32(self, params, ie_device, precision, temp_dir, - ir_version, use_old_api, use_new_frontend): + ir_version, use_new_frontend): self._test(*self.create_keras_locally_connected1D_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_old_api=use_old_api, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) test_data_simple_channels_first = [ @@ -73,8 +73,8 @@ def test_keras_locally_connected1D_float32(self, params, ie_device, precision, t @pytest.mark.nightly @pytest.mark.precommit def test_keras_locally_connected1D_channels_first_float32(self, params, ie_device, precision, - temp_dir, ir_version, use_old_api, + temp_dir, ir_version, use_new_frontend): self._test(*self.create_keras_locally_connected1D_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_old_api=use_old_api, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_locally_connected2D.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_locally_connected2D.py index 20ac93aa3589a7..a2f8681a11390c 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_locally_connected2D.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_locally_connected2D.py @@ -49,9 +49,9 @@ def create_keras_locally_connected2D_net(self, input_names, input_shapes, input_ @pytest.mark.nightly @pytest.mark.precommit def test_keras_locally_connected2D_float32(self, params, ie_device, precision, temp_dir, - ir_version, use_old_api, use_new_frontend): + ir_version, use_new_frontend): self._test(*self.create_keras_locally_connected2D_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + ie_device, 
precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) test_data_simple_channels_first = [ @@ -73,8 +73,8 @@ def test_keras_locally_connected2D_float32(self, params, ie_device, precision, t @pytest.mark.nightly @pytest.mark.precommit def test_keras_locally_connected2D_channels_first_float32(self, params, ie_device, precision, - temp_dir, ir_version, use_old_api, + temp_dir, ir_version, use_new_frontend): self._test(*self.create_keras_locally_connected2D_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_lstm.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_lstm.py index 85508d95b29163..ab128b0f3bf6da 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_lstm.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_lstm.py @@ -56,9 +56,9 @@ def create_keras_lstm_net(self, input_names, input_shapes, input_type, units, ac @pytest.mark.precommit @pytest.mark.precommit_tf_fe def test_keras_lstm_with_bias_float32(self, params, ie_device, precision, temp_dir, ir_version, - use_old_api, use_new_frontend): + use_new_frontend): self._test(*self.create_keras_lstm_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_old_api=use_old_api, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) test_data_without_bias = [ @@ -79,9 +79,9 @@ def test_keras_lstm_with_bias_float32(self, params, ie_device, precision, temp_d @pytest.mark.nightly @pytest.mark.precommit def test_keras_lstm_without_bias_float32(self, params, ie_device, precision, temp_dir, - ir_version, use_old_api, use_new_frontend): + ir_version, use_new_frontend): 
self._test(*self.create_keras_lstm_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_old_api=use_old_api, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) test_data_different_flags = [ @@ -100,7 +100,7 @@ def test_keras_lstm_without_bias_float32(self, params, ie_device, precision, tem @pytest.mark.nightly @pytest.mark.precommit def test_keras_lstm_flags_float32(self, params, ie_device, precision, temp_dir, ir_version, - use_old_api, use_new_frontend): + use_new_frontend): self._test(*self.create_keras_lstm_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_old_api=use_old_api, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_lstm_cell.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_lstm_cell.py index 0141a4c5f5f8dc..a89d042c5c753a 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_lstm_cell.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_lstm_cell.py @@ -54,7 +54,7 @@ def create_keras_lstmcell_net(self, input_names, input_shapes, input_type, units @pytest.mark.precommit @pytest.mark.xfail(reason="49537") def test_keras_lstmcell_float32(self, params, ie_device, precision, temp_dir, ir_version, - use_old_api, use_new_frontend): + use_new_frontend): self._test(*self.create_keras_lstmcell_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_masking.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_masking.py index e38663be7c2cdd..277620799b8615 100644 
--- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_masking.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_masking.py @@ -36,8 +36,8 @@ def create_keras_masking_net(self, input_names, input_shapes, input_type, mask_v @pytest.mark.nightly @pytest.mark.precommit @pytest.mark.xfail(reason="49567") - def test_keras_masking_float32(self, params, ie_device, precision, temp_dir, ir_version, use_old_api, + def test_keras_masking_float32(self, params, ie_device, precision, temp_dir, ir_version, use_new_frontend): self._test(*self.create_keras_masking_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_maximum.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_maximum.py index e96974703d1b00..4e42c93d43284f 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_maximum.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_maximum.py @@ -91,10 +91,10 @@ def create_keras_maximum_net(self, input_names, input_shapes, input_type, ir_ver @pytest.mark.parametrize("params", test_data_float32_precommit) @pytest.mark.precommit - def test_keras_maximum_float32(self, params, ie_device, precision, ir_version, temp_dir, use_old_api, + def test_keras_maximum_float32(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend): self._test(*self.create_keras_maximum_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) test_data_float32 = [dict(input_names=["x1", "x2"], input_shapes=[[5, 4], [5, 4]], @@ -109,10 +109,10 @@ def test_keras_maximum_float32(self, params, 
ie_device, precision, ir_version, t @pytest.mark.parametrize("params", test_data_float32) @pytest.mark.nightly - def test_keras_maximum_float32(self, params, ie_device, precision, ir_version, temp_dir, use_old_api, + def test_keras_maximum_float32(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend): self._test(*self.create_keras_maximum_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) test_data_float32_several_inputs_precommit = [ @@ -123,9 +123,9 @@ def test_keras_maximum_float32(self, params, ie_device, precision, ir_version, t @pytest.mark.parametrize("params", test_data_float32_several_inputs_precommit) @pytest.mark.precommit def test_keras_maximum_float32_several_inputs(self, params, ie_device, precision, ir_version, - temp_dir, use_old_api, use_new_frontend): + temp_dir, use_new_frontend): self._test(*self.create_keras_maximum_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) test_data_float32_several_inputs = [ @@ -145,7 +145,7 @@ def test_keras_maximum_float32_several_inputs(self, params, ie_device, precision @pytest.mark.parametrize("params", test_data_float32_several_inputs) @pytest.mark.nightly def test_keras_maximum_float32_several_inputs(self, params, ie_device, precision, ir_version, - temp_dir, use_old_api, use_new_frontend): + temp_dir, use_new_frontend): self._test(*self.create_keras_maximum_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) diff --git 
a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_maxpool1D.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_maxpool1D.py index 96de3e0f8ae29e..8b2dce3181b260 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_maxpool1D.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_maxpool1D.py @@ -44,9 +44,9 @@ def create_keras_maxpool1D_net(self, input_names, input_shapes, input_type, pool @pytest.mark.nightly @pytest.mark.precommit def test_keras_maxpool1D_pool_strides_float32(self, params, ie_device, precision, temp_dir, - ir_version, use_old_api, use_new_frontend): + ir_version, use_new_frontend): self._test(*self.create_keras_maxpool1D_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) test_data_p_dformat_float32 = [ @@ -64,7 +64,7 @@ def test_keras_maxpool1D_pool_strides_float32(self, params, ie_device, precision @pytest.mark.nightly @pytest.mark.precommit def test_keras_maxpool1D_padding_and_data_format(self, params, ie_device, precision, temp_dir, - ir_version, use_old_api, use_new_frontend): + ir_version, use_new_frontend): self._test(*self.create_keras_maxpool1D_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_maxpool2D.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_maxpool2D.py index 3273f443a09178..392d5d8faa0602 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_maxpool2D.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_maxpool2D.py @@ -49,7 +49,7 @@ def create_keras_maxpool2D_net(self, input_names, input_shapes, 
input_type, pool @pytest.mark.nightly @pytest.mark.precommit def test_keras_maxpool2D_pool_strides_float32(self, params, ie_device, precision, temp_dir, - ir_version, use_old_api, use_new_frontend): + ir_version, use_new_frontend): self._test(*self.create_keras_maxpool2D_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_old_api=use_old_api, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_maxpool3D.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_maxpool3D.py index 868b2849e7a908..978a6a322fbbec 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_maxpool3D.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_maxpool3D.py @@ -45,9 +45,9 @@ def create_keras_maxpool3D_net(self, input_names, input_shapes, input_type, pool @pytest.mark.nightly @pytest.mark.precommit def test_keras_maxpool3D_pool_strides_float32(self, params, ie_device, precision, temp_dir, - ir_version, use_old_api, use_new_frontend): + ir_version, use_new_frontend): self._test(*self.create_keras_maxpool3D_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_old_api=use_old_api, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) test_data_p_dformat_float32 = [ @@ -65,7 +65,7 @@ def test_keras_maxpool3D_pool_strides_float32(self, params, ie_device, precision @pytest.mark.nightly @pytest.mark.precommit def test_keras_maxpool3D_padding_and_data_format(self, params, ie_device, precision, temp_dir, - ir_version, use_old_api, use_new_frontend): + ir_version, use_new_frontend): self._test(*self.create_keras_maxpool3D_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_old_api=use_old_api, + ie_device, precision, 
temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_minimum.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_minimum.py index fb72259f6f1610..5bfd831246353c 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_minimum.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_minimum.py @@ -91,10 +91,10 @@ def create_keras_minimum_net(self, input_names, input_shapes, input_type, ir_ver @pytest.mark.parametrize("params", test_data_float32_precommit) @pytest.mark.precommit - def test_keras_minimum_float32(self, params, ie_device, precision, ir_version, temp_dir, use_old_api, + def test_keras_minimum_float32(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend): self._test(*self.create_keras_minimum_net(**params, ir_version=ir_version), ie_device, - precision, temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) test_data_float32 = [dict(input_names=["x1", "x2"], input_shapes=[[5, 4], [5, 4]], @@ -109,10 +109,10 @@ def test_keras_minimum_float32(self, params, ie_device, precision, ir_version, t @pytest.mark.parametrize("params", test_data_float32) @pytest.mark.nightly - def test_keras_minimum_float32(self, params, ie_device, precision, ir_version, temp_dir, use_old_api, + def test_keras_minimum_float32(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend): self._test(*self.create_keras_minimum_net(**params, ir_version=ir_version), ie_device, - precision, temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) test_data_float32_several_inputs_precommit = [ @@ -123,9 +123,9 @@ def test_keras_minimum_float32(self, params, ie_device, precision, ir_version, t 
@pytest.mark.parametrize("params", test_data_float32_several_inputs_precommit) @pytest.mark.precommit def test_keras_minimum_float32_several_inputs(self, params, ie_device, precision, ir_version, - temp_dir, use_old_api): + temp_dir): self._test(*self.create_keras_minimum_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, **params) test_data_float32_several_inputs = [ @@ -145,7 +145,7 @@ def test_keras_minimum_float32_several_inputs(self, params, ie_device, precision @pytest.mark.parametrize("params", test_data_float32_several_inputs) @pytest.mark.nightly def test_keras_minimum_float32_several_inputs(self, params, ie_device, precision, ir_version, - temp_dir, use_old_api, use_new_frontend): + temp_dir, use_new_frontend): self._test(*self.create_keras_minimum_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_multiheadattention.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_multiheadattention.py index 4c41df9b5547c7..a682004167adc4 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_multiheadattention.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_multiheadattention.py @@ -80,9 +80,9 @@ def create_keras_multiheadattention_net(self, @pytest.mark.parametrize("params", test_data) @pytest.mark.precommit def test_keras_multiheadattention(self, params, ie_device, precision, ir_version, temp_dir, - use_old_api, use_new_frontend): + use_new_frontend): self._test(*self.create_keras_multiheadattention_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + 
ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) # Tests to cover no bias cases @@ -108,7 +108,7 @@ def test_keras_multiheadattention(self, params, ie_device, precision, ir_version @pytest.mark.parametrize("params", test_data_no_bias) @pytest.mark.nightly def test_keras_multiheadattention_no_bias(self, params, ie_device, precision, ir_version, - temp_dir, use_old_api, use_new_frontend): + temp_dir, use_new_frontend): self._test(*self.create_keras_multiheadattention_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_multiply.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_multiply.py index e00d5abc114d14..8ce780e7b859ae 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_multiply.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_multiply.py @@ -92,9 +92,9 @@ def create_keras_multiply_net(self, input_names, input_shapes, input_type, ir_ve @pytest.mark.parametrize("params", test_data_float32_precommit) @pytest.mark.precommit def test_keras_multiply_float32(self, params, ie_device, precision, ir_version, temp_dir, - use_old_api, use_new_frontend): + use_new_frontend): self._test(*self.create_keras_multiply_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_old_api=use_old_api, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) test_data_float32 = [dict(input_names=["x1", "x2"], input_shapes=[[5, 4], [5, 4]], @@ -110,9 +110,9 @@ def test_keras_multiply_float32(self, params, ie_device, precision, ir_version, @pytest.mark.parametrize("params", test_data_float32) @pytest.mark.nightly def 
test_keras_multiply_float32(self, params, ie_device, precision, ir_version, temp_dir, - use_old_api, use_new_frontend): + use_new_frontend): self._test(*self.create_keras_multiply_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_old_api=use_old_api, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) test_data_float32_several_inputs_precommit = [ @@ -123,9 +123,9 @@ def test_keras_multiply_float32(self, params, ie_device, precision, ir_version, @pytest.mark.parametrize("params", test_data_float32_several_inputs_precommit) @pytest.mark.precommit def test_keras_multiply_float32_several_inputs(self, params, ie_device, precision, ir_version, - temp_dir, use_old_api, use_new_frontend): + temp_dir, use_new_frontend): self._test(*self.create_keras_multiply_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_old_api=use_old_api, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) test_data_float32_several_inputs = [dict(input_names=["x1", "x2", "x3"], @@ -146,7 +146,7 @@ def test_keras_multiply_float32_several_inputs(self, params, ie_device, precisio @pytest.mark.parametrize("params", test_data_float32_several_inputs) @pytest.mark.nightly def test_keras_multiply_float32_several_inputs(self, params, ie_device, precision, ir_version, - temp_dir, use_old_api, use_new_frontend): + temp_dir, use_new_frontend): self._test(*self.create_keras_multiply_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_old_api=use_old_api, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_permute.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_permute.py index 
eacac6be7b84ee..c5888a6613e23c 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_permute.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_permute.py @@ -30,8 +30,8 @@ def create_keras_permute_net(self, input_names, input_shapes, input_type, dims, @pytest.mark.parametrize("params", test_data_float32) @pytest.mark.nightly @pytest.mark.precommit - def test_keras_permute_float32(self, params, ie_device, precision, ir_version, temp_dir, use_old_api, + def test_keras_permute_float32(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend): self._test(*self.create_keras_permute_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_prelu.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_prelu.py index d970b9a404d361..cfe77263a83ba3 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_prelu.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_prelu.py @@ -27,10 +27,10 @@ def create_keras_prelu_net(self, input_names, input_shapes, input_type, shared_a @pytest.mark.parametrize("params", test_data_float32_precommit) @pytest.mark.precommit - def test_keras_prelu_float32(self, params, ie_device, precision, ir_version, temp_dir, use_old_api, + def test_keras_prelu_float32(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend): self._test(*self.create_keras_prelu_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) test_data_float32 = [ @@ -43,10 +43,10 @@ def test_keras_prelu_float32(self, params, ie_device, precision, 
ir_version, tem @pytest.mark.parametrize("params", test_data_float32) @pytest.mark.nightly - def test_keras_prelu_float32(self, params, ie_device, precision, ir_version, temp_dir, use_old_api, + def test_keras_prelu_float32(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend): self._test(*self.create_keras_prelu_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) test_data_float32_shared_axes = [ @@ -60,7 +60,7 @@ def test_keras_prelu_float32(self, params, ie_device, precision, ir_version, tem @pytest.mark.parametrize("params", test_data_float32_shared_axes) @pytest.mark.nightly def test_keras_prelu_float32_shared_axes(self, params, ie_device, precision, ir_version, - temp_dir, use_old_api, use_new_frontend): + temp_dir, use_new_frontend): self._test(*self.create_keras_prelu_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_relu.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_relu.py index 78d09cc9894c27..62968e870be8ed 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_relu.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_relu.py @@ -53,10 +53,10 @@ def create_keras_relu_net(self, input_names, input_shapes, input_type, ir_versio @pytest.mark.parametrize("params", test_data_float32_precommit) @pytest.mark.precommit - def test_keras_relu_float32(self, params, ie_device, precision, ir_version, temp_dir, use_old_api, + def test_keras_relu_float32(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend): 
self._test(*self.create_keras_relu_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) test_data_float32 = [dict(input_names=["x1"], input_shapes=[[5, 4]], @@ -70,8 +70,8 @@ def test_keras_relu_float32(self, params, ie_device, precision, ir_version, temp @pytest.mark.parametrize("params", test_data_float32) @pytest.mark.nightly - def test_keras_relu_float32(self, params, ie_device, precision, ir_version, temp_dir, use_old_api, + def test_keras_relu_float32(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend): self._test(*self.create_keras_relu_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_repeatvector.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_repeatvector.py index 7bc4c241bc8241..fab54d3019c6ca 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_repeatvector.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_repeatvector.py @@ -29,8 +29,8 @@ def create_keras_repeatvector_net(self, input_names, input_shapes, input_type, n @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly @pytest.mark.precommit - def test_keras_repeatvector(self, params, ie_device, precision, ir_version, temp_dir, use_old_api, + def test_keras_repeatvector(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend): self._test(*self.create_keras_repeatvector_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_old_api=use_old_api, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, 
use_new_frontend=use_new_frontend, **params) diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_reshape.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_reshape.py index 7f30641670cdfc..a11ddfb721d617 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_reshape.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_reshape.py @@ -33,8 +33,8 @@ def create_keras_reshape_net(self, input_names, input_shapes, input_type, target @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly @pytest.mark.precommit - def test_keras_reshape(self, params, ie_device, precision, ir_version, temp_dir, use_old_api, + def test_keras_reshape(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend): self._test(*self.create_keras_reshape_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_rnn.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_rnn.py index cc97eff7d24be6..0b16732de7fe8f 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_rnn.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_rnn.py @@ -59,10 +59,10 @@ def create_keras_rnn_net(self, input_names, input_shapes, @pytest.mark.nightly @pytest.mark.precommit @pytest.mark.precommit_tf_fe - def test_keras_rnn(self, params, ie_device, precision, ir_version, temp_dir, use_old_api, + def test_keras_rnn(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend): self._test(*self.create_keras_rnn_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_old_api=use_old_api, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) # 
Tests for default parameter values @@ -82,9 +82,9 @@ def test_keras_rnn(self, params, ie_device, precision, ir_version, temp_dir, use @pytest.mark.parametrize("params", test_data_multiple_outputs) @pytest.mark.nightly def test_keras_rnn_multiple_outputs(self, params, ie_device, precision, ir_version, temp_dir, - use_old_api, use_new_frontend): + use_new_frontend): self._test(*self.create_keras_rnn_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_old_api=use_old_api, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) # Tests for other attributes: go_backward and time_major @@ -103,8 +103,8 @@ def test_keras_rnn_multiple_outputs(self, params, ie_device, precision, ir_versi @pytest.mark.parametrize("params", test_data_others) @pytest.mark.nightly - def test_keras_rnn_others(self, params, ie_device, precision, ir_version, temp_dir, use_old_api, + def test_keras_rnn_others(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend): self._test(*self.create_keras_rnn_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_old_api=use_old_api, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_roll.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_roll.py index 8dfc80ad6d6fcb..a0425517fa491b 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_roll.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_roll.py @@ -40,10 +40,10 @@ def create_keras_roll_net(self, shift, axis, input_names, input_shapes, input_ty @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_keras_roll(self, params, ie_device, precision, ir_version, temp_dir, use_old_api, + def test_keras_roll(self, params, ie_device, 
precision, ir_version, temp_dir, use_new_frontend): if ie_device == 'GPU': pytest.skip("Roll is not supported on GPU") self._test(*self.create_keras_roll_net(**params, ir_version=ir_version), ie_device, - precision, temp_dir=temp_dir, ir_version=ir_version, use_old_api=use_old_api, + precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_separableconv1d.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_separableconv1d.py index 32e2bd8eae54f2..50a19cd6b136f9 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_separableconv1d.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_separableconv1d.py @@ -46,10 +46,10 @@ def create_keras_separableconv1d_net(self, input_names, input_shapes, input_type @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly @pytest.mark.precommit - def test_keras_separableconv1d(self, params, ie_device, precision, ir_version, temp_dir, use_old_api, + def test_keras_separableconv1d(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend): self._test(*self.create_keras_separableconv1d_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) # Tests for different activations @@ -80,10 +80,10 @@ def test_keras_separableconv1d(self, params, ie_device, precision, ir_version, t @pytest.mark.parametrize("params", test_data_different_activations) @pytest.mark.nightly def test_keras_separableconv1d_different_activations(self, params, ie_device, precision, - ir_version, temp_dir, use_old_api, + ir_version, temp_dir, use_new_frontend): self._test(*self.create_keras_separableconv1d_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, use_old_api=use_old_api, 
ir_version=ir_version, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) # Tests for different padding @@ -102,9 +102,9 @@ def test_keras_separableconv1d_different_activations(self, params, ie_device, pr @pytest.mark.parametrize("params", test_data_different_padding) @pytest.mark.nightly def test_keras_separableconv1d_different_padding(self, params, ie_device, precision, ir_version, - temp_dir, use_old_api, use_new_frontend): + temp_dir, use_new_frontend): self._test(*self.create_keras_separableconv1d_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) # Tests for different bias @@ -120,7 +120,7 @@ def test_keras_separableconv1d_different_padding(self, params, ie_device, precis @pytest.mark.parametrize("params", test_data_different_bias) @pytest.mark.nightly def test_keras_separableconv1d_different_bias(self, params, ie_device, precision, ir_version, - temp_dir, use_old_api, use_new_frontend): + temp_dir, use_new_frontend): self._test(*self.create_keras_separableconv1d_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_separableconv2d.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_separableconv2d.py index e3ca9236997550..7aa848dd913ee1 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_separableconv2d.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_separableconv2d.py @@ -49,10 +49,10 @@ def create_keras_separableconv2d_net(self, input_names, input_shapes, input_type @pytest.mark.parametrize("params", test_data) 
@pytest.mark.nightly @pytest.mark.precommit - def test_keras_separableconv2d(self, params, ie_device, precision, ir_version, temp_dir, use_old_api, + def test_keras_separableconv2d(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend): self._test(*self.create_keras_separableconv2d_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) # Tests for different activations @@ -85,10 +85,10 @@ def test_keras_separableconv2d(self, params, ie_device, precision, ir_version, t @pytest.mark.parametrize("params", test_data_different_activations) @pytest.mark.nightly def test_keras_separableconv2d_different_activations(self, params, ie_device, precision, - ir_version, temp_dir, use_old_api, + ir_version, temp_dir, use_new_frontend): self._test(*self.create_keras_separableconv2d_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) # Tests for different padding @@ -104,9 +104,9 @@ def test_keras_separableconv2d_different_activations(self, params, ie_device, pr @pytest.mark.parametrize("params", test_data_different_padding) @pytest.mark.nightly def test_keras_separableconv2d_different_padding(self, params, ie_device, precision, ir_version, - temp_dir, use_old_api, use_new_frontend): + temp_dir, use_new_frontend): self._test(*self.create_keras_separableconv2d_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) # Tests for different bias @@ -123,7 +123,7 @@ def test_keras_separableconv2d_different_padding(self, 
params, ie_device, precis @pytest.mark.parametrize("params", test_data_different_bias) @pytest.mark.nightly def test_keras_separableconv2d_different_bias(self, params, ie_device, precision, ir_version, - temp_dir, use_old_api, use_new_frontend): + temp_dir, use_new_frontend): self._test(*self.create_keras_separableconv2d_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_simplernn.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_simplernn.py index 9382199d9a81bb..5e95a1a8cfd923 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_simplernn.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_simplernn.py @@ -57,9 +57,9 @@ def create_keras_simplernn_net(self, input_names, input_shapes, input_type, @pytest.mark.precommit @pytest.mark.precommit_tf_fe def test_keras_simplernn_different_activations(self, params, ie_device, precision, ir_version, - temp_dir, use_old_api, use_new_frontend): + temp_dir, use_new_frontend): self._test(*self.create_keras_simplernn_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) # Tests for RNN with dropout @@ -77,9 +77,9 @@ def test_keras_simplernn_different_activations(self, params, ie_device, precisio @pytest.mark.parametrize("params", test_data_dropout) @pytest.mark.nightly def test_keras_simplernn_dropout(self, params, ie_device, precision, ir_version, temp_dir, - use_old_api, use_new_frontend): + use_new_frontend): self._test(*self.create_keras_simplernn_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, 
use_old_api=use_old_api, ir_version=ir_version, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) # Tests for RNN with other attributes @@ -96,10 +96,10 @@ def test_keras_simplernn_dropout(self, params, ie_device, precision, ir_version, @pytest.mark.parametrize("params", test_data_other) @pytest.mark.nightly - def test_keras_simplernn_other(self, params, ie_device, precision, ir_version, temp_dir, use_old_api, + def test_keras_simplernn_other(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend): self._test(*self.create_keras_simplernn_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) # Tests for RNN with multiple outputs @@ -125,7 +125,7 @@ def test_keras_simplernn_other(self, params, ie_device, precision, ir_version, t @pytest.mark.parametrize("params", test_data_multipleoutput) @pytest.mark.nightly def test_keras_simplernn_test_data_multipleoutput(self, params, ie_device, precision, - ir_version, temp_dir, use_old_api, use_new_frontend): + ir_version, temp_dir, use_new_frontend): self._test(*self.create_keras_simplernn_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_softmax.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_softmax.py index 8ff8754e37327d..dfa5e9125d5e31 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_softmax.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_softmax.py @@ -53,10 +53,10 @@ def create_keras_softmax_net(self, input_names, input_shapes, input_type, ir_ver 
@pytest.mark.parametrize("params", test_data_float32_precommit) @pytest.mark.precommit - def test_keras_softmax_float32(self, params, ie_device, precision, ir_version, temp_dir, use_old_api, + def test_keras_softmax_float32(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend): self._test(*self.create_keras_softmax_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_old_api=use_old_api, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) test_data_float32 = [dict(input_names=["x1"], input_shapes=[[5, 4]], @@ -70,8 +70,8 @@ def test_keras_softmax_float32(self, params, ie_device, precision, ir_version, t @pytest.mark.parametrize("params", test_data_float32) @pytest.mark.nightly - def test_keras_softmax_float32(self, params, ie_device, precision, ir_version, temp_dir, use_old_api, + def test_keras_softmax_float32(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend): self._test(*self.create_keras_softmax_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_old_api=use_old_api, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_softplus.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_softplus.py index 44740b93dcd2cf..d1bf48aa06bb73 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_softplus.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_softplus.py @@ -55,9 +55,9 @@ def create_keras_softplus_net(self, input_names, input_shapes, input_type, ir_ve @pytest.mark.precommit @pytest.mark.xfail(reason="49516") def test_keras_softplus_float32(self, params, ie_device, precision, ir_version, temp_dir, - use_old_api, use_new_frontend): + use_new_frontend): 
self._test(*self.create_keras_softplus_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_old_api=use_old_api, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) test_data_float32 = [dict(input_names=["x1"], input_shapes=[[5]], input_type=tf.float32), @@ -73,7 +73,7 @@ def test_keras_softplus_float32(self, params, ie_device, precision, ir_version, @pytest.mark.precommit @pytest.mark.xfail(reason="49516") def test_keras_softplus_float32(self, params, ie_device, precision, ir_version, temp_dir, - use_old_api, use_new_frontend): + use_new_frontend): self._test(*self.create_keras_softplus_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_old_api=use_old_api, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_spatialdropout1d.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_spatialdropout1d.py index 2909995c27a803..95f0d3c92c7961 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_spatialdropout1d.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_spatialdropout1d.py @@ -31,7 +31,7 @@ def create_keras_spatialdropout1d_net(self, input_names, input_shapes, input_typ @pytest.mark.nightly @pytest.mark.precommit def test_keras_spatialdropout1d(self, params, ie_device, precision, ir_version, temp_dir, - use_old_api, use_new_frontend): + use_new_frontend): self._test(*self.create_keras_spatialdropout1d_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_spatialdropout2d.py 
b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_spatialdropout2d.py index 9b0d2ccab6b0ad..b18b4286f61eff 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_spatialdropout2d.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_spatialdropout2d.py @@ -34,9 +34,9 @@ def create_keras_spatialdropout2d_net(self, input_names, input_shapes, input_typ @pytest.mark.nightly @pytest.mark.precommit def test_keras_spatialdropout2d(self, params, ie_device, precision, ir_version, temp_dir, - use_old_api, use_new_frontend): + use_new_frontend): self._test(*self.create_keras_spatialdropout2d_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) test_data_channels_first = [ @@ -51,7 +51,7 @@ def test_keras_spatialdropout2d(self, params, ie_device, precision, ir_version, @pytest.mark.parametrize("params", test_data_channels_first) @pytest.mark.nightly def test_keras_spatialdropout2d_channels_first(self, params, ie_device, precision, ir_version, - temp_dir, use_old_api, use_new_frontend): + temp_dir, use_new_frontend): self._test(*self.create_keras_spatialdropout2d_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_spatialdropout3d.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_spatialdropout3d.py index 254bdcd8a85832..3a5a993cc7c98d 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_spatialdropout3d.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_spatialdropout3d.py @@ -34,9 +34,9 @@ def create_keras_spatialdropout3d_net(self, input_names, input_shapes, 
input_typ @pytest.mark.nightly @pytest.mark.precommit def test_keras_spatialdropout3d(self, params, ie_device, precision, ir_version, temp_dir, - use_old_api, use_new_frontend): + use_new_frontend): self._test(*self.create_keras_spatialdropout3d_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) test_data_channels_first = [ @@ -51,7 +51,7 @@ def test_keras_spatialdropout3d(self, params, ie_device, precision, ir_version, @pytest.mark.parametrize("params", test_data_channels_first) @pytest.mark.nightly def test_keras_spatialdropout3d_channels_first(self, params, ie_device, precision, ir_version, - temp_dir, use_old_api, use_new_frontend): + temp_dir, use_new_frontend): self._test(*self.create_keras_spatialdropout3d_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_stackedrnncells.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_stackedrnncells.py index b1bebba598e373..6e748a5bcc61d2 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_stackedrnncells.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_stackedrnncells.py @@ -41,8 +41,8 @@ def create_keras_stackedrnncells_net(self, input_names, input_shapes, input_type @pytest.mark.nightly @pytest.mark.precommit @pytest.mark.precommit_tf_fe - def test_keras_stackedrnncells(self, params, ie_device, precision, ir_version, temp_dir, use_old_api, + def test_keras_stackedrnncells(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend): self._test(*self.create_keras_stackedrnncells_net(**params, ir_version=ir_version), - 
ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_old_api=use_old_api, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_subtract.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_subtract.py index a3662289571c52..4df702d4e797e1 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_subtract.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_subtract.py @@ -62,9 +62,9 @@ def create_keras_subtract_net(self, input_names, input_shapes, input_type, ir_ve @pytest.mark.parametrize("params", test_data_float32_precommit) @pytest.mark.precommit def test_keras_subtract_float32(self, params, ie_device, precision, ir_version, temp_dir, - use_old_api, use_new_frontend): + use_new_frontend): self._test(*self.create_keras_subtract_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_old_api=use_old_api, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) test_data_float32 = [dict(input_names=["x1", "x2"], input_shapes=[[5, 4], [5, 4]], @@ -80,7 +80,7 @@ def test_keras_subtract_float32(self, params, ie_device, precision, ir_version, @pytest.mark.parametrize("params", test_data_float32) @pytest.mark.nightly def test_keras_subtract_float32(self, params, ie_device, precision, ir_version, temp_dir, - use_old_api, use_new_frontend): + use_new_frontend): self._test(*self.create_keras_subtract_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_old_api=use_old_api, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_swish.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_swish.py index 
8f79c69a7e91fc..8d9c7e3e01c1a4 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_swish.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_swish.py @@ -53,10 +53,10 @@ def create_keras_swish_net(self, input_names, input_shapes, input_type, ir_versi @pytest.mark.parametrize("params", test_data_float32_precommit) @pytest.mark.precommit - def test_keras_swish_float32(self, params, ie_device, precision, ir_version, temp_dir, use_old_api, + def test_keras_swish_float32(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend): self._test(*self.create_keras_swish_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) test_data_float32 = [dict(input_names=["x1"], input_shapes=[[5]], input_type=tf.float32), @@ -69,8 +69,8 @@ def test_keras_swish_float32(self, params, ie_device, precision, ir_version, tem @pytest.mark.parametrize("params", test_data_float32) @pytest.mark.nightly - def test_keras_swish_float32(self, params, ie_device, precision, ir_version, temp_dir, use_old_api, + def test_keras_swish_float32(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend): self._test(*self.create_keras_swish_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_old_api=use_old_api, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_thresholdedrelu.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_thresholdedrelu.py index 8bcca2e7de368c..b56f1798221f58 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_thresholdedrelu.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_thresholdedrelu.py @@ -28,9 +28,9 @@ 
def create_keras_thresholdedrelu_net(self, input_names, input_shapes, input_type @pytest.mark.parametrize("params", test_data_precommit) @pytest.mark.precommit def test_keras_thresholdedrelu_float32(self, params, ie_device, precision, ir_version, temp_dir, - use_old_api, use_new_frontend): + use_new_frontend): self._test(*self.create_keras_thresholdedrelu_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) test_data = [ @@ -43,7 +43,7 @@ def test_keras_thresholdedrelu_float32(self, params, ie_device, precision, ir_ve @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly def test_keras_thresholdedrelu_float32(self, params, ie_device, precision, ir_version, temp_dir, - use_old_api, use_new_frontend): + use_new_frontend): self._test(*self.create_keras_thresholdedrelu_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_timedistributed.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_timedistributed.py index 51e0687b9665d8..eca28857bf9662 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_timedistributed.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_timedistributed.py @@ -36,8 +36,8 @@ def create_keras_timedistributed_net(self, input_names, input_shapes, input_type @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly @pytest.mark.precommit - def test_keras_timedistributed(self, params, ie_device, precision, ir_version, temp_dir, use_old_api, + def test_keras_timedistributed(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend): 
self._test(*self.create_keras_timedistributed_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_old_api=use_old_api, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_upsampling1d.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_upsampling1d.py index c35f4e0e672189..96c0b2bed07129 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_upsampling1d.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_upsampling1d.py @@ -32,7 +32,7 @@ def create_keras_upsampling1d_net(self, input_names, input_shapes, input_type, s @pytest.mark.nightly @pytest.mark.precommit def test_keras_upsampling1d_float32(self, params, ie_device, precision, ir_version, temp_dir, - use_old_api, use_new_frontend): + use_new_frontend): self._test(*self.create_keras_upsampling1d_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_upsampling2d.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_upsampling2d.py index 79e2e5c714bc6d..bd5dc0bd09c251 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_upsampling2d.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_upsampling2d.py @@ -53,8 +53,8 @@ def create_keras_upsampling2d_net(self, input_shapes, input_type, size, @pytest.mark.nightly def test_keras_upsampling2d_nearest(self, params, input_type, data_format, interpolation, ie_device, precision, ir_version, temp_dir, - use_old_api, use_new_frontend): + use_new_frontend): self._test(*self.create_keras_upsampling2d_net(**params, input_type=input_type, data_format=data_format, 
interpolation=interpolation, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_upsampling3d.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_upsampling3d.py index 529afceb32fc77..9a6298cea886a7 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_upsampling3d.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_upsampling3d.py @@ -34,10 +34,10 @@ def create_keras_upsampling3d_net(self, input_names, input_shapes, input_type, s @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly @pytest.mark.precommit - def test_keras_upsampling3(self, params, ie_device, precision, ir_version, temp_dir, use_old_api, + def test_keras_upsampling3(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend): self._test(*self.create_keras_upsampling3d_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) test_data_channels_first = [ @@ -54,7 +54,7 @@ def test_keras_upsampling3(self, params, ie_device, precision, ir_version, temp_ @pytest.mark.parametrize("params", test_data_channels_first) @pytest.mark.nightly def test_keras_upsampling2d_channels_first(self, params, ie_device, precision, ir_version, - temp_dir, use_old_api, use_new_frontend): + temp_dir, use_new_frontend): self._test(*self.create_keras_upsampling3d_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) diff --git 
a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_zeropadding1d.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_zeropadding1d.py index 77845b9cef502e..4947c3833683bb 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_zeropadding1d.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_zeropadding1d.py @@ -30,8 +30,8 @@ def create_keras_zeropadding1d_net(self, input_names, input_shapes, input_type, @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly @pytest.mark.precommit - def test_keras_zeropadding1d(self, params, ie_device, precision, ir_version, temp_dir, use_old_api, + def test_keras_zeropadding1d(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend): self._test(*self.create_keras_zeropadding1d_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_zeropadding2d.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_zeropadding2d.py index 429595bb9243d9..9654c123c90f4e 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_zeropadding2d.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_zeropadding2d.py @@ -34,9 +34,9 @@ def create_keras_zeropadding2d_net(self, input_names, input_shapes, input_type, @pytest.mark.nightly @pytest.mark.precommit def test_keras_zeropadding2d_channels_last(self, params, ie_device, precision, ir_version, - temp_dir, use_old_api): + temp_dir): self._test(*self.create_keras_zeropadding2d_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, **params) test_data_channels_first = [ @@ -51,7 +51,7 @@ def 
test_keras_zeropadding2d_channels_last(self, params, ie_device, precision, i @pytest.mark.parametrize("params", test_data_channels_first) @pytest.mark.nightly def test_keras_zeropadding2d_channels_first(self, params, ie_device, precision, ir_version, - temp_dir, use_old_api, use_new_frontend): + temp_dir, use_new_frontend): self._test(*self.create_keras_zeropadding2d_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_zeropadding3d.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_zeropadding3d.py index 7fb85a40b1d67e..45de7962bfeca8 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_zeropadding3d.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_zeropadding3d.py @@ -34,9 +34,9 @@ def create_keras_zeropadding3d_net(self, input_names, input_shapes, input_type, @pytest.mark.nightly @pytest.mark.precommit def test_keras_zeropadding3d_channels_last(self, params, ie_device, precision, ir_version, - temp_dir, use_old_api, use_new_frontend): + temp_dir, use_new_frontend): self._test(*self.create_keras_zeropadding3d_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) test_data_channels_first = [ @@ -51,7 +51,7 @@ def test_keras_zeropadding3d_channels_last(self, params, ie_device, precision, i @pytest.mark.parametrize("params", test_data_channels_first) @pytest.mark.nightly def test_keras_zeropadding3d_channels_first(self, params, ie_device, precision, ir_version, - temp_dir, use_old_api, use_new_frontend): + temp_dir, use_new_frontend): 
self._test(*self.create_keras_zeropadding3d_net(**params, ir_version=ir_version), - ie_device, precision, temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version, + ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_map_fn.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_map_fn.py index ee540a13afc4d9..04dd3731d23385 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_map_fn.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_map_fn.py @@ -53,9 +53,9 @@ def create_map_fn_net(self, fn, input_type, fn_output_signature, back_prop, @pytest.mark.parametrize("params", test_basic) @pytest.mark.precommit @pytest.mark.nightly - def test_basic(self, params, ie_device, precision, ir_version, temp_dir, use_old_api, use_new_frontend): + def test_basic(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend): self._test(*self.create_map_fn_net(**params, ir_version=ir_version), ie_device, precision, - temp_dir=temp_dir, ir_version=ir_version, use_old_api=use_old_api, use_new_frontend=use_new_frontend, + temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) test_multiple_inputs = [ @@ -71,9 +71,9 @@ def test_basic(self, params, ie_device, precision, ir_version, temp_dir, use_old @pytest.mark.parametrize("params", test_multiple_inputs) @pytest.mark.nightly - def test_multiple_inputs(self, params, ie_device, precision, ir_version, temp_dir, use_old_api, use_new_frontend): + def test_multiple_inputs(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend): self._test(*self.create_map_fn_net(**params, ir_version=ir_version), ie_device, precision, - temp_dir=temp_dir, ir_version=ir_version, use_old_api=use_old_api, use_new_frontend=use_new_frontend, + temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) test_multiple_outputs = 
[ @@ -91,9 +91,9 @@ def test_multiple_inputs(self, params, ie_device, precision, ir_version, temp_di @pytest.mark.parametrize("params", test_multiple_outputs) @pytest.mark.nightly - def test_multiple_outputs(self, params, ie_device, precision, ir_version, temp_dir, use_old_api, use_new_frontend): + def test_multiple_outputs(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend): self._test(*self.create_map_fn_net(**params, ir_version=ir_version), ie_device, precision, - temp_dir=temp_dir, ir_version=ir_version, use_old_api=use_old_api, use_new_frontend=use_new_frontend, + temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) test_multiple_inputs_outputs_int32 = [ @@ -113,7 +113,7 @@ def test_multiple_outputs(self, params, ie_device, precision, ir_version, temp_d @pytest.mark.parametrize("params", test_multiple_inputs_outputs_int32) @pytest.mark.nightly def test_multiple_inputs_outputs_int32(self, params, ie_device, precision, ir_version, temp_dir, - use_old_api, use_new_frontend): + use_new_frontend): self._test(*self.create_map_fn_net(**params, ir_version=ir_version), ie_device, precision, - temp_dir=temp_dir, ir_version=ir_version, use_old_api=use_old_api, use_new_frontend=use_new_frontend, + temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_Add.py b/tests/layer_tests/tensorflow_tests/test_tf_Add.py index 27e740c4efa89f..7e6f2b54da9578 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_Add.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_Add.py @@ -64,11 +64,11 @@ def create_add_placeholder_const_net(self, x_shape, y_shape, ir_version, use_new @pytest.mark.parametrize("params", test_data_1D) @pytest.mark.nightly def test_add_placeholder_const_1D(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): 
self._test(*self.create_add_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) test_data_2D = [ dict(x_shape=[1, 1], y_shape=[1, 1]), @@ -81,11 +81,11 @@ def test_add_placeholder_const_1D(self, params, ie_device, precision, ir_version @pytest.mark.parametrize("params", test_data_2D) @pytest.mark.nightly def test_add_placeholder_const_2D(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_add_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) test_data_3D = [ dict(x_shape=[1, 1, 1], y_shape=[1, 1, 1]), @@ -101,11 +101,11 @@ def test_add_placeholder_const_2D(self, params, ie_device, precision, ir_version @pytest.mark.parametrize("params", test_data_3D) @pytest.mark.nightly def test_add_placeholder_const_3D(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_add_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) test_data_4D = [ dict(x_shape=[1, 1, 1, 1], y_shape=[1, 1, 1, 1]), @@ -119,11 +119,11 @@ def test_add_placeholder_const_3D(self, params, ie_device, precision, ir_version @pytest.mark.parametrize("params", test_data_4D) @pytest.mark.nightly def test_add_placeholder_const_4D(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): 
self._test(*self.create_add_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) test_data_5D = [ dict(x_shape=[1, 1, 1, 1, 1], y_shape=[1, 1, 1, 1, 1]), @@ -137,11 +137,11 @@ def test_add_placeholder_const_4D(self, params, ie_device, precision, ir_version @pytest.mark.parametrize("params", test_data_5D) @pytest.mark.nightly def test_add_placeholder_const_5D(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_add_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend), ie_device, precision, ir_version=ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) ############################################################################################### # # @@ -156,11 +156,11 @@ def test_add_placeholder_const_5D(self, params, ie_device, precision, ir_version @pytest.mark.parametrize("params", test_data_broadcast_1D) @pytest.mark.nightly def test_add_placeholder_const_broadcast_1D(self, params, ie_device, precision, ir_version, - temp_dir, use_new_frontend, use_old_api): + temp_dir, use_new_frontend): self._test(*self.create_add_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend), ie_device, precision, ir_version=ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) test_data_broadcast_2D = [ dict(x_shape=[1, 1], y_shape=[1]), @@ -174,11 +174,11 @@ def test_add_placeholder_const_broadcast_1D(self, params, ie_device, precision, @pytest.mark.parametrize("params", test_data_broadcast_2D) @pytest.mark.nightly def test_add_placeholder_const_broadcast_2D(self, params, ie_device, 
precision, ir_version, - temp_dir, use_new_frontend, use_old_api): + temp_dir, use_new_frontend): self._test(*self.create_add_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend), ie_device, precision, ir_version=ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) test_data_broadcast_3D = [ dict(x_shape=[1, 1, 1], y_shape=[1]), @@ -199,11 +199,11 @@ def test_add_placeholder_const_broadcast_2D(self, params, ie_device, precision, @pytest.mark.parametrize("params", test_data_broadcast_3D) @pytest.mark.nightly def test_add_placeholder_const_broadcast_3D(self, params, ie_device, precision, ir_version, - temp_dir, use_new_frontend, use_old_api): + temp_dir, use_new_frontend): self._test(*self.create_add_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend), ie_device, precision, ir_version=ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) test_data_broadcast_4D = [ dict(x_shape=[1, 1, 1, 1], y_shape=[1]), @@ -222,11 +222,11 @@ def test_add_placeholder_const_broadcast_3D(self, params, ie_device, precision, @pytest.mark.nightly @pytest.mark.precommit def test_add_placeholder_const_broadcast_4D(self, params, ie_device, precision, ir_version, - temp_dir, use_new_frontend, use_old_api): + temp_dir, use_new_frontend): self._test(*self.create_add_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend), ie_device, precision, ir_version=ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) test_data_broadcast_5D = [ dict(x_shape=[1, 1, 1, 1, 1], y_shape=[1]), @@ -244,10 +244,9 @@ def test_add_placeholder_const_broadcast_4D(self, params, ie_device, precision, @pytest.mark.nightly @pytest.mark.precommit def 
test_add_placeholder_const_broadcast_5D(self, params, ie_device, precision, ir_version, - temp_dir, use_new_frontend, use_old_api): + temp_dir, use_new_frontend): # we do not perform transpose in the test in case of new frontend self._test(*self.create_add_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend), ie_device, precision, - ir_version=ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend, - use_old_api=use_old_api) + ir_version=ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_AddN.py b/tests/layer_tests/tensorflow_tests/test_tf_AddN.py index e387e4dea558e5..4ca3d51e41a0e5 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_AddN.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_AddN.py @@ -61,8 +61,8 @@ def create_addn_placeholder_const_net(self, input_shapes, ir_version, use_new_fr @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly def test_addn_placeholder_const(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_addn_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_AddTypes.py b/tests/layer_tests/tensorflow_tests/test_tf_AddTypes.py index c8706528bf2e88..b0f08f1de26f67 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_AddTypes.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_AddTypes.py @@ -50,7 +50,7 @@ def create_add_types_net(self, const_shape, input_type): @pytest.mark.precommit_tf_fe @pytest.mark.nightly def test_add_types(self, const_shape, input_type, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): 
self._test(*self.create_add_types_net(const_shape, input_type), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_AdjustContrastv2.py b/tests/layer_tests/tensorflow_tests/test_tf_AdjustContrastv2.py index 896e1789111eaa..1193fcd7752b46 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_AdjustContrastv2.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_AdjustContrastv2.py @@ -43,7 +43,7 @@ def create_adjust_contrast_net(self, input_shape, input_type): @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', reason='Ticket - 122716') def test_adjust_contrast_basic(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_adjust_contrast_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_ArgMinMax.py b/tests/layer_tests/tensorflow_tests/test_tf_ArgMinMax.py index da60c1ef21a79e..39fb2c62fc63b4 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_ArgMinMax.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_ArgMinMax.py @@ -57,8 +57,8 @@ def create_argmin_max_net(self, input_shape, dimension, input_type, output_type, 'arm64', 'ARM64'], reason='Ticket - 126314') def test_argmin_max_net(self, params, input_type, output_type, op_type, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_argmin_max_net(**params, input_type=input_type, output_type=output_type, op_type=op_type), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git 
a/tests/layer_tests/tensorflow_tests/test_tf_Atan2.py b/tests/layer_tests/tensorflow_tests/test_tf_Atan2.py index 4c5ed9e3a3bded..b8ac7a9ec291b0 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_Atan2.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_Atan2.py @@ -40,7 +40,7 @@ def create_atan2_net(self, input_shape, input_type): @pytest.mark.precommit_tf_fe @pytest.mark.nightly def test_atan2_basic(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_atan2_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_BatchToSpace.py b/tests/layer_tests/tensorflow_tests/test_tf_BatchToSpace.py index 0d1b2c5d4f05d9..3f59e4d0896496 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_BatchToSpace.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_BatchToSpace.py @@ -33,10 +33,10 @@ def create_batch_to_space_net(self, in_shape, crops_value, block_shape_value): @pytest.mark.precommit_tf_fe @pytest.mark.nightly def test_batch_to_space_basic(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_batch_to_space_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) test_data_4D = [ dict(in_shape=[4, 1, 1, 3], block_shape_value=[2, 2], crops_value=[[0, 0], [0, 0]]), @@ -48,10 +48,10 @@ def test_batch_to_space_basic(self, params, ie_device, precision, ir_version, te @pytest.mark.parametrize("params", test_data_4D) @pytest.mark.nightly def test_batch_to_space_4D(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_batch_to_space_net(**params), 
ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) test_data_5D = [ dict(in_shape=[144, 2, 1, 4, 1], block_shape_value=[3, 4, 2, 2], @@ -61,7 +61,7 @@ def test_batch_to_space_4D(self, params, ie_device, precision, ir_version, temp_ @pytest.mark.parametrize("params", test_data_5D) @pytest.mark.nightly def test_batch_to_space_5D(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_batch_to_space_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_BatchToSpaceND.py b/tests/layer_tests/tensorflow_tests/test_tf_BatchToSpaceND.py index eed7b2918ce122..06a7931626982a 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_BatchToSpaceND.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_BatchToSpaceND.py @@ -29,7 +29,7 @@ def create_batch_to_space_nd_net(self, input_shape, block_shape, crops): @pytest.mark.precommit_tf_fe @pytest.mark.nightly def test_batch_to_space_nd_basic(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_batch_to_space_nd_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_BiasAdd.py b/tests/layer_tests/tensorflow_tests/test_tf_BiasAdd.py index 41c505ac0f9585..72009334d2ffd9 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_BiasAdd.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_BiasAdd.py @@ -98,20 +98,20 @@ def create_bias_add_2_consts_net(self, shape, ir_version, use_new_frontend, outp @pytest.mark.parametrize("params", test_data_2D) 
@pytest.mark.nightly def test_bias_add_placeholder_const_2D(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_bias_add_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) @pytest.mark.parametrize("params", test_data_2D) @pytest.mark.nightly def test_bias_add_2_consts_2D(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_bias_add_2_consts_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) test_data_3D = [ pytest.param(dict(shape=[1, 1, 224]), marks=pytest.mark.xfail(reason="*-19053")), @@ -121,20 +121,20 @@ def test_bias_add_2_consts_2D(self, params, ie_device, precision, ir_version, te @pytest.mark.parametrize("params", test_data_3D) @pytest.mark.nightly def test_bias_add_placeholder_const_3D(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_bias_add_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) @pytest.mark.parametrize("params", test_data_3D) @pytest.mark.nightly def test_bias_add_2_consts_3D(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_bias_add_2_consts_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend), ie_device, precision, ir_version, 
temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) test_data_4D = [ dict(shape=[1, 1, 100, 224]), @@ -146,20 +146,20 @@ def test_bias_add_2_consts_3D(self, params, ie_device, precision, ir_version, te @pytest.mark.nightly @pytest.mark.precommit def test_bias_add_placeholder_const_4D(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_bias_add_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) @pytest.mark.parametrize("params", test_data_4D) @pytest.mark.nightly def test_bias_add_2_consts_4D(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_bias_add_2_consts_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) test_data_5D = [ dict(shape=[1, 1, 50, 100, 224]), @@ -170,17 +170,17 @@ def test_bias_add_2_consts_4D(self, params, ie_device, precision, ir_version, te @pytest.mark.nightly @pytest.mark.precommit def test_bias_add_placeholder_const_5D(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_bias_add_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) @pytest.mark.parametrize("params", test_data_5D) @pytest.mark.nightly def test_bias_add_2_consts_5D(self, params, ie_device, precision, ir_version, 
temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_bias_add_2_consts_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_BinaryOps.py b/tests/layer_tests/tensorflow_tests/test_tf_BinaryOps.py index e542881944ce45..62ecc0161add54 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_BinaryOps.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_BinaryOps.py @@ -140,7 +140,7 @@ def create_add_placeholder_const_net(self, x_shape, y_shape, ir_version, op_type @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', reason='Ticket - 122716') def test_binary_op(self, params, ie_device, precision, ir_version, temp_dir, op_type, - use_new_frontend, use_old_api): + use_new_frontend): if not use_new_frontend and op_type in ['BitwiseAnd', 'BitwiseOr', 'BitwiseXor']: pytest.skip("Bitwise ops are supported only by new TF FE.") if precision == "FP16": @@ -150,4 +150,4 @@ def test_binary_op(self, params, ie_device, precision, ir_version, temp_dir, op_ *self.create_add_placeholder_const_net(**params, ir_version=ir_version, op_type=op_type, use_new_frontend=use_new_frontend), ie_device, precision, - ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend, use_old_api=use_old_api) + ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_BroadcastArgs.py b/tests/layer_tests/tensorflow_tests/test_tf_BroadcastArgs.py index 360c8984ff6003..d7e3608d208395 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_BroadcastArgs.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_BroadcastArgs.py @@ -56,8 +56,7 @@ def create_broadcast_args_net(self, s0_shape, s1_shape, input_type): @pytest.mark.parametrize("params", 
test_data_basic) @pytest.mark.precommit_tf_fe @pytest.mark.nightly - def test_broadcast_args_basic(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend, - use_old_api): + def test_broadcast_args_basic(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend): self._test(*self.create_broadcast_args_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_BroadcastTo.py b/tests/layer_tests/tensorflow_tests/test_tf_BroadcastTo.py index 242b510638d147..535ccaeb8bd94c 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_BroadcastTo.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_BroadcastTo.py @@ -48,8 +48,8 @@ def create_broadcastto_placeholder_const_net(self, input_shape, broadcast_shape, @pytest.mark.precommit_tf_fe @pytest.mark.nightly def test_broadcastto_placeholder_const(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_broadcastto_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) \ No newline at end of file + use_new_frontend=use_new_frontend) \ No newline at end of file diff --git a/tests/layer_tests/tensorflow_tests/test_tf_Bucketize.py b/tests/layer_tests/tensorflow_tests/test_tf_Bucketize.py index ade9c9ea264771..03ae2077011751 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_Bucketize.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_Bucketize.py @@ -45,7 +45,7 @@ def create_bucketize_net(self, input_shape, input_type, boundaries_size): @pytest.mark.xfail(platform.machine() in ["aarch64", "arm64", "ARM64"], reason='Ticket - 122716') def test_bucketize_basic(self, params, ie_device, precision, 
ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_bucketize_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_CTCGreedyDecoder.py b/tests/layer_tests/tensorflow_tests/test_tf_CTCGreedyDecoder.py index 580202eefd862b..aea955afb02309 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_CTCGreedyDecoder.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_CTCGreedyDecoder.py @@ -61,10 +61,10 @@ def create_ctcgreedydecoder_placeholder_const_net(self, input_shape, merge_repea @pytest.mark.parametrize("merge_repeated", [False, True]) @pytest.mark.nightly def test_ctcgreedydecoder_placeholder_const(self, params, merge_repeated, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): if ie_device == 'GPU': pytest.xfail('104860') self._test(*self.create_ctcgreedydecoder_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend, merge_repeated=merge_repeated), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api, merge_repeated=merge_repeated) + use_new_frontend=use_new_frontend, merge_repeated=merge_repeated) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_CTCLoss.py b/tests/layer_tests/tensorflow_tests/test_tf_CTCLoss.py index 805ab3ff52f6fd..a2d2768685c235 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_CTCLoss.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_CTCLoss.py @@ -63,9 +63,9 @@ def create_ctcloss_placeholder_const_net(self, inputs, targets, preprocess_colla @pytest.mark.skipif(platform == 'darwin', reason="Ticket - 122182") def test_ctcloss_placeholder_const(self, params, preprocess_collapse_repeated, ctc_merge_repeated, ie_device, precision, ir_version, temp_dir, - use_new_frontend, 
use_old_api): + use_new_frontend): self._test(*self.create_ctcloss_placeholder_const_net(**params, preprocess_collapse_repeated=preprocess_collapse_repeated, ctc_merge_repeated=ctc_merge_repeated), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_Cast.py b/tests/layer_tests/tensorflow_tests/test_tf_Cast.py index b751d68fdd0357..24e6f252ca18b6 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_Cast.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_Cast.py @@ -65,9 +65,9 @@ def create_cast_op_placeholder_const_net(self, input_shape, input_type, output_t @pytest.mark.parametrize("truncate", [ False, True ]) @pytest.mark.nightly def test_cast_op_placeholder_const(self, params, input_type, output_type, truncate, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_cast_op_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend, input_type=input_type, output_type=output_type, truncate=truncate), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_CheckNumerics.py b/tests/layer_tests/tensorflow_tests/test_tf_CheckNumerics.py index 0034beac23394a..a8f52841929f34 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_CheckNumerics.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_CheckNumerics.py @@ -41,7 +41,7 @@ def create_check_numerics_net(self, input_shape, input_type, op): @pytest.mark.precommit_tf_fe @pytest.mark.nightly def test_check_numerics_basic(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_check_numerics_net(**params), ie_device, precision, ir_version, 
temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_ClipByValue.py b/tests/layer_tests/tensorflow_tests/test_tf_ClipByValue.py index 1520c6701a72fc..c2dc178781167f 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_ClipByValue.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_ClipByValue.py @@ -43,8 +43,8 @@ def create_clip_by_value_net(self, t_shape): @pytest.mark.precommit_tf_fe @pytest.mark.nightly def test_clip_by_value_basic(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test( *self.create_clip_by_value_net(**params), ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, - use_old_api=use_old_api, **params) + **params) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_ComplexFFT.py b/tests/layer_tests/tensorflow_tests/test_tf_ComplexFFT.py index 5624d2c2984ad0..21dafffbf58719 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_ComplexFFT.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_ComplexFFT.py @@ -62,11 +62,11 @@ def create_complex_fft_net(self, input_shape, shift_roll, axis_roll, fft_op): reason='Ticket - 126314') def test_complex_fft_basic(self, params, fft_op, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test( *self.create_complex_fft_net(**params, fft_op=fft_op), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api, custom_eps=1e-2) + use_new_frontend=use_new_frontend, custom_eps=1e-2) class TestComplexAbs(CommonTFLayerTest): @@ -106,11 +106,11 @@ def create_complex_abs_net(self, input_shape): @pytest.mark.precommit_tf_fe @pytest.mark.nightly def test_complex_abs_basic(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + 
use_new_frontend): self._test( *self.create_complex_abs_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) class TestComplexRFFT(CommonTFLayerTest): @@ -149,11 +149,11 @@ def create_complex_rfft_net(self, input_shape, fft_length, rfft_op): @pytest.mark.precommit_tf_fe @pytest.mark.nightly def test_complex_rfft_basic(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test( *self.create_complex_rfft_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) class TestComplexIRFFT(CommonTFLayerTest): @@ -195,8 +195,8 @@ def create_complex_irfft_net(self, input_shape, fft_length, irfft_op): @pytest.mark.precommit_tf_fe @pytest.mark.nightly def test_complex_irfft_basic(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test( *self.create_complex_irfft_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_Concat.py b/tests/layer_tests/tensorflow_tests/test_tf_Concat.py index 7d17de58366b1e..89fc66c756003a 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_Concat.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_Concat.py @@ -40,12 +40,11 @@ def create_concat_net(self, input_shapes, axis, is_v2, ir_version, use_new_front @pytest.mark.parametrize("params", test_data_basic) @pytest.mark.precommit_tf_fe @pytest.mark.nightly - def test_concat_basic(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend, - use_old_api): + def test_concat_basic(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend): 
self._test(*self.create_concat_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) test_data_1D = [ dict(input_shapes=[[1], [2]], axis=0, is_v2=False), @@ -53,12 +52,11 @@ def test_concat_basic(self, params, ie_device, precision, ir_version, temp_dir, @pytest.mark.parametrize("params", test_data_1D) @pytest.mark.nightly - def test_concat_1D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend, - use_old_api): + def test_concat_1D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend): self._test(*self.create_concat_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) test_data_2D = [ dict(input_shapes=[[1, 4], [1, 2]], axis=-1, is_v2=True) @@ -66,12 +64,11 @@ def test_concat_1D(self, params, ie_device, precision, ir_version, temp_dir, use @pytest.mark.parametrize("params", test_data_2D) @pytest.mark.nightly - def test_concat_2D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend, - use_old_api): + def test_concat_2D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend): self._test(*self.create_concat_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) test_data_3D = [ dict(input_shapes=[[1, 3, 2], [1, 3, 5]], axis=-1, is_v2=True), @@ -80,12 +77,11 @@ def test_concat_2D(self, params, ie_device, precision, ir_version, temp_dir, use @pytest.mark.parametrize("params", test_data_3D) @pytest.mark.nightly - def test_concat_3D(self, params, ie_device, precision, ir_version, 
temp_dir, use_new_frontend, - use_old_api): + def test_concat_3D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend): self._test(*self.create_concat_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) test_data_4D = [ dict(input_shapes=[[1, 3, 5, 7], [3, 3, 5, 7], [2, 3, 5, 7]], axis=0, is_v2=False), @@ -95,12 +91,11 @@ def test_concat_3D(self, params, ie_device, precision, ir_version, temp_dir, use @pytest.mark.parametrize("params", test_data_4D) @pytest.mark.nightly - def test_concat_4D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend, - use_old_api): + def test_concat_4D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend): self._test(*self.create_concat_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) test_data_5D = [ dict(input_shapes=[[1, 3, 5, 7, 8], [2, 3, 5, 7, 8]], axis=0, is_v2=True), @@ -108,9 +103,8 @@ def test_concat_4D(self, params, ie_device, precision, ir_version, temp_dir, use @pytest.mark.parametrize("params", test_data_5D) @pytest.mark.nightly - def test_concat_5D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend, - use_old_api): + def test_concat_5D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend): self._test(*self.create_concat_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_ConjugateTranspose.py 
b/tests/layer_tests/tensorflow_tests/test_tf_ConjugateTranspose.py index a0f78096c02d50..26d98348afb4ea 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_ConjugateTranspose.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_ConjugateTranspose.py @@ -65,10 +65,10 @@ def create_complex_conjugate_transpose_net(self, input_shape, perm): @pytest.mark.precommit_tf_fe @pytest.mark.nightly def test_conjugate_transpose(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_complex_conjugate_transpose_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) class TestConjugateTranspose(CommonTFLayerTest): @@ -116,7 +116,7 @@ def create_conjugate_transpose_net(self, input_shape, perm): @pytest.mark.precommit_tf_fe @pytest.mark.nightly def test_conjugate_transpose(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_conjugate_transpose_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) \ No newline at end of file + use_new_frontend=use_new_frontend) \ No newline at end of file diff --git a/tests/layer_tests/tensorflow_tests/test_tf_Conv2D.py b/tests/layer_tests/tensorflow_tests/test_tf_Conv2D.py index 9e2dfdb585c697..b320cbbe160892 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_Conv2D.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_Conv2D.py @@ -82,10 +82,10 @@ def create_conv2d_placeholder_const_net(self, input_shape, input_filter, input_s @pytest.mark.parametrize("padding", ['EXPLICIT', 'SAME', 'VALID']) @pytest.mark.nightly def test_conv2d_placeholder_const(self, params, padding, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): if ie_device == 'GPU': 
pytest.xfail('104862') self._test(*self.create_conv2d_placeholder_const_net(**params, input_padding=padding, ir_version=ir_version, use_new_frontend=use_new_frontend), ie_device, precision, ir_version, input_padding=padding, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_Conv2DBackprop.py b/tests/layer_tests/tensorflow_tests/test_tf_Conv2DBackprop.py index 158ed5823bf1f3..76bf60986b9314 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_Conv2DBackprop.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_Conv2DBackprop.py @@ -70,8 +70,8 @@ def create_conv2dbackprop_placeholder_const_net(self, input_shape, input_filter, @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly def test_conv2dbackprop_placeholder_const(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_conv2dbackprop_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_Conv3D.py b/tests/layer_tests/tensorflow_tests/test_tf_Conv3D.py index c7e64cc98da836..70cc58d758f462 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_Conv3D.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_Conv3D.py @@ -76,8 +76,8 @@ def create_conv3d_placeholder_const_net(self, input_shape, input_filter, input_s @pytest.mark.parametrize("padding", ['SAME', 'VALID']) @pytest.mark.nightly def test_conv3d_placeholder_const(self, params, padding, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_conv3d_placeholder_const_net(**params, input_padding=padding, ir_version=ir_version, 
use_new_frontend=use_new_frontend), ie_device, precision, ir_version, input_padding=padding, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_Conv3DBackprop.py b/tests/layer_tests/tensorflow_tests/test_tf_Conv3DBackprop.py index 7f0f0fdb5aaabf..25f5cae245264f 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_Conv3DBackprop.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_Conv3DBackprop.py @@ -72,8 +72,8 @@ def create_conv3dbackprop_placeholder_const_net(self, input_shape, input_filter, @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly def test_conv3dbackprop_placeholder_const(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_conv3dbackprop_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_CropAndResize.py b/tests/layer_tests/tensorflow_tests/test_tf_CropAndResize.py index a216dd23beaddf..ff18cbd37c611b 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_CropAndResize.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_CropAndResize.py @@ -58,7 +58,7 @@ def create_crop_and_resize_net(self, image_shape, num_boxes, crop_size_value, me @pytest.mark.xfail(platform.machine() in ["aarch64", "arm64", "ARM64"], reason='Ticket - 122716') def test_crop_and_resize_basic(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_crop_and_resize_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git 
a/tests/layer_tests/tensorflow_tests/test_tf_Cumsum.py b/tests/layer_tests/tensorflow_tests/test_tf_Cumsum.py index 79bb756d4a0650..d556a98e538f06 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_Cumsum.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_Cumsum.py @@ -50,7 +50,7 @@ def create_cumsum_net(self, input_shape, axis, exclusive, reverse): @pytest.mark.precommit_tf_fe @pytest.mark.nightly def test_cumsum_basic(self, params, exclusive, reverse, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_cumsum_net(**params, exclusive=exclusive, reverse=reverse), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_DepthToSpace.py b/tests/layer_tests/tensorflow_tests/test_tf_DepthToSpace.py index 5b2ad94356d0d3..946f19414c6700 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_DepthToSpace.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_DepthToSpace.py @@ -29,7 +29,7 @@ def create_depth_to_space_net(self, input_shape, block_size, data_format): @pytest.mark.precommit_tf_fe @pytest.mark.nightly def test_depth_to_space_basic(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_depth_to_space_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_Div.py b/tests/layer_tests/tensorflow_tests/test_tf_Div.py index 2d71f16a85c66c..9d129e2280e476 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_Div.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_Div.py @@ -44,7 +44,7 @@ def create_div_net(self, input_shape, input_type): @pytest.mark.precommit_tf_fe @pytest.mark.nightly def 
test_div_basic(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_div_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) \ No newline at end of file + use_new_frontend=use_new_frontend) \ No newline at end of file diff --git a/tests/layer_tests/tensorflow_tests/test_tf_DivNoNan.py b/tests/layer_tests/tensorflow_tests/test_tf_DivNoNan.py index 5a6f3883185f23..45521a16ddf5f9 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_DivNoNan.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_DivNoNan.py @@ -46,7 +46,7 @@ def create_div_no_nan_net(self, input_shape, input_type): @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', reason='Ticket - 122716') def test_div_no_nan_basic(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_div_no_nan_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_DynamicPartition.py b/tests/layer_tests/tensorflow_tests/test_tf_DynamicPartition.py index 06f85d3948185a..aca83d0443f1d3 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_DynamicPartition.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_DynamicPartition.py @@ -45,14 +45,14 @@ def create_dynamic_partition_net(self, data_shape, partitions_shape, num_partiti @pytest.mark.precommit_tf_fe @pytest.mark.nightly def test_dynamic_partition_basic(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): if ie_device == 'GPU': pytest.xfail('104855') if not use_new_frontend: pytest.skip("DynamicPartition operation is not supported via legacy frontend.") 
self._test(*self.create_dynamic_partition_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) test_data_other_types = [ dict(data_shape=[10], partitions_shape=[10], num_partitions=10, data_type=tf.int32), @@ -62,11 +62,11 @@ def test_dynamic_partition_basic(self, params, ie_device, precision, ir_version, @pytest.mark.parametrize("params", test_data_other_types) @pytest.mark.nightly def test_dynamic_partition_other_types(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): if ie_device == 'GPU': pytest.xfail('104855') if not use_new_frontend: pytest.skip("DynamicPartition operation is not supported via legacy frontend.") self._test(*self.create_dynamic_partition_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_Eltwise.py b/tests/layer_tests/tensorflow_tests/test_tf_Eltwise.py index a7307013e0961d..802dfb8d4e0497 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_Eltwise.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_Eltwise.py @@ -59,12 +59,11 @@ def create_eltwise_net(self, shape, operation, ir_version, use_new_frontend): @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_eltwise(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend, - use_old_api): + def test_eltwise(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend): self._test(*self.create_eltwise_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) test_data_5D = [] for operation in ['sum', 'max', 'mul']: @@ -74,10 
+73,10 @@ def test_eltwise(self, params, ie_device, precision, ir_version, temp_dir, use_n @pytest.mark.precommit @pytest.mark.nightly def test_eltwise_5D_precommit(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): if ie_device == 'GPU': pytest.skip("5D tensors is not supported on GPU") self._test(*self.create_eltwise_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_EnsureShape.py b/tests/layer_tests/tensorflow_tests/test_tf_EnsureShape.py index d51de4fdada431..2439dc47dc44c1 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_EnsureShape.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_EnsureShape.py @@ -38,7 +38,7 @@ def create_ensure_shape_net(self, input_shape, input_type, target_shape): @pytest.mark.precommit_tf_fe @pytest.mark.nightly def test_ensure_shape_basic(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_ensure_shape_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_Equal.py b/tests/layer_tests/tensorflow_tests/test_tf_Equal.py index eefe0945cf983a..8c42c7f3be4920 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_Equal.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_Equal.py @@ -111,13 +111,12 @@ def create_tf_equal_net(self, ir_version, use_new_frontend, x_shape, output_type @pytest.mark.parametrize("params", test_data_int32) @pytest.mark.nightly - def test_tf_equal_int32(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend, - use_old_api): + def 
test_tf_equal_int32(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend): self._test(*self.create_tf_equal_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend, output_type=np.int32), ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, - use_old_api=use_old_api, **params) + **params) test_data_int64 = [ pytest.param( @@ -134,13 +133,12 @@ def test_tf_equal_int32(self, params, ie_device, precision, ir_version, temp_dir @pytest.mark.parametrize("params", test_data_int64) @pytest.mark.nightly - def test_tf_equal_int64(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend, - use_old_api): + def test_tf_equal_int64(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend): self._test(*self.create_tf_equal_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend, output_type=np.int64), ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, - use_old_api=use_old_api, **params) + **params) # Values for checking important corner cases for float values # expect: false false false false false false true false true @@ -160,13 +158,12 @@ def test_tf_equal_int64(self, params, ie_device, precision, ir_version, temp_dir @pytest.mark.parametrize("params", test_data_float16) @pytest.mark.nightly - def test_tf_equal_float16(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend, - use_old_api): + def test_tf_equal_float16(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend): self._test(*self.create_tf_equal_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend, output_type=np.float16), ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, - use_old_api=use_old_api, **params) + **params) test_data_float32 = [ pytest.param( @@ -181,13 +178,12 @@ def test_tf_equal_float16(self, params, ie_device, 
precision, ir_version, temp_d @pytest.mark.parametrize("params", test_data_float32) @pytest.mark.nightly - def test_tf_equal_float32(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend, - use_old_api): + def test_tf_equal_float32(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend): self._test(*self.create_tf_equal_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend, output_type=np.float32), ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, - use_old_api=use_old_api, **params) + **params) test_data_float64 = [ pytest.param( @@ -202,10 +198,9 @@ def test_tf_equal_float32(self, params, ie_device, precision, ir_version, temp_d @pytest.mark.parametrize("params", test_data_float64) @pytest.mark.nightly - def test_tf_equal_float64(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend, - use_old_api): + def test_tf_equal_float64(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend): self._test(*self.create_tf_equal_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend, output_type=np.float64), ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, - use_old_api=use_old_api, **params) + **params) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_ExpandDims.py b/tests/layer_tests/tensorflow_tests/test_tf_ExpandDims.py index fbb327e5f42164..d0872e075c4655 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_ExpandDims.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_ExpandDims.py @@ -36,8 +36,7 @@ def create_expand_dims_net(self, input_shape, axis): @pytest.mark.parametrize("params", test_basic) @pytest.mark.nightly @pytest.mark.precommit_tf_fe - def test_expand_dims_basic(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend, - use_old_api): + def test_expand_dims_basic(self, params, ie_device, precision, ir_version, temp_dir, 
use_new_frontend): self._test(*self.create_expand_dims_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_ExtractImagePatches.py b/tests/layer_tests/tensorflow_tests/test_tf_ExtractImagePatches.py index 41441c397f73a9..ef43e0c82d6364 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_ExtractImagePatches.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_ExtractImagePatches.py @@ -37,8 +37,7 @@ def create_extract_image_patches_net(self, images_shape, ksizes, strides, rates, @pytest.mark.nightly @pytest.mark.precommit_tf_fe def test_extract_image_patches_basic(self, params, padding, ie_device, precision, ir_version, temp_dir, - use_new_frontend, - use_old_api): + use_new_frontend): self._test(*self.create_extract_image_patches_net(**params, padding=padding), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_Eye.py b/tests/layer_tests/tensorflow_tests/test_tf_Eye.py index 8eaa83d0e3fa5b..e5c45eaefd5ffc 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_Eye.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_Eye.py @@ -53,11 +53,10 @@ def create_tf_eye_net(self, num_rows, num_columns, batch_shape, output_type): @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly @pytest.mark.precommit - def test_tf_eye(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend, - use_old_api=True): + def test_tf_eye(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend): if ie_device == 'GPU': pytest.skip("Roll is not supported on GPU") self._test(*self.create_tf_eye_net(**params), ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, - 
use_old_api=use_old_api, **params) + **params) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_FakeQuantWithMinMaxVars.py b/tests/layer_tests/tensorflow_tests/test_tf_FakeQuantWithMinMaxVars.py index 191b46e035a376..4937f34f292312 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_FakeQuantWithMinMaxVars.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_FakeQuantWithMinMaxVars.py @@ -47,11 +47,10 @@ def create_fake_quant_with_min_max_vars_net(self, inputs_shape, min_value, max_v @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', reason='Ticket - 122716') def test_fake_quant_with_min_max_vars_basic(self, params, fake_quant_op, ie_device, precision, ir_version, temp_dir, - use_new_frontend, - use_old_api): + use_new_frontend): self._test(*self.create_fake_quant_with_min_max_vars_net(**params, fake_quant_op=fake_quant_op), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) test_per_channel_basic = [ dict(inputs_shape=[2, 6, 4], min_value=[-4, -3, -5, -8], max_value=[4, 7, 9, 5], num_bits=None, @@ -64,8 +63,7 @@ def test_fake_quant_with_min_max_vars_basic(self, params, fake_quant_op, ie_devi @pytest.mark.nightly @pytest.mark.xfail("104822") def test_fake_quant_with_min_max_vars_per_channel_basic(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, - use_old_api): + use_new_frontend): self._test(*self.create_fake_quant_with_min_max_vars_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_FakeQuantize.py b/tests/layer_tests/tensorflow_tests/test_tf_FakeQuantize.py index 25b5a6070df038..22ac808b6ed605 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_FakeQuantize.py +++ 
b/tests/layer_tests/tensorflow_tests/test_tf_FakeQuantize.py @@ -140,9 +140,9 @@ def create_fake_quantize_net(self, il, ih, num_bits, narrow_range, nudged_il, nu @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly def test_fake_quantize(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_fake_quantize_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend), ie_device, precision, ir_version, kwargs_to_prepare_input=params, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_Fill.py b/tests/layer_tests/tensorflow_tests/test_tf_Fill.py index 46a4be61d0cae9..3bf8fad866bbcd 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_Fill.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_Fill.py @@ -52,8 +52,8 @@ def create_fill_ops_placeholder_const_net(self, input_shape, value, ir_version, @pytest.mark.precommit_tf_fe @pytest.mark.nightly def test_fill_ops_placeholder_const(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_fill_ops_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) \ No newline at end of file + use_new_frontend=use_new_frontend) \ No newline at end of file diff --git a/tests/layer_tests/tensorflow_tests/test_tf_FloorDiv.py b/tests/layer_tests/tensorflow_tests/test_tf_FloorDiv.py index 20eea51c3f4018..b49468fa8c7319 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_FloorDiv.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_FloorDiv.py @@ -44,8 +44,8 @@ def create_add_placeholder_const_net(self, x_shape, dtype, ir_version, use_new_f @pytest.mark.nightly 
@pytest.mark.precommit_tf_fe def test_add_placeholder_const_1D(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_add_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_FusedBatchNorm.py b/tests/layer_tests/tensorflow_tests/test_tf_FusedBatchNorm.py index b2d44b5a07beb9..46c6a14189d717 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_FusedBatchNorm.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_FusedBatchNorm.py @@ -106,7 +106,7 @@ def create_fused_batch_norm_net(self, x_shape, epsilon, exponential_avg_factor, @pytest.mark.nightly @pytest.mark.skipif(platform == 'darwin', reason="Ticket - 122182") def test_fused_batch_norm_basic(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_fused_batch_norm_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_GRUBlockCell.py b/tests/layer_tests/tensorflow_tests/test_tf_GRUBlockCell.py index 2f2c750725390f..e968c6a78d3d2f 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_GRUBlockCell.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_GRUBlockCell.py @@ -61,9 +61,9 @@ def create_tf_gru_block_cell(self, batch_size, input_size, hidden_size): @pytest.mark.nightly @pytest.mark.precommit def test_tf_gru_block_cell(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): if ie_device == 'GPU': pytest.skip("Skip TF GRUBlockCell test on GPU") 
self._test(*self.create_tf_gru_block_cell(**params), ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, - use_new_frontend=use_new_frontend, use_old_api=use_old_api, custom_eps=1e-3, **params) + use_new_frontend=use_new_frontend, custom_eps=1e-3, **params) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_Gather.py b/tests/layer_tests/tensorflow_tests/test_tf_Gather.py index 2cec52aab380fd..495adbd666447d 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_Gather.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_Gather.py @@ -69,8 +69,7 @@ def create_gather_net(self, params_shape, params_type, indices_shape, indices_ty @pytest.mark.parametrize("params", test_data_precommit) @pytest.mark.precommit_tf_fe @pytest.mark.nightly - def test_gather(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend, - use_old_api): + def test_gather(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend): self._test(*self.create_gather_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_GatherNd.py b/tests/layer_tests/tensorflow_tests/test_tf_GatherNd.py index e9d2eefab2b848..b0ae61d132efc2 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_GatherNd.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_GatherNd.py @@ -58,8 +58,7 @@ def create_gather_nd_net(self, params_shape, params_type, indices_shape, indices @pytest.mark.parametrize("params", test_data_precommit) @pytest.mark.precommit_tf_fe @pytest.mark.nightly - def test_gather_nd_basic(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend, - use_old_api): + def test_gather_nd_basic(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend): self._test(*self.create_gather_nd_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - 
use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_Identity.py b/tests/layer_tests/tensorflow_tests/test_tf_Identity.py index 2e18d134d22fb4..7721e31631af8e 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_Identity.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_Identity.py @@ -32,7 +32,7 @@ def create_identity_net(self, input_shape, identity_op): @pytest.mark.precommit_tf_fe @pytest.mark.nightly def test_identity_basic(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_identity_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_IdentityN.py b/tests/layer_tests/tensorflow_tests/test_tf_IdentityN.py index 6605af4f3f9e92..141f0a28c6e202 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_IdentityN.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_IdentityN.py @@ -39,7 +39,7 @@ def create_identityn_net(self, value_shape, size_splits_values, axis_value): @pytest.mark.precommit_tf_fe @pytest.mark.nightly def test_split_basic(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_identityn_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_If.py b/tests/layer_tests/tensorflow_tests/test_tf_If.py index 20085e6ac86672..cc429414a17930 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_If.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_If.py @@ -72,12 +72,12 @@ def else_branch(): @pytest.mark.xfail(condition=platform.system() == 'Darwin' and 
platform.machine() == 'arm64', reason='Ticket - 122716') def test_if_basic(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): if ie_device == 'GPU': pytest.xfail('104855') self._test(*self.create_if_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) class TestIfInt(CommonTFLayerTest): @@ -144,12 +144,12 @@ def else_branch(): @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', reason='Ticket - 122716') def test_if_basic(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): if ie_device == 'GPU': pytest.xfail('104855') self._test(*self.create_if_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) class TestNestedIf(CommonTFLayerTest): @@ -224,12 +224,12 @@ def else_branch(): @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', reason='Ticket - 122716') def test_if_basic(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): if ie_device == 'GPU': pytest.xfail('104855') self._test(*self.create_if_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) class TestSequantialIfs(CommonTFLayerTest): @@ -316,9 +316,9 @@ def else_branch(): @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', reason='Ticket - 122716') def test_if_basic(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): if ie_device == 'GPU': pytest.xfail('104855') 
self._test(*self.create_sequential_ifs_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_Inv.py b/tests/layer_tests/tensorflow_tests/test_tf_Inv.py index af6e57dda83a93..da519e208e6bab 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_Inv.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_Inv.py @@ -38,7 +38,7 @@ def create_inv_net(self, input_shape, input_type): @pytest.mark.precommit_tf_fe @pytest.mark.nightly def test_inv_basic(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_inv_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) \ No newline at end of file + use_new_frontend=use_new_frontend) \ No newline at end of file diff --git a/tests/layer_tests/tensorflow_tests/test_tf_InvertPermutation.py b/tests/layer_tests/tensorflow_tests/test_tf_InvertPermutation.py index 8b0a11f825aab6..cd39046fdccc83 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_InvertPermutation.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_InvertPermutation.py @@ -38,7 +38,7 @@ def create_invert_permutation_net(self, input_shape, input_type): @pytest.mark.precommit_tf_fe @pytest.mark.nightly def test_invert_permutation_basic(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_invert_permutation_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_IsFinite.py b/tests/layer_tests/tensorflow_tests/test_tf_IsFinite.py index 802200b0e3d9c4..903eb10123a5f8 100644 --- 
a/tests/layer_tests/tensorflow_tests/test_tf_IsFinite.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_IsFinite.py @@ -40,11 +40,11 @@ def create_is_finite_net(self, x_shape, x_type): @pytest.mark.precommit_tf_fe @pytest.mark.nightly def test_is_finite_basic(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): if ie_device == 'GPU': pytest.xfail('104855') if not use_new_frontend: pytest.skip("IsFinite operation is not supported via legacy frontend.") self._test(*self.create_is_finite_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_IsInf.py b/tests/layer_tests/tensorflow_tests/test_tf_IsInf.py index f2814a5d7aceba..beb36d51c94a50 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_IsInf.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_IsInf.py @@ -38,11 +38,11 @@ def create_is_inf_net(self, x_shape, x_type): @pytest.mark.precommit_tf_fe @pytest.mark.nightly def test_is_inf_basic(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): if ie_device == 'GPU': pytest.xfail('104855') if not use_new_frontend: pytest.skip("IsInf operation is not supported via legacy frontend.") self._test(*self.create_is_inf_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_IsNan.py b/tests/layer_tests/tensorflow_tests/test_tf_IsNan.py index b0a44b781d7d73..b04104d2461116 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_IsNan.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_IsNan.py @@ -40,11 +40,11 @@ def create_is_nan_net(self, x_shape, x_type): @pytest.mark.precommit_tf_fe @pytest.mark.nightly def 
test_is_nan_basic(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): if ie_device == 'GPU': pytest.xfail('104855') if not use_new_frontend: pytest.skip("IsNan operation is not supported via legacy frontend.") self._test(*self.create_is_nan_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_L2Loss.py b/tests/layer_tests/tensorflow_tests/test_tf_L2Loss.py index 9011f2ee9c3e6d..3d7eea6fb2312b 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_L2Loss.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_L2Loss.py @@ -28,11 +28,11 @@ def create_l2_loss_net(self, input_shape): @pytest.mark.precommit_tf_fe @pytest.mark.nightly def test_l2_loss_basic(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): if ie_device == 'GPU': pytest.xfail('104863') if not use_new_frontend: pytest.skip("L2Loss is not supported by legacy FE.") self._test(*self.create_l2_loss_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_LRN.py b/tests/layer_tests/tensorflow_tests/test_tf_LRN.py index 1d78baca910a91..14d17a5beb8530 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_LRN.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_LRN.py @@ -29,7 +29,7 @@ def create_lrn_net(self, input_shape, depth_radius, bias, alpha, beta): #@pytest.mark.precommit_tf_fe - ticket 116032 @pytest.mark.nightly def test_lrn_basic(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_lrn_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - 
use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_LeakyRelu.py b/tests/layer_tests/tensorflow_tests/test_tf_LeakyRelu.py index ea672ac144d987..8d9465a93641dc 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_LeakyRelu.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_LeakyRelu.py @@ -36,7 +36,7 @@ def create_leaky_relu_net(self, x_shape, alpha_value): @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', reason='Ticket - 122716') def test_leaky_relu_basic(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_leaky_relu_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_LinSpace.py b/tests/layer_tests/tensorflow_tests/test_tf_LinSpace.py index 216fe7b7816de4..a68303967d676f 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_LinSpace.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_LinSpace.py @@ -33,7 +33,7 @@ def create_lin_space_net(self, num_value): @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', reason='Ticket - 122716') def test_lin_space_basic(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_lin_space_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_ListDiff.py b/tests/layer_tests/tensorflow_tests/test_tf_ListDiff.py index ff9389db41b1bb..36b4480c052bd5 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_ListDiff.py +++ 
b/tests/layer_tests/tensorflow_tests/test_tf_ListDiff.py @@ -42,7 +42,7 @@ def create_list_diff_net(self, x_shape, y_shape, out_idx): @pytest.mark.precommit_tf_fe @pytest.mark.nightly def test_list_diff_basic(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_list_diff_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_Log1p.py b/tests/layer_tests/tensorflow_tests/test_tf_Log1p.py index aa1659e2094f95..f8e912520e5db2 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_Log1p.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_Log1p.py @@ -37,7 +37,7 @@ def create_log1p_net(self, x_shape): @pytest.mark.precommit_tf_fe @pytest.mark.nightly def test_log1p_basic(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_log1p_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_LogSoftmax.py b/tests/layer_tests/tensorflow_tests/test_tf_LogSoftmax.py index 063e310dd8174a..6fe5c4bae502af 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_LogSoftmax.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_LogSoftmax.py @@ -44,7 +44,7 @@ def create_log_softmax_net(self, logits_shape): @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', reason='Ticket - 122716') def test_log_softmax_basic(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_log_softmax_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - 
use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_MatMul.py b/tests/layer_tests/tensorflow_tests/test_tf_MatMul.py index 2a93291af28230..94281023dfb630 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_MatMul.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_MatMul.py @@ -59,11 +59,11 @@ def create_net_with_matmul_op(self, x_shape, y_shape, x_bool, y_bool, op_type, i @pytest.mark.precommit_tf_fe @pytest.mark.nightly def test_matmul_op_precommit(self, params, ie_device, precision, ir_version, temp_dir, op_type, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_net_with_matmul_op(**params, ir_version=ir_version, op_type=op_type, use_new_frontend=use_new_frontend, x_bool=False, y_bool=False), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) test_data = test_data_precommit + [ dict(x_shape=[2, 3, 4, 4], y_shape=[2, 3, 4, 4]), #Tests 4D shapes @@ -85,8 +85,8 @@ def test_matmul_op_precommit(self, params, ie_device, precision, ir_version, tem ]) @pytest.mark.nightly def test_matmul_op_nightly(self, params, ie_device, precision, ir_version, temp_dir, op_type, - x_bool, y_bool, use_new_frontend, use_old_api): + x_bool, y_bool, use_new_frontend): self._test(*self.create_net_with_matmul_op(**params, ir_version=ir_version, op_type=op_type, use_new_frontend=use_new_frontend, x_bool=x_bool, y_bool=y_bool), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_MatrixDiag.py b/tests/layer_tests/tensorflow_tests/test_tf_MatrixDiag.py index faffb03835c96c..5359d7f0b387b1 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_MatrixDiag.py +++ 
b/tests/layer_tests/tensorflow_tests/test_tf_MatrixDiag.py @@ -38,7 +38,7 @@ def create_matrix_diag_net(self, diagonal_shape, diagonal_type): @pytest.mark.precommit_tf_fe @pytest.mark.nightly def test_matrix_diag_basic(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_matrix_diag_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_MaxPoolWithArgmax.py b/tests/layer_tests/tensorflow_tests/test_tf_MaxPoolWithArgmax.py index 1805863b8c221b..3e3ce395ca6e4c 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_MaxPoolWithArgmax.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_MaxPoolWithArgmax.py @@ -68,10 +68,10 @@ def create_max_pool_with_argmax_net(self, input_shape, ksize, strides, input_typ def test_max_pool_with_argmax_basic(self, params, input_type, padding, targmax, include_batch_in_index, with_second_output, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test( *self.create_max_pool_with_argmax_net(**params, input_type=input_type, padding=padding, targmax=targmax, include_batch_in_index=include_batch_in_index, with_second_output=with_second_output), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_MinMax.py b/tests/layer_tests/tensorflow_tests/test_tf_MinMax.py index 47082ba44094dc..d6ba1a91e37940 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_MinMax.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_MinMax.py @@ -59,8 +59,8 @@ def create_minmax_ops_placeholder_const_net(self, input_shape, axis, op_type, ke @pytest.mark.precommit @pytest.mark.nightly def 
test_minmax_ops_placeholder_const(self, params, op_type, keep_dims, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_minmax_ops_placeholder_const_net(**params, op_type=op_type, ir_version=ir_version, use_new_frontend=use_new_frontend, keep_dims=keep_dims), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_Mul.py b/tests/layer_tests/tensorflow_tests/test_tf_Mul.py index 67f92ea040fdff..b4fbddc4b81572 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_Mul.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_Mul.py @@ -63,11 +63,11 @@ def create_mul_placeholder_const_net(self, x_shape, y_shape, ir_version, use_new @pytest.mark.parametrize("params", test_data_1D) @pytest.mark.nightly def test_mul_placeholder_const_1D(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_mul_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) test_data_2D = [ dict(x_shape=[1, 1], y_shape=[1, 1]), @@ -80,11 +80,11 @@ def test_mul_placeholder_const_1D(self, params, ie_device, precision, ir_version @pytest.mark.parametrize("params", test_data_2D) @pytest.mark.nightly def test_mul_placeholder_const_2D(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_mul_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) test_data_3D 
= [ dict(x_shape=[1, 1, 1], y_shape=[1, 1, 1]), @@ -100,11 +100,11 @@ def test_mul_placeholder_const_2D(self, params, ie_device, precision, ir_version @pytest.mark.parametrize("params", test_data_3D) @pytest.mark.nightly def test_mul_placeholder_const_3D(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_mul_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) test_data_4D = [ dict(x_shape=[1, 1, 1, 1], y_shape=[1, 1, 1, 1]), @@ -117,11 +117,11 @@ def test_mul_placeholder_const_3D(self, params, ie_device, precision, ir_version @pytest.mark.parametrize("params", test_data_4D) @pytest.mark.nightly def test_mul_placeholder_const_4D(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_mul_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) test_data_5D = [ dict(x_shape=[1, 1, 1, 1, 1], y_shape=[1, 1, 1, 1, 1]), @@ -135,11 +135,11 @@ def test_mul_placeholder_const_4D(self, params, ie_device, precision, ir_version @pytest.mark.parametrize("params", test_data_5D) @pytest.mark.nightly def test_mul_placeholder_const_5D(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_mul_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) 
############################################################################################### # # @@ -154,11 +154,11 @@ def test_mul_placeholder_const_5D(self, params, ie_device, precision, ir_version @pytest.mark.parametrize("params", test_data_broadcast_1D) @pytest.mark.nightly def test_mul_placeholder_const_broadcast_1D(self, params, ie_device, precision, ir_version, - temp_dir, use_new_frontend, use_old_api): + temp_dir, use_new_frontend): self._test(*self.create_mul_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) test_data_broadcast_2D = [ dict(x_shape=[1, 1], y_shape=[1]), @@ -172,11 +172,11 @@ def test_mul_placeholder_const_broadcast_1D(self, params, ie_device, precision, @pytest.mark.parametrize("params", test_data_broadcast_2D) @pytest.mark.nightly def test_mul_placeholder_const_broadcast_2D(self, params, ie_device, precision, ir_version, - temp_dir, use_new_frontend, use_old_api): + temp_dir, use_new_frontend): self._test(*self.create_mul_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) test_data_broadcast_3D = [ dict(x_shape=[1, 1, 1], y_shape=[1]), @@ -198,11 +198,11 @@ def test_mul_placeholder_const_broadcast_2D(self, params, ie_device, precision, @pytest.mark.parametrize("params", test_data_broadcast_3D) @pytest.mark.nightly def test_mul_placeholder_const_broadcast_3D(self, params, ie_device, precision, ir_version, - temp_dir, use_new_frontend, use_old_api): + temp_dir, use_new_frontend): self._test(*self.create_mul_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend), ie_device, precision, ir_version, temp_dir=temp_dir, - 
use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) test_data_broadcast_4D = [ dict(x_shape=[1, 1, 1, 1], y_shape=[1]), @@ -221,11 +221,11 @@ def test_mul_placeholder_const_broadcast_3D(self, params, ie_device, precision, @pytest.mark.nightly @pytest.mark.precommit def test_mul_placeholder_const_broadcast_4D(self, params, ie_device, precision, ir_version, - temp_dir, use_new_frontend, use_old_api): + temp_dir, use_new_frontend): self._test(*self.create_mul_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) test_data_broadcast_5D = [ dict(x_shape=[1, 1, 1, 1, 1], y_shape=[1]), @@ -242,11 +242,11 @@ def test_mul_placeholder_const_broadcast_4D(self, params, ie_device, precision, @pytest.mark.parametrize("params", test_data_broadcast_5D) @pytest.mark.nightly def test_mul_placeholder_const_broadcast_5D(self, params, ie_device, precision, ir_version, - temp_dir, use_new_frontend, use_old_api): + temp_dir, use_new_frontend): self._test(*self.create_mul_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) class TestComplexMul(CommonTFLayerTest): @@ -297,8 +297,8 @@ def create_complex_mul_net(self, input_shape): @pytest.mark.precommit_tf_fe @pytest.mark.nightly def test_complex_mul(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test( *self.create_complex_mul_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git 
a/tests/layer_tests/tensorflow_tests/test_tf_MulNoNan.py b/tests/layer_tests/tensorflow_tests/test_tf_MulNoNan.py index 3472147215c991..58d05069b1273a 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_MulNoNan.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_MulNoNan.py @@ -41,7 +41,7 @@ def create_mul_no_nan_net(self, input_shape, input_type): @pytest.mark.precommit_tf_fe @pytest.mark.nightly def test_mul_no_nan_basic(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_mul_no_nan_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) \ No newline at end of file + use_new_frontend=use_new_frontend) \ No newline at end of file diff --git a/tests/layer_tests/tensorflow_tests/test_tf_Multinomial.py b/tests/layer_tests/tensorflow_tests/test_tf_Multinomial.py index e83875d1726b9d..b3a8462e8a32e8 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_Multinomial.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_Multinomial.py @@ -121,7 +121,6 @@ def test_multinomial_basic( ir_version, temp_dir, use_new_frontend, - use_old_api, ): if ie_device == "GPU": pytest.skip("Multinomial is not supported on GPU") @@ -139,6 +138,5 @@ def test_multinomial_basic( temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, - use_old_api=use_old_api, kwargs_to_prepare_input={"input": input, "num_samples": num_samples}, ) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_NestedWhile.py b/tests/layer_tests/tensorflow_tests/test_tf_NestedWhile.py index 07da61a4681e7b..162970329bd3f4 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_NestedWhile.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_NestedWhile.py @@ -70,14 +70,12 @@ def body_supp(x_slice_arg, v_slice_arg, j_arg, b_combined_arg_arg): return g, None @pytest.mark.nightly - def test_simple_while(self, ie_device, precision, 
ir_version, temp_dir, use_new_frontend, - use_old_api): + def test_simple_while(self, ie_device, precision, ir_version, temp_dir, use_new_frontend): self._test(*self.create_simple_while(), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) @pytest.mark.precommit_tf_fe @pytest.mark.nightly - def test_nested_while(self, ie_device, precision, ir_version, temp_dir, use_new_frontend, - use_old_api): + def test_nested_while(self, ie_device, precision, ir_version, temp_dir, use_new_frontend): self._test(*self.create_nested_while(), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_NonMaxSupression.py b/tests/layer_tests/tensorflow_tests/test_tf_NonMaxSupression.py index 32f6d19aea048c..72fbc401996169 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_NonMaxSupression.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_NonMaxSupression.py @@ -14,7 +14,7 @@ class TestNonMaxSuppression(CommonTFLayerTest): # overload inputs generation to suit NMS use case def _prepare_input(self, inputs_dict): - channel = ':0' if self.use_old_api or not self.use_new_frontend else '' + channel = ':0' if not self.use_new_frontend else '' input_data = {} for input in inputs_dict.keys(): input_data[input + channel] = np.random.uniform(low=0, high=1, @@ -88,23 +88,19 @@ def create_nms_net(self, test_params: dict, with_scores: bool = False): @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', reason='Ticket - 122716') def test_NonMaxSuppression(self, test_params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): if ie_device == 'GPU': pytest.skip("Skip TF NonMaxSuppresion test on GPU") - self.use_old_api = use_old_api 
self._test(*self.create_nms_net(test_params), ie_device, precision, - ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend, - use_old_api=use_old_api) + ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend) @pytest.mark.parametrize("test_params", test_params) @pytest.mark.nightly @pytest.mark.precommit @pytest.mark.precommit_tf_fe def test_NonMaxSuppressionWithScores(self, test_params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): if ie_device == 'GPU': pytest.skip("Skip TF NonMaxSuppresionWithScores test on GPU") - self.use_old_api = use_old_api self._test(*self.create_nms_net(test_params, with_scores=True), ie_device, precision, - ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend, - use_old_api=use_old_api) + ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_NormalizeL2.py b/tests/layer_tests/tensorflow_tests/test_tf_NormalizeL2.py index cfd06c7d3f32fe..36aafb911db83d 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_NormalizeL2.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_NormalizeL2.py @@ -37,10 +37,10 @@ def create_normalize_l2_net(shape, axes): 'arm64', 'ARM64'), reason='Ticket - 126314, 122716') def test_normalize_l2_basic(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_normalize_l2_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) test_data_complex = [ dict(shape=[2, 3, 5, 4], axes=[1, 2, 3]), @@ -50,7 +50,7 @@ def test_normalize_l2_basic(self, params, ie_device, precision, ir_version, temp @pytest.mark.parametrize("params", test_data_complex) @pytest.mark.nightly def test_normalize_l2_complex(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, 
use_old_api): + use_new_frontend): self._test(*self.create_normalize_l2_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_OneHot.py b/tests/layer_tests/tensorflow_tests/test_tf_OneHot.py index 5986fa1ad1b90b..58ce30e4c0a23c 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_OneHot.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_OneHot.py @@ -32,11 +32,10 @@ def create_one_hot_net(shape, depth, on_value, off_value, axis): @pytest.mark.parametrize("params", test_data_basic) @pytest.mark.precommit_tf_fe @pytest.mark.nightly - def test_one_hot_basic(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend, - use_old_api): + def test_one_hot_basic(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend): self._test(*self.create_one_hot_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) test_data_complex = [ dict(shape=[3, 4], depth=1, on_value=1.0, off_value=-5.0, axis=-2), @@ -45,8 +44,7 @@ def test_one_hot_basic(self, params, ie_device, precision, ir_version, temp_dir, @pytest.mark.parametrize("params", test_data_basic) @pytest.mark.nightly - def test_one_hot_complex(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend, - use_old_api): + def test_one_hot_complex(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend): self._test(*self.create_one_hot_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_OnesLike.py b/tests/layer_tests/tensorflow_tests/test_tf_OnesLike.py index 71b146edc9c335..8add09b6fc1d51 100644 --- 
a/tests/layer_tests/tensorflow_tests/test_tf_OnesLike.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_OnesLike.py @@ -39,7 +39,7 @@ def create_ones_like_net(self, x_shape, x_type): @pytest.mark.precommit_tf_fe @pytest.mark.nightly def test_ones_like(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_ones_like_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_Pack.py b/tests/layer_tests/tensorflow_tests/test_tf_Pack.py index cfee54616fcb3b..1e4754a362d22b 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_Pack.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_Pack.py @@ -47,7 +47,7 @@ def create_pack_net(self, input_shape, input_num, axis, input_type): @pytest.mark.precommit_tf_fe @pytest.mark.nightly def test_pack_basic(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_pack_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_Pad.py b/tests/layer_tests/tensorflow_tests/test_tf_Pad.py index 27899f9f5069c4..ed5cb68bcaa799 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_Pad.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_Pad.py @@ -39,11 +39,10 @@ def create_pad_net(self, input_shape, pads_values, const_value, pad_mode, pad_op @pytest.mark.parametrize("params", test_data_basic) @pytest.mark.precommit_tf_fe @pytest.mark.nightly - def test_pad_basic(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend, - use_old_api): + def test_pad_basic(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend): 
self._test(*self.create_pad_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) class TestComplexPad(CommonTFLayerTest): @@ -84,8 +83,7 @@ def create_pad_complex_net(self, input_shape, pads_values): @pytest.mark.parametrize("params", test_data_basic) @pytest.mark.precommit_tf_fe @pytest.mark.nightly - def test_pad_complex(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend, - use_old_api): + def test_pad_complex(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend): self._test(*self.create_pad_complex_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_ParallelDynamicStitch.py b/tests/layer_tests/tensorflow_tests/test_tf_ParallelDynamicStitch.py index 0d8555411f7727..7ed991e0cab6e2 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_ParallelDynamicStitch.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_ParallelDynamicStitch.py @@ -71,12 +71,12 @@ def create_parallel_dynamic_stitch_net(self, data_input_cnt, shape_of_element, d @pytest.mark.precommit_tf_fe @pytest.mark.nightly def test_parallel_dynamic_stitch_basic(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): if not use_new_frontend: pytest.skip("DynamicStitch operation is not supported via legacy frontend.") self._test(*self.create_parallel_dynamic_stitch_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) test_data_different_types = [ dict(data_input_cnt=4, shape_of_element=[3, 2], data_type=tf.float64), @@ -87,9 +87,9 @@ def test_parallel_dynamic_stitch_basic(self, params, ie_device, precision, ir_ve 
@pytest.mark.parametrize("params", test_data_different_types) @pytest.mark.nightly def test_parallel_dynamic_stitch_different_types(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): if not use_new_frontend: pytest.skip("DynamicStitch operation is not supported via legacy frontend.") self._test(*self.create_parallel_dynamic_stitch_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_Placeholder.py b/tests/layer_tests/tensorflow_tests/test_tf_Placeholder.py index a2be815ff092d3..f65206707dd923 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_Placeholder.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_Placeholder.py @@ -49,7 +49,7 @@ def create_placeholder_net(self, input_shape, input_type): @pytest.mark.precommit_tf_fe @pytest.mark.nightly def test_placeholder(self, input_shape, input_type, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_placeholder_net(input_shape, input_type), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_Pooling.py b/tests/layer_tests/tensorflow_tests/test_tf_Pooling.py index 7c523740d79f96..3c18a0dc1a96c4 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_Pooling.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_Pooling.py @@ -149,12 +149,11 @@ def create_pooling_net(self, kernel_size, strides, pads, in_shape, out_shape, me @pytest.mark.nightly @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', reason='Ticket - 122716') - def test_pool_4D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend, - 
use_old_api): + def test_pool_4D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend): self._test(*self.create_pooling_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) test_data_5D = [] for method in ['max', 'avg']: @@ -233,11 +232,10 @@ def test_pool_4D(self, params, ie_device, precision, ir_version, temp_dir, use_n @pytest.mark.nightly @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', reason='Ticket - 122716') - def test_pool_5D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend, - use_old_api): + def test_pool_5D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend): if ie_device == 'GPU': pytest.skip("5D tensors is not supported on GPU") self._test(*self.create_pooling_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_RandomUniform.py b/tests/layer_tests/tensorflow_tests/test_tf_RandomUniform.py index 1a977145e82f4c..8b6fd00ace3add 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_RandomUniform.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_RandomUniform.py @@ -93,14 +93,14 @@ def create_tf_random_uniform_net(self, global_seed, op_seed, x_shape, min_val, m @pytest.mark.xfail(platform.machine() in ["aarch64", "arm64", "ARM64"], reason='Ticket - 122716') def test_random_uniform_basic(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): if ie_device == 'GPU': pytest.skip("RandomUniform is not supported on GPU") self._test( *self.create_tf_random_uniform_net(**params, 
precision=precision, ir_version=ir_version, use_new_frontend=use_new_frontend), ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, - use_old_api=use_old_api, **params) + **params) test_data_other = [ dict(global_seed=None, op_seed=56197, min_val=-100, max_val=100, x_shape=[6], @@ -115,11 +115,11 @@ def test_random_uniform_basic(self, params, ie_device, precision, ir_version, te @pytest.mark.parametrize("params", test_data_other) @pytest.mark.nightly def test_random_uniform_other(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): if ie_device == 'GPU': pytest.skip("RandomUniform is not supported on GPU") self._test( *self.create_tf_random_uniform_net(**params, precision=precision, ir_version=ir_version, use_new_frontend=use_new_frontend), ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, - use_old_api=use_old_api, **params) + **params) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_Range.py b/tests/layer_tests/tensorflow_tests/test_tf_Range.py index b1d6d72a3c287f..aba0e64b3cd266 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_Range.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_Range.py @@ -52,7 +52,7 @@ def create_range_net(self, input_type, negative_delta): @pytest.mark.precommit_tf_fe @pytest.mark.nightly def test_range_basic(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_range_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_Rank.py b/tests/layer_tests/tensorflow_tests/test_tf_Rank.py index 7c78ed8b3d8033..dfd4894eee932a 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_Rank.py +++ 
b/tests/layer_tests/tensorflow_tests/test_tf_Rank.py @@ -29,7 +29,7 @@ def create_rank_net(self, input_shape, input_type): @pytest.mark.precommit_tf_fe @pytest.mark.nightly def test_rank_basic(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_rank_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_ReLU6.py b/tests/layer_tests/tensorflow_tests/test_tf_ReLU6.py index a5bd07c912ab70..9f6746862eeeaa 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_ReLU6.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_ReLU6.py @@ -66,11 +66,11 @@ def create_relu6_net(self, shape, ir_version, use_new_frontend): @pytest.mark.precommit @pytest.mark.nightly def test_relu6_precommit(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_relu6_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) test_data = [dict(shape=[1]), pytest.param(dict(shape=[1, 224]), marks=pytest.mark.precommit_tf_fe), @@ -80,9 +80,8 @@ def test_relu6_precommit(self, params, ie_device, precision, ir_version, temp_di @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_relu6(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend, - use_old_api): + def test_relu6(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend): self._test(*self.create_relu6_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + 
use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_Reciprocal.py b/tests/layer_tests/tensorflow_tests/test_tf_Reciprocal.py index 3f99c6a2f4d3a3..39ce7c8f2b9fc6 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_Reciprocal.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_Reciprocal.py @@ -37,7 +37,7 @@ def create_reciprocal_net(self, x_shape, x_type): @pytest.mark.precommit_tf_fe @pytest.mark.nightly def test_reciprocal_basic(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_reciprocal_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_ReduceArithmeticOps.py b/tests/layer_tests/tensorflow_tests/test_tf_ReduceArithmeticOps.py index e9ef1805ff3c22..ef6c1c0ebe579b 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_ReduceArithmeticOps.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_ReduceArithmeticOps.py @@ -45,8 +45,8 @@ def create_reduce_net(self, shape, axis, operation, keep_dims, ir_version, use_n @pytest.mark.nightly @pytest.mark.precommit_tf_fe def test_reduce(self, params, operation, keep_dims, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_reduce_net(**params, operation=operation, keep_dims=keep_dims, ir_version=ir_version, use_new_frontend=use_new_frontend), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_ReduceLogicalOps.py b/tests/layer_tests/tensorflow_tests/test_tf_ReduceLogicalOps.py index 2f4aad4f728260..97872aae976cf0 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_ReduceLogicalOps.py +++ 
b/tests/layer_tests/tensorflow_tests/test_tf_ReduceLogicalOps.py @@ -65,8 +65,8 @@ def create_logical_ops_placeholder_const_net(self, input_shape, axis, op_type, i @pytest.mark.precommit @pytest.mark.nightly def test_logical_ops_placeholder_const(self, params, op_type, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_logical_ops_placeholder_const_net(**params, op_type=op_type, ir_version=ir_version, use_new_frontend=use_new_frontend), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_Resample_pattern_new.py b/tests/layer_tests/tensorflow_tests/test_tf_Resample_pattern_new.py index c2d948041c5ec2..a05d234afa241f 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_Resample_pattern_new.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_Resample_pattern_new.py @@ -73,8 +73,7 @@ def create_resample_net(self, shape, factor, use_new_frontend): # TODO mark as precommit (after successfully passing in nightly) @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_resample(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend, - use_old_api): + def test_resample(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend): self._test(*self.create_resample_net(params['shape'], params['factor'], use_new_frontend), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_Reshape.py b/tests/layer_tests/tensorflow_tests/test_tf_Reshape.py index 2d03bd7fa13300..9226e8918c56e5 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_Reshape.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_Reshape.py @@ -45,10 +45,10 @@ def 
create_reshape_net(self, input_shape, input_type, target_shape): @pytest.mark.precommit_tf_fe @pytest.mark.nightly def test_reshape_basic(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_reshape_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) class TestComplexReshape(CommonTFLayerTest): def _prepare_input(self, inputs_info): @@ -87,9 +87,9 @@ def create_complex_transpose_net(self, input_shape, target_shape): @pytest.mark.precommit_tf_fe @pytest.mark.nightly def test_complex_reshape(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test( *self.create_complex_transpose_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_Resize.py b/tests/layer_tests/tensorflow_tests/test_tf_Resize.py index c62492c7a76196..8146226129db62 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_Resize.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_Resize.py @@ -65,7 +65,7 @@ def create_resize_net(self, images_shape, images_type, size_value, align_corners @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', reason='Ticket - 122716') def test_resize_basic(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_resize_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_Reverse.py b/tests/layer_tests/tensorflow_tests/test_tf_Reverse.py index 
523a1ef5832295..43d4aeabeb0b76 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_Reverse.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_Reverse.py @@ -27,6 +27,6 @@ def create_reverse_net(self, shape, dims): @pytest.mark.parametrize("params", test_data_basic) @pytest.mark.precommit_tf_fe @pytest.mark.nightly - def test_reverse_basic(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_reverse_basic(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_reverse_net(**params), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_ReverseSequence.py b/tests/layer_tests/tensorflow_tests/test_tf_ReverseSequence.py index 110025907d6423..394979ea5984ac 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_ReverseSequence.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_ReverseSequence.py @@ -47,7 +47,7 @@ def create_reverse_sequence_net(self, input_shape, input_type, seq_lengths_type, @pytest.mark.precommit_tf_fe @pytest.mark.nightly def test_reverse_sequence_basic(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_reverse_sequence_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_ReverseV2.py b/tests/layer_tests/tensorflow_tests/test_tf_ReverseV2.py index a6e2649913ad3d..e839589e35989e 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_ReverseV2.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_ReverseV2.py @@ -28,6 +28,6 @@ def create_reverse_v2_net(self, shape, axis): @pytest.mark.parametrize("params", test_data_basic) @pytest.mark.nightly @pytest.mark.precommit_tf_fe - def test_reverse_v2_basic(self, 
params, ie_device, precision, ir_version, temp_dir, use_old_api): + def test_reverse_v2_basic(self, params, ie_device, precision, ir_version, temp_dir): self._test(*self.create_reverse_v2_net(**params), - ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api) + ie_device, precision, ir_version, temp_dir=temp_dir) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_Roll.py b/tests/layer_tests/tensorflow_tests/test_tf_Roll.py index 07e75628be953c..15a295b9b68a70 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_Roll.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_Roll.py @@ -41,12 +41,11 @@ def create_tf_roll_net(self, shift, axis, x_shape, input_type, ir_version, use_n @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_tf_roll(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend, - use_old_api): + def test_tf_roll(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend): if ie_device == 'GPU': pytest.skip("Roll is not supported on GPU") self._test(*self.create_tf_roll_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend), ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, - use_old_api=use_old_api, **params) + **params) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_Rsqrt.py b/tests/layer_tests/tensorflow_tests/test_tf_Rsqrt.py index 495022eee3f0b8..0de392cfb76fbc 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_Rsqrt.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_Rsqrt.py @@ -54,11 +54,11 @@ def create_rsqrt_net(self, shape, ir_version, use_new_frontend): @pytest.mark.precommit @pytest.mark.nightly def test_rsqrt_precommit(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_rsqrt_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend), ie_device, precision, ir_version, 
temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) test_data = [dict(shape=[1]), pytest.param(dict(shape=[1, 224]), marks=pytest.mark.precommit_tf_fe), @@ -68,9 +68,8 @@ def test_rsqrt_precommit(self, params, ie_device, precision, ir_version, temp_di @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_rsqrt(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend, - use_old_api): + def test_rsqrt(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend): self._test(*self.create_rsqrt_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_ScatterND.py b/tests/layer_tests/tensorflow_tests/test_tf_ScatterND.py index 26ddcfdd53bcc2..a6131a34e44918 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_ScatterND.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_ScatterND.py @@ -74,8 +74,8 @@ def create_tf_scatternd_placeholder_const_net(self, x_shape, indices, updates, i @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', reason='Ticket - 122716') def test_tf_scatter_nd(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_tf_scatternd_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend), ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, - use_new_frontend=use_new_frontend, use_old_api=use_old_api, **params) + use_new_frontend=use_new_frontend, **params) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_SegmentSum.py b/tests/layer_tests/tensorflow_tests/test_tf_SegmentSum.py index f0f99d4b9cf95f..07653fabedbb2c 100644 --- 
a/tests/layer_tests/tensorflow_tests/test_tf_SegmentSum.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_SegmentSum.py @@ -46,12 +46,12 @@ def create_segment_sum_net(self, data_shape, segment_ids_shape, data_type, segme @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', reason='Ticket - 122716') def test_segment_sum_basic(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): if not use_new_frontend: pytest.skip("SegmentSum operation is not supported via legacy frontend.") self._test(*self.create_segment_sum_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) test_data_different_types = [ dict(data_shape=[2, 3], segment_ids_shape=[2], data_type=tf.int32, segment_ids_type=tf.int32), @@ -63,9 +63,9 @@ def test_segment_sum_basic(self, params, ie_device, precision, ir_version, temp_ @pytest.mark.parametrize("params", test_data_different_types) @pytest.mark.nightly def test_segment_sum_different_types(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): if not use_new_frontend: pytest.skip("SegmentSum operation is not supported via legacy frontend.") self._test(*self.create_segment_sum_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_Select.py b/tests/layer_tests/tensorflow_tests/test_tf_Select.py index cb1e7241e8276f..2e23ecb1b9f649 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_Select.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_Select.py @@ -46,9 +46,9 @@ def create_select_net(self, cond_shape, x_shape, y_shape): @pytest.mark.precommit_tf_fe @pytest.mark.nightly def test_select_basic(self, params, 
ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): if not use_new_frontend: pytest.skip("Select tests are not passing for the legacy frontend.") self._test(*self.create_select_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_SelectV2.py b/tests/layer_tests/tensorflow_tests/test_tf_SelectV2.py index 21668cf70240fe..03afbcf24843b0 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_SelectV2.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_SelectV2.py @@ -45,9 +45,9 @@ def create_select_v2_net(self, cond_shape, x_shape, y_shape): @pytest.mark.precommit_tf_fe @pytest.mark.nightly def test_select_v2_basic(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): if not use_new_frontend: pytest.skip("Select tests are not passing for the legacy frontend.") self._test(*self.create_select_v2_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_Shape.py b/tests/layer_tests/tensorflow_tests/test_tf_Shape.py index 0a4694cc1dd945..1db8d398405cb1 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_Shape.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_Shape.py @@ -51,12 +51,12 @@ def create_shape_net(self, input_shape, input_type, out_type): @pytest.mark.precommit_tf_fe @pytest.mark.nightly def test_shape_basic(self, input_shape, input_type, out_type, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): if input_shape == [] and out_type == tf.int64: pytest.skip('129100 - Hangs or segfault') self._test(*self.create_shape_net(input_shape=input_shape, input_type=input_type, 
out_type=out_type), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) class TestComplexShape(CommonTFLayerTest): @@ -96,8 +96,8 @@ def create_complex_shape_net(self, input_shape): @pytest.mark.precommit_tf_fe @pytest.mark.nightly def test_complex_shape(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test( *self.create_complex_shape_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_ShapeN.py b/tests/layer_tests/tensorflow_tests/test_tf_ShapeN.py index dd358945ea2103..99fb097465dee3 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_ShapeN.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_ShapeN.py @@ -32,7 +32,7 @@ def create_shape_n_net(self, input_shapes, out_type): @pytest.mark.precommit_tf_fe @pytest.mark.nightly def test_shape_n_basic(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_shape_n_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_Size.py b/tests/layer_tests/tensorflow_tests/test_tf_Size.py index 2eef22bdc75e74..4917b710e6a131 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_Size.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_Size.py @@ -39,7 +39,7 @@ def create_size_net(self, input_shape, input_type, out_type): @pytest.mark.precommit_tf_fe @pytest.mark.nightly def test_size_basic(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_size_net(**params), 
ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_Slice.py b/tests/layer_tests/tensorflow_tests/test_tf_Slice.py index 902c9d1117ba65..b826d1ee3e8b56 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_Slice.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_Slice.py @@ -30,8 +30,7 @@ def create_slice_net(self, input_shape, input_type, begin_value, size_value): @pytest.mark.parametrize("params", test_data_basic) @pytest.mark.precommit_tf_fe @pytest.mark.nightly - def test_slice_basic(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend, - use_old_api): + def test_slice_basic(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend): self._test(*self.create_slice_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_Softmax.py b/tests/layer_tests/tensorflow_tests/test_tf_Softmax.py index 574fe3d32949f7..1db741b920add4 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_Softmax.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_Softmax.py @@ -39,7 +39,7 @@ def create_softmax_net(self, input_shape): @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', reason='Ticket - 122716') def test_softmax_basic(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_softmax_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_Softsign.py b/tests/layer_tests/tensorflow_tests/test_tf_Softsign.py index 
af9c679c2638a9..ab93fb51a3c633 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_Softsign.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_Softsign.py @@ -71,8 +71,8 @@ def create_softsign_net(self, shape, ir_version, use_new_frontend): @pytest.mark.precommit @pytest.mark.nightly def test_softsign(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_softsign_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_SpaceToBatch.py b/tests/layer_tests/tensorflow_tests/test_tf_SpaceToBatch.py index 03e83dc39e9c8d..345aa1b8f891cb 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_SpaceToBatch.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_SpaceToBatch.py @@ -38,10 +38,10 @@ def create_space_to_batch_net(self, in_shape, pads_value, block_shape_value): @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', reason='Ticket - 122716') def test_space_to_batch_basic(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_space_to_batch_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) test_data_4D = [ dict(in_shape=[1, 2, 2, 3], block_shape_value=[2, 2], pads_value=[[0, 0], [0, 0]]), @@ -51,10 +51,10 @@ def test_space_to_batch_basic(self, params, ie_device, precision, ir_version, te @pytest.mark.parametrize("params", test_data_4D) @pytest.mark.nightly def test_space_to_batch_4D(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): 
self._test(*self.create_space_to_batch_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) test_data_5D = [ dict(in_shape=[3, 3, 4, 5, 2], block_shape_value=[3, 4, 2], @@ -66,7 +66,7 @@ def test_space_to_batch_4D(self, params, ie_device, precision, ir_version, temp_ @pytest.mark.parametrize("params", test_data_5D) @pytest.mark.nightly def test_space_to_batch_5D(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_space_to_batch_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_SpaceToBatchND.py b/tests/layer_tests/tensorflow_tests/test_tf_SpaceToBatchND.py index 58735fa8fb9fa9..b9dcc77ce115cb 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_SpaceToBatchND.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_SpaceToBatchND.py @@ -29,7 +29,7 @@ def create_space_to_batch_nd_net(self, input_shape, block_shape, paddings): @pytest.mark.precommit_tf_fe @pytest.mark.nightly def test_space_to_batch_nd_basic(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_space_to_batch_nd_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_SpaceToDepth.py b/tests/layer_tests/tensorflow_tests/test_tf_SpaceToDepth.py index 08df597e327194..1fff68a36596e7 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_SpaceToDepth.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_SpaceToDepth.py @@ -29,7 +29,7 @@ def create_space_to_depth_net(self, input_shape, block_size, 
data_format): @pytest.mark.precommit_tf_fe @pytest.mark.nightly def test_space_to_depth_basic(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_space_to_depth_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_Split.py b/tests/layer_tests/tensorflow_tests/test_tf_Split.py index c221bb896c3833..3a9934d4c9bc8b 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_Split.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_Split.py @@ -31,7 +31,7 @@ def create_split_net(self, value_shape, axis_value, num_split): @pytest.mark.precommit_tf_fe @pytest.mark.nightly def test_split_basic(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_split_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_SplitV.py b/tests/layer_tests/tensorflow_tests/test_tf_SplitV.py index d410d304c3066b..a79ceefaec5d5b 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_SplitV.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_SplitV.py @@ -36,7 +36,7 @@ def create_splitv_net(self, value_shape, size_splits_values, axis_value): @pytest.mark.nightly @pytest.mark.skipif(os.getenv("GITHUB_ACTIONS") == 'true', reason="Ticket - 113359") def test_split_basic(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_splitv_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git 
a/tests/layer_tests/tensorflow_tests/test_tf_Squeeze.py b/tests/layer_tests/tensorflow_tests/test_tf_Squeeze.py index e8e05f24ea23b0..8a3d76b2afae7b 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_Squeeze.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_Squeeze.py @@ -40,10 +40,10 @@ def create_squeeze_net(self, input_shape, axis, input_type=np.float32): @pytest.mark.precommit_tf_fe @pytest.mark.nightly def test_squeeze_basic(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_squeeze_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) test_data_1D = [ dict(input_shape=[1], axis=[], input_type=np.float32), @@ -52,11 +52,10 @@ def test_squeeze_basic(self, params, ie_device, precision, ir_version, temp_dir, @pytest.mark.parametrize("params", test_data_1D) @pytest.mark.nightly - def test_squeeze_1D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend, - use_old_api): + def test_squeeze_1D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend): self._test(*self.create_squeeze_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) test_data_2D = [ dict(input_shape=[1, 2], axis=[0], input_type=np.float32), @@ -65,11 +64,10 @@ def test_squeeze_1D(self, params, ie_device, precision, ir_version, temp_dir, us @pytest.mark.parametrize("params", test_data_2D) @pytest.mark.nightly - def test_squeeze_2D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend, - use_old_api): + def test_squeeze_2D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend): self._test(*self.create_squeeze_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - 
use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) test_data_3D = [ dict(input_shape=[2, 1, 3], axis=[], input_type=np.float32), @@ -78,11 +76,10 @@ def test_squeeze_2D(self, params, ie_device, precision, ir_version, temp_dir, us @pytest.mark.parametrize("params", test_data_3D) @pytest.mark.nightly - def test_squeeze_3D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend, - use_old_api): + def test_squeeze_3D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend): self._test(*self.create_squeeze_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) test_data_4D = [ dict(input_shape=[1, 1, 5, 10], axis=[], input_type=np.int32), @@ -92,11 +89,10 @@ def test_squeeze_3D(self, params, ie_device, precision, ir_version, temp_dir, us @pytest.mark.parametrize("params", test_data_4D) @pytest.mark.nightly - def test_squeeze_4D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend, - use_old_api): + def test_squeeze_4D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend): self._test(*self.create_squeeze_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) test_data_5D = [ dict(input_shape=[1, 1, 5, 10, 22], axis=[], input_type=np.float32), @@ -109,11 +105,10 @@ def test_squeeze_4D(self, params, ie_device, precision, ir_version, temp_dir, us @pytest.mark.parametrize("params", test_data_5D) @pytest.mark.nightly - def test_squeeze_5D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend, - use_old_api): + def test_squeeze_5D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend): self._test(*self.create_squeeze_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - 
use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) class TestComplexSqueeze(CommonTFLayerTest): @@ -155,8 +150,8 @@ def create_complex_squeeze_net(self, input_shape, axis): @pytest.mark.precommit_tf_fe @pytest.mark.nightly def test_complex_squeeze(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test( *self.create_complex_squeeze_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_StridedSlice.py b/tests/layer_tests/tensorflow_tests/test_tf_StridedSlice.py index 49dd55ae34ad23..5527a7f19a6b3b 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_StridedSlice.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_StridedSlice.py @@ -45,10 +45,10 @@ def create_strided_slice_net(self, input_shape, begin_value, end_value, strides_ @pytest.mark.precommit_tf_fe @pytest.mark.nightly def test_strided_slice_basic(self, params, ie_device, precision, ir_version, - temp_dir, use_new_frontend, use_old_api): + temp_dir, use_new_frontend): self._test(*self.create_strided_slice_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) test_squeeze_data = [ dict(input_shape=[1, 5], begin_value=[0, 0], end_value=[1, 5], strides_value=[1, 1], begin_mask=0, @@ -84,10 +84,10 @@ def test_strided_slice_basic(self, params, ie_device, precision, ir_version, @pytest.mark.parametrize('params', test_squeeze_data) @pytest.mark.nightly def test_strided_slice_replace_with_squeeze(self, params, ie_device, precision, ir_version, - temp_dir, use_new_frontend, use_old_api): + temp_dir, use_new_frontend): self._test(*self.create_strided_slice_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - 
use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) test_unsqueeze_data = [ dict(input_shape=[1, 5], begin_value=[0, 0], end_value=[1, 5], strides_value=[1, 1], begin_mask=0, @@ -114,10 +114,10 @@ def test_strided_slice_replace_with_squeeze(self, params, ie_device, precision, @pytest.mark.parametrize('params', test_unsqueeze_data) @pytest.mark.nightly def test_strided_slice_replace_with_unsqueeze(self, params, ie_device, precision, ir_version, - temp_dir, use_new_frontend, use_old_api): + temp_dir, use_new_frontend): self._test(*self.create_strided_slice_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) class TestComplexStridedSlice(CommonTFLayerTest): def _prepare_input(self, inputs_info): @@ -222,8 +222,8 @@ def create_complex_strided_slice_net(self, input_shape, begin_value, end_value, @pytest.mark.precommit_tf_fe @pytest.mark.nightly def test_complex_strided_slice(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test( *self.create_complex_strided_slice_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_Sub.py b/tests/layer_tests/tensorflow_tests/test_tf_Sub.py index db7645c37394b7..b14d375416fcf0 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_Sub.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_Sub.py @@ -64,11 +64,11 @@ def create_sub_placeholder_const_net(self, x_shape, y_shape, ir_version, use_new @pytest.mark.parametrize("params", test_data_1D) @pytest.mark.nightly def test_sub_placeholder_const_1D(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): 
self._test(*self.create_sub_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend), ie_device, precision, ir_version, - temp_dir=temp_dir, use_new_frontend=use_new_frontend, use_old_api=use_old_api) + temp_dir=temp_dir, use_new_frontend=use_new_frontend) test_data_2D = [ dict(x_shape=[1, 1], y_shape=[1, 1]), @@ -80,11 +80,11 @@ def test_sub_placeholder_const_1D(self, params, ie_device, precision, ir_version @pytest.mark.parametrize("params", test_data_2D) @pytest.mark.nightly def test_sub_placeholder_const_2D(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_sub_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend), ie_device, precision, ir_version, - temp_dir=temp_dir, use_new_frontend=use_new_frontend, use_old_api=use_old_api) + temp_dir=temp_dir, use_new_frontend=use_new_frontend) test_data_3D = [ dict(x_shape=[1, 1, 1], y_shape=[1, 1, 1]), @@ -98,11 +98,11 @@ def test_sub_placeholder_const_2D(self, params, ie_device, precision, ir_version @pytest.mark.parametrize("params", test_data_3D) @pytest.mark.nightly def test_sub_placeholder_const_3D(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_sub_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend), ie_device, precision, ir_version, - temp_dir=temp_dir, use_new_frontend=use_new_frontend, use_old_api=use_old_api) + temp_dir=temp_dir, use_new_frontend=use_new_frontend) test_data_4D = [ dict(x_shape=[1, 1, 1, 1], y_shape=[1, 1, 1, 1]), @@ -115,11 +115,11 @@ def test_sub_placeholder_const_3D(self, params, ie_device, precision, ir_version @pytest.mark.parametrize("params", test_data_4D) @pytest.mark.nightly def test_sub_placeholder_const_4D(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + 
use_new_frontend): self._test(*self.create_sub_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend), ie_device, precision, ir_version, - temp_dir=temp_dir, use_new_frontend=use_new_frontend, use_old_api=use_old_api) + temp_dir=temp_dir, use_new_frontend=use_new_frontend) test_data_5D = [ dict(x_shape=[1, 1, 1, 1, 1], y_shape=[1, 1, 1, 1, 1]), @@ -132,11 +132,11 @@ def test_sub_placeholder_const_4D(self, params, ie_device, precision, ir_version @pytest.mark.parametrize("params", test_data_5D) @pytest.mark.nightly def test_sub_placeholder_const_5D(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_sub_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend), ie_device, precision, ir_version, - temp_dir=temp_dir, use_new_frontend=use_new_frontend, use_old_api=use_old_api) + temp_dir=temp_dir, use_new_frontend=use_new_frontend) ############################################################################################### # # @@ -151,11 +151,11 @@ def test_sub_placeholder_const_5D(self, params, ie_device, precision, ir_version @pytest.mark.parametrize("params", test_data_broadcast_1D) @pytest.mark.nightly def test_sub_placeholder_const_broadcast_1D(self, params, ie_device, precision, ir_version, - temp_dir, use_new_frontend, use_old_api): + temp_dir, use_new_frontend): self._test(*self.create_sub_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend), ie_device, precision, ir_version, - temp_dir=temp_dir, use_new_frontend=use_new_frontend, use_old_api=use_old_api) + temp_dir=temp_dir, use_new_frontend=use_new_frontend) test_data_broadcast_2D = [ dict(x_shape=[1, 1], y_shape=[1]), @@ -168,11 +168,11 @@ def test_sub_placeholder_const_broadcast_1D(self, params, ie_device, precision, @pytest.mark.parametrize("params", test_data_broadcast_2D) @pytest.mark.nightly def 
test_sub_placeholder_const_broadcast_2D(self, params, ie_device, precision, ir_version, - temp_dir, use_new_frontend, use_old_api): + temp_dir, use_new_frontend): self._test(*self.create_sub_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend), ie_device, precision, ir_version, - temp_dir=temp_dir, use_new_frontend=use_new_frontend, use_old_api=use_old_api) + temp_dir=temp_dir, use_new_frontend=use_new_frontend) test_data_broadcast_3D = [ dict(x_shape=[1, 1, 1], y_shape=[1]), @@ -188,11 +188,11 @@ def test_sub_placeholder_const_broadcast_2D(self, params, ie_device, precision, @pytest.mark.parametrize("params", test_data_broadcast_3D) @pytest.mark.nightly def test_sub_placeholder_const_broadcast_3D(self, params, ie_device, precision, ir_version, - temp_dir, use_new_frontend, use_old_api): + temp_dir, use_new_frontend): self._test(*self.create_sub_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend), ie_device, precision, ir_version, - temp_dir=temp_dir, use_new_frontend=use_new_frontend, use_old_api=use_old_api) + temp_dir=temp_dir, use_new_frontend=use_new_frontend) test_data_broadcast_4D = [ dict(x_shape=[1, 1, 1, 1], y_shape=[1]), @@ -211,11 +211,11 @@ def test_sub_placeholder_const_broadcast_3D(self, params, ie_device, precision, @pytest.mark.parametrize("params", test_data_broadcast_4D) @pytest.mark.nightly def test_sub_placeholder_const_broadcast_4D(self, params, ie_device, precision, ir_version, - temp_dir, use_new_frontend, use_old_api): + temp_dir, use_new_frontend): self._test(*self.create_sub_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend), ie_device, precision, ir_version, - temp_dir=temp_dir, use_new_frontend=use_new_frontend, use_old_api=use_old_api) + temp_dir=temp_dir, use_new_frontend=use_new_frontend) test_data_broadcast_5D = [ dict(x_shape=[1, 1, 1, 1, 1], y_shape=[1, 1, 1, 1, 1]), @@ -233,8 +233,8 @@ def 
test_sub_placeholder_const_broadcast_4D(self, params, ie_device, precision, @pytest.mark.parametrize("params", test_data_broadcast_5D) @pytest.mark.nightly def test_sub_placeholder_const_broadcast_5D(self, params, ie_device, precision, ir_version, - temp_dir, use_new_frontend, use_old_api): + temp_dir, use_new_frontend): self._test(*self.create_sub_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend), ie_device, precision, ir_version, - temp_dir=temp_dir, use_new_frontend=use_new_frontend, use_old_api=use_old_api) + temp_dir=temp_dir, use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_Swish.py b/tests/layer_tests/tensorflow_tests/test_tf_Swish.py index 2c2093980f747e..6b88c18f162781 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_Swish.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_Swish.py @@ -69,11 +69,11 @@ def create_swish_net(self, shape, ir_version, use_new_frontend): @pytest.mark.precommit @pytest.mark.nightly def test_swish_precommit(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_swish_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) test_data = [dict(shape=[1]), dict(shape=[1, 224]), @@ -83,9 +83,8 @@ def test_swish_precommit(self, params, ie_device, precision, ir_version, temp_di @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly - def test_swish(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend, - use_old_api): + def test_swish(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend): self._test(*self.create_swish_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend), ie_device, precision, ir_version, temp_dir=temp_dir, - 
use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_SwitchMerge.py b/tests/layer_tests/tensorflow_tests/test_tf_SwitchMerge.py index 8acc25c3a608e1..645282fb745e5c 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_SwitchMerge.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_SwitchMerge.py @@ -59,7 +59,7 @@ def merge_eliminating_several_cond_flows_net(self, x_shape, x_type, cond_value): @pytest.mark.nightly def test_merge_eliminating_several_cond_flows(self, params, cond_value, x_type, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.merge_eliminating_several_cond_flows_net(**params, cond_value=cond_value, x_type=x_type), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_TensorArrayOps.py b/tests/layer_tests/tensorflow_tests/test_tf_TensorArrayOps.py index a2299b83b746f3..4b46090ca3887b 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_TensorArrayOps.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_TensorArrayOps.py @@ -52,10 +52,10 @@ def create_tensor_array_size_v3(self, data_shape, data_type): @pytest.mark.precommit_tf_fe @pytest.mark.nightly def test_tensor_array_size_v3(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_tensor_array_size_v3(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) class TestTensorArrayReadV3(CommonTFLayerTest): @@ -96,10 +96,10 @@ def create_tensor_array_read_v3(self, data_shape, data_type): @pytest.mark.precommit_tf_fe @pytest.mark.nightly def test_tensor_array_read_v3(self, params, ie_device, 
precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_tensor_array_read_v3(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) class TestTensorArrayWriteGatherV3(CommonTFLayerTest): @@ -162,10 +162,10 @@ def create_tensor_array_write_v3(self, size, data_shape, data_type, index_to_wri @pytest.mark.precommit_tf_fe @pytest.mark.nightly def test_tensor_array_write_v3(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_tensor_array_write_v3(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) class TestTensorArrayConcatV3(CommonTFLayerTest): @@ -205,7 +205,7 @@ def create_tensor_array_concat_v3(self, data_shape, data_type): @pytest.mark.precommit_tf_fe @pytest.mark.nightly def test_tensor_array_concat_v3(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_tensor_array_concat_v3(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_TensorListConcatV2.py b/tests/layer_tests/tensorflow_tests/test_tf_TensorListConcatV2.py index 5309d9b0bab9ff..6abba4be7abd93 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_TensorListConcatV2.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_TensorListConcatV2.py @@ -43,7 +43,7 @@ def create_tensor_list_resize(self, input_shape, input_type): @pytest.mark.nightly @pytest.mark.skipif(platform == 'darwin', reason="Ticket - 122182") def test_tensor_list_resize_basic(self, params, ie_device, precision, ir_version, temp_dir, - 
use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_tensor_list_resize(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_TensorListLength.py b/tests/layer_tests/tensorflow_tests/test_tf_TensorListLength.py index 8d6005350fb3fd..034b40673465ac 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_TensorListLength.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_TensorListLength.py @@ -42,10 +42,10 @@ def create_tensor_list_length(self, input_shape, input_type): @pytest.mark.nightly @pytest.mark.skipif(platform == 'darwin', reason="Ticket - 122182") def test_tensor_list_length_basic(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_tensor_list_length(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) class TestTensorListLengthEmptyList(CommonTFLayerTest): @@ -81,7 +81,7 @@ def create_tensor_list_length_empty_list(self, tensor_list_size, element_shape): @pytest.mark.nightly @pytest.mark.skipif(platform == 'darwin', reason="Ticket - 122182") def test_tensor_list_length_empty_list(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_tensor_list_length_empty_list(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_TensorListResize.py b/tests/layer_tests/tensorflow_tests/test_tf_TensorListResize.py index 709f009a7afa93..15c85b786d3bfe 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_TensorListResize.py +++ 
b/tests/layer_tests/tensorflow_tests/test_tf_TensorListResize.py @@ -46,7 +46,7 @@ def create_tensor_list_resize(self, input_shape, input_type, new_size): @pytest.mark.nightly @pytest.mark.skipif(platform == 'darwin', reason="Ticket - 122182") def test_tensor_list_resize_basic(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_tensor_list_resize(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_Tile.py b/tests/layer_tests/tensorflow_tests/test_tf_Tile.py index f29f391873de03..9d9412a32d0213 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_Tile.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_Tile.py @@ -40,7 +40,7 @@ def create_tile_net(self, input_shape): @pytest.mark.precommit_tf_fe @pytest.mark.nightly def test_tile_basic(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_tile_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_ToBool.py b/tests/layer_tests/tensorflow_tests/test_tf_ToBool.py index 74da79c36d52a1..96a1f0b6f4fb11 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_ToBool.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_ToBool.py @@ -37,7 +37,7 @@ def create_tobool_net(self, input_shape, input_type): @pytest.mark.precommit_tf_fe @pytest.mark.nightly def test_to_bool_basic(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_tobool_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, 
use_old_api=use_old_api) \ No newline at end of file + use_new_frontend=use_new_frontend) \ No newline at end of file diff --git a/tests/layer_tests/tensorflow_tests/test_tf_TopK.py b/tests/layer_tests/tensorflow_tests/test_tf_TopK.py index 3c7c4c4acf3f16..2c6f86e99a49d0 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_TopK.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_TopK.py @@ -56,12 +56,11 @@ def create_topK_net(shape, k, ir_version, use_new_frontend): @pytest.mark.parametrize("params", test_data_1D) @pytest.mark.nightly - def test_TopK_1D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend, - use_old_api): + def test_TopK_1D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend): self._test(*self.create_topK_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) test_data_2D = [ dict(shape=[14, 15], k=10), @@ -70,12 +69,11 @@ def test_TopK_1D(self, params, ie_device, precision, ir_version, temp_dir, use_n @pytest.mark.parametrize("params", test_data_2D) @pytest.mark.nightly - def test_TopK_2D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend, - use_old_api): + def test_TopK_2D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend): self._test(*self.create_topK_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) test_data_3D = [ dict(shape=[13, 14, 15], k=10), @@ -84,12 +82,11 @@ def test_TopK_2D(self, params, ie_device, precision, ir_version, temp_dir, use_n @pytest.mark.parametrize("params", test_data_3D) @pytest.mark.nightly - def test_TopK_3D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend, - 
use_old_api): + def test_TopK_3D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend): self._test(*self.create_topK_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) test_data_4D = [ dict(shape=[12, 13, 14, 15], k=10), @@ -98,12 +95,11 @@ def test_TopK_3D(self, params, ie_device, precision, ir_version, temp_dir, use_n @pytest.mark.parametrize("params", test_data_4D) @pytest.mark.nightly - def test_TopK_4D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend, - use_old_api): + def test_TopK_4D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend): self._test(*self.create_topK_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) test_data_5D = [ dict(shape=[11, 12, 13, 14, 15], k=10), @@ -112,9 +108,8 @@ def test_TopK_4D(self, params, ie_device, precision, ir_version, temp_dir, use_n @pytest.mark.parametrize("params", test_data_5D) @pytest.mark.nightly - def test_TopK_5D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend, - use_old_api): + def test_TopK_5D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend): self._test(*self.create_topK_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_TopKV2.py b/tests/layer_tests/tensorflow_tests/test_tf_TopKV2.py index 737675de84dac8..0b9fbd5e5137e0 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_TopKV2.py +++ 
b/tests/layer_tests/tensorflow_tests/test_tf_TopKV2.py @@ -52,8 +52,7 @@ def create_topk_v2_net(self, input_shape, input_type, k, sorted, is_first_output 'aarch64', 'arm64', 'ARM64'), reason='Ticket - 126314, 122716') - def test_topk_v2_basic(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend, - use_old_api): + def test_topk_v2_basic(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend): self._test(*self.create_topk_v2_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_Transpose.py b/tests/layer_tests/tensorflow_tests/test_tf_Transpose.py index ffbd34a5bd3ad9..07774bf7224c2a 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_Transpose.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_Transpose.py @@ -28,10 +28,10 @@ def create_transpose_net(self, x_shape, perm_value): @pytest.mark.precommit_tf_fe @pytest.mark.nightly def test_transpose_basic(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_transpose_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) class TestComplexTranspose(CommonTFLayerTest): @@ -72,8 +72,8 @@ def create_complex_transpose_net(self, input_shape, perm_value): @pytest.mark.precommit_tf_fe @pytest.mark.nightly def test_complex_transpose(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test( *self.create_complex_transpose_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_TruncateDiv.py 
b/tests/layer_tests/tensorflow_tests/test_tf_TruncateDiv.py index 55859e62eca12d..8d632898af9aaf 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_TruncateDiv.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_TruncateDiv.py @@ -49,7 +49,7 @@ def create_truncate_div_net(self, input_shape, input_type): 'arm64', 'ARM64'), reason='Ticket - 126314, 122716') def test_truncate_div_basic(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_truncate_div_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_TruncateMod.py b/tests/layer_tests/tensorflow_tests/test_tf_TruncateMod.py index 48b738095c8bb0..1aba50fa511e88 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_TruncateMod.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_TruncateMod.py @@ -43,7 +43,7 @@ def create_truncate_mod_net(self, input_shape, input_type): @pytest.mark.precommit_tf_fe @pytest.mark.nightly def test_truncate_mod_basic(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_truncate_mod_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_UnaryOps.py b/tests/layer_tests/tensorflow_tests/test_tf_UnaryOps.py index 9a48b9a75b1253..9ccfde8996a13d 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_UnaryOps.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_UnaryOps.py @@ -163,7 +163,7 @@ def create_net_with_unary_op(self, shape, ir_version, op_type, use_new_frontend) ]) @pytest.mark.precommit def test_unary_op_precommit(self, params, ie_device, precision, ir_version, temp_dir, op_type, - 
use_new_frontend, use_old_api): + use_new_frontend): if not use_new_frontend and op_type in ['BitwiseNot']: pytest.skip("Bitwise ops are supported only by new TF FE.") if ie_device == 'GPU': @@ -171,14 +171,14 @@ def test_unary_op_precommit(self, params, ie_device, precision, ir_version, temp self._test(*self.create_net_with_unary_op(**params, ir_version=ir_version, op_type=op_type, use_new_frontend=use_new_frontend), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) @pytest.mark.xfail(sys.version_info > (3, 10), reason="tensorflow_addons package is not available for Python 3.11 and higher") @pytest.mark.parametrize("params", test_data_precommit) @pytest.mark.precommit def test_unary_op_mish_precommit(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): """ TODO: Move to `test_unary_op_precommit()` once tensorflow_addons package is available for Python 3.11 """ @@ -187,7 +187,7 @@ def test_unary_op_mish_precommit(self, params, ie_device, precision, ir_version, self._test(*self.create_net_with_mish(**params, ir_version=ir_version, use_new_frontend=use_new_frontend), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) test_data = [pytest.param(dict(shape=[10, 12]), marks=pytest.mark.precommit_tf_fe), dict(shape=[8, 10, 12]), @@ -228,7 +228,7 @@ def test_unary_op_mish_precommit(self, params, ie_device, precision, ir_version, @pytest.mark.skipif(sys.platform == 'darwin', reason="Ticket - 122182") @pytest.mark.xfail(platform.machine() in ["aarch64", "arm64", "ARM64"], reason='Ticket - 122716') def test_unary_op(self, params, ie_device, precision, ir_version, temp_dir, op_type, - use_new_frontend, use_old_api): + use_new_frontend): if not use_new_frontend and op_type in ['BitwiseNot']: pytest.skip("Bitwise ops are 
supported only by new TF FE.") if ie_device == 'GPU': @@ -236,14 +236,14 @@ def test_unary_op(self, params, ie_device, precision, ir_version, temp_dir, op_t self._test(*self.create_net_with_unary_op(**params, ir_version=ir_version, op_type=op_type, use_new_frontend=use_new_frontend), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) @pytest.mark.xfail(sys.version_info > (3, 10), reason="tensorflow_addons package is not available for Python 3.11 and higher") @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly def test_unary_op_mish(self, params, ie_device, precision, ir_version, temp_dir, op_type, - use_new_frontend, use_old_api): + use_new_frontend): """ TODO: Move to `test_unary_op()` once tensorflow_addons package is available for Python 3.11 """ @@ -252,4 +252,4 @@ def test_unary_op_mish(self, params, ie_device, precision, ir_version, temp_dir, self._test(*self.create_net_with_mish(**params, ir_version=ir_version, use_new_frontend=use_new_frontend), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_Unique.py b/tests/layer_tests/tensorflow_tests/test_tf_Unique.py index 14d89fbdebb86f..82a741bbd802da 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_Unique.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_Unique.py @@ -39,12 +39,12 @@ def create_unique_net(self, x_shape, data_type, out_idx): @pytest.mark.precommit_tf_fe @pytest.mark.nightly def test_unique_basic(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): if not use_new_frontend: pytest.skip("Unique operation is not supported via legacy frontend.") self._test(*self.create_unique_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - 
use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) test_data_other_types = [ dict(x_shape=[10], data_type=tf.int32, out_idx=tf.int32), @@ -54,9 +54,9 @@ def test_unique_basic(self, params, ie_device, precision, ir_version, temp_dir, @pytest.mark.parametrize("params", test_data_other_types) @pytest.mark.nightly def test_unique_other_types(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): if not use_new_frontend: pytest.skip("Unique operation is not supported via legacy frontend.") self._test(*self.create_unique_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_UniqueWithCounts.py b/tests/layer_tests/tensorflow_tests/test_tf_UniqueWithCounts.py index 79726e348d3820..3f2f574a21f889 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_UniqueWithCounts.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_UniqueWithCounts.py @@ -39,9 +39,9 @@ def create_unique_net(self, x_shape, data_type, out_idx): @pytest.mark.precommit_tf_fe @pytest.mark.nightly def test_unique_basic(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): if not use_new_frontend: pytest.skip("Unique operation is not supported via legacy frontend.") self._test(*self.create_unique_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) \ No newline at end of file + use_new_frontend=use_new_frontend) \ No newline at end of file diff --git a/tests/layer_tests/tensorflow_tests/test_tf_Unpack.py b/tests/layer_tests/tensorflow_tests/test_tf_Unpack.py index 04db4ad0786fba..b376a7c96dd959 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_Unpack.py +++ 
b/tests/layer_tests/tensorflow_tests/test_tf_Unpack.py @@ -49,7 +49,7 @@ def create_unpack_net(self, input_shape, num, axis, input_type): @pytest.mark.precommit_tf_fe @pytest.mark.nightly def test_unpack_basic(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_unpack_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_UnravelIndex.py b/tests/layer_tests/tensorflow_tests/test_tf_UnravelIndex.py index da2b79b9b8c93c..53b624755c387c 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_UnravelIndex.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_UnravelIndex.py @@ -38,7 +38,7 @@ def create_unravel_index_net(self, input_shape, input_type, dims_value): @pytest.mark.precommit_tf_fe @pytest.mark.nightly def test_unravel_index_basic(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_unravel_index_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_UnsortedSegmentSum.py b/tests/layer_tests/tensorflow_tests/test_tf_UnsortedSegmentSum.py index f7dcf2eeb324f2..63e6e7061a8e35 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_UnsortedSegmentSum.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_UnsortedSegmentSum.py @@ -61,11 +61,11 @@ def create_unsorted_segment_sum_net(self, data_shape, segment_ids_shape, num_seg reason='Ticket - 122716') def test_unsorted_segment_sum_basic(self, params, data_type, segment_ids_type, num_segments_type, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): if not use_new_frontend: 
pytest.skip("UnsortedSegmentSum operation is not supported via legacy frontend.") self._test( *self.create_unsorted_segment_sum_net(**params, data_type=data_type, segment_ids_type=segment_ids_type, num_segments_type=num_segments_type), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_Where.py b/tests/layer_tests/tensorflow_tests/test_tf_Where.py index 1f5b627df5b7c1..c68e718ab278c2 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_Where.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_Where.py @@ -37,7 +37,7 @@ def create_where_net(self, condition_shape, condition_type): @pytest.mark.precommit_tf_fe @pytest.mark.nightly def test_where_basic(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_where_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_While.py b/tests/layer_tests/tensorflow_tests/test_tf_While.py index d4aaedf86854e6..c87224d8097019 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_While.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_While.py @@ -60,10 +60,10 @@ def body(x, y): @pytest.mark.nightly @pytest.mark.skipif(platform == 'darwin', reason="Ticket - 122182") def test_while_basic(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_while_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) class TestWhileShapeVariant(CommonTFLayerTest): @@ -120,10 +120,10 @@ def body(x, y): @pytest.mark.nightly @pytest.mark.skipif(platform 
== 'darwin', reason="Ticket - 122182") def test_while_basic(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_while_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) class TestWhileWithNestedIf(CommonTFLayerTest): @@ -194,7 +194,7 @@ def else_branch(): @pytest.mark.nightly @pytest.mark.skipif(platform == 'darwin', reason="Ticket - 122182") def test_while_with_nested_if_basic(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_while_with_nested_if_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_Xlog1py.py b/tests/layer_tests/tensorflow_tests/test_tf_Xlog1py.py index 4da47e7b5356c4..b6b88906d9f35c 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_Xlog1py.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_Xlog1py.py @@ -47,7 +47,7 @@ def create_xlog1py_net(self, input_shape, input_type): @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', reason='Ticket - 122716') def test_xlog1py_basic(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_xlog1py_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_Xlogy.py b/tests/layer_tests/tensorflow_tests/test_tf_Xlogy.py index 911c3b0eea2154..f7da244a2d7ef8 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_Xlogy.py +++ 
b/tests/layer_tests/tensorflow_tests/test_tf_Xlogy.py @@ -47,7 +47,7 @@ def create_xlogy_net(self, input_shape, input_type): @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', reason='Ticket - 122716') def test_xlogy_basic(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_xlogy_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_ZerosLike.py b/tests/layer_tests/tensorflow_tests/test_tf_ZerosLike.py index ad8ba15a383d1e..4c53d20f99d5ca 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_ZerosLike.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_ZerosLike.py @@ -29,7 +29,7 @@ def create_zeros_like_net(self, x_shape): @pytest.mark.precommit_tf_fe @pytest.mark.nightly def test_zeros_like_basic(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): + use_new_frontend): self._test(*self.create_zeros_like_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + use_new_frontend=use_new_frontend) From 130b60dbecbe2c4265bc3484e8e1daf63c3eb320 Mon Sep 17 00:00:00 2001 From: Andrei Gorbachev Date: Wed, 10 Jan 2024 16:58:23 +0000 Subject: [PATCH 15/28] [GPU] Refactor (#22053) * add experimental_detectron_detection_output * experimental_detectron_generate_proposals_single_image * experimental_detectron_generate_proposals_single_image * add experimental_detectron_roifeatureextractor * add experimental_detectron_topk_rois * add gru_sequence * add interpolate * add memory * edit gru_sequence --- ...xperimental_detectron_detection_output.cpp | 26 +- ...ectron_generate_proposals_single_image.cpp | 182 +------------- ...rimental_detectron_roifeatureextractor.cpp | 18 +- 
.../experimental_detectron_topk_rois.cpp | 22 +- .../single_layer_tests/gru_sequence.cpp | 55 ++-- .../single_layer_tests/interpolate.cpp | 238 +++++++----------- .../single_layer_tests/memory.cpp | 29 +-- 7 files changed, 157 insertions(+), 413 deletions(-) diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/experimental_detectron_detection_output.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/experimental_detectron_detection_output.cpp index 56544d186fe1d4..30f89bd9d90ff2 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/experimental_detectron_detection_output.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/experimental_detectron_detection_output.cpp @@ -2,20 +2,14 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "single_layer_tests/experimental_detectron_detection_output.hpp" - -#include - -#include "common_test_utils/ov_tensor_utils.hpp" - -using namespace ov::test; -using namespace ov::test::subgraph; +#include "single_op_tests/experimental_detectron_detection_output.hpp" namespace { +using ov::test::ExperimentalDetectronDetectionOutputLayerTest; -const std::vector netPrecisions = { - ov::element::Type_t::f16, - ov::element::Type_t::f32, +const std::vector netPrecisions = { + ov::element::f16, + ov::element::f32, }; const std::vector score_threshold = {0.01f, 0.8f}; @@ -46,15 +40,15 @@ const bool class_agnostic_box_regression_false = false; // specifies deltas of weights const std::vector> deltas_weights = {{10.0f, 10.0f, 5.0f, 5.0f}}; -const std::vector> inputShapes = { +const std::vector inputShapes = { // inputRois / inputDeltas / inputScores / inputImInfos - static_shapes_to_test_representation({{16, 4}, {16, 8}, {16, 2}, {1, 3}}), + ov::test::static_shapes_to_test_representation({{16, 4}, {16, 8}, {16, 2}, {1, 3}}), }; 
INSTANTIATE_TEST_SUITE_P(smoke_ExperimentalDetectronDetectionOutput, ExperimentalDetectronDetectionOutputLayerTest, - ::testing::Combine(::testing::ValuesIn(inputShapes), + ::testing::Combine(::testing::Values(inputShapes), ::testing::ValuesIn(score_threshold), ::testing::ValuesIn(nms_threshold), ::testing::ValuesIn(max_delta_log_wh), @@ -69,7 +63,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_ExperimentalDetectronDetectionOutput, INSTANTIATE_TEST_SUITE_P(DISABLED_smoke_ExperimentalDetectronDetectionOutputMaxDetectionsPerImage, ExperimentalDetectronDetectionOutputLayerTest, - ::testing::Combine(::testing::ValuesIn(inputShapes), + ::testing::Combine(::testing::Values(inputShapes), ::testing::ValuesIn(score_threshold), ::testing::ValuesIn(nms_threshold), ::testing::ValuesIn(max_delta_log_wh), @@ -84,7 +78,7 @@ INSTANTIATE_TEST_SUITE_P(DISABLED_smoke_ExperimentalDetectronDetectionOutputMaxD INSTANTIATE_TEST_SUITE_P(DISABLED_smoke_ExperimentalDetectronDetectionOutput, ExperimentalDetectronDetectionOutputLayerTest, - ::testing::Combine(::testing::ValuesIn(inputShapes), + ::testing::Combine(::testing::Values(inputShapes), ::testing::ValuesIn(score_threshold), ::testing::ValuesIn(nms_threshold), ::testing::ValuesIn(max_delta_log_wh), diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/experimental_detectron_generate_proposals_single_image.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/experimental_detectron_generate_proposals_single_image.cpp index eeffda94e066fe..d6517d48d13b8e 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/experimental_detectron_generate_proposals_single_image.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/experimental_detectron_generate_proposals_single_image.cpp @@ -2,195 +2,32 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include 
"single_layer_tests/experimental_detectron_generate_proposals_single_image.hpp" -#include "common_test_utils/ov_tensor_utils.hpp" - -using namespace ov::test; -using namespace ov::test::subgraph; +#include +#include "single_op_tests/experimental_detectron_generate_proposals_single_image.hpp" namespace { +using ov::test::ExperimentalDetectronGenerateProposalsSingleImageLayerTest; const std::vector min_size = { 0.0f, 0.1f }; const std::vector nms_threshold = { 0.7f }; const std::vector post_nms_count = { 6 }; const std::vector pre_nms_count = { 14, 1000 }; -template -const std::vector>> getInputTensors() { - std::vector>> input_tensors = { - { - "empty", - { - // 3 - ov::test::utils::create_tensor(ov::element::from(), ov::Shape{3}, - std::vector{1.0f, 1.0f, 1.0f}), - // 36 x 4 = 144 - ov::test::utils::create_tensor(ov::element::from(), ov::Shape{36, 4}, std::vector{ - 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, - 1.0f, 1.0f, - 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, - 1.0f, 1.0f, - 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, - 1.0f, 1.0f, - 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, - 1.0f, 1.0f, - - 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, - 1.0f, 1.0f, - 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, - 1.0f, 1.0f, - 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, - 1.0f, 1.0f, - 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, - 1.0f, 1.0f, - - 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, - 1.0f, 1.0f}), - // 12 x 2 x 6 = 144float - ov::test::utils::create_tensor(ov::element::from(), ov::Shape{12, 2, 6}, - std::vector{ - 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, - 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 
- 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, - 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, - 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, - 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, - 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, - 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, - - 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, - 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, - 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, - 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, - 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, - 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, - 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, - 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, - - 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, - 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f}), - // {3 x 2 x 6} = 36 - ov::test::utils::create_tensor(ov::element::from(), ov::Shape{3, 2, 6}, - std::vector{ - 5.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, - 1.0f, 1.0f, 1.0f, 1.0f, - 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, - 4.0f, 1.0f, 1.0f, 1.0f, - 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, - 1.0f, 1.0f, 8.0f, 1.0f}) - } - }, - { - "filled", - { - ov::test::utils::create_tensor(ov::element::from(), ov::Shape{3}, - std::vector{150.0f, 150.0f, 1.0f}), - ov::test::utils::create_tensor(ov::element::from(), ov::Shape{36, 4}, std::vector{ - 12.0f, 68.0f, 102.0f, 123.0f, 46.0f, 80.0f, 79.0f, 128.0f, 33.0f, 71.0f, 127.0f, 86.0f, 33.0f, - 56.0f, 150.0f, 73.0f, - 5.0f, 41.0f, 93.0f, 150.0f, 74.0f, 66.0f, 106.0f, 115.0f, 17.0f, 37.0f, 87.0f, 150.0f, 31.0f, - 27.0f, 150.0f, 39.0f, - 29.0f, 23.0f, 112.0f, 123.0f, 41.0f, 37.0f, 103.0f, 150.0f, 8.0f, 46.0f, 98.0f, 111.0f, 7.0f, - 69.0f, 114.0f, 150.0f, - 70.0f, 21.0f, 150.0f, 125.0f, 54.0f, 19.0f, 132.0f, 68.0f, 62.0f, 8.0f, 150.0f, 101.0f, 57.0f, - 81.0f, 150.0f, 97.0f, - 79.0f, 29.0f, 109.0f, 130.0f, 12.0f, 63.0f, 100.0f, 150.0f, 17.0f, 33.0f, 113.0f, 150.0f, 90.0f, - 78.0f, 150.0f, 111.0f, - 47.0f, 68.0f, 150.0f, 
71.0f, 66.0f, 103.0f, 111.0f, 150.0f, 4.0f, 17.0f, 112.0f, 94.0f, 12.0f, - 8.0f, 119.0f, 98.0f, - 54.0f, 56.0f, 120.0f, 150.0f, 56.0f, 29.0f, 150.0f, 31.0f, 42.0f, 3.0f, 139.0f, 92.0f, 41.0f, - 65.0f, 150.0f, 130.0f, - 49.0f, 13.0f, 143.0f, 30.0f, 40.0f, 60.0f, 150.0f, 150.0f, 23.0f, 73.0f, 24.0f, 115.0f, 56.0f, - 84.0f, 107.0f, 108.0f, - 63.0f, 8.0f, 142.0f, 125.0f, 78.0f, 37.0f, 93.0f, 144.0f, 40.0f, 34.0f, 150.0f, 46.0f, 30.0f, - 21.0f, 150.0f, 120.0f}), - ov::test::utils::create_tensor(ov::element::from(), ov::Shape{12, 2, 6}, - std::vector{ - 9.062256f, 10.883133f, 9.8441105f, 12.694285f, - 0.41781136f, 8.749107f, 14.990341f, 6.587644f, - 1.4206103f, - 13.299262f, 12.432549f, 2.736371f, 0.22732796f, - 6.3361835f, 12.268727f, 2.1009045f, 4.771589f, - 2.5131326f, - 5.610736f, 9.3604145f, 4.27379f, 8.317948f, - 0.60510135f, 6.7446275f, 1.0207708f, 1.1352817f, - 1.5785321f, - 1.718335f, 1.8093798f, 0.99247587f, 1.3233583f, - 1.7432803f, 1.8534478f, 1.2593061f, 1.7394226f, - 1.7686696f, - 1.647999f, 1.7611449f, 1.3119122f, 0.03007332f, - 1.1106564f, 0.55669737f, 0.2546148f, 1.9181818f, - 0.7134989f, - 2.0407224f, 1.7211134f, 1.8565536f, 14.562747f, - 2.8786168f, 0.5927796f, 0.2064463f, 7.6794515f, - 8.672126f, - 10.139171f, 8.002429f, 7.002932f, 12.6314945f, - 10.550842f, 0.15784842f, 0.3194304f, 10.752157f, - 3.709805f, - 11.628928f, 0.7136225f, 14.619964f, 15.177284f, - 2.2824087f, 15.381494f, 0.16618137f, 7.507227f, - 11.173228f, - 0.4923559f, 1.8227729f, 1.4749299f, 1.7833921f, - 1.2363617f, -0.23659119f, 1.5737582f, 1.779316f, - 1.9828427f, - 1.0482665f, 1.4900246f, 1.3563544f, 1.5341306f, - 0.7634312f, 4.6216766e-05f, 1.6161222f, 1.7512476f, - 1.9363779f, - 0.9195784f, 1.4906164f, -0.03244795f, 0.681073f, - 0.6192401f, 1.8033613f, 14.146055f, 3.4043705f, - 15.292292f, - 3.5295358f, 11.138999f, 9.952057f, 5.633434f, - 12.114562f, 9.427372f, 12.384038f, 9.583308f, - 8.427233f, - 15.293704f, 3.288159f, 11.64898f, 9.350885f, - 2.0037227f, 13.523184f, 4.4176426f, 
6.1057625f, - 14.400079f, - 8.248259f, 11.815807f, 15.713364f, 1.0023532f, - 1.3203261f, 1.7100681f, 0.7407832f, 1.09448f, - 1.7188418f, - 1.4412547f, 1.4862992f, 0.74790007f, 0.31571656f, - 0.6398838f, 2.0236106f, 1.1869069f, 1.7265586f, - 1.2624544f, - 0.09934269f, 1.3508598f, 0.85212964f, -0.38968498f, - 1.7059708f, 1.6533034f, 1.7400402f, 1.8123854f, - -0.43063712f}), - ov::test::utils::create_tensor(ov::element::from(), ov::Shape{3, 2, 6}, - std::vector{ - 0.7719922f, 0.35906568f, 0.29054508f, 0.18124384f, - 0.5604661f, 0.84750974f, 0.98948747f, 0.009793862f, - 0.7184191f, - 0.5560748f, 0.6952493f, 0.6732593f, 0.3306898f, - 0.6790913f, 0.41128764f, 0.34593266f, 0.94296855f, - 0.7348507f, - 0.24478768f, 0.94024557f, 0.05405676f, 0.06466125f, - 0.36244348f, 0.07942984f, 0.10619422f, 0.09412837f, - 0.9053611f, - 0.22870538f, 0.9237487f, 0.20986171f, 0.5067282f, - 0.29709867f, 0.53138554f, 0.189101f, 0.4786443f, - 0.88421875f}), - } - } - }; - return input_tensors; -} - -const std::vector> input_shape = { +const std::vector input_shape = { // im_info / anchors / deltas / scores - static_shapes_to_test_representation({{3}, {36, 4}, {12, 2, 6}, {3, 2, 6}}), + ov::test::static_shapes_to_test_representation({{3}, {36, 4}, {12, 2, 6}, {3, 2, 6}}), }; INSTANTIATE_TEST_SUITE_P( smoke_ExperimentalDetectronGenerateProposalsSingleImageLayerTest_f16, ExperimentalDetectronGenerateProposalsSingleImageLayerTest, ::testing::Combine( - ::testing::ValuesIn(input_shape), + ::testing::Values(input_shape), ::testing::ValuesIn(min_size), ::testing::ValuesIn(nms_threshold), ::testing::ValuesIn(post_nms_count), ::testing::ValuesIn(pre_nms_count), - ::testing::ValuesIn(getInputTensors()), - ::testing::ValuesIn({ov::element::Type_t::f16}), + ::testing::Values(ov::element::f16), ::testing::Values(ov::test::utils::DEVICE_GPU)), ExperimentalDetectronGenerateProposalsSingleImageLayerTest::getTestCaseName); @@ -198,13 +35,12 @@ INSTANTIATE_TEST_SUITE_P( 
smoke_ExperimentalDetectronGenerateProposalsSingleImageLayerTest_f32, ExperimentalDetectronGenerateProposalsSingleImageLayerTest, ::testing::Combine( - ::testing::ValuesIn(input_shape), + ::testing::Values(input_shape), ::testing::ValuesIn(min_size), ::testing::ValuesIn(nms_threshold), ::testing::ValuesIn(post_nms_count), ::testing::ValuesIn(pre_nms_count), - ::testing::ValuesIn(getInputTensors()), - ::testing::ValuesIn({ov::element::Type_t::f32}), + ::testing::Values(ov::element::f32), ::testing::Values(ov::test::utils::DEVICE_GPU)), ExperimentalDetectronGenerateProposalsSingleImageLayerTest::getTestCaseName); diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/experimental_detectron_roifeatureextractor.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/experimental_detectron_roifeatureextractor.cpp index 188e7c9bbddcb8..0fd8d81fd9f661 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/experimental_detectron_roifeatureextractor.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/experimental_detectron_roifeatureextractor.cpp @@ -2,13 +2,11 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include "single_layer_tests/experimental_detectron_roifeatureextractor.hpp" - -using namespace ov::test; -using namespace ov::test::subgraph; +#include "single_op_tests/experimental_detectron_roifeatureextractor.hpp" namespace { +using ov::test::ExperimentalDetectronROIFeatureExtractorLayerTest; + const std::vector outputSize = {7, 14}; const std::vector samplingRatio = {1, 2, 3}; @@ -18,10 +16,10 @@ const std::vector> pyramidScales = { {2, 4, 8, 16} }; -const std::vector> staticInputShape = { - static_shapes_to_test_representation({{1000, 4}, {1, 8, 200, 336}, {1, 8, 100, 168}, {1, 8, 50, 84}, {1, 8, 25, 42}}), - static_shapes_to_test_representation({{1000, 4}, {1, 16, 200, 336}, {1, 16, 100, 168}, {1, 16, 50, 84}, 
{1, 16, 25, 42}}), - static_shapes_to_test_representation({{1200, 4}, {1, 8, 200, 42}, {1, 8, 100, 336}, {1, 8, 50, 168}, {1, 8, 25, 84}}) +const std::vector> staticInputShape = { + ov::test::static_shapes_to_test_representation({{1000, 4}, {1, 8, 200, 336}, {1, 8, 100, 168}, {1, 8, 50, 84}, {1, 8, 25, 42}}), + ov::test::static_shapes_to_test_representation({{1000, 4}, {1, 16, 200, 336}, {1, 16, 100, 168}, {1, 16, 50, 84}, {1, 16, 25, 42}}), + ov::test::static_shapes_to_test_representation({{1200, 4}, {1, 8, 200, 42}, {1, 8, 100, 336}, {1, 8, 50, 168}, {1, 8, 25, 84}}) }; INSTANTIATE_TEST_SUITE_P(smoke_ExperimentalROI_static, ExperimentalDetectronROIFeatureExtractorLayerTest, @@ -31,7 +29,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_ExperimentalROI_static, ExperimentalDetectronROIF ::testing::ValuesIn(samplingRatio), ::testing::ValuesIn(pyramidScales), ::testing::Values(false), - ::testing::Values(ov::element::Type_t::f32), + ::testing::Values(ov::element::f32), ::testing::Values(ov::test::utils::DEVICE_GPU)), ExperimentalDetectronROIFeatureExtractorLayerTest::getTestCaseName); } // namespace diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/experimental_detectron_topk_rois.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/experimental_detectron_topk_rois.cpp index e08240d81c1a5f..f029c436ceaa50 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/experimental_detectron_topk_rois.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/experimental_detectron_topk_rois.cpp @@ -2,28 +2,26 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include "single_layer_tests/experimental_detectron_topkrois.hpp" - -using namespace ov::test; -using namespace ov::test::subgraph; +#include "single_op_tests/experimental_detectron_topkrois.hpp" namespace { +using ov::test::ExperimentalDetectronTopKROIsLayerTest; + std::vector 
maxRois { 1000, 1500, 2000 }; -std::vector elementTypes { - ElementType::f16, - ElementType::f32 +std::vector elementTypes { + ov::element::f16, + ov::element::f32 }; -const std::vector> staticInputShape = { - static_shapes_to_test_representation({{3000, 4}, {3000}}), - static_shapes_to_test_representation({{4200, 4}, {4200}}), - static_shapes_to_test_representation({{4500, 4}, {4500}}) +const std::vector> staticInputShape = { + ov::test::static_shapes_to_test_representation({{3000, 4}, {3000}}), + ov::test::static_shapes_to_test_representation({{4200, 4}, {4200}}), + ov::test::static_shapes_to_test_representation({{4500, 4}, {4500}}) }; INSTANTIATE_TEST_SUITE_P(smoke_ExperimentalDetectronTopKROIs_static, ExperimentalDetectronTopKROIsLayerTest, diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/gru_sequence.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/gru_sequence.cpp index b40264d2fa1eab..67ee635cacd8cb 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/gru_sequence.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/gru_sequence.cpp @@ -2,25 +2,20 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include -#include "single_layer_tests/gru_sequence.hpp" -#include "common_test_utils/test_constants.hpp" - -using namespace LayerTestsDefinitions; +#include "single_op_tests/gru_sequence.hpp" namespace { - std::vector mode{ngraph::helpers::SequenceTestsMode::CONVERT_TO_TI_MAX_SEQ_LEN_CONST, - ngraph::helpers::SequenceTestsMode::CONVERT_TO_TI_RAND_SEQ_LEN_CONST, - ngraph::helpers::SequenceTestsMode::CONVERT_TO_TI_RAND_SEQ_LEN_PARAM, - ngraph::helpers::SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_CONST, - ngraph::helpers::SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_PARAM, - ngraph::helpers::SequenceTestsMode::PURE_SEQ}; + using ov::test::GRUSequenceTest; + + std::vector 
mode{ov::test::utils::SequenceTestsMode::CONVERT_TO_TI_MAX_SEQ_LEN_CONST, + ov::test::utils::SequenceTestsMode::CONVERT_TO_TI_RAND_SEQ_LEN_CONST, + ov::test::utils::SequenceTestsMode::CONVERT_TO_TI_RAND_SEQ_LEN_PARAM, + ov::test::utils::SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_CONST, + ov::test::utils::SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_PARAM, + ov::test::utils::SequenceTestsMode::PURE_SEQ}; // output values increase rapidly without clip, so use only seq_lengths = 2 - std::vector seq_lengths_zero_clip{2}; - std::vector seq_lengths_clip_non_zero{20}; - std::vector batch{10}; - std::vector hidden_size{1, 10}; + std::vector seq_lengths_zero_clip{2}; + std::vector seq_lengths_clip_non_zero{20}; // std::vector input_size{10}; std::vector> activations = {{"relu", "tanh"}, {"tanh", "sigmoid"}, {"sigmoid", "tanh"}, {"tanh", "relu"}}; @@ -28,40 +23,36 @@ namespace { std::vector clip{0.f}; std::vector clip_non_zeros{0.7f}; std::vector direction = {ov::op::RecurrentSequenceDirection::FORWARD, - ov::op::RecurrentSequenceDirection::REVERSE, - ov::op::RecurrentSequenceDirection::BIDIRECTIONAL + ov::op::RecurrentSequenceDirection::REVERSE, + ov::op::RecurrentSequenceDirection::BIDIRECTIONAL }; - std::vector netPrecisions = {InferenceEngine::Precision::FP32, - InferenceEngine::Precision::FP16}; + std::vector netPrecisions = {ov::element::f32, + ov::element::f16}; INSTANTIATE_TEST_SUITE_P(GRUSequenceCommonZeroClip, GRUSequenceTest, ::testing::Combine( ::testing::ValuesIn(mode), - ::testing::ValuesIn(seq_lengths_zero_clip), - ::testing::ValuesIn(batch), - ::testing::ValuesIn(hidden_size), + ::testing::Values(seq_lengths_zero_clip), // ::testing::ValuesIn(input_size), // hardcoded to 10 due to Combine supports up to 10 args ::testing::ValuesIn(activations), ::testing::ValuesIn(clip), ::testing::ValuesIn(linear_before_reset), ::testing::ValuesIn(direction), - ::testing::Values(ngraph::helpers::InputLayerType::CONSTANT), + 
::testing::Values(ov::test::utils::InputLayerType::CONSTANT), ::testing::ValuesIn(netPrecisions), ::testing::Values(ov::test::utils::DEVICE_GPU)), GRUSequenceTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(GRUSequenceCommonZeroClipNonConstantWRB, GRUSequenceTest, ::testing::Combine( - ::testing::Values(ngraph::helpers::SequenceTestsMode::PURE_SEQ), - ::testing::ValuesIn(seq_lengths_zero_clip), - ::testing::ValuesIn(batch), - ::testing::ValuesIn(hidden_size), + ::testing::Values(ov::test::utils::SequenceTestsMode::PURE_SEQ), + ::testing::Values(seq_lengths_zero_clip), // ::testing::ValuesIn(input_size), // hardcoded to 10 due to Combine supports up to 10 args ::testing::ValuesIn(activations), ::testing::ValuesIn(clip), ::testing::ValuesIn(linear_before_reset), ::testing::ValuesIn(direction), - ::testing::Values(ngraph::helpers::InputLayerType::PARAMETER), + ::testing::Values(ov::test::utils::InputLayerType::PARAMETER), ::testing::ValuesIn(netPrecisions), ::testing::Values(ov::test::utils::DEVICE_GPU)), GRUSequenceTest::getTestCaseName); @@ -69,15 +60,13 @@ namespace { INSTANTIATE_TEST_SUITE_P(GRUSequenceCommonClip, GRUSequenceTest, ::testing::Combine( ::testing::ValuesIn(mode), - ::testing::ValuesIn(seq_lengths_clip_non_zero), - ::testing::ValuesIn(batch), - ::testing::ValuesIn(hidden_size), + ::testing::Values(seq_lengths_clip_non_zero), // ::testing::ValuesIn(input_size), // hardcoded to 10 due to Combine supports up to 10 args ::testing::ValuesIn(activations), ::testing::ValuesIn(clip_non_zeros), ::testing::ValuesIn(linear_before_reset), ::testing::ValuesIn(direction), - ::testing::Values(ngraph::helpers::InputLayerType::CONSTANT), + ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), ::testing::ValuesIn(netPrecisions), ::testing::Values(ov::test::utils::DEVICE_GPU)), GRUSequenceTest::getTestCaseName); diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/interpolate.cpp 
b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/interpolate.cpp index 2bb3abc7e38ff1..e6cb885725cb9d 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/interpolate.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/interpolate.cpp @@ -2,128 +2,100 @@ // SPDX-License-Identifier: Apache-2.0 // -#include - -#include "single_layer_tests/interpolate.hpp" +#include "single_op_tests/interpolate.hpp" #include "common_test_utils/test_constants.hpp" -using namespace LayerTestsDefinitions; +namespace { +using ov::test::InterpolateLayerTest; +using ov::test::Interpolate11LayerTest; class GPUInterpolateLayerTest : public InterpolateLayerTest { protected: void SetUp() override { InterpolateLayerTest::SetUp(); - InterpolateLayerTestParams params = GetParam(); - InferenceEngine::Precision netPrecision; - std::string targetDevice; - std::tie(std::ignore, netPrecision, std::ignore, std::ignore, std::ignore, std::ignore, std::ignore, - std::ignore, targetDevice, std::ignore) = params; - // Some rounding float to integer types on GPU may differ from CPU, and as result, - // the actual values may differ from reference ones on 1 when the float is very close to an integer, - // e.g 6,0000023 calculated on CPU may be cast to 5 by OpenCL convert_uchar function. - // That is why the threshold is set 1.f for integer types. 
- if (targetDevice == "GPU" && - (netPrecision == InferenceEngine::Precision::U8 || netPrecision == InferenceEngine::Precision::I8)) { - threshold = 1.f; - } - } -}; - -namespace v11 { - -class GPUInterpolateLayerTest : public LayerTestsDefinitions::v11::InterpolateLayerTest { -protected: - void SetUp() override { - LayerTestsDefinitions::v11::InterpolateLayerTest::SetUp(); - InterpolateLayerTestParams params = GetParam(); - InferenceEngine::Precision netPrecision; - std::string targetDevice; - std::tie(std::ignore, netPrecision, std::ignore, std::ignore, std::ignore, std::ignore, std::ignore, - std::ignore, targetDevice, std::ignore) = params; + ov::test::InterpolateLayerTestParams params = GetParam(); + ov::test::InterpolateSpecificParams interpolate_params; + ov::element::Type model_type; + std::vector shapes; + ov::Shape target_shape; + std::map additional_config; + std::tie(interpolate_params, model_type, shapes, target_shape, targetDevice, additional_config) = this->GetParam(); // Some rounding float to integer types on GPU may differ from CPU, and as result, // the actual values may differ from reference ones on 1 when the float is very close to an integer, // e.g 6,0000023 calculated on CPU may be cast to 5 by OpenCL convert_uchar function. // That is why the threshold is set 1.f for integer types. 
if (targetDevice == "GPU" && - (netPrecision == InferenceEngine::Precision::U8 || netPrecision == InferenceEngine::Precision::I8)) { - threshold = 1.f; + (model_type == ov::element::u8 || model_type == ov::element::i8)) { + rel_threshold = 1.f; } } }; -} // namespace v11 - -TEST_P(GPUInterpolateLayerTest, CompareWithRefs) { - Run(); -} - -namespace { - -const std::vector netPrecisions = { - InferenceEngine::Precision::FP16, - InferenceEngine::Precision::FP32, +const std::vector netPrecisions = { + ov::element::f16, + ov::element::f32, }; -const std::vector netOnnx5dPrecisions = { - InferenceEngine::Precision::I8, - InferenceEngine::Precision::U8, - InferenceEngine::Precision::FP16, - InferenceEngine::Precision::FP32, +const std::vector netOnnx5dPrecisions = { + ov::element::i8, + ov::element::u8, + ov::element::f16, + ov::element::f32, }; -const std::vector> inShapes = { +const std::vector inShapes = { {1, 1, 23, 23}, }; -const std::vector> targetShapes = { +const std::vector targetShapes = { {1, 1, 46, 46}, }; -const std::vector> in5dShapes = { +const std::vector in5dShapes = { {1, 1, 2, 2, 2}, }; -const std::vector> target5dShapes = { +const std::vector target5dShapes = { {1, 1, 4, 4, 4}, }; -const std::vector modesWithoutNearest = { - ov::op::v4::Interpolate::InterpolateMode::LINEAR, - ov::op::v4::Interpolate::InterpolateMode::CUBIC, - ov::op::v4::Interpolate::InterpolateMode::LINEAR_ONNX, +const std::vector modesWithoutNearest = { + ov::op::util::InterpolateBase::InterpolateMode::LINEAR, + ov::op::util::InterpolateBase::InterpolateMode::CUBIC, + ov::op::util::InterpolateBase::InterpolateMode::LINEAR_ONNX, }; -const std::vector nearestMode = { - ov::op::v4::Interpolate::InterpolateMode::NEAREST, +const std::vector nearestMode = { + ov::op::util::InterpolateBase::InterpolateMode::NEAREST, }; -const std::vector linearOnnxMode = { - ov::op::v4::Interpolate::InterpolateMode::LINEAR_ONNX, +const std::vector linearOnnxMode = { + 
ov::op::util::InterpolateBase::InterpolateMode::LINEAR_ONNX, }; -const std::vector coordinateTransformModes = { - ov::op::v4::Interpolate::CoordinateTransformMode::TF_HALF_PIXEL_FOR_NN, - ov::op::v4::Interpolate::CoordinateTransformMode::PYTORCH_HALF_PIXEL, - ov::op::v4::Interpolate::CoordinateTransformMode::HALF_PIXEL, - ov::op::v4::Interpolate::CoordinateTransformMode::ASYMMETRIC, - ov::op::v4::Interpolate::CoordinateTransformMode::ALIGN_CORNERS, +const std::vector coordinateTransformModes = { + ov::op::util::InterpolateBase::CoordinateTransformMode::TF_HALF_PIXEL_FOR_NN, + ov::op::util::InterpolateBase::CoordinateTransformMode::PYTORCH_HALF_PIXEL, + ov::op::util::InterpolateBase::CoordinateTransformMode::HALF_PIXEL, + ov::op::util::InterpolateBase::CoordinateTransformMode::ASYMMETRIC, + ov::op::util::InterpolateBase::CoordinateTransformMode::ALIGN_CORNERS, }; -const std::vector shapeCalculationMode = { - ov::op::v4::Interpolate::ShapeCalcMode::SIZES, - ov::op::v4::Interpolate::ShapeCalcMode::SCALES, +const std::vector shapeCalculationMode = { + ov::op::util::InterpolateBase::ShapeCalcMode::SIZES, + ov::op::util::InterpolateBase::ShapeCalcMode::SCALES, }; -const std::vector nearestModes = { - ov::op::v4::Interpolate::NearestMode::SIMPLE, - ov::op::v4::Interpolate::NearestMode::ROUND_PREFER_FLOOR, - ov::op::v4::Interpolate::NearestMode::FLOOR, - ov::op::v4::Interpolate::NearestMode::CEIL, - ov::op::v4::Interpolate::NearestMode::ROUND_PREFER_CEIL, +const std::vector nearestModes = { + ov::op::util::InterpolateBase::NearestMode::SIMPLE, + ov::op::util::InterpolateBase::NearestMode::ROUND_PREFER_FLOOR, + ov::op::util::InterpolateBase::NearestMode::FLOOR, + ov::op::util::InterpolateBase::NearestMode::CEIL, + ov::op::util::InterpolateBase::NearestMode::ROUND_PREFER_CEIL, }; -const std::vector defaultNearestMode = { - ov::op::v4::Interpolate::NearestMode::ROUND_PREFER_FLOOR, +const std::vector defaultNearestMode = { + 
ov::op::util::InterpolateBase::NearestMode::ROUND_PREFER_FLOOR, }; const std::vector> pads = { @@ -233,11 +205,7 @@ const auto interpolate5dCasesNearestMode = ::testing::Combine( INSTANTIATE_TEST_SUITE_P(smoke_Interpolate_Basic, InterpolateLayerTest, ::testing::Combine( interpolateCasesWithoutNearest, ::testing::ValuesIn(netPrecisions), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Layout::ANY), - ::testing::Values(InferenceEngine::Layout::ANY), - ::testing::ValuesIn(inShapes), + ::testing::Values(ov::test::static_shapes_to_test_representation(inShapes)), ::testing::ValuesIn(targetShapes), ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::Values(additional_config)), @@ -246,11 +214,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_Interpolate_Basic, InterpolateLayerTest, ::testin INSTANTIATE_TEST_SUITE_P(smoke_Interpolate_BasicEmptyAxes, InterpolateLayerTest, ::testing::Combine( interpolateCasesWithoutNearestEmptyAxes, ::testing::ValuesIn(netPrecisions), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Layout::ANY), - ::testing::Values(InferenceEngine::Layout::ANY), - ::testing::ValuesIn(inShapes), + ::testing::Values(ov::test::static_shapes_to_test_representation(inShapes)), ::testing::ValuesIn(targetShapes), ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::Values(additional_config)), @@ -259,11 +223,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_Interpolate_BasicEmptyAxes, InterpolateLayerTest, INSTANTIATE_TEST_SUITE_P(smoke_Interpolate_Nearest, InterpolateLayerTest, ::testing::Combine( interpolateCasesNearesMode, ::testing::ValuesIn(netPrecisions), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Layout::ANY), - 
::testing::Values(InferenceEngine::Layout::ANY), - ::testing::ValuesIn(inShapes), + ::testing::Values(ov::test::static_shapes_to_test_representation(inShapes)), ::testing::ValuesIn(targetShapes), ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::Values(additional_config)), @@ -272,11 +232,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_Interpolate_Nearest, InterpolateLayerTest, ::test INSTANTIATE_TEST_SUITE_P(smoke_Interpolate_5dLinearOnnx, GPUInterpolateLayerTest, ::testing::Combine( interpolate5dCasesLinearOnnxMode, ::testing::ValuesIn(netOnnx5dPrecisions), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Layout::ANY), - ::testing::Values(InferenceEngine::Layout::ANY), - ::testing::ValuesIn(in5dShapes), + ::testing::Values(ov::test::static_shapes_to_test_representation(in5dShapes)), ::testing::ValuesIn(target5dShapes), ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::Values(additional_config)), @@ -285,11 +241,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_Interpolate_5dLinearOnnx, GPUInterpolateLayerTest INSTANTIATE_TEST_SUITE_P(smoke_Interpolate_5dNearest, GPUInterpolateLayerTest, ::testing::Combine( interpolate5dCasesNearestMode, ::testing::ValuesIn(netOnnx5dPrecisions), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Layout::ANY), - ::testing::Values(InferenceEngine::Layout::ANY), - ::testing::ValuesIn(in5dShapes), + ::testing::Values(ov::test::static_shapes_to_test_representation(in5dShapes)), ::testing::ValuesIn(target5dShapes), ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::Values(additional_config)), @@ -298,32 +250,28 @@ INSTANTIATE_TEST_SUITE_P(smoke_Interpolate_5dNearest, GPUInterpolateLayerTest, : INSTANTIATE_TEST_SUITE_P(smoke_Interpolate_11_Basic, Interpolate11LayerTest, ::testing::Combine( 
interpolateCasesWithoutNearest, ::testing::ValuesIn(netPrecisions), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Layout::ANY), - ::testing::Values(InferenceEngine::Layout::ANY), - ::testing::ValuesIn(inShapes), + ::testing::Values(ov::test::static_shapes_to_test_representation(inShapes)), ::testing::ValuesIn(targetShapes), ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::Values(additional_config)), Interpolate11LayerTest::getTestCaseName); -const std::vector modesPillow = { - ov::op::v4::Interpolate::InterpolateMode::BILINEAR_PILLOW, - ov::op::v4::Interpolate::InterpolateMode::BICUBIC_PILLOW, +const std::vector modesPillow = { + ov::op::util::InterpolateBase::InterpolateMode::BILINEAR_PILLOW, + ov::op::util::InterpolateBase::InterpolateMode::BICUBIC_PILLOW, }; -const std::vector pillowModePrecisions = { - InferenceEngine::Precision::FP16, - InferenceEngine::Precision::FP32, +const std::vector pillowModePrecisions = { + ov::element::f16, + ov::element::f32, }; INSTANTIATE_TEST_SUITE_P(smoke_Interpolate_11_Pillow, Interpolate11LayerTest, ::testing::Combine( ::testing::Combine( ::testing::ValuesIn(modesPillow), - ::testing::Values(ov::op::v4::Interpolate::ShapeCalcMode::SCALES), - ::testing::Values(ov::op::v4::Interpolate::CoordinateTransformMode::TF_HALF_PIXEL_FOR_NN), - ::testing::Values(ov::op::v4::Interpolate::NearestMode::SIMPLE), + ::testing::Values(ov::op::util::InterpolateBase::ShapeCalcMode::SCALES), + ::testing::Values(ov::op::util::InterpolateBase::CoordinateTransformMode::TF_HALF_PIXEL_FOR_NN), + ::testing::Values(ov::op::util::InterpolateBase::NearestMode::SIMPLE), ::testing::Values(false), ::testing::Values(std::vector{0, 0, 1, 1}), ::testing::Values(std::vector{0, 0, 1, 1}), @@ -331,12 +279,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_Interpolate_11_Pillow, Interpolate11LayerTest, :: ::testing::Values(std::vector{2, 3}), 
::testing::Values(std::vector{2.f, 2.f})), ::testing::ValuesIn(pillowModePrecisions), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Layout::ANY), - ::testing::Values(InferenceEngine::Layout::ANY), - ::testing::Values(std::vector{1, 1, 23, 23}), - ::testing::Values(std::vector{1, 1, 50, 50}), + ::testing::Values(ov::test::static_shapes_to_test_representation(std::vector{{1, 1, 23, 23}})), + ::testing::Values(ov::Shape{1, 1, 50, 50}), ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::Values(additional_config)), Interpolate11LayerTest::getTestCaseName); @@ -345,22 +289,18 @@ INSTANTIATE_TEST_SUITE_P(smoke_Interpolate_11_Pillow, Interpolate11LayerTest, :: INSTANTIATE_TEST_SUITE_P(smoke_Interpolate_11_Pillow_Horizontal, Interpolate11LayerTest, ::testing::Combine( ::testing::Combine( ::testing::ValuesIn(modesPillow), - ::testing::Values(ov::op::v4::Interpolate::ShapeCalcMode::SCALES), - ::testing::Values(ov::op::v4::Interpolate::CoordinateTransformMode::TF_HALF_PIXEL_FOR_NN), - ::testing::Values(ov::op::v4::Interpolate::NearestMode::SIMPLE), + ::testing::Values(ov::op::util::InterpolateBase::ShapeCalcMode::SCALES), + ::testing::Values(ov::op::util::InterpolateBase::CoordinateTransformMode::TF_HALF_PIXEL_FOR_NN), + ::testing::Values(ov::op::util::InterpolateBase::NearestMode::SIMPLE), ::testing::Values(false), ::testing::Values(std::vector{0, 0, 1, 1}), ::testing::Values(std::vector{0, 0, 1, 1}), ::testing::ValuesIn(cubeCoefs), ::testing::Values(std::vector{2, 3}), ::testing::Values(std::vector{1.f, 2.f})), - ::testing::Values(InferenceEngine::Precision::FP32), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Layout::ANY), - ::testing::Values(InferenceEngine::Layout::ANY), - ::testing::Values(std::vector{1, 1, 23, 23}), - 
::testing::Values(std::vector{1, 1, 25, 50}), + ::testing::Values(ov::element::f32), + ::testing::Values(ov::test::static_shapes_to_test_representation(std::vector{{1, 1, 23, 23}})), + ::testing::Values(ov::Shape{1, 1, 25, 50}), ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::Values(additional_config)), Interpolate11LayerTest::getTestCaseName); @@ -369,22 +309,18 @@ INSTANTIATE_TEST_SUITE_P(smoke_Interpolate_11_Pillow_Horizontal, Interpolate11La INSTANTIATE_TEST_SUITE_P(smoke_Interpolate_11_Pillow_Vertical, Interpolate11LayerTest, ::testing::Combine( ::testing::Combine( ::testing::ValuesIn(modesPillow), - ::testing::Values(ov::op::v4::Interpolate::ShapeCalcMode::SCALES), - ::testing::Values(ov::op::v4::Interpolate::CoordinateTransformMode::TF_HALF_PIXEL_FOR_NN), - ::testing::Values(ov::op::v4::Interpolate::NearestMode::SIMPLE), + ::testing::Values(ov::op::util::InterpolateBase::ShapeCalcMode::SCALES), + ::testing::Values(ov::op::util::InterpolateBase::CoordinateTransformMode::TF_HALF_PIXEL_FOR_NN), + ::testing::Values(ov::op::util::InterpolateBase::NearestMode::SIMPLE), ::testing::Values(false), ::testing::Values(std::vector{0, 0, 1, 1}), ::testing::Values(std::vector{0, 0, 1, 1}), ::testing::ValuesIn(cubeCoefs), ::testing::Values(std::vector{2, 3}), ::testing::Values(std::vector{2.f, 1.f})), - ::testing::Values(InferenceEngine::Precision::FP32), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Layout::ANY), - ::testing::Values(InferenceEngine::Layout::ANY), - ::testing::Values(std::vector{1, 1, 23, 23}), - ::testing::Values(std::vector{1, 1, 50, 25}), + ::testing::Values(ov::element::f32), + ::testing::Values(ov::test::static_shapes_to_test_representation(std::vector{{1, 1, 23, 23}})), + ::testing::Values(ov::Shape{1, 1, 50, 25}), ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::Values(additional_config)), 
Interpolate11LayerTest::getTestCaseName); @@ -393,22 +329,18 @@ INSTANTIATE_TEST_SUITE_P(smoke_Interpolate_11_Pillow_Vertical, Interpolate11Laye INSTANTIATE_TEST_SUITE_P(smoke_Interpolate_11_Pillow_Vertical_BF, Interpolate11LayerTest, ::testing::Combine( ::testing::Combine( ::testing::ValuesIn(modesPillow), - ::testing::Values(ov::op::v4::Interpolate::ShapeCalcMode::SCALES), - ::testing::Values(ov::op::v4::Interpolate::CoordinateTransformMode::TF_HALF_PIXEL_FOR_NN), - ::testing::Values(ov::op::v4::Interpolate::NearestMode::SIMPLE), + ::testing::Values(ov::op::util::InterpolateBase::ShapeCalcMode::SCALES), + ::testing::Values(ov::op::util::InterpolateBase::CoordinateTransformMode::TF_HALF_PIXEL_FOR_NN), + ::testing::Values(ov::op::util::InterpolateBase::NearestMode::SIMPLE), ::testing::Values(false), ::testing::Values(std::vector{2, 1, 0, 0}), ::testing::Values(std::vector{2, 1, 0, 0}), ::testing::ValuesIn(cubeCoefs), ::testing::Values(std::vector{0, 1}), ::testing::Values(std::vector{2.f, 1.f})), - ::testing::Values(InferenceEngine::Precision::FP32), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Layout::ANY), - ::testing::Values(InferenceEngine::Layout::ANY), - ::testing::Values(std::vector{23, 23, 2, 2}), - ::testing::Values(std::vector{52, 26, 2, 2}), + ::testing::Values(ov::element::f32), + ::testing::Values(ov::test::static_shapes_to_test_representation(std::vector{{23, 23, 2, 2}})), + ::testing::Values(ov::Shape{52, 26, 2, 2}), ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::Values(additional_config)), Interpolate11LayerTest::getTestCaseName); diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/memory.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/memory.cpp index ea1028e4ff8a6c..772e03ef3dd0ab 100644 --- 
a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/memory.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/memory.cpp @@ -2,44 +2,41 @@ // SPDX-License-Identifier: Apache-2.0 // -#include - -#include "single_layer_tests/memory.h" - -using namespace LayerTestsDefinitions; +#include "single_op_tests/memory.h" namespace { +using ov::test::MemoryLayerTest; +using ov::test::MemoryV3LayerTest; -const std::vector inShapes = { +const std::vector inShapes = { {1}, {3}, {3, 3, 3}, {2, 3, 4, 5}, }; -const std::vector inputPrecisions = { - InferenceEngine::Precision::I32, - InferenceEngine::Precision::FP32, +const std::vector inputPrecisions = { + ov::element::i32, + ov::element::f32, }; const std::vector iterationCount {1, 3, 10}; -INSTANTIATE_TEST_SUITE_P(smoke_MemoryTest, MemoryTest, +INSTANTIATE_TEST_SUITE_P(smoke_MemoryTest, MemoryLayerTest, ::testing::Combine( - ::testing::Values(ngraph::helpers::MemoryTransformation::NONE), + ::testing::Values(ov::test::utils::MemoryTransformation::NONE), ::testing::ValuesIn(iterationCount), ::testing::ValuesIn(inShapes), ::testing::ValuesIn(inputPrecisions), ::testing::Values(ov::test::utils::DEVICE_GPU)), - MemoryTest::getTestCaseName); + MemoryLayerTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_MemoryTestV3, MemoryTestV3, +INSTANTIATE_TEST_SUITE_P(smoke_MemoryTestV3, MemoryV3LayerTest, ::testing::Combine( - ::testing::Values(ngraph::helpers::MemoryTransformation::NONE), + ::testing::Values(ov::test::utils::MemoryTransformation::NONE), ::testing::ValuesIn(iterationCount), ::testing::ValuesIn(inShapes), ::testing::ValuesIn(inputPrecisions), ::testing::Values(ov::test::utils::DEVICE_GPU)), - MemoryTest::getTestCaseName); - + MemoryLayerTest::getTestCaseName); } // namespace From 2a4ae7c5c0f847c707c1818b96dff6623802827d Mon Sep 17 00:00:00 2001 From: Roman Kazantsev Date: Wed, 10 Jan 2024 21:46:43 +0400 Subject: [PATCH 16/28] [TF Hub] Mark up models 
failing with IndexError for Squeeze (#22070) * [TF Hub] Mark up models failing with IndexError for Squeeze Signed-off-by: Kazantsev, Roman * Add xfail for nightly Signed-off-by: Kazantsev, Roman --------- Signed-off-by: Kazantsev, Roman --- .../tf_hub_tests/nightly_models | 46 +++++++++---------- .../tf_hub_tests/test_tf_hub_convert_model.py | 10 +++- 2 files changed, 31 insertions(+), 25 deletions(-) diff --git a/tests/model_hub_tests/tf_hub_tests/nightly_models b/tests/model_hub_tests/tf_hub_tests/nightly_models index b17b4a44cdd0a4..758ef889f64c0d 100644 --- a/tests/model_hub_tests/tf_hub_tests/nightly_models +++ b/tests/model_hub_tests/tf_hub_tests/nightly_models @@ -65,12 +65,12 @@ biggan-deep-256,https://www.kaggle.com/models/deepmind/biggan/frameworks/tensorF efficientdet/lite2/detection,https://www.kaggle.com/models/tensorflow/efficientdet/frameworks/tensorFlow2/variations/lite2-detection/versions/1 imagenet/mobilenet_v2_050_224/classification,https://www.kaggle.com/models/google/mobilenet-v2/frameworks/tensorFlow2/variations/050-224-classification/versions/2 mobilebert/uncased_L-24_H-128_B-512_A-4_F-4_OPT/squadv1,https://www.kaggle.com/models/google/mobilebert/frameworks/tensorFlow1/variations/uncased-l-24-h-128-b-512-a-4-f-4-opt-squadv1/versions/1 -delf,https://www.kaggle.com/models/google/delf/frameworks/tensorFlow1/variations/delf/versions/1 +delf,https://www.kaggle.com/models/google/delf/frameworks/tensorFlow1/variations/delf/versions/1,xfail,129232 IndexError list index out of range for Squeeze ssd_mobilenet_v2,https://www.kaggle.com/models/tensorflow/ssd-mobilenet-v2/frameworks/tensorFlow2/variations/ssd-mobilenet-v2/versions/1 imagenet/mobilenet_v3_large_075_224/feature_vector,https://www.kaggle.com/models/google/mobilenet-v3/frameworks/tensorFlow2/variations/large-075-224-feature-vector/versions/1 
centernet/hourglass_512x512_kpts,https://www.kaggle.com/models/tensorflow/centernet-hourglass/frameworks/tensorFlow2/variations/512x512-kpts/versions/1,skip,129153 TimeoutError or killed with a signal 11 bert_cased_L-12_H-768_A-12,https://www.kaggle.com/models/google/bert/frameworks/tensorFlow1/variations/cased-l-12-h-768-a-12/versions/1,skip,129153 TimeoutError or killed with a signal 11 -biggan-512,https://www.kaggle.com/models/deepmind/biggan/frameworks/tensorFlow1/variations/512/versions/2 +biggan-512,https://www.kaggle.com/models/deepmind/biggan/frameworks/tensorFlow1/variations/512/versions/2,skip,129232 IndexError list index out of range for Squeeze tf2-preview/gnews-swivel-20dim-with-oov,https://www.kaggle.com/models/google/gnews-swivel/frameworks/tensorFlow2/variations/tf2-preview-20dim-with-oov/versions/1,skip,128989 AttributeError NoneType object has no attribute shape or dtype tf2-preview/nnlm-es-dim50-with-normalization,https://www.kaggle.com/models/google/nnlm/frameworks/tensorFlow2/variations/tf2-preview-es-dim50-with-normalization/versions/1,skip,120721 AssertionError No signatures for a model efficientnet/b4/feature-vector,https://www.kaggle.com/models/tensorflow/efficientnet/frameworks/tensorFlow2/variations/b4-feature-vector/versions/1 @@ -87,7 +87,7 @@ imagenet/efficientnet_v2_imagenet21k_ft1k_m/feature_vector,https://www.kaggle.co imagenet/inception_v1/classification,https://www.kaggle.com/models/google/inception-v1/frameworks/tensorFlow2/variations/classification/versions/2 bit/m-r50x1,https://www.kaggle.com/models/google/bit/frameworks/tensorFlow2/variations/m-r50x1/versions/1 progan-128,https://www.kaggle.com/models/google/progan-128/frameworks/tensorFlow1/variations/progan-128/versions/1 -biggan-256,https://www.kaggle.com/models/deepmind/biggan/frameworks/tensorFlow1/variations/256/versions/2 +biggan-256,https://www.kaggle.com/models/deepmind/biggan/frameworks/tensorFlow1/variations/256/versions/2,skip,129232 IndexError list index out of 
range for Squeeze nnlm-de-dim50-with-normalization,https://www.kaggle.com/models/google/nnlm/frameworks/tensorFlow2/variations/de-dim50-with-normalization/versions/1,skip,120721 AssertionError No signatures for a model imagenet/efficientnet_v2_imagenet21k_b0/feature_vector,https://www.kaggle.com/models/google/efficientnet-v2/frameworks/tensorFlow2/variations/imagenet21k-b0-feature-vector/versions/1 bert_multi_cased_preprocess,https://www.kaggle.com/models/tensorflow/bert/frameworks/tensorFlow2/variations/multi-cased-preprocess/versions/3,skip,128989 AttributeError NoneType object has no attribute shape or dtype @@ -159,7 +159,7 @@ faster_rcnn/inception_resnet_v2_1024x1024,https://www.kaggle.com/models/tensorfl ssd_mobilenet_v2/fpnlite_640x640,https://www.kaggle.com/models/tensorflow/ssd-mobilenet-v2/frameworks/tensorFlow2/variations/fpnlite-640x640/versions/1 imagenet/resnet_v1_50/classification,https://www.kaggle.com/models/google/resnet-v1/frameworks/tensorFlow2/variations/50-classification/versions/2 movinet/a5/base/kinetics-600/classification,https://www.kaggle.com/models/google/movinet/frameworks/tensorFlow2/variations/a5-base-kinetics-600-classification/versions/3 -biggan-128,https://www.kaggle.com/models/deepmind/biggan/frameworks/tensorFlow1/variations/128/versions/2 +biggan-128,https://www.kaggle.com/models/deepmind/biggan/frameworks/tensorFlow1/variations/128/versions/2,skip,129232 IndexError list index out of range for Squeeze Wiki-words-250-with-normalization,https://www.kaggle.com/models/google/wiki-words/frameworks/tensorFlow2/variations/250-with-normalization/versions/1,skip,120721 AssertionError No signatures for a model sentence-t5/st5-base,https://www.kaggle.com/models/google/sentence-t5/frameworks/tensorFlow2/variations/st5-base/versions/1 movinet/a2/base/kinetics-600/classification,https://www.kaggle.com/models/google/movinet/frameworks/tensorFlow2/variations/a2-base-kinetics-600-classification/versions/3 @@ -394,7 +394,7 @@ 
german-tacotron2,https://www.kaggle.com/models/myusufs/german-tacotron2/framewor convnext_xlarge_21k_224,https://www.kaggle.com/models/spsayakpaul/convnext/frameworks/tensorFlow2/variations/xlarge-21k-224/versions/1 gtr/gtr-large,https://www.kaggle.com/models/google/gtr/frameworks/tensorFlow2/variations/gtr-large/versions/1 AraBERT,https://www.kaggle.com/models/kaggle/arabert/frameworks/tensorFlow2/variations/arabert/versions/1 -trillsson1,https://www.kaggle.com/models/google/trillsson/frameworks/tensorFlow2/variations/1/versions/1 +trillsson1,https://www.kaggle.com/models/google/trillsson/frameworks/tensorFlow2/variations/1/versions/1,skip,129232 IndexError list index out of range for Squeeze imagenet/mobilenet_v1_050_128/classification,https://www.kaggle.com/models/google/mobilenet-v1/frameworks/tensorFlow2/variations/050-128-classification/versions/2 tf2-preview/nnlm-de-dim50-with-normalization,https://www.kaggle.com/models/google/nnlm/frameworks/tensorFlow2/variations/tf2-preview-de-dim50-with-normalization/versions/1,skip,120721 AssertionError No signatures for a model imagenet/mobilenet_v1_100_192/feature_vector,https://www.kaggle.com/models/google/mobilenet-v1/frameworks/tensorFlow2/variations/100-192-feature-vector/versions/2 @@ -423,7 +423,7 @@ wav2vec2-xlsr-53,https://www.kaggle.com/models/kaggle/wav2vec2/frameworks/tensor unet/industrial/class_10,https://tfhub.dev/nvidia/unet/industrial/class_10/1,skip,Model is not available movinet/a5/stream/kinetics-600/classification,https://www.kaggle.com/models/google/movinet/frameworks/tensorFlow2/variations/a5-stream-kinetics-600-classification/versions/2 vit_r26_s32_lightaug_fe,https://www.kaggle.com/models/spsayakpaul/vision-transformer/frameworks/tensorFlow2/variations/vit-r26-s32-lightaug-fe/versions/1 -mmt/architecture_image-q-24,https://www.kaggle.com/models/deepmind/mmt/frameworks/tensorFlow1/variations/architecture-image-q-24/versions/1 
+mmt/architecture_image-q-24,https://www.kaggle.com/models/deepmind/mmt/frameworks/tensorFlow1/variations/architecture-image-q-24/versions/1,skip,129232 IndexError list index out of range for Squeeze wiki40b-lm-nl,https://www.kaggle.com/models/google/wiki40b-lm/frameworks/tensorFlow1/variations/nl/versions/1 imagenet/efficientnet_v2_imagenet21k_m/classification,https://www.kaggle.com/models/google/efficientnet-v2/frameworks/tensorFlow2/variations/imagenet21k-m-classification/versions/2 tf2-preview/nnlm-zh-dim50,https://www.kaggle.com/models/google/nnlm/frameworks/tensorFlow2/variations/tf2-preview-zh-dim50/versions/1,skip,120721 AssertionError No signatures for a model @@ -470,7 +470,7 @@ sup-rotation-100,https://www.kaggle.com/models/vtab/sup-rotation-100/frameworks/ wiki40b-lm-ko,https://www.kaggle.com/models/google/wiki40b-lm/frameworks/tensorFlow1/variations/ko/versions/1 imagenet/mobilenet_v2_075_128/classification,https://www.kaggle.com/models/google/mobilenet-v2/frameworks/tensorFlow2/variations/075-128-classification/versions/2 efficientdet/lite3x/feature-vector,https://www.kaggle.com/models/tensorflow/efficientdet/frameworks/tensorFlow2/variations/lite3x-feature-vector/versions/1 -mmt/baseline_baseline,https://www.kaggle.com/models/deepmind/mmt/frameworks/tensorFlow1/variations/baseline-baseline/versions/1 +mmt/baseline_baseline,https://www.kaggle.com/models/deepmind/mmt/frameworks/tensorFlow1/variations/baseline-baseline/versions/1,skip,129232 IndexError list index out of range for Squeeze boundless/three_quarter,https://www.kaggle.com/models/google/boundless/frameworks/tensorFlow1/variations/three-quarter/versions/1 imagenet/mobilenet_v1_100_160/classification,https://www.kaggle.com/models/google/mobilenet-v1/frameworks/tensorFlow2/variations/100-160-classification/versions/2 unet/industrial/class_3,https://tfhub.dev/nvidia/unet/industrial/class_3/1,skip,Model is not available @@ -499,14 +499,14 @@ 
vgg19-block3-conv2-unpooling-encoder,https://www.kaggle.com/models/emilutz/vgg19 experts/bert/wiki_books/qqp,https://www.kaggle.com/models/google/experts-bert/frameworks/tensorFlow2/variations/wiki-books-qqp/versions/2 remote_sensing/uc_merced-resnet50,https://www.kaggle.com/models/google/resnet50/frameworks/tensorFlow1/variations/remote-sensing-uc-merced-resnet50/versions/1 mixer_b16_i1k_fe,https://www.kaggle.com/models/spsayakpaul/mlp-mixer/frameworks/tensorFlow2/variations/mixer-b16-i1k-fe/versions/1 -mmt/loss_single-modality-contrastive1024,https://www.kaggle.com/models/deepmind/mmt/frameworks/tensorFlow1/variations/loss-single-modality-contrastive1024/versions/1 +mmt/loss_single-modality-contrastive1024,https://www.kaggle.com/models/deepmind/mmt/frameworks/tensorFlow1/variations/loss-single-modality-contrastive1024/versions/1,skip,129232 IndexError list index out of range for Squeeze regnety600mf_feature_extractor,https://www.kaggle.com/models/adityakane/regnety/frameworks/tensorFlow2/variations/600mf-feature-extractor/versions/1 exemplar,https://www.kaggle.com/models/vtab/exemplar/frameworks/tensorFlow1/variations/exemplar/versions/1 imagenet/mobilenet_v1_025_192/classification,https://www.kaggle.com/models/google/mobilenet-v1/frameworks/tensorFlow2/variations/025-192-classification/versions/2 wiki40b-lm-tr,https://www.kaggle.com/models/google/wiki40b-lm/frameworks/tensorFlow1/variations/tr/versions/1 mixer_b16_sam_fe,https://www.kaggle.com/models/spsayakpaul/mlp-mixer/frameworks/tensorFlow2/variations/mixer-b16-sam-fe/versions/1 logit_reconstruction/robust,https://www.kaggle.com/models/google/logit-reconstruction/frameworks/tensorFlow1/variations/robust/versions/1 -mmt/baseline_baseline-no-bert-transfer,https://www.kaggle.com/models/deepmind/mmt/frameworks/tensorFlow1/variations/baseline-baseline-no-bert-transfer/versions/1 
+mmt/baseline_baseline-no-bert-transfer,https://www.kaggle.com/models/deepmind/mmt/frameworks/tensorFlow1/variations/baseline-baseline-no-bert-transfer/versions/1,skip,129232 IndexError list index out of range for Squeeze nnlm-ko-dim128,https://www.kaggle.com/models/google/nnlm/frameworks/tensorFlow2/variations/ko-dim128/versions/1,skip,120721 AssertionError No signatures for a model experts/bit/r50x1/in21k/angiosperm,https://www.kaggle.com/models/google/experts-bit/frameworks/tensorFlow2/variations/r50x1-in21k-angiosperm/versions/1 vit_l16_classification,https://www.kaggle.com/models/spsayakpaul/vision-transformer/frameworks/tensorFlow2/variations/vit-l16-classification/versions/1 @@ -518,7 +518,7 @@ spiral/default-wgangp-celebahq64-gen-19steps/agent8,https://www.kaggle.com/model experts/bit/r50x1/in21k/carnivore,https://www.kaggle.com/models/google/experts-bit/frameworks/tensorFlow2/variations/r50x1-in21k-carnivore/versions/1 HRNet/ade20k-hrnetv2-w48,https://www.kaggle.com/models/google/hrnet/frameworks/tensorFlow2/variations/hrnet-ade20k-hrnetv2-w48/versions/1,skip,120721 AssertionError No signatures for a model wae-gan,https://www.kaggle.com/models/vtab/wae-gan/frameworks/tensorFlow1/variations/wae-gan/versions/1 -mmt/architecture_image-q-12,https://www.kaggle.com/models/deepmind/mmt/frameworks/tensorFlow1/variations/architecture-image-q-12/versions/1 +mmt/architecture_image-q-12,https://www.kaggle.com/models/deepmind/mmt/frameworks/tensorFlow1/variations/architecture-image-q-12/versions/1,skip,129232 IndexError list index out of range for Squeeze MuRIL-Large,https://www.kaggle.com/models/google/muril/frameworks/tensorFlow2/variations/large/versions/1 spiral/default-wgangp-celebahq64-gen-19steps/agent9,https://www.kaggle.com/models/deepmind/spiral/frameworks/tensorFlow1/variations/default-wgangp-celebahq64-gen-19steps-agent9/versions/1 
swin_large_patch4_window7_224_in22k_fe,https://www.kaggle.com/models/spsayakpaul/swin/frameworks/tensorFlow2/variations/large-patch4-window7-224-in22k-fe/versions/1 @@ -547,14 +547,14 @@ remote_sensing/eurosat-ms-resnet50,https://www.kaggle.com/models/google/resnet50 compare_gan/model_10_lsun_bedroom_resnet19,https://www.kaggle.com/models/google/compare-gan/frameworks/tensorFlow1/variations/model-10-lsun-bedroom-resnet19/versions/1,skip,128817 Model references undeclared parameters convnext_xlarge_21k_1k_384_fe,https://www.kaggle.com/models/spsayakpaul/convnext/frameworks/tensorFlow2/variations/xlarge-21k-1k-384-fe/versions/1 mobilevit_xxs_1k_256_fe,https://www.kaggle.com/models/kaggle/mobilevit/frameworks/tensorFlow2/variations/xxs-1k-256-fe/versions/1 -mmt/data_cc,https://www.kaggle.com/models/deepmind/mmt/frameworks/tensorFlow1/variations/data-cc/versions/1 +mmt/data_cc,https://www.kaggle.com/models/deepmind/mmt/frameworks/tensorFlow1/variations/data-cc/versions/1,skip,129232 IndexError list index out of range for Squeeze spiral/default-wgangp-celebahq64-gen-19steps/agent1,https://www.kaggle.com/models/deepmind/spiral/frameworks/tensorFlow1/variations/default-wgangp-celebahq64-gen-19steps-agent1/versions/1 small_bert/bert_uncased_L-8_H-768_A-12,https://www.kaggle.com/models/google/bert/frameworks/tensorFlow1/variations/bert-uncased-l-8-h-768-a-12/versions/2,skip,129153 TimeoutError or killed with a signal 11 edgetpu/vision/deeplab-edgetpu/fused_argmax/m,https://www.kaggle.com/models/google/deeplab-edgetpu/frameworks/tensorFlow2/variations/fused-argmax-m/versions/1 image_augmentation/nas_imagenet,https://www.kaggle.com/models/google/image-augmentation/frameworks/tensorFlow1/variations/nas-imagenet/versions/1 mixer_b16_i21k_fe,https://www.kaggle.com/models/spsayakpaul/mlp-mixer/frameworks/tensorFlow2/variations/mixer-b16-i21k-fe/versions/1,skip,128695 Inference results mismatch 
-nonsemantic-speech-benchmark/trillsson1,https://www.kaggle.com/models/google/trillsson/frameworks/tensorFlow2/variations/nonsemantic-speech-benchmark-trillsson1/versions/1 -trillsson5,https://www.kaggle.com/models/google/trillsson/frameworks/tensorFlow2/variations/5/versions/1 +nonsemantic-speech-benchmark/trillsson1,https://www.kaggle.com/models/google/trillsson/frameworks/tensorFlow2/variations/nonsemantic-speech-benchmark-trillsson1/versions/1,skip,129232 IndexError list index out of range for Squeeze +trillsson5,https://www.kaggle.com/models/google/trillsson/frameworks/tensorFlow2/variations/5/versions/1,skip,129232 IndexError list index out of range for Squeeze logit_reconstruction/inceptionv3,https://www.kaggle.com/models/google/logit-reconstruction/frameworks/tensorFlow1/variations/inceptionv3/versions/1 spiral/default-wgangp-celebahq64-gen-19steps/agent6,https://www.kaggle.com/models/deepmind/spiral/frameworks/tensorFlow1/variations/default-wgangp-celebahq64-gen-19steps-agent6/versions/1 tf2-preview/nnlm-id-dim128,https://www.kaggle.com/models/google/nnlm/frameworks/tensorFlow2/variations/tf2-preview-id-dim128/versions/1,skip,120721 AssertionError No signatures for a model @@ -589,7 +589,7 @@ sentence-t5/st5-11b,https://www.kaggle.com/models/google/sentence-t5/frameworks/ gtr/gtr-xxl,https://www.kaggle.com/models/google/gtr/frameworks/tensorFlow2/variations/gtr-xxl/versions/1 swin_base_patch4_window7_224_fe,https://www.kaggle.com/models/spsayakpaul/swin/frameworks/tensorFlow2/variations/base-patch4-window7-224-fe/versions/1 small_bert/bert_uncased_L-8_H-128_A-2,https://www.kaggle.com/models/google/bert/frameworks/tensorFlow1/variations/bert-uncased-l-8-h-128-a-2/versions/2,skip,129153 TimeoutError or killed with a signal 11 -mmt/data_mscoco,https://www.kaggle.com/models/deepmind/mmt/frameworks/tensorFlow1/variations/data-mscoco/versions/1 
+mmt/data_mscoco,https://www.kaggle.com/models/deepmind/mmt/frameworks/tensorFlow1/variations/data-mscoco/versions/1,skip,129232 IndexError list index out of range for Squeeze vit_r26_s32_lightaug_classification,https://www.kaggle.com/models/spsayakpaul/vision-transformer/frameworks/tensorFlow2/variations/vit-r26-s32-lightaug-classification/versions/1 compare_gan/s3gan_10_256x256,https://www.kaggle.com/models/google/compare-gan/frameworks/tensorFlow1/variations/s3gan-10-256x256/versions/1 experts/bit/r50x1/in21k/tree,https://www.kaggle.com/models/google/experts-bit/frameworks/tensorFlow2/variations/r50x1-in21k-tree/versions/1 @@ -606,7 +606,7 @@ swin_small_patch244_window877_kinetics400_1k,https://www.kaggle.com/models/kaggl experts/bit/r50x1/in21k/covering,https://www.kaggle.com/models/google/experts-bit/frameworks/tensorFlow2/variations/r50x1-in21k-covering/versions/1 HRNet/msegpcontext-hrnetv2-w48,https://www.kaggle.com/models/google/hrnet/frameworks/tensorFlow2/variations/hrnet-msegpcontext-hrnetv2-w48/versions/1,skip,120721 AssertionError No signatures for a model convnext_large_21k_1k_224_fe,https://www.kaggle.com/models/spsayakpaul/convnext/frameworks/tensorFlow2/variations/large-21k-1k-224-fe/versions/1 -mmt/baseline-ft_baseline-no-bert-transfer,https://www.kaggle.com/models/deepmind/mmt/frameworks/tensorFlow1/variations/baseline-ft-baseline-no-bert-transfer/versions/1 +mmt/baseline-ft_baseline-no-bert-transfer,https://www.kaggle.com/models/deepmind/mmt/frameworks/tensorFlow1/variations/baseline-ft-baseline-no-bert-transfer/versions/1,skip,129232 IndexError list index out of range for Squeeze wiki40b-lm-el,https://www.kaggle.com/models/google/wiki40b-lm/frameworks/tensorFlow1/variations/el/versions/1 imagenet/efficientnet_v2_imagenet21k_ft1k_b2/classification,https://www.kaggle.com/models/google/efficientnet-v2/frameworks/tensorFlow2/variations/imagenet21k-ft1k-b2-classification/versions/1 
wiki40b-lm-ro,https://www.kaggle.com/models/google/wiki40b-lm/frameworks/tensorFlow1/variations/ro/versions/1 @@ -622,20 +622,20 @@ experts/bit/r50x1/in21k/abstraction,https://www.kaggle.com/models/google/experts wiki40b-lm-th,https://www.kaggle.com/models/google/wiki40b-lm/frameworks/tensorFlow1/variations/th/versions/1 imagenet/mobilenet_v1_100_160/quantops/classification,https://www.kaggle.com/models/google/mobilenet-v1/frameworks/tensorFlow1/variations/100-160-quantops-classification/versions/2,skip,128695 Inference results mismatch imagenet/efficientnet_v2_imagenet21k_b1/classification,https://www.kaggle.com/models/google/efficientnet-v2/frameworks/tensorFlow2/variations/imagenet21k-b1-classification/versions/1 -mmt/data_sbu,https://www.kaggle.com/models/deepmind/mmt/frameworks/tensorFlow1/variations/data-sbu/versions/1 -mmt/data_combined-instance,https://www.kaggle.com/models/deepmind/mmt/frameworks/tensorFlow1/variations/data-combined-instance/versions/1 +mmt/data_sbu,https://www.kaggle.com/models/deepmind/mmt/frameworks/tensorFlow1/variations/data-sbu/versions/1,skip,129232 IndexError list index out of range for Squeeze +mmt/data_combined-instance,https://www.kaggle.com/models/deepmind/mmt/frameworks/tensorFlow1/variations/data-combined-instance/versions/1,skip,129232 IndexError list index out of range for Squeeze semi-exemplar-10,https://www.kaggle.com/models/vtab/semi-exemplar-10/frameworks/tensorFlow1/variations/semi-exemplar-10/versions/1 swin_tiny_patch4_window7_224,https://www.kaggle.com/models/spsayakpaul/swin/frameworks/tensorFlow2/variations/tiny-patch4-window7-224/versions/1 vila/image,https://www.kaggle.com/models/google/vila/frameworks/tensorFlow2/variations/image/versions/1 imagenet/mobilenet_v1_075_160/quantops/classification,https://www.kaggle.com/models/google/mobilenet-v1/frameworks/tensorFlow1/variations/075-160-quantops-classification/versions/2,skip,128695 Inference results mismatch 
convnext_large_21k_1k_224,https://www.kaggle.com/models/spsayakpaul/convnext/frameworks/tensorFlow2/variations/large-21k-1k-224/versions/1 -mmt/architecture_vilbert-4block,https://www.kaggle.com/models/deepmind/mmt/frameworks/tensorFlow1/variations/architecture-vilbert-4block/versions/1 +mmt/architecture_vilbert-4block,https://www.kaggle.com/models/deepmind/mmt/frameworks/tensorFlow1/variations/architecture-vilbert-4block/versions/1,skip,129232 IndexError list index out of range for Squeeze imagenet/mobilenet_v1_050_160/quantops/classification,https://www.kaggle.com/models/google/mobilenet-v1/frameworks/tensorFlow1/variations/050-160-quantops-classification/versions/2,skip,128695 Inference results mismatch -mmt/architecture-ft_language-q-12,https://www.kaggle.com/models/deepmind/mmt/frameworks/tensorFlow1/variations/architecture-ft-language-q-12/versions/1 +mmt/architecture-ft_language-q-12,https://www.kaggle.com/models/deepmind/mmt/frameworks/tensorFlow1/variations/architecture-ft-language-q-12/versions/1,skip,129232 IndexError list index out of range for Squeeze convnext_small_1k_224_fe,https://www.kaggle.com/models/spsayakpaul/convnext/frameworks/tensorFlow2/variations/small-1k-224-fe/versions/1 bit_resnet152x2_224_classification,https://www.kaggle.com/models/spsayakpaul/bit-resnet/frameworks/tensorFlow2/variations/bit-resnet152x2-224-classification/versions/1 experts/bit/r50x1/in21k/relation,https://www.kaggle.com/models/google/experts-bit/frameworks/tensorFlow2/variations/r50x1-in21k-relation/versions/1 -mmt/architecture_single-modality,https://www.kaggle.com/models/deepmind/mmt/frameworks/tensorFlow1/variations/architecture-single-modality/versions/1 +mmt/architecture_single-modality,https://www.kaggle.com/models/deepmind/mmt/frameworks/tensorFlow1/variations/architecture-single-modality/versions/1,skip,129232 IndexError list index out of range for Squeeze 
convnext_xlarge_21k_1k_224_fe,https://www.kaggle.com/models/spsayakpaul/convnext/frameworks/tensorFlow2/variations/xlarge-21k-1k-224-fe/versions/1 convnext_base_1k_384_fe,https://www.kaggle.com/models/spsayakpaul/convnext/frameworks/tensorFlow2/variations/base-1k-384-fe/versions/1 compare_gan/model_4_lsun_bedroom_resnet19,https://www.kaggle.com/models/google/compare-gan/frameworks/tensorFlow1/variations/model-4-lsun-bedroom-resnet19/versions/1,skip,128817 Model references undeclared parameters @@ -649,7 +649,7 @@ deit_base_patch16_384,https://www.kaggle.com/models/spsayakpaul/deit/frameworks/ deit_small_distilled_patch16_224_fe,https://www.kaggle.com/models/spsayakpaul/deit/frameworks/tensorFlow2/variations/small-distilled-patch16-224-fe/versions/1 llr-pretrain-adv/linear,https://www.kaggle.com/models/deepmind/llr-pretrain-adv/frameworks/tensorFlow1/variations/linear/versions/1 edgetpu/vision/mobilenet-edgetpu-v2-feature-vector/m,https://www.kaggle.com/models/google/mobilenet-edgetpu-v2/frameworks/tensorFlow2/variations/feature-vector-m/versions/2 -mmt/architecture_vilbert-2block,https://www.kaggle.com/models/deepmind/mmt/frameworks/tensorFlow1/variations/architecture-vilbert-2block/versions/1 +mmt/architecture_vilbert-2block,https://www.kaggle.com/models/deepmind/mmt/frameworks/tensorFlow1/variations/architecture-vilbert-2block/versions/1,skip,129232 IndexError list index out of range for Squeeze mobilevit_xs_1k_256_fe,https://www.kaggle.com/models/kaggle/mobilevit/frameworks/tensorFlow2/variations/xs-1k-256-fe/versions/1 convnext_base_21k_1k_384,https://www.kaggle.com/models/spsayakpaul/convnext/frameworks/tensorFlow2/variations/base-21k-1k-384/versions/1 imagenet/mobilenet_v2_100_128/feature_vector,https://www.kaggle.com/models/google/mobilenet-v2/frameworks/tensorFlow2/variations/100-128-feature-vector/versions/2 @@ -668,9 +668,9 @@ mixer_b32_sam_fe,https://www.kaggle.com/models/spsayakpaul/mlp-mixer/frameworks/ 
small_bert/bert_en_uncased_L-8_H-512_A-8,https://www.kaggle.com/models/tensorflow/bert/frameworks/tensorFlow2/variations/bert-en-uncased-l-8-h-512-a-8/versions/2 tiny_video_net/mobile_1,https://www.kaggle.com/models/google/tiny-video-net/frameworks/tensorFlow1/variations/mobile-1/versions/1,skip,128817 Model references undeclared parameters vgg19-block5-conv2-unpooling-encoder,https://www.kaggle.com/models/emilutz/vgg19-unpooling-encoder-decoder/frameworks/tensorFlow2/variations/vgg19-block5-conv2-unpooling-encoder/versions/1,skip,128695 Inference results mismatch -mmt/data-ft_sbu,https://www.kaggle.com/models/deepmind/mmt/frameworks/tensorFlow1/variations/data-ft-sbu/versions/1 +mmt/data-ft_sbu,https://www.kaggle.com/models/deepmind/mmt/frameworks/tensorFlow1/variations/data-ft-sbu/versions/1,skip,129232 IndexError list index out of range for Squeeze swin_s3_base_224,https://www.kaggle.com/models/spsayakpaul/swin/frameworks/tensorFlow2/variations/s3-base-224/versions/1 -mmt/data_cc-with-bert,https://www.kaggle.com/models/deepmind/mmt/frameworks/tensorFlow1/variations/data-cc-with-bert/versions/1 +mmt/data_cc-with-bert,https://www.kaggle.com/models/deepmind/mmt/frameworks/tensorFlow1/variations/data-cc-with-bert/versions/1,skip,129232 IndexError list index out of range for Squeeze experts/bit/r50x1/in21k/instrument,https://www.kaggle.com/models/google/experts-bit/frameworks/tensorFlow2/variations/r50x1-in21k-instrument/versions/1 imagenet/mobilenet_v3_small_100_224/classification,https://www.kaggle.com/models/google/mobilenet-v3/frameworks/tensorFlow2/variations/small-100-224-classification/versions/1 imagenet/efficientnet_v2_imagenet1k_b1/feature_vector,https://www.kaggle.com/models/google/efficientnet-v2/frameworks/tensorFlow2/variations/imagenet1k-b1-feature-vector/versions/2 diff --git a/tests/model_hub_tests/tf_hub_tests/test_tf_hub_convert_model.py b/tests/model_hub_tests/tf_hub_tests/test_tf_hub_convert_model.py index 7a0a1fc4ff97de..8345ad8981ac3b 100644 
--- a/tests/model_hub_tests/tf_hub_tests/test_tf_hub_convert_model.py +++ b/tests/model_hub_tests/tf_hub_tests/test_tf_hub_convert_model.py @@ -133,16 +133,22 @@ def teardown_method(self): get_models_list(os.path.join(os.path.dirname(__file__), "precommit_models"))) @pytest.mark.precommit def test_convert_model_precommit(self, model_name, model_link, mark, reason, ie_device): - assert mark is None or mark == 'skip', "Incorrect test case: {}, {}".format(model_name, model_link) + assert mark is None or mark == 'skip' or mark == 'xfail', \ + "Incorrect test case: {}, {}".format(model_name, model_link) if mark == 'skip': pytest.skip(reason) + elif mark == 'xfail': + pytest.xfail(reason) self.run(model_name, model_link, ie_device) @pytest.mark.parametrize("model_name,model_link,mark,reason", get_models_list(os.path.join(os.path.dirname(__file__), "nightly_models"))) @pytest.mark.nightly def test_convert_model_all_models(self, model_name, model_link, mark, reason, ie_device): - assert mark is None or mark == 'skip', "Incorrect test case: {}, {}".format(model_name, model_link) + assert mark is None or mark == 'skip' or mark == 'xfail', \ + "Incorrect test case: {}, {}".format(model_name, model_link) if mark == 'skip': pytest.skip(reason) + elif mark == 'xfail': + pytest.xfail(reason) self.run(model_name, model_link, ie_device) From 0703ac2224a3350678866b28affed0f664538e64 Mon Sep 17 00:00:00 2001 From: Georgy Krivoruchko Date: Wed, 10 Jan 2024 10:32:29 -0800 Subject: [PATCH 17/28] [ONNX] Frontend refactoring (#22064) * Cleanup in frontend.cpp * Cleanup input_model.hpp/cpp * Refactored onnx_framework_node.hpp/cpp * Updated arg_min_max_factory.cpp/hpp * Updated conv_factory.cpp/hpp --- src/frontends/onnx/frontend/src/frontend.cpp | 6 +- .../onnx/frontend/src/input_model.cpp | 10 +-- .../onnx/frontend/src/input_model.hpp | 6 +- .../onnx/frontend/src/onnx_framework_node.cpp | 2 +- .../onnx/frontend/src/onnx_framework_node.hpp | 29 ++++---- 
.../src/utils/arg_min_max_factory.cpp | 70 +++++++++---------- .../src/utils/arg_min_max_factory.hpp | 12 ++-- .../onnx/frontend/src/utils/conv_factory.cpp | 46 ++++++------ .../onnx/frontend/src/utils/conv_factory.hpp | 18 ++--- .../onnx/frontend/src/utils/onnx_internal.cpp | 8 +-- 10 files changed, 103 insertions(+), 104 deletions(-) diff --git a/src/frontends/onnx/frontend/src/frontend.cpp b/src/frontends/onnx/frontend/src/frontend.cpp index 8fce9bed5f0012..c3355159c27f94 100644 --- a/src/frontends/onnx/frontend/src/frontend.cpp +++ b/src/frontends/onnx/frontend/src/frontend.cpp @@ -89,7 +89,7 @@ InputModel::Ptr FrontEnd::load_impl(const std::vector& variants) const return nullptr; } -std::shared_ptr FrontEnd::convert_partially(const InputModel::Ptr& model) const { +std::shared_ptr FrontEnd::convert_partially(const InputModel::Ptr& model) const { auto model_onnx = std::dynamic_pointer_cast(model); FRONT_END_GENERAL_CHECK(model_onnx != nullptr, "Invalid input model"); @@ -118,7 +118,7 @@ void FrontEnd::normalize(const std::shared_ptr& model) const { manager.run_passes(model); } -std::shared_ptr FrontEnd::convert(const InputModel::Ptr& model) const { +std::shared_ptr FrontEnd::convert(const InputModel::Ptr& model) const { const auto partially_converted = convert_partially(model); const auto error_message = ngraph::onnx_import::common::collect_translation_exceptions(partially_converted); @@ -134,7 +134,7 @@ void FrontEnd::convert(const std::shared_ptr& partially_converted) co normalize(partially_converted); } -std::shared_ptr FrontEnd::decode(const InputModel::Ptr& model) const { +std::shared_ptr FrontEnd::decode(const InputModel::Ptr& model) const { auto model_onnx = std::dynamic_pointer_cast(model); FRONT_END_GENERAL_CHECK(model_onnx != nullptr, "Invalid input model"); return model_onnx->decode(); diff --git a/src/frontends/onnx/frontend/src/input_model.cpp b/src/frontends/onnx/frontend/src/input_model.cpp index 224e9806f9c69f..b4cb7c168da1e6 100644 --- 
a/src/frontends/onnx/frontend/src/input_model.cpp +++ b/src/frontends/onnx/frontend/src/input_model.cpp @@ -12,7 +12,7 @@ using namespace ov; using namespace ov::frontend::onnx; -NGRAPH_SUPPRESS_DEPRECATED_START +OPENVINO_SUPPRESS_DEPRECATED_START InputModel::InputModel(const std::string& path, const bool enable_mmap, frontend::ExtensionHolder extensions) : m_editor{std::make_shared(path, enable_mmap, std::move(extensions))} {} @@ -152,7 +152,7 @@ void InputModel::free_name_for_tensor(const std::string&) { FRONT_END_THROW("Method free_name_for_tensor is not applicable for ONNX model. ONNX tensor name is an identifier."); } -void InputModel::set_partial_shape(const ov::frontend::Place::Ptr& place, const ngraph::PartialShape& shape) { +void InputModel::set_partial_shape(const ov::frontend::Place::Ptr& place, const ov::PartialShape& shape) { std::string input_name; // name of the model input which should be reshaped const auto input_edge = std::dynamic_pointer_cast(place); if (input_edge) { @@ -172,7 +172,7 @@ void InputModel::set_partial_shape(const ov::frontend::Place::Ptr& place, const m_inputs_to_reshape[input_name] = shape; } -ngraph::PartialShape InputModel::get_partial_shape(const ov::frontend::Place::Ptr& place) const { +ov::PartialShape InputModel::get_partial_shape(const ov::frontend::Place::Ptr& place) const { std::string tensor_name; // name of the model input which should be reshaped const auto input_edge = std::dynamic_pointer_cast(place); const auto output_edge = std::dynamic_pointer_cast(place); @@ -193,8 +193,8 @@ ngraph::PartialShape InputModel::get_partial_shape(const ov::frontend::Place::Pt return m_editor->get_tensor_shape(tensor_name); } -void InputModel::set_element_type(const ov::frontend::Place::Ptr& place, const ngraph::element::Type& type) { - std::map m; +void InputModel::set_element_type(const ov::frontend::Place::Ptr& place, const ov::element::Type& type) { + std::map m; m[place->get_names().at(0)] = type; m_editor->set_input_types(m); } 
diff --git a/src/frontends/onnx/frontend/src/input_model.hpp b/src/frontends/onnx/frontend/src/input_model.hpp index be093bac9da60c..803766ae25af8c 100644 --- a/src/frontends/onnx/frontend/src/input_model.hpp +++ b/src/frontends/onnx/frontend/src/input_model.hpp @@ -55,9 +55,9 @@ class InputModel : public ov::frontend::InputModel { /// \brief Not applicable for ONNX model. Throws immediately void free_name_for_tensor(const std::string& name) override; - void set_partial_shape(const ov::frontend::Place::Ptr& place, const ngraph::PartialShape& shape) override; - ngraph::PartialShape get_partial_shape(const ov::frontend::Place::Ptr& place) const override; - void set_element_type(const ov::frontend::Place::Ptr& place, const ngraph::element::Type& type) override; + void set_partial_shape(const ov::frontend::Place::Ptr& place, const ov::PartialShape& shape) override; + ov::PartialShape get_partial_shape(const ov::frontend::Place::Ptr& place) const override; + void set_element_type(const ov::frontend::Place::Ptr& place, const ov::element::Type& type) override; ov::element::Type get_element_type(const ov::frontend::Place::Ptr& place) const override; ov::frontend::Place::Ptr add_output(const ov::frontend::Place::Ptr& place) override; void remove_output(const ov::frontend::Place::Ptr& place) override; diff --git a/src/frontends/onnx/frontend/src/onnx_framework_node.cpp b/src/frontends/onnx/frontend/src/onnx_framework_node.cpp index 6cf880d760d47e..6ecdb0658e01c7 100644 --- a/src/frontends/onnx/frontend/src/onnx_framework_node.cpp +++ b/src/frontends/onnx/frontend/src/onnx_framework_node.cpp @@ -23,7 +23,7 @@ std::shared_ptr ONNXFrameworkNode::clone_with_new_inputs(const OutputVecto } std::shared_ptr ONNXSubgraphFrameworkNode::clone_with_new_inputs(const OutputVector& inputs) const { - return std::make_shared(m_node, m_functions, inputs); + return std::make_shared(m_node, m_models, inputs); } std::shared_ptr NotSupportedONNXNode::clone_with_new_inputs(const OutputVector& 
inputs) const { diff --git a/src/frontends/onnx/frontend/src/onnx_framework_node.hpp b/src/frontends/onnx/frontend/src/onnx_framework_node.hpp index 6489b17868543c..1eeb889dccc826 100644 --- a/src/frontends/onnx/frontend/src/onnx_framework_node.hpp +++ b/src/frontends/onnx/frontend/src/onnx_framework_node.hpp @@ -16,11 +16,10 @@ #pragma once -#include -#include -#include -#include -#include +#include "core/graph.hpp" +#include "onnx_import/core/node.hpp" +#include "openvino/core/model.hpp" +#include "openvino/op/util/framework_node.hpp" namespace ONNX_NAMESPACE { // forward declaration @@ -49,12 +48,12 @@ class ONNXFrameworkNode : public ov::op::util::FrameworkNode { set_attrs(attrs); } - OutputVector get_ng_nodes(const std::shared_ptr& graph) const { - OutputVector ng_nodes{graph->make_ov_nodes(m_node)}; - if (ng_nodes.size() > get_output_size()) { - ng_nodes.resize(get_output_size()); + OutputVector get_ov_nodes(const std::shared_ptr& graph) const { + OutputVector ov_nodes{graph->make_ov_nodes(m_node)}; + if (ov_nodes.size() > get_output_size()) { + ov_nodes.resize(get_output_size()); } - return ng_nodes; + return ov_nodes; } virtual std::shared_ptr clone_with_new_inputs(const OutputVector& inputs) const override; @@ -77,24 +76,24 @@ class ONNXSubgraphFrameworkNode : public ONNXFrameworkNode { OPENVINO_OP("ONNXSubgraphFrameworkNode", "util", ONNXFrameworkNode); ONNXSubgraphFrameworkNode(const onnx_import::Node& node, - const std::vector>& functions, + const std::vector>& models, const OutputVector& inputs) : ONNXFrameworkNode(node, inputs), - m_functions(functions) {} + m_models(models) {} void infer_inputs_from_parent() { for (auto& subgraph : m_node.get_subgraphs()) subgraph.second->infer_inputs_from_parent(); } - const std::vector>& get_subgraph_functions() const { - return m_functions; + const std::vector>& get_subgraph_models() const { + return m_models; } virtual std::shared_ptr clone_with_new_inputs(const OutputVector& inputs) const override; private: - 
std::vector> m_functions; + std::vector> m_models; }; OPENVINO_SUPPRESS_DEPRECATED_END diff --git a/src/frontends/onnx/frontend/src/utils/arg_min_max_factory.cpp b/src/frontends/onnx/frontend/src/utils/arg_min_max_factory.cpp index a010f5aa149801..2fd399a8e56ac9 100644 --- a/src/frontends/onnx/frontend/src/utils/arg_min_max_factory.cpp +++ b/src/frontends/onnx/frontend/src/utils/arg_min_max_factory.cpp @@ -4,9 +4,18 @@ #include "utils/arg_min_max_factory.hpp" -#include "default_opset.hpp" -#include "ngraph/opsets/opset1.hpp" -#include "ngraph/validation_util.hpp" +#include "openvino/core/validation_util.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/convert.hpp" +#include "openvino/op/gather.hpp" +#include "openvino/op/reverse.hpp" +#include "openvino/op/shape_of.hpp" +#include "openvino/op/squeeze.hpp" +#include "openvino/op/subtract.hpp" +#include "openvino/op/topk.hpp" + +using namespace ov; +using namespace ov::op; namespace ngraph { namespace onnx_import { @@ -19,16 +28,16 @@ ArgMinMaxFactory::ArgMinMaxFactory(const Node& node) m_select_last_index{node.get_attribute_value("select_last_index", 0)} {} OPENVINO_SUPPRESS_DEPRECATED_END -std::shared_ptr ArgMinMaxFactory::make_arg_max() const { - return make_topk_subgraph(default_opset::TopK::Mode::MAX); +std::shared_ptr ArgMinMaxFactory::make_arg_max() const { + return make_topk_subgraph(v11::TopK::Mode::MAX); } -std::shared_ptr ArgMinMaxFactory::make_arg_min() const { - return make_topk_subgraph(default_opset::TopK::Mode::MIN); +std::shared_ptr ArgMinMaxFactory::make_arg_min() const { + return make_topk_subgraph(v11::TopK::Mode::MIN); } -std::shared_ptr ArgMinMaxFactory::make_topk_subgraph(default_opset::TopK::Mode mode) const { - const auto k_node = default_opset::Constant::create(ngraph::element::i64, Shape{}, {1}); +std::shared_ptr ArgMinMaxFactory::make_topk_subgraph(v11::TopK::Mode mode) const { + const auto k_node = v0::Constant::create(element::i64, Shape{}, {1}); if (m_select_last_index 
== 1) { // Example (ArgMin): @@ -57,49 +66,40 @@ std::shared_ptr ArgMinMaxFactory::make_topk_subgraph(default_opset OPENVINO_SUPPRESS_DEPRECATED_START const int64_t normalized_axis = - normalize_axis(m_input_node.get_node(), m_axis, m_input_node.get_partial_shape().rank()); + ov::normalize_axis(m_input_node.get_node(), m_axis, m_input_node.get_partial_shape().rank()); OPENVINO_SUPPRESS_DEPRECATED_END - const auto axis_node = default_opset::Constant::create(ngraph::element::i64, Shape{1}, {normalized_axis}); - const auto reverse = std::make_shared(m_input_node, axis_node, opset1::Reverse::Mode::INDEX); + const auto axis_node = v0::Constant::create(element::i64, Shape{1}, {normalized_axis}); + const auto reverse = std::make_shared(m_input_node, axis_node, v1::Reverse::Mode::INDEX); - const auto topk = std::make_shared(reverse, - k_node, - normalized_axis, - mode, - default_opset::TopK::SortType::NONE); + const auto topk = std::make_shared(reverse, k_node, normalized_axis, mode, v1::TopK::SortType::NONE); - const auto data_shape = std::make_shared(m_input_node); - const auto dims_on_axis = std::make_shared( - data_shape, - axis_node, - default_opset::Constant::create(ngraph::element::i64, Shape{}, {0})); + const auto data_shape = std::make_shared(m_input_node); + const auto dims_on_axis = + std::make_shared(data_shape, axis_node, v0::Constant::create(element::i64, Shape{}, {0})); - const auto res_index = std::make_shared( - dims_on_axis, - std::make_shared(topk->output(1), element::i64)); - const auto result = std::make_shared( - res_index, - default_opset::Constant::create(ngraph::element::i64, Shape{1}, {1})); + const auto res_index = + std::make_shared(dims_on_axis, std::make_shared(topk->output(1), element::i64)); + const auto result = + std::make_shared(res_index, v0::Constant::create(element::i64, Shape{1}, {1})); if (m_keep_dims == 0) { - const auto axis_to_remove = default_opset::Constant::create(element::u64, Shape{}, {topk->get_axis()}); + const auto 
axis_to_remove = v0::Constant::create(element::u64, Shape{}, {topk->get_axis()}); - return std::make_shared(result, axis_to_remove); + return std::make_shared(result, axis_to_remove); } return result; } - const auto topk = - std::make_shared(m_input_node, k_node, m_axis, mode, default_opset::TopK::SortType::NONE); + const auto topk = std::make_shared(m_input_node, k_node, m_axis, mode, v11::TopK::SortType::NONE); - const auto result = std::make_shared(topk->output(1), element::i64); + const auto result = std::make_shared(topk->output(1), element::i64); if (m_keep_dims == 0) { - const auto axis_to_remove = default_opset::Constant::create(element::u64, Shape{}, {topk->get_axis()}); + const auto axis_to_remove = v0::Constant::create(element::u64, Shape{}, {topk->get_axis()}); - return std::make_shared(result, axis_to_remove); + return std::make_shared(result, axis_to_remove); } return result; diff --git a/src/frontends/onnx/frontend/src/utils/arg_min_max_factory.hpp b/src/frontends/onnx/frontend/src/utils/arg_min_max_factory.hpp index f8f012e3611d8c..e4547e2366297e 100644 --- a/src/frontends/onnx/frontend/src/utils/arg_min_max_factory.hpp +++ b/src/frontends/onnx/frontend/src/utils/arg_min_max_factory.hpp @@ -7,10 +7,10 @@ #include #include -#include "default_opset.hpp" -#include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" #include "openvino/core/deprecated.hpp" +#include "openvino/core/node.hpp" +#include "openvino/op/topk.hpp" namespace ngraph { namespace onnx_import { @@ -25,17 +25,17 @@ class ArgMinMaxFactory { /// \brief Creates ArgMax ONNX operation. /// \return Sub-graph representing ArgMax op. - std::shared_ptr make_arg_max() const; + std::shared_ptr make_arg_max() const; /// \brief Creates ArgMin ONNX operation. /// \return Sub-graph representing ArgMin op. 
- std::shared_ptr make_arg_min() const; + std::shared_ptr make_arg_min() const; private: - std::shared_ptr make_topk_subgraph(default_opset::TopK::Mode mode) const; + std::shared_ptr make_topk_subgraph(ov::op::v11::TopK::Mode mode) const; const std::int64_t m_keep_dims; - Output m_input_node; + Output m_input_node; std::int64_t m_axis; std::int64_t m_select_last_index; }; diff --git a/src/frontends/onnx/frontend/src/utils/conv_factory.cpp b/src/frontends/onnx/frontend/src/utils/conv_factory.cpp index 617ae6d3eab1a9..6cd180b6ecc64f 100644 --- a/src/frontends/onnx/frontend/src/utils/conv_factory.cpp +++ b/src/frontends/onnx/frontend/src/utils/conv_factory.cpp @@ -6,9 +6,9 @@ #include "default_opset.hpp" #include "exceptions.hpp" -#include "ngraph/op/group_conv.hpp" -#include "ngraph/op/util/attr_types.hpp" #include "onnx_import/core/null_node.hpp" +#include "openvino/op/group_conv.hpp" +#include "openvino/op/util/attr_types.hpp" #include "ov_models/ov_builders/reshape.hpp" #include "utils/conv_factory.hpp" #include "utils/convpool.hpp" @@ -17,32 +17,32 @@ namespace ngraph { namespace onnx_import { namespace conv_factory { -std::shared_ptr make_ng_convolution(const Output& data, - const Output& filters, - const ngraph::Strides& strides, - const ngraph::Strides& dilations, - const ngraph::CoordinateDiff& padding_below, - const ngraph::CoordinateDiff& padding_above, +std::shared_ptr make_ng_convolution(const Output& data, + const Output& filters, + const ov::Strides& strides, + const ov::Strides& dilations, + const ov::CoordinateDiff& padding_below, + const ov::CoordinateDiff& padding_above, int64_t groups, - const ngraph::op::PadType& auto_pad) { + const ov::op::PadType& auto_pad) { if (groups > 1) { const auto reshaped_filters = convpool::get_reshaped_filters(filters, groups); - return std::make_shared(data, - reshaped_filters, - strides, - padding_below, - padding_above, - dilations, - auto_pad); + return std::make_shared(data, + reshaped_filters, + strides, + 
padding_below, + padding_above, + dilations, + auto_pad); } else { - return std::make_shared(data, - filters, - strides, - padding_below, - padding_above, - dilations, - auto_pad); + return std::make_shared(data, + filters, + strides, + padding_below, + padding_above, + dilations, + auto_pad); } } } // namespace conv_factory diff --git a/src/frontends/onnx/frontend/src/utils/conv_factory.hpp b/src/frontends/onnx/frontend/src/utils/conv_factory.hpp index 48dc882581a5f3..c73b2e2832447f 100644 --- a/src/frontends/onnx/frontend/src/utils/conv_factory.hpp +++ b/src/frontends/onnx/frontend/src/utils/conv_factory.hpp @@ -4,21 +4,21 @@ #pragma once -#include "ngraph/node.hpp" -#include "ngraph/op/op.hpp" #include "onnx_import/core/node.hpp" +#include "openvino/core/node.hpp" +#include "openvino/op/op.hpp" namespace ngraph { namespace onnx_import { namespace conv_factory { -std::shared_ptr make_ng_convolution(const Output& data, - const Output& filters, - const ngraph::Strides& strides, - const ngraph::Strides& dilations, - const ngraph::CoordinateDiff& padding_below, - const ngraph::CoordinateDiff& padding_above, +std::shared_ptr make_ng_convolution(const Output& data, + const Output& filters, + const ov::Strides& strides, + const ov::Strides& dilations, + const ov::CoordinateDiff& padding_below, + const ov::CoordinateDiff& padding_above, int64_t groups, - const ngraph::op::PadType& auto_pad); + const ov::op::PadType& auto_pad); } // namespace conv_factory } // namespace onnx_import } // namespace ngraph diff --git a/src/frontends/onnx/frontend/src/utils/onnx_internal.cpp b/src/frontends/onnx/frontend/src/utils/onnx_internal.cpp index 9f53aa6e41430f..349b9e8294e256 100644 --- a/src/frontends/onnx/frontend/src/utils/onnx_internal.cpp +++ b/src/frontends/onnx/frontend/src/utils/onnx_internal.cpp @@ -71,12 +71,12 @@ void convert_decoded_function(std::shared_ptr function) { if (auto raw_node = std::dynamic_pointer_cast(node)) { if (auto subgraph_node = 
std::dynamic_pointer_cast(node)) { subgraph_node->infer_inputs_from_parent(); - for (auto& function : subgraph_node->get_subgraph_functions()) { - convert_decoded_function(function); + for (auto& model : subgraph_node->get_subgraph_models()) { + convert_decoded_function(model); } } - auto ng_nodes = raw_node->get_ng_nodes(onnx_graph); - replace_node(raw_node, ng_nodes); + auto ov_nodes = raw_node->get_ov_nodes(onnx_graph); + replace_node(raw_node, ov_nodes); } else { // Have to revalidate node because new intpus can affect shape/type // propagation for already translated nodes From 1b30e1e266b0e43c7be6a829b8a7ac87381402cb Mon Sep 17 00:00:00 2001 From: Wilson Seok Date: Thu, 11 Jan 2024 08:24:33 +0900 Subject: [PATCH 18/28] exclude gather_compressed in shape_of subgraph (#22060) --- .../mark_shape_of_subgraphs.cpp | 9 +++++ .../passes/mark_shape_of_subgraphs_test.cpp | 38 +++++++++++++++++++ 2 files changed, 47 insertions(+) diff --git a/src/plugins/intel_gpu/src/graph/graph_optimizer/mark_shape_of_subgraphs.cpp b/src/plugins/intel_gpu/src/graph/graph_optimizer/mark_shape_of_subgraphs.cpp index 0f174212cb5851..f685cee254f4f0 100644 --- a/src/plugins/intel_gpu/src/graph/graph_optimizer/mark_shape_of_subgraphs.cpp +++ b/src/plugins/intel_gpu/src/graph/graph_optimizer/mark_shape_of_subgraphs.cpp @@ -7,6 +7,7 @@ #include "reshape_inst.h" #include "eltwise_inst.h" #include "select_inst.h" +#include "gather_inst.h" #include "pass_manager.h" #include "intel_gpu/graph/program.hpp" @@ -69,6 +70,14 @@ bool mark_shape_of_subgraphs::can_mark_node(const program_node& node) { return false; } + // Exclude gather_compressed primitive because gather_cpu_impl doesn't support it. 
+ if (node.is_type()) { + auto& gather_node = node.as(); + auto gather_compressed_weight_mode = gather_node.get_primitive()->compressed_weights; + if (gather_compressed_weight_mode) + return false; + } + auto available_impls = node.type()->get_available_impls(node); auto cpu_impl_found = available_impls.find(impl_types::cpu) != available_impls.end(); diff --git a/src/plugins/intel_gpu/tests/unit/passes/mark_shape_of_subgraphs_test.cpp b/src/plugins/intel_gpu/tests/unit/passes/mark_shape_of_subgraphs_test.cpp index 683c70ca5cab09..29ee42b9ec1367 100644 --- a/src/plugins/intel_gpu/tests/unit/passes/mark_shape_of_subgraphs_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/passes/mark_shape_of_subgraphs_test.cpp @@ -280,3 +280,41 @@ TEST(mark_shape_of_subgraphs, concat_with_empty_tensor_inputs) { ASSERT_EQ(5, output_ptr2[i]); } } + +TEST(mark_shape_of_subgraphs, gather_compressed_no_mark) { + auto& engine = get_test_engine(); + auto input_layout_dynamic = layout{ov::PartialShape{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, + data_types::f32, format::bfyx}; + auto data_0 = engine.allocate_memory({ ov::PartialShape{1}, data_types::i32, format::bfyx }); + auto data_1 = engine.allocate_memory({ ov::PartialShape{1}, data_types::i32, format::bfyx }); + auto decompression_scale = engine.allocate_memory({ ov::PartialShape{1}, data_types::f32, format::bfyx }); + auto decompression_zero_point = engine.allocate_memory({ ov::PartialShape{1}, data_types::f32, format::bfyx }); + set_values(data_0, {0}); + set_values(data_1, {2}); + set_values(decompression_scale, {2}); + set_values(decompression_zero_point, {2}); + + topology topology; + topology.add(input_layout("input", input_layout_dynamic)); + topology.add(data("data_0", data_0)); + topology.add(data("data_1", data_1)); + topology.add(data("decompression_scale", decompression_scale)); + topology.add(data("decompression_zero_point", decompression_zero_point)); + topology.add(shape_of("shape_of", input_info("input"), 
data_types::i32)); + topology.add(gather("gather_compressed", input_info("shape_of"), input_info("data_0"), 0, + input_info("decompression_scale"), input_info("decompression_zero_point"), ov::element::f32, 0, {})); + topology.add(eltwise("eltwise", input_info("gather_compressed"), input_info("data_1"), eltwise_mode::sum)); + topology.add(concatenation("concat", {input_info("eltwise"), input_info("data_1")}, 0)); + topology.add(broadcast("broadcast", input_info("input"), input_info("concat"), {}, ov::op::BroadcastType::BIDIRECTIONAL)); + + ExecutionConfig config = get_test_default_config(engine); + config.set_property(ov::intel_gpu::allow_new_shape_infer(true)); + config.set_property(ov::intel_gpu::optimize_data(true)); + network network(engine, topology, config); + + auto prog = network.get_program(); + ASSERT_NE(prog, nullptr); + + ASSERT_FALSE(check_subgraph(prog->get_node("shape_of"), prog->get_node("gather_compressed"))); + ASSERT_FALSE(check_subgraph(prog->get_node("shape_of"), prog->get_node("concat"))); +} \ No newline at end of file From 75d9b5ce7497e4bd10ecd6a63d3d62b1b3439846 Mon Sep 17 00:00:00 2001 From: Mingyu Kim Date: Thu, 11 Jan 2024 14:58:00 +0900 Subject: [PATCH 19/28] [GPU] Update onednn to latest 3.4pc (#22030) --- src/plugins/intel_gpu/thirdparty/onednn_gpu | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/plugins/intel_gpu/thirdparty/onednn_gpu b/src/plugins/intel_gpu/thirdparty/onednn_gpu index cb77937ffcf5e8..41e05c66f91fa6 160000 --- a/src/plugins/intel_gpu/thirdparty/onednn_gpu +++ b/src/plugins/intel_gpu/thirdparty/onednn_gpu @@ -1 +1 @@ -Subproject commit cb77937ffcf5e83b5d1cf2940c94e8b508d8f7b4 +Subproject commit 41e05c66f91fa6fbbddedff0eb9b33913cf44fde From 9e2933c29e7db3d07b45284fc17519bdf6b425cc Mon Sep 17 00:00:00 2001 From: Andrei Kashchikhin Date: Thu, 11 Jan 2024 06:29:32 +0000 Subject: [PATCH 20/28] [SCRIPTS] Introduce `setupvars` for Powershell (#22013) * start with pwsh setupvars * formatting, add last 
sections with python checks * use script path for dir setting; add ps1 script to install * use pwsh script * rm from other distr; rm unnecessary OpenCV * use the same component for both win setupvars; rm obsolete env vars * rm spaces * use single install; rm unused var --- .github/workflows/windows.yml | 129 ++++++++++++++++---------------- scripts/CMakeLists.txt | 21 ++---- scripts/setupvars/setupvars.ps1 | 116 ++++++++++++++++++++++++++++ 3 files changed, 188 insertions(+), 78 deletions(-) create mode 100644 scripts/setupvars/setupvars.ps1 diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 768673771212c5..a294f6fb747000 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -229,7 +229,7 @@ jobs: if-no-files-found: 'error' Samples: - needs: [Build, Smart_CI] + needs: [ Build, Smart_CI ] if: fromJSON(needs.smart_ci.outputs.affected_components).samples timeout-minutes: 20 defaults: @@ -291,10 +291,10 @@ jobs: & ${{ env.SAMPLES_INSTALL_DIR }}/c/build_samples_msvc.bat -i ${{ env.INSTALL_DIR }} -b ${{ env.BUILD_DIR }}/c_samples - name: Samples tests - shell: cmd run: | python3 -m pip install --ignore-installed PyYAML -r ${{ env.INSTALL_TEST_DIR }}/smoke_tests/requirements.txt - call "${{ env.INSTALL_DIR }}\\setupvars.bat" && python3 -m pytest -sv ${{ env.INSTALL_TEST_DIR }}/smoke_tests --env_conf ${{ env.INSTALL_TEST_DIR }}/smoke_tests/env_config.yml --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-SamplesSmokeTests.xml + & "${{ env.INSTALL_DIR }}/setupvars.ps1" + python3 -m pytest -sv ${{ env.INSTALL_TEST_DIR }}/smoke_tests --env_conf ${{ env.INSTALL_TEST_DIR }}/smoke_tests/env_config.yml --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-SamplesSmokeTests.xml env: IE_APP_PATH: ${{ env.INSTALL_DIR }}/samples_bin IE_APP_PYTHON_PATH: ${{ env.INSTALL_DIR }}/samples/python @@ -356,7 +356,7 @@ jobs: Python_Unit_Tests: name: Python unit tests - needs: [Build, Smart_CI] + needs: [ Build, Smart_CI ] timeout-minutes: 75 defaults: 
run: @@ -551,9 +551,9 @@ jobs: - name: Python Frontend tests if: fromJSON(needs.smart_ci.outputs.affected_components).PyTorch_FE.test || fromJSON(needs.smart_ci.outputs.affected_components).PDPD_FE.test - shell: cmd run: | - call "${{ env.INSTALL_DIR }}\\setupvars.bat" && python3 -m pytest ${{ env.LAYER_TESTS_INSTALL_DIR }}/py_frontend_tests --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-test_py_fontend.xml + & "${{ env.INSTALL_DIR }}/setupvars.ps1" + python3 -m pytest ${{ env.LAYER_TESTS_INSTALL_DIR }}/py_frontend_tests --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-test_py_fontend.xml - name: OVC unit tests if: fromJSON(needs.smart_ci.outputs.affected_components).MO.test @@ -570,7 +570,7 @@ jobs: CXX_Unit_Tests: name: C++ unit tests - needs: [Build, Smart_CI] + needs: [ Build, Smart_CI ] timeout-minutes: 25 defaults: run: @@ -604,158 +604,158 @@ jobs: - name: OpenVINO Core unit tests if: fromJSON(needs.smart_ci.outputs.affected_components).Core.test - shell: cmd run: | - call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_core_unit_tests --gtest_print_time=1 --gtest_filter=-*IE_GPU* --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-NGraphUT.xml + & "${{ env.INSTALL_DIR }}/setupvars.ps1" + ${{ env.INSTALL_TEST_DIR }}/ov_core_unit_tests --gtest_print_time=1 --gtest_filter=-*IE_GPU* --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-NGraphUT.xml - name: OpenVINO Inference functional tests if: fromJSON(needs.smart_ci.outputs.affected_components).inference.test - shell: cmd run: | - call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_inference_functional_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-InferenceFunc.xml + & "${{ env.INSTALL_DIR }}/setupvars.ps1" + ${{ env.INSTALL_TEST_DIR }}/ov_inference_functional_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-InferenceFunc.xml - name: OpenVINO Inference unit tests if: 
fromJSON(needs.smart_ci.outputs.affected_components).inference.test - shell: cmd run: | - call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_inference_unit_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-InferenceUnit.xml + & "${{ env.INSTALL_DIR }}/setupvars.ps1" + ${{ env.INSTALL_TEST_DIR }}/ov_inference_unit_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-InferenceUnit.xml - name: Low Precision Transformations Tests if: fromJSON(needs.smart_ci.outputs.affected_components).LP_transformations.test - shell: cmd run: | - call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_lp_transformations_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-LpTransformations.xml + & "${{ env.INSTALL_DIR }}/setupvars.ps1" + ${{ env.INSTALL_TEST_DIR }}/ov_lp_transformations_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-LpTransformations.xml - name: OpenVINO Conditional compilation tests if: fromJSON(needs.smart_ci.outputs.affected_components).Core.test - shell: cmd run: | - call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_conditional_compilation_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-ConditionalCompilation.xml + & "${{ env.INSTALL_DIR }}/setupvars.ps1" + ${{ env.INSTALL_TEST_DIR }}/ov_conditional_compilation_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-ConditionalCompilation.xml - name: IR frontend tests if: fromJSON(needs.smart_ci.outputs.affected_components).IR_FE.test - shell: cmd run: | - call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_ir_frontend_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-IRFrontend.xml + & "${{ env.INSTALL_DIR }}/setupvars.ps1" + ${{ env.INSTALL_TEST_DIR }}/ov_ir_frontend_tests --gtest_print_time=1 
--gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-IRFrontend.xml - name: PaddlePaddle frontend tests # Disabled because of CVS-95904 if: ${{ 'false' }} - shell: cmd run: | - call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/paddle_tests --gtest_print_time=1 --gtest_filter=*smoke* --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-PaddleTests.xml + & "${{ env.INSTALL_DIR }}/setupvars.ps1" + ${{ env.INSTALL_TEST_DIR }}/paddle_tests --gtest_print_time=1 --gtest_filter=*smoke* --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-PaddleTests.xml - name: ONNX frontend tests if: fromJSON(needs.smart_ci.outputs.affected_components).ONNX_FE.test - shell: cmd run: | - call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_onnx_frontend_tests --gtest_print_time=1 --gtest_filter=-*IE_GPU* --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-ONNXFrontend.xml + & "${{ env.INSTALL_DIR }}/setupvars.ps1" + ${{ env.INSTALL_TEST_DIR }}/ov_onnx_frontend_tests --gtest_print_time=1 --gtest_filter=-*IE_GPU* --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-ONNXFrontend.xml - name: TensorFlow Common frontend tests if: fromJSON(needs.smart_ci.outputs.affected_components).TF_FE.test || fromJSON(needs.smart_ci.outputs.affected_components).TFL_FE.test - shell: cmd run: | - call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_tensorflow_common_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-TensorFlowCommonFrontend.xml + & "${{ env.INSTALL_DIR }}/setupvars.ps1" + ${{ env.INSTALL_TEST_DIR }}/ov_tensorflow_common_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-TensorFlowCommonFrontend.xml - name: TensorFlow frontend tests if: fromJSON(needs.smart_ci.outputs.affected_components).TF_FE.test - shell: cmd run: | - call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_tensorflow_frontend_tests --gtest_print_time=1 --gtest_output=xml:${{ 
env.INSTALL_TEST_DIR }}/TEST-TensorFlowFrontend.xml + & "${{ env.INSTALL_DIR }}/setupvars.ps1" + ${{ env.INSTALL_TEST_DIR }}/ov_tensorflow_frontend_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-TensorFlowFrontend.xml - name: TensorFlow Lite frontend tests if: fromJSON(needs.smart_ci.outputs.affected_components).TFL_FE.test - shell: cmd run: | - :: Skip ticket: 126320 - call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_tensorflow_lite_frontend_tests --gtest_print_time=1 --gtest_filter=-*test_decode_convert_equal_convert*:*test_convert_partially_equal_convert* --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-TensorFlowLiteFrontend.xml + # Skip ticket: 126320 + & "${{ env.INSTALL_DIR }}/setupvars.ps1" + ${{ env.INSTALL_TEST_DIR }}/ov_tensorflow_lite_frontend_tests --gtest_print_time=1 --gtest_filter=-*test_decode_convert_equal_convert*:*test_convert_partially_equal_convert* --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-TensorFlowLiteFrontend.xml - name: Transformations func tests if: fromJSON(needs.smart_ci.outputs.affected_components).transformations.test - shell: cmd run: | - call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_transformations_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-Transformations.xml + & "${{ env.INSTALL_DIR }}/setupvars.ps1" + ${{ env.INSTALL_TEST_DIR }}/ov_transformations_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-Transformations.xml - name: Common test utils tests - shell: cmd run: | - call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_util_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-commonUtilsTests.xml + & "${{ env.INSTALL_DIR }}/setupvars.ps1" + ${{ env.INSTALL_TEST_DIR }}/ov_util_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-commonUtilsTests.xml - name: Snippets func tests if: 
fromJSON(needs.smart_ci.outputs.affected_components).CPU.test - shell: cmd run: | - call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_snippets_func_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-SnippetsFuncTests.xml + & "${{ env.INSTALL_DIR }}/setupvars.ps1" + ${{ env.INSTALL_TEST_DIR }}/ov_snippets_func_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-SnippetsFuncTests.xml - name: CPU plugin unit tests if: fromJSON(needs.smart_ci.outputs.affected_components).CPU.test - shell: cmd run: | - call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_cpu_unit_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-CPUUnitTests.xml + & "${{ env.INSTALL_DIR }}/setupvars.ps1" + ${{ env.INSTALL_TEST_DIR }}/ov_cpu_unit_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-CPUUnitTests.xml - name: ov_subgraphs_dumper_tests tests - shell: cmd run: | - call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_subgraphs_dumper_tests --gtest_print_time=1 --device=TEMPLATE --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-SubgraphsDumperTests.xml + & "${{ env.INSTALL_DIR }}/setupvars.ps1" + ${{ env.INSTALL_TEST_DIR }}/ov_subgraphs_dumper_tests --gtest_print_time=1 --device=TEMPLATE --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-SubgraphsDumperTests.xml - name: Template OpImpl tests - shell: cmd run: | - call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_op_conformance_tests --gtest_print_time=1 --gtest_filter="*OpImpl*" --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-TemplateOpImplTests.xml + & "${{ env.INSTALL_DIR }}/setupvars.ps1" + ${{ env.INSTALL_TEST_DIR }}/ov_op_conformance_tests --gtest_print_time=1 --gtest_filter="*OpImpl*" --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-TemplateOpImplTests.xml - name: AUTO unit tests if: 
fromJSON(needs.smart_ci.outputs.affected_components).AUTO.test - shell: cmd run: | - call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_auto_unit_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-ov_auto_unit_tests.xml + & "${{ env.INSTALL_DIR }}/setupvars.ps1" + ${{ env.INSTALL_TEST_DIR }}/ov_auto_unit_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-ov_auto_unit_tests.xml - name: AUTO func Tests if: fromJSON(needs.smart_ci.outputs.affected_components).AUTO.test - shell: cmd run: | - call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_auto_func_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-ov_auto_func_tests.xml + & "${{ env.INSTALL_DIR }}/setupvars.ps1" + ${{ env.INSTALL_TEST_DIR }}/ov_auto_func_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-ov_auto_func_tests.xml - name: Template plugin func tests if: fromJSON(needs.smart_ci.outputs.affected_components).TEMPLATE.test - shell: cmd run: | - call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_template_func_tests --gtest_print_time=1 --gtest_filter=*smoke* --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-TemplateFuncTests.xml + & "${{ env.INSTALL_DIR }}/setupvars.ps1" + ${{ env.INSTALL_TEST_DIR }}/ov_template_func_tests --gtest_print_time=1 --gtest_filter=*smoke* --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-TemplateFuncTests.xml - name: OpenVINO C API tests if: fromJSON(needs.smart_ci.outputs.affected_components).C_API.test - shell: cmd run: | - call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_capi_test --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-OpenVINOCAPITests.xml + & "${{ env.INSTALL_DIR }}/setupvars.ps1" + ${{ env.INSTALL_TEST_DIR }}/ov_capi_test --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-OpenVINOCAPITests.xml 
- name: AutoBatch unit tests if: fromJSON(needs.smart_ci.outputs.affected_components).AUTO_BATCH.test - shell: cmd run: | - call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_auto_batch_unit_tests --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-ov_auto_batch_unit_tests.xml + & "${{ env.INSTALL_DIR }}/setupvars.ps1" + ${{ env.INSTALL_TEST_DIR }}/ov_auto_batch_unit_tests --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-ov_auto_batch_unit_tests.xml - name: AutoBatch func tests if: fromJSON(needs.smart_ci.outputs.affected_components).AUTO_BATCH.test - shell: cmd run: | - call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_auto_batch_func_tests --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-ov_auto_batch_func_tests.xml + & "${{ env.INSTALL_DIR }}/setupvars.ps1" + ${{ env.INSTALL_TEST_DIR }}/ov_auto_batch_func_tests --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-ov_auto_batch_func_tests.xml - name: Proxy Plugin func tests if: fromJSON(needs.smart_ci.outputs.affected_components).PROXY.test - shell: cmd run: | - call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_proxy_plugin_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-OVProxyTests.xml + & "${{ env.INSTALL_DIR }}/setupvars.ps1" + ${{ env.INSTALL_TEST_DIR }}/ov_proxy_plugin_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-OVProxyTests.xml - name: Hetero Unit Tests if: fromJSON(needs.smart_ci.outputs.affected_components).HETERO.test - shell: cmd run: | - call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_hetero_unit_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-OVHeteroUnitTests.xml + & "${{ env.INSTALL_DIR }}/setupvars.ps1" + ${{ env.INSTALL_TEST_DIR }}/ov_hetero_unit_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-OVHeteroUnitTests.xml - name: Hetero Func Tests if: 
fromJSON(needs.smart_ci.outputs.affected_components).HETERO.test - shell: cmd run: | - call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_hetero_func_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-OVHeteroFuncTests.xml + & "${{ env.INSTALL_DIR }}/setupvars.ps1" + ${{ env.INSTALL_TEST_DIR }}/ov_hetero_func_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-OVHeteroFuncTests.xml - name: Upload Test Results uses: actions/upload-artifact@v3 @@ -767,7 +767,7 @@ jobs: CPU_Functional_Tests: name: CPU functional tests - needs: [Build, Smart_CI] + needs: [ Build, Smart_CI ] timeout-minutes: 70 defaults: run: @@ -818,7 +818,6 @@ jobs: self-hosted-runner: 'true' - name: Install python dependencies - shell: cmd run: python3 -m pip install -r ${{ github.workspace }}\install\tests\functional_test_utils\layer_tests_summary\requirements.txt - name: Restore tests execution time @@ -830,9 +829,9 @@ jobs: ${{ runner.os }}-tests-functional-cpu-stamp - name: Intel CPU plugin func tests (parallel) - shell: cmd run: | - call "${{ env.INSTALL_DIR }}\\setupvars.bat" && python3 ${{ env.PARALLEL_TEST_SCRIPT }} -e ${{ env.INSTALL_TEST_DIR }}\ov_cpu_func_tests.exe -c ${{ env.PARALLEL_TEST_CACHE }} -w ${{ env.INSTALL_TEST_DIR }} -s suite -- --gtest_filter=*smoke*" + & "${{ env.INSTALL_DIR }}/setupvars.ps1" + python3 ${{ env.PARALLEL_TEST_SCRIPT }} -e ${{ env.INSTALL_TEST_DIR }}/ov_cpu_func_tests.exe -c ${{ env.PARALLEL_TEST_CACHE }} -w ${{ env.INSTALL_TEST_DIR }} -s suite -- --gtest_filter=*smoke* timeout-minutes: 60 - name: Save tests execution time @@ -860,7 +859,7 @@ jobs: Overall_Status: name: ci/gha_overall_status_windows - needs: [Smart_CI, Build, Samples, CXX_Unit_Tests, Python_Unit_Tests, CPU_Functional_Tests] + needs: [ Smart_CI, Build, Samples, CXX_Unit_Tests, Python_Unit_Tests, CPU_Functional_Tests ] if: ${{ always() }} runs-on: ubuntu-latest steps: diff --git a/scripts/CMakeLists.txt 
b/scripts/CMakeLists.txt index c76904c7270c03..6506fb80991f13 100644 --- a/scripts/CMakeLists.txt +++ b/scripts/CMakeLists.txt @@ -27,23 +27,18 @@ ov_shellcheck_process(DIRECTORY "${OpenVINO_SOURCE_DIR}" ov_cpack_add_component(${OV_CPACK_COMP_SETUPVARS} HIDDEN) if(UNIX) - set(_setupvars_file_full_path "${CMAKE_CURRENT_SOURCE_DIR}/setupvars/setupvars.sh") + set(_setupvars_files "${CMAKE_CURRENT_SOURCE_DIR}/setupvars/setupvars.sh") elseif(WIN32) + # setupvars.bat set(_setupvars_file "setupvars/setupvars.bat") - set(_setupvars_file_full_path "${CMAKE_CURRENT_SOURCE_DIR}/${_setupvars_file}") - if(USE_BUILD_TYPE_SUBFOLDER AND CMAKE_BUILD_TYPE AND NOT CMAKE_BUILD_TYPE MATCHES "^(Debug|Release)$") - # Patch primary configuration in setupvars.bat which is "Release" by default. - # Note setupvars secondary configuration is always "Debug". - message(STATUS "Patching content of ${_setupvars_file_full_path} for CMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}") - file(READ "${_setupvars_file_full_path}" _setupvars_content) - string(REPLACE "Release" ${CMAKE_BUILD_TYPE} _setupvars_content "${_setupvars_content}") - set(_setupvars_file_full_path "${OpenVINO_BINARY_DIR}/${_setupvars_file}") - message(STATUS "Writing patched content to ${_setupvars_file_full_path}") - file(WRITE "${_setupvars_file_full_path}" "${_setupvars_content}") - endif() + set(_setupvars_files "${CMAKE_CURRENT_SOURCE_DIR}/${_setupvars_file}") + + # setupvars.ps1 + set(_setupvars_file_pwsh "setupvars/setupvars.ps1") + list(APPEND _setupvars_files "${CMAKE_CURRENT_SOURCE_DIR}/${_setupvars_file_pwsh}") endif() -install(PROGRAMS "${_setupvars_file_full_path}" +install(PROGRAMS ${_setupvars_files} DESTINATION . 
COMPONENT ${OV_CPACK_COMP_SETUPVARS} ${OV_CPACK_COMP_SETUPVARS_EXCLUDE_ALL}) diff --git a/scripts/setupvars/setupvars.ps1 b/scripts/setupvars/setupvars.ps1 new file mode 100644 index 00000000000000..f64869b04550c0 --- /dev/null +++ b/scripts/setupvars/setupvars.ps1 @@ -0,0 +1,116 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +# Arguments parsing +param ( + [string]$python_version +) + +$Env:INTEL_OPENVINO_DIR = Split-Path $MyInvocation.MyCommand.Path + +$Env:OpenVINO_DIR = "$Env:INTEL_OPENVINO_DIR/runtime/cmake" +$Env:OPENVINO_LIB_PATHS = "$Env:INTEL_OPENVINO_DIR/runtime/bin/intel64/Release;$Env:INTEL_OPENVINO_DIR/runtime/bin/intel64/Debug;$Env:OPENVINO_LIB_PATHS" + +# TBB +if (Test-Path -Path "$Env:INTEL_OPENVINO_DIR/runtime/3rdparty/tbb") +{ + $prefix = "" + if (Test-Path -Path "$Env:INTEL_OPENVINO_DIR/runtime/3rdparty/tbb/redist") + { + $prefix = "$Env:INTEL_OPENVINO_DIR/runtime/3rdparty/tbb/redist/intel64/vc14" + } + elseif (Test-Path -Path "$Env:INTEL_OPENVINO_DIR/runtime/3rdparty/tbb/bin/intel64/vc14") + { + $prefix = "$Env:INTEL_OPENVINO_DIR/runtime/3rdparty/tbb/bin/intel64/vc14" + } + elseif (Test-Path -Path "$Env:INTEL_OPENVINO_DIR/runtime/3rdparty/tbb/bin") + { + $prefix = "$Env:INTEL_OPENVINO_DIR/runtime/3rdparty/tbb/bin" + } + + if ($prefix) + { + $Env:OPENVINO_LIB_PATHS = "$prefix;$Env:OPENVINO_LIB_PATHS" + } + + if (Test-Path -Path "$Env:INTEL_OPENVINO_DIR/runtime/3rdparty/tbb/cmake") + { + $Env:TBB_DIR = "$Env:INTEL_OPENVINO_DIR/runtime/3rdparty/tbb/cmake" + } + elseif (Test-Path -Path "$Env:INTEL_OPENVINO_DIR/runtime/3rdparty/tbb/lib/cmake/TBB") + { + $Env:TBB_DIR = "$Env:INTEL_OPENVINO_DIR/runtime/3rdparty/tbb/lib/cmake/TBB" + } + elseif (Test-Path -Path "$Env:INTEL_OPENVINO_DIR/runtime/3rdparty/tbb/lib64/cmake/TBB") + { + $Env:TBB_DIR = "$Env:INTEL_OPENVINO_DIR/runtime/3rdparty/tbb/lib64/cmake/TBB" + } + elseif (Test-Path -Path "$Env:INTEL_OPENVINO_DIR/runtime/3rdparty/tbb/lib/cmake/tbb") + { + 
$Env:TBB_DIR = "$Env:INTEL_OPENVINO_DIR/runtime/3rdparty/tbb/lib/cmake/tbb" + } +} + +# Add libs directories to the PATH +$Env:PATH = "$Env:OPENVINO_LIB_PATHS;$Env:PATH" + +Write-Host "[setupvars] OpenVINO environment initialized" + +# Check if Python is installed +$PYTHON_VERSION_MAJOR = 3 +$MIN_REQUIRED_PYTHON_VERSION_MINOR = 8 +$MAX_SUPPORTED_PYTHON_VERSION_MINOR = 12 + +try +{ + # Should select the latest installed Python version as per https://docs.python.org/3/using/windows.html#getting-started + (py --version) | Out-Null +} +catch +{ + Write-Host "Warning: Python is not installed. Please install one of Python $PYTHON_VERSION_MAJOR.$MIN_REQUIRED_PYTHON_VERSION_MINOR - $PYTHON_VERSION_MAJOR.$MAX_SUPPORTED_PYTHON_VERSION_MINOR (64-bit) from https://www.python.org/downloads/" + # Python is not mandatory so we can safely exit with 0 + Exit 0 +} + +# Check Python version if user did not pass -python_version +if (-not $python_version) +{ + $installed_python_version_major = [int](py -c "import sys; print(f'{sys.version_info[0]}')") + $installed_python_version_minor = [int](py -c "import sys; print(f'{sys.version_info[1]}')") +} +else +{ + [int]$installed_python_version_major, [int]$installed_python_version_minor = $python_version.Split('.') +} + +if (-not ($PYTHON_VERSION_MAJOR -eq $installed_python_version_major -and $installed_python_version_minor -ge $MIN_REQUIRED_PYTHON_VERSION_MINOR -and $installed_python_version_minor -le $MAX_SUPPORTED_PYTHON_VERSION_MINOR)) +{ + Write-Host "Warning: Unsupported Python version $installed_python_version_major.$installed_python_version_minor. 
Please install one of Python $PYTHON_VERSION_MAJOR.$MIN_REQUIRED_PYTHON_VERSION_MINOR - $PYTHON_VERSION_MAJOR.$MAX_SUPPORTED_PYTHON_VERSION_MINOR (64-bit) from https://www.python.org/downloads/" + # Python is not mandatory so we can safely exit with 0 + Exit 0 +} + + +# Check Python bitness +try +{ + $python_bitness = (py -c "import sys; print(64 if sys.maxsize > 2**32 else 32)") +} +catch +{ + Write-Host "Warning: Cannot determine installed Python bitness" + # Python is not mandatory so we can safely exit with 0 + Exit 0 +} + +if ($python_bitness -ne "64") +{ + Write-Host "Warning: Unsupported Python bitness. Please install one of Python $PYTHON_VERSION_MAJOR.$MIN_REQUIRED_PYTHON_VERSION_MINOR - $PYTHON_VERSION_MAJOR.$MAX_SUPPORTED_PYTHON_VERSION_MINOR (64-bit) from https://www.python.org/downloads/" + # Python is not mandatory so we can safely exit with 0 + Exit 0 +} + +$Env:PYTHONPATH = "$Env:INTEL_OPENVINO_DIR/python;$Env:INTEL_OPENVINO_DIR/python/python3;$Env:PYTHONPATH" + +Write-Host "[setupvars] OpenVINO Python environment initialized" From ebe366c67d54132f8e5b339921705c892af651ec Mon Sep 17 00:00:00 2001 From: Fang Xu Date: Thu, 11 Jan 2024 14:31:15 +0800 Subject: [PATCH 21/28] set minimal tbb version on arm64 ubuntu22 (#22057) * set minimal tbb version on arm64 ubuntu22 * disable pkg_search_module for tbb on ARM64 Ubuntu22 * set minimal tbb version for all linux arm64 --- cmake/features.cmake | 7 +------ src/cmake/ov_parallel.cmake | 11 ++++++++++- 2 files changed, 11 insertions(+), 7 deletions(-) diff --git a/cmake/features.cmake b/cmake/features.cmake index c1e843ae64d543..6e383edeeb695d 100644 --- a/cmake/features.cmake +++ b/cmake/features.cmake @@ -155,12 +155,7 @@ endif() if(DEFINED ENV{TBBROOT} OR DEFINED ENV{TBB_DIR} OR DEFINED TBB_DIR OR DEFINED TBBROOT) set(ENABLE_SYSTEM_TBB_DEFAULT OFF) else() - if(LINUX AND AARCH64) - # CVS-126984: system TBB is not very stable on Linux ARM64 (at least on Ubuntu 20.04) - set(ENABLE_SYSTEM_TBB_DEFAULT OFF) - 
else() - set(ENABLE_SYSTEM_TBB_DEFAULT ${ENABLE_SYSTEM_LIBS_DEFAULT}) - endif() + set(ENABLE_SYSTEM_TBB_DEFAULT ${ENABLE_SYSTEM_LIBS_DEFAULT}) endif() ov_dependent_option (ENABLE_SYSTEM_TBB "Enables use of system TBB" ${ENABLE_SYSTEM_TBB_DEFAULT} diff --git a/src/cmake/ov_parallel.cmake b/src/cmake/ov_parallel.cmake index 8ac29116b4396c..0dc8a7671883d8 100644 --- a/src/cmake/ov_parallel.cmake +++ b/src/cmake/ov_parallel.cmake @@ -80,8 +80,17 @@ macro(ov_find_package_tbb) # conan generates TBBConfig.cmake files, which follows cmake's # SameMajorVersion scheme, while TBB itself follows AnyNewerVersion one # see https://cmake.org/cmake/help/latest/module/CMakePackageConfigHelpers.html#generating-a-package-version-file + set(PKG_CONFIG_SEARCH ON) if(CMAKE_TOOLCHAIN_FILE MATCHES "conan_toolchain.cmake" OR CONAN_EXPORTED) set(_ov_minimal_tbb_version 2021.0) + elseif(LINUX AND AARCH64) + # CVS-126984: system TBB is not very stable on Linux ARM64 + set(_ov_minimal_tbb_version 2021.0) + # on Ubuntu22.04, tbb2020 can be installed by "apt install libtbb2-dev", + # after installation, TBB_VERSION is missed in tbb.pc, + # so here skip pkg_search_module for tbb to avoid using TBB 2020 + # that does not meet the minimum version number requirements.
+ set(PKG_CONFIG_SEARCH OFF) else() set(_ov_minimal_tbb_version 2017.0) endif() @@ -113,7 +122,7 @@ macro(ov_find_package_tbb) unset(TBB_DIR) # try tbb.pc from system - if(ENABLE_SYSTEM_TBB AND PkgConfig_FOUND) + if(ENABLE_SYSTEM_TBB AND PkgConfig_FOUND AND PKG_CONFIG_SEARCH) macro(_ov_pkg_config_tbb_unset) # unset since it affects OpenVINOConfig.cmake.in unset(tbb_FOUND) From 1b9dec187bbcf0bc87f6343ff7da3462edbea523 Mon Sep 17 00:00:00 2001 From: Oleg Pipikin Date: Thu, 11 Jan 2024 07:51:25 +0100 Subject: [PATCH 22/28] Refactor GPU shared beh tests (#21974) * Refactor set input/output type tests * Delete api 1.0 test * Refactor auto batching tests * Apply comments * Fix and apply comments --- .../executable_network/exec_graph_info.cpp | 28 - .../behavior/infer_request/callback.cpp | 34 -- .../behavior/infer_request/config.cpp | 19 - .../behavior/infer_request/multithreading.cpp | 31 -- .../behavior/infer_request/perf_counters.cpp | 33 -- .../infer_request/set_blob_by_type.cpp | 35 -- .../behavior/infer_request/wait.cpp | 32 -- .../ov_executable_network/exec_graph_info.cpp | 18 + .../ov_executable_network/exec_net_base.cpp | 18 + .../properties_tests.cpp | 17 + .../ov_plugin/auto_batching_tests.cpp | 34 ++ .../behavior/plugin/auto_batching_tests.cpp | 62 --- .../compiled_model/compiled_model_base.hpp | 77 +++ .../ov_executable_network/exec_graph_info.hpp | 479 +----------------- .../ov_plugin/auto_batching_tests.hpp | 162 ++++++ .../ov_executable_network/exec_graph_info.cpp | 358 +++++++++++++ 16 files changed, 698 insertions(+), 739 deletions(-) delete mode 100644 src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/executable_network/exec_graph_info.cpp delete mode 100644 src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/callback.cpp delete mode 100644 src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/config.cpp delete mode 100644 
src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/multithreading.cpp delete mode 100644 src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/perf_counters.cpp delete mode 100644 src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/set_blob_by_type.cpp delete mode 100644 src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/wait.cpp create mode 100644 src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/exec_graph_info.cpp create mode 100644 src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/properties_tests.cpp create mode 100644 src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_plugin/auto_batching_tests.cpp delete mode 100644 src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/plugin/auto_batching_tests.cpp create mode 100644 src/tests/functional/plugin/shared/include/behavior/ov_plugin/auto_batching_tests.hpp create mode 100644 src/tests/functional/plugin/shared/src/behavior/ov_executable_network/exec_graph_info.cpp diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/executable_network/exec_graph_info.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/executable_network/exec_graph_info.cpp deleted file mode 100644 index 803163916c9c15..00000000000000 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/executable_network/exec_graph_info.cpp +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include "behavior/executable_network/exec_graph_info.hpp" - -namespace { - -using namespace ExecutionGraphTests; - -INSTANTIATE_TEST_SUITE_P(smoke_serialization, ExecGraphSerializationTest, - ::testing::Values(ov::test::utils::DEVICE_GPU), - 
ExecGraphSerializationTest::getTestCaseName); - -const std::vector netPrecisions = { - InferenceEngine::Precision::FP32 -}; - -INSTANTIATE_TEST_SUITE_P(smoke_NoReshape, ExecGraphUniqueNodeNames, - ::testing::Combine( - ::testing::ValuesIn(netPrecisions), - ::testing::Values(InferenceEngine::SizeVector({1, 2, 5, 5})), - ::testing::Values(ov::test::utils::DEVICE_GPU)), - ExecGraphUniqueNodeNames::getTestCaseName); - -} // namespace - diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/callback.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/callback.cpp deleted file mode 100644 index 6cec708b6756ea..00000000000000 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/callback.cpp +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "behavior/infer_request/callback.hpp" - -using namespace BehaviorTestsDefinitions; -namespace { -auto configs = []() { - return std::vector>{ - {}, - }; -}; - -auto autoBatchConfigs = []() { - return std::vector>{ - // explicit batch size 4 to avoid fallback to no auto-batching (i.e. 
plain GPU) - {{CONFIG_KEY(AUTO_BATCH_DEVICE_CONFIG), std::string(ov::test::utils::DEVICE_GPU) + "(4)"}, - // no timeout to avoid increasing the test time - {CONFIG_KEY(AUTO_BATCH_TIMEOUT), "0 "}}}; -}; - -INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, InferRequestCallbackTests, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_GPU), - ::testing::ValuesIn(configs())), - InferRequestCallbackTests::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_AutoBatch_BehaviorTests, InferRequestCallbackTests, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_BATCH), - ::testing::ValuesIn(autoBatchConfigs())), - InferRequestCallbackTests::getTestCaseName); -} // namespace diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/config.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/config.cpp deleted file mode 100644 index 62b0bd6fa64f9e..00000000000000 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/config.cpp +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "behavior/infer_request/config.hpp" - -using namespace BehaviorTestsDefinitions; -namespace { -auto configs = []() { - return std::vector>{{}}; -}; - -INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, InferRequestConfigTest, - ::testing::Combine( - ::testing::Values(1u), - ::testing::Values(ov::test::utils::DEVICE_GPU), - ::testing::ValuesIn(configs())), - InferRequestConfigTest::getTestCaseName); -} // namespace diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/multithreading.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/multithreading.cpp deleted file mode 100644 index 484fee15c99d44..00000000000000 --- 
a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/multithreading.cpp +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include - -#include "behavior/infer_request/multithreading.hpp" -#include "ie_plugin_config.hpp" - -using namespace BehaviorTestsDefinitions; -namespace { -auto auto_batch_configs = []() { - return std::vector>{ - // explicit batch size 4 to avoid fallback to no auto-batching (i.e. plain GPU) - {{CONFIG_KEY(AUTO_BATCH_DEVICE_CONFIG), std::string(ov::test::utils::DEVICE_GPU) + "(4)"}, - // no timeout to avoid increasing the test time - {CONFIG_KEY(AUTO_BATCH_TIMEOUT), "0 "}}}; -}; - -INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, InferRequestMultithreadingTests, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_GPU), - ::testing::Values(std::map({}))), - InferRequestMultithreadingTests::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_AutoBatch_BehaviorTests, InferRequestMultithreadingTests, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_BATCH), - ::testing::ValuesIn(auto_batch_configs())), - InferRequestMultithreadingTests::getTestCaseName); -} // namespace diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/perf_counters.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/perf_counters.cpp deleted file mode 100644 index 3165b94647180e..00000000000000 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/perf_counters.cpp +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "behavior/infer_request/perf_counters.hpp" - -using namespace BehaviorTestsDefinitions; -namespace { -auto configs = []() { - return std::vector>{{}}; -}; - -auto AutoBatchConfigs = - []() { - return std::vector>{ - // explicit batch 
size 4 to avoid fallback to no auto-batching (i.e. plain GPU) - {{CONFIG_KEY(AUTO_BATCH_DEVICE_CONFIG), std::string(ov::test::utils::DEVICE_GPU) + "(4)"}, - // no timeout to avoid increasing the test time - {CONFIG_KEY(AUTO_BATCH_TIMEOUT), "0 "}}}; - }; - -INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, - InferRequestPerfCountersTest, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_GPU), - ::testing::ValuesIn(configs())), - InferRequestPerfCountersTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_AutoBatch_BehaviorTests, - InferRequestPerfCountersTest, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_BATCH), - ::testing::ValuesIn(AutoBatchConfigs())), - InferRequestPerfCountersTest::getTestCaseName); -} // namespace diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/set_blob_by_type.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/set_blob_by_type.cpp deleted file mode 100644 index 7ffee02692fbb4..00000000000000 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/set_blob_by_type.cpp +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "behavior/infer_request/set_blob_by_type.hpp" -#include "common_test_utils/test_constants.hpp" - -using namespace BehaviorTestsDefinitions; -using namespace InferenceEngine; - -const std::vector BlobTypes = { - FuncTestUtils::BlobType::Compound, - FuncTestUtils::BlobType::Batched, - FuncTestUtils::BlobType::Memory, -}; - -auto gpuConfig = []() { - return std::map{}; -}; // nothing special - -auto heteroConfig = []() { - return std::map{{"TARGET_FALLBACK", ov::test::utils::DEVICE_GPU}}; -}; - -INSTANTIATE_TEST_SUITE_P(smoke_Behavior, InferRequestSetBlobByType, - ::testing::Combine(::testing::ValuesIn(BlobTypes), - ::testing::Values(ov::test::utils::DEVICE_GPU), - ::testing::Values(gpuConfig())), 
- InferRequestSetBlobByType::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Behavior_Hetero, InferRequestSetBlobByType, - ::testing::Combine(::testing::ValuesIn(BlobTypes), - ::testing::Values(ov::test::utils::DEVICE_HETERO), - ::testing::Values(heteroConfig())), - InferRequestSetBlobByType::getTestCaseName); diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/wait.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/wait.cpp deleted file mode 100644 index 703fbc677524e2..00000000000000 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/wait.cpp +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include - -#include "behavior/infer_request/wait.hpp" -#include "ie_plugin_config.hpp" - -using namespace BehaviorTestsDefinitions; -namespace { -auto autoBatchConfigs = []() { - return std::vector>{ - // explicit batch size 4 to avoid fallback to no auto-batching (i.e. 
plain GPU) - {{CONFIG_KEY(AUTO_BATCH_DEVICE_CONFIG), std::string(ov::test::utils::DEVICE_GPU) + "(4)"}, - // no timeout to avoid increasing the test time - {CONFIG_KEY(AUTO_BATCH_TIMEOUT), "0 "}}}; -}; - -INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, - InferRequestWaitTests, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_GPU), - ::testing::Values(std::map({}))), - InferRequestWaitTests::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_AutoBatch_BehaviorTests, - InferRequestWaitTests, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_BATCH), - ::testing::ValuesIn(autoBatchConfigs())), - InferRequestWaitTests::getTestCaseName); - -} // namespace diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/exec_graph_info.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/exec_graph_info.cpp new file mode 100644 index 00000000000000..d1a31fd98b9a2d --- /dev/null +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/exec_graph_info.cpp @@ -0,0 +1,18 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "behavior/ov_executable_network/exec_graph_info.hpp" + +#include "behavior/ov_plugin/properties_tests.hpp" + +namespace { +using ov::test::behavior::OVExecGraphUniqueNodeNames; + +INSTANTIATE_TEST_SUITE_P(smoke_NoReshape, OVExecGraphUniqueNodeNames, + ::testing::Combine( + ::testing::Values(ov::element::f32), + ::testing::Values(ov::Shape{1, 2, 5, 5}), + ::testing::Values(ov::test::utils::DEVICE_GPU)), + OVExecGraphUniqueNodeNames::getTestCaseName); +} // namespace diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/exec_net_base.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/exec_net_base.cpp index 951108438ed190..f9cbbc18e0662c 100644 --- 
a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/exec_net_base.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/exec_net_base.cpp @@ -31,4 +31,22 @@ INSTANTIATE_TEST_SUITE_P(smoke_AutoBatchBehaviorTests, OVCompiledModelBaseTest, ::testing::Values(ov::test::utils::DEVICE_BATCH), ::testing::ValuesIn(autoBatchConfigs())), OVCompiledModelBaseTest::getTestCaseName); + + +std::vector convert_types = {ov::element::f16, + ov::element::i64}; + +INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, CompiledModelSetType, + ::testing::Combine( + ::testing::ValuesIn(convert_types), + ::testing::Values(ov::test::utils::DEVICE_GPU), + ::testing::ValuesIn(configs())), + CompiledModelSetType::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_AutoBatchBehaviorTests, CompiledModelSetType, + ::testing::Combine( + ::testing::ValuesIn(convert_types), + ::testing::Values(ov::test::utils::DEVICE_BATCH), + ::testing::ValuesIn(autoBatchConfigs())), + CompiledModelSetType::getTestCaseName); } // namespace diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/properties_tests.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/properties_tests.cpp new file mode 100644 index 00000000000000..03543c889ec11c --- /dev/null +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/properties_tests.cpp @@ -0,0 +1,17 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "behavior/ov_infer_request/properties_tests.hpp" + +namespace { +using ov::test::behavior::InferRequestPropertiesTest; + +const std::vector configs = {{}}; +INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, + InferRequestPropertiesTest, + ::testing::Combine(::testing::Values(1u), + ::testing::Values(ov::test::utils::DEVICE_GPU), + 
::testing::ValuesIn(configs)), + InferRequestPropertiesTest::getTestCaseName); +} // namespace diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_plugin/auto_batching_tests.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_plugin/auto_batching_tests.cpp new file mode 100644 index 00000000000000..a3289b917c5367 --- /dev/null +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_plugin/auto_batching_tests.cpp @@ -0,0 +1,34 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// +#include "behavior/ov_plugin/auto_batching_tests.hpp" +#include "openvino/runtime/properties.hpp" + + +const std::vector num_streams{ 2 }; +const std::vector get_vs_set{ true, false }; +const std::vector num_requests{ 1, 8, 16, 64 }; +const std::vector num_batch{ 1, 8, 32, 256 }; + +namespace { +using ov::test::behavior::AutoBatching_Test; +using ov::test::behavior::AutoBatching_Test_DetectionOutput; + +INSTANTIATE_TEST_SUITE_P(smoke_AutoBatching_GPU, AutoBatching_Test, + ::testing::Combine( + ::testing::Values(ov::test::utils::DEVICE_GPU), + ::testing::ValuesIn(get_vs_set), + ::testing::ValuesIn(num_streams), + ::testing::ValuesIn(num_requests), + ::testing::ValuesIn(num_batch)), + AutoBatching_Test::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_AutoBatching_GPU, AutoBatching_Test_DetectionOutput, + ::testing::Combine( + ::testing::Values(ov::test::utils::DEVICE_GPU), + ::testing::ValuesIn(get_vs_set), + ::testing::ValuesIn(num_streams), + ::testing::ValuesIn(num_requests), + ::testing::ValuesIn(num_batch)), + AutoBatching_Test_DetectionOutput::getTestCaseName); +} // namespace diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/plugin/auto_batching_tests.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/plugin/auto_batching_tests.cpp deleted file mode 100644 index f262e2eb347ee3..00000000000000 --- 
a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/plugin/auto_batching_tests.cpp +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// -#include "behavior/plugin/auto_batching_tests.hpp" -#include "behavior/plugin/configuration_tests.hpp" -#include "openvino/runtime/properties.hpp" - - -const std::vector num_streams{ 2 }; -const std::vector get_vs_set{ true, false }; -const std::vector num_requests{ 1, 8, 16, 64 }; -const std::vector num_batch{ 1, 8, 32, 256 }; -using namespace AutoBatchingTests; -using namespace BehaviorTestsDefinitions; - -namespace AutoBatchingTests { - -INSTANTIATE_TEST_SUITE_P(smoke_AutoBatching_GPU, AutoBatching_Test, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_GPU), - ::testing::ValuesIn(get_vs_set), - ::testing::ValuesIn(num_streams), - ::testing::ValuesIn(num_requests), - ::testing::ValuesIn(num_batch)), - AutoBatching_Test::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_AutoBatching_GPU, AutoBatching_Test_DetectionOutput, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_GPU), - ::testing::ValuesIn(get_vs_set), - ::testing::ValuesIn(num_streams), - ::testing::ValuesIn(num_requests), - ::testing::ValuesIn(num_batch)), - AutoBatching_Test_DetectionOutput::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P( - smoke_AutoBatching_GPU, - DefaultConfigurationTest, - ::testing::Combine( - ::testing::Values(std::string(ov::test::utils::DEVICE_BATCH) + ":" + ov::test::utils::DEVICE_GPU), - ::testing::Values(DefaultParameter{CONFIG_KEY(AUTO_BATCH_TIMEOUT), - InferenceEngine::Parameter{"1000"}})), - DefaultConfigurationTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P( - smoke_AutoBatching_GPU_2_0_string, - DefaultConfigurationTest, - ::testing::Combine( - ::testing::Values(std::string(ov::test::utils::DEVICE_BATCH) + ":" + ov::test::utils::DEVICE_GPU), - ::testing::Values(DefaultParameter{ov::auto_batch_timeout.name(), - 
InferenceEngine::Parameter{"1000"}})), - DefaultConfigurationTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P( - smoke_AutoBatching_GPU_2_0_uint, - DefaultConfigurationTest, - ::testing::Combine( - ::testing::Values(std::string(ov::test::utils::DEVICE_BATCH) + ":" + ov::test::utils::DEVICE_GPU), - ::testing::Values(DefaultParameter{ov::auto_batch_timeout.name(), - InferenceEngine::Parameter{uint32_t(1000)}})), - DefaultConfigurationTest::getTestCaseName); -} // namespace AutoBatchingTests diff --git a/src/tests/functional/plugin/shared/include/behavior/compiled_model/compiled_model_base.hpp b/src/tests/functional/plugin/shared/include/behavior/compiled_model/compiled_model_base.hpp index 5ea0f5425274da..6692159f296e29 100644 --- a/src/tests/functional/plugin/shared/include/behavior/compiled_model/compiled_model_base.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/compiled_model/compiled_model_base.hpp @@ -635,6 +635,83 @@ TEST_P(OVAutoExecutableNetworkTest, AutoNotImplementedSetConfigToExecNet) { EXPECT_ANY_THROW(execNet.set_property(config)); } +typedef std::tuple< + ov::element::Type, // Type to convert + std::string, // Device name + ov::AnyMap // Config +> CompiledModelSetTypeParams; + +class CompiledModelSetType : public testing::WithParamInterface, + public OVCompiledNetworkTestBase { +public: + static std::string getTestCaseName(testing::TestParamInfo obj) { + ov::element::Type convert_type; + std::string target_device; + ov::AnyMap configuration; + std::tie(convert_type, target_device, configuration) = obj.param; + std::replace(target_device.begin(), target_device.end(), ':', '.'); + + std::ostringstream result; + result << "ConvertType=" << convert_type.get_type_name() << "_"; + result << "targetDevice=" << target_device << "_"; + for (auto& configItem : configuration) { + result << "configItem=" << configItem.first << "_"; + configItem.second.print(result); + result << "_"; + } + return result.str(); + } + + void SetUp() override { + 
std::tie(convert_type, target_device, configuration) = this->GetParam(); + SKIP_IF_CURRENT_TEST_IS_DISABLED() + APIBaseTest::SetUp(); + } + void TearDown() override { + if (!configuration.empty()) { + PluginCache::get().reset(); + } + APIBaseTest::TearDown(); + } + + ov::element::Type convert_type; + ov::AnyMap configuration; +}; + +TEST_P(CompiledModelSetType, canSetInputTypeAndCompileModel) { + auto model = ov::test::utils::make_conv_pool_relu(); + + ov::Core core = createCoreWithTemplate(); + auto ppp = ov::preprocess::PrePostProcessor(model); + auto& input = ppp.input(); + input.preprocess().convert_element_type(convert_type); + model = ppp.build(); + ASSERT_NO_THROW(core.compile_model(model, target_device, configuration)); +} + +TEST_P(CompiledModelSetType, canSetOutputTypeAndCompileModel) { + auto model = ov::test::utils::make_conv_pool_relu(); + + ov::Core core = createCoreWithTemplate(); + auto ppp = ov::preprocess::PrePostProcessor(model); + auto& output = ppp.output(); + output.postprocess().convert_element_type(convert_type); + model = ppp.build(); + ASSERT_NO_THROW(core.compile_model(model, target_device, configuration)); +} + +TEST_P(CompiledModelSetType, canSetInputOutputTypeAndCompileModel) { + auto model = ov::test::utils::make_conv_pool_relu(); + + ov::Core core = createCoreWithTemplate(); + auto ppp = ov::preprocess::PrePostProcessor(model); + auto& input = ppp.input(); + input.preprocess().convert_element_type(convert_type); + auto& output = ppp.output(); + output.postprocess().convert_element_type(convert_type); + model = ppp.build(); + ASSERT_NO_THROW(core.compile_model(model, target_device, configuration)); +} } // namespace behavior } // namespace test } // namespace ov \ No newline at end of file diff --git a/src/tests/functional/plugin/shared/include/behavior/ov_executable_network/exec_graph_info.hpp b/src/tests/functional/plugin/shared/include/behavior/ov_executable_network/exec_graph_info.hpp index 010704bedd30b1..4ebcfb1c34beb3 100644 
--- a/src/tests/functional/plugin/shared/include/behavior/ov_executable_network/exec_graph_info.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/ov_executable_network/exec_graph_info.hpp @@ -1,19 +1,13 @@ // Copyright (C) 2018-2023 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // +#pragma once #include -#include -#include -#include +#include "exec_graph_info.hpp" #include "base/ov_behavior_test_utils.hpp" -#include "common_test_utils/ov_test_utils.hpp" -#include "common_test_utils/common_utils.hpp" -#include "common_test_utils/file_utils.hpp" - -#include "functional_test_utils/plugin_cache.hpp" -#include "common_test_utils/subgraph_builders/multiple_input_output_double_concat.hpp" +#include "shared_test_classes/base/ov_subgraph.hpp" namespace ov { namespace test { @@ -28,40 +22,11 @@ typedef std::tuple< class OVExecGraphImportExportTest : public testing::WithParamInterface, public OVCompiledNetworkTestBase { public: - static std::string getTestCaseName(testing::TestParamInfo obj) { - ov::element::Type_t elementType; - std::string targetDevice; - ov::AnyMap configuration; - std::tie(elementType, targetDevice, configuration) = obj.param; - std::replace(targetDevice.begin(), targetDevice.end(), ':', '.'); - std::ostringstream result; - result << "targetDevice=" << targetDevice << "_"; - result << "elementType=" << elementType << "_"; - if (!configuration.empty()) { - result << "config=("; - for (const auto& config : configuration) { - result << config.first << "="; - config.second.print(result); - result << "_"; - } - result << ")"; - } - return result.str(); - } + static std::string getTestCaseName(testing::TestParamInfo obj); - void SetUp() override { - std::tie(elementType, target_device, configuration) = this->GetParam(); - // Skip test according to plugin specific disabledTestPatterns() (if any) - SKIP_IF_CURRENT_TEST_IS_DISABLED(); - APIBaseTest::SetUp(); - } + void SetUp() override; - void TearDown() override { - if
(!configuration.empty()) { - utils::PluginCache::get().reset(); - } - APIBaseTest::TearDown(); - } + void TearDown() override; protected: std::shared_ptr core = utils::PluginCache::get().core(); @@ -70,431 +35,15 @@ class OVExecGraphImportExportTest : public testing::WithParamInterface function; }; -TEST_P(OVExecGraphImportExportTest, importExportedFunction) { - if (target_device == ov::test::utils::DEVICE_MULTI || target_device == ov::test::utils::DEVICE_AUTO) { - GTEST_SKIP() << "MULTI / AUTO does not support import / export" << std::endl; - } - - ov::CompiledModel execNet; - // Create simple function - function = ov::test::utils::make_multiple_input_output_double_concat({1, 2, 24, 24}, elementType); - execNet = core->compile_model(function, target_device, configuration); - - std::stringstream strm; - execNet.export_model(strm); - - ov::CompiledModel importedExecNet = core->import_model(strm, target_device, configuration); - EXPECT_EQ(function->inputs().size(), 2); - EXPECT_EQ(function->inputs().size(), importedExecNet.inputs().size()); - EXPECT_THROW(importedExecNet.input(), ov::Exception); - EXPECT_EQ(function->input(0).get_tensor().get_names(), importedExecNet.input(0).get_tensor().get_names()); - EXPECT_EQ(function->input(0).get_tensor().get_partial_shape(), - importedExecNet.input(0).get_tensor().get_partial_shape()); - EXPECT_EQ(function->input(0).get_tensor().get_element_type(), - importedExecNet.input(0).get_tensor().get_element_type()); - EXPECT_EQ(function->input(0).get_element_type(), - importedExecNet.input(0).get_tensor().get_element_type()); - EXPECT_EQ(function->input(1).get_tensor().get_names(), importedExecNet.input(1).get_tensor().get_names()); - EXPECT_EQ(function->input(1).get_tensor().get_partial_shape(), - importedExecNet.input(1).get_tensor().get_partial_shape()); - EXPECT_EQ(function->input(1).get_tensor().get_element_type(), - importedExecNet.input(1).get_tensor().get_element_type()); - EXPECT_EQ(function->input(1).get_element_type(), - 
importedExecNet.input(1).get_tensor().get_element_type()); - EXPECT_EQ(importedExecNet.input(0).get_node(), importedExecNet.input("data1").get_node()); - EXPECT_NE(importedExecNet.input(1).get_node(), importedExecNet.input("data1").get_node()); - EXPECT_EQ(importedExecNet.input(1).get_node(), importedExecNet.input("data2").get_node()); - EXPECT_NE(importedExecNet.input(0).get_node(), importedExecNet.input("data2").get_node()); - EXPECT_EQ(function->outputs().size(), 2); - EXPECT_EQ(function->outputs().size(), importedExecNet.outputs().size()); - EXPECT_THROW(importedExecNet.output(), ov::Exception); - EXPECT_EQ(function->output(0).get_tensor().get_names(), importedExecNet.output(0).get_tensor().get_names()); - EXPECT_EQ(function->output(0).get_tensor().get_partial_shape(), - importedExecNet.output(0).get_tensor().get_partial_shape()); - EXPECT_EQ(function->output(0).get_tensor().get_element_type(), - importedExecNet.output(0).get_tensor().get_element_type()); - EXPECT_EQ(function->output(0).get_element_type(), - importedExecNet.output(0).get_tensor().get_element_type()); - EXPECT_EQ(function->output(1).get_tensor().get_names(), importedExecNet.output(1).get_tensor().get_names()); - EXPECT_EQ(function->output(1).get_tensor().get_partial_shape(), - importedExecNet.output(1).get_tensor().get_partial_shape()); - EXPECT_EQ(function->output(1).get_tensor().get_element_type(), - importedExecNet.output(1).get_tensor().get_element_type()); - EXPECT_EQ(function->output(1).get_element_type(), - importedExecNet.output(1).get_tensor().get_element_type()); - EXPECT_EQ(importedExecNet.output(0).get_node(), importedExecNet.output("concat1").get_node()); - EXPECT_NE(importedExecNet.output(1).get_node(), importedExecNet.output("concat1").get_node()); - EXPECT_EQ(importedExecNet.output(1).get_node(), importedExecNet.output("concat2").get_node()); - EXPECT_NE(importedExecNet.output(0).get_node(), importedExecNet.output("concat2").get_node()); - 
EXPECT_THROW(importedExecNet.input("param1"), ov::Exception); - EXPECT_THROW(importedExecNet.input("param2"), ov::Exception); - EXPECT_THROW(importedExecNet.output("result1"), ov::Exception); - EXPECT_THROW(importedExecNet.output("result2"), ov::Exception); -} - -TEST_P(OVExecGraphImportExportTest, importExportedFunctionParameterResultOnly) { - if (target_device == ov::test::utils::DEVICE_MULTI || target_device == ov::test::utils::DEVICE_AUTO) { - GTEST_SKIP() << "MULTI / AUTO does not support import / export" << std::endl; - } - - // Create a simple function - { - auto param = std::make_shared(elementType, ngraph::Shape({1, 3, 24, 24})); - param->set_friendly_name("param"); - param->output(0).get_tensor().set_names({"data"}); - auto result = std::make_shared(param); - result->set_friendly_name("result"); - function = std::make_shared(ngraph::ResultVector{result}, - ngraph::ParameterVector{param}); - function->set_friendly_name("ParamResult"); - } - - auto execNet = core->compile_model(function, target_device, configuration); - std::stringstream strm; - execNet.export_model(strm); - - ov::CompiledModel importedCompiledModel = core->import_model(strm, target_device, configuration); - EXPECT_EQ(function->inputs().size(), 1); - EXPECT_EQ(function->inputs().size(), importedCompiledModel.inputs().size()); - EXPECT_NO_THROW(importedCompiledModel.input()); - EXPECT_NO_THROW(importedCompiledModel.input("data").get_node()); - EXPECT_THROW(importedCompiledModel.input("param"), ov::Exception); - - EXPECT_EQ(function->outputs().size(), 1); - EXPECT_EQ(function->outputs().size(), importedCompiledModel.outputs().size()); - EXPECT_NO_THROW(importedCompiledModel.output()); - EXPECT_EQ(function->output(0).get_tensor().get_names(), - importedCompiledModel.output(0).get_tensor().get_names()); - EXPECT_NO_THROW(importedCompiledModel.output("data").get_node()); - EXPECT_THROW(importedCompiledModel.output("param"), ov::Exception); - - EXPECT_EQ(ov::element::Type(elementType), 
importedCompiledModel.input("data").get_element_type()); - EXPECT_EQ(ov::element::Type(elementType), importedCompiledModel.output("data").get_element_type()); -} - -TEST_P(OVExecGraphImportExportTest, importExportedFunctionConstantResultOnly) { - if (target_device == ov::test::utils::DEVICE_MULTI || target_device == ov::test::utils::DEVICE_AUTO) { - GTEST_SKIP() << "MULTI / AUTO does not support import / export" << std::endl; - } - - // Create a simple function - { - auto constant = std::make_shared(elementType, ngraph::Shape({1, 3, 24, 24})); - constant->set_friendly_name("constant"); - constant->output(0).get_tensor().set_names({"data"}); - auto result = std::make_shared(constant); - result->set_friendly_name("result"); - function = std::make_shared(ngraph::ResultVector{result}, - ngraph::ParameterVector{}); - function->set_friendly_name("ConstResult"); - } - - auto execNet = core->compile_model(function, target_device, configuration); - std::stringstream strm; - execNet.export_model(strm); - - ov::CompiledModel importedCompiledModel = core->import_model(strm, target_device, configuration); - EXPECT_EQ(function->inputs().size(), 0); - EXPECT_EQ(function->inputs().size(), importedCompiledModel.inputs().size()); - EXPECT_THROW(importedCompiledModel.input(), ov::Exception); - EXPECT_THROW(importedCompiledModel.input("data"), ov::Exception); - EXPECT_THROW(importedCompiledModel.input("constant"), ov::Exception); - - EXPECT_EQ(function->outputs().size(), 1); - EXPECT_EQ(function->outputs().size(), importedCompiledModel.outputs().size()); - EXPECT_NO_THROW(importedCompiledModel.output()); - EXPECT_EQ(function->output(0).get_tensor().get_names(), - importedCompiledModel.output(0).get_tensor().get_names()); - EXPECT_NO_THROW(importedCompiledModel.output("data").get_node()); - EXPECT_THROW(importedCompiledModel.output("constant"), ov::Exception); - - EXPECT_EQ(ov::element::Type(elementType), importedCompiledModel.output("data").get_element_type()); -} - 
-TEST_P(OVExecGraphImportExportTest, readFromV10IR) { - std::string model = R"V0G0N( - - - - - - - 1 - 3 - 22 - 22 - - - - - - - - 1 - 3 - 22 - 22 - - - - - - - 1 - 3 - 22 - 22 - - - 1 - 3 - 22 - 22 - - - - - 1 - 6 - 22 - 22 - - - - - - - 1 - 6 - 22 - 22 - - - - - - - - - - - )V0G0N"; - function = core->read_model(model, ov::Tensor()); - EXPECT_EQ(function->inputs().size(), 2); - EXPECT_EQ(function->outputs().size(), 1); - EXPECT_NO_THROW(function->input("in1")); // remove if read_model does not change function names - EXPECT_NO_THROW(function->input("in2")); // remove if read_model does not change function names - EXPECT_NO_THROW(function->output("concat")); // remove if read_model does not change function names - - ov::CompiledModel execNet = core->compile_model(function, target_device, configuration); - EXPECT_EQ(execNet.inputs().size(), 2); - EXPECT_EQ(execNet.outputs().size(), 1); - EXPECT_NO_THROW(execNet.input("in1")); - EXPECT_NO_THROW(execNet.input("in2")); - EXPECT_NO_THROW(execNet.output("concat")); - - if (target_device == ov::test::utils::DEVICE_MULTI || target_device == ov::test::utils::DEVICE_AUTO) { - GTEST_SKIP() << "MULTI / AUTO does not support import / export" << std::endl; - } - - std::stringstream strm; - execNet.export_model(strm); - - ov::CompiledModel importedExecNet = core->import_model(strm, target_device, configuration); - EXPECT_EQ(importedExecNet.inputs().size(), 2); - EXPECT_EQ(importedExecNet.outputs().size(), 1); - EXPECT_NO_THROW(importedExecNet.input("in1")); - EXPECT_NO_THROW(importedExecNet.input("in2")); - EXPECT_NO_THROW(importedExecNet.output("concat")); - - EXPECT_EQ(importedExecNet.input("in1").get_element_type(), ov::element::f32); - EXPECT_EQ(importedExecNet.input("in2").get_element_type(), ov::element::f32); - EXPECT_EQ(importedExecNet.output().get_element_type(), ov::element::f32); -} - -static std::map any_copy(const ov::AnyMap& params) { - auto to_config_string = [] (const Any& any) -> std::string { - if (any.is()) { 
- return any.as() ? "YES" : "NO"; - } else { - std::stringstream strm; - any.print(strm); - return strm.str(); - } - }; - std::map result; - for (auto&& value : params) { - result.emplace(value.first, to_config_string(value.second)); - } - return result; -} - -TEST_P(OVExecGraphImportExportTest, importExportedIENetwork) { - if (target_device == ov::test::utils::DEVICE_MULTI || target_device == ov::test::utils::DEVICE_AUTO) { - GTEST_SKIP() << "MULTI / AUTO does not support import / export" << std::endl; - } - - std::shared_ptr ie = ::PluginCache::get().ie(); - InferenceEngine::ExecutableNetwork execNet; - - // Create simple function - function = ov::test::utils::make_multiple_input_output_double_concat({1, 2, 24, 24}, elementType); - - execNet = ie->LoadNetwork(InferenceEngine::CNNNetwork(function), target_device, any_copy(configuration)); +class OVExecGraphUniqueNodeNames : public testing::WithParamInterface, + public OVCompiledNetworkTestBase { +public: + static std::string getTestCaseName(testing::TestParamInfo obj); + void SetUp() override; - std::stringstream strm; - execNet.Export(strm); - - ov::CompiledModel importedExecNet = core->import_model(strm, target_device, configuration); - EXPECT_EQ(function->inputs().size(), 2); - EXPECT_EQ(function->inputs().size(), importedExecNet.inputs().size()); - EXPECT_THROW(importedExecNet.input(), ov::Exception); - EXPECT_NO_THROW(importedExecNet.input("data1").get_node()); - EXPECT_NO_THROW(importedExecNet.input("data2").get_node()); - EXPECT_NO_THROW(importedExecNet.input("param1").get_node()); - EXPECT_NO_THROW(importedExecNet.input("param2").get_node()); - EXPECT_EQ(function->outputs().size(), 2); - EXPECT_EQ(function->outputs().size(), importedExecNet.outputs().size()); - EXPECT_THROW(importedExecNet.output(), ov::Exception); - EXPECT_NE(function->output(0).get_tensor().get_names(), - importedExecNet.output(0).get_tensor().get_names()); - EXPECT_NO_THROW(importedExecNet.output("concat_op1").get_node()); - 
EXPECT_NO_THROW(importedExecNet.output("concat_op2").get_node()); - EXPECT_NO_THROW(importedExecNet.output("concat1").get_node()); - EXPECT_NO_THROW(importedExecNet.output("concat2").get_node()); - - const auto outputType = elementType == ngraph::element::i32 || - elementType == ngraph::element::u32 || - elementType == ngraph::element::i64 || - elementType == ngraph::element::u64 ? ngraph::element::i32 : ngraph::element::f32; - const auto inputType = elementType == ngraph::element::f16 ? ngraph::element::Type_t::f32 : elementType; - - EXPECT_EQ(inputType, importedExecNet.input("param1").get_element_type()); - EXPECT_EQ(inputType, importedExecNet.input("param2").get_element_type()); - EXPECT_EQ(outputType, importedExecNet.output("concat2").get_element_type()); - EXPECT_EQ(outputType, importedExecNet.output("concat1").get_element_type()); -} - -TEST_P(OVExecGraphImportExportTest, importExportedIENetworkParameterResultOnly) { - if (target_device == ov::test::utils::DEVICE_MULTI || target_device == ov::test::utils::DEVICE_AUTO) { - GTEST_SKIP() << "MULTI / AUTO does not support import / export" << std::endl; - } - - // New plugin API wraps CNNNetwork conversions into model, it is why parameter->result graphs won't work in legacy API with new plugin - std::shared_ptr core = ov::test::utils::PluginCache::get().core(); - ov::CompiledModel compiled_model; - - // Create a simple function - { - auto param = std::make_shared(elementType, ngraph::Shape({1, 3, 24, 24})); - param->set_friendly_name("param"); - param->output(0).get_tensor().set_names({"data"}); - auto result = std::make_shared(param); - result->set_friendly_name("result"); - function = std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{param}); - function->set_friendly_name("ParamResult"); - } - compiled_model = core->compile_model(function, target_device, configuration); - - auto inputPrecision = compiled_model.input().get_element_type(); - auto outputPrecision = 
compiled_model.output().get_element_type(); - - std::stringstream strm; - compiled_model.export_model(strm); - - ov::CompiledModel importedCompiledModel = core->import_model(strm, target_device, configuration); - EXPECT_EQ(function->inputs().size(), 1); - EXPECT_EQ(function->inputs().size(), importedCompiledModel.inputs().size()); - EXPECT_NO_THROW(importedCompiledModel.input()); - EXPECT_NO_THROW(importedCompiledModel.input("data").get_node()); - - EXPECT_EQ(function->outputs().size(), 1); - EXPECT_EQ(function->outputs().size(), importedCompiledModel.outputs().size()); - EXPECT_NO_THROW(importedCompiledModel.output()); - EXPECT_EQ(function->output(0).get_tensor().get_names(), importedCompiledModel.output(0).get_tensor().get_names()); - EXPECT_NO_THROW(importedCompiledModel.output("data").get_node()); - - EXPECT_EQ(inputPrecision, importedCompiledModel.input("data").get_element_type()); - EXPECT_EQ(outputPrecision, importedCompiledModel.output("data").get_element_type()); -} - -TEST_P(OVExecGraphImportExportTest, importExportedIENetworkConstantResultOnly) { - if (target_device == ov::test::utils::DEVICE_MULTI || target_device == ov::test::utils::DEVICE_AUTO) { - GTEST_SKIP() << "MULTI / AUTO does not support import / export" << std::endl; - } - - std::shared_ptr ie = ::PluginCache::get().ie(); - InferenceEngine::ExecutableNetwork execNet; - - // Create a simple function - { - auto constant = std::make_shared(elementType, ngraph::Shape({1, 3, 24, 24})); - constant->set_friendly_name("constant"); - constant->output(0).get_tensor().set_names({"data"}); - auto result = std::make_shared(constant); - result->set_friendly_name("result"); - function = std::make_shared(ngraph::ResultVector{result}, - ngraph::ParameterVector{}); - function->set_friendly_name("ConstResult"); - } - execNet = ie->LoadNetwork(InferenceEngine::CNNNetwork(function), target_device, any_copy(configuration)); - - auto outputPrecision = 
InferenceEngine::details::convertPrecision(execNet.GetOutputsInfo().at("constant")->getPrecision()); - - std::stringstream strm; - execNet.Export(strm); - - ov::CompiledModel importedCompiledModel = core->import_model(strm, target_device, configuration); - EXPECT_EQ(function->inputs().size(), 0); - EXPECT_EQ(function->inputs().size(), importedCompiledModel.inputs().size()); - EXPECT_THROW(importedCompiledModel.input(), ov::Exception); - EXPECT_THROW(importedCompiledModel.input("data"), ov::Exception); - EXPECT_THROW(importedCompiledModel.input("constant"), ov::Exception); - - EXPECT_EQ(function->outputs().size(), 1); - EXPECT_EQ(function->outputs().size(), importedCompiledModel.outputs().size()); - EXPECT_NO_THROW(importedCompiledModel.output()); - EXPECT_NE(function->output(0).get_tensor().get_names(), - importedCompiledModel.output(0).get_tensor().get_names()); - - EXPECT_NO_THROW(importedCompiledModel.output("data").get_node()); - EXPECT_NO_THROW(importedCompiledModel.output("constant").get_node()); - EXPECT_EQ(outputPrecision, importedCompiledModel.output("data").get_element_type()); - EXPECT_EQ(outputPrecision, importedCompiledModel.output("constant").get_element_type()); -} - -TEST_P(OVExecGraphImportExportTest, ieImportExportedFunction) { - if (target_device == ov::test::utils::DEVICE_MULTI || target_device == ov::test::utils::DEVICE_AUTO) { - GTEST_SKIP() << "MULTI / AUTO does not support import / export" << std::endl; - } - - std::shared_ptr ie = ::PluginCache::get().ie(); - ov::CompiledModel execNet; - - // Create simple function - function = ov::test::utils::make_multiple_input_output_double_concat({1, 2, 24, 24}, elementType); - execNet = core->compile_model(function, target_device, configuration); - - std::stringstream strm; - execNet.export_model(strm); - - InferenceEngine::ExecutableNetwork importedExecNet = ie->ImportNetwork(strm, target_device, any_copy(configuration)); - EXPECT_EQ(function->inputs().size(), 2); - 
EXPECT_EQ(function->inputs().size(), importedExecNet.GetInputsInfo().size()); - EXPECT_NO_THROW(importedExecNet.GetInputsInfo()["param1"]); - EXPECT_NO_THROW(importedExecNet.GetInputsInfo()["param2"]); - EXPECT_EQ(function->outputs().size(), 2); - EXPECT_EQ(function->outputs().size(), importedExecNet.GetOutputsInfo().size()); - EXPECT_NO_THROW(importedExecNet.GetOutputsInfo()["concat_op1"]); - EXPECT_NO_THROW(importedExecNet.GetOutputsInfo()["concat_op2"]); - - const auto prc = InferenceEngine::details::convertPrecision(elementType); - - EXPECT_EQ(prc, importedExecNet.GetInputsInfo()["param1"]->getPrecision()); - EXPECT_EQ(prc, importedExecNet.GetInputsInfo()["param2"]->getPrecision()); - EXPECT_EQ(prc, importedExecNet.GetOutputsInfo()["concat_op2"]->getPrecision()); - EXPECT_EQ(prc, importedExecNet.GetOutputsInfo()["concat_op1"]->getPrecision()); -} +protected: + std::shared_ptr fnPtr; +}; } // namespace behavior } // namespace test diff --git a/src/tests/functional/plugin/shared/include/behavior/ov_plugin/auto_batching_tests.hpp b/src/tests/functional/plugin/shared/include/behavior/ov_plugin/auto_batching_tests.hpp new file mode 100644 index 00000000000000..8809f10059ed64 --- /dev/null +++ b/src/tests/functional/plugin/shared/include/behavior/ov_plugin/auto_batching_tests.hpp @@ -0,0 +1,162 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include +#include +#include + +#include "base/ov_behavior_test_utils.hpp" +#include "common_test_utils/ov_tensor_utils.hpp" +#include "common_test_utils/subgraph_builders/single_conv.hpp" +#include "common_test_utils/subgraph_builders/detection_output.hpp" +#include "common_test_utils/subgraph_builders/multi_single_conv.hpp" +#include "functional_test_utils/plugin_cache.hpp" + +namespace ov { +namespace test { +namespace behavior { +using AutoBatchTwoNetsParams = std::tuple< + std::string, // device name + bool, // get or set tensor + size_t, // number of streams + 
size_t, // number of requests + size_t>; // batch size> + +class AutoBatching_Test : public OVPluginTestBase, + public testing::WithParamInterface { +public: + static std::string getTestCaseName(const testing::TestParamInfo &obj) { + size_t streams, requests, batch; + bool use_get_tensor; + std::string target_device; + std::tie(target_device, use_get_tensor, streams, requests, batch) = obj.param; + return target_device + std::string(use_get_tensor ? "_get_tensor" : "_set_blob") + "_batch_size_" + + std::to_string(batch) + + "_num_streams_" + std::to_string(streams) + "_num_req_" + std::to_string(requests); + } + +protected: + bool use_get_tensor; + size_t num_streams; + size_t num_requests; + size_t num_batch; + std::vector> fn_ptrs; + + void SetUp() override { + std::tie(target_device, use_get_tensor, num_streams, num_requests, num_batch) = this->GetParam(); + fn_ptrs = {ov::test::utils::make_single_conv(), + ov::test::utils::make_multi_single_conv()}; + }; + + void TestAutoBatch() { + auto core = ov::test::utils::PluginCache::get().core(); + + ov::OutputVector outputs; + std::vector, ov::InferRequest>> irs; + std::vector irs_ref; + std::vector outElementsCount; + + for (size_t i = 0; i < fn_ptrs.size(); ++i) { + auto model = fn_ptrs[i]; + auto inputs = model->inputs(); + for (auto const & n : inputs) { + n.get_node()->set_output_type(0, ov::element::f32, n.get_shape()); + } + ov::AnyMap config; + if (target_device.find("GPU") != std::string::npos) { + config.insert(ov::num_streams(num_streams)); + config.insert(ov::hint::inference_precision(ov::element::f32)); + } + + if (target_device.find("CPU") != std::string::npos) { + config.insert(ov::num_streams(num_streams)); + } + // minimize timeout to reduce test time + config.insert(ov::auto_batch_timeout(1)); + + auto compiled_model = core->compile_model(model, std::string(ov::test::utils::DEVICE_BATCH) + ":" + + target_device + "(" + std::to_string(num_batch) + ")", + config); + + auto network_outputs = 
model->outputs(); + ASSERT_EQ(network_outputs.size(), 1) << " Auto-Batching tests use networks with single output"; + auto const & output = network_outputs[0]; + for (size_t j = 0; j < num_requests; j++) { + outputs.push_back(output); + outElementsCount.push_back( + std::accumulate(begin(fn_ptrs[i]->get_output_shape(0)), end(fn_ptrs[i]->get_output_shape(0)), 1, + std::multiplies())); + + auto inf_req = compiled_model.create_infer_request(); + irs.push_back({model, inf_req}); + + auto compiled_model_ref = core->compile_model(model, ov::test::utils::DEVICE_TEMPLATE); + auto inf_req_ref = compiled_model_ref.create_infer_request(); + irs_ref.push_back(inf_req_ref); + + std::vector inData; + for (auto const & input : inputs) { + auto tensor = ov::test::utils::create_and_fill_tensor(input.get_element_type(), input.get_shape()); + if (use_get_tensor) + memcpy(inf_req.get_tensor(input).data(), tensor.data(), tensor.get_byte_size()); + else + inf_req.set_tensor(input, tensor); + + inf_req_ref.set_tensor(input, tensor); + } + + if (!use_get_tensor) { + auto tensor = ov::test::utils::create_and_fill_tensor(output.get_element_type(), output.get_shape()); + inf_req.set_tensor(output, tensor); + } + + inf_req_ref.infer(); + } + } + + { + for (auto& ir : irs) { + ir.second.start_async(); + } + + for (auto& ir : irs) { + ir.second.wait(); + } + } + + for (size_t i = 0; i < irs.size(); ++i) { + auto output = irs[i].first->get_results().at(0); + auto out = irs[i].second.get_tensor(output); + auto out_ref = irs_ref[i].get_tensor(output); + ov::test::utils::compare(out_ref, out); + } + } +}; + +class AutoBatching_Test_DetectionOutput : public AutoBatching_Test { +public: + void SetUp() override { + std::tie(target_device, use_get_tensor, num_streams, num_requests, num_batch) = GetParam(); + fn_ptrs = {ov::test::utils::make_detection_output(), + ov::test::utils::make_detection_output()}; + }; + + static std::string getTestCaseName(const testing::TestParamInfo &obj) { + return 
AutoBatching_Test::getTestCaseName(obj); + } +}; + +TEST_P(AutoBatching_Test, compareAutoBatchingToSingleBatch) { + SKIP_IF_CURRENT_TEST_IS_DISABLED() + TestAutoBatch(); +} + +TEST_P(AutoBatching_Test_DetectionOutput, compareAutoBatchingToSingleBatch) { + SKIP_IF_CURRENT_TEST_IS_DISABLED() + TestAutoBatch(); +} +} // namespace behavior +} // namespace test +} // namespace ov diff --git a/src/tests/functional/plugin/shared/src/behavior/ov_executable_network/exec_graph_info.cpp b/src/tests/functional/plugin/shared/src/behavior/ov_executable_network/exec_graph_info.cpp new file mode 100644 index 00000000000000..1e1b93baf5d50d --- /dev/null +++ b/src/tests/functional/plugin/shared/src/behavior/ov_executable_network/exec_graph_info.cpp @@ -0,0 +1,358 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifcorer: Apache-2.0 +// + +#include "behavior/ov_executable_network/exec_graph_info.hpp" + +#include "common_test_utils/ov_test_utils.hpp" +#include "common_test_utils/common_utils.hpp" +#include "common_test_utils/file_utils.hpp" + +#include "common_test_utils/subgraph_builders/multiple_input_outpput_double_concat.hpp" +#include "openvino/pass/serialize.hpp" + +namespace ov { +namespace test { +namespace behavior { + +std::string OVExecGraphImportExportTest::getTestCaseName(testing::TestParamInfo obj) { + ov::element::Type_t elementType; + std::string targetDevice; + ov::AnyMap configuration; + std::tie(elementType, targetDevice, configuration) = obj.param; + std::replace(targetDevice.begin(), targetDevice.end(), ':', '.'); + std::ostringstream result; + result << "targetDevice=" << targetDevice << "_"; + result << "elementType=" << elementType << "_"; + if (!configuration.empty()) { + result << "config=("; + for (const auto& config : configuration) { + result << config.first << "="; + config.second.print(result); + result << "_"; + } + result << ")"; + } + return result.str(); +} + +void OVExecGraphImportExportTest::SetUp() { + std::tie(elementType, 
target_device, configuration) = this->GetParam(); + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED(); + APIBaseTest::SetUp(); +} + +void OVExecGraphImportExportTest::TearDown() { + if (!configuration.empty()) { + utils::PluginCache::get().reset(); + } + APIBaseTest::TearDown(); +} + +TEST_P(OVExecGraphImportExportTest, importExportedFunction) { + if (target_device == ov::test::utils::DEVICE_MULTI || target_device == ov::test::utils::DEVICE_AUTO) { + GTEST_SKIP() << "MULTI / AUTO does not support import / export" << std::endl; + } + + ov::CompiledModel execNet; + // Create simple function + function = ov::test::utils::make_multiple_input_output_double_concat({1, 2, 24, 24}, elementType); + execNet = core->compile_model(function, target_device, configuration); + + std::stringstream strm; + execNet.export_model(strm); + + ov::CompiledModel importedExecNet = core->import_model(strm, target_device, configuration); + EXPECT_EQ(function->inputs().size(), 2); + EXPECT_EQ(function->inputs().size(), importedExecNet.inputs().size()); + EXPECT_THROW(importedExecNet.input(), ov::Exception); + EXPECT_EQ(function->input(0).get_tensor().get_names(), importedExecNet.input(0).get_tensor().get_names()); + EXPECT_EQ(function->input(0).get_tensor().get_partial_shape(), + importedExecNet.input(0).get_tensor().get_partial_shape()); + EXPECT_EQ(function->input(0).get_tensor().get_element_type(), + importedExecNet.input(0).get_tensor().get_element_type()); + EXPECT_EQ(function->input(0).get_element_type(), + importedExecNet.input(0).get_tensor().get_element_type()); + EXPECT_EQ(function->input(1).get_tensor().get_names(), importedExecNet.input(1).get_tensor().get_names()); + EXPECT_EQ(function->input(1).get_tensor().get_partial_shape(), + importedExecNet.input(1).get_tensor().get_partial_shape()); + EXPECT_EQ(function->input(1).get_tensor().get_element_type(), + importedExecNet.input(1).get_tensor().get_element_type()); + 
EXPECT_EQ(function->input(1).get_element_type(), + importedExecNet.input(1).get_tensor().get_element_type()); + EXPECT_EQ(importedExecNet.input(0).get_node(), importedExecNet.input("data1").get_node()); + EXPECT_NE(importedExecNet.input(1).get_node(), importedExecNet.input("data1").get_node()); + EXPECT_EQ(importedExecNet.input(1).get_node(), importedExecNet.input("data2").get_node()); + EXPECT_NE(importedExecNet.input(0).get_node(), importedExecNet.input("data2").get_node()); + EXPECT_EQ(function->outputs().size(), 2); + EXPECT_EQ(function->outputs().size(), importedExecNet.outputs().size()); + EXPECT_THROW(importedExecNet.output(), ov::Exception); + EXPECT_EQ(function->output(0).get_tensor().get_names(), importedExecNet.output(0).get_tensor().get_names()); + EXPECT_EQ(function->output(0).get_tensor().get_partial_shape(), + importedExecNet.output(0).get_tensor().get_partial_shape()); + EXPECT_EQ(function->output(0).get_tensor().get_element_type(), + importedExecNet.output(0).get_tensor().get_element_type()); + EXPECT_EQ(function->output(0).get_element_type(), + importedExecNet.output(0).get_tensor().get_element_type()); + EXPECT_EQ(function->output(1).get_tensor().get_names(), importedExecNet.output(1).get_tensor().get_names()); + EXPECT_EQ(function->output(1).get_tensor().get_partial_shape(), + importedExecNet.output(1).get_tensor().get_partial_shape()); + EXPECT_EQ(function->output(1).get_tensor().get_element_type(), + importedExecNet.output(1).get_tensor().get_element_type()); + EXPECT_EQ(function->output(1).get_element_type(), + importedExecNet.output(1).get_tensor().get_element_type()); + EXPECT_EQ(importedExecNet.output(0).get_node(), importedExecNet.output("concat1").get_node()); + EXPECT_NE(importedExecNet.output(1).get_node(), importedExecNet.output("concat1").get_node()); + EXPECT_EQ(importedExecNet.output(1).get_node(), importedExecNet.output("concat2").get_node()); + EXPECT_NE(importedExecNet.output(0).get_node(), 
importedExecNet.output("concat2").get_node()); + EXPECT_THROW(importedExecNet.input("param1"), ov::Exception); + EXPECT_THROW(importedExecNet.input("param2"), ov::Exception); + EXPECT_THROW(importedExecNet.output("result1"), ov::Exception); + EXPECT_THROW(importedExecNet.output("result2"), ov::Exception); +} + +TEST_P(OVExecGraphImportExportTest, importExportedFunctionParameterResultOnly) { + if (target_device == ov::test::utils::DEVICE_MULTI || target_device == ov::test::utils::DEVICE_AUTO) { + GTEST_SKIP() << "MULTI / AUTO does not support import / export" << std::endl; + } + + // Create a simple function + { + auto param = std::make_shared(elementType, ov::Shape({1, 3, 24, 24})); + param->set_friendly_name("param"); + param->output(0).get_tensor().set_names({"data"}); + auto result = std::make_shared(param); + result->set_friendly_name("result"); + function = std::make_shared(ov::ResultVector{result}, + ov::ParameterVector{param}); + function->set_friendly_name("ParamResult"); + } + + auto execNet = core->compile_model(function, target_device, configuration); + std::stringstream strm; + execNet.export_model(strm); + + ov::CompiledModel importedCompiledModel = core->import_model(strm, target_device, configuration); + EXPECT_EQ(function->inputs().size(), 1); + EXPECT_EQ(function->inputs().size(), importedCompiledModel.inputs().size()); + EXPECT_NO_THROW(importedCompiledModel.input()); + EXPECT_NO_THROW(importedCompiledModel.input("data").get_node()); + EXPECT_THROW(importedCompiledModel.input("param"), ov::Exception); + + EXPECT_EQ(function->outputs().size(), 1); + EXPECT_EQ(function->outputs().size(), importedCompiledModel.outputs().size()); + EXPECT_NO_THROW(importedCompiledModel.output()); + EXPECT_EQ(function->output(0).get_tensor().get_names(), + importedCompiledModel.output(0).get_tensor().get_names()); + EXPECT_NO_THROW(importedCompiledModel.output("data").get_node()); + EXPECT_THROW(importedCompiledModel.output("param"), ov::Exception); + + 
EXPECT_EQ(ov::element::Type(elementType), importedCompiledModel.input("data").get_element_type()); + EXPECT_EQ(ov::element::Type(elementType), importedCompiledModel.output("data").get_element_type()); +} + +TEST_P(OVExecGraphImportExportTest, importExportedFunctionConstantResultOnly) { + if (target_device == ov::test::utils::DEVICE_MULTI || target_device == ov::test::utils::DEVICE_AUTO) { + GTEST_SKIP() << "MULTI / AUTO does not support import / export" << std::endl; + } + + // Create a simple function + { + auto constant = std::make_shared(elementType, ov::Shape({1, 3, 24, 24})); + constant->set_friendly_name("constant"); + constant->output(0).get_tensor().set_names({"data"}); + auto result = std::make_shared(constant); + result->set_friendly_name("result"); + function = std::make_shared(ov::ResultVector{result}, + ov::ParameterVector{}); + function->set_friendly_name("ConstResult"); + } + + auto execNet = core->compile_model(function, target_device, configuration); + std::stringstream strm; + execNet.export_model(strm); + + ov::CompiledModel importedCompiledModel = core->import_model(strm, target_device, configuration); + EXPECT_EQ(function->inputs().size(), 0); + EXPECT_EQ(function->inputs().size(), importedCompiledModel.inputs().size()); + EXPECT_THROW(importedCompiledModel.input(), ov::Exception); + EXPECT_THROW(importedCompiledModel.input("data"), ov::Exception); + EXPECT_THROW(importedCompiledModel.input("constant"), ov::Exception); + + EXPECT_EQ(function->outputs().size(), 1); + EXPECT_EQ(function->outputs().size(), importedCompiledModel.outputs().size()); + EXPECT_NO_THROW(importedCompiledModel.output()); + EXPECT_EQ(function->output(0).get_tensor().get_names(), + importedCompiledModel.output(0).get_tensor().get_names()); + EXPECT_NO_THROW(importedCompiledModel.output("data").get_node()); + EXPECT_THROW(importedCompiledModel.output("constant"), ov::Exception); + + EXPECT_EQ(ov::element::Type(elementType), 
importedCompiledModel.output("data").get_element_type()); +} + +TEST_P(OVExecGraphImportExportTest, readFromV10IR) { + std::string model = R"V0G0N( + + + + + + + 1 + 3 + 22 + 22 + + + + + + + + 1 + 3 + 22 + 22 + + + + + + + 1 + 3 + 22 + 22 + + + 1 + 3 + 22 + 22 + + + + + 1 + 6 + 22 + 22 + + + + + + + 1 + 6 + 22 + 22 + + + + + + + + + + + )V0G0N"; + function = core->read_model(model, ov::Tensor()); + EXPECT_EQ(function->inputs().size(), 2); + EXPECT_EQ(function->outputs().size(), 1); + EXPECT_NO_THROW(function->input("in1")); // remove if read_model does not change function names + EXPECT_NO_THROW(function->input("in2")); // remove if read_model does not change function names + EXPECT_NO_THROW(function->output("concat")); // remove if read_model does not change function names + + ov::CompiledModel execNet = core->compile_model(function, target_device, configuration); + EXPECT_EQ(execNet.inputs().size(), 2); + EXPECT_EQ(execNet.outputs().size(), 1); + EXPECT_NO_THROW(execNet.input("in1")); + EXPECT_NO_THROW(execNet.input("in2")); + EXPECT_NO_THROW(execNet.output("concat")); + + if (target_device == ov::test::utils::DEVICE_MULTI || target_device == ov::test::utils::DEVICE_AUTO) { + GTEST_SKIP() << "MULTI / AUTO does not support import / export" << std::endl; + } + + std::stringstream strm; + execNet.export_model(strm); + + ov::CompiledModel importedExecNet = core->import_model(strm, target_device, configuration); + EXPECT_EQ(importedExecNet.inputs().size(), 2); + EXPECT_EQ(importedExecNet.outputs().size(), 1); + EXPECT_NO_THROW(importedExecNet.input("in1")); + EXPECT_NO_THROW(importedExecNet.input("in2")); + EXPECT_NO_THROW(importedExecNet.output("concat")); + + EXPECT_EQ(importedExecNet.input("in1").get_element_type(), ov::element::f32); + EXPECT_EQ(importedExecNet.input("in2").get_element_type(), ov::element::f32); + EXPECT_EQ(importedExecNet.output().get_element_type(), ov::element::f32); +} + +static std::map any_copy(const ov::AnyMap& params) { + auto 
to_config_string = [] (const Any& any) -> std::string { + if (any.is()) { + return any.as() ? "YES" : "NO"; + } else { + std::stringstream strm; + any.print(strm); + return strm.str(); + } + }; + std::map result; + for (auto&& value : params) { + result.emplace(value.first, to_config_string(value.second)); + } + return result; +} + +std::string OVExecGraphUniqueNodeNames::getTestCaseName(testing::TestParamInfo obj) { + ov::element::Type type; + ov::Shape shape; + std::string device; + std::tie(type, shape, device) = obj.param; + + std::ostringstream result; + result << "IS=" << ov::test::utils::vec2str(shape) << "_"; + result << "inType=" << type.get_type_name() << "_"; + result << "targetDevice=" << device; + return result.str(); +} + +void OVExecGraphUniqueNodeNames::SetUp() { + SKIP_IF_CURRENT_TEST_IS_DISABLED(); + ov::element::Type type; + ov::Shape shape; + std::tie(type, shape, target_device) = this->GetParam(); + + APIBaseTest::SetUp(); + + ov::ParameterVector params{std::make_shared(type, shape)}; + auto split_axis_op = std::make_shared(ov::element::i64, ov::Shape{}, std::vector{1}); + auto split = std::make_shared(params[0], split_axis_op, 2); + + auto concat = std::make_shared(split->outputs(), 1); + + ov::ResultVector results{std::make_shared(concat)}; + fnPtr = std::make_shared(results, params, "SplitConvConcat"); +} + +TEST_P(OVExecGraphUniqueNodeNames, CheckUniqueNodeNames) { + auto core = utils::PluginCache::get().core(); + auto compiled_model = core->compile_model(fnPtr, target_device); + + auto runtime_model = compiled_model.get_runtime_model(); + + std::unordered_set names; + for (auto&& op : runtime_model->get_ops()) { + ASSERT_TRUE(names.find(op->get_friendly_name()) == names.end()) << "Node with name " << op->get_friendly_name() << "already exists"; + names.insert(op->get_friendly_name()); + } +}; + +} // namespace behavior +} // namespace test +} // namespace ov From aa6c589e785bc860fc183e8536df6aeb66f6fb31 Mon Sep 17 00:00:00 2001 From: 
Georgy Krivoruchko Date: Wed, 10 Jan 2024 22:55:32 -0800 Subject: [PATCH 23/28] Updated org.openvinotoolkit/experimental_detectron/* (#22067) --- .../experimental_detectron/detection_output.cpp | 7 ++++--- .../experimental_detectron/detection_output.hpp | 2 -- .../generate_proposals_single_image.cpp | 7 ++++--- .../generate_proposals_single_image.hpp | 2 -- .../experimental_detectron/prior_grid_generator.cpp | 7 ++++--- .../experimental_detectron/prior_grid_generator.hpp | 2 -- .../experimental_detectron/roi_feature_extractor.cpp | 7 ++++--- .../experimental_detectron/roi_feature_extractor.hpp | 2 -- .../experimental_detectron/topk_rios.cpp | 7 ++++--- .../experimental_detectron/topk_rios.hpp | 2 -- 10 files changed, 20 insertions(+), 25 deletions(-) diff --git a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/detection_output.cpp b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/detection_output.cpp index 11effc7f537261..9f74205ba97e22 100644 --- a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/detection_output.cpp +++ b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/detection_output.cpp @@ -4,16 +4,17 @@ #include "op/org.openvinotoolkit/experimental_detectron/detection_output.hpp" -#include "default_opset.hpp" -#include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" +#include "openvino/op/experimental_detectron_detection_output.hpp" + +using namespace ov::op; namespace ngraph { namespace onnx_import { namespace op { namespace set_1 { OutputVector experimental_detectron_detection_output(const Node& node) { - using DetectionOutput = ngraph::op::v6::ExperimentalDetectronDetectionOutput; + using DetectionOutput = v6::ExperimentalDetectronDetectionOutput; auto inputs = node.get_ng_inputs(); auto rois = inputs[0]; diff --git a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/detection_output.hpp 
b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/detection_output.hpp index 2254fb98782a34..677e3208152710 100644 --- a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/detection_output.hpp +++ b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/detection_output.hpp @@ -9,8 +9,6 @@ OPENVINO_SUPPRESS_DEPRECATED_START #include -#include "ngraph/node.hpp" -#include "ngraph/op/experimental_detectron_detection_output.hpp" #include "onnx_import/core/node.hpp" namespace ngraph { diff --git a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/generate_proposals_single_image.cpp b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/generate_proposals_single_image.cpp index 085084f85ea41e..48a79a0469a26c 100644 --- a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/generate_proposals_single_image.cpp +++ b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/generate_proposals_single_image.cpp @@ -4,17 +4,18 @@ #include "op/org.openvinotoolkit/experimental_detectron/generate_proposals_single_image.hpp" -#include "default_opset.hpp" -#include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" #include "openvino/frontend/exception.hpp" +#include "openvino/op/experimental_detectron_generate_proposals.hpp" + +using namespace ov::op; namespace ngraph { namespace onnx_import { namespace op { namespace set_1 { OutputVector experimental_detectron_generate_proposals(const Node& node) { - using GenerateProposalsSingleImage = ngraph::op::v6::ExperimentalDetectronGenerateProposalsSingleImage; + using GenerateProposalsSingleImage = v6::ExperimentalDetectronGenerateProposalsSingleImage; const auto inputs = node.get_ng_inputs(); FRONT_END_GENERAL_CHECK(inputs.size() == 4, diff --git 
a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/generate_proposals_single_image.hpp b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/generate_proposals_single_image.hpp index b17c944fe3def1..633f8810bb95b9 100644 --- a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/generate_proposals_single_image.hpp +++ b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/generate_proposals_single_image.hpp @@ -7,8 +7,6 @@ #include "openvino/core/deprecated.hpp" OPENVINO_SUPPRESS_DEPRECATED_START -#include "ngraph/node.hpp" -#include "ngraph/op/experimental_detectron_generate_proposals.hpp" #include "onnx_import/core/node.hpp" namespace ngraph { diff --git a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/prior_grid_generator.cpp b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/prior_grid_generator.cpp index 5540643cd65e18..97c66233104f77 100644 --- a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/prior_grid_generator.cpp +++ b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/prior_grid_generator.cpp @@ -4,16 +4,17 @@ #include "op/org.openvinotoolkit/experimental_detectron/prior_grid_generator.hpp" -#include "default_opset.hpp" -#include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" +#include "openvino/op/experimental_detectron_prior_grid_generator.hpp" + +using namespace ov::op; namespace ngraph { namespace onnx_import { namespace op { namespace set_1 { OutputVector experimental_detectron_prior_grid_generator(const Node& node) { - using PriorGridGenerator = ngraph::op::v6::ExperimentalDetectronPriorGridGenerator; + using PriorGridGenerator = v6::ExperimentalDetectronPriorGridGenerator; auto inputs = node.get_ng_inputs(); auto priors = inputs[0]; diff --git 
a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/prior_grid_generator.hpp b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/prior_grid_generator.hpp index 2fdcf1be1034ea..caaefd7403d70a 100644 --- a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/prior_grid_generator.hpp +++ b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/prior_grid_generator.hpp @@ -7,8 +7,6 @@ #include "openvino/core/deprecated.hpp" OPENVINO_SUPPRESS_DEPRECATED_START -#include "ngraph/node.hpp" -#include "ngraph/op/experimental_detectron_prior_grid_generator.hpp" #include "onnx_import/core/node.hpp" namespace ngraph { diff --git a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/roi_feature_extractor.cpp b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/roi_feature_extractor.cpp index 3b4c97191c6ec8..0ea30feed1300f 100644 --- a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/roi_feature_extractor.cpp +++ b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/roi_feature_extractor.cpp @@ -4,16 +4,17 @@ #include "op/org.openvinotoolkit/experimental_detectron/roi_feature_extractor.hpp" -#include "default_opset.hpp" -#include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" +#include "openvino/op/experimental_detectron_roi_feature.hpp" + +using namespace ov::op; namespace ngraph { namespace onnx_import { namespace op { namespace set_1 { OutputVector experimental_detectron_roi_feature_extractor(const Node& node) { - using ROIFeatureExtractor = ngraph::op::v6::ExperimentalDetectronROIFeatureExtractor; + using ROIFeatureExtractor = v6::ExperimentalDetectronROIFeatureExtractor; auto inputs = node.get_ng_inputs(); diff --git a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/roi_feature_extractor.hpp 
b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/roi_feature_extractor.hpp index c401ab6e74f976..eea0b4eee3b9f5 100644 --- a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/roi_feature_extractor.hpp +++ b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/roi_feature_extractor.hpp @@ -7,8 +7,6 @@ #include "openvino/core/deprecated.hpp" OPENVINO_SUPPRESS_DEPRECATED_START -#include "ngraph/node.hpp" -#include "ngraph/op/experimental_detectron_roi_feature.hpp" #include "onnx_import/core/node.hpp" namespace ngraph { diff --git a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/topk_rios.cpp b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/topk_rios.cpp index da1934de3e799c..68edddae2c25a7 100644 --- a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/topk_rios.cpp +++ b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/topk_rios.cpp @@ -4,16 +4,17 @@ #include "op/org.openvinotoolkit/experimental_detectron/topk_rios.hpp" -#include "default_opset.hpp" -#include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" +#include "openvino/op/experimental_detectron_topkrois.hpp" + +using namespace ov::op; namespace ngraph { namespace onnx_import { namespace op { namespace set_1 { OutputVector experimental_detectron_topk_rois(const Node& node) { - using TopKROIs = ngraph::op::v6::ExperimentalDetectronTopKROIs; + using TopKROIs = v6::ExperimentalDetectronTopKROIs; auto inputs = node.get_ng_inputs(); auto input_rois = inputs[0]; diff --git a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/topk_rios.hpp b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/topk_rios.hpp index 41daf52b6ef447..038b54ac1eb4c1 100644 --- a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/topk_rios.hpp +++ 
b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/topk_rios.hpp @@ -7,8 +7,6 @@ #include "openvino/core/deprecated.hpp" OPENVINO_SUPPRESS_DEPRECATED_START -#include "ngraph/node.hpp" -#include "ngraph/op/experimental_detectron_topkrois.hpp" #include "onnx_import/core/node.hpp" namespace ngraph { From a4ce11d6d8424b2a37d55f39e3f34e58b6e5438c Mon Sep 17 00:00:00 2001 From: Jan Iwaszkiewicz Date: Thu, 11 Jan 2024 10:29:14 +0100 Subject: [PATCH 24/28] [PyOV] Add `get_then_body` to If operator (#22080) --- .../python/src/pyopenvino/graph/ops/if.cpp | 14 +++++++++++++ .../python/tests/test_graph/test_if.py | 20 +++++++++++++++++++ 2 files changed, 34 insertions(+) diff --git a/src/bindings/python/src/pyopenvino/graph/ops/if.cpp b/src/bindings/python/src/pyopenvino/graph/ops/if.cpp index 10453341e38657..62f30ff8b8d02c 100644 --- a/src/bindings/python/src/pyopenvino/graph/ops/if.cpp +++ b/src/bindings/python/src/pyopenvino/graph/ops/if.cpp @@ -49,6 +49,20 @@ void regclass_graph_op_If(py::module m) { :rtype: openvino.impl.op.If )"); + cls.def( + "get_then_body", + [](ov::op::v8::If& self) { + auto model = self.get_then_body(); + py::type model_class = py::module_::import("openvino.runtime").attr("Model"); + return model_class(py::cast(model)); + }, + R"( + Gets then_body as Model object. + + :return: then_body as Model object. 
+ :rtype: openvino.Model + )"); + cls.def( "get_else_body", [](ov::op::v8::If& self) { diff --git a/src/bindings/python/tests/test_graph/test_if.py b/src/bindings/python/tests/test_graph/test_if.py index 8c26f83fd76135..77a59580af72bd 100644 --- a/src/bindings/python/tests/test_graph/test_if.py +++ b/src/bindings/python/tests/test_graph/test_if.py @@ -2,6 +2,7 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 +import pytest import numpy as np import openvino.runtime.opset8 as ov from openvino import Model @@ -178,6 +179,25 @@ def test_simple_if_without_body_parameters(): check_if(simple_if_without_parameters, False, ["Relu", 1, []]) +def check_if_getters(if_model, cond_val): + if_op = if_model(cond_val) + assert isinstance(if_op.get_then_body(), Model) + assert if_op.get_function(0)._get_raw_address() == if_op.get_then_body()._get_raw_address() + assert compare_models(if_op.get_function(0), if_op.get_then_body()) + + assert isinstance(if_op.get_else_body(), Model) + assert if_op.get_function(1)._get_raw_address() == if_op.get_else_body()._get_raw_address() + assert compare_models(if_op.get_function(1), if_op.get_else_body()) + + +@pytest.mark.parametrize(("cond_val"), [ + True, + False, +]) +def test_if_getters(cond_val): + check_if_getters(create_simple_if_with_two_outputs, cond_val) + + def test_simple_if_basic(): condition = ov.constant(True, dtype=bool) # then_body From 3f7efef099a18a9bc6075bd0f730492583e0275a Mon Sep 17 00:00:00 2001 From: PuQing Date: Thu, 11 Jan 2024 18:26:15 +0800 Subject: [PATCH 25/28] =?UTF-8?q?=E3=80=90Paddle=20Hackathon=205th=20No.93?= =?UTF-8?q?=E3=80=91add=20paddle=20pool3d=20op=20(#21536)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * add pool3d * add ut * use opset8 maxpool * fix name * add NDHWC support * fix return index * Refactor pool3d function to simplify output handling * Add adaptive pooling tests for 3D models --- 
src/frontends/paddle/src/op/pool3d.cpp | 328 ++++++++++++++++++ src/frontends/paddle/src/op_table.cpp | 4 + src/frontends/paddle/tests/op_fuzzy.cpp | 29 ++ .../gen_scripts/generate_pool3d.py | 324 +++++++++++++++++ 4 files changed, 685 insertions(+) create mode 100644 src/frontends/paddle/src/op/pool3d.cpp create mode 100644 src/frontends/paddle/tests/test_models/gen_scripts/generate_pool3d.py diff --git a/src/frontends/paddle/src/op/pool3d.cpp b/src/frontends/paddle/src/op/pool3d.cpp new file mode 100644 index 00000000000000..8cde73fa23e911 --- /dev/null +++ b/src/frontends/paddle/src/op/pool3d.cpp @@ -0,0 +1,328 @@ +//***************************************************************************** +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// +//***************************************************************************** + +#include + +#include "default_opset.hpp" +#include "openvino/frontend/paddle/node_context.hpp" + +namespace ov { +namespace frontend { +namespace paddle { +namespace op { +// helper func - get pad_begin and pad_end +static void get_paddings(const NodeContext& node, + ov::Shape& pad_begin, + ov::Shape& pad_end, + ov::op::PadType& auto_pad, + std::string& data_format) { + if (node.has_attribute("padding_algorithm")) { + auto pad_algo = node.get_attribute("padding_algorithm"); + if (pad_algo == "SAME") { + auto_pad = ov::op::PadType::SAME_UPPER; + } else if (pad_algo == "VALID") { + auto_pad = ov::op::PadType::VALID; + } else if (pad_algo == "EXPLICIT") { + auto_pad = ov::op::PadType::EXPLICIT; + } else { + throw std::runtime_error("Unsupported pooling padding_algorithm " + pad_algo); + } + } else { + // adaptive_maxpool with no such attr. 
+ auto_pad = ov::op::PadType::EXPLICIT; + } + + /*If pool padding size is a tuple or list, it could be in three forms: + [pad_depth, pad_height, pad_width] or [pad_depth_front, pad_depth_back, + pad_height_top, pad_height_bottom, pad_width_left, pad_width_right], + and when data_format is “NCDHW”, pool_padding can + be in the form [[0,0], [0,0], [pad_depth_front, pad_depth_back], [pad_height_top, + pad_height_bottom], [pad_width_left, pad_width_right]]. when + data_format is “NDHWC”, pool_padding can be in the form + [[0,0], [pad_depth_front, pad_depth_back], [pad_height_top, + pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]. + Otherwise, the pool padding size will be a square of an int.*/ + auto paddings = node.get_attribute>("paddings"); + + switch (paddings.size()) { + case 3: + pad_begin = + Shape{static_cast(paddings[0]), static_cast(paddings[1]), static_cast(paddings[2])}; + pad_end = pad_begin; + break; + case 6: + pad_begin = + Shape{static_cast(paddings[0]), static_cast(paddings[2]), static_cast(paddings[4])}; + pad_end = Shape{ + static_cast(paddings[1]), + static_cast(paddings[3]), + static_cast(paddings[5]), + }; + break; + default: + throw std::runtime_error("Unsupported pooling paddings " + std::to_string(paddings.size())); + } +} + +NamedOutputs pool3d(const NodeContext& node) { + auto data = node.get_input("X"); + + auto pooling_type = node.get_attribute("pooling_type", {}); + auto global_pooling = node.get_attribute("global_pooling"); + auto adaptive = node.get_attribute("adaptive"); + auto kernel_shape = node.get_attribute>("ksize"); + + auto rounding_type = + node.get_attribute("ceil_mode", false) ? 
ov::op::RoundingType::CEIL : ov::op::RoundingType::FLOOR; + + if (pooling_type.empty()) { + pooling_type = "max"; + } + + PADDLE_OP_CHECK(node, (pooling_type == "max") || (pooling_type == "avg"), "pool3d: not supported pooling type !"); + PADDLE_OP_CHECK(node, kernel_shape.size() == 1 || kernel_shape.size() == 3, "pool3d: ksize must be 1 or 3!"); + + PartialShape input_shape = data.get_partial_shape(); + + int32_t input_rank = static_cast(input_shape.rank().get_length()); + PADDLE_OP_CHECK(node, input_rank >= 2, "input tensor rank must be greater than 2"); + + auto auto_pad = ov::op::PadType::EXPLICIT; + ov::Shape pad_begin, pad_end; + std::string data_format = node.get_attribute("data_format", "NCDHW"); + + get_paddings(node, pad_begin, pad_end, auto_pad, data_format); + + if (data_format == "NDHWC") { + data = std::make_shared( + data, + std::make_shared(ov::element::i64, Shape{5}, std::vector{0, 4, 1, 2, 3})); + input_shape = data.get_partial_shape(); + } + + std::vector> pool_outputs; + if (global_pooling || (adaptive && std::any_of(kernel_shape.begin(), kernel_shape.end(), [](int32_t i) { + return i == 1; + }))) { + if (pooling_type == "max") { + auto axes = default_opset::Constant::create(ov::element::i64, + {3}, + {input_rank - 3, input_rank - 2, input_rank - 1}); + pool_outputs = std::make_shared(data, axes, true)->outputs(); + } else { + auto axes = default_opset::Constant::create(ov::element::i64, + {3}, + {input_rank - 3, input_rank - 2, input_rank - 1}); + pool_outputs = std::make_shared(data, axes, true)->outputs(); + } + } else if (adaptive) { + auto pool_size = std::vector(3, 0); + + if (kernel_shape.size() == 1) { + // Not tested: implemented according to spec, but can't generate real + // model to test + pool_size[0] = pool_size[1] = pool_size[2] = kernel_shape[0]; + } else { + pool_size[0] = kernel_shape[0]; + pool_size[1] = kernel_shape[1]; + pool_size[2] = kernel_shape[2]; + } + + const Output output_shape = + 
default_opset::Constant::create(ov::element::i64, {pool_size.size()}, pool_size); + + if (pooling_type == "max") { + pool_outputs = + std::make_shared(data, output_shape, ov::element::i32)->outputs(); + } else { + pool_outputs = std::make_shared(data, output_shape)->outputs(); + } + } else { + auto strides = node.get_attribute>("strides"); + + size_t kernel_d, kernel_h, kernel_w; + if (kernel_shape.size() == 1) { + // Not tested: implemented according to spec, but can't generate real + // model to test + kernel_d = kernel_h = kernel_w = kernel_shape[0]; + } else { + kernel_d = kernel_shape[0]; + kernel_h = kernel_shape[1]; + kernel_w = kernel_shape[2]; + } + + PADDLE_OP_CHECK(node, + kernel_d > 0 && kernel_h > 0 && kernel_w > 0, + "pool3d kernel shape must be greater than 0"); + + // Note: this shape check is only valid when the spatial dim of input_shape + // is static. + if (input_shape[2].is_static() && input_shape[3].is_static() && input_shape[4].is_static()) { + uint64_t input_d = input_shape[input_rank - 3].get_length(); + uint64_t input_h = input_shape[input_rank - 2].get_length(); + uint64_t input_w = input_shape[input_rank - 1].get_length(); + if ((input_d > 0) && (input_d + pad_begin[0] + pad_end[0] < kernel_d)) { + kernel_d = input_d + pad_begin[0] + pad_end[0]; + } + if ((input_h > 0) && (input_h + pad_begin[1] + pad_end[1] < kernel_h)) { + kernel_h = input_h + pad_begin[1] + pad_end[1]; + } + if ((input_w > 0) && (input_w + pad_begin[2] + pad_end[2] < kernel_w)) { + kernel_w = input_w + pad_begin[2] + pad_end[2]; + } + } + + if (pooling_type == "max") { + pool_outputs = std::make_shared(data, + ov::Strides(strides.begin(), strides.end()), + ov::Strides{1, 1, 1}, + pad_begin, + pad_end, + ov::Shape{kernel_d, kernel_h, kernel_w}, + rounding_type, + auto_pad, + ov::element::i32, + 2) + ->outputs(); + } else { + bool exclude_pad = node.get_attribute("exclusive", false); + pool_outputs = std::make_shared(data, + ov::Strides(strides.begin(), strides.end()), 
+ pad_begin, + pad_end, + ov::Shape{kernel_d, kernel_h, kernel_w}, + exclude_pad, + rounding_type, + auto_pad) + ->outputs(); + } + } + + if (data_format == "NDHWC") { + pool_outputs[0] = std::make_shared( + pool_outputs[0], + std::make_shared(ov::element::i64, Shape{5}, std::vector{0, 2, 3, 4, 1})); + } + + return NamedOutputs{{"Out", {pool_outputs[0]}}}; +} + +NamedOutputs pool3d_with_index(const NodeContext& node) { + auto data = node.get_input("X"); + auto pooling_type = node.get_attribute("pooling_type", {}); + auto adaptive = node.get_attribute("adaptive"); + auto kernel_shape = node.get_attribute>("ksize"); + + auto rounding_type = + node.get_attribute("ceil_mode", false) ? ov::op::RoundingType::CEIL : ov::op::RoundingType::FLOOR; + + if (pooling_type.empty()) { + pooling_type = "max"; + } + + PADDLE_OP_CHECK(node, (pooling_type == "max") || (pooling_type == "avg"), "pool3d: not supported pooling type !"); + PADDLE_OP_CHECK(node, kernel_shape.size() == 1 || kernel_shape.size() == 3, "pool3d: ksize must be 1 or 3!"); + + PartialShape input_shape = data.get_partial_shape(); + + int32_t input_rank = static_cast(input_shape.rank().get_length()); + PADDLE_OP_CHECK(node, input_rank >= 2, "input tensor rank must be greater than 2"); + + auto auto_pad = ov::op::PadType::EXPLICIT; + ov::Shape pad_begin, pad_end; + std::string data_format = node.get_attribute("data_format", "NCDHW"); + + get_paddings(node, pad_begin, pad_end, auto_pad, data_format); + + if (data_format == "NDHWC") { + data = std::make_shared( + data, + std::make_shared(ov::element::i64, Shape{5}, std::vector{0, 4, 1, 2, 3})); + input_shape = data.get_partial_shape(); + } + + std::vector> pool_outputs; + if (adaptive) { + auto pool_size = std::vector(3, 0); + + if (kernel_shape.size() == 1) { + // Not tested: implemented according to spec, but can't generate real + // model to test + pool_size[0] = pool_size[1] = pool_size[2] = kernel_shape[0]; + } else { + pool_size[0] = kernel_shape[0]; + 
pool_size[1] = kernel_shape[1]; + pool_size[2] = kernel_shape[2]; + } + + const Output output_shape = + default_opset::Constant::create(ov::element::i64, {pool_size.size()}, pool_size); + + pool_outputs = + std::make_shared(data, output_shape, ov::element::i32)->outputs(); + + } else { + auto strides = node.get_attribute>("strides"); + + size_t kernel_d, kernel_h, kernel_w; + if (kernel_shape.size() == 1) { + // Not tested: implemented according to spec, but can't generate real + // model to test + kernel_d = kernel_h = kernel_w = kernel_shape[0]; + } else { + kernel_d = kernel_shape[0]; + kernel_h = kernel_shape[1]; + kernel_w = kernel_shape[2]; + } + + PADDLE_OP_CHECK(node, + kernel_d > 0 && kernel_h > 0 && kernel_w > 0, + "pool3d kernel shape must be greater than 0"); + + // Note: this shape check is only valid when the spatial dim of input_shape + // is static. + if (input_shape[2].is_static() && input_shape[3].is_static() && input_shape[4].is_static()) { + uint64_t input_d = input_shape[input_rank - 3].get_length(); + uint64_t input_h = input_shape[input_rank - 2].get_length(); + uint64_t input_w = input_shape[input_rank - 1].get_length(); + if ((input_d > 0) && (input_d + pad_begin[0] + pad_end[0] < kernel_d)) { + kernel_d = input_d + pad_begin[0] + pad_end[0]; + } + if ((input_h > 0) && (input_h + pad_begin[1] + pad_end[1] < kernel_h)) { + kernel_h = input_h + pad_begin[1] + pad_end[1]; + } + if ((input_w > 0) && (input_w + pad_begin[2] + pad_end[2] < kernel_w)) { + kernel_w = input_w + pad_begin[2] + pad_end[2]; + } + } + + pool_outputs = std::make_shared(data, + ov::Strides(strides.begin(), strides.end()), + ov::Strides{1, 1, 1}, + pad_begin, + pad_end, + ov::Shape{kernel_d, kernel_h, kernel_w}, + rounding_type, + auto_pad, + ov::element::i32, + 2) + ->outputs(); + } + + if (data_format == "NDHWC") { + pool_outputs[0] = std::make_shared( + pool_outputs[0], + std::make_shared(ov::element::i64, Shape{5}, std::vector{0, 2, 3, 4, 1})); + } + + auto output_name 
= node.get_output_names(); + return NamedOutputs{{"Out", {pool_outputs[0]}}, {"Mask", {pool_outputs[1]}}}; +} + +} // namespace op +} // namespace paddle +} // namespace frontend +} // namespace ov diff --git a/src/frontends/paddle/src/op_table.cpp b/src/frontends/paddle/src/op_table.cpp index f39b5f63128a76..c2646dae43ce6a 100644 --- a/src/frontends/paddle/src/op_table.cpp +++ b/src/frontends/paddle/src/op_table.cpp @@ -81,6 +81,8 @@ OP_CONVERTER(partial_concat); OP_CONVERTER(partial_sum); OP_CONVERTER(pow); OP_CONVERTER(pool2d); +OP_CONVERTER(pool3d); +OP_CONVERTER(pool3d_with_index); OP_CONVERTER(prior_box); OP_CONVERTER(quantize_linear); OP_CONVERTER(range); @@ -201,6 +203,7 @@ std::map get_supported_ops() { {"matmul", op::matmul}, {"matmul_v2", op::matmul_v2}, {"max_pool2d_with_index", op::pool2d}, + {"max_pool3d_with_index", op::pool3d_with_index}, {"matrix_nms", op::matrix_nms}, {"meshgrid", op::meshgrid}, {"multiclass_nms3", op::multiclass_nms}, @@ -214,6 +217,7 @@ std::map get_supported_ops() { {"partial_sum", op::partial_sum}, {"pow", op::pow}, {"pool2d", op::pool2d}, + {"pool3d", op::pool3d}, {"prior_box", op::prior_box}, {"quantize_linear", op::quantize_linear}, {"range", op::range}, diff --git a/src/frontends/paddle/tests/op_fuzzy.cpp b/src/frontends/paddle/tests/op_fuzzy.cpp index 13bcacb8e319c5..acc9ade78c0f18 100644 --- a/src/frontends/paddle/tests/op_fuzzy.cpp +++ b/src/frontends/paddle/tests/op_fuzzy.cpp @@ -34,6 +34,20 @@ static const std::vector models{ std::string("avgPool_test7"), std::string("avgPool_test8"), std::string("avgPool_test9"), + std::string("avgAdaptivePool3D_test1"), + std::string("avgAdaptivePool3D_test2"), + std::string("avgAdaptivePool3D_test3"), + std::string("avgAdaptivePool3D_test4"), + std::string("avg3dPool_test1"), + std::string("avg3dPool_test2"), + std::string("avg3dPool_test3"), + std::string("avg3dPool_test4"), + std::string("avg3dPool_test5"), + std::string("avg3dPool_test6"), + std::string("avg3dPool_test7"), + 
std::string("avg3dPool_test8"), + std::string("avg3dPool_test9"), + std::string("avg3dPool_test10"), std::string("batch_norm_nchw/batch_norm_nchw.pdmodel"), std::string("batch_norm_nhwc/batch_norm_nhwc.pdmodel"), std::string("bicubic_downsample_false_0/bicubic_downsample_false_0.pdmodel"), @@ -330,6 +344,21 @@ static const std::vector models{ std::string("maxPool_test7"), std::string("maxPool_test8"), std::string("maxPool_test9"), + std::string("maxAdaptivePool3D_test1"), + std::string("maxAdaptivePool3D_test2"), + std::string("maxAdaptivePool3D_test3"), + std::string("maxAdaptivePool3D_test4"), + std::string("max3dPool_test1"), + std::string("max3dPool_test2"), + std::string("max3dPool_test3"), + std::string("max3dPool_test4"), + std::string("max3dPool_test5"), + std::string("max3dPool_test6"), + std::string("max3dPool_test7"), + std::string("max3dPool_test8"), + std::string("max3dPool_test9"), + std::string("max3dPool_test10"), + std::string("max3dRetureMask"), std::string("meshgrid/meshgrid.pdmodel"), std::string("multiclass_nms_by_background"), std::string("multiclass_nms_by_class_id"), diff --git a/src/frontends/paddle/tests/test_models/gen_scripts/generate_pool3d.py b/src/frontends/paddle/tests/test_models/gen_scripts/generate_pool3d.py new file mode 100644 index 00000000000000..7d8c621f9ee2de --- /dev/null +++ b/src/frontends/paddle/tests/test_models/gen_scripts/generate_pool3d.py @@ -0,0 +1,324 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +# +# pool3d paddle model generator +# +import numpy as np +import sys +from save_model import saveModel + +data_type = "float32" + + +def pool3d(name: str, x, attrs: dict): + import paddle + + paddle.enable_static() + + with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): + node_x = paddle.static.data(name="x", shape=x.shape, dtype=data_type) + if attrs["pool_type"] == "max": + out = paddle.nn.functional.max_pool3d( + node_x, + 
kernel_size=attrs["pool_size"], + stride=attrs["pool_stride"], + padding=attrs["pool_padding"], + ceil_mode=attrs["ceil_mode"], + data_format=attrs["data_format"], + return_mask=attrs["return_mask"], + ) + else: + out = paddle.nn.functional.avg_pool3d( + node_x, + kernel_size=attrs["pool_size"], + stride=attrs["pool_stride"], + padding=attrs["pool_padding"], + ceil_mode=attrs["ceil_mode"], + data_format=attrs["data_format"], + ) + + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) + # startup program will call initializer to initialize the parameters. + exe.run(paddle.static.default_startup_program()) + if attrs["return_mask"]: + outs = exe.run(feed={"x": x}, fetch_list=[out[0], out[1]]) + saveModel( + name, + exe, + feedkeys=["x"], + fetchlist=[out[0], out[1]], + inputs=[x], + outputs=[outs[0], outs[1]], + target_dir=sys.argv[1], + ) + else: + outs = exe.run(feed={"x": x}, fetch_list=[out]) + saveModel( + name, + exe, + feedkeys=["x"], + fetchlist=[out], + inputs=[x], + outputs=[outs[0]], + target_dir=sys.argv[1], + ) + + return outs[0] + + +def adaptive_pool3d(name: str, x, attrs: dict): + import paddle + + paddle.enable_static() + + with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): + node_x = paddle.static.data(name="x", shape=x.shape, dtype=data_type) + if attrs["pool_type"] == "max": + out = paddle.nn.functional.adaptive_max_pool3d( + x=node_x, + output_size=attrs["pool_size"], + return_mask=attrs["return_mask"], + ) + else: + out = paddle.nn.functional.adaptive_avg_pool3d( + x=node_x, output_size=attrs["pool_size"] + ) + + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) + # startup program will call initializer to initialize the parameters. 
+ exe.run(paddle.static.default_startup_program()) + if attrs["return_mask"]: + outs = exe.run(feed={"x": x}, fetch_list=[out[0], out[1]]) + saveModel( + name, + exe, + feedkeys=["x"], + fetchlist=[out[0], out[1]], + inputs=[x], + outputs=[outs[0], outs[1]], + target_dir=sys.argv[1], + ) + else: + outs = exe.run(feed={"x": x}, fetch_list=[out]) + + saveModel( + name, + exe, + feedkeys=["x"], + fetchlist=[out], + inputs=[x], + outputs=[outs[0]], + target_dir=sys.argv[1], + ) + + return outs[0] + + +def main(): + N, C, D, H, W = 2, 3, 4, 4, 4 + data = np.arange(N * C * D * H * W).astype(data_type) + data_NCDHW = data.reshape(N, C, D, H, W) + data_NDHWC = data.reshape(N, D, H, W, C) + + pooling_types = ["max", "avg"] + + for i, pooling_type in enumerate(pooling_types): + # example 1: + # ceil_mode = False + paddle_attrs = { + # input=data_NCDHW, # shape: [2, 3, 4, 4, 4] + "pool_size": [3, 3, 3], + "pool_type": pooling_type, + "pool_stride": [3, 3, 3], + "pool_padding": [ + 1, + 2, + 1, + ], # it is same as pool_padding = [1, 1, 2, 2, 1, 1] + "ceil_mode": False, + "exclusive": True, + "return_mask": False, + "data_format": "NCDHW", + } + pool3d(pooling_type + "3d" + "Pool_test1", data_NCDHW, paddle_attrs) + + # example 2: + # ceil_mode = True (different from example 1) + paddle_attrs = { + # input=data_NCDHW, + "pool_size": [3, 3, 3], + "pool_type": pooling_type, + "pool_stride": [3, 3, 3], + "pool_padding": [ + [0, 0], + [0, 0], + [1, 1], + [2, 2], + [1, 1], + ], # it is same as pool_padding = [1, 1, 2, 2, 1, 1] + "ceil_mode": True, + "exclusive": True, + "return_mask": False, + "data_format": "NCDHW", + } + pool3d(pooling_type + "3d" + "Pool_test2", data_NCDHW, paddle_attrs) + + # example 3: + # pool_padding = "SAME" (different from example 1) + paddle_attrs = { + # input=data_NCDHW, + "pool_size": [3, 3, 3], + "pool_type": pooling_type, + "pool_stride": [3, 3, 3], + "pool_padding": "SAME", + "ceil_mode": False, + "exclusive": True, + "return_mask": False, + 
"data_format": "NCDHW", + } + pool3d(pooling_type + "3d" + "Pool_test3", data_NCDHW, paddle_attrs) + + # example 4: + # pool_padding = "VALID" (different from example 1) + paddle_attrs = { + # input=data_NCDHW, + "pool_size": [3, 3, 3], + "pool_type": pooling_type, + "pool_stride": [3, 3, 3], + "pool_padding": "VALID", + "ceil_mode": False, + "exclusive": True, + "return_mask": False, + "data_format": "NCDHW", + } + pool3d(pooling_type + "3d" + "Pool_test4", data_NCDHW, paddle_attrs) + + # example 5: + # data_format = "NDHWC" (different from example 1) + paddle_attrs = { + # input=data_NDHWC, # shape: [2, 4, 4, 4, 3] + "pool_size": [3, 3, 3], + "pool_type": pooling_type, + "pool_stride": [3, 3, 3], + "pool_padding": [1, 2, 1], + "ceil_mode": False, + "exclusive": True, + "return_mask": False, + "data_format": "NDHWC", + } + # NOT support data_format = "NDHWC" now + pool3d(pooling_type + "3d" + "Pool_test5", data_NDHWC, paddle_attrs) + + # example 6: + # pool_padding size is 1 + paddle_attrs = { + "pool_size": [3, 3, 3], + "pool_type": pooling_type, + "pool_stride": [3, 3, 3], + "pool_padding": 2, + "ceil_mode": False, + "exclusive": True, + "return_mask": False, + "data_format": "NCDHW", + } + pool3d(pooling_type + "3d" + "Pool_test6", data_NCDHW, paddle_attrs) + + # input data for test7 and test8 + N_data1, C_data1, D_data1, H_data1, W_data1 = 2, 3, 8, 8, 8 + data1 = np.arange(N_data1 * C_data1 * D_data1 * H_data1 * W_data1).astype( + data_type + ) + data1_NCDHW = data1.reshape(N_data1, C_data1, D_data1, H_data1, W_data1) + data1_NDHWC = data1.reshape(N_data1, D_data1, H_data1, W_data1, C_data1) + # example 7: + # pool_padding size is 6: [pad_depth_front, pad_depth_back, pad_height_top, pad_height_bottom, pad_width_left, pad_width_right] + paddle_attrs = { + "pool_size": [3, 3, 3], + "pool_type": pooling_type, + "pool_stride": [3, 3, 3], + "pool_padding": [1, 2, 1, 1, 2, 1], + "ceil_mode": False, + "exclusive": True, + "return_mask": False, + "data_format": 
"NCDHW", + } + pool3d(pooling_type + "3d" + "Pool_test7", data1_NCDHW, paddle_attrs) + + # example 8: + paddle_attrs = { + "pool_size": [3, 3, 3], + "pool_type": pooling_type, + "pool_stride": [3, 3, 3], + "pool_padding": [[0, 0], [0, 0], [1, 2], [2, 1], [2, 1]], + "ceil_mode": False, + "exclusive": True, + "return_mask": False, + "data_format": "NCDHW", + } + pool3d(pooling_type + "3d" + "Pool_test8", data1_NCDHW, paddle_attrs) + + # example 9: + paddle_attrs = { + "pool_size": 9, + "pool_type": pooling_type, + "pool_stride": [3, 3, 3], + "pool_padding": [[0, 0], [0, 0], [2, 1], [1, 2], [1, 2]], + "ceil_mode": False, + "exclusive": True, + "return_mask": False, + "data_format": "NCDHW", + } + pool3d(pooling_type + "3d" + "Pool_test9", data1_NCDHW, paddle_attrs) + + # example 10: + paddle_attrs = { + "pool_size": 9, + "pool_type": pooling_type, + "pool_stride": 3, + "pool_padding": [[0, 0], [2, 2], [1, 2], [2, 2], [0, 0]], + "ceil_mode": False, + "return_mask": False, + "data_format": "NDHWC", + } + pool3d(pooling_type + "3d" + "Pool_test10", data1_NDHWC, paddle_attrs) + + paddle_attrs = { + "pool_size": 9, + "pool_type": "max", + "pool_stride": 3, + "pool_padding": [3, 3, 3], + "ceil_mode": False, + "return_mask": True, + "data_format": "NCDHW", + } + pool3d("max3dRetureMask", data_NCDHW, paddle_attrs) + + # adaptive_pool3 + for i, pooling_type in enumerate(pooling_types): + paddle_attrs = { + "pool_size": [2, 2, 2], + "pool_type": pooling_type, + "return_mask": False, + } + adaptive_pool3d(pooling_type + "AdaptivePool3D_test1", data_NCDHW, paddle_attrs) + paddle_attrs = {"pool_size": 2, "pool_type": pooling_type, "return_mask": False} + adaptive_pool3d(pooling_type + "AdaptivePool3D_test2", data_NCDHW, paddle_attrs) + paddle_attrs = { + "pool_size": 1, # global pooling case + "pool_type": pooling_type, + "return_mask": False, + } + adaptive_pool3d(pooling_type + "AdaptivePool3D_test3", data_NCDHW, paddle_attrs) + paddle_attrs = { + "pool_size": 1, # global 
pooling case + "pool_type": pooling_type, + "return_mask": True, + } + adaptive_pool3d(pooling_type + "AdaptivePool3D_test4", data_NCDHW, paddle_attrs) + + +if __name__ == "__main__": + main() From 8988631e0a18e00f1f8ac1c3f43e549ce8c9daff Mon Sep 17 00:00:00 2001 From: River Li Date: Thu, 11 Jan 2024 18:33:39 +0800 Subject: [PATCH 26/28] [CAPI] avoid random timeout issue in test cases (#22056) --- src/bindings/c/tests/ov_infer_request_test.cpp | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/src/bindings/c/tests/ov_infer_request_test.cpp b/src/bindings/c/tests/ov_infer_request_test.cpp index 620a7735868787..1f609bb828cb24 100644 --- a/src/bindings/c/tests/ov_infer_request_test.cpp +++ b/src/bindings/c/tests/ov_infer_request_test.cpp @@ -335,8 +335,14 @@ TEST_P(ov_infer_request_test, infer_async_wait_for) { OV_ASSERT_OK(ov_infer_request_start_async(infer_request)); if (!HasFatalFailure()) { - OV_EXPECT_OK(ov_infer_request_wait_for(infer_request, 10)); - + ov_status_e ret = ov_status_e::OK; + EXPECT_NO_THROW(ret = ov_infer_request_wait_for(infer_request, 10)); + size_t max_times = 10; + // Random timeout in some platform, increase wait() times if timeout occurr. 
+ while (ret != ov_status_e::OK && max_times-- > 0) { + EXPECT_NO_THROW(ret = ov_infer_request_wait_for(infer_request, 10)); + } + EXPECT_EQ(ret, ov_status_e::OK); OV_EXPECT_OK(ov_infer_request_get_output_tensor_by_index(infer_request, 0, &output_tensor)); EXPECT_NE(nullptr, output_tensor); } From 0535f7edf39b1186b6381066646844fed22e8de5 Mon Sep 17 00:00:00 2001 From: "mei, yang" Date: Thu, 11 Jan 2024 18:54:00 +0800 Subject: [PATCH 27/28] collect inplace consumers with LOOK_UP case (#21829) * collect inplace consumers with LOOK_UP case * fix smoke_ShapeOf_no_Inplace_Conflicts issue * update collectConsumers * fix issue * Add cpu unit test case to check ResolveEdgeConflicts() * update variable name * update according to comment * simplify code * update --- src/plugins/intel_cpu/src/edge.cpp | 37 ++++++-- .../intel_cpu/tests/unit/graph/dummy_node.hpp | 12 +-- .../graph/resolve_edge_conflicts_test.cpp | 86 +++++++++++++++++++ 3 files changed, 123 insertions(+), 12 deletions(-) create mode 100644 src/plugins/intel_cpu/tests/unit/graph/resolve_edge_conflicts_test.cpp diff --git a/src/plugins/intel_cpu/src/edge.cpp b/src/plugins/intel_cpu/src/edge.cpp index 090032f8e5ef28..b9d262fcc77281 100644 --- a/src/plugins/intel_cpu/src/edge.cpp +++ b/src/plugins/intel_cpu/src/edge.cpp @@ -52,22 +52,45 @@ bool Edge::isDropped() const { } void Edge::collectConsumers(std::vector& result) const { - if (!this->getChild()->getChildEdges().empty() && this->inPlace(LOOK_DOWN)) { - if (auto peerChildSPD = this->getChild()->getSelectedPrimitiveDescriptor()) { + auto add_result_node = [](std::vector& result, const NodePtr& node) -> bool { + if (Type::ShapeOf == node->getType()) { + // ShapeOf doesn't actually read the data, it only reads shape + return false; + } + result.push_back(node); + return true; + }; + auto childNode = this->getChild(); + if (childNode->getChildEdges().empty()) { + add_result_node(result, childNode); + return; + } + + if (this->inPlace(LOOK_DOWN)) { + if (auto 
peerChildSPD = childNode->getSelectedPrimitiveDescriptor()) { auto peerOutputNum = this->getOutputNum(); auto peerInPlacePort = peerChildSPD->getConfig().inConfs[peerOutputNum].inPlace(); - auto& vecChildEdges = this->getChild()->getChildEdgesAtPort(peerInPlacePort); + auto& vecChildEdges = childNode->getChildEdgesAtPort(peerInPlacePort); for (auto childEdge : vecChildEdges) { childEdge->collectConsumers(result); } } } else { - auto childNode = this->getChild(); - if (Type::ShapeOf == childNode->getType()) { - // ShapeOf doesn't actually read the data, it only reads shape + if (!add_result_node(result, childNode)) return; + + // collect consumers in case of an upstream in-place memory reference + if (auto peerChildSPD = childNode->getSelectedPrimitiveDescriptor()) { + auto&& conf = peerChildSPD->getConfig(); + for (size_t i = 0; i < conf.outConfs.size(); i++) { + const auto peerOutInPlacePort = conf.outConfs[i].inPlace(); + if (peerOutInPlacePort == this->getOutputNum()) { + for (auto&& childEdge : childNode->getChildEdgesAtPort(i)) { + childEdge->collectConsumers(result); + } + } + } } - result.push_back(childNode); } } diff --git a/src/plugins/intel_cpu/tests/unit/graph/dummy_node.hpp b/src/plugins/intel_cpu/tests/unit/graph/dummy_node.hpp index cae50293028119..7ffa5c9279ddaf 100644 --- a/src/plugins/intel_cpu/tests/unit/graph/dummy_node.hpp +++ b/src/plugins/intel_cpu/tests/unit/graph/dummy_node.hpp @@ -21,7 +21,7 @@ class DummyNode : public Node { const std::string& type, const GraphContext::CPtr context, LayoutType layout = LayoutType::ncsp, - Edge::LOOK in_place_direction = Edge::LOOK::LOOK_UP, + int in_place_direction = Edge::LOOK::LOOK_UP, bool is_executable = false) : Node(type, name, context), m_layout(layout), m_inplace(in_place_direction), m_is_executable(is_executable) { // dummy node of the same shape and precision to both input and output. 
@@ -46,9 +46,11 @@ class DummyNode : public Node { config.inConfs.resize(1); config.outConfs.resize(1); - config.inConfs[0].inPlace(m_inplace & Edge::LOOK::LOOK_DOWN ? 0 : -1); + config.inConfs[0].inPlace(m_inplace == static_cast(Edge::LOOK::LOOK_DOWN) || + m_inplace == static_cast(Edge::LOOK::LOOK_BOTH)? 0 : -1); config.inConfs[0].constant(false); - config.outConfs[0].inPlace(m_inplace & Edge::LOOK::LOOK_UP ? 0 : -1); + config.outConfs[0].inPlace(m_inplace == static_cast(Edge::LOOK::LOOK_UP) || + m_inplace == static_cast(Edge::LOOK::LOOK_BOTH) ? 0 : -1); config.outConfs[0].constant(false); auto layoutCreator = BlockedDescCreator::getCommonCreators().at(m_layout); @@ -76,9 +78,9 @@ class DummyNode : public Node { private: LayoutType m_layout = LayoutType::ncsp; - Edge::LOOK m_inplace = Edge::LOOK::LOOK_UP; + int m_inplace = Edge::LOOK::LOOK_UP; bool m_is_executable = false; }; } // namespace cpu_unit_test } // namespace intel_cpu -} // namespace ov \ No newline at end of file +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/unit/graph/resolve_edge_conflicts_test.cpp b/src/plugins/intel_cpu/tests/unit/graph/resolve_edge_conflicts_test.cpp new file mode 100644 index 00000000000000..2c048e5e13b0e0 --- /dev/null +++ b/src/plugins/intel_cpu/tests/unit/graph/resolve_edge_conflicts_test.cpp @@ -0,0 +1,86 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// +#include + +#include "dummy_node.hpp" +#include "nodes/input.h" +#include "nodes/concat.h" + +#include "ov_models/builders.hpp" + +using namespace ov::intel_cpu; + +/* + * Test the CPU plugin-in edge method ResolveEdgeConflicts(). 
+ * This case is to check the capability of graph to resolve complex inplace conflicts + */ + +TEST(ResolveEdgeConflictsCPUTest, smoke_Run_ResolveEdgeConflicts) { + /* create graph: + Input + / \ + Dummy1 Dummy2 <*NOTE: unexcutable fake node with inplace from upstream*> + | | + | Dummy3 <*NOTE: excutable fake node with inplace from upstream*> + | | + | Dummy4 <*NOTE: fake node can not be inplace*> + \ / + Concat + | + Output + + Dummy1, Dummy2 and Dummy3 can be inplace. In ResolveEdgeConflicts(), detect Dummy3 is + a modifying node. Collect consumers of edge Input->Dummy1 and find consumer execution + order is after Dummy3. Then insert Reorder in edge Input->Dummy2. + */ + Config conf; + conf.rtCacheCapacity = 100; + auto context = std::make_shared(conf, nullptr, nullptr, false); + const dnnl::engine cpuEngine = context->getEngine(); + + std::unique_ptr graph = std::unique_ptr(new Graph()); + + const ov::element::Type_t testPrec = ov::element::Type_t::f32; + const ov::Shape testShape{2, 1}; + + ov::ParameterVector params{std::make_shared(testPrec, testShape)}; + auto concat = std::make_shared(ov::OutputVector{params[0], params[0]}, 1); + ov::ResultVector results{std::make_shared(concat)}; + auto inputNode = std::make_shared(params[0], context); + auto outputNode = std::make_shared(results[0], context); + auto concatNode = std::make_shared(concat, context); + auto dummyNode1 = std::make_shared( + testShape, testPrec, "Dummy1", "DummyNode", context); + auto dummyNode2 = std::make_shared( + testShape, testPrec, "Dummy2", "DummyNode", context); + auto dummyNode3 = std::make_shared( + testShape, testPrec, "Dummy3", "DummyNode", context, LayoutType::ncsp, Edge::LOOK::LOOK_UP, true); + auto dummyNode4 = std::make_shared( + testShape, testPrec, "Dummy4", "DummyNode", context, LayoutType::ncsp, 0, true); + + std::vector graphNodes; + std::vector graphEdges; + + std::unordered_set nodesSet; + auto addEdge = [&](const NodePtr& parent, const NodePtr& child, size_t parentPort, 
size_t childPort) -> void { + auto edge = std::make_shared(parent, child, parentPort, childPort); + child->addEdge(edge); + graphEdges.push_back(edge); + nodesSet.insert(parent); + nodesSet.insert(child); + }; + addEdge(inputNode, dummyNode2, 0, 0); + addEdge(dummyNode2, dummyNode3, 0, 0); + addEdge(dummyNode3, dummyNode4, 0, 0); + addEdge(dummyNode4, concatNode, 0, 1); + addEdge(inputNode, dummyNode1, 0, 0); + addEdge(dummyNode1, concatNode, 0, 0); + addEdge(concatNode, outputNode, 0, 0); + for (auto &node : nodesSet) graphNodes.emplace_back(node); + graph->CreateGraph(graphNodes, graphEdges, context, "test_graph"); + + // Check whether reorder is inserted + NodePtr expected_reorder = dummyNode2->getParentEdgeAt(0)->getParent(); + ASSERT_EQ(expected_reorder->getType(), Type::Reorder); +} From 0ad75bd112c9345abafd1727465f20da63d0782d Mon Sep 17 00:00:00 2001 From: Andrzej Kopytko Date: Thu, 11 Jan 2024 11:57:56 +0100 Subject: [PATCH 28/28] Main changes without valid data (#22065) --- .../benchmarks_files/OV-benchmark-data.csv | 862 +++++++++--------- docs/sphinx_setup/_static/js/graphs.js | 48 +- 2 files changed, 468 insertions(+), 442 deletions(-) diff --git a/docs/sphinx_setup/_static/benchmarks_files/OV-benchmark-data.csv b/docs/sphinx_setup/_static/benchmarks_files/OV-benchmark-data.csv index d761ad9de95f70..e3bd18a51b8c1b 100644 --- a/docs/sphinx_setup/_static/benchmarks_files/OV-benchmark-data.csv +++ b/docs/sphinx_setup/_static/benchmarks_files/OV-benchmark-data.csv @@ -1,431 +1,431 @@ -Network model,Release,IE-Type,Platform name,Throughput-INT8,Throughput-FP16,Throughput-FP32,Value,Efficiency,Price,TDP,Sockets,Price/Socket,TDP/Socket,Latency,UOM_T,UOM_V,UOM_E,UOM_L,Latency_FP16,Latency_FP32,Latency_int4,Throughput_INT4 -begin_rec," "," "," "," "," "," "," "," "," "," "," "," "," "," ",FPS,FPS/$,FPS/TDP,msec.,,,, -bert-base-cased,OV-2023.2,core,"Intel® Core™ i3-8100 ",21.27,,,0.182,0.327,117,65,1,117,65,48.62,FPS,FPS/$,FPS/TDP,msec.,,,, 
-bert-base-cased,OV-2023.2,core,"Intel® Core™ i5-10500TE ",32.04,,21.72,0.150,0.493,214,65,1,214,65,36.77,FPS,FPS/$,FPS/TDP,msec.,,,, -bert-base-cased,OV-2023.2,core,"Intel® Core™ i5-13600K ",112.78,,45.21,0.343,0.902,329,125,1,329,125,17.53,FPS,FPS/$,FPS/TDP,msec.,,,, -bert-base-cased,OV-2023.2,core,Intel® Core™ i7-1185G7 CPU,50.11,,18.36,0.118,1.790,426,28,1,426,28,23.39,FPS,FPS/$,FPS/TDP,msec.,,,, -bert-base-cased,OV-2023.2,core,Intel® Core™ i7-1185GRE CPU,38.17,,13.67,0.078,1.363,490,28,1,490,28,29.4,FPS,FPS/$,FPS/TDP,msec.,,,, -bert-base-cased,OV-2023.2,core,Intel® Core™ i7-12700H CPU,88.62,,35.37,0.177,0.771,502,115,1,502,115,17.1,FPS,FPS/$,FPS/TDP,msec.,,,, -bert-base-cased,OV-2023.2,core,"Intel® Core™ i7-8700T ",27.47,,18.34,0.091,0.785,303,35,1,303,35,43.1,FPS,FPS/$,FPS/TDP,msec.,,,, -bert-base-cased,OV-2023.2,core,"Intel® Core™ i9-10900TE ",33.58,,21.38,0.069,0.960,488,35,1,488,35,37.7,FPS,FPS/$,FPS/TDP,msec.,,,, -bert-base-cased,OV-2023.2,core,"Intel® Core™ i9-12900TE ",52.74,,20.43,0.097,1.507,544,35,1,544,35,23.05,FPS,FPS/$,FPS/TDP,msec.,,,, -bert-base-cased,OV-2023.2,core,"Intel® Core™ i9-13900K ",164.86,,66.26,0.275,1.319,599,125,1,599,125,13.76,FPS,FPS/$,FPS/TDP,msec.,,,, -bert-base-cased,OV-2023.2,xeon,"Intel® Xeon® W1290P ",50.94,,33.27,0.086,0.407,594,125,1,594,125,29.19,FPS,FPS/$,FPS/TDP,msec.,,,, -bert-base-cased,OV-2023.2,xeon,"Intel® Xeon® E-2124G ",20.73,,,0.083,0.292,249,71,1,249,71,49.54,FPS,FPS/$,FPS/TDP,msec.,,,, -bert-base-cased,OV-2023.2,xeon,"Intel® Xeon® Gold 5218T ",215.86,,80.31,0.069,1.028,3144,210,2,1572,105,14.03,FPS,FPS/$,FPS/TDP,msec.,,,, -bert-base-cased,OV-2023.2,xeon,"Intel® Xeon® Platinum 8270 ",569.64,,222.97,0.034,1.389,16954,410,2,8477,205,7.97,FPS,FPS/$,FPS/TDP,msec.,,,, -bert-base-cased,OV-2023.2,xeon,"Intel® Xeon® Platinum 8380 ",876.04,,336.63,0.047,1.622,18718,540,2,9359,270,,FPS,FPS/$,FPS/TDP,msec.,,,, -bert-base-cased,OV-2023.2,xeon,"Intel® Xeon® Platinum 8490H 
",3131.74,,505.87,0.092,4.474,34000,700,2,17000,350,4.14,FPS,FPS/$,FPS/TDP,msec.,,,, -bert-base-cased,OV-2023.2,xeon,"Intel® Xeon® Silver 4216R ",205.61,,76.34,0.102,1.028,2022,200,2,1011,100,14.66,FPS,FPS/$,FPS/TDP,msec.,,,, -bert-base-cased,OV-2023.2,xeon,"Intel® Xeon® Silver 4316 ",423.53,,166.33,0.186,1.412,2274,300,2,1137,150,,FPS,FPS/$,FPS/TDP,msec.,,,, -bert-base-cased,OV-2023.2,accel,"Intel® Data Center GPU Flex 170 ",824.04,680.35,,0.428,5.494,1925,150,1,1925,150,19.37,FPS,FPS/$,FPS/TDP,msec.,,,, -bert-base-cased,OV-2023.2,accel,"Intel® Arc®A-Series Graphics ",620.32,554.08,,1.932,4.135,321,150,1,321,150,25.63,FPS,FPS/$,FPS/TDP,msec.,,,, -bert-base-cased,OV-2023.2,core-iGPU,Intel® Core™ i7-1185G7 iGPU,83.66,60.2,42.14,0.196,2.988,426,28,1,426,28,47.28,FPS,FPS/$,FPS/TDP,msec.,,,, -bert-base-cased,OV-2023.2,core-iGPU,Intel® Core™ i7-1185GRE iGPU,72.43,55.39,36.92,0.148,2.587,490,28,1,490,28,55.03,FPS,FPS/$,FPS/TDP,msec.,,,, -bert-base-cased,OV-2023.2,core-iGPU,Intel® Core™ i7-12700H iGPU,91.52,64.83,46.37,0.182,0.796,502,115,1,502,115,43.39,FPS,FPS/$,FPS/TDP,msec.,,,, -bert-base-cased,OV-2023.2,core-CPU+iGPU,Intel® Core™ i7-1185G7 CPU+iGPU,94.49,,39.79,0.222,3.375,426,28,1,426,28,,FPS,FPS/$,FPS/TDP,msec.,,,, -bert-base-cased,OV-2023.2,core-CPU+iGPU,Intel® Core™ i7-12700H CPU+iGPU,88.79,,35.19,0.177,0.772,502,115,1,502,115,,FPS,FPS/$,FPS/TDP,msec.,,,, -end_rec,,,,,,,,,,,,,,,,,,,,,, -begin_rec," "," "," "," "," "," "," "," "," "," "," "," "," "," ",FPS,FPS/$,FPS/TDP,msec.,,,, -bert-large-uncased-whole-word-masking-squad-0001,OV-2023.2,core,"Intel® Core™ i3-8100 ",2.09,,,0.018,0.032,117,65,1,117,65,493.93,FPS,FPS/$,FPS/TDP,msec.,,,, -bert-large-uncased-whole-word-masking-squad-0001,OV-2023.2,core,"Intel® Core™ i5-10500TE ",2.98,,1.86,0.014,0.046,214,65,1,214,65,351.2,FPS,FPS/$,FPS/TDP,msec.,,,, -bert-large-uncased-whole-word-masking-squad-0001,OV-2023.2,core,"Intel® Core™ i5-13600K ",10.07,,3.75,0.031,0.081,329,125,1,329,125,156.29,FPS,FPS/$,FPS/TDP,msec.,,,, 
-bert-large-uncased-whole-word-masking-squad-0001,OV-2023.2,core,Intel® Core™ i7-1185G7 CPU,5.07,,1.63,0.012,0.181,426,28,1,426,28,219.53,FPS,FPS/$,FPS/TDP,msec.,,,, -bert-large-uncased-whole-word-masking-squad-0001,OV-2023.2,core,Intel® Core™ i7-1185GRE CPU,3.78,,1.21,0.008,0.135,490,28,1,490,28,267.11,FPS,FPS/$,FPS/TDP,msec.,,,, -bert-large-uncased-whole-word-masking-squad-0001,OV-2023.2,core,Intel® Core™ i7-12700H CPU,8.31,,3.08,0.017,0.072,502,115,1,502,115,155.69,FPS,FPS/$,FPS/TDP,msec.,,,, -bert-large-uncased-whole-word-masking-squad-0001,OV-2023.2,core,"Intel® Core™ i7-8700T ",2.7,,1.61,0.009,0.077,303,35,1,303,35,411.88,FPS,FPS/$,FPS/TDP,msec.,,,, -bert-large-uncased-whole-word-masking-squad-0001,OV-2023.2,core,"Intel® Core™ i9-10900TE ",3.28,,1.99,0.007,0.094,488,35,1,488,35,331.95,FPS,FPS/$,FPS/TDP,msec.,,,, -bert-large-uncased-whole-word-masking-squad-0001,OV-2023.2,core,"Intel® Core™ i9-12900TE ",5.12,,1.83,0.009,0.146,544,35,1,544,35,210.03,FPS,FPS/$,FPS/TDP,msec.,,,, -bert-large-uncased-whole-word-masking-squad-0001,OV-2023.2,core,"Intel® Core™ i9-13900K ",15.37,,5.95,0.026,0.123,599,125,1,599,125,113.51,FPS,FPS/$,FPS/TDP,msec.,,,, -bert-large-uncased-whole-word-masking-squad-0001,OV-2023.2,xeon,"Intel® Xeon® W1290P ",4.65,,3.11,0.008,0.037,594,125,1,594,125,228.25,FPS,FPS/$,FPS/TDP,msec.,,,, -bert-large-uncased-whole-word-masking-squad-0001,OV-2023.2,xeon,"Intel® Xeon® E-2124G ",2.11,,,0.008,0.030,249,71,1,249,71,484.55,FPS,FPS/$,FPS/TDP,msec.,,,, -bert-large-uncased-whole-word-masking-squad-0001,OV-2023.2,xeon,"Intel® Xeon® Gold 5218T ",20.81,,6.91,0.007,0.099,3144,210,2,1572,105,106.59,FPS,FPS/$,FPS/TDP,msec.,,,, -bert-large-uncased-whole-word-masking-squad-0001,OV-2023.2,xeon,"Intel® Xeon® Platinum 8270 ",50.63,,17.75,0.003,0.123,16954,410,2,8477,205,54.45,FPS,FPS/$,FPS/TDP,msec.,,,, -bert-large-uncased-whole-word-masking-squad-0001,OV-2023.2,xeon,"Intel® Xeon® Platinum 8380 
",66.76,,27.44,0.004,0.124,18718,540,2,9359,270,231.49,FPS,FPS/$,FPS/TDP,msec.,,,, -bert-large-uncased-whole-word-masking-squad-0001,OV-2023.2,xeon,"Intel® Xeon® Platinum 8490H ",250.47,,45.77,0.007,0.358,34000,700,2,17000,350,27.75,FPS,FPS/$,FPS/TDP,msec.,,,, -bert-large-uncased-whole-word-masking-squad-0001,OV-2023.2,xeon,"Intel® Xeon® Silver 4216R ",20.73,,6.57,0.010,0.104,2022,200,2,1011,100,106.9,FPS,FPS/$,FPS/TDP,msec.,,,, -bert-large-uncased-whole-word-masking-squad-0001,OV-2023.2,xeon,"Intel® Xeon® Silver 4316 ",38.85,,14.53,0.017,0.129,2274,300,2,1137,150,156.69,FPS,FPS/$,FPS/TDP,msec.,,,, -bert-large-uncased-whole-word-masking-squad-0001,OV-2023.2,accel,"Intel® Data Center GPU Flex 170 ",148.54,102.98,,0.077,0.990,1925,150,1,1925,150,107.19,FPS,FPS/$,FPS/TDP,msec.,,,, -bert-large-uncased-whole-word-masking-squad-0001,OV-2023.2,accel,"Intel® Arc®A-Series Graphics ",127.99,94.72,,0.399,0.853,321,150,1,321,150,124.58,FPS,FPS/$,FPS/TDP,msec.,,,, -bert-large-uncased-whole-word-masking-squad-0001,OV-2023.2,core-iGPU,Intel® Core™ i7-1185G7 iGPU,9.07,6.62,4.27,0.021,0.324,426,28,1,426,28,452.44,FPS,FPS/$,FPS/TDP,msec.,,,, -bert-large-uncased-whole-word-masking-squad-0001,OV-2023.2,core-iGPU,Intel® Core™ i7-1185GRE iGPU,5.39,5.77,3.01,0.011,0.192,490,28,1,490,28,741.24,FPS,FPS/$,FPS/TDP,msec.,,,, -bert-large-uncased-whole-word-masking-squad-0001,OV-2023.2,core-iGPU,Intel® Core™ i7-12700H iGPU,10.54,7.57,5,0.021,0.092,502,115,1,502,115,379.08,FPS,FPS/$,FPS/TDP,msec.,,,, -bert-large-uncased-whole-word-masking-squad-0001,OV-2023.2,core-CPU+iGPU,Intel® Core™ i7-1185G7 CPU+iGPU,8.49,,3.46,0.020,0.303,426,28,1,426,28,,FPS,FPS/$,FPS/TDP,msec.,,,, -bert-large-uncased-whole-word-masking-squad-0001,OV-2023.2,core-CPU+iGPU,Intel® Core™ i7-12700H CPU+iGPU,8.53,,3.01,0.017,0.074,502,115,1,502,115,,FPS,FPS/$,FPS/TDP,msec.,,,, -end_rec,,,,,,,,,,,,,,,,,,,,,, -begin_rec," "," "," "," "," "," "," "," "," "," "," "," "," "," ",FPS,FPS/$,FPS/TDP,msec.,,,, 
-deeplabv3,OV-2023.2,atom,Intel® Atom® X6425E CPU,4.66,,2.87,0.070,0.388,67,12,1,67,12,219.16,FPS,FPS/$,FPS/TDP,msec.,,,, -deeplabv3,OV-2023.2,core,"Intel® Core™ i3-8100 ",22,,14.03,0.188,0.339,117,65,1,117,65,45.56,FPS,FPS/$,FPS/TDP,msec.,,,, -deeplabv3,OV-2023.2,core,"Intel® Core™ i5-10500TE ",35.15,,16.61,0.164,0.541,214,65,1,214,65,33.27,FPS,FPS/$,FPS/TDP,msec.,,,, -deeplabv3,OV-2023.2,core,"Intel® Core™ i5-13600K ",101.57,,41.76,0.309,0.813,329,125,1,329,125,16.21,FPS,FPS/$,FPS/TDP,msec.,,,, -deeplabv3,OV-2023.2,core,Intel® Core™ i7-1185G7 CPU,52.36,,16.31,0.123,1.870,426,28,1,426,28,19.93,FPS,FPS/$,FPS/TDP,msec.,,,, -deeplabv3,OV-2023.2,core,Intel® Core™ i7-1185GRE CPU,31.69,,9.45,0.065,1.132,490,28,1,490,28,29.7,FPS,FPS/$,FPS/TDP,msec.,,,, -deeplabv3,OV-2023.2,core,Intel® Core™ i7-12700H CPU,74.8,,29.13,0.149,0.650,502,115,1,502,115,16.96,FPS,FPS/$,FPS/TDP,msec.,,,, -deeplabv3,OV-2023.2,core,"Intel® Core™ i7-8700T ",32.22,,18.38,0.106,0.921,303,35,1,303,35,37.52,FPS,FPS/$,FPS/TDP,msec.,,,, -deeplabv3,OV-2023.2,core,"Intel® Core™ i9-10900TE ",39.4,,18.25,0.081,1.126,488,35,1,488,35,28.44,FPS,FPS/$,FPS/TDP,msec.,,,, -deeplabv3,OV-2023.2,core,"Intel® Core™ i9-12900TE ",58.11,,22.54,0.107,1.660,544,35,1,544,35,21.65,FPS,FPS/$,FPS/TDP,msec.,,,, -deeplabv3,OV-2023.2,core,"Intel® Core™ i9-13900K ",149.59,,57.89,0.250,1.197,599,125,1,599,125,12.49,FPS,FPS/$,FPS/TDP,msec.,,,, -deeplabv3,OV-2023.2,atom,Intel® Processor N200 CPU,1.72,,1.01,0.009,0.287,193,6,1,193,6,596.5,FPS,FPS/$,FPS/TDP,msec.,,,, -deeplabv3,OV-2023.2,xeon,"Intel® Xeon® W1290P ",51.12,,19.38,0.086,0.409,594,125,1,594,125,21.94,FPS,FPS/$,FPS/TDP,msec.,,,, -deeplabv3,OV-2023.2,xeon,"Intel® Xeon® E-2124G ",21.77,,14.75,0.087,0.307,249,71,1,249,71,45.69,FPS,FPS/$,FPS/TDP,msec.,,,, -deeplabv3,OV-2023.2,xeon,"Intel® Xeon® Gold 5218T ",188.66,,76.86,0.060,0.898,3144,210,2,1572,105,11.81,FPS,FPS/$,FPS/TDP,msec.,,,, -deeplabv3,OV-2023.2,xeon,"Intel® Xeon® Platinum 8270 
",413.18,,154.25,0.024,1.008,16954,410,2,8477,205,5.65,FPS,FPS/$,FPS/TDP,msec.,,,, -deeplabv3,OV-2023.2,xeon,"Intel® Xeon® Platinum 8380 ",564.23,,223.46,0.030,1.045,18718,540,2,9359,270,5.36,FPS,FPS/$,FPS/TDP,msec.,,,, -deeplabv3,OV-2023.2,xeon,"Intel® Xeon® Platinum 8490H ",1001.4,,380.46,0.029,1.431,34000,700,2,17000,350,3.6,FPS,FPS/$,FPS/TDP,msec.,,,, -deeplabv3,OV-2023.2,xeon,"Intel® Xeon® Silver 4216R ",182.57,,74.43,0.090,0.913,2022,200,2,1011,100,12.14,FPS,FPS/$,FPS/TDP,msec.,,,, -deeplabv3,OV-2023.2,xeon,"Intel® Xeon® Silver 4316 ",360.93,,138.14,0.159,1.203,2274,300,2,1137,150,7.04,FPS,FPS/$,FPS/TDP,msec.,,,, -deeplabv3,OV-2023.2,accel,"Intel® Data Center GPU Flex 170 ",732.85,602.34,,0.381,4.886,1925,150,1,1925,150,21.8,FPS,FPS/$,FPS/TDP,msec.,,,, -deeplabv3,OV-2023.2,accel,"Intel® Arc®A-Series Graphics ",597.79,484.98,,1.862,3.985,321,150,1,321,150,26.18,FPS,FPS/$,FPS/TDP,msec.,,,, -deeplabv3,OV-2023.2,atom,Intel® Celeron® 6305E CPU,11.64,,4.56,0.109,0.776,107,15,1,107,15,87.1,FPS,FPS/$,FPS/TDP,msec.,,,, -deeplabv3,OV-2023.2,atom-iGPU,Intel® Atom® X6425E iGPU,10.87,11.07,5.64,0.162,0.906,67,12,1,67,12,367.4,FPS,FPS/$,FPS/TDP,msec.,,,, -deeplabv3,OV-2023.2,core-iGPU,Intel® Core™ i7-1185G7 iGPU,104.71,48.95,27.69,0.246,3.739,426,28,1,426,28,37.85,FPS,FPS/$,FPS/TDP,msec.,,,, -deeplabv3,OV-2023.2,core-iGPU,Intel® Core™ i7-1185GRE iGPU,76.34,36.22,13.67,0.156,2.726,490,28,1,490,28,52.07,FPS,FPS/$,FPS/TDP,msec.,,,, -deeplabv3,OV-2023.2,core-iGPU,Intel® Core™ i7-12700H iGPU,113.65,52.72,33.36,0.226,0.988,502,115,1,502,115,34.81,FPS,FPS/$,FPS/TDP,msec.,,,, -deeplabv3,OV-2023.2,atom-iGPU,Intel® Processor N200 iGPU,3.65,1.92,1.27,0.019,0.609,193,6,1,193,6,1094.07,FPS,FPS/$,FPS/TDP,msec.,,,, -deeplabv3,OV-2023.2,atom-iGPU,Intel® Celeron® 6305E iGPU,60.3,27.73,16.44,0.564,4.020,107,15,1,107,15,66.22,FPS,FPS/$,FPS/TDP,msec.,,,, -deeplabv3,OV-2023.2,atom-CPU+iGPU,Intel® Atom® X6425E CPU+iGPU,11.11,,5.66,0.166,0.926,67,12,1,67,12,,FPS,FPS/$,FPS/TDP,msec.,,,, 
-deeplabv3,OV-2023.2,core-CPU+iGPU,Intel® Core™ i7-1185G7 CPU+iGPU,86.78,,24.13,0.204,3.099,426,28,1,426,28,,FPS,FPS/$,FPS/TDP,msec.,,,, -deeplabv3,OV-2023.2,core-CPU+iGPU,Intel® Core™ i7-12700H CPU+iGPU,75.71,,28.98,0.151,0.658,502,115,1,502,115,,FPS,FPS/$,FPS/TDP,msec.,,,, -deeplabv3,OV-2023.2,atom-CPU+iGPU,Intel® Processor N200 CPU+iGPU,4.66,,1.9,0.024,0.776,193,6,1,193,6,,FPS,FPS/$,FPS/TDP,msec.,,,, -deeplabv3,OV-2023.2,atom-CPU+iGPU,Intel® Celeron® 6305E CPU+iGPU,61.15,,16.89,0.571,4.077,107,15,1,107,15,,FPS,FPS/$,FPS/TDP,msec.,,,, -end_rec,,,,,,,,,,,,,,,,,,,,,, -begin_rec," "," "," "," "," "," "," "," "," "," "," "," "," "," ",FPS,FPS/$,FPS/TDP,msec.,,,, -efficientdet-d0,OV-2023.2,atom,Intel® Atom® X6425E CPU,7.29,,5.01,0.109,0.608,67,12,1,67,12,140.41,FPS,FPS/$,FPS/TDP,msec.,,,, -efficientdet-d0,OV-2023.2,core,"Intel® Core™ i3-8100 ",36.6,,24.31,0.313,0.563,117,65,1,117,65,28.3,FPS,FPS/$,FPS/TDP,msec.,,,, -efficientdet-d0,OV-2023.2,core,"Intel® Core™ i5-10500TE ",58.84,,29.38,0.275,0.905,214,65,1,214,65,21.11,FPS,FPS/$,FPS/TDP,msec.,,,, -efficientdet-d0,OV-2023.2,core,"Intel® Core™ i5-13600K ",139.32,,77.22,0.423,1.115,329,125,1,329,125,11.92,FPS,FPS/$,FPS/TDP,msec.,,,, -efficientdet-d0,OV-2023.2,core,Intel® Core™ i7-1185G7 CPU,73.76,,41.07,0.173,2.634,426,28,1,426,28,15.53,FPS,FPS/$,FPS/TDP,msec.,,,, -efficientdet-d0,OV-2023.2,core,Intel® Core™ i7-1185GRE CPU,52.57,,21.48,0.107,1.877,490,28,1,490,28,20.87,FPS,FPS/$,FPS/TDP,msec.,,,, -efficientdet-d0,OV-2023.2,core,Intel® Core™ i7-12700H CPU,114.46,,54.77,0.228,0.995,502,115,1,502,115,11.88,FPS,FPS/$,FPS/TDP,msec.,,,, -efficientdet-d0,OV-2023.2,core,"Intel® Core™ i7-8700T ",51.93,,34.22,0.171,1.484,303,35,1,303,35,24.28,FPS,FPS/$,FPS/TDP,msec.,,,, -efficientdet-d0,OV-2023.2,core,"Intel® Core™ i9-10900TE ",66,,35.11,0.135,1.886,488,35,1,488,35,18.95,FPS,FPS/$,FPS/TDP,msec.,,,, -efficientdet-d0,OV-2023.2,core,"Intel® Core™ i9-12900TE ",75.37,,44.41,0.139,2.154,544,35,1,544,35,15.52,FPS,FPS/$,FPS/TDP,msec.,,,, 
-efficientdet-d0,OV-2023.2,core,"Intel® Core™ i9-13900K ",207,,102.36,0.346,1.656,599,125,1,599,125,9.45,FPS,FPS/$,FPS/TDP,msec.,,,, -efficientdet-d0,OV-2023.2,atom,Intel® Processor N200 CPU,2.09,,1.67,0.011,0.349,193,6,1,193,6,488.71,FPS,FPS/$,FPS/TDP,msec.,,,, -efficientdet-d0,OV-2023.2,xeon,"Intel® Xeon® W1290P ",96.56,,38.61,0.163,0.772,594,125,1,594,125,14.19,FPS,FPS/$,FPS/TDP,msec.,,,, -efficientdet-d0,OV-2023.2,xeon,"Intel® Xeon® E-2124G ",35.01,,25.27,0.141,0.493,249,71,1,249,71,29.41,FPS,FPS/$,FPS/TDP,msec.,,,, -efficientdet-d0,OV-2023.2,xeon,"Intel® Xeon® Gold 5218T ",258.46,,164.63,0.082,1.231,3144,210,2,1572,105,11.88,FPS,FPS/$,FPS/TDP,msec.,,,, -efficientdet-d0,OV-2023.2,xeon,"Intel® Xeon® Platinum 8270 ",518.85,,310.76,0.031,1.265,16954,410,2,8477,205,7.42,FPS,FPS/$,FPS/TDP,msec.,,,, -efficientdet-d0,OV-2023.2,xeon,"Intel® Xeon® Platinum 8380 ",834.12,,495.89,0.045,1.545,18718,540,2,9359,270,4.31,FPS,FPS/$,FPS/TDP,msec.,,,, -efficientdet-d0,OV-2023.2,xeon,"Intel® Xeon® Platinum 8490H ",1043.83,,861.5,0.031,1.491,34000,700,2,17000,350,5.43,FPS,FPS/$,FPS/TDP,msec.,,,, -efficientdet-d0,OV-2023.2,xeon,"Intel® Xeon® Silver 4216R ",248.64,,157.52,0.123,1.243,2022,200,2,1011,100,12.27,FPS,FPS/$,FPS/TDP,msec.,,,, -efficientdet-d0,OV-2023.2,xeon,"Intel® Xeon® Silver 4316 ",469.81,,293.67,0.207,1.566,2274,300,2,1137,150,5.89,FPS,FPS/$,FPS/TDP,msec.,,,, -efficientdet-d0,OV-2023.2,accel,"Intel® Data Center GPU Flex 170 ",846.71,825.71,,0.440,5.645,1925,150,1,1925,150,18.66,FPS,FPS/$,FPS/TDP,msec.,,,, -efficientdet-d0,OV-2023.2,accel,"Intel® Arc®A-Series Graphics ",582.03,590.73,,1.813,3.880,321,150,1,321,150,26.38,FPS,FPS/$,FPS/TDP,msec.,,,, -efficientdet-d0,OV-2023.2,atom,Intel® Celeron® 6305E CPU,18.06,,11.09,0.169,1.204,107,15,1,107,15,57.26,FPS,FPS/$,FPS/TDP,msec.,,,, -efficientdet-d0,OV-2023.2,atom-iGPU,Intel® Atom® X6425E iGPU,19.71,22.48,11.18,0.294,1.643,67,12,1,67,12,202.28,FPS,FPS/$,FPS/TDP,msec.,,,, -efficientdet-d0,OV-2023.2,core-iGPU,Intel® Core™ 
i7-1185G7 iGPU,110.42,90.58,46.89,0.259,3.944,426,28,1,426,28,35.95,FPS,FPS/$,FPS/TDP,msec.,,,, -efficientdet-d0,OV-2023.2,core-iGPU,Intel® Core™ i7-1185GRE iGPU,62.89,49.91,23.65,0.128,2.246,490,28,1,490,28,63.05,FPS,FPS/$,FPS/TDP,msec.,,,, -efficientdet-d0,OV-2023.2,core-iGPU,Intel® Core™ i7-12700H iGPU,127.71,103.78,54.39,0.254,1.110,502,115,1,502,115,30.98,FPS,FPS/$,FPS/TDP,msec.,,,, -efficientdet-d0,OV-2023.2,atom-iGPU,Intel® Processor N200 iGPU,5.53,4.94,2.75,0.029,0.921,193,6,1,193,6,721.64,FPS,FPS/$,FPS/TDP,msec.,,,, -efficientdet-d0,OV-2023.2,atom-iGPU,Intel® Celeron® 6305E iGPU,72.14,60.61,33.85,0.674,4.809,107,15,1,107,15,55.18,FPS,FPS/$,FPS/TDP,msec.,,,, -efficientdet-d0,OV-2023.2,atom-CPU+iGPU,Intel® Atom® X6425E CPU+iGPU,20.11,,11.51,0.300,1.676,67,12,1,67,12,,FPS,FPS/$,FPS/TDP,msec.,,,, -efficientdet-d0,OV-2023.2,core-CPU+iGPU,Intel® Core™ i7-1185G7 CPU+iGPU,101.03,,48.05,0.237,3.608,426,28,1,426,28,,FPS,FPS/$,FPS/TDP,msec.,,,, -efficientdet-d0,OV-2023.2,core-CPU+iGPU,Intel® Core™ i7-12700H CPU+iGPU,114.83,,55.31,0.229,0.999,502,115,1,502,115,,FPS,FPS/$,FPS/TDP,msec.,,,, -efficientdet-d0,OV-2023.2,atom-CPU+iGPU,Intel® Processor N200 CPU+iGPU,5.73,,3.57,0.030,0.955,193,6,1,193,6,,FPS,FPS/$,FPS/TDP,msec.,,,, -efficientdet-d0,OV-2023.2,atom-CPU+iGPU,Intel® Celeron® 6305E CPU+iGPU,56.19,,32.14,0.525,3.746,107,15,1,107,15,,FPS,FPS/$,FPS/TDP,msec.,,,, -end_rec,,,,,,,,,,,,,,,,,,,,,, -begin_rec," "," "," "," "," "," "," "," "," "," "," "," "," "," ",FPS,FPS/$,FPS/TDP,msec.,,,, -mobilenet-v2,OV-2023.2,atom,Intel® Atom® X6425E CPU,132.01,,79.71,1.970,11.001,67,12,1,67,12,7.97,FPS,FPS/$,FPS/TDP,msec.,,,, -mobilenet-v2,OV-2023.2,core,"Intel® Core™ i3-8100 ",536.37,,,4.584,8.252,117,65,1,117,65,2.02,FPS,FPS/$,FPS/TDP,msec.,,,, -mobilenet-v2,OV-2023.2,core,"Intel® Core™ i5-10500TE ",898.55,,500.27,4.199,13.824,214,65,1,214,65,1.57,FPS,FPS/$,FPS/TDP,msec.,,,, -mobilenet-v2,OV-2023.2,core,"Intel® Core™ i5-13600K 
",2785.11,,1237.02,8.465,22.281,329,125,1,329,125,0.88,FPS,FPS/$,FPS/TDP,msec.,,,, -mobilenet-v2,OV-2023.2,core,Intel® Core™ i7-1185G7 CPU,1347.18,,525.71,3.162,48.113,426,28,1,426,28,0.86,FPS,FPS/$,FPS/TDP,msec.,,,, -mobilenet-v2,OV-2023.2,core,Intel® Core™ i7-1185GRE CPU,979.43,,319,1.999,34.980,490,28,1,490,28,1.19,FPS,FPS/$,FPS/TDP,msec.,,,, -mobilenet-v2,OV-2023.2,core,Intel® Core™ i7-12700H CPU,2099.29,,1056.24,4.182,18.255,502,115,1,502,115,1.1,FPS,FPS/$,FPS/TDP,msec.,,,, -mobilenet-v2,OV-2023.2,core,"Intel® Core™ i7-8700T ",741.65,,519.77,2.448,21.190,303,35,1,303,35,1.86,FPS,FPS/$,FPS/TDP,msec.,,,, -mobilenet-v2,OV-2023.2,core,"Intel® Core™ i9-10900TE ",949.26,,604.02,1.945,27.122,488,35,1,488,35,1.5,FPS,FPS/$,FPS/TDP,msec.,,,, -mobilenet-v2,OV-2023.2,core,"Intel® Core™ i9-12900TE ",1300.22,,657.07,2.390,37.149,544,35,1,544,35,1.32,FPS,FPS/$,FPS/TDP,msec.,,,, -mobilenet-v2,OV-2023.2,core,"Intel® Core™ i9-13900K ",4089.6,,2014.33,6.827,32.717,599,125,1,599,125,0.71,FPS,FPS/$,FPS/TDP,msec.,,,, -mobilenet-v2,OV-2023.2,atom,Intel® Processor N200 CPU,41.1,,29.71,0.213,6.851,193,6,1,193,6,27.14,FPS,FPS/$,FPS/TDP,msec.,,,, -mobilenet-v2,OV-2023.2,xeon,"Intel® Xeon® W1290P ",1450.73,,542.77,2.442,11.606,594,125,1,594,125,1.29,FPS,FPS/$,FPS/TDP,msec.,,,, -mobilenet-v2,OV-2023.2,xeon,"Intel® Xeon® E-2124G ",523.03,,,2.101,7.367,249,71,1,249,71,2.07,FPS,FPS/$,FPS/TDP,msec.,,,, -mobilenet-v2,OV-2023.2,xeon,"Intel® Xeon® Gold 5218T ",5410.81,,1915.84,1.721,25.766,3144,210,2,1572,105,1.42,FPS,FPS/$,FPS/TDP,msec.,,,, -mobilenet-v2,OV-2023.2,xeon,"Intel® Xeon® Platinum 8270 ",14207.13,,4438.67,0.838,34.652,16954,410,2,8477,205,0.93,FPS,FPS/$,FPS/TDP,msec.,,,, -mobilenet-v2,OV-2023.2,xeon,"Intel® Xeon® Platinum 8380 ",22308.51,,6801.73,1.192,41.312,18718,540,2,9359,270,0.57,FPS,FPS/$,FPS/TDP,msec.,,,, -mobilenet-v2,OV-2023.2,xeon,"Intel® Xeon® Platinum 8490H ",38064.38,,10986.01,1.120,54.378,34000,700,2,17000,350,0.66,FPS,FPS/$,FPS/TDP,msec.,,,, 
-mobilenet-v2,OV-2023.2,xeon,"Intel® Xeon® Silver 4216R ",5178.33,,1862.47,2.561,25.892,2022,200,2,1011,100,1.45,FPS,FPS/$,FPS/TDP,msec.,,,, -mobilenet-v2,OV-2023.2,xeon,"Intel® Xeon® Silver 4316 ",12161.33,,3597.47,5.348,40.538,2274,300,2,1137,150,0.56,FPS,FPS/$,FPS/TDP,msec.,,,, -mobilenet-v2,OV-2023.2,accel,"Intel® Data Center GPU Flex 170 ",6748.16,5698.62,,3.506,44.988,1925,150,1,1925,150,2.37,FPS,FPS/$,FPS/TDP,msec.,,,, -mobilenet-v2,OV-2023.2,accel,"Intel® Arc®A-Series Graphics ",4308.65,3849.95,,13.423,28.724,321,150,1,321,150,3.63,FPS,FPS/$,FPS/TDP,msec.,,,, -mobilenet-v2,OV-2023.2,atom,Intel® Celeron® 6305E CPU,265.71,,132.81,2.483,17.714,107,15,1,107,15,3.66,FPS,FPS/$,FPS/TDP,msec.,,,, -mobilenet-v2,OV-2023.2,atom-iGPU,Intel® Atom® X6425E iGPU,191.22,225.68,130.69,2.854,15.935,67,12,1,67,12,20.63,FPS,FPS/$,FPS/TDP,msec.,,,, -mobilenet-v2,OV-2023.2,core-iGPU,Intel® Core™ i7-1185G7 iGPU,1014.86,749.24,525.16,2.382,36.245,426,28,1,426,28,3.77,FPS,FPS/$,FPS/TDP,msec.,,,, -mobilenet-v2,OV-2023.2,core-iGPU,Intel® Core™ i7-1185GRE iGPU,880.63,557.89,349.94,1.797,31.451,490,28,1,490,28,4.25,FPS,FPS/$,FPS/TDP,msec.,,,, -mobilenet-v2,OV-2023.2,core-iGPU,Intel® Core™ i7-12700H iGPU,1319.62,916.27,563.83,2.629,11.475,502,115,1,502,115,2.83,FPS,FPS/$,FPS/TDP,msec.,,,, -mobilenet-v2,OV-2023.2,atom-iGPU,Intel® Processor N200 iGPU,58.68,40.52,26.29,0.304,9.781,193,6,1,193,6,67.34,FPS,FPS/$,FPS/TDP,msec.,,,, -mobilenet-v2,OV-2023.2,atom-iGPU,Intel® Celeron® 6305E iGPU,685.09,513.1,339.2,6.403,45.672,107,15,1,107,15,5.56,FPS,FPS/$,FPS/TDP,msec.,,,, -mobilenet-v2,OV-2023.2,atom-CPU+iGPU,Intel® Atom® X6425E CPU+iGPU,107.78,,137.9,1.609,8.981,67,12,1,67,12,,FPS,FPS/$,FPS/TDP,msec.,,,, -mobilenet-v2,OV-2023.2,core-CPU+iGPU,Intel® Core™ i7-1185G7 CPU+iGPU,2182.22,,612.95,5.123,77.937,426,28,1,426,28,,FPS,FPS/$,FPS/TDP,msec.,,,, -mobilenet-v2,OV-2023.2,core-CPU+iGPU,Intel® Core™ i7-12700H CPU+iGPU,2071.13,,1048.22,4.126,18.010,502,115,1,502,115,,FPS,FPS/$,FPS/TDP,msec.,,,, 
-mobilenet-v2,OV-2023.2,atom-CPU+iGPU,Intel® Processor N200 CPU+iGPU,115.35,,50.03,0.598,19.224,193,6,1,193,6,,FPS,FPS/$,FPS/TDP,msec.,,,, -mobilenet-v2,OV-2023.2,atom-CPU+iGPU,Intel® Celeron® 6305E CPU+iGPU,1465.46,,396.56,13.696,97.698,107,15,1,107,15,,FPS,FPS/$,FPS/TDP,msec.,,,, -end_rec,,,,,,,,,,,,,,,,,,,,,, -begin_rec," "," "," "," "," "," "," "," "," "," "," "," "," "," ",FPS,FPS/$,FPS/TDP,msec.,,,, -resnet-50,OV-2023.2,atom,Intel® Atom® X6425E CPU,19.92,,8.18,0.297,1.660,67,12,1,67,12,51.27,FPS,FPS/$,FPS/TDP,msec.,,,, -resnet-50,OV-2023.2,core,"Intel® Core™ i3-8100 ",96.91,,50.72,0.828,1.491,117,65,1,117,65,10.72,FPS,FPS/$,FPS/TDP,msec.,,,, -resnet-50,OV-2023.2,core,"Intel® Core™ i5-10500TE ",145.07,,74.01,0.678,2.232,214,65,1,214,65,8.19,FPS,FPS/$,FPS/TDP,msec.,,,, -resnet-50,OV-2023.2,core,"Intel® Core™ i5-13600K ",515.17,,140.11,1.566,4.121,329,125,1,329,125,3.89,FPS,FPS/$,FPS/TDP,msec.,,,, -resnet-50,OV-2023.2,core,Intel® Core™ i7-1185G7 CPU,229.34,,61.85,0.538,8.191,426,28,1,426,28,5.03,FPS,FPS/$,FPS/TDP,msec.,,,, -resnet-50,OV-2023.2,core,Intel® Core™ i7-1185GRE CPU,172.44,,45.06,0.352,6.159,490,28,1,490,28,6.62,FPS,FPS/$,FPS/TDP,msec.,,,, -resnet-50,OV-2023.2,core,Intel® Core™ i7-12700H CPU,445.18,,122.92,0.887,3.871,502,115,1,502,115,3.93,FPS,FPS/$,FPS/TDP,msec.,,,, -resnet-50,OV-2023.2,core,"Intel® Core™ i7-8700T ",122.89,,62.1,0.406,3.511,303,35,1,303,35,9.97,FPS,FPS/$,FPS/TDP,msec.,,,, -resnet-50,OV-2023.2,core,"Intel® Core™ i9-10900TE ",156.59,,75.57,0.321,4.474,488,35,1,488,35,7.6,FPS,FPS/$,FPS/TDP,msec.,,,, -resnet-50,OV-2023.2,core,"Intel® Core™ i9-12900TE ",269.4,,72.67,0.495,7.697,544,35,1,544,35,4.89,FPS,FPS/$,FPS/TDP,msec.,,,, -resnet-50,OV-2023.2,core,"Intel® Core™ i9-13900K ",749.69,,228.22,1.252,5.998,599,125,1,599,125,2.98,FPS,FPS/$,FPS/TDP,msec.,,,, -resnet-50,OV-2023.2,atom,Intel® Processor N200 CPU,6.72,,3.13,0.035,1.120,193,6,1,193,6,159.9,FPS,FPS/$,FPS/TDP,msec.,,,, -resnet-50,OV-2023.2,xeon,"Intel® Xeon® W1290P 
",240.85,,96.84,0.405,1.927,594,125,1,594,125,5.5,FPS,FPS/$,FPS/TDP,msec.,,,, -resnet-50,OV-2023.2,xeon,"Intel® Xeon® E-2124G ",92.92,,49.94,0.373,1.309,249,71,1,249,71,11.12,FPS,FPS/$,FPS/TDP,msec.,,,, -resnet-50,OV-2023.2,xeon,"Intel® Xeon® Gold 5218T ",968.92,,267.96,0.308,4.614,3144,210,2,1572,105,2.91,FPS,FPS/$,FPS/TDP,msec.,,,, -resnet-50,OV-2023.2,xeon,"Intel® Xeon® Platinum 8270 ",2902.26,,747.22,0.171,7.079,16954,410,2,8477,205,1.55,FPS,FPS/$,FPS/TDP,msec.,,,, -resnet-50,OV-2023.2,xeon,"Intel® Xeon® Platinum 8380 ",4946.11,,1154.11,0.264,9.159,18718,540,2,9359,270,,FPS,FPS/$,FPS/TDP,msec.,,,, -resnet-50,OV-2023.2,xeon,"Intel® Xeon® Platinum 8490H ",19987.31,,1672.83,0.588,28.553,34000,700,2,17000,350,1.02,FPS,FPS/$,FPS/TDP,msec.,,,, -resnet-50,OV-2023.2,xeon,"Intel® Xeon® Silver 4216R ",931.66,,257,0.461,4.658,2022,200,2,1011,100,3,FPS,FPS/$,FPS/TDP,msec.,,,, -resnet-50,OV-2023.2,xeon,"Intel® Xeon® Silver 4316 ",2276.19,,562.55,1.001,7.587,2274,300,2,1137,150,,FPS,FPS/$,FPS/TDP,msec.,,,, -resnet-50,OV-2023.2,accel,"Intel® Data Center GPU Flex 170 ",3436.03,2103.96,,1.785,22.907,1925,150,1,1925,150,4.65,FPS,FPS/$,FPS/TDP,msec.,,,, -resnet-50,OV-2023.2,accel,"Intel® Arc®A-Series Graphics ",2320.83,1555.26,,7.230,15.472,321,150,1,321,150,6.8,FPS,FPS/$,FPS/TDP,msec.,,,, -resnet-50,OV-2023.2,atom,Intel® Celeron® 6305E CPU,49.59,,14.36,0.463,3.306,107,15,1,107,15,19.89,FPS,FPS/$,FPS/TDP,msec.,,,, -resnet-50,OV-2023.2,atom-iGPU,Intel® Atom® X6425E iGPU,49.36,52.35,27.44,0.737,4.113,67,12,1,67,12,80.69,FPS,FPS/$,FPS/TDP,msec.,,,, -resnet-50,OV-2023.2,core-iGPU,Intel® Core™ i7-1185G7 iGPU,351.53,206.09,116.49,0.825,12.555,426,28,1,426,28,11.12,FPS,FPS/$,FPS/TDP,msec.,,,, -resnet-50,OV-2023.2,core-iGPU,Intel® Core™ i7-1185GRE iGPU,291.8,170.69,95.05,0.596,10.421,490,28,1,490,28,13.6,FPS,FPS/$,FPS/TDP,msec.,,,, -resnet-50,OV-2023.2,core-iGPU,Intel® Core™ i7-12700H iGPU,389.36,224.15,136.98,0.776,3.386,502,115,1,502,115,10.01,FPS,FPS/$,FPS/TDP,msec.,,,, 
-resnet-50,OV-2023.2,atom-iGPU,Intel® Processor N200 iGPU,14.66,7.82,4.24,0.076,2.444,193,6,1,193,6,271.96,FPS,FPS/$,FPS/TDP,msec.,,,, -resnet-50,OV-2023.2,atom-iGPU,Intel® Celeron® 6305E iGPU,213.06,118.28,67.33,1.991,14.204,107,15,1,107,15,18.66,FPS,FPS/$,FPS/TDP,msec.,,,, -resnet-50,OV-2023.2,atom-CPU+iGPU,Intel® Atom® X6425E CPU+iGPU,73.78,,32.26,1.101,6.148,67,12,1,67,12,,FPS,FPS/$,FPS/TDP,msec.,,,, -resnet-50,OV-2023.2,core-CPU+iGPU,Intel® Core™ i7-1185G7 CPU+iGPU,467.05,,119.19,1.096,16.680,426,28,1,426,28,,FPS,FPS/$,FPS/TDP,msec.,,,, -resnet-50,OV-2023.2,core-CPU+iGPU,Intel® Core™ i7-12700H CPU+iGPU,446.64,,123.08,0.890,3.884,502,115,1,502,115,,FPS,FPS/$,FPS/TDP,msec.,,,, -resnet-50,OV-2023.2,atom-CPU+iGPU,Intel® Processor N200 CPU+iGPU,20.62,,6.29,0.107,3.437,193,6,1,193,6,,FPS,FPS/$,FPS/TDP,msec.,,,, -resnet-50,OV-2023.2,atom-CPU+iGPU,Intel® Celeron® 6305E CPU+iGPU,299.3,,75.5,2.797,19.953,107,15,1,107,15,,FPS,FPS/$,FPS/TDP,msec.,,,, -end_rec,,,,,,,,,,,,,,,,,,,,,, -begin_rec," "," "," "," "," "," "," "," "," "," "," "," "," "," ",FPS,FPS/$,FPS/TDP,msec.,,,, -ssd-resnet34-1200,OV-2023.2,atom,Intel® Atom® X6425E CPU,0.33,,0.13,0.005,0.028,67,12,1,67,12,2993.01,FPS,FPS/$,FPS/TDP,msec.,,,, -ssd-resnet34-1200,OV-2023.2,core,"Intel® Core™ i3-8100 ",1.68,,0.97,0.014,0.026,117,65,1,117,65,601.85,FPS,FPS/$,FPS/TDP,msec.,,,, -ssd-resnet34-1200,OV-2023.2,core,"Intel® Core™ i5-10500TE ",2.42,,1.4,0.011,0.037,214,65,1,214,65,459.92,FPS,FPS/$,FPS/TDP,msec.,,,, -ssd-resnet34-1200,OV-2023.2,core,"Intel® Core™ i5-13600K ",8.24,,2.4,0.025,0.066,329,125,1,329,125,163.48,FPS,FPS/$,FPS/TDP,msec.,,,, -ssd-resnet34-1200,OV-2023.2,core,Intel® Core™ i7-1185G7 CPU,3.91,,1,0.009,0.140,426,28,1,426,28,277.92,FPS,FPS/$,FPS/TDP,msec.,,,, -ssd-resnet34-1200,OV-2023.2,core,Intel® Core™ i7-1185GRE CPU,2.88,,0.77,0.006,0.103,490,28,1,490,28,338.91,FPS,FPS/$,FPS/TDP,msec.,,,, -ssd-resnet34-1200,OV-2023.2,core,Intel® Core™ i7-12700H 
CPU,7.23,,2.11,0.014,0.063,502,115,1,502,115,160.16,FPS,FPS/$,FPS/TDP,msec.,,,, -ssd-resnet34-1200,OV-2023.2,core,"Intel® Core™ i7-8700T ",2.02,,1.13,0.007,0.058,303,35,1,303,35,564.49,FPS,FPS/$,FPS/TDP,msec.,,,, -ssd-resnet34-1200,OV-2023.2,core,"Intel® Core™ i9-10900TE ",2.65,,1.47,0.005,0.076,488,35,1,488,35,411.44,FPS,FPS/$,FPS/TDP,msec.,,,, -ssd-resnet34-1200,OV-2023.2,core,"Intel® Core™ i9-12900TE ",4.43,,1.32,0.008,0.126,544,35,1,544,35,233.69,FPS,FPS/$,FPS/TDP,msec.,,,, -ssd-resnet34-1200,OV-2023.2,core,"Intel® Core™ i9-13900K ",12.56,,4.02,0.021,0.100,599,125,1,599,125,125.42,FPS,FPS/$,FPS/TDP,msec.,,,, -ssd-resnet34-1200,OV-2023.2,atom,Intel® Processor N200 CPU,0.11,,0.05,0.001,0.019,193,6,1,193,6,8949.48,FPS,FPS/$,FPS/TDP,msec.,,,, -ssd-resnet34-1200,OV-2023.2,xeon,"Intel® Xeon® W1290P ",4.33,,2.45,0.007,0.035,594,125,1,594,125,238.2,FPS,FPS/$,FPS/TDP,msec.,,,, -ssd-resnet34-1200,OV-2023.2,xeon,"Intel® Xeon® E-2124G ",1.6,,0.92,0.006,0.023,249,71,1,249,71,628.09,FPS,FPS/$,FPS/TDP,msec.,,,, -ssd-resnet34-1200,OV-2023.2,xeon,"Intel® Xeon® Gold 5218T ",17.64,,4.57,0.006,0.084,3144,210,2,1572,105,115.69,FPS,FPS/$,FPS/TDP,msec.,,,, -ssd-resnet34-1200,OV-2023.2,xeon,"Intel® Xeon® Platinum 8270 ",57.78,,14.8,0.003,0.141,16954,410,2,8477,205,36.97,FPS,FPS/$,FPS/TDP,msec.,,,, -ssd-resnet34-1200,OV-2023.2,xeon,"Intel® Xeon® Platinum 8380 ",78.79,,20.72,0.004,0.146,18718,540,2,9359,270,108.29,FPS,FPS/$,FPS/TDP,msec.,,,, -ssd-resnet34-1200,OV-2023.2,xeon,"Intel® Xeon® Platinum 8490H ",447.58,,31.29,0.013,0.639,34000,700,2,17000,350,8.52,FPS,FPS/$,FPS/TDP,msec.,,,, -ssd-resnet34-1200,OV-2023.2,xeon,"Intel® Xeon® Silver 4216R ",16.78,,4.35,0.008,0.084,2022,200,2,1011,100,121.64,FPS,FPS/$,FPS/TDP,msec.,,,, -ssd-resnet34-1200,OV-2023.2,xeon,"Intel® Xeon® Silver 4316 ",42.36,,10.47,0.019,0.141,2274,300,2,1137,150,62,FPS,FPS/$,FPS/TDP,msec.,,,, -ssd-resnet34-1200,OV-2023.2,accel,"Intel® Data Center GPU Flex 170 
",212,109.86,,0.110,1.413,1925,150,1,1925,150,75.46,FPS,FPS/$,FPS/TDP,msec.,,,, -ssd-resnet34-1200,OV-2023.2,accel,"Intel® Arc®A-Series Graphics ",147.33,81.3,,0.459,0.982,321,150,1,321,150,107.9,FPS,FPS/$,FPS/TDP,msec.,,,, -ssd-resnet34-1200,OV-2023.2,atom,Intel® Celeron® 6305E CPU,0.89,,0.23,0.008,0.059,107,15,1,107,15,1121.85,FPS,FPS/$,FPS/TDP,msec.,,,, -ssd-resnet34-1200,OV-2023.2,atom-iGPU,Intel® Atom® X6425E iGPU,1.18,1.18,0.6,0.018,0.098,67,12,1,67,12,3388.59,FPS,FPS/$,FPS/TDP,msec.,,,, -ssd-resnet34-1200,OV-2023.2,core-iGPU,Intel® Core™ i7-1185G7 iGPU,9.69,5.42,2.82,0.023,0.346,426,28,1,426,28,422.17,FPS,FPS/$,FPS/TDP,msec.,,,, -ssd-resnet34-1200,OV-2023.2,core-iGPU,Intel® Core™ i7-1185GRE iGPU,8.81,4.73,2.22,0.018,0.315,490,28,1,490,28,454.51,FPS,FPS/$,FPS/TDP,msec.,,,, -ssd-resnet34-1200,OV-2023.2,core-iGPU,Intel® Core™ i7-12700H iGPU,10.57,6.15,3.31,0.021,0.092,502,115,1,502,115,378.05,FPS,FPS/$,FPS/TDP,msec.,,,, -ssd-resnet34-1200,OV-2023.2,atom-iGPU,Intel® Processor N200 iGPU,0.29,0.16,,0.001,0.048,193,6,1,193,6,13815.91,FPS,FPS/$,FPS/TDP,msec.,,,, -ssd-resnet34-1200,OV-2023.2,atom-iGPU,Intel® Celeron® 6305E iGPU,5.07,2.64,1.41,0.047,0.338,107,15,1,107,15,774.3,FPS,FPS/$,FPS/TDP,msec.,,,, -ssd-resnet34-1200,OV-2023.2,atom-CPU+iGPU,Intel® Atom® X6425E CPU+iGPU,0.33,,0.13,0.005,0.028,67,12,1,67,12,,FPS,FPS/$,FPS/TDP,msec.,,,, -ssd-resnet34-1200,OV-2023.2,core-CPU+iGPU,Intel® Core™ i7-1185G7 CPU+iGPU,3.91,,1,0.009,0.140,426,28,1,426,28,,FPS,FPS/$,FPS/TDP,msec.,,,, -ssd-resnet34-1200,OV-2023.2,core-CPU+iGPU,Intel® Core™ i7-12700H CPU+iGPU,7.22,,2.11,0.014,0.063,502,115,1,502,115,,FPS,FPS/$,FPS/TDP,msec.,,,, -ssd-resnet34-1200,OV-2023.2,atom-CPU+iGPU,Intel® Processor N200 CPU+iGPU,0.11,,0.05,0.001,0.019,193,6,1,193,6,,FPS,FPS/$,FPS/TDP,msec.,,,, -ssd-resnet34-1200,OV-2023.2,atom-CPU+iGPU,Intel® Celeron® 6305E CPU+iGPU,0.89,,0.23,0.008,0.059,107,15,1,107,15,,FPS,FPS/$,FPS/TDP,msec.,,,, -end_rec,,,,,,,,,,,,,,,,,,,,,, -begin_rec," "," "," "," "," "," "," "," 
"," "," "," "," "," "," ",FPS,FPS/$,FPS/TDP,msec.,,,, -ssd_mobilenet_v1_coco,OV-2023.2,atom,Intel® Atom® X6425E CPU,45.25,,21.49,0.675,3.771,67,12,1,67,12,23.03,FPS,FPS/$,FPS/TDP,msec.,,,, -ssd_mobilenet_v1_coco,OV-2023.2,core,"Intel® Core™ i3-8100 ",211.26,,122.9,1.806,3.250,117,65,1,117,65,4.94,FPS,FPS/$,FPS/TDP,msec.,,,, -ssd_mobilenet_v1_coco,OV-2023.2,core,"Intel® Core™ i5-10500TE ",328.11,,171.73,1.533,5.048,214,65,1,214,65,3.6,FPS,FPS/$,FPS/TDP,msec.,,,, -ssd_mobilenet_v1_coco,OV-2023.2,core,"Intel® Core™ i5-13600K ",958.88,,352.8,2.915,7.671,329,125,1,329,125,2.39,FPS,FPS/$,FPS/TDP,msec.,,,, -ssd_mobilenet_v1_coco,OV-2023.2,core,Intel® Core™ i7-1185G7 CPU,516.83,,149.6,1.213,18.458,426,28,1,426,28,1.95,FPS,FPS/$,FPS/TDP,msec.,,,, -ssd_mobilenet_v1_coco,OV-2023.2,core,Intel® Core™ i7-1185GRE CPU,387.14,,100.71,0.790,13.827,490,28,1,490,28,2.82,FPS,FPS/$,FPS/TDP,msec.,,,, -ssd_mobilenet_v1_coco,OV-2023.2,core,Intel® Core™ i7-12700H CPU,851.54,,313.45,1.696,7.405,502,115,1,502,115,2.26,FPS,FPS/$,FPS/TDP,msec.,,,, -ssd_mobilenet_v1_coco,OV-2023.2,core,"Intel® Core™ i7-8700T ",276.74,,157.91,0.913,7.907,303,35,1,303,35,4.32,FPS,FPS/$,FPS/TDP,msec.,,,, -ssd_mobilenet_v1_coco,OV-2023.2,core,"Intel® Core™ i9-10900TE ",364.53,,192.13,0.747,10.415,488,35,1,488,35,3.37,FPS,FPS/$,FPS/TDP,msec.,,,, -ssd_mobilenet_v1_coco,OV-2023.2,core,"Intel® Core™ i9-12900TE ",524.73,,184.04,0.965,14.992,544,35,1,544,35,3.11,FPS,FPS/$,FPS/TDP,msec.,,,, -ssd_mobilenet_v1_coco,OV-2023.2,core,"Intel® Core™ i9-13900K ",1448.44,,577.78,2.418,11.587,599,125,1,599,125,2.07,FPS,FPS/$,FPS/TDP,msec.,,,, -ssd_mobilenet_v1_coco,OV-2023.2,atom,Intel® Processor N200 CPU,14.48,,7.94,0.075,2.413,193,6,1,193,6,72.01,FPS,FPS/$,FPS/TDP,msec.,,,, -ssd_mobilenet_v1_coco,OV-2023.2,xeon,"Intel® Xeon® W1290P ",575.79,,221.76,0.969,4.606,594,125,1,594,125,2.37,FPS,FPS/$,FPS/TDP,msec.,,,, -ssd_mobilenet_v1_coco,OV-2023.2,xeon,"Intel® Xeon® E-2124G 
",202.62,,125.71,0.814,2.854,249,71,1,249,71,5.11,FPS,FPS/$,FPS/TDP,msec.,,,, -ssd_mobilenet_v1_coco,OV-2023.2,xeon,"Intel® Xeon® Gold 5218T ",2056.28,,640.87,0.654,9.792,3144,210,2,1572,105,1.56,FPS,FPS/$,FPS/TDP,msec.,,,, -ssd_mobilenet_v1_coco,OV-2023.2,xeon,"Intel® Xeon® Platinum 8270 ",5764.35,,1656.74,0.340,14.059,16954,410,2,8477,205,1.1,FPS,FPS/$,FPS/TDP,msec.,,,, -ssd_mobilenet_v1_coco,OV-2023.2,xeon,"Intel® Xeon® Platinum 8380 ",10274.61,,2320.94,0.549,19.027,18718,540,2,9359,270,0.66,FPS,FPS/$,FPS/TDP,msec.,,,, -ssd_mobilenet_v1_coco,OV-2023.2,xeon,"Intel® Xeon® Platinum 8490H ",22310.17,,3557.58,0.656,31.872,34000,700,2,17000,350,0.82,FPS,FPS/$,FPS/TDP,msec.,,,, -ssd_mobilenet_v1_coco,OV-2023.2,xeon,"Intel® Xeon® Silver 4216R ",1961.85,,610.96,0.970,9.809,2022,200,2,1011,100,1.63,FPS,FPS/$,FPS/TDP,msec.,,,, -ssd_mobilenet_v1_coco,OV-2023.2,xeon,"Intel® Xeon® Silver 4316 ",4825.79,,1246.04,2.122,16.086,2274,300,2,1137,150,0.81,FPS,FPS/$,FPS/TDP,msec.,,,, -ssd_mobilenet_v1_coco,OV-2023.2,accel,"Intel® Data Center GPU Flex 170 ",4044.15,3428.72,,2.101,26.961,1925,150,1,1925,150,3.93,FPS,FPS/$,FPS/TDP,msec.,,,, -ssd_mobilenet_v1_coco,OV-2023.2,accel,"Intel® Arc®A-Series Graphics ",2984.21,2546.5,,9.297,19.895,321,150,1,321,150,5.28,FPS,FPS/$,FPS/TDP,msec.,,,, -ssd_mobilenet_v1_coco,OV-2023.2,atom,Intel® Celeron® 6305E CPU,107.12,,36.58,1.001,7.142,107,15,1,107,15,9.17,FPS,FPS/$,FPS/TDP,msec.,,,, -ssd_mobilenet_v1_coco,OV-2023.2,atom-iGPU,Intel® Atom® X6425E iGPU,92.52,95.67,51.13,1.381,7.710,67,12,1,67,12,42.26,FPS,FPS/$,FPS/TDP,msec.,,,, -ssd_mobilenet_v1_coco,OV-2023.2,core-iGPU,Intel® Core™ i7-1185G7 iGPU,651.76,382.05,253.7,1.530,23.277,426,28,1,426,28,6.02,FPS,FPS/$,FPS/TDP,msec.,,,, -ssd_mobilenet_v1_coco,OV-2023.2,core-iGPU,Intel® Core™ i7-1185GRE iGPU,524.22,312.45,186.78,1.070,18.722,490,28,1,490,28,7.46,FPS,FPS/$,FPS/TDP,msec.,,,, -ssd_mobilenet_v1_coco,OV-2023.2,core-iGPU,Intel® Core™ i7-12700H 
iGPU,773.55,416.41,274.89,1.541,6.727,502,115,1,502,115,4.96,FPS,FPS/$,FPS/TDP,msec.,,,, -ssd_mobilenet_v1_coco,OV-2023.2,atom-iGPU,Intel® Processor N200 iGPU,29.11,15.38,9.5,0.151,4.852,193,6,1,193,6,136.41,FPS,FPS/$,FPS/TDP,msec.,,,, -ssd_mobilenet_v1_coco,OV-2023.2,atom-iGPU,Intel® Celeron® 6305E iGPU,411.09,221.78,136.65,3.842,27.406,107,15,1,107,15,9.59,FPS,FPS/$,FPS/TDP,msec.,,,, -ssd_mobilenet_v1_coco,OV-2023.2,atom-CPU+iGPU,Intel® Atom® X6425E CPU+iGPU,108.74,,57.49,1.623,9.061,67,12,1,67,12,,FPS,FPS/$,FPS/TDP,msec.,,,, -ssd_mobilenet_v1_coco,OV-2023.2,core-CPU+iGPU,Intel® Core™ i7-1185G7 CPU+iGPU,681.22,,234.33,1.599,24.329,426,28,1,426,28,,FPS,FPS/$,FPS/TDP,msec.,,,, -ssd_mobilenet_v1_coco,OV-2023.2,core-CPU+iGPU,Intel® Core™ i7-12700H CPU+iGPU,846.65,,312.78,1.687,7.362,502,115,1,502,115,,FPS,FPS/$,FPS/TDP,msec.,,,, -ssd_mobilenet_v1_coco,OV-2023.2,atom-CPU+iGPU,Intel® Processor N200 CPU+iGPU,35.06,,14.07,0.182,5.843,193,6,1,193,6,,FPS,FPS/$,FPS/TDP,msec.,,,, -ssd_mobilenet_v1_coco,OV-2023.2,atom-CPU+iGPU,Intel® Celeron® 6305E CPU+iGPU,299.91,,136.25,2.803,19.994,107,15,1,107,15,,FPS,FPS/$,FPS/TDP,msec.,,,, -end_rec,,,,,,,,,,,,,,,,,,,,,, -begin_rec," "," "," "," "," "," "," "," "," "," "," "," "," "," ",FPS,FPS/$,FPS/TDP,msec.,,,, -unet-camvid-onnx-0001,OV-2023.2,atom,Intel® Atom® X6425E CPU,0.48,,0.06,0.007,0.040,67,12,1,67,12,2086.28,FPS,FPS/$,FPS/TDP,msec.,,,, -unet-camvid-onnx-0001,OV-2023.2,core,"Intel® Core™ i3-8100 ",2.42,,1.55,0.021,0.037,117,65,1,117,65,426.14,FPS,FPS/$,FPS/TDP,msec.,,,, -unet-camvid-onnx-0001,OV-2023.2,core,"Intel® Core™ i5-10500TE ",3.6,,2.28,0.017,0.055,214,65,1,214,65,324.72,FPS,FPS/$,FPS/TDP,msec.,,,, -unet-camvid-onnx-0001,OV-2023.2,core,"Intel® Core™ i5-13600K ",11.52,,3.96,0.035,0.092,329,125,1,329,125,121.88,FPS,FPS/$,FPS/TDP,msec.,,,, -unet-camvid-onnx-0001,OV-2023.2,core,Intel® Core™ i7-1185G7 CPU,6.54,,1.63,0.015,0.234,426,28,1,426,28,168.96,FPS,FPS/$,FPS/TDP,msec.,,,, -unet-camvid-onnx-0001,OV-2023.2,core,Intel® 
Core™ i7-1185GRE CPU,4.87,,1.22,0.010,0.174,490,28,1,490,28,209.5,FPS,FPS/$,FPS/TDP,msec.,,,, -unet-camvid-onnx-0001,OV-2023.2,core,Intel® Core™ i7-12700H CPU,10.23,,3.55,0.020,0.089,502,115,1,502,115,123.74,FPS,FPS/$,FPS/TDP,msec.,,,, -unet-camvid-onnx-0001,OV-2023.2,core,"Intel® Core™ i7-8700T ",3.02,,1.86,0.010,0.086,303,35,1,303,35,385.98,FPS,FPS/$,FPS/TDP,msec.,,,, -unet-camvid-onnx-0001,OV-2023.2,core,"Intel® Core™ i9-10900TE ",3.86,,2.4,0.008,0.110,488,35,1,488,35,286.48,FPS,FPS/$,FPS/TDP,msec.,,,, -unet-camvid-onnx-0001,OV-2023.2,core,"Intel® Core™ i9-12900TE ",6.29,,2.21,0.012,0.180,544,35,1,544,35,167.25,FPS,FPS/$,FPS/TDP,msec.,,,, -unet-camvid-onnx-0001,OV-2023.2,core,"Intel® Core™ i9-13900K ",17.97,,6.61,0.030,0.144,599,125,1,599,125,91.7,FPS,FPS/$,FPS/TDP,msec.,,,, -unet-camvid-onnx-0001,OV-2023.2,atom,Intel® Processor N200 CPU,0.17,,0.09,0.001,0.029,193,6,1,193,6,5851.61,FPS,FPS/$,FPS/TDP,msec.,,,, -unet-camvid-onnx-0001,OV-2023.2,xeon,"Intel® Xeon® W1290P ",6.17,,3.96,0.010,0.049,594,125,1,594,125,180.39,FPS,FPS/$,FPS/TDP,msec.,,,, -unet-camvid-onnx-0001,OV-2023.2,xeon,"Intel® Xeon® E-2124G ",2.31,,1.48,0.009,0.033,249,71,1,249,71,434.36,FPS,FPS/$,FPS/TDP,msec.,,,, -unet-camvid-onnx-0001,OV-2023.2,xeon,"Intel® Xeon® Gold 5218T ",29.19,,7.31,0.009,0.139,3144,210,2,1572,105,71.02,FPS,FPS/$,FPS/TDP,msec.,,,, -unet-camvid-onnx-0001,OV-2023.2,xeon,"Intel® Xeon® Platinum 8270 ",95.18,,21.6,0.006,0.232,16954,410,2,8477,205,23.81,FPS,FPS/$,FPS/TDP,msec.,,,, -unet-camvid-onnx-0001,OV-2023.2,xeon,"Intel® Xeon® Platinum 8380 ",129.12,,31.53,0.007,0.239,18718,540,2,9359,270,73.82,FPS,FPS/$,FPS/TDP,msec.,,,, -unet-camvid-onnx-0001,OV-2023.2,xeon,"Intel® Xeon® Platinum 8490H ",594.44,,48.37,0.017,0.849,34000,700,2,17000,350,8.51,FPS,FPS/$,FPS/TDP,msec.,,,, -unet-camvid-onnx-0001,OV-2023.2,xeon,"Intel® Xeon® Silver 4216R ",27.77,,6.96,0.014,0.139,2022,200,2,1011,100,74.6,FPS,FPS/$,FPS/TDP,msec.,,,, -unet-camvid-onnx-0001,OV-2023.2,xeon,"Intel® Xeon® Silver 4316 
",69.04,,15.92,0.030,0.230,2274,300,2,1137,150,43.95,FPS,FPS/$,FPS/TDP,msec.,,,, -unet-camvid-onnx-0001,OV-2023.2,accel,"Intel® Data Center GPU Flex 170 ",308.2,201.07,,0.160,2.055,1925,150,1,1925,150,51.9,FPS,FPS/$,FPS/TDP,msec.,,,, -unet-camvid-onnx-0001,OV-2023.2,accel,"Intel® Arc®A-Series Graphics ",264.35,182.28,,0.824,1.762,321,150,1,321,150,60.21,FPS,FPS/$,FPS/TDP,msec.,,,, -unet-camvid-onnx-0001,OV-2023.2,atom,Intel® Celeron® 6305E CPU,1.49,,0.38,0.014,0.099,107,15,1,107,15,675.13,FPS,FPS/$,FPS/TDP,msec.,,,, -unet-camvid-onnx-0001,OV-2023.2,atom-iGPU,Intel® Atom® X6425E iGPU,0.98,1.99,0.98,0.015,0.082,67,12,1,67,12,4060.12,FPS,FPS/$,FPS/TDP,msec.,,,, -unet-camvid-onnx-0001,OV-2023.2,core-iGPU,Intel® Core™ i7-1185G7 iGPU,17.25,8.84,4.82,0.040,0.616,426,28,1,426,28,227.85,FPS,FPS/$,FPS/TDP,msec.,,,, -unet-camvid-onnx-0001,OV-2023.2,core-iGPU,Intel® Core™ i7-1185GRE iGPU,15.51,7.82,4.16,0.032,0.554,490,28,1,490,28,257.82,FPS,FPS/$,FPS/TDP,msec.,,,, -unet-camvid-onnx-0001,OV-2023.2,core-iGPU,Intel® Core™ i7-12700H iGPU,18.55,9.77,5.39,0.037,0.161,502,115,1,502,115,215.12,FPS,FPS/$,FPS/TDP,msec.,,,, -unet-camvid-onnx-0001,OV-2023.2,atom-iGPU,Intel® Processor N200 iGPU,0.46,0.25,0.14,0.002,0.077,193,6,1,193,6,8685.49,FPS,FPS/$,FPS/TDP,msec.,,,, -unet-camvid-onnx-0001,OV-2023.2,atom-iGPU,Intel® Celeron® 6305E iGPU,8.41,4.38,2.38,0.079,0.560,107,15,1,107,15,475.59,FPS,FPS/$,FPS/TDP,msec.,,,, -unet-camvid-onnx-0001,OV-2023.2,atom-CPU+iGPU,Intel® Atom® X6425E CPU+iGPU,1.23,,0.79,0.018,0.102,67,12,1,67,12,,FPS,FPS/$,FPS/TDP,msec.,,,, -unet-camvid-onnx-0001,OV-2023.2,core-CPU+iGPU,Intel® Core™ i7-1185G7 CPU+iGPU,13.96,,3.78,0.033,0.499,426,28,1,426,28,,FPS,FPS/$,FPS/TDP,msec.,,,, -unet-camvid-onnx-0001,OV-2023.2,core-CPU+iGPU,Intel® Core™ i7-12700H CPU+iGPU,10.26,,3.52,0.020,0.089,502,115,1,502,115,,FPS,FPS/$,FPS/TDP,msec.,,,, -unet-camvid-onnx-0001,OV-2023.2,atom-CPU+iGPU,Intel® Processor N200 CPU+iGPU,0.57,,0.19,0.003,0.094,193,6,1,193,6,,FPS,FPS/$,FPS/TDP,msec.,,,, 
-unet-camvid-onnx-0001,OV-2023.2,atom-CPU+iGPU,Intel® Celeron® 6305E CPU+iGPU,8.94,,2.44,0.084,0.596,107,15,1,107,15,,FPS,FPS/$,FPS/TDP,msec.,,,, -end_rec,,,,,,,,,,,,,,,,,,,,,, -begin_rec," "," "," "," "," "," "," "," "," "," "," "," "," "," ",FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v3,OV-2023.2,atom,Intel® Atom® X6425E CPU,2.09,,0.88,0.031,0.174,67,12,1,67,12,484.41,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v3,OV-2023.2,core,"Intel® Core™ i3-8100 ",10.63,,5.8,0.091,0.163,117,65,1,117,65,95.04,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v3,OV-2023.2,core,"Intel® Core™ i5-10500TE ",15.37,,8.28,0.072,0.236,214,65,1,214,65,74.25,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v3,OV-2023.2,core,"Intel® Core™ i5-13600K ",51.79,,15.62,0.157,0.414,329,125,1,329,125,29.87,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v3,OV-2023.2,core,Intel® Core™ i7-1185G7 CPU,24.16,,6.61,0.057,0.863,426,28,1,426,28,46.04,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v3,OV-2023.2,core,Intel® Core™ i7-1185GRE CPU,18.04,,4.84,0.037,0.644,490,28,1,490,28,57.2,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v3,OV-2023.2,core,Intel® Core™ i7-12700H CPU,45.55,,13.34,0.091,0.396,502,115,1,502,115,29.39,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v3,OV-2023.2,core,"Intel® Core™ i7-8700T ",12.71,,6.82,0.042,0.363,303,35,1,303,35,87.06,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v3,OV-2023.2,core,"Intel® Core™ i9-10900TE ",16.64,,8.64,0.034,0.475,488,35,1,488,35,67.32,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v3,OV-2023.2,core,"Intel® Core™ i9-12900TE ",27.33,,8.16,0.050,0.781,544,35,1,544,35,41.73,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v3,OV-2023.2,core,"Intel® Core™ i9-13900K ",78.06,,25.64,0.130,0.624,599,125,1,599,125,23.25,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v3,OV-2023.2,atom,Intel® Processor N200 CPU,0.7,,0.34,0.004,0.117,193,6,1,193,6,1470.7,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v3,OV-2023.2,xeon,"Intel® Xeon® W1290P ",27.37,,14.08,0.046,0.219,594,125,1,594,125,40.66,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v3,OV-2023.2,xeon,"Intel® Xeon® E-2124G 
",10.06,,5.64,0.040,0.142,249,71,1,249,71,100.33,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v3,OV-2023.2,xeon,"Intel® Xeon® Gold 5218T ",106.36,,29.72,0.034,0.506,3144,210,2,1572,105,21.82,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v3,OV-2023.2,xeon,"Intel® Xeon® Platinum 8270 ",313.83,,87.89,0.019,0.765,16954,410,2,8477,205,10.5,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v3,OV-2023.2,xeon,"Intel® Xeon® Platinum 8380 ",490.61,,109.01,0.026,0.909,18718,540,2,9359,270,,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v3,OV-2023.2,xeon,"Intel® Xeon® Platinum 8490H ",2125.85,,193.93,0.063,3.037,34000,700,2,17000,350,3.31,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v3,OV-2023.2,xeon,"Intel® Xeon® Silver 4216R ",101.13,,28.36,0.050,0.506,2022,200,2,1011,100,22.77,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v3,OV-2023.2,xeon,"Intel® Xeon® Silver 4316 ",242.25,,62.31,0.107,0.808,2274,300,2,1137,150,13.97,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v3,OV-2023.2,accel,"Intel® Data Center GPU Flex 170 ",784.51,385.29,,0.408,5.230,1925,150,1,1925,150,20.34,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v3,OV-2023.2,accel,"Intel® Arc®A-Series Graphics ",582.91,341.6,,1.816,3.886,321,150,1,321,150,27.27,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v3,OV-2023.2,atom,Intel® Celeron® 6305E CPU,5.45,,1.54,0.051,0.363,107,15,1,107,15,184.42,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v3,OV-2023.2,atom-iGPU,Intel® Atom® X6425E iGPU,6.74,6.85,3.38,0.101,0.562,67,12,1,67,12,591.73,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v3,OV-2023.2,core-iGPU,Intel® Core™ i7-1185G7 iGPU,63.57,29.68,16.2,0.149,2.270,426,28,1,426,28,63.78,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v3,OV-2023.2,core-iGPU,Intel® Core™ i7-1185GRE iGPU,57.51,26.04,13.46,0.117,2.054,490,28,1,490,28,69.04,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v3,OV-2023.2,core-iGPU,Intel® Core™ i7-12700H iGPU,70.17,33.53,18.68,0.140,0.610,502,115,1,502,115,56.66,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v3,OV-2023.2,atom-iGPU,Intel® Processor N200 iGPU,1.76,0.94,0.5,0.009,0.293,193,6,1,193,6,2270.28,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v3,OV-2023.2,atom-iGPU,Intel® Celeron® 
6305E iGPU,32.22,15.09,8.06,0.301,2.148,107,15,1,107,15,123.79,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v3,OV-2023.2,atom-CPU+iGPU,Intel® Atom® X6425E CPU+iGPU,7.72,,3.75,0.115,0.643,67,12,1,67,12,,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v3,OV-2023.2,core-CPU+iGPU,Intel® Core™ i7-1185G7 CPU+iGPU,51.27,,13.81,0.120,1.831,426,28,1,426,28,,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v3,OV-2023.2,core-CPU+iGPU,Intel® Core™ i7-12700H CPU+iGPU,45.37,,13.46,0.090,0.395,502,115,1,502,115,,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v3,OV-2023.2,atom-CPU+iGPU,Intel® Processor N200 CPU+iGPU,2.17,,0.7,0.011,0.361,193,6,1,193,6,,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v3,OV-2023.2,atom-CPU+iGPU,Intel® Celeron® 6305E CPU+iGPU,33.62,,8.52,0.314,2.242,107,15,1,107,15,,FPS,FPS/$,FPS/TDP,msec.,,,, -end_rec,,,,,,,,,,,,,,,,,,,,,, -begin_rec," "," "," "," "," "," "," "," "," "," "," "," "," "," ",FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v3_tiny,OV-2023.2,atom,Intel® Atom® X6425E CPU,22.9,,10.3,0.342,1.908,67,12,1,67,12,44.81,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v3_tiny,OV-2023.2,core,"Intel® Core™ i3-8100 ",111.7,,63.53,0.955,1.718,117,65,1,117,65,9.05,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v3_tiny,OV-2023.2,core,"Intel® Core™ i5-10500TE ",167.36,,91.55,0.782,2.575,214,65,1,214,65,6.74,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v3_tiny,OV-2023.2,core,"Intel® Core™ i5-13600K ",600,,195.96,1.824,4.800,329,125,1,329,125,3.05,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v3_tiny,OV-2023.2,core,Intel® Core™ i7-1185G7 CPU,252.28,,77.33,0.592,9.010,426,28,1,426,28,4.56,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v3_tiny,OV-2023.2,core,Intel® Core™ i7-1185GRE CPU,186.61,,55.02,0.381,6.665,490,28,1,490,28,5.72,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v3_tiny,OV-2023.2,core,Intel® Core™ i7-12700H CPU,501.3,,153.33,0.999,4.359,502,115,1,502,115,3.07,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v3_tiny,OV-2023.2,core,"Intel® Core™ i7-8700T ",137.83,,76.63,0.455,3.938,303,35,1,303,35,8.19,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v3_tiny,OV-2023.2,core,"Intel® Core™ i9-10900TE 
",184.15,,95.48,0.377,5.261,488,35,1,488,35,6.36,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v3_tiny,OV-2023.2,core,"Intel® Core™ i9-12900TE ",293.87,,93.77,0.540,8.396,544,35,1,544,35,4.16,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v3_tiny,OV-2023.2,core,"Intel® Core™ i9-13900K ",859.57,,285.93,1.435,6.877,599,125,1,599,125,2.43,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v3_tiny,OV-2023.2,atom,Intel® Processor N200 CPU,7.83,,4.08,0.041,1.306,193,6,1,193,6,136.7,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v3_tiny,OV-2023.2,xeon,"Intel® Xeon® W1290P ",298.34,,148.85,0.502,2.387,594,125,1,594,125,4.01,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v3_tiny,OV-2023.2,xeon,"Intel® Xeon® E-2124G ",106.15,,62.71,0.426,1.495,249,71,1,249,71,9.46,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v3_tiny,OV-2023.2,xeon,"Intel® Xeon® Gold 5218T ",1051.35,,338.78,0.334,5.006,3144,210,2,1572,105,2.52,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v3_tiny,OV-2023.2,xeon,"Intel® Xeon® Platinum 8270 ",2835.63,,919.26,0.167,6.916,16954,410,2,8477,205,1.22,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v3_tiny,OV-2023.2,xeon,"Intel® Xeon® Platinum 8380 ",4653.13,,1373.23,0.249,8.617,18718,540,2,9359,270,0.87,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v3_tiny,OV-2023.2,xeon,"Intel® Xeon® Platinum 8490H ",13069.92,,2139.01,0.384,18.671,34000,700,2,17000,350,1.07,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v3_tiny,OV-2023.2,xeon,"Intel® Xeon® Silver 4216R ",1009.33,,322.76,0.499,5.047,2022,200,2,1011,100,2.62,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v3_tiny,OV-2023.2,xeon,"Intel® Xeon® Silver 4316 ",2219.25,,701.36,0.976,7.397,2274,300,2,1137,150,1.33,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v3_tiny,OV-2023.2,accel,"Intel® Data Center GPU Flex 170 ",3774.19,2809.6,,1.961,25.161,1925,150,1,1925,150,4.2,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v3_tiny,OV-2023.2,accel,"Intel® Arc®A-Series Graphics ",2481.58,2188.3,,7.731,16.544,321,150,1,321,150,6.38,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v3_tiny,OV-2023.2,atom,Intel® Celeron® 6305E CPU,54.03,,17.97,0.505,3.602,107,15,1,107,15,18.27,FPS,FPS/$,FPS/TDP,msec.,,,, 
-yolo_v3_tiny,OV-2023.2,atom-iGPU,Intel® Atom® X6425E iGPU,65.71,66.39,33.87,0.981,5.476,67,12,1,67,12,60.33,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v3_tiny,OV-2023.2,core-iGPU,Intel® Core™ i7-1185G7 iGPU,546.82,290.91,170.54,1.284,19.529,426,28,1,426,28,7.02,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v3_tiny,OV-2023.2,core-iGPU,Intel® Core™ i7-1185GRE iGPU,494.07,258.4,135.87,1.008,17.645,490,28,1,490,28,7.98,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v3_tiny,OV-2023.2,core-iGPU,Intel® Core™ i7-12700H iGPU,614.04,322.38,201.06,1.223,5.339,502,115,1,502,115,6.27,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v3_tiny,OV-2023.2,atom-iGPU,Intel® Processor N200 iGPU,18.67,9.83,5.51,0.097,3.112,193,6,1,193,6,213.14,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v3_tiny,OV-2023.2,atom-iGPU,Intel® Celeron® 6305E iGPU,292.06,153.11,86.79,2.730,19.471,107,15,1,107,15,13.59,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v3_tiny,OV-2023.2,atom-CPU+iGPU,Intel® Atom® X6425E CPU+iGPU,28.49,,39.7,0.425,2.374,67,12,1,67,12,,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v3_tiny,OV-2023.2,core-CPU+iGPU,Intel® Core™ i7-1185G7 CPU+iGPU,485.88,,147.22,1.141,17.353,426,28,1,426,28,,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v3_tiny,OV-2023.2,core-CPU+iGPU,Intel® Core™ i7-12700H CPU+iGPU,504.34,,154.35,1.005,4.386,502,115,1,502,115,,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v3_tiny,OV-2023.2,atom-CPU+iGPU,Intel® Processor N200 CPU+iGPU,23.01,,8.08,0.119,3.835,193,6,1,193,6,,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v3_tiny,OV-2023.2,atom-CPU+iGPU,Intel® Celeron® 6305E CPU+iGPU,322.03,,92.97,3.010,21.469,107,15,1,107,15,,FPS,FPS/$,FPS/TDP,msec.,,,, -end_rec,,,,,,,,,,,,,,,,,,,,,, -begin_rec," "," "," "," "," "," "," "," "," "," "," "," "," "," ",FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v8n,OV-2023.2,atom,Intel® Atom® X6425E CPU,10.23,,5.1,0.153,0.853,67,12,1,67,12,101.71,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v8n,OV-2023.2,core,"Intel® Core™ i3-8100 ",53.43,,33.01,0.457,0.822,117,65,1,117,65,19.24,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v8n,OV-2023.2,core,"Intel® Core™ i5-10500TE 
",81.28,,46.84,0.380,1.251,214,65,1,214,65,13.7,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v8n,OV-2023.2,core,"Intel® Core™ i5-13600K ",249.13,,95.35,0.757,1.993,329,125,1,329,125,6.67,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v8n,OV-2023.2,core,Intel® Core™ i7-1185G7 CPU,110.57,,40.76,0.260,3.949,426,28,1,426,28,10.77,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v8n,OV-2023.2,core,Intel® Core™ i7-1185GRE CPU,77.4,,27.48,0.158,2.764,490,28,1,490,28,13.63,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v8n,OV-2023.2,core,Intel® Core™ i7-12700H CPU,213.22,,81.23,0.425,1.854,502,115,1,502,115,6.64,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v8n,OV-2023.2,core,"Intel® Core™ i7-8700T ",71.39,,42.39,0.236,2.040,303,35,1,303,35,16.54,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v8n,OV-2023.2,core,"Intel® Core™ i9-10900TE ",92.64,,52.82,0.190,2.647,488,35,1,488,35,12.63,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v8n,OV-2023.2,core,"Intel® Core™ i9-12900TE ",132.43,,50.68,0.243,3.784,544,35,1,544,35,9.16,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v8n,OV-2023.2,core,"Intel® Core™ i9-13900K ",377.83,,153.02,0.631,3.023,599,125,1,599,125,5.31,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v8n,OV-2023.2,atom,Intel® Processor N200 CPU,3.26,,1.94,0.017,0.543,193,6,1,193,6,316.73,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v8n,OV-2023.2,xeon,"Intel® Xeon® W1290P ",135.15,,72.61,0.228,1.081,594,125,1,594,125,9.22,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v8n,OV-2023.2,xeon,"Intel® Xeon® E-2124G ",52.15,,32.9,0.209,0.735,249,71,1,249,71,19.49,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v8n,OV-2023.2,xeon,"Intel® Xeon® Gold 5218T ",450.82,,174.94,0.143,2.147,3144,210,2,1572,105,5.96,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v8n,OV-2023.2,xeon,"Intel® Xeon® Platinum 8270 ",998.4,,454.7,0.059,2.435,16954,410,2,8477,205,3.53,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v8n,OV-2023.2,xeon,"Intel® Xeon® Platinum 8380 ",1714.12,,554.58,0.092,3.174,18718,540,2,9359,270,2.38,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v8n,OV-2023.2,xeon,"Intel® Xeon® Platinum 8490H 
",2889.04,,998.41,0.085,4.127,34000,700,2,17000,350,3.61,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v8n,OV-2023.2,xeon,"Intel® Xeon® Silver 4216R ",431.74,,165.69,0.214,2.159,2022,200,2,1011,100,6.18,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v8n,OV-2023.2,xeon,"Intel® Xeon® Silver 4316 ",862.18,,340.38,0.379,2.874,2274,300,2,1137,150,3.32,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v8n,OV-2023.2,accel,"Intel® Data Center GPU Flex 170 ",1539.24,1433.21,,0.800,10.262,1925,150,1,1925,150,10.28,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v8n,OV-2023.2,accel,"Intel® Arc®A-Series Graphics ",1005.97,1032.49,,3.134,6.706,321,150,1,321,150,15.79,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v8n,OV-2023.2,atom,Intel® Celeron® 6305E CPU,24.25,,9.56,0.227,1.617,107,15,1,107,15,40.75,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v8n,OV-2023.2,atom-iGPU,Intel® Atom® X6425E iGPU,32.03,33.52,19.17,0.478,2.669,67,12,1,67,12,124.04,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v8n,OV-2023.2,core-iGPU,Intel® Core™ i7-1185G7 iGPU,206.31,140.75,88.2,0.484,7.368,426,28,1,426,28,19.11,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v8n,OV-2023.2,core-iGPU,Intel® Core™ i7-1185GRE iGPU,164.45,109.09,61.03,0.336,5.873,490,28,1,490,28,24.02,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v8n,OV-2023.2,core-iGPU,Intel® Core™ i7-12700H iGPU,220.75,149.98,96.81,0.440,1.920,502,115,1,502,115,17.82,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v8n,OV-2023.2,atom-iGPU,Intel® Processor N200 iGPU,8.37,5.57,3.25,0.043,1.394,193,6,1,193,6,477.17,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v8n,OV-2023.2,atom-iGPU,Intel® Celeron® 6305E iGPU,35.14,,20.32,0.524,2.928,67,12,1,67,12,,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v8n,OV-2023.2,atom-CPU+iGPU,Intel® Atom® X6425E CPU+iGPU,179.3,,74.12,0.421,6.403,426,28,1,426,28,,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v8n,OV-2023.2,core-CPU+iGPU,Intel® Core™ i7-1185G7 CPU+iGPU,212.84,,82.48,0.424,1.851,502,115,1,502,115,,FPS,FPS/$,FPS/TDP,msec.,,,, -yolo_v8n,OV-2023.2,core-CPU+iGPU,Intel® Core™ i7-12700H CPU+iGPU,10.06,,4.44,0.052,1.676,193,6,1,193,6,,FPS,FPS/$,FPS/TDP,msec.,,,, 
-yolo_v8n,OV-2023.2,atom-CPU+iGPU,Intel® Processor N200 CPU+iGPU,113.96,,48.9,1.065,7.597,107,15,1,107,15,,FPS,FPS/$,FPS/TDP,msec.,,,, -end_rec,,atom-CPU+iGPU,Intel® Celeron® 6305E CPU+iGPU,,,,,,,,,,,,,,,,,,, -begin_rec,,,,,,,,,,,,,,,,,,,,,, -chatGLM2-6B,OV-2023.2,core,"Intel® Core™ i9-13900K ",277,,340,,,,,,,,,msec./token,FPS/$,FPS/TDP,msec./token,,,,374 -chatGLM2-6B,OV-2023.2,xeon,"Intel® Xeon® Platinum 8380 ",,173,,,,,,,,,,msec./token,FPS/$,FPS/TDP,msec./token,,,, -chatGLM2-6B,OV-2023.2,xeon,Intel® Xeon® Platinum 8490H,,114,,,,,,,,,,msec./token,FPS/$,FPS/TDP,msec./token,,,, -chatGLM2-6B,OV-2023.2,accel,"Intel® Data Center GPU Flex 170 ",,,,,,,,,,,,msec./token,FPS/$,FPS/TDP,msec./token,,,, -chatGLM2-6B,OV-2023.2,accel,"Intel® Arc®A-Series Graphics ",95,121,,,,,,,,,,msec./token,FPS/$,FPS/TDP,msec./token,,,, -end_rec,,,,,,,,,,,,,,,,,,,,,, -begin_rec,,,,,,,,,,,,,,,,,,,,,, -Llama-2-7b-chat,OV-2023.2,core,"Intel® Core™ i9-13900K ",415,,420,,,,,,,,,msec./token,FPS/$,FPS/TDP,msec./token,,,,417 -Llama-2-7b-chat,OV-2023.2,xeon,"Intel® Xeon® Platinum 8380 ",179,,201,,,,,,,,,msec./token,FPS/$,FPS/TDP,msec./token,,,,133 -Llama-2-7b-chat,OV-2023.2,xeon,Intel® Xeon® Platinum 8490H,143,,133,,,,,,,,,msec./token,FPS/$,FPS/TDP,msec./token,,,,136 -Llama-2-7b-chat,OV-2023.2,accel,"Intel® Data Center GPU Flex 170 ",111,95,,,,,,,,,,msec./token,FPS/$,FPS/TDP,msec./token,,,,126 -Llama-2-7b-chat,OV-2023.2,accel,"Intel® Arc®A-Series Graphics ",163,163,,,,,,,,,,msec./token,FPS/$,FPS/TDP,msec./token,,,,221 -end_rec,,,,,,,,,,,,,,,,,,,,,, -begin_rec,,,,,,,,,,,,,,,,,,,,,, -Stable-Diffusion-v2-1,OV-2023.2,accel,"Intel® Data Center GPU Flex 170 ",7.1,4.4,,,,,,,,,,sec.,FPS/$,FPS/TDP,sec.,,,, -end_rec,,,,,,,,,,,,,,,,,,,,,, \ No newline at end of file +Network model,Release,IE-Type,Platform 
name,Throughput-INT8,Throughput-FP16,Throughput-FP32,Value,Efficiency,Price,TDP,Sockets,Price/Socket,TDP/Socket,Latency,UOM_T,UOM_V,UOM_E,UOM_L,Latency_FP16,Latency_FP32,Latency_int4,Throughput_INT4,Latency_BF16,Throughput_BF16 +begin_rec," "," "," "," "," "," "," "," "," "," "," "," "," "," ",FPS,FPS/$,FPS/TDP,msec.,,,,,, +bert-base-cased,OV-2023.2,core,"Intel® Core™ i3-8100 ",21.27,34.33,45.1,0.182,0.327,117,65,1,117,65,48.62,FPS,FPS/$,FPS/TDP,msec.,,,,12,15,30 +bert-base-cased,OV-2023.2,core,"Intel® Core™ i5-10500TE ",32.04,,21.72,0.150,0.493,214,65,1,214,65,36.77,FPS,FPS/$,FPS/TDP,msec.,,,,,, +bert-base-cased,OV-2023.2,core,"Intel® Core™ i5-13600K ",112.78,,45.21,0.343,0.902,329,125,1,329,125,17.53,FPS,FPS/$,FPS/TDP,msec.,,,,,, +bert-base-cased,OV-2023.2,core,Intel® Core™ i7-1185G7 CPU,50.11,,18.36,0.118,1.790,426,28,1,426,28,23.39,FPS,FPS/$,FPS/TDP,msec.,,,,,, +bert-base-cased,OV-2023.2,core,Intel® Core™ i7-1185GRE CPU,38.17,,13.67,0.078,1.363,490,28,1,490,28,29.4,FPS,FPS/$,FPS/TDP,msec.,,,,,, +bert-base-cased,OV-2023.2,core,Intel® Core™ i7-12700H CPU,88.62,,35.37,0.177,0.771,502,115,1,502,115,17.1,FPS,FPS/$,FPS/TDP,msec.,,,,,, +bert-base-cased,OV-2023.2,core,"Intel® Core™ i7-8700T ",27.47,,18.34,0.091,0.785,303,35,1,303,35,43.1,FPS,FPS/$,FPS/TDP,msec.,,,,,,45 +bert-base-cased,OV-2023.2,core,"Intel® Core™ i9-10900TE ",33.58,35.58,21.38,0.069,0.960,488,35,1,488,35,37.7,FPS,FPS/$,FPS/TDP,msec.,,,,34,,45 +bert-base-cased,OV-2023.2,core,"Intel® Core™ i9-12900TE ",52.74,,20.43,0.097,1.507,544,35,1,544,35,23.05,FPS,FPS/$,FPS/TDP,msec.,,,,,, +bert-base-cased,OV-2023.2,core,"Intel® Core™ i9-13900K ",164.86,,66.26,0.275,1.319,599,125,1,599,125,13.76,FPS,FPS/$,FPS/TDP,msec.,,,,,, +bert-base-cased,OV-2023.2,xeon,"Intel® Xeon® W1290P ",50.94,,33.27,0.086,0.407,594,125,1,594,125,29.19,FPS,FPS/$,FPS/TDP,msec.,,,,,, +bert-base-cased,OV-2023.2,xeon,"Intel® Xeon® E-2124G ",20.73,,,0.083,0.292,249,71,1,249,71,49.54,FPS,FPS/$,FPS/TDP,msec.,,,,,, 
+bert-base-cased,OV-2023.2,xeon,"Intel® Xeon® Gold 5218T ",215.86,,80.31,0.069,1.028,3144,210,2,1572,105,14.03,FPS,FPS/$,FPS/TDP,msec.,,,,,, +bert-base-cased,OV-2023.2,xeon,"Intel® Xeon® Platinum 8270 ",569.64,,222.97,0.034,1.389,16954,410,2,8477,205,7.97,FPS,FPS/$,FPS/TDP,msec.,,,,,, +bert-base-cased,OV-2023.2,xeon,"Intel® Xeon® Platinum 8380 ",876.04,,336.63,0.047,1.622,18718,540,2,9359,270,,FPS,FPS/$,FPS/TDP,msec.,,,,,, +bert-base-cased,OV-2023.2,xeon,"Intel® Xeon® Platinum 8490H ",3131.74,,505.87,0.092,4.474,34000,700,2,17000,350,4.14,FPS,FPS/$,FPS/TDP,msec.,,,,,, +bert-base-cased,OV-2023.2,xeon,"Intel® Xeon® Silver 4216R ",205.61,,76.34,0.102,1.028,2022,200,2,1011,100,14.66,FPS,FPS/$,FPS/TDP,msec.,,,,,, +bert-base-cased,OV-2023.2,xeon,"Intel® Xeon® Silver 4316 ",423.53,,166.33,0.186,1.412,2274,300,2,1137,150,,FPS,FPS/$,FPS/TDP,msec.,,,,,, +bert-base-cased,OV-2023.2,accel,"Intel® Data Center GPU Flex 170 ",824.04,680.35,,0.428,5.494,1925,150,1,1925,150,19.37,FPS,FPS/$,FPS/TDP,msec.,,,,,, +bert-base-cased,OV-2023.2,accel,"Intel® Arc®A-Series Graphics ",620.32,554.08,,1.932,4.135,321,150,1,321,150,25.63,FPS,FPS/$,FPS/TDP,msec.,,,,,, +bert-base-cased,OV-2023.2,core-iGPU,Intel® Core™ i7-1185G7 iGPU,83.66,60.2,42.14,0.196,2.988,426,28,1,426,28,47.28,FPS,FPS/$,FPS/TDP,msec.,,,,,, +bert-base-cased,OV-2023.2,core-iGPU,Intel® Core™ i7-1185GRE iGPU,72.43,55.39,36.92,0.148,2.587,490,28,1,490,28,55.03,FPS,FPS/$,FPS/TDP,msec.,,,,,, +bert-base-cased,OV-2023.2,core-iGPU,Intel® Core™ i7-12700H iGPU,91.52,64.83,46.37,0.182,0.796,502,115,1,502,115,43.39,FPS,FPS/$,FPS/TDP,msec.,,,,,, +bert-base-cased,OV-2023.2,core-CPU+iGPU,Intel® Core™ i7-1185G7 CPU+iGPU,94.49,,39.79,0.222,3.375,426,28,1,426,28,,FPS,FPS/$,FPS/TDP,msec.,,,,,, +bert-base-cased,OV-2023.2,core-CPU+iGPU,Intel® Core™ i7-12700H CPU+iGPU,88.79,,35.19,0.177,0.772,502,115,1,502,115,,FPS,FPS/$,FPS/TDP,msec.,,,,,, +end_rec,,,,,,,,,,,,,,,,,,,,,,,, +begin_rec," "," "," "," "," "," "," "," "," "," "," "," "," "," 
",FPS,FPS/$,FPS/TDP,msec.,,,,,, +bert-large-uncased-whole-word-masking-squad-0001,OV-2023.2,core,"Intel® Core™ i3-8100 ",2.09,,,0.018,0.032,117,65,1,117,65,493.93,FPS,FPS/$,FPS/TDP,msec.,,,,,, +bert-large-uncased-whole-word-masking-squad-0001,OV-2023.2,core,"Intel® Core™ i5-10500TE ",2.98,,1.86,0.014,0.046,214,65,1,214,65,351.2,FPS,FPS/$,FPS/TDP,msec.,,,,,, +bert-large-uncased-whole-word-masking-squad-0001,OV-2023.2,core,"Intel® Core™ i5-13600K ",10.07,,3.75,0.031,0.081,329,125,1,329,125,156.29,FPS,FPS/$,FPS/TDP,msec.,,,,,, +bert-large-uncased-whole-word-masking-squad-0001,OV-2023.2,core,Intel® Core™ i7-1185G7 CPU,5.07,,1.63,0.012,0.181,426,28,1,426,28,219.53,FPS,FPS/$,FPS/TDP,msec.,,,,,, +bert-large-uncased-whole-word-masking-squad-0001,OV-2023.2,core,Intel® Core™ i7-1185GRE CPU,3.78,,1.21,0.008,0.135,490,28,1,490,28,267.11,FPS,FPS/$,FPS/TDP,msec.,,,,,, +bert-large-uncased-whole-word-masking-squad-0001,OV-2023.2,core,Intel® Core™ i7-12700H CPU,8.31,,3.08,0.017,0.072,502,115,1,502,115,155.69,FPS,FPS/$,FPS/TDP,msec.,,,,,, +bert-large-uncased-whole-word-masking-squad-0001,OV-2023.2,core,"Intel® Core™ i7-8700T ",2.7,,1.61,0.009,0.077,303,35,1,303,35,411.88,FPS,FPS/$,FPS/TDP,msec.,,,,,, +bert-large-uncased-whole-word-masking-squad-0001,OV-2023.2,core,"Intel® Core™ i9-10900TE ",3.28,,1.99,0.007,0.094,488,35,1,488,35,331.95,FPS,FPS/$,FPS/TDP,msec.,,,,,, +bert-large-uncased-whole-word-masking-squad-0001,OV-2023.2,core,"Intel® Core™ i9-12900TE ",5.12,,1.83,0.009,0.146,544,35,1,544,35,210.03,FPS,FPS/$,FPS/TDP,msec.,,,,,, +bert-large-uncased-whole-word-masking-squad-0001,OV-2023.2,core,"Intel® Core™ i9-13900K ",15.37,,5.95,0.026,0.123,599,125,1,599,125,113.51,FPS,FPS/$,FPS/TDP,msec.,,,,,, +bert-large-uncased-whole-word-masking-squad-0001,OV-2023.2,xeon,"Intel® Xeon® W1290P ",4.65,,3.11,0.008,0.037,594,125,1,594,125,228.25,FPS,FPS/$,FPS/TDP,msec.,,,,,, +bert-large-uncased-whole-word-masking-squad-0001,OV-2023.2,xeon,"Intel® Xeon® E-2124G 
",2.11,,,0.008,0.030,249,71,1,249,71,484.55,FPS,FPS/$,FPS/TDP,msec.,,,,,, +bert-large-uncased-whole-word-masking-squad-0001,OV-2023.2,xeon,"Intel® Xeon® Gold 5218T ",20.81,,6.91,0.007,0.099,3144,210,2,1572,105,106.59,FPS,FPS/$,FPS/TDP,msec.,,,,,, +bert-large-uncased-whole-word-masking-squad-0001,OV-2023.2,xeon,"Intel® Xeon® Platinum 8270 ",50.63,,17.75,0.003,0.123,16954,410,2,8477,205,54.45,FPS,FPS/$,FPS/TDP,msec.,,,,,, +bert-large-uncased-whole-word-masking-squad-0001,OV-2023.2,xeon,"Intel® Xeon® Platinum 8380 ",66.76,,27.44,0.004,0.124,18718,540,2,9359,270,231.49,FPS,FPS/$,FPS/TDP,msec.,,,,,, +bert-large-uncased-whole-word-masking-squad-0001,OV-2023.2,xeon,"Intel® Xeon® Platinum 8490H ",250.47,,45.77,0.007,0.358,34000,700,2,17000,350,27.75,FPS,FPS/$,FPS/TDP,msec.,,,,,, +bert-large-uncased-whole-word-masking-squad-0001,OV-2023.2,xeon,"Intel® Xeon® Silver 4216R ",20.73,,6.57,0.010,0.104,2022,200,2,1011,100,106.9,FPS,FPS/$,FPS/TDP,msec.,,,,,, +bert-large-uncased-whole-word-masking-squad-0001,OV-2023.2,xeon,"Intel® Xeon® Silver 4316 ",38.85,,14.53,0.017,0.129,2274,300,2,1137,150,156.69,FPS,FPS/$,FPS/TDP,msec.,,,,,, +bert-large-uncased-whole-word-masking-squad-0001,OV-2023.2,accel,"Intel® Data Center GPU Flex 170 ",148.54,102.98,,0.077,0.990,1925,150,1,1925,150,107.19,FPS,FPS/$,FPS/TDP,msec.,,,,,, +bert-large-uncased-whole-word-masking-squad-0001,OV-2023.2,accel,"Intel® Arc®A-Series Graphics ",127.99,94.72,,0.399,0.853,321,150,1,321,150,124.58,FPS,FPS/$,FPS/TDP,msec.,,,,,, +bert-large-uncased-whole-word-masking-squad-0001,OV-2023.2,core-iGPU,Intel® Core™ i7-1185G7 iGPU,9.07,6.62,4.27,0.021,0.324,426,28,1,426,28,452.44,FPS,FPS/$,FPS/TDP,msec.,,,,,, +bert-large-uncased-whole-word-masking-squad-0001,OV-2023.2,core-iGPU,Intel® Core™ i7-1185GRE iGPU,5.39,5.77,3.01,0.011,0.192,490,28,1,490,28,741.24,FPS,FPS/$,FPS/TDP,msec.,,,,,, +bert-large-uncased-whole-word-masking-squad-0001,OV-2023.2,core-iGPU,Intel® Core™ i7-12700H 
iGPU,10.54,7.57,5,0.021,0.092,502,115,1,502,115,379.08,FPS,FPS/$,FPS/TDP,msec.,,,,,, +bert-large-uncased-whole-word-masking-squad-0001,OV-2023.2,core-CPU+iGPU,Intel® Core™ i7-1185G7 CPU+iGPU,8.49,,3.46,0.020,0.303,426,28,1,426,28,,FPS,FPS/$,FPS/TDP,msec.,,,,,, +bert-large-uncased-whole-word-masking-squad-0001,OV-2023.2,core-CPU+iGPU,Intel® Core™ i7-12700H CPU+iGPU,8.53,,3.01,0.017,0.074,502,115,1,502,115,,FPS,FPS/$,FPS/TDP,msec.,,,,,, +end_rec,,,,,,,,,,,,,,,,,,,,,,,, +begin_rec," "," "," "," "," "," "," "," "," "," "," "," "," "," ",FPS,FPS/$,FPS/TDP,msec.,,,,,, +deeplabv3,OV-2023.2,atom,Intel® Atom® X6425E CPU,4.66,,2.87,0.070,0.388,67,12,1,67,12,219.16,FPS,FPS/$,FPS/TDP,msec.,,,,,, +deeplabv3,OV-2023.2,core,"Intel® Core™ i3-8100 ",22,,14.03,0.188,0.339,117,65,1,117,65,45.56,FPS,FPS/$,FPS/TDP,msec.,,,,,, +deeplabv3,OV-2023.2,core,"Intel® Core™ i5-10500TE ",35.15,,16.61,0.164,0.541,214,65,1,214,65,33.27,FPS,FPS/$,FPS/TDP,msec.,,,,,, +deeplabv3,OV-2023.2,core,"Intel® Core™ i5-13600K ",101.57,,41.76,0.309,0.813,329,125,1,329,125,16.21,FPS,FPS/$,FPS/TDP,msec.,,,,,, +deeplabv3,OV-2023.2,core,Intel® Core™ i7-1185G7 CPU,52.36,,16.31,0.123,1.870,426,28,1,426,28,19.93,FPS,FPS/$,FPS/TDP,msec.,,,,,, +deeplabv3,OV-2023.2,core,Intel® Core™ i7-1185GRE CPU,31.69,,9.45,0.065,1.132,490,28,1,490,28,29.7,FPS,FPS/$,FPS/TDP,msec.,,,,,, +deeplabv3,OV-2023.2,core,Intel® Core™ i7-12700H CPU,74.8,,29.13,0.149,0.650,502,115,1,502,115,16.96,FPS,FPS/$,FPS/TDP,msec.,,,,,, +deeplabv3,OV-2023.2,core,"Intel® Core™ i7-8700T ",32.22,,18.38,0.106,0.921,303,35,1,303,35,37.52,FPS,FPS/$,FPS/TDP,msec.,,,,,, +deeplabv3,OV-2023.2,core,"Intel® Core™ i9-10900TE ",39.4,,18.25,0.081,1.126,488,35,1,488,35,28.44,FPS,FPS/$,FPS/TDP,msec.,,,,,, +deeplabv3,OV-2023.2,core,"Intel® Core™ i9-12900TE ",58.11,,22.54,0.107,1.660,544,35,1,544,35,21.65,FPS,FPS/$,FPS/TDP,msec.,,,,,, +deeplabv3,OV-2023.2,core,"Intel® Core™ i9-13900K ",149.59,,57.89,0.250,1.197,599,125,1,599,125,12.49,FPS,FPS/$,FPS/TDP,msec.,,,,,, 
+deeplabv3,OV-2023.2,atom,Intel® Processor N200 CPU,1.72,,1.01,0.009,0.287,193,6,1,193,6,596.5,FPS,FPS/$,FPS/TDP,msec.,,,,,, +deeplabv3,OV-2023.2,xeon,"Intel® Xeon® W1290P ",51.12,,19.38,0.086,0.409,594,125,1,594,125,21.94,FPS,FPS/$,FPS/TDP,msec.,,,,,, +deeplabv3,OV-2023.2,xeon,"Intel® Xeon® E-2124G ",21.77,,14.75,0.087,0.307,249,71,1,249,71,45.69,FPS,FPS/$,FPS/TDP,msec.,,,,,, +deeplabv3,OV-2023.2,xeon,"Intel® Xeon® Gold 5218T ",188.66,,76.86,0.060,0.898,3144,210,2,1572,105,11.81,FPS,FPS/$,FPS/TDP,msec.,,,,,, +deeplabv3,OV-2023.2,xeon,"Intel® Xeon® Platinum 8270 ",413.18,,154.25,0.024,1.008,16954,410,2,8477,205,5.65,FPS,FPS/$,FPS/TDP,msec.,,,,,, +deeplabv3,OV-2023.2,xeon,"Intel® Xeon® Platinum 8380 ",564.23,,223.46,0.030,1.045,18718,540,2,9359,270,5.36,FPS,FPS/$,FPS/TDP,msec.,,,,,, +deeplabv3,OV-2023.2,xeon,"Intel® Xeon® Platinum 8490H ",1001.4,,380.46,0.029,1.431,34000,700,2,17000,350,3.6,FPS,FPS/$,FPS/TDP,msec.,,,,,, +deeplabv3,OV-2023.2,xeon,"Intel® Xeon® Silver 4216R ",182.57,,74.43,0.090,0.913,2022,200,2,1011,100,12.14,FPS,FPS/$,FPS/TDP,msec.,,,,,, +deeplabv3,OV-2023.2,xeon,"Intel® Xeon® Silver 4316 ",360.93,,138.14,0.159,1.203,2274,300,2,1137,150,7.04,FPS,FPS/$,FPS/TDP,msec.,,,,,, +deeplabv3,OV-2023.2,accel,"Intel® Data Center GPU Flex 170 ",732.85,602.34,,0.381,4.886,1925,150,1,1925,150,21.8,FPS,FPS/$,FPS/TDP,msec.,,,,,, +deeplabv3,OV-2023.2,accel,"Intel® Arc®A-Series Graphics ",597.79,484.98,,1.862,3.985,321,150,1,321,150,26.18,FPS,FPS/$,FPS/TDP,msec.,,,,,, +deeplabv3,OV-2023.2,atom,Intel® Celeron® 6305E CPU,11.64,,4.56,0.109,0.776,107,15,1,107,15,87.1,FPS,FPS/$,FPS/TDP,msec.,,,,,, +deeplabv3,OV-2023.2,atom-iGPU,Intel® Atom® X6425E iGPU,10.87,11.07,5.64,0.162,0.906,67,12,1,67,12,367.4,FPS,FPS/$,FPS/TDP,msec.,,,,,, +deeplabv3,OV-2023.2,core-iGPU,Intel® Core™ i7-1185G7 iGPU,104.71,48.95,27.69,0.246,3.739,426,28,1,426,28,37.85,FPS,FPS/$,FPS/TDP,msec.,,,,,, +deeplabv3,OV-2023.2,core-iGPU,Intel® Core™ i7-1185GRE 
iGPU,76.34,36.22,13.67,0.156,2.726,490,28,1,490,28,52.07,FPS,FPS/$,FPS/TDP,msec.,,,,,, +deeplabv3,OV-2023.2,core-iGPU,Intel® Core™ i7-12700H iGPU,113.65,52.72,33.36,0.226,0.988,502,115,1,502,115,34.81,FPS,FPS/$,FPS/TDP,msec.,,,,,, +deeplabv3,OV-2023.2,atom-iGPU,Intel® Processor N200 iGPU,3.65,1.92,1.27,0.019,0.609,193,6,1,193,6,1094.07,FPS,FPS/$,FPS/TDP,msec.,,,,,, +deeplabv3,OV-2023.2,atom-iGPU,Intel® Celeron® 6305E iGPU,60.3,27.73,16.44,0.564,4.020,107,15,1,107,15,66.22,FPS,FPS/$,FPS/TDP,msec.,,,,,, +deeplabv3,OV-2023.2,atom-CPU+iGPU,Intel® Atom® X6425E CPU+iGPU,11.11,,5.66,0.166,0.926,67,12,1,67,12,,FPS,FPS/$,FPS/TDP,msec.,,,,,, +deeplabv3,OV-2023.2,core-CPU+iGPU,Intel® Core™ i7-1185G7 CPU+iGPU,86.78,,24.13,0.204,3.099,426,28,1,426,28,,FPS,FPS/$,FPS/TDP,msec.,,,,,, +deeplabv3,OV-2023.2,core-CPU+iGPU,Intel® Core™ i7-12700H CPU+iGPU,75.71,,28.98,0.151,0.658,502,115,1,502,115,,FPS,FPS/$,FPS/TDP,msec.,,,,,, +deeplabv3,OV-2023.2,atom-CPU+iGPU,Intel® Processor N200 CPU+iGPU,4.66,,1.9,0.024,0.776,193,6,1,193,6,,FPS,FPS/$,FPS/TDP,msec.,,,,,, +deeplabv3,OV-2023.2,atom-CPU+iGPU,Intel® Celeron® 6305E CPU+iGPU,61.15,,16.89,0.571,4.077,107,15,1,107,15,,FPS,FPS/$,FPS/TDP,msec.,,,,,, +end_rec,,,,,,,,,,,,,,,,,,,,,,,, +begin_rec," "," "," "," "," "," "," "," "," "," "," "," "," "," ",FPS,FPS/$,FPS/TDP,msec.,,,,,, +efficientdet-d0,OV-2023.2,atom,Intel® Atom® X6425E CPU,7.29,,5.01,0.109,0.608,67,12,1,67,12,140.41,FPS,FPS/$,FPS/TDP,msec.,,,,,, +efficientdet-d0,OV-2023.2,core,"Intel® Core™ i3-8100 ",36.6,,24.31,0.313,0.563,117,65,1,117,65,28.3,FPS,FPS/$,FPS/TDP,msec.,,,,,, +efficientdet-d0,OV-2023.2,core,"Intel® Core™ i5-10500TE ",58.84,,29.38,0.275,0.905,214,65,1,214,65,21.11,FPS,FPS/$,FPS/TDP,msec.,,,,,, +efficientdet-d0,OV-2023.2,core,"Intel® Core™ i5-13600K ",139.32,,77.22,0.423,1.115,329,125,1,329,125,11.92,FPS,FPS/$,FPS/TDP,msec.,,,,,, +efficientdet-d0,OV-2023.2,core,Intel® Core™ i7-1185G7 CPU,73.76,,41.07,0.173,2.634,426,28,1,426,28,15.53,FPS,FPS/$,FPS/TDP,msec.,,,,,, 
+efficientdet-d0,OV-2023.2,core,Intel® Core™ i7-1185GRE CPU,52.57,,21.48,0.107,1.877,490,28,1,490,28,20.87,FPS,FPS/$,FPS/TDP,msec.,,,,,, +efficientdet-d0,OV-2023.2,core,Intel® Core™ i7-12700H CPU,114.46,,54.77,0.228,0.995,502,115,1,502,115,11.88,FPS,FPS/$,FPS/TDP,msec.,,,,,, +efficientdet-d0,OV-2023.2,core,"Intel® Core™ i7-8700T ",51.93,,34.22,0.171,1.484,303,35,1,303,35,24.28,FPS,FPS/$,FPS/TDP,msec.,,,,,, +efficientdet-d0,OV-2023.2,core,"Intel® Core™ i9-10900TE ",66,,35.11,0.135,1.886,488,35,1,488,35,18.95,FPS,FPS/$,FPS/TDP,msec.,,,,,, +efficientdet-d0,OV-2023.2,core,"Intel® Core™ i9-12900TE ",75.37,,44.41,0.139,2.154,544,35,1,544,35,15.52,FPS,FPS/$,FPS/TDP,msec.,,,,,, +efficientdet-d0,OV-2023.2,core,"Intel® Core™ i9-13900K ",207,,102.36,0.346,1.656,599,125,1,599,125,9.45,FPS,FPS/$,FPS/TDP,msec.,,,,,, +efficientdet-d0,OV-2023.2,atom,Intel® Processor N200 CPU,2.09,,1.67,0.011,0.349,193,6,1,193,6,488.71,FPS,FPS/$,FPS/TDP,msec.,,,,,, +efficientdet-d0,OV-2023.2,xeon,"Intel® Xeon® W1290P ",96.56,,38.61,0.163,0.772,594,125,1,594,125,14.19,FPS,FPS/$,FPS/TDP,msec.,,,,,, +efficientdet-d0,OV-2023.2,xeon,"Intel® Xeon® E-2124G ",35.01,,25.27,0.141,0.493,249,71,1,249,71,29.41,FPS,FPS/$,FPS/TDP,msec.,,,,,, +efficientdet-d0,OV-2023.2,xeon,"Intel® Xeon® Gold 5218T ",258.46,,164.63,0.082,1.231,3144,210,2,1572,105,11.88,FPS,FPS/$,FPS/TDP,msec.,,,,,, +efficientdet-d0,OV-2023.2,xeon,"Intel® Xeon® Platinum 8270 ",518.85,,310.76,0.031,1.265,16954,410,2,8477,205,7.42,FPS,FPS/$,FPS/TDP,msec.,,,,,, +efficientdet-d0,OV-2023.2,xeon,"Intel® Xeon® Platinum 8380 ",834.12,,495.89,0.045,1.545,18718,540,2,9359,270,4.31,FPS,FPS/$,FPS/TDP,msec.,,,,,, +efficientdet-d0,OV-2023.2,xeon,"Intel® Xeon® Platinum 8490H ",1043.83,,861.5,0.031,1.491,34000,700,2,17000,350,5.43,FPS,FPS/$,FPS/TDP,msec.,,,,,, +efficientdet-d0,OV-2023.2,xeon,"Intel® Xeon® Silver 4216R ",248.64,,157.52,0.123,1.243,2022,200,2,1011,100,12.27,FPS,FPS/$,FPS/TDP,msec.,,,,,, +efficientdet-d0,OV-2023.2,xeon,"Intel® Xeon® Silver 4316 
",469.81,,293.67,0.207,1.566,2274,300,2,1137,150,5.89,FPS,FPS/$,FPS/TDP,msec.,,,,,, +efficientdet-d0,OV-2023.2,accel,"Intel® Data Center GPU Flex 170 ",846.71,825.71,,0.440,5.645,1925,150,1,1925,150,18.66,FPS,FPS/$,FPS/TDP,msec.,,,,,, +efficientdet-d0,OV-2023.2,accel,"Intel® Arc®A-Series Graphics ",582.03,590.73,,1.813,3.880,321,150,1,321,150,26.38,FPS,FPS/$,FPS/TDP,msec.,,,,,, +efficientdet-d0,OV-2023.2,atom,Intel® Celeron® 6305E CPU,18.06,,11.09,0.169,1.204,107,15,1,107,15,57.26,FPS,FPS/$,FPS/TDP,msec.,,,,,, +efficientdet-d0,OV-2023.2,atom-iGPU,Intel® Atom® X6425E iGPU,19.71,22.48,11.18,0.294,1.643,67,12,1,67,12,202.28,FPS,FPS/$,FPS/TDP,msec.,,,,,, +efficientdet-d0,OV-2023.2,core-iGPU,Intel® Core™ i7-1185G7 iGPU,110.42,90.58,46.89,0.259,3.944,426,28,1,426,28,35.95,FPS,FPS/$,FPS/TDP,msec.,,,,,, +efficientdet-d0,OV-2023.2,core-iGPU,Intel® Core™ i7-1185GRE iGPU,62.89,49.91,23.65,0.128,2.246,490,28,1,490,28,63.05,FPS,FPS/$,FPS/TDP,msec.,,,,,, +efficientdet-d0,OV-2023.2,core-iGPU,Intel® Core™ i7-12700H iGPU,127.71,103.78,54.39,0.254,1.110,502,115,1,502,115,30.98,FPS,FPS/$,FPS/TDP,msec.,,,,,, +efficientdet-d0,OV-2023.2,atom-iGPU,Intel® Processor N200 iGPU,5.53,4.94,2.75,0.029,0.921,193,6,1,193,6,721.64,FPS,FPS/$,FPS/TDP,msec.,,,,,, +efficientdet-d0,OV-2023.2,atom-iGPU,Intel® Celeron® 6305E iGPU,72.14,60.61,33.85,0.674,4.809,107,15,1,107,15,55.18,FPS,FPS/$,FPS/TDP,msec.,,,,,, +efficientdet-d0,OV-2023.2,atom-CPU+iGPU,Intel® Atom® X6425E CPU+iGPU,20.11,,11.51,0.300,1.676,67,12,1,67,12,,FPS,FPS/$,FPS/TDP,msec.,,,,,, +efficientdet-d0,OV-2023.2,core-CPU+iGPU,Intel® Core™ i7-1185G7 CPU+iGPU,101.03,,48.05,0.237,3.608,426,28,1,426,28,,FPS,FPS/$,FPS/TDP,msec.,,,,,, +efficientdet-d0,OV-2023.2,core-CPU+iGPU,Intel® Core™ i7-12700H CPU+iGPU,114.83,,55.31,0.229,0.999,502,115,1,502,115,,FPS,FPS/$,FPS/TDP,msec.,,,,,, +efficientdet-d0,OV-2023.2,atom-CPU+iGPU,Intel® Processor N200 CPU+iGPU,5.73,,3.57,0.030,0.955,193,6,1,193,6,,FPS,FPS/$,FPS/TDP,msec.,,,,,, 
+efficientdet-d0,OV-2023.2,atom-CPU+iGPU,Intel® Celeron® 6305E CPU+iGPU,56.19,,32.14,0.525,3.746,107,15,1,107,15,,FPS,FPS/$,FPS/TDP,msec.,,,,,, +end_rec,,,,,,,,,,,,,,,,,,,,,,,, +begin_rec," "," "," "," "," "," "," "," "," "," "," "," "," "," ",FPS,FPS/$,FPS/TDP,msec.,,,,,, +mobilenet-v2,OV-2023.2,atom,Intel® Atom® X6425E CPU,132.01,,79.71,1.970,11.001,67,12,1,67,12,7.97,FPS,FPS/$,FPS/TDP,msec.,,,,,, +mobilenet-v2,OV-2023.2,core,"Intel® Core™ i3-8100 ",536.37,,,4.584,8.252,117,65,1,117,65,2.02,FPS,FPS/$,FPS/TDP,msec.,,,,,, +mobilenet-v2,OV-2023.2,core,"Intel® Core™ i5-10500TE ",898.55,,500.27,4.199,13.824,214,65,1,214,65,1.57,FPS,FPS/$,FPS/TDP,msec.,,,,,, +mobilenet-v2,OV-2023.2,core,"Intel® Core™ i5-13600K ",2785.11,,1237.02,8.465,22.281,329,125,1,329,125,0.88,FPS,FPS/$,FPS/TDP,msec.,,,,,, +mobilenet-v2,OV-2023.2,core,Intel® Core™ i7-1185G7 CPU,1347.18,,525.71,3.162,48.113,426,28,1,426,28,0.86,FPS,FPS/$,FPS/TDP,msec.,,,,,, +mobilenet-v2,OV-2023.2,core,Intel® Core™ i7-1185GRE CPU,979.43,,319,1.999,34.980,490,28,1,490,28,1.19,FPS,FPS/$,FPS/TDP,msec.,,,,,, +mobilenet-v2,OV-2023.2,core,Intel® Core™ i7-12700H CPU,2099.29,,1056.24,4.182,18.255,502,115,1,502,115,1.1,FPS,FPS/$,FPS/TDP,msec.,,,,,, +mobilenet-v2,OV-2023.2,core,"Intel® Core™ i7-8700T ",741.65,,519.77,2.448,21.190,303,35,1,303,35,1.86,FPS,FPS/$,FPS/TDP,msec.,,,,,, +mobilenet-v2,OV-2023.2,core,"Intel® Core™ i9-10900TE ",949.26,,604.02,1.945,27.122,488,35,1,488,35,1.5,FPS,FPS/$,FPS/TDP,msec.,,,,,, +mobilenet-v2,OV-2023.2,core,"Intel® Core™ i9-12900TE ",1300.22,,657.07,2.390,37.149,544,35,1,544,35,1.32,FPS,FPS/$,FPS/TDP,msec.,,,,,, +mobilenet-v2,OV-2023.2,core,"Intel® Core™ i9-13900K ",4089.6,,2014.33,6.827,32.717,599,125,1,599,125,0.71,FPS,FPS/$,FPS/TDP,msec.,,,,,, +mobilenet-v2,OV-2023.2,atom,Intel® Processor N200 CPU,41.1,,29.71,0.213,6.851,193,6,1,193,6,27.14,FPS,FPS/$,FPS/TDP,msec.,,,,,, +mobilenet-v2,OV-2023.2,xeon,"Intel® Xeon® W1290P 
",1450.73,,542.77,2.442,11.606,594,125,1,594,125,1.29,FPS,FPS/$,FPS/TDP,msec.,,,,,, +mobilenet-v2,OV-2023.2,xeon,"Intel® Xeon® E-2124G ",523.03,,,2.101,7.367,249,71,1,249,71,2.07,FPS,FPS/$,FPS/TDP,msec.,,,,,, +mobilenet-v2,OV-2023.2,xeon,"Intel® Xeon® Gold 5218T ",5410.81,,1915.84,1.721,25.766,3144,210,2,1572,105,1.42,FPS,FPS/$,FPS/TDP,msec.,,,,,, +mobilenet-v2,OV-2023.2,xeon,"Intel® Xeon® Platinum 8270 ",14207.13,,4438.67,0.838,34.652,16954,410,2,8477,205,0.93,FPS,FPS/$,FPS/TDP,msec.,,,,,, +mobilenet-v2,OV-2023.2,xeon,"Intel® Xeon® Platinum 8380 ",22308.51,,6801.73,1.192,41.312,18718,540,2,9359,270,0.57,FPS,FPS/$,FPS/TDP,msec.,,,,,, +mobilenet-v2,OV-2023.2,xeon,"Intel® Xeon® Platinum 8490H ",38064.38,,10986.01,1.120,54.378,34000,700,2,17000,350,0.66,FPS,FPS/$,FPS/TDP,msec.,,,,,, +mobilenet-v2,OV-2023.2,xeon,"Intel® Xeon® Silver 4216R ",5178.33,,1862.47,2.561,25.892,2022,200,2,1011,100,1.45,FPS,FPS/$,FPS/TDP,msec.,,,,,, +mobilenet-v2,OV-2023.2,xeon,"Intel® Xeon® Silver 4316 ",12161.33,,3597.47,5.348,40.538,2274,300,2,1137,150,0.56,FPS,FPS/$,FPS/TDP,msec.,,,,,, +mobilenet-v2,OV-2023.2,accel,"Intel® Data Center GPU Flex 170 ",6748.16,5698.62,,3.506,44.988,1925,150,1,1925,150,2.37,FPS,FPS/$,FPS/TDP,msec.,,,,,, +mobilenet-v2,OV-2023.2,accel,"Intel® Arc®A-Series Graphics ",4308.65,3849.95,,13.423,28.724,321,150,1,321,150,3.63,FPS,FPS/$,FPS/TDP,msec.,,,,,, +mobilenet-v2,OV-2023.2,atom,Intel® Celeron® 6305E CPU,265.71,,132.81,2.483,17.714,107,15,1,107,15,3.66,FPS,FPS/$,FPS/TDP,msec.,,,,,, +mobilenet-v2,OV-2023.2,atom-iGPU,Intel® Atom® X6425E iGPU,191.22,225.68,130.69,2.854,15.935,67,12,1,67,12,20.63,FPS,FPS/$,FPS/TDP,msec.,,,,,, +mobilenet-v2,OV-2023.2,core-iGPU,Intel® Core™ i7-1185G7 iGPU,1014.86,749.24,525.16,2.382,36.245,426,28,1,426,28,3.77,FPS,FPS/$,FPS/TDP,msec.,,,,,, +mobilenet-v2,OV-2023.2,core-iGPU,Intel® Core™ i7-1185GRE iGPU,880.63,557.89,349.94,1.797,31.451,490,28,1,490,28,4.25,FPS,FPS/$,FPS/TDP,msec.,,,,,, +mobilenet-v2,OV-2023.2,core-iGPU,Intel® Core™ 
i7-12700H iGPU,1319.62,916.27,563.83,2.629,11.475,502,115,1,502,115,2.83,FPS,FPS/$,FPS/TDP,msec.,,,,,, +mobilenet-v2,OV-2023.2,atom-iGPU,Intel® Processor N200 iGPU,58.68,40.52,26.29,0.304,9.781,193,6,1,193,6,67.34,FPS,FPS/$,FPS/TDP,msec.,,,,,, +mobilenet-v2,OV-2023.2,atom-iGPU,Intel® Celeron® 6305E iGPU,685.09,513.1,339.2,6.403,45.672,107,15,1,107,15,5.56,FPS,FPS/$,FPS/TDP,msec.,,,,,, +mobilenet-v2,OV-2023.2,atom-CPU+iGPU,Intel® Atom® X6425E CPU+iGPU,107.78,,137.9,1.609,8.981,67,12,1,67,12,,FPS,FPS/$,FPS/TDP,msec.,,,,,, +mobilenet-v2,OV-2023.2,core-CPU+iGPU,Intel® Core™ i7-1185G7 CPU+iGPU,2182.22,,612.95,5.123,77.937,426,28,1,426,28,,FPS,FPS/$,FPS/TDP,msec.,,,,,, +mobilenet-v2,OV-2023.2,core-CPU+iGPU,Intel® Core™ i7-12700H CPU+iGPU,2071.13,,1048.22,4.126,18.010,502,115,1,502,115,,FPS,FPS/$,FPS/TDP,msec.,,,,,, +mobilenet-v2,OV-2023.2,atom-CPU+iGPU,Intel® Processor N200 CPU+iGPU,115.35,,50.03,0.598,19.224,193,6,1,193,6,,FPS,FPS/$,FPS/TDP,msec.,,,,,, +mobilenet-v2,OV-2023.2,atom-CPU+iGPU,Intel® Celeron® 6305E CPU+iGPU,1465.46,,396.56,13.696,97.698,107,15,1,107,15,,FPS,FPS/$,FPS/TDP,msec.,,,,,, +end_rec,,,,,,,,,,,,,,,,,,,,,,,, +begin_rec," "," "," "," "," "," "," "," "," "," "," "," "," "," ",FPS,FPS/$,FPS/TDP,msec.,,,,,, +resnet-50,OV-2023.2,atom,Intel® Atom® X6425E CPU,19.92,,8.18,0.297,1.660,67,12,1,67,12,51.27,FPS,FPS/$,FPS/TDP,msec.,,,,,, +resnet-50,OV-2023.2,core,"Intel® Core™ i3-8100 ",96.91,,50.72,0.828,1.491,117,65,1,117,65,10.72,FPS,FPS/$,FPS/TDP,msec.,,,,,, +resnet-50,OV-2023.2,core,"Intel® Core™ i5-10500TE ",145.07,,74.01,0.678,2.232,214,65,1,214,65,8.19,FPS,FPS/$,FPS/TDP,msec.,,,,,, +resnet-50,OV-2023.2,core,"Intel® Core™ i5-13600K ",515.17,,140.11,1.566,4.121,329,125,1,329,125,3.89,FPS,FPS/$,FPS/TDP,msec.,,,,,, +resnet-50,OV-2023.2,core,Intel® Core™ i7-1185G7 CPU,229.34,,61.85,0.538,8.191,426,28,1,426,28,5.03,FPS,FPS/$,FPS/TDP,msec.,,,,,, +resnet-50,OV-2023.2,core,Intel® Core™ i7-1185GRE 
CPU,172.44,,45.06,0.352,6.159,490,28,1,490,28,6.62,FPS,FPS/$,FPS/TDP,msec.,,,,,, +resnet-50,OV-2023.2,core,Intel® Core™ i7-12700H CPU,445.18,,122.92,0.887,3.871,502,115,1,502,115,3.93,FPS,FPS/$,FPS/TDP,msec.,,,,,, +resnet-50,OV-2023.2,core,"Intel® Core™ i7-8700T ",122.89,,62.1,0.406,3.511,303,35,1,303,35,9.97,FPS,FPS/$,FPS/TDP,msec.,,,,,, +resnet-50,OV-2023.2,core,"Intel® Core™ i9-10900TE ",156.59,,75.57,0.321,4.474,488,35,1,488,35,7.6,FPS,FPS/$,FPS/TDP,msec.,,,,,, +resnet-50,OV-2023.2,core,"Intel® Core™ i9-12900TE ",269.4,,72.67,0.495,7.697,544,35,1,544,35,4.89,FPS,FPS/$,FPS/TDP,msec.,,,,,, +resnet-50,OV-2023.2,core,"Intel® Core™ i9-13900K ",749.69,,228.22,1.252,5.998,599,125,1,599,125,2.98,FPS,FPS/$,FPS/TDP,msec.,,,,,, +resnet-50,OV-2023.2,atom,Intel® Processor N200 CPU,6.72,,3.13,0.035,1.120,193,6,1,193,6,159.9,FPS,FPS/$,FPS/TDP,msec.,,,,,, +resnet-50,OV-2023.2,xeon,"Intel® Xeon® W1290P ",240.85,,96.84,0.405,1.927,594,125,1,594,125,5.5,FPS,FPS/$,FPS/TDP,msec.,,,,,, +resnet-50,OV-2023.2,xeon,"Intel® Xeon® E-2124G ",92.92,,49.94,0.373,1.309,249,71,1,249,71,11.12,FPS,FPS/$,FPS/TDP,msec.,,,,,, +resnet-50,OV-2023.2,xeon,"Intel® Xeon® Gold 5218T ",968.92,,267.96,0.308,4.614,3144,210,2,1572,105,2.91,FPS,FPS/$,FPS/TDP,msec.,,,,,, +resnet-50,OV-2023.2,xeon,"Intel® Xeon® Platinum 8270 ",2902.26,,747.22,0.171,7.079,16954,410,2,8477,205,1.55,FPS,FPS/$,FPS/TDP,msec.,,,,,, +resnet-50,OV-2023.2,xeon,"Intel® Xeon® Platinum 8380 ",4946.11,,1154.11,0.264,9.159,18718,540,2,9359,270,,FPS,FPS/$,FPS/TDP,msec.,,,,,, +resnet-50,OV-2023.2,xeon,"Intel® Xeon® Platinum 8490H ",19987.31,,1672.83,0.588,28.553,34000,700,2,17000,350,1.02,FPS,FPS/$,FPS/TDP,msec.,,,,,, +resnet-50,OV-2023.2,xeon,"Intel® Xeon® Silver 4216R ",931.66,,257,0.461,4.658,2022,200,2,1011,100,3,FPS,FPS/$,FPS/TDP,msec.,,,,,, +resnet-50,OV-2023.2,xeon,"Intel® Xeon® Silver 4316 ",2276.19,,562.55,1.001,7.587,2274,300,2,1137,150,,FPS,FPS/$,FPS/TDP,msec.,,,,,, +resnet-50,OV-2023.2,accel,"Intel® Data Center GPU Flex 170 
",3436.03,2103.96,,1.785,22.907,1925,150,1,1925,150,4.65,FPS,FPS/$,FPS/TDP,msec.,,,,,, +resnet-50,OV-2023.2,accel,"Intel® Arc®A-Series Graphics ",2320.83,1555.26,,7.230,15.472,321,150,1,321,150,6.8,FPS,FPS/$,FPS/TDP,msec.,,,,,, +resnet-50,OV-2023.2,atom,Intel® Celeron® 6305E CPU,49.59,,14.36,0.463,3.306,107,15,1,107,15,19.89,FPS,FPS/$,FPS/TDP,msec.,,,,,, +resnet-50,OV-2023.2,atom-iGPU,Intel® Atom® X6425E iGPU,49.36,52.35,27.44,0.737,4.113,67,12,1,67,12,80.69,FPS,FPS/$,FPS/TDP,msec.,,,,,, +resnet-50,OV-2023.2,core-iGPU,Intel® Core™ i7-1185G7 iGPU,351.53,206.09,116.49,0.825,12.555,426,28,1,426,28,11.12,FPS,FPS/$,FPS/TDP,msec.,,,,,, +resnet-50,OV-2023.2,core-iGPU,Intel® Core™ i7-1185GRE iGPU,291.8,170.69,95.05,0.596,10.421,490,28,1,490,28,13.6,FPS,FPS/$,FPS/TDP,msec.,,,,,, +resnet-50,OV-2023.2,core-iGPU,Intel® Core™ i7-12700H iGPU,389.36,224.15,136.98,0.776,3.386,502,115,1,502,115,10.01,FPS,FPS/$,FPS/TDP,msec.,,,,,, +resnet-50,OV-2023.2,atom-iGPU,Intel® Processor N200 iGPU,14.66,7.82,4.24,0.076,2.444,193,6,1,193,6,271.96,FPS,FPS/$,FPS/TDP,msec.,,,,,, +resnet-50,OV-2023.2,atom-iGPU,Intel® Celeron® 6305E iGPU,213.06,118.28,67.33,1.991,14.204,107,15,1,107,15,18.66,FPS,FPS/$,FPS/TDP,msec.,,,,,, +resnet-50,OV-2023.2,atom-CPU+iGPU,Intel® Atom® X6425E CPU+iGPU,73.78,,32.26,1.101,6.148,67,12,1,67,12,,FPS,FPS/$,FPS/TDP,msec.,,,,,, +resnet-50,OV-2023.2,core-CPU+iGPU,Intel® Core™ i7-1185G7 CPU+iGPU,467.05,,119.19,1.096,16.680,426,28,1,426,28,,FPS,FPS/$,FPS/TDP,msec.,,,,,, +resnet-50,OV-2023.2,core-CPU+iGPU,Intel® Core™ i7-12700H CPU+iGPU,446.64,,123.08,0.890,3.884,502,115,1,502,115,,FPS,FPS/$,FPS/TDP,msec.,,,,,, +resnet-50,OV-2023.2,atom-CPU+iGPU,Intel® Processor N200 CPU+iGPU,20.62,,6.29,0.107,3.437,193,6,1,193,6,,FPS,FPS/$,FPS/TDP,msec.,,,,,, +resnet-50,OV-2023.2,atom-CPU+iGPU,Intel® Celeron® 6305E CPU+iGPU,299.3,,75.5,2.797,19.953,107,15,1,107,15,,FPS,FPS/$,FPS/TDP,msec.,,,,,, +end_rec,,,,,,,,,,,,,,,,,,,,,,,, +begin_rec," "," "," "," "," "," "," "," "," "," "," "," "," "," 
",FPS,FPS/$,FPS/TDP,msec.,,,,,, +ssd-resnet34-1200,OV-2023.2,atom,Intel® Atom® X6425E CPU,0.33,,0.13,0.005,0.028,67,12,1,67,12,2993.01,FPS,FPS/$,FPS/TDP,msec.,,,,,, +ssd-resnet34-1200,OV-2023.2,core,"Intel® Core™ i3-8100 ",1.68,,0.97,0.014,0.026,117,65,1,117,65,601.85,FPS,FPS/$,FPS/TDP,msec.,,,,,, +ssd-resnet34-1200,OV-2023.2,core,"Intel® Core™ i5-10500TE ",2.42,,1.4,0.011,0.037,214,65,1,214,65,459.92,FPS,FPS/$,FPS/TDP,msec.,,,,,, +ssd-resnet34-1200,OV-2023.2,core,"Intel® Core™ i5-13600K ",8.24,,2.4,0.025,0.066,329,125,1,329,125,163.48,FPS,FPS/$,FPS/TDP,msec.,,,,,, +ssd-resnet34-1200,OV-2023.2,core,Intel® Core™ i7-1185G7 CPU,3.91,,1,0.009,0.140,426,28,1,426,28,277.92,FPS,FPS/$,FPS/TDP,msec.,,,,,, +ssd-resnet34-1200,OV-2023.2,core,Intel® Core™ i7-1185GRE CPU,2.88,,0.77,0.006,0.103,490,28,1,490,28,338.91,FPS,FPS/$,FPS/TDP,msec.,,,,,, +ssd-resnet34-1200,OV-2023.2,core,Intel® Core™ i7-12700H CPU,7.23,,2.11,0.014,0.063,502,115,1,502,115,160.16,FPS,FPS/$,FPS/TDP,msec.,,,,,, +ssd-resnet34-1200,OV-2023.2,core,"Intel® Core™ i7-8700T ",2.02,,1.13,0.007,0.058,303,35,1,303,35,564.49,FPS,FPS/$,FPS/TDP,msec.,,,,,, +ssd-resnet34-1200,OV-2023.2,core,"Intel® Core™ i9-10900TE ",2.65,,1.47,0.005,0.076,488,35,1,488,35,411.44,FPS,FPS/$,FPS/TDP,msec.,,,,,, +ssd-resnet34-1200,OV-2023.2,core,"Intel® Core™ i9-12900TE ",4.43,,1.32,0.008,0.126,544,35,1,544,35,233.69,FPS,FPS/$,FPS/TDP,msec.,,,,,, +ssd-resnet34-1200,OV-2023.2,core,"Intel® Core™ i9-13900K ",12.56,,4.02,0.021,0.100,599,125,1,599,125,125.42,FPS,FPS/$,FPS/TDP,msec.,,,,,, +ssd-resnet34-1200,OV-2023.2,atom,Intel® Processor N200 CPU,0.11,,0.05,0.001,0.019,193,6,1,193,6,8949.48,FPS,FPS/$,FPS/TDP,msec.,,,,,, +ssd-resnet34-1200,OV-2023.2,xeon,"Intel® Xeon® W1290P ",4.33,,2.45,0.007,0.035,594,125,1,594,125,238.2,FPS,FPS/$,FPS/TDP,msec.,,,,,, +ssd-resnet34-1200,OV-2023.2,xeon,"Intel® Xeon® E-2124G ",1.6,,0.92,0.006,0.023,249,71,1,249,71,628.09,FPS,FPS/$,FPS/TDP,msec.,,,,,, +ssd-resnet34-1200,OV-2023.2,xeon,"Intel® Xeon® Gold 5218T 
",17.64,,4.57,0.006,0.084,3144,210,2,1572,105,115.69,FPS,FPS/$,FPS/TDP,msec.,,,,,, +ssd-resnet34-1200,OV-2023.2,xeon,"Intel® Xeon® Platinum 8270 ",57.78,,14.8,0.003,0.141,16954,410,2,8477,205,36.97,FPS,FPS/$,FPS/TDP,msec.,,,,,, +ssd-resnet34-1200,OV-2023.2,xeon,"Intel® Xeon® Platinum 8380 ",78.79,,20.72,0.004,0.146,18718,540,2,9359,270,108.29,FPS,FPS/$,FPS/TDP,msec.,,,,,, +ssd-resnet34-1200,OV-2023.2,xeon,"Intel® Xeon® Platinum 8490H ",447.58,,31.29,0.013,0.639,34000,700,2,17000,350,8.52,FPS,FPS/$,FPS/TDP,msec.,,,,,, +ssd-resnet34-1200,OV-2023.2,xeon,"Intel® Xeon® Silver 4216R ",16.78,,4.35,0.008,0.084,2022,200,2,1011,100,121.64,FPS,FPS/$,FPS/TDP,msec.,,,,,, +ssd-resnet34-1200,OV-2023.2,xeon,"Intel® Xeon® Silver 4316 ",42.36,,10.47,0.019,0.141,2274,300,2,1137,150,62,FPS,FPS/$,FPS/TDP,msec.,,,,,, +ssd-resnet34-1200,OV-2023.2,accel,"Intel® Data Center GPU Flex 170 ",212,109.86,,0.110,1.413,1925,150,1,1925,150,75.46,FPS,FPS/$,FPS/TDP,msec.,,,,,, +ssd-resnet34-1200,OV-2023.2,accel,"Intel® Arc®A-Series Graphics ",147.33,81.3,,0.459,0.982,321,150,1,321,150,107.9,FPS,FPS/$,FPS/TDP,msec.,,,,,, +ssd-resnet34-1200,OV-2023.2,atom,Intel® Celeron® 6305E CPU,0.89,,0.23,0.008,0.059,107,15,1,107,15,1121.85,FPS,FPS/$,FPS/TDP,msec.,,,,,, +ssd-resnet34-1200,OV-2023.2,atom-iGPU,Intel® Atom® X6425E iGPU,1.18,1.18,0.6,0.018,0.098,67,12,1,67,12,3388.59,FPS,FPS/$,FPS/TDP,msec.,,,,,, +ssd-resnet34-1200,OV-2023.2,core-iGPU,Intel® Core™ i7-1185G7 iGPU,9.69,5.42,2.82,0.023,0.346,426,28,1,426,28,422.17,FPS,FPS/$,FPS/TDP,msec.,,,,,, +ssd-resnet34-1200,OV-2023.2,core-iGPU,Intel® Core™ i7-1185GRE iGPU,8.81,4.73,2.22,0.018,0.315,490,28,1,490,28,454.51,FPS,FPS/$,FPS/TDP,msec.,,,,,, +ssd-resnet34-1200,OV-2023.2,core-iGPU,Intel® Core™ i7-12700H iGPU,10.57,6.15,3.31,0.021,0.092,502,115,1,502,115,378.05,FPS,FPS/$,FPS/TDP,msec.,,,,,, +ssd-resnet34-1200,OV-2023.2,atom-iGPU,Intel® Processor N200 iGPU,0.29,0.16,,0.001,0.048,193,6,1,193,6,13815.91,FPS,FPS/$,FPS/TDP,msec.,,,,,, 
+ssd-resnet34-1200,OV-2023.2,atom-iGPU,Intel® Celeron® 6305E iGPU,5.07,2.64,1.41,0.047,0.338,107,15,1,107,15,774.3,FPS,FPS/$,FPS/TDP,msec.,,,,,, +ssd-resnet34-1200,OV-2023.2,atom-CPU+iGPU,Intel® Atom® X6425E CPU+iGPU,0.33,,0.13,0.005,0.028,67,12,1,67,12,,FPS,FPS/$,FPS/TDP,msec.,,,,,, +ssd-resnet34-1200,OV-2023.2,core-CPU+iGPU,Intel® Core™ i7-1185G7 CPU+iGPU,3.91,,1,0.009,0.140,426,28,1,426,28,,FPS,FPS/$,FPS/TDP,msec.,,,,,, +ssd-resnet34-1200,OV-2023.2,core-CPU+iGPU,Intel® Core™ i7-12700H CPU+iGPU,7.22,,2.11,0.014,0.063,502,115,1,502,115,,FPS,FPS/$,FPS/TDP,msec.,,,,,, +ssd-resnet34-1200,OV-2023.2,atom-CPU+iGPU,Intel® Processor N200 CPU+iGPU,0.11,,0.05,0.001,0.019,193,6,1,193,6,,FPS,FPS/$,FPS/TDP,msec.,,,,,, +ssd-resnet34-1200,OV-2023.2,atom-CPU+iGPU,Intel® Celeron® 6305E CPU+iGPU,0.89,,0.23,0.008,0.059,107,15,1,107,15,,FPS,FPS/$,FPS/TDP,msec.,,,,,, +end_rec,,,,,,,,,,,,,,,,,,,,,,,, +begin_rec," "," "," "," "," "," "," "," "," "," "," "," "," "," ",FPS,FPS/$,FPS/TDP,msec.,,,,,, +ssd_mobilenet_v1_coco,OV-2023.2,atom,Intel® Atom® X6425E CPU,45.25,,21.49,0.675,3.771,67,12,1,67,12,23.03,FPS,FPS/$,FPS/TDP,msec.,,,,,, +ssd_mobilenet_v1_coco,OV-2023.2,core,"Intel® Core™ i3-8100 ",211.26,,122.9,1.806,3.250,117,65,1,117,65,4.94,FPS,FPS/$,FPS/TDP,msec.,,,,,, +ssd_mobilenet_v1_coco,OV-2023.2,core,"Intel® Core™ i5-10500TE ",328.11,,171.73,1.533,5.048,214,65,1,214,65,3.6,FPS,FPS/$,FPS/TDP,msec.,,,,,, +ssd_mobilenet_v1_coco,OV-2023.2,core,"Intel® Core™ i5-13600K ",958.88,,352.8,2.915,7.671,329,125,1,329,125,2.39,FPS,FPS/$,FPS/TDP,msec.,,,,,, +ssd_mobilenet_v1_coco,OV-2023.2,core,Intel® Core™ i7-1185G7 CPU,516.83,,149.6,1.213,18.458,426,28,1,426,28,1.95,FPS,FPS/$,FPS/TDP,msec.,,,,,, +ssd_mobilenet_v1_coco,OV-2023.2,core,Intel® Core™ i7-1185GRE CPU,387.14,,100.71,0.790,13.827,490,28,1,490,28,2.82,FPS,FPS/$,FPS/TDP,msec.,,,,,, +ssd_mobilenet_v1_coco,OV-2023.2,core,Intel® Core™ i7-12700H CPU,851.54,,313.45,1.696,7.405,502,115,1,502,115,2.26,FPS,FPS/$,FPS/TDP,msec.,,,,,, 
+ssd_mobilenet_v1_coco,OV-2023.2,core,"Intel® Core™ i7-8700T ",276.74,,157.91,0.913,7.907,303,35,1,303,35,4.32,FPS,FPS/$,FPS/TDP,msec.,,,,,, +ssd_mobilenet_v1_coco,OV-2023.2,core,"Intel® Core™ i9-10900TE ",364.53,,192.13,0.747,10.415,488,35,1,488,35,3.37,FPS,FPS/$,FPS/TDP,msec.,,,,,, +ssd_mobilenet_v1_coco,OV-2023.2,core,"Intel® Core™ i9-12900TE ",524.73,,184.04,0.965,14.992,544,35,1,544,35,3.11,FPS,FPS/$,FPS/TDP,msec.,,,,,, +ssd_mobilenet_v1_coco,OV-2023.2,core,"Intel® Core™ i9-13900K ",1448.44,,577.78,2.418,11.587,599,125,1,599,125,2.07,FPS,FPS/$,FPS/TDP,msec.,,,,,, +ssd_mobilenet_v1_coco,OV-2023.2,atom,Intel® Processor N200 CPU,14.48,,7.94,0.075,2.413,193,6,1,193,6,72.01,FPS,FPS/$,FPS/TDP,msec.,,,,,, +ssd_mobilenet_v1_coco,OV-2023.2,xeon,"Intel® Xeon® W1290P ",575.79,,221.76,0.969,4.606,594,125,1,594,125,2.37,FPS,FPS/$,FPS/TDP,msec.,,,,,, +ssd_mobilenet_v1_coco,OV-2023.2,xeon,"Intel® Xeon® E-2124G ",202.62,,125.71,0.814,2.854,249,71,1,249,71,5.11,FPS,FPS/$,FPS/TDP,msec.,,,,,, +ssd_mobilenet_v1_coco,OV-2023.2,xeon,"Intel® Xeon® Gold 5218T ",2056.28,,640.87,0.654,9.792,3144,210,2,1572,105,1.56,FPS,FPS/$,FPS/TDP,msec.,,,,,, +ssd_mobilenet_v1_coco,OV-2023.2,xeon,"Intel® Xeon® Platinum 8270 ",5764.35,,1656.74,0.340,14.059,16954,410,2,8477,205,1.1,FPS,FPS/$,FPS/TDP,msec.,,,,,, +ssd_mobilenet_v1_coco,OV-2023.2,xeon,"Intel® Xeon® Platinum 8380 ",10274.61,,2320.94,0.549,19.027,18718,540,2,9359,270,0.66,FPS,FPS/$,FPS/TDP,msec.,,,,,, +ssd_mobilenet_v1_coco,OV-2023.2,xeon,"Intel® Xeon® Platinum 8490H ",22310.17,,3557.58,0.656,31.872,34000,700,2,17000,350,0.82,FPS,FPS/$,FPS/TDP,msec.,,,,,, +ssd_mobilenet_v1_coco,OV-2023.2,xeon,"Intel® Xeon® Silver 4216R ",1961.85,,610.96,0.970,9.809,2022,200,2,1011,100,1.63,FPS,FPS/$,FPS/TDP,msec.,,,,,, +ssd_mobilenet_v1_coco,OV-2023.2,xeon,"Intel® Xeon® Silver 4316 ",4825.79,,1246.04,2.122,16.086,2274,300,2,1137,150,0.81,FPS,FPS/$,FPS/TDP,msec.,,,,,, +ssd_mobilenet_v1_coco,OV-2023.2,accel,"Intel® Data Center GPU Flex 170 
",4044.15,3428.72,,2.101,26.961,1925,150,1,1925,150,3.93,FPS,FPS/$,FPS/TDP,msec.,,,,,, +ssd_mobilenet_v1_coco,OV-2023.2,accel,"Intel® Arc®A-Series Graphics ",2984.21,2546.5,,9.297,19.895,321,150,1,321,150,5.28,FPS,FPS/$,FPS/TDP,msec.,,,,,, +ssd_mobilenet_v1_coco,OV-2023.2,atom,Intel® Celeron® 6305E CPU,107.12,,36.58,1.001,7.142,107,15,1,107,15,9.17,FPS,FPS/$,FPS/TDP,msec.,,,,,, +ssd_mobilenet_v1_coco,OV-2023.2,atom-iGPU,Intel® Atom® X6425E iGPU,92.52,95.67,51.13,1.381,7.710,67,12,1,67,12,42.26,FPS,FPS/$,FPS/TDP,msec.,,,,,, +ssd_mobilenet_v1_coco,OV-2023.2,core-iGPU,Intel® Core™ i7-1185G7 iGPU,651.76,382.05,253.7,1.530,23.277,426,28,1,426,28,6.02,FPS,FPS/$,FPS/TDP,msec.,,,,,, +ssd_mobilenet_v1_coco,OV-2023.2,core-iGPU,Intel® Core™ i7-1185GRE iGPU,524.22,312.45,186.78,1.070,18.722,490,28,1,490,28,7.46,FPS,FPS/$,FPS/TDP,msec.,,,,,, +ssd_mobilenet_v1_coco,OV-2023.2,core-iGPU,Intel® Core™ i7-12700H iGPU,773.55,416.41,274.89,1.541,6.727,502,115,1,502,115,4.96,FPS,FPS/$,FPS/TDP,msec.,,,,,, +ssd_mobilenet_v1_coco,OV-2023.2,atom-iGPU,Intel® Processor N200 iGPU,29.11,15.38,9.5,0.151,4.852,193,6,1,193,6,136.41,FPS,FPS/$,FPS/TDP,msec.,,,,,, +ssd_mobilenet_v1_coco,OV-2023.2,atom-iGPU,Intel® Celeron® 6305E iGPU,411.09,221.78,136.65,3.842,27.406,107,15,1,107,15,9.59,FPS,FPS/$,FPS/TDP,msec.,,,,,, +ssd_mobilenet_v1_coco,OV-2023.2,atom-CPU+iGPU,Intel® Atom® X6425E CPU+iGPU,108.74,,57.49,1.623,9.061,67,12,1,67,12,,FPS,FPS/$,FPS/TDP,msec.,,,,,, +ssd_mobilenet_v1_coco,OV-2023.2,core-CPU+iGPU,Intel® Core™ i7-1185G7 CPU+iGPU,681.22,,234.33,1.599,24.329,426,28,1,426,28,,FPS,FPS/$,FPS/TDP,msec.,,,,,, +ssd_mobilenet_v1_coco,OV-2023.2,core-CPU+iGPU,Intel® Core™ i7-12700H CPU+iGPU,846.65,,312.78,1.687,7.362,502,115,1,502,115,,FPS,FPS/$,FPS/TDP,msec.,,,,,, +ssd_mobilenet_v1_coco,OV-2023.2,atom-CPU+iGPU,Intel® Processor N200 CPU+iGPU,35.06,,14.07,0.182,5.843,193,6,1,193,6,,FPS,FPS/$,FPS/TDP,msec.,,,,,, +ssd_mobilenet_v1_coco,OV-2023.2,atom-CPU+iGPU,Intel® Celeron® 6305E 
CPU+iGPU,299.91,,136.25,2.803,19.994,107,15,1,107,15,,FPS,FPS/$,FPS/TDP,msec.,,,,,, +end_rec,,,,,,,,,,,,,,,,,,,,,,,, +begin_rec," "," "," "," "," "," "," "," "," "," "," "," "," "," ",FPS,FPS/$,FPS/TDP,msec.,,,,,, +unet-camvid-onnx-0001,OV-2023.2,atom,Intel® Atom® X6425E CPU,0.48,,0.06,0.007,0.040,67,12,1,67,12,2086.28,FPS,FPS/$,FPS/TDP,msec.,,,,,, +unet-camvid-onnx-0001,OV-2023.2,core,"Intel® Core™ i3-8100 ",2.42,,1.55,0.021,0.037,117,65,1,117,65,426.14,FPS,FPS/$,FPS/TDP,msec.,,,,,, +unet-camvid-onnx-0001,OV-2023.2,core,"Intel® Core™ i5-10500TE ",3.6,,2.28,0.017,0.055,214,65,1,214,65,324.72,FPS,FPS/$,FPS/TDP,msec.,,,,,, +unet-camvid-onnx-0001,OV-2023.2,core,"Intel® Core™ i5-13600K ",11.52,,3.96,0.035,0.092,329,125,1,329,125,121.88,FPS,FPS/$,FPS/TDP,msec.,,,,,, +unet-camvid-onnx-0001,OV-2023.2,core,Intel® Core™ i7-1185G7 CPU,6.54,,1.63,0.015,0.234,426,28,1,426,28,168.96,FPS,FPS/$,FPS/TDP,msec.,,,,,, +unet-camvid-onnx-0001,OV-2023.2,core,Intel® Core™ i7-1185GRE CPU,4.87,,1.22,0.010,0.174,490,28,1,490,28,209.5,FPS,FPS/$,FPS/TDP,msec.,,,,,, +unet-camvid-onnx-0001,OV-2023.2,core,Intel® Core™ i7-12700H CPU,10.23,,3.55,0.020,0.089,502,115,1,502,115,123.74,FPS,FPS/$,FPS/TDP,msec.,,,,,, +unet-camvid-onnx-0001,OV-2023.2,core,"Intel® Core™ i7-8700T ",3.02,,1.86,0.010,0.086,303,35,1,303,35,385.98,FPS,FPS/$,FPS/TDP,msec.,,,,,, +unet-camvid-onnx-0001,OV-2023.2,core,"Intel® Core™ i9-10900TE ",3.86,,2.4,0.008,0.110,488,35,1,488,35,286.48,FPS,FPS/$,FPS/TDP,msec.,,,,,, +unet-camvid-onnx-0001,OV-2023.2,core,"Intel® Core™ i9-12900TE ",6.29,,2.21,0.012,0.180,544,35,1,544,35,167.25,FPS,FPS/$,FPS/TDP,msec.,,,,,, +unet-camvid-onnx-0001,OV-2023.2,core,"Intel® Core™ i9-13900K ",17.97,,6.61,0.030,0.144,599,125,1,599,125,91.7,FPS,FPS/$,FPS/TDP,msec.,,,,,, +unet-camvid-onnx-0001,OV-2023.2,atom,Intel® Processor N200 CPU,0.17,,0.09,0.001,0.029,193,6,1,193,6,5851.61,FPS,FPS/$,FPS/TDP,msec.,,,,,, +unet-camvid-onnx-0001,OV-2023.2,xeon,"Intel® Xeon® W1290P 
",6.17,,3.96,0.010,0.049,594,125,1,594,125,180.39,FPS,FPS/$,FPS/TDP,msec.,,,,,, +unet-camvid-onnx-0001,OV-2023.2,xeon,"Intel® Xeon® E-2124G ",2.31,,1.48,0.009,0.033,249,71,1,249,71,434.36,FPS,FPS/$,FPS/TDP,msec.,,,,,, +unet-camvid-onnx-0001,OV-2023.2,xeon,"Intel® Xeon® Gold 5218T ",29.19,,7.31,0.009,0.139,3144,210,2,1572,105,71.02,FPS,FPS/$,FPS/TDP,msec.,,,,,, +unet-camvid-onnx-0001,OV-2023.2,xeon,"Intel® Xeon® Platinum 8270 ",95.18,,21.6,0.006,0.232,16954,410,2,8477,205,23.81,FPS,FPS/$,FPS/TDP,msec.,,,,,, +unet-camvid-onnx-0001,OV-2023.2,xeon,"Intel® Xeon® Platinum 8380 ",129.12,,31.53,0.007,0.239,18718,540,2,9359,270,73.82,FPS,FPS/$,FPS/TDP,msec.,,,,,, +unet-camvid-onnx-0001,OV-2023.2,xeon,"Intel® Xeon® Platinum 8490H ",594.44,,48.37,0.017,0.849,34000,700,2,17000,350,8.51,FPS,FPS/$,FPS/TDP,msec.,,,,,, +unet-camvid-onnx-0001,OV-2023.2,xeon,"Intel® Xeon® Silver 4216R ",27.77,,6.96,0.014,0.139,2022,200,2,1011,100,74.6,FPS,FPS/$,FPS/TDP,msec.,,,,,, +unet-camvid-onnx-0001,OV-2023.2,xeon,"Intel® Xeon® Silver 4316 ",69.04,,15.92,0.030,0.230,2274,300,2,1137,150,43.95,FPS,FPS/$,FPS/TDP,msec.,,,,,, +unet-camvid-onnx-0001,OV-2023.2,accel,"Intel® Data Center GPU Flex 170 ",308.2,201.07,,0.160,2.055,1925,150,1,1925,150,51.9,FPS,FPS/$,FPS/TDP,msec.,,,,,, +unet-camvid-onnx-0001,OV-2023.2,accel,"Intel® Arc®A-Series Graphics ",264.35,182.28,,0.824,1.762,321,150,1,321,150,60.21,FPS,FPS/$,FPS/TDP,msec.,,,,,, +unet-camvid-onnx-0001,OV-2023.2,atom,Intel® Celeron® 6305E CPU,1.49,,0.38,0.014,0.099,107,15,1,107,15,675.13,FPS,FPS/$,FPS/TDP,msec.,,,,,, +unet-camvid-onnx-0001,OV-2023.2,atom-iGPU,Intel® Atom® X6425E iGPU,0.98,1.99,0.98,0.015,0.082,67,12,1,67,12,4060.12,FPS,FPS/$,FPS/TDP,msec.,,,,,, +unet-camvid-onnx-0001,OV-2023.2,core-iGPU,Intel® Core™ i7-1185G7 iGPU,17.25,8.84,4.82,0.040,0.616,426,28,1,426,28,227.85,FPS,FPS/$,FPS/TDP,msec.,,,,,, +unet-camvid-onnx-0001,OV-2023.2,core-iGPU,Intel® Core™ i7-1185GRE 
iGPU,15.51,7.82,4.16,0.032,0.554,490,28,1,490,28,257.82,FPS,FPS/$,FPS/TDP,msec.,,,,,, +unet-camvid-onnx-0001,OV-2023.2,core-iGPU,Intel® Core™ i7-12700H iGPU,18.55,9.77,5.39,0.037,0.161,502,115,1,502,115,215.12,FPS,FPS/$,FPS/TDP,msec.,,,,,, +unet-camvid-onnx-0001,OV-2023.2,atom-iGPU,Intel® Processor N200 iGPU,0.46,0.25,0.14,0.002,0.077,193,6,1,193,6,8685.49,FPS,FPS/$,FPS/TDP,msec.,,,,,, +unet-camvid-onnx-0001,OV-2023.2,atom-iGPU,Intel® Celeron® 6305E iGPU,8.41,4.38,2.38,0.079,0.560,107,15,1,107,15,475.59,FPS,FPS/$,FPS/TDP,msec.,,,,,, +unet-camvid-onnx-0001,OV-2023.2,atom-CPU+iGPU,Intel® Atom® X6425E CPU+iGPU,1.23,,0.79,0.018,0.102,67,12,1,67,12,,FPS,FPS/$,FPS/TDP,msec.,,,,,, +unet-camvid-onnx-0001,OV-2023.2,core-CPU+iGPU,Intel® Core™ i7-1185G7 CPU+iGPU,13.96,,3.78,0.033,0.499,426,28,1,426,28,,FPS,FPS/$,FPS/TDP,msec.,,,,,, +unet-camvid-onnx-0001,OV-2023.2,core-CPU+iGPU,Intel® Core™ i7-12700H CPU+iGPU,10.26,,3.52,0.020,0.089,502,115,1,502,115,,FPS,FPS/$,FPS/TDP,msec.,,,,,, +unet-camvid-onnx-0001,OV-2023.2,atom-CPU+iGPU,Intel® Processor N200 CPU+iGPU,0.57,,0.19,0.003,0.094,193,6,1,193,6,,FPS,FPS/$,FPS/TDP,msec.,,,,,, +unet-camvid-onnx-0001,OV-2023.2,atom-CPU+iGPU,Intel® Celeron® 6305E CPU+iGPU,8.94,,2.44,0.084,0.596,107,15,1,107,15,,FPS,FPS/$,FPS/TDP,msec.,,,,,, +end_rec,,,,,,,,,,,,,,,,,,,,,,,, +begin_rec," "," "," "," "," "," "," "," "," "," "," "," "," "," ",FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v3,OV-2023.2,atom,Intel® Atom® X6425E CPU,2.09,,0.88,0.031,0.174,67,12,1,67,12,484.41,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v3,OV-2023.2,core,"Intel® Core™ i3-8100 ",10.63,,5.8,0.091,0.163,117,65,1,117,65,95.04,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v3,OV-2023.2,core,"Intel® Core™ i5-10500TE ",15.37,,8.28,0.072,0.236,214,65,1,214,65,74.25,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v3,OV-2023.2,core,"Intel® Core™ i5-13600K ",51.79,,15.62,0.157,0.414,329,125,1,329,125,29.87,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v3,OV-2023.2,core,Intel® Core™ i7-1185G7 
CPU,24.16,,6.61,0.057,0.863,426,28,1,426,28,46.04,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v3,OV-2023.2,core,Intel® Core™ i7-1185GRE CPU,18.04,,4.84,0.037,0.644,490,28,1,490,28,57.2,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v3,OV-2023.2,core,Intel® Core™ i7-12700H CPU,45.55,,13.34,0.091,0.396,502,115,1,502,115,29.39,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v3,OV-2023.2,core,"Intel® Core™ i7-8700T ",12.71,,6.82,0.042,0.363,303,35,1,303,35,87.06,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v3,OV-2023.2,core,"Intel® Core™ i9-10900TE ",16.64,,8.64,0.034,0.475,488,35,1,488,35,67.32,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v3,OV-2023.2,core,"Intel® Core™ i9-12900TE ",27.33,,8.16,0.050,0.781,544,35,1,544,35,41.73,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v3,OV-2023.2,core,"Intel® Core™ i9-13900K ",78.06,,25.64,0.130,0.624,599,125,1,599,125,23.25,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v3,OV-2023.2,atom,Intel® Processor N200 CPU,0.7,,0.34,0.004,0.117,193,6,1,193,6,1470.7,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v3,OV-2023.2,xeon,"Intel® Xeon® W1290P ",27.37,,14.08,0.046,0.219,594,125,1,594,125,40.66,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v3,OV-2023.2,xeon,"Intel® Xeon® E-2124G ",10.06,,5.64,0.040,0.142,249,71,1,249,71,100.33,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v3,OV-2023.2,xeon,"Intel® Xeon® Gold 5218T ",106.36,,29.72,0.034,0.506,3144,210,2,1572,105,21.82,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v3,OV-2023.2,xeon,"Intel® Xeon® Platinum 8270 ",313.83,,87.89,0.019,0.765,16954,410,2,8477,205,10.5,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v3,OV-2023.2,xeon,"Intel® Xeon® Platinum 8380 ",490.61,,109.01,0.026,0.909,18718,540,2,9359,270,,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v3,OV-2023.2,xeon,"Intel® Xeon® Platinum 8490H ",2125.85,,193.93,0.063,3.037,34000,700,2,17000,350,3.31,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v3,OV-2023.2,xeon,"Intel® Xeon® Silver 4216R ",101.13,,28.36,0.050,0.506,2022,200,2,1011,100,22.77,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v3,OV-2023.2,xeon,"Intel® Xeon® Silver 4316 
",242.25,,62.31,0.107,0.808,2274,300,2,1137,150,13.97,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v3,OV-2023.2,accel,"Intel® Data Center GPU Flex 170 ",784.51,385.29,,0.408,5.230,1925,150,1,1925,150,20.34,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v3,OV-2023.2,accel,"Intel® Arc®A-Series Graphics ",582.91,341.6,,1.816,3.886,321,150,1,321,150,27.27,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v3,OV-2023.2,atom,Intel® Celeron® 6305E CPU,5.45,,1.54,0.051,0.363,107,15,1,107,15,184.42,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v3,OV-2023.2,atom-iGPU,Intel® Atom® X6425E iGPU,6.74,6.85,3.38,0.101,0.562,67,12,1,67,12,591.73,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v3,OV-2023.2,core-iGPU,Intel® Core™ i7-1185G7 iGPU,63.57,29.68,16.2,0.149,2.270,426,28,1,426,28,63.78,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v3,OV-2023.2,core-iGPU,Intel® Core™ i7-1185GRE iGPU,57.51,26.04,13.46,0.117,2.054,490,28,1,490,28,69.04,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v3,OV-2023.2,core-iGPU,Intel® Core™ i7-12700H iGPU,70.17,33.53,18.68,0.140,0.610,502,115,1,502,115,56.66,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v3,OV-2023.2,atom-iGPU,Intel® Processor N200 iGPU,1.76,0.94,0.5,0.009,0.293,193,6,1,193,6,2270.28,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v3,OV-2023.2,atom-iGPU,Intel® Celeron® 6305E iGPU,32.22,15.09,8.06,0.301,2.148,107,15,1,107,15,123.79,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v3,OV-2023.2,atom-CPU+iGPU,Intel® Atom® X6425E CPU+iGPU,7.72,,3.75,0.115,0.643,67,12,1,67,12,,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v3,OV-2023.2,core-CPU+iGPU,Intel® Core™ i7-1185G7 CPU+iGPU,51.27,,13.81,0.120,1.831,426,28,1,426,28,,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v3,OV-2023.2,core-CPU+iGPU,Intel® Core™ i7-12700H CPU+iGPU,45.37,,13.46,0.090,0.395,502,115,1,502,115,,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v3,OV-2023.2,atom-CPU+iGPU,Intel® Processor N200 CPU+iGPU,2.17,,0.7,0.011,0.361,193,6,1,193,6,,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v3,OV-2023.2,atom-CPU+iGPU,Intel® Celeron® 6305E CPU+iGPU,33.62,,8.52,0.314,2.242,107,15,1,107,15,,FPS,FPS/$,FPS/TDP,msec.,,,,,, 
+end_rec,,,,,,,,,,,,,,,,,,,,,,,, +begin_rec," "," "," "," "," "," "," "," "," "," "," "," "," "," ",FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v3_tiny,OV-2023.2,atom,Intel® Atom® X6425E CPU,22.9,,10.3,0.342,1.908,67,12,1,67,12,44.81,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v3_tiny,OV-2023.2,core,"Intel® Core™ i3-8100 ",111.7,,63.53,0.955,1.718,117,65,1,117,65,9.05,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v3_tiny,OV-2023.2,core,"Intel® Core™ i5-10500TE ",167.36,,91.55,0.782,2.575,214,65,1,214,65,6.74,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v3_tiny,OV-2023.2,core,"Intel® Core™ i5-13600K ",600,,195.96,1.824,4.800,329,125,1,329,125,3.05,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v3_tiny,OV-2023.2,core,Intel® Core™ i7-1185G7 CPU,252.28,,77.33,0.592,9.010,426,28,1,426,28,4.56,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v3_tiny,OV-2023.2,core,Intel® Core™ i7-1185GRE CPU,186.61,,55.02,0.381,6.665,490,28,1,490,28,5.72,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v3_tiny,OV-2023.2,core,Intel® Core™ i7-12700H CPU,501.3,,153.33,0.999,4.359,502,115,1,502,115,3.07,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v3_tiny,OV-2023.2,core,"Intel® Core™ i7-8700T ",137.83,,76.63,0.455,3.938,303,35,1,303,35,8.19,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v3_tiny,OV-2023.2,core,"Intel® Core™ i9-10900TE ",184.15,,95.48,0.377,5.261,488,35,1,488,35,6.36,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v3_tiny,OV-2023.2,core,"Intel® Core™ i9-12900TE ",293.87,,93.77,0.540,8.396,544,35,1,544,35,4.16,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v3_tiny,OV-2023.2,core,"Intel® Core™ i9-13900K ",859.57,,285.93,1.435,6.877,599,125,1,599,125,2.43,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v3_tiny,OV-2023.2,atom,Intel® Processor N200 CPU,7.83,,4.08,0.041,1.306,193,6,1,193,6,136.7,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v3_tiny,OV-2023.2,xeon,"Intel® Xeon® W1290P ",298.34,,148.85,0.502,2.387,594,125,1,594,125,4.01,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v3_tiny,OV-2023.2,xeon,"Intel® Xeon® E-2124G ",106.15,,62.71,0.426,1.495,249,71,1,249,71,9.46,FPS,FPS/$,FPS/TDP,msec.,,,,,, 
+yolo_v3_tiny,OV-2023.2,xeon,"Intel® Xeon® Gold 5218T ",1051.35,,338.78,0.334,5.006,3144,210,2,1572,105,2.52,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v3_tiny,OV-2023.2,xeon,"Intel® Xeon® Platinum 8270 ",2835.63,,919.26,0.167,6.916,16954,410,2,8477,205,1.22,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v3_tiny,OV-2023.2,xeon,"Intel® Xeon® Platinum 8380 ",4653.13,,1373.23,0.249,8.617,18718,540,2,9359,270,0.87,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v3_tiny,OV-2023.2,xeon,"Intel® Xeon® Platinum 8490H ",13069.92,,2139.01,0.384,18.671,34000,700,2,17000,350,1.07,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v3_tiny,OV-2023.2,xeon,"Intel® Xeon® Silver 4216R ",1009.33,,322.76,0.499,5.047,2022,200,2,1011,100,2.62,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v3_tiny,OV-2023.2,xeon,"Intel® Xeon® Silver 4316 ",2219.25,,701.36,0.976,7.397,2274,300,2,1137,150,1.33,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v3_tiny,OV-2023.2,accel,"Intel® Data Center GPU Flex 170 ",3774.19,2809.6,,1.961,25.161,1925,150,1,1925,150,4.2,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v3_tiny,OV-2023.2,accel,"Intel® Arc®A-Series Graphics ",2481.58,2188.3,,7.731,16.544,321,150,1,321,150,6.38,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v3_tiny,OV-2023.2,atom,Intel® Celeron® 6305E CPU,54.03,,17.97,0.505,3.602,107,15,1,107,15,18.27,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v3_tiny,OV-2023.2,atom-iGPU,Intel® Atom® X6425E iGPU,65.71,66.39,33.87,0.981,5.476,67,12,1,67,12,60.33,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v3_tiny,OV-2023.2,core-iGPU,Intel® Core™ i7-1185G7 iGPU,546.82,290.91,170.54,1.284,19.529,426,28,1,426,28,7.02,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v3_tiny,OV-2023.2,core-iGPU,Intel® Core™ i7-1185GRE iGPU,494.07,258.4,135.87,1.008,17.645,490,28,1,490,28,7.98,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v3_tiny,OV-2023.2,core-iGPU,Intel® Core™ i7-12700H iGPU,614.04,322.38,201.06,1.223,5.339,502,115,1,502,115,6.27,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v3_tiny,OV-2023.2,atom-iGPU,Intel® Processor N200 iGPU,18.67,9.83,5.51,0.097,3.112,193,6,1,193,6,213.14,FPS,FPS/$,FPS/TDP,msec.,,,,,, 
+yolo_v3_tiny,OV-2023.2,atom-iGPU,Intel® Celeron® 6305E iGPU,292.06,153.11,86.79,2.730,19.471,107,15,1,107,15,13.59,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v3_tiny,OV-2023.2,atom-CPU+iGPU,Intel® Atom® X6425E CPU+iGPU,28.49,,39.7,0.425,2.374,67,12,1,67,12,,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v3_tiny,OV-2023.2,core-CPU+iGPU,Intel® Core™ i7-1185G7 CPU+iGPU,485.88,,147.22,1.141,17.353,426,28,1,426,28,,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v3_tiny,OV-2023.2,core-CPU+iGPU,Intel® Core™ i7-12700H CPU+iGPU,504.34,,154.35,1.005,4.386,502,115,1,502,115,,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v3_tiny,OV-2023.2,atom-CPU+iGPU,Intel® Processor N200 CPU+iGPU,23.01,,8.08,0.119,3.835,193,6,1,193,6,,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v3_tiny,OV-2023.2,atom-CPU+iGPU,Intel® Celeron® 6305E CPU+iGPU,322.03,,92.97,3.010,21.469,107,15,1,107,15,,FPS,FPS/$,FPS/TDP,msec.,,,,,, +end_rec,,,,,,,,,,,,,,,,,,,,,,,, +begin_rec," "," "," "," "," "," "," "," "," "," "," "," "," "," ",FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v8n,OV-2023.2,atom,Intel® Atom® X6425E CPU,10.23,,5.1,0.153,0.853,67,12,1,67,12,101.71,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v8n,OV-2023.2,core,"Intel® Core™ i3-8100 ",53.43,,33.01,0.457,0.822,117,65,1,117,65,19.24,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v8n,OV-2023.2,core,"Intel® Core™ i5-10500TE ",81.28,,46.84,0.380,1.251,214,65,1,214,65,13.7,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v8n,OV-2023.2,core,"Intel® Core™ i5-13600K ",249.13,,95.35,0.757,1.993,329,125,1,329,125,6.67,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v8n,OV-2023.2,core,Intel® Core™ i7-1185G7 CPU,110.57,,40.76,0.260,3.949,426,28,1,426,28,10.77,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v8n,OV-2023.2,core,Intel® Core™ i7-1185GRE CPU,77.4,,27.48,0.158,2.764,490,28,1,490,28,13.63,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v8n,OV-2023.2,core,Intel® Core™ i7-12700H CPU,213.22,,81.23,0.425,1.854,502,115,1,502,115,6.64,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v8n,OV-2023.2,core,"Intel® Core™ i7-8700T 
",71.39,,42.39,0.236,2.040,303,35,1,303,35,16.54,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v8n,OV-2023.2,core,"Intel® Core™ i9-10900TE ",92.64,,52.82,0.190,2.647,488,35,1,488,35,12.63,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v8n,OV-2023.2,core,"Intel® Core™ i9-12900TE ",132.43,,50.68,0.243,3.784,544,35,1,544,35,9.16,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v8n,OV-2023.2,core,"Intel® Core™ i9-13900K ",377.83,,153.02,0.631,3.023,599,125,1,599,125,5.31,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v8n,OV-2023.2,atom,Intel® Processor N200 CPU,3.26,,1.94,0.017,0.543,193,6,1,193,6,316.73,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v8n,OV-2023.2,xeon,"Intel® Xeon® W1290P ",135.15,,72.61,0.228,1.081,594,125,1,594,125,9.22,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v8n,OV-2023.2,xeon,"Intel® Xeon® E-2124G ",52.15,,32.9,0.209,0.735,249,71,1,249,71,19.49,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v8n,OV-2023.2,xeon,"Intel® Xeon® Gold 5218T ",450.82,,174.94,0.143,2.147,3144,210,2,1572,105,5.96,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v8n,OV-2023.2,xeon,"Intel® Xeon® Platinum 8270 ",998.4,,454.7,0.059,2.435,16954,410,2,8477,205,3.53,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v8n,OV-2023.2,xeon,"Intel® Xeon® Platinum 8380 ",1714.12,,554.58,0.092,3.174,18718,540,2,9359,270,2.38,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v8n,OV-2023.2,xeon,"Intel® Xeon® Platinum 8490H ",2889.04,,998.41,0.085,4.127,34000,700,2,17000,350,3.61,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v8n,OV-2023.2,xeon,"Intel® Xeon® Silver 4216R ",431.74,,165.69,0.214,2.159,2022,200,2,1011,100,6.18,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v8n,OV-2023.2,xeon,"Intel® Xeon® Silver 4316 ",862.18,,340.38,0.379,2.874,2274,300,2,1137,150,3.32,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v8n,OV-2023.2,accel,"Intel® Data Center GPU Flex 170 ",1539.24,1433.21,,0.800,10.262,1925,150,1,1925,150,10.28,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v8n,OV-2023.2,accel,"Intel® Arc®A-Series Graphics ",1005.97,1032.49,,3.134,6.706,321,150,1,321,150,15.79,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v8n,OV-2023.2,atom,Intel® Celeron® 
6305E CPU,24.25,,9.56,0.227,1.617,107,15,1,107,15,40.75,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v8n,OV-2023.2,atom-iGPU,Intel® Atom® X6425E iGPU,32.03,33.52,19.17,0.478,2.669,67,12,1,67,12,124.04,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v8n,OV-2023.2,core-iGPU,Intel® Core™ i7-1185G7 iGPU,206.31,140.75,88.2,0.484,7.368,426,28,1,426,28,19.11,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v8n,OV-2023.2,core-iGPU,Intel® Core™ i7-1185GRE iGPU,164.45,109.09,61.03,0.336,5.873,490,28,1,490,28,24.02,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v8n,OV-2023.2,core-iGPU,Intel® Core™ i7-12700H iGPU,220.75,149.98,96.81,0.440,1.920,502,115,1,502,115,17.82,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v8n,OV-2023.2,atom-iGPU,Intel® Processor N200 iGPU,8.37,5.57,3.25,0.043,1.394,193,6,1,193,6,477.17,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v8n,OV-2023.2,atom-iGPU,Intel® Celeron® 6305E iGPU,35.14,,20.32,0.524,2.928,67,12,1,67,12,,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v8n,OV-2023.2,atom-CPU+iGPU,Intel® Atom® X6425E CPU+iGPU,179.3,,74.12,0.421,6.403,426,28,1,426,28,,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v8n,OV-2023.2,core-CPU+iGPU,Intel® Core™ i7-1185G7 CPU+iGPU,212.84,,82.48,0.424,1.851,502,115,1,502,115,,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v8n,OV-2023.2,core-CPU+iGPU,Intel® Core™ i7-12700H CPU+iGPU,10.06,,4.44,0.052,1.676,193,6,1,193,6,,FPS,FPS/$,FPS/TDP,msec.,,,,,, +yolo_v8n,OV-2023.2,atom-CPU+iGPU,Intel® Processor N200 CPU+iGPU,113.96,,48.9,1.065,7.597,107,15,1,107,15,,FPS,FPS/$,FPS/TDP,msec.,,,,,, +end_rec,,atom-CPU+iGPU,Intel® Celeron® 6305E CPU+iGPU,,,,,,,,,,,,,,,,,,,,, +begin_rec,,,,,,,,,,,,,,,,,,,,,,,, +chatGLM2-6B,OV-2023.2,core,"Intel® Core™ i9-13900K ",277,,340,,,,,,,,,msec./token,FPS/$,FPS/TDP,msec./token,,,,374,, +chatGLM2-6B,OV-2023.2,xeon,"Intel® Xeon® Platinum 8380 ",,173,,,,,,,,,,msec./token,FPS/$,FPS/TDP,msec./token,,,,,, +chatGLM2-6B,OV-2023.2,xeon,Intel® Xeon® Platinum 8490H,,114,,,,,,,,,,msec./token,FPS/$,FPS/TDP,msec./token,,,,,, +chatGLM2-6B,OV-2023.2,accel,"Intel® Data Center GPU Flex 170 
",,,,,,,,,,,,msec./token,FPS/$,FPS/TDP,msec./token,,,,,, +chatGLM2-6B,OV-2023.2,accel,"Intel® Arc®A-Series Graphics ",95,121,,,,,,,,,,msec./token,FPS/$,FPS/TDP,msec./token,,,,,, +end_rec,,,,,,,,,,,,,,,,,,,,,,,, +begin_rec,,,,,,,,,,,,,,,,,,,,,,,, +Llama-2-7b-chat,OV-2023.2,core,"Intel® Core™ i9-13900K ",415,,420,,,,,,,,,msec./token,FPS/$,FPS/TDP,msec./token,,,,417,, +Llama-2-7b-chat,OV-2023.2,xeon,"Intel® Xeon® Platinum 8380 ",179,,201,,,,,,,,,msec./token,FPS/$,FPS/TDP,msec./token,,,,133,, +Llama-2-7b-chat,OV-2023.2,xeon,Intel® Xeon® Platinum 8490H,143,,133,,,,,,,,,msec./token,FPS/$,FPS/TDP,msec./token,,,,136,, +Llama-2-7b-chat,OV-2023.2,accel,"Intel® Data Center GPU Flex 170 ",111,95,,,,,,,,,,msec./token,FPS/$,FPS/TDP,msec./token,,,,126,, +Llama-2-7b-chat,OV-2023.2,accel,"Intel® Arc®A-Series Graphics ",163,163,,,,,,,,,,msec./token,FPS/$,FPS/TDP,msec./token,,,,221,, +end_rec,,,,,,,,,,,,,,,,,,,,,,,, +begin_rec,,,,,,,,,,,,,,,,,,,,,,,, +Stable-Diffusion-v2-1,OV-2023.2,accel,"Intel® Data Center GPU Flex 170 ",7.1,4.4,,,,,,,,,,sec.,FPS/$,FPS/TDP,sec.,,,,,, +end_rec,,,,,,,,,,,,,,,,,,,,,,,, \ No newline at end of file diff --git a/docs/sphinx_setup/_static/js/graphs.js b/docs/sphinx_setup/_static/js/graphs.js index cf34ae194fb4fe..9c4badd7a9101a 100644 --- a/docs/sphinx_setup/_static/js/graphs.js +++ b/docs/sphinx_setup/_static/js/graphs.js @@ -121,6 +121,7 @@ class ExcelData { this.throughputInt8 = csvdataline[4]; this.throughputFP16 = csvdataline[5]; this.throughputFP32 = csvdataline[6]; + this.throughputBF16 = csvdataline[24]; this.value = csvdataline[7]; this.efficiency = csvdataline[8]; this.price = csvdataline[9]; @@ -132,6 +133,7 @@ class ExcelData { this.latency16 = csvdataline[19]; this.latency32 = csvdataline[20]; this.latency4 = csvdataline[21]; + this.latencyBF16 = csvdataline[23]; this.throughputUnit = csvdataline[15]; this.valueUnit = csvdataline[16]; this.efficiencyUnit = csvdataline[17]; @@ -168,7 +170,8 @@ class GraphData { 'int4': 
excelData.throughputInt4, 'int8': excelData.throughputInt8, 'fp16': excelData.throughputFP16, - 'fp32': excelData.throughputFP32 + 'fp32': excelData.throughputFP32, + 'bf16': excelData.throughputBF16 }, excelData.value, excelData.efficiency, @@ -178,7 +181,8 @@ class GraphData { 'int4': excelData.latency4, 'int8': excelData.latency, 'fp16': excelData.latency16, - 'fp32': excelData.latency32 + 'fp32': excelData.latency32, + 'bf16': excelData.latencyBF16 },); this.price = excelData.price; @@ -231,7 +235,7 @@ class Modal { static getPrecisionsLabels(version) { if (version == 'ovms') return ['OV-INT8 (reference)', 'INT8', 'OV-FP32 (reference)', 'FP32']; - return ['INT4', 'INT8', 'FP16', 'FP32']; + return ['INT4', 'INT8', 'FP16', 'FP32', 'BF16']; } static getCoreTypes(labels) { return labels.map((label) => { @@ -262,13 +266,14 @@ class Modal { return 'fp16'; case 'FP32': return 'fp32'; + case 'BF16': + return 'bf16'; default: return ''; } }); } static getUnitDescription(unit) { - console.log(unit) switch (unit) { case 'msec.': return '(lower is better)'; @@ -383,6 +388,8 @@ class Graph { return { data: null, color: '#009fca', label: `FP16` }; case 'fp32': return { data: null, color: '#007797', label: `FP32` }; + case 'bf16': + return { data: null, color: '#00536a', label: `BF16` }; default: return {}; } @@ -402,6 +409,8 @@ class Graph { return { data: null, color: '#8424a9', label: `FP16` }; case 'fp32': return { data: null, color: '#5b037d', label: `FP32` }; + case 'bf16': + return { data: null, color: '#37014c', label: `BF16` }; default: return {}; } @@ -787,7 +796,7 @@ $(document).ready(function () { li.style.alignItems = 'center'; li.style.display = 'block'; li.style.flexDirection = 'column'; - li.style.marginLeft = '10px'; + li.style.marginLeft = '4px'; li.onclick = () => { const {type} = chart.config; @@ -806,7 +815,7 @@ $(document).ready(function () { boxSpan.style.borderColor = item.strokeStyle; boxSpan.style.borderWidth = item.lineWidth + 'px'; 
boxSpan.style.display = 'inline-block'; - boxSpan.style.height = '12px'; + boxSpan.style.height = '10px'; boxSpan.style.marginRight = '4px'; boxSpan.style.width = '30px'; @@ -815,7 +824,8 @@ $(document).ready(function () { textContainer.style.color = item.fontColor; textContainer.style.margin = 0; textContainer.style.padding = 0; - textContainer.style.fontSize = '0.8rem'; + textContainer.style.fontSize = '0.6rem'; + textContainer.style.marginLeft = '3px'; textContainer.style.textDecoration = item.hidden ? 'line-through' : ''; const text = document.createTextNode(item.text); @@ -871,7 +881,7 @@ $(document).ready(function () { data: item.data, backgroundColor: item.color, borderColor: 'rgba(170,170,170,0)', - barThickness: 12 + barThickness: 10 } }) } @@ -938,26 +948,29 @@ $(document).ready(function () { var graphConfigs = kpis.map((str) => { var kpi = str.toLowerCase(); var groupUnit = model[0]; + var indexes = []; if (kpi === 'throughput') { var throughputData = Graph.getDatabyKPI(model, kpi); var config = Graph.getGraphConfig(kpi, groupUnit, precisions); precisions.forEach((prec, index) => { config.datasets[index].data = throughputData.map(tData => tData[prec]); }); - return config; + return removeEmptyLabel(config, indexes); } else if(kpi === 'latency'){ var latencyData = Graph.getDatabyKPI(model, kpi); var config = Graph.getGraphConfig(kpi, groupUnit, precisions); precisions.forEach((prec, index) => { - config.datasets[index].data = latencyData.map(tData => tData[prec]); + config.datasets[index].data = latencyData.map(tData => tData[prec]); }); - return config; + return removeEmptyLabel(config, indexes); } var config = Graph.getGraphConfig(kpi, groupUnit); config.datasets[0].data = Graph.getDatabyKPI(model, kpi); return config; }); + + // get the client platform labels and create labels for all the graphs var labelsContainer = $('
'); labelsContainer.addClass('chart-labels-container'); @@ -1013,6 +1026,19 @@ $(document).ready(function () { setChartsDisplayDirection(display.mode); adjustHeaderIcons(display.mode); } + function removeEmptyLabel(config, indexes) { + config.datasets.forEach((item, index) =>{ + if(item.data[0] == '') { + indexes.push(index); + } + }) + var sorted = indexes.sort(function(a, b){return b-a}); + + sorted.forEach((index)=>{ + config.datasets.splice(index,1); + }) + return config; + } function processMetricNew(labels, datasets, chartTitle, container, widthClass, id) { // ratio for consistent chart label height