From f594f8f1ce0639eb0ef9697375edc5427f244098 Mon Sep 17 00:00:00 2001
From: River Li
Date: Thu, 1 Feb 2024 18:51:30 +0800
Subject: [PATCH 01/10] [CAPI] fixed capi va different data type of surface (#22579)

Windows defines DEV_OBJECT_HANDLE as a void*, while Linux defines it as uint32_t:
Windows: static constexpr Property<void*> dev_object_handle{"DEV_OBJECT_HANDLE"};
Linux: static constexpr Property<uint32_t> dev_object_handle{"DEV_OBJECT_HANDLE"};
---
 src/bindings/c/src/ov_remote_context.cpp | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)

diff --git a/src/bindings/c/src/ov_remote_context.cpp b/src/bindings/c/src/ov_remote_context.cpp
index 68e802e256973e..f1b9d7cbd6aacf 100644
--- a/src/bindings/c/src/ov_remote_context.cpp
+++ b/src/bindings/c/src/ov_remote_context.cpp
@@ -20,12 +20,14 @@ const char* ov_property_key_intel_gpu_dev_object_handle = "DEV_OBJECT_HANDLE";
 const char* ov_property_key_intel_gpu_va_plane = "VA_PLANE";

 inline bool check_intel_gpu_property_value_is_ptr(std::string& key) {
-    if ((key == ov_property_key_intel_gpu_ocl_context) || (key == ov_property_key_intel_gpu_ocl_queue) ||
-        (key == ov_property_key_intel_gpu_va_device) || (key == ov_property_key_intel_gpu_mem_handle) ||
-        (key == ov_property_key_intel_gpu_dev_object_handle))
-        return true;
-    else
-        return false;
+#ifdef _WIN32
+    return (key == ov_property_key_intel_gpu_ocl_context) || (key == ov_property_key_intel_gpu_ocl_queue) ||
+           (key == ov_property_key_intel_gpu_va_device) || (key == ov_property_key_intel_gpu_mem_handle) ||
+           (key == ov_property_key_intel_gpu_dev_object_handle);
+#else
+    return (key == ov_property_key_intel_gpu_ocl_context) || (key == ov_property_key_intel_gpu_ocl_queue) ||
+           (key == ov_property_key_intel_gpu_va_device) || (key == ov_property_key_intel_gpu_mem_handle);
+#endif
 }

 //!< Properties of intel gpu cannot be compeletly handled by (char*) type, because it contains non-char pointer which

From 739207f20c4bfe4fb31c512fe18d9cd689da87f8 Mon Sep 17 00:00:00 2001
From: Oleg Pipikin
Date: Thu, 1 Feb 2024 11:53:05 +0100
Subject: [PATCH 02/10] Add raw data comparison into test utils (#22564)

---
 .../single_layer_tests/matrix_nms.cpp         |   2 +-
 .../single_layer_tests/multiclass_nms.cpp     |   6 +-
 .../op_reference/base_reference_test.cpp      | 172 +++++++++---------
 .../ov_infer_request/infer_correctness.cpp    |   2 +-
 .../src/single_op/generate_proposals.cpp      |  20 +-
 .../include/common_test_utils/data_utils.hpp  |  46 +++++
 6 files changed, 146 insertions(+), 102 deletions(-)

diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/matrix_nms.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/matrix_nms.cpp
index 84b25ebf183915..eb49ca4fd79189 100644
--- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/matrix_nms.cpp
+++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/matrix_nms.cpp
@@ -128,7 +128,7 @@ void MatrixNmsLayerTestGPU::compare(const std::vector &expectedOutpu

 #define CASE(X, Y, _expected_offset, _actual_offset, _size, _threshold)            \
     case X:                                                                        \
-        LayerTestsUtils::LayerTestsCommon::Compare(                                \
+        ov::test::utils::compare_raw_data(                                         \
             reinterpret_cast*>(expectedBuffer) + _expected_offset,                 \
             reinterpret_cast*>(actualBuffer) + _actual_offset, _size, _threshold); \
         break;
diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/multiclass_nms.cpp
b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/multiclass_nms.cpp index b02697a9364d5a..8ca141c5ba2d2c 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/multiclass_nms.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/multiclass_nms.cpp @@ -256,9 +256,9 @@ void MulticlassNmsLayerTestGPU::compare(const std::vector &expectedO case ov::element::Type_t::elem_type: { \ using tensor_type = ov::fundamental_type_for; \ using actual_type = ov::fundamental_type_for; \ - LayerTestsUtils::LayerTestsCommon::Compare(reinterpret_cast(expectedBuffer) + expected_offset, \ - reinterpret_cast(actualBuffer) + actual_offset, \ - size, _threshold); \ + ov::test::utils::compare_raw_data(reinterpret_cast(expectedBuffer) + expected_offset, \ + reinterpret_cast(actualBuffer) + actual_offset, \ + size, _threshold); \ break; \ } switch (precision) { diff --git a/src/plugins/template/tests/functional/op_reference/base_reference_test.cpp b/src/plugins/template/tests/functional/op_reference/base_reference_test.cpp index ed8621d0351a3e..845c5c0b15b1c8 100644 --- a/src/plugins/template/tests/functional/op_reference/base_reference_test.cpp +++ b/src/plugins/template/tests/functional/op_reference/base_reference_test.cpp @@ -92,126 +92,124 @@ void CommonReferenceTest::ValidateBlobs(const ov::Tensor& refBlob, const auto& element_type = refBlob.get_element_type(); switch (element_type) { case ov::element::bf16: - LayerTestsUtils::LayerTestsCommon::Compare(refBlob.data(), - outBlob.data(), - actual_comparision_size, - threshold, - abs_threshold); + ov::test::utils::compare_raw_data(refBlob.data(), + outBlob.data(), + actual_comparision_size, + threshold, + abs_threshold); break; case ov::element::f16: - LayerTestsUtils::LayerTestsCommon::Compare(refBlob.data(), - outBlob.data(), - actual_comparision_size, - threshold, - abs_threshold); + ov::test::utils::compare_raw_data(refBlob.data(), + outBlob.data(), + actual_comparision_size, + threshold, + abs_threshold); break; case ov::element::f8e4m3: - LayerTestsUtils::LayerTestsCommon::Compare( - refBlob.data(), - outBlob.data(), - actual_comparision_size, - threshold, - abs_threshold); + ov::test::utils::compare_raw_data(refBlob.data(), + outBlob.data(), + actual_comparision_size, + threshold, + abs_threshold); break; case ov::element::f8e5m2: - LayerTestsUtils::LayerTestsCommon::Compare( - refBlob.data(), - outBlob.data(), - actual_comparision_size, - threshold, - abs_threshold); + ov::test::utils::compare_raw_data(refBlob.data(), + outBlob.data(), + actual_comparision_size, + threshold, + abs_threshold); break; case ov::element::f32: - LayerTestsUtils::LayerTestsCommon::Compare(refBlob.data(), - outBlob.data(), - actual_comparision_size, - threshold, - abs_threshold); + ov::test::utils::compare_raw_data(refBlob.data(), + outBlob.data(), + actual_comparision_size, + threshold, + abs_threshold); break; case ov::element::f64: - LayerTestsUtils::LayerTestsCommon::Compare(refBlob.data(), - outBlob.data(), - actual_comparision_size, - threshold, - abs_threshold); + ov::test::utils::compare_raw_data(refBlob.data(), + outBlob.data(), + actual_comparision_size, + threshold, + abs_threshold); break; case ov::element::i8: - LayerTestsUtils::LayerTestsCommon::Compare(refBlob.data(), - outBlob.data(), - actual_comparision_size, - threshold, - abs_threshold); + ov::test::utils::compare_raw_data(refBlob.data(), + outBlob.data(), + actual_comparision_size, + threshold, + 
abs_threshold); break; case ov::element::i16: - LayerTestsUtils::LayerTestsCommon::Compare(refBlob.data(), - outBlob.data(), - actual_comparision_size, - threshold, - abs_threshold); + ov::test::utils::compare_raw_data(refBlob.data(), + outBlob.data(), + actual_comparision_size, + threshold, + abs_threshold); break; case ov::element::i32: - LayerTestsUtils::LayerTestsCommon::Compare(refBlob.data(), - outBlob.data(), - actual_comparision_size, - threshold, - abs_threshold); + ov::test::utils::compare_raw_data(refBlob.data(), + outBlob.data(), + actual_comparision_size, + threshold, + abs_threshold); break; case ov::element::i64: - LayerTestsUtils::LayerTestsCommon::Compare(refBlob.data(), - outBlob.data(), - actual_comparision_size, - threshold, - abs_threshold); + ov::test::utils::compare_raw_data(refBlob.data(), + outBlob.data(), + actual_comparision_size, + threshold, + abs_threshold); break; case ov::element::boolean: - LayerTestsUtils::LayerTestsCommon::Compare(refBlob.data(), - outBlob.data(), - actual_comparision_size, - threshold, - abs_threshold); + ov::test::utils::compare_raw_data(refBlob.data(), + outBlob.data(), + actual_comparision_size, + threshold, + abs_threshold); break; case ov::element::u8: - LayerTestsUtils::LayerTestsCommon::Compare(refBlob.data(), - outBlob.data(), - actual_comparision_size, - threshold, - abs_threshold); + ov::test::utils::compare_raw_data(refBlob.data(), + outBlob.data(), + actual_comparision_size, + threshold, + abs_threshold); break; case ov::element::u16: - LayerTestsUtils::LayerTestsCommon::Compare(refBlob.data(), - outBlob.data(), - actual_comparision_size, - threshold, - abs_threshold); + ov::test::utils::compare_raw_data(refBlob.data(), + outBlob.data(), + actual_comparision_size, + threshold, + abs_threshold); break; case ov::element::u32: - LayerTestsUtils::LayerTestsCommon::Compare(refBlob.data(), - outBlob.data(), - actual_comparision_size, - threshold, - abs_threshold); + ov::test::utils::compare_raw_data(refBlob.data(), + outBlob.data(), + actual_comparision_size, + threshold, + abs_threshold); break; case ov::element::u64: - LayerTestsUtils::LayerTestsCommon::Compare(refBlob.data(), - outBlob.data(), - actual_comparision_size, - threshold, - abs_threshold); + ov::test::utils::compare_raw_data(refBlob.data(), + outBlob.data(), + actual_comparision_size, + threshold, + abs_threshold); break; case ov::element::i4: case ov::element::u4: - LayerTestsUtils::LayerTestsCommon::Compare(static_cast(refBlob.data()), - static_cast(outBlob.data()), - actual_comparision_size / 2, - threshold, - abs_threshold); + ov::test::utils::compare_raw_data(static_cast(refBlob.data()), + static_cast(outBlob.data()), + actual_comparision_size / 2, + threshold, + abs_threshold); break; case ov::element::u1: - LayerTestsUtils::LayerTestsCommon::Compare(static_cast(refBlob.data()), - static_cast(outBlob.data()), - actual_comparision_size / 8, - threshold, - abs_threshold); + ov::test::utils::compare_raw_data(static_cast(refBlob.data()), + static_cast(outBlob.data()), + actual_comparision_size / 8, + threshold, + abs_threshold); break; default: FAIL() << "Comparator for " << element_type << " element type isn't supported"; diff --git a/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/infer_correctness.cpp b/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/infer_correctness.cpp index 0e3e84fa317989..dbde8b3f562a37 100644 --- a/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/infer_correctness.cpp +++ 
b/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/infer_correctness.cpp @@ -117,7 +117,7 @@ bool OVInferConsistencyTest::IsEqual(std::vector& a, } try { // if not equal will throw exception - LayerTestsUtils::LayerTestsCommon::Compare( + ov::test::utils::compare_raw_data( a[j].data(), b[j].data(), a[j].get_size(), 1e-2f); } catch (...) { isEqual = false; diff --git a/src/tests/functional/shared_test_classes/src/single_op/generate_proposals.cpp b/src/tests/functional/shared_test_classes/src/single_op/generate_proposals.cpp index 99e49061376d31..5030a1f3dea04f 100644 --- a/src/tests/functional/shared_test_classes/src/single_op/generate_proposals.cpp +++ b/src/tests/functional/shared_test_classes/src/single_op/generate_proposals.cpp @@ -117,17 +117,17 @@ void GenerateProposalsLayerTest::compare(const std::vector& expected const auto outputSize = i == 0 ? 4 : 1; if (outType == ov::element::f32) { - LayerTestsUtils::LayerTestsCommon::Compare(reinterpret_cast(expectedBuffer), - reinterpret_cast(actualBuffer), - expectedNumRois * outputSize, - rel_threshold, - abs_threshold); + ov::test::utils::compare_raw_data(reinterpret_cast(expectedBuffer), + reinterpret_cast(actualBuffer), + expectedNumRois * outputSize, + rel_threshold, + abs_threshold); } else { - LayerTestsUtils::LayerTestsCommon::Compare(reinterpret_cast(expectedBuffer), - reinterpret_cast(actualBuffer), - expectedNumRois * outputSize, - rel_threshold, - abs_threshold); + ov::test::utils::compare_raw_data(reinterpret_cast(expectedBuffer), + reinterpret_cast(actualBuffer), + expectedNumRois * outputSize, + rel_threshold, + abs_threshold); } if (expectedNumRois < actualNumRois) { diff --git a/src/tests/test_utils/common_test_utils/include/common_test_utils/data_utils.hpp b/src/tests/test_utils/common_test_utils/include/common_test_utils/data_utils.hpp index 45471f0249fa28..a41bbd4e61b2ff 100644 --- a/src/tests/test_utils/common_test_utils/include/common_test_utils/data_utils.hpp +++ b/src/tests/test_utils/common_test_utils/include/common_test_utils/data_utils.hpp @@ -547,6 +547,52 @@ inline ov::float8_e5m2 ie_abs(const ov::float8_e5m2& val) { return ov::float8_e5m2::from_bits(val.to_bits() & 0x7F); } +template +static void compare_raw_data(const T_EXPECTED* expected, + const T_ACTUAL* actual, + std::size_t size, + float threshold, + float abs_threshold = -1.f) { + for (std::size_t i = 0; i < size; ++i) { + const T_EXPECTED& ref = expected[i]; + const auto& res = actual[i]; + const auto absoluteDifference = ov::test::utils::ie_abs(res - ref); + if (abs_threshold > 0.f && absoluteDifference > abs_threshold) { + OPENVINO_THROW("Absolute comparison of values expected: ", + std::to_string(ref), + " and actual: ", + std::to_string(res), + " at index ", + i, + " with absolute threshold ", + abs_threshold, + " failed"); + } + if (absoluteDifference <= threshold) { + continue; + } + double max; + if (sizeof(T_ACTUAL) < sizeof(T_EXPECTED)) { + max = static_cast(std::max(ov::test::utils::ie_abs(T_EXPECTED(res)), ov::test::utils::ie_abs(ref))); + } else { + max = static_cast(std::max(ov::test::utils::ie_abs(res), ov::test::utils::ie_abs(T_ACTUAL(ref)))); + } + double diff = static_cast(absoluteDifference) / max; + if (max == 0 || (diff > static_cast(threshold)) || + (std::isnan(static_cast(res)) ^ std::isnan(static_cast(ref)))) { + OPENVINO_THROW("Relative comparison of values expected: ", + std::to_string(ref), + " and actual: ", + std::to_string(res), + " at index ", + i, + " with threshold ", + threshold, + " failed"); + } + } +} + 
} // namespace utils } // namespace test } // namespace ov From 15466d7c1080cbe51cc564491fabd46d0436e835 Mon Sep 17 00:00:00 2001 From: Andrzej Kopytko Date: Thu, 1 Feb 2024 11:56:00 +0100 Subject: [PATCH 03/10] DOCS Fixed empty json file on baner load (#22532) --- docs/sphinx_setup/_static/css/custom.css | 5 ++ .../_static/html/banner_data.json | 4 +- docs/sphinx_setup/_static/js/hide_banner.js | 68 ++++++++++--------- 3 files changed, 42 insertions(+), 35 deletions(-) diff --git a/docs/sphinx_setup/_static/css/custom.css b/docs/sphinx_setup/_static/css/custom.css index ac4a3e1f9386e0..7091e346e5a818 100644 --- a/docs/sphinx_setup/_static/css/custom.css +++ b/docs/sphinx_setup/_static/css/custom.css @@ -261,6 +261,11 @@ div.highlight { .bd-sidebar { top: calc(var(--pst-header-height)); } + +.bd-links { + margin-top: 10px; +} + body { padding-top: calc(var(--pst-header-height)); } diff --git a/docs/sphinx_setup/_static/html/banner_data.json b/docs/sphinx_setup/_static/html/banner_data.json index 77f7bb8e4569be..be5c8a950b291a 100644 --- a/docs/sphinx_setup/_static/html/banner_data.json +++ b/docs/sphinx_setup/_static/html/banner_data.json @@ -1,5 +1,5 @@ { - "p": "OpenVINO 2022.1 has introduced OpenVINO API 2.0. For more information on transition steps from the previous API, see the transition guide", + "p": "", "version": "1", - "style": "info" + "style": "warning" } \ No newline at end of file diff --git a/docs/sphinx_setup/_static/js/hide_banner.js b/docs/sphinx_setup/_static/js/hide_banner.js index 76fe65ba041087..54a96ca1a2616c 100644 --- a/docs/sphinx_setup/_static/js/hide_banner.js +++ b/docs/sphinx_setup/_static/js/hide_banner.js @@ -2,40 +2,42 @@ var cookieVersion = 0; window.onload = (e) => { fetch('/build/docs/_build/_static/html/banner_data.json').then((data) => { data.json().then((item) => { - cookieVersion = item.version; - var transitionBanner = document.getElementById("info-banner"); - if (document.cookie.split(';').filter(function (find_cookie_name) { - return find_cookie_name.trim().indexOf('TransitionBannerIsHiddenX' + cookieVersion + '=') === 0; - } - ).length) { - transitionBanner.classList.add("hidden-banner"); - } - transitionBanner.classList.add(item.style); - - const p = document.createElement("p"); - p.innerHTML = item.p; - transitionBanner.append(p); - - const button = document.createElement("button"); - button.className = "close-banner"; - button.type = "button"; - - const span = document.createElement("span"); - span.setAttribute('aria-hidden', 'true'); - span.innerHTML = '×'; - button.appendChild(span); - button.addEventListener("click", function () { - var cookieContent = 'TransitionBannerIsHiddenX' + cookieVersion + '=true;'; - var expiry = 'expires='; - var date = new Date(); - var expirationDate = date.getTime() + (365 * 24 * 60 * 60 * 1000); - date.setTime(expirationDate); - expiry += date.toUTCString(); - document.cookie = cookieContent + expiry; + if(item != null && item.p != "" && item.version != "" && item.style != "" ) { + cookieVersion = item.version; var transitionBanner = document.getElementById("info-banner"); - transitionBanner.classList.add("hidden-banner"); - }); - transitionBanner.append(button) + if (document.cookie.split(';').filter(function (find_cookie_name) { + return find_cookie_name.trim().indexOf('TransitionBannerIsHiddenX' + cookieVersion + '=') === 0; + } + ).length) { + transitionBanner.classList.add("hidden-banner"); + } + transitionBanner.classList.add(item.style); + + const p = document.createElement("p"); + p.innerHTML = 
item.p; + transitionBanner.append(p); + + const button = document.createElement("button"); + button.className = "close-banner"; + button.type = "button"; + + const span = document.createElement("span"); + span.setAttribute('aria-hidden', 'true'); + span.innerHTML = '×'; + button.appendChild(span); + button.addEventListener("click", function () { + var cookieContent = 'TransitionBannerIsHiddenX' + cookieVersion + '=true;'; + var expiry = 'expires='; + var date = new Date(); + var expirationDate = date.getTime() + (365 * 24 * 60 * 60 * 1000); + date.setTime(expirationDate); + expiry += date.toUTCString(); + document.cookie = cookieContent + expiry; + var transitionBanner = document.getElementById("info-banner"); + transitionBanner.classList.add("hidden-banner"); + }); + transitionBanner.append(button) + } }); }); }; From dbba5f0d748d25776b11ac3f4f47419bd69fb49d Mon Sep 17 00:00:00 2001 From: Andrey Babushkin Date: Thu, 1 Feb 2024 10:57:49 +0000 Subject: [PATCH 04/10] [GHA] Testing solution for GitHub Actions workflows telemetry (#22577) --- .../send_workflows_to_opentelemetry.yml | 42 +++++++++++++++++++ 1 file changed, 42 insertions(+) create mode 100644 .github/workflows/send_workflows_to_opentelemetry.yml diff --git a/.github/workflows/send_workflows_to_opentelemetry.yml b/.github/workflows/send_workflows_to_opentelemetry.yml new file mode 100644 index 00000000000000..42cddd7b88d9dd --- /dev/null +++ b/.github/workflows/send_workflows_to_opentelemetry.yml @@ -0,0 +1,42 @@ +name: Send workflows to OpenTelemetry (BETA) + +on: + workflow_run: + workflows: + - Android ARM64 with vcpkg + - Documentation + - Cleanup PIP caches + - Code snippets + - Code Style + - Code coverage + - Coverity (Ubuntu 20.04, Python 3.11) + - Fedora (RHEL), Python 3.9 + - Linux (Ubuntu 20.04, Python 3.11) + - Linux ARM64 (Ubuntu 20.04, Python 3.11) + - Linux Static CC (Ubuntu 22.04, Python 3.11, Clang) + - Linux RISC-V with Conan (Ubuntu 22.04, Python 3.10) + - macOS (Python 3.11) + - macOS ARM64 (Python 3.11) + - MO + - Python API Checks + - Webassembly + - Windows (VS 2019, Python 3.11) + - Windows Conditional Compilation (VS 2022, Python 3.11) + types: + - completed + +permissions: read-all + +jobs: + otel-export-trace: + name: OpenTelemetry Export Trace + runs-on: ubuntu-latest + + steps: + - name: Export Workflow Trace + uses: inception-health/otel-export-trace-action@7eabc7de1f4753f0b45051b44bb0ba46d05a21ef + with: + otlpEndpoint: grpc://api.honeycomb.io:443/ + otlpHeaders: ${{ secrets.OTLP_HEADERS }} + githubToken: ${{ secrets.GITHUB_TOKEN }} + runId: ${{ github.event.workflow_run.id }} From e87403a8e3c716ff1f1525760ef1dea0dd865645 Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Thu, 1 Feb 2024 14:58:21 +0400 Subject: [PATCH 05/10] Removed API 1.0 ONNX FE headers install (#22572) --- src/frontends/onnx/frontend/CMakeLists.txt | 5 ----- 1 file changed, 5 deletions(-) diff --git a/src/frontends/onnx/frontend/CMakeLists.txt b/src/frontends/onnx/frontend/CMakeLists.txt index 50782959202168..d5f52a115e8940 100644 --- a/src/frontends/onnx/frontend/CMakeLists.txt +++ b/src/frontends/onnx/frontend/CMakeLists.txt @@ -17,8 +17,3 @@ ov_ncc_naming_style(FOR_TARGET ${TARGET_NAME} SOURCE_DIRECTORIES "${${TARGET_NAME}_INCLUDE_DIR}" DEFINITIONS $) - -install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/include/onnx_import - DESTINATION ${FRONTEND_INSTALL_INCLUDE}/ngraph/frontend - COMPONENT ${OV_CPACK_COMP_CORE_DEV} - ${OV_CPACK_COMP_CORE_DEV_EXCLUDE_ALL}) From 10087cccd0a69ff89900643e8dfe4eee5e28876c Mon Sep 17 00:00:00 2001 
From: "Wang, Yang" Date: Thu, 1 Feb 2024 19:20:58 +0800 Subject: [PATCH 06/10] [AUTO plugin] auto minor cleanup (#22583) * remove redundant code and update the test case. * Update. --- src/plugins/auto/src/auto_compiled_model.cpp | 2 -- src/plugins/auto/tests/unit/compile_model_metric_test.cpp | 1 + 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/src/plugins/auto/src/auto_compiled_model.cpp b/src/plugins/auto/src/auto_compiled_model.cpp index f682784dfefdc1..e35fdc5a8c9004 100644 --- a/src/plugins/auto/src/auto_compiled_model.cpp +++ b/src/plugins/auto/src/auto_compiled_model.cpp @@ -118,8 +118,6 @@ ov::Any AutoCompiledModel::get_property(const std::string& name) const { return decltype(ov::optimal_number_of_infer_requests)::value_type{real}; } requests = 0; - // check if the real is default value or actual device didn't support this property. - OPENVINO_ASSERT(m_scheduler->m_compile_context[CPU].m_is_already == true); try { // for benchmark through AUTO:CPU,GPU // SetConfig directly set to CPU/GPU in this case diff --git a/src/plugins/auto/tests/unit/compile_model_metric_test.cpp b/src/plugins/auto/tests/unit/compile_model_metric_test.cpp index df612e2ce45af9..41c81fe34edfd1 100644 --- a/src/plugins/auto/tests/unit/compile_model_metric_test.cpp +++ b/src/plugins/auto/tests/unit/compile_model_metric_test.cpp @@ -312,6 +312,7 @@ const std::vector testConfigs = { ConfigParams{false, 3, 5, false, 2, 5, true, ov::test::utils::DEVICE_GPU, 1, 0, false, true}, ConfigParams{true, 3, 5, false, 2, 5, true, ov::test::utils::DEVICE_GPU, 48, 0, false, true}, ConfigParams{false, 3, 5, true, 2, 5, false, ov::test::utils::DEVICE_GPU, 2, 0, false, true}, + ConfigParams{false, 3, 5, false, 2, 5, false, ov::test::utils::DEVICE_GPU, 2, 0, false, true}, ConfigParams{true, 3, 5, true, 2, 5, false, ov::test::utils::DEVICE_GPU, 2, 0, false, true}, ConfigParams{true, 3, 5, false, 2, 5, true, ov::test::utils::DEVICE_GPU, 48, 48, false, true}, ConfigParams{true, 3, 5, false, 2, 5, true, ov::test::utils::DEVICE_GPU, 6, 6, false, true}, From 2749c3ab7729fd4ff21e69ddb827cbf37eeefb1a Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Thu, 1 Feb 2024 15:37:24 +0400 Subject: [PATCH 07/10] Prefix OpenVINO thirdparty targets with openvino:: (#22542) --- docs/dev/build_mac_arm.md | 2 +- docs/dev/build_mac_intel_cpu.md | 2 +- install_build_dependencies.sh | 21 ++++--------------- src/frontends/paddle/tests/CMakeLists.txt | 2 +- .../tests/frontend/shared/CMakeLists.txt | 2 +- tests/fuzz/src/CMakeLists.txt | 2 +- thirdparty/cnpy/CMakeLists.txt | 5 +++-- thirdparty/zlib/CMakeLists.txt | 4 ++-- 8 files changed, 14 insertions(+), 26 deletions(-) diff --git a/docs/dev/build_mac_arm.md b/docs/dev/build_mac_arm.md index 80678bb6ce4681..fb728d18d5f040 100644 --- a/docs/dev/build_mac_arm.md +++ b/docs/dev/build_mac_arm.md @@ -33,7 +33,7 @@ The software was validated on: ``` - (arm64 only) Product and samples dependencies: ```sh - % brew install tbb pugixml flatbuffers snappy protobuf nlohmann-json zlib gflags + % brew install tbb pugixml flatbuffers snappy protobuf ``` - Additional `pip` dependencies to build OpenVINO Runtime Python API, Development tools (Model Optimizer, POT and others): ```sh diff --git a/docs/dev/build_mac_intel_cpu.md b/docs/dev/build_mac_intel_cpu.md index 606178e3f376fe..8d57aa12aae0c5 100644 --- a/docs/dev/build_mac_intel_cpu.md +++ b/docs/dev/build_mac_intel_cpu.md @@ -30,7 +30,7 @@ The software was validated on: ``` - (Optional; native compilation only, x86_64) Product and samples dependencies: 
```sh - % brew install tbb pugixml flatbuffers snappy protobuf nlohmann-json zlib gflags + % brew install tbb pugixml flatbuffers snappy protobuf ``` - Additional `pip` dependencies to build OpenVINO Runtime Python API, Development tools (Model Optimizer, POT and others): ```sh diff --git a/install_build_dependencies.sh b/install_build_dependencies.sh index de2a53a25c9b77..61a8cf5974b202 100755 --- a/install_build_dependencies.sh +++ b/install_build_dependencies.sh @@ -69,9 +69,7 @@ if [ -f /etc/lsb-release ] || [ -f /etc/debian_version ] ; then libffi-dev \ `# spell checking for MO sources` \ python3-enchant \ - `# samples and tools` \ - libgflags-dev \ - zlib1g-dev \ + `# tools` \ wget # TF lite frontend if apt-cache search --names-only '^libflatbuffers-dev'| grep -q libflatbuffers-dev; then @@ -128,10 +126,7 @@ elif [ -f /etc/redhat-release ] || grep -q "rhel" /etc/os-release ; then opencl-headers \ `# python API` \ python3-pip \ - python3-devel \ - `# samples and tools` \ - zlib-devel \ - gflags-devel + python3-devel elif [ -f /etc/os-release ] && grep -q "SUSE" /etc/os-release ; then zypper refresh zypper install -y \ @@ -169,11 +164,7 @@ elif [ -f /etc/os-release ] && grep -q "SUSE" /etc/os-release ; then `# python API` \ python39-pip \ python39-setuptools \ - python39-devel \ - `# samples and tools` \ - zlib-devel \ - gflags-devel-static \ - nlohmann_json-devel + python39-devel elif [ -f /etc/os-release ] && grep -q "raspbian" /etc/os-release; then # Raspbian apt update @@ -200,11 +191,7 @@ elif [ -f /etc/os-release ] && grep -q "raspbian" /etc/os-release; then python3-pip \ python3-venv \ python3-setuptools \ - libpython3-dev \ - `# samples and tools` \ - libgflags-dev \ - zlib1g-dev \ - nlohmann-json-dev + libpython3-dev else echo "Unknown OS, please install build dependencies manually" fi diff --git a/src/frontends/paddle/tests/CMakeLists.txt b/src/frontends/paddle/tests/CMakeLists.txt index 078a133138aa45..1604103638d5a4 100644 --- a/src/frontends/paddle/tests/CMakeLists.txt +++ b/src/frontends/paddle/tests/CMakeLists.txt @@ -33,7 +33,7 @@ ov_add_test_target( openvino_paddle_frontend paddle_fe_standalone_build_test LINK_LIBRARIES - cnpy + openvino::cnpy frontend_shared_test_classes openvino_paddle_frontend openvino::runtime diff --git a/src/frontends/tests/frontend/shared/CMakeLists.txt b/src/frontends/tests/frontend/shared/CMakeLists.txt index f413e359afb738..5f4328d24ac486 100644 --- a/src/frontends/tests/frontend/shared/CMakeLists.txt +++ b/src/frontends/tests/frontend/shared/CMakeLists.txt @@ -23,7 +23,7 @@ target_link_libraries(${TARGET_NAME} openvino::util openvino::runtime PRIVATE - cnpy) + openvino::cnpy) add_dependencies(${TARGET_NAME} test_builtin_extensions) diff --git a/tests/fuzz/src/CMakeLists.txt b/tests/fuzz/src/CMakeLists.txt index fc154c2f5fc74b..6f18bab6476889 100644 --- a/tests/fuzz/src/CMakeLists.txt +++ b/tests/fuzz/src/CMakeLists.txt @@ -17,7 +17,7 @@ foreach(test_source ${tests}) add_fuzzer(${test_name} ${test_source}) target_link_libraries(${test_name} PRIVATE - openvino::runtime cnpy zlib) + openvino::runtime openvino::cnpy openvino::zlib) add_dependencies(fuzz ${test_name}) diff --git a/thirdparty/cnpy/CMakeLists.txt b/thirdparty/cnpy/CMakeLists.txt index c10d23301d9113..c418256aed012b 100644 --- a/thirdparty/cnpy/CMakeLists.txt +++ b/thirdparty/cnpy/CMakeLists.txt @@ -4,16 +4,17 @@ project(cnpy) -set(TARGET_NAME "cnpy") +set(TARGET_NAME "openvino_cnpy") add_library(${TARGET_NAME} STATIC cnpy.cpp) +add_library(openvino::cnpy ALIAS ${TARGET_NAME}) 
if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID MATCHES "^(Apple)?Clang$") target_compile_options(${TARGET_NAME} PUBLIC -Wno-unused-variable PRIVATE -Wno-all) endif() -target_link_libraries(${TARGET_NAME} PUBLIC ZLIB::ZLIB) +target_link_libraries(${TARGET_NAME} PUBLIC openvino::zlib) target_include_directories(${TARGET_NAME} PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}") set_target_properties(${TARGET_NAME} PROPERTIES FOLDER thirdparty) diff --git a/thirdparty/zlib/CMakeLists.txt b/thirdparty/zlib/CMakeLists.txt index f52d7bd027b1e5..53974f8029ad7e 100644 --- a/thirdparty/zlib/CMakeLists.txt +++ b/thirdparty/zlib/CMakeLists.txt @@ -24,7 +24,7 @@ else() endif() endif() -set(TARGET_NAME "zlib") +set(TARGET_NAME "openvino_zlib") set(zlib_srcs zlib/adler32.c @@ -59,7 +59,7 @@ set(zlib_ext_hdrs zlib/zconf.h) add_library(${TARGET_NAME} STATIC ${zlib_srcs} ${zlib_hdrs} ${lib_ext_hdrs}) -add_library(ZLIB::ZLIB ALIAS ${TARGET_NAME}) +add_library(openvino::zlib ALIAS ${TARGET_NAME}) target_include_directories(${TARGET_NAME} PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/zlib") From 6b010adef3e22a82fb2d9a5f9708e91291f42b4c Mon Sep 17 00:00:00 2001 From: Ekaterina Aidova Date: Thu, 1 Feb 2024 15:51:36 +0400 Subject: [PATCH 08/10] fix aten::empty_like dtype (#22584) --- src/frontends/pytorch/src/op/full.cpp | 2 +- tests/layer_tests/pytorch_tests/test_empty.py | 26 ++++++++++++++++--- 2 files changed, 23 insertions(+), 5 deletions(-) diff --git a/src/frontends/pytorch/src/op/full.cpp b/src/frontends/pytorch/src/op/full.cpp index 70ff0ba980a7a0..defcbab7095089 100644 --- a/src/frontends/pytorch/src/op/full.cpp +++ b/src/frontends/pytorch/src/op/full.cpp @@ -235,7 +235,7 @@ OutputVector translate_empty_like(const NodeContext& context) { if (!context.input_is_none(dtype_id)) { empty = base_translate_full_with_convert(context, sizes, value, dtype_id); } else { - empty = base_translate_full(context, sizes, value); + empty = base_translate_full_with_convertlike(context, sizes, value, input); } } else if (context.get_input_size() == 4) { auto out = context.input_is_none(3) ? input : context.get_input(3); diff --git a/tests/layer_tests/pytorch_tests/test_empty.py b/tests/layer_tests/pytorch_tests/test_empty.py index da5cff320f8b93..e358f95c9997c1 100644 --- a/tests/layer_tests/pytorch_tests/test_empty.py +++ b/tests/layer_tests/pytorch_tests/test_empty.py @@ -55,11 +55,11 @@ def _prepare_input(self, shape, dtype=np.float32, out=False): return (np.random.randn(*shape).astype(dtype if dtype is not None else np.float32),) return (np.random.randn(*shape), np.ones(shape, dtype=(dtype if dtype is not None else np.float32))) - def create_model(self, dtype, out): + def create_model(self, dtype, out, no_expose_dtype=False): class aten_empty_like(torch.nn.Module): - def __init__(self, dtype=None, out=False): + def __init__(self, dtype=None, out=False, no_expose_dtype=False): dtype_map = { "float32": torch.float32, "float64": torch.float64, @@ -72,6 +72,8 @@ def __init__(self, dtype=None, out=False): self.dtype = dtype_map.get(dtype, None) if out: self.forward = self.forward_out + if no_expose_dtype: + self.forward = self.forward_input_dtype def forward(self, input_tensor): empty = torch.empty_like(input_tensor, dtype=self.dtype) @@ -80,6 +82,14 @@ def forward(self, input_tensor): # produce sporadic errors if nan would be in empty. return torch.zeros_like(empty) + def forward_input_dtype(self, input_tensor): + # We don't want to compare values, just shape and type, + # so we call zeros_like on data. 
Multiplying by zero would + # produce sporadic errors if nan would be in empty. + input_tensor.to(self.dtype) + empty = torch.empty_like(input_tensor) + return torch.zeros_like(empty) + def forward_out(self, input_tensor, out_tensor): torch.empty_like(input_tensor, out=out_tensor) # We don't want to compare values, just shape and type, @@ -89,17 +99,25 @@ def forward_out(self, input_tensor, out_tensor): ref_net = None - return aten_empty_like(dtype, out), ref_net, "aten::empty_like" + return aten_empty_like(dtype, out, no_expose_dtype), ref_net, "aten::empty_like" @pytest.mark.parametrize('dtype', (None, "float32", "float64", "int64", "int32", "uint8", "int8")) @pytest.mark.parametrize("input_shape", [[2,], [1, 10], [10, 5, 2]]) @pytest.mark.parametrize("out", [True, False]) @pytest.mark.nightly @pytest.mark.precommit - def test_empty(self, ie_device, precision, ir_version, dtype, input_shape, out): + def test_empty_like(self, ie_device, precision, ir_version, dtype, input_shape, out): self._test(*self.create_model(dtype, out), ie_device, precision, ir_version, kwargs_to_prepare_input={"shape": input_shape, "out": out, "dtype": dtype}) + @pytest.mark.parametrize('dtype', (None, "float32", "float64", "int64", "int32", "uint8", "int8")) + @pytest.mark.parametrize("input_shape", [[2,], [1, 10], [10, 5, 2]]) + @pytest.mark.nightly + @pytest.mark.precommit + def test_empty_like_no_dtype(self, ie_device, precision, ir_version, dtype, input_shape): + self._test(*self.create_model(dtype, out=False, no_expose_dtype=True), ie_device, precision, ir_version, + kwargs_to_prepare_input={"shape": input_shape, "out": False, "dtype": dtype}) + class TestEmptyBoolean(PytorchLayerTest): From 3d8b05e2528ef6454615bd7bda991e5f078f8baa Mon Sep 17 00:00:00 2001 From: Evgenya Nugmanova Date: Thu, 1 Feb 2024 17:10:16 +0400 Subject: [PATCH 09/10] LayerNormalization: native translation support (#22508) * LayerNormalization: native translation support * Update layer_normalization.cpp: fixes for condition and error msg text * Update layer_normalization.cpp: copyright * Update layer_normalization.hpp: copyright * Update src/frontends/onnx/frontend/src/op/layer_normalization.cpp * Update layer_normalization.cpp: code style * Update layer_normalization.cpp: set_1 * Update layer_normalization.hpp: set_1 * Update ops_bridge.cpp: set_1 --------- Co-authored-by: Georgy Krivoruchko --- .../onnx/frontend/src/core/transform.hpp | 9 +- .../frontend/src/op/layer_normalization.cpp | 102 ++++++++++++++++++ .../frontend/src/op/layer_normalization.hpp | 20 ++++ .../onnx/frontend/src/ops_bridge.cpp | 2 + .../onnx/tests/tests_python/test_backend.py | 19 ---- 5 files changed, 126 insertions(+), 26 deletions(-) create mode 100644 src/frontends/onnx/frontend/src/op/layer_normalization.cpp create mode 100644 src/frontends/onnx/frontend/src/op/layer_normalization.hpp diff --git a/src/frontends/onnx/frontend/src/core/transform.hpp b/src/frontends/onnx/frontend/src/core/transform.hpp index 77ab1676c22a96..56d751baf0fb96 100644 --- a/src/frontends/onnx/frontend/src/core/transform.hpp +++ b/src/frontends/onnx/frontend/src/core/transform.hpp @@ -10,13 +10,8 @@ namespace ngraph { namespace onnx_import { namespace transform { -static const std::vector onnx_functions_to_expand = {"AffineGrid", - "Bernoulli", - "Celu", - "CenterCropPad", - "NegativeLogLikelihoodLoss", - "SoftmaxCrossEntropyLoss", - "LayerNormalization"}; +static const std::vector onnx_functions_to_expand = + {"AffineGrid", "Bernoulli", "Celu", "CenterCropPad", 
"NegativeLogLikelihoodLoss", "SoftmaxCrossEntropyLoss"}; /// \brief Replace nodes with expanded body of ONNX functions /// diff --git a/src/frontends/onnx/frontend/src/op/layer_normalization.cpp b/src/frontends/onnx/frontend/src/op/layer_normalization.cpp new file mode 100644 index 00000000000000..0698a37fbdccda --- /dev/null +++ b/src/frontends/onnx/frontend/src/op/layer_normalization.cpp @@ -0,0 +1,102 @@ +// Copyright (C) 2018-2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "op/layer_normalization.hpp" + +#include "exceptions.hpp" +#include "openvino/op/add.hpp" +#include "openvino/op/broadcast.hpp" +#include "openvino/op/concat.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/convert.hpp" +#include "openvino/op/convert_like.hpp" +#include "openvino/op/divide.hpp" +#include "openvino/op/multiply.hpp" +#include "openvino/op/negative.hpp" +#include "openvino/op/reduce_mean.hpp" +#include "openvino/op/reshape.hpp" +#include "openvino/op/shape_of.hpp" +#include "openvino/op/slice.hpp" +#include "openvino/op/sqrt.hpp" +#include "openvino/op/subtract.hpp" +#include "ov_models/ov_builders/reshape.hpp" +#include "utils/common.hpp" + +using namespace ov::op; +using namespace ov::op::v0; +using namespace ov::op::v1; +using namespace ov::op::v8; + +OPENVINO_SUPPRESS_DEPRECATED_START +namespace ngraph { +namespace onnx_import { +namespace op { +namespace set_1 { + +ov::OutputVector layer_normalization(const Node& node) { + const auto inputs = node.get_ng_inputs(); + const auto num_inputs = inputs.size(); + CHECK_VALID_NODE(node, + num_inputs == 2 || num_inputs == 3, + "LayerNormalization expects 2 or 3 input tensors. Got: ", + num_inputs); + + const auto& X = inputs.at(0); + const auto& Scale = inputs.at(1); + + auto axis = node.get_attribute_value("axis", -1); + double epsilon = node.get_attribute_value("epsilon", 1e-5); + int64_t stash_type_i = + node.get_attribute_value("stash_type", + static_cast(ONNX_NAMESPACE::TensorProto_DataType_FLOAT)); + element::Type stash_type = common::get_ov_element_type(stash_type_i); + + // following calculations are kept as close to the onnx\defs.cc description as possible + auto FloatEpsilon = Constant::create(ov::element::f32, Shape{}, {epsilon}); + auto Epsilon = std::make_shared(FloatEpsilon, stash_type); + auto XShape = std::make_shared(X); + auto Rank = std::make_shared(XShape); + auto Zero1D = Constant::create(ov::element::i64, {1}, {0}); + auto One1D = Constant::create(ov::element::i64, {1}, {1}); + auto Axis1D = Constant::create(ov::element::i64, {1}, {axis}); + auto PrefixShape = std::make_shared(XShape, Zero1D, Axis1D, One1D); + ov::Output NumReducedAxes = (axis >= 0 ? 
std::make_shared(Rank, Axis1D)->output(0) + : std::make_shared(Axis1D)->output(0)); + auto SuffixShape = std::make_shared(One1D, NumReducedAxes); + auto ReducedShape = std::make_shared(ov::OutputVector{PrefixShape, SuffixShape}, 0); + + auto X2D = util::flatten(X, static_cast(axis)); + auto XU = std::make_shared(X2D, stash_type); + + auto Mean2D = std::make_shared(XU, One1D, true); + auto Square = std::make_shared(XU, XU); + auto MeanOfSquare = std::make_shared(Square, One1D, true); + auto SquareOfMean = std::make_shared(Mean2D, Mean2D); + + auto Var = std::make_shared(MeanOfSquare, SquareOfMean); + auto VarPlusEpsilon = std::make_shared(Var, Epsilon); + auto StdDev = std::make_shared(VarPlusEpsilon); + auto Deviation = std::make_shared(XU, Mean2D); + auto Normalized = std::make_shared(Deviation, StdDev); + auto NormalizedT = std::make_shared(Normalized, X); + + auto Scale2D = util::flatten(Scale, 0); + auto Scaled = std::make_shared(NormalizedT, Scale2D); + ov::Output Biased = + (num_inputs == 3 ? std::make_shared(Scaled, util::flatten(inputs.at(2), 0))->output(0) + : Scaled->output(0)); + + auto Y = std::make_shared(Biased, XShape, false); + auto InvStdDev2D = std::make_shared(Constant::create(stash_type, {1}, {1}), StdDev); + auto Mean = std::make_shared(Mean2D, ReducedShape, false); + auto InvStdDev = std::make_shared(InvStdDev2D, ReducedShape, false); + + return ov::OutputVector{Y, Mean, InvStdDev}; +} + +} // namespace set_1 +} // namespace op +} // namespace onnx_import +} // namespace ngraph +OPENVINO_SUPPRESS_DEPRECATED_END diff --git a/src/frontends/onnx/frontend/src/op/layer_normalization.hpp b/src/frontends/onnx/frontend/src/op/layer_normalization.hpp new file mode 100644 index 00000000000000..b6ab0a4265b75a --- /dev/null +++ b/src/frontends/onnx/frontend/src/op/layer_normalization.hpp @@ -0,0 +1,20 @@ +// Copyright (C) 2018-2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// +#pragma once + +#include "openvino/core/deprecated.hpp" +OPENVINO_SUPPRESS_DEPRECATED_START + +#include "onnx_import/core/node.hpp" + +namespace ngraph { +namespace onnx_import { +namespace op { +namespace set_1 { +ov::OutputVector layer_normalization(const Node& node); +} // namespace set_1 +} // namespace op +} // namespace onnx_import +} // namespace ngraph +OPENVINO_SUPPRESS_DEPRECATED_END diff --git a/src/frontends/onnx/frontend/src/ops_bridge.cpp b/src/frontends/onnx/frontend/src/ops_bridge.cpp index b522d6c733fd30..7d45d51bfe1fee 100644 --- a/src/frontends/onnx/frontend/src/ops_bridge.cpp +++ b/src/frontends/onnx/frontend/src/ops_bridge.cpp @@ -94,6 +94,7 @@ #include "op/is_finite.hpp" #include "op/is_inf.hpp" #include "op/is_nan.hpp" +#include "op/layer_normalization.hpp" #include "op/leaky_relu.hpp" #include "op/less.hpp" #include "op/less_or_equal.hpp" @@ -426,6 +427,7 @@ OperatorsBridge::OperatorsBridge() { REGISTER_OPERATOR("IsFinite", 1, is_finite); REGISTER_OPERATOR("IsInf", 1, is_inf); REGISTER_OPERATOR("IsNaN", 1, is_nan) + REGISTER_OPERATOR("LayerNormalization", 1, layer_normalization); REGISTER_OPERATOR("LeakyRelu", 1, leaky_relu); REGISTER_OPERATOR("Less", 1, less); REGISTER_OPERATOR("LessOrEqual", 1, less_or_equal); diff --git a/src/frontends/onnx/tests/tests_python/test_backend.py b/src/frontends/onnx/tests/tests_python/test_backend.py index 3f59e94c3f3bd1..381e8ce547c7a3 100644 --- a/src/frontends/onnx/tests/tests_python/test_backend.py +++ b/src/frontends/onnx/tests/tests_python/test_backend.py @@ -363,25 +363,6 @@ def expect_fail(test_case_path, xfail): # type: (str) 
-> None "OnnxBackendNodeModelTest.test_sce_NCd1d2d3d4d5_mean_weight_log_prob_cpu", # ticket: 81976 "OnnxBackendNodeModelTest.test_sce_NCd1d2d3d4d5_none_no_weight_cpu", # ticket: 81976 "OnnxBackendNodeModelTest.test_sce_NCd1d2d3d4d5_none_no_weight_log_prob_cpu", # ticket: 81976 - "OnnxBackendNodeModelTest.test_layer_normalization_2d_axis0_cpu", # ticket: 90649 - "OnnxBackendNodeModelTest.test_layer_normalization_2d_axis1_cpu", # ticket: 90649 - "OnnxBackendNodeModelTest.test_layer_normalization_2d_axis_negative_1_cpu", # ticket: 90649 - "OnnxBackendNodeModelTest.test_layer_normalization_2d_axis_negative_2_cpu", # ticket: 90649 - "OnnxBackendNodeModelTest.test_layer_normalization_3d_axis0_epsilon_cpu", # ticket: 90649 - "OnnxBackendNodeModelTest.test_layer_normalization_3d_axis1_epsilon_cpu", # ticket: 90649 - "OnnxBackendNodeModelTest.test_layer_normalization_3d_axis2_epsilon_cpu", # ticket: 90649 - "OnnxBackendNodeModelTest.test_layer_normalization_3d_axis_negative_1_epsilon_cpu", # ticket: 90649 - "OnnxBackendNodeModelTest.test_layer_normalization_3d_axis_negative_2_epsilon_cpu", # ticket: 90649 - "OnnxBackendNodeModelTest.test_layer_normalization_3d_axis_negative_3_epsilon_cpu", # ticket: 90649 - "OnnxBackendNodeModelTest.test_layer_normalization_4d_axis0_cpu", # ticket: 90649 - "OnnxBackendNodeModelTest.test_layer_normalization_4d_axis1_cpu", # ticket: 90649 - "OnnxBackendNodeModelTest.test_layer_normalization_4d_axis2_cpu", # ticket: 90649 - "OnnxBackendNodeModelTest.test_layer_normalization_4d_axis3_cpu", # ticket: 90649 - "OnnxBackendNodeModelTest.test_layer_normalization_4d_axis_negative_1_cpu", # ticket: 90649 - "OnnxBackendNodeModelTest.test_layer_normalization_4d_axis_negative_2_cpu", # ticket: 90649 - "OnnxBackendNodeModelTest.test_layer_normalization_4d_axis_negative_3_cpu", # ticket: 90649 - "OnnxBackendNodeModelTest.test_layer_normalization_4d_axis_negative_4_cpu", # ticket: 90649 - "OnnxBackendNodeModelTest.test_layer_normalization_default_axis_cpu", # ticket: 90649 ), ( xfail_issue_81976, # SoftmaxCrossEntropyLoss operator From 42ea6068f22613a16872742fd3374722b1c9befa Mon Sep 17 00:00:00 2001 From: Vitaliy Urusovskij Date: Thu, 1 Feb 2024 05:45:00 -0800 Subject: [PATCH 10/10] Clean `LayerTestsUtils::LayerTestsCommon` (#22309) * Delete `LayerTestsCommon::CalculateRefs()` impl * Delete `ConvertRefsParams()` * Delete `LayerTestsCommon::Validate()` impl * Delete `LayerTestsUtils::getRuntimePrecision*()` * Delete `LayerTestsCommon::Serialize()` * Delete `LayerTestsCommon::QueryNetwork()` --- .../include/single_layer_tests/activation.hpp | 12 - .../base/layer_test_utils.hpp | 25 -- .../single_layer/random_uniform.hpp | 2 - .../src/base/layer_test_utils.cpp | 256 +----------------- .../src/single_layer/random_uniform.cpp | 5 - 5 files changed, 2 insertions(+), 298 deletions(-) diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/activation.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/activation.hpp index fab256e7766374..c8f8e9647e97d9 100644 --- a/src/tests/functional/plugin/shared/include/single_layer_tests/activation.hpp +++ b/src/tests/functional/plugin/shared/include/single_layer_tests/activation.hpp @@ -20,16 +20,4 @@ TEST_P(ActivationDynamicLayerTest, CompareWithRefs) { Run(); } -TEST_P(ActivationLayerTest, QueryNetwork) { - QueryNetwork(); -} - -TEST_P(ActivationParamLayerTest, QueryNetwork) { - QueryNetwork(); -} - -TEST_P(ActivationDynamicLayerTest, QueryNetwork) { - QueryNetwork(); -} - } // namespace LayerTestsDefinitions 
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/base/layer_test_utils.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/base/layer_test_utils.hpp index 2f42836f6e8f00..b2a605d89716e8 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/base/layer_test_utils.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/base/layer_test_utils.hpp @@ -43,10 +43,6 @@ class LayerTestsCommon : public ov::test::TestsCommon { virtual void Run(); - virtual void Serialize(ov::pass::Serialize::Version ir_version = ov::pass::Serialize::Version::UNSPECIFIED); - - virtual void QueryNetwork(); - static void Compare(const std::vector>> &expected, const std::vector &actual, float threshold, @@ -66,27 +62,10 @@ class LayerTestsCommon : public ov::test::TestsCommon { virtual void Compare(const InferenceEngine::TensorDesc &actualDesc, const InferenceEngine::TensorDesc &expectedDesc); - virtual void SetRefMode(RefMode mode); - std::shared_ptr GetFunction(); std::map& GetConfiguration(); - // get runtime precision by operation friendly name - std::string getRuntimePrecision(const std::string& layerName); - - // get runtime precision by operation type - std::string getRuntimePrecisionByType(const std::string& layerType); - - // get runtime precision by operation friendly name which can be fused - std::string getRuntimePrecisionByFusedName(const std::string& layerName); - - std::map getRuntimeInfo(); - -#ifndef NDEBUG - void showRuntimePrecisions(); -#endif - template static void Compare(const T_NGRAPH *expected, const T_IE *actual, std::size_t size, float threshold, float abs_threshold = -1.f) { for (std::size_t i = 0; i < size; ++i) { @@ -160,10 +139,6 @@ class LayerTestsCommon : public ov::test::TestsCommon { virtual std::vector>> CalculateRefs(); - /// default method to convert parameters for reference operation. 
Used before reference implementation execution - /// can be overridden by specific operation test - virtual void ConvertRefsParams(); - virtual std::vector GetOutputs(); InferenceEngine::InferRequest inferRequest; diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/random_uniform.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/random_uniform.hpp index faa9430c80ab6f..5741908b81287a 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/random_uniform.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/random_uniform.hpp @@ -34,8 +34,6 @@ class RandomUniformLayerTest : public testing::WithParamInterface>> getConstData( - const std::shared_ptr& function) { - size_t numOutputs = function->get_output_size(); - std::vector>> outputs(numOutputs); - auto funcResults = function->get_results(); - for (size_t i = 0; i < numOutputs; i++) { - outputs[i].first = funcResults[i]->get_element_type(); - const auto& output = function->output(i).get_node_shared_ptr(); - OPENVINO_ASSERT(output->inputs().size() == 1); - auto parrentNode = output->input_value(0).get_node_shared_ptr(); - OPENVINO_ASSERT(ov::op::util::is_constant(parrentNode), - "Function was not fully folded to constant state!\n", - "Parent node of one of results is not constant and has type ", - parrentNode->get_type_name()); - - const auto data = std::dynamic_pointer_cast(parrentNode)->get_data_ptr(); - const auto dataSize = ov::shape_size(parrentNode->get_shape()) * parrentNode->get_element_type().size(); - outputs[i].second.resize(dataSize); - std::copy(data, data + dataSize, outputs[i].second.data()); - } - return outputs; -} -} // namespace - LayerTestsCommon::LayerTestsCommon() : threshold(1e-2f), abs_threshold(-1.f) { core = PluginCache::get().ie(targetDevice); } @@ -104,54 +79,6 @@ void LayerTestsCommon::Run() { } } -void LayerTestsCommon::Serialize(ov::pass::Serialize::Version ir_version) { - SKIP_IF_CURRENT_TEST_IS_DISABLED(); - - std::string output_name = ov::test::utils::generateTestFilePrefix(); - - std::string out_xml_path = output_name + ".xml"; - std::string out_bin_path = output_name + ".bin"; - - ov::pass::Manager manager; - manager.register_pass(out_xml_path, out_bin_path, ir_version); - manager.run_passes(function); - function->validate_nodes_and_infer_types(); - - auto result = getCore()->ReadNetwork(out_xml_path, out_bin_path); - - bool success; - std::string message; - std::tie(success, message) = - compare_functions(result.getFunction(), function, false, false, false, - true, // precision - true); // attributes - - EXPECT_TRUE(success) << message; - - ov::test::utils::removeIRFiles(out_xml_path, out_bin_path); -} - -void LayerTestsCommon::QueryNetwork() { - SKIP_IF_CURRENT_TEST_IS_DISABLED(); - - cnnNetwork = InferenceEngine::CNNNetwork(function); - - auto queryNetworkResult = PluginCache::get().ie()->QueryNetwork(cnnNetwork, targetDevice); - std::set expected; - for (auto&& node : function->get_ops()) { - expected.insert(node->get_friendly_name()); - } - - std::set actual; - for (auto&& res : queryNetworkResult.supportedLayersMap) { - // compare with originally used device name - ASSERT_EQ(ov::DeviceIDParser(res.second).get_device_name(), targetDevice); - - actual.insert(res.first); - } - ASSERT_EQ(expected, actual); -} - InferenceEngine::Blob::Ptr LayerTestsCommon::GenerateInput(const InferenceEngine::InputInfo& info) const { return 
FuncTestUtils::createAndFillBlob(info.getTensorDesc()); } @@ -489,60 +416,8 @@ void LayerTestsCommon::Infer() { inferRequest.Infer(); } -void LayerTestsCommon::ConvertRefsParams() { - ngraph::pass::ConvertPrecision().run_on_model(functionRefs); - ngraph::pass::ConvertPrecision().run_on_model(functionRefs); -} - std::vector>> LayerTestsCommon::CalculateRefs() { - ConvertRefsParams(); - functionRefs->validate_nodes_and_infer_types(); - - auto referenceInputs = std::vector>(inputs.size()); - auto refInputsTypes = std::vector(inputs.size()); - for (std::size_t i = 0; i < inputs.size(); ++i) { - const auto &input = inputs[i]; - const auto inputSize = input->byteSize(); - - auto &referenceInput = referenceInputs[i]; - referenceInput.resize(inputSize); - - auto memory = InferenceEngine::as(input); - IE_ASSERT(memory); - const auto lockedMemory = memory->wmap(); - const auto buffer = lockedMemory.as(); - std::copy(buffer, buffer + inputSize, referenceInput.data()); - - refInputsTypes[i] = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(memory->getTensorDesc().getPrecision()); - } - - const auto &&outputsInfo = executableNetwork.GetOutputsInfo(); - std::vector convertType; - convertType.reserve(outputsInfo.size()); - for (const auto &output : outputsInfo) { - convertType.push_back( - FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc( - output.second->getTensorDesc().getPrecision())); - } - - std::vector>> expectedOutputs; - switch (refMode) { - case INTERPRETER: { - expectedOutputs = ngraph::helpers::interpreterFunction(functionRefs, referenceInputs, refInputsTypes); - break; - } - case CONSTANT_FOLDING: { - const auto &foldedFunc = ngraph::helpers::foldFunction(functionRefs, referenceInputs, refInputsTypes); - expectedOutputs = getConstData(foldedFunc); - break; - } - case IE: { - // reference inference on device with other options and nGraph function has to be implemented here - break; - } - } - - return expectedOutputs; + return {}; } std::vector LayerTestsCommon::GetOutputs() { @@ -560,134 +435,7 @@ void LayerTestsCommon::Compare( Compare(expectedOutputs, actualOutputs, threshold); } -void LayerTestsCommon::Validate() { - if (functionRefs == nullptr) { - functionRefs = function->clone(); - } - auto expectedOutputs = CalculateRefs(); - const auto &actualOutputs = GetOutputs(); - - if (expectedOutputs.empty()) { - return; - } - - IE_ASSERT(actualOutputs.size() == expectedOutputs.size()) - << "nGraph interpreter has " << expectedOutputs.size() << " outputs, while IE " << actualOutputs.size(); - - Compare(expectedOutputs, actualOutputs); -} - -std::string LayerTestsCommon::getRuntimePrecision(const std::string& layerName) { - const auto execGraph = executableNetwork.GetExecGraphInfo(); - const auto execFunction = execGraph.getFunction(); - - for (const auto& op : execFunction->get_ops()) { - const auto name = op->get_friendly_name(); - if (name == layerName) { - const auto& rtInfo = op->get_rt_info(); - const auto& it = rtInfo.find("runtimePrecision"); - IE_ASSERT(it != rtInfo.end()) << "Runtime precision is not found for node: " << name; - return it->second.as(); - } - } - - return ""; -} - -std::string LayerTestsCommon::getRuntimePrecisionByType(const std::string& layerType) { - const auto execGraph = executableNetwork.GetExecGraphInfo(); - const auto execFunction = execGraph.getFunction(); - - for (const auto& op : execFunction->get_ops()) { - const auto& rtInfo = op->get_rt_info(); - const auto& typeIt = rtInfo.find("layerType"); - - IE_ASSERT(typeIt != rtInfo.end()) << "Layer is not found 
for type: " << layerType; - - auto type = typeIt->second.as(); - if (type == layerType) { - const auto& it = rtInfo.find("runtimePrecision"); - IE_ASSERT(it != rtInfo.end()) << "Runtime precision is not found for node: " << type; - return it->second.as(); - } - } - - return ""; -} - -std::string LayerTestsCommon::getRuntimePrecisionByFusedName(const std::string& layerName) { - const auto execGraph = executableNetwork.GetExecGraphInfo(); - const auto execFunction = execGraph.getFunction(); - - const auto parse = [](const std::string& originalLayersNames) -> std::set { - std::set names; - - std::string tmp = originalLayersNames; - size_t beginPosition = 0ul; - size_t endPosition; - while ((endPosition = tmp.find(",", beginPosition)) != std::string::npos) { - names.insert(tmp.substr(beginPosition, endPosition - beginPosition)); - beginPosition = endPosition + 1; - } - - names.insert(tmp.substr(beginPosition, endPosition - beginPosition)); - return names; - }; - - for (const auto& op : execFunction->get_ops()) { - const auto& rtInfo = op->get_rt_info(); - - const auto& nameIt = rtInfo.find("originalLayersNames"); - IE_ASSERT(nameIt != rtInfo.end()) << "originalLayersNames is not found for node: " << layerName; - const auto fusedName = parse(nameIt->second.as()); - if (fusedName.find(layerName) == fusedName.end()) { - continue; - } - - const auto& it = rtInfo.find("runtimePrecision"); - IE_ASSERT(it != rtInfo.end()) << "runtimePrecision is not found for node: " << layerName; - const auto rtPrecisionPtr = it->second.as(); - return rtPrecisionPtr; - } - - return ""; -} - -std::map LayerTestsCommon::getRuntimeInfo() { - const auto execGraph = executableNetwork.GetExecGraphInfo(); - const auto function = execGraph.getFunction(); - std::map runtimeInfo; - for (const auto& op : function->get_ops()) { - runtimeInfo[op->get_friendly_name()] = op->get_rt_info(); - } - return runtimeInfo; -} - -#ifndef NDEBUG -void LayerTestsCommon::showRuntimePrecisions() { - const auto execGraph = executableNetwork.GetExecGraphInfo(); - const auto execFunction = execGraph.getFunction(); - - for (const auto& op : execFunction->get_ops()) { - const auto& rtInfo = op->get_rt_info(); - - const auto& nameIt = rtInfo.find("originalLayersNames"); - const auto name = nameIt->second.as(); - - const auto& typeIt = rtInfo.find("layerType"); - const auto type = typeIt->second.as(); - - const auto& it = rtInfo.find("runtimePrecision"); - const auto rtPrecisionPtr = it->second.as(); - - std::cout << type << "(" << name << "): " << rtPrecisionPtr << std::endl; - } -} -#endif - -void LayerTestsCommon::SetRefMode(RefMode mode) { - refMode = mode; -} +void LayerTestsCommon::Validate() {} std::shared_ptr LayerTestsCommon::GetFunction() { return function; diff --git a/src/tests/functional/shared_test_classes/src/single_layer/random_uniform.cpp b/src/tests/functional/shared_test_classes/src/single_layer/random_uniform.cpp index 41a9e1cbe2db50..caa6dfef8a3c06 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/random_uniform.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/random_uniform.cpp @@ -82,9 +82,4 @@ void RandomUniformLayerTest::SetUp() { function = std::make_shared(results, ov::ParameterVector{input}, "random_uniform"); } -void RandomUniformLayerTest::ConvertRefsParams() { - // we shouldn't use default conversion from f16 to f32 - ngraph::pass::ConvertPrecision().run_on_model(functionRefs); -} - } // namespace LayerTestsDefinitions
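
Usage note: below is a minimal, hypothetical sketch of how the ov::test::utils::compare_raw_data helper introduced in PATCH 02/10 is meant to be called from a test. The buffers, threshold value, and main() wrapper are invented for illustration; only the call signature (expected pointer, actual pointer, element count, relative threshold, optional absolute threshold) and the header common_test_utils/data_utils.hpp come from the patches.

```cpp
#include <cstddef>
#include <vector>

// Header extended by PATCH 02/10; the include path is assumed from the patch location.
#include "common_test_utils/data_utils.hpp"

int main() {
    // Illustrative reference values and values read back from an inference request.
    std::vector<float> expected = {1.0f, 2.0f, 3.0f};
    std::vector<float> actual = {1.001f, 2.002f, 2.999f};

    // Throws OPENVINO_THROW on the first element whose absolute difference exceeds the
    // optional absolute threshold (when > 0) or whose relative difference exceeds 1e-2,
    // mirroring how infer_correctness.cpp and the NMS tests detect mismatching outputs.
    ov::test::utils::compare_raw_data(expected.data(), actual.data(), expected.size(), 1e-2f);
    return 0;
}
```

Relative comparison is the default path; passing a positive abs_threshold additionally enforces an absolute bound, matching the two-stage check added in data_utils.hpp.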