From c61fce428ea6e416cc05db12f893bfcd4aa669e4 Mon Sep 17 00:00:00 2001
From: Oleg Pipikin
Date: Tue, 10 Oct 2023 21:54:46 +0200
Subject: [PATCH 01/13] Refactor PowerLayerTest, PriorBoxClusteredLayerTest,
 PriorBoxLayerTest (#20349)

* Refactor PowerLayerTest

* Refactor PriorBoxClusteredLayerTest

* Refactor PriorBoxLayerTest
---
 .../single_layer_tests/prior_box.cpp          |  25 ++--
 .../prior_box_clustered.cpp                   |  32 ++---
 .../single_layer_tests/power.cpp              |  67 +++++-----
 .../shared/include/single_op_tests/power.hpp  |  15 +++
 .../include/single_op_tests/prior_box.hpp     |  15 +++
 .../single_op_tests/prior_box_clustered.hpp   |  15 +++
 .../shared_test_classes/single_op/power.hpp   |  30 +++++
 .../single_op/prior_box.hpp                   |  46 +++++++
 .../single_op/prior_box_clustered.hpp         |  40 ++++++
 .../src/single_op/power.cpp                   |  57 +++++++++
 .../src/single_op/prior_box.cpp               | 117 ++++++++++++++++++
 .../src/single_op/prior_box_clustered.cpp     |  95 ++++++++++++++
 12 files changed, 485 insertions(+), 69 deletions(-)
 create mode 100644 src/tests/functional/plugin/shared/include/single_op_tests/power.hpp
 create mode 100644 src/tests/functional/plugin/shared/include/single_op_tests/prior_box.hpp
 create mode 100644 src/tests/functional/plugin/shared/include/single_op_tests/prior_box_clustered.hpp
 create mode 100644 src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/power.hpp
 create mode 100644 src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/prior_box.hpp
 create mode 100644 src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/prior_box_clustered.hpp
 create mode 100644 src/tests/functional/shared_test_classes/src/single_op/power.cpp
 create mode 100644 src/tests/functional/shared_test_classes/src/single_op/prior_box.cpp
 create mode 100644 src/tests/functional/shared_test_classes/src/single_op/prior_box_clustered.cpp

diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/prior_box.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/prior_box.cpp
index 6c6a08be138b74..3b75acd67bc26d 100644
--- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/prior_box.cpp
+++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/prior_box.cpp
@@ -4,14 +4,16 @@
 
 #include <vector>
 
-#include "single_layer_tests/prior_box.hpp"
+#include "single_op_tests/prior_box.hpp"
 #include "common_test_utils/test_constants.hpp"
 
-using namespace LayerTestsDefinitions;
+namespace {
+using ov::test::PriorBoxLayerTest;
+
+const std::vector<ov::element::Type> model_types = {
+    ov::element::i32,
+    ov::element::u16};
 
-const std::vector<InferenceEngine::Precision> netPrecisions = {
-    InferenceEngine::Precision::I32,
-    InferenceEngine::Precision::U16};
 const std::vector<std::vector<float>> min_sizes = {
     {256.0f}};
 
@@ -53,8 +55,7 @@ const std::vector<bool> scale_all_sizes = {
 const std::vector<bool> min_max_aspect_ratios_order = {
     false, true};
 
-const std::vector<size_t> inputShape = {300, 300};
-const std::vector<size_t> imageShape = {32, 32};
+const std::vector<ov::Shape> input_shapes_static = {{300, 300}, {32, 32}};
 
 const auto layerSpecificParams = ::testing::Combine(
     ::testing::ValuesIn(min_sizes),
@@ -74,12 +75,8 @@ const auto layerSpecificParams = ::testing::Combine(
 INSTANTIATE_TEST_SUITE_P(smoke_PriorBox_Basic, PriorBoxLayerTest,
                          ::testing::Combine(
                              layerSpecificParams,
-                             ::testing::ValuesIn(netPrecisions),
-                             ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
-                             ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
-                             ::testing::Values(InferenceEngine::Layout::ANY),
-                             ::testing::Values(InferenceEngine::Layout::ANY),
-                             ::testing::Values(inputShape),
-                             ::testing::Values(imageShape),
+                             ::testing::ValuesIn(model_types),
+                             ::testing::Values(ov::test::static_shapes_to_test_representation(input_shapes_static)),
                              ::testing::Values(ov::test::utils::DEVICE_CPU)),
                          PriorBoxLayerTest::getTestCaseName);
+}  // namespace
diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/prior_box_clustered.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/prior_box_clustered.cpp
index 34dfbf6031ec6c..2319c7367fb151 100644
--- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/prior_box_clustered.cpp
+++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/prior_box_clustered.cpp
@@ -3,17 +3,15 @@
 //
 
 #include <vector>
-#include "single_layer_tests/prior_box_clustered.hpp"
+#include "single_op_tests/prior_box_clustered.hpp"
 #include "common_test_utils/test_constants.hpp"
 
-using namespace LayerTestsDefinitions;
-using namespace ngraph::helpers;
-
 namespace {
+using ov::test::PriorBoxClusteredLayerTest;
 // Common params
-const std::vector<InferenceEngine::Precision> netPrecisions = {
-    InferenceEngine::Precision::FP32,
-    InferenceEngine::Precision::FP16
+const std::vector<ov::element::Type> model_types = {
+    ov::element::f32,
+    ov::element::f16
 };
 
 const std::vector<std::vector<float>> widths = {
@@ -63,18 +61,14 @@ const auto layerSpeficParams = ::testing::Combine(
     ::testing::ValuesIn(variances)
 );
 
+std::vector<ov::Shape> input_shapes_static = {{4, 4}, {50, 50}};
+
 INSTANTIATE_TEST_SUITE_P(smoke_PriorBoxClustered_Basic, PriorBoxClusteredLayerTest,
-    ::testing::Combine(
-        layerSpeficParams,
-        ::testing::ValuesIn(netPrecisions),
-        ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
-        ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
-        ::testing::Values(InferenceEngine::Layout::ANY),
-        ::testing::Values(InferenceEngine::Layout::ANY),
-        ::testing::Values(std::vector<size_t>({ 4, 4 })),
-        ::testing::Values(std::vector<size_t>({ 50, 50 })),
-        ::testing::Values(ov::test::utils::DEVICE_CPU)),
-    PriorBoxClusteredLayerTest::getTestCaseName
+    ::testing::Combine(
+        layerSpeficParams,
+        ::testing::ValuesIn(model_types),
+        ::testing::Values(ov::test::static_shapes_to_test_representation(input_shapes_static)),
+        ::testing::Values(ov::test::utils::DEVICE_CPU)),
+    PriorBoxClusteredLayerTest::getTestCaseName
 );
-
 }  // namespace
diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/power.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/power.cpp
index d3af6893b3e2f5..951df90937f729 100644
--- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/power.cpp
+++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/power.cpp
@@ -2,47 +2,42 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 
-#include <vector>
-#include "single_layer_tests/power.hpp"
+#include "single_op_tests/power.hpp"
 #include "common_test_utils/test_constants.hpp"
 
-using namespace LayerTestsDefinitions;
-
 namespace {
+using ov::test::PowerLayerTest;
 
-    std::vector<std::vector<std::vector<size_t>>> inShapes = {
-            {{1, 8}},
-            {{2, 16}},
-            {{3, 32}},
-            {{4, 64}},
-            {{5, 128}},
-            {{6, 256}},
-            {{7, 512}},
-            {{8, 1024}}
-    };
+std::vector<std::vector<ov::Shape>> input_shape_static = {
+        {{1, 8}},
+        {{2, 16}},
+        {{3, 32}},
+        {{4, 64}},
+        {{5, 128}},
+        {{6, 256}},
+        {{7, 512}},
+        {{8, 1024}}
+};
 
-    std::vector<std::vector<float>> Power = {
-            {0.0f},
-            {0.5f},
-            {1.0f},
-            {1.1f},
-            {1.5f},
-            {2.0f},
-    };
+std::vector<std::vector<float>> powers = {
+        {0.0f},
+        {0.5f},
+        {1.0f},
+        {1.1f},
+        {1.5f},
+        {2.0f},
+};
 
-    std::vector<InferenceEngine::Precision> netPrecisions = {InferenceEngine::Precision::FP32,
-                                                             InferenceEngine::Precision::FP16,
-    };
+std::vector<ov::element::Type> model_types = {
+        ov::element::f32,
+        ov::element::f16,
+};
 
-    INSTANTIATE_TEST_SUITE_P(smoke_power, PowerLayerTest,
-                            ::testing::Combine(
-                                ::testing::ValuesIn(inShapes),
-                                ::testing::ValuesIn(netPrecisions),
-                                ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
-                                ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
-                                ::testing::Values(InferenceEngine::Layout::ANY),
-                                ::testing::Values(InferenceEngine::Layout::ANY),
-                                ::testing::Values(ov::test::utils::DEVICE_GPU),
-                                ::testing::ValuesIn(Power)),
-                            PowerLayerTest::getTestCaseName);
+INSTANTIATE_TEST_SUITE_P(smoke_power, PowerLayerTest,
+                         ::testing::Combine(
+                             ::testing::ValuesIn(ov::test::static_shapes_to_test_representation(input_shape_static)),
+                             ::testing::ValuesIn(model_types),
+                             ::testing::ValuesIn(powers),
+                             ::testing::Values(ov::test::utils::DEVICE_GPU)),
+                         PowerLayerTest::getTestCaseName);
 }  // namespace
diff --git a/src/tests/functional/plugin/shared/include/single_op_tests/power.hpp b/src/tests/functional/plugin/shared/include/single_op_tests/power.hpp
new file mode 100644
index 00000000000000..922ccbcbb345cc
--- /dev/null
+++ b/src/tests/functional/plugin/shared/include/single_op_tests/power.hpp
@@ -0,0 +1,15 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include "shared_test_classes/single_op/power.hpp"
+
+namespace ov {
+namespace test {
+TEST_P(PowerLayerTest, Inference){
+    run();
+};
+}  // namespace test
+}  // namespace ov
diff --git a/src/tests/functional/plugin/shared/include/single_op_tests/prior_box.hpp b/src/tests/functional/plugin/shared/include/single_op_tests/prior_box.hpp
new file mode 100644
index 00000000000000..707aadec0e8b29
--- /dev/null
+++ b/src/tests/functional/plugin/shared/include/single_op_tests/prior_box.hpp
@@ -0,0 +1,15 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include "shared_test_classes/single_op/prior_box.hpp"
+
+namespace ov {
+namespace test {
+TEST_P(PriorBoxLayerTest, Inference) {
+    run();
+}
+}  // namespace test
+}  // namespace ov
diff --git a/src/tests/functional/plugin/shared/include/single_op_tests/prior_box_clustered.hpp b/src/tests/functional/plugin/shared/include/single_op_tests/prior_box_clustered.hpp
new file mode 100644
index 00000000000000..389bf2309b7831
--- /dev/null
+++ b/src/tests/functional/plugin/shared/include/single_op_tests/prior_box_clustered.hpp
@@ -0,0 +1,15 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include "shared_test_classes/single_op/prior_box_clustered.hpp"
+
+namespace ov {
+namespace test {
+TEST_P(PriorBoxClusteredLayerTest, Inference) {
+    run();
+};
+}  // namespace test
+}  // namespace ov
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/power.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/power.hpp
new file mode 100644
index 00000000000000..28634d34428819
--- /dev/null
+++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/power.hpp
@@ -0,0 +1,30 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <tuple>
+#include <string>
+#include <vector>
+
+#include "shared_test_classes/base/ov_subgraph.hpp"
+
+namespace ov {
+namespace test {
+using PowerParamsTuple = typename std::tuple<
+        std::vector<InputShape>, // Input shapes
+        ov::element::Type,       // Model type
+        std::vector<float>,      // Power
+        std::string>;            // Device name
+
+class PowerLayerTest:
+        public testing::WithParamInterface<PowerParamsTuple>,
+        virtual public ov::test::SubgraphBaseTest{
+public:
+    static std::string getTestCaseName(const testing::TestParamInfo<PowerParamsTuple> &obj);
+protected:
+    void SetUp() override;
+};
+}  // namespace test
+}  // namespace ov
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/prior_box.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/prior_box.hpp
new file mode 100644
index 00000000000000..e48759fab47589
--- /dev/null
+++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/prior_box.hpp
@@ -0,0 +1,46 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <tuple>
+#include <string>
+#include <vector>
+
+#include "shared_test_classes/base/ov_subgraph.hpp"
+
+namespace ov {
+namespace test {
+using priorBoxSpecificParams = std::tuple<
+        std::vector<float>, // min_size
+        std::vector<float>, // max_size
+        std::vector<float>, // aspect_ratio
+        std::vector<float>, // density
+        std::vector<float>, // fixed_ratio
+        std::vector<float>, // fixed_size
+        bool,               // clip
+        bool,               // flip
+        float,              // step
+        float,              // offset
+        std::vector<float>, // variance
+        bool,               // scale_all_sizes
+        bool>;              // min_max_aspect_ratios_order
+
+typedef std::tuple<
+        priorBoxSpecificParams,
+        ov::element::Type,       // model type
+        std::vector<InputShape>, // input shape
+        std::string> priorBoxLayerParams;
+
+class PriorBoxLayerTest
+    : public testing::WithParamInterface<priorBoxLayerParams>,
+      virtual public ov::test::SubgraphBaseStaticTest {
+public:
+    static std::string getTestCaseName(const testing::TestParamInfo<priorBoxLayerParams>& obj);
+protected:
+    void SetUp() override;
+};
+
+}  // namespace test
+}  // namespace ov
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/prior_box_clustered.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/prior_box_clustered.hpp
new file mode 100644
index 00000000000000..61af34ccccf9d2
--- /dev/null
+++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/prior_box_clustered.hpp
@@ -0,0 +1,40 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <tuple>
+#include <string>
+#include <vector>
+
+#include "shared_test_classes/base/ov_subgraph.hpp"
+
+namespace ov {
+namespace test {
+typedef std::tuple<
+        std::vector<float>, // widths
+        std::vector<float>, // heights
+        bool,               // clip
+        float,              // step_width
+        float,              // step_height
+        float,              // step
+        float,              // offset
+        std::vector<float>> priorBoxClusteredSpecificParams;
+
+typedef std::tuple<
+        priorBoxClusteredSpecificParams,
+        ov::element::Type,       // Model type
+        std::vector<InputShape>, // Input shape
+        std::string> priorBoxClusteredLayerParams;
+
+class PriorBoxClusteredLayerTest : public testing::WithParamInterface<priorBoxClusteredLayerParams>,
+                                   virtual public ov::test::SubgraphBaseTest {
+public:
+    static std::string getTestCaseName(const testing::TestParamInfo<priorBoxClusteredLayerParams>& obj);
+
+protected:
+    void SetUp() override;
+};
+}  // namespace test
+}  // namespace ov
diff --git a/src/tests/functional/shared_test_classes/src/single_op/power.cpp b/src/tests/functional/shared_test_classes/src/single_op/power.cpp
new file mode 100644
index 00000000000000..54da5487a41b32
--- /dev/null
+++ b/src/tests/functional/shared_test_classes/src/single_op/power.cpp
@@ -0,0 +1,57 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "shared_test_classes/single_op/power.hpp"
+
+#include "openvino/op/parameter.hpp"
+#include "openvino/op/constant.hpp"
+#include "openvino/op/result.hpp"
+#include "openvino/op/power.hpp"
+
+namespace ov {
+namespace test {
+std::string PowerLayerTest::getTestCaseName(const testing::TestParamInfo<PowerParamsTuple> &obj) {
+    std::vector<InputShape> shapes;
+    ov::element::Type model_type;
+    std::string device_name;
+    std::vector<float> power;
+    std::tie(shapes, model_type, power, device_name) = obj.param;
+
+    std::ostringstream result;
+    result << "IS=(";
+    for (size_t i = 0lu; i < shapes.size(); i++) {
+        result << ov::test::utils::partialShape2str({shapes[i].first}) << (i < shapes.size() - 1lu ? "_" : "");
+    }
+    result << ")_TS=";
+    for (size_t i = 0lu; i < shapes.front().second.size(); i++) {
+        result << "{";
+        for (size_t j = 0lu; j < shapes.size(); j++) {
+            result << ov::test::utils::vec2str(shapes[j].second[i]) << (j < shapes.size() - 1lu ? "_" : "");
+        }
+        result << "}_";
+    }
+    result << "Power=" << ov::test::utils::vec2str(power) << "_";
+    result << "netPRC=" << model_type.get_type_name() << "_";
+    result << "trgDev=" << device_name << "_";
+    return result.str();
+}
+
+void PowerLayerTest::SetUp() {
+    abs_threshold = 0.04f;
+
+    std::vector<InputShape> shapes;
+    ov::element::Type model_type;
+    std::vector<float> power;
+    std::tie(shapes, model_type, power, targetDevice) = this->GetParam();
+    init_input_shapes(shapes);
+
+    auto param = std::make_shared<ov::op::v0::Parameter>(model_type, inputDynamicShapes.front());
+
+    auto power_const = std::make_shared<ov::op::v0::Constant>(model_type, ngraph::Shape{1}, power);
+    auto pow = std::make_shared<ov::op::v1::Power>(param, power_const);
+
+    function = std::make_shared<ov::Model>(pow, ov::ParameterVector{param}, "power");
+}
+}  // namespace test
+}  // namespace ov
diff --git a/src/tests/functional/shared_test_classes/src/single_op/prior_box.cpp b/src/tests/functional/shared_test_classes/src/single_op/prior_box.cpp
new file mode 100644
index 00000000000000..9f297a1f07c505
--- /dev/null
+++ b/src/tests/functional/shared_test_classes/src/single_op/prior_box.cpp
@@ -0,0 +1,117 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "shared_test_classes/single_op/prior_box.hpp"
+
+#include "openvino/pass/constant_folding.hpp"
+#include "openvino/op/parameter.hpp"
+#include "openvino/op/constant.hpp"
+#include "openvino/op/result.hpp"
+#include "openvino/op/shape_of.hpp"
+#include "openvino/op/prior_box_clustered.hpp"
+
+namespace ov {
+namespace test {
+std::string PriorBoxLayerTest::getTestCaseName(const testing::TestParamInfo<priorBoxLayerParams>& obj) {
+    ov::element::Type model_type;
+    std::vector<InputShape> shapes;
+    std::string target_device;
+    priorBoxSpecificParams spec_params;
+    std::tie(spec_params, model_type, shapes, target_device) = obj.param;
+
+    std::vector<float> min_size, max_size, aspect_ratio, density, fixed_ratio, fixed_size, variance;
+    float step, offset;
+    bool clip, flip, scale_all_sizes, min_max_aspect_ratios_order;
+    std::tie(min_size, max_size, aspect_ratio, density, fixed_ratio, fixed_size, clip,
+             flip, step, offset, variance, scale_all_sizes, min_max_aspect_ratios_order) = spec_params;
+
+    std::ostringstream result;
+    const char separator = '_';
+    result << "IS=(";
+    for (size_t i = 0lu; i < shapes.size(); i++) {
+        result << ov::test::utils::partialShape2str({shapes[i].first}) << (i < shapes.size() - 1lu ? "_" : "");
+    }
+    result << ")_TS=";
+    for (size_t i = 0lu; i < shapes.front().second.size(); i++) {
+        result << "{";
+        for (size_t j = 0lu; j < shapes.size(); j++) {
+            result << ov::test::utils::vec2str(shapes[j].second[i]) << (j < shapes.size() - 1lu ? "_" : "");
+        }
+        result << "}_";
+    }
+    result << "netPRC=" << model_type.get_type_name() << separator;
+    result << "min_s=" << ov::test::utils::vec2str(min_size) << separator;
+    result << "max_s=" << ov::test::utils::vec2str(max_size)<< separator;
+    result << "asp_r=" << ov::test::utils::vec2str(aspect_ratio)<< separator;
+    result << "dens=" << ov::test::utils::vec2str(density)<< separator;
+    result << "fix_r=" << ov::test::utils::vec2str(fixed_ratio)<< separator;
+    result << "fix_s=" << ov::test::utils::vec2str(fixed_size)<< separator;
+    result << "var=" << ov::test::utils::vec2str(variance)<< separator;
+    result << "step=" << step << separator;
+    result << "off=" << offset << separator;
+    result << "clip=" << clip << separator;
+    result << "flip=" << flip<< separator;
+    result << "scale_all=" << scale_all_sizes << separator;
+    result << "min_max_aspect_ratios_order=" << min_max_aspect_ratios_order << separator;
+    result << "trgDev=" << target_device;
+
+    return result.str();
+}
+
+void PriorBoxLayerTest::SetUp() {
+    ov::element::Type model_type;
+    std::vector<InputShape> shapes;
+    std::vector<float> min_size;
+    std::vector<float> max_size;
+    std::vector<float> aspect_ratio;
+    std::vector<float> density;
+    std::vector<float> fixed_ratio;
+    std::vector<float> fixed_size;
+    std::vector<float> variance;
+    float step;
+    float offset;
+    bool clip;
+    bool flip;
+    bool scale_all_sizes;
+    bool min_max_aspect_ratios_order;
+
+    priorBoxSpecificParams spec_params;
+    std::tie(spec_params, model_type, shapes, targetDevice) = GetParam();
+
+    std::tie(min_size, max_size, aspect_ratio, density, fixed_ratio, fixed_size, clip,
+             flip, step, offset, variance, scale_all_sizes, min_max_aspect_ratios_order) = spec_params;
+    init_input_shapes(shapes);
+
+    ov::ParameterVector params{std::make_shared<ov::op::v0::Parameter>(model_type, inputDynamicShapes[0]),
+                               std::make_shared<ov::op::v0::Parameter>(model_type, inputDynamicShapes[1])};
+
+    ov::op::v8::PriorBox::Attributes attributes;
+    attributes.min_size = min_size;
+    attributes.max_size = max_size;
+    attributes.aspect_ratio = aspect_ratio;
+    attributes.density = density;
+    attributes.fixed_ratio = fixed_ratio;
+    attributes.fixed_size = fixed_size;
+    attributes.variance = variance;
+    attributes.step = step;
+    attributes.offset = offset;
+    attributes.clip = clip;
+    attributes.flip = flip;
+    attributes.scale_all_sizes = scale_all_sizes;
+    attributes.min_max_aspect_ratios_order = min_max_aspect_ratios_order;
+
+    auto shape_of_1 = std::make_shared<ov::op::v3::ShapeOf>(params[0]);
+    auto shape_of_2 = std::make_shared<ov::op::v3::ShapeOf>(params[1]);
+    auto priorBox = std::make_shared<ov::op::v8::PriorBox>(
+        shape_of_1,
+        shape_of_2,
+        attributes);
+
+    ov::pass::disable_constant_folding(priorBox);
+
+    auto result = std::make_shared<ov::op::v0::Result>(priorBox);
+    function = std::make_shared<ov::Model> (result, params, "PriorBoxFunction");
+}
+}  // namespace test
+}  // namespace ov
diff --git a/src/tests/functional/shared_test_classes/src/single_op/prior_box_clustered.cpp b/src/tests/functional/shared_test_classes/src/single_op/prior_box_clustered.cpp
new file mode 100644
index 00000000000000..a630c498ee69a3
--- /dev/null
+++ b/src/tests/functional/shared_test_classes/src/single_op/prior_box_clustered.cpp
@@ -0,0 +1,95 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "shared_test_classes/single_op/prior_box_clustered.hpp"
+
+#include "openvino/op/parameter.hpp"
+#include "openvino/op/constant.hpp"
+#include "openvino/op/result.hpp"
+#include "openvino/op/shape_of.hpp"
+#include "openvino/op/prior_box_clustered.hpp"
+
+namespace ov {
+namespace test {
+std::string PriorBoxClusteredLayerTest::getTestCaseName(const testing::TestParamInfo<priorBoxClusteredLayerParams>& obj) {
+    ov::element::Type model_type;
+    std::vector<InputShape> shapes;
+    std::string target_device;
+    priorBoxClusteredSpecificParams specParams;
+    std::tie(specParams, model_type, shapes, target_device) = obj.param;
+
+    std::vector<float> widths, heights, variances;
+    float step_width, step_height, step, offset;
+    bool clip;
+    std::tie(widths, heights, clip, step_width, step_height, step, offset, variances) = specParams;
+
+    std::ostringstream result;
+    const char separator = '_';
+    result << "IS=(";
+    for (size_t i = 0lu; i < shapes.size(); i++) {
+        result << ov::test::utils::partialShape2str({shapes[i].first}) << (i < shapes.size() - 1lu ? "_" : "");
+    }
+    result << ")_TS=";
+    for (size_t i = 0lu; i < shapes.front().second.size(); i++) {
+        result << "{";
+        for (size_t j = 0lu; j < shapes.size(); j++) {
+            result << ov::test::utils::vec2str(shapes[j].second[i]) << (j < shapes.size() - 1lu ? "_" : "");
+        }
+        result << "}_";
+    }
+    result << "netPRC=" << model_type.get_type_name() << separator;
+    result << "widths=" << ov::test::utils::vec2str(widths) << separator;
+    result << "heights=" << ov::test::utils::vec2str(heights) << separator;
+    result << "variances=";
+    if (variances.empty())
+        result << "()" << separator;
+    else
+        result << ov::test::utils::vec2str(variances) << separator;
+    result << "stepWidth=" << step_width << separator;
+    result << "stepHeight=" << step_height << separator;
+    result << "step=" << step << separator;
+    result << "offset=" << offset << separator;
+    result << "clip=" << std::boolalpha << clip << separator;
+    result << "trgDev=" << target_device;
+    return result.str();
+}
+
+void PriorBoxClusteredLayerTest::SetUp() {
+    std::vector<InputShape> shapes;
+    ov::element::Type model_type;
+    std::vector<float> widths;
+    std::vector<float> heights;
+    std::vector<float> variances;
+    float step_width;
+    float step_height;
+    float step;
+    float offset;
+    bool clip;
+    priorBoxClusteredSpecificParams specParams;
+    std::tie(specParams, model_type, shapes, targetDevice) = GetParam();
+    std::tie(widths, heights, clip, step_width, step_height, step, offset, variances) = specParams;
+    init_input_shapes(shapes);
+
+    ov::ParameterVector params{std::make_shared<ov::op::v0::Parameter>(model_type, inputDynamicShapes[0]),
+                               std::make_shared<ov::op::v0::Parameter>(model_type, inputDynamicShapes[1])};
+
+    ov::op::v0::PriorBoxClustered::Attributes attributes;
+    attributes.widths = widths;
+    attributes.heights = heights;
+    attributes.clip = clip;
+    attributes.step_widths = step_width;
+    attributes.step_heights = step_height;
+    attributes.step = step;
+    attributes.offset = offset;
+    attributes.variances = variances;
+
+    auto shape_of_1 = std::make_shared<ov::op::v3::ShapeOf>(params[0]);
+    auto shape_of_2 = std::make_shared<ov::op::v3::ShapeOf>(params[1]);
+    auto prior_box_clustered = std::make_shared<ov::op::v0::PriorBoxClustered>(shape_of_1, shape_of_2, attributes);
+
+    auto result = std::make_shared<ov::op::v0::Result>(prior_box_clustered);
+    function = std::make_shared<ov::Model>(result, params, "PB_Clustered");
+}
+}  // namespace test
+}  // namespace ov

From b3ead626310b1eff3fe2d5374b287bc908c162f4 Mon Sep 17 00:00:00 2001
From: Ilya Lavrenov
Date: Wed, 11 Oct 2023 00:38:23 +0400
Subject: [PATCH 02/13] Fixed numpy deprecation error (#20375)

---
 tests/layer_tests/tensorflow_lite_tests/test_tfl_ScatterND.py | 4 ++--
 .../tensorflow_lite_tests/test_tfl_StridedSlice.py            | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/tests/layer_tests/tensorflow_lite_tests/test_tfl_ScatterND.py b/tests/layer_tests/tensorflow_lite_tests/test_tfl_ScatterND.py
index d2393dc4aa5c13..77720238b1feb3 100644
--- a/tests/layer_tests/tensorflow_lite_tests/test_tfl_ScatterND.py
+++ b/tests/layer_tests/tensorflow_lite_tests/test_tfl_ScatterND.py
@@ -13,7 +13,7 @@
      'shape_shape': [1], 'shape_value': [8]},
     {'indices_shape': [4, 1], 'indices_value': [4, 3, 1, 7], 'updates_dtype': np.float32, 'updates_shape': [4],
      'shape_shape': [1], 'shape_value': [8]},
-    {'indices_shape': [4, 1], 'indices_value': [4, 3, 1, 7], 'updates_dtype': np.bool, 'updates_shape': [4],
+    {'indices_shape': [4, 1], 'indices_value': [4, 3, 1, 7], 'updates_dtype': bool, 'updates_shape': [4],
      'shape_shape': [1], 'shape_value': [8]},
 
     {'indices_shape': [4, 2], 'indices_value': [[0, 0], [1, 0], [0, 2], [1, 2]], 'updates_dtype': np.int32,
@@ -22,7 +22,7 @@
      'updates_shape': [4, 5], 'shape_shape': [3], 'shape_value': [2, 3, 5]},
     {'indices_shape': [4, 2], 'indices_value': [[0, 0], [1, 0], [0, 2], [1, 2]], 'updates_dtype': np.float32,
      'updates_shape': [4, 5], 'shape_shape': [3], 'shape_value': [2, 3, 5]},
-    {'indices_shape': [4, 2], 'indices_value': [[0, 0], [1, 0], [0, 2], [1, 2]], 'updates_dtype': np.bool,
+    {'indices_shape': [4, 2], 'indices_value': [[0, 0], [1, 0], [0, 2], [1, 2]], 'updates_dtype': bool,
      'updates_shape': [4, 5], 'shape_shape': [3], 'shape_value': [2, 3, 5]},
 ]
diff --git a/tests/layer_tests/tensorflow_lite_tests/test_tfl_StridedSlice.py b/tests/layer_tests/tensorflow_lite_tests/test_tfl_StridedSlice.py
index fea9364b045651..12c18d02077e2a 100644
--- a/tests/layer_tests/tensorflow_lite_tests/test_tfl_StridedSlice.py
+++ b/tests/layer_tests/tensorflow_lite_tests/test_tfl_StridedSlice.py
@@ -13,7 +13,7 @@
      'begin_mask': 8, 'end_mask': 3, 'shrink_axis_mask': 4},
     {'shape': [12, 2, 2, 5], 'dtype': np.int64, 'strides': [1], 'begin': [0], 'end': [1],
      'begin_mask': 8, 'end_mask': 3, 'shrink_axis_mask': None},
-    {'shape': [12, 2, 2, 5], 'dtype': np.bool, 'strides': [1], 'begin': [0], 'end': [1],
+    {'shape': [12, 2, 2, 5], 'dtype': bool, 'strides': [1], 'begin': [0], 'end': [1],
      'begin_mask': 8, 'end_mask': 3, 'shrink_axis_mask': None},
 ]
 
@@ -24,7 +24,7 @@ class TestTFLiteStridedSliceLayerTest(TFLiteLayerTest):
     allowed_ops = ['STRIDED_SLICE']
 
     def _prepare_input(self, inputs_dict, generator=None):
-        if self.input_dtype == np.bool:
+        if self.input_dtype == bool:
             inputs_dict['Input'] = np.random.choice([True, False], size=inputs_dict['Input'])
         else:
             inputs_dict['Input'] = np.random.randint(-255, 255, inputs_dict['Input']).astype(self.input_dtype)

From df55e282e38b43ad581638dceb7d05a9d6818a9e Mon Sep 17 00:00:00 2001
From: Ilya Lavrenov
Date: Wed, 11 Oct 2023 02:18:14 +0400
Subject: [PATCH 03/13] Update tensorflow requirement in /src/bindings/python
 (#20372)

Updates the requirements on [tensorflow](https://github.com/tensorflow/tensorflow) to permit the latest version.
- [Release notes](https://github.com/tensorflow/tensorflow/releases)
- [Changelog](https://github.com/tensorflow/tensorflow/blob/master/RELEASE.md)
- [Commits](https://github.com/tensorflow/tensorflow/compare/v1.15.5...v2.14.0)

---
updated-dependencies:
- dependency-name: tensorflow
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
 tests/constraints.txt         | 1 +
 tools/mo/requirements_tf.txt  | 2 +-
 tools/mo/requirements_tf2.txt | 2 +-
 3 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/tests/constraints.txt b/tests/constraints.txt
index 671c60bc937c83..7abffee14c8ce2 100644
--- a/tests/constraints.txt
+++ b/tests/constraints.txt
@@ -11,6 +11,7 @@ scipy>=1.11.1; python_version >= "3.9"
 wheel>=0.38.1
 defusedxml>=0.7.1
 fastjsonschema~=2.17.1
+tensorflow>=2.5,<2.14.0
 test-generator==0.1.2
 requests>=2.25.1
 opencv-python>=4.5
diff --git a/tools/mo/requirements_tf.txt b/tools/mo/requirements_tf.txt
index 548f30808083a1..240b60351a6cad 100644
--- a/tools/mo/requirements_tf.txt
+++ b/tools/mo/requirements_tf.txt
@@ -1,5 +1,5 @@
 -c ../constraints.txt
-tensorflow>=1.15.5,<2.13.0
+tensorflow>=1.15.5,<2.14.0
 numpy>=1.16.6,<1.26
 networkx
 defusedxml
diff --git a/tools/mo/requirements_tf2.txt b/tools/mo/requirements_tf2.txt
index a96ed84004f984..1b955f23d0feea 100644
--- a/tools/mo/requirements_tf2.txt
+++ b/tools/mo/requirements_tf2.txt
@@ -1,5 +1,5 @@
 -c ../constraints.txt
-tensorflow>=2.5,<2.13.0
+tensorflow>=2.5,<2.14.0
 numpy>=1.16.6,<1.26
 networkx
 defusedxml

From e24b6211e3fa217b1678913c3bd4f7f972ea7402 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Piotr=20Krzemi=C5=84ski?=
Date: Wed, 11 Oct 2023 00:51:59 +0200
Subject: [PATCH 04/13] [BUGFIX][Core][Template] Multinomial shape filling for
 1D input (#20359)

* [BUGFIX] Fix incorrect shape filling for 1D tensor smaller than requested
  sample size

* [FIX] Remove redeclaration
---
 .../openvino/reference/multinomial.hpp        |  7 ++-
 src/core/src/op/multinomial.cpp               |  9 ++--
 .../functional/op_reference/multinomial.cpp   | 50 ++++++++++++-------
 3 files changed, 40 insertions(+), 26 deletions(-)

diff --git a/src/core/reference/include/openvino/reference/multinomial.hpp b/src/core/reference/include/openvino/reference/multinomial.hpp
index fc141d1204cbea..ce9fe0a52d7c61 100644
--- a/src/core/reference/include/openvino/reference/multinomial.hpp
+++ b/src/core/reference/include/openvino/reference/multinomial.hpp
@@ -113,8 +113,7 @@ void multinomial(const T* probs,
 
     auto batch_size = probs_shape.size() == 2 ? static_cast<size_t>(probs_shape[0]) : static_cast<size_t>(1);
     auto class_size = probs_shape.size() == 2 ? static_cast<size_t>(probs_shape[1]) : static_cast<size_t>(probs_shape[0]);
-    auto samples_size =
-        probs_shape.size() == 2 ? static_cast<size_t>(num_samples[0]) : static_cast<size_t>(probs_shape[0]);
+    auto samples_size = static_cast<size_t>(num_samples[0]);
 
     // Iterate over each channel in uniform samples
     std::vector<V> output_samples(total_output_elements_count);
@@ -132,8 +131,8 @@ void multinomial(const T* probs,
                 break;
             }
         }
-        // Additional step with replacement - change probability of a given class to 0, and update the cdf
-        if (with_replacement) {
+        // Additional step without replacement - change probability of a given class to 0, and update the cdf
+        if (!with_replacement) {
             T class_probability = selected_class_idx ? cdf[i_translated + selected_class_idx] -
                                                            cdf[i_translated + selected_class_idx - 1]
                                                      : cdf[i_translated + selected_class_idx];
diff --git a/src/core/src/op/multinomial.cpp b/src/core/src/op/multinomial.cpp
index 0dd4a93867d74a..90f41369364879 100644
--- a/src/core/src/op/multinomial.cpp
+++ b/src/core/src/op/multinomial.cpp
@@ -116,12 +116,13 @@ namespace multinomial {
 namespace validate {
 void input_types(const Node* op) {
     NODE_VALIDATION_CHECK(op,
-                          op->get_input_element_type(0).is_real(),
+                          op->get_input_element_type(0).is_real() || op->get_input_element_type(0).is_dynamic(),
                           "Expected floating point type as element type for the 'probs' input.");
 
-    NODE_VALIDATION_CHECK(op,
-                          op->get_input_element_type(1).is_integral_number(),
-                          "Expected integer type as element type for the 'num_samples' input.");
+    NODE_VALIDATION_CHECK(
+        op,
+        op->get_input_element_type(1).is_integral_number() || op->get_input_element_type(1).is_dynamic(),
+        "Expected integer type as element type for the 'num_samples' input.");
 }
 }  // namespace validate
 }  // namespace multinomial
diff --git a/src/plugins/template/tests/functional/op_reference/multinomial.cpp b/src/plugins/template/tests/functional/op_reference/multinomial.cpp
index d2edf5bedd9d60..25159ae3ee2a22 100644
--- a/src/plugins/template/tests/functional/op_reference/multinomial.cpp
+++ b/src/plugins/template/tests/functional/op_reference/multinomial.cpp
@@ -86,8 +86,11 @@ std::vector<MultinomialParams> generateMultinomialParams() {
     const ov::Shape prob_2d_shape{2, 4};
     const ov::Shape prob_1d_shape{4};
     const ov::Shape num_samples_shape{1};
+    const ov::Shape prob_1d_shape_expand_small{2};
+    const ov::Shape out_1d_shape_expand_big{16};
 
     reference_tests::Tensor num_samples(num_samples_shape, ov::element::Type_t::i32, std::vector<int32_t>{4});
+    reference_tests::Tensor num_samples_big(num_samples_shape, ov::element::Type_t::i32, std::vector<int32_t>{16});
 
     reference_tests::Tensor probabilities_2d_no_log(prob_2d_shape,
                                                     et,
@@ -95,50 +98,61 @@ std::vector<MultinomialParams> generateMultinomialParams() {
     reference_tests::Tensor probabilities_2d_log(prob_2d_shape, et, std::vector<vt>{1, 2, 3, 4, 2, 4, 6, 8});
     reference_tests::Tensor probabilities_1d_no_log(prob_1d_shape, et, std::vector<vt>{0.001, 0.01, 0.1, 0.899});
     reference_tests::Tensor probabilities_1d_log(prob_1d_shape, et, std::vector<vt>{1, 10, 7, 3});
+    reference_tests::Tensor probabilities_1d_expand(prob_1d_shape_expand_small, et, std::vector<vt>{0.00001, 0.99999});
 
-    reference_tests::Tensor output_2d_no_log_no_replacement(prob_2d_shape,
-                                                            ov::element::Type_t::i32,
-                                                            std::vector<int32_t>{3, 3, 3, 3, 0, 0, 0, 0});
-    reference_tests::Tensor output_2d_log_no_replacement(prob_2d_shape,
+    reference_tests::Tensor output_2d_no_log_replacement(prob_2d_shape,
                                                          ov::element::Type_t::i32,
-                                                         std::vector<int32_t>{3, 3, 2, 3, 3, 3, 3, 3});
-    reference_tests::Tensor output_1d_no_log_replacement(prob_1d_shape,
+                                                         std::vector<int32_t>{3, 3, 3, 3, 0, 0, 0, 0});
+    reference_tests::Tensor output_2d_log_replacement(prob_2d_shape,
+                                                      ov::element::Type_t::i32,
+                                                      std::vector<int32_t>{3, 3, 2, 3, 3, 3, 3, 3});
+    reference_tests::Tensor output_1d_no_log_no_replacement(prob_1d_shape,
+                                                            ov::element::Type_t::i64,
+                                                            std::vector<int64_t>{3, 2, 1, 0});
+    reference_tests::Tensor output_1d_log_no_replacement(prob_1d_shape,
                                                          ov::element::Type_t::i64,
-                                                         std::vector<int64_t>{3, 2, 1, 0});
-    reference_tests::Tensor output_1d_log_replacement(prob_1d_shape,
-                                                      ov::element::Type_t::i64,
-                                                      std::vector<int64_t>{1, 2, 3, 0});
+                                                         std::vector<int64_t>{1, 2, 3, 0});
+    reference_tests::Tensor output_1d_expand(out_1d_shape_expand_big,
+                                             ov::element::Type_t::i64,
+                                             std::vector<int64_t>{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1});
 
     std::vector<MultinomialParams> params;
     // probabilities, num_samples, output, convert_type, log_probs, with_replacement, name
     params.emplace_back(probabilities_2d_no_log,
                         num_samples,
-                        output_2d_no_log_no_replacement,
+                        output_2d_no_log_replacement,
                         ov::element::Type_t::i32,
                         false,
-                        false,
+                        true,
                         "input_2d");
     params.emplace_back(probabilities_2d_log,
                         num_samples,
-                        output_2d_log_no_replacement,
+                        output_2d_log_replacement,
                         ov::element::Type_t::i32,
                         true,
-                        false,
+                        true,
                         "input_2d");
     params.emplace_back(probabilities_1d_no_log,
                         num_samples,
-                        output_1d_no_log_replacement,
+                        output_1d_no_log_no_replacement,
                         ov::element::Type_t::i64,
                         false,
-                        true,
+                        false,
                         "input_1d");
     params.emplace_back(probabilities_1d_log,
                         num_samples,
-                        output_1d_log_replacement,
+                        output_1d_log_no_replacement,
                         ov::element::Type_t::i64,
                         true,
-                        true,
+                        false,
                         "input_1d");
+    params.emplace_back(probabilities_1d_expand,
+                        num_samples_big,
+                        output_1d_expand,
+                        ov::element::Type_t::i64,
+                        false,
+                        true,
+                        "input_1d_expand");
 
     return params;
 }

From 35308ce34d2e7603e395f134fecb9db5b50a09e0 Mon Sep 17 00:00:00 2001
From: Ilya Lavrenov
Date: Wed, 11 Oct 2023 03:29:56 +0400
Subject: [PATCH 05/13] Use np.float32 instead of np.float (#20377)

---
 tests/layer_tests/onnx_tests/test_loop.py                     | 2 +-
 tools/mo/unit_tests/mo/front/tf/identityN_to_identity_test.py | 4 ++--
 tools/mo/unit_tests/mo/middle/UpsampleToResample_test.py      | 4 ++--
 3 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/tests/layer_tests/onnx_tests/test_loop.py b/tests/layer_tests/onnx_tests/test_loop.py
index 579c2dfca32c87..be90ccd106bcdf 100644
--- a/tests/layer_tests/onnx_tests/test_loop.py
+++ b/tests/layer_tests/onnx_tests/test_loop.py
@@ -16,7 +16,7 @@ def create_const(name, tensor_type, value):
         if tensor_type == TensorProto.INT64:
             np_type = np.int64
         elif tensor_type == TensorProto.FLOAT:
-            np_type = np.float
+            np_type = np.float32
         elif tensor_type == TensorProto.BOOL:
             np_type = bool
         else:
diff --git a/tools/mo/unit_tests/mo/front/tf/identityN_to_identity_test.py b/tools/mo/unit_tests/mo/front/tf/identityN_to_identity_test.py
index 1c736ddb6edb2e..7a9ba24cc515e9 100644
--- a/tools/mo/unit_tests/mo/front/tf/identityN_to_identity_test.py
+++ b/tools/mo/unit_tests/mo/front/tf/identityN_to_identity_test.py
@@ -14,12 +14,12 @@
 
     **regular_op_with_shaped_data('placeholder_0', [1, 227, 227, 3], {'type': 'Parameter'}),
     **regular_op_with_shaped_data('placeholder_1', [1, 227, 227, 3], {'type': 'Parameter'}),
-    **regular_op_with_empty_data('identityN', {'op': 'IdentityN', 'type': None, 'data_types': [np.int32, np.float],
+    **regular_op_with_empty_data('identityN', {'op': 'IdentityN', 'type': None, 'data_types': [np.int32, np.float32],
                                                'name': 'my_identity'}),
     **empty_data('identityN_1_d'),
     **regular_op_with_empty_data('identity0', {'op': 'Identity', 'type': None, 'data_type': np.int32,
                                                'name': 'my_identity/0_port'}),
-    **regular_op_with_empty_data('identity1', {'op': 'Identity', 'type': None, 'data_type': np.float,
+    **regular_op_with_empty_data('identity1', {'op': 'Identity', 'type': None, 'data_type': np.float32,
                                                'name': 'my_identity/1_port'}),
 
     **result('output0'),
diff --git a/tools/mo/unit_tests/mo/middle/UpsampleToResample_test.py b/tools/mo/unit_tests/mo/middle/UpsampleToResample_test.py
index e65eef945a1705..e1530ff61d37d7 100644
--- a/tools/mo/unit_tests/mo/middle/UpsampleToResample_test.py
+++ b/tools/mo/unit_tests/mo/middle/UpsampleToResample_test.py
@@ -40,7 +40,7 @@
     'ss_stride_data': {'kind': 'data', 'value': int64_array([1]), 'shape': int64_array([1])},
     'strided_slice': {'type': 'StridedSlice', 'kind': 'op', 'op': 'StridedSlice'},
     'strided_slice_data': {'kind': 'data', 'shape': None, 'value': None},
-    'cast_to_float': {'kind': 'op', 'op': 'Cast', 'type': 'Convert', 'dst_type': np.float},
+    'cast_to_float': {'kind': 'op', 'op': 'Cast', 'type': 'Convert', 'dst_type': np.float32},
     'cast_to_float_d': {'kind': 'data', 'value': None, 'shape': None},
     'factor': {'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([5, 5]), 'shape': int64_array([2])},
     'factor_data': {'kind': 'data', 'value': int64_array([5, 5]), 'shape': int64_array([2])},
@@ -104,7 +104,7 @@
     'ss_end_data': {'kind': 'data', 'value': None, 'shape': None},
     'ss_stride': {'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([1]), 'shape': int64_array([1])},
     'ss_stride_data': {'kind': 'data', 'value': None, 'shape': None},
-    'cast_to_float': {'kind': 'op', 'op': 'Cast', 'type': 'Convert', 'dst_type': np.float},
+    'cast_to_float': {'kind': 'op', 'op': 'Cast', 'type': 'Convert', 'dst_type': np.float32},
    'cast_to_float_d': {'kind': 'data', 'value': None, 'shape': None},
     'mul': {'type': 'Multiply', 'kind': 'op', 'op': 'Multiply'},
     'mul_data': {'kind': 'data', 'shape': None, 'value': None},

From 8020530e678f875e8ac35162522e4bf174921806 Mon Sep 17 00:00:00 2001
From: Ilya Churaev
Date: Wed, 11 Oct 2023 07:09:04 +0400
Subject: [PATCH 06/13] Reduce ngraph namespace usage from core component
 (#20309)

* Reduce ngraph namespace usage from core component

* Fixed build

* Fixed build 2

* Added missed opset to legacy API
---
 src/core/include/ngraph/opsets/opset.hpp |    2 +
 src/core/src/graph_util.cpp              |  197 ++--
 src/core/src/opsets/opset.cpp            |   85 +-
 src/core/src/runtime/aligned_buffer.cpp  |   18 +-
 src/core/src/runtime/tensor.cpp          |    4 -
 src/core/src/shape_util.cpp              |   12 +-
 src/core/src/util.cpp                    |  216 ++---
 src/core/src/validation_util.cpp         | 1092 +++++++++++-----------
 src/inference/src/ie_core.cpp            |    4 -
 9 files changed, 802 insertions(+), 828 deletions(-)

diff --git a/src/core/include/ngraph/opsets/opset.hpp b/src/core/include/ngraph/opsets/opset.hpp
index 443de5714e4243..26c21e237b16c3 100644
--- a/src/core/include/ngraph/opsets/opset.hpp
+++ b/src/core/include/ngraph/opsets/opset.hpp
@@ -67,6 +67,8 @@ const NGRAPH_API OpSet& get_opset8();
 const NGRAPH_API OpSet& get_opset9();
 const NGRAPH_API OpSet& get_opset10();
 const NGRAPH_API OpSet& get_opset11();
+const NGRAPH_API OpSet& get_opset12();
+const NGRAPH_API OpSet& get_opset13();
 const NGRAPH_API std::map<std::string, std::function<const OpSet&()>>& get_available_opsets();
 }  // namespace ngraph
 NGRAPH_SUPPRESS_DEPRECATED_END
diff --git a/src/core/src/graph_util.cpp b/src/core/src/graph_util.cpp
index 93457bd17083e3..8001678dab2601 100644
--- a/src/core/src/graph_util.cpp
+++ b/src/core/src/graph_util.cpp
@@ -10,30 +10,17 @@
 #include <unordered_map>
 #include <unordered_set>
 
-#include "ngraph/descriptor/input.hpp"
-#include "ngraph/descriptor/output.hpp"
-#include "ngraph/function.hpp"
-#include "ngraph/log.hpp"
-#include "ngraph/node.hpp"
-#include "ngraph/op/broadcast.hpp"
-#include "ngraph/op/constant.hpp"
-#include "ngraph/op/parameter.hpp"
-#include "ngraph/op/result.hpp"
-#include "ngraph/op/tensor_iterator.hpp"
-#include "ngraph/op/util/op_types.hpp"
-#include "ngraph/opsets/opset5.hpp"
-#include "ngraph/opsets/opset8.hpp"
-#include "ngraph/pass/manager.hpp"
-#include "ngraph/pass/visualize_tree.hpp"
-#include "ngraph/rt_info.hpp"
-#include "ngraph/util.hpp"
 #include "openvino/core/descriptor/tensor.hpp"
+#include "openvino/core/rt_info.hpp"
+#include "openvino/op/broadcast.hpp"
+#include "openvino/op/constant.hpp"
+#include "openvino/op/util/op_types.hpp"
+#include "openvino/pass/manager.hpp"
+#include "openvino/pass/visualize_tree.hpp"
 #include "transformations/common_optimizations/compress_float_constants.hpp"
 #include "transformations/common_optimizations/fused_names_cleanup.hpp"
 #include "transformations/common_optimizations/mark_precision_sensitive_shapeof_subgraphs.hpp"
 
-using namespace std;
-
 namespace {
 
 void clone_ov_nodes(const std::vector<std::shared_ptr<Node>>& nodes,
@@ -49,7 +36,7 @@ void clone_ov_nodes(const std::vector<std::shared_ptr<Node>>& nodes,
         }
         std::vector<std::shared_ptr<Node>> cloned_dependencies;
         for (const auto& dependency : node->get_control_dependencies()) {
-            shared_ptr<Node>& dependent = node_map.at(dependency.get());
+            std::shared_ptr<Node>& dependent = node_map.at(dependency.get());
             if (find(cloned_dependencies.begin(), cloned_dependencies.end(), dependent) ==
                 cloned_dependencies.end()) {
                 cloned_dependencies.push_back(dependent);
@@ -161,7 +148,7 @@ void replace_node(const std::shared_ptr<Node>& target, const OutputVector& repla
 
     OPENVINO_ASSERT(target->get_output_size() == replacement_values.size());
 
-    unordered_set<shared_ptr<Node>> replacement_nodes;
+    std::unordered_set<std::shared_ptr<Node>> replacement_nodes;
     // For each of target's output O with replacement output O_rep:
     //     For each O's connected downstream input I:
     //         Change I's connected upstream output to O_rep
@@ -179,15 +166,15 @@ void replace_node(const std::shared_ptr<Node>& target, const OutputVector& repla
 }
 
 void replace_node(const std::shared_ptr<Node>& target, const std::shared_ptr<Node>& replacement) {
-    auto default_output_order = vector<size_t>(target->get_output_size());
+    auto default_output_order = std::vector<size_t>(target->get_output_size());
     std::iota(default_output_order.begin(), default_output_order.end(), 0);
     replace_node(target, replacement, default_output_order);
 }
 
 void replace_nodes(const std::shared_ptr<Model>& f,
-                   const unordered_map<shared_ptr<op::v0::Parameter>, shared_ptr<op::v0::Parameter>>&
-                       parameter_replacement_map,
-                   const unordered_map<shared_ptr<Node>, shared_ptr<Node>>& body_replacement_map) {
+                   const std::unordered_map<std::shared_ptr<op::v0::Parameter>,
+                                            std::shared_ptr<op::v0::Parameter>>& parameter_replacement_map,
+                   const std::unordered_map<std::shared_ptr<Node>, std::shared_ptr<Node>>& body_replacement_map) {
 
     auto& params = f->get_parameters();
     for (size_t i = 0; i < params.size(); i++) {
@@ -231,7 +218,7 @@ std::shared_ptr<ov::Model> clone_ov_model(const Model& func, std::unordered_map<
 
-    for (shared_ptr<Node> node : func.get_results()) {
+    for (std::shared_ptr<Node> node : func.get_results()) {
         auto result = ov::as_type_ptr<op::v0::Result>(node_map.at(node.get()));
         if (!result) {
             OPENVINO_THROW("Results should be of type op::Result");
@@ -240,7 +227,7 @@ std::shared_ptr<ov::Model> clone_ov_model(const Model& func, std::unordered_map<
-        cloned_sinks.push_back(static_pointer_cast<op::Sink>(node_map.at(node.get())));
+        cloned_sinks.push_back(std::static_pointer_cast<op::Sink>(node_map.at(node.get())));
     }
 
     std::vector<std::shared_ptr<op::v0::Parameter>> cloned_params;
@@ -273,8 +260,8 @@ bool compare_constants(const std::shared_ptr<Node>& n1, const std::shared_ptr<No
         return false;
     }
 
-    if (static_pointer_cast<op::v0::Constant>(n1)->get_value_strings() !=
-        static_pointer_cast<op::v0::Constant>(n2)->get_value_strings()) {
+    if (std::static_pointer_cast<op::v0::Constant>(n1)->get_value_strings() !=
+        std::static_pointer_cast<op::v0::Constant>(n2)->get_value_strings()) {
         return false;
     }
 
@@ -368,7 +355,8 @@ void save_model(const std::shared_ptr<ov::Model>& m, const std::string& ou
 
 OPENVINO_SUPPRESS_DEPRECATED_START
 
-ngraph::NodeVector ngraph::find_common_args(std::shared_ptr<Node> node1, std::shared_ptr<Node> node2) {
+namespace ngraph {
+ov::NodeVector find_common_args(std::shared_ptr<Node> node1, std::shared_ptr<Node> node2) {
     std::unordered_set<std::shared_ptr<Node>> node1_args;
 
     auto compute_node1_args = [&node1_args](const std::shared_ptr<Node>& node) {
@@ -396,15 +384,15 @@ ngraph::NodeVector ngraph::find_common_args(std::shared_ptr<Node> node1, std::sh
 }
 
 // Check if all paths from X to a result go through Y
-bool ngraph::is_post_dominated(Node* X, Node* Y) {
+bool is_post_dominated(Node* X, Node* Y) {
     std::unordered_set<Node*> visited;
     std::stack<Node*, std::vector<Node*>> stack;
     stack.push(X);
 
     while (stack.size() > 0) {
-        ngraph::Node* curr = stack.top();
+        ov::Node* curr = stack.top();
         visited.insert(curr);
-        if (ngraph::op::is_output(curr)) {
+        if (ov::op::util::is_output(curr)) {
             return false;
         }
         stack.pop();
@@ -419,8 +407,8 @@ bool ngraph::is_post_dominated(Node* X, Node* Y) {
     return true;
 }
 
-std::vector<std::shared_ptr<ngraph::Node>> ngraph::clone_nodes(const std::vector<std::shared_ptr<ngraph::Node>>& nodes,
-                                                               NodeMap& node_map) {
+std::vector<std::shared_ptr<ov::Node>> clone_nodes(const std::vector<std::shared_ptr<ov::Node>>& nodes,
+                                                   NodeMap& node_map) {
     // for each node in topological order
     auto sorted_nodes = topological_sort(nodes);
    for (const auto& node : sorted_nodes) {
@@ -433,7 +421,7 @@ std::vector<std::shared_ptr<ngraph::Node>> ngraph::clone_nodes(const std::vector
         }
         std::vector<std::shared_ptr<Node>> cloned_dependencies;
         for (auto& dependency : node->get_control_dependencies()) {
-            shared_ptr<Node>& dependent = node_map.at(dependency.get());
+            std::shared_ptr<Node>& dependent = node_map.at(dependency.get());
             if (find(cloned_dependencies.begin(), cloned_dependencies.end(), dependent) ==
                 cloned_dependencies.end()) {
                 cloned_dependencies.push_back(dependent);
@@ -463,18 +451,18 @@ std::vector<std::shared_ptr<ngraph::Node>> ngraph::clone_nodes(const std::vector
 
     // create and return vector of cloned nodes
     // order matches input vector (not necessarily topological)
-    std::vector<std::shared_ptr<ngraph::Node>> cloned_nodes;
+    std::vector<std::shared_ptr<ov::Node>> cloned_nodes;
     for (const auto& node : nodes) {
        cloned_nodes.push_back(node_map.at(node.get()));
    }
    return cloned_nodes;
 }
 
-std::list<std::shared_ptr<ngraph::Node>> ngraph::clone_nodes(const std::vector<std::shared_ptr<ngraph::Node>>& nodes,
-                                                             RawNodeOutputMap& output_map) {
+std::list<std::shared_ptr<ov::Node>> clone_nodes(const std::vector<std::shared_ptr<ov::Node>>& nodes,
+                                                 RawNodeOutputMap& output_map) {
     // for each node in topological order
     auto sorted_nodes = topological_sort(nodes);
-    std::list<std::shared_ptr<Node>> cloned_nodes;
+    std::list<std::shared_ptr<ov::Node>> cloned_nodes;
     for (const auto& node : sorted_nodes) {
         auto node_outputs = node->outputs();
         for (const auto& value : node_outputs) {
@@ -488,7 +476,7 @@ std::list<std::shared_ptr<ngraph::Node>> ngraph::clone_nodes(const std::vector<
             std::vector<std::shared_ptr<Node>> cloned_dependencies;
             for (auto& dependency : node->get_control_dependencies()) {
                 for (const auto& dependency_value : dependency->outputs()) {
-                    shared_ptr<Node> dependent = output_map.at(dependency_value).get_node_shared_ptr();
+                    std::shared_ptr<Node> dependent = output_map.at(dependency_value).get_node_shared_ptr();
                     if (find(cloned_dependencies.begin(), cloned_dependencies.end(), dependent) ==
                         cloned_dependencies.end()) {
                         cloned_dependencies.push_back(dependent);
@@ -514,8 +502,8 @@ std::list<std::shared_ptr<ngraph::Node>> ngraph::clone_nodes(const std::vector<
     return cloned_nodes;
 }
 
-bool ngraph::is_equal_to_const_value(const std::string& const_value, const Output<Node>& reduce_constant) {
-    if (auto rc = ov::as_type_ptr<ngraph::op::Constant>(reduce_constant.get_node_shared_ptr())) {
+bool is_equal_to_const_value(const std::string& const_value, const Output<Node>& reduce_constant) {
+    if (auto rc = ov::as_type_ptr<ov::op::v0::Constant>(reduce_constant.get_node_shared_ptr())) {
         return (rc->get_all_data_elements_bitwise_identical() && rc->convert_value_to_string(0) == const_value);
     } else {
         return false;
@@ -535,28 +523,28 @@ bool ngraph::is_equal_to_const_value(const std::string& const_value, const Outpu
 // |     +------[2]------>     |  |  |     +------[6]------>     |  |     +------[10]----->     |
 // |     <------[3]------+     |  |  |     <------[7]------+     |  |     <------[11]-----+     |
 // +-----+               +-----+  |  +-----+               +-----+  +-----+               +-----+
-pair<shared_ptr<op::Result>, shared_ptr<op::Parameter>> ngraph::insert_result_parameter_split(
-    const shared_ptr<Node>& src_node,
-    const shared_ptr<Node>& dst_node) {
+std::pair<std::shared_ptr<op::v0::Result>, std::shared_ptr<op::v0::Parameter>> insert_result_parameter_split(
+    const std::shared_ptr<Node>& src_node,
+    const std::shared_ptr<Node>& dst_node) {
     if (src_node->get_output_size() != 1) {
         OPENVINO_THROW("Multiple output per op not supported in graph partition yet.");
     }
 
     // Make parameter node
-    shared_ptr<op::Parameter> par_node =
-        make_shared<op::Parameter>(src_node->get_output_element_type(0), src_node->get_output_shape(0));
+    std::shared_ptr<op::v0::Parameter> par_node =
+        std::make_shared<op::v0::Parameter>(src_node->get_output_element_type(0), src_node->get_output_shape(0));
 
     // Fix input / output among src, dst and par
     std::vector<Input<Node>> dst_inputs = get_inputs_from(*src_node, *dst_node);
-    NGRAPH_CHECK(dst_inputs.size() == 1,
-                 "insert_result_parameter_split encountered more than "
-                 "one input between the source and destination nodes");
+    OPENVINO_ASSERT(dst_inputs.size() == 1,
+                    "insert_result_parameter_split encountered more than "
+                    "one input between the source and destination nodes");
     auto& dst_input = dst_inputs[0];
 
     std::vector<Output<Node>> src_outputs = get_outputs_to(*src_node, *dst_node);
-    NGRAPH_CHECK(src_outputs.size() == 1,
-                 "insert_result_parameter_split encountered more than "
-                 "one output between the source and destination nodes");
+    OPENVINO_ASSERT(src_outputs.size() == 1,
+                    "insert_result_parameter_split encountered more than "
+                    "one output between the source and destination nodes");
     auto& src_output = src_outputs[0];
 
     // Remove [0]
@@ -567,7 +555,7 @@ pair<shared_ptr<op::Result>, shared_ptr<op::Parameter>> ngraph::insert_result_pa
 
     // Add res node
    // Add [4], [5], [6], [7]
-    shared_ptr<op::Result> res_node = make_shared<op::Result>(src_node);
+    std::shared_ptr<op::v0::Result> res_node = std::make_shared<op::v0::Result>(src_node);
 
     return make_pair(res_node, par_node);
 }
@@ -612,58 +600,59 @@ pair<shared_ptr<op::Result>, shared_ptr<op::Parameter>> ngraph::insert_result_pa
 // Typically new_node is connected to src_node already. The reason we don't create `new_node`
 // inside the function and return it (similar to ngraph::insert_result_parameter_split) is that
 // we'll have to templatize its function to call new_node's constructor.
-void ngraph::insert_new_node_between(const shared_ptr<Node>& src_node,
-                                     const shared_ptr<Node>& dst_node,
-                                     const shared_ptr<Node>& new_node) {
+void insert_new_node_between(const std::shared_ptr<Node>& src_node,
+                             const std::shared_ptr<Node>& dst_node,
+                             const std::shared_ptr<Node>& new_node) {
     // Fix input / output
     std::vector<Input<Node>> dst_inputs = get_inputs_from(*src_node, *dst_node);
-    NGRAPH_CHECK(dst_inputs.size() == 1,
-                 "insert_new_node_between encountered more than one "
-                 "input between the source and destination nodes");
+    OPENVINO_ASSERT(dst_inputs.size() == 1,
+                    "insert_new_node_between encountered more than one "
+                    "input between the source and destination nodes");
     auto& dst_input = dst_inputs[0];
 
     std::vector<Output<Node>> src_outputs = get_outputs_to(*src_node, *dst_node);
-    NGRAPH_CHECK(src_outputs.size() == 1,
-                 "insert_new_node_between encountered more than one "
-                 "output between the source and destination nodes");
+    OPENVINO_ASSERT(src_outputs.size() == 1,
+                    "insert_new_node_between encountered more than one "
+                    "output between the source and destination nodes");
     auto& src_output = src_outputs[0];
 
     src_output.remove_target_input(dst_input);            // Remove [0]
     dst_input.replace_source_output(new_node->output(0));  // Remove [0] (again), add [8], remove [1], add [9]
 }
 
-std::shared_ptr<Node> ngraph::make_zero(const element::Type& element_type, const Shape& shape) {
-    auto zero = op::Constant::create(element_type, Shape{}, {0.0});
+std::shared_ptr<Node> make_zero(const element::Type& element_type, const Shape& shape) {
+    auto zero = ov::op::v0::Constant::create(element_type, Shape{}, {0.0});
     if (shape.size() > 0) {
-        return std::make_shared<op::v1::Broadcast>(zero,
-                                                   op::Constant::create(element::u64, Shape{shape.size()}, shape));
+        return std::make_shared<ov::op::v1::Broadcast>(
+            zero,
+            op::v0::Constant::create(element::u64, Shape{shape.size()}, shape));
     }
     return zero;
 }
 
-std::shared_ptr<Node> ngraph::make_constant_from_string(std::string val,
-                                                        const element::Type& element_type,
-                                                        const Shape& shape) {
+std::shared_ptr<Node> make_constant_from_string(std::string val,
+                                                const element::Type& element_type,
+                                                const Shape& shape) {
     auto cvals = std::vector<std::string>(shape_size(shape), val);
-    return std::make_shared<op::Constant>(element_type, shape, cvals);
+    return std::make_shared<ov::op::v0::Constant>(element_type, shape, cvals);
 }
 
-bool ngraph::is_zero(const Output<Node>& reduce_constant) {
+bool is_zero(const Output<Node>& reduce_constant) {
     auto result_bool = is_equal_to_const_value("0", reduce_constant);
     return result_bool;
 }
 
-bool ngraph::is_one(const Output<Node>& reduce_constant) {
+bool is_one(const Output<Node>& reduce_constant) {
     auto result_bool = is_equal_to_const_value("1", reduce_constant);
     return result_bool;
 }
 
-ngraph::NodeVector ngraph::get_subgraph_outputs(const NodeVector& nodes,
-                                                const NodeVector& exclusions,
-                                                bool ignore_unused,
-                                                bool ignore_output_duplicates) {
-    std::set<shared_ptr<Node>> exclusions_set(exclusions.begin(), exclusions.end());
-    std::set<shared_ptr<Node>> nodes_set(nodes.begin(), nodes.end());
+ov::NodeVector get_subgraph_outputs(const NodeVector& nodes,
                                    const NodeVector& exclusions,
                                    bool ignore_unused,
                                    bool ignore_output_duplicates) {
+    std::set<std::shared_ptr<Node>> exclusions_set(exclusions.begin(), exclusions.end());
+    std::set<std::shared_ptr<Node>> nodes_set(nodes.begin(), nodes.end());
 
     NodeVector outputs;
 
@@ -684,7 +673,7 @@ ngraph::NodeVector ngraph::get_subgraph_outputs(const NodeVector& nodes,
     return outputs;
 }
 
-ngraph::NodeVector ngraph::extract_subgraph(const NodeVector& results, const NodeVector& args) {
+ov::NodeVector extract_subgraph(const NodeVector& results, const NodeVector& args) {
     NodeVector subgraph;
     traverse_nodes(
         results,
@@ -695,15 +684,15 @@ ngraph::NodeVector ngraph::extract_subgraph(const NodeVector& results, const Nod
     return subgraph;
 }
 
-bool ngraph::is_used(Node* node) {
+bool is_used(Node* node) {
     std::unordered_set<Node*> instances_seen;
     std::stack<Node*, std::vector<Node*>> stack;
     stack.push(node);
 
     while (stack.size() > 0) {
-        ngraph::Node* n = stack.top();
+        ov::Node* n = stack.top();
         if (instances_seen.count(n) == 0) {
-            if (ngraph::op::is_output(n)) {
+            if (ov::op::util::is_output(n)) {
                 return true;
             }
             instances_seen.insert(n);
@@ -718,7 +707,7 @@ bool ngraph::is_used(Node* node) {
     return false;
 }
 
-size_t ngraph::get_user_count(Node* node) {
+size_t get_user_count(Node* node) {
     size_t count = 0;
     for (const auto& node_user : node->get_users()) {
         count += is_used(node_user.get());
@@ -726,13 +715,13 @@ size_t ngraph::get_user_count(Node* node) {
     return count;
 }
 
-bool ngraph::is_strided(const Strides& strides) {
+bool is_strided(const Strides& strides) {
     return std::any_of(strides.begin(), strides.end(), [](size_t stride) {
         return stride != 1;
     });
 }
 
-bool ngraph::is_valid_rank(const std::shared_ptr<Node>& node, std::vector<size_t> valid_ranks) {
+bool is_valid_rank(const std::shared_ptr<Node>& node, std::vector<size_t> valid_ranks) {
     auto node_rank = node->get_shape().size();
     for (auto rank : valid_ranks) {
         if (rank == node_rank) {
@@ -742,15 +731,15 @@ bool ngraph::is_valid_rank(const std::shared_ptr<Node>& node, std::vector<size_t
     return false;
 }
 
-void ngraph::plot_graph(std::shared_ptr<Function> f,
-                        const std::string& filename,
-                        std::function<void(const Node& node, std::vector<std::string>& attributes)> attributes) {
-    ngraph::pass::Manager pass_manager;
-    pass_manager.register_pass<ngraph::pass::VisualizeTree>(filename, attributes);
+void plot_graph(std::shared_ptr<Function> f,
+                const std::string& filename,
+                std::function<void(const Node& node, std::vector<std::string>& attributes)> attributes) {
+    ov::pass::Manager pass_manager;
+    pass_manager.register_pass<ov::pass::VisualizeTree>(filename, attributes);
     pass_manager.run_passes(std::move(f));
 }
 
-std::vector<ngraph::Input<ngraph::Node>> ngraph::get_inputs_from(Node& src, Node& dst) {
+std::vector<Input<Node>> get_inputs_from(Node& src, Node& dst) {
     std::vector<Input<Node>> result;
 
     for (auto& input : dst.inputs()) {
@@ -762,7 +751,7 @@ std::vector<ngraph::Input<ngraph::Node>> ngraph::get_inputs_from(Node& src, Node
     return result;
 }
 
-std::vector<ngraph::Output<ngraph::Node>> ngraph::get_outputs_to(Node& src, Node& dst) {
+std::vector<Output<Node>> get_outputs_to(Node& src, Node& dst) {
     std::vector<Output<Node>> result;
 
     for (auto& output : src.outputs()) {
@@ -783,10 +772,10 @@ std::vector<ngraph::Output<ngraph::Node>> ngraph::get_outputs_to(Node& src, Node
     return result;
 }
 
-static bool check_for_cycles_bkwd(const std::shared_ptr<ngraph::Node>& node,
-                                  std::deque<std::shared_ptr<ngraph::Node>>& path,
-                                  std::unordered_set<std::shared_ptr<ngraph::Node>>& path_set,
-                                  ngraph::NodeVector& cycle_nodes) {
+static bool check_for_cycles_bkwd(const std::shared_ptr<ov::Node>& node,
+                                  std::deque<std::shared_ptr<ov::Node>>& path,
+                                  std::unordered_set<std::shared_ptr<ov::Node>>& path_set,
+                                  ov::NodeVector& cycle_nodes) {
     path.push_back(node);
     path_set.insert(node);
     for (size_t i = 0; i < node->inputs().size(); i++) {
@@ -808,10 +797,10 @@ static bool check_for_cycles_bkwd(const std::shared_ptr<ngraph::Node>& node,
     return false;
 }
 
-static bool check_for_cycles_fwd(const std::shared_ptr<ngraph::Node>& node,
-                                 std::deque<std::shared_ptr<ngraph::Node>>& path,
-                                 std::unordered_set<std::shared_ptr<ngraph::Node>>& path_set,
-                                 ngraph::NodeVector& cycle_nodes) {
+static bool check_for_cycles_fwd(const std::shared_ptr<ov::Node>& node,
+                                 std::deque<std::shared_ptr<ov::Node>>& path,
+                                 std::unordered_set<std::shared_ptr<ov::Node>>& path_set,
+                                 ov::NodeVector& cycle_nodes) {
     path.push_back(node);
     path_set.insert(node);
     for (auto& arg : node->get_users()) {
@@ -832,7 +821,7 @@ static bool check_for_cycles_fwd(const std::shared_ptr<ngraph::Node>& node,
     return false;
 }
 
-bool ngraph::check_for_cycles(const ngraph::Function* func, ngraph::NodeVector& cycle_nodes, bool& is_bkwd_cycle) {
+bool check_for_cycles(const ov::Model* func, ov::NodeVector& cycle_nodes, bool& is_bkwd_cycle) {
     for (const auto& res : func->get_results()) {
         std::deque<std::shared_ptr<Node>> path;
         // mirror of path stack for faster cycle check
@@ -865,3 +854,5 @@ bool ngraph::check_for_cycles(const ngraph::Function* func, ngraph::NodeVector& c
     // no cycles
     return false;
 }
+
+}  // namespace ngraph
diff --git a/src/core/src/opsets/opset.cpp b/src/core/src/opsets/opset.cpp
index 1a61c91e7a1133..9adcd22a43cc35 100644
--- a/src/core/src/opsets/opset.cpp
+++ b/src/core/src/opsets/opset.cpp
@@ -5,17 +5,11 @@
 #include "ngraph/opsets/opset.hpp"
 
 #include "itt.hpp"
-#include "ngraph/deprecated.hpp"
 #include "ngraph/log.hpp"
 #include "openvino/op/ops.hpp"
 #include "openvino/opsets/opset.hpp"
 #include "openvino/util/log.hpp"
 
-OPENVINO_SUPPRESS_DEPRECATED_START
-ngraph::OpSet::OpSet(const ov::OpSet& opset) : ov::OpSet(opset) {}
-
-ngraph::OpSet::OpSet(const ngraph::OpSet& opset) : ov::OpSet(opset) {}
-
 ov::OpSet::OpSet(const std::string& name) : m_name(name) {}
 
 ov::OpSet::OpSet(const ov::OpSet& opset) {
@@ -51,24 +45,6 @@ ov::Node* ov::OpSet::create_insensitive(const std::string& name) const {
     return m_factory_registry.create(type_info_it->second);
 }
 
-const std::map<std::string, std::function<const ngraph::OpSet&()>>& ngraph::get_available_opsets() {
-#define _NGRAPH_REG_OPSET(OPSET) \
-    { #OPSET, ngraph::get_##OPSET }
-    const static std::map<std::string, std::function<const ngraph::OpSet&()>> opset_map = {_NGRAPH_REG_OPSET(opset1),
-                                                                                           _NGRAPH_REG_OPSET(opset2),
-                                                                                           _NGRAPH_REG_OPSET(opset3),
-                                                                                           _NGRAPH_REG_OPSET(opset4),
-                                                                                           _NGRAPH_REG_OPSET(opset5),
-                                                                                           _NGRAPH_REG_OPSET(opset6),
-                                                                                           _NGRAPH_REG_OPSET(opset7),
-                                                                                           _NGRAPH_REG_OPSET(opset8),
-                                                                                           _NGRAPH_REG_OPSET(opset9),
-                                                                                           _NGRAPH_REG_OPSET(opset10),
-                                                                                           _NGRAPH_REG_OPSET(opset11)};
-#undef _NGRAPH_REG_OPSET
-    return opset_map;
-}
-
 const std::map<std::string, std::function<const ov::OpSet&()>>& ov::get_available_opsets() {
 #define _OPENVINO_REG_OPSET(OPSET) \
     { #OPSET, ov::get_##OPSET }
@@ -232,57 +208,96 @@ const ov::OpSet& ov::get_opset13() {
     return opset;
 }
 
-const ngraph::OpSet& ngraph::get_opset1() {
+OPENVINO_SUPPRESS_DEPRECATED_START
+namespace ngraph {
+
+OpSet::OpSet(const ov::OpSet& opset) : ov::OpSet(opset) {}
+
+OpSet::OpSet(const OpSet& opset) : ov::OpSet(opset) {}
+
+const std::map<std::string, std::function<const OpSet&()>>& get_available_opsets() {
+#define _REG_OPSET(OPSET) \
+    { #OPSET, get_##OPSET }
+    const static std::map<std::string, std::function<const OpSet&()>> opset_map = {_REG_OPSET(opset1),
+                                                                                   _REG_OPSET(opset2),
+                                                                                   _REG_OPSET(opset3),
+                                                                                   _REG_OPSET(opset4),
+                                                                                   _REG_OPSET(opset5),
+                                                                                   _REG_OPSET(opset6),
+                                                                                   _REG_OPSET(opset7),
+                                                                                   _REG_OPSET(opset8),
+                                                                                   _REG_OPSET(opset9),
+                                                                                   _REG_OPSET(opset10),
+                                                                                   _REG_OPSET(opset11),
+                                                                                   _REG_OPSET(opset12),
+                                                                                   _REG_OPSET(opset13)};
+#undef _REG_OPSET
+    return opset_map;
+}
+
+const OpSet& get_opset1() {
     static OpSet opset(ov::get_opset1());
     return opset;
 }
 
-const ngraph::OpSet& ngraph::get_opset2() {
+const OpSet& get_opset2() {
     static OpSet opset(ov::get_opset2());
     return opset;
 }
 
-const ngraph::OpSet& ngraph::get_opset3() {
+const OpSet& get_opset3() {
     static OpSet opset(ov::get_opset3());
     return opset;
 }
 
-const ngraph::OpSet& ngraph::get_opset4() {
+const OpSet& get_opset4() {
     static OpSet opset(ov::get_opset4());
     return opset;
 }
 
-const ngraph::OpSet& ngraph::get_opset5() {
+const OpSet& get_opset5() {
     static OpSet opset(ov::get_opset5());
     return opset;
 }
 
-const ngraph::OpSet& ngraph::get_opset6() {
+const OpSet& get_opset6() {
     static OpSet opset(ov::get_opset6());
     return opset;
 }
 
-const ngraph::OpSet& ngraph::get_opset7() {
+const OpSet& get_opset7() {
     static OpSet opset(ov::get_opset7());
     return opset;
 }
 
-const ngraph::OpSet& ngraph::get_opset8() {
+const OpSet& get_opset8() {
     static OpSet opset(ov::get_opset8());
     return opset;
 }
 
-const ngraph::OpSet& ngraph::get_opset9() {
+const OpSet& get_opset9() {
     static OpSet opset(ov::get_opset9());
     return opset;
 }
 
-const ngraph::OpSet& ngraph::get_opset10() {
+const OpSet& get_opset10() {
     static OpSet opset(ov::get_opset10());
     return opset;
 }
 
-const ngraph::OpSet& ngraph::get_opset11() {
+const OpSet& get_opset11() {
     static OpSet opset(ov::get_opset11());
     return opset;
 }
+
+const OpSet& get_opset12() {
+    static OpSet opset(ov::get_opset12());
+    return opset;
+}
+
+const OpSet& get_opset13() {
+    static OpSet opset(ov::get_opset13());
+    return opset;
+}
+
+}  // namespace ngraph
diff --git a/src/core/src/runtime/aligned_buffer.cpp b/src/core/src/runtime/aligned_buffer.cpp
index 95ab3f44306eb8..d7c5229fcc0efa 100644
--- a/src/core/src/runtime/aligned_buffer.cpp
+++ b/src/core/src/runtime/aligned_buffer.cpp
@@ -8,17 +8,18 @@
 #include <memory>
 
 #include "ngraph/util.hpp"
+#include "openvino/util/log.hpp"
 
-using namespace ngraph;
-using namespace std;
 NGRAPH_SUPPRESS_DEPRECATED_START
 
+namespace ngraph {
+
 runtime::AlignedBuffer::AlignedBuffer() : m_allocated_buffer(nullptr), m_aligned_buffer(nullptr), m_byte_size(0) {}
 
 runtime::AlignedBuffer::AlignedBuffer(size_t byte_size, size_t alignment) : m_byte_size(byte_size) {
     m_byte_size = std::max<size_t>(1, byte_size);
     size_t allocation_size = m_byte_size + alignment;
-    m_allocated_buffer = static_cast<char*>(ngraph_malloc(allocation_size));
+    m_allocated_buffer = new char[allocation_size];
     m_aligned_buffer = m_allocated_buffer;
     size_t mod = (alignment != 0) ? size_t(m_aligned_buffer) % alignment : 0;
 
@@ -38,14 +39,14 @@ runtime::AlignedBuffer::AlignedBuffer(AlignedBuffer&& other)
 
 runtime::AlignedBuffer::~AlignedBuffer() {
     if (m_allocated_buffer != nullptr) {
-        free(m_allocated_buffer);
+        delete[] m_allocated_buffer;
     }
 }
 
 runtime::AlignedBuffer& runtime::AlignedBuffer::operator=(AlignedBuffer&& other) {
     if (this != &other) {
         if (m_allocated_buffer != nullptr) {
-            free(m_allocated_buffer);
+            delete[] m_allocated_buffer;
         }
         m_allocated_buffer = other.m_allocated_buffer;
         m_aligned_buffer = other.m_aligned_buffer;
@@ -56,9 +57,10 @@ runtime::AlignedBuffer& runtime::AlignedBuffer::operator=(AlignedBuffer&& other)
     }
     return *this;
 }
+}  // namespace ngraph
 
 namespace ov {
-AttributeAdapter<shared_ptr<ngraph::runtime::AlignedBuffer>>::AttributeAdapter(
-    shared_ptr<ngraph::runtime::AlignedBuffer>& value)
-    : DirectValueAccessor<shared_ptr<ngraph::runtime::AlignedBuffer>>(value) {}
+AttributeAdapter<std::shared_ptr<ngraph::runtime::AlignedBuffer>>::AttributeAdapter(
+    std::shared_ptr<ngraph::runtime::AlignedBuffer>& value)
+    : DirectValueAccessor<std::shared_ptr<ngraph::runtime::AlignedBuffer>>(value) {}
 }  // namespace ov
diff --git a/src/core/src/runtime/tensor.cpp b/src/core/src/runtime/tensor.cpp
index 7f0c51fa45fea0..f7f587d1a95e9d 100644
--- a/src/core/src/runtime/tensor.cpp
+++ b/src/core/src/runtime/tensor.cpp
@@ -4,10 +4,6 @@
 
 #include "ngraph/runtime/tensor.hpp"
 
-#include "ngraph/log.hpp"
-#include "ngraph/runtime/aligned_buffer.hpp"
-#include "ngraph/type/element_type.hpp"
-
 using namespace ngraph;
 using namespace std;
 
diff --git a/src/core/src/shape_util.cpp b/src/core/src/shape_util.cpp
index 9ce8512d7a7797..72c72c39b68d4f 100644
--- a/src/core/src/shape_util.cpp
+++ b/src/core/src/shape_util.cpp
@@ -9,10 +9,9 @@
 #include "openvino/core/partial_shape.hpp"
 #include "openvino/core/shape_util.hpp"
 
-using namespace ngraph;
-
+namespace ngraph {
 template <>
-PartialShape ngraph::project(const PartialShape& shape, const AxisSet& axes) {
+PartialShape project(const PartialShape& shape, const AxisSet& axes) {
     if (shape.rank().is_dynamic()) {
         return shape;
     } else {
@@ -29,7 +28,7 @@ PartialShape ngraph::project(const PartialShape& shape, const AxisSet& axes) {
 }
 
 template <>
-PartialShape ngraph::reduce(const PartialShape& shape, const AxisSet& deleted_axes, bool keep_dims) {
+PartialShape reduce(const PartialShape& shape, const AxisSet& deleted_axes, bool keep_dims) {
     if (shape.rank().is_dynamic()) {
         return shape;
     } else {
@@ -49,8 +48,8 @@ PartialShape ngraph::reduce(const PartialShape& shape, const AxisSet& deleted_ax
 }
 
 template <>
-PartialShape ngraph::inject_pairs(const PartialShape& shape,
-                                  std::vector<std::pair<size_t, Dimension>> new_axis_pos_value_pairs) {
+PartialShape inject_pairs(const PartialShape& shape,
+                          std::vector<std::pair<size_t, Dimension>> new_axis_pos_value_pairs) {
     if (shape.rank().is_dynamic()) {
         return shape;
     } else {
@@ -76,6 +75,7 @@ PartialShape ngraph::inject_pairs(const PartialShape& shape,
         return PartialShape{result_dims};
     }
 }
+}  // namespace ngraph
 
 namespace ov {
 template
diff --git a/src/core/src/util.cpp b/src/core/src/util.cpp
index 3c12bd5cb10881..49ae1575101e7b 100644
--- a/src/core/src/util.cpp
+++ b/src/core/src/util.cpp
@@ -13,21 +13,13 @@
 #include <unordered_map>
 #include <vector>
 
-#include "ngraph/coordinate_diff.hpp"
-#include "ngraph/function.hpp"
-#include "ngraph/graph_util.hpp"
-#include "ngraph/log.hpp"
-#include "ngraph/node.hpp"
-#include "ngraph/op/result.hpp"
-#include "ngraph/partial_shape.hpp"
-#include "ngraph/shape.hpp"
 #include "openvino/util/common_util.hpp"
 #include "openvino/util/log.hpp"
 
-NGRAPH_SUPPRESS_DEPRECATED_START
-using namespace std;
+OPENVINO_SUPPRESS_DEPRECATED_START
 
-void ngraph::dump(ostream& out, const void* _data, size_t _size) {
+namespace ngraph {
+void dump(std::ostream& out, const void* _data, 
size_t _size) { auto flags = out.flags(); const uint8_t* data = reinterpret_cast(_data); size_t len = _size; @@ -61,27 +53,27 @@ void ngraph::dump(ostream& out, const void* _data, size_t _size) { out.flags(flags); } -std::string ngraph::to_lower(const std::string& s) { +std::string to_lower(const std::string& s) { return ov::util::to_lower(s); } -std::string ngraph::to_upper(const std::string& s) { +std::string to_upper(const std::string& s) { return ov::util::to_upper(s); } -string ngraph::trim(const string& s) { +std::string trim(const std::string& s) { return ov::util::trim(s); } -vector ngraph::split(const string& src, char delimiter, bool do_trim) { +std::vector split(const std::string& src, char delimiter, bool do_trim) { return ov::util::split(src, delimiter, do_trim); } -size_t ngraph::hash_combine(const std::vector& list) { +size_t hash_combine(const std::vector& list) { return ov::util::hash_combine(list); } -void* ngraph::ngraph_malloc(size_t size) { +void* ngraph_malloc(size_t size) { auto ptr = malloc(size); if (size != 0 && !ptr) { OPENVINO_ERR << "malloc failed to allocate memory of size " << size; @@ -90,13 +82,13 @@ void* ngraph::ngraph_malloc(size_t size) { return ptr; } -void ngraph::ngraph_free(void* ptr) { +void ngraph_free(void* ptr) { if (ptr) { free(ptr); } } -size_t ngraph::round_up(size_t size, size_t alignment) { +size_t round_up(size_t size, size_t alignment) { if (alignment == 0) { return size; } @@ -109,27 +101,27 @@ size_t ngraph::round_up(size_t size, size_t alignment) { return size + alignment - remainder; } -size_t ngraph::stopwatch::get_call_count() const { +size_t stopwatch::get_call_count() const { return m_total_count; } -size_t ngraph::stopwatch::get_seconds() const { - return chrono::duration_cast(get_timer_value()).count(); +size_t stopwatch::get_seconds() const { + return std::chrono::duration_cast(get_timer_value()).count(); } -size_t ngraph::stopwatch::get_milliseconds() const { - return chrono::duration_cast(get_timer_value()).count(); +size_t stopwatch::get_milliseconds() const { + return std::chrono::duration_cast(get_timer_value()).count(); } -size_t ngraph::stopwatch::get_microseconds() const { - return chrono::duration_cast(get_timer_value()).count(); +size_t stopwatch::get_microseconds() const { + return std::chrono::duration_cast(get_timer_value()).count(); } -size_t ngraph::stopwatch::get_nanoseconds() const { +size_t stopwatch::get_nanoseconds() const { return get_timer_value().count(); } -chrono::nanoseconds ngraph::stopwatch::get_timer_value() const { +std::chrono::nanoseconds stopwatch::get_timer_value() const { if (m_active) { return (m_clock.now() - m_start_time); } else { @@ -137,23 +129,22 @@ chrono::nanoseconds ngraph::stopwatch::get_timer_value() const { } } -size_t ngraph::stopwatch::get_total_seconds() const { - return chrono::duration_cast(m_total_time).count(); +size_t stopwatch::get_total_seconds() const { + return std::chrono::duration_cast(m_total_time).count(); } -size_t ngraph::stopwatch::get_total_milliseconds() const { - return chrono::duration_cast(m_total_time).count(); +size_t stopwatch::get_total_milliseconds() const { + return std::chrono::duration_cast(m_total_time).count(); } -size_t ngraph::stopwatch::get_total_microseconds() const { - return chrono::duration_cast(m_total_time).count(); +size_t stopwatch::get_total_microseconds() const { + return std::chrono::duration_cast(m_total_time).count(); } -size_t ngraph::stopwatch::get_total_nanoseconds() const { +size_t stopwatch::get_total_nanoseconds() const { 
return m_total_time.count(); } -namespace ngraph { template <> float parse_string(const std::string& s) { const char* tmp = s.c_str(); @@ -201,40 +192,30 @@ uint8_t parse_string(const std::string& s) { return result; } -} // namespace ngraph -std::ostream& operator<<(std::ostream& os, const ngraph::NodeVector& nv) { - std::vector names; - for (auto n : nv) { - names.push_back(n->get_name()); - } - os << ngraph::vector_to_string(names); - return os; -} - -ngraph::AxisVector ngraph::get_default_order(const Shape& shape) { +AxisVector get_default_order(const Shape& shape) { return get_default_order(shape.size()); } -ngraph::AxisVector ngraph::get_default_order(const PartialShape& shape) { +AxisVector get_default_order(const PartialShape& shape) { return get_default_order(shape.rank()); } -ngraph::AxisVector ngraph::get_default_order(size_t rank) { +AxisVector get_default_order(size_t rank) { AxisVector default_order(rank); std::iota(begin(default_order), end(default_order), 0); return default_order; } -ngraph::AxisVector ngraph::get_default_order(const Rank& rank) { - NGRAPH_CHECK(rank.is_static(), "Can not calculate default order for dynamic rank"); +AxisVector get_default_order(const Rank& rank) { + OPENVINO_ASSERT(rank.is_static(), "Can not calculate default order for dynamic rank"); AxisVector default_order(rank.get_length()); std::iota(begin(default_order), end(default_order), 0); return default_order; } -void ngraph::parse_version_string(std::string version, size_t& major, size_t& minor, size_t& patch, string& extra) { +void parse_version_string(std::string version, size_t& major, size_t& minor, size_t& patch, std::string& extra) { // Since regex is broken in gcc 4.8 I will just manually parse the version string // Version strings look like `0.25.0-rc.0+7c32240` or `v0.25.0-rc.0+7c32240` size_t start; @@ -242,18 +223,18 @@ void ngraph::parse_version_string(std::string version, size_t& major, size_t& mi extra = ""; start = (version[0] == 'v' ? 
1 : 0); end = version.find_first_of('.', start); - string major_str = version.substr(start, end - start); + std::string major_str = version.substr(start, end - start); start = end + 1; end = version.find_first_of('.', start); - string minor_str = version.substr(start, end - start); + std::string minor_str = version.substr(start, end - start); start = end + 1; end = version.find_first_of("-+", start); - string patch_str = version.substr(start, end - start); + std::string patch_str = version.substr(start, end - start); start = end; - if (start != string::npos) { + if (start != std::string::npos) { extra = version.substr(start); } @@ -279,73 +260,74 @@ void ngraph::parse_version_string(std::string version, size_t& major, size_t& mi OPENVINO_THROW("Error parsing version string '", version, "'"); } } +} // namespace ngraph -vector read_float_vector(shared_ptr tv) { - vector float_vec; - ngraph::element::Type element_type = tv->get_element_type(); +std::vector read_float_vector(std::shared_ptr tv) { + std::vector float_vec; + ov::element::Type element_type = tv->get_element_type(); - if (element_type == ngraph::element::boolean) { - vector vec = read_vector(tv); + if (element_type == ov::element::boolean) { + std::vector vec = read_vector(tv); // Changed from vector ctor to explicit for loop to add static_cast // This silences MSVC warnings for (char value : vec) { float_vec.push_back(static_cast(value)); } - } else if (element_type == ngraph::element::bf16) { - vector vec = read_vector(tv); - float_vec = ngraph::bfloat16::to_float_vector(vec); - } else if (element_type == ngraph::element::f16) { - vector vec = read_vector(tv); - for (ngraph::float16 value : vec) { + } else if (element_type == ov::element::bf16) { + std::vector vec = read_vector(tv); + float_vec = ov::bfloat16::to_float_vector(vec); + } else if (element_type == ov::element::f16) { + std::vector vec = read_vector(tv); + for (ov::float16 value : vec) { float_vec.push_back(static_cast(value)); } - } else if (element_type == ngraph::element::f32) { - vector vec = read_vector(tv); + } else if (element_type == ov::element::f32) { + std::vector vec = read_vector(tv); for (float value : vec) { float_vec.push_back(static_cast(value)); } - } else if (element_type == ngraph::element::f64) { - vector vec = read_vector(tv); + } else if (element_type == ov::element::f64) { + std::vector vec = read_vector(tv); for (double value : vec) { float_vec.push_back(static_cast(value)); } - } else if (element_type == ngraph::element::i8) { - vector vec = read_vector(tv); + } else if (element_type == ov::element::i8) { + std::vector vec = read_vector(tv); for (int8_t value : vec) { float_vec.push_back(static_cast(value)); } - } else if (element_type == ngraph::element::i16) { - vector vec = read_vector(tv); + } else if (element_type == ov::element::i16) { + std::vector vec = read_vector(tv); for (int16_t value : vec) { float_vec.push_back(static_cast(value)); } - } else if (element_type == ngraph::element::i32) { - vector vec = read_vector(tv); + } else if (element_type == ov::element::i32) { + std::vector vec = read_vector(tv); for (int32_t value : vec) { float_vec.push_back(static_cast(value)); } - } else if (element_type == ngraph::element::i64) { - vector vec = read_vector(tv); + } else if (element_type == ov::element::i64) { + std::vector vec = read_vector(tv); for (int64_t value : vec) { float_vec.push_back(static_cast(value)); } - } else if (element_type == ngraph::element::u8) { - vector vec = read_vector(tv); + } else if (element_type == 
ov::element::u8) { + std::vector vec = read_vector(tv); for (uint8_t value : vec) { float_vec.push_back(static_cast(value)); } - } else if (element_type == ngraph::element::u16) { - vector vec = read_vector(tv); + } else if (element_type == ov::element::u16) { + std::vector vec = read_vector(tv); for (uint16_t value : vec) { float_vec.push_back(static_cast(value)); } - } else if (element_type == ngraph::element::u32) { - vector vec = read_vector(tv); + } else if (element_type == ov::element::u32) { + std::vector vec = read_vector(tv); for (uint32_t value : vec) { float_vec.push_back(static_cast(value)); } - } else if (element_type == ngraph::element::u64) { - vector vec = read_vector(tv); + } else if (element_type == ov::element::u64) { + std::vector vec = read_vector(tv); for (uint64_t value : vec) { float_vec.push_back(static_cast(value)); } @@ -356,72 +338,72 @@ vector read_float_vector(shared_ptr tv) { return float_vec; } -vector read_index_vector(shared_ptr tv) { - vector index_vec; - ngraph::element::Type element_type = tv->get_element_type(); +std::vector read_index_vector(std::shared_ptr tv) { + std::vector index_vec; + ov::element::Type element_type = tv->get_element_type(); - if (element_type == ngraph::element::boolean) { - vector vec = read_vector(tv); + if (element_type == ov::element::boolean) { + std::vector vec = read_vector(tv); // Changed from vector ctor to explicit for loop to add static_cast // This silences MSVC warnings for (char value : vec) { index_vec.push_back(static_cast(value)); } - } else if (element_type == ngraph::element::bf16) { - vector vec = read_vector(tv); - vector float_vec = ngraph::bfloat16::to_float_vector(vec); + } else if (element_type == ov::element::bf16) { + std::vector vec = read_vector(tv); + std::vector float_vec = ov::bfloat16::to_float_vector(vec); for (float value : float_vec) { index_vec.push_back(static_cast(value)); } - } else if (element_type == ngraph::element::f16) { - vector vec = read_vector(tv); - for (ngraph::float16 value : vec) { + } else if (element_type == ov::element::f16) { + std::vector vec = read_vector(tv); + for (ov::float16 value : vec) { index_vec.push_back(static_cast(static_cast(value))); } - } else if (element_type == ngraph::element::f32) { - vector vec = read_vector(tv); + } else if (element_type == ov::element::f32) { + std::vector vec = read_vector(tv); for (float value : vec) { index_vec.push_back(static_cast(value)); } - } else if (element_type == ngraph::element::f64) { - vector vec = read_vector(tv); + } else if (element_type == ov::element::f64) { + std::vector vec = read_vector(tv); for (double value : vec) { index_vec.push_back(static_cast(value)); } - } else if (element_type == ngraph::element::i8) { - vector vec = read_vector(tv); + } else if (element_type == ov::element::i8) { + std::vector vec = read_vector(tv); for (int8_t value : vec) { index_vec.push_back(static_cast(value)); } - } else if (element_type == ngraph::element::i16) { - vector vec = read_vector(tv); + } else if (element_type == ov::element::i16) { + std::vector vec = read_vector(tv); for (int16_t value : vec) { index_vec.push_back(static_cast(value)); } - } else if (element_type == ngraph::element::i32) { - vector vec = read_vector(tv); + } else if (element_type == ov::element::i32) { + std::vector vec = read_vector(tv); for (int32_t value : vec) { index_vec.push_back(static_cast(value)); } - } else if (element_type == ngraph::element::i64) { + } else if (element_type == ov::element::i64) { index_vec = read_vector(tv); - } else if 
(element_type == ngraph::element::u8) { - vector vec = read_vector(tv); + } else if (element_type == ov::element::u8) { + std::vector vec = read_vector(tv); for (uint8_t value : vec) { index_vec.push_back(static_cast(value)); } - } else if (element_type == ngraph::element::u16) { - vector vec = read_vector(tv); + } else if (element_type == ov::element::u16) { + std::vector vec = read_vector(tv); for (uint16_t value : vec) { index_vec.push_back(static_cast(value)); } - } else if (element_type == ngraph::element::u32) { - vector vec = read_vector(tv); + } else if (element_type == ov::element::u32) { + std::vector vec = read_vector(tv); for (uint32_t value : vec) { index_vec.push_back(static_cast(value)); } - } else if (element_type == ngraph::element::u64) { - vector vec = read_vector(tv); + } else if (element_type == ov::element::u64) { + std::vector vec = read_vector(tv); for (uint64_t value : vec) { index_vec.push_back(static_cast(value)); } diff --git a/src/core/src/validation_util.cpp b/src/core/src/validation_util.cpp index 3d2f72b8533825..2e1db9dd6864f1 100644 --- a/src/core/src/validation_util.cpp +++ b/src/core/src/validation_util.cpp @@ -5,36 +5,25 @@ #include "ngraph/validation_util.hpp" #include -#include -#include #include #include "bound_evaluate.hpp" #include "compare.hpp" #include "ngraph/evaluator.hpp" -#include "ngraph/op/concat.hpp" -#include "ngraph/op/convert.hpp" -#include "ngraph/op/gather.hpp" -#include "ngraph/op/min.hpp" -#include "ngraph/op/minimum.hpp" -#include "ngraph/op/reshape.hpp" -#include "ngraph/op/shape_of.hpp" -#include "ngraph/op/squeeze.hpp" -#include "ngraph/op/unsqueeze.hpp" -#include "ngraph/shape.hpp" -#include "ngraph/type/element_type_traits.hpp" -#include "ngraph/util.hpp" #include "openvino/core/dimension_tracker.hpp" +#include "openvino/op/concat.hpp" +#include "openvino/op/gather.hpp" #include "openvino/op/ops.hpp" #include "sequnce_generator.hpp" #include "validation_util.hpp" -NGRAPH_SUPPRESS_DEPRECATED_START -using namespace std; +OPENVINO_SUPPRESS_DEPRECATED_START -ngraph::Strides ngraph::conv_default_strides(const Node* /* node */, - const PartialShape& data_batch_shape, - const PartialShape& filters_shape) { +namespace ngraph { + +Strides conv_default_strides(const Node* /* node */, + const PartialShape& data_batch_shape, + const PartialShape& filters_shape) { size_t rank; if (data_batch_shape.rank().is_static() && data_batch_shape.rank().get_length() >= 2) { @@ -48,9 +37,9 @@ ngraph::Strides ngraph::conv_default_strides(const Node* /* node */, return Strides(rank, 1); } -ngraph::CoordinateDiff ngraph::conv_default_padding(const Node* /* node */, - const PartialShape& data_batch_shape, - const PartialShape& filters_shape) { +CoordinateDiff conv_default_padding(const Node* /* node */, + const PartialShape& data_batch_shape, + const PartialShape& filters_shape) { size_t rank; if (data_batch_shape.rank().is_static() && data_batch_shape.rank().get_length() >= 2) { @@ -71,16 +60,16 @@ ngraph::CoordinateDiff ngraph::conv_default_padding(const Node* /* node */, // TODO(amprocte): The messages here would be a bit friendlier if we didn't say "after // padding/after dilation" for cases where there is actually no padding/dilation. 
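
The signature changes below do not alter the arithmetic of infer_windowed_reduction_output_shape. As a reference point for reviewing the hunk, a standalone sketch of the per-dimension computation for the fully static case; the helper name and the ceil-mode rounding are illustrative reconstructions from the formula, not code copied out of this function:

    #include <cstdint>
    #include <iostream>

    // Hypothetical helper mirroring the per-spatial-dimension arithmetic.
    int64_t windowed_output_dim(int64_t data, int64_t data_dilation,
                                int64_t pad_below, int64_t pad_above,
                                int64_t window, int64_t window_dilation,
                                int64_t stride, bool ceil_mode) {
        const int64_t data_dilated = (data - 1) * data_dilation + 1 + pad_below + pad_above;
        const int64_t window_dilated = (window - 1) * window_dilation + 1;
        const int64_t span = data_dilated - window_dilated;
        return ceil_mode ? (span + stride - 1) / stride + 1 : span / stride + 1;
    }

    int main() {
        // 3x3 window, stride 2 over a 224 input padded by 1 on each side -> 112.
        std::cout << windowed_output_dim(224, 1, 1, 1, 3, 1, 2, false) << "\n";
    }
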
// -ngraph::PartialShape ngraph::infer_windowed_reduction_output_shape(const Node* node, - const PartialShape& data_shape, - const Strides& data_dilation, - const CoordinateDiff& data_padding_below, - const CoordinateDiff& data_padding_above, - const PartialShape& window_shape, - const Strides& window_strides, - const Strides& window_dilation, - bool is_window_all_in_padding_allowed, - bool ceil_mode) { +PartialShape infer_windowed_reduction_output_shape(const Node* node, + const PartialShape& data_shape, + const Strides& data_dilation, + const CoordinateDiff& data_padding_below, + const CoordinateDiff& data_padding_above, + const PartialShape& window_shape, + const Strides& window_strides, + const Strides& window_dilation, + bool is_window_all_in_padding_allowed, + bool ceil_mode) { PartialShape data_shape_merged{PartialShape::dynamic()}; NODE_VALIDATION_CHECK( @@ -203,13 +192,13 @@ ngraph::PartialShape ngraph::infer_windowed_reduction_output_shape(const Node* n return output_shape; } -void ngraph::validate_conv_params_spatial_dimensions(const Node* node, - const size_t num_spatial_dims, - const op::PadType auto_pad, - Strides& strides, - Strides& dilations, - CoordinateDiff& pads_begin, - CoordinateDiff& pads_end) { +void validate_conv_params_spatial_dimensions(const Node* node, + const size_t num_spatial_dims, + const op::PadType auto_pad, + Strides& strides, + Strides& dilations, + CoordinateDiff& pads_begin, + CoordinateDiff& pads_end) { if (strides.size() == 0) { strides = Strides(num_spatial_dims, 1); } @@ -233,15 +222,15 @@ void ngraph::validate_conv_params_spatial_dimensions(const Node* node, "Pads should be defined for all and only spatial features."); } -ngraph::PartialShape ngraph::validate_and_infer_convolution_forward_output_shape(const Node* node, - const Rank& result_ps_rank, - const PartialShape& data_batch_pshape, - const PartialShape& filters_pshape, - const op::PadType auto_pad, - Strides& strides, - Strides& dilations, - CoordinateDiff& pads_begin, - CoordinateDiff& pads_end) { +PartialShape validate_and_infer_convolution_forward_output_shape(const Node* node, + const Rank& result_ps_rank, + const PartialShape& data_batch_pshape, + const PartialShape& filters_pshape, + const op::PadType auto_pad, + Strides& strides, + Strides& dilations, + CoordinateDiff& pads_begin, + CoordinateDiff& pads_end) { PartialShape result_shape = PartialShape::dynamic(); if (result_ps_rank.is_static()) { const auto num_spatial_dims = result_ps_rank.get_length() - 2; @@ -267,7 +256,7 @@ ngraph::PartialShape ngraph::validate_and_infer_convolution_forward_output_shape pads_end.clear(); const PartialShape filter_spatial_shape = [filters_pshape]() { - vector filter_dims{filters_pshape}; + std::vector filter_dims{filters_pshape}; filter_dims.erase(filter_dims.begin(), filter_dims.begin() + 2); // Remove {C_OUT, C_IN} return PartialShape{filter_dims}; @@ -299,142 +288,18 @@ ngraph::PartialShape ngraph::validate_and_infer_convolution_forward_output_shape return result_shape; } -// -// Infers the output batch shape and element type for convolution fprop. 
-// -ngraph::PartialShape ov::infer_convolution_forward(const Node* node, - const PartialShape& data_batch_shape, - const Strides& data_dilation, - const CoordinateDiff& data_padding_below, - const CoordinateDiff& data_padding_above, - const PartialShape& filters_shape, - const Strides& filter_strides, - const Strides& filter_dilation) { - Rank data_batch_filters_rank{Rank::dynamic()}; - - NODE_VALIDATION_CHECK(node, - Rank::merge(data_batch_filters_rank, data_batch_shape.rank(), filters_shape.rank()), - "Data batch and filters rank do not match (data batch shape: ", - data_batch_shape, - ", filters shape: ", - filters_shape, - ")."); - - NODE_VALIDATION_CHECK(node, - data_batch_filters_rank.is_dynamic() || data_batch_filters_rank.get_length() >= 3, - "Data batch and filters must have rank of at least 3 (one batch axis, ", - "one input-channel axis, and at least one spatial dimension) ", - "(data batch shape: ", - data_batch_shape, - ", filters shape: ", - filters_shape, - ")."); - - Rank spatial_rank{Rank::dynamic()}; - NODE_VALIDATION_CHECK(node, - Rank::merge(spatial_rank, spatial_rank, data_batch_filters_rank - 2) && - Rank::merge(spatial_rank, spatial_rank, data_dilation.size()) && - Rank::merge(spatial_rank, spatial_rank, data_padding_below.size()) && - Rank::merge(spatial_rank, spatial_rank, data_padding_above.size()) && - Rank::merge(spatial_rank, spatial_rank, filter_strides.size()) && - Rank::merge(spatial_rank, spatial_rank, filter_dilation.size()), - "Ranks for data item shape/filters shape (data batch has shape ", - data_batch_shape, - ", so data item rank is ", - (data_batch_shape.rank() - 2), - " and filters have shape ", - filters_shape, - ", so filters spatial rank is ", - (filters_shape.rank() - 2), - "), data dilation (", - data_dilation, - "), padding below (", - data_padding_below, - "), padding above (", - data_padding_above, - "), filter strides (", - filter_strides, - "), and filter dilation (", - filter_dilation, - ") do not match."); - - Dimension batch_size = (data_batch_shape.rank().is_static() ? data_batch_shape[0] : Dimension::dynamic()); - Dimension data_channel_count = (data_batch_shape.rank().is_static() ? data_batch_shape[1] : Dimension::dynamic()); - PartialShape data_spatial_shape(PartialShape::dynamic(spatial_rank)); - - Dimension filter_output_channel_count = - (filters_shape.rank().is_static() ? filters_shape[0] : Dimension::dynamic()); - Dimension filter_input_channel_count = (filters_shape.rank().is_static() ? filters_shape[1] : Dimension::dynamic()); - PartialShape filter_spatial_shape(PartialShape::dynamic(spatial_rank)); - - // - // Note: spatial_rank is definitely static at this point. 
- // - - for (int64_t i = 0; i < spatial_rank.get_length(); i++) { - if (data_batch_shape.rank().is_static()) { - data_spatial_shape[i] = data_batch_shape[i + 2]; - } - - if (filters_shape.rank().is_static()) { - filter_spatial_shape[i] = filters_shape[i + 2]; - } - } - - NODE_VALIDATION_CHECK(node, batch_size.is_dynamic() || batch_size.get_length() > 0, "Batch size is zero."); - - Dimension merged_channel_count; - - NODE_VALIDATION_CHECK(node, - Dimension::merge(merged_channel_count, data_channel_count, filter_input_channel_count), - "Data batch channel count (", - data_channel_count, - ") does not match filter input ", - "channel count (", - filter_input_channel_count, - ")."); - - NODE_VALIDATION_CHECK(node, - merged_channel_count.is_dynamic() || merged_channel_count.get_length() > 0, - "Data batch channel count and/or filter input channel count is zero."); - - NODE_VALIDATION_CHECK(node, - filter_output_channel_count.is_dynamic() || filter_output_channel_count.get_length() > 0, - "Filter output channel count is zero."); - - PartialShape data_output_shape = ngraph::infer_windowed_reduction_output_shape(node, - data_spatial_shape, - data_dilation, - data_padding_below, - data_padding_above, - filter_spatial_shape, - filter_strides, - filter_dilation, - true); - - PartialShape batch_output_shape(PartialShape::dynamic(spatial_rank + 2)); - batch_output_shape[0] = batch_size; - batch_output_shape[1] = filter_output_channel_count; - - for (int64_t i = 0; i < spatial_rank.get_length(); i++) { - batch_output_shape[i + 2] = data_output_shape[i]; - } - - return batch_output_shape; -} - // // Infers the output batch shape and element type for batched pooling fprop. // -ngraph::PartialShape ngraph::infer_batched_pooling_forward(const Node* node, - const PartialShape& data_batch_shape, - const CoordinateDiff& data_padding_below, - const CoordinateDiff& data_padding_above, - const PartialShape& window_shape, - const Strides& window_strides, - bool is_window_all_in_padding_allowed, - bool ceil_mode, - const Strides& window_dilation) { +PartialShape infer_batched_pooling_forward(const Node* node, + const PartialShape& data_batch_shape, + const CoordinateDiff& data_padding_below, + const CoordinateDiff& data_padding_above, + const PartialShape& window_shape, + const Strides& window_strides, + bool is_window_all_in_padding_allowed, + bool ceil_mode, + const Strides& window_dilation) { NODE_VALIDATION_CHECK(node, data_batch_shape.rank().is_dynamic() || (data_batch_shape.rank().get_length() >= 3 && data_batch_shape.rank().get_length() <= 5), @@ -517,15 +382,15 @@ ngraph::PartialShape ngraph::infer_batched_pooling_forward(const Node* node, } struct ChannelShapedInputSpec { - ngraph::element::Type m_element_type; - ngraph::PartialShape m_shape; + element::Type m_element_type; + PartialShape m_shape; std::string m_input_name; }; -static std::tuple infer_batch_norm_forward_helper( - const ngraph::Node* node, - ngraph::element::Type input_element_type, - const ngraph::PartialShape& input_shape, +static std::tuple infer_batch_norm_forward_helper( + const Node* node, + element::Type input_element_type, + const PartialShape& input_shape, const std::vector& channel_shaped_inputs) { // Built up a slash-separated string naming all the channel-shaped inputs, for use in error // messages. 
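
The helper above validates that every channel-shaped input (gamma/beta/mean/variance) is a rank-1 tensor whose single dimension merges with the channel dimension of the data input. A compile-and-run sketch of that merge step using only the public PartialShape/Dimension API; the shapes are illustrative:

    #include <iostream>

    #include "openvino/core/partial_shape.hpp"

    int main() {
        ov::PartialShape input{ov::Dimension::dynamic(), 64, 224, 224};
        ov::PartialShape gamma{ov::Dimension::dynamic()};  // a channel-shaped input
        // The helper reduces each channel-shaped input against input[1]:
        ov::Dimension channel = input.rank().is_static() ? input[1] : ov::Dimension::dynamic();
        ov::Dimension merged;
        if (ov::Dimension::merge(merged, channel, gamma[0]))
            std::cout << "merged channel dim: " << merged << "\n";  // prints 64
        return 0;
    }
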
@@ -541,11 +406,11 @@ static std::tuple= 2, @@ -570,11 +435,11 @@ static std::tuple ngraph::infer_batch_norm_forward( - const Node* node, - element::Type input_element_type, - element::Type gamma_element_type, - element::Type beta_element_type, - element::Type mean_element_type, - element::Type variance_element_type, - const PartialShape& input_shape, - const PartialShape& gamma_shape, - const PartialShape& beta_shape, - const PartialShape& mean_shape, - const PartialShape& variance_shape) { +std::tuple infer_batch_norm_forward(const Node* node, + element::Type input_element_type, + element::Type gamma_element_type, + element::Type beta_element_type, + element::Type mean_element_type, + element::Type variance_element_type, + const PartialShape& input_shape, + const PartialShape& gamma_shape, + const PartialShape& beta_shape, + const PartialShape& mean_shape, + const PartialShape& variance_shape) { return infer_batch_norm_forward_helper(node, input_element_type, input_shape, @@ -634,14 +498,13 @@ std::tuple ng {variance_element_type, variance_shape, "variance"}}); } -std::tuple ngraph::infer_batch_norm_forward( - const Node* node, - element::Type input_element_type, - element::Type gamma_element_type, - element::Type beta_element_type, - const PartialShape& input_shape, - const PartialShape& gamma_shape, - const PartialShape& beta_shape) { +std::tuple infer_batch_norm_forward(const Node* node, + element::Type input_element_type, + element::Type gamma_element_type, + element::Type beta_element_type, + const PartialShape& input_shape, + const PartialShape& gamma_shape, + const PartialShape& beta_shape) { return infer_batch_norm_forward_helper( node, input_element_type, @@ -649,64 +512,13 @@ std::tuple ng {{gamma_element_type, gamma_shape, "gamma"}, {beta_element_type, beta_shape, "beta"}}); } -void ov::infer_auto_padding(const Shape& image_shape, +bool try_apply_auto_padding(const PartialShape& image_shape, const Shape& filter_shape, const Strides& filter_strides, const Strides& filter_dilations, const op::PadType pad_type, CoordinateDiff& padding_above, CoordinateDiff& padding_below) { - const auto image_dims = std::vector(std::begin(image_shape), std::end(image_shape)); - // because image_shape is fully known result of try_apply_infer_auto_padding is ignored - ov::util::try_apply_auto_padding(image_dims, - filter_shape, - filter_strides, - filter_dilations, - pad_type, - padding_above, - padding_below); -} - -bool ov::util::try_apply_auto_padding(const PartialShape& image_shape, - const Shape& filter_shape, - const Strides& filter_strides, - const Strides& filter_dilations, - const op::PadType pad_type, - CoordinateDiff& padding_above, - CoordinateDiff& padding_below) { - OPENVINO_ASSERT(pad_type == op::PadType::SAME_UPPER || pad_type == op::PadType::SAME_LOWER); - - if (image_shape.rank().is_dynamic()) { - return false; - } - const auto image_dims = static_cast>(image_shape); - for (size_t i = 0; i < static_cast(filter_shape.size()); i++) { - if (image_dims[i + 2].is_static()) { - auto image_size = static_cast(image_dims[i + 2].get_length()); - int64_t filter_size = (static_cast(filter_shape[i]) - 1) * filter_dilations[i] + 1; - auto filter_stride = static_cast(filter_strides[i]); - auto output_size = (image_size + filter_stride - 1) / filter_stride; - - auto padding_needed = std::max(int64_t(0), (output_size - 1) * filter_stride + filter_size - image_size); - auto padding_lhs = padding_needed / 2; - auto padding_rhs = padding_needed - padding_lhs; - padding_below.push_back(pad_type == 
op::PadType::SAME_UPPER ? padding_lhs : padding_rhs); - padding_above.push_back(pad_type == op::PadType::SAME_UPPER ? padding_rhs : padding_lhs); - } else { - padding_below.push_back(0); - padding_above.push_back(0); - } - } - return true; -} - -bool ngraph::try_apply_auto_padding(const PartialShape& image_shape, - const Shape& filter_shape, - const Strides& filter_strides, - const Strides& filter_dilations, - const op::PadType pad_type, - CoordinateDiff& padding_above, - CoordinateDiff& padding_below) { return ov::util::try_apply_auto_padding(image_shape, filter_shape, filter_strides, @@ -716,16 +528,16 @@ bool ngraph::try_apply_auto_padding(const PartialShape& image_shape, padding_below); } -ngraph::PartialShape ngraph::infer_slice_shape(const Node* node, - const PartialShape& input_shape, - const std::vector& begin, - const std::vector& end, - const std::vector& strides, - const AxisSet& begin_mask, - const AxisSet& end_mask, - const AxisSet& new_axis_mask, - const AxisSet& shrink_axis_mask, - const AxisSet& ellipsis_mask) { +PartialShape infer_slice_shape(const Node* node, + const PartialShape& input_shape, + const std::vector& begin, + const std::vector& end, + const std::vector& strides, + const AxisSet& begin_mask, + const AxisSet& end_mask, + const AxisSet& new_axis_mask, + const AxisSet& shrink_axis_mask, + const AxisSet& ellipsis_mask) { if (begin.size() && end.size()) { NODE_VALIDATION_CHECK(node, begin.size() == end.size(), @@ -869,146 +681,58 @@ ngraph::PartialShape ngraph::infer_slice_shape(const Node* node, return dim; } -namespace { -const auto normalize_axis_to = [](const int64_t& tensor_rank) { - return [&tensor_rank](int64_t& axis) { - if (axis < 0) { - axis += tensor_rank; - } - }; -}; - -std::string normalize_axis_error_msg(const int64_t& axis, const int64_t& lower, const int64_t& upper) { - return std::string(" Parameter axis ") - .append(to_string(axis)) - .append(" out of the tensor rank range [") - .append(to_string(lower)) - .append(", ") - .append(to_string(upper)) - .append("]."); -} -} // namespace +void opset1::infer_conv_backprop_auto_padding(const Shape& input_data_shape, + const Shape& filters_shape, + const Shape& output_shape, + const Strides& strides, + const Strides& dilations, + const op::PadType auto_pad_type, + const CoordinateDiff& output_padding, + CoordinateDiff& pads_begin, + CoordinateDiff& pads_end) { + OPENVINO_ASSERT(auto_pad_type == op::PadType::SAME_UPPER || auto_pad_type == op::PadType::SAME_LOWER); -int64_t ov::util::normalize(const int64_t& value, const int64_t& max) { - return (value < 0) ? value + max : value; -}; + size_t num_spatial_dims = input_data_shape.size(); + OPENVINO_ASSERT(filters_shape.size() == num_spatial_dims && strides.size() == num_spatial_dims && + dilations.size() == num_spatial_dims && pads_begin.size() == num_spatial_dims && + pads_end.size() == num_spatial_dims && output_padding.size() == num_spatial_dims); -void ov::normalize_axes(const Node* node, const int64_t& tensor_rank, std::vector& axes) { - const auto axis_checker = cmp::Between(-tensor_rank, tensor_rank ? 
(tensor_rank - 1) : 0); - const auto invalid_axis = std::find_if_not(axes.cbegin(), axes.cend(), axis_checker); - NODE_VALIDATION_CHECK(node, - invalid_axis == axes.cend(), - normalize_axis_error_msg(*invalid_axis, axis_checker.lower(), axis_checker.upper())); - std::for_each(axes.begin(), axes.end(), normalize_axis_to(tensor_rank)); -} + pads_begin = CoordinateDiff(num_spatial_dims); + pads_end = CoordinateDiff(num_spatial_dims); -std::vector ov::normalize_axes(const std::string& node_description, - const std::vector& axes, - const Rank& tensor_rank) { - std::vector new_axes; - new_axes.reserve(axes.size()); - for (const auto& axis : axes) { - new_axes.push_back(normalize_axis(node_description, axis, tensor_rank)); + for (uint64_t i = 0; i < num_spatial_dims; ++i) { + int total_padding = std::max( + static_cast(strides[i] * (input_data_shape[i] - 1) + dilations[i] * (filters_shape[i] - 1) + 1 - + output_shape[i] + output_padding[i]), + 0); + if (auto_pad_type != op::PadType::SAME_UPPER) { + pads_begin[i] = total_padding / 2; + pads_end[i] = total_padding - pads_begin[i]; + } else { + pads_end[i] = total_padding / 2; + pads_begin[i] = total_padding - pads_end[i]; + } } - return new_axes; -} - -int64_t ov::normalize_axis(const Node* node, std::int64_t axis, const Rank& tensor_rank) { - return normalize_axis(node->description(), axis, tensor_rank); } -int64_t ov::normalize_axis(const std::string& node_description, std::int64_t axis, const Rank& tensor_rank) { - if (axis < 0) { - // Handling negative axis requires static tensor rank - NGRAPH_CHECK(tensor_rank.is_static(), - node_description, - " Rank must be static in order to normalize negative axis=", - axis); - } - if (tensor_rank.is_dynamic()) { - return axis; +namespace { +/// \brief Scalar variant describes value of an Output, for use in max shape determination +/// +/// For tensor values, we use the maximum value in the tensor +struct MaxValue { + /// \brief No information known about the output + MaxValue() = default; + /// \brief uint64_t assoiated with the output + MaxValue(uint64_t value) : m_value(value) {} + MaxValue(const std::vector& slices, int64_t slice_axis) : m_slices(slices), m_slice_axis(slice_axis) { + m_value = *max_element(m_slices.begin(), m_slices.end()); } + uint64_t m_value{std::numeric_limits::max()}; + std::vector m_slices; + int64_t m_slice_axis{-1}; +}; - const auto tensor_rank_value = tensor_rank.get_length(); - return normalize_axis(node_description, - axis, - tensor_rank_value, - -tensor_rank_value, - tensor_rank_value ? (tensor_rank_value - 1) : 0); -} - -int64_t ov::normalize_axis(const Node* node, - std::int64_t axis, - std::uint64_t tensor_rank, - std::int64_t axis_range_min, - std::int64_t axis_range_max) { - return normalize_axis(node->description(), axis, tensor_rank, axis_range_min, axis_range_max); -} - -int64_t ov::normalize_axis(const std::string& node_description, - std::int64_t axis, - std::uint64_t tensor_rank, - std::int64_t axis_range_min, - std::int64_t axis_range_max) { - // Accepted range of value for axis is [axis_range_min, axis_range_max]. 
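
A worked example of the padding split performed by infer_conv_backprop_auto_padding, moved into the unprefixed ngraph namespace above. The numbers are illustrative; the expressions mirror the loop body of that function:

    #include <algorithm>
    #include <cstdint>
    #include <iostream>

    int main() {
        const int64_t stride = 2, dilation = 1, in = 5, filter = 4, out = 9, out_pad = 0;
        const int64_t total = std::max<int64_t>(
            stride * (in - 1) + dilation * (filter - 1) + 1 - out + out_pad, 0);  // == 3
        const int64_t half = total / 2;
        // SAME_LOWER (and everything that is not SAME_UPPER) gives the extra pixel
        // to the end; SAME_UPPER gives it to the beginning.
        std::cout << "SAME_LOWER: begin=" << half << " end=" << total - half << "\n";  // 1, 2
        std::cout << "SAME_UPPER: begin=" << total - half << " end=" << half << "\n";  // 2, 1
        return 0;
    }
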
- OPENVINO_ASSERT((axis_range_min <= axis) && (axis <= axis_range_max), - node_description, - normalize_axis_error_msg(axis, axis_range_min, axis_range_max)); - return util::normalize(axis, tensor_rank); -} - -void ngraph::opset1::infer_conv_backprop_auto_padding(const Shape& input_data_shape, - const Shape& filters_shape, - const Shape& output_shape, - const Strides& strides, - const Strides& dilations, - const op::PadType auto_pad_type, - const CoordinateDiff& output_padding, - CoordinateDiff& pads_begin, - CoordinateDiff& pads_end) { - NGRAPH_CHECK(auto_pad_type == op::PadType::SAME_UPPER || auto_pad_type == op::PadType::SAME_LOWER); - - size_t num_spatial_dims = input_data_shape.size(); - NGRAPH_CHECK(filters_shape.size() == num_spatial_dims && strides.size() == num_spatial_dims && - dilations.size() == num_spatial_dims && pads_begin.size() == num_spatial_dims && - pads_end.size() == num_spatial_dims && output_padding.size() == num_spatial_dims); - - pads_begin = CoordinateDiff(num_spatial_dims); - pads_end = CoordinateDiff(num_spatial_dims); - - for (uint64_t i = 0; i < num_spatial_dims; ++i) { - int total_padding = std::max( - static_cast(strides[i] * (input_data_shape[i] - 1) + dilations[i] * (filters_shape[i] - 1) + 1 - - output_shape[i] + output_padding[i]), - 0); - if (auto_pad_type != op::PadType::SAME_UPPER) { - pads_begin[i] = total_padding / 2; - pads_end[i] = total_padding - pads_begin[i]; - } else { - pads_end[i] = total_padding / 2; - pads_begin[i] = total_padding - pads_end[i]; - } - } -} - -namespace { -/// \brief Scalar variant describes value of an Output, for use in max shape determination -/// -/// For tensor values, we use the maximum value in the tensor -struct MaxValue { - /// \brief No information known about the output - MaxValue() = default; - /// \brief uint64_t assoiated with the output - MaxValue(uint64_t value) : m_value(value) {} - MaxValue(const vector& slices, int64_t slice_axis) : m_slices(slices), m_slice_axis(slice_axis) { - m_value = *max_element(m_slices.begin(), m_slices.end()); - } - uint64_t m_value{numeric_limits::max()}; - vector m_slices; - int64_t m_slice_axis{-1}; -}; - -vector exec_constant(ngraph::Node* node, vector& inputs) { +std::vector exec_constant(Node* node, std::vector& inputs) { auto result = MaxValue(); auto op = ov::as_type(node); auto element_type = op->get_output_element_type(0); @@ -1032,44 +756,44 @@ vector exec_constant(ngraph::Node* node, vector& inputs) { return {result}; } -vector exec_minimum(ngraph::Node* node, vector& inputs) { - uint64_t min_value = numeric_limits::max(); +std::vector exec_minimum(Node* node, std::vector& inputs) { + uint64_t min_value = std::numeric_limits::max(); switch (node->get_output_element_type(0)) { - case ngraph::element::Type_t::i8: - min_value = numeric_limits::max(); + case element::Type_t::i8: + min_value = std::numeric_limits::max(); break; - case ngraph::element::Type_t::i16: - min_value = numeric_limits::max(); + case element::Type_t::i16: + min_value = std::numeric_limits::max(); break; - case ngraph::element::Type_t::i32: - min_value = numeric_limits::max(); + case element::Type_t::i32: + min_value = std::numeric_limits::max(); break; - case ngraph::element::Type_t::i64: - min_value = numeric_limits::max(); + case element::Type_t::i64: + min_value = std::numeric_limits::max(); break; - case ngraph::element::Type_t::u8: - min_value = numeric_limits::max(); + case element::Type_t::u8: + min_value = std::numeric_limits::max(); break; - case ngraph::element::Type_t::u16: - min_value = 
numeric_limits::max(); + case element::Type_t::u16: + min_value = std::numeric_limits::max(); break; - case ngraph::element::Type_t::u32: - min_value = numeric_limits::max(); + case element::Type_t::u32: + min_value = std::numeric_limits::max(); break; - case ngraph::element::Type_t::u64: - min_value = numeric_limits::max(); + case element::Type_t::u64: + min_value = std::numeric_limits::max(); break; default: break; } - min_value = min(min_value, inputs.at(0).m_value); - min_value = min(min_value, inputs.at(1).m_value); + min_value = std::min(min_value, inputs.at(0).m_value); + min_value = std::min(min_value, inputs.at(1).m_value); return {MaxValue(min_value)}; } -vector exec_concat(ngraph::Node* node, vector& inputs) { - auto op = ov::as_type(node); - vector slice_maxen; +std::vector exec_concat(Node* node, std::vector& inputs) { + auto op = ov::as_type(node); + std::vector slice_maxen; for (const auto& input : inputs) { slice_maxen.push_back(input.m_value); } @@ -1077,13 +801,13 @@ vector exec_concat(ngraph::Node* node, vector& inputs) { return {MaxValue(slice_maxen, axis)}; } -vector exec_reduce_min(ngraph::Node* node, vector& inputs) { +std::vector exec_reduce_min(Node* node, std::vector& inputs) { auto data = inputs.at(0); if (data.m_slice_axis >= 0 && data.m_slices.size() > 1) { - if (auto indices_const = ov::as_type(node->get_input_node_ptr(1))) { + if (auto indices_const = ov::as_type(node->get_input_node_ptr(1))) { if (indices_const->get_output_element_type(0).is_integral()) { const auto& indices_shape = indices_const->get_output_shape(0); - if (indices_shape == ngraph::Shape{1}) { + if (indices_shape == Shape{1}) { auto indices = indices_const->cast_vector(); auto axis = indices.at(0); if (axis == data.m_slice_axis) { @@ -1097,7 +821,7 @@ vector exec_reduce_min(ngraph::Node* node, vector& inputs) { return {MaxValue(data.m_value)}; } -vector exec_shape_of(ngraph::Node* node, vector& inputs) { +std::vector exec_shape_of(Node* node, std::vector& inputs) { const auto& inputPS = node->get_input_partial_shape(0); std::vector shapeDims; for (int64_t i = 0; i < inputPS.rank().get_length(); i++) { @@ -1111,11 +835,11 @@ vector exec_shape_of(ngraph::Node* node, vector& inputs) { return {MaxValue(shapeDims, 0)}; } -vector exec_gather(ngraph::Node* node, vector& inputs) { - auto gather = ov::as_type(node); +std::vector exec_gather(Node* node, std::vector& inputs) { + auto gather = ov::as_type(node); - const auto& indices = ov::as_type_ptr(node->input_value(1).get_node_shared_ptr()); - const auto& axis = ov::as_type_ptr(node->input_value(2).get_node_shared_ptr()); + const auto& indices = ov::as_type_ptr(node->input_value(1).get_node_shared_ptr()); + const auto& axis = ov::as_type_ptr(node->input_value(2).get_node_shared_ptr()); if (!indices || !axis) { return {MaxValue()}; @@ -1133,32 +857,33 @@ vector exec_gather(ngraph::Node* node, vector& inputs) { return {MaxValue(inputs[0].m_slices[indicesVec[0]])}; } -vector exec_nop(ngraph::Node* node, vector& inputs) { +std::vector exec_nop(Node* node, std::vector& inputs) { return {inputs.at(0)}; } } // namespace -pair ngraph::maximum_value(const Output& value) { - static Evaluator::op_handler_map handlers = {{op::v0::Concat::get_type_info_static(), exec_concat}, - {op::v0::Constant::get_type_info_static(), exec_constant}, - {op::v0::Convert::get_type_info_static(), exec_nop}, - {op::v1::Gather::get_type_info_static(), exec_gather}, - {op::v1::Minimum::get_type_info_static(), exec_minimum}, - {op::v1::ReduceMin::get_type_info_static(), 
exec_reduce_min}, - {op::v1::Reshape::get_type_info_static(), exec_nop}, - {op::v3::ShapeOf::get_type_info_static(), exec_shape_of}, - {op::v0::Squeeze::get_type_info_static(), exec_nop}, - {op::v0::Unsqueeze::get_type_info_static(), exec_nop}}; +std::pair maximum_value(const Output& value) { + static ngraph::Evaluator::op_handler_map handlers = { + {ov::op::v0::Concat::get_type_info_static(), exec_concat}, + {ov::op::v0::Constant::get_type_info_static(), exec_constant}, + {ov::op::v0::Convert::get_type_info_static(), exec_nop}, + {ov::op::v1::Gather::get_type_info_static(), exec_gather}, + {ov::op::v1::Minimum::get_type_info_static(), exec_minimum}, + {ov::op::v1::ReduceMin::get_type_info_static(), exec_reduce_min}, + {ov::op::v1::Reshape::get_type_info_static(), exec_nop}, + {ov::op::v3::ShapeOf::get_type_info_static(), exec_shape_of}, + {ov::op::v0::Squeeze::get_type_info_static(), exec_nop}, + {ov::op::v0::Unsqueeze::get_type_info_static(), exec_nop}}; Evaluator::value_map value_map; Evaluator evaluator(handlers, value_map); auto val = evaluator.evaluate(value); - return pair(val.m_value < numeric_limits::max(), val.m_value); + return std::pair(val.m_value < std::numeric_limits::max(), val.m_value); } -void ngraph::evaluate_nodes(std::map& value_map, - std::map& output_tensor_map, - const OutputVector& outputs, - const EvaluationContext& evaluation_context) { +void evaluate_nodes(std::map& value_map, + std::map& output_tensor_map, + const OutputVector& outputs, + const EvaluationContext& evaluation_context) { Evaluator evaluator({}, value_map); evaluator.set_universal_handler( [&output_tensor_map, &evaluation_context](Node* node, @@ -1167,7 +892,7 @@ void ngraph::evaluate_nodes(std::map& value_map, for (const auto& v : node->outputs()) { auto it = output_tensor_map.find(v); if (it == output_tensor_map.end()) { - auto c = make_shared(v); + auto c = std::make_shared(v); output_tensors.push_back(c); } else { output_tensors.push_back(it->second); @@ -1176,7 +901,7 @@ void ngraph::evaluate_nodes(std::map& value_map, if (node->evaluate(output_tensors, input_tensors, evaluation_context)) { return output_tensors; } else { - NGRAPH_CHECK(false, "Evaluation failed on ", node); + OPENVINO_THROW("Evaluation failed on ", node); } }); for (const auto& value : outputs) { @@ -1184,45 +909,8 @@ void ngraph::evaluate_nodes(std::map& value_map, } } -bool ov::evaluate_as_partial_shape(const Output& output, PartialShape& pshape) { - Tensor lb, ub; - std::tie(lb, ub) = ov::evaluate_both_bounds(output); - bool shape_defined = false; - if (lb && ub) { - auto lower_bound = std::make_shared(lb.get_element_type(), lb.get_shape(), lb.data()) - ->cast_vector(); - auto upper_bound = std::make_shared(ub.get_element_type(), ub.get_shape(), ub.data()) - ->cast_vector(); - NGRAPH_CHECK(lower_bound.size() == upper_bound.size()); - const TensorLabel& labels = output.get_tensor().get_value_label(); - NGRAPH_CHECK(labels.empty() || lower_bound.size() == labels.size()); - - vector resulting_pshape(lower_bound.size()); - for (size_t i = 0; i < lower_bound.size(); ++i) { - auto low = lower_bound[i], up = upper_bound[i]; - NGRAPH_CHECK(low >= 0 && up >= 0, "Value for partial shape evaluation can't be lower than zero."); - if (output.get_element_type() == element::i32 && low != up) { - if (up == std::numeric_limits::max()) - up = std::numeric_limits::max(); - if (low == std::numeric_limits::max()) - low = std::numeric_limits::max(); - } - resulting_pshape[i] = {low, up}; - if (!labels.empty() && labels[i]) - 
ov::DimensionTracker::set_label(resulting_pshape[i], labels[i]); - } - pshape = PartialShape(resulting_pshape); - shape_defined = true; - } - return shape_defined; -} - -bool ov::default_label_evaluator(const Node* node, TensorLabelVector& output_labels) { - return default_label_evaluator(node, {0}, output_labels); -} - -shared_ptr ngraph::get_constant_max_of_type(element::Type_t t) { -#define NGRAPH_TYPE_TO_MAX_CONST(t) \ +std::shared_ptr get_constant_max_of_type(element::Type_t t) { +#define OPENVINO_TYPE_TO_MAX_CONST(t) \ case t: \ return ov::op::v0::Constant::create( \ t, \ @@ -1231,27 +919,27 @@ shared_ptr ngraph::get_constant_max_of_type(element::T break switch (t) { - NGRAPH_TYPE_TO_MAX_CONST(element::boolean); - NGRAPH_TYPE_TO_MAX_CONST(element::bf16); - NGRAPH_TYPE_TO_MAX_CONST(element::f16); - NGRAPH_TYPE_TO_MAX_CONST(element::f32); - NGRAPH_TYPE_TO_MAX_CONST(element::f64); - NGRAPH_TYPE_TO_MAX_CONST(element::i8); - NGRAPH_TYPE_TO_MAX_CONST(element::i16); - NGRAPH_TYPE_TO_MAX_CONST(element::i32); - NGRAPH_TYPE_TO_MAX_CONST(element::i64); - NGRAPH_TYPE_TO_MAX_CONST(element::u1); - NGRAPH_TYPE_TO_MAX_CONST(element::u8); - NGRAPH_TYPE_TO_MAX_CONST(element::u16); - NGRAPH_TYPE_TO_MAX_CONST(element::u32); - NGRAPH_TYPE_TO_MAX_CONST(element::u64); + OPENVINO_TYPE_TO_MAX_CONST(element::boolean); + OPENVINO_TYPE_TO_MAX_CONST(element::bf16); + OPENVINO_TYPE_TO_MAX_CONST(element::f16); + OPENVINO_TYPE_TO_MAX_CONST(element::f32); + OPENVINO_TYPE_TO_MAX_CONST(element::f64); + OPENVINO_TYPE_TO_MAX_CONST(element::i8); + OPENVINO_TYPE_TO_MAX_CONST(element::i16); + OPENVINO_TYPE_TO_MAX_CONST(element::i32); + OPENVINO_TYPE_TO_MAX_CONST(element::i64); + OPENVINO_TYPE_TO_MAX_CONST(element::u1); + OPENVINO_TYPE_TO_MAX_CONST(element::u8); + OPENVINO_TYPE_TO_MAX_CONST(element::u16); + OPENVINO_TYPE_TO_MAX_CONST(element::u32); + OPENVINO_TYPE_TO_MAX_CONST(element::u64); default: return nullptr; } } -shared_ptr ngraph::get_constant_min_of_type(element::Type_t t) { -#define NGRAPH_TYPE_TO_MIN_CONST(t) \ +std::shared_ptr get_constant_min_of_type(element::Type_t t) { +#define OPENVINO_TYPE_TO_MIN_CONST(t) \ case t: \ return ov::op::v0::Constant::create( \ t, \ @@ -1260,27 +948,27 @@ shared_ptr ngraph::get_constant_min_of_type(element::T break switch (t) { - NGRAPH_TYPE_TO_MIN_CONST(element::boolean); - NGRAPH_TYPE_TO_MIN_CONST(element::bf16); - NGRAPH_TYPE_TO_MIN_CONST(element::f16); - NGRAPH_TYPE_TO_MIN_CONST(element::f32); - NGRAPH_TYPE_TO_MIN_CONST(element::f64); - NGRAPH_TYPE_TO_MIN_CONST(element::i8); - NGRAPH_TYPE_TO_MIN_CONST(element::i16); - NGRAPH_TYPE_TO_MIN_CONST(element::i32); - NGRAPH_TYPE_TO_MIN_CONST(element::i64); - NGRAPH_TYPE_TO_MIN_CONST(element::u1); - NGRAPH_TYPE_TO_MIN_CONST(element::u8); - NGRAPH_TYPE_TO_MIN_CONST(element::u16); - NGRAPH_TYPE_TO_MIN_CONST(element::u32); - NGRAPH_TYPE_TO_MIN_CONST(element::u64); + OPENVINO_TYPE_TO_MIN_CONST(element::boolean); + OPENVINO_TYPE_TO_MIN_CONST(element::bf16); + OPENVINO_TYPE_TO_MIN_CONST(element::f16); + OPENVINO_TYPE_TO_MIN_CONST(element::f32); + OPENVINO_TYPE_TO_MIN_CONST(element::f64); + OPENVINO_TYPE_TO_MIN_CONST(element::i8); + OPENVINO_TYPE_TO_MIN_CONST(element::i16); + OPENVINO_TYPE_TO_MIN_CONST(element::i32); + OPENVINO_TYPE_TO_MIN_CONST(element::i64); + OPENVINO_TYPE_TO_MIN_CONST(element::u1); + OPENVINO_TYPE_TO_MIN_CONST(element::u8); + OPENVINO_TYPE_TO_MIN_CONST(element::u16); + OPENVINO_TYPE_TO_MIN_CONST(element::u32); + OPENVINO_TYPE_TO_MIN_CONST(element::u64); default: return nullptr; } } -std::shared_ptr 
ngraph::get_constant_lowest_of_type(element::Type_t t) { -#define NGRAPH_TYPE_TO_LOWEST_CONST(t) \ +std::shared_ptr get_constant_lowest_of_type(element::Type_t t) { +#define OPENVINO_TYPE_TO_LOWEST_CONST(t) \ case t: \ return op::v0::Constant::create(t, \ {}, \ @@ -1288,20 +976,20 @@ std::shared_ptr ngraph::get_constant_lowest_of_type(el break switch (t) { - NGRAPH_TYPE_TO_LOWEST_CONST(element::boolean); - NGRAPH_TYPE_TO_LOWEST_CONST(element::bf16); - NGRAPH_TYPE_TO_LOWEST_CONST(element::f16); - NGRAPH_TYPE_TO_LOWEST_CONST(element::f32); - NGRAPH_TYPE_TO_LOWEST_CONST(element::f64); - NGRAPH_TYPE_TO_LOWEST_CONST(element::i8); - NGRAPH_TYPE_TO_LOWEST_CONST(element::i16); - NGRAPH_TYPE_TO_LOWEST_CONST(element::i32); - NGRAPH_TYPE_TO_LOWEST_CONST(element::i64); - NGRAPH_TYPE_TO_LOWEST_CONST(element::u1); - NGRAPH_TYPE_TO_LOWEST_CONST(element::u8); - NGRAPH_TYPE_TO_LOWEST_CONST(element::u16); - NGRAPH_TYPE_TO_LOWEST_CONST(element::u32); - NGRAPH_TYPE_TO_LOWEST_CONST(element::u64); + OPENVINO_TYPE_TO_LOWEST_CONST(element::boolean); + OPENVINO_TYPE_TO_LOWEST_CONST(element::bf16); + OPENVINO_TYPE_TO_LOWEST_CONST(element::f16); + OPENVINO_TYPE_TO_LOWEST_CONST(element::f32); + OPENVINO_TYPE_TO_LOWEST_CONST(element::f64); + OPENVINO_TYPE_TO_LOWEST_CONST(element::i8); + OPENVINO_TYPE_TO_LOWEST_CONST(element::i16); + OPENVINO_TYPE_TO_LOWEST_CONST(element::i32); + OPENVINO_TYPE_TO_LOWEST_CONST(element::i64); + OPENVINO_TYPE_TO_LOWEST_CONST(element::u1); + OPENVINO_TYPE_TO_LOWEST_CONST(element::u8); + OPENVINO_TYPE_TO_LOWEST_CONST(element::u16); + OPENVINO_TYPE_TO_LOWEST_CONST(element::u32); + OPENVINO_TYPE_TO_LOWEST_CONST(element::u64); case element::undefined: case element::dynamic: @@ -1310,15 +998,193 @@ std::shared_ptr ngraph::get_constant_lowest_of_type(el } } -shared_ptr ov::get_constant_from_source(const Output& source) { - return ov::util::get_constant_from_source(source); -} - -bool ngraph::validate_host_tensor_vector(const HostTensorVector& tensor_vector, const size_t& size) { +bool validate_host_tensor_vector(const HostTensorVector& tensor_vector, const size_t& size) { return (tensor_vector.size() == size) && std::none_of(tensor_vector.cbegin(), tensor_vector.cend(), ov::cmp::Equal(nullptr)); } +} // namespace ngraph + +void ov::infer_auto_padding(const Shape& image_shape, + const Shape& filter_shape, + const Strides& filter_strides, + const Strides& filter_dilations, + const op::PadType pad_type, + CoordinateDiff& padding_above, + CoordinateDiff& padding_below) { + const auto image_dims = std::vector(std::begin(image_shape), std::end(image_shape)); + // because image_shape is fully known result of try_apply_infer_auto_padding is ignored + ov::util::try_apply_auto_padding(image_dims, + filter_shape, + filter_strides, + filter_dilations, + pad_type, + padding_above, + padding_below); +} + +bool ov::util::try_apply_auto_padding(const PartialShape& image_shape, + const Shape& filter_shape, + const Strides& filter_strides, + const Strides& filter_dilations, + const op::PadType pad_type, + CoordinateDiff& padding_above, + CoordinateDiff& padding_below) { + OPENVINO_ASSERT(pad_type == op::PadType::SAME_UPPER || pad_type == op::PadType::SAME_LOWER); + + if (image_shape.rank().is_dynamic()) { + return false; + } + const auto image_dims = static_cast>(image_shape); + for (size_t i = 0; i < static_cast(filter_shape.size()); i++) { + if (image_dims[i + 2].is_static()) { + auto image_size = static_cast(image_dims[i + 2].get_length()); + int64_t filter_size = (static_cast(filter_shape[i]) - 1) * 
filter_dilations[i] + 1; + auto filter_stride = static_cast(filter_strides[i]); + auto output_size = (image_size + filter_stride - 1) / filter_stride; + + auto padding_needed = std::max(int64_t(0), (output_size - 1) * filter_stride + filter_size - image_size); + auto padding_lhs = padding_needed / 2; + auto padding_rhs = padding_needed - padding_lhs; + padding_below.push_back(pad_type == op::PadType::SAME_UPPER ? padding_lhs : padding_rhs); + padding_above.push_back(pad_type == op::PadType::SAME_UPPER ? padding_rhs : padding_lhs); + } else { + padding_below.push_back(0); + padding_above.push_back(0); + } + } + return true; +} + +namespace { +const auto normalize_axis_to = [](const int64_t& tensor_rank) { + return [&tensor_rank](int64_t& axis) { + if (axis < 0) { + axis += tensor_rank; + } + }; +}; + +std::string normalize_axis_error_msg(const int64_t& axis, const int64_t& lower, const int64_t& upper) { + return std::string(" Parameter axis ") + .append(std::to_string(axis)) + .append(" out of the tensor rank range [") + .append(std::to_string(lower)) + .append(", ") + .append(std::to_string(upper)) + .append("]."); +} +} // namespace + +int64_t ov::util::normalize(const int64_t& value, const int64_t& max) { + return (value < 0) ? value + max : value; +}; + +void ov::normalize_axes(const Node* node, const int64_t& tensor_rank, std::vector& axes) { + const auto axis_checker = cmp::Between(-tensor_rank, tensor_rank ? (tensor_rank - 1) : 0); + const auto invalid_axis = std::find_if_not(axes.cbegin(), axes.cend(), axis_checker); + NODE_VALIDATION_CHECK(node, + invalid_axis == axes.cend(), + normalize_axis_error_msg(*invalid_axis, axis_checker.lower(), axis_checker.upper())); + std::for_each(axes.begin(), axes.end(), normalize_axis_to(tensor_rank)); +} + +std::vector ov::normalize_axes(const std::string& node_description, + const std::vector& axes, + const Rank& tensor_rank) { + std::vector new_axes; + new_axes.reserve(axes.size()); + for (const auto& axis : axes) { + new_axes.push_back(normalize_axis(node_description, axis, tensor_rank)); + } + return new_axes; +} + +int64_t ov::normalize_axis(const Node* node, std::int64_t axis, const Rank& tensor_rank) { + return normalize_axis(node->description(), axis, tensor_rank); +} + +int64_t ov::normalize_axis(const std::string& node_description, std::int64_t axis, const Rank& tensor_rank) { + if (axis < 0) { + // Handling negative axis requires static tensor rank + OPENVINO_ASSERT(tensor_rank.is_static(), + node_description, + " Rank must be static in order to normalize negative axis=", + axis); + } + if (tensor_rank.is_dynamic()) { + return axis; + } + + const auto tensor_rank_value = tensor_rank.get_length(); + return normalize_axis(node_description, + axis, + tensor_rank_value, + -tensor_rank_value, + tensor_rank_value ? (tensor_rank_value - 1) : 0); +} + +int64_t ov::normalize_axis(const Node* node, + std::int64_t axis, + std::uint64_t tensor_rank, + std::int64_t axis_range_min, + std::int64_t axis_range_max) { + return normalize_axis(node->description(), axis, tensor_rank, axis_range_min, axis_range_max); +} + +int64_t ov::normalize_axis(const std::string& node_description, + std::int64_t axis, + std::uint64_t tensor_rank, + std::int64_t axis_range_min, + std::int64_t axis_range_max) { + // Accepted range of value for axis is [axis_range_min, axis_range_max]. 
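+    // For illustration: with tensor_rank = 4 the rank-based overload above passes
+    // the bounds [-4, 3], so axis = -1 is accepted and maps to 3 in util::normalize,
+    // while axis = 4 trips the assert below.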
+ OPENVINO_ASSERT((axis_range_min <= axis) && (axis <= axis_range_max), + node_description, + normalize_axis_error_msg(axis, axis_range_min, axis_range_max)); + return util::normalize(axis, tensor_rank); +} + +bool ov::evaluate_as_partial_shape(const Output& output, PartialShape& pshape) { + Tensor lb, ub; + std::tie(lb, ub) = ov::evaluate_both_bounds(output); + bool shape_defined = false; + if (lb && ub) { + auto lower_bound = std::make_shared(lb.get_element_type(), lb.get_shape(), lb.data()) + ->cast_vector(); + auto upper_bound = std::make_shared(ub.get_element_type(), ub.get_shape(), ub.data()) + ->cast_vector(); + OPENVINO_ASSERT(lower_bound.size() == upper_bound.size()); + const TensorLabel& labels = output.get_tensor().get_value_label(); + OPENVINO_ASSERT(labels.empty() || lower_bound.size() == labels.size()); + + std::vector resulting_pshape(lower_bound.size()); + for (size_t i = 0; i < lower_bound.size(); ++i) { + auto low = lower_bound[i], up = upper_bound[i]; + OPENVINO_ASSERT(low >= 0 && up >= 0, "Value for partial shape evaluation can't be lower than zero."); + if (output.get_element_type() == element::i32 && low != up) { + if (up == std::numeric_limits::max()) + up = std::numeric_limits::max(); + if (low == std::numeric_limits::max()) + low = std::numeric_limits::max(); + } + resulting_pshape[i] = {low, up}; + if (!labels.empty() && labels[i]) + ov::DimensionTracker::set_label(resulting_pshape[i], labels[i]); + } + pshape = PartialShape(resulting_pshape); + shape_defined = true; + } + return shape_defined; +} + +bool ov::default_label_evaluator(const Node* node, TensorLabelVector& output_labels) { + return default_label_evaluator(node, {0}, output_labels); +} + +std::shared_ptr ov::get_constant_from_source(const Output& source) { + return ov::util::get_constant_from_source(source); +} + bool ov::has_no_labels(const ov::TensorLabel& labels) { return std::all_of(labels.cbegin(), labels.cend(), cmp::Equal(no_label)); } @@ -1381,6 +1247,130 @@ std::shared_ptr ov::util::constantfold_subgraph(const Outp return ov::as_type_ptr(outputs[subgraph_sink.get_index()].get_node_shared_ptr()); } +// +// Infers the output batch shape and element type for convolution fprop. 
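+// For a static spatial axis with unit data dilation, the spatial output size
+// follows the usual windowed-reduction formula (integer division):
+//   out = (in + pad_below + pad_above - (filter - 1) * filter_dilation - 1) / stride + 1;
+// the general case is delegated to infer_windowed_reduction_output_shape below.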
+// +ov::PartialShape ov::infer_convolution_forward(const Node* node, + const PartialShape& data_batch_shape, + const Strides& data_dilation, + const CoordinateDiff& data_padding_below, + const CoordinateDiff& data_padding_above, + const PartialShape& filters_shape, + const Strides& filter_strides, + const Strides& filter_dilation) { + Rank data_batch_filters_rank{Rank::dynamic()}; + + NODE_VALIDATION_CHECK(node, + Rank::merge(data_batch_filters_rank, data_batch_shape.rank(), filters_shape.rank()), + "Data batch and filters rank do not match (data batch shape: ", + data_batch_shape, + ", filters shape: ", + filters_shape, + ")."); + + NODE_VALIDATION_CHECK(node, + data_batch_filters_rank.is_dynamic() || data_batch_filters_rank.get_length() >= 3, + "Data batch and filters must have rank of at least 3 (one batch axis, ", + "one input-channel axis, and at least one spatial dimension) ", + "(data batch shape: ", + data_batch_shape, + ", filters shape: ", + filters_shape, + ")."); + + Rank spatial_rank{Rank::dynamic()}; + NODE_VALIDATION_CHECK(node, + Rank::merge(spatial_rank, spatial_rank, data_batch_filters_rank - 2) && + Rank::merge(spatial_rank, spatial_rank, data_dilation.size()) && + Rank::merge(spatial_rank, spatial_rank, data_padding_below.size()) && + Rank::merge(spatial_rank, spatial_rank, data_padding_above.size()) && + Rank::merge(spatial_rank, spatial_rank, filter_strides.size()) && + Rank::merge(spatial_rank, spatial_rank, filter_dilation.size()), + "Ranks for data item shape/filters shape (data batch has shape ", + data_batch_shape, + ", so data item rank is ", + (data_batch_shape.rank() - 2), + " and filters have shape ", + filters_shape, + ", so filters spatial rank is ", + (filters_shape.rank() - 2), + "), data dilation (", + data_dilation, + "), padding below (", + data_padding_below, + "), padding above (", + data_padding_above, + "), filter strides (", + filter_strides, + "), and filter dilation (", + filter_dilation, + ") do not match."); + + Dimension batch_size = (data_batch_shape.rank().is_static() ? data_batch_shape[0] : Dimension::dynamic()); + Dimension data_channel_count = (data_batch_shape.rank().is_static() ? data_batch_shape[1] : Dimension::dynamic()); + PartialShape data_spatial_shape(PartialShape::dynamic(spatial_rank)); + + Dimension filter_output_channel_count = + (filters_shape.rank().is_static() ? filters_shape[0] : Dimension::dynamic()); + Dimension filter_input_channel_count = (filters_shape.rank().is_static() ? filters_shape[1] : Dimension::dynamic()); + PartialShape filter_spatial_shape(PartialShape::dynamic(spatial_rank)); + + // + // Note: spatial_rank is definitely static at this point. 
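+    // (The Rank::merge calls above unified spatial_rank with the static sizes of
+    // data_dilation, both paddings, filter_strides and filter_dilation, so a
+    // dynamic spatial_rank cannot reach this line.)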
+ // + + for (int64_t i = 0; i < spatial_rank.get_length(); i++) { + if (data_batch_shape.rank().is_static()) { + data_spatial_shape[i] = data_batch_shape[i + 2]; + } + + if (filters_shape.rank().is_static()) { + filter_spatial_shape[i] = filters_shape[i + 2]; + } + } + + NODE_VALIDATION_CHECK(node, batch_size.is_dynamic() || batch_size.get_length() > 0, "Batch size is zero."); + + Dimension merged_channel_count; + + NODE_VALIDATION_CHECK(node, + Dimension::merge(merged_channel_count, data_channel_count, filter_input_channel_count), + "Data batch channel count (", + data_channel_count, + ") does not match filter input ", + "channel count (", + filter_input_channel_count, + ")."); + + NODE_VALIDATION_CHECK(node, + merged_channel_count.is_dynamic() || merged_channel_count.get_length() > 0, + "Data batch channel count and/or filter input channel count is zero."); + + NODE_VALIDATION_CHECK(node, + filter_output_channel_count.is_dynamic() || filter_output_channel_count.get_length() > 0, + "Filter output channel count is zero."); + + PartialShape data_output_shape = ngraph::infer_windowed_reduction_output_shape(node, + data_spatial_shape, + data_dilation, + data_padding_below, + data_padding_above, + filter_spatial_shape, + filter_strides, + filter_dilation, + true); + + PartialShape batch_output_shape(PartialShape::dynamic(spatial_rank + 2)); + batch_output_shape[0] = batch_size; + batch_output_shape[1] = filter_output_channel_count; + + for (int64_t i = 0; i < spatial_rank.get_length(); i++) { + batch_output_shape[i + 2] = data_output_shape[i]; + } + + return batch_output_shape; +} + namespace ov { namespace util { using ov::op::v0::Constant; diff --git a/src/inference/src/ie_core.cpp b/src/inference/src/ie_core.cpp index 97dc6382aaccea..a5babd9192768c 100644 --- a/src/inference/src/ie_core.cpp +++ b/src/inference/src/ie_core.cpp @@ -31,10 +31,6 @@ #include "ie_plugin_config.hpp" #include "ie_remote_context.hpp" #include "itt.hpp" -#include "ngraph/graph_util.hpp" -#include "ngraph/ngraph.hpp" -#include "ngraph/opsets/opset.hpp" -#include "ngraph/pass/constant_folding.hpp" #include "openvino/core/except.hpp" #include "openvino/core/so_extension.hpp" #include "openvino/op/parameter.hpp" From ba5878ed2ffe79f5a6cbc71f89308d4b06bb471f Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Wed, 11 Oct 2023 10:36:00 +0400 Subject: [PATCH 07/13] Removed np.int usage (#20378) --- tools/mo/openvino/tools/mo/front/common/partial_infer/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/mo/openvino/tools/mo/front/common/partial_infer/utils.py b/tools/mo/openvino/tools/mo/front/common/partial_infer/utils.py index ef788469ba4574..6f87eebcb41a04 100644 --- a/tools/mo/openvino/tools/mo/front/common/partial_infer/utils.py +++ b/tools/mo/openvino/tools/mo/front/common/partial_infer/utils.py @@ -296,7 +296,7 @@ def get_shape_from_slice(input_shape: np.ndarray, slices: List) -> np.ndarray: in_idx += 1 elif s is np.newaxis: output_shape.append(1) - elif type(s) in [int, np.int, np.int32, np.int64]: # shrink_axis + elif type(s) in [int, np.int32, np.int64]: # shrink_axis in_idx += 1 elif s is Ellipsis: for idx in range(num_ellipsis_inserts): From ec644b9a732d73ab266007bfb30d32d3f3be9e49 Mon Sep 17 00:00:00 2001 From: Sergey Shlyapnikov Date: Wed, 11 Oct 2023 10:47:33 +0400 Subject: [PATCH 08/13] [GPU] Fix device tensors reallocation in case of host user's tensors (#20306) --- .../intel_gpu/plugin/remote_tensor.hpp | 2 +- .../intel_gpu/src/plugin/remote_tensor.cpp | 2 +- 
.../src/plugin/sync_infer_request.cpp | 25 +++++++++++++++++-- .../functional/behavior/infer_request.cpp | 23 +++++++++++++++++ 4 files changed, 48 insertions(+), 4 deletions(-) diff --git a/src/plugins/intel_gpu/include/intel_gpu/plugin/remote_tensor.hpp b/src/plugins/intel_gpu/include/intel_gpu/plugin/remote_tensor.hpp index 939c7b89784fc9..74a07bbcbf38bf 100644 --- a/src/plugins/intel_gpu/include/intel_gpu/plugin/remote_tensor.hpp +++ b/src/plugins/intel_gpu/include/intel_gpu/plugin/remote_tensor.hpp @@ -52,6 +52,7 @@ class RemoteTensorImpl : public ov::IRemoteTensor { bool is_allocated() const noexcept; bool is_surface() const noexcept; + bool is_shared() const noexcept; cldnn::memory::ptr get_memory() const; cldnn::memory::ptr get_original_memory() const; @@ -74,7 +75,6 @@ class RemoteTensorImpl : public ov::IRemoteTensor { uint32_t m_plane; size_t m_hash = 0; - bool is_shared() const; bool supports_caching() const; void update_strides(); void init_properties(); diff --git a/src/plugins/intel_gpu/src/plugin/remote_tensor.cpp b/src/plugins/intel_gpu/src/plugin/remote_tensor.cpp index cd164940027be7..a7c68cd8f81107 100644 --- a/src/plugins/intel_gpu/src/plugin/remote_tensor.cpp +++ b/src/plugins/intel_gpu/src/plugin/remote_tensor.cpp @@ -169,7 +169,7 @@ const std::string& RemoteTensorImpl::get_device_name() const { return m_context->get_device_name(); } -bool RemoteTensorImpl::is_shared() const { +bool RemoteTensorImpl::is_shared() const noexcept { return m_mem_type == TensorType::BT_BUF_SHARED || m_mem_type == TensorType::BT_USM_SHARED || m_mem_type == TensorType::BT_IMG_SHARED || diff --git a/src/plugins/intel_gpu/src/plugin/sync_infer_request.cpp b/src/plugins/intel_gpu/src/plugin/sync_infer_request.cpp index 5e564f3b9a3ec5..6e9e8bbf353803 100644 --- a/src/plugins/intel_gpu/src/plugin/sync_infer_request.cpp +++ b/src/plugins/intel_gpu/src/plugin/sync_infer_request.cpp @@ -270,10 +270,31 @@ void SyncInferRequest::set_tensor(const ov::Output& port, const bool is_input = ov::op::util::is_parameter(port.get_node()); + auto update_tensors_maps = [](const std::string& name, + std::unordered_map& user_tensors, + std::unordered_map& plugin_tensors, + const ov::SoPtr& tensor) { + auto current_tensor_owner = user_tensors[name].owner; + auto is_same_tensor = user_tensors[name].ptr == tensor._ptr; + + // Keep PLUGIN as a tensor owner if current user's tensor owner is PLUGIN and underlying tensor pointer is not changed + auto new_tensor_owner = current_tensor_owner == TensorOwner::PLUGIN && is_same_tensor ? 
TensorOwner::PLUGIN
+                                                                                              : TensorOwner::USER;
+
+        user_tensors[name] = { tensor._ptr, new_tensor_owner };
+
+        // We need to properly handle PLUGIN -> USER ownership change to prevent invalid PLUGIN's usm_host buffer sharing,
+        // so remove plugin's tensor to reallocate it in prepare_input() method
+        if (current_tensor_owner == TensorOwner::PLUGIN && new_tensor_owner == TensorOwner::USER) {
+            if (plugin_tensors.count(name) && std::dynamic_pointer_cast(plugin_tensors[name].ptr)->is_shared())
+                plugin_tensors.erase(plugin_tensors.find(name));
+        }
+    };
+
     if (is_input) {
-        m_user_inputs[name] = { tensor._ptr, TensorOwner::USER };
+        update_tensors_maps(name, m_user_inputs, m_plugin_inputs, tensor);
     } else {
-        m_user_outputs[name] = { tensor._ptr, TensorOwner::USER };
+        update_tensors_maps(name, m_user_outputs, m_plugin_outputs, tensor);
     }

     ov::ISyncInferRequest::set_tensor(port, tensor);
diff --git a/src/plugins/intel_gpu/tests/functional/behavior/infer_request.cpp b/src/plugins/intel_gpu/tests/functional/behavior/infer_request.cpp
index 1d64aa74232d7c..af0229d5e81d8b 100644
--- a/src/plugins/intel_gpu/tests/functional/behavior/infer_request.cpp
+++ b/src/plugins/intel_gpu/tests/functional/behavior/infer_request.cpp
@@ -199,3 +199,26 @@ TEST(TensorTest, smoke_canSetTensorForDynamicInput) {
     ASSERT_NO_THROW(inf_req.set_input_tensor(t2));
     ASSERT_NO_THROW(inf_req.infer());
 }
+
+TEST(TensorTest, smoke_canReallocateDeviceInputForHostTensor) {
+    auto ov = ov::Core();
+    using namespace ov::preprocess;
+    auto p = PrePostProcessor(ngraph::builder::subgraph::makeSplitMultiConvConcat());
+    p.input().tensor().set_element_type(ov::element::i8);
+    p.input().preprocess().convert_element_type(ov::element::f32);
+    auto function = p.build();
+
+    auto compiled_model = ov.compile_model(function, ov::test::utils::DEVICE_GPU);
+    auto inf_req = compiled_model.create_infer_request();
+
+    auto input = function->input();
+    ov::Tensor host_tensor(input.get_element_type(), input.get_shape());
+
+    // Infer with pre-allocated input tensor
+    ASSERT_NO_THROW(inf_req.infer());
+
+    // Infer with host_tensor
+    ASSERT_NO_THROW(inf_req.set_input_tensor(host_tensor));
+    ASSERT_NO_THROW(inf_req.infer());
+}
+

From 346893fe6fe4c4c85e8447a96d957e31611e74d9 Mon Sep 17 00:00:00 2001
From: Ilya Churaev 
Date: Wed, 11 Oct 2023 11:43:27 +0400
Subject: [PATCH 09/13] Move cpu subgraph to new api (#20335)

* Move ConvEltwiseFuse to new API

* Move subgraph tests till LSTM to new API

* Fixed GPU initialization

* Remove unsupported GNA tests

---
 .../subgraph_tests/conv_strides_opt.cpp      | 40 +++++-----
 .../convert_pad_to_group_conv.cpp            | 67 +++++++--------
 .../get_output_before_activation.cpp         | 44 +++++------
 .../matmul_const_transposes_extraction.cpp   | 40 +++++-----
 .../subgraph_tests/matmul_multiply_fusion.cpp | 52 ++++++------
 .../subgraph_tests/matmul_squeeze_add.cpp    | 57 +++++----------
 .../get_output_before_activation.cpp         | 26 +++----
 .../subgraph_tests/matmul_squeeze_add.cpp    | 12 ++-
 .../get_output_before_activation.cpp         | 40 +++++-----
 .../subgraph_tests/matmul_squeeze_add.cpp    | 57 +++++----------
 .../subgraph_tests/conv_strides_opt.hpp      |  9 ++-
 .../convert_pad_to_group_conv.hpp            |  9 ++-
 .../get_output_before_activation.hpp         |  8 +-
 .../matmul_const_transposes_extraction.hpp   | 11 ++-
 .../subgraph_tests/matmul_multiply_fusion.hpp | 12 ++-
 .../subgraph_tests/matmul_squeeze_add.hpp    |  8 +-
 .../subgraph/conv_eltwise_fusion.hpp         |  1 -
 .../subgraph/conv_strides_opt.hpp            | 32 ++++----
 .../subgraph/convert_pad_to_group_conv.hpp   | 44 +++++------ 
.../subgraph/get_output_before_activation.hpp | 37 +++++----- .../matmul_const_transposes_extraction.hpp | 38 +++++----- .../subgraph/matmul_multiply_fusion.hpp | 42 +++++------ .../subgraph/matmul_squeeze_add.hpp | 26 +++---- .../src/subgraph/conv_eltwise_fusion.cpp | 2 + .../src/subgraph/conv_strides_opt.cpp | 22 +++--- .../subgraph/convert_pad_to_group_conv.cpp | 33 +++++---- .../subgraph/get_output_before_activation.cpp | 52 +++++++------ .../matmul_const_transposes_extraction.cpp | 70 ++++++++++-------- .../src/subgraph/matmul_multiply_fusion.cpp | 73 ++++++++++--------- .../src/subgraph/matmul_squeeze_add.cpp | 57 +++++++++------ 30 files changed, 515 insertions(+), 506 deletions(-) diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/conv_strides_opt.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/conv_strides_opt.cpp index 6cedcfbbf88241..f33b57d639138f 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/conv_strides_opt.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/conv_strides_opt.cpp @@ -2,28 +2,30 @@ // SPDX-License-Identifier: Apache-2.0 // -#include - #include "subgraph_tests/conv_strides_opt.hpp" -#include "common_test_utils/test_constants.hpp" -using namespace SubgraphTestsDefinitions; +#include + +using namespace ov::test; namespace { - std::vector input_shapes{ - ngraph::Shape{1, 1, 4, 4}, - ngraph::Shape{1, 64, 56, 56}, - }; - std::vector pads{ - ngraph::op::PadType::SAME_UPPER, - ngraph::op::PadType::SAME_LOWER, - ngraph::op::PadType::EXPLICIT, - }; - INSTANTIATE_TEST_SUITE_P(smoke_Convolution_StridesOpt, ConvStridesOpt, - ::testing::Combine( - ::testing::ValuesIn(input_shapes), - ::testing::ValuesIn(pads), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ConvStridesOpt::getTestCaseName); + +std::vector input_shapes{ + ov::Shape{1, 1, 4, 4}, + ov::Shape{1, 64, 56, 56}, +}; + +std::vector pads{ + ov::op::PadType::SAME_UPPER, + ov::op::PadType::SAME_LOWER, + ov::op::PadType::EXPLICIT, +}; + +INSTANTIATE_TEST_SUITE_P(smoke_Convolution_StridesOpt, + ConvStridesOpt, + ::testing::Combine(::testing::ValuesIn(input_shapes), + ::testing::ValuesIn(pads), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ConvStridesOpt::getTestCaseName); } // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/convert_pad_to_group_conv.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/convert_pad_to_group_conv.cpp index 627094f4e717ad..d23a35201b9db7 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/convert_pad_to_group_conv.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/convert_pad_to_group_conv.cpp @@ -2,42 +2,41 @@ // SPDX-License-Identifier: Apache-2.0 // -#include - #include "subgraph_tests/convert_pad_to_group_conv.hpp" -#include "common_test_utils/test_constants.hpp" -using namespace SubgraphTestsDefinitions; +#include + +using namespace ov::test; namespace { - const std::vector> pads_1d{ - {0, 0, 0}, {0, 0, 1}, {0, 2, 0}, {3, 0, 0} - }; - - const std::vector values{0., 1.}; - - INSTANTIATE_TEST_SUITE_P(smoke_Pad_1D, ConvertPadToConvTests, - ::testing::Combine( - ::testing::Values(ngraph::Shape{1, 8, 64}), - ::testing::ValuesIn(pads_1d), - ::testing::ValuesIn(pads_1d), - ::testing::ValuesIn(values), - ::testing::Values(ngraph::op::PadMode::CONSTANT), - 
::testing::Values(ov::test::utils::DEVICE_CPU)), - ConvertPadToConvTests::getTestCaseName); - - const std::vector> pads_2d{ - {0, 0, 0, 0}, {0, 0, 1, 2}, {0, 0, 2, 1}, - {0, 0, 10, 10}, {0, 0, 0, 4}, {0, 0, 4, 0} - }; - - INSTANTIATE_TEST_SUITE_P(smoke_Pad_2D, ConvertPadToConvTests, - ::testing::Combine( - ::testing::Values(ngraph::Shape{1, 8, 64, 16}), - ::testing::ValuesIn(pads_2d), - ::testing::ValuesIn(pads_2d), - ::testing::ValuesIn(values), - ::testing::Values(ngraph::op::PadMode::CONSTANT), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ConvertPadToConvTests::getTestCaseName); +const std::vector> pads_1d{{0, 0, 0}, {0, 0, 1}, {0, 2, 0}, {3, 0, 0}}; + +const std::vector values{0., 1.}; + +INSTANTIATE_TEST_SUITE_P(smoke_Pad_1D, + ConvertPadToConvTests, + ::testing::Combine(::testing::Values(ov::Shape{1, 8, 64}), + ::testing::ValuesIn(pads_1d), + ::testing::ValuesIn(pads_1d), + ::testing::ValuesIn(values), + ::testing::Values(ov::op::PadMode::CONSTANT), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ConvertPadToConvTests::getTestCaseName); + +const std::vector> pads_2d{{0, 0, 0, 0}, + {0, 0, 1, 2}, + {0, 0, 2, 1}, + {0, 0, 10, 10}, + {0, 0, 0, 4}, + {0, 0, 4, 0}}; + +INSTANTIATE_TEST_SUITE_P(smoke_Pad_2D, + ConvertPadToConvTests, + ::testing::Combine(::testing::Values(ov::Shape{1, 8, 64, 16}), + ::testing::ValuesIn(pads_2d), + ::testing::ValuesIn(pads_2d), + ::testing::ValuesIn(values), + ::testing::Values(ov::op::PadMode::CONSTANT), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ConvertPadToConvTests::getTestCaseName); } // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/get_output_before_activation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/get_output_before_activation.cpp index ca731518e9feea..4b9feeed52d5b3 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/get_output_before_activation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/get_output_before_activation.cpp @@ -2,33 +2,27 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include "common_test_utils/test_constants.hpp" +#include "subgraph_tests/get_output_before_activation.hpp" + +namespace ov { +namespace test { -namespace SubgraphTestsDefinitions { namespace { - std::vector input_sizes = { - 80, - 32, - 64, - 100 - }; +std::vector input_sizes = {80, 32, 64, 100}; + +std::vector midLayerTypes{midOutputType::Mul, midOutputType::Sub, midOutputType::Sum}; - std::vector midLayerTypes { - midOutputType::Mul, - midOutputType::Sub, - midOutputType::Sum - }; +ov::AnyMap additional_config = {}; +} // namespace - std::map additional_config = {}; -} // namespace +INSTANTIATE_TEST_SUITE_P(OutputBeforeActivation, + OutputBeforeActivation, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::Values(ov::element::f32), + ::testing::ValuesIn(input_sizes), + ::testing::ValuesIn(midLayerTypes), + ::testing::Values(additional_config)), + OutputBeforeActivation::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(OutputBeforeActivation, OutputBeforeActivation, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::Values(InferenceEngine::Precision::FP32), - ::testing::ValuesIn(input_sizes), - ::testing::ValuesIn(midLayerTypes), - ::testing::Values(additional_config)), - OutputBeforeActivation::getTestCaseName); -} // namespace SubgraphTestsDefinitions +} // namespace test +} // namespace ov diff --git 
a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/matmul_const_transposes_extraction.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/matmul_const_transposes_extraction.cpp index 41ef64232302f2..1851fd30789f05 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/matmul_const_transposes_extraction.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/matmul_const_transposes_extraction.cpp @@ -4,7 +4,7 @@ #include "subgraph_tests/matmul_const_transposes_extraction.hpp" -using namespace SubgraphTestsDefinitions; +using namespace ov::test; namespace { std::vector shape_params = { @@ -21,12 +21,12 @@ std::vector shape_params = { {{2, 3, 5, 10}, {1, 1, 10, 1}, false}, }; -INSTANTIATE_TEST_SUITE_P(smoke_MatMulConstTransposesExtractionTest, MatMulConstTransposesExtractionTest, - ::testing::Combine( - ::testing::ValuesIn(shape_params), - ::testing::Values(true), // can be fused - ::testing::Values(ov::test::utils::DEVICE_CPU)), - MatMulConstTransposesExtractionTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_MatMulConstTransposesExtractionTest, + MatMulConstTransposesExtractionTest, + ::testing::Combine(::testing::ValuesIn(shape_params), + ::testing::Values(true), // can be fused + ::testing::Values(ov::test::utils::DEVICE_CPU)), + MatMulConstTransposesExtractionTest::getTestCaseName); std::vector negative_shape_params = { {{5}, {5}, false}, @@ -46,12 +46,12 @@ std::vector negative_shape_param {{2, 3, 5, 10}, {2, 3, 10, 7}, false}, }; -INSTANTIATE_TEST_SUITE_P(smoke_NegativeMatMulConstTransposesExtractionTest, MatMulConstTransposesExtractionTest, - ::testing::Combine( - ::testing::ValuesIn(negative_shape_params), - ::testing::Values(false), // cannot be fused - ::testing::Values(ov::test::utils::DEVICE_CPU)), - MatMulConstTransposesExtractionTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_NegativeMatMulConstTransposesExtractionTest, + MatMulConstTransposesExtractionTest, + ::testing::Combine(::testing::ValuesIn(negative_shape_params), + ::testing::Values(false), // cannot be fused + ::testing::Values(ov::test::utils::DEVICE_CPU)), + MatMulConstTransposesExtractionTest::getTestCaseName); std::vector shape_params2 = { {{2, 2}, {2, 2}, false}, @@ -63,11 +63,11 @@ std::vector shape_params2 = { {{2, 3, 5, 10}, {1, 1, 10, 7}, false}, }; -INSTANTIATE_TEST_SUITE_P(smoke_QuantizedMatMulConstTransposesExtractionTest, QuantizedMatMulConstTransposesExtractionTest, - ::testing::Combine( - ::testing::ValuesIn(shape_params2), - ::testing::Values(true), // can be fused - ::testing::Values(ov::test::utils::DEVICE_CPU)), - QuantizedMatMulConstTransposesExtractionTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_QuantizedMatMulConstTransposesExtractionTest, + QuantizedMatMulConstTransposesExtractionTest, + ::testing::Combine(::testing::ValuesIn(shape_params2), + ::testing::Values(true), // can be fused + ::testing::Values(ov::test::utils::DEVICE_CPU)), + QuantizedMatMulConstTransposesExtractionTest::getTestCaseName); -} // namespace +} // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/matmul_multiply_fusion.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/matmul_multiply_fusion.cpp index 1f10491ad5bf90..026a7595ed9381 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/matmul_multiply_fusion.cpp +++ 
b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/matmul_multiply_fusion.cpp @@ -4,7 +4,7 @@ #include "subgraph_tests/matmul_multiply_fusion.hpp" -using namespace SubgraphTestsDefinitions; +using namespace ov::test; namespace { std::vector shape_params = { @@ -67,12 +67,12 @@ std::vector shape_params = { {{2, 3, 5, 10}, {2, 3, 7, 10}, true, {2, 3, 1, 7}}, }; -INSTANTIATE_TEST_SUITE_P(smoke_MatMulMultiplyFusion, MatMulMultiplyFusion, - ::testing::Combine( - ::testing::ValuesIn(shape_params), - ::testing::Values(true), // can be fused - ::testing::Values(ov::test::utils::DEVICE_CPU)), - MatMulMultiplyFusion::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_MatMulMultiplyFusion, + MatMulMultiplyFusion, + ::testing::Combine(::testing::ValuesIn(shape_params), + ::testing::Values(true), // can be fused + ::testing::Values(ov::test::utils::DEVICE_CPU)), + MatMulMultiplyFusion::getTestCaseName); std::vector negative_shape_params = { {{5}, {5}, false, {1}}, @@ -108,12 +108,12 @@ std::vector negative_shape_params = { {{2, 3, 5, 10}, {2, 3, 10, 7}, false, {1, 1, 1, 1, 7}}, }; -INSTANTIATE_TEST_SUITE_P(smoke_NegativeMatMulMultiplyFusion, MatMulMultiplyFusion, - ::testing::Combine( - ::testing::ValuesIn(negative_shape_params), - ::testing::Values(false), // cannot be fused - ::testing::Values(ov::test::utils::DEVICE_CPU)), - MatMulMultiplyFusion::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_NegativeMatMulMultiplyFusion, + MatMulMultiplyFusion, + ::testing::Combine(::testing::ValuesIn(negative_shape_params), + ::testing::Values(false), // cannot be fused + ::testing::Values(ov::test::utils::DEVICE_CPU)), + MatMulMultiplyFusion::getTestCaseName); std::vector shape_params2 = { {{2, 2}, {2, 2}, false, {}}, @@ -158,12 +158,12 @@ std::vector shape_params2 = { {{2, 3, 5, 10}, {2, 3, 7, 10}, true, {2, 3, 1, 7}}, }; -INSTANTIATE_TEST_SUITE_P(smoke_QuantizedMatMulMultiplyFusion, QuantizedMatMulMultiplyFusion, - ::testing::Combine( - ::testing::ValuesIn(shape_params2), - ::testing::Values(true), // can be fused - ::testing::Values(ov::test::utils::DEVICE_CPU)), - QuantizedMatMulMultiplyFusion::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_QuantizedMatMulMultiplyFusion, + QuantizedMatMulMultiplyFusion, + ::testing::Combine(::testing::ValuesIn(shape_params2), + ::testing::Values(true), // can be fused + ::testing::Values(ov::test::utils::DEVICE_CPU)), + QuantizedMatMulMultiplyFusion::getTestCaseName); std::vector negative_shape_params2 = { {{2, 2}, {2, 2}, false, {2, 2}}, @@ -198,11 +198,11 @@ std::vector negative_shape_params2 = { {{2, 3, 5, 10}, {3, 7, 10}, true, {2, 3, 5, 7}}, }; -INSTANTIATE_TEST_SUITE_P(smoke_NegativeQuantizedMatMulMultiplyFusion, QuantizedMatMulMultiplyFusion, - ::testing::Combine( - ::testing::ValuesIn(negative_shape_params2), - ::testing::Values(false), // cannot be fused - ::testing::Values(ov::test::utils::DEVICE_CPU)), - QuantizedMatMulMultiplyFusion::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_NegativeQuantizedMatMulMultiplyFusion, + QuantizedMatMulMultiplyFusion, + ::testing::Combine(::testing::ValuesIn(negative_shape_params2), + ::testing::Values(false), // cannot be fused + ::testing::Values(ov::test::utils::DEVICE_CPU)), + QuantizedMatMulMultiplyFusion::getTestCaseName); -} // namespace +} // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/matmul_squeeze_add.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/matmul_squeeze_add.cpp index 
db6bdc22104958..97319465428a5c 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/matmul_squeeze_add.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/matmul_squeeze_add.cpp @@ -2,46 +2,27 @@ // SPDX-License-Identifier: Apache-2.0 // -#include - -#include "common_test_utils/test_constants.hpp" #include "subgraph_tests/matmul_squeeze_add.hpp" -using namespace SubgraphTestsDefinitions; +#include + +using namespace ov::test; namespace { -const std::vector netPrecisions = { - InferenceEngine::Precision::FP32, - InferenceEngine::Precision::FP16 -}; - -const std::vector> configs = { - { } -}; - -std::vector> input_shapes = { - {1, 8}, - {1, 42}, - {1, 100}, - {1, 128}, - {1, 512} -}; - -std::vector output_sizes = { - 1000, - 512, - 128, - 42, - 16, - 8 -}; - -INSTANTIATE_TEST_SUITE_P(smoke_MatmulSqueezeAdd, MatmulSqueezeAddTest, - ::testing::Combine( - ::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::ValuesIn(configs), - ::testing::ValuesIn(input_shapes), - ::testing::ValuesIn(output_sizes)), - MatmulSqueezeAddTest::getTestCaseName); +const std::vector netPrecisions = {ov::element::f32, ov::element::f16}; + +const std::vector configs = {{}}; + +std::vector input_shapes = {{1, 8}, {1, 42}, {1, 100}, {1, 128}, {1, 512}}; + +std::vector output_sizes = {1000, 512, 128, 42, 16, 8}; + +INSTANTIATE_TEST_SUITE_P(smoke_MatmulSqueezeAdd, + MatmulSqueezeAddTest, + ::testing::Combine(::testing::ValuesIn(netPrecisions), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::ValuesIn(configs), + ::testing::ValuesIn(input_shapes), + ::testing::ValuesIn(output_sizes)), + MatmulSqueezeAddTest::getTestCaseName); } // namespace diff --git a/src/plugins/intel_gna/tests/functional/shared_tests_instances/subgraph_tests/get_output_before_activation.cpp b/src/plugins/intel_gna/tests/functional/shared_tests_instances/subgraph_tests/get_output_before_activation.cpp index ef6c7eb8257725..07cbd0dd905afc 100644 --- a/src/plugins/intel_gna/tests/functional/shared_tests_instances/subgraph_tests/get_output_before_activation.cpp +++ b/src/plugins/intel_gna/tests/functional/shared_tests_instances/subgraph_tests/get_output_before_activation.cpp @@ -2,31 +2,31 @@ // SPDX-License-Identifier: Apache-2.0 // -#include +#include "subgraph_tests/get_output_before_activation.hpp" -#include "common_test_utils/test_constants.hpp" - -namespace SubgraphTestsDefinitions { +namespace ov { +namespace test { namespace { std::vector input_sizes = {80, 32, 64, 100}; std::vector midLayerTypes{midOutputType::Mul, midOutputType::Sub, midOutputType::Sum}; -std::vector> configs = {{ - {"GNA_COMPACT_MODE", "NO"}, - {"GNA_DEVICE_MODE", "GNA_SW_EXACT"}, - {"GNA_SCALE_FACTOR_0", "1638.4"}, - {"GNA_SCALE_FACTOR_1", "1638.4"}, - }, - {{"GNA_DEVICE_MODE", "GNA_SW_FP32"}}}; +std::vector configs = {{ + {"GNA_COMPACT_MODE", "NO"}, + {"GNA_DEVICE_MODE", "GNA_SW_EXACT"}, + {"GNA_SCALE_FACTOR_0", "1638.4"}, + {"GNA_SCALE_FACTOR_1", "1638.4"}, + }, + {{"GNA_DEVICE_MODE", "GNA_SW_FP32"}}}; } // namespace INSTANTIATE_TEST_SUITE_P(OutputBeforeActivation, OutputBeforeActivation, ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_GNA), - ::testing::Values(InferenceEngine::Precision::FP32), + ::testing::Values(ov::element::f32), ::testing::ValuesIn(input_sizes), ::testing::ValuesIn(midLayerTypes), ::testing::ValuesIn(configs)), OutputBeforeActivation::getTestCaseName); -} // namespace SubgraphTestsDefinitions +} // namespace test 
+} // namespace ov diff --git a/src/plugins/intel_gna/tests/functional/shared_tests_instances/subgraph_tests/matmul_squeeze_add.cpp b/src/plugins/intel_gna/tests/functional/shared_tests_instances/subgraph_tests/matmul_squeeze_add.cpp index b5ece0ac7f71b6..aafc875bc5866d 100644 --- a/src/plugins/intel_gna/tests/functional/shared_tests_instances/subgraph_tests/matmul_squeeze_add.cpp +++ b/src/plugins/intel_gna/tests/functional/shared_tests_instances/subgraph_tests/matmul_squeeze_add.cpp @@ -8,17 +8,15 @@ #include "common_test_utils/test_constants.hpp" -using namespace SubgraphTestsDefinitions; +using namespace ov::test; namespace { -const std::vector netPrecisions = {InferenceEngine::Precision::FP32, - InferenceEngine::Precision::FP16}; +const std::vector netPrecisions = {ov::element::f32}; -const std::vector> configs = { - {{"GNA_DEVICE_MODE", "GNA_SW_EXACT"}, {"GNA_SCALE_FACTOR_0", "81.9175"}}, - {{"GNA_DEVICE_MODE", "GNA_SW_FP32"}}}; +const std::vector configs = {{{"GNA_DEVICE_MODE", "GNA_SW_EXACT"}, {"GNA_SCALE_FACTOR_0", "81.9175"}}, + {{"GNA_DEVICE_MODE", "GNA_SW_FP32"}}}; -std::vector> input_shapes = {{1, 8}, {1, 42}, {1, 100}, {1, 128}, {1, 512}}; +std::vector input_shapes = {{1, 8}, {1, 42}, {1, 100}, {1, 128}, {1, 512}}; std::vector output_sizes = {1000, 512, 128, 42, 16, 8}; diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/subgraph_tests/get_output_before_activation.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/subgraph_tests/get_output_before_activation.cpp index 1f3ae389ce87ef..f96f89f19124e8 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/subgraph_tests/get_output_before_activation.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/subgraph_tests/get_output_before_activation.cpp @@ -3,32 +3,26 @@ // #include + #include "common_test_utils/test_constants.hpp" -namespace SubgraphTestsDefinitions { +namespace ov { +namespace test { namespace { - std::vector input_sizes = { - 80, - 32, - 64, - 100 - }; +std::vector input_sizes = {80, 32, 64, 100}; - std::vector midLayerTypes { - midOutputType::Mul, - midOutputType::Sub, - midOutputType::Sum - }; +std::vector midLayerTypes{midOutputType::Mul, midOutputType::Sub, midOutputType::Sum}; - std::map additional_config = {}; -} // namespace +ov::AnyMap additional_config = {}; +} // namespace -INSTANTIATE_TEST_SUITE_P(OutputBeforeActivation, OutputBeforeActivation, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_GPU), - ::testing::Values(InferenceEngine::Precision::FP32), - ::testing::ValuesIn(input_sizes), - ::testing::ValuesIn(midLayerTypes), - ::testing::Values(additional_config)), - OutputBeforeActivation::getTestCaseName); -} // namespace SubgraphTestsDefinitions +INSTANTIATE_TEST_SUITE_P(OutputBeforeActivation, + OutputBeforeActivation, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_GPU), + ::testing::Values(ov::element::f32), + ::testing::ValuesIn(input_sizes), + ::testing::ValuesIn(midLayerTypes), + ::testing::Values(additional_config)), + OutputBeforeActivation::getTestCaseName); +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/subgraph_tests/matmul_squeeze_add.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/subgraph_tests/matmul_squeeze_add.cpp index 04a19a95a61ba7..bfeed0cab84a30 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/subgraph_tests/matmul_squeeze_add.cpp +++ 
b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/subgraph_tests/matmul_squeeze_add.cpp @@ -2,46 +2,27 @@ // SPDX-License-Identifier: Apache-2.0 // -#include - -#include "common_test_utils/test_constants.hpp" #include "subgraph_tests/matmul_squeeze_add.hpp" -using namespace SubgraphTestsDefinitions; +#include + +using namespace ov::test; namespace { -const std::vector netPrecisions = { - InferenceEngine::Precision::FP32, - InferenceEngine::Precision::FP16 -}; - -const std::vector> configs = { - { } -}; - -std::vector> input_shapes = { - {1, 8}, - {1, 42}, - {1, 100}, - {1, 128}, - {1, 512} -}; - -std::vector output_sizes = { - 1000, - 512, - 128, - 42, - 16, - 8 -}; - -INSTANTIATE_TEST_SUITE_P(MatmulSqueezeAdd, MatmulSqueezeAddTest, - ::testing::Combine( - ::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::utils::DEVICE_GPU), - ::testing::ValuesIn(configs), - ::testing::ValuesIn(input_shapes), - ::testing::ValuesIn(output_sizes)), - MatmulSqueezeAddTest::getTestCaseName); +const std::vector netPrecisions = {ov::element::f32, ov::element::f16}; + +const std::vector configs = {{}}; + +std::vector input_shapes = {{1, 8}, {1, 42}, {1, 100}, {1, 128}, {1, 512}}; + +std::vector output_sizes = {1000, 512, 128, 42, 16, 8}; + +INSTANTIATE_TEST_SUITE_P(MatmulSqueezeAdd, + MatmulSqueezeAddTest, + ::testing::Combine(::testing::ValuesIn(netPrecisions), + ::testing::Values(ov::test::utils::DEVICE_GPU), + ::testing::ValuesIn(configs), + ::testing::ValuesIn(input_shapes), + ::testing::ValuesIn(output_sizes)), + MatmulSqueezeAddTest::getTestCaseName); } // namespace diff --git a/src/tests/functional/plugin/shared/include/subgraph_tests/conv_strides_opt.hpp b/src/tests/functional/plugin/shared/include/subgraph_tests/conv_strides_opt.hpp index 67ae6cda1320aa..9d46654ebd743e 100644 --- a/src/tests/functional/plugin/shared/include/subgraph_tests/conv_strides_opt.hpp +++ b/src/tests/functional/plugin/shared/include/subgraph_tests/conv_strides_opt.hpp @@ -6,9 +6,12 @@ #include "shared_test_classes/subgraph/conv_strides_opt.hpp" -namespace SubgraphTestsDefinitions { +namespace ov { +namespace test { TEST_P(ConvStridesOpt, CompareWithRefs) { - Run(); + run(); } -} // namespace SubgraphTestsDefinitions + +} // namespace test +} // namespace ov diff --git a/src/tests/functional/plugin/shared/include/subgraph_tests/convert_pad_to_group_conv.hpp b/src/tests/functional/plugin/shared/include/subgraph_tests/convert_pad_to_group_conv.hpp index 4d9ce3770aba9a..8547d6b17436a4 100644 --- a/src/tests/functional/plugin/shared/include/subgraph_tests/convert_pad_to_group_conv.hpp +++ b/src/tests/functional/plugin/shared/include/subgraph_tests/convert_pad_to_group_conv.hpp @@ -6,9 +6,12 @@ #include "shared_test_classes/subgraph/convert_pad_to_group_conv.hpp" -namespace SubgraphTestsDefinitions { +namespace ov { +namespace test { TEST_P(ConvertPadToConvTests, CompareWithRefs) { - Run(); + run(); } -} // namespace SubgraphTestsDefinitions + +} // namespace test +} // namespace ov diff --git a/src/tests/functional/plugin/shared/include/subgraph_tests/get_output_before_activation.hpp b/src/tests/functional/plugin/shared/include/subgraph_tests/get_output_before_activation.hpp index 996a42e26cd2b4..eca03aab8e56ca 100644 --- a/src/tests/functional/plugin/shared/include/subgraph_tests/get_output_before_activation.hpp +++ b/src/tests/functional/plugin/shared/include/subgraph_tests/get_output_before_activation.hpp @@ -6,10 +6,12 @@ #include "shared_test_classes/subgraph/get_output_before_activation.hpp" 
-namespace SubgraphTestsDefinitions { +namespace ov { +namespace test { TEST_P(OutputBeforeActivation, CompareWithRefs) { - Run(); + run(); }; -} // namespace SubgraphTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/tests/functional/plugin/shared/include/subgraph_tests/matmul_const_transposes_extraction.hpp b/src/tests/functional/plugin/shared/include/subgraph_tests/matmul_const_transposes_extraction.hpp index 48e0755fa65d3c..e16847f17105d5 100644 --- a/src/tests/functional/plugin/shared/include/subgraph_tests/matmul_const_transposes_extraction.hpp +++ b/src/tests/functional/plugin/shared/include/subgraph_tests/matmul_const_transposes_extraction.hpp @@ -4,18 +4,21 @@ #pragma once +#include "functional_test_utils/skip_tests_config.hpp" #include "shared_test_classes/subgraph/matmul_const_transposes_extraction.hpp" -namespace SubgraphTestsDefinitions { +namespace ov { +namespace test { TEST_P(MatMulConstTransposesExtractionTest, CompareWithRefs) { SKIP_IF_CURRENT_TEST_IS_DISABLED(); - Run(); + run(); } TEST_P(QuantizedMatMulConstTransposesExtractionTest, CompareWithRefs) { SKIP_IF_CURRENT_TEST_IS_DISABLED(); - Run(); + run(); } -} // namespace SubgraphTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/tests/functional/plugin/shared/include/subgraph_tests/matmul_multiply_fusion.hpp b/src/tests/functional/plugin/shared/include/subgraph_tests/matmul_multiply_fusion.hpp index e2db9bee578207..77ff3497cb2e03 100644 --- a/src/tests/functional/plugin/shared/include/subgraph_tests/matmul_multiply_fusion.hpp +++ b/src/tests/functional/plugin/shared/include/subgraph_tests/matmul_multiply_fusion.hpp @@ -4,17 +4,21 @@ #pragma once +#include "functional_test_utils/skip_tests_config.hpp" #include "shared_test_classes/subgraph/matmul_multiply_fusion.hpp" -namespace SubgraphTestsDefinitions { +namespace ov { +namespace test { TEST_P(MatMulMultiplyFusion, CompareWithRefs) { SKIP_IF_CURRENT_TEST_IS_DISABLED(); - Run(); + run(); } TEST_P(QuantizedMatMulMultiplyFusion, CompareWithRefs) { SKIP_IF_CURRENT_TEST_IS_DISABLED(); - Run(); + run(); } -} // namespace SubgraphTestsDefinitions + +} // namespace test +} // namespace ov diff --git a/src/tests/functional/plugin/shared/include/subgraph_tests/matmul_squeeze_add.hpp b/src/tests/functional/plugin/shared/include/subgraph_tests/matmul_squeeze_add.hpp index b745f70da4e238..267053d695162e 100644 --- a/src/tests/functional/plugin/shared/include/subgraph_tests/matmul_squeeze_add.hpp +++ b/src/tests/functional/plugin/shared/include/subgraph_tests/matmul_squeeze_add.hpp @@ -6,10 +6,12 @@ #include "shared_test_classes/subgraph/matmul_squeeze_add.hpp" -namespace SubgraphTestsDefinitions { +namespace ov { +namespace test { TEST_P(MatmulSqueezeAddTest, CompareWithRefImpl) { - Run(); + run(); }; -} // namespace SubgraphTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/conv_eltwise_fusion.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/conv_eltwise_fusion.hpp index eff28f7d7f2574..c156ab395cfe29 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/conv_eltwise_fusion.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/conv_eltwise_fusion.hpp @@ -9,7 +9,6 @@ #include #include "ov_models/builders.hpp" -#include "shared_test_classes/base/layer_test_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" namespace ov { diff 
--git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/conv_strides_opt.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/conv_strides_opt.hpp index 9df042c72de523..ca35c527b6d32a 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/conv_strides_opt.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/conv_strides_opt.hpp @@ -4,29 +4,29 @@ #pragma once -#include #include -#include -#include "shared_test_classes/base/layer_test_utils.hpp" +#include + #include "ov_models/builders.hpp" -#include -#include +#include "shared_test_classes/base/ov_subgraph.hpp" -namespace SubgraphTestsDefinitions { +namespace ov { +namespace test { -typedef std::tuple< - ngraph::Shape, // input shape - ngraph::op::PadType, - std::string // Device name - > ConvStridesOptParams; +typedef std::tuple + ConvStridesOptParams; -class ConvStridesOpt - : public testing::WithParamInterface, - virtual public LayerTestsUtils::LayerTestsCommon { +class ConvStridesOpt : public testing::WithParamInterface, + virtual public ov::test::SubgraphBaseStaticTest { public: - static std::string getTestCaseName(const testing::TestParamInfo &obj); + static std::string getTestCaseName(const testing::TestParamInfo& obj); protected: void SetUp() override; }; -} // namespace SubgraphTestsDefinitions + +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/convert_pad_to_group_conv.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/convert_pad_to_group_conv.hpp index 53bcd5d850e1c5..108c0086f04b07 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/convert_pad_to_group_conv.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/convert_pad_to_group_conv.hpp @@ -4,32 +4,32 @@ #pragma once -#include #include +#include #include -#include "shared_test_classes/base/layer_test_utils.hpp" -#include "ov_models/builders.hpp" -#include -#include - -namespace SubgraphTestsDefinitions { - -typedef std::tuple< - ngraph::Shape, // input shape - std::vector, // pad_begin - std::vector, // pad_end - float, // pad_value - ngraph::op::PadMode, // pad_mode - std::string // Device name - > PadParams; - -class ConvertPadToConvTests - : public testing::WithParamInterface, - virtual public LayerTestsUtils::LayerTestsCommon { + +#include "shared_test_classes/base/ov_subgraph.hpp" + +namespace ov { +namespace test { + +typedef std::tuple, // pad_begin + std::vector, // pad_end + float, // pad_value + ov::op::PadMode, // pad_mode + std::string // Device name + > + PadParams; + +class ConvertPadToConvTests : public testing::WithParamInterface, + virtual public ov::test::SubgraphBaseStaticTest { public: - static std::string getTestCaseName(const testing::TestParamInfo &obj); + static std::string getTestCaseName(const testing::TestParamInfo& obj); protected: void SetUp() override; }; -} // namespace SubgraphTestsDefinitions + +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/get_output_before_activation.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/get_output_before_activation.hpp index b6241ee0a049b3..5aac351fe7d01c 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/get_output_before_activation.hpp +++ 
b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/get_output_before_activation.hpp @@ -4,33 +4,34 @@ #pragma once -#include "common_test_utils/test_common.hpp" -#include "shared_test_classes/base/layer_test_utils.hpp" -#include - -namespace SubgraphTestsDefinitions { +#include "shared_test_classes/base/ov_subgraph.hpp" +namespace ov { +namespace test { enum class midOutputType { Sum, Sub, Mul, }; -typedef std::tuple< - std::string, // Target device name - InferenceEngine::Precision, // Network precision - size_t, // Input size - midOutputType, // Type of layer that will be an output - std::map // Configuration -> outputBeforeActivationParams; +typedef std::tuple + outputBeforeActivationParams; -std::ostream& operator<< (std::ostream& os, const midOutputType& oType); +std::ostream& operator<<(std::ostream& os, const midOutputType& oType); -class OutputBeforeActivation : virtual public LayerTestsUtils::LayerTestsCommon, - public testing::WithParamInterface { +class OutputBeforeActivation : virtual public ov::test::SubgraphBaseStaticTest, + public testing::WithParamInterface { protected: void SetUp() override; + public: - static std::string getTestCaseName(const testing::TestParamInfo &obj); - InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo &info) const override; + static std::string getTestCaseName(const testing::TestParamInfo& obj); + // void generate_inputs(const std::vector& targetInputStaticShapes) override; }; -} // namespace SubgraphTestsDefinitions + +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/matmul_const_transposes_extraction.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/matmul_const_transposes_extraction.hpp index ab345a20167618..e67acee0208017 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/matmul_const_transposes_extraction.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/matmul_const_transposes_extraction.hpp @@ -4,44 +4,46 @@ #pragma once -#include #include -#include "shared_test_classes/base/layer_test_utils.hpp" -#include +#include + +#include "shared_test_classes/base/ov_subgraph.hpp" -namespace SubgraphTestsDefinitions { +namespace ov { +namespace test { struct MatMulConstTransposesExtractionTestShapeParams { - ngraph::Shape input_shape; - ngraph::Shape weights_shape; + ov::Shape input_shape; + ov::Shape weights_shape; bool trans_b; }; -typedef std::tuple< - MatMulConstTransposesExtractionTestShapeParams, - bool, // whether Mul can be fused to MatMul in this case - std::string // Device name - > MatMulConstTransposesExtractionTestParams; +typedef std::tuple + MatMulConstTransposesExtractionTestParams; class MatMulConstTransposesExtractionTest - : public testing::WithParamInterface, - virtual public LayerTestsUtils::LayerTestsCommon { + : public testing::WithParamInterface, + virtual public ov::test::SubgraphBaseStaticTest { public: - static std::string getTestCaseName(const testing::TestParamInfo &obj); + static std::string getTestCaseName(const testing::TestParamInfo& obj); protected: void SetUp() override; }; class QuantizedMatMulConstTransposesExtractionTest - : public testing::WithParamInterface, - virtual public LayerTestsUtils::LayerTestsCommon { + : public testing::WithParamInterface, + virtual public ov::test::SubgraphBaseStaticTest { public: - static std::string getTestCaseName(const testing::TestParamInfo 
&obj); + static std::string getTestCaseName(const testing::TestParamInfo& obj); protected: void SetUp() override; void TearDown() override; }; -} // namespace SubgraphTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/matmul_multiply_fusion.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/matmul_multiply_fusion.hpp index ad65d51366276b..3dd24f50746bac 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/matmul_multiply_fusion.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/matmul_multiply_fusion.hpp @@ -4,45 +4,45 @@ #pragma once -#include #include -#include "shared_test_classes/base/layer_test_utils.hpp" -#include +#include + +#include "shared_test_classes/base/ov_subgraph.hpp" -namespace SubgraphTestsDefinitions { +namespace ov { +namespace test { struct MatMulMultiplyFusionShapeParams { - ngraph::Shape input_shape; - ngraph::Shape weights_shape; + ov::Shape input_shape; + ov::Shape weights_shape; bool trans_b; - ngraph::Shape const_shape; + ov::Shape const_shape; }; -typedef std::tuple< - MatMulMultiplyFusionShapeParams, - bool, // whether Mul can be fused to MatMul in this case - std::string // Device name - > MatMulMultiplyFusionParams; +typedef std::tuple + MatMulMultiplyFusionParams; -class MatMulMultiplyFusion - : public testing::WithParamInterface, - virtual public LayerTestsUtils::LayerTestsCommon { +class MatMulMultiplyFusion : public testing::WithParamInterface, + virtual public ov::test::SubgraphBaseStaticTest { public: - static std::string getTestCaseName(const testing::TestParamInfo &obj); + static std::string getTestCaseName(const testing::TestParamInfo& obj); protected: void SetUp() override; }; -class QuantizedMatMulMultiplyFusion - : public testing::WithParamInterface, - virtual public LayerTestsUtils::LayerTestsCommon { +class QuantizedMatMulMultiplyFusion : public testing::WithParamInterface, + virtual public ov::test::SubgraphBaseStaticTest { public: - static std::string getTestCaseName(const testing::TestParamInfo &obj); + static std::string getTestCaseName(const testing::TestParamInfo& obj); protected: void SetUp() override; void TearDown() override; }; -} // namespace SubgraphTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/matmul_squeeze_add.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/matmul_squeeze_add.hpp index 2aa4039e0cd9a3..fc5270096f0d52 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/matmul_squeeze_add.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/matmul_squeeze_add.hpp @@ -9,22 +9,21 @@ #include #include -#include "shared_test_classes/base/layer_test_utils.hpp" -#include "ov_models/builders.hpp" -#include "ov_models/utils/ov_helpers.hpp" +#include "shared_test_classes/base/ov_subgraph.hpp" -namespace SubgraphTestsDefinitions { +namespace ov { +namespace test { -typedef std::tuple< - InferenceEngine::Precision, // Network Precision - std::string, // Target Device - std::map, // Configuration - std::vector, // Input Shapes - size_t // Output Size -> matmulSqueezeAddParams; +typedef std::tuple + matmulSqueezeAddParams; class MatmulSqueezeAddTest : public testing::WithParamInterface, - virtual public LayerTestsUtils::LayerTestsCommon 
{ + virtual public ov::test::SubgraphBaseStaticTest { public: static std::string getTestCaseName(const testing::TestParamInfo& obj); @@ -32,4 +31,5 @@ class MatmulSqueezeAddTest : public testing::WithParamInterface &obj) { +std::string ConvStridesOpt::getTestCaseName(const testing::TestParamInfo& obj) { Shape input_shape; op::PadType pad; std::string targetName; @@ -25,20 +24,21 @@ void ConvStridesOpt::SetUp() { Shape input_shape; op::PadType pad_type; std::tie(input_shape, pad_type, targetDevice) = this->GetParam(); - auto param = std::make_shared(element::f32, input_shape); + auto param = std::make_shared(element::f32, input_shape); auto C = input_shape[1]; auto weights1 = ngraph::builder::makeConstant(element::f32, {C, C, 3, 3}, {}, true); auto spatial_dims = input_shape.size() - 2; Strides strides1(spatial_dims, 1); Strides dilations(spatial_dims, 1); CoordinateDiff pad_begin1(spatial_dims, 1), pad_end1(spatial_dims, 1); - auto conv1 = std::make_shared(param, weights1, strides1, pad_begin1, pad_end1, - dilations, pad_type); + auto conv1 = + std::make_shared(param, weights1, strides1, pad_begin1, pad_end1, dilations, pad_type); auto weights2 = ngraph::builder::makeConstant(element::f32, {C, C, 1, 1}, {}, true); CoordinateDiff pad_begin2(spatial_dims, 0), pad_end2(spatial_dims, 0); Strides strides2(spatial_dims, 2); - auto conv2 = std::make_shared(conv1, weights2, strides2, pad_begin2, pad_end2, - dilations); - function = std::make_shared(OutputVector{conv2}, ParameterVector{param}); + auto conv2 = std::make_shared(conv1, weights2, strides2, pad_begin2, pad_end2, dilations); + function = std::make_shared(OutputVector{conv2}, ParameterVector{param}); } -} // namespace SubgraphTestsDefinitions + +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/src/subgraph/convert_pad_to_group_conv.cpp b/src/tests/functional/shared_test_classes/src/subgraph/convert_pad_to_group_conv.cpp index c8f96576dc3761..5db0177f8afb57 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/convert_pad_to_group_conv.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/convert_pad_to_group_conv.cpp @@ -4,13 +4,14 @@ #include "shared_test_classes/subgraph/convert_pad_to_group_conv.hpp" -namespace SubgraphTestsDefinitions { +namespace ov { +namespace test { -std::string ConvertPadToConvTests::getTestCaseName(const testing::TestParamInfo &obj) { - ngraph::Shape input_shape; +std::string ConvertPadToConvTests::getTestCaseName(const testing::TestParamInfo& obj) { + ov::Shape input_shape; std::string targetName; std::vector pad_begin, pad_end; - ngraph::op::PadMode mode; + ov::op::PadMode mode; float value; std::tie(input_shape, pad_begin, pad_end, value, mode, targetName) = obj.param; std::ostringstream results; @@ -25,20 +26,24 @@ std::string ConvertPadToConvTests::getTestCaseName(const testing::TestParamInfo< } void ConvertPadToConvTests::SetUp() { - ngraph::Shape input_shape; + ov::Shape input_shape; std::vector pad_begin, pad_end; - ngraph::op::PadMode mode; + ov::op::PadMode mode; float value; std::tie(input_shape, pad_begin, pad_end, value, mode, targetDevice) = this->GetParam(); { - auto param = std::make_shared(ngraph::element::f32, input_shape); - auto pad = std::make_shared(param, - ngraph::opset4::Constant::create(ngraph::element::i64, ngraph::Shape{pad_begin.size()}, pad_begin), - ngraph::opset4::Constant::create(ngraph::element::i64, ngraph::Shape{pad_end.size()}, pad_end), - ngraph::opset4::Constant::create(ngraph::element::f32, ngraph::Shape{}, 
{value}), mode); - auto relu = std::make_shared(pad); - function = std::make_shared(ngraph::OutputVector{relu}, ngraph::ParameterVector{param}, "pad"); + auto param = std::make_shared(ov::element::f32, input_shape); + auto pad = std::make_shared( + param, + ov::op::v0::Constant::create(ov::element::i64, ov::Shape{pad_begin.size()}, pad_begin), + ov::op::v0::Constant::create(ov::element::i64, ov::Shape{pad_end.size()}, pad_end), + ov::op::v0::Constant::create(ov::element::f32, ov::Shape{}, {value}), + mode); + auto relu = std::make_shared(pad); + function = std::make_shared(ov::OutputVector{relu}, ov::ParameterVector{param}, "pad"); } } -} // namespace SubgraphTestsDefinitions + +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/src/subgraph/get_output_before_activation.cpp b/src/tests/functional/shared_test_classes/src/subgraph/get_output_before_activation.cpp index 8a84303f79acfb..7a566ae58a801c 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/get_output_before_activation.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/get_output_before_activation.cpp @@ -2,10 +2,14 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ov_models/builders.hpp" #include "shared_test_classes/subgraph/get_output_before_activation.hpp" -namespace SubgraphTestsDefinitions { +#include "ov_models/builders.hpp" +#include "shared_test_classes/base/ov_subgraph.hpp" + +namespace ov { +namespace test { + std::ostream& operator<<(std::ostream& os, const midOutputType& oType) { switch (oType) { case midOutputType::Sub: @@ -21,51 +25,50 @@ std::ostream& operator<<(std::ostream& os, const midOutputType& oType) { std::string OutputBeforeActivation::getTestCaseName(const testing::TestParamInfo& obj) { std::string targetDevice; - InferenceEngine::Precision netPrecision; + ov::element::Type element_type; size_t inputSize; midOutputType outputType; - std::map config; - std::tie(targetDevice, netPrecision, inputSize, outputType, config) = obj.param; + ov::AnyMap config; + std::tie(targetDevice, element_type, inputSize, outputType, config) = obj.param; std::ostringstream result; - result << "netPrecision=" << netPrecision.name() << "_"; + result << "InputType=" << element_type << "_"; result << "IS=" << inputSize << "_"; result << "OutputType=" << outputType << "_"; result << "targetDevice=" << targetDevice; for (auto const& configItem : config) { - result << "_configItem=" << configItem.first << "_" << configItem.second; + result << "_configItem=" << configItem.first << "_" << configItem.second.as(); } return result.str(); } void OutputBeforeActivation::SetUp() { - InferenceEngine::Precision netPrecision; - std::map config; + ov::element::Type element_type; + ov::AnyMap config; size_t inputSize; midOutputType outputType; - std::tie(targetDevice, netPrecision, inputSize, outputType, config) = this->GetParam(); + std::tie(targetDevice, element_type, inputSize, outputType, config) = this->GetParam(); configuration.insert(config.begin(), config.end()); - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - std::vector input_dims { 1, inputSize }; + std::vector input_dims{1, inputSize}; - ov::ParameterVector input_parameter {std::make_shared(ngPrc, ov::Shape(input_dims)), - std::make_shared(ngPrc, ov::Shape(input_dims))}; + ov::ParameterVector input_parameter{std::make_shared(element_type, ov::Shape(input_dims)), + std::make_shared(element_type, ov::Shape(input_dims))}; auto input0 = input_parameter[0]; auto input1 = 
input_parameter[1]; ngraph::OutputVector outputs; std::shared_ptr midLayer; switch (outputType) { - case SubgraphTestsDefinitions::midOutputType::Sum: { + case ov::test::midOutputType::Sum: { midLayer = ngraph::builder::makeEltwise(input0, input1, ngraph::helpers::EltwiseTypes::ADD); break; } - case SubgraphTestsDefinitions::midOutputType::Sub: { + case ov::test::midOutputType::Sub: { midLayer = ngraph::builder::makeEltwise(input0, input1, ngraph::helpers::EltwiseTypes::SUBTRACT); break; } - case SubgraphTestsDefinitions::midOutputType::Mul: { + case ov::test::midOutputType::Mul: { midLayer = ngraph::builder::makeEltwise(input0, input1, ngraph::helpers::EltwiseTypes::MULTIPLY); break; } @@ -73,12 +76,17 @@ void OutputBeforeActivation::SetUp() { GTEST_FAIL() << "Unknown midOutputType"; } - auto act = ngraph::builder::makeActivation(midLayer, ngPrc, ngraph::helpers::ActivationTypes::Tanh); + auto act = ngraph::builder::makeActivation(midLayer, element_type, ngraph::helpers::ActivationTypes::Tanh); outputs.insert(outputs.end(), {midLayer, act}); function = std::make_shared(outputs, input_parameter, "output_before_activation"); } -InferenceEngine::Blob::Ptr OutputBeforeActivation::GenerateInput(const InferenceEngine::InputInfo &info) const { - return FuncTestUtils::createAndFillBlob(info.getTensorDesc(), 2, -1, 100); -} -} // namespace SubgraphTestsDefinitions +// void OutputBeforeActivation::generate_inputs(const std::vector& targetInputStaticShapes) { +// ov::test::SubgraphBaseTest::generate_inputs(targetInputStaticShapes); +// } +// InferenceEngine::Blob::Ptr OutputBeforeActivation::GenerateInput(const InferenceEngine::InputInfo& info) const { +// return FuncTestUtils::createAndFillBlob(info.getTensorDesc(), 2, -1, 100); +// } + +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/src/subgraph/matmul_const_transposes_extraction.cpp b/src/tests/functional/shared_test_classes/src/subgraph/matmul_const_transposes_extraction.cpp index 59e5d4e397df17..05e434a1307b15 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/matmul_const_transposes_extraction.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/matmul_const_transposes_extraction.cpp @@ -3,15 +3,19 @@ // #include "transformations/common_optimizations/matmul_const_transposes_extraction.hpp" -#include "shared_test_classes/subgraph/matmul_const_transposes_extraction.hpp" -#include "ov_models/builders.hpp" -#include -namespace SubgraphTestsDefinitions { +#include "common_test_utils/graph_comparator.hpp" +#include "functional_test_utils/skip_tests_config.hpp" +#include "openvino/pass/manager.hpp" +#include "openvino/runtime/exec_model_info.hpp" +#include "ov_models/builders.hpp" +#include "shared_test_classes/subgraph/matmul_const_transposes_extraction.hpp" -using namespace ngraph; +namespace ov { +namespace test { -std::string MatMulConstTransposesExtractionTest::getTestCaseName(const testing::TestParamInfo &obj) { +std::string MatMulConstTransposesExtractionTest::getTestCaseName( + const testing::TestParamInfo& obj) { MatMulConstTransposesExtractionTestShapeParams shape_params; std::string device; std::tie(shape_params, std::ignore, device) = obj.param; @@ -33,18 +37,18 @@ void MatMulConstTransposesExtractionTest::SetUp() { const auto& input_shape = shape_params.input_shape; const auto& weights_shape = shape_params.weights_shape; - auto param = std::make_shared(type, input_shape); - auto weights = opset8::Constant::create(type, weights_shape, {0.5}); - auto matmul = 
std::make_shared(param, weights, false, shape_params.trans_b); - function = std::make_shared(matmul, ParameterVector{param}); + auto param = std::make_shared(type, input_shape); + auto weights = ov::op::v0::Constant::create(type, weights_shape, {0.5}); + auto matmul = std::make_shared(param, weights, false, shape_params.trans_b); + function = std::make_shared(matmul, ParameterVector{param}); - auto transformed_function = clone_function(*function); + auto transformed_function = function->clone(); pass::Manager manager; manager.register_pass(); manager.run_passes(transformed_function); bool functions_equal; - auto orig_function = clone_function(*function); + auto orig_function = function->clone(); std::tie(functions_equal, std::ignore) = compare_functions(transformed_function, orig_function, true); if (can_be_fused) { ASSERT_FALSE(functions_equal); @@ -54,15 +58,19 @@ void MatMulConstTransposesExtractionTest::SetUp() { } std::string QuantizedMatMulConstTransposesExtractionTest::getTestCaseName( - const testing::TestParamInfo &obj) { + const testing::TestParamInfo& obj) { MatMulConstTransposesExtractionTestShapeParams params; std::string device; std::tie(params, std::ignore, device) = obj.param; std::ostringstream results; - results << "input=" << params.input_shape << "_" - "weights=" << params.weights_shape << "_" - "dev=" << device; + results << "input=" << params.input_shape + << "_" + "weights=" + << params.weights_shape + << "_" + "dev=" + << device; return results.str(); } @@ -75,23 +83,23 @@ void QuantizedMatMulConstTransposesExtractionTest::SetUp() { auto weights_shape = params.weights_shape; element::Type type = element::f32; - auto param = std::make_shared(type, input_shape); + auto param = std::make_shared(type, input_shape); std::shared_ptr input; - std::shared_ptr weights = opset8::Constant::create(type, weights_shape, {0.5}); - auto low = opset8::Constant::create(type, {1}, {-2}); - auto high = opset8::Constant::create(type, {1}, {2}); - input = std::make_shared(param, low, high, low, high, 256); - weights = std::make_shared(weights, low, high, low, high, 255); - auto matmul = std::make_shared(input, weights, false, false); - function = std::make_shared(matmul, ParameterVector{param}); - - auto transformed_function = clone_function(*function); + std::shared_ptr weights = ov::op::v0::Constant::create(type, weights_shape, {0.5}); + auto low = ov::op::v0::Constant::create(type, {1}, {-2}); + auto high = ov::op::v0::Constant::create(type, {1}, {2}); + input = std::make_shared(param, low, high, low, high, 256); + weights = std::make_shared(weights, low, high, low, high, 255); + auto matmul = std::make_shared(input, weights, false, false); + function = std::make_shared(matmul, ParameterVector{param}); + + auto transformed_function = function->clone(); pass::Manager manager; manager.register_pass(); manager.run_passes(transformed_function); bool functions_equal; - auto orig_function = clone_function(*function); + auto orig_function = function->clone(); std::tie(functions_equal, std::ignore) = compare_functions(transformed_function, orig_function, true); if (can_be_fused) { ASSERT_FALSE(functions_equal); @@ -102,10 +110,10 @@ void QuantizedMatMulConstTransposesExtractionTest::SetUp() { void QuantizedMatMulConstTransposesExtractionTest::TearDown() { SKIP_IF_CURRENT_TEST_IS_DISABLED(); - auto runtime_function = executableNetwork.GetExecGraphInfo().getFunction(); + auto runtime_function = compiledModel.get_runtime_model(); int ops_found = 0; for (const auto& node : 
runtime_function->get_ordered_ops()) { - const auto& layer_type = node->get_rt_info().at(ExecGraphInfoSerialization::LAYER_TYPE).as(); + const auto& layer_type = node->get_rt_info().at(ov::exec_model_info::LAYER_TYPE).as(); if (layer_type == "FullyConnected" || layer_type == "MatMul") { ops_found++; auto inputs = node->input_values(); @@ -115,4 +123,6 @@ void QuantizedMatMulConstTransposesExtractionTest::TearDown() { } ASSERT_GT(ops_found, 0); } -} // namespace SubgraphTestsDefinitions + +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/src/subgraph/matmul_multiply_fusion.cpp b/src/tests/functional/shared_test_classes/src/subgraph/matmul_multiply_fusion.cpp index 02252c96fdf4d1..1764223d930f0f 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/matmul_multiply_fusion.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/matmul_multiply_fusion.cpp @@ -2,16 +2,18 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "transformations/common_optimizations/matmul_multiply_fusion.hpp" #include "shared_test_classes/subgraph/matmul_multiply_fusion.hpp" -#include "ov_models/builders.hpp" -#include -namespace SubgraphTestsDefinitions { +#include "common_test_utils/graph_comparator.hpp" +#include "functional_test_utils/skip_tests_config.hpp" +#include "openvino/pass/manager.hpp" +#include "openvino/runtime/exec_model_info.hpp" +#include "transformations/common_optimizations/matmul_multiply_fusion.hpp" -using namespace ngraph; +namespace ov { +namespace test { -std::string MatMulMultiplyFusion::getTestCaseName(const testing::TestParamInfo &obj) { +std::string MatMulMultiplyFusion::getTestCaseName(const testing::TestParamInfo& obj) { MatMulMultiplyFusionShapeParams shape_params; std::string device; std::tie(shape_params, std::ignore, device) = obj.param; @@ -35,20 +37,20 @@ void MatMulMultiplyFusion::SetUp() { const auto& weights_shape = shape_params.weights_shape; const auto& const_shape = shape_params.const_shape; - auto param = std::make_shared(precision, input_shape); - auto weights = opset8::Constant::create(precision, weights_shape, {0.5}); - auto matmul = std::make_shared(param, weights, false, shape_params.trans_b); - auto mul_const = opset8::Constant::create(precision, const_shape, {2.0}); - auto mul = std::make_shared(matmul, mul_const); - function = std::make_shared(OutputVector{mul}, ParameterVector{param}); + auto param = std::make_shared(precision, input_shape); + auto weights = ov::op::v0::Constant::create(precision, weights_shape, {0.5}); + auto matmul = std::make_shared(param, weights, false, shape_params.trans_b); + auto mul_const = ov::op::v0::Constant::create(precision, const_shape, {2.0}); + auto mul = std::make_shared(matmul, mul_const); + function = std::make_shared(OutputVector{mul}, ParameterVector{param}); - auto transformed_function = clone_function(*function); + auto transformed_function = function->clone(); pass::Manager manager; manager.register_pass(); manager.run_passes(transformed_function); bool functions_equal; - auto orig_function = clone_function(*function); + auto orig_function = function->clone(); std::tie(functions_equal, std::ignore) = compare_functions(transformed_function, orig_function, true); if (can_be_fused) { ASSERT_FALSE(functions_equal); @@ -57,7 +59,8 @@ void MatMulMultiplyFusion::SetUp() { } } -std::string QuantizedMatMulMultiplyFusion::getTestCaseName(const testing::TestParamInfo &obj) { +std::string QuantizedMatMulMultiplyFusion::getTestCaseName( + const testing::TestParamInfo& 
obj) { MatMulMultiplyFusionShapeParams shape_params; std::string device; std::tie(shape_params, std::ignore, device) = obj.param; @@ -81,31 +84,31 @@ void QuantizedMatMulMultiplyFusion::SetUp() { auto weights_shape = shape_params.weights_shape; const auto& const_shape = shape_params.const_shape; - auto param = std::make_shared(precision, input_shape); - auto low = opset8::Constant::create(precision, {1}, {-2}); - auto high = opset8::Constant::create(precision, {1}, {2}); - auto input_fq = std::make_shared(param, low, high, low, high, 256); - std::shared_ptr weights = opset8::Constant::create(precision, weights_shape, {0.5}); - weights = std::make_shared(weights, low, high, low, high, 255); + auto param = std::make_shared(precision, input_shape); + auto low = ov::op::v0::Constant::create(precision, {1}, {-2}); + auto high = ov::op::v0::Constant::create(precision, {1}, {2}); + auto input_fq = std::make_shared(param, low, high, low, high, 256); + std::shared_ptr weights = ov::op::v0::Constant::create(precision, weights_shape, {0.5}); + weights = std::make_shared(weights, low, high, low, high, 255); if (shape_params.trans_b) { std::vector perm(weights_shape.size(), 0); std::iota(perm.begin(), perm.end(), 0); std::swap(*(perm.end() - 2), *(perm.end() - 1)); - auto perm_const = opset8::Constant::create(element::i32, {perm.size()}, perm); - weights = std::make_shared(weights, perm_const); + auto perm_const = ov::op::v0::Constant::create(element::i32, {perm.size()}, perm); + weights = std::make_shared(weights, perm_const); } - auto matmul = std::make_shared(input_fq, weights); - auto mul_const = opset8::Constant::create(precision, const_shape, {2}); - auto mul = std::make_shared(matmul, mul_const); - function = std::make_shared(OutputVector{mul}, ParameterVector{param}); + auto matmul = std::make_shared(input_fq, weights); + auto mul_const = ov::op::v0::Constant::create(precision, const_shape, {2}); + auto mul = std::make_shared(matmul, mul_const); + function = std::make_shared(OutputVector{mul}, ParameterVector{param}); - auto transformed_function = clone_function(*function); + auto transformed_function = function->clone(); pass::Manager manager; manager.register_pass(); manager.run_passes(transformed_function); bool functions_equal; - auto orig_function = clone_function(*function); + auto orig_function = function->clone(); std::tie(functions_equal, std::ignore) = compare_functions(transformed_function, orig_function, true); if (can_be_fused) { ASSERT_FALSE(functions_equal); @@ -116,14 +119,14 @@ void QuantizedMatMulMultiplyFusion::SetUp() { void QuantizedMatMulMultiplyFusion::TearDown() { SKIP_IF_CURRENT_TEST_IS_DISABLED(); - auto get_layer_type = [] (const std::shared_ptr& node) -> const std::string& { + auto get_layer_type = [](const std::shared_ptr& node) -> const std::string& { const auto& rt_info = node->get_rt_info(); - auto it = rt_info.find(ExecGraphInfoSerialization::LAYER_TYPE); - IE_ASSERT(it != rt_info.end()); + auto it = rt_info.find(ov::exec_model_info::LAYER_TYPE); + OPENVINO_ASSERT(it != rt_info.end()); return it->second.as(); }; - auto runtime_function = executableNetwork.GetExecGraphInfo().getFunction(); + auto runtime_function = compiledModel.get_runtime_model(); int ops_found = 0; for (const auto& node : runtime_function->get_ordered_ops()) { const auto& layer_type = get_layer_type(node); @@ -136,4 +139,6 @@ void QuantizedMatMulMultiplyFusion::TearDown() { } ASSERT_GT(ops_found, 0); } -} // namespace SubgraphTestsDefinitions + +} // namespace test +} // namespace ov diff 
--git a/src/tests/functional/shared_test_classes/src/subgraph/matmul_squeeze_add.cpp b/src/tests/functional/shared_test_classes/src/subgraph/matmul_squeeze_add.cpp index b91b50bd8a9457..01b628d63cf8fd 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/matmul_squeeze_add.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/matmul_squeeze_add.cpp @@ -3,57 +3,68 @@ // #include "shared_test_classes/subgraph/matmul_squeeze_add.hpp" + +#include "common_test_utils/data_utils.hpp" #include "ov_models/builders.hpp" -namespace SubgraphTestsDefinitions { +namespace ov { +namespace test { std::string MatmulSqueezeAddTest::getTestCaseName(const testing::TestParamInfo& obj) { - InferenceEngine::Precision netPrecision; - std::vector inputShape; + ov::element::Type element_type; + ov::Shape input_shape; std::size_t outputSize; std::string targetDevice; - std::map configuration; - std::tie(netPrecision, targetDevice, configuration, inputShape, outputSize) = obj.param; + ov::AnyMap configuration; + std::tie(element_type, targetDevice, configuration, input_shape, outputSize) = obj.param; std::ostringstream result; - result << "IS=" << ov::test::utils::vec2str(inputShape) << "_"; + result << "IS=" << ov::test::utils::vec2str(input_shape) << "_"; result << "OS=" << outputSize << "_"; - result << "netPRC=" << netPrecision.name() << "_"; + result << "IT=" << element_type << "_"; result << "targetDevice=" << targetDevice; for (auto const& configItem : configuration) { - result << "_configItem=" << configItem.first << "_" << configItem.second; + result << "_configItem=" << configItem.first << "_" << configItem.second.as(); } return result.str(); } void MatmulSqueezeAddTest::SetUp() { auto seed = std::chrono::high_resolution_clock::now().time_since_epoch().count(); - InferenceEngine::Precision netPrecision; - std::map tempConfig; - std::vector inputShape; + ov::element::Type element_type; + ov::AnyMap tempConfig; + ov::Shape inputShape; size_t outputSize; - std::tie(netPrecision, targetDevice, tempConfig, inputShape, outputSize) = this->GetParam(); + std::tie(element_type, targetDevice, tempConfig, inputShape, outputSize) = this->GetParam(); configuration.insert(tempConfig.begin(), tempConfig.end()); - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - - ov::ParameterVector params {std::make_shared(ngPrc, ov::Shape(inputShape))}; + ov::ParameterVector params{std::make_shared(element_type, ov::Shape(inputShape))}; - auto constant_0 = ngraph::builder::makeConstant(ngPrc, { outputSize, inputShape[1] }, - ov::test::utils::generate_float_numbers(outputSize * inputShape[1], 0, 1, seed), false); + auto constant_0 = ngraph::builder::makeConstant( + element_type, + {outputSize, inputShape[1]}, + ov::test::utils::generate_float_numbers(outputSize * inputShape[1], 0, 1, seed), + false); auto matmul_0 = std::make_shared(params[0], constant_0, false, true); - auto constant_1 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 1 }, std::vector{0}); + auto constant_1 = + std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{1}, std::vector{0}); auto unsqueeze_0 = std::make_shared(matmul_0, constant_1); - auto constant_2 = ngraph::builder::makeConstant(ngPrc, { 1, inputShape[0], outputSize }, - ov::test::utils::generate_float_numbers(inputShape[0] * outputSize, 0, 1, seed), false); + auto constant_2 = ngraph::builder::makeConstant( + element_type, + {1, inputShape[0], outputSize}, + ov::test::utils::generate_float_numbers(inputShape[0] * outputSize, 0, 1, 
seed), + false); auto add_0 = std::make_shared(unsqueeze_0, constant_2); - auto constant_3 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 1 }, std::vector{0}); + auto constant_3 = + std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{1}, std::vector{0}); auto squeeze_0 = std::make_shared(add_0, constant_3); - ngraph::ResultVector results {std::make_shared(squeeze_0)}; + ngraph::ResultVector results{std::make_shared(squeeze_0)}; function = std::make_shared(results, params, "MatmulSqueezeAddTest"); } -} // namespace SubgraphTestsDefinitions + +} // namespace test +} // namespace ov From 6a326455b964359c034a2568942501834b13268e Mon Sep 17 00:00:00 2001 From: Jan Iwaszkiewicz Date: Wed, 11 Oct 2023 09:53:34 +0200 Subject: [PATCH 10/13] [PyOV] Propagate errors on imports from runtime module (#20273) Co-authored-by: Michal Lukaszewski --- src/bindings/python/CMakeLists.txt | 21 ++--- .../src/compatibility/openvino/__init__.py | 82 +++++++++---------- src/bindings/python/src/openvino/__init__.py | 82 +++++++++---------- tools/benchmark_tool/openvino/__init__.py | 82 +++++++++---------- tools/openvino_dev/CMakeLists.txt | 22 ++--- tools/ovc/openvino/__init__.py | 82 +++++++++---------- 6 files changed, 181 insertions(+), 190 deletions(-) diff --git a/src/bindings/python/CMakeLists.txt b/src/bindings/python/CMakeLists.txt index 2093c315a06691..8a572f89a0f47e 100644 --- a/src/bindings/python/CMakeLists.txt +++ b/src/bindings/python/CMakeLists.txt @@ -123,17 +123,8 @@ ov_check_python_build_conditions() # check __init__.py files alignment -function(ov_check_init_files_alignment) +function(ov_check_init_files_alignment init_files) # check the files in pairs - list(APPEND init_files - "${OpenVINOPython_SOURCE_DIR}/src/openvino/__init__.py" - "${OpenVINOPython_SOURCE_DIR}/src/compatibility/openvino/__init__.py" - "${OpenVINO_SOURCE_DIR}/tools/mo/openvino/__init__.py" - "${OpenVINO_SOURCE_DIR}/tools/pot/openvino/__init__.py" - "${OpenVINO_SOURCE_DIR}/tools/ovc/openvino/__init__.py" - "${OpenVINO_SOURCE_DIR}/tools/benchmark_tool/openvino/__init__.py" - "${OpenVINO_SOURCE_DIR}/tools/openvino_dev/src/openvino/__init__.py") - list(LENGTH init_files init_files_count) math(EXPR file_loop_range "${init_files_count}-2") foreach(init_file_idx RANGE 0 ${file_loop_range}) @@ -145,12 +136,18 @@ function(ov_check_init_files_alignment) RESULT_VARIABLE compare_result ) if(compare_result EQUAL 1) - message(FATAL_ERROR "The __init__.py files are misaligned: ${file1} and ${file2}") + message(FATAL_ERROR "The runtime __init__.py files are misaligned: ${file1} and ${file2}") endif() endforeach() endfunction() -ov_check_init_files_alignment() +set(INIT_FILES_RUNTIME +"${OpenVINOPython_SOURCE_DIR}/src/openvino/__init__.py" +"${OpenVINOPython_SOURCE_DIR}/src/compatibility/openvino/__init__.py" +"${OpenVINO_SOURCE_DIR}/tools/ovc/openvino/__init__.py" +"${OpenVINO_SOURCE_DIR}/tools/benchmark_tool/openvino/__init__.py") + +ov_check_init_files_alignment("${INIT_FILES_RUNTIME}") ov_option(ENABLE_PYTHON "Enables OpenVINO Python API build" ${ENABLE_PYTHON_DEFAULT}) diff --git a/src/bindings/python/src/compatibility/openvino/__init__.py b/src/bindings/python/src/compatibility/openvino/__init__.py index 90552e0befed68..b7dc434f3148cc 100644 --- a/src/bindings/python/src/compatibility/openvino/__init__.py +++ b/src/bindings/python/src/compatibility/openvino/__init__.py @@ -12,47 +12,47 @@ except ImportError: pass -# API 2.0 -try: - # Import all public modules - from openvino import runtime as runtime - from 
openvino import frontend as frontend - from openvino import helpers as helpers - from openvino import preprocess as preprocess - from openvino import utils as utils - from openvino.runtime import properties as properties - - # Import most important classes and functions from openvino.runtime - from openvino.runtime import Model - from openvino.runtime import Core - from openvino.runtime import CompiledModel - from openvino.runtime import InferRequest - from openvino.runtime import AsyncInferQueue - - from openvino.runtime import Dimension - from openvino.runtime import Strides - from openvino.runtime import PartialShape - from openvino.runtime import Shape - from openvino.runtime import Layout - from openvino.runtime import Type - from openvino.runtime import Tensor - from openvino.runtime import OVAny - - from openvino.runtime import compile_model - from openvino.runtime import get_batch - from openvino.runtime import set_batch - from openvino.runtime import serialize - from openvino.runtime import shutdown - from openvino.runtime import tensor_from_file - from openvino.runtime import save_model - from openvino.runtime import layout_helpers - - # Set version for openvino package - from openvino.runtime import get_version - __version__ = get_version() -except ImportError: - import warnings - warnings.warn("openvino package has problems with imports!", ImportWarning, stacklevel=2) +# # +# # API 2.0 +# # This __init__.py forces checking of runtime modules to propagate errors. +# # It is not compared with init files from openvino-dev package. +# # +# Import all public modules +from openvino import runtime as runtime +from openvino import frontend as frontend +from openvino import helpers as helpers +from openvino import preprocess as preprocess +from openvino import utils as utils +from openvino.runtime import properties as properties + +# Import most important classes and functions from openvino.runtime +from openvino.runtime import Model +from openvino.runtime import Core +from openvino.runtime import CompiledModel +from openvino.runtime import InferRequest +from openvino.runtime import AsyncInferQueue + +from openvino.runtime import Dimension +from openvino.runtime import Strides +from openvino.runtime import PartialShape +from openvino.runtime import Shape +from openvino.runtime import Layout +from openvino.runtime import Type +from openvino.runtime import Tensor +from openvino.runtime import OVAny + +from openvino.runtime import compile_model +from openvino.runtime import get_batch +from openvino.runtime import set_batch +from openvino.runtime import serialize +from openvino.runtime import shutdown +from openvino.runtime import tensor_from_file +from openvino.runtime import save_model +from openvino.runtime import layout_helpers + +# Set version for openvino package +from openvino.runtime import get_version +__version__ = get_version() # Tools try: diff --git a/src/bindings/python/src/openvino/__init__.py b/src/bindings/python/src/openvino/__init__.py index 90552e0befed68..b7dc434f3148cc 100644 --- a/src/bindings/python/src/openvino/__init__.py +++ b/src/bindings/python/src/openvino/__init__.py @@ -12,47 +12,47 @@ except ImportError: pass -# API 2.0 -try: - # Import all public modules - from openvino import runtime as runtime - from openvino import frontend as frontend - from openvino import helpers as helpers - from openvino import preprocess as preprocess - from openvino import utils as utils - from openvino.runtime import properties as properties - - # Import most important classes 
and functions from openvino.runtime - from openvino.runtime import Model - from openvino.runtime import Core - from openvino.runtime import CompiledModel - from openvino.runtime import InferRequest - from openvino.runtime import AsyncInferQueue - - from openvino.runtime import Dimension - from openvino.runtime import Strides - from openvino.runtime import PartialShape - from openvino.runtime import Shape - from openvino.runtime import Layout - from openvino.runtime import Type - from openvino.runtime import Tensor - from openvino.runtime import OVAny - - from openvino.runtime import compile_model - from openvino.runtime import get_batch - from openvino.runtime import set_batch - from openvino.runtime import serialize - from openvino.runtime import shutdown - from openvino.runtime import tensor_from_file - from openvino.runtime import save_model - from openvino.runtime import layout_helpers - - # Set version for openvino package - from openvino.runtime import get_version - __version__ = get_version() -except ImportError: - import warnings - warnings.warn("openvino package has problems with imports!", ImportWarning, stacklevel=2) +# # +# # API 2.0 +# # This __init__.py forces checking of runtime modules to propagate errors. +# # It is not compared with init files from openvino-dev package. +# # +# Import all public modules +from openvino import runtime as runtime +from openvino import frontend as frontend +from openvino import helpers as helpers +from openvino import preprocess as preprocess +from openvino import utils as utils +from openvino.runtime import properties as properties + +# Import most important classes and functions from openvino.runtime +from openvino.runtime import Model +from openvino.runtime import Core +from openvino.runtime import CompiledModel +from openvino.runtime import InferRequest +from openvino.runtime import AsyncInferQueue + +from openvino.runtime import Dimension +from openvino.runtime import Strides +from openvino.runtime import PartialShape +from openvino.runtime import Shape +from openvino.runtime import Layout +from openvino.runtime import Type +from openvino.runtime import Tensor +from openvino.runtime import OVAny + +from openvino.runtime import compile_model +from openvino.runtime import get_batch +from openvino.runtime import set_batch +from openvino.runtime import serialize +from openvino.runtime import shutdown +from openvino.runtime import tensor_from_file +from openvino.runtime import save_model +from openvino.runtime import layout_helpers + +# Set version for openvino package +from openvino.runtime import get_version +__version__ = get_version() # Tools try: diff --git a/tools/benchmark_tool/openvino/__init__.py b/tools/benchmark_tool/openvino/__init__.py index 90552e0befed68..b7dc434f3148cc 100644 --- a/tools/benchmark_tool/openvino/__init__.py +++ b/tools/benchmark_tool/openvino/__init__.py @@ -12,47 +12,47 @@ except ImportError: pass -# API 2.0 -try: - # Import all public modules - from openvino import runtime as runtime - from openvino import frontend as frontend - from openvino import helpers as helpers - from openvino import preprocess as preprocess - from openvino import utils as utils - from openvino.runtime import properties as properties - - # Import most important classes and functions from openvino.runtime - from openvino.runtime import Model - from openvino.runtime import Core - from openvino.runtime import CompiledModel - from openvino.runtime import InferRequest - from openvino.runtime import AsyncInferQueue - - from openvino.runtime 
import Dimension - from openvino.runtime import Strides - from openvino.runtime import PartialShape - from openvino.runtime import Shape - from openvino.runtime import Layout - from openvino.runtime import Type - from openvino.runtime import Tensor - from openvino.runtime import OVAny - - from openvino.runtime import compile_model - from openvino.runtime import get_batch - from openvino.runtime import set_batch - from openvino.runtime import serialize - from openvino.runtime import shutdown - from openvino.runtime import tensor_from_file - from openvino.runtime import save_model - from openvino.runtime import layout_helpers - - # Set version for openvino package - from openvino.runtime import get_version - __version__ = get_version() -except ImportError: - import warnings - warnings.warn("openvino package has problems with imports!", ImportWarning, stacklevel=2) +# # +# # API 2.0 +# # This __init__.py forces checking of runtime modules to propagate errors. +# # It is not compared with init files from openvino-dev package. +# # +# Import all public modules +from openvino import runtime as runtime +from openvino import frontend as frontend +from openvino import helpers as helpers +from openvino import preprocess as preprocess +from openvino import utils as utils +from openvino.runtime import properties as properties + +# Import most important classes and functions from openvino.runtime +from openvino.runtime import Model +from openvino.runtime import Core +from openvino.runtime import CompiledModel +from openvino.runtime import InferRequest +from openvino.runtime import AsyncInferQueue + +from openvino.runtime import Dimension +from openvino.runtime import Strides +from openvino.runtime import PartialShape +from openvino.runtime import Shape +from openvino.runtime import Layout +from openvino.runtime import Type +from openvino.runtime import Tensor +from openvino.runtime import OVAny + +from openvino.runtime import compile_model +from openvino.runtime import get_batch +from openvino.runtime import set_batch +from openvino.runtime import serialize +from openvino.runtime import shutdown +from openvino.runtime import tensor_from_file +from openvino.runtime import save_model +from openvino.runtime import layout_helpers + +# Set version for openvino package +from openvino.runtime import get_version +__version__ = get_version() # Tools try: diff --git a/tools/openvino_dev/CMakeLists.txt b/tools/openvino_dev/CMakeLists.txt index 12a24082a83a8e..494ac86c725acf 100644 --- a/tools/openvino_dev/CMakeLists.txt +++ b/tools/openvino_dev/CMakeLists.txt @@ -56,17 +56,8 @@ endforeach() # check __init__.py files alignment -function(ov_check_init_files_alignment) +function(ov_check_init_files_alignment init_files) # check the files in pairs - list(APPEND init_files - "${OpenVINO_SOURCE_DIR}/src/bindings/python/src/openvino/__init__.py" - "${OpenVINO_SOURCE_DIR}/src/bindings/python/src/compatibility/openvino/__init__.py" - "${OpenVINO_SOURCE_DIR}/tools/mo/openvino/__init__.py" - "${OpenVINO_SOURCE_DIR}/tools/pot/openvino/__init__.py" - "${OpenVINO_SOURCE_DIR}/tools/ovc/openvino/__init__.py" - "${OpenVINO_SOURCE_DIR}/tools/benchmark_tool/openvino/__init__.py" - "${OpenVINO_SOURCE_DIR}/tools/openvino_dev/src/openvino/__init__.py") - list(LENGTH init_files init_files_count) math(EXPR file_loop_range "${init_files_count}-2") foreach(init_file_idx RANGE 0 ${file_loop_range}) @@ -78,14 +69,17 @@ function(ov_check_init_files_alignment) RESULT_VARIABLE compare_result ) if(compare_result EQUAL 1) - message(STATUS 
${file1}) - message(STATUS ${file2}) - message(FATAL_ERROR "The __init__.py files are misaligned: ${file1} and ${file2}") + message(FATAL_ERROR "The tools __init__.py files are misaligned: ${file1} and ${file2}") endif() endforeach() endfunction() -ov_check_init_files_alignment() +set(INIT_FILES_TOOLS +"${OpenVINO_SOURCE_DIR}/tools/mo/openvino/__init__.py" +"${OpenVINO_SOURCE_DIR}/tools/pot/openvino/__init__.py" +"${OpenVINO_SOURCE_DIR}/tools/openvino_dev/src/openvino/__init__.py") + +ov_check_init_files_alignment("${INIT_FILES_TOOLS}") # openvino_dev build diff --git a/tools/ovc/openvino/__init__.py b/tools/ovc/openvino/__init__.py index 90552e0befed68..b7dc434f3148cc 100644 --- a/tools/ovc/openvino/__init__.py +++ b/tools/ovc/openvino/__init__.py @@ -12,47 +12,47 @@ except ImportError: pass -# API 2.0 -try: - # Import all public modules - from openvino import runtime as runtime - from openvino import frontend as frontend - from openvino import helpers as helpers - from openvino import preprocess as preprocess - from openvino import utils as utils - from openvino.runtime import properties as properties - - # Import most important classes and functions from openvino.runtime - from openvino.runtime import Model - from openvino.runtime import Core - from openvino.runtime import CompiledModel - from openvino.runtime import InferRequest - from openvino.runtime import AsyncInferQueue - - from openvino.runtime import Dimension - from openvino.runtime import Strides - from openvino.runtime import PartialShape - from openvino.runtime import Shape - from openvino.runtime import Layout - from openvino.runtime import Type - from openvino.runtime import Tensor - from openvino.runtime import OVAny - - from openvino.runtime import compile_model - from openvino.runtime import get_batch - from openvino.runtime import set_batch - from openvino.runtime import serialize - from openvino.runtime import shutdown - from openvino.runtime import tensor_from_file - from openvino.runtime import save_model - from openvino.runtime import layout_helpers - - # Set version for openvino package - from openvino.runtime import get_version - __version__ = get_version() -except ImportError: - import warnings - warnings.warn("openvino package has problems with imports!", ImportWarning, stacklevel=2) +# # +# # API 2.0 +# # This __init__.py forces checking of runtime modules to propagate errors. +# # It is not compared with init files from openvino-dev package. 
+# # +# Import all public modules +from openvino import runtime as runtime +from openvino import frontend as frontend +from openvino import helpers as helpers +from openvino import preprocess as preprocess +from openvino import utils as utils +from openvino.runtime import properties as properties + +# Import most important classes and functions from openvino.runtime +from openvino.runtime import Model +from openvino.runtime import Core +from openvino.runtime import CompiledModel +from openvino.runtime import InferRequest +from openvino.runtime import AsyncInferQueue + +from openvino.runtime import Dimension +from openvino.runtime import Strides +from openvino.runtime import PartialShape +from openvino.runtime import Shape +from openvino.runtime import Layout +from openvino.runtime import Type +from openvino.runtime import Tensor +from openvino.runtime import OVAny + +from openvino.runtime import compile_model +from openvino.runtime import get_batch +from openvino.runtime import set_batch +from openvino.runtime import serialize +from openvino.runtime import shutdown +from openvino.runtime import tensor_from_file +from openvino.runtime import save_model +from openvino.runtime import layout_helpers + +# Set version for openvino package +from openvino.runtime import get_version +__version__ = get_version() # Tools try: From 1ca2f9c6de17e33d0f4c0db17faeee172a5163a5 Mon Sep 17 00:00:00 2001 From: Maciej Smyk Date: Wed, 11 Oct 2023 11:03:14 +0200 Subject: [PATCH 11/13] Update openvino_intro.md (#20383) --- docs/articles_en/openvino_workflow/openvino_intro.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/articles_en/openvino_workflow/openvino_intro.md b/docs/articles_en/openvino_workflow/openvino_intro.md index 40db0d15b52bd5..2937189c136a01 100644 --- a/docs/articles_en/openvino_workflow/openvino_intro.md +++ b/docs/articles_en/openvino_workflow/openvino_intro.md @@ -33,7 +33,7 @@ OpenVINO Runtime is a set of C++ libraries with C and Python bindings providing Note that TensorFlow models can be run using the :doc:`torch.compile feature `, as well as the standard ways of :doc:`converting TensorFlow ` - or reading them directly. + or reading them directly. OpenVINO Runtime uses a plugin architecture. Its plugins are software components that contain complete implementation for inference on a particular IntelĀ® hardware device: CPU, GPU, GNA, etc. Each plugin implements the unified API and provides additional hardware-specific APIs for configuring devices or API interoperability between OpenVINO Runtime and underlying plugin backend. 
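As an aside to the plugin-architecture paragraph above: a minimal Python sketch of the unified-API idea follows. It assumes an installed openvino package; "model.xml" is a hypothetical placeholder path, not a file from this patch series.

# Minimal sketch: one unified API, served by whichever device plugin is requested.
# Assumes an installed openvino package; "model.xml" is a hypothetical IR file.
from openvino.runtime import Core

core = Core()
print(core.available_devices)                    # e.g. ['CPU', 'GPU'], depending on installed plugins
model = core.read_model("model.xml")             # read the model once
compiled_cpu = core.compile_model(model, "CPU")  # the CPU plugin compiles and runs it
# compiled_gpu = core.compile_model(model, "GPU")  # same call, different plugin backend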
From ac11751e9c751417a74e6af587134ebb79fac0c3 Mon Sep 17 00:00:00 2001 From: Pawel Raasz Date: Wed, 11 Oct 2023 11:50:29 +0200 Subject: [PATCH 12/13] [core]Migrate Eye to new API (#20258) * Migrate Eye to new API * Fix `matrix_offset` initialization * get_tensors_shapes -> get_tensors_partial_shapes --- src/core/include/openvino/op/eye.hpp | 4 +- .../openvino/op/util/evaluate_helpers.hpp | 23 +++ .../include/openvino/reference/eye.hpp | 32 ++-- src/core/src/op/eye.cpp | 152 ++++++++---------- src/core/src/op/util/evaluate_helpers.cpp | 17 ++ 5 files changed, 127 insertions(+), 101 deletions(-) create mode 100644 src/core/include/openvino/op/util/evaluate_helpers.hpp diff --git a/src/core/include/openvino/op/eye.hpp b/src/core/include/openvino/op/eye.hpp index feaebafca82264..1096e488aa13eb 100644 --- a/src/core/include/openvino/op/eye.hpp +++ b/src/core/include/openvino/op/eye.hpp @@ -55,9 +55,7 @@ class OPENVINO_API Eye : public Op { m_output_type = output_type; } - OPENVINO_SUPPRESS_DEPRECATED_START - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - OPENVINO_SUPPRESS_DEPRECATED_END + bool evaluate(TensorVector& outputs, const TensorVector& inputs) const override; bool has_evaluate() const override; protected: diff --git a/src/core/include/openvino/op/util/evaluate_helpers.hpp b/src/core/include/openvino/op/util/evaluate_helpers.hpp new file mode 100644 index 00000000000000..616528adf60d08 --- /dev/null +++ b/src/core/include/openvino/op/util/evaluate_helpers.hpp @@ -0,0 +1,23 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/core/partial_shape.hpp" +#include "openvino/runtime/tensor.hpp" + +namespace ov { +namespace op { +namespace util { + +/** + * @brief Get the tensors' shapes as ov::PartialShape. + * + * @param tensors Input tensor vector to get the shapes from. + * @return Vector of partial shapes, the same size as the input tensor vector. + */ +std::vector get_tensors_partial_shapes(const TensorVector& tensors); } // namespace util } // namespace op } // namespace ov diff --git a/src/core/reference/include/openvino/reference/eye.hpp b/src/core/reference/include/openvino/reference/eye.hpp index 0991637031538f..2cb997c03f0817 100644 --- a/src/core/reference/include/openvino/reference/eye.hpp +++ b/src/core/reference/include/openvino/reference/eye.hpp @@ -7,31 +7,41 @@ #include #include "openvino/core/shape.hpp" -#include "utils/span.hpp" namespace ov { namespace reference { + +/** + * @brief Reference implementation of Eye operator + * + * @param data Pointer to output data. + * @param out_shape Output data shape.
+ * @param diagonal_index Eye diagonal index to populate matrix with ones + */ template void eye(T* data, const Shape& out_shape, const int64_t diagonal_index) { - const int64_t num_matrices = shape_size(span(out_shape).subspan(0, out_shape.size() - 2)); - const int64_t num_rows = out_shape[out_shape.size() - 2]; - const int64_t num_columns = out_shape[out_shape.size() - 1]; + const auto spatial_dims_offset = out_shape.size() - 2; + const int64_t num_columns = out_shape.back(); + const int64_t num_rows = out_shape[spatial_dims_offset]; const int64_t matrix_size = num_rows * num_columns; + const int64_t out_size = shape_size(out_shape); // fill tensor by zero - std::fill(data, data + num_matrices * matrix_size, T(0)); + std::fill(data, std::next(data, out_size), T(0)); // set ones on diagonal - const int64_t shift_by_columns = std::max(diagonal_index, int64_t(0)); - const int64_t count_by_columns = std::max(num_columns - std::abs(diagonal_index), int64_t(0)); - const int64_t count_by_rows = std::max(num_rows - std::abs(diagonal_index), int64_t(0)); + constexpr int64_t zero{0}; + const auto abs_diag_idx = static_cast(std::abs(diagonal_index)); + const int64_t shift_by_columns = std::max(diagonal_index, zero); + const int64_t count_by_columns = std::max(num_columns - abs_diag_idx, zero); + const int64_t count_by_rows = std::max(num_rows - abs_diag_idx, zero); const int64_t count = diagonal_index > 0 ? std::min(count_by_columns, num_rows) : std::min(count_by_rows, num_columns); - for (auto i = 0; i < num_matrices; i++) { - for (auto j = 0; j < count; j++) { + for (auto matrix_offset = zero; matrix_offset < out_size; matrix_offset += matrix_size) { + for (auto j = 0; j < count; ++j) { const int64_t index = (j + shift_by_columns - diagonal_index) * num_columns + j + shift_by_columns; - data[index + i * matrix_size] = static_cast(1); + data[matrix_offset + index] = T{1}; } } } diff --git a/src/core/src/op/eye.cpp b/src/core/src/op/eye.cpp index 77e4082792e2f6..edf9abbb06f4c4 100644 --- a/src/core/src/op/eye.cpp +++ b/src/core/src/op/eye.cpp @@ -4,62 +4,49 @@ #include "openvino/op/eye.hpp" +#include "element_visitor.hpp" #include "eye_shape_inference.hpp" #include "itt.hpp" -#include "ngraph/validation_util.hpp" +#include "openvino/core/validation_util.hpp" +#include "openvino/op/util/evaluate_helpers.hpp" #include "openvino/reference/eye.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START namespace ov { namespace op { namespace eye { -namespace { -template -bool evaluate(const ngraph::HostTensorPtr& out, const int64_t diagonal_index) { - ov::reference::eye(out->get_data_ptr(), out->get_shape(), diagonal_index); - return true; -} -bool evaluate_eye(const ngraph::HostTensorPtr& out, const int64_t diagonal_index) { - bool rc = true; - switch (out->get_element_type()) { - OPENVINO_TYPE_CASE(evaluate, i8, out, diagonal_index); - OPENVINO_TYPE_CASE(evaluate, u8, out, diagonal_index); - OPENVINO_TYPE_CASE(evaluate, f16, out, diagonal_index); - OPENVINO_TYPE_CASE(evaluate, bf16, out, diagonal_index); - OPENVINO_TYPE_CASE(evaluate, i32, out, diagonal_index); - OPENVINO_TYPE_CASE(evaluate, f32, out, diagonal_index); - OPENVINO_TYPE_CASE(evaluate, f64, out, diagonal_index); - OPENVINO_TYPE_CASE(evaluate, i64, out, diagonal_index); - default: - rc = false; - break; +struct Evaluate : element::NoAction { + using element::NoAction::visit; + + template > + static result_type visit(Tensor& out, const Shape& out_shape, const int64_t diagonal_idx) { + reference::eye(out.data(), out_shape, diagonal_idx); + return true; } - 
return rc; -} -} // namespace +}; } // namespace eye -ov::op::v9::Eye::Eye(const Output& num_rows, - const Output& num_columns, - const Output& diagonal_index, - const Output& batch_shape, - const ov::element::Type& out_type) +namespace v9 { +Eye::Eye(const Output& num_rows, + const Output& num_columns, + const Output& diagonal_index, + const Output& batch_shape, + const ov::element::Type& out_type) : Op({num_rows, num_columns, diagonal_index, batch_shape}), m_output_type(out_type) { constructor_validate_and_infer_types(); } -ov::op::v9::Eye::Eye(const Output& num_rows, - const Output& num_columns, - const Output& diagonal_index, - const ov::element::Type& out_type) +Eye::Eye(const Output& num_rows, + const Output& num_columns, + const Output& diagonal_index, + const ov::element::Type& out_type) : Op({num_rows, num_columns, diagonal_index}), m_output_type(out_type) { constructor_validate_and_infer_types(); } -void ov::op::v9::Eye::validate_and_infer_types() { +void Eye::validate_and_infer_types() { OV_OP_SCOPE(v9_Eye_validate_and_infer_types); for (size_t i = 0; i < get_input_size(); ++i) { @@ -78,81 +65,72 @@ void ov::op::v9::Eye::validate_and_infer_types() { set_output_type(0, get_out_type(), output_shape); } -bool ov::op::v9::Eye::visit_attributes(ov::AttributeVisitor& visitor) { +bool Eye::visit_attributes(ov::AttributeVisitor& visitor) { OV_OP_SCOPE(v9_Eye_visit_attributes); visitor.on_attribute("output_type", m_output_type); return true; } -std::shared_ptr ov::op::v9::Eye::clone_with_new_inputs(const ov::OutputVector& new_args) const { +std::shared_ptr Eye::clone_with_new_inputs(const ov::OutputVector& new_args) const { OV_OP_SCOPE(v9_Eye_clone_with_new_inputs); check_new_args_count(this, new_args); - if (new_args.size() == 3) { - return std::make_shared(new_args[0], new_args[1], new_args[2], m_output_type); - } else if (new_args.size() == 4) { - return std::make_shared(new_args[0], new_args[1], new_args[2], new_args[3], m_output_type); - } else { + + switch (new_args.size()) { + case 3: + return std::make_shared(new_args[0], new_args[1], new_args[2], m_output_type); + case 4: + return std::make_shared(new_args[0], new_args[1], new_args[2], new_args[3], m_output_type); + default: OPENVINO_THROW("Eye has incorrect input number: ", new_args.size()); } } -bool ov::op::v9::Eye::has_evaluate() const { +bool Eye::has_evaluate() const { OV_OP_SCOPE(v9_Eye_has_evaluate); switch (m_output_type) { - case ov::element::i8: - case ov::element::u8: - case ov::element::f16: - case ov::element::bf16: - case ov::element::i32: - case ov::element::f32: - case ov::element::i64: + case element::bf16: + case element::f16: + case element::f32: + case element::f64: + case element::i8: + case element::i32: + case element::i64: + case element::u8: return true; default: - break; + return false; } - return false; } -bool ov::op::v9::Eye::evaluate(const ngraph::HostTensorVector& outputs, const ngraph::HostTensorVector& inputs) const { +bool Eye::evaluate(TensorVector& outputs, const TensorVector& inputs) const { OV_OP_SCOPE(v9_Eye_evaluate); - OPENVINO_SUPPRESS_DEPRECATED_START - OPENVINO_ASSERT(ngraph::validate_host_tensor_vector(inputs, get_input_size()), "Invalid Eye input TensorVector."); - OPENVINO_ASSERT(ngraph::validate_host_tensor_vector(outputs, 1), "Invalid Eye output TensorVector."); - OPENVINO_SUPPRESS_DEPRECATED_END - - int64_t diagonal_index; - - if (get_input_size() > 1) { - const auto& diagonal_index_data = inputs[2]; - - switch (diagonal_index_data->get_element_type()) { - case element::i32: - 
diagonal_index = diagonal_index_data->get_data_ptr()[0]; - break; - case element::i64: - diagonal_index = diagonal_index_data->get_data_ptr()[0]; - break; - default: - OPENVINO_THROW("Unsupported type of input `diagonal_index` in Eye operation: ", - diagonal_index_data->get_element_type().to_string()); - } - } else { - diagonal_index = 0; - } - - std::vector input_shapes; - input_shapes.reserve(inputs.size()); - - for (size_t i = 0; i < inputs.size(); ++i) { - input_shapes.push_back(inputs[i]->get_partial_shape()); - } + OPENVINO_ASSERT(outputs.size() == 1); + // Inputs size and shapes checked by shape_infer + const auto input_shapes = util::get_tensors_partial_shapes(inputs); const auto output_shape = shape_infer(this, input_shapes, make_tensor_accessor(inputs)).front().to_shape(); - outputs[0]->set_element_type(get_out_type()); - outputs[0]->set_shape(output_shape); + int64_t diagonal_index; + const auto& diagonal_tensor = inputs[2]; + switch (diagonal_tensor.get_element_type()) { + case element::i32: + diagonal_index = diagonal_tensor.data>()[0]; + break; + case element::i64: + diagonal_index = diagonal_tensor.data>()[0]; + break; + default: + OPENVINO_THROW("Unsupported type of input `diagonal_index` in Eye operation: ", + diagonal_tensor.get_element_type().to_string()); + } - return eye::evaluate_eye(outputs[0], diagonal_index); + outputs[0].set_shape(output_shape); + using namespace ov::element; + return IfTypeOf::apply(outputs[0].get_element_type(), + outputs[0], + output_shape, + diagonal_index); } +} // namespace v9 } // namespace op } // namespace ov diff --git a/src/core/src/op/util/evaluate_helpers.cpp b/src/core/src/op/util/evaluate_helpers.cpp index cffc57e6fbd87c..4e21da40bfe013 100644 --- a/src/core/src/op/util/evaluate_helpers.cpp +++ b/src/core/src/op/util/evaluate_helpers.cpp @@ -4,6 +4,8 @@ #include "ngraph/op/util/evaluate_helpers.hpp" +#include "openvino/op/util/evaluate_helpers.hpp" + namespace ngraph { AxisSet get_normalized_axes_from_tensor(const HostTensorPtr tensor, const ngraph::Rank& rank, @@ -15,3 +17,18 @@ AxisSet get_normalized_axes_from_tensor(const HostTensorPtr tensor, return AxisSet{normalized_axes}; } } // namespace ngraph + +namespace ov { +namespace op { +namespace util { +std::vector get_tensors_partial_shapes(const TensorVector& tensors) { + std::vector shapes; + shapes.reserve(tensors.size()); + for (const auto& t : tensors) { + shapes.emplace_back(t.get_shape()); + } + return shapes; +} +} // namespace util +} // namespace op +} // namespace ov From a3d6d0bca952d206bb1c89eaf8d9114e2bc1a28a Mon Sep 17 00:00:00 2001 From: Andrey Kashchikhin Date: Wed, 11 Oct 2023 11:05:12 +0100 Subject: [PATCH 13/13] [CI] [GHA] Skip `test_div_uint8_cpu` on macOS only; unskip `test_onnx/test_backend.py` in GHA workflows (#20367) * only skip test if mac * unskip * unskip trigger * skip for onnx fe as well * do not skip * return skips and unskip test_backend in Python API 1.0 * rm pr trigger --------- Co-authored-by: Ilya Lavrenov --- .github/workflows/linux.yml | 3 +-- .github/workflows/mac.yml | 5 ++--- .github/workflows/windows.yml | 2 +- .../python/tests_compatibility/test_onnx/test_backend.py | 9 +++++++++ src/frontends/onnx/tests/tests_python/test_backend.py | 9 +++++++++ 5 files changed, 22 insertions(+), 6 deletions(-) diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index a3c7e9a4e1c250..6bd6ef2342afbf 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -896,8 +896,7 @@ jobs: run: | python3 -m pytest -s 
${INSTALL_TEST_DIR}/pyngraph \ --junitxml=${INSTALL_TEST_DIR}/TEST-Pyngraph.xml \ - --ignore=${INSTALL_TEST_DIR}/pyngraph/tests_compatibility/test_onnx/test_zoo_models.py \ - --ignore=${INSTALL_TEST_DIR}/pyngraph/tests_compatibility/test_onnx/test_backend.py + --ignore=${INSTALL_TEST_DIR}/pyngraph/tests_compatibility/test_onnx/test_zoo_models.py - name: Python API 2.0 Tests run: | diff --git a/.github/workflows/mac.yml b/.github/workflows/mac.yml index 5097a6bb006b87..5f9658fd303f52 100644 --- a/.github/workflows/mac.yml +++ b/.github/workflows/mac.yml @@ -485,12 +485,11 @@ jobs: python3 -m pip install $ov_dev_wheel_name[mxnet,caffe,kaldi,onnx,tensorflow2] popd - - name: nGraph and IE Python Bindings Tests + - name: Python API 1.0 Tests run: | python3 -m pytest -s ${{ env.INSTALL_TEST_DIR }}/pyngraph \ --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-Pyngraph.xml \ - --ignore=${{ env.INSTALL_TEST_DIR }}/pyngraph/tests/test_onnx/test_zoo_models.py \ - --ignore=${{ env.INSTALL_TEST_DIR }}/pyngraph/tests/test_onnx/test_backend.py + --ignore=${{ env.INSTALL_TEST_DIR }}/pyngraph/tests/test_onnx/test_zoo_models.py - name: Python API 2.0 Tests run: | diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 70c7ac216121dc..c23419a4463a47 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -310,7 +310,7 @@ jobs: shell: cmd run: | set PYTHONPATH=${{ env.OPENVINO_REPO }}\tools\mo;${{ env.LAYER_TESTS_INSTALL_DIR }};%PYTHONPATH% - call "${{ env.INSTALL_DIR }}\\setupvars.bat" && python3 -m pytest -s ${{ env.INSTALL_TEST_DIR }}/pyngraph ${{ env.PYTHON_STATIC_ARGS }} --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-Pyngraph.xml --ignore=${{ env.INSTALL_TEST_DIR }}/pyngraph/tests_compatibility/test_onnx/test_zoo_models.py --ignore=${{ env.INSTALL_TEST_DIR }}/pyngraph/tests_compatibility/test_onnx/test_backend.py + call "${{ env.INSTALL_DIR }}\\setupvars.bat" && python3 -m pytest -s ${{ env.INSTALL_TEST_DIR }}/pyngraph ${{ env.PYTHON_STATIC_ARGS }} --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-Pyngraph.xml --ignore=${{ env.INSTALL_TEST_DIR }}/pyngraph/tests_compatibility/test_onnx/test_zoo_models.py - name: Python API 2.0 Tests shell: cmd diff --git a/src/bindings/python/tests_compatibility/test_onnx/test_backend.py b/src/bindings/python/tests_compatibility/test_onnx/test_backend.py index 87f53223c2d672..396cddb80a598f 100644 --- a/src/bindings/python/tests_compatibility/test_onnx/test_backend.py +++ b/src/bindings/python/tests_compatibility/test_onnx/test_backend.py @@ -3,6 +3,8 @@ import logging +from sys import platform + import onnx.backend.test from tests_compatibility import ( BACKEND_NAME, @@ -32,6 +34,7 @@ xfail_issue_48052, xfail_issue_52463, xfail_issue_58033, + xfail_issue_58676, xfail_issue_63033, xfail_issue_63036, xfail_issue_63043, @@ -809,6 +812,12 @@ def expect_fail(test_case_path, xfail): # type: (str) -> None ), ] +if platform == 'darwin': + tests_expected_to_fail.append(( + xfail_issue_58676, + "OnnxBackendNodeModelTest.test_div_uint8_cpu" + )) + for test_group in tests_expected_to_fail: for test_case in test_group[1:]: expect_fail("{}".format(test_case), test_group[0]) diff --git a/src/frontends/onnx/tests/tests_python/test_backend.py b/src/frontends/onnx/tests/tests_python/test_backend.py index d1ef686bdd4124..d75cfcf77aeefd 100644 --- a/src/frontends/onnx/tests/tests_python/test_backend.py +++ b/src/frontends/onnx/tests/tests_python/test_backend.py @@ -4,6 +4,8 @@ import logging +from sys import platform + import onnx.backend.test from tests 
import ( BACKEND_NAME, @@ -32,6 +34,7 @@ xfail_issue_48052, xfail_issue_52463, xfail_issue_58033, + xfail_issue_58676, xfail_issue_63033, xfail_issue_63036, xfail_issue_63043, @@ -683,6 +686,12 @@ def expect_fail(test_case_path, xfail): # type: (str) -> None ), ] +if platform == 'darwin': + tests_expected_to_fail.append(( + xfail_issue_58676, + "OnnxBackendNodeModelTest.test_div_uint8_cpu" + )) + for test_group in tests_expected_to_fail: for test_case in test_group[1:]: expect_fail(f"{test_case}", test_group[0])
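The macOS-only expected failure added above is an instance of a generic pytest pattern; a standalone sketch is below. The test name, body, and reason string are illustrative stand-ins, not parts of the OpenVINO test suites, which attach their own xfail markers to backend-generated ONNX tests.

# Minimal sketch of a platform-conditional expected failure in pytest.
# Names and the reason string are hypothetical examples.
import sys

import pytest

@pytest.mark.xfail(sys.platform == "darwin", reason="known macOS-only failure")
def test_div_uint8_sketch():
    # The real suites apply such a marker to OnnxBackendNodeModelTest cases instead.
    assert 255 // 2 == 127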