From f9678a285c9373e86ac459fd4d4c103e91036b9d Mon Sep 17 00:00:00 2001 From: Sergey Shlyapnikov Date: Thu, 28 Sep 2023 13:53:51 +0400 Subject: [PATCH 1/3] [GPU] Do not use usm_host memory buffers for PVC as a device inputs (#19767) --- .../src/plugin/sync_infer_request.cpp | 21 ++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/src/plugins/intel_gpu/src/plugin/sync_infer_request.cpp b/src/plugins/intel_gpu/src/plugin/sync_infer_request.cpp index 2e6699333070f9..d5ee5d423fcd13 100644 --- a/src/plugins/intel_gpu/src/plugin/sync_infer_request.cpp +++ b/src/plugins/intel_gpu/src/plugin/sync_infer_request.cpp @@ -28,6 +28,19 @@ namespace { +inline bool can_use_usm_host(const cldnn::engine& engine) { + auto can_use_usm = engine.use_unified_shared_memory(); + + // WA: Disable USM host memory for infer request`s tensors for PVC as + // it has performance issues in case of host <-> device data transfers inside kernels + // Use unsupported SIMD8 as unique attribute of PVC + auto supported_simd_sizes = engine.get_device_info().supported_simd_sizes; + if (std::find(supported_simd_sizes.begin(), supported_simd_sizes.end(), 8) == supported_simd_sizes.end()) + can_use_usm = false; + + return can_use_usm; +} + inline std::string get_port_name(const ov::Output& port, const bool is_legacy_api) { std::string name; // TODO: Should use tensor name as the port name, but many legacy tests still use legacy name @@ -480,6 +493,10 @@ std::shared_ptr SyncInferRequest::create_device_tensor(const ov::Sh tensor_type = TensorType::BT_BUF_INTERNAL; } + // Create OpenCL buffer for PVC if lockable memory is needed due to performance issue with usm host + if (!can_use_usm_host(m_graph->get_engine()) && need_lockable_memory) + tensor_type = TensorType::BT_BUF_INTERNAL; + // Currently, clDeviceMemAllocINTEL returns memory address allocated to other input blob if the current blob is empty // W/A for this issue: // Allocate with non-empty shape and then reinterprete with 
original shape @@ -512,7 +529,9 @@ TensorWrapper SyncInferRequest::create_or_share_device_tensor(const TensorWrappe auto input_ptr = user_tensor->data(); const auto alloc_type = m_graph->get_engine().detect_usm_allocation_type(input_ptr); const auto is_usm_host = alloc_type == cldnn::allocation_type::usm_host; - bool can_share = is_usm_host && !is_convert_required(user_tensor->get_element_type(), element_type); + bool can_share = is_usm_host && + !is_convert_required(user_tensor->get_element_type(), element_type) && + can_use_usm_host(m_graph->get_engine()); if (can_share) { // For USM case we create host blob using custom USM host allocator From 197e954846aa8d434f9bb86672d4b138d2b34bd1 Mon Sep 17 00:00:00 2001 From: Pawel Raasz Date: Thu, 28 Sep 2023 12:12:21 +0200 Subject: [PATCH 2/3] [core]Migrate reduce ops max min mean prod sum evaluate to new API (#19756) * Migrate ReduceL1, ReduceL2 to new API - add some new utils which are deprecated * Hide helper functions from public API * Migrate reductions ops to new API * Migrate get_constant_from_source to dev API * Rename ref max to reduce_max * Rename ref min to reduce_min * Rename ref mean to reduce_mean * Rename ref sum to reduce_sum * Rename ref product to reduce_prod - minor optimization in ReduceProd operator * Restore custom isfinite for ov float types * Fix type name in reduce_max.hpp * Add missing include in shape_util.hpp * Make count same type as data type in reduce mean * Correct reduce sum doxy comment --- src/core/dev_api/validation_util.hpp | 9 ++ src/core/include/openvino/op/reduce_max.hpp | 4 +- src/core/include/openvino/op/reduce_mean.hpp | 4 +- src/core/include/openvino/op/reduce_min.hpp | 4 +- src/core/include/openvino/op/reduce_prod.hpp | 4 +- src/core/include/openvino/op/reduce_sum.hpp | 4 +- .../reference/group_normalization.hpp | 8 +- .../openvino/reference/log_softmax.hpp | 8 +- .../include/openvino/reference/max.hpp | 46 ------ .../include/openvino/reference/mean.hpp | 58 -------- 
.../include/openvino/reference/min.hpp | 51 ------- .../include/openvino/reference/mvn.hpp | 12 +- .../openvino/reference/normalize_l2.hpp | 4 +- .../include/openvino/reference/product.hpp | 39 ----- .../include/openvino/reference/reduce_l1.hpp | 2 +- .../include/openvino/reference/reduce_max.hpp | 46 ++++++ .../openvino/reference/reduce_mean.hpp | 36 +++++ .../include/openvino/reference/reduce_min.hpp | 46 ++++++ .../openvino/reference/reduce_prod.hpp | 42 ++++++ .../include/openvino/reference/reduce_sum.hpp | 94 +++++++++++++ .../include/openvino/reference/softmax.hpp | 8 +- .../include/openvino/reference/sum.hpp | 84 ----------- src/core/reference/src/op/einsum.cpp | 6 +- src/core/shape_inference/include/utils.hpp | 9 +- src/core/src/op/reduce_l1.cpp | 4 +- src/core/src/op/reduce_l2.cpp | 2 +- src/core/src/op/reduce_max.cpp | 109 ++++++-------- src/core/src/op/reduce_mean.cpp | 104 ++++++-------- src/core/src/op/reduce_min.cpp | 108 ++++++-------- src/core/src/op/reduce_prod.cpp | 133 ++++++++---------- src/core/src/op/reduce_sum.cpp | 105 ++++++-------- src/core/src/op/util/axes_util.cpp | 2 +- src/core/src/op/util/reduction_base.cpp | 11 +- src/core/src/validation_util.cpp | 24 +++- 34 files changed, 567 insertions(+), 663 deletions(-) delete mode 100644 src/core/reference/include/openvino/reference/max.hpp delete mode 100644 src/core/reference/include/openvino/reference/mean.hpp delete mode 100644 src/core/reference/include/openvino/reference/min.hpp delete mode 100644 src/core/reference/include/openvino/reference/product.hpp create mode 100644 src/core/reference/include/openvino/reference/reduce_max.hpp create mode 100644 src/core/reference/include/openvino/reference/reduce_mean.hpp create mode 100644 src/core/reference/include/openvino/reference/reduce_min.hpp create mode 100644 src/core/reference/include/openvino/reference/reduce_prod.hpp create mode 100644 src/core/reference/include/openvino/reference/reduce_sum.hpp delete mode 100644 
src/core/reference/include/openvino/reference/sum.hpp diff --git a/src/core/dev_api/validation_util.hpp b/src/core/dev_api/validation_util.hpp index 452445a055b8a5..fe607828c80148 100644 --- a/src/core/dev_api/validation_util.hpp +++ b/src/core/dev_api/validation_util.hpp @@ -38,5 +38,14 @@ OPENVINO_API int64_t clip(const int64_t& value, const int64_t& min, const int64_ /// /// \return Constant node or nullptr if unable to constantfold the subgraph OPENVINO_API std::shared_ptr constantfold_subgraph(const Output& subgraph_sink); + +/** + * @brief Runs an estimation of source tensor. If it succeeded to calculate both bounds and + * they are the same returns Constant operation from the resulting bound, otherwise nullptr. + * + * @param source Node output used to get its tensor data as constant. + * @return Shared pointer to constant data or nullptr. + */ +OPENVINO_API std::shared_ptr get_constant_from_source(const Output& source); } // namespace util } // namespace ov diff --git a/src/core/include/openvino/op/reduce_max.hpp b/src/core/include/openvino/op/reduce_max.hpp index 594450d69d9532..1e8e508b30c637 100644 --- a/src/core/include/openvino/op/reduce_max.hpp +++ b/src/core/include/openvino/op/reduce_max.hpp @@ -26,9 +26,7 @@ class OPENVINO_API ReduceMax : public util::ArithmeticReductionKeepDims { std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - OPENVINO_SUPPRESS_DEPRECATED_START - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - OPENVINO_SUPPRESS_DEPRECATED_END + bool evaluate(TensorVector& outputs, const TensorVector& inputs) const override; bool has_evaluate() const override; bool evaluate_lower(TensorVector& outputs) const override; bool evaluate_upper(TensorVector& outputs) const override; diff --git a/src/core/include/openvino/op/reduce_mean.hpp b/src/core/include/openvino/op/reduce_mean.hpp index 7b50dd57b7dafc..f8c4bb9bf91eb2 100644 --- 
a/src/core/include/openvino/op/reduce_mean.hpp +++ b/src/core/include/openvino/op/reduce_mean.hpp @@ -24,9 +24,7 @@ class OPENVINO_API ReduceMean : public util::ArithmeticReductionKeepDims { std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - OPENVINO_SUPPRESS_DEPRECATED_START - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - OPENVINO_SUPPRESS_DEPRECATED_END + bool evaluate(TensorVector& outputs, const TensorVector& inputs) const override; bool has_evaluate() const override; }; } // namespace v1 diff --git a/src/core/include/openvino/op/reduce_min.hpp b/src/core/include/openvino/op/reduce_min.hpp index 830021a0bb2ae0..0fe90c35caf7ec 100644 --- a/src/core/include/openvino/op/reduce_min.hpp +++ b/src/core/include/openvino/op/reduce_min.hpp @@ -26,9 +26,7 @@ class OPENVINO_API ReduceMin : public util::ArithmeticReductionKeepDims { std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - OPENVINO_SUPPRESS_DEPRECATED_START - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - OPENVINO_SUPPRESS_DEPRECATED_END + bool evaluate(TensorVector& outputs, const TensorVector& inputs) const override; bool has_evaluate() const override; bool evaluate_lower(TensorVector& outputs) const override; bool evaluate_upper(TensorVector& outputs) const override; diff --git a/src/core/include/openvino/op/reduce_prod.hpp b/src/core/include/openvino/op/reduce_prod.hpp index 4a9af6339b6797..d53ba7ca4049a2 100644 --- a/src/core/include/openvino/op/reduce_prod.hpp +++ b/src/core/include/openvino/op/reduce_prod.hpp @@ -27,9 +27,7 @@ class OPENVINO_API ReduceProd : public util::ArithmeticReductionKeepDims { std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - OPENVINO_SUPPRESS_DEPRECATED_START - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - 
OPENVINO_SUPPRESS_DEPRECATED_END + bool evaluate(TensorVector& outputs, const TensorVector& inputs) const override; bool has_evaluate() const override; bool evaluate_lower(TensorVector& outputs) const override; bool evaluate_upper(TensorVector& outputs) const override; diff --git a/src/core/include/openvino/op/reduce_sum.hpp b/src/core/include/openvino/op/reduce_sum.hpp index 7a3221c68e52ef..8a210c60986fdf 100644 --- a/src/core/include/openvino/op/reduce_sum.hpp +++ b/src/core/include/openvino/op/reduce_sum.hpp @@ -73,9 +73,7 @@ class OPENVINO_API ReduceSum : public util::ArithmeticReductionKeepDims { std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - OPENVINO_SUPPRESS_DEPRECATED_START - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - OPENVINO_SUPPRESS_DEPRECATED_END + bool evaluate(TensorVector& outputs, const TensorVector& inputs) const override; bool has_evaluate() const override; }; } // namespace v1 diff --git a/src/core/reference/include/openvino/reference/group_normalization.hpp b/src/core/reference/include/openvino/reference/group_normalization.hpp index 35a215b7123757..d6e5602a586742 100644 --- a/src/core/reference/include/openvino/reference/group_normalization.hpp +++ b/src/core/reference/include/openvino/reference/group_normalization.hpp @@ -8,8 +8,8 @@ #include #include "openvino/core/shape.hpp" -#include "openvino/reference/mean.hpp" -#include "openvino/reference/sum.hpp" +#include "openvino/reference/reduce_mean.hpp" +#include "openvino/reference/reduce_sum.hpp" namespace ov { namespace reference { @@ -38,11 +38,11 @@ void group_normalization(const T* const data, const auto group_begin = data + n * batch_size + g * group_size; const auto group_end = group_begin + group_size; std::vector mean_value(1); - mean(group_begin, mean_value.data(), Shape{group_size}, {0}); + reduce_mean(group_begin, mean_value.data(), Shape{group_size}, {0}); T mean = mean_value[0]; T variance 
= 0, err = 0; for_each(group_begin, group_end, [&](const T d) { - return details::kahan_summation(static_cast(pow(d - mean, 2)), err, variance); + variance = details::kahan_summation(static_cast(pow(d - mean, 2)), variance, err); }); variance /= group_size; const T standard_deviation = sqrt(variance + eps); diff --git a/src/core/reference/include/openvino/reference/log_softmax.hpp b/src/core/reference/include/openvino/reference/log_softmax.hpp index fb5d455b28a330..7335bd36989b18 100644 --- a/src/core/reference/include/openvino/reference/log_softmax.hpp +++ b/src/core/reference/include/openvino/reference/log_softmax.hpp @@ -7,8 +7,8 @@ #include #include "ngraph/shape_util.hpp" -#include "openvino/reference/max.hpp" -#include "openvino/reference/sum.hpp" +#include "openvino/reference/reduce_max.hpp" +#include "openvino/reference/reduce_sum.hpp" #include "openvino/reference/utils/coordinate_transform.hpp" namespace ov { @@ -21,7 +21,7 @@ void log_softmax(const T* arg, T* out, const Shape& shape, const AxisSet& axes) auto temp_max = std::vector(temp_elements, 0); auto temp_sum = std::vector(temp_elements, 0); - max(arg, temp_max.data(), shape, axes); + reduce_max(arg, temp_max.data(), shape, axes); CoordinateTransform transform(shape); CoordinateTransform temp_transform(temp_shape); @@ -31,7 +31,7 @@ void log_softmax(const T* arg, T* out, const Shape& shape, const AxisSet& axes) static_cast(std::exp(arg[transform.index(coord)] - temp_max[temp_transform.index(temp_coord)])); } - sum(out, temp_sum.data(), shape, axes); + reduce_sum(out, temp_sum.data(), shape, axes); for (const Coordinate& coord : transform) { Coordinate temp_coord = ngraph::reduce(coord, axes, true); diff --git a/src/core/reference/include/openvino/reference/max.hpp b/src/core/reference/include/openvino/reference/max.hpp deleted file mode 100644 index 0cbb810ecafbe5..00000000000000 --- a/src/core/reference/include/openvino/reference/max.hpp +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright (C) 2018-2023 
Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include -#include -#include - -#include "ngraph/shape_util.hpp" -#include "openvino/reference/utils/coordinate_transform.hpp" - -namespace ov { -namespace reference { -template -void max(const T* arg, T* out, const Shape& in_shape, const AxisSet& reduction_axes) { - T minval = std::numeric_limits::lowest(); - - constexpr bool dont_keep_dims_in_output = false; - OPENVINO_SUPPRESS_DEPRECATED_START - const auto out_shape = ngraph::reduce(in_shape, reduction_axes, dont_keep_dims_in_output); - std::fill(out, out + shape_size(out_shape), minval); - - const auto in_strides = row_major_strides(in_shape); - const auto out_strides = row_major_strides(out_shape); - - CoordinateTransformBasic input_transform(in_shape); - for (const Coordinate& input_coord : input_transform) { - const Coordinate output_coord = ngraph::reduce(input_coord, reduction_axes, dont_keep_dims_in_output); - - const size_t in_idx = - std::inner_product(input_coord.begin(), input_coord.end(), in_strides.begin(), uint64_t(0)); - const size_t out_idx = - std::inner_product(output_coord.begin(), output_coord.end(), out_strides.begin(), uint64_t(0)); - - const T x = arg[in_idx]; - const T max = out[out_idx]; - if (x > max) { - out[out_idx] = x; - } - } - OPENVINO_SUPPRESS_DEPRECATED_END -} -} // namespace reference -} // namespace ov diff --git a/src/core/reference/include/openvino/reference/mean.hpp b/src/core/reference/include/openvino/reference/mean.hpp deleted file mode 100644 index 85fe10eddcf6bd..00000000000000 --- a/src/core/reference/include/openvino/reference/mean.hpp +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include -#include -#include -#include - -#include "ngraph/shape_util.hpp" -#include "ngraph/type/bfloat16.hpp" -#include "ngraph/type/float16.hpp" -#include "openvino/reference/sum.hpp" -#include 
"openvino/reference/utils/coordinate_transform.hpp" - -namespace ov { -namespace reference { -template -void mean(const T* arg, T* out, const Shape& in_shape, const AxisSet& reduction_axes) { - constexpr bool dont_keep_dims_in_output = false; - OPENVINO_SUPPRESS_DEPRECATED_START - const auto out_shape = ngraph::reduce(in_shape, reduction_axes, dont_keep_dims_in_output); - std::vector cs(shape_size(out_shape), 0); - std::fill(out, out + shape_size(out_shape), T(0)); - - const auto in_strides = row_major_strides(in_shape); - const auto out_strides = row_major_strides(out_shape); - - CoordinateTransformBasic input_transform(in_shape); - std::map index_to_count_map; - - for (const Coordinate& input_coord : input_transform) { - const Coordinate output_coord = ngraph::reduce(input_coord, reduction_axes, dont_keep_dims_in_output); - - const size_t in_idx = - std::inner_product(input_coord.begin(), input_coord.end(), in_strides.begin(), uint64_t(0)); - const size_t out_idx = - std::inner_product(output_coord.begin(), output_coord.end(), out_strides.begin(), uint64_t(0)); - - details::kahan_summation(arg[in_idx], cs[out_idx], out[out_idx]); - - if (index_to_count_map.find(out_idx) == index_to_count_map.end()) { - index_to_count_map[out_idx] = 1; - } else { - index_to_count_map[out_idx]++; - } - } - OPENVINO_SUPPRESS_DEPRECATED_END - - for (size_t i = 0; i < shape_size(out_shape); ++i) { - auto count = index_to_count_map[i]; - out[i] = out[i] / count; - } -} -} // namespace reference -} // namespace ov diff --git a/src/core/reference/include/openvino/reference/min.hpp b/src/core/reference/include/openvino/reference/min.hpp deleted file mode 100644 index 6d6c3e05df3ed1..00000000000000 --- a/src/core/reference/include/openvino/reference/min.hpp +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include -#include -#include - -#include "ngraph/shape_util.hpp" -#include 
"openvino/reference/utils/coordinate_transform.hpp" - -#ifdef _WIN32 -# undef min -#endif - -namespace ov { -namespace reference { -template -void min(const T* arg, T* out, const Shape& in_shape, const AxisSet& reduction_axes) { - T minval = - std::numeric_limits::has_infinity ? std::numeric_limits::infinity() : std::numeric_limits::max(); - - constexpr bool dont_keep_dims_in_output = false; - OPENVINO_SUPPRESS_DEPRECATED_START - const auto out_shape = ngraph::reduce(in_shape, reduction_axes, dont_keep_dims_in_output); - std::fill(out, out + shape_size(out_shape), minval); - - const auto in_strides = row_major_strides(in_shape); - const auto out_strides = row_major_strides(out_shape); - - CoordinateTransformBasic input_transform(in_shape); - for (const Coordinate& input_coord : input_transform) { - const Coordinate output_coord = ngraph::reduce(input_coord, reduction_axes, dont_keep_dims_in_output); - - const size_t in_idx = - std::inner_product(input_coord.begin(), input_coord.end(), in_strides.begin(), uint64_t(0)); - const size_t out_idx = - std::inner_product(output_coord.begin(), output_coord.end(), out_strides.begin(), uint64_t(0)); - - const T x = arg[in_idx]; - const T min = out[out_idx]; - if (x < min) { - out[out_idx] = x; - } - } - OPENVINO_SUPPRESS_DEPRECATED_END -} -} // namespace reference -} // namespace ov diff --git a/src/core/reference/include/openvino/reference/mvn.hpp b/src/core/reference/include/openvino/reference/mvn.hpp index cd171fe73f24d5..8bcce83295e5eb 100644 --- a/src/core/reference/include/openvino/reference/mvn.hpp +++ b/src/core/reference/include/openvino/reference/mvn.hpp @@ -10,11 +10,11 @@ #include "openvino/reference/add.hpp" #include "openvino/reference/divide.hpp" -#include "openvino/reference/mean.hpp" #include "openvino/reference/multiply.hpp" +#include "openvino/reference/reduce_mean.hpp" +#include "openvino/reference/reduce_sum.hpp" #include "openvino/reference/sqrt.hpp" #include "openvino/reference/subtract.hpp" -#include 
"openvino/reference/sum.hpp" namespace ov { namespace reference { @@ -28,13 +28,13 @@ void mvn(const T* arg, const double eps) { auto reduced_shape = ngraph::reduce(in_shape, reduction_axes, true); std::vector tmp_buffer(shape_size(in_shape)); - mean(arg, tmp_buffer.data(), in_shape, reduction_axes); + reduce_mean(arg, tmp_buffer.data(), in_shape, reduction_axes); subtract(arg, tmp_buffer.data(), out, in_shape, reduced_shape, op::AutoBroadcastType::NUMPY); if (normalize_variance) { multiply(out, out, tmp_buffer.data(), shape_size(in_shape)); std::vector mean_value(shape_size(reduced_shape)); - mean(tmp_buffer.data(), mean_value.data(), in_shape, reduction_axes); + reduce_mean(tmp_buffer.data(), mean_value.data(), in_shape, reduction_axes); add(mean_value.data(), std::vector(shape_size(reduced_shape), static_cast(eps)).data(), @@ -58,13 +58,13 @@ void mvn_6(const T* arg, op::MVNEpsMode eps_mode) { auto reduced_shape = ngraph::reduce(in_shape, reduction_axes, true); std::vector tmp_buffer(shape_size(in_shape)); - mean(arg, tmp_buffer.data(), in_shape, reduction_axes); + reduce_mean(arg, tmp_buffer.data(), in_shape, reduction_axes); subtract(arg, tmp_buffer.data(), out, in_shape, reduced_shape, op::AutoBroadcastType::NUMPY); if (normalize_variance) { multiply(out, out, tmp_buffer.data(), shape_size(in_shape)); std::vector mean_value(shape_size(reduced_shape)); - mean(tmp_buffer.data(), mean_value.data(), in_shape, reduction_axes); + reduce_mean(tmp_buffer.data(), mean_value.data(), in_shape, reduction_axes); if (eps_mode == op::MVNEpsMode::INSIDE_SQRT) { add(mean_value.data(), diff --git a/src/core/reference/include/openvino/reference/normalize_l2.hpp b/src/core/reference/include/openvino/reference/normalize_l2.hpp index ce737f652afc7c..69c0cff34fdc34 100644 --- a/src/core/reference/include/openvino/reference/normalize_l2.hpp +++ b/src/core/reference/include/openvino/reference/normalize_l2.hpp @@ -7,7 +7,7 @@ #include #include 
"openvino/reference/autobroadcast_binop.hpp" -#include "openvino/reference/sum.hpp" +#include "openvino/reference/reduce_sum.hpp" namespace ov { namespace reference { @@ -38,7 +38,7 @@ void normalize_l2(const T* data, } std::vector sum_data(shape_size(reduce_shape)); - sum(sqr_data.data(), sum_data.data(), data_shape, reduction_axes); + reduce_sum(sqr_data.data(), sum_data.data(), data_shape, reduction_axes); autobroadcast_binop(data, sum_data.data(), out, diff --git a/src/core/reference/include/openvino/reference/product.hpp b/src/core/reference/include/openvino/reference/product.hpp deleted file mode 100644 index 41ce4cf3b1d841..00000000000000 --- a/src/core/reference/include/openvino/reference/product.hpp +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include -#include - -#include "ngraph/shape_util.hpp" -#include "openvino/reference/utils/coordinate_transform.hpp" - -namespace ov { -namespace reference { -template -void product(const T* arg, T* out, const Shape& in_shape, const AxisSet& reduction_axes) { - constexpr bool dont_keep_dims_in_output = false; - OPENVINO_SUPPRESS_DEPRECATED_START - const auto out_shape = ngraph::reduce(in_shape, reduction_axes, dont_keep_dims_in_output); - std::fill(out, out + shape_size(out_shape), T(1)); - - const auto in_strides = row_major_strides(in_shape); - const auto out_strides = row_major_strides(out_shape); - - CoordinateTransformBasic input_transform(in_shape); - for (const Coordinate& input_coord : input_transform) { - const Coordinate output_coord = ngraph::reduce(input_coord, reduction_axes, dont_keep_dims_in_output); - - const size_t in_idx = - std::inner_product(input_coord.begin(), input_coord.end(), in_strides.begin(), uint64_t(0)); - const size_t out_idx = - std::inner_product(output_coord.begin(), output_coord.end(), out_strides.begin(), uint64_t(0)); - - out[out_idx] = out[out_idx] * arg[in_idx]; - } - 
OPENVINO_SUPPRESS_DEPRECATED_END -} -} // namespace reference -} // namespace ov diff --git a/src/core/reference/include/openvino/reference/reduce_l1.hpp b/src/core/reference/include/openvino/reference/reduce_l1.hpp index 50228962f334cf..c729f97490890b 100644 --- a/src/core/reference/include/openvino/reference/reduce_l1.hpp +++ b/src/core/reference/include/openvino/reference/reduce_l1.hpp @@ -9,7 +9,7 @@ #include "openvino/core/shape_util.hpp" #include "openvino/reference/abs.hpp" -#include "openvino/reference/sum.hpp" +#include "openvino/reference/reduce_sum.hpp" #include "openvino/reference/utils/type_util.hpp" namespace ov { diff --git a/src/core/reference/include/openvino/reference/reduce_max.hpp b/src/core/reference/include/openvino/reference/reduce_max.hpp new file mode 100644 index 00000000000000..3d16d343de79e0 --- /dev/null +++ b/src/core/reference/include/openvino/reference/reduce_max.hpp @@ -0,0 +1,46 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include +#include + +#include "openvino/core/shape_util.hpp" +#include "openvino/reference/utils/coordinate_index.hpp" +#include "openvino/reference/utils/coordinate_transform.hpp" + +namespace ov { +namespace reference { + +/** + * @brief Reference implementation of ReduceMax operator. + * + * @param in Input pointer to data. + * @param out Output pointer to results. + * @param in_shape Input shape. + * @param reduction_axes Axes on which reduction is applied. 
+ */ +template +void reduce_max(const T* in, T* out, const Shape& in_shape, const AxisSet& reduction_axes) { + constexpr auto min_value = std::numeric_limits::lowest(); + + const auto out_shape = util::reduce(in_shape, reduction_axes); + std::fill(out, std::next(out, shape_size(out_shape)), min_value); + + const auto in_strides = row_major_strides(in_shape); + const auto out_strides = row_major_strides(out_shape); + + CoordinateTransformBasic input_transform(in_shape); + for (const auto& in_coord : input_transform) { + const auto out_coord = util::reduce(in_coord, reduction_axes); + const auto in_idx = coordinate_offset(in_coord, in_strides); + const auto out_idx = coordinate_offset(out_coord, out_strides); + + out[out_idx] = std::max(out[out_idx], in[in_idx]); + } +} +} // namespace reference +} // namespace ov diff --git a/src/core/reference/include/openvino/reference/reduce_mean.hpp b/src/core/reference/include/openvino/reference/reduce_mean.hpp new file mode 100644 index 00000000000000..e7dfd1b9766404 --- /dev/null +++ b/src/core/reference/include/openvino/reference/reduce_mean.hpp @@ -0,0 +1,36 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include + +#include "openvino/core/shape_util.hpp" +#include "openvino/reference/reduce_sum.hpp" + +namespace ov { +namespace reference { + +/** + * @brief Reference implementation of ReduceMean operator. + * + * @param in Input pointer to data. + * @param out Output pointer to results. + * @param in_shape Input shape. + * @param reduction_axes Axes on which reduction is applied. 
+ */ +template +void reduce_mean(const T* in, T* out, const Shape& in_shape, const AxisSet& reduction_axes) { + reduce_sum(in, out, in_shape, reduction_axes); + + const auto out_shape = util::reduce(in_shape, reduction_axes); + const auto out_size = shape_size(out_shape); + const auto count = static_cast(shape_size(in_shape) / out_size); + std::transform(out, std::next(out, out_size), out, [count](const T value) { + return value / count; + }); +} +} // namespace reference +} // namespace ov diff --git a/src/core/reference/include/openvino/reference/reduce_min.hpp b/src/core/reference/include/openvino/reference/reduce_min.hpp new file mode 100644 index 00000000000000..1acc4c29a55f7c --- /dev/null +++ b/src/core/reference/include/openvino/reference/reduce_min.hpp @@ -0,0 +1,46 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include +#include + +#include "openvino/core/shape_util.hpp" +#include "openvino/reference/utils/coordinate_index.hpp" +#include "openvino/reference/utils/coordinate_transform.hpp" + +namespace ov { +namespace reference { +/** + * @brief Reference implementation of ReduceMin operator. + * + * @param in Input pointer to data. + * @param out Output pointer to results. + * @param in_shape Input shape. + * @param reduction_axes Axes on which reduction is applied. + */ +template +void reduce_min(const T* in, T* out, const Shape& in_shape, const AxisSet& reduction_axes) { + constexpr auto max_value = + std::numeric_limits::has_infinity ? 
std::numeric_limits::infinity() : std::numeric_limits::max(); + + const auto out_shape = util::reduce(in_shape, reduction_axes); + std::fill(out, out + shape_size(out_shape), max_value); + + const auto in_strides = row_major_strides(in_shape); + const auto out_strides = row_major_strides(out_shape); + + CoordinateTransformBasic input_transform(in_shape); + for (const auto& in_coord : input_transform) { + const auto out_coord = util::reduce(in_coord, reduction_axes); + const auto in_idx = coordinate_offset(in_coord, in_strides); + const auto out_idx = coordinate_offset(out_coord, out_strides); + + out[out_idx] = std::min(out[out_idx], in[in_idx]); + } +} +} // namespace reference +} // namespace ov diff --git a/src/core/reference/include/openvino/reference/reduce_prod.hpp b/src/core/reference/include/openvino/reference/reduce_prod.hpp new file mode 100644 index 00000000000000..32741036e8cf22 --- /dev/null +++ b/src/core/reference/include/openvino/reference/reduce_prod.hpp @@ -0,0 +1,42 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include + +#include "openvino/core/shape_util.hpp" +#include "openvino/reference/utils/coordinate_index.hpp" +#include "openvino/reference/utils/coordinate_transform.hpp" + +namespace ov { +namespace reference { +/** + * @brief Reference implementation of ReduceProduct operator. + * + * @param in Input pointer to data. + * @param out Output pointer to results. + * @param in_shape Input shape. + * @param reduction_axes Axes on which reduction is applied. 
+ */ +template +void reduce_prod(const T* arg, T* out, const Shape& in_shape, const AxisSet& reduction_axes) { + const auto out_shape = util::reduce(in_shape, reduction_axes); + std::fill(out, out + shape_size(out_shape), T(1)); + + const auto in_strides = row_major_strides(in_shape); + const auto out_strides = row_major_strides(out_shape); + + CoordinateTransformBasic input_transform(in_shape); + for (const auto& in_coord : input_transform) { + const auto out_coord = util::reduce(in_coord, reduction_axes); + const auto in_idx = coordinate_offset(in_coord, in_strides); + const auto out_idx = coordinate_offset(out_coord, out_strides); + + out[out_idx] *= arg[in_idx]; + } +} +} // namespace reference +} // namespace ov diff --git a/src/core/reference/include/openvino/reference/reduce_sum.hpp b/src/core/reference/include/openvino/reference/reduce_sum.hpp new file mode 100644 index 00000000000000..c7d9ecf98bf2af --- /dev/null +++ b/src/core/reference/include/openvino/reference/reduce_sum.hpp @@ -0,0 +1,94 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include + +#include "openvino/core/shape_util.hpp" +#include "openvino/core/type/bfloat16.hpp" +#include "openvino/core/type/float16.hpp" +#include "openvino/reference/utils/coordinate_index.hpp" +#include "openvino/reference/utils/coordinate_transform.hpp" +#include "openvino/reference/utils/type_util.hpp" + +namespace ov { +namespace reference { +namespace details { + +template ::value, bool>::type = true> +bool isfinite(T x) { + return std::isfinite(x); +} + +template < + typename T, + typename std::enable_if::value || std::is_same::value, bool>::type = true> +bool isfinite(T x) { + return std::isfinite(static_cast(x)); +} + +/** + * @brief Performs one element summation based on Kahan algorithm to significantly reduce (integral types). + * + * @param in Value to add with previous value of summation. 
+ * @param prev_sum Previous value of summation (accumulator). + * @return Compensated sum. + */ +template ::value>::type* = nullptr> +constexpr T kahan_summation(const T in, const T prev_sum, T&) { + return in + prev_sum; +} + +/** + * @brief Performs one element summation based on Kahan algorithm to significantly reduce the numerical error (floating point types). + * + * @param in Value to add with previous value of summation. + * @param prev_sum Previous value of summation (accumulator). + * @param compensation Accumulates the summation error. + * @return Compensated sum. + */ +template ()>::type* = nullptr> +T kahan_summation(const T in, const T prev_sum, T& compensation) { + if (isfinite(in) && isfinite(prev_sum)) { + T temp = prev_sum + (in - compensation); + compensation = (temp - prev_sum) - (in - compensation); + return temp; + } else { + return in + prev_sum; + } +} +} // namespace details + +/** + * @brief Reference implementation of ReduceSum operator. + * + * @param in Input pointer to data. + * @param out Output pointer to results. + * @param in_shape Input shape. + * @param reduction_axes Axes on which reduction is applied. 
+ */ +template +void reduce_sum(const T* in, T* out, const Shape& in_shape, const AxisSet& reduction_axes) { + const auto out_shape = util::reduce(in_shape, reduction_axes); + + const auto out_size = shape_size(out_shape); + std::vector cs(out_size, T{0}); + std::fill(out, std::next(out, out_size), T{0}); + + const auto in_strides = row_major_strides(in_shape); + const auto out_strides = row_major_strides(out_shape); + + CoordinateTransformBasic input_transform(in_shape); + for (const auto& in_coord : input_transform) { + const auto out_coord = util::reduce(in_coord, reduction_axes); + const auto in_idx = coordinate_offset(in_coord, in_strides); + const auto out_idx = coordinate_offset(out_coord, out_strides); + + out[out_idx] = details::kahan_summation(in[in_idx], out[out_idx], cs[out_idx]); + } +} +} // namespace reference +} // namespace ov diff --git a/src/core/reference/include/openvino/reference/softmax.hpp b/src/core/reference/include/openvino/reference/softmax.hpp index d4cbf5bbaff63f..69ea583fbc6a2a 100644 --- a/src/core/reference/include/openvino/reference/softmax.hpp +++ b/src/core/reference/include/openvino/reference/softmax.hpp @@ -7,8 +7,8 @@ #include #include "ngraph/shape_util.hpp" -#include "openvino/reference/max.hpp" -#include "openvino/reference/sum.hpp" +#include "openvino/reference/reduce_max.hpp" +#include "openvino/reference/reduce_sum.hpp" #include "openvino/reference/utils/coordinate_transform.hpp" namespace ov { @@ -20,7 +20,7 @@ void softmax(const T* arg, T* out, const Shape& shape, const AxisSet& axes) { auto temp_elements = shape_size(temp_shape); auto temp_ptr = new T[temp_elements]; - max(arg, temp_ptr, shape, axes); + reduce_max(arg, temp_ptr, shape, axes); CoordinateTransform transform(shape); CoordinateTransform temp_transform(temp_shape); @@ -30,7 +30,7 @@ void softmax(const T* arg, T* out, const Shape& shape, const AxisSet& axes) { std::exp(arg[transform.index(coord)] - temp_ptr[temp_transform.index(temp_coord)]); } - sum(out, 
temp_ptr, shape, axes); + reduce_sum(out, temp_ptr, shape, axes); for (const Coordinate& coord : transform) { Coordinate temp_coord = ngraph::reduce(coord, axes, true); diff --git a/src/core/reference/include/openvino/reference/sum.hpp b/src/core/reference/include/openvino/reference/sum.hpp deleted file mode 100644 index 3f811b3d566f43..00000000000000 --- a/src/core/reference/include/openvino/reference/sum.hpp +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include -#include - -#include "ngraph/shape_util.hpp" -#include "ngraph/type/bfloat16.hpp" -#include "ngraph/type/float16.hpp" -#include "openvino/reference/utils/coordinate_transform.hpp" - -namespace ov { -namespace reference { -namespace details { -// Windows doesn't seem to like it if we directly use std::isfinite on integer -// types, so we will roll our own thing here. -template ::value, bool>::type = true> -bool is_finite(T x) { - return std::isfinite(x); -} - -template < - typename T, - typename std::enable_if::value || std::is_same::value, bool>::type = true> -bool is_finite(T x) { - return std::isfinite(static_cast(x)); -} - -template ::value, bool>::type = true> -bool is_finite(T /* x */) { - return true; -} - -/// -/// \brief Performs one element summation based on Kahan algorithm to -/// significantly reduce -/// the numerical error. -/// -/// \param[in] elem Element to add into the accumulator. -/// \param compensation Variable that accumulates the error. -/// \param sum Result of compensated summation. 
-/// -template -void kahan_summation(const T& elem, T& compensation, T& sum) { - if (is_finite(elem) && is_finite(sum)) { - T temp = sum + (elem - compensation); - compensation = (temp - sum) - (elem - compensation); - sum = temp; - } else { - sum = sum + elem; - } -} -} // namespace details - -template -void sum(const T* arg, T* out, const Shape& in_shape, const AxisSet& reduction_axes) { - constexpr bool dont_keep_dims_in_output = false; - NGRAPH_SUPPRESS_DEPRECATED_START - const auto out_shape = ngraph::reduce(in_shape, reduction_axes, dont_keep_dims_in_output); - - std::vector cs(shape_size(out_shape), 0); - std::fill(out, out + shape_size(out_shape), T(0)); - - const auto in_strides = row_major_strides(in_shape); - const auto out_strides = row_major_strides(out_shape); - - CoordinateTransformBasic input_transform(in_shape); - for (const Coordinate& input_coord : input_transform) { - const Coordinate output_coord = ngraph::reduce(input_coord, reduction_axes, dont_keep_dims_in_output); - - const size_t in_idx = - std::inner_product(input_coord.begin(), input_coord.end(), in_strides.begin(), uint64_t(0)); - const size_t out_idx = - std::inner_product(output_coord.begin(), output_coord.end(), out_strides.begin(), uint64_t(0)); - - details::kahan_summation(arg[in_idx], cs[out_idx], out[out_idx]); - } - NGRAPH_SUPPRESS_DEPRECATED_END -} -} // namespace reference -} // namespace ov diff --git a/src/core/reference/src/op/einsum.cpp b/src/core/reference/src/op/einsum.cpp index 125e33e3aa6b49..a48dc998495286 100644 --- a/src/core/reference/src/op/einsum.cpp +++ b/src/core/reference/src/op/einsum.cpp @@ -10,8 +10,8 @@ #include "openvino/reference/broadcast.hpp" #include "openvino/reference/matmul.hpp" #include "openvino/reference/multiply.hpp" +#include "openvino/reference/reduce_sum.hpp" #include "openvino/reference/reshape.hpp" -#include "openvino/reference/sum.hpp" #include "openvino/reference/transpose.hpp" #include "openvino/reference/utils/span.hpp" @@ -352,7 
+352,7 @@ void reduce_input(ov::TensorVector& inputs, auto output_ptr = ov::Tensor(input_ptr.get_element_type(), output_shape); - reference::sum(input_ptr.data(), output_ptr.data(), input_shape, reduced_axes); + reference::reduce_sum(input_ptr.data(), output_ptr.data(), input_shape, reduced_axes); // update a vector of inputs and input subscripts inputs[input_ind] = output_ptr; @@ -595,7 +595,7 @@ void extract_diagonal(ov::TensorVector& inputs, std::vector& input_ ov::op::AutoBroadcastType::NUMPY); auto result = ov::Tensor(input_ptr.get_element_type(), result_shape); - reference::sum(mul_output.data(), result.data(), mul_output.get_shape(), reduced_axes); + reference::reduce_sum(mul_output.data(), result.data(), mul_output.get_shape(), reduced_axes); inputs[input_ind] = result; input_subscripts[input_ind] = resultant_subscript; } diff --git a/src/core/shape_inference/include/utils.hpp b/src/core/shape_inference/include/utils.hpp index 7a1193f3a25a10..360c4eaa5f7b58 100644 --- a/src/core/shape_inference/include/utils.hpp +++ b/src/core/shape_inference/include/utils.hpp @@ -14,6 +14,7 @@ #include "ov_optional.hpp" #include "shape_infer_type_utils.hpp" #include "tensor_data_accessor.hpp" +#include "validation_util.hpp" namespace ov { @@ -277,10 +278,8 @@ std::unique_ptr get_input_const_data_as(const ov::Node* op, UnaryOperation&& func = ov::util::Cast()) { if (auto t = tensor_accessor(idx)) { return std::unique_ptr(new TRes(get_tensor_data_as(t, std::forward(func)))); - OPENVINO_SUPPRESS_DEPRECATED_START } else if (const auto& constant = - (idx < op->get_input_size()) ? ov::get_constant_from_source(op->input_value(idx)) : nullptr) { - OPENVINO_SUPPRESS_DEPRECATED_END + (idx < op->get_input_size()) ? 
ov::util::get_constant_from_source(op->input_value(idx)) : nullptr) { const auto& et = constant->get_element_type(); const auto& shape = constant->get_shape(); return std::unique_ptr(new TRes(get_raw_data_as(et, @@ -358,9 +357,7 @@ ov::optional get_input_const_data_as_shape(const ov::Node* op, inline element::Type get_input_const_element_type(const ov::Node* const op, size_t port, const ITensorAccessor& ta) { if (auto t = ta(port)) { return t.get_element_type(); - OPENVINO_SUPPRESS_DEPRECATED_START - } else if (const auto& constant = ov::get_constant_from_source(op->input_value(port))) { - OPENVINO_SUPPRESS_DEPRECATED_END + } else if (const auto& constant = ov::util::get_constant_from_source(op->input_value(port))) { return constant->get_element_type(); } else { return element::undefined; diff --git a/src/core/src/op/reduce_l1.cpp b/src/core/src/op/reduce_l1.cpp index 5670ae4609890b..75f8a000580bc3 100644 --- a/src/core/src/op/reduce_l1.cpp +++ b/src/core/src/op/reduce_l1.cpp @@ -21,7 +21,7 @@ struct Evaluate : element::NoAction { template static result_type visit(const Tensor& in0, Tensor& out, const AxisSet& reduction_axes) { using T = fundamental_type_for; - reference::reduce_l1(in0.data(), out.data(), in0.get_shape(), reduction_axes); + reference::reduce_l1(in0.data(), out.data(), in0.get_shape(), reduction_axes); return true; } }; @@ -48,7 +48,7 @@ bool ReduceL1::evaluate(TensorVector& outputs, const TensorVector& inputs) const outputs[0].set_shape(ov::util::reduce(inputs[0].get_shape(), reduction_axes, get_keep_dims())); using namespace ov::element; - return IfTypeOf::apply(inputs[0].get_element_type(), + return IfTypeOf::apply(inputs[0].get_element_type(), inputs[0], outputs[0], reduction_axes); diff --git a/src/core/src/op/reduce_l2.cpp b/src/core/src/op/reduce_l2.cpp index 1e7669ce625f48..5477a56986be16 100644 --- a/src/core/src/op/reduce_l2.cpp +++ b/src/core/src/op/reduce_l2.cpp @@ -20,7 +20,7 @@ struct Evaluate : element::NoAction { template static 
result_type visit(const Tensor& in0, Tensor& out, const AxisSet& reduction_axes) { using T = fundamental_type_for; - reference::reduce_l2(in0.data(), out.data(), in0.get_shape(), reduction_axes); + reference::reduce_l2(in0.data(), out.data(), in0.get_shape(), reduction_axes); return true; } }; diff --git a/src/core/src/op/reduce_max.cpp b/src/core/src/op/reduce_max.cpp index 35f2216bac8bfc..989f0a771f2b1c 100644 --- a/src/core/src/op/reduce_max.cpp +++ b/src/core/src/op/reduce_max.cpp @@ -2,98 +2,81 @@ // SPDX-License-Identifier: Apache-2.0 // -#include +#include "openvino/op/reduce_max.hpp" #include "bound_evaluate.hpp" +#include "element_visitor.hpp" #include "itt.hpp" -#include "ngraph/graph_util.hpp" -#include "ngraph/op/max.hpp" -#include "ngraph/op/util/evaluate_helpers.hpp" -#include "ngraph/runtime/host_tensor.hpp" -#include "ngraph/shape_util.hpp" -#include "openvino/reference/max.hpp" +#include "openvino/core/shape_util.hpp" +#include "openvino/op/util/axes_util.hpp" +#include "openvino/reference/reduce_max.hpp" -using namespace std; -using namespace ngraph; +namespace ov { +namespace op { +namespace reduce_max { +struct Evaluate : element::NoAction { + using element::NoAction::visit; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace maxop { -namespace { -template -bool evaluate(const HostTensorPtr& arg, const HostTensorPtr& out, const AxisSet& axes, bool keep_dims) { - OPENVINO_SUPPRESS_DEPRECATED_START - out->set_shape(reduce(arg->get_shape(), axes, keep_dims)); - OPENVINO_SUPPRESS_DEPRECATED_END - ov::reference::max(arg->get_data_ptr(), out->get_data_ptr(), arg->get_shape(), axes); - return true; -} - -bool evaluate_max(const HostTensorPtr& arg, const HostTensorPtr& out, const AxisSet& axes, bool keep_dims) { - bool rc = true; - switch (arg->get_element_type()) { - NGRAPH_TYPE_CASE(evaluate_max, i32, arg, out, axes, keep_dims); - NGRAPH_TYPE_CASE(evaluate_max, i64, arg, out, axes, keep_dims); - NGRAPH_TYPE_CASE(evaluate_max, u32, arg, out, axes, 
keep_dims); - NGRAPH_TYPE_CASE(evaluate_max, u64, arg, out, axes, keep_dims); - NGRAPH_TYPE_CASE(evaluate_max, f16, arg, out, axes, keep_dims); - NGRAPH_TYPE_CASE(evaluate_max, f32, arg, out, axes, keep_dims); - NGRAPH_TYPE_CASE(evaluate_max, u8, arg, out, axes, keep_dims); - NGRAPH_TYPE_CASE(evaluate_max, i8, arg, out, axes, keep_dims); - default: - rc = false; - break; + template + static result_type visit(const Tensor& in0, Tensor& out, const AxisSet& reduction_axes) { + using T = fundamental_type_for; + reference::reduce_max(in0.data(), out.data(), in0.get_shape(), reduction_axes); + return true; } - return rc; -} -} // namespace -} // namespace maxop +}; +} // namespace reduce_max +namespace v1 { -op::v1::ReduceMax::ReduceMax(const Output& arg, const Output& reduction_axes, bool keep_dims) +ReduceMax::ReduceMax(const Output& arg, const Output& reduction_axes, bool keep_dims) : ArithmeticReductionKeepDims(arg, reduction_axes, keep_dims) { constructor_validate_and_infer_types(); } -shared_ptr op::v1::ReduceMax::clone_with_new_inputs(const OutputVector& new_args) const { +std::shared_ptr ReduceMax::clone_with_new_inputs(const OutputVector& new_args) const { OV_OP_SCOPE(v1_ReduceMax_clone_with_new_inputs); check_new_args_count(this, new_args); - return make_shared(new_args.at(0), new_args.at(1), get_keep_dims()); + return std::make_shared(new_args.at(0), new_args.at(1), get_keep_dims()); } -bool op::v1::ReduceMax::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { +bool ReduceMax::evaluate(TensorVector& outputs, const TensorVector& inputs) const { OV_OP_SCOPE(v1_ReduceMax_evaluate); - OPENVINO_SUPPRESS_DEPRECATED_START - OPENVINO_ASSERT(validate_host_tensor_vector(inputs, 2)); - OPENVINO_ASSERT(validate_host_tensor_vector(outputs, 1)); + OPENVINO_ASSERT(outputs.size() == 1); + OPENVINO_ASSERT(inputs.size() == 2); - const auto reduction_axes = - get_normalized_axes_from_tensor(inputs[1], inputs[0]->get_partial_shape().rank(), 
get_friendly_name()); - OPENVINO_SUPPRESS_DEPRECATED_END + const auto reduction_axes = get_normalized_axes_from_tensor(this, inputs[1], inputs[0].get_shape().size()); + outputs[0].set_shape(ov::util::reduce(inputs[0].get_shape(), reduction_axes, get_keep_dims())); - return maxop::evaluate_max(inputs[0], outputs[0], reduction_axes, get_keep_dims()); + using namespace ov::element; + return IfTypeOf::apply(inputs[0].get_element_type(), + inputs[0], + outputs[0], + reduction_axes); } -bool op::v1::ReduceMax::has_evaluate() const { +bool ReduceMax::has_evaluate() const { OV_OP_SCOPE(v1_ReduceMax_has_evaluate); switch (get_input_element_type(0)) { - case ngraph::element::i32: - case ngraph::element::i64: - case ngraph::element::u32: - case ngraph::element::u64: - case ngraph::element::f16: - case ngraph::element::f32: - case ngraph::element::i8: - case ngraph::element::u8: + case element::i32: + case element::i64: + case element::u32: + case element::u64: + case element::f16: + case element::f32: + case element::i8: + case element::u8: return true; default: - break; + return false; } - return false; } -bool op::v1::ReduceMax::evaluate_lower(ov::TensorVector& output_values) const { +bool ReduceMax::evaluate_lower(ov::TensorVector& output_values) const { return input_value(1).get_tensor().has_and_set_bound() && default_lower_bound_evaluator(this, output_values); } -bool op::v1::ReduceMax::evaluate_upper(ov::TensorVector& output_values) const { +bool ReduceMax::evaluate_upper(ov::TensorVector& output_values) const { return input_value(1).get_tensor().has_and_set_bound() && default_upper_bound_evaluator(this, output_values); } +} // namespace v1 +} // namespace op +} // namespace ov diff --git a/src/core/src/op/reduce_mean.cpp b/src/core/src/op/reduce_mean.cpp index bc425fa0c2095c..762bc1c09719ee 100644 --- a/src/core/src/op/reduce_mean.cpp +++ b/src/core/src/op/reduce_mean.cpp @@ -2,87 +2,69 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph/op/reduce_mean.hpp" 
- -#include +#include "openvino/op/reduce_mean.hpp" +#include "element_visitor.hpp" #include "itt.hpp" -#include "ngraph/graph_util.hpp" -#include "ngraph/op/broadcast.hpp" -#include "ngraph/op/util/evaluate_helpers.hpp" -#include "ngraph/runtime/host_tensor.hpp" -#include "ngraph/shape_util.hpp" -#include "openvino/reference/mean.hpp" +#include "openvino/core/shape_util.hpp" +#include "openvino/op/util/axes_util.hpp" +#include "openvino/reference/reduce_mean.hpp" -using namespace std; -using namespace ngraph; +namespace ov { +namespace op { +namespace reduce_mean { +struct Evaluate : element::NoAction { + using element::NoAction::visit; -op::v1::ReduceMean::ReduceMean(const Output& arg, const Output& reduction_axes, bool keep_dims) + template + static result_type visit(const Tensor& in0, Tensor& out, const AxisSet& reduction_axes) { + using T = fundamental_type_for; + reference::reduce_mean(in0.data(), out.data(), in0.get_shape(), reduction_axes); + return true; + } +}; +} // namespace reduce_mean +namespace v1 { +ReduceMean::ReduceMean(const Output& arg, const Output& reduction_axes, bool keep_dims) : ArithmeticReductionKeepDims(arg, reduction_axes, keep_dims) { constructor_validate_and_infer_types(); } -shared_ptr op::v1::ReduceMean::clone_with_new_inputs(const OutputVector& new_args) const { +std::shared_ptr ReduceMean::clone_with_new_inputs(const OutputVector& new_args) const { OV_OP_SCOPE(v1_ReduceMean_clone_with_new_inputs); check_new_args_count(this, new_args); - return make_shared(new_args.at(0), new_args.at(1), get_keep_dims()); -} - -OPENVINO_SUPPRESS_DEPRECATED_START -namespace mean { -namespace { -template -bool evaluate(const HostTensorPtr& arg, const HostTensorPtr& out, const AxisSet& axes, bool keep_dims) { - OPENVINO_SUPPRESS_DEPRECATED_START - out->set_shape(reduce(arg->get_shape(), axes, keep_dims)); - OPENVINO_SUPPRESS_DEPRECATED_END - ov::reference::mean(arg->get_data_ptr(), out->get_data_ptr(), arg->get_shape(), axes); - return true; -} - 
-bool evaluate_mean(const HostTensorPtr& arg, const HostTensorPtr& out, const AxisSet& axes, bool keep_dims) { - bool rc = true; - switch (arg->get_element_type()) { - NGRAPH_TYPE_CASE(evaluate_mean, i32, arg, out, axes, keep_dims); - NGRAPH_TYPE_CASE(evaluate_mean, i64, arg, out, axes, keep_dims); - NGRAPH_TYPE_CASE(evaluate_mean, u32, arg, out, axes, keep_dims); - NGRAPH_TYPE_CASE(evaluate_mean, u64, arg, out, axes, keep_dims); - NGRAPH_TYPE_CASE(evaluate_mean, f16, arg, out, axes, keep_dims); - NGRAPH_TYPE_CASE(evaluate_mean, f32, arg, out, axes, keep_dims); - default: - rc = false; - break; - } - return rc; + return std::make_shared(new_args.at(0), new_args.at(1), get_keep_dims()); } -} // namespace -} // namespace mean -bool op::v1::ReduceMean::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { +bool ReduceMean::evaluate(TensorVector& outputs, const TensorVector& inputs) const { OV_OP_SCOPE(v1_ReduceMean_evaluate); - OPENVINO_SUPPRESS_DEPRECATED_START - OPENVINO_ASSERT(validate_host_tensor_vector(inputs, 2)); - OPENVINO_ASSERT(validate_host_tensor_vector(outputs, 1)); + OPENVINO_ASSERT(outputs.size() == 1); + OPENVINO_ASSERT(inputs.size() == 2); - const auto reduction_axes = - get_normalized_axes_from_tensor(inputs[1], inputs[0]->get_partial_shape().rank(), get_friendly_name()); - OPENVINO_SUPPRESS_DEPRECATED_END + const auto reduction_axes = get_normalized_axes_from_tensor(this, inputs[1], inputs[0].get_shape().size()); + outputs[0].set_shape(ov::util::reduce(inputs[0].get_shape(), reduction_axes, get_keep_dims())); - return mean::evaluate_mean(inputs[0], outputs[0], reduction_axes, get_keep_dims()); + using namespace ov::element; + return IfTypeOf::apply(inputs[0].get_element_type(), + inputs[0], + outputs[0], + reduction_axes); } -bool op::v1::ReduceMean::has_evaluate() const { +bool ReduceMean::has_evaluate() const { OV_OP_SCOPE(v1_ReduceMean_has_evaluate); switch (get_input_element_type(0)) { - case ngraph::element::i32: - 
case ngraph::element::i64: - case ngraph::element::u32: - case ngraph::element::u64: - case ngraph::element::f16: - case ngraph::element::f32: + case element::f16: + case element::f32: + case element::i32: + case element::i64: + case element::u32: + case element::u64: return true; default: - break; + return false; } - return false; } +} // namespace v1 +} // namespace op +} // namespace ov diff --git a/src/core/src/op/reduce_min.cpp b/src/core/src/op/reduce_min.cpp index 7f6e927748bb56..3334b02d5fa3ea 100644 --- a/src/core/src/op/reduce_min.cpp +++ b/src/core/src/op/reduce_min.cpp @@ -2,97 +2,79 @@ // SPDX-License-Identifier: Apache-2.0 // -#include +#include "openvino/op/reduce_min.hpp" #include "bound_evaluate.hpp" +#include "element_visitor.hpp" #include "itt.hpp" -#include "ngraph/op/min.hpp" -#include "ngraph/op/util/evaluate_helpers.hpp" -#include "ngraph/runtime/host_tensor.hpp" -#include "ngraph/shape_util.hpp" -#include "openvino/reference/min.hpp" +#include "openvino/op/util/axes_util.hpp" +#include "openvino/reference/reduce_min.hpp" -using namespace std; -using namespace ngraph; +namespace ov { +namespace op { +namespace reduce_min { +struct Evaluate : element::NoAction { + using element::NoAction::visit; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace minop { -namespace { -template -bool evaluate(const HostTensorPtr& arg, const HostTensorPtr& out, const AxisSet& axes, const bool keep_dims) { - OPENVINO_SUPPRESS_DEPRECATED_START - out->set_shape(reduce(arg->get_shape(), axes, keep_dims)); - OPENVINO_SUPPRESS_DEPRECATED_END - ov::reference::min(arg->get_data_ptr(), out->get_data_ptr(), arg->get_shape(), axes); - return true; -} - -bool evaluate_min(const HostTensorPtr& arg, const HostTensorPtr& out, const AxisSet& axes, const bool keep_dims) { - bool rc = true; - switch (arg->get_element_type()) { - NGRAPH_TYPE_CASE(evaluate_min, i32, arg, out, axes, keep_dims); - NGRAPH_TYPE_CASE(evaluate_min, i64, arg, out, axes, keep_dims); - 
NGRAPH_TYPE_CASE(evaluate_min, u32, arg, out, axes, keep_dims); - NGRAPH_TYPE_CASE(evaluate_min, u64, arg, out, axes, keep_dims); - NGRAPH_TYPE_CASE(evaluate_min, f16, arg, out, axes, keep_dims); - NGRAPH_TYPE_CASE(evaluate_min, f32, arg, out, axes, keep_dims); - NGRAPH_TYPE_CASE(evaluate_min, i8, arg, out, axes, keep_dims); - NGRAPH_TYPE_CASE(evaluate_min, u8, arg, out, axes, keep_dims); - default: - rc = false; - break; + template + static result_type visit(const Tensor& in0, Tensor& out, const AxisSet& reduction_axes) { + using T = fundamental_type_for; + reference::reduce_min(in0.data(), out.data(), in0.get_shape(), reduction_axes); + return true; } - return rc; -} -} // namespace -} // namespace minop - -op::v1::ReduceMin::ReduceMin(const Output& arg, const Output& reduction_axes, bool keep_dims) +}; +} // namespace reduce_min +namespace v1 { +ReduceMin::ReduceMin(const Output& arg, const Output& reduction_axes, bool keep_dims) : ArithmeticReductionKeepDims(arg, reduction_axes, keep_dims) { constructor_validate_and_infer_types(); } -shared_ptr op::v1::ReduceMin::clone_with_new_inputs(const OutputVector& new_args) const { +std::shared_ptr ReduceMin::clone_with_new_inputs(const OutputVector& new_args) const { OV_OP_SCOPE(v1_ReduceMin_clone_with_new_inputs); check_new_args_count(this, new_args); - return make_shared(new_args.at(0), new_args.at(1), get_keep_dims()); + return std::make_shared(new_args.at(0), new_args.at(1), get_keep_dims()); } -bool op::v1::ReduceMin::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { +bool ReduceMin::evaluate(TensorVector& outputs, const TensorVector& inputs) const { OV_OP_SCOPE(v1_ReduceMin_evaluate); - OPENVINO_SUPPRESS_DEPRECATED_START - OPENVINO_ASSERT(validate_host_tensor_vector(inputs, 2)); - OPENVINO_ASSERT(validate_host_tensor_vector(outputs, 1)); + OPENVINO_ASSERT(outputs.size() == 1); + OPENVINO_ASSERT(inputs.size() == 2); - const auto reduction_axes = - 
get_normalized_axes_from_tensor(inputs[1], inputs[0]->get_partial_shape().rank(), get_friendly_name()); - OPENVINO_SUPPRESS_DEPRECATED_END + const auto reduction_axes = get_normalized_axes_from_tensor(this, inputs[1], inputs[0].get_shape().size()); + outputs[0].set_shape(ov::util::reduce(inputs[0].get_shape(), reduction_axes, get_keep_dims())); - return minop::evaluate_min(inputs[0], outputs[0], reduction_axes, get_keep_dims()); + using namespace ov::element; + return IfTypeOf::apply(inputs[0].get_element_type(), + inputs[0], + outputs[0], + reduction_axes); } -bool op::v1::ReduceMin::has_evaluate() const { +bool ReduceMin::has_evaluate() const { OV_OP_SCOPE(v1_ReduceMin_has_evaluate); switch (get_input_element_type(0)) { - case ngraph::element::i8: - case ngraph::element::u8: - case ngraph::element::i32: - case ngraph::element::i64: - case ngraph::element::u32: - case ngraph::element::u64: - case ngraph::element::f16: - case ngraph::element::f32: + case element::i8: + case element::u8: + case element::i32: + case element::i64: + case element::u32: + case element::u64: + case element::f16: + case element::f32: return true; default: - break; + return false; } - return false; } -bool op::v1::ReduceMin::evaluate_lower(ov::TensorVector& output_values) const { +bool ReduceMin::evaluate_lower(ov::TensorVector& output_values) const { return input_value(1).get_tensor().has_and_set_bound() && default_lower_bound_evaluator(this, output_values); } -bool op::v1::ReduceMin::evaluate_upper(ov::TensorVector& output_values) const { +bool ReduceMin::evaluate_upper(ov::TensorVector& output_values) const { return input_value(1).get_tensor().has_and_set_bound() && default_upper_bound_evaluator(this, output_values); } +} // namespace v1 +} // namespace op +} // namespace ov diff --git a/src/core/src/op/reduce_prod.cpp b/src/core/src/op/reduce_prod.cpp index dd915427b2415d..9d345ae63cf301 100644 --- a/src/core/src/op/reduce_prod.cpp +++ b/src/core/src/op/reduce_prod.cpp @@ -2,105 +2,90 
@@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph/op/reduce_prod.hpp" +#include "openvino/op/reduce_prod.hpp" #include "bound_evaluate.hpp" +#include "element_visitor.hpp" #include "itt.hpp" -#include "ngraph/graph_util.hpp" -#include "ngraph/op/util/evaluate_helpers.hpp" -#include "ngraph/runtime/host_tensor.hpp" -#include "ngraph/shape_util.hpp" -#include "openvino/reference/product.hpp" +#include "openvino/core/shape_util.hpp" +#include "openvino/op/util/axes_util.hpp" +#include "openvino/reference/reduce_prod.hpp" -using namespace std; -using namespace ngraph; +namespace ov { +namespace op { +namespace reduce_prod { +namespace { +bool has_positive_bounds_on_data(const Node* const op) { + const auto& lb = op->get_input_tensor(0).get_lower_value(); + const auto& ub = op->get_input_tensor(0).get_upper_value(); + + return lb && ub && tensor_is_positive(lb) && tensor_is_positive(ub); +} +} // namespace + +struct Evaluate : element::NoAction { + using element::NoAction::visit; + + template + static result_type visit(const Tensor& in0, Tensor& out, const AxisSet& reduction_axes) { + using T = fundamental_type_for; + reference::reduce_prod(in0.data(), out.data(), in0.get_shape(), reduction_axes); + return true; + } +}; +} // namespace reduce_prod +namespace v1 { -op::v1::ReduceProd::ReduceProd(const Output& arg, const Output& reduction_axes, bool keep_dims) +ReduceProd::ReduceProd(const Output& arg, const Output& reduction_axes, bool keep_dims) : ArithmeticReductionKeepDims(arg, reduction_axes, keep_dims) { constructor_validate_and_infer_types(); } -shared_ptr op::v1::ReduceProd::clone_with_new_inputs(const OutputVector& new_args) const { +std::shared_ptr ReduceProd::clone_with_new_inputs(const OutputVector& new_args) const { OV_OP_SCOPE(v1_ReduceProd_clone_with_new_inputs); check_new_args_count(this, new_args); - return make_shared(new_args.at(0), new_args.at(1), get_keep_dims()); -} - -OPENVINO_SUPPRESS_DEPRECATED_START -namespace reduce_prod { 
-namespace { -template -bool evaluate(const HostTensorPtr& arg, const HostTensorPtr& out, const AxisSet& axes, bool keep_dims) { - OPENVINO_SUPPRESS_DEPRECATED_START - out->set_shape(reduce(arg->get_shape(), axes, keep_dims)); - OPENVINO_SUPPRESS_DEPRECATED_END - ov::reference::product(arg->get_data_ptr(), out->get_data_ptr(), arg->get_shape(), axes); - return true; + return std::make_shared(new_args.at(0), new_args.at(1), get_keep_dims()); } -bool evaluate_product(const HostTensorPtr& arg, const HostTensorPtr& out, const AxisSet& axes, bool keep_dims) { - bool rc = true; - switch (arg->get_element_type()) { - NGRAPH_TYPE_CASE(evaluate_product, i32, arg, out, axes, keep_dims); - NGRAPH_TYPE_CASE(evaluate_product, i64, arg, out, axes, keep_dims); - NGRAPH_TYPE_CASE(evaluate_product, u32, arg, out, axes, keep_dims); - NGRAPH_TYPE_CASE(evaluate_product, u64, arg, out, axes, keep_dims); - NGRAPH_TYPE_CASE(evaluate_product, f16, arg, out, axes, keep_dims); - NGRAPH_TYPE_CASE(evaluate_product, f32, arg, out, axes, keep_dims); - default: - rc = false; - break; - } - return rc; -} // namespace -} // namespace -} // namespace reduce_prod - -bool op::v1::ReduceProd::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { +bool ReduceProd::evaluate(TensorVector& outputs, const TensorVector& inputs) const { OV_OP_SCOPE(v1_ReduceProd_evaluate); - OPENVINO_SUPPRESS_DEPRECATED_START - OPENVINO_ASSERT(validate_host_tensor_vector(inputs, 2)); - OPENVINO_ASSERT(validate_host_tensor_vector(outputs, 1)); + OPENVINO_ASSERT(outputs.size() == 1); + OPENVINO_ASSERT(inputs.size() == 2); - const auto reduction_axes = - get_normalized_axes_from_tensor(inputs[1], inputs[0]->get_partial_shape().rank(), get_friendly_name()); - OPENVINO_SUPPRESS_DEPRECATED_END + const auto reduction_axes = get_normalized_axes_from_tensor(this, inputs[1], inputs[0].get_shape().size()); + outputs[0].set_shape(ov::util::reduce(inputs[0].get_shape(), reduction_axes, get_keep_dims())); - 
return reduce_prod::evaluate_product(inputs[0], outputs[0], reduction_axes, get_keep_dims()); + using namespace ov::element; + return IfTypeOf::apply(inputs[0].get_element_type(), + inputs[0], + outputs[0], + reduction_axes); } -bool op::v1::ReduceProd::has_evaluate() const { +bool ReduceProd::has_evaluate() const { OV_OP_SCOPE(v1_ReduceProd_has_evaluate); switch (get_input_element_type(0)) { - case ngraph::element::i32: - case ngraph::element::i64: - case ngraph::element::u32: - case ngraph::element::u64: - case ngraph::element::f16: - case ngraph::element::f32: + case element::i32: + case element::i64: + case element::u32: + case element::u64: + case element::f16: + case element::f32: return true; default: - break; + return false; } - return false; } -bool op::v1::ReduceProd::evaluate_lower(ov::TensorVector& output_values) const { - if (!input_value(1).get_tensor().has_and_set_bound()) - return false; - - const auto &lb = input_value(0).get_tensor().get_lower_value(), ub = input_value(0).get_tensor().get_upper_value(); - if (!lb || !ub || !tensor_is_positive(lb) || !tensor_is_positive(ub)) - return false; - return default_lower_bound_evaluator(this, output_values); +bool ReduceProd::evaluate_lower(ov::TensorVector& output_values) const { + return reduce_prod::has_positive_bounds_on_data(this) && get_input_tensor(1).has_and_set_bound() && + default_lower_bound_evaluator(this, output_values); } -bool op::v1::ReduceProd::evaluate_upper(ov::TensorVector& output_values) const { - if (!input_value(1).get_tensor().has_and_set_bound()) - return false; - - const auto &lb = input_value(0).get_tensor().get_lower_value(), ub = input_value(0).get_tensor().get_upper_value(); - if (!lb || !ub || !tensor_is_positive(lb) || !tensor_is_positive(ub)) - return false; - return default_upper_bound_evaluator(this, output_values); +bool ReduceProd::evaluate_upper(ov::TensorVector& output_values) const { + return reduce_prod::has_positive_bounds_on_data(this) && 
get_input_tensor(1).has_and_set_bound() && + default_upper_bound_evaluator(this, output_values); } +} // namespace v1 +} // namespace op +} // namespace ov diff --git a/src/core/src/op/reduce_sum.cpp b/src/core/src/op/reduce_sum.cpp index 54797693251ae1..33e7ced8204faf 100644 --- a/src/core/src/op/reduce_sum.cpp +++ b/src/core/src/op/reduce_sum.cpp @@ -2,88 +2,69 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph/op/reduce_sum.hpp" - -#include +#include "openvino/op/reduce_sum.hpp" +#include "element_visitor.hpp" #include "itt.hpp" -#include "ngraph/graph_util.hpp" -#include "ngraph/op/broadcast.hpp" -#include "ngraph/op/util/evaluate_helpers.hpp" -#include "ngraph/op/util/op_types.hpp" -#include "ngraph/runtime/host_tensor.hpp" -#include "ngraph/shape_util.hpp" -#include "openvino/reference/sum.hpp" +#include "openvino/core/shape_util.hpp" +#include "openvino/op/util/axes_util.hpp" +#include "openvino/reference/reduce_sum.hpp" -using namespace std; -using namespace ngraph; +namespace ov { +namespace op { +namespace reduce_sum { +struct Evaluate : element::NoAction { + using element::NoAction::visit; -op::v1::ReduceSum::ReduceSum(const Output& arg, const Output& reduction_axes, bool keep_dims) + template + static result_type visit(const Tensor& in0, Tensor& out, const AxisSet& reduction_axes) { + using T = fundamental_type_for; + reference::reduce_sum(in0.data(), out.data(), in0.get_shape(), reduction_axes); + return true; + } +}; +} // namespace reduce_sum +namespace v1 { +ReduceSum::ReduceSum(const Output& arg, const Output& reduction_axes, bool keep_dims) : ArithmeticReductionKeepDims(arg, reduction_axes, keep_dims) { constructor_validate_and_infer_types(); } -shared_ptr op::v1::ReduceSum::clone_with_new_inputs(const OutputVector& new_args) const { +std::shared_ptr ReduceSum::clone_with_new_inputs(const OutputVector& new_args) const { OV_OP_SCOPE(v1_ReduceSum_clone_with_new_inputs); check_new_args_count(this, new_args); - return 
make_shared(new_args.at(0), new_args.at(1), get_keep_dims()); -} - -OPENVINO_SUPPRESS_DEPRECATED_START -namespace reduce_sum { -namespace { -template -bool evaluate(const HostTensorPtr& arg, const HostTensorPtr& out, const AxisSet& axes, bool keep_dims) { - OPENVINO_SUPPRESS_DEPRECATED_START - out->set_shape(reduce(arg->get_shape(), axes, keep_dims)); - OPENVINO_SUPPRESS_DEPRECATED_END - ov::reference::sum(arg->get_data_ptr(), out->get_data_ptr(), arg->get_shape(), axes); - return true; + return std::make_shared(new_args.at(0), new_args.at(1), get_keep_dims()); } -bool evaluate_sum(const HostTensorPtr& arg, const HostTensorPtr& out, const AxisSet& axes, bool keep_dims) { - bool rc = true; - switch (arg->get_element_type()) { - NGRAPH_TYPE_CASE(evaluate_reduce_sum, i32, arg, out, axes, keep_dims); - NGRAPH_TYPE_CASE(evaluate_reduce_sum, i64, arg, out, axes, keep_dims); - NGRAPH_TYPE_CASE(evaluate_reduce_sum, u32, arg, out, axes, keep_dims); - NGRAPH_TYPE_CASE(evaluate_reduce_sum, u64, arg, out, axes, keep_dims); - NGRAPH_TYPE_CASE(evaluate_reduce_sum, f16, arg, out, axes, keep_dims); - NGRAPH_TYPE_CASE(evaluate_reduce_sum, f32, arg, out, axes, keep_dims); - default: - rc = false; - break; - } - return rc; -} // namespace -} // namespace -} // namespace reduce_sum - -bool op::v1::ReduceSum::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { +bool ReduceSum::evaluate(TensorVector& outputs, const TensorVector& inputs) const { OV_OP_SCOPE(v1_ReduceSum_evaluate); - OPENVINO_SUPPRESS_DEPRECATED_START - OPENVINO_ASSERT(validate_host_tensor_vector(inputs, 2)); - OPENVINO_ASSERT(validate_host_tensor_vector(outputs, 1)); + OPENVINO_ASSERT(outputs.size() == 1); + OPENVINO_ASSERT(inputs.size() == 2); - const auto reduction_axes = - get_normalized_axes_from_tensor(inputs[1], inputs[0]->get_partial_shape().rank(), get_friendly_name()); - OPENVINO_SUPPRESS_DEPRECATED_END + const auto reduction_axes = get_normalized_axes_from_tensor(this, inputs[1], 
inputs[0].get_shape().size()); + outputs[0].set_shape(ov::util::reduce(inputs[0].get_shape(), reduction_axes, get_keep_dims())); - return reduce_sum::evaluate_sum(inputs[0], outputs[0], reduction_axes, get_keep_dims()); + using namespace ov::element; + return IfTypeOf::apply(inputs[0].get_element_type(), + inputs[0], + outputs[0], + reduction_axes); } -bool op::v1::ReduceSum::has_evaluate() const { +bool ReduceSum::has_evaluate() const { OV_OP_SCOPE(v1_ReduceSum_has_evaluate); switch (get_input_element_type(0)) { - case ngraph::element::i32: - case ngraph::element::i64: - case ngraph::element::u32: - case ngraph::element::u64: - case ngraph::element::f16: - case ngraph::element::f32: + case element::i32: + case element::i64: + case element::u32: + case element::u64: + case element::f16: + case element::f32: return true; default: - break; + return false; } - return false; } +} // namespace v1 +} // namespace op +} // namespace ov diff --git a/src/core/src/op/util/axes_util.cpp b/src/core/src/op/util/axes_util.cpp index d5921edf6f580f..1736025527f71e 100644 --- a/src/core/src/op/util/axes_util.cpp +++ b/src/core/src/op/util/axes_util.cpp @@ -11,7 +11,7 @@ namespace ov { namespace op { namespace util { AxisSet get_normalized_axes_from_tensor(const Node* const node, const Tensor& tensor, const Rank& rank) { - const auto axes = ov::get_tensor_data_as(tensor, ov::util::Cast()); + const auto axes = ov::get_tensor_data_as(tensor); OPENVINO_SUPPRESS_DEPRECATED_START return {normalize_axes(node->get_friendly_name(), axes, rank)}; diff --git a/src/core/src/op/util/reduction_base.cpp b/src/core/src/op/util/reduction_base.cpp index dbe7cb4b5228b9..30d56715c248e3 100644 --- a/src/core/src/op/util/reduction_base.cpp +++ b/src/core/src/op/util/reduction_base.cpp @@ -6,6 +6,7 @@ #include "openvino/op/constant.hpp" #include "reduce_shape_inference.hpp" +#include "validation_util.hpp" ov::op::util::ReductionBase::ReductionBase() = default; @@ -24,18 +25,16 @@ bool 
ov::op::util::ReductionBase::reduction_axes_constant() const { } const ov::AxisSet ov::op::util::ReductionBase::get_reduction_axes() const { - AxisSet axes; - OPENVINO_SUPPRESS_DEPRECATED_START - if (const auto& const_op = get_constant_from_source(input_value(1))) { - OPENVINO_SUPPRESS_DEPRECATED_END + if (const auto& const_op = ov::util::get_constant_from_source(input_value(1))) { const auto const_data = const_op->cast_vector(); const auto input_data_rank = get_input_partial_shape(0).rank(); OPENVINO_SUPPRESS_DEPRECATED_START const auto normalized_axes = ov::normalize_axes(get_friendly_name(), const_data, input_data_rank); OPENVINO_SUPPRESS_DEPRECATED_END - axes = AxisSet{normalized_axes}; + return {normalized_axes}; + } else { + return {}; } - return axes; } void ov::op::util::ReductionBase::set_reduction_axes(const AxisSet& reduction_axes) { diff --git a/src/core/src/validation_util.cpp b/src/core/src/validation_util.cpp index d44e6b0bf5b718..b1f03d198f1152 100644 --- a/src/core/src/validation_util.cpp +++ b/src/core/src/validation_util.cpp @@ -1295,13 +1295,7 @@ std::shared_ptr ngraph::get_constant_lowest_of_type(el } shared_ptr ov::get_constant_from_source(const Output& source) { - if (!has_and_set_equal_bounds(source)) - return nullptr; - if (const auto& c = ov::as_type_ptr(source.get_node_shared_ptr())) - return c; - - const auto t = source.get_tensor().get_upper_value(); - return std::make_shared(t.get_element_type(), t.get_shape(), t.data()); + return ov::util::get_constant_from_source(source); } bool ngraph::validate_host_tensor_vector(const HostTensorVector& tensor_vector, const size_t& size) { @@ -1370,3 +1364,19 @@ std::shared_ptr ov::util::constantfold_subgraph(const Outp return nullptr; return ov::as_type_ptr(outputs[subgraph_sink.get_index()].get_node_shared_ptr()); } + +namespace ov { +namespace util { +using ov::op::v0::Constant; + +std::shared_ptr get_constant_from_source(const Output& source) { + if (const auto& c = 
ov::as_type_ptr(source.get_node_shared_ptr())) { + return c; + } else if (has_and_set_equal_bounds(source)) { + return std::make_shared(source.get_tensor().get_upper_value()); + } else { + return {}; + } +} +} // namespace util +} // namespace ov From 23e602f06f3a46339b9b7e1393fe740d15ab1d84 Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Thu, 28 Sep 2023 15:01:52 +0400 Subject: [PATCH 3/3] Fixed NCC style check (#20121) --- .github/workflows/code_style.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/code_style.yml b/.github/workflows/code_style.yml index 96c75ce55aef43..0500ba86b12412 100644 --- a/.github/workflows/code_style.yml +++ b/.github/workflows/code_style.yml @@ -85,7 +85,7 @@ jobs: - name: Install Clang dependency run: | sudo apt update - sudo apt --assume-yes remove clang-7 clang-8 clang-9 clang-10 clang-11 clang-12 clang-13 + sudo apt --assume-yes remove clang-7 clang-8 clang-9 clang-10 clang-11 clang-12 clang-13 clang-15 sudo apt --assume-yes install clang-14 libclang-14-dev - name: Install Python-based dependencies