From 7d1b7b6becf7b75d8d7f675d5262b1182235d90f Mon Sep 17 00:00:00 2001
From: Pawel Raasz
Date: Mon, 25 Sep 2023 12:22:59 +0200
Subject: [PATCH] [core] Api 2.0/migrate operators without evaluate (#19988)

* Migrate ops evaluate
* Remove using ngraph and std from ops
* Use OPENVINO_ASSERT instead of NGRAPH_CHECK
* Move `shape_util.hpp` to `dev_api/openvino/core/`
* Remove visit_attributes, same as base impl
* Fix build issues
* Fix build issues
---
 .../{ => openvino/core}/shape_util.hpp | 0
 .../ngraph/op/util/recurrent_sequence.hpp | 12 +-
 src/core/include/openvino/op/convert_like.hpp | 1 -
 src/core/include/openvino/op/dft.hpp | 2 -
 src/core/include/openvino/op/gather.hpp | 1 -
 src/core/include/openvino/op/gather_tree.hpp | 1 -
 src/core/include/openvino/op/hard_sigmoid.hpp | 1 -
 src/core/include/openvino/op/idft.hpp | 2 -
 src/core/include/openvino/op/irdft.hpp | 1 -
 src/core/include/openvino/op/rdft.hpp | 1 -
 src/core/include/openvino/op/selu.hpp | 2 -
 .../openvino/op/util/recurrent_sequence.hpp | 26 +++
 .../include/openvino/reference/reduce_l1.hpp | 2 +-
 .../include/openvino/reference/reduce_l2.hpp | 2 +-
 src/core/reference/src/op/function.cpp | 2 +-
 src/core/src/bound_evaluate.cpp | 2 +-
 src/core/src/node.cpp | 2 +-
 src/core/src/op/adaptive_avg_pool.cpp | 6 +-
 src/core/src/op/adaptive_max_pool.cpp | 5 +-
 src/core/src/op/avg_pool.cpp | 26 +--
 src/core/src/op/batch_norm.cpp | 53 ++---
 src/core/src/op/batch_to_space.cpp | 4 +-
 src/core/src/op/binary_convolution.cpp | 22 +-
 src/core/src/op/broadcast.cpp | 2 +-
 src/core/src/op/bucketize.cpp | 6 +-
 src/core/src/op/clamp.cpp | 2 +-
 src/core/src/op/concat.cpp | 8 +-
 src/core/src/op/constant.cpp | 18 +-
 src/core/src/op/convert.cpp | 6 +-
 src/core/src/op/convert_like.cpp | 32 +--
 src/core/src/op/ctc_greedy_decoder.cpp | 28 +--
 .../src/op/ctc_greedy_decoder_seq_len.cpp | 33 ++-
 src/core/src/op/ctc_loss.cpp | 34 ++-
 src/core/src/op/deformable_convolution.cpp | 1 -
 src/core/src/op/deformable_psroi_pooling.cpp | 50 ++---
 src/core/src/op/detection_output.cpp | 34 ++-
 src/core/src/op/dft.cpp | 19 +-
 src/core/src/op/divide.cpp | 8 +-
 src/core/src/op/einsum.cpp | 33 ++-
 src/core/src/op/embedding_segments_sum.cpp | 41 ++--
 src/core/src/op/embeddingbag_offsets_sum.cpp | 29 ++-
 src/core/src/op/embeddingbag_packedsum.cpp | 13 +-
 src/core/src/op/exp.cpp | 2 +-
 ...xperimental_detectron_detection_output.cpp | 14 +-
 ...erimental_detectron_generate_proposals.cpp | 13 +-
 .../op/experimental_detectron_roi_feature.cpp | 6 +-
 .../op/experimental_detectron_topkrois.cpp | 5 +-
 src/core/src/op/gather.cpp | 33 ++-
 src/core/src/op/gather_elements.cpp | 15 +-
 src/core/src/op/gather_nd.cpp | 15 +-
 src/core/src/op/gather_tree.cpp | 17 +-
 src/core/src/op/gelu.cpp | 2 +-
 src/core/src/op/generate_proposals.cpp | 15 +-
 src/core/src/op/greater_eq.cpp | 2 +-
 src/core/src/op/grn.cpp | 13 +-
 src/core/src/op/group_conv.cpp | 60 +++---
 src/core/src/op/gru_cell.cpp | 74 ++++---
 src/core/src/op/gru_sequence.cpp | 37 ++--
 src/core/src/op/hard_sigmoid.cpp | 15 +-
 src/core/src/op/hsigmoid.cpp | 2 +-
 src/core/src/op/hswish.cpp | 2 +-
 src/core/src/op/idft.cpp | 17 +-
 src/core/src/op/if.cpp | 49 ++---
 src/core/src/op/interpolate.cpp | 25 ++-
 src/core/src/op/irdft.cpp | 9 +-
 src/core/src/op/log_softmax.cpp | 11 +-
 src/core/src/op/logical_and.cpp | 2 +-
 src/core/src/op/logical_not.cpp | 2 +-
 src/core/src/op/logical_or.cpp | 2 +-
 src/core/src/op/loop.cpp | 15 +-
 src/core/src/op/lrn.cpp | 30 ++-
 src/core/src/op/lstm_cell.cpp | 197 +++++++++---------
 src/core/src/op/lstm_sequence.cpp | 110 +++++-----
 src/core/src/op/mish.cpp | 2 +-
 src/core/src/op/mod.cpp | 1 +
 src/core/src/op/negative.cpp | 4 +-
 src/core/src/op/not_equal.cpp | 2 +-
 src/core/src/op/one_hot.cpp | 14 +-
 src/core/src/op/op.cpp | 8 +-
 src/core/src/op/parameter.cpp | 38 ++--
 src/core/src/op/prelu.cpp | 2 +-
 src/core/src/op/psroi_pooling.cpp | 22 +-
 src/core/src/op/rdft.cpp | 9 +-
 src/core/src/op/reduce_logical_and.cpp | 4 +-
 src/core/src/op/reduce_logical_or.cpp | 4 +-
 src/core/src/op/reduce_max.cpp | 4 +-
 src/core/src/op/reduce_mean.cpp | 4 +-
 src/core/src/op/reduce_min.cpp | 4 +-
 src/core/src/op/reduce_prod.cpp | 4 +-
 src/core/src/op/reduce_sum.cpp | 4 +-
 src/core/src/op/relu.cpp | 2 +-
 src/core/src/op/reshape.cpp | 12 +-
 src/core/src/op/reverse.cpp | 2 +-
 src/core/src/op/reverse_sequence.cpp | 36 ++--
 src/core/src/op/rnn_cell.cpp | 65 +++---
 src/core/src/op/rnn_sequence.cpp | 36 ++--
 src/core/src/op/roi_pooling.cpp | 8 +-
 src/core/src/op/scatter_nd_update.cpp | 6 +-
 src/core/src/op/scatter_update.cpp | 2 +-
 src/core/src/op/select.cpp | 4 +-
 src/core/src/op/selu.cpp | 15 +-
 src/core/src/op/shape_of.cpp | 26 +--
 src/core/src/op/sigmoid.cpp | 2 +-
 src/core/src/op/sign.cpp | 2 +-
 src/core/src/op/sink.cpp | 6 +-
 src/core/src/op/softmax.cpp | 16 +-
 src/core/src/op/softplus.cpp | 2 +-
 src/core/src/op/softsign.cpp | 6 +-
 src/core/src/op/squared_difference.cpp | 8 +-
 src/core/src/op/squeeze.cpp | 4 +-
 src/core/src/op/strided_slice.cpp | 8 +-
 src/core/src/op/tensor_iterator.cpp | 16 +-
 src/core/src/op/util/recurrent_sequence.cpp | 27 ++-
 src/core/src/op/util/variable_value.cpp | 2 +-
 src/core/src/op/variadic_split.cpp | 7 +-
 src/core/src/op/xor.cpp | 1 +
 src/core/src/runtime/itensor.cpp | 2 +-
 src/core/src/runtime/ov_tensor.cpp | 2 +-
 src/core/src/shape_util.cpp | 2 +-
 src/core/src/tensor_conversion_util.cpp | 2 +-
 src/plugins/intel_cpu/src/nodes/reference.cpp | 12 +-
 .../custom_op_internal_dyn.cpp | 6 +-
 .../src/custom_op_insert_convert_i64.cpp | 6 +-
 .../template/backend/int_executable.cpp | 2 +-
 src/plugins/template/backend/ops/if.cpp | 2 +-
 125 files changed, 877 insertions(+), 994 deletions(-)
 rename src/core/dev_api/{ => openvino/core}/shape_util.hpp (100%)
 create mode 100644 src/core/include/openvino/op/util/recurrent_sequence.hpp

diff --git a/src/core/dev_api/shape_util.hpp b/src/core/dev_api/openvino/core/shape_util.hpp
similarity index 100%
rename from src/core/dev_api/shape_util.hpp
rename to src/core/dev_api/openvino/core/shape_util.hpp
diff --git a/src/core/include/ngraph/op/util/recurrent_sequence.hpp b/src/core/include/ngraph/op/util/recurrent_sequence.hpp
index 1cf9cc95ea8627..f283947fa154b6 100644
--- a/src/core/include/ngraph/op/util/recurrent_sequence.hpp
+++ b/src/core/include/ngraph/op/util/recurrent_sequence.hpp
@@ -18,20 +18,12 @@
 #include "ngraph/deprecated.hpp"
 #include "ngraph/node.hpp"
+#include "openvino/op/util/recurrent_sequence.hpp"
 namespace ngraph {
 namespace op {
 namespace util {
-///
-/// \brief Validates static rank and dimension for provided input parameters.
-/// Additionally input_size dimension is checked for X and W inputs.
-/// Applies to LSTM, GRU and RNN Sequences.
-///
-///
-/// \param[in] input Vector with RNNSequence-like op inputs in following order:
-/// X, initial_hidden_state, sequence_lengths, W, R and B.
-/// -NGRAPH_API_DEPRECATED void validate_seq_input_rank_dimension(const std::vector& input); +using ov::op::util::validate_seq_input_rank_dimension; } // namespace util } // namespace op } // namespace ngraph diff --git a/src/core/include/openvino/op/convert_like.hpp b/src/core/include/openvino/op/convert_like.hpp index 2621a1ce860131..b852a6fde68519 100644 --- a/src/core/include/openvino/op/convert_like.hpp +++ b/src/core/include/openvino/op/convert_like.hpp @@ -23,7 +23,6 @@ class OPENVINO_API ConvertLike : public Op { ConvertLike(const Output& data, const Output& like); void validate_and_infer_types() override; - bool visit_attributes(AttributeVisitor& visitor) override; std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; diff --git a/src/core/include/openvino/op/dft.hpp b/src/core/include/openvino/op/dft.hpp index b87262fd78b919..3783123e4090ea 100644 --- a/src/core/include/openvino/op/dft.hpp +++ b/src/core/include/openvino/op/dft.hpp @@ -45,8 +45,6 @@ class OPENVINO_API DFT : public util::FFTBase { /// \param signal_size Signal sizes for 'axes' DFT(const Output& data, const Output& axes, const Output& signal_size); - bool visit_attributes(AttributeVisitor& visitor) override; - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; }; } // namespace v7 diff --git a/src/core/include/openvino/op/gather.hpp b/src/core/include/openvino/op/gather.hpp index e752259271cfb4..dcc53a465bf718 100644 --- a/src/core/include/openvino/op/gather.hpp +++ b/src/core/include/openvino/op/gather.hpp @@ -21,7 +21,6 @@ class OPENVINO_API Gather : public op::util::GatherBase { /// \param axis The tensor is a dimension index to gather data from Gather(const Output& params, const Output& indices, const Output& axis); - bool visit_attributes(AttributeVisitor& visitor) override; int64_t get_axis() const override; std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; diff --git a/src/core/include/openvino/op/gather_tree.hpp b/src/core/include/openvino/op/gather_tree.hpp index bc2169019a8cea..b89503ba648efd 100644 --- a/src/core/include/openvino/op/gather_tree.hpp +++ b/src/core/include/openvino/op/gather_tree.hpp @@ -29,7 +29,6 @@ class OPENVINO_API GatherTree : public Op { const Output& max_seq_len, const Output& end_token); - bool visit_attributes(AttributeVisitor& visitor) override; void validate_and_infer_types() override; std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; diff --git a/src/core/include/openvino/op/hard_sigmoid.hpp b/src/core/include/openvino/op/hard_sigmoid.hpp index e6760c71b7edce..fb77a2bdede711 100644 --- a/src/core/include/openvino/op/hard_sigmoid.hpp +++ b/src/core/include/openvino/op/hard_sigmoid.hpp @@ -27,7 +27,6 @@ class OPENVINO_API HardSigmoid : public Op { /// HardSigmoid(const Output& data, const Output& alpha, const Output& beta); - bool visit_attributes(AttributeVisitor& visitor) override; void validate_and_infer_types() override; std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; }; diff --git a/src/core/include/openvino/op/idft.hpp b/src/core/include/openvino/op/idft.hpp index 1fd0948bc9c8a2..f5198e3c6fd71f 100644 --- a/src/core/include/openvino/op/idft.hpp +++ b/src/core/include/openvino/op/idft.hpp @@ -33,8 +33,6 @@ class OPENVINO_API IDFT : public util::FFTBase { /// \param signal_size Signal sizes for 'axes' IDFT(const Output& data, const Output& axes, const Output& signal_size); - bool 
visit_attributes(AttributeVisitor& visitor) override; - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; }; } // namespace v7 diff --git a/src/core/include/openvino/op/irdft.hpp b/src/core/include/openvino/op/irdft.hpp index 02cc5d7006cf9c..3b76e2988cbe7b 100644 --- a/src/core/include/openvino/op/irdft.hpp +++ b/src/core/include/openvino/op/irdft.hpp @@ -30,7 +30,6 @@ class OPENVINO_API IRDFT : public util::FFTBase { IRDFT(const Output& data, const Output& axes, const Output& signal_size); void validate_and_infer_types() override; - bool visit_attributes(AttributeVisitor& visitor) override; std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; }; diff --git a/src/core/include/openvino/op/rdft.hpp b/src/core/include/openvino/op/rdft.hpp index 42830fceedd7a2..d37fd78a5fc7a4 100644 --- a/src/core/include/openvino/op/rdft.hpp +++ b/src/core/include/openvino/op/rdft.hpp @@ -30,7 +30,6 @@ class OPENVINO_API RDFT : public util::FFTBase { RDFT(const Output& data, const Output& axes, const Output& signal_size); void validate_and_infer_types() override; - bool visit_attributes(AttributeVisitor& visitor) override; std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; }; diff --git a/src/core/include/openvino/op/selu.hpp b/src/core/include/openvino/op/selu.hpp index d81fd8ac1f3432..df75b71466c0c4 100644 --- a/src/core/include/openvino/op/selu.hpp +++ b/src/core/include/openvino/op/selu.hpp @@ -25,8 +25,6 @@ class OPENVINO_API Selu : public Op { void validate_and_infer_types() override; - bool visit_attributes(AttributeVisitor& visitor) override; - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; }; } // namespace v0 diff --git a/src/core/include/openvino/op/util/recurrent_sequence.hpp b/src/core/include/openvino/op/util/recurrent_sequence.hpp new file mode 100644 index 00000000000000..452c415057610d --- /dev/null +++ b/src/core/include/openvino/op/util/recurrent_sequence.hpp @@ -0,0 +1,26 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +#include "openvino/core/partial_shape.hpp" + +namespace ov { +namespace op { +namespace util { +/// +/// \brief Validates static rank and dimension for provided input parameters. +/// Additionally input_size dimension is checked for X and W inputs. +/// Applies to LSTM, GRU and RNN Sequences. +/// +/// +/// \param[in] input Vector with RNNSequence-like op inputs in following order: +/// X, initial_hidden_state, sequence_lengths, W, R and B. 
+/// +OPENVINO_API void validate_seq_input_rank_dimension(const std::vector& input); +} // namespace util +} // namespace op +} // namespace ov diff --git a/src/core/reference/include/openvino/reference/reduce_l1.hpp b/src/core/reference/include/openvino/reference/reduce_l1.hpp index a6c41f20a0f16f..50228962f334cf 100644 --- a/src/core/reference/include/openvino/reference/reduce_l1.hpp +++ b/src/core/reference/include/openvino/reference/reduce_l1.hpp @@ -7,10 +7,10 @@ #include #include +#include "openvino/core/shape_util.hpp" #include "openvino/reference/abs.hpp" #include "openvino/reference/sum.hpp" #include "openvino/reference/utils/type_util.hpp" -#include "shape_util.hpp" namespace ov { namespace reference { diff --git a/src/core/reference/include/openvino/reference/reduce_l2.hpp b/src/core/reference/include/openvino/reference/reduce_l2.hpp index 55bba7b7cd4e42..a7b9375cfc9c06 100644 --- a/src/core/reference/include/openvino/reference/reduce_l2.hpp +++ b/src/core/reference/include/openvino/reference/reduce_l2.hpp @@ -8,8 +8,8 @@ #include #include +#include "openvino/core/shape_util.hpp" #include "openvino/reference/utils/coordinate_transform.hpp" -#include "shape_util.hpp" namespace ov { namespace reference { diff --git a/src/core/reference/src/op/function.cpp b/src/core/reference/src/op/function.cpp index a0bedfa1241728..ebf706e3f03c0d 100644 --- a/src/core/reference/src/op/function.cpp +++ b/src/core/reference/src/op/function.cpp @@ -10,8 +10,8 @@ #include "ngraph/runtime/host_tensor.hpp" #include "ngraph/runtime/tensor.hpp" #include "openvino/core/deprecated.hpp" +#include "openvino/core/shape_util.hpp" #include "openvino/reference/concat.hpp" -#include "shape_util.hpp" namespace ov { namespace reference { diff --git a/src/core/src/bound_evaluate.cpp b/src/core/src/bound_evaluate.cpp index 38d09dbcdf6fe4..cf3dc5bf21e3da 100644 --- a/src/core/src/bound_evaluate.cpp +++ b/src/core/src/bound_evaluate.cpp @@ -7,9 +7,9 @@ #include "ngraph/validation_util.hpp" #include "openvino/core/dimension_tracker.hpp" #include "openvino/core/rt_info.hpp" +#include "openvino/core/shape_util.hpp" #include "openvino/op/util/symbolic_info.hpp" #include "openvino/opsets/opset10.hpp" -#include "shape_util.hpp" #include "tensor_conversion_util.hpp" #include "transformations/rt_info/decompression.hpp" #include "transformations/rt_info/is_shape_subgraph.hpp" diff --git a/src/core/src/node.cpp b/src/core/src/node.cpp index 7f20ce4b991b6c..1610d7b2fda4da 100644 --- a/src/core/src/node.cpp +++ b/src/core/src/node.cpp @@ -15,9 +15,9 @@ #include "ngraph/graph_util.hpp" #include "openvino/core/descriptor/input.hpp" #include "openvino/core/rt_info.hpp" +#include "openvino/core/shape_util.hpp" #include "openvino/pass/constant_folding.hpp" #include "openvino/pass/pattern/matcher.hpp" -#include "shape_util.hpp" #include "shape_validation.hpp" #include "shared_node_info.hpp" #include "tensor_conversion_util.hpp" diff --git a/src/core/src/op/adaptive_avg_pool.cpp b/src/core/src/op/adaptive_avg_pool.cpp index b49d2d68f21e12..bf8f1601fc9d65 100644 --- a/src/core/src/op/adaptive_avg_pool.cpp +++ b/src/core/src/op/adaptive_avg_pool.cpp @@ -7,8 +7,6 @@ #include "adaptive_avg_pool_shape_inference.hpp" #include "itt.hpp" -using namespace std; - namespace ov { op::v8::AdaptiveAvgPool::AdaptiveAvgPool(const Output& data, const Output& output_shape) @@ -25,10 +23,10 @@ void op::v8::AdaptiveAvgPool::validate_and_infer_types() { set_output_type(0, get_input_element_type(0), output_shape); } -shared_ptr 
op::v8::AdaptiveAvgPool::clone_with_new_inputs(const OutputVector& new_args) const { +std::shared_ptr op::v8::AdaptiveAvgPool::clone_with_new_inputs(const OutputVector& new_args) const { OV_OP_SCOPE(v8_AdaptiveAvgPool_clone_with_new_inputs); check_new_args_count(this, new_args); - return make_shared(new_args.at(0), new_args.at(1)); + return std::make_shared(new_args.at(0), new_args.at(1)); } } // namespace ov diff --git a/src/core/src/op/adaptive_max_pool.cpp b/src/core/src/op/adaptive_max_pool.cpp index ecc55b9ca838fb..db67855d54f24a 100644 --- a/src/core/src/op/adaptive_max_pool.cpp +++ b/src/core/src/op/adaptive_max_pool.cpp @@ -7,7 +7,6 @@ #include "adaptive_max_pool_shape_inference.hpp" #include "itt.hpp" -using namespace std; namespace ov { op::v8::AdaptiveMaxPool::AdaptiveMaxPool(const Output& data, @@ -39,10 +38,10 @@ void op::v8::AdaptiveMaxPool::validate_and_infer_types() { set_output_type(1, m_index_element_type, output_shapes[1]); } -shared_ptr op::v8::AdaptiveMaxPool::clone_with_new_inputs(const OutputVector& new_args) const { +std::shared_ptr op::v8::AdaptiveMaxPool::clone_with_new_inputs(const OutputVector& new_args) const { OV_OP_SCOPE(v8_AdaptiveMaxPool_clone_with_new_inputs); check_new_args_count(this, new_args); - return make_shared(new_args.at(0), new_args.at(1), m_index_element_type); + return std::make_shared(new_args.at(0), new_args.at(1), m_index_element_type); } void op::v8::AdaptiveMaxPool::set_index_element_type(const element::Type& type) { diff --git a/src/core/src/op/avg_pool.cpp b/src/core/src/op/avg_pool.cpp index 007ad910bb752e..c7e5477715d831 100644 --- a/src/core/src/op/avg_pool.cpp +++ b/src/core/src/op/avg_pool.cpp @@ -2,15 +2,11 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph/op/avg_pool.hpp" +#include "openvino/op/avg_pool.hpp" #include "avg_pool_shape_inference.hpp" #include "itt.hpp" -#include "ngraph/attribute_visitor.hpp" -#include "ngraph/graph_util.hpp" -#include "ngraph/validation_util.hpp" - -using namespace std; +#include "openvino/core/attribute_visitor.hpp" // *** AvgPool OP SET 1 *** ov::op::v1::AvgPool::AvgPool(const Output& arg, @@ -109,15 +105,15 @@ void ov::op::v1::AvgPool::set_rounding_type(op::RoundingType rounding_type) { m_rounding_type = rounding_type; } -shared_ptr ov::op::v1::AvgPool::clone_with_new_inputs(const OutputVector& new_args) const { +std::shared_ptr ov::op::v1::AvgPool::clone_with_new_inputs(const OutputVector& new_args) const { OV_OP_SCOPE(v1_AvgPool_clone_with_new_inputs); check_new_args_count(this, new_args); - return make_shared(new_args.at(0), - m_strides, - m_pads_begin, - m_pads_end, - m_kernel, - m_exclude_pad, - m_rounding_type, - m_auto_pad); + return std::make_shared(new_args.at(0), + m_strides, + m_pads_begin, + m_pads_end, + m_kernel, + m_exclude_pad, + m_rounding_type, + m_auto_pad); } diff --git a/src/core/src/op/batch_norm.cpp b/src/core/src/op/batch_norm.cpp index 129e1bc54f7743..409fa6d4638a14 100644 --- a/src/core/src/op/batch_norm.cpp +++ b/src/core/src/op/batch_norm.cpp @@ -2,16 +2,16 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph/op/batch_norm.hpp" +#include "openvino/op/batch_norm.hpp" #include #include "itt.hpp" -#include "ngraph/attribute_visitor.hpp" #include "ngraph/validation_util.hpp" +#include "openvino/core/attribute_visitor.hpp" +#include "openvino/core/validation_util.hpp" -using namespace std; -using namespace ngraph; +namespace ov { op::v0::BatchNormInference::BatchNormInference(const Output& input, const Output& gamma, @@ -44,17 +44,17 @@ void 
op::v0::BatchNormInference::validate_and_infer_types() { set_output_size(1); OPENVINO_SUPPRESS_DEPRECATED_START std::tie(result_et, result_batch_shape, result_channel_shape) = - infer_batch_norm_forward(this, - get_input_element_type(INPUT_DATA), - get_input_element_type(INPUT_GAMMA), - get_input_element_type(INPUT_BETA), - get_input_element_type(INPUT_MEAN), - get_input_element_type(INPUT_VARIANCE), - get_input_partial_shape(INPUT_DATA), - get_input_partial_shape(INPUT_GAMMA), - get_input_partial_shape(INPUT_BETA), - get_input_partial_shape(INPUT_MEAN), - get_input_partial_shape(INPUT_VARIANCE)); + ngraph::infer_batch_norm_forward(this, + get_input_element_type(INPUT_DATA), + get_input_element_type(INPUT_GAMMA), + get_input_element_type(INPUT_BETA), + get_input_element_type(INPUT_MEAN), + get_input_element_type(INPUT_VARIANCE), + get_input_partial_shape(INPUT_DATA), + get_input_partial_shape(INPUT_GAMMA), + get_input_partial_shape(INPUT_BETA), + get_input_partial_shape(INPUT_MEAN), + get_input_partial_shape(INPUT_VARIANCE)); OPENVINO_SUPPRESS_DEPRECATED_END set_output_type(0, result_et, result_batch_shape); } @@ -101,17 +101,17 @@ void op::v5::BatchNormInference::validate_and_infer_types() { set_output_size(1); OPENVINO_SUPPRESS_DEPRECATED_START std::tie(result_et, result_batch_shape, result_channel_shape) = - infer_batch_norm_forward(this, - get_input_element_type(INPUT_DATA), - get_input_element_type(INPUT_GAMMA), - get_input_element_type(INPUT_BETA), - get_input_element_type(INPUT_MEAN), - get_input_element_type(INPUT_VARIANCE), - get_input_partial_shape(INPUT_DATA), - get_input_partial_shape(INPUT_GAMMA), - get_input_partial_shape(INPUT_BETA), - get_input_partial_shape(INPUT_MEAN), - get_input_partial_shape(INPUT_VARIANCE)); + ngraph::infer_batch_norm_forward(this, + get_input_element_type(INPUT_DATA), + get_input_element_type(INPUT_GAMMA), + get_input_element_type(INPUT_BETA), + get_input_element_type(INPUT_MEAN), + get_input_element_type(INPUT_VARIANCE), + get_input_partial_shape(INPUT_DATA), + get_input_partial_shape(INPUT_GAMMA), + get_input_partial_shape(INPUT_BETA), + get_input_partial_shape(INPUT_MEAN), + get_input_partial_shape(INPUT_VARIANCE)); OPENVINO_SUPPRESS_DEPRECATED_END set_output_type(0, result_et, result_batch_shape); } @@ -126,3 +126,4 @@ std::shared_ptr op::v5::BatchNormInference::clone_with_new_inputs(const Ou new_args.at(4), m_epsilon); } +} // namespace ov diff --git a/src/core/src/op/batch_to_space.cpp b/src/core/src/op/batch_to_space.cpp index c7ae3d7580a02c..e9c4acb2253e69 100644 --- a/src/core/src/op/batch_to_space.cpp +++ b/src/core/src/op/batch_to_space.cpp @@ -177,8 +177,8 @@ bool batch_to_space_evaluate(const HostTensorVector& outputs, const HostTensorVe bool ngraph::op::v1::BatchToSpace::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { OV_OP_SCOPE(v1_BatchToSpace_evaluate); OPENVINO_SUPPRESS_DEPRECATED_START - NGRAPH_CHECK(validate_host_tensor_vector(inputs, 4)); - NGRAPH_CHECK(validate_host_tensor_vector(outputs, 1)); + OPENVINO_ASSERT(validate_host_tensor_vector(inputs, 4)); + OPENVINO_ASSERT(validate_host_tensor_vector(outputs, 1)); OPENVINO_SUPPRESS_DEPRECATED_END if (outputs[0]->get_partial_shape().is_dynamic()) { diff --git a/src/core/src/op/binary_convolution.cpp b/src/core/src/op/binary_convolution.cpp index ca89c851ba9cf6..d045818aa48c01 100644 --- a/src/core/src/op/binary_convolution.cpp +++ b/src/core/src/op/binary_convolution.cpp @@ -11,8 +11,6 @@ #include "openvino/core/axis_vector.hpp" #include 
"openvino/core/coordinate_diff.hpp" -using namespace std; - ov::op::v1::BinaryConvolution::BinaryConvolution(const Output& data, const Output& kernel, const Strides& strides, @@ -67,18 +65,18 @@ void ov::op::v1::BinaryConvolution::validate_and_infer_types() { set_output_type(0, data_batch_et, output_shapes[0]); } -shared_ptr ov::op::v1::BinaryConvolution::clone_with_new_inputs(const OutputVector& new_args) const { +std::shared_ptr ov::op::v1::BinaryConvolution::clone_with_new_inputs(const OutputVector& new_args) const { OV_OP_SCOPE(v1_BinaryConvolution_clone_with_new_inputs); check_new_args_count(this, new_args); - return make_shared(new_args.at(0), - new_args.at(1), - m_strides, - m_pads_begin, - m_pads_end, - m_dilations, - m_mode, - m_pad_value, - m_auto_pad); + return std::make_shared(new_args.at(0), + new_args.at(1), + m_strides, + m_pads_begin, + m_pads_end, + m_dilations, + m_mode, + m_pad_value, + m_auto_pad); } bool ov::op::v1::BinaryConvolution::visit_attributes(AttributeVisitor& visitor) { diff --git a/src/core/src/op/broadcast.cpp b/src/core/src/op/broadcast.cpp index 8113b728ba285e..23661efdfd5ab9 100644 --- a/src/core/src/op/broadcast.cpp +++ b/src/core/src/op/broadcast.cpp @@ -39,7 +39,7 @@ std::pair get_broadcast_axes_bidirectional(const ov::Shape& arg_s AxisSet broadcast_axes; bool axes_known = false; const auto start_axis = static_cast(result_shape.size()) - static_cast(arg_shape.size()); - NGRAPH_CHECK(start_axis >= 0); + OPENVINO_ASSERT(start_axis >= 0); for (size_t i = 0; i < result_shape.size(); i++) { if (i < static_cast(start_axis) || result_shape[i] != arg_shape[i - start_axis]) { broadcast_axes.insert(i); diff --git a/src/core/src/op/bucketize.cpp b/src/core/src/op/bucketize.cpp index d75710e926957b..b4885b58c7e354 100644 --- a/src/core/src/op/bucketize.cpp +++ b/src/core/src/op/bucketize.cpp @@ -9,8 +9,6 @@ #include "itt.hpp" #include "openvino/core/validation_util.hpp" -using namespace std; - namespace ov { op::v3::Bucketize::Bucketize(const Output& data, const Output& buckets, @@ -58,10 +56,10 @@ void op::v3::Bucketize::validate_and_infer_types() { set_output_type(0, m_output_type, output_shapes[0]); } -shared_ptr op::v3::Bucketize::clone_with_new_inputs(const OutputVector& inputs) const { +std::shared_ptr op::v3::Bucketize::clone_with_new_inputs(const OutputVector& inputs) const { OV_OP_SCOPE(v3_Bucketize_clone_with_new_inputs); check_new_args_count(this, inputs); - return make_shared(inputs.at(0), inputs.at(1), m_output_type, m_with_right_bound); + return std::make_shared(inputs.at(0), inputs.at(1), m_output_type, m_with_right_bound); } } // namespace ov diff --git a/src/core/src/op/clamp.cpp b/src/core/src/op/clamp.cpp index b7f7f408905299..91caa3f3fdb48b 100644 --- a/src/core/src/op/clamp.cpp +++ b/src/core/src/op/clamp.cpp @@ -79,7 +79,7 @@ bool evaluate_clamp(const HostTensorPtr& arg, const HostTensorPtr& out, double m bool op::v0::Clamp::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { OV_OP_SCOPE(v0_Clamp_evaluate); OPENVINO_SUPPRESS_DEPRECATED_START - NGRAPH_CHECK(validate_host_tensor_vector(outputs, 1) && validate_host_tensor_vector(inputs, 1)); + OPENVINO_ASSERT(validate_host_tensor_vector(outputs, 1) && validate_host_tensor_vector(inputs, 1)); OPENVINO_SUPPRESS_DEPRECATED_END return clamp::evaluate_clamp(inputs[0], outputs[0], get_min(), get_max()); } diff --git a/src/core/src/op/concat.cpp b/src/core/src/op/concat.cpp index 7c0595d02566c2..05d868b1096acb 100644 --- a/src/core/src/op/concat.cpp +++ 
b/src/core/src/op/concat.cpp @@ -103,9 +103,9 @@ bool evaluate_concat(const HostTensorVector& args, const HostTensorPtr& out, int bool op::Concat::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { OV_OP_SCOPE(v0_Concat_evaluate); - NGRAPH_CHECK(!inputs.empty()); - NGRAPH_CHECK(validate_host_tensor_vector(inputs, inputs.size())); - NGRAPH_CHECK(validate_host_tensor_vector(outputs, 1)); + OPENVINO_ASSERT(!inputs.empty()); + OPENVINO_ASSERT(validate_host_tensor_vector(inputs, inputs.size())); + OPENVINO_ASSERT(validate_host_tensor_vector(outputs, 1)); auto concat_axis = get_axis() < 0 ? get_axis() + inputs[0]->get_shape().size() : get_axis(); return evaluate_concat(inputs, outputs[0], concat_axis); } @@ -170,7 +170,7 @@ bool op::Concat::evaluate_label(TensorLabelVector& output_labels) const { if (input_label.empty()) { const auto& shape = input.get_partial_shape(); // sanity check. at this point value propagation was successful - NGRAPH_CHECK(shape.is_static()); + OPENVINO_ASSERT(shape.is_static()); const auto& num_elements = shape_size(shape.to_shape()); input_label.resize(num_elements, no_label); } diff --git a/src/core/src/op/constant.cpp b/src/core/src/op/constant.cpp index e06d44467b1f3b..44211151fa2826 100644 --- a/src/core/src/op/constant.cpp +++ b/src/core/src/op/constant.cpp @@ -227,9 +227,9 @@ ov::op::v0::Constant::Constant(const Constant& other) { } ov::op::v0::Constant::Constant(const Constant& other, const ov::Shape& new_shape) { - NGRAPH_CHECK(shape_size(other.m_shape) == shape_size(new_shape), - "ov::Shape size " + std::to_string(shape_size(new_shape)) + " is not equal to " + - std::to_string(shape_size(other.m_shape))); + OPENVINO_ASSERT(shape_size(other.m_shape) == shape_size(new_shape), + "ov::Shape size " + std::to_string(shape_size(new_shape)) + " is not equal to " + + std::to_string(shape_size(other.m_shape))); m_element_type = other.m_element_type; m_shape = new_shape; m_data = other.m_data; @@ -403,7 +403,7 @@ vector ov::op::v0::Constant::get_value_strings() const { } ov::Shape ov::op::v0::Constant::get_shape_val() const { - NGRAPH_CHECK(m_element_type.is_integral_number()); + OPENVINO_ASSERT(m_element_type.is_integral_number()); std::vector out_shape = cast_vector(); ov::Shape output_shape(shape_size(m_shape)); std::transform(out_shape.begin(), out_shape.end(), output_shape.begin(), [&](const int64_t& v) { @@ -413,7 +413,7 @@ ov::Shape ov::op::v0::Constant::get_shape_val() const { } ov::Strides ov::op::v0::Constant::get_strides_val() const { - NGRAPH_CHECK(m_element_type == element::i64); + OPENVINO_ASSERT(m_element_type == element::i64); std::vector out_strides = cast_vector(); Strides output_strides(shape_size(m_shape)); std::transform(out_strides.begin(), out_strides.end(), output_strides.begin(), [&](const int64_t& v) { @@ -423,7 +423,7 @@ ov::Strides ov::op::v0::Constant::get_strides_val() const { } ov::Coordinate ov::op::v0::Constant::get_coordinate_val() const { - NGRAPH_CHECK(m_element_type == element::i64); + OPENVINO_ASSERT(m_element_type == element::i64); std::vector out_coordinate = cast_vector(); Coordinate output_coordinate(shape_size(m_shape)); std::transform(out_coordinate.begin(), out_coordinate.end(), output_coordinate.begin(), [&](const int64_t& v) { @@ -433,7 +433,7 @@ ov::Coordinate ov::op::v0::Constant::get_coordinate_val() const { } ov::CoordinateDiff ov::op::v0::Constant::get_coordinate_diff_val() const { - NGRAPH_CHECK(m_element_type == element::i64); + OPENVINO_ASSERT(m_element_type == element::i64); std::vector 
out_coordinate_diff = cast_vector(); CoordinateDiff output_coordinate_diff(shape_size(m_shape)); std::transform(out_coordinate_diff.begin(), @@ -446,7 +446,7 @@ ov::CoordinateDiff ov::op::v0::Constant::get_coordinate_diff_val() const { } ov::AxisVector ov::op::v0::Constant::get_axis_vector_val() const { - NGRAPH_CHECK(m_element_type.is_integral_number()); + OPENVINO_ASSERT(m_element_type.is_integral_number()); std::vector out_axis_vector = cast_vector(); AxisVector output_axis_vector(shape_size(m_shape)); std::transform(out_axis_vector.begin(), out_axis_vector.end(), output_axis_vector.begin(), [&](const int64_t& v) { @@ -456,7 +456,7 @@ ov::AxisVector ov::op::v0::Constant::get_axis_vector_val() const { } ov::AxisSet ov::op::v0::Constant::get_axis_set_val() const { - NGRAPH_CHECK(m_element_type.is_integral_number()); + OPENVINO_ASSERT(m_element_type.is_integral_number()); std::vector out_axis_set = cast_vector(); AxisSet output_axis_set; for (auto& axis : out_axis_set) { diff --git a/src/core/src/op/convert.cpp b/src/core/src/op/convert.cpp index d6a34741fa3b5f..b36b8c6ef44449 100644 --- a/src/core/src/op/convert.cpp +++ b/src/core/src/op/convert.cpp @@ -125,7 +125,7 @@ bool evaluate_convert(const HostTensorPtr& arg, const HostTensorPtr& out) { } bool evaluate_bound(const Node* node, ov::TensorVector& output_values, bool is_upper) { - NGRAPH_CHECK(node, output_values.size() == 1); + OPENVINO_ASSERT(node, output_values.size() == 1); const auto& input = node->input_value(0); if (const auto& value = is_upper ? input.get_tensor().get_upper_value() : input.get_tensor().get_lower_value()) { if (is_vector(value.get_shape()) && (value.get_shape().front() == 0)) { @@ -175,8 +175,8 @@ bool evaluate_bound(const Node* node, ov::TensorVector& output_values, bool is_u bool op::v0::Convert::evaluate(const HostTensorVector& output_values, const HostTensorVector& input_values) const { OV_OP_SCOPE(v0_Convert_evaluate); OPENVINO_SUPPRESS_DEPRECATED_START - NGRAPH_CHECK(validate_host_tensor_vector(input_values, 1)); - NGRAPH_CHECK(validate_host_tensor_vector(output_values, 1)); + OPENVINO_ASSERT(validate_host_tensor_vector(input_values, 1)); + OPENVINO_ASSERT(validate_host_tensor_vector(output_values, 1)); OPENVINO_SUPPRESS_DEPRECATED_END return convert::evaluate_convert(input_values[0], output_values[0]); } diff --git a/src/core/src/op/convert_like.cpp b/src/core/src/op/convert_like.cpp index 4ab83d29c85a98..a83ed49516c19c 100644 --- a/src/core/src/op/convert_like.cpp +++ b/src/core/src/op/convert_like.cpp @@ -2,46 +2,46 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph/op/convert_like.hpp" +#include "openvino/op/convert_like.hpp" #include #include "itt.hpp" -#include "ngraph/op/constant.hpp" -#include "ngraph/op/convert.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/convert.hpp" -using namespace std; -using namespace ngraph; +namespace ov { +namespace op { +namespace v1 { -op::v1::ConvertLike::ConvertLike(const Output& data, const Output& like) : Op({data, like}) { +ConvertLike::ConvertLike(const Output& data, const Output& like) : Op({data, like}) { constructor_validate_and_infer_types(); } -void op::v1::ConvertLike::validate_and_infer_types() { +void ConvertLike::validate_and_infer_types() { OV_OP_SCOPE(v1_ConvertLike_validate_and_infer_types); set_output_type(0, get_input_element_type(1), get_input_partial_shape(0)); } -bool op::v1::ConvertLike::visit_attributes(AttributeVisitor& visitor) { - OV_OP_SCOPE(v1_ConvertLike_visit_attributes); - return true; -} - -shared_ptr 
op::v1::ConvertLike::clone_with_new_inputs(const OutputVector& new_args) const { +std::shared_ptr ConvertLike::clone_with_new_inputs(const OutputVector& new_args) const { OV_OP_SCOPE(v1_ConvertLike_clone_with_new_inputs); check_new_args_count(this, new_args); - return make_shared(new_args.at(0), new_args.at(1)); + return std::make_shared(new_args.at(0), new_args.at(1)); } -bool op::v1::ConvertLike::constant_fold(OutputVector& output_values, const OutputVector& input_values) { +bool ConvertLike::constant_fold(OutputVector& output_values, const OutputVector& input_values) { OV_OP_SCOPE(v1_ConvertLike_constant_fold); if (is_const_fold_disabled()) { return false; } if (auto data_const = std::dynamic_pointer_cast(input_values[0].get_node_shared_ptr())) { - auto convert = make_shared(input_values[0], input_values[1].get_element_type()); + auto convert = std::make_shared(input_values[0], input_values[1].get_element_type()); return convert->constant_fold(output_values, OutputVector{data_const}); } return false; } + +} // namespace v1 +} // namespace op +} // namespace ov diff --git a/src/core/src/op/ctc_greedy_decoder.cpp b/src/core/src/op/ctc_greedy_decoder.cpp index fe436fbea3e416..0e41c01aaf3b48 100644 --- a/src/core/src/op/ctc_greedy_decoder.cpp +++ b/src/core/src/op/ctc_greedy_decoder.cpp @@ -2,24 +2,23 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph/op/ctc_greedy_decoder.hpp" - -#include +#include "openvino/op/ctc_greedy_decoder.hpp" +#include "ctc_greedy_decoder_shape_inference.hpp" #include "itt.hpp" -using namespace std; -using namespace ngraph; - -op::CTCGreedyDecoder::CTCGreedyDecoder(const Output& input, - const Output& seq_len, - const bool ctc_merge_repeated) +namespace ov { +namespace op { +namespace v0 { +CTCGreedyDecoder::CTCGreedyDecoder(const Output& input, + const Output& seq_len, + const bool ctc_merge_repeated) : Op({input, seq_len}), m_ctc_merge_repeated(ctc_merge_repeated) { constructor_validate_and_infer_types(); } -void op::CTCGreedyDecoder::validate_and_infer_types() { +void CTCGreedyDecoder::validate_and_infer_types() { OV_OP_SCOPE(v0_CTCGreedyDecoder_validate_and_infer_types); const auto& logits_pshape = get_input_partial_shape(0); const auto& seq_mask_pshape = get_input_partial_shape(1); @@ -30,14 +29,17 @@ void op::CTCGreedyDecoder::validate_and_infer_types() { set_output_type(0, input_et, output_shapes[0]); } -bool op::CTCGreedyDecoder::visit_attributes(AttributeVisitor& visitor) { +bool CTCGreedyDecoder::visit_attributes(AttributeVisitor& visitor) { OV_OP_SCOPE(v0_CTCGreedyDecoder_visit_attributes); visitor.on_attribute("ctc_merge_repeated", m_ctc_merge_repeated); return true; } -shared_ptr op::CTCGreedyDecoder::clone_with_new_inputs(const OutputVector& new_args) const { +std::shared_ptr CTCGreedyDecoder::clone_with_new_inputs(const OutputVector& new_args) const { OV_OP_SCOPE(v0_CTCGreedyDecoder_clone_with_new_inputs); check_new_args_count(this, new_args); - return make_shared(new_args.at(0), new_args.at(1), m_ctc_merge_repeated); + return std::make_shared(new_args.at(0), new_args.at(1), m_ctc_merge_repeated); } +} // namespace v0 +} // namespace op +} // namespace ov diff --git a/src/core/src/op/ctc_greedy_decoder_seq_len.cpp b/src/core/src/op/ctc_greedy_decoder_seq_len.cpp index e852bc73a380d1..24049ed0b910aa 100644 --- a/src/core/src/op/ctc_greedy_decoder_seq_len.cpp +++ b/src/core/src/op/ctc_greedy_decoder_seq_len.cpp @@ -2,14 +2,12 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph/op/ctc_greedy_decoder_seq_len.hpp" - -#include 
+#include "openvino/op/ctc_greedy_decoder_seq_len.hpp" +#include "ctc_greedy_decoder_seq_len_shape_inference.hpp" #include "itt.hpp" -using namespace std; -using namespace ngraph; +namespace ov { op::v6::CTCGreedyDecoderSeqLen::CTCGreedyDecoderSeqLen(const Output& input, const Output& seq_len, @@ -65,25 +63,26 @@ bool op::v6::CTCGreedyDecoderSeqLen::visit_attributes(AttributeVisitor& visitor) return true; } -shared_ptr op::v6::CTCGreedyDecoderSeqLen::clone_with_new_inputs(const OutputVector& new_args) const { +std::shared_ptr op::v6::CTCGreedyDecoderSeqLen::clone_with_new_inputs(const OutputVector& new_args) const { OV_OP_SCOPE(v6_CTCGreedyDecoderSeqLen_clone_with_new_inputs); check_new_args_count(this, new_args); size_t args_size = new_args.size(); if (args_size == 2) { - return make_shared(new_args.at(0), - new_args.at(1), - m_merge_repeated, - m_classes_index_type, - m_sequence_length_type); + return std::make_shared(new_args.at(0), + new_args.at(1), + m_merge_repeated, + m_classes_index_type, + m_sequence_length_type); } else if (args_size == 3) { - return make_shared(new_args.at(0), - new_args.at(1), - new_args.at(2), - m_merge_repeated, - m_classes_index_type, - m_sequence_length_type); + return std::make_shared(new_args.at(0), + new_args.at(1), + new_args.at(2), + m_merge_repeated, + m_classes_index_type, + m_sequence_length_type); } else { OPENVINO_THROW("Incorrect number of arguments"); } } +} // namespace ov diff --git a/src/core/src/op/ctc_loss.cpp b/src/core/src/op/ctc_loss.cpp index a6ce8b6bb8f7a2..62aeb33939993b 100644 --- a/src/core/src/op/ctc_loss.cpp +++ b/src/core/src/op/ctc_loss.cpp @@ -8,8 +8,6 @@ #include "itt.hpp" #include "openvino/core/validation_util.hpp" -using namespace std; - namespace ov { op::v4::CTCLoss::CTCLoss(const Output& logits, const Output& logit_length, @@ -76,26 +74,26 @@ bool op::v4::CTCLoss::visit_attributes(AttributeVisitor& visitor) { return true; } -shared_ptr op::v4::CTCLoss::clone_with_new_inputs(const OutputVector& new_args) const { +std::shared_ptr op::v4::CTCLoss::clone_with_new_inputs(const OutputVector& new_args) const { OV_OP_SCOPE(v4_CTCLoss_clone_with_new_inputs); check_new_args_count(this, new_args); if (new_args.size() == 4) { - return make_shared(new_args.at(0), - new_args.at(1), - new_args.at(2), - new_args.at(3), - preprocess_collapse_repeated_, - ctc_merge_repeated_, - unique_); + return std::make_shared(new_args.at(0), + new_args.at(1), + new_args.at(2), + new_args.at(3), + preprocess_collapse_repeated_, + ctc_merge_repeated_, + unique_); } else if (new_args.size() == 5) { - return make_shared(new_args.at(0), - new_args.at(1), - new_args.at(2), - new_args.at(3), - new_args.at(4), - preprocess_collapse_repeated_, - ctc_merge_repeated_, - unique_); + return std::make_shared(new_args.at(0), + new_args.at(1), + new_args.at(2), + new_args.at(3), + new_args.at(4), + preprocess_collapse_repeated_, + ctc_merge_repeated_, + unique_); } else { OPENVINO_THROW("Incorrect number of arguments"); } diff --git a/src/core/src/op/deformable_convolution.cpp b/src/core/src/op/deformable_convolution.cpp index ba9552759716a8..25db7f5fa07559 100644 --- a/src/core/src/op/deformable_convolution.cpp +++ b/src/core/src/op/deformable_convolution.cpp @@ -7,7 +7,6 @@ #include "deformable_convolution_shape_inference.hpp" #include "itt.hpp" -using namespace std; namespace ov { op::v8::DeformableConvolution::DeformableConvolution(const Output& arg, const Output& offsets, diff --git a/src/core/src/op/deformable_psroi_pooling.cpp 
b/src/core/src/op/deformable_psroi_pooling.cpp index a6a5db8f5c9835..8de8b81109044e 100644 --- a/src/core/src/op/deformable_psroi_pooling.cpp +++ b/src/core/src/op/deformable_psroi_pooling.cpp @@ -2,14 +2,13 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph/op/deformable_psroi_pooling.hpp" +#include "openvino/op/deformable_psroi_pooling.hpp" #include "deformable_psroi_pooling_shape_inference.hpp" #include "itt.hpp" #include "openvino/core/validation_util.hpp" -using namespace std; -using namespace ngraph; +namespace ov { op::v1::DeformablePSROIPooling::DeformablePSROIPooling(const Output& input, const Output& coords, @@ -78,32 +77,32 @@ void op::v1::DeformablePSROIPooling::validate_and_infer_types() { set_output_type(0, input_et, shape_infer(this, input_shapes)[0]); } -shared_ptr op::v1::DeformablePSROIPooling::clone_with_new_inputs(const OutputVector& new_args) const { +std::shared_ptr op::v1::DeformablePSROIPooling::clone_with_new_inputs(const OutputVector& new_args) const { OV_OP_SCOPE(v1_DeformablePSROIPooling_clone_with_new_inputs); check_new_args_count(this, new_args); if (new_args.size() == 3) { - return make_shared(new_args.at(0), - new_args.at(1), - new_args.at(2), - m_output_dim, - m_spatial_scale, - m_group_size, - m_mode, - m_spatial_bins_x, - m_spatial_bins_y, - m_trans_std, - m_part_size); + return std::make_shared(new_args.at(0), + new_args.at(1), + new_args.at(2), + m_output_dim, + m_spatial_scale, + m_group_size, + m_mode, + m_spatial_bins_x, + m_spatial_bins_y, + m_trans_std, + m_part_size); } else if (new_args.size() == 2) { - return make_shared(new_args.at(0), - new_args.at(1), - m_output_dim, - m_spatial_scale, - m_group_size, - m_mode, - m_spatial_bins_x, - m_spatial_bins_y, - m_trans_std, - m_part_size); + return std::make_shared(new_args.at(0), + new_args.at(1), + m_output_dim, + m_spatial_scale, + m_group_size, + m_mode, + m_spatial_bins_x, + m_spatial_bins_y, + m_trans_std, + m_part_size); } else { OPENVINO_THROW("Not supported number of DeformablePSROIPooling args"); } @@ -116,3 +115,4 @@ void op::v1::DeformablePSROIPooling::set_output_dim(int64_t output_dim) { void op::v1::DeformablePSROIPooling::set_group_size(int64_t group_size) { m_group_size = group_size; } +} // namespace ov diff --git a/src/core/src/op/detection_output.cpp b/src/core/src/op/detection_output.cpp index 76b1eb624cecd8..6f13e090c06b2f 100644 --- a/src/core/src/op/detection_output.cpp +++ b/src/core/src/op/detection_output.cpp @@ -8,8 +8,6 @@ #include "itt.hpp" #include "openvino/core/validation_util.hpp" -using namespace std; - // ------------------------------ V0 ------------------------------ ov::op::v0::DetectionOutput::DetectionOutput(const Output& box_logits, const Output& class_preds, @@ -43,7 +41,7 @@ void ov::op::v0::DetectionOutput::validate_and_infer_types() { set_output_type(0, get_input_element_type(0), output_shapes[0]); } -shared_ptr ov::op::v0::DetectionOutput::clone_with_new_inputs(const OutputVector& new_args) const { +std::shared_ptr ov::op::v0::DetectionOutput::clone_with_new_inputs(const OutputVector& new_args) const { OV_OP_SCOPE(v0_DetectionOutput_clone_with_new_inputs); check_new_args_count(this, new_args); @@ -52,14 +50,14 @@ shared_ptr ov::op::v0::DetectionOutput::clone_with_new_inputs(const Ou NODE_VALIDATION_CHECK(this, num_args == 3 || num_args == 5, "DetectionOutput accepts 3 or 5 inputs."); if (num_args == 3) { - return make_shared(new_args.at(0), new_args.at(1), new_args.at(2), m_attrs); + return std::make_shared(new_args.at(0), new_args.at(1), 
new_args.at(2), m_attrs); } else { - return make_shared(new_args.at(0), - new_args.at(1), - new_args.at(2), - new_args.at(3), - new_args.at(4), - m_attrs); + return std::make_shared(new_args.at(0), + new_args.at(1), + new_args.at(2), + new_args.at(3), + new_args.at(4), + m_attrs); } } @@ -102,7 +100,7 @@ void ov::op::v8::DetectionOutput::validate_and_infer_types() { set_output_type(0, get_input_element_type(0), output_shapes[0]); } -shared_ptr ov::op::v8::DetectionOutput::clone_with_new_inputs(const OutputVector& new_args) const { +std::shared_ptr ov::op::v8::DetectionOutput::clone_with_new_inputs(const OutputVector& new_args) const { OV_OP_SCOPE(v0_DetectionOutput_clone_with_new_inputs); check_new_args_count(this, new_args); @@ -111,14 +109,14 @@ shared_ptr ov::op::v8::DetectionOutput::clone_with_new_inputs(const Ou NODE_VALIDATION_CHECK(this, num_args == 3 || num_args == 5, "DetectionOutput accepts 3 or 5 inputs."); if (num_args == 3) { - return make_shared(new_args.at(0), new_args.at(1), new_args.at(2), m_attrs); + return std::make_shared(new_args.at(0), new_args.at(1), new_args.at(2), m_attrs); } else { - return make_shared(new_args.at(0), - new_args.at(1), - new_args.at(2), - new_args.at(3), - new_args.at(4), - m_attrs); + return std::make_shared(new_args.at(0), + new_args.at(1), + new_args.at(2), + new_args.at(3), + new_args.at(4), + m_attrs); } } diff --git a/src/core/src/op/dft.cpp b/src/core/src/op/dft.cpp index ae02d24dbee8a5..b70c8c742fe328 100644 --- a/src/core/src/op/dft.cpp +++ b/src/core/src/op/dft.cpp @@ -14,23 +14,14 @@ // limitations under the License. //***************************************************************************** -#include "ngraph/op/dft.hpp" +#include "openvino/op/dft.hpp" #include #include -#include #include "itt.hpp" -#include "ngraph/attribute_visitor.hpp" -#include "ngraph/axis_set.hpp" -#include "ngraph/axis_vector.hpp" -#include "ngraph/op/constant.hpp" -#include "ngraph/op/util/op_types.hpp" -#include "ngraph/runtime/host_tensor.hpp" - -using namespace std; -using namespace ngraph; +namespace ov { op::v7::DFT::DFT(const Output& data, const Output& axes) : FFTBase(data, axes) { constructor_validate_and_infer_types(); } @@ -40,11 +31,6 @@ op::v7::DFT::DFT(const Output& data, const Output& axes, const Outpu constructor_validate_and_infer_types(); } -bool op::v7::DFT::visit_attributes(AttributeVisitor& visitor) { - OV_OP_SCOPE(v7_DFT_visit_attributes); - return true; -} - std::shared_ptr op::v7::DFT::clone_with_new_inputs(const OutputVector& new_args) const { OV_OP_SCOPE(v7_DFT_clone_with_new_inputs); check_new_args_count(this, new_args); @@ -56,3 +42,4 @@ std::shared_ptr op::v7::DFT::clone_with_new_inputs(const OutputVector& new return std::make_shared(new_args.at(0), new_args.at(1), new_args.at(2)); } +} // namespace ov diff --git a/src/core/src/op/divide.cpp b/src/core/src/op/divide.cpp index 8ab229a9a742df..c2da6a9c05d00a 100644 --- a/src/core/src/op/divide.cpp +++ b/src/core/src/op/divide.cpp @@ -15,8 +15,8 @@ #include "ngraph/op/or.hpp" #include "ngraph/op/select.hpp" #include "ngraph/runtime/host_tensor.hpp" +#include "openvino/core/shape_util.hpp" #include "openvino/reference/divide.hpp" -#include "shape_util.hpp" using namespace std; using namespace ngraph; @@ -87,14 +87,14 @@ bool evaluate_bound(const Node* node, ov::TensorVector& output_values, bool is_u // for positive arg2 divide will have limits [low/up , up/low] // for negative arg2 limits for divide will be [up/low, low/up] // for arg2 range with both positive and negative values, 
divide can give any result [-inf, inf] - NGRAPH_CHECK(node, output_values.size() == 1); + OPENVINO_ASSERT(node, output_values.size() == 1); const auto& input1 = node->input_value(0); const auto& input2 = node->input_value(1); // broadcast shapes to allocate tensors of correct size for operations with both inputs PartialShape input_shape = input1.get_partial_shape(); - NGRAPH_CHECK(PartialShape::broadcast_merge_into(input_shape, input2.get_partial_shape(), node->get_autob()), - "Argument shapes in divide operation are inconsistent."); + OPENVINO_ASSERT(PartialShape::broadcast_merge_into(input_shape, input2.get_partial_shape(), node->get_autob()), + "Argument shapes in divide operation are inconsistent."); auto input1_low = ov::evaluate_lower_bound(input1); if (!input1_low) diff --git a/src/core/src/op/einsum.cpp b/src/core/src/op/einsum.cpp index 2d752899038244..d8fd3ddd45cff2 100644 --- a/src/core/src/op/einsum.cpp +++ b/src/core/src/op/einsum.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph/op/einsum.hpp" +#include "openvino/op/einsum.hpp" #include #include @@ -11,11 +11,9 @@ #include "einsum_shape_inference.hpp" #include "itt.hpp" -#include "ngraph/validation_util.hpp" - -using namespace std; -using namespace ngraph; +#include "openvino/core/validation_util.hpp" +namespace ov { namespace { /// \brief Check that a subscript contains only alphabetic letters or @@ -108,9 +106,9 @@ void op::v7::Einsum::parse_equation(const std::string& equation, for (std::string input_subscript; std::getline(input, input_subscript, ',');) { bool local_is_ellipsis_met = false; // check that input subscript contains only alphabetic letter or ellipsis - NGRAPH_CHECK(is_subscript_correct(input_subscript, local_is_ellipsis_met), - "Input subscript of Einsum equation must consist of either only " - "alphabetic letters or alphabetic letters with one ellipsis."); + OPENVINO_ASSERT(is_subscript_correct(input_subscript, local_is_ellipsis_met), + "Input subscript of Einsum equation must consist of either only " + "alphabetic letters or alphabetic letters with one ellipsis."); // mark that ellipsis is met at least in one input subscript if (local_is_ellipsis_met) { @@ -139,14 +137,14 @@ void op::v7::Einsum::parse_equation(const std::string& equation, bool output_is_ellipsis_met = false; // check that the output subscript has the correct format - NGRAPH_CHECK(is_subscript_correct(output_subscript, output_is_ellipsis_met), - "Output subscript of Einsum equation must consist of either only " - "alphabetic letters or alphabetic letters with one ellipsis."); + OPENVINO_ASSERT(is_subscript_correct(output_subscript, output_is_ellipsis_met), + "Output subscript of Einsum equation must consist of either only " + "alphabetic letters or alphabetic letters with one ellipsis."); // if the ellipsis is met in input subscripts, one ellipsis must be in the output subscript - NGRAPH_CHECK(is_ellipsis_met == output_is_ellipsis_met, - "Output subscript of Einsum equation must contain one ellipsis if " - "ellipsis is met in any input subscript."); + OPENVINO_ASSERT(is_ellipsis_met == output_is_ellipsis_met, + "Output subscript of Einsum equation must contain one ellipsis if " + "ellipsis is met in any input subscript."); } } @@ -164,7 +162,7 @@ std::vector op::v7::Einsum::extract_labels(const std::string& subsc // make additional increment since ellipsis consists of three dots. 
ch_idx += 2; } else { - NGRAPH_CHECK(false, "Einsum equation has invalid label."); + OPENVINO_ASSERT(false, "Einsum equation has invalid label."); } } @@ -205,13 +203,14 @@ bool op::v7::Einsum::visit_attributes(AttributeVisitor& visitor) { return true; } -shared_ptr op::v7::Einsum::clone_with_new_inputs(const OutputVector& new_args) const { +std::shared_ptr op::v7::Einsum::clone_with_new_inputs(const OutputVector& new_args) const { OV_OP_SCOPE(v7_Einsum_clone_with_new_inputs); check_new_args_count(this, new_args); - return make_shared(new_args, m_equation); + return std::make_shared(new_args, m_equation); } void op::v7::Einsum::set_equation(std::string equation) { remove_whitespaces(equation); m_equation = std::move(equation); } +} // namespace ov diff --git a/src/core/src/op/embedding_segments_sum.cpp b/src/core/src/op/embedding_segments_sum.cpp index b7dc0df1fcadd3..c368e6f748fc27 100644 --- a/src/core/src/op/embedding_segments_sum.cpp +++ b/src/core/src/op/embedding_segments_sum.cpp @@ -2,18 +2,14 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph/op/embedding_segments_sum.hpp" +#include "openvino/op/embedding_segments_sum.hpp" #include #include "embedding_segments_sum_shape_inference.hpp" #include "itt.hpp" -#include "ngraph/op/constant.hpp" -#include "ngraph/opsets/opset3.hpp" - -using namespace std; -using namespace ngraph; +namespace ov { op::v3::EmbeddingSegmentsSum::EmbeddingSegmentsSum(const Output& emb_table, const Output& indices, const Output& segment_ids, @@ -110,28 +106,29 @@ void op::v3::EmbeddingSegmentsSum::validate_and_infer_types() { set_output_type(0, result_et, result_shapes[0]); } -shared_ptr op::v3::EmbeddingSegmentsSum::clone_with_new_inputs(const OutputVector& new_args) const { +std::shared_ptr op::v3::EmbeddingSegmentsSum::clone_with_new_inputs(const OutputVector& new_args) const { OV_OP_SCOPE(v3_EmbeddingSegmentsSum_clone_with_new_inputs); check_new_args_count(this, new_args); if (new_args.size() == 4) { - return make_shared(new_args.at(0), - new_args.at(1), - new_args.at(2), - new_args.at(3)); + return std::make_shared(new_args.at(0), + new_args.at(1), + new_args.at(2), + new_args.at(3)); } else if (new_args.size() == 5) { - return make_shared(new_args.at(0), - new_args.at(1), - new_args.at(2), - new_args.at(3), - new_args.at(4)); + return std::make_shared(new_args.at(0), + new_args.at(1), + new_args.at(2), + new_args.at(3), + new_args.at(4)); } else if (new_args.size() == 6) { - return make_shared(new_args.at(0), - new_args.at(1), - new_args.at(2), - new_args.at(3), - new_args.at(4), - new_args.at(5)); + return std::make_shared(new_args.at(0), + new_args.at(1), + new_args.at(2), + new_args.at(3), + new_args.at(4), + new_args.at(5)); } else { OPENVINO_THROW("Incorrect number of arguments"); } } +} // namespace ov diff --git a/src/core/src/op/embeddingbag_offsets_sum.cpp b/src/core/src/op/embeddingbag_offsets_sum.cpp index f510e380de82ab..fa2e222386bf99 100644 --- a/src/core/src/op/embeddingbag_offsets_sum.cpp +++ b/src/core/src/op/embeddingbag_offsets_sum.cpp @@ -2,13 +2,11 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph/op/embeddingbag_offsets_sum.hpp" +#include "openvino/op/embeddingbag_offsets_sum.hpp" #include "itt.hpp" -#include "ngraph/op/constant.hpp" -using namespace std; -using namespace ngraph; +namespace ov { op::v3::EmbeddingBagOffsetsSum::EmbeddingBagOffsetsSum(const Output& emb_table, const Output& indices, @@ -28,23 +26,24 @@ op::v3::EmbeddingBagOffsetsSum::EmbeddingBagOffsetsSum(const Output& emb_t const Output& 
offsets) : util::EmbeddingBagOffsetsBase(emb_table, indices, offsets) {} -shared_ptr op::v3::EmbeddingBagOffsetsSum::clone_with_new_inputs(const OutputVector& new_args) const { +std::shared_ptr op::v3::EmbeddingBagOffsetsSum::clone_with_new_inputs(const OutputVector& new_args) const { OV_OP_SCOPE(v3_EmbeddingBagOffsetsSum_clone_with_new_inputs); check_new_args_count(this, new_args); if (new_args.size() == 3) { - return make_shared(new_args.at(0), new_args.at(1), new_args.at(2)); + return std::make_shared(new_args.at(0), new_args.at(1), new_args.at(2)); } else if (new_args.size() == 4) { - return make_shared(new_args.at(0), - new_args.at(1), - new_args.at(2), - new_args.at(3)); + return std::make_shared(new_args.at(0), + new_args.at(1), + new_args.at(2), + new_args.at(3)); } else if (new_args.size() == 5) { - return make_shared(new_args.at(0), - new_args.at(1), - new_args.at(2), - new_args.at(3), - new_args.at(4)); + return std::make_shared(new_args.at(0), + new_args.at(1), + new_args.at(2), + new_args.at(3), + new_args.at(4)); } else { OPENVINO_THROW("Incorrect number of arguments"); } } +} // namespace ov diff --git a/src/core/src/op/embeddingbag_packedsum.cpp b/src/core/src/op/embeddingbag_packedsum.cpp index 319f2569adabf9..59f47715c9c088 100644 --- a/src/core/src/op/embeddingbag_packedsum.cpp +++ b/src/core/src/op/embeddingbag_packedsum.cpp @@ -2,13 +2,11 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph/op/embeddingbag_packedsum.hpp" +#include "openvino/op/embeddingbag_packedsum.hpp" #include "itt.hpp" -#include "ngraph/op/constant.hpp" -using namespace std; -using namespace ngraph; +namespace ov { op::v3::EmbeddingBagPackedSum::EmbeddingBagPackedSum(const Output& emb_table, const Output& indices, @@ -18,14 +16,15 @@ op::v3::EmbeddingBagPackedSum::EmbeddingBagPackedSum(const Output& emb_tab op::v3::EmbeddingBagPackedSum::EmbeddingBagPackedSum(const Output& emb_table, const Output& indices) : util::EmbeddingBagPackedBase(emb_table, indices) {} -shared_ptr op::v3::EmbeddingBagPackedSum::clone_with_new_inputs(const OutputVector& new_args) const { +std::shared_ptr op::v3::EmbeddingBagPackedSum::clone_with_new_inputs(const OutputVector& new_args) const { OV_OP_SCOPE(v3_EmbeddingBagPackedSum_clone_with_new_inputs); check_new_args_count(this, new_args); if (new_args.size() == 2) { - return make_shared(new_args.at(0), new_args.at(1)); + return std::make_shared(new_args.at(0), new_args.at(1)); } else if (new_args.size() == 3) { - return make_shared(new_args.at(0), new_args.at(1), new_args.at(2)); + return std::make_shared(new_args.at(0), new_args.at(1), new_args.at(2)); } else { OPENVINO_THROW("Incorrect number of arguments"); } } +} // namespace ov diff --git a/src/core/src/op/exp.cpp b/src/core/src/op/exp.cpp index d5076c963e4d66..8a8a5fa88d9cd2 100644 --- a/src/core/src/op/exp.cpp +++ b/src/core/src/op/exp.cpp @@ -62,7 +62,7 @@ bool evaluate_exp(const HostTensorPtr& arg0, const HostTensorPtr& out) { bool op::Exp::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { OV_OP_SCOPE(v0_Exp_evaluate); OPENVINO_SUPPRESS_DEPRECATED_START - NGRAPH_CHECK(validate_host_tensor_vector(outputs, 1) && validate_host_tensor_vector(inputs, 1)); + OPENVINO_ASSERT(validate_host_tensor_vector(outputs, 1) && validate_host_tensor_vector(inputs, 1)); OPENVINO_SUPPRESS_DEPRECATED_END return expop::evaluate_exp(inputs[0], outputs[0]); } diff --git a/src/core/src/op/experimental_detectron_detection_output.cpp b/src/core/src/op/experimental_detectron_detection_output.cpp index 
0b5d1c4005490e..27c17a3aba7fb0 100644
--- a/src/core/src/op/experimental_detectron_detection_output.cpp
+++ b/src/core/src/op/experimental_detectron_detection_output.cpp
@@ -11,8 +11,6 @@
 #include "itt.hpp"
 #include "openvino/core/attribute_visitor.hpp"
 
-using namespace std;
-
 namespace ov {
 op::v6::ExperimentalDetectronDetectionOutput::ExperimentalDetectronDetectionOutput(const Output& input_rois,
                                                                                    const Output& input_deltas,
@@ -48,15 +46,15 @@ void op::v6::ExperimentalDetectronDetectionOutput::validate_and_infer_types() {
     set_output_type(2, shapes_and_type.second, output_shapes[2]);
 }
 
-shared_ptr op::v6::ExperimentalDetectronDetectionOutput::clone_with_new_inputs(
+std::shared_ptr op::v6::ExperimentalDetectronDetectionOutput::clone_with_new_inputs(
     const OutputVector& new_args) const {
     OV_OP_SCOPE(v6_ExperimentalDetectronDetectionOutput_clone_with_new_inputs);
     check_new_args_count(this, new_args);
-    return make_shared(new_args.at(0),
-                       new_args.at(1),
-                       new_args.at(2),
-                       new_args.at(3),
-                       m_attrs);
+    return std::make_shared(new_args.at(0),
+                            new_args.at(1),
+                            new_args.at(2),
+                            new_args.at(3),
+                            m_attrs);
 }
 
 void op::v6::ExperimentalDetectronDetectionOutput::set_attrs(Attributes attrs) {
diff --git a/src/core/src/op/experimental_detectron_generate_proposals.cpp b/src/core/src/op/experimental_detectron_generate_proposals.cpp
index 9bf7e050c27a84..2f4067be7d62d6 100644
--- a/src/core/src/op/experimental_detectron_generate_proposals.cpp
+++ b/src/core/src/op/experimental_detectron_generate_proposals.cpp
@@ -9,7 +9,6 @@
 #include "itt.hpp"
 #include "openvino/core/attribute_visitor.hpp"
 
-using namespace std;
 namespace ov {
 namespace op {
 namespace v6 {
@@ -25,15 +24,15 @@ ExperimentalDetectronGenerateProposalsSingleImage::ExperimentalDetectronGenerate
     constructor_validate_and_infer_types();
 }
 
-shared_ptr ExperimentalDetectronGenerateProposalsSingleImage::clone_with_new_inputs(
+std::shared_ptr ExperimentalDetectronGenerateProposalsSingleImage::clone_with_new_inputs(
    const OutputVector& new_args) const {
     OV_OP_SCOPE(v6_ExperimentalDetectronGenerateProposalsSingleImage_clone_with_new_inputs);
     check_new_args_count(this, new_args);
-    return make_shared(new_args.at(0),
-                       new_args.at(1),
-                       new_args.at(2),
-                       new_args.at(3),
-                       m_attrs);
+    return std::make_shared(new_args.at(0),
+                            new_args.at(1),
+                            new_args.at(2),
+                            new_args.at(3),
+                            m_attrs);
 }
 
 bool ExperimentalDetectronGenerateProposalsSingleImage::visit_attributes(AttributeVisitor& visitor) {
diff --git a/src/core/src/op/experimental_detectron_roi_feature.cpp b/src/core/src/op/experimental_detectron_roi_feature.cpp
index 5993475bdbf91f..7da0cc6a81c9cb 100644
--- a/src/core/src/op/experimental_detectron_roi_feature.cpp
+++ b/src/core/src/op/experimental_detectron_roi_feature.cpp
@@ -13,8 +13,6 @@
 #include "itt.hpp"
 #include "openvino/core/attribute_visitor.hpp"
 
-using namespace std;
-
 namespace ov {
 op::v6::ExperimentalDetectronROIFeatureExtractor::ExperimentalDetectronROIFeatureExtractor(const OutputVector& args,
                                                                                            const Attributes& attrs)
@@ -46,11 +44,11 @@ void op::v6::ExperimentalDetectronROIFeatureExtractor::validate_and_infer_types(
     set_output_type(i, shapes_and_type.second, output_shapes[i]);
 }
 
-shared_ptr op::v6::ExperimentalDetectronROIFeatureExtractor::clone_with_new_inputs(
+std::shared_ptr op::v6::ExperimentalDetectronROIFeatureExtractor::clone_with_new_inputs(
     const OutputVector& new_args) const {
     OV_OP_SCOPE(v6_ExperimentalDetectronROIFeatureExtractor_clone_with_new_inputs);
     check_new_args_count(this, new_args);
-    return 
make_shared(new_args, m_attrs);
+    return std::make_shared(new_args, m_attrs);
 }
 
 void op::v6::ExperimentalDetectronROIFeatureExtractor::set_attrs(Attributes attrs) {
diff --git a/src/core/src/op/experimental_detectron_topkrois.cpp b/src/core/src/op/experimental_detectron_topkrois.cpp
index f476929ed1967f..829a42f968d80b 100644
--- a/src/core/src/op/experimental_detectron_topkrois.cpp
+++ b/src/core/src/op/experimental_detectron_topkrois.cpp
@@ -9,7 +9,6 @@
 #include "openvino/core/attribute_visitor.hpp"
 #include "openvino/core/validation_util.hpp"
 
-using namespace std;
 namespace ov {
 op::v6::ExperimentalDetectronTopKROIs::ExperimentalDetectronTopKROIs(const Output& input_rois,
                                                                      const Output& rois_probs,
@@ -25,10 +24,10 @@ bool op::v6::ExperimentalDetectronTopKROIs::visit_attributes(AttributeVisitor& v
     return true;
 }
 
-shared_ptr op::v6::ExperimentalDetectronTopKROIs::clone_with_new_inputs(const OutputVector& new_args) const {
+std::shared_ptr op::v6::ExperimentalDetectronTopKROIs::clone_with_new_inputs(const OutputVector& new_args) const {
     OV_OP_SCOPE(v6_ExperimentalDetectronTopKROIs_clone_with_new_inputs);
     check_new_args_count(this, new_args);
-    return make_shared(new_args.at(0), new_args.at(1), m_max_rois);
+    return std::make_shared(new_args.at(0), new_args.at(1), m_max_rois);
 }
 
 void op::v6::ExperimentalDetectronTopKROIs::validate_and_infer_types() {
diff --git a/src/core/src/op/gather.cpp b/src/core/src/op/gather.cpp
index a5e69144829704..2680d161a0ad94 100644
--- a/src/core/src/op/gather.cpp
+++ b/src/core/src/op/gather.cpp
@@ -2,22 +2,19 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 
-#include "ngraph/op/gather.hpp"
-
-#include
+#include "openvino/op/gather.hpp"
 
 #include "itt.hpp"
-#include "ngraph/shape.hpp"
+#include "openvino/core/validation_util.hpp"
 
-using namespace std;
-using namespace ngraph;
+namespace ov {
 
 op::v1::Gather::Gather(const Output& params, const Output& indices, const Output& axes)
     : GatherBase(params, indices, axes) {
     constructor_validate_and_infer_types();
 }
 
-int64_t ngraph::op::v1::Gather::get_axis() const {
+int64_t op::v1::Gather::get_axis() const {
     OPENVINO_SUPPRESS_DEPRECATED_START
     if (!get_constant_from_source(input_value(2))) {
         OPENVINO_SUPPRESS_DEPRECATED_END
@@ -26,15 +23,10 @@ int64_t ngraph::op::v1::Gather::get_axis() const {
     return GatherBase::get_axis();
 }
 
-bool ngraph::op::v1::Gather::visit_attributes(AttributeVisitor& visitor) {
-    OV_OP_SCOPE(v1_Gather_visit_attributes);
-    return true;
-}
-
-shared_ptr op::v1::Gather::clone_with_new_inputs(const OutputVector& new_args) const {
+std::shared_ptr op::v1::Gather::clone_with_new_inputs(const OutputVector& new_args) const {
     OV_OP_SCOPE(v1_Gather_clone_with_new_inputs);
     check_new_args_count(this, new_args);
-    return make_shared(new_args.at(0), new_args.at(1), new_args.at(2));
+    return std::make_shared(new_args.at(0), new_args.at(1), new_args.at(2));
 }
 
 op::v7::Gather::Gather(const Output& data,
@@ -65,16 +57,16 @@ int64_t op::v7::Gather::get_batch_dims() const {
     return m_batch_dims;
 }
 
-bool ngraph::op::v7::Gather::visit_attributes(AttributeVisitor& visitor) {
+bool op::v7::Gather::visit_attributes(AttributeVisitor& visitor) {
     OV_OP_SCOPE(v7_Gather_visit_attributes);
     visitor.on_attribute("batch_dims", m_batch_dims);
     return true;
 }
 
-shared_ptr op::v7::Gather::clone_with_new_inputs(const OutputVector& new_args) const {
+std::shared_ptr op::v7::Gather::clone_with_new_inputs(const OutputVector& new_args) const {
     OV_OP_SCOPE(v7_Gather_clone_with_new_inputs);
     check_new_args_count(this, new_args);
-    return 
make_shared(new_args.at(0), new_args.at(1), new_args.at(2), m_batch_dims); + return std::make_shared(new_args.at(0), new_args.at(1), new_args.at(2), m_batch_dims); } op::v8::Gather::Gather(const Output& data, @@ -105,14 +97,15 @@ int64_t op::v8::Gather::get_batch_dims() const { return m_batch_dims; } -bool ngraph::op::v8::Gather::visit_attributes(AttributeVisitor& visitor) { +bool op::v8::Gather::visit_attributes(AttributeVisitor& visitor) { OV_OP_SCOPE(v8_Gather_visit_attributes); visitor.on_attribute("batch_dims", m_batch_dims); return true; } -shared_ptr op::v8::Gather::clone_with_new_inputs(const OutputVector& new_args) const { +std::shared_ptr op::v8::Gather::clone_with_new_inputs(const OutputVector& new_args) const { OV_OP_SCOPE(v8_Gather_clone_with_new_inputs); check_new_args_count(this, new_args); - return make_shared(new_args.at(0), new_args.at(1), new_args.at(2), m_batch_dims); + return std::make_shared(new_args.at(0), new_args.at(1), new_args.at(2), m_batch_dims); } +} // namespace ov diff --git a/src/core/src/op/gather_elements.cpp b/src/core/src/op/gather_elements.cpp index f0bd0abc6c18f1..47d8ca67d5f726 100644 --- a/src/core/src/op/gather_elements.cpp +++ b/src/core/src/op/gather_elements.cpp @@ -2,16 +2,12 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph/op/gather_elements.hpp" - -#include +#include "openvino/op/gather_elements.hpp" +#include "gather_elements_shape_inference.hpp" #include "itt.hpp" -#include "ngraph/shape.hpp" - -using namespace std; -using namespace ngraph; +namespace ov { // ------------------------------ V6 ------------------------------ op::v6::GatherElements::GatherElements(const Output& data, const Output& indices, const int64_t axis) @@ -43,8 +39,9 @@ bool op::v6::GatherElements::visit_attributes(AttributeVisitor& visitor) { return true; } -shared_ptr op::v6::GatherElements::clone_with_new_inputs(const OutputVector& new_args) const { +std::shared_ptr op::v6::GatherElements::clone_with_new_inputs(const OutputVector& new_args) const { OV_OP_SCOPE(v6_GatherElements_clone_with_new_inputs); check_new_args_count(this, new_args); - return make_shared(new_args.at(0), new_args.at(1), m_axis); + return std::make_shared(new_args.at(0), new_args.at(1), m_axis); } +} // namespace ov diff --git a/src/core/src/op/gather_nd.cpp b/src/core/src/op/gather_nd.cpp index 72c9e0c3ae6020..313eb84266a282 100644 --- a/src/core/src/op/gather_nd.cpp +++ b/src/core/src/op/gather_nd.cpp @@ -2,14 +2,12 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph/op/gather_nd.hpp" +#include "openvino/op/gather_nd.hpp" #include "gather_nd_shape_inference.hpp" #include "itt.hpp" -#include "ngraph/shape.hpp" -using namespace std; -using namespace ngraph; +namespace ov { // ------------------------------ V5 ------------------------------ op::v5::GatherND::GatherND(const Output& data, const Output& indices, const size_t batch_dims) @@ -33,10 +31,10 @@ void op::v5::GatherND::validate_and_infer_types() { set_output_type(0, data_type, out_shapes[0]); } -shared_ptr op::v5::GatherND::clone_with_new_inputs(const OutputVector& new_args) const { +std::shared_ptr op::v5::GatherND::clone_with_new_inputs(const OutputVector& new_args) const { OV_OP_SCOPE(v5_GatherND_clone_with_new_inputs); check_new_args_count(this, new_args); - return make_shared(new_args.at(0), new_args.at(1), m_batch_dims); + return std::make_shared(new_args.at(0), new_args.at(1), m_batch_dims); } // ------------------------------ V8 ------------------------------ @@ -60,8 +58,9 @@ void 
op::v8::GatherND::validate_and_infer_types() { set_output_type(0, data_type, ov::PartialShape(out_shapes[0])); } -shared_ptr op::v8::GatherND::clone_with_new_inputs(const OutputVector& new_args) const { +std::shared_ptr op::v8::GatherND::clone_with_new_inputs(const OutputVector& new_args) const { OV_OP_SCOPE(v8_GatherND_clone_with_new_inputs); check_new_args_count(this, new_args); - return make_shared(new_args.at(0), new_args.at(1), m_batch_dims); + return std::make_shared(new_args.at(0), new_args.at(1), m_batch_dims); } +} // namespace ov diff --git a/src/core/src/op/gather_tree.cpp b/src/core/src/op/gather_tree.cpp index 67c41e792655c6..5b013546f9e3cd 100644 --- a/src/core/src/op/gather_tree.cpp +++ b/src/core/src/op/gather_tree.cpp @@ -2,16 +2,13 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph/op/gather_tree.hpp" +#include "openvino/op/gather_tree.hpp" #include "gather_tree_shape_inference.hpp" #include "itt.hpp" -#include "ngraph/shape.hpp" #include "openvino/core/validation_util.hpp" -using namespace std; -using namespace ngraph; - +namespace ov { op::v1::GatherTree::GatherTree(const Output& step_ids, const Output& parent_idx, const Output& max_seq_len, @@ -20,15 +17,10 @@ op::v1::GatherTree::GatherTree(const Output& step_ids, constructor_validate_and_infer_types(); } -shared_ptr op::v1::GatherTree::clone_with_new_inputs(const OutputVector& new_args) const { +std::shared_ptr op::v1::GatherTree::clone_with_new_inputs(const OutputVector& new_args) const { OV_OP_SCOPE(v1_GatherTree_clone_with_new_inputs); check_new_args_count(this, new_args); - return make_shared(new_args.at(0), new_args.at(1), new_args.at(2), new_args.at(3)); -} - -bool ngraph::op::v1::GatherTree::visit_attributes(AttributeVisitor& visitor) { - OV_OP_SCOPE(v1_GatherTree_visit_attributes); - return true; + return std::make_shared(new_args.at(0), new_args.at(1), new_args.at(2), new_args.at(3)); } void op::v1::GatherTree::validate_and_infer_types() { @@ -64,3 +56,4 @@ void op::v1::GatherTree::validate_and_infer_types() { OPENVINO_SUPPRESS_DEPRECATED_END set_output_type(0, result_et, output_shape); } +} // namespace ov diff --git a/src/core/src/op/gelu.cpp b/src/core/src/op/gelu.cpp index c858cb1374ded1..752b83fe5cd5b3 100644 --- a/src/core/src/op/gelu.cpp +++ b/src/core/src/op/gelu.cpp @@ -134,7 +134,7 @@ bool evaluate_gelu(const HostTensorPtr& arg0, const HostTensorPtr& out, op::Gelu bool op::v7::Gelu::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { OV_OP_SCOPE(v7_Gelu_evaluate); OPENVINO_SUPPRESS_DEPRECATED_START - NGRAPH_CHECK(validate_host_tensor_vector(outputs, 1) && validate_host_tensor_vector(inputs, 1)); + OPENVINO_ASSERT(validate_host_tensor_vector(outputs, 1) && validate_host_tensor_vector(inputs, 1)); OPENVINO_SUPPRESS_DEPRECATED_END return gelu::evaluate_gelu(inputs[0], outputs[0], m_approximation_mode); } diff --git a/src/core/src/op/generate_proposals.cpp b/src/core/src/op/generate_proposals.cpp index 8dcd6c5e696010..48fd69ac2e5778 100644 --- a/src/core/src/op/generate_proposals.cpp +++ b/src/core/src/op/generate_proposals.cpp @@ -7,7 +7,6 @@ #include "generate_proposals_shape_inference.hpp" #include "itt.hpp" -using namespace std; namespace ov { op::v9::GenerateProposals::GenerateProposals(const Output& im_info, @@ -22,15 +21,15 @@ op::v9::GenerateProposals::GenerateProposals(const Output& im_info, constructor_validate_and_infer_types(); } -shared_ptr op::v9::GenerateProposals::clone_with_new_inputs(const OutputVector& new_args) const { +std::shared_ptr 
op::v9::GenerateProposals::clone_with_new_inputs(const OutputVector& new_args) const { OV_OP_SCOPE(v9_GenerateProposals_clone_with_new_inputs); check_new_args_count(this, new_args); - return make_shared(new_args.at(0), - new_args.at(1), - new_args.at(2), - new_args.at(3), - m_attrs, - m_roi_num_type); + return std::make_shared(new_args.at(0), + new_args.at(1), + new_args.at(2), + new_args.at(3), + m_attrs, + m_roi_num_type); } bool op::v9::GenerateProposals::visit_attributes(AttributeVisitor& visitor) { diff --git a/src/core/src/op/greater_eq.cpp b/src/core/src/op/greater_eq.cpp index 53d6e9a9e86632..64b761233dc0cc 100644 --- a/src/core/src/op/greater_eq.cpp +++ b/src/core/src/op/greater_eq.cpp @@ -70,7 +70,7 @@ shared_ptr op::v1::GreaterEqual::clone_with_new_inputs(const OutputVector& bool op::v1::GreaterEqual::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { OV_OP_SCOPE(v1_GreaterEqual_evaluate); OPENVINO_SUPPRESS_DEPRECATED_START - NGRAPH_CHECK(validate_host_tensor_vector(outputs, 1) && validate_host_tensor_vector(inputs, 2)); + OPENVINO_ASSERT(validate_host_tensor_vector(outputs, 1) && validate_host_tensor_vector(inputs, 2)); OPENVINO_SUPPRESS_DEPRECATED_END return greater_equalop::evaluate_greater_equal(inputs[0], inputs[1], outputs[0], get_autob()); } diff --git a/src/core/src/op/grn.cpp b/src/core/src/op/grn.cpp index ff95ade95da4d5..c5a5180e91dc97 100644 --- a/src/core/src/op/grn.cpp +++ b/src/core/src/op/grn.cpp @@ -2,14 +2,12 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph/op/grn.hpp" +#include "openvino/op/grn.hpp" #include "itt.hpp" -#include "ngraph/axis_set.hpp" -#include "ngraph/shape.hpp" +#include "openvino/core/axis_set.hpp" -using namespace std; -using namespace ngraph; +namespace ov { op::v0::GRN::GRN(const Output& data, float bias) : util::UnaryElementwiseArithmetic(data), m_bias(bias) { constructor_validate_and_infer_types(); @@ -37,10 +35,11 @@ void op::v0::GRN::validate_and_infer_types() { set_output_type(0, get_input_element_type(0), get_input_partial_shape(0)); } -shared_ptr op::v0::GRN::clone_with_new_inputs(const OutputVector& new_args) const { +std::shared_ptr op::v0::GRN::clone_with_new_inputs(const OutputVector& new_args) const { OV_OP_SCOPE(v0_GRN_clone_with_new_inputs); if (new_args.size() != 1) { OPENVINO_THROW("Incorrect number of new arguments"); } - return make_shared(new_args.at(0), m_bias); + return std::make_shared(new_args.at(0), m_bias); } +} // namespace ov diff --git a/src/core/src/op/group_conv.cpp b/src/core/src/op/group_conv.cpp index 3a7b28108307c0..148564ef401404 100644 --- a/src/core/src/op/group_conv.cpp +++ b/src/core/src/op/group_conv.cpp @@ -10,8 +10,6 @@ #include "itt.hpp" #include "openvino/op/util/precision_sensitive_attribute.hpp" -using namespace std; - //------------------------------------------------------------------------------ // v1::GroupConvolution //------------------------------------------------------------------------------ @@ -70,16 +68,16 @@ void op::v1::GroupConvolution::validate_and_infer_types() { set_num_spatial(num_spatial, input_shapes); } -shared_ptr op::v1::GroupConvolution::clone_with_new_inputs(const OutputVector& new_args) const { +std::shared_ptr op::v1::GroupConvolution::clone_with_new_inputs(const OutputVector& new_args) const { OV_OP_SCOPE(v1_GroupConvolution_clone_with_new_inputs); check_new_args_count(this, new_args); - return make_shared(new_args.at(0), - new_args.at(1), - m_strides, - m_pads_begin, - m_pads_end, - m_dilations, - m_auto_pad); + return 
std::make_shared(new_args.at(0), + new_args.at(1), + m_strides, + m_pads_begin, + m_pads_end, + m_dilations, + m_auto_pad); } //------------------------------------------------------------------------------ @@ -188,14 +186,14 @@ void op::v1::GroupConvolutionBackpropData::set_output_shape(const ov::Shape& sha } void op::v1::GroupConvolutionBackpropData::infer_conv_backprop_output_spatial_shape( - const vector& input_data_shape, - const vector& filters_shape, + const std::vector& input_data_shape, + const std::vector& filters_shape, const Strides& strides, const Strides& dilations, const CoordinateDiff& pads_begin, const CoordinateDiff& pads_end, const CoordinateDiff& output_padding, - vector& output_spatial_shape) { + std::vector& output_spatial_shape) { size_t num_spatial_dims = input_data_shape.size(); OPENVINO_ASSERT(filters_shape.size() == num_spatial_dims && strides.size() == num_spatial_dims && dilations.size() == num_spatial_dims && pads_begin.size() == num_spatial_dims && @@ -261,28 +259,28 @@ void op::v1::GroupConvolutionBackpropData::validate_and_infer_types() { set_input_is_relevant_to_shape(1); } -shared_ptr op::v1::GroupConvolutionBackpropData::clone_with_new_inputs(const OutputVector& new_args) const { +std::shared_ptr op::v1::GroupConvolutionBackpropData::clone_with_new_inputs(const OutputVector& new_args) const { OV_OP_SCOPE(v1_GroupConvolutionBackpropData_clone_with_new_inputs); check_new_args_count(this, new_args); if (new_args.size() == 3) { - return make_shared(new_args.at(0), - new_args.at(1), - new_args.at(2), - m_strides, - m_pads_begin, - m_pads_end, - m_dilations, - m_auto_pad, - m_output_padding); + return std::make_shared(new_args.at(0), + new_args.at(1), + new_args.at(2), + m_strides, + m_pads_begin, + m_pads_end, + m_dilations, + m_auto_pad, + m_output_padding); } else { - return make_shared(new_args.at(0), - new_args.at(1), - m_strides, - m_pads_begin, - m_pads_end, - m_dilations, - m_auto_pad, - m_output_padding); + return std::make_shared(new_args.at(0), + new_args.at(1), + m_strides, + m_pads_begin, + m_pads_end, + m_dilations, + m_auto_pad, + m_output_padding); } } } // namespace ov diff --git a/src/core/src/op/gru_cell.cpp b/src/core/src/op/gru_cell.cpp index 10412d321d289e..4f040be807f558 100644 --- a/src/core/src/op/gru_cell.cpp +++ b/src/core/src/op/gru_cell.cpp @@ -2,18 +2,15 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph/op/gru_cell.hpp" +#include "openvino/op/gru_cell.hpp" #include #include "gru_cell_shape_inference.hpp" #include "itt.hpp" -#include "ngraph/op/constant.hpp" -#include "ngraph/shape.hpp" -#include "ngraph/type/element_type.hpp" +#include "openvino/core/type/element_type.hpp" -using namespace std; -using namespace ngraph; +namespace ov { op::v3::GRUCell::GRUCell() : m_linear_before_reset(false) { m_activations = {"sigmoid", "tanh"}; @@ -31,9 +28,9 @@ op::v3::GRUCell::GRUCell(const Output& X, W, R, hidden_size, - vector{"sigmoid", "tanh"}, - vector{}, - vector{}, + std::vector{"sigmoid", "tanh"}, + std::vector{}, + std::vector{}, 0.f, false) {} @@ -42,9 +39,9 @@ op::v3::GRUCell::GRUCell(const Output& X, const Output& W, const Output& R, size_t hidden_size, - const vector& activations, - const vector& activations_alpha, - const vector& activations_beta, + const std::vector& activations, + const std::vector& activations_alpha, + const std::vector& activations_beta, float clip, bool linear_before_reset) : RNNCellBase({X, initial_hidden_state, W, R}, hidden_size, clip, activations, activations_alpha, activations_beta), @@ 
-61,9 +58,9 @@ op::v3::GRUCell::GRUCell(const Output& X, const Output& R, const Output& B, size_t hidden_size, - const vector& activations, - const vector& activations_alpha, - const vector& activations_beta, + const std::vector& activations, + const std::vector& activations_alpha, + const std::vector& activations_beta, float clip, bool linear_before_reset) : RNNCellBase({X, initial_hidden_state, W, R, B}, @@ -109,37 +106,38 @@ void op::v3::GRUCell::add_default_bias_input() { Output B = op::v0::Constant::create(get_input_element_type(0), ov::Shape{(s_gates_count + m_linear_before_reset) * get_hidden_size()}, - vector((s_gates_count + m_linear_before_reset) * get_hidden_size(), 0.f)); + std::vector((s_gates_count + m_linear_before_reset) * get_hidden_size(), 0.f)); set_argument(4, B); } -shared_ptr op::v3::GRUCell::clone_with_new_inputs(const OutputVector& new_args) const { +std::shared_ptr op::v3::GRUCell::clone_with_new_inputs(const OutputVector& new_args) const { OV_OP_SCOPE(v3_GRUCell_clone_with_new_inputs); check_new_args_count(this, new_args); if (new_args.size() == 4) { - return make_shared(new_args.at(0), - new_args.at(1), - new_args.at(2), - new_args.at(3), - get_hidden_size(), - get_activations(), - get_activations_alpha(), - get_activations_beta(), - get_clip(), - m_linear_before_reset); + return std::make_shared(new_args.at(0), + new_args.at(1), + new_args.at(2), + new_args.at(3), + get_hidden_size(), + get_activations(), + get_activations_alpha(), + get_activations_beta(), + get_clip(), + m_linear_before_reset); } else if (new_args.size() == 5) { - return make_shared(new_args.at(0), - new_args.at(1), - new_args.at(2), - new_args.at(3), - new_args.at(4), - get_hidden_size(), - get_activations(), - get_activations_alpha(), - get_activations_beta(), - get_clip(), - m_linear_before_reset); + return std::make_shared(new_args.at(0), + new_args.at(1), + new_args.at(2), + new_args.at(3), + new_args.at(4), + get_hidden_size(), + get_activations(), + get_activations_alpha(), + get_activations_beta(), + get_clip(), + m_linear_before_reset); } else { OPENVINO_THROW("Incorrect number of new arguments"); } } +} // namespace ov diff --git a/src/core/src/op/gru_sequence.cpp b/src/core/src/op/gru_sequence.cpp index 7f2e3541f34274..825520b1431fc4 100644 --- a/src/core/src/op/gru_sequence.cpp +++ b/src/core/src/op/gru_sequence.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph/op/gru_sequence.hpp" +#include "openvino/op/gru_sequence.hpp" #include #include @@ -10,11 +10,9 @@ #include "gru_sequence_shape_inference.hpp" #include "itt.hpp" -#include "ngraph/op/util/recurrent_sequence.hpp" -#include "ngraph/opsets/opset4.hpp" +#include "openvino/op/util/recurrent_sequence.hpp" -using namespace std; -using namespace ngraph; +namespace ov { op::v5::GRUSequence::GRUSequence() : m_direction(op::RecurrentSequenceDirection::FORWARD), @@ -75,20 +73,21 @@ bool op::v5::GRUSequence::visit_attributes(AttributeVisitor& visitor) { return op::util::RNNCellBase::visit_attributes(visitor); } -shared_ptr op::v5::GRUSequence::clone_with_new_inputs(const OutputVector& new_args) const { +std::shared_ptr op::v5::GRUSequence::clone_with_new_inputs(const OutputVector& new_args) const { OV_OP_SCOPE(v5_GRUSequence_clone_with_new_inputs); check_new_args_count(this, new_args); - return make_shared(new_args.at(0), - new_args.at(1), - new_args.at(2), - new_args.at(3), - new_args.at(4), - new_args.at(5), - m_hidden_size, - m_direction, - m_activations, - m_activations_alpha, - m_activations_beta, - 
m_clip, - m_linear_before_reset); + return std::make_shared(new_args.at(0), + new_args.at(1), + new_args.at(2), + new_args.at(3), + new_args.at(4), + new_args.at(5), + m_hidden_size, + m_direction, + m_activations, + m_activations_alpha, + m_activations_beta, + m_clip, + m_linear_before_reset); } +} // namespace ov diff --git a/src/core/src/op/hard_sigmoid.cpp b/src/core/src/op/hard_sigmoid.cpp index f4aa6c37de6a50..02c85eec1762bc 100644 --- a/src/core/src/op/hard_sigmoid.cpp +++ b/src/core/src/op/hard_sigmoid.cpp @@ -2,16 +2,13 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph/op/hard_sigmoid.hpp" +#include "openvino/op/hard_sigmoid.hpp" #include #include "itt.hpp" -#include "ngraph/shape.hpp" - -using namespace std; -using namespace ngraph; +namespace ov { op::v0::HardSigmoid::HardSigmoid() : Op() {} op::v0::HardSigmoid::HardSigmoid(const Output& data, const Output& alpha, const Output& beta) @@ -19,11 +16,6 @@ op::v0::HardSigmoid::HardSigmoid(const Output& data, const Output& a constructor_validate_and_infer_types(); } -bool ngraph::op::v0::HardSigmoid::visit_attributes(AttributeVisitor& visitor) { - OV_OP_SCOPE(v0_HardSigmoid_visit_attributes); - return true; -} - void op::v0::HardSigmoid::validate_and_infer_types() { OV_OP_SCOPE(v0_HardSigmoid_validate_and_infer_types); const auto& alpha_pshape = get_input_partial_shape(1); @@ -56,9 +48,10 @@ void op::v0::HardSigmoid::validate_and_infer_types() { set_output_type(0, get_input_element_type(0), get_input_partial_shape(0)); } -shared_ptr op::v0::HardSigmoid::clone_with_new_inputs(const OutputVector& new_args) const { +std::shared_ptr op::v0::HardSigmoid::clone_with_new_inputs(const OutputVector& new_args) const { OV_OP_SCOPE(v0_HardSigmoid_clone_with_new_inputs); check_new_args_count(this, new_args); return std::make_shared(new_args.at(0), new_args.at(1), new_args.at(2)); } +} // namespace ov diff --git a/src/core/src/op/hsigmoid.cpp b/src/core/src/op/hsigmoid.cpp index af129217739fe5..94316792b20732 100644 --- a/src/core/src/op/hsigmoid.cpp +++ b/src/core/src/op/hsigmoid.cpp @@ -59,7 +59,7 @@ bool evaluate_hsigmoid(const HostTensorPtr& arg, const HostTensorPtr& out) { bool op::v5::HSigmoid::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { OV_OP_SCOPE(v5_HSigmoid_evaluate); OPENVINO_SUPPRESS_DEPRECATED_START - NGRAPH_CHECK(validate_host_tensor_vector(outputs, 1) && validate_host_tensor_vector(inputs, 1)); + OPENVINO_ASSERT(validate_host_tensor_vector(outputs, 1) && validate_host_tensor_vector(inputs, 1)); OPENVINO_SUPPRESS_DEPRECATED_END return evaluate_hsigmoid(inputs[0], outputs[0]); } diff --git a/src/core/src/op/hswish.cpp b/src/core/src/op/hswish.cpp index bd2791541c0df5..c5ab6c4a5562a5 100644 --- a/src/core/src/op/hswish.cpp +++ b/src/core/src/op/hswish.cpp @@ -60,7 +60,7 @@ bool evaluate_hswish(const HostTensorPtr& arg, const HostTensorPtr& out) { bool op::v4::HSwish::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { OV_OP_SCOPE(v4_HSwish_evaluate); OPENVINO_SUPPRESS_DEPRECATED_START - NGRAPH_CHECK(validate_host_tensor_vector(outputs, 1) && validate_host_tensor_vector(inputs, 1)); + OPENVINO_ASSERT(validate_host_tensor_vector(outputs, 1) && validate_host_tensor_vector(inputs, 1)); OPENVINO_SUPPRESS_DEPRECATED_END return hswish::evaluate_hswish(inputs[0], outputs[0]); } diff --git a/src/core/src/op/idft.cpp b/src/core/src/op/idft.cpp index 0177d506e5fae7..a8bb171a5adb9e 100644 --- a/src/core/src/op/idft.cpp +++ b/src/core/src/op/idft.cpp @@ -2,21 +2,14 @@ 
// SPDX-License-Identifier: Apache-2.0 // -#include "ngraph/op/idft.hpp" +#include "openvino/op/idft.hpp" #include #include -#include #include "itt.hpp" -#include "ngraph/attribute_visitor.hpp" -#include "ngraph/axis_set.hpp" -#include "ngraph/axis_vector.hpp" -#include "ngraph/op/constant.hpp" -#include "ngraph/op/util/op_types.hpp" -#include "ngraph/runtime/host_tensor.hpp" -using namespace ngraph; +namespace ov { op::v7::IDFT::IDFT(const Output& data, const Output& axes) : FFTBase(data, axes) { constructor_validate_and_infer_types(); @@ -27,11 +20,6 @@ op::v7::IDFT::IDFT(const Output& data, const Output& axes, const Out constructor_validate_and_infer_types(); } -bool op::v7::IDFT::visit_attributes(AttributeVisitor& visitor) { - OV_OP_SCOPE(v7_IDFT_visit_attributes); - return true; -} - std::shared_ptr op::v7::IDFT::clone_with_new_inputs(const OutputVector& new_args) const { OV_OP_SCOPE(v7_IDFT_clone_with_new_inputs); check_new_args_count(this, new_args); @@ -43,3 +31,4 @@ std::shared_ptr op::v7::IDFT::clone_with_new_inputs(const OutputVector& ne return std::make_shared(new_args.at(0), new_args.at(1), new_args.at(2)); } +} // namespace ov diff --git a/src/core/src/op/if.cpp b/src/core/src/op/if.cpp index a8321d2e7ec45b..3bbb7613b0b39e 100644 --- a/src/core/src/op/if.cpp +++ b/src/core/src/op/if.cpp @@ -2,21 +2,17 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph/op/if.hpp" +#include "openvino/op/if.hpp" #include #include -#include #include "itt.hpp" -#include "ngraph/factory.hpp" -#include "ngraph/graph_util.hpp" -#include "ngraph/op/util/multi_subgraph_base.hpp" -#include "ngraph/specialize_function.hpp" +#include "openvino/core/graph_util.hpp" +#include "openvino/core/validation_util.hpp" +#include "openvino/op/util/multi_subgraph_base.hpp" #include "openvino/reference/if.hpp" -using namespace std; - ov::op::v8::If::If() : MultiSubGraphOp(2) {} ov::op::v8::If::If(const Output& execution_condition) : If() { @@ -180,8 +176,13 @@ std::shared_ptr ov::op::v8::If::clone_with_new_inputs(const OutputVect OV_OP_SCOPE(v8_If_clone_with_new_inputs); check_new_args_count(this, new_args); - auto op = make_shared(); - NGRAPH_CHECK(op.get(), op != nullptr, "Cannot clone ", description(), " operation with name ", get_friendly_name()); + auto op = std::make_shared(); + OPENVINO_ASSERT(op.get(), + op != nullptr, + "Cannot clone ", + description(), + " operation with name ", + get_friendly_name()); op->set_arguments(new_args); op->set_output_size(m_output_descriptions[0].size()); @@ -204,30 +205,30 @@ std::shared_ptr ov::op::v8::If::clone_with_new_inputs(const OutputVect void ov::op::v8::If::set_input(const Output& value, const std::shared_ptr& then_parameter, const std::shared_ptr& else_parameter) { - NGRAPH_CHECK(then_parameter != nullptr || else_parameter != nullptr, - "Missing parameters! Both parameters are nullptr!"); + OPENVINO_ASSERT(then_parameter != nullptr || else_parameter != nullptr, + "Missing parameters! 
Both parameters are nullptr!"); auto then_param_index = m_bodies[THEN_BODY_INDEX]->get_parameter_index(then_parameter); auto else_param_index = m_bodies[ELSE_BODY_INDEX]->get_parameter_index(else_parameter); - NGRAPH_CHECK(then_parameter == nullptr || then_param_index != -1, - "Missing parameter ", - then_parameter->get_friendly_name(), - " for \'then_body\'!"); - NGRAPH_CHECK(else_parameter == nullptr || else_param_index != -1, - "Missing parameter ", - else_parameter->get_friendly_name(), - " for \'else_body\'!"); + OPENVINO_ASSERT(then_parameter == nullptr || then_param_index != -1, + "Missing parameter ", + then_parameter->get_friendly_name(), + " for \'then_body\'!"); + OPENVINO_ASSERT(else_parameter == nullptr || else_param_index != -1, + "Missing parameter ", + else_parameter->get_friendly_name(), + " for \'else_body\'!"); set_invariant_inputs(value, {then_parameter, else_parameter}); } ov::Output ov::op::v8::If::set_output(const std::shared_ptr& then_result, const std::shared_ptr& else_result) { - NGRAPH_CHECK(then_result != nullptr, "Incorrect result in \"then_body\"! Result cant be \'nullptr\'"); - NGRAPH_CHECK(else_result != nullptr, "Incorrect result in \"else_body\"! Result cant be \'nullptr\'"); + OPENVINO_ASSERT(then_result != nullptr, "Incorrect result in \"then_body\"! Result cant be \'nullptr\'"); + OPENVINO_ASSERT(else_result != nullptr, "Incorrect result in \"else_body\"! Result cant be \'nullptr\'"); auto then_result_id = m_bodies[THEN_BODY_INDEX]->get_result_index(then_result); auto else_result_id = m_bodies[ELSE_BODY_INDEX]->get_result_index(else_result); - NGRAPH_CHECK(then_result_id != -1, "Missing result ", then_result->get_friendly_name(), "in \'then_body\'!"); - NGRAPH_CHECK(else_result_id != -1, "Missing result ", else_result->get_friendly_name(), "in \'then_body\'!"); + OPENVINO_ASSERT(then_result_id != -1, "Missing result ", then_result->get_friendly_name(), "in \'then_body\'!"); + OPENVINO_ASSERT(else_result_id != -1, "Missing result ", else_result->get_friendly_name(), "in \'then_body\'!"); return set_body_outputs({then_result, else_result}); } diff --git a/src/core/src/op/interpolate.cpp b/src/core/src/op/interpolate.cpp index dff8920c91682c..d541cc9ed373bf 100644 --- a/src/core/src/op/interpolate.cpp +++ b/src/core/src/op/interpolate.cpp @@ -14,7 +14,6 @@ #include "openvino/op/interpolate.hpp" #include "openvino/op/util/precision_sensitive_attribute.hpp" -using namespace std; namespace ov { ov::op::v0::Interpolate::Interpolate(const Output& image, const Output& output_shape, @@ -51,10 +50,10 @@ void ov::op::v0::Interpolate::validate_and_infer_types() { set_output_type(0, get_input_element_type(0), output_shapes[0]); } -shared_ptr op::v0::Interpolate::clone_with_new_inputs(const OutputVector& new_args) const { +std::shared_ptr op::v0::Interpolate::clone_with_new_inputs(const OutputVector& new_args) const { OV_OP_SCOPE(v0_Interpolate_clone_with_new_inputs); check_new_args_count(this, new_args); - return make_shared(new_args.at(0), new_args.at(1), m_attrs); + return std::make_shared(new_args.at(0), new_args.at(1), m_attrs); } std::ostream& operator<<(std::ostream& s, const op::v0::Interpolate::InterpolateMode& type) { @@ -134,17 +133,17 @@ void ov::op::v4::Interpolate::validate_and_infer_types() { set_output_type(0, get_input_element_type(0), output_shapes[0]); } -shared_ptr ov::op::v4::Interpolate::clone_with_new_inputs(const OutputVector& new_args) const { +std::shared_ptr ov::op::v4::Interpolate::clone_with_new_inputs(const OutputVector& new_args) const { 
OV_OP_SCOPE(v4_Interpolate_clone_with_new_inputs); check_new_args_count(this, new_args); if (new_args.size() <= 3) { - return make_shared(new_args.at(0), new_args.at(1), new_args.at(2), m_attrs); + return std::make_shared(new_args.at(0), new_args.at(1), new_args.at(2), m_attrs); } - return make_shared(new_args.at(0), - new_args.at(1), - new_args.at(2), - new_args.at(3), - m_attrs); + return std::make_shared(new_args.at(0), + new_args.at(1), + new_args.at(2), + new_args.at(3), + m_attrs); } namespace { @@ -320,13 +319,13 @@ op::v11::Interpolate::Interpolate(const Output& image, constructor_validate_and_infer_types(); } -shared_ptr op::v11::Interpolate::clone_with_new_inputs(const OutputVector& new_args) const { +std::shared_ptr op::v11::Interpolate::clone_with_new_inputs(const OutputVector& new_args) const { OV_OP_SCOPE(v11_Interpolate_clone_with_new_inputs); check_new_args_count(this, new_args); if (new_args.size() == 2) { - return make_shared(new_args.at(0), new_args.at(1), m_attrs); + return std::make_shared(new_args.at(0), new_args.at(1), m_attrs); } - return make_shared(new_args.at(0), new_args.at(1), new_args.at(2), m_attrs); + return std::make_shared(new_args.at(0), new_args.at(1), new_args.at(2), m_attrs); } void op::v11::Interpolate::validate_and_infer_types() { diff --git a/src/core/src/op/irdft.cpp b/src/core/src/op/irdft.cpp index 2c75478832ff03..2f282e9ec5ca6e 100644 --- a/src/core/src/op/irdft.cpp +++ b/src/core/src/op/irdft.cpp @@ -2,15 +2,13 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph/op/irdft.hpp" +#include "openvino/op/irdft.hpp" #include #include "irdft_shape_inference.hpp" #include "itt.hpp" -using namespace std; - ov::op::v9::IRDFT::IRDFT(const Output& data, const Output& axes) : FFTBase(data, axes) { constructor_validate_and_infer_types(); } @@ -20,11 +18,6 @@ ov::op::v9::IRDFT::IRDFT(const Output& data, const Output& axes, con constructor_validate_and_infer_types(); } -bool ov::op::v9::IRDFT::visit_attributes(AttributeVisitor& visitor) { - OV_OP_SCOPE(v9_IRDFT_visit_attributes); - return true; -} - std::shared_ptr ov::op::v9::IRDFT::clone_with_new_inputs(const OutputVector& new_args) const { OV_OP_SCOPE(v9_IRDFT_clone_with_new_inputs); check_new_args_count(this, new_args); diff --git a/src/core/src/op/log_softmax.cpp b/src/core/src/op/log_softmax.cpp index 5c23b3006f9ec9..a16ac224f07b82 100644 --- a/src/core/src/op/log_softmax.cpp +++ b/src/core/src/op/log_softmax.cpp @@ -2,13 +2,11 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph/op/log_softmax.hpp" +#include "openvino/op/log_softmax.hpp" #include "itt.hpp" -#include "ngraph/util.hpp" -using namespace std; -using namespace ngraph; +namespace ov { op::v5::LogSoftmax::LogSoftmax(const Output& arg, const int64_t axis) : Op({arg}), m_axis(axis) { constructor_validate_and_infer_types(); @@ -35,8 +33,9 @@ void op::v5::LogSoftmax::validate_and_infer_types() { set_output_type(0, get_input_element_type(0), input_shape); } -shared_ptr op::v5::LogSoftmax::clone_with_new_inputs(const OutputVector& new_args) const { +std::shared_ptr op::v5::LogSoftmax::clone_with_new_inputs(const OutputVector& new_args) const { OV_OP_SCOPE(v5_LogSoftmax_clone_with_new_inputs); check_new_args_count(this, new_args); - return make_shared(new_args.at(0), m_axis); + return std::make_shared(new_args.at(0), m_axis); } +} // namespace ov diff --git a/src/core/src/op/logical_and.cpp b/src/core/src/op/logical_and.cpp index 6b1c87eb4b541a..ce935e4a188265 100644 --- a/src/core/src/op/logical_and.cpp +++ 
b/src/core/src/op/logical_and.cpp @@ -67,7 +67,7 @@ bool evaluate_logand(const HostTensorPtr& arg0, bool op::v1::LogicalAnd::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { OV_OP_SCOPE(v1_LogicalAnd_evaluate); OPENVINO_SUPPRESS_DEPRECATED_START - NGRAPH_CHECK(validate_host_tensor_vector(outputs, 1) && validate_host_tensor_vector(inputs, 2)); + OPENVINO_ASSERT(validate_host_tensor_vector(outputs, 1) && validate_host_tensor_vector(inputs, 2)); OPENVINO_SUPPRESS_DEPRECATED_END return logand::evaluate_logand(inputs[0], inputs[1], outputs[0], get_autob()); } diff --git a/src/core/src/op/logical_not.cpp b/src/core/src/op/logical_not.cpp index da08f6260a332f..6870c07921fc0e 100644 --- a/src/core/src/op/logical_not.cpp +++ b/src/core/src/op/logical_not.cpp @@ -65,7 +65,7 @@ bool evaluate_not(const HostTensorPtr& arg0, const HostTensorPtr& out, const siz bool op::v1::LogicalNot::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { OV_OP_SCOPE(v1_LogicalNot_evaluate); OPENVINO_SUPPRESS_DEPRECATED_START - NGRAPH_CHECK(validate_host_tensor_vector(outputs, 1) && validate_host_tensor_vector(inputs, 1)); + OPENVINO_ASSERT(validate_host_tensor_vector(outputs, 1) && validate_host_tensor_vector(inputs, 1)); OPENVINO_SUPPRESS_DEPRECATED_END return notop::evaluate_not(inputs[0], outputs[0], inputs[0]->get_element_count()); } diff --git a/src/core/src/op/logical_or.cpp b/src/core/src/op/logical_or.cpp index fd4e35c0c5081b..c5cbc20c83b13d 100644 --- a/src/core/src/op/logical_or.cpp +++ b/src/core/src/op/logical_or.cpp @@ -61,7 +61,7 @@ bool evaluate_logor(const HostTensorPtr& arg0, bool op::v1::LogicalOr::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { OV_OP_SCOPE(v1_LogicalOr_evaluate); OPENVINO_SUPPRESS_DEPRECATED_START - NGRAPH_CHECK(validate_host_tensor_vector(outputs, 1) && validate_host_tensor_vector(inputs, 2)); + OPENVINO_ASSERT(validate_host_tensor_vector(outputs, 1) && validate_host_tensor_vector(inputs, 2)); OPENVINO_SUPPRESS_DEPRECATED_END return logor::evaluate_logor(inputs[0], inputs[1], outputs[0], get_autob()); } diff --git a/src/core/src/op/loop.cpp b/src/core/src/op/loop.cpp index ef623af65814b6..8bf7710861319f 100644 --- a/src/core/src/op/loop.cpp +++ b/src/core/src/op/loop.cpp @@ -330,7 +330,12 @@ std::shared_ptr op::v5::Loop::clone_with_new_inputs(const OutputVector& ne OV_OP_SCOPE(v5_Loop_clone_with_new_inputs); check_new_args_count(this, new_args); auto op = make_shared(); - NGRAPH_CHECK(op.get(), op != nullptr, "Cannot clone ", description(), " operation with name ", get_friendly_name()); + OPENVINO_ASSERT(op.get(), + op != nullptr, + "Cannot clone ", + description(), + " operation with name ", + get_friendly_name()); clone_to(*op, new_args); return op; } @@ -341,10 +346,10 @@ Output op::v5::Loop::get_concatenated_slices(const Output& value, int64_t part_size, int64_t end, int64_t axis) { - NGRAPH_CHECK(start == 0 && stride == 1 && part_size == 1 && end == -1, - "Invalid start, stride, part_size, or end attribute values in Loop op. " - "Supported values for start {0}, for stride and part_size {1}, for end " - "{-1}"); + OPENVINO_ASSERT(start == 0 && stride == 1 && part_size == 1 && end == -1, + "Invalid start, stride, part_size, or end attribute values in Loop op. 
" + "Supported values for start {0}, for stride and part_size {1}, for end " + "{-1}"); return SubGraphOp::get_concatenated_slices(value, start, stride, part_size, end, axis); } diff --git a/src/core/src/op/lrn.cpp b/src/core/src/op/lrn.cpp index 05b7ea406dae5c..f80cb91103ce7d 100644 --- a/src/core/src/op/lrn.cpp +++ b/src/core/src/op/lrn.cpp @@ -2,22 +2,19 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph/op/lrn.hpp" - -#include +#include "openvino/op/lrn.hpp" #include "itt.hpp" -#include "ngraph/attribute_visitor.hpp" -#include "ngraph/op/constant.hpp" -#include "ngraph/op/multiply.hpp" - -using namespace std; -using namespace ngraph; +#include "openvino/core/attribute_visitor.hpp" +#include "openvino/core/validation_util.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/multiply.hpp" -op::LRN::LRN(const Output& arg, double alpha, double beta, double bias, size_t size) +namespace ov { +op::v0::LRN::LRN(const Output& arg, double alpha, double beta, double bias, size_t size) : LRN(arg, op::v0::Constant::create(element::i64, ov::Shape{1}, {1}), alpha, beta, bias, size) {} -op::LRN::LRN(const Output& arg, const Output& axes, double alpha, double beta, double bias, size_t size) +op::v0::LRN::LRN(const Output& arg, const Output& axes, double alpha, double beta, double bias, size_t size) : Op({arg, axes}), m_alpha(alpha), m_beta(beta), @@ -26,7 +23,7 @@ op::LRN::LRN(const Output& arg, const Output& axes, double alpha, do constructor_validate_and_infer_types(); } -AxisSet op::LRN::get_reduction_axes() const { +AxisSet op::v0::LRN::get_reduction_axes() const { AxisSet axes{1}; // channel axis as default auto axes_input_node = input_value(1).get_node_shared_ptr(); OPENVINO_SUPPRESS_DEPRECATED_START @@ -37,7 +34,7 @@ AxisSet op::LRN::get_reduction_axes() const { return axes; } -void op::LRN::validate_and_infer_types() { +void op::v0::LRN::validate_and_infer_types() { OV_OP_SCOPE(v0_LRN_validate_and_infer_types); element::Type arg_type = get_input_element_type(0); ov::PartialShape arg_shape = get_input_partial_shape(0); @@ -89,7 +86,7 @@ void op::LRN::validate_and_infer_types() { ")."); } -bool ngraph::op::v0::LRN::visit_attributes(AttributeVisitor& visitor) { +bool op::v0::LRN::visit_attributes(AttributeVisitor& visitor) { OV_OP_SCOPE(v0_LRN_visit_attributes); visitor.on_attribute("alpha", m_alpha); visitor.on_attribute("beta", m_beta); @@ -98,8 +95,9 @@ bool ngraph::op::v0::LRN::visit_attributes(AttributeVisitor& visitor) { return true; } -shared_ptr op::LRN::clone_with_new_inputs(const OutputVector& new_args) const { +std::shared_ptr op::v0::LRN::clone_with_new_inputs(const OutputVector& new_args) const { OV_OP_SCOPE(v0_LRN_clone_with_new_inputs); check_new_args_count(this, new_args); - return make_shared(new_args.at(0), new_args.at(1), m_alpha, m_beta, m_bias, m_size); + return std::make_shared(new_args.at(0), new_args.at(1), m_alpha, m_beta, m_bias, m_size); } +} // namespace ov diff --git a/src/core/src/op/lstm_cell.cpp b/src/core/src/op/lstm_cell.cpp index a2cdf536037c1e..49b6a93a0c71f1 100644 --- a/src/core/src/op/lstm_cell.cpp +++ b/src/core/src/op/lstm_cell.cpp @@ -2,21 +2,19 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph/op/lstm_cell.hpp" +#include "openvino/op/lstm_cell.hpp" #include #include #include "itt.hpp" #include "lstm_cell_shape_inference.hpp" -#include "ngraph/attribute_visitor.hpp" -#include "ngraph/op/concat.hpp" -#include "ngraph/op/constant.hpp" -#include "ngraph/shape.hpp" -#include "ngraph/type/element_type.hpp" +#include 
"openvino/core/attribute_visitor.hpp" +#include "openvino/core/type/element_type.hpp" +#include "openvino/op/concat.hpp" +#include "openvino/op/constant.hpp" -using namespace std; -using namespace ngraph; +namespace ov { op::v0::LSTMCell::LSTMCell() : m_input_forget(false), m_weights_format(LSTMWeightsFormat::IFCO) { m_activations = {"sigmoid", "tanh", "tanh"}; @@ -32,9 +30,9 @@ op::v0::LSTMCell::LSTMCell(const Output& X, const Output& R, size_t hidden_size, op::LSTMWeightsFormat weights_format, - const vector& activations, - const vector& activations_alpha, - const vector& activations_beta, + const std::vector& activations, + const std::vector& activations_alpha, + const std::vector& activations_beta, float clip, bool input_forget) : RNNCellBase({X, initial_hidden_state, initial_cell_state, W, R}, @@ -61,9 +59,9 @@ op::v0::LSTMCell::LSTMCell(const Output& X, const Output& B, size_t hidden_size, op::LSTMWeightsFormat weights_format, - const vector& activations, - const vector& activations_alpha, - const vector& activations_beta, + const std::vector& activations, + const std::vector& activations_alpha, + const std::vector& activations_beta, float clip, bool input_forget) : RNNCellBase({X, initial_hidden_state, initial_cell_state, W, R, B}, @@ -90,9 +88,9 @@ op::v0::LSTMCell::LSTMCell(const Output& X, const Output& P, size_t hidden_size, op::LSTMWeightsFormat weights_format, - const vector& activations, - const vector& activations_alpha, - const vector& activations_beta, + const std::vector& activations, + const std::vector& activations_alpha, + const std::vector& activations_beta, float clip, bool input_forget) : RNNCellBase({X, initial_hidden_state, initial_cell_state, W, R, B, P}, @@ -109,7 +107,7 @@ op::v0::LSTMCell::LSTMCell(const Output& X, constructor_validate_and_infer_types(); } -bool ngraph::op::v0::LSTMCell::visit_attributes(AttributeVisitor& visitor) { +bool op::v0::LSTMCell::visit_attributes(AttributeVisitor& visitor) { OV_OP_SCOPE(v0_LSTMCell_visit_attributes); visitor.on_attribute("hidden_size", m_hidden_size); visitor.on_attribute("activations", m_activations); @@ -176,74 +174,73 @@ void op::v0::LSTMCell::validate_and_infer_types() { Output op::v0::LSTMCell::get_default_bias_input() const { return Output{op::v0::Constant::create(get_input_element_type(0), Shape{lstm_cell::gates_count * get_hidden_size()}, - vector{0.f})}; + std::vector{0.f})}; } Output op::v0::LSTMCell::get_default_peepholes_input() const { return Output{op::v0::Constant::create(get_input_element_type(0), Shape{lstm_cell::peepholes_count * get_hidden_size()}, - vector{0.f})}; + std::vector{0.f})}; } -shared_ptr op::v0::LSTMCell::clone_with_new_inputs(const OutputVector& new_args) const { +std::shared_ptr op::v0::LSTMCell::clone_with_new_inputs(const OutputVector& new_args) const { OV_OP_SCOPE(v0_LSTMCell_clone_with_new_inputs); check_new_args_count(this, new_args); if (new_args.size() == 5) { - return make_shared(new_args.at(0), - new_args.at(1), - new_args.at(2), - new_args.at(3), - new_args.at(4), - get_hidden_size(), - get_weights_format(), - get_activations(), - get_activations_alpha(), - get_activations_beta(), - get_clip(), - m_input_forget); + return std::make_shared(new_args.at(0), + new_args.at(1), + new_args.at(2), + new_args.at(3), + new_args.at(4), + get_hidden_size(), + get_weights_format(), + get_activations(), + get_activations_alpha(), + get_activations_beta(), + get_clip(), + m_input_forget); } else if (new_args.size() == 6) { - return make_shared(new_args.at(0), - new_args.at(1), - 
new_args.at(2), - new_args.at(3), - new_args.at(4), - new_args.at(5), - get_hidden_size(), - get_weights_format(), - get_activations(), - get_activations_alpha(), - get_activations_beta(), - get_clip(), - m_input_forget); + return std::make_shared(new_args.at(0), + new_args.at(1), + new_args.at(2), + new_args.at(3), + new_args.at(4), + new_args.at(5), + get_hidden_size(), + get_weights_format(), + get_activations(), + get_activations_alpha(), + get_activations_beta(), + get_clip(), + m_input_forget); } else if (new_args.size() == 7) { - return make_shared(new_args.at(0), - new_args.at(1), - new_args.at(2), - new_args.at(3), - new_args.at(4), - new_args.at(5), - new_args.at(6), - get_hidden_size(), - get_weights_format(), - get_activations(), - get_activations_alpha(), - get_activations_beta(), - get_clip(), - m_input_forget); + return std::make_shared(new_args.at(0), + new_args.at(1), + new_args.at(2), + new_args.at(3), + new_args.at(4), + new_args.at(5), + new_args.at(6), + get_hidden_size(), + get_weights_format(), + get_activations(), + get_activations_alpha(), + get_activations_beta(), + get_clip(), + m_input_forget); } else { OPENVINO_THROW("Incorrect number of new arguments"); } } -namespace ov { template <> -NGRAPH_API EnumNames& EnumNames::get() { - static auto enum_names = EnumNames("op::LSTMWeightsFormat", - {{"fico", ngraph::op::LSTMWeightsFormat::FICO}, - {"icof", ngraph::op::LSTMWeightsFormat::ICOF}, - {"ifco", ngraph::op::LSTMWeightsFormat::IFCO}, - {"ifoc", ngraph::op::LSTMWeightsFormat::IFOC}, - {"iofc", ngraph::op::LSTMWeightsFormat::IOFC}}); +OPENVINO_API EnumNames& EnumNames::get() { + static auto enum_names = EnumNames("op::LSTMWeightsFormat", + {{"fico", op::LSTMWeightsFormat::FICO}, + {"icof", op::LSTMWeightsFormat::ICOF}, + {"ifco", op::LSTMWeightsFormat::IFCO}, + {"ifoc", op::LSTMWeightsFormat::IFOC}, + {"iofc", op::LSTMWeightsFormat::IOFC}}); return enum_names; } @@ -263,9 +260,8 @@ ov::op::util::LSTMWeightsFormat op::convert_lstm_weights_enums(op::LSTMWeightsFo OPENVINO_ASSERT(false, "Incorrect LSTM weights format"); } } -} // namespace ov -std::ostream& ov::operator<<(std::ostream& s, const op::LSTMWeightsFormat& type) { +std::ostream& operator<<(std::ostream& s, const op::LSTMWeightsFormat& type) { return s << as_string(type); } @@ -282,9 +278,9 @@ op::v4::LSTMCell::LSTMCell(const Output& X, const Output& W, const Output& R, size_t hidden_size, - const vector& activations, - const vector& activations_alpha, - const vector& activations_beta, + const std::vector& activations, + const std::vector& activations_alpha, + const std::vector& activations_beta, float clip) : RNNCellBase({X, initial_hidden_state, initial_cell_state, W, R}, hidden_size, @@ -306,9 +302,9 @@ op::v4::LSTMCell::LSTMCell(const Output& X, const Output& R, const Output& B, size_t hidden_size, - const vector& activations, - const vector& activations_alpha, - const vector& activations_beta, + const std::vector& activations, + const std::vector& activations_alpha, + const std::vector& activations_beta, float clip) : RNNCellBase({X, initial_hidden_state, initial_cell_state, W, R, B}, hidden_size, @@ -322,7 +318,7 @@ op::v4::LSTMCell::LSTMCell(const Output& X, constructor_validate_and_infer_types(); } -bool ngraph::op::v4::LSTMCell::visit_attributes(AttributeVisitor& visitor) { +bool op::v4::LSTMCell::visit_attributes(AttributeVisitor& visitor) { OV_OP_SCOPE(v4_LSTMCell_visit_attributes); return op::util::RNNCellBase::visit_attributes(visitor); } @@ -368,36 +364,37 @@ void 
op::v4::LSTMCell::validate_and_infer_types() { Output op::v4::LSTMCell::get_default_bias_input() const { return Output{op::v0::Constant::create(get_input_element_type(0), Shape{lstm_cell::gates_count * get_hidden_size()}, - vector{0.f})}; + std::vector{0.f})}; } -shared_ptr op::v4::LSTMCell::clone_with_new_inputs(const OutputVector& new_args) const { +std::shared_ptr op::v4::LSTMCell::clone_with_new_inputs(const OutputVector& new_args) const { OV_OP_SCOPE(v4_LSTMCell_clone_with_new_inputs); check_new_args_count(this, new_args); if (new_args.size() == 5) { - return make_shared(new_args.at(0), - new_args.at(1), - new_args.at(2), - new_args.at(3), - new_args.at(4), - get_hidden_size(), - get_activations(), - get_activations_alpha(), - get_activations_beta(), - get_clip()); + return std::make_shared(new_args.at(0), + new_args.at(1), + new_args.at(2), + new_args.at(3), + new_args.at(4), + get_hidden_size(), + get_activations(), + get_activations_alpha(), + get_activations_beta(), + get_clip()); } else if (new_args.size() == 6) { - return make_shared(new_args.at(0), - new_args.at(1), - new_args.at(2), - new_args.at(3), - new_args.at(4), - new_args.at(5), - get_hidden_size(), - get_activations(), - get_activations_alpha(), - get_activations_beta(), - get_clip()); + return std::make_shared(new_args.at(0), + new_args.at(1), + new_args.at(2), + new_args.at(3), + new_args.at(4), + new_args.at(5), + get_hidden_size(), + get_activations(), + get_activations_alpha(), + get_activations_beta(), + get_clip()); } else { OPENVINO_THROW("Incorrect number of new arguments"); } } +} // namespace ov diff --git a/src/core/src/op/lstm_sequence.cpp b/src/core/src/op/lstm_sequence.cpp index 169f10d07f9ca3..492f42d051d7da 100644 --- a/src/core/src/op/lstm_sequence.cpp +++ b/src/core/src/op/lstm_sequence.cpp @@ -2,21 +2,14 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph/op/lstm_sequence.hpp" +#include "openvino/op/lstm_sequence.hpp" #include "itt.hpp" #include "lstm_sequence_shape_inference.hpp" -#include "ngraph/attribute_visitor.hpp" -#include "ngraph/builder/autobroadcast.hpp" -#include "ngraph/builder/reshape.hpp" -#include "ngraph/builder/split.hpp" -#include "ngraph/op/util/recurrent_sequence.hpp" -#include "ngraph/opsets/opset1.hpp" -#include "ngraph/opsets/opset4.hpp" - -using namespace ngraph; -using namespace std; +#include "openvino/core/attribute_visitor.hpp" +#include "openvino/op/util/recurrent_sequence.hpp" +namespace ov { op::v0::LSTMSequence::LSTMSequence(const Output& X, const Output& initial_hidden_state, const Output& initial_cell_state, @@ -95,42 +88,42 @@ bool op::v0::LSTMSequence::visit_attributes(AttributeVisitor& visitor) { return true; } -shared_ptr op::v0::LSTMSequence::clone_with_new_inputs(const OutputVector& new_args) const { +std::shared_ptr op::v0::LSTMSequence::clone_with_new_inputs(const OutputVector& new_args) const { OV_OP_SCOPE(v0_LSTMSequence_clone_with_new_inputs); check_new_args_count(this, new_args); if (new_args.size() == 8) { - return make_shared(new_args.at(0), // X - new_args.at(1), // initial_hidden_state - new_args.at(2), // initial_cell_state - new_args.at(3), // sequence_lengths - new_args.at(4), // W - new_args.at(5), // R - new_args.at(6), // B - new_args.at(7), // P - m_hidden_size, - m_direction, - m_weights_format, - m_activations_alpha, - m_activations_beta, - m_activations, - m_clip, - m_input_forget); + return std::make_shared(new_args.at(0), // X + new_args.at(1), // initial_hidden_state + new_args.at(2), // initial_cell_state + new_args.at(3), 
// sequence_lengths + new_args.at(4), // W + new_args.at(5), // R + new_args.at(6), // B + new_args.at(7), // P + m_hidden_size, + m_direction, + m_weights_format, + m_activations_alpha, + m_activations_beta, + m_activations, + m_clip, + m_input_forget); } else if (new_args.size() == 7) { - return make_shared(new_args.at(0), // X - new_args.at(1), // initial_hidden_state - new_args.at(2), // initial_cell_state - new_args.at(3), // sequence_lengths - new_args.at(4), // W - new_args.at(5), // R - new_args.at(6), // B - m_hidden_size, - m_direction, - m_weights_format, - m_activations_alpha, - m_activations_beta, - m_activations, - m_clip, - m_input_forget); + return std::make_shared(new_args.at(0), // X + new_args.at(1), // initial_hidden_state + new_args.at(2), // initial_cell_state + new_args.at(3), // sequence_lengths + new_args.at(4), // W + new_args.at(5), // R + new_args.at(6), // B + m_hidden_size, + m_direction, + m_weights_format, + m_activations_alpha, + m_activations_beta, + m_activations, + m_clip, + m_input_forget); } else { OPENVINO_THROW("Incorrect number of new arguments"); } @@ -166,29 +159,29 @@ void op::v0::LSTMSequence::validate_and_infer_types() { set_output_type(2, result_et, output_shapes[2]); } -bool ngraph::op::v5::LSTMSequence::visit_attributes(AttributeVisitor& visitor) { +bool op::v5::LSTMSequence::visit_attributes(AttributeVisitor& visitor) { OV_OP_SCOPE(v5_LSTMSequence_visit_attributes); visitor.on_attribute("direction", m_direction); return op::util::RNNCellBase::visit_attributes(visitor); } -shared_ptr op::v5::LSTMSequence::clone_with_new_inputs(const OutputVector& new_args) const { +std::shared_ptr op::v5::LSTMSequence::clone_with_new_inputs(const OutputVector& new_args) const { OV_OP_SCOPE(v5_LSTMSequence_clone_with_new_inputs); check_new_args_count(this, new_args); if (new_args.size() == 7) { - return make_shared(new_args.at(0), // X - new_args.at(1), // initial_hidden_state - new_args.at(2), // initial_cell_state - new_args.at(3), // sequence_lengths - new_args.at(4), // W - new_args.at(5), // R - new_args.at(6), // B - m_hidden_size, - m_direction, - m_activations_alpha, - m_activations_beta, - m_activations, - m_clip); + return std::make_shared(new_args.at(0), // X + new_args.at(1), // initial_hidden_state + new_args.at(2), // initial_cell_state + new_args.at(3), // sequence_lengths + new_args.at(4), // W + new_args.at(5), // R + new_args.at(6), // B + m_hidden_size, + m_direction, + m_activations_alpha, + m_activations_beta, + m_activations, + m_clip); } else { OPENVINO_THROW("Incorrect number of new arguments"); } @@ -224,3 +217,4 @@ void op::v5::LSTMSequence::validate_and_infer_types() { set_output_type(1, result_et, output_shapes[1]); set_output_type(2, result_et, output_shapes[2]); } +} // namespace ov diff --git a/src/core/src/op/mish.cpp b/src/core/src/op/mish.cpp index fc39df6e999c7d..1974e683029a6b 100644 --- a/src/core/src/op/mish.cpp +++ b/src/core/src/op/mish.cpp @@ -73,7 +73,7 @@ bool evaluate_mish(const HostTensorPtr& arg0, const HostTensorPtr& out) { bool op::v4::Mish::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { OV_OP_SCOPE(v4_Mish_evaluate); OPENVINO_SUPPRESS_DEPRECATED_START - NGRAPH_CHECK(validate_host_tensor_vector(outputs, 1) && validate_host_tensor_vector(inputs, 1)); + OPENVINO_ASSERT(validate_host_tensor_vector(outputs, 1) && validate_host_tensor_vector(inputs, 1)); OPENVINO_SUPPRESS_DEPRECATED_END return mish::evaluate_mish(inputs[0], outputs[0]); } diff --git a/src/core/src/op/mod.cpp 
b/src/core/src/op/mod.cpp
index 6bb45da04004cb..e8aa1a8a009cc2 100644
--- a/src/core/src/op/mod.cpp
+++ b/src/core/src/op/mod.cpp
@@ -6,6 +6,7 @@
 
 #include "element_visitor.hpp"
 #include "itt.hpp"
+#include "openvino/core/shape_util.hpp"
 #include "openvino/reference/mod.hpp"
 #include "utils.hpp"
 
diff --git a/src/core/src/op/negative.cpp b/src/core/src/op/negative.cpp
index 3feadeb1c7505e..5b86e98e6ad2a1 100644
--- a/src/core/src/op/negative.cpp
+++ b/src/core/src/op/negative.cpp
@@ -59,8 +59,8 @@ bool evaluate_negative(const HostTensorPtr& arg0, const HostTensorPtr& out, cons
 bool op::Negative::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const {
     OV_OP_SCOPE(v0_Negative_evaluate);
     OPENVINO_SUPPRESS_DEPRECATED_START
-    NGRAPH_CHECK(validate_host_tensor_vector(inputs, 1));
-    NGRAPH_CHECK(validate_host_tensor_vector(outputs, 1));
+    OPENVINO_ASSERT(validate_host_tensor_vector(inputs, 1));
+    OPENVINO_ASSERT(validate_host_tensor_vector(outputs, 1));
     OPENVINO_SUPPRESS_DEPRECATED_END
     return negativeop::evaluate_negative(inputs[0], outputs[0], shape_size(inputs[0]->get_shape()));
 }
diff --git a/src/core/src/op/not_equal.cpp b/src/core/src/op/not_equal.cpp
index 89018aec9bcdb0..80c77cfa58f1c1 100644
--- a/src/core/src/op/not_equal.cpp
+++ b/src/core/src/op/not_equal.cpp
@@ -67,7 +67,7 @@ shared_ptr op::v1::NotEqual::clone_with_new_inputs(const OutputVector& new
 bool op::v1::NotEqual::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const {
     OV_OP_SCOPE(v1_NotEqual_evaluate);
     OPENVINO_SUPPRESS_DEPRECATED_START
-    NGRAPH_CHECK(validate_host_tensor_vector(outputs, 1) && validate_host_tensor_vector(inputs, 2));
+    OPENVINO_ASSERT(validate_host_tensor_vector(outputs, 1) && validate_host_tensor_vector(inputs, 2));
     OPENVINO_SUPPRESS_DEPRECATED_END
     return not_equalop::evaluate_not_equal(inputs[0], inputs[1], outputs[0], get_autob());
 }
diff --git a/src/core/src/op/one_hot.cpp b/src/core/src/op/one_hot.cpp
index 963b4980f02b9c..81186f506e10dd 100644
--- a/src/core/src/op/one_hot.cpp
+++ b/src/core/src/op/one_hot.cpp
@@ -107,21 +107,21 @@ bool evaluate_onehot(const HostTensorVector& output_values, const HostTensorVect
 bool op::v1::OneHot::evaluate(const HostTensorVector& output_values, const HostTensorVector& input_values) const {
     OV_OP_SCOPE(v1_OneHot_evaluate);
     OPENVINO_SUPPRESS_DEPRECATED_START
-    NGRAPH_CHECK(validate_host_tensor_vector(input_values, 4));
-    NGRAPH_CHECK(validate_host_tensor_vector(output_values, 1));
+    OPENVINO_ASSERT(validate_host_tensor_vector(input_values, 4));
+    OPENVINO_ASSERT(validate_host_tensor_vector(output_values, 1));
     OPENVINO_SUPPRESS_DEPRECATED_END
     const auto& ind_Pshape = input_values[0]->get_partial_shape();
     const auto& out_Pshape = output_values[0]->get_partial_shape();
-    NGRAPH_CHECK(ind_Pshape.is_static() && out_Pshape.is_static(), "Only static input/output shapes are supported");
+    OPENVINO_ASSERT(ind_Pshape.is_static() && out_Pshape.is_static(), "Only static input/output shapes are supported");
     const auto out_shape = out_Pshape.get_shape();
     const int64_t axis = get_axis();
-    NGRAPH_CHECK(axis >= 0 && static_cast(axis) < out_shape.size(), "Invalid axis value.");
+    OPENVINO_ASSERT(axis >= 0 && static_cast(axis) < out_shape.size(), "Invalid axis value.");
     const auto depth = std::make_shared(input_values[1])->cast_vector()[0];
     const auto ind_shape = ind_Pshape.get_shape();
-    NGRAPH_CHECK(shape_size(ind_shape) * depth == shape_size(out_shape),
-                 "Incompatible I/O shapes or wrong depth value.");
-    NGRAPH_CHECK(static_cast(out_shape[axis]) == depth, "Incompatible axis and depth values.");
+    OPENVINO_ASSERT(shape_size(ind_shape) * depth == shape_size(out_shape),
+                    "Incompatible I/O shapes or wrong depth value.");
+    OPENVINO_ASSERT(static_cast(out_shape[axis]) == depth, "Incompatible axis and depth values.");
 
     return one_hot::evaluate_onehot(output_values, input_values, axis);
 }
diff --git a/src/core/src/op/op.cpp b/src/core/src/op/op.cpp
index 0dce762c0f5ccc..383de812c94d14 100644
--- a/src/core/src/op/op.cpp
+++ b/src/core/src/op/op.cpp
@@ -2,15 +2,13 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 
-#include "ngraph/op/op.hpp"
+#include "openvino/op/op.hpp"
 
 #include 
 #include 
 #include 
 
-#include "ngraph/node.hpp"
-#include "ngraph/type/element_type.hpp"
-
-using namespace std;
+#include "openvino/core/node.hpp"
+#include "openvino/core/type/element_type.hpp"
 
 ov::op::Op::Op(const ov::OutputVector& args) : Node(args) {}
diff --git a/src/core/src/op/parameter.cpp b/src/core/src/op/parameter.cpp
index 7d9fd67bd9cad2..a7e598a7e89177 100644
--- a/src/core/src/op/parameter.cpp
+++ b/src/core/src/op/parameter.cpp
@@ -2,60 +2,59 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 
-#include "ngraph/op/parameter.hpp"
+#include "openvino/op/parameter.hpp"
 
 #include 
 
 #include "itt.hpp"
 #include "layout_utils.hpp"
-#include "ngraph/attribute_visitor.hpp"
+#include "openvino/core/attribute_visitor.hpp"
 
-using namespace std;
-using namespace ngraph;
+namespace ov {
 
-op::Parameter::Parameter(const element::Type& element_type, const ov::PartialShape& pshape)
+op::v0::Parameter::Parameter(const element::Type& element_type, const ov::PartialShape& pshape)
     : m_partial_shape(pshape),
       m_element_type(element_type),
       m_is_relevant_to_shapes(false) {
     constructor_validate_and_infer_types();
 }
 
-bool op::Parameter::visit_attributes(AttributeVisitor& visitor) {
+bool op::v0::Parameter::visit_attributes(AttributeVisitor& visitor) {
     OV_OP_SCOPE(v0_Parameter_visit_attributes);
     visitor.on_attribute("shape", m_partial_shape);
     visitor.on_attribute("element_type", m_element_type);
     return true;
 }
 
-void op::Parameter::validate_and_infer_types() {
+void op::v0::Parameter::validate_and_infer_types() {
     OV_OP_SCOPE(v0_Parameter_validate_and_infer_types);
     Op::validate_and_infer_types();
     set_output_type(0, m_element_type, m_partial_shape);
 }
 
-shared_ptr op::Parameter::clone_with_new_inputs(const OutputVector& new_args) const {
+std::shared_ptr op::v0::Parameter::clone_with_new_inputs(const OutputVector& new_args) const {
     OV_OP_SCOPE(v0_Parameter_clone_with_new_inputs);
     check_new_args_count(this, new_args);
-    return make_shared(m_element_type, m_partial_shape);
+    return std::make_shared(m_element_type, m_partial_shape);
 }
 
-bool op::Parameter::is_relevant_to_shapes() const {
+bool op::v0::Parameter::is_relevant_to_shapes() const {
     return m_is_relevant_to_shapes;
 }
 
-void op::Parameter::set_is_relevant_to_shapes(bool is_relevant) {
+void op::v0::Parameter::set_is_relevant_to_shapes(bool is_relevant) {
     m_is_relevant_to_shapes = is_relevant;
 }
 
-ov::Layout op::Parameter::get_layout() const {
+ov::Layout op::v0::Parameter::get_layout() const {
     return ov::layout::get_layout(output(0));
 }
 
-void op::Parameter::set_layout(const ov::Layout& layout) {
+void op::v0::Parameter::set_layout(const ov::Layout& layout) {
     ov::layout::set_layout(output(0), layout);
 }
 
-void op::Parameter::set_partial_shape(const PartialShape& partial_shape) {
OPENVINO_ASSERT(ov::layout::utils::is_compatible(get_layout(), partial_shape), "Can't set partial shape ", partial_shape, @@ -67,26 +66,27 @@ void op::Parameter::set_partial_shape(const PartialShape& partial_shape) { m_partial_shape = partial_shape; } -ov::AttributeAdapter::AttributeAdapter(ParameterVector& ref) : m_ref(ref) {} +AttributeAdapter::AttributeAdapter(ParameterVector& ref) : m_ref(ref) {} -bool ov::AttributeAdapter::visit_attributes(AttributeVisitor& visitor) { +bool AttributeAdapter::visit_attributes(AttributeVisitor& visitor) { size_t size = m_ref.size(); visitor.on_attribute("size", size); if (size != m_ref.size()) { m_ref.resize(size); } - ostringstream index; + std::ostringstream index; for (size_t i = 0; i < size; i++) { index.str(""); index << i; - string id; + std::string id; if (m_ref[i]) { id = visitor.get_registered_node_id(m_ref[i]); } visitor.on_attribute(index.str(), id); if (!m_ref[i]) { - m_ref[i] = ov::as_type_ptr(visitor.get_registered_node(id)); + m_ref[i] = ov::as_type_ptr(visitor.get_registered_node(id)); } } return true; } +} // namespace ov diff --git a/src/core/src/op/prelu.cpp b/src/core/src/op/prelu.cpp index 2bc9380e6d4ab8..782ca120733722 100644 --- a/src/core/src/op/prelu.cpp +++ b/src/core/src/op/prelu.cpp @@ -66,7 +66,7 @@ bool evaluate_prelu(const ngraph::HostTensorPtr& arg, bool ov::op::v0::PRelu::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { OV_OP_SCOPE(v0_PRelu_evaluate); OPENVINO_SUPPRESS_DEPRECATED_START - NGRAPH_CHECK(ngraph::validate_host_tensor_vector(outputs, 1) && ngraph::validate_host_tensor_vector(inputs, 2)); + OPENVINO_ASSERT(ngraph::validate_host_tensor_vector(outputs, 1) && ngraph::validate_host_tensor_vector(inputs, 2)); OPENVINO_SUPPRESS_DEPRECATED_END return prelu::evaluate_prelu(inputs[0], inputs[1], outputs[0]); } diff --git a/src/core/src/op/psroi_pooling.cpp b/src/core/src/op/psroi_pooling.cpp index d45999b87c5b81..b45d9c0943fa29 100644 --- a/src/core/src/op/psroi_pooling.cpp +++ b/src/core/src/op/psroi_pooling.cpp @@ -9,8 +9,6 @@ #include "openvino/core/validation_util.hpp" #include "psroi_pooling_shape_inference.hpp" -using namespace std; - namespace ov { namespace op { namespace v0 { @@ -22,7 +20,7 @@ PSROIPooling::PSROIPooling(const Output& input, const float spatial_scale, int spatial_bins_x, int spatial_bins_y, - const string& mode) + const std::string& mode) : Op({input, coords}), m_output_dim(output_dim), m_group_size(group_size), @@ -61,17 +59,17 @@ void PSROIPooling::validate_and_infer_types() { set_output_type(0, feat_maps_et, output_shapes[0]); } -shared_ptr PSROIPooling::clone_with_new_inputs(const OutputVector& new_args) const { +std::shared_ptr PSROIPooling::clone_with_new_inputs(const OutputVector& new_args) const { OV_OP_SCOPE(v0_PSROIPooling_clone_with_new_inputs); check_new_args_count(this, new_args); - return make_shared(new_args.at(0), - new_args.at(1), - m_output_dim, - m_group_size, - m_spatial_scale, - m_spatial_bins_x, - m_spatial_bins_y, - m_mode); + return std::make_shared(new_args.at(0), + new_args.at(1), + m_output_dim, + m_group_size, + m_spatial_scale, + m_spatial_bins_x, + m_spatial_bins_y, + m_mode); } void PSROIPooling::set_output_dim(size_t output_dim) { diff --git a/src/core/src/op/rdft.cpp b/src/core/src/op/rdft.cpp index 9aa7babae1581a..2627a2245c2fb3 100644 --- a/src/core/src/op/rdft.cpp +++ b/src/core/src/op/rdft.cpp @@ -2,15 +2,13 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph/op/rdft.hpp" +#include "openvino/op/rdft.hpp" #include 
#include "itt.hpp" #include "rdft_shape_inference.hpp" -using namespace std; - ov::op::v9::RDFT::RDFT(const Output& data, const Output& axes) : FFTBase(data, axes) { constructor_validate_and_infer_types(); } @@ -20,11 +18,6 @@ ov::op::v9::RDFT::RDFT(const Output& data, const Output& axes, const constructor_validate_and_infer_types(); } -bool ov::op::v9::RDFT::visit_attributes(AttributeVisitor& visitor) { - OV_OP_SCOPE(v9_RDFT_visit_attributes); - return true; -} - std::shared_ptr ov::op::v9::RDFT::clone_with_new_inputs(const OutputVector& new_args) const { OV_OP_SCOPE(v9_RDFT_clone_with_new_inputs); check_new_args_count(this, new_args); diff --git a/src/core/src/op/reduce_logical_and.cpp b/src/core/src/op/reduce_logical_and.cpp index 59a33cb8716655..6ca1444fd90c08 100644 --- a/src/core/src/op/reduce_logical_and.cpp +++ b/src/core/src/op/reduce_logical_and.cpp @@ -56,8 +56,8 @@ bool evaluate_reduce_logical_and(const HostTensorPtr& data, bool op::v1::ReduceLogicalAnd::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { OV_OP_SCOPE(v1_ReduceLogicalAnd_evaluate); OPENVINO_SUPPRESS_DEPRECATED_START - NGRAPH_CHECK(validate_host_tensor_vector(inputs, 2)); - NGRAPH_CHECK(validate_host_tensor_vector(outputs, 1)); + OPENVINO_ASSERT(validate_host_tensor_vector(inputs, 2)); + OPENVINO_ASSERT(validate_host_tensor_vector(outputs, 1)); const auto& data = inputs[0]; const auto& axes = inputs[1]; diff --git a/src/core/src/op/reduce_logical_or.cpp b/src/core/src/op/reduce_logical_or.cpp index a167d44a884258..dce03e81e40ce0 100644 --- a/src/core/src/op/reduce_logical_or.cpp +++ b/src/core/src/op/reduce_logical_or.cpp @@ -56,8 +56,8 @@ bool evaluate_reduce_logical_or(const HostTensorPtr& data, bool op::v1::ReduceLogicalOr::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { OV_OP_SCOPE(v1_ReduceLogicalOr_evaluate); OPENVINO_SUPPRESS_DEPRECATED_START - NGRAPH_CHECK(validate_host_tensor_vector(inputs, 2)); - NGRAPH_CHECK(validate_host_tensor_vector(outputs, 1)); + OPENVINO_ASSERT(validate_host_tensor_vector(inputs, 2)); + OPENVINO_ASSERT(validate_host_tensor_vector(outputs, 1)); const auto& data = inputs[0]; const auto& axes = inputs[1]; diff --git a/src/core/src/op/reduce_max.cpp b/src/core/src/op/reduce_max.cpp index e7e33b5221c3a1..35f2216bac8bfc 100644 --- a/src/core/src/op/reduce_max.cpp +++ b/src/core/src/op/reduce_max.cpp @@ -62,8 +62,8 @@ shared_ptr op::v1::ReduceMax::clone_with_new_inputs(const OutputVector& ne bool op::v1::ReduceMax::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { OV_OP_SCOPE(v1_ReduceMax_evaluate); OPENVINO_SUPPRESS_DEPRECATED_START - NGRAPH_CHECK(validate_host_tensor_vector(inputs, 2)); - NGRAPH_CHECK(validate_host_tensor_vector(outputs, 1)); + OPENVINO_ASSERT(validate_host_tensor_vector(inputs, 2)); + OPENVINO_ASSERT(validate_host_tensor_vector(outputs, 1)); const auto reduction_axes = get_normalized_axes_from_tensor(inputs[1], inputs[0]->get_partial_shape().rank(), get_friendly_name()); diff --git a/src/core/src/op/reduce_mean.cpp b/src/core/src/op/reduce_mean.cpp index 734efc4d814048..bc425fa0c2095c 100644 --- a/src/core/src/op/reduce_mean.cpp +++ b/src/core/src/op/reduce_mean.cpp @@ -61,8 +61,8 @@ bool evaluate_mean(const HostTensorPtr& arg, const HostTensorPtr& out, const Axi bool op::v1::ReduceMean::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { OV_OP_SCOPE(v1_ReduceMean_evaluate); OPENVINO_SUPPRESS_DEPRECATED_START - 
NGRAPH_CHECK(validate_host_tensor_vector(inputs, 2)); - NGRAPH_CHECK(validate_host_tensor_vector(outputs, 1)); + OPENVINO_ASSERT(validate_host_tensor_vector(inputs, 2)); + OPENVINO_ASSERT(validate_host_tensor_vector(outputs, 1)); const auto reduction_axes = get_normalized_axes_from_tensor(inputs[1], inputs[0]->get_partial_shape().rank(), get_friendly_name()); diff --git a/src/core/src/op/reduce_min.cpp b/src/core/src/op/reduce_min.cpp index f77e5236b51064..7f6e927748bb56 100644 --- a/src/core/src/op/reduce_min.cpp +++ b/src/core/src/op/reduce_min.cpp @@ -61,8 +61,8 @@ shared_ptr op::v1::ReduceMin::clone_with_new_inputs(const OutputVector& ne bool op::v1::ReduceMin::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { OV_OP_SCOPE(v1_ReduceMin_evaluate); OPENVINO_SUPPRESS_DEPRECATED_START - NGRAPH_CHECK(validate_host_tensor_vector(inputs, 2)); - NGRAPH_CHECK(validate_host_tensor_vector(outputs, 1)); + OPENVINO_ASSERT(validate_host_tensor_vector(inputs, 2)); + OPENVINO_ASSERT(validate_host_tensor_vector(outputs, 1)); const auto reduction_axes = get_normalized_axes_from_tensor(inputs[1], inputs[0]->get_partial_shape().rank(), get_friendly_name()); diff --git a/src/core/src/op/reduce_prod.cpp b/src/core/src/op/reduce_prod.cpp index 14747e9b5d6413..dd915427b2415d 100644 --- a/src/core/src/op/reduce_prod.cpp +++ b/src/core/src/op/reduce_prod.cpp @@ -59,8 +59,8 @@ bool evaluate_product(const HostTensorPtr& arg, const HostTensorPtr& out, const bool op::v1::ReduceProd::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { OV_OP_SCOPE(v1_ReduceProd_evaluate); OPENVINO_SUPPRESS_DEPRECATED_START - NGRAPH_CHECK(validate_host_tensor_vector(inputs, 2)); - NGRAPH_CHECK(validate_host_tensor_vector(outputs, 1)); + OPENVINO_ASSERT(validate_host_tensor_vector(inputs, 2)); + OPENVINO_ASSERT(validate_host_tensor_vector(outputs, 1)); const auto reduction_axes = get_normalized_axes_from_tensor(inputs[1], inputs[0]->get_partial_shape().rank(), get_friendly_name()); diff --git a/src/core/src/op/reduce_sum.cpp b/src/core/src/op/reduce_sum.cpp index 47da7ee137bcf6..54797693251ae1 100644 --- a/src/core/src/op/reduce_sum.cpp +++ b/src/core/src/op/reduce_sum.cpp @@ -62,8 +62,8 @@ bool evaluate_sum(const HostTensorPtr& arg, const HostTensorPtr& out, const Axis bool op::v1::ReduceSum::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { OV_OP_SCOPE(v1_ReduceSum_evaluate); OPENVINO_SUPPRESS_DEPRECATED_START - NGRAPH_CHECK(validate_host_tensor_vector(inputs, 2)); - NGRAPH_CHECK(validate_host_tensor_vector(outputs, 1)); + OPENVINO_ASSERT(validate_host_tensor_vector(inputs, 2)); + OPENVINO_ASSERT(validate_host_tensor_vector(outputs, 1)); const auto reduction_axes = get_normalized_axes_from_tensor(inputs[1], inputs[0]->get_partial_shape().rank(), get_friendly_name()); diff --git a/src/core/src/op/relu.cpp b/src/core/src/op/relu.cpp index cbaa57030bae99..d0795cb646e9b9 100644 --- a/src/core/src/op/relu.cpp +++ b/src/core/src/op/relu.cpp @@ -58,7 +58,7 @@ bool evaluate_relu(const HostTensorPtr& arg0, const HostTensorPtr& out) { bool op::Relu::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { OV_OP_SCOPE(v0_Relu_evaluate); OPENVINO_SUPPRESS_DEPRECATED_START - NGRAPH_CHECK(validate_host_tensor_vector(outputs, 1) && validate_host_tensor_vector(inputs, 1)); + OPENVINO_ASSERT(validate_host_tensor_vector(outputs, 1) && validate_host_tensor_vector(inputs, 1)); OPENVINO_SUPPRESS_DEPRECATED_END return 
relu::evaluate_relu(inputs[0], outputs[0]); } diff --git a/src/core/src/op/reshape.cpp b/src/core/src/op/reshape.cpp index 058dff135c0484..52b78bfbf8a37e 100644 --- a/src/core/src/op/reshape.cpp +++ b/src/core/src/op/reshape.cpp @@ -96,9 +96,9 @@ void op::v1::Reshape::validate_and_infer_types() { auto upper_bound = std::make_shared(ub.get_element_type(), ub.get_shape(), ub.data()) ->cast_vector(); shape_can_be_calculated = true; - NGRAPH_CHECK(lower_bound.size() == upper_bound.size()); + OPENVINO_ASSERT(lower_bound.size() == upper_bound.size()); const TensorLabel& labels = get_input_source_output(1).get_tensor().get_value_label(); - NGRAPH_CHECK(labels.empty() || lower_bound.size() == labels.size()); + OPENVINO_ASSERT(labels.empty() || lower_bound.size() == labels.size()); for (size_t i = 0; i < lower_bound.size(); ++i) { NODE_VALIDATION_CHECK(this, @@ -125,7 +125,7 @@ void op::v1::Reshape::validate_and_infer_types() { // or equal to 1 if (output_rank.is_static() && output_rank.get_length() == 0 && !lower_bound.empty()) { reshape_pattern.clear(); - NGRAPH_CHECK(lower_bound.size() == 1); + OPENVINO_ASSERT(lower_bound.size() == 1); NODE_VALIDATION_CHECK(this, lower_bound[0] == 1 && upper_bound[0] == 1, "The value of scalar shape pattern should be equal to 1!"); @@ -182,7 +182,7 @@ bool op::v1::Reshape::evaluate_reshape(const HostTensorVector& outputs, const Ho std::vector output_shape(out_shape_val.size()); calculate_output_shape(reshape_pattern, minus_one_idx, inputs[0]->get_partial_shape(), output_shape); - NGRAPH_CHECK(ov::PartialShape(output_shape).is_static()); + OPENVINO_ASSERT(ov::PartialShape(output_shape).is_static()); outputs[0]->set_shape(ov::PartialShape(output_shape).to_shape()); OPENVINO_SUPPRESS_DEPRECATED_START @@ -194,8 +194,8 @@ bool op::v1::Reshape::evaluate_reshape(const HostTensorVector& outputs, const Ho bool op::v1::Reshape::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { OV_OP_SCOPE(v1_Reshape_evaluate); OPENVINO_SUPPRESS_DEPRECATED_START - NGRAPH_CHECK(validate_host_tensor_vector(inputs, 2)); - NGRAPH_CHECK(validate_host_tensor_vector(outputs, 1)); + OPENVINO_ASSERT(validate_host_tensor_vector(inputs, 2)); + OPENVINO_ASSERT(validate_host_tensor_vector(outputs, 1)); OPENVINO_SUPPRESS_DEPRECATED_END return evaluate_reshape(outputs, inputs); } diff --git a/src/core/src/op/reverse.cpp b/src/core/src/op/reverse.cpp index 4caaa2fb01f4d9..7b2f386cb5661a 100644 --- a/src/core/src/op/reverse.cpp +++ b/src/core/src/op/reverse.cpp @@ -99,7 +99,7 @@ bool op::v1::Reverse::evaluate_reverse(const HostTensorVector& outputs, const Ho GET_AXES(u32, axes, inputs[1]); GET_AXES(u64, axes, inputs[1]); default: - NGRAPH_CHECK(false, "Not supported axes type", inputs[1]->get_element_type()); + OPENVINO_ASSERT(false, "Not supported axes type", inputs[1]->get_element_type()); } } else // Mode::MASK { diff --git a/src/core/src/op/reverse_sequence.cpp b/src/core/src/op/reverse_sequence.cpp index af6ab100e627fc..814a87451a09cd 100644 --- a/src/core/src/op/reverse_sequence.cpp +++ b/src/core/src/op/reverse_sequence.cpp @@ -2,38 +2,35 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph/op/reverse_sequence.hpp" +#include "openvino/op/reverse_sequence.hpp" #include #include -#include #include "itt.hpp" -#include "ngraph/attribute_visitor.hpp" -#include "ngraph/node.hpp" -#include "ngraph/validation_util.hpp" +#include "openvino/core/attribute_visitor.hpp" +#include "openvino/core/validation_util.hpp" +#include "reverse_sequence_shape_inference.hpp" -using 
namespace std; -using namespace ngraph; - -op::ReverseSequence::ReverseSequence(const Output& arg, - const Output& seq_indices, - int64_t batch_axis, - int64_t seq_axis) +namespace ov { +op::v0::ReverseSequence::ReverseSequence(const Output& arg, + const Output& seq_indices, + int64_t batch_axis, + int64_t seq_axis) : Op({arg, seq_indices}), m_batch_axis(batch_axis), m_seq_axis(seq_axis) { constructor_validate_and_infer_types(); } -bool ngraph::op::v0::ReverseSequence::visit_attributes(AttributeVisitor& visitor) { +bool op::v0::ReverseSequence::visit_attributes(AttributeVisitor& visitor) { OV_OP_SCOPE(v0_ReverseSequence_visit_attributes); visitor.on_attribute("batch_axis", m_batch_axis); visitor.on_attribute("seq_axis", m_seq_axis); return true; } -void op::ReverseSequence::validate_and_infer_types() { +void op::v0::ReverseSequence::validate_and_infer_types() { OV_OP_SCOPE(v0_ReverseSequence_validate_and_infer_types); const auto& seq_lengths_et = get_input_element_type(1); NODE_VALIDATION_CHECK(this, @@ -51,26 +48,27 @@ void op::ReverseSequence::validate_and_infer_types() { OPENVINO_SUPPRESS_DEPRECATED_END } -shared_ptr op::ReverseSequence::clone_with_new_inputs(const OutputVector& new_args) const { +std::shared_ptr op::v0::ReverseSequence::clone_with_new_inputs(const OutputVector& new_args) const { OV_OP_SCOPE(v0_ReverseSequence_clone_with_new_inputs); check_new_args_count(this, new_args); - return make_shared(new_args.at(0), new_args.at(1), m_batch_axis, m_seq_axis); + return std::make_shared(new_args.at(0), new_args.at(1), m_batch_axis, m_seq_axis); } -void op::ReverseSequence::set_batch_axis(int64_t batch_axis) { +void op::v0::ReverseSequence::set_batch_axis(int64_t batch_axis) { m_batch_axis = batch_axis; } -size_t op::ReverseSequence::get_batch_axis() const { +size_t op::v0::ReverseSequence::get_batch_axis() const { const auto& data_rank = get_input_partial_shape(0).rank(); OPENVINO_SUPPRESS_DEPRECATED_START return static_cast(ov::normalize_axis(this, m_batch_axis, data_rank)); OPENVINO_SUPPRESS_DEPRECATED_END } -void op::ReverseSequence::set_sequence_axis(int64_t sequence_axis) { +void op::v0::ReverseSequence::set_sequence_axis(int64_t sequence_axis) { m_seq_axis = sequence_axis; OPENVINO_SUPPRESS_DEPRECATED_START m_normalized_seq_axis = ov::normalize_axis(this, m_seq_axis, get_input_partial_shape(0).rank()); OPENVINO_SUPPRESS_DEPRECATED_END } +} // namespace ov diff --git a/src/core/src/op/rnn_cell.cpp b/src/core/src/op/rnn_cell.cpp index 02a85c34faafbf..56c6a41664d89c 100644 --- a/src/core/src/op/rnn_cell.cpp +++ b/src/core/src/op/rnn_cell.cpp @@ -2,19 +2,15 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph/op/rnn_cell.hpp" +#include "openvino/op/rnn_cell.hpp" #include #include "itt.hpp" -#include "ngraph/builder/reshape.hpp" -#include "ngraph/op/constant.hpp" -#include "ngraph/shape.hpp" -#include "ngraph/type/element_type.hpp" +#include "openvino/core/type/element_type.hpp" #include "rnn_cell_shape_inference.hpp" -using namespace std; -using namespace ngraph; +namespace ov { op::v0::RNNCell::RNNCell() { m_activations = {"tanh"}; @@ -26,9 +22,9 @@ op::v0::RNNCell::RNNCell(const Output& X, const Output& W, const Output& R, size_t hidden_size, - const vector& activations, - const vector& activations_alpha, - const vector& activations_beta, + const std::vector& activations, + const std::vector& activations_alpha, + const std::vector& activations_beta, float clip) : RNNCellBase({X, initial_hidden_state, W, R}, hidden_size, clip, activations, activations_alpha, 
activations_beta), m_activation_f{get_activation_function(0)} { @@ -42,9 +38,9 @@ op::v0::RNNCell::RNNCell(const Output& X, const Output& R, const Output& B, size_t hidden_size, - const vector& activations, - const vector& activations_alpha, - const vector& activations_beta, + const std::vector& activations, + const std::vector& activations_alpha, + const std::vector& activations_beta, float clip) : RNNCellBase({X, initial_hidden_state, W, R, B}, hidden_size, @@ -88,34 +84,35 @@ void op::v0::RNNCell::validate_and_infer_types() { Output op::v0::RNNCell::get_default_bias_input() const { return Output{op::v0::Constant::create(get_input_element_type(0), ov::Shape{s_gates_count * get_hidden_size()}, - vector(s_gates_count * get_hidden_size(), 0.f))}; + std::vector(s_gates_count * get_hidden_size(), 0.f))}; } -shared_ptr op::v0::RNNCell::clone_with_new_inputs(const OutputVector& new_args) const { +std::shared_ptr op::v0::RNNCell::clone_with_new_inputs(const OutputVector& new_args) const { OV_OP_SCOPE(v0_RNNCell_clone_with_new_inputs); check_new_args_count(this, new_args); if (new_args.size() == 4) { - return make_shared(new_args.at(0), - new_args.at(1), - new_args.at(2), - new_args.at(3), - get_hidden_size(), - get_activations(), - get_activations_alpha(), - get_activations_beta(), - get_clip()); + return std::make_shared(new_args.at(0), + new_args.at(1), + new_args.at(2), + new_args.at(3), + get_hidden_size(), + get_activations(), + get_activations_alpha(), + get_activations_beta(), + get_clip()); } else if (new_args.size() == 5) { - return make_shared(new_args.at(0), - new_args.at(1), - new_args.at(2), - new_args.at(3), - new_args.at(4), - get_hidden_size(), - get_activations(), - get_activations_alpha(), - get_activations_beta(), - get_clip()); + return std::make_shared(new_args.at(0), + new_args.at(1), + new_args.at(2), + new_args.at(3), + new_args.at(4), + get_hidden_size(), + get_activations(), + get_activations_alpha(), + get_activations_beta(), + get_clip()); } else { OPENVINO_THROW("Incorrect number of new arguments"); } } +} // namespace ov diff --git a/src/core/src/op/rnn_sequence.cpp b/src/core/src/op/rnn_sequence.cpp index 8e33461510e2bb..3e599bd75b21f1 100644 --- a/src/core/src/op/rnn_sequence.cpp +++ b/src/core/src/op/rnn_sequence.cpp @@ -2,20 +2,17 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph/op/rnn_sequence.hpp" +#include "openvino/op/rnn_sequence.hpp" #include #include #include #include "itt.hpp" -#include "ngraph/op/util/recurrent_sequence.hpp" -#include "ngraph/opsets/opset4.hpp" +#include "openvino/op/util/recurrent_sequence.hpp" #include "rnn_sequence_shape_inference.hpp" -using namespace std; -using namespace ngraph; - +namespace ov { op::v5::RNNSequence::RNNSequence() : m_direction(op::RecurrentSequenceDirection::FORWARD) {} op::v5::RNNSequence::RNNSequence(const Output& X, @@ -73,19 +70,20 @@ bool op::v5::RNNSequence::visit_attributes(AttributeVisitor& visitor) { return op::util::RNNCellBase::visit_attributes(visitor); } -shared_ptr op::v5::RNNSequence::clone_with_new_inputs(const ngraph::OutputVector& new_args) const { +std::shared_ptr op::v5::RNNSequence::clone_with_new_inputs(const OutputVector& new_args) const { OV_OP_SCOPE(v5_RNNSequence_clone_with_new_inputs); check_new_args_count(this, new_args); - return make_shared(new_args.at(0), - new_args.at(1), - new_args.at(2), - new_args.at(3), - new_args.at(4), - new_args.at(5), - m_hidden_size, - m_direction, - m_activations, - m_activations_alpha, - m_activations_beta, - m_clip); + return 
std::make_shared(new_args.at(0), + new_args.at(1), + new_args.at(2), + new_args.at(3), + new_args.at(4), + new_args.at(5), + m_hidden_size, + m_direction, + m_activations, + m_activations_alpha, + m_activations_beta, + m_clip); } +} // namespace ov diff --git a/src/core/src/op/roi_pooling.cpp b/src/core/src/op/roi_pooling.cpp index 7b508e46a14338..678c58fe8221ca 100644 --- a/src/core/src/op/roi_pooling.cpp +++ b/src/core/src/op/roi_pooling.cpp @@ -8,8 +8,6 @@ #include "openvino/core/validation_util.hpp" #include "roi_pooling_shape_inference.hpp" -using namespace std; - namespace ov { namespace op { namespace v0 { @@ -17,7 +15,7 @@ ROIPooling::ROIPooling(const Output& input, const Output& coords, const ov::Shape& output_size, const float spatial_scale, - const string& method) + const std::string& method) : Op({input, coords}), m_output_size(output_size), m_spatial_scale(spatial_scale), @@ -64,10 +62,10 @@ void ROIPooling::validate_and_infer_types() { } } -shared_ptr ROIPooling::clone_with_new_inputs(const OutputVector& new_args) const { +std::shared_ptr ROIPooling::clone_with_new_inputs(const OutputVector& new_args) const { OV_OP_SCOPE(v0_ROIPooling_clone_with_new_inputs); check_new_args_count(this, new_args); - return make_shared(new_args.at(0), new_args.at(1), m_output_size, m_spatial_scale, m_method); + return std::make_shared(new_args.at(0), new_args.at(1), m_output_size, m_spatial_scale, m_method); } bool ROIPooling::visit_attributes(AttributeVisitor& visitor) { diff --git a/src/core/src/op/scatter_nd_update.cpp b/src/core/src/op/scatter_nd_update.cpp index 2d49046ea115cb..1195b76fa3f7dc 100644 --- a/src/core/src/op/scatter_nd_update.cpp +++ b/src/core/src/op/scatter_nd_update.cpp @@ -82,10 +82,10 @@ bool evaluate_scatter(const HostTensorPtr& arg0, bool op::v3::ScatterNDUpdate::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { OV_OP_SCOPE(v3_ScatterNDUpdate_evaluate); - NGRAPH_CHECK(!inputs.empty()); + OPENVINO_ASSERT(!inputs.empty()); OPENVINO_SUPPRESS_DEPRECATED_START - NGRAPH_CHECK(validate_host_tensor_vector(inputs, 3)); - NGRAPH_CHECK(validate_host_tensor_vector(outputs, 1)); + OPENVINO_ASSERT(validate_host_tensor_vector(inputs, 3)); + OPENVINO_ASSERT(validate_host_tensor_vector(outputs, 1)); OPENVINO_SUPPRESS_DEPRECATED_END return scatter::evaluate_scatter(inputs[0], inputs[1], inputs[2], outputs[0]); diff --git a/src/core/src/op/scatter_update.cpp b/src/core/src/op/scatter_update.cpp index 32c15992c8b7c0..1f531b9a2a42a9 100644 --- a/src/core/src/op/scatter_update.cpp +++ b/src/core/src/op/scatter_update.cpp @@ -55,7 +55,7 @@ bool op::v3::ScatterUpdate::evaluate_scatter_update(const HostTensorVector& outp const auto elem_size = data->get_element_type().size(); out->set_shape(data->get_shape()); - NGRAPH_CHECK(axis->get_element_type().is_integral_number(), "axis element type is not integral data type"); + OPENVINO_ASSERT(axis->get_element_type().is_integral_number(), "axis element type is not integral data type"); OPENVINO_SUPPRESS_DEPRECATED_START int64_t axis_val = host_tensor_2_vector(axis)[0]; diff --git a/src/core/src/op/select.cpp b/src/core/src/op/select.cpp index 7a4668179bfdc8..5108aecdedd9b4 100644 --- a/src/core/src/op/select.cpp +++ b/src/core/src/op/select.cpp @@ -118,8 +118,8 @@ bool evaluate_select(const HostTensorVector& output_values, bool op::v1::Select::evaluate(const HostTensorVector& output_values, const HostTensorVector& input_values) const { OV_OP_SCOPE(v1_Select_evaluate); OPENVINO_SUPPRESS_DEPRECATED_START - 
NGRAPH_CHECK(validate_host_tensor_vector(input_values, 3)); - NGRAPH_CHECK(validate_host_tensor_vector(output_values, 1)); + OPENVINO_ASSERT(validate_host_tensor_vector(input_values, 3)); + OPENVINO_ASSERT(validate_host_tensor_vector(output_values, 1)); OPENVINO_SUPPRESS_DEPRECATED_END const auto autob = get_auto_broadcast(); return detail::evaluate_select(output_values, input_values, autob, output_values[0]->get_element_type()); diff --git a/src/core/src/op/selu.cpp b/src/core/src/op/selu.cpp index 76bae32ecb431d..7ec6bd6970ad9b 100644 --- a/src/core/src/op/selu.cpp +++ b/src/core/src/op/selu.cpp @@ -2,12 +2,11 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph/op/selu.hpp" +#include "openvino/op/selu.hpp" #include "itt.hpp" -using namespace std; -using namespace ngraph; +namespace ov { op::v0::Selu::Selu(const Output& data, const Output& alpha, const Output& lambda) : Op({data, alpha, lambda}) { @@ -40,13 +39,9 @@ void op::v0::Selu::validate_and_infer_types() { set_output_type(0, result_et, get_input_partial_shape(0)); } -bool op::v0::Selu::visit_attributes(AttributeVisitor& visitor) { - OV_OP_SCOPE(v0_Selu_visit_attributes); - return true; -} - -shared_ptr op::v0::Selu::clone_with_new_inputs(const OutputVector& new_args) const { +std::shared_ptr op::v0::Selu::clone_with_new_inputs(const OutputVector& new_args) const { OV_OP_SCOPE(v0_Selu_clone_with_new_inputs); check_new_args_count(this, new_args); - return make_shared(new_args.at(0), new_args.at(1), new_args.at(2)); + return std::make_shared(new_args.at(0), new_args.at(1), new_args.at(2)); } +} // namespace ov diff --git a/src/core/src/op/shape_of.cpp b/src/core/src/op/shape_of.cpp index 673b6aa1abf22a..3fa225cea017fc 100644 --- a/src/core/src/op/shape_of.cpp +++ b/src/core/src/op/shape_of.cpp @@ -114,7 +114,7 @@ bool constant_fold_shape_of(Node* shape_of_node, Output& replacement, cons } bool evaluate_bound_shape(const Node* shape_of_node, ov::TensorVector& output_values, bool is_upper) { - NGRAPH_CHECK(shape_of_node, output_values.size() == 1); + OPENVINO_ASSERT(shape_of_node, output_values.size() == 1); const auto& input_partial_shape = shape_of_node->get_input_partial_shape(0); if (input_partial_shape.rank().is_dynamic()) return false; @@ -126,7 +126,7 @@ bool evaluate_bound_shape(const Node* shape_of_node, ov::TensorVector& output_va pshape_up[i] = Dimension(interval.get_max_val()).is_dynamic() ? Dimension(interval.get_max_val() - 1) : interval.get_max_val(); } - NGRAPH_CHECK(pshape_up.is_static() && pshape_low.is_static()); + OPENVINO_ASSERT(pshape_up.is_static() && pshape_low.is_static()); const auto output_et = output_values[0].get_element_type(); if (pshape_low.to_shape() == pshape_up.to_shape()) { @@ -156,7 +156,7 @@ bool evaluate_bound_shape(const Node* shape_of_node, ov::TensorVector& output_va bool evaluate_label(const Node* shape_of_node, TensorLabelVector& output_labels) { const auto& shape = shape_of_node->get_input_partial_shape(0); - NGRAPH_CHECK(shape.rank().is_static()); // sanity check. at this point value propagation was successful + OPENVINO_ASSERT(shape.rank().is_static()); // sanity check. 
at this point value propagation was successful output_labels[0].reserve(shape.size()); bool label_is_set = false; for (const auto& d : shape) { @@ -173,8 +173,8 @@ bool evaluate_label(const Node* shape_of_node, TensorLabelVector& output_labels) bool op::v3::ShapeOf::evaluate(const HostTensorVector& output_values, const HostTensorVector& input_values) const { OV_OP_SCOPE(v3_ShapeOf_evaluate); OPENVINO_SUPPRESS_DEPRECATED_START - NGRAPH_CHECK(validate_host_tensor_vector(input_values, 1)); - NGRAPH_CHECK(validate_host_tensor_vector(output_values, 1)); + OPENVINO_ASSERT(validate_host_tensor_vector(input_values, 1)); + OPENVINO_ASSERT(validate_host_tensor_vector(output_values, 1)); OPENVINO_SUPPRESS_DEPRECATED_END return shape_of::evaluate_shape_of(output_values[0], input_values[0]); } @@ -241,20 +241,20 @@ shared_ptr op::v0::ShapeOf::clone_with_new_inputs(const OutputVector& new_ OV_OP_SCOPE(v0_ShapeOf_clone_with_new_inputs); check_new_args_count(this, new_args); auto new_shape_of = make_shared(new_args.at(0)); - NGRAPH_CHECK(new_shape_of.get(), - new_shape_of != nullptr, - "Cannot clone ", - description(), - " operation with name ", - get_friendly_name()); + OPENVINO_ASSERT(new_shape_of.get(), + new_shape_of != nullptr, + "Cannot clone ", + description(), + " operation with name ", + get_friendly_name()); return new_shape_of; } bool op::v0::ShapeOf::evaluate(const HostTensorVector& output_values, const HostTensorVector& input_values) const { OV_OP_SCOPE(v0_ShapeOf_evaluate); OPENVINO_SUPPRESS_DEPRECATED_START - NGRAPH_CHECK(validate_host_tensor_vector(input_values, 1)); - NGRAPH_CHECK(validate_host_tensor_vector(output_values, 1)); + OPENVINO_ASSERT(validate_host_tensor_vector(input_values, 1)); + OPENVINO_ASSERT(validate_host_tensor_vector(output_values, 1)); OPENVINO_SUPPRESS_DEPRECATED_END return shape_of::evaluate_shape_of(output_values[0], input_values[0]); } diff --git a/src/core/src/op/sigmoid.cpp b/src/core/src/op/sigmoid.cpp index 4eb8b4e02d6d48..c5c872ebc80bb7 100644 --- a/src/core/src/op/sigmoid.cpp +++ b/src/core/src/op/sigmoid.cpp @@ -59,7 +59,7 @@ bool evaluate_sigmoid(const HostTensorPtr& arg0, const HostTensorPtr& out) { bool ov::op::v0::Sigmoid::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { OV_OP_SCOPE(v0_Sigmoid_evaluate); OPENVINO_SUPPRESS_DEPRECATED_START - NGRAPH_CHECK(validate_host_tensor_vector(outputs, 1) && validate_host_tensor_vector(inputs, 1)); + OPENVINO_ASSERT(validate_host_tensor_vector(outputs, 1) && validate_host_tensor_vector(inputs, 1)); OPENVINO_SUPPRESS_DEPRECATED_END return sigmoid::evaluate_sigmoid(inputs[0], outputs[0]); } diff --git a/src/core/src/op/sign.cpp b/src/core/src/op/sign.cpp index a9e9d51b946095..8044ffef21d6f6 100644 --- a/src/core/src/op/sign.cpp +++ b/src/core/src/op/sign.cpp @@ -60,7 +60,7 @@ bool evaluate_sign(const HostTensorPtr& arg0, const HostTensorPtr& out, const si bool op::Sign::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { OV_OP_SCOPE(v0_Sign_evaluate); OPENVINO_SUPPRESS_DEPRECATED_START - NGRAPH_CHECK(validate_host_tensor_vector(outputs, 1) && validate_host_tensor_vector(inputs, 1)); + OPENVINO_ASSERT(validate_host_tensor_vector(outputs, 1) && validate_host_tensor_vector(inputs, 1)); OPENVINO_SUPPRESS_DEPRECATED_END return signop::evaluate_sign(inputs[0], outputs[0], shape_size(inputs[0]->get_shape())); } diff --git a/src/core/src/op/sink.cpp b/src/core/src/op/sink.cpp index 3ff50218b68f48..ae7bdc85245632 100644 --- a/src/core/src/op/sink.cpp +++ 
b/src/core/src/op/sink.cpp @@ -2,8 +2,6 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph/op/sink.hpp" +#include "openvino/op/sink.hpp" -using namespace ngraph; - -op::Sink::~Sink() = default; +ov::op::Sink::~Sink() = default; diff --git a/src/core/src/op/softmax.cpp b/src/core/src/op/softmax.cpp index 975a83dd932f6a..126c6d82fc7252 100644 --- a/src/core/src/op/softmax.cpp +++ b/src/core/src/op/softmax.cpp @@ -76,7 +76,7 @@ shared_ptr op::v1::Softmax::clone_with_new_inputs(const OutputVector& new_ bool op::v1::Softmax::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { OV_OP_SCOPE(v1_Softmax_evaluate); OPENVINO_SUPPRESS_DEPRECATED_START - NGRAPH_CHECK(validate_host_tensor_vector(outputs, 1) && validate_host_tensor_vector(inputs, 1)); + OPENVINO_ASSERT(validate_host_tensor_vector(outputs, 1) && validate_host_tensor_vector(inputs, 1)); OPENVINO_SUPPRESS_DEPRECATED_END outputs[0]->set_unary(inputs[0]); return evaluate_softmax(inputs[0], outputs[0], AxisSet{m_axis}); @@ -133,16 +133,16 @@ shared_ptr op::v8::Softmax::clone_with_new_inputs(const OutputVector& new_ bool op::v8::Softmax::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { OV_OP_SCOPE(v8_Softmax_evaluate); OPENVINO_SUPPRESS_DEPRECATED_START - NGRAPH_CHECK(validate_host_tensor_vector(outputs, 1) && validate_host_tensor_vector(inputs, 1)); + OPENVINO_ASSERT(validate_host_tensor_vector(outputs, 1) && validate_host_tensor_vector(inputs, 1)); OPENVINO_SUPPRESS_DEPRECATED_END outputs[0]->set_unary(inputs[0]); auto rank = static_cast(inputs[0]->get_shape().size()); - NGRAPH_CHECK(-rank <= m_axis && m_axis < rank, - "Reduction axis (", - m_axis, - ") is out of bounds (argument shape: ", - inputs[0]->get_shape(), - ")."); + OPENVINO_ASSERT(-rank <= m_axis && m_axis < rank, + "Reduction axis (", + m_axis, + ") is out of bounds (argument shape: ", + inputs[0]->get_shape(), + ")."); OPENVINO_SUPPRESS_DEPRECATED_START size_t axis = static_cast(ov::normalize_axis(this->description(), m_axis, rank)); OPENVINO_SUPPRESS_DEPRECATED_END diff --git a/src/core/src/op/softplus.cpp b/src/core/src/op/softplus.cpp index ee59d23623b8fa..70d1e63b17c1ee 100644 --- a/src/core/src/op/softplus.cpp +++ b/src/core/src/op/softplus.cpp @@ -73,7 +73,7 @@ bool evaluate_softplus(const HostTensorPtr& arg, const HostTensorPtr& out) { bool op::v4::SoftPlus::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { OV_OP_SCOPE(v4_SoftPlus_evaluate); OPENVINO_SUPPRESS_DEPRECATED_START - NGRAPH_CHECK(validate_host_tensor_vector(outputs, 1) && validate_host_tensor_vector(inputs, 1)); + OPENVINO_ASSERT(validate_host_tensor_vector(outputs, 1) && validate_host_tensor_vector(inputs, 1)); OPENVINO_SUPPRESS_DEPRECATED_END return softplus::evaluate_softplus(inputs[0], outputs[0]); } diff --git a/src/core/src/op/softsign.cpp b/src/core/src/op/softsign.cpp index 98e63984a8ff4c..9ec3e8038ea3f7 100644 --- a/src/core/src/op/softsign.cpp +++ b/src/core/src/op/softsign.cpp @@ -1,15 +1,15 @@ // Copyright (C) 2018-2023 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // -#include "openvino/reference/softsign.hpp" +#include "openvino/op/softsign.hpp" #include #include "itt.hpp" #include "openvino/core/attribute_visitor.hpp" -#include "openvino/op/softsign.hpp" +#include "openvino/core/shape_util.hpp" +#include "openvino/reference/softsign.hpp" #include "openvino/runtime/tensor.hpp" -#include "shape_util.hpp" namespace { template diff --git a/src/core/src/op/squared_difference.cpp 
b/src/core/src/op/squared_difference.cpp index 2c3c94dda99368..700e0a4a809d12 100644 --- a/src/core/src/op/squared_difference.cpp +++ b/src/core/src/op/squared_difference.cpp @@ -2,12 +2,10 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph/op/squared_difference.hpp" +#include "openvino/op/squared_difference.hpp" #include "itt.hpp" -using namespace std; - // ------------------------------ v0 ------------------------------------------- ov::op::v0::SquaredDifference::SquaredDifference(const Output& arg0, @@ -17,8 +15,8 @@ ov::op::v0::SquaredDifference::SquaredDifference(const Output& arg0, constructor_validate_and_infer_types(); } -shared_ptr ov::op::v0::SquaredDifference::clone_with_new_inputs(const OutputVector& new_args) const { +std::shared_ptr ov::op::v0::SquaredDifference::clone_with_new_inputs(const OutputVector& new_args) const { OV_OP_SCOPE(v0_SquaredDifference_clone_with_new_inputs); check_new_args_count(this, new_args); - return make_shared(new_args.at(0), new_args.at(1), this->get_autob()); + return std::make_shared(new_args.at(0), new_args.at(1), this->get_autob()); } diff --git a/src/core/src/op/squeeze.cpp b/src/core/src/op/squeeze.cpp index f96f628413acb1..50bf9af02d00a5 100644 --- a/src/core/src/op/squeeze.cpp +++ b/src/core/src/op/squeeze.cpp @@ -61,8 +61,8 @@ OPENVINO_SUPPRESS_DEPRECATED_START bool op::v0::Squeeze::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { OV_OP_SCOPE(v0_Squeeze_evaluate); OPENVINO_SUPPRESS_DEPRECATED_START - NGRAPH_CHECK(validate_host_tensor_vector(inputs, inputs.size())); - NGRAPH_CHECK(validate_host_tensor_vector(outputs, 1)); + OPENVINO_ASSERT(validate_host_tensor_vector(inputs, inputs.size())); + OPENVINO_ASSERT(validate_host_tensor_vector(outputs, 1)); OPENVINO_SUPPRESS_DEPRECATED_END if (has_evaluate()) { diff --git a/src/core/src/op/strided_slice.cpp b/src/core/src/op/strided_slice.cpp index 79647177e654d3..f2e7493804957f 100644 --- a/src/core/src/op/strided_slice.cpp +++ b/src/core/src/op/strided_slice.cpp @@ -61,8 +61,8 @@ shared_ptr calculate_default_strides(const Output& begin, const Outp strides_length = end_pshape[0].get_length(); } else // dynamic case { - NGRAPH_CHECK(begin_pshape.rank().is_static() && begin_pshape.rank().get_length() == 1, - "Begin input must be 1D"); + OPENVINO_ASSERT(begin_pshape.rank().is_static() && begin_pshape.rank().get_length() == 1, + "Begin input must be 1D"); return std::make_shared(op::Constant::create(element::i64, {}, {1}), std::make_shared(begin)); } @@ -237,8 +237,8 @@ bool op::v1::StridedSlice::evaluate(const HostTensorVector& output_values, const OV_OP_SCOPE(v1_StridedSlice_evaluate); // FIXME: 4th input is optional, but it is required by the following code OPENVINO_SUPPRESS_DEPRECATED_START - NGRAPH_CHECK(validate_host_tensor_vector(input_values, 4)); - NGRAPH_CHECK(validate_host_tensor_vector(output_values, 1)); + OPENVINO_ASSERT(validate_host_tensor_vector(input_values, 4)); + OPENVINO_ASSERT(validate_host_tensor_vector(output_values, 1)); OPENVINO_SUPPRESS_DEPRECATED_END return strided_slice::evaluate_strided_slice(input_values[0], input_values[1], diff --git a/src/core/src/op/tensor_iterator.cpp b/src/core/src/op/tensor_iterator.cpp index 1553b74fedd361..dbee0d02f976e7 100644 --- a/src/core/src/op/tensor_iterator.cpp +++ b/src/core/src/op/tensor_iterator.cpp @@ -2,16 +2,13 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph/op/tensor_iterator.hpp" +#include "openvino/op/tensor_iterator.hpp" -#include "itt.hpp" -#include 
"ngraph/factory.hpp" -#include "ngraph/graph_util.hpp" -#include "ngraph/specialize_function.hpp" +#include -using namespace std; -using namespace ngraph; +#include "itt.hpp" +namespace ov { op::v0::TensorIterator::TensorIterator(const OutputVector& values) : op::util::SubGraphOp(values) {} bool op::v0::TensorIterator::visit_attributes(AttributeVisitor& visitor) { @@ -33,7 +30,7 @@ void op::v0::TensorIterator::revalidate_and_infer_types_for_body_ops() { while (nodes_to_do.size() > 0) { auto node = nodes_to_do.top(); if (nodes_done.count(node) == 0) { - NGRAPH_CHECK(ov::as_type_ptr(node) == nullptr, "No nested TensorIterator"); + OPENVINO_ASSERT(ov::as_type_ptr(node) == nullptr, "No nested TensorIterator"); bool can_add = true; size_t arg_count = node->get_input_size(); for (size_t i = 0; i < arg_count; ++i) { @@ -189,7 +186,7 @@ void op::v0::TensorIterator::try_to_set_num_iterations_if_no_slice_inputs() { std::shared_ptr op::v0::TensorIterator::clone_with_new_inputs(const OutputVector& new_args) const { OV_OP_SCOPE(v0_TensorIterator_clone_with_new_inputs); - auto op = make_shared(); + auto op = std::make_shared(); op->set_arguments(new_args); op->set_output_size(m_output_descriptions.size()); @@ -205,3 +202,4 @@ std::shared_ptr op::v0::TensorIterator::clone_with_new_inputs(const Output op->validate_and_infer_types(); return op; } +} // namespace ov diff --git a/src/core/src/op/util/recurrent_sequence.cpp b/src/core/src/op/util/recurrent_sequence.cpp index fa126874fd9547..4bef269d4b599b 100644 --- a/src/core/src/op/util/recurrent_sequence.cpp +++ b/src/core/src/op/util/recurrent_sequence.cpp @@ -2,33 +2,32 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph/op/util/recurrent_sequence.hpp" +#include "openvino/op/util/recurrent_sequence.hpp" -using namespace std; - -void ngraph::op::util::validate_seq_input_rank_dimension(const std::vector& input) { +void ov::op::util::validate_seq_input_rank_dimension(const std::vector& input) { enum { X, initial_hidden_state, sequence_lengths, W, R, B }; // Verify static ranks for all inputs for (size_t i = 0; i < input.size(); i++) { - NGRAPH_CHECK((input[i].rank().is_static()), "RNN Sequence supports only static rank for input tensors."); + OPENVINO_ASSERT((input[i].rank().is_static()), "RNN Sequence supports only static rank for input tensors."); } for (size_t i = 0; i < input.size(); i++) { if (i == B) { // verify B input dimension which is 2D - NGRAPH_CHECK((input[i].rank().get_length() == 2), "RNN Sequence B input tensor dimension is not correct."); + OPENVINO_ASSERT((input[i].rank().get_length() == 2), + "RNN Sequence B input tensor dimension is not correct."); } else if (i == sequence_lengths) { // verify sequence_length input dimension which is 1D - NGRAPH_CHECK((input[i].rank().get_length() == 1), - "RNN Sequence sequence_lengths input tensor dimension is not correct."); + OPENVINO_ASSERT((input[i].rank().get_length() == 1), + "RNN Sequence sequence_lengths input tensor dimension is not correct."); } else { // Verify all other input dimensions which are 3D tensor types - NGRAPH_CHECK((input[i].rank().get_length() == 3), - "RNN Sequence input tensor dimension is not correct for ", - i, - " input parameter. Current input length: ", - input[i].rank().get_length()); + OPENVINO_ASSERT((input[i].rank().get_length() == 3), + "RNN Sequence input tensor dimension is not correct for ", + i, + " input parameter. 
Current input length: ", + input[i].rank().get_length()); } } @@ -36,5 +35,5 @@ void ngraph::op::util::validate_seq_input_rank_dimension(const std::vectorget_element_type().is_integral_number(), "axis element type is not integral data type"); - NGRAPH_CHECK(split_lengths_tensor->get_element_type().is_integral_number(), - "split_lengths element type is not integral data type"); + OPENVINO_ASSERT(axis_tensor->get_element_type().is_integral_number(), + "axis element type is not integral data type"); + OPENVINO_ASSERT(split_lengths_tensor->get_element_type().is_integral_number(), + "split_lengths element type is not integral data type"); OPENVINO_SUPPRESS_DEPRECATED_START int64_t axis = host_tensor_2_vector(axis_tensor)[0]; diff --git a/src/core/src/op/xor.cpp b/src/core/src/op/xor.cpp index a571e47618e9d1..eafe1fe465e315 100644 --- a/src/core/src/op/xor.cpp +++ b/src/core/src/op/xor.cpp @@ -6,6 +6,7 @@ #include "element_visitor.hpp" #include "itt.hpp" +#include "openvino/core/shape_util.hpp" #include "openvino/op/logical_xor.hpp" #include "openvino/reference/xor.hpp" #include "utils.hpp" diff --git a/src/core/src/runtime/itensor.cpp b/src/core/src/runtime/itensor.cpp index 9149e3cb5c9bf2..6d966566c65610 100644 --- a/src/core/src/runtime/itensor.cpp +++ b/src/core/src/runtime/itensor.cpp @@ -7,10 +7,10 @@ #include #include "openvino/core/except.hpp" +#include "openvino/core/shape_util.hpp" #include "openvino/runtime/allocator.hpp" #include "openvino/runtime/iremote_tensor.hpp" #include "openvino/runtime/properties.hpp" -#include "shape_util.hpp" namespace ov { diff --git a/src/core/src/runtime/ov_tensor.cpp b/src/core/src/runtime/ov_tensor.cpp index ab9c9eebe443f7..f9182f5ea6b770 100644 --- a/src/core/src/runtime/ov_tensor.cpp +++ b/src/core/src/runtime/ov_tensor.cpp @@ -7,12 +7,12 @@ #include "openvino/core/except.hpp" #include "openvino/core/node_output.hpp" #include "openvino/core/shape.hpp" +#include "openvino/core/shape_util.hpp" #include "openvino/core/strides.hpp" #include "openvino/runtime/itensor.hpp" #include "openvino/runtime/make_tensor.hpp" #include "openvino/runtime/remote_tensor.hpp" #include "openvino/runtime/tensor.hpp" -#include "shape_util.hpp" namespace ov { diff --git a/src/core/src/shape_util.cpp b/src/core/src/shape_util.cpp index 411adb7ba5b6a5..9ce8512d7a7797 100644 --- a/src/core/src/shape_util.cpp +++ b/src/core/src/shape_util.cpp @@ -7,7 +7,7 @@ #include #include "openvino/core/partial_shape.hpp" -#include "shape_util.hpp" +#include "openvino/core/shape_util.hpp" using namespace ngraph; diff --git a/src/core/src/tensor_conversion_util.cpp b/src/core/src/tensor_conversion_util.cpp index 4804ea96109a19..4e0c40d3f21aa2 100644 --- a/src/core/src/tensor_conversion_util.cpp +++ b/src/core/src/tensor_conversion_util.cpp @@ -4,7 +4,7 @@ #include "tensor_conversion_util.hpp" -#include "shape_util.hpp" +#include "openvino/core/shape_util.hpp" namespace ov { namespace util { diff --git a/src/plugins/intel_cpu/src/nodes/reference.cpp b/src/plugins/intel_cpu/src/nodes/reference.cpp index 1d830ff0728e82..b42dc99b390fb4 100644 --- a/src/plugins/intel_cpu/src/nodes/reference.cpp +++ b/src/plugins/intel_cpu/src/nodes/reference.cpp @@ -2,14 +2,16 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "reference.h" -#include -#include #include -#include "openvino/runtime/tensor.hpp" -#include "common/blocked_desc_creator.h" + +#include #include + +#include "common/blocked_desc_creator.h" #include "common/cpu_memcpy.h" +#include "openvino/core/shape_util.hpp" +#include 
"openvino/runtime/tensor.hpp" +#include "reference.h" using namespace dnnl; using namespace InferenceEngine; diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/custom_op_internal_dyn.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/custom_op_internal_dyn.cpp index eee10595ef4cd7..fb5ce680f5ff63 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/custom_op_internal_dyn.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/custom_op_internal_dyn.cpp @@ -2,11 +2,11 @@ // SPDX-License-Identifier: Apache-2.0 // +#include +#include #include -#include #include -#include -#include + #include "ngraph_functions/utils/ngraph_helpers.hpp" using namespace ov::test; diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/custom_op_insert_convert_i64.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/custom_op_insert_convert_i64.cpp index 0e75077664e49c..c04b079b19445b 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/custom_op_insert_convert_i64.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/custom_op_insert_convert_i64.cpp @@ -2,11 +2,11 @@ // SPDX-License-Identifier: Apache-2.0 // +#include +#include #include -#include #include -#include -#include + #include "test_utils/cpu_test_utils.hpp" using namespace ov::test; diff --git a/src/plugins/template/backend/int_executable.cpp b/src/plugins/template/backend/int_executable.cpp index f866d77b6528b6..7bf130cad87b83 100644 --- a/src/plugins/template/backend/int_executable.cpp +++ b/src/plugins/template/backend/int_executable.cpp @@ -9,12 +9,12 @@ #include "evaluates_map.hpp" #include "openvino/core/except.hpp" +#include "openvino/core/shape_util.hpp" #include "openvino/op/parameter.hpp" #include "openvino/op/result.hpp" #include "openvino/op/util/op_types.hpp" #include "openvino/op/util/variable_context.hpp" #include "perf_counter.hpp" -#include "shape_util.hpp" class TemporaryOverrideOutputs { std::shared_ptr model; diff --git a/src/plugins/template/backend/ops/if.cpp b/src/plugins/template/backend/ops/if.cpp index 6a64bcd1da1eac..dc3a8074996f16 100644 --- a/src/plugins/template/backend/ops/if.cpp +++ b/src/plugins/template/backend/ops/if.cpp @@ -6,7 +6,7 @@ #include "evaluate_node.hpp" #include "evaluates_map.hpp" -#include "shape_util.hpp" +#include "openvino/core/shape_util.hpp" namespace if_op { bool call(ov::TensorVector& func_outputs,