From 14ee99ce1d8a035846170e9a56a8d766ec48dba1 Mon Sep 17 00:00:00 2001 From: Ilya Churaev Date: Mon, 6 Sep 2021 17:40:18 +0300 Subject: [PATCH] Moved operations M-P to ov namespace (#7354) * Moved operations M-P to ov namespace * Fixed code style * Fixed build * Fixed comments --- ngraph/core/include/ngraph/op/matmul.hpp | 40 +- ngraph/core/include/ngraph/op/matrix_nms.hpp | 83 +--- ngraph/core/include/ngraph/op/max.hpp | 19 +- ngraph/core/include/ngraph/op/max_pool.hpp | 117 +---- ngraph/core/include/ngraph/op/maximum.hpp | 24 +- ngraph/core/include/ngraph/op/min.hpp | 21 +- ngraph/core/include/ngraph/op/minimum.hpp | 24 +- ngraph/core/include/ngraph/op/mish.hpp | 22 +- ngraph/core/include/ngraph/op/mod.hpp | 20 +- .../core/include/ngraph/op/multiclass_nms.hpp | 57 +-- ngraph/core/include/ngraph/op/multiply.hpp | 24 +- ngraph/core/include/ngraph/op/mvn.hpp | 131 +----- ngraph/core/include/ngraph/op/negative.hpp | 18 +- .../include/ngraph/op/non_max_suppression.hpp | 400 +---------------- ngraph/core/include/ngraph/op/non_zero.hpp | 55 +-- .../core/include/ngraph/op/normalize_l2.hpp | 39 +- ngraph/core/include/ngraph/op/not.hpp | 20 +- ngraph/core/include/ngraph/op/not_equal.hpp | 23 +- ngraph/core/include/ngraph/op/one_hot.hpp | 42 +- ngraph/core/include/ngraph/op/or.hpp | 27 +- ngraph/core/include/ngraph/op/pad.hpp | 65 +-- ngraph/core/include/ngraph/op/power.hpp | 37 +- ngraph/core/include/ngraph/op/prelu.hpp | 25 +- ngraph/core/include/ngraph/op/prior_box.hpp | 57 +-- .../include/ngraph/op/prior_box_clustered.hpp | 49 +-- ngraph/core/include/ngraph/op/proposal.hpp | 82 +--- .../core/include/ngraph/op/psroi_pooling.hpp | 60 +-- .../core/include/openvino/op/logical_not.hpp | 32 ++ .../core/include/openvino/op/logical_or.hpp | 41 ++ .../include/openvino/op/lstm_sequence.hpp | 8 +- ngraph/core/include/openvino/op/matmul.hpp | 55 +++ .../core/include/openvino/op/matrix_nms.hpp | 91 ++++ ngraph/core/include/openvino/op/max.hpp | 31 ++ 
ngraph/core/include/openvino/op/max_pool.hpp | 133 ++++++ ngraph/core/include/openvino/op/maximum.hpp | 36 ++ ngraph/core/include/openvino/op/minimum.hpp | 36 ++ ngraph/core/include/openvino/op/mish.hpp | 34 ++ ngraph/core/include/openvino/op/mod.hpp | 32 ++ .../include/openvino/op/multiclass_nms.hpp | 69 +++ ngraph/core/include/openvino/op/multiply.hpp | 36 ++ ngraph/core/include/openvino/op/mvn.hpp | 144 +++++++ ngraph/core/include/openvino/op/negative.hpp | 30 ++ .../openvino/op/non_max_suppression.hpp | 408 ++++++++++++++++++ ngraph/core/include/openvino/op/non_zero.hpp | 67 +++ .../core/include/openvino/op/normalize_l2.hpp | 53 +++ ngraph/core/include/openvino/op/not_equal.hpp | 35 ++ ngraph/core/include/openvino/op/one_hot.hpp | 54 +++ ngraph/core/include/openvino/op/pad.hpp | 79 ++++ ngraph/core/include/openvino/op/power.hpp | 49 +++ ngraph/core/include/openvino/op/prelu.hpp | 37 ++ ngraph/core/include/openvino/op/prior_box.hpp | 67 +++ .../openvino/op/prior_box_clustered.hpp | 59 +++ ngraph/core/include/openvino/op/proposal.hpp | 95 ++++ .../include/openvino/op/psroi_pooling.hpp | 72 ++++ .../core/include/openvino/op/reduce_min.hpp | 33 ++ .../include/openvino/op/tensor_iterator.hpp | 4 +- .../core/src/op/{not.cpp => logical_not.cpp} | 5 +- ngraph/core/src/op/{or.cpp => logical_or.cpp} | 5 +- ngraph/core/src/op/matmul.cpp | 2 +- ngraph/core/src/op/matrix_nms.cpp | 4 +- ngraph/core/src/op/max.cpp | 2 +- ngraph/core/src/op/max_pool.cpp | 6 +- ngraph/core/src/op/maximum.cpp | 2 +- ngraph/core/src/op/minimum.cpp | 2 +- ngraph/core/src/op/mish.cpp | 2 +- ngraph/core/src/op/mod.cpp | 4 +- ngraph/core/src/op/multiclass_nms.cpp | 2 +- ngraph/core/src/op/multiply.cpp | 2 +- ngraph/core/src/op/mvn.cpp | 6 +- ngraph/core/src/op/negative.cpp | 2 +- ngraph/core/src/op/non_max_suppression.cpp | 69 +-- ngraph/core/src/op/non_zero.cpp | 2 +- ngraph/core/src/op/normalize_l2.cpp | 2 +- ngraph/core/src/op/not_equal.cpp | 2 +- ngraph/core/src/op/one_hot.cpp | 10 +- 
ngraph/core/src/op/pad.cpp | 4 +- ngraph/core/src/op/power.cpp | 2 +- ngraph/core/src/op/prelu.cpp | 35 +- ngraph/core/src/op/prior_box.cpp | 15 +- ngraph/core/src/op/prior_box_clustered.cpp | 18 +- ngraph/core/src/op/proposal.cpp | 8 +- ngraph/core/src/op/psroi_pooling.cpp | 22 +- .../core/src/op/{min.cpp => reduce_min.cpp} | 5 +- 83 files changed, 2102 insertions(+), 1639 deletions(-) create mode 100644 ngraph/core/include/openvino/op/logical_not.hpp create mode 100644 ngraph/core/include/openvino/op/logical_or.hpp create mode 100644 ngraph/core/include/openvino/op/matmul.hpp create mode 100644 ngraph/core/include/openvino/op/matrix_nms.hpp create mode 100644 ngraph/core/include/openvino/op/max.hpp create mode 100644 ngraph/core/include/openvino/op/max_pool.hpp create mode 100644 ngraph/core/include/openvino/op/maximum.hpp create mode 100644 ngraph/core/include/openvino/op/minimum.hpp create mode 100644 ngraph/core/include/openvino/op/mish.hpp create mode 100644 ngraph/core/include/openvino/op/mod.hpp create mode 100644 ngraph/core/include/openvino/op/multiclass_nms.hpp create mode 100644 ngraph/core/include/openvino/op/multiply.hpp create mode 100644 ngraph/core/include/openvino/op/mvn.hpp create mode 100644 ngraph/core/include/openvino/op/negative.hpp create mode 100644 ngraph/core/include/openvino/op/non_max_suppression.hpp create mode 100644 ngraph/core/include/openvino/op/non_zero.hpp create mode 100644 ngraph/core/include/openvino/op/normalize_l2.hpp create mode 100644 ngraph/core/include/openvino/op/not_equal.hpp create mode 100644 ngraph/core/include/openvino/op/one_hot.hpp create mode 100644 ngraph/core/include/openvino/op/pad.hpp create mode 100644 ngraph/core/include/openvino/op/power.hpp create mode 100644 ngraph/core/include/openvino/op/prelu.hpp create mode 100644 ngraph/core/include/openvino/op/prior_box.hpp create mode 100644 ngraph/core/include/openvino/op/prior_box_clustered.hpp create mode 100644 ngraph/core/include/openvino/op/proposal.hpp 
create mode 100644 ngraph/core/include/openvino/op/psroi_pooling.hpp create mode 100644 ngraph/core/include/openvino/op/reduce_min.hpp rename ngraph/core/src/op/{not.cpp => logical_not.cpp} (98%) rename ngraph/core/src/op/{or.cpp => logical_or.cpp} (96%) rename ngraph/core/src/op/{min.cpp => reduce_min.cpp} (97%) diff --git a/ngraph/core/include/ngraph/op/matmul.hpp b/ngraph/core/include/ngraph/op/matmul.hpp index 2ace4905c0a344..92a1701f12d31b 100644 --- a/ngraph/core/include/ngraph/op/matmul.hpp +++ b/ngraph/core/include/ngraph/op/matmul.hpp @@ -6,48 +6,12 @@ #include "ngraph/node.hpp" #include "ngraph/op/op.hpp" +#include "openvino/op/matmul.hpp" namespace ngraph { namespace op { namespace v0 { -/// \brief Operator performing Matrix Multiplication. -class NGRAPH_API MatMul : public Op { -public: - NGRAPH_RTTI_DECLARATION; - MatMul() = default; - /// \brief Constructs an Matrix Multiplication operation. - /// - /// \param A Matrix A - /// \param B Matrix B - /// \param transpose_a If matrix A should be transposed. - /// \param transpose_b If matrix B should be transposed. 
- MatMul(const Output& A, const Output& B, const bool& transpose_a = 0, const bool& transpose_b = 0); - - bool visit_attributes(AttributeVisitor& visitor) override; - void validate_and_infer_types() override; - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; - - bool get_transpose_a() const { - return m_transpose_a; - } - bool get_transpose_b() const { - return m_transpose_b; - } - void set_transpose_a(bool transpose_a) { - m_transpose_a = transpose_a; - } - void set_transpose_b(bool transpose_b) { - m_transpose_b = transpose_b; - } - -private: - bool m_transpose_a; - bool m_transpose_b; -}; +using ov::op::v0::MatMul; } // namespace v0 using v0::MatMul; } // namespace op diff --git a/ngraph/core/include/ngraph/op/matrix_nms.hpp b/ngraph/core/include/ngraph/op/matrix_nms.hpp index e77b7b439c31aa..b7e2b3d730ad9f 100644 --- a/ngraph/core/include/ngraph/op/matrix_nms.hpp +++ b/ngraph/core/include/ngraph/op/matrix_nms.hpp @@ -5,90 +5,13 @@ #pragma once #include "ngraph/op/util/nms_base.hpp" +#include "openvino/op/matrix_nms.hpp" namespace ngraph { namespace op { namespace v8 { -/// \brief MatrixNms operation -/// -class NGRAPH_API MatrixNms : public util::NmsBase { -public: - NGRAPH_RTTI_DECLARATION; - - enum class DecayFunction { GAUSSIAN, LINEAR }; - - /// \brief Structure that specifies attributes of the operation - struct Attributes { - // specifies order of output elements - SortResultType sort_result_type = SortResultType::NONE; - // specifies whenever it is necessary to sort selected boxes across batches or - // not - bool sort_result_across_batch = false; - // specifies the output tensor type - ngraph::element::Type output_type = ngraph::element::i64; - // specifies minimum score to consider box for the processing - float score_threshold = 0.0f; - // specifies maximum number of 
boxes to be selected per class, -1 meaning to - // keep all boxes - int nms_top_k = -1; - // specifies maximum number of boxes to be selected per batch element, -1 - // meaning to keep all boxes - int keep_top_k = -1; - // specifies the background class id, -1 meaning to keep all classes - int background_class = -1; - // specifies decay function used to decay scores - DecayFunction decay_function = DecayFunction::LINEAR; - // specifies gaussian_sigma parameter for gaussian decay_function - float gaussian_sigma = 2.0f; - // specifies threshold to filter out boxes with low confidence score after - // decaying - float post_threshold = 0.0f; - // specifies whether boxes are normalized or not - bool normalized = true; - }; - - MatrixNms(); - - /// \brief Constructs a MatrixNms operation - /// - /// \param boxes Node producing the box coordinates - /// \param scores Node producing the box scores - /// \param attrs Attributes of the operation - MatrixNms(const Output& boxes, const Output& scores, const Attributes& attrs); - - bool visit_attributes(AttributeVisitor& visitor) override; - - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - /// \brief Returns attributes of the operation MatrixNms - const Attributes& get_attrs() const { - return m_attrs; - } - -protected: - Attributes m_attrs; - - void validate() override; -}; +using ov::op::v8::MatrixNms; } // namespace v8 } // namespace op -NGRAPH_API -std::ostream& operator<<(std::ostream& s, const op::v8::MatrixNms::DecayFunction& type); +using ov::operator<<; } // namespace ngraph - -namespace ov { - -template <> -class NGRAPH_API AttributeAdapter - : public EnumAttributeAdapterBase { -public: - AttributeAdapter(ngraph::op::v8::MatrixNms::DecayFunction& value) - : EnumAttributeAdapterBase(value) {} - - static constexpr DiscreteTypeInfo type_info{"AttributeAdapter", 1}; - const DiscreteTypeInfo& get_type_info() const override { - return type_info; - } -}; - -} // namespace ov diff 
--git a/ngraph/core/include/ngraph/op/max.hpp b/ngraph/core/include/ngraph/op/max.hpp index 7d0a2e825b4f87..5d59bf8567d046 100644 --- a/ngraph/core/include/ngraph/op/max.hpp +++ b/ngraph/core/include/ngraph/op/max.hpp @@ -6,27 +6,12 @@ #include "ngraph/op/util/arithmetic_reduction.hpp" #include "ngraph/op/util/arithmetic_reductions_keep_dims.hpp" +#include "openvino/op/max.hpp" namespace ngraph { namespace op { namespace v1 { -class NGRAPH_API ReduceMax : public util::ArithmeticReductionKeepDims { -public: - NGRAPH_RTTI_DECLARATION; - /// \brief Constructs a summation operation. - ReduceMax() = default; - /// \brief Constructs a summation operation. - /// - /// \param arg The tensor to be summed. - /// \param reduction_axes The axis positions (0-based) to be eliminated. - /// \param keep_dims If set to 1 it holds axes that are used for reduction. - ReduceMax(const Output& arg, const Output& reduction_axes, bool keep_dims = false); - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; -}; +using ov::op::v1::ReduceMax; } // namespace v1 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/max_pool.hpp b/ngraph/core/include/ngraph/op/max_pool.hpp index e870a1e0303422..25c4cb50f404a6 100644 --- a/ngraph/core/include/ngraph/op/max_pool.hpp +++ b/ngraph/core/include/ngraph/op/max_pool.hpp @@ -7,127 +7,16 @@ #include #include "ngraph/op/util/max_pool_base.hpp" +#include "openvino/op/max_pool.hpp" namespace ngraph { namespace op { namespace v1 { -/// \brief Batched max pooling operation. -class NGRAPH_API MaxPool : public op::util::MaxPoolBase { -public: - NGRAPH_RTTI_DECLARATION; - - /// \brief Constructs a batched max pooling operation. - MaxPool() = default; - - /// \brief Constructs a batched max pooling operation. 
- /// - /// \param arg The node producing the input data batch tensor. - /// \param strides The strides. - /// \param pads_begin The beginning of padding shape. - /// \param pads_end The end of padding shape. - /// \param kernel The kernel shape. - /// \param rounding_type Whether to use ceiling or floor rounding type while - /// computing output shape. - /// \param auto_pad The pad type for automatically computing padding sizes. - MaxPool(const Output& arg, - const Strides& strides, - const Shape& pads_begin, - const Shape& pads_end, - const Shape& kernel, - const op::RoundingType rounding_type = op::RoundingType::FLOOR, - const PadType auto_pad = op::PadType::EXPLICIT); - - bool visit_attributes(AttributeVisitor& visitor) override; - void validate_and_infer_types() override; - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - /// \return The default value for MaxPool. - NGRAPH_SUPPRESS_DEPRECATED_START - virtual std::shared_ptr get_default_value() const override; - NGRAPH_SUPPRESS_DEPRECATED_END - - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; - -private: - bool evaluate_maxpool(const HostTensorVector& outputs, const HostTensorVector& inputs) const; -}; +using ov::op::v1::MaxPool; } // namespace v1 namespace v8 { -/// \brief MaxPooling operation with values and indices calculated as individual outputs -class NGRAPH_API MaxPool : public op::util::MaxPoolBase { -public: - NGRAPH_RTTI_DECLARATION; - - /// \brief Constructs an empty MaxPool operation. - MaxPool() = default; - - /// \brief Constructs a parametrized MaxPool operation. - /// - /// \param arg Output of a node producing the feature tensor to be pooled. - /// \param strides The strides of the pooling filter. - /// \param dilations The dilations of the pooling filter. - /// \param pads_begin Paddings at the beginning of each spatial axis. 
- /// \param pads_end Paddings at the end of each spatial axis. - /// \param kernel The kernel shape. - /// \param rounding_type Whether to use ceiling or floor rounding type while - /// computing the output shape. - /// \param auto_pad The pad type for automatic calculation of the padding sizes. - /// \param index_element_type The data type used by the second output tensor - /// containing the selected indices. - /// \param axis Indicates a dimension in the input data shape which should be used - /// as a starting point for calculation of the upper bound of allowed - /// values of the indices output. - MaxPool(const Output& arg, - const Strides& strides, - const Strides& dilations, - const Shape& pads_begin, - const Shape& pads_end, - const Shape& kernel, - const op::RoundingType rounding_type = op::RoundingType::FLOOR, - const PadType auto_pad = op::PadType::EXPLICIT, - const element::Type index_element_type = element::i64, - const int64_t axis = 0); - - bool visit_attributes(AttributeVisitor& visitor) override; - void validate_and_infer_types() override; - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - /// \return The pooling filter's dilations. - const Strides& get_dilations() const noexcept { - return m_dilations; - } - void set_dilations(const Strides& dilations) { - m_dilations = dilations; - } - - /// \return The data type of the second output tensor (indices). - element::Type get_index_element_type() const noexcept { - return m_index_element_type; - } - void set_index_element_type(const element::Type index_element_type) { - m_index_element_type = index_element_type; - } - - // \return The 'axis' attribute value. 
- int64_t get_axis() const { - return m_axis; - } - void set_axis(const int64_t axis) { - m_axis = axis; - } - - bool has_evaluate() const override; - bool evaluate(const HostTensorVector&, const HostTensorVector&) const override; - -private: - Strides m_dilations; - element::Type m_index_element_type{element::i64}; - int64_t m_axis{0}; -}; +using ov::op::v8::MaxPool; } // namespace v8 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/maximum.hpp b/ngraph/core/include/ngraph/op/maximum.hpp index 02bfd1093791e9..11f63f2f532b16 100644 --- a/ngraph/core/include/ngraph/op/maximum.hpp +++ b/ngraph/core/include/ngraph/op/maximum.hpp @@ -5,32 +5,12 @@ #pragma once #include "ngraph/op/util/binary_elementwise_arithmetic.hpp" +#include "openvino/op/maximum.hpp" namespace ngraph { namespace op { namespace v1 { -/// \brief Elementwise maximum operation. -class NGRAPH_API Maximum : public util::BinaryElementwiseArithmetic { -public: - NGRAPH_RTTI_DECLARATION; - - /// \brief Constructs a maximum operation. - Maximum() : util::BinaryElementwiseArithmetic(AutoBroadcastSpec::NUMPY) {} - - /// \brief Constructs a maximum operation. - /// - /// \param arg0 Node that produces the first input tensor. - /// \param arg1 Node that produces the second input tensor. 
- /// \param auto_broadcast Auto broadcast specification - Maximum(const Output& arg0, - const Output& arg1, - const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY)); - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; -}; +using ov::op::v1::Maximum; } // namespace v1 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/min.hpp b/ngraph/core/include/ngraph/op/min.hpp index 21c3b8c710c6ff..e3b6a610b6c9cf 100644 --- a/ngraph/core/include/ngraph/op/min.hpp +++ b/ngraph/core/include/ngraph/op/min.hpp @@ -6,29 +6,12 @@ #include "ngraph/op/util/arithmetic_reduction.hpp" #include "ngraph/op/util/arithmetic_reductions_keep_dims.hpp" +#include "openvino/op/reduce_min.hpp" namespace ngraph { namespace op { namespace v1 { -class NGRAPH_API ReduceMin : public util::ArithmeticReductionKeepDims { -public: - NGRAPH_RTTI_DECLARATION; - /// \brief Constructs a summation operation. - ReduceMin() = default; - /// \brief Constructs a summation operation. - /// - /// \param arg The tensor to be summed. - /// \param reduction_axes The axis positions (0-based) to be eliminated. - /// \param keep_dims If set to 1 it holds axes that are used for reduction. 
- ReduceMin(const Output& arg, const Output& reduction_axes, bool keep_dims = false); - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; - bool evaluate_lower(const HostTensorVector& outputs) const override; - bool evaluate_upper(const HostTensorVector& outputs) const override; -}; +using ov::op::v1::ReduceMin; } // namespace v1 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/minimum.hpp b/ngraph/core/include/ngraph/op/minimum.hpp index 38d10ea1e106b9..201fc8a3238231 100644 --- a/ngraph/core/include/ngraph/op/minimum.hpp +++ b/ngraph/core/include/ngraph/op/minimum.hpp @@ -5,32 +5,12 @@ #pragma once #include "ngraph/op/util/binary_elementwise_arithmetic.hpp" +#include "openvino/op/minimum.hpp" namespace ngraph { namespace op { namespace v1 { -/// \brief Elementwise minimum operation. -class NGRAPH_API Minimum : public util::BinaryElementwiseArithmetic { -public: - NGRAPH_RTTI_DECLARATION; - - /// \brief Constructs a minimum operation. - Minimum() : util::BinaryElementwiseArithmetic(AutoBroadcastSpec::NUMPY) {} - - /// \brief Constructs a minimum operation. - /// - /// \param arg0 Node that produces the first input tensor. - /// \param arg1 Node that produces the second input tensor. 
- /// \param auto_broadcast Auto broadcast specification - Minimum(const Output& arg0, - const Output& arg1, - const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY)); - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; -}; +using ov::op::v1::Minimum; } // namespace v1 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/mish.hpp b/ngraph/core/include/ngraph/op/mish.hpp index 943ef5699d8c39..43884f6f318104 100644 --- a/ngraph/core/include/ngraph/op/mish.hpp +++ b/ngraph/core/include/ngraph/op/mish.hpp @@ -6,30 +6,12 @@ #include "ngraph/node.hpp" #include "ngraph/op/op.hpp" +#include "openvino/op/mish.hpp" namespace ngraph { namespace op { namespace v4 { -/// \brief A Self Regularized Non-Monotonic Neural Activation Function -/// f(x) = x * tanh(log(exp(x) + 1.)) -/// -class NGRAPH_API Mish : public ngraph::op::Op { -public: - NGRAPH_RTTI_DECLARATION; - - Mish() = default; - /// \brief Constructs an Mish operation. 
- /// - /// \param data Input tensor - Mish(const Output& arg); - bool visit_attributes(AttributeVisitor& visitor) override; - void validate_and_infer_types() override; - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; -}; +using ov::op::v4::Mish; } // namespace v4 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/mod.hpp b/ngraph/core/include/ngraph/op/mod.hpp index 4726c24945c94f..7a86e04230d496 100644 --- a/ngraph/core/include/ngraph/op/mod.hpp +++ b/ngraph/core/include/ngraph/op/mod.hpp @@ -5,28 +5,12 @@ #pragma once #include "ngraph/op/util/binary_elementwise_arithmetic.hpp" +#include "openvino/op/mod.hpp" namespace ngraph { namespace op { namespace v1 { -/// \brief Mod returns an element-wise division reminder with two given tensors applying -/// multi-directional broadcast rules. -class NGRAPH_API Mod : public util::BinaryElementwiseArithmetic { -public: - NGRAPH_RTTI_DECLARATION; - - /// \brief Constructs a Mod node. 
- Mod() : util::BinaryElementwiseArithmetic(AutoBroadcastSpec::NUMPY) {} - /// - /// \param A - Dividend tensor - /// \param B - Divisor tensor - /// \param auto_broadcast Auto broadcast specification - Mod(const Output& A, - const Output& B, - const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY)); - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; -}; +using ov::op::v1::Mod; } // namespace v1 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/multiclass_nms.hpp b/ngraph/core/include/ngraph/op/multiclass_nms.hpp index 5ecde65cae405d..d88e4d4b6e9d0e 100644 --- a/ngraph/core/include/ngraph/op/multiclass_nms.hpp +++ b/ngraph/core/include/ngraph/op/multiclass_nms.hpp @@ -5,65 +5,12 @@ #pragma once #include "ngraph/op/util/nms_base.hpp" +#include "openvino/op/multiclass_nms.hpp" namespace ngraph { namespace op { namespace v8 { -/// \brief MulticlassNms operation -/// -class NGRAPH_API MulticlassNms : public util::NmsBase { -public: - NGRAPH_RTTI_DECLARATION; - - /// \brief Structure that specifies attributes of the operation - struct Attributes { - // specifies order of output elements - SortResultType sort_result_type = SortResultType::NONE; - // specifies whenever it is necessary to sort selected boxes across batches or - // not - bool sort_result_across_batch = false; - // specifies the output tensor type - ngraph::element::Type output_type = ngraph::element::i64; - // specifies intersection over union threshold - float iou_threshold = 0.0f; - // specifies minimum score to consider box for the processing - float score_threshold = 0.0f; - // specifies maximum number of boxes to be selected per class, -1 meaning to - // keep all boxes - int nms_top_k = -1; - // specifies maximum number of boxes to be selected per batch element, -1 - // meaning to keep all boxes - int keep_top_k = -1; - // specifies the background class id, -1 meaning to keep all classes - int 
background_class = -1; - // specifies eta parameter for adpative NMS, in close range [0, 1.0] - float nms_eta = 1.0f; - // specifies whether boxes are normalized or not - bool normalized = true; - }; - - MulticlassNms(); - - /// \brief Constructs a MulticlassNms operation - /// - /// \param boxes Node producing the box coordinates - /// \param scores Node producing the box scores - /// \param attrs Attributes of the operation - MulticlassNms(const Output& boxes, const Output& scores, const Attributes& attrs); - - bool visit_attributes(AttributeVisitor& visitor) override; - - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - /// \brief Returns attributes of the operation MulticlassNms - const Attributes& get_attrs() const { - return m_attrs; - } - -protected: - Attributes m_attrs; - void validate() override; -}; +using ov::op::v8::MulticlassNms; } // namespace v8 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/multiply.hpp b/ngraph/core/include/ngraph/op/multiply.hpp index 0826854ebf47f4..2af4196858df3b 100644 --- a/ngraph/core/include/ngraph/op/multiply.hpp +++ b/ngraph/core/include/ngraph/op/multiply.hpp @@ -5,32 +5,12 @@ #pragma once #include "ngraph/op/util/binary_elementwise_arithmetic.hpp" +#include "openvino/op/multiply.hpp" namespace ngraph { namespace op { namespace v1 { -/// \brief Elementwise multiplication operation. -class NGRAPH_API Multiply : public util::BinaryElementwiseArithmetic { -public: - NGRAPH_RTTI_DECLARATION; - - /// \brief Constructs a multiplication operation. - Multiply() : util::BinaryElementwiseArithmetic(AutoBroadcastSpec::NUMPY) {} - - /// \brief Constructs a multiplication operation. - /// - /// \param arg0 Node that produces the first input tensor. - /// \param arg1 Node that produces the second input tensor. 
- /// \param auto_broadcast Auto broadcast specification - Multiply(const Output& arg0, - const Output& arg1, - const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY)); - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; -}; +using ov::op::v1::Multiply; } // namespace v1 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/mvn.hpp b/ngraph/core/include/ngraph/op/mvn.hpp index 8795a7cbb8f2db..e4d887ab07043b 100644 --- a/ngraph/core/include/ngraph/op/mvn.hpp +++ b/ngraph/core/include/ngraph/op/mvn.hpp @@ -6,142 +6,19 @@ #include "ngraph/node.hpp" #include "ngraph/op/op.hpp" +#include "openvino/op/mvn.hpp" namespace ngraph { namespace op { - namespace v0 { -/// \brief Operator performing Mean Variance Normalization -/// -class NGRAPH_API MVN : public Op { -public: - NGRAPH_RTTI_DECLARATION; - - MVN() = default; - /// \brief Constructs an MVN operation. - /// - /// \param data Input tensor with data - /// \param normalize_variance flag that denotes whether to perform variance - /// normalization. - /// \param across_channels flag that denotes if mean values are shared across - /// channels. - /// \param eps the number to be added to the variance to avoid division by zero when - /// normalizing the value - /// - MVN(const Output& data, bool across_channels = true, bool normalize_variance = true, double eps = 1e-9); - - /// \brief Constructs an MVN operation. - /// - /// \param data Input tensor with data - /// \param reduction_axes A list of axes, along which to reduce. - /// \param normalize_variance flag that denotes whether to perform variance - /// normalization. 
- /// \param eps the number to be added to the variance to avoid division by zero when - /// normalizing the value - /// - MVN(const Output& data, AxisSet reduction_axes, bool normalize_variance = true, double eps = 1e-9); - - void validate_and_infer_types() override; - - bool visit_attributes(AttributeVisitor& visitor) override; - - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - double get_eps() const { - return m_eps; - } - bool get_across_channels() const { - return m_across_channels; - } - bool get_normalize_variance() const { - return m_normalize_variance; - } - AxisSet get_reduction_axes() const { - return m_reduction_axes; - } - void set_reduction_axes(AxisSet axes) { - m_reduction_axes = axes; - } - -private: - double m_eps; - bool m_across_channels; - bool m_normalize_variance; - AxisSet m_reduction_axes; -}; +using ov::op::v0::MVN; } // namespace v0 using v0::MVN; -/// \brief Specifies how eps is applied in MVN -enum class MVNEpsMode { - // Apply eps inside sqrt - INSIDE_SQRT, - // Apply eps outside sqrt - OUTSIDE_SQRT -}; - -NGRAPH_API -std::ostream& operator<<(std::ostream& s, const MVNEpsMode& type); +using ov::op::MVNEpsMode; namespace v6 { -/// \brief Operator performing Mean Variance Normalization -/// -class NGRAPH_API MVN : public ngraph::op::Op { -public: - NGRAPH_RTTI_DECLARATION; - - MVN() = default; - /// \brief Constructs an MVN operation. - /// - /// \param data Input tensor with data - /// \param reduction_axes A list of axes, along which to reduce. - /// \param normalize_variance flag that denotes whether to perform variance - /// normalization. 
- /// \param eps the number to be added to the variance to avoid division by zero when - /// normalizing the value - /// \param eps_mode the mode of applying epsilon - /// - MVN(const Output& data, - const Output& reduction_axes, - bool normalize_variance, - float eps, - MVNEpsMode eps_mode); - - bool visit_attributes(AttributeVisitor& visitor) override; - void validate_and_infer_types() override; - - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - float get_eps() const { - return m_eps; - } - bool get_normalize_variance() const { - return m_normalize_variance; - } - MVNEpsMode get_eps_mode() const { - return m_eps_mode; - } - -private: - bool m_normalize_variance; - float m_eps; - MVNEpsMode m_eps_mode; -}; +using ov::op::v6::MVN; } // namespace v6 } // namespace op } // namespace ngraph - -namespace ov { - -template <> -class NGRAPH_API AttributeAdapter : public EnumAttributeAdapterBase { -public: - AttributeAdapter(ngraph::op::MVNEpsMode& value) : EnumAttributeAdapterBase(value) {} - - static constexpr DiscreteTypeInfo type_info{"AttributeAdapter", 0}; - const DiscreteTypeInfo& get_type_info() const override { - return type_info; - } -}; - -} // namespace ov diff --git a/ngraph/core/include/ngraph/op/negative.hpp b/ngraph/core/include/ngraph/op/negative.hpp index e3c385dd72c730..b7c078e01f485d 100644 --- a/ngraph/core/include/ngraph/op/negative.hpp +++ b/ngraph/core/include/ngraph/op/negative.hpp @@ -5,26 +5,12 @@ #pragma once #include "ngraph/op/util/unary_elementwise_arithmetic.hpp" +#include "openvino/op/negative.hpp" namespace ngraph { namespace op { namespace v0 { -/// \brief Elementwise negative operation. -class NGRAPH_API Negative : public util::UnaryElementwiseArithmetic { -public: - NGRAPH_RTTI_DECLARATION; - /// \brief Constructs a negative operation. - Negative() = default; - /// \brief Constructs a negative operation. - /// - /// \param arg Node that produces the input tensor. 
- Negative(const Output& arg); - - bool visit_attributes(AttributeVisitor& visitor) override; - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; -}; +using ov::op::v0::Negative; } // namespace v0 using v0::Negative; } // namespace op diff --git a/ngraph/core/include/ngraph/op/non_max_suppression.hpp b/ngraph/core/include/ngraph/op/non_max_suppression.hpp index ef6e6178139189..3b83d9fbc412ba 100644 --- a/ngraph/core/include/ngraph/op/non_max_suppression.hpp +++ b/ngraph/core/include/ngraph/op/non_max_suppression.hpp @@ -5,413 +5,25 @@ #pragma once #include "ngraph/op/op.hpp" +#include "openvino/op/non_max_suppression.hpp" namespace ngraph { namespace op { namespace v1 { -/// \brief Elementwise addition operation. -/// -class NGRAPH_API NonMaxSuppression : public Op { -public: - enum class BoxEncodingType { CORNER, CENTER }; - - NGRAPH_RTTI_DECLARATION; - - NonMaxSuppression() = default; - - /// \brief Constructs a NonMaxSuppression operation. 
- /// - /// \param boxes Node producing the box coordinates - /// \param scores Node producing the box scores - /// \param max_output_boxes_per_class Node producing maximum number of boxes to be - /// selected per class - /// \param iou_threshold Node producing intersection over union threshold - /// \param score_threshold Node producing minimum score threshold - /// \param box_encoding Specifies the format of boxes data encoding - NonMaxSuppression(const Output& boxes, - const Output& scores, - const Output& max_output_boxes_per_class, - const Output& iou_threshold, - const Output& score_threshold, - const BoxEncodingType box_encoding = BoxEncodingType::CORNER, - const bool sort_result_descending = true); - - /// \brief Constructs a NonMaxSuppression operation with default values for the last - /// 3 inputs - /// - /// \param boxes Node producing the box coordinates - /// \param scores Node producing the box coordinates - /// \param box_encoding Specifies the format of boxes data encoding - /// \param sort_result_descending Specifies whether it is necessary to sort selected - /// boxes across batches - /// \param output_type Specifies the output tensor type - NonMaxSuppression(const Output& boxes, - const Output& scores, - const BoxEncodingType box_encoding = BoxEncodingType::CORNER, - const bool sort_result_descending = true); - - bool visit_attributes(AttributeVisitor& visitor) override; - void validate_and_infer_types() override; - - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - BoxEncodingType get_box_encoding() const { - return m_box_encoding; - } - void set_box_encoding(const BoxEncodingType box_encoding) { - m_box_encoding = box_encoding; - } - bool get_sort_result_descending() const { - return m_sort_result_descending; - } - void set_sort_result_descending(const bool sort_result_descending) { - m_sort_result_descending = sort_result_descending; - } - -protected: - BoxEncodingType m_box_encoding = 
BoxEncodingType::CORNER; - bool m_sort_result_descending = true; - -private: - int64_t max_boxes_output_from_input() const; -}; +using ov::op::v1::NonMaxSuppression; } // namespace v1 namespace v3 { -/// \brief NonMaxSuppression operation -/// -class NGRAPH_API NonMaxSuppression : public Op { -public: - enum class BoxEncodingType { CORNER, CENTER }; - - static constexpr NodeTypeInfo type_info{"NonMaxSuppression", 3}; - const NodeTypeInfo& get_type_info() const override { - return type_info; - } - NonMaxSuppression() = default; - - /// \brief Constructs a NonMaxSuppression operation. - /// - /// \param boxes Node producing the box coordinates - /// \param scores Node producing the box scores - /// \param max_output_boxes_per_class Node producing maximum number of boxes to be - /// selected per class - /// \param iou_threshold Node producing intersection over union threshold - /// \param score_threshold Node producing minimum score threshold - /// \param box_encoding Specifies the format of boxes data encoding - /// \param sort_result_descending Specifies whether it is necessary to sort selected - /// boxes across batches - /// \param output_type Specifies the output tensor type - NonMaxSuppression(const Output& boxes, - const Output& scores, - const Output& max_output_boxes_per_class, - const Output& iou_threshold, - const Output& score_threshold, - const BoxEncodingType box_encoding = BoxEncodingType::CORNER, - const bool sort_result_descending = true, - const ngraph::element::Type& output_type = ngraph::element::i64); - - /// \brief Constructs a NonMaxSuppression operation with default values for the last - /// 3 inputs - /// - /// \param boxes Node producing the box coordinates - /// \param scores Node producing the box coordinates - /// \param box_encoding Specifies the format of boxes data encoding - /// \param sort_result_descending Specifies whether it is necessary to sort selected - /// boxes across batches - /// \param output_type Specifies the output 
tensor type - NonMaxSuppression(const Output& boxes, - const Output& scores, - const BoxEncodingType box_encoding = BoxEncodingType::CORNER, - const bool sort_result_descending = true, - const ngraph::element::Type& output_type = ngraph::element::i64); - - bool visit_attributes(AttributeVisitor& visitor) override; - void validate_and_infer_types() override; - - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - BoxEncodingType get_box_encoding() const { - return m_box_encoding; - } - void set_box_encoding(const BoxEncodingType box_encoding) { - m_box_encoding = box_encoding; - } - bool get_sort_result_descending() const { - return m_sort_result_descending; - } - void set_sort_result_descending(const bool sort_result_descending) { - m_sort_result_descending = sort_result_descending; - } - - element::Type get_output_type() const { - return m_output_type; - } - void set_output_type(const element::Type& output_type) { - m_output_type = output_type; - } - using Node::set_output_type; - -protected: - BoxEncodingType m_box_encoding = BoxEncodingType::CORNER; - bool m_sort_result_descending = true; - ngraph::element::Type m_output_type = ngraph::element::i64; - void validate(); - int64_t max_boxes_output_from_input() const; -}; +using ov::op::v3::NonMaxSuppression; } // namespace v3 namespace v4 { -/// \brief NonMaxSuppression operation -/// -class NGRAPH_API NonMaxSuppression : public op::v3::NonMaxSuppression { -public: - static constexpr NodeTypeInfo type_info{"NonMaxSuppression", 4}; - const NodeTypeInfo& get_type_info() const override { - return type_info; - } - NonMaxSuppression() = default; - - /// \brief Constructs a NonMaxSuppression operation. 
- /// - /// \param boxes Node producing the box coordinates - /// \param scores Node producing the box scores - /// \param max_output_boxes_per_class Node producing maximum number of boxes to be - /// selected per class - /// \param iou_threshold Node producing intersection over union threshold - /// \param score_threshold Node producing minimum score threshold - /// \param box_encoding Specifies the format of boxes data encoding - /// \param sort_result_descending Specifies whether it is necessary to sort selected - /// boxes across batches - /// \param output_type Specifies the output tensor type - NonMaxSuppression(const Output& boxes, - const Output& scores, - const Output& max_output_boxes_per_class, - const Output& iou_threshold, - const Output& score_threshold, - const BoxEncodingType box_encoding = BoxEncodingType::CORNER, - const bool sort_result_descending = true, - const ngraph::element::Type& output_type = ngraph::element::i64); - - /// \brief Constructs a NonMaxSuppression operation with default values for the last - /// 3 inputs - /// - /// \param boxes Node producing the box coordinates - /// \param scores Node producing the box coordinates - /// \param box_encoding Specifies the format of boxes data encoding - /// \param sort_result_descending Specifies whether it is necessary to sort selected - /// boxes across batches - /// \param output_type Specifies the output tensor type - NonMaxSuppression(const Output& boxes, - const Output& scores, - const BoxEncodingType box_encoding = BoxEncodingType::CORNER, - const bool sort_result_descending = true, - const ngraph::element::Type& output_type = ngraph::element::i64); - - void validate_and_infer_types() override; - - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; -}; +using ov::op::v4::NonMaxSuppression; } // namespace v4 namespace v5 { -/// \brief NonMaxSuppression operation -/// -class NGRAPH_API NonMaxSuppression : public Op { -public: - NGRAPH_RTTI_DECLARATION; - 
enum class BoxEncodingType { CORNER, CENTER }; - - NonMaxSuppression() = default; - - /// \brief Constructs a NonMaxSuppression operation with default values in the last - /// 4 inputs. - /// - /// \param boxes Node producing the box coordinates - /// \param scores Node producing the box scores - /// \param box_encoding Specifies the format of boxes data encoding - /// \param sort_result_descending Specifies whether it is necessary to sort selected - /// boxes across batches - /// \param output_type Specifies the output tensor type - NonMaxSuppression(const Output& boxes, - const Output& scores, - const BoxEncodingType box_encoding = BoxEncodingType::CORNER, - const bool sort_result_descending = true, - const ngraph::element::Type& output_type = ngraph::element::i64); - - /// \brief Constructs a NonMaxSuppression operation with default values in the last. - /// 3 inputs. - /// - /// \param boxes Node producing the box coordinates - /// \param scores Node producing the box scores - /// \param max_output_boxes_per_class Node producing maximum number of boxes to be - /// selected per class - /// \param box_encoding Specifies the format of boxes data encoding - /// \param sort_result_descending Specifies whether it is necessary to sort selected - /// boxes across batches - /// \param output_type Specifies the output tensor type - NonMaxSuppression(const Output& boxes, - const Output& scores, - const Output& max_output_boxes_per_class, - const BoxEncodingType box_encoding = BoxEncodingType::CORNER, - const bool sort_result_descending = true, - const ngraph::element::Type& output_type = ngraph::element::i64); - - /// \brief Constructs a NonMaxSuppression operation with default values in the last. - /// 2 inputs. 
- /// - /// \param boxes Node producing the box coordinates - /// \param scores Node producing the box scores - /// \param max_output_boxes_per_class Node producing maximum number of boxes to be - /// selected per class - /// \param iou_threshold Node producing intersection over union threshold - /// \param box_encoding Specifies the format of boxes data encoding - /// \param sort_result_descending Specifies whether it is necessary to sort selected - /// boxes across batches - /// \param output_type Specifies the output tensor type - NonMaxSuppression(const Output& boxes, - const Output& scores, - const Output& max_output_boxes_per_class, - const Output& iou_threshold, - const BoxEncodingType box_encoding = BoxEncodingType::CORNER, - const bool sort_result_descending = true, - const ngraph::element::Type& output_type = ngraph::element::i64); - - /// \brief Constructs a NonMaxSuppression operation with default value in the last. - /// input. - /// - /// \param boxes Node producing the box coordinates - /// \param scores Node producing the box scores - /// \param max_output_boxes_per_class Node producing maximum number of boxes to be - /// selected per class - /// \param iou_threshold Node producing intersection over union threshold - /// \param score_threshold Node producing minimum score threshold - /// \param box_encoding Specifies the format of boxes data encoding - /// \param sort_result_descending Specifies whether it is necessary to sort selected - /// boxes across batches - /// \param output_type Specifies the output tensor type - NonMaxSuppression(const Output& boxes, - const Output& scores, - const Output& max_output_boxes_per_class, - const Output& iou_threshold, - const Output& score_threshold, - const BoxEncodingType box_encoding = BoxEncodingType::CORNER, - const bool sort_result_descending = true, - const ngraph::element::Type& output_type = ngraph::element::i64); - - /// \brief Constructs a NonMaxSuppression operation. 
- /// - /// \param boxes Node producing the box coordinates - /// \param scores Node producing the box scores - /// \param max_output_boxes_per_class Node producing maximum number of boxes to be - /// selected per class - /// \param iou_threshold Node producing intersection over union threshold - /// \param score_threshold Node producing minimum score threshold - /// \param soft_nms_sigma Node specifying the sigma parameter for Soft-NMS - /// \param box_encoding Specifies the format of boxes data encoding - /// \param sort_result_descending Specifies whether it is necessary to sort selected - /// boxes across batches - /// \param output_type Specifies the output tensor type - NonMaxSuppression(const Output& boxes, - const Output& scores, - const Output& max_output_boxes_per_class, - const Output& iou_threshold, - const Output& score_threshold, - const Output& soft_nms_sigma, - const BoxEncodingType box_encoding = BoxEncodingType::CORNER, - const bool sort_result_descending = true, - const ngraph::element::Type& output_type = ngraph::element::i64); - - bool visit_attributes(AttributeVisitor& visitor) override; - void validate_and_infer_types() override; - - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - BoxEncodingType get_box_encoding() const { - return m_box_encoding; - } - void set_box_encoding(const BoxEncodingType box_encoding) { - m_box_encoding = box_encoding; - } - bool get_sort_result_descending() const { - return m_sort_result_descending; - } - void set_sort_result_descending(const bool sort_result_descending) { - m_sort_result_descending = sort_result_descending; - } - - element::Type get_output_type() const { - return m_output_type; - } - void set_output_type(const element::Type& output_type) { - m_output_type = output_type; - } - using Node::set_output_type; - - int64_t max_boxes_output_from_input() const; - float iou_threshold_from_input() const; - float score_threshold_from_input() const; - float 
soft_nms_sigma_from_input() const; - bool is_soft_nms_sigma_constant_and_default() const; - -protected: - BoxEncodingType m_box_encoding = BoxEncodingType::CORNER; - bool m_sort_result_descending = true; - ngraph::element::Type m_output_type = ngraph::element::i64; - void validate(); -}; +using ov::op::v5::NonMaxSuppression; } // namespace v5 } // namespace op - -NGRAPH_API -std::ostream& operator<<(std::ostream& s, const op::v1::NonMaxSuppression::BoxEncodingType& type); - -NGRAPH_API -std::ostream& operator<<(std::ostream& s, const op::v3::NonMaxSuppression::BoxEncodingType& type); - -NGRAPH_API -std::ostream& operator<<(std::ostream& s, const op::v5::NonMaxSuppression::BoxEncodingType& type); +using ov::operator<<; } // namespace ngraph - -namespace ov { - -template <> -class NGRAPH_API AttributeAdapter - : public EnumAttributeAdapterBase { -public: - AttributeAdapter(ngraph::op::v1::NonMaxSuppression::BoxEncodingType& value) - : EnumAttributeAdapterBase(value) {} - - static constexpr DiscreteTypeInfo type_info{"AttributeAdapter", 1}; - const DiscreteTypeInfo& get_type_info() const override { - return type_info; - } -}; - -template <> -class NGRAPH_API AttributeAdapter - : public EnumAttributeAdapterBase { -public: - AttributeAdapter(ngraph::op::v3::NonMaxSuppression::BoxEncodingType& value) - : EnumAttributeAdapterBase(value) {} - - static constexpr DiscreteTypeInfo type_info{"AttributeAdapter", 1}; - const DiscreteTypeInfo& get_type_info() const override { - return type_info; - } -}; - -template <> -class NGRAPH_API AttributeAdapter - : public EnumAttributeAdapterBase { -public: - AttributeAdapter(ngraph::op::v5::NonMaxSuppression::BoxEncodingType& value) - : EnumAttributeAdapterBase(value) {} - - static constexpr DiscreteTypeInfo type_info{"AttributeAdapter", 1}; - const DiscreteTypeInfo& get_type_info() const override { - return type_info; - } -}; - -} // namespace ov diff --git a/ngraph/core/include/ngraph/op/non_zero.hpp 
b/ngraph/core/include/ngraph/op/non_zero.hpp index 84259e2dd48ead..a7c5d71cacd11d 100644 --- a/ngraph/core/include/ngraph/op/non_zero.hpp +++ b/ngraph/core/include/ngraph/op/non_zero.hpp @@ -5,63 +5,12 @@ #pragma once #include "ngraph/op/op.hpp" +#include "openvino/op/non_zero.hpp" namespace ngraph { namespace op { namespace v3 { -/// \brief NonZero operation returning indices of non-zero elements in the input tensor. -/// -/// \note The indices are returned by-dimension in row-major order. For example -/// the following output contains 3 indices of a 3D input tensor elements: -/// [[0, 0, 2], -/// [0, 1, 1], -/// [0, 1, 2]] -/// The values point to input elements at [0,0,0], [0,1,1] and [2,1,2] -class NGRAPH_API NonZero : public Op { -public: - NGRAPH_RTTI_DECLARATION; - /// \brief Constructs a NonZero operation. - NonZero() = default; - /// \brief Constructs a NonZero operation. - /// - /// \note The output type is int64. - /// - /// \param arg Node that produces the input tensor. - NonZero(const Output& arg); - /// \brief Constructs a NonZero operation. - /// - /// \param arg Node that produces the input tensor. - /// \param output_type produce indices. Currently, only 'int64' or 'int32' - /// are - /// supported - NonZero(const Output& arg, const std::string& output_type); - /// \brief Constructs a NonZero operation. - /// - /// \param arg Node that produces the input tensor. - /// \param output_type produce indices. 
Currently, only int64 or int32 are - /// supported - NonZero(const Output& arg, const element::Type& output_type); - - bool visit_attributes(AttributeVisitor& visitor) override; - void validate_and_infer_types() override; - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - element::Type get_output_type() const { - return m_output_type; - } - void set_output_type(element::Type output_type) { - m_output_type = output_type; - } - // Overload collision with method on Node - using Node::set_output_type; - - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; - -protected: - element::Type m_output_type = element::i64; -}; +using ov::op::v3::NonZero; } // namespace v3 using v3::NonZero; } // namespace op diff --git a/ngraph/core/include/ngraph/op/normalize_l2.hpp b/ngraph/core/include/ngraph/op/normalize_l2.hpp index 3979e953ea1536..cad21c0f75a08c 100644 --- a/ngraph/core/include/ngraph/op/normalize_l2.hpp +++ b/ngraph/core/include/ngraph/op/normalize_l2.hpp @@ -9,47 +9,12 @@ #include "ngraph/node.hpp" #include "ngraph/op/op.hpp" #include "ngraph/op/util/attr_types.hpp" +#include "openvino/op/normalize_l2.hpp" namespace ngraph { namespace op { namespace v0 { -/// \brief Normalization with L2 norm. -/// -class NGRAPH_API NormalizeL2 : public Op { -public: - NGRAPH_RTTI_DECLARATION; - - NormalizeL2() = default; - /// - /// \brief Constructs a NormalizeL2 operation. - /// - /// \param data - Node producing the input tensor - /// \param axes - Node indicating axes along which reduction is - /// calculated - /// \param eps - The epsilon added to L2 norm. 
- /// \param eps_mode - Specifies how eps is combined with L2 value - /// calculated before division - /// - NormalizeL2(const Output& data, const Output& axes, float eps, EpsMode eps_mode); - - void validate_and_infer_types() override; - bool visit_attributes(AttributeVisitor& visitor) override; - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - float get_eps() const { - return m_eps; - } - EpsMode get_eps_mode() const { - return m_eps_mode; - } - AxisSet get_reduction_axes() const; - -protected: - float m_eps; - EpsMode m_eps_mode; -}; +using ov::op::v0::NormalizeL2; } // namespace v0 -using v0::NormalizeL2; } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/not.hpp b/ngraph/core/include/ngraph/op/not.hpp index e441e39f903681..8973a13c42c123 100644 --- a/ngraph/core/include/ngraph/op/not.hpp +++ b/ngraph/core/include/ngraph/op/not.hpp @@ -5,28 +5,12 @@ #pragma once #include "ngraph/op/op.hpp" +#include "openvino/op/logical_not.hpp" namespace ngraph { namespace op { namespace v1 { -/// \brief Elementwise logical negation operation. -class NGRAPH_API LogicalNot : public Op { -public: - NGRAPH_RTTI_DECLARATION; - /// \brief Constructs a logical negation operation. - LogicalNot() = default; - /// \brief Constructs a logical negation operation. - /// - /// \param arg Node that produces the input tensor. 
- LogicalNot(const Output& arg); - - bool visit_attributes(AttributeVisitor& visitor) override; - void validate_and_infer_types() override; - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; -}; +using ov::op::v1::LogicalNot; } // namespace v1 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/not_equal.hpp b/ngraph/core/include/ngraph/op/not_equal.hpp index a2d37109abcd91..4c0aa702b7c4b3 100644 --- a/ngraph/core/include/ngraph/op/not_equal.hpp +++ b/ngraph/core/include/ngraph/op/not_equal.hpp @@ -5,31 +5,12 @@ #pragma once #include "ngraph/op/util/binary_elementwise_comparison.hpp" +#include "openvino/op/not_equal.hpp" namespace ngraph { namespace op { namespace v1 { -/// \brief Elementwise not-equal operation. -class NGRAPH_API NotEqual : public util::BinaryElementwiseComparison { -public: - NGRAPH_RTTI_DECLARATION; - /// \brief Constructs a not-equal operation. - NotEqual() : util::BinaryElementwiseComparison(AutoBroadcastSpec::NUMPY) {} - /// \brief Constructs a not-equal operation. - /// - /// \param arg0 Node that produces the first input tensor. - /// \param arg1 Node that produces the second input tensor. 
- /// \param auto_broadcast Auto broadcast specification - NotEqual(const Output& arg0, - const Output& arg1, - const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY)); - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; - bool visit_attributes(AttributeVisitor& visitor) override; -}; +using ov::op::v1::NotEqual; } // namespace v1 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/one_hot.hpp b/ngraph/core/include/ngraph/op/one_hot.hpp index 5f203cfd057379..ef96a185eb35e8 100644 --- a/ngraph/core/include/ngraph/op/one_hot.hpp +++ b/ngraph/core/include/ngraph/op/one_hot.hpp @@ -5,50 +5,12 @@ #pragma once #include "ngraph/op/op.hpp" +#include "openvino/op/one_hot.hpp" namespace ngraph { namespace op { namespace v1 { -class NGRAPH_API OneHot : public Op { -public: - NGRAPH_RTTI_DECLARATION; - - /// \brief Constructs a one-hot operation. - OneHot() = default; - /// \brief Constructs a one-hot operation. - /// - /// \param indices Input tensor containing indices. - /// \param depth Specifies number of classes and the size of one-hot dimension. - /// \param on_value Specifies value that the locations in output tensor represented - /// by indices in input take. - /// \param off_value Specifies value that the locations in output tensor not - /// represented - /// by indices in input take. - /// \param axis Axis along which one-hot representation in added. 
- OneHot(const Output& indices, - const Output& depth, - const Output& on_value, - const Output& off_value, - int64_t axis); - - bool visit_attributes(AttributeVisitor& visitor) override; - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - void validate_and_infer_types() override; - - virtual bool evaluate(const HostTensorVector& output_values, const HostTensorVector& input_values) const override; - bool has_evaluate() const override; - - /// \return The index of the one-hot axis. - int64_t get_axis() const { - return m_axis; - } - void set_axis(int64_t axis) { - m_axis = axis; - } - -protected: - int64_t m_axis; -}; +using ov::op::v1::OneHot; } // namespace v1 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/or.hpp b/ngraph/core/include/ngraph/op/or.hpp index 7f3d539122625b..9eccc75caa2953 100644 --- a/ngraph/core/include/ngraph/op/or.hpp +++ b/ngraph/core/include/ngraph/op/or.hpp @@ -7,35 +7,12 @@ #include #include "ngraph/op/util/binary_elementwise_logical.hpp" +#include "openvino/op/logical_or.hpp" namespace ngraph { namespace op { namespace v1 { -/// \brief Elementwise logical-or operation. -/// -class NGRAPH_API LogicalOr : public util::BinaryElementwiseLogical { -public: - NGRAPH_RTTI_DECLARATION; - LogicalOr() = default; - /// \brief Constructs a logical-or operation. - /// - /// \param arg0 Node that produces the first input tensor.
- /// `[d0, ...]` - /// \param arg1 Node that produces the second input tensor.
- /// `[d0, ...]` - /// \param auto_broadcast Auto broadcast specification - /// - /// Output `[d0, ...]` - /// - LogicalOr(const Output& arg0, - const Output& arg1, - const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY)); - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; -}; +using ov::op::v1::LogicalOr; } // namespace v1 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/pad.hpp b/ngraph/core/include/ngraph/op/pad.hpp index 4570b0d65f28e5..c1cbf03c29f39c 100644 --- a/ngraph/core/include/ngraph/op/pad.hpp +++ b/ngraph/core/include/ngraph/op/pad.hpp @@ -7,73 +7,12 @@ #include "ngraph/coordinate_diff.hpp" #include "ngraph/op/op.hpp" #include "ngraph/op/util/attr_types.hpp" +#include "openvino/op/pad.hpp" namespace ngraph { namespace op { namespace v1 { -/// \brief Generic padding operation. -class NGRAPH_API Pad : public Op { -public: - NGRAPH_RTTI_DECLARATION; - - /// \brief Constructs a generic padding operation. - /// - /// \param arg The output producing input tensor to be padded. - /// \param pads_begin The output which specifies the number of padding elements - /// added - /// before position 0 on each axis of arg. - /// \param pads_end The output which specifies the number of padding elements - /// after the last element on each axis. - /// \param arg_pad_value The scalar output with the value used for padding - /// if pad_mode is CONSTANT - /// \param pad_mode The padding mode: CONSTANT, EDGE, REFLECT or SYMMETRIC. - /// CONSTANT initializes new elements with arg_pad_value, EDGE uses the nearest - /// value from arg. REFLECT and SYMMETRIC tile the background by flipping arg - /// at the edge (SYMMETRIC) or on the last row/column/etc. (REFLECT). 
- Pad(const Output& arg, - const Output& pads_begin, - const Output& pads_end, - const Output& arg_pad_value, - PadMode pad_mode); - - /// \brief Constructs a generic padding operation. - /// - /// \param arg The output producing input tensor to be padded. - /// \param pads_begin The output which specifies the number of padding elements - /// added - /// \param pads_end The output which specifies the number of padding elements - /// after the last element on each axis. - /// \param pad_mode The padding mode: CONSTANT, EDGE, REFLECT or SYMMETRIC. - Pad(const Output& arg, const Output& pads_begin, const Output& pads_end, PadMode pad_mode); - - /// \brief Constructs a generic padding operation. - Pad() = default; - - bool visit_attributes(AttributeVisitor& visitor) override; - void validate_and_infer_types() override; - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - /// return The node which specifies the number of padding elements - /// added at the beginning of each axis - CoordinateDiff get_pads_begin() const; - /// return The node which specifies the number of padding elements - /// added at the end of each axis - CoordinateDiff get_pads_end() const; - - /// \return The padding mode. 
- PadMode get_pad_mode() const { - return m_pad_mode; - } - void set_pad_mode(PadMode pad_mode) { - m_pad_mode = pad_mode; - } - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; - -private: - PadMode m_pad_mode; - bool evaluate_pad(const HostTensorVector& outputs, const HostTensorVector& inputs) const; -}; +using ov::op::v1::Pad; } // namespace v1 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/power.hpp b/ngraph/core/include/ngraph/op/power.hpp index b56f3b12371e8b..2ce0962868d6e5 100644 --- a/ngraph/core/include/ngraph/op/power.hpp +++ b/ngraph/core/include/ngraph/op/power.hpp @@ -5,45 +5,12 @@ #pragma once #include "ngraph/op/util/binary_elementwise_arithmetic.hpp" +#include "openvino/op/power.hpp" namespace ngraph { namespace op { namespace v1 { -// clang-format off - /// \brief Elementwise exponentiation operation. - /// - /// ## Inputs - /// - /// | | Type | Description | - /// | ------ | --------------------------------- | ------------------------------------------------------ | - /// | `arg0` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and numeric element type. | - /// | `arg1` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of the same shape and element type as `arg0`. | - /// - /// ## Output - /// - /// | Type | Description | - /// | ---------------------- | -------------------------------------------------------------------------------------------------------------- | - /// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \texttt{arg0}[i_1,\dots,i_n]^{\texttt{arg1}[i_1,\dots,i_n]}\f$ | -// clang-format on -class NGRAPH_API Power : public util::BinaryElementwiseArithmetic { -public: - NGRAPH_RTTI_DECLARATION; - - Power() : util::BinaryElementwiseArithmetic(AutoBroadcastSpec::NUMPY) {} - - /// \brief Constructs an exponentiation operation. 
- /// - /// \param arg0 Node that produces the first input tensor. - /// \param arg1 Node that produces the second input tensor. - /// \param auto_broadcast Auto broadcast specification - Power(const Output& arg0, - const Output& arg1, - const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY)); - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; -}; +using ov::op::v1::Power; } // namespace v1 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/prelu.hpp b/ngraph/core/include/ngraph/op/prelu.hpp index cc615c958bfbf1..bf69e1c8438b08 100644 --- a/ngraph/core/include/ngraph/op/prelu.hpp +++ b/ngraph/core/include/ngraph/op/prelu.hpp @@ -6,33 +6,12 @@ #include "ngraph/node.hpp" #include "ngraph/op/op.hpp" +#include "openvino/op/prelu.hpp" namespace ngraph { namespace op { namespace v0 { -/// \brief Parametrized Relu -/// x < 0 => f(x) = x * slope -/// x >= 0 => f(x) = x -/// -class NGRAPH_API PRelu : public ngraph::op::Op { -public: - NGRAPH_RTTI_DECLARATION; - PRelu(); - /// \brief Constructs a PRelu operation. 
- /// - /// \param data Input tensor - /// \param slope Multipliers for negative values - PRelu(const Output& data, const Output& slope); - - bool visit_attributes(AttributeVisitor& visitor) override; - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - void validate_and_infer_types() override; - - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; -}; +using ov::op::v0::PRelu; } // namespace v0 using v0::PRelu; } // namespace op diff --git a/ngraph/core/include/ngraph/op/prior_box.hpp b/ngraph/core/include/ngraph/op/prior_box.hpp index 12d5b814ac472a..0f0b760ef3ce9b 100644 --- a/ngraph/core/include/ngraph/op/prior_box.hpp +++ b/ngraph/core/include/ngraph/op/prior_box.hpp @@ -5,64 +5,13 @@ #pragma once #include "ngraph/op/op.hpp" +#include "openvino/op/prior_box.hpp" namespace ngraph { namespace op { -struct PriorBoxAttrs { - // min_size Desired min_size of prior boxes - // max_size Desired max_size of prior boxes - // aspect_ratio Aspect ratios of prior boxes - // clip Clip output to [0,1] - // flip Flip aspect ratios - // step Distance between prior box centers - // offset Box offset relative to top center of image - // variance Values to adjust prior boxes with - // scale_all_sizes Scale all sizes - std::vector min_size; - std::vector max_size; - std::vector aspect_ratio; - std::vector density; - std::vector fixed_ratio; - std::vector fixed_size; - bool clip = false; - bool flip = false; - float step = 0.0f; - float offset = 0.0f; - std::vector variance; - bool scale_all_sizes = true; -}; - +using PriorBoxAttrs = ov::op::v0::PriorBox::Attributes; namespace v0 { -/// \brief Layer which generates prior boxes of specified sizes -/// normalized to input image size -class NGRAPH_API PriorBox : public Op { -public: - NGRAPH_RTTI_DECLARATION; - - PriorBox() = default; - /// \brief Constructs a PriorBox operation - /// - /// \param layer_shape 
Shape of layer for which prior boxes are computed - /// \param image_shape Shape of image to which prior boxes are scaled - /// \param attrs PriorBox attributes - PriorBox(const Output& layer_shape, const Output& image_shape, const PriorBoxAttrs& attrs); - - void validate_and_infer_types() override; - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - static int64_t number_of_priors(const PriorBoxAttrs& attrs); - - static std::vector normalized_aspect_ratio(const std::vector& aspect_ratio, bool flip); - const PriorBoxAttrs& get_attrs() const { - return m_attrs; - } - bool visit_attributes(AttributeVisitor& visitor) override; - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; - -private: - PriorBoxAttrs m_attrs; -}; +using ov::op::v0::PriorBox; } // namespace v0 using v0::PriorBox; } // namespace op diff --git a/ngraph/core/include/ngraph/op/prior_box_clustered.hpp b/ngraph/core/include/ngraph/op/prior_box_clustered.hpp index 77a1d5fca420cf..b5646c37427208 100644 --- a/ngraph/core/include/ngraph/op/prior_box_clustered.hpp +++ b/ngraph/core/include/ngraph/op/prior_box_clustered.hpp @@ -5,57 +5,14 @@ #pragma once #include "ngraph/op/op.hpp" +#include "openvino/op/prior_box_clustered.hpp" namespace ngraph { namespace op { -struct NGRAPH_API PriorBoxClusteredAttrs { - // widths Desired widths of prior boxes - // heights Desired heights of prior boxes - // clip Clip output to [0,1] - // step_widths Distance between prior box centers - // step_heights Distance between prior box centers - // step Distance between prior box centers (when step_w = step_h) - // offset Box offset relative to top center of image - // variances Values to adjust prior boxes with - std::vector widths; - std::vector heights; - bool clip = true; - float step_widths = 0.0f; - float step_heights = 0.0f; - float step = 0.0f; - float offset = 0.0f; - std::vector variances; -}; 
+using PriorBoxClusteredAttrs = ov::op::v0::PriorBoxClustered::Attributes; namespace v0 { -/// \brief Layer which generates prior boxes of specified sizes -/// normalized to input image size -class NGRAPH_API PriorBoxClustered : public Op { -public: - NGRAPH_RTTI_DECLARATION; - - PriorBoxClustered() = default; - /// \brief Constructs a PriorBoxClustered operation - /// - /// \param layer_shape Shape of layer for which prior boxes are computed - /// \param image_shape Shape of image to which prior boxes are scaled - /// \param attrs PriorBoxClustered attributes - PriorBoxClustered(const Output& layer_shape, - const Output& image_shape, - const PriorBoxClusteredAttrs& attrs); - - void validate_and_infer_types() override; - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - const PriorBoxClusteredAttrs& get_attrs() const { - return m_attrs; - } - bool visit_attributes(AttributeVisitor& visitor) override; - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; - -private: - PriorBoxClusteredAttrs m_attrs; -}; +using ov::op::v0::PriorBoxClustered; } // namespace v0 using v0::PriorBoxClustered; } // namespace op diff --git a/ngraph/core/include/ngraph/op/proposal.hpp b/ngraph/core/include/ngraph/op/proposal.hpp index 0c4e7b023d1638..8d83da40671a31 100644 --- a/ngraph/core/include/ngraph/op/proposal.hpp +++ b/ngraph/core/include/ngraph/op/proposal.hpp @@ -5,93 +5,19 @@ #pragma once #include "ngraph/op/op.hpp" +#include "openvino/op/proposal.hpp" namespace ngraph { namespace op { -// base_size Anchor sizes -// pre_nms_topn Number of boxes before nms -// post_nms_topn Number of boxes after nms -// nms_thresh Threshold for nms -// feat_stride Feature stride -// min_size Minimum box size -// ratio Ratios for anchor generation -// scale Scales for anchor generation -// clip_before_nms Clip before NMs -// clip_after_nms Clip after NMs -// normalize Normalize 
boxes to [0,1] -// box_size_scale Scale factor for scaling box size -// box_coordinate_scale Scale factor for scaling box coordiate -// framework Calculation frameworkrithm to use -struct ProposalAttrs { - size_t base_size; - size_t pre_nms_topn; - size_t post_nms_topn; - float nms_thresh = 0.0f; - size_t feat_stride = 1; - size_t min_size = 1; - std::vector ratio; - std::vector scale; - bool clip_before_nms = true; - bool clip_after_nms = false; - bool normalize = false; - float box_size_scale = 1.0f; - float box_coordinate_scale = 1.0f; - std::string framework; - bool infer_probs = false; -}; +using ProposalAttrs = ov::op::v0::Proposal::Attributes; namespace v0 { -class NGRAPH_API Proposal : public Op { -public: - NGRAPH_RTTI_DECLARATION; - Proposal() = default; - /// \brief Constructs a Proposal operation - /// - /// \param class_probs Class probability scores - /// \param bbox_deltas Prediction of bounding box deltas - /// \param image_shape Shape of image - /// \param attrs Proposal op attributes - Proposal(const Output& class_probs, - const Output& bbox_deltas, - const Output& image_shape, - const ProposalAttrs& attrs); - - void validate_and_infer_types() override; - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - const ProposalAttrs& get_attrs() const { - return m_attrs; - } - bool visit_attributes(AttributeVisitor& visitor) override; - -protected: - ProposalAttrs m_attrs; -}; +using ov::op::v0::Proposal; } // namespace v0 namespace v4 { -class NGRAPH_API Proposal : public op::v0::Proposal { -public: - NGRAPH_RTTI_DECLARATION; - Proposal() = default; - /// \brief Constructs a Proposal operation - /// - /// \param class_probs Class probability scores - /// \param bbox_deltas Prediction of bounding box deltas - /// \param image_shape Shape of image - /// \param attrs Proposal op attributes - Proposal(const Output& class_probs, - const Output& bbox_deltas, - const Output& image_shape, - const ProposalAttrs& attrs); 
- - void validate_and_infer_types() override; - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - const ProposalAttrs& get_attrs() const { - return m_attrs; - } -}; +using ov::op::v4::Proposal; } // namespace v4 - using v0::Proposal; } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/psroi_pooling.hpp b/ngraph/core/include/ngraph/op/psroi_pooling.hpp index 02d7815df99952..2f4ca9c5681e17 100644 --- a/ngraph/core/include/ngraph/op/psroi_pooling.hpp +++ b/ngraph/core/include/ngraph/op/psroi_pooling.hpp @@ -5,68 +5,12 @@ #pragma once #include "ngraph/op/op.hpp" +#include "openvino/op/psroi_pooling.hpp" namespace ngraph { namespace op { namespace v0 { -class NGRAPH_API PSROIPooling : public Op { -public: - NGRAPH_RTTI_DECLARATION; - - PSROIPooling() = default; - /// \brief Constructs a PSROIPooling operation - /// - /// \param input Input feature map {N, C, ...} - /// \param coords Coordinates of bounding boxes - /// \param output_dim Output channel number - /// \param group_size Number of groups to encode position-sensitive scores - /// \param spatial_scale Ratio of input feature map over input image size - /// \param spatial_bins_x Numbers of bins to divide the input feature maps over - /// width - /// \param spatial_bins_y Numbers of bins to divide the input feature maps over - /// height - /// \param mode Mode of pooling - Avg or Bilinear - PSROIPooling(const Output& input, - const Output& coords, - const size_t output_dim, - const size_t group_size, - const float spatial_scale, - int spatial_bins_x, - int spatial_bins_y, - const std::string& mode); - - bool visit_attributes(AttributeVisitor& visitor) override; - void validate_and_infer_types() override; - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - size_t get_output_dim() const { - return m_output_dim; - } - size_t get_group_size() const { - return m_group_size; - } - float 
get_spatial_scale() const { - return m_spatial_scale; - } - int get_spatial_bins_x() const { - return m_spatial_bins_x; - } - int get_spatial_bins_y() const { - return m_spatial_bins_y; - } - const std::string& get_mode() const { - return m_mode; - } - -private: - size_t m_output_dim; - size_t m_group_size; - float m_spatial_scale; - int m_spatial_bins_x; - int m_spatial_bins_y; - std::string m_mode; -}; +using ov::op::v0::PSROIPooling; } // namespace v0 using v0::PSROIPooling; } // namespace op diff --git a/ngraph/core/include/openvino/op/logical_not.hpp b/ngraph/core/include/openvino/op/logical_not.hpp new file mode 100644 index 00000000000000..2156e70166a163 --- /dev/null +++ b/ngraph/core/include/openvino/op/logical_not.hpp @@ -0,0 +1,32 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v1 { +/// \brief Elementwise logical negation operation. +class OPENVINO_API LogicalNot : public Op { +public: + OPENVINO_RTTI_DECLARATION; + /// \brief Constructs a logical negation operation. + LogicalNot() = default; + /// \brief Constructs a logical negation operation. + /// + /// \param arg Node that produces the input tensor. 
+ LogicalNot(const Output& arg); + + bool visit_attributes(AttributeVisitor& visitor) override; + void validate_and_infer_types() override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v1 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/logical_or.hpp b/ngraph/core/include/openvino/op/logical_or.hpp new file mode 100644 index 00000000000000..a95eadd5ef39f6 --- /dev/null +++ b/ngraph/core/include/openvino/op/logical_or.hpp @@ -0,0 +1,41 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +#include "openvino/op/util/binary_elementwise_logical.hpp" + +namespace ov { +namespace op { +namespace v1 { +/// \brief Elementwise logical-or operation. +/// +class OPENVINO_API LogicalOr : public util::BinaryElementwiseLogical { +public: + OPENVINO_RTTI_DECLARATION; + LogicalOr() = default; + /// \brief Constructs a logical-or operation. + /// + /// \param arg0 Node that produces the first input tensor.
+ /// `[d0, ...]` + /// \param arg1 Node that produces the second input tensor.
+ /// `[d0, ...]` + /// \param auto_broadcast Auto broadcast specification + /// + /// Output `[d0, ...]` + /// + LogicalOr(const Output& arg0, + const Output& arg1, + const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY)); + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v1 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/lstm_sequence.hpp b/ngraph/core/include/openvino/op/lstm_sequence.hpp index a89f425a052a15..fdb733c5815f01 100644 --- a/ngraph/core/include/openvino/op/lstm_sequence.hpp +++ b/ngraph/core/include/openvino/op/lstm_sequence.hpp @@ -28,9 +28,9 @@ namespace v0 { /// \sa LSTMCell, RNNCell, GRUCell /// /// -class NGRAPH_API LSTMSequence : public Op { +class OPENVINO_API LSTMSequence : public Op { public: - NGRAPH_RTTI_DECLARATION; + OPENVINO_RTTI_DECLARATION; LSTMSequence(); using direction = RecurrentSequenceDirection; @@ -146,9 +146,9 @@ namespace v5 { /// \sa LSTMCell, RNNCell, GRUCell /// /// -class NGRAPH_API LSTMSequence : public util::RNNCellBase { +class OPENVINO_API LSTMSequence : public util::RNNCellBase { public: - NGRAPH_RTTI_DECLARATION; + OPENVINO_RTTI_DECLARATION; LSTMSequence() = default; using direction = RecurrentSequenceDirection; diff --git a/ngraph/core/include/openvino/op/matmul.hpp b/ngraph/core/include/openvino/op/matmul.hpp new file mode 100644 index 00000000000000..c511300ada2bab --- /dev/null +++ b/ngraph/core/include/openvino/op/matmul.hpp @@ -0,0 +1,55 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v0 { +/// \brief Operator performing Matrix Multiplication. 
+class OPENVINO_API MatMul : public Op { +public: + OPENVINO_RTTI_DECLARATION; + MatMul() = default; + /// \brief Constructs an Matrix Multiplication operation. + /// + /// \param A Matrix A + /// \param B Matrix B + /// \param transpose_a If matrix A should be transposed. + /// \param transpose_b If matrix B should be transposed. + MatMul(const Output& A, + const Output& B, + const bool& transpose_a = false, + const bool& transpose_b = false); + + bool visit_attributes(AttributeVisitor& visitor) override; + void validate_and_infer_types() override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; + + bool get_transpose_a() const { + return m_transpose_a; + } + bool get_transpose_b() const { + return m_transpose_b; + } + void set_transpose_a(bool transpose_a) { + m_transpose_a = transpose_a; + } + void set_transpose_b(bool transpose_b) { + m_transpose_b = transpose_b; + } + +private: + bool m_transpose_a; + bool m_transpose_b; +}; +} // namespace v0 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/matrix_nms.hpp b/ngraph/core/include/openvino/op/matrix_nms.hpp new file mode 100644 index 00000000000000..e16b290da188cc --- /dev/null +++ b/ngraph/core/include/openvino/op/matrix_nms.hpp @@ -0,0 +1,91 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/util/nms_base.hpp" + +namespace ov { +namespace op { +namespace v8 { +/// \brief MatrixNms operation +/// +class OPENVINO_API MatrixNms : public util::NmsBase { +public: + OPENVINO_RTTI_DECLARATION; + + enum class DecayFunction { GAUSSIAN, LINEAR }; + + /// \brief Structure that specifies attributes of the operation + struct Attributes { + // specifies order of output elements + SortResultType sort_result_type = SortResultType::NONE; + 
// specifies whenever it is necessary to sort selected boxes across batches or + // not + bool sort_result_across_batch = false; + // specifies the output tensor type + ngraph::element::Type output_type = ngraph::element::i64; + // specifies minimum score to consider box for the processing + float score_threshold = 0.0f; + // specifies maximum number of boxes to be selected per class, -1 meaning to + // keep all boxes + int nms_top_k = -1; + // specifies maximum number of boxes to be selected per batch element, -1 + // meaning to keep all boxes + int keep_top_k = -1; + // specifies the background class id, -1 meaning to keep all classes + int background_class = -1; + // specifies decay function used to decay scores + DecayFunction decay_function = DecayFunction::LINEAR; + // specifies gaussian_sigma parameter for gaussian decay_function + float gaussian_sigma = 2.0f; + // specifies threshold to filter out boxes with low confidence score after + // decaying + float post_threshold = 0.0f; + // specifies whether boxes are normalized or not + bool normalized = true; + }; + + MatrixNms(); + + /// \brief Constructs a MatrixNms operation + /// + /// \param boxes Node producing the box coordinates + /// \param scores Node producing the box scores + /// \param attrs Attributes of the operation + MatrixNms(const Output& boxes, const Output& scores, const Attributes& attrs); + + bool visit_attributes(AttributeVisitor& visitor) override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + /// \brief Returns attributes of the operation MatrixNms + const Attributes& get_attrs() const { + return m_attrs; + } + +protected: + Attributes m_attrs; + + void validate() override; +}; +} // namespace v8 +} // namespace op +OPENVINO_API +std::ostream& operator<<(std::ostream& s, const op::v8::MatrixNms::DecayFunction& type); + +template <> +class OPENVINO_API AttributeAdapter + : public EnumAttributeAdapterBase { +public: + 
AttributeAdapter(op::v8::MatrixNms::DecayFunction& value) + : EnumAttributeAdapterBase(value) {} + + static constexpr DiscreteTypeInfo type_info{"AttributeAdapter", 1}; + const DiscreteTypeInfo& get_type_info() const override { + return type_info; + } +}; + +} // namespace ov diff --git a/ngraph/core/include/openvino/op/max.hpp b/ngraph/core/include/openvino/op/max.hpp new file mode 100644 index 00000000000000..527a3af37803a3 --- /dev/null +++ b/ngraph/core/include/openvino/op/max.hpp @@ -0,0 +1,31 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/util/arithmetic_reductions_keep_dims.hpp" + +namespace ov { +namespace op { +namespace v1 { +class OPENVINO_API ReduceMax : public util::ArithmeticReductionKeepDims { +public: + OPENVINO_RTTI_DECLARATION; + /// \brief Constructs a summation operation. + ReduceMax() = default; + /// \brief Constructs a summation operation. + /// + /// \param arg The tensor to be summed. + /// \param reduction_axes The axis positions (0-based) to be eliminated. + /// \param keep_dims If set to 1 it holds axes that are used for reduction. 
+ ReduceMax(const Output& arg, const Output& reduction_axes, bool keep_dims = false); + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v1 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/max_pool.hpp b/ngraph/core/include/openvino/op/max_pool.hpp new file mode 100644 index 00000000000000..d64660961d2f8a --- /dev/null +++ b/ngraph/core/include/openvino/op/max_pool.hpp @@ -0,0 +1,133 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +#include "openvino/op/util/max_pool_base.hpp" + +namespace ov { +namespace op { +namespace v1 { +/// \brief Batched max pooling operation. +class OPENVINO_API MaxPool : public op::util::MaxPoolBase { +public: + OPENVINO_RTTI_DECLARATION; + + /// \brief Constructs a batched max pooling operation. + MaxPool() = default; + + /// \brief Constructs a batched max pooling operation. + /// + /// \param arg The node producing the input data batch tensor. + /// \param strides The strides. + /// \param pads_begin The beginning of padding shape. + /// \param pads_end The end of padding shape. + /// \param kernel The kernel shape. + /// \param rounding_type Whether to use ceiling or floor rounding type while + /// computing output shape. + /// \param auto_pad The pad type for automatically computing padding sizes. 
+ MaxPool(const Output& arg, + const Strides& strides, + const ngraph::Shape& pads_begin, + const ngraph::Shape& pads_end, + const ngraph::Shape& kernel, + const op::RoundingType rounding_type = op::RoundingType::FLOOR, + const PadType auto_pad = op::PadType::EXPLICIT); + + bool visit_attributes(AttributeVisitor& visitor) override; + void validate_and_infer_types() override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + /// \return The default value for MaxPool. + OPENVINO_SUPPRESS_DEPRECATED_START + std::shared_ptr get_default_value() const override; + OPENVINO_SUPPRESS_DEPRECATED_END + + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; + +private: + bool evaluate_maxpool(const HostTensorVector& outputs, const HostTensorVector& inputs) const; +}; +} // namespace v1 + +namespace v8 { +/// \brief MaxPooling operation with values and indices calculated as individual outputs +class OPENVINO_API MaxPool : public op::util::MaxPoolBase { +public: + OPENVINO_RTTI_DECLARATION; + + /// \brief Constructs an empty MaxPool operation. + MaxPool() = default; + + /// \brief Constructs a parametrized MaxPool operation. + /// + /// \param arg Output of a node producing the feature tensor to be pooled. + /// \param strides The strides of the pooling filter. + /// \param dilations The dilations of the pooling filter. + /// \param pads_begin Paddings at the beginning of each spatial axis. + /// \param pads_end Paddings at the end of each spatial axis. + /// \param kernel The kernel shape. + /// \param rounding_type Whether to use ceiling or floor rounding type while + /// computing the output shape. + /// \param auto_pad The pad type for automatic calculation of the padding sizes. + /// \param index_element_type The data type used by the second output tensor + /// containing the selected indices. 
+ /// \param axis Indicates a dimension in the input data shape which should be used + /// as a starting point for calculation of the upper bound of allowed + /// values of the indices output. + MaxPool(const Output& arg, + const Strides& strides, + const Strides& dilations, + const ngraph::Shape& pads_begin, + const ngraph::Shape& pads_end, + const ngraph::Shape& kernel, + const op::RoundingType rounding_type = op::RoundingType::FLOOR, + const PadType auto_pad = op::PadType::EXPLICIT, + const element::Type index_element_type = element::i64, + const int64_t axis = 0); + + bool visit_attributes(AttributeVisitor& visitor) override; + void validate_and_infer_types() override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + /// \return The pooling filter's dilations. + const Strides& get_dilations() const noexcept { + return m_dilations; + } + void set_dilations(const Strides& dilations) { + m_dilations = dilations; + } + + /// \return The data type of the second output tensor (indices). + element::Type get_index_element_type() const noexcept { + return m_index_element_type; + } + void set_index_element_type(const element::Type index_element_type) { + m_index_element_type = index_element_type; + } + + // \return The 'axis' attribute value. 
+ int64_t get_axis() const { + return m_axis; + } + void set_axis(const int64_t axis) { + m_axis = axis; + } + + bool has_evaluate() const override; + bool evaluate(const HostTensorVector&, const HostTensorVector&) const override; + +private: + Strides m_dilations; + element::Type m_index_element_type{element::i64}; + int64_t m_axis{0}; +}; +} // namespace v8 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/maximum.hpp b/ngraph/core/include/openvino/op/maximum.hpp new file mode 100644 index 00000000000000..5218eacdce477a --- /dev/null +++ b/ngraph/core/include/openvino/op/maximum.hpp @@ -0,0 +1,36 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/util/binary_elementwise_arithmetic.hpp" + +namespace ov { +namespace op { +namespace v1 { +/// \brief Elementwise maximum operation. +class OPENVINO_API Maximum : public util::BinaryElementwiseArithmetic { +public: + OPENVINO_RTTI_DECLARATION; + + /// \brief Constructs a maximum operation. + Maximum() : util::BinaryElementwiseArithmetic(AutoBroadcastSpec::NUMPY) {} + + /// \brief Constructs a maximum operation. + /// + /// \param arg0 Node that produces the first input tensor. + /// \param arg1 Node that produces the second input tensor. 
+ /// \param auto_broadcast Auto broadcast specification + Maximum(const Output& arg0, + const Output& arg1, + const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY)); + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v1 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/minimum.hpp b/ngraph/core/include/openvino/op/minimum.hpp new file mode 100644 index 00000000000000..7219af61379966 --- /dev/null +++ b/ngraph/core/include/openvino/op/minimum.hpp @@ -0,0 +1,36 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/util/binary_elementwise_arithmetic.hpp" + +namespace ov { +namespace op { +namespace v1 { +/// \brief Elementwise minimum operation. +class OPENVINO_API Minimum : public util::BinaryElementwiseArithmetic { +public: + OPENVINO_RTTI_DECLARATION; + + /// \brief Constructs a minimum operation. + Minimum() : util::BinaryElementwiseArithmetic(AutoBroadcastSpec::NUMPY) {} + + /// \brief Constructs a minimum operation. + /// + /// \param arg0 Node that produces the first input tensor. + /// \param arg1 Node that produces the second input tensor. 
+ /// \param auto_broadcast Auto broadcast specification + Minimum(const Output& arg0, + const Output& arg1, + const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY)); + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v1 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/mish.hpp b/ngraph/core/include/openvino/op/mish.hpp new file mode 100644 index 00000000000000..37e1bcab11260c --- /dev/null +++ b/ngraph/core/include/openvino/op/mish.hpp @@ -0,0 +1,34 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v4 { +/// \brief A Self Regularized Non-Monotonic Neural Activation Function +/// f(x) = x * tanh(log(exp(x) + 1.)) +/// +class OPENVINO_API Mish : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + Mish() = default; + /// \brief Constructs an Mish operation. 
+ /// + /// \param data Input tensor + Mish(const Output& arg); + bool visit_attributes(AttributeVisitor& visitor) override; + void validate_and_infer_types() override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v4 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/mod.hpp b/ngraph/core/include/openvino/op/mod.hpp new file mode 100644 index 00000000000000..679a8d591bafca --- /dev/null +++ b/ngraph/core/include/openvino/op/mod.hpp @@ -0,0 +1,32 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/util/binary_elementwise_arithmetic.hpp" + +namespace ov { +namespace op { +namespace v1 { +/// \brief Mod returns an element-wise division reminder with two given tensors applying +/// multi-directional broadcast rules. +class OPENVINO_API Mod : public util::BinaryElementwiseArithmetic { +public: + OPENVINO_RTTI_DECLARATION; + + /// \brief Constructs a Mod node. 
+ Mod() : util::BinaryElementwiseArithmetic(AutoBroadcastSpec::NUMPY) {} + /// + /// \param A - Dividend tensor + /// \param B - Divisor tensor + /// \param auto_broadcast Auto broadcast specification + Mod(const Output& A, + const Output& B, + const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY)); + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; +}; +} // namespace v1 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/multiclass_nms.hpp b/ngraph/core/include/openvino/op/multiclass_nms.hpp new file mode 100644 index 00000000000000..55d4a10e6610d8 --- /dev/null +++ b/ngraph/core/include/openvino/op/multiclass_nms.hpp @@ -0,0 +1,69 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/util/nms_base.hpp" + +namespace ov { +namespace op { +namespace v8 { +/// \brief MulticlassNms operation +/// +class OPENVINO_API MulticlassNms : public util::NmsBase { +public: + OPENVINO_RTTI_DECLARATION; + + /// \brief Structure that specifies attributes of the operation + struct Attributes { + // specifies order of output elements + SortResultType sort_result_type = SortResultType::NONE; + // specifies whenever it is necessary to sort selected boxes across batches or + // not + bool sort_result_across_batch = false; + // specifies the output tensor type + ngraph::element::Type output_type = ngraph::element::i64; + // specifies intersection over union threshold + float iou_threshold = 0.0f; + // specifies minimum score to consider box for the processing + float score_threshold = 0.0f; + // specifies maximum number of boxes to be selected per class, -1 meaning to + // keep all boxes + int nms_top_k = -1; + // specifies maximum number of boxes to be selected per batch element, -1 + // meaning to keep all boxes + int keep_top_k = -1; + // specifies the background class id, -1 meaning to keep all classes + 
int background_class = -1; + // specifies eta parameter for adaptive NMS, in closed range [0, 1.0] + float nms_eta = 1.0f; + // specifies whether boxes are normalized or not + bool normalized = true; + }; + + MulticlassNms(); + + /// \brief Constructs a MulticlassNms operation + /// + /// \param boxes Node producing the box coordinates + /// \param scores Node producing the box scores + /// \param attrs Attributes of the operation + MulticlassNms(const Output& boxes, const Output& scores, const Attributes& attrs); + + bool visit_attributes(AttributeVisitor& visitor) override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + /// \brief Returns attributes of the operation MulticlassNms + const Attributes& get_attrs() const { + return m_attrs; + } + +protected: + Attributes m_attrs; + void validate() override; +}; +} // namespace v8 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/multiply.hpp b/ngraph/core/include/openvino/op/multiply.hpp new file mode 100644 index 00000000000000..57fb9cdfceb3ce --- /dev/null +++ b/ngraph/core/include/openvino/op/multiply.hpp @@ -0,0 +1,36 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/util/binary_elementwise_arithmetic.hpp" + +namespace ov { +namespace op { +namespace v1 { +/// \brief Elementwise multiplication operation. +class OPENVINO_API Multiply : public util::BinaryElementwiseArithmetic { +public: + OPENVINO_RTTI_DECLARATION; + + /// \brief Constructs a multiplication operation. + Multiply() : util::BinaryElementwiseArithmetic(AutoBroadcastSpec::NUMPY) {} + + /// \brief Constructs a multiplication operation. + /// + /// \param arg0 Node that produces the first input tensor. + /// \param arg1 Node that produces the second input tensor. 
+ /// \param auto_broadcast Auto broadcast specification + Multiply(const Output& arg0, + const Output& arg1, + const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY)); + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v1 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/mvn.hpp b/ngraph/core/include/openvino/op/mvn.hpp new file mode 100644 index 00000000000000..8e315f947a92ed --- /dev/null +++ b/ngraph/core/include/openvino/op/mvn.hpp @@ -0,0 +1,144 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { + +namespace v0 { +/// \brief Operator performing Mean Variance Normalization +/// +class OPENVINO_API MVN : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + MVN() = default; + /// \brief Constructs an MVN operation. + /// + /// \param data Input tensor with data + /// \param normalize_variance flag that denotes whether to perform variance + /// normalization. + /// \param across_channels flag that denotes if mean values are shared across + /// channels. + /// \param eps the number to be added to the variance to avoid division by zero when + /// normalizing the value + /// + MVN(const Output& data, bool across_channels = true, bool normalize_variance = true, double eps = 1e-9); + + /// \brief Constructs an MVN operation. + /// + /// \param data Input tensor with data + /// \param reduction_axes A list of axes, along which to reduce. + /// \param normalize_variance flag that denotes whether to perform variance + /// normalization. 
+ /// \param eps the number to be added to the variance to avoid division by zero when + /// normalizing the value + /// + MVN(const Output& data, AxisSet reduction_axes, bool normalize_variance = true, double eps = 1e-9); + + void validate_and_infer_types() override; + + bool visit_attributes(AttributeVisitor& visitor) override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + double get_eps() const { + return m_eps; + } + bool get_across_channels() const { + return m_across_channels; + } + bool get_normalize_variance() const { + return m_normalize_variance; + } + AxisSet get_reduction_axes() const { + return m_reduction_axes; + } + void set_reduction_axes(AxisSet axes) { + m_reduction_axes = std::move(axes); + } + +private: + double m_eps; + bool m_across_channels; + bool m_normalize_variance; + AxisSet m_reduction_axes; +}; +} // namespace v0 + +/// \brief Specifies how eps is applied in MVN +enum class MVNEpsMode { + // Apply eps inside sqrt + INSIDE_SQRT, + // Apply eps outside sqrt + OUTSIDE_SQRT +}; + +OPENVINO_API +std::ostream& operator<<(std::ostream& s, const MVNEpsMode& type); + +namespace v6 { +/// \brief Operator performing Mean Variance Normalization +/// +class OPENVINO_API MVN : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + MVN() = default; + /// \brief Constructs an MVN operation. + /// + /// \param data Input tensor with data + /// \param reduction_axes A list of axes, along which to reduce. + /// \param normalize_variance flag that denotes whether to perform variance + /// normalization. 
+ /// \param eps the number to be added to the variance to avoid division by zero when + /// normalizing the value + /// \param eps_mode the mode of applying epsilon + /// + MVN(const Output& data, + const Output& reduction_axes, + bool normalize_variance, + float eps, + MVNEpsMode eps_mode); + + bool visit_attributes(AttributeVisitor& visitor) override; + void validate_and_infer_types() override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + float get_eps() const { + return m_eps; + } + bool get_normalize_variance() const { + return m_normalize_variance; + } + MVNEpsMode get_eps_mode() const { + return m_eps_mode; + } + +private: + bool m_normalize_variance; + float m_eps; + MVNEpsMode m_eps_mode; +}; +} // namespace v6 +} // namespace op + +template <> +class OPENVINO_API AttributeAdapter : public EnumAttributeAdapterBase { +public: + AttributeAdapter(op::MVNEpsMode& value) : EnumAttributeAdapterBase(value) {} + + static constexpr DiscreteTypeInfo type_info{"AttributeAdapter", 0}; + const DiscreteTypeInfo& get_type_info() const override { + return type_info; + } +}; + +} // namespace ov diff --git a/ngraph/core/include/openvino/op/negative.hpp b/ngraph/core/include/openvino/op/negative.hpp new file mode 100644 index 00000000000000..2aa2448df5bf28 --- /dev/null +++ b/ngraph/core/include/openvino/op/negative.hpp @@ -0,0 +1,30 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/util/unary_elementwise_arithmetic.hpp" + +namespace ov { +namespace op { +namespace v0 { +/// \brief Elementwise negative operation. +class OPENVINO_API Negative : public util::UnaryElementwiseArithmetic { +public: + OPENVINO_RTTI_DECLARATION; + /// \brief Constructs a negative operation. + Negative() = default; + /// \brief Constructs a negative operation. + /// + /// \param arg Node that produces the input tensor. 
+ Negative(const Output& arg); + + bool visit_attributes(AttributeVisitor& visitor) override; + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v0 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/non_max_suppression.hpp b/ngraph/core/include/openvino/op/non_max_suppression.hpp new file mode 100644 index 00000000000000..b51119090054fc --- /dev/null +++ b/ngraph/core/include/openvino/op/non_max_suppression.hpp @@ -0,0 +1,408 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v1 { +/// \brief NonMaxSuppression operation. +/// +class OPENVINO_API NonMaxSuppression : public Op { +public: + enum class BoxEncodingType { CORNER, CENTER }; + + OPENVINO_RTTI_DECLARATION; + + NonMaxSuppression() = default; + + /// \brief Constructs a NonMaxSuppression operation. 
+ /// + /// \param boxes Node producing the box coordinates + /// \param scores Node producing the box scores + /// \param max_output_boxes_per_class Node producing maximum number of boxes to be + /// selected per class + /// \param iou_threshold Node producing intersection over union threshold + /// \param score_threshold Node producing minimum score threshold + /// \param box_encoding Specifies the format of boxes data encoding + NonMaxSuppression(const Output& boxes, + const Output& scores, + const Output& max_output_boxes_per_class, + const Output& iou_threshold, + const Output& score_threshold, + const BoxEncodingType box_encoding = BoxEncodingType::CORNER, + const bool sort_result_descending = true); + + /// \brief Constructs a NonMaxSuppression operation with default values for the last + /// 3 inputs + /// + /// \param boxes Node producing the box coordinates + /// \param scores Node producing the box scores + /// \param box_encoding Specifies the format of boxes data encoding + /// \param sort_result_descending Specifies whether it is necessary to sort selected + /// boxes across batches + /// \param output_type Specifies the output tensor type + NonMaxSuppression(const Output& boxes, + const Output& scores, + const BoxEncodingType box_encoding = BoxEncodingType::CORNER, + const bool sort_result_descending = true); + + bool visit_attributes(AttributeVisitor& visitor) override; + void validate_and_infer_types() override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + BoxEncodingType get_box_encoding() const { + return m_box_encoding; + } + void set_box_encoding(const BoxEncodingType box_encoding) { + m_box_encoding = box_encoding; + } + bool get_sort_result_descending() const { + return m_sort_result_descending; + } + void set_sort_result_descending(const bool sort_result_descending) { + m_sort_result_descending = sort_result_descending; + } + +protected: + BoxEncodingType m_box_encoding = 
BoxEncodingType::CORNER; + bool m_sort_result_descending = true; + +private: + int64_t max_boxes_output_from_input() const; +}; +} // namespace v1 + +namespace v3 { +/// \brief NonMaxSuppression operation +/// +class OPENVINO_API NonMaxSuppression : public Op { +public: + enum class BoxEncodingType { CORNER, CENTER }; + + OPENVINO_RTTI_DECLARATION; + NonMaxSuppression() = default; + + /// \brief Constructs a NonMaxSuppression operation. + /// + /// \param boxes Node producing the box coordinates + /// \param scores Node producing the box scores + /// \param max_output_boxes_per_class Node producing maximum number of boxes to be + /// selected per class + /// \param iou_threshold Node producing intersection over union threshold + /// \param score_threshold Node producing minimum score threshold + /// \param box_encoding Specifies the format of boxes data encoding + /// \param sort_result_descending Specifies whether it is necessary to sort selected + /// boxes across batches + /// \param output_type Specifies the output tensor type + NonMaxSuppression(const Output& boxes, + const Output& scores, + const Output& max_output_boxes_per_class, + const Output& iou_threshold, + const Output& score_threshold, + const BoxEncodingType box_encoding = BoxEncodingType::CORNER, + const bool sort_result_descending = true, + const ngraph::element::Type& output_type = ngraph::element::i64); + + /// \brief Constructs a NonMaxSuppression operation with default values for the last + /// 3 inputs + /// + /// \param boxes Node producing the box coordinates + /// \param scores Node producing the box scores + /// \param box_encoding Specifies the format of boxes data encoding + /// \param sort_result_descending Specifies whether it is necessary to sort selected + /// boxes across batches + /// \param output_type Specifies the output tensor type + NonMaxSuppression(const Output& boxes, + const Output& scores, + const BoxEncodingType box_encoding = BoxEncodingType::CORNER, + const bool 
sort_result_descending = true, + const ngraph::element::Type& output_type = ngraph::element::i64); + + bool visit_attributes(AttributeVisitor& visitor) override; + void validate_and_infer_types() override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + BoxEncodingType get_box_encoding() const { + return m_box_encoding; + } + void set_box_encoding(const BoxEncodingType box_encoding) { + m_box_encoding = box_encoding; + } + bool get_sort_result_descending() const { + return m_sort_result_descending; + } + void set_sort_result_descending(const bool sort_result_descending) { + m_sort_result_descending = sort_result_descending; + } + + element::Type get_output_type() const { + return m_output_type; + } + void set_output_type(const element::Type& output_type) { + m_output_type = output_type; + } + using Node::set_output_type; + +protected: + BoxEncodingType m_box_encoding = BoxEncodingType::CORNER; + bool m_sort_result_descending = true; + ngraph::element::Type m_output_type = ngraph::element::i64; + void validate(); + int64_t max_boxes_output_from_input() const; +}; +} // namespace v3 + +namespace v4 { +/// \brief NonMaxSuppression operation +/// +class OPENVINO_API NonMaxSuppression : public op::v3::NonMaxSuppression { +public: + OPENVINO_RTTI_DECLARATION; + NonMaxSuppression() = default; + + /// \brief Constructs a NonMaxSuppression operation. 
+ /// + /// \param boxes Node producing the box coordinates + /// \param scores Node producing the box scores + /// \param max_output_boxes_per_class Node producing maximum number of boxes to be + /// selected per class + /// \param iou_threshold Node producing intersection over union threshold + /// \param score_threshold Node producing minimum score threshold + /// \param box_encoding Specifies the format of boxes data encoding + /// \param sort_result_descending Specifies whether it is necessary to sort selected + /// boxes across batches + /// \param output_type Specifies the output tensor type + NonMaxSuppression(const Output& boxes, + const Output& scores, + const Output& max_output_boxes_per_class, + const Output& iou_threshold, + const Output& score_threshold, + const BoxEncodingType box_encoding = BoxEncodingType::CORNER, + const bool sort_result_descending = true, + const ngraph::element::Type& output_type = ngraph::element::i64); + + /// \brief Constructs a NonMaxSuppression operation with default values for the last + /// 3 inputs + /// + /// \param boxes Node producing the box coordinates + /// \param scores Node producing the box scores + /// \param box_encoding Specifies the format of boxes data encoding + /// \param sort_result_descending Specifies whether it is necessary to sort selected + /// boxes across batches + /// \param output_type Specifies the output tensor type + NonMaxSuppression(const Output& boxes, + const Output& scores, + const BoxEncodingType box_encoding = BoxEncodingType::CORNER, + const bool sort_result_descending = true, + const ngraph::element::Type& output_type = ngraph::element::i64); + + void validate_and_infer_types() override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; +}; +} // namespace v4 + +namespace v5 { +/// \brief NonMaxSuppression operation +/// +class OPENVINO_API NonMaxSuppression : public Op { +public: + OPENVINO_RTTI_DECLARATION; + enum class BoxEncodingType { 
CORNER, CENTER }; + + NonMaxSuppression() = default; + + /// \brief Constructs a NonMaxSuppression operation with default values in the last + /// 4 inputs. + /// + /// \param boxes Node producing the box coordinates + /// \param scores Node producing the box scores + /// \param box_encoding Specifies the format of boxes data encoding + /// \param sort_result_descending Specifies whether it is necessary to sort selected + /// boxes across batches + /// \param output_type Specifies the output tensor type + NonMaxSuppression(const Output& boxes, + const Output& scores, + const BoxEncodingType box_encoding = BoxEncodingType::CORNER, + const bool sort_result_descending = true, + const ngraph::element::Type& output_type = ngraph::element::i64); + + /// \brief Constructs a NonMaxSuppression operation with default values in the last. + /// 3 inputs. + /// + /// \param boxes Node producing the box coordinates + /// \param scores Node producing the box scores + /// \param max_output_boxes_per_class Node producing maximum number of boxes to be + /// selected per class + /// \param box_encoding Specifies the format of boxes data encoding + /// \param sort_result_descending Specifies whether it is necessary to sort selected + /// boxes across batches + /// \param output_type Specifies the output tensor type + NonMaxSuppression(const Output& boxes, + const Output& scores, + const Output& max_output_boxes_per_class, + const BoxEncodingType box_encoding = BoxEncodingType::CORNER, + const bool sort_result_descending = true, + const ngraph::element::Type& output_type = ngraph::element::i64); + + /// \brief Constructs a NonMaxSuppression operation with default values in the last. + /// 2 inputs. 
+ /// + /// \param boxes Node producing the box coordinates + /// \param scores Node producing the box scores + /// \param max_output_boxes_per_class Node producing maximum number of boxes to be + /// selected per class + /// \param iou_threshold Node producing intersection over union threshold + /// \param box_encoding Specifies the format of boxes data encoding + /// \param sort_result_descending Specifies whether it is necessary to sort selected + /// boxes across batches + /// \param output_type Specifies the output tensor type + NonMaxSuppression(const Output& boxes, + const Output& scores, + const Output& max_output_boxes_per_class, + const Output& iou_threshold, + const BoxEncodingType box_encoding = BoxEncodingType::CORNER, + const bool sort_result_descending = true, + const ngraph::element::Type& output_type = ngraph::element::i64); + + /// \brief Constructs a NonMaxSuppression operation with default value in the last. + /// input. + /// + /// \param boxes Node producing the box coordinates + /// \param scores Node producing the box scores + /// \param max_output_boxes_per_class Node producing maximum number of boxes to be + /// selected per class + /// \param iou_threshold Node producing intersection over union threshold + /// \param score_threshold Node producing minimum score threshold + /// \param box_encoding Specifies the format of boxes data encoding + /// \param sort_result_descending Specifies whether it is necessary to sort selected + /// boxes across batches + /// \param output_type Specifies the output tensor type + NonMaxSuppression(const Output& boxes, + const Output& scores, + const Output& max_output_boxes_per_class, + const Output& iou_threshold, + const Output& score_threshold, + const BoxEncodingType box_encoding = BoxEncodingType::CORNER, + const bool sort_result_descending = true, + const ngraph::element::Type& output_type = ngraph::element::i64); + + /// \brief Constructs a NonMaxSuppression operation. 
+ /// + /// \param boxes Node producing the box coordinates + /// \param scores Node producing the box scores + /// \param max_output_boxes_per_class Node producing maximum number of boxes to be + /// selected per class + /// \param iou_threshold Node producing intersection over union threshold + /// \param score_threshold Node producing minimum score threshold + /// \param soft_nms_sigma Node specifying the sigma parameter for Soft-NMS + /// \param box_encoding Specifies the format of boxes data encoding + /// \param sort_result_descending Specifies whether it is necessary to sort selected + /// boxes across batches + /// \param output_type Specifies the output tensor type + NonMaxSuppression(const Output& boxes, + const Output& scores, + const Output& max_output_boxes_per_class, + const Output& iou_threshold, + const Output& score_threshold, + const Output& soft_nms_sigma, + const BoxEncodingType box_encoding = BoxEncodingType::CORNER, + const bool sort_result_descending = true, + const ngraph::element::Type& output_type = ngraph::element::i64); + + bool visit_attributes(AttributeVisitor& visitor) override; + void validate_and_infer_types() override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + BoxEncodingType get_box_encoding() const { + return m_box_encoding; + } + void set_box_encoding(const BoxEncodingType box_encoding) { + m_box_encoding = box_encoding; + } + bool get_sort_result_descending() const { + return m_sort_result_descending; + } + void set_sort_result_descending(const bool sort_result_descending) { + m_sort_result_descending = sort_result_descending; + } + + element::Type get_output_type() const { + return m_output_type; + } + void set_output_type(const element::Type& output_type) { + m_output_type = output_type; + } + using Node::set_output_type; + + int64_t max_boxes_output_from_input() const; + float iou_threshold_from_input() const; + float score_threshold_from_input() const; + float 
soft_nms_sigma_from_input() const; + bool is_soft_nms_sigma_constant_and_default() const; + +protected: + BoxEncodingType m_box_encoding = BoxEncodingType::CORNER; + bool m_sort_result_descending = true; + ngraph::element::Type m_output_type = ngraph::element::i64; + void validate(); +}; +} // namespace v5 +} // namespace op + +OPENVINO_API +std::ostream& operator<<(std::ostream& s, const op::v1::NonMaxSuppression::BoxEncodingType& type); + +OPENVINO_API +std::ostream& operator<<(std::ostream& s, const op::v3::NonMaxSuppression::BoxEncodingType& type); + +OPENVINO_API +std::ostream& operator<<(std::ostream& s, const op::v5::NonMaxSuppression::BoxEncodingType& type); + +template <> +class OPENVINO_API AttributeAdapter + : public EnumAttributeAdapterBase { +public: + AttributeAdapter(op::v1::NonMaxSuppression::BoxEncodingType& value) + : EnumAttributeAdapterBase(value) {} + + static constexpr DiscreteTypeInfo type_info{"AttributeAdapter", 1}; + const DiscreteTypeInfo& get_type_info() const override { + return type_info; + } +}; + +template <> +class OPENVINO_API AttributeAdapter + : public EnumAttributeAdapterBase { +public: + AttributeAdapter(op::v3::NonMaxSuppression::BoxEncodingType& value) + : EnumAttributeAdapterBase(value) {} + + static constexpr DiscreteTypeInfo type_info{"AttributeAdapter", 1}; + const DiscreteTypeInfo& get_type_info() const override { + return type_info; + } +}; + +template <> +class OPENVINO_API AttributeAdapter + : public EnumAttributeAdapterBase { +public: + AttributeAdapter(op::v5::NonMaxSuppression::BoxEncodingType& value) + : EnumAttributeAdapterBase(value) {} + + static constexpr DiscreteTypeInfo type_info{"AttributeAdapter", 1}; + const DiscreteTypeInfo& get_type_info() const override { + return type_info; + } +}; + +} // namespace ov diff --git a/ngraph/core/include/openvino/op/non_zero.hpp b/ngraph/core/include/openvino/op/non_zero.hpp new file mode 100644 index 00000000000000..13fc7728243155 --- /dev/null +++ 
b/ngraph/core/include/openvino/op/non_zero.hpp @@ -0,0 +1,67 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v3 { +/// \brief NonZero operation returning indices of non-zero elements in the input tensor. +/// +/// \note The indices are returned by-dimension in row-major order. For example +/// the following output contains 3 indices of a 3D input tensor elements: +/// [[0, 0, 2], +/// [0, 1, 1], +/// [0, 1, 2]] +/// The values point to input elements at [0,0,0], [0,1,1] and [2,1,2] +class OPENVINO_API NonZero : public Op { +public: + OPENVINO_RTTI_DECLARATION; + /// \brief Constructs a NonZero operation. + NonZero() = default; + /// \brief Constructs a NonZero operation. + /// + /// \note The output type is int64. + /// + /// \param arg Node that produces the input tensor. + NonZero(const Output& arg); + /// \brief Constructs a NonZero operation. + /// + /// \param arg Node that produces the input tensor. + /// \param output_type produce indices. Currently, only 'int64' or 'int32' + /// are + /// supported + NonZero(const Output& arg, const std::string& output_type); + /// \brief Constructs a NonZero operation. + /// + /// \param arg Node that produces the input tensor. + /// \param output_type produce indices. 
Currently, only int64 or int32 are + /// supported + NonZero(const Output& arg, const element::Type& output_type); + + bool visit_attributes(AttributeVisitor& visitor) override; + void validate_and_infer_types() override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + element::Type get_output_type() const { + return m_output_type; + } + void set_output_type(element::Type output_type) { + m_output_type = output_type; + } + // Overload collision with method on Node + using Node::set_output_type; + + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; + +protected: + element::Type m_output_type = element::i64; +}; +} // namespace v3 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/normalize_l2.hpp b/ngraph/core/include/openvino/op/normalize_l2.hpp new file mode 100644 index 00000000000000..dee0ef24eebd5d --- /dev/null +++ b/ngraph/core/include/openvino/op/normalize_l2.hpp @@ -0,0 +1,53 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +#include "openvino/op/op.hpp" +#include "openvino/op/util/attr_types.hpp" + +namespace ov { +namespace op { +namespace v0 { +/// \brief Normalization with L2 norm. +/// +class OPENVINO_API NormalizeL2 : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + NormalizeL2() = default; + /// + /// \brief Constructs a NormalizeL2 operation. + /// + /// \param data - Node producing the input tensor + /// \param axes - Node indicating axes along which reduction is + /// calculated + /// \param eps - The epsilon added to L2 norm. 
+ /// \param eps_mode - Specifies how eps is combined with L2 value + /// calculated before division + /// + NormalizeL2(const Output& data, const Output& axes, float eps, EpsMode eps_mode); + + void validate_and_infer_types() override; + bool visit_attributes(AttributeVisitor& visitor) override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + float get_eps() const { + return m_eps; + } + EpsMode get_eps_mode() const { + return m_eps_mode; + } + AxisSet get_reduction_axes() const; + +protected: + float m_eps; + EpsMode m_eps_mode; +}; +} // namespace v0 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/not_equal.hpp b/ngraph/core/include/openvino/op/not_equal.hpp new file mode 100644 index 00000000000000..b31944e9939bba --- /dev/null +++ b/ngraph/core/include/openvino/op/not_equal.hpp @@ -0,0 +1,35 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/util/binary_elementwise_comparison.hpp" + +namespace ov { +namespace op { +namespace v1 { +/// \brief Elementwise not-equal operation. +class OPENVINO_API NotEqual : public util::BinaryElementwiseComparison { +public: + OPENVINO_RTTI_DECLARATION; + /// \brief Constructs a not-equal operation. + NotEqual() : util::BinaryElementwiseComparison(AutoBroadcastSpec::NUMPY) {} + /// \brief Constructs a not-equal operation. + /// + /// \param arg0 Node that produces the first input tensor. + /// \param arg1 Node that produces the second input tensor. 
+ /// \param auto_broadcast Auto broadcast specification + NotEqual(const Output& arg0, + const Output& arg1, + const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY)); + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; + bool visit_attributes(AttributeVisitor& visitor) override; +}; +} // namespace v1 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/one_hot.hpp b/ngraph/core/include/openvino/op/one_hot.hpp new file mode 100644 index 00000000000000..6680ff1d0993eb --- /dev/null +++ b/ngraph/core/include/openvino/op/one_hot.hpp @@ -0,0 +1,54 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v1 { +class OPENVINO_API OneHot : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + /// \brief Constructs a one-hot operation. + OneHot() = default; + /// \brief Constructs a one-hot operation. + /// + /// \param indices Input tensor containing indices. + /// \param depth Specifies number of classes and the size of one-hot dimension. + /// \param on_value Specifies value that the locations in output tensor represented + /// by indices in input take. + /// \param off_value Specifies value that the locations in output tensor not + /// represented + /// by indices in input take. + /// \param axis Axis along which one-hot representation in added. 
+ OneHot(const Output& indices, + const Output& depth, + const Output& on_value, + const Output& off_value, + int64_t axis); + + bool visit_attributes(AttributeVisitor& visitor) override; + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + void validate_and_infer_types() override; + + bool evaluate(const HostTensorVector& output_values, const HostTensorVector& input_values) const override; + bool has_evaluate() const override; + + /// \return The index of the one-hot axis. + int64_t get_axis() const { + return m_axis; + } + void set_axis(int64_t axis) { + m_axis = axis; + } + +protected: + int64_t m_axis; +}; +} // namespace v1 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/pad.hpp b/ngraph/core/include/openvino/op/pad.hpp new file mode 100644 index 00000000000000..2f42122e4086a0 --- /dev/null +++ b/ngraph/core/include/openvino/op/pad.hpp @@ -0,0 +1,79 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/core/coordinate_diff.hpp" +#include "openvino/op/op.hpp" +#include "openvino/op/util/attr_types.hpp" + +namespace ov { +namespace op { +namespace v1 { +/// \brief Generic padding operation. +class OPENVINO_API Pad : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + /// \brief Constructs a generic padding operation. + /// + /// \param arg The output producing input tensor to be padded. + /// \param pads_begin The output which specifies the number of padding elements + /// added + /// before position 0 on each axis of arg. + /// \param pads_end The output which specifies the number of padding elements + /// after the last element on each axis. + /// \param arg_pad_value The scalar output with the value used for padding + /// if pad_mode is CONSTANT + /// \param pad_mode The padding mode: CONSTANT, EDGE, REFLECT or SYMMETRIC. 
+ /// CONSTANT initializes new elements with arg_pad_value, EDGE uses the nearest + /// value from arg. REFLECT and SYMMETRIC tile the background by flipping arg + /// at the edge (SYMMETRIC) or on the last row/column/etc. (REFLECT). + Pad(const Output& arg, + const Output& pads_begin, + const Output& pads_end, + const Output& arg_pad_value, + PadMode pad_mode); + + /// \brief Constructs a generic padding operation. + /// + /// \param arg The output producing input tensor to be padded. + /// \param pads_begin The output which specifies the number of padding elements + /// added + /// \param pads_end The output which specifies the number of padding elements + /// after the last element on each axis. + /// \param pad_mode The padding mode: CONSTANT, EDGE, REFLECT or SYMMETRIC. + Pad(const Output& arg, const Output& pads_begin, const Output& pads_end, PadMode pad_mode); + + /// \brief Constructs a generic padding operation. + Pad() = default; + + bool visit_attributes(AttributeVisitor& visitor) override; + void validate_and_infer_types() override; + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + /// return The node which specifies the number of padding elements + /// added at the beginning of each axis + CoordinateDiff get_pads_begin() const; + /// return The node which specifies the number of padding elements + /// added at the end of each axis + CoordinateDiff get_pads_end() const; + + /// \return The padding mode. 
+ PadMode get_pad_mode() const { + return m_pad_mode; + } + void set_pad_mode(PadMode pad_mode) { + m_pad_mode = pad_mode; + } + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; + +private: + PadMode m_pad_mode; + bool evaluate_pad(const HostTensorVector& outputs, const HostTensorVector& inputs) const; +}; +} // namespace v1 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/power.hpp b/ngraph/core/include/openvino/op/power.hpp new file mode 100644 index 00000000000000..0c074760eedb02 --- /dev/null +++ b/ngraph/core/include/openvino/op/power.hpp @@ -0,0 +1,49 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/util/binary_elementwise_arithmetic.hpp" + +namespace ov { +namespace op { +namespace v1 { +// clang-format off +/// \brief Elementwise exponentiation operation. +/// +/// ## Inputs +/// +/// | | Type | Description | +/// | ------ | --------------------------------- | ------------------------------------------------------ | +/// | `arg0` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and numeric element type. | +/// | `arg1` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of the same shape and element type as `arg0`. | +/// +/// ## Output +/// +/// | Type | Description | +/// | ---------------------- | -------------------------------------------------------------------------------------------------------------- | +/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \texttt{arg0}[i_1,\dots,i_n]^{\texttt{arg1}[i_1,\dots,i_n]}\f$ | +// clang-format on +class OPENVINO_API Power : public util::BinaryElementwiseArithmetic { +public: + OPENVINO_RTTI_DECLARATION; + + Power() : util::BinaryElementwiseArithmetic(AutoBroadcastSpec::NUMPY) {} + + /// \brief Constructs an exponentiation operation. 
+ /// + /// \param arg0 Node that produces the first input tensor. + /// \param arg1 Node that produces the second input tensor. + /// \param auto_broadcast Auto broadcast specification + Power(const Output& arg0, + const Output& arg1, + const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY)); + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v1 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/prelu.hpp b/ngraph/core/include/openvino/op/prelu.hpp new file mode 100644 index 00000000000000..53394810103320 --- /dev/null +++ b/ngraph/core/include/openvino/op/prelu.hpp @@ -0,0 +1,37 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v0 { +/// \brief Parametrized Relu +/// x < 0 => f(x) = x * slope +/// x >= 0 => f(x) = x +/// +class OPENVINO_API PRelu : public Op { +public: + OPENVINO_RTTI_DECLARATION; + PRelu(); + /// \brief Constructs a PRelu operation. 
+ /// + /// \param data Input tensor + /// \param slope Multipliers for negative values + PRelu(const Output& data, const Output& slope); + + bool visit_attributes(AttributeVisitor& visitor) override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + void validate_and_infer_types() override; + + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v0 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/prior_box.hpp b/ngraph/core/include/openvino/op/prior_box.hpp new file mode 100644 index 00000000000000..2acf9150b1ceed --- /dev/null +++ b/ngraph/core/include/openvino/op/prior_box.hpp @@ -0,0 +1,67 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v0 { +/// \brief Layer which generates prior boxes of specified sizes +/// normalized to input image size +class OPENVINO_API PriorBox : public Op { +public: + OPENVINO_RTTI_DECLARATION; + struct Attributes { + // min_size Desired min_size of prior boxes + // max_size Desired max_size of prior boxes + // aspect_ratio Aspect ratios of prior boxes + // clip Clip output to [0,1] + // flip Flip aspect ratios + // step Distance between prior box centers + // offset Box offset relative to top center of image + // variance Values to adjust prior boxes with + // scale_all_sizes Scale all sizes + std::vector min_size; + std::vector max_size; + std::vector aspect_ratio; + std::vector density; + std::vector fixed_ratio; + std::vector fixed_size; + bool clip = false; + bool flip = false; + float step = 0.0f; + float offset = 0.0f; + std::vector variance; + bool scale_all_sizes = true; + }; + + PriorBox() = default; + /// \brief Constructs a PriorBox operation + /// + /// \param layer_shape Shape of layer for which prior boxes are 
computed + /// \param image_shape Shape of image to which prior boxes are scaled + /// \param attrs PriorBox attributes + PriorBox(const Output& layer_shape, const Output& image_shape, const Attributes& attrs); + + void validate_and_infer_types() override; + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + static int64_t number_of_priors(const Attributes& attrs); + + static std::vector normalized_aspect_ratio(const std::vector& aspect_ratio, bool flip); + const Attributes& get_attrs() const { + return m_attrs; + } + bool visit_attributes(AttributeVisitor& visitor) override; + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; + +private: + Attributes m_attrs; +}; +} // namespace v0 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/prior_box_clustered.hpp b/ngraph/core/include/openvino/op/prior_box_clustered.hpp new file mode 100644 index 00000000000000..9c4342def22451 --- /dev/null +++ b/ngraph/core/include/openvino/op/prior_box_clustered.hpp @@ -0,0 +1,59 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { + +namespace v0 { +/// \brief Layer which generates prior boxes of specified sizes +/// normalized to input image size +class OPENVINO_API PriorBoxClustered : public Op { +public: + OPENVINO_RTTI_DECLARATION; + struct Attributes { + // widths Desired widths of prior boxes + // heights Desired heights of prior boxes + // clip Clip output to [0,1] + // step_widths Distance between prior box centers + // step_heights Distance between prior box centers + // step Distance between prior box centers (when step_w = step_h) + // offset Box offset relative to top center of image + // variances Values to adjust prior boxes with + std::vector widths; + std::vector heights; + bool clip = true; + float 
step_widths = 0.0f; + float step_heights = 0.0f; + float step = 0.0f; + float offset = 0.0f; + std::vector variances; + }; + + PriorBoxClustered() = default; + /// \brief Constructs a PriorBoxClustered operation + /// + /// \param layer_shape Shape of layer for which prior boxes are computed + /// \param image_shape Shape of image to which prior boxes are scaled + /// \param attrs PriorBoxClustered attributes + PriorBoxClustered(const Output& layer_shape, const Output& image_shape, const Attributes& attrs); + + void validate_and_infer_types() override; + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + const Attributes& get_attrs() const { + return m_attrs; + } + bool visit_attributes(AttributeVisitor& visitor) override; + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; + +private: + Attributes m_attrs; +}; +} // namespace v0 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/proposal.hpp b/ngraph/core/include/openvino/op/proposal.hpp new file mode 100644 index 00000000000000..0daf18d5744e14 --- /dev/null +++ b/ngraph/core/include/openvino/op/proposal.hpp @@ -0,0 +1,95 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { + +namespace v0 { +class OPENVINO_API Proposal : public Op { +public: + OPENVINO_RTTI_DECLARATION; + // base_size Anchor sizes + // pre_nms_topn Number of boxes before nms + // post_nms_topn Number of boxes after nms + // nms_thresh Threshold for nms + // feat_stride Feature stride + // min_size Minimum box size + // ratio Ratios for anchor generation + // scale Scales for anchor generation + // clip_before_nms Clip before NMs + // clip_after_nms Clip after NMs + // normalize Normalize boxes to [0,1] + // box_size_scale Scale factor for scaling box size + // 
box_coordinate_scale Scale factor for scaling box coordiate + // framework Calculation frameworkrithm to use + struct Attributes { + size_t base_size; + size_t pre_nms_topn; + size_t post_nms_topn; + float nms_thresh = 0.0f; + size_t feat_stride = 1; + size_t min_size = 1; + std::vector ratio; + std::vector scale; + bool clip_before_nms = true; + bool clip_after_nms = false; + bool normalize = false; + float box_size_scale = 1.0f; + float box_coordinate_scale = 1.0f; + std::string framework; + bool infer_probs = false; + }; + Proposal() = default; + /// \brief Constructs a Proposal operation + /// + /// \param class_probs Class probability scores + /// \param bbox_deltas Prediction of bounding box deltas + /// \param image_shape Shape of image + /// \param attrs Proposal op attributes + Proposal(const Output& class_probs, + const Output& bbox_deltas, + const Output& image_shape, + const Attributes& attrs); + + void validate_and_infer_types() override; + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + const Attributes& get_attrs() const { + return m_attrs; + } + bool visit_attributes(AttributeVisitor& visitor) override; + +protected: + Attributes m_attrs; +}; +} // namespace v0 + +namespace v4 { +class OPENVINO_API Proposal : public op::v0::Proposal { +public: + OPENVINO_RTTI_DECLARATION; + Proposal() = default; + /// \brief Constructs a Proposal operation + /// + /// \param class_probs Class probability scores + /// \param bbox_deltas Prediction of bounding box deltas + /// \param image_shape Shape of image + /// \param attrs Proposal op attributes + Proposal(const Output& class_probs, + const Output& bbox_deltas, + const Output& image_shape, + const Attributes& attrs); + + void validate_and_infer_types() override; + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + const Attributes& get_attrs() const { + return m_attrs; + } +}; +} // namespace v4 +} // namespace op +} // namespace ov diff 
--git a/ngraph/core/include/openvino/op/psroi_pooling.hpp b/ngraph/core/include/openvino/op/psroi_pooling.hpp new file mode 100644 index 00000000000000..6574a1dbb449d4 --- /dev/null +++ b/ngraph/core/include/openvino/op/psroi_pooling.hpp @@ -0,0 +1,72 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v0 { +class OPENVINO_API PSROIPooling : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + PSROIPooling() = default; + /// \brief Constructs a PSROIPooling operation + /// + /// \param input Input feature map {N, C, ...} + /// \param coords Coordinates of bounding boxes + /// \param output_dim Output channel number + /// \param group_size Number of groups to encode position-sensitive scores + /// \param spatial_scale Ratio of input feature map over input image size + /// \param spatial_bins_x Numbers of bins to divide the input feature maps over + /// width + /// \param spatial_bins_y Numbers of bins to divide the input feature maps over + /// height + /// \param mode Mode of pooling - Avg or Bilinear + PSROIPooling(const Output& input, + const Output& coords, + const size_t output_dim, + const size_t group_size, + const float spatial_scale, + int spatial_bins_x, + int spatial_bins_y, + const std::string& mode); + + bool visit_attributes(AttributeVisitor& visitor) override; + void validate_and_infer_types() override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + size_t get_output_dim() const { + return m_output_dim; + } + size_t get_group_size() const { + return m_group_size; + } + float get_spatial_scale() const { + return m_spatial_scale; + } + int get_spatial_bins_x() const { + return m_spatial_bins_x; + } + int get_spatial_bins_y() const { + return m_spatial_bins_y; + } + const std::string& get_mode() const { + return m_mode; + } + +private: + size_t m_output_dim; + size_t 
m_group_size; + float m_spatial_scale; + int m_spatial_bins_x; + int m_spatial_bins_y; + std::string m_mode; +}; +} // namespace v0 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/reduce_min.hpp b/ngraph/core/include/openvino/op/reduce_min.hpp new file mode 100644 index 00000000000000..8509979fc37162 --- /dev/null +++ b/ngraph/core/include/openvino/op/reduce_min.hpp @@ -0,0 +1,33 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/util/arithmetic_reductions_keep_dims.hpp" + +namespace ov { +namespace op { +namespace v1 { +class OPENVINO_API ReduceMin : public util::ArithmeticReductionKeepDims { +public: + OPENVINO_RTTI_DECLARATION; + /// \brief Constructs a summation operation. + ReduceMin() = default; + /// \brief Constructs a summation operation. + /// + /// \param arg The tensor to be summed. + /// \param reduction_axes The axis positions (0-based) to be eliminated. + /// \param keep_dims If set to 1 it holds axes that are used for reduction. + ReduceMin(const Output& arg, const Output& reduction_axes, bool keep_dims = false); + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; + bool evaluate_lower(const HostTensorVector& outputs) const override; + bool evaluate_upper(const HostTensorVector& outputs) const override; +}; +} // namespace v1 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/tensor_iterator.hpp b/ngraph/core/include/openvino/op/tensor_iterator.hpp index 9d8a2e5a8382ce..232ef4c13db334 100644 --- a/ngraph/core/include/openvino/op/tensor_iterator.hpp +++ b/ngraph/core/include/openvino/op/tensor_iterator.hpp @@ -14,9 +14,9 @@ namespace ov { namespace op { namespace v0 { /// \brief Iterate a body over tensors, accumulating into tensors. 
-class NGRAPH_API TensorIterator : public op::util::SubGraphOp { +class OPENVINO_API TensorIterator : public op::util::SubGraphOp { public: - NGRAPH_RTTI_DECLARATION; + OPENVINO_RTTI_DECLARATION; bool visit_attributes(AttributeVisitor& visitor) override; diff --git a/ngraph/core/src/op/not.cpp b/ngraph/core/src/op/logical_not.cpp similarity index 98% rename from ngraph/core/src/op/not.cpp rename to ngraph/core/src/op/logical_not.cpp index 26dcc0d734525a..c96a4834ffb16f 100644 --- a/ngraph/core/src/op/not.cpp +++ b/ngraph/core/src/op/logical_not.cpp @@ -2,9 +2,8 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph/op/not.hpp" - #include "itt.hpp" +#include "ngraph/op/not.hpp" #include "ngraph/op/op.hpp" #include "ngraph/op/util/elementwise_args.hpp" #include "ngraph/runtime/host_tensor.hpp" @@ -14,7 +13,7 @@ using namespace ngraph; using namespace std; -NGRAPH_RTTI_DEFINITION(op::v1::LogicalNot, "LogicalNot", 1); +OPENVINO_RTTI_DEFINITION(op::v1::LogicalNot, "LogicalNot", 1); op::v1::LogicalNot::LogicalNot(const Output& arg) : Op({arg}) { constructor_validate_and_infer_types(); diff --git a/ngraph/core/src/op/or.cpp b/ngraph/core/src/op/logical_or.cpp similarity index 96% rename from ngraph/core/src/op/or.cpp rename to ngraph/core/src/op/logical_or.cpp index 5c1518e5c281f6..c19eb3d8ab5974 100644 --- a/ngraph/core/src/op/or.cpp +++ b/ngraph/core/src/op/logical_or.cpp @@ -2,9 +2,8 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph/op/or.hpp" - #include "itt.hpp" +#include "ngraph/op/or.hpp" #include "ngraph/runtime/host_tensor.hpp" #include "ngraph/runtime/reference/or.hpp" #include "ngraph/validation_util.hpp" @@ -12,7 +11,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v1::LogicalOr, "LogicalOr", 1, util::BinaryElementwiseLogical); +OPENVINO_RTTI_DEFINITION(op::v1::LogicalOr, "LogicalOr", 1, util::BinaryElementwiseLogical); op::v1::LogicalOr::LogicalOr(const Output& arg0, const Output& arg1, diff --git 
a/ngraph/core/src/op/matmul.cpp b/ngraph/core/src/op/matmul.cpp index 4175fafcd57629..432d5c73099cbb 100644 --- a/ngraph/core/src/op/matmul.cpp +++ b/ngraph/core/src/op/matmul.cpp @@ -15,7 +15,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::MatMul, "MatMul", 0); +OPENVINO_RTTI_DEFINITION(op::v0::MatMul, "MatMul", 0); op::MatMul::MatMul(const Output& A, const Output& B, const bool& transpose_a, const bool& transpose_b) : Op(OutputVector{A, B}), diff --git a/ngraph/core/src/op/matrix_nms.cpp b/ngraph/core/src/op/matrix_nms.cpp index a4f0879a1dbcd7..5df137505ae8b5 100644 --- a/ngraph/core/src/op/matrix_nms.cpp +++ b/ngraph/core/src/op/matrix_nms.cpp @@ -18,7 +18,7 @@ using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v8::MatrixNms, "MatrixNms", 8, op::util::NmsBase); +OPENVINO_RTTI_DEFINITION(op::v8::MatrixNms, "MatrixNms", 8, op::util::NmsBase); op::v8::MatrixNms::MatrixNms() : NmsBase(m_attrs.output_type, m_attrs.nms_top_k, m_attrs.keep_top_k) {} @@ -64,7 +64,7 @@ bool ngraph::op::v8::MatrixNms::visit_attributes(AttributeVisitor& visitor) { return true; } -std::ostream& ngraph::operator<<(std::ostream& s, const op::v8::MatrixNms::DecayFunction& type) { +std::ostream& ov::operator<<(std::ostream& s, const op::v8::MatrixNms::DecayFunction& type) { return s << as_string(type); } diff --git a/ngraph/core/src/op/max.cpp b/ngraph/core/src/op/max.cpp index fe929584dc1d9c..66def527bf95f0 100644 --- a/ngraph/core/src/op/max.cpp +++ b/ngraph/core/src/op/max.cpp @@ -41,7 +41,7 @@ bool evaluate_max(const HostTensorPtr& arg, const HostTensorPtr& out, const Axis } } // namespace maxop -NGRAPH_RTTI_DEFINITION(op::v1::ReduceMax, "ReduceMax", 1, util::ArithmeticReductionKeepDims); +OPENVINO_RTTI_DEFINITION(op::v1::ReduceMax, "ReduceMax", 1, util::ArithmeticReductionKeepDims); op::v1::ReduceMax::ReduceMax(const Output& arg, const Output& reduction_axes, bool keep_dims) : ArithmeticReductionKeepDims(arg, reduction_axes, keep_dims) { diff --git 
a/ngraph/core/src/op/max_pool.cpp b/ngraph/core/src/op/max_pool.cpp index 6398db7fc48c07..675d0bc8e6f54f 100644 --- a/ngraph/core/src/op/max_pool.cpp +++ b/ngraph/core/src/op/max_pool.cpp @@ -15,7 +15,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v1::MaxPool, "MaxPool", 1, op::util::MaxPoolBase); +OPENVINO_RTTI_DEFINITION(op::v1::MaxPool, "MaxPool", 1, op::util::MaxPoolBase); op::v1::MaxPool::MaxPool(const Output& arg, const Strides& strides, @@ -62,7 +62,7 @@ shared_ptr op::v1::MaxPool::clone_with_new_inputs(const OutputVector& new_ } shared_ptr op::v1::MaxPool::get_default_value() const { - return op::Constant::create(get_element_type(), get_shape(), {0}); + return op::v0::Constant::create(get_element_type(), get_shape(), {0}); } namespace maxpool { @@ -250,7 +250,7 @@ bool evaluate_maxpool(const HostTensorPtr& data, } } // namespace maxpool_v8 -NGRAPH_RTTI_DEFINITION(op::v8::MaxPool, "MaxPool", 8, op::util::MaxPoolBase); +OPENVINO_RTTI_DEFINITION(op::v8::MaxPool, "MaxPool", 8, op::util::MaxPoolBase); op::v8::MaxPool::MaxPool(const Output& arg, const Strides& strides, diff --git a/ngraph/core/src/op/maximum.cpp b/ngraph/core/src/op/maximum.cpp index 20788312bd638f..b88b7751527245 100644 --- a/ngraph/core/src/op/maximum.cpp +++ b/ngraph/core/src/op/maximum.cpp @@ -57,7 +57,7 @@ bool evaluate_maximum(const HostTensorPtr& arg0, // ------------------------------------ v1 ------------------------------------- -NGRAPH_RTTI_DEFINITION(op::v1::Maximum, "Maximum", 1, op::util::BinaryElementwiseArithmetic); +OPENVINO_RTTI_DEFINITION(op::v1::Maximum, "Maximum", 1, op::util::BinaryElementwiseArithmetic); op::v1::Maximum::Maximum(const Output& arg0, const Output& arg1, const AutoBroadcastSpec& auto_broadcast) : BinaryElementwiseArithmetic(arg0, arg1, auto_broadcast) { diff --git a/ngraph/core/src/op/minimum.cpp b/ngraph/core/src/op/minimum.cpp index 5e233eabd9accc..4a6f6d77c2cd49 100644 --- a/ngraph/core/src/op/minimum.cpp +++ 
b/ngraph/core/src/op/minimum.cpp @@ -55,7 +55,7 @@ bool evaluate_minimum(const HostTensorPtr& arg0, // ------------------------------ v1 ------------------------------------------- -NGRAPH_RTTI_DEFINITION(op::v1::Minimum, "Minimum", 1, op::util::BinaryElementwiseArithmetic); +OPENVINO_RTTI_DEFINITION(op::v1::Minimum, "Minimum", 1, op::util::BinaryElementwiseArithmetic); op::v1::Minimum::Minimum(const Output& arg0, const Output& arg1, const AutoBroadcastSpec& auto_broadcast) : BinaryElementwiseArithmetic(arg0, arg1, auto_broadcast) { diff --git a/ngraph/core/src/op/mish.cpp b/ngraph/core/src/op/mish.cpp index 970b78d910502e..f09f23e81ea8f9 100644 --- a/ngraph/core/src/op/mish.cpp +++ b/ngraph/core/src/op/mish.cpp @@ -14,7 +14,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v4::Mish, "Mish", 4); +OPENVINO_RTTI_DEFINITION(op::v4::Mish, "Mish", 4); op::v4::Mish::Mish(const Output& arg) : Op({arg}) { constructor_validate_and_infer_types(); diff --git a/ngraph/core/src/op/mod.cpp b/ngraph/core/src/op/mod.cpp index d1a1e54b96fc75..d64b8cd9ecaf2d 100644 --- a/ngraph/core/src/op/mod.cpp +++ b/ngraph/core/src/op/mod.cpp @@ -11,7 +11,7 @@ using namespace ngraph; // ------------------------------ v1 ------------------------------------------- -NGRAPH_RTTI_DEFINITION(op::v1::Mod, "Mod", 1, op::util::BinaryElementwiseArithmetic); +OPENVINO_RTTI_DEFINITION(op::v1::Mod, "Mod", 1, op::util::BinaryElementwiseArithmetic); op::v1::Mod::Mod(const Output& arg0, const Output& arg1, const AutoBroadcastSpec& auto_broadcast) : BinaryElementwiseArithmetic(arg0, arg1, auto_broadcast) { @@ -22,4 +22,4 @@ shared_ptr op::v1::Mod::clone_with_new_inputs(const OutputVector& new_args NGRAPH_OP_SCOPE(v1_Mod_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0), new_args.at(1), this->get_autob()); -} \ No newline at end of file +} diff --git a/ngraph/core/src/op/multiclass_nms.cpp b/ngraph/core/src/op/multiclass_nms.cpp index 
20c6174bd29a1b..84078ad25e5648 100644 --- a/ngraph/core/src/op/multiclass_nms.cpp +++ b/ngraph/core/src/op/multiclass_nms.cpp @@ -18,7 +18,7 @@ using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v8::MulticlassNms, "MulticlassNms", 8, op::util::NmsBase); +OPENVINO_RTTI_DEFINITION(op::v8::MulticlassNms, "MulticlassNms", 8, op::util::NmsBase); op::v8::MulticlassNms::MulticlassNms() : NmsBase(m_attrs.output_type, m_attrs.nms_top_k, m_attrs.keep_top_k) {} diff --git a/ngraph/core/src/op/multiply.cpp b/ngraph/core/src/op/multiply.cpp index 5af58e6205254e..7fc75453ce92da 100644 --- a/ngraph/core/src/op/multiply.cpp +++ b/ngraph/core/src/op/multiply.cpp @@ -50,7 +50,7 @@ bool evaluate_multiply(const HostTensorPtr& arg0, // ------------------------------------ v1 ------------------------------------- -NGRAPH_RTTI_DEFINITION(op::v1::Multiply, "Multiply", 1, util::BinaryElementwiseArithmetic); +OPENVINO_RTTI_DEFINITION(op::v1::Multiply, "Multiply", 1, util::BinaryElementwiseArithmetic); op::v1::Multiply::Multiply(const Output& arg0, const Output& arg1, const AutoBroadcastSpec& auto_broadcast) : BinaryElementwiseArithmetic(arg0, arg1, auto_broadcast) { diff --git a/ngraph/core/src/op/mvn.cpp b/ngraph/core/src/op/mvn.cpp index 0a6ae9a78d7c03..10cd0e74ab4bb8 100644 --- a/ngraph/core/src/op/mvn.cpp +++ b/ngraph/core/src/op/mvn.cpp @@ -13,7 +13,7 @@ using namespace ngraph; // ------------------------------ V0 ------------------------------ -NGRAPH_RTTI_DEFINITION(op::v0::MVN, "MVN", 0); +OPENVINO_RTTI_DEFINITION(op::v0::MVN, "MVN", 0); op::v0::MVN::MVN(const Output& data, bool across_channels, bool normalize_variance, double eps) : Op({data}), @@ -83,11 +83,11 @@ constexpr DiscreteTypeInfo AttributeAdapter::type_info; } // namespace ov -std::ostream& op::operator<<(std::ostream& s, const ngraph::op::MVNEpsMode& type) { +std::ostream& ov::op::operator<<(std::ostream& s, const ngraph::op::MVNEpsMode& type) { return s << as_string(type); } -NGRAPH_RTTI_DEFINITION(op::v6::MVN, "MVN", 
6); +OPENVINO_RTTI_DEFINITION(op::v6::MVN, "MVN", 6); op::v6::MVN::MVN(const Output& data, const Output& reduction_axes, diff --git a/ngraph/core/src/op/negative.cpp b/ngraph/core/src/op/negative.cpp index 10454d7a3a172f..2f9360226b8436 100644 --- a/ngraph/core/src/op/negative.cpp +++ b/ngraph/core/src/op/negative.cpp @@ -12,7 +12,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v0::Negative, "Negative", 0, util::UnaryElementwiseArithmetic); +OPENVINO_RTTI_DEFINITION(op::v0::Negative, "Negative", 0, util::UnaryElementwiseArithmetic); op::Negative::Negative(const Output& arg) : UnaryElementwiseArithmetic(arg) { constructor_validate_and_infer_types(); diff --git a/ngraph/core/src/op/non_max_suppression.cpp b/ngraph/core/src/op/non_max_suppression.cpp index 9cdf6615ce3fcf..143b89d60ef0b1 100644 --- a/ngraph/core/src/op/non_max_suppression.cpp +++ b/ngraph/core/src/op/non_max_suppression.cpp @@ -20,7 +20,7 @@ using namespace ngraph; // ------------------------------ V1 ------------------------------ -NGRAPH_RTTI_DEFINITION(op::v1::NonMaxSuppression, "NonMaxSuppression", 1); +OPENVINO_RTTI_DEFINITION(op::v1::NonMaxSuppression, "NonMaxSuppression", 1); op::v1::NonMaxSuppression::NonMaxSuppression(const Output& boxes, const Output& scores, @@ -41,9 +41,9 @@ op::v1::NonMaxSuppression::NonMaxSuppression(const Output& boxes, const bool sort_result_descending) : Op({boxes, scores, - op::Constant::create(element::i64, Shape{}, {0}), - op::Constant::create(element::f32, Shape{}, {.0f}), - op::Constant::create(element::f32, Shape{}, {.0f})}), + op::v0::Constant::create(element::i64, Shape{}, {0}), + op::v0::Constant::create(element::f32, Shape{}, {.0f}), + op::v0::Constant::create(element::f32, Shape{}, {.0f})}), m_box_encoding{box_encoding}, m_sort_result_descending{sort_result_descending} { constructor_validate_and_infer_types(); @@ -54,11 +54,12 @@ std::shared_ptr op::v1::NonMaxSuppression::clone_with_new_inputs(const Out 
check_new_args_count(this, new_args); NODE_VALIDATION_CHECK(this, new_args.size() >= 2 && new_args.size() <= 5, "Number of inputs must be 2, 3, 4 or 5"); - const auto& arg2 = new_args.size() > 2 ? new_args.at(2) : ngraph::op::Constant::create(element::i32, Shape{}, {0}); + const auto& arg2 = + new_args.size() > 2 ? new_args.at(2) : ngraph::op::v0::Constant::create(element::i32, Shape{}, {0}); const auto& arg3 = - new_args.size() > 3 ? new_args.at(3) : ngraph::op::Constant::create(element::f32, Shape{}, {.0f}); + new_args.size() > 3 ? new_args.at(3) : ngraph::op::v0::Constant::create(element::f32, Shape{}, {.0f}); const auto& arg4 = - new_args.size() > 4 ? new_args.at(4) : ngraph::op::Constant::create(element::f32, Shape{}, {.0f}); + new_args.size() > 4 ? new_args.at(4) : ngraph::op::v0::Constant::create(element::f32, Shape{}, {.0f}); return std::make_shared(new_args.at(0), new_args.at(1), @@ -109,7 +110,7 @@ void op::v1::NonMaxSuppression::validate_and_infer_types() { if (inputs().size() >= 3) { const auto max_boxes_ps = get_input_partial_shape(2); NODE_VALIDATION_CHECK(this, - max_boxes_ps.is_dynamic() || is_scalar(max_boxes_ps.to_shape()), + max_boxes_ps.is_dynamic() || ngraph::is_scalar(max_boxes_ps.to_shape()), "Expected a scalar for the 'max_output_boxes_per_class' input. Got: ", max_boxes_ps); } @@ -117,7 +118,7 @@ void op::v1::NonMaxSuppression::validate_and_infer_types() { if (inputs().size() >= 4) { const auto iou_threshold_ps = get_input_partial_shape(3); NODE_VALIDATION_CHECK(this, - iou_threshold_ps.is_dynamic() || is_scalar(iou_threshold_ps.to_shape()), + iou_threshold_ps.is_dynamic() || ngraph::is_scalar(iou_threshold_ps.to_shape()), "Expected a scalar for the 'iou_threshold' input. 
Got: ", iou_threshold_ps); } @@ -125,7 +126,7 @@ void op::v1::NonMaxSuppression::validate_and_infer_types() { if (inputs().size() >= 5) { const auto score_threshold_ps = get_input_partial_shape(4); NODE_VALIDATION_CHECK(this, - score_threshold_ps.is_dynamic() || is_scalar(score_threshold_ps.to_shape()), + score_threshold_ps.is_dynamic() || ngraph::is_scalar(score_threshold_ps.to_shape()), "Expected a scalar for the 'score_threshold' input. Got: ", score_threshold_ps); } @@ -189,13 +190,12 @@ constexpr DiscreteTypeInfo AttributeAdapter& boxes, const Output& scores, @@ -219,9 +219,9 @@ op::v3::NonMaxSuppression::NonMaxSuppression(const Output& boxes, const element::Type& output_type) : Op({boxes, scores, - op::Constant::create(element::i64, Shape{}, {0}), - op::Constant::create(element::f32, Shape{}, {.0f}), - op::Constant::create(element::f32, Shape{}, {.0f})}), + op::v0::Constant::create(element::i64, Shape{}, {0}), + op::v0::Constant::create(element::f32, Shape{}, {.0f}), + op::v0::Constant::create(element::f32, Shape{}, {.0f})}), m_box_encoding{box_encoding}, m_sort_result_descending{sort_result_descending}, m_output_type{output_type} { @@ -233,11 +233,12 @@ std::shared_ptr op::v3::NonMaxSuppression::clone_with_new_inputs(const Out check_new_args_count(this, new_args); NODE_VALIDATION_CHECK(this, new_args.size() >= 2 && new_args.size() <= 5, "Number of inputs must be 2, 3, 4 or 5"); - const auto& arg2 = new_args.size() > 2 ? new_args.at(2) : ngraph::op::Constant::create(element::i32, Shape{}, {0}); + const auto& arg2 = + new_args.size() > 2 ? new_args.at(2) : ngraph::op::v0::Constant::create(element::i32, Shape{}, {0}); const auto& arg3 = - new_args.size() > 3 ? new_args.at(3) : ngraph::op::Constant::create(element::f32, Shape{}, {.0f}); + new_args.size() > 3 ? new_args.at(3) : ngraph::op::v0::Constant::create(element::f32, Shape{}, {.0f}); const auto& arg4 = - new_args.size() > 4 ? 
new_args.at(4) : ngraph::op::Constant::create(element::f32, Shape{}, {.0f}); + new_args.size() > 4 ? new_args.at(4) : ngraph::op::v0::Constant::create(element::f32, Shape{}, {.0f}); return std::make_shared(new_args.at(0), new_args.at(1), @@ -282,7 +283,7 @@ void op::v3::NonMaxSuppression::validate() { if (inputs().size() >= 3) { const auto max_boxes_ps = get_input_partial_shape(2); NODE_VALIDATION_CHECK(this, - max_boxes_ps.is_dynamic() || is_scalar(max_boxes_ps.to_shape()), + max_boxes_ps.is_dynamic() || ngraph::is_scalar(max_boxes_ps.to_shape()), "Expected a scalar for the 'max_output_boxes_per_class' input. Got: ", max_boxes_ps); } @@ -290,7 +291,7 @@ void op::v3::NonMaxSuppression::validate() { if (inputs().size() >= 4) { const auto iou_threshold_ps = get_input_partial_shape(3); NODE_VALIDATION_CHECK(this, - iou_threshold_ps.is_dynamic() || is_scalar(iou_threshold_ps.to_shape()), + iou_threshold_ps.is_dynamic() || ngraph::is_scalar(iou_threshold_ps.to_shape()), "Expected a scalar for the 'iou_threshold' input. Got: ", iou_threshold_ps); } @@ -298,7 +299,7 @@ void op::v3::NonMaxSuppression::validate() { if (inputs().size() >= 5) { const auto score_threshold_ps = get_input_partial_shape(4); NODE_VALIDATION_CHECK(this, - score_threshold_ps.is_dynamic() || is_scalar(score_threshold_ps.to_shape()), + score_threshold_ps.is_dynamic() || ngraph::is_scalar(score_threshold_ps.to_shape()), "Expected a scalar for the 'score_threshold' input. 
Got: ", score_threshold_ps); } @@ -377,13 +378,13 @@ constexpr DiscreteTypeInfo AttributeAdapter& boxes, const Output& scores, @@ -411,9 +412,9 @@ op::v4::NonMaxSuppression::NonMaxSuppression(const Output& boxes, const element::Type& output_type) : op::v3::NonMaxSuppression(boxes, scores, - op::Constant::create(element::i64, Shape{}, {0}), - op::Constant::create(element::f32, Shape{}, {.0f}), - op::Constant::create(element::f32, Shape{}, {.0f}), + op::v0::Constant::create(element::i64, Shape{}, {0}), + op::v0::Constant::create(element::f32, Shape{}, {.0f}), + op::v0::Constant::create(element::f32, Shape{}, {.0f}), box_encoding, sort_result_descending, output_type) { @@ -425,11 +426,12 @@ std::shared_ptr op::v4::NonMaxSuppression::clone_with_new_inputs(const Out check_new_args_count(this, new_args); NODE_VALIDATION_CHECK(this, new_args.size() >= 2 && new_args.size() <= 5, "Number of inputs must be 2, 3, 4 or 5"); - const auto& arg2 = new_args.size() > 2 ? new_args.at(2) : ngraph::op::Constant::create(element::i32, Shape{}, {0}); + const auto& arg2 = + new_args.size() > 2 ? new_args.at(2) : ngraph::op::v0::Constant::create(element::i32, Shape{}, {0}); const auto& arg3 = - new_args.size() > 3 ? new_args.at(3) : ngraph::op::Constant::create(element::f32, Shape{}, {.0f}); + new_args.size() > 3 ? new_args.at(3) : ngraph::op::v0::Constant::create(element::f32, Shape{}, {.0f}); const auto& arg4 = - new_args.size() > 4 ? new_args.at(4) : ngraph::op::Constant::create(element::f32, Shape{}, {.0f}); + new_args.size() > 4 ? 
new_args.at(4) : ngraph::op::v0::Constant::create(element::f32, Shape{}, {.0f}); return std::make_shared(new_args.at(0), new_args.at(1), @@ -469,7 +471,7 @@ void op::v4::NonMaxSuppression::validate_and_infer_types() { // ------------------------------ V5 ------------------------------ -NGRAPH_RTTI_DEFINITION(op::v5::NonMaxSuppression, "NonMaxSuppression", 5); +OPENVINO_RTTI_DEFINITION(op::v5::NonMaxSuppression, "NonMaxSuppression", 5); op::v5::NonMaxSuppression::NonMaxSuppression(const Output& boxes, const Output& scores, @@ -776,7 +778,7 @@ bool op::v5::NonMaxSuppression::is_soft_nms_sigma_constant_and_default() const { if (inputs().size() < 6 || !ngraph::op::is_constant(soft_nms_sigma_node)) { return false; } - const auto soft_nms_sigma_input = ov::as_type_ptr(soft_nms_sigma_node); + const auto soft_nms_sigma_input = ov::as_type_ptr(soft_nms_sigma_node); return soft_nms_sigma_input->cast_vector().at(0) == 0.0f; } @@ -817,7 +819,7 @@ void op::v5::NonMaxSuppression::validate_and_infer_types() { set_output_type(2, m_output_type, Shape{1}); } -std::ostream& ngraph::operator<<(std::ostream& s, const op::v5::NonMaxSuppression::BoxEncodingType& type) { +std::ostream& ov::operator<<(std::ostream& s, const op::v5::NonMaxSuppression::BoxEncodingType& type) { return s << as_string(type); } @@ -833,5 +835,4 @@ EnumNames::get() { } constexpr DiscreteTypeInfo AttributeAdapter::type_info; - } // namespace ov diff --git a/ngraph/core/src/op/non_zero.cpp b/ngraph/core/src/op/non_zero.cpp index dce70de461df35..e16d134a556d64 100644 --- a/ngraph/core/src/op/non_zero.cpp +++ b/ngraph/core/src/op/non_zero.cpp @@ -16,7 +16,7 @@ using namespace ngraph; using namespace std; -NGRAPH_RTTI_DEFINITION(op::v3::NonZero, "NonZero", 3); +OPENVINO_RTTI_DEFINITION(op::v3::NonZero, "NonZero", 3); op::v3::NonZero::NonZero(const Output& arg) : Op({arg}) { constructor_validate_and_infer_types(); diff --git a/ngraph/core/src/op/normalize_l2.cpp b/ngraph/core/src/op/normalize_l2.cpp index 
ed36f34fdbc90b..3ea426b27ca3c9 100644 --- a/ngraph/core/src/op/normalize_l2.cpp +++ b/ngraph/core/src/op/normalize_l2.cpp @@ -15,7 +15,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v0::NormalizeL2, "NormalizeL2", 0); +OPENVINO_RTTI_DEFINITION(op::v0::NormalizeL2, "NormalizeL2", 0); op::v0::NormalizeL2::NormalizeL2(const Output& data, const Output& axes, float eps, EpsMode eps_mode) : Op({data, axes}), diff --git a/ngraph/core/src/op/not_equal.cpp b/ngraph/core/src/op/not_equal.cpp index 56f969b214cdeb..a97751381a68f4 100644 --- a/ngraph/core/src/op/not_equal.cpp +++ b/ngraph/core/src/op/not_equal.cpp @@ -50,7 +50,7 @@ bool evaluate_not_equal(const HostTensorPtr& arg0, // ----------------------------------- v1 -------------------------------------- -NGRAPH_RTTI_DEFINITION(op::v1::NotEqual, "NotEqual", 1, op::util::BinaryElementwiseComparison); +OPENVINO_RTTI_DEFINITION(op::v1::NotEqual, "NotEqual", 1, op::util::BinaryElementwiseComparison); op::v1::NotEqual::NotEqual(const Output& arg0, const Output& arg1, const AutoBroadcastSpec& auto_broadcast) : BinaryElementwiseComparison(arg0, arg1, auto_broadcast) { diff --git a/ngraph/core/src/op/one_hot.cpp b/ngraph/core/src/op/one_hot.cpp index 2ca430fca06db5..7e3443ec0fe4d7 100644 --- a/ngraph/core/src/op/one_hot.cpp +++ b/ngraph/core/src/op/one_hot.cpp @@ -13,7 +13,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v1::OneHot, "OneHot", 1); +OPENVINO_RTTI_DEFINITION(op::v1::OneHot, "OneHot", 1); op::v1::OneHot::OneHot(const Output& indices, const Output& depth, @@ -50,15 +50,15 @@ void op::v1::OneHot::validate_and_infer_types() { const auto& off_value_shape = get_input_partial_shape(3); NODE_VALIDATION_CHECK(this, - depth_shape.is_dynamic() || is_scalar(depth_shape.to_shape()), + depth_shape.is_dynamic() || ngraph::is_scalar(depth_shape.to_shape()), "depth input must be scalar."); NODE_VALIDATION_CHECK(this, - on_value_shape.is_dynamic() || 
is_scalar(on_value_shape.to_shape()), + on_value_shape.is_dynamic() || ngraph::is_scalar(on_value_shape.to_shape()), "on_value input must be scalar."); NODE_VALIDATION_CHECK(this, - off_value_shape.is_dynamic() || is_scalar(off_value_shape.to_shape()), + off_value_shape.is_dynamic() || ngraph::is_scalar(off_value_shape.to_shape()), "off_value input must be scalar."); PartialShape result_shape{PartialShape::dynamic()}; @@ -77,7 +77,7 @@ void op::v1::OneHot::validate_and_infer_types() { ")."); NODE_VALIDATION_CHECK(this, - is_scalar(depth->get_shape()), + ngraph::is_scalar(depth->get_shape()), "A scalar input should be provided as 'depth' to OneHot", " (got ", depth->get_shape(), diff --git a/ngraph/core/src/op/pad.cpp b/ngraph/core/src/op/pad.cpp index d9353bd0324748..4e2126524c3c2b 100644 --- a/ngraph/core/src/op/pad.cpp +++ b/ngraph/core/src/op/pad.cpp @@ -17,7 +17,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v1::Pad, "Pad", 1); +OPENVINO_RTTI_DEFINITION(op::v1::Pad, "Pad", 1); op::v1::Pad::Pad(const Output& arg, const Output& pads_begin, @@ -33,7 +33,7 @@ op::v1::Pad::Pad(const Output& arg, const Output& pads_begin, const Output& pads_end, PadMode pad_mode) - : Op({arg, pads_begin, pads_end, op::Constant::create(arg.get_element_type(), Shape{}, {0})}), + : Op({arg, pads_begin, pads_end, op::v0::Constant::create(arg.get_element_type(), Shape{}, {0})}), m_pad_mode{pad_mode} { constructor_validate_and_infer_types(); } diff --git a/ngraph/core/src/op/power.cpp b/ngraph/core/src/op/power.cpp index edfb815c10464c..ff3bf65aebd330 100644 --- a/ngraph/core/src/op/power.cpp +++ b/ngraph/core/src/op/power.cpp @@ -53,7 +53,7 @@ bool evaluate_power(const HostTensorPtr& arg0, // ------------------------------ v1 ------------------------------------------- -NGRAPH_RTTI_DEFINITION(op::v1::Power, "Power", 1, op::util::BinaryElementwiseArithmetic); +OPENVINO_RTTI_DEFINITION(op::v1::Power, "Power", 1, op::util::BinaryElementwiseArithmetic); 
op::v1::Power::Power(const Output& arg0, const Output& arg1, const AutoBroadcastSpec& auto_broadcast) : BinaryElementwiseArithmetic(arg0, arg1, auto_broadcast) { diff --git a/ngraph/core/src/op/prelu.cpp b/ngraph/core/src/op/prelu.cpp index afd178ca4be5a9..20a69dd49549dd 100644 --- a/ngraph/core/src/op/prelu.cpp +++ b/ngraph/core/src/op/prelu.cpp @@ -10,13 +10,12 @@ #include "itt.hpp" using namespace std; -using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::PRelu, "PRelu", 0); +OPENVINO_RTTI_DEFINITION(ov::op::v0::PRelu, "PRelu", 0); -op::PRelu::PRelu() : Op() {} +ov::op::v0::PRelu::PRelu() : Op() {} -op::PRelu::PRelu(const Output& data, const Output& slope) : Op({data, slope}) { +ov::op::v0::PRelu::PRelu(const Output& data, const Output& slope) : Op({data, slope}) { constructor_validate_and_infer_types(); } @@ -29,26 +28,28 @@ void ngraph::op::v0::PRelu::validate_and_infer_types() { set_output_type(0, get_input_element_type(0), get_input_partial_shape(0)); } -shared_ptr op::PRelu::clone_with_new_inputs(const OutputVector& new_args) const { +shared_ptr ov::op::v0::PRelu::clone_with_new_inputs(const OutputVector& new_args) const { NGRAPH_OP_SCOPE(v0_PRelu_clone_with_new_inputs); if (new_args.size() != 2) { - throw ngraph_error("Incorrect number of new arguments"); + throw ov::Exception("Incorrect number of new arguments"); } return make_shared(new_args.at(0), new_args.at(1)); } namespace prelu { -template -bool evaluate(const HostTensorPtr& arg, const HostTensorPtr& slope, const HostTensorPtr& out) { - runtime::reference::prelu(arg->get_data_ptr(), - slope->get_data_ptr(), - out->get_data_ptr(), - arg->get_shape(), - slope->get_shape()); +template +bool evaluate(const ngraph::HostTensorPtr& arg, const ngraph::HostTensorPtr& slope, const ngraph::HostTensorPtr& out) { + ngraph::runtime::reference::prelu(arg->get_data_ptr(), + slope->get_data_ptr(), + out->get_data_ptr(), + arg->get_shape(), + slope->get_shape()); return true; } -bool evaluate_prelu(const 
HostTensorPtr& arg, const HostTensorPtr& slope, const HostTensorPtr& out) { +bool evaluate_prelu(const ngraph::HostTensorPtr& arg, + const ngraph::HostTensorPtr& slope, + const ngraph::HostTensorPtr& out) { bool rc = true; switch (arg->get_element_type()) { NGRAPH_TYPE_CASE(evaluate_prelu, i8, arg, slope, out); @@ -63,13 +64,13 @@ bool evaluate_prelu(const HostTensorPtr& arg, const HostTensorPtr& slope, const } } // namespace prelu -bool op::PRelu::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { +bool ov::op::v0::PRelu::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { NGRAPH_OP_SCOPE(v0_PRelu_evaluate); - NGRAPH_CHECK(validate_host_tensor_vector(outputs, 1) && validate_host_tensor_vector(inputs, 2)); + NGRAPH_CHECK(ngraph::validate_host_tensor_vector(outputs, 1) && ngraph::validate_host_tensor_vector(inputs, 2)); return prelu::evaluate_prelu(inputs[0], inputs[1], outputs[0]); } -bool op::PRelu::has_evaluate() const { +bool ov::op::v0::PRelu::has_evaluate() const { NGRAPH_OP_SCOPE(v0_PRelu_has_evaluate); switch (get_input_element_type(0)) { case ngraph::element::i8: diff --git a/ngraph/core/src/op/prior_box.cpp b/ngraph/core/src/op/prior_box.cpp index d622176de1aeeb..a782cf08bdd7ac 100644 --- a/ngraph/core/src/op/prior_box.cpp +++ b/ngraph/core/src/op/prior_box.cpp @@ -14,9 +14,11 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v0::PriorBox, "PriorBox", 0); +OPENVINO_RTTI_DEFINITION(op::v0::PriorBox, "PriorBox", 0); -op::PriorBox::PriorBox(const Output& layer_shape, const Output& image_shape, const PriorBoxAttrs& attrs) +op::PriorBox::PriorBox(const Output& layer_shape, + const Output& image_shape, + const PriorBox::Attributes& attrs) : Op({layer_shape, image_shape}), m_attrs(attrs) { constructor_validate_and_infer_types(); @@ -70,7 +72,7 @@ shared_ptr op::PriorBox::clone_with_new_inputs(const OutputVector& new_arg return make_shared(new_args.at(0), new_args.at(1), 
m_attrs); } -int64_t op::PriorBox::number_of_priors(const PriorBoxAttrs& attrs) { +int64_t op::PriorBox::number_of_priors(const PriorBox::Attributes& attrs) { // Starting with 0 number of prior and then various conditions on attributes will contribute // real number of prior boxes as PriorBox is a fat thing with several modes of // operation that will be checked in order in the next statements. @@ -129,7 +131,10 @@ bool op::PriorBox::visit_attributes(AttributeVisitor& visitor) { namespace prior_box { template -bool evaluate(const HostTensorPtr& arg0, const HostTensorPtr& arg1, const HostTensorPtr& out, op::PriorBoxAttrs attrs) { +bool evaluate(const HostTensorPtr& arg0, + const HostTensorPtr& arg1, + const HostTensorPtr& out, + op::PriorBox::Attributes attrs) { runtime::reference::prior_box(arg0->get_data_ptr(), arg1->get_data_ptr(), out->get_data_ptr(), @@ -141,7 +146,7 @@ bool evaluate(const HostTensorPtr& arg0, const HostTensorPtr& arg1, const HostTe bool evaluate_prior_box(const HostTensorPtr& arg0, const HostTensorPtr& arg1, const HostTensorPtr& out, - const op::PriorBoxAttrs& attrs) { + const op::PriorBox::Attributes& attrs) { bool rc = true; switch (arg0->get_element_type()) { NGRAPH_TYPE_CASE(evaluate_prior_box, i8, arg0, arg1, out, attrs); diff --git a/ngraph/core/src/op/prior_box_clustered.cpp b/ngraph/core/src/op/prior_box_clustered.cpp index cf1ce9fa23cb2c..6cb8cc8e02c2c2 100644 --- a/ngraph/core/src/op/prior_box_clustered.cpp +++ b/ngraph/core/src/op/prior_box_clustered.cpp @@ -14,17 +14,17 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::PriorBoxClustered, "PriorBoxClustered", 0); +OPENVINO_RTTI_DEFINITION(ov::op::v0::PriorBoxClustered, "PriorBoxClustered", 0); -op::PriorBoxClustered::PriorBoxClustered(const Output& layer_shape, - const Output& image_shape, - const PriorBoxClusteredAttrs& attrs) +ov::op::v0::PriorBoxClustered::PriorBoxClustered(const Output& layer_shape, + const Output& image_shape, + const Attributes& 
attrs) : Op({layer_shape, image_shape}), m_attrs(attrs) { constructor_validate_and_infer_types(); } -void op::PriorBoxClustered::validate_and_infer_types() { +void ov::op::v0::PriorBoxClustered::validate_and_infer_types() { NGRAPH_OP_SCOPE(v0_PriorBoxClustered_validate_and_infer_types); // shape node should have integer data type. For now we only allow i64 auto layer_shape_et = get_input_element_type(0); @@ -72,13 +72,13 @@ void op::PriorBoxClustered::validate_and_infer_types() { } } -shared_ptr op::PriorBoxClustered::clone_with_new_inputs(const OutputVector& new_args) const { +shared_ptr ov::op::v0::PriorBoxClustered::clone_with_new_inputs(const OutputVector& new_args) const { NGRAPH_OP_SCOPE(v0_PriorBoxClustered_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0), new_args.at(1), m_attrs); } -bool op::PriorBoxClustered::visit_attributes(AttributeVisitor& visitor) { +bool ov::op::v0::PriorBoxClustered::visit_attributes(AttributeVisitor& visitor) { NGRAPH_OP_SCOPE(v0_PriorBoxClustered_visit_attributes); float step = 0; float step_w_tmp = m_attrs.step_widths; @@ -110,7 +110,7 @@ template bool evaluate(const HostTensorPtr& arg0, const HostTensorPtr& arg1, const HostTensorPtr& out, - op::PriorBoxClusteredAttrs attrs) { + ov::op::v0::PriorBoxClustered::Attributes attrs) { runtime::reference::prior_box_clustered(arg0->get_data_ptr(), arg1->get_data_ptr(), out->get_data_ptr(), @@ -122,7 +122,7 @@ bool evaluate(const HostTensorPtr& arg0, bool evaluate_prior_box(const HostTensorPtr& arg0, const HostTensorPtr& arg1, const HostTensorPtr& out, - const op::PriorBoxClusteredAttrs& attrs) { + const ov::op::v0::PriorBoxClustered::Attributes& attrs) { bool rc = true; switch (arg0->get_element_type()) { NGRAPH_TYPE_CASE(evaluate_prior_box, i8, arg0, arg1, out, attrs); diff --git a/ngraph/core/src/op/proposal.cpp b/ngraph/core/src/op/proposal.cpp index 54813c5418fc3a..fe4c95257c5ba8 100644 --- a/ngraph/core/src/op/proposal.cpp +++ 
b/ngraph/core/src/op/proposal.cpp @@ -10,12 +10,12 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v0::Proposal, "Proposal", 0); +OPENVINO_RTTI_DEFINITION(op::v0::Proposal, "Proposal", 0); op::v0::Proposal::Proposal(const Output& class_probs, const Output& bbox_deltas, const Output& image_shape, - const ProposalAttrs& attrs) + const Attributes& attrs) : Op({class_probs, bbox_deltas, image_shape}), m_attrs(attrs) { constructor_validate_and_infer_types(); @@ -128,12 +128,12 @@ bool op::v0::Proposal::visit_attributes(AttributeVisitor& visitor) { return true; } -NGRAPH_RTTI_DEFINITION(op::v4::Proposal, "Proposal", 4); +OPENVINO_RTTI_DEFINITION(op::v4::Proposal, "Proposal", 4); op::v4::Proposal::Proposal(const Output& class_probs, const Output& class_bbox_deltas, const Output& image_shape, - const op::ProposalAttrs& attrs) + const op::v0::Proposal::Attributes& attrs) : v0::Proposal(class_probs, class_bbox_deltas, image_shape, attrs) { constructor_validate_and_infer_types(); } diff --git a/ngraph/core/src/op/psroi_pooling.cpp b/ngraph/core/src/op/psroi_pooling.cpp index 9b6fdf41c94edf..becc9425adac65 100644 --- a/ngraph/core/src/op/psroi_pooling.cpp +++ b/ngraph/core/src/op/psroi_pooling.cpp @@ -10,16 +10,16 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::PSROIPooling, "PSROIPooling", 0); +OPENVINO_RTTI_DEFINITION(ov::op::v0::PSROIPooling, "PSROIPooling", 0); -op::PSROIPooling::PSROIPooling(const Output& input, - const Output& coords, - const size_t output_dim, - const size_t group_size, - const float spatial_scale, - int spatial_bins_x, - int spatial_bins_y, - const string& mode) +ov::op::v0::PSROIPooling::PSROIPooling(const Output& input, + const Output& coords, + const size_t output_dim, + const size_t group_size, + const float spatial_scale, + int spatial_bins_x, + int spatial_bins_y, + const string& mode) : Op({input, coords}), m_output_dim(output_dim), m_group_size(group_size), @@ -41,7 +41,7 @@ bool 
ngraph::op::v0::PSROIPooling::visit_attributes(AttributeVisitor& visitor) { return true; } -void op::PSROIPooling::validate_and_infer_types() { +void ov::op::v0::PSROIPooling::validate_and_infer_types() { NGRAPH_OP_SCOPE(v0_PSROIPooling_validate_and_infer_types); auto feat_maps_et = get_input_element_type(0); auto coords_et = get_input_element_type(1); @@ -104,7 +104,7 @@ void op::PSROIPooling::validate_and_infer_types() { } } -shared_ptr op::PSROIPooling::clone_with_new_inputs(const OutputVector& new_args) const { +shared_ptr ov::op::v0::PSROIPooling::clone_with_new_inputs(const OutputVector& new_args) const { NGRAPH_OP_SCOPE(v0_PSROIPooling_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0), diff --git a/ngraph/core/src/op/min.cpp b/ngraph/core/src/op/reduce_min.cpp similarity index 97% rename from ngraph/core/src/op/min.cpp rename to ngraph/core/src/op/reduce_min.cpp index 06c0f3d4839d99..c3210c1a97f779 100644 --- a/ngraph/core/src/op/min.cpp +++ b/ngraph/core/src/op/reduce_min.cpp @@ -2,11 +2,10 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph/op/min.hpp" - #include #include "itt.hpp" +#include "ngraph/op/min.hpp" #include "ngraph/op/util/evaluate_helpers.hpp" #include "ngraph/runtime/host_tensor.hpp" #include "ngraph/runtime/reference/min.hpp" @@ -40,7 +39,7 @@ bool evaluate_min(const HostTensorPtr& arg, const HostTensorPtr& out, const Axis } } // namespace minop -NGRAPH_RTTI_DEFINITION(op::v1::ReduceMin, "ReduceMin", 1, util::ArithmeticReductionKeepDims); +OPENVINO_RTTI_DEFINITION(op::v1::ReduceMin, "ReduceMin", 1, util::ArithmeticReductionKeepDims); op::v1::ReduceMin::ReduceMin(const Output& arg, const Output& reduction_axes, bool keep_dims) : ArithmeticReductionKeepDims(arg, reduction_axes, keep_dims) {