diff --git a/ngraph/core/include/ngraph/op/random_uniform.hpp b/ngraph/core/include/ngraph/op/random_uniform.hpp
index f20ddecccab431..bf0af07c2d14e1 100644
--- a/ngraph/core/include/ngraph/op/random_uniform.hpp
+++ b/ngraph/core/include/ngraph/op/random_uniform.hpp
@@ -6,80 +6,12 @@
 #include "ngraph/node.hpp"
 #include "ngraph/op/op.hpp"
+#include "openvino/op/random_uniform.hpp"
 
 namespace ngraph {
 namespace op {
 namespace v8 {
-/// \brief Tensor RandomUniform operation.
-class NGRAPH_API RandomUniform : public Op {
-public:
-    NGRAPH_RTTI_DECLARATION;
-
-    RandomUniform() = default;
-
-    ///
-    /// \brief Constructs a RandomUniform operation.
-    ///
-    /// \param out_shape Node producing the tensor with output shape.
-    /// \param min_val Node producing the tensor with minimum value.
-    /// \param max_val Node producing the tensor with maximum value.
-    /// \param out_type Output type of the tensor.
-    /// \param global_seed Global seed value.
-    /// \param op_seed Operational seed value.
-    RandomUniform(const Output<Node>& out_shape,
-                  const Output<Node>& min_val,
-                  const Output<Node>& max_val,
-                  const ngraph::element::Type& out_type,
-                  uint64_t global_seed = 0,
-                  uint64_t op_seed = 0);
-
-    void validate_and_infer_types() override;
-
-    bool visit_attributes(AttributeVisitor& visitor) override;
-
-    std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
-
-    /// \return Turns off constant folding for RandomUniform operation.
-    bool constant_fold(OutputVector& output_values, const OutputVector& inputs_values) override {
-        return false;
-    }
-
-    /// \return The output tensor type.
-    const ngraph::element::Type& get_out_type() const {
-        return m_output_type;
-    }
-    void set_out_type(const ngraph::element::Type& output_type) {
-        m_output_type = output_type;
-    }
-
-    /// \return The global seed value.
-    uint64_t get_global_seed() const {
-        return m_global_seed;
-    }
-    void set_global_seed(uint64_t seed) {
-        m_global_seed = seed;
-    }
-
-    /// \return The operational seed value.
-    uint64_t get_op_seed() const {
-        return m_op_seed;
-    }
-    void set_op_seed(uint64_t seed2) {
-        m_op_seed = seed2;
-    }
-
-    bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
-
-    bool has_evaluate() const override;
-
-protected:
-    ngraph::element::Type m_output_type;
-    uint64_t m_global_seed;
-    uint64_t m_op_seed;
-
-    mutable std::mutex m_state_mutex;
-    mutable std::pair<uint64_t, uint64_t> m_state;
-};
+using ov::op::v8::RandomUniform;
 }  // namespace v8
 }  // namespace op
 }  // namespace ngraph
diff --git a/ngraph/core/include/ngraph/op/range.hpp b/ngraph/core/include/ngraph/op/range.hpp
index 294804baa21686..72fc1984fc8472 100644
--- a/ngraph/core/include/ngraph/op/range.hpp
+++ b/ngraph/core/include/ngraph/op/range.hpp
@@ -6,70 +6,15 @@
 #include "ngraph/node.hpp"
 #include "ngraph/op/op.hpp"
+#include "openvino/op/range.hpp"
 
 namespace ngraph {
 namespace op {
 namespace v4 {
-/// \brief Range operation, analogous to `arange()` in Numpy.
-class NGRAPH_API Range : public Op {
-public:
-    NGRAPH_RTTI_DECLARATION;
-    /// \brief Constructs an unitialized range operation.
-    Range() = default;
-
-    /// \brief Constructs a range operation.
-    ///
-    /// \param start The tensor producing the start value. Must be a scalar of numeric
-    ///              element type.
-    /// \param stop The tensor producing the stop value. Must be a scalar of numeric
-    ///             element type.
-    /// \param step The tensor producing the step value. Must be a scalar of numeric
-    ///             element type.
-    /// \param output_type The type of the output.
-    Range(const Output<Node>& start, const Output<Node>& stop, const Output<Node>& step, element::Type output_type);
-
-    bool visit_attributes(AttributeVisitor& visitor) override;
-    void validate_and_infer_types() override;
-
-    virtual std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
-    bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
-    bool has_evaluate() const override;
-    void set_output_type(element::Type output_type) {
-        m_output_type = output_type;
-    }
-    // Overload collision with method on Node
-    using Node::set_output_type;
-
-private:
-    element::Type m_output_type;
-};
+using ov::op::v4::Range;
 }  // namespace v4
 namespace v0 {
-/// \brief Range operation, analogous to `range()` in Python.
-class NGRAPH_API Range : public Op {
-public:
-    NGRAPH_RTTI_DECLARATION;
-
-    /// \brief Constructs an unitialized range operation.
-    Range() = default;
-
-    /// \brief Constructs a range operation.
-    ///
-    /// \param start The tensor producing the start value. Must be a scalar of integer
-    ///              element type, and same element type as `stop` and `step`.
-    /// \param stop The tensor producing the stop value. Must be a scalar of integer
-    ///             element type, and same element type as `start` and `step`.
-    /// \param step The tensor producing the step value. Must be a scalar of integer
-    ///             element type, and same element type as `start` and `stop`.
-    Range(const Output<Node>& start, const Output<Node>& stop, const Output<Node>& step);
-
-    bool visit_attributes(AttributeVisitor& visitor) override;
-    void validate_and_infer_types() override;
-
-    virtual std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
-    bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
-    bool has_evaluate() const override;
-};
+using ov::op::v0::Range;
 }  // namespace v0
 using v0::Range;
 }  // namespace op
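Every hunk in this change follows the same recipe: the class definition moves to an `openvino/op/*.hpp` header under the `ov::op` namespace, and the legacy `ngraph` header becomes a thin forwarder. A minimal sketch of why existing callers keep compiling (simplified; not the actual OpenVINO sources):

```cpp
// The real definition now lives in the ov namespace...
namespace ov {
namespace op {
namespace v4 {
class Range { /* full operation definition */ };
}  // namespace v4
}  // namespace op
}  // namespace ov

// ...and the legacy ngraph header simply re-exports it:
namespace ngraph {
namespace op {
namespace v4 {
using ov::op::v4::Range;  // ngraph::op::v4::Range is now an alias
}  // namespace v4
}  // namespace op
}  // namespace ngraph

// Caller code written against the old spelling is unaffected:
// auto r = std::make_shared<ngraph::op::v4::Range>(start, stop, step, type);
```

Because a using-declaration introduces the same type rather than a derived one, RTTI, `dynamic_cast`, and existing type checks all keep working.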
diff --git a/ngraph/core/include/ngraph/op/reduce_l1.hpp b/ngraph/core/include/ngraph/op/reduce_l1.hpp
index 991eaa19abf412..aa0dd2001c74a9 100644
--- a/ngraph/core/include/ngraph/op/reduce_l1.hpp
+++ b/ngraph/core/include/ngraph/op/reduce_l1.hpp
@@ -5,36 +5,12 @@
 #pragma once
 
 #include "ngraph/op/util/arithmetic_reductions_keep_dims.hpp"
+#include "openvino/op/reduce_l1.hpp"
 
 namespace ngraph {
 namespace op {
 namespace v4 {
-/// \brief Reduction operation using L1 norm: L1(x) = sum(abs(x)) if all dimensions are
-/// specified for the normalisation.
-///
-/// Reduces the tensor, eliminating the specified reduction axes by taking the L1-norm.
-class NGRAPH_API ReduceL1 : public util::ArithmeticReductionKeepDims {
-public:
-    NGRAPH_RTTI_DECLARATION;
-    /// \brief Constructs a reducet L1-norm operation.
-    ReduceL1() = default;
-    /// \brief Constructs a reduce L1-norm operation.
-    ///
-    /// \param arg The tensor to be reduced.
-    /// \param reduction_axes The axis positions (0-based) to be eliminated.
-    /// \param keep_dims If set to true it holds axes that are used for reduction.
-    ReduceL1(const Output<Node>& arg, const Output<Node>& reduction_axes, bool keep_dims = false);
-
-    /// \return The default value for Reduce.
-    NGRAPH_SUPPRESS_DEPRECATED_START
-    virtual std::shared_ptr<Node> get_default_value() const override;
-    NGRAPH_SUPPRESS_DEPRECATED_END
-
-    virtual std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
-
-    bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
-    bool has_evaluate() const override;
-};
+using ov::op::v4::ReduceL1;
 }  // namespace v4
 }  // namespace op
 }  // namespace ngraph
diff --git a/ngraph/core/include/ngraph/op/reduce_l2.hpp b/ngraph/core/include/ngraph/op/reduce_l2.hpp
index 2629a365396127..7f9aef888ca22d 100644
--- a/ngraph/core/include/ngraph/op/reduce_l2.hpp
+++ b/ngraph/core/include/ngraph/op/reduce_l2.hpp
@@ -5,35 +5,12 @@
 #pragma once
 
 #include "ngraph/op/util/arithmetic_reductions_keep_dims.hpp"
+#include "openvino/op/reduce_l2.hpp"
 
 namespace ngraph {
 namespace op {
 namespace v4 {
-/// \brief Reduction operation using L2 norm:
-///
-/// Reduces the tensor, eliminating the specified reduction axes by taking the L2-norm.
-class NGRAPH_API ReduceL2 : public util::ArithmeticReductionKeepDims {
-public:
-    NGRAPH_RTTI_DECLARATION;
-    /// \brief Constructs a reducet L2-norm operation.
-    ReduceL2() = default;
-    /// \brief Constructs a reduce L2-norm operation.
-    ///
-    /// \param arg The tensor to be reduced.
-    /// \param reduction_axes The axis positions (0-based) to be eliminated.
-    /// \param keep_dims If set to true it holds axes that are used for reduction.
-    ReduceL2(const Output<Node>& arg, const Output<Node>& reduction_axes, bool keep_dims = false);
-
-    /// \return The default value for Reduce.
-    NGRAPH_SUPPRESS_DEPRECATED_START
-    virtual std::shared_ptr<Node> get_default_value() const override;
-    NGRAPH_SUPPRESS_DEPRECATED_END
-
-    virtual std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
-
-    bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
-    bool has_evaluate() const override;
-};
+using ov::op::v4::ReduceL2;
 }  // namespace v4
 }  // namespace op
 }  // namespace ngraph
diff --git a/ngraph/core/include/ngraph/op/reduce_logical_and.hpp b/ngraph/core/include/ngraph/op/reduce_logical_and.hpp
index 2d2ffcc957596c..ced9f28b8bc1c2 100644
--- a/ngraph/core/include/ngraph/op/reduce_logical_and.hpp
+++ b/ngraph/core/include/ngraph/op/reduce_logical_and.hpp
@@ -5,31 +5,12 @@
 #pragma once
 
 #include "ngraph/op/util/logical_reduction_keep_dims.hpp"
+#include "openvino/op/reduce_logical_and.hpp"
 
 namespace ngraph {
 namespace op {
 namespace v1 {
-/// \brief Performs a reduction using "logical and"
-///
-/// The reduction is performed over slices of the first input. The slices shape depends
-/// on the values passed to the second input - the axes.
-class NGRAPH_API ReduceLogicalAnd : public util::LogicalReductionKeepDims {
-public:
-    NGRAPH_RTTI_DECLARATION;
-    ReduceLogicalAnd() = default;
-    /// \brief Constructs a ReduceLogicalAnd node.
-    ///
-    /// \param data - The input tensor with data to be reduced
-    /// \param reduction_axes - The input tensor with information about axes over which
-    ///                         the first tensor should be sliced prior to the reduction operation
-    /// \param keep_dims - Indicates if the axes used for reduction should be held/kept
-    ReduceLogicalAnd(const Output<Node>& data, const Output<Node>& reduction_axes, const bool keep_dims = false);
-
-    virtual std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
-
-    bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
-    bool has_evaluate() const override;
-};
+using ov::op::v1::ReduceLogicalAnd;
 }  // namespace v1
 }  // namespace op
 }  // namespace ngraph
diff --git a/ngraph/core/include/ngraph/op/reduce_logical_or.hpp b/ngraph/core/include/ngraph/op/reduce_logical_or.hpp
index 43ec7a04aa89e4..26111f556b543b 100644
--- a/ngraph/core/include/ngraph/op/reduce_logical_or.hpp
+++ b/ngraph/core/include/ngraph/op/reduce_logical_or.hpp
@@ -5,31 +5,12 @@
 #pragma once
 
 #include "ngraph/op/util/logical_reduction_keep_dims.hpp"
+#include "openvino/op/reduce_logical_or.hpp"
 
 namespace ngraph {
 namespace op {
 namespace v1 {
-/// \brief Performs a reduction using "logical or"
-///
-/// The reduction is performed over slices of the first input. The slices shape depends
-/// on the values passed to the second input - the axes.
-class NGRAPH_API ReduceLogicalOr : public util::LogicalReductionKeepDims {
-public:
-    NGRAPH_RTTI_DECLARATION;
-    ReduceLogicalOr() = default;
-    /// \brief Constructs a ReduceLogicalOr node.
-    ///
-    /// \param data - The input tensor with data to be reduced
-    /// \param reduction_axes - The input tensor with information about axes over which
-    ///                         the first tensor should be sliced prior to the reduction operation
-    /// \param keep_dims - Indicates if the axes used for reduction should be held/kept
-    ReduceLogicalOr(const Output<Node>& data, const Output<Node>& reduction_axes, const bool keep_dims = false);
-
-    virtual std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
-
-    bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
-    bool has_evaluate() const override;
-};
+using ov::op::v1::ReduceLogicalOr;
 }  // namespace v1
 }  // namespace op
 }  // namespace ngraph
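For reference while reviewing the removed doc comments: the logical reductions slice the first input along the axes given by the second input. A small usage sketch against the `ov` classes the aliases now point to (header paths follow the new `openvino/op` layout this PR introduces; shapes are illustrative and the helper name is mine):

```cpp
#include <memory>

#include "openvino/op/constant.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/reduce_logical_and.hpp"

// Reduce a {2, 3} boolean tensor over axis 1: output shape {2},
// or {2, 1} when keep_dims = true.
std::shared_ptr<ov::Node> make_all_reduce() {
    auto data = std::make_shared<ov::op::v0::Parameter>(ov::element::boolean, ov::Shape{2, 3});
    auto axes = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{1}, {1});
    return std::make_shared<ov::op::v1::ReduceLogicalAnd>(data, axes, /*keep_dims=*/false);
}
```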
diff --git a/ngraph/core/include/ngraph/op/reduce_mean.hpp b/ngraph/core/include/ngraph/op/reduce_mean.hpp
index 488f153a2aaedb..836c9ead84f1e5 100644
--- a/ngraph/core/include/ngraph/op/reduce_mean.hpp
+++ b/ngraph/core/include/ngraph/op/reduce_mean.hpp
@@ -6,25 +6,12 @@
 #include "ngraph/axis_set.hpp"
 #include "ngraph/op/util/arithmetic_reductions_keep_dims.hpp"
+#include "openvino/op/reduce_mean.hpp"
 
 namespace ngraph {
 namespace op {
 namespace v1 {
-class NGRAPH_API ReduceMean : public util::ArithmeticReductionKeepDims {
-public:
-    NGRAPH_RTTI_DECLARATION;
-    ReduceMean() = default;
-
-    /// \param arg The tensor to be summed.
-    /// \param reduction_axes The axis positions (0-based) to be eliminated.
-    /// \param keep_dims If set to 1 it holds axes that are used for reduction.
-    ReduceMean(const Output<Node>& arg, const Output<Node>& reduction_axes, bool keep_dims = false);
-
-    std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
-
-    bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
-    bool has_evaluate() const override;
-};
+using ov::op::v1::ReduceMean;
 }  // namespace v1
 }  // namespace op
 }  // namespace ngraph
diff --git a/ngraph/core/include/ngraph/op/reduce_prod.hpp b/ngraph/core/include/ngraph/op/reduce_prod.hpp
index 4199c8b57e88a9..51f85ded5d2007 100644
--- a/ngraph/core/include/ngraph/op/reduce_prod.hpp
+++ b/ngraph/core/include/ngraph/op/reduce_prod.hpp
@@ -5,37 +5,12 @@
 #pragma once
 
 #include "ngraph/op/util/arithmetic_reductions_keep_dims.hpp"
+#include "openvino/op/reduce_prod.hpp"
 
 namespace ngraph {
 namespace op {
 namespace v1 {
-/// \brief Product reduction operation.
-///
-/// Reduces the tensor, eliminating the specified reduction axes by taking the product.
-class NGRAPH_API ReduceProd : public util::ArithmeticReductionKeepDims {
-public:
-    NGRAPH_RTTI_DECLARATION;
-    /// \brief Constructs a product reduction operation.
-    ReduceProd() = default;
-    /// \brief Constructs a product reduction operation.
-    ///
-    /// \param arg The tensor to be reduced.
-    /// \param reduction_axes The axis positions (0-based) to be eliminated.
-    /// \param keep_dims If set to true it holds axes that are used for reduction.
-    ReduceProd(const Output<Node>& arg, const Output<Node>& reduction_axes, bool keep_dims = false);
-
-    /// \return The default value for Product.
-    NGRAPH_SUPPRESS_DEPRECATED_START
-    virtual std::shared_ptr<Node> get_default_value() const override;
-    NGRAPH_SUPPRESS_DEPRECATED_END
-
-    virtual std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
-
-    bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
-    bool has_evaluate() const override;
-    bool evaluate_lower(const HostTensorVector& outputs) const override;
-    bool evaluate_upper(const HostTensorVector& outputs) const override;
-};
+using ov::op::v1::ReduceProd;
 }  // namespace v1
 }  // namespace op
 }  // namespace ngraph
diff --git a/ngraph/core/include/ngraph/op/reduce_sum.hpp b/ngraph/core/include/ngraph/op/reduce_sum.hpp
index 9ef5a433882f81..30d287960551db 100644
--- a/ngraph/core/include/ngraph/op/reduce_sum.hpp
+++ b/ngraph/core/include/ngraph/op/reduce_sum.hpp
@@ -6,86 +6,12 @@
 #include "ngraph/axis_set.hpp"
 #include "ngraph/op/util/arithmetic_reductions_keep_dims.hpp"
+#include "openvino/op/reduce_sum.hpp"
 
 namespace ngraph {
 namespace op {
 namespace v1 {
-// clang-format off
-    /// \brief Tensor sum operation.
-    ///
-    /// Element-wise sums the input tensor, eliminating the specified reduction axes.
-    /// For example:
-    ///
-    /// \f[
-    ///     \mathit{sum}\left(\{0\},
-    ///         \left[ \begin{array}{ccc}
-    ///                1 & 2 \\ 3 & 4 \\ 5 & 6 \end{array} \right]\right) =
-    ///     \left[ (1 + 3 + 5), (2 + 4 + 6) \right] =
-    ///     \left[ 9, 12 \right]~~~\text{(dimension 0 (rows) is eliminated)}
-    /// \f]
-    ///
-    /// \f[
-    ///     \mathit{sum}\left(\{1\},
-    ///         \left[ \begin{array}{ccc}
-    ///                1 & 2 \\ 3 & 4 \\ 5 & 6 \end{array} \right]\right) =
-    ///     \left[ (1 + 2), (3 + 4), (5 + 6) \right] =
-    ///     \left[ 3, 7, 11 \right]~~~\text{(dimension 1 (columns) is eliminated)}
-    /// \f]
-    ///
-    /// \f[
-    ///     \mathit{sum}\left(\{0,1\},
-    ///         \left[ \begin{array}{ccc}
-    ///                1 & 2 \\ 3 & 4 \\ 5 & 6 \end{array} \right]\right) =
-    ///      (1 + 2) + (3 + 4) + (5 + 6) =
-    ///      21~~~\text{(both dimensions (rows and columns) are eliminated)}
-    /// \f]
-    ///
-    /// ## Parameters
-    ///
-    /// |                  | Description                                             |
-    /// | ---------------- | ------------------------------------------------------- |
-    /// | `reduction_axes` | The axes to eliminate through summation.                |
-    /// | `keep_dims`      | If set to 1 it holds axes that are used for reduction.  |
-    ///
-    /// ## Inputs
-    ///
-    /// |       | Type                              | Description                                             |
-    /// | ----- | --------------------------------- | ------------------------------------------------------ |
-    /// | `arg` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | An input tensor of any shape and numeric element type. |
-    ///
-    /// ## Output
-    ///
-    /// | Type                                      | Description                                                                                                      |
-    /// | ----------------------------------------- | ---------------------------------------------------------------------------------------------------------------- |
-    /// | \f$N[\textit{delete}(A,d_1,\dots,d_n)]\f$ | The tensor \f$T\f$, where \f$T\f$ is the input tensor with the `reduction_axes` \f$A\f$ eliminated by summation. |
-    // clang-format on
-    class NGRAPH_API ReduceSum : public util::ArithmeticReductionKeepDims
-    {
-    public:
-        NGRAPH_RTTI_DECLARATION;
-        /// \brief Constructs a summation operation.
-        ReduceSum() = default;
-        /// \brief Constructs a summation operation.
-        ///
-        /// \param arg The tensor to be summed.
-        /// \param reduction_axes The axis positions (0-based) to be eliminated.
-        /// \param keep_dims If set to 1 it holds axes that are used for reduction.
-        ReduceSum(const Output<Node>& arg,
-                  const Output<Node>& reduction_axes,
-                  bool keep_dims = false);
-
-        virtual std::shared_ptr<Node>
-            clone_with_new_inputs(const OutputVector& new_args) const override;
-
-        /// \return The default value for Sum.
-        NGRAPH_SUPPRESS_DEPRECATED_START
-        virtual std::shared_ptr<Node> get_default_value() const override;
-        NGRAPH_SUPPRESS_DEPRECATED_END
-
-        bool evaluate(const HostTensorVector& outputs,
-                      const HostTensorVector& inputs) const override;
-        bool has_evaluate() const override;
-    };
-    }
-    }
-}
+using ov::op::v1::ReduceSum;
+}  // namespace v1
+}  // namespace op
+}  // namespace ngraph
diff --git a/ngraph/core/include/ngraph/op/region_yolo.hpp b/ngraph/core/include/ngraph/op/region_yolo.hpp
index f3430d3cdb5360..8f3efbc0a76c71 100644
--- a/ngraph/core/include/ngraph/op/region_yolo.hpp
+++ b/ngraph/core/include/ngraph/op/region_yolo.hpp
@@ -5,80 +5,12 @@
 #pragma once
 
 #include "ngraph/op/op.hpp"
+#include "openvino/op/region_yolo.hpp"
 
 namespace ngraph {
 namespace op {
 namespace v0 {
-class NGRAPH_API RegionYolo : public Op {
-public:
-    NGRAPH_RTTI_DECLARATION;
-
-    RegionYolo() = default;
-    ///
-    /// \brief Constructs a RegionYolo operation
-    ///
-    /// \param[in] input Input
-    /// \param[in] coords Number of coordinates for each region
-    /// \param[in] classes Number of classes for each region
-    /// \param[in] regions Number of regions
-    /// \param[in] do_softmax Compute softmax
-    /// \param[in] mask Mask
-    /// \param[in] axis Axis to begin softmax on
-    /// \param[in] end_axis Axis to end softmax on
-    /// \param[in] anchors A flattened list of pairs `[width, height]` that
-    ///                    describes
-    ///                    prior box sizes.
-    ///
-    RegionYolo(const Output<Node>& input,
-               const size_t coords,
-               const size_t classes,
-               const size_t regions,
-               const bool do_softmax,
-               const std::vector<int64_t>& mask,
-               const int axis,
-               const int end_axis,
-               const std::vector<float>& anchors = std::vector<float>{});
-
-    bool visit_attributes(AttributeVisitor& visitor) override;
-    void validate_and_infer_types() override;
-
-    virtual std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
-
-    size_t get_num_coords() const {
-        return m_num_coords;
-    }
-    size_t get_num_classes() const {
-        return m_num_classes;
-    }
-    size_t get_num_regions() const {
-        return m_num_regions;
-    }
-    bool get_do_softmax() const {
-        return m_do_softmax;
-    }
-    const std::vector<int64_t>& get_mask() const {
-        return m_mask;
-    }
-    const std::vector<float>& get_anchors() const {
-        return m_anchors;
-    }
-    int get_axis() const {
-        return m_axis;
-    }
-    int get_end_axis() const {
-        return m_end_axis;
-    }
-
-private:
-    size_t m_num_coords;
-    size_t m_num_classes;
-    size_t m_num_regions;
-    bool m_do_softmax;
-    std::vector<int64_t> m_mask;
-    std::vector<float> m_anchors{};
-    int m_axis;
-    int m_end_axis;
-};
+using ov::op::v0::RegionYolo;
 }  // namespace v0
 using v0::RegionYolo;
 }  // namespace op
diff --git a/ngraph/core/include/ngraph/op/relu.hpp b/ngraph/core/include/ngraph/op/relu.hpp
index 1b38d55bb303e3..6bf7253ef1fd02 100644
--- a/ngraph/core/include/ngraph/op/relu.hpp
+++ b/ngraph/core/include/ngraph/op/relu.hpp
@@ -10,27 +10,12 @@
 #include "ngraph/op/op.hpp"
 #include "ngraph/op/util/binary_elementwise_arithmetic.hpp"
 #include "ngraph/op/util/unary_elementwise_arithmetic.hpp"
+#include "openvino/op/relu.hpp"
 
 namespace ngraph {
 namespace op {
 namespace v0 {
-/// \brief Elementwise Relu operation.
-///
-class NGRAPH_API Relu : public ngraph::op::util::UnaryElementwiseArithmetic {
-public:
-    NGRAPH_RTTI_DECLARATION;
-    Relu() = default;
-    /// \brief Constructs a Relu operation.
-    ///
-    /// \param arg Node that produces the input tensor.
-    Relu(const Output<Node>& arg);
-
-    virtual std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
-
-    bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
-    bool has_evaluate() const override;
-    bool visit_attributes(AttributeVisitor& visitor) override;
-};
+using ov::op::v0::Relu;
 }  // namespace v0
 using v0::Relu;
 }  // namespace op
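The removed ReduceSum doc block above works through sums over axes {0}, {1}, and {0,1} of a 3x2 tensor. The same first case expressed against the aliased `ov` op, to make the `keep_dims` contract concrete (a sketch; values and shapes come from the doc comment, the helper name is mine):

```cpp
#include <memory>

#include "openvino/op/constant.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/reduce_sum.hpp"

// sum({0}, [[1,2],[3,4],[5,6]]) = [9, 12]: shape {3, 2} -> {2},
// or {1, 2} when keep_dims = true.
std::shared_ptr<ov::Node> make_row_sum() {
    auto arg = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{3, 2});
    auto axes = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{1}, {0});
    return std::make_shared<ov::op::v1::ReduceSum>(arg, axes, /*keep_dims=*/false);
}
```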
diff --git a/ngraph/core/include/ngraph/op/reorg_yolo.hpp b/ngraph/core/include/ngraph/op/reorg_yolo.hpp
index ee0ff0b163329d..1e10622dc7a5b2 100644
--- a/ngraph/core/include/ngraph/op/reorg_yolo.hpp
+++ b/ngraph/core/include/ngraph/op/reorg_yolo.hpp
@@ -5,37 +5,12 @@
 #pragma once
 
 #include "ngraph/op/op.hpp"
+#include "openvino/op/reorg_yolo.hpp"
 
 namespace ngraph {
 namespace op {
 namespace v0 {
-class NGRAPH_API ReorgYolo : public Op {
-public:
-    NGRAPH_RTTI_DECLARATION;
-
-    ReorgYolo() = default;
-    /// \brief Constructs a ReorgYolo operation
-    ///
-    /// \param input Input
-    /// \param stride Stride to reorganize input by
-    ReorgYolo(const Output<Node>& input, const size_t stride);
-
-    // Constructor with `strides` for backward compatibility
-    ReorgYolo(const Output<Node>& input, const Strides& strides);
-
-    void validate_and_infer_types() override;
-
-    bool visit_attributes(AttributeVisitor& visitor) override;
-
-    virtual std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
-
-    Strides get_strides() const {
-        return m_strides;
-    }
-
-private:
-    Strides m_strides;
-};
+using ov::op::v0::ReorgYolo;
 }  // namespace v0
 using v0::ReorgYolo;
 }  // namespace op
diff --git a/ngraph/core/include/ngraph/op/reshape.hpp b/ngraph/core/include/ngraph/op/reshape.hpp
index f5ab9e1263e5c1..72730ee9d7e42e 100644
--- a/ngraph/core/include/ngraph/op/reshape.hpp
+++ b/ngraph/core/include/ngraph/op/reshape.hpp
@@ -8,62 +8,12 @@
 #include "ngraph/node.hpp"
 #include "ngraph/op/op.hpp"
 #include "ngraph/runtime/host_tensor.hpp"
+#include "openvino/op/reshape.hpp"
 
 namespace ngraph {
 namespace op {
 namespace v1 {
-/// \brief Tensor dynamic reshape operation.
-///
-/// "Converts" an input tensor into a new shape with the same number of elements.
-/// This op does not touch the actual data. If needed, use Transpose for that purpose.
-///
-class NGRAPH_API Reshape : public Op {
-public:
-    NGRAPH_RTTI_DECLARATION;
-    Reshape() = default;
-    /// \brief Constructs a dynamic reshape operation. This operation does not perform
-    ///        transpose.
-    ///
-    /// \param arg The tensor to be reshaped.
-    /// \param shape_pattern The node that defines output shape shape_pattern.
-    ///        If the input shape is \f$(a_0,\dots,a_{k-1})\f$ then the output shape must
-    ///        be of the form \f$(b_0,\dots,b_{j-1})\f$ where \f$\Pi(a_i) = \Pi(b_i)\f$.
-    ///        A value of -1 is allowed for at most one dimension, in which case the
-    ///        dimension size is inferred based on element count of input tensor.
-    /// \param special_zero Treats zeros in `shape_pattern` as wildcard flags indicating a
-    ///        copy from input shape at the same index.
-    ///
-    Reshape(const Output<Node>& arg, const Output<Node>& shape_pattern, bool special_zero);
-
-    bool visit_attributes(AttributeVisitor& visitor) override;
-    void validate_and_infer_types() override;
-
-    virtual std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
-
-    bool get_special_zero() const {
-        return m_special_zero;
-    }
-    void set_special_zero(bool special_zero) {
-        m_special_zero = special_zero;
-    }
-    bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
-    bool has_evaluate() const override;
-    bool evaluate_lower(const HostTensorVector& outputs) const override;
-    bool evaluate_upper(const HostTensorVector& outputs) const override;
-    bool constant_fold(OutputVector& output_values, const OutputVector& inputs_values) override;
-
-protected:
-    bool m_special_zero;
-    bool evaluate_reshape(const HostTensorVector& outputs, const HostTensorVector& inputs) const;
-
-private:
-    void calculate_output_shape(std::vector<Dimension>& reshape_pattern,
-                                const int64_t& minus_one_idx,
-                                const PartialShape& input_pshape,
-                                std::vector<Dimension>& output_shape) const;
-};
+using ov::op::v1::Reshape;
 }  // namespace v1
 }  // namespace op
 }  // namespace ngraph
diff --git a/ngraph/core/include/ngraph/op/reverse.hpp b/ngraph/core/include/ngraph/op/reverse.hpp
index cbbdb7500131f3..6874c94977aec9 100644
--- a/ngraph/core/include/ngraph/op/reverse.hpp
+++ b/ngraph/core/include/ngraph/op/reverse.hpp
@@ -5,76 +5,12 @@
 #pragma once
 
 #include "ngraph/op/op.hpp"
+#include "openvino/op/reverse.hpp"
 
 namespace ngraph {
 namespace op {
 namespace v1 {
-class NGRAPH_API Reverse : public Op {
-public:
-    NGRAPH_RTTI_DECLARATION;
-
-    enum class Mode { INDEX, MASK };
-
-    Reverse() = default;
-    /// \brief Constructs a reverse operation.
-    ///
-    /// \param data The input tensor, some of whose axes are to be reversed.
-    /// \param reversed_axes The axes to reverse in a form of a set of indices or
-    ///                      boolean mask.
-    /// \param mode The way reversed_axes should be interpreted - a set or a mask.
-    Reverse(const Output<Node>& data, const Output<Node>& reversed_axes, const std::string& mode);
-
-    Reverse(const Output<Node>& data, const Output<Node>& reversed_axes, const Mode mode);
-
-    bool visit_attributes(AttributeVisitor& visitor) override;
-    void validate_and_infer_types() override;
-
-    std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
-
-    /// \return The second input data interpretation mode.
-    Mode get_mode() const {
-        return m_mode;
-    }
-    void set_mode(const Mode mode) {
-        m_mode = mode;
-    }
-    bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
-    bool has_evaluate() const override;
-
-protected:
-    Mode mode_from_string(const std::string& mode) const;
-
-    /// \brief Indicates how the values from the second input should be interpreted.
-    ///
-    /// The second input can contain a set of indices pointing to axes in the data
-    /// tensor shape.
-    /// Alternatively it can contain a boolean mask that indicates which axes should be
-    /// reversed.
-    Mode m_mode;
-
-private:
-    bool evaluate_reverse(const HostTensorVector& outputs, const HostTensorVector& inputs) const;
-};
+using ov::op::v1::Reverse;
 }  // namespace v1
 }  // namespace op
-
-NGRAPH_API
-std::ostream& operator<<(std::ostream& s, const op::v1::Reverse::Mode& type);
 }  // namespace ngraph
-
-namespace ov {
-
-template <>
-class NGRAPH_API AttributeAdapter<ngraph::op::v1::Reverse::Mode>
-    : public EnumAttributeAdapterBase<ngraph::op::v1::Reverse::Mode> {
-public:
-    AttributeAdapter(ngraph::op::v1::Reverse::Mode& value)
-        : EnumAttributeAdapterBase<ngraph::op::v1::Reverse::Mode>(value) {}
-
-    static constexpr DiscreteTypeInfo type_info{"AttributeAdapter<ngraph::op::v1::Reverse::Mode>", 1};
-    const DiscreteTypeInfo& get_type_info() const override {
-        return type_info;
-    }
-};
-
-}  // namespace ov
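The removed Reverse class documents two interpretations of its second input; the `Mode` enum (and its `AttributeAdapter` specialization) moved along with the class. A sketch of the two equivalent spellings (shapes are illustrative, helper name is mine):

```cpp
#include <memory>

#include "openvino/op/constant.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/reverse.hpp"

void build_reverses() {
    auto data = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{2, 3, 4});
    // INDEX mode: list the axes to flip.
    auto idx = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{2}, {0, 2});
    auto r1 = std::make_shared<ov::op::v1::Reverse>(data, idx, ov::op::v1::Reverse::Mode::INDEX);
    // MASK mode: one boolean per axis of `data`.
    auto mask = ov::op::v0::Constant::create(ov::element::boolean, ov::Shape{3}, {true, false, true});
    auto r2 = std::make_shared<ov::op::v1::Reverse>(data, mask, ov::op::v1::Reverse::Mode::MASK);
}
```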
diff --git a/ngraph/core/include/ngraph/op/reverse_sequence.hpp b/ngraph/core/include/ngraph/op/reverse_sequence.hpp
index 836c89e11ea98b..694a4b9859371b 100644
--- a/ngraph/core/include/ngraph/op/reverse_sequence.hpp
+++ b/ngraph/core/include/ngraph/op/reverse_sequence.hpp
@@ -5,57 +5,12 @@
 #pragma once
 
 #include "ngraph/op/op.hpp"
+#include "openvino/op/reverse_sequence.hpp"
 
 namespace ngraph {
 namespace op {
 namespace v0 {
-class NGRAPH_API ReverseSequence : public Op {
-public:
-    NGRAPH_RTTI_DECLARATION;
-
-    ReverseSequence() = default;
-    /// \brief Constructs a ReverseSequence operation.
-    ///
-    /// \param arg tensor with input data to reverse
-    /// \param seq_lengths 1D tensor of integers with sequence lengths in the input tensor.
-    /// \param batch_axis index of the batch dimension.
-    /// \param seq_axis index of the sequence dimension.
-    ReverseSequence(const Output<Node>& arg,
-                    const Output<Node>& seq_lengths,
-                    int64_t batch_axis = 0,
-                    int64_t seq_axis = 1);
-
-    bool visit_attributes(AttributeVisitor& visitor) override;
-    void validate_and_infer_types() override;
-
-    virtual std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
-
-    size_t get_batch_axis() const {
-        return m_normalized_batch_axis;
-    }
-    int64_t get_origin_batch_axis() const {
-        return m_batch_axis;
-    }
-    void set_batch_axis(int64_t batch_axis) {
-        m_batch_axis = batch_axis;
-    }
-    size_t get_sequence_axis() const {
-        return m_normalized_seq_axis;
-    }
-    int64_t get_origin_sequence_axis() const {
-        return m_seq_axis;
-    }
-    void set_sequence_axis(int64_t sequence_axis) {
-        m_seq_axis = sequence_axis;
-    }
-
-private:
-    int64_t m_batch_axis;
-    int64_t m_seq_axis = 1;
-    size_t m_normalized_batch_axis;
-    size_t m_normalized_seq_axis;
-};
+using ov::op::v0::ReverseSequence;
 }  // namespace v0
 using v0::ReverseSequence;
 }  // namespace op
diff --git a/ngraph/core/include/ngraph/op/rnn_cell.hpp b/ngraph/core/include/ngraph/op/rnn_cell.hpp
index 4a1dbad316b547..a176689d25a8b0 100644
--- a/ngraph/core/include/ngraph/op/rnn_cell.hpp
+++ b/ngraph/core/include/ngraph/op/rnn_cell.hpp
@@ -13,121 +13,12 @@
 #include "ngraph/op/op.hpp"
 #include "ngraph/op/util/activation_functions.hpp"
 #include "ngraph/op/util/rnn_cell_base.hpp"
+#include "openvino/op/rnn_cell.hpp"
 
 namespace ngraph {
 namespace op {
 namespace v0 {
-///
-/// \brief Class for single RNN cell node.
-///
-/// \note It follows notation and equations defined as in ONNX standard:
-///       https://github.com/onnx/onnx/blob/master/docs/Operators.md#RNN
-///
-/// \note It calculates following equations:
-///
-///       Ht = f(Xt*(Wi^T) + Ht-1*(Ri^T) + Wbi + Rbi)
-///
-///       * - Is a dot product,
-///       f - is activation functions.
-///
-/// \note This class represents only single *cell* (for current time step)
-///       and not the whole RNN Sequence layer
-///
-/// \sa LSTMSequence, LSTMCell, GRUCell
-///
-class NGRAPH_API RNNCell : public util::RNNCellBase {
-public:
-    NGRAPH_RTTI_DECLARATION;
-
-    RNNCell();
-    ///
-    /// \brief Constructs RNNCell node.
-    ///
-    /// \param[in] X The input tensor with shape: [batch_size, input_size].
-    /// \param[in] initial_hidden_state The hidden state tensor at current time step
-    ///                                 with shape: [batch_size, hidden_size].
-    /// \param[in] W The weight tensor with shape: [hidden_size, input_size].
-    /// \param[in] R The recurrence weight tensor with shape: [hidden_size, hidden_size].
-    /// \param[in] hidden_size The number of hidden units for recurrent cell.
-    /// \param[in] activations The vector of activation functions used inside recurrent cell.
-    /// \param[in] activations_alpha The vector of alpha parameters for activation
-    ///                              functions in order respective to activation list.
-    /// \param[in] activations_beta The vector of beta parameters for activation
-    ///                             functions in order respective to activation list.
-    /// \param[in] clip The value defining clipping range [-clip, clip] on input of
-    ///                 activation functions.
-    ///
-    RNNCell(const Output<Node>& X,
-            const Output<Node>& initial_hidden_state,
-            const Output<Node>& W,
-            const Output<Node>& R,
-            std::size_t hidden_size,
-            const std::vector<std::string>& activations = std::vector<std::string>{"tanh"},
-            const std::vector<float>& activations_alpha = {},
-            const std::vector<float>& activations_beta = {},
-            float clip = 0.f);
-
-    ///
-    /// \brief Constructs RNNCell node.
-    ///
-    /// \param[in] X The input tensor with shape: [batch_size, input_size].
-    /// \param[in] initial_hidden_state The hidden state tensor at current time step
-    ///                                 with shape: [batch_size, hidden_size].
-    /// \param[in] W The weight tensor with shape: [hidden_size, input_size].
-    /// \param[in] R The recurrence weight tensor with shape: [hidden_size, hidden_size].
-    /// \param[in] B The bias tensor for input gate with shape: [hidden_size].
-    /// \param[in] hidden_size The number of hidden units for recurrent cell.
-    /// \param[in] activations The vector of activation functions used inside recurrent cell.
-    /// \param[in] activations_alpha The vector of alpha parameters for activation
-    ///                              functions in order respective to activation list.
-    /// \param[in] activations_beta The vector of beta parameters for activation
-    ///                             functions in order respective to activation list.
-    /// \param[in] clip The value defining clipping range [-clip, clip] on input of
-    ///                 activation functions.
-    ///
-    RNNCell(const Output<Node>& X,
-            const Output<Node>& initial_hidden_state,
-            const Output<Node>& W,
-            const Output<Node>& R,
-            const Output<Node>& B,
-            std::size_t hidden_size,
-            const std::vector<std::string>& activations = std::vector<std::string>{"tanh"},
-            const std::vector<float>& activations_alpha = {},
-            const std::vector<float>& activations_beta = {},
-            float clip = 0.f);
-
-    void validate_and_infer_types() override;
-    bool visit_attributes(AttributeVisitor& visitor) override;
-    std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
-
-private:
-    ///
-    /// \brief Creates the default bias input initialized with zeros.
-    ///
-    /// \return The object of Output class.
-    ///
-    Output<Node> get_default_bias_input() const;
-
-    ///
-    /// \brief The Activation function f.
-    ///
-    util::ActivationFunction m_activation_f;
-
-    static constexpr std::size_t s_gates_count{1};
-};
+using ov::op::v0::RNNCell;
 }  // namespace v0
 }  // namespace op
 }  // namespace ngraph
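The removed RNNCell comment pins down the expected shapes (X: [batch_size, input_size], H: [batch_size, hidden_size], W: [hidden_size, input_size], R: [hidden_size, hidden_size]). Wiring one cell per that contract (a sketch; sizes are illustrative, helper name is mine):

```cpp
#include <memory>

#include "openvino/op/parameter.hpp"
#include "openvino/op/rnn_cell.hpp"

// batch_size = 2, input_size = 3, hidden_size = 4
std::shared_ptr<ov::Node> make_rnn_cell() {
    auto X = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{2, 3});
    auto H = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{2, 4});
    auto W = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{4, 3});
    auto R = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{4, 4});
    // Computes Ht = tanh(Xt*(Wi^T) + Ht-1*(Ri^T)) with the default zero bias.
    return std::make_shared<ov::op::v0::RNNCell>(X, H, W, R, /*hidden_size=*/4);
}
```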
diff --git a/ngraph/core/include/ngraph/op/rnn_sequence.hpp b/ngraph/core/include/ngraph/op/rnn_sequence.hpp
index 4aa6fe1fa758eb..7f982b8f4347f2 100644
--- a/ngraph/core/include/ngraph/op/rnn_sequence.hpp
+++ b/ngraph/core/include/ngraph/op/rnn_sequence.hpp
@@ -10,42 +10,12 @@
 #include "ngraph/op/op.hpp"
 #include "ngraph/op/util/rnn_cell_base.hpp"
+#include "openvino/op/rnn_sequence.hpp"
 
 namespace ngraph {
 namespace op {
 namespace v5 {
-class NGRAPH_API RNNSequence : public util::RNNCellBase {
-public:
-    NGRAPH_RTTI_DECLARATION;
-
-    RNNSequence();
-
-    RNNSequence(const Output<Node>& X,
-                const Output<Node>& H_t,
-                const Output<Node>& sequence_lengths,
-                const Output<Node>& W,
-                const Output<Node>& R,
-                const Output<Node>& B,
-                size_t hidden_size,
-                op::RecurrentSequenceDirection direction,
-                const std::vector<std::string>& activations = std::vector<std::string>{"tanh"},
-                const std::vector<float>& activations_alpha = {},
-                const std::vector<float>& activations_beta = {},
-                float clip = 0.f);
-
-    std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
-
-    void validate_and_infer_types() override;
-
-    bool visit_attributes(AttributeVisitor& visitor) override;
-
-    op::RecurrentSequenceDirection get_direction() const {
-        return m_direction;
-    }
-
-protected:
-    op::RecurrentSequenceDirection m_direction;
-};
+using ov::op::v5::RNNSequence;
 }  // namespace v5
 }  // namespace op
 }  // namespace ngraph
diff --git a/ngraph/core/include/ngraph/op/roi_align.hpp b/ngraph/core/include/ngraph/op/roi_align.hpp
index 03f333c98a1dd9..abb7297998c84d 100644
--- a/ngraph/core/include/ngraph/op/roi_align.hpp
+++ b/ngraph/core/include/ngraph/op/roi_align.hpp
@@ -5,101 +5,13 @@
 #pragma once
 
 #include "ngraph/op/op.hpp"
+#include "openvino/op/roi_align.hpp"
 
 namespace ngraph {
 namespace op {
 namespace v3 {
-class NGRAPH_API ROIAlign : public Op {
-public:
-    static constexpr NodeTypeInfo type_info{"ROIAlign", 3};
-    const NodeTypeInfo& get_type_info() const override {
-        return type_info;
-    }
-    enum class PoolingMode { AVG, MAX };
-
-    ROIAlign() = default;
-    /// \brief Constructs a ROIAlign node matching the ONNX ROIAlign specification
-    ///
-    /// \param input Input feature map {N, C, H, W}
-    /// \param rois Regions of interest to pool over
-    /// \param batch_indices Indices of images in the batch matching
-    ///                      the number or ROIs
-    /// \param pooled_h Height of the ROI output features
-    /// \param pooled_w Width of the ROI output features
-    /// \param sampling_ratio Number of sampling points used to compute
-    ///                       an output element
-    /// \param spatial_scale Spatial scale factor used to translate ROI coordinates
-    /// \param mode Method of pooling - 'avg' or 'max'
-    ROIAlign(const Output<Node>& input,
-             const Output<Node>& rois,
-             const Output<Node>& batch_indices,
-             const int pooled_h,
-             const int pooled_w,
-             const int sampling_ratio,
-             const float spatial_scale,
-             const std::string& mode);
-
-    ROIAlign(const Output<Node>& input,
-             const Output<Node>& rois,
-             const Output<Node>& batch_indices,
-             const int pooled_h,
-             const int pooled_w,
-             const int sampling_ratio,
-             const float spatial_scale,
-             const PoolingMode mode);
-
-    void validate_and_infer_types() override;
-    bool visit_attributes(AttributeVisitor& visitor) override;
-    std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
-
-    int get_pooled_h() const {
-        return m_pooled_h;
-    }
-    int get_pooled_w() const {
-        return m_pooled_w;
-    }
-    int get_sampling_ratio() const {
-        return m_sampling_ratio;
-    }
-    float get_spatial_scale() const {
-        return m_spatial_scale;
-    }
-    PoolingMode get_mode() const {
-        return m_mode;
-    }
-    bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
-    bool has_evaluate() const override;
-
-private:
-    PoolingMode mode_from_string(const std::string& mode) const;
-
-private:
-    int m_pooled_h;
-    int m_pooled_w;
-    int m_sampling_ratio;
-    float m_spatial_scale;
-    PoolingMode m_mode;
-};
+using ov::op::v3::ROIAlign;
 }  // namespace v3
 using v3::ROIAlign;
 }  // namespace op
-
-std::ostream& operator<<(std::ostream& s, const op::v3::ROIAlign::PoolingMode& mode);
 }  // namespace ngraph
-
-namespace ov {
-
-template <>
-class NGRAPH_API AttributeAdapter<ngraph::op::v3::ROIAlign::PoolingMode>
    : public EnumAttributeAdapterBase<ngraph::op::v3::ROIAlign::PoolingMode> {
-public:
-    AttributeAdapter(ngraph::op::v3::ROIAlign::PoolingMode& value)
-        : EnumAttributeAdapterBase<ngraph::op::v3::ROIAlign::PoolingMode>(value) {}
-
-    static constexpr DiscreteTypeInfo type_info{"AttributeAdapter<ngraph::op::v3::ROIAlign::PoolingMode>", 3};
-    const DiscreteTypeInfo& get_type_info() const override {
-        return type_info;
-    }
-};
-
-}  // namespace ov
diff --git a/ngraph/core/include/ngraph/op/roi_pooling.hpp b/ngraph/core/include/ngraph/op/roi_pooling.hpp
index 9edc39f9ac0670..fcf687897de418 100644
--- a/ngraph/core/include/ngraph/op/roi_pooling.hpp
+++ b/ngraph/core/include/ngraph/op/roi_pooling.hpp
@@ -5,52 +5,13 @@
 #pragma once
 
 #include "ngraph/op/op.hpp"
+#include "openvino/op/roi_pooling.hpp"
 
 namespace ngraph {
 namespace op {
 namespace v0 {
-class NGRAPH_API ROIPooling : public Op {
-public:
-    NGRAPH_RTTI_DECLARATION;
-
-    ROIPooling() = default;
-    /// \brief Constructs a ROIPooling operation
-    ///
-    /// \param input Input feature map {N, C, H, W}
-    /// \param coords Coordinates of bounding boxes
-    /// \param output_size Height/Width of ROI output features
-    /// \param spatial_scale Ratio of input feature map over input image size
-    /// \param method Method of pooling - Max or Bilinear
-    ROIPooling(const Output<Node>& input,
-               const Output<Node>& coords,
-               const Shape& output_size,
-               const float spatial_scale,
-               const std::string& method = "max");
-
-    void validate_and_infer_types() override;
-
-    virtual std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
-
-    const Shape& get_output_size() const {
-        return m_output_size;
-    }
-    float get_spatial_scale() const {
-        return m_spatial_scale;
-    }
-    const std::string& get_method() const {
-        return m_method;
-    }
-    bool visit_attributes(AttributeVisitor& visitor) override;
-
-private:
-    Shape m_output_size{0, 0};
-    float m_spatial_scale;
-    std::string m_method = "max";
-};
-
+using ov::op::v0::ROIPooling;
 }  // namespace v0
 using v0::ROIPooling;
-
 }  // namespace op
-
 }  // namespace ngraph
diff --git a/ngraph/core/include/ngraph/op/roll.hpp b/ngraph/core/include/ngraph/op/roll.hpp
index 6f376a93def0c9..76e1e4675aa884 100644
--- a/ngraph/core/include/ngraph/op/roll.hpp
+++ b/ngraph/core/include/ngraph/op/roll.hpp
@@ -6,34 +6,12 @@
 #include "ngraph/node.hpp"
 #include "ngraph/op/op.hpp"
+#include "openvino/op/roll.hpp"
 
 namespace ngraph {
 namespace op {
 namespace v7 {
-/// \brief Tensor roll operation.
-class NGRAPH_API Roll : public Op {
-public:
-    NGRAPH_RTTI_DECLARATION;
-
-    Roll() = default;
-
-    ///
-    /// \brief Constructs a roll operation.
-    ///
-    /// \param data Node producing the tensor to be shifted.
-    /// \param shift Node producing the 0D or 1D tensor which specifies the
-    ///              number of places by which the elements are shifted.
-    /// \param axes Node producing the 0D or 1D tensor which specifies axes
-    ///             along which elements are shifted.
-    ///
-    Roll(const Output<Node>& data, const Output<Node>& shift, const Output<Node>& axes);
-
-    void validate_and_infer_types() override;
-
-    bool visit_attributes(AttributeVisitor& visitor) override;
-
-    std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
-};
+using ov::op::v7::Roll;
 }  // namespace v7
 }  // namespace op
 }  // namespace ngraph
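Roll's shift/axes contract from the removed comment, made concrete: shifting [a, b, c, d] by 1 along axis 0 yields [d, a, b, c]. A sketch (values illustrative, helper name mine):

```cpp
#include <memory>

#include "openvino/op/constant.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/roll.hpp"

std::shared_ptr<ov::Node> make_roll() {
    auto data = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{4});
    auto shift = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{}, {1});  // 0D shift
    auto axes = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{}, {0});   // 0D axis
    return std::make_shared<ov::op::v7::Roll>(data, shift, axes);
}
```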
diff --git a/ngraph/core/include/ngraph/op/round.hpp b/ngraph/core/include/ngraph/op/round.hpp
index 38a462132f0631..6e00c0db103b24 100644
--- a/ngraph/core/include/ngraph/op/round.hpp
+++ b/ngraph/core/include/ngraph/op/round.hpp
@@ -7,64 +7,12 @@
 #include "ngraph/node.hpp"
 #include "ngraph/op/op.hpp"
 #include "ngraph/op/util/unary_elementwise_arithmetic.hpp"
+#include "openvino/op/round.hpp"
 
 namespace ngraph {
 namespace op {
 namespace v5 {
-/// \brief Elementwise round operation. The output is round to the nearest integer
-/// for each value. In case of halfs, the rule is defined in attribute 'mode':
-/// 'HALF_TO_EVEN' - round halfs to the nearest even integer.
-/// 'HALF_AWAY_FROM_ZERO' - round in such a way that the result heads away from zero.
-
-class NGRAPH_API Round : public ngraph::op::Op {
-public:
-    enum class RoundMode { HALF_TO_EVEN, HALF_AWAY_FROM_ZERO };
-    NGRAPH_RTTI_DECLARATION;
-
-    /// \brief Constructs a round operation.
-    Round() = default;
-
-    /// \brief Constructs a round operation.
-    ///
-    /// \param arg Node that produces the input tensor.
-    /// \param mode Rule to resolve halfs
-    Round(const Output<Node>& arg, const RoundMode mode);
-
-    bool visit_attributes(AttributeVisitor& visitor) override;
-    void validate_and_infer_types() override;
-
-    std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
-
-    bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
-    bool has_evaluate() const override;
-
-    RoundMode get_mode() const {
-        return m_mode;
-    }
-
-private:
-    RoundMode m_mode;
-};
+using ov::op::v5::Round;
 }  // namespace v5
 }  // namespace op
-NGRAPH_API
-std::ostream& operator<<(std::ostream& s, const op::v5::Round::RoundMode& type);
 }  // namespace ngraph
-
-namespace ov {
-
-template <>
-class NGRAPH_API AttributeAdapter<ngraph::op::v5::Round::RoundMode>
-    : public EnumAttributeAdapterBase<ngraph::op::v5::Round::RoundMode> {
-public:
-    AttributeAdapter(ngraph::op::v5::Round::RoundMode& value)
-        : EnumAttributeAdapterBase<ngraph::op::v5::Round::RoundMode>(value) {}
-
-    static constexpr DiscreteTypeInfo type_info{"AttributeAdapter<ngraph::op::v5::Round::RoundMode>", 5};
-    const DiscreteTypeInfo& get_type_info() const override {
-        return type_info;
-    }
-};
-
-}  // namespace ov
diff --git a/ngraph/core/include/ngraph/op/scatter_elements_update.hpp b/ngraph/core/include/ngraph/op/scatter_elements_update.hpp
index 78847c2a33eb95..667b84d30b3cb0 100644
--- a/ngraph/core/include/ngraph/op/scatter_elements_update.hpp
+++ b/ngraph/core/include/ngraph/op/scatter_elements_update.hpp
@@ -9,36 +9,12 @@
 #include "ngraph/op/op.hpp"
 #include "ngraph/runtime/aligned_buffer.hpp"
 #include "ngraph/runtime/host_tensor.hpp"
+#include "openvino/op/scatter_elements_update.hpp"
 
 namespace ngraph {
 namespace op {
 namespace v3 {
-class NGRAPH_API ScatterElementsUpdate : public Op {
-public:
-    NGRAPH_RTTI_DECLARATION;
-
-    ScatterElementsUpdate() = default;
-    /// \brief Constructs a ScatterElementsUpdate node
-
-    /// \param data Input data
-    /// \param indices Data entry index that will be updated
-    /// \param updates Update values
-    /// \param axis Axis to scatter on
-    ScatterElementsUpdate(const Output<Node>& data,
-                          const Output<Node>& indices,
-                          const Output<Node>& updates,
-                          const Output<Node>& axis);
-
-    virtual void validate_and_infer_types() override;
-    bool visit_attributes(AttributeVisitor& visitor) override;
-
-    virtual std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& inputs) const override;
-    bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
-    bool has_evaluate() const override;
-
-private:
-    bool evaluate_scatter_element_update(const HostTensorVector& outputs, const HostTensorVector& inputs) const;
-};
+using ov::op::v3::ScatterElementsUpdate;
 }  // namespace v3
 using v3::ScatterElementsUpdate;
 }  // namespace op
diff --git a/ngraph/core/include/ngraph/op/scatter_nd_update.hpp b/ngraph/core/include/ngraph/op/scatter_nd_update.hpp
index 044025ee1a2cb3..d43dede27b027e 100644
--- a/ngraph/core/include/ngraph/op/scatter_nd_update.hpp
+++ b/ngraph/core/include/ngraph/op/scatter_nd_update.hpp
@@ -6,28 +6,12 @@
 #include "ngraph/op/op.hpp"
 #include "ngraph/op/util/scatter_nd_base.hpp"
+#include "openvino/op/scatter_nd_update.hpp"
 
 namespace ngraph {
 namespace op {
 namespace v3 {
-/// \brief Add updates to slices from inputs addressed by indices
-class NGRAPH_API ScatterNDUpdate : public util::ScatterNDBase {
-public:
-    static constexpr NodeTypeInfo type_info{"ScatterNDUpdate", 3};
-    const NodeTypeInfo& get_type_info() const override {
-        return type_info;
-    }
-    ScatterNDUpdate() = default;
-    /// \param inputs Tensor
-    /// \param indices Index tensor: Data type must be `element::i32` or `element::i64`
-    /// \param updates Tensor: Must have same type as inputs
-    ScatterNDUpdate(const Output<Node>& inputs, const Output<Node>& indices, const Output<Node>& updates)
-        : util::ScatterNDBase(inputs, indices, updates) {}
-
-    virtual std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
-    bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
-    bool has_evaluate() const override;
-};
+using ov::op::v3::ScatterNDUpdate;
 }  // namespace v3
 using v3::ScatterNDUpdate;
 }  // namespace op
diff --git a/ngraph/core/include/ngraph/op/scatter_update.hpp b/ngraph/core/include/ngraph/op/scatter_update.hpp
index d3ec15f94b3923..f89c2dc8656c5d 100644
--- a/ngraph/core/include/ngraph/op/scatter_update.hpp
+++ b/ngraph/core/include/ngraph/op/scatter_update.hpp
@@ -7,41 +7,12 @@
 #include "ngraph/op/op.hpp"
 #include "ngraph/op/util/scatter_base.hpp"
 #include "ngraph/runtime/host_tensor.hpp"
+#include "openvino/op/scatter_update.hpp"
 
 namespace ngraph {
 namespace op {
 namespace v3 {
-///
-/// \brief Set new values to slices from data addressed by indices
-///
-class NGRAPH_API ScatterUpdate : public util::ScatterBase {
-public:
-    static constexpr NodeTypeInfo type_info{"ScatterUpdate", 3};
-    const NodeTypeInfo& get_type_info() const override {
-        return type_info;
-    }
-    ScatterUpdate() = default;
-    ///
-    /// \brief Constructs ScatterUpdate operator object.
-    ///
-    /// \param data The input tensor to be updated.
-    /// \param indices The tensor with indexes which will be updated.
-    /// \param updates The tensor with update values.
-    /// \param[in] axis The axis at which elements will be updated.
-    ///
-    ScatterUpdate(const Output<Node>& data,
-                  const Output<Node>& indices,
-                  const Output<Node>& updates,
-                  const Output<Node>& axis);
-
-    virtual std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& inputs) const override;
-
-    bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
-    bool has_evaluate() const override;
-
-private:
-    bool evaluate_scatter_update(const HostTensorVector& outputs, const HostTensorVector& inputs) const;
-};
+using ov::op::v3::ScatterUpdate;
 }  // namespace v3
 }  // namespace op
 }  // namespace ngraph
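The three scatter variants above differ in how indices address the data: per element (ScatterElementsUpdate), by N-D index tuples (ScatterNDUpdate), or by whole slices along an axis (ScatterUpdate). One ScatterUpdate instance per the removed comment (a sketch; shapes illustrative, helper name mine):

```cpp
#include <memory>

#include "openvino/op/constant.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/scatter_update.hpp"

// Replace row 1 of a {3, 2} tensor (axis = 0) with a {1, 2} update.
std::shared_ptr<ov::Node> make_scatter_update() {
    auto data = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{3, 2});
    auto indices = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{1}, {1});
    auto updates = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 2});
    auto axis = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{1}, {0});
    return std::make_shared<ov::op::v3::ScatterUpdate>(data, indices, updates, axis);
}
```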
diff --git a/ngraph/core/include/ngraph/op/select.hpp b/ngraph/core/include/ngraph/op/select.hpp
index 2c33bb2b8d6d25..e9eb2e1e17b33e 100644
--- a/ngraph/core/include/ngraph/op/select.hpp
+++ b/ngraph/core/include/ngraph/op/select.hpp
@@ -5,66 +5,12 @@
 #pragma once
 
 #include "ngraph/op/op.hpp"
+#include "openvino/op/select.hpp"
 
 namespace ngraph {
 namespace op {
 namespace v1 {
-// clang-format off
-    /// \brief Elementwise selection operation.
-    ///
-    /// ## Inputs
-    ///
-    /// |                  | Type                                          | Description                                                                                     |
-    /// | ---------------- | --------------------------------------------- | ----------------------------------------------------------------------------------------------- |
-    /// | `arg0`           | \f$\texttt{bool}[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape, with element `bool`.                                                     |
-    /// | `arg1`           | \f$E[d_1,\dots,d_n]~(n \geq 0)\f$             | A tensor of a shape that is broadcast-compatible with `arg0`, with any element type.            |
-    /// | `arg2`           | \f$E[d_1,\dots,d_n]~(n \geq 0)\f$             | A tensor of a shape that is broadcast-compatible with `arg0`, and same element type as `arg1`.  |
-    /// | `auto_broadcast` | AutoBroadcastSpec                             | Auto broadcast specification.                                                                   |
-    ///
-    /// ## Output
-    ///
-    /// | Type                   | Description                                                                                                                                                              |
-    /// | ---------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-    /// | \f$E[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \texttt{arg1}[i_1,\dots,i_n]\text{ if }\texttt{arg0}[i_1,\dots,i_n] \neq 0\text{, else }\texttt{arg2}[i_1,\dots,i_n]\f$ |
-// clang-format on
-class NGRAPH_API Select : public Op {
-public:
-    NGRAPH_RTTI_DECLARATION;
-    /// \brief Constructs a selection operation.
-    Select() : m_auto_broadcast(AutoBroadcastSpec(AutoBroadcastType::NUMPY)) {}
-
-    /// \brief Constructs a selection operation.
-    ///
-    /// \param arg0 Node that produces the first input tensor.
-    /// \param arg1 Node that produces the second input tensor.
-    /// \param arg2 Node that produces the third input tensor.
-    /// \param auto_broadcast Auto broadcast specification. Default is Numpy-style
-    ///                       implicit broadcasting.
-    Select(const Output<Node>& arg0,
-           const Output<Node>& arg1,
-           const Output<Node>& arg2,
-           const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY));
-
-    virtual std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
-    bool visit_attributes(AttributeVisitor& visitor) override;
-    void validate_and_infer_types() override;
-
-    const AutoBroadcastSpec& get_auto_broadcast() const {
-        return m_auto_broadcast;
-    }
-    void set_auto_broadcast(const AutoBroadcastSpec& auto_broadcast) {
-        m_auto_broadcast = auto_broadcast;
-    }
-    // TODO: Move all uses of get_autob to get_auto_broadcast() and remove this.
-    const AutoBroadcastSpec& get_autob() const override {
-        return m_auto_broadcast;
-    }
-    virtual bool evaluate(const HostTensorVector& output_values, const HostTensorVector& input_values) const override;
-    bool has_evaluate() const override;
-
-private:
-    AutoBroadcastSpec m_auto_broadcast;
-};
+using ov::op::v1::Select;
 }  // namespace v1
 }  // namespace op
 }  // namespace ngraph
diff --git a/ngraph/core/include/ngraph/op/selu.hpp b/ngraph/core/include/ngraph/op/selu.hpp
index 89e0a08f69e9ca..8236dc6790464f 100644
--- a/ngraph/core/include/ngraph/op/selu.hpp
+++ b/ngraph/core/include/ngraph/op/selu.hpp
@@ -6,28 +6,12 @@
 #include "ngraph/node.hpp"
 #include "ngraph/op/op.hpp"
+#include "openvino/op/selu.hpp"
+
 namespace ngraph {
 namespace op {
 namespace v0 {
-/// \brief Performs a SELU activation function on all elements of the input node
-class NGRAPH_API Selu : public ngraph::op::Op {
-public:
-    NGRAPH_RTTI_DECLARATION;
-
-    Selu() = default;
-    /// \brief Constructs a Selu node.
-    ///
-    /// \param data - Node producing the input tensor
-    /// \param alpha - Alpha coefficient of SELU operation
-    /// \param lambda - Lambda coefficient of SELU operation
-    Selu(const Output<Node>& data, const Output<Node>& alpha, const Output<Node>& lambda);
-
-    void validate_and_infer_types() override;
-
-    bool visit_attributes(AttributeVisitor& visitor) override;
-
-    virtual std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
-};
+using ov::op::v0::Selu;
 }  // namespace v0
 using v0::Selu;
 }  // namespace op
diff --git a/ngraph/core/include/ngraph/op/shape_of.hpp b/ngraph/core/include/ngraph/op/shape_of.hpp
index 9e1d97f05a7c7c..00c8f80e0a9a7f 100644
--- a/ngraph/core/include/ngraph/op/shape_of.hpp
+++ b/ngraph/core/include/ngraph/op/shape_of.hpp
@@ -5,66 +5,16 @@
 #pragma once
 
 #include "ngraph/op/op.hpp"
+#include "openvino/op/shape_of.hpp"
 
 namespace ngraph {
 namespace op {
 namespace v3 {
-/// \brief Operation that returns the shape of its input argument as a tensor.
-class NGRAPH_API ShapeOf : public Op {
-public:
-    static constexpr NodeTypeInfo type_info{"ShapeOf", 3};
-    const NodeTypeInfo& get_type_info() const override {
-        return type_info;
-    }
-    ShapeOf() = default;
-    /// \brief Constructs a shape-of operation.
-    ShapeOf(const Output<Node>& arg, const element::Type output_type = element::i64);
-
-    bool visit_attributes(AttributeVisitor& visitor) override;
-    virtual std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
-
-    void validate_and_infer_types() override;
-
-    element::Type get_output_type() const {
-        return m_output_type;
-    }
-    void set_output_type(element::Type output_type) {
-        m_output_type = output_type;
-    }
-    // Overload collision with method on Node
-    using Node::set_output_type;
-
-    bool evaluate(const HostTensorVector& output_values, const HostTensorVector& input_values) const override;
-    bool has_evaluate() const override;
-    bool evaluate_lower(const HostTensorVector& output_values) const override;
-    bool evaluate_upper(const HostTensorVector& output_values) const override;
-    bool constant_fold(OutputVector& output_values, const OutputVector& input_values) override;
-
-private:
-    element::Type m_output_type;
-};
+using ov::op::v3::ShapeOf;
 }  // namespace v3
 namespace v0 {
-/// \brief Operation that returns the shape of its input argument as a tensor.
-class NGRAPH_API ShapeOf : public Op {
-public:
-    NGRAPH_RTTI_DECLARATION;
-    ShapeOf() = default;
-    /// \brief Constructs a shape-of operation.
-    ShapeOf(const Output<Node>& arg);
-
-    bool visit_attributes(AttributeVisitor& visitor) override;
-    virtual std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
-
-    void validate_and_infer_types() override;
-
-    bool evaluate(const HostTensorVector& output_values, const HostTensorVector& input_values) const override;
-    bool has_evaluate() const override;
-    bool evaluate_lower(const HostTensorVector& output_values) const override;
-    bool evaluate_upper(const HostTensorVector& output_values) const override;
-    bool constant_fold(OutputVector& output_values, const OutputVector& input_values) override;
-};
+using ov::op::v0::ShapeOf;
 }  // namespace v0
 using v0::ShapeOf;
 }  // namespace op
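Worth noting while reviewing the two ShapeOf hunks: v3::ShapeOf adds the `output_type` attribute (i32 or i64), while v0::ShapeOf always produces i64, which is why both versions survive as aliases. A sketch (helper name mine):

```cpp
#include <memory>

#include "openvino/op/parameter.hpp"
#include "openvino/op/shape_of.hpp"

void build_shape_of() {
    auto p = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{2, 4, 8});
    auto s64 = std::make_shared<ov::op::v3::ShapeOf>(p);                    // i64 by default
    auto s32 = std::make_shared<ov::op::v3::ShapeOf>(p, ov::element::i32);  // explicit i32
}
```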
diff --git a/ngraph/core/include/ngraph/op/shuffle_channels.hpp b/ngraph/core/include/ngraph/op/shuffle_channels.hpp
index f72ba64b45c1f1..9e68fd92aa7ac1 100644
--- a/ngraph/core/include/ngraph/op/shuffle_channels.hpp
+++ b/ngraph/core/include/ngraph/op/shuffle_channels.hpp
@@ -8,48 +8,12 @@
 #include "ngraph/node.hpp"
 #include "ngraph/op/op.hpp"
+#include "openvino/op/shuffle_channels.hpp"
 
 namespace ngraph {
 namespace op {
 namespace v0 {
-/// \brief Permutes data in the channel dimension of the input
-class NGRAPH_API ShuffleChannels : public Op {
-public:
-    NGRAPH_RTTI_DECLARATION;
-
-    ShuffleChannels() = default;
-    /// \brief Constructs a ShuffleChannels node.
-    ///
-    /// \param data Node producing the input tensor.
-    /// \param axis Channel dimension index in the data tensor.
-    ///             A negative value means that the index should be
-    ///             calculated from the back of the input data shape.
-    /// \param group Number of group the channel dimension should be split into.
-    ///
-    ShuffleChannels(const Output<Node>& data, const int64_t axis = 1, const int64_t group = 1);
-
-    bool visit_attributes(AttributeVisitor& visitor) override;
-    size_t get_zero_based_axis() const;
-
-    virtual void validate_and_infer_types() override;
-
-    virtual std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
-
-    int64_t get_axis() const {
-        return m_axis;
-    }
-    int64_t get_group() const {
-        return m_group;
-    }
-    bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
-    bool has_evaluate() const override;
-
-private:
-    bool evaluate_shuffle_channels(const HostTensorVector& outputs, const HostTensorVector& inputs) const;
-
-    int64_t m_axis;
-    int64_t m_group;
-};
+using ov::op::v0::ShuffleChannels;
 }  // namespace v0
 using v0::ShuffleChannels;
 }  // namespace op
diff --git a/ngraph/core/include/ngraph/op/sigmoid.hpp b/ngraph/core/include/ngraph/op/sigmoid.hpp
index 0bb61ddd78056b..b5c13da1593e23 100644
--- a/ngraph/core/include/ngraph/op/sigmoid.hpp
+++ b/ngraph/core/include/ngraph/op/sigmoid.hpp
@@ -8,22 +8,12 @@
 #include "ngraph/op/util/binary_elementwise_arithmetic.hpp"
 #include "ngraph/op/util/unary_elementwise_arithmetic.hpp"
 #include "ngraph/util.hpp"
+#include "openvino/op/sigmoid.hpp"
 
 namespace ngraph {
 namespace op {
 namespace v0 {
-class NGRAPH_API Sigmoid : public util::UnaryElementwiseArithmetic {
-public:
-    static constexpr NodeTypeInfo type_info{"Sigmoid", 0};
-    const NodeTypeInfo& get_type_info() const override {
-        return type_info;
-    }
-    Sigmoid(const Output<Node>& arg);
-    Sigmoid() = default;
-    virtual std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
-    bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
-    bool has_evaluate() const override;
-};
+using ov::op::v0::Sigmoid;
 }  // namespace v0
 using v0::Sigmoid;
 }  // namespace op
diff --git a/ngraph/core/include/ngraph/op/sign.hpp b/ngraph/core/include/ngraph/op/sign.hpp
index c647da35c59867..5bca5fe909d1d7 100644
--- a/ngraph/core/include/ngraph/op/sign.hpp
+++ b/ngraph/core/include/ngraph/op/sign.hpp
@@ -5,27 +5,12 @@
 #pragma once
 
 #include "ngraph/op/util/unary_elementwise_arithmetic.hpp"
+#include "openvino/op/sign.hpp"
 
 namespace ngraph {
 namespace op {
 namespace v0 {
-/// \brief Elementwise sign operation.
-///
-class NGRAPH_API Sign : public util::UnaryElementwiseArithmetic {
-public:
-    NGRAPH_RTTI_DECLARATION;
-
-    Sign() = default;
-    /// \brief Constructs an elementwise sign operation.
-    ///
-    /// \param arg Node that produces the input tensor.
-    Sign(const Output<Node>& arg);
-
-    bool visit_attributes(AttributeVisitor& visitor) override;
-    virtual std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
-    bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
-    bool has_evaluate() const override;
-};
+using ov::op::v0::Sign;
 }  // namespace v0
 using v0::Sign;
 }  // namespace op
diff --git a/ngraph/core/include/ngraph/op/sin.hpp b/ngraph/core/include/ngraph/op/sin.hpp
index edac3b9da51532..4dd276f09ba820 100644
--- a/ngraph/core/include/ngraph/op/sin.hpp
+++ b/ngraph/core/include/ngraph/op/sin.hpp
@@ -5,42 +5,12 @@
 #pragma once
 
 #include "ngraph/op/util/unary_elementwise_arithmetic.hpp"
+#include "openvino/op/sin.hpp"
 
 namespace ngraph {
 namespace op {
 namespace v0 {
-// clang-format off
-    /// \brief Elementwise sine operation.
-    ///
-    /// ## Inputs
-    ///
-    /// |       | Type                              | Description                                     |
-    /// | ----- | --------------------------------- | ----------------------------------------------- |
-    /// | `arg` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and numeric element type. |
-    ///
-    /// ## Output
-    ///
-    /// | Type                   | Description                                                                            |
-    /// | ---------------------- | ------------------------------------------------------------------------------------ |
-    /// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \sin(\texttt{arg}[i_1,\dots,i_n])\f$ |
-// clang-format on
-class NGRAPH_API Sin : public util::UnaryElementwiseArithmetic {
-public:
-    static constexpr NodeTypeInfo type_info{"Sin", 0};
-    const NodeTypeInfo& get_type_info() const override {
-        return type_info;
-    }
-    /// \brief Constructs a sine operation.
-    ///
-    /// \param arg Node that produces the input tensor.
-    Sin(const Output<Node>& arg);
-    Sin() = default;
-
-    bool visit_attributes(AttributeVisitor& visitor) override;
-    virtual std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
-    bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
-    bool has_evaluate() const override;
-};
+using ov::op::v0::Sin;
 }  // namespace v0
 using v0::Sin;
 }  // namespace op
diff --git a/ngraph/core/include/ngraph/op/sinh.hpp b/ngraph/core/include/ngraph/op/sinh.hpp
index 6ddb594fc22246..927f33bdae3b8a 100644
--- a/ngraph/core/include/ngraph/op/sinh.hpp
+++ b/ngraph/core/include/ngraph/op/sinh.hpp
@@ -5,25 +5,12 @@
 #pragma once
 
 #include "ngraph/op/util/unary_elementwise_arithmetic.hpp"
+#include "openvino/op/sinh.hpp"
 
 namespace ngraph {
 namespace op {
 namespace v0 {
-/// \brief Elementwise hyperbolic sine (sinh) operation.
-class NGRAPH_API Sinh : public util::UnaryElementwiseArithmetic {
-public:
-    NGRAPH_RTTI_DECLARATION;
-    /// \brief Constructs a hyperbolic sine operation.
-    ///
-    /// \param arg Node that produces the input tensor.
-    Sinh(const Output<Node>& arg);
-    Sinh() = default;
-
-    bool visit_attributes(AttributeVisitor& visitor) override;
-    virtual std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
-    bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
-    bool has_evaluate() const override;
-};
+using ov::op::v0::Sinh;
 }  // namespace v0
 using v0::Sinh;
 }  // namespace op
- /// - /// \param arg Node that produces the input tensor. - Sinh(const Output& arg); - Sinh() = default; - - bool visit_attributes(AttributeVisitor& visitor) override; - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; -}; +using ov::op::v0::Sinh; } // namespace v0 using v0::Sinh; } // namespace op diff --git a/ngraph/core/include/ngraph/op/softmax.hpp b/ngraph/core/include/ngraph/op/softmax.hpp index 9eae046c216486..f0fadb841a451f 100644 --- a/ngraph/core/include/ngraph/op/softmax.hpp +++ b/ngraph/core/include/ngraph/op/softmax.hpp @@ -5,42 +5,12 @@ #pragma once #include "ngraph/op/op.hpp" +#include "openvino/op/softmax.hpp" namespace ngraph { namespace op { namespace v1 { -class NGRAPH_API Softmax : public Op { -public: - NGRAPH_RTTI_DECLARATION; - - Softmax() : m_axis(0) {} - /// \brief Constructs a softmax operation. - /// - /// \param arg Node that produces the first input tensor.
- /// `[d0, ...]` - /// \param axis The axis position (0-based) on which to calculate the softmax. - /// - /// Output `[d0, ...]` - /// - Softmax(const Output& arg, const size_t axis = 1); - - bool visit_attributes(AttributeVisitor& visitor) override; - void validate_and_infer_types() override; - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - size_t get_axis() const { - return m_axis; - } - void set_axis(const size_t axis) { - m_axis = axis; - } - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; - -private: - size_t m_axis; -}; +using ov::op::v1::Softmax; } // namespace v1 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/softplus.hpp b/ngraph/core/include/ngraph/op/softplus.hpp index cc49918d5e968c..2c627d19eddb9b 100644 --- a/ngraph/core/include/ngraph/op/softplus.hpp +++ b/ngraph/core/include/ngraph/op/softplus.hpp @@ -6,30 +6,12 @@ #include "ngraph/node.hpp" #include "ngraph/op/op.hpp" +#include "openvino/op/softplus.hpp" namespace ngraph { namespace op { namespace v4 { -/// \brief A Self Regularized Non-Monotonic Neural Activation Function -/// f(x) = ln(exp(x) + 1.) -/// -class NGRAPH_API SoftPlus : public ngraph::op::Op { -public: - NGRAPH_RTTI_DECLARATION; - - SoftPlus() = default; - /// \brief Constructs an SoftPlus operation. - /// - /// \param data Input tensor - SoftPlus(const Output& arg); - bool visit_attributes(AttributeVisitor& visitor) override; - void validate_and_infer_types() override; - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; -}; +using ov::op::v4::SoftPlus; } // namespace v4 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/space_to_batch.hpp b/ngraph/core/include/ngraph/op/space_to_batch.hpp index 6564947339eaa7..6ace3da579391b 100644 --- a/ngraph/core/include/ngraph/op/space_to_batch.hpp +++ b/ngraph/core/include/ngraph/op/space_to_batch.hpp @@ -5,50 +5,12 @@ #pragma once #include "ngraph/op/op.hpp" +#include "openvino/op/space_to_batch.hpp" namespace ngraph { namespace op { namespace v1 { -/// \brief SpaceToBatch permutes data tensor blocks of spatial data into batch -/// dimension. -/// -/// \note Values from spatial blocks dimensions are moved in the batch dimension. -/// -/// Output node produces a tensor with shape: tensor with shape -/// `[batch * block_shape[0] * block_shape[1] * ... * block_shape[N - 1], -/// (pads_begin[1] + D_1 + pads_end[1]) / block_shape[1], -/// (pads_begin[2] + D_2 + pads_end[2]) / block_shape[2], ..., -/// (pads_begin[N - 1] + D_{N - 1} + pads_end[N - 1]) / block_shape[N - 1]` -/// of the same type as `data` input. -class NGRAPH_API SpaceToBatch : public Op { -public: - NGRAPH_RTTI_DECLARATION; - - SpaceToBatch() = default; - - /// \brief Constructs a SpaceToBatch operation. - /// - /// \param data Node producing the data tensor - /// \param block_shape The sizes of the block of values to be moved - /// \param pads_begin Specifies the padding for the beginning along each axis of - /// `data` input - /// \param pads_end Specifies the padding for the ending along each axis of `data` - /// input. 
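Reviewer note: the SpaceToBatch output-shape formula above can be sanity-checked with a small sketch (illustrative; the NCHW shape and zero padding are assumed):

#include "ngraph/ngraph.hpp"
using namespace ngraph;

std::shared_ptr<Node> space_to_batch_sketch() {
    auto data = std::make_shared<op::Parameter>(element::f32, Shape{1, 3, 4, 4});
    auto block = op::Constant::create(element::i64, Shape{4}, {1, 1, 2, 2});
    auto pads_begin = op::Constant::create(element::i64, Shape{4}, {0, 0, 0, 0});
    auto pads_end = op::Constant::create(element::i64, Shape{4}, {0, 0, 0, 0});
    // Per the formula: batch 1*1*1*2*2 = 4, spatial dims 4/2 = 2 -> output {4, 3, 2, 2}.
    return std::make_shared<op::v1::SpaceToBatch>(data, block, pads_begin, pads_end);
}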
- SpaceToBatch(const Output& data, - const Output& block_shape, - const Output& pads_begin, - const Output& pads_end); - - void validate_and_infer_types() override; - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - bool visit_attributes(AttributeVisitor& visitor) override; - - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; - -private: - bool evaluate_space_to_batch(const HostTensorVector& outputs, const HostTensorVector& inputs) const; -}; +using ov::op::v1::SpaceToBatch; } // namespace v1 using v1::SpaceToBatch; } // namespace op diff --git a/ngraph/core/include/ngraph/op/space_to_depth.hpp b/ngraph/core/include/ngraph/op/space_to_depth.hpp index d72c725f60efda..121279414e6643 100644 --- a/ngraph/core/include/ngraph/op/space_to_depth.hpp +++ b/ngraph/core/include/ngraph/op/space_to_depth.hpp @@ -5,77 +5,13 @@ #pragma once #include "ngraph/op/op.hpp" +#include "openvino/op/space_to_depth.hpp" namespace ngraph { namespace op { namespace v0 { -/// \brief SpaceToDepth permutes input tensor blocks of spatial data into depth -/// dimension. -/// -/// \note Values from the height and width dimensions are moved to the depth dimension. -/// -/// Output node produces a tensor with shape: -/// [N, C * blocksize * blocksize, H / blocksize, W / blocksize] -class NGRAPH_API SpaceToDepth : public Op { -public: - NGRAPH_RTTI_DECLARATION; - - enum class SpaceToDepthMode { - // The output depth is gathered from [block_size, ..., block_size, C] - BLOCKS_FIRST, - // The output depth is gathered from [C, block_size, ..., block_size] - DEPTH_FIRST - }; - - SpaceToDepth() = default; - /// \brief Constructs a SpaceToDepth operation. - /// - /// \param data - Node producing the input tensor - /// \param mode Specifies how the output depth dimension is gathered - /// from block coordinates and the old depth dimension. 
- /// \param block_size - the size of the block of values to be moved - SpaceToDepth(const Output& data, const SpaceToDepthMode& mode, std::size_t block_size = 1); - - SpaceToDepth(const Output& data, const std::string& mode, std::size_t block_size = 1); - - bool visit_attributes(AttributeVisitor& visitor) override; - std::size_t get_block_size() const { - return m_blocksize; - } - SpaceToDepthMode get_mode() const { - return m_mode; - } - void validate_and_infer_types() override; - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; - -protected: - std::size_t m_blocksize; - SpaceToDepthMode m_mode; -}; +using ov::op::v0::SpaceToDepth; } // namespace v0 using v0::SpaceToDepth; } // namespace op - -NGRAPH_API -std::ostream& operator<<(std::ostream& s, const op::v0::SpaceToDepth::SpaceToDepthMode& type); } // namespace ngraph - -namespace ov { - -template <> -class NGRAPH_API AttributeAdapter - : public EnumAttributeAdapterBase { -public: - AttributeAdapter(ngraph::op::v0::SpaceToDepth::SpaceToDepthMode& value) - : EnumAttributeAdapterBase(value) {} - - static constexpr DiscreteTypeInfo type_info{"AttributeAdapter", 0}; - const DiscreteTypeInfo& get_type_info() const override { - return type_info; - } -}; - -} // namespace ov diff --git a/ngraph/core/include/ngraph/op/split.hpp b/ngraph/core/include/ngraph/op/split.hpp index fa3023697d29e7..591b5d915d882c 100644 --- a/ngraph/core/include/ngraph/op/split.hpp +++ b/ngraph/core/include/ngraph/op/split.hpp @@ -9,41 +9,12 @@ #include "ngraph/node.hpp" #include "ngraph/op/op.hpp" +#include "openvino/op/split.hpp" namespace ngraph { namespace op { namespace v1 { -/// \brief Splits the input tensor into a list of equal sized tensors -class NGRAPH_API Split : public ngraph::op::Op { -public: - NGRAPH_RTTI_DECLARATION; - - /// \brief Constructs a split operation. - Split() = default; - /// \brief Constructs a split operation. - /// \param data The tensor to be split. - /// \param axis The index of an axis in "data" along which to perform - /// the split. - /// \param num_splits The number of pieces that the data tensor should be - /// split into. - Split(const Output& data, const Output& axis, const size_t num_splits); - - bool visit_attributes(AttributeVisitor& visitor) override; - void validate_and_infer_types() override; - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - size_t get_num_splits() const { - return m_num_splits; - } - void set_num_splits(const size_t num_splits) { - m_num_splits = num_splits; - } - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; - -protected: - size_t m_num_splits; -}; +using ov::op::v1::Split; } // namespace v1 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/sqrt.hpp b/ngraph/core/include/ngraph/op/sqrt.hpp index f6b55158c179d2..bcad6a2392bafa 100644 --- a/ngraph/core/include/ngraph/op/sqrt.hpp +++ b/ngraph/core/include/ngraph/op/sqrt.hpp @@ -5,40 +5,12 @@ #pragma once #include "ngraph/op/util/unary_elementwise_arithmetic.hpp" +#include "openvino/op/sqrt.hpp" namespace ngraph { namespace op { namespace v0 { -// clang-format off - /// \brief Elementwise square root operation. 
- /// - /// ## Inputs - /// - /// | | Type | Description | - /// | ----- | --------------------------------- | ----------------------------------------------- | - /// | `arg` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and numeric element type. | - /// - /// ## Output - /// - /// | Type | Description | - /// | ---------------------- | ------------------------------------------------------------------------------------- | - /// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \sqrt{\texttt{arg}[i_1,\dots,i_n]}\f$ | -// clang-format on -class NGRAPH_API Sqrt : public util::UnaryElementwiseArithmetic { -public: - NGRAPH_RTTI_DECLARATION; - - /// \brief Constructs a square operation. - /// - /// \param arg Node that produces the input tensor. - Sqrt(const Output& arg); - Sqrt() = default; - - bool visit_attributes(AttributeVisitor& visitor) override; - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; -}; +using ov::op::v0::Sqrt; } // namespace v0 using v0::Sqrt; } // namespace op diff --git a/ngraph/core/include/ngraph/op/squared_difference.hpp b/ngraph/core/include/ngraph/op/squared_difference.hpp index 9c9888e4cf28b6..4b22f17f83a197 100644 --- a/ngraph/core/include/ngraph/op/squared_difference.hpp +++ b/ngraph/core/include/ngraph/op/squared_difference.hpp @@ -5,30 +5,12 @@ #pragma once #include "ngraph/op/util/binary_elementwise_arithmetic.hpp" +#include "openvino/op/squared_difference.hpp" namespace ngraph { namespace op { namespace v0 { -/// \brief Calculates an element-wise squared difference between two tensors -/// -/// y[i] = (x1[i] - x2[i])^2 -class NGRAPH_API SquaredDifference : public util::BinaryElementwiseArithmetic { -public: - NGRAPH_RTTI_DECLARATION; - - /// \brief Constrcuts an uninitialized squared difference operation - SquaredDifference() : util::BinaryElementwiseArithmetic(AutoBroadcastSpec::NUMPY) {} - /// \brief Constructs the squared difference operation. 
- /// - /// \param x1 First input tensor - /// \param x2 Second input tensor - /// \param auto_broadcast Auto broadcast specification - SquaredDifference(const Output& x1, - const Output& x2, - const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY)); - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; -}; +using ov::op::v0::SquaredDifference; } // namespace v0 using v0::SquaredDifference; } // namespace op diff --git a/ngraph/core/include/ngraph/op/squeeze.hpp b/ngraph/core/include/ngraph/op/squeeze.hpp index 6c1d78586f86e1..bec910010c0e17 100644 --- a/ngraph/core/include/ngraph/op/squeeze.hpp +++ b/ngraph/core/include/ngraph/op/squeeze.hpp @@ -9,33 +9,12 @@ #include "ngraph/axis_vector.hpp" #include "ngraph/node.hpp" #include "ngraph/op/op.hpp" +#include "openvino/op/squeeze.hpp" namespace ngraph { namespace op { namespace v0 { -class NGRAPH_API Squeeze : public ngraph::op::Op { -public: - NGRAPH_RTTI_DECLARATION; - - Squeeze(); - Squeeze(const Output& data, const Output& axes); - Squeeze(const Output& data); - - bool visit_attributes(AttributeVisitor& visitor) override; - void validate_and_infer_types() override; - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; - bool evaluate_lower(const HostTensorVector& outputs) const override; - bool evaluate_upper(const HostTensorVector& outputs) const override; - bool constant_fold(OutputVector& output_values, const OutputVector& inputs_values) override; - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - bool is_dynamic() const override; - -private: - Output get_default_axes_input() const; -}; +using ov::op::v0::Squeeze; } // namespace v0 using v0::Squeeze; } // namespace op diff --git a/ngraph/core/include/ngraph/op/strided_slice.hpp b/ngraph/core/include/ngraph/op/strided_slice.hpp index 6c3cff7fa81b3c..8239d67cf6bd10 100644 --- a/ngraph/core/include/ngraph/op/strided_slice.hpp +++ b/ngraph/core/include/ngraph/op/strided_slice.hpp @@ -10,102 +10,12 @@ #include "ngraph/node.hpp" #include "ngraph/op/op.hpp" #include "ngraph/op/util/attr_types.hpp" +#include "openvino/op/strided_slice.hpp" namespace ngraph { namespace op { namespace v1 { -/// \brief Takes a slice of an input tensor, i.e., the sub-tensor that resides within a -/// bounding box, optionally with stride. -class NGRAPH_API StridedSlice : public Op { -public: - NGRAPH_RTTI_DECLARATION; - - StridedSlice() = default; - - /// \brief Constructs a dynamic tensor strided slice operation. - /// - /// \param data The tensor to be sliced. - /// \param begin 1D tensor with begin indexes for input blob slicing. - /// \param end 1D tensor with end indexes for input blob slicing. - /// \param strides The slicing strides; for example, strides of `{n,m}` - /// means to take every nth row and every mth column - /// of the input matrix. - /// \param begin_mask When begin_mask[i] equal to 1 means that the - /// corresponding dimension of the begin input is ignored. - /// \param end_mask When end_mask[i] is 1, the corresponding dimension of - /// the end input is ignored. - /// \param new_axis_mask If new_axis_mask[i] is 1, a length 1 dimension - /// is inserted on the i-th position. - /// \param shrink_axis_mask If shrink_axis_mask[i] is 1, the dimension - /// on the i-th position is deleted. - /// \param ellipsis_mask It inserts missing dimensions - /// on a position of a non-zero bit. 
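Reviewer note: the StridedSlice mask semantics above are easiest to read off a sketch (illustrative; shapes and mask values are assumed):

#include "ngraph/ngraph.hpp"
using namespace ngraph;

std::shared_ptr<Node> strided_slice_sketch() {
    auto data = std::make_shared<op::Parameter>(element::f32, Shape{4, 4});
    auto begin = op::Constant::create(element::i64, Shape{2}, {1, 0});
    auto end = op::Constant::create(element::i64, Shape{2}, {3, 0});
    auto strides = op::Constant::create(element::i64, Shape{2}, {1, 1});
    // A mask bit of 1 on axis 1 ignores the begin/end bounds there and takes the
    // whole axis, so the result is rows 1..2 with all columns: shape {2, 4}.
    return std::make_shared<op::v1::StridedSlice>(data, begin, end, strides,
                                                  std::vector<int64_t>{0, 1},
                                                  std::vector<int64_t>{0, 1});
}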
- StridedSlice(const Output& data, - const Output& begin, - const Output& end, - const Output& strides, - const std::vector& begin_mask, - const std::vector& end_mask, - const std::vector& new_axis_mask = std::vector{}, - const std::vector& shrink_axis_mask = std::vector{}, - const std::vector& ellipsis_mask = std::vector{}); - - /// \brief Constructs a dynamic tensor strided slice operation. - /// - /// \param data The tensor to be sliced. - /// \param begin 1D tensor with begin indexes for input blob slicing. - /// \param end 1D tensor with end indexes for input blob slicing. - /// \param begin_mask When begin_mask[i] equal to 1 means that the - /// corresponding dimension of the begin input is ignored. - /// \param end_mask When end_mask[i] is 1, the corresponding dimension of - /// the end input is ignored. - /// \param new_axis_mask If new_axis_mask[i] is 1, a length 1 dimension - /// is inserted on the i-th position. - /// \param shrink_axis_mask If shrink_axis_mask[i] is 1, the dimension - /// on the i-th position is deleted. - /// \param ellipsis_mask It inserts missing dimensions - /// on a position of a non-zero bit. - StridedSlice(const Output& data, - const Output& begin, - const Output& end, - const std::vector& begin_mask, - const std::vector& end_mask, - const std::vector& new_axis_mask = std::vector{}, - const std::vector& shrink_axis_mask = std::vector{}, - const std::vector& ellipsis_mask = std::vector{}); - - bool visit_attributes(AttributeVisitor& visitor) override; - const std::vector& get_begin_mask() const { - return m_begin_mask; - } - const std::vector& get_end_mask() const { - return m_end_mask; - } - const std::vector& get_new_axis_mask() const { - return m_new_axis_mask; - } - const std::vector& get_shrink_axis_mask() const { - return m_shrink_axis_mask; - } - const std::vector& get_ellipsis_mask() const { - return m_ellipsis_mask; - } - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - void validate_and_infer_types() override; - bool evaluate(const HostTensorVector& output_values, const HostTensorVector& input_values) const override; - bool has_evaluate() const override; - bool evaluate_lower(const HostTensorVector& outputs) const override; - bool evaluate_upper(const HostTensorVector& outputs) const override; - -private: - AxisSet convert_mask_to_axis_set(const std::vector& mask) const; - - std::vector m_begin_mask; - std::vector m_end_mask; - std::vector m_new_axis_mask; - std::vector m_shrink_axis_mask; - std::vector m_ellipsis_mask; -}; +using ov::op::v1::StridedSlice; } // namespace v1 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/subtract.hpp b/ngraph/core/include/ngraph/op/subtract.hpp index d6ba078714c634..93fdf98e702983 100644 --- a/ngraph/core/include/ngraph/op/subtract.hpp +++ b/ngraph/core/include/ngraph/op/subtract.hpp @@ -5,30 +5,12 @@ #pragma once #include "ngraph/op/util/binary_elementwise_arithmetic.hpp" +#include "openvino/op/subtract.hpp" namespace ngraph { namespace op { namespace v1 { -/// \brief Elementwise subtraction operation. -class NGRAPH_API Subtract : public util::BinaryElementwiseArithmetic { -public: - NGRAPH_RTTI_DECLARATION; - - Subtract() : util::BinaryElementwiseArithmetic(AutoBroadcastSpec::NUMPY) {} - - /// \brief Constructs a subtraction operation. - /// - /// \param arg0 Node that produces the first input tensor. - /// \param arg1 Node that produces the second input tensor. 
- /// \param auto_broadcast Auto broadcast specification - Subtract(const Output& arg0, - const Output& arg1, - const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY)); - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; -}; +using ov::op::v1::Subtract; } // namespace v1 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/swish.hpp b/ngraph/core/include/ngraph/op/swish.hpp index 980e3390c1b8bd..d125ec55797624 100644 --- a/ngraph/core/include/ngraph/op/swish.hpp +++ b/ngraph/core/include/ngraph/op/swish.hpp @@ -6,34 +6,12 @@ #include "ngraph/node.hpp" #include "ngraph/op/op.hpp" +#include "openvino/op/swish.hpp" namespace ngraph { namespace op { namespace v4 { -/// \brief A Swish Activation Function -/// f(x) = x / (1.0 + exp(-beta * x)) or -/// f(x) = x * sigmoid(beta * x) -/// -class NGRAPH_API Swish : public ngraph::op::Op { -public: - NGRAPH_RTTI_DECLARATION; - Swish() = default; - - /// \brief Constructs an Swish operation. - /// - /// \param data Input tensor - /// \param beta Scalar with beta value. If the argument is not specified then use - /// the default value 1.0 - Swish(const Output& arg, const Output& beta); - explicit Swish(const Output& arg); - - bool visit_attributes(AttributeVisitor& visitor) override; - void validate_and_infer_types() override; - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; -}; +using ov::op::v4::Swish; } // namespace v4 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/tan.hpp b/ngraph/core/include/ngraph/op/tan.hpp index 33dc9e32bec933..992f237e09cf39 100644 --- a/ngraph/core/include/ngraph/op/tan.hpp +++ b/ngraph/core/include/ngraph/op/tan.hpp @@ -5,39 +5,12 @@ #pragma once #include "ngraph/op/util/unary_elementwise_arithmetic.hpp" +#include "openvino/op/tan.hpp" namespace ngraph { namespace op { namespace v0 { -// clang-format off - /// \brief Elementwise tangent operation. - /// - /// ## Inputs - /// - /// | | Type | Description | - /// | ----- | --------------------------------- | ----------------------------------------------- | - /// | `arg` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and numeric element type. | - /// - /// ## Output - /// - /// | Type | Description | - /// | ---------------------- | ------------------------------------------------------------------------------------ | - /// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \tan(\texttt{arg}[i_1,\dots,i_n])\f$ | -// clang-format on -class NGRAPH_API Tan : public util::UnaryElementwiseArithmetic { -public: - NGRAPH_RTTI_DECLARATION; - /// \brief Constructs a tangent operation. - /// - /// \param arg Node that produces the input tensor. 
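Reviewer note on the Swish alias above: both constructor forms survive the move. A sketch (illustrative; the beta value is assumed):

#include "ngraph/ngraph.hpp"
using namespace ngraph;

std::shared_ptr<Node> swish_sketch() {
    auto x = std::make_shared<op::Parameter>(element::f32, Shape{8});
    auto beta = op::Constant::create(element::f32, Shape{}, {1.0f});
    // f(x) = x * sigmoid(beta * x); omitting the beta input uses the default of 1.0.
    return std::make_shared<op::v4::Swish>(x, beta);
}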
- Tan(const Output& arg); - Tan() = default; - - bool visit_attributes(AttributeVisitor& visitor) override; - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; -}; +using ov::op::v0::Tan; } // namespace v0 using v0::Tan; } // namespace op diff --git a/ngraph/core/include/ngraph/op/tanh.hpp b/ngraph/core/include/ngraph/op/tanh.hpp index a891ff691ce761..0d2d92938865c2 100644 --- a/ngraph/core/include/ngraph/op/tanh.hpp +++ b/ngraph/core/include/ngraph/op/tanh.hpp @@ -5,26 +5,12 @@ #pragma once #include "ngraph/op/util/unary_elementwise_arithmetic.hpp" +#include "openvino/op/tanh.hpp" namespace ngraph { namespace op { namespace v0 { -/// \brief Elementwise hyperbolic tangent operation. -class NGRAPH_API Tanh : public util::UnaryElementwiseArithmetic { -public: - NGRAPH_RTTI_DECLARATION; - - /// \brief Constructs a hyperbolic tangent operation. - /// - /// \param arg Node that produces the input tensor. - Tanh(const Output& arg); - Tanh() = default; - - bool visit_attributes(AttributeVisitor& visitor) override; - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; -}; +using ov::op::v0::Tanh; } // namespace v0 using v0::Tanh; } // namespace op diff --git a/ngraph/core/include/ngraph/op/tile.hpp b/ngraph/core/include/ngraph/op/tile.hpp index cc9c886472ce8e..fb3b5436175eed 100644 --- a/ngraph/core/include/ngraph/op/tile.hpp +++ b/ngraph/core/include/ngraph/op/tile.hpp @@ -6,34 +6,12 @@ #include "ngraph/op/op.hpp" #include "ngraph/runtime/host_tensor.hpp" +#include "openvino/op/tile.hpp" namespace ngraph { namespace op { namespace v0 { -/// \brief Dynamic Tiling operation which repeats a tensor multiple times -/// along each dimension -class NGRAPH_API Tile : public Op { -public: - NGRAPH_RTTI_DECLARATION; - - Tile() = default; - /// \brief Perform dynamic padding of a tensor - /// - /// \param data The node producing input tensor to be padded. - /// \param repeats The node producing the per-dimension replication factor - Tile(const Output& data, const Output& repeats); - - bool visit_attributes(AttributeVisitor& visitor) override; - void validate_and_infer_types() override; - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; - -private: - bool evaluate_tile(const HostTensorVector& outputs, const HostTensorVector& inputs) const; -}; +using ov::op::v0::Tile; } // namespace v0 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/topk.hpp b/ngraph/core/include/ngraph/op/topk.hpp index 8ce1eef6f117c9..c6cb2a9cf842ac 100644 --- a/ngraph/core/include/ngraph/op/topk.hpp +++ b/ngraph/core/include/ngraph/op/topk.hpp @@ -9,157 +9,16 @@ #include "ngraph/axis_set.hpp" #include "ngraph/op/constant.hpp" #include "ngraph/op/op.hpp" +#include "openvino/op/topk.hpp" namespace ngraph { namespace op { namespace v1 { -/// \brief Computes indices and values of the k maximum/minimum values -/// for each slice along specified axis. 
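Reviewer note: for the Tile alias above, `repeats` gives a per-dimension replication factor (sketch; shapes assumed):

#include "ngraph/ngraph.hpp"
using namespace ngraph;

std::shared_ptr<Node> tile_sketch() {
    auto data = std::make_shared<op::Parameter>(element::f32, Shape{2, 3});
    // Repeat twice along axis 0, once along axis 1: {2, 3} -> {4, 3}.
    auto repeats = op::Constant::create(element::i64, Shape{2}, {2, 1});
    return std::make_shared<op::v0::Tile>(data, repeats);
}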
-class NGRAPH_API TopK : public Op { -public: - NGRAPH_RTTI_DECLARATION; - - using SortType = TopKSortType; - using Mode = TopKMode; - - /// \brief Constructs a TopK operation - TopK() = default; - /// \brief Constructs a TopK operation with two outputs: values and indices. - /// By default the indices output is described by i32 data type. - /// - /// \param data The input tensor - /// \param k Specifies how many maximum/minimum elements should be computed - /// (note: scalar input tensor) - /// \param axis The axis along which to compute top k indices - /// \param mode Specifies which operation (min or max) is used to select - /// the biggest element of two. - /// \param sort Specifies order of output elements and/or indices - /// Accepted values: none, index, value - /// \param index_element_type Specyfies type of produced indices - TopK(const Output& data, - const Output& k, - const int64_t axis, - const std::string& mode, - const std::string& sort, - const element::Type& index_element_type = element::i32); - - TopK(const Output& data, - const Output& k, - const int64_t axis, - const Mode mode, - const SortType sort, - const element::Type& index_element_type = element::i32); - - bool visit_attributes(AttributeVisitor& visitor) override; - void validate_and_infer_types() override; - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - /// \brief Returns axis value after normalization - /// \note If input rank required to normalization is dynamic, the exception is - /// thrown - uint64_t get_axis() const; - /// \brief Returns axis value before normalization - int64_t get_provided_axis() const { - return m_axis; - } - void set_axis(const int64_t axis); - Mode get_mode() const { - return m_mode; - } - void set_mode(const Mode mode) { - m_mode = mode; - } - SortType get_sort_type() const { - return m_sort; - } - void set_sort_type(const SortType sort) { - m_sort = sort; - } - element::Type get_index_element_type() const { - return m_index_element_type; - } - void set_index_element_type(const element::Type& index_element_type) { - m_index_element_type = index_element_type; - } - /// \brief Returns the value of K, if available - /// - /// \note If the second input to this op is a constant, the value is retrieved - /// and returned. If the input is not constant(dynamic) this method returns 0 - size_t get_k() const; - void set_k(size_t k); - size_t get_default_output_index() const override { - return no_default_index(); - } - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; - -protected: - int64_t m_axis; - uint64_t m_normalized_axis; - Mode m_mode; - SortType m_sort; - element::Type m_index_element_type{element::i32}; - - virtual size_t read_k_from_constant_node(const std::shared_ptr& node, - const element::Type& k_element_type) const; - - template - size_t validate_and_get_k(const std::shared_ptr& k_constant) const; - Shape compute_output_shape(const std::string& node_description, - const PartialShape input_partial_shape, - const int64_t k) const; - void set_axis(const Rank input_rank, const int64_t axis); -}; +using ov::op::v1::TopK; } // namespace v1 namespace v3 { -/// \brief Computes indices and values of the k maximum/minimum values -/// for each slice along specified axis. 
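Reviewer note: the enum-based v1::TopK constructor above reads as follows in practice (sketch; k, axis, and shapes are assumed):

#include "ngraph/ngraph.hpp"
using namespace ngraph;

std::shared_ptr<Node> topk_sketch() {
    auto data = std::make_shared<op::Parameter>(element::f32, Shape{2, 10});
    auto k = op::Constant::create(element::i64, Shape{}, {3});
    // output(0) carries the top-3 values per row, output(1) their indices (i32 by default).
    return std::make_shared<op::v1::TopK>(data, k, 1, op::v1::TopK::Mode::MAX,
                                          op::v1::TopK::SortType::SORT_VALUES);
}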
-class NGRAPH_API TopK : public v1::TopK { -public: - static constexpr NodeTypeInfo type_info{"TopK", 3}; - const NodeTypeInfo& get_type_info() const override { - return type_info; - } - /// \brief Constructs a TopK operation - TopK() = default; - /// \brief Constructs a TopK operation with two outputs: values and indices. - /// By default the indices output is described by i32 data type. - /// - /// \param data The input tensor - /// \param k Specifies how many maximum/minimum elements should be computed - /// (note: scalar input tensor) - /// \param axis The axis along which to compute top k indices - /// \param mode Specifies which operation (min or max) is used to select - /// the biggest element of two. - /// \param sort Specifies order of output elements and/or indices - /// Accepted values: none, index, value - /// \param index_element_type Specyfies type of produced indices - TopK(const Output& data, - const Output& k, - const int64_t axis, - const std::string& mode, - const std::string& sort, - const element::Type& index_element_type = element::i32); - - TopK(const Output& data, - const Output& k, - const int64_t axis, - const Mode mode, - const SortType sort, - const element::Type& index_element_type = element::i32); - bool visit_attributes(AttributeVisitor& visitor) override; - void validate_and_infer_types() override; - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; - -protected: - virtual size_t read_k_from_constant_node(const std::shared_ptr& node, - const element::Type& k_element_type) const override; -}; +using ov::op::v3::TopK; } // namespace v3 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/transpose.hpp b/ngraph/core/include/ngraph/op/transpose.hpp index 92127203c4da2f..41620e7539eb33 100644 --- a/ngraph/core/include/ngraph/op/transpose.hpp +++ b/ngraph/core/include/ngraph/op/transpose.hpp @@ -7,35 +7,12 @@ #include "ngraph/axis_vector.hpp" #include "ngraph/node.hpp" #include "ngraph/op/op.hpp" +#include "openvino/op/transpose.hpp" namespace ngraph { namespace op { namespace v1 { -/// \brief Tensor transpose operation. -class NGRAPH_API Transpose : public Op { -public: - NGRAPH_RTTI_DECLARATION; - - Transpose() = default; - /// - /// \brief Constructs a transpose operation. - /// - /// \param arg Node producing the tensor to be transposed. - /// \param input_order Node producing the permutation to apply to the axes - /// of the input shape. Must be a vector with shape [n], - /// where n is the rank of arg. The tensor's value must - /// contain every integer in the range [0, n-1]. 
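Reviewer note: a minimal sketch of the Transpose contract described above (illustrative; the permutation is assumed):

#include "ngraph/ngraph.hpp"
using namespace ngraph;

std::shared_ptr<Node> transpose_sketch() {
    auto data = std::make_shared<op::Parameter>(element::f32, Shape{2, 3, 4});
    // input_order must hold every integer in [0, rank): here it swaps the last two axes.
    auto order = op::Constant::create(element::i64, Shape{3}, {0, 2, 1});
    return std::make_shared<op::v1::Transpose>(data, order);  // -> {2, 4, 3}
}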
- /// - Transpose(const Output& arg, const Output& input_order); - - bool visit_attributes(AttributeVisitor& visitor) override; - void validate_and_infer_types() override; - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; -}; +using ov::op::v1::Transpose; } // namespace v1 using v1::Transpose; } // namespace op diff --git a/ngraph/core/include/ngraph/op/unsqueeze.hpp b/ngraph/core/include/ngraph/op/unsqueeze.hpp index ffc0b5efdb4269..a71ce25e7c61bc 100644 --- a/ngraph/core/include/ngraph/op/unsqueeze.hpp +++ b/ngraph/core/include/ngraph/op/unsqueeze.hpp @@ -9,28 +9,12 @@ #include "ngraph/axis_vector.hpp" #include "ngraph/node.hpp" #include "ngraph/op/op.hpp" +#include "openvino/op/unsqueeze.hpp" namespace ngraph { namespace op { namespace v0 { -class NGRAPH_API Unsqueeze : public Op { -public: - NGRAPH_RTTI_DECLARATION; - - Unsqueeze() = default; - Unsqueeze(const Output& data, const Output& axes); - - void validate_and_infer_types() override; - bool visit_attributes(AttributeVisitor& visitor) override; - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; - bool evaluate_lower(const HostTensorVector& output_values) const override; - bool evaluate_upper(const HostTensorVector& output_values) const override; - - bool constant_fold(OutputVector& output_values, const OutputVector& inputs_values) override; - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; -}; +using ov::op::v0::Unsqueeze; } // namespace v0 using v0::Unsqueeze; } // namespace op diff --git a/ngraph/core/include/ngraph/op/variadic_split.hpp b/ngraph/core/include/ngraph/op/variadic_split.hpp index bb09f1fb486588..9a6a9e3cb90eeb 100644 --- a/ngraph/core/include/ngraph/op/variadic_split.hpp +++ b/ngraph/core/include/ngraph/op/variadic_split.hpp @@ -5,44 +5,13 @@ #pragma once #include "ngraph/op/op.hpp" +#include "openvino/op/variadic_split.hpp" namespace ngraph { namespace op { namespace v1 { -/// \brief VariadicSplit operation splits an input tensor into pieces along some axis. -/// The pieces may have variadic lengths depending on "split_lengths" attribute. -class NGRAPH_API VariadicSplit : public Op { -public: - NGRAPH_RTTI_DECLARATION; - - /// \brief Constructs a variadic split operation. - VariadicSplit() = default; - /// \brief Constructs a variadic split operation. - /// - /// \param data The tensor to be split. - /// \param axis The index of an axis in "data" along which to perform the - /// split. - /// \param split_lengths A list containing the sizes of each output tensor - /// along the split "axis". Size of "split_lengths" should be equal to the number of - /// - /// outputs. 
The sum of split_lengths must match data.shape[axis] - VariadicSplit(const Output& data, const Output& axis, const Output& split_lengths); - - bool visit_attributes(AttributeVisitor& visitor) override; - - void validate_and_infer_types() override; - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - size_t get_default_output_index() const override { - return no_default_index(); - } - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; - -private: - bool evaluate_variadic_split(const HostTensorVector& outputs, const HostTensorVector& inputs) const; -}; +using ov::op::v1::VariadicSplit; } // namespace v1 - using v1::VariadicSplit; } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/xor.hpp b/ngraph/core/include/ngraph/op/xor.hpp index 34ff6c7675c78e..bfd0c9552095cd 100644 --- a/ngraph/core/include/ngraph/op/xor.hpp +++ b/ngraph/core/include/ngraph/op/xor.hpp @@ -7,68 +7,17 @@ #include #include "ngraph/op/util/binary_elementwise_logical.hpp" +#include "openvino/op/logical_xor.hpp" +#include "openvino/op/xor.hpp" namespace ngraph { namespace op { namespace v1 { -/// \brief Elementwise logical-xor operation. -/// -class NGRAPH_API LogicalXor : public util::BinaryElementwiseLogical { -public: - NGRAPH_RTTI_DECLARATION; - LogicalXor() = default; - /// \brief Constructs a logical-xor operation. - /// - /// \param arg0 Node that produces the first input tensor.
- /// `[d0, ...]` - /// \param arg1 Node that produces the second input tensor.
- /// `[d0, ...]` - /// \param auto_broadcast Auto broadcast specification - /// - /// Output `[d0, ...]` - /// - LogicalXor(const Output& arg0, - const Output& arg1, - const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY)); - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; -}; +using ov::op::v1::LogicalXor; } // namespace v1 namespace v0 { -/// \brief Elementwise logical-xor operation. -/// -class NGRAPH_API Xor : public util::BinaryElementwiseLogical { -public: - static constexpr NodeTypeInfo type_info{"Xor", 0}; - const NodeTypeInfo& get_type_info() const override { - return type_info; - } - Xor() = default; - /// \brief Constructs a logical-xor operation. - /// - /// \param arg0 Node that produces the first input tensor.
- /// `[d0, ...]` - /// \param arg1 Node that produces the second input tensor.
- /// `[d0, ...]` - /// \param auto_broadcast Auto broadcast specification - /// - /// Output `[d0, ...]` - /// - Xor(const Output& arg0, - const Output& arg1, - const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec()); - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; -}; +using ov::op::v0::Xor; } // namespace v0 - -// default opset version using v0::Xor; } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/openvino/op/logical_xor.hpp b/ngraph/core/include/openvino/op/logical_xor.hpp new file mode 100644 index 00000000000000..beda8749ce2a11 --- /dev/null +++ b/ngraph/core/include/openvino/op/logical_xor.hpp @@ -0,0 +1,41 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +#include "openvino/op/util/binary_elementwise_logical.hpp" + +namespace ov { +namespace op { +namespace v1 { +/// \brief Elementwise logical-xor operation. +/// +class OPENVINO_API LogicalXor : public util::BinaryElementwiseLogical { +public: + OPENVINO_RTTI_DECLARATION; + LogicalXor() = default; + /// \brief Constructs a logical-xor operation. + /// + /// \param arg0 Node that produces the first input tensor.
+ /// `[d0, ...]` + /// \param arg1 Node that produces the second input tensor.
+ /// `[d0, ...]` + /// \param auto_broadcast Auto broadcast specification + /// + /// Output `[d0, ...]` + /// + LogicalXor(const Output& arg0, + const Output& arg1, + const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY)); + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v1 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/random_uniform.hpp b/ngraph/core/include/openvino/op/random_uniform.hpp new file mode 100644 index 00000000000000..470936491ddb93 --- /dev/null +++ b/ngraph/core/include/openvino/op/random_uniform.hpp @@ -0,0 +1,84 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v8 { +/// \brief Tensor RandomUniform operation. +class OPENVINO_API RandomUniform : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + RandomUniform() = default; + + /// + /// \brief Constructs a RandomUniform operation. + /// + /// \param out_shape Node producing the tensor with output shape. + /// \param min_val Node producing the tensor with minimum value. + /// \param max_val Node producing the tensor with maximum value. + /// \param out_type Output type of the tensor. + /// \param global_seed Global seed value. + /// \param op_seed Operational seed value. + RandomUniform(const Output& out_shape, + const Output& min_val, + const Output& max_val, + const ngraph::element::Type& out_type, + uint64_t global_seed = 0, + uint64_t op_seed = 0); + + void validate_and_infer_types() override; + + bool visit_attributes(AttributeVisitor& visitor) override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + /// \return Turns off constant folding for RandomUniform operation. + bool constant_fold(OutputVector& output_values, const OutputVector& inputs_values) override { + return false; + } + + /// \return The output tensor type. + const ngraph::element::Type& get_out_type() const { + return m_output_type; + } + void set_out_type(const ngraph::element::Type& output_type) { + m_output_type = output_type; + } + + /// \return The global seed value. + uint64_t get_global_seed() const { + return m_global_seed; + } + void set_global_seed(uint64_t seed) { + m_global_seed = seed; + } + + /// \return The operational seed value. + uint64_t get_op_seed() const { + return m_op_seed; + } + void set_op_seed(uint64_t seed2) { + m_op_seed = seed2; + } + + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + + bool has_evaluate() const override; + +protected: + ngraph::element::Type m_output_type; + uint64_t m_global_seed; + uint64_t m_op_seed; + + mutable std::mutex m_state_mutex; + mutable std::pair m_state; +}; +} // namespace v8 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/range.hpp b/ngraph/core/include/openvino/op/range.hpp new file mode 100644 index 00000000000000..5dcc53e928d470 --- /dev/null +++ b/ngraph/core/include/openvino/op/range.hpp @@ -0,0 +1,74 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v4 { +/// \brief Range operation, analogous to `arange()` in Numpy. 
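Reviewer note: the new RandomUniform header above exposes both seeds directly; a construction sketch (illustrative; the seed values and [0, 1) range are assumed):

#include "ngraph/ngraph.hpp"
using namespace ngraph;

std::shared_ptr<Node> random_uniform_sketch() {
    auto out_shape = op::Constant::create(element::i64, Shape{2}, {2, 2});
    auto min_val = op::Constant::create(element::f32, Shape{}, {0.0f});
    auto max_val = op::Constant::create(element::f32, Shape{}, {1.0f});
    // Fixing both global_seed and op_seed makes the generated tensor reproducible.
    return std::make_shared<op::v8::RandomUniform>(out_shape, min_val, max_val, element::f32,
                                                   /*global_seed=*/1, /*op_seed=*/2);
}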
+class OPENVINO_API Range : public Op {
+public:
+    OPENVINO_RTTI_DECLARATION;
+    /// \brief Constructs an uninitialized range operation.
+    Range() = default;
+
+    /// \brief Constructs a range operation.
+    ///
+    /// \param start The tensor producing the start value. Must be a scalar of numeric
+    ///              element type.
+    /// \param stop The tensor producing the stop value. Must be a scalar of numeric
+    ///             element type.
+    /// \param step The tensor producing the step value. Must be a scalar of numeric
+    ///             element type.
+    /// \param output_type The type of the output.
+    Range(const Output<Node>& start, const Output<Node>& stop, const Output<Node>& step, element::Type output_type);
+
+    bool visit_attributes(AttributeVisitor& visitor) override;
+    void validate_and_infer_types() override;
+
+    std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
+    bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
+    bool has_evaluate() const override;
+    void set_output_type(element::Type output_type) {
+        m_output_type = output_type;
+    }
+    // Overload collision with method on Node
+    using Node::set_output_type;
+
+private:
+    element::Type m_output_type;
+};
+}  // namespace v4
+namespace v0 {
+/// \brief Range operation, analogous to `range()` in Python.
+class OPENVINO_API Range : public Op {
+public:
+    OPENVINO_RTTI_DECLARATION;
+
+    /// \brief Constructs an uninitialized range operation.
+    Range() = default;
+
+    /// \brief Constructs a range operation.
+    ///
+    /// \param start The tensor producing the start value. Must be a scalar of integer
+    ///              element type, and same element type as `stop` and `step`.
+    /// \param stop The tensor producing the stop value. Must be a scalar of integer
+    ///             element type, and same element type as `start` and `step`.
+    /// \param step The tensor producing the step value. Must be a scalar of integer
+    ///             element type, and same element type as `start` and `stop`.
+    Range(const Output<Node>& start, const Output<Node>& stop, const Output<Node>& step);
+
+    bool visit_attributes(AttributeVisitor& visitor) override;
+    void validate_and_infer_types() override;
+
+    std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
+    bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
+    bool has_evaluate() const override;
+};
+}  // namespace v0
+}  // namespace op
+}  // namespace ov
diff --git a/ngraph/core/include/openvino/op/reduce_l1.hpp b/ngraph/core/include/openvino/op/reduce_l1.hpp
new file mode 100644
index 00000000000000..2e5f2abba84905
--- /dev/null
+++ b/ngraph/core/include/openvino/op/reduce_l1.hpp
@@ -0,0 +1,40 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include "openvino/op/util/arithmetic_reductions_keep_dims.hpp"
+
+namespace ov {
+namespace op {
+namespace v4 {
+/// \brief Reduction operation using L1 norm: L1(x) = sum(abs(x)) if all dimensions are
+/// specified for the normalisation.
+///
+/// Reduces the tensor, eliminating the specified reduction axes by taking the L1-norm.
+class OPENVINO_API ReduceL1 : public util::ArithmeticReductionKeepDims {
+public:
+    OPENVINO_RTTI_DECLARATION;
+    /// \brief Constructs a reduce L1-norm operation.
+    ReduceL1() = default;
+    /// \brief Constructs a reduce L1-norm operation.
+    ///
+    /// \param arg The tensor to be reduced.
+    /// \param reduction_axes The axis positions (0-based) to be eliminated.
+    /// \param keep_dims If set to true it holds axes that are used for reduction.
+    ReduceL1(const Output<Node>& arg, const Output<Node>& reduction_axes, bool keep_dims = false);
+
+    /// \return The default value for Reduce.
+    OPENVINO_SUPPRESS_DEPRECATED_START
+    std::shared_ptr<Node> get_default_value() const override;
+    OPENVINO_SUPPRESS_DEPRECATED_END
+
+    std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
+
+    bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
+    bool has_evaluate() const override;
+};
+}  // namespace v4
+}  // namespace op
+}  // namespace ov
diff --git a/ngraph/core/include/openvino/op/reduce_l2.hpp b/ngraph/core/include/openvino/op/reduce_l2.hpp
new file mode 100644
index 00000000000000..e257e222b6a38e
--- /dev/null
+++ b/ngraph/core/include/openvino/op/reduce_l2.hpp
@@ -0,0 +1,39 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include "openvino/op/util/arithmetic_reductions_keep_dims.hpp"
+
+namespace ov {
+namespace op {
+namespace v4 {
+/// \brief Reduction operation using L2 norm:
+///
+/// Reduces the tensor, eliminating the specified reduction axes by taking the L2-norm.
+class OPENVINO_API ReduceL2 : public util::ArithmeticReductionKeepDims {
+public:
+    OPENVINO_RTTI_DECLARATION;
+    /// \brief Constructs a reduce L2-norm operation.
+    ReduceL2() = default;
+    /// \brief Constructs a reduce L2-norm operation.
+    ///
+    /// \param arg The tensor to be reduced.
+    /// \param reduction_axes The axis positions (0-based) to be eliminated.
+    /// \param keep_dims If set to true it holds axes that are used for reduction.
+    ReduceL2(const Output<Node>& arg, const Output<Node>& reduction_axes, bool keep_dims = false);
+
+    /// \return The default value for Reduce.
+    OPENVINO_SUPPRESS_DEPRECATED_START
+    std::shared_ptr<Node> get_default_value() const override;
+    OPENVINO_SUPPRESS_DEPRECATED_END
+
+    std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
+
+    bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
+    bool has_evaluate() const override;
+};
+}  // namespace v4
+}  // namespace op
+}  // namespace ov
diff --git a/ngraph/core/include/openvino/op/reduce_logical_and.hpp b/ngraph/core/include/openvino/op/reduce_logical_and.hpp
new file mode 100644
index 00000000000000..afbdc06440d368
--- /dev/null
+++ b/ngraph/core/include/openvino/op/reduce_logical_and.hpp
@@ -0,0 +1,35 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include "openvino/op/util/logical_reduction_keep_dims.hpp"
+
+namespace ov {
+namespace op {
+namespace v1 {
+/// \brief Performs a reduction using "logical and"
+///
+/// The reduction is performed over slices of the first input. The slices shape depends
+/// on the values passed to the second input - the axes.
+class OPENVINO_API ReduceLogicalAnd : public util::LogicalReductionKeepDims {
+public:
+    OPENVINO_RTTI_DECLARATION;
+    ReduceLogicalAnd() = default;
+    /// \brief Constructs a ReduceLogicalAnd node.
+ /// + /// \param data - The input tensor with data to be reduced + /// \param reduction_axes - The input tensor with information about axes over which + /// the first tensor should be sliced prior to the reduction operation + /// \param keep_dims - Indicates if the axes used for reduction should be held/kept + ReduceLogicalAnd(const Output& data, const Output& reduction_axes, const bool keep_dims = false); + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v1 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/reduce_logical_or.hpp b/ngraph/core/include/openvino/op/reduce_logical_or.hpp new file mode 100644 index 00000000000000..308e11bad38948 --- /dev/null +++ b/ngraph/core/include/openvino/op/reduce_logical_or.hpp @@ -0,0 +1,35 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/util/logical_reduction_keep_dims.hpp" + +namespace ov { +namespace op { +namespace v1 { +/// \brief Performs a reduction using "logical or" +/// +/// The reduction is performed over slices of the first input. The slices shape depends +/// on the values passed to the second input - the axes. +class OPENVINO_API ReduceLogicalOr : public util::LogicalReductionKeepDims { +public: + OPENVINO_RTTI_DECLARATION; + ReduceLogicalOr() = default; + /// \brief Constructs a ReduceLogicalOr node. + /// + /// \param data - The input tensor with data to be reduced + /// \param reduction_axes - The input tensor with information about axes over which + /// the first tensor should be sliced prior to the reduction operation + /// \param keep_dims - Indicates if the axes used for reduction should be held/kept + ReduceLogicalOr(const Output& data, const Output& reduction_axes, const bool keep_dims = false); + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v1 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/reduce_mean.hpp b/ngraph/core/include/openvino/op/reduce_mean.hpp new file mode 100644 index 00000000000000..8a47de1e6fd1e9 --- /dev/null +++ b/ngraph/core/include/openvino/op/reduce_mean.hpp @@ -0,0 +1,29 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/util/arithmetic_reductions_keep_dims.hpp" + +namespace ov { +namespace op { +namespace v1 { +class OPENVINO_API ReduceMean : public util::ArithmeticReductionKeepDims { +public: + OPENVINO_RTTI_DECLARATION; + ReduceMean() = default; + + /// \param arg The tensor to be summed. + /// \param reduction_axes The axis positions (0-based) to be eliminated. + /// \param keep_dims If set to 1 it holds axes that are used for reduction. 
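Reviewer note: every reduction introduced above shares the (data, reduction_axes, keep_dims) constructor shape; one sketch covers an arithmetic and a logical case (illustrative; axes and shapes are assumed):

#include "ngraph/ngraph.hpp"
using namespace ngraph;

void reduction_sketch() {
    auto x = std::make_shared<op::Parameter>(element::f32, Shape{2, 3, 4});
    auto axes = op::Constant::create(element::i64, Shape{1}, {2});
    auto l2 = std::make_shared<op::v4::ReduceL2>(x, axes, false);      // -> {2, 3}
    auto l2_keep = std::make_shared<op::v4::ReduceL2>(x, axes, true);  // -> {2, 3, 1}

    auto flags = std::make_shared<op::Parameter>(element::boolean, Shape{2, 3});
    auto axis1 = op::Constant::create(element::i64, Shape{1}, {1});
    auto all_true = std::make_shared<op::v1::ReduceLogicalAnd>(flags, axis1, false);  // -> {2}
}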
+ ReduceMean(const Output& arg, const Output& reduction_axes, bool keep_dims = false); + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v1 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/reduce_prod.hpp b/ngraph/core/include/openvino/op/reduce_prod.hpp new file mode 100644 index 00000000000000..f82bba5e354649 --- /dev/null +++ b/ngraph/core/include/openvino/op/reduce_prod.hpp @@ -0,0 +1,41 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/util/arithmetic_reductions_keep_dims.hpp" + +namespace ov { +namespace op { +namespace v1 { +/// \brief Product reduction operation. +/// +/// Reduces the tensor, eliminating the specified reduction axes by taking the product. +class OPENVINO_API ReduceProd : public util::ArithmeticReductionKeepDims { +public: + OPENVINO_RTTI_DECLARATION; + /// \brief Constructs a product reduction operation. + ReduceProd() = default; + /// \brief Constructs a product reduction operation. + /// + /// \param arg The tensor to be reduced. + /// \param reduction_axes The axis positions (0-based) to be eliminated. + /// \param keep_dims If set to true it holds axes that are used for reduction. + ReduceProd(const Output& arg, const Output& reduction_axes, bool keep_dims = false); + + /// \return The default value for Product. + OPENVINO_SUPPRESS_DEPRECATED_START + std::shared_ptr get_default_value() const override; + OPENVINO_SUPPRESS_DEPRECATED_END + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; + bool evaluate_lower(const HostTensorVector& outputs) const override; + bool evaluate_upper(const HostTensorVector& outputs) const override; +}; +} // namespace v1 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/reduce_sum.hpp b/ngraph/core/include/openvino/op/reduce_sum.hpp new file mode 100644 index 00000000000000..229ed86fd8d14f --- /dev/null +++ b/ngraph/core/include/openvino/op/reduce_sum.hpp @@ -0,0 +1,85 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/util/arithmetic_reductions_keep_dims.hpp" + +namespace ov { +namespace op { +namespace v1 { +// clang-format off +/// \brief Tensor sum operation. +/// +/// Element-wise sums the input tensor, eliminating the specified reduction axes. 
+/// For example: +/// +/// \f[ +/// \mathit{sum}\left(\{0\}, +/// \left[ \begin{array}{ccc} +/// 1 & 2 \\ 3 & 4 \\ 5 & 6 \end{array} \right]\right) = +/// \left[ (1 + 3 + 5), (2 + 4 + 6) \right] = +/// \left[ 9, 12 \right]~~~\text{(dimension 0 (rows) is eliminated)} +/// \f] +/// +/// \f[ +/// \mathit{sum}\left(\{1\}, +/// \left[ \begin{array}{ccc} +/// 1 & 2 \\ 3 & 4 \\ 5 & 6 \end{array} \right]\right) = +/// \left[ (1 + 2), (3 + 4), (5 + 6) \right] = +/// \left[ 3, 7, 11 \right]~~~\text{(dimension 1 (columns) is eliminated)} +/// \f] +/// +/// \f[ +/// \mathit{sum}\left(\{0,1\}, +/// \left[ \begin{array}{ccc} +/// 1 & 2 \\ 3 & 4 \\ 5 & 6 \end{array} \right]\right) = +/// (1 + 2) + (3 + 4) + (5 + 6) = +/// 21~~~\text{(both dimensions (rows and columns) are eliminated)} +/// \f] +/// +/// ## Parameters +/// +/// | | Description | +/// | -------------------- | ---------------------------------------- | +/// | `reduction_axes` | The axes to eliminate through summation. | +/// | `keep_dims` | If set to 1 it holds axes that are used for reduction. | +/// +/// ## Inputs +/// +/// | | Type | Description | +/// | ----- | --------------------------------- | ------------------------------------------------------ | +/// | `arg` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | An input tensor of any shape and numeric element type. | +/// +/// ## Output +/// +/// | Type | Description | +/// | ----------------------------------------- | ---------------------------------------------------------------------------------------------------------------- | +/// | \f$N[\textit{delete}(A,d_1,\dots,d_n)]\f$ | The tensor \f$T\f$, where \f$T\f$ is the input tensor with the `reduction_axes` \f$A\f$ eliminated by summation. | +// clang-format on +class OPENVINO_API ReduceSum : public util::ArithmeticReductionKeepDims { +public: + OPENVINO_RTTI_DECLARATION; + /// \brief Constructs a summation operation. + ReduceSum() = default; + /// \brief Constructs a summation operation. + /// + /// \param arg The tensor to be summed. + /// \param reduction_axes The axis positions (0-based) to be eliminated. + /// \param keep_dims If set to 1 it holds axes that are used for reduction. + ReduceSum(const Output& arg, const Output& reduction_axes, bool keep_dims = false); + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + /// \return The default value for Sum. 
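The worked example from the formulas above, expressed as graph construction (illustrative sketch, same header assumptions as earlier):

```cpp
#include <memory>

#include "openvino/op/constant.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/reduce_sum.hpp"

int main() {
    // The {3, 2} matrix [[1, 2], [3, 4], [5, 6]] from the formulas above.
    auto data = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{3, 2});
    auto axis0 = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{1}, {0});
    // Eliminating dimension 0 yields [9, 12]; axes {0, 1} would yield the scalar 21.
    auto sum = std::make_shared<ov::op::v1::ReduceSum>(data, axis0, false);
    return 0;
}
```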
+ OPENVINO_SUPPRESS_DEPRECATED_START + std::shared_ptr get_default_value() const override; + OPENVINO_SUPPRESS_DEPRECATED_END + + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v1 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/region_yolo.hpp b/ngraph/core/include/openvino/op/region_yolo.hpp new file mode 100644 index 00000000000000..17bed4408b9973 --- /dev/null +++ b/ngraph/core/include/openvino/op/region_yolo.hpp @@ -0,0 +1,84 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v0 { +class OPENVINO_API RegionYolo : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + RegionYolo() = default; + /// + /// \brief Constructs a RegionYolo operation + /// + /// \param[in] input Input + /// \param[in] coords Number of coordinates for each region + /// \param[in] classes Number of classes for each region + /// \param[in] regions Number of regions + /// \param[in] do_softmax Compute softmax + /// \param[in] mask Mask + /// \param[in] axis Axis to begin softmax on + /// \param[in] end_axis Axis to end softmax on + /// \param[in] anchors A flattened list of pairs `[width, height]` that + /// describes + /// prior box sizes. + /// + RegionYolo(const Output& input, + const size_t coords, + const size_t classes, + const size_t regions, + const bool do_softmax, + const std::vector& mask, + const int axis, + const int end_axis, + const std::vector& anchors = std::vector{}); + + bool visit_attributes(AttributeVisitor& visitor) override; + void validate_and_infer_types() override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + size_t get_num_coords() const { + return m_num_coords; + } + size_t get_num_classes() const { + return m_num_classes; + } + size_t get_num_regions() const { + return m_num_regions; + } + bool get_do_softmax() const { + return m_do_softmax; + } + const std::vector& get_mask() const { + return m_mask; + } + const std::vector& get_anchors() const { + return m_anchors; + } + int get_axis() const { + return m_axis; + } + int get_end_axis() const { + return m_end_axis; + } + +private: + size_t m_num_coords; + size_t m_num_classes; + size_t m_num_regions; + bool m_do_softmax; + std::vector m_mask; + std::vector m_anchors{}; + int m_axis; + int m_end_axis; +}; +} // namespace v0 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/relu.hpp b/ngraph/core/include/openvino/op/relu.hpp new file mode 100644 index 00000000000000..d5e4fb50556eeb --- /dev/null +++ b/ngraph/core/include/openvino/op/relu.hpp @@ -0,0 +1,33 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +#include "openvino/op/util/unary_elementwise_arithmetic.hpp" + +namespace ov { +namespace op { +namespace v0 { +/// \brief Elementwise Relu operation. +/// +class OPENVINO_API Relu : public util::UnaryElementwiseArithmetic { +public: + OPENVINO_RTTI_DECLARATION; + Relu() = default; + /// \brief Constructs a Relu operation. + /// + /// \param arg Node that produces the input tensor. 
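Wiring Relu into a graph is a one-liner (illustrative sketch, same assumptions):

```cpp
#include <memory>

#include "openvino/op/parameter.hpp"
#include "openvino/op/relu.hpp"

int main() {
    auto in = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 16});
    // max(x, 0) applied elementwise; output shape and type match the input.
    auto relu = std::make_shared<ov::op::v0::Relu>(in);
    return 0;
}
```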
+ Relu(const Output& arg); + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; + bool visit_attributes(AttributeVisitor& visitor) override; +}; +} // namespace v0 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/reorg_yolo.hpp b/ngraph/core/include/openvino/op/reorg_yolo.hpp new file mode 100644 index 00000000000000..64183776a139b4 --- /dev/null +++ b/ngraph/core/include/openvino/op/reorg_yolo.hpp @@ -0,0 +1,41 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v0 { +class OPENVINO_API ReorgYolo : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + ReorgYolo() = default; + /// \brief Constructs a ReorgYolo operation + /// + /// \param input Input + /// \param stride Stride to reorganize input by + ReorgYolo(const Output& input, const size_t stride); + + // Constructor with `strides` for backward compatibility + ReorgYolo(const Output& input, const Strides& strides); + + void validate_and_infer_types() override; + + bool visit_attributes(AttributeVisitor& visitor) override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + Strides get_strides() const { + return m_strides; + } + +private: + Strides m_strides; +}; +} // namespace v0 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/reshape.hpp b/ngraph/core/include/openvino/op/reshape.hpp new file mode 100644 index 00000000000000..b1d0bbaa2acc43 --- /dev/null +++ b/ngraph/core/include/openvino/op/reshape.hpp @@ -0,0 +1,66 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v1 { +/// \brief Tensor dynamic reshape operation. +/// +/// "Converts" an input tensor into a new shape with the same number of elements. +/// This op does not touch the actual data. If needed, use Transpose for that purpose. +/// +class OPENVINO_API Reshape : public Op { +public: + OPENVINO_RTTI_DECLARATION; + Reshape() = default; + /// \brief Constructs a dynamic reshape operation. This operation does not perform + /// transpose. + /// + /// \param arg The tensor to be reshaped. + /// \param shape_pattern The node that defines output shape shape_pattern. + /// If the input shape is \f$(a_0,\dots,a_{k-1})\f$ then the output shape + /// must + /// be of the form \f$(b_0,\dots,b_{j-1})\f$ where \f$\Pi(a_i) = \Pi(b_i)\f$. + /// A value of -1 is allowed for at most one dimension, in which case the + /// dimension size is inferred based on element count of input tensor. + /// \param special_zero Treats zeros in `shape_pattern` as wildcard flags indicating + /// a + /// copy from input shape at the same index. 
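The `-1` and `special_zero` semantics are easiest to see side by side (illustrative sketch):

```cpp
#include <memory>

#include "openvino/op/constant.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/reshape.hpp"

int main() {
    auto data = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{2, 3, 4});
    // -1 infers the dimension from the element count: {2, 3, 4} -> {2, 12}.
    auto p1 = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{2}, {2, -1});
    auto r1 = std::make_shared<ov::op::v1::Reshape>(data, p1, false);
    // With special_zero = true, 0 copies the input dim at the same index: {0, -1} -> {2, 12}.
    auto p2 = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{2}, {0, -1});
    auto r2 = std::make_shared<ov::op::v1::Reshape>(data, p2, true);
    return 0;
}
```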
+ /// + Reshape(const Output& arg, const Output& shape_pattern, bool special_zero); + + bool visit_attributes(AttributeVisitor& visitor) override; + void validate_and_infer_types() override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + bool get_special_zero() const { + return m_special_zero; + } + void set_special_zero(bool special_zero) { + m_special_zero = special_zero; + } + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; + bool evaluate_lower(const HostTensorVector& outputs) const override; + bool evaluate_upper(const HostTensorVector& outputs) const override; + bool constant_fold(OutputVector& output_values, const OutputVector& inputs_values) override; + +protected: + bool m_special_zero; + bool evaluate_reshape(const HostTensorVector& outputs, const HostTensorVector& inputs) const; + +private: + void calculate_output_shape(std::vector& reshape_pattern, + const int64_t& minus_one_idx, + const PartialShape& input_pshape, + std::vector& output_shape) const; +}; +} // namespace v1 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/reverse.hpp b/ngraph/core/include/openvino/op/reverse.hpp new file mode 100644 index 00000000000000..23d9e640923c03 --- /dev/null +++ b/ngraph/core/include/openvino/op/reverse.hpp @@ -0,0 +1,75 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "ngraph/op/op.hpp" + +namespace ov { +namespace op { +namespace v1 { +class OPENVINO_API Reverse : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + enum class Mode { INDEX, MASK }; + + Reverse() = default; + /// \brief Constructs a reverse operation. + /// + /// \param data The input tensor, some of whose axes are to be reversed. + /// \param reversed_axes The axes to reverse in a form of a set of indices or + /// boolean mask. + /// \param mode The way reversed_axes should be interpreted - a set or a mask. + Reverse(const Output& data, const Output& reversed_axes, const std::string& mode); + + Reverse(const Output& data, const Output& reversed_axes, const Mode mode); + + bool visit_attributes(AttributeVisitor& visitor) override; + void validate_and_infer_types() override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + /// \return The second input data interpretation mode. + Mode get_mode() const { + return m_mode; + } + void set_mode(const Mode mode) { + m_mode = mode; + } + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; + +protected: + Mode mode_from_string(const std::string& mode) const; + + /// \brief Indicates how the values from the second input should be interpreted. + /// + /// The second input can contain a set of indices pointing to axes in the data + /// tensor shape. + /// Alternatively it can contain a boolean mask that indicates which axes should be + /// reversed. 
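The two interpretations of the second input, side by side (illustrative sketch):

```cpp
#include <memory>

#include "openvino/op/constant.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/reverse.hpp"

int main() {
    auto data = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{2, 3, 4});
    // INDEX mode: a set of axis indices - here, reverse axes 0 and 2.
    auto idx = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{2}, {0, 2});
    auto r1 = std::make_shared<ov::op::v1::Reverse>(data, idx, ov::op::v1::Reverse::Mode::INDEX);
    // MASK mode: one boolean per data dimension; true marks an axis to reverse.
    auto mask = ov::op::v0::Constant::create(ov::element::boolean, ov::Shape{3}, {1, 0, 1});
    auto r2 = std::make_shared<ov::op::v1::Reverse>(data, mask, ov::op::v1::Reverse::Mode::MASK);
    return 0;
}
```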
+ Mode m_mode;
+
+private:
+ bool evaluate_reverse(const HostTensorVector& outputs, const HostTensorVector& inputs) const;
+};
+} // namespace v1
+} // namespace op
+
+OPENVINO_API
+std::ostream& operator<<(std::ostream& s, const op::v1::Reverse::Mode& type);
+
+template <>
+class OPENVINO_API AttributeAdapter : public EnumAttributeAdapterBase {
+public:
+ AttributeAdapter(op::v1::Reverse::Mode& value) : EnumAttributeAdapterBase(value) {}
+
+ static constexpr DiscreteTypeInfo type_info{"AttributeAdapter", 1};
+ const DiscreteTypeInfo& get_type_info() const override {
+ return type_info;
+ }
+};
+
+} // namespace ov
diff --git a/ngraph/core/include/openvino/op/reverse_sequence.hpp b/ngraph/core/include/openvino/op/reverse_sequence.hpp
new file mode 100644
index 00000000000000..a9212a343ad2c1
--- /dev/null
+++ b/ngraph/core/include/openvino/op/reverse_sequence.hpp
@@ -0,0 +1,61 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include "openvino/op/op.hpp"
+
+namespace ov {
+namespace op {
+namespace v0 {
+class OPENVINO_API ReverseSequence : public Op {
+public:
+ OPENVINO_RTTI_DECLARATION;
+
+ ReverseSequence() = default;
+ /// \brief Constructs a ReverseSequence operation.
+ ///
+ /// \param arg tensor with input data to reverse
+ /// \param seq_lengths 1D tensor of integers with sequence lengths in the input
+ /// tensor.
+ /// \param batch_axis index of the batch dimension.
+ /// \param seq_axis index of the sequence dimension.
+ ReverseSequence(const Output& arg,
+ const Output& seq_lengths,
+ int64_t batch_axis = 0,
+ int64_t seq_axis = 1);
+
+ bool visit_attributes(AttributeVisitor& visitor) override;
+ void validate_and_infer_types() override;
+
+ std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override;
+
+ size_t get_batch_axis() const {
+ return m_normalized_batch_axis;
+ }
+ int64_t get_origin_batch_axis() const {
+ return m_batch_axis;
+ }
+ void set_batch_axis(int64_t batch_axis) {
+ m_batch_axis = batch_axis;
+ }
+ size_t get_sequence_axis() const {
+ return m_normalized_seq_axis;
+ }
+ int64_t get_origin_sequence_axis() const {
+ return m_seq_axis;
+ }
+ void set_sequence_axis(int64_t sequence_axis) {
+ m_seq_axis = sequence_axis;
+ }
+
+private:
+ int64_t m_batch_axis;
+ int64_t m_seq_axis = 1;
+ size_t m_normalized_batch_axis;
+ size_t m_normalized_seq_axis;
+};
+} // namespace v0
+} // namespace op
+} // namespace ov
diff --git a/ngraph/core/include/openvino/op/rnn_cell.hpp b/ngraph/core/include/openvino/op/rnn_cell.hpp
new file mode 100644
index 00000000000000..3a1c00ca210713
--- /dev/null
+++ b/ngraph/core/include/openvino/op/rnn_cell.hpp
@@ -0,0 +1,132 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include
+#include
+#include
+#include
+
+#include "openvino/op/op.hpp"
+#include "openvino/op/util/activation_functions.hpp"
+#include "openvino/op/util/rnn_cell_base.hpp"
+
+namespace ov {
+namespace op {
+namespace v0 {
+///
+/// \brief Class for single RNN cell node.
+///
+/// \note It follows notation and equations defined in the ONNX standard:
+/// https://github.com/onnx/onnx/blob/master/docs/Operators.md#RNN
+///
+/// \note It calculates the following equation:
+///
+/// Ht = f(Xt*(Wi^T) + Ht-1*(Ri^T) + Wbi + Rbi)
+///
+/// where * is a dot product and
+/// f is the activation function.
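A shape-bookkeeping sketch for one step of that equation; the shapes follow the constructor documentation below (illustrative only, same header assumptions):

```cpp
#include <memory>

#include "openvino/op/parameter.hpp"
#include "openvino/op/rnn_cell.hpp"

int main() {
    const size_t batch = 2, input_size = 8, hidden = 4;
    auto X = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{batch, input_size});
    auto H = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{batch, hidden});
    auto W = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{hidden, input_size});
    auto R = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{hidden, hidden});
    // Default activation is tanh; the bias input defaults to zeros. Output: {batch, hidden}.
    auto cell = std::make_shared<ov::op::v0::RNNCell>(X, H, W, R, hidden);
    return 0;
}
```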
+/// +/// \note This class represents only single *cell* (for current time step) +/// and not the whole RNN Sequence layer +/// +/// \sa LSTMSequence, LSTMCell, GRUCell +/// +class OPENVINO_API RNNCell : public util::RNNCellBase { +public: + OPENVINO_RTTI_DECLARATION; + + RNNCell(); + /// + /// \brief Constructs RNNCell node. + /// + /// \param[in] X The input tensor with shape: [batch_size, + /// input_size]. + /// \param[in] initial_hidden_state The hidden state tensor at current time step + /// with shape: [batch_size, hidden_size]. + /// \param[in] W The weight tensor with shape: [hidden_size, + /// input_size]. + /// \param[in] R The recurrence weight tensor with shape: + /// [hidden_size, hidden_size]. + /// \param[in] hidden_size The number of hidden units for recurrent cell. + /// \param[in] activations The vector of activation functions used inside + /// recurrent cell. + /// \param[in] activations_alpha The vector of alpha parameters for activation + /// functions in order respective to activation + /// list. + /// \param[in] activations_beta The vector of beta parameters for activation + /// functions in order respective to activation + /// list. + /// \param[in] clip The value defining clipping range [-clip, + /// clip] on input of activation functions. + /// + RNNCell(const Output& X, + const Output& initial_hidden_state, + const Output& W, + const Output& R, + std::size_t hidden_size, + const std::vector& activations = std::vector{"tanh"}, + const std::vector& activations_alpha = {}, + const std::vector& activations_beta = {}, + float clip = 0.f); + + /// + /// \brief Constructs RNNCell node. + /// + /// \param[in] X The input tensor with shape: [batch_size, + /// input_size]. + /// \param[in] initial_hidden_state The hidden state tensor at current time step + /// with shape: [batch_size, hidden_size]. + /// \param[in] W The weight tensor with shape: [hidden_size, + /// input_size]. + /// \param[in] R The recurrence weight tensor with shape: + /// [hidden_size, hidden_size]. + /// \param[in] B The bias tensor for input gate with shape: + /// [hidden_size]. + /// \param[in] hidden_size The number of hidden units for recurrent cell. + /// \param[in] activations The vector of activation functions used inside + /// recurrent cell. + /// \param[in] activations_alpha The vector of alpha parameters for activation + /// functions in order respective to activation + /// list. + /// \param[in] activations_beta The vector of beta parameters for activation + /// functions in order respective to activation + /// list. + /// \param[in] clip The value defining clipping range [-clip, + /// clip] on input of activation functions. + /// + RNNCell(const Output& X, + const Output& initial_hidden_state, + const Output& W, + const Output& R, + const Output& B, + std::size_t hidden_size, + const std::vector& activations = std::vector{"tanh"}, + const std::vector& activations_alpha = {}, + const std::vector& activations_beta = {}, + float clip = 0.f); + + void validate_and_infer_types() override; + bool visit_attributes(AttributeVisitor& visitor) override; + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + +private: + /// + /// \brief Creates the default bias input initialized with zeros. + /// + /// \return The object of Output class. + /// + Output get_default_bias_input() const; + + /// + /// \brief The Activation function f. 
+ /// + util::ActivationFunction m_activation_f; + + static constexpr std::size_t s_gates_count{1}; +}; +} // namespace v0 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/rnn_sequence.hpp b/ngraph/core/include/openvino/op/rnn_sequence.hpp new file mode 100644 index 00000000000000..33b1de8f995546 --- /dev/null +++ b/ngraph/core/include/openvino/op/rnn_sequence.hpp @@ -0,0 +1,50 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include +#include + +#include "openvino/op/util/rnn_cell_base.hpp" + +namespace ov { +namespace op { +namespace v5 { +class OPENVINO_API RNNSequence : public util::RNNCellBase { +public: + OPENVINO_RTTI_DECLARATION; + + RNNSequence(); + + RNNSequence(const Output& X, + const Output& H_t, + const Output& sequence_lengths, + const Output& W, + const Output& R, + const Output& B, + size_t hidden_size, + op::RecurrentSequenceDirection direction, + const std::vector& activations = std::vector{"tanh"}, + const std::vector& activations_alpha = {}, + const std::vector& activations_beta = {}, + float clip = 0.f); + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + void validate_and_infer_types() override; + + bool visit_attributes(AttributeVisitor& visitor) override; + + op::RecurrentSequenceDirection get_direction() const { + return m_direction; + } + +protected: + op::RecurrentSequenceDirection m_direction; +}; +} // namespace v5 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/roi_align.hpp b/ngraph/core/include/openvino/op/roi_align.hpp new file mode 100644 index 00000000000000..2353885e5197c4 --- /dev/null +++ b/ngraph/core/include/openvino/op/roi_align.hpp @@ -0,0 +1,98 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v3 { +class OPENVINO_API ROIAlign : public Op { +public: + OPENVINO_RTTI_DECLARATION; + enum class PoolingMode { AVG, MAX }; + + ROIAlign() = default; + /// \brief Constructs a ROIAlign node matching the ONNX ROIAlign specification + /// + /// \param input Input feature map {N, C, H, W} + /// \param rois Regions of interest to pool over + /// \param batch_indices Indices of images in the batch matching + /// the number or ROIs + /// \param pooled_h Height of the ROI output features + /// \param pooled_w Width of the ROI output features + /// \param sampling_ratio Number of sampling points used to compute + /// an output element + /// \param spatial_scale Spatial scale factor used to translate ROI coordinates + /// \param mode Method of pooling - 'avg' or 'max' + ROIAlign(const Output& input, + const Output& rois, + const Output& batch_indices, + const int pooled_h, + const int pooled_w, + const int sampling_ratio, + const float spatial_scale, + const std::string& mode); + + ROIAlign(const Output& input, + const Output& rois, + const Output& batch_indices, + const int pooled_h, + const int pooled_w, + const int sampling_ratio, + const float spatial_scale, + const PoolingMode mode); + + void validate_and_infer_types() override; + bool visit_attributes(AttributeVisitor& visitor) override; + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + int get_pooled_h() const { + return m_pooled_h; + } + int get_pooled_w() const { + return m_pooled_w; + } + int get_sampling_ratio() const { + return m_sampling_ratio; + } 
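Constructing ROIAlign from its ONNX-style attributes (illustrative sketch; the concrete sizes and the 1/16 scale are made up for the example):

```cpp
#include <memory>

#include "openvino/op/parameter.hpp"
#include "openvino/op/roi_align.hpp"

int main() {
    auto feat = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 256, 50, 50});
    auto rois = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{100, 4});
    auto idx = std::make_shared<ov::op::v0::Parameter>(ov::element::i32, ov::Shape{100});
    // 100 pooled {256, 7, 7} crops, 2 sampling points, features at 1/16 input resolution.
    auto roi = std::make_shared<ov::op::v3::ROIAlign>(feat, rois, idx, 7, 7, 2, 1.0f / 16, "avg");
    return 0;
}
```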
+ float get_spatial_scale() const { + return m_spatial_scale; + } + PoolingMode get_mode() const { + return m_mode; + } + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; + +private: + PoolingMode mode_from_string(const std::string& mode) const; + +private: + int m_pooled_h; + int m_pooled_w; + int m_sampling_ratio; + float m_spatial_scale; + PoolingMode m_mode; +}; +} // namespace v3 +} // namespace op + +std::ostream& operator<<(std::ostream& s, const op::v3::ROIAlign::PoolingMode& mode); + +template <> +class OPENVINO_API AttributeAdapter + : public EnumAttributeAdapterBase { +public: + AttributeAdapter(op::v3::ROIAlign::PoolingMode& value) + : EnumAttributeAdapterBase(value) {} + + static constexpr DiscreteTypeInfo type_info{"AttributeAdapter", 3}; + const DiscreteTypeInfo& get_type_info() const override { + return type_info; + } +}; + +} // namespace ov diff --git a/ngraph/core/include/openvino/op/roi_pooling.hpp b/ngraph/core/include/openvino/op/roi_pooling.hpp new file mode 100644 index 00000000000000..d81a14cf144bcc --- /dev/null +++ b/ngraph/core/include/openvino/op/roi_pooling.hpp @@ -0,0 +1,52 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v0 { +class OPENVINO_API ROIPooling : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + ROIPooling() = default; + /// \brief Constructs a ROIPooling operation + /// + /// \param input Input feature map {N, C, H, W} + /// \param coords Coordinates of bounding boxes + /// \param output_size Height/Width of ROI output features + /// \param spatial_scale Ratio of input feature map over input image size + /// \param method Method of pooling - Max or Bilinear + ROIPooling(const Output& input, + const Output& coords, + const ngraph::Shape& output_size, + const float spatial_scale, + const std::string& method = "max"); + + void validate_and_infer_types() override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + const ngraph::Shape& get_output_size() const { + return m_output_size; + } + float get_spatial_scale() const { + return m_spatial_scale; + } + const std::string& get_method() const { + return m_method; + } + bool visit_attributes(AttributeVisitor& visitor) override; + +private: + ngraph::Shape m_output_size{0, 0}; + float m_spatial_scale; + std::string m_method = "max"; +}; +} // namespace v0 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/roll.hpp b/ngraph/core/include/openvino/op/roll.hpp new file mode 100644 index 00000000000000..ae0215b9dbc964 --- /dev/null +++ b/ngraph/core/include/openvino/op/roll.hpp @@ -0,0 +1,38 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v7 { +/// \brief Tensor roll operation. +class OPENVINO_API Roll : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + Roll() = default; + + /// + /// \brief Constructs a roll operation. + /// + /// \param data Node producing the tensor to be shifted. + /// \param shift Node producing the 0D or 1D tensor which specifies the + /// number of places by which the elements are shifted. + /// \param axes Node producing the 0D or 1D tensor which specifies axes + /// along which elements are shifted. 
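For instance (illustrative sketch, same assumptions):

```cpp
#include <memory>

#include "openvino/op/constant.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/roll.hpp"

int main() {
    auto data = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{4, 5});
    // Shift rows down by 1 and columns right by 2, both with wrap-around.
    auto shift = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{2}, {1, 2});
    auto axes = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{2}, {0, 1});
    auto roll = std::make_shared<ov::op::v7::Roll>(data, shift, axes);
    return 0;
}
```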
+ ///
+ Roll(const Output& data, const Output& shift, const Output& axes);
+
+ void validate_and_infer_types() override;
+
+ bool visit_attributes(AttributeVisitor& visitor) override;
+
+ std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override;
+};
+} // namespace v7
+} // namespace op
+} // namespace ov
diff --git a/ngraph/core/include/openvino/op/round.hpp b/ngraph/core/include/openvino/op/round.hpp
new file mode 100644
index 00000000000000..c066af19c41c41
--- /dev/null
+++ b/ngraph/core/include/openvino/op/round.hpp
@@ -0,0 +1,63 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include "openvino/op/op.hpp"
+
+namespace ov {
+namespace op {
+namespace v5 {
+/// \brief Elementwise round operation. The output is rounded to the nearest integer
+/// for each value. In case of halves, the rule is defined in attribute 'mode':
+/// 'HALF_TO_EVEN' - round halves to the nearest even integer.
+/// 'HALF_AWAY_FROM_ZERO' - round in such a way that the result heads away from
+/// zero.
+class OPENVINO_API Round : public Op {
+public:
+ enum class RoundMode { HALF_TO_EVEN, HALF_AWAY_FROM_ZERO };
+ OPENVINO_RTTI_DECLARATION;
+
+ /// \brief Constructs a round operation.
+ Round() = default;
+
+ /// \brief Constructs a round operation.
+ ///
+ /// \param arg Node that produces the input tensor.
+ /// \param mode Rule to resolve halves
+ Round(const Output& arg, const RoundMode mode);
+
+ bool visit_attributes(AttributeVisitor& visitor) override;
+ void validate_and_infer_types() override;
+
+ std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override;
+
+ bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
+ bool has_evaluate() const override;
+
+ RoundMode get_mode() const {
+ return m_mode;
+ }
+
+private:
+ RoundMode m_mode;
+};
+} // namespace v5
+} // namespace op
+OPENVINO_API
+std::ostream& operator<<(std::ostream& s, const op::v5::Round::RoundMode& type);
+
+template <>
+class OPENVINO_API AttributeAdapter
+ : public EnumAttributeAdapterBase {
+public:
+ AttributeAdapter(op::v5::Round::RoundMode& value) : EnumAttributeAdapterBase(value) {}
+
+ static constexpr DiscreteTypeInfo type_info{"AttributeAdapter", 5};
+ const DiscreteTypeInfo& get_type_info() const override {
+ return type_info;
+ }
+};
+
+} // namespace ov
diff --git a/ngraph/core/include/openvino/op/scatter_elements_update.hpp b/ngraph/core/include/openvino/op/scatter_elements_update.hpp
new file mode 100644
index 00000000000000..0f57b3552971f5
--- /dev/null
+++ b/ngraph/core/include/openvino/op/scatter_elements_update.hpp
@@ -0,0 +1,40 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include "openvino/op/op.hpp"
+
+namespace ov {
+namespace op {
+namespace v3 {
+class OPENVINO_API ScatterElementsUpdate : public Op {
+public:
+ OPENVINO_RTTI_DECLARATION;
+
+ ScatterElementsUpdate() = default;
+ /// \brief Constructs a ScatterElementsUpdate node
+ ///
+ /// \param data Input data
+ /// \param indices Data entry index that will be updated
+ /// \param updates Update values
+ /// \param axis Axis to scatter on
+ ScatterElementsUpdate(const Output& data,
+ const Output& indices,
+ const Output& updates,
+ const Output& axis);
+
+ void validate_and_infer_types() override;
+ bool visit_attributes(AttributeVisitor& visitor) override;
+
+ std::shared_ptr clone_with_new_inputs(const OutputVector& inputs) const override;
+ bool
evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; + +private: + bool evaluate_scatter_element_update(const HostTensorVector& outputs, const HostTensorVector& inputs) const; +}; +} // namespace v3 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/scatter_nd_update.hpp b/ngraph/core/include/openvino/op/scatter_nd_update.hpp new file mode 100644 index 00000000000000..f56eb03db867b0 --- /dev/null +++ b/ngraph/core/include/openvino/op/scatter_nd_update.hpp @@ -0,0 +1,29 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/util/scatter_nd_base.hpp" + +namespace ov { +namespace op { +namespace v3 { +/// \brief Add updates to slices from inputs addressed by indices +class OPENVINO_API ScatterNDUpdate : public util::ScatterNDBase { +public: + OPENVINO_RTTI_DECLARATION; + ScatterNDUpdate() = default; + /// \param inputs Tensor + /// \param indices Index tensor: Data type must be `element::i32` or `element::i64` + /// \param updates Tensor: Must have same type as inputs + ScatterNDUpdate(const Output& inputs, const Output& indices, const Output& updates) + : util::ScatterNDBase(inputs, indices, updates) {} + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v3 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/scatter_update.hpp b/ngraph/core/include/openvino/op/scatter_update.hpp new file mode 100644 index 00000000000000..78e192160b20e3 --- /dev/null +++ b/ngraph/core/include/openvino/op/scatter_update.hpp @@ -0,0 +1,42 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/util/scatter_base.hpp" + +namespace ov { +namespace op { +namespace v3 { +/// +/// \brief Set new values to slices from data addressed by indices +/// +class OPENVINO_API ScatterUpdate : public util::ScatterBase { +public: + OPENVINO_RTTI_DECLARATION; + ScatterUpdate() = default; + /// + /// \brief Constructs ScatterUpdate operator object. + /// + /// \param data The input tensor to be updated. + /// \param indices The tensor with indexes which will be updated. + /// \param updates The tensor with update values. + /// \param[in] axis The axis at which elements will be updated. 
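A concrete update along axis 0 (illustrative sketch, same assumptions):

```cpp
#include <memory>

#include "openvino/op/constant.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/scatter_update.hpp"

int main() {
    auto data = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{4, 3});
    // Replace rows 0 and 2 of `data` with the two rows of `updates`.
    auto indices = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{2}, {0, 2});
    auto updates = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{2, 3});
    auto axis = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{}, {0});
    auto scatter = std::make_shared<ov::op::v3::ScatterUpdate>(data, indices, updates, axis);
    return 0;
}
```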
+ /// + ScatterUpdate(const Output& data, + const Output& indices, + const Output& updates, + const Output& axis); + + std::shared_ptr clone_with_new_inputs(const OutputVector& inputs) const override; + + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; + +private: + bool evaluate_scatter_update(const HostTensorVector& outputs, const HostTensorVector& inputs) const; +}; +} // namespace v3 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/select.hpp b/ngraph/core/include/openvino/op/select.hpp new file mode 100644 index 00000000000000..9dfe70d8b5dfa7 --- /dev/null +++ b/ngraph/core/include/openvino/op/select.hpp @@ -0,0 +1,70 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "ngraph/op/op.hpp" + +namespace ov { +namespace op { +namespace v1 { +// clang-format off +/// \brief Elementwise selection operation. +/// +/// ## Inputs +/// +/// | | Type | Description | +/// | ------ | --------------------------------------------- | ------------------------------------------------------------ | +/// | `arg0` | \f$\texttt{bool}[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape, with element `bool`. | +/// | `arg1` | \f$E[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of a shape that is broadcast-compatible with `arg0`, with any element type. | +/// | `arg2` | \f$E[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of a shape that is broadcast-compatible with `arg0`, and same element type as `arg1`. | +/// | `auto_broadcast`| AutoBroadcastSpec | Auto broadcast specification. | +/// +/// ## Output +/// +/// | Type | Description | +/// | ---------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +/// | \f$E[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \texttt{arg1}[i_1,\dots,i_n]\text{ if }\texttt{arg0}[i_1,\dots,i_n] \neq 0\text{, else }\texttt{arg2}[i_1,\dots,i_n]\f$ | +// clang-format on +class OPENVINO_API Select : public Op { +public: + OPENVINO_RTTI_DECLARATION; + /// \brief Constructs a selection operation. + Select() : m_auto_broadcast(AutoBroadcastSpec(AutoBroadcastType::NUMPY)) {} + + /// \brief Constructs a selection operation. + /// + /// \param arg0 Node that produces the first input tensor. + /// \param arg1 Node that produces the second input tensor. + /// \param arg2 Node that produces the third input tensor. + /// \param auto_broadcast Auto broadcast specification. Default is Numpy-style + /// implicit broadcasting. + Select(const Output& arg0, + const Output& arg1, + const Output& arg2, + const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY)); + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + bool visit_attributes(AttributeVisitor& visitor) override; + void validate_and_infer_types() override; + + const AutoBroadcastSpec& get_auto_broadcast() const { + return m_auto_broadcast; + } + void set_auto_broadcast(const AutoBroadcastSpec& auto_broadcast) { + m_auto_broadcast = auto_broadcast; + } + // TODO: Move all uses of get_autob to get_auto_broadcast() and remove this. 
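The I/O contract from the table above, in code (illustrative sketch):

```cpp
#include <memory>

#include "openvino/op/parameter.hpp"
#include "openvino/op/select.hpp"

int main() {
    auto cond = std::make_shared<ov::op::v0::Parameter>(ov::element::boolean, ov::Shape{2, 4});
    auto then_v = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{2, 4});
    // Default NUMPY auto-broadcast lets a {4} tensor stand in for the "else" branch.
    auto else_v = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{4});
    auto sel = std::make_shared<ov::op::v1::Select>(cond, then_v, else_v);
    return 0;
}
```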
+ const AutoBroadcastSpec& get_autob() const override { + return m_auto_broadcast; + } + bool evaluate(const HostTensorVector& output_values, const HostTensorVector& input_values) const override; + bool has_evaluate() const override; + +private: + AutoBroadcastSpec m_auto_broadcast; +}; +} // namespace v1 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/selu.hpp b/ngraph/core/include/openvino/op/selu.hpp new file mode 100644 index 00000000000000..bdf2ae5c96658f --- /dev/null +++ b/ngraph/core/include/openvino/op/selu.hpp @@ -0,0 +1,33 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v0 { +/// \brief Performs a SELU activation function on all elements of the input node +class OPENVINO_API Selu : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + Selu() = default; + /// \brief Constructs a Selu node. + /// + /// \param data - Node producing the input tensor + /// \param alpha - Alpha coefficient of SELU operation + /// \param lambda - Lambda coefficient of SELU operation + Selu(const Output& data, const Output& alpha, const Output& lambda); + + void validate_and_infer_types() override; + + bool visit_attributes(AttributeVisitor& visitor) override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; +}; +} // namespace v0 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/shape_of.hpp b/ngraph/core/include/openvino/op/shape_of.hpp new file mode 100644 index 00000000000000..6ad940083c58b4 --- /dev/null +++ b/ngraph/core/include/openvino/op/shape_of.hpp @@ -0,0 +1,67 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v3 { +/// \brief Operation that returns the shape of its input argument as a tensor. +class OPENVINO_API ShapeOf : public Op { +public: + OPENVINO_RTTI_DECLARATION; + ShapeOf() = default; + /// \brief Constructs a shape-of operation. + ShapeOf(const Output& arg, const element::Type output_type = element::i64); + + bool visit_attributes(AttributeVisitor& visitor) override; + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + void validate_and_infer_types() override; + + element::Type get_output_type() const { + return m_output_type; + } + void set_output_type(element::Type output_type) { + m_output_type = output_type; + } + // Overload collision with method on Node + using Node::set_output_type; + + bool evaluate(const HostTensorVector& output_values, const HostTensorVector& input_values) const override; + bool has_evaluate() const override; + bool evaluate_lower(const HostTensorVector& output_values) const override; + bool evaluate_upper(const HostTensorVector& output_values) const override; + bool constant_fold(OutputVector& output_values, const OutputVector& input_values) override; + +private: + element::Type m_output_type; +}; +} // namespace v3 + +namespace v0 { +/// \brief Operation that returns the shape of its input argument as a tensor. +class OPENVINO_API ShapeOf : public Op { +public: + OPENVINO_RTTI_DECLARATION; + ShapeOf() = default; + /// \brief Constructs a shape-of operation. 
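The v3/v0 difference in practice is mainly the configurable output type (illustrative sketch):

```cpp
#include <memory>

#include "openvino/op/parameter.hpp"
#include "openvino/op/shape_of.hpp"

int main() {
    auto data = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{2, 3, 4});
    // v3 can emit i32 or i64; here the result is the i32 vector [2, 3, 4].
    auto shape_v3 = std::make_shared<ov::op::v3::ShapeOf>(data, ov::element::i32);
    // v0 always produces i64.
    auto shape_v0 = std::make_shared<ov::op::v0::ShapeOf>(data);
    return 0;
}
```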
+ ShapeOf(const Output& arg); + + bool visit_attributes(AttributeVisitor& visitor) override; + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + void validate_and_infer_types() override; + + bool evaluate(const HostTensorVector& output_values, const HostTensorVector& input_values) const override; + bool has_evaluate() const override; + bool evaluate_lower(const HostTensorVector& output_values) const override; + bool evaluate_upper(const HostTensorVector& output_values) const override; + bool constant_fold(OutputVector& output_values, const OutputVector& input_values) override; +}; +} // namespace v0 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/shuffle_channels.hpp b/ngraph/core/include/openvino/op/shuffle_channels.hpp new file mode 100644 index 00000000000000..0c32977d087c12 --- /dev/null +++ b/ngraph/core/include/openvino/op/shuffle_channels.hpp @@ -0,0 +1,54 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v0 { +/// \brief Permutes data in the channel dimension of the input +class OPENVINO_API ShuffleChannels : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + ShuffleChannels() = default; + /// \brief Constructs a ShuffleChannels node. + /// + /// \param data Node producing the input tensor. + /// \param axis Channel dimension index in the data tensor. + /// A negative value means that the index should be + /// calculated from the back of the input data shape. + /// \param group Number of group the channel dimension should be split into. + /// + ShuffleChannels(const Output& data, const int64_t axis = 1, const int64_t group = 1); + + bool visit_attributes(AttributeVisitor& visitor) override; + size_t get_zero_based_axis() const; + + void validate_and_infer_types() override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + int64_t get_axis() const { + return m_axis; + } + int64_t get_group() const { + return m_group; + } + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; + +private: + bool evaluate_shuffle_channels(const HostTensorVector& outputs, const HostTensorVector& inputs) const; + + int64_t m_axis; + int64_t m_group; +}; +} // namespace v0 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/sigmoid.hpp b/ngraph/core/include/openvino/op/sigmoid.hpp new file mode 100644 index 00000000000000..611997932059d0 --- /dev/null +++ b/ngraph/core/include/openvino/op/sigmoid.hpp @@ -0,0 +1,23 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "ngraph/op/util/unary_elementwise_arithmetic.hpp" + +namespace ov { +namespace op { +namespace v0 { +class OPENVINO_API Sigmoid : public util::UnaryElementwiseArithmetic { +public: + OPENVINO_RTTI_DECLARATION; + Sigmoid(const Output& arg); + Sigmoid() = default; + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v0 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/sign.hpp b/ngraph/core/include/openvino/op/sign.hpp new file mode 100644 index 00000000000000..428784c3e220b2 --- /dev/null 
+++ b/ngraph/core/include/openvino/op/sign.hpp @@ -0,0 +1,31 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/util/unary_elementwise_arithmetic.hpp" + +namespace ov { +namespace op { +namespace v0 { +/// \brief Elementwise sign operation. +/// +class OPENVINO_API Sign : public util::UnaryElementwiseArithmetic { +public: + OPENVINO_RTTI_DECLARATION; + + Sign() = default; + /// \brief Constructs an elementwise sign operation. + /// + /// \param arg Node that produces the input tensor. + Sign(const Output& arg); + + bool visit_attributes(AttributeVisitor& visitor) override; + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v0 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/sin.hpp b/ngraph/core/include/openvino/op/sin.hpp new file mode 100644 index 00000000000000..beea3cb5162642 --- /dev/null +++ b/ngraph/core/include/openvino/op/sin.hpp @@ -0,0 +1,43 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/util/unary_elementwise_arithmetic.hpp" + +namespace ov { +namespace op { +namespace v0 { +// clang-format off +/// \brief Elementwise sine operation. +/// +/// ## Inputs +/// +/// | | Type | Description | +/// | ----- | --------------------------------- | ----------------------------------------------- | +/// | `arg` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and numeric element type. | +/// +/// ## Output +/// +/// | Type | Description | +/// | ---------------------- | ------------------------------------------------------------------------------------ | +/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \sin(\texttt{arg}[i_1,\dots,i_n])\f$ | +// clang-format on +class OPENVINO_API Sin : public util::UnaryElementwiseArithmetic { +public: + OPENVINO_RTTI_DECLARATION; + /// \brief Constructs a sine operation. + /// + /// \param arg Node that produces the input tensor. + Sin(const Output& arg); + Sin() = default; + + bool visit_attributes(AttributeVisitor& visitor) override; + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v0 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/sinh.hpp b/ngraph/core/include/openvino/op/sinh.hpp new file mode 100644 index 00000000000000..7759db39134dea --- /dev/null +++ b/ngraph/core/include/openvino/op/sinh.hpp @@ -0,0 +1,29 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/util/unary_elementwise_arithmetic.hpp" + +namespace ov { +namespace op { +namespace v0 { +/// \brief Elementwise hyperbolic sine (sinh) operation. +class OPENVINO_API Sinh : public util::UnaryElementwiseArithmetic { +public: + OPENVINO_RTTI_DECLARATION; + /// \brief Constructs a hyperbolic sine operation. + /// + /// \param arg Node that produces the input tensor. 
+ Sinh(const Output& arg); + Sinh() = default; + + bool visit_attributes(AttributeVisitor& visitor) override; + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v0 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/softmax.hpp b/ngraph/core/include/openvino/op/softmax.hpp new file mode 100644 index 00000000000000..8818953bd16d8b --- /dev/null +++ b/ngraph/core/include/openvino/op/softmax.hpp @@ -0,0 +1,46 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v1 { +class OPENVINO_API Softmax : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + Softmax() = default; + /// \brief Constructs a softmax operation. + /// + /// \param arg Node that produces the first input tensor.
+ /// `[d0, ...]`
+ /// \param axis The axis position (0-based) on which to calculate the softmax.
+ ///
+ /// Output `[d0, ...]`
+ ///
+ Softmax(const Output& arg, const size_t axis = 1);
+
+ bool visit_attributes(AttributeVisitor& visitor) override;
+ void validate_and_infer_types() override;
+
+ std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override;
+
+ size_t get_axis() const {
+ return m_axis;
+ }
+ void set_axis(const size_t axis) {
+ m_axis = axis;
+ }
+ bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
+ bool has_evaluate() const override;
+
+private:
+ size_t m_axis{0};
+};
+} // namespace v1
+} // namespace op
+} // namespace ov
diff --git a/ngraph/core/include/openvino/op/softplus.hpp b/ngraph/core/include/openvino/op/softplus.hpp
new file mode 100644
index 00000000000000..ae4cea9a6613b5
--- /dev/null
+++ b/ngraph/core/include/openvino/op/softplus.hpp
@@ -0,0 +1,34 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include "openvino/op/op.hpp"
+
+namespace ov {
+namespace op {
+namespace v4 {
+/// \brief SoftPlus activation function, a smooth approximation of ReLU:
+/// f(x) = ln(exp(x) + 1.)
+///
+class OPENVINO_API SoftPlus : public Op {
+public:
+ OPENVINO_RTTI_DECLARATION;
+
+ SoftPlus() = default;
+ /// \brief Constructs a SoftPlus operation.
+ ///
+ /// \param arg Input tensor
+ SoftPlus(const Output& arg);
+ bool visit_attributes(AttributeVisitor& visitor) override;
+ void validate_and_infer_types() override;
+
+ std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override;
+
+ bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
+ bool has_evaluate() const override;
+};
+} // namespace v4
+} // namespace op
+} // namespace ov
diff --git a/ngraph/core/include/openvino/op/space_to_batch.hpp b/ngraph/core/include/openvino/op/space_to_batch.hpp
new file mode 100644
index 00000000000000..44bc31fa5c1123
--- /dev/null
+++ b/ngraph/core/include/openvino/op/space_to_batch.hpp
@@ -0,0 +1,54 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include "openvino/op/op.hpp"
+
+namespace ov {
+namespace op {
+namespace v1 {
+/// \brief SpaceToBatch permutes data tensor blocks of spatial data into batch
+/// dimension.
+///
+/// \note Values from spatial blocks dimensions are moved in the batch dimension.
+///
+/// Output node produces a tensor with shape
+/// `[batch * block_shape[0] * block_shape[1] * ... * block_shape[N - 1],
+/// (pads_begin[1] + D_1 + pads_end[1]) / block_shape[1],
+/// (pads_begin[2] + D_2 + pads_end[2]) / block_shape[2], ...,
+/// (pads_begin[N - 1] + D_{N - 1} + pads_end[N - 1]) / block_shape[N - 1]]`
+/// of the same type as `data` input.
+class OPENVINO_API SpaceToBatch : public Op {
+public:
+ OPENVINO_RTTI_DECLARATION;
+
+ SpaceToBatch() = default;
+
+ /// \brief Constructs a SpaceToBatch operation.
+ ///
+ /// \param data Node producing the data tensor
+ /// \param block_shape The sizes of the block of values to be moved
+ /// \param pads_begin Specifies the padding for the beginning along each axis of
+ /// `data` input
+ /// \param pads_end Specifies the padding for the ending along each axis of `data`
+ /// input.
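Plugging numbers into the output-shape formula above (illustrative sketch, same assumptions):

```cpp
#include <memory>

#include "openvino/op/constant.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/space_to_batch.hpp"

int main() {
    auto data = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 1, 4, 4});
    // One entry per data dimension; batch and channel dims use block 1 and zero pads.
    auto block = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{4}, {1, 1, 2, 2});
    auto pads_begin = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{4}, {0, 0, 0, 0});
    auto pads_end = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{4}, {0, 0, 0, 0});
    // Per the formula: {1 * 1 * 2 * 2, 1, (0 + 4 + 0) / 2, (0 + 4 + 0) / 2} = {4, 1, 2, 2}.
    auto s2b = std::make_shared<ov::op::v1::SpaceToBatch>(data, block, pads_begin, pads_end);
    return 0;
}
```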
+ SpaceToBatch(const Output& data, + const Output& block_shape, + const Output& pads_begin, + const Output& pads_end); + + void validate_and_infer_types() override; + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + bool visit_attributes(AttributeVisitor& visitor) override; + + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; + +private: + bool evaluate_space_to_batch(const HostTensorVector& outputs, const HostTensorVector& inputs) const; +}; +} // namespace v1 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/space_to_depth.hpp b/ngraph/core/include/openvino/op/space_to_depth.hpp new file mode 100644 index 00000000000000..3dee36bc3f4db8 --- /dev/null +++ b/ngraph/core/include/openvino/op/space_to_depth.hpp @@ -0,0 +1,77 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v0 { +/// \brief SpaceToDepth permutes input tensor blocks of spatial data into depth +/// dimension. +/// +/// \note Values from the height and width dimensions are moved to the depth dimension. +/// +/// Output node produces a tensor with shape: +/// [N, C * blocksize * blocksize, H / blocksize, W / blocksize] +class OPENVINO_API SpaceToDepth : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + enum class SpaceToDepthMode { + // The output depth is gathered from [block_size, ..., block_size, C] + BLOCKS_FIRST, + // The output depth is gathered from [C, block_size, ..., block_size] + DEPTH_FIRST + }; + + SpaceToDepth() = default; + /// \brief Constructs a SpaceToDepth operation. + /// + /// \param data - Node producing the input tensor + /// \param mode Specifies how the output depth dimension is gathered + /// from block coordinates and the old depth dimension. 
+ /// \param block_size - the size of the block of values to be moved + SpaceToDepth(const Output& data, const SpaceToDepthMode& mode, std::size_t block_size = 1); + + SpaceToDepth(const Output& data, const std::string& mode, std::size_t block_size = 1); + + bool visit_attributes(AttributeVisitor& visitor) override; + std::size_t get_block_size() const { + return m_blocksize; + } + SpaceToDepthMode get_mode() const { + return m_mode; + } + void validate_and_infer_types() override; + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; + +protected: + std::size_t m_blocksize; + SpaceToDepthMode m_mode; +}; +} // namespace v0 +} // namespace op + +OPENVINO_API +std::ostream& operator<<(std::ostream& s, const op::v0::SpaceToDepth::SpaceToDepthMode& type); + +template <> +class OPENVINO_API AttributeAdapter + : public EnumAttributeAdapterBase { +public: + AttributeAdapter(op::v0::SpaceToDepth::SpaceToDepthMode& value) + : EnumAttributeAdapterBase(value) {} + + static constexpr DiscreteTypeInfo type_info{"AttributeAdapter", 0}; + const DiscreteTypeInfo& get_type_info() const override { + return type_info; + } +}; + +} // namespace ov diff --git a/ngraph/core/include/openvino/op/split.hpp b/ngraph/core/include/openvino/op/split.hpp new file mode 100644 index 00000000000000..e1cb09bedf6fef --- /dev/null +++ b/ngraph/core/include/openvino/op/split.hpp @@ -0,0 +1,48 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v1 { +/// \brief Splits the input tensor into a list of equal sized tensors +class OPENVINO_API Split : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + /// \brief Constructs a split operation. + Split() = default; + /// \brief Constructs a split operation. + /// \param data The tensor to be split. + /// \param axis The index of an axis in "data" along which to perform + /// the split. + /// \param num_splits The number of pieces that the data tensor should be + /// split into. + Split(const Output& data, const Output& axis, const size_t num_splits); + + bool visit_attributes(AttributeVisitor& visitor) override; + void validate_and_infer_types() override; + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + size_t get_num_splits() const { + return m_num_splits; + } + void set_num_splits(const size_t num_splits) { + m_num_splits = num_splits; + } + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; + +protected: + size_t m_num_splits; +}; +} // namespace v1 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/sqrt.hpp b/ngraph/core/include/openvino/op/sqrt.hpp new file mode 100644 index 00000000000000..2f16e3bdf33c15 --- /dev/null +++ b/ngraph/core/include/openvino/op/sqrt.hpp @@ -0,0 +1,44 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/util/unary_elementwise_arithmetic.hpp" + +namespace ov { +namespace op { +namespace v0 { +// clang-format off +/// \brief Elementwise square root operation. 
+///
+/// ## Inputs
+///
+/// | | Type | Description |
+/// | ----- | --------------------------------- | ----------------------------------------------- |
+/// | `arg` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and numeric element type. |
+///
+/// ## Output
+///
+/// | Type | Description |
+/// | ---------------------- | ------------------------------------------------------------------------------------- |
+/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \sqrt{\texttt{arg}[i_1,\dots,i_n]}\f$ |
+// clang-format on
+class OPENVINO_API Sqrt : public util::UnaryElementwiseArithmetic {
+public:
+ OPENVINO_RTTI_DECLARATION;
+
+ /// \brief Constructs a square root operation.
+ ///
+ /// \param arg Node that produces the input tensor.
+ Sqrt(const Output& arg);
+ Sqrt() = default;
+
+ bool visit_attributes(AttributeVisitor& visitor) override;
+ std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override;
+ bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
+ bool has_evaluate() const override;
+};
+} // namespace v0
+} // namespace op
+} // namespace ov
diff --git a/ngraph/core/include/openvino/op/squared_difference.hpp b/ngraph/core/include/openvino/op/squared_difference.hpp
new file mode 100644
index 00000000000000..23c23de070b358
--- /dev/null
+++ b/ngraph/core/include/openvino/op/squared_difference.hpp
@@ -0,0 +1,34 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include "openvino/op/util/binary_elementwise_arithmetic.hpp"
+
+namespace ov {
+namespace op {
+namespace v0 {
+/// \brief Calculates an element-wise squared difference between two tensors
+///
+/// y[i] = (x1[i] - x2[i])^2
+class OPENVINO_API SquaredDifference : public util::BinaryElementwiseArithmetic {
+public:
+ OPENVINO_RTTI_DECLARATION;
+
+ /// \brief Constructs an uninitialized squared difference operation
+ SquaredDifference() : util::BinaryElementwiseArithmetic(AutoBroadcastSpec::NUMPY) {}
+ /// \brief Constructs the squared difference operation.
diff --git a/ngraph/core/include/openvino/op/squeeze.hpp b/ngraph/core/include/openvino/op/squeeze.hpp
new file mode 100644
index 00000000000000..ff212467127ccd
--- /dev/null
+++ b/ngraph/core/include/openvino/op/squeeze.hpp
@@ -0,0 +1,37 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include "openvino/op/op.hpp"
+
+namespace ov {
+namespace op {
+namespace v0 {
+class OPENVINO_API Squeeze : public Op {
+public:
+    OPENVINO_RTTI_DECLARATION;
+
+    Squeeze();
+    Squeeze(const Output& data, const Output& axes);
+    Squeeze(const Output& data);
+
+    bool visit_attributes(AttributeVisitor& visitor) override;
+    void validate_and_infer_types() override;
+    bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
+    bool has_evaluate() const override;
+    bool evaluate_lower(const HostTensorVector& outputs) const override;
+    bool evaluate_upper(const HostTensorVector& outputs) const override;
+    bool constant_fold(OutputVector& output_values, const OutputVector& inputs_values) override;
+
+    std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override;
+
+    bool is_dynamic() const override;
+
+private:
+    Output get_default_axes_input() const;
+};
+} // namespace v0
+} // namespace op
+} // namespace ov
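For reference, a small sketch of the two-input form (illustrative names, assuming the Parameter/Constant headers from the same migration):

    // Drop the size-1 axes 0 and 2 of a [1, 3, 1, 2] tensor -> shape [3, 2].
    auto data = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 3, 1, 2});
    auto axes = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{2}, {0, 2});
    auto squeeze = std::make_shared<ov::op::v0::Squeeze>(data, axes);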
diff --git a/ngraph/core/include/openvino/op/strided_slice.hpp b/ngraph/core/include/openvino/op/strided_slice.hpp
new file mode 100644
index 00000000000000..150595bc145b50
--- /dev/null
+++ b/ngraph/core/include/openvino/op/strided_slice.hpp
@@ -0,0 +1,110 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include
+#include
+
+#include "ngraph/op/util/attr_types.hpp"
+#include "openvino/op/op.hpp"
+
+namespace ov {
+namespace op {
+namespace v1 {
+/// \brief Takes a slice of an input tensor, i.e., the sub-tensor that resides within a
+///        bounding box, optionally with stride.
+class OPENVINO_API StridedSlice : public Op {
+public:
+    OPENVINO_RTTI_DECLARATION;
+
+    StridedSlice() = default;
+
+    /// \brief Constructs a dynamic tensor strided slice operation.
+    ///
+    /// \param data             The tensor to be sliced.
+    /// \param begin            1D tensor with begin indexes for input blob slicing.
+    /// \param end              1D tensor with end indexes for input blob slicing.
+    /// \param strides          The slicing strides; for example, strides of `{n,m}`
+    ///                         means to take every nth row and every mth column
+    ///                         of the input matrix.
+    /// \param begin_mask       When begin_mask[i] is 1, the corresponding dimension
+    ///                         of the begin input is ignored.
+    /// \param end_mask         When end_mask[i] is 1, the corresponding dimension of
+    ///                         the end input is ignored.
+    /// \param new_axis_mask    If new_axis_mask[i] is 1, a length-1 dimension
+    ///                         is inserted at the i-th position.
+    /// \param shrink_axis_mask If shrink_axis_mask[i] is 1, the dimension
+    ///                         at the i-th position is deleted.
+    /// \param ellipsis_mask    Inserts missing dimensions at the position
+    ///                         of a non-zero bit.
+    StridedSlice(const Output& data,
+                 const Output& begin,
+                 const Output& end,
+                 const Output& strides,
+                 const std::vector& begin_mask,
+                 const std::vector& end_mask,
+                 const std::vector& new_axis_mask = std::vector{},
+                 const std::vector& shrink_axis_mask = std::vector{},
+                 const std::vector& ellipsis_mask = std::vector{});
+
+    /// \brief Constructs a dynamic tensor strided slice operation.
+    ///
+    /// \param data             The tensor to be sliced.
+    /// \param begin            1D tensor with begin indexes for input blob slicing.
+    /// \param end              1D tensor with end indexes for input blob slicing.
+    /// \param begin_mask       When begin_mask[i] is 1, the corresponding dimension
+    ///                         of the begin input is ignored.
+    /// \param end_mask         When end_mask[i] is 1, the corresponding dimension of
+    ///                         the end input is ignored.
+    /// \param new_axis_mask    If new_axis_mask[i] is 1, a length-1 dimension
+    ///                         is inserted at the i-th position.
+    /// \param shrink_axis_mask If shrink_axis_mask[i] is 1, the dimension
+    ///                         at the i-th position is deleted.
+    /// \param ellipsis_mask    Inserts missing dimensions at the position
+    ///                         of a non-zero bit.
+    StridedSlice(const Output& data,
+                 const Output& begin,
+                 const Output& end,
+                 const std::vector& begin_mask,
+                 const std::vector& end_mask,
+                 const std::vector& new_axis_mask = std::vector{},
+                 const std::vector& shrink_axis_mask = std::vector{},
+                 const std::vector& ellipsis_mask = std::vector{});
+
+    bool visit_attributes(AttributeVisitor& visitor) override;
+    const std::vector& get_begin_mask() const {
+        return m_begin_mask;
+    }
+    const std::vector& get_end_mask() const {
+        return m_end_mask;
+    }
+    const std::vector& get_new_axis_mask() const {
+        return m_new_axis_mask;
+    }
+    const std::vector& get_shrink_axis_mask() const {
+        return m_shrink_axis_mask;
+    }
+    const std::vector& get_ellipsis_mask() const {
+        return m_ellipsis_mask;
+    }
+    std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override;
+    void validate_and_infer_types() override;
+    bool evaluate(const HostTensorVector& output_values, const HostTensorVector& input_values) const override;
+    bool has_evaluate() const override;
+    bool evaluate_lower(const HostTensorVector& outputs) const override;
+    bool evaluate_upper(const HostTensorVector& outputs) const override;
+
+private:
+    AxisSet convert_mask_to_axis_set(const std::vector& mask) const;
+
+    std::vector m_begin_mask;
+    std::vector m_end_mask;
+    std::vector m_new_axis_mask;
+    std::vector m_shrink_axis_mask;
+    std::vector m_ellipsis_mask;
+};
+} // namespace v1
+} // namespace op
+} // namespace ov
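The mask semantics are easiest to see in a sketch; this is an illustration under assumed names, not part of the patch:

    // Equivalent of data[1:4, :] on a [5, 4] tensor. end_mask[1] = 1 tells the
    // op to ignore the provided end bound for axis 1 and keep the full width.
    auto data = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{5, 4});
    auto begin = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{2}, {1, 0});
    auto end = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{2}, {4, 0});
    auto slice = std::make_shared<ov::op::v1::StridedSlice>(
        data, begin, end,
        std::vector<int64_t>{0, 0},   // begin_mask: use both begin indexes
        std::vector<int64_t>{0, 1});  // end_mask: ignore the end index of axis 1
    // Output shape: [3, 4].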
diff --git a/ngraph/core/include/openvino/op/subtract.hpp b/ngraph/core/include/openvino/op/subtract.hpp
new file mode 100644
index 00000000000000..abb15c9305de32
--- /dev/null
+++ b/ngraph/core/include/openvino/op/subtract.hpp
@@ -0,0 +1,34 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include "openvino/op/util/binary_elementwise_arithmetic.hpp"
+
+namespace ov {
+namespace op {
+namespace v1 {
+/// \brief Elementwise subtraction operation.
+class OPENVINO_API Subtract : public util::BinaryElementwiseArithmetic {
+public:
+    OPENVINO_RTTI_DECLARATION;
+
+    Subtract() : util::BinaryElementwiseArithmetic(AutoBroadcastSpec::NUMPY) {}
+
+    /// \brief Constructs a subtraction operation.
+    ///
+    /// \param arg0 Node that produces the first input tensor.
+    /// \param arg1 Node that produces the second input tensor.
+    /// \param auto_broadcast Auto broadcast specification
+    Subtract(const Output& arg0,
+             const Output& arg1,
+             const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY));
+
+    std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override;
+    bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
+    bool has_evaluate() const override;
+};
+} // namespace v1
+} // namespace op
+} // namespace ov
diff --git a/ngraph/core/include/openvino/op/swish.hpp b/ngraph/core/include/openvino/op/swish.hpp
new file mode 100644
index 00000000000000..7ec3806a38dce4
--- /dev/null
+++ b/ngraph/core/include/openvino/op/swish.hpp
@@ -0,0 +1,38 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include "openvino/op/op.hpp"
+
+namespace ov {
+namespace op {
+namespace v4 {
+/// \brief A Swish activation function:
+///        f(x) = x / (1.0 + exp(-beta * x)), i.e.
+///        f(x) = x * sigmoid(beta * x)
+///
+class OPENVINO_API Swish : public Op {
+public:
+    OPENVINO_RTTI_DECLARATION;
+    Swish() = default;
+
+    /// \brief Constructs a Swish operation.
+    ///
+    /// \param arg  Input tensor
+    /// \param beta Scalar with the beta value. If not specified, the default
+    ///             value 1.0 is used.
+    Swish(const Output& arg, const Output& beta);
+    explicit Swish(const Output& arg);
+
+    bool visit_attributes(AttributeVisitor& visitor) override;
+    void validate_and_infer_types() override;
+
+    std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override;
+    bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
+    bool has_evaluate() const override;
+};
+} // namespace v4
+} // namespace op
+} // namespace ov
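A short sketch of both Swish constructor forms (illustrative names only, not part of this patch):

    // f(x) = x * sigmoid(beta * x); the single-input form behaves as beta = 1.0.
    auto x = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{8});
    auto beta = ov::op::v0::Constant::create(ov::element::f32, ov::Shape{}, {0.75f});
    auto swish = std::make_shared<ov::op::v4::Swish>(x, beta);
    auto swish_default = std::make_shared<ov::op::v4::Swish>(x);  // beta defaults to 1.0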
diff --git a/ngraph/core/include/openvino/op/tan.hpp b/ngraph/core/include/openvino/op/tan.hpp
new file mode 100644
index 00000000000000..45f20a91b924fa
--- /dev/null
+++ b/ngraph/core/include/openvino/op/tan.hpp
@@ -0,0 +1,43 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include "openvino/op/util/unary_elementwise_arithmetic.hpp"
+
+namespace ov {
+namespace op {
+namespace v0 {
+// clang-format off
+/// \brief Elementwise tangent operation.
+///
+/// ## Inputs
+///
+/// |       | Type                              | Description                                     |
+/// | ----- | --------------------------------- | ----------------------------------------------- |
+/// | `arg` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and numeric element type. |
+///
+/// ## Output
+///
+/// | Type                   | Description                                                                           |
+/// | ---------------------- | ------------------------------------------------------------------------------------- |
+/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \tan(\texttt{arg}[i_1,\dots,i_n])\f$  |
+// clang-format on
+class OPENVINO_API Tan : public util::UnaryElementwiseArithmetic {
+public:
+    OPENVINO_RTTI_DECLARATION;
+    /// \brief Constructs a tangent operation.
+    ///
+    /// \param arg Node that produces the input tensor.
+    Tan(const Output& arg);
+    Tan() = default;
+
+    bool visit_attributes(AttributeVisitor& visitor) override;
+    std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override;
+    bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
+    bool has_evaluate() const override;
+};
+} // namespace v0
+} // namespace op
+} // namespace ov
diff --git a/ngraph/core/include/openvino/op/tanh.hpp b/ngraph/core/include/openvino/op/tanh.hpp
new file mode 100644
index 00000000000000..f981d9037c554d
--- /dev/null
+++ b/ngraph/core/include/openvino/op/tanh.hpp
@@ -0,0 +1,30 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include "openvino/op/util/unary_elementwise_arithmetic.hpp"
+
+namespace ov {
+namespace op {
+namespace v0 {
+/// \brief Elementwise hyperbolic tangent operation.
+class OPENVINO_API Tanh : public util::UnaryElementwiseArithmetic {
+public:
+    OPENVINO_RTTI_DECLARATION;
+
+    /// \brief Constructs a hyperbolic tangent operation.
+    ///
+    /// \param arg Node that produces the input tensor.
+    Tanh(const Output& arg);
+    Tanh() = default;
+
+    bool visit_attributes(AttributeVisitor& visitor) override;
+    std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override;
+    bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
+    bool has_evaluate() const override;
+};
+} // namespace v0
+} // namespace op
+} // namespace ov
diff --git a/ngraph/core/include/openvino/op/tile.hpp b/ngraph/core/include/openvino/op/tile.hpp
new file mode 100644
index 00000000000000..73bcd627d82744
--- /dev/null
+++ b/ngraph/core/include/openvino/op/tile.hpp
@@ -0,0 +1,38 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include "openvino/op/op.hpp"
+
+namespace ov {
+namespace op {
+namespace v0 {
+/// \brief Dynamic tiling operation which repeats a tensor multiple times
+///        along each dimension
+class OPENVINO_API Tile : public Op {
+public:
+    OPENVINO_RTTI_DECLARATION;
+
+    Tile() = default;
+    /// \brief Constructs a Tile operation.
+    ///
+    /// \param data    The node producing the input tensor to be repeated.
+    /// \param repeats The node producing the per-dimension replication factor
+    Tile(const Output& data, const Output& repeats);
+
+    bool visit_attributes(AttributeVisitor& visitor) override;
+    void validate_and_infer_types() override;
+
+    std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override;
+
+    bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
+    bool has_evaluate() const override;
+
+private:
+    bool evaluate_tile(const HostTensorVector& outputs, const HostTensorVector& inputs) const;
+};
+} // namespace v0
+} // namespace op
+} // namespace ov
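A minimal sketch of the Tile op declared above (assumed names, not part of the patch):

    // Repeat a [2, 3] tensor twice along axis 0 and three times along axis 1.
    auto data = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{2, 3});
    auto repeats = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{2}, {2, 3});
    auto tile = std::make_shared<ov::op::v0::Tile>(data, repeats);
    // Output shape: [4, 9].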
diff --git a/ngraph/core/include/openvino/op/topk.hpp b/ngraph/core/include/openvino/op/topk.hpp
new file mode 100644
index 00000000000000..dc0c2ddfeb4e21
--- /dev/null
+++ b/ngraph/core/include/openvino/op/topk.hpp
@@ -0,0 +1,161 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include
+
+#include "openvino/op/constant.hpp"
+#include "openvino/op/op.hpp"
+
+namespace ov {
+namespace op {
+namespace v1 {
+/// \brief Computes indices and values of the k maximum/minimum values
+///        for each slice along a specified axis.
+class OPENVINO_API TopK : public Op {
+public:
+    OPENVINO_RTTI_DECLARATION;
+
+    using SortType = TopKSortType;
+    using Mode = TopKMode;
+
+    /// \brief Constructs a TopK operation
+    TopK() = default;
+    /// \brief Constructs a TopK operation with two outputs: values and indices.
+    ///        By default the indices output is described by the i32 data type.
+    ///
+    /// \param data The input tensor
+    /// \param k    Specifies how many maximum/minimum elements should be computed
+    ///             (note: scalar input tensor)
+    /// \param axis The axis along which to compute the top k indices
+    /// \param mode Specifies whether the k maximum or the k minimum elements
+    ///             are selected
+    /// \param sort Specifies the order of output elements and/or indices.
+    ///             Accepted values: none, index, value
+    /// \param index_element_type Specifies the type of the produced indices
+    TopK(const Output& data,
+         const Output& k,
+         const int64_t axis,
+         const std::string& mode,
+         const std::string& sort,
+         const element::Type& index_element_type = element::i32);
+
+    TopK(const Output& data,
+         const Output& k,
+         const int64_t axis,
+         const Mode mode,
+         const SortType sort,
+         const element::Type& index_element_type = element::i32);
+
+    bool visit_attributes(AttributeVisitor& visitor) override;
+    void validate_and_infer_types() override;
+
+    std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override;
+
+    /// \brief Returns the axis value after normalization
+    /// \note If the input rank required for normalization is dynamic, an
+    ///       exception is thrown
+    uint64_t get_axis() const;
+    /// \brief Returns the axis value before normalization
+    int64_t get_provided_axis() const {
+        return m_axis;
+    }
+    void set_axis(const int64_t axis);
+    Mode get_mode() const {
+        return m_mode;
+    }
+    void set_mode(const Mode mode) {
+        m_mode = mode;
+    }
+    SortType get_sort_type() const {
+        return m_sort;
+    }
+    void set_sort_type(const SortType sort) {
+        m_sort = sort;
+    }
+    element::Type get_index_element_type() const {
+        return m_index_element_type;
+    }
+    void set_index_element_type(const element::Type& index_element_type) {
+        m_index_element_type = index_element_type;
+    }
+    /// \brief Returns the value of K, if available
+    ///
+    /// \note If the second input to this op is a constant, the value is
+    ///       retrieved and returned. If the input is not constant (dynamic),
+    ///       this method returns 0
+    size_t get_k() const;
+    void set_k(size_t k);
+    size_t get_default_output_index() const override {
+        return no_default_index();
+    }
+    bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
+    bool has_evaluate() const override;
+
+protected:
+    int64_t m_axis;
+    uint64_t m_normalized_axis;
+    Mode m_mode;
+    SortType m_sort;
+    element::Type m_index_element_type{element::i32};
+
+    virtual size_t read_k_from_constant_node(const std::shared_ptr& node,
+                                             const element::Type& k_element_type) const;
+
+    template
+    size_t validate_and_get_k(const std::shared_ptr& k_constant) const;
+    ngraph::Shape compute_output_shape(const std::string& node_description,
+                                       const PartialShape input_partial_shape,
+                                       const int64_t k) const;
+    void set_axis(const Rank input_rank, const int64_t axis);
+};
+} // namespace v1
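A hedged usage sketch for v1::TopK; the string mode/sort values follow the enum spellings, and the names are illustrative:

    // Top 3 values per row of a [2, 10] tensor, sorted by value, i32 indices.
    auto data = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{2, 10});
    auto k = ov::op::v0::Constant::create(ov::element::i32, ov::Shape{}, {3});
    auto topk = std::make_shared<ov::op::v1::TopK>(data, k, 1, "max", "value");
    // topk->output(0): values, shape [2, 3]; topk->output(1): their indices, i32.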
+
+namespace v3 {
+/// \brief Computes indices and values of the k maximum/minimum values
+///        for each slice along a specified axis.
+class OPENVINO_API TopK : public v1::TopK {
+public:
+    OPENVINO_RTTI_DECLARATION;
+    /// \brief Constructs a TopK operation
+    TopK() = default;
+    /// \brief Constructs a TopK operation with two outputs: values and indices.
+    ///        By default the indices output is described by the i32 data type.
+    ///
+    /// \param data The input tensor
+    /// \param k    Specifies how many maximum/minimum elements should be computed
+    ///             (note: scalar input tensor)
+    /// \param axis The axis along which to compute the top k indices
+    /// \param mode Specifies whether the k maximum or the k minimum elements
+    ///             are selected
+    /// \param sort Specifies the order of output elements and/or indices.
+    ///             Accepted values: none, index, value
+    /// \param index_element_type Specifies the type of the produced indices
+    TopK(const Output& data,
+         const Output& k,
+         const int64_t axis,
+         const std::string& mode,
+         const std::string& sort,
+         const element::Type& index_element_type = element::i32);
+
+    TopK(const Output& data,
+         const Output& k,
+         const int64_t axis,
+         const Mode mode,
+         const SortType sort,
+         const element::Type& index_element_type = element::i32);
+    bool visit_attributes(AttributeVisitor& visitor) override;
+    void validate_and_infer_types() override;
+    std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override;
+
+    bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
+    bool has_evaluate() const override;
+
+protected:
+    size_t read_k_from_constant_node(const std::shared_ptr& node,
+                                     const element::Type& k_element_type) const override;
+};
+} // namespace v3
+} // namespace op
+} // namespace ov
diff --git a/ngraph/core/include/openvino/op/transpose.hpp b/ngraph/core/include/openvino/op/transpose.hpp
new file mode 100644
index 00000000000000..944b6533044107
--- /dev/null
+++ b/ngraph/core/include/openvino/op/transpose.hpp
@@ -0,0 +1,39 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include "openvino/op/op.hpp"
+
+namespace ov {
+namespace op {
+namespace v1 {
+/// \brief Tensor transpose operation.
+class OPENVINO_API Transpose : public Op {
+public:
+    OPENVINO_RTTI_DECLARATION;
+
+    Transpose() = default;
+    ///
+    /// \brief Constructs a transpose operation.
+    ///
+    /// \param arg         Node producing the tensor to be transposed.
+    /// \param input_order Node producing the permutation to apply to the axes
+    ///                    of the input shape. Must be a vector with shape [n],
+    ///                    where n is the rank of arg. The tensor's value must
+    ///                    contain every integer in the range [0, n-1].
+ /// + Transpose(const Output& arg, const Output& input_order); + + bool visit_attributes(AttributeVisitor& visitor) override; + void validate_and_infer_types() override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v1 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/unsqueeze.hpp b/ngraph/core/include/openvino/op/unsqueeze.hpp new file mode 100644 index 00000000000000..6eae736e9a0f46 --- /dev/null +++ b/ngraph/core/include/openvino/op/unsqueeze.hpp @@ -0,0 +1,34 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v0 { +class OPENVINO_API Unsqueeze : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + Unsqueeze() = default; + Unsqueeze(const Output& data, const Output& axes); + + void validate_and_infer_types() override; + bool visit_attributes(AttributeVisitor& visitor) override; + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; + bool evaluate_lower(const HostTensorVector& output_values) const override; + bool evaluate_upper(const HostTensorVector& output_values) const override; + + bool constant_fold(OutputVector& output_values, const OutputVector& inputs_values) override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; +}; +} // namespace v0 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/util/scatter_base.hpp b/ngraph/core/include/openvino/op/util/scatter_base.hpp index 6f12ee2b326db8..ffe6952d84c181 100644 --- a/ngraph/core/include/openvino/op/util/scatter_base.hpp +++ b/ngraph/core/include/openvino/op/util/scatter_base.hpp @@ -14,10 +14,7 @@ namespace util { /// class OPENVINO_API ScatterBase : public Op { public: - static constexpr NodeTypeInfo type_info{"ScatterBase", 3}; - const NodeTypeInfo& get_type_info() const override { - return type_info; - } + OPENVINO_RTTI_DECLARATION; void validate_and_infer_types() override; bool visit_attributes(AttributeVisitor& visitor) override; diff --git a/ngraph/core/include/openvino/op/util/scatter_nd_base.hpp b/ngraph/core/include/openvino/op/util/scatter_nd_base.hpp index 84f71a912619e4..36fb2e22b05ce1 100644 --- a/ngraph/core/include/openvino/op/util/scatter_nd_base.hpp +++ b/ngraph/core/include/openvino/op/util/scatter_nd_base.hpp @@ -14,10 +14,7 @@ namespace util { /// class OPENVINO_API ScatterNDBase : public Op { public: - static constexpr NodeTypeInfo type_info{"ScatterNDBase", 3}; - const NodeTypeInfo& get_type_info() const override { - return type_info; - } + OPENVINO_RTTI_DECLARATION; // Respective input ordinal number. 
 static constexpr int INPUTS = 0;
 static constexpr int INDICES = 1;
diff --git a/ngraph/core/include/openvino/op/variadic_split.hpp b/ngraph/core/include/openvino/op/variadic_split.hpp
new file mode 100644
index 00000000000000..74c3059dac5ebd
--- /dev/null
+++ b/ngraph/core/include/openvino/op/variadic_split.hpp
@@ -0,0 +1,46 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include "openvino/op/op.hpp"
+
+namespace ov {
+namespace op {
+namespace v1 {
+/// \brief VariadicSplit operation splits an input tensor into pieces along some axis.
+///        The pieces may have variadic lengths depending on the "split_lengths" input.
+class OPENVINO_API VariadicSplit : public Op {
+public:
+    OPENVINO_RTTI_DECLARATION;
+
+    /// \brief Constructs a variadic split operation.
+    VariadicSplit() = default;
+    /// \brief Constructs a variadic split operation.
+    ///
+    /// \param data          The tensor to be split.
+    /// \param axis          The index of an axis in "data" along which to perform
+    ///                      the split.
+    /// \param split_lengths A list containing the sizes of each output tensor
+    ///                      along the split "axis". The size of "split_lengths"
+    ///                      should be equal to the number of outputs. The sum of
+    ///                      "split_lengths" must match data.shape[axis].
+    VariadicSplit(const Output& data, const Output& axis, const Output& split_lengths);
+
+    bool visit_attributes(AttributeVisitor& visitor) override;
+
+    void validate_and_infer_types() override;
+    std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override;
+    size_t get_default_output_index() const override {
+        return no_default_index();
+    }
+    bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
+    bool has_evaluate() const override;
+
+private:
+    bool evaluate_variadic_split(const HostTensorVector& outputs, const HostTensorVector& inputs) const;
+};
+} // namespace v1
+} // namespace op
+} // namespace ov
diff --git a/ngraph/core/include/openvino/op/xor.hpp b/ngraph/core/include/openvino/op/xor.hpp
new file mode 100644
index 00000000000000..460117ae5996b3
--- /dev/null
+++ b/ngraph/core/include/openvino/op/xor.hpp
@@ -0,0 +1,41 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include
+
+#include "openvino/op/util/binary_elementwise_logical.hpp"
+
+namespace ov {
+namespace op {
+namespace v0 {
+/// \brief Elementwise logical-xor operation.
+///
+class OPENVINO_API Xor : public util::BinaryElementwiseLogical {
+public:
+    OPENVINO_RTTI_DECLARATION;
+    Xor() = default;
+    /// \brief Constructs a logical-xor operation.
+    ///
+    /// \param arg0 Node that produces the first input tensor.
+ /// `[d0, ...]` + /// \param arg1 Node that produces the second input tensor.
+ /// `[d0, ...]` + /// \param auto_broadcast Auto broadcast specification + /// + /// Output `[d0, ...]` + /// + Xor(const Output& arg0, + const Output& arg1, + const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec()); + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v0 +} // namespace op +} // namespace ov diff --git a/ngraph/core/src/op/lstm_sequence.cpp b/ngraph/core/src/op/lstm_sequence.cpp index 0b30eddf153b6d..81a0e58f5fbfb6 100644 --- a/ngraph/core/src/op/lstm_sequence.cpp +++ b/ngraph/core/src/op/lstm_sequence.cpp @@ -17,7 +17,7 @@ using namespace ngraph; using namespace std; OPENVINO_RTTI_DEFINITION(op::v0::LSTMSequence, "LSTMSequence", 0); -OPENVINO_RTTI_DEFINITION(op::v5::LSTMSequence, "LSTMSequence", 5); +OPENVINO_RTTI_DEFINITION(op::v5::LSTMSequence, "LSTMSequence", 5, util::RNNCellBase); op::v0::LSTMSequence::LSTMSequence() : Op(), diff --git a/ngraph/core/src/op/random_uniform.cpp b/ngraph/core/src/op/random_uniform.cpp index 90a356cdef350b..c343c408c4b01c 100644 --- a/ngraph/core/src/op/random_uniform.cpp +++ b/ngraph/core/src/op/random_uniform.cpp @@ -12,7 +12,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v8::RandomUniform, "RandomUniform", 8); +OPENVINO_RTTI_DEFINITION(op::v8::RandomUniform, "RandomUniform", 8); op::v8::RandomUniform::RandomUniform(const Output& out_shape, const Output& min_val, diff --git a/ngraph/core/src/op/range.cpp b/ngraph/core/src/op/range.cpp index e3d7cd1da763c7..7bf31bdf81f325 100644 --- a/ngraph/core/src/op/range.cpp +++ b/ngraph/core/src/op/range.cpp @@ -43,7 +43,7 @@ check_value(T value) { return value == value && value_minus_value == value_minus_value; } -NGRAPH_RTTI_DEFINITION(op::v4::Range, "Range", 4); +OPENVINO_RTTI_DEFINITION(op::v4::Range, "Range", 4); op::v4::Range::Range(const Output& start, const Output& stop, @@ -286,7 +286,7 @@ bool op::v4::Range::has_evaluate() const { return false; } -NGRAPH_RTTI_DEFINITION(op::v0::Range, "Range", 0); +OPENVINO_RTTI_DEFINITION(op::v0::Range, "Range", 0); op::v0::Range::Range(const Output& start, const Output& stop, const Output& step) : Op({start, stop, step}) { diff --git a/ngraph/core/src/op/reduce_l1.cpp b/ngraph/core/src/op/reduce_l1.cpp index c23b63173fca1e..997b9c57af1cf4 100644 --- a/ngraph/core/src/op/reduce_l1.cpp +++ b/ngraph/core/src/op/reduce_l1.cpp @@ -16,7 +16,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v4::ReduceL1, "ReduceL1", 4, util::ArithmeticReductionKeepDims); +OPENVINO_RTTI_DEFINITION(op::v4::ReduceL1, "ReduceL1", 4, util::ArithmeticReductionKeepDims); op::v4::ReduceL1::ReduceL1(const Output& arg, const Output& reduction_axes, bool keep_dims) : ArithmeticReductionKeepDims(arg, reduction_axes, keep_dims) { diff --git a/ngraph/core/src/op/reduce_l2.cpp b/ngraph/core/src/op/reduce_l2.cpp index 396c365f30eb0e..8bdfccd80cf6f5 100644 --- a/ngraph/core/src/op/reduce_l2.cpp +++ b/ngraph/core/src/op/reduce_l2.cpp @@ -16,7 +16,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v4::ReduceL2, "ReduceL2", 4, util::ArithmeticReductionKeepDims); +OPENVINO_RTTI_DEFINITION(op::v4::ReduceL2, "ReduceL2", 4, util::ArithmeticReductionKeepDims); op::v4::ReduceL2::ReduceL2(const Output& arg, const Output& reduction_axes, bool keep_dims) : ArithmeticReductionKeepDims(arg, reduction_axes, keep_dims) { diff 
--git a/ngraph/core/src/op/reduce_logical_and.cpp b/ngraph/core/src/op/reduce_logical_and.cpp index 081ea960e2362f..9c79c270a168e0 100644 --- a/ngraph/core/src/op/reduce_logical_and.cpp +++ b/ngraph/core/src/op/reduce_logical_and.cpp @@ -15,7 +15,7 @@ using namespace ngraph; using namespace std; -NGRAPH_RTTI_DEFINITION(op::v1::ReduceLogicalAnd, "ReduceLogicalAnd", 1, util::LogicalReductionKeepDims); +OPENVINO_RTTI_DEFINITION(op::v1::ReduceLogicalAnd, "ReduceLogicalAnd", 1, util::LogicalReductionKeepDims); op::v1::ReduceLogicalAnd::ReduceLogicalAnd(const Output& data, const Output& reduction_axes, diff --git a/ngraph/core/src/op/reduce_logical_or.cpp b/ngraph/core/src/op/reduce_logical_or.cpp index f383cc51661744..6cca3903d35255 100644 --- a/ngraph/core/src/op/reduce_logical_or.cpp +++ b/ngraph/core/src/op/reduce_logical_or.cpp @@ -15,7 +15,7 @@ using namespace ngraph; using namespace std; -NGRAPH_RTTI_DEFINITION(op::v1::ReduceLogicalOr, "ReduceLogicalOr", 1, util::LogicalReductionKeepDims); +OPENVINO_RTTI_DEFINITION(op::v1::ReduceLogicalOr, "ReduceLogicalOr", 1, util::LogicalReductionKeepDims); op::v1::ReduceLogicalOr::ReduceLogicalOr(const Output& data, const Output& reduction_axes, diff --git a/ngraph/core/src/op/reduce_mean.cpp b/ngraph/core/src/op/reduce_mean.cpp index b32bd7ac76e03b..ea92204c589353 100644 --- a/ngraph/core/src/op/reduce_mean.cpp +++ b/ngraph/core/src/op/reduce_mean.cpp @@ -17,7 +17,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v1::ReduceMean, "ReduceMean", 1, util::ArithmeticReductionKeepDims); +OPENVINO_RTTI_DEFINITION(op::v1::ReduceMean, "ReduceMean", 1, util::ArithmeticReductionKeepDims); op::v1::ReduceMean::ReduceMean(const Output& arg, const Output& reduction_axes, bool keep_dims) : ArithmeticReductionKeepDims(arg, reduction_axes, keep_dims) { diff --git a/ngraph/core/src/op/reduce_prod.cpp b/ngraph/core/src/op/reduce_prod.cpp index 30bd01de272817..1dcbdb09575465 100644 --- a/ngraph/core/src/op/reduce_prod.cpp +++ b/ngraph/core/src/op/reduce_prod.cpp @@ -16,7 +16,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v1::ReduceProd, "ReduceProd", 1, util::ArithmeticReductionKeepDims); +OPENVINO_RTTI_DEFINITION(op::v1::ReduceProd, "ReduceProd", 1, util::ArithmeticReductionKeepDims); op::v1::ReduceProd::ReduceProd(const Output& arg, const Output& reduction_axes, bool keep_dims) : ArithmeticReductionKeepDims(arg, reduction_axes, keep_dims) { diff --git a/ngraph/core/src/op/reduce_sum.cpp b/ngraph/core/src/op/reduce_sum.cpp index ae0e9a603641b3..1055274312dc29 100644 --- a/ngraph/core/src/op/reduce_sum.cpp +++ b/ngraph/core/src/op/reduce_sum.cpp @@ -18,7 +18,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v1::ReduceSum, "ReduceSum", 1, util::ArithmeticReductionKeepDims); +OPENVINO_RTTI_DEFINITION(op::v1::ReduceSum, "ReduceSum", 1, util::ArithmeticReductionKeepDims); op::v1::ReduceSum::ReduceSum(const Output& arg, const Output& reduction_axes, bool keep_dims) : ArithmeticReductionKeepDims(arg, reduction_axes, keep_dims) { diff --git a/ngraph/core/src/op/region_yolo.cpp b/ngraph/core/src/op/region_yolo.cpp index a481da20de5bec..e8cf031ff3cbae 100644 --- a/ngraph/core/src/op/region_yolo.cpp +++ b/ngraph/core/src/op/region_yolo.cpp @@ -10,7 +10,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::RegionYolo, "RegionYolo", 0); +OPENVINO_RTTI_DEFINITION(op::v0::RegionYolo, "RegionYolo", 0); op::RegionYolo::RegionYolo(const Output& input, const size_t coords, diff 
--git a/ngraph/core/src/op/relu.cpp b/ngraph/core/src/op/relu.cpp index 4c65f2bc573cea..e5a053de0e9afd 100644 --- a/ngraph/core/src/op/relu.cpp +++ b/ngraph/core/src/op/relu.cpp @@ -14,7 +14,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::Relu, "Relu", 0, util::UnaryElementwiseArithmetic); +OPENVINO_RTTI_DEFINITION(op::v0::Relu, "Relu", 0, util::UnaryElementwiseArithmetic); op::Relu::Relu(const Output& arg) : UnaryElementwiseArithmetic(arg) { constructor_validate_and_infer_types(); diff --git a/ngraph/core/src/op/reorg_yolo.cpp b/ngraph/core/src/op/reorg_yolo.cpp index ca6b20fa7dd163..f90b07baa0e51a 100644 --- a/ngraph/core/src/op/reorg_yolo.cpp +++ b/ngraph/core/src/op/reorg_yolo.cpp @@ -10,7 +10,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::ReorgYolo, "ReorgYolo", 0); +OPENVINO_RTTI_DEFINITION(op::v0::ReorgYolo, "ReorgYolo", 0); op::ReorgYolo::ReorgYolo(const Output& input, const Strides& strides) : Op({input}), m_strides(strides) { constructor_validate_and_infer_types(); diff --git a/ngraph/core/src/op/reshape.cpp b/ngraph/core/src/op/reshape.cpp index de161673a05d7a..6593a7b2fd40f8 100644 --- a/ngraph/core/src/op/reshape.cpp +++ b/ngraph/core/src/op/reshape.cpp @@ -37,7 +37,7 @@ void compute_output_shape(const HostTensorPtr& shape_pattern, std::vector& arg, const Output& shape_pattern, bool zero_flag) : Op({arg, shape_pattern}), @@ -78,8 +78,8 @@ void op::v1::Reshape::validate_and_infer_types() { HostTensorPtr lb, ub; std::tie(lb, ub) = evaluate_both_bounds(get_input_source_output(1)); if (lb && ub) { - const auto lower_bound = std::make_shared(lb)->cast_vector(); - const auto upper_bound = std::make_shared(ub)->cast_vector(); + const auto lower_bound = std::make_shared(lb)->cast_vector(); + const auto upper_bound = std::make_shared(ub)->cast_vector(); shape_can_be_calculated = true; NGRAPH_CHECK(lower_bound.size() == upper_bound.size()); for (size_t i = 0; i < lower_bound.size(); ++i) { @@ -206,8 +206,8 @@ bool op::v1::Reshape::constant_fold(OutputVector& output_values, const OutputVec const auto& shape = get_output_shape(0); - if (auto data_const = std::dynamic_pointer_cast(inputs_values[0].get_node_shared_ptr())) { - output_values[0] = std::make_shared(*data_const, shape); + if (auto data_const = std::dynamic_pointer_cast(inputs_values[0].get_node_shared_ptr())) { + output_values[0] = std::make_shared(*data_const, shape); return true; } return false; diff --git a/ngraph/core/src/op/reverse.cpp b/ngraph/core/src/op/reverse.cpp index 9c7d72afe2c87b..f50ee79ab4d006 100644 --- a/ngraph/core/src/op/reverse.cpp +++ b/ngraph/core/src/op/reverse.cpp @@ -19,7 +19,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v1::Reverse, "Reverse", 1); +OPENVINO_RTTI_DEFINITION(op::v1::Reverse, "Reverse", 1); op::v1::Reverse::Reverse(const Output& data, const Output& reversed_axes, const std::string& mode) : Op({data, reversed_axes}), @@ -197,7 +197,7 @@ bool op::v1::Reverse::has_evaluate() const { } } -std::ostream& ngraph::operator<<(std::ostream& s, const op::v1::Reverse::Mode& type) { +std::ostream& ov::operator<<(std::ostream& s, const op::v1::Reverse::Mode& type) { return s << as_string(type); } diff --git a/ngraph/core/src/op/reverse_sequence.cpp b/ngraph/core/src/op/reverse_sequence.cpp index 1eda1f50b1c8c4..efc380fefaa74a 100644 --- a/ngraph/core/src/op/reverse_sequence.cpp +++ b/ngraph/core/src/op/reverse_sequence.cpp @@ -15,7 +15,7 @@ using namespace std; using namespace ngraph; 
-NGRAPH_RTTI_DEFINITION(op::ReverseSequence, "ReverseSequence", 0); +OPENVINO_RTTI_DEFINITION(op::v0::ReverseSequence, "ReverseSequence", 0); op::ReverseSequence::ReverseSequence(const Output& arg, const Output& seq_indices, diff --git a/ngraph/core/src/op/rnn_cell.cpp b/ngraph/core/src/op/rnn_cell.cpp index 43cb8fd5c50314..c67007d483ed65 100644 --- a/ngraph/core/src/op/rnn_cell.cpp +++ b/ngraph/core/src/op/rnn_cell.cpp @@ -15,7 +15,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v0::RNNCell, "RNNCell", 0, util::RNNCellBase); +OPENVINO_RTTI_DEFINITION(op::v0::RNNCell, "RNNCell", 0, util::RNNCellBase); op::v0::RNNCell::RNNCell() { m_activations = {"tanh"}; diff --git a/ngraph/core/src/op/rnn_sequence.cpp b/ngraph/core/src/op/rnn_sequence.cpp index 809baa02592f23..10f6f382608d38 100644 --- a/ngraph/core/src/op/rnn_sequence.cpp +++ b/ngraph/core/src/op/rnn_sequence.cpp @@ -15,7 +15,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v5::RNNSequence, "RNNSequence", 4); +OPENVINO_RTTI_DEFINITION(op::v5::RNNSequence, "RNNSequence", 4, util::RNNCellBase); op::v5::RNNSequence::RNNSequence() : m_direction(op::RecurrentSequenceDirection::FORWARD) {} diff --git a/ngraph/core/src/op/roi_align.cpp b/ngraph/core/src/op/roi_align.cpp index 580b7b8ddd3917..4d4faa8b45d2fd 100644 --- a/ngraph/core/src/op/roi_align.cpp +++ b/ngraph/core/src/op/roi_align.cpp @@ -12,7 +12,7 @@ using namespace std; using namespace ngraph; -constexpr NodeTypeInfo op::v3::ROIAlign::type_info; +OPENVINO_RTTI_DEFINITION(op::v3::ROIAlign, "ROIAlign", 3); op::v3::ROIAlign::ROIAlign(const Output& input, const Output& rois, diff --git a/ngraph/core/src/op/roi_pooling.cpp b/ngraph/core/src/op/roi_pooling.cpp index 433d70c5a6ca36..cd349783b322df 100644 --- a/ngraph/core/src/op/roi_pooling.cpp +++ b/ngraph/core/src/op/roi_pooling.cpp @@ -9,7 +9,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::ROIPooling, "ROIPooling", 0); +OPENVINO_RTTI_DEFINITION(op::v0::ROIPooling, "ROIPooling", 0); op::ROIPooling::ROIPooling(const Output& input, const Output& coords, diff --git a/ngraph/core/src/op/roll.cpp b/ngraph/core/src/op/roll.cpp index b9027f12b3a7b8..088a593852f791 100644 --- a/ngraph/core/src/op/roll.cpp +++ b/ngraph/core/src/op/roll.cpp @@ -11,7 +11,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v7::Roll, "Roll", 7); +OPENVINO_RTTI_DEFINITION(op::v7::Roll, "Roll", 7); op::v7::Roll::Roll(const Output& data, const Output& shift, const Output& axes) : Op({data, shift, axes}) { @@ -47,7 +47,7 @@ void op::v7::Roll::validate_and_infer_types() { // If shift is a scalar, than axes can be arbitrary 1d tensor and we don't need // to check shift shape consistency with axes, otherwise the check is needed. 
- if (!(shift_pshape.is_static() && is_scalar(shift_pshape.to_shape()))) { + if (!(shift_pshape.is_static() && ngraph::is_scalar(shift_pshape.to_shape()))) { NODE_VALIDATION_CHECK(this, shift_pshape.compatible(axes_pshape), "If shift is a 1D vector, axes must be a 1D tensor of the same size."); diff --git a/ngraph/core/src/op/round.cpp b/ngraph/core/src/op/round.cpp index aa33f7ccc18ad9..36f74be1bfa337 100644 --- a/ngraph/core/src/op/round.cpp +++ b/ngraph/core/src/op/round.cpp @@ -61,7 +61,7 @@ bool evaluate_round(const HostTensorPtr& arg0, } } // namespace roundop -NGRAPH_RTTI_DEFINITION(op::v5::Round, "Round", 5); +OPENVINO_RTTI_DEFINITION(op::v5::Round, "Round", 5); op::v5::Round::Round(const Output& arg, RoundMode mode) : Op({arg}), m_mode(mode) { constructor_validate_and_infer_types(); @@ -113,7 +113,7 @@ bool op::v5::Round::has_evaluate() const { return false; } -std::ostream& ngraph::operator<<(std::ostream& s, const op::v5::Round::RoundMode& type) { +std::ostream& ov::operator<<(std::ostream& s, const op::v5::Round::RoundMode& type) { return s << as_string(type); } diff --git a/ngraph/core/src/op/scatter_elements_update.cpp b/ngraph/core/src/op/scatter_elements_update.cpp index 46275a703f4270..b6bcb9c1a0e8cc 100644 --- a/ngraph/core/src/op/scatter_elements_update.cpp +++ b/ngraph/core/src/op/scatter_elements_update.cpp @@ -13,7 +13,7 @@ using namespace ngraph; using namespace std; -NGRAPH_RTTI_DEFINITION(op::ScatterElementsUpdate, "ScatterElementsUpdate", 3); +OPENVINO_RTTI_DEFINITION(op::v3::ScatterElementsUpdate, "ScatterElementsUpdate", 3); op::v3::ScatterElementsUpdate::ScatterElementsUpdate(const Output& data, const Output& indices, diff --git a/ngraph/core/src/op/scatter_nd_update.cpp b/ngraph/core/src/op/scatter_nd_update.cpp index e06660d4992ed2..a1544822a3dec7 100644 --- a/ngraph/core/src/op/scatter_nd_update.cpp +++ b/ngraph/core/src/op/scatter_nd_update.cpp @@ -12,7 +12,7 @@ using namespace std; using namespace ngraph; -constexpr NodeTypeInfo op::v3::ScatterNDUpdate::type_info; +OPENVINO_RTTI_DEFINITION(op::v3::ScatterNDUpdate, "ScatterNDUpdate", 3, util::ScatterNDBase); shared_ptr op::v3::ScatterNDUpdate::clone_with_new_inputs(const OutputVector& new_args) const { NGRAPH_OP_SCOPE(v3_ScatterNDUpdate_clone_with_new_inputs); diff --git a/ngraph/core/src/op/scatter_update.cpp b/ngraph/core/src/op/scatter_update.cpp index 0fb131bf5ff66b..e8182e8b2114c5 100644 --- a/ngraph/core/src/op/scatter_update.cpp +++ b/ngraph/core/src/op/scatter_update.cpp @@ -14,7 +14,7 @@ using namespace std; using namespace ngraph; -constexpr NodeTypeInfo op::v3::ScatterUpdate::type_info; +OPENVINO_RTTI_DEFINITION(op::v3::ScatterUpdate, "ScatterUpdate", 3, util::ScatterBase); op::v3::ScatterUpdate::ScatterUpdate(const Output& data, const Output& indices, diff --git a/ngraph/core/src/op/select.cpp b/ngraph/core/src/op/select.cpp index aff5c697e28547..9a55c76dfd1a72 100644 --- a/ngraph/core/src/op/select.cpp +++ b/ngraph/core/src/op/select.cpp @@ -16,7 +16,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v1::Select, "Select", 1); +OPENVINO_RTTI_DEFINITION(op::v1::Select, "Select", 1); op::v1::Select::Select(const Output& arg0, const Output& arg1, diff --git a/ngraph/core/src/op/selu.cpp b/ngraph/core/src/op/selu.cpp index 08ddf2c31b8577..8987823d612833 100644 --- a/ngraph/core/src/op/selu.cpp +++ b/ngraph/core/src/op/selu.cpp @@ -9,7 +9,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v0::Selu, "Selu", 0); 
+OPENVINO_RTTI_DEFINITION(op::v0::Selu, "Selu", 0); op::v0::Selu::Selu(const Output& data, const Output& alpha, const Output& lambda) : Op({data, alpha, lambda}) { diff --git a/ngraph/core/src/op/shape_of.cpp b/ngraph/core/src/op/shape_of.cpp index 69090af304a569..e4eb8f370217ca 100644 --- a/ngraph/core/src/op/shape_of.cpp +++ b/ngraph/core/src/op/shape_of.cpp @@ -20,7 +20,7 @@ using namespace std; using namespace ngraph; -constexpr NodeTypeInfo op::v3::ShapeOf::type_info; +OPENVINO_RTTI_DEFINITION(op::v3::ShapeOf, "ShapeOf", 3); op::v3::ShapeOf::ShapeOf(const Output& arg, element::Type output_type) : Op({arg}), m_output_type(output_type) { constructor_validate_and_infer_types(); @@ -180,7 +180,7 @@ bool op::v3::ShapeOf::constant_fold(OutputVector& output_values, const OutputVec } // op::v0::ShapeOf -NGRAPH_RTTI_DEFINITION(op::v0::ShapeOf, "ShapeOf", 0); +OPENVINO_RTTI_DEFINITION(op::v0::ShapeOf, "ShapeOf", 0); op::v0::ShapeOf::ShapeOf(const Output& arg) : Op({arg}) { constructor_validate_and_infer_types(); diff --git a/ngraph/core/src/op/shuffle_channels.cpp b/ngraph/core/src/op/shuffle_channels.cpp index 1e73dac967c6ed..89dd145b705ad2 100644 --- a/ngraph/core/src/op/shuffle_channels.cpp +++ b/ngraph/core/src/op/shuffle_channels.cpp @@ -18,7 +18,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v0::ShuffleChannels, "ShuffleChannels", 0); +OPENVINO_RTTI_DEFINITION(op::v0::ShuffleChannels, "ShuffleChannels", 0); op::ShuffleChannels::ShuffleChannels(const Output& data, const int64_t axis, const int64_t group) : Op({data}), diff --git a/ngraph/core/src/op/sigmoid.cpp b/ngraph/core/src/op/sigmoid.cpp index 1ceaca5144b00e..0565cf62f00999 100644 --- a/ngraph/core/src/op/sigmoid.cpp +++ b/ngraph/core/src/op/sigmoid.cpp @@ -15,15 +15,15 @@ using namespace std; using namespace ngraph; -constexpr NodeTypeInfo op::Sigmoid::type_info; +OPENVINO_RTTI_DEFINITION(ov::op::v0::Sigmoid, "Sigmoid", 0, util::UnaryElementwiseArithmetic); -shared_ptr op::Sigmoid::clone_with_new_inputs(const OutputVector& new_args) const { +shared_ptr ov::op::v0::Sigmoid::clone_with_new_inputs(const OutputVector& new_args) const { NGRAPH_OP_SCOPE(v0_Sigmoid_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared(new_args.at(0)); } -op::Sigmoid::Sigmoid(const Output& arg) : UnaryElementwiseArithmetic(arg) { +ov::op::v0::Sigmoid::Sigmoid(const Output& arg) : UnaryElementwiseArithmetic(arg) { constructor_validate_and_infer_types(); } @@ -56,13 +56,13 @@ bool evaluate_sigmoid(const HostTensorPtr& arg0, const HostTensorPtr& out) { } } // namespace sigmoid -bool op::Sigmoid::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { +bool ov::op::v0::Sigmoid::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { NGRAPH_OP_SCOPE(v0_Sigmoid_evaluate); NGRAPH_CHECK(validate_host_tensor_vector(outputs, 1) && validate_host_tensor_vector(inputs, 1)); return sigmoid::evaluate_sigmoid(inputs[0], outputs[0]); } -bool op::Sigmoid::has_evaluate() const { +bool ov::op::v0::Sigmoid::has_evaluate() const { NGRAPH_OP_SCOPE(v0_Sigmoid_has_evaluate); switch (get_input_element_type(0)) { case ngraph::element::boolean: diff --git a/ngraph/core/src/op/sign.cpp b/ngraph/core/src/op/sign.cpp index 74b0e89acf90d9..50aa74539b9d0a 100644 --- a/ngraph/core/src/op/sign.cpp +++ b/ngraph/core/src/op/sign.cpp @@ -5,15 +5,14 @@ #include "ngraph/op/sign.hpp" #include "itt.hpp" - -using namespace std; -using namespace ngraph; - #include 
"ngraph/runtime/host_tensor.hpp" #include "ngraph/runtime/reference/sign.hpp" #include "ngraph/validation_util.hpp" -NGRAPH_RTTI_DEFINITION(op::v0::Sign, "Sign", 0, util::UnaryElementwiseArithmetic); +using namespace std; +using namespace ngraph; + +OPENVINO_RTTI_DEFINITION(op::v0::Sign, "Sign", 0, util::UnaryElementwiseArithmetic); op::Sign::Sign(const Output& arg) : UnaryElementwiseArithmetic(arg) { constructor_validate_and_infer_types(); diff --git a/ngraph/core/src/op/sin.cpp b/ngraph/core/src/op/sin.cpp index 07e91332df3625..9afac15a716cc1 100644 --- a/ngraph/core/src/op/sin.cpp +++ b/ngraph/core/src/op/sin.cpp @@ -13,7 +13,7 @@ using namespace std; using namespace ngraph; -constexpr NodeTypeInfo op::Sin::type_info; +OPENVINO_RTTI_DEFINITION(op::v0::Sin, "Sin", 0, util::UnaryElementwiseArithmetic); op::Sin::Sin(const Output& arg) : UnaryElementwiseArithmetic(arg) { constructor_validate_and_infer_types(); diff --git a/ngraph/core/src/op/sinh.cpp b/ngraph/core/src/op/sinh.cpp index 24484615e3a026..0fd742f575df0a 100644 --- a/ngraph/core/src/op/sinh.cpp +++ b/ngraph/core/src/op/sinh.cpp @@ -12,7 +12,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v0::Sinh, "Sinh", 0, util::UnaryElementwiseArithmetic); +OPENVINO_RTTI_DEFINITION(op::v0::Sinh, "Sinh", 0, util::UnaryElementwiseArithmetic); op::Sinh::Sinh(const Output& arg) : UnaryElementwiseArithmetic(arg) { constructor_validate_and_infer_types(); diff --git a/ngraph/core/src/op/sink.cpp b/ngraph/core/src/op/sink.cpp index a1ed861d5f6226..567f7faa439862 100644 --- a/ngraph/core/src/op/sink.cpp +++ b/ngraph/core/src/op/sink.cpp @@ -6,6 +6,6 @@ using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::Sink, "Sink", 0); +OPENVINO_RTTI_DEFINITION(op::Sink, "Sink", 0); -op::Sink::~Sink() {} +op::Sink::~Sink() = default; diff --git a/ngraph/core/src/op/softmax.cpp b/ngraph/core/src/op/softmax.cpp index 057725b414f202..d7c2858a0de1cd 100644 --- a/ngraph/core/src/op/softmax.cpp +++ b/ngraph/core/src/op/softmax.cpp @@ -46,7 +46,7 @@ bool evaluate_softmax(const HostTensorPtr& arg, const HostTensorPtr& out, const } // namespace // *** SOFTMAX OP SET V1 *** -NGRAPH_RTTI_DEFINITION(op::v1::Softmax, "Softmax", 1); +OPENVINO_RTTI_DEFINITION(op::v1::Softmax, "Softmax", 1); op::v1::Softmax::Softmax(const Output& arg, const size_t axis) : Op({arg}), m_axis(axis) { constructor_validate_and_infer_types(); diff --git a/ngraph/core/src/op/softplus.cpp b/ngraph/core/src/op/softplus.cpp index 778b438015435a..411a5d7b8a1910 100644 --- a/ngraph/core/src/op/softplus.cpp +++ b/ngraph/core/src/op/softplus.cpp @@ -14,7 +14,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v4::SoftPlus, "SoftPlus", 4); +OPENVINO_RTTI_DEFINITION(op::v4::SoftPlus, "SoftPlus", 4); op::v4::SoftPlus::SoftPlus(const Output& arg) : Op({arg}) { constructor_validate_and_infer_types(); diff --git a/ngraph/core/src/op/space_to_batch.cpp b/ngraph/core/src/op/space_to_batch.cpp index f380887b5cb4f5..0bd707e689c0dc 100644 --- a/ngraph/core/src/op/space_to_batch.cpp +++ b/ngraph/core/src/op/space_to_batch.cpp @@ -21,7 +21,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v1::SpaceToBatch, "SpaceToBatch", 1); +OPENVINO_RTTI_DEFINITION(op::v1::SpaceToBatch, "SpaceToBatch", 1); ngraph::op::v1::SpaceToBatch::SpaceToBatch(const ngraph::Output& data, const ngraph::Output& block_shape, diff --git a/ngraph/core/src/op/space_to_depth.cpp b/ngraph/core/src/op/space_to_depth.cpp index c9bd7a92b9e665..f4a3436f4d860c 100644 --- 
a/ngraph/core/src/op/space_to_depth.cpp +++ b/ngraph/core/src/op/space_to_depth.cpp @@ -17,16 +17,16 @@ using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::SpaceToDepth, "SpaceToDepth", 0); +OPENVINO_RTTI_DEFINITION(ov::op::v0::SpaceToDepth, "SpaceToDepth", 0); -op::SpaceToDepth::SpaceToDepth(const Output& data, const SpaceToDepthMode& mode, size_t block_size) +ov::op::v0::SpaceToDepth::SpaceToDepth(const Output& data, const SpaceToDepthMode& mode, size_t block_size) : Op({data}), m_blocksize(block_size), m_mode(mode) { constructor_validate_and_infer_types(); } -op::SpaceToDepth::SpaceToDepth(const Output& data, const std::string& mode, size_t block_size) +ov::op::v0::SpaceToDepth::SpaceToDepth(const Output& data, const std::string& mode, size_t block_size) : SpaceToDepth(data, as_enum(mode), block_size) {} bool ngraph::op::v0::SpaceToDepth::visit_attributes(AttributeVisitor& visitor) { @@ -36,7 +36,7 @@ bool ngraph::op::v0::SpaceToDepth::visit_attributes(AttributeVisitor& visitor) { return true; } -std::shared_ptr op::SpaceToDepth::clone_with_new_inputs(const OutputVector& new_args) const { +std::shared_ptr ov::op::v0::SpaceToDepth::clone_with_new_inputs(const OutputVector& new_args) const { NGRAPH_OP_SCOPE(v0_SpaceToDepth_clone_with_new_inputs); if (new_args.size() != 1) { throw ngraph_error("Incorrect number of new arguments"); @@ -88,7 +88,7 @@ void ngraph::op::v0::SpaceToDepth::validate_and_infer_types() { bool evaluate_space_to_depth(const HostTensorVector& outputs, const HostTensorVector& inputs, const std::size_t block_size, - const op::SpaceToDepth::SpaceToDepthMode mode) { + const ov::op::v0::SpaceToDepth::SpaceToDepthMode mode) { const auto& in = inputs[0]; const auto& out = outputs[0]; size_t elem_size = in->get_element_type().size(); @@ -116,7 +116,7 @@ bool ngraph::op::v0::SpaceToDepth::has_evaluate() const { return !get_input_partial_shape(0).is_dynamic(); } -std::ostream& ngraph::operator<<(std::ostream& s, const op::v0::SpaceToDepth::SpaceToDepthMode& type) { +std::ostream& ov::operator<<(std::ostream& s, const op::v0::SpaceToDepth::SpaceToDepthMode& type) { return s << as_string(type); } diff --git a/ngraph/core/src/op/split.cpp b/ngraph/core/src/op/split.cpp index f1aaba1057861b..2d70a0be5f1d8e 100644 --- a/ngraph/core/src/op/split.cpp +++ b/ngraph/core/src/op/split.cpp @@ -18,7 +18,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v1::Split, "Split", 1); +OPENVINO_RTTI_DEFINITION(op::v1::Split, "Split", 1); op::v1::Split::Split(const Output& data, const Output& axis, const size_t num_splits) : Op({data, axis}), diff --git a/ngraph/core/src/op/sqrt.cpp b/ngraph/core/src/op/sqrt.cpp index 32533220c5b630..dec1c8622a935b 100644 --- a/ngraph/core/src/op/sqrt.cpp +++ b/ngraph/core/src/op/sqrt.cpp @@ -13,7 +13,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v0::Sqrt, "Sqrt", 0, util::UnaryElementwiseArithmetic); +OPENVINO_RTTI_DEFINITION(op::v0::Sqrt, "Sqrt", 0, util::UnaryElementwiseArithmetic); op::Sqrt::Sqrt(const Output& arg) : UnaryElementwiseArithmetic(arg) { constructor_validate_and_infer_types(); diff --git a/ngraph/core/src/op/squared_difference.cpp b/ngraph/core/src/op/squared_difference.cpp index f8667e0fcb29e3..d9141388a99118 100644 --- a/ngraph/core/src/op/squared_difference.cpp +++ b/ngraph/core/src/op/squared_difference.cpp @@ -7,21 +7,20 @@ #include "itt.hpp" using namespace std; -using namespace ngraph; // ------------------------------ v0 ------------------------------------------- 
-NGRAPH_RTTI_DEFINITION(op::SquaredDifference, "SquaredDifference", 0, util::BinaryElementwiseArithmetic); +OPENVINO_RTTI_DEFINITION(ov::op::v0::SquaredDifference, "SquaredDifference", 0, util::BinaryElementwiseArithmetic); -op::SquaredDifference::SquaredDifference(const Output& arg0, - const Output& arg1, - const AutoBroadcastSpec& auto_broadcast) +ov::op::v0::SquaredDifference::SquaredDifference(const Output& arg0, + const Output& arg1, + const AutoBroadcastSpec& auto_broadcast) : BinaryElementwiseArithmetic(arg0, arg1, auto_broadcast) { constructor_validate_and_infer_types(); } -shared_ptr op::SquaredDifference::clone_with_new_inputs(const OutputVector& new_args) const { +shared_ptr ov::op::v0::SquaredDifference::clone_with_new_inputs(const OutputVector& new_args) const { NGRAPH_OP_SCOPE(v0_SquaredDifference_clone_with_new_inputs); check_new_args_count(this, new_args); - return make_shared(new_args.at(0), new_args.at(1), this->get_autob()); + return make_shared(new_args.at(0), new_args.at(1), this->get_autob()); } diff --git a/ngraph/core/src/op/squeeze.cpp b/ngraph/core/src/op/squeeze.cpp index d837b661635cd0..0d04d6a08bd3b6 100644 --- a/ngraph/core/src/op/squeeze.cpp +++ b/ngraph/core/src/op/squeeze.cpp @@ -19,7 +19,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v0::Squeeze, "Squeeze", 0); +OPENVINO_RTTI_DEFINITION(op::v0::Squeeze, "Squeeze", 0); op::Squeeze::Squeeze() : Op() {} @@ -268,8 +268,8 @@ bool op::v0::Squeeze::constant_fold(OutputVector& output_values, const OutputVec const auto& shape = get_output_shape(0); - if (auto data_const = std::dynamic_pointer_cast(inputs_values[0].get_node_shared_ptr())) { - output_values[0] = std::make_shared(*data_const, shape); + if (auto data_const = std::dynamic_pointer_cast(inputs_values[0].get_node_shared_ptr())) { + output_values[0] = std::make_shared(*data_const, shape); return true; } return false; diff --git a/ngraph/core/src/op/strided_slice.cpp b/ngraph/core/src/op/strided_slice.cpp index 6ae268a122dba7..c7ef03d52ec9b2 100644 --- a/ngraph/core/src/op/strided_slice.cpp +++ b/ngraph/core/src/op/strided_slice.cpp @@ -23,7 +23,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v1::StridedSlice, "StridedSlice", 1); +OPENVINO_RTTI_DEFINITION(op::v1::StridedSlice, "StridedSlice", 1); op::v1::StridedSlice::StridedSlice(const Output& data, const Output& begin, diff --git a/ngraph/core/src/op/subtract.cpp b/ngraph/core/src/op/subtract.cpp index d7dbfd0182e26a..3554ab1d61eee6 100644 --- a/ngraph/core/src/op/subtract.cpp +++ b/ngraph/core/src/op/subtract.cpp @@ -51,7 +51,7 @@ bool evaluate_subtract(const HostTensorPtr& arg0, // ------------------------------- v1 ------------------------------------------ -NGRAPH_RTTI_DEFINITION(op::v1::Subtract, "Subtract", 1, util::BinaryElementwiseArithmetic); +OPENVINO_RTTI_DEFINITION(op::v1::Subtract, "Subtract", 1, util::BinaryElementwiseArithmetic); op::v1::Subtract::Subtract(const Output& arg0, const Output& arg1, const AutoBroadcastSpec& auto_broadcast) : BinaryElementwiseArithmetic(arg0, arg1, auto_broadcast) { diff --git a/ngraph/core/src/op/swish.cpp b/ngraph/core/src/op/swish.cpp index 4a990cd760931b..6f08453cff915f 100644 --- a/ngraph/core/src/op/swish.cpp +++ b/ngraph/core/src/op/swish.cpp @@ -15,7 +15,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v4::Swish, "Swish", 4); +OPENVINO_RTTI_DEFINITION(op::v4::Swish, "Swish", 4); op::v4::Swish::Swish(const Output& arg) : Op({arg}) { 
constructor_validate_and_infer_types(); diff --git a/ngraph/core/src/op/tan.cpp b/ngraph/core/src/op/tan.cpp index cc59868935d778..5c23321fe5855d 100644 --- a/ngraph/core/src/op/tan.cpp +++ b/ngraph/core/src/op/tan.cpp @@ -14,7 +14,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v0::Tan, "Tan", 0, util::UnaryElementwiseArithmetic); +OPENVINO_RTTI_DEFINITION(op::v0::Tan, "Tan", 0, util::UnaryElementwiseArithmetic); op::Tan::Tan(const Output& arg) : UnaryElementwiseArithmetic(arg) { constructor_validate_and_infer_types(); diff --git a/ngraph/core/src/op/tanh.cpp b/ngraph/core/src/op/tanh.cpp index 743dc5dfae3d12..e638c97f51ed6a 100644 --- a/ngraph/core/src/op/tanh.cpp +++ b/ngraph/core/src/op/tanh.cpp @@ -13,7 +13,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v0::Tanh, "Tanh", 0, op::util::UnaryElementwiseArithmetic); +OPENVINO_RTTI_DEFINITION(op::v0::Tanh, "Tanh", 0, op::util::UnaryElementwiseArithmetic); op::Tanh::Tanh(const Output& arg) : UnaryElementwiseArithmetic(arg) { constructor_validate_and_infer_types(); diff --git a/ngraph/core/src/op/tile.cpp b/ngraph/core/src/op/tile.cpp index 2d55b04641db8e..210018d06a63bb 100644 --- a/ngraph/core/src/op/tile.cpp +++ b/ngraph/core/src/op/tile.cpp @@ -13,7 +13,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v0::Tile, "Tile", 0); +OPENVINO_RTTI_DEFINITION(op::v0::Tile, "Tile", 0); op::v0::Tile::Tile(const Output& data, const Output& repeats) : Op({data, repeats}) { constructor_validate_and_infer_types(); diff --git a/ngraph/core/src/op/topk.cpp b/ngraph/core/src/op/topk.cpp index e11457eca43df5..ecbc42f5e43e35 100644 --- a/ngraph/core/src/op/topk.cpp +++ b/ngraph/core/src/op/topk.cpp @@ -136,7 +136,7 @@ size_t read_k_from_host_tensor(const HostTensorPtr& arg_k) { } // namespace topk // v1 version starts -NGRAPH_RTTI_DEFINITION(op::v1::TopK, "TopK", 1); +OPENVINO_RTTI_DEFINITION(op::v1::TopK, "TopK", 1); static const std::uint64_t UNKNOWN_NORMALIZED_AXIS = std::numeric_limits::max(); @@ -196,7 +196,7 @@ void op::v1::TopK::validate_and_infer_types() { "Index element type attribute should be either \'i32\' or \'i64\'. 
Got: ", m_index_element_type); - if (op::is_constant(input_value(1).get_node())) { + if (ov::op::util::is_constant(input_value(1).get_node())) { // Check k value read_k_from_constant_node(input_value(1).get_node_shared_ptr(), get_input_element_type(1)); } @@ -281,7 +281,7 @@ size_t op::v1::TopK::read_k_from_constant_node(const shared_ptr& node, k_element_type, ")."); - const auto k_constant = ov::as_type_ptr(node); + const auto k_constant = ov::as_type_ptr(node); size_t k = 0; @@ -303,7 +303,7 @@ size_t op::v1::TopK::read_k_from_constant_node(const shared_ptr& node, } template -size_t op::v1::TopK::validate_and_get_k(const shared_ptr& k_constant) const { +size_t op::v1::TopK::validate_and_get_k(const shared_ptr& k_constant) const { const auto k_const_contents = k_constant->get_vector(); NODE_VALIDATION_CHECK(this, @@ -334,7 +334,7 @@ shared_ptr op::v1::TopK::clone_with_new_inputs(const OutputVector& new_arg size_t op::v1::TopK::get_k() const { size_t k = 0; - if (op::is_constant(input_value(1).get_node())) { + if (op::util::is_constant(input_value(1).get_node())) { k = read_k_from_constant_node(input_value(1).get_node_shared_ptr(), get_input_element_type(1)); } @@ -345,7 +345,7 @@ size_t op::v1::TopK::get_k() const { } void op::v1::TopK::set_k(size_t k) { - this->input(1).replace_source_output(op::Constant::create(element::i64, Shape{}, {k})->output(0)); + this->input(1).replace_source_output(op::v0::Constant::create(element::i64, Shape{}, {k})->output(0)); } bool op::v1::TopK::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { @@ -358,7 +358,7 @@ bool op::v1::TopK::evaluate(const HostTensorVector& outputs, const HostTensorVec // 2. get value of k - from constant node or from HT size_t k = 0; - if (op::is_constant(input_value(1).get_node())) { + if (op::util::is_constant(input_value(1).get_node())) { k = read_k_from_constant_node(input_value(1).get_node_shared_ptr(), get_input_element_type(1)); NGRAPH_CHECK(k <= arg_shape[axis], "'K' exceeds the dimension of top_k_axis"); } else { @@ -400,7 +400,7 @@ bool op::v1::TopK::has_evaluate() const { return false; } - if (op::is_constant(input_value(1).get_node())) { + if (op::util::is_constant(input_value(1).get_node())) { switch (get_input_element_type(1)) { case ngraph::element::i8: case ngraph::element::i32: @@ -429,7 +429,7 @@ bool op::v1::TopK::has_evaluate() const { } // v3 version starts -constexpr NodeTypeInfo op::v3::TopK::type_info; +OPENVINO_RTTI_DEFINITION(op::v3::TopK, "TopK", 3); op::v3::TopK::TopK(const Output& data, const Output& k, @@ -471,7 +471,7 @@ void op::v3::TopK::validate_and_infer_types() { size_t op::v3::TopK::read_k_from_constant_node(const shared_ptr& node, const element::Type& k_element_type) const { - const auto k_constant = ov::as_type_ptr(node); + const auto k_constant = ov::as_type_ptr(node); size_t k = 0; @@ -536,7 +536,7 @@ bool op::v3::TopK::has_evaluate() const { return false; } - if (op::is_constant(input_value(1).get_node())) { + if (op::util::is_constant(input_value(1).get_node())) { switch (get_input_element_type(1)) { case ngraph::element::i8: case ngraph::element::i32: diff --git a/ngraph/core/src/op/transpose.cpp b/ngraph/core/src/op/transpose.cpp index e0c9bd37a74afc..2b68e94bd3f02c 100644 --- a/ngraph/core/src/op/transpose.cpp +++ b/ngraph/core/src/op/transpose.cpp @@ -12,7 +12,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v1::Transpose, "Transpose", 1); +OPENVINO_RTTI_DEFINITION(op::v1::Transpose, "Transpose", 1); 
 
 op::v1::Transpose::Transpose(const Output<Node>& arg, const Output<Node>& input_order) : Op({arg, input_order}) {
     constructor_validate_and_infer_types();
diff --git a/ngraph/core/src/op/type_relaxed.cpp b/ngraph/core/src/op/type_relaxed.cpp
index 6f5881d070d141..cc3bf7253ec108 100644
--- a/ngraph/core/src/op/type_relaxed.cpp
+++ b/ngraph/core/src/op/type_relaxed.cpp
@@ -10,7 +10,6 @@
 namespace ngraph {
 namespace op {
 
-TypeRelaxedBase::~TypeRelaxedBase() {}
-
+TypeRelaxedBase::~TypeRelaxedBase() = default;
 }  // namespace op
 }  // namespace ngraph
diff --git a/ngraph/core/src/op/unsqueeze.cpp b/ngraph/core/src/op/unsqueeze.cpp
index cec998664d3537..5d186ba587358c 100644
--- a/ngraph/core/src/op/unsqueeze.cpp
+++ b/ngraph/core/src/op/unsqueeze.cpp
@@ -19,7 +19,7 @@ using namespace std;
 using namespace ngraph;
 
-NGRAPH_RTTI_DEFINITION(op::v0::Unsqueeze, "Unsqueeze", 0);
+OPENVINO_RTTI_DEFINITION(op::v0::Unsqueeze, "Unsqueeze", 0);
 
 op::v0::Unsqueeze::Unsqueeze(const Output<Node>& data, const Output<Node>& axes) : Op({data, axes}) {
     constructor_validate_and_infer_types();
@@ -166,8 +166,8 @@ bool op::v0::Unsqueeze::constant_fold(OutputVector& output_values, const OutputV
     const auto& shape = get_output_shape(0);
 
-    if (auto data_const = std::dynamic_pointer_cast<op::Constant>(inputs_values[0].get_node_shared_ptr())) {
-        output_values[0] = std::make_shared<op::Constant>(*data_const, shape);
+    if (auto data_const = std::dynamic_pointer_cast<op::v0::Constant>(inputs_values[0].get_node_shared_ptr())) {
+        output_values[0] = std::make_shared<op::v0::Constant>(*data_const, shape);
         return true;
     }
     return false;
diff --git a/ngraph/core/src/op/util/scatter_base.cpp b/ngraph/core/src/op/util/scatter_base.cpp
index 7a13b0801c2b22..219fecb832cd35 100644
--- a/ngraph/core/src/op/util/scatter_base.cpp
+++ b/ngraph/core/src/op/util/scatter_base.cpp
@@ -11,7 +11,7 @@
 
 using namespace std;
 
-constexpr ov::NodeTypeInfo ov::op::util::ScatterBase::type_info;
+OPENVINO_RTTI_DEFINITION(ov::op::util::ScatterBase, "ScatterBase", 0);
 
 ov::op::util::ScatterBase::ScatterBase(const Output<Node>& data,
                                        const Output<Node>& indices,
diff --git a/ngraph/core/src/op/util/scatter_nd_base.cpp b/ngraph/core/src/op/util/scatter_nd_base.cpp
index 3fd67100bbd1c5..0c2b6a2f52e5ca 100644
--- a/ngraph/core/src/op/util/scatter_nd_base.cpp
+++ b/ngraph/core/src/op/util/scatter_nd_base.cpp
@@ -10,7 +10,7 @@
 
 using namespace std;
 
-constexpr ov::NodeTypeInfo ov::op::util::ScatterNDBase::type_info;
+OPENVINO_RTTI_DEFINITION(ov::op::util::ScatterNDBase, "ScatterNDBase", 0);
 constexpr int ov::op::util::ScatterNDBase::INPUTS;
 constexpr int ov::op::util::ScatterNDBase::INDICES;
 constexpr int ov::op::util::ScatterNDBase::UPDATES;
diff --git a/ngraph/core/src/op/variadic_split.cpp b/ngraph/core/src/op/variadic_split.cpp
index 6482f7d4f6bbd5..2dd5882614669e 100644
--- a/ngraph/core/src/op/variadic_split.cpp
+++ b/ngraph/core/src/op/variadic_split.cpp
@@ -13,7 +13,7 @@ using namespace std;
 using namespace ngraph;
 
-NGRAPH_RTTI_DEFINITION(op::v1::VariadicSplit, "VariadicSplit", 1);
+OPENVINO_RTTI_DEFINITION(op::v1::VariadicSplit, "VariadicSplit", 1);
 
 op::v1::VariadicSplit::VariadicSplit(const Output<Node>& data,
                                      const Output<Node>& axis,
diff --git a/ngraph/core/src/op/xor.cpp b/ngraph/core/src/op/xor.cpp
index 00610fc9cebf01..3277da8011626c 100644
--- a/ngraph/core/src/op/xor.cpp
+++ b/ngraph/core/src/op/xor.cpp
@@ -12,7 +12,7 @@ using namespace std;
 using namespace ngraph;
 
-NGRAPH_RTTI_DEFINITION(op::v1::LogicalXor, "LogicalXor", 1, util::BinaryElementwiseLogical);
+OPENVINO_RTTI_DEFINITION(op::v1::LogicalXor, "LogicalXor", 1, util::BinaryElementwiseLogical);
 
 op::v1::LogicalXor::LogicalXor(const Output<Node>& arg0,
                                const Output<Node>& arg1,
@@ -75,7 +75,7 @@ bool op::v1::LogicalXor::has_evaluate() const {
     return false;
 }
 
-constexpr NodeTypeInfo op::v0::Xor::type_info;
+OPENVINO_RTTI_DEFINITION(op::v0::Xor, "Xor", 0, util::BinaryElementwiseLogical);
 
 op::v0::Xor::Xor(const Output<Node>& arg0, const Output<Node>& arg1, const AutoBroadcastSpec& auto_broadcast)
     : BinaryElementwiseLogical(arg0, arg1, auto_broadcast) {