From 2c97cbd72cf08f2fc5f7c98ce43578d9bbadd575 Mon Sep 17 00:00:00 2001
From: Ilya Churaev
Date: Thu, 2 Sep 2021 15:14:15 +0300
Subject: [PATCH] Moved operations G-L to ov namespace

---
 ngraph/core/include/ngraph/graph_util.hpp     |   9 +-
 ngraph/core/include/ngraph/node.hpp           |   9 +-
 ngraph/core/include/ngraph/op/gather.hpp      |  66 +--
 .../include/ngraph/op/gather_elements.hpp     |  27 +-
 ngraph/core/include/ngraph/op/gather_nd.hpp   |  28 +-
 ngraph/core/include/ngraph/op/gather_tree.hpp |  26 +-
 ngraph/core/include/ngraph/op/gelu.hpp        |  70 +--
 ngraph/core/include/ngraph/op/greater.hpp     |  21 +-
 ngraph/core/include/ngraph/op/greater_eq.hpp  |  21 +-
 ngraph/core/include/ngraph/op/grn.hpp         |  27 +-
 ngraph/core/include/ngraph/op/group_conv.hpp  | 261 +-----------
 ngraph/core/include/ngraph/op/gru_cell.hpp    | 147 +------
 .../core/include/ngraph/op/gru_sequence.hpp   |  37 +-
 .../core/include/ngraph/op/hard_sigmoid.hpp   |  23 +-
 ngraph/core/include/ngraph/op/hsigmoid.hpp    |  22 +-
 ngraph/core/include/ngraph/op/hswish.hpp      |  22 +-
 ngraph/core/include/ngraph/op/idft.hpp        |  25 +-
 ngraph/core/include/ngraph/op/if.hpp          |  80 +---
 ngraph/core/include/ngraph/op/interpolate.hpp | 348 +--------------
 ngraph/core/include/ngraph/op/less.hpp        |  21 +-
 ngraph/core/include/ngraph/op/less_eq.hpp     |  22 +-
 ngraph/core/include/ngraph/op/log.hpp         |  18 +-
 ngraph/core/include/ngraph/op/log_softmax.hpp |  31 +-
 ngraph/core/include/ngraph/op/loop.hpp        |  76 +---
 ngraph/core/include/ngraph/op/lrn.hpp         |  66 +--
 ngraph/core/include/ngraph/op/lstm_cell.hpp   | 381 +----------------
 .../core/include/ngraph/op/lstm_sequence.hpp  | 174 +-------
 ngraph/core/include/ngraph/op/parameter.hpp   |  68 +--
 ngraph/core/include/ngraph/op/result.hpp      |  52 +--
 .../include/ngraph/op/tensor_iterator.hpp     |  27 +-
 ngraph/core/include/openvino/core/node.hpp    |  14 +-
 ngraph/core/include/openvino/op/gather.hpp    |  80 ++++
 .../include/openvino/op/gather_elements.hpp   |  39 ++
 ngraph/core/include/openvino/op/gather_nd.hpp |  40 ++
 .../core/include/openvino/op/gather_tree.hpp  |  38 ++
 ngraph/core/include/openvino/op/gelu.hpp      |  81 ++++
 ngraph/core/include/openvino/op/greater.hpp   |  33 ++
 .../core/include/openvino/op/greater_eq.hpp   |  33 ++
 ngraph/core/include/openvino/op/grn.hpp       |  41 ++
 .../core/include/openvino/op/group_conv.hpp   | 273 ++++++++++++
 ngraph/core/include/openvino/op/gru_cell.hpp  | 160 +++++++
 .../core/include/openvino/op/gru_sequence.hpp |  54 +++
 .../core/include/openvino/op/hard_sigmoid.hpp |  35 ++
 ngraph/core/include/openvino/op/hsigmoid.hpp  |  35 ++
 ngraph/core/include/openvino/op/hswish.hpp    |  35 ++
 ngraph/core/include/openvino/op/idft.hpp      |  41 ++
 ngraph/core/include/openvino/op/if.hpp        |  94 +++++
 .../core/include/openvino/op/interpolate.hpp  | 360 ++++++++++++++++
 ngraph/core/include/openvino/op/less.hpp      |  33 ++
 ngraph/core/include/openvino/op/less_eq.hpp   |  34 ++
 ngraph/core/include/openvino/op/log.hpp       |  30 ++
 .../core/include/openvino/op/log_softmax.hpp  |  43 ++
 ngraph/core/include/openvino/op/loop.hpp      |  90 ++++
 ngraph/core/include/openvino/op/lrn.hpp       |  78 ++++
 ngraph/core/include/openvino/op/lstm_cell.hpp | 397 ++++++++++++++++++
 .../include/openvino/op/lstm_sequence.hpp     | 196 +++++++++
 ngraph/core/include/openvino/op/parameter.hpp |  78 ++++
 ngraph/core/include/openvino/op/result.hpp    |  61 +++
 .../include/openvino/op/tensor_iterator.hpp   |  43 ++
 ngraph/core/src/op/gather.cpp                 |   6 +-
 ngraph/core/src/op/gather_elements.cpp        |   2 +-
 ngraph/core/src/op/gather_nd.cpp              |   2 +-
 ngraph/core/src/op/gather_tree.cpp            |   2 +-
 ngraph/core/src/op/gelu.cpp                   |   8 +-
 ngraph/core/src/op/greater.cpp                |   2 +-
 ngraph/core/src/op/greater_eq.cpp             |   2 +-
 ngraph/core/src/op/grn.cpp                    |   2 +-
 ngraph/core/src/op/group_conv.cpp             |  10 +-
 ngraph/core/src/op/gru_cell.cpp               |   8 +-
 ngraph/core/src/op/gru_sequence.cpp           |   2 +-
 ngraph/core/src/op/hard_sigmoid.cpp           |   6 +-
 ngraph/core/src/op/hsigmoid.cpp               |   2 +-
 ngraph/core/src/op/hswish.cpp                 |   2 +-
 ngraph/core/src/op/idft.cpp                   |   2 +-
 ngraph/core/src/op/if.cpp                     |  18 +-
 ngraph/core/src/op/interpolate.cpp            |  18 +-
 ngraph/core/src/op/less.cpp                   |   2 +-
 ngraph/core/src/op/less_eq.cpp                |   2 +-
 ngraph/core/src/op/log.cpp                    |   2 +-
 ngraph/core/src/op/log_softmax.cpp            |   2 +-
 ngraph/core/src/op/loop.cpp                   |   8 +-
 ngraph/core/src/op/lrn.cpp                    |   6 +-
 ngraph/core/src/op/lstm_cell.cpp              |  20 +-
 ngraph/core/src/op/lstm_sequence.cpp          |   4 +-
 ngraph/core/src/op/parameter.cpp              |   2 +-
 ngraph/core/src/op/result.cpp                 |   2 +-
 ngraph/core/src/op/tensor_iterator.cpp        |   2 +-
 87 files changed, 2716 insertions(+), 2204 deletions(-)
 create mode 100644 ngraph/core/include/openvino/op/gather.hpp
 create mode 100644 ngraph/core/include/openvino/op/gather_elements.hpp
 create mode 100644 ngraph/core/include/openvino/op/gather_nd.hpp
 create mode 100644 ngraph/core/include/openvino/op/gather_tree.hpp
 create mode 100644 ngraph/core/include/openvino/op/gelu.hpp
 create mode 100644 ngraph/core/include/openvino/op/greater.hpp
 create mode 100644 ngraph/core/include/openvino/op/greater_eq.hpp
 create mode 100644 ngraph/core/include/openvino/op/grn.hpp
 create mode 100644 ngraph/core/include/openvino/op/group_conv.hpp
 create mode 100644 ngraph/core/include/openvino/op/gru_cell.hpp
 create mode 100644 ngraph/core/include/openvino/op/gru_sequence.hpp
 create mode 100644 ngraph/core/include/openvino/op/hard_sigmoid.hpp
 create mode 100644 ngraph/core/include/openvino/op/hsigmoid.hpp
 create mode 100644 ngraph/core/include/openvino/op/hswish.hpp
 create mode 100644 ngraph/core/include/openvino/op/idft.hpp
 create mode 100644 ngraph/core/include/openvino/op/if.hpp
 create mode 100644 ngraph/core/include/openvino/op/interpolate.hpp
 create mode 100644 ngraph/core/include/openvino/op/less.hpp
 create mode 100644 ngraph/core/include/openvino/op/less_eq.hpp
 create mode 100644 ngraph/core/include/openvino/op/log.hpp
 create mode 100644 ngraph/core/include/openvino/op/log_softmax.hpp
 create mode 100644 ngraph/core/include/openvino/op/loop.hpp
 create mode 100644 ngraph/core/include/openvino/op/lrn.hpp
 create mode 100644 ngraph/core/include/openvino/op/lstm_cell.hpp
 create mode 100644 ngraph/core/include/openvino/op/lstm_sequence.hpp
 create mode 100644 ngraph/core/include/openvino/op/parameter.hpp
 create mode 100644 ngraph/core/include/openvino/op/result.hpp
 create mode 100644 ngraph/core/include/openvino/op/tensor_iterator.hpp
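The whole patch follows one pattern, visible in the first headers below: each class body is deleted from the ngraph header and replaced by a `using` alias for the implementation that now lives in the `ov` namespace, so existing ngraph call sites keep compiling against the same type. A minimal sketch of that guarantee (illustrative only; it assumes the patched headers are on the include path):

    #include <type_traits>

    #include "ngraph/op/gather.hpp"

    // After this patch the old and the new name denote one class, not two copies,
    // so the alias is source- and binary-compatible for downstream code.
    static_assert(std::is_same<ngraph::op::v1::Gather, ov::op::v1::Gather>::value,
                  "ngraph::op::v1::Gather is an alias of ov::op::v1::Gather");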
diff --git a/ngraph/core/include/ngraph/graph_util.hpp b/ngraph/core/include/ngraph/graph_util.hpp
index 0fc8ae0221b986..7a3900dd10ff64 100644
--- a/ngraph/core/include/ngraph/graph_util.hpp
+++ b/ngraph/core/include/ngraph/graph_util.hpp
@@ -18,11 +18,18 @@
 #include "ngraph/function.hpp"
 #include "ngraph/node.hpp"
 
+namespace ov {
+namespace op {
+namespace v0 {
+class Parameter;
+}
+}  // namespace op
+}  // namespace ov
 namespace ngraph {
 
 namespace op {
 namespace v0 {
-class Parameter;
+using ov::op::v0::Parameter;
 }
 }  // namespace op
diff --git a/ngraph/core/include/ngraph/node.hpp b/ngraph/core/include/ngraph/node.hpp
index 8d4259f8c34722..5910b2b90f5765 100644
--- a/ngraph/core/include/ngraph/node.hpp
+++ b/ngraph/core/include/ngraph/node.hpp
@@ -38,6 +38,13 @@
 #include "ngraph/variant.hpp"
 #include "openvino/core/node.hpp"
 
+namespace ov {
+namespace op {
+namespace v0 {
+class Result;
+}
+}  // namespace op
+}  // namespace ov
 namespace ngraph {
 
 using ov::Node;
@@ -52,7 +59,7 @@ using HostTensorVector = std::vector<HostTensorPtr>;
 
 namespace op {
 namespace v0 {
-class Result;
+using ov::op::v0::Result;
 }
 }  // namespace op
diff --git a/ngraph/core/include/ngraph/op/gather.hpp b/ngraph/core/include/ngraph/op/gather.hpp
index c81397fcf120f9..31f2e1d3ee7428 100644
--- a/ngraph/core/include/ngraph/op/gather.hpp
+++ b/ngraph/core/include/ngraph/op/gather.hpp
@@ -5,76 +5,18 @@
 #pragma once
 
 #include "ngraph/op/util/gather_base.hpp"
+#include "openvino/op/gather.hpp"
 
 namespace ngraph {
 namespace op {
 namespace v1 {
-/// \brief Gather slices from axis of data according to indices
-class NGRAPH_API Gather : public op::util::GatherBase {
-public:
-    NGRAPH_RTTI_DECLARATION;
-    static const int64_t AXIS_NOT_SET_VALUE = std::numeric_limits<int64_t>::max();
-    Gather() = default;
-    /// \param data The tensor from which slices are gathered
-    /// \param indices Tensor with indexes to gather
-    /// \param axis The tensor is a dimension index to gather data from
-    Gather(const Output<Node>& params, const Output<Node>& indices, const Output<Node>& axis);
-
-    bool visit_attributes(AttributeVisitor& visitor) override;
-    int64_t get_axis() const override;
-
-    std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
-};
+using ov::op::v1::Gather;
 }  // namespace v1
-
 namespace v7 {
-/// \brief Gather slices from axis of data according to indices
-class NGRAPH_API Gather : public op::util::GatherBase {
-public:
-    NGRAPH_RTTI_DECLARATION;
-    Gather() = default;
-
-    /// \param data The tensor from which slices are gathered
-    /// \param indices Tensor with indexes to gather
-    /// \param axis The tensor is a dimension index to gather data from
-    /// \param batch_dims The number of batch dimension in data and indices tensors.
-    ///                   If batch_dims = 0 Gather v7 is identical to Gather v1.
-    Gather(const Output<Node>& data,
-           const Output<Node>& indices,
-           const Output<Node>& axis,
-           const int64_t batch_dims = 0);
-
-    bool visit_attributes(AttributeVisitor& visitor) override;
-    void validate_and_infer_types() override;
-    int64_t get_batch_dims() const;
-
-    std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
-};
+using ov::op::v7::Gather;
 }  // namespace v7
-
 namespace v8 {
-/// \brief Gather slices from axis of data according to indices. Negative indices
-/// are supported and indicate reverse indexing from the end
-class NGRAPH_API Gather : public op::util::GatherBase {
-public:
-    NGRAPH_RTTI_DECLARATION;
-    Gather() = default;
-
-    /// \param data The tensor from which slices are gathered
-    /// \param indices Tensor with indexes to gather
-    /// \param axis The tensor is a dimension index to gather data from
-    /// \param batch_dims The number of batch dimension in data and indices tensors.
-    Gather(const Output<Node>& data,
-           const Output<Node>& indices,
-           const Output<Node>& axis,
-           const int64_t batch_dims = 0);
-
-    bool visit_attributes(AttributeVisitor& visitor) override;
-    void validate_and_infer_types() override;
-    int64_t get_batch_dims() const;
-
-    std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
-};
+using ov::op::v8::Gather;
 }  // namespace v8
 }  // namespace op
 }  // namespace ngraph
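The three Gather opsets aliased above differ in what the deleted doc comments describe: v7 adds `batch_dims`, and v8 additionally accepts negative indices (reverse indexing from the end). A sketch of the v8 flavour; the helper name and the concrete shapes are illustrative, not part of the patch:

    #include <memory>

    #include "ngraph/op/constant.hpp"
    #include "ngraph/op/gather.hpp"
    #include "ngraph/op/parameter.hpp"

    std::shared_ptr<ngraph::Node> make_batched_gather() {
        // data: [2, 5, 4], indices: [2, 3]; with batch_dims = 1 the leading
        // dimension (2) is shared, so the result shape is [2, 3, 4].
        auto data = std::make_shared<ngraph::op::v0::Parameter>(ngraph::element::f32, ngraph::Shape{2, 5, 4});
        auto indices = ngraph::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{2, 3}, {0, 1, -1, 2, -2, 4});
        auto axis = ngraph::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{}, {1});
        return std::make_shared<ngraph::op::v8::Gather>(data, indices, axis, 1);  // negative indices: v8 only
    }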
diff --git a/ngraph/core/include/ngraph/op/gather_elements.hpp b/ngraph/core/include/ngraph/op/gather_elements.hpp
index 36c36caeec4f54..9dbfa1c1644299 100644
--- a/ngraph/core/include/ngraph/op/gather_elements.hpp
+++ b/ngraph/core/include/ngraph/op/gather_elements.hpp
@@ -5,35 +5,12 @@
 #pragma once
 
 #include "ngraph/op/op.hpp"
+#include "openvino/op/gather_elements.hpp"
 
 namespace ngraph {
 namespace op {
 namespace v6 {
-/// \brief GatherElements operation
-///
-class NGRAPH_API GatherElements : public Op {
-public:
-    NGRAPH_RTTI_DECLARATION;
-    GatherElements() = default;
-
-    /// \brief Constructs a GatherElements operation.
-    ///
-    /// \param data Node producing data that are gathered
-    /// \param indices Node producing indices by which the operation gathers elements
-    /// \param axis specifies axis along which indices are specified
-    GatherElements(const Output<Node>& data, const Output<Node>& indices, const int64_t axis);
-
-    void validate_and_infer_types() override;
-    bool visit_attributes(AttributeVisitor& visitor) override;
-    std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
-
-    int64_t get_axis() const {
-        return m_axis;
-    }
-
-private:
-    int64_t m_axis;
-};
+using ov::op::v6::GatherElements;
 }  // namespace v6
 }  // namespace op
 }  // namespace ngraph
diff --git a/ngraph/core/include/ngraph/op/gather_nd.hpp b/ngraph/core/include/ngraph/op/gather_nd.hpp
index fffcc96e653bf1..9689be8b854b0b 100644
--- a/ngraph/core/include/ngraph/op/gather_nd.hpp
+++ b/ngraph/core/include/ngraph/op/gather_nd.hpp
@@ -5,36 +5,12 @@
 #pragma once
 
 #include "ngraph/op/op.hpp"
+#include "openvino/op/gather_nd.hpp"
 
 namespace ngraph {
 namespace op {
 namespace v5 {
-/// \brief GatherND operation
-///
-class NGRAPH_API GatherND : public Op {
-public:
-    NGRAPH_RTTI_DECLARATION;
-    GatherND() = default;
-
-    /// \brief Constructs a GatherND operation.
-    ///
-    /// \param data Node producing data that are gathered
-    /// \param indices Node producing indices by which the operation gathers elements
-    /// or slices from data
-    /// \param batch_dims Specifies a number of batch dimensions
-    GatherND(const Output<Node>& data, const Output<Node>& indices, const size_t batch_dims = 0);
-
-    void validate_and_infer_types() override;
-    bool visit_attributes(AttributeVisitor& visitor) override;
-    virtual std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
-
-    size_t get_batch_dims() const {
-        return m_batch_dims;
-    }
-
-private:
-    size_t m_batch_dims;
-};
+using ov::op::v5::GatherND;
 }  // namespace v5
 }  // namespace op
 }  // namespace ngraph
diff --git a/ngraph/core/include/ngraph/op/gather_tree.hpp b/ngraph/core/include/ngraph/op/gather_tree.hpp
index 792531f03e2d6a..9f015b9bcd11ea 100644
--- a/ngraph/core/include/ngraph/op/gather_tree.hpp
+++ b/ngraph/core/include/ngraph/op/gather_tree.hpp
@@ -5,34 +5,12 @@
 #pragma once
 
 #include "ngraph/op/op.hpp"
+#include "openvino/op/gather_tree.hpp"
 
 namespace ngraph {
 namespace op {
 namespace v1 {
-/// \brief Generates the complete beams from the ids per each step and the parent beam
-/// ids.
-class NGRAPH_API GatherTree : public Op {
-public:
-    NGRAPH_RTTI_DECLARATION;
-
-    GatherTree() = default;
-    /// \param step_ids    Tensor of shape [MAX_TIME, BATCH_SIZE, BEAM_WIDTH] with
-    ///                    indices from per each step
-    /// \param parent_idx  Tensor of shape [MAX_TIME, BATCH_SIZE, BEAM_WIDTH] with
-    ///                    parent beam indices
-    /// \param max_seq_len Tensor of shape [BATCH_SIZE] with maximum lengths for each
-    ///                    sequence in the batch
-    /// \param end_token   Tensor of shape [MAX_TIME, BATCH_SIZE, BEAM_WIDTH]
-    GatherTree(const Output<Node>& step_ids,
-               const Output<Node>& parent_idx,
-               const Output<Node>& max_seq_len,
-               const Output<Node>& end_token);
-
-    bool visit_attributes(AttributeVisitor& visitor) override;
-    void validate_and_infer_types() override;
-
-    virtual std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
-};
+using ov::op::v1::GatherTree;
 }  // namespace v1
 }  // namespace op
 }  // namespace ngraph
diff --git a/ngraph/core/include/ngraph/op/gelu.hpp b/ngraph/core/include/ngraph/op/gelu.hpp
index e662bada82f9a1..08b0e813205af9 100644
--- a/ngraph/core/include/ngraph/op/gelu.hpp
+++ b/ngraph/core/include/ngraph/op/gelu.hpp
@@ -7,81 +7,19 @@
 #include "ngraph/node.hpp"
 #include "ngraph/op/op.hpp"
 #include "ngraph/op/util/unary_elementwise_arithmetic.hpp"
+#include "openvino/op/gelu.hpp"
 
 namespace ngraph {
 namespace op {
 namespace v0 {
-/// \brief Gaussian Error Linear Unit
-/// f(x) = 0.5 * x * (1 + erf( x / sqrt(2) ))
-class NGRAPH_API Gelu : public Op {
-public:
-    NGRAPH_RTTI_DECLARATION;
-
-    Gelu();
-    /// \brief Constructs a Gelu operation.
-    ///
-    /// \param data Input tensor
-    Gelu(const Output<Node>& data);
-
-    bool visit_attributes(AttributeVisitor& visitor) override;
-
-    void validate_and_infer_types() override;
-
-    std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
-};
+using ov::op::v0::Gelu;
 }  // namespace v0
 using v0::Gelu;
 
-/// \brief Specifies the approximation to calculate Gelu
-enum class GeluApproximationMode { TANH, ERF };
-NGRAPH_API std::ostream& operator<<(std::ostream& s, const GeluApproximationMode& type);
+using ov::op::GeluApproximationMode;
 
 namespace v7 {
-/// \brief Gaussian Error Linear Unit
-/// f(x) = 0.5 * x * (1 + erf( x / sqrt(2) )) for "approximation" = "erf"
-/// f(x) = 0.5 * x * (1 + tanh(sqrt(2 / pi) * (x + 0.044715 * x^3))) for "approximation" =
-/// "tanh"
-class NGRAPH_API Gelu : public util::UnaryElementwiseArithmetic {
-public:
-    NGRAPH_RTTI_DECLARATION;
-
-    Gelu() = default;
-    /// \brief Constructs a Gelu operation.
-    ///
-    /// \param data Input tensor
-    /// \param mode Approximation mode
-    Gelu(const Output<Node>& data, GeluApproximationMode mode = GeluApproximationMode::ERF);
-
-    bool visit_attributes(AttributeVisitor& visitor) override;
-
-    void validate_and_infer_types() override;
-
-    bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
-    bool has_evaluate() const override;
-
-    std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
-
-    GeluApproximationMode get_approximation_mode() const;
-
-private:
-    GeluApproximationMode m_approximation_mode = GeluApproximationMode::ERF;
-};
+using ov::op::v7::Gelu;
 }  // namespace v7
 }  // namespace op
 }  // namespace ngraph
-
-namespace ov {
-template <>
-class NGRAPH_API AttributeAdapter<ngraph::op::GeluApproximationMode>
-    : public EnumAttributeAdapterBase<ngraph::op::GeluApproximationMode> {
-public:
-    AttributeAdapter(ngraph::op::GeluApproximationMode& value)
-        : EnumAttributeAdapterBase<ngraph::op::GeluApproximationMode>(value) {}
-
-    static constexpr DiscreteTypeInfo type_info{"AttributeAdapter<op::GeluApproximationMode>", 0};
-    const DiscreteTypeInfo& get_type_info() const override {
-        return type_info;
-    }
-};
-
-}  // namespace ov
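v0::Gelu is the fixed erf form, while v7::Gelu takes the `GeluApproximationMode` re-exported above, selecting between the erf and tanh formulas from the deleted comment. A sketch of picking the tanh approximation (hypothetical helper, assuming the patched headers):

    #include <memory>

    #include "ngraph/op/gelu.hpp"
    #include "ngraph/op/parameter.hpp"

    std::shared_ptr<ngraph::Node> make_gelu_tanh() {
        auto data = std::make_shared<ngraph::op::v0::Parameter>(ngraph::element::f32, ngraph::Shape{2, 4});
        // GeluApproximationMode now names ov::op::GeluApproximationMode under both namespaces.
        return std::make_shared<ngraph::op::v7::Gelu>(data, ngraph::op::GeluApproximationMode::TANH);
    }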
diff --git a/ngraph/core/include/ngraph/op/greater.hpp b/ngraph/core/include/ngraph/op/greater.hpp
index b89089fbb5fd34..1302e88d867920 100644
--- a/ngraph/core/include/ngraph/op/greater.hpp
+++ b/ngraph/core/include/ngraph/op/greater.hpp
@@ -5,29 +5,12 @@
 #pragma once
 
 #include "ngraph/op/util/binary_elementwise_comparison.hpp"
+#include "openvino/op/greater.hpp"
 
 namespace ngraph {
 namespace op {
 namespace v1 {
-/// \brief Elementwise greater-than operation.
-class NGRAPH_API Greater : public util::BinaryElementwiseComparison {
-public:
-    NGRAPH_RTTI_DECLARATION;
-    /// \brief Constructs a greater-than operation.
-    Greater() : util::BinaryElementwiseComparison(AutoBroadcastSpec::NUMPY) {}
-    /// \brief Constructs a greater-than operation.
-    ///
-    /// \param arg0 Node that produces the first input tensor.
-    /// \param arg1 Node that produces the second input tensor.
-    /// \param auto_broadcast Auto broadcast specification
-    Greater(const Output<Node>& arg0,
-            const Output<Node>& arg1,
-            const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY));
-
-    virtual std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
-    bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
-    bool has_evaluate() const override;
-};
+using ov::op::v1::Greater;
 }  // namespace v1
 }  // namespace op
 }  // namespace ngraph
diff --git a/ngraph/core/include/ngraph/op/greater_eq.hpp b/ngraph/core/include/ngraph/op/greater_eq.hpp
index f9e9fc61a583a7..061628e004fedf 100644
--- a/ngraph/core/include/ngraph/op/greater_eq.hpp
+++ b/ngraph/core/include/ngraph/op/greater_eq.hpp
@@ -5,29 +5,12 @@
 #pragma once
 
 #include "ngraph/op/util/binary_elementwise_comparison.hpp"
+#include "openvino/op/greater_eq.hpp"
 
 namespace ngraph {
 namespace op {
 namespace v1 {
-/// \brief Elementwise greater-than-or-equal operation.
-class NGRAPH_API GreaterEqual : public util::BinaryElementwiseComparison {
-public:
-    NGRAPH_RTTI_DECLARATION;
-    /// \brief Constructs a greater-than-or-equal operation.
-    GreaterEqual() : util::BinaryElementwiseComparison(AutoBroadcastSpec::NUMPY) {}
-    /// \brief Constructs a greater-than-or-equal operation.
-    ///
-    /// \param arg0 Node that produces the first input tensor.
-    /// \param arg1 Node that produces the second input tensor.
-    /// \param auto_broadcast Auto broadcast specification
-    GreaterEqual(const Output<Node>& arg0,
-                 const Output<Node>& arg1,
-                 const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY));
-
-    virtual std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
-    bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
-    bool has_evaluate() const override;
-};
+using ov::op::v1::GreaterEqual;
 }  // namespace v1
 }  // namespace op
 }  // namespace ngraph
diff --git a/ngraph/core/include/ngraph/op/grn.hpp b/ngraph/core/include/ngraph/op/grn.hpp
index 58471d0b882619..03133d31f05d71 100644
--- a/ngraph/core/include/ngraph/op/grn.hpp
+++ b/ngraph/core/include/ngraph/op/grn.hpp
@@ -7,35 +7,12 @@
 #include <memory>
 
 #include "ngraph/op/op.hpp"
+#include "openvino/op/grn.hpp"
 
 namespace ngraph {
 namespace op {
 namespace v0 {
-/// \brief Global Response Normalization with L2 norm (across channels only).
-///
-class NGRAPH_API GRN : public Op {
-public:
-    NGRAPH_RTTI_DECLARATION;
-
-    GRN() = default;
-    /// \brief Constructs a GRN operation.
-    ///
-    /// \param data - Node producing the input tensor
-    /// \param bias - The bias added to the variance.
-    ///
-    GRN(const Output<Node>& data, float bias);
-
-    void validate_and_infer_types() override;
-    bool visit_attributes(AttributeVisitor& visitor) override;
-    virtual std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
-
-    float get_bias() const {
-        return m_bias;
-    }
-
-protected:
-    float m_bias = 1.0f;
-};
+using ov::op::v0::GRN;
 }  // namespace v0
 using v0::GRN;
 }  // namespace op
diff --git a/ngraph/core/include/ngraph/op/group_conv.hpp b/ngraph/core/include/ngraph/op/group_conv.hpp
index 42d79149c87b2b..352093535e89a7 100644
--- a/ngraph/core/include/ngraph/op/group_conv.hpp
+++ b/ngraph/core/include/ngraph/op/group_conv.hpp
@@ -7,268 +7,13 @@
 #include "ngraph/op/convolution.hpp"
 #include "ngraph/op/op.hpp"
 #include "ngraph/op/util/attr_types.hpp"
+#include "openvino/op/group_conv.hpp"
 
 namespace ngraph {
 namespace op {
 namespace v1 {
-/// \brief Batched convolution operation, with optional window dilation and stride.
-class NGRAPH_API GroupConvolution : public Op {
-public:
-    NGRAPH_RTTI_DECLARATION;
-
-    /// \brief Constructs a batched convolution operation.
-    GroupConvolution() = default;
-    /// \brief Constructs a batched convolution operation.
-    ///
-    /// \param data_batch The node producing the input data batch tensor.<br>
-    /// `[N, C_IN, D1, ... Df]`
-    /// \param filters The node producing the filters tensor.<br>
-    /// `[GROUPS, FC_OUT, FC_IN, F1, ... Ff]`
-    /// \param strides The strides.<br>
-    /// `[f]`
-    /// \param dilations The dilations.<br>
-    /// `[f]`
-    /// \param pads_begin The beginning of padding shape.<br>
-    /// `[f]`
-    /// \param pads_end The end of padding shape.<br>
-    /// `[f]`
-    /// \param auto_pad The pad type for automatically computing padding sizes.<br>
-    /// `[f]`
-    ///
-    /// Output `[N, FC_OUT * GROUPS, R1, ... Rf]`
-    ///
-    GroupConvolution(const Output<Node>& data_batch,
-                     const Output<Node>& filters,
-                     const Strides& strides,
-                     const CoordinateDiff& pads_begin,
-                     const CoordinateDiff& pads_end,
-                     const Strides& dilations,
-                     const PadType& auto_pad = PadType::EXPLICIT);
-
-    bool visit_attributes(AttributeVisitor& visitor) override;
-    void validate_and_infer_types() override;
-
-    virtual std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
-    /// \return The strides.
-    const Strides& get_strides() const {
-        return m_strides;
-    }
-    void set_strides(const Strides& strides) {
-        m_strides = strides;
-    }
-    /// \return The dilations.
-    const Strides& get_dilations() const {
-        return m_dilations;
-    }
-    void set_dilations(const Strides& dilations) {
-        m_dilations = dilations;
-    }
-    /// \return The padding-below sizes (possibly negative).
-    const CoordinateDiff& get_pads_begin() const {
-        return m_pads_begin;
-    }
-    void set_pads_begin(const CoordinateDiff& pads_begin) {
-        m_pads_begin = pads_begin;
-    }
-    /// \return The padding-above sizes (possibly negative).
-    const CoordinateDiff& get_pads_end() const {
-        return m_pads_end;
-    }
-    void set_adding_above(const CoordinateDiff& pads_end) {
-        m_pads_end = pads_end;
-    }
-    /// \return The pad type for convolution.
-    const PadType& get_auto_pad() const {
-        return m_auto_pad;
-    }
-    void set_auto_pad(const PadType& auto_pad) {
-        m_auto_pad = auto_pad;
-    }
-    /// \return The default value for Convolution.
-    NGRAPH_SUPPRESS_DEPRECATED_START
-    virtual std::shared_ptr<Node> get_default_value() const override;
-    NGRAPH_SUPPRESS_DEPRECATED_END
-
-protected:
-    Strides m_strides;
-    Strides m_dilations;
-    CoordinateDiff m_pads_begin;
-    CoordinateDiff m_pads_end;
-    PadType m_auto_pad;
-};
-
-/// \brief Data batch backprop for batched convolution operation.
-class NGRAPH_API GroupConvolutionBackpropData : public Op {
-public:
-    NGRAPH_RTTI_DECLARATION;
-
-    /// \brief Constructs a batched-convolution data batch-backprop operation.
-    GroupConvolutionBackpropData();
-    // clang-format off
-    //
-    // \brief Constructs a batched-convolution data batch-backprop operation.
-    //
-    // \param data            The node producing data from forward-prop. Shape: [N,
-    //                        C_INPUT * GROUPS, X1, ..., XD].
-    // \param filter          The node producing the filter from forward-prop. Shape:
-    //                        [GROUPS, C_INPUT, C_OUTPUT, K_D, ..., K_1]
-    // \param output_shape    The shape of the data batch from forward-prop. Its size
-    //                        should be equal to number of data spatial dimensions.
-    // \param strides         The strides from forward-prop.
-    // \param pads_begin      The padding-below sizes from forward-prop.
-    // \param pads_end        The padding-above sizes from forward-prop.
-    // \param dilations       The dilations from forward-prop.
-    // \param auto_pad        The pad type for automatically computing padding sizes.
-    // \param output_padding  The output padding adds additional amount of paddings per
-    //                        each spatial axis in the output tensor.
-    //
-    // clang-format on
-    //
-    GroupConvolutionBackpropData(const Output<Node>& data,
-                                 const Output<Node>& filter,
-                                 const Output<Node>& output_shape,
-                                 const Strides& strides,
-                                 const CoordinateDiff& pads_begin,
-                                 const CoordinateDiff& pads_end,
-                                 const Strides& dilations,
-                                 const PadType& auto_pad = PadType::EXPLICIT,
-                                 const CoordinateDiff& output_padding = {});
-
-    // clang-format off
-    //
-    // \brief Constructs a batched-convolution data batch-backprop operation.
-    //
-    // \param data            The node producing data from forward-prop. Shape: [N,
-    //                        C_INPUT * GROUPS, X1, ..., XD].
-    // \param filter          The node producing the filter from forward-prop. Shape:
-    //                        [GROUPS, C_INPUT, C_OUTPUT, K_D, ..., K_1]
-    // \param output_shape    The shape of the data batch from forward-prop. Its size
-    //                        should be equal to number of data spatial dimensions.
-    // \param strides         The strides from forward-prop.
-    // \param dilations       The dilations from forward-prop.
-    // \param auto_pad        The pad type for automatically computing padding sizes.
-    // \param output_padding  The output padding adds additional amount of paddings per
-    //                        each spatial axis in the output tensor.
-    //
-    // clang-format on
-    //
-    GroupConvolutionBackpropData(const Output<Node>& data,
-                                 const Output<Node>& filter,
-                                 const Output<Node>& output_shape,
-                                 const Strides& strides,
-                                 const Strides& dilations,
-                                 const PadType& auto_pad,
-                                 const CoordinateDiff& output_padding = {});
-
-    // clang-format off
-    //
-    // \brief Constructs a batched-convolution data batch-backprop operation.
-    //
-    // \param data            The node producing data from forward-prop. Shape:
-    //                        [N, C_INPUT * GROUPS, X1, ..., XD].
-    // \param filter          The node producing the filter from forward-prop. Shape:
-    //                        [GROUPS, C_INPUT, C_OUTPUT, K_D, ..., K_1]
-    // \param strides         The strides from forward-prop.
-    // \param pads_begin      The padding-below sizes from forward-prop.
-    // \param pads_end        The padding-above sizes from forward-prop.
-    // \param dilations       The dilations from forward-prop.
-    // \param auto_pad        The pad type for automatically computing padding sizes.
-    // \param output_padding  The output padding adds additional amount of paddings per
-    //                        each spatial axis in the output tensor.
-    //
-    // clang-format on
-    GroupConvolutionBackpropData(const Output<Node>& data,
-                                 const Output<Node>& filter,
-                                 const Strides& strides,
-                                 const CoordinateDiff& pads_begin,
-                                 const CoordinateDiff& pads_end,
-                                 const Strides& dilations,
-                                 const PadType& auto_pad = PadType::EXPLICIT,
-                                 const CoordinateDiff& output_padding = {});
-    ///
-    /// \brief Calculates output spatial features size.
-    ///
-    /// \param[in] input_data_shape  The input data partial shape
-    /// \param[in] filters_shape     The filters partial shape
-    /// \param[in] strides           The strides values.
-    /// \param[in] dilations         The dilations values.
-    /// \param[in] pads_begin        The paddings at the beginning of axis.
-    /// \param[in] pads_end          The paddings at the end of axis.
-    /// \param[in] output_padding    The output padding values.
-    /// \param output_spatial_shape  The placeholder for computed output spatial
-    ///                              partial shape.
-    ///
-    void infer_conv_backprop_output_spatial_shape(const std::vector<Dimension>& input_data_shape,
-                                                  const std::vector<Dimension>& filters_shape,
-                                                  const Strides& strides,
-                                                  const Strides& dilations,
-                                                  const CoordinateDiff& pads_begin,
-                                                  const CoordinateDiff& pads_end,
-                                                  const CoordinateDiff& output_padding,
-                                                  std::vector<Dimension>& output_spatial_shape);
-
-    bool visit_attributes(AttributeVisitor& visitor) override;
-    virtual bool is_dynamic() const override;
-    void validate_and_infer_types() override;
-
-    virtual std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
-
-    /// \return The spatial shape of the output.
-    const PartialShape get_convolution_output_shape() const;
-    void set_output_shape(const Shape& output_shape);
-    /// \return The strides from the forward prop.
-    const Strides& get_strides() const {
-        return m_strides;
-    }
-    void set_strides(const Strides& strides) {
-        m_strides = strides;
-    }
-    /// \return The dilations from the forward prop.
-    const Strides& get_dilations() const {
-        return m_dilations;
-    }
-    void set_dilations(const Strides& dilations) {
-        m_dilations = dilations;
-    }
-    /// \return The number of pixels to add to the beginning along each axis.
-    const CoordinateDiff& get_pads_begin() const {
-        return m_pads_begin;
-    }
-    void set_pads_begin(const CoordinateDiff& pads_begin) {
-        m_pads_begin = pads_begin;
-    }
-    /// \return The number of pixels to add to the ending along each axis.
-    const CoordinateDiff& get_pads_end() const {
-        return m_pads_end;
-    }
-    void set_pads_end(const CoordinateDiff& pads_end) {
-        m_pads_end = pads_end;
-    }
-    /// \return The auto pad.
-    const PadType& get_auto_pad() const {
-        return m_auto_pad;
-    }
-    void set_auto_pad(const PadType& auto_pad) {
-        m_auto_pad = auto_pad;
-    }
-    /// \return The output padding.
-    const CoordinateDiff& get_output_padding() const {
-        return m_output_padding;
-    }
-    void set_output_padding(const CoordinateDiff& output_padding) {
-        m_output_padding = output_padding;
-    }
-
-protected:
-    Strides m_strides;
-    Strides m_dilations;
-    CoordinateDiff m_pads_begin;
-    CoordinateDiff m_pads_end;
-    PadType m_auto_pad;
-    CoordinateDiff m_output_padding;
-};
-
+using ov::op::v1::GroupConvolution;
+using ov::op::v1::GroupConvolutionBackpropData;
 }  // namespace v1
 }  // namespace op
 }  // namespace ngraph
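For the grouped convolution aliased above, the filter layout `[GROUPS, FC_OUT, FC_IN, F1, ... Ff]` implies `C_IN = GROUPS * FC_IN` on the data batch and `FC_OUT * GROUPS` output channels. A sketch with concrete, illustrative shapes (the helper is hypothetical):

    #include <memory>

    #include "ngraph/op/group_conv.hpp"
    #include "ngraph/op/parameter.hpp"

    std::shared_ptr<ngraph::Node> make_group_conv() {
        // data: [N=1, C_IN=8, 32, 32]; filters: [GROUPS=2, FC_OUT=4, FC_IN=4, 3, 3],
        // so C_IN = 2 * 4 and the output has 2 * 4 = 8 channels.
        auto data = std::make_shared<ngraph::op::v0::Parameter>(ngraph::element::f32, ngraph::Shape{1, 8, 32, 32});
        auto filters = std::make_shared<ngraph::op::v0::Parameter>(ngraph::element::f32, ngraph::Shape{2, 4, 4, 3, 3});
        return std::make_shared<ngraph::op::v1::GroupConvolution>(data,
                                                                  filters,
                                                                  ngraph::Strides{1, 1},         // strides
                                                                  ngraph::CoordinateDiff{1, 1},  // pads_begin
                                                                  ngraph::CoordinateDiff{1, 1},  // pads_end
                                                                  ngraph::Strides{1, 1});        // dilations
    }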
diff --git a/ngraph/core/include/ngraph/op/gru_cell.hpp b/ngraph/core/include/ngraph/op/gru_cell.hpp
index 3fa8e4d6c40757..ed5c7e532a21ee 100644
--- a/ngraph/core/include/ngraph/op/gru_cell.hpp
+++ b/ngraph/core/include/ngraph/op/gru_cell.hpp
@@ -13,155 +13,12 @@
 #include "ngraph/op/op.hpp"
 #include "ngraph/op/util/activation_functions.hpp"
 #include "ngraph/op/util/rnn_cell_base.hpp"
+#include "openvino/op/gru_cell.hpp"
 
 namespace ngraph {
 namespace op {
 namespace v3 {
-///
-/// \brief Class for GRU cell node.
-///
-/// \note It follows notation and equations defined as in ONNX standard:
-///       https://github.com/onnx/onnx/blob/master/docs/Operators.md#GRU
-///
-///       Note this class represents only single *cell* and not whole GRU *layer*.
-///
-class NGRAPH_API GRUCell : public util::RNNCellBase {
-public:
-    static constexpr NodeTypeInfo type_info{"GRUCell", 3};
-    const NodeTypeInfo& get_type_info() const override {
-        return type_info;
-    }
-    GRUCell();
-    ///
-    /// \brief Constructs GRUCell node.
-    ///
-    /// \param[in] X                     The input tensor with shape: [batch_size,
-    ///                                  input_size].
-    /// \param[in] initial_hidden_state  The hidden state tensor at current time step
-    ///                                  with shape: [batch_size, hidden_size].
-    /// \param[in] W                     The weight tensor with shape:
-    ///                                  [gates_count * hidden_size, input_size].
-    /// \param[in] R                     The recurrence weight tensor with shape:
-    ///                                  [gates_count * hidden_size, hidden_size].
-    /// \param[in] hidden_size           The number of hidden units for recurrent cell.
-    ///
-    GRUCell(const Output<Node>& X,
-            const Output<Node>& initial_hidden_state,
-            const Output<Node>& W,
-            const Output<Node>& R,
-            std::size_t hidden_size);
-
-    ///
-    /// \brief Constructs GRUCell node.
-    ///
-    /// \param[in] X                     The input tensor with shape: [batch_size,
-    ///                                  input_size].
-    /// \param[in] initial_hidden_state  The hidden state tensor at current time step
-    ///                                  with shape: [batch_size, hidden_size].
-    /// \param[in] W                     The weight tensor with shape:
-    ///                                  [gates_count * hidden_size, input_size].
-    /// \param[in] R                     The recurrence weight tensor with shape:
-    ///                                  [gates_count * hidden_size, hidden_size].
-    /// \param[in] hidden_size           The number of hidden units for recurrent cell.
-    /// \param[in] activations           The vector of activation functions used inside
-    ///                                  recurrent cell.
-    /// \param[in] activations_alpha     The vector of alpha parameters for activation
-    ///                                  functions in order respective to activation
-    ///                                  list.
-    /// \param[in] activations_beta      The vector of beta parameters for activation
-    ///                                  functions in order respective to activation
-    ///                                  list.
-    /// \param[in] clip                  The value defining clipping range [-clip,
-    ///                                  clip] on input of activation functions.
-    ///
-    GRUCell(const Output<Node>& X,
-            const Output<Node>& initial_hidden_state,
-            const Output<Node>& W,
-            const Output<Node>& R,
-            std::size_t hidden_size,
-            const std::vector<std::string>& activations,
-            const std::vector<float>& activations_alpha,
-            const std::vector<float>& activations_beta,
-            float clip,
-            bool linear_before_reset);
-
-    ///
-    /// \brief Constructs GRUCell node.
-    ///
-    /// \param[in] X                     The input tensor with shape: [batch_size,
-    ///                                  input_size].
-    /// \param[in] initial_hidden_state  The hidden state tensor at current time step
-    ///                                  with shape: [batch_size, hidden_size].
-    /// \param[in] W                     The weight tensor with shape: [gates_count *
-    ///                                  hidden_size, input_size].
-    /// \param[in] R                     The recurrence weight tensor with shape:
-    ///                                  [gates_count * hidden_size, hidden_size].
-    /// \param[in] hidden_size           The number of hidden units for recurrent cell.
-    /// \param[in] B                     The sum of biases (weight and recurrence) for
-    ///                                  update, reset and hidden gates.
-    ///                                  If linear_before_reset := true then biases for
-    ///                                  hidden gates are
-    ///                                  placed separately (weight and recurrence).
-    ///                                  Shape: [gates_count * hidden_size] if
-    ///                                  linear_before_reset := false
-    ///                                  Shape: [(gates_count + 1) * hidden_size] if
-    ///                                  linear_before_reset := true
-    /// \param[in] activations           The vector of activation functions used inside
-    ///                                  recurrent cell.
-    /// \param[in] activations_alpha     The vector of alpha parameters for activation
-    ///                                  functions in order respective to activation
-    ///                                  list.
-    /// \param[in] activations_beta      The vector of beta parameters for activation
-    ///                                  functions in order respective to activation
-    ///                                  list.
-    /// \param[in] clip                  The value defining clipping range [-clip,
-    ///                                  clip] on input of activation functions.
-    /// \param[in] linear_before_reset   Whether or not to apply the linear
-    ///                                  transformation before multiplying by the
-    ///                                  output of the reset gate.
-    ///
-    GRUCell(const Output<Node>& X,
-            const Output<Node>& initial_hidden_state,
-            const Output<Node>& W,
-            const Output<Node>& R,
-            const Output<Node>& B,
-            std::size_t hidden_size,
-            const std::vector<std::string>& activations = std::vector<std::string>{"sigmoid", "tanh"},
-            const std::vector<float>& activations_alpha = {},
-            const std::vector<float>& activations_beta = {},
-            float clip = 0.f,
-            bool linear_before_reset = false);
-
-    virtual void validate_and_infer_types() override;
-    bool visit_attributes(AttributeVisitor& visitor) override;
-    virtual std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
-
-    bool get_linear_before_reset() const {
-        return m_linear_before_reset;
-    }
-
-private:
-    /// brief Add and initialize bias input to all zeros.
-    void add_default_bias_input();
-
-    ///
-    /// \brief The Activation function f.
-    ///
-    util::ActivationFunction m_activation_f;
-    ///
-    /// \brief The Activation function g.
-    ///
-    util::ActivationFunction m_activation_g;
-
-    static constexpr std::size_t s_gates_count{3};
-    ///
-    /// \brief Control whether or not apply the linear transformation.
-    ///
-    /// \note The linear transformation may be applied when computing the output of
-    ///       hidden gate. It's done before multiplying by the output of the reset gate.
-    ///
-    bool m_linear_before_reset;
-};
+using ov::op::v3::GRUCell;
 }  // namespace v3
 }  // namespace op
 }  // namespace ngraph
diff --git a/ngraph/core/include/ngraph/op/gru_sequence.hpp b/ngraph/core/include/ngraph/op/gru_sequence.hpp
index f6e80740fccb6c..f5b5487331664d 100644
--- a/ngraph/core/include/ngraph/op/gru_sequence.hpp
+++ b/ngraph/core/include/ngraph/op/gru_sequence.hpp
@@ -10,45 +10,12 @@
 
 #include "ngraph/op/op.hpp"
 #include "ngraph/op/util/rnn_cell_base.hpp"
+#include "openvino/op/gru_sequence.hpp"
 
 namespace ngraph {
 namespace op {
 namespace v5 {
-class NGRAPH_API GRUSequence : public util::RNNCellBase {
-public:
-    NGRAPH_RTTI_DECLARATION;
-    GRUSequence();
-
-    GRUSequence(const Output<Node>& X,
-                const Output<Node>& H_t,
-                const Output<Node>& sequence_lengths,
-                const Output<Node>& W,
-                const Output<Node>& R,
-                const Output<Node>& B,
-                size_t hidden_size,
-                op::RecurrentSequenceDirection direction,
-                const std::vector<std::string>& activations = std::vector<std::string>{"sigmoid", "tanh"},
-                const std::vector<float>& activations_alpha = {},
-                const std::vector<float>& activations_beta = {},
-                float clip = 0.f,
-                bool linear_before_reset = false);
-
-    std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
-
-    void validate_and_infer_types() override;
-
-    bool visit_attributes(AttributeVisitor& visitor) override;
-    bool get_linear_before_reset() const {
-        return m_linear_before_reset;
-    }
-    op::RecurrentSequenceDirection get_direction() const {
-        return m_direction;
-    }
-
-protected:
-    op::RecurrentSequenceDirection m_direction;
-    bool m_linear_before_reset;
-};
+using ov::op::v5::GRUSequence;
 }  // namespace v5
 }  // namespace op
 }  // namespace ngraph
diff --git a/ngraph/core/include/ngraph/op/hard_sigmoid.hpp b/ngraph/core/include/ngraph/op/hard_sigmoid.hpp
index 1a6c56d2fe19fb..03b8a0e72daa3b 100644
--- a/ngraph/core/include/ngraph/op/hard_sigmoid.hpp
+++ b/ngraph/core/include/ngraph/op/hard_sigmoid.hpp
@@ -6,31 +6,12 @@
 
 #include "ngraph/node.hpp"
 #include "ngraph/op/op.hpp"
+#include "openvino/op/hard_sigmoid.hpp"
 
 namespace ngraph {
 namespace op {
 namespace v0 {
-/// \brief Parameterized, bounded sigmoid-like, piecewise linear
-///        function. min(max(alpha*x + beta, 0), 1)
-///
-class NGRAPH_API HardSigmoid : public Op {
-public:
-    NGRAPH_RTTI_DECLARATION;
-
-    HardSigmoid();
-
-    /// \brief Constructs a HardSigmoid operation.
-    ///
-    /// \param data       Input tensor.
-    /// \param[in] alpha  A scalar value representing the alpha parameter.
-    /// \param[in] beta   A scalar value representing the beta parameter.
-    ///
-    HardSigmoid(const Output<Node>& data, const Output<Node>& alpha, const Output<Node>& beta);
-
-    bool visit_attributes(AttributeVisitor& visitor) override;
-    virtual void validate_and_infer_types() override;
-    virtual std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
-};
+using ov::op::v0::HardSigmoid;
 }  // namespace v0
 using v0::HardSigmoid;
 }  // namespace op
diff --git a/ngraph/core/include/ngraph/op/hsigmoid.hpp b/ngraph/core/include/ngraph/op/hsigmoid.hpp
index 8913c6e809d9b8..8bd8dddb4b5613 100644
--- a/ngraph/core/include/ngraph/op/hsigmoid.hpp
+++ b/ngraph/core/include/ngraph/op/hsigmoid.hpp
@@ -7,30 +7,12 @@
 #include "ngraph/node.hpp"
 #include "ngraph/op/op.hpp"
 #include "ngraph/op/util/unary_elementwise_arithmetic.hpp"
+#include "openvino/op/hsigmoid.hpp"
 
 namespace ngraph {
 namespace op {
 namespace v5 {
-/// \brief A HSigmoid Activation Function
-/// f(x) = min(max(x + 3, 0), 6) / 6 or
-/// f(x) = min(ReLU(x + 3), 6) / 6
-///
-class NGRAPH_API HSigmoid : public ngraph::op::util::UnaryElementwiseArithmetic {
-public:
-    NGRAPH_RTTI_DECLARATION;
-    HSigmoid() = default;
-
-    /// \brief Constructs a HSigmoid operation.
-    ///
-    /// \param data Input tensor
-    HSigmoid(const Output<Node>& arg);
-
-    bool visit_attributes(AttributeVisitor& visitor) override;
-
-    virtual std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
-    bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
-    bool has_evaluate() const override;
-};
+using ov::op::v5::HSigmoid;
 }  // namespace v5
 }  // namespace op
 }  // namespace ngraph
diff --git a/ngraph/core/include/ngraph/op/hswish.hpp b/ngraph/core/include/ngraph/op/hswish.hpp
index ce469d5508cb4a..60aa9fb4dbf1bf 100644
--- a/ngraph/core/include/ngraph/op/hswish.hpp
+++ b/ngraph/core/include/ngraph/op/hswish.hpp
@@ -7,30 +7,12 @@
 #include "ngraph/node.hpp"
 #include "ngraph/op/op.hpp"
 #include "ngraph/op/util/unary_elementwise_arithmetic.hpp"
+#include "openvino/op/hswish.hpp"
 
 namespace ngraph {
 namespace op {
 namespace v4 {
-/// \brief A HSwish Activation Function
-/// f(x) = x * min(max(x + 3, 0), 6) / 6 or
-/// f(x) = x * min(ReLU(x + 3), 6) / 6
-///
-class NGRAPH_API HSwish : public ngraph::op::util::UnaryElementwiseArithmetic {
-public:
-    NGRAPH_RTTI_DECLARATION;
-    HSwish() = default;
-
-    /// \brief Constructs a HSwish (hard version of Swish) operation.
-    ///
-    /// \param data Input tensor
-    HSwish(const Output<Node>& arg);
-
-    bool visit_attributes(AttributeVisitor& visitor) override;
-
-    virtual std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
-    bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
-    bool has_evaluate() const override;
-};
+using ov::op::v4::HSwish;
 }  // namespace v4
 }  // namespace op
 }  // namespace ngraph
diff --git a/ngraph/core/include/ngraph/op/idft.hpp b/ngraph/core/include/ngraph/op/idft.hpp
index 7955cbcad829cf..bddd5873814c20 100644
--- a/ngraph/core/include/ngraph/op/idft.hpp
+++ b/ngraph/core/include/ngraph/op/idft.hpp
@@ -11,33 +11,12 @@
 #include "ngraph/op/op.hpp"
 #include "ngraph/op/util/attr_types.hpp"
 #include "ngraph/op/util/fft_base.hpp"
+#include "openvino/op/idft.hpp"
 
 namespace ngraph {
 namespace op {
 namespace v7 {
-/// \brief An operation IDFT that computes the inverse discrete Fourier transformation.
-class NGRAPH_API IDFT : public util::FFTBase {
-public:
-    NGRAPH_RTTI_DECLARATION;
-    IDFT() = default;
-
-    /// \brief Constructs a IDFT operation. IDFT is performed for full size axes.
-    ///
-    /// \param data  Input data
-    /// \param axes  Axes to perform IDFT
-    IDFT(const Output<Node>& data, const Output<Node>& axes);
-
-    /// \brief Constructs a IDFT operation.
-    ///
-    /// \param data  Input data
-    /// \param axes  Axes to perform IDFT
-    /// \param signal_size Signal sizes for 'axes'
-    IDFT(const Output<Node>& data, const Output<Node>& axes, const Output<Node>& signal_size);
-
-    bool visit_attributes(AttributeVisitor& visitor) override;
-
-    std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
-};
+using ov::op::v7::IDFT;
 }  // namespace v7
 }  // namespace op
 }  // namespace ngraph
diff --git a/ngraph/core/include/ngraph/op/if.hpp b/ngraph/core/include/ngraph/op/if.hpp
index 32ed1d5b8467c1..f0cea6373ef9ce 100644
--- a/ngraph/core/include/ngraph/op/if.hpp
+++ b/ngraph/core/include/ngraph/op/if.hpp
@@ -9,86 +9,12 @@
 #include "ngraph/function.hpp"
 #include "ngraph/op/parameter.hpp"
 #include "ngraph/op/util/multi_subgraph_base.hpp"
+#include "openvino/op/if.hpp"
 
 namespace ngraph {
 namespace op {
 namespace v8 {
-/// \brief If operation.
-class NGRAPH_API If : public util::MultiSubGraphOp {
-public:
-    enum BodyIndexes { THEN_BODY_INDEX = 0, ELSE_BODY_INDEX = 1 };
-
-    NGRAPH_RTTI_DECLARATION;
-    bool visit_attributes(AttributeVisitor& visitor) override;
-
-    /// \brief Constructs If with condition
-    ///
-    /// \param execution_condition condition node.
-    If(const Output<Node>& execution_condition);
-    If();
-
-    std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
-
-    /// \brief gets then_body as ngraph::Function.
-    ///
-    /// \return then_body as ngraph::Function.
-    const std::shared_ptr<Function>& get_then_body() const {
-        return m_bodies[THEN_BODY_INDEX];
-    }
-
-    /// \brief gets else_body as ngraph::Function.
-    ///
-    /// \return else_body as ngraph::Function.
-    const std::shared_ptr<Function>& get_else_body() const {
-        return m_bodies[ELSE_BODY_INDEX];
-    }
-
-    /// \brief sets new ngraph::Function as new then_body.
-    ///
-    /// \param body new body for 'then' branch.
-    void set_then_body(const std::shared_ptr<Function>& body) {
-        m_bodies[THEN_BODY_INDEX] = body;
-    }
-
-    /// \brief sets new ngraph::Function as new else_body.
-    ///
-    /// \param body new body for 'else' branch.
-    void set_else_body(const std::shared_ptr<Function>& body) {
-        m_bodies[ELSE_BODY_INDEX] = body;
-    }
-
-    /// \brief sets new input to the operation associated with parameters
-    /// of each sub-graphs
-    ///
-    /// \param value input to operation
-    /// \param then_parameter parameter for then_body or nullptr
-    /// \param else_parameter parameter for else_body or nullptr
-    void set_input(const Output<Node>& value,
-                   const std::shared_ptr<v0::Parameter>& then_parameter,
-                   const std::shared_ptr<v0::Parameter>& else_parameter);
-
-    /// \brief sets new output from the operation associated with results
-    /// of each sub-graphs
-    ///
-    /// \param then_result result from then_body
-    /// \param else_result result from else_body
-    /// \return output from operation
-    Output<Node> set_output(const std::shared_ptr<v0::Result>& then_result, const std::shared_ptr<v0::Result>& else_result);
-
-    void validate_and_infer_types() override;
-    bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
-
-    bool has_evaluate() const override;
-
-private:
-    using OutputMap = std::map<int, std::shared_ptr<MultiSubGraphOp::OutputDescription>>;
-
-    void validate_and_infer_type_body(const std::shared_ptr<Function>& body,
-                                      const ngraph::op::util::MultiSubgraphInputDescriptionVector& input_descriptors);
-
-    OutputMap get_mapping_outputs_on_body_description(
-        const ngraph::op::util::MultiSubgraphOutputDescriptionVector& output_descriptors);
-};
+using ov::op::v8::If;
 }  // namespace v8
 }  // namespace op
-}  // namespace ngraph
\ No newline at end of file
+}  // namespace ngraph
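The deleted doc comments above spell out how v8::If is wired: two Function bodies, then per-input (Parameter, Parameter) pairs via set_input and per-output (Result, Result) pairs via set_output. A sketch under those assumptions; `make_if` is hypothetical and both branches simply pass the value through:

    #include <memory>

    #include "ngraph/function.hpp"
    #include "ngraph/op/if.hpp"
    #include "ngraph/op/parameter.hpp"
    #include "ngraph/op/result.hpp"

    ngraph::Output<ngraph::Node> make_if(const ngraph::Output<ngraph::Node>& cond,
                                         const ngraph::Output<ngraph::Node>& value) {
        // Build a one-parameter pass-through body and hand back its endpoints.
        auto make_body = [&](std::shared_ptr<ngraph::op::v0::Parameter>& param,
                             std::shared_ptr<ngraph::op::v0::Result>& result) {
            param = std::make_shared<ngraph::op::v0::Parameter>(value.get_element_type(), value.get_partial_shape());
            result = std::make_shared<ngraph::op::v0::Result>(param);
            return std::make_shared<ngraph::Function>(ngraph::ResultVector{result}, ngraph::ParameterVector{param});
        };
        std::shared_ptr<ngraph::op::v0::Parameter> then_param, else_param;
        std::shared_ptr<ngraph::op::v0::Result> then_result, else_result;

        auto if_op = std::make_shared<ngraph::op::v8::If>(cond);
        if_op->set_then_body(make_body(then_param, then_result));
        if_op->set_else_body(make_body(else_param, else_result));
        if_op->set_input(value, then_param, else_param);     // one outer input feeds both bodies
        return if_op->set_output(then_result, else_result);  // one outer output drawn from both bodies
    }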
diff --git a/ngraph/core/include/ngraph/op/interpolate.hpp b/ngraph/core/include/ngraph/op/interpolate.hpp
index e2104a8dda778b..0c860817d758df 100644
--- a/ngraph/core/include/ngraph/op/interpolate.hpp
+++ b/ngraph/core/include/ngraph/op/interpolate.hpp
@@ -10,358 +10,18 @@
 #include "ngraph/attribute_adapter.hpp"
 #include "ngraph/op/op.hpp"
 #include "ngraph/op/util/attr_types.hpp"
+#include "openvino/op/interpolate.hpp"
 
 namespace ngraph {
 namespace op {
 namespace v0 {
-/// \brief Structure that specifies attributes for interpolation
-struct InterpolateAttrs {
-    // specify dimension indices where interpolation is applied, and `axes` is any
-    // unordered list of indices of different dimensions of input tensor. Required.
-    AxisSet axes;
-    // specifies type of interpolation
-    // one of `nearest`, `linear`, `cubic`, `area`. Required.
-    std::string mode;
-    // a flag that specifies whether to align corners or not.
-    // `true` (default) means the alignment is applied,
-    // `false` means the alignment isn't applied.
-    bool align_corners = true;
-    // a flag that specifies whether to perform anti-aliasing. default is `false`
-    bool antialias = false;
-    // specify the number of pixels to add to the beginning of the image being
-    // interpolated. This addition of pixels is done before interpolation calculation.
-    std::vector<size_t> pads_begin;
-    // specify the number of pixels to add to the end of the image being interpolated.
-    // This addition of pixels is done before interpolation calculation.
-    std::vector<size_t> pads_end;
-};
-
-/// \brief Layer which performs bilinear interpolation
-class NGRAPH_API Interpolate : public Op {
-public:
-    NGRAPH_RTTI_DECLARATION;
-
-    enum class InterpolateMode {
-        NEAREST,
-        LINEAR,
-        CUBIC,
-        AREA,
-        nearest NGRAPH_ENUM_DEPRECATED("Please use NEAREST instead") = NEAREST,
-        linear NGRAPH_ENUM_DEPRECATED("Please use LINEAR instead") = LINEAR,
-        cubic NGRAPH_ENUM_DEPRECATED("Please use CUBIC instead") = CUBIC,
-        area NGRAPH_ENUM_DEPRECATED("Please use AREA instead") = AREA
-    };
-
-    Interpolate() = default;
-    /// \brief Constructs a Interpolate operation
-    ///
-    /// \param image        Input image
-    /// \param output_shape Output shape of spatial axes
-    /// \param attrs        Interpolation attributes
-    Interpolate(const Output<Node>& image, const Output<Node>& output_shape, const InterpolateAttrs& attrs);
-    bool visit_attributes(AttributeVisitor& visitor) override;
-
-    void validate_and_infer_types() override;
-
-    std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
-
-    const InterpolateAttrs& get_attrs() const {
-        return m_attrs;
-    }
-
-private:
-    InterpolateAttrs m_attrs;
-};
+using InterpolateAttrs = ov::op::v0::Interpolate::Attributes;
+using ov::op::v0::Interpolate;
 }  // namespace v0
-
 namespace v4 {
-class NGRAPH_API Interpolate : public Op {
-public:
-    NGRAPH_RTTI_DECLARATION;
-
-    /// \brief Shape calculation mode
-    ///
-    /// sizes  - output shape for interpolated axes is calculated using input `sizes`
-    /// scales - output shape for interpolated axes is calculated using input `scales`
-    enum class ShapeCalcMode {
-        SIZES,
-        SCALES,
-        sizes NGRAPH_ENUM_DEPRECATED("Please use SIZES instead") = SIZES,
-        scales NGRAPH_ENUM_DEPRECATED("Please use SCALES instead") = SCALES
-    };
-
-    /// \brief Interpolation mode
-    ///
-    /// nearest     - nearest interpolation
-    /// linear      - linear interpolation as in TensorFlow
-    /// linear_onnx - linear interpolation as in ONNX
-    /// cubic       - cubic interpolation
-    enum class InterpolateMode {
-        NEAREST,
-        LINEAR,
-        LINEAR_ONNX,
-        CUBIC,
-        nearest NGRAPH_ENUM_DEPRECATED("Please use NEAREST instead") = NEAREST,
-        linear NGRAPH_ENUM_DEPRECATED("Please use LINEAR instead") = LINEAR,
-        linear_onnx NGRAPH_ENUM_DEPRECATED("Please use LINEAR_ONNX instead") = LINEAR_ONNX,
-        cubic NGRAPH_ENUM_DEPRECATED("Please use CUBIC instead") = CUBIC
-    };
-
-    /// \brief Mode of the calculation of the source coordinate from resized one
-    ///
-    /// These modes are modes from ONNX runtime.
-    enum class CoordinateTransformMode {
-        HALF_PIXEL,
-        PYTORCH_HALF_PIXEL,
-        ASYMMETRIC,
-        TF_HALF_PIXEL_FOR_NN,
-        ALIGN_CORNERS,
-        half_pixel NGRAPH_ENUM_DEPRECATED("Please use HALF_PIXEL instead") = HALF_PIXEL,
-        pytorch_half_pixel NGRAPH_ENUM_DEPRECATED("Please use PYTORCH_HALF_PIXEL instead") = PYTORCH_HALF_PIXEL,
-        asymmetric NGRAPH_ENUM_DEPRECATED("Please use ASYMMETRIC instead") = ASYMMETRIC,
-        tf_half_pixel_for_nn NGRAPH_ENUM_DEPRECATED("Please use TF_HALF_PIXEL_FOR_NN instead") = TF_HALF_PIXEL_FOR_NN,
-        align_corners NGRAPH_ENUM_DEPRECATED("Please use ALIGN_CORNERS instead") = ALIGN_CORNERS
-    };
-
-    /// \brief Round modes for the nearest interpolation.
-    enum class NearestMode {
-        ROUND_PREFER_FLOOR,
-        ROUND_PREFER_CEIL,
-        FLOOR,
-        CEIL,
-        SIMPLE,
-        round_prefer_floor NGRAPH_ENUM_DEPRECATED("Please use ROUND_PREFER_FLOOR instead") = ROUND_PREFER_FLOOR,
-        round_prefer_ceil NGRAPH_ENUM_DEPRECATED("Please use ROUND_PREFER_CEIL instead") = ROUND_PREFER_CEIL,
-        floor NGRAPH_ENUM_DEPRECATED("Please use FLOOR instead") = FLOOR,
-        ceil NGRAPH_ENUM_DEPRECATED("Please use CEIL instead") = CEIL,
-        simple NGRAPH_ENUM_DEPRECATED("Please use SIMPLE instead") = SIMPLE
-    };
-
-    struct InterpolateAttrs {
-        // specifies type of interpolation
-        // one of `nearest`, `linear`, `linear_onnx`, `cubic` Required.
-        InterpolateMode mode = InterpolateMode::NEAREST;
-        // specifies shape calculation mode
-        // one of `sizes`, `scales` Required
-        ShapeCalcMode shape_calculation_mode = ShapeCalcMode::SIZES;
-        // specify the number of pixels to add to the beginning of the image being
-        // interpolated. This addition of pixels is done before interpolation
-        // calculation.
-        std::vector<size_t> pads_begin;
-        // specify the number of pixels to add to the end of the image being
-        // interpolated. This addition of pixels is done before interpolation
-        // calculation.
-        std::vector<size_t> pads_end;
-        // specifies how to transform the coordinate in the resized tensor to the
-        // coordinate in the original tensor. one of `half_pixel`, `pytorch_half_pixel`,
-        // `asymmetric`, `tf_half_pixel_for_nn`, `align_corners`
-        CoordinateTransformMode coordinate_transformation_mode = CoordinateTransformMode::HALF_PIXEL;
-        // specifies round mode when `mode == nearest` and is used only when `mode ==
-        // nearest`. one of `round_prefer_floor`, `round_prefer_ceil`, `floor`, `ceil`,
-        // `simple`
-        NearestMode nearest_mode = NearestMode::ROUND_PREFER_FLOOR;
-        // a flag that specifies whether to perform anti-aliasing. default is `false`
-        bool antialias = false;
-        // specifies the parameter *a* for cubic interpolation (see, e.g.
-        // [article](https://ieeexplore.ieee.org/document/1163711/)). *cube_coeff* is
-        // used only when `mode == cubic`
-        double cube_coeff = -0.75f;
-
-        InterpolateAttrs() = default;
-
-        InterpolateAttrs(InterpolateMode mode,
-                         ShapeCalcMode shape_calculation_mode,
-                         const std::vector<size_t>& pads_begin,
-                         const std::vector<size_t>& pads_end,
-                         CoordinateTransformMode coordinate_transformation_mode = CoordinateTransformMode::HALF_PIXEL,
-                         NearestMode nearest_mode = NearestMode::ROUND_PREFER_FLOOR,
-                         bool antialias = false,
-                         double cube_coeff = -0.75)
-            : mode(mode),
-              shape_calculation_mode(shape_calculation_mode),
-              pads_begin(pads_begin),
-              pads_end(pads_end),
-              coordinate_transformation_mode(coordinate_transformation_mode),
-              nearest_mode(nearest_mode),
-              antialias(antialias),
-              cube_coeff(cube_coeff) {}
-    };
-
-    Interpolate() = default;
-    /// \brief Constructs a Interpolate operation without 'axes' input.
-    ///
-    /// \param image        Input image
-    /// \param output_shape Output shape of spatial axes
-    /// \param scales       Scales of spatial axes, i.e. output_shape / input_shape
-    /// \param attrs        Interpolation attributes
-    Interpolate(const Output<Node>& image,
-                const Output<Node>& output_shape,
-                const Output<Node>& scales,
-                const InterpolateAttrs& attrs);
-
-    /// \brief Constructs a Interpolate operation with 'axes' input.
-    ///
-    /// \param image        Input image
-    /// \param output_shape Output shape of spatial axes
-    /// \param scales       Scales of spatial axes, i.e. output_shape / input_shape
-    /// \param axes         Interpolation axes
-    /// \param attrs        Interpolation attributes
-    Interpolate(const Output<Node>& image,
-                const Output<Node>& output_shape,
-                const Output<Node>& scales,
-                const Output<Node>& axes,
-                const InterpolateAttrs& attrs);
-    bool visit_attributes(AttributeVisitor& visitor) override;
-
-    void validate_and_infer_types() override;
-
-    std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
-    bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
-    bool has_evaluate() const override;
-
-    const InterpolateAttrs& get_attrs() const {
-        return m_attrs;
-    }
-
-protected:
-    /// \return The interpolation axes.
-    std::vector<int64_t> get_axes() const;
-
-private:
-    bool evaluate_interpolate(const HostTensorVector& outputs, const HostTensorVector& inputs) const;
-    InterpolateAttrs m_attrs;
-
-    /// \brief Corrects pads_begin and pads_end attributes.
-    ///
-    /// \details When Interpolate-4 is a result of some transformation, it is possible
-    ///          that pads_begin.size() != pads_end.size() or
-    ///          pads_begin.size() != input_rank. In such case, we should correct
-    ///          pads_begin and pads_end, using padding of pads_begin and pads_end by
-    ///          zeros or using pads_begin[0 : input_rank], pads_end[0 : input_rank].
-    ///
-    ///          Padding of pads_begin is performed when pads_begin.size() < input_rank,
-    ///          and pads_begin[0 : input_rank] is used when
-    ///          pads_begin.size() < input_rank.
-    ///
-    ///          Similarly for pads_end.
-    void correct_pads();
-
-    /// \brief Calculates input shape after padding.
-    ///
-    /// \param input_shape Shape of input data.
-    ///
-    /// \return Padded input shape, i.e. input_shape + pads_begin + pads_end
-    PartialShape get_padded_input_shape(const PartialShape& input_shape) const;
-
-    /// \brief Infers output shape using scales.
-    ///
-    /// \param output_shape[in,out] output shape
-    /// \param axes Interpolation axes
-    /// \param scales Scales for interpolated axes
-    /// \param padded_input_shape input shape after padding
-    void infer_using_scales(PartialShape& output_shape,
-                            const std::vector<int64_t>& axes,
-                            const std::vector<float>& scales,
-                            const PartialShape& padded_input_shape) const;
-
-    /// \brief Infers output shape using sizes.
-    ///
-    /// \param output_shape[in,out] output shape
-    /// \param axes Interpolation axes
-    /// \param sizes sizes for interpolated axes
-    void infer_using_shapes(PartialShape& output_shape,
                            const std::vector<int64_t>& axes,
-                            const std::vector<int64_t>& sizes) const;
-};
+using ov::op::v4::Interpolate;
 }  // namespace v4
-
 using v0::Interpolate;
 using v0::InterpolateAttrs;
 }  // namespace op
-
-//---------------------------------------- v0 --------------------------------------------------
-NGRAPH_API
-std::ostream& operator<<(std::ostream& s, const op::v0::Interpolate::InterpolateMode& type);
-
-//---------------------------------------- v4 --------------------------------------------------
-
-NGRAPH_API
-std::ostream& operator<<(std::ostream& s, const op::v4::Interpolate::InterpolateMode& type);
-
-NGRAPH_API
-std::ostream& operator<<(std::ostream& s, const op::v4::Interpolate::CoordinateTransformMode& type);
-
-NGRAPH_API
-std::ostream& operator<<(std::ostream& s, const op::v4::Interpolate::NearestMode& type);
-
-NGRAPH_API
-std::ostream& operator<<(std::ostream& s, const op::v4::Interpolate::ShapeCalcMode& type);
-
 }  // namespace ngraph
-
-namespace ov {
-
-template <>
-class NGRAPH_API AttributeAdapter<ngraph::op::v0::Interpolate::InterpolateMode>
-    : public EnumAttributeAdapterBase<ngraph::op::v0::Interpolate::InterpolateMode> {
-public:
-    AttributeAdapter(ngraph::op::v0::Interpolate::InterpolateMode& value)
-        : EnumAttributeAdapterBase<ngraph::op::v0::Interpolate::InterpolateMode>(value) {}
-
-    static constexpr DiscreteTypeInfo type_info{"AttributeAdapter<op::v0::Interpolate::InterpolateMode>", 0};
-    const DiscreteTypeInfo& get_type_info() const override {
-        return type_info;
-    }
-};
-
-template <>
-class NGRAPH_API AttributeAdapter<ngraph::op::v4::Interpolate::InterpolateMode>
-    : public EnumAttributeAdapterBase<ngraph::op::v4::Interpolate::InterpolateMode> {
-public:
-    AttributeAdapter(ngraph::op::v4::Interpolate::InterpolateMode& value)
-        : EnumAttributeAdapterBase<ngraph::op::v4::Interpolate::InterpolateMode>(value) {}
-
-    static constexpr DiscreteTypeInfo type_info{"AttributeAdapter<op::v4::Interpolate::InterpolateMode>", 4};
-    const DiscreteTypeInfo& get_type_info() const override {
-        return type_info;
-    }
-};
-
-template <>
-class NGRAPH_API AttributeAdapter<ngraph::op::v4::Interpolate::CoordinateTransformMode>
-    : public EnumAttributeAdapterBase<ngraph::op::v4::Interpolate::CoordinateTransformMode> {
-public:
-    AttributeAdapter(ngraph::op::v4::Interpolate::CoordinateTransformMode& value)
-        : EnumAttributeAdapterBase<ngraph::op::v4::Interpolate::CoordinateTransformMode>(value) {}
-
-    static constexpr DiscreteTypeInfo type_info{"AttributeAdapter<op::v4::Interpolate::CoordinateTransformMode>", 4};
-    const DiscreteTypeInfo& get_type_info() const override {
-        return type_info;
-    }
-};
-
-template <>
-class NGRAPH_API AttributeAdapter<ngraph::op::v4::Interpolate::NearestMode>
-    : public EnumAttributeAdapterBase<ngraph::op::v4::Interpolate::NearestMode> {
-public:
-    AttributeAdapter(ngraph::op::v4::Interpolate::NearestMode& value)
-        : EnumAttributeAdapterBase<ngraph::op::v4::Interpolate::NearestMode>(value) {}
-
-    static constexpr DiscreteTypeInfo type_info{"AttributeAdapter<op::v4::Interpolate::NearestMode>", 4};
-    const DiscreteTypeInfo& get_type_info() const override {
-        return type_info;
-    }
-};
-
-template <>
-class NGRAPH_API AttributeAdapter<ngraph::op::v4::Interpolate::ShapeCalcMode>
-    : public EnumAttributeAdapterBase<ngraph::op::v4::Interpolate::ShapeCalcMode> {
-public:
-    AttributeAdapter(ngraph::op::v4::Interpolate::ShapeCalcMode& value)
-        : EnumAttributeAdapterBase<ngraph::op::v4::Interpolate::ShapeCalcMode>(value) {}
-
-    static constexpr DiscreteTypeInfo type_info{"AttributeAdapter<op::v4::Interpolate::ShapeCalcMode>", 4};
-    const DiscreteTypeInfo& get_type_info() const override {
-        return type_info;
-    }
-};
-}  // namespace ov
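For v4::Interpolate aliased above, `shape_calculation_mode` decides whether the `sizes` or the `scales` input drives the output shape; the other input must still be provided. A sketch of a 2x nearest upsample over the two spatial axes (helper name and shapes are illustrative):

    #include <memory>

    #include "ngraph/op/constant.hpp"
    #include "ngraph/op/interpolate.hpp"
    #include "ngraph/op/parameter.hpp"

    std::shared_ptr<ngraph::Node> make_upsample_2x() {
        using Interp = ngraph::op::v4::Interpolate;
        auto image = std::make_shared<ngraph::op::v0::Parameter>(ngraph::element::f32, ngraph::Shape{1, 3, 32, 32});
        auto sizes = ngraph::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{2}, {64, 64});
        auto scales = ngraph::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{2}, {2.0f, 2.0f});
        auto axes = ngraph::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{2}, {2, 3});

        // SCALES mode: the output shape comes from `scales`; `sizes` is carried but unused.
        Interp::InterpolateAttrs attrs(Interp::InterpolateMode::NEAREST,
                                       Interp::ShapeCalcMode::SCALES,
                                       {0, 0, 0, 0},   // pads_begin
                                       {0, 0, 0, 0});  // pads_end
        return std::make_shared<Interp>(image, sizes, scales, axes, attrs);
    }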
-class NGRAPH_API Less : public util::BinaryElementwiseComparison { -public: - NGRAPH_RTTI_DECLARATION; - /// \brief Constructs a less-than operation. - Less() : util::BinaryElementwiseComparison(AutoBroadcastSpec::NUMPY) {} - /// \brief Constructs a less-than operation. - /// - /// \param arg0 Node that produces the first input tensor. - /// \param arg1 Node that produces the second input tensor. - /// \param auto_broadcast Auto broadcast specification - Less(const Output& arg0, - const Output& arg1, - const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY)); - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; -}; +using ov::op::v1::Less; } // namespace v1 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/less_eq.hpp b/ngraph/core/include/ngraph/op/less_eq.hpp index f45fb9614c335d..fe8bfdff9fa770 100644 --- a/ngraph/core/include/ngraph/op/less_eq.hpp +++ b/ngraph/core/include/ngraph/op/less_eq.hpp @@ -5,30 +5,12 @@ #pragma once #include "ngraph/op/util/binary_elementwise_comparison.hpp" +#include "openvino/op/less_eq.hpp" namespace ngraph { namespace op { namespace v1 { -/// \brief Elementwise less-than-or-equal operation. -class NGRAPH_API LessEqual : public util::BinaryElementwiseComparison { -public: - NGRAPH_RTTI_DECLARATION; - /// \brief Constructs a less-than-or-equal operation. - LessEqual() : util::BinaryElementwiseComparison(AutoBroadcastSpec::NUMPY) {} - - /// \brief Constructs a less-than-or-equal operation. - /// - /// \param arg0 Node that produces the first input tensor. - /// \param arg1 Node that produces the second input tensor. - /// \param auto_broadcast Auto broadcast specification - LessEqual(const Output& arg0, - const Output& arg1, - const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY)); - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; -}; +using ov::op::v1::LessEqual; } // namespace v1 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/log.hpp b/ngraph/core/include/ngraph/op/log.hpp index 6fee3de27d5a49..51c9c232f05df7 100644 --- a/ngraph/core/include/ngraph/op/log.hpp +++ b/ngraph/core/include/ngraph/op/log.hpp @@ -5,26 +5,12 @@ #pragma once #include "ngraph/op/util/unary_elementwise_arithmetic.hpp" +#include "openvino/op/log.hpp" namespace ngraph { namespace op { namespace v0 { -/// \brief Elementwise natural log operation. -class NGRAPH_API Log : public util::UnaryElementwiseArithmetic { -public: - NGRAPH_RTTI_DECLARATION; - /// \brief Constructs a natural log operation. - Log() = default; - /// \brief Constructs a natural log operation. - /// - /// \param arg Node that produces the input tensor. 
- Log(const Output& arg); - - bool visit_attributes(AttributeVisitor& visitor) override; - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; -}; +using ov::op::v0::Log; } // namespace v0 using v0::Log; } // namespace op diff --git a/ngraph/core/include/ngraph/op/log_softmax.hpp b/ngraph/core/include/ngraph/op/log_softmax.hpp index 5a17ab4e6e65fe..c0d3a2a020de75 100644 --- a/ngraph/core/include/ngraph/op/log_softmax.hpp +++ b/ngraph/core/include/ngraph/op/log_softmax.hpp @@ -5,39 +5,12 @@ #pragma once #include "ngraph/op/op.hpp" +#include "openvino/op/log_softmax.hpp" namespace ngraph { namespace op { namespace v5 { -class NGRAPH_API LogSoftmax : public Op { -public: - NGRAPH_RTTI_DECLARATION; - LogSoftmax() = default; - /// \brief Constructs a LogSoftmax operation. - /// - /// \param arg Node that produces the first input tensor.
- /// `[d0, ...]` - /// \param axis The axis position (0-based) on which to calculate the LogSoftmax. - /// - /// Output `[d0, ...]` - /// - LogSoftmax(const Output& arg, const int64_t axis); - - bool visit_attributes(AttributeVisitor& visitor) override; - void validate_and_infer_types() override; - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - int64_t get_axis() const { - return m_axis; - } - void set_axis(const int64_t axis) { - m_axis = axis; - } - -private: - int64_t m_axis = 1; -}; +using ov::op::v5::LogSoftmax; } // namespace v5 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/loop.hpp b/ngraph/core/include/ngraph/op/loop.hpp index 8d037c9bd37391..33be140ee704f1 100644 --- a/ngraph/core/include/ngraph/op/loop.hpp +++ b/ngraph/core/include/ngraph/op/loop.hpp @@ -12,84 +12,12 @@ #include "ngraph/op/parameter.hpp" #include "ngraph/op/tensor_iterator.hpp" #include "ngraph/op/util/sub_graph_base.hpp" +#include "openvino/op/loop.hpp" namespace ngraph { namespace op { namespace v5 { -/// \brief Iterate a body over tensors, accumulating into tensors. -class NGRAPH_API Loop : public op::util::SubGraphOp { -public: - /// \brief Allows to define the purpose of inputs/outputs in the body - struct SpecialBodyPorts { - SpecialBodyPorts() = default; - SpecialBodyPorts(int64_t in_current_iteration_input_idx, int64_t in_body_condition_output_idx) - : current_iteration_input_idx(in_current_iteration_input_idx), - body_condition_output_idx(in_body_condition_output_idx) {} - // -1 means the input is not provided, this input is optional - int64_t current_iteration_input_idx = -1; - // -1 means the output is not provided, - // this output is required, throw an exception if not provided - int64_t body_condition_output_idx = -1; - }; - - NGRAPH_RTTI_DECLARATION; - - /// \brief Constructs a Loop operation. - Loop() = default; - - /// \brief Constructs a Loop operation. - /// - /// \param trip_count Node specifies the maximum number of iterations. - /// \param execution_condition Node determines whether to execute the first - /// iteration or not. 
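The alias resolves to ov::op::v5::Loop, which keeps the established wiring. A minimal sketch, assuming a prebuilt body function with one parameter and one output; body, body_param, body_out and data are placeholders, not names from this patch.

// Run the body at most 10 times; no current_iteration input (-1),
// body output 0 supplies the continue-condition.
auto trip_count = ngraph::opset5::Constant::create(ngraph::element::i64, ngraph::Shape{}, {10});
auto exec_cond = ngraph::opset5::Constant::create(ngraph::element::boolean, ngraph::Shape{}, {true});
auto loop = std::make_shared<ov::op::v5::Loop>(trip_count, exec_cond);
loop->set_function(body);                           // body: shared_ptr<ngraph::Function>
loop->set_special_body_ports({-1, 0});
loop->set_invariant_input(body_param, data);        // same value fed on every iteration
auto last = loop->get_iter_value(body_out, -1);     // -1: take the value from the final iteration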
- Loop(const Output& trip_count, const Output& execution_condition); - - Output get_concatenated_slices(const Output& value, - int64_t start, - int64_t stride, - int64_t part_size, - int64_t end, - int64_t axis) override; - - void set_special_body_ports(const SpecialBodyPorts& special_body_ports) { - m_special_body_ports = special_body_ports; - } - - SpecialBodyPorts get_special_body_ports() const { - return m_special_body_ports; - } - void validate_and_infer_types() override; - bool visit_attributes(AttributeVisitor& visitor) override; - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; - -protected: - Loop(const Loop&); - -private: - void clone_to(Loop& dst, const OutputVector& new_args) const; - - SpecialBodyPorts m_special_body_ports; -}; +using ov::op::v5::Loop; } // namespace v5 } // namespace op } // namespace ngraph - -namespace ov { - -template <> -class NGRAPH_API AttributeAdapter - : public DirectValueAccessor { -public: - AttributeAdapter(ngraph::op::v5::Loop::SpecialBodyPorts& value) - : DirectValueAccessor(value) {} - - static constexpr DiscreteTypeInfo type_info{"AttributeAdapter", 0}; - const DiscreteTypeInfo& get_type_info() const override { - return type_info; - } -}; - -} // namespace ov diff --git a/ngraph/core/include/ngraph/op/lrn.hpp b/ngraph/core/include/ngraph/op/lrn.hpp index a229f9600a1268..908505b9124472 100644 --- a/ngraph/core/include/ngraph/op/lrn.hpp +++ b/ngraph/core/include/ngraph/op/lrn.hpp @@ -5,74 +5,12 @@ #pragma once #include "ngraph/op/op.hpp" +#include "openvino/op/lrn.hpp" namespace ngraph { namespace op { namespace v0 { -// clang-format off - /// \brief Elementwise Local Response Normalization (LRN) operation. - /// - /// ## Inputs - /// - /// | | Type | Description | - /// | ----- | --------------------------------------- | ----------------------------------------------- | - /// | `arg` | \f$N[n, c, d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and numeric element type. | - /// - /// ## Output - /// - /// | Type | Description | - /// | ---------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | - /// | \f$N[n, c, d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[n, c, d_1,\dots,d_n] = \frac{N[n,i,d_1,\dots,d_n]}{ (bias + alpha * (\sum_{i=max(0,(nsize-1)/2)}^{min(C, (nsize-1)/2)+1} N[n,i,d_1,\dots,d_n]^{2}) ^ {2})}\f$ | -// clang-format on -class NGRAPH_API LRN : public Op { -public: - NGRAPH_RTTI_DECLARATION; - - /// \brief Constructs a LRN operation. - LRN() = default; - /// \brief Constructs a LRN operation. - /// - /// \param arg Node that produces the input tensor. 
- LRN(const Output& arg, double alpha, double beta, double bias, size_t size); - - LRN(const Output& arg, const Output& axes, double alpha, double beta, double bias, size_t size); - - bool visit_attributes(AttributeVisitor& visitor) override; - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - void validate_and_infer_types() override; - - double get_alpha() const { - return m_alpha; - } - void set_alpha(double alpha) { - m_alpha = alpha; - } - double get_beta() const { - return m_beta; - } - void set_beta(double beta) { - m_beta = beta; - } - double get_bias() const { - return m_bias; - } - void set_bias(double bias) { - m_bias = bias; - } - size_t get_nsize() const { - return m_size; - } - void set_nsize(size_t size) { - m_size = size; - } - AxisSet get_reduction_axes() const; - -protected: - double m_alpha; - double m_beta; - double m_bias; - size_t m_size; -}; +using ov::op::v0::LRN; } // namespace v0 using v0::LRN; } // namespace op diff --git a/ngraph/core/include/ngraph/op/lstm_cell.hpp b/ngraph/core/include/ngraph/op/lstm_cell.hpp index 17c81965e377aa..d6a78beac7a239 100644 --- a/ngraph/core/include/ngraph/op/lstm_cell.hpp +++ b/ngraph/core/include/ngraph/op/lstm_cell.hpp @@ -13,391 +13,18 @@ #include "ngraph/op/op.hpp" #include "ngraph/op/util/activation_functions.hpp" #include "ngraph/op/util/rnn_cell_base.hpp" +#include "openvino/op/lstm_cell.hpp" namespace ngraph { namespace op { -enum class LSTMWeightsFormat { - FICO, // IE - ICOF, // PyTorch - IFCO, // DNNL, TF, MxNet - IFOC, // Caffe - IOFC, // ONNX -}; +using ov::op::LSTMWeightsFormat; namespace v0 { -/// -/// \brief Class for single lstm cell node. -/// -/// \note Following implementation supports: -/// \li \c peepholes Gers & Schmidhuber (2000) -/// https://ieeexplore.ieee.org/document/861302 -/// \li Coupling input and forget gates. -/// -/// \note It calculates following equations: -/// -/// it = f(Xt*(Wi^T) + Ht-1*(Ri^T) + Pi (.) Ct-1 + Wbi + Rbi) -/// ft = f(Xt*(Wf^T) + Ht-1*(Rf^T) + Pf (.) Ct-1 + Wbf + Rbf) -/// ct = g(Xt*(Wc^T) + Ht-1*(Rc^T) + Wbc + Rbc) -/// Ct = ft (.) Ct-1 + it (.) ct -/// ot = f(Xt*(Wo^T) + Ht-1*(Ro^T) + Po (.) Ct + Wbo + Rbo) -/// Ht = ot (.) h(Ct) -/// -/// * - Is a dot product, -/// (.) - is a Hadamard product (element-wise), -/// f, g, h - are activation functions. -/// -/// \note This class represents only single *cell* (for current time step) and not -/// the whole LSTM Sequence layer -/// -/// \sa LSTMSequence, RNNCell, GRUCell -/// -class NGRAPH_API LSTMCell : public util::RNNCellBase { -public: - NGRAPH_RTTI_DECLARATION; - - LSTMCell(); - /// - /// \brief Constructs LSTMCell node. - /// - /// \param[in] X The input tensor with shape: [batch_size, - /// input_size]. - /// \param[in] initial_hidden_state The hidden state tensor at current time step - /// with shape: [batch_size, hidden_size]. - /// \param[in] initial_cell_state The cell state tensor at current time step - /// with shape: [batch_size, hidden_size]. - /// \param[in] W The gate weights tensor with shape: - /// [4*hidden_size, input_size]. - /// \param[in] R The recurrence weights tensor with shape: - /// [4*hidden_size, hidden_size]. - /// \param[in] hidden_size The number of hidden units for recurrent cell. - /// \param[in] weights_format The order of gates in weights tensors. The - /// default format is IFCO since it is used by - /// DNNL. - /// \param[in] activations The vector of activation functions used inside - /// recurrent cell. 
- /// \param[in] activations_alpha The vector of alpha parameters for activation - /// functions in order respective to activation - /// list. - /// \param[in] activations_beta The vector of beta parameters for activation - /// functions in order respective to activation - /// list. - /// \param[in] clip The value defining clipping range [-clip, - /// clip] on input of activation functions. - /// \param[in] input_forget Controls coupling input and forget gates. - /// - LSTMCell(const Output& X, - const Output& initial_hidden_state, - const Output& initial_cell_state, - const Output& W, - const Output& R, - std::size_t hidden_size, - LSTMWeightsFormat weights_format = LSTMWeightsFormat::IFCO, - const std::vector& activations = std::vector{"sigmoid", "tanh", "tanh"}, - const std::vector& activations_alpha = {}, - const std::vector& activations_beta = {}, - float clip = 0.f, - bool input_forget = false); - - /// - /// \brief Constructs LSTMCell node. - /// - /// \param[in] X The input tensor with shape: [batch_size, - /// input_size]. - /// \param[in] initial_hidden_state The hidden state tensor at current time step - /// with shape: [batch_size, hidden_size]. - /// \param[in] initial_cell_state The cell state tensor at current time step - /// with shape: [batch_size, hidden_size]. - /// \param[in] W The weight tensor with shape: [4*hidden_size, - /// input_size]. - /// \param[in] R The recurrence weight tensor with shape: - /// [4*hidden_size, hidden_size]. - /// \param[in] B The bias tensor for gates with shape: - /// [4*hidden_size]. - /// \param[in] hidden_size The number of hidden units for recurrent cell. - /// \param[in] weights_format The order of gates in weights tensors. The - /// default format is IFCO since it is used by - /// DNNL. - /// \param[in] activations The vector of activation functions used inside - /// recurrent cell. - /// \param[in] activations_alpha The vector of alpha parameters for activation - /// functions in order respective to activation - /// list. - /// \param[in] activations_beta The vector of beta parameters for activation - /// functions in order respective to activation - /// list. - /// \param[in] clip The value defining clipping range [-clip, - /// clip] on input of activation functions. - /// \param[in] input_forget Controls coupling input and forget gates. - /// - LSTMCell(const Output& X, - const Output& initial_hidden_state, - const Output& initial_cell_state, - const Output& W, - const Output& R, - const Output& B, - std::size_t hidden_size, - LSTMWeightsFormat weights_format = LSTMWeightsFormat::IFCO, - const std::vector& activations = std::vector{"sigmoid", "tanh", "tanh"}, - const std::vector& activations_alpha = {}, - const std::vector& activations_beta = {}, - float clip = 0.f, - bool input_forget = false); - - /// - /// \brief Constructs LSTMCell node. - /// - /// \param[in] X The input tensor with shape: [batch_size, - /// input_size]. - /// \param[in] initial_hidden_state The hidden state tensor at current time step - /// with shape: [batch_size, hidden_size]. - /// \param[in] initial_cell_state The cell state tensor at current time step - /// with shape: [batch_size, hidden_size]. - /// \param[in] W The weight tensor with shape: [4*hidden_size, - /// input_size]. - /// \param[in] R The recurrence weight tensor with shape: - /// [4*hidden_size, hidden_size]. - /// \param[in] B The bias tensor for gates with shape: - /// [4*hidden_size]. 
- /// \param[in] P The weight tensor for peepholes with shape: - /// [3*hidden_size] - 3 equals to only iof gates. - /// The order is: input, output, forget gates. - /// \param[in] hidden_size The number of hidden units for recurrent cell. - /// \param[in] weights_format The order of gates in weights tensors. The - /// default format is IFCO since it is used by - /// DNNL. - /// \param[in] activations The vector of activation functions used inside - /// recurrent cell. - /// \param[in] activations_alpha The vector of alpha parameters for activation - /// functions in order respective to activation - /// list. - /// \param[in] activations_beta The vector of beta parameters for activation - /// functions in order respective to activation - /// list. - /// \param[in] clip The value defining clipping range [-clip, - /// clip] on input of activation functions. - /// \param[in] input_forget Controls coupling input and forget gates. - /// - LSTMCell(const Output& X, - const Output& initial_hidden_state, - const Output& initial_cell_state, - const Output& W, - const Output& R, - const Output& B, - const Output& P, - std::size_t hidden_size, - LSTMWeightsFormat weights_format = LSTMWeightsFormat::IFCO, - const std::vector& activations = std::vector{"sigmoid", "tanh", "tanh"}, - const std::vector& activations_alpha = {}, - const std::vector& activations_beta = {}, - float clip = 0.f, - bool input_forget = false); - - void validate_and_infer_types() override; - bool visit_attributes(AttributeVisitor& visitor) override; - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - bool get_input_forget() const { - return m_input_forget; - } - LSTMWeightsFormat get_weights_format() const { - return m_weights_format; - } - -private: - /// - /// \brief Creates the default bias input initialized with zeros. - /// - /// \return The object of Output class. - /// - Output get_default_bias_input() const; - - /// - /// \brief Creates the default peepholes input initialized with zeros. - /// - /// \return The object of Output class. - /// - Output get_default_peepholes_input() const; - /// - /// \brief The Activation function f. - /// - util::ActivationFunction m_activation_f; - /// - /// \brief The Activation function g. - /// - util::ActivationFunction m_activation_g; - /// - /// \brief The Activation function h. - /// - util::ActivationFunction m_activation_h; - /// - /// \brief Controls whether to couple input and forget gates. - /// - bool m_input_forget = false; - - /// - /// \brief The order of gates in weights tensors. - /// - LSTMWeightsFormat m_weights_format; - - static constexpr std::size_t s_gates_count{4}; - static constexpr std::size_t s_peepholes_count{3}; -}; +using ov::op::v0::LSTMCell; } // namespace v0 namespace v4 { -/// -/// \brief Class for single lstm cell node. -/// -/// \note Following implementation supports: -/// \li \c peepholes Gers & Schmidhuber (2000) -/// https://ieeexplore.ieee.org/document/861302 -/// \li Coupling input and forget gates. -/// -/// \note It calculates following equations: -/// -/// it = f(Xt*(Wi^T) + Ht-1*(Ri^T) + Wbi + Rbi) -/// ft = f(Xt*(Wf^T) + Ht-1*(Rf^T) + Wbf + Rbf) -/// ct = g(Xt*(Wc^T) + Ht-1*(Rc^T) + Wbc + Rbc) -/// Ct = ft (.) Ct-1 + it (.) ct -/// ot = f(Xt*(Wo^T) + Ht-1*(Ro^T) + Wbo + Rbo) -/// Ht = ot (.) h(Ct) -/// -/// * - Is a dot product, -/// (.) - is a Hadamard product (element-wise), -/// f, g, h - are activation functions. 
-/// -/// \note This class represents only single *cell* (for current time step) and not -/// the whole LSTM Sequence layer -/// -/// \sa LSTMSequence, RNNCell, GRUCell -/// -class NGRAPH_API LSTMCell : public util::RNNCellBase { -public: - NGRAPH_RTTI_DECLARATION; - - LSTMCell(); - /// - /// \brief Constructs LSTMCell node. - /// - /// \param[in] X The input tensor with shape: [batch_size, - /// input_size]. - /// \param[in] initial_hidden_state The hidden state tensor at current time step - /// with shape: [batch_size, hidden_size]. - /// \param[in] initial_cell_state The cell state tensor at current time step - /// with shape: [batch_size, hidden_size]. - /// \param[in] W The gate weights tensor with shape: - /// [4*hidden_size, input_size]. - /// \param[in] R The recurrence weights tensor with shape: - /// [4*hidden_size, hidden_size]. - /// \param[in] hidden_size The number of hidden units for recurrent cell. - /// \param[in] activations The vector of activation functions used inside - /// recurrent cell. - /// \param[in] activations_alpha The vector of alpha parameters for activation - /// functions in order respective to activation - /// list. - /// \param[in] activations_beta The vector of beta parameters for activation - /// functions in order respective to activation - /// list. - /// \param[in] clip The value defining clipping range [-clip, - /// clip] on input of activation functions. - LSTMCell(const Output& X, - const Output& initial_hidden_state, - const Output& initial_cell_state, - const Output& W, - const Output& R, - std::size_t hidden_size, - const std::vector& activations = std::vector{"sigmoid", "tanh", "tanh"}, - const std::vector& activations_alpha = {}, - const std::vector& activations_beta = {}, - float clip = 0.f); - - /// - /// \brief Constructs LSTMCell node. - /// - /// \param[in] X The input tensor with shape: [batch_size, - /// input_size]. - /// \param[in] initial_hidden_state The hidden state tensor at current time step - /// with shape: [batch_size, hidden_size]. - /// \param[in] initial_cell_state The cell state tensor at current time step - /// with shape: [batch_size, hidden_size]. - /// \param[in] W The weight tensor with shape: [4*hidden_size, - /// input_size]. - /// \param[in] R The recurrence weight tensor with shape: - /// [4*hidden_size, hidden_size]. - /// \param[in] B The bias tensor for gates with shape: - /// [4*hidden_size]. - /// \param[in] hidden_size The number of hidden units for recurrent cell. - /// \param[in] activations The vector of activation functions used inside - /// recurrent cell. - /// \param[in] activations_alpha The vector of alpha parameters for activation - /// functions in order respective to activation - /// list. - /// \param[in] activations_beta The vector of beta parameters for activation - /// functions in order respective to activation - /// list. - /// \param[in] clip The value defining clipping range [-clip, - /// clip] on input of activation functions. 
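A minimal sketch of the v4 cell the alias resolves to, using the first constructor with made-up dimensions (batch 2, input_size 3, hidden_size 4); W and R pack the four gates, so their first dimension is 4 * hidden_size. Assumes the usual ngraph/openvino headers are included.

const std::size_t hidden_size = 4;
auto X = std::make_shared<ov::op::v0::Parameter>(ngraph::element::f32, ngraph::Shape{2, 3});
auto H = std::make_shared<ov::op::v0::Parameter>(ngraph::element::f32, ngraph::Shape{2, 4});
auto C = std::make_shared<ov::op::v0::Parameter>(ngraph::element::f32, ngraph::Shape{2, 4});
auto W = std::make_shared<ov::op::v0::Parameter>(ngraph::element::f32, ngraph::Shape{16, 3});
auto R = std::make_shared<ov::op::v0::Parameter>(ngraph::element::f32, ngraph::Shape{16, 4});
auto cell = std::make_shared<ov::op::v4::LSTMCell>(X, H, C, W, R, hidden_size);
// output(0) = Ht with shape [2, 4], output(1) = Ct with shape [2, 4];
// v4 has no weights_format attribute, the gate order is fixed.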
- /// - LSTMCell(const Output& X, - const Output& initial_hidden_state, - const Output& initial_cell_state, - const Output& W, - const Output& R, - const Output& B, - std::size_t hidden_size, - const std::vector& activations = std::vector{"sigmoid", "tanh", "tanh"}, - const std::vector& activations_alpha = {}, - const std::vector& activations_beta = {}, - float clip = 0.f); - - void validate_and_infer_types() override; - - bool visit_attributes(AttributeVisitor& visitor) override; - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - -private: - /// - /// \brief Creates the default bias input initialized with zeros. - /// - /// \return The object of Output class. - /// - Output get_default_bias_input() const; - - /// - /// \brief The Activation function f. - /// - util::ActivationFunction m_activation_f; - /// - /// \brief The Activation function g. - /// - util::ActivationFunction m_activation_g; - /// - /// \brief The Activation function h. - /// - util::ActivationFunction m_activation_h; - - static constexpr std::size_t s_gates_count{4}; -}; +using ov::op::v4::LSTMCell; } // namespace v4 } // namespace op - -NGRAPH_API -std::ostream& operator<<(std::ostream& s, const op::LSTMWeightsFormat& type); } // namespace ngraph - -namespace ov { - -template <> -class NGRAPH_API AttributeAdapter - : public EnumAttributeAdapterBase { -public: - AttributeAdapter(ngraph::op::LSTMWeightsFormat& value) - : EnumAttributeAdapterBase(value) {} - - static constexpr DiscreteTypeInfo type_info{"AttributeAdapter", 1}; - const DiscreteTypeInfo& get_type_info() const override { - return type_info; - } -}; - -} // namespace ov diff --git a/ngraph/core/include/ngraph/op/lstm_sequence.hpp b/ngraph/core/include/ngraph/op/lstm_sequence.hpp index 75638c6c411afb..4898b08666d31c 100644 --- a/ngraph/core/include/ngraph/op/lstm_sequence.hpp +++ b/ngraph/core/include/ngraph/op/lstm_sequence.hpp @@ -15,184 +15,16 @@ #include "ngraph/op/lstm_cell.hpp" #include "ngraph/op/util/attr_types.hpp" #include "ngraph/op/util/rnn_cell_base.hpp" +#include "openvino/op/lstm_sequence.hpp" namespace ngraph { namespace op { namespace v0 { - -/// -/// \brief Class for lstm sequence node. 
-/// -/// \note It follows notation and equations defined as in ONNX standard: -/// https://github.com/onnx/onnx/blob/master/docs/Operators.md#LSTM -/// -/// \sa LSTMCell, RNNCell, GRUCell -/// -/// -class NGRAPH_API LSTMSequence : public Op { -public: - NGRAPH_RTTI_DECLARATION; - LSTMSequence(); - - using direction = RecurrentSequenceDirection; - - size_t get_default_output_index() const override { - return no_default_index(); - } - explicit LSTMSequence(const Output& X, - const Output& initial_hidden_state, - const Output& initial_cell_state, - const Output& sequence_lengths, - const Output& W, - const Output& R, - const Output& B, - const Output& P, - const std::int64_t hidden_size, - const direction lstm_direction, - LSTMWeightsFormat weights_format = LSTMWeightsFormat::IFCO, - const std::vector activations_alpha = {}, - const std::vector activations_beta = {}, - const std::vector activations = {"sigmoid", "tanh", "tanh"}, - const float clip_threshold = 0, - const bool input_forget = false); - - explicit LSTMSequence(const Output& X, - const Output& initial_hidden_state, - const Output& initial_cell_state, - const Output& sequence_lengths, - const Output& W, - const Output& R, - const Output& B, - const std::int64_t hidden_size, - const direction lstm_direction, - LSTMWeightsFormat weights_format = LSTMWeightsFormat::IFCO, - const std::vector& activations_alpha = {}, - const std::vector& activations_beta = {}, - const std::vector& activations = {"sigmoid", "tanh", "tanh"}, - const float clip_threshold = 0, - const bool input_forget = false); - - virtual void validate_and_infer_types() override; - bool visit_attributes(AttributeVisitor& visitor) override; - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - std::vector get_activations_alpha() const { - return m_activations_alpha; - } - std::vector get_activations_beta() const { - return m_activations_beta; - } - std::vector get_activations() const { - return m_activations; - } - float get_clip_threshold() const { - return m_clip_threshold; - } - direction get_direction() const { - return m_direction; - } - std::int64_t get_hidden_size() const { - return m_hidden_size; - } - bool get_input_forget() const { - return m_input_forget; - } - LSTMWeightsFormat get_weights_format() const { - return m_weights_format; - } - -private: - /// - /// \brief Gets the masked value according to sequence length in a batch. - /// - /// \note Zeros out values or sets them to default value for inputs with - /// sequence length shorter than currently procssed time step. - /// - /// \param[in] data The input value. - /// \param[in] time_step The current time step denoting sequence length. - /// \param[in] batch_axis The batch axis index of data tensor. - /// \param[in] default_value The default value for masked elements. - /// - /// \return The masked value. - /// - std::shared_ptr get_masked_node(const Output& data, - std::int32_t time_step, - std::size_t batch_axis = 0, - const Output& default_value = Output()) const; - - OutputVector lstm_pass(bool is_reverse = false) const; - - // Split(bi-directional) and squeeze input data to remove 'num_direction' dimension. 
- std::shared_ptr prepare_input(Output node, bool is_reverse, size_t num_direction_axis = 0) const; - - std::vector m_activations_alpha; - std::vector m_activations_beta; - std::vector m_activations; - float m_clip_threshold; - direction m_direction; - std::int64_t m_hidden_size; - bool m_input_forget; - LSTMWeightsFormat m_weights_format; -}; +using ov::op::v0::LSTMSequence; } // namespace v0 namespace v5 { -/// -/// \brief Class for lstm sequence node. -/// -/// \note It follows notation and equations defined as in ONNX standard: -/// https://github.com/onnx/onnx/blob/master/docs/Operators.md#LSTM -/// -/// \sa LSTMCell, RNNCell, GRUCell -/// -/// -class NGRAPH_API LSTMSequence : public util::RNNCellBase { -public: - NGRAPH_RTTI_DECLARATION; - LSTMSequence() = default; - - using direction = RecurrentSequenceDirection; - - size_t get_default_output_index() const override { - return no_default_index(); - } - explicit LSTMSequence(const Output& X, - const Output& initial_hidden_state, - const Output& initial_cell_state, - const Output& sequence_lengths, - const Output& W, - const Output& R, - const Output& B, - const std::int64_t hidden_size, - const direction lstm_direction, - const std::vector& activations_alpha = {}, - const std::vector& activations_beta = {}, - const std::vector& activations = {"sigmoid", "tanh", "tanh"}, - const float clip = 0.f) - : RNNCellBase({X, initial_hidden_state, initial_cell_state, sequence_lengths, W, R, B}, - hidden_size, - clip, - activations, - activations_alpha, - activations_beta), - m_direction(lstm_direction) { - constructor_validate_and_infer_types(); - } - - void validate_and_infer_types() override; - bool visit_attributes(AttributeVisitor& visitor) override; - - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - direction get_direction() const { - return m_direction; - } - -private: - direction m_direction; -}; +using ov::op::v5::LSTMSequence; } // namespace v5 } // namespace op - } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/parameter.hpp b/ngraph/core/include/ngraph/op/parameter.hpp index 9ac3feb7b8faf8..26a7bcbf08c348 100644 --- a/ngraph/core/include/ngraph/op/parameter.hpp +++ b/ngraph/core/include/ngraph/op/parameter.hpp @@ -5,78 +5,14 @@ #pragma once #include "ngraph/op/op.hpp" +#include "openvino/op/parameter.hpp" namespace ngraph { namespace op { namespace v0 { -/// \brief A function parameter. -/// -/// Parameters are nodes that represent the arguments that will be passed to -/// user-defined functions. Function creation requires a sequence of parameters. -/// Basic graph operations do not need parameters attached to a function. -class NGRAPH_API Parameter : public op::Op { -public: - NGRAPH_RTTI_DECLARATION; - /// \brief Constructions a tensor-typed parameter node. - Parameter() = default; - /// \brief Constructions a tensor-typed parameter node. - /// - /// \param element_type The element type of the parameter. - /// \param pshape The partial shape of the parameter. 
- Parameter(const ngraph::element::Type& element_type, const PartialShape& pshape); - - bool visit_attributes(AttributeVisitor& visitor) override; - - void validate_and_infer_types() override; - - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - bool is_relevant_to_shapes() const; - void set_is_relevant_to_shapes(bool is_relevant); - - const PartialShape& get_partial_shape() const { - return m_partial_shape; - } - PartialShape& get_partial_shape() { - return m_partial_shape; - } - void set_partial_shape(const PartialShape& partial_shape) { - m_partial_shape = partial_shape; - } - const element::Type& get_element_type() const { - return m_element_type; - } - void set_element_type(const element::Type& element_type) { - m_element_type = element_type; - } - -protected: - PartialShape m_partial_shape; - element::Type m_element_type; - bool m_is_relevant_to_shapes{false}; -}; +using ov::op::v0::Parameter; } // namespace v0 using v0::Parameter; } // namespace op using ParameterVector = std::vector>; } // namespace ngraph - -namespace ov { - -template <> -class NGRAPH_API AttributeAdapter : public VisitorAdapter { -public: - AttributeAdapter(ngraph::ParameterVector& ref); - - bool visit_attributes(AttributeVisitor& visitor) override; - - static constexpr DiscreteTypeInfo type_info{"AttributeAdapter", 0}; - const DiscreteTypeInfo& get_type_info() const override { - return type_info; - } - -protected: - ngraph::ParameterVector& m_ref; -}; - -} // namespace ov diff --git a/ngraph/core/include/ngraph/op/result.hpp b/ngraph/core/include/ngraph/op/result.hpp index da47ff4b83fb3d..4eb82338023253 100644 --- a/ngraph/core/include/ngraph/op/result.hpp +++ b/ngraph/core/include/ngraph/op/result.hpp @@ -7,62 +7,14 @@ #include #include "ngraph/op/op.hpp" +#include "openvino/op/result.hpp" namespace ngraph { namespace op { namespace v0 { -class NGRAPH_API Result : public Op { -public: - NGRAPH_RTTI_DECLARATION; - - /// \brief Allows a value to be used as a function result. - Result() = default; - /// \brief Allows a value to be used as a function result. - /// - /// \param arg Node that produces the input tensor. 
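Both aliases keep the usual graph-boundary pattern compiling unchanged; a small sketch (Relu stands in for any op, shapes are illustrative):

auto arg = std::make_shared<ngraph::op::v0::Parameter>(ngraph::element::f32,
                                                       ngraph::PartialShape{1, 3, 224, 224});
auto relu = std::make_shared<ngraph::opset1::Relu>(arg);
auto res = std::make_shared<ngraph::op::v0::Result>(relu);  // now ov::op::v0::Result
auto f = std::make_shared<ngraph::Function>(ngraph::ResultVector{res},
                                            ngraph::ParameterVector{arg});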
- Result(const Output& arg, bool needs_default_layout = false); - - bool visit_attributes(AttributeVisitor& visitor) override; - void validate_and_infer_types() override; - - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - void set_needs_default_layout(bool val) { - m_needs_default_layout = val; - } - bool needs_default_layout() const { - return m_needs_default_layout; - } - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; - bool constant_fold(OutputVector& output_values, const OutputVector& inputs_values) override; - -private: - bool m_needs_default_layout{false}; -}; +using ov::op::v0::Result; } // namespace v0 - using v0::Result; } // namespace op using ResultVector = std::vector>; } // namespace ngraph - -namespace ov { - -template <> -class NGRAPH_API AttributeAdapter : public VisitorAdapter { -public: - AttributeAdapter(ngraph::ResultVector& ref); - - bool visit_attributes(AttributeVisitor& visitor) override; - - static constexpr DiscreteTypeInfo type_info{"AttributeAdapter", 0}; - const DiscreteTypeInfo& get_type_info() const override { - return type_info; - } - -protected: - ngraph::ResultVector& m_ref; -}; - -} // namespace ov diff --git a/ngraph/core/include/ngraph/op/tensor_iterator.hpp b/ngraph/core/include/ngraph/op/tensor_iterator.hpp index 405710363d4eba..528d2394bcfcdd 100644 --- a/ngraph/core/include/ngraph/op/tensor_iterator.hpp +++ b/ngraph/core/include/ngraph/op/tensor_iterator.hpp @@ -9,35 +9,12 @@ #include "ngraph/function.hpp" #include "ngraph/op/parameter.hpp" #include "ngraph/op/util/sub_graph_base.hpp" +#include "openvino/op/tensor_iterator.hpp" namespace ngraph { namespace op { namespace v0 { -/// \brief Iterate a body over tensors, accumulating into tensors. -class NGRAPH_API TensorIterator : public op::util::SubGraphOp { -public: - NGRAPH_RTTI_DECLARATION; - - bool visit_attributes(AttributeVisitor& visitor) override; - - TensorIterator() = default; - explicit TensorIterator(const OutputVector& values); - - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - /// \return the body of the iteration - std::shared_ptr get_body() const { - return m_bodies[0]; - } - /// \param body set the body of the iteration - void set_body(const std::shared_ptr& body) { - set_function(body); - } - void validate_and_infer_types() override; - void revalidate_and_infer_types_for_body_ops(); - -private: - void try_to_set_num_iterations_if_no_slice_inputs(); -}; +using ov::op::v0::TensorIterator; } // namespace v0 using v0::TensorIterator; } // namespace op diff --git a/ngraph/core/include/openvino/core/node.hpp b/ngraph/core/include/openvino/core/node.hpp index 641d774ba62618..aab675dc62f45f 100644 --- a/ngraph/core/include/openvino/core/node.hpp +++ b/ngraph/core/include/openvino/core/node.hpp @@ -42,19 +42,15 @@ namespace runtime { class HostTensor; } // namespace runtime -namespace op { - -namespace v0 { -class Result; -} // namespace v0 -} // namespace op - } // namespace ngraph namespace ov { namespace op { +namespace v0 { +class Result; +} // namespace v0 struct AutoBroadcastSpec; -} +} // namespace op namespace pass { namespace pattern { class Matcher; @@ -76,7 +72,7 @@ class Node; /// environment) for evaluating ngraph::function. 
using EvaluationContext = std::map>; -using ResultVector = std::vector>; +using ResultVector = std::vector>; OPENVINO_API std::string node_validation_failure_loc_string(const Node* node); diff --git a/ngraph/core/include/openvino/op/gather.hpp b/ngraph/core/include/openvino/op/gather.hpp new file mode 100644 index 00000000000000..f9546ffc08b912 --- /dev/null +++ b/ngraph/core/include/openvino/op/gather.hpp @@ -0,0 +1,80 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/util/gather_base.hpp" + +namespace ov { +namespace op { +namespace v1 { +/// \brief Gather slices from axis of data according to indices +class OPENVINO_API Gather : public op::util::GatherBase { +public: + OPENVINO_RTTI_DECLARATION; + static const int64_t AXIS_NOT_SET_VALUE = std::numeric_limits::max(); + Gather() = default; + /// \param data The tensor from which slices are gathered + /// \param indices Tensor with indexes to gather + /// \param axis The tensor is a dimension index to gather data from + Gather(const Output& params, const Output& indices, const Output& axis); + + bool visit_attributes(AttributeVisitor& visitor) override; + int64_t get_axis() const override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; +}; +} // namespace v1 + +namespace v7 { +/// \brief Gather slices from axis of data according to indices +class OPENVINO_API Gather : public op::util::GatherBase { +public: + OPENVINO_RTTI_DECLARATION; + Gather() = default; + + /// \param data The tensor from which slices are gathered + /// \param indices Tensor with indexes to gather + /// \param axis The tensor is a dimension index to gather data from + /// \param batch_dims The number of batch dimension in data and indices tensors. + /// If batch_dims = 0 Gather v7 is identical to Gather v1. + Gather(const Output& data, + const Output& indices, + const Output& axis, + const int64_t batch_dims = 0); + + bool visit_attributes(AttributeVisitor& visitor) override; + void validate_and_infer_types() override; + int64_t get_batch_dims() const; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; +}; +} // namespace v7 + +namespace v8 { +/// \brief Gather slices from axis of data according to indices. Negative indices +/// are supported and indicate reverse indexing from the end +class OPENVINO_API Gather : public op::util::GatherBase { +public: + OPENVINO_RTTI_DECLARATION; + Gather() = default; + + /// \param data The tensor from which slices are gathered + /// \param indices Tensor with indexes to gather + /// \param axis The tensor is a dimension index to gather data from + /// \param batch_dims The number of batch dimension in data and indices tensors. 
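A worked sketch of batch_dims: with batch_dims = 1 the leading dimension of data and indices is treated as a batch, and gathering happens independently per batch element (the values below are illustrative):

// data    [2, 5] = {{0, 1, 2, 3, 4}, {5, 6, 7, 8, 9}}
// indices [2, 2] = {{0, 4}, {1, 3}}, axis = 1, batch_dims = 1
// result  [2, 2] = {{0, 4}, {6, 8}}
auto data = std::make_shared<ov::op::v0::Parameter>(ngraph::element::f32, ngraph::Shape{2, 5});
auto indices = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i32, ngraph::Shape{2, 2});
auto axis = ngraph::opset1::Constant::create(ngraph::element::i64, ngraph::Shape{}, {1});
auto gather = std::make_shared<ov::op::v8::Gather>(data, indices, axis, 1);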
+ Gather(const Output& data, + const Output& indices, + const Output& axis, + const int64_t batch_dims = 0); + + bool visit_attributes(AttributeVisitor& visitor) override; + void validate_and_infer_types() override; + int64_t get_batch_dims() const; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; +}; +} // namespace v8 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/gather_elements.hpp b/ngraph/core/include/openvino/op/gather_elements.hpp new file mode 100644 index 00000000000000..ae00119bbed3e9 --- /dev/null +++ b/ngraph/core/include/openvino/op/gather_elements.hpp @@ -0,0 +1,39 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v6 { +/// \brief GatherElements operation +/// +class OPENVINO_API GatherElements : public Op { +public: + OPENVINO_RTTI_DECLARATION; + GatherElements() = default; + + /// \brief Constructs a GatherElements operation. + /// + /// \param data Node producing data that are gathered + /// \param indices Node producing indices by which the operation gathers elements + /// \param axis specifies axis along which indices are specified + GatherElements(const Output& data, const Output& indices, const int64_t axis); + + void validate_and_infer_types() override; + bool visit_attributes(AttributeVisitor& visitor) override; + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + int64_t get_axis() const { + return m_axis; + } + +private: + int64_t m_axis; +}; +} // namespace v6 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/gather_nd.hpp b/ngraph/core/include/openvino/op/gather_nd.hpp new file mode 100644 index 00000000000000..82fcd2b7ac9f20 --- /dev/null +++ b/ngraph/core/include/openvino/op/gather_nd.hpp @@ -0,0 +1,40 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v5 { +/// \brief GatherND operation +/// +class OPENVINO_API GatherND : public Op { +public: + OPENVINO_RTTI_DECLARATION; + GatherND() = default; + + /// \brief Constructs a GatherND operation. + /// + /// \param data Node producing data that are gathered + /// \param indices Node producing indices by which the operation gathers elements + /// or slices from data + /// \param batch_dims Specifies a number of batch dimensions + GatherND(const Output& data, const Output& indices, const size_t batch_dims = 0); + + void validate_and_infer_types() override; + bool visit_attributes(AttributeVisitor& visitor) override; + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + size_t get_batch_dims() const { + return m_batch_dims; + } + +private: + size_t m_batch_dims; +}; +} // namespace v5 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/gather_tree.hpp b/ngraph/core/include/openvino/op/gather_tree.hpp new file mode 100644 index 00000000000000..e6f2828d9cd5df --- /dev/null +++ b/ngraph/core/include/openvino/op/gather_tree.hpp @@ -0,0 +1,38 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "ngraph/op/op.hpp" + +namespace ov { +namespace op { +namespace v1 { +/// \brief Generates the complete beams from the ids per each step and the parent beam +/// ids. 
+class OPENVINO_API GatherTree : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + GatherTree() = default; + /// \param step_ids Tensor of shape [MAX_TIME, BATCH_SIZE, BEAM_WIDTH] with + /// indices from per each step + /// \param parent_idx Tensor of shape [MAX_TIME, BATCH_SIZE, BEAM_WIDTH] with + /// parent beam indices + /// \param max_seq_len Tensor of shape [BATCH_SIZE] with maximum lengths for each + /// sequence in the batch + /// \param end_token Tensor of shape [MAX_TIME, BATCH_SIZE, BEAM_WIDTH] + GatherTree(const Output& step_ids, + const Output& parent_idx, + const Output& max_seq_len, + const Output& end_token); + + bool visit_attributes(AttributeVisitor& visitor) override; + void validate_and_infer_types() override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; +}; +} // namespace v1 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/gelu.hpp b/ngraph/core/include/openvino/op/gelu.hpp new file mode 100644 index 00000000000000..f8b4ed66a4a252 --- /dev/null +++ b/ngraph/core/include/openvino/op/gelu.hpp @@ -0,0 +1,81 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" +#include "openvino/op/util/unary_elementwise_arithmetic.hpp" + +namespace ov { +namespace op { +namespace v0 { +/// \brief Gaussian Error Linear Unit +/// f(x) = 0.5 * x * (1 + erf( x / sqrt(2) ) +class OPENVINO_API Gelu : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + Gelu(); + /// \brief Constructs a Gelu operation. + /// + /// \param data Input tensor + Gelu(const Output& data); + + bool visit_attributes(AttributeVisitor& visitor) override; + + void validate_and_infer_types() override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; +}; +} // namespace v0 + +/// \brief Specifies the approximation to calculate Gelu +enum class GeluApproximationMode { TANH, ERF }; +OPENVINO_API std::ostream& operator<<(std::ostream& s, const GeluApproximationMode& type); + +namespace v7 { +/// \brief Gaussian Error Linear Unit +/// f(x) = 0.5 * x * (1 + erf( x / sqrt(2) ) for "approximation" = "erf" +/// f(x) = 0.5 * x * (1 + tanh([sqrt(2 / pi)] * [x + 0.044715^3]) for "approximation" = +/// "tanh" +class OPENVINO_API Gelu : public util::UnaryElementwiseArithmetic { +public: + OPENVINO_RTTI_DECLARATION; + + Gelu() = default; + /// \brief Constructs a Gelu operation. 
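Written out in full (the brief above drops the x in the cubic term): erf mode computes 0.5 * x * (1 + erf(x / sqrt(2))) and tanh mode computes 0.5 * x * (1 + tanh(sqrt(2 / pi) * (x + 0.044715 * x^3))). A minimal construction sketch for the two modes:

auto x = std::make_shared<ov::op::v0::Parameter>(ngraph::element::f32, ngraph::Shape{1, 128});
auto gelu_erf = std::make_shared<ov::op::v7::Gelu>(x);   // ERF is the default mode
auto gelu_tanh = std::make_shared<ov::op::v7::Gelu>(x, ov::op::GeluApproximationMode::TANH);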
+ /// + /// \param data Input tensor + /// \param mode Approximation mode + Gelu(const Output& data, GeluApproximationMode mode = GeluApproximationMode::ERF); + + bool visit_attributes(AttributeVisitor& visitor) override; + + void validate_and_infer_types() override; + + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + GeluApproximationMode get_approximation_mode() const; + +private: + GeluApproximationMode m_approximation_mode = GeluApproximationMode::ERF; +}; +} // namespace v7 +} // namespace op + +template <> +class OPENVINO_API AttributeAdapter + : public EnumAttributeAdapterBase { +public: + AttributeAdapter(op::GeluApproximationMode& value) : EnumAttributeAdapterBase(value) {} + + static constexpr DiscreteTypeInfo type_info{"AttributeAdapter", 0}; + const DiscreteTypeInfo& get_type_info() const override { + return type_info; + } +}; +} // namespace ov diff --git a/ngraph/core/include/openvino/op/greater.hpp b/ngraph/core/include/openvino/op/greater.hpp new file mode 100644 index 00000000000000..1edfcafa34f3ad --- /dev/null +++ b/ngraph/core/include/openvino/op/greater.hpp @@ -0,0 +1,33 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/util/binary_elementwise_comparison.hpp" + +namespace ov { +namespace op { +namespace v1 { +/// \brief Elementwise greater-than operation. +class OPENVINO_API Greater : public util::BinaryElementwiseComparison { +public: + OPENVINO_RTTI_DECLARATION; + /// \brief Constructs a greater-than operation. + Greater() : util::BinaryElementwiseComparison(AutoBroadcastSpec::NUMPY) {} + /// \brief Constructs a greater-than operation. + /// + /// \param arg0 Node that produces the first input tensor. + /// \param arg1 Node that produces the second input tensor. + /// \param auto_broadcast Auto broadcast specification + Greater(const Output& arg0, + const Output& arg1, + const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY)); + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v1 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/greater_eq.hpp b/ngraph/core/include/openvino/op/greater_eq.hpp new file mode 100644 index 00000000000000..7ce10d2f70d4a7 --- /dev/null +++ b/ngraph/core/include/openvino/op/greater_eq.hpp @@ -0,0 +1,33 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/util/binary_elementwise_comparison.hpp" + +namespace ov { +namespace op { +namespace v1 { +/// \brief Elementwise greater-than-or-equal operation. +class OPENVINO_API GreaterEqual : public util::BinaryElementwiseComparison { +public: + OPENVINO_RTTI_DECLARATION; + /// \brief Constructs a greater-than-or-equal operation. + GreaterEqual() : util::BinaryElementwiseComparison(AutoBroadcastSpec::NUMPY) {} + /// \brief Constructs a greater-than-or-equal operation. + /// + /// \param arg0 Node that produces the first input tensor. + /// \param arg1 Node that produces the second input tensor. 
+ /// \param auto_broadcast Auto broadcast specification + GreaterEqual(const Output& arg0, + const Output& arg1, + const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY)); + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v1 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/grn.hpp b/ngraph/core/include/openvino/op/grn.hpp new file mode 100644 index 00000000000000..d151f908aa9fb8 --- /dev/null +++ b/ngraph/core/include/openvino/op/grn.hpp @@ -0,0 +1,41 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v0 { +/// \brief Global Response Normalization with L2 norm (across channels only). +/// +class OPENVINO_API GRN : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + GRN() = default; + /// \brief Constructs a GRN operation. + /// + /// \param data - Node producing the input tensor + /// \param bias - The bias added to the variance. + /// + GRN(const Output& data, float bias); + + void validate_and_infer_types() override; + bool visit_attributes(AttributeVisitor& visitor) override; + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + float get_bias() const { + return m_bias; + } + +protected: + float m_bias = 1.0f; +}; +} // namespace v0 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/group_conv.hpp b/ngraph/core/include/openvino/op/group_conv.hpp new file mode 100644 index 00000000000000..b2001468eabd0d --- /dev/null +++ b/ngraph/core/include/openvino/op/group_conv.hpp @@ -0,0 +1,273 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/convolution.hpp" +#include "openvino/op/op.hpp" +#include "openvino/op/util/attr_types.hpp" + +namespace ov { +namespace op { +namespace v1 { +/// \brief Batched convolution operation, with optional window dilation and stride. +class OPENVINO_API GroupConvolution : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + /// \brief Constructs a batched convolution operation. + GroupConvolution() = default; + /// \brief Constructs a batched convolution operation. + /// + /// \param data_batch The node producing the input data batch tensor.
+ /// `[N, C_IN, D1, ... Df]` + /// \param filters The node producing the filters tensor.
+ /// `[GROUPS, FC_OUT, FC_IN, F1, ... Ff]` + /// \param strides The strides.
+ /// `[f]` + /// \param dilations The dilations.
+ /// `[f]` + /// \param pads_begin The padding added at the beginning of each spatial axis.
+ /// `[f]` + /// \param pads_end The padding added at the end of each spatial axis.
+ /// `[f]` + /// \param auto_pad The pad type for automatically computing padding sizes.
+ /// `[f]` + /// + /// Output `[N, FC_OUT * GROUPS, R1, ... Rf]` + /// + GroupConvolution(const Output& data_batch, + const Output& filters, + const Strides& strides, + const CoordinateDiff& pads_begin, + const CoordinateDiff& pads_end, + const Strides& dilations, + const PadType& auto_pad = PadType::EXPLICIT); + + bool visit_attributes(AttributeVisitor& visitor) override; + void validate_and_infer_types() override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + /// \return The strides. + const Strides& get_strides() const { + return m_strides; + } + void set_strides(const Strides& strides) { + m_strides = strides; + } + /// \return The dilations. + const Strides& get_dilations() const { + return m_dilations; + } + void set_dilations(const Strides& dilations) { + m_dilations = dilations; + } + /// \return The padding-below sizes (possibly negative). + const CoordinateDiff& get_pads_begin() const { + return m_pads_begin; + } + void set_pads_begin(const CoordinateDiff& pads_begin) { + m_pads_begin = pads_begin; + } + /// \return The padding-above sizes (possibly negative). + const CoordinateDiff& get_pads_end() const { + return m_pads_end; + } + void set_adding_above(const CoordinateDiff& pads_end) { + m_pads_end = pads_end; + } + /// \return The pad type for convolution. + const PadType& get_auto_pad() const { + return m_auto_pad; + } + void set_auto_pad(const PadType& auto_pad) { + m_auto_pad = auto_pad; + } + /// \return The default value for Convolution. + OPENVINO_SUPPRESS_DEPRECATED_START + std::shared_ptr get_default_value() const override; + OPENVINO_SUPPRESS_DEPRECATED_END + +protected: + Strides m_strides; + Strides m_dilations; + CoordinateDiff m_pads_begin; + CoordinateDiff m_pads_end; + PadType m_auto_pad; +}; + +/// \brief Data batch backprop for batched convolution operation. +class OPENVINO_API GroupConvolutionBackpropData : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + /// \brief Constructs a batched-convolution data batch-backprop operation. + GroupConvolutionBackpropData(); + // clang-format off + // + // \brief Constructs a batched-convolution data batch-backprop operation. + // + // \param data The node producing data from forward-prop. Shape: [N, + // C_INPUT * GROUPS, X1, ..., XD]. + // \param filter The node producing the filter from forward-prop. Shape: + // [GROUPS, C_INPUT, C_OUTPUT, K_D, ..., K_1] + // \param output_shape The shape of the data batch from forward-prop. It's size + // should be equal to number of data spatial dimensions. + // \param strides The strides from forward-prop. + // \param pads_begin The padding-below sizes from forward-prop. + // \param pads_end The padding-above sizes from forward-prop. + // \param dilations The dilations from forward-prop. + // \param auto_pad The pad type for automatically computing padding sizes. + // \param output_padding The output padding adds additional amount of paddings per + // each spatial axis in the output tensor. + // + // clang-format on + // + GroupConvolutionBackpropData(const Output& data, + const Output& filter, + const Output& output_shape, + const Strides& strides, + const CoordinateDiff& pads_begin, + const CoordinateDiff& pads_end, + const Strides& dilations, + const PadType& auto_pad = PadType::EXPLICIT, + const CoordinateDiff& output_padding = {}); + + // clang-format off + // + // \brief Constructs a batched-convolution data batch-backprop operation. + // + // \param data The node producing data from forward-prop. 
Shape: [N, + // C_INPUT * GROUPS, X1, ..., XD]. + // \param filter The node producing the filter from forward-prop. Shape: + // [GROUPS, C_INPUT, C_OUTPUT, K_D, ..., K_1] + // \param output_shape The shape of the data batch from forward-prop. It's size + // should be equal to number of data spatial dimensions. + // \param strides The strides from forward-prop. + // \param dilations The dilations from forward-prop. + // \param auto_pad The pad type for automatically computing padding sizes. + // \param output_padding The output padding adds additional amount of paddings per + // each spatial axis in the output tensor. + // + // clang-format on + // + GroupConvolutionBackpropData(const Output& data, + const Output& filter, + const Output& output_shape, + const Strides& strides, + const Strides& dilations, + const PadType& auto_pad, + const CoordinateDiff& output_padding = {}); + + // clang-format off + // + // \brief Constructs a batched-convolution data batch-backprop operation. + // + // \param data The node producing data from forward-prop. Shape: + // [N, C_INPUT * GROUPS, X1, ..., XD]. + // \param filter The node producing the filter from forward-prop. Shape: + // [GROUPS, C_INPUT, C_OUTPUT, K_D, ..., K_1] + // \param strides The strides from forward-prop. + // \param pads_begin The padding-below sizes from forward-prop. + // \param pads_end The padding-above sizes from forward-prop. + // \param dilations The dilations from forward-prop. + // \param auto_pad The pad type for automatically computing padding sizes. + // \param output_padding The output padding adds additional amount of paddings per + // each spatial axis in the output tensor. + // + // clang-format on + GroupConvolutionBackpropData(const Output& data, + const Output& filter, + const Strides& strides, + const CoordinateDiff& pads_begin, + const CoordinateDiff& pads_end, + const Strides& dilations, + const PadType& auto_pad = PadType::EXPLICIT, + const CoordinateDiff& output_padding = {}); + /// + /// \brief Calculates output spatial features size. + /// + /// \param[in] input_data_shape The input data partial shape + /// \param[in] filters_shape The filters partial shape + /// \param[in] strides The strides values. + /// \param[in] dilations The dilations values. + /// \param[in] pads_begin The paddings at the beginning of axis. + /// \param[in] pads_end The paddings at the end of axis. + /// \param[in] output_padding The output padding values. + /// \param output_spatial_shape The placeholder for computed output spatial + /// partial + /// shape. + /// + void infer_conv_backprop_output_spatial_shape(const std::vector& input_data_shape, + const std::vector& filters_shape, + const Strides& strides, + const Strides& dilations, + const CoordinateDiff& pads_begin, + const CoordinateDiff& pads_end, + const CoordinateDiff& output_padding, + std::vector& output_spatial_shape); + + bool visit_attributes(AttributeVisitor& visitor) override; + bool is_dynamic() const override; + void validate_and_infer_types() override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + /// \return The spatial shape of the output. + const PartialShape get_convolution_output_shape() const; + void set_output_shape(const ngraph::Shape& output_shape); + /// \return The strides from the forward prop. + const Strides& get_strides() const { + return m_strides; + } + void set_strides(const Strides& strides) { + m_strides = strides; + } + /// \return The dilations from the forward prop. 
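The infer_conv_backprop_output_spatial_shape helper above fills output_spatial_shape from the usual transposed-convolution relation; a standalone sketch of that arithmetic, assuming explicit static pads (an illustration, not a copy of the member):

#include <cstdint>

// out = stride * (in - 1) + dilation * (filter - 1) + 1 - pads_begin - pads_end + output_padding
int64_t backprop_spatial_dim(int64_t in, int64_t filter, int64_t stride, int64_t dilation,
                             int64_t pad_begin, int64_t pad_end, int64_t out_pad) {
    return stride * (in - 1) + dilation * (filter - 1) + 1 - pad_begin - pad_end + out_pad;
}
// e.g. in = 5, filter = 3, stride = 2, dilation = 1, pads = 1/1, out_pad = 0 -> 9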
+ const Strides& get_dilations() const { + return m_dilations; + } + void set_dilations(const Strides& dilations) { + m_dilations = dilations; + } + /// \return The number of pixels to add to the beginning along each axis. + const CoordinateDiff& get_pads_begin() const { + return m_pads_begin; + } + void set_pads_begin(const CoordinateDiff& pads_begin) { + m_pads_begin = pads_begin; + } + /// \return The number of pixels to add to the ending along each axis. + const CoordinateDiff& get_pads_end() const { + return m_pads_end; + } + void set_pads_end(const CoordinateDiff& pads_end) { + m_pads_end = pads_end; + } + /// \return The auto pad. + const PadType& get_auto_pad() const { + return m_auto_pad; + } + void set_auto_pad(const PadType& auto_pad) { + m_auto_pad = auto_pad; + } + /// \return The output padding. + const CoordinateDiff& get_output_padding() const { + return m_output_padding; + } + void set_output_padding(const CoordinateDiff& output_padding) { + m_output_padding = output_padding; + } + +protected: + Strides m_strides; + Strides m_dilations; + CoordinateDiff m_pads_begin; + CoordinateDiff m_pads_end; + PadType m_auto_pad; + CoordinateDiff m_output_padding; +}; +} // namespace v1 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/gru_cell.hpp b/ngraph/core/include/openvino/op/gru_cell.hpp new file mode 100644 index 00000000000000..f417b3bb05c701 --- /dev/null +++ b/ngraph/core/include/openvino/op/gru_cell.hpp @@ -0,0 +1,160 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include +#include +#include + +#include "openvino/op/op.hpp" +#include "openvino/op/util/activation_functions.hpp" +#include "openvino/op/util/rnn_cell_base.hpp" + +namespace ov { +namespace op { +namespace v3 { +/// +/// \brief Class for GRU cell node. +/// +/// \note Note this class represents only single *cell* and not whole GRU *layer*. +/// +class OPENVINO_API GRUCell : public util::RNNCellBase { +public: + OPENVINO_RTTI_DECLARATION; + GRUCell(); + /// + /// \brief Constructs GRUCell node. + /// + /// \param[in] X The input tensor with shape: [batch_size, + /// input_size]. + /// \param[in] initial_hidden_state The hidden state tensor at current time step + /// with shape: [batch_size, hidden_size]. + /// \param[in] W The weight tensor with shape: + /// [gates_count * hidden_size, input_size]. + /// \param[in] R The recurrence weight tensor with shape: + /// [gates_count * hidden_size, hidden_size]. + /// \param[in] hidden_size The number of hidden units for recurrent cell. + /// + GRUCell(const Output& X, + const Output& initial_hidden_state, + const Output& W, + const Output& R, + std::size_t hidden_size); + + /// + /// \brief Constructs GRUCell node. + /// + /// \param[in] X The input tensor with shape: [batch_size, + /// input_size]. + /// \param[in] initial_hidden_state The hidden state tensor at current time step + /// with shape: [batch_size, hidden_size]. + /// \param[in] W The weight tensor with shape: + /// [gates_count * hidden_size, input_size]. + /// \param[in] R The recurrence weight tensor with shape: + /// [gates_count * hidden_size, hidden_size]. + /// \param[in] hidden_size The number of hidden units for recurrent cell. + /// \param[in] activations The vector of activation functions used inside + /// recurrent cell. + /// \param[in] activations_alpha The vector of alpha parameters for activation + /// functions in order respective to activation + /// list. 
+ /// \param[in] activations_beta The vector of beta parameters for activation + /// functions in order respective to activation + /// list. + /// \param[in] clip The value defining clipping range [-clip, + /// clip] on input of activation functions. + /// + GRUCell(const Output& X, + const Output& initial_hidden_state, + const Output& W, + const Output& R, + std::size_t hidden_size, + const std::vector& activations, + const std::vector& activations_alpha, + const std::vector& activations_beta, + float clip, + bool linear_before_reset); + + /// + /// \brief Constructs GRUCell node. + /// + /// \param[in] X The input tensor with shape: [batch_size, + /// input_size]. + /// \param[in] initial_hidden_state The hidden state tensor at current time step + /// with shape: [batch_size, hidden_size]. + /// \param[in] W The weight tensor with shape: [gates_count * + /// hidden_size, input_size]. + /// \param[in] R The recurrence weight tensor with shape: + /// [gates_count * hidden_size, hidden_size]. + /// \param[in] hidden_size The number of hidden units for recurrent cell. + /// \param[in] B The sum of biases (weight and recurrence) for + /// update, reset and hidden gates. + /// If linear_before_reset := true then biases for + /// hidden gates are + /// placed separately (weight and recurrence). + /// Shape: [gates_count * hidden_size] if + /// linear_before_reset := false + /// Shape: [(gates_count + 1) * hidden_size] if + /// linear_before_reset := true + /// \param[in] activations The vector of activation functions used inside + /// recurrent cell. + /// \param[in] activations_alpha The vector of alpha parameters for activation + /// functions in order respective to activation + /// list. + /// \param[in] activations_beta The vector of beta parameters for activation + /// functions in order respective to activation + /// list. + /// \param[in] clip The value defining clipping range [-clip, + /// clip] on input of activation functions. + /// \param[in] linear_before_reset Whether or not to apply the linear + /// transformation before multiplying by the + /// output of the reset gate. + /// + GRUCell(const Output& X, + const Output& initial_hidden_state, + const Output& W, + const Output& R, + const Output& B, + std::size_t hidden_size, + const std::vector& activations = std::vector{"sigmoid", "tanh"}, + const std::vector& activations_alpha = {}, + const std::vector& activations_beta = {}, + float clip = 0.f, + bool linear_before_reset = false); + + void validate_and_infer_types() override; + bool visit_attributes(AttributeVisitor& visitor) override; + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + bool get_linear_before_reset() const { + return m_linear_before_reset; + } + +private: + /// brief Add and initialize bias input to all zeros. + void add_default_bias_input(); + + /// + /// \brief The Activation function f. + /// + util::ActivationFunction m_activation_f; + /// + /// \brief The Activation function g. + /// + util::ActivationFunction m_activation_g; + + static constexpr std::size_t s_gates_count{3}; + /// + /// \brief Control whether or not apply the linear transformation. + /// + /// \note The linear transformation may be applied when computing the output of + /// hidden gate. It's done before multiplying by the output of the reset gate. 
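+    /// As an illustration (following the ONNX GRU definition, which this flag
+    /// mirrors): with linear_before_reset = true the hidden gate is computed as
+    ///   ht = g(Xt*(Wh^T) + (rt (.) (Ht-1*(Rh^T) + Rbh)) + Wbh),
+    /// while with linear_before_reset = false it is
+    ///   ht = g(Xt*(Wh^T) + (rt (.) Ht-1)*(Rh^T) + Rbh + Wbh).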
+ /// + bool m_linear_before_reset; +}; +} // namespace v3 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/gru_sequence.hpp b/ngraph/core/include/openvino/op/gru_sequence.hpp new file mode 100644 index 00000000000000..3421d5d31a4cf1 --- /dev/null +++ b/ngraph/core/include/openvino/op/gru_sequence.hpp @@ -0,0 +1,54 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include +#include + +#include "openvino/op/op.hpp" +#include "openvino/op/util/rnn_cell_base.hpp" + +namespace ov { +namespace op { +namespace v5 { +class OPENVINO_API GRUSequence : public util::RNNCellBase { +public: + OPENVINO_RTTI_DECLARATION; + GRUSequence(); + + GRUSequence(const Output& X, + const Output& H_t, + const Output& sequence_lengths, + const Output& W, + const Output& R, + const Output& B, + size_t hidden_size, + op::RecurrentSequenceDirection direction, + const std::vector& activations = std::vector{"sigmoid", "tanh"}, + const std::vector& activations_alpha = {}, + const std::vector& activations_beta = {}, + float clip = 0.f, + bool linear_before_reset = false); + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + void validate_and_infer_types() override; + + bool visit_attributes(AttributeVisitor& visitor) override; + bool get_linear_before_reset() const { + return m_linear_before_reset; + } + op::RecurrentSequenceDirection get_direction() const { + return m_direction; + } + +protected: + op::RecurrentSequenceDirection m_direction; + bool m_linear_before_reset; +}; +} // namespace v5 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/hard_sigmoid.hpp b/ngraph/core/include/openvino/op/hard_sigmoid.hpp new file mode 100644 index 00000000000000..bea9b15b0b1a4e --- /dev/null +++ b/ngraph/core/include/openvino/op/hard_sigmoid.hpp @@ -0,0 +1,35 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v0 { +/// \brief Parameterized, bounded sigmoid-like, piecewise linear +/// function. min(max(alpha*x + beta, 0), 1) +/// +class OPENVINO_API HardSigmoid : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + HardSigmoid(); + + /// \brief Constructs a HardSigmoid operation. + /// + /// \param data Input tensor. + /// \param[in] alpha A scalar value representing the alpha parameter. + /// \param[in] beta A scalar value representing the beta parameter. 
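+    /// A minimal construction sketch (names and values are illustrative);
+    /// alpha and beta are passed as scalar nodes, e.g. Constants:
+    ///   auto data = std::make_shared<v0::Parameter>(element::f32, Shape{2, 3});
+    ///   auto alpha = v0::Constant::create(element::f32, Shape{}, {0.2f});
+    ///   auto beta = v0::Constant::create(element::f32, Shape{}, {0.5f});
+    ///   auto node = std::make_shared<v0::HardSigmoid>(data, alpha, beta);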
+ /// + HardSigmoid(const Output& data, const Output& alpha, const Output& beta); + + bool visit_attributes(AttributeVisitor& visitor) override; + void validate_and_infer_types() override; + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; +}; +} // namespace v0 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/hsigmoid.hpp b/ngraph/core/include/openvino/op/hsigmoid.hpp new file mode 100644 index 00000000000000..4b7c891724d9a1 --- /dev/null +++ b/ngraph/core/include/openvino/op/hsigmoid.hpp @@ -0,0 +1,35 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" +#include "openvino/op/util/unary_elementwise_arithmetic.hpp" + +namespace ov { +namespace op { +namespace v5 { +/// \brief A HSigmoid Activation Function +/// f(x) = min(max(x + 3, 0), 6) / 6 or +/// f(x) = min(ReLU(x + 3), 6) / 6 +/// +class OPENVINO_API HSigmoid : public util::UnaryElementwiseArithmetic { +public: + OPENVINO_RTTI_DECLARATION; + HSigmoid() = default; + + /// \brief Constructs a HSigmoid operation. + /// + /// \param data Input tensor + HSigmoid(const Output& arg); + + bool visit_attributes(AttributeVisitor& visitor) override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v5 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/hswish.hpp b/ngraph/core/include/openvino/op/hswish.hpp new file mode 100644 index 00000000000000..7c64232eaddbf7 --- /dev/null +++ b/ngraph/core/include/openvino/op/hswish.hpp @@ -0,0 +1,35 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" +#include "openvino/op/util/unary_elementwise_arithmetic.hpp" + +namespace ov { +namespace op { +namespace v4 { +/// \brief A HSwish Activation Function +/// f(x) = x * min(max(x + 3, 0), 6) / 6 or +/// f(x) = x * min(ReLU(x + 3), 6) / 6 +/// +class OPENVINO_API HSwish : public util::UnaryElementwiseArithmetic { +public: + OPENVINO_RTTI_DECLARATION; + HSwish() = default; + + /// \brief Constructs a HSwish (hard version of Swish) operation. + /// + /// \param data Input tensor + HSwish(const Output& arg); + + bool visit_attributes(AttributeVisitor& visitor) override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v4 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/idft.hpp b/ngraph/core/include/openvino/op/idft.hpp new file mode 100644 index 00000000000000..cf0352c679bd34 --- /dev/null +++ b/ngraph/core/include/openvino/op/idft.hpp @@ -0,0 +1,41 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include + +#include "openvino/op/op.hpp" +#include "openvino/op/util/fft_base.hpp" + +namespace ov { +namespace op { +namespace v7 { +/// \brief An operation IDFT that computes the inverse discrete Fourier transformation. +class OPENVINO_API IDFT : public util::FFTBase { +public: + OPENVINO_RTTI_DECLARATION; + IDFT() = default; + + /// \brief Constructs a IDFT operation. IDFT is performed for full size axes. 
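+    /// For example, for a complex 2-D signal laid out as [H, W, 2] (the last
+    /// dimension holding the real and imaginary parts), axes {0, 1} applies the
+    /// inverse transform to both spatial dimensions.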
+ /// + /// \param data Input data + /// \param axes Axes to perform IDFT + IDFT(const Output& data, const Output& axes); + + /// \brief Constructs a IDFT operation. + /// + /// \param data Input data + /// \param axes Axes to perform IDFT + /// \param signal_size Signal sizes for 'axes' + IDFT(const Output& data, const Output& axes, const Output& signal_size); + + bool visit_attributes(AttributeVisitor& visitor) override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; +}; +} // namespace v7 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/if.hpp b/ngraph/core/include/openvino/op/if.hpp new file mode 100644 index 00000000000000..f262a0e71794da --- /dev/null +++ b/ngraph/core/include/openvino/op/if.hpp @@ -0,0 +1,94 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +#include "openvino/core/function.hpp" +#include "openvino/op/parameter.hpp" +#include "openvino/op/util/multi_subgraph_base.hpp" + +namespace ov { +namespace op { +namespace v8 { +/// \brief If operation. +class OPENVINO_API If : public util::MultiSubGraphOp { +public: + enum BodyIndexes { THEN_BODY_INDEX = 0, ELSE_BODY_INDEX = 1 }; + + OPENVINO_RTTI_DECLARATION; + bool visit_attributes(AttributeVisitor& visitor) override; + + /// \brief Constructs If with condition + /// + /// \param execution_condition condition node. + If(const Output& execution_condition); + If(); + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + /// \brief gets then_body as ngraph::Function. + /// + /// \return then_body as ngraph::Function. + const std::shared_ptr& get_then_body() const { + return m_bodies[THEN_BODY_INDEX]; + } + + /// \brief gets else_body as ngraph::Function. + /// + /// \return else_body as ngraph::Function. + const std::shared_ptr& get_else_body() const { + return m_bodies[ELSE_BODY_INDEX]; + } + + /// \brief sets new ngraph::Function as new then_body. + /// + /// \param body new body for 'then' branch. + void set_then_body(const std::shared_ptr& body) { + m_bodies[THEN_BODY_INDEX] = body; + } + + /// \brief sets new ngraph::Function as new else_body. + /// + /// \param body new body for 'else' branch. 
+    void set_else_body(const std::shared_ptr<Function>& body) {
+        m_bodies[ELSE_BODY_INDEX] = body;
+    }
+
+    /// \brief sets new input to the operation associated with a parameter
+    /// of each sub-graph
+    ///
+    /// \param value input to operation
+    /// \param then_parameter parameter for then_body or nullptr
+    /// \param else_parameter parameter for else_body or nullptr
+    void set_input(const Output<Node>& value,
+                   const std::shared_ptr<v0::Parameter>& then_parameter,
+                   const std::shared_ptr<v0::Parameter>& else_parameter);
+
+    /// \brief sets new output from the operation associated with a result
+    /// of each sub-graph
+    ///
+    /// \param then_result result from then_body
+    /// \param else_result result from else_body
+    /// \return output from operation
+    Output<Node> set_output(const std::shared_ptr<v0::Result>& then_result,
+                            const std::shared_ptr<v0::Result>& else_result);
+
+    void validate_and_infer_types() override;
+    bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
+
+    bool has_evaluate() const override;
+
+private:
+    using OutputMap = std::map<int, Output<Node>>;
+
+    void validate_and_infer_type_body(const std::shared_ptr<Function>& body,
+                                      const MultiSubgraphInputDescriptionVector& input_descriptors);
+
+    OutputMap get_mapping_outputs_on_body_description(const MultiSubgraphOutputDescriptionVector& output_descriptors);
+};
+} // namespace v8
+} // namespace op
+} // namespace ov
diff --git a/ngraph/core/include/openvino/op/interpolate.hpp b/ngraph/core/include/openvino/op/interpolate.hpp
new file mode 100644
index 00000000000000..f66f1c226f1ef3
--- /dev/null
+++ b/ngraph/core/include/openvino/op/interpolate.hpp
@@ -0,0 +1,360 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <cstdint>
+#include <vector>
+
+#include "openvino/core/attribute_adapter.hpp"
+#include "openvino/op/op.hpp"
+#include "openvino/op/util/attr_types.hpp"
+
+namespace ov {
+namespace op {
+namespace v0 {
+
+/// \brief Layer which performs bilinear interpolation
+class OPENVINO_API Interpolate : public Op {
+public:
+    OPENVINO_RTTI_DECLARATION;
+    /// \brief Structure that specifies attributes for interpolation
+    struct Attributes {
+        // specify dimension indices where interpolation is applied, and `axes` is any
+        // unordered list of indices of different dimensions of input tensor. Required.
+        AxisSet axes;
+        // specifies type of interpolation
+        // one of `nearest`, `linear`, `cubic`, `area`. Required.
+        std::string mode;
+        // a flag that specifies whether to align corners or not.
+        // `true` (default) means the alignment is applied,
+        // `false` means the alignment isn't applied.
+        bool align_corners = true;
+        // a flag that specifies whether to perform anti-aliasing. default is `false`
+        bool antialias = false;
+        // specify the number of pixels to add to the beginning of the image being
+        // interpolated. This addition of pixels is done before interpolation calculation.
+        std::vector<size_t> pads_begin;
+        // specify the number of pixels to add to the end of the image being interpolated.
+        // This addition of pixels is done before interpolation calculation.
+ std::vector pads_end; + }; + + enum class InterpolateMode { + NEAREST, + LINEAR, + CUBIC, + AREA, + nearest OPENVINO_ENUM_DEPRECATED("Please use NEAREST instead") = NEAREST, + linear OPENVINO_ENUM_DEPRECATED("Please use LINEAR instead") = LINEAR, + cubic OPENVINO_ENUM_DEPRECATED("Please use CUBIC instead") = CUBIC, + area OPENVINO_ENUM_DEPRECATED("Please use AREA instead") = AREA + }; + + Interpolate() = default; + /// \brief Constructs a Interpolate operation + /// + /// \param image Input image + /// \param output_shape Output shape of spatial axes + /// \param attrs Interpolation attributes + Interpolate(const Output& image, const Output& output_shape, const Attributes& attrs); + bool visit_attributes(AttributeVisitor& visitor) override; + + void validate_and_infer_types() override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + const Attributes& get_attrs() const { + return m_attrs; + } + +private: + Attributes m_attrs; +}; +} // namespace v0 + +namespace v4 { +class OPENVINO_API Interpolate : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + /// \brief Shape calculation mode + /// + /// sizes - output shape for interpolated axes is calculated using input `sizes` + /// scales - output shape for interpolated axes is calculated using input `scales` + enum class ShapeCalcMode { + SIZES, + SCALES, + sizes OPENVINO_ENUM_DEPRECATED("Please use SIZES instead") = SIZES, + scales OPENVINO_ENUM_DEPRECATED("Please use SCALES instead") = SCALES + }; + + /// \brief Interpolation mode + /// + /// nearest - nearest interpolation + /// linear - linear interpolation as in TensorFlow + /// linear_onnx - linear interpolation as in ONNX + /// cubic - cubic interpolation + enum class InterpolateMode { + NEAREST, + LINEAR, + LINEAR_ONNX, + CUBIC, + nearest OPENVINO_ENUM_DEPRECATED("Please use NEAREST instead") = NEAREST, + linear OPENVINO_ENUM_DEPRECATED("Please use LINEAR instead") = LINEAR, + linear_onnx OPENVINO_ENUM_DEPRECATED("Please use LINEAR_ONNX instead") = LINEAR_ONNX, + cubic OPENVINO_ENUM_DEPRECATED("Please use CUBIC instead") = CUBIC + }; + + /// \brief Mode of the calculation of the source coordinate from resized one + /// + /// These modes are modes from ONNX runtime. + enum class CoordinateTransformMode { + HALF_PIXEL, + PYTORCH_HALF_PIXEL, + ASYMMETRIC, + TF_HALF_PIXEL_FOR_NN, + ALIGN_CORNERS, + half_pixel OPENVINO_ENUM_DEPRECATED("Please use HALF_PIXEL instead") = HALF_PIXEL, + pytorch_half_pixel OPENVINO_ENUM_DEPRECATED("Please use PYTORCH_HALF_PIXEL instead") = PYTORCH_HALF_PIXEL, + asymmetric OPENVINO_ENUM_DEPRECATED("Please use ASYMMETRIC instead") = ASYMMETRIC, + tf_half_pixel_for_nn OPENVINO_ENUM_DEPRECATED("Please use TF_HALF_PIXEL_FOR_NN instead") = TF_HALF_PIXEL_FOR_NN, + align_corners OPENVINO_ENUM_DEPRECATED("Please use ALIGN_CORNERS instead") = ALIGN_CORNERS + }; + + /// \brief Round modes for the nearest interpolation. 
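+    /// For example, for a computed source coordinate of 2.5, ROUND_PREFER_FLOOR
+    /// selects pixel 2 while ROUND_PREFER_CEIL selects pixel 3; FLOOR and CEIL
+    /// always round toward the respective direction.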
+ enum class NearestMode { + ROUND_PREFER_FLOOR, + ROUND_PREFER_CEIL, + FLOOR, + CEIL, + SIMPLE, + round_prefer_floor OPENVINO_ENUM_DEPRECATED("Please use ROUND_PREFER_FLOOR instead") = ROUND_PREFER_FLOOR, + round_prefer_ceil OPENVINO_ENUM_DEPRECATED("Please use ROUND_PREFER_CEIL instead") = ROUND_PREFER_CEIL, + floor OPENVINO_ENUM_DEPRECATED("Please use FLOOR instead") = FLOOR, + ceil OPENVINO_ENUM_DEPRECATED("Please use CEIL instead") = CEIL, + simple OPENVINO_ENUM_DEPRECATED("Please use SIMPLE instead") = SIMPLE + }; + + struct InterpolateAttrs { + // specifies type of interpolation + // one of `nearest`, `linear`, `linear_onnx`, `cubic` Required. + InterpolateMode mode = InterpolateMode::NEAREST; + // specifies shape calculation mode + // one of `sizes`, `scales` Required + ShapeCalcMode shape_calculation_mode = ShapeCalcMode::SIZES; + // specify the number of pixels to add to the beginning of the image being + // interpolated. This addition of pixels is done before interpolation + // calculation. + std::vector pads_begin; + // specify the number of pixels to add to the end of the image being + // interpolated. This addition of pixels is done before interpolation + // calculation. + std::vector pads_end; + // specifies how to transform the coordinate in the resized tensor to the + // coordinate in the original tensor. one of `half_pixel`, `pytorch_half_pixel`, + // `asymmetric`, `tf_half_pixel_for_nn`, `align_corners` + CoordinateTransformMode coordinate_transformation_mode = CoordinateTransformMode::HALF_PIXEL; + // specifies round mode when `mode == nearest` and is used only when `mode == + // nearest`. one of `round_prefer_floor`, `round_prefer_ceil`, `floor`, `ceil`, + // `simple` + NearestMode nearest_mode = NearestMode::ROUND_PREFER_FLOOR; + // a flag that specifies whether to perform anti-aliasing. default is `false` + bool antialias = false; + // specifies the parameter *a* for cubic interpolation (see, e.g. + // [article](https://ieeexplore.ieee.org/document/1163711/)). *cube_coeff* is + // used only when `mode == cubic` + double cube_coeff = -0.75f; + + InterpolateAttrs() = default; + + InterpolateAttrs(InterpolateMode mode, + ShapeCalcMode shape_calculation_mode, + const std::vector& pads_begin, + const std::vector& pads_end, + CoordinateTransformMode coordinate_transformation_mode = CoordinateTransformMode::HALF_PIXEL, + NearestMode nearest_mode = NearestMode::ROUND_PREFER_FLOOR, + bool antialias = false, + double cube_coeff = -0.75) + : mode(mode), + shape_calculation_mode(shape_calculation_mode), + pads_begin(pads_begin), + pads_end(pads_end), + coordinate_transformation_mode(coordinate_transformation_mode), + nearest_mode(nearest_mode), + antialias(antialias), + cube_coeff(cube_coeff) {} + }; + + Interpolate() = default; + /// \brief Constructs a Interpolate operation without 'axes' input. + /// + /// \param image Input image + /// \param output_shape Output shape of spatial axes + /// \param scales Scales of spatial axes, i.e. output_shape / input_shape + /// \param attrs Interpolation attributes + Interpolate(const Output& image, + const Output& output_shape, + const Output& scales, + const InterpolateAttrs& attrs); + + /// \brief Constructs a Interpolate operation with 'axes' input. + /// + /// \param image Input image + /// \param output_shape Output shape of spatial axes + /// \param scales Scales of spatial axes, i.e. 
output_shape / input_shape
+    /// \param axes Interpolation axes
+    /// \param attrs Interpolation attributes
+    Interpolate(const Output<Node>& image,
+                const Output<Node>& output_shape,
+                const Output<Node>& scales,
+                const Output<Node>& axes,
+                const InterpolateAttrs& attrs);
+    bool visit_attributes(AttributeVisitor& visitor) override;
+
+    void validate_and_infer_types() override;
+
+    std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
+    bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
+    bool has_evaluate() const override;
+
+    const InterpolateAttrs& get_attrs() const {
+        return m_attrs;
+    }
+
+protected:
+    /// \return The interpolation axes.
+    std::vector<int64_t> get_axes() const;
+
+private:
+    bool evaluate_interpolate(const HostTensorVector& outputs, const HostTensorVector& inputs) const;
+    InterpolateAttrs m_attrs;
+
+    /// \brief Corrects pads_begin and pads_end attributes.
+    ///
+    /// \details When Interpolate-4 is a result of some transformation, it is possible
+    ///          that pads_begin.size() != pads_end.size() or
+    ///          pads_begin.size() != input_rank. In such case, we should correct
+    ///          pads_begin and pads_end, using padding of pads_begin and pads_end by
+    ///          zeros or using pads_begin[0 : input_rank], pads_end[0 : input_rank].
+    ///
+    ///          Padding of pads_begin is performed when pads_begin.size() < input_rank,
+    ///          and pads_begin[0 : input_rank] is used when
+    ///          pads_begin.size() > input_rank.
+    ///
+    ///          Similarly for pads_end.
+    void correct_pads();
+
+    /// \brief Calculates input shape after padding.
+    ///
+    /// \param input_shape Shape of input data.
+    ///
+    /// \return Padded input shape, i.e. input_shape + pads_begin + pads_end
+    PartialShape get_padded_input_shape(const PartialShape& input_shape) const;
+
+    /// \brief Infers output shape using scales.
+    ///
+    /// \param output_shape[in,out] output shape
+    /// \param axes Interpolation axes
+    /// \param scales Scales for interpolated axes
+    /// \param padded_input_shape input shape after padding
+    void infer_using_scales(PartialShape& output_shape,
+                            const std::vector<int64_t>& axes,
+                            const std::vector<float>& scales,
+                            const PartialShape& padded_input_shape) const;
+
+    /// \brief Infers output shape using sizes.
+ /// + /// \param output_shape[in,out] output shape + /// \param axes Interpolation axes + /// \param sizes sizes for interpolated axes + void infer_using_shapes(PartialShape& output_shape, + const std::vector& axes, + const std::vector& sizes) const; +}; +} // namespace v4 +} // namespace op + +//---------------------------------------- v0 -------------------------------------------------- +OPENVINO_API +std::ostream& operator<<(std::ostream& s, const op::v0::Interpolate::InterpolateMode& type); + +//---------------------------------------- v4 -------------------------------------------------- + +OPENVINO_API +std::ostream& operator<<(std::ostream& s, const op::v4::Interpolate::InterpolateMode& type); + +OPENVINO_API +std::ostream& operator<<(std::ostream& s, const op::v4::Interpolate::CoordinateTransformMode& type); + +OPENVINO_API +std::ostream& operator<<(std::ostream& s, const op::v4::Interpolate::NearestMode& type); + +OPENVINO_API +std::ostream& operator<<(std::ostream& s, const op::v4::Interpolate::ShapeCalcMode& type); + +template <> +class OPENVINO_API AttributeAdapter + : public EnumAttributeAdapterBase { +public: + AttributeAdapter(op::v0::Interpolate::InterpolateMode& value) + : EnumAttributeAdapterBase(value) {} + + static constexpr DiscreteTypeInfo type_info{"AttributeAdapter", 0}; + const DiscreteTypeInfo& get_type_info() const override { + return type_info; + } +}; +template <> +class OPENVINO_API AttributeAdapter + : public EnumAttributeAdapterBase { +public: + AttributeAdapter(op::v4::Interpolate::InterpolateMode& value) + : EnumAttributeAdapterBase(value) {} + + static constexpr DiscreteTypeInfo type_info{"AttributeAdapter", 4}; + const DiscreteTypeInfo& get_type_info() const override { + return type_info; + } +}; + +template <> +class OPENVINO_API AttributeAdapter + : public EnumAttributeAdapterBase { +public: + AttributeAdapter(op::v4::Interpolate::CoordinateTransformMode& value) + : EnumAttributeAdapterBase(value) {} + + static constexpr DiscreteTypeInfo type_info{"AttributeAdapter", 4}; + const DiscreteTypeInfo& get_type_info() const override { + return type_info; + } +}; + +template <> +class OPENVINO_API AttributeAdapter + : public EnumAttributeAdapterBase { +public: + AttributeAdapter(op::v4::Interpolate::NearestMode& value) + : EnumAttributeAdapterBase(value) {} + + static constexpr DiscreteTypeInfo type_info{"AttributeAdapter", 4}; + const DiscreteTypeInfo& get_type_info() const override { + return type_info; + } +}; + +template <> +class OPENVINO_API AttributeAdapter + : public EnumAttributeAdapterBase { +public: + AttributeAdapter(op::v4::Interpolate::ShapeCalcMode& value) + : EnumAttributeAdapterBase(value) {} + + static constexpr DiscreteTypeInfo type_info{"AttributeAdapter", 4}; + const DiscreteTypeInfo& get_type_info() const override { + return type_info; + } +}; +} // namespace ov diff --git a/ngraph/core/include/openvino/op/less.hpp b/ngraph/core/include/openvino/op/less.hpp new file mode 100644 index 00000000000000..19f8919216a1a2 --- /dev/null +++ b/ngraph/core/include/openvino/op/less.hpp @@ -0,0 +1,33 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/util/binary_elementwise_comparison.hpp" + +namespace ov { +namespace op { +namespace v1 { +/// \brief Elementwise less-than operation. +class OPENVINO_API Less : public util::BinaryElementwiseComparison { +public: + OPENVINO_RTTI_DECLARATION; + /// \brief Constructs a less-than operation. 
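+    /// A usage sketch (illustrative): comparing a tensor with a broadcast scalar
+    /// under the default NUMPY rules:
+    ///   auto a = std::make_shared<v0::Parameter>(element::f32, Shape{2, 3});
+    ///   auto zero = v0::Constant::create(element::f32, Shape{}, {0.f});
+    ///   auto mask = std::make_shared<v1::Less>(a, zero);  // element type: boolean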
+ Less() : util::BinaryElementwiseComparison(AutoBroadcastSpec::NUMPY) {} + /// \brief Constructs a less-than operation. + /// + /// \param arg0 Node that produces the first input tensor. + /// \param arg1 Node that produces the second input tensor. + /// \param auto_broadcast Auto broadcast specification + Less(const Output& arg0, + const Output& arg1, + const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY)); + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v1 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/less_eq.hpp b/ngraph/core/include/openvino/op/less_eq.hpp new file mode 100644 index 00000000000000..b18c84bef4f3c7 --- /dev/null +++ b/ngraph/core/include/openvino/op/less_eq.hpp @@ -0,0 +1,34 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/util/binary_elementwise_comparison.hpp" + +namespace ov { +namespace op { +namespace v1 { +/// \brief Elementwise less-than-or-equal operation. +class OPENVINO_API LessEqual : public util::BinaryElementwiseComparison { +public: + OPENVINO_RTTI_DECLARATION; + /// \brief Constructs a less-than-or-equal operation. + LessEqual() : util::BinaryElementwiseComparison(AutoBroadcastSpec::NUMPY) {} + + /// \brief Constructs a less-than-or-equal operation. + /// + /// \param arg0 Node that produces the first input tensor. + /// \param arg1 Node that produces the second input tensor. + /// \param auto_broadcast Auto broadcast specification + LessEqual(const Output& arg0, + const Output& arg1, + const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY)); + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v1 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/log.hpp b/ngraph/core/include/openvino/op/log.hpp new file mode 100644 index 00000000000000..da3ff95949b91a --- /dev/null +++ b/ngraph/core/include/openvino/op/log.hpp @@ -0,0 +1,30 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "ngraph/op/util/unary_elementwise_arithmetic.hpp" + +namespace ov { +namespace op { +namespace v0 { +/// \brief Elementwise natural log operation. +class OPENVINO_API Log : public util::UnaryElementwiseArithmetic { +public: + OPENVINO_RTTI_DECLARATION; + /// \brief Constructs a natural log operation. + Log() = default; + /// \brief Constructs a natural log operation. + /// + /// \param arg Node that produces the input tensor. 
+ Log(const Output& arg); + + bool visit_attributes(AttributeVisitor& visitor) override; + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v0 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/log_softmax.hpp b/ngraph/core/include/openvino/op/log_softmax.hpp new file mode 100644 index 00000000000000..b737d0815414f6 --- /dev/null +++ b/ngraph/core/include/openvino/op/log_softmax.hpp @@ -0,0 +1,43 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "ngraph/op/op.hpp" + +namespace ov { +namespace op { +namespace v5 { +class OPENVINO_API LogSoftmax : public Op { +public: + OPENVINO_RTTI_DECLARATION; + LogSoftmax() = default; + /// \brief Constructs a LogSoftmax operation. + /// + /// \param arg Node that produces the first input tensor.
+ /// `[d0, ...]` + /// \param axis The axis position (0-based) on which to calculate the LogSoftmax. + /// + /// Output `[d0, ...]` + /// + LogSoftmax(const Output& arg, const int64_t axis); + + bool visit_attributes(AttributeVisitor& visitor) override; + void validate_and_infer_types() override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + int64_t get_axis() const { + return m_axis; + } + void set_axis(const int64_t axis) { + m_axis = axis; + } + +private: + int64_t m_axis = 1; +}; +} // namespace v5 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/loop.hpp b/ngraph/core/include/openvino/op/loop.hpp new file mode 100644 index 00000000000000..c19c53a7b2cb15 --- /dev/null +++ b/ngraph/core/include/openvino/op/loop.hpp @@ -0,0 +1,90 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +#include "openvino/core/function.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/parameter.hpp" +#include "openvino/op/util/sub_graph_base.hpp" + +namespace ov { +namespace op { +namespace v5 { +/// \brief Iterate a body over tensors, accumulating into tensors. +class NGRAPH_API Loop : public op::util::SubGraphOp { +public: + /// \brief Allows to define the purpose of inputs/outputs in the body + struct SpecialBodyPorts { + SpecialBodyPorts() = default; + SpecialBodyPorts(int64_t in_current_iteration_input_idx, int64_t in_body_condition_output_idx) + : current_iteration_input_idx(in_current_iteration_input_idx), + body_condition_output_idx(in_body_condition_output_idx) {} + // -1 means the input is not provided, this input is optional + int64_t current_iteration_input_idx = -1; + // -1 means the output is not provided, + // this output is required, throw an exception if not provided + int64_t body_condition_output_idx = -1; + }; + + NGRAPH_RTTI_DECLARATION; + + /// \brief Constructs a Loop operation. + Loop() = default; + + /// \brief Constructs a Loop operation. + /// + /// \param trip_count Node specifies the maximum number of iterations. + /// \param execution_condition Node determines whether to execute the first + /// iteration or not. 
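+    /// A typical wiring sketch (illustrative; `body`, `trip_count`, `exec_cond`,
+    /// `body_param`, `initial_value` and `body_result` are assumed to exist):
+    ///   auto loop = std::make_shared<v5::Loop>(trip_count, exec_cond);
+    ///   loop->set_function(body);
+    ///   // no current-iteration input, body condition is body output 0
+    ///   loop->set_special_body_ports({-1, 0});
+    ///   loop->set_invariant_input(body_param, initial_value);
+    ///   auto out = loop->get_iter_value(body_result, -1);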
+ Loop(const Output& trip_count, const Output& execution_condition); + + Output get_concatenated_slices(const Output& value, + int64_t start, + int64_t stride, + int64_t part_size, + int64_t end, + int64_t axis) override; + + void set_special_body_ports(const SpecialBodyPorts& special_body_ports) { + m_special_body_ports = special_body_ports; + } + + SpecialBodyPorts get_special_body_ports() const { + return m_special_body_ports; + } + void validate_and_infer_types() override; + bool visit_attributes(AttributeVisitor& visitor) override; + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; + +protected: + Loop(const Loop&); + +private: + void clone_to(Loop& dst, const OutputVector& new_args) const; + + SpecialBodyPorts m_special_body_ports; +}; +} // namespace v5 +} // namespace op + +template <> +class NGRAPH_API AttributeAdapter + : public DirectValueAccessor { +public: + AttributeAdapter(op::v5::Loop::SpecialBodyPorts& value) + : DirectValueAccessor(value) {} + + static constexpr DiscreteTypeInfo type_info{"AttributeAdapter", 0}; + const DiscreteTypeInfo& get_type_info() const override { + return type_info; + } +}; + +} // namespace ov diff --git a/ngraph/core/include/openvino/op/lrn.hpp b/ngraph/core/include/openvino/op/lrn.hpp new file mode 100644 index 00000000000000..adf837cbd49b17 --- /dev/null +++ b/ngraph/core/include/openvino/op/lrn.hpp @@ -0,0 +1,78 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "ngraph/op/op.hpp" + +namespace ov { +namespace op { +namespace v0 { +// clang-format off +/// \brief Elementwise Local Response Normalization (LRN) operation. +/// +/// ## Inputs +/// +/// | | Type | Description | +/// | ----- | --------------------------------------- | ----------------------------------------------- | +/// | `arg` | \f$N[n, c, d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and numeric element type. | +/// +/// ## Output +/// +/// | Type | Description | +/// | ---------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +/// | \f$N[n, c, d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[n, c, d_1,\dots,d_n] = \frac{N[n,i,d_1,\dots,d_n]}{ (bias + alpha * (\sum_{i=max(0,(nsize-1)/2)}^{min(C, (nsize-1)/2)+1} N[n,i,d_1,\dots,d_n]^{2}) ^ {2})}\f$ | +// clang-format on +class NGRAPH_API LRN : public Op { +public: + NGRAPH_RTTI_DECLARATION; + + /// \brief Constructs a LRN operation. + LRN() = default; + /// \brief Constructs a LRN operation. + /// + /// \param arg Node that produces the input tensor. 
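+    /// \param alpha Multiplicative scaling coefficient for the summed squares.
+    /// \param beta Exponent applied to the normalization denominator.
+    /// \param bias Additive offset in the denominator (the `k` term).
+    /// \param size Number of adjacent channels summed over.
+    ///
+    /// E.g. an AlexNet-style setting (values are illustrative):
+    ///   auto lrn = std::make_shared<v0::LRN>(arg, 1e-4, 0.75, 2.0, 5);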
+ LRN(const Output& arg, double alpha, double beta, double bias, size_t size); + + LRN(const Output& arg, const Output& axes, double alpha, double beta, double bias, size_t size); + + bool visit_attributes(AttributeVisitor& visitor) override; + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + void validate_and_infer_types() override; + + double get_alpha() const { + return m_alpha; + } + void set_alpha(double alpha) { + m_alpha = alpha; + } + double get_beta() const { + return m_beta; + } + void set_beta(double beta) { + m_beta = beta; + } + double get_bias() const { + return m_bias; + } + void set_bias(double bias) { + m_bias = bias; + } + size_t get_nsize() const { + return m_size; + } + void set_nsize(size_t size) { + m_size = size; + } + AxisSet get_reduction_axes() const; + +protected: + double m_alpha; + double m_beta; + double m_bias; + size_t m_size; +}; +} // namespace v0 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/lstm_cell.hpp b/ngraph/core/include/openvino/op/lstm_cell.hpp new file mode 100644 index 00000000000000..77946d2f0fc7d6 --- /dev/null +++ b/ngraph/core/include/openvino/op/lstm_cell.hpp @@ -0,0 +1,397 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include +#include +#include + +#include "openvino/op/op.hpp" +#include "openvino/op/util/activation_functions.hpp" +#include "openvino/op/util/rnn_cell_base.hpp" + +namespace ov { +namespace op { +enum class LSTMWeightsFormat { + FICO, // IE + ICOF, // PyTorch + IFCO, // DNNL, TF, MxNet + IFOC, // Caffe + IOFC, // ONNX +}; + +namespace v0 { +/// +/// \brief Class for single lstm cell node. +/// +/// \note Following implementation supports: +/// \li \c peepholes Gers & Schmidhuber (2000) +/// https://ieeexplore.ieee.org/document/861302 +/// \li Coupling input and forget gates. +/// +/// \note It calculates following equations: +/// +/// it = f(Xt*(Wi^T) + Ht-1*(Ri^T) + Pi (.) Ct-1 + Wbi + Rbi) +/// ft = f(Xt*(Wf^T) + Ht-1*(Rf^T) + Pf (.) Ct-1 + Wbf + Rbf) +/// ct = g(Xt*(Wc^T) + Ht-1*(Rc^T) + Wbc + Rbc) +/// Ct = ft (.) Ct-1 + it (.) ct +/// ot = f(Xt*(Wo^T) + Ht-1*(Ro^T) + Po (.) Ct + Wbo + Rbo) +/// Ht = ot (.) h(Ct) +/// +/// * - Is a dot product, +/// (.) - is a Hadamard product (element-wise), +/// f, g, h - are activation functions. +/// +/// \note This class represents only single *cell* (for current time step) and not +/// the whole LSTM Sequence layer +/// +/// \sa LSTMSequence, RNNCell, GRUCell +/// +class OPENVINO_API LSTMCell : public util::RNNCellBase { +public: + OPENVINO_RTTI_DECLARATION; + + LSTMCell(); + /// + /// \brief Constructs LSTMCell node. + /// + /// \param[in] X The input tensor with shape: [batch_size, + /// input_size]. + /// \param[in] initial_hidden_state The hidden state tensor at current time step + /// with shape: [batch_size, hidden_size]. + /// \param[in] initial_cell_state The cell state tensor at current time step + /// with shape: [batch_size, hidden_size]. + /// \param[in] W The gate weights tensor with shape: + /// [4*hidden_size, input_size]. + /// \param[in] R The recurrence weights tensor with shape: + /// [4*hidden_size, hidden_size]. + /// \param[in] hidden_size The number of hidden units for recurrent cell. + /// \param[in] weights_format The order of gates in weights tensors. The + /// default format is IFCO since it is used by + /// DNNL. 
+ /// \param[in] activations The vector of activation functions used inside + /// recurrent cell. + /// \param[in] activations_alpha The vector of alpha parameters for activation + /// functions in order respective to activation + /// list. + /// \param[in] activations_beta The vector of beta parameters for activation + /// functions in order respective to activation + /// list. + /// \param[in] clip The value defining clipping range [-clip, + /// clip] on input of activation functions. + /// \param[in] input_forget Controls coupling input and forget gates. + /// + LSTMCell(const Output& X, + const Output& initial_hidden_state, + const Output& initial_cell_state, + const Output& W, + const Output& R, + std::size_t hidden_size, + LSTMWeightsFormat weights_format = LSTMWeightsFormat::IFCO, + const std::vector& activations = std::vector{"sigmoid", "tanh", "tanh"}, + const std::vector& activations_alpha = {}, + const std::vector& activations_beta = {}, + float clip = 0.f, + bool input_forget = false); + + /// + /// \brief Constructs LSTMCell node. + /// + /// \param[in] X The input tensor with shape: [batch_size, + /// input_size]. + /// \param[in] initial_hidden_state The hidden state tensor at current time step + /// with shape: [batch_size, hidden_size]. + /// \param[in] initial_cell_state The cell state tensor at current time step + /// with shape: [batch_size, hidden_size]. + /// \param[in] W The weight tensor with shape: [4*hidden_size, + /// input_size]. + /// \param[in] R The recurrence weight tensor with shape: + /// [4*hidden_size, hidden_size]. + /// \param[in] B The bias tensor for gates with shape: + /// [4*hidden_size]. + /// \param[in] hidden_size The number of hidden units for recurrent cell. + /// \param[in] weights_format The order of gates in weights tensors. The + /// default format is IFCO since it is used by + /// DNNL. + /// \param[in] activations The vector of activation functions used inside + /// recurrent cell. + /// \param[in] activations_alpha The vector of alpha parameters for activation + /// functions in order respective to activation + /// list. + /// \param[in] activations_beta The vector of beta parameters for activation + /// functions in order respective to activation + /// list. + /// \param[in] clip The value defining clipping range [-clip, + /// clip] on input of activation functions. + /// \param[in] input_forget Controls coupling input and forget gates. + /// + LSTMCell(const Output& X, + const Output& initial_hidden_state, + const Output& initial_cell_state, + const Output& W, + const Output& R, + const Output& B, + std::size_t hidden_size, + LSTMWeightsFormat weights_format = LSTMWeightsFormat::IFCO, + const std::vector& activations = std::vector{"sigmoid", "tanh", "tanh"}, + const std::vector& activations_alpha = {}, + const std::vector& activations_beta = {}, + float clip = 0.f, + bool input_forget = false); + + /// + /// \brief Constructs LSTMCell node. + /// + /// \param[in] X The input tensor with shape: [batch_size, + /// input_size]. + /// \param[in] initial_hidden_state The hidden state tensor at current time step + /// with shape: [batch_size, hidden_size]. + /// \param[in] initial_cell_state The cell state tensor at current time step + /// with shape: [batch_size, hidden_size]. + /// \param[in] W The weight tensor with shape: [4*hidden_size, + /// input_size]. + /// \param[in] R The recurrence weight tensor with shape: + /// [4*hidden_size, hidden_size]. + /// \param[in] B The bias tensor for gates with shape: + /// [4*hidden_size]. 
+ /// \param[in] P The weight tensor for peepholes with shape: + /// [3*hidden_size] - 3 equals to only iof gates. + /// The order is: input, output, forget gates. + /// \param[in] hidden_size The number of hidden units for recurrent cell. + /// \param[in] weights_format The order of gates in weights tensors. The + /// default format is IFCO since it is used by + /// DNNL. + /// \param[in] activations The vector of activation functions used inside + /// recurrent cell. + /// \param[in] activations_alpha The vector of alpha parameters for activation + /// functions in order respective to activation + /// list. + /// \param[in] activations_beta The vector of beta parameters for activation + /// functions in order respective to activation + /// list. + /// \param[in] clip The value defining clipping range [-clip, + /// clip] on input of activation functions. + /// \param[in] input_forget Controls coupling input and forget gates. + /// + LSTMCell(const Output& X, + const Output& initial_hidden_state, + const Output& initial_cell_state, + const Output& W, + const Output& R, + const Output& B, + const Output& P, + std::size_t hidden_size, + LSTMWeightsFormat weights_format = LSTMWeightsFormat::IFCO, + const std::vector& activations = std::vector{"sigmoid", "tanh", "tanh"}, + const std::vector& activations_alpha = {}, + const std::vector& activations_beta = {}, + float clip = 0.f, + bool input_forget = false); + + void validate_and_infer_types() override; + bool visit_attributes(AttributeVisitor& visitor) override; + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + bool get_input_forget() const { + return m_input_forget; + } + LSTMWeightsFormat get_weights_format() const { + return m_weights_format; + } + +private: + /// + /// \brief Creates the default bias input initialized with zeros. + /// + /// \return The object of Output class. + /// + Output get_default_bias_input() const; + + /// + /// \brief Creates the default peepholes input initialized with zeros. + /// + /// \return The object of Output class. + /// + Output get_default_peepholes_input() const; + /// + /// \brief The Activation function f. + /// + util::ActivationFunction m_activation_f; + /// + /// \brief The Activation function g. + /// + util::ActivationFunction m_activation_g; + /// + /// \brief The Activation function h. + /// + util::ActivationFunction m_activation_h; + /// + /// \brief Controls whether to couple input and forget gates. + /// + bool m_input_forget = false; + + /// + /// \brief The order of gates in weights tensors. + /// + LSTMWeightsFormat m_weights_format; + + static constexpr std::size_t s_gates_count{4}; + static constexpr std::size_t s_peepholes_count{3}; +}; +} // namespace v0 + +namespace v4 { +/// +/// \brief Class for single lstm cell node. +/// +/// \note Following implementation supports: +/// \li \c peepholes Gers & Schmidhuber (2000) +/// https://ieeexplore.ieee.org/document/861302 +/// \li Coupling input and forget gates. +/// +/// \note It calculates following equations: +/// +/// it = f(Xt*(Wi^T) + Ht-1*(Ri^T) + Wbi + Rbi) +/// ft = f(Xt*(Wf^T) + Ht-1*(Rf^T) + Wbf + Rbf) +/// ct = g(Xt*(Wc^T) + Ht-1*(Rc^T) + Wbc + Rbc) +/// Ct = ft (.) Ct-1 + it (.) ct +/// ot = f(Xt*(Wo^T) + Ht-1*(Ro^T) + Wbo + Rbo) +/// Ht = ot (.) h(Ct) +/// +/// * - Is a dot product, +/// (.) - is a Hadamard product (element-wise), +/// f, g, h - are activation functions. 
+/// +/// \note This class represents only single *cell* (for current time step) and not +/// the whole LSTM Sequence layer +/// +/// \sa LSTMSequence, RNNCell, GRUCell +/// +class OPENVINO_API LSTMCell : public util::RNNCellBase { +public: + OPENVINO_RTTI_DECLARATION; + + LSTMCell(); + /// + /// \brief Constructs LSTMCell node. + /// + /// \param[in] X The input tensor with shape: [batch_size, + /// input_size]. + /// \param[in] initial_hidden_state The hidden state tensor at current time step + /// with shape: [batch_size, hidden_size]. + /// \param[in] initial_cell_state The cell state tensor at current time step + /// with shape: [batch_size, hidden_size]. + /// \param[in] W The gate weights tensor with shape: + /// [4*hidden_size, input_size]. + /// \param[in] R The recurrence weights tensor with shape: + /// [4*hidden_size, hidden_size]. + /// \param[in] hidden_size The number of hidden units for recurrent cell. + /// \param[in] activations The vector of activation functions used inside + /// recurrent cell. + /// \param[in] activations_alpha The vector of alpha parameters for activation + /// functions in order respective to activation + /// list. + /// \param[in] activations_beta The vector of beta parameters for activation + /// functions in order respective to activation + /// list. + /// \param[in] clip The value defining clipping range [-clip, + /// clip] on input of activation functions. + LSTMCell(const Output& X, + const Output& initial_hidden_state, + const Output& initial_cell_state, + const Output& W, + const Output& R, + std::size_t hidden_size, + const std::vector& activations = std::vector{"sigmoid", "tanh", "tanh"}, + const std::vector& activations_alpha = {}, + const std::vector& activations_beta = {}, + float clip = 0.f); + + /// + /// \brief Constructs LSTMCell node. + /// + /// \param[in] X The input tensor with shape: [batch_size, + /// input_size]. + /// \param[in] initial_hidden_state The hidden state tensor at current time step + /// with shape: [batch_size, hidden_size]. + /// \param[in] initial_cell_state The cell state tensor at current time step + /// with shape: [batch_size, hidden_size]. + /// \param[in] W The weight tensor with shape: [4*hidden_size, + /// input_size]. + /// \param[in] R The recurrence weight tensor with shape: + /// [4*hidden_size, hidden_size]. + /// \param[in] B The bias tensor for gates with shape: + /// [4*hidden_size]. + /// \param[in] hidden_size The number of hidden units for recurrent cell. + /// \param[in] activations The vector of activation functions used inside + /// recurrent cell. + /// \param[in] activations_alpha The vector of alpha parameters for activation + /// functions in order respective to activation + /// list. + /// \param[in] activations_beta The vector of beta parameters for activation + /// functions in order respective to activation + /// list. + /// \param[in] clip The value defining clipping range [-clip, + /// clip] on input of activation functions. 
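+    /// A construction sketch (shapes follow the parameter docs above; `batch`,
+    /// `input_size` and `hidden_size` are assumed size_t variables):
+    ///   auto X = std::make_shared<v0::Parameter>(element::f32, Shape{batch, input_size});
+    ///   auto H = std::make_shared<v0::Parameter>(element::f32, Shape{batch, hidden_size});
+    ///   auto C = std::make_shared<v0::Parameter>(element::f32, Shape{batch, hidden_size});
+    ///   auto W = std::make_shared<v0::Parameter>(element::f32, Shape{4 * hidden_size, input_size});
+    ///   auto R = std::make_shared<v0::Parameter>(element::f32, Shape{4 * hidden_size, hidden_size});
+    ///   auto B = std::make_shared<v0::Parameter>(element::f32, Shape{4 * hidden_size});
+    ///   auto cell = std::make_shared<v4::LSTMCell>(X, H, C, W, R, B, hidden_size);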
+ /// + LSTMCell(const Output& X, + const Output& initial_hidden_state, + const Output& initial_cell_state, + const Output& W, + const Output& R, + const Output& B, + std::size_t hidden_size, + const std::vector& activations = std::vector{"sigmoid", "tanh", "tanh"}, + const std::vector& activations_alpha = {}, + const std::vector& activations_beta = {}, + float clip = 0.f); + + void validate_and_infer_types() override; + + bool visit_attributes(AttributeVisitor& visitor) override; + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + +private: + /// + /// \brief Creates the default bias input initialized with zeros. + /// + /// \return The object of Output class. + /// + Output get_default_bias_input() const; + + /// + /// \brief The Activation function f. + /// + util::ActivationFunction m_activation_f; + /// + /// \brief The Activation function g. + /// + util::ActivationFunction m_activation_g; + /// + /// \brief The Activation function h. + /// + util::ActivationFunction m_activation_h; + + static constexpr std::size_t s_gates_count{4}; +}; +} // namespace v4 +} // namespace op + +OPENVINO_API +std::ostream& operator<<(std::ostream& s, const op::LSTMWeightsFormat& type); + +template <> +class OPENVINO_API AttributeAdapter : public EnumAttributeAdapterBase { +public: + AttributeAdapter(op::LSTMWeightsFormat& value) : EnumAttributeAdapterBase(value) {} + + static constexpr DiscreteTypeInfo type_info{"AttributeAdapter", 1}; + const DiscreteTypeInfo& get_type_info() const override { + return type_info; + } +}; + +} // namespace ov diff --git a/ngraph/core/include/openvino/op/lstm_sequence.hpp b/ngraph/core/include/openvino/op/lstm_sequence.hpp new file mode 100644 index 00000000000000..a89f425a052a15 --- /dev/null +++ b/ngraph/core/include/openvino/op/lstm_sequence.hpp @@ -0,0 +1,196 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include +#include +#include +#include + +#include "openvino/op/constant.hpp" +#include "openvino/op/lstm_cell.hpp" +#include "openvino/op/util/attr_types.hpp" +#include "openvino/op/util/rnn_cell_base.hpp" + +namespace ov { +namespace op { +namespace v0 { + +/// +/// \brief Class for lstm sequence node. 
+///
+/// \note It follows notation and equations defined in the ONNX standard:
+/// https://github.com/onnx/onnx/blob/master/docs/Operators.md#LSTM
+///
+/// \sa LSTMCell, RNNCell, GRUCell
+///
+///
+class OPENVINO_API LSTMSequence : public Op {
+public:
+    OPENVINO_RTTI_DECLARATION;
+    LSTMSequence();
+
+    using direction = RecurrentSequenceDirection;
+
+    size_t get_default_output_index() const override {
+        return no_default_index();
+    }
+    explicit LSTMSequence(const Output<Node>& X,
+                          const Output<Node>& initial_hidden_state,
+                          const Output<Node>& initial_cell_state,
+                          const Output<Node>& sequence_lengths,
+                          const Output<Node>& W,
+                          const Output<Node>& R,
+                          const Output<Node>& B,
+                          const Output<Node>& P,
+                          const std::int64_t hidden_size,
+                          const direction lstm_direction,
+                          LSTMWeightsFormat weights_format = LSTMWeightsFormat::IFCO,
+                          const std::vector<float> activations_alpha = {},
+                          const std::vector<float> activations_beta = {},
+                          const std::vector<std::string> activations = {"sigmoid", "tanh", "tanh"},
+                          const float clip_threshold = 0,
+                          const bool input_forget = false);
+
+    explicit LSTMSequence(const Output<Node>& X,
+                          const Output<Node>& initial_hidden_state,
+                          const Output<Node>& initial_cell_state,
+                          const Output<Node>& sequence_lengths,
+                          const Output<Node>& W,
+                          const Output<Node>& R,
+                          const Output<Node>& B,
+                          const std::int64_t hidden_size,
+                          const direction lstm_direction,
+                          LSTMWeightsFormat weights_format = LSTMWeightsFormat::IFCO,
+                          const std::vector<float>& activations_alpha = {},
+                          const std::vector<float>& activations_beta = {},
+                          const std::vector<std::string>& activations = {"sigmoid", "tanh", "tanh"},
+                          const float clip_threshold = 0,
+                          const bool input_forget = false);
+
+    void validate_and_infer_types() override;
+    bool visit_attributes(AttributeVisitor& visitor) override;
+
+    std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
+
+    std::vector<float> get_activations_alpha() const {
+        return m_activations_alpha;
+    }
+    std::vector<float> get_activations_beta() const {
+        return m_activations_beta;
+    }
+    std::vector<std::string> get_activations() const {
+        return m_activations;
+    }
+    float get_clip_threshold() const {
+        return m_clip_threshold;
+    }
+    direction get_direction() const {
+        return m_direction;
+    }
+    std::int64_t get_hidden_size() const {
+        return m_hidden_size;
+    }
+    bool get_input_forget() const {
+        return m_input_forget;
+    }
+    LSTMWeightsFormat get_weights_format() const {
+        return m_weights_format;
+    }
+
+private:
+    ///
+    /// \brief Gets the masked value according to sequence length in a batch.
+    ///
+    /// \note Zeros out values or sets them to default value for inputs with
+    ///       sequence length shorter than the currently processed time step.
+    ///
+    /// \param[in] data           The input value.
+    /// \param[in] time_step      The current time step denoting sequence length.
+    /// \param[in] batch_axis     The batch axis index of data tensor.
+    /// \param[in] default_value  The default value for masked elements.
+    ///
+    /// \return The masked value.
+    ///
+    std::shared_ptr<Node> get_masked_node(const Output<Node>& data,
+                                          std::int32_t time_step,
+                                          std::size_t batch_axis = 0,
+                                          const Output<Node>& default_value = Output<Node>()) const;
+
+    OutputVector lstm_pass(bool is_reverse = false) const;
+
+    // Split(bi-directional) and squeeze input data to remove 'num_direction' dimension.
+    std::shared_ptr<Node> prepare_input(Output<Node> node, bool is_reverse, size_t num_direction_axis = 0) const;
+
+    std::vector<float> m_activations_alpha;
+    std::vector<float> m_activations_beta;
+    std::vector<std::string> m_activations;
+    float m_clip_threshold;
+    direction m_direction;
+    std::int64_t m_hidden_size;
+    bool m_input_forget;
+    LSTMWeightsFormat m_weights_format;
+};
+}  // namespace v0
+
+namespace v5 {
+///
+/// \brief Class for the LSTM sequence node.
+///
+/// \note It follows the notation and equations defined in the ONNX standard:
+///       https://github.com/onnx/onnx/blob/master/docs/Operators.md#LSTM
+///
+/// \sa LSTMCell, RNNCell, GRUCell
+///
+///
+class OPENVINO_API LSTMSequence : public util::RNNCellBase {
+public:
+    OPENVINO_RTTI_DECLARATION;
+    LSTMSequence() = default;
+
+    using direction = RecurrentSequenceDirection;
+
+    size_t get_default_output_index() const override {
+        return no_default_index();
+    }
+    explicit LSTMSequence(const Output<Node>& X,
+                          const Output<Node>& initial_hidden_state,
+                          const Output<Node>& initial_cell_state,
+                          const Output<Node>& sequence_lengths,
+                          const Output<Node>& W,
+                          const Output<Node>& R,
+                          const Output<Node>& B,
+                          const std::int64_t hidden_size,
+                          const direction lstm_direction,
+                          const std::vector<float>& activations_alpha = {},
+                          const std::vector<float>& activations_beta = {},
+                          const std::vector<std::string>& activations = {"sigmoid", "tanh", "tanh"},
+                          const float clip = 0.f)
+        : RNNCellBase({X, initial_hidden_state, initial_cell_state, sequence_lengths, W, R, B},
+                      hidden_size,
+                      clip,
+                      activations,
+                      activations_alpha,
+                      activations_beta),
+          m_direction(lstm_direction) {
+        constructor_validate_and_infer_types();
+    }
+
+    void validate_and_infer_types() override;
+    bool visit_attributes(AttributeVisitor& visitor) override;
+
+    std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
+
+    direction get_direction() const {
+        return m_direction;
+    }
+
+private:
+    direction m_direction;
+};
+}  // namespace v5
+}  // namespace op
+}  // namespace ov
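A usage sketch for the v5 sequence-level op (illustrative, not part of the patch; shapes follow the operator spec). num_directions is 2 because the sequence runs BIDIRECTIONAL.

    #include <memory>

    #include "ngraph/op/lstm_sequence.hpp"
    #include "ngraph/op/parameter.hpp"

    using namespace ngraph;

    std::shared_ptr<op::v5::LSTMSequence> make_lstm_sequence() {
        const size_t batch = 2, seq_len = 5, input_size = 3, hidden_size = 4, num_dir = 2;
        auto X = std::make_shared<op::v0::Parameter>(element::f32, Shape{batch, seq_len, input_size});
        auto H = std::make_shared<op::v0::Parameter>(element::f32, Shape{batch, num_dir, hidden_size});
        auto C = std::make_shared<op::v0::Parameter>(element::f32, Shape{batch, num_dir, hidden_size});
        auto len = std::make_shared<op::v0::Parameter>(element::i32, Shape{batch});
        auto W = std::make_shared<op::v0::Parameter>(element::f32, Shape{num_dir, 4 * hidden_size, input_size});
        auto R = std::make_shared<op::v0::Parameter>(element::f32, Shape{num_dir, 4 * hidden_size, hidden_size});
        auto B = std::make_shared<op::v0::Parameter>(element::f32, Shape{num_dir, 4 * hidden_size});
        return std::make_shared<op::v5::LSTMSequence>(X, H, C, len, W, R, B, hidden_size,
                                                      op::v5::LSTMSequence::direction::BIDIRECTIONAL);
    }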
diff --git a/ngraph/core/include/openvino/op/parameter.hpp b/ngraph/core/include/openvino/op/parameter.hpp
new file mode 100644
index 00000000000000..7878f582927616
--- /dev/null
+++ b/ngraph/core/include/openvino/op/parameter.hpp
@@ -0,0 +1,78 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include "openvino/op/op.hpp"
+
+namespace ov {
+namespace op {
+namespace v0 {
+/// \brief A function parameter.
+///
+/// Parameters are nodes that represent the arguments that will be passed to
+/// user-defined functions. Function creation requires a sequence of parameters.
+/// Basic graph operations do not need parameters attached to a function.
+class OPENVINO_API Parameter : public op::Op {
+public:
+    OPENVINO_RTTI_DECLARATION;
+    /// \brief Constructs a tensor-typed parameter node.
+    Parameter() = default;
+    /// \brief Constructs a tensor-typed parameter node.
+    ///
+    /// \param element_type The element type of the parameter.
+    /// \param pshape The partial shape of the parameter.
+    Parameter(const ngraph::element::Type& element_type, const PartialShape& pshape);
+
+    bool visit_attributes(AttributeVisitor& visitor) override;
+
+    void validate_and_infer_types() override;
+
+    std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
+
+    bool is_relevant_to_shapes() const;
+    void set_is_relevant_to_shapes(bool is_relevant);
+
+    const PartialShape& get_partial_shape() const {
+        return m_partial_shape;
+    }
+    PartialShape& get_partial_shape() {
+        return m_partial_shape;
+    }
+    void set_partial_shape(const PartialShape& partial_shape) {
+        m_partial_shape = partial_shape;
+    }
+    const element::Type& get_element_type() const {
+        return m_element_type;
+    }
+    void set_element_type(const element::Type& element_type) {
+        m_element_type = element_type;
+    }
+
+protected:
+    PartialShape m_partial_shape;
+    element::Type m_element_type;
+    bool m_is_relevant_to_shapes{false};
+};
+}  // namespace v0
+}  // namespace op
+using ParameterVector = std::vector<std::shared_ptr<op::v0::Parameter>>;
+
+template <>
+class OPENVINO_API AttributeAdapter<ParameterVector> : public VisitorAdapter {
+public:
+    AttributeAdapter(ParameterVector& ref);
+
+    bool visit_attributes(AttributeVisitor& visitor) override;
+
+    static constexpr DiscreteTypeInfo type_info{"AttributeAdapter<ParameterVector>", 0};
+    const DiscreteTypeInfo& get_type_info() const override {
+        return type_info;
+    }
+
+protected:
+    ParameterVector& m_ref;
+};
+
+}  // namespace ov
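The header above deliberately keeps the Parameter's shape and element type mutable so transformations can refine them after construction. A minimal sketch (illustrative, not part of the patch):

    #include <memory>

    #include "ngraph/op/parameter.hpp"

    using namespace ngraph;

    void refine_parameter() {
        // Start with a dynamic second dimension, then pin it down and re-infer.
        auto p = std::make_shared<op::v0::Parameter>(element::f32, PartialShape{1, Dimension::dynamic()});
        p->set_partial_shape(PartialShape{1, 224});
        p->validate_and_infer_types();  // output 0 now reports the static shape {1, 224}
    }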
diff --git a/ngraph/core/include/openvino/op/result.hpp b/ngraph/core/include/openvino/op/result.hpp
new file mode 100644
index 00000000000000..12f6d92510aa54
--- /dev/null
+++ b/ngraph/core/include/openvino/op/result.hpp
@@ -0,0 +1,61 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include "openvino/op/op.hpp"
+
+namespace ov {
+namespace op {
+namespace v0 {
+class OPENVINO_API Result : public Op {
+public:
+    OPENVINO_RTTI_DECLARATION;
+
+    /// \brief Allows a value to be used as a function result.
+    Result() = default;
+    /// \brief Allows a value to be used as a function result.
+    ///
+    /// \param arg Node that produces the input tensor.
+    Result(const Output<Node>& arg, bool needs_default_layout = false);
+
+    bool visit_attributes(AttributeVisitor& visitor) override;
+    void validate_and_infer_types() override;
+
+    std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
+
+    void set_needs_default_layout(bool val) {
+        m_needs_default_layout = val;
+    }
+    bool needs_default_layout() const {
+        return m_needs_default_layout;
+    }
+    bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
+    bool has_evaluate() const override;
+    bool constant_fold(OutputVector& output_values, const OutputVector& inputs_values) override;
+
+private:
+    bool m_needs_default_layout{false};
+};
+}  // namespace v0
+}  // namespace op
+using ResultVector = std::vector<std::shared_ptr<op::v0::Result>>;
+
+template <>
+class OPENVINO_API AttributeAdapter<ResultVector> : public VisitorAdapter {
+public:
+    AttributeAdapter(ResultVector& ref);
+
+    bool visit_attributes(AttributeVisitor& visitor) override;
+
+    static constexpr DiscreteTypeInfo type_info{"AttributeAdapter<ResultVector>", 0};
+    const DiscreteTypeInfo& get_type_info() const override {
+        return type_info;
+    }
+
+protected:
+    ResultVector& m_ref;
+};
+
+}  // namespace ov
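Parameter and Result are the endpoints a Function is assembled from, which is why both headers also define the vector aliases and adapters above. A short assembly sketch (illustrative graph, not part of the patch):

    #include <memory>

    #include "ngraph/function.hpp"
    #include "ngraph/op/abs.hpp"
    #include "ngraph/op/parameter.hpp"
    #include "ngraph/op/result.hpp"

    using namespace ngraph;

    std::shared_ptr<Function> make_function() {
        auto arg = std::make_shared<op::v0::Parameter>(element::f32, Shape{1, 3});
        auto abs = std::make_shared<op::v0::Abs>(arg);
        auto res = std::make_shared<op::v0::Result>(abs);
        return std::make_shared<Function>(ResultVector{res}, ParameterVector{arg});
    }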
+class OPENVINO_API TensorIterator : public op::util::SubGraphOp {
+public:
+    OPENVINO_RTTI_DECLARATION;
+
+    bool visit_attributes(AttributeVisitor& visitor) override;
+
+    TensorIterator() = default;
+    explicit TensorIterator(const OutputVector& values);
+
+    std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
+    /// \return the body of the iteration
+    std::shared_ptr<Function> get_body() const {
+        return m_bodies[0];
+    }
+    /// \param body set the body of the iteration
+    void set_body(const std::shared_ptr<Function>& body) {
+        set_function(body);
+    }
+    void validate_and_infer_types() override;
+    void revalidate_and_infer_types_for_body_ops();
+
+private:
+    void try_to_set_num_iterations_if_no_slice_inputs();
+};
+}  // namespace v0
+}  // namespace op
+}  // namespace ov
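The slicing and back-edge API (set_sliced_input, get_iter_value, and friends) is inherited from util::SubGraphOp rather than declared in this header. A wiring sketch, assuming a prebuilt single-step body Function (all names illustrative):

    #include <memory>

    #include "ngraph/op/tensor_iterator.hpp"

    using namespace ngraph;

    // `body` computes one time step over `body_param`; `outer` is [batch, seq, feat].
    Output<Node> run_over_sequence(const std::shared_ptr<Function>& body,
                                   const std::shared_ptr<op::v0::Parameter>& body_param,
                                   const Output<Node>& outer) {
        auto ti = std::make_shared<op::v0::TensorIterator>();
        ti->set_body(body);
        // Feed one slice of size 1 along axis 1 per iteration, walking the axis forward.
        ti->set_sliced_input(body_param, outer, 0, 1, 1, -1, 1);
        // Expose the body result of the last iteration (-1) as the op output.
        return ti->get_iter_value(body->get_results().at(0), -1);
    }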
diff --git a/ngraph/core/src/op/gather.cpp b/ngraph/core/src/op/gather.cpp
index e72163839dcb0c..a52eaff000d8bd 100644
--- a/ngraph/core/src/op/gather.cpp
+++ b/ngraph/core/src/op/gather.cpp
@@ -12,7 +12,7 @@
 using namespace std;
 using namespace ngraph;
 
-NGRAPH_RTTI_DEFINITION(op::v1::Gather, "Gather", 1, op::util::GatherBase);
+OPENVINO_RTTI_DEFINITION(op::v1::Gather, "Gather", 1, op::util::GatherBase);
 
 op::v1::Gather::Gather(const Output<Node>& params, const Output<Node>& indices, const Output<Node>& axes)
     : GatherBase(params, indices, axes) {
@@ -36,7 +36,7 @@ shared_ptr<Node> op::v1::Gather::clone_with_new_inputs(const OutputVector& new_a
     return make_shared<v1::Gather>(new_args.at(0), new_args.at(1), new_args.at(2));
 }
 
-NGRAPH_RTTI_DEFINITION(op::v7::Gather, "Gather", 7, op::util::GatherBase);
+OPENVINO_RTTI_DEFINITION(op::v7::Gather, "Gather", 7, op::util::GatherBase);
 
 op::v7::Gather::Gather(const Output<Node>& data,
                        const Output<Node>& indices,
@@ -78,7 +78,7 @@ shared_ptr<Node> op::v7::Gather::clone_with_new_inputs(const OutputVector& new_a
     return make_shared<v7::Gather>(new_args.at(0), new_args.at(1), new_args.at(2), m_batch_dims);
 }
 
-NGRAPH_RTTI_DEFINITION(op::v8::Gather, "Gather", 8, op::util::GatherBase);
+OPENVINO_RTTI_DEFINITION(op::v8::Gather, "Gather", 8, op::util::GatherBase);
 
 op::v8::Gather::Gather(const Output<Node>& data,
                        const Output<Node>& indices,
diff --git a/ngraph/core/src/op/gather_elements.cpp b/ngraph/core/src/op/gather_elements.cpp
index 71f341dd690ce6..be92de695cdc7b 100644
--- a/ngraph/core/src/op/gather_elements.cpp
+++ b/ngraph/core/src/op/gather_elements.cpp
@@ -12,7 +12,7 @@ using namespace ngraph;
 
 // ------------------------------ V6 ------------------------------
 
-NGRAPH_RTTI_DEFINITION(op::v6::GatherElements, "GatherElements", 6);
+OPENVINO_RTTI_DEFINITION(op::v6::GatherElements, "GatherElements", 6);
 
 op::v6::GatherElements::GatherElements(const Output<Node>& data, const Output<Node>& indices, const int64_t axis)
     : Op({data, indices}),
diff --git a/ngraph/core/src/op/gather_nd.cpp b/ngraph/core/src/op/gather_nd.cpp
index d94cefa5b24c66..61e1c2708f7f3c 100644
--- a/ngraph/core/src/op/gather_nd.cpp
+++ b/ngraph/core/src/op/gather_nd.cpp
@@ -12,7 +12,7 @@ using namespace ngraph;
 
 // ------------------------------ V5 ------------------------------
 
-NGRAPH_RTTI_DEFINITION(op::v5::GatherND, "GatherND", 5);
+OPENVINO_RTTI_DEFINITION(op::v5::GatherND, "GatherND", 5);
 
 op::v5::GatherND::GatherND(const Output<Node>& data, const Output<Node>& indices, const size_t batch_dims)
     : Op({data, indices}),
diff --git a/ngraph/core/src/op/gather_tree.cpp b/ngraph/core/src/op/gather_tree.cpp
index 88b4b3cab7151b..994e27be700a97 100644
--- a/ngraph/core/src/op/gather_tree.cpp
+++ b/ngraph/core/src/op/gather_tree.cpp
@@ -10,7 +10,7 @@
 using namespace std;
 using namespace ngraph;
 
-NGRAPH_RTTI_DEFINITION(op::v1::GatherTree, "GatherTree", 1);
+OPENVINO_RTTI_DEFINITION(op::v1::GatherTree, "GatherTree", 1);
 
 op::v1::GatherTree::GatherTree(const Output<Node>& step_ids,
                                const Output<Node>& parent_idx,
diff --git a/ngraph/core/src/op/gelu.cpp b/ngraph/core/src/op/gelu.cpp
index 76555869a89ac4..0fc27a3373ecb8 100644
--- a/ngraph/core/src/op/gelu.cpp
+++ b/ngraph/core/src/op/gelu.cpp
@@ -14,7 +14,7 @@ using namespace std;
 using namespace ngraph;
 
 // ------------------------------ V0 ------------------------------
-NGRAPH_RTTI_DEFINITION(op::v0::Gelu, "Gelu", 0);
+OPENVINO_RTTI_DEFINITION(op::v0::Gelu, "Gelu", 0);
 
 op::v0::Gelu::Gelu() : Op() {}
 
@@ -60,13 +60,13 @@ NGRAPH_API EnumNames<ngraph::op::GeluApproximationMode>& EnumNames<ngraph::op::G
-constexpr DiscreteTypeInfo AttributeAdapter<op::GeluApproximationMode>::type_info;
-}  // namespace ov
 std::ostream& op::operator<<(std::ostream& s, const op::GeluApproximationMode& type) {
     return s << as_string(type);
 }
 
-NGRAPH_RTTI_DEFINITION(op::v7::Gelu, "Gelu", 7);
+constexpr DiscreteTypeInfo AttributeAdapter<op::GeluApproximationMode>::type_info;
+}  // namespace ov
+OPENVINO_RTTI_DEFINITION(op::v7::Gelu, "Gelu", 7);
 
 op::v7::Gelu::Gelu(const Output<Node>& data, GeluApproximationMode mode)
     : UnaryElementwiseArithmetic(data),
diff --git a/ngraph/core/src/op/greater.cpp b/ngraph/core/src/op/greater.cpp
index 10d89bcf581a3d..ead2f846b93cb9 100644
--- a/ngraph/core/src/op/greater.cpp
+++ b/ngraph/core/src/op/greater.cpp
@@ -50,7 +50,7 @@ bool evaluate_greater(const HostTensorPtr& arg0,
 
 //-------------------------------------- v1 ------------------------------------
 
-NGRAPH_RTTI_DEFINITION(op::v1::Greater, "Greater", 1, op::util::BinaryElementwiseComparison);
+OPENVINO_RTTI_DEFINITION(op::v1::Greater, "Greater", 1, op::util::BinaryElementwiseComparison);
 
 op::v1::Greater::Greater(const Output<Node>& arg0, const Output<Node>& arg1, const AutoBroadcastSpec& auto_broadcast)
     : BinaryElementwiseComparison(arg0, arg1, auto_broadcast) {
diff --git a/ngraph/core/src/op/greater_eq.cpp b/ngraph/core/src/op/greater_eq.cpp
index c42720e5489c4c..f4d6e7ab297a30 100644
--- a/ngraph/core/src/op/greater_eq.cpp
+++ b/ngraph/core/src/op/greater_eq.cpp
@@ -51,7 +51,7 @@ bool evaluate_greater_equal(const HostTensorPtr& arg0,
 
 //---------------------------------- v1 ----------------------------------------
 
-NGRAPH_RTTI_DEFINITION(op::v1::GreaterEqual, "GreaterEqual", 1, op::util::BinaryElementwiseComparison);
+OPENVINO_RTTI_DEFINITION(op::v1::GreaterEqual, "GreaterEqual", 1, op::util::BinaryElementwiseComparison);
 
 op::v1::GreaterEqual::GreaterEqual(const Output<Node>& arg0,
                                    const Output<Node>& arg1,
diff --git a/ngraph/core/src/op/grn.cpp b/ngraph/core/src/op/grn.cpp
index 98cfb19e465763..960ea235e3a0f5 100644
--- a/ngraph/core/src/op/grn.cpp
+++ b/ngraph/core/src/op/grn.cpp
@@ -16,7 +16,7 @@
 using namespace std;
 using namespace ngraph;
 
-NGRAPH_RTTI_DEFINITION(op::v0::GRN, "GRN", 0);
+OPENVINO_RTTI_DEFINITION(op::v0::GRN, "GRN", 0);
 
 op::v0::GRN::GRN(const Output<Node>& data, float bias) : Op({data}), m_bias(bias) {
     constructor_validate_and_infer_types();
diff --git a/ngraph/core/src/op/group_conv.cpp b/ngraph/core/src/op/group_conv.cpp
index f43c7aa0d0beac..dba5f693adc1fb 100644
--- a/ngraph/core/src/op/group_conv.cpp
+++ b/ngraph/core/src/op/group_conv.cpp
@@ -22,10 +22,10 @@ using namespace ngraph;
 // v1::GroupConvolution
 //------------------------------------------------------------------------------
 
-NGRAPH_RTTI_DEFINITION(op::v1::GroupConvolution, "GroupConvolution", 1);
+OPENVINO_RTTI_DEFINITION(op::v1::GroupConvolution, "GroupConvolution", 1);
 
 shared_ptr<Node> op::v1::GroupConvolution::get_default_value() const {
-    return op::Constant::create(get_element_type(), get_shape(), {0});
+    return op::v0::Constant::create(get_element_type(), get_shape(), {0});
 }
 
 op::v1::GroupConvolution::GroupConvolution(const Output<Node>& data_batch,
@@ -249,7 +249,7 @@ shared_ptr<Node> op::v1::GroupConvolution::clone_with_new_inputs(const OutputVec
 // v1::GroupConvolutionBackpropData
 //------------------------------------------------------------------------------
 
-NGRAPH_RTTI_DEFINITION(op::v1::GroupConvolutionBackpropData, "GroupConvolutionBackpropData", 1);
+OPENVINO_RTTI_DEFINITION(op::v1::GroupConvolutionBackpropData, "GroupConvolutionBackpropData", 1);
 
 op::v1::GroupConvolutionBackpropData::GroupConvolutionBackpropData()
     : Op(),
@@ -371,7 +371,7 @@ const PartialShape op::v1::GroupConvolutionBackpropData::get_convolution_output_
 
 void op::v1::GroupConvolutionBackpropData::set_output_shape(const Shape& shape) {
     this->input(2).replace_source_output(
-        op::Constant::create(this->get_input_element_type(2), Shape{shape.size()}, shape)->output(0));
+        op::v0::Constant::create(this->get_input_element_type(2), Shape{shape.size()}, shape)->output(0));
 }
 
 void op::v1::GroupConvolutionBackpropData::infer_conv_backprop_output_spatial_shape(
@@ -393,7 +393,7 @@ void op::v1::GroupConvolutionBackpropData::infer_conv_backprop_output_spatial_sh
             int64_t val = strides[i] * (input_data_shape[i].get_length() - 1) +
                           dilations[i] * (filters_shape[i].get_length() - 1) + 1 - pads_begin[i] - pads_end[i] +
                           output_padding[i];
-            output_spatial_shape.push_back(val);
+            output_spatial_shape.emplace_back(val);
         } else {
             output_spatial_shape.push_back(Dimension::dynamic());
         }
diff --git a/ngraph/core/src/op/gru_cell.cpp b/ngraph/core/src/op/gru_cell.cpp
index a17ba0f834a9ce..e19b73bebf0280 100644
--- a/ngraph/core/src/op/gru_cell.cpp
+++ b/ngraph/core/src/op/gru_cell.cpp
@@ -14,7 +14,7 @@
 using namespace std;
 using namespace ngraph;
 
-constexpr NodeTypeInfo op::v3::GRUCell::type_info;
+OPENVINO_RTTI_DEFINITION(op::v3::GRUCell, "GRUCell", 3);
 
 op::v3::GRUCell::GRUCell() : m_linear_before_reset(false) {
     m_activations = {"sigmoid", "tanh"};
@@ -172,9 +172,9 @@ void op::v3::GRUCell::validate_and_infer_types() {
 
 void op::v3::GRUCell::add_default_bias_input() {
     Output<Node> B =
-        op::Constant::create(get_input_element_type(0),
-                             Shape{(s_gates_count + m_linear_before_reset) * get_hidden_size()},
-                             vector<float>((s_gates_count + m_linear_before_reset) * get_hidden_size(), 0.f));
+        op::v0::Constant::create(get_input_element_type(0),
+                                 Shape{(s_gates_count + m_linear_before_reset) * get_hidden_size()},
+                                 vector<float>((s_gates_count + m_linear_before_reset) * get_hidden_size(), 0.f));
     set_argument(4, B);
 }
diff --git a/ngraph/core/src/op/gru_sequence.cpp b/ngraph/core/src/op/gru_sequence.cpp
index cbbae4895bbd4b..1f980134ebed3f 100644
--- a/ngraph/core/src/op/gru_sequence.cpp
+++ b/ngraph/core/src/op/gru_sequence.cpp
@@ -15,7 +15,7 @@
 using namespace std;
 using namespace ngraph;
 
-NGRAPH_RTTI_DEFINITION(op::v5::GRUSequence, "GRUSequence", 5);
+OPENVINO_RTTI_DEFINITION(op::v5::GRUSequence, "GRUSequence", 5);
 
 op::v5::GRUSequence::GRUSequence()
     : m_direction(op::RecurrentSequenceDirection::FORWARD),
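The source-side changes in these .cpp files follow one mechanical pattern: each NGRAPH_RTTI_DEFINITION (or bare constexpr type_info definition) becomes OPENVINO_RTTI_DEFINITION with the same name, version, and optional base class, paired with the OPENVINO_RTTI_DECLARATION in the moved header. A sketch with a hypothetical MyOp, not an op in this patch:

    // my_op.hpp -- hypothetical op showing the macro pairing.
    #include "openvino/op/op.hpp"

    namespace ov {
    namespace op {
    namespace v0 {
    class OPENVINO_API MyOp : public Op {
    public:
        OPENVINO_RTTI_DECLARATION;  // declares the op's type_info / get_type_info()
        MyOp() = default;
    };
    }  // namespace v0
    }  // namespace op
    }  // namespace ov

    // my_op.cpp -- name and version must match the declaration site:
    // OPENVINO_RTTI_DEFINITION(ov::op::v0::MyOp, "MyOp", 0);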
diff --git a/ngraph/core/src/op/hard_sigmoid.cpp b/ngraph/core/src/op/hard_sigmoid.cpp
index d397831c96263f..5b5c9cda6e4153 100644
--- a/ngraph/core/src/op/hard_sigmoid.cpp
+++ b/ngraph/core/src/op/hard_sigmoid.cpp
@@ -12,7 +12,7 @@
 using namespace std;
 using namespace ngraph;
 
-NGRAPH_RTTI_DEFINITION(op::v0::HardSigmoid, "HardSigmoid", 0);
+OPENVINO_RTTI_DEFINITION(op::v0::HardSigmoid, "HardSigmoid", 0);
 
 op::v0::HardSigmoid::HardSigmoid() : Op() {}
 
@@ -34,7 +34,7 @@ void op::v0::HardSigmoid::validate_and_infer_types() {
     if (alpha_pshape.is_static()) {
         const auto alpha_shape = alpha_pshape.to_shape();
         NODE_VALIDATION_CHECK(this,
-                              is_scalar(alpha_shape),
+                              ngraph::is_scalar(alpha_shape),
                               "A scalar is expected for the 'alpha' input. Got: ",
                               alpha_shape);
     }
@@ -42,7 +42,7 @@
     if (beta_pshape.is_static()) {
         const auto beta_shape = beta_pshape.to_shape();
         NODE_VALIDATION_CHECK(this,
-                              is_scalar(beta_shape),
+                              ngraph::is_scalar(beta_shape),
                               "A scalar is expected for the 'beta' input. Got: ",
                               beta_shape);
     }
diff --git a/ngraph/core/src/op/hsigmoid.cpp b/ngraph/core/src/op/hsigmoid.cpp
index 32bc9e39b9b439..97e02e763c80e9 100644
--- a/ngraph/core/src/op/hsigmoid.cpp
+++ b/ngraph/core/src/op/hsigmoid.cpp
@@ -15,7 +15,7 @@
 using namespace std;
 using namespace ngraph;
 
-NGRAPH_RTTI_DEFINITION(op::v5::HSigmoid, "HSigmoid", 5);
+OPENVINO_RTTI_DEFINITION(op::v5::HSigmoid, "HSigmoid", 5);
 
 op::v5::HSigmoid::HSigmoid(const Output<Node>& arg) : UnaryElementwiseArithmetic(arg) {
     constructor_validate_and_infer_types();
diff --git a/ngraph/core/src/op/hswish.cpp b/ngraph/core/src/op/hswish.cpp
index ffb67eebd76540..d1c85c037129db 100644
--- a/ngraph/core/src/op/hswish.cpp
+++ b/ngraph/core/src/op/hswish.cpp
@@ -14,7 +14,7 @@
 using namespace std;
 using namespace ngraph;
 
-NGRAPH_RTTI_DEFINITION(op::v4::HSwish, "HSwish", 4);
+OPENVINO_RTTI_DEFINITION(op::v4::HSwish, "HSwish", 4);
 
 op::v4::HSwish::HSwish(const Output<Node>& arg) : UnaryElementwiseArithmetic(arg) {
     constructor_validate_and_infer_types();
diff --git a/ngraph/core/src/op/idft.cpp b/ngraph/core/src/op/idft.cpp
index 72b6ec077e44e9..8c498fc22cc5bd 100644
--- a/ngraph/core/src/op/idft.cpp
+++ b/ngraph/core/src/op/idft.cpp
@@ -18,7 +18,7 @@
 
 using namespace ngraph;
 
-NGRAPH_RTTI_DEFINITION(op::v7::IDFT, "IDFT", 7, util::FFTBase);
+OPENVINO_RTTI_DEFINITION(op::v7::IDFT, "IDFT", 7, util::FFTBase);
 
 op::v7::IDFT::IDFT(const Output<Node>& data, const Output<Node>& axes) : FFTBase(data, axes) {
     constructor_validate_and_infer_types();
diff --git a/ngraph/core/src/op/if.cpp b/ngraph/core/src/op/if.cpp
index ea8c76d0c8e91a..fe4a49c7778ba0 100644
--- a/ngraph/core/src/op/if.cpp
+++ b/ngraph/core/src/op/if.cpp
@@ -18,7 +18,7 @@
 using namespace std;
 using namespace ngraph;
 
-NGRAPH_RTTI_DEFINITION(ngraph::op::v8::If, "If", 8, MultiSubGraphOp);
+OPENVINO_RTTI_DEFINITION(ngraph::op::v8::If, "If", 8, MultiSubGraphOp);
 
 op::v8::If::If() : MultiSubGraphOp(2) {}
 
@@ -49,11 +49,11 @@ static ngraph::PartialShape resolve_shape(const ngraph::PartialShape& then_pshap
     if ((*then_it).is_dynamic() || (*else_it).is_dynamic()) {
         new_dims.push_back(Dimension::dynamic());
     } else if (*then_it == *else_it) {
-        new_dims.push_back(Dimension(*then_it));
+        new_dims.emplace_back(*then_it);
     } else {
         auto dim_min = std::min((*then_it).get_min_length(), (*else_it).get_min_length());
         auto dim_max = std::max((*then_it).get_min_length(), (*else_it).get_min_length());
-        new_dims.push_back(Dimension(dim_min, dim_max));
+        new_dims.emplace_back(dim_min, dim_max);
     }
 }
 
@@ -125,7 +125,7 @@ void op::v8::If::validate_and_infer_types() {
         // shape and type inference for outputs from If operations
         for (const auto& output_descr : m_output_descriptions[cond_index]) {
             auto body_value = body->get_results().at(output_descr->m_body_value_index)->input_value(0);
-            auto body_value_partial_shape = body_value.get_partial_shape();
+            const auto& body_value_partial_shape = body_value.get_partial_shape();
             set_output_type(output_descr->m_output_index, body_value.get_element_type(), body_value_partial_shape);
         }
     } else  // condition is non constant
@@ -236,8 +236,8 @@ bool op::v8::If::has_evaluate() const {
 }
 
 void op::v8::If::set_input(const Output<Node>& value,
-                           const std::shared_ptr<Parameter>& then_parameter,
-                           const std::shared_ptr<Parameter>& else_parameter) {
+                           const std::shared_ptr<v0::Parameter>& then_parameter,
+                           const std::shared_ptr<v0::Parameter>& else_parameter) {
     NGRAPH_CHECK(then_parameter != nullptr || else_parameter != nullptr,
                  "Missing parameters! Both parameters are nullptr!");
     auto then_param_index = m_bodies[THEN_BODY_INDEX]->get_parameter_index(then_parameter);
@@ -253,8 +253,8 @@ void op::v8::If::set_input(const Output<Node>& value,
     set_invariant_inputs(value, {then_parameter, else_parameter});
 }
 
-Output<Node> op::v8::If::set_output(const std::shared_ptr<Result>& then_result,
-                                    const std::shared_ptr<Result>& else_result) {
+Output<Node> op::v8::If::set_output(const std::shared_ptr<v0::Result>& then_result,
+                                    const std::shared_ptr<v0::Result>& else_result) {
     NGRAPH_CHECK(then_result != nullptr, "Incorrect result in \"then_body\"! Result cant be \'nullptr\'");
     NGRAPH_CHECK(else_result != nullptr, "Incorrect result in \"else_body\"! Result cant be \'nullptr\'");
     auto then_result_id = m_bodies[THEN_BODY_INDEX]->get_result_index(then_result);
@@ -264,4 +264,4 @@ Output<Node> op::v8::If::set_output(const std::shared_ptr<Result>& then_result,
     NGRAPH_CHECK(else_result_id != -1, "Missing result ", else_result->get_friendly_name(), "in \'then_body\'!");
 
     return set_body_outputs({then_result, else_result});
-}
\ No newline at end of file
+}
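A wiring sketch for the set_input/set_output pair changed above, assuming then_body and else_body are prebuilt Functions with one Parameter and one Result each (illustrative, not part of the patch):

    #include <memory>

    #include "ngraph/op/if.hpp"

    using namespace ngraph;

    Output<Node> make_if(const Output<Node>& cond,
                         const Output<Node>& data,
                         const std::shared_ptr<Function>& then_body,
                         const std::shared_ptr<Function>& else_body) {
        auto if_op = std::make_shared<op::v8::If>(cond);
        if_op->set_then_body(then_body);
        if_op->set_else_body(else_body);
        // The same outer value feeds the matching Parameter of each body.
        if_op->set_input(data, then_body->get_parameters().at(0), else_body->get_parameters().at(0));
        return if_op->set_output(then_body->get_results().at(0), else_body->get_results().at(0));
    }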
diff --git a/ngraph/core/src/op/interpolate.cpp b/ngraph/core/src/op/interpolate.cpp
index 35a71268dd9a4f..a9c22d0de0fe1b 100644
--- a/ngraph/core/src/op/interpolate.cpp
+++ b/ngraph/core/src/op/interpolate.cpp
@@ -17,11 +17,9 @@
 using namespace std;
 using namespace ngraph;
 
-NGRAPH_RTTI_DEFINITION(op::v0::Interpolate, "Interpolate", 0);
+OPENVINO_RTTI_DEFINITION(op::v0::Interpolate, "Interpolate", 0);
 
-op::v0::Interpolate::Interpolate(const Output<Node>& image,
-                                 const Output<Node>& output_shape,
-                                 const op::v0::InterpolateAttrs& attrs)
+op::v0::Interpolate::Interpolate(const Output<Node>& image, const Output<Node>& output_shape, const Attributes& attrs)
     : Op({image, output_shape}),
       m_attrs(attrs) {
     constructor_validate_and_infer_types();
@@ -69,7 +67,7 @@ shared_ptr<Node> op::v0::Interpolate::clone_with_new_inputs(const OutputVector&
     return make_shared<op::v0::Interpolate>(new_args.at(0), new_args.at(1), m_attrs);
 }
 
-std::ostream& ngraph::operator<<(std::ostream& s, const op::v0::Interpolate::InterpolateMode& type) {
+std::ostream& ov::operator<<(std::ostream& s, const op::v0::Interpolate::InterpolateMode& type) {
     return s << as_string(type);
 }
 
@@ -92,7 +90,7 @@ constexpr DiscreteTypeInfo AttributeAdapter<ngraph::op::v0::Interpolate::Interpo
-NGRAPH_RTTI_DEFINITION(op::v4::Interpolate, "Interpolate", 4);
+OPENVINO_RTTI_DEFINITION(op::v4::Interpolate, "Interpolate", 4);
 
 op::v4::Interpolate::Interpolate(const Output<Node>& image,
                                  const Output<Node>& output_shape,
@@ -481,19 +479,19 @@ bool op::v4::Interpolate::has_evaluate() const {
     return false;
 }
 
-std::ostream& ngraph::operator<<(std::ostream& s, const op::v4::Interpolate::InterpolateMode& type) {
+std::ostream& operator<<(std::ostream& s, const op::v4::Interpolate::InterpolateMode& type) {
     return s << as_string(type);
 }
 
-std::ostream& ngraph::operator<<(std::ostream& s, const op::v4::Interpolate::ShapeCalcMode& type) {
+std::ostream& operator<<(std::ostream& s, const op::v4::Interpolate::ShapeCalcMode& type) {
     return s << as_string(type);
 }
 
-std::ostream& ngraph::operator<<(std::ostream& s, const op::v4::Interpolate::CoordinateTransformMode& type) {
+std::ostream& operator<<(std::ostream& s, const op::v4::Interpolate::CoordinateTransformMode& type) {
     return s << as_string(type);
 }
 
-std::ostream& ngraph::operator<<(std::ostream& s, const op::v4::Interpolate::NearestMode& type) {
+std::ostream& operator<<(std::ostream& s, const op::v4::Interpolate::NearestMode& type) {
     return s << as_string(type);
 }
diff --git a/ngraph/core/src/op/less.cpp b/ngraph/core/src/op/less.cpp
index 5aaebdd3e54151..f50b2f44468300 100644
--- a/ngraph/core/src/op/less.cpp
+++ b/ngraph/core/src/op/less.cpp
@@ -50,7 +50,7 @@ bool evaluate_less(const HostTensorPtr& arg0,
 
 // ----------------------------- v1 --------------------------------------------
 
-NGRAPH_RTTI_DEFINITION(op::v1::Less, "Less", 1, op::util::BinaryElementwiseComparison);
+OPENVINO_RTTI_DEFINITION(op::v1::Less, "Less", 1, op::util::BinaryElementwiseComparison);
 
 op::v1::Less::Less(const Output<Node>& arg0, const Output<Node>& arg1, const AutoBroadcastSpec& auto_broadcast)
     : BinaryElementwiseComparison(arg0, arg1, auto_broadcast) {
diff --git a/ngraph/core/src/op/less_eq.cpp b/ngraph/core/src/op/less_eq.cpp
index 02c2f1c069e7b8..864045e7f6e523 100644
--- a/ngraph/core/src/op/less_eq.cpp
+++ b/ngraph/core/src/op/less_eq.cpp
@@ -13,7 +13,7 @@ using namespace ngraph;
 
 // ---------------------------------- v1 ---------------------------------------
 
-NGRAPH_RTTI_DEFINITION(op::v1::LessEqual, "LessEqual", 1, op::util::BinaryElementwiseComparison);
+OPENVINO_RTTI_DEFINITION(op::v1::LessEqual, "LessEqual", 1, op::util::BinaryElementwiseComparison);
 
 op::v1::LessEqual::LessEqual(const Output<Node>& arg0,
                              const Output<Node>& arg1,
diff --git a/ngraph/core/src/op/log.cpp b/ngraph/core/src/op/log.cpp
index 81cba930c0f292..8b12ac0565a346 100644
--- a/ngraph/core/src/op/log.cpp
+++ b/ngraph/core/src/op/log.cpp
@@ -12,7 +12,7 @@
 using namespace std;
 using namespace ngraph;
 
-NGRAPH_RTTI_DEFINITION(op::v0::Log, "Log", 0);
+OPENVINO_RTTI_DEFINITION(op::v0::Log, "Log", 0);
 
 op::Log::Log(const Output<Node>& arg) : UnaryElementwiseArithmetic(arg) {
     constructor_validate_and_infer_types();
diff --git a/ngraph/core/src/op/log_softmax.cpp b/ngraph/core/src/op/log_softmax.cpp
index 1ccf20418b7414..4561cf0b4de524 100644
--- a/ngraph/core/src/op/log_softmax.cpp
+++ b/ngraph/core/src/op/log_softmax.cpp
@@ -10,7 +10,7 @@
 using namespace std;
 using namespace ngraph;
 
-NGRAPH_RTTI_DEFINITION(op::v5::LogSoftmax, "LogSoftmax", 5);
+OPENVINO_RTTI_DEFINITION(op::v5::LogSoftmax, "LogSoftmax", 5);
 
 op::v5::LogSoftmax::LogSoftmax(const Output<Node>& arg, const int64_t axis) : Op({arg}), m_axis(axis) {
     constructor_validate_and_infer_types();
diff --git a/ngraph/core/src/op/loop.cpp b/ngraph/core/src/op/loop.cpp
index 8ab9675b86b109..bc21b00db103e8 100644
--- a/ngraph/core/src/op/loop.cpp
+++ b/ngraph/core/src/op/loop.cpp
@@ -15,7 +15,7 @@
 using namespace std;
 using namespace ngraph;
 
-NGRAPH_RTTI_DEFINITION(op::v5::Loop, "Loop", 5);
+OPENVINO_RTTI_DEFINITION(op::v5::Loop, "Loop", 5);
 
 op::v5::Loop::Loop(const Output<Node>& trip_count, const Output<Node>& execution_condition) : SubGraphOp() {
     set_argument(0, trip_count);
@@ -178,7 +178,7 @@ void op::v5::Loop::validate_and_infer_types() {
                 body_parameter->set_partial_shape(input_partial_shape);
             }
         } else if (auto invariant_input_description =
-                       ov::as_type_ptr<TensorIterator::InvariantInputDescription>(input_description)) {
+                       ov::as_type_ptr<v0::TensorIterator::InvariantInputDescription>(input_description)) {
             auto body_parameter = m_bodies[0]->get_parameters().at(invariant_input_description->m_body_parameter_index);
 
             auto body_param_partial_shape = body_parameter->get_partial_shape();
@@ -198,7 +198,7 @@ void op::v5::Loop::validate_and_infer_types() {
         auto body_value = m_bodies[0]->get_results().at(output_description->m_body_value_index)->input_value(0);
 
         if (auto concat_output_description =
-                ov::as_type_ptr<TensorIterator::ConcatOutputDescription>(output_description)) {
+                ov::as_type_ptr<v0::TensorIterator::ConcatOutputDescription>(output_description)) {
             const auto& body_value_partial_shape = body_value.get_partial_shape();
             auto out_shape = body_value_partial_shape;
             if (zero_number_of_iter) {
@@ -220,7 +220,7 @@ void op::v5::Loop::validate_and_infer_types() {
         }
 
         else if (auto body_output_description =
-                     ov::as_type_ptr<TensorIterator::BodyOutputDescription>(output_description)) {
+                     ov::as_type_ptr<v0::TensorIterator::BodyOutputDescription>(output_description)) {
             const PartialShape& ps = body_value.get_partial_shape();
             if (ps.is_dynamic()) {
                 set_output_type(index, body_value.get_element_type(), ps);
diff --git a/ngraph/core/src/op/lrn.cpp b/ngraph/core/src/op/lrn.cpp
index a3884b726ade0e..828fd154bc23ed 100644
--- a/ngraph/core/src/op/lrn.cpp
+++ b/ngraph/core/src/op/lrn.cpp
@@ -14,10 +14,10 @@
 using namespace std;
 using namespace ngraph;
 
-NGRAPH_RTTI_DEFINITION(op::LRN, "LRN", 0);
+OPENVINO_RTTI_DEFINITION(op::v0::LRN, "LRN", 0);
 
 op::LRN::LRN(const Output<Node>& arg, double alpha, double beta, double bias, size_t size)
-    : LRN(arg, op::Constant::create(element::i64, Shape{1}, {1}), alpha, beta, bias, size) {
+    : LRN(arg, op::v0::Constant::create(element::i64, Shape{1}, {1}), alpha, beta, bias, size) {
     add_provenance_group_member(input_value(1).get_node_shared_ptr());
 }
 
@@ -102,5 +102,5 @@ bool ngraph::op::v0::LRN::visit_attributes(AttributeVisitor& visitor) {
 shared_ptr<Node> op::LRN::clone_with_new_inputs(const OutputVector& new_args) const {
     NGRAPH_OP_SCOPE(v0_LRN_clone_with_new_inputs);
     check_new_args_count(this, new_args);
-    return make_shared<op::LRN>(new_args.at(0), new_args.at(1), m_alpha, m_beta, m_bias, m_size);
+    return make_shared<op::v0::LRN>(new_args.at(0), new_args.at(1), m_alpha, m_beta, m_bias, m_size);
 }
diff --git a/ngraph/core/src/op/lstm_cell.cpp b/ngraph/core/src/op/lstm_cell.cpp
index aad2148fc6dfe6..724dd267994bca 100644
--- a/ngraph/core/src/op/lstm_cell.cpp
+++ b/ngraph/core/src/op/lstm_cell.cpp
@@ -17,8 +17,8 @@
 using namespace std;
 using namespace ngraph;
 
-NGRAPH_RTTI_DEFINITION(op::v0::LSTMCell, "LSTMCell", 0, op::util::RNNCellBase);
-NGRAPH_RTTI_DEFINITION(op::v4::LSTMCell, "LSTMCell", 4, op::util::RNNCellBase);
+OPENVINO_RTTI_DEFINITION(op::v0::LSTMCell, "LSTMCell", 0, op::util::RNNCellBase);
+OPENVINO_RTTI_DEFINITION(op::v4::LSTMCell, "LSTMCell", 4, op::util::RNNCellBase);
 
 op::v0::LSTMCell::LSTMCell() : m_input_forget(false), m_weights_format(LSTMWeightsFormat::IFCO) {
     m_activations = {"sigmoid", "tanh", "tanh"};
@@ -273,14 +273,15 @@ void op::v0::LSTMCell::validate_and_infer_types() {
 }
 
 Output<Node> op::v0::LSTMCell::get_default_bias_input() const {
-    return Output<Node>{
-        op::Constant::create(get_input_element_type(0), Shape{s_gates_count * get_hidden_size()}, vector<float>{0.f})};
+    return Output<Node>{op::v0::Constant::create(get_input_element_type(0),
+                                                 Shape{s_gates_count * get_hidden_size()},
+                                                 vector<float>{0.f})};
 }
 
 Output<Node> op::v0::LSTMCell::get_default_peepholes_input() const {
-    return Output<Node>{op::Constant::create(get_input_element_type(0),
-                                             Shape{s_peepholes_count * get_hidden_size()},
-                                             vector<float>{0.f})};
+    return Output<Node>{op::v0::Constant::create(get_input_element_type(0),
+                                                 Shape{s_peepholes_count * get_hidden_size()},
+                                                 vector<float>{0.f})};
 }
 
 shared_ptr<Node> op::v0::LSTMCell::clone_with_new_inputs(const OutputVector& new_args) const {
@@ -511,8 +512,9 @@ void op::v4::LSTMCell::validate_and_infer_types() {
 }
 
 Output<Node> op::v4::LSTMCell::get_default_bias_input() const {
-    return Output<Node>{
-        op::Constant::create(get_input_element_type(0), Shape{s_gates_count * get_hidden_size()}, vector<float>{0.f})};
+    return Output<Node>{op::v0::Constant::create(get_input_element_type(0),
+                                                 Shape{s_gates_count * get_hidden_size()},
+                                                 vector<float>{0.f})};
 }
 
 shared_ptr<Node> op::v4::LSTMCell::clone_with_new_inputs(const OutputVector& new_args) const {
diff --git a/ngraph/core/src/op/lstm_sequence.cpp b/ngraph/core/src/op/lstm_sequence.cpp
index 5a1229986a6057..0b30eddf153b6d 100644
--- a/ngraph/core/src/op/lstm_sequence.cpp
+++ b/ngraph/core/src/op/lstm_sequence.cpp
@@ -16,8 +16,8 @@
 using namespace ngraph;
 using namespace std;
 
-NGRAPH_RTTI_DEFINITION(op::v0::LSTMSequence, "LSTMSequence", 0);
-NGRAPH_RTTI_DEFINITION(op::v5::LSTMSequence, "LSTMSequence", 5);
+OPENVINO_RTTI_DEFINITION(op::v0::LSTMSequence, "LSTMSequence", 0);
+OPENVINO_RTTI_DEFINITION(op::v5::LSTMSequence, "LSTMSequence", 5);
 
 op::v0::LSTMSequence::LSTMSequence()
     : Op(),
diff --git a/ngraph/core/src/op/parameter.cpp b/ngraph/core/src/op/parameter.cpp
index 8a95f915fa394e..415c1dbda002ac 100644
--- a/ngraph/core/src/op/parameter.cpp
+++ b/ngraph/core/src/op/parameter.cpp
@@ -12,7 +12,7 @@
 using namespace std;
 using namespace ngraph;
 
-NGRAPH_RTTI_DEFINITION(op::v0::Parameter, "Parameter", 0);
+OPENVINO_RTTI_DEFINITION(op::v0::Parameter, "Parameter", 0);
 
 op::Parameter::Parameter(const element::Type& element_type, const PartialShape& pshape)
     : m_partial_shape(pshape),
diff --git a/ngraph/core/src/op/result.cpp b/ngraph/core/src/op/result.cpp
index 237b65c79772ce..40ce23ceb8ece8 100644
--- a/ngraph/core/src/op/result.cpp
+++ b/ngraph/core/src/op/result.cpp
@@ -15,7 +15,7 @@
 using namespace std;
 using namespace ngraph;
 
-NGRAPH_RTTI_DEFINITION(op::v0::Result, "Result", 0);
+OPENVINO_RTTI_DEFINITION(op::v0::Result, "Result", 0);
 
 op::Result::Result(const Output<Node>& arg, bool needs_default_layout)
     : Op({arg}),
diff --git a/ngraph/core/src/op/tensor_iterator.cpp b/ngraph/core/src/op/tensor_iterator.cpp
index 9669769b8bcd4b..2f1760111f632d 100644
--- a/ngraph/core/src/op/tensor_iterator.cpp
+++ b/ngraph/core/src/op/tensor_iterator.cpp
@@ -12,7 +12,7 @@
 using namespace std;
 using namespace ngraph;
 
-NGRAPH_RTTI_DEFINITION(op::v0::TensorIterator, "TensorIterator", 0, op::util::SubGraphOp);
+OPENVINO_RTTI_DEFINITION(op::v0::TensorIterator, "TensorIterator", 0, op::util::SubGraphOp);
 
 op::v0::TensorIterator::TensorIterator(const OutputVector& values) : op::util::SubGraphOp(values) {}