From 5d646ac148e39eb34417773218f3768bd3f22c39 Mon Sep 17 00:00:00 2001 From: Ilya Churaev Date: Thu, 2 Sep 2021 08:28:12 +0300 Subject: [PATCH] Moved operations from A to ov namespace --- ngraph/core/include/ngraph/op/abs.hpp | 29 +- ngraph/core/include/ngraph/op/acos.hpp | 27 +- ngraph/core/include/ngraph/op/acosh.hpp | 26 +- .../include/ngraph/op/adaptive_avg_pool.hpp | 25 +- .../include/ngraph/op/adaptive_max_pool.hpp | 37 +- ngraph/core/include/ngraph/op/add.hpp | 33 +- ngraph/core/include/ngraph/op/and.hpp | 29 +- ngraph/core/include/ngraph/op/asin.hpp | 28 +- ngraph/core/include/ngraph/op/asinh.hpp | 26 +- ngraph/core/include/ngraph/op/assign.hpp | 65 +- ngraph/core/include/ngraph/op/atan.hpp | 26 +- ngraph/core/include/ngraph/op/atanh.hpp | 26 +- ngraph/core/include/ngraph/op/avg_pool.hpp | 75 +- ngraph/core/include/ngraph/op/constant.hpp | 686 +---------------- ngraph/core/include/ngraph/op/read_value.hpp | 74 +- ngraph/core/include/ngraph/op/sink.hpp | 13 +- .../include/ngraph/runtime/host_tensor.hpp | 7 +- ngraph/core/include/openvino/op/abs.hpp | 40 + ngraph/core/include/openvino/op/acos.hpp | 38 + ngraph/core/include/openvino/op/acosh.hpp | 40 + .../include/openvino/op/adaptive_avg_pool.hpp | 38 + .../include/openvino/op/adaptive_max_pool.hpp | 50 ++ ngraph/core/include/openvino/op/add.hpp | 47 ++ ngraph/core/include/openvino/op/asin.hpp | 39 + ngraph/core/include/openvino/op/asinh.hpp | 40 + ngraph/core/include/openvino/op/assign.hpp | 72 ++ ngraph/core/include/openvino/op/atan.hpp | 40 + ngraph/core/include/openvino/op/atanh.hpp | 40 + ngraph/core/include/openvino/op/avg_pool.hpp | 88 +++ ngraph/core/include/openvino/op/constant.hpp | 711 ++++++++++++++++++ .../core/include/openvino/op/logical_and.hpp | 43 ++ .../core/include/openvino/op/read_value.hpp | 79 ++ ngraph/core/include/openvino/op/sink.hpp | 26 + .../include/openvino/op/util/assign_base.hpp | 22 + .../openvino/op/util/read_value_base.hpp | 24 + ngraph/core/src/op/abs.cpp | 25 +- ngraph/core/src/op/acos.cpp | 25 +- ngraph/core/src/op/acosh.cpp | 23 +- ngraph/core/src/op/adaptive_avg_pool.cpp | 2 +- ngraph/core/src/op/adaptive_max_pool.cpp | 2 +- ngraph/core/src/op/add.cpp | 2 +- ngraph/core/src/op/asin.cpp | 2 +- ngraph/core/src/op/asinh.cpp | 2 +- ngraph/core/src/op/assign.cpp | 5 +- ngraph/core/src/op/atan.cpp | 3 +- ngraph/core/src/op/atanh.cpp | 2 +- ngraph/core/src/op/avg_pool.cpp | 4 +- ngraph/core/src/op/constant.cpp | 126 ++-- .../core/src/op/{and.cpp => logical_and.cpp} | 5 +- ngraph/core/src/op/read_value.cpp | 9 +- ngraph/core/src/op/util/assign_base.cpp | 7 + ngraph/core/src/op/util/read_value_base.cpp | 7 + 52 files changed, 1647 insertions(+), 1313 deletions(-) create mode 100644 ngraph/core/include/openvino/op/abs.hpp create mode 100644 ngraph/core/include/openvino/op/acos.hpp create mode 100644 ngraph/core/include/openvino/op/acosh.hpp create mode 100644 ngraph/core/include/openvino/op/adaptive_avg_pool.hpp create mode 100644 ngraph/core/include/openvino/op/adaptive_max_pool.hpp create mode 100644 ngraph/core/include/openvino/op/add.hpp create mode 100644 ngraph/core/include/openvino/op/asin.hpp create mode 100644 ngraph/core/include/openvino/op/asinh.hpp create mode 100644 ngraph/core/include/openvino/op/assign.hpp create mode 100644 ngraph/core/include/openvino/op/atan.hpp create mode 100644 ngraph/core/include/openvino/op/atanh.hpp create mode 100644 ngraph/core/include/openvino/op/avg_pool.hpp create mode 100644 ngraph/core/include/openvino/op/constant.hpp create mode 100644 
ngraph/core/include/openvino/op/logical_and.hpp create mode 100644 ngraph/core/include/openvino/op/read_value.hpp create mode 100644 ngraph/core/include/openvino/op/sink.hpp create mode 100644 ngraph/core/include/openvino/op/util/assign_base.hpp create mode 100644 ngraph/core/include/openvino/op/util/read_value_base.hpp rename ngraph/core/src/op/{and.cpp => logical_and.cpp} (96%) create mode 100644 ngraph/core/src/op/util/assign_base.cpp create mode 100644 ngraph/core/src/op/util/read_value_base.cpp diff --git a/ngraph/core/include/ngraph/op/abs.hpp b/ngraph/core/include/ngraph/op/abs.hpp index f85805edde6101..6661027581a060 100644 --- a/ngraph/core/include/ngraph/op/abs.hpp +++ b/ngraph/core/include/ngraph/op/abs.hpp @@ -7,37 +7,12 @@ #include #include "ngraph/op/util/unary_elementwise_arithmetic.hpp" +#include "openvino/op/abs.hpp" namespace ngraph { namespace op { namespace v0 { -/// \brief Elementwise absolute value operation. -/// -class NGRAPH_API Abs : public util::UnaryElementwiseArithmetic { -public: - static constexpr NodeTypeInfo type_info{"Abs", 0}; - const NodeTypeInfo& get_type_info() const override { - return type_info; - } - /// \brief Constructs an absolute value operation. - Abs() = default; - bool visit_attributes(AttributeVisitor&) override { - return true; - } - /// \brief Constructs an absolute value operation. - /// - /// \param arg Output that produces the input tensor.
- /// `[d1, ...]` - /// - /// Output `[d1, ...]` - /// - Abs(const Output& arg); - - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; -}; +using ov::op::v0::Abs; } // namespace v0 using v0::Abs; } // namespace op diff --git a/ngraph/core/include/ngraph/op/acos.hpp b/ngraph/core/include/ngraph/op/acos.hpp index e4dc89f30a6646..d43aedd7ea757b 100644 --- a/ngraph/core/include/ngraph/op/acos.hpp +++ b/ngraph/core/include/ngraph/op/acos.hpp @@ -7,35 +7,12 @@ #include #include "ngraph/op/util/unary_elementwise_arithmetic.hpp" +#include "openvino/op/acos.hpp" namespace ngraph { namespace op { namespace v0 { -/// \brief Elementwise inverse cosine (arccos) operation. -/// -class NGRAPH_API Acos : public util::UnaryElementwiseArithmetic { -public: - static constexpr NodeTypeInfo type_info{"Acos", 0}; - const NodeTypeInfo& get_type_info() const override { - return type_info; - } - /// \brief Constructs an arccos operation. - Acos() = default; - /// \brief Constructs an arccos operation. - /// - /// \param arg Output that produces the input tensor.
- /// `[d1, ...]` - /// - /// Output `[d1, ...]` - /// - Acos(const Output& arg); - bool visit_attributes(AttributeVisitor&) override { - return true; - } - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; -}; +using ov::op::v0::Acos; } // namespace v0 using v0::Acos; } // namespace op diff --git a/ngraph/core/include/ngraph/op/acosh.hpp b/ngraph/core/include/ngraph/op/acosh.hpp index 95ad7b297f503f..fd1958f0bc02d0 100644 --- a/ngraph/core/include/ngraph/op/acosh.hpp +++ b/ngraph/core/include/ngraph/op/acosh.hpp @@ -7,34 +7,12 @@ #include #include "ngraph/op/util/unary_elementwise_arithmetic.hpp" +#include "openvino/op/acosh.hpp" namespace ngraph { namespace op { namespace v3 { -/// \brief Elementwise inverse hyperbolic cos operation. -/// -class NGRAPH_API Acosh : public util::UnaryElementwiseArithmetic { -public: - NGRAPH_RTTI_DECLARATION; - - /// \brief Constructs an Acosh operation. - Acosh() = default; - /// \brief Constructs an Acosh operation. - /// - /// \param arg Output that produces the input tensor.
- /// `[d1, ...]` - /// - /// Output `[d1, ...]` - /// - Acosh(const Output& arg); - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - bool visit_attributes(AttributeVisitor&) override { - return true; - } - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; -}; +using ov::op::v3::Acosh; } // namespace v3 using v3::Acosh; } // namespace op diff --git a/ngraph/core/include/ngraph/op/adaptive_avg_pool.hpp b/ngraph/core/include/ngraph/op/adaptive_avg_pool.hpp index 11b7de1699d054..eb0a8b82ed42b4 100644 --- a/ngraph/core/include/ngraph/op/adaptive_avg_pool.hpp +++ b/ngraph/core/include/ngraph/op/adaptive_avg_pool.hpp @@ -6,33 +6,12 @@ #include "ngraph/op/op.hpp" #include "ngraph/op/util/attr_types.hpp" +#include "openvino/op/adaptive_avg_pool.hpp" namespace ngraph { namespace op { namespace v8 { -/// \brief Adaptive average pooling operation. -/// -class NGRAPH_API AdaptiveAvgPool : public Op { -public: - NGRAPH_RTTI_DECLARATION; - - AdaptiveAvgPool() = default; - - /// - /// \brief Constructs adaptive average pooling operation. - /// - /// \param data Input data - /// - /// \param output_shape 1D tensor describing output shape for spatial - /// dimensions. - /// - AdaptiveAvgPool(const Output& data, const Output& output_shape); - - void validate_and_infer_types() override; - bool visit_attributes(AttributeVisitor& visitor) override; - - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; -}; +using ov::op::v8::AdaptiveAvgPool; } // namespace v8 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/adaptive_max_pool.hpp b/ngraph/core/include/ngraph/op/adaptive_max_pool.hpp index 7554ad272cdcaf..6ba0a20f2e64e9 100644 --- a/ngraph/core/include/ngraph/op/adaptive_max_pool.hpp +++ b/ngraph/core/include/ngraph/op/adaptive_max_pool.hpp @@ -6,45 +6,12 @@ #include "ngraph/op/op.hpp" #include "ngraph/op/util/attr_types.hpp" +#include "openvino/op/adaptive_max_pool.hpp" namespace ngraph { namespace op { namespace v8 { -/// \brief Adaptive max pooling operation. -/// -class NGRAPH_API AdaptiveMaxPool : public Op { -public: - NGRAPH_RTTI_DECLARATION; - - AdaptiveMaxPool() = default; - - /// - /// \brief Constructs adaptive max pooling operation. - /// - /// \param data Input data - /// - /// \param output_shape 1D tensor describing output shape for spatial - /// dimensions. 
- /// - /// \param index_element_type Specifies the output tensor type for indices - /// output - /// - AdaptiveMaxPool(const Output& data, - const Output& output_shape, - const ngraph::element::Type& index_element_type = ngraph::element::i64); - - void validate_and_infer_types() override; - bool visit_attributes(AttributeVisitor& visitor) override; - - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - element::Type get_index_element_type() const { - return m_index_element_type; - } - -protected: - ngraph::element::Type m_index_element_type = ngraph::element::i64; -}; +using ov::op::v8::AdaptiveMaxPool; } // namespace v8 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/add.hpp b/ngraph/core/include/ngraph/op/add.hpp index ac06fc51a344fa..875f7baf56fa11 100644 --- a/ngraph/core/include/ngraph/op/add.hpp +++ b/ngraph/core/include/ngraph/op/add.hpp @@ -7,41 +7,12 @@ #include #include "ngraph/op/util/binary_elementwise_arithmetic.hpp" +#include "openvino/op/add.hpp" namespace ngraph { namespace op { namespace v1 { -/// \brief Elementwise addition operation. -/// -class NGRAPH_API Add : public util::BinaryElementwiseArithmetic { -public: - NGRAPH_RTTI_DECLARATION; - - /// \brief Constructs an uninitialized addition operation - Add() : util::BinaryElementwiseArithmetic(AutoBroadcastSpec::NUMPY) {} - - /// \brief Constructs an addition operation. - /// - /// \param arg0 Output that produces the first input tensor.
- /// `[d0, ...]` - /// \param arg1 Output that produces the second input tensor.
- /// `[d0, ...]` - /// \param auto_broadcast Auto broadcast specification. Default is Numpy-style - /// implicit broadcasting. - /// - /// Output `[d0, ...]` - /// - Add(const Output& arg0, - const Output& arg1, - const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY)); - - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - bool visit_attributes(AttributeVisitor& visitor) override; - - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; -}; +using ov::op::v1::Add; } // namespace v1 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/and.hpp b/ngraph/core/include/ngraph/op/and.hpp index ea60b6a7190f1f..1186a86ba99595 100644 --- a/ngraph/core/include/ngraph/op/and.hpp +++ b/ngraph/core/include/ngraph/op/and.hpp @@ -7,37 +7,12 @@ #include #include "ngraph/op/util/binary_elementwise_logical.hpp" +#include "openvino/op/logical_and.hpp" namespace ngraph { namespace op { namespace v1 { -/// \brief Elementwise logical-and operation. -/// -class NGRAPH_API LogicalAnd : public util::BinaryElementwiseLogical { -public: - NGRAPH_RTTI_DECLARATION; - /// \brief Constructs a logical-and operation. - LogicalAnd() = default; - - /// \brief Constructs a logical-and operation. - /// - /// \param arg0 Output that produces the first input tensor.
- /// `[d0, ...]` - /// \param arg1 Output that produces the second input tensor.
- /// `[d0, ...]` - /// \param auto_broadcast Auto broadcast specification - /// - /// Output `[d0, ...]` - /// - LogicalAnd(const Output& arg0, - const Output& arg1, - const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY)); - - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - bool visit_attributes(AttributeVisitor& visitor) override; - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; -}; +using ov::op::v1::LogicalAnd; } // namespace v1 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/asin.hpp b/ngraph/core/include/ngraph/op/asin.hpp index 2d0f78378b77b1..d91e863995fb0f 100644 --- a/ngraph/core/include/ngraph/op/asin.hpp +++ b/ngraph/core/include/ngraph/op/asin.hpp @@ -7,36 +7,12 @@ #include #include "ngraph/op/util/unary_elementwise_arithmetic.hpp" +#include "openvino/op/asin.hpp" namespace ngraph { namespace op { namespace v0 { -/// \brief Elementwise inverse sine (arcsin) operation. -/// -class NGRAPH_API Asin : public util::UnaryElementwiseArithmetic { -public: - static constexpr NodeTypeInfo type_info{"Asin", 0}; - const NodeTypeInfo& get_type_info() const override { - return type_info; - } - /// \brief Constructs an arcsin operation. - Asin() = default; - /// \brief Constructs an arcsin operation. - /// - /// \param arg Output that produces the input tensor.
- /// `[d1, ...]` - /// - /// Output `[d1, ...]` - /// - Asin(const Output& arg); - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - bool visit_attributes(AttributeVisitor&) override { - return true; - } - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; -}; +using ov::op::v0::Asin; } // namespace v0 using v0::Asin; } // namespace op diff --git a/ngraph/core/include/ngraph/op/asinh.hpp b/ngraph/core/include/ngraph/op/asinh.hpp index c3414eee7d447b..c7eb418a95b79f 100644 --- a/ngraph/core/include/ngraph/op/asinh.hpp +++ b/ngraph/core/include/ngraph/op/asinh.hpp @@ -7,34 +7,12 @@ #include #include "ngraph/op/util/unary_elementwise_arithmetic.hpp" +#include "openvino/op/asinh.hpp" namespace ngraph { namespace op { namespace v3 { -/// \brief Elementwise inverse hyperbolic sin operation. -/// -class NGRAPH_API Asinh : public util::UnaryElementwiseArithmetic { -public: - NGRAPH_RTTI_DECLARATION; - - /// \brief Constructs an Asinh operation. - Asinh() = default; - /// \brief Constructs an Asinh operation. - /// - /// \param arg Output that produces the input tensor.
- /// `[d1, ...]` - /// - /// Output `[d1, ...]` - /// - Asinh(const Output& arg); - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - bool visit_attributes(AttributeVisitor&) override { - return true; - } - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; -}; +using ov::op::v3::Asinh; } // namespace v3 using v3::Asinh; } // namespace op diff --git a/ngraph/core/include/ngraph/op/assign.hpp b/ngraph/core/include/ngraph/op/assign.hpp index c99dc8c321e099..316a61cbd37a04 100644 --- a/ngraph/core/include/ngraph/op/assign.hpp +++ b/ngraph/core/include/ngraph/op/assign.hpp @@ -7,74 +7,17 @@ #include "ngraph/op/sink.hpp" #include "ngraph/op/util/variable.hpp" #include "ngraph/op/util/variable_extension.hpp" +#include "openvino/op/assign.hpp" namespace ngraph { namespace op { -class NGRAPH_API AssignBase : public Sink, public VariableExtension { -public: - NGRAPH_RTTI_DECLARATION; - AssignBase() = default; - /// \brief Constructs an AssignBase operation. - explicit AssignBase(const OutputVector& arguments) : Sink(arguments) {} -}; +using ov::op::util::AssignBase; namespace v3 { -/// \brief Assign operation sets an input value to the variable with `variable_id` -class NGRAPH_API Assign : public AssignBase { -public: - NGRAPH_RTTI_DECLARATION; - Assign() = default; - - /// \brief Constructs an Assign operation. - /// - /// \param new_value Node that produces the input tensor. - /// \param variable_id identifier of the variable to be updated. - Assign(const Output& new_value, const std::string& variable_id); - - void validate_and_infer_types() override; - std::string get_variable_id() const override { - return m_variable_id; - } - - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - bool visit_attributes(AttributeVisitor& visitor) override; - -private: - std::string m_variable_id; -}; +using ov::op::v3::Assign; } // namespace v3 namespace v6 { -/// \brief Assign operation sets an input value to the variable with `variable_id` -class NGRAPH_API Assign : public AssignBase { -public: - NGRAPH_RTTI_DECLARATION; - Assign() = default; - - /// \brief Constructs an Assign operation. - /// - /// \param new_value Node that produces the input tensor. - /// \param variable Class for storing and synchronizing element types, shapes and - /// identifiers - /// between pairs of Assign/ReadValue nodes. - Assign(const Output& new_value, const std::shared_ptr& variable); - - void validate_and_infer_types() override; - - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - bool visit_attributes(AttributeVisitor& visitor) override; - - std::string get_variable_id() const override { - NGRAPH_CHECK(m_variable, "Variable is not initialized. 
Variable_id is unavailable"); - return m_variable->get_info().variable_id; - } - bool evaluate(const HostTensorVector& outputs, - const HostTensorVector& inputs, - const EvaluationContext& evaluation_context) const override; - bool has_evaluate() const override; - bool constant_fold(OutputVector& output_values, const OutputVector& inputs_values) override; -}; +using ov::op::v6::Assign; } // namespace v6 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/atan.hpp b/ngraph/core/include/ngraph/op/atan.hpp index d43fd997141acf..af3a168c9a339d 100644 --- a/ngraph/core/include/ngraph/op/atan.hpp +++ b/ngraph/core/include/ngraph/op/atan.hpp @@ -7,34 +7,12 @@ #include #include "ngraph/op/util/unary_elementwise_arithmetic.hpp" +#include "openvino/op/atan.hpp" namespace ngraph { namespace op { namespace v0 { -/// \brief Elementwise inverse tangent (arctan) operation. -/// -class NGRAPH_API Atan : public util::UnaryElementwiseArithmetic { -public: - NGRAPH_RTTI_DECLARATION; - /// \brief Constructs an arctan operation. - Atan() = default; - - /// \brief Constructs an arctan operation. - /// - /// \param arg Output that produces the input tensor.
- /// `[d1, ...]` - /// - /// Output `[d1, ...]` - /// - Atan(const Output& arg); - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - bool visit_attributes(AttributeVisitor&) override { - return true; - } - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; -}; +using ov::op::v0::Atan; } // namespace v0 using v0::Atan; } // namespace op diff --git a/ngraph/core/include/ngraph/op/atanh.hpp b/ngraph/core/include/ngraph/op/atanh.hpp index c9de03b05aa3c7..dbc4eebb7e1df9 100644 --- a/ngraph/core/include/ngraph/op/atanh.hpp +++ b/ngraph/core/include/ngraph/op/atanh.hpp @@ -7,34 +7,12 @@ #include #include "ngraph/op/util/unary_elementwise_arithmetic.hpp" +#include "openvino/op/atanh.hpp" namespace ngraph { namespace op { namespace v3 { -/// \brief Elementwise inverse hyperbolic tangent operation. -/// -class NGRAPH_API Atanh : public util::UnaryElementwiseArithmetic { -public: - NGRAPH_RTTI_DECLARATION; - - /// \brief Constructs an Atanh operation. - Atanh() = default; - /// \brief Constructs an Atanh operation. - /// - /// \param arg Output that produces the input tensor.
- /// `[d1, ...]` - /// - /// Output `[d1, ...]` - /// - Atanh(const Output& arg); - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - bool visit_attributes(AttributeVisitor&) override { - return true; - } - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; -}; +using ov::op::v3::Atanh; } // namespace v3 using v3::Atanh; } // namespace op diff --git a/ngraph/core/include/ngraph/op/avg_pool.hpp b/ngraph/core/include/ngraph/op/avg_pool.hpp index 3d1d1aa52c8921..8943d5ac827496 100644 --- a/ngraph/core/include/ngraph/op/avg_pool.hpp +++ b/ngraph/core/include/ngraph/op/avg_pool.hpp @@ -6,83 +6,12 @@ #include "ngraph/op/op.hpp" #include "ngraph/op/util/attr_types.hpp" +#include "openvino/op/avg_pool.hpp" namespace ngraph { namespace op { namespace v1 { -/// \brief Batched average pooling operation. -/// -class NGRAPH_API AvgPool : public Op { -public: - NGRAPH_RTTI_DECLARATION; - - /// \brief Constructs a batched average pooling operation. - AvgPool() = default; - - /// - /// \brief Constructs a batched average pooling operation. - /// - /// \param arg The output producing the input data batch tensor.
- /// `[d1, dn]` - /// \param strides The strides.
- /// `[n]` - /// \param pads_begin The beginning of padding shape.
- /// `[n]` - /// \param pads_end The end of padding shape.
- /// `[n]` - /// \param kernel The kernel shape.
`[n]` - /// \param exclude_pad If false then averages include padding elements, each - /// treated as the number zero. If true, padding - /// elements - /// are entirely ignored when computing averages. - /// \param rounding_type Whether to use ceiling or floor rounding type while - /// computing output shape. - /// \param auto_pad Padding type to use for additional padded dimensions - /// - AvgPool(const Output& arg, - const Strides& strides, - const Shape& pads_begin, - const Shape& pads_end, - const Shape& kernel, - bool exclude_pad, - op::RoundingType rounding_type = op::RoundingType::FLOOR, - const PadType& auto_pad = op::PadType::EXPLICIT); - - void validate_and_infer_types() override; - bool visit_attributes(AttributeVisitor& visitor) override; - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - /// \return The kernel shape. - const Shape& get_kernel() const; - void set_kernel(const Shape& kernel); - /// \return The strides. - const Strides& get_strides() const; - void set_strides(const Strides& strides); - /// \return The beginning of padding shape. - const Shape& get_pads_begin() const; - void set_pads_begin(const Shape& pads_begin); - /// \return The end of padding shape. - const Shape& get_pads_end() const; - void set_pads_end(const Shape& pads_end); - bool get_exclude_pad() const; - void set_exclude_pad(bool exclude_pad); - /// \return The pad type for pooling. - const PadType& get_auto_pad() const; - void set_auto_pad(const PadType& auto_pad); - op::RoundingType get_rounding_type() const; - void set_rounding_type(op::RoundingType rounding_type); - /// \return The default value for AvgPool. - NGRAPH_SUPPRESS_DEPRECATED_START - virtual std::shared_ptr get_default_value() const override; - NGRAPH_SUPPRESS_DEPRECATED_END - -protected: - Shape m_kernel; - Strides m_strides; - Shape m_pads_begin; - Shape m_pads_end; - bool m_exclude_pad{true}; - PadType m_auto_pad{PadType::EXPLICIT}; - op::RoundingType m_rounding_type{op::RoundingType::FLOOR}; -}; +using ov::op::v1::AvgPool; } // namespace v1 using v1::AvgPool; diff --git a/ngraph/core/include/ngraph/op/constant.hpp b/ngraph/core/include/ngraph/op/constant.hpp index aeca47c5cb84ed..f13fd70fa5afa0 100644 --- a/ngraph/core/include/ngraph/op/constant.hpp +++ b/ngraph/core/include/ngraph/op/constant.hpp @@ -15,694 +15,12 @@ #include "ngraph/type/element_type.hpp" #include "ngraph/type/element_type_traits.hpp" #include "ngraph/util.hpp" +#include "openvino/op/constant.hpp" namespace ngraph { namespace op { namespace v0 { -/// \brief Class for constants. -class NGRAPH_API Constant : public Op { -public: - NGRAPH_RTTI_DECLARATION; - - Constant() = default; - - /// \brief Initialize a constant from tensor - /// \param tensor The tensor with data - Constant(const std::shared_ptr& tensor); - - /// \brief Constructs a tensor constant. - /// - /// \param type The element type of the tensor constant. - /// \param shape The shape of the tensor constant. - /// \param values A vector of literals for initializing the tensor constant. The - /// size of values must match the size of the shape. - template - Constant(const element::Type& type, const Shape& shape, const std::vector& values) : Constant(type, shape) { - NODE_VALIDATION_CHECK(this, - values.size() == 1 || values.size() == shape_size(m_shape), - "Did not get the expected number of literals for a constant of shape ", - m_shape, - " (got ", - values.size(), - ", expected ", - (shape_size(m_shape) == 1 ? 
"" : "1 or "), - shape_size(m_shape), - ")."); - - if (values.size() == 1) { - fill_data(type, values.front()); - } else { - write_values(values); - } - m_all_elements_bitwise_identical = are_all_data_elements_bitwise_identical(); - } - - /// \brief Create uninitialized constant - Constant(const element::Type& type, const Shape& shape); - /// \brief Constructs a uniform tensor constant. - /// - /// \param type The element type of the tensor constant. - /// \param shape The shape of the tensor constant. - /// \param value A scalar for initializing the uniform tensor constant. The - /// value is broadcast to the specified shape. - template ::value>::type> - Constant(const element::Type& type, const Shape& shape, T value) : Constant(type, shape) { - fill_data(type, value); - m_all_elements_bitwise_identical = true; - } - - template - void fill_data(const element::Type& type, T value) { - using Type_t = element::Type_t; -#if defined(__GNUC__) && !(__GNUC__ == 4 && __GNUC_MINOR__ == 8) -# pragma GCC diagnostic push -# pragma GCC diagnostic error "-Wswitch" -# pragma GCC diagnostic error "-Wswitch-enum" -#endif - switch (type) { - case Type_t::boolean: - fill_data(value); - break; - case Type_t::bf16: - fill_data(value); - break; - case Type_t::f16: - fill_data(value); - break; - case Type_t::f32: - fill_data(value); - break; - case Type_t::f64: - fill_data(value); - break; - case Type_t::i4: - fill_data(value); - break; - case Type_t::i8: - fill_data(value); - break; - case Type_t::i16: - fill_data(value); - break; - case Type_t::i32: - fill_data(value); - break; - case Type_t::i64: - fill_data(value); - break; - case Type_t::u1: - fill_data(value); - break; - case Type_t::u4: - fill_data(value); - break; - case Type_t::u8: - fill_data(value); - break; - case Type_t::u16: - fill_data(value); - break; - case Type_t::u32: - fill_data(value); - break; - case Type_t::u64: - fill_data(value); - break; - case Type_t::undefined: - case Type_t::dynamic: - throw std::runtime_error("unsupported type"); - } -#if defined(__GNUC__) && !(__GNUC__ == 4 && __GNUC_MINOR__ == 8) -# pragma GCC diagnostic pop -#endif - } - - /// \brief Constructs a tensor constant - /// This constructor is mainly to support deserialization of constants. - /// - /// \param type The element type of the tensor constant. - /// \param shape The shape of the tensor constant. - /// \param values A list of string values to use as the constant data. - Constant(const element::Type& type, const Shape& shape, const std::vector& values); - - /// \brief Constructs a tensor constant with the supplied data - /// - /// \param type The element type of the tensor constant. - /// \param shape The shape of the tensor constant. - /// \param data A void* to constant data. - Constant(const element::Type& type, const Shape& shape, const void* data); - - /// \brief Constructs a tensor constant with the supplied data - /// - /// \param type The element type of the tensor constant. - /// \param shape The shape of the tensor constant. - /// \param data A pointer to pre-allocated shared data. 
- template - Constant(const element::Type& type, const Shape& shape, std::shared_ptr> data) - : m_element_type(type), - m_shape(shape) { - m_data = data; - constructor_validate_and_infer_types(); - } - - Constant(const Constant& other); - Constant(const Constant& other, const Shape& new_shape); - Constant& operator=(const Constant&) = delete; - - virtual ~Constant() override; - - void validate_and_infer_types() override { - infer_element_type(); - set_output_type(0, m_element_type, m_shape); - } - - bool visit_attributes(AttributeVisitor& visitor) override; - - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - bool has_evaluate() const override; - bool evaluate_lower(const HostTensorVector& outputs) const override; - bool evaluate_upper(const HostTensorVector& outputs) const override; - - // Don't constant fold a constant; it would make a copy - bool constant_fold(OutputVector& outputs, const OutputVector& inputs) override { - (void)outputs; - (void)inputs; - return false; - } - - /// \brief Returns the value of the constant node as a Shape object - /// Can only be used on element::i64 nodes and interprets - /// negative values as zeros. - Shape get_shape_val() const; - /// \brief Returns the value of the constant node as a Strides - /// object - /// Can only be used on element::i64 nodes and interprets - /// negative values as zeros. - Strides get_strides_val() const; - /// \brief Returns the value of the constant node as a Coordinate - /// object - /// Can only be used on element::i64 nodes and interprets - /// negative values as zeros. - Coordinate get_coordinate_val() const; - /// \brief Returns the value of the constant node as a - /// CoordinateDiff object - /// Can only be used on element::i64 nodes. - CoordinateDiff get_coordinate_diff_val() const; - /// \brief Returns the value of the constant node as an AxisVector - /// object - /// Can only be used on element::i64 nodes and interprets - /// negative values as zeros. - AxisVector get_axis_vector_val() const; - /// \brief Returns the value of the constant node as an AxisSet - /// object - /// Can only be used on element::i64 nodes and interprets - /// negative values as zeros. - /// Repeated values are allowed. - AxisSet get_axis_set_val() const; - - /// \brief Update Constant shape. New shape size must equal to the data elements - /// count - /// - /// \param shape The shape of the tensor constant. - NGRAPH_DEPRECATED("Use Constant c-tor with shape argument instead") - void set_data_shape(const Shape& shape); - - /// \brief Wrapper around constructing a shared_ptr of a Constant - /// - /// \param type The element type of the tensor constant. - /// \param shape The shape of the tensor constant. - /// \param values A vector of values to use as the constant data. - template - static std::shared_ptr create(const element::Type& type, - const Shape& shape, - const std::vector& values) { - return std::make_shared(type, shape, values); - } - - /// \brief Wrapper around constructing a shared_ptr of a Constant - /// - /// \param type The element type of the tensor constant. - /// \param shape The shape of the tensor constant. - /// \param values An initializer_list of values to use as the constant data. 
- template - static std::shared_ptr create(const element::Type& type, - const Shape& shape, - std::initializer_list values) { - return std::make_shared(type, shape, std::vector{values}); - } - - /// \brief Wrapper around constructing a shared_ptr of a Constant - /// - /// \param type The element type of the tensor constant. - /// \param shape The shape of the tensor constant. - /// \param memory An continues memory chunk which contains the constant data. - static std::shared_ptr create(const element::Type& type, const Shape& shape, const void* memory) { - return std::make_shared(type, shape, memory); - } - - virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - /// \return The initialization literals for the tensor constant. - std::vector get_value_strings() const; - - template - std::vector get_vector() const { - const T* p = get_data_ptr(); - if (p == nullptr) - throw std::runtime_error("Cannot create vector! Buffer is not allocated."); - return std::vector(p, p + shape_size(m_shape)); - } - - /// \brief Return the Constant's value as a vector cast to type T - /// - /// \tparam T Type to which data vector's entries will be cast. - /// \return Constant's data vector. - template - std::vector cast_vector() const { - auto source_type = get_element_type(); - std::vector rc; - using Type_t = element::Type_t; -#if defined(_MSC_VER) -# pragma warning(push) -# pragma warning(disable : 4244) -#endif - switch (source_type) { - case Type_t::boolean: - cast_vector(rc); - break; - case Type_t::bf16: - cast_vector(rc); - break; - case Type_t::f16: - cast_vector(rc); - break; - case Type_t::f32: - cast_vector(rc); - break; - case Type_t::f64: - cast_vector(rc); - break; - case Type_t::i4: - cast_vector(rc); - break; - case Type_t::i8: - cast_vector(rc); - break; - case Type_t::i16: - cast_vector(rc); - break; - case Type_t::i32: - cast_vector(rc); - break; - case Type_t::i64: - cast_vector(rc); - break; - case Type_t::u1: - cast_vector(rc); - break; - case Type_t::u4: - cast_vector(rc); - break; - case Type_t::u8: - cast_vector(rc); - break; - case Type_t::u16: - cast_vector(rc); - break; - case Type_t::u32: - cast_vector(rc); - break; - case Type_t::u64: - cast_vector(rc); - break; - default: - throw std::runtime_error("unsupported type"); - } -#if defined(_MSC_VER) -# pragma warning(pop) -#endif - return rc; - } - - const void* get_data_ptr() const { - return (m_data ? 
m_data->get_ptr() : nullptr); - } - template - const T* get_data_ptr() const { - if (sizeof(T) > m_element_type.size() && shape_size(m_shape) > 0) { - throw ngraph_error("Buffer over-read"); - } - - return static_cast(get_data_ptr()); - } - - template - const typename element_type_traits::value_type* get_data_ptr() const { - NGRAPH_CHECK(ET == get_element_type(), "get_data_ptr() called for incorrect element type."); - return static_cast::value_type*>(get_data_ptr()); - } - - bool get_all_data_elements_bitwise_identical() const { - return m_all_elements_bitwise_identical; - } - std::string convert_value_to_string(size_t index) const; - - /** - * \brief Allows to avoid buffer allocation on the visit_attributes call - */ - void alloc_buffer_on_visit_attributes(bool val) { - m_alloc_buffer_on_visit_attributes = val; - } - -private: - template , - typename std::enable_if::type = true> - StorageDataType get_element_value(size_t index) const { - return get_data_ptr()[index]; - } - - template , - typename std::enable_if::type = true> - StorageDataType get_element_value(size_t index) const { - return (get_data_ptr()[index / 8] >> (7 - (index % 8))) & 1; - } - - template , - typename std::enable_if::type = true> - StorageDataType get_element_value(size_t index) const { - return (get_data_ptr()[index / 2] >> (index % 2 ? 0 : 4)) & 0x0F; - } - - template , - typename std::enable_if::type = true> - StorageDataType get_element_value(size_t index) const { - const uint8_t i4data = (get_data_ptr()[index / 2] >> (index % 2 ? 0 : 4)) & 0x0F; - const bool is_negative_number = (i4data >> 3) & 0x01; - const int8_t data = is_negative_number ? i4data | 0xF0 : i4data; - return data; - } - - template ::type = true> - void cast_vector(std::vector& output_vector) const { - // this function is workaround for waring during windows building - // build complains for vector creation based on iterators - // which point on different type than destination vector::value_type - using IN_T = fundamental_type_for; - auto source_vector = get_vector(); - output_vector.reserve(source_vector.size()); - - std::transform(source_vector.begin(), source_vector.end(), std::back_inserter(output_vector), [](IN_T c) { - return static_cast(c); - }); - } - - template ::type = true> - void cast_vector(std::vector& output) const { - using IN_T = fundamental_type_for; - const auto element_number = shape_size(m_shape); - const auto source_begin = get_data_ptr(); - const auto source_end = std::next(source_begin, (element_number + 7) / 8); - const auto round_element_no = element_number % 8 ? element_number - element_number % 8 + 8 : element_number; - output.reserve(round_element_no); // adds 7 more elements here? - std::for_each(source_begin, source_end, [&](IN_T c) { - for (const auto i : {7, 6, 5, 4, 3, 2, 1, 0}) { - const uint8_t data = (c >> i) & 0x01; - output.push_back(data); - } - }); - output.resize(element_number); - } - - template ::type = true> - void cast_vector(std::vector& output) const { - using IN_T = fundamental_type_for; - const auto element_number = shape_size(m_shape); - const auto source_begin = get_data_ptr(); - const auto source_end = std::next(source_begin, (element_number + 1) / 2); - const auto round_element_no = element_number % 2 ? element_number + 1 : element_number; - output.reserve(round_element_no); // adds 1 more elements here? 
- std::for_each(source_begin, source_end, [&](IN_T c) { - for (const auto i : {4, 0}) { - const uint8_t data = (c >> i) & 0x0F; - output.push_back(data); - } - }); - output.resize(element_number); - } - template ::type = true> - void cast_vector(std::vector& output) const { - using IN_T = fundamental_type_for; - const auto element_number = shape_size(m_shape); - const auto source_begin = get_data_ptr(); - const auto source_end = std::next(source_begin, (element_number + 1) / 2); - const auto round_element_no = element_number % 2 ? element_number + 1 : element_number; - output.reserve(round_element_no); // adds 1 more elements here? - std::for_each(source_begin, source_end, [&](IN_T c) { - for (const auto i : {4, 0}) { - const uint8_t i4data = (c >> i) & 0x0F; - const bool is_negative_number = (i4data >> 3) & 0x01; - const int8_t data = is_negative_number ? i4data | 0xF0 : i4data; - output.push_back(data); - } - }); - output.resize(element_number); - } - - template , - typename std::enable_if::type = true> - void fill_data(const T& value) { - const auto size = shape_size(m_shape); - const auto v = static_cast(value); - std::fill_n(get_data_ptr_nc(), size, v); - } - - template , - typename std::enable_if::type = true> - void fill_data(const T& value) { - const StorageDataType v = value ? 0xFF : 0x00; - std::fill_n(get_data_ptr_nc(), mem_size(), v); - } - - template , - typename std::enable_if::type = true> - void fill_data(const T& value) { - uint8_t v = value_in_range(value); - v &= 0x0F; - v += v << 4; - std::fill_n(get_data_ptr_nc(), mem_size(), v); - } - - void allocate_buffer(); - - void* get_data_ptr_nc() { - return (m_data ? m_data->get_ptr() : nullptr); - } - - template - typename element_type_traits::value_type* get_data_ptr_nc() { - NGRAPH_CHECK(ET == get_element_type(), "get_data_ptr_nc() called for incorrect element type."); - return static_cast::value_type*>(get_data_ptr_nc()); - } - - Constant(const OutputVector& args) : Op(args), m_shape({}) {} - - virtual void infer_element_type() {} - template - void write_values(const std::vector& values) { - write_to_buffer(values); - } - - template , - typename std::enable_if::type = true> - void write_buffer(const std::vector& source) { - auto p = get_data_ptr_nc(); - for (size_t i = 0; i < source.size(); i++) { - p[i] = static_cast(source[i]); - } - } - - template , - typename std::enable_if::type = true> - void write_buffer(const std::vector& source) { - auto p = get_data_ptr_nc(); - size_t i = 0; - for (; i < source.size() / 2; i++) { - const auto v1 = value_in_range(source[i * 2]) & 0x0F; - const auto v2 = value_in_range(source[i * 2 + 1]) & 0x0F; - const auto v = (v1 << 4) | v2; - p[i] = static_cast(v); - } - if (source.size() % 2) { - const auto v1 = value_in_range(source[i * 2]) & 0x0F; - const auto v = v1 << 4; - p[i] = static_cast(v); - } - } - - template , - typename std::enable_if::type = true> - void write_buffer(const std::vector& source) { - auto p = get_data_ptr_nc(); - size_t i = 0; - for (; i < source.size() / 8; i++) { - uint8_t v{}; - for (int j = 0; j != 8; j++) { - const uint8_t b = source[i * 8 + j] ? 0x01 << (7 - j) : 0; - v |= b; - } - p[i] = static_cast(v); - } - uint8_t v{}; - for (unsigned j = 0; j != source.size() % 8; j++) { - const uint8_t b = source[i * 8 + j] ? 
0x01 << (7 - j) : 0; - v |= b; - } - p[i] = static_cast(v); - } - - template - void write_to_buffer(const std::vector& source) { - const auto& target_type = m_element_type; - size_t target_element_count = shape_size(m_shape); - if (source.size() != target_element_count) { - throw std::runtime_error("Constant initializer does not match shape"); - } - using Type_t = element::Type_t; -#if defined(__GNUC__) && !(__GNUC__ == 4 && __GNUC_MINOR__ == 8) -# pragma GCC diagnostic push -# pragma GCC diagnostic error "-Wswitch" -# pragma GCC diagnostic error "-Wswitch-enum" -#endif - switch (target_type) { - case Type_t::boolean: - write_buffer(source); - break; - case Type_t::bf16: - write_buffer(source); - break; - case Type_t::f16: - write_buffer(source); - break; - case Type_t::f32: - write_buffer(source); - break; - case Type_t::f64: - write_buffer(source); - break; - case Type_t::i4: - write_buffer(source); - break; - case Type_t::i8: - write_buffer(source); - break; - case Type_t::i16: - write_buffer(source); - break; - case Type_t::i32: - write_buffer(source); - break; - case Type_t::i64: - write_buffer(source); - break; - case Type_t::u1: - write_buffer(source); - break; - case Type_t::u4: - write_buffer(source); - break; - case Type_t::u8: - write_buffer(source); - break; - case Type_t::u16: - write_buffer(source); - break; - case Type_t::u32: - write_buffer(source); - break; - case Type_t::u64: - write_buffer(source); - break; - case element::Type_t::undefined: - case element::Type_t::dynamic: - throw std::runtime_error("unsupported type"); - } -#if defined(__GNUC__) && !(__GNUC__ == 4 && __GNUC_MINOR__ == 8) -# pragma GCC diagnostic pop -#endif - } - template ::type = true> - static ngraph::fundamental_type_for value_in_range(const ValueT& value) { - const auto result = ngraph::fundamental_type_for(value); - NGRAPH_CHECK(0 <= result && result <= 15, "assigned value out of range u4 values"); - return result; - } - - template ::type = true> - static ngraph::fundamental_type_for value_in_range(const ValueT& value) { - const auto result = ngraph::fundamental_type_for(value); - NGRAPH_CHECK(-8 <= result && result <= 7, "assigned value out of range i4 values"); - return result; - } - - bool are_all_data_elements_bitwise_identical() const; - static constexpr size_t host_alignment() { - return 64; - } - - size_t mem_size() const { - const bool bitwidth_less_than_byte = m_element_type.bitwidth() < 8; - if (bitwidth_less_than_byte) { - const auto size = shape_size(m_shape); - const auto bitwidth = size * m_element_type.bitwidth(); - // for rounding by `(bitwidth + 7) / 8` will work for - // `bitwidth < numeric_limits::max() - 7` - return bitwidth / 8 + (bitwidth % 8 ? 
1 : 0); - } - return shape_size(m_shape) * m_element_type.size(); - } - - element::Type m_element_type; - Shape m_shape{}; - std::shared_ptr m_data; - bool m_all_elements_bitwise_identical; - bool m_alloc_buffer_on_visit_attributes = true; -}; +using ov::op::v0::Constant; } // namespace v0 using v0::Constant; } // namespace op diff --git a/ngraph/core/include/ngraph/op/read_value.hpp b/ngraph/core/include/ngraph/op/read_value.hpp index 1737cfce678eaf..8106aa9384aa26 100644 --- a/ngraph/core/include/ngraph/op/read_value.hpp +++ b/ngraph/core/include/ngraph/op/read_value.hpp @@ -7,84 +7,18 @@ #include "ngraph/op/op.hpp" #include "ngraph/op/util/variable.hpp" #include "ngraph/op/util/variable_extension.hpp" +#include "openvino/op/read_value.hpp" namespace ngraph { namespace op { -class NGRAPH_API ReadValueBase : public Op, public VariableExtension { -public: - NGRAPH_RTTI_DECLARATION; - - ReadValueBase() = default; - - /// \brief Constructs an AssignBase operation. - explicit ReadValueBase(const OutputVector& arguments) : Op(arguments) {} -}; +using ov::op::util::ReadValueBase; namespace v3 { -/// \brief ReadValue operation creates the variable with `variable_id` and returns value -/// of this variable. -class NGRAPH_API ReadValue : public ReadValueBase { -public: - NGRAPH_RTTI_DECLARATION; - ReadValue() = default; - - /// \brief Constructs a ReadValue operation. - /// - /// \param init_value Node that produces the input tensor. - /// \param variable_id identificator of the variable to create. - ReadValue(const Output& init_value, const std::string& variable_id); - - void validate_and_infer_types() override; - - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - bool visit_attributes(AttributeVisitor& visitor) override; - - std::string get_variable_id() const override { - return m_variable_id; - } - -private: - std::string m_variable_id; -}; +using ov::op::v3::ReadValue; } // namespace v3 namespace v6 { -/// \brief ReadValue operation gets an input value from the variable with `variable_id` -/// and returns it as an output. -class NGRAPH_API ReadValue : public ReadValueBase { -public: - NGRAPH_RTTI_DECLARATION; - ReadValue() = default; - - /// \brief Constructs a ReadValue operation. - /// - /// \param init_value Node that produces the input tensor. - /// \param variable Class for storing and synchronizing element types, shapes and - /// identifiers - /// between pairs of Assign/ReadValue nodes. - ReadValue(const Output& init_value, const std::shared_ptr& variable); - - void validate_and_infer_types() override; - - void revalidate_and_infer_types() override; - - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - bool visit_attributes(AttributeVisitor& visitor) override; - - std::string get_variable_id() const override { - NGRAPH_CHECK(m_variable, "Variable is not initialized. 
Variable_id is unavailable"); - return m_variable->get_info().variable_id; - } - - bool evaluate(const HostTensorVector& outputs, - const HostTensorVector& inputs, - const EvaluationContext& evaluation_context) const override; - bool has_evaluate() const override; - - bool constant_fold(OutputVector& output_values, const OutputVector& inputs_values) override; -}; +using ov::op::v6::ReadValue; } // namespace v6 } // namespace op } // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/sink.hpp b/ngraph/core/include/ngraph/op/sink.hpp index 0dbec0190796d8..53e9fa9d523df2 100644 --- a/ngraph/core/include/ngraph/op/sink.hpp +++ b/ngraph/core/include/ngraph/op/sink.hpp @@ -7,20 +7,11 @@ #include #include "ngraph/op/op.hpp" +#include "openvino/op/sink.hpp" namespace ngraph { namespace op { -/// Root of nodes that can be sink nodes -class NGRAPH_API Sink : public Op { -public: - virtual ~Sink() = 0; - NGRAPH_RTTI_DECLARATION; - -protected: - Sink() : Op() {} - - explicit Sink(const OutputVector& arguments) : Op(arguments) {} -}; +using ov::op::Sink; } // namespace op using SinkVector = std::vector>; } // namespace ngraph diff --git a/ngraph/core/include/ngraph/runtime/host_tensor.hpp b/ngraph/core/include/ngraph/runtime/host_tensor.hpp index 5f49c7f4159d71..157fb27be3bd57 100644 --- a/ngraph/core/include/ngraph/runtime/host_tensor.hpp +++ b/ngraph/core/include/ngraph/runtime/host_tensor.hpp @@ -13,11 +13,16 @@ namespace ov { class Node; +namespace op { +namespace v0 { +class Constant; } +} // namespace op +} // namespace ov namespace ngraph { namespace op { namespace v0 { -class Constant; +using ov::op::v0::Constant; } } // namespace op namespace runtime { diff --git a/ngraph/core/include/openvino/op/abs.hpp b/ngraph/core/include/openvino/op/abs.hpp new file mode 100644 index 00000000000000..b3cc02d724f86e --- /dev/null +++ b/ngraph/core/include/openvino/op/abs.hpp @@ -0,0 +1,40 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +#include "openvino/op/util/unary_elementwise_arithmetic.hpp" + +namespace ov { +namespace op { +namespace v0 { +/// \brief Elementwise absolute value operation. +/// +class OPENVINO_API Abs : public util::UnaryElementwiseArithmetic { +public: + OPENVINO_RTTI_DECLARATION; + /// \brief Constructs an absolute value operation. + Abs() = default; + bool visit_attributes(AttributeVisitor&) override { + return true; + } + /// \brief Constructs an absolute value operation. + /// + /// \param arg Output that produces the input tensor.
+ /// `[d1, ...]` + /// + /// Output `[d1, ...]` + /// + Abs(const Output& arg); + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v0 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/acos.hpp b/ngraph/core/include/openvino/op/acos.hpp new file mode 100644 index 00000000000000..0804926bdda5c4 --- /dev/null +++ b/ngraph/core/include/openvino/op/acos.hpp @@ -0,0 +1,38 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +#include "openvino/op/util/unary_elementwise_arithmetic.hpp" + +namespace ov { +namespace op { +namespace v0 { +/// \brief Elementwise inverse cosine (arccos) operation. +/// +class OPENVINO_API Acos : public util::UnaryElementwiseArithmetic { +public: + OPENVINO_RTTI_DECLARATION; + /// \brief Constructs an arccos operation. + Acos() = default; + /// \brief Constructs an arccos operation. + /// + /// \param arg Output that produces the input tensor.
+ /// `[d1, ...]` + /// + /// Output `[d1, ...]` + /// + Acos(const Output& arg); + bool visit_attributes(AttributeVisitor&) override { + return true; + } + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v0 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/acosh.hpp b/ngraph/core/include/openvino/op/acosh.hpp new file mode 100644 index 00000000000000..5e7e638a3f63c0 --- /dev/null +++ b/ngraph/core/include/openvino/op/acosh.hpp @@ -0,0 +1,40 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +#include "openvino/op/util/unary_elementwise_arithmetic.hpp" + +namespace ov { +namespace op { +namespace v3 { +/// \brief Elementwise inverse hyperbolic cos operation. +/// +class OPENVINO_API Acosh : public util::UnaryElementwiseArithmetic { +public: + OPENVINO_RTTI_DECLARATION; + + /// \brief Constructs an Acosh operation. + Acosh() = default; + /// \brief Constructs an Acosh operation. + /// + /// \param arg Output that produces the input tensor.
+ /// `[d1, ...]` + /// + /// Output `[d1, ...]` + /// + Acosh(const Output& arg); + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + bool visit_attributes(AttributeVisitor&) override { + return true; + } + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v3 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/adaptive_avg_pool.hpp b/ngraph/core/include/openvino/op/adaptive_avg_pool.hpp new file mode 100644 index 00000000000000..fd4419e5d8b108 --- /dev/null +++ b/ngraph/core/include/openvino/op/adaptive_avg_pool.hpp @@ -0,0 +1,38 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" +#include "openvino/op/util/attr_types.hpp" + +namespace ov { +namespace op { +namespace v8 { +/// \brief Adaptive average pooling operation. +/// +class OPENVINO_API AdaptiveAvgPool : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + AdaptiveAvgPool() = default; + + /// + /// \brief Constructs adaptive average pooling operation. + /// + /// \param data Input data + /// + /// \param output_shape 1D tensor describing output shape for spatial + /// dimensions. + /// + AdaptiveAvgPool(const Output& data, const Output& output_shape); + + void validate_and_infer_types() override; + bool visit_attributes(AttributeVisitor& visitor) override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; +}; +} // namespace v8 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/adaptive_max_pool.hpp b/ngraph/core/include/openvino/op/adaptive_max_pool.hpp new file mode 100644 index 00000000000000..512131ed0aa157 --- /dev/null +++ b/ngraph/core/include/openvino/op/adaptive_max_pool.hpp @@ -0,0 +1,50 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" +#include "openvino/op/util/attr_types.hpp" + +namespace ov { +namespace op { +namespace v8 { +/// \brief Adaptive max pooling operation. +/// +class OPENVINO_API AdaptiveMaxPool : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + AdaptiveMaxPool() = default; + + /// + /// \brief Constructs adaptive max pooling operation. + /// + /// \param data Input data + /// + /// \param output_shape 1D tensor describing output shape for spatial + /// dimensions. 
+ /// + /// \param index_element_type Specifies the output tensor type for indices + /// output + /// + AdaptiveMaxPool(const Output& data, + const Output& output_shape, + const ngraph::element::Type& index_element_type = ngraph::element::i64); + + void validate_and_infer_types() override; + bool visit_attributes(AttributeVisitor& visitor) override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + element::Type get_index_element_type() const { + return m_index_element_type; + } + +protected: + ngraph::element::Type m_index_element_type = ngraph::element::i64; +}; +} // namespace v8 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/add.hpp b/ngraph/core/include/openvino/op/add.hpp new file mode 100644 index 00000000000000..38b3121ab0bb5a --- /dev/null +++ b/ngraph/core/include/openvino/op/add.hpp @@ -0,0 +1,47 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +#include "openvino/op/util/binary_elementwise_arithmetic.hpp" + +namespace ov { +namespace op { +namespace v1 { +/// \brief Elementwise addition operation. +/// +class OPENVINO_API Add : public util::BinaryElementwiseArithmetic { +public: + OPENVINO_RTTI_DECLARATION; + + /// \brief Constructs an uninitialized addition operation + Add() : util::BinaryElementwiseArithmetic(AutoBroadcastSpec::NUMPY) {} + + /// \brief Constructs an addition operation. + /// + /// \param arg0 Output that produces the first input tensor.
+ /// `[d0, ...]` + /// \param arg1 Output that produces the second input tensor.
+ /// `[d0, ...]` + /// \param auto_broadcast Auto broadcast specification. Default is Numpy-style + /// implicit broadcasting. + /// + /// Output `[d0, ...]` + /// + Add(const Output& arg0, + const Output& arg1, + const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY)); + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + bool visit_attributes(AttributeVisitor& visitor) override; + + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v1 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/asin.hpp b/ngraph/core/include/openvino/op/asin.hpp new file mode 100644 index 00000000000000..b08569cddeee0f --- /dev/null +++ b/ngraph/core/include/openvino/op/asin.hpp @@ -0,0 +1,39 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +#include "openvino/op/util/unary_elementwise_arithmetic.hpp" + +namespace ov { +namespace op { +namespace v0 { +/// \brief Elementwise inverse sine (arcsin) operation. +/// +class OPENVINO_API Asin : public util::UnaryElementwiseArithmetic { +public: + OPENVINO_RTTI_DECLARATION; + /// \brief Constructs an arcsin operation. + Asin() = default; + /// \brief Constructs an arcsin operation. + /// + /// \param arg Output that produces the input tensor.
+ /// `[d1, ...]` + /// + /// Output `[d1, ...]` + /// + Asin(const Output& arg); + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + bool visit_attributes(AttributeVisitor&) override { + return true; + } + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v0 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/asinh.hpp b/ngraph/core/include/openvino/op/asinh.hpp new file mode 100644 index 00000000000000..5ad07bddd26557 --- /dev/null +++ b/ngraph/core/include/openvino/op/asinh.hpp @@ -0,0 +1,40 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +#include "openvino/op/util/unary_elementwise_arithmetic.hpp" + +namespace ov { +namespace op { +namespace v3 { +/// \brief Elementwise inverse hyperbolic sin operation. +/// +class OPENVINO_API Asinh : public util::UnaryElementwiseArithmetic { +public: + OPENVINO_RTTI_DECLARATION; + + /// \brief Constructs an Asinh operation. + Asinh() = default; + /// \brief Constructs an Asinh operation. + /// + /// \param arg Output that produces the input tensor.
+ /// `[d1, ...]` + /// + /// Output `[d1, ...]` + /// + Asinh(const Output& arg); + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + bool visit_attributes(AttributeVisitor&) override { + return true; + } + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v3 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/assign.hpp b/ngraph/core/include/openvino/op/assign.hpp new file mode 100644 index 00000000000000..01502de7410875 --- /dev/null +++ b/ngraph/core/include/openvino/op/assign.hpp @@ -0,0 +1,72 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/util/assign_base.hpp" +#include "openvino/op/util/variable.hpp" + +namespace ov { +namespace op { +namespace v3 { +/// \brief Assign operation sets an input value to the variable with `variable_id` +class OPENVINO_API Assign : public util::AssignBase { +public: + OPENVINO_RTTI_DECLARATION; + Assign() = default; + + /// \brief Constructs an Assign operation. + /// + /// \param new_value Node that produces the input tensor. + /// \param variable_id identifier of the variable to be updated. + Assign(const Output& new_value, const std::string& variable_id); + + void validate_and_infer_types() override; + std::string get_variable_id() const override { + return m_variable_id; + } + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + bool visit_attributes(AttributeVisitor& visitor) override; + +private: + std::string m_variable_id; +}; +} // namespace v3 + +namespace v6 { +/// \brief Assign operation sets an input value to the variable with `variable_id` +class OPENVINO_API Assign : public util::AssignBase { +public: + OPENVINO_RTTI_DECLARATION; + Assign() = default; + + /// \brief Constructs an Assign operation. + /// + /// \param new_value Node that produces the input tensor. + /// \param variable Class for storing and synchronizing element types, shapes and + /// identifiers + /// between pairs of Assign/ReadValue nodes. + Assign(const Output& new_value, const std::shared_ptr& variable); + + void validate_and_infer_types() override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + bool visit_attributes(AttributeVisitor& visitor) override; + + std::string get_variable_id() const override { + NGRAPH_CHECK(m_variable, "Variable is not initialized. Variable_id is unavailable"); + return m_variable->get_info().variable_id; + } + bool evaluate(const HostTensorVector& outputs, + const HostTensorVector& inputs, + const EvaluationContext& evaluation_context) const override; + bool has_evaluate() const override; + bool constant_fold(OutputVector& output_values, const OutputVector& inputs_values) override; +}; +} // namespace v6 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/atan.hpp b/ngraph/core/include/openvino/op/atan.hpp new file mode 100644 index 00000000000000..3b336946c25700 --- /dev/null +++ b/ngraph/core/include/openvino/op/atan.hpp @@ -0,0 +1,40 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +#include "openvino/op/util/unary_elementwise_arithmetic.hpp" + +namespace ov { +namespace op { +namespace v0 { +/// \brief Elementwise inverse tangent (arctan) operation. 
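+///
+/// A minimal usage sketch (names are illustrative):
+/// \code
+/// auto x = std::make_shared<ngraph::op::v0::Parameter>(ov::element::f32, ngraph::Shape{4});
+/// auto atan = std::make_shared<ov::op::v0::Atan>(x);  // elementwise, same shape and type as the input
+/// \endcode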
+/// +class OPENVINO_API Atan : public util::UnaryElementwiseArithmetic { +public: + OPENVINO_RTTI_DECLARATION; + /// \brief Constructs an arctan operation. + Atan() = default; + + /// \brief Constructs an arctan operation. + /// + /// \param arg Output that produces the input tensor.
+ /// `[d1, ...]` + /// + /// Output `[d1, ...]` + /// + Atan(const Output& arg); + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + bool visit_attributes(AttributeVisitor&) override { + return true; + } + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v0 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/atanh.hpp b/ngraph/core/include/openvino/op/atanh.hpp new file mode 100644 index 00000000000000..5332636a9fd2af --- /dev/null +++ b/ngraph/core/include/openvino/op/atanh.hpp @@ -0,0 +1,40 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +#include "openvino/op/util/unary_elementwise_arithmetic.hpp" + +namespace ov { +namespace op { +namespace v3 { +/// \brief Elementwise inverse hyperbolic tangent operation. +/// +class OPENVINO_API Atanh : public util::UnaryElementwiseArithmetic { +public: + OPENVINO_RTTI_DECLARATION; + + /// \brief Constructs an Atanh operation. + Atanh() = default; + /// \brief Constructs an Atanh operation. + /// + /// \param arg Output that produces the input tensor.
+ /// `[d1, ...]` + /// + /// Output `[d1, ...]` + /// + Atanh(const Output& arg); + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + bool visit_attributes(AttributeVisitor&) override { + return true; + } + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v3 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/avg_pool.hpp b/ngraph/core/include/openvino/op/avg_pool.hpp new file mode 100644 index 00000000000000..66f94c7cf6f528 --- /dev/null +++ b/ngraph/core/include/openvino/op/avg_pool.hpp @@ -0,0 +1,88 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" +#include "openvino/op/util/attr_types.hpp" + +namespace ov { +namespace op { +namespace v1 { +/// \brief Batched average pooling operation. +/// +class OPENVINO_API AvgPool : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + /// \brief Constructs a batched average pooling operation. + AvgPool() = default; + + /// + /// \brief Constructs a batched average pooling operation. + /// + /// \param arg The output producing the input data batch tensor.
+    ///                  `[d1, dn]`
+    /// \param strides The strides.<br>
+    ///                  `[n]`
+    /// \param pads_begin The beginning of padding shape.<br>
+    ///                  `[n]`
+    /// \param pads_end The end of padding shape.<br>
+    ///                  `[n]`
+    /// \param kernel The kernel shape.<br>
`[n]` + /// \param exclude_pad If false then averages include padding elements, each + /// treated as the number zero. If true, padding + /// elements + /// are entirely ignored when computing averages. + /// \param rounding_type Whether to use ceiling or floor rounding type while + /// computing output shape. + /// \param auto_pad Padding type to use for additional padded dimensions + /// + AvgPool(const Output& arg, + const Strides& strides, + const ngraph::Shape& pads_begin, + const ngraph::Shape& pads_end, + const ngraph::Shape& kernel, + bool exclude_pad, + op::RoundingType rounding_type = op::RoundingType::FLOOR, + const PadType& auto_pad = op::PadType::EXPLICIT); + + void validate_and_infer_types() override; + bool visit_attributes(AttributeVisitor& visitor) override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + /// \return The kernel shape. + const ngraph::Shape& get_kernel() const; + void set_kernel(const ngraph::Shape& kernel); + /// \return The strides. + const Strides& get_strides() const; + void set_strides(const Strides& strides); + /// \return The beginning of padding shape. + const ngraph::Shape& get_pads_begin() const; + void set_pads_begin(const ngraph::Shape& pads_begin); + /// \return The end of padding shape. + const ngraph::Shape& get_pads_end() const; + void set_pads_end(const ngraph::Shape& pads_end); + bool get_exclude_pad() const; + void set_exclude_pad(bool exclude_pad); + /// \return The pad type for pooling. + const PadType& get_auto_pad() const; + void set_auto_pad(const PadType& auto_pad); + op::RoundingType get_rounding_type() const; + void set_rounding_type(op::RoundingType rounding_type); + /// \return The default value for AvgPool. + OPENVINO_SUPPRESS_DEPRECATED_START + std::shared_ptr get_default_value() const override; + OPENVINO_SUPPRESS_DEPRECATED_END + +protected: + ngraph::Shape m_kernel; + Strides m_strides; + ngraph::Shape m_pads_begin; + ngraph::Shape m_pads_end; + bool m_exclude_pad{true}; + PadType m_auto_pad{PadType::EXPLICIT}; + op::RoundingType m_rounding_type{op::RoundingType::FLOOR}; +}; +} // namespace v1 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/constant.hpp b/ngraph/core/include/openvino/op/constant.hpp new file mode 100644 index 00000000000000..f13899b31ca09b --- /dev/null +++ b/ngraph/core/include/openvino/op/constant.hpp @@ -0,0 +1,711 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include + +#include "ngraph/runtime/aligned_buffer.hpp" +#include "ngraph/runtime/host_tensor.hpp" +#include "ngraph/runtime/shared_buffer.hpp" +#include "ngraph/util.hpp" +#include "openvino/core/coordinate_diff.hpp" +#include "openvino/core/node.hpp" +#include "openvino/core/type/element_type.hpp" +#include "openvino/core/type/element_type_traits.hpp" + +namespace ov { +namespace op { +namespace v0 { +/// \brief Class for constants. +class OPENVINO_API Constant : public Op { +public: + OPENVINO_RTTI_DECLARATION; + + Constant() = default; + + /// \brief Initialize a constant from tensor + /// \param tensor The tensor with data + Constant(const std::shared_ptr& tensor); + + /// \brief Constructs a tensor constant. + /// + /// \param type The element type of the tensor constant. + /// \param shape The shape of the tensor constant. + /// \param values A vector of literals for initializing the tensor constant. The + /// size of values must match the size of the shape. 
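+    ///
+    /// A minimal usage sketch (values are illustrative):
+    /// \code
+    /// ov::op::v0::Constant c(ov::element::f32, ngraph::Shape{2, 2}, std::vector<float>{1.f, 2.f, 3.f, 4.f});
+    /// // a single literal is broadcast over the whole shape:
+    /// ov::op::v0::Constant zeros(ov::element::f32, ngraph::Shape{2, 2}, std::vector<float>{0.f});
+    /// \endcode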
+ template + Constant(const element::Type& type, const ngraph::Shape& shape, const std::vector& values) + : Constant(type, shape) { + NODE_VALIDATION_CHECK(this, + values.size() == 1 || values.size() == shape_size(m_shape), + "Did not get the expected number of literals for a constant of shape ", + m_shape, + " (got ", + values.size(), + ", expected ", + (shape_size(m_shape) == 1 ? "" : "1 or "), + shape_size(m_shape), + ")."); + + if (values.size() == 1) { + fill_data(type, values.front()); + } else { + write_values(values); + } + m_all_elements_bitwise_identical = are_all_data_elements_bitwise_identical(); + } + + /// \brief Create uninitialized constant + Constant(const element::Type& type, const ngraph::Shape& shape); + /// \brief Constructs a uniform tensor constant. + /// + /// \param type The element type of the tensor constant. + /// \param shape The shape of the tensor constant. + /// \param value A scalar for initializing the uniform tensor constant. The + /// value is broadcast to the specified shape. + template ::value>::type> + Constant(const element::Type& type, const ngraph::Shape& shape, T value) : Constant(type, shape) { + fill_data(type, value); + m_all_elements_bitwise_identical = true; + } + + template + void fill_data(const element::Type& type, T value) { + using Type_t = element::Type_t; +#if defined(__GNUC__) && !(__GNUC__ == 4 && __GNUC_MINOR__ == 8) +# pragma GCC diagnostic push +# pragma GCC diagnostic error "-Wswitch" +# pragma GCC diagnostic error "-Wswitch-enum" +#endif + switch (type) { + case Type_t::boolean: + fill_data(value); + break; + case Type_t::bf16: + fill_data(value); + break; + case Type_t::f16: + fill_data(value); + break; + case Type_t::f32: + fill_data(value); + break; + case Type_t::f64: + fill_data(value); + break; + case Type_t::i4: + fill_data(value); + break; + case Type_t::i8: + fill_data(value); + break; + case Type_t::i16: + fill_data(value); + break; + case Type_t::i32: + fill_data(value); + break; + case Type_t::i64: + fill_data(value); + break; + case Type_t::u1: + fill_data(value); + break; + case Type_t::u4: + fill_data(value); + break; + case Type_t::u8: + fill_data(value); + break; + case Type_t::u16: + fill_data(value); + break; + case Type_t::u32: + fill_data(value); + break; + case Type_t::u64: + fill_data(value); + break; + case Type_t::undefined: + case Type_t::dynamic: + throw std::runtime_error("unsupported type"); + } +#if defined(__GNUC__) && !(__GNUC__ == 4 && __GNUC_MINOR__ == 8) +# pragma GCC diagnostic pop +#endif + } + + /// \brief Constructs a tensor constant + /// This constructor is mainly to support deserialization of constants. + /// + /// \param type The element type of the tensor constant. + /// \param shape The shape of the tensor constant. + /// \param values A list of string values to use as the constant data. + Constant(const element::Type& type, const ngraph::Shape& shape, const std::vector& values); + + /// \brief Constructs a tensor constant with the supplied data + /// + /// \param type The element type of the tensor constant. + /// \param shape The shape of the tensor constant. + /// \param data A void* to constant data. + Constant(const element::Type& type, const ngraph::Shape& shape, const void* data); + + /// \brief Constructs a tensor constant with the supplied data + /// + /// \param type The element type of the tensor constant. + /// \param shape The shape of the tensor constant. + /// \param data A pointer to pre-allocated shared data. 
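+    ///
+    /// A minimal usage sketch (the backing-buffer management is illustrative; the
+    /// SharedBuffer must stay valid for the lifetime of the Constant):
+    /// \code
+    /// auto backing = std::make_shared<std::vector<float>>(4, 0.f);
+    /// auto buffer = std::make_shared<ngraph::runtime::SharedBuffer<std::shared_ptr<std::vector<float>>>>(
+    ///     reinterpret_cast<char*>(backing->data()), backing->size() * sizeof(float), backing);
+    /// auto c = std::make_shared<ov::op::v0::Constant>(ov::element::f32, ngraph::Shape{2, 2}, buffer);
+    /// \endcode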
+ template + Constant(const element::Type& type, + const ngraph::Shape& shape, + std::shared_ptr> data) + : m_element_type(type), + m_shape(shape) { + m_data = data; + constructor_validate_and_infer_types(); + } + + Constant(const Constant& other); + Constant(const Constant& other, const ngraph::Shape& new_shape); + Constant& operator=(const Constant&) = delete; + + ~Constant() override; + + void validate_and_infer_types() override { + infer_element_type(); + set_output_type(0, m_element_type, m_shape); + } + + bool visit_attributes(AttributeVisitor& visitor) override; + + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; + bool evaluate_lower(const HostTensorVector& outputs) const override; + bool evaluate_upper(const HostTensorVector& outputs) const override; + + // Don't constant fold a constant; it would make a copy + bool constant_fold(OutputVector& outputs, const OutputVector& inputs) override { + (void)outputs; + (void)inputs; + return false; + } + + /// \brief Returns the value of the constant node as a Shape object + /// Can only be used on element::i64 nodes and interprets + /// negative values as zeros. + ngraph::Shape get_shape_val() const; + /// \brief Returns the value of the constant node as a Strides + /// object + /// Can only be used on element::i64 nodes and interprets + /// negative values as zeros. + Strides get_strides_val() const; + /// \brief Returns the value of the constant node as a Coordinate + /// object + /// Can only be used on element::i64 nodes and interprets + /// negative values as zeros. + Coordinate get_coordinate_val() const; + /// \brief Returns the value of the constant node as a + /// CoordinateDiff object + /// Can only be used on element::i64 nodes. + CoordinateDiff get_coordinate_diff_val() const; + /// \brief Returns the value of the constant node as an AxisVector + /// object + /// Can only be used on element::i64 nodes and interprets + /// negative values as zeros. + AxisVector get_axis_vector_val() const; + /// \brief Returns the value of the constant node as an AxisSet + /// object + /// Can only be used on element::i64 nodes and interprets + /// negative values as zeros. + /// Repeated values are allowed. + AxisSet get_axis_set_val() const; + + /// \brief Update Constant shape. New shape size must equal to the data elements + /// count + /// + /// \param shape The shape of the tensor constant. + OPENVINO_DEPRECATED("Use Constant c-tor with shape argument instead") + void set_data_shape(const ngraph::Shape& shape); + + /// \brief Wrapper around constructing a shared_ptr of a Constant + /// + /// \param type The element type of the tensor constant. + /// \param shape The shape of the tensor constant. + /// \param values A vector of values to use as the constant data. + template + static std::shared_ptr create(const element::Type& type, + const ngraph::Shape& shape, + const std::vector& values) { + return std::make_shared(type, shape, values); + } + + /// \brief Wrapper around constructing a shared_ptr of a Constant + /// + /// \param type The element type of the tensor constant. + /// \param shape The shape of the tensor constant. + /// \param values An initializer_list of values to use as the constant data. 
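+    ///
+    /// For example:
+    /// \code
+    /// auto c = ov::op::v0::Constant::create(ov::element::i64, ngraph::Shape{3}, {1, 2, 3});
+    /// \endcode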
+ template + static std::shared_ptr create(const element::Type& type, + const ngraph::Shape& shape, + std::initializer_list values) { + return std::make_shared(type, shape, std::vector{values}); + } + + /// \brief Wrapper around constructing a shared_ptr of a Constant + /// + /// \param type The element type of the tensor constant. + /// \param shape The shape of the tensor constant. + /// \param memory An continues memory chunk which contains the constant data. + static std::shared_ptr create(const element::Type& type, const ngraph::Shape& shape, const void* memory) { + return std::make_shared(type, shape, memory); + } + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + /// \return The initialization literals for the tensor constant. + std::vector get_value_strings() const; + + template + std::vector get_vector() const { + const T* p = get_data_ptr(); + if (p == nullptr) + throw std::runtime_error("Cannot create vector! Buffer is not allocated."); + return std::vector(p, p + shape_size(m_shape)); + } + + /// \brief Return the Constant's value as a vector cast to type T + /// + /// \tparam T Type to which data vector's entries will be cast. + /// \return Constant's data vector. + template + std::vector cast_vector() const { + auto source_type = get_element_type(); + std::vector rc; + using Type_t = element::Type_t; +#if defined(_MSC_VER) +# pragma warning(push) +# pragma warning(disable : 4244) +#endif + switch (source_type) { + case Type_t::boolean: + cast_vector(rc); + break; + case Type_t::bf16: + cast_vector(rc); + break; + case Type_t::f16: + cast_vector(rc); + break; + case Type_t::f32: + cast_vector(rc); + break; + case Type_t::f64: + cast_vector(rc); + break; + case Type_t::i4: + cast_vector(rc); + break; + case Type_t::i8: + cast_vector(rc); + break; + case Type_t::i16: + cast_vector(rc); + break; + case Type_t::i32: + cast_vector(rc); + break; + case Type_t::i64: + cast_vector(rc); + break; + case Type_t::u1: + cast_vector(rc); + break; + case Type_t::u4: + cast_vector(rc); + break; + case Type_t::u8: + cast_vector(rc); + break; + case Type_t::u16: + cast_vector(rc); + break; + case Type_t::u32: + cast_vector(rc); + break; + case Type_t::u64: + cast_vector(rc); + break; + default: + throw std::runtime_error("unsupported type"); + } +#if defined(_MSC_VER) +# pragma warning(pop) +#endif + return rc; + } + + const void* get_data_ptr() const { + return (m_data ? 
m_data->get_ptr() : nullptr); + } + template + const T* get_data_ptr() const { + if (sizeof(T) > m_element_type.size() && shape_size(m_shape) > 0) { + throw ov::Exception("Buffer over-read"); + } + + return static_cast(get_data_ptr()); + } + + template + const typename element_type_traits::value_type* get_data_ptr() const { + NGRAPH_CHECK(ET == get_element_type(), "get_data_ptr() called for incorrect element type."); + return static_cast::value_type*>(get_data_ptr()); + } + + bool get_all_data_elements_bitwise_identical() const { + return m_all_elements_bitwise_identical; + } + std::string convert_value_to_string(size_t index) const; + + /** + * \brief Allows to avoid buffer allocation on the visit_attributes call + */ + void alloc_buffer_on_visit_attributes(bool val) { + m_alloc_buffer_on_visit_attributes = val; + } + +private: + template , + typename std::enable_if::type = true> + StorageDataType get_element_value(size_t index) const { + return get_data_ptr()[index]; + } + + template , + typename std::enable_if::type = true> + StorageDataType get_element_value(size_t index) const { + return (get_data_ptr()[index / 8] >> (7 - (index % 8))) & 1; + } + + template , + typename std::enable_if::type = true> + StorageDataType get_element_value(size_t index) const { + return (get_data_ptr()[index / 2] >> (index % 2 ? 0 : 4)) & 0x0F; + } + + template , + typename std::enable_if::type = true> + StorageDataType get_element_value(size_t index) const { + const uint8_t i4data = (get_data_ptr()[index / 2] >> (index % 2 ? 0 : 4)) & 0x0F; + const bool is_negative_number = (i4data >> 3) & 0x01; + const int8_t data = is_negative_number ? i4data | 0xF0 : i4data; + return data; + } + + template ::type = true> + void cast_vector(std::vector& output_vector) const { + // this function is workaround for waring during windows building + // build complains for vector creation based on iterators + // which point on different type than destination vector::value_type + using IN_T = fundamental_type_for; + auto source_vector = get_vector(); + output_vector.reserve(source_vector.size()); + + std::transform(source_vector.begin(), source_vector.end(), std::back_inserter(output_vector), [](IN_T c) { + return static_cast(c); + }); + } + + template ::type = true> + void cast_vector(std::vector& output) const { + using IN_T = fundamental_type_for; + const auto element_number = shape_size(m_shape); + const auto source_begin = get_data_ptr(); + const auto source_end = std::next(source_begin, (element_number + 7) / 8); + const auto round_element_no = element_number % 8 ? element_number - element_number % 8 + 8 : element_number; + output.reserve(round_element_no); // adds 7 more elements here? + std::for_each(source_begin, source_end, [&](IN_T c) { + for (const auto i : {7, 6, 5, 4, 3, 2, 1, 0}) { + const uint8_t data = (c >> i) & 0x01; + output.push_back(data); + } + }); + output.resize(element_number); + } + + template ::type = true> + void cast_vector(std::vector& output) const { + using IN_T = fundamental_type_for; + const auto element_number = shape_size(m_shape); + const auto source_begin = get_data_ptr(); + const auto source_end = std::next(source_begin, (element_number + 1) / 2); + const auto round_element_no = element_number % 2 ? element_number + 1 : element_number; + output.reserve(round_element_no); // adds 1 more elements here? 
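+        // Each byte packs two u4 values, high nibble first; the loop below emits
+        // both nibbles of every byte, so an odd element count yields one extra
+        // value that the final resize() trims away.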
+ std::for_each(source_begin, source_end, [&](IN_T c) { + for (const auto i : {4, 0}) { + const uint8_t data = (c >> i) & 0x0F; + output.push_back(data); + } + }); + output.resize(element_number); + } + template ::type = true> + void cast_vector(std::vector& output) const { + using IN_T = fundamental_type_for; + const auto element_number = shape_size(m_shape); + const auto source_begin = get_data_ptr(); + const auto source_end = std::next(source_begin, (element_number + 1) / 2); + const auto round_element_no = element_number % 2 ? element_number + 1 : element_number; + output.reserve(round_element_no); // adds 1 more elements here? + std::for_each(source_begin, source_end, [&](IN_T c) { + for (const auto i : {4, 0}) { + const uint8_t i4data = (c >> i) & 0x0F; + const bool is_negative_number = (i4data >> 3) & 0x01; + const int8_t data = is_negative_number ? i4data | 0xF0 : i4data; + output.push_back(data); + } + }); + output.resize(element_number); + } + + template , + typename std::enable_if::type = true> + void fill_data(const T& value) { + const auto size = shape_size(m_shape); + const auto v = static_cast(value); + std::fill_n(get_data_ptr_nc(), size, v); + } + + template , + typename std::enable_if::type = true> + void fill_data(const T& value) { + const StorageDataType v = value ? 0xFF : 0x00; + std::fill_n(get_data_ptr_nc(), mem_size(), v); + } + + template , + typename std::enable_if::type = true> + void fill_data(const T& value) { + uint8_t v = value_in_range(value); + v &= 0x0F; + v += v << 4; + std::fill_n(get_data_ptr_nc(), mem_size(), v); + } + + void allocate_buffer(); + + void* get_data_ptr_nc() { + return (m_data ? m_data->get_ptr() : nullptr); + } + + template + typename element_type_traits::value_type* get_data_ptr_nc() { + NGRAPH_CHECK(ET == get_element_type(), "get_data_ptr_nc() called for incorrect element type."); + return static_cast::value_type*>(get_data_ptr_nc()); + } + + Constant(const OutputVector& args) : Op(args), m_shape({}) {} + + virtual void infer_element_type() {} + template + void write_values(const std::vector& values) { + write_to_buffer(values); + } + + template , + typename std::enable_if::type = true> + void write_buffer(const std::vector& source) { + auto p = get_data_ptr_nc(); + for (size_t i = 0; i < source.size(); i++) { + p[i] = static_cast(source[i]); + } + } + + template , + typename std::enable_if::type = true> + void write_buffer(const std::vector& source) { + auto p = get_data_ptr_nc(); + size_t i = 0; + for (; i < source.size() / 2; i++) { + const auto v1 = value_in_range(source[i * 2]) & 0x0F; + const auto v2 = value_in_range(source[i * 2 + 1]) & 0x0F; + const auto v = (v1 << 4) | v2; + p[i] = static_cast(v); + } + if (source.size() % 2) { + const auto v1 = value_in_range(source[i * 2]) & 0x0F; + const auto v = v1 << 4; + p[i] = static_cast(v); + } + } + + template , + typename std::enable_if::type = true> + void write_buffer(const std::vector& source) { + auto p = get_data_ptr_nc(); + size_t i = 0; + for (; i < source.size() / 8; i++) { + uint8_t v{}; + for (int j = 0; j != 8; j++) { + const uint8_t b = source[i * 8 + j] ? 0x01 << (7 - j) : 0; + v |= b; + } + p[i] = static_cast(v); + } + uint8_t v{}; + for (unsigned j = 0; j != source.size() % 8; j++) { + const uint8_t b = source[i * 8 + j] ? 
0x01 << (7 - j) : 0; + v |= b; + } + p[i] = static_cast(v); + } + + template + void write_to_buffer(const std::vector& source) { + const auto& target_type = m_element_type; + size_t target_element_count = shape_size(m_shape); + if (source.size() != target_element_count) { + throw std::runtime_error("Constant initializer does not match shape"); + } + using Type_t = element::Type_t; +#if defined(__GNUC__) && !(__GNUC__ == 4 && __GNUC_MINOR__ == 8) +# pragma GCC diagnostic push +# pragma GCC diagnostic error "-Wswitch" +# pragma GCC diagnostic error "-Wswitch-enum" +#endif + switch (target_type) { + case Type_t::boolean: + write_buffer(source); + break; + case Type_t::bf16: + write_buffer(source); + break; + case Type_t::f16: + write_buffer(source); + break; + case Type_t::f32: + write_buffer(source); + break; + case Type_t::f64: + write_buffer(source); + break; + case Type_t::i4: + write_buffer(source); + break; + case Type_t::i8: + write_buffer(source); + break; + case Type_t::i16: + write_buffer(source); + break; + case Type_t::i32: + write_buffer(source); + break; + case Type_t::i64: + write_buffer(source); + break; + case Type_t::u1: + write_buffer(source); + break; + case Type_t::u4: + write_buffer(source); + break; + case Type_t::u8: + write_buffer(source); + break; + case Type_t::u16: + write_buffer(source); + break; + case Type_t::u32: + write_buffer(source); + break; + case Type_t::u64: + write_buffer(source); + break; + case element::Type_t::undefined: + case element::Type_t::dynamic: + throw std::runtime_error("unsupported type"); + } +#if defined(__GNUC__) && !(__GNUC__ == 4 && __GNUC_MINOR__ == 8) +# pragma GCC diagnostic pop +#endif + } + template ::type = true> + static ngraph::fundamental_type_for value_in_range(const ValueT& value) { + const auto result = ngraph::fundamental_type_for(value); + NGRAPH_CHECK(0 <= result && result <= 15, "assigned value out of range u4 values"); + return result; + } + + template ::type = true> + static ngraph::fundamental_type_for value_in_range(const ValueT& value) { + const auto result = ngraph::fundamental_type_for(value); + NGRAPH_CHECK(-8 <= result && result <= 7, "assigned value out of range i4 values"); + return result; + } + + bool are_all_data_elements_bitwise_identical() const; + static constexpr size_t host_alignment() { + return 64; + } + + size_t mem_size() const { + const bool bitwidth_less_than_byte = m_element_type.bitwidth() < 8; + if (bitwidth_less_than_byte) { + const auto size = shape_size(m_shape); + const auto bitwidth = size * m_element_type.bitwidth(); + // for rounding by `(bitwidth + 7) / 8` will work for + // `bitwidth < numeric_limits::max() - 7` + return bitwidth / 8 + (bitwidth % 8 ? 1 : 0); + } + return shape_size(m_shape) * m_element_type.size(); + } + + element::Type m_element_type; + ngraph::Shape m_shape{}; + std::shared_ptr m_data; + bool m_all_elements_bitwise_identical; + bool m_alloc_buffer_on_visit_attributes = true; +}; +} // namespace v0 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/logical_and.hpp b/ngraph/core/include/openvino/op/logical_and.hpp new file mode 100644 index 00000000000000..a2d09829abe66e --- /dev/null +++ b/ngraph/core/include/openvino/op/logical_and.hpp @@ -0,0 +1,43 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +#include "openvino/op/util/binary_elementwise_logical.hpp" + +namespace ov { +namespace op { +namespace v1 { +/// \brief Elementwise logical-and operation. 
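+///
+/// A minimal usage sketch (names are illustrative):
+/// \code
+/// auto a = std::make_shared<ngraph::op::v0::Parameter>(ov::element::boolean, ngraph::Shape{2, 2});
+/// auto b = std::make_shared<ngraph::op::v0::Parameter>(ov::element::boolean, ngraph::Shape{2, 2});
+/// auto both = std::make_shared<ov::op::v1::LogicalAnd>(a, b);  // NUMPY broadcast by default
+/// \endcode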
+/// +class OPENVINO_API LogicalAnd : public util::BinaryElementwiseLogical { +public: + OPENVINO_RTTI_DECLARATION; + /// \brief Constructs a logical-and operation. + LogicalAnd() = default; + + /// \brief Constructs a logical-and operation. + /// + /// \param arg0 Output that produces the first input tensor.
+    ///             `[d0, ...]`
+    /// \param arg1 Output that produces the second input tensor.<br>
+ /// `[d0, ...]` + /// \param auto_broadcast Auto broadcast specification + /// + /// Output `[d0, ...]` + /// + LogicalAnd(const Output& arg0, + const Output& arg1, + const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY)); + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + bool visit_attributes(AttributeVisitor& visitor) override; + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + bool has_evaluate() const override; +}; +} // namespace v1 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/read_value.hpp b/ngraph/core/include/openvino/op/read_value.hpp new file mode 100644 index 00000000000000..f3e83254a93769 --- /dev/null +++ b/ngraph/core/include/openvino/op/read_value.hpp @@ -0,0 +1,79 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/util/read_value_base.hpp" +#include "openvino/op/util/variable.hpp" + +namespace ov { +namespace op { +namespace v3 { +/// \brief ReadValue operation creates the variable with `variable_id` and returns value +/// of this variable. +class OPENVINO_API ReadValue : public util::ReadValueBase { +public: + OPENVINO_RTTI_DECLARATION; + ReadValue() = default; + + /// \brief Constructs a ReadValue operation. + /// + /// \param init_value Node that produces the input tensor. + /// \param variable_id identificator of the variable to create. + ReadValue(const Output& init_value, const std::string& variable_id); + + void validate_and_infer_types() override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + bool visit_attributes(AttributeVisitor& visitor) override; + + std::string get_variable_id() const override { + return m_variable_id; + } + +private: + std::string m_variable_id; +}; +} // namespace v3 + +namespace v6 { +/// \brief ReadValue operation gets an input value from the variable with `variable_id` +/// and returns it as an output. +class OPENVINO_API ReadValue : public util::ReadValueBase { +public: + OPENVINO_RTTI_DECLARATION; + ReadValue() = default; + + /// \brief Constructs a ReadValue operation. + /// + /// \param init_value Node that produces the input tensor. + /// \param variable Class for storing and synchronizing element types, shapes and + /// identifiers + /// between pairs of Assign/ReadValue nodes. + ReadValue(const Output& init_value, const std::shared_ptr& variable); + + void validate_and_infer_types() override; + + void revalidate_and_infer_types() override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + bool visit_attributes(AttributeVisitor& visitor) override; + + std::string get_variable_id() const override { + NGRAPH_CHECK(m_variable, "Variable is not initialized. 
Variable_id is unavailable"); + return m_variable->get_info().variable_id; + } + + bool evaluate(const HostTensorVector& outputs, + const HostTensorVector& inputs, + const EvaluationContext& evaluation_context) const override; + bool has_evaluate() const override; + + bool constant_fold(OutputVector& output_values, const OutputVector& inputs_values) override; +}; +} // namespace v6 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/sink.hpp b/ngraph/core/include/openvino/op/sink.hpp new file mode 100644 index 00000000000000..e603378183d6a6 --- /dev/null +++ b/ngraph/core/include/openvino/op/sink.hpp @@ -0,0 +1,26 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +/// Root of nodes that can be sink nodes +class OPENVINO_API Sink : public Op { +public: + ~Sink() override = 0; + OPENVINO_RTTI_DECLARATION; + +protected: + Sink() : Op() {} + + explicit Sink(const OutputVector& arguments) : Op(arguments) {} +}; +} // namespace op +using SinkVector = std::vector>; +} // namespace ov diff --git a/ngraph/core/include/openvino/op/util/assign_base.hpp b/ngraph/core/include/openvino/op/util/assign_base.hpp new file mode 100644 index 00000000000000..af195f6496a606 --- /dev/null +++ b/ngraph/core/include/openvino/op/util/assign_base.hpp @@ -0,0 +1,22 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/sink.hpp" +#include "openvino/op/util/variable_extension.hpp" + +namespace ov { +namespace op { +namespace util { +class OPENVINO_API AssignBase : public Sink, public VariableExtension { +public: + OPENVINO_RTTI_DECLARATION; + AssignBase() = default; + /// \brief Constructs an AssignBase operation. + explicit AssignBase(const OutputVector& arguments) : Sink(arguments) {} +}; +} // namespace util +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/util/read_value_base.hpp b/ngraph/core/include/openvino/op/util/read_value_base.hpp new file mode 100644 index 00000000000000..97e101c9394906 --- /dev/null +++ b/ngraph/core/include/openvino/op/util/read_value_base.hpp @@ -0,0 +1,24 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" +#include "openvino/op/util/variable_extension.hpp" + +namespace ov { +namespace op { +namespace util { +class OPENVINO_API ReadValueBase : public Op, public VariableExtension { +public: + OPENVINO_RTTI_DECLARATION; + + ReadValueBase() = default; + + /// \brief Constructs an AssignBase operation. 
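+    ///
+    /// Concrete ReadValue/Assign subclasses are used in pairs over one variable;
+    /// a minimal v6 sketch (variable info values are illustrative):
+    /// \code
+    /// auto var = std::make_shared<ov::op::util::Variable>(
+    ///     ov::op::util::VariableInfo{ov::PartialShape{1, 128}, ov::element::f32, "state0"});
+    /// auto init = std::make_shared<ngraph::op::v0::Parameter>(ov::element::f32, ngraph::Shape{1, 128});
+    /// auto read = std::make_shared<ov::op::v6::ReadValue>(init, var);
+    /// auto assign = std::make_shared<ov::op::v6::Assign>(read, var);
+    /// \endcode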
+ explicit ReadValueBase(const OutputVector& arguments) : Op(arguments) {} +}; +} // namespace util +} // namespace op +} // namespace ov diff --git a/ngraph/core/src/op/abs.cpp b/ngraph/core/src/op/abs.cpp index de7cdf63499601..d863b224b104c7 100644 --- a/ngraph/core/src/op/abs.cpp +++ b/ngraph/core/src/op/abs.cpp @@ -10,30 +10,27 @@ #include "ngraph/runtime/host_tensor.hpp" #include "ngraph/runtime/reference/abs.hpp" -using namespace std; -using namespace ngraph; +NGRAPH_RTTI_DEFINITION(ov::op::v0::Abs, "Abs", 0); -constexpr NodeTypeInfo op::Abs::type_info; - -op::Abs::Abs(const Output& arg) : UnaryElementwiseArithmetic(arg) { +ov::op::v0::Abs::Abs(const Output& arg) : UnaryElementwiseArithmetic(arg) { constructor_validate_and_infer_types(); } -shared_ptr op::Abs::clone_with_new_inputs(const OutputVector& new_args) const { +std::shared_ptr ov::op::v0::Abs::clone_with_new_inputs(const OutputVector& new_args) const { NGRAPH_OP_SCOPE(v0_Abs_clone_with_new_inputs); check_new_args_count(this, new_args); - return make_shared(new_args.at(0)); + return std::make_shared(new_args.at(0)); } namespace absop { -template -inline bool evaluate(const HostTensorPtr& arg0, const HostTensorPtr& out, const size_t count) { - using T = typename element_type_traits::value_type; - runtime::reference::abs((arg0->get_data_ptr()), (out->get_data_ptr()), count); +template +inline bool evaluate(const ngraph::HostTensorPtr& arg0, const ngraph::HostTensorPtr& out, const size_t count) { + using T = typename ov::element_type_traits::value_type; + ngraph::runtime::reference::abs((arg0->get_data_ptr()), (out->get_data_ptr()), count); return true; } -bool evaluate_abs(const HostTensorPtr& arg0, const HostTensorPtr& out, const size_t count) { +bool evaluate_abs(const ngraph::HostTensorPtr& arg0, const ngraph::HostTensorPtr& out, const size_t count) { bool rc = true; out->set_unary(arg0); @@ -54,12 +51,12 @@ bool evaluate_abs(const HostTensorPtr& arg0, const HostTensorPtr& out, const siz } } // namespace absop -bool op::Abs::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { +bool ov::op::v0::Abs::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { NGRAPH_OP_SCOPE(v0_Abs_evaluate); return absop::evaluate_abs(inputs[0], outputs[0], shape_size(get_output_shape(0))); } -bool op::Abs::has_evaluate() const { +bool ov::op::v0::Abs::has_evaluate() const { NGRAPH_OP_SCOPE(v0_Abs_has_evaluate); switch (get_input_element_type(0)) { case ngraph::element::i32: diff --git a/ngraph/core/src/op/acos.cpp b/ngraph/core/src/op/acos.cpp index fb66557d4f4d91..53b56b6762cf57 100644 --- a/ngraph/core/src/op/acos.cpp +++ b/ngraph/core/src/op/acos.cpp @@ -18,30 +18,27 @@ #include "ngraph/runtime/host_tensor.hpp" #include "ngraph/runtime/reference/acos.hpp" -using namespace std; -using namespace ngraph; +NGRAPH_RTTI_DEFINITION(ov::op::v0::Acos, "Acos", 0); -constexpr NodeTypeInfo op::Acos::type_info; - -op::Acos::Acos(const Output& arg) : UnaryElementwiseArithmetic(arg) { +ov::op::v0::Acos::Acos(const Output& arg) : UnaryElementwiseArithmetic(arg) { constructor_validate_and_infer_types(); } -shared_ptr op::Acos::clone_with_new_inputs(const OutputVector& new_args) const { +std::shared_ptr ov::op::v0::Acos::clone_with_new_inputs(const OutputVector& new_args) const { NGRAPH_OP_SCOPE(v0_Acos_clone_with_new_inputs); check_new_args_count(this, new_args); - return make_shared(new_args.at(0)); + return std::make_shared(new_args.at(0)); } namespace acosop { -template -inline bool evaluate(const 
HostTensorPtr& arg0, const HostTensorPtr& out, const size_t count) { - using T = typename element_type_traits::value_type; - runtime::reference::acos(arg0->get_data_ptr(), out->get_data_ptr(), count); +template +inline bool evaluate(const ngraph::HostTensorPtr& arg0, const ngraph::HostTensorPtr& out, const size_t count) { + using T = typename ov::element_type_traits::value_type; + ngraph::runtime::reference::acos(arg0->get_data_ptr(), out->get_data_ptr(), count); return true; } -bool evaluate_acos(const HostTensorPtr& arg0, const HostTensorPtr& out, const size_t count) { +bool evaluate_acos(const ov::HostTensorPtr& arg0, const ov::HostTensorPtr& out, const size_t count) { bool rc = true; out->set_unary(arg0); @@ -61,12 +58,12 @@ bool evaluate_acos(const HostTensorPtr& arg0, const HostTensorPtr& out, const si } } // namespace acosop -bool op::Acos::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { +bool ov::op::v0::Acos::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { NGRAPH_OP_SCOPE(v0_Acos_evaluate); return acosop::evaluate_acos(inputs[0], outputs[0], shape_size(get_output_shape(0))); } -bool op::Acos::has_evaluate() const { +bool ov::op::v0::Acos::has_evaluate() const { NGRAPH_OP_SCOPE(v0_Acos_has_evaluate); switch (get_input_element_type(0)) { case ngraph::element::i32: diff --git a/ngraph/core/src/op/acosh.cpp b/ngraph/core/src/op/acosh.cpp index 230167c9f9b237..99f9bd03bcaf17 100644 --- a/ngraph/core/src/op/acosh.cpp +++ b/ngraph/core/src/op/acosh.cpp @@ -12,29 +12,26 @@ #include "ngraph/runtime/reference/acosh.hpp" #include "ngraph/type/element_type.hpp" -using namespace std; -using namespace ngraph; +OPENVINO_RTTI_DEFINITION(ov::op::v3::Acosh, "Acosh", 3, util::UnaryElementwiseArithmetic); -NGRAPH_RTTI_DEFINITION(op::v3::Acosh, "Acosh", 3, util::UnaryElementwiseArithmetic); - -op::v3::Acosh::Acosh(const Output& arg) : UnaryElementwiseArithmetic(arg) { +ov::op::v3::Acosh::Acosh(const Output& arg) : UnaryElementwiseArithmetic(arg) { constructor_validate_and_infer_types(); } -shared_ptr op::v3::Acosh::clone_with_new_inputs(const OutputVector& new_args) const { +std::shared_ptr ov::op::v3::Acosh::clone_with_new_inputs(const OutputVector& new_args) const { NGRAPH_OP_SCOPE(v3_Acosh_clone_with_new_inputs); check_new_args_count(this, new_args); - return make_shared(new_args.at(0)); + return std::make_shared(new_args.at(0)); } namespace acoshop { -template -bool evaluate(const HostTensorPtr& arg0, const HostTensorPtr& out) { - runtime::reference::acosh(arg0->get_data_ptr(), out->get_data_ptr(), shape_size(arg0->get_shape())); +template +bool evaluate(const ngraph::HostTensorPtr& arg0, const ngraph::HostTensorPtr& out) { + ngraph::runtime::reference::acosh(arg0->get_data_ptr(), out->get_data_ptr(), shape_size(arg0->get_shape())); return true; } -bool evaluate_acosh(const HostTensorPtr& arg0, const HostTensorPtr& out) { +bool evaluate_acosh(const ngraph::HostTensorPtr& arg0, const ngraph::HostTensorPtr& out) { bool rc = true; out->set_unary(arg0); switch (arg0->get_element_type()) { @@ -52,12 +49,12 @@ bool evaluate_acosh(const HostTensorPtr& arg0, const HostTensorPtr& out) { } } // namespace acoshop -bool op::v3::Acosh::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { +bool ov::op::v3::Acosh::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { NGRAPH_OP_SCOPE(v3_Acosh_evaluate); return acoshop::evaluate_acosh(inputs[0], outputs[0]); } -bool 
op::v3::Acosh::has_evaluate() const { +bool ov::op::v3::Acosh::has_evaluate() const { NGRAPH_OP_SCOPE(v3_Acosh_has_evaluate); switch (get_input_element_type(0)) { case ngraph::element::i32: diff --git a/ngraph/core/src/op/adaptive_avg_pool.cpp b/ngraph/core/src/op/adaptive_avg_pool.cpp index e8989c9ecebe69..de8c00f5d012b0 100644 --- a/ngraph/core/src/op/adaptive_avg_pool.cpp +++ b/ngraph/core/src/op/adaptive_avg_pool.cpp @@ -12,7 +12,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v8::AdaptiveAvgPool, "AdaptiveAvgPool", 8); +OPENVINO_RTTI_DEFINITION(ov::op::v8::AdaptiveAvgPool, "AdaptiveAvgPool", 8); op::v8::AdaptiveAvgPool::AdaptiveAvgPool(const Output& data, const Output& output_shape) : Op({data, output_shape}) { diff --git a/ngraph/core/src/op/adaptive_max_pool.cpp b/ngraph/core/src/op/adaptive_max_pool.cpp index e5f57db472795c..186e08799f143d 100644 --- a/ngraph/core/src/op/adaptive_max_pool.cpp +++ b/ngraph/core/src/op/adaptive_max_pool.cpp @@ -12,7 +12,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v8::AdaptiveMaxPool, "AdaptiveMaxPool", 8); +OPENVINO_RTTI_DEFINITION(ov::op::v8::AdaptiveMaxPool, "AdaptiveMaxPool", 8); op::v8::AdaptiveMaxPool::AdaptiveMaxPool(const Output& data, const Output& output_shape, diff --git a/ngraph/core/src/op/add.cpp b/ngraph/core/src/op/add.cpp index d6a58e8efb1608..35eec6a242fc78 100644 --- a/ngraph/core/src/op/add.cpp +++ b/ngraph/core/src/op/add.cpp @@ -54,7 +54,7 @@ bool evaluate_add(const HostTensorPtr& arg0, // ------------------------------- v1 ------------------------------------------ -NGRAPH_RTTI_DEFINITION(op::v1::Add, "Add", 1, util::BinaryElementwiseArithmetic); +OPENVINO_RTTI_DEFINITION(ov::op::v1::Add, "Add", 1, util::BinaryElementwiseArithmetic); op::v1::Add::Add(const Output& arg0, const Output& arg1, const AutoBroadcastSpec& auto_broadcast) : BinaryElementwiseArithmetic(arg0, arg1, auto_broadcast) { diff --git a/ngraph/core/src/op/asin.cpp b/ngraph/core/src/op/asin.cpp index b7675408de4a2d..06bf6904bcf01a 100644 --- a/ngraph/core/src/op/asin.cpp +++ b/ngraph/core/src/op/asin.cpp @@ -22,7 +22,7 @@ using namespace std; using namespace ngraph; -constexpr NodeTypeInfo op::Asin::type_info; +OPENVINO_RTTI_DEFINITION(ov::op::v0::Asin, "Asin", 0, util::BinaryElementwiseArithmetic); op::Asin::Asin(const Output& arg) : UnaryElementwiseArithmetic(arg) { constructor_validate_and_infer_types(); diff --git a/ngraph/core/src/op/asinh.cpp b/ngraph/core/src/op/asinh.cpp index e6b78fc0f11038..3c44b1fb9532e5 100644 --- a/ngraph/core/src/op/asinh.cpp +++ b/ngraph/core/src/op/asinh.cpp @@ -16,7 +16,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v3::Asinh, "Asinh", 3, util::UnaryElementwiseArithmetic); +OPENVINO_RTTI_DEFINITION(ov::op::v3::Asinh, "Asinh", 3, util::UnaryElementwiseArithmetic); op::v3::Asinh::Asinh(const Output& arg) : UnaryElementwiseArithmetic(arg) { constructor_validate_and_infer_types(); diff --git a/ngraph/core/src/op/assign.cpp b/ngraph/core/src/op/assign.cpp index 3476abd5d877cb..2b7dc2c8741c98 100644 --- a/ngraph/core/src/op/assign.cpp +++ b/ngraph/core/src/op/assign.cpp @@ -13,9 +13,8 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::AssignBase, "AssignBase", 0); -NGRAPH_RTTI_DEFINITION(op::v3::Assign, "Assign", 3, op::Sink); -NGRAPH_RTTI_DEFINITION(op::v6::Assign, "Assign", 6, op::Sink); +OPENVINO_RTTI_DEFINITION(ov::op::v3::Assign, "Assign", 3, op::Sink); +OPENVINO_RTTI_DEFINITION(ov::op::v6::Assign, "Assign", 6, 
op::Sink); op::v3::Assign::Assign(const Output& new_value, const std::string& variable_id) : AssignBase({new_value}), diff --git a/ngraph/core/src/op/atan.cpp b/ngraph/core/src/op/atan.cpp index 95a7058164d2e8..131324d18a7282 100644 --- a/ngraph/core/src/op/atan.cpp +++ b/ngraph/core/src/op/atan.cpp @@ -11,7 +11,6 @@ #include "ngraph/axis_set.hpp" #include "ngraph/op/add.hpp" #include "ngraph/op/broadcast.hpp" -#include "ngraph/op/constant.hpp" #include "ngraph/op/divide.hpp" #include "ngraph/op/multiply.hpp" #include "ngraph/runtime/host_tensor.hpp" @@ -21,7 +20,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v0::Atan, "Atan", 0, util::UnaryElementwiseArithmetic); +OPENVINO_RTTI_DEFINITION(ov::op::v0::Atan, "Atan", 0, util::UnaryElementwiseArithmetic); op::Atan::Atan(const Output& arg) : UnaryElementwiseArithmetic(arg) { constructor_validate_and_infer_types(); diff --git a/ngraph/core/src/op/atanh.cpp b/ngraph/core/src/op/atanh.cpp index 980ac6b795222d..1a6c89c3aaa4eb 100644 --- a/ngraph/core/src/op/atanh.cpp +++ b/ngraph/core/src/op/atanh.cpp @@ -15,7 +15,7 @@ using namespace std; using namespace ngraph; -NGRAPH_RTTI_DEFINITION(op::v3::Atanh, "Atanh", 0, util::UnaryElementwiseArithmetic); +OPENVINO_RTTI_DEFINITION(ov::op::v3::Atanh, "Atanh", 0, util::UnaryElementwiseArithmetic); op::v3::Atanh::Atanh(const Output& arg) : UnaryElementwiseArithmetic(arg) { constructor_validate_and_infer_types(); diff --git a/ngraph/core/src/op/avg_pool.cpp b/ngraph/core/src/op/avg_pool.cpp index 0f1a7f63d3e2f2..b6cef6ad773fee 100644 --- a/ngraph/core/src/op/avg_pool.cpp +++ b/ngraph/core/src/op/avg_pool.cpp @@ -13,7 +13,7 @@ using namespace std; using namespace ngraph; // *** AvgPool OP SET 1 *** -NGRAPH_RTTI_DEFINITION(op::v1::AvgPool, "AvgPool", 1); +OPENVINO_RTTI_DEFINITION(op::v1::AvgPool, "AvgPool", 1); op::v1::AvgPool::AvgPool(const Output& arg, const Strides& strides, @@ -205,5 +205,5 @@ shared_ptr op::v1::AvgPool::clone_with_new_inputs(const OutputVector& new_ } shared_ptr op::v1::AvgPool::get_default_value() const { - return op::Constant::create(get_element_type(), get_shape(), {0}); + return op::v0::Constant::create(get_element_type(), get_shape(), {0}); } diff --git a/ngraph/core/src/op/constant.cpp b/ngraph/core/src/op/constant.cpp index 763f508d16dccb..4374b8691a99d1 100644 --- a/ngraph/core/src/op/constant.cpp +++ b/ngraph/core/src/op/constant.cpp @@ -15,7 +15,6 @@ #include "ngraph/op/util/attr_types.hpp" #include "ngraph/util.hpp" -using namespace ngraph; using namespace std; template @@ -33,15 +32,15 @@ static inline string to_cpp_string(T value) { return rc; } -NGRAPH_RTTI_DEFINITION(op::Constant, "Constant", 0); +OPENVINO_RTTI_DEFINITION(ov::op::v0::Constant, "Constant", 0); -op::Constant::Constant(const shared_ptr& tensor) { +ov::op::v0::Constant::Constant(const shared_ptr& tensor) { m_element_type = tensor->get_element_type(); m_shape = tensor->get_shape(); // Share data from HostTensor if we work with it // And copy data in other cas - if (auto hostTensor = std::dynamic_pointer_cast(tensor)) { - m_data = make_shared>>( + if (auto hostTensor = std::dynamic_pointer_cast(tensor)) { + m_data = make_shared>>( static_cast(hostTensor->get_data_ptr()), tensor->get_size_in_bytes(), tensor); @@ -54,7 +53,9 @@ op::Constant::Constant(const shared_ptr& tensor) { constructor_validate_and_infer_types(); } -op::Constant::Constant(const element::Type& type, const Shape& shape, const std::vector& values) +ov::op::v0::Constant::Constant(const element::Type& type, + const 
ngraph::Shape& shape, + const std::vector& values) : Constant(type, shape) { NGRAPH_SUPPRESS_DEPRECATED_START NODE_VALIDATION_CHECK(this, @@ -76,49 +77,49 @@ op::Constant::Constant(const element::Type& type, const Shape& shape, const std: fill_data(stoi(values[0])); break; case Type_t::bf16: - fill_data(parse_string(values[0])); + fill_data(ngraph::parse_string(values[0])); break; case Type_t::f16: - fill_data(parse_string(values[0])); + fill_data(ngraph::parse_string(values[0])); break; case Type_t::f32: - fill_data(parse_string(values[0])); + fill_data(ngraph::parse_string(values[0])); break; case Type_t::f64: - fill_data(parse_string(values[0])); + fill_data(ngraph::parse_string(values[0])); break; case Type_t::i4: - fill_data(parse_string(values[0])); + fill_data(ngraph::parse_string(values[0])); break; case Type_t::i8: - fill_data(parse_string(values[0])); + fill_data(ngraph::parse_string(values[0])); break; case Type_t::i16: - fill_data(parse_string(values[0])); + fill_data(ngraph::parse_string(values[0])); break; case Type_t::i32: - fill_data(parse_string(values[0])); + fill_data(ngraph::parse_string(values[0])); break; case Type_t::i64: - fill_data(parse_string(values[0])); + fill_data(ngraph::parse_string(values[0])); break; case Type_t::u1: fill_data(stoi(values[0])); break; case Type_t::u4: - fill_data(parse_string(values[0])); + fill_data(ngraph::parse_string(values[0])); break; case Type_t::u8: - fill_data(parse_string(values[0])); + fill_data(ngraph::parse_string(values[0])); break; case Type_t::u16: - fill_data(parse_string(values[0])); + fill_data(ngraph::parse_string(values[0])); break; case Type_t::u32: - fill_data(parse_string(values[0])); + fill_data(ngraph::parse_string(values[0])); break; case Type_t::u64: - fill_data(parse_string(values[0])); + fill_data(ngraph::parse_string(values[0])); break; case Type_t::undefined: throw std::runtime_error("deserialize unsupported type undefined"); @@ -129,52 +130,52 @@ op::Constant::Constant(const element::Type& type, const Shape& shape, const std: } else { switch (m_element_type) { case Type_t::boolean: - write_buffer(parse_string(values)); + write_buffer(ngraph::parse_string(values)); break; case Type_t::bf16: - write_buffer(parse_string(values)); + write_buffer(ngraph::parse_string(values)); break; case Type_t::f16: - write_buffer(parse_string(values)); + write_buffer(ngraph::parse_string(values)); break; case Type_t::f32: - write_buffer(parse_string(values)); + write_buffer(ngraph::parse_string(values)); break; case Type_t::f64: - write_buffer(parse_string(values)); + write_buffer(ngraph::parse_string(values)); break; case Type_t::i4: - write_buffer(parse_string(values)); + write_buffer(ngraph::parse_string(values)); break; case Type_t::i8: - write_buffer(parse_string(values)); + write_buffer(ngraph::parse_string(values)); break; case Type_t::i16: - write_buffer(parse_string(values)); + write_buffer(ngraph::parse_string(values)); break; case Type_t::i32: - write_buffer(parse_string(values)); + write_buffer(ngraph::parse_string(values)); break; case Type_t::i64: - write_buffer(parse_string(values)); + write_buffer(ngraph::parse_string(values)); break; case Type_t::u1: - write_buffer(parse_string(values)); + write_buffer(ngraph::parse_string(values)); break; case Type_t::u4: - write_buffer(parse_string(values)); + write_buffer(ngraph::parse_string(values)); break; case Type_t::u8: - write_buffer(parse_string(values)); + write_buffer(ngraph::parse_string(values)); break; case Type_t::u16: - write_buffer(parse_string(values)); + 
write_buffer(ngraph::parse_string(values)); break; case Type_t::u32: - write_buffer(parse_string(values)); + write_buffer(ngraph::parse_string(values)); break; case Type_t::u64: - write_buffer(parse_string(values)); + write_buffer(ngraph::parse_string(values)); break; case Type_t::undefined: throw std::runtime_error("deserialize unsupported type undefined"); @@ -186,23 +187,26 @@ op::Constant::Constant(const element::Type& type, const Shape& shape, const std: NGRAPH_SUPPRESS_DEPRECATED_END } -op::Constant::Constant(const element::Type& type, const Shape& shape) : m_element_type(type), m_shape(shape) { +ov::op::v0::Constant::Constant(const element::Type& type, const ngraph::Shape& shape) + : m_element_type(type), + m_shape(shape) { allocate_buffer(); constructor_validate_and_infer_types(); } -void op::Constant::allocate_buffer() { - m_data = make_shared(mem_size(), host_alignment()); +void ov::op::v0::Constant::allocate_buffer() { + m_data = make_shared(mem_size(), host_alignment()); std::memset(m_data->get_ptr(), 0, m_data->size()); } -op::Constant::Constant(const element::Type& type, const Shape& shape, const void* data) : Constant(type, shape) { +ov::op::v0::Constant::Constant(const element::Type& type, const ngraph::Shape& shape, const void* data) + : Constant(type, shape) { size_t size = ceil(shape_size(m_shape) * m_element_type.bitwidth() / 8.f); std::memcpy(get_data_ptr_nc(), data, size); m_all_elements_bitwise_identical = are_all_data_elements_bitwise_identical(); } -op::Constant::Constant(const Constant& other) { +ov::op::v0::Constant::Constant(const Constant& other) { m_element_type = other.m_element_type; m_shape = other.m_shape; m_data = other.m_data; @@ -210,7 +214,7 @@ op::Constant::Constant(const Constant& other) { constructor_validate_and_infer_types(); } -op::Constant::Constant(const Constant& other, const Shape& new_shape) { +ov::op::v0::Constant::Constant(const Constant& other, const ngraph::Shape& new_shape) { NGRAPH_CHECK(shape_size(other.m_shape) == shape_size(new_shape), "Shape size " + std::to_string(shape_size(new_shape)) + " is not equal to " + std::to_string(shape_size(other.m_shape))); @@ -221,9 +225,9 @@ op::Constant::Constant(const Constant& other, const Shape& new_shape) { constructor_validate_and_infer_types(); } -op::Constant::~Constant() {} +ov::op::v0::Constant::~Constant() = default; -string op::Constant::convert_value_to_string(size_t index) const { +string ov::op::v0::Constant::convert_value_to_string(size_t index) const { string rc; #if defined(__GNUC__) && !(__GNUC__ == 4 && __GNUC_MINOR__ == 8) # pragma GCC diagnostic push @@ -291,7 +295,7 @@ string op::Constant::convert_value_to_string(size_t index) const { return rc; } -vector op::Constant::get_value_strings() const { +vector ov::op::v0::Constant::get_value_strings() const { vector rc; #if defined(__GNUC__) && !(__GNUC__ == 4 && __GNUC_MINOR__ == 8) @@ -387,17 +391,17 @@ vector op::Constant::get_value_strings() const { return rc; } -Shape op::Constant::get_shape_val() const { +ngraph::Shape ov::op::v0::Constant::get_shape_val() const { NGRAPH_CHECK(m_element_type.is_integral_number()); std::vector out_shape = cast_vector(); - Shape output_shape(shape_size(m_shape)); + ngraph::Shape output_shape(shape_size(m_shape)); std::transform(out_shape.begin(), out_shape.end(), output_shape.begin(), [&](const int64_t& v) { return (v > 0) ? 
        return (v > 0) ? v : 0;
     });
     return output_shape;
 }
 
-Strides op::Constant::get_strides_val() const {
+ov::Strides ov::op::v0::Constant::get_strides_val() const {
     NGRAPH_CHECK(m_element_type == element::i64);
     std::vector<int64_t> out_strides = cast_vector<int64_t>();
     Strides output_strides(shape_size(m_shape));
@@ -407,7 +411,7 @@ Strides op::Constant::get_strides_val() const {
     return output_strides;
 }
 
-Coordinate op::Constant::get_coordinate_val() const {
+ov::Coordinate ov::op::v0::Constant::get_coordinate_val() const {
     NGRAPH_CHECK(m_element_type == element::i64);
     std::vector<int64_t> out_coordinate = cast_vector<int64_t>();
     Coordinate output_coordinate(shape_size(m_shape));
@@ -417,7 +421,7 @@ Coordinate op::Constant::get_coordinate_val() const {
     return output_coordinate;
 }
 
-CoordinateDiff op::Constant::get_coordinate_diff_val() const {
+ov::CoordinateDiff ov::op::v0::Constant::get_coordinate_diff_val() const {
     NGRAPH_CHECK(m_element_type == element::i64);
     std::vector<int64_t> out_coordinate_diff = cast_vector<int64_t>();
     CoordinateDiff output_coordinate_diff(shape_size(m_shape));
@@ -430,7 +434,7 @@ CoordinateDiff op::Constant::get_coordinate_diff_val() const {
     return output_coordinate_diff;
 }
 
-AxisVector op::Constant::get_axis_vector_val() const {
+ov::AxisVector ov::op::v0::Constant::get_axis_vector_val() const {
     NGRAPH_CHECK(m_element_type.is_integral_number());
     std::vector<int64_t> out_axis_vector = cast_vector<int64_t>();
     AxisVector output_axis_vector(shape_size(m_shape));
@@ -440,7 +444,7 @@ AxisVector op::Constant::get_axis_vector_val() const {
     return output_axis_vector;
 }
 
-AxisSet op::Constant::get_axis_set_val() const {
+ov::AxisSet ov::op::v0::Constant::get_axis_set_val() const {
     NGRAPH_CHECK(m_element_type.is_integral_number());
     std::vector<int64_t> out_axis_set = cast_vector<int64_t>();
     AxisSet output_axis_set;
@@ -450,12 +454,12 @@ AxisSet op::Constant::get_axis_set_val() const {
     return output_axis_set;
 }
 
-void op::Constant::set_data_shape(const Shape& shape) {
+void ov::op::v0::Constant::set_data_shape(const ngraph::Shape& shape) {
     NGRAPH_CHECK(shape_size(shape) == shape_size(m_shape));
     m_shape = shape;
 }
 
-shared_ptr<Node> op::Constant::clone_with_new_inputs(const OutputVector& new_args) const {
+shared_ptr<Node> ov::op::v0::Constant::clone_with_new_inputs(const OutputVector& new_args) const {
     NGRAPH_OP_SCOPE(v0_Constant_clone_with_new_inputs);
     check_new_args_count(this, new_args);
     return make_shared<Constant>(*this);
@@ -476,7 +480,7 @@ static bool test_bitwise_identical(const T* data, const size_t size) {
     return data_is_constant;
 }
 
-bool op::Constant::are_all_data_elements_bitwise_identical() const {
+bool ov::op::v0::Constant::are_all_data_elements_bitwise_identical() const {
     bool rc = false;
 #if defined(__GNUC__) && !(__GNUC__ == 4 && __GNUC_MINOR__ == 8)
 #    pragma GCC diagnostic push
@@ -522,9 +526,9 @@ bool op::Constant::are_all_data_elements_bitwise_identical() const {
     return rc;
 }
 
-bool op::v0::Constant::visit_attributes(AttributeVisitor& visitor) {
+bool ov::op::v0::Constant::visit_attributes(AttributeVisitor& visitor) {
     NGRAPH_OP_SCOPE(v0_Constant_visit_attributes);
-    Shape prev_shape = m_shape;
+    ngraph::Shape prev_shape = m_shape;
     element::Type prev_type = m_element_type;
     visitor.on_attribute("element_type", m_element_type);
     visitor.on_attribute("shape", m_shape);
@@ -539,21 +543,21 @@ bool op::v0::Constant::visit_attributes(AttributeVisitor& visitor) {
     return true;
 }
 
-bool op::v0::Constant::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const {
+bool ov::op::v0::Constant::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const {
    NGRAPH_OP_SCOPE(v0_Constant_evaluate);
     auto output = outputs[0];
     output->write(get_data_ptr(), output->get_size_in_bytes());
     return true;
 }
 
-bool op::v0::Constant::has_evaluate() const {
+bool ov::op::v0::Constant::has_evaluate() const {
     NGRAPH_OP_SCOPE(v0_Constant_has_evaluate);
     return true;
 }
 
-bool op::v0::Constant::evaluate_lower(const HostTensorVector& outputs) const {
+bool ov::op::v0::Constant::evaluate_lower(const HostTensorVector& outputs) const {
     return evaluate(outputs, {});
 }
 
-bool op::v0::Constant::evaluate_upper(const HostTensorVector& outputs) const {
+bool ov::op::v0::Constant::evaluate_upper(const HostTensorVector& outputs) const {
     return evaluate(outputs, {});
 }
diff --git a/ngraph/core/src/op/and.cpp b/ngraph/core/src/op/logical_and.cpp
similarity index 96%
rename from ngraph/core/src/op/and.cpp
rename to ngraph/core/src/op/logical_and.cpp
index 174eb72653e523..30bd9e43a1b961 100644
--- a/ngraph/core/src/op/and.cpp
+++ b/ngraph/core/src/op/logical_and.cpp
@@ -2,9 +2,8 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 
-#include "ngraph/op/and.hpp"
-
 #include "itt.hpp"
+#include "ngraph/op/and.hpp"
 #include "ngraph/runtime/host_tensor.hpp"
 #include "ngraph/runtime/reference/and.hpp"
 #include "ngraph/validation_util.hpp"
@@ -12,7 +11,7 @@
 using namespace std;
 using namespace ngraph;
 
-NGRAPH_RTTI_DEFINITION(op::v1::LogicalAnd, "LogicalAnd", 1, util::BinaryElementwiseLogical);
+OPENVINO_RTTI_DEFINITION(ov::op::v1::LogicalAnd, "LogicalAnd", 1, util::BinaryElementwiseLogical);
 
 op::v1::LogicalAnd::LogicalAnd(const Output<Node>& arg0,
                                const Output<Node>& arg1,
diff --git a/ngraph/core/src/op/read_value.cpp b/ngraph/core/src/op/read_value.cpp
index 9b77406acd2eb5..41c2242b2e1ae5 100644
--- a/ngraph/core/src/op/read_value.cpp
+++ b/ngraph/core/src/op/read_value.cpp
@@ -11,9 +11,8 @@
 using namespace std;
 using namespace ngraph;
 
-NGRAPH_RTTI_DEFINITION(op::ReadValueBase, "ReadValueBase", 0);
-NGRAPH_RTTI_DEFINITION(op::v3::ReadValue, "ReadValue", 3);
-NGRAPH_RTTI_DEFINITION(op::v6::ReadValue, "ReadValue", 6);
+OPENVINO_RTTI_DEFINITION(ov::op::v3::ReadValue, "ReadValue", 3);
+OPENVINO_RTTI_DEFINITION(ov::op::v6::ReadValue, "ReadValue", 6);
 
 op::v3::ReadValue::ReadValue(const Output<Node>& init_value, const std::string& variable_id)
     : ReadValueBase({init_value}),
@@ -101,7 +100,7 @@ bool op::v6::ReadValue::evaluate(const HostTensorVector& outputs,
     bool use_context = var_value != variable_values.end() && !var_value->second->get_reset();
 
     // initial value (inputs[0]) is not supported, use zeros
-    auto zero_const = make_shared<op::Constant>(inputs[0]->get_element_type(), inputs[0]->get_shape(), 0);
+    auto zero_const = make_shared<ov::op::v0::Constant>(inputs[0]->get_element_type(), inputs[0]->get_shape(), 0);
     auto zero_tensor = make_shared<HostTensor>(zero_const);
     const auto& input_tensor = use_context ? var_value->second->get_value() : zero_tensor;
     outputs[0]->set_unary(input_tensor);
@@ -118,4 +117,4 @@ bool op::v6::ReadValue::has_evaluate() const {
 
 bool op::v6::ReadValue::constant_fold(OutputVector& output_values, const OutputVector& inputs_values) {
     return false;
-}
\ No newline at end of file
+}
diff --git a/ngraph/core/src/op/util/assign_base.cpp b/ngraph/core/src/op/util/assign_base.cpp
new file mode 100644
index 00000000000000..63e5ab51b8b4b8
--- /dev/null
+++ b/ngraph/core/src/op/util/assign_base.cpp
@@ -0,0 +1,7 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "openvino/op/util/assign_base.hpp"
+
+OPENVINO_RTTI_DEFINITION(ov::op::util::AssignBase, "AssignBase", 0);
diff --git a/ngraph/core/src/op/util/read_value_base.cpp b/ngraph/core/src/op/util/read_value_base.cpp
new file mode 100644
index 00000000000000..c2c00ef84c1ee2
--- /dev/null
+++ b/ngraph/core/src/op/util/read_value_base.cpp
@@ -0,0 +1,7 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "openvino/op/util/read_value_base.hpp"
+
+OPENVINO_RTTI_DEFINITION(ov::op::util::ReadValueBase, "ReadValueBase", 0);
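
For context, a minimal sketch of why this move stays source-compatible for downstream code, assuming the legacy ngraph headers alias the old names to the relocated classes (as the rewritten ngraph/op headers in this patch do). The smoke test below is hypothetical and not part of the patch; it only illustrates the aliasing pattern.

// Hypothetical compatibility check (not in this patch): with the legacy
// header forwarding to openvino/op/constant.hpp and aliasing the old name,
// the old and new spellings denote one and the same type, so RTTI, casts,
// and shared_ptr conversions written against either namespace keep working.
#include <memory>
#include <type_traits>
#include <vector>

#include "ngraph/op/constant.hpp"  // legacy header, now includes the ov:: definition

int main() {
    // The alias collapses both spellings to a single class.
    static_assert(std::is_same<ngraph::op::v0::Constant, ov::op::v0::Constant>::value,
                  "legacy alias must point at the relocated class");

    // Construct through the new namespace, consume through the old one.
    auto c = std::make_shared<ov::op::v0::Constant>(ngraph::element::f32,
                                                    ngraph::Shape{2, 2},
                                                    std::vector<float>{1.f, 2.f, 3.f, 4.f});
    std::shared_ptr<ngraph::op::v0::Constant> legacy = c;  // no cast needed
    return legacy->get_vector<float>().size() == 4 ? 0 : 1;
}

This is also why the RTTI declarations above switch from NGRAPH_RTTI_DEFINITION to OPENVINO_RTTI_DEFINITION without renaming the type strings: the registered names ("LogicalAnd", "ReadValue", ...) stay stable while the defining namespace moves.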