Moved operations G-L to ov namespace (openvinotoolkit#7344)
* Moved ngraph::Node to ov namespace

* Fixed code style

* Fixed VPU

* Fixed GNA

* Fixed tests

* Added aliases for backward compatibility

* Fix clDNN

* Try to fix build

* Fixed comment

* Renamed RTTI macros

* Moved op utils to ov namespace

* Fixed ngraph library build

* Fixed unit-tests

* Changed src folder

* Fixed recurrent_sequence

* Changed low latency

* Fixed serialize

* Fixed ieFuncTests

* Try to fix windows

* Remove custom operator<< from tests

* Fixed build

* Moved operations from A to ov namespace

* Moved operations from B and C to ov namespace

* Moved operations D-F to ov namespace

* Update ngraph/core/src/op/embeddingbag_offsets_sum.cpp

Co-authored-by: Katarzyna Mitrus <[email protected]>

* Update ngraph/core/src/op/embeddingbag_packedsum.cpp

Co-authored-by: Katarzyna Mitrus <[email protected]>

* Fixed RTTI

* Moved operations G-L to ov namespace

* Fixed RTTI

Co-authored-by: Ilya Lavrenov <[email protected]>
Co-authored-by: Katarzyna Mitrus <[email protected]>
3 people authored and akuporos committed Sep 6, 2021
1 parent 726e873 commit d0a3660
Showing 87 changed files with 2,728 additions and 2,216 deletions.
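
The common pattern across the changed headers is the one named by the "Added aliases for backward compatibility" commit above: each operation's class definition now lives in an openvino/op/*.hpp header inside the ov::op namespace, and the legacy ngraph/op/*.hpp header shrinks to an include plus a using-declaration. The standalone snippet below is not part of the diff; it sketches the pattern with a hypothetical operation "Foo" so that it stays self-contained.

// Minimal sketch of the aliasing pattern, using a hypothetical op "Foo"
// (not an OpenVINO operation) to keep the example self-contained.
#include <type_traits>

namespace ov {
namespace op {
namespace v1 {
class Foo {};  // definition now lives here (think openvino/op/foo.hpp)
}  // namespace v1
}  // namespace op
}  // namespace ov

namespace ngraph {
namespace op {
namespace v1 {
using ov::op::v1::Foo;  // backward-compatibility alias (think ngraph/op/foo.hpp)
}  // namespace v1
}  // namespace op
}  // namespace ngraph

// Code written against the old name keeps compiling and denotes the same type.
static_assert(std::is_same<ngraph::op::v1::Foo, ov::op::v1::Foo>::value,
              "old and new spellings name one type");

int main() {
    ngraph::op::v1::Foo foo;  // still valid after the move
    (void)foo;
    return 0;
}

Because the alias is a using-declaration rather than a derived class, type identity is preserved: RTTI checks and pointer casts behave the same whichever namespace a caller spells.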
9 changes: 8 additions & 1 deletion ngraph/core/include/ngraph/graph_util.hpp
@@ -18,11 +18,18 @@
#include "ngraph/function.hpp"
#include "ngraph/node.hpp"

namespace ov {
namespace op {
namespace v0 {
class Parameter;
}
} // namespace op
} // namespace ov
namespace ngraph {

namespace op {
namespace v0 {
class Parameter;
using ov::op::v0::Parameter;
}
} // namespace op

9 changes: 8 additions & 1 deletion ngraph/core/include/ngraph/node.hpp
@@ -38,6 +38,13 @@
#include "ngraph/variant.hpp"
#include "openvino/core/node.hpp"

namespace ov {
namespace op {
namespace v0 {
class Result;
}
} // namespace op
} // namespace ov
namespace ngraph {

using ov::Node;
@@ -52,7 +59,7 @@ using HostTensorVector = std::vector<HostTensorPtr>;
namespace op {

namespace v0 {
class Result;
using ov::op::v0::Result;
}
} // namespace op

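
graph_util.hpp and node.hpp above show a second flavour of the same change: they only need a declaration of Parameter and Result (the originals forward-declared them too), so the forward declaration moves into the ov::op::v0 namespace and the ngraph side re-exports it. Old code that merely stores a pointer or reference to, say, ngraph::op::v0::Result keeps compiling without the full header. The snippet below is illustrative only, not the real headers; it shows why an incomplete type is enough here.

#include <memory>

namespace ov {
namespace op {
namespace v0 {
class Result;  // forward declaration only; the definition lives in another header
}  // namespace v0
}  // namespace op
}  // namespace ov

namespace ngraph {
namespace op {
namespace v0 {
using ov::op::v0::Result;  // the old spelling now names the (still incomplete) ov type
}  // namespace v0
}  // namespace op
}  // namespace ngraph

// Holding a smart pointer to an incomplete type is fine as long as this
// translation unit never dereferences or deletes a non-null instance.
struct NodeLike {
    std::shared_ptr<ngraph::op::v0::Result> result;
};

int main() {
    NodeLike n;
    return n.result ? 1 : 0;  // default-constructed, so this returns 0
}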
66 changes: 4 additions & 62 deletions ngraph/core/include/ngraph/op/gather.hpp
@@ -5,76 +5,18 @@
#pragma once

#include "ngraph/op/util/gather_base.hpp"
#include "openvino/op/gather.hpp"

namespace ngraph {
namespace op {
namespace v1 {
/// \brief Gather slices from axis of data according to indices
class NGRAPH_API Gather : public op::util::GatherBase {
public:
NGRAPH_RTTI_DECLARATION;
static const int64_t AXIS_NOT_SET_VALUE = std::numeric_limits<int64_t>::max();
Gather() = default;
/// \param data The tensor from which slices are gathered
/// \param indices Tensor with indexes to gather
/// \param axis The tensor is a dimension index to gather data from
Gather(const Output<Node>& params, const Output<Node>& indices, const Output<Node>& axis);

bool visit_attributes(AttributeVisitor& visitor) override;
int64_t get_axis() const override;

std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
};
using ov::op::v1::Gather;
} // namespace v1

namespace v7 {
/// \brief Gather slices from axis of data according to indices
class NGRAPH_API Gather : public op::util::GatherBase {
public:
NGRAPH_RTTI_DECLARATION;
Gather() = default;

/// \param data The tensor from which slices are gathered
/// \param indices Tensor with indexes to gather
/// \param axis The tensor is a dimension index to gather data from
/// \param batch_dims The number of batch dimension in data and indices tensors.
/// If batch_dims = 0 Gather v7 is identical to Gather v1.
Gather(const Output<Node>& data,
const Output<Node>& indices,
const Output<Node>& axis,
const int64_t batch_dims = 0);

bool visit_attributes(AttributeVisitor& visitor) override;
void validate_and_infer_types() override;
int64_t get_batch_dims() const;

std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
};
using ov::op::v7::Gather;
} // namespace v7

namespace v8 {
/// \brief Gather slices from axis of data according to indices. Negative indices
/// are supported and indicate reverse indexing from the end
class NGRAPH_API Gather : public op::util::GatherBase {
public:
NGRAPH_RTTI_DECLARATION;
Gather() = default;

/// \param data The tensor from which slices are gathered
/// \param indices Tensor with indexes to gather
/// \param axis The tensor is a dimension index to gather data from
/// \param batch_dims The number of batch dimension in data and indices tensors.
Gather(const Output<Node>& data,
const Output<Node>& indices,
const Output<Node>& axis,
const int64_t batch_dims = 0);

bool visit_attributes(AttributeVisitor& visitor) override;
void validate_and_infer_types() override;
int64_t get_batch_dims() const;

std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
};
using ov::op::v8::Gather;
} // namespace v8
} // namespace op
} // namespace ngraph
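
For gather.hpp all three class bodies (v1, v7, v8) disappear in favour of the definitions already provided by openvino/op/gather.hpp, which is now included at the top. The sketch below is not part of this commit; it assumes the usual ngraph Parameter and Constant helpers and only illustrates that downstream graph-building code is unaffected by the move.

#include <memory>
#include <type_traits>
#include "ngraph/op/constant.hpp"
#include "ngraph/op/gather.hpp"
#include "ngraph/op/parameter.hpp"

int main() {
    using namespace ngraph;

    // The alias makes the old and new names interchangeable.
    static_assert(std::is_same<op::v1::Gather, ov::op::v1::Gather>::value,
                  "ngraph::op::v1::Gather is ov::op::v1::Gather");

    // Pre-existing graph-building code keeps compiling unchanged.
    auto data    = std::make_shared<op::v0::Parameter>(element::f32, Shape{3, 4});
    auto indices = op::v0::Constant::create(element::i32, Shape{2}, {0, 2});
    auto axis    = op::v0::Constant::create(element::i32, Shape{}, {0});
    auto gather  = std::make_shared<op::v1::Gather>(data, indices, axis);

    return gather->get_axis() == 0 ? 0 : 1;
}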
27 changes: 2 additions & 25 deletions ngraph/core/include/ngraph/op/gather_elements.hpp
@@ -5,35 +5,12 @@
#pragma once

#include "ngraph/op/op.hpp"
#include "openvino/op/gather_elements.hpp"

namespace ngraph {
namespace op {
namespace v6 {
/// \brief GatherElements operation
///
class NGRAPH_API GatherElements : public Op {
public:
NGRAPH_RTTI_DECLARATION;
GatherElements() = default;

/// \brief Constructs a GatherElements operation.
///
/// \param data Node producing data that are gathered
/// \param indices Node producing indices by which the operation gathers elements
/// \param axis specifies axis along which indices are specified
GatherElements(const Output<Node>& data, const Output<Node>& indices, const int64_t axis);

void validate_and_infer_types() override;
bool visit_attributes(AttributeVisitor& visitor) override;
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;

int64_t get_axis() const {
return m_axis;
}

private:
int64_t m_axis;
};
using ov::op::v6::GatherElements;
} // namespace v6
} // namespace op
} // namespace ngraph
28 changes: 2 additions & 26 deletions ngraph/core/include/ngraph/op/gather_nd.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -5,36 +5,12 @@
#pragma once

#include "ngraph/op/op.hpp"
#include "openvino/op/gather_nd.hpp"

namespace ngraph {
namespace op {
namespace v5 {
/// \brief GatherND operation
///
class NGRAPH_API GatherND : public Op {
public:
NGRAPH_RTTI_DECLARATION;
GatherND() = default;

/// \brief Constructs a GatherND operation.
///
/// \param data Node producing data that are gathered
/// \param indices Node producing indices by which the operation gathers elements
/// or slices from data
/// \param batch_dims Specifies a number of batch dimensions
GatherND(const Output<Node>& data, const Output<Node>& indices, const size_t batch_dims = 0);

void validate_and_infer_types() override;
bool visit_attributes(AttributeVisitor& visitor) override;
virtual std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;

size_t get_batch_dims() const {
return m_batch_dims;
}

private:
size_t m_batch_dims;
};
using ov::op::v5::GatherND;
} // namespace v5
} // namespace op
} // namespace ngraph
26 changes: 2 additions & 24 deletions ngraph/core/include/ngraph/op/gather_tree.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -5,34 +5,12 @@
#pragma once

#include "ngraph/op/op.hpp"
#include "openvino/op/gather_tree.hpp"

namespace ngraph {
namespace op {
namespace v1 {
/// \brief Generates the complete beams from the ids per each step and the parent beam
/// ids.
class NGRAPH_API GatherTree : public Op {
public:
NGRAPH_RTTI_DECLARATION;

GatherTree() = default;
/// \param step_ids Tensor of shape [MAX_TIME, BATCH_SIZE, BEAM_WIDTH] with
/// indices from per each step
/// \param parent_idx Tensor of shape [MAX_TIME, BATCH_SIZE, BEAM_WIDTH] with
/// parent beam indices
/// \param max_seq_len Tensor of shape [BATCH_SIZE] with maximum lengths for each
/// sequence in the batch
/// \param end_token Tensor of shape [MAX_TIME, BATCH_SIZE, BEAM_WIDTH]
GatherTree(const Output<Node>& step_ids,
const Output<Node>& parent_idx,
const Output<Node>& max_seq_len,
const Output<Node>& end_token);

bool visit_attributes(AttributeVisitor& visitor) override;
void validate_and_infer_types() override;

virtual std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
};
using ov::op::v1::GatherTree;
} // namespace v1
} // namespace op
} // namespace ngraph
70 changes: 4 additions & 66 deletions ngraph/core/include/ngraph/op/gelu.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -7,81 +7,19 @@
#include "ngraph/node.hpp"
#include "ngraph/op/op.hpp"
#include "ngraph/op/util/unary_elementwise_arithmetic.hpp"
#include "openvino/op/gelu.hpp"

namespace ngraph {
namespace op {
namespace v0 {
/// \brief Gaussian Error Linear Unit
/// f(x) = 0.5 * x * (1 + erf( x / sqrt(2) )
class NGRAPH_API Gelu : public Op {
public:
NGRAPH_RTTI_DECLARATION;

Gelu();
/// \brief Constructs a Gelu operation.
///
/// \param data Input tensor
Gelu(const Output<Node>& data);

bool visit_attributes(AttributeVisitor& visitor) override;

void validate_and_infer_types() override;

std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
};
using ov::op::v0::Gelu;
} // namespace v0
using v0::Gelu;

/// \brief Specifies the approximation to calculate Gelu
enum class GeluApproximationMode { TANH, ERF };
NGRAPH_API std::ostream& operator<<(std::ostream& s, const GeluApproximationMode& type);
using ov::op::GeluApproximationMode;

namespace v7 {
/// \brief Gaussian Error Linear Unit
/// f(x) = 0.5 * x * (1 + erf( x / sqrt(2) ) for "approximation" = "erf"
/// f(x) = 0.5 * x * (1 + tanh([sqrt(2 / pi)] * [x + 0.044715^3]) for "approximation" =
/// "tanh"
class NGRAPH_API Gelu : public util::UnaryElementwiseArithmetic {
public:
NGRAPH_RTTI_DECLARATION;

Gelu() = default;
/// \brief Constructs a Gelu operation.
///
/// \param data Input tensor
/// \param mode Approximation mode
Gelu(const Output<Node>& data, GeluApproximationMode mode = GeluApproximationMode::ERF);

bool visit_attributes(AttributeVisitor& visitor) override;

void validate_and_infer_types() override;

bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
bool has_evaluate() const override;

std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;

GeluApproximationMode get_approximation_mode() const;

private:
GeluApproximationMode m_approximation_mode = GeluApproximationMode::ERF;
};
using ov::op::v7::Gelu;
} // namespace v7
} // namespace op
} // namespace ngraph

namespace ov {
template <>
class NGRAPH_API AttributeAdapter<ngraph::op::GeluApproximationMode>
: public EnumAttributeAdapterBase<ngraph::op::GeluApproximationMode> {
public:
AttributeAdapter(ngraph::op::GeluApproximationMode& value)
: EnumAttributeAdapterBase<ngraph::op::GeluApproximationMode>(value) {}

static constexpr DiscreteTypeInfo type_info{"AttributeAdapter<op::GeluApproximationMode>", 0};
const DiscreteTypeInfo& get_type_info() const override {
return type_info;
}
};

} // namespace ov
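
gelu.hpp is the one header in this range that also carried a free enum (GeluApproximationMode) and an AttributeAdapter specialization for it; both are removed here, with only `using ov::op::GeluApproximationMode;` left behind. Since the ngraph name now aliases the ov enum, the adapter specialization for the ov type presumably covers it. The sketch below is not part of the commit; it only checks that call sites spelling the enum through the ngraph namespace still build the same node.

#include <memory>
#include <type_traits>
#include "ngraph/op/gelu.hpp"
#include "ngraph/op/parameter.hpp"

int main() {
    using namespace ngraph;

    // The enum alias keeps the old spelling valid...
    static_assert(std::is_same<op::GeluApproximationMode, ov::op::GeluApproximationMode>::value,
                  "enum re-exported, not duplicated");

    // ...so passing a mode through the legacy names builds the same v7 Gelu node.
    auto data = std::make_shared<op::v0::Parameter>(element::f32, Shape{2, 3});
    auto gelu = std::make_shared<op::v7::Gelu>(data, op::GeluApproximationMode::TANH);

    return gelu->get_approximation_mode() == op::GeluApproximationMode::TANH ? 0 : 1;
}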
21 changes: 2 additions & 19 deletions ngraph/core/include/ngraph/op/greater.hpp
@@ -5,29 +5,12 @@
#pragma once

#include "ngraph/op/util/binary_elementwise_comparison.hpp"
#include "openvino/op/greater.hpp"

namespace ngraph {
namespace op {
namespace v1 {
/// \brief Elementwise greater-than operation.
class NGRAPH_API Greater : public util::BinaryElementwiseComparison {
public:
NGRAPH_RTTI_DECLARATION;
/// \brief Constructs a greater-than operation.
Greater() : util::BinaryElementwiseComparison(AutoBroadcastSpec::NUMPY) {}
/// \brief Constructs a greater-than operation.
///
/// \param arg0 Node that produces the first input tensor.
/// \param arg1 Node that produces the second input tensor.
/// \param auto_broadcast Auto broadcast specification
Greater(const Output<Node>& arg0,
const Output<Node>& arg1,
const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY));

virtual std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
bool has_evaluate() const override;
};
using ov::op::v1::Greater;
} // namespace v1
} // namespace op
} // namespace ngraph
21 changes: 2 additions & 19 deletions ngraph/core/include/ngraph/op/greater_eq.hpp
@@ -5,29 +5,12 @@
#pragma once

#include "ngraph/op/util/binary_elementwise_comparison.hpp"
#include "openvino/op/greater_eq.hpp"

namespace ngraph {
namespace op {
namespace v1 {
/// \brief Elementwise greater-than-or-equal operation.
class NGRAPH_API GreaterEqual : public util::BinaryElementwiseComparison {
public:
NGRAPH_RTTI_DECLARATION;
/// \brief Constructs a greater-than-or-equal operation.
GreaterEqual() : util::BinaryElementwiseComparison(AutoBroadcastSpec::NUMPY) {}
/// \brief Constructs a greater-than-or-equal operation.
///
/// \param arg0 Node that produces the first input tensor.
/// \param arg1 Node that produces the second input tensor.
/// \param auto_broadcast Auto broadcast specification
GreaterEqual(const Output<Node>& arg0,
const Output<Node>& arg1,
const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY));

virtual std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
bool has_evaluate() const override;
};
using ov::op::v1::GreaterEqual;
} // namespace v1
} // namespace op
} // namespace ngraph