Skip to content

Commit

Permalink
Moved operations R-Z to ov namespace (#7365)
Browse files Browse the repository at this point in the history
* Moved operations M-P to ov namespace

* Fixed code style

* Fixed build

* Fixed comments

* Moved operations R-Z to ov namespace

* Fixed build

* Fixed comments

Co-authored-by: y <[email protected]>
  • Loading branch information
ilyachur and ilya-lavrenov authored Sep 7, 2021
1 parent c568791 commit 9e68a67
Show file tree
Hide file tree
Showing 161 changed files with 2,956 additions and 2,141 deletions.
72 changes: 2 additions & 70 deletions ngraph/core/include/ngraph/op/random_uniform.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -6,80 +6,12 @@

#include "ngraph/node.hpp"
#include "ngraph/op/op.hpp"
#include "openvino/op/random_uniform.hpp"

namespace ngraph {
namespace op {
namespace v8 {
/// \brief Tensor RandomUniform operation: generates a tensor of the requested
/// shape filled with values drawn uniformly from [min_val, max_val).
class NGRAPH_API RandomUniform : public Op {
public:
NGRAPH_RTTI_DECLARATION;

/// \brief Constructs an uninitialized RandomUniform operation.
RandomUniform() = default;

///
/// \brief Constructs a RandomUniform operation.
///
/// \param out_shape Node producing the tensor with output shape.
/// \param min_val Node producing the tensor with minimum value.
/// \param max_val Node producing the tensor with maximum value.
/// \param out_type Output type of the tensor.
/// \param global_seed Global seed value.
/// \param op_seed Operational seed value.
RandomUniform(const Output<Node>& out_shape,
const Output<Node>& min_val,
const Output<Node>& max_val,
const ngraph::element::Type& out_type,
uint64_t global_seed = 0,
uint64_t op_seed = 0);

/// \brief Validates the operation's inputs and infers the output shape and type.
void validate_and_infer_types() override;

/// \brief Visits this operation's attributes via the given visitor.
bool visit_attributes(AttributeVisitor& visitor) override;

/// \brief Creates a copy of this node bound to the given new inputs.
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;

/// \return Turns off constant folding for RandomUniform operation.
// Always returns false so the random generator is evaluated at runtime
// rather than being folded into a constant at graph-compilation time.
bool constant_fold(OutputVector& output_values, const OutputVector& inputs_values) override {
return false;
}

/// \return The output tensor type.
const ngraph::element::Type& get_out_type() const {
return m_output_type;
}
/// \brief Sets the element type of the generated output tensor.
void set_out_type(const ngraph::element::Type& output_type) {
m_output_type = output_type;
}

/// \return The global seed value.
uint64_t get_global_seed() const {
return m_global_seed;
}
/// \brief Sets the global seed value.
void set_global_seed(uint64_t seed) {
m_global_seed = seed;
}

/// \return The operational seed value.
uint64_t get_op_seed() const {
return m_op_seed;
}
/// \brief Sets the operational (per-operation) seed value.
void set_op_seed(uint64_t seed2) {
m_op_seed = seed2;
}

/// \brief Evaluates the operation on the given input tensors, writing the result to outputs.
bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;

/// \return True if this operation supports direct evaluate() execution.
bool has_evaluate() const override;

protected:
// Element type of the generated output tensor.
ngraph::element::Type m_output_type;
// Seeds controlling the pseudo-random sequence.
uint64_t m_global_seed;
uint64_t m_op_seed;

// NOTE(review): m_state is mutable and paired with a mutex, so it looks like
// persisted RNG state updated from const evaluate() calls, with m_state_mutex
// guarding concurrent evaluation — confirm against the implementation.
mutable std::mutex m_state_mutex;
mutable std::pair<uint64_t, uint64_t> m_state;
};
using ov::op::v8::RandomUniform;
} // namespace v8
} // namespace op
} // namespace ngraph
61 changes: 3 additions & 58 deletions ngraph/core/include/ngraph/op/range.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -6,70 +6,15 @@

#include "ngraph/node.hpp"
#include "ngraph/op/op.hpp"
#include "openvino/op/range.hpp"

namespace ngraph {
namespace op {
namespace v4 {
/// \brief Range operation, analogous to `arange()` in Numpy.
class NGRAPH_API Range : public Op {
public:
NGRAPH_RTTI_DECLARATION;
/// \brief Constructs an uninitialized range operation.
Range() = default;

/// \brief Constructs a range operation.
///
/// \param start The tensor producing the start value. Must be a scalar of numeric
/// element type.
/// \param stop The tensor producing the stop value. Must be a scalar of numeric
/// element type.
/// \param step The tensor producing the step value. Must be a scalar of numeric
/// element type.
/// \param output_type The type of the output.
Range(const Output<Node>& start, const Output<Node>& stop, const Output<Node>& step, element::Type output_type);

/// \brief Visits this operation's attributes via the given visitor.
bool visit_attributes(AttributeVisitor& visitor) override;
/// \brief Validates the operation's inputs and infers the output shape and type.
void validate_and_infer_types() override;

/// \brief Creates a copy of this node bound to the given new inputs.
virtual std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
/// \brief Evaluates the operation on the given input tensors, writing the result to outputs.
bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
/// \return True if this operation supports direct evaluate() execution.
bool has_evaluate() const override;
/// \brief Sets the element type of the produced sequence.
void set_output_type(element::Type output_type) {
m_output_type = output_type;
}
// Overload collision with method on Node
using Node::set_output_type;

private:
// Element type of the produced output sequence.
element::Type m_output_type;
};
using ov::op::v4::Range;
} // namespace v4
namespace v0 {
/// \brief Range operation, analogous to `range()` in Python.
class NGRAPH_API Range : public Op {
public:
NGRAPH_RTTI_DECLARATION;

/// \brief Constructs an uninitialized range operation.
Range() = default;

/// \brief Constructs a range operation.
///
/// \param start The tensor producing the start value. Must be a scalar of integer
/// element type, and same element type as `stop` and `step`.
/// \param stop The tensor producing the stop value. Must be a scalar of integer
/// element type, and same element type as `start` and `step`.
/// \param step The tensor producing the step value. Must be a scalar of integer
/// element type, and same element type as `start` and `stop`.
Range(const Output<Node>& start, const Output<Node>& stop, const Output<Node>& step);

/// \brief Visits this operation's attributes via the given visitor.
bool visit_attributes(AttributeVisitor& visitor) override;
/// \brief Validates the operation's inputs and infers the output shape and type.
void validate_and_infer_types() override;

/// \brief Creates a copy of this node bound to the given new inputs.
virtual std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
/// \brief Evaluates the operation on the given input tensors, writing the result to outputs.
bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
/// \return True if this operation supports direct evaluate() execution.
bool has_evaluate() const override;
};
using ov::op::v0::Range;
} // namespace v0
using v0::Range;
} // namespace op
Expand Down
28 changes: 2 additions & 26 deletions ngraph/core/include/ngraph/op/reduce_l1.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -5,36 +5,12 @@
#pragma once

#include "ngraph/op/util/arithmetic_reductions_keep_dims.hpp"
#include "openvino/op/reduce_l1.hpp"

namespace ngraph {
namespace op {
namespace v4 {
/// \brief Reduction operation using L1 norm: L1(x) = sum(abs(x)) if all dimensions are
/// specified for the normalisation.
///
/// Reduces the tensor, eliminating the specified reduction axes by taking the L1-norm.
class NGRAPH_API ReduceL1 : public util::ArithmeticReductionKeepDims {
public:
NGRAPH_RTTI_DECLARATION;
/// \brief Constructs a reduce L1-norm operation.
ReduceL1() = default;
/// \brief Constructs a reduce L1-norm operation.
///
/// \param arg The tensor to be reduced.
/// \param reduction_axes The axis positions (0-based) to be eliminated.
/// \param keep_dims If set to true it holds axes that are used for reduction.
ReduceL1(const Output<Node>& arg, const Output<Node>& reduction_axes, bool keep_dims = false);

/// \return The default value for Reduce.
// get_default_value is deprecated on the base class; suppress the warning here.
NGRAPH_SUPPRESS_DEPRECATED_START
virtual std::shared_ptr<Node> get_default_value() const override;
NGRAPH_SUPPRESS_DEPRECATED_END

/// \brief Creates a copy of this node bound to the given new inputs.
virtual std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;

/// \brief Evaluates the reduction on the given input tensors, writing the result to outputs.
bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
/// \return True if this operation supports direct evaluate() execution.
bool has_evaluate() const override;
};
using ov::op::v4::ReduceL1;
} // namespace v4
} // namespace op
} // namespace ngraph
27 changes: 2 additions & 25 deletions ngraph/core/include/ngraph/op/reduce_l2.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -5,35 +5,12 @@
#pragma once

#include "ngraph/op/util/arithmetic_reductions_keep_dims.hpp"
#include "openvino/op/reduce_l2.hpp"

namespace ngraph {
namespace op {
namespace v4 {
/// \brief Reduction operation using L2 norm:
///
/// Reduces the tensor, eliminating the specified reduction axes by taking the L2-norm.
class NGRAPH_API ReduceL2 : public util::ArithmeticReductionKeepDims {
public:
NGRAPH_RTTI_DECLARATION;
/// \brief Constructs a reduce L2-norm operation.
ReduceL2() = default;
/// \brief Constructs a reduce L2-norm operation.
///
/// \param arg The tensor to be reduced.
/// \param reduction_axes The axis positions (0-based) to be eliminated.
/// \param keep_dims If set to true it holds axes that are used for reduction.
ReduceL2(const Output<Node>& arg, const Output<Node>& reduction_axes, bool keep_dims = false);

/// \return The default value for Reduce.
// get_default_value is deprecated on the base class; suppress the warning here.
NGRAPH_SUPPRESS_DEPRECATED_START
virtual std::shared_ptr<Node> get_default_value() const override;
NGRAPH_SUPPRESS_DEPRECATED_END

/// \brief Creates a copy of this node bound to the given new inputs.
virtual std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;

/// \brief Evaluates the reduction on the given input tensors, writing the result to outputs.
bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
/// \return True if this operation supports direct evaluate() execution.
bool has_evaluate() const override;
};
using ov::op::v4::ReduceL2;
} // namespace v4
} // namespace op
} // namespace ngraph
23 changes: 2 additions & 21 deletions ngraph/core/include/ngraph/op/reduce_logical_and.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -5,31 +5,12 @@
#pragma once

#include "ngraph/op/util/logical_reduction_keep_dims.hpp"
#include "openvino/op/reduce_logical_and.hpp"

namespace ngraph {
namespace op {
namespace v1 {
/// \brief Performs a reduction using "logical and"
///
/// The reduction is performed over slices of the first input. The slices shape depends
/// on the values passed to the second input - the axes.
class NGRAPH_API ReduceLogicalAnd : public util::LogicalReductionKeepDims {
public:
NGRAPH_RTTI_DECLARATION;
/// \brief Constructs an uninitialized ReduceLogicalAnd node.
ReduceLogicalAnd() = default;
/// \brief Constructs a ReduceLogicalAnd node.
///
/// \param data - The input tensor with data to be reduced
/// \param reduction_axes - The input tensor with information about axes over which
/// the first tensor should be sliced prior to the reduction operation
/// \param keep_dims - Indicates if the axes used for reduction should be held/kept
ReduceLogicalAnd(const Output<Node>& data, const Output<Node>& reduction_axes, const bool keep_dims = false);

/// \brief Creates a copy of this node bound to the given new inputs.
virtual std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;

/// \brief Evaluates the reduction on the given input tensors, writing the result to outputs.
bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
/// \return True if this operation supports direct evaluate() execution.
bool has_evaluate() const override;
};
using ov::op::v1::ReduceLogicalAnd;
} // namespace v1
} // namespace op
} // namespace ngraph
23 changes: 2 additions & 21 deletions ngraph/core/include/ngraph/op/reduce_logical_or.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -5,31 +5,12 @@
#pragma once

#include "ngraph/op/util/logical_reduction_keep_dims.hpp"
#include "openvino/op/reduce_logical_or.hpp"

namespace ngraph {
namespace op {
namespace v1 {
/// \brief Performs a reduction using "logical or"
///
/// The reduction is performed over slices of the first input. The slices shape depends
/// on the values passed to the second input - the axes.
class NGRAPH_API ReduceLogicalOr : public util::LogicalReductionKeepDims {
public:
NGRAPH_RTTI_DECLARATION;
/// \brief Constructs an uninitialized ReduceLogicalOr node.
ReduceLogicalOr() = default;
/// \brief Constructs a ReduceLogicalOr node.
///
/// \param data - The input tensor with data to be reduced
/// \param reduction_axes - The input tensor with information about axes over which
/// the first tensor should be sliced prior to the reduction operation
/// \param keep_dims - Indicates if the axes used for reduction should be held/kept
ReduceLogicalOr(const Output<Node>& data, const Output<Node>& reduction_axes, const bool keep_dims = false);

/// \brief Creates a copy of this node bound to the given new inputs.
virtual std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;

/// \brief Evaluates the reduction on the given input tensors, writing the result to outputs.
bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
/// \return True if this operation supports direct evaluate() execution.
bool has_evaluate() const override;
};
using ov::op::v1::ReduceLogicalOr;
} // namespace v1
} // namespace op
} // namespace ngraph
17 changes: 2 additions & 15 deletions ngraph/core/include/ngraph/op/reduce_mean.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -6,25 +6,12 @@

#include "ngraph/axis_set.hpp"
#include "ngraph/op/util/arithmetic_reductions_keep_dims.hpp"
#include "openvino/op/reduce_mean.hpp"

namespace ngraph {
namespace op {
namespace v1 {
/// \brief Arithmetic-mean reduction operation.
///
/// Reduces the tensor, eliminating the specified reduction axes by taking the mean.
class NGRAPH_API ReduceMean : public util::ArithmeticReductionKeepDims {
public:
NGRAPH_RTTI_DECLARATION;
/// \brief Constructs an uninitialized mean-reduction operation.
ReduceMean() = default;

/// \brief Constructs a mean-reduction operation.
///
/// \param arg The tensor to be summed.
/// \param reduction_axes The axis positions (0-based) to be eliminated.
/// \param keep_dims If set to 1 it holds axes that are used for reduction.
ReduceMean(const Output<Node>& arg, const Output<Node>& reduction_axes, bool keep_dims = false);

/// \brief Creates a copy of this node bound to the given new inputs.
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;

/// \brief Evaluates the reduction on the given input tensors, writing the result to outputs.
bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
/// \return True if this operation supports direct evaluate() execution.
bool has_evaluate() const override;
};
using ov::op::v1::ReduceMean;
} // namespace v1
} // namespace op
} // namespace ngraph
29 changes: 2 additions & 27 deletions ngraph/core/include/ngraph/op/reduce_prod.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -5,37 +5,12 @@
#pragma once

#include "ngraph/op/util/arithmetic_reductions_keep_dims.hpp"
#include "openvino/op/reduce_prod.hpp"

namespace ngraph {
namespace op {
namespace v1 {
/// \brief Product reduction operation.
///
/// Reduces the tensor, eliminating the specified reduction axes by taking the product.
class NGRAPH_API ReduceProd : public util::ArithmeticReductionKeepDims {
public:
NGRAPH_RTTI_DECLARATION;
/// \brief Constructs a product reduction operation.
ReduceProd() = default;
/// \brief Constructs a product reduction operation.
///
/// \param arg The tensor to be reduced.
/// \param reduction_axes The axis positions (0-based) to be eliminated.
/// \param keep_dims If set to true it holds axes that are used for reduction.
ReduceProd(const Output<Node>& arg, const Output<Node>& reduction_axes, bool keep_dims = false);

/// \return The default value for Product.
// get_default_value is deprecated on the base class; suppress the warning here.
NGRAPH_SUPPRESS_DEPRECATED_START
virtual std::shared_ptr<Node> get_default_value() const override;
NGRAPH_SUPPRESS_DEPRECATED_END

/// \brief Creates a copy of this node bound to the given new inputs.
virtual std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;

/// \brief Evaluates the reduction on the given input tensors, writing the result to outputs.
bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
/// \return True if this operation supports direct evaluate() execution.
bool has_evaluate() const override;
// NOTE(review): evaluate_lower/evaluate_upper appear to implement value-bound
// propagation (lower/upper estimates of the output) — confirm against Node's
// documentation for these overrides.
bool evaluate_lower(const HostTensorVector& outputs) const override;
bool evaluate_upper(const HostTensorVector& outputs) const override;
};
using ov::op::v1::ReduceProd;
} // namespace v1
} // namespace op
} // namespace ngraph
Loading

0 comments on commit 9e68a67

Please sign in to comment.