Moved operations M-P to ov namespace
ilyachur committed Sep 3, 2021
1 parent fd1552a commit c33e418
Showing 81 changed files with 2,082 additions and 1,619 deletions.
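
Every header in this commit follows the same pattern: the class definition now lives in the ov namespace, and the legacy ngraph header shrinks to an include of the matching openvino header plus a type alias. Below is a minimal sketch of what that means for downstream code. It assumes the headers from this commit and the pre-existing nGraph Parameter/Shape API; the shapes are made up for illustration only.

#include <memory>
#include <type_traits>

#include "ngraph/op/matmul.hpp"
#include "ngraph/op/parameter.hpp"

int main() {
    // The legacy name is now an alias for the ov type, so both spellings
    // refer to the same class.
    static_assert(std::is_same<ngraph::op::v0::MatMul, ov::op::v0::MatMul>::value,
                  "ngraph::op::v0::MatMul aliases ov::op::v0::MatMul");

    // Legacy-style construction keeps compiling through the alias.
    auto A = std::make_shared<ngraph::op::v0::Parameter>(ngraph::element::f32, ngraph::Shape{2, 3});
    auto B = std::make_shared<ngraph::op::v0::Parameter>(ngraph::element::f32, ngraph::Shape{3, 4});
    auto matmul = std::make_shared<ngraph::op::v0::MatMul>(A, B, false, false);
    return matmul->get_output_size() == 1 ? 0 : 1;
}

The same check applies to every alias introduced in the headers below.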
40 changes: 2 additions & 38 deletions ngraph/core/include/ngraph/op/matmul.hpp
@@ -6,48 +6,12 @@

#include "ngraph/node.hpp"
#include "ngraph/op/op.hpp"
#include "openvino/op/matmul.hpp"

namespace ngraph {
namespace op {
namespace v0 {
/// \brief Operator performing Matrix Multiplication.
class NGRAPH_API MatMul : public Op {
public:
NGRAPH_RTTI_DECLARATION;
MatMul() = default;
/// \brief Constructs a Matrix Multiplication operation.
///
/// \param A Matrix A
/// \param B Matrix B
/// \param transpose_a If matrix A should be transposed.
/// \param transpose_b If matrix B should be transposed.
MatMul(const Output<Node>& A, const Output<Node>& B, const bool& transpose_a = 0, const bool& transpose_b = 0);

bool visit_attributes(AttributeVisitor& visitor) override;
void validate_and_infer_types() override;

virtual std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;

bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
bool has_evaluate() const override;

bool get_transpose_a() const {
return m_transpose_a;
}
bool get_transpose_b() const {
return m_transpose_b;
}
void set_transpose_a(bool transpose_a) {
m_transpose_a = transpose_a;
}
void set_transpose_b(bool transpose_b) {
m_transpose_b = transpose_b;
}

private:
bool m_transpose_a;
bool m_transpose_b;
};
using ov::op::v0::MatMul;
} // namespace v0
using v0::MatMul;
} // namespace op
82 changes: 2 additions & 80 deletions ngraph/core/include/ngraph/op/matrix_nms.hpp
@@ -5,90 +5,12 @@
#pragma once

#include "ngraph/op/util/nms_base.hpp"
#include "openvino/op/matrix_nms.hpp"

namespace ngraph {
namespace op {
namespace v8 {
/// \brief MatrixNms operation
///
class NGRAPH_API MatrixNms : public util::NmsBase {
public:
NGRAPH_RTTI_DECLARATION;

enum class DecayFunction { GAUSSIAN, LINEAR };

/// \brief Structure that specifies attributes of the operation
struct Attributes {
// specifies order of output elements
SortResultType sort_result_type = SortResultType::NONE;
// specifies whether it is necessary to sort selected boxes across batches or
// not
bool sort_result_across_batch = false;
// specifies the output tensor type
ngraph::element::Type output_type = ngraph::element::i64;
// specifies minimum score to consider box for the processing
float score_threshold = 0.0f;
// specifies maximum number of boxes to be selected per class, -1 meaning to
// keep all boxes
int nms_top_k = -1;
// specifies maximum number of boxes to be selected per batch element, -1
// meaning to keep all boxes
int keep_top_k = -1;
// specifies the background class id, -1 meaning to keep all classes
int background_class = -1;
// specifies decay function used to decay scores
DecayFunction decay_function = DecayFunction::LINEAR;
// specifies gaussian_sigma parameter for gaussian decay_function
float gaussian_sigma = 2.0f;
// specifies threshold to filter out boxes with low confidence score after
// decaying
float post_threshold = 0.0f;
// specifies whether boxes are normalized or not
bool normalized = true;
};

MatrixNms();

/// \brief Constructs a MatrixNms operation
///
/// \param boxes Node producing the box coordinates
/// \param scores Node producing the box scores
/// \param attrs Attributes of the operation
MatrixNms(const Output<Node>& boxes, const Output<Node>& scores, const Attributes& attrs);

bool visit_attributes(AttributeVisitor& visitor) override;

std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;

/// \brief Returns attributes of the operation MatrixNms
const Attributes& get_attrs() const {
return m_attrs;
}

protected:
Attributes m_attrs;

void validate() override;
};
using ov::op::v8::MatrixNms;
} // namespace v8
} // namespace op
NGRAPH_API
std::ostream& operator<<(std::ostream& s, const op::v8::MatrixNms::DecayFunction& type);
} // namespace ngraph

namespace ov {

template <>
class NGRAPH_API AttributeAdapter<ngraph::op::v8::MatrixNms::DecayFunction>
: public EnumAttributeAdapterBase<ngraph::op::v8::MatrixNms::DecayFunction> {
public:
AttributeAdapter(ngraph::op::v8::MatrixNms::DecayFunction& value)
: EnumAttributeAdapterBase<ngraph::op::v8::MatrixNms::DecayFunction>(value) {}

static constexpr DiscreteTypeInfo type_info{"AttributeAdapter<op::v8::MatrixNms::DecayFunction>", 1};
const DiscreteTypeInfo& get_type_info() const override {
return type_info;
}
};

} // namespace ov
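
The MatrixNms move also carries the nested DecayFunction enum, the Attributes struct, and the AttributeAdapter specialization into ov, with the ngraph names kept usable through the alias. A sketch of constructing the op through the legacy name follows; the input shapes and attribute values are illustrative, not taken from the commit.

#include <memory>

#include "ngraph/op/matrix_nms.hpp"
#include "ngraph/op/parameter.hpp"

int main() {
    using MatrixNms = ngraph::op::v8::MatrixNms;  // alias for ov::op::v8::MatrixNms

    // boxes: [batch, num_boxes, 4]; scores: [batch, num_classes, num_boxes]
    auto boxes  = std::make_shared<ngraph::op::v0::Parameter>(ngraph::element::f32, ngraph::Shape{1, 100, 4});
    auto scores = std::make_shared<ngraph::op::v0::Parameter>(ngraph::element::f32, ngraph::Shape{1, 5, 100});

    MatrixNms::Attributes attrs;                                // defaults as listed in the header above
    attrs.decay_function = MatrixNms::DecayFunction::GAUSSIAN;  // override the LINEAR default
    attrs.nms_top_k = 10;                                       // keep at most 10 boxes per class

    auto nms = std::make_shared<MatrixNms>(boxes, scores, attrs);
    return nms != nullptr ? 0 : 1;
}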
19 changes: 2 additions & 17 deletions ngraph/core/include/ngraph/op/max.hpp
@@ -6,27 +6,12 @@

#include "ngraph/op/util/arithmetic_reduction.hpp"
#include "ngraph/op/util/arithmetic_reductions_keep_dims.hpp"
#include "openvino/op/max.hpp"

namespace ngraph {
namespace op {
namespace v1 {
class NGRAPH_API ReduceMax : public util::ArithmeticReductionKeepDims {
public:
NGRAPH_RTTI_DECLARATION;
/// \brief Constructs a ReduceMax operation.
ReduceMax() = default;
/// \brief Constructs a ReduceMax operation.
///
/// \param arg The tensor to be reduced.
/// \param reduction_axes The axis positions (0-based) to be eliminated.
/// \param keep_dims If set to true, the reduced axes are retained as dimensions of size 1.
ReduceMax(const Output<Node>& arg, const Output<Node>& reduction_axes, bool keep_dims = false);

virtual std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;

bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
bool has_evaluate() const override;
};
using ov::op::v1::ReduceMax;
} // namespace v1
} // namespace op
} // namespace ngraph
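
The reduction headers (max.hpp here, min.hpp further down) follow the same aliasing pattern, with the reduction axes passed as a second input node. A sketch under the same assumptions as above; the data shape and the axes constant are chosen only for illustration.

#include <memory>
#include <vector>

#include "ngraph/op/constant.hpp"
#include "ngraph/op/max.hpp"
#include "ngraph/op/parameter.hpp"

int main() {
    // ngraph::op::v1::ReduceMax is now an alias for ov::op::v1::ReduceMax.
    auto data = std::make_shared<ngraph::op::v0::Parameter>(ngraph::element::f32, ngraph::Shape{2, 3, 4});
    auto axes = ngraph::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{1},
                                                 std::vector<int64_t>{1});  // reduce over axis 1
    auto reduce = std::make_shared<ngraph::op::v1::ReduceMax>(data, axes, /*keep_dims=*/false);

    // With keep_dims=false the reduced axis is dropped, so rank 3 becomes rank 2.
    return reduce->get_output_partial_shape(0).rank().get_length() == 2 ? 0 : 1;
}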
114 changes: 3 additions & 111 deletions ngraph/core/include/ngraph/op/max_pool.hpp
@@ -7,124 +7,16 @@
#include <limits>

#include "ngraph/op/util/max_pool_base.hpp"
#include "openvino/op/max_pool.hpp"

namespace ngraph {
namespace op {
namespace v1 {
/// \brief Batched max pooling operation.
class NGRAPH_API MaxPool : public op::util::MaxPoolBase {
public:
NGRAPH_RTTI_DECLARATION;

/// \brief Constructs a batched max pooling operation.
MaxPool() = default;

/// \brief Constructs a batched max pooling operation.
///
/// \param arg The node producing the input data batch tensor.
/// \param strides The strides.
/// \param pads_begin The beginning of padding shape.
/// \param pads_end The end of padding shape.
/// \param kernel The kernel shape.
/// \param rounding_type Whether to use ceiling or floor rounding type while
/// computing output shape.
/// \param auto_pad The pad type for automatically computing padding sizes.
MaxPool(const Output<Node>& arg,
const Strides& strides,
const Shape& pads_begin,
const Shape& pads_end,
const Shape& kernel,
const op::RoundingType rounding_type = op::RoundingType::FLOOR,
const PadType auto_pad = op::PadType::EXPLICIT);

bool visit_attributes(AttributeVisitor& visitor) override;
void validate_and_infer_types() override;

virtual std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;

/// \return The default value for MaxPool.
NGRAPH_SUPPRESS_DEPRECATED_START
virtual std::shared_ptr<Node> get_default_value() const override;
NGRAPH_SUPPRESS_DEPRECATED_END

bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
bool has_evaluate() const override;

private:
bool evaluate_maxpool(const HostTensorVector& outputs, const HostTensorVector& inputs) const;
};
using ov::op::v1::MaxPool;
} // namespace v1

namespace v8 {
/// \brief MaxPooling operation with values and indices calculated as individual outputs
class NGRAPH_API MaxPool : public op::util::MaxPoolBase {
public:
NGRAPH_RTTI_DECLARATION;

/// \brief Constructs an empty MaxPool operation.
MaxPool() = default;

/// \brief Constructs a parametrized MaxPool operation.
///
/// \param arg Output of a node producing the feature tensor to be pooled.
/// \param strides The strides of the pooling filter.
/// \param dilations The dilations of the pooling filter.
/// \param pads_begin Paddings at the beginning of each spatial axis.
/// \param pads_end Paddings at the end of each spatial axis.
/// \param kernel The kernel shape.
/// \param rounding_type Whether to use ceiling or floor rounding type while
/// computing the output shape.
/// \param auto_pad The pad type for automatic calculation of the padding sizes.
/// \param index_element_type The data type used by the second output tensor
/// containing the selected indices.
/// \param axis Indicates a dimension in the input data shape which should be used
/// as a starting point for calculation of the upper bound of allowed
/// values of the indices output.
MaxPool(const Output<Node>& arg,
const Strides& strides,
const Strides& dilations,
const Shape& pads_begin,
const Shape& pads_end,
const Shape& kernel,
const op::RoundingType rounding_type = op::RoundingType::FLOOR,
const PadType auto_pad = op::PadType::EXPLICIT,
const element::Type index_element_type = element::i64,
const int64_t axis = 0);

bool visit_attributes(AttributeVisitor& visitor) override;
void validate_and_infer_types() override;

virtual std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;

/// \return The pooling filter's dilations.
const Strides& get_dilations() const noexcept {
return m_dilations;
}
void set_dilations(const Strides& dilations) {
m_dilations = dilations;
}

/// \return The data type of the second output tensor (indices).
element::Type get_index_element_type() const noexcept {
return m_index_element_type;
}
void set_index_element_type(const element::Type index_element_type) {
m_index_element_type = index_element_type;
}

// \return The 'axis' attribute value.
int64_t get_axis() const {
return m_axis;
}
void set_axis(const int64_t axis) {
m_axis = axis;
}

private:
Strides m_dilations;
element::Type m_index_element_type{element::i64};
int64_t m_axis{0};
};
using ov::op::v8::MaxPool;
} // namespace v8
} // namespace op
} // namespace ngraph
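
max_pool.hpp is the one header in this group that carries two opset versions; both v1::MaxPool and v8::MaxPool become aliases, and v8 keeps its second (indices) output. A sketch under the same assumptions as above, with made-up shapes and pooling parameters:

#include <memory>

#include "ngraph/op/max_pool.hpp"
#include "ngraph/op/parameter.hpp"

int main() {
    // v8::MaxPool produces pooled values and the indices of the selected elements.
    auto data = std::make_shared<ngraph::op::v0::Parameter>(ngraph::element::f32,
                                                            ngraph::Shape{1, 3, 32, 32});
    auto pool = std::make_shared<ngraph::op::v8::MaxPool>(data,
                                                          ngraph::Strides{2, 2},  // strides
                                                          ngraph::Strides{1, 1},  // dilations
                                                          ngraph::Shape{0, 0},    // pads_begin
                                                          ngraph::Shape{0, 0},    // pads_end
                                                          ngraph::Shape{2, 2});   // kernel
    auto values  = pool->output(0);  // pooled values
    auto indices = pool->output(1);  // selected element indices
    return pool->get_output_size() == 2 ? 0 : 1;
}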
24 changes: 2 additions & 22 deletions ngraph/core/include/ngraph/op/maximum.hpp
@@ -5,32 +5,12 @@
#pragma once

#include "ngraph/op/util/binary_elementwise_arithmetic.hpp"
#include "openvino/op/maximum.hpp"

namespace ngraph {
namespace op {
namespace v1 {
/// \brief Elementwise maximum operation.
class NGRAPH_API Maximum : public util::BinaryElementwiseArithmetic {
public:
NGRAPH_RTTI_DECLARATION;

/// \brief Constructs a maximum operation.
Maximum() : util::BinaryElementwiseArithmetic(AutoBroadcastSpec::NUMPY) {}

/// \brief Constructs a maximum operation.
///
/// \param arg0 Node that produces the first input tensor.
/// \param arg1 Node that produces the second input tensor.
/// \param auto_broadcast Auto broadcast specification
Maximum(const Output<Node>& arg0,
const Output<Node>& arg1,
const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY));

virtual std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;

bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
bool has_evaluate() const override;
};
using ov::op::v1::Maximum;
} // namespace v1
} // namespace op
} // namespace ngraph
21 changes: 2 additions & 19 deletions ngraph/core/include/ngraph/op/min.hpp
@@ -6,29 +6,12 @@

#include "ngraph/op/util/arithmetic_reduction.hpp"
#include "ngraph/op/util/arithmetic_reductions_keep_dims.hpp"
#include "openvino/op/reduce_min.hpp"

namespace ngraph {
namespace op {
namespace v1 {
class NGRAPH_API ReduceMin : public util::ArithmeticReductionKeepDims {
public:
NGRAPH_RTTI_DECLARATION;
/// \brief Constructs a ReduceMin operation.
ReduceMin() = default;
/// \brief Constructs a ReduceMin operation.
///
/// \param arg The tensor to be reduced.
/// \param reduction_axes The axis positions (0-based) to be eliminated.
/// \param keep_dims If set to true, the reduced axes are retained as dimensions of size 1.
ReduceMin(const Output<Node>& arg, const Output<Node>& reduction_axes, bool keep_dims = false);

virtual std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;

bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
bool has_evaluate() const override;
bool evaluate_lower(const HostTensorVector& outputs) const override;
bool evaluate_upper(const HostTensorVector& outputs) const override;
};
using ov::op::v1::ReduceMin;
} // namespace v1
} // namespace op
} // namespace ngraph