diff --git a/ngraph/frontend/paddlepaddle/src/op/expand_v2.cpp b/ngraph/frontend/paddlepaddle/src/op/expand_v2.cpp index bc2bc1d233e59d..1b5ec1dd3d69f2 100644 --- a/ngraph/frontend/paddlepaddle/src/op/expand_v2.cpp +++ b/ngraph/frontend/paddlepaddle/src/op/expand_v2.cpp @@ -61,7 +61,11 @@ namespace ngraph fixed_shape_node, input_shape_node, false); return node.default_single_output_mapping( - {std::make_shared(x, repeated_node)}, {"Out"}); + {std::make_shared( + x, + std::make_shared(repeated_node, + element::i64))}, + {"Out"}); } } // namespace op diff --git a/ngraph/frontend/paddlepaddle/src/op/mul.cpp b/ngraph/frontend/paddlepaddle/src/op/mul.cpp new file mode 100644 index 00000000000000..130fd148084091 --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op/mul.cpp @@ -0,0 +1,67 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs mul(const NodeContext& node) + { + auto x = node.get_ng_input("X"); + auto y = node.get_ng_input("Y"); + PDPD_OP_VALIDATION_CHECK(node, + x.get_partial_shape().rank().is_static(), + "matmul: X rank must be static!"); + int64_t x_rank = x.get_partial_shape().rank().get_length(); + PDPD_OP_VALIDATION_CHECK(node, + y.get_partial_shape().rank().is_static() && + y.get_partial_shape().rank().get_length() == 2, + "matmul: Y rank must be static, and 2!"); + if (x_rank > 2) + { + auto shape = std::make_shared(x); + int64_t x_num_col_dims = node.get_attribute("x_num_col_dims"); + auto axis = ngraph::opset6::Constant::create(ngraph::element::i64, {}, {0}); + auto split_lengths = ngraph::opset6::Constant::create( + ngraph::element::i64, {2}, {x_num_col_dims, x_rank - x_num_col_dims}); + auto split = std::make_shared( + shape, axis, split_lengths); + auto f_dim_red_axis = + ngraph::opset6::Constant::create(ngraph::element::i64, {}, {0}); + auto first_dim_reduce = std::make_shared( + split->output(0), f_dim_red_axis); + auto f_dim_shape = + ngraph::opset6::Constant::create(ngraph::element::i64, {1}, {1}); + auto first_dim = std::make_shared( + first_dim_reduce, f_dim_shape, false); + auto s_dim_red_axis = + ngraph::opset6::Constant::create(ngraph::element::i64, {}, {0}); + auto second_dim_reduce = std::make_shared( + split->output(1), s_dim_red_axis); + auto s_dim_shape = + ngraph::opset6::Constant::create(ngraph::element::i64, {1}, {1}); + auto second_dim = std::make_shared( + second_dim_reduce, s_dim_shape, false); + auto out_shape = std::make_shared( + ngraph::NodeVector{first_dim, second_dim}, 0); + auto x_reshaped = + std::make_shared(x, out_shape, false); + return node.default_single_output_mapping( + {std::make_shared(x_reshaped, y)}, {"Out"}); + } + return node.default_single_output_mapping( + {std::make_shared(x, y)}, {"Out"}); + } + + } // namespace op + } // namespace pdpd + } // namespace frontend +} // namespace ngraph \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/pad3d.cpp b/ngraph/frontend/paddlepaddle/src/op/pad3d.cpp new file mode 100644 index 00000000000000..8b508d0426391d --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op/pad3d.cpp @@ -0,0 +1,117 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs pad3d(const NodeContext& node) + { + auto data = node.get_ng_input("X"); + auto mode = 
node.get_attribute("mode"); + auto value = node.get_attribute("value", 0.0); + auto data_format = node.get_attribute("data_format"); + + auto paddings = std::vector(6, 0); + + // padding of type int feature only supported by PaddlePaddle 'develop' + // version(>=2.1.0) + if (node.has_attribute>("paddings")) + { + auto paddings_vector = node.get_attribute>("paddings"); + PDPD_OP_VALIDATION_CHECK(node, + paddings_vector.size() == 6, + "paddings Params size should be 6 in pad3d!"); + paddings = paddings_vector; + } + else if (node.has_attribute("paddings")) + { + auto padding_int = node.get_attribute("paddings"); + for (int i = 0; i < 6; i++) + paddings[i] = padding_int; + } + else + { + throw ngraph::ngraph_error("Unsupported paddings attribute!"); + } + + auto pads_begin = std::vector(5, 0); + auto pads_end = std::vector(5, 0); + + Output values; + Output padding_begin; + Output padding_end; + + ngraph::op::PadMode pad_mode; + // TODO Support Circular mode in #55704 + if (mode == "constant") + { + pad_mode = ngraph::op::PadMode::CONSTANT; + values = ngraph::opset6::Constant::create( + element::f32, ngraph::Shape{}, {value}); + } + else if (mode == "reflect") + { + pad_mode = ngraph::op::PadMode::REFLECT; + } + else if (mode == "replicate") + { + pad_mode = ngraph::op::PadMode::EDGE; + } + else + { + throw ngraph::ngraph_error("Unsupported 3d paddings mode: [" + mode + "]"); + } + + if (data_format == "NCDHW") + { + pads_begin[4] = paddings[0]; // left + pads_end[4] = paddings[1]; // right + pads_begin[3] = paddings[2]; // top + pads_end[3] = paddings[3]; // down + pads_begin[2] = paddings[4]; // front + pads_end[2] = paddings[5]; // back + } + else if (data_format == "NDHWC") + { + pads_begin[3] = paddings[0]; // left + pads_end[3] = paddings[1]; // right + pads_begin[2] = paddings[2]; // top + pads_end[2] = paddings[3]; // down + pads_begin[1] = paddings[4]; // front + pads_end[1] = paddings[5]; // back + } + else + { + throw ngraph::ngraph_error("Unsupported 3d paddings data_format: [" + + data_format + "]"); + } + + padding_begin = ngraph::opset6::Constant::create( + element::i32, ngraph::Shape{pads_begin.size()}, pads_begin); + padding_end = ngraph::opset6::Constant::create( + element::i32, ngraph::Shape{pads_end.size()}, pads_end); + + if (mode == "constant") + return node.default_single_output_mapping( + {std::make_shared( + data, padding_begin, padding_end, values, pad_mode)}, + {"Out"}); + else + return node.default_single_output_mapping( + {std::make_shared( + data, padding_begin, padding_end, pad_mode)}, + {"Out"}); + } + } // namespace op + } // namespace pdpd + } // namespace frontend +} // namespace ngraph diff --git a/ngraph/frontend/paddlepaddle/src/op/pool2d.cpp b/ngraph/frontend/paddlepaddle/src/op/pool2d.cpp new file mode 100644 index 00000000000000..65a7c3576b962d --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op/pool2d.cpp @@ -0,0 +1,249 @@ +//***************************************************************************** +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// +//***************************************************************************** + +#include +#include +#include + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + // helper func - get pad_begin and pad_end + static void get_paddings(const NodeContext& node, + ngraph::Shape& pad_begin, + ngraph::Shape& pad_end, + ngraph::op::PadType& auto_pad) + { + if (node.has_attribute("padding_algorithm")) + { + auto pad_algo = 
node.get_attribute("padding_algorithm"); + if (pad_algo == "SAME") + { + auto_pad = ngraph::op::PadType::SAME_UPPER; + } + else if (pad_algo == "VALID") + { + auto_pad = ngraph::op::PadType::VALID; + } + else if (pad_algo == "EXPLICIT") + { + auto_pad = ngraph::op::PadType::EXPLICIT; + } + else + { + throw std::runtime_error("Unsupported pooling padding_algorithm " + + pad_algo); + } + } + else + { + // adaptive_maxpool with no such attr. + auto_pad = ngraph::op::PadType::EXPLICIT; + } + + /*If pool padding size is a tuple or list, it could be in three forms: + [pad_height, pad_width] or [pad_height_top, pad_height_bottom, pad_width_left, + pad_width_right], and when data_format is “NCHW”, pool_padding can be in the + form [[0,0], [0,0], [pad_height_top, pad_height_bottom], [pad_width_left, + pad_width_right]]. when data_format is “NHWC”, pool_padding can be in the form + [[0,0], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], + [0,0]]. Otherwise, the pool padding size will be a square of an int.*/ + auto paddings = node.get_attribute>("paddings"); + + // Default is empty for 'adaptive max pooling' + auto data_format = node.get_attribute("data_format", {}); + + // TODO: need to support NHWC input #55483 + switch (paddings.size()) + { + case 2: + pad_begin = Shape{static_cast(paddings[0]), + static_cast(paddings[1])}; + pad_end = pad_begin; + break; + case 4: + pad_begin = Shape{static_cast(paddings[0]), + static_cast(paddings[2])}; + pad_end = Shape{static_cast(paddings[1]), + static_cast(paddings[3])}; + break; + default: + throw std::runtime_error("Unsupported pooling paddings " + + std::to_string(paddings.size())); + } + } + + NamedOutputs pool2d(const NodeContext& node) + { + auto data = node.get_ng_input("X"); + + auto pooling_type = node.get_attribute("pooling_type", {}); + auto global_pooling = node.get_attribute("global_pooling"); + auto adaptive = node.get_attribute("adaptive"); + auto kernel_shape = node.get_attribute>("ksize"); + + auto rounding_type = node.get_attribute("ceil_mode", false) + ? 
ngraph::op::RoundingType::CEIL + : ngraph::op::RoundingType::FLOOR; + + if (pooling_type.empty()) + { + pooling_type = "max"; + } + + PDPD_ASSERT((pooling_type == "max") || (pooling_type == "avg"), + "pool2d: not supported pooling type !"); + PDPD_ASSERT(kernel_shape.size() == 1 || kernel_shape.size() == 2, + "pool2d: ksize must be 1 or 2!"); + + PartialShape input_shape = data.get_partial_shape(); + + int32_t input_rank = input_shape.rank().get_length(); + PDPD_ASSERT(input_rank >= 2, "input tensor rank must be greater than 2"); + + auto auto_pad = ngraph::op::PadType::EXPLICIT; + ngraph::Shape pad_begin, pad_end; + get_paddings(node, pad_begin, pad_end, auto_pad); + + if (global_pooling || + (adaptive && std::any_of(kernel_shape.begin(), + kernel_shape.end(), + [](int32_t i) { return i == 1; }))) + { + if (pooling_type == "max") + { + auto axes = ngraph::opset6::Constant::create( + ngraph::element::i64, {2}, {input_rank - 2, input_rank - 1}); + return node.default_single_output_mapping( + {std::make_shared(data, axes, true)}, + {"Out"}); + } + else + { + auto axes = ngraph::opset6::Constant::create( + ngraph::element::i64, {2}, {input_rank - 2, input_rank - 1}); + return node.default_single_output_mapping( + {std::make_shared(data, axes, true)}, + {"Out"}); + } + } + else if (adaptive) + { + PDPD_ASSERT(input_shape[2].is_static() && input_shape[3].is_static(), + "pool2d: spatial dim must be static when using adaptive pool"); + auto pool_size = std::vector(2, 0); + + if (kernel_shape.size() == 1) + { + // Not tested: implemented according to spec, but can't generate real + // model to test + pool_size[0] = pool_size[1] = kernel_shape[0]; + } + else + { + pool_size[0] = kernel_shape[0]; + pool_size[1] = kernel_shape[1]; + } + + const Output output_shape = ngraph::opset6::Constant::create( + ngraph::element::i64, {pool_size.size()}, pool_size); + + if (pooling_type == "max") + { + std::vector> pool_outputs; + pool_outputs = std::make_shared( + data, output_shape, ngraph::element::i32) + ->outputs(); + NamedOutputs outputs; + outputs["Out"] = {pool_outputs[0]}; + outputs["Mask"] = {pool_outputs[1]}; + return outputs; + } + else + { + return node.default_single_output_mapping( + {std::make_shared(data, + output_shape)}, + {"Out"}); + } + } + else + { + auto strides = node.get_attribute>("strides"); + auto paddings = node.get_attribute>("paddings"); + + uint64_t kernel_h, kernel_w; + if (kernel_shape.size() == 1) + { + // Not tested: implemented according to spec, but can't generate real + // model to test + kernel_h = kernel_w = kernel_shape[0]; + } + else + { + kernel_h = kernel_shape[0]; + kernel_w = kernel_shape[1]; + } + + PDPD_ASSERT(kernel_h > 0 && kernel_w > 0, + "pool2d kernel shape must be greater than 0"); + + // Note: this shape check is only valid when the spatial dim of input_shape + // is static. 
+ if (input_shape[2].is_static() && input_shape[3].is_static()) + { + uint64_t input_h = input_shape[input_rank - 2].get_length(); + uint64_t input_w = input_shape[input_rank - 1].get_length(); + if ((input_h > 0) && (input_h + pad_begin[0] + pad_end[0] < kernel_h)) + { + kernel_h = input_h + pad_begin[0] + pad_end[0]; + } + if ((input_w > 0) && (input_w + pad_begin[1] + pad_end[1] < kernel_w)) + { + kernel_w = input_w + pad_begin[1] + pad_end[1]; + } + } + + if (pooling_type == "max") + { + return node.default_single_output_mapping( + {std::make_shared( + data, + ngraph::Strides(strides.begin(), strides.end()), + pad_begin, + pad_end, + ngraph::Shape{kernel_h, kernel_w}, + rounding_type, + auto_pad)}, + {"Out"}); + } + else + { + bool exclude_pad = node.get_attribute("exclusive", false); + return node.default_single_output_mapping( + {std::make_shared( + data, + ngraph::Strides(strides.begin(), strides.end()), + pad_begin, + pad_end, + ngraph::Shape{kernel_h, kernel_w}, + exclude_pad, + rounding_type, + auto_pad)}, + {"Out"}); + } + } + } + + } // namespace op + } // namespace pdpd + } // namespace frontend +} // namespace ngraph \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/pow.cpp b/ngraph/frontend/paddlepaddle/src/op/pow.cpp new file mode 100644 index 00000000000000..2bab9dc6043f14 --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op/pow.cpp @@ -0,0 +1,40 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// +#include +#include +#include + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs pow(const NodeContext& node) + { + auto x = node.get_ng_input("X"); + auto dtype = x.get_element_type(); + Output factor_node; + if (node.has_ng_input("FactorTensor")) + { + factor_node = node.get_ng_input("FactorTensor"); + if (factor_node.get_element_type() != dtype) + factor_node = std::make_shared(factor_node, dtype); + } + else + { + factor_node = ngraph::opset6::Constant::create( + dtype, Shape{1}, {node.get_attribute("factor")}); + } + + return node.default_single_output_mapping( + {std::make_shared(x, factor_node)}, {"Out"}); + } + + } // namespace op + } // namespace pdpd + } // namespace frontend +} // namespace ngraph diff --git a/ngraph/frontend/paddlepaddle/src/op/range.cpp b/ngraph/frontend/paddlepaddle/src/op/range.cpp new file mode 100644 index 00000000000000..c445c650942493 --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op/range.cpp @@ -0,0 +1,37 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs range(const NodeContext& node) + { + auto start = node.get_ng_input("Start"); + auto stop = node.get_ng_input("End"); + auto step = node.get_ng_input("Step"); + auto type = node.get_out_port_type("Out"); + + const auto axis = ngraph::opset6::Constant::create(element::i64, Shape{}, {0}); + auto start_scalar = std::make_shared(start, axis); + auto stop_scalar = std::make_shared(stop, axis); + auto step_scalar = std::make_shared(step, axis); + + return node.default_single_output_mapping( + {std::make_shared( + start_scalar, stop_scalar, step_scalar, type)}, + {"Out"}); + } + + } // namespace op + } // namespace pdpd + } // namespace frontend +} // namespace ngraph diff --git a/ngraph/frontend/paddlepaddle/src/op/relu6.cpp b/ngraph/frontend/paddlepaddle/src/op/relu6.cpp new file mode 100644 index 
00000000000000..8c17505494b6f6 --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op/relu6.cpp @@ -0,0 +1,28 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include +#include + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs relu6(const NodeContext& node) + { + auto data = node.get_ng_input("X"); + auto threshold = node.get_attribute("threshold", 6.0f); + return node.default_single_output_mapping( + {std::make_shared(data, 0.0, threshold)}, {"Out"}); + } + + } // namespace op + } // namespace pdpd + } // namespace frontend +} // namespace ngraph \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/reshape2.cpp b/ngraph/frontend/paddlepaddle/src/op/reshape2.cpp new file mode 100644 index 00000000000000..b39943cece2b68 --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op/reshape2.cpp @@ -0,0 +1,56 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include +#include + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs reshape2(const NodeContext& node) + { + auto data = node.get_ng_input("X"); + if (!node.has_ng_input("Shape") && !node.has_ng_input("ShapeTensor")) + { + auto shape_attr = node.get_attribute>("shape"); + auto shape_node = ngraph::opset6::Constant::create( + ngraph::element::i32, {shape_attr.size()}, shape_attr); + return node.default_single_output_mapping( + {std::make_shared(data, shape_node, true)}, + {"Out"}); + } + else + { + std::string name = "Shape"; + if (node.has_ng_input("ShapeTensor")) + { + name = "ShapeTensor"; + } + + auto nodes = node.get_ng_inputs(name); + ngraph::NodeVector node_vec; + for (auto& input_node : nodes) + { + auto cast = + std::make_shared(input_node, element::i64); + node_vec.push_back(cast); + } + + auto shape_node = std::make_shared(node_vec, 0); + return node.default_single_output_mapping( + {std::make_shared(data, shape_node, true)}, + {"Out"}); + } + } + + } // namespace op + } // namespace pdpd + } // namespace frontend +} // namespace ngraph \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/shape.cpp b/ngraph/frontend/paddlepaddle/src/op/shape.cpp new file mode 100644 index 00000000000000..fe1c2bd72d390e --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op/shape.cpp @@ -0,0 +1,26 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs shape(const NodeContext& node) + { + auto data = node.get_ng_input("Input"); + auto shape_node = std::make_shared(data, element::i32); + return node.default_single_output_mapping({shape_node}, {"Out"}); + } + + } // namespace op + } // namespace pdpd + } // namespace frontend +} // namespace ngraph \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/sigmoid.cpp b/ngraph/frontend/paddlepaddle/src/op/sigmoid.cpp new file mode 100644 index 00000000000000..2f9b8edb48edcc --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op/sigmoid.cpp @@ -0,0 +1,26 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs sigmoid(const NodeContext& node) + { + auto data = node.get_ng_input("X"); + return 
node.default_single_output_mapping( + {std::make_shared(data)}, {"Out"}); + } + + } // namespace op + } // namespace pdpd + } // namespace frontend +} // namespace ngraph \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/slice.cpp b/ngraph/frontend/paddlepaddle/src/op/slice.cpp new file mode 100644 index 00000000000000..1a245f7384985e --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op/slice.cpp @@ -0,0 +1,82 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include +#include + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs slice(const NodeContext& node) + { + auto data = node.get_ng_input("Input"); + auto axes = node.get_attribute>("axes"); + Output start_idx_node, end_idx_node; + if (node.has_ng_input("StartsTensor")) + { + start_idx_node = node.get_ng_input("StartsTensor"); + } + else if (node.has_ng_input("StartsTensorList")) + { + auto inputs = node.get_ng_inputs("StartsTensorList"); + start_idx_node = std::make_shared(inputs, 0); + } + else + { + auto starts = node.get_attribute>("starts"); + start_idx_node = + opset6::Constant::create(element::i32, {starts.size()}, starts); + } + + if (node.has_ng_input("EndsTensor")) + { + end_idx_node = node.get_ng_input("EndsTensor"); + } + else if (node.has_ng_input("EndsTensorList")) + { + auto inputs = node.get_ng_inputs("EndsTensorList"); + end_idx_node = std::make_shared(inputs, 0); + } + else + { + auto ends = node.get_attribute>("ends"); + end_idx_node = opset6::Constant::create(element::i32, {ends.size()}, ends); + } + + // the shape of input, such as [1, 1, 3, 3] + auto shape_node = std::make_shared(data, element::Type_t::i32); + // the input dim, such as [4] + auto shape_shape_node = + std::make_shared(shape_node, element::i32); + auto const_0_node = opset6::Constant::create(element::i32, {}, {0}); + auto const_max_node = opset6::Constant::create(element::i32, {}, {INT_MAX}); + // array [0:max) + auto start_node = + std::make_shared(const_0_node, shape_shape_node); + auto end_node = + std::make_shared(const_max_node, shape_shape_node); + auto axes_node = opset6::Constant::create(element::i32, {axes.size(), 1}, axes); + auto fixed_start_node = std::make_shared( + start_node, axes_node, start_idx_node); + auto fixed_end_node = std::make_shared( + end_node, axes_node, end_idx_node); + + return node.default_single_output_mapping( + {std::make_shared(data, + fixed_start_node, + fixed_end_node, + std::vector{}, + std::vector{})}, + {"Out"}); + } + } // namespace op + } // namespace pdpd + } // namespace frontend +} // namespace ngraph \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/softmax.cpp b/ngraph/frontend/paddlepaddle/src/op/softmax.cpp new file mode 100644 index 00000000000000..5be1423a19703e --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op/softmax.cpp @@ -0,0 +1,34 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs softmax(const NodeContext& node) + { + auto data = node.get_ng_input("X"); + auto axis = node.get_attribute("axis"); + if (axis < 0) + { + PDPD_OP_VALIDATION_CHECK(node, + data.get_partial_shape().rank().is_static(), + "Softmax rank must be static"); + auto data_rank = data.get_partial_shape().rank().get_length(); + axis = data_rank + axis; + } + return 
node.default_single_output_mapping( + {std::make_shared(data, axis)}, {"Out"}); + } + } // namespace op + } // namespace pdpd + } // namespace frontend +} // namespace ngraph \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/squeeze.cpp b/ngraph/frontend/paddlepaddle/src/op/squeeze.cpp new file mode 100644 index 00000000000000..a79910af0098e1 --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op/squeeze.cpp @@ -0,0 +1,41 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs squeeze(const NodeContext& node) + { + auto data = node.get_ng_input("X"); + std::vector axes; + if (node.has_attribute>("axes")) + { + axes = node.get_attribute>("axes"); + } + + std::shared_ptr out; + if (!axes.empty()) + { + auto axesNode = ngraph::opset6::Constant::create( + ngraph::element::i32, {axes.size()}, axes); + out = std::make_shared(data, axesNode); + } + else + { + out = std::make_shared(data); + } + return node.default_single_output_mapping(out, {"Out"}); + } + } // namespace op + } // namespace pdpd + } // namespace frontend +} // namespace ngraph \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/unsqueeze.cpp b/ngraph/frontend/paddlepaddle/src/op/unsqueeze.cpp new file mode 100644 index 00000000000000..265fe80025b4b9 --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op/unsqueeze.cpp @@ -0,0 +1,41 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs unsqueeze(const NodeContext& node) + { + auto data = node.get_ng_input("X"); + Output axesNode; + if (node.has_ng_input("AxesTensor")) + { + axesNode = node.get_ng_input("AxesTensor"); + } + else if (node.has_ng_input("AxesTensorList")) + { + auto inputs = node.get_ng_inputs("AxesTensorList"); + axesNode = std::make_shared(inputs, 0); + } + else + { + auto axes = node.get_attribute>("axes"); + axesNode = ngraph::opset6::Constant::create( + ngraph::element::i32, {axes.size()}, axes); + } + return node.default_single_output_mapping( + {std::make_shared(data, axesNode)}, {"Out"}); + } + } // namespace op + } // namespace pdpd + } // namespace frontend +} // namespace ngraph diff --git a/ngraph/frontend/paddlepaddle/src/op/yolo_box.cpp b/ngraph/frontend/paddlepaddle/src/op/yolo_box.cpp new file mode 100644 index 00000000000000..ccbc3f61341b7a --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op/yolo_box.cpp @@ -0,0 +1,433 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// +#include // std::numeric_limits +#include + +#include +#include + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + using namespace opset6; + using namespace element; + + // reference + // Paddle/python/paddle/fluid/tests/unittests/test_yolo_box_op.py + // Paddle/paddle/fluid/operators/detection/yolo_box_op.h + // Paddle2ONNX/paddle2onnx/op_mapper/detection/yolo_box.py - clip_bbox is not used + // by Paddle2ONNX. 
+ NamedOutputs yolo_box(const NodeContext& node_context) + { + auto data = node_context.get_ng_input("X"); + auto image_size = node_context.get_ng_input("ImgSize"); + + // get shape of X + auto input_shape = std::make_shared(data, i64); + auto indices_batchsize = Constant::create(i64, {1}, {0}); + auto indices_height = Constant::create(i64, {1}, {2}); + auto indices_width = Constant::create(i64, {1}, {3}); + auto const_axis0 = Constant::create(i64, {1}, {0}); + auto input_height = + std::make_shared(input_shape, indices_height, const_axis0); // H + auto input_width = + std::make_shared(input_shape, indices_width, const_axis0); // W + auto batch_size = + std::make_shared(input_shape, indices_batchsize, const_axis0); // N + + int32_t class_num = node_context.get_attribute("class_num"); + auto const_class_num = Constant::create(i64, {1}, {class_num}); + + // PDPD anchors attribute is of type int32. Convert to float for computing + // convinient. + auto _anchors = node_context.get_attribute>("anchors"); + std::vector anchors(_anchors.begin(), _anchors.end()); + uint32_t num_anchors = anchors.size() / 2; + auto const_num_anchors = Constant::create(i64, {1}, {num_anchors}); + + auto default_scale = 1.0f; + auto scale_x_y = node_context.get_attribute("scale_x_y", default_scale); + + auto downsample_ratio = node_context.get_attribute("downsample_ratio"); + auto const_downsample_ratio = + Constant::create(i64, {1}, {downsample_ratio}); + auto scaled_input_height = + std::make_shared(input_height, const_downsample_ratio); + auto scaled_input_width = + std::make_shared(input_width, const_downsample_ratio); + + // score_shape {batch_size, input_height * input_width * num_anchors, class_num} + auto node_mul_whc = std::make_shared(input_height, input_width); + node_mul_whc = std::make_shared(node_mul_whc, const_num_anchors); + auto score_shape = std::make_shared( + NodeVector{batch_size, node_mul_whc, const_class_num}, 0); + + auto conf_thresh = node_context.get_attribute("conf_thresh"); + auto const_conf_thresh = Constant::create(f32, {1}, {conf_thresh}); + + auto clip_bbox = node_context.get_attribute("clip_bbox"); + + // main X + // node_x_shape {batch_size, num_anchors, 5 + class_num, input_height, + // input_width} + auto const_class_num_plus5 = + Constant::create(i64, {1}, {5 + class_num}); + auto node_x_shape = std::make_shared(NodeVector{batch_size, + const_num_anchors, + const_class_num_plus5, + input_height, + input_width}, + 0); + + auto node_x_reshape = std::make_shared(data, node_x_shape, false); + + auto node_input_order = Constant::create(i64, {5}, {0, 1, 3, 4, 2}); + auto node_x_transpose = + std::make_shared(node_x_reshape, node_input_order); + + // range x/y + // range_x: shape {1, input_width} containing 0...input_width + // range_y: shape {input_height, 1} containing 0...input_height + auto const_start = Constant::create(f32, {}, {0.f}); + auto const_step = Constant::create(f32, {}, {1.f}); + auto reduction_axes = Constant::create(i64, {1}, {0}); + + auto scaler_input_width = + std::make_shared(input_width, reduction_axes, false); + auto range_x = + std::make_shared(const_start, scaler_input_width, const_step, f32); + auto node_range_x = std::make_shared( + range_x, Constant::create(i64, {1}, {0})); + + auto scaler_input_height = + std::make_shared(input_height, reduction_axes, false); + auto range_y = + std::make_shared(const_start, scaler_input_height, const_step, f32); + auto node_range_y = std::make_shared( + range_y, Constant::create(i64, {1}, {1})); + + auto node_range_x_shape 
= std::make_shared( + NodeVector{Constant::create(i64, {1}, {1}), input_width}, 0); + auto node_range_y_shape = std::make_shared( + NodeVector{input_height, Constant::create(i64, {1}, {1})}, 0); + + auto node_grid_x = + std::make_shared(node_range_x, node_range_y_shape); // shape (H, W) + auto node_grid_y = std::make_shared(node_range_y, node_range_x_shape); + + // main X (part2) + auto node_split_axis = Constant::create(i64, {1}, {-1}); + auto node_split_lengths = + Constant::create(i64, {6}, {1, 1, 1, 1, 1, class_num}); + auto node_split_input = std::make_shared( + node_x_transpose, node_split_axis, node_split_lengths); + + auto node_box_x = + node_split_input->output(0); // shape (batch_size, num_anchors, H, W, 1) + auto node_box_y = node_split_input->output(1); + auto node_box_w = node_split_input->output(2); + auto node_box_h = node_split_input->output(3); + auto node_conf = node_split_input->output(4); + auto node_prob = node_split_input->output(5); + + // x/y + std::shared_ptr node_box_x_sigmoid = + std::make_shared(node_box_x); + std::shared_ptr node_box_y_sigmoid = + std::make_shared(node_box_y); + + if (std::fabs(scale_x_y - default_scale) > 1e-6) + { // float not-equal + float bias_x_y = -0.5 * (scale_x_y - 1.0); + + auto scale_x_y_node = Constant::create(f32, {1}, {scale_x_y}); + auto bias_x_y_node = Constant::create(f32, {1}, {bias_x_y}); + + node_box_x_sigmoid = + std::make_shared(node_box_x_sigmoid, scale_x_y_node); + node_box_x_sigmoid = + std::make_shared(node_box_x_sigmoid, bias_x_y_node); + + node_box_y_sigmoid = + std::make_shared(node_box_y_sigmoid, scale_x_y_node); + node_box_y_sigmoid = + std::make_shared(node_box_y_sigmoid, bias_x_y_node); + } + + auto squeeze_box_x = Constant::create(i64, {1}, {4}); + auto node_box_x_squeeze = + std::make_shared(node_box_x_sigmoid, squeeze_box_x); + + auto squeeze_box_y = Constant::create(i64, {1}, {4}); + auto node_box_y_squeeze = + std::make_shared(node_box_y_sigmoid, squeeze_box_y); + + auto node_box_x_add_grid = + std::make_shared(node_grid_x, node_box_x_squeeze); + auto node_box_y_add_grid = + std::make_shared(node_grid_y, node_box_y_squeeze); + + auto node_input_h = std::make_shared(input_height, element::f32); + auto node_input_w = std::make_shared(input_width, element::f32); + + auto node_box_x_encode = + std::make_shared(node_box_x_add_grid, node_input_w); + auto node_box_y_encode = + std::make_shared(node_box_y_add_grid, node_input_h); + + // w/h + auto node_anchor_tensor = + Constant::create(f32, {num_anchors, 2}, anchors); + auto split_axis = Constant::create(i64, {}, {1}); + auto node_anchor_split = + std::make_shared(node_anchor_tensor, split_axis, 2); + + auto node_anchor_w_origin = node_anchor_split->output(0); + auto node_anchor_h_origin = node_anchor_split->output(1); + + auto float_input_height = + std::make_shared(scaled_input_height, element::f32); + auto node_anchor_h = + std::make_shared(node_anchor_h_origin, float_input_height); + auto float_input_width = + std::make_shared(scaled_input_width, element::f32); + auto node_anchor_w = + std::make_shared(node_anchor_w_origin, float_input_width); + + auto node_new_anchor_shape = + Constant::create(i64, {4}, {1, num_anchors, 1, 1}); + auto node_anchor_w_reshape = + std::make_shared(node_anchor_w, node_new_anchor_shape, false); + auto node_anchor_h_reshape = + std::make_shared(node_anchor_h, node_new_anchor_shape, false); + + auto squeeze_box_wh = Constant::create(i64, {1}, {4}); + auto node_box_w_squeeze = std::make_shared(node_box_w, squeeze_box_wh); + auto 
node_box_h_squeeze = std::make_shared(node_box_h, squeeze_box_wh); + + auto node_box_w_exp = std::make_shared(node_box_w_squeeze); + auto node_box_h_exp = std::make_shared(node_box_h_squeeze); + + auto node_box_w_encode = + std::make_shared(node_box_w_exp, node_anchor_w_reshape); + auto node_box_h_encode = + std::make_shared(node_box_h_exp, node_anchor_h_reshape); + + // confidence + auto node_conf_sigmoid = std::make_shared(node_conf); + + auto node_concat = std::make_shared( + NodeVector{Constant::create(i64, {1}, {1}), + const_num_anchors, + input_height, + input_width, + Constant::create(i64, {1}, {1})}, + 0); + auto node_conf_thresh = std::make_shared( + const_conf_thresh, + node_concat); // {1, num_anchors, input_height, input_width, 1} + + auto node_conf_sub = + std::make_shared(node_conf_sigmoid, node_conf_thresh); + + auto node_conf_clip = std::make_shared( + node_conf_sub, 0.0f, std::numeric_limits::max()); + + auto node_zeros = Constant::create(f32, {1}, {0}); + auto node_conf_clip_bool = + std::make_shared(node_conf_clip, node_zeros); + + auto node_conf_clip_cast = std::make_shared(node_conf_clip_bool, f32); + + auto node_conf_set_zero = + std::make_shared(node_conf_sigmoid, node_conf_clip_cast); + + /* probability */ + auto node_prob_sigmoid = std::make_shared(node_prob); + + auto node_new_shape = std::make_shared( + NodeVector{batch_size, + const_num_anchors, + input_height, + input_width, + Constant::create(i64, {1}, {1})}, + 0); + auto node_conf_new_shape = std::make_shared( + node_conf_set_zero, + node_new_shape, + false); // {batch_size, int(num_anchors), input_height, input_width, 1} + + // broadcast confidence * probability of each category + auto node_score = + std::make_shared(node_prob_sigmoid, node_conf_new_shape); + + // for bbox which has object (greater than threshold) + auto node_conf_bool = + std::make_shared(node_conf_new_shape, node_zeros); + + auto node_box_x_new_shape = + std::make_shared(node_box_x_encode, node_new_shape, false); + auto node_box_y_new_shape = + std::make_shared(node_box_y_encode, node_new_shape, false); + auto node_box_w_new_shape = + std::make_shared(node_box_w_encode, node_new_shape, false); + auto node_box_h_new_shape = + std::make_shared(node_box_h_encode, node_new_shape, false); + auto node_pred_box = + std::make_shared(OutputVector{node_box_x_new_shape, + node_box_y_new_shape, + node_box_w_new_shape, + node_box_h_new_shape}, + 4); + + auto node_conf_cast = std::make_shared(node_conf_bool, f32); + + auto node_pred_box_mul_conf = + std::make_shared(node_pred_box, node_conf_cast); + + auto node_box_shape = std::make_shared( + NodeVector{ + batch_size, node_mul_whc, Constant::create(i64, {1}, {4})}, + 0); + auto node_pred_box_new_shape = std::make_shared( + node_pred_box_mul_conf, + node_box_shape, + false); // {batch_size, int(num_anchors) * input_height * input_width, 4} + + auto pred_box_split_axis = Constant::create(i64, {}, {2}); + auto node_pred_box_split = + std::make_shared(node_pred_box_new_shape, pred_box_split_axis, 4); + + auto node_pred_box_x = node_pred_box_split->output(0); + auto node_pred_box_y = node_pred_box_split->output(1); + auto node_pred_box_w = node_pred_box_split->output(2); + auto node_pred_box_h = node_pred_box_split->output(3); + + /* x,y,w,h -> x1,y1,x2,y2 */ + auto node_number_two = Constant::create(f32, {1}, {2.0f}); + auto node_half_w = std::make_shared(node_pred_box_w, node_number_two); + auto node_half_h = std::make_shared(node_pred_box_h, node_number_two); + + auto node_pred_box_x1 = + 
std::make_shared(node_pred_box_x, node_half_w); + auto node_pred_box_y1 = + std::make_shared(node_pred_box_y, node_half_h); + + auto node_pred_box_x2 = std::make_shared(node_pred_box_x, node_half_w); + auto node_pred_box_y2 = std::make_shared(node_pred_box_y, node_half_h); + + /* map normalized coords to original image */ + auto indices_height_imgsize = Constant::create(i64, {1}, {0}); + auto indices_width_imgsize = Constant::create(i64, {1}, {1}); + auto const_axis1 = Constant::create(i64, {1}, {1}); + auto node_img_height = std::make_shared( + image_size, indices_height_imgsize, const_axis1); // shape_image_size[0] + auto node_img_width = std::make_shared( + image_size, indices_width_imgsize, const_axis1); // shape_image_size[1] + + auto node_img_width_cast = std::make_shared(node_img_width, f32); + auto node_img_height_cast = std::make_shared(node_img_height, f32); + + auto squeeze_axes2 = Constant::create(i64, {1}, {2}); + auto node_pred_box_x1_reshape = std::make_shared( + node_pred_box_x1, + squeeze_axes2); // shape (N,C,1) -> (N,C) for upcomping multiply. + auto node_pred_box_y1_reshape = + std::make_shared(node_pred_box_y1, squeeze_axes2); + auto node_pred_box_x2_reshape = + std::make_shared(node_pred_box_x2, squeeze_axes2); + auto node_pred_box_y2_reshape = + std::make_shared(node_pred_box_y2, squeeze_axes2); + + auto node_pred_box_x1_squeeze = + std::make_shared(node_pred_box_x1_reshape, node_img_width_cast); + auto node_pred_box_y1_squeeze = + std::make_shared(node_pred_box_y1_reshape, node_img_height_cast); + auto node_pred_box_x2_squeeze = + std::make_shared(node_pred_box_x2_reshape, node_img_width_cast); + auto node_pred_box_y2_squeeze = + std::make_shared(node_pred_box_y2_reshape, node_img_height_cast); + + std::shared_ptr node_pred_box_result; + if (clip_bbox) + { + auto node_number_one = Constant::create(f32, {1}, {1.0}); + auto node_new_img_height = + std::make_shared(node_img_height_cast, node_number_one); + auto node_new_img_width = + std::make_shared(node_img_width_cast, node_number_one); + auto node_pred_box_x2_sub_w = std::make_shared( + node_pred_box_x2_squeeze, node_new_img_width); // x2 - (w-1) + auto node_pred_box_y2_sub_h = std::make_shared( + node_pred_box_y2_squeeze, node_new_img_height); // y2 - (h-1) + + auto max_const = std::numeric_limits::max(); + auto node_pred_box_x1_clip = + std::make_shared(node_pred_box_x1_squeeze, 0.0f, max_const); + auto node_pred_box_y1_clip = + std::make_shared(node_pred_box_y1_squeeze, 0.0f, max_const); + auto node_pred_box_x2_clip = + std::make_shared(node_pred_box_x2_sub_w, 0.0f, max_const); + auto node_pred_box_y2_clip = + std::make_shared(node_pred_box_y2_sub_h, 0.0f, max_const); + + auto node_pred_box_x2_res = std::make_shared( + node_pred_box_x2_squeeze, node_pred_box_x2_clip); + auto node_pred_box_y2_res = std::make_shared( + node_pred_box_y2_squeeze, node_pred_box_y2_clip); + + auto node_pred_box_x1_clip2 = std::make_shared( + node_pred_box_x1_clip, squeeze_axes2); // reshape back to (N,C,1) + auto node_pred_box_y1_clip2 = + std::make_shared(node_pred_box_y1_clip, squeeze_axes2); + auto node_pred_box_x2_res2 = + std::make_shared(node_pred_box_x2_res, squeeze_axes2); + auto node_pred_box_y2_res2 = + std::make_shared(node_pred_box_y2_res, squeeze_axes2); + + node_pred_box_result = + std::make_shared(OutputVector{node_pred_box_x1_clip2, + node_pred_box_y1_clip2, + node_pred_box_x2_res2, + node_pred_box_y2_res2}, + -1); // outputs=node.output('Boxes') + } + else + { + auto node_pred_box_x1_decode = std::make_shared( + 
node_pred_box_x1_squeeze, squeeze_axes2); // reshape back to (N,C,1) + auto node_pred_box_y1_decode = + std::make_shared(node_pred_box_y1_squeeze, squeeze_axes2); + auto node_pred_box_x2_decode = + std::make_shared(node_pred_box_x2_squeeze, squeeze_axes2); + auto node_pred_box_y2_decode = + std::make_shared(node_pred_box_y2_squeeze, squeeze_axes2); + + node_pred_box_result = + std::make_shared(OutputVector{node_pred_box_x1_decode, + node_pred_box_y1_decode, + node_pred_box_x2_decode, + node_pred_box_y2_decode}, + -1); // outputs=node.output('Boxes') + } + + // + auto node_score_new_shape = std::make_shared( + node_score, score_shape, false); // outputs=node.output('Scores') + + NamedOutputs outputs; + outputs["Boxes"] = {node_pred_box_result}; + outputs["Scores"] = {node_score_new_shape}; + return outputs; + } + + } // namespace op + } // namespace pdpd + } // namespace frontend +} // namespace ngraph diff --git a/ngraph/frontend/paddlepaddle/src/op_table.cpp b/ngraph/frontend/paddlepaddle/src/op_table.cpp index 5e9d4dc257c3e9..668dc3af942a38 100644 --- a/ngraph/frontend/paddlepaddle/src/op_table.cpp +++ b/ngraph/frontend/paddlepaddle/src/op_table.cpp @@ -16,7 +16,6 @@ namespace ngraph OP_CONVERTER(assign_value); OP_CONVERTER(batch_norm); OP_CONVERTER(bilinear_interp_v2); - OP_CONVERTER(matmul); OP_CONVERTER(cast); OP_CONVERTER(clip); OP_CONVERTER(concat); @@ -43,14 +42,28 @@ namespace ngraph OP_CONVERTER(log); OP_CONVERTER(logical_not); OP_CONVERTER(matmul); + OP_CONVERTER(mul); OP_CONVERTER(matrix_nms); OP_CONVERTER(multiclass_nms); OP_CONVERTER(nearest_interp_v2); - OP_CONVERTER(rnn); + OP_CONVERTER(pad3d); + OP_CONVERTER(pow); + OP_CONVERTER(pool2d); + OP_CONVERTER(range); OP_CONVERTER(relu); + OP_CONVERTER(relu6); + OP_CONVERTER(reshape2); + OP_CONVERTER(rnn); OP_CONVERTER(scale); + OP_CONVERTER(shape); + OP_CONVERTER(slice); + OP_CONVERTER(softmax); + OP_CONVERTER(sigmoid); OP_CONVERTER(split); + OP_CONVERTER(squeeze); OP_CONVERTER(transpose2); + OP_CONVERTER(unsqueeze); + OP_CONVERTER(yolo_box); } // namespace op } // namespace pdpd } // namespace frontend @@ -64,54 +77,68 @@ namespace ngraph { std::map get_supported_ops() { - return { - {"arg_max", op::argmax}, - {"assign_value", op::assign_value}, - {"batch_norm", op::batch_norm}, - {"bilinear_interp_v2", op::bilinear_interp_v2}, - {"bilinear_interp", op::bilinear_interp_v2}, - {"bmm", op::matmul}, - {"cast", op::cast}, - {"clip", op::clip}, - {"concat", op::concat}, - {"conv2d", op::conv2d}, - {"conv2d_transpose", op::conv2d_transpose}, - {"deformable_conv", op::deformable_conv}, - {"deformable_conv_v1", op::deformable_conv}, - {"depthwise_conv2d", op::conv2d}, - {"depthwise_conv2d_transpose", op::conv2d_transpose}, - {"dropout", op::dropout}, - {"elementwise_add", op::elementwise_add}, - {"elementwise_div", op::elementwise_div}, - {"elementwise_max", op::elementwise_max}, - {"elementwise_min", op::elementwise_min}, - {"elementwise_mul", op::elementwise_mul}, - {"elementwise_pow", op::elementwise_pow}, - {"elementwise_sub", op::elementwise_sub}, - {"equal", op::elementwise_equal}, - {"expand_v2", op::expand_v2}, - {"fill_constant_batch_size_like", op::fill_constant_batch_size_like}, - {"fill_constant", op::fill_constant}, - {"flatten_contiguous_range", op::flatten_contiguous_range}, - {"greater_equal", op::elementwise_greater_equal}, - {"hard_sigmoid", op::hard_sigmoid}, - {"hard_swish", op::hard_swish}, - {"leaky_relu", op::leaky_relu}, - {"log", op::log}, - {"logical_not", op::logical_not}, - {"matmul", op::matmul}, - 
{"matrix_nms", op::matrix_nms}, - {"multiclass_nms3", op::multiclass_nms}, - {"nearest_interp_v2", op::nearest_interp_v2}, - {"nearest_interp", op::nearest_interp_v2}, - {"rnn", op::rnn}, - {"relu", op::relu}, - {"scale", op::scale}, - {"split", op::split}, - {"transpose2", op::transpose2}, - }; + return {{"arg_max", op::argmax}, + {"assign_value", op::assign_value}, + {"batch_norm", op::batch_norm}, + {"bilinear_interp_v2", op::bilinear_interp_v2}, + {"bilinear_interp", op::bilinear_interp_v2}, + {"bmm", op::matmul}, + {"cast", op::cast}, + {"clip", op::clip}, + {"concat", op::concat}, + {"conv2d", op::conv2d}, + {"conv2d_transpose", op::conv2d_transpose}, + {"deformable_conv", op::deformable_conv}, + {"deformable_conv_v1", op::deformable_conv}, + {"depthwise_conv2d", op::conv2d}, + {"depthwise_conv2d_transpose", op::conv2d_transpose}, + {"dropout", op::dropout}, + {"elementwise_add", op::elementwise_add}, + {"elementwise_div", op::elementwise_div}, + {"elementwise_max", op::elementwise_max}, + {"elementwise_min", op::elementwise_min}, + {"elementwise_mul", op::elementwise_mul}, + {"elementwise_pow", op::elementwise_pow}, + {"elementwise_sub", op::elementwise_sub}, + {"equal", op::elementwise_equal}, + {"expand_v2", op::expand_v2}, + {"fill_constant_batch_size_like", op::fill_constant_batch_size_like}, + {"fill_constant", op::fill_constant}, + {"flatten_contiguous_range", op::flatten_contiguous_range}, + {"greater_equal", op::elementwise_greater_equal}, + {"hard_sigmoid", op::hard_sigmoid}, + {"hard_swish", op::hard_swish}, + {"leaky_relu", op::leaky_relu}, + {"log", op::log}, + {"logical_not", op::logical_not}, + {"matmul", op::matmul}, + {"max_pool2d_with_index", op::pool2d}, + {"mul", op::mul}, + {"matrix_nms", op::matrix_nms}, + {"multiclass_nms3", op::multiclass_nms}, + {"nearest_interp_v2", op::nearest_interp_v2}, + {"nearest_interp", op::nearest_interp_v2}, + {"pad3d", op::pad3d}, + {"pow", op::pow}, + {"pool2d", op::pool2d}, + {"range", op::range}, + {"relu", op::relu}, + {"relu6", op::relu6}, + {"reshape2", op::reshape2}, + {"rnn", op::rnn}, + {"scale", op::scale}, + {"shape", op::shape}, + {"slice", op::slice}, + {"softmax", op::softmax}, + {"sigmoid", op::sigmoid}, + {"split", op::split}, + {"squeeze2", op::squeeze}, + {"sync_batch_norm", op::batch_norm}, + {"transpose2", op::transpose2}, + {"unsqueeze2", op::unsqueeze}, + {"yolo_box", op::yolo_box}}; }; } // namespace pdpd } // namespace frontend -} // namespace ngraph +} // namespace ngraph \ No newline at end of file