From a4e9fcaf66c6e031b7ae53e28e60877d217c55fb Mon Sep 17 00:00:00 2001 From: Zhang Yi Date: Fri, 30 Jul 2021 18:53:39 +0800 Subject: [PATCH] [FrontEnd]enable PDPD 16 ops conversion Part2 (#6632) * [FrontEnd]enable 16 ops conversion * [FrontEnd]fix typo in interpolate * param support tensor (#3) * Update hard_sigmoid.cpp (#4) * Yi3/fix paddle part2 * fix paddle conversion * ops forward declaration * use tile instead of broadcast * Add validation check in dropout (#8) * fix conflict * fix code style * fix transpose2 * use perm size in transpose2 * remove check in transpose2 Co-authored-by: Luo Cheng Co-authored-by: Mang Guo --- .../frontend/paddlepaddle/src/op/argmax.cpp | 2 +- .../frontend/paddlepaddle/src/op/argmax.hpp | 20 -- .../paddlepaddle/src/op/assign_value.cpp | 2 +- .../paddlepaddle/src/op/assign_value.hpp | 21 -- .../paddlepaddle/src/op/batch_norm.cpp | 2 +- .../paddlepaddle/src/op/batch_norm.hpp | 20 -- ngraph/frontend/paddlepaddle/src/op/cast.cpp | 2 +- ngraph/frontend/paddlepaddle/src/op/cast.hpp | 20 -- ngraph/frontend/paddlepaddle/src/op/clip.cpp | 2 +- ngraph/frontend/paddlepaddle/src/op/clip.hpp | 20 -- .../frontend/paddlepaddle/src/op/concat.cpp | 2 +- .../frontend/paddlepaddle/src/op/concat.hpp | 20 -- .../frontend/paddlepaddle/src/op/conv2d.cpp | 1 - .../frontend/paddlepaddle/src/op/conv2d.hpp | 21 -- .../paddlepaddle/src/op/conv2d_transpose.cpp | 26 ++ .../frontend/paddlepaddle/src/op/dropout.cpp | 45 ++++ .../paddlepaddle/src/op/elementwise_ops.cpp | 12 +- .../paddlepaddle/src/op/elementwise_ops.hpp | 26 -- .../paddlepaddle/src/op/expand_v2.cpp | 70 ++++++ .../paddlepaddle/src/op/fill_constant.cpp | 73 ++++++ .../src/op/fill_constant_batch_size_like.cpp | 127 ++++++++++ .../src/op/flatten_contiguous_range.cpp | 57 +++++ .../paddlepaddle/src/op/hard_sigmoid.cpp | 33 +++ .../paddlepaddle/src/op/hard_swish.cpp | 45 ++++ .../frontend/paddlepaddle/src/op/interp.cpp | 182 ++++++++++++++ .../paddlepaddle/src/op/leakyrelu.cpp | 27 ++ ngraph/frontend/paddlepaddle/src/op/log.cpp | 26 ++ .../paddlepaddle/src/op/logical_not.cpp | 25 ++ ngraph/frontend/paddlepaddle/src/op/lstm.cpp | 237 ++++++++++++++++++ .../frontend/paddlepaddle/src/op/matmul.cpp | 40 +++ ngraph/frontend/paddlepaddle/src/op/relu.cpp | 2 +- ngraph/frontend/paddlepaddle/src/op/relu.hpp | 21 -- ngraph/frontend/paddlepaddle/src/op/rnn.cpp | 31 +++ ngraph/frontend/paddlepaddle/src/op/scale.cpp | 2 +- ngraph/frontend/paddlepaddle/src/op/scale.hpp | 20 -- ngraph/frontend/paddlepaddle/src/op/split.cpp | 4 +- ngraph/frontend/paddlepaddle/src/op/split.hpp | 20 -- .../paddlepaddle/src/op/transpose2.cpp | 29 +++ ngraph/frontend/paddlepaddle/src/op_table.cpp | 120 ++++++--- 39 files changed, 1186 insertions(+), 269 deletions(-) delete mode 100644 ngraph/frontend/paddlepaddle/src/op/argmax.hpp delete mode 100644 ngraph/frontend/paddlepaddle/src/op/assign_value.hpp delete mode 100644 ngraph/frontend/paddlepaddle/src/op/batch_norm.hpp delete mode 100644 ngraph/frontend/paddlepaddle/src/op/cast.hpp delete mode 100644 ngraph/frontend/paddlepaddle/src/op/clip.hpp delete mode 100644 ngraph/frontend/paddlepaddle/src/op/concat.hpp delete mode 100644 ngraph/frontend/paddlepaddle/src/op/conv2d.hpp create mode 100644 ngraph/frontend/paddlepaddle/src/op/conv2d_transpose.cpp create mode 100644 ngraph/frontend/paddlepaddle/src/op/dropout.cpp delete mode 100644 ngraph/frontend/paddlepaddle/src/op/elementwise_ops.hpp create mode 100644 ngraph/frontend/paddlepaddle/src/op/expand_v2.cpp create mode 100644 
ngraph/frontend/paddlepaddle/src/op/fill_constant.cpp create mode 100644 ngraph/frontend/paddlepaddle/src/op/fill_constant_batch_size_like.cpp create mode 100644 ngraph/frontend/paddlepaddle/src/op/flatten_contiguous_range.cpp create mode 100644 ngraph/frontend/paddlepaddle/src/op/hard_sigmoid.cpp create mode 100644 ngraph/frontend/paddlepaddle/src/op/hard_swish.cpp create mode 100644 ngraph/frontend/paddlepaddle/src/op/interp.cpp create mode 100644 ngraph/frontend/paddlepaddle/src/op/leakyrelu.cpp create mode 100644 ngraph/frontend/paddlepaddle/src/op/log.cpp create mode 100644 ngraph/frontend/paddlepaddle/src/op/logical_not.cpp create mode 100644 ngraph/frontend/paddlepaddle/src/op/lstm.cpp create mode 100644 ngraph/frontend/paddlepaddle/src/op/matmul.cpp delete mode 100644 ngraph/frontend/paddlepaddle/src/op/relu.hpp create mode 100644 ngraph/frontend/paddlepaddle/src/op/rnn.cpp delete mode 100644 ngraph/frontend/paddlepaddle/src/op/scale.hpp delete mode 100644 ngraph/frontend/paddlepaddle/src/op/split.hpp create mode 100644 ngraph/frontend/paddlepaddle/src/op/transpose2.cpp diff --git a/ngraph/frontend/paddlepaddle/src/op/argmax.cpp b/ngraph/frontend/paddlepaddle/src/op/argmax.cpp index 7d8c069031d07f..ac63500d59bb8d 100644 --- a/ngraph/frontend/paddlepaddle/src/op/argmax.cpp +++ b/ngraph/frontend/paddlepaddle/src/op/argmax.cpp @@ -2,8 +2,8 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "argmax.hpp" #include +#include namespace ngraph { diff --git a/ngraph/frontend/paddlepaddle/src/op/argmax.hpp b/ngraph/frontend/paddlepaddle/src/op/argmax.hpp deleted file mode 100644 index 20d9db406be0cf..00000000000000 --- a/ngraph/frontend/paddlepaddle/src/op/argmax.hpp +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once -#include "node_context.hpp" - -namespace ngraph -{ - namespace frontend - { - namespace pdpd - { - namespace op - { - NamedOutputs argmax(const NodeContext& node); - } - } // namespace pdpd - } // namespace frontend -} // namespace ngraph \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/assign_value.cpp b/ngraph/frontend/paddlepaddle/src/op/assign_value.cpp index fb503abbba80e8..490acde7155401 100644 --- a/ngraph/frontend/paddlepaddle/src/op/assign_value.cpp +++ b/ngraph/frontend/paddlepaddle/src/op/assign_value.cpp @@ -2,8 +2,8 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "assign_value.hpp" #include +#include namespace ngraph { namespace frontend diff --git a/ngraph/frontend/paddlepaddle/src/op/assign_value.hpp b/ngraph/frontend/paddlepaddle/src/op/assign_value.hpp deleted file mode 100644 index b954b3a04cce50..00000000000000 --- a/ngraph/frontend/paddlepaddle/src/op/assign_value.hpp +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "node_context.hpp" - -namespace ngraph -{ - namespace frontend - { - namespace pdpd - { - namespace op - { - NamedOutputs assign_value(const NodeContext& node); - } - } // namespace pdpd - } // namespace frontend -} // namespace ngraph diff --git a/ngraph/frontend/paddlepaddle/src/op/batch_norm.cpp b/ngraph/frontend/paddlepaddle/src/op/batch_norm.cpp index c38c4189fa04a0..54bcc85dcfacbf 100644 --- a/ngraph/frontend/paddlepaddle/src/op/batch_norm.cpp +++ b/ngraph/frontend/paddlepaddle/src/op/batch_norm.cpp @@ -2,8 +2,8 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "batch_norm.hpp" #include +#include namespace ngraph { 
diff --git a/ngraph/frontend/paddlepaddle/src/op/batch_norm.hpp b/ngraph/frontend/paddlepaddle/src/op/batch_norm.hpp deleted file mode 100644 index 3757421bba65f5..00000000000000 --- a/ngraph/frontend/paddlepaddle/src/op/batch_norm.hpp +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once -#include "node_context.hpp" - -namespace ngraph -{ - namespace frontend - { - namespace pdpd - { - namespace op - { - NamedOutputs batch_norm(const NodeContext& node); - } - } // namespace pdpd - } // namespace frontend -} // namespace ngraph \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/cast.cpp b/ngraph/frontend/paddlepaddle/src/op/cast.cpp index 2cb181f0b24158..db1161f94e1b93 100644 --- a/ngraph/frontend/paddlepaddle/src/op/cast.cpp +++ b/ngraph/frontend/paddlepaddle/src/op/cast.cpp @@ -2,8 +2,8 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "cast.hpp" #include +#include namespace ngraph { diff --git a/ngraph/frontend/paddlepaddle/src/op/cast.hpp b/ngraph/frontend/paddlepaddle/src/op/cast.hpp deleted file mode 100644 index 1e3a19aaf5975c..00000000000000 --- a/ngraph/frontend/paddlepaddle/src/op/cast.hpp +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once -#include "node_context.hpp" - -namespace ngraph -{ - namespace frontend - { - namespace pdpd - { - namespace op - { - NamedOutputs cast(const NodeContext& node); - } - } // namespace pdpd - } // namespace frontend -} // namespace ngraph \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/clip.cpp b/ngraph/frontend/paddlepaddle/src/op/clip.cpp index 1909e392eaf2f8..61996414b211c1 100644 --- a/ngraph/frontend/paddlepaddle/src/op/clip.cpp +++ b/ngraph/frontend/paddlepaddle/src/op/clip.cpp @@ -2,8 +2,8 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "clip.hpp" #include +#include namespace ngraph { diff --git a/ngraph/frontend/paddlepaddle/src/op/clip.hpp b/ngraph/frontend/paddlepaddle/src/op/clip.hpp deleted file mode 100644 index babfa2ccd95bfd..00000000000000 --- a/ngraph/frontend/paddlepaddle/src/op/clip.hpp +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once -#include "node_context.hpp" - -namespace ngraph -{ - namespace frontend - { - namespace pdpd - { - namespace op - { - NamedOutputs clip(const NodeContext& node); - } - } // namespace pdpd - } // namespace frontend -} // namespace ngraph \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/concat.cpp b/ngraph/frontend/paddlepaddle/src/op/concat.cpp index a9c6fa6388d848..76fe9557db006e 100644 --- a/ngraph/frontend/paddlepaddle/src/op/concat.cpp +++ b/ngraph/frontend/paddlepaddle/src/op/concat.cpp @@ -2,8 +2,8 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "concat.hpp" #include +#include namespace ngraph { diff --git a/ngraph/frontend/paddlepaddle/src/op/concat.hpp b/ngraph/frontend/paddlepaddle/src/op/concat.hpp deleted file mode 100644 index 0d32fa22f6e3bd..00000000000000 --- a/ngraph/frontend/paddlepaddle/src/op/concat.hpp +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once -#include "node_context.hpp" - -namespace ngraph -{ - namespace frontend - { - namespace pdpd - { - namespace op - { - NamedOutputs concat(const NodeContext& node); - } - } // namespace pdpd - 
} // namespace frontend -} // namespace ngraph \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/conv2d.cpp b/ngraph/frontend/paddlepaddle/src/op/conv2d.cpp index 294e08134f1c27..425022b0209b5e 100644 --- a/ngraph/frontend/paddlepaddle/src/op/conv2d.cpp +++ b/ngraph/frontend/paddlepaddle/src/op/conv2d.cpp @@ -2,7 +2,6 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "conv2d.hpp" #include #include "conv2d_utils.hpp" diff --git a/ngraph/frontend/paddlepaddle/src/op/conv2d.hpp b/ngraph/frontend/paddlepaddle/src/op/conv2d.hpp deleted file mode 100644 index a2368afab9e4dc..00000000000000 --- a/ngraph/frontend/paddlepaddle/src/op/conv2d.hpp +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright (C) 2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once -#include "node_context.hpp" - -namespace ngraph -{ - namespace frontend - { - namespace pdpd - { - namespace op - { - NamedOutputs conv2d(const NodeContext& node_context); - - } - } // namespace pdpd - } // namespace frontend -} // namespace ngraph diff --git a/ngraph/frontend/paddlepaddle/src/op/conv2d_transpose.cpp b/ngraph/frontend/paddlepaddle/src/op/conv2d_transpose.cpp new file mode 100644 index 00000000000000..b30c56652d4328 --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op/conv2d_transpose.cpp @@ -0,0 +1,26 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include +#include "conv2d_utils.hpp" + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs conv2d_transpose(const NodeContext& node) + { + return conv2d_base(node); + } + + } // namespace op + } // namespace pdpd + } // namespace frontend +} // namespace ngraph diff --git a/ngraph/frontend/paddlepaddle/src/op/dropout.cpp b/ngraph/frontend/paddlepaddle/src/op/dropout.cpp new file mode 100644 index 00000000000000..b52b4eeb4c0295 --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op/dropout.cpp @@ -0,0 +1,45 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs dropout(const NodeContext& node) + { + auto data = node.get_ng_input("X"); + auto dropout_implementation = + node.get_attribute("dropout_implementation"); + PDPD_OP_VALIDATION_CHECK(node, + (dropout_implementation == "downgrade_in_infer" || + dropout_implementation == "upscale_in_train"), + "Unsupported dropout mode!"); + if (dropout_implementation == "downgrade_in_infer") + { + auto dropout_prob = ngraph::opset6::Constant::create( + ngraph::element::f32, + {1}, + {1 - node.get_attribute("dropout_prob")}); + return node.default_single_output_mapping( + {std::make_shared(data, dropout_prob)}, + {"Out"}); + } + else + { + return node.default_single_output_mapping(data.get_node_shared_ptr(), + {"Out"}); + } + } + + } // namespace op + } // namespace pdpd + } // namespace frontend +} // namespace ngraph \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/elementwise_ops.cpp b/ngraph/frontend/paddlepaddle/src/op/elementwise_ops.cpp index 9a0ef491fdaa0f..a736d238a56848 100644 --- a/ngraph/frontend/paddlepaddle/src/op/elementwise_ops.cpp +++ b/ngraph/frontend/paddlepaddle/src/op/elementwise_ops.cpp @@ -5,7 +5,7 @@ #include #include -#include "elementwise_ops.hpp" +#include namespace ngraph { @@ -96,6 +96,16 @@ namespace ngraph return elementwise_ops(node_context); } + NamedOutputs 
elementwise_equal(const NodeContext& node_context) + { + return elementwise_ops(node_context); + } + + NamedOutputs elementwise_greater_equal(const NodeContext& node_context) + { + return elementwise_ops(node_context); + } + } // namespace op } // namespace pdpd } // namespace frontend diff --git a/ngraph/frontend/paddlepaddle/src/op/elementwise_ops.hpp b/ngraph/frontend/paddlepaddle/src/op/elementwise_ops.hpp deleted file mode 100644 index 981dc927421df4..00000000000000 --- a/ngraph/frontend/paddlepaddle/src/op/elementwise_ops.hpp +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once -#include "node_context.hpp" - -namespace ngraph -{ - namespace frontend - { - namespace pdpd - { - namespace op - { - NamedOutputs elementwise_add(const NodeContext& node_context); - NamedOutputs elementwise_sub(const NodeContext& node_context); - NamedOutputs elementwise_mul(const NodeContext& node_context); - NamedOutputs elementwise_div(const NodeContext& node_context); - NamedOutputs elementwise_min(const NodeContext& node_context); - NamedOutputs elementwise_max(const NodeContext& node_context); - NamedOutputs elementwise_pow(const NodeContext& node_context); - } // namespace op - } // namespace pdpd - } // namespace frontend -} // namespace ngraph diff --git a/ngraph/frontend/paddlepaddle/src/op/expand_v2.cpp b/ngraph/frontend/paddlepaddle/src/op/expand_v2.cpp new file mode 100644 index 00000000000000..bc2bc1d233e59d --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op/expand_v2.cpp @@ -0,0 +1,70 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include +#include + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs expand_v2(const NodeContext& node) + { + auto x = node.get_ng_input("X"); + Output shape_expected_node; + if (node.has_ng_input("Shape")) + { + shape_expected_node = node.get_ng_input("Shape"); + } + else if (node.has_ng_input("expand_shapes_tensor")) + { + auto inputs = node.get_ng_inputs("expand_shapes_tensor"); + ngraph::NodeVector node_vec; + for (auto& input : inputs) + { + auto cast = + std::make_shared(input, element::i32); + node_vec.push_back(cast); + } + shape_expected_node = std::make_shared(node_vec, 0); + } + else + { + std::vector shape_expected; + if (node.has_attribute>("shape")) + { + shape_expected = node.get_attribute>("shape"); + } + else + { + throw std::runtime_error("expand: has no shape attribute"); + } + shape_expected_node = ngraph::opset6::Constant::create( + ngraph::element::i32, {shape_expected.size()}, shape_expected); + } + // if -1 in shape we will copy the original value from input + auto zero_node = + ngraph::opset6::Constant::create(ngraph::element::i32, {1}, {0}); + auto mask_node = + std::make_shared(shape_expected_node, zero_node); + auto input_shape_node = + std::make_shared(x, element::i32); + auto fixed_shape_node = std::make_shared( + mask_node, shape_expected_node, input_shape_node); + auto repeated_node = std::make_shared( + fixed_shape_node, input_shape_node, false); + + return node.default_single_output_mapping( + {std::make_shared(x, repeated_node)}, {"Out"}); + } + + } // namespace op + } // namespace pdpd + } // namespace frontend +} // namespace ngraph \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/fill_constant.cpp b/ngraph/frontend/paddlepaddle/src/op/fill_constant.cpp new file mode 100644 index 
00000000000000..426dbf316420b1 --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op/fill_constant.cpp @@ -0,0 +1,73 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs fill_constant(const NodeContext& node) + { + auto shape = node.get_attribute>("shape"); + auto dtype = node.get_attribute("dtype"); + Output value_node; + Output shape_node; + if (node.has_ng_input("ValueTensor")) + { + value_node = node.get_ng_input("ValueTensor"); + } + else if (dtype == element::i32) + { + int32_t value = static_cast(node.get_attribute("value")); + value_node = opset6::Constant::create(dtype, {1}, {value}); + } + else if (dtype == element::f32) + { + float value = node.get_attribute("value"); + value_node = opset6::Constant::create(dtype, {1}, {value}); + } + else if (dtype == element::i64) + { + int64_t value = static_cast(node.get_attribute("value")); + value_node = opset6::Constant::create(dtype, {1}, {value}); + } + else + { + PDPD_ASSERT(false, "fill_constant only supports i32, f32, i64"); + } + + PDPD_ASSERT(shape.size() > 0 || node.has_ng_input("ShapeTensor") || + node.has_ng_input("ShapeTensorList"), + "fill_constant shape not set"); + + if (node.has_ng_input("ShapeTensor")) + { + shape_node = node.get_ng_input("ShapeTensor"); + } + else if (node.has_ng_input("ShapeTensorList")) + { + auto shape_tensor_list = node.get_ng_inputs("ShapeTensorList"); + shape_node = + Output{std::make_shared(shape_tensor_list, 0)}; + } + else + { + shape_node = opset6::Constant::create(element::i64, {shape.size()}, shape); + } + + return node.default_single_output_mapping( + {std::make_shared(value_node, shape_node)}, + {"Out"}); + } + + } // namespace op + } // namespace pdpd + } // namespace frontend +} // namespace ngraph diff --git a/ngraph/frontend/paddlepaddle/src/op/fill_constant_batch_size_like.cpp b/ngraph/frontend/paddlepaddle/src/op/fill_constant_batch_size_like.cpp new file mode 100644 index 00000000000000..954168ce9f9718 --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op/fill_constant_batch_size_like.cpp @@ -0,0 +1,127 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include +#include +#include + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + static std::shared_ptr get_val(int32_t idx, const Output& data) + { + auto startsNode = ngraph::opset6::Constant::create(element::i32, {1}, {idx}); + auto endsNode = ngraph::opset6::Constant::create(element::i32, {1}, {idx + 1}); + auto stridesNode = ngraph::opset6::Constant::create(element::i32, {1}, {1}); + return std::make_shared( + data, + startsNode, + endsNode, + stridesNode, + std::vector(1, 0), + std::vector(1, 0)); + } + + static std::shared_ptr set_val(int32_t idx, + std::shared_ptr val_node, + std::shared_ptr array_node) + { + NodeVector nodes; + if (idx > 0) + { + // [0, idx) + auto startsNode = ngraph::opset6::Constant::create(element::i32, {1}, {0}); + auto endsNode = ngraph::opset6::Constant::create(element::i32, {1}, {idx}); + auto stridesNode = ngraph::opset6::Constant::create(element::i32, {1}, {1}); + auto head = std::make_shared( + array_node, + startsNode, + endsNode, + stridesNode, + std::vector(1, 0), + std::vector(1, 0)); + nodes.push_back(head); + } + nodes.push_back(val_node); + // [idx + 1, max) + auto startsNode = + ngraph::opset6::Constant::create(element::i32, {1}, {idx 
+ 1}); + auto endsNode = ngraph::opset6::Constant::create(element::i32, {1}, {INT_MAX}); + auto stridesNode = ngraph::opset6::Constant::create(element::i32, {1}, {1}); + auto tail = + std::make_shared(array_node, + startsNode, + endsNode, + stridesNode, + std::vector(1, 0), + std::vector(1, 0)); + nodes.push_back(tail); + + return std::make_shared(nodes, 0); + } + + static Output get_seed_node(const NodeContext& node) + { + auto dtype = node.get_attribute("dtype"); + Output val_node; + auto str_value = node.get_attribute("str_value"); + switch (dtype) + { + case element::i32: + val_node = + ngraph::opset6::Constant::create(dtype, {1}, {std::stoi(str_value)}); + break; + case element::i64: + val_node = + ngraph::opset6::Constant::create(dtype, {1}, {std::stoll(str_value)}); + break; + case element::f32: + val_node = + ngraph::opset6::Constant::create(dtype, {1}, {std::stof(str_value)}); + break; + case element::f64: + val_node = + ngraph::opset6::Constant::create(dtype, {1}, {std::stod(str_value)}); + break; + default: + throw std::runtime_error( + "fill_constant_batch_size_like: dtype value is invalid"); + } + + return val_node; + } + + NamedOutputs fill_constant_batch_size_like(const NodeContext& node) + { + auto input_dim_idx = node.get_attribute("input_dim_idx"); + auto output_dim_idx = node.get_attribute("output_dim_idx"); + auto shapes = node.get_attribute>("shape"); + auto input = node.get_ng_input("Input"); + auto input_shape = + std::make_shared(input, element::i32); + // 1, cat the array: + // shape[0, shape[output_dim_idx]) + input_shape[input_dim_idx] + + // shape[shape[output_dim_idx + 1], -1] + auto input_val_node = get_val(input_dim_idx, input_shape); + auto shapes_node = ngraph::opset6::Constant::create( + ngraph::element::i32, {shapes.size()}, shapes); + auto shape_node = set_val(output_dim_idx, input_val_node, shapes_node); + + // 2, use the shape broadcast the node + auto val_node = get_seed_node(node); + return node.default_single_output_mapping( + {std::make_shared(val_node, shape_node)}, + {"Out"}); + } + + } // namespace op + } // namespace pdpd + } // namespace frontend +} // namespace ngraph \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/flatten_contiguous_range.cpp b/ngraph/frontend/paddlepaddle/src/op/flatten_contiguous_range.cpp new file mode 100644 index 00000000000000..e5702fa178526f --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op/flatten_contiguous_range.cpp @@ -0,0 +1,57 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include +#include + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs flatten_contiguous_range(const NodeContext& node) + { + auto x_node = node.get_ng_input("X"); + auto shape_of_x = std::make_shared(x_node); + int dims = x_node.get_partial_shape().rank().get_length(); + auto start_axis = node.get_attribute("start_axis"); + auto stop_axis = node.get_attribute("stop_axis"); + + auto axis1_begin = opset6::Constant::create(element::i64, {1}, {0}); + auto axis1_end = opset6::Constant::create(element::i64, {1}, {start_axis}); + auto axis1 = std::make_shared(shape_of_x, + axis1_begin, + axis1_end, + std::vector{0}, + std::vector{0}); + OutputVector axes{axis1, + opset6::Constant::create(element::i64, Shape{1}, {-1.0})}; + + if (stop_axis < dims - 1) + { + auto axis2_begin = + opset6::Constant::create(element::i64, {1}, {stop_axis + 1}); + auto axis2_end = opset6::Constant::create(element::i64, {1}, 
{dims}); + auto axis2_node = + std::make_shared(shape_of_x, + axis2_begin, + axis2_end, + std::vector{0}, + std::vector{0}); + axes.push_back(axis2_node); + } + + auto new_shape_node = std::make_shared(axes, 0); + return node.default_single_output_mapping( + {std::make_shared(x_node, new_shape_node, false)}, + {"Out"}); + } + } // namespace op + } // namespace pdpd + } // namespace frontend +} // namespace ngraph diff --git a/ngraph/frontend/paddlepaddle/src/op/hard_sigmoid.cpp b/ngraph/frontend/paddlepaddle/src/op/hard_sigmoid.cpp new file mode 100644 index 00000000000000..cd52f83eda4156 --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op/hard_sigmoid.cpp @@ -0,0 +1,33 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include +#include + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs hard_sigmoid(const NodeContext& node) + { + auto data = node.get_ng_input("X"); + auto dtype = data.get_element_type(); + float slope = node.get_attribute("slope", 0.2f); + float offset = node.get_attribute("offset", 0.5f); + auto alpha = ngraph::opset6::Constant::create(dtype, Shape{}, {slope}); + auto beta = ngraph::opset6::Constant::create(dtype, Shape{}, {offset}); + return node.default_single_output_mapping( + {std::make_shared(data, alpha, beta)}, + {"Out"}); + } + + } // namespace op + } // namespace pdpd + } // namespace frontend +} // namespace ngraph diff --git a/ngraph/frontend/paddlepaddle/src/op/hard_swish.cpp b/ngraph/frontend/paddlepaddle/src/op/hard_swish.cpp new file mode 100644 index 00000000000000..b981dfbe402813 --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op/hard_swish.cpp @@ -0,0 +1,45 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include +#include + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs hard_swish(const NodeContext& node) + { + auto data = node.get_ng_input("X"); + if (node.has_attribute("threshold")) + { + auto threshold = node.get_attribute("threshold"); + PDPD_ASSERT(std::abs(threshold - 6.0) < 0.001, + "hard_swish: Only threshold = 6.0 is currently supported"); + } + if (node.has_attribute("scale")) + { + auto scale = node.get_attribute("scale"); + PDPD_ASSERT(std::abs(scale - 6.0) < 0.001, + "hard_swish: Only scale = 6.0 is currently supported"); + } + if (node.has_attribute("offset")) + { + auto offset = node.get_attribute("offset"); + PDPD_ASSERT(std::abs(offset - 3.0) < 0.001, + "hard_swish: Only offset = 3.0 is currently supported"); + } + return node.default_single_output_mapping( + {std::make_shared(data)}, {"Out"}); + } + + } // namespace op + } // namespace pdpd + } // namespace frontend +} // namespace ngraph \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/interp.cpp b/ngraph/frontend/paddlepaddle/src/op/interp.cpp new file mode 100644 index 00000000000000..6c0f3b9c6804df --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op/interp.cpp @@ -0,0 +1,182 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + std::shared_ptr + calculate_output_shape_based_on_scales(const Output& data, + const std::vector& scale, + Output& scales) + { + FRONT_END_GENERAL_CHECK(scale.size() > 0); + if (scale.size() == 1) + scales = opset6::Constant::create( + element::f32, 
Shape{4}, {1, 1, scale[0], scale[0]}); + else if (scale.size() == 2) + scales = opset6::Constant::create( + element::f32, Shape{4}, {1, 1, scale[0], scale[1]}); + else if (scale.size() == 3) + scales = opset6::Constant::create( + element::f32, Shape{4}, {1, scale[0], scale[1], scale[2]}); + else + scales = opset6::Constant::create( + element::f32, + Shape{scale.size()}, + std::vector(scale.begin(), scale.end())); + const auto shape_of_data = std::make_shared( + std::make_shared(data), scales.get_element_type()); + const auto multiply = std::make_shared(shape_of_data, scales); + const auto output_shape = + std::make_shared(multiply, ngraph::element::i64); + + return output_shape; + } + + std::shared_ptr + calculate_scales_based_on_sizes(const Output& data, + const Output& sizes) + { + const float epsilon = 1.0e-5; + const auto shape_of_data = std::make_shared( + std::make_shared(data), ngraph::element::f32); + const auto converted_sizes = + std::make_shared(sizes, ngraph::element::f32); + const auto divide = + std::make_shared(converted_sizes, shape_of_data); + const auto eps_node = + std::make_shared(ngraph::element::f32, Shape{}, epsilon); + const auto scales = std::make_shared(divide, eps_node); + + return scales; + } + + std::shared_ptr + extract_out_sizes(const Output& data, + const std::vector& out_sizes) + { + const auto shape_of_x = std::make_shared(data); + auto shape_begin = opset6::Constant::create(element::i64, {1}, {0}); + auto shape_end = opset6::Constant::create(element::i64, Shape{1}, {2}); + auto nc_node = std::make_shared(shape_of_x, + shape_begin, + shape_end, + std::vector{0}, + std::vector{0}); + auto hw_node = + opset6::Constant::create(element::i64, Shape{2}, out_sizes); + return std::make_shared(OutputVector{nc_node, hw_node}, 0); + } + + // TODO support different data_layout #55170 + + NamedOutputs interpolate(const NodeContext& node, + const ngraph::opset6::Interpolate::InterpolateMode& mode) + { + auto x = node.get_ng_input("X"); + using InterpolateMode = ngraph::opset6::Interpolate::InterpolateMode; + using CoordinateTransformMode = + ngraph::opset6::Interpolate::CoordinateTransformMode; + using Nearest_mode = ngraph::opset6::Interpolate::NearestMode; + using InterpolateAttrs = ngraph::opset6::Interpolate::InterpolateAttrs; + using ShapeCalcMode = ngraph::opset6::Interpolate::ShapeCalcMode; + + InterpolateAttrs attrs; + + attrs.mode = mode; + + auto out_w = node.get_attribute("out_w"); + auto out_h = node.get_attribute("out_h"); + auto scale = node.get_attribute>("scale"); + Output scales; + Output target_spatial_shape; + + if (node.has_ng_input("OutSize")) + { + attrs.shape_calculation_mode = ShapeCalcMode::sizes; + auto hw_shape = node.get_ng_input("OutSize"); + const auto shape_of_x = std::make_shared(x); + auto shape_begin = opset6::Constant::create(element::i64, {1}, {0}); + auto shape_end = opset6::Constant::create(element::i64, Shape{1}, {2}); + auto nc_node = + std::make_shared(shape_of_x, + shape_begin, + shape_end, + std::vector{0}, + std::vector{0}); + target_spatial_shape = std::make_shared( + OutputVector{nc_node, + std::make_shared(hw_shape, element::i64)}, + 0); + scales = calculate_scales_based_on_sizes(x, target_spatial_shape); + } + else if (out_w <= 0 || out_h <= 0) + { + attrs.shape_calculation_mode = ShapeCalcMode::scales; + target_spatial_shape = + calculate_output_shape_based_on_scales(x, scale, scales); + } + else + { + attrs.shape_calculation_mode = ShapeCalcMode::sizes; + target_spatial_shape = extract_out_sizes(x, {out_h, out_w}); + 
scales = calculate_scales_based_on_sizes(x, target_spatial_shape); + } + + bool align_corners = node.get_attribute("align_corners"); + int32_t align_mode = node.get_attribute("align_mode"); + + if (mode == InterpolateMode::nearest) + { + attrs.coordinate_transformation_mode = CoordinateTransformMode::asymmetric; + } + else if (!align_corners && align_mode == 1) + { + attrs.coordinate_transformation_mode = CoordinateTransformMode::asymmetric; + } + else if (!align_corners && align_mode == 0) + { + attrs.coordinate_transformation_mode = CoordinateTransformMode::half_pixel; + } + else if (align_corners) + { + attrs.coordinate_transformation_mode = + CoordinateTransformMode::align_corners; + } + + attrs.nearest_mode = Nearest_mode::simple; + attrs.antialias = false; + attrs.pads_begin = {0, 0, 0, 0}; + attrs.pads_end = {0, 0, 0, 0}; + + return node.default_single_output_mapping( + {std::make_shared( + x, target_spatial_shape, scales, attrs)}, + {"Out"}); + } + + NamedOutputs bilinear_interp_v2(const NodeContext& node) + { + auto mode = ngraph::opset6::Interpolate::InterpolateMode::linear_onnx; + return interpolate(node, mode); + } + + NamedOutputs nearest_interp_v2(const NodeContext& node) + { + auto mode = ngraph::opset6::Interpolate::InterpolateMode::nearest; + return interpolate(node, mode); + } + + } // namespace op + } // namespace pdpd + } // namespace frontend +} // namespace ngraph \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/leakyrelu.cpp b/ngraph/frontend/paddlepaddle/src/op/leakyrelu.cpp new file mode 100644 index 00000000000000..6d9daa35bd516e --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op/leakyrelu.cpp @@ -0,0 +1,27 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs leaky_relu(const NodeContext& node) + { + auto data = node.get_ng_input("X"); + auto alpha = ngraph::opset6::Constant::create( + ngraph::element::f32, {1}, {node.get_attribute("alpha")}); + return node.default_single_output_mapping( + {std::make_shared(data, alpha)}, {"Out"}); + } + } // namespace op + } // namespace pdpd + } // namespace frontend +} // namespace ngraph \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/log.cpp b/ngraph/frontend/paddlepaddle/src/op/log.cpp new file mode 100644 index 00000000000000..69dd65586e4037 --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op/log.cpp @@ -0,0 +1,26 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include +#include + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs log(const NodeContext& node) + { + auto x = node.get_ng_input("X"); + return node.default_single_output_mapping( + {std::make_shared(x)}, {"Out"}); + } + } // namespace op + } // namespace pdpd + } // namespace frontend +} // namespace ngraph \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/logical_not.cpp b/ngraph/frontend/paddlepaddle/src/op/logical_not.cpp new file mode 100644 index 00000000000000..8a07fd3bc84c55 --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op/logical_not.cpp @@ -0,0 +1,25 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs logical_not(const 
NodeContext& node) + { + auto data = node.get_ng_input("X"); + return node.default_single_output_mapping( + {std::make_shared(data)}, {"Out"}); + } + } // namespace op + } // namespace pdpd + } // namespace frontend +} // namespace ngraph diff --git a/ngraph/frontend/paddlepaddle/src/op/lstm.cpp b/ngraph/frontend/paddlepaddle/src/op/lstm.cpp new file mode 100644 index 00000000000000..3f7e71e1f01f73 --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op/lstm.cpp @@ -0,0 +1,237 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include +#include "ngraph/builder/reshape.hpp" +#include "paddlepaddle_frontend/utility.hpp" + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + namespace + { + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ INPUT NODES PARSING ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + enum class LSTMInput + { + LSTM_INPUT_X, + LSTM_INPUT_W, + LSTM_INPUT_R, + LSTM_INPUT_B, + LSTM_INPUT_SEQ_LENGTHS, + LSTM_INPUT_INIT_H, + LSTM_INPUT_INIT_C, + LSTM_INPUT_P + }; + + struct LSTMNgInputMap + { + explicit LSTMNgInputMap(const NodeContext& node, + Output& prev_output, + int layer) + { + auto input_x = builder::opset1::reorder_axes(prev_output, {1, 0, 2}); + //[begin. end) + auto weight_list = node.get_ng_inputs("WeightList"); + auto weight_begin = weight_list.begin(); + auto weight_end = std::next(weight_begin, weight_list.size() / 2); + auto bias_begin = weight_end; + int bidirect_len = node.get_attribute("is_bidirec") ? 4 : 2; + int layer_weight_start = layer * bidirect_len; + int layer_weight_end = bidirect_len + layer * bidirect_len; + int layer_bias_start = layer * bidirect_len; + int layer_bias_end = layer * bidirect_len + bidirect_len; + OutputVector layer_input_weight; + OutputVector layer_hidden_weight; + OutputVector layer_weight_bias; + OutputVector layer_hidden_bias; + + m_input_map[LSTMInput::LSTM_INPUT_X] = input_x; + // Parsing W R B + auto axis_const = + std::make_shared(element::i64, Shape{}, 0); + for (int i = layer_weight_start; i < layer_weight_end; i++) + { + auto weight_node = std::next(weight_begin, i); + if (i & 0x1) + layer_hidden_weight.push_back( + std::make_shared(*weight_node, + axis_const)); + else + layer_input_weight.push_back( + std::make_shared(*weight_node, + axis_const)); + } + + for (int i = layer_bias_start; i < layer_bias_end; i++) + { + auto weight_node = std::next(bias_begin, i); + + if (i & 0x1) + layer_hidden_bias.push_back(std::make_shared( + *weight_node, axis_const)); + else + layer_weight_bias.push_back(std::make_shared( + *weight_node, axis_const)); + } + + auto input_weight = + std::make_shared(layer_input_weight, 0); + auto hidden_weight = + std::make_shared(layer_hidden_weight, 0); + auto weight_bias = + std::make_shared(layer_weight_bias, 0); + auto hidden_bias = + std::make_shared(layer_hidden_bias, 0); + auto bias = std::make_shared(weight_bias, hidden_bias); + m_input_map[LSTMInput::LSTM_INPUT_W] = + ngraph::op::util::convert_lstm_node_format( + input_weight, + ngraph::op::util::LSTMWeightsFormat::IFCO, + ngraph::op::util::LSTMWeightsFormat::FICO, + 1); + m_input_map[LSTMInput::LSTM_INPUT_R] = + ngraph::op::util::convert_lstm_node_format( + hidden_weight, + ngraph::op::util::LSTMWeightsFormat::IFCO, + ngraph::op::util::LSTMWeightsFormat::FICO, + 1); + m_input_map[LSTMInput::LSTM_INPUT_B] = + ngraph::op::util::convert_lstm_node_format( + bias, + ngraph::op::util::LSTMWeightsFormat::IFCO, + ngraph::op::util::LSTMWeightsFormat::FICO, + 1); + + // Get dimensions needed for 
default inputs creation + // Parsing init hidden state + auto shape_of_x = std::make_shared(input_x); + + auto axes = opset6::Constant::create(element::i64, Shape{1}, {0}); + + auto batch_size_node = std::make_shared( + shape_of_x, + opset6::Constant::create(element::i64, Shape{1}, {0}), + axes); + + auto seq_length_node = std::make_shared( + shape_of_x, + opset6::Constant::create(element::i64, Shape{1}, {1}), + axes); + + // TODO Specify SEQ_LEN for each batch #55404 + m_input_map[LSTMInput::LSTM_INPUT_SEQ_LENGTHS] = + std::make_shared(seq_length_node, + batch_size_node); + + auto init_states = node.get_ng_inputs("PreState"); + // 0 for init_h, 1 for init_cell, update bidirect_len for init states + bidirect_len = node.get_attribute("is_bidirec") ? 2 : 1; + + auto h_begin = + opset6::Constant::create(element::i64, {1}, {layer * bidirect_len}); + auto h_end = opset6::Constant::create( + element::i64, Shape{1}, {layer * bidirect_len + bidirect_len}); + auto c_begin = + opset6::Constant::create(element::i64, {1}, {layer * bidirect_len}); + auto c_end = opset6::Constant::create( + element::i64, {1}, {layer * bidirect_len + bidirect_len}); + + m_input_map[LSTMInput::LSTM_INPUT_INIT_H] = + builder::opset1::reorder_axes( + std::make_shared(init_states[0], + h_begin, + h_end, + std::vector{0}, + std::vector{0}), + {1, 0, 2}); + m_input_map[LSTMInput::LSTM_INPUT_INIT_C] = + builder::opset1::reorder_axes( + std::make_shared(init_states[1], + c_begin, + c_end, + std::vector{0}, + std::vector{0}), + {1, 0, 2}); + } + + Output& at(const LSTMInput& key) + { + return m_input_map.at(key); + } + + std::map> m_input_map; + }; + + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ATTRIBUTES PARSING ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + struct LSTMAttributes + { + explicit LSTMAttributes(const NodeContext& node) + : m_direction( + node.get_attribute("is_bidirec") + ? 
ngraph::op::RecurrentSequenceDirection::BIDIRECTIONAL + : ngraph::op::RecurrentSequenceDirection::FORWARD) + , m_hidden_size(node.get_attribute("hidden_size")) + , m_layers(node.get_attribute("num_layers")) + + {}; + + ngraph::op::RecurrentSequenceDirection m_direction; + int32_t m_hidden_size; + int32_t m_layers; + }; + } // namespace + NamedOutputs lstm(const NodeContext& node) + { + auto mode = node.get_attribute("mode"); + PDPD_ASSERT(mode == "LSTM", "RNN only support LSTM now"); + auto prev_inputs = node.get_ng_inputs("Input"); + Output prev_output = prev_inputs[0]; + LSTMAttributes attrs(node); + OutputVector final_h; + OutputVector final_c; + auto axis_const = std::make_shared(element::i64, Shape{}, 0); + for (int i = 0; i < attrs.m_layers; i++) + { + LSTMNgInputMap input_map(node, prev_output, i); + auto lstm_sequence = std::make_shared( + input_map.at(LSTMInput::LSTM_INPUT_X), + input_map.at(LSTMInput::LSTM_INPUT_INIT_H), + input_map.at(LSTMInput::LSTM_INPUT_INIT_C), + input_map.at(LSTMInput::LSTM_INPUT_SEQ_LENGTHS), + input_map.at(LSTMInput::LSTM_INPUT_W), + input_map.at(LSTMInput::LSTM_INPUT_R), + input_map.at(LSTMInput::LSTM_INPUT_B), + attrs.m_hidden_size, + attrs.m_direction); + prev_output = + builder::opset1::reorder_axes(lstm_sequence->output(0), {2, 0, 1, 3}); + auto out_shape = + opset6::Constant::create(element::i64, Shape{3}, {0, 0, -1}); + prev_output = + std::make_shared(prev_output, out_shape, true); + + final_h.push_back( + builder::opset1::reorder_axes(lstm_sequence->output(1), {1, 0, 2})); + final_c.push_back( + builder::opset1::reorder_axes(lstm_sequence->output(2), {1, 0, 2})); + } + + NamedOutputs named_outputs; + named_outputs["Out"] = {prev_output}; + named_outputs["State"] = {std::make_shared(final_h, 0), + std::make_shared(final_c, 0)}; + return named_outputs; + } + + } // namespace op + } // namespace pdpd + } // namespace frontend +} // namespace ngraph diff --git a/ngraph/frontend/paddlepaddle/src/op/matmul.cpp b/ngraph/frontend/paddlepaddle/src/op/matmul.cpp new file mode 100644 index 00000000000000..a29acd6a00e5c0 --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op/matmul.cpp @@ -0,0 +1,40 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// +#include +#include + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs matmul(const NodeContext& node) + { + auto x = node.get_ng_input("X"); + auto y = node.get_ng_input("Y"); + auto alpha = node.get_attribute("alpha", 1); + auto transpose_a = node.get_attribute("transpose_X", false); + auto transpose_b = node.get_attribute("transpose_Y", false); + auto mm = + std::make_shared(x, y, transpose_a, transpose_b); + if (alpha == 1) + { + return node.default_single_output_mapping({mm}, {"Out"}); + } + else + { + auto alpha_node = + ngraph::opset6::Constant::create(ngraph::element::f32, {1}, {alpha}); + return node.default_single_output_mapping( + {std::make_shared(mm, alpha_node)}, {"Out"}); + } + } + + } // namespace op + } // namespace pdpd + } // namespace frontend +} // namespace ngraph \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/relu.cpp b/ngraph/frontend/paddlepaddle/src/op/relu.cpp index 68d1cca3203cd3..8bef2b42690c1f 100644 --- a/ngraph/frontend/paddlepaddle/src/op/relu.cpp +++ b/ngraph/frontend/paddlepaddle/src/op/relu.cpp @@ -2,8 +2,8 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "relu.hpp" #include +#include namespace ngraph { diff --git 
a/ngraph/frontend/paddlepaddle/src/op/relu.hpp b/ngraph/frontend/paddlepaddle/src/op/relu.hpp deleted file mode 100644 index 7a63e7f89d8317..00000000000000 --- a/ngraph/frontend/paddlepaddle/src/op/relu.hpp +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once -#include "node_context.hpp" - -namespace ngraph -{ - namespace frontend - { - namespace pdpd - { - namespace op - { - NamedOutputs relu(const NodeContext& node); - - } - } // namespace pdpd - } // namespace frontend -} // namespace ngraph \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/rnn.cpp b/ngraph/frontend/paddlepaddle/src/op/rnn.cpp new file mode 100644 index 00000000000000..49beafee38d752 --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op/rnn.cpp @@ -0,0 +1,31 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include +#include "paddlepaddle_frontend/utility.hpp" + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs lstm(const NodeContext& node); + NamedOutputs rnn(const NodeContext& node) + { + auto mode = node.get_attribute("mode"); + PDPD_ASSERT(mode == "LSTM", + "[Paddle Frontend]RNN Only Supports LSTM Ops Conversion now, don't " + "support " + + mode); + return lstm(node); + } + + } // namespace op + } // namespace pdpd + } // namespace frontend +} // namespace ngraph diff --git a/ngraph/frontend/paddlepaddle/src/op/scale.cpp b/ngraph/frontend/paddlepaddle/src/op/scale.cpp index bfc7637dd877c1..be5cafbaaedca6 100644 --- a/ngraph/frontend/paddlepaddle/src/op/scale.cpp +++ b/ngraph/frontend/paddlepaddle/src/op/scale.cpp @@ -2,9 +2,9 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "scale.hpp" #include #include +#include namespace ngraph { diff --git a/ngraph/frontend/paddlepaddle/src/op/scale.hpp b/ngraph/frontend/paddlepaddle/src/op/scale.hpp deleted file mode 100644 index 03c1b151c0c969..00000000000000 --- a/ngraph/frontend/paddlepaddle/src/op/scale.hpp +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once -#include "node_context.hpp" - -namespace ngraph -{ - namespace frontend - { - namespace pdpd - { - namespace op - { - NamedOutputs scale(const NodeContext& node); - } - } // namespace pdpd - } // namespace frontend -} // namespace ngraph \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/split.cpp b/ngraph/frontend/paddlepaddle/src/op/split.cpp index b1ead907c6605e..1303862468201d 100644 --- a/ngraph/frontend/paddlepaddle/src/op/split.cpp +++ b/ngraph/frontend/paddlepaddle/src/op/split.cpp @@ -2,8 +2,8 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "split.hpp" #include +#include #include namespace ngraph @@ -24,7 +24,7 @@ namespace ngraph { auto input = node.get_ng_input("AxisTensor"); auto zero_node = Constant::create(element::i32, {1}, {0}); - axis = std::make_shared(input, zero_node, false); + axis = std::make_shared(input, zero_node, false); } else { diff --git a/ngraph/frontend/paddlepaddle/src/op/split.hpp b/ngraph/frontend/paddlepaddle/src/op/split.hpp deleted file mode 100644 index 3ae3a40018fcaf..00000000000000 --- a/ngraph/frontend/paddlepaddle/src/op/split.hpp +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// -#pragma once -#include "node_context.hpp" - -namespace ngraph -{ - namespace frontend - { - 
namespace pdpd - { - namespace op - { - NamedOutputs split(const NodeContext& node); - - } // namespace op - } // namespace pdpd - } // namespace frontend -} // namespace ngraph \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/transpose2.cpp b/ngraph/frontend/paddlepaddle/src/op/transpose2.cpp new file mode 100644 index 00000000000000..1e7c1da8a50038 --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op/transpose2.cpp @@ -0,0 +1,29 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs transpose2(const NodeContext& node) + { + auto data = node.get_ng_input("X"); + auto perm = node.get_attribute>("axis"); + auto input_order = + ngraph::opset6::Constant::create(ngraph::element::i64, {perm.size()}, perm); + return node.default_single_output_mapping( + {std::make_shared(data, input_order)}, {"Out"}); + } + + } // namespace op + } // namespace pdpd + } // namespace frontend +} // namespace ngraph diff --git a/ngraph/frontend/paddlepaddle/src/op_table.cpp b/ngraph/frontend/paddlepaddle/src/op_table.cpp index 916737fc0c2ede..77ca3765efe494 100644 --- a/ngraph/frontend/paddlepaddle/src/op_table.cpp +++ b/ngraph/frontend/paddlepaddle/src/op_table.cpp @@ -1,20 +1,59 @@ // Copyright (C) 2018-2021 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // -#include "op/argmax.hpp" -#include "op/assign_value.hpp" -#include "op/batch_norm.hpp" -#include "op/cast.hpp" -#include "op/clip.hpp" -#include "op/concat.hpp" -#include "op/conv2d.hpp" -#include "op/elementwise_ops.hpp" -#include "op/relu.hpp" -#include "op/scale.hpp" -#include "op/split.hpp" #include "op_table.hpp" +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { +#define OP_CONVERTER(op) NamedOutputs op(const NodeContext& node) + OP_CONVERTER(argmax); + OP_CONVERTER(assign_value); + OP_CONVERTER(batch_norm); + OP_CONVERTER(bilinear_interp_v2); + OP_CONVERTER(matmul); + OP_CONVERTER(cast); + OP_CONVERTER(clip); + OP_CONVERTER(concat); + OP_CONVERTER(conv2d); + OP_CONVERTER(conv2d_transpose); + OP_CONVERTER(dropout); + OP_CONVERTER(elementwise_add); + OP_CONVERTER(elementwise_div); + OP_CONVERTER(elementwise_equal); + OP_CONVERTER(elementwise_greater_equal); + OP_CONVERTER(elementwise_max); + OP_CONVERTER(elementwise_min); + OP_CONVERTER(elementwise_mul); + OP_CONVERTER(elementwise_pow); + OP_CONVERTER(elementwise_sub); + OP_CONVERTER(expand_v2); + OP_CONVERTER(fill_constant_batch_size_like); + OP_CONVERTER(fill_constant); + OP_CONVERTER(flatten_contiguous_range); + OP_CONVERTER(hard_sigmoid); + OP_CONVERTER(hard_swish); + OP_CONVERTER(leaky_relu); + OP_CONVERTER(log); + OP_CONVERTER(logical_not); + OP_CONVERTER(matmul); + OP_CONVERTER(nearest_interp_v2); + OP_CONVERTER(rnn); + OP_CONVERTER(relu); + OP_CONVERTER(scale); + OP_CONVERTER(split); + OP_CONVERTER(transpose2); + } // namespace op + } // namespace pdpd + } // namespace frontend +} // namespace ngraph + namespace ngraph { namespace frontend @@ -23,23 +62,48 @@ namespace ngraph { std::map get_supported_ops() { - return {{"arg_max", op::argmax}, - {"assign_value", op::assign_value}, - {"batch_norm", op::batch_norm}, - {"cast", op::cast}, - {"clip", op::clip}, - {"concat", op::concat}, - {"conv2d", op::conv2d}, - {"elementwise_add", op::elementwise_add}, - {"elementwise_div", op::elementwise_div}, - {"elementwise_max", op::elementwise_max}, - {"elementwise_min", 
op::elementwise_min}, - {"elementwise_mul", op::elementwise_mul}, - {"elementwise_pow", op::elementwise_pow}, - {"elementwise_sub", op::elementwise_sub}, - {"relu", op::relu}, - {"scale", op::scale}, - {"split", op::split}}; + return { + {"arg_max", op::argmax}, + {"assign_value", op::assign_value}, + {"batch_norm", op::batch_norm}, + {"bilinear_interp_v2", op::bilinear_interp_v2}, + {"bilinear_interp", op::bilinear_interp_v2}, + {"bmm", op::matmul}, + {"cast", op::cast}, + {"clip", op::clip}, + {"concat", op::concat}, + {"conv2d", op::conv2d}, + {"conv2d_transpose", op::conv2d_transpose}, + {"depthwise_conv2d", op::conv2d}, + {"depthwise_conv2d_transpose", op::conv2d_transpose}, + {"dropout", op::dropout}, + {"elementwise_add", op::elementwise_add}, + {"elementwise_div", op::elementwise_div}, + {"elementwise_max", op::elementwise_max}, + {"elementwise_min", op::elementwise_min}, + {"elementwise_mul", op::elementwise_mul}, + {"elementwise_pow", op::elementwise_pow}, + {"elementwise_sub", op::elementwise_sub}, + {"equal", op::elementwise_equal}, + {"expand_v2", op::expand_v2}, + {"fill_constant_batch_size_like", op::fill_constant_batch_size_like}, + {"fill_constant", op::fill_constant}, + {"flatten_contiguous_range", op::flatten_contiguous_range}, + {"greater_equal", op::elementwise_greater_equal}, + {"hard_sigmoid", op::hard_sigmoid}, + {"hard_swish", op::hard_swish}, + {"leaky_relu", op::leaky_relu}, + {"log", op::log}, + {"logical_not", op::logical_not}, + {"matmul", op::matmul}, + {"nearest_interp_v2", op::nearest_interp_v2}, + {"nearest_interp", op::nearest_interp_v2}, + {"rnn", op::rnn}, + {"relu", op::relu}, + {"scale", op::scale}, + {"split", op::split}, + {"transpose2", op::transpose2}, + }; }; } // namespace pdpd
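The table above only wires PaddlePaddle operator-type strings to converter callbacks; every converter added in this patch follows the same NodeContext pattern (get_ng_input for inputs, get_attribute for attributes, default_single_output_mapping for the "Out" result). A minimal sketch of that pattern, using a hypothetical relu6 operator and threshold attribute that are not part of this patch:

// Hypothetical converter sketch (illustration only, not part of this commit).
// It reuses the NodeContext helpers shown in the converters above and assumes
// the same headers as the other op/*.cpp files.
#include <node_context.hpp>
#include <ngraph/opsets/opset6.hpp>

namespace ngraph { namespace frontend { namespace pdpd { namespace op {
    NamedOutputs relu6(const NodeContext& node)
    {
        // Single input tensor named "X", as in the other unary converters.
        auto data = node.get_ng_input("X");
        // Attribute read with a default, mirroring hard_sigmoid's slope/offset handling.
        auto threshold = node.get_attribute<float>("threshold", 6.0f);
        // Map onto an existing opset6 node and bind its result to the "Out" port.
        return node.default_single_output_mapping(
            {std::make_shared<ngraph::opset6::Clamp>(data, 0.0f, threshold)}, {"Out"});
    }
}}}} // namespace ngraph::frontend::pdpd::op

// Registering such a converter would be one more entry in the map returned by
// get_supported_ops():
//     {"relu6", op::relu6},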