Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

【PaddlePaddle Hackathon 3】Add Paddle box_coder operator #12394

Merged
merged 3 commits into from
Nov 22, 2022
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
95 changes: 95 additions & 0 deletions src/frontends/paddle/src/op/box_coder.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,95 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0

#include "default_opset.hpp"
#include "openvino/frontend/paddle/node_context.hpp"

namespace ov {
namespace frontend {
namespace paddle {
namespace op {
NamedOutputs box_coder(const NodeContext& node) {
    // Decode-mode box_coder: reconstructs boxes in corner form
    // [xmin, ymin, xmax, ymax] from encoded targets, prior boxes and variances.
    auto prior_box = node.get_input("PriorBox");
    auto target_box = node.get_input("TargetBox");
    const auto axis = node.get_attribute<int32_t>("axis", 0);
    const auto box_normalized = node.get_attribute<bool>("box_normalized", true);
    const auto unsqueeze_axis = default_opset::Constant::create(ov::element::i64, {1}, {axis});

    // Variances either come in as the optional "PriorBoxVar" tensor input
    // (aligned with the prior boxes via an unsqueeze on `axis`) or as the
    // "variance" attribute, materialized as a [1, 4] constant.
    Output<Node> variance;
    if (node.has_input("PriorBoxVar")) {
        variance = node.get_input("PriorBoxVar");
        variance = std::make_shared<default_opset::Unsqueeze>(variance, unsqueeze_axis);
    } else {
        const auto var_vector = node.get_attribute<std::vector<float>>("variance", {1.0, 1.0, 1.0, 1.0});
        variance = default_opset::Constant::create(ov::element::f32, {1, 4}, var_vector);
    }
    const auto code_type = node.get_attribute<std::string>("code_type");
    PADDLE_OP_CHECK(node, (code_type == "decode_center_size"), "Currently only support decode mode!");

    // Bring the prior boxes to the same shape as the target boxes.
    const auto target_shape = std::make_shared<default_opset::ShapeOf>(target_box);
    prior_box = std::make_shared<default_opset::Unsqueeze>(prior_box, unsqueeze_axis);
    prior_box = std::make_shared<default_opset::Broadcast>(prior_box, target_shape);

    // Split each tensor into its 4 box components.
    const auto coord_axis = default_opset::Constant::create(element::i64, Shape{}, {2});
    const auto last_axis = default_opset::Constant::create(element::i64, Shape{}, {-1});
    const auto prior = std::make_shared<default_opset::Split>(prior_box, coord_axis, 4);  // pxmin, pymin, pxmax, pymax
    const auto target = std::make_shared<default_opset::Split>(target_box, coord_axis, 4);  // tx, ty, tw, th
    const auto var = std::make_shared<default_opset::Split>(variance, last_axis, 4);  // pxv, pyv, pwv, phv

    const auto one = default_opset::Constant::create(ov::element::f32, {1}, {1});
    const auto two = default_opset::Constant::create(ov::element::f32, {1}, {2});

    // Convert priors from corner form to center form [x, y, w, h].
    Output<Node> prior_w = std::make_shared<default_opset::Subtract>(prior->output(2), prior->output(0));
    Output<Node> prior_h = std::make_shared<default_opset::Subtract>(prior->output(3), prior->output(1));
    if (!box_normalized) {
        // Unnormalized boxes include the max coordinate, hence the +1.
        prior_w = std::make_shared<default_opset::Add>(prior_w, one);
        prior_h = std::make_shared<default_opset::Add>(prior_h, one);
    }
    const Output<Node> prior_cx =
        std::make_shared<default_opset::Add>(prior->output(0),
                                             std::make_shared<default_opset::Divide>(prior_w, two));
    const Output<Node> prior_cy =
        std::make_shared<default_opset::Add>(prior->output(1),
                                             std::make_shared<default_opset::Divide>(prior_h, two));

    const OutputVector prior_center{prior_cx, prior_cy};
    const OutputVector prior_size{prior_w, prior_h};
    OutputVector half_size(2), box_center(2);  // w/2, h/2 and decoded ox, oy
    for (int i = 0; i < 2; ++i) {
        // half extent: exp(t_size * size_var) * prior_size / 2
        Output<Node> extent =
            std::make_shared<default_opset::Multiply>(target->output(2 + i), var->output(2 + i));
        extent = std::make_shared<default_opset::Exp>(extent);
        extent = std::make_shared<default_opset::Multiply>(extent, prior_size[i]);
        half_size[i] = std::make_shared<default_opset::Divide>(extent, two);
        // decoded center: prior_center + prior_size * center_var * t_center
        Output<Node> center = std::make_shared<default_opset::Multiply>(prior_size[i],
                                                                        var->output(i));  // pw * pxv or ph * pyv
        center = std::make_shared<default_opset::Multiply>(center, target->output(i));  // pw * pxv * tx or ph * pyv * ty
        box_center[i] =
            std::make_shared<default_opset::Add>(center, prior_center[i]);  // px + pw * pxv * tx or py + ph * pyv * ty
    }

    // Back to corner form; the inclusive-coordinate convention is undone here.
    OutputVector corners(4);
    corners[0] = std::make_shared<default_opset::Subtract>(box_center[0], half_size[0]);
    corners[1] = std::make_shared<default_opset::Subtract>(box_center[1], half_size[1]);
    corners[2] = std::make_shared<default_opset::Add>(box_center[0], half_size[0]);
    corners[3] = std::make_shared<default_opset::Add>(box_center[1], half_size[1]);
    if (!box_normalized) {
        corners[2] = std::make_shared<default_opset::Subtract>(corners[2], one);
        corners[3] = std::make_shared<default_opset::Subtract>(corners[3], one);
    }

    return node.default_single_output_mapping({std::make_shared<default_opset::Concat>(corners, -1)}, {"OutputBox"});
}

} // namespace op
} // namespace paddle
} // namespace frontend
} // namespace ov
2 changes: 2 additions & 0 deletions src/frontends/paddle/src/op_table.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@ OP_CONVERTER(assign_value);
OP_CONVERTER(batch_norm);
OP_CONVERTER(bicubic_interp_v2);
OP_CONVERTER(bilinear_interp_v2);
OP_CONVERTER(box_coder);
OP_CONVERTER(cast);
OP_CONVERTER(ceil);
OP_CONVERTER(clip);
Expand Down Expand Up @@ -121,6 +122,7 @@ std::map<std::string, CreatorFunction> get_supported_ops() {
{"bilinear_interp_v2", op::bilinear_interp_v2},
{"bilinear_interp", op::bilinear_interp_v2},
{"bmm", op::matmul},
{"box_coder", op::box_coder},
{"cast", op::cast},
{"ceil", op::ceil},
{"clip", op::clip},
Expand Down
3 changes: 3 additions & 0 deletions src/frontends/paddle/tests/op_fuzzy.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -60,6 +60,9 @@ static const std::vector<std::string> models{
std::string("bilinear_upsample_scales2"),
std::string("bilinear_upsample_true_0"),
std::string("bmm"),
std::string("box_coder_1"),
std::string("box_coder_2"),
std::string("box_coder_3"),
std::string("ceil"),
std::string("clip"),
// 95436: sporadic failure
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,119 @@
import sys

import numpy as np
import paddle
from ops import box_coder

from save_model import exportModel, saveModel


def test_box_coder(name: str, prior_box, prior_box_var, target_box, code_type, box_normalized, axis, dtype):
    """Build a static-graph box_coder model, run it on CPU, save it for the
    fuzzy tests, and return the paddle output."""
    paddle.enable_static()
    # A plain list means the variances are an op attribute, not a tensor input.
    var_is_tensor = not isinstance(prior_box_var, list)
    main_prog = paddle.static.Program()
    startup_prog = paddle.static.Program()
    with paddle.static.program_guard(main_prog, startup_prog):

        prior_node = paddle.static.data(name='prior_box',
                                        shape=prior_box.shape,
                                        dtype=dtype)
        target_node = paddle.static.data(name='target_box',
                                         shape=target_box.shape,
                                         dtype=dtype)
        if var_is_tensor:
            var_node = paddle.static.data(name='prior_box_var',
                                          shape=prior_box_var.shape,
                                          dtype=dtype)
        else:
            var_node = prior_box_var

        out = box_coder(prior_box=prior_node,
                        prior_box_var=var_node,
                        target_box=target_node,
                        code_type=code_type,
                        box_normalized=box_normalized,
                        axis=axis)

        exe = paddle.static.Executor(paddle.static.cpu_places(1)[0])
        # startup program will call initializer to initialize the parameters.
        exe.run(paddle.static.default_startup_program())

        feed = {'prior_box': prior_box, 'target_box': target_box}
        if var_is_tensor:
            feed['prior_box_var'] = prior_box_var

        outs = exe.run(feed=feed, fetch_list=[out])

        saveModel(name, exe, feedkeys=list(feed), fetchlist=[out], inputs=list(feed.values()),
                  outputs=[outs[0]],
                  target_dir=sys.argv[1])

    return outs[0]


def main():
    """Generate the three decode-mode box_coder reference models."""
    datatype = "float32"
    code_type = "decode_center_size"

    # Case 1: variance as an attribute list, decode along axis 1.
    prior_box = np.random.random([8, 4]).astype(datatype)
    target_box = np.random.random([8, 4, 4]).astype(datatype)
    attr_variance = [0.1, 0.1, 0.1, 0.1]
    test_box_coder("box_coder_1", prior_box, attr_variance, target_box,
                   code_type, True, 1, datatype)

    # Case 2: variance as a tensor, unnormalized boxes, axis 0.
    # Note: target_box from case 1 is reused on purpose.
    prior_box = np.random.random([4, 4]).astype(datatype)
    tensor_variance = np.repeat(
        np.array([[0.1, 0.2, 0.1, 0.1]], dtype=np.float32), prior_box.shape[0], axis=0)
    test_box_coder("box_coder_2", prior_box, tensor_variance, target_box,
                   code_type, False, 0, datatype)

    # Case 3: identical to case 2 but with normalized boxes.
    test_box_coder("box_coder_3", prior_box, tensor_variance, target_box,
                   code_type, True, 0, datatype)


def box_coder_dygraph():
    """Dygraph (jit.to_static) export variants; currently unused (see __main__)."""
    paddle.disable_static()
    datatype = "float32"

    @paddle.jit.to_static
    def decode_with_attr_var(prior_box, target_box):
        # Variance from an attribute list, decode along axis 1.
        return box_coder(prior_box, [0.1, 0.1, 0.1, 0.1], target_box,
                         "decode_center_size", True, axis=1)

    exportModel("box_coder_dygraph_1",
                decode_with_attr_var,
                [paddle.rand(shape=[2, 4], dtype=datatype),
                 paddle.rand(shape=[2, 4, 4], dtype=datatype)],
                target_dir=sys.argv[1])

    @paddle.jit.to_static
    def decode_with_tensor_var(prior_box, target_box, prior_box_var):
        # Variance as a tensor input, decode along axis 0.
        return box_coder(prior_box, prior_box_var, target_box,
                         "decode_center_size", True, axis=0)

    prior_box = paddle.rand(shape=[4, 4], dtype=datatype)
    target_box = paddle.rand(shape=[8, 4, 4], dtype=datatype)
    prior_box_var = paddle.tile(paddle.to_tensor([[0.1, 0.2, 0.1, 0.1]], dtype=datatype), [prior_box.shape[0], 1])

    exportModel("box_coder_dygraph_2",
                decode_with_tensor_var,
                [prior_box, target_box, prior_box_var],
                target_dir=sys.argv[1])


# Entry point: only the static-graph models are exported; the dygraph
# variants are intentionally left disabled.
if __name__ == "__main__":
    main()
    # box_coder_dygraph()