diff --git a/python/tvm/relay/frontend/paddlepaddle.py b/python/tvm/relay/frontend/paddlepaddle.py
old mode 100644
new mode 100755
index 4927a362522e..0842cd55dae2
--- a/python/tvm/relay/frontend/paddlepaddle.py
+++ b/python/tvm/relay/frontend/paddlepaddle.py
@@ -1141,6 +1141,32 @@ def convert_mv(g, op, block):
     g.add_node(op.output("Out")[0], out)
 
 
+def convert_norm(g, op, block):
+    """Operator converter for norm."""
+
+    x = g.get_node(op.input("X")[0])
+    axis = op.attr("axis")
+    axis_l = [axis]
+    epsilon = op.attr("epsilon")
+    out = _op.nn.l2_normalize(x, epsilon, axis_l)
+    g.add_node(op.output("Out")[0], out)
+
+
+def convert_one_hot_v2(g, op, block):
+    """Operator converter for one_hot_v2."""
+
+    x = g.get_node(op.input("X")[0])
+    depth = op.attr("depth")
+    dtype = op.attr("dtype")
+    dtype = _convert_dtype_value(dtype)
+    ndim = len(infer_shape(x))
+    on_value = _op.const(1)
+    off_value = _op.const(0)
+    axis = ndim
+    out = _op.one_hot(x, on_value, off_value, depth, axis, dtype)
+    g.add_node(op.output("Out")[0], out)
+
+
 def convert_padding(g, op, block):
     """Operator converter for padding."""
 
@@ -2135,7 +2161,9 @@ def convert_unsqueeze(g, op, block):
     "mul": convert_mul,
     "mv": convert_mv,
     "nearest_interp_v2": convert_interpolate,
+    "norm": convert_norm,
     "not_equal": convert_elementwise_op,
+    "one_hot_v2": convert_one_hot_v2,
     "pad1d": convert_padding,
     "pad2d": convert_padding,
     "pad3d": convert_padding,
diff --git a/tests/python/frontend/paddlepaddle/test_forward.py b/tests/python/frontend/paddlepaddle/test_forward.py
old mode 100644
new mode 100755
index de6ea1dcf1da..cd2c0be7ef36
--- a/tests/python/frontend/paddlepaddle/test_forward.py
+++ b/tests/python/frontend/paddlepaddle/test_forward.py
@@ -1723,5 +1723,31 @@ def topk6(inputs):
     verify_model(topk6, input_data=input_data)
 
 
+@tvm.testing.uses_gpu
+def test_forward_one_hot_v2():
+    @paddle.jit.to_static
+    def one_hot_v2_1(inputs):
+        return nn.functional.one_hot(inputs, num_classes=4)
+
+    input_data = paddle.to_tensor([1, 1, 3, 0], dtype=paddle.int32)
+    verify_model(one_hot_v2_1, input_data=input_data)
+
+
+@tvm.testing.uses_gpu
+def test_forward_norm():
+    @paddle.jit.to_static
+    def norm_1(inputs):
+        return paddle.fluid.layers.l2_normalize(inputs, -1, 1e-12)
+
+    def norm_2(inputs):
+        return paddle.fluid.layers.l2_normalize(inputs, 1, 1e-12)
+
+    input_data = paddle.to_tensor(
+        [[[1, 2], [3, 1], [4, 5]], [[3, 1], [3, 5], [2, 4]]], dtype=paddle.float32
+    )
+    verify_model(norm_1, input_data=input_data)
+    verify_model(norm_2, input_data=input_data)
+
+
 if __name__ == "__main__":
     tvm.testing.main()
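
For reference, a minimal sketch (not part of the patch) of how both new converters can be exercised outside the test suite: trace a small Paddle function through paddle.jit.save/load and import the result with relay.frontend.from_paddle, mirroring what verify_model does internally. The function name norm_and_one_hot, the save path ./demo_model, and the toy shapes are illustrative assumptions only.

    # Hypothetical end-to-end check for the new "norm" and "one_hot_v2" converters.
    import paddle
    from tvm import relay


    @paddle.jit.to_static
    def norm_and_one_hot(x, idx):
        # fluid's l2_normalize lowers to the Paddle "norm" op -> convert_norm
        y = paddle.fluid.layers.l2_normalize(x, -1, 1e-12)
        # one_hot on an integer tensor lowers to "one_hot_v2" -> convert_one_hot_v2
        z = paddle.nn.functional.one_hot(idx, num_classes=4)
        return y, z


    input_spec = [
        paddle.static.InputSpec(shape=[2, 3], dtype="float32", name="x"),
        paddle.static.InputSpec(shape=[2], dtype="int64", name="idx"),
    ]
    paddle.jit.save(norm_and_one_hot, "./demo_model", input_spec=input_spec)
    layer = paddle.jit.load("./demo_model")

    # from_paddle takes the loaded TranslatedLayer plus an input shape dict.
    mod, params = relay.frontend.from_paddle(layer, {"x": [2, 3], "idx": [2]})
    print(mod)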