[PaddlePaddle Hackathon 4][Frontend][Paddle] Add thresholded_relu/index_select/eye/linspace/take_along_axis/dist for paddle frontend (#14172)

Add thresholded_relu/index_select/eye/linspace/take_along_axis/dist for the paddle frontend.

Note that eye/linspace/take_along_axis are not supported in Paddle 2.1.3; the full test suite passes under Paddle 2.4.2.
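For reference, the new converters can be exercised end to end through TVM's Paddle importer. A minimal sketch, assuming Paddle >= 2.4.2 and a TVM build that includes this commit; the DistNet layer, the ./dist_net path, and the input shapes are illustrative only, not part of this change:

    import paddle
    from tvm import relay

    class DistNet(paddle.nn.Layer):
        """Tiny illustrative layer exercising the newly added dist converter."""

        @paddle.jit.to_static
        def forward(self, x, y):
            return paddle.dist(x, y, p=2)  # L2 norm of (x - y)

    net = DistNet()
    net.eval()
    # Trace to a static program, then import it into Relay.
    input_spec = [
        paddle.static.InputSpec(shape=[2, 2], dtype="float32", name="x"),
        paddle.static.InputSpec(shape=[2, 2], dtype="float32", name="y"),
    ]
    paddle.jit.save(net, "./dist_net", input_spec=input_spec)
    mod, params = relay.frontend.from_paddle(paddle.jit.load("./dist_net"))
    print(mod)  # the Relay module containing the converted dist op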
XG-zheng authored Mar 12, 2023
1 parent caf6b03 commit 6fa88e3
Showing 2 changed files with 247 additions and 0 deletions.
107 changes: 107 additions & 0 deletions python/tvm/relay/frontend/paddlepaddle.py
@@ -400,6 +400,30 @@ def convert_conv2d_transpose(g, op, block):
    g.add_node(op.output("Output")[0], out)


def convert_dist(g, op, block):
    """Operator converter for dist."""

    x = g.get_node(op.input("X")[0])
    y = g.get_node(op.input("Y")[0])
    dtype = infer_type(x).checked_type.dtype
    # z = |x - y|, so the reductions below compute the p-norm of (x - y)
    z = _op.abs(_op.subtract(x, y))
    p = op.attr("p")
    if p == np.inf:
        out = _op.reduce.max(z)
    elif p == np.NINF:
        out = _op.reduce.min(z)
    elif p == 0.0:
        # 0-"norm": the number of non-zero elements
        out = _op.reduce.sum(_op.sign(z))
    else:
        # (sum(z ** p)) ** (1 / p)
        inv_p = _expr.const(1.0 / p, dtype=dtype)
        p = _expr.const(p, dtype=dtype)
        power_z = _op.power(z, p)
        sum_pow = _op.reduce.sum(power_z)
        out = _op.power(sum_pow, inv_p)
    out = _op.full(out, shape=(1,))
    g.add_node(op.output("Out")[0], out)


def convert_cumsum(g, op, block):
"""Operator converter for cumsum."""

@@ -475,6 +499,39 @@ def convert_elementwise_op(g, op, block):
    g.add_node(op.output("Out")[0], out)


def convert_linspace(g, op, block):
    """Operator converter for linspace."""

    start = g.get_node(op.input("Start")[0])
    stop = g.get_node(op.input("Stop")[0])
    num = g.get_node(op.input("Num")[0])
    dtype = _convert_dtype_value(op.attr("dtype"))

    # Cast the endpoints to the output dtype first (matching Paddle, which
    # truncates start/stop for integer dtypes), then compute in a
    # wide-enough float dtype.
    start = _op.cast(start, dtype)
    stop = _op.cast(stop, dtype)
    num = _op.cast(num, dtype)

    if dtype in ["int32", "float32"]:
        tmp_dtype = "float32"
    else:
        tmp_dtype = "float64"
    start = _op.cast(start, tmp_dtype)
    stop = _op.cast(stop, tmp_dtype)
    num = _op.cast(num, tmp_dtype)
    const_one = _expr.const(1, tmp_dtype)
    # num - 1 segments when num > 1; keep the divisor non-zero when num == 1
    seg_num = _op.where(num > const_one, num - const_one, num)
    seg_len = _op.subtract(stop, start)
    step_len = _op.divide(seg_len, seg_num)
    # argwhere(ones(num)) yields [[0], [1], ..., [num - 1]], i.e. the step indices
    step_cnt = _op.argwhere(_op.ones(num, dtype=tmp_dtype))
    step_cnt = _op.cast(step_cnt, dtype=tmp_dtype)
    out = _op.multiply(step_len, step_cnt)
    out = _op.add(start, out)
    out = _op.squeeze(out, axis=[1])
    out = _op.cast(out, dtype)
    g.add_node(op.output("Out")[0], out)


def convert_elu(g, op, block):
"""Operator converter for elu."""

@@ -514,6 +571,27 @@ def convert_expand_as(g, op, block):
    g.add_node(op.output("Out")[0], out)


def convert_eye(g, op, block):
    """Operator converter for eye."""

    num_rows = op.attr("num_rows")
    num_columns = op.attr("num_columns")
    one_nums = min(num_rows, num_columns)
    dtype = op.attr("dtype")
    dtype = _convert_dtype_value(dtype)

    zeros = _op.zeros((num_rows, num_columns), dtype)
    if one_nums == 0:
        out = zeros
    else:
        # Scatter ones onto the main diagonal, i.e. positions (i, i)
        # for i in [0, min(num_rows, num_columns)).
        ones = _op.ones(one_nums, dtype)
        indices = _op.arange(
            _expr.const(0, dtype="int32"), _expr.const(one_nums, dtype="int32"), dtype="int32"
        )
        out = _op.scatter_nd(zeros, _op.stack([indices, indices], axis=0), ones, "update")
    g.add_node(op.output("Out")[0], out)


def convert_feed(g, op, block):
"""Converter for model input node."""

@@ -830,6 +908,16 @@ def get_interpolate_mode(op):
    g.add_node(op.output("Out")[0], out)


def convert_index_select(g, op, block):
    """Operator converter for index_select."""

    x = g.get_node(op.input("X")[0])
    index = g.get_node(op.input("Index")[0])
    axis = op.attr("dim")
    out = _op.transform.take(x, index, axis, mode="wrap")
    g.add_node(op.output("Out")[0], out)


def convert_instance_norm(g, op, block):
"""Operator converter for instance_norm."""

@@ -2072,13 +2160,27 @@ def convert_swish(g, op, block):


def convert_take_along_axis(g, op, block):
    """Operator converter for take_along_axis."""

    x = g.get_node(op.input("Input")[0])
    idx = g.get_node(op.input("Index")[0])
    axis = op.attr("Axis")
    out = _op.gather(x, axis, idx)
    g.add_node(op.output("Result")[0], out)


def convert_thresholded_relu(g, op, block):
    """Operator converter for thresholded_relu."""

    x = g.get_node(op.input("X")[0])
    dtype = infer_type(x).checked_type.dtype
    threshold = _expr.const(op.attr("threshold"), dtype)
    zero = _expr.const(0, dtype=dtype)
    out = _op.where(x > threshold, x, zero)
    g.add_node(op.output("Out")[0], out)


def convert_tile(g, op, block):
"""Operator converter for tile."""

@@ -2220,6 +2322,7 @@ def convert_where_index(g, op, block):
    "cumsum": convert_cumsum,
    "depthwise_conv2d": convert_conv2d,
    "depthwise_conv2d_transpose": convert_conv2d_transpose,
    "dist": convert_dist,
    "dot": convert_dot,
    "dropout": convert_dropout,
    "elementwise_add": convert_elementwise_op,
@@ -2238,6 +2341,7 @@ def convert_where_index(g, op, block):
    "exp": convert_unary_op,
    "expand_v2": convert_expand,
    "expand_as_v2": convert_expand_as,
    "eye": convert_eye,
    "feed": convert_feed,
    "fill_any_like": convert_fill_any_like,
    "fill_constant": convert_fill_constant,
@@ -2254,6 +2358,7 @@ def convert_where_index(g, op, block):
    "hard_shrink": convert_hard_shrink,
    "hard_sigmoid": convert_hard_sigmoid,
    "hard_swish": convert_hard_swish,
    "index_select": convert_index_select,
    "instance_norm": convert_instance_norm,
    "isfinite_v2": convert_unary_op,
    "isinf_v2": convert_unary_op,
@@ -2262,6 +2367,7 @@ def convert_where_index(g, op, block):
    "leaky_relu": convert_leaky_relu,
    "less_equal": convert_elementwise_op,
    "less_than": convert_elementwise_op,
    "linspace": convert_linspace,
    "log": convert_unary_op,
    "log2": convert_unary_op,
    "log10": convert_unary_op,
@@ -2333,6 +2439,7 @@ def convert_where_index(g, op, block):
    "tan": convert_unary_op,
    "tanh": convert_unary_op,
    "top_k": convert_topk,
    "thresholded_relu": convert_thresholded_relu,
    "tile": convert_tile,
    "top_k_v2": convert_topk,
    "transpose2": convert_transpose,
140 changes: 140 additions & 0 deletions tests/python/frontend/paddlepaddle/test_forward.py
@@ -1992,5 +1992,145 @@ def forward(self, inputs):
    verify_model(Mish(), input_data=input_data)


@tvm.testing.uses_gpu
def test_forward_thresholded_relu():
    class ThresholdedRelu1(nn.Layer):
        @paddle.jit.to_static
        def forward(self, inputs):
            return nn.functional.thresholded_relu(inputs)

    class ThresholdedRelu2(nn.Layer):
        @paddle.jit.to_static
        def forward(self, inputs):
            return nn.functional.thresholded_relu(inputs, threshold=0.5)

    input_shapes = [[10], [2, 3], [5, 10, 11], [3, 4, 5, 6]]
    for input_shape in input_shapes:
        input_data = paddle.randn(shape=input_shape, dtype="float32")
        verify_model(ThresholdedRelu1(), input_data=input_data)
        verify_model(ThresholdedRelu2(), input_data=input_data)


@tvm.testing.uses_gpu
def test_forward_index_select():
    class IndexSelect1(nn.Layer):
        @paddle.jit.to_static
        def forward(self, x, index):
            return paddle.index_select(x, index, axis=0)

    class IndexSelect2(nn.Layer):
        @paddle.jit.to_static
        def forward(self, x, index):
            return paddle.index_select(x, index, axis=-1)

    input_shapes = [[10], [2, 3], [5, 10, 11], [3, 4, 5, 6]]
    for input_shape in input_shapes:
        input_data = paddle.randn(shape=input_shape, dtype="float32")
        index = paddle.to_tensor([0, 1, 1], dtype="int32")
        verify_model(IndexSelect1(), input_data=[input_data, index])
        verify_model(IndexSelect2(), input_data=[input_data, index])


@tvm.testing.uses_gpu
def test_forward_eye():
    class Eye1(nn.Layer):
        @paddle.jit.to_static
        def forward(self, inputs):
            return paddle.eye(3, 5, dtype="int32"), paddle.eye(3, 5, dtype="float32"), inputs

    class Eye2(nn.Layer):
        @paddle.jit.to_static
        def forward(self, inputs):
            return paddle.eye(5, 3, dtype="int64"), paddle.eye(5, 3, dtype="float64"), inputs

    class Eye3(nn.Layer):
        @paddle.jit.to_static
        def forward(self, inputs):
            return paddle.eye(0, 3, dtype="int64"), paddle.eye(0, 0, dtype="float64"), inputs

    class Eye4(nn.Layer):
        @paddle.jit.to_static
        def forward(self, inputs):
            return paddle.eye(4, None, dtype="int64"), paddle.eye(4, None, dtype="float64"), inputs

    x = paddle.to_tensor([1], dtype="float32")
    verify_model(Eye1(), input_data=[x])
    verify_model(Eye2(), input_data=[x])
    verify_model(Eye3(), input_data=[x])
    verify_model(Eye4(), input_data=[x])


@tvm.testing.uses_gpu
def test_forward_linspace():
    class Linspace1(nn.Layer):
        @paddle.jit.to_static
        def forward(self, inputs):
            out1 = paddle.linspace(0.5, 7, 1, "int32")
            out2 = paddle.linspace(1.3, 7.1, 5, "float32")
            out3 = paddle.linspace(1, 1000000000, 10, "int64")
            out4 = paddle.linspace(1, 7.1, 5, "float64")
            return out1, out2, out3, out4, inputs

    class Linspace2(nn.Layer):
        @paddle.jit.to_static
        def forward(self, inputs):
            start = paddle.to_tensor([-2.5])
            stop = paddle.to_tensor([31.6])
            num = paddle.to_tensor([13])
            start = paddle.cast(start, "float32")
            stop = paddle.cast(stop, "float32")
            num = paddle.cast(num, "int32")
            out1 = paddle.linspace(start, stop, num, "int32")
            out2 = paddle.linspace(start, stop, num, "float32")
            out3 = paddle.linspace(start, stop, num, "int64")
            out4 = paddle.linspace(start, stop, num, "float64")
            return out1, out2, out3, out4, inputs

    class Linspace3(nn.Layer):
        @paddle.jit.to_static
        def forward(self, start, stop, num):
            out1 = paddle.linspace(start, stop, num, "int32")
            out2 = paddle.linspace(start, stop, num, "float32")
            out3 = paddle.linspace(start, stop, num, "int64")
            out4 = paddle.linspace(start, stop, num, "float64")
            return out1, out2, out3, out4

    start = paddle.to_tensor([1.3])
    stop = paddle.to_tensor([5.1])
    num = paddle.to_tensor([3])
    start = paddle.cast(start, "float32")
    stop = paddle.cast(stop, "float32")
    num = paddle.cast(num, "int32")
    x = paddle.to_tensor([1], dtype="float32")
    verify_model(Linspace1(), input_data=[x])
    verify_model(Linspace2(), input_data=[x])
    verify_model(Linspace3(), input_data=[start, stop, num], use_vm=True)
    num = paddle.to_tensor([1])
    num = paddle.cast(num, "int32")
    verify_model(Linspace3(), input_data=[start, stop, num], use_vm=True)


@tvm.testing.uses_gpu
def test_forward_dist():
    class Dist(nn.Layer):
        @paddle.jit.to_static
        def forward(self, x, y):
            l0_norm = paddle.dist(x, y, 0)
            l2_norm = paddle.dist(x, y, 2)
            float_norm = paddle.dist(x, y, 1.3)
            inf_norm = paddle.dist(x, y, float("inf"))
            ninf_norm = paddle.dist(x, y, float("-inf"))
            return l0_norm, l2_norm, float_norm, inf_norm, ninf_norm

    x = paddle.to_tensor([[3, 3], [3, 3]], dtype="float32")
    y = paddle.to_tensor([[1, 2], [3, 4]], dtype="float32")
    w = paddle.to_tensor([[1, 2]], dtype="float32")
    v = paddle.to_tensor([[2.1]], dtype="float32")
    verify_model(Dist(), input_data=[x, y])
    verify_model(Dist(), input_data=[x, w])
    verify_model(Dist(), input_data=[w, v])
    verify_model(Dist(), input_data=[y, v])


if __name__ == "__main__":
    tvm.testing.main()
