Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[Frontend][Paddle] Hackathon No.227 #14205

Open
wants to merge 2 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
75 changes: 75 additions & 0 deletions python/tvm/relay/frontend/paddlepaddle.py
Original file line number Diff line number Diff line change
Expand Up @@ -513,6 +513,17 @@ def convert_expand_as(g, op, block):
out = _op.broadcast_to(x, target_shape)
g.add_node(op.output("Out")[0], out)

def convert_eye(g, op, block):
    """Operator converter for eye.

    The output depends only on op attributes, so it is folded into a
    Relay constant at conversion time.
    """

    rows = op.attr("num_rows")
    cols = op.attr("num_columns")
    dtype = _convert_dtype_value(op.attr("dtype"))

    identity = np.eye(rows, cols).astype(dtype)
    g.add_node(op.output("Out")[0], _op.const(identity, dtype=dtype))

def convert_feed(g, op, block):
"""Converter for model input node."""
Expand Down Expand Up @@ -1083,6 +1094,20 @@ def convert_meshgrid(g, op, block):
for i, out in enumerate(outs):
g.add_node(op.output("Out")[i], out)

def convert_mish(g, op, block):
    """Operator converter for mish.

    mish(x) = x * tanh(softplus(x)). Paddle's mish replaces softplus(x)
    with x itself when x > threshold, since softplus(x) ~= x for large x.
    """

    x = g.get_node(op.input("X")[0])
    dtype = infer_type(x).checked_type.dtype
    threshold = _op.const(op.attr("threshold"), dtype)
    one = _expr.const(1.0, dtype=dtype)

    # Select with where() instead of summing masked branches: the masked
    # form log(1 + exp(x)) * cast(x <= threshold) evaluates to inf * 0 = NaN
    # once exp(x) overflows for large x, poisoning the whole result.
    softplus = _op.where(x > threshold, x, _op.log(one + _op.exp(x)))
    out = x * _op.tanh(softplus)
    g.add_node(op.output("Out")[0], out)

def convert_mul(g, op, block):
"""Operator converter for mul."""
Expand Down Expand Up @@ -1784,6 +1809,14 @@ def convert_shape(g, op, block):
out = shape_of(x, dtype="int32")
g.add_node(op.output("Out")[0], out)

def convert_silu(g, op, block):
    """Operator converter for silu.

    silu(x) = x * sigmoid(x) = x / (1 + exp(-x)).
    """

    x = g.get_node(op.input("X")[0])
    dtype = infer_type(x).checked_type.dtype
    one = _expr.const(1.0, dtype=dtype)

    out = x / (_op.exp(-x) + one)
    g.add_node(op.output("Out")[0], out)

def convert_size(g, op, block):
"""Operator converter for size."""
Expand Down Expand Up @@ -1940,6 +1973,22 @@ def convert_softplus(g, op, block):
out = _op.log(_op.exp(x * beta) + _expr.const(1.0, dtype=dtype)) / beta
g.add_node(op.output("Out")[0], out)

def convert_softshrink(g, op, block):
    """Operator converter for softshrink.

    softshrink(x) = x - lambda  if x >  lambda
                    x + lambda  if x < -lambda
                    0           otherwise
    """

    x = g.get_node(op.input("X")[0])
    dtype = infer_type(x).checked_type.dtype
    threshold = _op.const(op.attr("lambda"), dtype)

    # Nested where() expresses the three-way piecewise definition directly,
    # replacing the previous mask arithmetic and its commented-out variant.
    out = _op.where(
        x > threshold,
        x - threshold,
        _op.where(
            x < _op.const(-1.0, dtype) * threshold,
            x + threshold,
            _op.zeros_like(x),
        ),
    )
    g.add_node(op.output("Out")[0], out)



def convert_softsign(g, op, block):
"""Operator converter for softsign."""
Expand Down Expand Up @@ -2024,6 +2073,16 @@ def convert_swish(g, op, block):
out = x * _op.tensor.sigmoid(x)
g.add_node(op.output("Out")[0], out)

def convert_thresholded_relu(g, op, block):
    """Operator converter for thresholded_relu.

    Passes x through where x > threshold, and outputs zero elsewhere.
    """

    x = g.get_node(op.input("X")[0])
    dtype = infer_type(x).checked_type.dtype
    threshold = _op.const(op.attr("threshold"), dtype)

    mask = _op.cast(x > threshold, dtype)
    out = x * mask
    g.add_node(op.output("Out")[0], out)

def convert_topk(g, op, block):
"""Operator converter for topk."""
Expand Down Expand Up @@ -2073,6 +2132,16 @@ def convert_unsqueeze(g, op, block):
x = _op.expand_dims(x, axis=axis, num_newaxis=1)
g.add_node(op.output("Out")[0], x)

def convert_where(g, op, block):
    """Operator converter for where.

    Elementwise select: takes X where Condition is true, Y elsewhere.
    """

    cond = g.get_node(op.input("Condition")[0])
    lhs = g.get_node(op.input("X")[0])
    rhs = g.get_node(op.input("Y")[0])

    g.add_node(op.output("Out")[0], _op.where(cond, lhs, rhs))


def convert_where_index(g, op, block):
"""Operator converter for where_index."""
Expand Down Expand Up @@ -2127,6 +2196,7 @@ def convert_where_index(g, op, block):
"exp": convert_unary_op,
"expand_v2": convert_expand,
"expand_as_v2": convert_expand_as,
"eye": convert_eye,
"feed": convert_feed,
"fill_any_like": convert_fill_any_like,
"fill_constant": convert_fill_constant,
Expand Down Expand Up @@ -2166,6 +2236,7 @@ def convert_where_index(g, op, block):
"matmul": convert_matmul,
"matmul_v2": convert_matmul,
"meshgrid": convert_meshgrid,
"mish": convert_mish,
"mul": convert_mul,
"mv": convert_mv,
"nearest_interp_v2": convert_interpolate,
Expand Down Expand Up @@ -2203,11 +2274,13 @@ def convert_where_index(g, op, block):
"sign": convert_unary_op,
"sin": convert_unary_op,
"sinh": convert_unary_op,
"silu": convert_silu,
"size": convert_size,
"slice": convert_slice,
"softmax": convert_softmax,
"softplus": convert_softplus,
"softsign": convert_softsign,
"softshrink": convert_softshrink,
"split": convert_split,
"strided_slice": convert_slice,
"sqrt": convert_unary_op,
Expand All @@ -2216,9 +2289,11 @@ def convert_where_index(g, op, block):
"swish": convert_swish,
"tan": convert_unary_op,
"tanh": convert_unary_op,
"thresholded_relu": convert_thresholded_relu,
"top_k_v2": convert_topk,
"transpose2": convert_transpose,
"unsqueeze2": convert_unsqueeze,
"where": convert_where,
"where_index": convert_where_index,
}

Expand Down
77 changes: 77 additions & 0 deletions tests/python/frontend/paddlepaddle/test_forward.py
Original file line number Diff line number Diff line change
Expand Up @@ -1783,6 +1783,83 @@ def where_index_1(inputs):
input_data = paddle.to_tensor([[1.0, 0.0, 0.0], [0.0, 2.0, 0.0], [0.0, 0.0, 3.0]])
verify_model(where_index_1, input_data=input_data, use_vm=True)

@tvm.testing.uses_gpu
def test_forward_silu():
    """Check the silu converter across ranks 1 through 5."""

    @paddle.jit.to_static
    def silu(inputs):
        return nn.functional.silu(inputs)

    for shape in ([128], [8, 20], [4, 20, 3], [2, 3, 8, 8], [2, 3, 3, 9, 9]):
        verify_model(silu, input_data=paddle.rand(shape, dtype="float32"))

@tvm.testing.uses_gpu
def test_forward_thresholded_relu():
    """Check thresholded_relu with negative, zero, and positive thresholds."""

    @paddle.jit.to_static
    def thresholded_relu_0(inputs):
        return nn.functional.thresholded_relu(inputs, threshold=-1.0)

    @paddle.jit.to_static
    def thresholded_relu_1(inputs):
        return nn.functional.thresholded_relu(inputs, threshold=0.0)

    @paddle.jit.to_static
    def thresholded_relu_2(inputs):
        return nn.functional.thresholded_relu(inputs, threshold=1.0)

    models = (thresholded_relu_0, thresholded_relu_1, thresholded_relu_2)
    for shape in ([128], [8, 20], [4, 20, 3], [2, 3, 8, 8], [2, 3, 3, 9, 9]):
        input_data = paddle.rand(shape, dtype="float32")
        for model in models:
            verify_model(model, input_data=input_data)

@tvm.testing.uses_gpu
def test_forward_where():
    """Check the where converter with both comparison directions."""

    @paddle.jit.to_static
    def where_0(x, y):
        return paddle.where(x < y, x, y)

    @paddle.jit.to_static
    def where_1(x, y):
        return paddle.where(x > y, x, y)

    for shape in ([128], [8, 20], [4, 20, 3], [2, 3, 8, 8], [2, 3, 3, 9, 9]):
        lhs = paddle.rand(shape, dtype="float32")
        rhs = paddle.rand(shape, dtype="float32")
        verify_model(where_0, [lhs, rhs])
        verify_model(where_1, [lhs, rhs])

@tvm.testing.uses_gpu
def test_forward_eye():
    """Check the eye converter for square and rectangular shapes, fp32/fp64."""

    class Eye(nn.Layer):
        def __init__(self, row=2, col=2):
            super(Eye, self).__init__()
            self.row = row
            self.col = col

        @paddle.jit.to_static
        def forward(self, inputs):
            x = inputs
            y = paddle.eye(self.row, self.col, dtype=x.dtype)
            assert x.shape == y.shape, f"{x.shape},{y.shape}"
            return x + y

    for row, col in [[2, 3], [2, 2], [4, 4], [4, 5]]:
        for dtype in ("float32", "float64"):
            input_data = paddle.randn([row, col], dtype=dtype)
            verify_model(Eye(row=row, col=col), input_data=input_data)

@tvm.testing.uses_gpu
def test_forward_mish():
    """Check the mish converter across a range of ranks and sizes."""

    @paddle.jit.to_static
    def mish(inputs):
        return nn.functional.mish(inputs)

    input_shapes = [
        [128], [8, 20], [4, 20, 3], [2, 3, 8, 8], [2, 3, 3, 9, 9],
        [2, 2, 2, 3], [1, 3, 5, 5], [10], [2, 3], [5, 10, 11], [3, 4, 5, 6],
    ]
    for shape in input_shapes:
        verify_model(mish, input_data=paddle.rand(shape, dtype="float32"))

if __name__ == "__main__":
tvm.testing.main()