Merge pull request apache#39 from heliqi/paddle_frontend
add 12 activation ops
jiangjiajun authored Sep 17, 2021
2 parents 13edb27 + beb425e commit e8c8086
Showing 2 changed files with 182 additions and 11 deletions.
120 changes: 118 additions & 2 deletions python/tvm/relay/frontend/paddlepaddle.py
@@ -657,7 +657,7 @@ def convert_elu(g, op, block):
x = g.get_node(op.input("X")[0])
dtype = infer_type(x).checked_type.dtype
alpha = op.attr("alpha")
alpha = _expr.const(alpha, dtype=dtype)
alpha = _expr.const(-1.0 * alpha, dtype=dtype)
out = alpha * _op.nn.relu(_expr.const(1, dtype=dtype) - _op.exp(x)) + _op.nn.relu(x)
g.add_node(op.output("Out")[0], out)
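
# Editor's sketch (illustrative, not part of the patch): the sign flip above relies on
# the identity elu(x) = -alpha * relu(1 - exp(x)) + relu(x), which matches the usual
# piecewise definition. Quick NumPy check:
import numpy as np

_alpha = 1.0
_x = np.linspace(-3.0, 3.0, 13)
_relu = lambda v: np.maximum(v, 0.0)
assert np.allclose(
    -_alpha * _relu(1.0 - np.exp(_x)) + _relu(_x),
    np.where(_x > 0, _x, _alpha * (np.exp(_x) - 1.0)),
)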

@@ -1401,17 +1401,43 @@ def convert_padding(g, op, block):
g.add_node(op.output("Out")[0], out)


def convert_pixel_shuffle(g, op, block):
"""Operator converter for pixel_shuffle."""

x = g.get_node(op.input("X")[0])
upscale_factor = op.attr("upscale_factor")
out = _op.nn.depth_to_space(x, upscale_factor, mode="CRD")
g.add_node(op.output("Out")[0], out)
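
# Editor's sketch (illustrative, not part of the patch): pixel_shuffle corresponds to
# depth_to_space in "CRD" mode, where the channels split as (C_out, r, r) and the two
# r factors interleave into height and width. NumPy equivalent:
import numpy as np

def _pixel_shuffle_np(x, r):
    n, c, h, w = x.shape
    y = x.reshape(n, c // (r * r), r, r, h, w)   # CRD channel layout
    y = y.transpose(0, 1, 4, 2, 5, 3)            # (N, C_out, H, r, W, r)
    return y.reshape(n, c // (r * r), h * r, w * r)

print(_pixel_shuffle_np(np.zeros((2, 9, 5, 5), "float32"), 3).shape)  # (2, 1, 15, 15)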


def convert_pow(g, op, block):
"""Operator converter for pow."""

x = g.get_node(op.input("X")[0])
factor = op.attr("factor")
factor = _expr.const(factor, dtype="float32").astype("float32")

out = _op.power(x, factor)
g.add_node(op.output("Out")[0], out)


def convert_prelu(g, op, block):
"""Operator converter for prelu."""

x = g.get_node(op.input("X")[0])
alpha = g.get_node(op.input("Alpha")[0])
ndims = len(infer_shape(x))
axis = 0 if ndims <= 1 else 1
mode = op.attr("mode")
if mode == "all":
if ndims == 1:
shape = _op.strided_slice(shape_of(x), [0], [1])
else:
shape = _op.strided_slice(shape_of(x), [1], [2])
alpha = _op.broadcast_to(alpha, shape)
out = _op.nn.prelu(x, alpha, axis)
g.add_node(op.output("Out")[0], out)
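
# Editor's sketch (illustrative, not part of the patch): relay's nn.prelu expects one
# slope per element of the channel axis, so the mode="all" branch tiles the single
# shared slope across that axis. NumPy picture of the math being targeted:
import numpy as np

_x = np.random.randn(2, 3, 4, 4).astype("float32")
_shared = np.float32(0.25)
_per_channel = np.broadcast_to(np.array([_shared]), (3,))   # what the "all" branch builds
_prelu = np.where(_x >= 0, _x, _per_channel.reshape(1, 3, 1, 1) * _x)
assert np.allclose(_prelu, np.where(_x >= 0, _x, _shared * _x))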


def convert_norm(g, op, block):
"""Operator converter for norm."""

@@ -1495,6 +1521,14 @@ def convert_reduce(g, op, block):
g.add_node(op.output("Out")[0], out)


def convert_relu6(g, op, block):
"""Operator converter for relu6."""

x = g.get_node(op.input("X")[0])
out = _op.clip(x, 0.0, 6.0)
g.add_node(op.output("Out")[0], out)


def convert_reshape(g, op, block):
"""Operator converter for reshape."""

@@ -1685,6 +1719,22 @@ def convert_scale(g, op, block):
g.add_node(op.output("Out")[0], out)


def convert_selu(g, op, block):
"""Operator converter for selu."""

x = g.get_node(op.input("X")[0])
dtype = infer_type(x).checked_type.dtype
alpha = _op.const(op.attr("alpha"), dtype)
scale = _op.const(op.attr("scale"), dtype)
out = (
_expr.const(-1.0, dtype=dtype)
* alpha
* _op.nn.relu(_expr.const(1.0, dtype=dtype) - _op.exp(x))
)
out = scale * (out + _op.nn.relu(x))
g.add_node(op.output("Out")[0], out)
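
# Editor's sketch (illustrative, not part of the patch): this is the same relu/exp
# rewrite as convert_elu above, scaled by `scale`; the constants below are just the
# usual SELU defaults used as example values.
import numpy as np

_alpha, _scale = 1.6732632423543772, 1.0507009873554805
_x = np.linspace(-3.0, 3.0, 13)
_relu = lambda v: np.maximum(v, 0.0)
_rewritten = _scale * (-_alpha * _relu(1.0 - np.exp(_x)) + _relu(_x))
_reference = _scale * np.where(_x > 0, _x, _alpha * (np.exp(_x) - 1.0))
assert np.allclose(_rewritten, _reference)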


def convert_shape(g, op, block):
"""Operator converter for shape."""

@@ -1816,6 +1866,62 @@ def convert_softmax(g, op, block):
g.add_node(op.output("Out")[0], out)


def convert_softplus(g, op, block):
"""Operator converter for softplus."""

x = g.get_node(op.input("X")[0])
dtype = infer_type(x).checked_type.dtype
beta = op.attr("beta")
beta = _expr.const(beta, dtype=dtype)
out = _op.log(_op.exp(x * beta) + _expr.const(1.0, dtype=dtype)) / beta
g.add_node(op.output("Out")[0], out)
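
# Editor's sketch (illustrative, not part of the patch): with a beta attribute the
# formula is softplus(x) = log(1 + exp(beta * x)) / beta, a smooth upper bound of
# relu that approaches the identity for large inputs. Quick NumPy sanity check:
import numpy as np

_beta = 2.0
_x = np.linspace(-3.0, 3.0, 13)
_y = np.log(np.exp(_beta * _x) + 1.0) / _beta
assert np.all(_y > np.maximum(_x, 0.0))        # always above relu(x)
assert np.isclose(_y[-1], _x[-1], atol=1e-2)   # ~x for large positive inputs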


def convert_softshrink(g, op, block):
"""Operator converter for softshrink."""

x = g.get_node(op.input("X")[0])
threshold = op.attr("lambda")
out = x - _op.clip(x, -1.0 * threshold, threshold)
g.add_node(op.output("Out")[0], out)
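
# Editor's sketch (illustrative, not part of the patch): x - clip(x, -lambda, lambda)
# shrinks |x| by lambda and zeroes the band [-lambda, lambda], matching the usual
# softshrink definition. Quick NumPy check:
import numpy as np

_lam = 0.5
_x = np.linspace(-2.0, 2.0, 17)
_reference = np.where(_x > _lam, _x - _lam, np.where(_x < -_lam, _x + _lam, 0.0))
assert np.allclose(_reference, _x - np.clip(_x, -_lam, _lam))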


def convert_softsign(g, op, block):
"""Operator converter for softsign."""

x = g.get_node(op.input("X")[0])
dtype = infer_type(x).checked_type.dtype
out = x / (_op.const(1.0, dtype) + _op.abs(x))
g.add_node(op.output("Out")[0], out)


def convert_swish(g, op, block):
"""Operator converter for swish."""

x = g.get_node(op.input("X")[0])
dtype = infer_type(x).checked_type.dtype
out = x / (_op.const(1.0, dtype) + _op.exp(_op.const(-1.0, dtype) * x))
g.add_node(op.output("Out")[0], out)


def convert_tanhshrink(g, op, block):
"""Operator converter for swish."""

x = g.get_node(op.input("X")[0])
out = x - _op.tanh(x)
g.add_node(op.output("Out")[0], out)


def convert_thresholded_relu(g, op, block):
"""Operator converter for thresholded_relu."""

x = g.get_node(op.input("X")[0])
dtype = infer_type(x).checked_type.dtype
threshold = _op.const(op.attr("threshold"), dtype)
out = _op.where(_op.greater(x, threshold), x, _op.const(0.0, dtype))
g.add_node(op.output("Out")[0], out)


def convert_split(g, op, block):
"""Operator converter for split."""

@@ -2053,7 +2159,9 @@ def convert_where(g, op, block):
"pad1d": convert_padding,
"pad2d": convert_padding,
"pad3d": convert_padding,
"pixel_shuffle": convert_pixel_shuffle,
"pow": convert_pow,
"prelu": convert_prelu,
"p_norm": convert_norm,
"range": convert_range,
"reciprocal": convert_reciprocal,
@@ -2065,24 +2173,32 @@ def convert_where(g, op, block):
"reduce_sum": convert_reduce,
"reduce_mean": convert_reduce,
"relu": convert_unary_op,
"relu6": convert_relu6,
"reshape2": convert_reshape,
"rnn": convert_rnn,
"rsqrt": convert_unary_op,
"scale": convert_scale,
"selu": convert_selu,
"shape": convert_shape,
"sigmoid": convert_unary_op,
"sin": convert_unary_op,
"size": convert_numel,
"slice": convert_slice,
"softmax": convert_softmax,
"softplus": convert_softplus,
"softshrink": convert_softshrink,
"softsign": convert_softsign,
"split": convert_split,
"square": convert_square,
"squeeze2": convert_squeeze,
"stack": convert_stack,
"strided_slice": convert_slice,
"sum": convert_addn,
"swish": convert_swish,
"tan": convert_unary_op,
"tanh": convert_unary_op,
"tanh_shrink": convert_tanhshrink,
"thresholded_relu": convert_thresholded_relu,
"top_k_v2": convert_topk,
"tile": convert_tile,
"transpose2": convert_transpose,
73 changes: 64 additions & 9 deletions tests/python/frontend/paddlepaddle/test_forward.py
@@ -514,7 +514,6 @@ def forward(self, inputs):

@tvm.testing.uses_gpu
def test_forward_conv():

class Conv2D1(nn.Layer):
def __init__(self):
super(Conv2D1, self).__init__()
@@ -525,7 +524,6 @@ def __init__(self):
def forward(self, inputs):
return self.softmax(self.conv(inputs))


class Conv2D2(nn.Layer):
def __init__(self):
super(Conv2D2, self).__init__()
@@ -536,7 +534,6 @@ def __init__(self):
def forward(self, inputs):
return self.softmax(self.conv(inputs))


class Conv2D3(nn.Layer):
def __init__(self):
super(Conv2D3, self).__init__()
@@ -546,11 +543,12 @@ def __init__(self):
def forward(self, inputs):
return self.conv(inputs)


class Conv2D4(nn.Layer):
def __init__(self):
super(Conv2D4, self).__init__()
self.conv = nn.Conv2D(3, 6, 7, groups=3, bias_attr=False, padding=[1, 2, 0, 1], stride=2, dilation=2)
self.conv = nn.Conv2D(
3, 6, 7, groups=3, bias_attr=False, padding=[1, 2, 0, 1], stride=2, dilation=2
)

@paddle.jit.to_static
def forward(self, inputs):
@@ -891,12 +889,30 @@ def forward(self, inputs):
return self.func(inputs)

input_shape = [1, 3, 10, 10]
input_data = paddle.rand(input_shape, dtype="float32")
input_data_2 = paddle.rand(input_shape).astype("float16")
op_list = ["elu", "hardshrink", "hardsigmoid", "hardswish", "hardtanh", "relu", "sigmoid"]
input_data = paddle.normal(shape=input_shape) * 10.0
input_data_2 = paddle.normal(shape=input_shape).astype("float64") * 10.0
op_list = [
"elu",
"hardshrink",
"hardsigmoid",
"hardswish",
"hardtanh",
"log_sigmoid",
"log_softmax",
"relu",
"relu6",
"selu",
"sigmoid",
"softplus",
"softshrink",
"softsign",
"swish",
"tanhshrink",
"thresholded_relu",
]
for op_name in op_list:
verify_model(Activation(op_name), input_data=input_data)
verify_model(Activation(op_name), input_data=input_data_2, rtol=1e-3, atol=1e-3)
verify_model(Activation(op_name), input_data=input_data_2, rtol=1e-9, atol=1e-6)


@tvm.testing.uses_gpu
@@ -1326,6 +1342,43 @@ def pad4(inputs):
verify_model(pad4, input_data=input_data)


@tvm.testing.uses_gpu
def test_forward_pixel_shuffle():
class PixelShuffle(nn.Layer):
def __init__(self, upscale_factor):
super(PixelShuffle, self).__init__()
self.pixel_shuffle = paddle.nn.PixelShuffle(upscale_factor)

@paddle.jit.to_static
def forward(self, x):
return self.pixel_shuffle(x)

x = paddle.rand([2, 9, 5, 5], dtype="float32")
verify_model(PixelShuffle(3), x)
x2 = paddle.rand([3, 8, 9, 9], dtype="float32")
verify_model(PixelShuffle(2), x2)


@tvm.testing.uses_gpu
def test_forward_prelu():
class PRelu(nn.Layer):
@paddle.jit.to_static
def forward(self, x, w):
return paddle.nn.functional.prelu(x, w)

x = paddle.normal(shape=[4, 3, 5, 5])
w = paddle.to_tensor(
np.array(
[
0.25,
]
).astype("float32")
)
verify_model(PRelu(), [x, w])
w2 = paddle.to_tensor(np.array([0.25, 0.5, 0.8]).astype("float32"))
verify_model(PRelu(), [x, w2])


@tvm.testing.uses_gpu
def test_forward_pow():
class Pow(nn.Layer):
@@ -1747,6 +1800,8 @@ def forward(self, x):
test_forward_norm()
test_forward_pool2d()
test_forward_pad()
test_forward_pixel_shuffle()
test_forward_prelu()
test_forward_pow()
test_forward_reduce_op()
test_forward_reshape()
