Skip to content

Commit

Permalink
[Frontend][PaddlePaddle] Add operators of interpolate/flatten and mod…
Browse files Browse the repository at this point in the history
…ify try_infer_value (apache#9459)

* add interpolate and flatten

* fix spelling

* fix diff

* rename unit test name

* add parameters for common:try_infer_value

* eliminate unnecessary diff

* fix pylint problem

* fix pylint problem

* eliminate unnecessary diff
  • Loading branch information
jiangjiajun authored and mehrdadh committed Dec 1, 2021
1 parent 11196dd commit e0deb9a
Show file tree
Hide file tree
Showing 3 changed files with 228 additions and 13 deletions.
5 changes: 3 additions & 2 deletions python/tvm/relay/frontend/common.py
Original file line number Diff line number Diff line change
Expand Up @@ -578,14 +578,15 @@ def infer_value_simulated(input_val, params):
return output_value


def try_infer_value(val, on_success=None, on_failure=None):
def try_infer_value(val, on_success=None, on_failure=None, parameters=None):
"""Try running infer_value on the input val, and if successful, return the inferred value or
pass it to on_success callback if provided. Otherwise, run on_failure callback if it is
provided, or return the input val as output. In each case, the second return value
indicates whether infer_value has succeeded or not.
"""
try:
ret = infer_value(val, {}).numpy()
params = parameters if parameters is not None else {}
ret = infer_value(val, params).numpy()
if on_success:
return on_success(ret), True
return ret, True
Expand Down
147 changes: 136 additions & 11 deletions python/tvm/relay/frontend/paddlepaddle.py
Original file line number Diff line number Diff line change
Expand Up @@ -382,10 +382,12 @@ def convert_expand(g, op, block):
x = g.get_node(op.input("X")[0])
if op.input("Shape"):
sizes = g.get_node(op.input("Shape")[0])
sizes = try_infer_value(sizes, g.get_params())[0]
else:
sizes = op.attr("shape")

if isinstance(sizes, _expr.Expr):
sizes = try_infer_value(sizes, parameters=g.get_params())[0]

if isinstance(sizes, np.ndarray):
sizes = sizes.tolist()

Expand Down Expand Up @@ -447,10 +449,11 @@ def convert_fill_constant(g, op, block):
value = _expr.const(value).astype(dtype)
if "ValueTensor" in op.input_names and op.input("ValueTensor"):
shape = g.get_node(op.input("ValueTensor")[0])
shape = try_infer_value(shape, g.get_params())[0]
if "ShapeTensor" in op.input_names and op.input("ShapeTensor"):
shape = g.get_node(op.input("ShapeTensor")[0])
shape = try_infer_value(shape, g.get_params())[0]

if isinstance(shape, _expr.Expr):
shape = try_infer_value(shape, parameters=g.get_params())[0]

if isinstance(shape, np.ndarray):
shape = shape.tolist()
Expand All @@ -459,6 +462,33 @@ def convert_fill_constant(g, op, block):
g.add_node(op.output("Out")[0], out)


def convert_flatten(g, op, block):
    """Operator converter for flatten (flatten_contiguous_range).

    Collapses the input axes in [start_axis, stop_axis] into a single
    dimension, mirroring paddle.flatten. Implemented as a relay reshape
    (where 0 copies the input dim at the same index and -1 absorbs the
    flattened extent) followed by a squeeze of the unit placeholder axes
    that were inserted to keep the 0-entries position-aligned.
    """

    x = g.get_node(op.input("X")[0])
    input_shape = list(infer_shape(x))
    ndim = len(input_shape)

    start = op.attr("start_axis")
    end = op.attr("stop_axis")
    # Paddle accepts negative axes; normalize both ends to [0, ndim).
    if start < 0:
        start += ndim
    if end < 0:
        end += ndim

    # Dims before start are kept as-is (reshape special value 0).
    new_shape = [0] * start

    # -1 infers the product of dims start..end.
    new_shape.append(-1)
    squeeze_axes = []
    for i in range(start + 1, end + 1):
        # Unit placeholders keep later 0-entries aligned with input axes;
        # they are squeezed out below.
        new_shape.append(1)
        squeeze_axes.append(i)
    for _ in range(end + 1, ndim):
        new_shape.append(0)
    out = _op.reshape(x, new_shape)
    if squeeze_axes:
        out = _op.squeeze(out, axis=squeeze_axes)

    g.add_node(op.output("Out")[0], out)


def convert_gather(g, op, block):
"""Operator converter for gather."""

Expand Down Expand Up @@ -552,6 +582,99 @@ def convert_hard_swish(g, op, block):
g.add_node(op.output("Out")[0], out)


def convert_interpolate(g, op, block):
    """Operator converter for interpolate (nearest/bilinear/bicubic _v2 ops).

    Maps PaddlePaddle's interpolate attributes onto relay's image.resize2d.
    The output size may come from an "OutSize" tensor, a list of scalar
    "SizeTensor" inputs, a "Scale" tensor, or the static out_h/out_w
    attributes; whenever possible it is constant-folded to a Python list
    via try_infer_value so resize2d gets static sizes.
    """

    def get_interpolate_mode(op):
        """Translate Paddle interp attributes to relay resize parameters.

        Returns (rounding_method, interp_method,
        coordinate_transformation_mode) for image.resize2d.

        Raises
        ------
        tvm.error.OpAttributeInvalid
            If interp_method is not nearest/bilinear/bicubic.
        """

        interp_method = op.attr("interp_method")
        align_corners = op.attr("align_corners")
        align_mode = op.attr("align_mode")

        rounding_method = ""
        if interp_method == "nearest":
            interp_method = "nearest_neighbor"
            coordinate_transformation_mode = "asymmetric"
            rounding_method = "floor"
        elif interp_method == "bilinear":
            interp_method = "linear"
            if not align_corners and align_mode == 0:
                coordinate_transformation_mode = "half_pixel"
            else:
                if align_corners:
                    coordinate_transformation_mode = "align_corners"
                else:
                    coordinate_transformation_mode = "asymmetric"
        elif interp_method == "bicubic":
            interp_method = "cubic"
            if align_corners:
                coordinate_transformation_mode = "align_corners"
            else:
                coordinate_transformation_mode = "half_pixel"
        else:
            msg = "interp_method {} is not supported for PaddlePaddle's interpolate"
            raise tvm.error.OpAttributeInvalid(msg.format(interp_method))
        return rounding_method, interp_method, coordinate_transformation_mode

    layout = op.attr("data_layout")
    out_h = op.attr("out_h")
    out_w = op.attr("out_w")

    x = g.get_node(op.input("X")[0])
    x_shape = infer_shape(x)
    assert len(x_shape) == 4, "Only 4D input tensor is supported for PaddlePaddle's interpolate"
    input_out_size = op.input("OutSize")
    input_size_tensor = op.input("SizeTensor")
    input_scale = op.input("Scale")
    rounding_method, interp_method, coordinate_transformation_mode = get_interpolate_mode(op)

    if input_out_size:
        # out_size is given as a single tensor
        out_size = g.get_node(input_out_size[0])
        out_size, infered = try_infer_value(out_size, parameters=g.get_params())
        if infered:
            out_size = out_size.tolist()
    elif input_size_tensor:
        # out_size is given as a list of scalar tensors
        out_size = []
        for name in input_size_tensor:
            size = g.get_node(name)
            if len(infer_shape(size)) == 0:
                # Promote scalar to rank-1 so it can be concatenated.
                # (Fixes a bug where the reshape result was assigned to an
                # undefined name "shape" and silently dropped.)
                size = _op.reshape(size, [-1])
            out_size.append(size)
        out_size = _op.concatenate(out_size, axis=0)
        out_size, infered = try_infer_value(out_size, parameters=g.get_params())
        if infered:
            out_size = out_size.tolist()
    elif input_scale:
        # out_size is not defined; derive it from the spatial dims and scale
        input_scale = g.get_node(input_scale[0])
        input_shape = shape_of(x).astype("float32")
        if layout.startswith("NC"):
            out_size = _op.strided_slice(input_shape, begin=[2], end=[4]) * input_scale
        else:
            out_size = _op.strided_slice(input_shape, begin=[1], end=[3]) * input_scale
        out_size = out_size.astype("int32")
        out_size, infered = try_infer_value(out_size, parameters=g.get_params())
        if infered:
            out_size = out_size.tolist()
    else:
        # out_size is statically known from the op attributes
        out_size = [out_h, out_w]

    out = _op.image.resize2d(
        x,
        size=out_size,
        layout=layout,
        method=interp_method,
        coordinate_transformation_mode=coordinate_transformation_mode,
        rounding_method=rounding_method,
        cubic_alpha=-0.75,
    )
    g.add_node(op.output("Out")[0], out)


def convert_layer_norm(g, op, block):
"""Operator converter for layer_norm."""

Expand Down Expand Up @@ -939,18 +1062,16 @@ def convert_reshape(g, op, block):
if input_shape:
new_shape = g.get_node(input_shape[0])
elif input_shape_tensor:
tmp_shape = []
new_shape = []
for shape_name in input_shape_tensor:
shape = g.get_node(shape_name)
if len(infer_shape(shape)) == 0:
shape = _op.reshape(shape, [-1])
if isinstance(shape, _expr.Constant):
tmp_shape.append(shape)
elif isinstance(shape, _expr.Expr):
tmp_shape.append(shape)
else:
tmp_shape.append(_expr.const(np.array(shape).astype("int64")))
new_shape = _op.concatenate(tmp_shape, axis=0)
new_shape.append(shape)
new_shape = _op.concatenate(new_shape, axis=0)
new_shape, infered = try_infer_value(new_shape, parameters=g.get_params())
if infered:
new_shape = new_shape.tolist()
else:
new_shape = op.attr("shape")
out = _op.reshape(data, new_shape)
Expand Down Expand Up @@ -1184,6 +1305,8 @@ def convert_unsqueeze(g, op, block):
"assign_value": convert_assign_value,
"atan": convert_unary_op,
"batch_norm": convert_batch_norm,
"bicubic_interp_v2": convert_interpolate,
"bilinear_interp_v2": convert_interpolate,
"bmm": convert_bmm,
"brelu": convert_brelu,
"cast": convert_cast,
Expand Down Expand Up @@ -1214,6 +1337,7 @@ def convert_unsqueeze(g, op, block):
"feed": convert_feed,
"fill_any_like": convert_fill_any_like,
"fill_constant": convert_fill_constant,
"flatten_contiguous_range": convert_flatten,
"floor": convert_unary_op,
"floor_mod": convert_elementwise_op,
"gather": convert_gather,
Expand Down Expand Up @@ -1243,6 +1367,7 @@ def convert_unsqueeze(g, op, block):
"matmul": convert_matmul,
"matmul_v2": convert_matmul,
"mul": convert_mul,
"nearest_interp_v2": convert_interpolate,
"not_equal": convert_elementwise_op,
"pad1d": convert_padding,
"pad2d": convert_padding,
Expand Down
89 changes: 89 additions & 0 deletions tests/python/frontend/paddlepaddle/test_forward.py
Original file line number Diff line number Diff line change
Expand Up @@ -585,6 +585,24 @@ def forward(self, x, y):
verify_model(ExpandAs(), [x_data, y_data])


@tvm.testing.uses_gpu
def test_forward_flatten():
    """Check paddle.flatten lowering over several axis-range configurations."""

    class Flatten(nn.Layer):
        def __init__(self, start_axis=0, stop_axis=-1):
            super(Flatten, self).__init__()
            self.start_axis = start_axis
            self.stop_axis = stop_axis

        @paddle.jit.to_static
        def forward(self, x):
            return paddle.flatten(x, start_axis=self.start_axis, stop_axis=self.stop_axis)

    input_data = paddle.rand([2, 3, 4, 5, 2], dtype="float32")
    # Full flatten, flatten from a middle axis, and a bounded axis range.
    for axis_args in [(), (2,), (2, -2)]:
        verify_model(Flatten(*axis_args), input_data=input_data)


@tvm.testing.uses_gpu
def test_forward_gather():
class Gather(nn.Layer):
Expand Down Expand Up @@ -764,6 +782,77 @@ def hard_swish(inputs):
verify_model(hard_swish, input_data=input_data)


@tvm.testing.uses_gpu
def test_forward_interpolate():
    """Check paddle.nn.functional.interpolate lowering across modes and
    size/scale sources (tensor, list-of-tensors, constants, scale_factor)."""

    class Interpolate(nn.Layer):
        # Flags select which branch of forward() is traced:
        #   use_scale  -> pass scale_factor instead of size
        #   use_list   -> size is [tensor, int] mixed list
        #   use_const  -> size/scale are plain Python lists
        def __init__(
            self,
            mode="nearest",
            align_corners=False,
            align_mode=0,
            data_format="NCHW",
            use_scale=False,
            use_list=False,
            use_const=False,
        ):
            super(Interpolate, self).__init__()
            self.mode = mode
            self.align_corners = align_corners
            self.align_mode = align_mode
            self.data_format = data_format
            self.use_scale = use_scale
            self.use_list = use_list
            self.use_const = use_const

        @paddle.jit.to_static
        def forward(self, x):
            size = np.array([15, 19]).astype("int32")
            scale = np.array([2.0, 1.0]).astype("float32")
            if not self.use_list and not self.use_const:
                # Whole size/scale as single tensors (exercises "OutSize"/"Scale").
                size = paddle.to_tensor(size)
                scale = paddle.to_tensor(scale)
            elif not self.use_const:
                # Mixed list of tensor + int (exercises "SizeTensor").
                size0 = paddle.to_tensor(size[0:1])
                size = [size0, int(size[1])]
            else:
                # Plain Python constants (exercises static out_h/out_w attrs).
                size = size.tolist()
                scale = scale.tolist()
            if not self.use_scale:
                return paddle.nn.functional.interpolate(
                    x,
                    size=size,
                    mode=self.mode,
                    align_corners=self.align_corners,
                    align_mode=self.align_mode,
                    data_format=self.data_format,
                )
            else:
                return paddle.nn.functional.interpolate(
                    x,
                    scale_factor=scale,
                    mode=self.mode,
                    align_corners=self.align_corners,
                    align_mode=self.align_mode,
                    data_format=self.data_format,
                )

    input_data = paddle.rand([1, 2, 8, 12]).astype("float32")
    verify_model(Interpolate(), input_data)
    verify_model(Interpolate(use_list=True), input_data)
    verify_model(Interpolate(use_scale=True), input_data)
    verify_model(Interpolate("bilinear", use_scale=True), input_data)
    verify_model(Interpolate("bilinear", use_scale=True, align_corners=True), input_data)
    verify_model(
        Interpolate(
            "bilinear", use_scale=True, align_corners=True, align_mode=1, data_format="NHWC"
        ),
        input_data,
    )
    verify_model(
        Interpolate("bicubic", use_scale=True, align_corners=True, align_mode=1), input_data
    )


@tvm.testing.uses_gpu
def test_forward_layer_norm():
@paddle.jit.to_static
Expand Down

0 comments on commit e0deb9a

Please sign in to comment.