Merge pull request apache#24 from wjj19950828/paddle_frontend
Fixed Tensor and Dynamic input
jiangjiajun authored Sep 8, 2021
2 parents e690258 + aec532f commit 904f1eb
Showing 2 changed files with 84 additions and 40 deletions.
67 changes: 53 additions & 14 deletions python/tvm/relay/frontend/paddlepaddle.py
@@ -55,6 +55,16 @@ def _get_pad_size(in_size, dilated_kernel_size, stride_size):
return [pad_before, pad_after]


def shape_of(x, dtype="int32"):
"""Get shape of a tensor"""

ttype = infer_type(x).checked_type
if not _ty.is_dynamic(ttype):
shape = list(ttype.shape)
return _expr.const(shape, dtype)
return _op.shape_of(x, dtype)


def _infer_value(x, params):
"""Try running infer_value, and if successful, return the inferred value.
Otherwise, return input"""
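The new shape_of helper folds a fully static shape into a Relay constant instead of always emitting a runtime shape_of op; convert_matmul below switches to it so its rank checks can still be constant-folded. A minimal sketch of the property it keys on, assuming a standard TVM build (relay.Any and relay.ty.is_dynamic are the public counterparts of the _ty helper used in this file):

from tvm import relay
from tvm.relay import ty

static_t = relay.TensorType((2, 3), "float32")
dynamic_t = relay.TensorType((relay.Any(), 3), "float32")

# A type counts as dynamic as soon as any dimension is unknown (relay.Any()).
print(ty.is_dynamic(static_t))   # False -> shape_of returns _expr.const([2, 3])
print(ty.is_dynamic(dynamic_t))  # True  -> shape_of falls back to _op.shape_of(x)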
@@ -404,8 +414,19 @@ def convert_crop(g, op, block):
"""Operator converter for crop."""

x = g.get_node(op.input("X")[0])
offsets = op.attr("offsets")
shape = op.attr("shape")
input_shape = op.input("Shape")
input_offsets = op.input("Offsets")
if input_shape:
shape = g.get_node(input_shape[0])
shape = _infer_value(shape, g.get_params())
else:
shape = op.attr("shape")

if input_offsets:
offsets = g.get_node(input_offsets[0])
offsets = _infer_value(offsets, g.get_params())
else:
offsets = op.attr("offsets")

crop_len = len(shape)
slice_start = [0] * crop_len
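convert_crop now reads Shape and Offsets from graph inputs when they are present, falling back to the attributes otherwise, and runs _infer_value so anything computable at compile time still becomes a plain list; convert_expand below gets the same treatment for Shape. A condensed sketch of the shared pattern, where _input_or_attr is a hypothetical helper introduced here only for illustration:

def _input_or_attr(g, op, input_name, attr_name):
    """Hypothetical: prefer a tensor-valued input, fall back to the attribute."""
    names = op.input(input_name)  # e.g. "Shape" or "Offsets"
    if names:
        node = g.get_node(names[0])
        # _infer_value returns a concrete value when inferable, else the node
        return _infer_value(node, g.get_params())
    return op.attr(attr_name)  # static Python list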
@@ -496,9 +517,13 @@ def convert_expand(g, op, block):
input_shape = list(infer_shape(x))

ndims = len(input_shape)
sizes = op.attr("shape")
out = x
if op.input("Shape"):
sizes = g.get_node(op.input("Shape")[0])
sizes = _infer_value(sizes, g.get_params())
else:
sizes = op.attr("shape")

out = x
out_dims = len(sizes)
if ndims < out_dims:
num_newaxis = out_dims - ndims
@@ -567,16 +592,28 @@ def convert_fill_constant_batch_size_like(g, op, block):
"""Operator converter for fill_constant_batch_size_like."""

x = g.get_node(op.input("Input")[0])
input_shape = infer_shape(x)
out_shape = op.attr("shape")
value = op.attr("value")
shape = op.attr("shape")
input_dim_idx = op.attr("input_dim_idx")
output_dim_idx = op.attr("output_dim_idx")
value = op.attr("value")

dtype = block.var(op.output("Out")[0]).dtype
dtype = str(dtype).strip().split(".")[1]
out_shape[output_dim_idx] = input_shape[input_dim_idx]
value = np.full(out_shape, value, dtype)
out = _expr.const(value.astype(dtype)).astype(dtype)
input_shape = shape_of(x)
batch = _op.strided_slice(input_shape, begin=[input_dim_idx], end=[input_dim_idx+1]).astype("int32")
shape_before = shape[:output_dim_idx]
shape_before = _expr.const(shape_before, dtype="int32")
shape_after = shape[output_dim_idx+1:]
shape_after = _expr.const(shape_after, dtype="int32")

out_shape = _op.concatenate([shape_before, batch, shape_after], axis=0)
constant = _expr.const(value, dtype=dtype).astype(dtype)
out = _op.full(constant, out_shape, dtype=dtype)

# reshape with -1 in the batch slot so the batch dimension stays dynamic
shape[output_dim_idx] = -1
out = _op.reshape(out, shape)

g.add_node(op.output("Out")[0], out)
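The rewritten converter splices the runtime batch dimension into an otherwise static shape: the dims before and after output_dim_idx come from the attribute, while the batch size is sliced out of shape_of(x). A NumPy sketch of the same arithmetic, with illustrative values only (shape=[1, 512], both indices 0):

import numpy as np

shape = [1, 512]                   # op.attr("shape")
input_dim_idx = output_dim_idx = 0
value = 0.0

input_shape = np.array([4, 128])   # stands in for shape_of(x) at runtime
batch = input_shape[input_dim_idx:input_dim_idx + 1]             # [4]
out_shape = np.concatenate(
    [shape[:output_dim_idx], batch, shape[output_dim_idx + 1:]]
).astype("int64")                                                # [4, 512]
out = np.full(out_shape, value, dtype="float32")                 # shape (4, 512)
# the converter then reshapes with -1 in the batch slot, reshape(out, [-1, 512]),
# so the batch dimension stays symbolic when the real input is dynamic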


@@ -757,9 +794,9 @@ def convert_matmul(g, op, block):

# This implementation stays almost identical to the ONNX converter.
# Need to check the input shapes, as batch matmul must be supported.
a_shape = _op.shape_of(inputs[0])
a_shape = shape_of(inputs[0])
a_rank = infer_shape(a_shape)[0]
b_shape = _op.shape_of(inputs[1])
b_shape = shape_of(inputs[1])
b_rank = infer_shape(b_shape)[0]
# When performing a batch matmul, we need to properly handle N-dim shapes.
if a_rank > 2 or b_rank > 2:
@@ -1362,8 +1399,10 @@ def get_node(self, name):

def add_node(self, name, node):
"""add a node to graph"""

self.nodes[name] = fold_constant(node)
if self.shape_dict:
self.nodes[name] = fold_constant(node)
else:
self.nodes[name] = node

def get_params(self, name=None):
"""get params from graph"""
57 changes: 31 additions & 26 deletions tests/python/frontend/paddlepaddle/test_forward.py
@@ -376,33 +376,32 @@ def concat_unsqueeze2(inputs):

@tvm.testing.uses_gpu
def test_forward_crop():
input_shape = [10, 10]

class Crop(nn.Layer):
@paddle.jit.to_static
def forward(self, inputs):
return paddle.crop(inputs, shape=[2, 2])
@paddle.jit.to_static
def crop1(inputs):
return paddle.crop(inputs, shape=[2, 2])

class Crop1(nn.Layer):
@paddle.jit.to_static
def forward(self, inputs):
return paddle.crop(inputs, shape=[3, 3], offsets=[0, 1])
@paddle.jit.to_static
def crop2(inputs):
shape = paddle.to_tensor(np.array([3, 3]).astype("int32"))
return paddle.crop(inputs, shape=shape, offsets=[0, 1])

class Crop2(nn.Layer):
@paddle.jit.to_static
def forward(self, inputs):
return paddle.crop(inputs, shape=[3, 3], offsets=[1, 0])
@paddle.jit.to_static
def crop3(inputs):
offsets = paddle.to_tensor(np.array([1, 0]).astype("int32"))
return paddle.crop(inputs, shape=[3, 3], offsets=offsets)

class Crop3(nn.Layer):
@paddle.jit.to_static
def forward(self, inputs):
return paddle.crop(inputs, shape=[3, 3], offsets=[1, 1])
@paddle.jit.to_static
def crop4(inputs):
shape = paddle.to_tensor(np.array([3, 3]).astype("int32"))
offsets = paddle.to_tensor(np.array([1, 1]).astype("int32"))
return paddle.crop(inputs, shape=shape, offsets=offsets)

input_shape = [10, 10]
input_data = paddle.rand(input_shape, dtype="float32")
verify_model(Crop(), input_data=input_data)
verify_model(Crop1(), input_data=input_data)
verify_model(Crop2(), input_data=input_data)
verify_model(Crop3(), input_data=input_data)
verify_model(crop1, input_data=[input_data])
verify_model(crop2, input_data=[input_data])
verify_model(crop3, input_data=[input_data])
verify_model(crop4, input_data=[input_data])


@tvm.testing.uses_gpu
@@ -543,12 +542,18 @@ def dropout(inputs):
@tvm.testing.uses_gpu
def test_forward_expand():
@paddle.jit.to_static
def expand(inputs):
def expand1(inputs):
return paddle.expand(inputs, shape=[2, 3])

@paddle.jit.to_static
def expand2(inputs):
shape = paddle.to_tensor(np.array([2, 3]).astype("int32"))
return paddle.expand(inputs, shape=shape)

x_shape = [3]
x_data = paddle.rand(x_shape, dtype="float32")
verify_model(expand, input_data=[x_data])
verify_model(expand1, input_data=[x_data])
verify_model(expand2, input_data=[x_data])


@tvm.testing.uses_gpu
@@ -583,7 +588,7 @@ def shape1(inputs):
input_shape = [1, 3, 10, 10]
input_data = paddle.rand(input_shape, dtype="float32")
verify_model(shape1, input_data=[input_data])
# verify_model(full1, input_data=[input_data])
verify_model(full1, input_data=[input_data])
verify_model(full2, input_data=[input_data])


@@ -869,7 +874,7 @@ def pool2d3(inputs):
input_data = paddle.uniform(shape=[1, 2, 32, 32], dtype="float32", min=-1, max=1)
verify_model(pool2d1, input_data=input_data)
verify_model(pool2d2, input_data=input_data)
# verify_model(pool2d3, input_data=input_data)
verify_model(pool2d3, input_data=input_data)


@tvm.testing.uses_gpu
