Skip to content

Commit

Permalink
[Zero-Dim] reshape/reshape_/reverse 0D support (PaddlePaddle#49357)
Browse files Browse the repository at this point in the history
* [Zero-Dim] reshape/reshape_/reverse 0D support

* rm comment

* change paddle.to_tensor to paddle.full

* fix docs

* update paddle.full
  • Loading branch information
zhaoyinglia authored Jan 3, 2023
1 parent 021085e commit 347d212
Show file tree
Hide file tree
Showing 5 changed files with 267 additions and 15 deletions.
5 changes: 0 additions & 5 deletions paddle/fluid/operators/reshape_op.cc
Original file line number Diff line number Diff line change
Expand Up @@ -114,11 +114,6 @@ class ReshapeOp : public framework::OperatorWithKernel {
return;
}

PADDLE_ENFORCE_EQ(!shape.empty(),
true,
platform::errors::InvalidArgument(
"The parameter 'shape' in ReshapeOp must be set. "
"But received 'shape' is empty."));
auto x_dims = ctx->GetInputDim("X");
auto out_dims = ValidateShape(shape, x_dims);
ctx->SetOutputDim("Out", out_dims);
Expand Down
10 changes: 5 additions & 5 deletions python/paddle/fluid/tests/unittests/test_reshape_op.py
Original file line number Diff line number Diff line change
Expand Up @@ -49,20 +49,20 @@ def test_check_grad(self):
class TestReshapeOp_ZeroDim1(OpTest):
    # Reshape a 0-D (scalar) tensor to a one-element 1-D tensor.
    # NOTE: the diff rendering had left the pre-change scalar values
    # (new_shape = 1) alongside the tuple forms; only the post-commit
    # tuple-shaped values are kept here.
    def init_data(self):
        self.ori_shape = ()
        self.new_shape = (1,)
        self.infered_shape = (1,)


class TestReshapeOp_ZeroDim2(OpTest):
    # Reshape a 0-D (scalar) tensor using -1 inference; result is 1-D with
    # one element. The stale pre-change scalar values (new_shape = -1)
    # duplicated by the diff rendering are dropped in favor of the tuples.
    def init_data(self):
        self.ori_shape = ()
        self.new_shape = (-1,)
        self.infered_shape = (1,)


class TestReshapeOp_ZeroDim3(OpTest):
    # Reshape a one-element 1-D tensor down to a 0-D (scalar) tensor.
    # The diff artifact that kept both `ori_shape = 1` and `ori_shape = (1,)`
    # is resolved to the post-commit tuple form.
    def init_data(self):
        self.ori_shape = (1,)
        self.new_shape = ()
        self.infered_shape = ()

Expand Down
171 changes: 171 additions & 0 deletions python/paddle/fluid/tests/unittests/test_zero_dim_tensor.py
Original file line number Diff line number Diff line change
Expand Up @@ -756,6 +756,105 @@ def test_floor_divide(self):
np.testing.assert_array_equal(out3_1.numpy(), out3_2.numpy())
np.testing.assert_array_equal(out3_2.numpy(), np.asarray(1))

def test_reshape_list(self):
    """Dygraph reshape of a 0-D tensor using plain Python list shapes.

    Covers the empty target shape (stays 0-D), explicit [1], inferred
    [-1], and a 2-D [-1, 1] target; in every case the gradient flowing
    back to the 0-D input keeps the scalar shape [].
    """
    x = paddle.rand([])
    x.stop_gradient = False

    # (target shape, expected output shape) pairs, checked in order.
    cases = [
        ([], []),
        ([1], [1]),
        ([-1], [1]),
        ([-1, 1], [1, 1]),
    ]
    for target, expected in cases:
        out = paddle.reshape(x, target)
        out.backward()
        self.assertEqual(x.grad.shape, [])
        self.assertEqual(out.shape, expected)
        self.assertEqual(out.grad.shape, expected)

def test_reshape_tensor(self):
    """Dygraph reshape of a one-element [1, 1] tensor with Tensor shapes.

    Exercises an empty list target (to 0-D), 1-D shape Tensors holding 1
    and -1, and a list of 0-D shape Tensors; the input gradient keeps its
    [1, 1] shape throughout.
    """
    x = paddle.rand([1, 1])
    x.stop_gradient = False

    # Plain empty-list target first: collapses to a 0-D output.
    out = paddle.reshape(x, [])
    out.backward()
    self.assertEqual(x.grad.shape, [1, 1])
    self.assertEqual(out.shape, [])
    self.assertEqual(out.grad.shape, [])

    # (Tensor-valued target shape, expected output shape) pairs.
    specs = [
        (paddle.full([1], 1, "int32"), [1]),
        (paddle.full([1], -1, "int32"), [1]),
        ([paddle.full([], -1, "int32"), paddle.full([], 1, "int32")], [1, 1]),
    ]
    for target, expected in specs:
        out = paddle.reshape(x, target)
        out.backward()
        self.assertEqual(x.grad.shape, [1, 1])
        self.assertEqual(out.shape, expected)
        self.assertEqual(out.grad.shape, expected)

def test_reshape__list(self):
    """In-place reshape_ of a 0-D tensor through a series of list shapes.

    The tensor holds one element throughout, so every target is valid;
    each call mutates x and returns it with the new shape.
    """
    x = paddle.rand([])
    for target, expected in (
        ([], []),
        ([1], [1]),
        ([-1], [1]),
        ([-1, 1], [1, 1]),
    ):
        out = paddle.reshape_(x, target)
        self.assertEqual(out.shape, expected)

def test_reshape__tensor(self):
    """In-place reshape_ of a one-element tensor with Tensor-valued shapes."""
    x = paddle.rand([1, 1])

    # Empty list target first: the in-place result becomes 0-D.
    out = paddle.reshape_(x, [])
    self.assertEqual(out.shape, [])

    # (Tensor-valued target shape, expected output shape) pairs.
    specs = [
        (paddle.full([1], 1, "int32"), [1]),
        (paddle.full([1], -1, "int32"), [1]),
        ([paddle.full([], -1, "int32"), paddle.full([], 1, "int32")], [1, 1]),
    ]
    for target, expected in specs:
        out = paddle.reshape_(x, target)
        self.assertEqual(out.shape, expected)

def test_reverse(self):
    """Dygraph reverse of a 0-D tensor with an empty axis list.

    Both the output and its gradient stay 0-D.
    """
    x = paddle.rand([])
    x.stop_gradient = False
    result = paddle.reverse(x, axis=[])
    result.backward()
    self.assertEqual(x.shape, [])
    self.assertEqual(result.shape, [])
    self.assertEqual(result.grad.shape, [])


class TestSundryAPIStatic(unittest.TestCase):
def setUp(self):
Expand Down Expand Up @@ -1011,6 +1110,78 @@ def test_floor_divide(self):
np.testing.assert_array_equal(out3_1, out3_2)
np.testing.assert_array_equal(out3_2, np.asarray(1))

@prog_scope()
def test_reshape_list(self):
    """Static-graph reshape of 0-D inputs using plain Python list shapes.

    One fresh 0-D input per target so each reshape (and its backward
    pass) is independent inside the program.
    """
    targets = ([], [1], [-1], [-1, 1])

    # Create all inputs first, then build the reshape + backward ops,
    # mirroring the op ordering of the straight-line version.
    xs = []
    for _ in targets:
        v = paddle.rand([])
        v.stop_gradient = False
        xs.append(v)

    outs = []
    for v, target in zip(xs, targets):
        out = paddle.reshape(v, target)
        paddle.static.append_backward(out)
        outs.append(out)

    main_prog = paddle.static.default_main_program()
    results = self.exe.run(main_prog, fetch_list=outs)
    for got, want in zip(results, [(), (1,), (1,), (1, 1)]):
        self.assertEqual(got.shape, want)

@prog_scope()
def test_reshape_tensor(self):
    """Static-graph reshape of 0-D inputs with Tensor-valued target shapes."""
    # One fresh 0-D input per case.
    xs = []
    for _ in range(3):
        v = paddle.rand([])
        v.stop_gradient = False
        xs.append(v)

    # Deferred shape constructors so each shape Tensor is created right
    # before its reshape op, preserving the original op ordering.
    shape_makers = [
        lambda: paddle.full([1], 1, "int32"),
        lambda: paddle.full([1], -1, "int32"),
        lambda: [paddle.full([], -1, "int32"), paddle.full([], 1, "int32")],
    ]

    outs = []
    for v, make_shape in zip(xs, shape_makers):
        out = paddle.reshape(v, make_shape())
        paddle.static.append_backward(out)
        outs.append(out)

    main_prog = paddle.static.default_main_program()
    results = self.exe.run(main_prog, fetch_list=outs)
    for got, want in zip(results, [(1,), (1,), (1, 1)]):
        self.assertEqual(got.shape, want)

@prog_scope()
def test_reverse(self):
    """Static-graph reverse of a 0-D tensor with an empty axis list.

    Both the fetched input and output come back as 0-D numpy arrays.
    """
    inp = paddle.rand([])
    inp.stop_gradient = False

    rev = paddle.reverse(inp, axis=[])
    paddle.static.append_backward(rev)

    main_prog = paddle.static.default_main_program()
    in_val, out_val = self.exe.run(main_prog, fetch_list=[inp, rev])
    self.assertEqual(in_val.shape, ())
    self.assertEqual(out_val.shape, ())


# Used to test APIs whose zero-dim input tensors have no grad and do not need backward testing in OpTest.
class TestNoBackwardAPI(unittest.TestCase):
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -556,6 +556,96 @@ def test_floor_divide(self):
np.testing.assert_array_equal(out3_1.numpy(), out3_2.numpy())
np.testing.assert_array_equal(out3_2.numpy(), np.asarray(1))

def test_reshape_list(self):
    """Reshape a 0-D tensor through a series of Python list target shapes.

    Checks the empty target (stays 0-D), [1], inferred [-1], and a 2-D
    [-1, 1] target; the gradient reaching the 0-D input is always 0-D.
    """
    x = paddle.rand([])
    x.stop_gradient = False

    for target, expected in (
        ([], []),
        ([1], [1]),
        ([-1], [1]),
        ([-1, 1], [1, 1]),
    ):
        out = paddle.reshape(x, target)
        out.backward()
        self.assertEqual(x.grad.shape, [])
        self.assertEqual(out.shape, expected)
        self.assertEqual(out.grad.shape, expected)

def test_reshape_tensor(self):
    """Reshape a one-element [1, 1] tensor with Tensor-valued shapes.

    Here the scalar targets are 0-D shape Tensors (paddle.full([], ...)),
    exercising 0-D Tensors as reshape shape inputs; the input gradient
    keeps its [1, 1] shape throughout.
    """
    x = paddle.rand([1, 1])
    x.stop_gradient = False

    # Empty list target first: collapses the output to 0-D.
    out = paddle.reshape(x, [])
    out.backward()
    self.assertEqual(x.grad.shape, [1, 1])
    self.assertEqual(out.shape, [])
    self.assertEqual(out.grad.shape, [])

    # (Tensor-valued target shape, expected output shape) pairs.
    specs = [
        (paddle.full([], 1, "int32"), [1]),
        (paddle.full([], -1, "int32"), [1]),
        ([paddle.full([], -1, "int32"), paddle.full([], 1, "int32")], [1, 1]),
    ]
    for target, expected in specs:
        out = paddle.reshape(x, target)
        out.backward()
        self.assertEqual(x.grad.shape, [1, 1])
        self.assertEqual(out.shape, expected)
        self.assertEqual(out.grad.shape, expected)

def test_reshape__list(self):
    """In-place reshape_ of a 0-D tensor through successive list targets."""
    x = paddle.rand([])
    for target, expected in (
        ([], []),
        ([1], [1]),
        ([-1], [1]),
        ([-1, 1], [1, 1]),
    ):
        out = paddle.reshape_(x, target)
        self.assertEqual(out.shape, expected)

def test_reshape__tensor(self):
    """In-place reshape_ of a one-element tensor with Tensor-valued shapes."""
    x = paddle.rand([1, 1])

    # Empty list target: the in-place result becomes 0-D.
    out = paddle.reshape_(x, [])
    self.assertEqual(out.shape, [])

    # (Tensor-valued target shape, expected output shape) pairs.
    specs = [
        (paddle.full([1], 1, "int32"), [1]),
        (paddle.full([1], -1, "int32"), [1]),
        ([paddle.full([], -1, "int32"), paddle.full([], 1, "int32")], [1, 1]),
    ]
    for target, expected in specs:
        out = paddle.reshape_(x, target)
        self.assertEqual(out.shape, expected)


# Used to test APIs whose zero-dim input tensors have no grad and do not need backward testing in OpTest.
class TestNoBackwardAPI(unittest.TestCase):
Expand Down
6 changes: 1 addition & 5 deletions python/paddle/tensor/manipulation.py
Original file line number Diff line number Diff line change
Expand Up @@ -3450,7 +3450,7 @@ def reshape(x, shape, name=None):
Args:
x (Tensor): An N-D Tensor. The data type is ``float32``, ``float64``, ``int32``, ``int64`` or ``bool``
shape (list|tuple|Tensor): Define the target shape. At most one dimension of the target shape can be -1.
The data type is ``int32`` . If ``shape`` is a list or tuple, the elements of it should be integers or Tensors with shape [1].
The data type is ``int32`` . If ``shape`` is a list or tuple, the elements of it should be integers or Tensors with shape [].
If ``shape`` is a Tensor, it should be a 1-D Tensor.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Expand Down Expand Up @@ -3574,10 +3574,6 @@ def get_attr_shape(list_shape):
shape.stop_gradient = True
inputs["Shape"] = shape
elif isinstance(shape, (list, tuple)):
assert len(shape) > 0, (
"The size of 'shape' in reshape can't be zero, "
"but received %s." % len(shape)
)
attrs["shape"] = get_attr_shape(shape)
if utils._contain_var(shape):
inputs['ShapeTensor'] = utils._convert_to_tensor_list(shape)
Expand Down

0 comments on commit 347d212

Please sign in to comment.