From d595f2985eaac6d58313514ffab9a278c569fb0c Mon Sep 17 00:00:00 2001 From: zhaoyinglia Date: Tue, 27 Dec 2022 11:21:48 +0800 Subject: [PATCH 1/5] [Zero-Dim] reshape/reshape_/reverse 0D support --- paddle/fluid/operators/reshape_op.cc | 10 +- paddle/phi/kernels/gpu/flip_kernel.cu | 3 + .../fluid/tests/unittests/test_reshape_op.py | 6 +- .../tests/unittests/test_zero_dim_tensor.py | 171 ++++++++++++++++++ .../unittests/xpu/test_zero_dim_tensor_xpu.py | 99 ++++++++++ python/paddle/tensor/manipulation.py | 4 - 6 files changed, 281 insertions(+), 12 deletions(-) diff --git a/paddle/fluid/operators/reshape_op.cc b/paddle/fluid/operators/reshape_op.cc index 6e8b962488a56..3592c779a85a6 100644 --- a/paddle/fluid/operators/reshape_op.cc +++ b/paddle/fluid/operators/reshape_op.cc @@ -114,11 +114,11 @@ class ReshapeOp : public framework::OperatorWithKernel { return; } - PADDLE_ENFORCE_EQ(!shape.empty(), - true, - platform::errors::InvalidArgument( - "The parameter 'shape' in ReshapeOp must be set. " - "But received 'shape' is empty.")); + // PADDLE_ENFORCE_EQ(!shape.empty(), + // true, + // platform::errors::InvalidArgument( + // "The parameter 'shape' in ReshapeOp must be set. " + // "But received 'shape' is empty.")); auto x_dims = ctx->GetInputDim("X"); auto out_dims = ValidateShape(shape, x_dims); ctx->SetOutputDim("Out", out_dims); diff --git a/paddle/phi/kernels/gpu/flip_kernel.cu b/paddle/phi/kernels/gpu/flip_kernel.cu index 6e9dbf37a9100..7945d6c8fcbaf 100644 --- a/paddle/phi/kernels/gpu/flip_kernel.cu +++ b/paddle/phi/kernels/gpu/flip_kernel.cu @@ -101,6 +101,9 @@ void FlipKernel(const Context& dev_ctx, DenseTensor* out) { const size_t total_dims = x.dims().size(); switch (total_dims) { + case 0: + LaunchFlipCudaKernel(dev_ctx, x, axis, out); + break; case 1: LaunchFlipCudaKernel(dev_ctx, x, axis, out); break; diff --git a/python/paddle/fluid/tests/unittests/test_reshape_op.py b/python/paddle/fluid/tests/unittests/test_reshape_op.py index a31749d744aea..85a52d58887c4 100755 --- a/python/paddle/fluid/tests/unittests/test_reshape_op.py +++ b/python/paddle/fluid/tests/unittests/test_reshape_op.py @@ -49,20 +49,20 @@ def test_check_grad(self): class TestReshapeOp_ZeroDim1(OpTest): def init_data(self): self.ori_shape = () - self.new_shape = 1 + self.new_shape = (1,) self.infered_shape = 1 class TestReshapeOp_ZeroDim2(OpTest): def init_data(self): self.ori_shape = () - self.new_shape = -1 + self.new_shape = (-1,) self.infered_shape = 1 class TestReshapeOp_ZeroDim3(OpTest): def init_data(self): - self.ori_shape = 1 + self.ori_shape = (1,) self.new_shape = () self.infered_shape = () diff --git a/python/paddle/fluid/tests/unittests/test_zero_dim_tensor.py b/python/paddle/fluid/tests/unittests/test_zero_dim_tensor.py index 9fb6017d446d0..c4863d4094907 100644 --- a/python/paddle/fluid/tests/unittests/test_zero_dim_tensor.py +++ b/python/paddle/fluid/tests/unittests/test_zero_dim_tensor.py @@ -712,6 +712,105 @@ def test_scatter_nd(self): self.assertEqual(out.numpy()[3], 2) self.assertEqual(out.grad.shape, [5]) + def test_reshape_list(self): + x = paddle.rand([]) + x.stop_gradient = False + + out = paddle.reshape(x, []) + out.backward() + self.assertEqual(x.grad.shape, []) + self.assertEqual(out.shape, []) + self.assertEqual(out.grad.shape, []) + + out = paddle.reshape(x, [1]) + out.backward() + self.assertEqual(x.grad.shape, []) + self.assertEqual(out.shape, [1]) + self.assertEqual(out.grad.shape, [1]) + + out = paddle.reshape(x, [-1]) + out.backward() + self.assertEqual(x.grad.shape, []) + 
self.assertEqual(out.shape, [1]) + self.assertEqual(out.grad.shape, [1]) + + out = paddle.reshape(x, [-1, 1]) + out.backward() + self.assertEqual(x.grad.shape, []) + self.assertEqual(out.shape, [1, 1]) + self.assertEqual(out.grad.shape, [1, 1]) + + def test_reshape_tensor(self): + x = paddle.rand([1, 1]) + x.stop_gradient = False + + out = paddle.reshape(x, []) + out.backward() + self.assertEqual(x.grad.shape, [1, 1]) + self.assertEqual(out.shape, []) + self.assertEqual(out.grad.shape, []) + + new_shape = paddle.to_tensor((1,)) + out = paddle.reshape(x, new_shape) + out.backward() + self.assertEqual(x.grad.shape, [1, 1]) + self.assertEqual(out.shape, [1]) + self.assertEqual(out.grad.shape, [1]) + + new_shape = paddle.to_tensor((-1,)) + out = paddle.reshape(x, new_shape) + out.backward() + self.assertEqual(x.grad.shape, [1, 1]) + self.assertEqual(out.shape, [1]) + self.assertEqual(out.grad.shape, [1]) + + new_shape = [paddle.to_tensor(-1), paddle.to_tensor(1)] + out = paddle.reshape(x, new_shape) + out.backward() + self.assertEqual(x.grad.shape, [1, 1]) + self.assertEqual(out.shape, [1, 1]) + self.assertEqual(out.grad.shape, [1, 1]) + + def test_reshape__list(self): + x = paddle.rand([]) + out = paddle.reshape_(x, []) + self.assertEqual(out.shape, []) + + out = paddle.reshape_(x, [1]) + self.assertEqual(out.shape, [1]) + + out = paddle.reshape_(x, [-1]) + self.assertEqual(out.shape, [1]) + + out = paddle.reshape_(x, [-1, 1]) + self.assertEqual(out.shape, [1, 1]) + + def test_reshape__tensor(self): + x = paddle.rand([1, 1]) + out = paddle.reshape_(x, []) + self.assertEqual(out.shape, []) + + new_shape = paddle.to_tensor((1,)) + out = paddle.reshape_(x, new_shape) + self.assertEqual(out.shape, [1]) + + new_shape = paddle.to_tensor((-1,)) + out = paddle.reshape_(x, new_shape) + self.assertEqual(out.shape, [1]) + + new_shape = [paddle.to_tensor(-1), paddle.to_tensor(1)] + out = paddle.reshape_(x, new_shape) + self.assertEqual(out.shape, [1, 1]) + + def test_reverse(self): + x = paddle.rand([]) + x.stop_gradient = False + out = paddle.reverse(x, axis=[]) + out.backward() + self.assertEqual(x.shape, []) + self.assertEqual(out.shape, []) + self.assertEqual(out.grad.shape, []) + class TestSundryAPIStatic(unittest.TestCase): def setUp(self): @@ -914,6 +1013,78 @@ def test_scatter_nd(self): self.assertEqual(res[0].shape, (5,)) self.assertEqual(res[0][3], 2) + @prog_scope() + def test_reshape_list(self): + x1 = paddle.rand([]) + x2 = paddle.rand([]) + x3 = paddle.rand([]) + x4 = paddle.rand([]) + x1.stop_gradient = False + x2.stop_gradient = False + x3.stop_gradient = False + x4.stop_gradient = False + + out1 = paddle.reshape(x1, []) + paddle.static.append_backward(out1) + + out2 = paddle.reshape(x2, [1]) + paddle.static.append_backward(out2) + + out3 = paddle.reshape(x3, [-1]) + paddle.static.append_backward(out3) + + out4 = paddle.reshape(x4, [-1, 1]) + paddle.static.append_backward(out4) + + program = paddle.static.default_main_program() + res1, res2, res3, res4 = self.exe.run( + program, fetch_list=[out1, out2, out3, out4] + ) + self.assertEqual(res1.shape, ()) + self.assertEqual(res2.shape, (1,)) + self.assertEqual(res3.shape, (1,)) + self.assertEqual(res4.shape, (1, 1)) + + @prog_scope() + def test_reshape_tensor(self): + x1 = paddle.rand([]) + x2 = paddle.rand([]) + x3 = paddle.rand([]) + x1.stop_gradient = False + x2.stop_gradient = False + x3.stop_gradient = False + + new_shape = paddle.to_tensor((1,)) + out1 = paddle.reshape(x1, new_shape) + paddle.static.append_backward(out1) + + new_shape 
= paddle.to_tensor((-1,)) + out2 = paddle.reshape(x2, new_shape) + paddle.static.append_backward(out2) + + new_shape = [paddle.to_tensor(-1), paddle.to_tensor(1)] + out3 = paddle.reshape(x3, new_shape) + paddle.static.append_backward(out3) + + program = paddle.static.default_main_program() + res1, res2, res3 = self.exe.run(program, fetch_list=[out1, out2, out3]) + self.assertEqual(res1.shape, (1,)) + self.assertEqual(res2.shape, (1,)) + self.assertEqual(res3.shape, (1, 1)) + + @prog_scope() + def test_reverse(self): + x = paddle.rand([]) + x.stop_gradient = False + + out = paddle.reverse(x, axis=[]) + paddle.static.append_backward(out) + + program = paddle.static.default_main_program() + res1, res2 = self.exe.run(program, fetch_list=[x, out]) + self.assertEqual(res1.shape, ()) + self.assertEqual(res2.shape, ()) + # Use to test API whose zero-dim input tensors don't have grad and not need to test backward in OpTest. class TestNoBackwardAPI(unittest.TestCase): diff --git a/python/paddle/fluid/tests/unittests/xpu/test_zero_dim_tensor_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_zero_dim_tensor_xpu.py index 018ecc20e7daf..2223cb8fbceca 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_zero_dim_tensor_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_zero_dim_tensor_xpu.py @@ -521,6 +521,105 @@ def test_scatter__XD(self): for i in range(3): self.assertEqual(out.numpy()[1][i], updates.numpy()[i]) + def test_reshape_list(self): + x = paddle.rand([]) + x.stop_gradient = False + + out = paddle.reshape(x, []) + out.backward() + self.assertEqual(x.grad.shape, []) + self.assertEqual(out.shape, []) + self.assertEqual(out.grad.shape, []) + + out = paddle.reshape(x, [1]) + out.backward() + self.assertEqual(x.grad.shape, []) + self.assertEqual(out.shape, [1]) + self.assertEqual(out.grad.shape, [1]) + + out = paddle.reshape(x, [-1]) + out.backward() + self.assertEqual(x.grad.shape, []) + self.assertEqual(out.shape, [1]) + self.assertEqual(out.grad.shape, [1]) + + out = paddle.reshape(x, [-1, 1]) + out.backward() + self.assertEqual(x.grad.shape, []) + self.assertEqual(out.shape, [1, 1]) + self.assertEqual(out.grad.shape, [1, 1]) + + def test_reshape_tensor(self): + x = paddle.rand([1, 1]) + x.stop_gradient = False + + out = paddle.reshape(x, []) + out.backward() + self.assertEqual(x.grad.shape, [1, 1]) + self.assertEqual(out.shape, []) + self.assertEqual(out.grad.shape, []) + + new_shape = paddle.to_tensor((1,)) + out = paddle.reshape(x, new_shape) + out.backward() + self.assertEqual(x.grad.shape, [1, 1]) + self.assertEqual(out.shape, [1]) + self.assertEqual(out.grad.shape, [1]) + + new_shape = paddle.to_tensor((-1,)) + out = paddle.reshape(x, new_shape) + out.backward() + self.assertEqual(x.grad.shape, [1, 1]) + self.assertEqual(out.shape, [1]) + self.assertEqual(out.grad.shape, [1]) + + new_shape = [paddle.to_tensor(-1), paddle.to_tensor(1)] + out = paddle.reshape(x, new_shape) + out.backward() + self.assertEqual(x.grad.shape, [1, 1]) + self.assertEqual(out.shape, [1, 1]) + self.assertEqual(out.grad.shape, [1, 1]) + + def test_reshape__list(self): + x = paddle.rand([]) + out = paddle.reshape_(x, []) + self.assertEqual(out.shape, []) + + out = paddle.reshape_(x, [1]) + self.assertEqual(out.shape, [1]) + + out = paddle.reshape_(x, [-1]) + self.assertEqual(out.shape, [1]) + + out = paddle.reshape_(x, [-1, 1]) + self.assertEqual(out.shape, [1, 1]) + + def test_reshape__tensor(self): + x = paddle.rand([1, 1]) + out = paddle.reshape_(x, []) + self.assertEqual(out.shape, []) + + 
new_shape = paddle.to_tensor((1,)) + out = paddle.reshape_(x, new_shape) + self.assertEqual(out.shape, [1]) + + new_shape = paddle.to_tensor((-1,)) + out = paddle.reshape_(x, new_shape) + self.assertEqual(out.shape, [1]) + + new_shape = [paddle.to_tensor(-1), paddle.to_tensor(1)] + out = paddle.reshape_(x, new_shape) + self.assertEqual(out.shape, [1, 1]) + + def test_reverse(self): + x = paddle.rand([]) + x.stop_gradient = False + out = paddle.reverse(x, axis=[]) + out.backward() + self.assertEqual(x.shape, []) + self.assertEqual(out.shape, []) + self.assertEqual(out.grad.shape, []) + # Use to test API whose zero-dim input tensors don't have grad and not need to test backward in OpTest. class TestNoBackwardAPI(unittest.TestCase): diff --git a/python/paddle/tensor/manipulation.py b/python/paddle/tensor/manipulation.py index 8f25e7e762d7b..1fc569063d0b4 100644 --- a/python/paddle/tensor/manipulation.py +++ b/python/paddle/tensor/manipulation.py @@ -3792,10 +3792,6 @@ def get_attr_shape(list_shape): shape.stop_gradient = True inputs["Shape"] = shape elif isinstance(shape, (list, tuple)): - assert len(shape) > 0, ( - "The size of 'shape' in reshape can't be zero, " - "but received %s." % len(shape) - ) attrs["shape"] = get_attr_shape(shape) if utils._contain_var(shape): inputs['ShapeTensor'] = utils._convert_to_tensor_list(shape) From 130dd5c5b5ffeff6236b69a18650f7c9344594ca Mon Sep 17 00:00:00 2001 From: zhaoyinglia Date: Tue, 27 Dec 2022 11:30:01 +0800 Subject: [PATCH 2/5] rm comment --- paddle/fluid/operators/reshape_op.cc | 5 ----- python/paddle/tensor/manipulation.py | 1 + 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/paddle/fluid/operators/reshape_op.cc b/paddle/fluid/operators/reshape_op.cc index 3592c779a85a6..e980aa66e7ca3 100644 --- a/paddle/fluid/operators/reshape_op.cc +++ b/paddle/fluid/operators/reshape_op.cc @@ -114,11 +114,6 @@ class ReshapeOp : public framework::OperatorWithKernel { return; } - // PADDLE_ENFORCE_EQ(!shape.empty(), - // true, - // platform::errors::InvalidArgument( - // "The parameter 'shape' in ReshapeOp must be set. 
" - // "But received 'shape' is empty.")); auto x_dims = ctx->GetInputDim("X"); auto out_dims = ValidateShape(shape, x_dims); ctx->SetOutputDim("Out", out_dims); diff --git a/python/paddle/tensor/manipulation.py b/python/paddle/tensor/manipulation.py index d9fc1eb7a2068..eb7a974656326 100644 --- a/python/paddle/tensor/manipulation.py +++ b/python/paddle/tensor/manipulation.py @@ -3484,6 +3484,7 @@ def reshape(x, shape, name=None): """ actual_shape = None + if in_dygraph_mode(): tmp_tensor_type = core.eager.Tensor if isinstance(shape, (list, tuple)): From 9240a64e3360eea6fe07af8a09ff88485bed6858 Mon Sep 17 00:00:00 2001 From: zhaoyinglia Date: Tue, 27 Dec 2022 21:29:38 +0800 Subject: [PATCH 3/5] change paddle.to_tensor to paddle.full --- .../fluid/tests/unittests/test_reshape_op.py | 4 ++-- .../tests/unittests/test_zero_dim_tensor.py | 18 +++++++++--------- .../unittests/xpu/test_zero_dim_tensor_xpu.py | 12 ++++++------ python/paddle/tensor/manipulation.py | 4 ++-- 4 files changed, 19 insertions(+), 19 deletions(-) diff --git a/python/paddle/fluid/tests/unittests/test_reshape_op.py b/python/paddle/fluid/tests/unittests/test_reshape_op.py index 85a52d58887c4..887ce9ff3f741 100755 --- a/python/paddle/fluid/tests/unittests/test_reshape_op.py +++ b/python/paddle/fluid/tests/unittests/test_reshape_op.py @@ -50,14 +50,14 @@ class TestReshapeOp_ZeroDim1(OpTest): def init_data(self): self.ori_shape = () self.new_shape = (1,) - self.infered_shape = 1 + self.infered_shape = (1,) class TestReshapeOp_ZeroDim2(OpTest): def init_data(self): self.ori_shape = () self.new_shape = (-1,) - self.infered_shape = 1 + self.infered_shape = (1,) class TestReshapeOp_ZeroDim3(OpTest): diff --git a/python/paddle/fluid/tests/unittests/test_zero_dim_tensor.py b/python/paddle/fluid/tests/unittests/test_zero_dim_tensor.py index c4863d4094907..7b9b0badfac9a 100644 --- a/python/paddle/fluid/tests/unittests/test_zero_dim_tensor.py +++ b/python/paddle/fluid/tests/unittests/test_zero_dim_tensor.py @@ -750,21 +750,21 @@ def test_reshape_tensor(self): self.assertEqual(out.shape, []) self.assertEqual(out.grad.shape, []) - new_shape = paddle.to_tensor((1,)) + new_shape = paddle.full([], 1, "int32") out = paddle.reshape(x, new_shape) out.backward() self.assertEqual(x.grad.shape, [1, 1]) self.assertEqual(out.shape, [1]) self.assertEqual(out.grad.shape, [1]) - new_shape = paddle.to_tensor((-1,)) + new_shape = paddle.full([], -1, "int32") out = paddle.reshape(x, new_shape) out.backward() self.assertEqual(x.grad.shape, [1, 1]) self.assertEqual(out.shape, [1]) self.assertEqual(out.grad.shape, [1]) - new_shape = [paddle.to_tensor(-1), paddle.to_tensor(1)] + new_shape = [paddle.full([], -1, "int32"), paddle.full([], 1, "int32")] out = paddle.reshape(x, new_shape) out.backward() self.assertEqual(x.grad.shape, [1, 1]) @@ -790,15 +790,15 @@ def test_reshape__tensor(self): out = paddle.reshape_(x, []) self.assertEqual(out.shape, []) - new_shape = paddle.to_tensor((1,)) + new_shape = paddle.full([], 1, "int32") out = paddle.reshape_(x, new_shape) self.assertEqual(out.shape, [1]) - new_shape = paddle.to_tensor((-1,)) + new_shape = paddle.full([], -1, "int32") out = paddle.reshape_(x, new_shape) self.assertEqual(out.shape, [1]) - new_shape = [paddle.to_tensor(-1), paddle.to_tensor(1)] + new_shape = [paddle.full([], -1, "int32"), paddle.full([], 1, "int32")] out = paddle.reshape_(x, new_shape) self.assertEqual(out.shape, [1, 1]) @@ -1054,15 +1054,15 @@ def test_reshape_tensor(self): x2.stop_gradient = False x3.stop_gradient = False - new_shape = 
paddle.to_tensor((1,)) + new_shape = paddle.full([], 1, "int32") out1 = paddle.reshape(x1, new_shape) paddle.static.append_backward(out1) - new_shape = paddle.to_tensor((-1,)) + new_shape = paddle.full([], -1, "int32") out2 = paddle.reshape(x2, new_shape) paddle.static.append_backward(out2) - new_shape = [paddle.to_tensor(-1), paddle.to_tensor(1)] + new_shape = [paddle.full([], -1, "int32"), paddle.full([], 1, "int32")] out3 = paddle.reshape(x3, new_shape) paddle.static.append_backward(out3) diff --git a/python/paddle/fluid/tests/unittests/xpu/test_zero_dim_tensor_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_zero_dim_tensor_xpu.py index 2223cb8fbceca..39b4407efb586 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_zero_dim_tensor_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_zero_dim_tensor_xpu.py @@ -559,21 +559,21 @@ def test_reshape_tensor(self): self.assertEqual(out.shape, []) self.assertEqual(out.grad.shape, []) - new_shape = paddle.to_tensor((1,)) + new_shape = paddle.full([], 1, "int32") out = paddle.reshape(x, new_shape) out.backward() self.assertEqual(x.grad.shape, [1, 1]) self.assertEqual(out.shape, [1]) self.assertEqual(out.grad.shape, [1]) - new_shape = paddle.to_tensor((-1,)) + new_shape = paddle.full([], -1, "int32") out = paddle.reshape(x, new_shape) out.backward() self.assertEqual(x.grad.shape, [1, 1]) self.assertEqual(out.shape, [1]) self.assertEqual(out.grad.shape, [1]) - new_shape = [paddle.to_tensor(-1), paddle.to_tensor(1)] + new_shape = [paddle.full([], -1, "int32"), paddle.full([], 1, "int32")] out = paddle.reshape(x, new_shape) out.backward() self.assertEqual(x.grad.shape, [1, 1]) @@ -599,15 +599,15 @@ def test_reshape__tensor(self): out = paddle.reshape_(x, []) self.assertEqual(out.shape, []) - new_shape = paddle.to_tensor((1,)) + new_shape = paddle.full([], 1, "int32") out = paddle.reshape_(x, new_shape) self.assertEqual(out.shape, [1]) - new_shape = paddle.to_tensor((-1,)) + new_shape = paddle.full([], -1, "int32") out = paddle.reshape_(x, new_shape) self.assertEqual(out.shape, [1]) - new_shape = [paddle.to_tensor(-1), paddle.to_tensor(1)] + new_shape = [paddle.full([], -1, "int32"), paddle.full([], 1, "int32")] out = paddle.reshape_(x, new_shape) self.assertEqual(out.shape, [1, 1]) diff --git a/python/paddle/tensor/manipulation.py b/python/paddle/tensor/manipulation.py index eb7a974656326..503f794e93670 100644 --- a/python/paddle/tensor/manipulation.py +++ b/python/paddle/tensor/manipulation.py @@ -3450,8 +3450,8 @@ def reshape(x, shape, name=None): Args: x (Tensor): An N-D Tensor. The data type is ``float32``, ``float64``, ``int32``, ``int64`` or ``bool`` shape (list|tuple|Tensor): Define the target shape. At most one dimension of the target shape can be -1. - The data type is ``int32`` . If ``shape`` is a list or tuple, the elements of it should be integers or Tensors with shape [1]. - If ``shape`` is an Tensor, it should be an 1-D Tensor . + The data type is ``int32`` . If ``shape`` is a list or tuple, the elements of it should be integers or Tensors with shape []. + If ``shape`` is an Tensor, it should be an 0-D Tensor . name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. 
Returns: From 72498fb47168b36f6d05f98232077b55f2f6d072 Mon Sep 17 00:00:00 2001 From: zhaoyinglia Date: Wed, 28 Dec 2022 13:22:02 +0800 Subject: [PATCH 4/5] fix docs --- python/paddle/tensor/manipulation.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/paddle/tensor/manipulation.py b/python/paddle/tensor/manipulation.py index 503f794e93670..4192de1b0e57d 100644 --- a/python/paddle/tensor/manipulation.py +++ b/python/paddle/tensor/manipulation.py @@ -3451,7 +3451,7 @@ def reshape(x, shape, name=None): x (Tensor): An N-D Tensor. The data type is ``float32``, ``float64``, ``int32``, ``int64`` or ``bool`` shape (list|tuple|Tensor): Define the target shape. At most one dimension of the target shape can be -1. The data type is ``int32`` . If ``shape`` is a list or tuple, the elements of it should be integers or Tensors with shape []. - If ``shape`` is an Tensor, it should be an 0-D Tensor . + If ``shape`` is an Tensor, it should be an 1-D Tensor . name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: From fa3e4745b5f248bc0f94951203a471f376c8753b Mon Sep 17 00:00:00 2001 From: zhaoyinglia Date: Thu, 29 Dec 2022 21:59:14 +0800 Subject: [PATCH 5/5] update paddle.full --- .../fluid/tests/unittests/test_zero_dim_tensor.py | 12 ++++++------ .../tests/unittests/xpu/test_zero_dim_tensor_xpu.py | 13 ++----------- 2 files changed, 8 insertions(+), 17 deletions(-) diff --git a/python/paddle/fluid/tests/unittests/test_zero_dim_tensor.py b/python/paddle/fluid/tests/unittests/test_zero_dim_tensor.py index 8313d4487accd..cabecdf447ab2 100644 --- a/python/paddle/fluid/tests/unittests/test_zero_dim_tensor.py +++ b/python/paddle/fluid/tests/unittests/test_zero_dim_tensor.py @@ -785,14 +785,14 @@ def test_reshape_tensor(self): self.assertEqual(out.shape, []) self.assertEqual(out.grad.shape, []) - new_shape = paddle.full([], 1, "int32") + new_shape = paddle.full([1], 1, "int32") out = paddle.reshape(x, new_shape) out.backward() self.assertEqual(x.grad.shape, [1, 1]) self.assertEqual(out.shape, [1]) self.assertEqual(out.grad.shape, [1]) - new_shape = paddle.full([], -1, "int32") + new_shape = paddle.full([1], -1, "int32") out = paddle.reshape(x, new_shape) out.backward() self.assertEqual(x.grad.shape, [1, 1]) @@ -825,11 +825,11 @@ def test_reshape__tensor(self): out = paddle.reshape_(x, []) self.assertEqual(out.shape, []) - new_shape = paddle.full([], 1, "int32") + new_shape = paddle.full([1], 1, "int32") out = paddle.reshape_(x, new_shape) self.assertEqual(out.shape, [1]) - new_shape = paddle.full([], -1, "int32") + new_shape = paddle.full([1], -1, "int32") out = paddle.reshape_(x, new_shape) self.assertEqual(out.shape, [1]) @@ -1130,11 +1130,11 @@ def test_reshape_tensor(self): x2.stop_gradient = False x3.stop_gradient = False - new_shape = paddle.full([], 1, "int32") + new_shape = paddle.full([1], 1, "int32") out1 = paddle.reshape(x1, new_shape) paddle.static.append_backward(out1) - new_shape = paddle.full([], -1, "int32") + new_shape = paddle.full([1], -1, "int32") out2 = paddle.reshape(x2, new_shape) paddle.static.append_backward(out2) diff --git a/python/paddle/fluid/tests/unittests/xpu/test_zero_dim_tensor_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_zero_dim_tensor_xpu.py index cb1e3b211f7b1..8ceee04c206b1 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_zero_dim_tensor_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_zero_dim_tensor_xpu.py @@ -634,11 +634,11 @@ def 
test_reshape__tensor(self): out = paddle.reshape_(x, []) self.assertEqual(out.shape, []) - new_shape = paddle.full([], 1, "int32") + new_shape = paddle.full([1], 1, "int32") out = paddle.reshape_(x, new_shape) self.assertEqual(out.shape, [1]) - new_shape = paddle.full([], -1, "int32") + new_shape = paddle.full([1], -1, "int32") out = paddle.reshape_(x, new_shape) self.assertEqual(out.shape, [1]) @@ -646,15 +646,6 @@ def test_reshape__tensor(self): out = paddle.reshape_(x, new_shape) self.assertEqual(out.shape, [1, 1]) - def test_reverse(self): - x = paddle.rand([]) - x.stop_gradient = False - out = paddle.reverse(x, axis=[]) - out.backward() - self.assertEqual(x.shape, []) - self.assertEqual(out.shape, []) - self.assertEqual(out.grad.shape, []) - # Use to test API whose zero-dim input tensors don't have grad and not need to test backward in OpTest. class TestNoBackwardAPI(unittest.TestCase):
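
For reference, a minimal dygraph sketch of the behavior this series enables, mirroring the new unit tests above. This is an illustrative usage example, not part of the patches, and assumes a Paddle build with all five patches applied:

    import paddle

    x = paddle.rand([])                      # 0-D tensor
    print(paddle.reshape(x, []).shape)       # [] -- reshape to 0-D is now allowed
    print(paddle.reshape(x, [1]).shape)      # [1]
    print(paddle.reshape(x, [-1, 1]).shape)  # [1, 1]

    y = paddle.rand([1, 1])
    print(paddle.reshape(y, []).shape)       # [] -- collapse a single-element tensor to 0-D

    # shape given as a 1-D int32 tensor, as in the patch-5 tests
    new_shape = paddle.full([1], -1, "int32")
    print(paddle.reshape(y, new_shape).shape)  # [1]

    # reverse of a 0-D tensor with an empty axis list is a no-op
    print(paddle.reverse(x, axis=[]).shape)  # []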