Commit
support concat backward refuse forward (PaddlePaddle#45940)
Charles-hit committed Sep 18, 2022
1 parent f6dd201 commit 0ff585c
Showing 2 changed files with 82 additions and 5 deletions.
6 changes: 1 addition & 5 deletions paddle/phi/api/yaml/legacy_backward.yaml
@@ -404,11 +404,7 @@
   forward : concat_grad (Tensor[] x, Tensor grad_out, Scalar axis) -> Tensor[](grad_x)
   args : (Tensor[] grad_x_grad, Scalar axis = 0)
   output : Tensor(grad_out_grad)
-  infer_meta :
-    func : ConcatInferMeta
-    param : [grad_x_grad, axis]
-  kernel :
-    func : concat
+  invoke : concat(grad_x_grad, axis)

 - backward_op : concat_grad
   forward : concat (Tensor[] x, Scalar axis) -> Tensor(out)
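The hunk above replaces the explicit infer_meta/kernel pair for concat's double grad with an invoke of the forward concat kernel. This works because the second-order gradient of concat with respect to grad_out is just the concatenation of the incoming grad_x_grad tensors along the same axis. A minimal NumPy sketch of that identity (shapes are illustrative, not taken from the commit):

import numpy as np

# Illustrative second-order gradients of two concat inputs.
grad_x_grad = [np.ones((2, 3)), 2.0 * np.ones((2, 3))]
axis = 0

# concat_double_grad(grad_x_grad, axis) reduces to concat(grad_x_grad, axis).
grad_out_grad = np.concatenate(grad_x_grad, axis=axis)
print(grad_out_grad.shape)  # (4, 3): the shape grad_out would have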
81 changes: 81 additions & 0 deletions python/paddle/fluid/tests/unittests/test_concat_op.py
@@ -21,6 +21,9 @@
from paddle.fluid import compiler, Program, program_guard, core
from paddle.fluid.framework import _test_eager_guard
import paddle
import gradient_checker
from decorator_helper import prog_scope
import paddle.fluid.layers as layers


class TestConcatOp(OpTest):
@@ -451,5 +454,83 @@ def _run_static_mode(self, use_fluid_api):
res[0], np.concatenate([self.x] * self.iter_num, axis=self.axis))


class TestConcatDoubleGradCheck(unittest.TestCase):

    def concat_wrapper(self, x):
        return paddle.concat(x)

    @prog_scope()
    def func(self, place):
        # the shapes of the input variables must be specified explicitly and must not include -1.
        eps = 0.005
        dtype = np.float32

        data1 = layers.data('data1', [2, 3], False, dtype)
        data1.persistable = True
        data2 = layers.data('data2', [2, 3], False, dtype)
        data2.persistable = True
        out = paddle.concat([data1, data2])
        data1_arr = np.random.uniform(-1, 1, data1.shape).astype(dtype)
        data2_arr = np.random.uniform(-1, 1, data2.shape).astype(dtype)
        gradient_checker.double_grad_check([data1, data2],
                                           out,
                                           x_init=[data1_arr, data2_arr],
                                           place=place,
                                           eps=eps)
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        gradient_checker.double_grad_check_for_dygraph(
            self.concat_wrapper, [data1, data2],
            out,
            x_init=[data1_arr, data2_arr],
            place=place)

    def test_grad(self):
        paddle.enable_static()
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


class TestConcatTripleGradCheck(unittest.TestCase):

    def concat_wrapper(self, x):
        return paddle.concat(x, 1)

    @prog_scope()
    def func(self, place):
        # the shapes of the input variables must be specified explicitly and must not include -1.
        eps = 0.005
        dtype = np.float32

        data1 = layers.data('data1', [2, 3, 4], False, dtype)
        data1.persistable = True
        data2 = layers.data('data2', [2, 3, 4], False, dtype)
        data2.persistable = True
        out = paddle.concat([data1, data2], 1)
        data1_arr = np.random.uniform(-1, 1, data1.shape).astype(dtype)
        data2_arr = np.random.uniform(-1, 1, data2.shape).astype(dtype)
        gradient_checker.double_grad_check([data1, data2],
                                           out,
                                           x_init=[data1_arr, data2_arr],
                                           place=place,
                                           eps=eps)
        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        gradient_checker.double_grad_check_for_dygraph(
            self.concat_wrapper, [data1, data2],
            out,
            x_init=[data1_arr, data2_arr],
            place=place)

    def test_grad(self):
        paddle.enable_static()
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


if __name__ == '__main__':
    unittest.main()
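The two new test classes follow the existing gradient_checker pattern: build the static graph, run double_grad_check with finite-difference step eps, then repeat the check in dygraph mode via double_grad_check_for_dygraph with FLAGS_retain_grad_for_all_tensor enabled. A minimal sketch of running just these checks in isolation, assuming the Paddle unittest directory is on sys.path:

import unittest

from test_concat_op import (TestConcatDoubleGradCheck,
                            TestConcatTripleGradCheck)

# Load only the newly added cases and run them with a verbose runner.
loader = unittest.defaultTestLoader
suite = unittest.TestSuite()
suite.addTests(loader.loadTestsFromTestCase(TestConcatDoubleGradCheck))
suite.addTests(loader.loadTestsFromTestCase(TestConcatTripleGradCheck))
unittest.TextTestRunner(verbosity=2).run(suite)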
