diff --git a/paddle/phi/api/yaml/legacy_backward.yaml b/paddle/phi/api/yaml/legacy_backward.yaml
index 6267c99e14700..e63920a903d65 100755
--- a/paddle/phi/api/yaml/legacy_backward.yaml
+++ b/paddle/phi/api/yaml/legacy_backward.yaml
@@ -404,11 +404,7 @@
   forward : concat_grad (Tensor[] x, Tensor grad_out, Scalar axis) -> Tensor[](grad_x)
   args : (Tensor[] grad_x_grad, Scalar axis = 0)
   output : Tensor(grad_out_grad)
-  infer_meta :
-    func : ConcatInferMeta
-    param : [grad_x_grad, axis]
-  kernel :
-    func : concat
+  invoke : concat(grad_x_grad, axis)

 - backward_op : concat_grad
   forward : concat (Tensor[] x, Scalar axis) -> Tensor(out)
diff --git a/python/paddle/fluid/tests/unittests/test_concat_op.py b/python/paddle/fluid/tests/unittests/test_concat_op.py
index 0bf3d6230d84f..10c7410708341 100644
--- a/python/paddle/fluid/tests/unittests/test_concat_op.py
+++ b/python/paddle/fluid/tests/unittests/test_concat_op.py
@@ -21,6 +21,9 @@
 from paddle.fluid import compiler, Program, program_guard, core
 from paddle.fluid.framework import _test_eager_guard
 import paddle
+import gradient_checker
+from decorator_helper import prog_scope
+import paddle.fluid.layers as layers


 class TestConcatOp(OpTest):
@@ -451,5 +454,83 @@ def _run_static_mode(self, use_fluid_api):
             res[0], np.concatenate([self.x] * self.iter_num, axis=self.axis))


+class TestConcatDoubleGradCheck(unittest.TestCase):
+
+    def concat_wrapper(self, x):
+        return paddle.concat(x)
+
+    @prog_scope()
+    def func(self, place):
+        # the shape of the input variable should be clearly specified, not include -1.
+        eps = 0.005
+        dtype = np.float32
+
+        data1 = layers.data('data1', [2, 3], False, dtype)
+        data1.persistable = True
+        data2 = layers.data('data2', [2, 3], False, dtype)
+        data2.persistable = True
+        out = paddle.concat([data1, data2])
+        data1_arr = np.random.uniform(-1, 1, data1.shape).astype(dtype)
+        data2_arr = np.random.uniform(-1, 1, data2.shape).astype(dtype)
+        gradient_checker.double_grad_check([data1, data2],
+                                           out,
+                                           x_init=[data1_arr, data2_arr],
+                                           place=place,
+                                           eps=eps)
+        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
+        gradient_checker.double_grad_check_for_dygraph(
+            self.concat_wrapper, [data1, data2],
+            out,
+            x_init=[data1_arr, data2_arr],
+            place=place)
+
+    def test_grad(self):
+        paddle.enable_static()
+        places = [fluid.CPUPlace()]
+        if core.is_compiled_with_cuda():
+            places.append(fluid.CUDAPlace(0))
+        for p in places:
+            self.func(p)
+
+
+class TestConcatTripleGradCheck(unittest.TestCase):
+
+    def concat_wrapper(self, x):
+        return paddle.concat(x, 1)
+
+    @prog_scope()
+    def func(self, place):
+        # the shape of the input variable should be clearly specified, not include -1.
+        eps = 0.005
+        dtype = np.float32
+
+        data1 = layers.data('data1', [2, 3, 4], False, dtype)
+        data1.persistable = True
+        data2 = layers.data('data2', [2, 3, 4], False, dtype)
+        data2.persistable = True
+        out = paddle.concat([data1, data2], 1)
+        data1_arr = np.random.uniform(-1, 1, data1.shape).astype(dtype)
+        data2_arr = np.random.uniform(-1, 1, data2.shape).astype(dtype)
+        gradient_checker.double_grad_check([data1, data2],
+                                           out,
+                                           x_init=[data1_arr, data2_arr],
+                                           place=place,
+                                           eps=eps)
+        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
+        gradient_checker.double_grad_check_for_dygraph(
+            self.concat_wrapper, [data1, data2],
+            out,
+            x_init=[data1_arr, data2_arr],
+            place=place)
+
+    def test_grad(self):
+        paddle.enable_static()
+        places = [fluid.CPUPlace()]
+        if core.is_compiled_with_cuda():
+            places.append(fluid.CUDAPlace(0))
+        for p in places:
+            self.func(p)
+
+
 if __name__ == '__main__':
     unittest.main()
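For context, a minimal sketch (not part of the patch; the tensor shapes and variable names are illustrative) of what the new `concat_double_grad` entry enables: taking a second-order gradient through `paddle.concat` in dygraph mode, where the double grad now simply invokes `concat(grad_x_grad, axis)` instead of a separately registered kernel.

```python
# Minimal sketch, not part of the patch: second-order gradients through
# paddle.concat in dygraph mode. Shapes and variable names are illustrative.
import paddle

x = paddle.rand([2, 3])
x.stop_gradient = False
y = paddle.rand([2, 3])
y.stop_gradient = False

out = paddle.concat([x, y], axis=0)  # forward: concat
loss = (out * out).sum()

# First-order gradient, kept in the graph so it can be differentiated again.
(dx,) = paddle.grad(loss, [x], create_graph=True)
# Second-order gradient; differentiating through concat_grad exercises
# concat_double_grad, which after this change is just another concat over
# the incoming grad_x_grad tensors.
(ddx,) = paddle.grad(dx.sum(), [x])
print(ddx.shape)  # [2, 3]
```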