Add yaml for reduce_sum OP #41295

Merged · 6 commits · Apr 4, 2022
python/paddle/fluid/tests/unittests/CMakeLists.txt (1 addition, 1 deletion)

@@ -1077,7 +1077,7 @@ set_tests_properties(test_generator_dataloader PROPERTIES TIMEOUT 120)
 set_tests_properties(test_partial_concat_op PROPERTIES TIMEOUT 120)
 set_tests_properties(test_fuse_optimizer_pass PROPERTIES TIMEOUT 120)
 set_tests_properties(test_softmax_with_cross_entropy_op PROPERTIES TIMEOUT 120)
-set_tests_properties(test_reduce_op PROPERTIES TIMEOUT 120)
+set_tests_properties(test_reduce_op PROPERTIES TIMEOUT 500)
 set_tests_properties(test_adam_optimizer_fp32_fp64 PROPERTIES TIMEOUT 120)
 set_tests_properties(test_elementwise_nn_grad PROPERTIES TIMEOUT 120)
 set_tests_properties(test_buffer_shared_memory_reuse_pass_and_fuse_optimization_op_pass PROPERTIES TIMEOUT 120)
python/paddle/fluid/tests/unittests/op_test.py (0 additions, 2 deletions)

@@ -1559,8 +1559,6 @@ def calculate_output(self):
 
     def _compare_numpy(self, name, actual_np, expect_np):
         with _test_eager_guard():
-            print(actual_np)
-            print(expect_np)
             super()._compare_numpy(name, actual_np, expect_np)
 
     def convert_uint16_to_float_ifneed(self, actual_np, expect_np):
python/paddle/fluid/tests/unittests/test_reduce_op.py (29 additions, 14 deletions)

@@ -26,19 +26,22 @@
 
 class TestSumOp(OpTest):
     def setUp(self):
+        self.python_api = paddle.sum
         self.op_type = "reduce_sum"
         self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
         self.outputs = {'Out': self.inputs['X'].sum(axis=0)}
         self.attrs = {'dim': [0]}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)
 
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=True)
 
 
 class TestSumOp_fp16(OpTest):
     def setUp(self):
+        self.python_api = paddle.sum
         self.op_type = "reduce_sum"
         self.inputs = {
             'X': np.random.uniform(0, 0.1, (5, 6, 10)).astype("float16")
@@ -50,22 +53,24 @@ def setUp(self):
         self.gradient = self.calc_gradient()
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)
 
     def calc_gradient(self):
         x = self.inputs["X"]
         grad = np.ones(x.shape, dtype=x.dtype)
         return grad,
 
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', user_defined_grads=self.gradient)
+        self.check_grad(
+            ['X'], 'Out', user_defined_grads=self.gradient, check_eager=True)
 
 
 @unittest.skipIf(not core.is_compiled_with_cuda(),
                  "core is not compiled with CUDA")
 class TestSumOp_bf16(OpTest):
     def setUp(self):
         np.random.seed(100)
+        self.python_api = paddle.sum
         self.op_type = "reduce_sum"
         self.dtype = np.uint16
         self.x = np.random.uniform(0, 0.1, (2, 5, 10)).astype(np.float32)
@@ -79,12 +84,15 @@ def setUp(self):
 
     def test_check_output(self):
         place = core.CUDAPlace(0)
-        self.check_output_with_place(place)
+        self.check_output_with_place(place, check_eager=True)
 
     def test_check_grad(self):
         place = core.CUDAPlace(0)
         self.check_grad_with_place(
-            place, ['X'], 'Out', user_defined_grads=self.gradient)
+            place, ['X'],
+            'Out',
+            user_defined_grads=self.gradient,
+            check_eager=True)
 
     def calc_gradient(self):
         x = self.x
@@ -94,6 +102,7 @@ def calc_gradient(self):
 
 class TestSumOp_fp16_withInt(OpTest):
     def setUp(self):
+        self.python_api = paddle.sum
         self.op_type = "reduce_sum"
         self.inputs = {
             # ref to https://en.wikipedia.org/wiki/Half-precision_floating-point_format
@@ -107,49 +116,55 @@ def setUp(self):
         self.gradient = self.calc_gradient()
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)
 
     def calc_gradient(self):
         x = self.inputs["X"]
         grad = np.ones(x.shape, dtype=x.dtype)
         return grad,
 
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', user_defined_grads=self.gradient)
+        self.check_grad(
+            ['X'], 'Out', user_defined_grads=self.gradient, check_eager=True)
 
 
 class TestSumOp5D(OpTest):
     def setUp(self):
+        self.python_api = paddle.sum
         self.op_type = "reduce_sum"
         self.inputs = {
             'X': np.random.random((1, 2, 5, 6, 10)).astype("float64")
         }
         self.attrs = {'dim': [0]}
         self.outputs = {'Out': self.inputs['X'].sum(axis=0)}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)
 
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=True)
 
 
 class TestSumOp6D(OpTest):
     def setUp(self):
+        self.python_api = paddle.sum
         self.op_type = "reduce_sum"
         self.inputs = {
             'X': np.random.random((1, 1, 2, 5, 6, 10)).astype("float64")
         }
         self.attrs = {'dim': [0]}
         self.outputs = {'Out': self.inputs['X'].sum(axis=0)}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)
 
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=True)
 
 
 class TestSumOp8D(OpTest):
     def setUp(self):
+        self.python_api = paddle.sum
         self.op_type = "reduce_sum"
         self.inputs = {
             'X': np.random.random((1, 3, 1, 2, 1, 4, 3, 10)).astype("float64")
@@ -158,10 +173,10 @@ def setUp(self):
         self.outputs = {'Out': self.inputs['X'].sum(axis=(0, 3))}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)
 
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=True)
 
 
 @skip_check_grad_ci(
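
Note: for context, a minimal sketch (not part of this PR) of what `check_eager=True` exercises once `self.python_api = paddle.sum` is registered: in eager mode the forward result must match NumPy, and the gradient of a plain sum is all ones, matching `calc_gradient`. Shapes mirror `TestSumOp`; assumes a paddle build with eager mode available.

```python
import numpy as np
import paddle

x_np = np.random.random((5, 6, 10)).astype("float64")
x = paddle.to_tensor(x_np, stop_gradient=False)

# forward: eager kernel vs. NumPy reference
out = paddle.sum(x, axis=0)
np.testing.assert_allclose(out.numpy(), x_np.sum(axis=0), rtol=1e-10)

# backward: d(sum)/dx broadcasts the upstream ones back to x's shape
out.backward(paddle.ones_like(out))
np.testing.assert_allclose(x.grad.numpy(), np.ones_like(x_np))
```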
python/paddle/tensor/math.py (12 additions, 1 deletion)

@@ -903,7 +903,18 @@ def get_dtype(x, dtype):
         return (False, src_type)
 
     dtype_flag, dtype = get_dtype(x, dtype)
-    if paddle.in_dynamic_mode():
+
+    if in_dygraph_mode():
+        if reduce_all_flag:
+            axis = range(len(x.shape))
+        else:
+            axis = axis if axis != None and axis != [] else [0]
+
+        out_dtype = convert_np_dtype_to_dtype_(dtype)
+        out = _C_ops.final_state_sum(x, axis, out_dtype, keepdim)
+        return out
+
+    if _in_legacy_dygraph():
         axis = axis if axis != None and axis != [] else [0]
         if dtype_flag:
             return _C_ops.reduce_sum(x, 'dim', axis, 'keep_dim', keepdim,
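
Note: a standalone sketch of the axis normalization the new `in_dygraph_mode()` branch performs before calling the generated kernel. `_normalize_sum_axis` is a hypothetical helper written here only for illustration; the PR inlines this logic.

```python
def _normalize_sum_axis(ndim, axis, reduce_all_flag):
    """Return the concrete list of axes the eager sum kernel reduces over."""
    if reduce_all_flag:
        # reduce over every dimension of x
        return list(range(ndim))
    # an unset or empty axis that is not reduce_all falls back to the first dim
    return axis if axis is not None and axis != [] else [0]

assert _normalize_sum_axis(3, None, True) == [0, 1, 2]
assert _normalize_sum_axis(3, [1, 2], False) == [1, 2]
assert _normalize_sum_axis(3, [], False) == [0]
```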
python/paddle/utils/code_gen/api.yaml (3 additions, 2 deletions)

@@ -1367,13 +1367,14 @@
 # no_need_buffer : x, y
 
 - api : sum
-  args : (Tensor x, int64_t[] axis={}, DataType dtype=DataType::UNDEFINED, bool keep_dim=false)
-  output : Tensor
+  args : (Tensor x, int64_t[] dims={}, DataType out_dtype=paddle::experimental::DataType::UNDEFINED, bool keep_dim=false)
+  output : Tensor(out)
   infer_meta :
     func : SumInferMeta
   kernel :
     func : sum
     data_type : x
+  backward : sum_grad
 
 # take_along_axis
 - api : take_along_axis
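
Note: the renamed yaml arguments (`axis` -> `dims`, `dtype` -> `out_dtype`) only affect the generated final-state signature; the user-facing keywords of `paddle.sum` are unchanged. A quick hedged check of how those keywords line up with the yaml signature:

```python
import numpy as np
import paddle

x = paddle.to_tensor(np.random.random((2, 3)).astype("float32"))

# axis -> dims, dtype -> out_dtype, keepdim -> keep_dim in the yaml entry
out = paddle.sum(x, axis=[1], dtype="float64", keepdim=True)
print(out.shape)  # [2, 1]
print(out.dtype)  # paddle.float64
```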
python/paddle/utils/code_gen/backward.yaml (10 additions, 0 deletions)

@@ -949,6 +949,16 @@
   kernel :
     func : subtract_grad
 
+- backward_api : sum_grad
+  forward : sum (Tensor x, int64_t[] dims={}, DataType out_dtype=paddle::experimental::DataType::UNDEFINED, bool keep_dim=false) -> Tensor(out)
+  args : (Tensor x, Tensor out_grad, int64_t[] dims, bool keep_dim, bool reduce_all=false)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : sum_grad
+
 - backward_api : take_along_axis_grad
   forward : take_along_axis (Tensor x, Tensor index, int axis) -> Tensor(out)
   args : (Tensor x, Tensor index, Tensor out_grad, int axis)
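
Note: `sum_grad` takes `x` only so `UnchangedInferMeta` with `param : [x]` can recover the input's shape and dtype; the gradient itself is `out_grad` broadcast back over the reduced dims. A small sketch (not from this PR) of the behaviour the new entry must produce:

```python
import numpy as np
import paddle

x = paddle.to_tensor(
    np.random.random((2, 5, 10)).astype("float32"), stop_gradient=False)
out = paddle.sum(x, axis=[0, 2])

# every element of x contributes exactly once, so the upstream ones are
# broadcast back to x's full shape
out.backward(paddle.ones_like(out))
np.testing.assert_allclose(x.grad.numpy(), np.ones((2, 5, 10), "float32"))
```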